author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-07 02:04:07 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-07 02:04:07 +0000
commit    1221c736f9a90756d47ea6d28320b6b83602dd2a (patch)
tree      b453ba7b1393205258c9b098a773b4330984672f /debian/patches
parent    Adding upstream version 2.4.38. (diff)
download  apache2-f35b715de7e7c7bbfee87ecb39ca91936e294a35.tar.xz
          apache2-f35b715de7e7c7bbfee87ecb39ca91936e294a35.zip
Adding debian version 2.4.38-3+deb10u8. (tag: debian/2.4.38-3+deb10u8)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'debian/patches')
-rw-r--r--  debian/patches/CVE-2019-0196.patch | 27
-rw-r--r--  debian/patches/CVE-2019-0197.patch | 93
-rw-r--r--  debian/patches/CVE-2019-0211.patch | 249
-rw-r--r--  debian/patches/CVE-2019-0215.patch | 52
-rw-r--r--  debian/patches/CVE-2019-0217.patch | 147
-rw-r--r--  debian/patches/CVE-2019-0220-1.patch | 278
-rw-r--r--  debian/patches/CVE-2019-0220-2.patch | 50
-rw-r--r--  debian/patches/CVE-2019-0220-3.patch | 43
-rw-r--r--  debian/patches/CVE-2019-10092.patch | 193
-rw-r--r--  debian/patches/CVE-2019-10097.patch | 72
-rw-r--r--  debian/patches/CVE-2019-10098.patch | 20
-rw-r--r--  debian/patches/CVE-2020-11984.patch | 45
-rw-r--r--  debian/patches/CVE-2020-1927.patch | 92
-rw-r--r--  debian/patches/CVE-2020-1934.patch | 75
-rw-r--r--  debian/patches/CVE-2020-35452.patch | 27
-rw-r--r--  debian/patches/CVE-2021-26690.patch | 20
-rw-r--r--  debian/patches/CVE-2021-26691.patch | 18
-rw-r--r--  debian/patches/CVE-2021-30641.patch | 50
-rw-r--r--  debian/patches/CVE-2021-31618.patch | 20
-rw-r--r--  debian/patches/CVE-2021-34798.patch | 40
-rw-r--r--  debian/patches/CVE-2021-36160-2.patch | 32
-rw-r--r--  debian/patches/CVE-2021-36160.patch | 51
-rw-r--r--  debian/patches/CVE-2021-39275.patch | 35
-rw-r--r--  debian/patches/CVE-2021-40438.patch | 124
-rw-r--r--  debian/patches/CVE-2021-44224-1.patch | 206
-rw-r--r--  debian/patches/CVE-2021-44224-2.patch | 93
-rw-r--r--  debian/patches/CVE-2021-44790.patch | 18
-rw-r--r--  debian/patches/CVE-2022-22719.patch | 95
-rw-r--r--  debian/patches/CVE-2022-22720.patch | 190
-rw-r--r--  debian/patches/CVE-2022-22721.patch | 116
-rw-r--r--  debian/patches/CVE-2022-23943-1.patch | 360
-rw-r--r--  debian/patches/CVE-2022-23943-2.patch | 63
-rw-r--r--  debian/patches/CVE-2022-26377.patch | 39
-rw-r--r--  debian/patches/CVE-2022-28614.patch | 65
-rw-r--r--  debian/patches/CVE-2022-28615.patch | 35
-rw-r--r--  debian/patches/CVE-2022-29404.patch | 82
-rw-r--r--  debian/patches/CVE-2022-30522.patch | 561
-rw-r--r--  debian/patches/CVE-2022-30556.patch | 250
-rw-r--r--  debian/patches/CVE-2022-31813.patch | 242
-rw-r--r--  debian/patches/build_suexec-custom.patch | 69
-rw-r--r--  debian/patches/customize_apxs.patch | 220
-rw-r--r--  debian/patches/fhs_compliance.patch | 64
-rw-r--r--  debian/patches/import-http2-module-from-2.4.46.patch | 7588
-rw-r--r--  debian/patches/no_LD_LIBRARY_PATH.patch | 18
-rw-r--r--  debian/patches/reproducible_builds.diff | 40
-rw-r--r--  debian/patches/series | 51
-rw-r--r--  debian/patches/spelling-errors.patch | 196
-rw-r--r--  debian/patches/suexec-CVE-2007-1742.patch | 66
-rw-r--r--  debian/patches/suexec-custom.patch | 192
49 files changed, 12772 insertions, 0 deletions
diff --git a/debian/patches/CVE-2019-0196.patch b/debian/patches/CVE-2019-0196.patch
new file mode 100644
index 0000000..eaec989
--- /dev/null
+++ b/debian/patches/CVE-2019-0196.patch
@@ -0,0 +1,27 @@
+From 8de3c6f2a0df79d1476c89ec480a96f9282cea28 Mon Sep 17 00:00:00 2001
+From: Stefan Eissing <icing@apache.org>
+Date: Tue, 5 Feb 2019 11:52:28 +0000
+Subject: [PATCH] Merge of r1852986 from trunk:
+
+mod_http2: disentanglement of stream and request method.
+
+
+
+git-svn-id: https://svn.apache.org/repos/asf/httpd/httpd/branches/2.4.x@1852989 13f79535-47bb-0310-9956-ffa450edef68
+---
+ modules/http2/h2_request.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/modules/http2/h2_request.c b/modules/http2/h2_request.c
+index 8899c4feb75..5ee88e9679f 100644
+--- a/modules/http2/h2_request.c
++++ b/modules/http2/h2_request.c
+@@ -266,7 +266,7 @@ request_rec *h2_request_create_rec(const h2_request *req, conn_rec *c)
+
+ /* Time to populate r with the data we have. */
+ r->request_time = req->request_time;
+- r->method = req->method;
++ r->method = apr_pstrdup(r->pool, req->method);
+ /* Provide quick information about the request method as soon as known */
+ r->method_number = ap_method_number_of(r->method);
+ if (r->method_number == M_GET && r->method[0] == 'H') {
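
Note on CVE-2019-0196: the one-line change above makes the request_rec own its method string (apr_pstrdup into r->pool) instead of aliasing memory owned by the HTTP/2 stream, whose allocation can be recycled while the request lives on. A minimal standalone sketch of that ownership rule, with strdup standing in for apr_pstrdup and simplified stand-in structs (not the httpd ones):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Two lifetimes: the stream request (freed early when the stream
     * is recycled) and the request_rec, which can outlive it. */
    struct stream_request { char *method; };
    struct request_rec    { const char *method; };

    int main(void)
    {
        struct stream_request *req = malloc(sizeof(*req));
        struct request_rec r;

        req->method = strdup("HEAD");

        /* Before the fix, r.method aliased req->method: a read-after-free
         * once the stream memory was reused.  After the fix the request
         * keeps its own copy: */
        r.method = strdup(req->method);

        free(req->method);              /* stream torn down */
        free(req);

        printf("method still valid: %s\n", r.method);
        free((void *)r.method);
        return 0;
    }
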
diff --git a/debian/patches/CVE-2019-0197.patch b/debian/patches/CVE-2019-0197.patch
new file mode 100644
index 0000000..92d2943
--- /dev/null
+++ b/debian/patches/CVE-2019-0197.patch
@@ -0,0 +1,93 @@
+# https://svn.apache.org/r1855406
+--- apache2.orig/modules/http2/h2_conn.c
++++ apache2/modules/http2/h2_conn.c
+@@ -305,6 +305,10 @@ conn_rec *h2_slave_create(conn_rec *mast
+ c->notes = apr_table_make(pool, 5);
+ c->input_filters = NULL;
+ c->output_filters = NULL;
++ c->keepalives = 0;
++#if AP_MODULE_MAGIC_AT_LEAST(20180903, 1)
++ c->filter_conn_ctx = NULL;
++#endif
+ c->bucket_alloc = apr_bucket_alloc_create(pool);
+ c->data_in_input_filters = 0;
+ c->data_in_output_filters = 0;
+@@ -332,16 +336,15 @@ conn_rec *h2_slave_create(conn_rec *mast
+ ap_set_module_config(c->conn_config, mpm, cfg);
+ }
+
+- ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, c,
+- "h2_stream(%ld-%d): created slave", master->id, slave_id);
++ ap_log_cerror(APLOG_MARK, APLOG_TRACE3, 0, c,
++ "h2_slave(%s): created", c->log_id);
+ return c;
+ }
+
+ void h2_slave_destroy(conn_rec *slave)
+ {
+- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, slave,
+- "h2_stream(%s): destroy slave",
+- apr_table_get(slave->notes, H2_TASK_ID_NOTE));
++ ap_log_cerror(APLOG_MARK, APLOG_TRACE3, 0, slave,
++ "h2_slave(%s): destroy", slave->log_id);
+ slave->sbh = NULL;
+ apr_pool_destroy(slave->pool);
+ }
+@@ -365,6 +368,7 @@ apr_status_t h2_slave_run_pre_connection
+ slave->keepalive = AP_CONN_CLOSE;
+ return ap_run_pre_connection(slave, csd);
+ }
++ ap_assert(slave->output_filters);
+ return APR_SUCCESS;
+ }
+
+--- apache2.orig/modules/http2/h2_mplx.c
++++ apache2/modules/http2/h2_mplx.c
+@@ -327,7 +327,8 @@ static int stream_destroy_iter(void *ctx
+ && !task->rst_error);
+ }
+
+- if (reuse_slave && slave->keepalive == AP_CONN_KEEPALIVE) {
++ task->c = NULL;
++ if (reuse_slave) {
+ h2_beam_log(task->output.beam, m->c, APLOG_DEBUG,
+ APLOGNO(03385) "h2_task_destroy, reuse slave");
+ h2_task_destroy(task);
+@@ -437,6 +438,8 @@ void h2_mplx_release_and_join(h2_mplx *m
+ apr_status_t status;
+ int i, wait_secs = 60;
+
++ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, m->c,
++ "h2_mplx(%ld): start release", m->id);
+ /* How to shut down a h2 connection:
+ * 0. abort and tell the workers that no more tasks will come from us */
+ m->aborted = 1;
+@@ -977,6 +980,9 @@ static apr_status_t unschedule_slow_task
+ */
+ n = (m->tasks_active - m->limit_active - (int)h2_ihash_count(m->sredo));
+ while (n > 0 && (stream = get_latest_repeatable_unsubmitted_stream(m))) {
++ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, m->c,
++ "h2_mplx(%s): unschedule, resetting task for redo later",
++ stream->task->id);
+ h2_task_rst(stream->task, H2_ERR_CANCEL);
+ h2_ihash_add(m->sredo, stream);
+ --n;
+--- apache2.orig/modules/http2/h2_task.c
++++ apache2/modules/http2/h2_task.c
+@@ -504,7 +504,7 @@ static int h2_task_pre_conn(conn_rec* c,
+ (void)arg;
+ if (h2_ctx_is_task(ctx)) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, c,
+- "h2_h2, pre_connection, found stream task");
++ "h2_slave(%s), pre_connection, adding filters", c->log_id);
+ ap_add_input_filter("H2_SLAVE_IN", NULL, NULL, c);
+ ap_add_output_filter("H2_PARSE_H1", NULL, NULL, c);
+ ap_add_output_filter("H2_SLAVE_OUT", NULL, NULL, c);
+@@ -545,7 +545,6 @@ h2_task *h2_task_create(conn_rec *slave,
+ void h2_task_destroy(h2_task *task)
+ {
+ if (task->output.beam) {
+- h2_beam_log(task->output.beam, task->c, APLOG_TRACE2, "task_destroy");
+ h2_beam_destroy(task->output.beam);
+ task->output.beam = NULL;
+ }
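
Note on CVE-2019-0197: a slave connection recycled for a later stream kept state (keepalives, filter context) from its previous use, which could crash an Upgrade-to-h2 request that was not the first on the connection; the patch re-initializes those fields and asserts the output filters are in place. The reset-before-reuse shape in miniature, with a simplified stand-in struct:

    #include <stdio.h>

    struct conn {
        int keepalives;
        void *filter_ctx;
    };

    static void reset_for_reuse(struct conn *c)
    {
        c->keepalives = 0;      /* added by the patch */
        c->filter_ctx = NULL;   /* added by the patch (newer MMN only) */
    }

    int main(void)
    {
        struct conn c = { .keepalives = 7 };
        c.filter_ctx = &c;          /* pretend: leftovers from last use */

        reset_for_reuse(&c);        /* safe to hand to the next stream */
        printf("keepalives=%d ctx=%p\n", c.keepalives, c.filter_ctx);
        return 0;
    }
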
diff --git a/debian/patches/CVE-2019-0211.patch b/debian/patches/CVE-2019-0211.patch
new file mode 100644
index 0000000..1b69f45
--- /dev/null
+++ b/debian/patches/CVE-2019-0211.patch
@@ -0,0 +1,249 @@
+From df7edb5ddae609ea1fd4285f7439f0d590d97b37 Mon Sep 17 00:00:00 2001
+From: Yann Ylavic <ylavic@apache.org>
+Date: Wed, 13 Mar 2019 08:59:54 +0000
+Subject: [PATCH] Merge r1855306 from trunk:
+
+MPMs unix: bind the bucket number of each child to its slot number
+
+We need not remember each child's bucket number in SHM for restarts, for the
+lifetime of the httpd main process the bucket number can be bound to the slot
+number such that: bucket = slot % num_buckets.
+
+This both simplifies the logic and helps children maintenance per bucket in
+threaded MPMs, where previously perform_idle_server_maintenance() could create
+or kill children processes for the buckets it was not in charge of.
+
+Submitted by: ylavic
+Reviewed by: ylavic, rpluem, jorton
+
+
+git-svn-id: https://svn.apache.org/repos/asf/httpd/httpd/branches/2.4.x@1855378 13f79535-47bb-0310-9956-ffa450edef68
+---
+ CHANGES | 3 +++
+ include/scoreboard.h | 4 +++-
+ server/mpm/event/event.c | 13 ++++++++-----
+ server/mpm/prefork/prefork.c | 19 +++++++------------
+ server/mpm/worker/worker.c | 10 ++++++----
+ 5 files changed, 27 insertions(+), 22 deletions(-)
+
+#diff --git a/CHANGES b/CHANGES
+#index e79251389d5..6b560802119 100644
+#--- a/CHANGES
+#+++ b/CHANGES
+#@@ -1,6 +1,9 @@
+# -*- coding: utf-8 -*-
+# Changes with Apache 2.4.39
+#
+#+ *) MPMs unix: bind the bucket number of each child to its slot number, for a
+#+ more efficient per bucket maintenance. [Yann Ylavic]
+#+
+# *) mod_auth_digest: Fix a race condition. Authentication with valid
+# credentials could be refused in case of concurrent accesses from
+# different users. PR 63124. [Simon Kappel <simon.kappel axis.com>]
+diff --git a/include/scoreboard.h b/include/scoreboard.h
+index 9376da246b0..92d198d6de1 100644
+--- a/include/scoreboard.h
++++ b/include/scoreboard.h
+@@ -148,7 +148,9 @@ struct process_score {
+ apr_uint32_t lingering_close; /* async connections in lingering close */
+ apr_uint32_t keep_alive; /* async connections in keep alive */
+ apr_uint32_t suspended; /* connections suspended by some module */
+- int bucket; /* Listener bucket used by this child */
++ int bucket; /* Listener bucket used by this child; this field is DEPRECATED
++ * and no longer updated by the MPMs (i.e. always zero).
++ */
+ };
+
+ /* Scoreboard is now in 'local' memory, since it isn't updated once created,
+diff --git a/server/mpm/event/event.c b/server/mpm/event/event.c
+index 4cfb09c5b28..5e5af339adc 100644
+--- a/server/mpm/event/event.c
++++ b/server/mpm/event/event.c
+@@ -2696,7 +2696,6 @@ static int make_child(server_rec * s, int slot, int bucket)
+
+ ap_scoreboard_image->parent[slot].quiescing = 0;
+ ap_scoreboard_image->parent[slot].not_accepting = 0;
+- ap_scoreboard_image->parent[slot].bucket = bucket;
+ event_note_child_started(slot, pid);
+ active_daemons++;
+ retained->total_daemons++;
+@@ -2735,6 +2734,7 @@ static void perform_idle_server_maintenance(int child_bucket, int num_buckets)
+ * that threads_per_child is always > 0 */
+ int status = SERVER_DEAD;
+ int child_threads_active = 0;
++ int bucket = i % num_buckets;
+
+ if (i >= retained->max_daemons_limit &&
+ free_length == retained->idle_spawn_rate[child_bucket]) {
+@@ -2758,7 +2758,7 @@ static void perform_idle_server_maintenance(int child_bucket, int num_buckets)
+ */
+ if (status <= SERVER_READY && !ps->quiescing && !ps->not_accepting
+ && ps->generation == retained->mpm->my_generation
+- && ps->bucket == child_bucket)
++ && bucket == child_bucket)
+ {
+ ++idle_thread_count;
+ }
+@@ -2769,7 +2769,9 @@ static void perform_idle_server_maintenance(int child_bucket, int num_buckets)
+ last_non_dead = i;
+ }
+ active_thread_count += child_threads_active;
+- if (!ps->pid && free_length < retained->idle_spawn_rate[child_bucket])
++ if (!ps->pid
++ && bucket == child_bucket
++ && free_length < retained->idle_spawn_rate[child_bucket])
+ free_slots[free_length++] = i;
+ else if (child_threads_active == threads_per_child)
+ had_healthy_child = 1;
+@@ -2962,13 +2964,14 @@ static void server_main_loop(int remaining_children_to_start, int num_buckets)
+ retained->total_daemons--;
+ if (processed_status == APEXIT_CHILDSICK) {
+ /* resource shortage, minimize the fork rate */
+- retained->idle_spawn_rate[ps->bucket] = 1;
++ retained->idle_spawn_rate[child_slot % num_buckets] = 1;
+ }
+ else if (remaining_children_to_start) {
+ /* we're still doing a 1-for-1 replacement of dead
+ * children with new children
+ */
+- make_child(ap_server_conf, child_slot, ps->bucket);
++ make_child(ap_server_conf, child_slot,
++ child_slot % num_buckets);
+ --remaining_children_to_start;
+ }
+ }
+diff --git a/server/mpm/prefork/prefork.c b/server/mpm/prefork/prefork.c
+index 8efda72ee18..7c006257301 100644
+--- a/server/mpm/prefork/prefork.c
++++ b/server/mpm/prefork/prefork.c
+@@ -637,8 +637,9 @@ static void child_main(int child_num_arg, int child_bucket)
+ }
+
+
+-static int make_child(server_rec *s, int slot, int bucket)
++static int make_child(server_rec *s, int slot)
+ {
++ int bucket = slot % retained->mpm->num_buckets;
+ int pid;
+
+ if (slot + 1 > retained->max_daemons_limit) {
+@@ -716,7 +717,6 @@ static int make_child(server_rec *s, int slot, int bucket)
+ child_main(slot, bucket);
+ }
+
+- ap_scoreboard_image->parent[slot].bucket = bucket;
+ prefork_note_child_started(slot, pid);
+
+ return 0;
+@@ -732,7 +732,7 @@ static void startup_children(int number_to_start)
+ if (ap_scoreboard_image->servers[i][0].status != SERVER_DEAD) {
+ continue;
+ }
+- if (make_child(ap_server_conf, i, i % retained->mpm->num_buckets) < 0) {
++ if (make_child(ap_server_conf, i) < 0) {
+ break;
+ }
+ --number_to_start;
+@@ -741,8 +741,6 @@ static void startup_children(int number_to_start)
+
+ static void perform_idle_server_maintenance(apr_pool_t *p)
+ {
+- static int bucket_make_child_record = -1;
+- static int bucket_kill_child_record = -1;
+ int i;
+ int idle_count;
+ worker_score *ws;
+@@ -789,6 +787,7 @@ static void perform_idle_server_maintenance(apr_pool_t *p)
+ }
+ retained->max_daemons_limit = last_non_dead + 1;
+ if (idle_count > ap_daemons_max_free) {
++ static int bucket_kill_child_record = -1;
+ /* kill off one child... we use the pod because that'll cause it to
+ * shut down gracefully, in case it happened to pick up a request
+ * while we were counting
+@@ -819,10 +818,7 @@ static void perform_idle_server_maintenance(apr_pool_t *p)
+ idle_count, total_non_dead);
+ }
+ for (i = 0; i < free_length; ++i) {
+- bucket_make_child_record++;
+- bucket_make_child_record %= retained->mpm->num_buckets;
+- make_child(ap_server_conf, free_slots[i],
+- bucket_make_child_record);
++ make_child(ap_server_conf, free_slots[i]);
+ }
+ /* the next time around we want to spawn twice as many if this
+ * wasn't good enough, but not if we've just done a graceful
+@@ -867,7 +863,7 @@ static int prefork_run(apr_pool_t *_pconf, apr_pool_t *plog, server_rec *s)
+
+ if (one_process) {
+ AP_MONCONTROL(1);
+- make_child(ap_server_conf, 0, 0);
++ make_child(ap_server_conf, 0);
+ /* NOTREACHED */
+ ap_assert(0);
+ return !OK;
+@@ -976,8 +972,7 @@ static int prefork_run(apr_pool_t *_pconf, apr_pool_t *plog, server_rec *s)
+ /* we're still doing a 1-for-1 replacement of dead
+ * children with new children
+ */
+- make_child(ap_server_conf, child_slot,
+- ap_get_scoreboard_process(child_slot)->bucket);
++ make_child(ap_server_conf, child_slot);
+ --remaining_children_to_start;
+ }
+ #if APR_HAS_OTHER_CHILD
+diff --git a/server/mpm/worker/worker.c b/server/mpm/worker/worker.c
+index 8012fe29d8d..a92794245c5 100644
+--- a/server/mpm/worker/worker.c
++++ b/server/mpm/worker/worker.c
+@@ -1339,7 +1339,6 @@ static int make_child(server_rec *s, int slot, int bucket)
+ worker_note_child_lost_slot(slot, pid);
+ }
+ ap_scoreboard_image->parent[slot].quiescing = 0;
+- ap_scoreboard_image->parent[slot].bucket = bucket;
+ worker_note_child_started(slot, pid);
+ return 0;
+ }
+@@ -1388,6 +1387,7 @@ static void perform_idle_server_maintenance(int child_bucket, int num_buckets)
+ int any_dead_threads = 0;
+ int all_dead_threads = 1;
+ int child_threads_active = 0;
++ int bucket = i % num_buckets;
+
+ if (i >= retained->max_daemons_limit &&
+ totally_free_length == retained->idle_spawn_rate[child_bucket]) {
+@@ -1420,7 +1420,7 @@ static void perform_idle_server_maintenance(int child_bucket, int num_buckets)
+ if (status <= SERVER_READY &&
+ !ps->quiescing &&
+ ps->generation == retained->mpm->my_generation &&
+- ps->bucket == child_bucket) {
++ bucket == child_bucket) {
+ ++idle_thread_count;
+ }
+ if (status >= SERVER_READY && status < SERVER_GRACEFUL) {
+@@ -1430,6 +1430,7 @@ static void perform_idle_server_maintenance(int child_bucket, int num_buckets)
+ }
+ active_thread_count += child_threads_active;
+ if (any_dead_threads
++ && bucket == child_bucket
+ && totally_free_length < retained->idle_spawn_rate[child_bucket]
+ && free_length < MAX_SPAWN_RATE / num_buckets
+ && (!ps->pid /* no process in the slot */
+@@ -1615,14 +1616,15 @@ static void server_main_loop(int remaining_children_to_start, int num_buckets)
+ ps->quiescing = 0;
+ if (processed_status == APEXIT_CHILDSICK) {
+ /* resource shortage, minimize the fork rate */
+- retained->idle_spawn_rate[ps->bucket] = 1;
++ retained->idle_spawn_rate[child_slot % num_buckets] = 1;
+ }
+ else if (remaining_children_to_start
+ && child_slot < ap_daemons_limit) {
+ /* we're still doing a 1-for-1 replacement of dead
+ * children with new children
+ */
+- make_child(ap_server_conf, child_slot, ps->bucket);
++ make_child(ap_server_conf, child_slot,
++ child_slot % num_buckets);
+ --remaining_children_to_start;
+ }
+ }
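
Note on CVE-2019-0211: the heart of the fix is deriving each child's listener bucket from its scoreboard slot (bucket = slot % num_buckets) for the lifetime of the main process, instead of reading a bucket index back from shared memory that a compromised, unprivileged child could have corrupted to influence the privileged parent. The invariant in miniature (numbers invented):

    #include <stdio.h>

    int main(void)
    {
        const int num_buckets = 3, max_slots = 10;

        for (int slot = 0; slot < max_slots; slot++) {
            int bucket = slot % num_buckets;   /* bound bucket to slot */
            printf("slot %2d -> bucket %d\n", slot, bucket);
        }
        /* perform_idle_server_maintenance(child_bucket, ...) then only
         * considers slots with slot % num_buckets == child_bucket, so
         * each bucket maintains exactly its own children. */
        return 0;
    }
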
diff --git a/debian/patches/CVE-2019-0215.patch b/debian/patches/CVE-2019-0215.patch
new file mode 100644
index 0000000..6c0461e
--- /dev/null
+++ b/debian/patches/CVE-2019-0215.patch
@@ -0,0 +1,52 @@
+From 84edf5f49db23ced03259812bbf9426685f7d82a Mon Sep 17 00:00:00 2001
+From: Joe Orton <jorton@apache.org>
+Date: Wed, 20 Mar 2019 15:45:16 +0000
+Subject: [PATCH] Merge r1855849 from trunk:
+
+* modules/ssl/ssl_engine_kernel.c (ssl_hook_Access_modern): Correctly
+ restore SSL verify state after PHA failure in TLSv1.3.
+
+Submitted by: Michael Kaufmann <mail michael-kaufmann.ch>
+Reviewed by: jorton, covener, jim
+
+
+git-svn-id: https://svn.apache.org/repos/asf/httpd/httpd/branches/2.4.x@1855917 13f79535-47bb-0310-9956-ffa450edef68
+---
+ CHANGES | 3 +++
+ modules/ssl/ssl_engine_kernel.c | 2 ++
+ 2 files changed, 5 insertions(+)
+
+#diff --git a/CHANGES b/CHANGES
+#index 6b03eadfa07..6f20d688ece 100644
+#--- a/CHANGES
+#+++ b/CHANGES
+#@@ -1,6 +1,9 @@
+# -*- coding: utf-8 -*-
+# Changes with Apache 2.4.39
+#
+#+ *) mod_ssl: Correctly restore SSL verify state after TLSv1.3 PHA failure.
+#+ [Michael Kaufmann <mail michael-kaufmann.ch>]
+#+
+# *) mod_log_config: Support %{c}h for conn-hostname, %h for useragent_host
+# PR 55348
+#
+Index: apache2-2.4.38/modules/ssl/ssl_engine_kernel.c
+===================================================================
+--- apache2-2.4.38.orig/modules/ssl/ssl_engine_kernel.c 2019-04-03 14:31:14.279214679 -0400
++++ apache2-2.4.38/modules/ssl/ssl_engine_kernel.c 2019-04-03 14:31:14.279214679 -0400
+@@ -1154,6 +1154,7 @@ static int ssl_hook_Access_modern(reques
+ ssl_log_ssl_error(SSLLOG_MARK, APLOG_ERR, r->server);
+ apr_table_setn(r->notes, "error-notes",
+ "Reason: Cannot perform Post-Handshake Authentication.<br />");
++ SSL_set_verify(ssl, vmode_inplace, NULL);
+ return HTTP_FORBIDDEN;
+ }
+
+@@ -1175,6 +1176,7 @@ static int ssl_hook_Access_modern(reques
+ * Finally check for acceptable renegotiation results
+ */
+ if (OK != (rc = ssl_check_post_client_verify(r, sc, dc, sslconn, ssl))) {
++ SSL_set_verify(ssl, vmode_inplace, NULL);
+ return rc;
+ }
+ }
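
Note on CVE-2019-0215: both hunks add the same thing — restore the saved verify mode on the error paths out of the post-handshake-authentication block, so a failed PHA in TLSv1.3 does not leave the connection in a stale verification state that later requests could slip through. The save/restore shape, with set_verify()/try_pha() as hypothetical stand-ins for the OpenSSL calls in ssl_engine_kernel.c:

    #include <stdio.h>

    static int verify_mode = 0;                 /* 0 = none, 1 = require cert */
    static void set_verify(int m) { verify_mode = m; }
    static int try_pha(int should_fail) { return should_fail ? -1 : 0; }

    static int access_check(int pha_fails)
    {
        int vmode_inplace = verify_mode;        /* remember current mode */

        set_verify(1);                          /* tighten for re-auth */
        if (try_pha(pha_fails) != 0) {
            set_verify(vmode_inplace);          /* the fix: restore on failure */
            return 403;
        }
        set_verify(vmode_inplace);
        return 200;
    }

    int main(void)
    {
        int status = access_check(1);
        printf("status %d, verify_mode after failure: %d\n",
               status, verify_mode);            /* back to 0, not stuck at 1 */
        return 0;
    }
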
diff --git a/debian/patches/CVE-2019-0217.patch b/debian/patches/CVE-2019-0217.patch
new file mode 100644
index 0000000..e8f1090
--- /dev/null
+++ b/debian/patches/CVE-2019-0217.patch
@@ -0,0 +1,147 @@
+From 44b3ddc560c490c60600998fa2bf59b142d08e05 Mon Sep 17 00:00:00 2001
+From: Joe Orton <jorton@apache.org>
+Date: Tue, 12 Mar 2019 09:24:26 +0000
+Subject: [PATCH] Merge r1853190 from trunk:
+
+Fix a race condition. Authentication with valid credentials could be
+refused in case of concurrent accesses from different users.
+
+PR: 63124
+Submitted by: Simon Kappel <simon.kappel axis.com>
+Reviewed by: jailletc36, icing, jorton
+
+
+git-svn-id: https://svn.apache.org/repos/asf/httpd/httpd/branches/2.4.x@1855298 13f79535-47bb-0310-9956-ffa450edef68
+---
+ CHANGES | 4 ++++
+ modules/aaa/mod_auth_digest.c | 26 ++++++++++++--------------
+ 2 files changed, 16 insertions(+), 14 deletions(-)
+
+#diff --git a/CHANGES b/CHANGES
+#index 08fc740db30..e79251389d5 100644
+#--- a/CHANGES
+#+++ b/CHANGES
+#@@ -1,6 +1,10 @@
+# -*- coding: utf-8 -*-
+# Changes with Apache 2.4.39
+#
+#+ *) mod_auth_digest: Fix a race condition. Authentication with valid
+#+ credentials could be refused in case of concurrent accesses from
+#+ different users. PR 63124. [Simon Kappel <simon.kappel axis.com>]
+#+
+# *) mod_proxy_wstunnel: Fix websocket proxy over UDS.
+# PR 62932 <pavel dcmsys.com>
+#
+diff --git a/modules/aaa/mod_auth_digest.c b/modules/aaa/mod_auth_digest.c
+index a67f06986f2..b76094114dd 100644
+--- a/modules/aaa/mod_auth_digest.c
++++ b/modules/aaa/mod_auth_digest.c
+@@ -92,7 +92,6 @@ typedef struct digest_config_struct {
+ int check_nc;
+ const char *algorithm;
+ char *uri_list;
+- const char *ha1;
+ } digest_config_rec;
+
+
+@@ -153,6 +152,7 @@ typedef struct digest_header_struct {
+ apr_time_t nonce_time;
+ enum hdr_sts auth_hdr_sts;
+ int needed_auth;
++ const char *ha1;
+ client_entry *client;
+ } digest_header_rec;
+
+@@ -1304,7 +1304,7 @@ static int hook_note_digest_auth_failure(request_rec *r, const char *auth_type)
+ */
+
+ static authn_status get_hash(request_rec *r, const char *user,
+- digest_config_rec *conf)
++ digest_config_rec *conf, const char **rethash)
+ {
+ authn_status auth_result;
+ char *password;
+@@ -1356,7 +1356,7 @@ static authn_status get_hash(request_rec *r, const char *user,
+ } while (current_provider);
+
+ if (auth_result == AUTH_USER_FOUND) {
+- conf->ha1 = password;
++ *rethash = password;
+ }
+
+ return auth_result;
+@@ -1483,25 +1483,24 @@ static int check_nonce(request_rec *r, digest_header_rec *resp,
+
+ /* RFC-2069 */
+ static const char *old_digest(const request_rec *r,
+- const digest_header_rec *resp, const char *ha1)
++ const digest_header_rec *resp)
+ {
+ const char *ha2;
+
+ ha2 = ap_md5(r->pool, (unsigned char *)apr_pstrcat(r->pool, resp->method, ":",
+ resp->uri, NULL));
+ return ap_md5(r->pool,
+- (unsigned char *)apr_pstrcat(r->pool, ha1, ":", resp->nonce,
+- ":", ha2, NULL));
++ (unsigned char *)apr_pstrcat(r->pool, resp->ha1, ":",
++ resp->nonce, ":", ha2, NULL));
+ }
+
+ /* RFC-2617 */
+ static const char *new_digest(const request_rec *r,
+- digest_header_rec *resp,
+- const digest_config_rec *conf)
++ digest_header_rec *resp)
+ {
+ const char *ha1, *ha2, *a2;
+
+- ha1 = conf->ha1;
++ ha1 = resp->ha1;
+
+ a2 = apr_pstrcat(r->pool, resp->method, ":", resp->uri, NULL);
+ ha2 = ap_md5(r->pool, (const unsigned char *)a2);
+@@ -1514,7 +1513,6 @@ static const char *new_digest(const request_rec *r,
+ NULL));
+ }
+
+-
+ static void copy_uri_components(apr_uri_t *dst,
+ apr_uri_t *src, request_rec *r) {
+ if (src->scheme && src->scheme[0] != '\0') {
+@@ -1759,7 +1757,7 @@ static int authenticate_digest_user(request_rec *r)
+ return HTTP_UNAUTHORIZED;
+ }
+
+- return_code = get_hash(r, r->user, conf);
++ return_code = get_hash(r, r->user, conf, &resp->ha1);
+
+ if (return_code == AUTH_USER_NOT_FOUND) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(01790)
+@@ -1789,7 +1787,7 @@ static int authenticate_digest_user(request_rec *r)
+
+ if (resp->message_qop == NULL) {
+ /* old (rfc-2069) style digest */
+- if (strcmp(resp->digest, old_digest(r, resp, conf->ha1))) {
++ if (strcmp(resp->digest, old_digest(r, resp))) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(01792)
+ "user %s: password mismatch: %s", r->user,
+ r->uri);
+@@ -1819,7 +1817,7 @@ static int authenticate_digest_user(request_rec *r)
+ return HTTP_UNAUTHORIZED;
+ }
+
+- exp_digest = new_digest(r, resp, conf);
++ exp_digest = new_digest(r, resp);
+ if (!exp_digest) {
+ /* we failed to allocate a client struct */
+ return HTTP_INTERNAL_SERVER_ERROR;
+@@ -1903,7 +1901,7 @@ static int add_auth_info(request_rec *r)
+
+ /* calculate rspauth attribute
+ */
+- ha1 = conf->ha1;
++ ha1 = resp->ha1;
+
+ a2 = apr_pstrcat(r->pool, ":", resp->uri, NULL);
+ ha2 = ap_md5(r->pool, (const unsigned char *)a2);
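
Note on CVE-2019-0217: the race was that the computed password hash (ha1) lived in the per-server digest_config_rec shared by every in-flight request, so two concurrent authentications could overwrite each other's hash between lookup and comparison. The fix moves ha1 into the per-request digest_header_rec and threads it through get_hash()/new_digest(). The shape of that change, with simplified stand-in structs:

    #include <stdio.h>
    #include <string.h>

    struct server_conf   { const char *ha1; };   /* shared: racy */
    struct request_state { const char *ha1; };   /* per-request: safe */

    static void get_hash(struct request_state *resp, const char *user)
    {
        /* each request writes only its own state */
        resp->ha1 = (strcmp(user, "alice") == 0) ? "hash-of-alice"
                                                 : "hash-of-bob";
    }

    int main(void)
    {
        struct request_state r1, r2;    /* two concurrent requests */
        get_hash(&r1, "alice");
        get_hash(&r2, "bob");           /* cannot clobber r1.ha1 */
        printf("%s / %s\n", r1.ha1, r2.ha1);
        return 0;
    }
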
diff --git a/debian/patches/CVE-2019-0220-1.patch b/debian/patches/CVE-2019-0220-1.patch
new file mode 100644
index 0000000..021c369
--- /dev/null
+++ b/debian/patches/CVE-2019-0220-1.patch
@@ -0,0 +1,278 @@
+From 9bc1917a27a2323e535aadb081e38172ae0e3fc2 Mon Sep 17 00:00:00 2001
+From: Stefan Eissing <icing@apache.org>
+Date: Mon, 18 Mar 2019 08:49:59 +0000
+Subject: [PATCH] Merge of r1855705 from trunk:
+
+core: merge consecutive slashes in the path
+
+
+
+git-svn-id: https://svn.apache.org/repos/asf/httpd/httpd/branches/2.4.x@1855737 13f79535-47bb-0310-9956-ffa450edef68
+---
+ CHANGES | 4 ++++
+ docs/manual/mod/core.xml | 26 ++++++++++++++++++++++++++
+ include/ap_mmn.h | 4 +++-
+ include/http_core.h | 2 +-
+ include/httpd.h | 14 ++++++++++++--
+ server/core.c | 13 +++++++++++++
+ server/request.c | 25 +++++++++----------------
+ server/util.c | 10 +++++++---
+ 8 files changed, 75 insertions(+), 23 deletions(-)
+
+#diff --git a/CHANGES b/CHANGES
+#index e3e8a98db24..9dd7045c232 100644
+#--- a/CHANGES
+#+++ b/CHANGES
+#@@ -1,6 +1,10 @@
+# -*- coding: utf-8 -*-
+# Changes with Apache 2.4.39
+#
+#+ *) core: new configuration option 'MergeSlashes on|off' that controls handling of
+#+ multiple, consecutive slash ('/') characters in the path component of the request URL.
+#+ [Eric Covener]
+#+
+# *) mod_http2: when SSL renegotiation is inhibited and a 403 ErrorDocument is
+# in play, the proper HTTP/2 stream reset did not trigger with H2_ERR_HTTP_1_1_REQUIRED.
+# Fixed. [Michael Kaufmann]
+#diff --git a/docs/manual/mod/core.xml b/docs/manual/mod/core.xml
+#index fc664116727..460b4367621 100644
+#--- a/docs/manual/mod/core.xml
+#+++ b/docs/manual/mod/core.xml
+#@@ -5138,4 +5138,30 @@ recognized methods to modules.</p>
+# <seealso><directive module="mod_allowmethods">AllowMethods</directive></seealso>
+# </directivesynopsis>
+#
+#+<directivesynopsis>
+#+<name>MergeSlashes</name>
+#+<description>Controls whether the server merges consecutive slashes in URLs.
+#+</description>
+#+<syntax>MergeSlashes ON|OFF</syntax>
+#+<default>MergeSlashes ON</default>
+#+<contextlist><context>server config</context><context>virtual host</context>
+#+</contextlist>
+#+<compatibility>Added in 2.5.1</compatibility>
+#+
+#+<usage>
+#+ <p>By default, the server merges (or collapses) multiple consecutive slash
+#+ ('/') characters in the path component of the request URL.</p>
+#+
+#+ <p>When mapping URL's to the filesystem, these multiple slashes are not
+#+ significant. However, URL's handled other ways, such as by CGI or proxy,
+#+ might prefer to retain the significance of multiple consecutive slashes.
+#+ In these cases <directive>MergeSlashes</directive> can be set to
+#+ <em>OFF</em> to retain the multiple consecutive slashes. In these
+#+ configurations, regular expressions used in the configuration file that match
+#+ the path component of the URL (<directive>LocationMatch</directive>,
+#+ <directive>RewriteRule</directive>, ...) need to take into account multiple
+#+ consecutive slashes.</p>
+#+</usage>
+#+</directivesynopsis>
+#+
+# </modulesynopsis>
+diff --git a/include/ap_mmn.h b/include/ap_mmn.h
+index 2167baa0325..4739f7f64d3 100644
+--- a/include/ap_mmn.h
++++ b/include/ap_mmn.h
+@@ -523,6 +523,8 @@
+ * 20120211.82 (2.4.35-dev) Add optional function declaration for
+ * ap_proxy_balancer_get_best_worker to mod_proxy.h.
+ * 20120211.83 (2.4.35-dev) Add client64 field to worker_score struct
++ * 20120211.84 (2.4.35-dev) Add ap_no2slash_ex() and merge_slashes to
++ * core_server_conf.
+ *
+ */
+
+@@ -531,7 +533,7 @@
+ #ifndef MODULE_MAGIC_NUMBER_MAJOR
+ #define MODULE_MAGIC_NUMBER_MAJOR 20120211
+ #endif
+-#define MODULE_MAGIC_NUMBER_MINOR 83 /* 0...n */
++#define MODULE_MAGIC_NUMBER_MINOR 84 /* 0...n */
+
+ /**
+ * Determine if the server's current MODULE_MAGIC_NUMBER is at least a
+diff --git a/include/http_core.h b/include/http_core.h
+index 35df5dc9601..8e109882244 100644
+--- a/include/http_core.h
++++ b/include/http_core.h
+@@ -740,7 +740,7 @@ typedef struct {
+ #define AP_HTTP_METHODS_LENIENT 1
+ #define AP_HTTP_METHODS_REGISTERED 2
+ char http_methods;
+-
++ unsigned int merge_slashes;
+ } core_server_config;
+
+ /* for AddOutputFiltersByType in core.c */
+diff --git a/include/httpd.h b/include/httpd.h
+index 65392f83546..99f7f041aea 100644
+--- a/include/httpd.h
++++ b/include/httpd.h
+@@ -1697,11 +1697,21 @@ AP_DECLARE(int) ap_unescape_url_keep2f(char *url, int decode_slashes);
+ AP_DECLARE(int) ap_unescape_urlencoded(char *query);
+
+ /**
+- * Convert all double slashes to single slashes
+- * @param name The string to convert
++ * Convert all double slashes to single slashes, except where significant
++ * to the filesystem on the current platform.
++ * @param name The string to convert, assumed to be a filesystem path
+ */
+ AP_DECLARE(void) ap_no2slash(char *name);
+
++/**
++ * Convert all double slashes to single slashes, except where significant
++ * to the filesystem on the current platform.
++ * @param name The string to convert
++ * @param is_fs_path if set to 0, the significance of any double-slashes is
++ * ignored.
++ */
++AP_DECLARE(void) ap_no2slash_ex(char *name, int is_fs_path);
++
+ /**
+ * Remove all ./ and xx/../ substrings from a file name. Also remove
+ * any leading ../ or /../ substrings.
+diff --git a/server/core.c b/server/core.c
+index e2a91c7a0c6..eacb54fecec 100644
+--- a/server/core.c
++++ b/server/core.c
+@@ -490,6 +490,7 @@ static void *create_core_server_config(apr_pool_t *a, server_rec *s)
+
+ conf->protocols = apr_array_make(a, 5, sizeof(const char *));
+ conf->protocols_honor_order = -1;
++ conf->merge_slashes = AP_CORE_CONFIG_UNSET;
+
+ return (void *)conf;
+ }
+@@ -555,6 +556,7 @@ static void *merge_core_server_configs(apr_pool_t *p, void *basev, void *virtv)
+ conf->protocols_honor_order = ((virt->protocols_honor_order < 0)?
+ base->protocols_honor_order :
+ virt->protocols_honor_order);
++ AP_CORE_MERGE_FLAG(merge_slashes, conf, base, virt);
+
+ return conf;
+ }
+@@ -1863,6 +1865,13 @@ static const char *set_qualify_redirect_url(cmd_parms *cmd, void *d_, int flag)
+ return NULL;
+ }
+
++static const char *set_core_server_flag(cmd_parms *cmd, void *s_, int flag)
++{
++ core_server_config *conf =
++ ap_get_core_module_config(cmd->server->module_config);
++ return ap_set_flag_slot(cmd, conf, flag);
++}
++
+ static const char *set_override_list(cmd_parms *cmd, void *d_, int argc, char *const argv[])
+ {
+ core_dir_config *d = d_;
+@@ -4562,6 +4571,10 @@ AP_INIT_ITERATE("HttpProtocolOptions", set_http_protocol_options, NULL, RSRC_CON
+ "'Unsafe' or 'Strict' (default). Sets HTTP acceptance rules"),
+ AP_INIT_ITERATE("RegisterHttpMethod", set_http_method, NULL, RSRC_CONF,
+ "Registers non-standard HTTP methods"),
++AP_INIT_FLAG("MergeSlashes", set_core_server_flag,
++ (void *)APR_OFFSETOF(core_server_config, merge_slashes),
++ RSRC_CONF,
++ "Controls whether consecutive slashes in the URI path are merged"),
+ { NULL }
+ };
+
+diff --git a/server/request.c b/server/request.c
+index dbe3e07f150..1ce8908824b 100644
+--- a/server/request.c
++++ b/server/request.c
+@@ -167,6 +167,8 @@ AP_DECLARE(int) ap_process_request_internal(request_rec *r)
+ int file_req = (r->main && r->filename);
+ int access_status;
+ core_dir_config *d;
++ core_server_config *sconf =
++ ap_get_core_module_config(r->server->module_config);
+
+ /* Ignore embedded %2F's in path for proxy requests */
+ if (!r->proxyreq && r->parsed_uri.path) {
+@@ -191,6 +193,10 @@ AP_DECLARE(int) ap_process_request_internal(request_rec *r)
+ }
+
+ ap_getparents(r->uri); /* OK --- shrinking transformations... */
++ if (sconf->merge_slashes != AP_CORE_CONFIG_OFF) {
++ ap_no2slash(r->uri);
++ ap_no2slash(r->parsed_uri.path);
++ }
+
+ /* All file subrequests are a huge pain... they cannot bubble through the
+ * next several steps. Only file subrequests are allowed an empty uri,
+@@ -1411,20 +1417,7 @@ AP_DECLARE(int) ap_location_walk(request_rec *r)
+
+ cache = prep_walk_cache(AP_NOTE_LOCATION_WALK, r);
+ cached = (cache->cached != NULL);
+-
+- /* Location and LocationMatch differ on their behaviour w.r.t. multiple
+- * slashes. Location matches multiple slashes with a single slash,
+- * LocationMatch doesn't. An exception, for backwards brokenness is
+- * absoluteURIs... in which case neither match multiple slashes.
+- */
+- if (r->uri[0] != '/') {
+- entry_uri = r->uri;
+- }
+- else {
+- char *uri = apr_pstrdup(r->pool, r->uri);
+- ap_no2slash(uri);
+- entry_uri = uri;
+- }
++ entry_uri = r->uri;
+
+ /* If we have an cache->cached location that matches r->uri,
+ * and the vhost's list of locations hasn't changed, we can skip
+@@ -1491,7 +1484,7 @@ AP_DECLARE(int) ap_location_walk(request_rec *r)
+ pmatch = apr_palloc(rxpool, nmatch*sizeof(ap_regmatch_t));
+ }
+
+- if (ap_regexec(entry_core->r, r->uri, nmatch, pmatch, 0)) {
++ if (ap_regexec(entry_core->r, entry_uri, nmatch, pmatch, 0)) {
+ continue;
+ }
+
+@@ -1501,7 +1494,7 @@ AP_DECLARE(int) ap_location_walk(request_rec *r)
+ apr_table_setn(r->subprocess_env,
+ ((const char **)entry_core->refs->elts)[i],
+ apr_pstrndup(r->pool,
+- r->uri + pmatch[i].rm_so,
++ entry_uri + pmatch[i].rm_so,
+ pmatch[i].rm_eo - pmatch[i].rm_so));
+ }
+ }
+diff --git a/server/util.c b/server/util.c
+index fd7a0a14763..607c4850d86 100644
+--- a/server/util.c
++++ b/server/util.c
+@@ -561,16 +561,16 @@ AP_DECLARE(void) ap_getparents(char *name)
+ name[l] = '\0';
+ }
+ }
+-
+-AP_DECLARE(void) ap_no2slash(char *name)
++AP_DECLARE(void) ap_no2slash_ex(char *name, int is_fs_path)
+ {
++
+ char *d, *s;
+
+ s = d = name;
+
+ #ifdef HAVE_UNC_PATHS
+ /* Check for UNC names. Leave leading two slashes. */
+- if (s[0] == '/' && s[1] == '/')
++ if (is_fs_path && s[0] == '/' && s[1] == '/')
+ *d++ = *s++;
+ #endif
+
+@@ -587,6 +587,10 @@ AP_DECLARE(void) ap_no2slash(char *name)
+ *d = '\0';
+ }
+
++AP_DECLARE(void) ap_no2slash(char *name)
++{
++ ap_no2slash_ex(name, 1);
++}
+
+ /*
+ * copy at most n leading directories of s into d
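
Note on CVE-2019-0220 (part 1): with MergeSlashes on (the new default), r->uri has consecutive slashes collapsed before any Location/LocationMatch walk, so "//private///area" and "/private/area" can no longer be told apart by access-control patterns. A standalone version of the collapsing loop, modelled on ap_no2slash minus the UNC special case:

    #include <stdio.h>

    static void merge_slashes(char *s)
    {
        char *d = s;
        while (*s) {
            if ((*d++ = *s) == '/')
                while (*s == '/')
                    ++s;              /* skip the duplicates */
            else
                ++s;
        }
        *d = '\0';
    }

    int main(void)
    {
        char uri[] = "//private///area//index.html";
        merge_slashes(uri);
        puts(uri);   /* "/private/area/index.html" -- what <Location> now sees */
        return 0;
    }
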
diff --git a/debian/patches/CVE-2019-0220-2.patch b/debian/patches/CVE-2019-0220-2.patch
new file mode 100644
index 0000000..0204259
--- /dev/null
+++ b/debian/patches/CVE-2019-0220-2.patch
@@ -0,0 +1,50 @@
+From c4ef468b25718a26f2b92cbea3ca093729b79331 Mon Sep 17 00:00:00 2001
+From: Eric Covener <covener@apache.org>
+Date: Mon, 18 Mar 2019 12:10:15 +0000
+Subject: [PATCH] merge 1855743,1855744 ^/httpd/httpd/trunk .
+
+r->parsed_uri.path safety in recent backport
+
+*) core: fix SEGFAULT in CONNECT with recent change
+ 2.4.x: svn merge -c 1855743,1855744 ^/httpd/httpd/trunk .
+ +1: rpluem, icing, covener
+
+
+
+
+git-svn-id: https://svn.apache.org/repos/asf/httpd/httpd/branches/2.4.x@1855751 13f79535-47bb-0310-9956-ffa450edef68
+---
+ server/request.c | 4 +++-
+ server/util.c | 4 ++++
+ 2 files changed, 7 insertions(+), 1 deletion(-)
+
+diff --git a/server/request.c b/server/request.c
+index 1ce8908824b..d5c558afa30 100644
+--- a/server/request.c
++++ b/server/request.c
+@@ -195,7 +195,9 @@ AP_DECLARE(int) ap_process_request_internal(request_rec *r)
+ ap_getparents(r->uri); /* OK --- shrinking transformations... */
+ if (sconf->merge_slashes != AP_CORE_CONFIG_OFF) {
+ ap_no2slash(r->uri);
+- ap_no2slash(r->parsed_uri.path);
++ if (r->parsed_uri.path) {
++ ap_no2slash(r->parsed_uri.path);
++ }
+ }
+
+ /* All file subrequests are a huge pain... they cannot bubble through the
+diff --git a/server/util.c b/server/util.c
+index 607c4850d86..f3b17f1581e 100644
+--- a/server/util.c
++++ b/server/util.c
+@@ -566,6 +566,10 @@ AP_DECLARE(void) ap_no2slash_ex(char *name, int is_fs_path)
+
+ char *d, *s;
+
++ if (!name || !*name) {
++ return;
++ }
++
+ s = d = name;
+
+ #ifdef HAVE_UNC_PATHS
diff --git a/debian/patches/CVE-2019-0220-3.patch b/debian/patches/CVE-2019-0220-3.patch
new file mode 100644
index 0000000..7b3ff6f
--- /dev/null
+++ b/debian/patches/CVE-2019-0220-3.patch
@@ -0,0 +1,43 @@
+From 3451fc2bf8708b0dc8cd6a7d0ac0fe5b6401befc Mon Sep 17 00:00:00 2001
+From: Eric Covener <covener@apache.org>
+Date: Tue, 19 Mar 2019 18:01:21 +0000
+Subject: [PATCH] *) maintainer mode fix for util.c no2slash_ex trunk
+ patch: http://svn.apache.org/r1855755 2.4.x patch svn merge -c 1855755
+ ^/httpd/httpd/trunk . +1: covener, rpluem, jim, ylavic
+
+git-svn-id: https://svn.apache.org/repos/asf/httpd/httpd/branches/2.4.x@1855853 13f79535-47bb-0310-9956-ffa450edef68
+---
+ STATUS | 6 ------
+ server/util.c | 2 +-
+ 2 files changed, 1 insertion(+), 7 deletions(-)
+
+#diff --git a/STATUS b/STATUS
+#index ffe5d22550c..1f8cb2f7884 100644
+#--- a/STATUS
+#+++ b/STATUS
+#@@ -126,12 +126,6 @@ RELEASE SHOWSTOPPERS:
+# PATCHES ACCEPTED TO BACKPORT FROM TRUNK:
+# [ start all new proposals below, under PATCHES PROPOSED. ]
+#
+#- *) maintainer mode fix for util.c no2slash_ex
+#- trunk patch: http://svn.apache.org/r1855755
+#- 2.4.x patch svn merge -c 1855755 ^/httpd/httpd/trunk .
+#- +1: covener, rpluem, jim, ylavic
+#-
+#-
+# PATCHES PROPOSED TO BACKPORT FROM TRUNK:
+# [ New proposals should be added at the end of the list ]
+#
+diff --git a/server/util.c b/server/util.c
+index f3b17f1581e..e0c558cee2d 100644
+--- a/server/util.c
++++ b/server/util.c
+@@ -566,7 +566,7 @@ AP_DECLARE(void) ap_no2slash_ex(char *name, int is_fs_path)
+
+ char *d, *s;
+
+- if (!name || !*name) {
++ if (!*name) {
+ return;
+ }
+
diff --git a/debian/patches/CVE-2019-10092.patch b/debian/patches/CVE-2019-10092.patch
new file mode 100644
index 0000000..eb3352c
--- /dev/null
+++ b/debian/patches/CVE-2019-10092.patch
@@ -0,0 +1,193 @@
+Description: Fix for CVE-2019-10092
+Author: Stefan Eissing
+Origin: upstream, https://svn.apache.org/viewvc?view=revision&revision=1864191
+Bug: https://security-tracker.debian.org/tracker/CVE-2019-10092
+Forwarded: not-needed
+Reviewed-By: Xavier Guimard <yadd@debian.org>
+Last-Update: 2019-10-11
+[Salvatore Bonaccorso: Add additional change from https://svn.apache.org/r1864699
+to add missing APLOGNO's in mod_proxy.c and mod_proxy_ftp.c]
+--- a/modules/http/http_protocol.c
++++ b/modules/http/http_protocol.c
+@@ -1132,13 +1132,10 @@
+ "\">here</a>.</p>\n",
+ NULL));
+ case HTTP_USE_PROXY:
+- return(apr_pstrcat(p,
+- "<p>This resource is only accessible "
+- "through the proxy\n",
+- ap_escape_html(r->pool, location),
+- "<br />\nYou will need to configure "
+- "your client to use that proxy.</p>\n",
+- NULL));
++ return("<p>This resource is only accessible "
++ "through the proxy\n"
++ "<br />\nYou will need to configure "
++ "your client to use that proxy.</p>\n");
+ case HTTP_PROXY_AUTHENTICATION_REQUIRED:
+ case HTTP_UNAUTHORIZED:
+ return("<p>This server could not verify that you\n"
+@@ -1154,34 +1151,20 @@
+ "error-notes",
+ "</p>\n"));
+ case HTTP_FORBIDDEN:
+- s1 = apr_pstrcat(p,
+- "<p>You don't have permission to access ",
+- ap_escape_html(r->pool, r->uri),
+- "\non this server.<br />\n",
+- NULL);
+- return(add_optional_notes(r, s1, "error-notes", "</p>\n"));
++ return(add_optional_notes(r, "<p>You don't have permission to access this resource.", "error-notes", "</p>\n"));
+ case HTTP_NOT_FOUND:
+- return(apr_pstrcat(p,
+- "<p>The requested URL ",
+- ap_escape_html(r->pool, r->uri),
+- " was not found on this server.</p>\n",
+- NULL));
++ return("<p>The requested URL was not found on this server.</p>\n");
+ case HTTP_METHOD_NOT_ALLOWED:
+ return(apr_pstrcat(p,
+ "<p>The requested method ",
+ ap_escape_html(r->pool, r->method),
+- " is not allowed for the URL ",
+- ap_escape_html(r->pool, r->uri),
+- ".</p>\n",
++ " is not allowed for this URL.</p>\n",
+ NULL));
+ case HTTP_NOT_ACCEPTABLE:
+- s1 = apr_pstrcat(p,
+- "<p>An appropriate representation of the "
+- "requested resource ",
+- ap_escape_html(r->pool, r->uri),
+- " could not be found on this server.</p>\n",
+- NULL);
+- return(add_optional_notes(r, s1, "variant-list", ""));
++ return(add_optional_notes(r,
++ "<p>An appropriate representation of the requested resource "
++ "could not be found on this server.</p>\n",
++ "variant-list", ""));
+ case HTTP_MULTIPLE_CHOICES:
+ return(add_optional_notes(r, "", "variant-list", ""));
+ case HTTP_LENGTH_REQUIRED:
+@@ -1192,18 +1175,13 @@
+ NULL);
+ return(add_optional_notes(r, s1, "error-notes", "</p>\n"));
+ case HTTP_PRECONDITION_FAILED:
+- return(apr_pstrcat(p,
+- "<p>The precondition on the request "
+- "for the URL ",
+- ap_escape_html(r->pool, r->uri),
+- " evaluated to false.</p>\n",
+- NULL));
++ return("<p>The precondition on the request "
++ "for this URL evaluated to false.</p>\n");
+ case HTTP_NOT_IMPLEMENTED:
+ s1 = apr_pstrcat(p,
+ "<p>",
+- ap_escape_html(r->pool, r->method), " to ",
+- ap_escape_html(r->pool, r->uri),
+- " not supported.<br />\n",
++ ap_escape_html(r->pool, r->method), " ",
++ " not supported for current URL.<br />\n",
+ NULL);
+ return(add_optional_notes(r, s1, "error-notes", "</p>\n"));
+ case HTTP_BAD_GATEWAY:
+@@ -1211,29 +1189,19 @@
+ "response from an upstream server.<br />" CRLF;
+ return(add_optional_notes(r, s1, "error-notes", "</p>\n"));
+ case HTTP_VARIANT_ALSO_VARIES:
+- return(apr_pstrcat(p,
+- "<p>A variant for the requested "
+- "resource\n<pre>\n",
+- ap_escape_html(r->pool, r->uri),
+- "\n</pre>\nis itself a negotiable resource. "
+- "This indicates a configuration error.</p>\n",
+- NULL));
++ return("<p>A variant for the requested "
++ "resource\n<pre>\n"
++ "\n</pre>\nis itself a negotiable resource. "
++ "This indicates a configuration error.</p>\n");
+ case HTTP_REQUEST_TIME_OUT:
+ return("<p>Server timeout waiting for the HTTP request from the client.</p>\n");
+ case HTTP_GONE:
+- return(apr_pstrcat(p,
+- "<p>The requested resource<br />",
+- ap_escape_html(r->pool, r->uri),
+- "<br />\nis no longer available on this server "
+- "and there is no forwarding address.\n"
+- "Please remove all references to this "
+- "resource.</p>\n",
+- NULL));
++ return("<p>The requested resource is no longer available on this server"
++ " and there is no forwarding address.\n"
++ "Please remove all references to this resource.</p>\n");
+ case HTTP_REQUEST_ENTITY_TOO_LARGE:
+ return(apr_pstrcat(p,
+- "The requested resource<br />",
+- ap_escape_html(r->pool, r->uri), "<br />\n",
+- "does not allow request data with ",
++ "The requested resource does not allow request data with ",
+ ap_escape_html(r->pool, r->method),
+ " requests, or the amount of data provided in\n"
+ "the request exceeds the capacity limit.\n",
+@@ -1317,11 +1285,9 @@
+ "the Server Name Indication (SNI) in use for this\n"
+ "connection.</p>\n");
+ case HTTP_UNAVAILABLE_FOR_LEGAL_REASONS:
+- s1 = apr_pstrcat(p,
+- "<p>Access to ", ap_escape_html(r->pool, r->uri),
+- "\nhas been denied for legal reasons.<br />\n",
+- NULL);
+- return(add_optional_notes(r, s1, "error-notes", "</p>\n"));
++ return(add_optional_notes(r,
++ "<p>Access to this URL has been denied for legal reasons.<br />\n",
++ "error-notes", "</p>\n"));
+ default: /* HTTP_INTERNAL_SERVER_ERROR */
+ /*
+ * This comparison to expose error-notes could be modified to
+--- a/modules/proxy/mod_proxy.c
++++ b/modules/proxy/mod_proxy.c
+@@ -1049,9 +1049,10 @@
+ char *end;
+ maxfwd = apr_strtoi64(str, &end, 10);
+ if (maxfwd < 0 || maxfwd == APR_INT64_MAX || *end) {
+- return ap_proxyerror(r, HTTP_BAD_REQUEST,
+- apr_psprintf(r->pool,
+- "Max-Forwards value '%s' could not be parsed", str));
++ ap_log_rerror(APLOG_MARK, APLOG_WARNING, 0, r, APLOGNO(10188)
++ "Max-Forwards value '%s' could not be parsed", str);
++ return ap_proxyerror(r, HTTP_BAD_REQUEST,
++ "Max-Forwards request header could not be parsed");
+ }
+ else if (maxfwd == 0) {
+ switch (r->method_number) {
+--- a/modules/proxy/mod_proxy_ftp.c
++++ b/modules/proxy/mod_proxy_ftp.c
+@@ -1024,8 +1024,9 @@
+ /* We break the URL into host, port, path-search */
+ if (r->parsed_uri.hostname == NULL) {
+ if (APR_SUCCESS != apr_uri_parse(p, url, &uri)) {
+- return ap_proxyerror(r, HTTP_BAD_REQUEST,
+- apr_psprintf(p, "URI cannot be parsed: %s", url));
++ ap_log_rerror(APLOG_MARK, APLOG_WARNING, 0, r, APLOGNO(10189)
++ "URI cannot be parsed: %s", url);
++ return ap_proxyerror(r, HTTP_BAD_REQUEST, "URI cannot be parsed");
+ }
+ connectname = uri.hostname;
+ connectport = uri.port;
+--- a/modules/proxy/proxy_util.c
++++ b/modules/proxy/proxy_util.c
+@@ -368,12 +368,9 @@
+
+ PROXY_DECLARE(int) ap_proxyerror(request_rec *r, int statuscode, const char *message)
+ {
+- const char *uri = ap_escape_html(r->pool, r->uri);
+ apr_table_setn(r->notes, "error-notes",
+ apr_pstrcat(r->pool,
+- "The proxy server could not handle the request <em><a href=\"",
+- uri, "\">", ap_escape_html(r->pool, r->method), "&nbsp;", uri,
+- "</a></em>.<p>\n"
++ "The proxy server could not handle the request<p>"
+ "Reason: <strong>", ap_escape_html(r->pool, message),
+ "</strong></p>",
+ NULL));
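
Note on CVE-2019-10092: rather than trying to prove the request URL is HTML-escaped on every path into the canned error bodies (the proxy error page had paths where it was not), the patch stops echoing it altogether. For contrast, a minimal escaper of the kind ap_escape_html provides — omitting the untrusted value is the simpler guarantee:

    #include <stdio.h>

    static void escape_html(const char *s, FILE *out)
    {
        for (; *s; s++) {
            switch (*s) {
            case '<':  fputs("&lt;", out);   break;
            case '>':  fputs("&gt;", out);   break;
            case '&':  fputs("&amp;", out);  break;
            case '"':  fputs("&quot;", out); break;
            default:   fputc(*s, out);       break;
            }
        }
    }

    int main(void)
    {
        /* old-style body: reflects the request URI (must be escaped) */
        fputs("<p>The requested URL ", stdout);
        escape_html("/<script>alert(1)</script>", stdout);
        fputs(" was not found on this server.</p>\n", stdout);

        /* new-style body: nothing attacker-controlled to escape */
        fputs("<p>The requested URL was not found on this server.</p>\n", stdout);
        return 0;
    }
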
diff --git a/debian/patches/CVE-2019-10097.patch b/debian/patches/CVE-2019-10097.patch
new file mode 100644
index 0000000..0be05f5
--- /dev/null
+++ b/debian/patches/CVE-2019-10097.patch
@@ -0,0 +1,72 @@
+Description: Fix for CVE-2019-10097
+Author: jorton
+Origin: upstream, https://svn.apache.org/repos/asf/httpd/httpd/branches/2.4.x@1864613
+Bug: https://security-tracker.debian.org/tracker/CVE-2019-10097
+Forwarded: not-needed
+Reviewed-By: Xavier Guimard <yadd@debian.org>
+Last-Update: 2019-08-17
+
+--- a/modules/metadata/mod_remoteip.c
++++ b/modules/metadata/mod_remoteip.c
+@@ -987,15 +987,13 @@
+ return HDR_ERROR;
+ #endif
+ default:
+- /* unsupported protocol, keep local connection address */
+- return HDR_DONE;
++ /* unsupported protocol */
++ ap_log_cerror(APLOG_MARK, APLOG_ERR, 0, c, APLOGNO(10183)
++ "RemoteIPProxyProtocol: unsupported protocol %.2hx",
++ (unsigned short)hdr->v2.fam);
++ return HDR_ERROR;
+ }
+ break; /* we got a sockaddr now */
+-
+- case 0x00: /* LOCAL command */
+- /* keep local connection address for LOCAL */
+- return HDR_DONE;
+-
+ default:
+ /* not a supported command */
+ ap_log_cerror(APLOG_MARK, APLOG_ERR, 0, c, APLOGNO(03507)
+@@ -1087,11 +1085,24 @@
+ /* try to read a header's worth of data */
+ while (!ctx->done) {
+ if (APR_BRIGADE_EMPTY(ctx->bb)) {
+- ret = ap_get_brigade(f->next, ctx->bb, ctx->mode, block,
+- ctx->need - ctx->rcvd);
++ apr_off_t got, want = ctx->need - ctx->rcvd;
++
++ ret = ap_get_brigade(f->next, ctx->bb, ctx->mode, block, want);
+ if (ret != APR_SUCCESS) {
++ ap_log_cerror(APLOG_MARK, APLOG_ERR, ret, f->c, APLOGNO(10184)
++ "failed reading input");
+ return ret;
+ }
++
++ ret = apr_brigade_length(ctx->bb, 1, &got);
++ if (ret || got > want) {
++ ap_log_cerror(APLOG_MARK, APLOG_ERR, ret, f->c, APLOGNO(10185)
++ "RemoteIPProxyProtocol header too long, "
++ "got %" APR_OFF_T_FMT " expected %" APR_OFF_T_FMT,
++ got, want);
++ f->c->aborted = 1;
++ return APR_ECONNABORTED;
++ }
+ }
+ if (APR_BRIGADE_EMPTY(ctx->bb)) {
+ return block == APR_NONBLOCK_READ ? APR_SUCCESS : APR_EOF;
+@@ -1139,6 +1150,13 @@
+ if (ctx->rcvd >= MIN_V2_HDR_LEN) {
+ ctx->need = MIN_V2_HDR_LEN +
+ remoteip_get_v2_len((proxy_header *) ctx->header);
++ if (ctx->need > sizeof(proxy_v2)) {
++ ap_log_cerror(APLOG_MARK, APLOG_ERR, 0, f->c, APLOGNO(10186)
++ "RemoteIPProxyProtocol protocol header length too long");
++ f->c->aborted = 1;
++ apr_brigade_destroy(ctx->bb);
++ return APR_ECONNABORTED;
++ }
+ }
+ if (ctx->rcvd >= ctx->need) {
+ psts = remoteip_process_v2_header(f->c, conn_conf,
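
Note on CVE-2019-10097: two bounds checks are added to mod_remoteip's PROXY protocol reader — the brigade must not return more bytes than are still needed, and the length declared in a v2 header must fit the fixed-size header structure, otherwise the connection is aborted; unsupported address families now error out instead of being silently accepted. A sketch of the declared-length check (the sizes are illustrative stand-ins for sizeof(proxy_v2)):

    #include <stdio.h>
    #include <stdint.h>

    #define MIN_V2_HDR_LEN 16
    #define MAX_V2_LEN     (16 + 216)   /* header + largest address block */

    static int check_declared_len(uint16_t declared)
    {
        size_t need = MIN_V2_HDR_LEN + (size_t)declared;
        if (need > MAX_V2_LEN) {
            fprintf(stderr, "PROXY header length too long (%zu)\n", need);
            return -1;   /* abort the connection, as the patch does */
        }
        return 0;
    }

    int main(void)
    {
        printf("%d\n", check_declared_len(100));    /* ok */
        printf("%d\n", check_declared_len(60000));  /* rejected */
        return 0;
    }
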
diff --git a/debian/patches/CVE-2019-10098.patch b/debian/patches/CVE-2019-10098.patch
new file mode 100644
index 0000000..b2c66b2
--- /dev/null
+++ b/debian/patches/CVE-2019-10098.patch
@@ -0,0 +1,20 @@
+Description: patch to set PCRE_DOTALL by default
+Author: ylavic
+Origin: upstream, https://svn.apache.org/viewvc?view=revision&revision=1864192
+Bug: https://security-tracker.debian.org/tracker/CVE-2019-10098
+Forwarded: not-needed
+Reviewed-By: Xavier Guimard <yadd@debian.org>
+Last-Update: 2019-08-18
+
+--- a/server/util_pcre.c
++++ b/server/util_pcre.c
+@@ -120,7 +120,8 @@
+ * Compile a regular expression *
+ *************************************************/
+
+-static int default_cflags = AP_REG_DOLLAR_ENDONLY;
++static int default_cflags = AP_REG_DOTALL |
++ AP_REG_DOLLAR_ENDONLY;
+
+ AP_DECLARE(int) ap_regcomp_get_default_cflags(void)
+ {
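
Note on CVE-2019-10098: making AP_REG_DOTALL a default compile flag means '.' in configuration regexes (mod_rewrite and friends) also matches newlines, so an embedded \n can no longer carry data past a '.'-based pattern. A sketch of the semantic difference using the PCRE 8.x API directly (assumes libpcre is available; link with -lpcre):

    #include <stdio.h>
    #include <string.h>
    #include <pcre.h>

    static int matches(int options, const char *subject)
    {
        const char *err;
        int erroff, ovec[3];
        pcre *re = pcre_compile("^a.b$", options, &err, &erroff, NULL);
        int rc = pcre_exec(re, NULL, subject, (int)strlen(subject),
                           0, 0, ovec, 3);
        pcre_free(re);
        return rc >= 0;
    }

    int main(void)
    {
        printf("without DOTALL: %d\n", matches(0, "a\nb"));            /* 0 */
        printf("with DOTALL:    %d\n", matches(PCRE_DOTALL, "a\nb"));  /* 1 */
        return 0;
    }
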
diff --git a/debian/patches/CVE-2020-11984.patch b/debian/patches/CVE-2020-11984.patch
new file mode 100644
index 0000000..409f958
--- /dev/null
+++ b/debian/patches/CVE-2020-11984.patch
@@ -0,0 +1,45 @@
+Description: fix error out on HTTP header larger than 16K
+ The uwsgi protocol does not let us serialize more than 16K of HTTP header,
+ so fail early with 500 if it happens.
+Author: ylavic
+Origin: upstream, https://github.com/apache/httpd/commit/0c543e3f
+Bug: https://security-tracker.debian.org/tracker/CVE-2020-11984
+Forwarded: not-needed
+Reviewed-By: Xavier Guimard <yadd@debian.org>
+Last-Update: 2020-08-25
+
+--- a/modules/proxy/mod_proxy_uwsgi.c
++++ b/modules/proxy/mod_proxy_uwsgi.c
+@@ -136,7 +136,7 @@
+ int j;
+
+ apr_size_t headerlen = 4;
+- apr_uint16_t pktsize, keylen, vallen;
++ apr_size_t pktsize, keylen, vallen;
+ const char *script_name;
+ const char *path_info;
+ const char *auth;
+@@ -177,6 +177,14 @@
+ for (j = 0; j < env_table->nelts; ++j) {
+ headerlen += 2 + strlen(env[j].key) + 2 + strlen(env[j].val);
+ }
++ pktsize = headerlen - 4;
++ if (pktsize > APR_UINT16_MAX) {
++ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(10259)
++ "can't send headers to %s:%u: packet size too "
++ "large (%" APR_SIZE_T_FMT ")",
++ conn->hostname, conn->port, pktsize);
++ return HTTP_INTERNAL_SERVER_ERROR;
++ }
+
+ ptr = buf = apr_palloc(r->pool, headerlen);
+
+@@ -196,8 +204,6 @@
+ ptr += vallen;
+ }
+
+- pktsize = headerlen - 4;
+-
+ buf[0] = 0;
+ buf[1] = (apr_byte_t) (pktsize & 0xff);
+ buf[2] = (apr_byte_t) ((pktsize >> 8) & 0xff);
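
Note on CVE-2020-11984: the uwsgi packet header stores the payload size in two bytes, so the module must refuse anything above 64 KiB before the value is truncated into buf[1]/buf[2]; previously pktsize was an apr_uint16_t and oversized request headers silently wrapped around. The arithmetic in miniature:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        size_t headerlen = 4 + 70000;        /* pretend: huge request headers */
        size_t pktsize = headerlen - 4;

        if (pktsize > UINT16_MAX) {
            fprintf(stderr, "packet size too large (%zu)\n", pktsize);
            return 1;                        /* 500 in the module */
        }

        unsigned char buf[4];
        buf[0] = 0;                                       /* modifier1 */
        buf[1] = (unsigned char)(pktsize & 0xff);         /* size, low byte */
        buf[2] = (unsigned char)((pktsize >> 8) & 0xff);  /* size, high byte */
        buf[3] = 0;                                       /* modifier2 */
        (void)buf;
        return 0;
    }
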
diff --git a/debian/patches/CVE-2020-1927.patch b/debian/patches/CVE-2020-1927.patch
new file mode 100644
index 0000000..cbdd84f
--- /dev/null
+++ b/debian/patches/CVE-2020-1927.patch
@@ -0,0 +1,92 @@
+Description: fix for CVE-2020-1927
+Author: covener
+Origin: upstream, https://svn.apache.org/r1873905
+ https://svn.apache.org/r1874191
+Bug: https://security-tracker.debian.org/tracker/CVE-2020-1927
+Forwarded: not-needed
+Reviewed-By: Xavier Guimard <yadd@debian.org>
+Last-Update: 2020-08-25
+
+--- a/include/ap_regex.h
++++ b/include/ap_regex.h
+@@ -84,7 +84,11 @@
+
+ #define AP_REG_DOLLAR_ENDONLY 0x200 /* '$' matches at end of subject string only */
+
+-#define AP_REG_MATCH "MATCH_" /** suggested prefix for ap_regname */
++#define AP_REG_NO_DEFAULT 0x400 /**< Don't implicitely add AP_REG_DEFAULT options */
++
++#define AP_REG_MATCH "MATCH_" /**< suggested prefix for ap_regname */
++
++#define AP_REG_DEFAULT (AP_REG_DOTALL|AP_REG_DOLLAR_ENDONLY)
+
+ /* Error values: */
+ enum {
+--- a/modules/filters/mod_substitute.c
++++ b/modules/filters/mod_substitute.c
+@@ -667,8 +667,10 @@
+
+ /* first see if we can compile the regex */
+ if (!is_pattern) {
+- r = ap_pregcomp(cmd->pool, from, AP_REG_EXTENDED |
+- (ignore_case ? AP_REG_ICASE : 0));
++ int flags = AP_REG_NO_DEFAULT
++ | (ap_regcomp_get_default_cflags() & AP_REG_DOLLAR_ENDONLY)
++ | (ignore_case ? AP_REG_ICASE : 0);
++ r = ap_pregcomp(cmd->pool, from, flags);
+ if (!r)
+ return "Substitute could not compile regex";
+ }
+--- a/server/core.c
++++ b/server/core.c
+@@ -4937,7 +4937,7 @@
+ apr_pool_cleanup_register(pconf, NULL, reset_config_defines,
+ apr_pool_cleanup_null);
+
+- ap_regcomp_set_default_cflags(AP_REG_DOLLAR_ENDONLY);
++ ap_regcomp_set_default_cflags(AP_REG_DEFAULT);
+
+ mpm_common_pre_config(pconf);
+
+--- a/server/util_pcre.c
++++ b/server/util_pcre.c
+@@ -120,8 +120,7 @@
+ * Compile a regular expression *
+ *************************************************/
+
+-static int default_cflags = AP_REG_DOTALL |
+- AP_REG_DOLLAR_ENDONLY;
++static int default_cflags = AP_REG_DEFAULT;
+
+ AP_DECLARE(int) ap_regcomp_get_default_cflags(void)
+ {
+@@ -169,7 +168,9 @@
+ int errcode = 0;
+ int options = PCRE_DUPNAMES;
+
+- cflags |= default_cflags;
++ if ((cflags & AP_REG_NO_DEFAULT) == 0)
++ cflags |= default_cflags;
++
+ if ((cflags & AP_REG_ICASE) != 0)
+ options |= PCRE_CASELESS;
+ if ((cflags & AP_REG_NEWLINE) != 0)
+--- a/server/util_regex.c
++++ b/server/util_regex.c
+@@ -94,6 +94,7 @@
+ }
+
+ /* anything after the current delimiter is flags */
++ ret->flags = ap_regcomp_get_default_cflags() & AP_REG_DOLLAR_ENDONLY;
+ while (*++endp) {
+ switch (*endp) {
+ case 'i': ret->flags |= AP_REG_ICASE; break;
+@@ -106,7 +107,7 @@
+ default: break; /* we should probably be stricter here */
+ }
+ }
+- if (ap_regcomp(&ret->rx, rxstr, ret->flags) == 0) {
++ if (ap_regcomp(&ret->rx, rxstr, AP_REG_NO_DEFAULT | ret->flags) == 0) {
+ apr_pool_cleanup_register(pool, &ret->rx, rxplus_cleanup,
+ apr_pool_cleanup_null);
+ }
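
Note on CVE-2020-1927: mod_rewrite and mod_substitute need per-directive regex semantics while everything else inherits the server-wide defaults (DOTALL|DOLLAR_ENDONLY, from the CVE-2019-10098 change), so the patch introduces AP_REG_NO_DEFAULT as an explicit opt-out for callers that reconstruct exactly the defaults they want. The flag scheme in miniature (names shortened):

    #include <stdio.h>

    #define REG_ICASE          0x01
    #define REG_DOTALL         0x10
    #define REG_DOLLAR_ENDONLY 0x20
    #define REG_NO_DEFAULT     0x40
    #define REG_DEFAULT        (REG_DOTALL | REG_DOLLAR_ENDONLY)

    static int effective_flags(int cflags)
    {
        if ((cflags & REG_NO_DEFAULT) == 0)
            cflags |= REG_DEFAULT;        /* implicit defaults */
        return cflags & ~REG_NO_DEFAULT;
    }

    int main(void)
    {
        printf("plain:     %#x\n", (unsigned)effective_flags(REG_ICASE));
        printf("opted out: %#x\n",
               (unsigned)effective_flags(REG_NO_DEFAULT | REG_ICASE));
        return 0;
    }
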
diff --git a/debian/patches/CVE-2020-1934.patch b/debian/patches/CVE-2020-1934.patch
new file mode 100644
index 0000000..295ab45
--- /dev/null
+++ b/debian/patches/CVE-2020-1934.patch
@@ -0,0 +1,75 @@
+Description: fix uninitialized memory when proxying to a malicious FTP server
+Author: covener
+Origin: upstream, https://svn.apache.org/viewvc?view=revision&revision=1873745
+Bug: https://security-tracker.debian.org/tracker/CVE-2020-1934
+Forwarded: not-needed
+Reviewed-By: Xavier Guimard <yadd@debian.org>
+Last-Update: 2020-08-25
+
+--- a/modules/proxy/mod_proxy_ftp.c
++++ b/modules/proxy/mod_proxy_ftp.c
+@@ -218,7 +218,7 @@
+ * (EBCDIC) machines either.
+ */
+ static apr_status_t ftp_string_read(conn_rec *c, apr_bucket_brigade *bb,
+- char *buff, apr_size_t bufflen, int *eos)
++ char *buff, apr_size_t bufflen, int *eos, apr_size_t *outlen)
+ {
+ apr_bucket *e;
+ apr_status_t rv;
+@@ -230,6 +230,7 @@
+ /* start with an empty string */
+ buff[0] = 0;
+ *eos = 0;
++ *outlen = 0;
+
+ /* loop through each brigade */
+ while (!found) {
+@@ -273,6 +274,7 @@
+ if (len > 0) {
+ memcpy(pos, response, len);
+ pos += len;
++ *outlen += len;
+ }
+ }
+ apr_bucket_delete(e);
+@@ -385,28 +387,35 @@
+ char buff[5];
+ char *mb = msgbuf, *me = &msgbuf[msglen];
+ apr_status_t rv;
++ apr_size_t nread;
++
+ int eos;
+
+- if (APR_SUCCESS != (rv = ftp_string_read(ftp_ctrl, bb, response, sizeof(response), &eos))) {
++ if (APR_SUCCESS != (rv = ftp_string_read(ftp_ctrl, bb, response, sizeof(response), &eos, &nread))) {
+ return -1;
+ }
+ /*
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, NULL, APLOGNO(03233)
+ "<%s", response);
+ */
++ if (nread < 4) {
++ ap_log_error(APLOG_MARK, APLOG_INFO, 0, NULL, APLOGNO(10229) "Malformed FTP response '%s'", response);
++ *mb = '\0';
++ return -1;
++ }
+ if (!apr_isdigit(response[0]) || !apr_isdigit(response[1]) ||
+- !apr_isdigit(response[2]) || (response[3] != ' ' && response[3] != '-'))
++ !apr_isdigit(response[2]) || (response[3] != ' ' && response[3] != '-'))
+ status = 0;
+ else
+ status = 100 * response[0] + 10 * response[1] + response[2] - 111 * '0';
+
+ mb = apr_cpystrn(mb, response + 4, me - mb);
+
+- if (response[3] == '-') {
++ if (response[3] == '-') { /* multi-line reply "123-foo\nbar\n123 baz" */
+ memcpy(buff, response, 3);
+ buff[3] = ' ';
+ do {
+- if (APR_SUCCESS != (rv = ftp_string_read(ftp_ctrl, bb, response, sizeof(response), &eos))) {
++ if (APR_SUCCESS != (rv = ftp_string_read(ftp_ctrl, bb, response, sizeof(response), &eos, &nread))) {
+ return -1;
+ }
+ mb = apr_cpystrn(mb, response + (' ' == response[0] ? 1 : 4), me - mb);
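
Note on CVE-2020-1934: ftp_string_read() now reports how many bytes actually arrived, and the caller refuses to parse the 4-byte status prefix unless at least that much was read, instead of poking at uninitialized buffer contents when a malicious FTP server sends a truncated line. The check in miniature:

    #include <stdio.h>
    #include <ctype.h>
    #include <string.h>

    static int parse_status(const char *response, size_t nread)
    {
        if (nread < 4) {
            fprintf(stderr, "Malformed FTP response '%s'\n", response);
            return -1;
        }
        if (!isdigit((unsigned char)response[0]) ||
            !isdigit((unsigned char)response[1]) ||
            !isdigit((unsigned char)response[2]) ||
            (response[3] != ' ' && response[3] != '-'))
            return 0;                      /* not a status line */
        return (response[0] - '0') * 100 +
               (response[1] - '0') * 10 +
               (response[2] - '0');
    }

    int main(void)
    {
        printf("%d\n", parse_status("226 Transfer complete", 21));
        printf("%d\n", parse_status("2", 1));   /* short read: rejected */
        return 0;
    }
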
diff --git a/debian/patches/CVE-2020-35452.patch b/debian/patches/CVE-2020-35452.patch
new file mode 100644
index 0000000..5204210
--- /dev/null
+++ b/debian/patches/CVE-2020-35452.patch
@@ -0,0 +1,27 @@
+Description: mod_auth_digest: fail early on a malformed client nonce (CVE-2020-35452)
+Author: Apache authors
+Origin: upstream, https://github.com/apache/httpd/commit/3b6431e
+Bug: https://httpd.apache.org/security/vulnerabilities_24.html#CVE-2020-35452
+Forwarded: not-needed
+Reviewed-By: Yadd <yadd@debian.org>
+Last-Update: 2021-06-10
+
+--- a/modules/aaa/mod_auth_digest.c
++++ b/modules/aaa/mod_auth_digest.c
+@@ -1422,9 +1422,14 @@
+ time_rec nonce_time;
+ char tmp, hash[NONCE_HASH_LEN+1];
+
+- if (strlen(resp->nonce) != NONCE_LEN) {
++ /* Since the time part of the nonce is a base64 encoding of an
++ * apr_time_t (8 bytes), it should end with a '=', fail early otherwise.
++ */
++ if (strlen(resp->nonce) != NONCE_LEN
++ || resp->nonce[NONCE_TIME_LEN - 1] != '=') {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(01775)
+- "invalid nonce %s received - length is not %d",
++ "invalid nonce '%s' received - length is not %d "
++ "or time encoding is incorrect",
+ resp->nonce, NONCE_LEN);
+ note_digest_auth_failure(r, conf, resp, 1);
+ return HTTP_UNAUTHORIZED;
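
The extra '=' test works because base64 output length is 4*ceil(n/3), with padding determined by n mod 3: an 8-byte apr_time_t (8 = 3*2 + 2) always encodes to 12 characters ending in exactly one '='. A standalone sketch of that invariant (the encoder below is a generic illustration, not APR's apr_base64_encode):

    #include <stdio.h>
    #include <stdint.h>

    static const char b64[] =
        "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";

    static void b64_encode(const unsigned char *in, size_t len, char *out)
    {
        size_t i, o = 0;
        for (i = 0; i + 2 < len; i += 3) {
            uint32_t v = (in[i] << 16) | (in[i+1] << 8) | in[i+2];
            out[o++] = b64[(v >> 18) & 63]; out[o++] = b64[(v >> 12) & 63];
            out[o++] = b64[(v >> 6) & 63];  out[o++] = b64[v & 63];
        }
        if (len - i == 2) {   /* the 8-byte case: two trailing bytes, one '=' */
            uint32_t v = (in[i] << 16) | (in[i+1] << 8);
            out[o++] = b64[(v >> 18) & 63]; out[o++] = b64[(v >> 12) & 63];
            out[o++] = b64[(v >> 6) & 63];  out[o++] = '=';
        }
        out[o] = '\0';
    }

    int main(void)
    {
        unsigned char t[8] = {0};   /* stand-in for an apr_time_t */
        char enc[13];
        b64_encode(t, sizeof(t), enc);
        printf("%s (last char: %c)\n", enc, enc[11]);  /* always '=' */
        return 0;
    }
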
diff --git a/debian/patches/CVE-2021-26690.patch b/debian/patches/CVE-2021-26690.patch
new file mode 100644
index 0000000..72c7457
--- /dev/null
+++ b/debian/patches/CVE-2021-26690.patch
@@ -0,0 +1,20 @@
+Description: mod_session: fix NULL pointer dereference via a specially crafted Cookie header
+Author: Apache authors
+Origin: upstream, https://github.com/apache/httpd/commit/67bd9bfe
+Bug: https://httpd.apache.org/security/vulnerabilities_24.html#CVE-2021-26690
+Forwarded: not-needed
+Reviewed-By: Yadd <yadd@debian.org>
+Last-Update: 2021-06-10
+
+--- a/modules/session/mod_session.c
++++ b/modules/session/mod_session.c
+@@ -392,8 +392,8 @@
+ char *plast = NULL;
+ const char *psep = "=";
+ char *key = apr_strtok(pair, psep, &plast);
+- char *val = apr_strtok(NULL, psep, &plast);
+ if (key && *key) {
++ char *val = apr_strtok(NULL, psep, &plast);
+ if (!val || !*val) {
+ apr_table_unset(z->entries, key);
+ }
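
The reordering matters because of how apr_strtok() behaves when the first call finds no token: it returns NULL without initialising the saved position, so the previously unconditional second call could dereference a NULL plast. A standalone sketch, assuming that apr_strtok() behaviour (my_strtok below is a simplified stand-in, not APR's implementation):

    #include <stdio.h>
    #include <string.h>

    static char *my_strtok(char *str, const char *sep, char **last)
    {
        char *token;
        if (!str)
            str = *last;             /* crashes if *last is still NULL */
        while (*str && strchr(sep, *str))
            ++str;
        if (!*str)
            return NULL;             /* note: *last not set on this path */
        token = str;
        while (*str && !strchr(sep, *str))
            ++str;
        if (*str)
            *str++ = '\0';
        *last = str;
        return token;
    }

    int main(void)
    {
        char pair[] = "=";           /* crafted session pair: empty key */
        char *plast = NULL;
        char *key = my_strtok(pair, "=", &plast);
        if (key && *key) {           /* patched order: guard first ... */
            char *val = my_strtok(NULL, "=", &plast);  /* ... then read val */
            printf("key=%s val=%s\n", key, val ? val : "(none)");
        }
        else {
            printf("pair rejected, no second my_strtok() call\n");
        }
        return 0;
    }
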
diff --git a/debian/patches/CVE-2021-26691.patch b/debian/patches/CVE-2021-26691.patch
new file mode 100644
index 0000000..7b96fad
--- /dev/null
+++ b/debian/patches/CVE-2021-26691.patch
@@ -0,0 +1,18 @@
+Description: mod_session: account for the '&' in identity_concat().
+Author: Apache authors
+Origin: upstream, https://github.com/apache/httpd/commit/7e09dd71
+Forwarded: not-needed
+Reviewed-By: Yadd <yadd@debian.org>
+Last-Update: 2021-06-10
+
+--- a/modules/session/mod_session.c
++++ b/modules/session/mod_session.c
+@@ -305,7 +305,7 @@
+ static int identity_count(void *v, const char *key, const char *val)
+ {
+ int *count = v;
+- *count += strlen(key) * 3 + strlen(val) * 3 + 1;
++ *count += strlen(key) * 3 + strlen(val) * 3 + 2;
+ return 1;
+ }
+
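
The off-by-one being fixed is in the worst-case size estimate for the serialised session: each byte of key and value may URL-encode to three bytes (%XX), and every pair contributes two separator bytes ('=' plus a trailing '&'), hence + 2 rather than + 1. A tiny sketch of the corrected bound (pair_bound is a hypothetical stand-in for identity_count's per-entry arithmetic):

    #include <stdio.h>
    #include <string.h>

    static size_t pair_bound(const char *key, const char *val)
    {
        return strlen(key) * 3 + strlen(val) * 3 + 2;  /* '=' and '&' */
    }

    int main(void)
    {
        /* key "a", val "=" serialises as "a=%3D&" (6 bytes); bound is 8 */
        printf("bound for (a, =): %zu\n", pair_bound("a", "="));
        return 0;
    }
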
diff --git a/debian/patches/CVE-2021-30641.patch b/debian/patches/CVE-2021-30641.patch
new file mode 100644
index 0000000..7486e1b
--- /dev/null
+++ b/debian/patches/CVE-2021-30641.patch
@@ -0,0 +1,50 @@
+Description: legacy default slash-matching behavior w/ 'MergeSlashes OFF'
+Author: Apache authors
+Origin: upstream, https://github.com/apache/httpd/commit/eb986059
+Bug: https://httpd.apache.org/security/vulnerabilities_24.html#CVE-2021-30641
+Forwarded: not-needed
+Reviewed-By: Yadd <yadd@debian.org>
+Last-Update: 2021-06-10
+
+--- a/server/request.c
++++ b/server/request.c
+@@ -1419,7 +1419,20 @@
+
+ cache = prep_walk_cache(AP_NOTE_LOCATION_WALK, r);
+ cached = (cache->cached != NULL);
+- entry_uri = r->uri;
++
++ /*
++ * When merge_slashes is set to AP_CORE_CONFIG_OFF the slashes in r->uri
++ * have not been merged. But for Location walks we always go with merged
++ * slashes no matter what merge_slashes is set to.
++ */
++ if (sconf->merge_slashes != AP_CORE_CONFIG_OFF) {
++ entry_uri = r->uri;
++ }
++ else {
++ char *uri = apr_pstrdup(r->pool, r->uri);
++ ap_no2slash(uri);
++ entry_uri = uri;
++ }
+
+ /* If we have an cache->cached location that matches r->uri,
+ * and the vhost's list of locations hasn't changed, we can skip
+@@ -1486,7 +1499,7 @@
+ pmatch = apr_palloc(rxpool, nmatch*sizeof(ap_regmatch_t));
+ }
+
+- if (ap_regexec(entry_core->r, entry_uri, nmatch, pmatch, 0)) {
++ if (ap_regexec(entry_core->r, r->uri, nmatch, pmatch, 0)) {
+ continue;
+ }
+
+@@ -1496,7 +1509,7 @@
+ apr_table_setn(r->subprocess_env,
+ ((const char **)entry_core->refs->elts)[i],
+ apr_pstrndup(r->pool,
+- entry_uri + pmatch[i].rm_so,
++ r->uri + pmatch[i].rm_so,
+ pmatch[i].rm_eo - pmatch[i].rm_so));
+ }
+ }
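
With MergeSlashes Off, r->uri keeps duplicate slashes, so the patch merges them into a copy before the Location walk while still running regex sections against the raw r->uri. A standalone sketch of the slash merging, where no2slash is a stand-in for Apache's ap_no2slash():

    #include <stdio.h>

    /* Collapse runs of '/' in place. */
    static void no2slash(char *name)
    {
        char *d, *s = name;
        for (d = s; *s; ) {
            if (*s == '/') {
                *d++ = *s++;
                while (*s == '/')
                    ++s;             /* swallow the duplicates */
            }
            else {
                *d++ = *s++;
            }
        }
        *d = '\0';
    }

    int main(void)
    {
        char uri[] = "/app//admin///panel";
        no2slash(uri);
        printf("%s\n", uri);  /* "/app/admin/panel" matches <Location /app/admin> */
        return 0;
    }
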
diff --git a/debian/patches/CVE-2021-31618.patch b/debian/patches/CVE-2021-31618.patch
new file mode 100644
index 0000000..12d59c8
--- /dev/null
+++ b/debian/patches/CVE-2021-31618.patch
@@ -0,0 +1,20 @@
+Description: fix NULL pointer dereference on specially crafted HTTP/2 request
+Author: Upstream
+Origin: upstream, http://svn.apache.org/viewvc/httpd/httpd/branches/2.4.x/modules/http2/h2_stream.c?r1=1889759&r2=1889758&pathrev=1889759
+Bug: https://httpd.apache.org/security/vulnerabilities_24.html#CVE-2021-31618
+Bug-Debian: https://bugs.debian.org/989562
+Forwarded: not-needed
+Reviewed-By: Yadd <yadd@debian.org>
+Last-Update: 2021-06-10
+
+--- a/modules/http2/h2_stream.c
++++ b/modules/http2/h2_stream.c
+@@ -638,7 +638,7 @@
+
+ static void set_error_response(h2_stream *stream, int http_status)
+ {
+- if (!h2_stream_is_ready(stream)) {
++ if (!h2_stream_is_ready(stream) && stream->rtmp) {
+ conn_rec *c = stream->session->c;
+ apr_bucket *b;
+ h2_headers *response;
diff --git a/debian/patches/CVE-2021-34798.patch b/debian/patches/CVE-2021-34798.patch
new file mode 100644
index 0000000..bd6261a
--- /dev/null
+++ b/debian/patches/CVE-2021-34798.patch
@@ -0,0 +1,40 @@
+Description: Initialize the request fields on read failure to avoid NULLs
+Origin: upstream, https://github.com/apache/httpd/commit/74c097f0,
+ https://github.com/apache/httpd/commit/6945bb2
+Bug: https://security-tracker.debian.org/tracker/CVE-2021-34798
+Forwarded: not-needed
+Reviewed-By: Yadd <yadd@debian.org>
+Last-Update: 2021-09-21
+
+--- a/server/protocol.c
++++ b/server/protocol.c
+@@ -643,6 +643,8 @@
+ return end - field;
+ }
+
++static const char m_invalid_str[] = "-";
++
+ static int read_request_line(request_rec *r, apr_bucket_brigade *bb)
+ {
+ enum {
+@@ -685,6 +687,11 @@
+ if (rv != APR_SUCCESS) {
+ r->request_time = apr_time_now();
+
++ /* Fall through with an invalid (non NULL) request */
++ r->method = m_invalid_str;
++ r->method_number = M_INVALID;
++ r->uri = r->unparsed_uri = apr_pstrdup(r->pool, "-");
++
+ /* ap_rgetline returns APR_ENOSPC if it fills up the
+ * buffer before finding the end-of-line. This is only going to
+ * happen if it exceeds the configured limit for a request-line.
+@@ -1330,7 +1337,7 @@
+ "request failed: client's request-line exceeds LimitRequestLine (longer than %d)",
+ r->server->limit_req_line);
+ }
+- else if (r->method == NULL) {
++ else if (r->method == m_invalid_str) {
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(00566)
+ "request failed: malformed request line");
+ }
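
Note that the later check compares pointers, not contents: m_invalid_str is a static sentinel, so r->method == m_invalid_str is true only on the failure path and cannot be spoofed by a client whose request method is literally "-". A standalone sketch of that sentinel pattern:

    #include <stdio.h>
    #include <string.h>

    static const char m_invalid_str[] = "-";

    int main(void)
    {
        const char *failed = m_invalid_str;   /* set on read failure */
        const char *client = strdup("-");     /* client really sent "-" */

        printf("failed read?  %s\n", failed == m_invalid_str ? "yes" : "no");
        printf("client '-'?   %s\n", client == m_invalid_str ? "yes" : "no");
        return 0;
    }
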
diff --git a/debian/patches/CVE-2021-36160-2.patch b/debian/patches/CVE-2021-36160-2.patch
new file mode 100644
index 0000000..cad5774
--- /dev/null
+++ b/debian/patches/CVE-2021-36160-2.patch
@@ -0,0 +1,32 @@
+Description: mod_proxy_uwsgi: Remove duplicate slashes at the beginning of PATH_INFO.
+ Relaxes the behaviour introduced by the CVE-2021-36160 fix
+Author: Stefan Eissing <icing@apache.org>
+Origin: upstream, https://github.com/apache/httpd/commit/8966e290a
+Forwarded: not-needed
+Reviewed-By: Yadd <yadd@debian.org>
+Last-Update: 2021-12-21
+
+--- a/modules/proxy/mod_proxy_uwsgi.c
++++ b/modules/proxy/mod_proxy_uwsgi.c
+@@ -467,11 +467,20 @@
+
+ /* ADD PATH_INFO (unescaped) */
+ u_path_info = ap_strchr(url + sizeof(UWSGI_SCHEME) + 2, '/');
+- if (!u_path_info || ap_unescape_url(u_path_info) != OK) {
++ if (!u_path_info) {
++ u_path_info = apr_pstrdup(r->pool, "/");
++ }
++ else if (ap_unescape_url(u_path_info) != OK) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(10100)
+ "unable to decode uwsgi uri: %s", url);
+ return HTTP_INTERNAL_SERVER_ERROR;
+ }
++ else {
++ /* Remove duplicate slashes at the beginning of PATH_INFO */
++ while (u_path_info[1] == '/') {
++ u_path_info++;
++ }
++ }
+ apr_table_add(r->subprocess_env, "PATH_INFO", u_path_info);
+
+
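
The new while loop strips duplicate leading slashes but, because it tests u_path_info[1] rather than u_path_info[0], always leaves exactly one '/'. A standalone sketch:

    #include <stdio.h>

    int main(void)
    {
        const char *u_path_info = "///foo/bar";
        while (u_path_info[1] == '/')
            u_path_info++;              /* never skips past the final slash */
        printf("%s\n", u_path_info);    /* "/foo/bar" */
        return 0;
    }
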
diff --git a/debian/patches/CVE-2021-36160.patch b/debian/patches/CVE-2021-36160.patch
new file mode 100644
index 0000000..fcd8087
--- /dev/null
+++ b/debian/patches/CVE-2021-36160.patch
@@ -0,0 +1,51 @@
+Description: mod_proxy_uwsgi: Fix PATH_INFO setting for generic worker
+Author: Yann Ylavic <ylavic@apache.org>
+Origin: upstream, https://github.com/apache/httpd/commit/b364cad7
+Bug: https://security-tracker.debian.org/tracker/CVE-2021-36160
+Forwarded: not-needed
+Reviewed-By: Yadd <yadd@debian.org>
+Last-Update: 2021-09-21
+
+--- a/modules/proxy/mod_proxy_uwsgi.c
++++ b/modules/proxy/mod_proxy_uwsgi.c
+@@ -452,11 +452,8 @@
+ const char *proxyname, apr_port_t proxyport)
+ {
+ int status;
+- int delta = 0;
+- int decode_status;
+ proxy_conn_rec *backend = NULL;
+ apr_pool_t *p = r->pool;
+- size_t w_len;
+ char server_portstr[32];
+ char *u_path_info;
+ apr_uri_t *uri;
+@@ -468,23 +465,14 @@
+
+ uri = apr_palloc(r->pool, sizeof(*uri));
+
+- /* ADD PATH_INFO */
+-#if AP_MODULE_MAGIC_AT_LEAST(20111130,0)
+- w_len = strlen(worker->s->name);
+-#else
+- w_len = strlen(worker->name);
+-#endif
+- u_path_info = r->filename + 6 + w_len;
+- if (u_path_info[0] != '/') {
+- delta = 1;
+- }
+- decode_status = ap_unescape_url(url + w_len - delta);
+- if (decode_status) {
++ /* ADD PATH_INFO (unescaped) */
++ u_path_info = ap_strchr(url + sizeof(UWSGI_SCHEME) + 2, '/');
++ if (!u_path_info || ap_unescape_url(u_path_info) != OK) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(10100)
+- "unable to decode uri: %s", url + w_len - delta);
++ "unable to decode uwsgi uri: %s", url);
+ return HTTP_INTERNAL_SERVER_ERROR;
+ }
+- apr_table_add(r->subprocess_env, "PATH_INFO", url + w_len - delta);
++ apr_table_add(r->subprocess_env, "PATH_INFO", u_path_info);
+
+
+ /* Create space for state information */
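
The offset arithmetic in the rewritten hunk relies on sizeof() counting the trailing NUL: assuming UWSGI_SCHEME is defined as "uwsgi" in mod_proxy_uwsgi, url + sizeof(UWSGI_SCHEME) + 2 is url + 8, i.e. just past "uwsgi://", so the strchr() finds the first '/' after host:port. A standalone sketch:

    #include <stdio.h>
    #include <string.h>

    #define UWSGI_SCHEME "uwsgi"

    int main(void)
    {
        const char *url = "uwsgi://backend:3031/app/path%20info";
        /* sizeof(UWSGI_SCHEME) == 6 (includes NUL), so this skips "uwsgi://" */
        const char *u_path_info = strchr(url + sizeof(UWSGI_SCHEME) + 2, '/');
        printf("%s\n", u_path_info ? u_path_info : "(none)");  /* "/app/path%20info" */
        return 0;
    }
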
diff --git a/debian/patches/CVE-2021-39275.patch b/debian/patches/CVE-2021-39275.patch
new file mode 100644
index 0000000..d489891
--- /dev/null
+++ b/debian/patches/CVE-2021-39275.patch
@@ -0,0 +1,35 @@
+Description: Backport of
+ From c69d4cc90c0e27703030b3ff09f91bf4dcbcfd51 Mon Sep 17 00:00:00 2001
+ From: Stefan Eissing <icing@apache.org>
+ Date: Tue, 10 Aug 2021 08:55:54 +0000
+ Subject: [PATCH] Merged r1892012 from trunk:
+ .
+ and
+ From ac62c7e7436560cf4f7725ee586364ce95c07804 Mon Sep 17 00:00:00 2001
+ From: Graham Leggett <minfrin@apache.org>
+ Date: Sat, 21 Aug 2021 21:35:04 +0000
+ Subject: [PATCH] Backport:
+Author: Moritz Muehlenhoff <jmm@inutil.org>
+Origin: upstream
+Forwarded: not-needed
+Last-Update: 2021-09-30
+
+--- a/server/util.c
++++ b/server/util.c
+@@ -2460,13 +2460,12 @@
+ * in front of every " that doesn't already have one.
+ */
+ while (*inchr != '\0') {
+- if ((*inchr == '\\') && (inchr[1] != '\0')) {
+- *outchr++ = *inchr++;
+- *outchr++ = *inchr++;
+- }
+ if (*inchr == '"') {
+ *outchr++ = '\\';
+ }
++ if ((*inchr == '\\') && (inchr[1] != '\0')) {
++ *outchr++ = *inchr++;
++ }
+ if (*inchr != '\0') {
+ *outchr++ = *inchr++;
+ }
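
The reordered loop escapes a bare '"' but copies an existing backslash escape pair as a unit, so already-escaped quotes are not double-counted; miscounting them is what let the output buffer overflow, since ap_escape_quotes sizes the buffer in a first pass using the same logic. A standalone sketch of the fixed loop with a fixed-size output buffer (escape_quotes is a simplified stand-in):

    #include <stdio.h>

    static void escape_quotes(const char *inchr, char *outchr)
    {
        while (*inchr != '\0') {
            if (*inchr == '"')
                *outchr++ = '\\';
            if ((*inchr == '\\') && (inchr[1] != '\0'))
                *outchr++ = *inchr++;   /* keep existing escape pairs intact */
            if (*inchr != '\0')
                *outchr++ = *inchr++;
        }
        *outchr = '\0';
    }

    int main(void)
    {
        char out[64];
        escape_quotes("say \"hi\" \\\" done", out);
        printf("%s\n", out);  /* say \"hi\" \" done -- pre-escaped quote untouched */
        return 0;
    }
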
diff --git a/debian/patches/CVE-2021-40438.patch b/debian/patches/CVE-2021-40438.patch
new file mode 100644
index 0000000..8cf60a7
--- /dev/null
+++ b/debian/patches/CVE-2021-40438.patch
@@ -0,0 +1,124 @@
+Description: Backport of the following patches:
+Origin: upstream,
+ https://github.com/apache/httpd/commit/496c863776c68bd08cdbeb7d8fa5935ba63b76c2
+ https://github.com/apache/httpd/commit/d4901cb32133bc0e59ad193a29d1665597080d67
+ https://github.com/apache/httpd/commit/81a8b0133b46c4cf7dfc4b5476ad46eb34aa0a5c
+ https://github.com/apache/httpd/commit/6e768a811c59ca6a0769b72681aaef381823339f
+Forwarded: not-needed
+Reviewed-By: Moritz Muehlenhoff <jmm@inutil.org>
+Last-Update: 2021-09-30
+
+--- a/modules/mappers/mod_rewrite.c
++++ b/modules/mappers/mod_rewrite.c
+@@ -620,6 +620,13 @@
+ return 6;
+ }
+ break;
++
++ case 'u':
++ case 'U':
++ if (!ap_cstr_casecmpn(uri, "nix:", 4)) { /* unix: */
++ *sqs = 1;
++ return (uri[4] == '/' && uri[5] == '/') ? 7 : 5;
++ }
+ }
+
+ return 0;
+--- a/modules/proxy/mod_proxy.c
++++ b/modules/proxy/mod_proxy.c
+@@ -1690,7 +1690,7 @@
+ * the UDS path... ignore it
+ */
+ if (!strncasecmp(url, "unix:", 5) &&
+- ((ptr = ap_strchr_c(url, '|')) != NULL)) {
++ ((ptr = ap_strchr_c(url + 5, '|')) != NULL)) {
+ /* move past the 'unix:...|' UDS path info */
+ const char *ret, *c;
+
+--- a/modules/proxy/proxy_util.c
++++ b/modules/proxy/proxy_util.c
+@@ -2077,33 +2077,43 @@
+ * were passed a UDS url (eg: from mod_proxy) and adjust uds_path
+ * as required.
+ */
+-static void fix_uds_filename(request_rec *r, char **url)
++static int fix_uds_filename(request_rec *r, char **url)
+ {
+- char *ptr, *ptr2;
+- if (!r || !r->filename) return;
++ char *uds_url = r->filename + 6, *origin_url;
+
+ if (!strncmp(r->filename, "proxy:", 6) &&
+- (ptr2 = ap_strcasestr(r->filename, "unix:")) &&
+- (ptr = ap_strchr(ptr2, '|'))) {
++ !ap_cstr_casecmpn(uds_url, "unix:", 5) &&
++ (origin_url = ap_strchr(uds_url + 5, '|'))) {
++ char *uds_path = NULL;
++ apr_size_t url_len;
+ apr_uri_t urisock;
+ apr_status_t rv;
+- *ptr = '\0';
+- rv = apr_uri_parse(r->pool, ptr2, &urisock);
+- if (rv == APR_SUCCESS) {
+- char *rurl = ptr+1;
+- char *sockpath = ap_runtime_dir_relative(r->pool, urisock.path);
+- apr_table_setn(r->notes, "uds_path", sockpath);
+- *url = apr_pstrdup(r->pool, rurl); /* so we get the scheme for the uds */
+- /* r->filename starts w/ "proxy:", so add after that */
+- memmove(r->filename+6, rurl, strlen(rurl)+1);
+- ap_log_rerror(APLOG_MARK, APLOG_TRACE2, 0, r,
+- "*: rewrite of url due to UDS(%s): %s (%s)",
+- sockpath, *url, r->filename);
++
++ *origin_url = '\0';
++ rv = apr_uri_parse(r->pool, uds_url, &urisock);
++ *origin_url++ = '|';
++
++ if (rv == APR_SUCCESS && urisock.path && (!urisock.hostname
++ || !urisock.hostname[0])) {
++ uds_path = ap_runtime_dir_relative(r->pool, urisock.path);
+ }
+- else {
+- *ptr = '|';
++ if (!uds_path) {
++ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(10292)
++ "Invalid proxy UDS filename (%s)", r->filename);
++ return 0;
+ }
++ apr_table_setn(r->notes, "uds_path", uds_path);
++
++ /* Remove the UDS path from *url and r->filename */
++ url_len = strlen(origin_url);
++ *url = apr_pstrmemdup(r->pool, origin_url, url_len);
++ memcpy(uds_url, *url, url_len + 1);
++
++ ap_log_rerror(APLOG_MARK, APLOG_TRACE2, 0, r,
++ "*: rewrite of url due to UDS(%s): %s (%s)",
++ uds_path, *url, r->filename);
+ }
++ return 1;
+ }
+
+ PROXY_DECLARE(int) ap_proxy_pre_request(proxy_worker **worker,
+@@ -2121,7 +2131,9 @@
+ "%s: found worker %s for %s",
+ (*worker)->s->scheme, (*worker)->s->name, *url);
+ *balancer = NULL;
+- fix_uds_filename(r, url);
++ if (!fix_uds_filename(r, url)) {
++ return HTTP_INTERNAL_SERVER_ERROR;
++ }
+ access_status = OK;
+ }
+ else if (r->proxyreq == PROXYREQ_PROXY) {
+@@ -2152,7 +2164,9 @@
+ * regarding the Connection header in the request.
+ */
+ apr_table_setn(r->subprocess_env, "proxy-nokeepalive", "1");
+- fix_uds_filename(r, url);
++ if (!fix_uds_filename(r, url)) {
++ return HTTP_INTERNAL_SERVER_ERROR;
++ }
+ }
+ }
+ }
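
The URL shape being parsed here is Apache's Unix-domain-socket convention, where the socket path and the origin URL are packed into one filename. A standalone sketch of the split the fixed code performs, on a hypothetical filename:

    #include <stdio.h>
    #include <string.h>
    #include <strings.h>

    int main(void)
    {
        char filename[] = "proxy:unix:/var/run/app.sock|http://localhost/api";
        char *uds_url = filename + 6;            /* past "proxy:" */

        if (strncasecmp(uds_url, "unix:", 5) == 0) {
            /* split at the first '|' *after* the "unix:" prefix */
            char *origin_url = strchr(uds_url + 5, '|');
            if (origin_url) {
                *origin_url++ = '\0';
                printf("socket:     %s\n", uds_url + 5);  /* /var/run/app.sock */
                printf("origin url: %s\n", origin_url);   /* http://localhost/api */
            }
        }
        return 0;
    }
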
diff --git a/debian/patches/CVE-2021-44224-1.patch b/debian/patches/CVE-2021-44224-1.patch
new file mode 100644
index 0000000..0f540c8
--- /dev/null
+++ b/debian/patches/CVE-2021-44224-1.patch
@@ -0,0 +1,206 @@
+Description: mod_proxy: NULL dereference or SSRF via crafted URIs in forward proxy configurations
+Author: Yann Ylavic <ylavic@apache.org>
+Origin: upstream, https://github.com/apache/httpd/commit/a962ba73
+Bug: https://security-tracker.debian.org/tracker/CVE-2021-44224
+Forwarded: not-needed
+Reviewed-By: Yadd <yadd@debian.org>
+Last-Update: 2021-12-21
+
+--- a/include/http_protocol.h
++++ b/include/http_protocol.h
+@@ -75,6 +75,13 @@
+ AP_DECLARE(void) ap_get_mime_headers_core(request_rec *r,
+ apr_bucket_brigade *bb);
+
++/**
++ * Run post_read_request hook and validate.
++ * @param r The current request
++ * @return OK or HTTP_...
++ */
++AP_DECLARE(int) ap_post_read_request(request_rec *r);
++
+ /* Finish up stuff after a request */
+
+ /**
+--- a/modules/http/http_request.c
++++ b/modules/http/http_request.c
+@@ -681,7 +681,7 @@
+ * to do their thing on internal redirects as well. Perhaps this is a
+ * misnamed function.
+ */
+- if ((access_status = ap_run_post_read_request(new))) {
++ if ((access_status = ap_post_read_request(new))) {
+ ap_die(access_status, new);
+ return NULL;
+ }
+--- a/modules/http2/h2_request.c
++++ b/modules/http2/h2_request.c
+@@ -337,7 +337,7 @@
+ NULL, r, r->connection);
+
+ if (access_status != HTTP_OK
+- || (access_status = ap_run_post_read_request(r))) {
++ || (access_status = ap_post_read_request(r))) {
+ /* Request check post hooks failed. An example of this would be a
+ * request for a vhost where h2 is disabled --> 421.
+ */
+--- a/modules/proxy/mod_proxy.c
++++ b/modules/proxy/mod_proxy.c
+@@ -576,13 +576,13 @@
+
+ /* Ick... msvc (perhaps others) promotes ternary short results to int */
+
+- if (conf->req && r->parsed_uri.scheme) {
++ if (conf->req && r->parsed_uri.scheme && r->parsed_uri.hostname) {
+ /* but it might be something vhosted */
+- if (!(r->parsed_uri.hostname
+- && !strcasecmp(r->parsed_uri.scheme, ap_http_scheme(r))
+- && ap_matches_request_vhost(r, r->parsed_uri.hostname,
+- (apr_port_t)(r->parsed_uri.port_str ? r->parsed_uri.port
+- : ap_default_port(r))))) {
++ if (strcasecmp(r->parsed_uri.scheme, ap_http_scheme(r)) != 0
++ || !ap_matches_request_vhost(r, r->parsed_uri.hostname,
++ (apr_port_t)(r->parsed_uri.port_str
++ ? r->parsed_uri.port
++ : ap_default_port(r)))) {
+ r->proxyreq = PROXYREQ_PROXY;
+ r->uri = r->unparsed_uri;
+ r->filename = apr_pstrcat(r->pool, "proxy:", r->uri, NULL);
+@@ -1722,6 +1722,7 @@
+ struct proxy_alias *new;
+ char *f = cmd->path;
+ char *r = NULL;
++ const char *real;
+ char *word;
+ apr_table_t *params = apr_table_make(cmd->pool, 5);
+ const apr_array_header_t *arr;
+@@ -1787,6 +1788,10 @@
+ if (r == NULL) {
+ return "ProxyPass|ProxyPassMatch needs a path when not defined in a location";
+ }
++ if (!(real = ap_proxy_de_socketfy(cmd->temp_pool, r))) {
++ return "ProxyPass|ProxyPassMatch uses an invalid \"unix:\" URL";
++ }
++
+
+ /* if per directory, save away the single alias */
+ if (cmd->path) {
+@@ -1803,7 +1808,7 @@
+ }
+
+ new->fake = apr_pstrdup(cmd->pool, f);
+- new->real = apr_pstrdup(cmd->pool, ap_proxy_de_socketfy(cmd->pool, r));
++ new->real = apr_pstrdup(cmd->pool, real);
+ new->flags = flags;
+ if (use_regex) {
+ new->regex = ap_pregcomp(cmd->pool, f, AP_REG_EXTENDED);
+@@ -2280,6 +2285,7 @@
+ proxy_worker *worker;
+ char *path = cmd->path;
+ char *name = NULL;
++ const char *real;
+ char *word;
+ apr_table_t *params = apr_table_make(cmd->pool, 5);
+ const apr_array_header_t *arr;
+@@ -2320,6 +2326,9 @@
+ return "BalancerMember must define balancer name when outside <Proxy > section";
+ if (!name)
+ return "BalancerMember must define remote proxy server";
++ if (!(real = ap_proxy_de_socketfy(cmd->temp_pool, name))) {
++ return "BalancerMember uses an invalid \"unix:\" URL";
++ }
+
+ ap_str_tolower(path); /* lowercase scheme://hostname */
+
+@@ -2332,7 +2341,7 @@
+ }
+
+ /* Try to find existing worker */
+- worker = ap_proxy_get_worker(cmd->temp_pool, balancer, conf, ap_proxy_de_socketfy(cmd->temp_pool, name));
++ worker = ap_proxy_get_worker(cmd->temp_pool, balancer, conf, real);
+ if (!worker) {
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, cmd->server, APLOGNO(01147)
+ "Defining worker '%s' for balancer '%s'",
+@@ -2421,7 +2430,14 @@
+ }
+ }
+ else {
+- worker = ap_proxy_get_worker(cmd->temp_pool, NULL, conf, ap_proxy_de_socketfy(cmd->temp_pool, name));
++ const char *real;
++
++ if (!(real = ap_proxy_de_socketfy(cmd->temp_pool, name))) {
++ return "ProxySet uses an invalid \"unix:\" URL";
++ }
++
++ worker = ap_proxy_get_worker(cmd->temp_pool, NULL, conf,
++ real);
+ if (!worker) {
+ if (in_proxy_section) {
+ err = ap_proxy_define_worker(cmd->pool, &worker, NULL,
+@@ -2563,8 +2579,14 @@
+ }
+ }
+ else {
++ const char *real;
++
++ if (!(real = ap_proxy_de_socketfy(cmd->temp_pool, conf->p))) {
++ return "<Proxy/ProxyMatch > uses an invalid \"unix:\" URL";
++ }
++
+ worker = ap_proxy_get_worker(cmd->temp_pool, NULL, sconf,
+- ap_proxy_de_socketfy(cmd->temp_pool, (char*)conf->p));
++ real);
+ if (!worker) {
+ err = ap_proxy_define_worker(cmd->pool, &worker, NULL,
+ sconf, conf->p, 0);
+--- a/modules/proxy/proxy_util.c
++++ b/modules/proxy/proxy_util.c
+@@ -1662,6 +1662,9 @@
+ }
+
+ url = ap_proxy_de_socketfy(p, url);
++ if (!url) {
++ return NULL;
++ }
+
+ c = ap_strchr_c(url, ':');
+ if (c == NULL || c[1] != '/' || c[2] != '/' || c[3] == '\0') {
+--- a/server/protocol.c
++++ b/server/protocol.c
+@@ -1465,7 +1465,7 @@
+ NULL, r, r->connection);
+
+ if (access_status != HTTP_OK
+- || (access_status = ap_run_post_read_request(r))) {
++ || (access_status = ap_post_read_request(r))) {
+ ap_die(access_status, r);
+ ap_update_child_status(conn->sbh, SERVER_BUSY_LOG, r);
+ ap_run_log_transaction(r);
+@@ -1503,6 +1503,27 @@
+ return r;
+ }
+
++AP_DECLARE(int) ap_post_read_request(request_rec *r)
++{
++ int status;
++
++ if ((status = ap_run_post_read_request(r))) {
++ return status;
++ }
++
++ /* Enforce http(s) only scheme for non-forward-proxy requests */
++ if (!r->proxyreq
++ && r->parsed_uri.scheme
++ && (ap_cstr_casecmpn(r->parsed_uri.scheme, "http", 4) != 0
++ || (r->parsed_uri.scheme[4] != '\0'
++ && (apr_tolower(r->parsed_uri.scheme[4]) != 's'
++ || r->parsed_uri.scheme[5] != '\0')))) {
++ return HTTP_BAD_REQUEST;
++ }
++
++ return OK;
++}
++
+ /* if a request with a body creates a subrequest, remove original request's
+ * input headers which pertain to the body which has already been read.
+ * out-of-line helper function for ap_set_sub_req_protocol.
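
The new scheme gate in ap_post_read_request() accepts exactly "http" and "https", case-insensitively, and rejects everything else with 400 for non-proxy requests. The patch spells the test out with ap_cstr_casecmpn() and apr_tolower() rather than libc, avoiding locale effects; a standalone sketch of the equivalent check using plain strcasecmp():

    #include <stdio.h>
    #include <strings.h>

    static int is_http_scheme(const char *s)
    {
        return strcasecmp(s, "http") == 0 || strcasecmp(s, "https") == 0;
    }

    int main(void)
    {
        const char *tests[] = { "http", "HTTPS", "ftp", "httpx", "https://" };
        size_t i;
        for (i = 0; i < sizeof(tests) / sizeof(tests[0]); i++)
            printf("%-8s -> %s\n", tests[i],
                   is_http_scheme(tests[i]) ? "ok" : "400");
        return 0;
    }
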
diff --git a/debian/patches/CVE-2021-44224-2.patch b/debian/patches/CVE-2021-44224-2.patch
new file mode 100644
index 0000000..6b841dd
--- /dev/null
+++ b/debian/patches/CVE-2021-44224-2.patch
@@ -0,0 +1,93 @@
+Description: mod_proxy: Don't prevent forwarding URIs w/ no hostname.
+ (fix for r1895955 already in 2.4.x)
+ .
+ Part not applied:
+ #--- a/modules/proxy/mod_proxy.h
+ #+++ b/modules/proxy/mod_proxy.h
+ #@@ -323,6 +323,8 @@
+ # #define PROXY_WORKER_HC_FAIL_FLAG 'C'
+ # #define PROXY_WORKER_HOT_SPARE_FLAG 'R'
+ #
+ #+#define AP_PROXY_WORKER_NO_UDS (1u << 3)
+ #+
+ # #define PROXY_WORKER_NOT_USABLE_BITMAP ( PROXY_WORKER_IN_SHUTDOWN | \
+ # PROXY_WORKER_DISABLED | PROXY_WORKER_STOPPED | PROXY_WORKER_IN_ERROR | \
+ # PROXY_WORKER_HC_FAIL )
+ #--- a/modules/proxy/proxy_util.c
+ #+++ b/modules/proxy/proxy_util.c
+ #@@ -1661,9 +1661,11 @@
+ # return NULL;
+ # }
+ #
+ #- url = ap_proxy_de_socketfy(p, url);
+ #- if (!url) {
+ #- return NULL;
+ #+ if (!(mask & AP_PROXY_WORKER_NO_UDS)) {
+ #+ url = ap_proxy_de_socketfy(p, url);
+ #+ if (!url) {
+ #+ return NULL;
+ #+ }
+ # }
+ #
+ # c = ap_strchr_c(url, ':');
+Author: Stefan Eissing <icing@apache.org>
+Origin: upstream, https://github.com/apache/httpd/commit/a0521d289
+Bug: https://security-tracker.debian.org/tracker/CVE-2021-44224
+Forwarded: not-needed
+Reviewed-By: Yadd <yadd@debian.org>
+Last-Update: 2021-12-21
+
+--- a/modules/proxy/mod_proxy.c
++++ b/modules/proxy/mod_proxy.c
+@@ -576,9 +576,10 @@
+
+ /* Ick... msvc (perhaps others) promotes ternary short results to int */
+
+- if (conf->req && r->parsed_uri.scheme && r->parsed_uri.hostname) {
++ if (conf->req && r->parsed_uri.scheme) {
+ /* but it might be something vhosted */
+- if (strcasecmp(r->parsed_uri.scheme, ap_http_scheme(r)) != 0
++ if (!r->parsed_uri.hostname
++ || strcasecmp(r->parsed_uri.scheme, ap_http_scheme(r)) != 0
+ || !ap_matches_request_vhost(r, r->parsed_uri.hostname,
+ (apr_port_t)(r->parsed_uri.port_str
+ ? r->parsed_uri.port
+--- a/modules/proxy/proxy_util.c
++++ b/modules/proxy/proxy_util.c
+@@ -2128,22 +2128,21 @@
+
+ access_status = proxy_run_pre_request(worker, balancer, r, conf, url);
+ if (access_status == DECLINED && *balancer == NULL) {
++ const int forward = (r->proxyreq == PROXYREQ_PROXY);
+ *worker = ap_proxy_get_worker(r->pool, NULL, conf, *url);
+ if (*worker) {
+ ap_log_rerror(APLOG_MARK, APLOG_TRACE2, 0, r,
+ "%s: found worker %s for %s",
+ (*worker)->s->scheme, (*worker)->s->name, *url);
+- *balancer = NULL;
+- if (!fix_uds_filename(r, url)) {
++ if (!forward && !fix_uds_filename(r, url)) {
+ return HTTP_INTERNAL_SERVER_ERROR;
+ }
+ access_status = OK;
+ }
+- else if (r->proxyreq == PROXYREQ_PROXY) {
++ else if (forward) {
+ if (conf->forward) {
+ ap_log_rerror(APLOG_MARK, APLOG_TRACE2, 0, r,
+ "*: found forward proxy worker for %s", *url);
+- *balancer = NULL;
+ *worker = conf->forward;
+ access_status = OK;
+ /*
+@@ -2157,8 +2156,8 @@
+ else if (r->proxyreq == PROXYREQ_REVERSE) {
+ if (conf->reverse) {
+ ap_log_rerror(APLOG_MARK, APLOG_TRACE2, 0, r,
+- "*: using default reverse proxy worker for %s (no keepalive)", *url);
+- *balancer = NULL;
++ "*: using default reverse proxy worker for %s "
++ "(no keepalive)", *url);
+ *worker = conf->reverse;
+ access_status = OK;
+ /*
diff --git a/debian/patches/CVE-2021-44790.patch b/debian/patches/CVE-2021-44790.patch
new file mode 100644
index 0000000..dbba745
--- /dev/null
+++ b/debian/patches/CVE-2021-44790.patch
@@ -0,0 +1,18 @@
+Description: mod_lua: improve error handling in the multipart parser to prevent a buffer overflow in r:parsebody()
+Author: Stefan Eissing <icing@apache.org>
+Origin: upstream, https://github.com/apache/httpd/commit/07b9768c
+Bug: https://security-tracker.debian.org/tracker/CVE-2021-44790
+Forwarded: not-needed
+Reviewed-By: Yadd <yadd@debian.org>
+Last-Update: 2021-12-21
+
+--- a/modules/lua/lua_request.c
++++ b/modules/lua/lua_request.c
+@@ -376,6 +376,7 @@
+ if (end == NULL) break;
+ key = (char *) apr_pcalloc(r->pool, 256);
+ filename = (char *) apr_pcalloc(r->pool, 256);
++ if (end - crlf <= 8) break;
+ vlen = end - crlf - 8;
+ buffer = (char *) apr_pcalloc(r->pool, vlen+1);
+ memcpy(buffer, crlf + 4, vlen);
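
The single added line guards the subtraction that follows it: end - crlf - 8 is a signed pointer difference, but it is then used as an allocation and copy length, so a crafted multipart part that makes it negative would turn into an enormous unsigned size. A standalone sketch of the failure mode the guard rejects (the pointers below are fabricated stand-ins for the parser state):

    #include <stdio.h>

    int main(void)
    {
        const char *crlf = "\r\n\r\nxy";  /* crafted: payload shorter than 8 */
        const char *end = crlf + 6;       /* end - crlf == 6 */

        if (end - crlf <= 8) {            /* the added guard */
            printf("malformed part rejected\n");
            return 0;
        }
        size_t vlen = (size_t)(end - crlf - 8);  /* would be (size_t)-2 otherwise */
        printf("vlen = %zu\n", vlen);
        return 0;
    }
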
diff --git a/debian/patches/CVE-2022-22719.patch b/debian/patches/CVE-2022-22719.patch
new file mode 100644
index 0000000..c52ceef
--- /dev/null
+++ b/debian/patches/CVE-2022-22719.patch
@@ -0,0 +1,95 @@
+From 1b96582269d9ec7c82ee0fea1f67934e4b8176ad Mon Sep 17 00:00:00 2001
+From: Yann Ylavic <ylavic@apache.org>
+Date: Mon, 7 Mar 2022 14:51:19 +0000
+Subject: [PATCH] mod_lua: Error out if lua_read_body() or lua_write_body()
+ fail.
+
+Otherwise r:requestbody() or r:parsebody() failures might go unnoticed for
+the user.
+
+
+Merge r1898689 from trunk.
+Submitted by: rpluem
+Reviewed by: rpluem, covener, ylavic
+
+
+git-svn-id: https://svn.apache.org/repos/asf/httpd/httpd/branches/2.4.x@1898694 13f79535-47bb-0310-9956-ffa450edef68
+---
+ modules/lua/lua_request.c | 33 ++++++++++++++++++++-------------
+ 1 file changed, 20 insertions(+), 13 deletions(-)
+
+diff --git a/modules/lua/lua_request.c b/modules/lua/lua_request.c
+index 493b2bb431..1eab7b6a47 100644
+--- a/modules/lua/lua_request.c
++++ b/modules/lua/lua_request.c
+@@ -235,14 +235,16 @@ static int lua_read_body(request_rec *r, const char **rbuf, apr_off_t *size,
+ {
+ int rc = OK;
+
++ *rbuf = NULL;
++ *size = 0;
++
+ if ((rc = ap_setup_client_block(r, REQUEST_CHUNKED_ERROR))) {
+ return (rc);
+ }
+ if (ap_should_client_block(r)) {
+
+ /*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+- char argsbuffer[HUGE_STRING_LEN];
+- apr_off_t rsize, len_read, rpos = 0;
++ apr_off_t len_read, rpos = 0;
+ apr_off_t length = r->remaining;
+ /*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+
+@@ -250,18 +252,18 @@ static int lua_read_body(request_rec *r, const char **rbuf, apr_off_t *size,
+ return APR_EINCOMPLETE; /* Only room for incomplete data chunk :( */
+ }
+ *rbuf = (const char *) apr_pcalloc(r->pool, (apr_size_t) (length + 1));
+- *size = length;
+- while ((len_read = ap_get_client_block(r, argsbuffer, sizeof(argsbuffer))) > 0) {
+- if ((rpos + len_read) > length) {
+- rsize = length - rpos;
+- }
+- else {
+- rsize = len_read;
+- }
+-
+- memcpy((char *) *rbuf + rpos, argsbuffer, (size_t) rsize);
+- rpos += rsize;
++ while ((rpos < length)
++ && (len_read = ap_get_client_block(r, (char *) *rbuf + rpos,
++ length - rpos)) > 0) {
++ rpos += len_read;
++ }
++ if (len_read < 0) {
++ return APR_EINCOMPLETE;
+ }
++ *size = rpos;
++ }
++ else {
++ rc = DONE;
+ }
+
+ return (rc);
+@@ -278,6 +280,8 @@ static apr_status_t lua_write_body(request_rec *r, apr_file_t *file, apr_off_t *
+ {
+ apr_status_t rc = OK;
+
++ *size = 0;
++
+ if ((rc = ap_setup_client_block(r, REQUEST_CHUNKED_ERROR)))
+ return rc;
+ if (ap_should_client_block(r)) {
+@@ -303,6 +307,9 @@ static apr_status_t lua_write_body(request_rec *r, apr_file_t *file, apr_off_t *
+ rpos += rsize;
+ }
+ }
++ else {
++ rc = DONE;
++ }
+
+ return rc;
+ }
+--
+2.30.2
+
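
The rewritten loop drops the fixed intermediate argsbuffer, reads straight into the destination at offset rpos bounded by the announced length, and propagates a negative read as an error instead of returning success with a partly-filled buffer. A standalone sketch of that pattern, where read_block is a fake stand-in for ap_get_client_block():

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    static const char *src = "hello body";
    static size_t src_off;

    static long read_block(char *buf, size_t want)  /* fake client read */
    {
        size_t left = strlen(src) - src_off;
        size_t n = want < left ? want : left;
        memcpy(buf, src + src_off, n);
        src_off += n;
        return (long)n;                   /* < 0 would mean a read error */
    }

    int main(void)
    {
        long len_read = 0;
        size_t length = strlen(src), rpos = 0;
        char *rbuf = calloc(1, length + 1);

        while (rpos < length
               && (len_read = read_block(rbuf + rpos, length - rpos)) > 0)
            rpos += (size_t)len_read;
        if (len_read < 0) {   /* the patch returns APR_EINCOMPLETE here */
            free(rbuf);
            return 1;
        }
        printf("read %zu bytes: %s\n", rpos, rbuf);
        free(rbuf);
        return 0;
    }
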
diff --git a/debian/patches/CVE-2022-22720.patch b/debian/patches/CVE-2022-22720.patch
new file mode 100644
index 0000000..a296824
--- /dev/null
+++ b/debian/patches/CVE-2022-22720.patch
@@ -0,0 +1,190 @@
+From 19aa2d83b379719420f3a178413325156d7a62f3 Mon Sep 17 00:00:00 2001
+From: Yann Ylavic <ylavic@apache.org>
+Date: Mon, 7 Mar 2022 14:46:08 +0000
+Subject: [PATCH] core: Simpler connection close logic if discarding the
+ request body fails.
+
+If ap_discard_request_body() sets AP_CONN_CLOSE by itself it simplifies and
+allows to consolidate end_output_stream() and error_output_stream().
+
+
+Merge r1898683 from trunk.
+Submitted by: ylavic, rpluem
+Reviewed by: ylavic, rpluem, covener
+
+
+git-svn-id: https://svn.apache.org/repos/asf/httpd/httpd/branches/2.4.x@1898692 13f79535-47bb-0310-9956-ffa450edef68
+---
+ changes-entries/discard_body.diff | 2 +
+ modules/http/http_filters.c | 69 ++++++++++++++++---------------
+ server/protocol.c | 14 +++++--
+ 3 files changed, 48 insertions(+), 37 deletions(-)
+ create mode 100644 changes-entries/discard_body.diff
+
+diff --git a/changes-entries/discard_body.diff b/changes-entries/discard_body.diff
+new file mode 100644
+index 0000000000..6b467ac5ee
+--- /dev/null
++++ b/changes-entries/discard_body.diff
+@@ -0,0 +1,2 @@
++ *) core: Simpler connection close logic if discarding the request body fails.
++ [Yann Ylavic, Ruediger Pluem]
+\ No newline at end of file
+diff --git a/modules/http/http_filters.c b/modules/http/http_filters.c
+index d9b3621215..43e8c6dd5d 100644
+--- a/modules/http/http_filters.c
++++ b/modules/http/http_filters.c
+@@ -1598,9 +1598,9 @@ AP_DECLARE(int) ap_map_http_request_error(apr_status_t rv, int status)
+ */
+ AP_DECLARE(int) ap_discard_request_body(request_rec *r)
+ {
++ int rc = OK;
++ conn_rec *c = r->connection;
+ apr_bucket_brigade *bb;
+- int seen_eos;
+- apr_status_t rv;
+
+ /* Sometimes we'll get in a state where the input handling has
+ * detected an error where we want to drop the connection, so if
+@@ -1609,54 +1609,57 @@ AP_DECLARE(int) ap_discard_request_body(request_rec *r)
+ *
+ * This function is also a no-op on a subrequest.
+ */
+- if (r->main || r->connection->keepalive == AP_CONN_CLOSE ||
+- ap_status_drops_connection(r->status)) {
++ if (r->main || c->keepalive == AP_CONN_CLOSE) {
++ return OK;
++ }
++ if (ap_status_drops_connection(r->status)) {
++ c->keepalive = AP_CONN_CLOSE;
+ return OK;
+ }
+
+ bb = apr_brigade_create(r->pool, r->connection->bucket_alloc);
+- seen_eos = 0;
+- do {
+- apr_bucket *bucket;
++ for (;;) {
++ apr_status_t rv;
+
+ rv = ap_get_brigade(r->input_filters, bb, AP_MODE_READBYTES,
+ APR_BLOCK_READ, HUGE_STRING_LEN);
+-
+ if (rv != APR_SUCCESS) {
+- apr_brigade_destroy(bb);
+- return ap_map_http_request_error(rv, HTTP_BAD_REQUEST);
++ rc = ap_map_http_request_error(rv, HTTP_BAD_REQUEST);
++ goto cleanup;
+ }
+
+- for (bucket = APR_BRIGADE_FIRST(bb);
+- bucket != APR_BRIGADE_SENTINEL(bb);
+- bucket = APR_BUCKET_NEXT(bucket))
+- {
+- const char *data;
+- apr_size_t len;
++ while (!APR_BRIGADE_EMPTY(bb)) {
++ apr_bucket *b = APR_BRIGADE_FIRST(bb);
+
+- if (APR_BUCKET_IS_EOS(bucket)) {
+- seen_eos = 1;
+- break;
+- }
+-
+- /* These are metadata buckets. */
+- if (bucket->length == 0) {
+- continue;
++ if (APR_BUCKET_IS_EOS(b)) {
++ goto cleanup;
+ }
+
+- /* We MUST read because in case we have an unknown-length
+- * bucket or one that morphs, we want to exhaust it.
++ /* There is no need to read empty or metadata buckets or
++ * buckets of known length, but we MUST read buckets of
++ * unknown length in order to exhaust them.
+ */
+- rv = apr_bucket_read(bucket, &data, &len, APR_BLOCK_READ);
+- if (rv != APR_SUCCESS) {
+- apr_brigade_destroy(bb);
+- return HTTP_BAD_REQUEST;
++ if (b->length == (apr_size_t)-1) {
++ apr_size_t len;
++ const char *data;
++
++ rv = apr_bucket_read(b, &data, &len, APR_BLOCK_READ);
++ if (rv != APR_SUCCESS) {
++ rc = HTTP_BAD_REQUEST;
++ goto cleanup;
++ }
+ }
++
++ apr_bucket_delete(b);
+ }
+- apr_brigade_cleanup(bb);
+- } while (!seen_eos);
++ }
+
+- return OK;
++cleanup:
++ apr_brigade_cleanup(bb);
++ if (rc != OK) {
++ c->keepalive = AP_CONN_CLOSE;
++ }
++ return rc;
+ }
+
+ /* Here we deal with getting the request message body from the client.
+diff --git a/server/protocol.c b/server/protocol.c
+index 2214f72b5a..298f61e1fb 100644
+--- a/server/protocol.c
++++ b/server/protocol.c
+@@ -1687,23 +1687,29 @@ AP_DECLARE(void) ap_set_sub_req_protocol(request_rec *rnew,
+ rnew->main = (request_rec *) r;
+ }
+
+-static void end_output_stream(request_rec *r)
++static void end_output_stream(request_rec *r, int status)
+ {
+ conn_rec *c = r->connection;
+ apr_bucket_brigade *bb;
+ apr_bucket *b;
+
+ bb = apr_brigade_create(r->pool, c->bucket_alloc);
++ if (status != OK) {
++ b = ap_bucket_error_create(status, NULL, r->pool, c->bucket_alloc);
++ APR_BRIGADE_INSERT_TAIL(bb, b);
++ }
+ b = apr_bucket_eos_create(c->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(bb, b);
++
+ ap_pass_brigade(r->output_filters, bb);
++ apr_brigade_cleanup(bb);
+ }
+
+ AP_DECLARE(void) ap_finalize_sub_req_protocol(request_rec *sub)
+ {
+ /* tell the filter chain there is no more content coming */
+ if (!sub->eos_sent) {
+- end_output_stream(sub);
++ end_output_stream(sub, OK);
+ }
+ }
+
+@@ -1714,11 +1720,11 @@ AP_DECLARE(void) ap_finalize_sub_req_protocol(request_rec *sub)
+ */
+ AP_DECLARE(void) ap_finalize_request_protocol(request_rec *r)
+ {
+- (void) ap_discard_request_body(r);
++ int status = ap_discard_request_body(r);
+
+ /* tell the filter chain there is no more content coming */
+ if (!r->eos_sent) {
+- end_output_stream(r);
++ end_output_stream(r, status);
+ }
+ }
+
+--
+2.30.2
+
diff --git a/debian/patches/CVE-2022-22721.patch b/debian/patches/CVE-2022-22721.patch
new file mode 100644
index 0000000..2f607aa
--- /dev/null
+++ b/debian/patches/CVE-2022-22721.patch
@@ -0,0 +1,116 @@
+From 5a72f0fe6f2f8ce35c45242e99a421dc19251ab5 Mon Sep 17 00:00:00 2001
+From: Yann Ylavic <ylavic@apache.org>
+Date: Mon, 7 Mar 2022 14:48:54 +0000
+Subject: [PATCH] core: Make sure and check that LimitXMLRequestBody fits in
+ system memory.
+
+LimitXMLRequestBody can not exceed the size needed to ap_escape_html2() the
+body without failing to allocate memory, so enforce this at load time based
+on APR_SIZE_MAX, and make sure that ap_escape_html2() is within the bounds.
+
+Document the limits for LimitXMLRequestBody in our docs.
+
+
+Merge r1898686 from trunk.
+Submitted by: ylavic, rpluem
+Reviewed by: ylavic, covener, rpluem
+
+
+git-svn-id: https://svn.apache.org/repos/asf/httpd/httpd/branches/2.4.x@1898693 13f79535-47bb-0310-9956-ffa450edef68
+---
+ changes-entries/AP_MAX_LIMIT_XML_BODY.diff | 2 ++
+ docs/manual/mod/core.xml | 12 +++++++++---
+ server/core.c | 9 +++++++++
+ server/util.c | 8 ++++++--
+ server/util_xml.c | 2 +-
+ 5 files changed, 27 insertions(+), 6 deletions(-)
+ create mode 100644 changes-entries/AP_MAX_LIMIT_XML_BODY.diff
+
+diff --git a/changes-entries/AP_MAX_LIMIT_XML_BODY.diff b/changes-entries/AP_MAX_LIMIT_XML_BODY.diff
+new file mode 100644
+index 0000000000..07fef3c624
+--- /dev/null
++++ b/changes-entries/AP_MAX_LIMIT_XML_BODY.diff
+@@ -0,0 +1,2 @@
++ *) core: Make sure and check that LimitXMLRequestBody fits in system memory.
++ [Ruediger Pluem, Yann Ylavic]
+\ No newline at end of file
+diff --git a/server/core.c b/server/core.c
+index 798212b480..090e397642 100644
+--- a/server/core.c
++++ b/server/core.c
+@@ -72,6 +72,8 @@
+ /* LimitXMLRequestBody handling */
+ #define AP_LIMIT_UNSET ((long) -1)
+ #define AP_DEFAULT_LIMIT_XML_BODY ((apr_size_t)1000000)
++/* Hard limit for ap_escape_html2() */
++#define AP_MAX_LIMIT_XML_BODY ((apr_size_t)(APR_SIZE_MAX / 6 - 1))
+
+ #define AP_MIN_SENDFILE_BYTES (256)
+
+@@ -3761,6 +3763,11 @@ static const char *set_limit_xml_req_body(cmd_parms *cmd, void *conf_,
+ if (conf->limit_xml_body < 0)
+ return "LimitXMLRequestBody requires a non-negative integer.";
+
++ /* zero is AP_MAX_LIMIT_XML_BODY (implicitly) */
++ if ((apr_size_t)conf->limit_xml_body > AP_MAX_LIMIT_XML_BODY)
++ return apr_psprintf(cmd->pool, "LimitXMLRequestBody must not exceed "
++ "%" APR_SIZE_T_FMT, AP_MAX_LIMIT_XML_BODY);
++
+ return NULL;
+ }
+
+@@ -3849,6 +3856,8 @@ AP_DECLARE(apr_size_t) ap_get_limit_xml_body(const request_rec *r)
+ conf = ap_get_core_module_config(r->per_dir_config);
+ if (conf->limit_xml_body == AP_LIMIT_UNSET)
+ return AP_DEFAULT_LIMIT_XML_BODY;
++ if (conf->limit_xml_body == 0)
++ return AP_MAX_LIMIT_XML_BODY;
+
+ return (apr_size_t)conf->limit_xml_body;
+ }
+diff --git a/server/util.c b/server/util.c
+index 6cfe0035c4..604be1a1ce 100644
+--- a/server/util.c
++++ b/server/util.c
+@@ -2142,11 +2142,14 @@ AP_DECLARE(char *) ap_escape_urlencoded(apr_pool_t *p, const char *buffer)
+
+ AP_DECLARE(char *) ap_escape_html2(apr_pool_t *p, const char *s, int toasc)
+ {
+- int i, j;
++ apr_size_t i, j;
+ char *x;
+
+ /* first, count the number of extra characters */
+- for (i = 0, j = 0; s[i] != '\0'; i++)
++ for (i = 0, j = 0; s[i] != '\0'; i++) {
++ if (i + j > APR_SIZE_MAX - 6) {
++ abort();
++ }
+ if (s[i] == '<' || s[i] == '>')
+ j += 3;
+ else if (s[i] == '&')
+@@ -2155,6 +2158,7 @@ AP_DECLARE(char *) ap_escape_html2(apr_pool_t *p, const char *s, int toasc)
+ j += 5;
+ else if (toasc && !apr_isascii(s[i]))
+ j += 5;
++ }
+
+ if (j == 0)
+ return apr_pstrmemdup(p, s, i);
+diff --git a/server/util_xml.c b/server/util_xml.c
+index 4845194656..22806fa8a4 100644
+--- a/server/util_xml.c
++++ b/server/util_xml.c
+@@ -85,7 +85,7 @@ AP_DECLARE(int) ap_xml_parse_input(request_rec * r, apr_xml_doc **pdoc)
+ }
+
+ total_read += len;
+- if (limit_xml_body && total_read > limit_xml_body) {
++ if (total_read > limit_xml_body) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(00539)
+ "XML request body is larger than the configured "
+ "limit of %lu", (unsigned long)limit_xml_body);
+--
+2.30.2
+
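
The APR_SIZE_MAX / 6 - 1 constant comes from the worst case in ap_escape_html2(): one input byte can expand to at most six output bytes (e.g. '"' becoming "&quot;"), so any configured limit above that bound could overflow the size arithmetic in the escaper. A standalone sketch of the arithmetic:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        size_t max_limit = SIZE_MAX / 6 - 1;  /* cf. AP_MAX_LIMIT_XML_BODY */
        size_t body = max_limit;
        /* worst case output: every byte becomes 6, plus a terminating NUL */
        printf("limit: %zu, worst-case escaped size: %zu\n",
               max_limit, body * 6 + 1);      /* still <= SIZE_MAX */
        return 0;
    }
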
diff --git a/debian/patches/CVE-2022-23943-1.patch b/debian/patches/CVE-2022-23943-1.patch
new file mode 100644
index 0000000..d82fd1d
--- /dev/null
+++ b/debian/patches/CVE-2022-23943-1.patch
@@ -0,0 +1,360 @@
+From 943f57b336f264d77e5b780c82ab73daf3d14deb Mon Sep 17 00:00:00 2001
+From: Yann Ylavic <ylavic@apache.org>
+Date: Mon, 7 Mar 2022 14:52:42 +0000
+Subject: [PATCH] mod_sed: use size_t to allow for larger buffer sizes and
+ unsigned arithmetics.
+
+Let's switch to apr_size_t buffers and get rid of the ints.
+
+
+Merge r1898690 from trunk.
+Submitted by: rpluem
+Reviewed by: rpluem, covener, ylavic
+
+
+git-svn-id: https://svn.apache.org/repos/asf/httpd/httpd/branches/2.4.x@1898695 13f79535-47bb-0310-9956-ffa450edef68
+---
+ modules/filters/libsed.h | 12 +++---
+ modules/filters/mod_sed.c | 10 ++---
+ modules/filters/sed1.c | 79 +++++++++++++++++++++++----------------
+ 3 files changed, 58 insertions(+), 43 deletions(-)
+
+diff --git a/modules/filters/libsed.h b/modules/filters/libsed.h
+index 76cbc0ce8a..0256b1ea83 100644
+--- a/modules/filters/libsed.h
++++ b/modules/filters/libsed.h
+@@ -60,7 +60,7 @@ struct sed_label_s {
+ };
+
+ typedef apr_status_t (sed_err_fn_t)(void *data, const char *error);
+-typedef apr_status_t (sed_write_fn_t)(void *ctx, char *buf, int sz);
++typedef apr_status_t (sed_write_fn_t)(void *ctx, char *buf, apr_size_t sz);
+
+ typedef struct sed_commands_s sed_commands_t;
+ #define NWFILES 11 /* 10 plus one for standard output */
+@@ -69,7 +69,7 @@ struct sed_commands_s {
+ sed_err_fn_t *errfn;
+ void *data;
+
+- unsigned lsize;
++ apr_size_t lsize;
+ char *linebuf;
+ char *lbend;
+ const char *saveq;
+@@ -116,15 +116,15 @@ struct sed_eval_s {
+ apr_int64_t lnum;
+ void *fout;
+
+- unsigned lsize;
++ apr_size_t lsize;
+ char *linebuf;
+ char *lspend;
+
+- unsigned hsize;
++ apr_size_t hsize;
+ char *holdbuf;
+ char *hspend;
+
+- unsigned gsize;
++ apr_size_t gsize;
+ char *genbuf;
+ char *lcomend;
+
+@@ -160,7 +160,7 @@ apr_status_t sed_init_eval(sed_eval_t *eval, sed_commands_t *commands,
+ sed_err_fn_t *errfn, void *data,
+ sed_write_fn_t *writefn, apr_pool_t *p);
+ apr_status_t sed_reset_eval(sed_eval_t *eval, sed_commands_t *commands, sed_err_fn_t *errfn, void *data);
+-apr_status_t sed_eval_buffer(sed_eval_t *eval, const char *buf, int bufsz, void *fout);
++apr_status_t sed_eval_buffer(sed_eval_t *eval, const char *buf, apr_size_t bufsz, void *fout);
+ apr_status_t sed_eval_file(sed_eval_t *eval, apr_file_t *fin, void *fout);
+ apr_status_t sed_finalize_eval(sed_eval_t *eval, void *f);
+ void sed_destroy_eval(sed_eval_t *eval);
+diff --git a/modules/filters/mod_sed.c b/modules/filters/mod_sed.c
+index 9b408029a8..7092dd5e7f 100644
+--- a/modules/filters/mod_sed.c
++++ b/modules/filters/mod_sed.c
+@@ -51,7 +51,7 @@ typedef struct sed_filter_ctxt
+ apr_bucket_brigade *bbinp;
+ char *outbuf;
+ char *curoutbuf;
+- int bufsize;
++ apr_size_t bufsize;
+ apr_pool_t *tpool;
+ int numbuckets;
+ } sed_filter_ctxt;
+@@ -100,7 +100,7 @@ static void alloc_outbuf(sed_filter_ctxt* ctx)
+ /* append_bucket
+ * Allocate a new bucket from buf and sz and append to ctx->bb
+ */
+-static apr_status_t append_bucket(sed_filter_ctxt* ctx, char* buf, int sz)
++static apr_status_t append_bucket(sed_filter_ctxt* ctx, char* buf, apr_size_t sz)
+ {
+ apr_status_t status = APR_SUCCESS;
+ apr_bucket *b;
+@@ -133,7 +133,7 @@ static apr_status_t append_bucket(sed_filter_ctxt* ctx, char* buf, int sz)
+ */
+ static apr_status_t flush_output_buffer(sed_filter_ctxt *ctx)
+ {
+- int size = ctx->curoutbuf - ctx->outbuf;
++ apr_size_t size = ctx->curoutbuf - ctx->outbuf;
+ char *out;
+ apr_status_t status = APR_SUCCESS;
+ if ((ctx->outbuf == NULL) || (size <=0))
+@@ -147,12 +147,12 @@ static apr_status_t flush_output_buffer(sed_filter_ctxt *ctx)
+ /* This is a call back function. When libsed wants to generate the output,
+ * this function will be invoked.
+ */
+-static apr_status_t sed_write_output(void *dummy, char *buf, int sz)
++static apr_status_t sed_write_output(void *dummy, char *buf, apr_size_t sz)
+ {
+ /* dummy is basically filter context. Context is passed during invocation
+ * of sed_eval_buffer
+ */
+- int remainbytes = 0;
++ apr_size_t remainbytes = 0;
+ apr_status_t status = APR_SUCCESS;
+ sed_filter_ctxt *ctx = (sed_filter_ctxt *) dummy;
+ if (ctx->outbuf == NULL) {
+diff --git a/modules/filters/sed1.c b/modules/filters/sed1.c
+index be03506788..67a8d06515 100644
+--- a/modules/filters/sed1.c
++++ b/modules/filters/sed1.c
+@@ -71,7 +71,7 @@ static apr_status_t dosub(sed_eval_t *eval, char *rhsbuf, int n,
+ static char *place(sed_eval_t *eval, char *asp, char *al1, char *al2);
+ static apr_status_t command(sed_eval_t *eval, sed_reptr_t *ipc,
+ step_vars_storage *step_vars);
+-static apr_status_t wline(sed_eval_t *eval, char *buf, int sz);
++static apr_status_t wline(sed_eval_t *eval, char *buf, apr_size_t sz);
+ static apr_status_t arout(sed_eval_t *eval);
+
+ static void eval_errf(sed_eval_t *eval, const char *fmt, ...)
+@@ -92,11 +92,11 @@ static void eval_errf(sed_eval_t *eval, const char *fmt, ...)
+ * grow_buffer
+ */
+ static void grow_buffer(apr_pool_t *pool, char **buffer,
+- char **spend, unsigned int *cursize,
+- unsigned int newsize)
++ char **spend, apr_size_t *cursize,
++ apr_size_t newsize)
+ {
+ char* newbuffer = NULL;
+- int spendsize = 0;
++ apr_size_t spendsize = 0;
+ if (*cursize >= newsize)
+ return;
+ /* Avoid number of times realloc is called. It could cause huge memory
+@@ -124,7 +124,7 @@ static void grow_buffer(apr_pool_t *pool, char **buffer,
+ /*
+ * grow_line_buffer
+ */
+-static void grow_line_buffer(sed_eval_t *eval, int newsize)
++static void grow_line_buffer(sed_eval_t *eval, apr_size_t newsize)
+ {
+ grow_buffer(eval->pool, &eval->linebuf, &eval->lspend,
+ &eval->lsize, newsize);
+@@ -133,7 +133,7 @@ static void grow_line_buffer(sed_eval_t *eval, int newsize)
+ /*
+ * grow_hold_buffer
+ */
+-static void grow_hold_buffer(sed_eval_t *eval, int newsize)
++static void grow_hold_buffer(sed_eval_t *eval, apr_size_t newsize)
+ {
+ grow_buffer(eval->pool, &eval->holdbuf, &eval->hspend,
+ &eval->hsize, newsize);
+@@ -142,7 +142,7 @@ static void grow_hold_buffer(sed_eval_t *eval, int newsize)
+ /*
+ * grow_gen_buffer
+ */
+-static void grow_gen_buffer(sed_eval_t *eval, int newsize,
++static void grow_gen_buffer(sed_eval_t *eval, apr_size_t newsize,
+ char **gspend)
+ {
+ if (gspend == NULL) {
+@@ -156,9 +156,9 @@ static void grow_gen_buffer(sed_eval_t *eval, int newsize,
+ /*
+ * appendmem_to_linebuf
+ */
+-static void appendmem_to_linebuf(sed_eval_t *eval, const char* sz, int len)
++static void appendmem_to_linebuf(sed_eval_t *eval, const char* sz, apr_size_t len)
+ {
+- unsigned int reqsize = (eval->lspend - eval->linebuf) + len;
++ apr_size_t reqsize = (eval->lspend - eval->linebuf) + len;
+ if (eval->lsize < reqsize) {
+ grow_line_buffer(eval, reqsize);
+ }
+@@ -169,21 +169,36 @@ static void appendmem_to_linebuf(sed_eval_t *eval, const char* sz, int len)
+ /*
+ * append_to_linebuf
+ */
+-static void append_to_linebuf(sed_eval_t *eval, const char* sz)
++static void append_to_linebuf(sed_eval_t *eval, const char* sz,
++ step_vars_storage *step_vars)
+ {
+- int len = strlen(sz);
++ apr_size_t len = strlen(sz);
++ char *old_linebuf = eval->linebuf;
+ /* Copy string including null character */
+ appendmem_to_linebuf(eval, sz, len + 1);
+ --eval->lspend; /* lspend will now point to NULL character */
++ /* Sync step_vars after a possible linebuf expansion */
++ if (step_vars && old_linebuf != eval->linebuf) {
++ if (step_vars->loc1) {
++ step_vars->loc1 = step_vars->loc1 - old_linebuf + eval->linebuf;
++ }
++ if (step_vars->loc2) {
++ step_vars->loc2 = step_vars->loc2 - old_linebuf + eval->linebuf;
++ }
++ if (step_vars->locs) {
++ step_vars->locs = step_vars->locs - old_linebuf + eval->linebuf;
++ }
++ }
+ }
+
+ /*
+ * copy_to_linebuf
+ */
+-static void copy_to_linebuf(sed_eval_t *eval, const char* sz)
++static void copy_to_linebuf(sed_eval_t *eval, const char* sz,
++ step_vars_storage *step_vars)
+ {
+ eval->lspend = eval->linebuf;
+- append_to_linebuf(eval, sz);
++ append_to_linebuf(eval, sz, step_vars);
+ }
+
+ /*
+@@ -191,8 +206,8 @@ static void copy_to_linebuf(sed_eval_t *eval, const char* sz)
+ */
+ static void append_to_holdbuf(sed_eval_t *eval, const char* sz)
+ {
+- int len = strlen(sz);
+- unsigned int reqsize = (eval->hspend - eval->holdbuf) + len + 1;
++ apr_size_t len = strlen(sz);
++ apr_size_t reqsize = (eval->hspend - eval->holdbuf) + len + 1;
+ if (eval->hsize <= reqsize) {
+ grow_hold_buffer(eval, reqsize);
+ }
+@@ -215,8 +230,8 @@ static void copy_to_holdbuf(sed_eval_t *eval, const char* sz)
+ */
+ static void append_to_genbuf(sed_eval_t *eval, const char* sz, char **gspend)
+ {
+- int len = strlen(sz);
+- unsigned int reqsize = (*gspend - eval->genbuf) + len + 1;
++ apr_size_t len = strlen(sz);
++ apr_size_t reqsize = (*gspend - eval->genbuf) + len + 1;
+ if (eval->gsize < reqsize) {
+ grow_gen_buffer(eval, reqsize, gspend);
+ }
+@@ -230,8 +245,8 @@ static void append_to_genbuf(sed_eval_t *eval, const char* sz, char **gspend)
+ */
+ static void copy_to_genbuf(sed_eval_t *eval, const char* sz)
+ {
+- int len = strlen(sz);
+- unsigned int reqsize = len + 1;
++ apr_size_t len = strlen(sz);
++ apr_size_t reqsize = len + 1;
+ if (eval->gsize < reqsize) {
+ grow_gen_buffer(eval, reqsize, NULL);
+ }
+@@ -353,7 +368,7 @@ apr_status_t sed_eval_file(sed_eval_t *eval, apr_file_t *fin, void *fout)
+ /*
+ * sed_eval_buffer
+ */
+-apr_status_t sed_eval_buffer(sed_eval_t *eval, const char *buf, int bufsz, void *fout)
++apr_status_t sed_eval_buffer(sed_eval_t *eval, const char *buf, apr_size_t bufsz, void *fout)
+ {
+ apr_status_t rv;
+
+@@ -383,7 +398,7 @@ apr_status_t sed_eval_buffer(sed_eval_t *eval, const char *buf, int bufsz, void
+
+ while (bufsz) {
+ char *n;
+- int llen;
++ apr_size_t llen;
+
+ n = memchr(buf, '\n', bufsz);
+ if (n == NULL)
+@@ -442,7 +457,7 @@ apr_status_t sed_finalize_eval(sed_eval_t *eval, void *fout)
+ * buffer is not a newline.
+ */
+ /* Assure space for NULL */
+- append_to_linebuf(eval, "");
++ append_to_linebuf(eval, "", NULL);
+ }
+
+ *eval->lspend = '\0';
+@@ -666,7 +681,7 @@ static apr_status_t dosub(sed_eval_t *eval, char *rhsbuf, int n,
+ lp = step_vars->loc2;
+ step_vars->loc2 = sp - eval->genbuf + eval->linebuf;
+ append_to_genbuf(eval, lp, &sp);
+- copy_to_linebuf(eval, eval->genbuf);
++ copy_to_linebuf(eval, eval->genbuf, step_vars);
+ return rv;
+ }
+
+@@ -676,8 +691,8 @@ static apr_status_t dosub(sed_eval_t *eval, char *rhsbuf, int n,
+ static char *place(sed_eval_t *eval, char *asp, char *al1, char *al2)
+ {
+ char *sp = asp;
+- int n = al2 - al1;
+- unsigned int reqsize = (sp - eval->genbuf) + n + 1;
++ apr_size_t n = al2 - al1;
++ apr_size_t reqsize = (sp - eval->genbuf) + n + 1;
+
+ if (eval->gsize < reqsize) {
+ grow_gen_buffer(eval, reqsize, &sp);
+@@ -735,7 +750,7 @@ static apr_status_t command(sed_eval_t *eval, sed_reptr_t *ipc,
+ }
+
+ p1++;
+- copy_to_linebuf(eval, p1);
++ copy_to_linebuf(eval, p1, step_vars);
+ eval->jflag++;
+ break;
+
+@@ -745,12 +760,12 @@ static apr_status_t command(sed_eval_t *eval, sed_reptr_t *ipc,
+ break;
+
+ case GCOM:
+- copy_to_linebuf(eval, eval->holdbuf);
++ copy_to_linebuf(eval, eval->holdbuf, step_vars);
+ break;
+
+ case CGCOM:
+- append_to_linebuf(eval, "\n");
+- append_to_linebuf(eval, eval->holdbuf);
++ append_to_linebuf(eval, "\n", step_vars);
++ append_to_linebuf(eval, eval->holdbuf, step_vars);
+ break;
+
+ case HCOM:
+@@ -881,7 +896,7 @@ static apr_status_t command(sed_eval_t *eval, sed_reptr_t *ipc,
+ if (rv != APR_SUCCESS)
+ return rv;
+ }
+- append_to_linebuf(eval, "\n");
++ append_to_linebuf(eval, "\n", step_vars);
+ eval->pending = ipc->next;
+ break;
+
+@@ -956,7 +971,7 @@ static apr_status_t command(sed_eval_t *eval, sed_reptr_t *ipc,
+
+ case XCOM:
+ copy_to_genbuf(eval, eval->linebuf);
+- copy_to_linebuf(eval, eval->holdbuf);
++ copy_to_linebuf(eval, eval->holdbuf, step_vars);
+ copy_to_holdbuf(eval, eval->genbuf);
+ break;
+
+@@ -1013,7 +1028,7 @@ static apr_status_t arout(sed_eval_t *eval)
+ /*
+ * wline
+ */
+-static apr_status_t wline(sed_eval_t *eval, char *buf, int sz)
++static apr_status_t wline(sed_eval_t *eval, char *buf, apr_size_t sz)
+ {
+ apr_status_t rv = APR_SUCCESS;
+ rv = eval->writefn(eval->fout, buf, sz);
+--
+2.30.2
+
diff --git a/debian/patches/CVE-2022-23943-2.patch b/debian/patches/CVE-2022-23943-2.patch
new file mode 100644
index 0000000..bcf883c
--- /dev/null
+++ b/debian/patches/CVE-2022-23943-2.patch
@@ -0,0 +1,63 @@
+From e266bd09c313a668d7cca17a8b096d189148be49 Mon Sep 17 00:00:00 2001
+From: Ruediger Pluem <rpluem@apache.org>
+Date: Wed, 9 Mar 2022 07:41:40 +0000
+Subject: [PATCH] Merge r1898735 from trunk:
+
+* Improve the logic flow
+
+Reviewed by: rpluem, covener, ylavic
+
+
+git-svn-id: https://svn.apache.org/repos/asf/httpd/httpd/branches/2.4.x@1898772 13f79535-47bb-0310-9956-ffa450edef68
+---
+ modules/filters/mod_sed.c | 30 +++++++++++++++++++-----------
+ 1 file changed, 19 insertions(+), 11 deletions(-)
+
+diff --git a/modules/filters/mod_sed.c b/modules/filters/mod_sed.c
+index 7092dd5e7f..4bdb4ce33a 100644
+--- a/modules/filters/mod_sed.c
++++ b/modules/filters/mod_sed.c
+@@ -168,21 +168,29 @@ static apr_status_t sed_write_output(void *dummy, char *buf, apr_size_t sz)
+ }
+ /* buffer is now full */
+ status = append_bucket(ctx, ctx->outbuf, ctx->bufsize);
+- /* old buffer is now used so allocate new buffer */
+- alloc_outbuf(ctx);
+- /* if size is bigger than the allocated buffer directly add to output
+- * brigade */
+- if ((status == APR_SUCCESS) && (sz >= ctx->bufsize)) {
+- char* newbuf = apr_pmemdup(ctx->tpool, buf, sz);
+- status = append_bucket(ctx, newbuf, sz);
+- /* pool might get clear after append_bucket */
+- if (ctx->outbuf == NULL) {
++ if (status == APR_SUCCESS) {
++ /* if size is bigger than the allocated buffer directly add to output
++ * brigade */
++ if (sz >= ctx->bufsize) {
++ char* newbuf = apr_pmemdup(ctx->tpool, buf, sz);
++ status = append_bucket(ctx, newbuf, sz);
++ if (status == APR_SUCCESS) {
++ /* old buffer is now used so allocate new buffer */
++ alloc_outbuf(ctx);
++ }
++ else {
++ clear_ctxpool(ctx);
++ }
++ }
++ else {
++ /* old buffer is now used so allocate new buffer */
+ alloc_outbuf(ctx);
++ memcpy(ctx->curoutbuf, buf, sz);
++ ctx->curoutbuf += sz;
+ }
+ }
+ else {
+- memcpy(ctx->curoutbuf, buf, sz);
+- ctx->curoutbuf += sz;
++ clear_ctxpool(ctx);
+ }
+ }
+ else {
+--
+2.30.2
+
diff --git a/debian/patches/CVE-2022-26377.patch b/debian/patches/CVE-2022-26377.patch
new file mode 100644
index 0000000..af59776
--- /dev/null
+++ b/debian/patches/CVE-2022-26377.patch
@@ -0,0 +1,39 @@
+From f7f15f3d8bfe3032926c8c39eb8434529f680bd4 Mon Sep 17 00:00:00 2001
+From: Yann Ylavic <ylavic@apache.org>
+Date: Wed, 1 Jun 2022 13:48:21 +0000
+Subject: [PATCH] mod_proxy_ajp: T-E has precedence over C-L.
+
+Merge r1901521 from trunk.
+Submitted by: rpluem
+
+
+git-svn-id: https://svn.apache.org/repos/asf/httpd/httpd/branches/2.4.x@1901522 13f79535-47bb-0310-9956-ffa450edef68
+Origin: https://github.com/apache/httpd/commit/f7f15f3d8bfe3032926c8c39eb8434529f680bd4
+---
+ modules/proxy/mod_proxy_ajp.c | 15 ++++++++++++---
+ 1 file changed, 12 insertions(+), 3 deletions(-)
+
+--- a/modules/proxy/mod_proxy_ajp.c
++++ b/modules/proxy/mod_proxy_ajp.c
+@@ -245,9 +245,18 @@
+ /* read the first bloc of data */
+ input_brigade = apr_brigade_create(p, r->connection->bucket_alloc);
+ tenc = apr_table_get(r->headers_in, "Transfer-Encoding");
+- if (tenc && (strcasecmp(tenc, "chunked") == 0)) {
+- /* The AJP protocol does not want body data yet */
+- ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(00870) "request is chunked");
++ if (tenc) {
++ if (strcasecmp(tenc, "chunked") == 0) {
++ /* The AJP protocol does not want body data yet */
++ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(00870)
++ "request is chunked");
++ }
++ else {
++ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(10396)
++ "%s Transfer-Encoding is not supported",
++ tenc);
++ return HTTP_INTERNAL_SERVER_ERROR;
++ }
+ } else {
+ /* Get client provided Content-Length header */
+ content_length = get_content_length(r);
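
This closes a request-smuggling window: per RFC 7230 section 3.3.3, Transfer-Encoding takes precedence over Content-Length, so when AJP (which cannot forward chunked bodies) sees a Transfer-Encoding it cannot handle, it must fail rather than silently fall back to Content-Length framing that a front-end may have interpreted differently. A standalone sketch of the precedence decision (decide() is a hypothetical condensation of the patched control flow):

    #include <stdio.h>
    #include <strings.h>

    static const char *decide(const char *tenc, const char *clen)
    {
        if (tenc) {
            if (strcasecmp(tenc, "chunked") == 0)
                return "chunked framing";
            return "500: unsupported Transfer-Encoding";  /* never use clen */
        }
        return clen ? "Content-Length framing" : "no body";
    }

    int main(void)
    {
        printf("%s\n", decide("chunked", "42"));  /* chunked framing */
        printf("%s\n", decide("gzip", "42"));     /* rejected, no smuggling */
        printf("%s\n", decide(NULL, "42"));       /* Content-Length framing */
        return 0;
    }
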
diff --git a/debian/patches/CVE-2022-28614.patch b/debian/patches/CVE-2022-28614.patch
new file mode 100644
index 0000000..fdd8f6b
--- /dev/null
+++ b/debian/patches/CVE-2022-28614.patch
@@ -0,0 +1,65 @@
+From 8c14927162cf3b4f810683e1c5505e9ef9e1f123 Mon Sep 17 00:00:00 2001
+From: Eric Covener <covener@apache.org>
+Date: Wed, 1 Jun 2022 12:34:16 +0000
+Subject: [PATCH] Merge r1901500 from trunk:
+
+handle large writes in ap_rputs
+
+
+git-svn-id: https://svn.apache.org/repos/asf/httpd/httpd/branches/2.4.x@1901501 13f79535-47bb-0310-9956-ffa450edef68
+Origin: https://github.com/apache/httpd/commit/8c14927162cf3b4f810683e1c5505e9ef9e1f123
+---
+ include/http_protocol.h | 22 +++++++++++++++++++++-
+ server/protocol.c | 3 +++
+ 2 files changed, 24 insertions(+), 1 deletion(-)
+
+diff --git a/include/http_protocol.h b/include/http_protocol.h
+index 20bd202226..94c481e5f4 100644
+--- a/include/http_protocol.h
++++ b/include/http_protocol.h
+@@ -475,7 +475,27 @@ AP_DECLARE(int) ap_rwrite(const void *buf, int nbyte, request_rec *r);
+ */
+ static APR_INLINE int ap_rputs(const char *str, request_rec *r)
+ {
+- return ap_rwrite(str, (int)strlen(str), r);
++ apr_size_t len;
++
++ len = strlen(str);
++
++ for (;;) {
++ if (len <= INT_MAX) {
++ return ap_rwrite(str, (int)len, r);
++ }
++ else {
++ int rc;
++
++ rc = ap_rwrite(str, INT_MAX, r);
++ if (rc < 0) {
++ return rc;
++ }
++ else {
++ str += INT_MAX;
++ len -= INT_MAX;
++ }
++ }
++ }
+ }
+
+ /**
+diff --git a/server/protocol.c b/server/protocol.c
+index 298f61e1fb..7adc7f75c1 100644
+--- a/server/protocol.c
++++ b/server/protocol.c
+@@ -2128,6 +2128,9 @@ AP_DECLARE(int) ap_rputc(int c, request_rec *r)
+
+ AP_DECLARE(int) ap_rwrite(const void *buf, int nbyte, request_rec *r)
+ {
++ if (nbyte < 0)
++ return -1;
++
+ if (r->connection->aborted)
+ return -1;
+
+--
+2.30.2
+
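
ap_rwrite() takes an int length, so the inlined ap_rputs() now splits strings longer than INT_MAX into int-sized writes, and ap_rwrite() itself rejects negative lengths. A standalone sketch of the chunking loop (write_fn is a stand-in for ap_rwrite()):

    #include <limits.h>
    #include <stdio.h>
    #include <string.h>

    static int write_fn(const char *buf, int nbyte)  /* int-limited sink */
    {
        if (nbyte < 0)
            return -1;
        return (int)fwrite(buf, 1, (size_t)nbyte, stdout) == nbyte ? nbyte : -1;
    }

    static int puts_all(const char *str)
    {
        size_t len = strlen(str);
        for (;;) {
            if (len <= INT_MAX)
                return write_fn(str, (int)len);  /* final short write */
            if (write_fn(str, INT_MAX) < 0)
                return -1;
            str += INT_MAX;                      /* advance past what was written */
            len -= INT_MAX;
        }
    }

    int main(void)
    {
        return puts_all("hello\n") < 0;
    }
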
diff --git a/debian/patches/CVE-2022-28615.patch b/debian/patches/CVE-2022-28615.patch
new file mode 100644
index 0000000..2c15157
--- /dev/null
+++ b/debian/patches/CVE-2022-28615.patch
@@ -0,0 +1,35 @@
+From 6503d09ab51047554c384a6d03646ce1a8848120 Mon Sep 17 00:00:00 2001
+From: Eric Covener <covener@apache.org>
+Date: Wed, 1 Jun 2022 12:21:45 +0000
+Subject: [PATCH] Merge r1901494 from trunk:
+
+fix types
+
+
+
+git-svn-id: https://svn.apache.org/repos/asf/httpd/httpd/branches/2.4.x@1901495 13f79535-47bb-0310-9956-ffa450edef68
+Origin: https://github.com/apache/httpd/commit/6503d09ab51047554c384a6d03646ce1a8848120
+---
+ server/util.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/server/util.c
++++ b/server/util.c
+@@ -186,7 +186,7 @@
+ */
+ AP_DECLARE(int) ap_strcmp_match(const char *str, const char *expected)
+ {
+- int x, y;
++ apr_size_t x, y;
+
+ for (x = 0, y = 0; expected[y]; ++y, ++x) {
+ if ((!str[x]) && (expected[y] != '*'))
+@@ -210,7 +210,7 @@
+
+ AP_DECLARE(int) ap_strcasecmp_match(const char *str, const char *expected)
+ {
+- int x, y;
++ apr_size_t x, y;
+
+ for (x = 0, y = 0; expected[y]; ++y, ++x) {
+ if (!str[x] && expected[y] != '*')
diff --git a/debian/patches/CVE-2022-29404.patch b/debian/patches/CVE-2022-29404.patch
new file mode 100644
index 0000000..259e920
--- /dev/null
+++ b/debian/patches/CVE-2022-29404.patch
@@ -0,0 +1,82 @@
+From ce259c4061905bf834f9af51c92456cfe8335ddc Mon Sep 17 00:00:00 2001
+From: Eric Covener <covener@apache.org>
+Date: Wed, 1 Jun 2022 12:31:48 +0000
+Subject: [PATCH] Merge r1901497 from trunk:
+
+use a liberal default limit for LimitRequestBody of 1GB
+
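+Two changes in one: the compiled-in default for LimitRequestBody moves
+from 0 (unlimited) to 1 GiB, and a request whose declared
+Content-Length already exceeds the limit is now rejected up front in
+ap_setup_client_block(), which also makes mod_proxy's own spool-time
+check redundant. Deployments that need unlimited request bodies can
+still set "LimitRequestBody 0" explicitly. A hedged sketch of the
+early check (illustrative, not the exact httpd code):
+
+    #include "http_core.h"     /* ap_get_limit_req_body() */
+
+    /* a configured limit of 0 means "unlimited" */
+    static int body_within_limit(request_rec *r, apr_off_t declared_len)
+    {
+        apr_off_t limit = ap_get_limit_req_body(r);
+        return (limit <= 0) || (declared_len <= limit);
+    }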
+
+git-svn-id: https://svn.apache.org/repos/asf/httpd/httpd/branches/2.4.x@1901499 13f79535-47bb-0310-9956-ffa450edef68
+Origin: https://github.com/apache/httpd/commit/ce259c4061905bf834f9af51c92456cfe8335ddc
+---
+ modules/http/http_filters.c | 6 ++++++
+ modules/proxy/mod_proxy_http.c | 14 --------------
+ server/core.c | 2 +-
+ 3 files changed, 7 insertions(+), 15 deletions(-)
+
+--- a/modules/http/http_filters.c
++++ b/modules/http/http_filters.c
+@@ -1657,6 +1657,7 @@
+ {
+ const char *tenc = apr_table_get(r->headers_in, "Transfer-Encoding");
+ const char *lenp = apr_table_get(r->headers_in, "Content-Length");
++ apr_off_t limit_req_body = ap_get_limit_req_body(r);
+
+ r->read_body = read_policy;
+ r->read_chunked = 0;
+@@ -1695,6 +1696,11 @@
+ return HTTP_REQUEST_ENTITY_TOO_LARGE;
+ }
+
++ if (limit_req_body > 0 && (r->remaining > limit_req_body)) {
++ /* will be logged when the body is discarded */
++ return HTTP_REQUEST_ENTITY_TOO_LARGE;
++ }
++
+ #ifdef AP_DEBUG
+ {
+ /* Make sure ap_getline() didn't leave any droppings. */
+--- a/server/core.c
++++ b/server/core.c
+@@ -61,7 +61,7 @@
+
+ /* LimitRequestBody handling */
+ #define AP_LIMIT_REQ_BODY_UNSET ((apr_off_t) -1)
+-#define AP_DEFAULT_LIMIT_REQ_BODY ((apr_off_t) 0)
++#define AP_DEFAULT_LIMIT_REQ_BODY ((apr_off_t) 1<<30) /* 1GB */
+
+ /* LimitXMLRequestBody handling */
+ #define AP_LIMIT_UNSET ((long) -1)
+--- a/modules/proxy/mod_proxy_http.c
++++ b/modules/proxy/mod_proxy_http.c
+@@ -512,12 +512,9 @@
+ apr_bucket *e;
+ apr_off_t bytes, bytes_spooled = 0, fsize = 0;
+ apr_file_t *tmpfile = NULL;
+- apr_off_t limit;
+
+ body_brigade = apr_brigade_create(p, bucket_alloc);
+
+- limit = ap_get_limit_req_body(r);
+-
+ while (!APR_BUCKET_IS_EOS(APR_BRIGADE_FIRST(input_brigade)))
+ {
+ /* If this brigade contains EOS, either stop or remove it. */
+@@ -532,17 +529,6 @@
+ apr_brigade_length(input_brigade, 1, &bytes);
+
+ if (bytes_spooled + bytes > MAX_MEM_SPOOL) {
+- /*
+- * LimitRequestBody does not affect Proxy requests (Should it?).
+- * Let it take effect if we decide to store the body in a
+- * temporary file on disk.
+- */
+- if (limit && (bytes_spooled + bytes > limit)) {
+- ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(01088)
+- "Request body is larger than the configured "
+- "limit of %" APR_OFF_T_FMT, limit);
+- return HTTP_REQUEST_ENTITY_TOO_LARGE;
+- }
+ /* can't spool any more in memory; write latest brigade to disk */
+ if (tmpfile == NULL) {
+ const char *temp_dir;
diff --git a/debian/patches/CVE-2022-30522.patch b/debian/patches/CVE-2022-30522.patch
new file mode 100644
index 0000000..5ad124e
--- /dev/null
+++ b/debian/patches/CVE-2022-30522.patch
@@ -0,0 +1,561 @@
+From db47781128e42bd49f55076665b3f6ca4e2bc5e2 Mon Sep 17 00:00:00 2001
+From: Eric Covener <covener@apache.org>
+Date: Wed, 1 Jun 2022 12:50:40 +0000
+Subject: [PATCH] Merge r1901506 from trunk:
+
+limit mod_sed memory use
+
+Resync mod_sed.c with trunk due to merge conflicts.
+
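+Background: the line/hold/general buffers in sed1.c grew without bound,
+so input containing one sufficiently long "line" could exhaust memory.
+The grow_* helpers now return apr_status_t, refuse to grow past
+MAX_BUF_SIZE (8 MiB), and every caller propagates the failure. A
+minimal sketch of the grow-with-cap pattern, assuming an APR pool
+allocator (names illustrative):
+
+    #include <string.h>
+    #include "apr_pools.h"      /* also pulls in apr_errno.h */
+
+    #define CAP (8 * 1024 * 1024)     /* mirrors MAX_BUF_SIZE */
+
+    static apr_status_t grow_capped(apr_pool_t *pool, char **buf,
+                                    apr_size_t *cursize, apr_size_t newsize)
+    {
+        char *nb;
+        if (newsize <= *cursize)
+            return APR_SUCCESS;       /* already large enough */
+        if (newsize > CAP)
+            return APR_ENOMEM;        /* refuse unbounded growth */
+        nb = apr_pcalloc(pool, newsize);
+        if (*buf)
+            memcpy(nb, *buf, *cursize);
+        *buf = nb;
+        *cursize = newsize;
+        return APR_SUCCESS;
+    }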
+
+git-svn-id: https://svn.apache.org/repos/asf/httpd/httpd/branches/2.4.x@1901509 13f79535-47bb-0310-9956-ffa450edef68
+Origin: https://github.com/apache/httpd/commit/db47781128e42bd49f55076665b3f6ca4e2bc5e2
+---
+ modules/filters/mod_sed.c | 75 ++++++++----------
+ modules/filters/sed1.c | 158 +++++++++++++++++++++++++++-----------
+ 2 files changed, 147 insertions(+), 86 deletions(-)
+
+diff --git a/modules/filters/mod_sed.c b/modules/filters/mod_sed.c
+index 4bdb4ce33a..12cb04a20f 100644
+--- a/modules/filters/mod_sed.c
++++ b/modules/filters/mod_sed.c
+@@ -59,7 +59,7 @@ typedef struct sed_filter_ctxt
+ module AP_MODULE_DECLARE_DATA sed_module;
+
+ /* This function will be call back from libsed functions if there is any error
+- * happend during execution of sed scripts
++ * happened during execution of sed scripts
+ */
+ static apr_status_t log_sed_errf(void *data, const char *error)
+ {
+@@ -277,7 +277,7 @@ static apr_status_t sed_response_filter(ap_filter_t *f,
+ apr_bucket_brigade *bb)
+ {
+ apr_bucket *b;
+- apr_status_t status;
++ apr_status_t status = APR_SUCCESS;
+ sed_config *cfg = ap_get_module_config(f->r->per_dir_config,
+ &sed_module);
+ sed_filter_ctxt *ctx = f->ctx;
+@@ -302,9 +302,9 @@ static apr_status_t sed_response_filter(ap_filter_t *f,
+ return status;
+ ctx = f->ctx;
+ apr_table_unset(f->r->headers_out, "Content-Length");
+- }
+
+- ctx->bb = apr_brigade_create(f->r->pool, f->c->bucket_alloc);
++ ctx->bb = apr_brigade_create(f->r->pool, f->c->bucket_alloc);
++ }
+
+ /* Here is the main logic. Iterate through all the buckets, read the
+ * content of the bucket, call sed_eval_buffer on the data.
+@@ -326,63 +326,52 @@ static apr_status_t sed_response_filter(ap_filter_t *f,
+ * in sed's internal buffer which can't be flushed until new line
+ * character is arrived.
+ */
+- for (b = APR_BRIGADE_FIRST(bb); b != APR_BRIGADE_SENTINEL(bb);) {
+- const char *buf = NULL;
+- apr_size_t bytes = 0;
++ while (!APR_BRIGADE_EMPTY(bb)) {
++ b = APR_BRIGADE_FIRST(bb);
+ if (APR_BUCKET_IS_EOS(b)) {
+- apr_bucket *b1 = APR_BUCKET_NEXT(b);
+ /* Now clean up the internal sed buffer */
+ sed_finalize_eval(&ctx->eval, ctx);
+ status = flush_output_buffer(ctx);
+ if (status != APR_SUCCESS) {
+- clear_ctxpool(ctx);
+- return status;
++ break;
+ }
++ /* Move the eos bucket to ctx->bb brigade */
+ APR_BUCKET_REMOVE(b);
+- /* Insert the eos bucket to ctx->bb brigade */
+ APR_BRIGADE_INSERT_TAIL(ctx->bb, b);
+- b = b1;
+ }
+ else if (APR_BUCKET_IS_FLUSH(b)) {
+- apr_bucket *b1 = APR_BUCKET_NEXT(b);
+- APR_BUCKET_REMOVE(b);
+ status = flush_output_buffer(ctx);
+ if (status != APR_SUCCESS) {
+- clear_ctxpool(ctx);
+- return status;
++ break;
+ }
++ /* Move the flush bucket to ctx->bb brigade */
++ APR_BUCKET_REMOVE(b);
+ APR_BRIGADE_INSERT_TAIL(ctx->bb, b);
+- b = b1;
+- }
+- else if (APR_BUCKET_IS_METADATA(b)) {
+- b = APR_BUCKET_NEXT(b);
+ }
+- else if (apr_bucket_read(b, &buf, &bytes, APR_BLOCK_READ)
+- == APR_SUCCESS) {
+- apr_bucket *b1 = APR_BUCKET_NEXT(b);
+- status = sed_eval_buffer(&ctx->eval, buf, bytes, ctx);
+- if (status != APR_SUCCESS) {
+- clear_ctxpool(ctx);
+- return status;
++ else {
++ if (!APR_BUCKET_IS_METADATA(b)) {
++ const char *buf = NULL;
++ apr_size_t bytes = 0;
++
++ status = apr_bucket_read(b, &buf, &bytes, APR_BLOCK_READ);
++ if (status == APR_SUCCESS) {
++ status = sed_eval_buffer(&ctx->eval, buf, bytes, ctx);
++ }
++ if (status != APR_SUCCESS) {
++ ap_log_rerror(APLOG_MARK, APLOG_ERR, status, f->r, APLOGNO(10394) "error evaluating sed on output");
++ break;
++ }
+ }
+- APR_BUCKET_REMOVE(b);
+ apr_bucket_delete(b);
+- b = b1;
+- }
+- else {
+- apr_bucket *b1 = APR_BUCKET_NEXT(b);
+- APR_BUCKET_REMOVE(b);
+- b = b1;
+ }
+ }
+- apr_brigade_cleanup(bb);
+- status = flush_output_buffer(ctx);
+- if (status != APR_SUCCESS) {
+- clear_ctxpool(ctx);
+- return status;
++ if (status == APR_SUCCESS) {
++ status = flush_output_buffer(ctx);
+ }
+ if (!APR_BRIGADE_EMPTY(ctx->bb)) {
+- status = ap_pass_brigade(f->next, ctx->bb);
++ if (status == APR_SUCCESS) {
++ status = ap_pass_brigade(f->next, ctx->bb);
++ }
+ apr_brigade_cleanup(ctx->bb);
+ }
+ clear_ctxpool(ctx);
+@@ -433,7 +422,7 @@ static apr_status_t sed_request_filter(ap_filter_t *f,
+ * the buckets in bbinp and read the data from buckets and invoke
+ * sed_eval_buffer on the data. libsed will generate its output using
+ * sed_write_output which will add data in ctx->bb. Do it until it have
+- * atleast one bucket in ctx->bb. At the end of data eos bucket
++ * at least one bucket in ctx->bb. At the end of data eos bucket
+ * should be there.
+ *
+ * Once eos bucket is seen, then invoke sed_finalize_eval to clear the
+@@ -475,8 +464,10 @@ static apr_status_t sed_request_filter(ap_filter_t *f,
+ if (apr_bucket_read(b, &buf, &bytes, APR_BLOCK_READ)
+ == APR_SUCCESS) {
+ status = sed_eval_buffer(&ctx->eval, buf, bytes, ctx);
+- if (status != APR_SUCCESS)
++ if (status != APR_SUCCESS) {
++ ap_log_rerror(APLOG_MARK, APLOG_ERR, status, f->r, APLOGNO(10395) "error evaluating sed on input");
+ return status;
++ }
+ flush_output_buffer(ctx);
+ }
+ }
+diff --git a/modules/filters/sed1.c b/modules/filters/sed1.c
+index 67a8d06515..047f49ba13 100644
+--- a/modules/filters/sed1.c
++++ b/modules/filters/sed1.c
+@@ -87,18 +87,20 @@ static void eval_errf(sed_eval_t *eval, const char *fmt, ...)
+ }
+
+ #define INIT_BUF_SIZE 1024
++#define MAX_BUF_SIZE 1024*8192
+
+ /*
+ * grow_buffer
+ */
+-static void grow_buffer(apr_pool_t *pool, char **buffer,
++static apr_status_t grow_buffer(apr_pool_t *pool, char **buffer,
+ char **spend, apr_size_t *cursize,
+ apr_size_t newsize)
+ {
+ char* newbuffer = NULL;
+ apr_size_t spendsize = 0;
+- if (*cursize >= newsize)
+- return;
++ if (*cursize >= newsize) {
++ return APR_SUCCESS;
++ }
+ /* Avoid number of times realloc is called. It could cause huge memory
+ * requirement if line size is huge e.g 2 MB */
+ if (newsize < *cursize * 2) {
+@@ -107,6 +109,9 @@ static void grow_buffer(apr_pool_t *pool, char **buffer,
+
+ /* Align it to 4 KB boundary */
+ newsize = (newsize + ((1 << 12) - 1)) & ~((1 << 12) - 1);
++ if (newsize > MAX_BUF_SIZE) {
++ return APR_ENOMEM;
++ }
+ newbuffer = apr_pcalloc(pool, newsize);
+ if (*spend && *buffer && (*cursize > 0)) {
+ spendsize = *spend - *buffer;
+@@ -119,63 +124,77 @@ static void grow_buffer(apr_pool_t *pool, char **buffer,
+ if (spend != buffer) {
+ *spend = *buffer + spendsize;
+ }
++ return APR_SUCCESS;
+ }
+
+ /*
+ * grow_line_buffer
+ */
+-static void grow_line_buffer(sed_eval_t *eval, apr_size_t newsize)
++static apr_status_t grow_line_buffer(sed_eval_t *eval, apr_size_t newsize)
+ {
+- grow_buffer(eval->pool, &eval->linebuf, &eval->lspend,
++ return grow_buffer(eval->pool, &eval->linebuf, &eval->lspend,
+ &eval->lsize, newsize);
+ }
+
+ /*
+ * grow_hold_buffer
+ */
+-static void grow_hold_buffer(sed_eval_t *eval, apr_size_t newsize)
++static apr_status_t grow_hold_buffer(sed_eval_t *eval, apr_size_t newsize)
+ {
+- grow_buffer(eval->pool, &eval->holdbuf, &eval->hspend,
++ return grow_buffer(eval->pool, &eval->holdbuf, &eval->hspend,
+ &eval->hsize, newsize);
+ }
+
+ /*
+ * grow_gen_buffer
+ */
+-static void grow_gen_buffer(sed_eval_t *eval, apr_size_t newsize,
++static apr_status_t grow_gen_buffer(sed_eval_t *eval, apr_size_t newsize,
+ char **gspend)
+ {
++ apr_status_t rc = 0;
+ if (gspend == NULL) {
+ gspend = &eval->genbuf;
+ }
+- grow_buffer(eval->pool, &eval->genbuf, gspend,
+- &eval->gsize, newsize);
+- eval->lcomend = &eval->genbuf[71];
++ rc = grow_buffer(eval->pool, &eval->genbuf, gspend,
++ &eval->gsize, newsize);
++ if (rc == APR_SUCCESS) {
++ eval->lcomend = &eval->genbuf[71];
++ }
++ return rc;
+ }
+
+ /*
+ * appendmem_to_linebuf
+ */
+-static void appendmem_to_linebuf(sed_eval_t *eval, const char* sz, apr_size_t len)
++static apr_status_t appendmem_to_linebuf(sed_eval_t *eval, const char* sz, apr_size_t len)
+ {
++ apr_status_t rc = 0;
+ apr_size_t reqsize = (eval->lspend - eval->linebuf) + len;
+ if (eval->lsize < reqsize) {
+- grow_line_buffer(eval, reqsize);
++ rc = grow_line_buffer(eval, reqsize);
++ if (rc != APR_SUCCESS) {
++ return rc;
++ }
+ }
+ memcpy(eval->lspend, sz, len);
+ eval->lspend += len;
++ return APR_SUCCESS;
+ }
+
+ /*
+ * append_to_linebuf
+ */
+-static void append_to_linebuf(sed_eval_t *eval, const char* sz,
++static apr_status_t append_to_linebuf(sed_eval_t *eval, const char* sz,
+ step_vars_storage *step_vars)
+ {
+ apr_size_t len = strlen(sz);
+ char *old_linebuf = eval->linebuf;
++ apr_status_t rc = 0;
+ /* Copy string including null character */
+- appendmem_to_linebuf(eval, sz, len + 1);
++ rc = appendmem_to_linebuf(eval, sz, len + 1);
++ if (rc != APR_SUCCESS) {
++ return rc;
++ }
+ --eval->lspend; /* lspend will now point to NULL character */
+ /* Sync step_vars after a possible linebuf expansion */
+ if (step_vars && old_linebuf != eval->linebuf) {
+@@ -189,68 +208,84 @@ static void append_to_linebuf(sed_eval_t *eval, const char* sz,
+ step_vars->locs = step_vars->locs - old_linebuf + eval->linebuf;
+ }
+ }
++ return APR_SUCCESS;
+ }
+
+ /*
+ * copy_to_linebuf
+ */
+-static void copy_to_linebuf(sed_eval_t *eval, const char* sz,
++static apr_status_t copy_to_linebuf(sed_eval_t *eval, const char* sz,
+ step_vars_storage *step_vars)
+ {
+ eval->lspend = eval->linebuf;
+- append_to_linebuf(eval, sz, step_vars);
++ return append_to_linebuf(eval, sz, step_vars);
+ }
+
+ /*
+ * append_to_holdbuf
+ */
+-static void append_to_holdbuf(sed_eval_t *eval, const char* sz)
++static apr_status_t append_to_holdbuf(sed_eval_t *eval, const char* sz)
+ {
+ apr_size_t len = strlen(sz);
+ apr_size_t reqsize = (eval->hspend - eval->holdbuf) + len + 1;
++ apr_status_t rc = 0;
+ if (eval->hsize <= reqsize) {
+- grow_hold_buffer(eval, reqsize);
++ rc = grow_hold_buffer(eval, reqsize);
++ if (rc != APR_SUCCESS) {
++ return rc;
++ }
+ }
+ memcpy(eval->hspend, sz, len + 1);
+ /* hspend will now point to NULL character */
+ eval->hspend += len;
++ return APR_SUCCESS;
+ }
+
+ /*
+ * copy_to_holdbuf
+ */
+-static void copy_to_holdbuf(sed_eval_t *eval, const char* sz)
++static apr_status_t copy_to_holdbuf(sed_eval_t *eval, const char* sz)
+ {
+ eval->hspend = eval->holdbuf;
+- append_to_holdbuf(eval, sz);
++ return append_to_holdbuf(eval, sz);
+ }
+
+ /*
+ * append_to_genbuf
+ */
+-static void append_to_genbuf(sed_eval_t *eval, const char* sz, char **gspend)
++static apr_status_t append_to_genbuf(sed_eval_t *eval, const char* sz, char **gspend)
+ {
+ apr_size_t len = strlen(sz);
+ apr_size_t reqsize = (*gspend - eval->genbuf) + len + 1;
++ apr_status_t rc = 0;
+ if (eval->gsize < reqsize) {
+- grow_gen_buffer(eval, reqsize, gspend);
++ rc = grow_gen_buffer(eval, reqsize, gspend);
++ if (rc != APR_SUCCESS) {
++ return rc;
++ }
+ }
+ memcpy(*gspend, sz, len + 1);
+ /* *gspend will now point to NULL character */
+ *gspend += len;
++ return APR_SUCCESS;
+ }
+
+ /*
+ * copy_to_genbuf
+ */
+-static void copy_to_genbuf(sed_eval_t *eval, const char* sz)
++static apr_status_t copy_to_genbuf(sed_eval_t *eval, const char* sz)
+ {
+ apr_size_t len = strlen(sz);
+ apr_size_t reqsize = len + 1;
++ apr_status_t rc = APR_SUCCESS;
+ if (eval->gsize < reqsize) {
+- grow_gen_buffer(eval, reqsize, NULL);
++ rc = grow_gen_buffer(eval, reqsize, NULL);
++ if (rc != APR_SUCCESS) {
++ return rc;
++ }
+ }
+ memcpy(eval->genbuf, sz, len + 1);
++ return rc;
+ }
+
+ /*
+@@ -397,6 +432,7 @@ apr_status_t sed_eval_buffer(sed_eval_t *eval, const char *buf, apr_size_t bufsz
+ }
+
+ while (bufsz) {
++ apr_status_t rc = 0;
+ char *n;
+ apr_size_t llen;
+
+@@ -411,7 +447,10 @@ apr_status_t sed_eval_buffer(sed_eval_t *eval, const char *buf, apr_size_t bufsz
+ break;
+ }
+
+- appendmem_to_linebuf(eval, buf, llen + 1);
++ rc = appendmem_to_linebuf(eval, buf, llen + 1);
++ if (rc != APR_SUCCESS) {
++ return rc;
++ }
+ --eval->lspend;
+ /* replace new line character with NULL */
+ *eval->lspend = '\0';
+@@ -426,7 +465,10 @@ apr_status_t sed_eval_buffer(sed_eval_t *eval, const char *buf, apr_size_t bufsz
+
+ /* Save the leftovers for later */
+ if (bufsz) {
+- appendmem_to_linebuf(eval, buf, bufsz);
++ apr_status_t rc = appendmem_to_linebuf(eval, buf, bufsz);
++ if (rc != APR_SUCCESS) {
++ return rc;
++ }
+ }
+
+ return APR_SUCCESS;
+@@ -448,6 +490,7 @@ apr_status_t sed_finalize_eval(sed_eval_t *eval, void *fout)
+ /* Process leftovers */
+ if (eval->lspend > eval->linebuf) {
+ apr_status_t rv;
++ apr_status_t rc = 0;
+
+ if (eval->lreadyflag) {
+ eval->lreadyflag = 0;
+@@ -457,7 +500,10 @@ apr_status_t sed_finalize_eval(sed_eval_t *eval, void *fout)
+ * buffer is not a newline.
+ */
+ /* Assure space for NULL */
+- append_to_linebuf(eval, "", NULL);
++ rc = append_to_linebuf(eval, "", NULL);
++ if (rc != APR_SUCCESS) {
++ return rc;
++ }
+ }
+
+ *eval->lspend = '\0';
+@@ -655,11 +701,15 @@ static apr_status_t dosub(sed_eval_t *eval, char *rhsbuf, int n,
+ sp = eval->genbuf;
+ rp = rhsbuf;
+ sp = place(eval, sp, lp, step_vars->loc1);
++ if (sp == NULL) {
++ return APR_EGENERAL;
++ }
+ while ((c = *rp++) != 0) {
+ if (c == '&') {
+ sp = place(eval, sp, step_vars->loc1, step_vars->loc2);
+- if (sp == NULL)
++ if (sp == NULL) {
+ return APR_EGENERAL;
++ }
+ }
+ else if (c == '\\') {
+ c = *rp++;
+@@ -675,13 +725,19 @@ static apr_status_t dosub(sed_eval_t *eval, char *rhsbuf, int n,
+ *sp++ = c;
+ if (sp >= eval->genbuf + eval->gsize) {
+ /* expand genbuf and set the sp appropriately */
+- grow_gen_buffer(eval, eval->gsize + 1024, &sp);
++ rv = grow_gen_buffer(eval, eval->gsize + 1024, &sp);
++ if (rv != APR_SUCCESS) {
++ return rv;
++ }
+ }
+ }
+ lp = step_vars->loc2;
+ step_vars->loc2 = sp - eval->genbuf + eval->linebuf;
+- append_to_genbuf(eval, lp, &sp);
+- copy_to_linebuf(eval, eval->genbuf, step_vars);
++ rv = append_to_genbuf(eval, lp, &sp);
++ if (rv != APR_SUCCESS) {
++ return rv;
++ }
++ rv = copy_to_linebuf(eval, eval->genbuf, step_vars);
+ return rv;
+ }
+
+@@ -695,7 +751,10 @@ static char *place(sed_eval_t *eval, char *asp, char *al1, char *al2)
+ apr_size_t reqsize = (sp - eval->genbuf) + n + 1;
+
+ if (eval->gsize < reqsize) {
+- grow_gen_buffer(eval, reqsize, &sp);
++ apr_status_t rc = grow_gen_buffer(eval, reqsize, &sp);
++ if (rc != APR_SUCCESS) {
++ return NULL;
++ }
+ }
+ memcpy(sp, al1, n);
+ return sp + n;
+@@ -750,7 +809,8 @@ static apr_status_t command(sed_eval_t *eval, sed_reptr_t *ipc,
+ }
+
+ p1++;
+- copy_to_linebuf(eval, p1, step_vars);
++ rv = copy_to_linebuf(eval, p1, step_vars);
++ if (rv != APR_SUCCESS) return rv;
+ eval->jflag++;
+ break;
+
+@@ -760,21 +820,27 @@ static apr_status_t command(sed_eval_t *eval, sed_reptr_t *ipc,
+ break;
+
+ case GCOM:
+- copy_to_linebuf(eval, eval->holdbuf, step_vars);
++ rv = copy_to_linebuf(eval, eval->holdbuf, step_vars);
++ if (rv != APR_SUCCESS) return rv;
+ break;
+
+ case CGCOM:
+- append_to_linebuf(eval, "\n", step_vars);
+- append_to_linebuf(eval, eval->holdbuf, step_vars);
++ rv = append_to_linebuf(eval, "\n", step_vars);
++ if (rv != APR_SUCCESS) return rv;
++ rv = append_to_linebuf(eval, eval->holdbuf, step_vars);
++ if (rv != APR_SUCCESS) return rv;
+ break;
+
+ case HCOM:
+- copy_to_holdbuf(eval, eval->linebuf);
++ rv = copy_to_holdbuf(eval, eval->linebuf);
++ if (rv != APR_SUCCESS) return rv;
+ break;
+
+ case CHCOM:
+- append_to_holdbuf(eval, "\n");
+- append_to_holdbuf(eval, eval->linebuf);
++ rv = append_to_holdbuf(eval, "\n");
++ if (rv != APR_SUCCESS) return rv;
++ rv = append_to_holdbuf(eval, eval->linebuf);
++ if (rv != APR_SUCCESS) return rv;
+ break;
+
+ case ICOM:
+@@ -896,7 +962,8 @@ static apr_status_t command(sed_eval_t *eval, sed_reptr_t *ipc,
+ if (rv != APR_SUCCESS)
+ return rv;
+ }
+- append_to_linebuf(eval, "\n", step_vars);
++ rv = append_to_linebuf(eval, "\n", step_vars);
++ if (rv != APR_SUCCESS) return rv;
+ eval->pending = ipc->next;
+ break;
+
+@@ -970,9 +1037,12 @@ static apr_status_t command(sed_eval_t *eval, sed_reptr_t *ipc,
+ break;
+
+ case XCOM:
+- copy_to_genbuf(eval, eval->linebuf);
+- copy_to_linebuf(eval, eval->holdbuf, step_vars);
+- copy_to_holdbuf(eval, eval->genbuf);
++ rv = copy_to_genbuf(eval, eval->linebuf);
++ if (rv != APR_SUCCESS) return rv;
++ rv = copy_to_linebuf(eval, eval->holdbuf, step_vars);
++ if (rv != APR_SUCCESS) return rv;
++ rv = copy_to_holdbuf(eval, eval->genbuf);
++ if (rv != APR_SUCCESS) return rv;
+ break;
+
+ case YCOM:
+--
+2.30.2
+
diff --git a/debian/patches/CVE-2022-30556.patch b/debian/patches/CVE-2022-30556.patch
new file mode 100644
index 0000000..f9b541d
--- /dev/null
+++ b/debian/patches/CVE-2022-30556.patch
@@ -0,0 +1,250 @@
+From 3a561759fcb37af179585adb8478922dc9bc6a85 Mon Sep 17 00:00:00 2001
+From: Eric Covener <covener@apache.org>
+Date: Wed, 1 Jun 2022 12:36:39 +0000
+Subject: [PATCH] Merge r1901502 from trunk:
+
+use filters consistently
+
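+Before this change, mod_lua's websocket code read frame headers and
+length fields with single apr_socket_recv() calls on plaintext
+connections; a short read could leave buffers partially filled while
+parsing carried on as if they were complete. All I/O (including the
+ping reply) now goes through the connection's filter stacks and fails
+hard on incomplete reads. A simplified reproduction of the
+read-exactly-N-bytes helper the patch introduces:
+
+    static apr_status_t read_n(conn_rec *c, apr_bucket_brigade *bb,
+                               char *buf, apr_off_t n)
+    {
+        apr_size_t got = (apr_size_t)n;
+        apr_status_t rv = ap_get_brigade(c->input_filters, bb,
+                                         AP_MODE_READBYTES,
+                                         APR_BLOCK_READ, n);
+        if (rv == APR_SUCCESS) {
+            rv = apr_brigade_flatten(bb, buf, &got);
+            if (rv == APR_SUCCESS && got < (apr_size_t)n)
+                rv = APR_INCOMPLETE;  /* short read: caller bails out */
+        }
+        apr_brigade_cleanup(bb);
+        return rv;
+    }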
+
+git-svn-id: https://svn.apache.org/repos/asf/httpd/httpd/branches/2.4.x@1901503 13f79535-47bb-0310-9956-ffa450edef68
+Origin: https://github.com/apache/httpd/commit/3a561759fcb37af179585adb8478922dc9bc6a85
+---
+ modules/lua/lua_request.c | 144 ++++++++++++++------------------------
+ 1 file changed, 53 insertions(+), 91 deletions(-)
+
+diff --git a/modules/lua/lua_request.c b/modules/lua/lua_request.c
+index a3e3b613bc..2ec453e86b 100644
+--- a/modules/lua/lua_request.c
++++ b/modules/lua/lua_request.c
+@@ -2227,23 +2227,20 @@ static int lua_websocket_greet(lua_State *L)
+ return 0;
+ }
+
+-static apr_status_t lua_websocket_readbytes(conn_rec* c, char* buffer,
+- apr_off_t len)
++static apr_status_t lua_websocket_readbytes(conn_rec* c,
++ apr_bucket_brigade *brigade,
++ char* buffer, apr_off_t len)
+ {
+- apr_bucket_brigade *brigade = apr_brigade_create(c->pool, c->bucket_alloc);
++ apr_size_t delivered;
+ apr_status_t rv;
++
+ rv = ap_get_brigade(c->input_filters, brigade, AP_MODE_READBYTES,
+ APR_BLOCK_READ, len);
+ if (rv == APR_SUCCESS) {
+- if (!APR_BRIGADE_EMPTY(brigade)) {
+- apr_bucket* bucket = APR_BRIGADE_FIRST(brigade);
+- const char* data = NULL;
+- apr_size_t data_length = 0;
+- rv = apr_bucket_read(bucket, &data, &data_length, APR_BLOCK_READ);
+- if (rv == APR_SUCCESS) {
+- memcpy(buffer, data, len);
+- }
+- apr_bucket_delete(bucket);
++ delivered = len;
++ rv = apr_brigade_flatten(brigade, buffer, &delivered);
++ if ((rv == APR_SUCCESS) && (delivered < len)) {
++ rv = APR_INCOMPLETE;
+ }
+ }
+ apr_brigade_cleanup(brigade);
+@@ -2273,35 +2270,28 @@ static int lua_websocket_peek(lua_State *L)
+
+ static int lua_websocket_read(lua_State *L)
+ {
+- apr_socket_t *sock;
+ apr_status_t rv;
+ int do_read = 1;
+ int n = 0;
+- apr_size_t len = 1;
+ apr_size_t plen = 0;
+ unsigned short payload_short = 0;
+ apr_uint64_t payload_long = 0;
+ unsigned char *mask_bytes;
+ char byte;
+- int plaintext;
+-
+-
++ apr_bucket_brigade *brigade;
++ conn_rec* c;
++
+ request_rec *r = ap_lua_check_request_rec(L, 1);
+- plaintext = ap_lua_ssl_is_https(r->connection) ? 0 : 1;
++ c = r->connection;
+
+-
+ mask_bytes = apr_pcalloc(r->pool, 4);
+- sock = ap_get_conn_socket(r->connection);
++
++ brigade = apr_brigade_create(r->pool, c->bucket_alloc);
+
+ while (do_read) {
+ do_read = 0;
+ /* Get opcode and FIN bit */
+- if (plaintext) {
+- rv = apr_socket_recv(sock, &byte, &len);
+- }
+- else {
+- rv = lua_websocket_readbytes(r->connection, &byte, 1);
+- }
++ rv = lua_websocket_readbytes(c, brigade, &byte, 1);
+ if (rv == APR_SUCCESS) {
+ unsigned char ubyte, fin, opcode, mask, payload;
+ ubyte = (unsigned char)byte;
+@@ -2311,12 +2301,7 @@ static int lua_websocket_read(lua_State *L)
+ opcode = ubyte & 0xf;
+
+ /* Get the payload length and mask bit */
+- if (plaintext) {
+- rv = apr_socket_recv(sock, &byte, &len);
+- }
+- else {
+- rv = lua_websocket_readbytes(r->connection, &byte, 1);
+- }
++ rv = lua_websocket_readbytes(c, brigade, &byte, 1);
+ if (rv == APR_SUCCESS) {
+ ubyte = (unsigned char)byte;
+ /* Mask is the first bit */
+@@ -2327,40 +2312,25 @@ static int lua_websocket_read(lua_State *L)
+
+ /* Extended payload? */
+ if (payload == 126) {
+- len = 2;
+- if (plaintext) {
+- /* XXX: apr_socket_recv does not receive len bits, only up to len bits! */
+- rv = apr_socket_recv(sock, (char*) &payload_short, &len);
+- }
+- else {
+- rv = lua_websocket_readbytes(r->connection,
+- (char*) &payload_short, 2);
+- }
+- payload_short = ntohs(payload_short);
++ rv = lua_websocket_readbytes(c, brigade,
++ (char*) &payload_short, 2);
+
+- if (rv == APR_SUCCESS) {
+- plen = payload_short;
+- }
+- else {
++ if (rv != APR_SUCCESS) {
+ return 0;
+ }
++
++ plen = ntohs(payload_short);
+ }
+ /* Super duper extended payload? */
+ if (payload == 127) {
+- len = 8;
+- if (plaintext) {
+- rv = apr_socket_recv(sock, (char*) &payload_long, &len);
+- }
+- else {
+- rv = lua_websocket_readbytes(r->connection,
+- (char*) &payload_long, 8);
+- }
+- if (rv == APR_SUCCESS) {
+- plen = ap_ntoh64(&payload_long);
+- }
+- else {
++ rv = lua_websocket_readbytes(c, brigade,
++ (char*) &payload_long, 8);
++
++ if (rv != APR_SUCCESS) {
+ return 0;
+ }
++
++ plen = ap_ntoh64(&payload_long);
+ }
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(03210)
+ "Websocket: Reading %" APR_SIZE_T_FMT " (%s) bytes, masking is %s. %s",
+@@ -2369,46 +2339,27 @@ static int lua_websocket_read(lua_State *L)
+ mask ? "on" : "off",
+ fin ? "This is a final frame" : "more to follow");
+ if (mask) {
+- len = 4;
+- if (plaintext) {
+- rv = apr_socket_recv(sock, (char*) mask_bytes, &len);
+- }
+- else {
+- rv = lua_websocket_readbytes(r->connection,
+- (char*) mask_bytes, 4);
+- }
++ rv = lua_websocket_readbytes(c, brigade,
++ (char*) mask_bytes, 4);
++
+ if (rv != APR_SUCCESS) {
+ return 0;
+ }
+ }
+ if (plen < (HUGE_STRING_LEN*1024) && plen > 0) {
+ apr_size_t remaining = plen;
+- apr_size_t received;
+- apr_off_t at = 0;
+ char *buffer = apr_palloc(r->pool, plen+1);
+ buffer[plen] = 0;
+
+- if (plaintext) {
+- while (remaining > 0) {
+- received = remaining;
+- rv = apr_socket_recv(sock, buffer+at, &received);
+- if (received > 0 ) {
+- remaining -= received;
+- at += received;
+- }
+- }
+- ap_log_rerror(APLOG_MARK, APLOG_TRACE1, 0, r,
+- "Websocket: Frame contained %" APR_OFF_T_FMT " bytes, pushed to Lua stack",
+- at);
+- }
+- else {
+- rv = lua_websocket_readbytes(r->connection, buffer,
+- remaining);
+- ap_log_rerror(APLOG_MARK, APLOG_TRACE1, 0, r,
+- "Websocket: SSL Frame contained %" APR_SIZE_T_FMT " bytes, "\
+- "pushed to Lua stack",
+- remaining);
++ rv = lua_websocket_readbytes(c, brigade, buffer, remaining);
++
++ if (rv != APR_SUCCESS) {
++ return 0;
+ }
++
++ ap_log_rerror(APLOG_MARK, APLOG_TRACE1, 0, r,
++ "Websocket: Frame contained %" APR_SIZE_T_FMT \
++ " bytes, pushed to Lua stack", remaining);
+ if (mask) {
+ for (n = 0; n < plen; n++) {
+ buffer[n] ^= mask_bytes[n%4];
+@@ -2420,14 +2371,25 @@ static int lua_websocket_read(lua_State *L)
+ return 2;
+ }
+
+-
+ /* Decide if we need to react to the opcode or not */
+ if (opcode == 0x09) { /* ping */
+ char frame[2];
+- plen = 2;
++ apr_bucket *b;
++
+ frame[0] = 0x8A;
+ frame[1] = 0;
+- apr_socket_send(sock, frame, &plen); /* Pong! */
++
++ /* Pong! */
++ b = apr_bucket_transient_create(frame, 2, c->bucket_alloc);
++ APR_BRIGADE_INSERT_TAIL(brigade, b);
++
++ rv = ap_pass_brigade(c->output_filters, brigade);
++ apr_brigade_cleanup(brigade);
++
++ if (rv != APR_SUCCESS) {
++ return 0;
++ }
++
+ do_read = 1;
+ }
+ }
+--
+2.30.2
+
diff --git a/debian/patches/CVE-2022-31813.patch b/debian/patches/CVE-2022-31813.patch
new file mode 100644
index 0000000..d2bd341
--- /dev/null
+++ b/debian/patches/CVE-2022-31813.patch
@@ -0,0 +1,242 @@
+From 956f708b094698ac9ad570d640d4f30eb0df7305 Mon Sep 17 00:00:00 2001
+From: Stefan Eissing <icing@apache.org>
+Date: Wed, 1 Jun 2022 07:51:04 +0000
+Subject: [PATCH] Merge r1901461 from trunk via #320:
+
+ *) mod_proxy: ap_proxy_create_hdrbrgd() to clear hop-by-hop first and fixup last.
+
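+The ordering matters because hop-by-hop stripping honours headers the
+client nominates via "Connection:". When it ran after the proxy added
+its own X-Forwarded-* headers, a client sending
+"Connection: X-Forwarded-For" could have the proxy strip that header
+again and so spoof or hide the originating address. Clearing first and
+fixing up last closes that hole. A hedged, heavily simplified sketch of
+the safe ordering (not the full ap_proxy_create_hdrbrgd() logic):
+
+    static int build_forward_headers(request_rec *r, const char *client_ip)
+    {
+        /* 1) drop client-nominated hop-by-hop headers first */
+        if (ap_proxy_clear_connection(r, r->headers_in) < 0)
+            return HTTP_BAD_REQUEST;
+
+        /* 2) only then add proxy-generated headers, so the client
+         *    cannot have them removed via Connection: */
+        apr_table_mergen(r->headers_in, "X-Forwarded-For", client_ip);
+        return OK;
+    }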
+
+
+git-svn-id: https://svn.apache.org/repos/asf/httpd/httpd/branches/2.4.x@1901480 13f79535-47bb-0310-9956-ffa450edef68
+Origin: https://github.com/apache/httpd/commit/956f708b094698ac9ad570d640d4f30eb0df7305
+---
+ modules/proxy/proxy_util.c | 153 ++++++++++++++++++++++-----------------------
+ 1 file changed, 77 insertions(+), 76 deletions(-)
+
+--- a/modules/proxy/proxy_util.c
++++ b/modules/proxy/proxy_util.c
+@@ -3396,12 +3396,14 @@
+ char **old_cl_val,
+ char **old_te_val)
+ {
++ int rc = OK;
+ conn_rec *c = r->connection;
+ int counter;
+ char *buf;
++ apr_table_t *saved_headers_in = r->headers_in;
++ const char *saved_host = apr_table_get(saved_headers_in, "Host");
+ const apr_array_header_t *headers_in_array;
+ const apr_table_entry_t *headers_in;
+- apr_table_t *saved_headers_in;
+ apr_bucket *e;
+ int do_100_continue;
+ conn_rec *origin = p_conn->connection;
+@@ -3437,6 +3439,52 @@
+ ap_xlate_proto_to_ascii(buf, strlen(buf));
+ e = apr_bucket_pool_create(buf, strlen(buf), p, c->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(header_brigade, e);
++
++ /*
++ * Make a copy on r->headers_in for the request we make to the backend,
++ * modify the copy in place according to our configuration and connection
++ * handling, use it to fill in the forwarded headers' brigade, and finally
++ * restore the saved/original ones in r->headers_in.
++ *
++ * Note: We need to take r->pool for apr_table_copy as the key / value
++ * pairs in r->headers_in have been created out of r->pool and
++ * p might be (and actually is) a longer living pool.
++ * This would trigger the bad pool ancestry abort in apr_table_copy if
++ * apr is compiled with APR_POOL_DEBUG.
++ *
++ * icing: if p indeed lives longer than r->pool, we should allocate
++ * all new header values from r->pool as well and avoid leakage.
++ */
++ r->headers_in = apr_table_copy(r->pool, saved_headers_in);
++
++ /* Return the original Transfer-Encoding and/or Content-Length values
++ * then drop the headers, they must be set by the proxy handler based
++ * on the actual body being forwarded.
++ */
++ if ((*old_te_val = (char *)apr_table_get(r->headers_in,
++ "Transfer-Encoding"))) {
++ apr_table_unset(r->headers_in, "Transfer-Encoding");
++ }
++ if ((*old_cl_val = (char *)apr_table_get(r->headers_in,
++ "Content-Length"))) {
++ apr_table_unset(r->headers_in, "Content-Length");
++ }
++
++ /* Clear out hop-by-hop request headers not to forward */
++ if (ap_proxy_clear_connection(r, r->headers_in) < 0) {
++ rc = HTTP_BAD_REQUEST;
++ goto cleanup;
++ }
++
++ /* RFC2616 13.5.1 says we should strip these */
++ apr_table_unset(r->headers_in, "Keep-Alive");
++ apr_table_unset(r->headers_in, "Upgrade");
++ apr_table_unset(r->headers_in, "Trailer");
++ apr_table_unset(r->headers_in, "TE");
++
++ /* We used to send `Host: ` always first, so let's keep it that
++ * way. No telling which legacy backend is relying on this.
++ */
+ if (dconf->preserve_host == 0) {
+ if (ap_strchr_c(uri->hostname, ':')) { /* if literal IPv6 address */
+ if (uri->port_str && uri->port != DEFAULT_HTTP_PORT) {
+@@ -3458,7 +3506,7 @@
+ /* don't want to use r->hostname, as the incoming header might have a
+ * port attached
+ */
+- const char* hostname = apr_table_get(r->headers_in,"Host");
++ const char* hostname = saved_host;
+ if (!hostname) {
+ hostname = r->server->server_hostname;
+ ap_log_rerror(APLOG_MARK, APLOG_WARNING, 0, r, APLOGNO(01092)
+@@ -3472,21 +3520,7 @@
+ ap_xlate_proto_to_ascii(buf, strlen(buf));
+ e = apr_bucket_pool_create(buf, strlen(buf), p, c->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(header_brigade, e);
+-
+- /*
+- * Save the original headers in here and restore them when leaving, since
+- * we will apply proxy purpose only modifications (eg. clearing hop-by-hop
+- * headers, add Via or X-Forwarded-* or Expect...), whereas the originals
+- * will be needed later to prepare the correct response and logging.
+- *
+- * Note: We need to take r->pool for apr_table_copy as the key / value
+- * pairs in r->headers_in have been created out of r->pool and
+- * p might be (and actually is) a longer living pool.
+- * This would trigger the bad pool ancestry abort in apr_table_copy if
+- * apr is compiled with APR_POOL_DEBUG.
+- */
+- saved_headers_in = r->headers_in;
+- r->headers_in = apr_table_copy(r->pool, saved_headers_in);
++ apr_table_unset(r->headers_in, "Host");
+
+ /* handle Via */
+ if (conf->viaopt == via_block) {
+@@ -3561,8 +3595,6 @@
+ */
+ if (dconf->add_forwarded_headers) {
+ if (PROXYREQ_REVERSE == r->proxyreq) {
+- const char *buf;
+-
+ /* Add X-Forwarded-For: so that the upstream has a chance to
+ * determine, where the original request came from.
+ */
+@@ -3572,8 +3604,9 @@
+ /* Add X-Forwarded-Host: so that upstream knows what the
+ * original request hostname was.
+ */
+- if ((buf = apr_table_get(r->headers_in, "Host"))) {
+- apr_table_mergen(r->headers_in, "X-Forwarded-Host", buf);
++ if (saved_host) {
++ apr_table_mergen(r->headers_in, "X-Forwarded-Host",
++ saved_host);
+ }
+
+ /* Add X-Forwarded-Server: so that upstream knows what the
+@@ -3585,67 +3618,37 @@
+ }
+ }
+
+- proxy_run_fixups(r);
+- if (ap_proxy_clear_connection(r, r->headers_in) < 0) {
+- return HTTP_BAD_REQUEST;
++ /* Do we want to strip Proxy-Authorization ?
++ * If we haven't used it, then NO
++ * If we have used it then MAYBE: RFC2616 says we MAY propagate it.
++ * So let's make it configurable by env.
++ */
++ if (r->user != NULL /* we've authenticated */
++ && !apr_table_get(r->subprocess_env, "Proxy-Chain-Auth")) {
++ apr_table_unset(r->headers_in, "Proxy-Authorization");
+ }
+
++ /* for sub-requests, ignore freshness/expiry headers */
++ if (r->main) {
++ apr_table_unset(r->headers_in, "If-Match");
++ apr_table_unset(r->headers_in, "If-Modified-Since");
++ apr_table_unset(r->headers_in, "If-Range");
++ apr_table_unset(r->headers_in, "If-Unmodified-Since");
++ apr_table_unset(r->headers_in, "If-None-Match");
++ }
++
++ /* run hook to fixup the request we are about to send */
++ proxy_run_fixups(r);
++
+ /* send request headers */
+ headers_in_array = apr_table_elts(r->headers_in);
+ headers_in = (const apr_table_entry_t *) headers_in_array->elts;
+ for (counter = 0; counter < headers_in_array->nelts; counter++) {
+ if (headers_in[counter].key == NULL
+- || headers_in[counter].val == NULL
+-
+- /* Already sent */
+- || !strcasecmp(headers_in[counter].key, "Host")
+-
+- /* Clear out hop-by-hop request headers not to send
+- * RFC2616 13.5.1 says we should strip these headers
+- */
+- || !strcasecmp(headers_in[counter].key, "Keep-Alive")
+- || !strcasecmp(headers_in[counter].key, "TE")
+- || !strcasecmp(headers_in[counter].key, "Trailer")
+- || !strcasecmp(headers_in[counter].key, "Upgrade")
+-
+- ) {
+- continue;
+- }
+- /* Do we want to strip Proxy-Authorization ?
+- * If we haven't used it, then NO
+- * If we have used it then MAYBE: RFC2616 says we MAY propagate it.
+- * So let's make it configurable by env.
+- */
+- if (!strcasecmp(headers_in[counter].key,"Proxy-Authorization")) {
+- if (r->user != NULL) { /* we've authenticated */
+- if (!apr_table_get(r->subprocess_env, "Proxy-Chain-Auth")) {
+- continue;
+- }
+- }
+- }
+-
+- /* Skip Transfer-Encoding and Content-Length for now.
+- */
+- if (!strcasecmp(headers_in[counter].key, "Transfer-Encoding")) {
+- *old_te_val = headers_in[counter].val;
+- continue;
+- }
+- if (!strcasecmp(headers_in[counter].key, "Content-Length")) {
+- *old_cl_val = headers_in[counter].val;
++ || headers_in[counter].val == NULL) {
+ continue;
+ }
+
+- /* for sub-requests, ignore freshness/expiry headers */
+- if (r->main) {
+- if ( !strcasecmp(headers_in[counter].key, "If-Match")
+- || !strcasecmp(headers_in[counter].key, "If-Modified-Since")
+- || !strcasecmp(headers_in[counter].key, "If-Range")
+- || !strcasecmp(headers_in[counter].key, "If-Unmodified-Since")
+- || !strcasecmp(headers_in[counter].key, "If-None-Match")) {
+- continue;
+- }
+- }
+-
+ buf = apr_pstrcat(p, headers_in[counter].key, ": ",
+ headers_in[counter].val, CRLF,
+ NULL);
+@@ -3654,11 +3657,9 @@
+ APR_BRIGADE_INSERT_TAIL(header_brigade, e);
+ }
+
+- /* Restore the original headers in (see comment above),
+- * we won't modify them anymore.
+- */
++cleanup:
+ r->headers_in = saved_headers_in;
+- return OK;
++ return rc;
+ }
+
+ PROXY_DECLARE(int) ap_proxy_pass_brigade(apr_bucket_alloc_t *bucket_alloc,
diff --git a/debian/patches/build_suexec-custom.patch b/debian/patches/build_suexec-custom.patch
new file mode 100644
index 0000000..e03d54b
--- /dev/null
+++ b/debian/patches/build_suexec-custom.patch
@@ -0,0 +1,69 @@
+Description: add suexec-custom to the build system
+Forwarded: not-needed
+Author: Stefan Fritsch <sf@debian.org>
+Last-Update: 2012-02-25
+--- a/Makefile.in
++++ b/Makefile.in
+@@ -272,23 +272,26 @@
+ install-suexec: install-suexec-$(INSTALL_SUEXEC)
+
+ install-suexec-binary:
+- @if test -f $(builddir)/support/suexec; then \
+- test -d $(DESTDIR)$(sbindir) || $(MKINSTALLDIRS) $(DESTDIR)$(sbindir); \
+- $(INSTALL_PROGRAM) $(top_builddir)/support/suexec $(DESTDIR)$(sbindir); \
++ @if test -f $(builddir)/support/suexec-pristine && test -f $(builddir)/support/suexec-custom; then \
++ test -d $(DESTDIR)$(sbindir) || $(MKINSTALLDIRS) $(DESTDIR)$(sbindir); \
++ $(INSTALL_PROGRAM) $(top_builddir)/support/suexec-pristine $(DESTDIR)$(sbindir); \
++ $(INSTALL_PROGRAM) $(top_builddir)/support/suexec-custom $(DESTDIR)$(sbindir); \
+ fi
+
+ install-suexec-setuid: install-suexec-binary
+- @if test -f $(builddir)/support/suexec; then \
+- chmod 4755 $(DESTDIR)$(sbindir)/suexec; \
++ @if test -f $(builddir)/support/suexec-pristine && test -f $(builddir)/support/suexec-custom; then \
++ chmod 4755 $(DESTDIR)$(sbindir)/suexec-pristine; \
++ chmod 4755 $(DESTDIR)$(sbindir)/suexec-custom; \
+ fi
+
+ install-suexec-caps: install-suexec-binary
+- @if test -f $(builddir)/support/suexec; then \
+- setcap 'cap_setuid,cap_setgid+pe' $(DESTDIR)$(sbindir)/suexec; \
++ @if test -f $(builddir)/support/suexec-pristine && test -f $(builddir)/support/suexec-custom; then \
++ setcap 'cap_setuid,cap_setgid+pe' $(DESTDIR)$(sbindir)/suexec-pristine; \
++ setcap 'cap_setuid,cap_setgid+pe' $(DESTDIR)$(sbindir)/suexec-custom; \
+ fi
+
+ suexec:
+- cd support && $(MAKE) suexec
++ cd support && $(MAKE) suexec-pristine suexec-custom
+
+ x-local-distclean:
+ @rm -rf autom4te.cache
+--- a/support/Makefile.in
++++ b/support/Makefile.in
+@@ -1,7 +1,7 @@
+ DISTCLEAN_TARGETS = apxs apachectl dbmmanage log_server_status \
+ logresolve.pl phf_abuse_log.cgi split-logfile envvars-std
+
+-CLEAN_TARGETS = suexec
++CLEAN_TARGETS = suexec-pristine suexec-custom
+
+ bin_PROGRAMS = htpasswd htdigest htdbm ab logresolve httxt2dbm
+ sbin_PROGRAMS = htcacheclean rotatelogs $(NONPORTABLE_SUPPORT)
+@@ -72,9 +72,13 @@
+ checkgid: $(checkgid_OBJECTS)
+ $(LINK) $(checkgid_LTFLAGS) $(checkgid_OBJECTS) $(PROGRAM_LDADD)
+
+-suexec_OBJECTS = suexec.lo
+-suexec: $(suexec_OBJECTS)
+- $(LINK) $(suexec_OBJECTS)
++suexec-pristine_OBJECTS = suexec.lo
++suexec-pristine: $(suexec-pristine_OBJECTS)
++ $(LINK) $(suexec-pristine_OBJECTS)
++
++suexec-custom_OBJECTS = suexec-custom.lo
++suexec-custom: $(suexec-custom_OBJECTS)
++ $(LINK) $(suexec-custom_OBJECTS)
+
+ htcacheclean_OBJECTS = htcacheclean.lo
+ htcacheclean: $(htcacheclean_OBJECTS)
diff --git a/debian/patches/customize_apxs.patch b/debian/patches/customize_apxs.patch
new file mode 100644
index 0000000..9c75ff1
--- /dev/null
+++ b/debian/patches/customize_apxs.patch
@@ -0,0 +1,220 @@
+Description: Adapt apxs to Debian specific changes
+ - Make apxs2 use a2enmod and /etc/apache2/mods-available
+ - Make libtool happier
+ - Use LDFLAGS from config_vars.mk, allows one to override them
+Forwarded: not-needed
+Author: Stefan Fritsch <sf@debian.org>
+Last-Update: 2012-03-17
+
+--- a/support/apxs.in
++++ b/support/apxs.in
+@@ -38,7 +38,7 @@
+ my $CFG_TARGET = get_vars("progname");
+ my $CFG_SYSCONFDIR = get_vars("sysconfdir");
+ my $CFG_CFLAGS = join ' ', map { get_vars($_) }
+- qw(SHLTCFLAGS CFLAGS NOTEST_CPPFLAGS EXTRA_CPPFLAGS EXTRA_CFLAGS);
++ qw(SHLTCFLAGS CFLAGS CPPFLAGS NOTEST_CPPFLAGS EXTRA_CPPFLAGS EXTRA_CFLAGS);
+ my $CFG_LDFLAGS = join ' ', map { get_vars($_) }
+ qw(LDFLAGS NOTEST_LDFLAGS SH_LDFLAGS);
+ my $includedir = get_vars("includedir");
+@@ -49,7 +49,7 @@
+ my $sbindir = get_vars("sbindir");
+ my $CFG_SBINDIR = eval qq("$sbindir");
+ my $ltflags = $ENV{'LTFLAGS'};
+-$ltflags or $ltflags = "--silent";
++$ltflags or $ltflags = "";
+
+ my %internal_vars = map {$_ => 1}
+ qw(TARGET CC CFLAGS CFLAGS_SHLIB LD_SHLIB LDFLAGS_SHLIB LIBS_SHLIB
+@@ -276,6 +276,7 @@
+ $data =~ s|%TARGET%|$CFG_TARGET|sg;
+ $data =~ s|%PREFIX%|$prefix|sg;
+ $data =~ s|%INSTALLBUILDDIR%|$installbuilddir|sg;
++ $data =~ s|%DATADIR%|$datadir|sg;
+
+ my ($mkf, $mods, $src) = ($data =~ m|^(.+)-=#=-\n(.+)-=#=-\n(.+)|s);
+
+@@ -428,7 +429,7 @@
+ $la =~ s|\.c$|.la|;
+ my $o = $s;
+ $o =~ s|\.c$|.o|;
+- push(@cmds, "$libtool $ltflags --mode=compile $CFG_CC $cflags -I$CFG_INCLUDEDIR $apr_includedir $apu_includedir $opt -c -o $lo $s && touch $slo");
++ push(@cmds, "$libtool $ltflags --mode=compile --tag=disable-static $CFG_CC $cflags -I$CFG_INCLUDEDIR $apr_includedir $apu_includedir $opt -c -o $lo $s && touch $slo");
+ unshift(@objs, $lo);
+ }
+
+@@ -469,7 +470,7 @@
+ $opt .= " -rpath $CFG_LIBEXECDIR -module -avoid-version $apr_ldflags";
+ }
+
+- push(@cmds, "$libtool $ltflags --mode=link $CFG_CC $ldflags -o $dso_file $opt $lo");
++ push(@cmds, "$libtool $ltflags --mode=link --tag=disable-static $CFG_CC $ldflags -o $dso_file $opt $lo");
+
+ # execute the commands
+ &execute_cmds(@cmds);
+@@ -503,7 +504,7 @@
+ if ($opt_i) {
+ push(@cmds, "$installbuilddir/instdso.sh SH_LIBTOOL='" .
+ "$libtool' $f $CFG_LIBEXECDIR");
+- push(@cmds, "chmod 755 $CFG_LIBEXECDIR/$t");
++ push(@cmds, "chmod 644 $CFG_LIBEXECDIR/$t");
+ }
+
+ # determine module symbolname and filename
+@@ -539,10 +540,11 @@
+ $filename = "mod_${name}.c";
+ }
+ my $dir = $CFG_LIBEXECDIR;
+- $dir =~ s|^$CFG_PREFIX/?||;
++ # Debian doesn't have a CFG_PREFIX, so this stuffs up:
++ # $dir =~ s|^$CFG_PREFIX/?||;
+ $dir =~ s|(.)$|$1/|;
+ $t =~ s|\.la$|.so|;
+- push(@lmd, sprintf("LoadModule %-18s %s", "${name}_module", "$dir$t"));
++ push(@lmd, [ $name, sprintf("LoadModule %-18s %s", "${name}_module", "$dir$t") ] );
+ }
+
+ # execute the commands
+@@ -550,108 +552,35 @@
+
+ # activate module via LoadModule/AddModule directive
+ if ($opt_a or $opt_A) {
+- if (not -f "$CFG_SYSCONFDIR/$CFG_TARGET.conf") {
+- error("Config file $CFG_SYSCONFDIR/$CFG_TARGET.conf not found");
++ if (not -d "$CFG_SYSCONFDIR/mods-available") {
++ error("Config file $CFG_SYSCONFDIR/mods-available not found");
+ exit(1);
+ }
+
+- open(FP, "<$CFG_SYSCONFDIR/$CFG_TARGET.conf") || die;
+- my $content = join('', <FP>);
+- close(FP);
+-
+- if ($content !~ m|\n#?\s*LoadModule\s+|) {
+- error("Activation failed for custom $CFG_SYSCONFDIR/$CFG_TARGET.conf file.");
+- error("At least one `LoadModule' directive already has to exist.");
+- exit(1);
+- }
+-
+- my $lmd;
+- my $c = '';
+- $c = '#' if ($opt_A);
+- foreach $lmd (@lmd) {
+- my $what = $opt_A ? "preparing" : "activating";
+- my $lmd_re = $lmd;
+- $lmd_re =~ s/\s+/\\s+/g;
+-
+- if ($content !~ m|\n#?\s*$lmd_re|) {
+- # check for open <containers>, so that the new LoadModule
+- # directive always appears *outside* of an <container>.
+-
+- my $before = ($content =~ m|^(.*\n)#?\s*LoadModule\s+[^\n]+\n|s)[0];
+-
+- # the '()=' trick forces list context and the scalar
+- # assignment counts the number of list members (aka number
+- # of matches) then
+- my $cntopen = () = ($before =~ m|^\s*<[^/].*$|mg);
+- my $cntclose = () = ($before =~ m|^\s*</.*$|mg);
+-
+- if ($cntopen == $cntclose) {
+- # fine. Last LoadModule is contextless.
+- $content =~ s|^(.*\n#?\s*LoadModule\s+[^\n]+\n)|$1$c$lmd\n|s;
++ my $entry;
++ foreach $entry (@lmd) {
++ my ($name, $lmd) = @{$entry};
++ my $filename = "$CFG_SYSCONFDIR/mods-available/$name.load";
++ if (-f $filename) {
++ my $cmd = "mv $filename $filename.bak~";
++ if (system($cmd) != 0) {
++ die "'$cmd' failed\n";
+ }
+- elsif ($cntopen < $cntclose) {
+- error('Configuration file is not valid. There are sections'
+- . ' closed before opened.');
+- exit(1);
+- }
+- else {
+- # put our cmd after the section containing the last
+- # LoadModule.
+- my $found =
+- $content =~ s!\A ( # string and capture start
+- (?:(?:
+- ^\s* # start of conf line with a
+- (?:[^<]|<[^/]) # directive which does not
+- # start with '</'
+-
+- .*(?:$)\n # rest of the line.
+- # the '$' is in parentheses
+- # to avoid misinterpreting
+- # the string "$\" as
+- # perl variable.
+-
+- )* # catch as much as possible
+- # of such lines. (including
+- # zero)
+-
+- ^\s*</.*(?:$)\n? # after the above, we
+- # expect a config line with
+- # a closing container (</)
+-
+- ) {$cntopen} # the whole pattern (bunch
+- # of lines that end up with
+- # a closing directive) must
+- # be repeated $cntopen
+- # times. That's it.
+- # Simple, eh? ;-)
+-
+- ) # capture end
+- !$1$c$lmd\n!mx;
+-
+- unless ($found) {
+- error('Configuration file is not valid. There are '
+- . 'sections opened and not closed.');
+- exit(1);
+- }
++ }
++
++ notice("[preparing module `$name' in $filename]");
++ open(FP, ">$filename") || die;
++ print FP "$lmd\n";
++ close(FP);
++
++ if ($opt_a) {
++ my $cmd = "a2enmod $name";
++ if (system($cmd) != 0) {
++ die "'$cmd' failed\n";
+ }
+- } else {
+- # replace already existing LoadModule line
+- $content =~ s|^(.*\n)#?\s*$lmd_re[^\n]*\n|$1$c$lmd\n|s;
+- }
+- $lmd =~ m|LoadModule\s+(.+?)_module.*|;
+- notice("[$what module `$1' in $CFG_SYSCONFDIR/$CFG_TARGET.conf]");
+- }
+- if (@lmd) {
+- if (open(FP, ">$CFG_SYSCONFDIR/$CFG_TARGET.conf.new")) {
+- print FP $content;
+- close(FP);
+- system("cp $CFG_SYSCONFDIR/$CFG_TARGET.conf $CFG_SYSCONFDIR/$CFG_TARGET.conf.bak && " .
+- "cp $CFG_SYSCONFDIR/$CFG_TARGET.conf.new $CFG_SYSCONFDIR/$CFG_TARGET.conf && " .
+- "rm $CFG_SYSCONFDIR/$CFG_TARGET.conf.new");
+- } else {
+- notice("unable to open configuration file");
+ }
+- }
++
++ }
+ }
+ }
+
+@@ -671,8 +600,8 @@
+ ##
+
+ builddir=.
+-top_srcdir=%PREFIX%
+-top_builddir=%PREFIX%
++top_srcdir=%DATADIR%
++top_builddir=%DATADIR%
+ include %INSTALLBUILDDIR%/special.mk
+
+ # the used tools
diff --git a/debian/patches/fhs_compliance.patch b/debian/patches/fhs_compliance.patch
new file mode 100644
index 0000000..00f8f71
--- /dev/null
+++ b/debian/patches/fhs_compliance.patch
@@ -0,0 +1,64 @@
+Description: Fix up FHS file locations for apache2 droppings.
+Forwarded: not-needed
+Author: Adam Conrad <adconrad@0c3.net>
+Last-Update: 2012-02-25
+--- a/configure
++++ b/configure
+@@ -39688,17 +39688,17 @@
+
+
+ cat >>confdefs.h <<_ACEOF
+-#define HTTPD_ROOT "${ap_prefix}"
++#define HTTPD_ROOT "/etc/apache2"
+ _ACEOF
+
+
+ cat >>confdefs.h <<_ACEOF
+-#define SERVER_CONFIG_FILE "${rel_sysconfdir}/${progname}.conf"
++#define SERVER_CONFIG_FILE "${progname}.conf"
+ _ACEOF
+
+
+ cat >>confdefs.h <<_ACEOF
+-#define AP_TYPES_CONFIG_FILE "${rel_sysconfdir}/mime.types"
++#define AP_TYPES_CONFIG_FILE "mime.types"
+ _ACEOF
+
+
+--- a/configure.in
++++ b/configure.in
+@@ -871,11 +871,11 @@
+ echo $MODLIST | $AWK -f $srcdir/build/build-modules-c.awk > modules.c
+
+ APR_EXPAND_VAR(ap_prefix, $prefix)
+-AC_DEFINE_UNQUOTED(HTTPD_ROOT, "${ap_prefix}",
++AC_DEFINE_UNQUOTED(HTTPD_ROOT, "/etc/apache2",
+ [Root directory of the Apache install area])
+-AC_DEFINE_UNQUOTED(SERVER_CONFIG_FILE, "${rel_sysconfdir}/${progname}.conf",
++AC_DEFINE_UNQUOTED(SERVER_CONFIG_FILE, "${progname}.conf",
+ [Location of the config file, relative to the Apache root directory])
+-AC_DEFINE_UNQUOTED(AP_TYPES_CONFIG_FILE, "${rel_sysconfdir}/mime.types",
++AC_DEFINE_UNQUOTED(AP_TYPES_CONFIG_FILE, "mime.types",
+ [Location of the MIME types config file, relative to the Apache root directory])
+
+ perlbin=`$ac_aux_dir/PrintPath perl`
+--- a/include/ap_config_layout.h.in
++++ b/include/ap_config_layout.h.in
+@@ -60,5 +60,6 @@
+ #define DEFAULT_REL_LOGFILEDIR "@rel_logfiledir@"
+ #define DEFAULT_EXP_PROXYCACHEDIR "@exp_proxycachedir@"
+ #define DEFAULT_REL_PROXYCACHEDIR "@rel_proxycachedir@"
++#define DEFAULT_PIDLOG "/var/run/apache2.pid"
+
+ #endif /* AP_CONFIG_LAYOUT_H */
+--- a/include/httpd.h
++++ b/include/httpd.h
+@@ -109,7 +109,7 @@
+ #define DOCUMENT_LOCATION HTTPD_ROOT "/docs"
+ #else
+ /* Set default for non OS/2 file system */
+-#define DOCUMENT_LOCATION HTTPD_ROOT "/htdocs"
++#define DOCUMENT_LOCATION "/var/www/html"
+ #endif
+ #endif /* DOCUMENT_LOCATION */
+
diff --git a/debian/patches/import-http2-module-from-2.4.46.patch b/debian/patches/import-http2-module-from-2.4.46.patch
new file mode 100644
index 0000000..cdca37d
--- /dev/null
+++ b/debian/patches/import-http2-module-from-2.4.46.patch
@@ -0,0 +1,7588 @@
+Description: import http2 module from 2.4.46
+ There are too many changes in the http2 module to distinguish the
+ CVE-2019-9517, CVE-2019-10082 and CVE-2019-10081 changes.
+Author: Apache authors
+Bug: https://security-tracker.debian.org/tracker/CVE-2019-9517
+ https://security-tracker.debian.org/tracker/CVE-2019-10082
+ https://security-tracker.debian.org/tracker/CVE-2019-10081
+ https://security-tracker.debian.org/tracker/CVE-2020-9490
+ https://security-tracker.debian.org/tracker/CVE-2020-11993
+Forwarded: not-needed
+Reviewed-By: Xavier Guimard <yadd@debian.org>
+Last-Update: 2020-08-25
+
+--- a/modules/http2/config2.m4
++++ b/modules/http2/config2.m4
+@@ -31,7 +31,6 @@
+ h2_h2.lo dnl
+ h2_headers.lo dnl
+ h2_mplx.lo dnl
+-h2_ngn_shed.lo dnl
+ h2_push.lo dnl
+ h2_request.lo dnl
+ h2_session.lo dnl
+--- a/modules/http2/h2.h
++++ b/modules/http2/h2.h
+@@ -48,12 +48,12 @@
+ #define H2_HEADER_PATH_LEN 5
+ #define H2_CRLF "\r\n"
+
+-/* Max data size to write so it fits inside a TLS record */
+-#define H2_DATA_CHUNK_SIZE ((16*1024) - 100 - 9)
+-
+ /* Size of the frame header itself in HTTP/2 */
+ #define H2_FRAME_HDR_LEN 9
+
++/* Max data size to write so it fits inside a TLS record */
++#define H2_DATA_CHUNK_SIZE ((16*1024) - 100 - H2_FRAME_HDR_LEN)
++
+ /* Maximum number of padding bytes in a frame, rfc7540 */
+ #define H2_MAX_PADLEN 256
+ /* Initial default window size, RFC 7540 ch. 6.5.2 */
+@@ -138,7 +138,7 @@
+ apr_table_t *headers;
+
+ apr_time_t request_time;
+- unsigned int chunked : 1; /* iff requst body needs to be forwarded as chunked */
++ unsigned int chunked : 1; /* iff request body needs to be forwarded as chunked */
+ unsigned int serialize : 1; /* iff this request is written in HTTP/1.1 serialization */
+ apr_off_t raw_bytes; /* RAW network bytes that generated this request - if known. */
+ };
+@@ -162,5 +162,6 @@
+ #define H2_FILTER_DEBUG_NOTE "http2-debug"
+ #define H2_HDR_CONFORMANCE "http2-hdr-conformance"
+ #define H2_HDR_CONFORMANCE_UNSAFE "unsafe"
++#define H2_PUSH_MODE_NOTE "http2-push-mode"
+
+ #endif /* defined(__mod_h2__h2__) */
+--- a/modules/http2/h2_alt_svc.c
++++ b/modules/http2/h2_alt_svc.c
+@@ -75,7 +75,7 @@
+
+ static int h2_alt_svc_handler(request_rec *r)
+ {
+- const h2_config *cfg;
++ apr_array_header_t *alt_svcs;
+ int i;
+
+ if (r->connection->keepalives > 0) {
+@@ -87,8 +87,8 @@
+ return DECLINED;
+ }
+
+- cfg = h2_config_sget(r->server);
+- if (r->hostname && cfg && cfg->alt_svcs && cfg->alt_svcs->nelts > 0) {
++ alt_svcs = h2_config_alt_svcs(r);
++ if (r->hostname && alt_svcs && alt_svcs->nelts > 0) {
+ const char *alt_svc_used = apr_table_get(r->headers_in, "Alt-Svc-Used");
+ if (!alt_svc_used) {
+ /* We have alt-svcs defined and client is not already using
+@@ -99,7 +99,7 @@
+ const char *alt_svc = "";
+ const char *svc_ma = "";
+ int secure = h2_h2_is_tls(r->connection);
+- int ma = h2_config_geti(cfg, H2_CONF_ALT_SVC_MAX_AGE);
++ int ma = h2_config_rgeti(r, H2_CONF_ALT_SVC_MAX_AGE);
+ if (ma >= 0) {
+ svc_ma = apr_psprintf(r->pool, "; ma=%d", ma);
+ }
+@@ -107,8 +107,8 @@
+ "h2_alt_svc: announce %s for %s:%d",
+ (secure? "secure" : "insecure"),
+ r->hostname, (int)r->server->port);
+- for (i = 0; i < cfg->alt_svcs->nelts; ++i) {
+- h2_alt_svc *as = h2_alt_svc_IDX(cfg->alt_svcs, i);
++ for (i = 0; i < alt_svcs->nelts; ++i) {
++ h2_alt_svc *as = h2_alt_svc_IDX(alt_svcs, i);
+ const char *ahost = as->host;
+ if (ahost && !apr_strnatcasecmp(ahost, r->hostname)) {
+ ahost = NULL;
+--- a/modules/http2/h2_bucket_beam.c
++++ b/modules/http2/h2_bucket_beam.c
+@@ -196,7 +196,7 @@
+ * bucket beam that can transport buckets across threads
+ ******************************************************************************/
+
+-static void mutex_leave(void *ctx, apr_thread_mutex_t *lock)
++static void mutex_leave(apr_thread_mutex_t *lock)
+ {
+ apr_thread_mutex_unlock(lock);
+ }
+@@ -217,7 +217,7 @@
+ static void leave_yellow(h2_bucket_beam *beam, h2_beam_lock *pbl)
+ {
+ if (pbl->leave) {
+- pbl->leave(pbl->leave_ctx, pbl->mutex);
++ pbl->leave(pbl->mutex);
+ }
+ }
+
+--- a/modules/http2/h2_bucket_beam.h
++++ b/modules/http2/h2_bucket_beam.h
+@@ -126,12 +126,11 @@
+ * buffers until the transmission is complete. Star gates use a similar trick.
+ */
+
+-typedef void h2_beam_mutex_leave(void *ctx, struct apr_thread_mutex_t *lock);
++typedef void h2_beam_mutex_leave(struct apr_thread_mutex_t *lock);
+
+ typedef struct {
+ apr_thread_mutex_t *mutex;
+ h2_beam_mutex_leave *leave;
+- void *leave_ctx;
+ } h2_beam_lock;
+
+ typedef struct h2_bucket_beam h2_bucket_beam;
+--- a/modules/http2/h2_config.c
++++ b/modules/http2/h2_config.c
+@@ -42,6 +42,55 @@
+ #define H2_CONFIG_GET(a, b, n) \
+ (((a)->n == DEF_VAL)? (b) : (a))->n
+
++#define H2_CONFIG_SET(a, n, v) \
++ ((a)->n = v)
++
++#define CONFIG_CMD_SET(cmd,dir,var,val) \
++ h2_config_seti(((cmd)->path? (dir) : NULL), h2_config_sget((cmd)->server), var, val)
++
++#define CONFIG_CMD_SET64(cmd,dir,var,val) \
++ h2_config_seti64(((cmd)->path? (dir) : NULL), h2_config_sget((cmd)->server), var, val)
++
++/* Apache httpd module configuration for h2. */
++typedef struct h2_config {
++ const char *name;
++ int h2_max_streams; /* max concurrent # streams (http2) */
++ int h2_window_size; /* stream window size (http2) */
++ int min_workers; /* min # of worker threads/child */
++ int max_workers; /* max # of worker threads/child */
++ int max_worker_idle_secs; /* max # of idle seconds for worker */
++ int stream_max_mem_size; /* max # bytes held in memory/stream */
++ apr_array_header_t *alt_svcs; /* h2_alt_svc specs for this server */
++ int alt_svc_max_age; /* seconds clients can rely on alt-svc info*/
++ int serialize_headers; /* Use serialized HTTP/1.1 headers for
++ processing, better compatibility */
++ int h2_direct; /* if mod_h2 is active directly */
++ int modern_tls_only; /* Accept only modern TLS in HTTP/2 connections */
++ int h2_upgrade; /* Allow HTTP/1 upgrade to h2/h2c */
++ apr_int64_t tls_warmup_size; /* Amount of TLS data to send before going full write size */
++ int tls_cooldown_secs; /* Seconds of idle time before going back to small TLS records */
++ int h2_push; /* if HTTP/2 server push is enabled */
++ struct apr_hash_t *priorities;/* map of content-type to h2_priority records */
++
++ int push_diary_size; /* # of entries in push diary */
++ int copy_files; /* if files shall be copied vs setaside on output */
++ apr_array_header_t *push_list;/* list of h2_push_res configurations */
++ int early_hints; /* support status code 103 */
++ int padding_bits;
++ int padding_always;
++} h2_config;
++
++typedef struct h2_dir_config {
++ const char *name;
++ apr_array_header_t *alt_svcs; /* h2_alt_svc specs for this server */
++ int alt_svc_max_age; /* seconds clients can rely on alt-svc info*/
++ int h2_upgrade; /* Allow HTTP/1 upgrade to h2/h2c */
++ int h2_push; /* if HTTP/2 server push is enabled */
++ apr_array_header_t *push_list;/* list of h2_push_res configurations */
++ int early_hints; /* support status code 103 */
++} h2_dir_config;
++
++
+ static h2_config defconf = {
+ "default",
+ 100, /* max_streams */
+@@ -64,6 +113,18 @@
+ 0, /* copy files across threads */
+ NULL, /* push list */
+ 0, /* early hints, http status 103 */
++ 0, /* padding bits */
++ 1, /* padding always */
++};
++
++static h2_dir_config defdconf = {
++ "default",
++ NULL, /* no alt-svcs */
++ -1, /* alt-svc max age */
++ -1, /* HTTP/1 Upgrade support */
++ -1, /* HTTP/2 server push enabled */
++ NULL, /* push list */
++ -1, /* early hints, http status 103 */
+ };
+
+ void h2_config_init(apr_pool_t *pool)
+@@ -71,12 +132,10 @@
+ (void)pool;
+ }
+
+-static void *h2_config_create(apr_pool_t *pool,
+- const char *prefix, const char *x)
++void *h2_config_create_svr(apr_pool_t *pool, server_rec *s)
+ {
+ h2_config *conf = (h2_config *)apr_pcalloc(pool, sizeof(h2_config));
+- const char *s = x? x : "unknown";
+- char *name = apr_pstrcat(pool, prefix, "[", s, "]", NULL);
++ char *name = apr_pstrcat(pool, "srv[", s->defn_name, "]", NULL);
+
+ conf->name = name;
+ conf->h2_max_streams = DEF_VAL;
+@@ -98,19 +157,11 @@
+ conf->copy_files = DEF_VAL;
+ conf->push_list = NULL;
+ conf->early_hints = DEF_VAL;
++ conf->padding_bits = DEF_VAL;
++ conf->padding_always = DEF_VAL;
+ return conf;
+ }
+
+-void *h2_config_create_svr(apr_pool_t *pool, server_rec *s)
+-{
+- return h2_config_create(pool, "srv", s->defn_name);
+-}
+-
+-void *h2_config_create_dir(apr_pool_t *pool, char *x)
+-{
+- return h2_config_create(pool, "dir", x);
+-}
+-
+ static void *h2_config_merge(apr_pool_t *pool, void *basev, void *addv)
+ {
+ h2_config *base = (h2_config *)basev;
+@@ -149,25 +200,52 @@
+ n->push_list = add->push_list? add->push_list : base->push_list;
+ }
+ n->early_hints = H2_CONFIG_GET(add, base, early_hints);
++ n->padding_bits = H2_CONFIG_GET(add, base, padding_bits);
++ n->padding_always = H2_CONFIG_GET(add, base, padding_always);
+ return n;
+ }
+
+-void *h2_config_merge_dir(apr_pool_t *pool, void *basev, void *addv)
++void *h2_config_merge_svr(apr_pool_t *pool, void *basev, void *addv)
+ {
+ return h2_config_merge(pool, basev, addv);
+ }
+
+-void *h2_config_merge_svr(apr_pool_t *pool, void *basev, void *addv)
++void *h2_config_create_dir(apr_pool_t *pool, char *x)
+ {
+- return h2_config_merge(pool, basev, addv);
++ h2_dir_config *conf = (h2_dir_config *)apr_pcalloc(pool, sizeof(h2_dir_config));
++ const char *s = x? x : "unknown";
++ char *name = apr_pstrcat(pool, "dir[", s, "]", NULL);
++
++ conf->name = name;
++ conf->alt_svc_max_age = DEF_VAL;
++ conf->h2_upgrade = DEF_VAL;
++ conf->h2_push = DEF_VAL;
++ conf->early_hints = DEF_VAL;
++ return conf;
+ }
+
+-int h2_config_geti(const h2_config *conf, h2_config_var_t var)
++void *h2_config_merge_dir(apr_pool_t *pool, void *basev, void *addv)
+ {
+- return (int)h2_config_geti64(conf, var);
++ h2_dir_config *base = (h2_dir_config *)basev;
++ h2_dir_config *add = (h2_dir_config *)addv;
++ h2_dir_config *n = (h2_dir_config *)apr_pcalloc(pool, sizeof(h2_dir_config));
++
++ n->name = apr_pstrcat(pool, "merged[", add->name, ", ", base->name, "]", NULL);
++ n->alt_svcs = add->alt_svcs? add->alt_svcs : base->alt_svcs;
++ n->alt_svc_max_age = H2_CONFIG_GET(add, base, alt_svc_max_age);
++ n->h2_upgrade = H2_CONFIG_GET(add, base, h2_upgrade);
++ n->h2_push = H2_CONFIG_GET(add, base, h2_push);
++ if (add->push_list && base->push_list) {
++ n->push_list = apr_array_append(pool, base->push_list, add->push_list);
++ }
++ else {
++ n->push_list = add->push_list? add->push_list : base->push_list;
++ }
++ n->early_hints = H2_CONFIG_GET(add, base, early_hints);
++ return n;
+ }
+
+-apr_int64_t h2_config_geti64(const h2_config *conf, h2_config_var_t var)
++static apr_int64_t h2_srv_config_geti64(const h2_config *conf, h2_config_var_t var)
+ {
+ switch(var) {
+ case H2_CONF_MAX_STREAMS:
+@@ -204,12 +282,93 @@
+ return H2_CONFIG_GET(conf, &defconf, copy_files);
+ case H2_CONF_EARLY_HINTS:
+ return H2_CONFIG_GET(conf, &defconf, early_hints);
++ case H2_CONF_PADDING_BITS:
++ return H2_CONFIG_GET(conf, &defconf, padding_bits);
++ case H2_CONF_PADDING_ALWAYS:
++ return H2_CONFIG_GET(conf, &defconf, padding_always);
+ default:
+ return DEF_VAL;
+ }
+ }
+
+-const h2_config *h2_config_sget(server_rec *s)
++static void h2_srv_config_seti(h2_config *conf, h2_config_var_t var, int val)
++{
++ switch(var) {
++ case H2_CONF_MAX_STREAMS:
++ H2_CONFIG_SET(conf, h2_max_streams, val);
++ break;
++ case H2_CONF_WIN_SIZE:
++ H2_CONFIG_SET(conf, h2_window_size, val);
++ break;
++ case H2_CONF_MIN_WORKERS:
++ H2_CONFIG_SET(conf, min_workers, val);
++ break;
++ case H2_CONF_MAX_WORKERS:
++ H2_CONFIG_SET(conf, max_workers, val);
++ break;
++ case H2_CONF_MAX_WORKER_IDLE_SECS:
++ H2_CONFIG_SET(conf, max_worker_idle_secs, val);
++ break;
++ case H2_CONF_STREAM_MAX_MEM:
++ H2_CONFIG_SET(conf, stream_max_mem_size, val);
++ break;
++ case H2_CONF_ALT_SVC_MAX_AGE:
++ H2_CONFIG_SET(conf, alt_svc_max_age, val);
++ break;
++ case H2_CONF_SER_HEADERS:
++ H2_CONFIG_SET(conf, serialize_headers, val);
++ break;
++ case H2_CONF_MODERN_TLS_ONLY:
++ H2_CONFIG_SET(conf, modern_tls_only, val);
++ break;
++ case H2_CONF_UPGRADE:
++ H2_CONFIG_SET(conf, h2_upgrade, val);
++ break;
++ case H2_CONF_DIRECT:
++ H2_CONFIG_SET(conf, h2_direct, val);
++ break;
++ case H2_CONF_TLS_WARMUP_SIZE:
++ H2_CONFIG_SET(conf, tls_warmup_size, val);
++ break;
++ case H2_CONF_TLS_COOLDOWN_SECS:
++ H2_CONFIG_SET(conf, tls_cooldown_secs, val);
++ break;
++ case H2_CONF_PUSH:
++ H2_CONFIG_SET(conf, h2_push, val);
++ break;
++ case H2_CONF_PUSH_DIARY_SIZE:
++ H2_CONFIG_SET(conf, push_diary_size, val);
++ break;
++ case H2_CONF_COPY_FILES:
++ H2_CONFIG_SET(conf, copy_files, val);
++ break;
++ case H2_CONF_EARLY_HINTS:
++ H2_CONFIG_SET(conf, early_hints, val);
++ break;
++ case H2_CONF_PADDING_BITS:
++ H2_CONFIG_SET(conf, padding_bits, val);
++ break;
++ case H2_CONF_PADDING_ALWAYS:
++ H2_CONFIG_SET(conf, padding_always, val);
++ break;
++ default:
++ break;
++ }
++}
++
++static void h2_srv_config_seti64(h2_config *conf, h2_config_var_t var, apr_int64_t val)
++{
++ switch(var) {
++ case H2_CONF_TLS_WARMUP_SIZE:
++ H2_CONFIG_SET(conf, tls_warmup_size, val);
++ break;
++ default:
++ h2_srv_config_seti(conf, var, (int)val);
++ break;
++ }
++}
++
++static h2_config *h2_config_sget(server_rec *s)
+ {
+ h2_config *cfg = (h2_config *)ap_get_module_config(s->module_config,
+ &http2_module);
+@@ -217,9 +376,162 @@
+ return cfg;
+ }
+
+-const struct h2_priority *h2_config_get_priority(const h2_config *conf,
+- const char *content_type)
++static const h2_dir_config *h2_config_rget(request_rec *r)
++{
++ h2_dir_config *cfg = (h2_dir_config *)ap_get_module_config(r->per_dir_config,
++ &http2_module);
++ ap_assert(cfg);
++ return cfg;
++}
++
++static apr_int64_t h2_dir_config_geti64(const h2_dir_config *conf, h2_config_var_t var)
++{
++ switch(var) {
++ case H2_CONF_ALT_SVC_MAX_AGE:
++ return H2_CONFIG_GET(conf, &defdconf, alt_svc_max_age);
++ case H2_CONF_UPGRADE:
++ return H2_CONFIG_GET(conf, &defdconf, h2_upgrade);
++ case H2_CONF_PUSH:
++ return H2_CONFIG_GET(conf, &defdconf, h2_push);
++ case H2_CONF_EARLY_HINTS:
++ return H2_CONFIG_GET(conf, &defdconf, early_hints);
++
++ default:
++ return DEF_VAL;
++ }
++}
++
++static void h2_config_seti(h2_dir_config *dconf, h2_config *conf, h2_config_var_t var, int val)
++{
++ int set_srv = !dconf;
++ if (dconf) {
++ switch(var) {
++ case H2_CONF_ALT_SVC_MAX_AGE:
++ H2_CONFIG_SET(dconf, alt_svc_max_age, val);
++ break;
++ case H2_CONF_UPGRADE:
++ H2_CONFIG_SET(dconf, h2_upgrade, val);
++ break;
++ case H2_CONF_PUSH:
++ H2_CONFIG_SET(dconf, h2_push, val);
++ break;
++ case H2_CONF_EARLY_HINTS:
++ H2_CONFIG_SET(dconf, early_hints, val);
++ break;
++ default:
++ /* not handled in dir_conf */
++ set_srv = 1;
++ break;
++ }
++ }
++
++ if (set_srv) {
++ h2_srv_config_seti(conf, var, val);
++ }
++}
++
++static void h2_config_seti64(h2_dir_config *dconf, h2_config *conf, h2_config_var_t var, apr_int64_t val)
+ {
++ int set_srv = !dconf;
++ if (dconf) {
++ switch(var) {
++ default:
++ /* not handled in dir_conf */
++ set_srv = 1;
++ break;
++ }
++ }
++
++ if (set_srv) {
++ h2_srv_config_seti64(conf, var, val);
++ }
++}
++
++static const h2_config *h2_config_get(conn_rec *c)
++{
++ h2_ctx *ctx = h2_ctx_get(c, 0);
++
++ if (ctx) {
++ if (ctx->config) {
++ return ctx->config;
++ }
++ else if (ctx->server) {
++ ctx->config = h2_config_sget(ctx->server);
++ return ctx->config;
++ }
++ }
++
++ return h2_config_sget(c->base_server);
++}
++
++int h2_config_cgeti(conn_rec *c, h2_config_var_t var)
++{
++ return (int)h2_srv_config_geti64(h2_config_get(c), var);
++}
++
++apr_int64_t h2_config_cgeti64(conn_rec *c, h2_config_var_t var)
++{
++ return h2_srv_config_geti64(h2_config_get(c), var);
++}
++
++int h2_config_sgeti(server_rec *s, h2_config_var_t var)
++{
++ return (int)h2_srv_config_geti64(h2_config_sget(s), var);
++}
++
++apr_int64_t h2_config_sgeti64(server_rec *s, h2_config_var_t var)
++{
++ return h2_srv_config_geti64(h2_config_sget(s), var);
++}
++
++int h2_config_geti(request_rec *r, server_rec *s, h2_config_var_t var)
++{
++ return (int)h2_config_geti64(r, s, var);
++}
++
++apr_int64_t h2_config_geti64(request_rec *r, server_rec *s, h2_config_var_t var)
++{
++ apr_int64_t mode = r? (int)h2_dir_config_geti64(h2_config_rget(r), var) : DEF_VAL;
++ return (mode != DEF_VAL)? mode : h2_config_sgeti64(s, var);
++}
++
++int h2_config_rgeti(request_rec *r, h2_config_var_t var)
++{
++ return h2_config_geti(r, r->server, var);
++}
++
++apr_int64_t h2_config_rgeti64(request_rec *r, h2_config_var_t var)
++{
++ return h2_config_geti64(r, r->server, var);
++}
++
++apr_array_header_t *h2_config_push_list(request_rec *r)
++{
++ const h2_config *sconf;
++ const h2_dir_config *conf = h2_config_rget(r);
++
++ if (conf && conf->push_list) {
++ return conf->push_list;
++ }
++ sconf = h2_config_sget(r->server);
++ return sconf? sconf->push_list : NULL;
++}
++
++apr_array_header_t *h2_config_alt_svcs(request_rec *r)
++{
++ const h2_config *sconf;
++ const h2_dir_config *conf = h2_config_rget(r);
++
++ if (conf && conf->alt_svcs) {
++ return conf->alt_svcs;
++ }
++ sconf = h2_config_sget(r->server);
++ return sconf? sconf->alt_svcs : NULL;
++}
++
++const struct h2_priority *h2_cconfig_get_priority(conn_rec *c, const char *content_type)
++{
++ const h2_config *conf = h2_config_get(c);
+ if (content_type && conf->priorities) {
+ size_t len = strcspn(content_type, "; \t");
+ h2_priority *prio = apr_hash_get(conf->priorities, content_type, len);
+@@ -228,166 +540,156 @@
+ return NULL;
+ }
+
+-static const char *h2_conf_set_max_streams(cmd_parms *parms,
+- void *arg, const char *value)
++static const char *h2_conf_set_max_streams(cmd_parms *cmd,
++ void *dirconf, const char *value)
+ {
+- h2_config *cfg = (h2_config *)h2_config_sget(parms->server);
+- cfg->h2_max_streams = (int)apr_atoi64(value);
+- (void)arg;
+- if (cfg->h2_max_streams < 1) {
++ apr_int64_t ival = (int)apr_atoi64(value);
++ if (ival < 1) {
+ return "value must be > 0";
+ }
++ CONFIG_CMD_SET64(cmd, dirconf, H2_CONF_MAX_STREAMS, ival);
+ return NULL;
+ }
+
+-static const char *h2_conf_set_window_size(cmd_parms *parms,
+- void *arg, const char *value)
++static const char *h2_conf_set_window_size(cmd_parms *cmd,
++ void *dirconf, const char *value)
+ {
+- h2_config *cfg = (h2_config *)h2_config_sget(parms->server);
+- cfg->h2_window_size = (int)apr_atoi64(value);
+- (void)arg;
+- if (cfg->h2_window_size < 1024) {
++ int val = (int)apr_atoi64(value);
++ if (val < 1024) {
+ return "value must be >= 1024";
+ }
++ CONFIG_CMD_SET(cmd, dirconf, H2_CONF_WIN_SIZE, val);
+ return NULL;
+ }
+
+-static const char *h2_conf_set_min_workers(cmd_parms *parms,
+- void *arg, const char *value)
++static const char *h2_conf_set_min_workers(cmd_parms *cmd,
++ void *dirconf, const char *value)
+ {
+- h2_config *cfg = (h2_config *)h2_config_sget(parms->server);
+- cfg->min_workers = (int)apr_atoi64(value);
+- (void)arg;
+- if (cfg->min_workers < 1) {
++ int val = (int)apr_atoi64(value);
++ if (val < 1) {
+ return "value must be > 0";
+ }
++ CONFIG_CMD_SET(cmd, dirconf, H2_CONF_MIN_WORKERS, val);
+ return NULL;
+ }
+
+-static const char *h2_conf_set_max_workers(cmd_parms *parms,
+- void *arg, const char *value)
++static const char *h2_conf_set_max_workers(cmd_parms *cmd,
++ void *dirconf, const char *value)
+ {
+- h2_config *cfg = (h2_config *)h2_config_sget(parms->server);
+- cfg->max_workers = (int)apr_atoi64(value);
+- (void)arg;
+- if (cfg->max_workers < 1) {
++ int val = (int)apr_atoi64(value);
++ if (val < 1) {
+ return "value must be > 0";
+ }
++ CONFIG_CMD_SET(cmd, dirconf, H2_CONF_MAX_WORKERS, val);
+ return NULL;
+ }
+
+-static const char *h2_conf_set_max_worker_idle_secs(cmd_parms *parms,
+- void *arg, const char *value)
++static const char *h2_conf_set_max_worker_idle_secs(cmd_parms *cmd,
++ void *dirconf, const char *value)
+ {
+- h2_config *cfg = (h2_config *)h2_config_sget(parms->server);
+- cfg->max_worker_idle_secs = (int)apr_atoi64(value);
+- (void)arg;
+- if (cfg->max_worker_idle_secs < 1) {
++ int val = (int)apr_atoi64(value);
++ if (val < 1) {
+ return "value must be > 0";
+ }
++ CONFIG_CMD_SET(cmd, dirconf, H2_CONF_MAX_WORKER_IDLE_SECS, val);
+ return NULL;
+ }
+
+-static const char *h2_conf_set_stream_max_mem_size(cmd_parms *parms,
+- void *arg, const char *value)
++static const char *h2_conf_set_stream_max_mem_size(cmd_parms *cmd,
++ void *dirconf, const char *value)
+ {
+- h2_config *cfg = (h2_config *)h2_config_sget(parms->server);
+-
+-
+- cfg->stream_max_mem_size = (int)apr_atoi64(value);
+- (void)arg;
+- if (cfg->stream_max_mem_size < 1024) {
++ int val = (int)apr_atoi64(value);
++ if (val < 1024) {
+ return "value must be >= 1024";
+ }
++ CONFIG_CMD_SET(cmd, dirconf, H2_CONF_STREAM_MAX_MEM, val);
+ return NULL;
+ }
+
+-static const char *h2_add_alt_svc(cmd_parms *parms,
+- void *arg, const char *value)
++static const char *h2_add_alt_svc(cmd_parms *cmd,
++ void *dirconf, const char *value)
+ {
+ if (value && *value) {
+- h2_config *cfg = (h2_config *)h2_config_sget(parms->server);
+- h2_alt_svc *as = h2_alt_svc_parse(value, parms->pool);
++ h2_alt_svc *as = h2_alt_svc_parse(value, cmd->pool);
+ if (!as) {
+ return "unable to parse alt-svc specifier";
+ }
+- if (!cfg->alt_svcs) {
+- cfg->alt_svcs = apr_array_make(parms->pool, 5, sizeof(h2_alt_svc*));
++
++ if (cmd->path) {
++ h2_dir_config *dcfg = (h2_dir_config *)dirconf;
++ if (!dcfg->alt_svcs) {
++ dcfg->alt_svcs = apr_array_make(cmd->pool, 5, sizeof(h2_alt_svc*));
++ }
++ APR_ARRAY_PUSH(dcfg->alt_svcs, h2_alt_svc*) = as;
++ }
++ else {
++ h2_config *cfg = (h2_config *)h2_config_sget(cmd->server);
++ if (!cfg->alt_svcs) {
++ cfg->alt_svcs = apr_array_make(cmd->pool, 5, sizeof(h2_alt_svc*));
++ }
++ APR_ARRAY_PUSH(cfg->alt_svcs, h2_alt_svc*) = as;
+ }
+- APR_ARRAY_PUSH(cfg->alt_svcs, h2_alt_svc*) = as;
+ }
+- (void)arg;
+ return NULL;
+ }
+
+-static const char *h2_conf_set_alt_svc_max_age(cmd_parms *parms,
+- void *arg, const char *value)
++static const char *h2_conf_set_alt_svc_max_age(cmd_parms *cmd,
++ void *dirconf, const char *value)
+ {
+- h2_config *cfg = (h2_config *)h2_config_sget(parms->server);
+- cfg->alt_svc_max_age = (int)apr_atoi64(value);
+- (void)arg;
++ int val = (int)apr_atoi64(value);
++ CONFIG_CMD_SET(cmd, dirconf, H2_CONF_ALT_SVC_MAX_AGE, val);
+ return NULL;
+ }
+
+-static const char *h2_conf_set_session_extra_files(cmd_parms *parms,
+- void *arg, const char *value)
++static const char *h2_conf_set_session_extra_files(cmd_parms *cmd,
++ void *dirconf, const char *value)
+ {
+ /* deprecated, ignore */
+- (void)arg;
++ (void)dirconf;
+ (void)value;
+- ap_log_perror(APLOG_MARK, APLOG_WARNING, 0, parms->pool, /* NO LOGNO */
++ ap_log_perror(APLOG_MARK, APLOG_WARNING, 0, cmd->pool, /* NO LOGNO */
+ "H2SessionExtraFiles is obsolete and will be ignored");
+ return NULL;
+ }
+
+-static const char *h2_conf_set_serialize_headers(cmd_parms *parms,
+- void *arg, const char *value)
++static const char *h2_conf_set_serialize_headers(cmd_parms *cmd,
++ void *dirconf, const char *value)
+ {
+- h2_config *cfg = (h2_config *)h2_config_sget(parms->server);
+ if (!strcasecmp(value, "On")) {
+- cfg->serialize_headers = 1;
++ CONFIG_CMD_SET(cmd, dirconf, H2_CONF_SER_HEADERS, 1);
+ return NULL;
+ }
+ else if (!strcasecmp(value, "Off")) {
+- cfg->serialize_headers = 0;
++ CONFIG_CMD_SET(cmd, dirconf, H2_CONF_SER_HEADERS, 0);
+ return NULL;
+ }
+-
+- (void)arg;
+ return "value must be On or Off";
+ }
+
+-static const char *h2_conf_set_direct(cmd_parms *parms,
+- void *arg, const char *value)
++static const char *h2_conf_set_direct(cmd_parms *cmd,
++ void *dirconf, const char *value)
+ {
+- h2_config *cfg = (h2_config *)h2_config_sget(parms->server);
+ if (!strcasecmp(value, "On")) {
+- cfg->h2_direct = 1;
++ CONFIG_CMD_SET(cmd, dirconf, H2_CONF_DIRECT, 1);
+ return NULL;
+ }
+ else if (!strcasecmp(value, "Off")) {
+- cfg->h2_direct = 0;
++ CONFIG_CMD_SET(cmd, dirconf, H2_CONF_DIRECT, 0);
+ return NULL;
+ }
+-
+- (void)arg;
+ return "value must be On or Off";
+ }
+
+-static const char *h2_conf_set_push(cmd_parms *parms,
+- void *arg, const char *value)
++static const char *h2_conf_set_push(cmd_parms *cmd, void *dirconf, const char *value)
+ {
+- h2_config *cfg = (h2_config *)h2_config_sget(parms->server);
+ if (!strcasecmp(value, "On")) {
+- cfg->h2_push = 1;
++ CONFIG_CMD_SET(cmd, dirconf, H2_CONF_PUSH, 1);
+ return NULL;
+ }
+ else if (!strcasecmp(value, "Off")) {
+- cfg->h2_push = 0;
++ CONFIG_CMD_SET(cmd, dirconf, H2_CONF_PUSH, 0);
+ return NULL;
+ }
+-
+- (void)arg;
+ return "value must be On or Off";
+ }
+
+@@ -419,7 +721,7 @@
+ else if (!strcasecmp("BEFORE", sdependency)) {
+ dependency = H2_DEPENDANT_BEFORE;
+ if (sweight) {
+- return "dependency 'Before' does not allow a weight";
++ return "dependecy 'Before' does not allow a weight";
+ }
+ }
+ else if (!strcasecmp("INTERLEAVED", sdependency)) {
+@@ -447,100 +749,88 @@
+ return NULL;
+ }
+
+-static const char *h2_conf_set_modern_tls_only(cmd_parms *parms,
+- void *arg, const char *value)
++static const char *h2_conf_set_modern_tls_only(cmd_parms *cmd,
++ void *dirconf, const char *value)
+ {
+- h2_config *cfg = (h2_config *)h2_config_sget(parms->server);
+ if (!strcasecmp(value, "On")) {
+- cfg->modern_tls_only = 1;
++ CONFIG_CMD_SET(cmd, dirconf, H2_CONF_MODERN_TLS_ONLY, 1);
+ return NULL;
+ }
+ else if (!strcasecmp(value, "Off")) {
+- cfg->modern_tls_only = 0;
++ CONFIG_CMD_SET(cmd, dirconf, H2_CONF_MODERN_TLS_ONLY, 0);
+ return NULL;
+ }
+-
+- (void)arg;
+ return "value must be On or Off";
+ }
+
+-static const char *h2_conf_set_upgrade(cmd_parms *parms,
+- void *arg, const char *value)
++static const char *h2_conf_set_upgrade(cmd_parms *cmd,
++ void *dirconf, const char *value)
+ {
+- h2_config *cfg = (h2_config *)h2_config_sget(parms->server);
+ if (!strcasecmp(value, "On")) {
+- cfg->h2_upgrade = 1;
++ CONFIG_CMD_SET(cmd, dirconf, H2_CONF_UPGRADE, 1);
+ return NULL;
+ }
+ else if (!strcasecmp(value, "Off")) {
+- cfg->h2_upgrade = 0;
++ CONFIG_CMD_SET(cmd, dirconf, H2_CONF_UPGRADE, 0);
+ return NULL;
+ }
+-
+- (void)arg;
+ return "value must be On or Off";
+ }
+
+-static const char *h2_conf_set_tls_warmup_size(cmd_parms *parms,
+- void *arg, const char *value)
++static const char *h2_conf_set_tls_warmup_size(cmd_parms *cmd,
++ void *dirconf, const char *value)
+ {
+- h2_config *cfg = (h2_config *)h2_config_sget(parms->server);
+- cfg->tls_warmup_size = apr_atoi64(value);
+- (void)arg;
++ apr_int64_t val = apr_atoi64(value);
++ CONFIG_CMD_SET64(cmd, dirconf, H2_CONF_TLS_WARMUP_SIZE, val);
+ return NULL;
+ }
+
+-static const char *h2_conf_set_tls_cooldown_secs(cmd_parms *parms,
+- void *arg, const char *value)
++static const char *h2_conf_set_tls_cooldown_secs(cmd_parms *cmd,
++ void *dirconf, const char *value)
+ {
+- h2_config *cfg = (h2_config *)h2_config_sget(parms->server);
+- cfg->tls_cooldown_secs = (int)apr_atoi64(value);
+- (void)arg;
++ apr_int64_t val = (int)apr_atoi64(value);
++ CONFIG_CMD_SET64(cmd, dirconf, H2_CONF_TLS_COOLDOWN_SECS, val);
+ return NULL;
+ }
+
+-static const char *h2_conf_set_push_diary_size(cmd_parms *parms,
+- void *arg, const char *value)
++static const char *h2_conf_set_push_diary_size(cmd_parms *cmd,
++ void *dirconf, const char *value)
+ {
+- h2_config *cfg = (h2_config *)h2_config_sget(parms->server);
+- (void)arg;
+- cfg->push_diary_size = (int)apr_atoi64(value);
+- if (cfg->push_diary_size < 0) {
++ int val = (int)apr_atoi64(value);
++ if (val < 0) {
+ return "value must be >= 0";
+ }
+- if (cfg->push_diary_size > 0 && (cfg->push_diary_size & (cfg->push_diary_size-1))) {
++ if (val > 0 && (val & (val-1))) {
+ return "value must a power of 2";
+ }
+- if (cfg->push_diary_size > (1 << 15)) {
++ if (val > (1 << 15)) {
+ return "value must <= 65536";
+ }
++ CONFIG_CMD_SET(cmd, dirconf, H2_CONF_PUSH_DIARY_SIZE, val);
+ return NULL;
+ }
+
+-static const char *h2_conf_set_copy_files(cmd_parms *parms,
+- void *arg, const char *value)
++static const char *h2_conf_set_copy_files(cmd_parms *cmd,
++ void *dirconf, const char *value)
+ {
+- h2_config *cfg = (h2_config *)arg;
+ if (!strcasecmp(value, "On")) {
+- cfg->copy_files = 1;
++ CONFIG_CMD_SET(cmd, dirconf, H2_CONF_COPY_FILES, 1);
+ return NULL;
+ }
+ else if (!strcasecmp(value, "Off")) {
+- cfg->copy_files = 0;
++ CONFIG_CMD_SET(cmd, dirconf, H2_CONF_COPY_FILES, 0);
+ return NULL;
+ }
+-
+- (void)arg;
+ return "value must be On or Off";
+ }
+
+-static void add_push(apr_pool_t *pool, h2_config *conf, h2_push_res *push)
++static void add_push(apr_array_header_t **plist, apr_pool_t *pool, h2_push_res *push)
+ {
+ h2_push_res *new;
+- if (!conf->push_list) {
+- conf->push_list = apr_array_make(pool, 10, sizeof(*push));
++ if (!*plist) {
++ *plist = apr_array_make(pool, 10, sizeof(*push));
+ }
+- new = apr_array_push(conf->push_list);
++ new = apr_array_push(*plist);
+ new->uri_ref = push->uri_ref;
+ new->critical = push->critical;
+ }
+@@ -549,8 +839,6 @@
+ const char *arg1, const char *arg2,
+ const char *arg3)
+ {
+- h2_config *dconf = (h2_config*)dirconf ;
+- h2_config *sconf = (h2_config*)h2_config_sget(cmd->server);
+ h2_push_res push;
+ const char *last = arg3;
+
+@@ -575,42 +863,54 @@
+ }
+ }
+
+- /* server command? set both */
+- if (cmd->path == NULL) {
+- add_push(cmd->pool, sconf, &push);
+- add_push(cmd->pool, dconf, &push);
++ if (cmd->path) {
++ add_push(&(((h2_dir_config*)dirconf)->push_list), cmd->pool, &push);
+ }
+ else {
+- add_push(cmd->pool, dconf, &push);
++ add_push(&(h2_config_sget(cmd->server)->push_list), cmd->pool, &push);
+ }
++ return NULL;
++}
+
++static const char *h2_conf_set_early_hints(cmd_parms *cmd,
++ void *dirconf, const char *value)
++{
++ int val;
++
++ if (!strcasecmp(value, "On")) val = 1;
++ else if (!strcasecmp(value, "Off")) val = 0;
++ else return "value must be On or Off";
++
++ CONFIG_CMD_SET(cmd, dirconf, H2_CONF_EARLY_HINTS, val);
++ if (cmd->path) {
++ ap_log_perror(APLOG_MARK, APLOG_WARNING, 0, cmd->pool,
++ "H2EarlyHints = %d on path %s", val, cmd->path);
++ }
+ return NULL;
+ }
+
+-static const char *h2_conf_set_early_hints(cmd_parms *parms,
+- void *arg, const char *value)
++static const char *h2_conf_set_padding(cmd_parms *cmd, void *dirconf, const char *value)
+ {
+- h2_config *cfg = (h2_config *)h2_config_sget(parms->server);
+- if (!strcasecmp(value, "On")) {
+- cfg->early_hints = 1;
+- return NULL;
++ int val;
++
++ val = (int)apr_atoi64(value);
++ if (val < 0) {
++ return "number of bits must be >= 0";
+ }
+- else if (!strcasecmp(value, "Off")) {
+- cfg->early_hints = 0;
+- return NULL;
++ if (val > 8) {
++ return "number of bits must be <= 8";
+ }
+-
+- (void)arg;
+- return "value must be On or Off";
++ CONFIG_CMD_SET(cmd, dirconf, H2_CONF_PADDING_BITS, val);
++ return NULL;
+ }
+
++
+ void h2_get_num_workers(server_rec *s, int *minw, int *maxw)
+ {
+ int threads_per_child = 0;
+- const h2_config *config = h2_config_sget(s);
+
+- *minw = h2_config_geti(config, H2_CONF_MIN_WORKERS);
+- *maxw = h2_config_geti(config, H2_CONF_MAX_WORKERS);
++ *minw = h2_config_sgeti(s, H2_CONF_MIN_WORKERS);
++ *maxw = h2_config_sgeti(s, H2_CONF_MAX_WORKERS);
+ ap_mpm_query(AP_MPMQ_MAX_THREADS, &threads_per_child);
+
+ if (*minw <= 0) {
+@@ -652,7 +952,7 @@
+ AP_INIT_TAKE1("H2ModernTLSOnly", h2_conf_set_modern_tls_only, NULL,
+ RSRC_CONF, "off to not impose RFC 7540 restrictions on TLS"),
+ AP_INIT_TAKE1("H2Upgrade", h2_conf_set_upgrade, NULL,
+- RSRC_CONF, "on to allow HTTP/1 Upgrades to h2/h2c"),
++ RSRC_CONF|OR_AUTHCFG, "on to allow HTTP/1 Upgrades to h2/h2c"),
+ AP_INIT_TAKE1("H2Direct", h2_conf_set_direct, NULL,
+ RSRC_CONF, "on to enable direct HTTP/2 mode"),
+ AP_INIT_TAKE1("H2SessionExtraFiles", h2_conf_set_session_extra_files, NULL,
+@@ -662,7 +962,7 @@
+ AP_INIT_TAKE1("H2TLSCoolDownSecs", h2_conf_set_tls_cooldown_secs, NULL,
+ RSRC_CONF, "seconds of idle time on TLS before shrinking writes"),
+ AP_INIT_TAKE1("H2Push", h2_conf_set_push, NULL,
+- RSRC_CONF, "off to disable HTTP/2 server push"),
++ RSRC_CONF|OR_AUTHCFG, "off to disable HTTP/2 server push"),
+ AP_INIT_TAKE23("H2PushPriority", h2_conf_add_push_priority, NULL,
+ RSRC_CONF, "define priority of PUSHed resources per content type"),
+ AP_INIT_TAKE1("H2PushDiarySize", h2_conf_set_push_diary_size, NULL,
+@@ -670,33 +970,12 @@
+ AP_INIT_TAKE1("H2CopyFiles", h2_conf_set_copy_files, NULL,
+ OR_FILEINFO, "on to perform copy of file data"),
+ AP_INIT_TAKE123("H2PushResource", h2_conf_add_push_res, NULL,
+- OR_FILEINFO, "add a resource to be pushed in this location/on this server."),
++ OR_FILEINFO|OR_AUTHCFG, "add a resource to be pushed in this location/on this server."),
+ AP_INIT_TAKE1("H2EarlyHints", h2_conf_set_early_hints, NULL,
+ RSRC_CONF, "on to enable interim status 103 responses"),
++ AP_INIT_TAKE1("H2Padding", h2_conf_set_padding, NULL,
++ RSRC_CONF, "set payload padding"),
+ AP_END_CMD
+ };
+
+
+-const h2_config *h2_config_rget(request_rec *r)
+-{
+- h2_config *cfg = (h2_config *)ap_get_module_config(r->per_dir_config,
+- &http2_module);
+- return cfg? cfg : h2_config_sget(r->server);
+-}
+-
+-const h2_config *h2_config_get(conn_rec *c)
+-{
+- h2_ctx *ctx = h2_ctx_get(c, 0);
+-
+- if (ctx) {
+- if (ctx->config) {
+- return ctx->config;
+- }
+- else if (ctx->server) {
+- ctx->config = h2_config_sget(ctx->server);
+- return ctx->config;
+- }
+- }
+-
+- return h2_config_sget(c->base_server);
+-}
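+
+The net effect of the h2_config.c rework above is a two-level lookup:
+per-directory values (h2_dir_config) override per-server values (h2_config),
+with DEF_VAL marking "not set at this level". A minimal standalone sketch of
+that precedence rule, assuming DEF_VAL is (-1) as defined in h2_config.c;
+the two lookup functions are hypothetical stand-ins, not module code:
+
+    #include <stdio.h>
+
+    #define DEF_VAL (-1) /* sentinel: "not configured at this level" */
+
+    /* hypothetical lookups; the real code reads h2_dir_config / h2_config */
+    static long dir_lookup(int var)    { (void)var; return DEF_VAL; }
+    static long server_lookup(int var) { (void)var; return 100; }
+
+    /* mirrors h2_config_geti64(): the directory value wins when present */
+    static long effective_value(int var)
+    {
+        long v = dir_lookup(var);
+        return (v != DEF_VAL)? v : server_lookup(var);
+    }
+
+    int main(void)
+    {
+        printf("effective: %ld\n", effective_value(0)); /* prints 100 */
+        return 0;
+    }
+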
+--- a/modules/http2/h2_config.h
++++ b/modules/http2/h2_config.h
+@@ -42,6 +42,8 @@
+ H2_CONF_PUSH_DIARY_SIZE,
+ H2_CONF_COPY_FILES,
+ H2_CONF_EARLY_HINTS,
++ H2_CONF_PADDING_BITS,
++ H2_CONF_PADDING_ALWAYS,
+ } h2_config_var_t;
+
+ struct apr_hash_t;
+@@ -53,33 +55,6 @@
+ int critical;
+ } h2_push_res;
+
+-/* Apache httpd module configuration for h2. */
+-typedef struct h2_config {
+- const char *name;
+- int h2_max_streams; /* max concurrent # streams (http2) */
+- int h2_window_size; /* stream window size (http2) */
+- int min_workers; /* min # of worker threads/child */
+- int max_workers; /* max # of worker threads/child */
+- int max_worker_idle_secs; /* max # of idle seconds for worker */
+- int stream_max_mem_size; /* max # bytes held in memory/stream */
+- apr_array_header_t *alt_svcs; /* h2_alt_svc specs for this server */
+- int alt_svc_max_age; /* seconds clients can rely on alt-svc info*/
+- int serialize_headers; /* Use serialized HTTP/1.1 headers for
+- processing, better compatibility */
+- int h2_direct; /* if mod_h2 is active directly */
+- int modern_tls_only; /* Accept only modern TLS in HTTP/2 connections */
+- int h2_upgrade; /* Allow HTTP/1 upgrade to h2/h2c */
+- apr_int64_t tls_warmup_size; /* Amount of TLS data to send before going full write size */
+- int tls_cooldown_secs; /* Seconds of idle time before going back to small TLS records */
+- int h2_push; /* if HTTP/2 server push is enabled */
+- struct apr_hash_t *priorities;/* map of content-type to h2_priority records */
+-
+- int push_diary_size; /* # of entries in push diary */
+- int copy_files; /* if files shall be copied vs setaside on output */
+- apr_array_header_t *push_list;/* list of h2_push_res configurations */
+- int early_hints; /* support status code 103 */
+-} h2_config;
+-
+
+ void *h2_config_create_dir(apr_pool_t *pool, char *x);
+ void *h2_config_merge_dir(apr_pool_t *pool, void *basev, void *addv);
+@@ -88,19 +63,37 @@
+
+ extern const command_rec h2_cmds[];
+
+-const h2_config *h2_config_get(conn_rec *c);
+-const h2_config *h2_config_sget(server_rec *s);
+-const h2_config *h2_config_rget(request_rec *r);
++int h2_config_geti(request_rec *r, server_rec *s, h2_config_var_t var);
++apr_int64_t h2_config_geti64(request_rec *r, server_rec *s, h2_config_var_t var);
+
+-int h2_config_geti(const h2_config *conf, h2_config_var_t var);
+-apr_int64_t h2_config_geti64(const h2_config *conf, h2_config_var_t var);
++/**
++ * Get the configured value for variable <var> at the given connection.
++ */
++int h2_config_cgeti(conn_rec *c, h2_config_var_t var);
++apr_int64_t h2_config_cgeti64(conn_rec *c, h2_config_var_t var);
++
++/**
++ * Get the configured value for variable <var> at the given server.
++ */
++int h2_config_sgeti(server_rec *s, h2_config_var_t var);
++apr_int64_t h2_config_sgeti64(server_rec *s, h2_config_var_t var);
++
++/**
++ * Get the configured value for variable <var> at the given request,
++ * if configured for the request location.
++ * Falls back to the request's server config otherwise.
++ */
++int h2_config_rgeti(request_rec *r, h2_config_var_t var);
++apr_int64_t h2_config_rgeti64(request_rec *r, h2_config_var_t var);
+
+-void h2_get_num_workers(server_rec *s, int *minw, int *maxw);
++apr_array_header_t *h2_config_push_list(request_rec *r);
++apr_array_header_t *h2_config_alt_svcs(request_rec *r);
+
++
++void h2_get_num_workers(server_rec *s, int *minw, int *maxw);
+ void h2_config_init(apr_pool_t *pool);
+
+-const struct h2_priority *h2_config_get_priority(const h2_config *conf,
+- const char *content_type);
++const struct h2_priority *h2_cconfig_get_priority(conn_rec *c, const char *content_type);
+
+ #endif /* __mod_h2__h2_config_h__ */
+
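+
+For callers, the header rework above replaces the old "fetch an h2_config,
+then geti(conf, var)" pattern with getters keyed on the lookup context. A
+sketch of the intended call sites, using only the declarations above (not a
+complete module):
+
+    #include <httpd.h>
+    #include "h2_config.h"
+
+    static void example(request_rec *r, conn_rec *c, server_rec *s)
+    {
+        /* request scope: per-directory value, server value as fallback */
+        int push = h2_config_rgeti(r, H2_CONF_PUSH);
+        /* server scope: virtual-host value only */
+        int max_streams = h2_config_sgeti(s, H2_CONF_MAX_STREAMS);
+        /* connection scope: uses the server selected for the connection */
+        int direct = h2_config_cgeti(c, H2_CONF_DIRECT);
+        (void)push; (void)max_streams; (void)direct;
+    }
+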
+--- a/modules/http2/h2_conn.c
++++ b/modules/http2/h2_conn.c
+@@ -18,6 +18,7 @@
+ #include <apr_strings.h>
+
+ #include <ap_mpm.h>
++#include <ap_mmn.h>
+
+ #include <httpd.h>
+ #include <http_core.h>
+@@ -79,7 +80,7 @@
+ mpm_type = H2_MPM_PREFORK;
+ mpm_module = m;
+ /* While http2 can work really well on prefork, it collides
+- * today's use case for prefork: runnning single-thread app engines
++ * today's use case for prefork: running single-thread app engines
+ * like php. If we restrict h2_workers to 1 per process, php will
+ * work fine, but browser will be limited to 1 active request at a
+ * time. */
+@@ -109,7 +110,6 @@
+
+ apr_status_t h2_conn_child_init(apr_pool_t *pool, server_rec *s)
+ {
+- const h2_config *config = h2_config_sget(s);
+ apr_status_t status = APR_SUCCESS;
+ int minw, maxw;
+ int max_threads_per_child = 0;
+@@ -129,7 +129,7 @@
+
+ h2_get_num_workers(s, &minw, &maxw);
+
+- idle_secs = h2_config_geti(config, H2_CONF_MAX_WORKER_IDLE_SECS);
++ idle_secs = h2_config_sgeti(s, H2_CONF_MAX_WORKER_IDLE_SECS);
+ ap_log_error(APLOG_MARK, APLOG_TRACE3, 0, s,
+ "h2_workers: min=%d max=%d, mthrpchild=%d, idle_secs=%d",
+ minw, maxw, max_threads_per_child, idle_secs);
+@@ -138,7 +138,7 @@
+ ap_register_input_filter("H2_IN", h2_filter_core_input,
+ NULL, AP_FTYPE_CONNECTION);
+
+- status = h2_mplx_child_init(pool, s);
++ status = h2_mplx_m_child_init(pool, s);
+
+ if (status == APR_SUCCESS) {
+ status = apr_socket_create(&dummy_socket, APR_INET, SOCK_STREAM,
+@@ -172,9 +172,10 @@
+ return mpm_module;
+ }
+
+-apr_status_t h2_conn_setup(h2_ctx *ctx, conn_rec *c, request_rec *r)
++apr_status_t h2_conn_setup(conn_rec *c, request_rec *r, server_rec *s)
+ {
+ h2_session *session;
++ h2_ctx *ctx;
+ apr_status_t status;
+
+ if (!workers) {
+@@ -183,24 +184,25 @@
+ return APR_EGENERAL;
+ }
+
+- if (r) {
+- status = h2_session_rcreate(&session, r, ctx, workers);
+- }
+- else {
+- status = h2_session_create(&session, c, ctx, workers);
+- }
+-
+- if (status == APR_SUCCESS) {
++ if (APR_SUCCESS == (status = h2_session_create(&session, c, r, s, workers))) {
++ ctx = h2_ctx_get(c, 1);
+ h2_ctx_session_set(ctx, session);
++
++ /* remove the input filter of mod_reqtimeout, now that the connection
++ * is established and we have switched to h2. reqtimeout has supervised
++ * possibly configured handshake timeouts and needs to get out of the way
++ * now, since the rest of its state handling assumes http/1.x traffic. */
++ ap_remove_input_filter_byhandle(c->input_filters, "reqtimeout");
+ }
++
+ return status;
+ }
+
+-apr_status_t h2_conn_run(struct h2_ctx *ctx, conn_rec *c)
++apr_status_t h2_conn_run(conn_rec *c)
+ {
+ apr_status_t status;
+ int mpm_state = 0;
+- h2_session *session = h2_ctx_session_get(ctx);
++ h2_session *session = h2_ctx_get_session(c);
+
+ ap_assert(session);
+ do {
+@@ -235,6 +237,13 @@
+ case H2_SESSION_ST_BUSY:
+ case H2_SESSION_ST_WAIT:
+ c->cs->state = CONN_STATE_WRITE_COMPLETION;
++ if (c->cs && (session->open_streams || !session->remote.emitted_count)) {
++ /* let the MPM know that we are not done and want
++ * the Timeout behaviour instead of a KeepAliveTimeout
++ * See PR 63534.
++ */
++ c->cs->sense = CONN_SENSE_WANT_READ;
++ }
+ break;
+ case H2_SESSION_ST_CLEANUP:
+ case H2_SESSION_ST_DONE:
+@@ -249,7 +258,7 @@
+
+ apr_status_t h2_conn_pre_close(struct h2_ctx *ctx, conn_rec *c)
+ {
+- h2_session *session = h2_ctx_session_get(ctx);
++ h2_session *session = h2_ctx_get_session(c);
+ if (session) {
+ apr_status_t status = h2_session_pre_close(session, async_mpm);
+ return (status == APR_SUCCESS)? DONE : status;
+@@ -257,7 +266,7 @@
+ return DONE;
+ }
+
+-conn_rec *h2_slave_create(conn_rec *master, int slave_id, apr_pool_t *parent)
++conn_rec *h2_secondary_create(conn_rec *master, int sec_id, apr_pool_t *parent)
+ {
+ apr_allocator_t *allocator;
+ apr_status_t status;
+@@ -268,11 +277,11 @@
+
+ ap_assert(master);
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE3, 0, master,
+- "h2_stream(%ld-%d): create slave", master->id, slave_id);
++ "h2_stream(%ld-%d): create secondary", master->id, sec_id);
+
+ /* We create a pool with its own allocator to be used for
+ * processing a request. This is the only way to have the processing
+- * independant of its parent pool in the sense that it can work in
++ * independent of its parent pool in the sense that it can work in
+ * another thread. Also, the new allocator needs its own mutex to
+ * synchronize sub-pools.
+ */
+@@ -281,18 +290,18 @@
+ status = apr_pool_create_ex(&pool, parent, NULL, allocator);
+ if (status != APR_SUCCESS) {
+ ap_log_cerror(APLOG_MARK, APLOG_ERR, status, master,
+- APLOGNO(10004) "h2_session(%ld-%d): create slave pool",
+- master->id, slave_id);
++ APLOGNO(10004) "h2_session(%ld-%d): create secondary pool",
++ master->id, sec_id);
+ return NULL;
+ }
+ apr_allocator_owner_set(allocator, pool);
+- apr_pool_tag(pool, "h2_slave_conn");
++ apr_pool_tag(pool, "h2_secondary_conn");
+
+ c = (conn_rec *) apr_palloc(pool, sizeof(conn_rec));
+ if (c == NULL) {
+ ap_log_cerror(APLOG_MARK, APLOG_ERR, APR_ENOMEM, master,
+- APLOGNO(02913) "h2_session(%ld-%d): create slave",
+- master->id, slave_id);
++ APLOGNO(02913) "h2_session(%ld-%d): create secondary",
++ master->id, sec_id);
+ apr_pool_destroy(pool);
+ return NULL;
+ }
+@@ -310,26 +319,28 @@
+ c->filter_conn_ctx = NULL;
+ #endif
+ c->bucket_alloc = apr_bucket_alloc_create(pool);
++#if !AP_MODULE_MAGIC_AT_LEAST(20180720, 1)
+ c->data_in_input_filters = 0;
+ c->data_in_output_filters = 0;
++#endif
+ /* prevent mpm_event from making wrong assumptions about this connection,
+ * like e.g. using its socket for an async read check. */
+ c->clogging_input_filters = 1;
+ c->log = NULL;
+ c->log_id = apr_psprintf(pool, "%ld-%d",
+- master->id, slave_id);
++ master->id, sec_id);
+ c->aborted = 0;
+- /* We cannot install the master connection socket on the slaves, as
++ /* We cannot install the master connection socket on the secondary, as
+ * modules mess with timeouts/blocking of the socket, with
+ * unwanted side effects to the master connection processing.
+- * Fortunately, since we never use the slave socket, we can just install
++ * Fortunately, since we never use the secondary socket, we can just install
+ * a single, process-wide dummy and everyone is happy.
+ */
+ ap_set_module_config(c->conn_config, &core_module, dummy_socket);
+ /* TODO: these should be unique to this thread */
+ c->sbh = master->sbh;
+- /* TODO: not all mpm modules have learned about slave connections yet.
+- * copy their config from master to slave.
++ /* TODO: not all mpm modules have learned about secondary connections yet.
++ * copy their config from master to secondary.
+ */
+ if ((mpm = h2_conn_mpm_module()) != NULL) {
+ cfg = ap_get_module_config(master->conn_config, mpm);
+@@ -337,38 +348,38 @@
+ }
+
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE3, 0, c,
+- "h2_slave(%s): created", c->log_id);
++ "h2_secondary(%s): created", c->log_id);
+ return c;
+ }
+
+-void h2_slave_destroy(conn_rec *slave)
++void h2_secondary_destroy(conn_rec *secondary)
+ {
+- ap_log_cerror(APLOG_MARK, APLOG_TRACE3, 0, slave,
+- "h2_slave(%s): destroy", slave->log_id);
+- slave->sbh = NULL;
+- apr_pool_destroy(slave->pool);
++ ap_log_cerror(APLOG_MARK, APLOG_TRACE3, 0, secondary,
++ "h2_secondary(%s): destroy", secondary->log_id);
++ secondary->sbh = NULL;
++ apr_pool_destroy(secondary->pool);
+ }
+
+-apr_status_t h2_slave_run_pre_connection(conn_rec *slave, apr_socket_t *csd)
++apr_status_t h2_secondary_run_pre_connection(conn_rec *secondary, apr_socket_t *csd)
+ {
+- if (slave->keepalives == 0) {
++ if (secondary->keepalives == 0) {
+ /* Simulate that we had already a request on this connection. Some
+ * hooks trigger special behaviour when keepalives is 0.
+ * (Not necessarily in pre_connection, but later. Set it here, so it
+ * is in place.) */
+- slave->keepalives = 1;
++ secondary->keepalives = 1;
+ /* We signal that this connection will be closed after the request.
+ * Which is true in that sense that we throw away all traffic data
+- * on this slave connection after each requests. Although we might
++ * on this secondary connection after each request. Although we might
+ * reuse internal structures like memory pools.
+ * The wanted effect of this is that httpd does not try to clean up
+ * any dangling data on this connection when a request is done. Which
+- * is unneccessary on a h2 stream.
++ * is unnecessary on an h2 stream.
+ */
+- slave->keepalive = AP_CONN_CLOSE;
+- return ap_run_pre_connection(slave, csd);
++ secondary->keepalive = AP_CONN_CLOSE;
++ return ap_run_pre_connection(secondary, csd);
+ }
+- ap_assert(slave->output_filters);
++ ap_assert(secondary->output_filters);
+ return APR_SUCCESS;
+ }
+
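+
+The pool handling in h2_secondary_create() above is the crux of thread
+independence: the secondary connection gets a pool with its own allocator
+(and, per the comment, its own mutex) so a worker thread can create
+sub-pools without racing the master connection. A reduced sketch of that
+setup with error handling trimmed, using only the documented APR
+allocator/pool API; not the function as shipped:
+
+    #include <apr_pools.h>
+    #include <apr_thread_mutex.h>
+
+    static apr_pool_t *create_detached_pool(apr_pool_t *parent)
+    {
+        apr_allocator_t *allocator;
+        apr_thread_mutex_t *mutex;
+        apr_pool_t *pool;
+
+        if (apr_allocator_create(&allocator) != APR_SUCCESS)
+            return NULL;
+        if (apr_pool_create_ex(&pool, parent, NULL, allocator) != APR_SUCCESS) {
+            apr_allocator_destroy(allocator);
+            return NULL;
+        }
+        apr_allocator_owner_set(allocator, pool); /* pool now owns allocator */
+        if (apr_thread_mutex_create(&mutex, APR_THREAD_MUTEX_DEFAULT,
+                                    pool) == APR_SUCCESS) {
+            apr_allocator_mutex_set(allocator, mutex);
+        }
+        apr_pool_tag(pool, "h2_secondary_conn");
+        return pool;
+    }
+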
+--- a/modules/http2/h2_conn.h
++++ b/modules/http2/h2_conn.h
+@@ -23,21 +23,21 @@
+ /**
+ * Setup the connection and our context for HTTP/2 processing
+ *
+- * @param ctx the http2 context to setup
+ * @param c the connection HTTP/2 is starting on
+ * @param r the upgrade request that still awaits an answer, optional
++ * @param s the server selected for this connection (can be != c->base_server)
+ */
+-apr_status_t h2_conn_setup(struct h2_ctx *ctx, conn_rec *c, request_rec *r);
++apr_status_t h2_conn_setup(conn_rec *c, request_rec *r, server_rec *s);
+
+ /**
+ * Run the HTTP/2 connection in synchronous fashion.
+ * Return when the HTTP/2 session is done
+ * and the connection will close or a fatal error occurred.
+ *
+- * @param ctx the http2 context to run
++ * @param c the http2 connection to run
+ * @return APR_SUCCESS when session is done.
+ */
+-apr_status_t h2_conn_run(struct h2_ctx *ctx, conn_rec *c);
++apr_status_t h2_conn_run(conn_rec *c);
+
+ /**
+ * The connection is about to close. If we have not send a GOAWAY
+@@ -68,10 +68,10 @@
+ const char *h2_conn_mpm_name(void);
+ int h2_mpm_supported(void);
+
+-conn_rec *h2_slave_create(conn_rec *master, int slave_id, apr_pool_t *parent);
+-void h2_slave_destroy(conn_rec *slave);
++conn_rec *h2_secondary_create(conn_rec *master, int sec_id, apr_pool_t *parent);
++void h2_secondary_destroy(conn_rec *secondary);
+
+-apr_status_t h2_slave_run_pre_connection(conn_rec *slave, apr_socket_t *csd);
+-void h2_slave_run_connection(conn_rec *slave);
++apr_status_t h2_secondary_run_pre_connection(conn_rec *secondary, apr_socket_t *csd);
++void h2_secondary_run_connection(conn_rec *secondary);
+
+ #endif /* defined(__mod_h2__h2_conn__) */
+--- a/modules/http2/h2_conn_io.c
++++ b/modules/http2/h2_conn_io.c
+@@ -40,12 +40,17 @@
+ * ~= 1300 bytes */
+ #define WRITE_SIZE_INITIAL 1300
+
+-/* Calculated like this: max TLS record size 16*1024
+- * - 40 (IP) - 20 (TCP) - 40 (TCP options)
+- * - TLS overhead (60-100)
+- * which seems to create less TCP packets overall
++/* The maximum we'd like to write in one chunk is
++ * the max size of a TLS record. When pushing
++ * many frames down the h2 connection, this might
++ * align differently because of headers and other
++ * frames, or simply because not enough data is
++ * available in a response body.
++ * However, keeping frames at or below this limit
++ * should make optimizations at the layer that writes
++ * to TLS easier.
+ */
+-#define WRITE_SIZE_MAX (TLS_DATA_MAX - 100)
++#define WRITE_SIZE_MAX (TLS_DATA_MAX)
+
+
+ static void h2_conn_io_bb_log(conn_rec *c, int stream_id, int level,
+@@ -123,21 +128,20 @@
+
+ }
+
+-apr_status_t h2_conn_io_init(h2_conn_io *io, conn_rec *c,
+- const h2_config *cfg)
++apr_status_t h2_conn_io_init(h2_conn_io *io, conn_rec *c, server_rec *s)
+ {
+ io->c = c;
+ io->output = apr_brigade_create(c->pool, c->bucket_alloc);
+ io->is_tls = h2_h2_is_tls(c);
+ io->buffer_output = io->is_tls;
+- io->flush_threshold = (apr_size_t)h2_config_geti64(cfg, H2_CONF_STREAM_MAX_MEM);
++ io->flush_threshold = (apr_size_t)h2_config_sgeti64(s, H2_CONF_STREAM_MAX_MEM);
+
+ if (io->is_tls) {
+ /* This is what we start with,
+ * see https://issues.apache.org/jira/browse/TS-2503
+ */
+- io->warmup_size = h2_config_geti64(cfg, H2_CONF_TLS_WARMUP_SIZE);
+- io->cooldown_usecs = (h2_config_geti(cfg, H2_CONF_TLS_COOLDOWN_SECS)
++ io->warmup_size = h2_config_sgeti64(s, H2_CONF_TLS_WARMUP_SIZE);
++ io->cooldown_usecs = (h2_config_sgeti(s, H2_CONF_TLS_COOLDOWN_SECS)
+ * APR_USEC_PER_SEC);
+ io->write_size = (io->cooldown_usecs > 0?
+ WRITE_SIZE_INITIAL : WRITE_SIZE_MAX);
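+
+The warmup/cooldown values wired up above implement a simple TLS record
+sizing policy: write small records (WRITE_SIZE_INITIAL, roughly one TCP
+segment) until warmup_size bytes have gone out, then switch to full-size
+records, dropping back after an idle spell. The policy in isolation, as a
+sketch; the idle/written bookkeeping is assumed to exist elsewhere in
+h2_conn_io.c:
+
+    #include <apr_time.h>
+
+    #define WRITE_SIZE_INITIAL 1300        /* ~ one TCP segment of payload */
+    #define WRITE_SIZE_MAX     (16 * 1024) /* stand-in for TLS_DATA_MAX */
+
+    static apr_size_t next_write_size(apr_off_t bytes_written,
+                                      apr_off_t warmup_size,
+                                      apr_time_t idle_usecs,
+                                      apr_time_t cooldown_usecs)
+    {
+        if (cooldown_usecs > 0 && idle_usecs >= cooldown_usecs) {
+            return WRITE_SIZE_INITIAL; /* connection idled: warm up again */
+        }
+        return (bytes_written >= warmup_size)? WRITE_SIZE_MAX
+                                             : WRITE_SIZE_INITIAL;
+    }
+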
+--- a/modules/http2/h2_conn_io.h
++++ b/modules/http2/h2_conn_io.h
+@@ -48,8 +48,7 @@
+ apr_size_t slen;
+ } h2_conn_io;
+
+-apr_status_t h2_conn_io_init(h2_conn_io *io, conn_rec *c,
+- const struct h2_config *cfg);
++apr_status_t h2_conn_io_init(h2_conn_io *io, conn_rec *c, server_rec *s);
+
+ /**
+ * Append data to the buffered output.
+--- a/modules/http2/h2_ctx.c
++++ b/modules/http2/h2_ctx.c
+@@ -29,8 +29,8 @@
+ {
+ h2_ctx *ctx = apr_pcalloc(c->pool, sizeof(h2_ctx));
+ ap_assert(ctx);
++ h2_ctx_server_update(ctx, c->base_server);
+ ap_set_module_config(c->conn_config, &http2_module, ctx);
+- h2_ctx_server_set(ctx, c->base_server);
+ return ctx;
+ }
+
+@@ -79,8 +79,9 @@
+ return ctx;
+ }
+
+-h2_session *h2_ctx_session_get(h2_ctx *ctx)
++h2_session *h2_ctx_get_session(conn_rec *c)
+ {
++ h2_ctx *ctx = h2_ctx_get(c, 0);
+ return ctx? ctx->session : NULL;
+ }
+
+@@ -89,33 +90,17 @@
+ ctx->session = session;
+ }
+
+-server_rec *h2_ctx_server_get(h2_ctx *ctx)
++h2_ctx *h2_ctx_server_update(h2_ctx *ctx, server_rec *s)
+ {
+- return ctx? ctx->server : NULL;
+-}
+-
+-h2_ctx *h2_ctx_server_set(h2_ctx *ctx, server_rec *s)
+-{
+- ctx->server = s;
++ if (ctx->server != s) {
++ ctx->server = s;
++ }
+ return ctx;
+ }
+
+-int h2_ctx_is_task(h2_ctx *ctx)
+-{
+- return ctx && ctx->task;
+-}
+-
+-h2_task *h2_ctx_get_task(h2_ctx *ctx)
++h2_task *h2_ctx_get_task(conn_rec *c)
+ {
++ h2_ctx *ctx = h2_ctx_get(c, 0);
+ return ctx? ctx->task : NULL;
+ }
+
+-h2_task *h2_ctx_cget_task(conn_rec *c)
+-{
+- return h2_ctx_get_task(h2_ctx_get(c, 0));
+-}
+-
+-h2_task *h2_ctx_rget_task(request_rec *r)
+-{
+- return h2_ctx_get_task(h2_ctx_rget(r));
+-}
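+
+With the h2_ctx rework above, call sites no longer fetch a context object
+first; the remaining accessors take the conn_rec directly and return NULL
+when no context exists. A small usage sketch, assuming the declarations in
+h2_ctx.h below:
+
+    #include <httpd.h>
+    #include "h2_ctx.h"
+
+    /* a task is only attached to secondary (per-stream) connections */
+    static int is_h2_stream_connection(conn_rec *c)
+    {
+        return h2_ctx_get_task(c) != NULL;
+    }
+
+    /* NULL unless the connection actually switched to HTTP/2 */
+    static int has_h2_session(conn_rec *c)
+    {
+        return h2_ctx_get_session(c) != NULL;
+    }
+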
+--- a/modules/http2/h2_ctx.h
++++ b/modules/http2/h2_ctx.h
+@@ -56,12 +56,11 @@
+ */
+ h2_ctx *h2_ctx_protocol_set(h2_ctx *ctx, const char *proto);
+
+-/* Set the server_rec relevant for this context.
++/* Update the server_rec relevant for this context. A server for
++ * a connection may change during SNI handling, for example.
+ */
+-h2_ctx *h2_ctx_server_set(h2_ctx *ctx, server_rec *s);
+-server_rec *h2_ctx_server_get(h2_ctx *ctx);
++h2_ctx *h2_ctx_server_update(h2_ctx *ctx, server_rec *s);
+
+-struct h2_session *h2_ctx_session_get(h2_ctx *ctx);
+ void h2_ctx_session_set(h2_ctx *ctx, struct h2_session *session);
+
+ /**
+@@ -69,10 +68,8 @@
+ */
+ const char *h2_ctx_protocol_get(const conn_rec *c);
+
+-int h2_ctx_is_task(h2_ctx *ctx);
++struct h2_session *h2_ctx_get_session(conn_rec *c);
++struct h2_task *h2_ctx_get_task(conn_rec *c);
+
+-struct h2_task *h2_ctx_get_task(h2_ctx *ctx);
+-struct h2_task *h2_ctx_cget_task(conn_rec *c);
+-struct h2_task *h2_ctx_rget_task(request_rec *r);
+
+ #endif /* defined(__mod_h2__h2_ctx__) */
+--- a/modules/http2/h2_filter.c
++++ b/modules/http2/h2_filter.c
+@@ -54,6 +54,7 @@
+ const char *data;
+ ssize_t n;
+
++ (void)c;
+ status = apr_bucket_read(b, &data, &len, block);
+
+ while (status == APR_SUCCESS && len > 0) {
+@@ -71,10 +72,10 @@
+ }
+ else {
+ session->io.bytes_read += n;
+- if (len <= n) {
++ if ((apr_ssize_t)len <= n) {
+ break;
+ }
+- len -= n;
++ len -= (apr_size_t)n;
+ data += n;
+ }
+ }
+@@ -277,6 +278,7 @@
+ apr_bucket_brigade *dest,
+ const apr_bucket *src)
+ {
++ (void)beam;
+ if (H2_BUCKET_IS_OBSERVER(src)) {
+ h2_bucket_observer *l = (h2_bucket_observer *)src->data;
+ apr_bucket *b = h2_bucket_observer_create(dest->bucket_alloc,
+@@ -311,8 +313,7 @@
+ bbout(bb, " \"settings\": {\n");
+ bbout(bb, " \"SETTINGS_MAX_CONCURRENT_STREAMS\": %d,\n", m->max_streams);
+ bbout(bb, " \"SETTINGS_MAX_FRAME_SIZE\": %d,\n", 16*1024);
+- bbout(bb, " \"SETTINGS_INITIAL_WINDOW_SIZE\": %d,\n",
+- h2_config_geti(s->config, H2_CONF_WIN_SIZE));
++ bbout(bb, " \"SETTINGS_INITIAL_WINDOW_SIZE\": %d,\n", h2_config_sgeti(s->s, H2_CONF_WIN_SIZE));
+ bbout(bb, " \"SETTINGS_ENABLE_PUSH\": %d\n", h2_session_push_enabled(s));
+ bbout(bb, " }%s\n", last? "" : ",");
+ }
+@@ -369,7 +370,7 @@
+ x.s = s;
+ x.idx = 0;
+ bbout(bb, " \"streams\": {");
+- h2_mplx_stream_do(s->mplx, add_stream, &x);
++ h2_mplx_m_stream_do(s->mplx, add_stream, &x);
+ bbout(bb, "\n }%s\n", last? "" : ",");
+ }
+
+@@ -431,41 +432,38 @@
+
+ static apr_status_t h2_status_insert(h2_task *task, apr_bucket *b)
+ {
+- conn_rec *c = task->c->master;
+- h2_ctx *h2ctx = h2_ctx_get(c, 0);
+- h2_session *session;
+- h2_stream *stream;
++ h2_mplx *m = task->mplx;
++ h2_stream *stream = h2_mplx_t_stream_get(m, task);
++ h2_session *s;
++ conn_rec *c;
++
+ apr_bucket_brigade *bb;
+ apr_bucket *e;
+ int32_t connFlowIn, connFlowOut;
+
+-
+- if (!h2ctx || (session = h2_ctx_session_get(h2ctx)) == NULL) {
+- return APR_SUCCESS;
+- }
+-
+- stream = h2_session_stream_get(session, task->stream_id);
+ if (!stream) {
+ /* stream already done */
+ return APR_SUCCESS;
+ }
++ s = stream->session;
++ c = s->c;
+
+ bb = apr_brigade_create(stream->pool, c->bucket_alloc);
+
+- connFlowIn = nghttp2_session_get_effective_local_window_size(session->ngh2);
+- connFlowOut = nghttp2_session_get_remote_window_size(session->ngh2);
++ connFlowIn = nghttp2_session_get_effective_local_window_size(s->ngh2);
++ connFlowOut = nghttp2_session_get_remote_window_size(s->ngh2);
+
+ bbout(bb, "{\n");
+ bbout(bb, " \"version\": \"draft-01\",\n");
+- add_settings(bb, session, 0);
+- add_peer_settings(bb, session, 0);
++ add_settings(bb, s, 0);
++ add_peer_settings(bb, s, 0);
+ bbout(bb, " \"connFlowIn\": %d,\n", connFlowIn);
+ bbout(bb, " \"connFlowOut\": %d,\n", connFlowOut);
+- bbout(bb, " \"sentGoAway\": %d,\n", session->local.shutdown);
++ bbout(bb, " \"sentGoAway\": %d,\n", s->local.shutdown);
+
+- add_streams(bb, session, 0);
++ add_streams(bb, s, 0);
+
+- add_stats(bb, session, stream, 1);
++ add_stats(bb, s, stream, 1);
+ bbout(bb, "}\n");
+
+ while ((e = APR_BRIGADE_FIRST(bb)) != APR_BRIGADE_SENTINEL(bb)) {
+@@ -495,9 +493,54 @@
+ return APR_SUCCESS;
+ }
+
++static apr_status_t discard_body(request_rec *r, apr_off_t maxlen)
++{
++ apr_bucket_brigade *bb;
++ int seen_eos;
++ apr_status_t rv;
++
++ bb = apr_brigade_create(r->pool, r->connection->bucket_alloc);
++ seen_eos = 0;
++ do {
++ apr_bucket *bucket;
++
++ rv = ap_get_brigade(r->input_filters, bb, AP_MODE_READBYTES,
++ APR_BLOCK_READ, HUGE_STRING_LEN);
++
++ if (rv != APR_SUCCESS) {
++ apr_brigade_destroy(bb);
++ return rv;
++ }
++
++ for (bucket = APR_BRIGADE_FIRST(bb);
++ bucket != APR_BRIGADE_SENTINEL(bb);
++ bucket = APR_BUCKET_NEXT(bucket))
++ {
++ const char *data;
++ apr_size_t len;
++
++ if (APR_BUCKET_IS_EOS(bucket)) {
++ seen_eos = 1;
++ break;
++ }
++ if (bucket->length == 0) {
++ continue;
++ }
++ rv = apr_bucket_read(bucket, &data, &len, APR_BLOCK_READ);
++ if (rv != APR_SUCCESS) {
++ apr_brigade_destroy(bb);
++ return rv;
++ }
++ maxlen -= bucket->length;
++ }
++ apr_brigade_cleanup(bb);
++ } while (!seen_eos && maxlen >= 0);
++
++ return APR_SUCCESS;
++}
++
+ int h2_filter_h2_status_handler(request_rec *r)
+ {
+- h2_ctx *ctx = h2_ctx_rget(r);
+ conn_rec *c = r->connection;
+ h2_task *task;
+ apr_bucket_brigade *bb;
+@@ -511,10 +554,12 @@
+ return DECLINED;
+ }
+
+- task = ctx? h2_ctx_get_task(ctx) : NULL;
++ task = h2_ctx_get_task(r->connection);
+ if (task) {
+-
+- if ((status = ap_discard_request_body(r)) != OK) {
++ /* In this handler, we do some special sauce to send footers back,
++ * IFF we received footers in the request. This is used in our test
++ * cases, since CGI has no way of handling those. */
++ if ((status = discard_body(r, 1024)) != OK) {
+ return status;
+ }
+
+--- a/modules/http2/h2_from_h1.c
++++ b/modules/http2/h2_from_h1.c
+@@ -315,6 +315,7 @@
+ int http_status;
+ apr_array_header_t *hlines;
+ apr_bucket_brigade *tmp;
++ apr_bucket_brigade *saveto;
+ } h2_response_parser;
+
+ static apr_status_t parse_header(h2_response_parser *parser, char *line) {
+@@ -351,13 +352,17 @@
+ parser->tmp = apr_brigade_create(task->pool, task->c->bucket_alloc);
+ }
+ status = apr_brigade_split_line(parser->tmp, bb, APR_BLOCK_READ,
+- HUGE_STRING_LEN);
++ len);
+ if (status == APR_SUCCESS) {
+ --len;
+ status = apr_brigade_flatten(parser->tmp, line, &len);
+ if (status == APR_SUCCESS) {
+ /* we assume a non-0 containing line and remove trailing crlf. */
+ line[len] = '\0';
++ /*
++ * XXX: What to do if there is an LF but no CRLF?
++ * Should we error out?
++ */
+ if (len >= 2 && !strcmp(H2_CRLF, line + len - 2)) {
+ len -= 2;
+ line[len] = '\0';
+@@ -367,10 +372,47 @@
+ task->id, line);
+ }
+ else {
++ apr_off_t brigade_length;
++
++ /*
++ * If the brigade parser->tmp becomes longer than our buffer
++ * for flattening, we never have a chance to get a complete
++ * line. This can happen if we are called multiple times after
++ * previous calls did not find an H2_CRLF and we returned
++ * APR_EAGAIN. In this case parser->tmp (correctly) grows
++ * with each call to apr_brigade_split_line.
++ *
++ * XXX: Currently a stack based buffer of HUGE_STRING_LEN is
++ * used. This means we cannot cope with lines larger than
++ * HUGE_STRING_LEN which might be an issue.
++ */
++ status = apr_brigade_length(parser->tmp, 0, &brigade_length);
++ if ((status != APR_SUCCESS) || (brigade_length > len)) {
++ ap_log_cerror(APLOG_MARK, APLOG_ERR, 0, task->c, APLOGNO(10257)
++ "h2_task(%s): read response, line too long",
++ task->id);
++ return APR_ENOSPC;
++ }
+ /* this does not look like a complete line yet */
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, task->c,
+ "h2_task(%s): read response, incomplete line: %s",
+ task->id, line);
++ if (!parser->saveto) {
++ parser->saveto = apr_brigade_create(task->pool,
++ task->c->bucket_alloc);
++ }
++ /*
++ * Be on the safe side and save the parser->tmp brigade
++ * as it could contain transient buckets which could be
++ * invalid next time we are here.
++ *
++ * NULL for the filter parameter is ok since we
++ * provide our own brigade as second parameter
++ * and ap_save_brigade does not need to create one.
++ */
++ ap_save_brigade(NULL, &(parser->saveto), &(parser->tmp),
++ parser->tmp->p);
++ APR_BRIGADE_CONCAT(parser->tmp, parser->saveto);
+ return APR_EAGAIN;
+ }
+ }
+@@ -594,18 +636,20 @@
+ }
+ }
+
+- if (r->header_only) {
++ if (r->header_only || AP_STATUS_IS_HEADER_ONLY(r->status)) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, f->c,
+- "h2_task(%s): header_only, cleanup output brigade",
++ "h2_task(%s): headers only, cleanup output brigade",
+ task->id);
+ b = body_bucket? body_bucket : APR_BRIGADE_FIRST(bb);
+ while (b != APR_BRIGADE_SENTINEL(bb)) {
+ next = APR_BUCKET_NEXT(b);
+ if (APR_BUCKET_IS_EOS(b) || AP_BUCKET_IS_EOR(b)) {
+ break;
+- }
+- APR_BUCKET_REMOVE(b);
+- apr_bucket_destroy(b);
++ }
++ if (!H2_BUCKET_IS_HEADERS(b)) {
++ APR_BUCKET_REMOVE(b);
++ apr_bucket_destroy(b);
++ }
+ b = next;
+ }
+ }
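+
+Two fixes combine in the response parser above: the accumulated brigade
+length is checked against the flatten buffer, so a peer that never sends a
+CRLF cannot keep the parser looping on APR_EAGAIN, and leftover buckets are
+set aside because transient buckets may reference memory that is gone by
+the next filter call. A reduced sketch of the length guard, using only the
+brigade calls from the hunk (pool and brigades assumed initialized; not the
+parser as shipped):
+
+    #include <apr_buckets.h>
+
+    /* APR_ENOSPC when no complete line can ever fit into buf[len] */
+    static apr_status_t get_line(apr_bucket_brigade *tmp,
+                                 apr_bucket_brigade *bb,
+                                 char *buf, apr_size_t len)
+    {
+        apr_status_t rv;
+        apr_off_t blen;
+
+        rv = apr_brigade_split_line(tmp, bb, APR_BLOCK_READ, len);
+        if (rv != APR_SUCCESS) {
+            return rv;
+        }
+        rv = apr_brigade_length(tmp, 0, &blen);
+        if (rv != APR_SUCCESS || blen > (apr_off_t)len) {
+            return APR_ENOSPC; /* more buffered than we can ever flatten */
+        }
+        --len;
+        rv = apr_brigade_flatten(tmp, buf, &len);
+        if (rv == APR_SUCCESS) {
+            buf[len] = '\0';
+        }
+        return rv;
+    }
+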
+--- a/modules/http2/h2_h2.c
++++ b/modules/http2/h2_h2.c
+@@ -463,19 +463,18 @@
+ return opt_ssl_is_https && opt_ssl_is_https(c);
+ }
+
+-int h2_is_acceptable_connection(conn_rec *c, int require_all)
++int h2_is_acceptable_connection(conn_rec *c, request_rec *r, int require_all)
+ {
+ int is_tls = h2_h2_is_tls(c);
+- const h2_config *cfg = h2_config_get(c);
+
+- if (is_tls && h2_config_geti(cfg, H2_CONF_MODERN_TLS_ONLY) > 0) {
++ if (is_tls && h2_config_cgeti(c, H2_CONF_MODERN_TLS_ONLY) > 0) {
+ /* Check TLS connection for modern TLS parameters, as defined in
+ * RFC 7540 and https://wiki.mozilla.org/Security/Server_Side_TLS#Modern_compatibility
+ */
+ apr_pool_t *pool = c->pool;
+ server_rec *s = c->base_server;
+ char *val;
+-
++
+ if (!opt_ssl_var_lookup) {
+ /* unable to check */
+ return 0;
+@@ -521,33 +520,29 @@
+ return 1;
+ }
+
+-int h2_allows_h2_direct(conn_rec *c)
++static int h2_allows_h2_direct(conn_rec *c)
+ {
+- const h2_config *cfg = h2_config_get(c);
+ int is_tls = h2_h2_is_tls(c);
+ const char *needed_protocol = is_tls? "h2" : "h2c";
+- int h2_direct = h2_config_geti(cfg, H2_CONF_DIRECT);
++ int h2_direct = h2_config_cgeti(c, H2_CONF_DIRECT);
+
+ if (h2_direct < 0) {
+ h2_direct = is_tls? 0 : 1;
+ }
+- return (h2_direct
+- && ap_is_allowed_protocol(c, NULL, NULL, needed_protocol));
++ return (h2_direct && ap_is_allowed_protocol(c, NULL, NULL, needed_protocol));
+ }
+
+-int h2_allows_h2_upgrade(conn_rec *c)
++int h2_allows_h2_upgrade(request_rec *r)
+ {
+- const h2_config *cfg = h2_config_get(c);
+- int h2_upgrade = h2_config_geti(cfg, H2_CONF_UPGRADE);
+-
+- return h2_upgrade > 0 || (h2_upgrade < 0 && !h2_h2_is_tls(c));
++ int h2_upgrade = h2_config_rgeti(r, H2_CONF_UPGRADE);
++ return h2_upgrade > 0 || (h2_upgrade < 0 && !h2_h2_is_tls(r->connection));
+ }
+
+ /*******************************************************************************
+ * Register various hooks
+ */
+ static const char* const mod_ssl[] = { "mod_ssl.c", NULL};
+-static const char* const mod_reqtimeout[] = { "mod_reqtimeout.c", NULL};
++static const char* const mod_reqtimeout[] = { "mod_ssl.c", "mod_reqtimeout.c", NULL};
+
+ void h2_h2_register_hooks(void)
+ {
+@@ -558,7 +553,7 @@
+ * a chance to take over before it.
+ */
+ ap_hook_process_connection(h2_h2_process_conn,
+- mod_ssl, mod_reqtimeout, APR_HOOK_LAST);
++ mod_reqtimeout, NULL, APR_HOOK_LAST);
+
+ /* One last chance to properly say goodbye if we have not done so
+ * already. */
+@@ -581,14 +576,17 @@
+ {
+ apr_status_t status;
+ h2_ctx *ctx;
++ server_rec *s;
+
+ if (c->master) {
+ return DECLINED;
+ }
+
+ ctx = h2_ctx_get(c, 0);
++ s = ctx? ctx->server : c->base_server;
++
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c, "h2_h2, process_conn");
+- if (h2_ctx_is_task(ctx)) {
++ if (ctx && ctx->task) {
+ /* our stream pseudo connection */
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, c, "h2_h2, task, declined");
+ return DECLINED;
+@@ -601,19 +599,19 @@
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c, "h2_h2, process_conn, "
+ "new connection using protocol '%s', direct=%d, "
+ "tls acceptable=%d", proto, h2_allows_h2_direct(c),
+- h2_is_acceptable_connection(c, 1));
++ h2_is_acceptable_connection(c, NULL, 1));
+ }
+
+ if (!strcmp(AP_PROTOCOL_HTTP1, proto)
+ && h2_allows_h2_direct(c)
+- && h2_is_acceptable_connection(c, 1)) {
++ && h2_is_acceptable_connection(c, NULL, 1)) {
+ /* Fresh connection still is on http/1.1 and H2Direct is enabled.
+ * Otherwise connection is in a fully acceptable state.
+ * -> peek at the first 24 incoming bytes
+ */
+ apr_bucket_brigade *temp;
+- char *s = NULL;
+- apr_size_t slen;
++ char *peek = NULL;
++ apr_size_t peeklen;
+
+ temp = apr_brigade_create(c->pool, c->bucket_alloc);
+ status = ap_get_brigade(c->input_filters, temp,
+@@ -626,8 +624,8 @@
+ return DECLINED;
+ }
+
+- apr_brigade_pflatten(temp, &s, &slen, c->pool);
+- if ((slen >= 24) && !memcmp(H2_MAGIC_TOKEN, s, 24)) {
++ apr_brigade_pflatten(temp, &peek, &peeklen, c->pool);
++ if ((peeklen >= 24) && !memcmp(H2_MAGIC_TOKEN, peek, 24)) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c,
+ "h2_h2, direct mode detected");
+ if (!ctx) {
+@@ -638,7 +636,7 @@
+ else if (APLOGctrace2(c)) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, c,
+ "h2_h2, not detected in %d bytes(base64): %s",
+- (int)slen, h2_util_base64url_encode(s, slen, c->pool));
++ (int)peeklen, h2_util_base64url_encode(peek, peeklen, c->pool));
+ }
+
+ apr_brigade_destroy(temp);
+@@ -647,15 +645,16 @@
+
+ if (ctx) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c, "process_conn");
+- if (!h2_ctx_session_get(ctx)) {
+- status = h2_conn_setup(ctx, c, NULL);
++
++ if (!h2_ctx_get_session(c)) {
++ status = h2_conn_setup(c, NULL, s);
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, c, "conn_setup");
+ if (status != APR_SUCCESS) {
+ h2_ctx_clear(c);
+ return !OK;
+ }
+ }
+- h2_conn_run(ctx, c);
++ h2_conn_run(c);
+ return OK;
+ }
+
+@@ -667,7 +666,7 @@
+ {
+ h2_ctx *ctx;
+
+- /* slave connection? */
++ /* secondary connection? */
+ if (c->master) {
+ return DECLINED;
+ }
+@@ -684,16 +683,17 @@
+
+ static void check_push(request_rec *r, const char *tag)
+ {
+- const h2_config *conf = h2_config_rget(r);
+- if (!r->expecting_100
+- && conf && conf->push_list && conf->push_list->nelts > 0) {
++ apr_array_header_t *push_list = h2_config_push_list(r);
++
++ if (!r->expecting_100 && push_list && push_list->nelts > 0) {
+ int i, old_status;
+ const char *old_line;
++
+ ap_log_rerror(APLOG_MARK, APLOG_TRACE1, 0, r,
+ "%s, early announcing %d resources for push",
+- tag, conf->push_list->nelts);
+- for (i = 0; i < conf->push_list->nelts; ++i) {
+- h2_push_res *push = &APR_ARRAY_IDX(conf->push_list, i, h2_push_res);
++ tag, push_list->nelts);
++ for (i = 0; i < push_list->nelts; ++i) {
++ h2_push_res *push = &APR_ARRAY_IDX(push_list, i, h2_push_res);
+ apr_table_add(r->headers_out, "Link",
+ apr_psprintf(r->pool, "<%s>; rel=preload%s",
+ push->uri_ref, push->critical? "; critical" : ""));
+@@ -710,10 +710,9 @@
+
+ static int h2_h2_post_read_req(request_rec *r)
+ {
+- /* slave connection? */
++ /* secondary connection? */
+ if (r->connection->master) {
+- h2_ctx *ctx = h2_ctx_rget(r);
+- struct h2_task *task = h2_ctx_get_task(ctx);
++ struct h2_task *task = h2_ctx_get_task(r->connection);
+ /* This hook will get called twice on internal redirects. Take care
+ * that we manipulate filters only once. */
+ if (task && !task->filters_set) {
+@@ -730,7 +729,7 @@
+ ap_add_output_filter("H2_RESPONSE", task, r, r->connection);
+
+ for (f = r->input_filters; f; f = f->next) {
+- if (!strcmp("H2_SLAVE_IN", f->frec->name)) {
++ if (!strcmp("H2_SECONDARY_IN", f->frec->name)) {
+ f->r = r;
+ break;
+ }
+@@ -744,17 +743,15 @@
+
+ static int h2_h2_late_fixups(request_rec *r)
+ {
+- /* slave connection? */
++ /* secondary connection? */
+ if (r->connection->master) {
+- h2_ctx *ctx = h2_ctx_rget(r);
+- struct h2_task *task = h2_ctx_get_task(ctx);
++ struct h2_task *task = h2_ctx_get_task(r->connection);
+ if (task) {
+ /* check if we copy vs. setaside files in this location */
+- task->output.copy_files = h2_config_geti(h2_config_rget(r),
+- H2_CONF_COPY_FILES);
++ task->output.copy_files = h2_config_rgeti(r, H2_CONF_COPY_FILES);
+ if (task->output.copy_files) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, task->c,
+- "h2_slave_out(%s): copy_files on", task->id);
++ "h2_secondary_out(%s): copy_files on", task->id);
+ h2_beam_on_file_beam(task->output.beam, h2_beam_no_files, NULL);
+ }
+ check_push(r, "late_fixup");
+--- a/modules/http2/h2_h2.h
++++ b/modules/http2/h2_h2.h
+@@ -57,23 +57,15 @@
+ * the handshake is still ongoing.
+ * @return != 0 iff connection requirements are met
+ */
+-int h2_is_acceptable_connection(conn_rec *c, int require_all);
+-
+-/**
+- * Check if the "direct" HTTP/2 mode of protocol handling is enabled
+- * for the given connection.
+- * @param c the connection to check
+- * @return != 0 iff direct mode is enabled
+- */
+-int h2_allows_h2_direct(conn_rec *c);
++int h2_is_acceptable_connection(conn_rec *c, request_rec *r, int require_all);
+
+ /**
+ * Check if the "Upgrade" HTTP/1.1 mode of protocol switching is enabled
+- * for the given connection.
+- * @param c the connection to check
++ * for the given request.
++ * @param r the request to check
+ * @return != 0 iff Upgrade switching is enabled
+ */
+-int h2_allows_h2_upgrade(conn_rec *c);
++int h2_allows_h2_upgrade(request_rec *r);
+
+
+ #endif /* defined(__mod_h2__h2_h2__) */
+--- a/modules/http2/h2_headers.c
++++ b/modules/http2/h2_headers.c
+@@ -28,6 +28,7 @@
+
+ #include "h2_private.h"
+ #include "h2_h2.h"
++#include "h2_config.h"
+ #include "h2_util.h"
+ #include "h2_request.h"
+ #include "h2_headers.h"
+@@ -101,8 +102,9 @@
+ const apr_bucket *src)
+ {
+ if (H2_BUCKET_IS_HEADERS(src)) {
+- h2_headers *r = ((h2_bucket_headers *)src->data)->headers;
+- apr_bucket *b = h2_bucket_headers_create(dest->bucket_alloc, r);
++ h2_headers *src_headers = ((h2_bucket_headers *)src->data)->headers;
++ apr_bucket *b = h2_bucket_headers_create(dest->bucket_alloc,
++ h2_headers_clone(dest->p, src_headers));
+ APR_BRIGADE_INSERT_TAIL(dest, b);
+ return b;
+ }
+@@ -128,28 +130,41 @@
+ {
+ h2_headers *headers = h2_headers_create(status, header, r->notes, 0, pool);
+ if (headers->status == HTTP_FORBIDDEN) {
+- const char *cause = apr_table_get(r->notes, "ssl-renegotiate-forbidden");
+- if (cause) {
+- /* This request triggered a TLS renegotiation that is now allowed
+- * in HTTP/2. Tell the client that it should use HTTP/1.1 for this.
+- */
+- ap_log_rerror(APLOG_MARK, APLOG_DEBUG, headers->status, r,
+- APLOGNO(03061)
+- "h2_headers(%ld): renegotiate forbidden, cause: %s",
+- (long)r->connection->id, cause);
+- headers->status = H2_ERR_HTTP_1_1_REQUIRED;
++ request_rec *r_prev;
++ for (r_prev = r; r_prev != NULL; r_prev = r_prev->prev) {
++ const char *cause = apr_table_get(r_prev->notes, "ssl-renegotiate-forbidden");
++ if (cause) {
++ /* This request triggered a TLS renegotiation that is not allowed
++ * in HTTP/2. Tell the client that it should use HTTP/1.1 for this.
++ */
++ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, headers->status, r,
++ APLOGNO(03061)
++ "h2_headers(%ld): renegotiate forbidden, cause: %s",
++ (long)r->connection->id, cause);
++ headers->status = H2_ERR_HTTP_1_1_REQUIRED;
++ break;
++ }
+ }
+ }
+ if (is_unsafe(r->server)) {
+- apr_table_setn(headers->notes, H2_HDR_CONFORMANCE,
+- H2_HDR_CONFORMANCE_UNSAFE);
++ apr_table_setn(headers->notes, H2_HDR_CONFORMANCE, H2_HDR_CONFORMANCE_UNSAFE);
++ }
++ if (h2_config_rgeti(r, H2_CONF_PUSH) == 0 && h2_config_sgeti(r->server, H2_CONF_PUSH) != 0) {
++ apr_table_setn(headers->notes, H2_PUSH_MODE_NOTE, "0");
+ }
+ return headers;
+ }
+
+ h2_headers *h2_headers_copy(apr_pool_t *pool, h2_headers *h)
+ {
+- return h2_headers_create(h->status, h->headers, h->notes, h->raw_bytes, pool);
++ return h2_headers_create(h->status, apr_table_copy(pool, h->headers),
++ apr_table_copy(pool, h->notes), h->raw_bytes, pool);
++}
++
++h2_headers *h2_headers_clone(apr_pool_t *pool, h2_headers *h)
++{
++ return h2_headers_create(h->status, apr_table_clone(pool, h->headers),
++ apr_table_clone(pool, h->notes), h->raw_bytes, pool);
+ }
+
+ h2_headers *h2_headers_die(apr_status_t type,
+--- a/modules/http2/h2_headers.h
++++ b/modules/http2/h2_headers.h
+@@ -59,12 +59,18 @@
+ apr_table_t *header, apr_pool_t *pool);
+
+ /**
+- * Clone the headers into another pool. This will not copy any
++ * Copy the headers into another pool. This will not copy any
+ * header strings.
+ */
+ h2_headers *h2_headers_copy(apr_pool_t *pool, h2_headers *h);
+
+ /**
++ * Clone the headers into another pool. This will also clone any
++ * header strings.
++ */
++h2_headers *h2_headers_clone(apr_pool_t *pool, h2_headers *h);
++
++/**
+ * Create the headers for the given error.
+ * @param stream_id id of the stream to create the headers for
+ * @param type the error code
+--- a/modules/http2/h2_mplx.c
++++ b/modules/http2/h2_mplx.c
+@@ -40,7 +40,6 @@
+ #include "h2_ctx.h"
+ #include "h2_h2.h"
+ #include "h2_mplx.h"
+-#include "h2_ngn_shed.h"
+ #include "h2_request.h"
+ #include "h2_stream.h"
+ #include "h2_session.h"
+@@ -54,9 +53,21 @@
+ h2_mplx *m;
+ h2_stream *stream;
+ apr_time_t now;
++ apr_size_t count;
+ } stream_iter_ctx;
+
+-apr_status_t h2_mplx_child_init(apr_pool_t *pool, server_rec *s)
++/**
++ * Naming convention for static functions:
++ * - m_*: function only called from the master connection
++ * - s_*: function only called from a secondary connection
++ * - t_*: function only called from a h2_task holder
++ * - mst_*: function called from everyone
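++ * For example, m_stream_cleanup() below runs only on the master
++ * connection, while s_task_done() is called from the secondary
++ * connection that processed the task.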
++ */
++
++static apr_status_t s_mplx_be_happy(h2_mplx *m, h2_task *task);
++static apr_status_t m_be_annoyed(h2_mplx *m);
++
++apr_status_t h2_mplx_m_child_init(apr_pool_t *pool, server_rec *s)
+ {
+ return APR_SUCCESS;
+ }
+@@ -72,46 +83,40 @@
+ #define H2_MPLX_ENTER_ALWAYS(m) \
+ apr_thread_mutex_lock(m->lock)
+
+-#define H2_MPLX_ENTER_MAYBE(m, lock) \
+- if (lock) apr_thread_mutex_lock(m->lock)
++#define H2_MPLX_ENTER_MAYBE(m, dolock) \
++ if (dolock) apr_thread_mutex_lock(m->lock)
+
+-#define H2_MPLX_LEAVE_MAYBE(m, lock) \
+- if (lock) apr_thread_mutex_unlock(m->lock)
++#define H2_MPLX_LEAVE_MAYBE(m, dolock) \
++ if (dolock) apr_thread_mutex_unlock(m->lock)
+
+-static void check_data_for(h2_mplx *m, h2_stream *stream, int lock);
++static void mst_check_data_for(h2_mplx *m, h2_stream *stream, int mplx_is_locked);
+
+-static void stream_output_consumed(void *ctx,
+- h2_bucket_beam *beam, apr_off_t length)
++static void mst_stream_output_consumed(void *ctx, h2_bucket_beam *beam, apr_off_t length)
+ {
+- h2_stream *stream = ctx;
+- h2_task *task = stream->task;
+-
+- if (length > 0 && task && task->assigned) {
+- h2_req_engine_out_consumed(task->assigned, task->c, length);
+- }
+ }
+
+-static void stream_input_ev(void *ctx, h2_bucket_beam *beam)
++static void mst_stream_input_ev(void *ctx, h2_bucket_beam *beam)
+ {
+ h2_stream *stream = ctx;
+ h2_mplx *m = stream->session->mplx;
+ apr_atomic_set32(&m->event_pending, 1);
+ }
+
+-static void stream_input_consumed(void *ctx, h2_bucket_beam *beam, apr_off_t length)
++static void m_stream_input_consumed(void *ctx, h2_bucket_beam *beam, apr_off_t length)
+ {
+ h2_stream_in_consumed(ctx, length);
+ }
+
+-static void stream_joined(h2_mplx *m, h2_stream *stream)
++static void ms_stream_joined(h2_mplx *m, h2_stream *stream)
+ {
+- ap_assert(!stream->task || stream->task->worker_done);
++ ap_assert(!h2_task_has_started(stream->task) || stream->task->worker_done);
+
++ h2_ififo_remove(m->readyq, stream->id);
+ h2_ihash_remove(m->shold, stream->id);
+ h2_ihash_add(m->spurge, stream);
+ }
+
+-static void stream_cleanup(h2_mplx *m, h2_stream *stream)
++static void m_stream_cleanup(h2_mplx *m, h2_stream *stream)
+ {
+ ap_assert(stream->state == H2_SS_CLEANUP);
+
+@@ -128,15 +133,16 @@
+
+ h2_ihash_remove(m->streams, stream->id);
+ h2_iq_remove(m->q, stream->id);
+- h2_ififo_remove(m->readyq, stream->id);
+- h2_ihash_add(m->shold, stream);
+
+- if (!stream->task || stream->task->worker_done) {
+- stream_joined(m, stream);
++ if (!h2_task_has_started(stream->task) || stream->task->done_done) {
++ ms_stream_joined(m, stream);
+ }
+- else if (stream->task) {
+- stream->task->c->aborted = 1;
+- apr_thread_cond_broadcast(m->task_thawed);
++ else {
++ h2_ififo_remove(m->readyq, stream->id);
++ h2_ihash_add(m->shold, stream);
++ if (stream->task) {
++ stream->task->c->aborted = 1;
++ }
+ }
+ }
+
+@@ -151,29 +157,23 @@
+ * their HTTP/1 cousins, the separate allocator seems to work better
+ * than protecting a shared h2_session one with an own lock.
+ */
+-h2_mplx *h2_mplx_create(conn_rec *c, apr_pool_t *parent,
+- const h2_config *conf,
+- h2_workers *workers)
++h2_mplx *h2_mplx_m_create(conn_rec *c, server_rec *s, apr_pool_t *parent,
++ h2_workers *workers)
+ {
+ apr_status_t status = APR_SUCCESS;
+ apr_allocator_t *allocator;
+ apr_thread_mutex_t *mutex;
+ h2_mplx *m;
+- h2_ctx *ctx = h2_ctx_get(c, 0);
+- ap_assert(conf);
+
+ m = apr_pcalloc(parent, sizeof(h2_mplx));
+ if (m) {
+ m->id = c->id;
+ m->c = c;
+- m->s = (ctx? h2_ctx_server_get(ctx) : NULL);
+- if (!m->s) {
+- m->s = c->base_server;
+- }
++ m->s = s;
+
+ /* We create a pool with its own allocator to be used for
+- * processing slave connections. This is the only way to have the
+- * processing independant of its parent pool in the sense that it
++ * processing secondary connections. This is the only way to have the
++ * processing independent of its parent pool in the sense that it
+ * can work in another thread. Also, the new allocator needs its own
+ * mutex to synchronize sub-pools.
+ */
+@@ -204,17 +204,10 @@
+ return NULL;
+ }
+
+- status = apr_thread_cond_create(&m->task_thawed, m->pool);
+- if (status != APR_SUCCESS) {
+- apr_pool_destroy(m->pool);
+- return NULL;
+- }
+-
+- m->max_streams = h2_config_geti(conf, H2_CONF_MAX_STREAMS);
+- m->stream_max_mem = h2_config_geti(conf, H2_CONF_STREAM_MAX_MEM);
++ m->max_streams = h2_config_sgeti(s, H2_CONF_MAX_STREAMS);
++ m->stream_max_mem = h2_config_sgeti(s, H2_CONF_STREAM_MAX_MEM);
+
+ m->streams = h2_ihash_create(m->pool, offsetof(h2_stream,id));
+- m->sredo = h2_ihash_create(m->pool, offsetof(h2_stream,id));
+ m->shold = h2_ihash_create(m->pool, offsetof(h2_stream,id));
+ m->spurge = h2_ihash_create(m->pool, offsetof(h2_stream,id));
+ m->q = h2_iq_create(m->pool, m->max_streams);
+@@ -228,19 +221,15 @@
+ m->workers = workers;
+ m->max_active = workers->max_workers;
+ m->limit_active = 6; /* the original h1 max parallel connections */
+- m->last_limit_change = m->last_idle_block = apr_time_now();
+- m->limit_change_interval = apr_time_from_msec(100);
+-
+- m->spare_slaves = apr_array_make(m->pool, 10, sizeof(conn_rec*));
++ m->last_mood_change = apr_time_now();
++ m->mood_update_interval = apr_time_from_msec(100);
+
+- m->ngn_shed = h2_ngn_shed_create(m->pool, m->c, m->max_streams,
+- m->stream_max_mem);
+- h2_ngn_shed_set_ctx(m->ngn_shed , m);
++ m->spare_secondary = apr_array_make(m->pool, 10, sizeof(conn_rec*));
+ }
+ return m;
+ }
+
+-int h2_mplx_shutdown(h2_mplx *m)
++int h2_mplx_m_shutdown(h2_mplx *m)
+ {
+ int max_stream_started = 0;
+
+@@ -254,7 +243,7 @@
+ return max_stream_started;
+ }
+
+-static int input_consumed_signal(h2_mplx *m, h2_stream *stream)
++static int m_input_consumed_signal(h2_mplx *m, h2_stream *stream)
+ {
+ if (stream->input) {
+ return h2_beam_report_consumption(stream->input);
+@@ -262,12 +251,12 @@
+ return 0;
+ }
+
+-static int report_consumption_iter(void *ctx, void *val)
++static int m_report_consumption_iter(void *ctx, void *val)
+ {
+ h2_stream *stream = val;
+ h2_mplx *m = ctx;
+
+- input_consumed_signal(m, stream);
++ m_input_consumed_signal(m, stream);
+ if (stream->state == H2_SS_CLOSED_L
+ && (!stream->task || stream->task->worker_done)) {
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, m->c,
+@@ -278,7 +267,7 @@
+ return 1;
+ }
+
+-static int output_consumed_signal(h2_mplx *m, h2_task *task)
++static int s_output_consumed_signal(h2_mplx *m, h2_task *task)
+ {
+ if (task->output.beam) {
+ return h2_beam_report_consumption(task->output.beam);
+@@ -286,7 +275,7 @@
+ return 0;
+ }
+
+-static int stream_destroy_iter(void *ctx, void *val)
++static int m_stream_destroy_iter(void *ctx, void *val)
+ {
+ h2_mplx *m = ctx;
+ h2_stream *stream = val;
+@@ -296,7 +285,7 @@
+
+ if (stream->input) {
+ /* Process outstanding events before destruction */
+- input_consumed_signal(m, stream);
++ m_input_consumed_signal(m, stream);
+ h2_beam_log(stream->input, m->c, APLOG_TRACE2, "stream_destroy");
+ h2_beam_destroy(stream->input);
+ stream->input = NULL;
+@@ -304,12 +293,12 @@
+
+ if (stream->task) {
+ h2_task *task = stream->task;
+- conn_rec *slave;
+- int reuse_slave = 0;
++ conn_rec *secondary;
++ int reuse_secondary = 0;
+
+ stream->task = NULL;
+- slave = task->c;
+- if (slave) {
++ secondary = task->c;
++ if (secondary) {
+ /* On non-serialized requests, the IO logging has not accounted for any
+ * meta data send over the network: response headers and h2 frame headers. we
+ * counted this on the stream and need to add this now.
+@@ -318,26 +307,25 @@
+ if (task->request && !task->request->serialize && h2_task_logio_add_bytes_out) {
+ apr_off_t unaccounted = stream->out_frame_octets - stream->out_data_octets;
+ if (unaccounted > 0) {
+- h2_task_logio_add_bytes_out(slave, unaccounted);
++ h2_task_logio_add_bytes_out(secondary, unaccounted);
+ }
+ }
+
+- if (m->s->keep_alive_max == 0 || slave->keepalives < m->s->keep_alive_max) {
+- reuse_slave = ((m->spare_slaves->nelts < (m->limit_active * 3 / 2))
+- && !task->rst_error);
++ if (m->s->keep_alive_max == 0 || secondary->keepalives < m->s->keep_alive_max) {
++ reuse_secondary = ((m->spare_secondary->nelts < (m->limit_active * 3 / 2))
++ && !task->rst_error);
+ }
+
+- task->c = NULL;
+- if (reuse_slave) {
++ if (reuse_secondary) {
+ h2_beam_log(task->output.beam, m->c, APLOG_DEBUG,
+- APLOGNO(03385) "h2_task_destroy, reuse slave");
++ APLOGNO(03385) "h2_task_destroy, reuse secondary");
+ h2_task_destroy(task);
+- APR_ARRAY_PUSH(m->spare_slaves, conn_rec*) = slave;
++ APR_ARRAY_PUSH(m->spare_secondary, conn_rec*) = secondary;
+ }
+ else {
+ h2_beam_log(task->output.beam, m->c, APLOG_TRACE1,
+- "h2_task_destroy, destroy slave");
+- h2_slave_destroy(slave);
++ "h2_task_destroy, destroy secondary");
++ h2_secondary_destroy(secondary);
+ }
+ }
+ }
+@@ -345,11 +333,11 @@
+ return 0;
+ }
+
+-static void purge_streams(h2_mplx *m, int lock)
++static void m_purge_streams(h2_mplx *m, int lock)
+ {
+ if (!h2_ihash_empty(m->spurge)) {
+ H2_MPLX_ENTER_MAYBE(m, lock);
+- while (!h2_ihash_iter(m->spurge, stream_destroy_iter, m)) {
++ while (!h2_ihash_iter(m->spurge, m_stream_destroy_iter, m)) {
+ /* repeat until empty */
+ }
+ H2_MPLX_LEAVE_MAYBE(m, lock);
+@@ -361,13 +349,13 @@
+ void *ctx;
+ } stream_iter_ctx_t;
+
+-static int stream_iter_wrap(void *ctx, void *stream)
++static int m_stream_iter_wrap(void *ctx, void *stream)
+ {
+ stream_iter_ctx_t *x = ctx;
+ return x->cb(stream, x->ctx);
+ }
+
+-apr_status_t h2_mplx_stream_do(h2_mplx *m, h2_mplx_stream_cb *cb, void *ctx)
++apr_status_t h2_mplx_m_stream_do(h2_mplx *m, h2_mplx_stream_cb *cb, void *ctx)
+ {
+ stream_iter_ctx_t x;
+
+@@ -375,13 +363,13 @@
+
+ x.cb = cb;
+ x.ctx = ctx;
+- h2_ihash_iter(m->streams, stream_iter_wrap, &x);
++ h2_ihash_iter(m->streams, m_stream_iter_wrap, &x);
+
+ H2_MPLX_LEAVE(m);
+ return APR_SUCCESS;
+ }
+
+-static int report_stream_iter(void *ctx, void *val) {
++static int m_report_stream_iter(void *ctx, void *val) {
+ h2_mplx *m = ctx;
+ h2_stream *stream = val;
+ h2_task *task = stream->task;
+@@ -394,10 +382,10 @@
+ if (task) {
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, m->c, /* NO APLOGNO */
+ H2_STRM_MSG(stream, "->03198: %s %s %s"
+- "[started=%d/done=%d/frozen=%d]"),
++ "[started=%d/done=%d]"),
+ task->request->method, task->request->authority,
+ task->request->path, task->worker_started,
+- task->worker_done, task->frozen);
++ task->worker_done);
+ }
+ else {
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, m->c, /* NO APLOGNO */
+@@ -406,7 +394,7 @@
+ return 1;
+ }
+
+-static int unexpected_stream_iter(void *ctx, void *val) {
++static int m_unexpected_stream_iter(void *ctx, void *val) {
+ h2_mplx *m = ctx;
+ h2_stream *stream = val;
+ ap_log_cerror(APLOG_MARK, APLOG_WARNING, 0, m->c, /* NO APLOGNO */
+@@ -415,7 +403,7 @@
+ return 1;
+ }
+
+-static int stream_cancel_iter(void *ctx, void *val) {
++static int m_stream_cancel_iter(void *ctx, void *val) {
+ h2_mplx *m = ctx;
+ h2_stream *stream = val;
+
+@@ -429,14 +417,14 @@
+ h2_stream_rst(stream, H2_ERR_NO_ERROR);
+ /* All connection data has been sent, simulate cleanup */
+ h2_stream_dispatch(stream, H2_SEV_EOS_SENT);
+- stream_cleanup(m, stream);
++ m_stream_cleanup(m, stream);
+ return 0;
+ }
+
+-void h2_mplx_release_and_join(h2_mplx *m, apr_thread_cond_t *wait)
++void h2_mplx_m_release_and_join(h2_mplx *m, apr_thread_cond_t *wait)
+ {
+ apr_status_t status;
+- int i, wait_secs = 60;
++ int i, wait_secs = 60, old_aborted;
+
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, m->c,
+ "h2_mplx(%ld): start release", m->id);
+@@ -447,15 +435,23 @@
+
+ H2_MPLX_ENTER_ALWAYS(m);
+
++ /* While really terminating any secondary connections, treat the master
++ * connection as aborted. It's not as if we could send any more data
++ * at this point. */
++ old_aborted = m->c->aborted;
++ m->c->aborted = 1;
++
+ /* How to shut down a h2 connection:
+ * 1. cancel all streams still active */
+- while (!h2_ihash_iter(m->streams, stream_cancel_iter, m)) {
++ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c,
++ "h2_mplx(%ld): release, %d/%d/%d streams (total/hold/purge), %d active tasks",
++ m->id, (int)h2_ihash_count(m->streams),
++ (int)h2_ihash_count(m->shold), (int)h2_ihash_count(m->spurge), m->tasks_active);
++ while (!h2_ihash_iter(m->streams, m_stream_cancel_iter, m)) {
+ /* until empty */
+ }
+
+- /* 2. terminate ngn_shed, no more streams
+- * should be scheduled or in the active set */
+- h2_ngn_shed_abort(m->ngn_shed);
++ /* 2. no more streams should be scheduled or in the active set */
+ ap_assert(h2_ihash_empty(m->streams));
+ ap_assert(h2_iq_empty(m->q));
+
+@@ -473,65 +469,60 @@
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, m->c, APLOGNO(03198)
+ "h2_mplx(%ld): waited %d sec for %d tasks",
+ m->id, i*wait_secs, (int)h2_ihash_count(m->shold));
+- h2_ihash_iter(m->shold, report_stream_iter, m);
++ h2_ihash_iter(m->shold, m_report_stream_iter, m);
+ }
+ }
+- ap_assert(m->tasks_active == 0);
+ m->join_wait = NULL;
+-
+- /* 4. close the h2_req_enginge shed */
+- h2_ngn_shed_destroy(m->ngn_shed);
+- m->ngn_shed = NULL;
+-
++
+ /* 4. With all workers done, all streams should be in spurge */
++ ap_assert(m->tasks_active == 0);
+ if (!h2_ihash_empty(m->shold)) {
+ ap_log_cerror(APLOG_MARK, APLOG_WARNING, 0, m->c, APLOGNO(03516)
+ "h2_mplx(%ld): unexpected %d streams in hold",
+ m->id, (int)h2_ihash_count(m->shold));
+- h2_ihash_iter(m->shold, unexpected_stream_iter, m);
++ h2_ihash_iter(m->shold, m_unexpected_stream_iter, m);
+ }
+
++ m->c->aborted = old_aborted;
+ H2_MPLX_LEAVE(m);
+
+- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c,
+- "h2_mplx(%ld): released", m->id);
++ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c, "h2_mplx(%ld): released", m->id);
+ }
+
+-apr_status_t h2_mplx_stream_cleanup(h2_mplx *m, h2_stream *stream)
++apr_status_t h2_mplx_m_stream_cleanup(h2_mplx *m, h2_stream *stream)
+ {
+ H2_MPLX_ENTER(m);
+
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, m->c,
+ H2_STRM_MSG(stream, "cleanup"));
+- stream_cleanup(m, stream);
++ m_stream_cleanup(m, stream);
+
+ H2_MPLX_LEAVE(m);
+ return APR_SUCCESS;
+ }
+
+-h2_stream *h2_mplx_stream_get(h2_mplx *m, int id)
++h2_stream *h2_mplx_t_stream_get(h2_mplx *m, h2_task *task)
+ {
+ h2_stream *s = NULL;
+
+ H2_MPLX_ENTER_ALWAYS(m);
+
+- s = h2_ihash_get(m->streams, id);
++ s = h2_ihash_get(m->streams, task->stream_id);
+
+ H2_MPLX_LEAVE(m);
+ return s;
+ }
+
+-static void output_produced(void *ctx, h2_bucket_beam *beam, apr_off_t bytes)
++static void mst_output_produced(void *ctx, h2_bucket_beam *beam, apr_off_t bytes)
+ {
+ h2_stream *stream = ctx;
+ h2_mplx *m = stream->session->mplx;
+
+- check_data_for(m, stream, 1);
++ mst_check_data_for(m, stream, 0);
+ }
+
+-static apr_status_t out_open(h2_mplx *m, int stream_id, h2_bucket_beam *beam)
++static apr_status_t t_out_open(h2_mplx *m, int stream_id, h2_bucket_beam *beam)
+ {
+- apr_status_t status = APR_SUCCESS;
+ h2_stream *stream = h2_ihash_get(m->streams, stream_id);
+
+ if (!stream || !stream->task || m->aborted) {
+@@ -542,26 +533,26 @@
+ stream->output = beam;
+
+ if (APLOGctrace2(m->c)) {
+- h2_beam_log(beam, m->c, APLOG_TRACE2, "out_open");
++ h2_beam_log(beam, stream->task->c, APLOG_TRACE2, "out_open");
+ }
+ else {
+- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, m->c,
++ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, stream->task->c,
+ "h2_mplx(%s): out open", stream->task->id);
+ }
+
+- h2_beam_on_consumed(stream->output, NULL, stream_output_consumed, stream);
+- h2_beam_on_produced(stream->output, output_produced, stream);
++ h2_beam_on_consumed(stream->output, NULL, mst_stream_output_consumed, stream);
++ h2_beam_on_produced(stream->output, mst_output_produced, stream);
+ if (stream->task->output.copy_files) {
+ h2_beam_on_file_beam(stream->output, h2_beam_no_files, NULL);
+ }
+
+ /* we might see some file buckets in the output, see
+ * if we have enough handles reserved. */
+- check_data_for(m, stream, 0);
+- return status;
++ mst_check_data_for(m, stream, 1);
++ return APR_SUCCESS;
+ }
+
+-apr_status_t h2_mplx_out_open(h2_mplx *m, int stream_id, h2_bucket_beam *beam)
++apr_status_t h2_mplx_t_out_open(h2_mplx *m, int stream_id, h2_bucket_beam *beam)
+ {
+ apr_status_t status;
+
+@@ -571,14 +562,14 @@
+ status = APR_ECONNABORTED;
+ }
+ else {
+- status = out_open(m, stream_id, beam);
++ status = t_out_open(m, stream_id, beam);
+ }
+
+ H2_MPLX_LEAVE(m);
+ return status;
+ }
+
+-static apr_status_t out_close(h2_mplx *m, h2_task *task)
++static apr_status_t s_out_close(h2_mplx *m, h2_task *task)
+ {
+ apr_status_t status = APR_SUCCESS;
+ h2_stream *stream;
+@@ -595,17 +586,17 @@
+ return APR_ECONNABORTED;
+ }
+
+- ap_log_cerror(APLOG_MARK, APLOG_TRACE2, status, m->c,
++ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, status, task->c,
+ "h2_mplx(%s): close", task->id);
+ status = h2_beam_close(task->output.beam);
+- h2_beam_log(task->output.beam, m->c, APLOG_TRACE2, "out_close");
+- output_consumed_signal(m, task);
+- check_data_for(m, stream, 0);
++ h2_beam_log(task->output.beam, task->c, APLOG_TRACE2, "out_close");
++ s_output_consumed_signal(m, task);
++ mst_check_data_for(m, stream, 1);
+ return status;
+ }
+
+-apr_status_t h2_mplx_out_trywait(h2_mplx *m, apr_interval_time_t timeout,
+- apr_thread_cond_t *iowait)
++apr_status_t h2_mplx_m_out_trywait(h2_mplx *m, apr_interval_time_t timeout,
++ apr_thread_cond_t *iowait)
+ {
+ apr_status_t status;
+
+@@ -614,12 +605,12 @@
+ if (m->aborted) {
+ status = APR_ECONNABORTED;
+ }
+- else if (h2_mplx_has_master_events(m)) {
++ else if (h2_mplx_m_has_master_events(m)) {
+ status = APR_SUCCESS;
+ }
+ else {
+- purge_streams(m, 0);
+- h2_ihash_iter(m->streams, report_consumption_iter, m);
++ m_purge_streams(m, 0);
++ h2_ihash_iter(m->streams, m_report_consumption_iter, m);
+ m->added_output = iowait;
+ status = apr_thread_cond_timedwait(m->added_output, m->lock, timeout);
+ if (APLOGctrace2(m->c)) {
+@@ -634,19 +625,27 @@
+ return status;
+ }
+
+-static void check_data_for(h2_mplx *m, h2_stream *stream, int lock)
++static void mst_check_data_for(h2_mplx *m, h2_stream *stream, int mplx_is_locked)
+ {
++ /* If m->lock is already held, we must release during h2_ififo_push()
++ * which can wait on its not_full condition, causing a deadlock because
++ * no one would then be able to acquire m->lock to empty the fifo.
++ */
++ H2_MPLX_LEAVE_MAYBE(m, mplx_is_locked);
+ if (h2_ififo_push(m->readyq, stream->id) == APR_SUCCESS) {
++ H2_MPLX_ENTER_ALWAYS(m);
+ apr_atomic_set32(&m->event_pending, 1);
+- H2_MPLX_ENTER_MAYBE(m, lock);
+ if (m->added_output) {
+ apr_thread_cond_signal(m->added_output);
+ }
+- H2_MPLX_LEAVE_MAYBE(m, lock);
++ H2_MPLX_LEAVE_MAYBE(m, !mplx_is_locked);
++ }
++ else {
++ H2_MPLX_ENTER_MAYBE(m, mplx_is_locked);
+ }
+ }
+
+-apr_status_t h2_mplx_reprioritize(h2_mplx *m, h2_stream_pri_cmp *cmp, void *ctx)
++apr_status_t h2_mplx_m_reprioritize(h2_mplx *m, h2_stream_pri_cmp *cmp, void *ctx)
+ {
+ apr_status_t status;
+
+@@ -666,22 +665,22 @@
+ return status;
+ }
+
+-static void register_if_needed(h2_mplx *m)
++static void ms_register_if_needed(h2_mplx *m, int from_master)
+ {
+ if (!m->aborted && !m->is_registered && !h2_iq_empty(m->q)) {
+ apr_status_t status = h2_workers_register(m->workers, m);
+ if (status == APR_SUCCESS) {
+ m->is_registered = 1;
+ }
+- else {
++ else if (from_master) {
+ ap_log_cerror(APLOG_MARK, APLOG_ERR, status, m->c, APLOGNO(10021)
+ "h2_mplx(%ld): register at workers", m->id);
+ }
+ }
+ }
+
+-apr_status_t h2_mplx_process(h2_mplx *m, struct h2_stream *stream,
+- h2_stream_pri_cmp *cmp, void *ctx)
++apr_status_t h2_mplx_m_process(h2_mplx *m, struct h2_stream *stream,
++ h2_stream_pri_cmp *cmp, void *ctx)
+ {
+ apr_status_t status;
+
+@@ -695,13 +694,13 @@
+ h2_ihash_add(m->streams, stream);
+ if (h2_stream_is_ready(stream)) {
+ /* already have a response */
+- check_data_for(m, stream, 0);
++ mst_check_data_for(m, stream, 1);
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c,
+ H2_STRM_MSG(stream, "process, add to readyq"));
+ }
+ else {
+ h2_iq_add(m->q, stream->id, cmp, ctx);
+- register_if_needed(m);
++ ms_register_if_needed(m, 1);
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c,
+ H2_STRM_MSG(stream, "process, added to q"));
+ }
+@@ -711,7 +710,7 @@
+ return status;
+ }
+
+-static h2_task *next_stream_task(h2_mplx *m)
++static h2_task *s_next_stream_task(h2_mplx *m)
+ {
+ h2_stream *stream;
+ int sid;
+@@ -720,40 +719,39 @@
+
+ stream = h2_ihash_get(m->streams, sid);
+ if (stream) {
+- conn_rec *slave, **pslave;
++ conn_rec *secondary, **psecondary;
+
+- pslave = (conn_rec **)apr_array_pop(m->spare_slaves);
+- if (pslave) {
+- slave = *pslave;
+- slave->aborted = 0;
++ psecondary = (conn_rec **)apr_array_pop(m->spare_secondary);
++ if (psecondary) {
++ secondary = *psecondary;
++ secondary->aborted = 0;
+ }
+ else {
+- slave = h2_slave_create(m->c, stream->id, m->pool);
++ secondary = h2_secondary_create(m->c, stream->id, m->pool);
+ }
+
+ if (!stream->task) {
+-
+ if (sid > m->max_stream_started) {
+ m->max_stream_started = sid;
+ }
+ if (stream->input) {
+- h2_beam_on_consumed(stream->input, stream_input_ev,
+- stream_input_consumed, stream);
++ h2_beam_on_consumed(stream->input, mst_stream_input_ev,
++ m_stream_input_consumed, stream);
+ }
+
+- stream->task = h2_task_create(slave, stream->id,
++ stream->task = h2_task_create(secondary, stream->id,
+ stream->request, m, stream->input,
+ stream->session->s->timeout,
+ m->stream_max_mem);
+ if (!stream->task) {
+- ap_log_cerror(APLOG_MARK, APLOG_ERR, APR_ENOMEM, slave,
++ ap_log_cerror(APLOG_MARK, APLOG_ERR, APR_ENOMEM, secondary,
+ H2_STRM_LOG(APLOGNO(02941), stream,
+ "create task"));
+ return NULL;
+ }
+-
+ }
+
++ stream->task->started_at = apr_time_now();
+ ++m->tasks_active;
+ return stream->task;
+ }
+@@ -761,7 +759,7 @@
+ return NULL;
+ }
+
+-apr_status_t h2_mplx_pop_task(h2_mplx *m, h2_task **ptask)
++apr_status_t h2_mplx_s_pop_task(h2_mplx *m, h2_task **ptask)
+ {
+ apr_status_t rv = APR_EOF;
+
+@@ -777,7 +775,7 @@
+ rv = APR_EOF;
+ }
+ else {
+- *ptask = next_stream_task(m);
++ *ptask = s_next_stream_task(m);
+ rv = (*ptask != NULL && !h2_iq_empty(m->q))? APR_EAGAIN : APR_SUCCESS;
+ }
+ if (APR_EAGAIN != rv) {
+@@ -787,127 +785,87 @@
+ return rv;
+ }
+
+-static void task_done(h2_mplx *m, h2_task *task, h2_req_engine *ngn)
++static void s_task_done(h2_mplx *m, h2_task *task)
+ {
+ h2_stream *stream;
+
+- if (task->frozen) {
+- /* this task was handed over to an engine for processing
+- * and the original worker has finished. That means the
+- * engine may start processing now. */
+- h2_task_thaw(task);
+- apr_thread_cond_broadcast(m->task_thawed);
+- return;
+- }
+-
+- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c,
++ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, task->c,
+ "h2_mplx(%ld): task(%s) done", m->id, task->id);
+- out_close(m, task);
+-
+- if (ngn) {
+- apr_off_t bytes = 0;
+- h2_beam_send(task->output.beam, NULL, APR_NONBLOCK_READ);
+- bytes += h2_beam_get_buffered(task->output.beam);
+- if (bytes > 0) {
+- /* we need to report consumed and current buffered output
+- * to the engine. The request will be streamed out or cancelled,
+- * no more data is coming from it and the engine should update
+- * its calculations before we destroy this information. */
+- h2_req_engine_out_consumed(ngn, task->c, bytes);
+- }
+- }
+-
+- if (task->engine) {
+- if (!m->aborted && !task->c->aborted
+- && !h2_req_engine_is_shutdown(task->engine)) {
+- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, m->c, APLOGNO(10022)
+- "h2_mplx(%ld): task(%s) has not-shutdown "
+- "engine(%s)", m->id, task->id,
+- h2_req_engine_get_id(task->engine));
+- }
+- h2_ngn_shed_done_ngn(m->ngn_shed, task->engine);
+- }
++ s_out_close(m, task);
+
+ task->worker_done = 1;
+ task->done_at = apr_time_now();
+- ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, m->c,
++ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, task->c,
+ "h2_mplx(%s): request done, %f ms elapsed", task->id,
+ (task->done_at - task->started_at) / 1000.0);
+
+- if (task->started_at > m->last_idle_block) {
+- /* this task finished without causing an 'idle block', e.g.
+- * a block by flow control.
+- */
+- if (task->done_at- m->last_limit_change >= m->limit_change_interval
+- && m->limit_active < m->max_active) {
+- /* Well behaving stream, allow it more workers */
+- m->limit_active = H2MIN(m->limit_active * 2,
+- m->max_active);
+- m->last_limit_change = task->done_at;
+- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c,
+- "h2_mplx(%ld): increase worker limit to %d",
+- m->id, m->limit_active);
+- }
++ if (task->c && !task->c->aborted && task->started_at > m->last_mood_change) {
++ s_mplx_be_happy(m, task);
+ }
+
++ ap_assert(task->done_done == 0);
++
+ stream = h2_ihash_get(m->streams, task->stream_id);
+ if (stream) {
+ /* stream not done yet. */
+- if (!m->aborted && h2_ihash_get(m->sredo, stream->id)) {
++ if (!m->aborted && task->redo) {
+ /* reset and schedule again */
+ h2_task_redo(task);
+- h2_ihash_remove(m->sredo, stream->id);
+ h2_iq_add(m->q, stream->id, NULL, NULL);
++ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, task->c,
++ H2_STRM_MSG(stream, "redo, added to q"));
+ }
+ else {
+ /* stream not cleaned up, stay around */
+- ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, m->c,
++ task->done_done = 1;
++ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, task->c,
+ H2_STRM_MSG(stream, "task_done, stream open"));
+ if (stream->input) {
+ h2_beam_leave(stream->input);
+ }
+
+ /* more data will not arrive, resume the stream */
+- check_data_for(m, stream, 0);
++ mst_check_data_for(m, stream, 1);
+ }
+ }
+ else if ((stream = h2_ihash_get(m->shold, task->stream_id)) != NULL) {
+ /* stream is done, was just waiting for this. */
+- ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, m->c,
++ task->done_done = 1;
++ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, task->c,
+ H2_STRM_MSG(stream, "task_done, in hold"));
+ if (stream->input) {
+ h2_beam_leave(stream->input);
+ }
+- stream_joined(m, stream);
++ ms_stream_joined(m, stream);
+ }
+ else if ((stream = h2_ihash_get(m->spurge, task->stream_id)) != NULL) {
+- ap_log_cerror(APLOG_MARK, APLOG_WARNING, 0, m->c,
++ ap_log_cerror(APLOG_MARK, APLOG_WARNING, 0, task->c,
+ H2_STRM_LOG(APLOGNO(03517), stream, "already in spurge"));
+ ap_assert("stream should not be in spurge" == NULL);
+ }
+ else {
+- ap_log_cerror(APLOG_MARK, APLOG_WARNING, 0, m->c, APLOGNO(03518)
++ ap_log_cerror(APLOG_MARK, APLOG_WARNING, 0, task->c, APLOGNO(03518)
+ "h2_mplx(%s): task_done, stream not found",
+ task->id);
+ ap_assert("stream should still be available" == NULL);
+ }
+ }
+
+-void h2_mplx_task_done(h2_mplx *m, h2_task *task, h2_task **ptask)
++void h2_mplx_s_task_done(h2_mplx *m, h2_task *task, h2_task **ptask)
+ {
+ H2_MPLX_ENTER_ALWAYS(m);
+
+- task_done(m, task, NULL);
+ --m->tasks_active;
++ s_task_done(m, task);
+
+ if (m->join_wait) {
+ apr_thread_cond_signal(m->join_wait);
+ }
+ if (ptask) {
+ /* caller wants another task */
+- *ptask = next_stream_task(m);
++ *ptask = s_next_stream_task(m);
+ }
+- register_if_needed(m);
++ ms_register_if_needed(m, 0);
+
+ H2_MPLX_LEAVE(m);
+ }
+@@ -916,94 +874,161 @@
+ * h2_mplx DoS protection
+ ******************************************************************************/
+
+-static int latest_repeatable_unsubmitted_iter(void *data, void *val)
++static int m_timed_out_busy_iter(void *data, void *val)
+ {
+ stream_iter_ctx *ctx = data;
+ h2_stream *stream = val;
+-
+- if (stream->task && !stream->task->worker_done
+- && h2_task_can_redo(stream->task)
+- && !h2_ihash_get(ctx->m->sredo, stream->id)) {
+- if (!h2_stream_is_ready(stream)) {
+- /* this task occupies a worker, the response has not been submitted
+- * yet, not been cancelled and it is a repeatable request
+- * -> it can be re-scheduled later */
+- if (!ctx->stream
+- || (ctx->stream->task->started_at < stream->task->started_at)) {
+- /* we did not have one or this one was started later */
+- ctx->stream = stream;
+- }
+- }
++ if (h2_task_has_started(stream->task) && !stream->task->worker_done
++ && (ctx->now - stream->task->started_at) > stream->task->timeout) {
++ /* timed out stream occupying a worker, found */
++ ctx->stream = stream;
++ return 0;
+ }
+ return 1;
+ }
+
+-static h2_stream *get_latest_repeatable_unsubmitted_stream(h2_mplx *m)
++static h2_stream *m_get_timed_out_busy_stream(h2_mplx *m)
+ {
+ stream_iter_ctx ctx;
+ ctx.m = m;
+ ctx.stream = NULL;
+- h2_ihash_iter(m->streams, latest_repeatable_unsubmitted_iter, &ctx);
++ ctx.now = apr_time_now();
++ h2_ihash_iter(m->streams, m_timed_out_busy_iter, &ctx);
+ return ctx.stream;
+ }
+
+-static int timed_out_busy_iter(void *data, void *val)
++static int m_latest_repeatable_unsubmitted_iter(void *data, void *val)
+ {
+ stream_iter_ctx *ctx = data;
+ h2_stream *stream = val;
+- if (stream->task && !stream->task->worker_done
+- && (ctx->now - stream->task->started_at) > stream->task->timeout) {
+- /* timed out stream occupying a worker, found */
+- ctx->stream = stream;
+- return 0;
++
++ if (!stream->task) goto leave;
++ if (!h2_task_has_started(stream->task) || stream->task->worker_done) goto leave;
++ if (h2_stream_is_ready(stream)) goto leave;
++ if (stream->task->redo) {
++ ++ctx->count;
++ goto leave;
++ }
++ if (h2_task_can_redo(stream->task)) {
++ /* this task occupies a worker, the response has not been submitted
++ * yet, not been cancelled and it is a repeatable request
++ * -> we could redo it later */
++ if (!ctx->stream
++ || (ctx->stream->task->started_at < stream->task->started_at)) {
++ /* we did not have one or this one was started later */
++ ctx->stream = stream;
++ }
+ }
++leave:
+ return 1;
+ }
+
+-static h2_stream *get_timed_out_busy_stream(h2_mplx *m)
++static apr_status_t m_assess_task_to_throttle(h2_task **ptask, h2_mplx *m)
+ {
+ stream_iter_ctx ctx;
++
++ /* count the running tasks already marked for redo and get one that could
++ * be throttled */
++ *ptask = NULL;
+ ctx.m = m;
+ ctx.stream = NULL;
+- ctx.now = apr_time_now();
+- h2_ihash_iter(m->streams, timed_out_busy_iter, &ctx);
+- return ctx.stream;
++ ctx.count = 0;
++ h2_ihash_iter(m->streams, m_latest_repeatable_unsubmitted_iter, &ctx);
++ if (m->tasks_active - ctx.count > m->limit_active) {
++ /* we are above the limit of running tasks, accounting for the ones
++ * already throttled. */
++ if (ctx.stream && ctx.stream->task) {
++ *ptask = ctx.stream->task;
++ return APR_EAGAIN;
++ }
++ /* above limit, but seeing no candidate for easy throttling */
++ if (m_get_timed_out_busy_stream(m)) {
++ /* Too many busy workers, unable to cancel enough streams
++ * and with a busy, timed out stream, we tell the client
++ * to go away... */
++ return APR_TIMEUP;
++ }
++ }
++ return APR_SUCCESS;
+ }
+
+-static apr_status_t unschedule_slow_tasks(h2_mplx *m)
++static apr_status_t m_unschedule_slow_tasks(h2_mplx *m)
+ {
+- h2_stream *stream;
+- int n;
++ h2_task *task;
++ apr_status_t rv;
+
+ /* Try to get rid of streams that occupy workers. Look for safe requests
+ * that are repeatable. If none found, fail the connection.
+ */
+- n = (m->tasks_active - m->limit_active - (int)h2_ihash_count(m->sredo));
+- while (n > 0 && (stream = get_latest_repeatable_unsubmitted_stream(m))) {
++ while (APR_EAGAIN == (rv = m_assess_task_to_throttle(&task, m))) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, m->c,
+ "h2_mplx(%s): unschedule, resetting task for redo later",
+- stream->task->id);
+- h2_task_rst(stream->task, H2_ERR_CANCEL);
+- h2_ihash_add(m->sredo, stream);
+- --n;
++ task->id);
++ task->redo = 1;
++ h2_task_rst(task, H2_ERR_CANCEL);
+ }
+
+- if ((m->tasks_active - h2_ihash_count(m->sredo)) > m->limit_active) {
+- h2_stream *stream = get_timed_out_busy_stream(m);
+- if (stream) {
+- /* Too many busy workers, unable to cancel enough streams
+- * and with a busy, timed out stream, we tell the client
+- * to go away... */
+- return APR_TIMEUP;
+- }
++ return rv;
++}
++
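++/* A rough sketch of the mood-tracking heuristic: a task finishing
++ * un-aborted that started after the last mood change counts as a happy
++ * event (irritations_since is decremented), while idle blocks and client
++ * stream resets count as irritations. s_mplx_be_happy() doubles the
++ * worker limit up to max_active; m_be_annoyed() steps it back down
++ * through 16, 8, 4, 2. */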
++static apr_status_t s_mplx_be_happy(h2_mplx *m, h2_task *task)
++{
++ apr_time_t now;
++
++ --m->irritations_since;
++ now = apr_time_now();
++ if (m->limit_active < m->max_active
++ && (now - m->last_mood_change >= m->mood_update_interval
++ || m->irritations_since < -m->limit_active)) {
++ m->limit_active = H2MIN(m->limit_active * 2, m->max_active);
++ m->last_mood_change = now;
++ m->irritations_since = 0;
++ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, task->c,
++ "h2_mplx(%ld): mood update, increasing worker limit to %d",
++ m->id, m->limit_active);
+ }
+ return APR_SUCCESS;
+ }
+
+-apr_status_t h2_mplx_idle(h2_mplx *m)
++static apr_status_t m_be_annoyed(h2_mplx *m)
+ {
+ apr_status_t status = APR_SUCCESS;
+ apr_time_t now;
++
++ ++m->irritations_since;
++ now = apr_time_now();
++ if (m->limit_active > 2 &&
++ ((now - m->last_mood_change >= m->mood_update_interval)
++ || (m->irritations_since >= m->limit_active))) {
++
++ if (m->limit_active > 16) {
++ m->limit_active = 16;
++ }
++ else if (m->limit_active > 8) {
++ m->limit_active = 8;
++ }
++ else if (m->limit_active > 4) {
++ m->limit_active = 4;
++ }
++ else if (m->limit_active > 2) {
++ m->limit_active = 2;
++ }
++ m->last_mood_change = now;
++ m->irritations_since = 0;
++ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c,
++ "h2_mplx(%ld): mood update, decreasing worker limit to %d",
++ m->id, m->limit_active);
++ }
++
++ if (m->tasks_active > m->limit_active) {
++ status = m_unschedule_slow_tasks(m);
++ }
++ return status;
++}
++
++apr_status_t h2_mplx_m_idle(h2_mplx *m)
++{
++ apr_status_t status = APR_SUCCESS;
+ apr_size_t scount;
+
+ H2_MPLX_ENTER(m);
+@@ -1023,31 +1048,7 @@
+ * of busy workers we allow for this connection until it
+ * well behaves.
+ */
+- now = apr_time_now();
+- m->last_idle_block = now;
+- if (m->limit_active > 2
+- && now - m->last_limit_change >= m->limit_change_interval) {
+- if (m->limit_active > 16) {
+- m->limit_active = 16;
+- }
+- else if (m->limit_active > 8) {
+- m->limit_active = 8;
+- }
+- else if (m->limit_active > 4) {
+- m->limit_active = 4;
+- }
+- else if (m->limit_active > 2) {
+- m->limit_active = 2;
+- }
+- m->last_limit_change = now;
+- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c,
+- "h2_mplx(%ld): decrease worker limit to %d",
+- m->id, m->limit_active);
+- }
+-
+- if (m->tasks_active > m->limit_active) {
+- status = unschedule_slow_tasks(m);
+- }
++ status = m_be_annoyed(m);
+ }
+ else if (!h2_iq_empty(m->q)) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c,
+@@ -1077,167 +1078,30 @@
+ h2_beam_is_closed(stream->output),
+ (long)h2_beam_get_buffered(stream->output));
+ h2_ihash_add(m->streams, stream);
+- check_data_for(m, stream, 0);
++ mst_check_data_for(m, stream, 1);
+ stream->out_checked = 1;
+ status = APR_EAGAIN;
+ }
+ }
+ }
+ }
+- register_if_needed(m);
++ ms_register_if_needed(m, 1);
+
+ H2_MPLX_LEAVE(m);
+ return status;
+ }
+
+ /*******************************************************************************
+- * HTTP/2 request engines
+- ******************************************************************************/
+-
+-typedef struct {
+- h2_mplx * m;
+- h2_req_engine *ngn;
+- int streams_updated;
+-} ngn_update_ctx;
+-
+-static int ngn_update_window(void *ctx, void *val)
+-{
+- ngn_update_ctx *uctx = ctx;
+- h2_stream *stream = val;
+- if (stream->task && stream->task->assigned == uctx->ngn
+- && output_consumed_signal(uctx->m, stream->task)) {
+- ++uctx->streams_updated;
+- }
+- return 1;
+-}
+-
+-static apr_status_t ngn_out_update_windows(h2_mplx *m, h2_req_engine *ngn)
+-{
+- ngn_update_ctx ctx;
+-
+- ctx.m = m;
+- ctx.ngn = ngn;
+- ctx.streams_updated = 0;
+- h2_ihash_iter(m->streams, ngn_update_window, &ctx);
+-
+- return ctx.streams_updated? APR_SUCCESS : APR_EAGAIN;
+-}
+-
+-apr_status_t h2_mplx_req_engine_push(const char *ngn_type,
+- request_rec *r,
+- http2_req_engine_init *einit)
+-{
+- apr_status_t status;
+- h2_mplx *m;
+- h2_task *task;
+- h2_stream *stream;
+-
+- task = h2_ctx_rget_task(r);
+- if (!task) {
+- return APR_ECONNABORTED;
+- }
+- m = task->mplx;
+-
+- H2_MPLX_ENTER(m);
+-
+- stream = h2_ihash_get(m->streams, task->stream_id);
+- if (stream) {
+- status = h2_ngn_shed_push_request(m->ngn_shed, ngn_type, r, einit);
+- }
+- else {
+- status = APR_ECONNABORTED;
+- }
+-
+- H2_MPLX_LEAVE(m);
+- return status;
+-}
+-
+-apr_status_t h2_mplx_req_engine_pull(h2_req_engine *ngn,
+- apr_read_type_e block,
+- int capacity,
+- request_rec **pr)
+-{
+- h2_ngn_shed *shed = h2_ngn_shed_get_shed(ngn);
+- h2_mplx *m = h2_ngn_shed_get_ctx(shed);
+- apr_status_t status;
+- int want_shutdown;
+-
+- H2_MPLX_ENTER(m);
+-
+- want_shutdown = (block == APR_BLOCK_READ);
+-
+- /* Take this opportunity to update output consummation
+- * for this engine */
+- ngn_out_update_windows(m, ngn);
+-
+- if (want_shutdown && !h2_iq_empty(m->q)) {
+- /* For a blocking read, check first if requests are to be
+- * had and, if not, wait a short while before doing the
+- * blocking, and if unsuccessful, terminating read.
+- */
+- status = h2_ngn_shed_pull_request(shed, ngn, capacity, 1, pr);
+- if (APR_STATUS_IS_EAGAIN(status)) {
+- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c,
+- "h2_mplx(%ld): start block engine pull", m->id);
+- apr_thread_cond_timedwait(m->task_thawed, m->lock,
+- apr_time_from_msec(20));
+- status = h2_ngn_shed_pull_request(shed, ngn, capacity, 1, pr);
+- }
+- }
+- else {
+- status = h2_ngn_shed_pull_request(shed, ngn, capacity,
+- want_shutdown, pr);
+- }
+-
+- H2_MPLX_LEAVE(m);
+- return status;
+-}
+-
+-void h2_mplx_req_engine_done(h2_req_engine *ngn, conn_rec *r_conn,
+- apr_status_t status)
+-{
+- h2_task *task = h2_ctx_cget_task(r_conn);
+-
+- if (task) {
+- h2_mplx *m = task->mplx;
+- h2_stream *stream;
+-
+- H2_MPLX_ENTER_ALWAYS(m);
+-
+- stream = h2_ihash_get(m->streams, task->stream_id);
+-
+- ngn_out_update_windows(m, ngn);
+- h2_ngn_shed_done_task(m->ngn_shed, ngn, task);
+-
+- if (status != APR_SUCCESS && stream
+- && h2_task_can_redo(task)
+- && !h2_ihash_get(m->sredo, stream->id)) {
+- h2_ihash_add(m->sredo, stream);
+- }
+-
+- if (task->engine) {
+- /* cannot report that as done until engine returns */
+- }
+- else {
+- task_done(m, task, ngn);
+- }
+-
+- H2_MPLX_LEAVE(m);
+- }
+-}
+-
+-/*******************************************************************************
+ * mplx master events dispatching
+ ******************************************************************************/
+
+-int h2_mplx_has_master_events(h2_mplx *m)
++int h2_mplx_m_has_master_events(h2_mplx *m)
+ {
+ return apr_atomic_read32(&m->event_pending) > 0;
+ }
+
+-apr_status_t h2_mplx_dispatch_master_events(h2_mplx *m,
+- stream_ev_callback *on_resume,
+- void *on_ctx)
++apr_status_t h2_mplx_m_dispatch_master_events(h2_mplx *m, stream_ev_callback *on_resume,
++ void *on_ctx)
+ {
+ h2_stream *stream;
+ int n, id;
+@@ -1247,8 +1111,8 @@
+ apr_atomic_set32(&m->event_pending, 0);
+
+ /* update input windows for streams */
+- h2_ihash_iter(m->streams, report_consumption_iter, m);
+- purge_streams(m, 1);
++ h2_ihash_iter(m->streams, m_report_consumption_iter, m);
++ m_purge_streams(m, 1);
+
+ n = h2_ififo_count(m->readyq);
+ while (n > 0
+@@ -1263,13 +1127,13 @@
+ return APR_SUCCESS;
+ }
+
+-apr_status_t h2_mplx_keep_active(h2_mplx *m, h2_stream *stream)
++apr_status_t h2_mplx_m_keep_active(h2_mplx *m, h2_stream *stream)
+ {
+- check_data_for(m, stream, 1);
++ mst_check_data_for(m, stream, 0);
+ return APR_SUCCESS;
+ }
+
+-int h2_mplx_awaits_data(h2_mplx *m)
++int h2_mplx_m_awaits_data(h2_mplx *m)
+ {
+ int waiting = 1;
+
+@@ -1278,11 +1142,24 @@
+ if (h2_ihash_empty(m->streams)) {
+ waiting = 0;
+ }
+- else if (!m->tasks_active && !h2_ififo_count(m->readyq)
+- && h2_iq_empty(m->q)) {
++ else if (!m->tasks_active && !h2_ififo_count(m->readyq) && h2_iq_empty(m->q)) {
+ waiting = 0;
+ }
+
+ H2_MPLX_LEAVE(m);
+ return waiting;
+ }
++
++apr_status_t h2_mplx_m_client_rst(h2_mplx *m, int stream_id)
++{
++ h2_stream *stream;
++ apr_status_t status = APR_SUCCESS;
++
++ H2_MPLX_ENTER_ALWAYS(m);
++ stream = h2_ihash_get(m->streams, stream_id);
++ if (stream && stream->task) {
++ status = m_be_annoyed(m);
++ }
++ H2_MPLX_LEAVE(m);
++ return status;
++}
+--- a/modules/http2/h2_mplx.h
++++ b/modules/http2/h2_mplx.h
+@@ -31,8 +31,10 @@
+ * queued in the multiplexer. If a task thread tries to write more
+ * data, it is blocked until space becomes available.
+ *
+- * Writing input is never blocked. In order to use flow control on the input,
+- * the mplx can be polled for input data consumption.
++ * Naming Convention:
++ * "h2_mplx_m_" are methods only to be called by the main connection
++ * "h2_mplx_s_" are method only to be called by a secondary connection
++ * "h2_mplx_t_" are method only to be called by a task handler (can be master or secondary)
+ */
+
+ struct apr_pool_t;
+@@ -47,8 +49,6 @@
+ struct apr_thread_cond_t;
+ struct h2_workers;
+ struct h2_iqueue;
+-struct h2_ngn_shed;
+-struct h2_req_engine;
+
+ #include <apr_queue.h>
+
+@@ -65,7 +65,6 @@
+ unsigned int is_registered; /* is registered at h2_workers */
+
+ struct h2_ihash_t *streams; /* all streams currently processing */
+- struct h2_ihash_t *sredo; /* all streams that need to be re-started */
+ struct h2_ihash_t *shold; /* all streams done with task ongoing */
+ struct h2_ihash_t *spurge; /* all streams done, ready for destroy */
+
+@@ -79,41 +78,35 @@
+ int tasks_active; /* # of tasks being processed from this mplx */
+ int limit_active; /* current limit on active tasks, dynamic */
+ int max_active; /* max, hard limit # of active tasks in a process */
+- apr_time_t last_idle_block; /* last time, this mplx entered IDLE while
+- * streams were ready */
+- apr_time_t last_limit_change; /* last time, worker limit changed */
+- apr_interval_time_t limit_change_interval;
++
++ apr_time_t last_mood_change; /* last time the worker limit changed */
++ apr_interval_time_t mood_update_interval; /* how frequently we update, at most */
++ int irritations_since; /* irritations (>0) or happy events (<0) since last mood change */
+
+ apr_thread_mutex_t *lock;
+ struct apr_thread_cond_t *added_output;
+- struct apr_thread_cond_t *task_thawed;
+ struct apr_thread_cond_t *join_wait;
+
+ apr_size_t stream_max_mem;
+
+ apr_pool_t *spare_io_pool;
+- apr_array_header_t *spare_slaves; /* spare slave connections */
++ apr_array_header_t *spare_secondary; /* spare secondary connections */
+
+ struct h2_workers *workers;
+-
+- struct h2_ngn_shed *ngn_shed;
+ };
+
+-
+-
+ /*******************************************************************************
+- * Object lifecycle and information.
++ * From the main connection processing: h2_mplx_m_*
+ ******************************************************************************/
+
+-apr_status_t h2_mplx_child_init(apr_pool_t *pool, server_rec *s);
++apr_status_t h2_mplx_m_child_init(apr_pool_t *pool, server_rec *s);
+
+ /**
+ * Create the multiplexer for the given HTTP2 session.
+ * Implicitly has reference count 1.
+ */
+-h2_mplx *h2_mplx_create(conn_rec *c, apr_pool_t *master,
+- const struct h2_config *conf,
+- struct h2_workers *workers);
++h2_mplx *h2_mplx_m_create(conn_rec *c, server_rec *s, apr_pool_t *master,
++ struct h2_workers *workers);
+
+ /**
+ * Decreases the reference counter of this mplx and waits for it
+@@ -123,26 +116,14 @@
+ * @param m the mplx to be released and destroyed
+ * @param wait condition var to wait on for ref counter == 0
+ */
+-void h2_mplx_release_and_join(h2_mplx *m, struct apr_thread_cond_t *wait);
+-
+-apr_status_t h2_mplx_pop_task(h2_mplx *m, struct h2_task **ptask);
+-
+-void h2_mplx_task_done(h2_mplx *m, struct h2_task *task, struct h2_task **ptask);
++void h2_mplx_m_release_and_join(h2_mplx *m, struct apr_thread_cond_t *wait);
+
+ /**
+ * Shut down the multiplexer gracefully. Will no longer schedule new streams
+ * but let the ongoing ones finish normally.
+ * @return the highest stream id being/been processed
+ */
+-int h2_mplx_shutdown(h2_mplx *m);
+-
+-int h2_mplx_is_busy(h2_mplx *m);
+-
+-/*******************************************************************************
+- * IO lifetime of streams.
+- ******************************************************************************/
+-
+-struct h2_stream *h2_mplx_stream_get(h2_mplx *m, int id);
++int h2_mplx_m_shutdown(h2_mplx *m);
+
+ /**
+ * Notifies mplx that a stream has been completely handled on the main
+@@ -151,20 +132,16 @@
+ * @param m the mplx itself
+ * @param stream the stream ready for cleanup
+ */
+-apr_status_t h2_mplx_stream_cleanup(h2_mplx *m, struct h2_stream *stream);
++apr_status_t h2_mplx_m_stream_cleanup(h2_mplx *m, struct h2_stream *stream);
+
+ /**
+ * Waits on output data from any stream in this session to become available.
+ * Returns APR_TIMEUP if no data arrived in the given time.
+ */
+-apr_status_t h2_mplx_out_trywait(h2_mplx *m, apr_interval_time_t timeout,
+- struct apr_thread_cond_t *iowait);
++apr_status_t h2_mplx_m_out_trywait(h2_mplx *m, apr_interval_time_t timeout,
++ struct apr_thread_cond_t *iowait);
+
+-apr_status_t h2_mplx_keep_active(h2_mplx *m, struct h2_stream *stream);
+-
+-/*******************************************************************************
+- * Stream processing.
+- ******************************************************************************/
++apr_status_t h2_mplx_m_keep_active(h2_mplx *m, struct h2_stream *stream);
+
+ /**
+ * Process a stream request.
+@@ -175,8 +152,8 @@
+ * @param cmp the stream priority compare function
+ * @param ctx context data for the compare function
+ */
+-apr_status_t h2_mplx_process(h2_mplx *m, struct h2_stream *stream,
+- h2_stream_pri_cmp *cmp, void *ctx);
++apr_status_t h2_mplx_m_process(h2_mplx *m, struct h2_stream *stream,
++ h2_stream_pri_cmp *cmp, void *ctx);
+
+ /**
+ * Stream priorities have changed, reschedule pending requests.
+@@ -185,7 +162,7 @@
+ * @param cmp the stream priority compare function
+ * @param ctx context data for the compare function
+ */
+-apr_status_t h2_mplx_reprioritize(h2_mplx *m, h2_stream_pri_cmp *cmp, void *ctx);
++apr_status_t h2_mplx_m_reprioritize(h2_mplx *m, h2_stream_pri_cmp *cmp, void *ctx);
+
+ typedef apr_status_t stream_ev_callback(void *ctx, struct h2_stream *stream);
+
+@@ -193,7 +170,7 @@
+ * Check if the multiplexer has events for the master connection pending.
+ * @return != 0 iff there are events pending
+ */
+-int h2_mplx_has_master_events(h2_mplx *m);
++int h2_mplx_m_has_master_events(h2_mplx *m);
+
+ /**
+ * Dispatch events for the master connection, such as
+@@ -201,130 +178,46 @@
+ * @param on_resume new output data has arrived for a suspended stream
+ * @param ctx user supplied argument to invocation.
+ */
+-apr_status_t h2_mplx_dispatch_master_events(h2_mplx *m,
+- stream_ev_callback *on_resume,
+- void *ctx);
++apr_status_t h2_mplx_m_dispatch_master_events(h2_mplx *m, stream_ev_callback *on_resume,
++ void *ctx);
+
+-int h2_mplx_awaits_data(h2_mplx *m);
++int h2_mplx_m_awaits_data(h2_mplx *m);
+
+ typedef int h2_mplx_stream_cb(struct h2_stream *s, void *ctx);
+
+-apr_status_t h2_mplx_stream_do(h2_mplx *m, h2_mplx_stream_cb *cb, void *ctx);
++apr_status_t h2_mplx_m_stream_do(h2_mplx *m, h2_mplx_stream_cb *cb, void *ctx);
+
+-/*******************************************************************************
+- * Output handling of streams.
+- ******************************************************************************/
++apr_status_t h2_mplx_m_client_rst(h2_mplx *m, int stream_id);
+
+ /**
+- * Opens the output for the given stream with the specified response.
++ * Master connection has entered idle mode.
++ * @param m the mplx instance of the master connection
++ * @return != SUCCESS iff connection should be terminated
+ */
+-apr_status_t h2_mplx_out_open(h2_mplx *mplx, int stream_id,
+- struct h2_bucket_beam *beam);
++apr_status_t h2_mplx_m_idle(h2_mplx *m);
+
+ /*******************************************************************************
+- * h2_mplx list Manipulation.
++ * From a secondary connection processing: h2_mplx_s_*
+ ******************************************************************************/
+-
+-/**
+- * The magic pointer value that indicates the head of a h2_mplx list
+- * @param b The mplx list
+- * @return The magic pointer value
+- */
+-#define H2_MPLX_LIST_SENTINEL(b) APR_RING_SENTINEL((b), h2_mplx, link)
+-
+-/**
+- * Determine if the mplx list is empty
+- * @param b The list to check
+- * @return true or false
+- */
+-#define H2_MPLX_LIST_EMPTY(b) APR_RING_EMPTY((b), h2_mplx, link)
+-
+-/**
+- * Return the first mplx in a list
+- * @param b The list to query
+- * @return The first mplx in the list
+- */
+-#define H2_MPLX_LIST_FIRST(b) APR_RING_FIRST(b)
+-
+-/**
+- * Return the last mplx in a list
+- * @param b The list to query
+- * @return The last mplx int he list
+- */
+-#define H2_MPLX_LIST_LAST(b) APR_RING_LAST(b)
+-
+-/**
+- * Insert a single mplx at the front of a list
+- * @param b The list to add to
+- * @param e The mplx to insert
+- */
+-#define H2_MPLX_LIST_INSERT_HEAD(b, e) do { \
+-h2_mplx *ap__b = (e); \
+-APR_RING_INSERT_HEAD((b), ap__b, h2_mplx, link); \
+-} while (0)
+-
+-/**
+- * Insert a single mplx at the end of a list
+- * @param b The list to add to
+- * @param e The mplx to insert
+- */
+-#define H2_MPLX_LIST_INSERT_TAIL(b, e) do { \
+-h2_mplx *ap__b = (e); \
+-APR_RING_INSERT_TAIL((b), ap__b, h2_mplx, link); \
+-} while (0)
+-
+-/**
+- * Get the next mplx in the list
+- * @param e The current mplx
+- * @return The next mplx
+- */
+-#define H2_MPLX_NEXT(e) APR_RING_NEXT((e), link)
+-/**
+- * Get the previous mplx in the list
+- * @param e The current mplx
+- * @return The previous mplx
+- */
+-#define H2_MPLX_PREV(e) APR_RING_PREV((e), link)
+-
+-/**
+- * Remove a mplx from its list
+- * @param e The mplx to remove
+- */
+-#define H2_MPLX_REMOVE(e) APR_RING_REMOVE((e), link)
++apr_status_t h2_mplx_s_pop_task(h2_mplx *m, struct h2_task **ptask);
++void h2_mplx_s_task_done(h2_mplx *m, struct h2_task *task, struct h2_task **ptask);
+
+ /*******************************************************************************
+- * h2_mplx DoS protection
++ * From a h2_task owner: h2_mplx_t_*
++ * (a task is transferred from master to secondary connection and back in
++ * its normal lifetime).
+ ******************************************************************************/
+
+ /**
+- * Master connection has entered idle mode.
+- * @param m the mplx instance of the master connection
+- * @return != SUCCESS iff connection should be terminated
++ * Opens the output for the given stream with the specified response.
+ */
+-apr_status_t h2_mplx_idle(h2_mplx *m);
++apr_status_t h2_mplx_t_out_open(h2_mplx *mplx, int stream_id,
++ struct h2_bucket_beam *beam);
+
+-/*******************************************************************************
+- * h2_req_engine handling
+- ******************************************************************************/
++/**
++ * Get the stream that belongs to the given task.
++ */
++struct h2_stream *h2_mplx_t_stream_get(h2_mplx *m, struct h2_task *task);
+
+-typedef void h2_output_consumed(void *ctx, conn_rec *c, apr_off_t consumed);
+-typedef apr_status_t h2_mplx_req_engine_init(struct h2_req_engine *engine,
+- const char *id,
+- const char *type,
+- apr_pool_t *pool,
+- apr_size_t req_buffer_size,
+- request_rec *r,
+- h2_output_consumed **pconsumed,
+- void **pbaton);
+-
+-apr_status_t h2_mplx_req_engine_push(const char *ngn_type,
+- request_rec *r,
+- h2_mplx_req_engine_init *einit);
+-apr_status_t h2_mplx_req_engine_pull(struct h2_req_engine *ngn,
+- apr_read_type_e block,
+- int capacity,
+- request_rec **pr);
+-void h2_mplx_req_engine_done(struct h2_req_engine *ngn, conn_rec *r_conn,
+- apr_status_t status);
+
+ #endif /* defined(__mod_h2__h2_mplx__) */
+--- a/modules/http2/h2_proxy_session.c
++++ b/modules/http2/h2_proxy_session.c
+@@ -45,6 +45,7 @@
+ unsigned int suspended : 1;
+ unsigned int waiting_on_100 : 1;
+ unsigned int waiting_on_ping : 1;
++ unsigned int headers_ended : 1;
+ uint32_t error_code;
+
+ apr_bucket_brigade *input;
+@@ -61,7 +62,123 @@
+ static void ping_arrived(h2_proxy_session *session);
+ static apr_status_t check_suspended(h2_proxy_session *session);
+ static void stream_resume(h2_proxy_stream *stream);
++static apr_status_t submit_trailers(h2_proxy_stream *stream);
+
++/*
++ * The H2_PING connection sub-state: a state independent of the H2_SESSION state
++ * of the connection:
++ * - H2_PING_ST_NONE: no interference with request handling, ProxyTimeout in effect.
++ * When entered, all suspended streams are unsuspended again.
++ * - H2_PING_ST_AWAIT_ANY: new requests are suspended, a possibly configured "ping"
++ * timeout is in effect. Any frame received transitions to H2_PING_ST_NONE.
++ * - H2_PING_ST_AWAIT_PING: same as above, but only a PING frame transitions
++ * to H2_PING_ST_NONE.
++ *
++ * An AWAIT state is entered on a new connection, or when re-using a connection
++ * whose last frame was received some time ago. The latter sends a PING frame
++ * and insists on an answer; the former is satisfied by any frame received from
++ * the backend.
++ *
++ * This works for new connections as there is always at least one SETTINGS frame
++ * that the backend sends. When re-using a connection, we send a PING and insist on
++ * receiving one back, as there might be frames in our connection buffers from
++ * some time ago. Since some servers have protections against PING flooding, we
++ * only ever have one PING unanswered.
++ *
++ * Requests are suspended while in a PING state, as we do not want to send data
++ * before we can be reasonably sure that the connection is working (at least on
++ * the h2 protocol level). This also means that the session can do blocking reads
++ * when expecting PING answers.
++ */
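++/* In short, the transitions described above are:
++ * NONE --(new connection)-----------------------> AWAIT_ANY
++ * NONE --(reuse, no frame for more than 1s)-----> AWAIT_PING (sends a PING)
++ * AWAIT_ANY --(any frame received)--------------> NONE
++ * AWAIT_PING --(PING frame received)------------> NONE
++ */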
++static void set_ping_timeout(h2_proxy_session *session)
++{
++ if (session->ping_timeout != -1 && session->save_timeout == -1) {
++ apr_socket_t *socket = NULL;
++
++ socket = ap_get_conn_socket(session->c);
++ if (socket) {
++ apr_socket_timeout_get(socket, &session->save_timeout);
++ apr_socket_timeout_set(socket, session->ping_timeout);
++ }
++ }
++}
++
++static void unset_ping_timeout(h2_proxy_session *session)
++{
++ if (session->save_timeout != -1) {
++ apr_socket_t *socket = NULL;
++
++ socket = ap_get_conn_socket(session->c);
++ if (socket) {
++ apr_socket_timeout_set(socket, session->save_timeout);
++ session->save_timeout = -1;
++ }
++ }
++}
++
++static void enter_ping_state(h2_proxy_session *session, h2_ping_state_t state)
++{
++ if (session->ping_state == state) return;
++ switch (session->ping_state) {
++ case H2_PING_ST_NONE:
++ /* leaving NONE, enforce timeout, send frame maybe */
++ if (H2_PING_ST_AWAIT_PING == state) {
++ unset_ping_timeout(session);
++ nghttp2_submit_ping(session->ngh2, 0, (const uint8_t *)"nevergonnagiveyouup");
++ }
++ set_ping_timeout(session);
++ session->ping_state = state;
++ break;
++ default:
++ /* no switching between the != NONE states */
++ if (H2_PING_ST_NONE == state) {
++ session->ping_state = state;
++ unset_ping_timeout(session);
++ ping_arrived(session);
++ }
++ break;
++ }
++}
++
++static void ping_new_session(h2_proxy_session *session, proxy_conn_rec *p_conn)
++{
++ session->save_timeout = -1;
++ session->ping_timeout = (p_conn->worker->s->ping_timeout_set?
++ p_conn->worker->s->ping_timeout : -1);
++ session->ping_state = H2_PING_ST_NONE;
++ enter_ping_state(session, H2_PING_ST_AWAIT_ANY);
++}
++
++static void ping_reuse_session(h2_proxy_session *session)
++{
++ if (H2_PING_ST_NONE == session->ping_state) {
++ apr_interval_time_t age = apr_time_now() - session->last_frame_received;
++ if (age > apr_time_from_sec(1)) {
++ enter_ping_state(session, H2_PING_ST_AWAIT_PING);
++ }
++ }
++}
++
++static void ping_ev_frame_received(h2_proxy_session *session, const nghttp2_frame *frame)
++{
++ session->last_frame_received = apr_time_now();
++ switch (session->ping_state) {
++ case H2_PING_ST_NONE:
++ /* nop */
++ break;
++ case H2_PING_ST_AWAIT_ANY:
++ enter_ping_state(session, H2_PING_ST_NONE);
++ break;
++ case H2_PING_ST_AWAIT_PING:
++ if (NGHTTP2_PING == frame->hd.type) {
++ enter_ping_state(session, H2_PING_ST_NONE);
++ }
++ /* we may receive many other frames while we are waiting for the
++ * PING answer. They may all come from our connection buffers and
++ * say nothing about the current state of the backend. */
++ break;
++ }
++}
+
+ static apr_status_t proxy_session_pre_close(void *theconn)
+ {
+@@ -152,7 +269,8 @@
+ session->id, buffer);
+ }
+
+- session->last_frame_received = apr_time_now();
++ ping_ev_frame_received(session, frame);
++ /* Action for frame types: */
+ switch (frame->hd.type) {
+ case NGHTTP2_HEADERS:
+ stream = nghttp2_session_get_stream_user_data(ngh2, frame->hd.stream_id);
+@@ -193,10 +311,6 @@
+ stream_resume(stream);
+ break;
+ case NGHTTP2_PING:
+- if (session->check_ping) {
+- session->check_ping = 0;
+- ping_arrived(session);
+- }
+ break;
+ case NGHTTP2_PUSH_PROMISE:
+ break;
+@@ -241,7 +355,8 @@
+ return 1;
+ }
+
+-static void process_proxy_header(h2_proxy_stream *stream, const char *n, const char *v)
++static void process_proxy_header(apr_table_t *headers, h2_proxy_stream *stream,
++ const char *n, const char *v)
+ {
+ static const struct {
+ const char *name;
+@@ -262,20 +377,18 @@
+ if (!dconf->preserve_host) {
+ for (i = 0; transform_hdrs[i].name; ++i) {
+ if (!ap_cstr_casecmp(transform_hdrs[i].name, n)) {
+- apr_table_add(r->headers_out, n,
+- (*transform_hdrs[i].func)(r, dconf, v));
++ apr_table_add(headers, n, (*transform_hdrs[i].func)(r, dconf, v));
+ return;
+ }
+ }
+ if (!ap_cstr_casecmp("Link", n)) {
+ dconf = ap_get_module_config(r->per_dir_config, &proxy_module);
+- apr_table_add(r->headers_out, n,
+- h2_proxy_link_reverse_map(r, dconf,
+- stream->real_server_uri, stream->p_server_uri, v));
++ apr_table_add(headers, n, h2_proxy_link_reverse_map(r, dconf,
++ stream->real_server_uri, stream->p_server_uri, v));
+ return;
+ }
+ }
+- apr_table_add(r->headers_out, n, v);
++ apr_table_add(headers, n, v);
+ }
+
+ static apr_status_t h2_proxy_stream_add_header_out(h2_proxy_stream *stream,
+@@ -299,8 +412,13 @@
+ return APR_SUCCESS;
+ }
+
++ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, stream->session->c,
++ "h2_proxy_stream(%s-%d): on_header %s: %s",
++ stream->session->id, stream->id, n, v);
+ if (!h2_proxy_res_ignore_header(n, nlen)) {
+ char *hname, *hvalue;
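++ /* fields arriving after the final (>= 200) response header block
++ * are trailers; headers_ended is set once such a status is seen */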
++ apr_table_t *headers = (stream->headers_ended?
++ stream->r->trailers_out : stream->r->headers_out);
+
+ hname = apr_pstrndup(stream->pool, n, nlen);
+ h2_proxy_util_camel_case_header(hname, nlen);
+@@ -309,7 +427,7 @@
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, stream->session->c,
+ "h2_proxy_stream(%s-%d): got header %s: %s",
+ stream->session->id, stream->id, hname, hvalue);
+- process_proxy_header(stream, hname, hvalue);
++ process_proxy_header(headers, stream, hname, hvalue);
+ }
+ return APR_SUCCESS;
+ }
+@@ -374,6 +492,7 @@
+ server_name, portstr)
+ );
+ }
++ if (r->status >= 200) stream->headers_ended = 1;
+
+ if (APLOGrtrace2(stream->r)) {
+ ap_log_rerror(APLOG_MARK, APLOG_TRACE2, 0, stream->r,
+@@ -429,12 +548,6 @@
+ stream_id, NGHTTP2_STREAM_CLOSED);
+ return NGHTTP2_ERR_STREAM_CLOSING;
+ }
+- if (stream->standalone) {
+- nghttp2_session_consume(ngh2, stream_id, len);
+- ap_log_rerror(APLOG_MARK, APLOG_TRACE2, 0, stream->r,
+- "h2_proxy_session(%s): stream %d, win_update %d bytes",
+- session->id, stream_id, (int)len);
+- }
+ return 0;
+ }
+
+@@ -493,12 +606,12 @@
+ stream = nghttp2_session_get_stream_user_data(ngh2, stream_id);
+ if (!stream) {
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, ap_server_conf, APLOGNO(03361)
+- "h2_proxy_stream(%s): data_read, stream %d not found",
+- stream->session->id, stream_id);
++ "h2_proxy_stream(NULL): data_read, stream %d not found",
++ stream_id);
+ return NGHTTP2_ERR_CALLBACK_FAILURE;
+ }
+
+- if (stream->session->check_ping) {
++ if (stream->session->ping_state != H2_PING_ST_NONE) {
+ /* suspend until we hear from the other side */
+ stream->waiting_on_ping = 1;
+ status = APR_EAGAIN;
+@@ -553,9 +666,14 @@
+ stream->data_sent += readlen;
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, status, stream->r, APLOGNO(03468)
+ "h2_proxy_stream(%d): request DATA %ld, %ld"
+- " total, flags=%d",
+- stream->id, (long)readlen, (long)stream->data_sent,
++ " total, flags=%d", stream->id, (long)readlen, (long)stream->data_sent,
+ (int)*data_flags);
++ if ((*data_flags & NGHTTP2_DATA_FLAG_EOF) && !apr_is_empty_table(stream->r->trailers_in)) {
++ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, status, stream->r, APLOGNO(10179)
++ "h2_proxy_stream(%d): submit trailers", stream->id);
++ *data_flags |= NGHTTP2_DATA_FLAG_NO_END_STREAM;
++ submit_trailers(stream);
++ }
+ return readlen;
+ }
+ else if (APR_STATUS_IS_EAGAIN(status)) {
+@@ -641,23 +759,20 @@
+
+ nghttp2_option_new(&option);
+ nghttp2_option_set_peer_max_concurrent_streams(option, 100);
+- nghttp2_option_set_no_auto_window_update(option, 1);
++ nghttp2_option_set_no_auto_window_update(option, 0);
+
+ nghttp2_session_client_new2(&session->ngh2, cbs, session, option);
+
+ nghttp2_option_del(option);
+ nghttp2_session_callbacks_del(cbs);
+
++ ping_new_session(session, p_conn);
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, APLOGNO(03362)
+ "setup session for %s", p_conn->hostname);
+ }
+ else {
+ h2_proxy_session *session = p_conn->data;
+- apr_interval_time_t age = apr_time_now() - session->last_frame_received;
+- if (age > apr_time_from_sec(1)) {
+- session->check_ping = 1;
+- nghttp2_submit_ping(session->ngh2, 0, (const uint8_t *)"nevergonnagiveyouup");
+- }
++ ping_reuse_session(session);
+ }
+ return p_conn->data;
+ }
+@@ -740,6 +855,8 @@
+ stream->real_server_uri = apr_psprintf(stream->pool, "%s://%s", scheme, authority);
+ stream->p_server_uri = apr_psprintf(stream->pool, "%s://%s", puri.scheme, authority);
+ path = apr_uri_unparse(stream->pool, &puri, APR_URI_UNP_OMITSITEPART);
++
++
+ h2_proxy_req_make(stream->req, stream->pool, r->method, scheme,
+ authority, path, r->headers_in);
+
+@@ -826,6 +943,16 @@
+ return APR_EGENERAL;
+ }
+
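++/* Submit the request's trailer fields (r->trailers_in) to the backend
++ * as an HTTP/2 trailer HEADERS frame via nghttp2_submit_trailer(). */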
++static apr_status_t submit_trailers(h2_proxy_stream *stream)
++{
++ h2_proxy_ngheader *hd;
++ int rv;
++
++ hd = h2_proxy_util_nghd_make(stream->pool, stream->r->trailers_in);
++ rv = nghttp2_submit_trailer(stream->session->ngh2, stream->id, hd->nv, hd->nvlen);
++ return rv == 0? APR_SUCCESS: APR_EGENERAL;
++}
++
+ static apr_status_t feed_brigade(h2_proxy_session *session, apr_bucket_brigade *bb)
+ {
+ apr_status_t status = APR_SUCCESS;
+@@ -882,7 +1009,7 @@
+ apr_socket_t *socket = NULL;
+ apr_time_t save_timeout = -1;
+
+- if (block) {
++ if (block && timeout > 0) {
+ socket = ap_get_conn_socket(session->c);
+ if (socket) {
+ apr_socket_timeout_get(socket, &save_timeout);
+@@ -954,6 +1081,14 @@
+ dispatch_event(session, H2_PROXYS_EV_STREAM_RESUMED, 0, NULL);
+ }
+
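++/* True iff there is nothing to send and we only wait on the backend:
++ * either a PING/initial frame from the backend is still awaited, or no
++ * stream is suspended and the nghttp2 session wants to read, not write. */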
++static int is_waiting_for_backend(h2_proxy_session *session)
++{
++ return ((session->ping_state != H2_PING_ST_NONE)
++ || ((session->suspended->nelts <= 0)
++ && !nghttp2_session_want_write(session->ngh2)
++ && nghttp2_session_want_read(session->ngh2)));
++}
++
+ static apr_status_t check_suspended(h2_proxy_session *session)
+ {
+ h2_proxy_stream *stream;
+@@ -1408,7 +1543,22 @@
+ break;
+
+ case H2_PROXYS_ST_WAIT:
+- if (check_suspended(session) == APR_EAGAIN) {
++ if (is_waiting_for_backend(session)) {
++ /* we can do a blocking read with the default timeout (as
++ * configured via ProxyTimeout in our socket. There is
++ * nothing we want to send or check until we get more data
++ * from the backend. */
++ status = h2_proxy_session_read(session, 1, 0);
++ if (status == APR_SUCCESS) {
++ have_read = 1;
++ dispatch_event(session, H2_PROXYS_EV_DATA_READ, 0, NULL);
++ }
++ else {
++ dispatch_event(session, H2_PROXYS_EV_CONN_ERROR, status, NULL);
++ return status;
++ }
++ }
++ else if (check_suspended(session) == APR_EAGAIN) {
+ /* no stream has become resumed. Do a blocking read with
+ * ever increasing timeouts... */
+ if (session->wait_timeout < 25) {
+@@ -1423,7 +1573,7 @@
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE3, status, session->c,
+ APLOGNO(03365)
+ "h2_proxy_session(%s): WAIT read, timeout=%fms",
+- session->id, (float)session->wait_timeout/1000.0);
++ session->id, session->wait_timeout/1000.0);
+ if (status == APR_SUCCESS) {
+ have_read = 1;
+ dispatch_event(session, H2_PROXYS_EV_DATA_READ, 0, NULL);
+@@ -1543,42 +1693,3 @@
+ int updated;
+ } win_update_ctx;
+
+-static int win_update_iter(void *udata, void *val)
+-{
+- win_update_ctx *ctx = udata;
+- h2_proxy_stream *stream = val;
+-
+- if (stream->r && stream->r->connection == ctx->c) {
+- ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, ctx->session->c,
+- "h2_proxy_session(%s-%d): win_update %ld bytes",
+- ctx->session->id, (int)stream->id, (long)ctx->bytes);
+- nghttp2_session_consume(ctx->session->ngh2, stream->id, ctx->bytes);
+- ctx->updated = 1;
+- return 0;
+- }
+- return 1;
+-}
+-
+-
+-void h2_proxy_session_update_window(h2_proxy_session *session,
+- conn_rec *c, apr_off_t bytes)
+-{
+- if (!h2_proxy_ihash_empty(session->streams)) {
+- win_update_ctx ctx;
+- ctx.session = session;
+- ctx.c = c;
+- ctx.bytes = bytes;
+- ctx.updated = 0;
+- h2_proxy_ihash_iter(session->streams, win_update_iter, &ctx);
+-
+- if (!ctx.updated) {
+- /* could not find the stream any more, possibly closed, update
+- * the connection window at least */
+- ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, session->c,
+- "h2_proxy_session(%s): win_update conn %ld bytes",
+- session->id, (long)bytes);
+- nghttp2_session_consume_connection(session->ngh2, (size_t)bytes);
+- }
+- }
+-}
+-
+--- a/modules/http2/h2_proxy_session.h
++++ b/modules/http2/h2_proxy_session.h
+@@ -60,6 +60,11 @@
+ H2_PROXYS_EV_PRE_CLOSE, /* connection will close after this */
+ } h2_proxys_event_t;
+
++typedef enum {
++ H2_PING_ST_NONE, /* normal connection mode, ProxyTimeout rules */
++ H2_PING_ST_AWAIT_ANY, /* waiting for any frame from backend */
++ H2_PING_ST_AWAIT_PING, /* waiting for PING frame from backend */
++} h2_ping_state_t;
+
+ typedef struct h2_proxy_session h2_proxy_session;
+ typedef void h2_proxy_request_done(h2_proxy_session *s, request_rec *r,
+@@ -74,7 +79,6 @@
+ nghttp2_session *ngh2; /* the nghttp2 session itself */
+
+ unsigned int aborted : 1;
+- unsigned int check_ping : 1;
+ unsigned int h2_front : 1; /* if front-end connection is HTTP/2 */
+
+ h2_proxy_request_done *done;
+@@ -94,6 +98,10 @@
+
+ apr_bucket_brigade *input;
+ apr_bucket_brigade *output;
++
++ h2_ping_state_t ping_state;
++ apr_time_t ping_timeout;
++ apr_time_t save_timeout;
+ };
+
+ h2_proxy_session *h2_proxy_session_setup(const char *id, proxy_conn_rec *p_conn,
+@@ -120,9 +128,6 @@
+
+ void h2_proxy_session_cleanup(h2_proxy_session *s, h2_proxy_request_done *done);
+
+-void h2_proxy_session_update_window(h2_proxy_session *s,
+- conn_rec *c, apr_off_t bytes);
+-
+ #define H2_PROXY_REQ_URL_NOTE "h2-proxy-req-url"
+
+ #endif /* h2_proxy_session_h */
+--- a/modules/http2/h2_proxy_util.c
++++ b/modules/http2/h2_proxy_util.c
+@@ -452,6 +452,22 @@
+ return ngh;
+ }
+
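++/* Convert an apr_table_t (here: trailer fields) into an nghttp2
++ * name/value array allocated from pool p. */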
++h2_proxy_ngheader *h2_proxy_util_nghd_make(apr_pool_t *p, apr_table_t *headers)
++{
++
++ h2_proxy_ngheader *ngh;
++ size_t n;
++
++ n = 0;
++ apr_table_do(count_header, &n, headers, NULL);
++
++ ngh = apr_pcalloc(p, sizeof(h2_proxy_ngheader));
++ ngh->nv = apr_pcalloc(p, n * sizeof(nghttp2_nv));
++ apr_table_do(add_table_header, ngh, headers, NULL);
++
++ return ngh;
++}
++
+ /*******************************************************************************
+ * header HTTP/1 <-> HTTP/2 conversions
+ ******************************************************************************/
+@@ -609,6 +625,7 @@
+ apr_table_t *headers)
+ {
+ h1_ctx x;
++ const char *val;
+
+ req->method = method;
+ req->scheme = scheme;
+@@ -623,6 +640,11 @@
+ x.pool = pool;
+ x.headers = req->headers;
+ apr_table_do(set_h1_header, &x, headers, NULL);
++ if ((val = apr_table_get(headers, "TE")) && ap_find_token(pool, val, "trailers")) {
++ /* client accepts trailers, forward this information */
++ apr_table_addn(req->headers, "TE", "trailers");
++ }
++ apr_table_setn(req->headers, "te", "trailers");
+ return APR_SUCCESS;
+ }
+
+@@ -915,12 +937,12 @@
+ nlen = (int)strlen(ns);
+ delta = nlen - olen;
+ plen = ctx->slen + delta + 1;
+- p = apr_pcalloc(ctx->pool, plen);
++ p = apr_palloc(ctx->pool, plen);
+ memcpy(p, ctx->s, start);
+ memcpy(p + start, ns, nlen);
+ strcpy(p + start + nlen, ctx->s + end);
+ ctx->s = p;
+- ctx->slen = (int)strlen(p);
++ ctx->slen = plen - 1; /* (int)strlen(p) */
+ if (ctx->i >= end) {
+ ctx->i += delta;
+ }
+--- a/modules/http2/h2_proxy_util.h
++++ b/modules/http2/h2_proxy_util.h
+@@ -168,6 +168,8 @@
+ h2_proxy_ngheader *h2_proxy_util_nghd_make_req(apr_pool_t *p,
+ const struct h2_proxy_request *req);
+
++h2_proxy_ngheader *h2_proxy_util_nghd_make(apr_pool_t *p, apr_table_t *headers);
++
+ /*******************************************************************************
+ * h2_proxy_request helpers
+ ******************************************************************************/
+@@ -183,7 +185,7 @@
+
+ apr_time_t request_time;
+
+- unsigned int chunked : 1; /* iff requst body needs to be forwarded as chunked */
++ unsigned int chunked : 1; /* iff request body needs to be forwarded as chunked */
+ unsigned int serialize : 1; /* iff this request is written in HTTP/1.1 serialization */
+ };
+
+--- a/modules/http2/h2_push.c
++++ b/modules/http2/h2_push.c
+@@ -464,33 +464,6 @@
+ return NULL;
+ }
+
+-/*******************************************************************************
+- * push diary
+- *
+- * - The push diary keeps track of resources already PUSHed via HTTP/2 on this
+- * connection. It records a hash value from the absolute URL of the resource
+- * pushed.
+- * - Lacking openssl, it uses 'apr_hashfunc_default' for the value
+- * - with openssl, it uses SHA256 to calculate the hash value
+- * - whatever the method to generate the hash, the diary keeps a maximum of 64
+- * bits per hash, limiting the memory consumption to about
+- * H2PushDiarySize * 8
+- * bytes. Entries are sorted by most recently used and oldest entries are
+- * forgotten first.
+- * - Clients can initialize/replace the push diary by sending a 'Cache-Digest'
+- * header. Currently, this is the base64url encoded value of the cache digest
+- * as specified in https://datatracker.ietf.org/doc/draft-kazuho-h2-cache-digest/
+- * This draft can be expected to evolve and the definition of the header
+- * will be added there and refined.
+- * - The cache digest header is a Golomb Coded Set of hash values, but it may
+- * limit the amount of bits per hash value even further. For a good description
+- * of GCS, read here:
+- * http://giovanni.bajo.it/post/47119962313/golomb-coded-sets-smaller-than-bloom-filters
+- * - The means that the push diary might be initialized with hash values of much
+- * less than 64 bits, leading to more false positives, but smaller digest size.
+- ******************************************************************************/
+-
+-
+ #define GCSLOG_LEVEL APLOG_TRACE1
+
+ typedef struct h2_push_diary_entry {
+@@ -617,38 +590,48 @@
+ return -1;
+ }
+
+-static h2_push_diary_entry *move_to_last(h2_push_diary *diary, apr_size_t idx)
++static void move_to_last(h2_push_diary *diary, apr_size_t idx)
+ {
+ h2_push_diary_entry *entries = (h2_push_diary_entry*)diary->entries->elts;
+ h2_push_diary_entry e;
+- apr_size_t lastidx = diary->entries->nelts-1;
++ int lastidx;
+
++ /* Move an existing entry to the last place */
++ if (diary->entries->nelts <= 0)
++ return;
++
+ /* move entry[idx] to the end */
++ lastidx = diary->entries->nelts - 1;
+ if (idx < lastidx) {
+ e = entries[idx];
+- memmove(entries+idx, entries+idx+1, sizeof(e) * (lastidx - idx));
++ memmove(entries+idx, entries+idx+1, sizeof(h2_push_diary_entry) * (lastidx - idx));
+ entries[lastidx] = e;
+ }
+- return &entries[lastidx];
+ }
+
+-static void h2_push_diary_append(h2_push_diary *diary, h2_push_diary_entry *e)
++static void remove_first(h2_push_diary *diary)
+ {
+- h2_push_diary_entry *ne;
++ h2_push_diary_entry *entries = (h2_push_diary_entry*)diary->entries->elts;
++ int lastidx;
+
+- if (diary->entries->nelts < diary->N) {
+- /* append a new diary entry at the end */
+- APR_ARRAY_PUSH(diary->entries, h2_push_diary_entry) = *e;
+- ne = &APR_ARRAY_IDX(diary->entries, diary->entries->nelts-1, h2_push_diary_entry);
+- }
+- else {
+- /* replace content with new digest. keeps memory usage constant once diary is full */
+- ne = move_to_last(diary, 0);
+- *ne = *e;
++ /* move remaining entries to index 0 */
++ lastidx = diary->entries->nelts - 1;
++ if (lastidx > 0) {
++ --diary->entries->nelts;
++ memmove(entries, entries+1, sizeof(h2_push_diary_entry) * diary->entries->nelts);
+ }
++}
++
++static void h2_push_diary_append(h2_push_diary *diary, h2_push_diary_entry *e)
++{
++ while (diary->entries->nelts >= diary->N) {
++ remove_first(diary);
++ }
++ /* append a new diary entry at the end */
++ APR_ARRAY_PUSH(diary->entries, h2_push_diary_entry) = *e;
+ /* Intentional no APLOGNO */
+ ap_log_perror(APLOG_MARK, GCSLOG_LEVEL, 0, diary->entries->pool,
+- "push_diary_append: %"APR_UINT64_T_HEX_FMT, ne->hash);
++ "push_diary_append: %"APR_UINT64_T_HEX_FMT, e->hash);
+ }
+
+ apr_array_header_t *h2_push_diary_update(h2_session *session, apr_array_header_t *pushes)
+@@ -691,30 +674,12 @@
+ const struct h2_request *req,
+ const struct h2_headers *res)
+ {
+- h2_session *session = stream->session;
+- const char *cache_digest = apr_table_get(req->headers, "Cache-Digest");
+ apr_array_header_t *pushes;
+- apr_status_t status;
+
+- if (cache_digest && session->push_diary) {
+- status = h2_push_diary_digest64_set(session->push_diary, req->authority,
+- cache_digest, stream->pool);
+- if (status != APR_SUCCESS) {
+- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, session->c,
+- H2_SSSN_LOG(APLOGNO(03057), session,
+- "push diary set from Cache-Digest: %s"), cache_digest);
+- }
+- }
+ pushes = h2_push_collect(stream->pool, req, stream->push_policy, res);
+ return h2_push_diary_update(stream->session, pushes);
+ }
+
+-static apr_int32_t h2_log2inv(unsigned char log2)
+-{
+- return log2? (1 << log2) : 1;
+-}
+-
+-
+ typedef struct {
+ h2_push_diary *diary;
+ unsigned char log2p;
+@@ -829,16 +794,11 @@
+ apr_size_t hash_count;
+
+ nelts = diary->entries->nelts;
+-
+- if (nelts > APR_UINT32_MAX) {
+- /* should not happen */
+- return APR_ENOTIMPL;
+- }
+ N = ceil_power_of_2(nelts);
+ log2n = h2_log2(N);
+
+ /* Now log2p is the max number of relevant bits, so that
+- * log2p + log2n == mask_bits. We can uise a lower log2p
++ * log2p + log2n == mask_bits. We can use a lower log2p
+ * and have a shorter set encoding...
+ */
+ log2pmax = h2_log2(ceil_power_of_2(maxP));
+@@ -895,166 +855,3 @@
+ return APR_SUCCESS;
+ }
+
+-typedef struct {
+- h2_push_diary *diary;
+- apr_pool_t *pool;
+- unsigned char log2p;
+- const unsigned char *data;
+- apr_size_t datalen;
+- apr_size_t offset;
+- unsigned int bit;
+- apr_uint64_t last_val;
+-} gset_decoder;
+-
+-static int gset_decode_next_bit(gset_decoder *decoder)
+-{
+- if (++decoder->bit >= 8) {
+- if (++decoder->offset >= decoder->datalen) {
+- return -1;
+- }
+- decoder->bit = 0;
+- }
+- return (decoder->data[decoder->offset] & cbit_mask[decoder->bit])? 1 : 0;
+-}
+-
+-static apr_status_t gset_decode_next(gset_decoder *decoder, apr_uint64_t *phash)
+-{
+- apr_uint64_t flex = 0, fixed = 0, delta;
+- int i;
+-
+- /* read 1 bits until we encounter 0, then read log2n(diary-P) bits.
+- * On a malformed bit-string, this will not fail, but produce results
+- * which are pbly too large. Luckily, the diary will modulo the hash.
+- */
+- while (1) {
+- int bit = gset_decode_next_bit(decoder);
+- if (bit == -1) {
+- return APR_EINVAL;
+- }
+- if (!bit) {
+- break;
+- }
+- ++flex;
+- }
+-
+- for (i = 0; i < decoder->log2p; ++i) {
+- int bit = gset_decode_next_bit(decoder);
+- if (bit == -1) {
+- return APR_EINVAL;
+- }
+- fixed = (fixed << 1) | bit;
+- }
+-
+- delta = (flex << decoder->log2p) | fixed;
+- *phash = delta + decoder->last_val;
+- decoder->last_val = *phash;
+-
+- /* Intentional no APLOGNO */
+- ap_log_perror(APLOG_MARK, GCSLOG_LEVEL, 0, decoder->pool,
+- "h2_push_diary_digest_dec: val=%"APR_UINT64_T_HEX_FMT", delta=%"
+- APR_UINT64_T_HEX_FMT", flex=%d, fixed=%"APR_UINT64_T_HEX_FMT,
+- *phash, delta, (int)flex, fixed);
+-
+- return APR_SUCCESS;
+-}
+-
+-/**
+- * Initialize the push diary by a cache digest as described in
+- * https://datatracker.ietf.org/doc/draft-kazuho-h2-cache-digest/
+- * .
+- * @param diary the diary to set the digest into
+- * @param data the binary cache digest
+- * @param len the length of the cache digest
+- * @return APR_EINVAL if digest was not successfully parsed
+- */
+-apr_status_t h2_push_diary_digest_set(h2_push_diary *diary, const char *authority,
+- const char *data, apr_size_t len)
+-{
+- gset_decoder decoder;
+- unsigned char log2n, log2p;
+- int N, i;
+- apr_pool_t *pool = diary->entries->pool;
+- h2_push_diary_entry e;
+- apr_status_t status = APR_SUCCESS;
+-
+- if (len < 2) {
+- /* at least this should be there */
+- return APR_EINVAL;
+- }
+- log2n = data[0];
+- log2p = data[1];
+- diary->mask_bits = log2n + log2p;
+- if (diary->mask_bits > 64) {
+- /* cannot handle */
+- return APR_ENOTIMPL;
+- }
+-
+- /* whatever is in the digest, it replaces the diary entries */
+- apr_array_clear(diary->entries);
+- if (!authority || !strcmp("*", authority)) {
+- diary->authority = NULL;
+- }
+- else if (!diary->authority || strcmp(diary->authority, authority)) {
+- diary->authority = apr_pstrdup(diary->entries->pool, authority);
+- }
+-
+- N = h2_log2inv(log2n + log2p);
+-
+- decoder.diary = diary;
+- decoder.pool = pool;
+- decoder.log2p = log2p;
+- decoder.data = (const unsigned char*)data;
+- decoder.datalen = len;
+- decoder.offset = 1;
+- decoder.bit = 8;
+- decoder.last_val = 0;
+-
+- diary->N = N;
+- /* Determine effective N we use for storage */
+- if (!N) {
+- /* a totally empty cache digest. someone tells us that she has no
+- * entries in the cache at all. Use our own preferences for N+mask
+- */
+- diary->N = diary->NMax;
+- return APR_SUCCESS;
+- }
+- else if (N > diary->NMax) {
+- /* Store not more than diary is configured to hold. We open us up
+- * to DOS attacks otherwise. */
+- diary->N = diary->NMax;
+- }
+-
+- /* Intentional no APLOGNO */
+- ap_log_perror(APLOG_MARK, GCSLOG_LEVEL, 0, pool,
+- "h2_push_diary_digest_set: N=%d, log2n=%d, "
+- "diary->mask_bits=%d, dec.log2p=%d",
+- (int)diary->N, (int)log2n, diary->mask_bits,
+- (int)decoder.log2p);
+-
+- for (i = 0; i < diary->N; ++i) {
+- if (gset_decode_next(&decoder, &e.hash) != APR_SUCCESS) {
+- /* the data may have less than N values */
+- break;
+- }
+- h2_push_diary_append(diary, &e);
+- }
+-
+- /* Intentional no APLOGNO */
+- ap_log_perror(APLOG_MARK, GCSLOG_LEVEL, 0, pool,
+- "h2_push_diary_digest_set: diary now with %d entries, mask_bits=%d",
+- (int)diary->entries->nelts, diary->mask_bits);
+- return status;
+-}
+-
+-apr_status_t h2_push_diary_digest64_set(h2_push_diary *diary, const char *authority,
+- const char *data64url, apr_pool_t *pool)
+-{
+- const char *data;
+- apr_size_t len = h2_util_base64url_decode(&data, data64url, pool);
+- /* Intentional no APLOGNO */
+- ap_log_perror(APLOG_MARK, GCSLOG_LEVEL, 0, pool,
+- "h2_push_diary_digest64_set: digest=%s, dlen=%d",
+- data64url, (int)len);
+- return h2_push_diary_digest_set(diary, authority, data, len);
+-}
+-
+--- a/modules/http2/h2_push.h
++++ b/modules/http2/h2_push.h
+@@ -35,6 +35,44 @@
+ H2_PUSH_DIGEST_SHA256
+ } h2_push_digest_type;
+
++/*******************************************************************************
++ * push diary
++ *
++ * - The push diary keeps track of resources already PUSHed via HTTP/2 on this
++ * connection. It records a hash value from the absolute URL of the resource
++ * pushed.
++ * - with openssl, it uses SHA256 to calculate the hash value; lacking
++ * openssl, it falls back to apr_hashfunc_default()
++ * - whatever the method to generate the hash, the diary keeps a maximum of 64
++ * bits per hash, limiting the memory consumption to about
++ * H2PushDiarySize * 8
++ * bytes. Entries are sorted by most recently used and oldest entries are
++ * forgotten first.
++ * - While useful by itself to avoid duplicated PUSHes on the same connection,
++ * the original idea was that clients provided a 'Cache-Digest' header with
++ * the values of *their own* cached resources. This was described in
++ * <https://datatracker.ietf.org/doc/draft-kazuho-h2-cache-digest/>
++ * and some subsequent revisions that tweaked values but kept the overall idea.
++ * - The draft was abandoned by the IETF http-wg, as support from major clients,
++ * e.g. browsers, was lacking for various reasons.
++ * - For these reasons, mod_h2 abandoned its support for client supplied values
++ * but keeps the diary. It seems to provide value for applications using PUSH,
++ * is configurable in size and defaults to a very moderate memory
++ * footprint.
++ * - The cache digest header is a Golomb Coded Set of hash values, but it may
++ * limit the number of bits per hash value even further. For a good description
++ * of GCS, read here:
++ * <http://giovanni.bajo.it/post/47119962313/golomb-coded-sets-smaller-than-bloom-filters>
++ ******************************************************************************/
++
++
++/*
++ * The push diary is based on the abandoned draft
++ * <https://datatracker.ietf.org/doc/draft-kazuho-h2-cache-digest/>
++ * that describes how to use Golomb filters.
++ */
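++/* Example: with the default H2PushDiarySize of 256, the diary keeps at
++ * most 256 hashes of 8 bytes each, i.e. about 2 KB per connection. */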
++
+ typedef struct h2_push_diary h2_push_diary;
+
+ typedef void h2_push_digest_calc(h2_push_diary *diary, apr_uint64_t *phash, h2_push *push);
+@@ -101,20 +139,4 @@
+ int maxP, const char *authority,
+ const char **pdata, apr_size_t *plen);
+
+-/**
+- * Initialize the push diary by a cache digest as described in
+- * https://datatracker.ietf.org/doc/draft-kazuho-h2-cache-digest/
+- * .
+- * @param diary the diary to set the digest into
+- * @param authority the authority to set the data for
+- * @param data the binary cache digest
+- * @param len the length of the cache digest
+- * @return APR_EINVAL if digest was not successfully parsed
+- */
+-apr_status_t h2_push_diary_digest_set(h2_push_diary *diary, const char *authority,
+- const char *data, apr_size_t len);
+-
+-apr_status_t h2_push_diary_digest64_set(h2_push_diary *diary, const char *authority,
+- const char *data64url, apr_pool_t *pool);
+-
+ #endif /* defined(__mod_h2__h2_push__) */
+--- a/modules/http2/h2_request.c
++++ b/modules/http2/h2_request.c
+@@ -17,6 +17,7 @@
+ #include <assert.h>
+
+ #include <apr_strings.h>
++#include <ap_mmn.h>
+
+ #include <httpd.h>
+ #include <http_core.h>
+@@ -46,9 +47,9 @@
+ static int set_h1_header(void *ctx, const char *key, const char *value)
+ {
+ h1_ctx *x = ctx;
+- x->status = h2_req_add_header(x->headers, x->pool, key, strlen(key),
+- value, strlen(value));
+- return (x->status == APR_SUCCESS)? 1 : 0;
++ int was_added;
++ h2_req_add_header(x->headers, x->pool, key, strlen(key), value, strlen(value), 0, &was_added);
++ return 1;
+ }
+
+ apr_status_t h2_request_rcreate(h2_request **preq, apr_pool_t *pool,
+@@ -84,8 +85,7 @@
+ req->path = path;
+ req->headers = apr_table_make(pool, 10);
+ if (r->server) {
+- req->serialize = h2_config_geti(h2_config_sget(r->server),
+- H2_CONF_SER_HEADERS);
++ req->serialize = h2_config_rgeti(r, H2_CONF_SER_HEADERS);
+ }
+
+ x.pool = pool;
+@@ -99,10 +99,12 @@
+
+ apr_status_t h2_request_add_header(h2_request *req, apr_pool_t *pool,
+ const char *name, size_t nlen,
+- const char *value, size_t vlen)
++ const char *value, size_t vlen,
++ size_t max_field_len, int *pwas_added)
+ {
+ apr_status_t status = APR_SUCCESS;
+
++ *pwas_added = 0;
+ if (nlen <= 0) {
+ return status;
+ }
+@@ -143,8 +145,9 @@
+ }
+ }
+ else {
+- /* non-pseudo header, append to work bucket of stream */
+- status = h2_req_add_header(req->headers, pool, name, nlen, value, vlen);
++ /* non-pseudo header, add to table */
++ status = h2_req_add_header(req->headers, pool, name, nlen, value, vlen,
++ max_field_len, pwas_added);
+ }
+
+ return status;
+@@ -156,7 +159,7 @@
+
+ /* rfc7540, ch. 8.1.2.3:
+ * - if we have :authority, it overrides any Host header
+- * - :authority MUST be ommited when converting h1->h2, so we
++ * - :authority MUST be omitted when converting h1->h2, so we
+ * might get a stream without, but then Host needs to be there */
+ if (!req->authority) {
+ const char *host = apr_table_get(req->headers, "Host");
+@@ -206,13 +209,11 @@
+ return dst;
+ }
+
+-request_rec *h2_request_create_rec(const h2_request *req, conn_rec *c)
++#if !AP_MODULE_MAGIC_AT_LEAST(20150222, 13)
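++/* Fallback for httpd cores without ap_create_request() (added with
++ * MMN 20150222.13): create and minimally initialize a request_rec. */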
++static request_rec *my_ap_create_request(conn_rec *c)
+ {
+- int access_status = HTTP_OK;
+- const char *rpath;
+ apr_pool_t *p;
+ request_rec *r;
+- const char *s;
+
+ apr_pool_create(&p, c->pool);
+ apr_pool_tag(p, "request");
+@@ -226,8 +227,8 @@
+ r->ap_auth_type = NULL;
+
+ r->allowed_methods = ap_make_method_list(p, 2);
+-
+- r->headers_in = apr_table_clone(r->pool, req->headers);
++
++ r->headers_in = apr_table_make(r->pool, 5);
+ r->trailers_in = apr_table_make(r->pool, 5);
+ r->subprocess_env = apr_table_make(r->pool, 25);
+ r->headers_out = apr_table_make(r->pool, 12);
+@@ -262,6 +263,24 @@
+ r->useragent_addr = c->client_addr;
+ r->useragent_ip = c->client_ip;
+
++ return r;
++}
++#endif
++
++request_rec *h2_request_create_rec(const h2_request *req, conn_rec *c)
++{
++ int access_status = HTTP_OK;
++ const char *rpath;
++ const char *s;
++
++#if AP_MODULE_MAGIC_AT_LEAST(20150222, 13)
++ request_rec *r = ap_create_request(c);
++#else
++ request_rec *r = my_ap_create_request(c);
++#endif
++
++ r->headers_in = apr_table_clone(r->pool, req->headers);
++
+ ap_run_pre_read_request(r, c);
+
+ /* Time to populate r with the data we have. */
+@@ -272,6 +291,9 @@
+ if (r->method_number == M_GET && r->method[0] == 'H') {
+ r->header_only = 1;
+ }
++ r->the_request = apr_psprintf(r->pool, "%s %s HTTP/2.0",
++ req->method, req->path ? req->path : "");
++ r->headers_in = apr_table_clone(r->pool, req->headers);
+
+ rpath = (req->path ? req->path : "");
+ ap_parse_uri(r, rpath);
+@@ -288,7 +310,9 @@
+ */
+ r->hostname = NULL;
+ ap_update_vhost_from_headers(r);
+-
++ r->protocol = "HTTP/2.0";
++ r->proto_num = HTTP_VERSION(2, 0);
++
+ /* we may have switched to another server */
+ r->per_dir_config = r->server->lookup_defaults;
+
+@@ -337,3 +361,4 @@
+ }
+
+
++
+--- a/modules/http2/h2_request.h
++++ b/modules/http2/h2_request.h
+@@ -24,7 +24,8 @@
+
+ apr_status_t h2_request_add_header(h2_request *req, apr_pool_t *pool,
+ const char *name, size_t nlen,
+- const char *value, size_t vlen);
++ const char *value, size_t vlen,
++ size_t max_field_len, int *pwas_added);
+
+ apr_status_t h2_request_add_trailer(h2_request *req, apr_pool_t *pool,
+ const char *name, size_t nlen,
+--- a/modules/http2/h2_session.c
++++ b/modules/http2/h2_session.c
+@@ -106,7 +106,7 @@
+
+ static void cleanup_unprocessed_streams(h2_session *session)
+ {
+- h2_mplx_stream_do(session->mplx, rst_unprocessed_stream, session);
++ h2_mplx_m_stream_do(session->mplx, rst_unprocessed_stream, session);
+ }
+
+ static h2_stream *h2_session_open_stream(h2_session *session, int stream_id,
+@@ -385,14 +385,19 @@
+ break;
+ case NGHTTP2_RST_STREAM:
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, APLOGNO(03067)
+- "h2_stream(%ld-%d): RST_STREAM by client, errror=%d",
++ "h2_stream(%ld-%d): RST_STREAM by client, error=%d",
+ session->id, (int)frame->hd.stream_id,
+ (int)frame->rst_stream.error_code);
+ stream = h2_session_stream_get(session, frame->hd.stream_id);
+ if (stream && stream->initiated_on) {
++ /* A stream reset on a request we sent to the client (a PUSH).
++ * Normal, when the client does not want it. */
+ ++session->pushes_reset;
+ }
+ else {
++ /* A stream reset on a request the client sent us. Could happen in a browser
++ * when the user navigates away or cancels loading - maybe. */
++ h2_mplx_m_client_rst(session->mplx, frame->hd.stream_id);
+ ++session->streams_reset;
+ }
+ break;
+@@ -462,7 +467,7 @@
+ }
+
+ static int h2_session_continue_data(h2_session *session) {
+- if (h2_mplx_has_master_events(session->mplx)) {
++ if (h2_mplx_m_has_master_events(session->mplx)) {
+ return 0;
+ }
+ if (h2_conn_io_needs_flush(&session->io)) {
+@@ -495,9 +500,7 @@
+ return NGHTTP2_ERR_WOULDBLOCK;
+ }
+
+- if (frame->data.padlen > H2_MAX_PADLEN) {
+- return NGHTTP2_ERR_PROTO;
+- }
++ ap_assert(frame->data.padlen <= (H2_MAX_PADLEN+1));
+ padlen = (unsigned char)frame->data.padlen;
+
+ stream = h2_session_stream_get(session, stream_id);
+@@ -513,8 +516,9 @@
+ H2_STRM_MSG(stream, "send_data_cb for %ld bytes"),
+ (long)length);
+
+- status = h2_conn_io_write(&session->io, (const char *)framehd, 9);
++ status = h2_conn_io_write(&session->io, (const char *)framehd, H2_FRAME_HDR_LEN);
+ if (padlen && status == APR_SUCCESS) {
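++ /* frame->data.padlen includes the 1-byte Pad Length field itself;
++ * the field must carry only the count of padding bytes that
++ * follow it, hence the decrement. */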
++ --padlen;
+ status = h2_conn_io_write(&session->io, (const char *)&padlen, 1);
+ }
+
+@@ -622,6 +626,39 @@
+ }
+ #endif
+
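++/* nghttp2 select_padding callback: return the desired payload length
++ * including padding (excluding the 9-byte frame header); returning
++ * frame->hd.length leaves the frame unpadded. */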
++static ssize_t select_padding_cb(nghttp2_session *ngh2,
++ const nghttp2_frame *frame,
++ size_t max_payloadlen, void *user_data)
++{
++ h2_session *session = user_data;
++ ssize_t frame_len = frame->hd.length + H2_FRAME_HDR_LEN; /* the total length without padding */
++ ssize_t padded_len = frame_len;
++
++ /* Determine # of padding bytes to append to frame. Unless session->padding_always
++ * is set, the number may be capped by the io.write_size that currently applies.
++ */
++ if (session->padding_max) {
++ int n = ap_random_pick(0, session->padding_max);
++ padded_len = H2MIN(max_payloadlen + H2_FRAME_HDR_LEN, frame_len + n);
++ }
++
++ if (padded_len != frame_len) {
++ if (!session->padding_always && session->io.write_size
++ && (padded_len > session->io.write_size)
++ && (frame_len <= session->io.write_size)) {
++ padded_len = session->io.write_size;
++ }
++ if (APLOGctrace2(session->c)) {
++ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, session->c,
++ "select padding from [%d, %d]: %d (frame length: 0x%04x, write size: %d)",
++ (int)frame_len, (int)max_payloadlen+H2_FRAME_HDR_LEN,
++ (int)(padded_len - frame_len), (int)padded_len, (int)session->io.write_size);
++ }
++ return padded_len - H2_FRAME_HDR_LEN;
++ }
++ return frame->hd.length;
++}
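++/* Note: padding_max is derived from the configured padding bits as
++ * (1 << bits) - 1 (see h2_session_create); e.g. 4 bits allow 0..15
++ * random padding bytes per frame. */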
++
+ #define NGH2_SET_CALLBACK(callbacks, name, fn)\
+ nghttp2_session_callbacks_set_##name##_callback(callbacks, fn)
+
+@@ -647,6 +684,7 @@
+ #ifdef H2_NG2_INVALID_HEADER_CB
+ NGH2_SET_CALLBACK(*pcb, on_invalid_header, on_invalid_header_cb);
+ #endif
++ NGH2_SET_CALLBACK(*pcb, select_padding, select_padding_cb);
+ return APR_SUCCESS;
+ }
+
+@@ -691,7 +729,7 @@
+ * Remove all streams greater than this number without submitting
+ * a RST_STREAM frame, since that should be clear from the GOAWAY
+ * we send. */
+- session->local.accepted_max = h2_mplx_shutdown(session->mplx);
++ session->local.accepted_max = h2_mplx_m_shutdown(session->mplx);
+ session->local.error = error;
+ }
+ else {
+@@ -741,7 +779,7 @@
+ }
+
+ transit(session, trigger, H2_SESSION_ST_CLEANUP);
+- h2_mplx_release_and_join(session->mplx, session->iowait);
++ h2_mplx_m_release_and_join(session->mplx, session->iowait);
+ session->mplx = NULL;
+
+ ap_assert(session->ngh2);
+@@ -757,13 +795,12 @@
+ {
+ conn_rec *c = data;
+ h2_session *session;
+- h2_ctx *ctx = h2_ctx_get(c, 0);
+
+- if (ctx && (session = h2_ctx_session_get(ctx))) {
++ if ((session = h2_ctx_get_session(c))) {
+ /* if the session is still there, now is the last chance
+ * to perform cleanup. Normally, cleanup should have happened
+ * earlier in the connection pre_close. Main reason is that
+- * any ongoing requests on slave connections might still access
++ * any ongoing requests on secondary connections might still access
+ * data which has, at this time, already been freed. An example
+ * is mod_ssl that uses request hooks. */
+ ap_log_cerror(APLOG_MARK, APLOG_WARNING, 0, c,
+@@ -775,11 +812,8 @@
+ return APR_SUCCESS;
+ }
+
+-static apr_status_t h2_session_create_int(h2_session **psession,
+- conn_rec *c,
+- request_rec *r,
+- h2_ctx *ctx,
+- h2_workers *workers)
++apr_status_t h2_session_create(h2_session **psession, conn_rec *c, request_rec *r,
++ server_rec *s, h2_workers *workers)
+ {
+ nghttp2_session_callbacks *callbacks = NULL;
+ nghttp2_option *options = NULL;
+@@ -820,19 +854,16 @@
+ session->id = c->id;
+ session->c = c;
+ session->r = r;
+- session->s = h2_ctx_server_get(ctx);
++ session->s = s;
+ session->pool = pool;
+- session->config = h2_config_sget(session->s);
+ session->workers = workers;
+
+ session->state = H2_SESSION_ST_INIT;
+ session->local.accepting = 1;
+ session->remote.accepting = 1;
+
+- session->max_stream_count = h2_config_geti(session->config,
+- H2_CONF_MAX_STREAMS);
+- session->max_stream_mem = h2_config_geti(session->config,
+- H2_CONF_STREAM_MAX_MEM);
++ session->max_stream_count = h2_config_sgeti(s, H2_CONF_MAX_STREAMS);
++ session->max_stream_mem = h2_config_sgeti(s, H2_CONF_STREAM_MAX_MEM);
+
+ status = apr_thread_cond_create(&session->iowait, session->pool);
+ if (status != APR_SUCCESS) {
+@@ -862,14 +893,18 @@
+ session->monitor->on_state_event = on_stream_state_event;
+ session->monitor->on_event = on_stream_event;
+
+- session->mplx = h2_mplx_create(c, session->pool, session->config,
+- workers);
++ session->mplx = h2_mplx_m_create(c, s, session->pool, workers);
+
+ /* connection input filter that feeds the session */
+ session->cin = h2_filter_cin_create(session);
+ ap_add_input_filter("H2_IN", session->cin, r, c);
+
+- h2_conn_io_init(&session->io, c, session->config);
++ h2_conn_io_init(&session->io, c, s);
++ session->padding_max = h2_config_sgeti(s, H2_CONF_PADDING_BITS);
++ if (session->padding_max) {
++ session->padding_max = (0x01 << session->padding_max) - 1;
++ }
++ session->padding_always = h2_config_sgeti(s, H2_CONF_PADDING_ALWAYS);
+ session->bbtmp = apr_brigade_create(session->pool, c->bucket_alloc);
+
+ status = init_callbacks(c, &callbacks);
+@@ -888,8 +923,7 @@
+ apr_pool_destroy(pool);
+ return status;
+ }
+- nghttp2_option_set_peer_max_concurrent_streams(
+- options, (uint32_t)session->max_stream_count);
++ nghttp2_option_set_peer_max_concurrent_streams(options, (uint32_t)session->max_stream_count);
+ /* We need to handle window updates ourself, otherwise we
+ * get flooded by nghttp2. */
+ nghttp2_option_set_no_auto_window_update(options, 1);
+@@ -907,7 +941,7 @@
+ return APR_ENOMEM;
+ }
+
+- n = h2_config_geti(session->config, H2_CONF_PUSH_DIARY_SIZE);
++ n = h2_config_sgeti(s, H2_CONF_PUSH_DIARY_SIZE);
+ session->push_diary = h2_push_diary_create(session->pool, n);
+
+ if (APLOGcdebug(c)) {
+@@ -924,22 +958,11 @@
+ (int)session->push_diary->N);
+ }
+
+- apr_pool_pre_cleanup_register(pool, c, session_pool_cleanup);
++ apr_pool_pre_cleanup_register(pool, c, session_pool_cleanup);
++
+ return APR_SUCCESS;
+ }
+
+-apr_status_t h2_session_create(h2_session **psession,
+- conn_rec *c, h2_ctx *ctx, h2_workers *workers)
+-{
+- return h2_session_create_int(psession, c, NULL, ctx, workers);
+-}
+-
+-apr_status_t h2_session_rcreate(h2_session **psession,
+- request_rec *r, h2_ctx *ctx, h2_workers *workers)
+-{
+- return h2_session_create_int(psession, r->connection, r, ctx, workers);
+-}
+-
+ static apr_status_t h2_session_start(h2_session *session, int *rv)
+ {
+ apr_status_t status = APR_SUCCESS;
+@@ -1004,7 +1027,7 @@
+ settings[slen].settings_id = NGHTTP2_SETTINGS_MAX_CONCURRENT_STREAMS;
+ settings[slen].value = (uint32_t)session->max_stream_count;
+ ++slen;
+- win_size = h2_config_geti(session->config, H2_CONF_WIN_SIZE);
++ win_size = h2_config_sgeti(session->s, H2_CONF_WIN_SIZE);
+ if (win_size != H2_INITIAL_WINDOW_SIZE) {
+ settings[slen].settings_id = NGHTTP2_SETTINGS_INITIAL_WINDOW_SIZE;
+ settings[slen].value = win_size;
+@@ -1156,7 +1179,7 @@
+ stream = h2_session_open_stream(session, nid, is->id);
+ if (!stream) {
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c,
+- H2_STRM_LOG(APLOGNO(03077), stream,
++ H2_STRM_LOG(APLOGNO(03077), is,
+ "failed to create stream obj %d"), nid);
+ /* kill the push_promise */
+ nghttp2_submit_rst_stream(session->ngh2, NGHTTP2_FLAG_NONE, nid,
+@@ -1262,7 +1285,7 @@
+
+ rv = nghttp2_session_change_stream_priority(session->ngh2, stream->id, &ps);
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c,
+- ""H2_STRM_LOG(APLOGNO(03203), stream,
++ H2_STRM_LOG(APLOGNO(03203), stream,
+ "PUSH %s, weight=%d, depends=%d, returned=%d"),
+ ptype, ps.weight, ps.stream_id, rv);
+ status = (rv < 0)? APR_EGENERAL : APR_SUCCESS;
+@@ -1280,7 +1303,7 @@
+ {
+ /* iff we can and they can and want */
+ return (session->remote.accepting /* remote GOAWAY received */
+- && h2_config_geti(session->config, H2_CONF_PUSH)
++ && h2_config_sgeti(session->s, H2_CONF_PUSH)
+ && nghttp2_session_get_remote_settings(session->ngh2,
+ NGHTTP2_SETTINGS_ENABLE_PUSH));
+ }
+@@ -1324,6 +1347,7 @@
+ int eos)
+ {
+ apr_status_t status = APR_SUCCESS;
++ const char *s;
+ int rv = 0;
+
+ ap_assert(session);
+@@ -1391,8 +1415,12 @@
+ && (headers->status < 400)
+ && (headers->status != 304)
+ && h2_session_push_enabled(session)) {
+-
+- h2_stream_submit_pushes(stream, headers);
++ /* PUSH is possible and enabled on the server; unless the request
++ * denies it, submit resources to push */
++ s = apr_table_get(headers->notes, H2_PUSH_MODE_NOTE);
++ if (!s || strcmp(s, "0")) {
++ h2_stream_submit_pushes(stream, headers);
++ }
+ }
+
+ if (!stream->pref_priority) {
+@@ -1414,7 +1442,7 @@
+ }
+
+ if (headers->status == 103
+- && !h2_config_geti(session->config, H2_CONF_EARLY_HINTS)) {
++ && !h2_config_sgeti(session->s, H2_CONF_EARLY_HINTS)) {
+ /* suppress sending this to the client, it might have triggered
+ * pushes and served its purpose nevertheless */
+ rv = 0;
+@@ -1524,7 +1552,7 @@
+ if (stream) {
+ ap_assert(!stream->scheduled);
+ if (h2_stream_prep_processing(stream) == APR_SUCCESS) {
+- h2_mplx_process(session->mplx, stream, stream_pri_cmp, session);
++ h2_mplx_m_process(session->mplx, stream, stream_pri_cmp, session);
+ }
+ else {
+ h2_stream_rst(stream, H2_ERR_INTERNAL_ERROR);
+@@ -1680,7 +1708,7 @@
+ * that already served requests - not fair. */
+ session->idle_sync_until = apr_time_now() + apr_time_from_sec(1);
+ s = "timeout";
+- timeout = H2MAX(session->s->timeout, session->s->keep_alive_timeout);
++ timeout = session->s->timeout;
+ update_child_status(session, SERVER_BUSY_READ, "idle");
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, session->c,
+ H2_SSSN_LOG("", session, "enter idle, timeout = %d sec"),
+@@ -1688,8 +1716,8 @@
+ }
+ else if (session->open_streams) {
+ s = "timeout";
+- timeout = session->s->keep_alive_timeout;
+- update_child_status(session, SERVER_BUSY_KEEPALIVE, "idle");
++ timeout = session->s->timeout;
++ update_child_status(session, SERVER_BUSY_READ, "idle");
+ }
+ else {
+ /* normal keepalive setup */
+@@ -1796,7 +1824,7 @@
+ session->open_streams);
+ h2_conn_io_flush(&session->io);
+ if (session->open_streams > 0) {
+- if (h2_mplx_awaits_data(session->mplx)) {
++ if (h2_mplx_m_awaits_data(session->mplx)) {
+ /* waiting for at least one stream to produce data */
+ transit(session, "no io", H2_SESSION_ST_WAIT);
+ }
+@@ -1954,7 +1982,8 @@
+ ev_stream_closed(session, stream);
+ break;
+ case H2_SS_CLEANUP:
+- h2_mplx_stream_cleanup(session->mplx, stream);
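++ /* the stream is about to be destroyed; detach it from nghttp2
++ * so callbacks no longer receive a dangling pointer */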
++ nghttp2_session_set_stream_user_data(session->ngh2, stream->id, NULL);
++ h2_mplx_m_stream_cleanup(session->mplx, stream);
+ break;
+ default:
+ break;
+@@ -2044,7 +2073,7 @@
+ static apr_status_t dispatch_master(h2_session *session) {
+ apr_status_t status;
+
+- status = h2_mplx_dispatch_master_events(session->mplx,
++ status = h2_mplx_m_dispatch_master_events(session->mplx,
+ on_stream_resume, session);
+ if (status == APR_EAGAIN) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE3, status, session->c,
+@@ -2089,7 +2118,7 @@
+ switch (session->state) {
+ case H2_SESSION_ST_INIT:
+ ap_update_child_status_from_conn(c->sbh, SERVER_BUSY_READ, c);
+- if (!h2_is_acceptable_connection(c, 1)) {
++ if (!h2_is_acceptable_connection(c, session->r, 1)) {
+ update_child_status(session, SERVER_BUSY_READ,
+ "inadequate security");
+ h2_session_shutdown(session,
+@@ -2112,7 +2141,7 @@
+ break;
+
+ case H2_SESSION_ST_IDLE:
+- if (session->idle_until && (apr_time_now() + session->idle_delay) > session->idle_until) {
++ if (session->idle_until && (now + session->idle_delay) > session->idle_until) {
+ ap_log_cerror( APLOG_MARK, APLOG_TRACE1, status, c,
+ H2_SSSN_MSG(session, "idle, timeout reached, closing"));
+ if (session->idle_delay) {
+@@ -2146,6 +2175,14 @@
+ session->have_read = 1;
+ }
+ else if (APR_STATUS_IS_EAGAIN(status) || APR_STATUS_IS_TIMEUP(status)) {
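++ /* let the mplx react to the idle master connection; anything
++ * other than SUCCESS or EAGAIN terminates the session */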
++ status = h2_mplx_m_idle(session->mplx);
++ if (status == APR_EAGAIN) {
++ break;
++ }
++ else if (status != APR_SUCCESS) {
++ dispatch_event(session, H2_SESSION_EV_CONN_ERROR,
++ H2_ERR_ENHANCE_YOUR_CALM, "less is more");
++ }
+ status = APR_EAGAIN;
+ goto out;
+ }
+@@ -2168,7 +2205,7 @@
+ /* We wait in smaller increments, using a 1 second timeout.
+ * That gives us the chance to check for MPMQ_STOPPING often.
+ */
+- status = h2_mplx_idle(session->mplx);
++ status = h2_mplx_m_idle(session->mplx);
+ if (status == APR_EAGAIN) {
+ break;
+ }
+@@ -2282,7 +2319,7 @@
+ "h2_session: wait for data, %ld micros",
+ (long)session->wait_us);
+ }
+- status = h2_mplx_out_trywait(session->mplx, session->wait_us,
++ status = h2_mplx_m_out_trywait(session->mplx, session->wait_us,
+ session->iowait);
+ if (status == APR_SUCCESS) {
+ session->wait_us = 0;
+@@ -2319,7 +2356,7 @@
+ dispatch_event(session, H2_SESSION_EV_NGH2_DONE, 0, NULL);
+ }
+ if (session->reprioritize) {
+- h2_mplx_reprioritize(session->mplx, stream_pri_cmp, session);
++ h2_mplx_m_reprioritize(session->mplx, stream_pri_cmp, session);
+ session->reprioritize = 0;
+ }
+ }
+--- a/modules/http2/h2_session.h
++++ b/modules/http2/h2_session.h
+@@ -80,12 +80,13 @@
+ request_rec *r; /* the request that started this in case
+ * of 'h2c', NULL otherwise */
+ server_rec *s; /* server/vhost we're starting on */
+- const struct h2_config *config; /* Relevant config for this session */
+ apr_pool_t *pool; /* pool to use in session */
+ struct h2_mplx *mplx; /* multiplexer for stream data */
+ struct h2_workers *workers; /* for executing stream tasks */
+ struct h2_filter_cin *cin; /* connection input filter context */
+ h2_conn_io io; /* io on httpd conn filters */
++ int padding_max; /* max number of padding bytes */
++ int padding_always; /* padding has precedence over I/O optimizations */
+ struct nghttp2_session *ngh2; /* the nghttp2 session (internal use) */
+
+ h2_session_state state; /* state session is in */
+@@ -131,7 +132,7 @@
+ const char *last_status_msg; /* the one already reported */
+
+ struct h2_iqueue *in_pending; /* all streams with input pending */
+- struct h2_iqueue *in_process; /* all streams ready for processing on slave */
++ struct h2_iqueue *in_process; /* all streams ready for processing on a secondary */
+
+ } h2_session;
+
+@@ -142,27 +143,15 @@
+ * The session will apply the configured parameter.
+ * @param psession pointer receiving the created session on success or NULL
+ * @param c the connection to work on
++ * @param r optional request when protocol was upgraded
+ * @param cfg the module config to apply
+ * @param workers the worker pool to use
+ * @return the created session
+ */
+ apr_status_t h2_session_create(h2_session **psession,
+- conn_rec *c, struct h2_ctx *ctx,
++ conn_rec *c, request_rec *r, server_rec *,
+ struct h2_workers *workers);
+
+-/**
+- * Create a new h2_session for the given request.
+- * The session will apply the configured parameter.
+- * @param psession pointer receiving the created session on success or NULL
+- * @param r the request that was upgraded
+- * @param cfg the module config to apply
+- * @param workers the worker pool to use
+- * @return the created session
+- */
+-apr_status_t h2_session_rcreate(h2_session **psession,
+- request_rec *r, struct h2_ctx *ctx,
+- struct h2_workers *workers);
+-
+ void h2_session_event(h2_session *session, h2_session_event_t ev,
+ int err, const char *msg);
+
+--- a/modules/http2/h2_stream.c
++++ b/modules/http2/h2_stream.c
+@@ -365,9 +365,8 @@
+ static void set_policy_for(h2_stream *stream, h2_request *r)
+ {
+ int enabled = h2_session_push_enabled(stream->session);
+- stream->push_policy = h2_push_policy_determine(r->headers, stream->pool,
+- enabled);
+- r->serialize = h2_config_geti(stream->session->config, H2_CONF_SER_HEADERS);
++ stream->push_policy = h2_push_policy_determine(r->headers, stream->pool, enabled);
++ r->serialize = h2_config_sgeti(stream->session->s, H2_CONF_SER_HEADERS);
+ }
+
+ apr_status_t h2_stream_send_frame(h2_stream *stream, int ftype, int flags, size_t frame_len)
+@@ -398,13 +397,8 @@
+ /* start pushed stream */
+ ap_assert(stream->request == NULL);
+ ap_assert(stream->rtmp != NULL);
+- status = h2_request_end_headers(stream->rtmp, stream->pool, 1, 0);
+- if (status != APR_SUCCESS) {
+- return status;
+- }
+- set_policy_for(stream, stream->rtmp);
+- stream->request = stream->rtmp;
+- stream->rtmp = NULL;
++ status = h2_stream_end_headers(stream, 1, 0);
++ if (status != APR_SUCCESS) goto leave;
+ break;
+
+ default:
+@@ -416,6 +410,7 @@
+ if (status == APR_SUCCESS && eos) {
+ status = transit(stream, on_event(stream, H2_SEV_CLOSED_L));
+ }
++leave:
+ return status;
+ }
+
+@@ -451,18 +446,13 @@
+ ap_assert(stream->request == NULL);
+ if (stream->rtmp == NULL) {
+ /* This can only happen, if the stream has received no header
+- * name/value pairs at all. The lastest nghttp2 version have become
++ * name/value pairs at all. The latest nghttp2 versions have become
+ * pretty good at detecting this early. In any case, we have
+ * to abort the connection here, since this is clearly a protocol error */
+ return APR_EINVAL;
+ }
+- status = h2_request_end_headers(stream->rtmp, stream->pool, eos, frame_len);
+- if (status != APR_SUCCESS) {
+- return status;
+- }
+- set_policy_for(stream, stream->rtmp);
+- stream->request = stream->rtmp;
+- stream->rtmp = NULL;
++ status = h2_stream_end_headers(stream, eos, frame_len);
++ if (status != APR_SUCCESS) goto leave;
+ }
+ break;
+
+@@ -473,6 +463,7 @@
+ if (status == APR_SUCCESS && eos) {
+ status = transit(stream, on_event(stream, H2_SEV_CLOSED_R));
+ }
++leave:
+ return status;
+ }
+
+@@ -663,11 +654,14 @@
+
+ static apr_status_t add_trailer(h2_stream *stream,
+ const char *name, size_t nlen,
+- const char *value, size_t vlen)
++ const char *value, size_t vlen,
++ size_t max_field_len, int *pwas_added)
+ {
+ conn_rec *c = stream->session->c;
+ char *hname, *hvalue;
++ const char *existing;
+
++ *pwas_added = 0;
+ if (nlen == 0 || name[0] == ':') {
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, APR_EINVAL, c,
+ H2_STRM_LOG(APLOGNO(03060), stream,
+@@ -681,9 +675,18 @@
+ stream->trailers = apr_table_make(stream->pool, 5);
+ }
+ hname = apr_pstrndup(stream->pool, name, nlen);
+- hvalue = apr_pstrndup(stream->pool, value, vlen);
+ h2_util_camel_case_header(hname, nlen);
++ existing = apr_table_get(stream->trailers, hname);
++ if (max_field_len
++ && ((existing? strlen(existing)+2 : 0) + vlen + nlen + 2 > max_field_len)) {
++ /* "key: (oldval, )?nval" is too long */
++ return APR_EINVAL;
++ }
++ if (!existing) *pwas_added = 1;
++ hvalue = apr_pstrndup(stream->pool, value, vlen);
+ apr_table_mergen(stream->trailers, hname, hvalue);
++ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, c,
++ H2_STRM_MSG(stream, "added trailer '%s: %s'"), hname, hvalue);
+
+ return APR_SUCCESS;
+ }
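A minimal sketch of the arithmetic behind the limit check above, bounding the serialized trailer field "Name: existing, value" (all names here are illustrative, not patch code):

    #include <stddef.h>
    #include <string.h>

    /* nonzero if merging a trailer would exceed the field size limit */
    static int trailer_too_long(const char *existing, size_t nlen,
                                size_t vlen, size_t max_field_len)
    {
        /* nlen + 2 covers "Name: "; strlen(existing) + 2 covers the
         * ", " separator when a value is already present */
        size_t need = (existing ? strlen(existing) + 2 : 0)
                      + nlen + 2 + vlen;
        return max_field_len && need > max_field_len;
    }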
+@@ -693,44 +696,31 @@
+ const char *value, size_t vlen)
+ {
+ h2_session *session = stream->session;
+- int error = 0;
+- apr_status_t status;
++ int error = 0, was_added = 0;
++ apr_status_t status = APR_SUCCESS;
+
+ if (stream->has_response) {
+ return APR_EINVAL;
+ }
+- ++stream->request_headers_added;
++
+ if (name[0] == ':') {
+ if ((vlen) > session->s->limit_req_line) {
+ /* pseudo header: approximation of request line size check */
+- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, session->c,
+- H2_STRM_MSG(stream, "pseudo %s too long"), name);
++ if (!h2_stream_is_ready(stream)) {
++ ap_log_cerror(APLOG_MARK, APLOG_INFO, 0, session->c,
++ H2_STRM_LOG(APLOGNO(10178), stream,
++ "Request pseudo header exceeds "
++ "LimitRequestFieldSize: %s"), name);
++ }
+ error = HTTP_REQUEST_URI_TOO_LARGE;
++ goto cleanup;
+ }
+ }
+- else if ((nlen + 2 + vlen) > session->s->limit_req_fieldsize) {
+- /* header too long */
+- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, session->c,
+- H2_STRM_MSG(stream, "header %s too long"), name);
+- error = HTTP_REQUEST_HEADER_FIELDS_TOO_LARGE;
+- }
+-
+- if (stream->request_headers_added > session->s->limit_req_fields + 4) {
+- /* too many header lines, include 4 pseudo headers */
+- if (stream->request_headers_added
+- > session->s->limit_req_fields + 4 + 100) {
+- /* yeah, right */
+- h2_stream_rst(stream, H2_ERR_ENHANCE_YOUR_CALM);
+- return APR_ECONNRESET;
+- }
+- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, session->c,
+- H2_STRM_MSG(stream, "too many header lines"));
+- error = HTTP_REQUEST_HEADER_FIELDS_TOO_LARGE;
+- }
+
+- if (error) {
+- set_error_response(stream, error);
+- return APR_EINVAL;
++ if (session->s->limit_req_fields > 0
++ && stream->request_headers_added > session->s->limit_req_fields) {
++ /* already over limit, count this attempt, but do not take it in */
++ ++stream->request_headers_added;
+ }
+ else if (H2_SS_IDLE == stream->state) {
+ if (!stream->rtmp) {
+@@ -738,16 +728,55 @@
+ NULL, NULL, NULL, NULL, NULL, 0);
+ }
+ status = h2_request_add_header(stream->rtmp, stream->pool,
+- name, nlen, value, vlen);
++ name, nlen, value, vlen,
++ session->s->limit_req_fieldsize, &was_added);
++ if (was_added) ++stream->request_headers_added;
+ }
+ else if (H2_SS_OPEN == stream->state) {
+- status = add_trailer(stream, name, nlen, value, vlen);
++ status = add_trailer(stream, name, nlen, value, vlen,
++ session->s->limit_req_fieldsize, &was_added);
++ if (was_added) ++stream->request_headers_added;
+ }
+ else {
+ status = APR_EINVAL;
++ goto cleanup;
++ }
++
++ if (APR_EINVAL == status) {
++ /* header too long */
++ if (!h2_stream_is_ready(stream)) {
++ ap_log_cerror(APLOG_MARK, APLOG_INFO, 0, session->c,
++ H2_STRM_LOG(APLOGNO(10180), stream,"Request header exceeds "
++ "LimitRequestFieldSize: %.*s"),
++ (int)H2MIN(nlen, 80), name);
++ }
++ error = HTTP_REQUEST_HEADER_FIELDS_TOO_LARGE;
++ goto cleanup;
++ }
++
++ if (session->s->limit_req_fields > 0
++ && stream->request_headers_added > session->s->limit_req_fields) {
++ /* too many header lines */
++ if (stream->request_headers_added > session->s->limit_req_fields + 100) {
++ /* yeah, right, this request is way over the limit, say goodbye */
++ h2_stream_rst(stream, H2_ERR_ENHANCE_YOUR_CALM);
++ return APR_ECONNRESET;
++ }
++ if (!h2_stream_is_ready(stream)) {
++ ap_log_cerror(APLOG_MARK, APLOG_INFO, 0, session->c,
++ H2_STRM_LOG(APLOGNO(10181), stream, "Number of request headers "
++ "exceeds LimitRequestFields"));
++ }
++ error = HTTP_REQUEST_HEADER_FIELDS_TOO_LARGE;
++ goto cleanup;
+ }
+
+- if (status != APR_SUCCESS) {
++cleanup:
++ if (error) {
++ set_error_response(stream, error);
++ return APR_EINVAL;
++ }
++ else if (status != APR_SUCCESS) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, session->c,
+ H2_STRM_MSG(stream, "header %s not accepted"), name);
+ h2_stream_dispatch(stream, H2_SEV_CANCELLED);
+@@ -755,6 +784,49 @@
+ return status;
+ }
+
++typedef struct {
++ apr_size_t maxlen;
++ const char *failed_key;
++} val_len_check_ctx;
++
++static int table_check_val_len(void *baton, const char *key, const char *value)
++{
++ val_len_check_ctx *ctx = baton;
++
++ if (strlen(value) <= ctx->maxlen) return 1;
++ ctx->failed_key = key;
++ return 0;
++}
++
++apr_status_t h2_stream_end_headers(h2_stream *stream, int eos, size_t raw_bytes)
++{
++ apr_status_t status;
++ val_len_check_ctx ctx;
++
++ status = h2_request_end_headers(stream->rtmp, stream->pool, eos, raw_bytes);
++ if (APR_SUCCESS == status) {
++ set_policy_for(stream, stream->rtmp);
++ stream->request = stream->rtmp;
++ stream->rtmp = NULL;
++
++ ctx.maxlen = stream->session->s->limit_req_fieldsize;
++ ctx.failed_key = NULL;
++ apr_table_do(table_check_val_len, &ctx, stream->request->headers, NULL);
++ if (ctx.failed_key) {
++ if (!h2_stream_is_ready(stream)) {
++ ap_log_cerror(APLOG_MARK, APLOG_INFO, 0, stream->session->c,
++ H2_STRM_LOG(APLOGNO(10230), stream,"Request header exceeds "
++ "LimitRequestFieldSize: %.*s"),
++ (int)H2MIN(strlen(ctx.failed_key), 80), ctx.failed_key);
++ }
++ set_error_response(stream, HTTP_REQUEST_HEADER_FIELDS_TOO_LARGE);
++ /* keep on returning APR_SUCCESS, so that we send an HTTP response and
++ * do not RST the stream. */
++ }
++ }
++ return status;
++}
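apr_table_do() stops iterating once the callback returns 0, so table_check_val_len records the first over-long value and aborts the walk. A minimal usage sketch, assuming a hypothetical limit:

    val_len_check_ctx ctx;

    ctx.maxlen = 8190;      /* hypothetical LimitRequestFieldSize */
    ctx.failed_key = NULL;
    apr_table_do(table_check_val_len, &ctx, headers, NULL);
    if (ctx.failed_key) {
        /* first header whose value exceeds ctx.maxlen */
    }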
++
+ static apr_bucket *get_first_headers_bucket(apr_bucket_brigade *bb)
+ {
+ if (bb) {
+@@ -855,7 +927,7 @@
+ * is requested. But we can reduce the size in case the master
+ * connection operates in smaller chunks. (TSL warmup) */
+ if (stream->session->io.write_size > 0) {
+- max_chunk = stream->session->io.write_size - 9; /* header bits */
++ max_chunk = stream->session->io.write_size - H2_FRAME_HDR_LEN;
+ }
+ requested = (*plen > 0)? H2MIN(*plen, max_chunk) : max_chunk;
+
+@@ -864,7 +936,7 @@
+
+ if (status == APR_EAGAIN) {
+ /* TODO: ugly, someone needs to retrieve the response first */
+- h2_mplx_keep_active(stream->session->mplx, stream);
++ h2_mplx_m_keep_active(stream->session->mplx, stream);
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c,
+ H2_STRM_MSG(stream, "prep, response eagain"));
+ return status;
+@@ -987,7 +1059,7 @@
+ const char *ctype = apr_table_get(response->headers, "content-type");
+ if (ctype) {
+ /* FIXME: Not good enough, config needs to come from request->server */
+- return h2_config_get_priority(stream->session->config, ctype);
++ return h2_cconfig_get_priority(stream->session->c, ctype);
+ }
+ }
+ return NULL;
+--- a/modules/http2/h2_stream.h
++++ b/modules/http2/h2_stream.h
+@@ -198,6 +198,10 @@
+ apr_status_t h2_stream_add_header(h2_stream *stream,
+ const char *name, size_t nlen,
+ const char *value, size_t vlen);
++
++/* End the construction of request headers */
++apr_status_t h2_stream_end_headers(h2_stream *stream, int eos, size_t raw_bytes);
++
+
+ apr_status_t h2_stream_send_frame(h2_stream *stream, int frame_type, int flags, size_t frame_len);
+ apr_status_t h2_stream_recv_frame(h2_stream *stream, int frame_type, int flags, size_t frame_len);
+--- a/modules/http2/h2_switch.c
++++ b/modules/http2/h2_switch.c
+@@ -55,7 +55,6 @@
+ int is_tls = h2_h2_is_tls(c);
+ const char **protos = is_tls? h2_tls_protos : h2_clear_protos;
+
+- (void)s;
+ if (!h2_mpm_supported()) {
+ return DECLINED;
+ }
+@@ -68,7 +67,7 @@
+ return DECLINED;
+ }
+
+- if (!h2_is_acceptable_connection(c, 0)) {
++ if (!h2_is_acceptable_connection(c, r, 0)) {
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, c, APLOGNO(03084)
+ "protocol propose: connection requirements not met");
+ return DECLINED;
+@@ -81,7 +80,7 @@
+ */
+ const char *p;
+
+- if (!h2_allows_h2_upgrade(c)) {
++ if (!h2_allows_h2_upgrade(r)) {
+ return DECLINED;
+ }
+
+@@ -150,7 +149,7 @@
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c,
+ "switching protocol to '%s'", protocol);
+ h2_ctx_protocol_set(ctx, protocol);
+- h2_ctx_server_set(ctx, s);
++ h2_ctx_server_update(ctx, s);
+
+ if (r != NULL) {
+ apr_status_t status;
+@@ -160,12 +159,11 @@
+ * right away.
+ */
+ ap_remove_input_filter_byhandle(r->input_filters, "http_in");
+- ap_remove_input_filter_byhandle(r->input_filters, "reqtimeout");
+ ap_remove_output_filter_byhandle(r->output_filters, "HTTP_HEADER");
+
+ /* Ok, start an h2_conn on this one. */
+- h2_ctx_server_set(ctx, r->server);
+- status = h2_conn_setup(ctx, r->connection, r);
++ status = h2_conn_setup(c, r, s);
++
+ if (status != APR_SUCCESS) {
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, status, r, APLOGNO(03088)
+ "session setup");
+@@ -173,7 +171,7 @@
+ return !OK;
+ }
+
+- h2_conn_run(ctx, c);
++ h2_conn_run(c);
+ }
+ return OK;
+ }
+--- a/modules/http2/h2_task.c
++++ b/modules/http2/h2_task.c
+@@ -86,7 +86,7 @@
+ task->request->authority,
+ task->request->path);
+ task->output.opened = 1;
+- return h2_mplx_out_open(task->mplx, task->stream_id, task->output.beam);
++ return h2_mplx_t_out_open(task->mplx, task->stream_id, task->output.beam);
+ }
+
+ static apr_status_t send_out(h2_task *task, apr_bucket_brigade* bb, int block)
+@@ -97,7 +97,7 @@
+ apr_brigade_length(bb, 0, &written);
+ H2_TASK_OUT_LOG(APLOG_TRACE2, task, bb, "h2_task send_out");
+ h2_beam_log(task->output.beam, task->c, APLOG_TRACE2, "send_out(before)");
+- /* engines send unblocking */
++
+ status = h2_beam_send(task->output.beam, bb,
+ block? APR_BLOCK_READ : APR_NONBLOCK_READ);
+ h2_beam_log(task->output.beam, task->c, APLOG_TRACE2, "send_out(after)");
+@@ -126,33 +126,16 @@
+ * request_rec out filter chain) into the h2_mplx for further sending
+ * on the master connection.
+ */
+-static apr_status_t slave_out(h2_task *task, ap_filter_t* f,
+- apr_bucket_brigade* bb)
++static apr_status_t secondary_out(h2_task *task, ap_filter_t* f,
++ apr_bucket_brigade* bb)
+ {
+ apr_bucket *b;
+ apr_status_t rv = APR_SUCCESS;
+ int flush = 0, blocking;
+
+- if (task->frozen) {
+- h2_util_bb_log(task->c, task->stream_id, APLOG_TRACE2,
+- "frozen task output write, ignored", bb);
+- while (!APR_BRIGADE_EMPTY(bb)) {
+- b = APR_BRIGADE_FIRST(bb);
+- if (AP_BUCKET_IS_EOR(b)) {
+- APR_BUCKET_REMOVE(b);
+- task->eor = b;
+- }
+- else {
+- apr_bucket_delete(b);
+- }
+- }
+- return APR_SUCCESS;
+- }
+-
+ send:
+- /* we send block once we opened the output, so someone is there
+- * reading it *and* the task is not assigned to a h2_req_engine */
+- blocking = (!task->assigned && task->output.opened);
++ /* we send blocking once we have opened the output, so someone is there reading it */
++ blocking = task->output.opened;
+ for (b = APR_BRIGADE_FIRST(bb);
+ b != APR_BRIGADE_SENTINEL(bb);
+ b = APR_BUCKET_NEXT(b)) {
+@@ -192,7 +175,7 @@
+ if (APR_SUCCESS == rv) {
+ /* could not write all, buffer the rest */
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, rv, task->c, APLOGNO(03405)
+- "h2_slave_out(%s): saving brigade", task->id);
++ "h2_secondary_out(%s): saving brigade", task->id);
+ ap_assert(NULL);
+ rv = ap_save_brigade(f, &task->output.bb, &bb, task->pool);
+ flush = 1;
+@@ -206,7 +189,7 @@
+ }
+ out:
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, rv, task->c,
+- "h2_slave_out(%s): slave_out leave", task->id);
++ "h2_secondary_out(%s): secondary_out leave", task->id);
+ return rv;
+ }
+
+@@ -219,14 +202,14 @@
+ }
+
+ /*******************************************************************************
+- * task slave connection filters
++ * task secondary connection filters
+ ******************************************************************************/
+
+-static apr_status_t h2_filter_slave_in(ap_filter_t* f,
+- apr_bucket_brigade* bb,
+- ap_input_mode_t mode,
+- apr_read_type_e block,
+- apr_off_t readbytes)
++static apr_status_t h2_filter_secondary_in(ap_filter_t* f,
++ apr_bucket_brigade* bb,
++ ap_input_mode_t mode,
++ apr_read_type_e block,
++ apr_off_t readbytes)
+ {
+ h2_task *task;
+ apr_status_t status = APR_SUCCESS;
+@@ -236,12 +219,12 @@
+ apr_size_t rmax = ((readbytes <= APR_SIZE_MAX)?
+ (apr_size_t)readbytes : APR_SIZE_MAX);
+
+- task = h2_ctx_cget_task(f->c);
++ task = h2_ctx_get_task(f->c);
+ ap_assert(task);
+
+ if (trace1) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, f->c,
+- "h2_slave_in(%s): read, mode=%d, block=%d, readbytes=%ld",
++ "h2_secondary_in(%s): read, mode=%d, block=%d, readbytes=%ld",
+ task->id, mode, block, (long)readbytes);
+ }
+
+@@ -271,7 +254,7 @@
+ /* Get more input data for our request. */
+ if (trace1) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, f->c,
+- "h2_slave_in(%s): get more data from mplx, block=%d, "
++ "h2_secondary_in(%s): get more data from mplx, block=%d, "
+ "readbytes=%ld", task->id, block, (long)readbytes);
+ }
+ if (task->input.beam) {
+@@ -284,7 +267,7 @@
+
+ if (trace1) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, status, f->c,
+- "h2_slave_in(%s): read returned", task->id);
++ "h2_secondary_in(%s): read returned", task->id);
+ }
+ if (APR_STATUS_IS_EAGAIN(status)
+ && (mode == AP_MODE_GETLINE || block == APR_BLOCK_READ)) {
+@@ -310,11 +293,9 @@
+ }
+ }
+
+- /* Nothing there, no more data to get. Return APR_EAGAIN on
+- * speculative reads, this is ap_check_pipeline()'s trick to
+- * see if the connection needs closing. */
++ /* Nothing there, no more data to get. Return. */
+ if (status == APR_EOF && APR_BRIGADE_EMPTY(task->input.bb)) {
+- return (mode == AP_MODE_SPECULATIVE)? APR_EAGAIN : APR_EOF;
++ return status;
+ }
+
+ if (trace1) {
+@@ -325,7 +306,7 @@
+ if (APR_BRIGADE_EMPTY(task->input.bb)) {
+ if (trace1) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, f->c,
+- "h2_slave_in(%s): no data", task->id);
++ "h2_secondary_in(%s): no data", task->id);
+ }
+ return (block == APR_NONBLOCK_READ)? APR_EAGAIN : APR_EOF;
+ }
+@@ -353,7 +334,7 @@
+ buffer[len] = 0;
+ if (trace1) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, f->c,
+- "h2_slave_in(%s): getline: %s",
++ "h2_secondary_in(%s): getline: %s",
+ task->id, buffer);
+ }
+ }
+@@ -363,7 +344,7 @@
+ * to support it. Seems to work. */
+ ap_log_cerror(APLOG_MARK, APLOG_ERR, APR_ENOTIMPL, f->c,
+ APLOGNO(03472)
+- "h2_slave_in(%s), unsupported READ mode %d",
++ "h2_secondary_in(%s), unsupported READ mode %d",
+ task->id, mode);
+ status = APR_ENOTIMPL;
+ }
+@@ -371,19 +352,19 @@
+ if (trace1) {
+ apr_brigade_length(bb, 0, &bblen);
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, f->c,
+- "h2_slave_in(%s): %ld data bytes", task->id, (long)bblen);
++ "h2_secondary_in(%s): %ld data bytes", task->id, (long)bblen);
+ }
+ return status;
+ }
+
+-static apr_status_t h2_filter_slave_output(ap_filter_t* filter,
+- apr_bucket_brigade* brigade)
++static apr_status_t h2_filter_secondary_output(ap_filter_t* filter,
++ apr_bucket_brigade* brigade)
+ {
+- h2_task *task = h2_ctx_cget_task(filter->c);
++ h2_task *task = h2_ctx_get_task(filter->c);
+ apr_status_t status;
+
+ ap_assert(task);
+- status = slave_out(task, filter, brigade);
++ status = secondary_out(task, filter, brigade);
+ if (status != APR_SUCCESS) {
+ h2_task_rst(task, H2_ERR_INTERNAL_ERROR);
+ }
+@@ -392,14 +373,14 @@
+
+ static apr_status_t h2_filter_parse_h1(ap_filter_t* f, apr_bucket_brigade* bb)
+ {
+- h2_task *task = h2_ctx_cget_task(f->c);
++ h2_task *task = h2_ctx_get_task(f->c);
+ apr_status_t status;
+
+ ap_assert(task);
+ /* There are cases where we need to parse a serialized http/1.1
+ * response. One example is a 100-continue answer in serialized mode
+ * or via a mod_proxy setup */
+- while (bb && !task->output.sent_response) {
++ while (bb && !task->c->aborted && !task->output.sent_response) {
+ status = h2_from_h1_parse_response(task, f, bb);
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, status, f->c,
+ "h2_task(%s): parsed response", task->id);
+@@ -425,8 +406,15 @@
+ || !strcmp("OPTIONS", task->request->method));
+ }
+
++int h2_task_has_started(h2_task *task)
++{
++ return task && task->started_at != 0;
++}
++
+ void h2_task_redo(h2_task *task)
+ {
++ task->started_at = 0;
++ task->worker_done = 0;
+ task->rst_error = 0;
+ }
+
+@@ -468,9 +456,9 @@
+ ap_hook_process_connection(h2_task_process_conn,
+ NULL, NULL, APR_HOOK_FIRST);
+
+- ap_register_input_filter("H2_SLAVE_IN", h2_filter_slave_in,
++ ap_register_input_filter("H2_SECONDARY_IN", h2_filter_secondary_in,
+ NULL, AP_FTYPE_NETWORK);
+- ap_register_output_filter("H2_SLAVE_OUT", h2_filter_slave_output,
++ ap_register_output_filter("H2_SECONDARY_OUT", h2_filter_secondary_output,
+ NULL, AP_FTYPE_NETWORK);
+ ap_register_output_filter("H2_PARSE_H1", h2_filter_parse_h1,
+ NULL, AP_FTYPE_NETWORK);
+@@ -502,17 +490,17 @@
+
+ ctx = h2_ctx_get(c, 0);
+ (void)arg;
+- if (h2_ctx_is_task(ctx)) {
++ if (ctx->task) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, c,
+- "h2_slave(%s), pre_connection, adding filters", c->log_id);
+- ap_add_input_filter("H2_SLAVE_IN", NULL, NULL, c);
++ "h2_secondary(%s), pre_connection, adding filters", c->log_id);
++ ap_add_input_filter("H2_SECONDARY_IN", NULL, NULL, c);
+ ap_add_output_filter("H2_PARSE_H1", NULL, NULL, c);
+- ap_add_output_filter("H2_SLAVE_OUT", NULL, NULL, c);
++ ap_add_output_filter("H2_SECONDARY_OUT", NULL, NULL, c);
+ }
+ return OK;
+ }
+
+-h2_task *h2_task_create(conn_rec *slave, int stream_id,
++h2_task *h2_task_create(conn_rec *secondary, int stream_id,
+ const h2_request *req, h2_mplx *m,
+ h2_bucket_beam *input,
+ apr_interval_time_t timeout,
+@@ -521,17 +509,18 @@
+ apr_pool_t *pool;
+ h2_task *task;
+
+- ap_assert(slave);
++ ap_assert(secondary);
+ ap_assert(req);
+
+- apr_pool_create(&pool, slave->pool);
++ apr_pool_create(&pool, secondary->pool);
++ apr_pool_tag(pool, "h2_task");
+ task = apr_pcalloc(pool, sizeof(h2_task));
+ if (task == NULL) {
+ return NULL;
+ }
+ task->id = "000";
+ task->stream_id = stream_id;
+- task->c = slave;
++ task->c = secondary;
+ task->mplx = m;
+ task->pool = pool;
+ task->request = req;
+@@ -564,41 +553,40 @@
+ ap_assert(task);
+ c = task->c;
+ task->worker_started = 1;
+- task->started_at = apr_time_now();
+
+ if (c->master) {
+- /* Each conn_rec->id is supposed to be unique at a point in time. Since
++ /* See the discussion at <https://github.com/icing/mod_h2/issues/195>
++ *
++ * Each conn_rec->id is supposed to be unique at a point in time. Since
+ * some modules (and maybe external code) uses this id as an identifier
+- * for the request_rec they handle, it needs to be unique for slave
++ * for the request_rec they handle, it needs to be unique for secondary
+ * connections also.
+- * The connection id is generated by the MPM and most MPMs use the formula
+- * id := (child_num * max_threads) + thread_num
+- * which means that there is a maximum id of about
+- * idmax := max_child_count * max_threads
+- * If we assume 2024 child processes with 2048 threads max, we get
+- * idmax ~= 2024 * 2048 = 2 ** 22
+- * On 32 bit systems, we have not much space left, but on 64 bit systems
+- * (and higher?) we can use the upper 32 bits without fear of collision.
+- * 32 bits is just what we need, since a connection can only handle so
+- * many streams.
++ *
++ * The MPM assigns the connection ids, and mod_unique_id uses them to
++ * generate identifiers for requests. While this works for HTTP/1.x,
++ * the parallel execution of several requests per connection generates
++ * duplicate identifiers under load.
++ *
++ * The original implementation for secondary connection identifiers used
++ * to shift the master connection id up and assign the stream id to the
++ * lower bits. This was cramped on 32 bit systems, but on 64 bit there was
++ * enough space.
++ *
++ * As issue 195 showed, mod_unique_id only uses the lower 32 bits of
++ * the connection id, even on 64 bit systems, causing collisions in request ids.
++ *
++ * The way master connection ids are generated, there is some space "at the
++ * top" of the lower 32 bits on allmost all systems. If you have a setup
++ * with 64k threads per child and 255 child processes, you live on the edge.
++ *
++ * The new implementation shifts the master id by 8 bits and XORs in
++ * the worker id. This will still collide with > 256 h2 workers under
++ * heavy load. There seems to be no way to solve this in all possible
++ * configurations by mod_h2 alone.
+ */
+- int slave_id, free_bits;
+-
++ task->c->id = (c->master->id << 8)^worker_id;
+ task->id = apr_psprintf(task->pool, "%ld-%d", c->master->id,
+ task->stream_id);
+- if (sizeof(unsigned long) >= 8) {
+- free_bits = 32;
+- slave_id = task->stream_id;
+- }
+- else {
+- /* Assume we have a more limited number of threads/processes
+- * and h2 workers on a 32-bit system. Use the worker instead
+- * of the stream id. */
+- free_bits = 8;
+- slave_id = worker_id;
+- }
+- task->c->id = (c->master->id << free_bits)^slave_id;
+- c->keepalive = AP_CONN_KEEPALIVE;
+ }
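A worked example of the id derivation above, with hypothetical values:

    /* hypothetical: master id as assigned by the MPM, h2 worker slot */
    long master_id = 0x345678L;
    int  worker_id = 17;                               /* 0x11 */
    long secondary_id = (master_id << 8) ^ worker_id;  /* 0x34567811 */
    /* with more than 256 workers the XOR spills into the shifted
     * master bits, so two tasks may compute the same id */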
+
+ h2_beam_create(&task->output.beam, c->pool, task->stream_id, "output",
+@@ -613,7 +601,7 @@
+ h2_ctx_create_for(c, task);
+ apr_table_setn(c->notes, H2_TASK_ID_NOTE, task->id);
+
+- h2_slave_run_pre_connection(c, ap_get_conn_socket(c));
++ h2_secondary_run_pre_connection(c, ap_get_conn_socket(c));
+
+ task->input.bb = apr_brigade_create(task->pool, c->bucket_alloc);
+ if (task->request->serialize) {
+@@ -633,18 +621,9 @@
+ task->c->current_thread = thread;
+ ap_run_process_connection(c);
+
+- if (task->frozen) {
+- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c,
+- "h2_task(%s): process_conn returned frozen task",
+- task->id);
+- /* cleanup delayed */
+- return APR_EAGAIN;
+- }
+- else {
+- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c,
+- "h2_task(%s): processing done", task->id);
+- return output_finish(task);
+- }
++ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c,
++ "h2_task(%s): processing done", task->id);
++ return output_finish(task);
+ }
+
+ static apr_status_t h2_task_process_request(h2_task *task, conn_rec *c)
+@@ -682,14 +661,8 @@
+
+ ap_process_request(r);
+
+- if (task->frozen) {
+- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c,
+- "h2_task(%s): process_request frozen", task->id);
+- }
+- else {
+- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c,
+- "h2_task(%s): process_request done", task->id);
+- }
++ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c,
++ "h2_task(%s): process_request done", task->id);
+
+ /* After the call to ap_process_request, the
+ * request pool may have been deleted. We set
+@@ -724,7 +697,7 @@
+ }
+
+ ctx = h2_ctx_get(c, 0);
+- if (h2_ctx_is_task(ctx)) {
++ if (ctx->task) {
+ if (!ctx->task->request->serialize) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c,
+ "h2_h2, processing request directly");
+@@ -736,33 +709,8 @@
+ }
+ else {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c,
+- "slave_conn(%ld): has no task", c->id);
++ "secondary_conn(%ld): has no task", c->id);
+ }
+ return DECLINED;
+ }
+
+-apr_status_t h2_task_freeze(h2_task *task)
+-{
+- if (!task->frozen) {
+- task->frozen = 1;
+- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, task->c, APLOGNO(03406)
+- "h2_task(%s), frozen", task->id);
+- }
+- return APR_SUCCESS;
+-}
+-
+-apr_status_t h2_task_thaw(h2_task *task)
+-{
+- if (task->frozen) {
+- task->frozen = 0;
+- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, task->c, APLOGNO(03407)
+- "h2_task(%s), thawed", task->id);
+- }
+- task->thawed = 1;
+- return APR_SUCCESS;
+-}
+-
+-int h2_task_has_thawed(h2_task *task)
+-{
+- return task->thawed;
+-}
+--- a/modules/http2/h2_task.h
++++ b/modules/http2/h2_task.h
+@@ -35,14 +35,13 @@
+ *
+ * Finally, to keep certain connection level filters, such as ourselves and
+ * especially mod_ssl ones, from messing with our data, we need a filter
+- * of our own to disble those.
++ * of our own to disable those.
+ */
+
+ struct h2_bucket_beam;
+ struct h2_conn;
+ struct h2_mplx;
+ struct h2_task;
+-struct h2_req_engine;
+ struct h2_request;
+ struct h2_response_parser;
+ struct h2_stream;
+@@ -80,20 +79,18 @@
+ struct h2_mplx *mplx;
+
+ unsigned int filters_set : 1;
+- unsigned int frozen : 1;
+- unsigned int thawed : 1;
+ unsigned int worker_started : 1; /* h2_worker started processing */
+- unsigned int worker_done : 1; /* h2_worker finished */
++ unsigned int redo : 1; /* was throttled, should be restarted later */
++
++ int worker_done; /* h2_worker finished */
++ int done_done; /* task_done has been handled */
+
+ apr_time_t started_at; /* when processing started */
+ apr_time_t done_at; /* when processing was done */
+ apr_bucket *eor;
+-
+- struct h2_req_engine *engine; /* engine hosted by this task */
+- struct h2_req_engine *assigned; /* engine that task has been assigned to */
+ };
+
+-h2_task *h2_task_create(conn_rec *slave, int stream_id,
++h2_task *h2_task_create(conn_rec *secondary, int stream_id,
+ const h2_request *req, struct h2_mplx *m,
+ struct h2_bucket_beam *input,
+ apr_interval_time_t timeout,
+@@ -105,6 +102,7 @@
+
+ void h2_task_redo(h2_task *task);
+ int h2_task_can_redo(h2_task *task);
++int h2_task_has_started(h2_task *task);
+
+ /**
+ * Reset the task with the given error code, resets all input/output.
+@@ -120,8 +118,4 @@
+ extern APR_OPTIONAL_FN_TYPE(ap_logio_add_bytes_in) *h2_task_logio_add_bytes_in;
+ extern APR_OPTIONAL_FN_TYPE(ap_logio_add_bytes_out) *h2_task_logio_add_bytes_out;
+
+-apr_status_t h2_task_freeze(h2_task *task);
+-apr_status_t h2_task_thaw(h2_task *task);
+-int h2_task_has_thawed(h2_task *task);
+-
+ #endif /* defined(__mod_h2__h2_task__) */
+--- a/modules/http2/h2_util.c
++++ b/modules/http2/h2_util.c
+@@ -638,15 +638,6 @@
+ apr_status_t rv;
+ if ((rv = apr_thread_mutex_lock(fifo->lock)) == APR_SUCCESS) {
+ fifo->aborted = 1;
+- apr_thread_mutex_unlock(fifo->lock);
+- }
+- return rv;
+-}
+-
+-apr_status_t h2_fifo_interrupt(h2_fifo *fifo)
+-{
+- apr_status_t rv;
+- if ((rv = apr_thread_mutex_lock(fifo->lock)) == APR_SUCCESS) {
+ apr_thread_cond_broadcast(fifo->not_empty);
+ apr_thread_cond_broadcast(fifo->not_full);
+ apr_thread_mutex_unlock(fifo->lock);
+@@ -710,10 +701,6 @@
+ {
+ apr_status_t rv;
+
+- if (fifo->aborted) {
+- return APR_EOF;
+- }
+-
+ if ((rv = apr_thread_mutex_lock(fifo->lock)) == APR_SUCCESS) {
+ rv = fifo_push_int(fifo, elem, block);
+ apr_thread_mutex_unlock(fifo->lock);
+@@ -754,10 +741,6 @@
+ {
+ apr_status_t rv;
+
+- if (fifo->aborted) {
+- return APR_EOF;
+- }
+-
+ if ((rv = apr_thread_mutex_lock(fifo->lock)) == APR_SUCCESS) {
+ rv = pull_head(fifo, pelem, block);
+ apr_thread_mutex_unlock(fifo->lock);
+@@ -946,15 +929,6 @@
+ apr_status_t rv;
+ if ((rv = apr_thread_mutex_lock(fifo->lock)) == APR_SUCCESS) {
+ fifo->aborted = 1;
+- apr_thread_mutex_unlock(fifo->lock);
+- }
+- return rv;
+-}
+-
+-apr_status_t h2_ififo_interrupt(h2_ififo *fifo)
+-{
+- apr_status_t rv;
+- if ((rv = apr_thread_mutex_lock(fifo->lock)) == APR_SUCCESS) {
+ apr_thread_cond_broadcast(fifo->not_empty);
+ apr_thread_cond_broadcast(fifo->not_full);
+ apr_thread_mutex_unlock(fifo->lock);
+@@ -1018,10 +992,6 @@
+ {
+ apr_status_t rv;
+
+- if (fifo->aborted) {
+- return APR_EOF;
+- }
+-
+ if ((rv = apr_thread_mutex_lock(fifo->lock)) == APR_SUCCESS) {
+ rv = ififo_push_int(fifo, id, block);
+ apr_thread_mutex_unlock(fifo->lock);
+@@ -1062,10 +1032,6 @@
+ {
+ apr_status_t rv;
+
+- if (fifo->aborted) {
+- return APR_EOF;
+- }
+-
+ if ((rv = apr_thread_mutex_lock(fifo->lock)) == APR_SUCCESS) {
+ rv = ipull_head(fifo, pi, block);
+ apr_thread_mutex_unlock(fifo->lock);
+@@ -1088,10 +1054,6 @@
+ apr_status_t rv;
+ int id;
+
+- if (fifo->aborted) {
+- return APR_EOF;
+- }
+-
+ if (APR_SUCCESS == (rv = apr_thread_mutex_lock(fifo->lock))) {
+ if (APR_SUCCESS == (rv = ipull_head(fifo, &id, block))) {
+ switch (fn(id, ctx)) {
+@@ -1117,39 +1079,40 @@
+ return ififo_peek(fifo, fn, ctx, 0);
+ }
+
+-apr_status_t h2_ififo_remove(h2_ififo *fifo, int id)
++static apr_status_t ififo_remove(h2_ififo *fifo, int id)
+ {
+- apr_status_t rv;
++ int rc, i;
+
+ if (fifo->aborted) {
+ return APR_EOF;
+ }
+
+- if ((rv = apr_thread_mutex_lock(fifo->lock)) == APR_SUCCESS) {
+- int i, rc;
+- int e;
+-
+- rc = 0;
+- for (i = 0; i < fifo->count; ++i) {
+- e = fifo->elems[inth_index(fifo, i)];
+- if (e == id) {
+- ++rc;
+- }
+- else if (rc) {
+- fifo->elems[inth_index(fifo, i-rc)] = e;
+- }
+- }
+- if (rc) {
+- fifo->count -= rc;
+- if (fifo->count + rc == fifo->nelems) {
+- apr_thread_cond_broadcast(fifo->not_full);
+- }
+- rv = APR_SUCCESS;
++ rc = 0;
++ for (i = 0; i < fifo->count; ++i) {
++ int e = fifo->elems[inth_index(fifo, i)];
++ if (e == id) {
++ ++rc;
+ }
+- else {
+- rv = APR_EAGAIN;
++ else if (rc) {
++ fifo->elems[inth_index(fifo, i-rc)] = e;
+ }
+-
++ }
++ if (!rc) {
++ return APR_EAGAIN;
++ }
++ fifo->count -= rc;
++ if (fifo->count + rc == fifo->nelems) {
++ apr_thread_cond_broadcast(fifo->not_full);
++ }
++ return APR_SUCCESS;
++}
++
++apr_status_t h2_ififo_remove(h2_ififo *fifo, int id)
++{
++ apr_status_t rv;
++
++ if ((rv = apr_thread_mutex_lock(fifo->lock)) == APR_SUCCESS) {
++ rv = ififo_remove(fifo, id);
+ apr_thread_mutex_unlock(fifo->lock);
+ }
+ return rv;
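The split into a locked public wrapper and a lock-assuming static helper also removes the old unlocked reads of fifo->aborted in push/pull (dropped in the hunks above); presumably the motivation is that reading a flag other threads write under fifo->lock, without holding that lock, is a data race. The resulting pattern, sketched:

    apr_status_t public_op(h2_ififo *fifo, int id)
    {
        apr_status_t rv;
        /* take the lock first; the helper may return early
         * (e.g. APR_EAGAIN) without having to release it */
        if ((rv = apr_thread_mutex_lock(fifo->lock)) == APR_SUCCESS) {
            rv = ififo_remove(fifo, id);
            apr_thread_mutex_unlock(fifo->lock);
        }
        return rv;
    }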
+@@ -1373,7 +1336,7 @@
+ return status;
+ }
+ else if (blen == 0) {
+- /* brigade without data, does it have an EOS bucket somwhere? */
++ /* brigade without data, does it have an EOS bucket somewhere? */
+ *plen = 0;
+ *peos = h2_util_has_eos(bb, -1);
+ }
+@@ -1840,22 +1803,29 @@
+ }
+
+ apr_status_t h2_req_add_header(apr_table_t *headers, apr_pool_t *pool,
+- const char *name, size_t nlen,
+- const char *value, size_t vlen)
++ const char *name, size_t nlen,
++ const char *value, size_t vlen,
++ size_t max_field_len, int *pwas_added)
+ {
+ char *hname, *hvalue;
++ const char *existing;
+
++ *pwas_added = 0;
+ if (h2_req_ignore_header(name, nlen)) {
+ return APR_SUCCESS;
+ }
+ else if (H2_HD_MATCH_LIT("cookie", name, nlen)) {
+- const char *existing = apr_table_get(headers, "cookie");
++ existing = apr_table_get(headers, "cookie");
+ if (existing) {
+ char *nval;
+
+ /* Cookie header come separately in HTTP/2, but need
+ * to be merged by "; " (instead of default ", ")
+ */
++ if (max_field_len && strlen(existing) + vlen + nlen + 4 > max_field_len) {
++ /* "key: oldval, nval" is too long */
++ return APR_EINVAL;
++ }
+ hvalue = apr_pstrndup(pool, value, vlen);
+ nval = apr_psprintf(pool, "%s; %s", existing, hvalue);
+ apr_table_setn(headers, "Cookie", nval);
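HTTP/2 transmits each cookie pair as a separate header field; the check above bounds the re-merged length. The + 4 accounts for "Cookie: " (nlen + 2) plus the "; " separator (+ 2), roughly as in this sketch (illustrative names, not patch code):

    #include <stddef.h>
    #include <string.h>

    static size_t merged_cookie_len(const char *existing,
                                    size_t nlen, size_t vlen)
    {
        /* "Name: existing; value" -> name, ": ", existing, "; ", value */
        return nlen + 2 + strlen(existing) + 2 + vlen;
    }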
+@@ -1869,8 +1839,16 @@
+ }
+
+ hname = apr_pstrndup(pool, name, nlen);
+- hvalue = apr_pstrndup(pool, value, vlen);
+ h2_util_camel_case_header(hname, nlen);
++ existing = apr_table_get(headers, hname);
++ if (max_field_len) {
++ if ((existing? strlen(existing)+2 : 0) + vlen + nlen + 2 > max_field_len) {
++ /* "key: (oldval, )?nval" is too long */
++ return APR_EINVAL;
++ }
++ }
++ if (!existing) *pwas_added = 1;
++ hvalue = apr_pstrndup(pool, value, vlen);
+ apr_table_mergen(headers, hname, hvalue);
+
+ return APR_SUCCESS;
+@@ -1960,7 +1938,8 @@
+ case NGHTTP2_GOAWAY: {
+ size_t len = (frame->goaway.opaque_data_len < s_len)?
+ frame->goaway.opaque_data_len : s_len-1;
+- memcpy(scratch, frame->goaway.opaque_data, len);
++ if (len)
++ memcpy(scratch, frame->goaway.opaque_data, len);
+ scratch[len] = '\0';
+ return apr_snprintf(buffer, maxlen, "GOAWAY[error=%d, reason='%s', "
+ "last_stream=%d]", frame->goaway.error_code,
+--- a/modules/http2/h2_util.h
++++ b/modules/http2/h2_util.h
+@@ -209,7 +209,6 @@
+ apr_status_t h2_fifo_set_create(h2_fifo **pfifo, apr_pool_t *pool, int capacity);
+
+ apr_status_t h2_fifo_term(h2_fifo *fifo);
+-apr_status_t h2_fifo_interrupt(h2_fifo *fifo);
+
+ int h2_fifo_count(h2_fifo *fifo);
+
+@@ -229,7 +228,7 @@
+
+ typedef enum {
+ H2_FIFO_OP_PULL, /* pull the element from the queue, ie discard it */
+- H2_FIFO_OP_REPUSH, /* pull and immediatley re-push it */
++ H2_FIFO_OP_REPUSH, /* pull and immediately re-push it */
+ } h2_fifo_op_t;
+
+ typedef h2_fifo_op_t h2_fifo_peek_fn(void *head, void *ctx);
+@@ -280,7 +279,6 @@
+ apr_status_t h2_ififo_set_create(h2_ififo **pfifo, apr_pool_t *pool, int capacity);
+
+ apr_status_t h2_ififo_term(h2_ififo *fifo);
+-apr_status_t h2_ififo_interrupt(h2_ififo *fifo);
+
+ int h2_ififo_count(h2_ififo *fifo);
+
+@@ -412,9 +410,14 @@
+ apr_status_t h2_req_create_ngheader(h2_ngheader **ph, apr_pool_t *p,
+ const struct h2_request *req);
+
++/**
++ * Add an HTTP/2 header; *pwas_added is set when the key was really
++ * added to the table and not ignored or merged into an existing one.
++ */
+ apr_status_t h2_req_add_header(apr_table_t *headers, apr_pool_t *pool,
+ const char *name, size_t nlen,
+- const char *value, size_t vlen);
++ const char *value, size_t vlen,
++ size_t max_field_len, int *pwas_added);
+
+ /*******************************************************************************
+ * h2_request helpers
+--- a/modules/http2/h2_version.h
++++ b/modules/http2/h2_version.h
+@@ -27,7 +27,7 @@
+ * @macro
+ * Version number of the http2 module as c string
+ */
+-#define MOD_HTTP2_VERSION "1.11.4"
++#define MOD_HTTP2_VERSION "1.15.14"
+
+ /**
+ * @macro
+@@ -35,7 +35,6 @@
+ * release. This is a 24 bit number with 8 bits for major number, 8 bits
+ * for minor and 8 bits for patch. Version 1.2.3 becomes 0x010203.
+ */
+-#define MOD_HTTP2_VERSION_NUM 0x010b04
+-
++#define MOD_HTTP2_VERSION_NUM 0x010f0e
+
+ #endif /* mod_h2_h2_version_h */
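A quick check of the packed version number: 8 bits each for major, minor and patch, so 1.15.14 encodes as follows:

    #include <stdio.h>

    int main(void)
    {
        unsigned int num = (1u << 16) | (15u << 8) | 14u;
        printf("0x%06x\n", num);   /* prints 0x010f0e */
        return 0;
    }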
+--- a/modules/http2/h2_workers.c
++++ b/modules/http2/h2_workers.c
+@@ -155,7 +155,7 @@
+ {
+ apr_status_t rv;
+
+- rv = h2_mplx_pop_task(m, &slot->task);
++ rv = h2_mplx_s_pop_task(m, &slot->task);
+ if (slot->task) {
+ /* Ok, we got something to give back to the worker for execution.
+ * If we still have idle workers, we let the worker be sticky,
+@@ -234,10 +234,10 @@
+ * mplx the opportunity to give us back a new task right away.
+ */
+ if (!slot->aborted && (--slot->sticks > 0)) {
+- h2_mplx_task_done(slot->task->mplx, slot->task, &slot->task);
++ h2_mplx_s_task_done(slot->task->mplx, slot->task, &slot->task);
+ }
+ else {
+- h2_mplx_task_done(slot->task->mplx, slot->task, NULL);
++ h2_mplx_s_task_done(slot->task->mplx, slot->task, NULL);
+ slot->task = NULL;
+ }
+ }
+@@ -269,7 +269,6 @@
+ }
+
+ h2_fifo_term(workers->mplxs);
+- h2_fifo_interrupt(workers->mplxs);
+
+ cleanup_zombies(workers);
+ }
+--- a/modules/http2/mod_http2.c
++++ b/modules/http2/mod_http2.c
+@@ -172,27 +172,6 @@
+ conn_rec *, request_rec *, char *name);
+ static int http2_is_h2(conn_rec *);
+
+-static apr_status_t http2_req_engine_push(const char *ngn_type,
+- request_rec *r,
+- http2_req_engine_init *einit)
+-{
+- return h2_mplx_req_engine_push(ngn_type, r, einit);
+-}
+-
+-static apr_status_t http2_req_engine_pull(h2_req_engine *ngn,
+- apr_read_type_e block,
+- int capacity,
+- request_rec **pr)
+-{
+- return h2_mplx_req_engine_pull(ngn, block, capacity, pr);
+-}
+-
+-static void http2_req_engine_done(h2_req_engine *ngn, conn_rec *r_conn,
+- apr_status_t status)
+-{
+- h2_mplx_req_engine_done(ngn, r_conn, status);
+-}
+-
+ static void http2_get_num_workers(server_rec *s, int *minw, int *maxw)
+ {
+ h2_get_num_workers(s, minw, maxw);
+@@ -220,9 +199,6 @@
+
+ APR_REGISTER_OPTIONAL_FN(http2_is_h2);
+ APR_REGISTER_OPTIONAL_FN(http2_var_lookup);
+- APR_REGISTER_OPTIONAL_FN(http2_req_engine_push);
+- APR_REGISTER_OPTIONAL_FN(http2_req_engine_pull);
+- APR_REGISTER_OPTIONAL_FN(http2_req_engine_done);
+ APR_REGISTER_OPTIONAL_FN(http2_get_num_workers);
+
+ ap_log_perror(APLOG_MARK, APLOG_TRACE1, 0, pool, "installing hooks");
+@@ -260,9 +236,8 @@
+ {
+ if (ctx) {
+ if (r) {
+- h2_task *task = h2_ctx_get_task(ctx);
+- if (task) {
+- h2_stream *stream = h2_mplx_stream_get(task->mplx, task->stream_id);
++ if (ctx->task) {
++ h2_stream *stream = h2_mplx_t_stream_get(ctx->task->mplx, ctx->task);
+ if (stream && stream->push_policy != H2_PUSH_NONE) {
+ return "on";
+ }
+@@ -273,8 +248,7 @@
+ }
+ }
+ else if (s) {
+- const h2_config *cfg = h2_config_sget(s);
+- if (cfg && h2_config_geti(cfg, H2_CONF_PUSH)) {
++ if (h2_config_geti(r, s, H2_CONF_PUSH)) {
+ return "on";
+ }
+ }
+@@ -285,8 +259,7 @@
+ conn_rec *c, request_rec *r, h2_ctx *ctx)
+ {
+ if (ctx) {
+- h2_task *task = h2_ctx_get_task(ctx);
+- if (task && !H2_STREAM_CLIENT_INITIATED(task->stream_id)) {
++ if (ctx->task && !H2_STREAM_CLIENT_INITIATED(ctx->task->stream_id)) {
+ return "PUSHED";
+ }
+ }
+@@ -297,9 +270,8 @@
+ conn_rec *c, request_rec *r, h2_ctx *ctx)
+ {
+ if (ctx) {
+- h2_task *task = h2_ctx_get_task(ctx);
+- if (task && !H2_STREAM_CLIENT_INITIATED(task->stream_id)) {
+- h2_stream *stream = h2_mplx_stream_get(task->mplx, task->stream_id);
++ if (ctx->task && !H2_STREAM_CLIENT_INITIATED(ctx->task->stream_id)) {
++ h2_stream *stream = h2_mplx_t_stream_get(ctx->task->mplx, ctx->task);
+ if (stream) {
+ return apr_itoa(p, stream->initiated_on);
+ }
+@@ -312,9 +284,8 @@
+ conn_rec *c, request_rec *r, h2_ctx *ctx)
+ {
+ if (ctx) {
+- h2_task *task = h2_ctx_get_task(ctx);
+- if (task) {
+- return task->id;
++ if (ctx->task) {
++ return ctx->task->id;
+ }
+ }
+ return "";
+@@ -366,7 +337,7 @@
+ for (i = 0; i < H2_ALEN(H2_VARS); ++i) {
+ h2_var_def *vdef = &H2_VARS[i];
+ if (!strcmp(vdef->name, name)) {
+- h2_ctx *ctx = (r? h2_ctx_rget(r) :
++ h2_ctx *ctx = (r? h2_ctx_get(c, 0) :
+ h2_ctx_get(c->master? c->master : c, 0));
+ return (char *)vdef->lookup(p, s, c, r, ctx);
+ }
+@@ -377,7 +348,7 @@
+ static int h2_h2_fixups(request_rec *r)
+ {
+ if (r->connection->master) {
+- h2_ctx *ctx = h2_ctx_rget(r);
++ h2_ctx *ctx = h2_ctx_get(r->connection, 0);
+ int i;
+
+ for (i = 0; ctx && i < H2_ALEN(H2_VARS); ++i) {
+--- a/modules/http2/mod_http2.dep
++++ b/modules/http2/mod_http2.dep
+@@ -694,7 +694,6 @@
+ ".\h2_ctx.h"\
+ ".\h2_h2.h"\
+ ".\h2_mplx.h"\
+- ".\h2_ngn_shed.h"\
+ ".\h2_private.h"\
+ ".\h2_request.h"\
+ ".\h2_stream.h"\
+@@ -754,7 +753,6 @@
+ ".\h2_ctx.h"\
+ ".\h2_h2.h"\
+ ".\h2_mplx.h"\
+- ".\h2_ngn_shed.h"\
+ ".\h2_private.h"\
+ ".\h2_request.h"\
+ ".\h2_task.h"\
+--- a/modules/http2/mod_http2.dsp
++++ b/modules/http2/mod_http2.dsp
+@@ -145,10 +145,6 @@
+ # End Source File
+ # Begin Source File
+
+-SOURCE=./h2_ngn_shed.c
+-# End Source File
+-# Begin Source File
+-
+ SOURCE=./h2_push.c
+ # End Source File
+ # Begin Source File
+--- a/modules/http2/mod_http2.h
++++ b/modules/http2/mod_http2.h
+@@ -30,22 +30,20 @@
+
+
+ /*******************************************************************************
+- * HTTP/2 request engines
++ * START HTTP/2 request engines (DEPRECATED)
+ ******************************************************************************/
++
++/* The following functions were introduced for the experimental mod_proxy_http2
++ * support, but have been abandoned since.
++ * They are still declared here for backward compatibility, in case someone
++ * tries to build an old mod_proxy_http2 against it, but they will
++ * disappear completely sometime in the future.
++ */
+
+ struct apr_thread_cond_t;
+-
+ typedef struct h2_req_engine h2_req_engine;
+-
+ typedef void http2_output_consumed(void *ctx, conn_rec *c, apr_off_t consumed);
+
+-/**
+- * Initialize a h2_req_engine. The structure will be passed in but
+- * only the name and master are set. The function should initialize
+- * all fields.
+- * @param engine the allocated, partially filled structure
+- * @param r the first request to process, or NULL
+- */
+ typedef apr_status_t http2_req_engine_init(h2_req_engine *engine,
+ const char *id,
+ const char *type,
+@@ -55,35 +53,11 @@
+ http2_output_consumed **pconsumed,
+ void **pbaton);
+
+-/**
+- * Push a request to an engine with the specified name for further processing.
+- * If no such engine is available, einit is not NULL, einit is called
+- * with a new engine record and the caller is responsible for running the
+- * new engine instance.
+- * @param engine_type the type of the engine to add the request to
+- * @param r the request to push to an engine for processing
+- * @param einit an optional initialization callback for a new engine
+- * of the requested type, should no instance be available.
+- * By passing a non-NULL callback, the caller is willing
+- * to init and run a new engine itself.
+- * @return APR_SUCCESS iff slave was successfully added to an engine
+- */
+ APR_DECLARE_OPTIONAL_FN(apr_status_t,
+ http2_req_engine_push, (const char *engine_type,
+ request_rec *r,
+ http2_req_engine_init *einit));
+
+-/**
+- * Get a new request for processing in this engine.
+- * @param engine the engine which is done processing the slave
+- * @param block if call should block waiting for request to come
+- * @param capacity how many parallel requests are acceptable
+- * @param pr the request that needs processing or NULL
+- * @return APR_SUCCESS if new request was assigned
+- * APR_EAGAIN if no new request is available
+- * APR_EOF if engine may shut down, as no more request will be scheduled
+- * APR_ECONNABORTED if the engine needs to shut down immediately
+- */
+ APR_DECLARE_OPTIONAL_FN(apr_status_t,
+ http2_req_engine_pull, (h2_req_engine *engine,
+ apr_read_type_e block,
+@@ -98,4 +72,8 @@
+ http2_get_num_workers, (server_rec *s,
+ int *minw, int *max));
+
++/*******************************************************************************
++ * END HTTP/2 request engines (DEPRECATED)
++ ******************************************************************************/
++
+ #endif
+--- a/modules/http2/mod_http2.mak
++++ b/modules/http2/mod_http2.mak
+@@ -61,7 +61,6 @@
+ -@erase "$(INTDIR)\h2_h2.obj"
+ -@erase "$(INTDIR)\h2_headers.obj"
+ -@erase "$(INTDIR)\h2_mplx.obj"
+- -@erase "$(INTDIR)\h2_ngn_shed.obj"
+ -@erase "$(INTDIR)\h2_push.obj"
+ -@erase "$(INTDIR)\h2_request.obj"
+ -@erase "$(INTDIR)\h2_session.obj"
+@@ -138,7 +137,6 @@
+ "$(INTDIR)\h2_h2.obj" \
+ "$(INTDIR)\h2_headers.obj" \
+ "$(INTDIR)\h2_mplx.obj" \
+- "$(INTDIR)\h2_ngn_shed.obj" \
+ "$(INTDIR)\h2_push.obj" \
+ "$(INTDIR)\h2_request.obj" \
+ "$(INTDIR)\h2_session.obj" \
+@@ -207,7 +205,6 @@
+ -@erase "$(INTDIR)\h2_h2.obj"
+ -@erase "$(INTDIR)\h2_headers.obj"
+ -@erase "$(INTDIR)\h2_mplx.obj"
+- -@erase "$(INTDIR)\h2_ngn_shed.obj"
+ -@erase "$(INTDIR)\h2_push.obj"
+ -@erase "$(INTDIR)\h2_request.obj"
+ -@erase "$(INTDIR)\h2_session.obj"
+@@ -284,7 +281,6 @@
+ "$(INTDIR)\h2_h2.obj" \
+ "$(INTDIR)\h2_headers.obj" \
+ "$(INTDIR)\h2_mplx.obj" \
+- "$(INTDIR)\h2_ngn_shed.obj" \
+ "$(INTDIR)\h2_push.obj" \
+ "$(INTDIR)\h2_request.obj" \
+ "$(INTDIR)\h2_session.obj" \
+@@ -469,11 +465,6 @@
+ "$(INTDIR)\h2_mplx.obj" : $(SOURCE) "$(INTDIR)"
+
+
+-SOURCE=./h2_ngn_shed.c
+-
+-"$(INTDIR)\h2_ngn_shed.obj" : $(SOURCE) "$(INTDIR)"
+-
+-
+ SOURCE=./h2_push.c
+
+ "$(INTDIR)\h2_push.obj" : $(SOURCE) "$(INTDIR)"
+--- a/modules/http2/mod_proxy_http2.c
++++ b/modules/http2/mod_proxy_http2.c
+@@ -16,13 +16,14 @@
+
+ #include <nghttp2/nghttp2.h>
+
++#include <ap_mmn.h>
+ #include <httpd.h>
+ #include <mod_proxy.h>
+ #include "mod_http2.h"
+
+
+ #include "mod_proxy_http2.h"
+-#include "h2_request.h"
++#include "h2.h"
+ #include "h2_proxy_util.h"
+ #include "h2_version.h"
+ #include "h2_proxy_session.h"
+@@ -46,19 +47,12 @@
+
+ /* Optional functions from mod_http2 */
+ static int (*is_h2)(conn_rec *c);
+-static apr_status_t (*req_engine_push)(const char *name, request_rec *r,
+- http2_req_engine_init *einit);
+-static apr_status_t (*req_engine_pull)(h2_req_engine *engine,
+- apr_read_type_e block,
+- int capacity,
+- request_rec **pr);
+-static void (*req_engine_done)(h2_req_engine *engine, conn_rec *r_conn,
+- apr_status_t status);
+-
++
+ typedef struct h2_proxy_ctx {
++ const char *id;
++ conn_rec *master;
+ conn_rec *owner;
+ apr_pool_t *pool;
+- request_rec *rbase;
+ server_rec *server;
+ const char *proxy_func;
+ char server_portstr[32];
+@@ -66,19 +60,15 @@
+ proxy_worker *worker;
+ proxy_server_conf *conf;
+
+- h2_req_engine *engine;
+- const char *engine_id;
+- const char *engine_type;
+- apr_pool_t *engine_pool;
+ apr_size_t req_buffer_size;
+- h2_proxy_fifo *requests;
+ int capacity;
+
+- unsigned standalone : 1;
+ unsigned is_ssl : 1;
+- unsigned flushall : 1;
+
+- apr_status_t r_status; /* status of our first request work */
++ request_rec *r; /* the request processed in this ctx */
++ apr_status_t r_status; /* status of request work */
++ int r_done; /* request was processed, not necessarily successfully */
++ int r_may_retry; /* request may be retried */
+ h2_proxy_session *session; /* current http2 session against backend */
+ } h2_proxy_ctx;
+
+@@ -104,16 +94,6 @@
+ MOD_HTTP2_VERSION, ngh2? ngh2->version_str : "unknown");
+
+ is_h2 = APR_RETRIEVE_OPTIONAL_FN(http2_is_h2);
+- req_engine_push = APR_RETRIEVE_OPTIONAL_FN(http2_req_engine_push);
+- req_engine_pull = APR_RETRIEVE_OPTIONAL_FN(http2_req_engine_pull);
+- req_engine_done = APR_RETRIEVE_OPTIONAL_FN(http2_req_engine_done);
+-
+- /* we need all of them */
+- if (!req_engine_push || !req_engine_pull || !req_engine_done) {
+- req_engine_push = NULL;
+- req_engine_pull = NULL;
+- req_engine_done = NULL;
+- }
+
+ return status;
+ }
+@@ -204,45 +184,6 @@
+ return OK;
+ }
+
+-static void out_consumed(void *baton, conn_rec *c, apr_off_t bytes)
+-{
+- h2_proxy_ctx *ctx = baton;
+-
+- if (ctx->session) {
+- h2_proxy_session_update_window(ctx->session, c, bytes);
+- }
+-}
+-
+-static apr_status_t proxy_engine_init(h2_req_engine *engine,
+- const char *id,
+- const char *type,
+- apr_pool_t *pool,
+- apr_size_t req_buffer_size,
+- request_rec *r,
+- http2_output_consumed **pconsumed,
+- void **pctx)
+-{
+- h2_proxy_ctx *ctx = ap_get_module_config(r->connection->conn_config,
+- &proxy_http2_module);
+- if (!ctx) {
+- ap_log_rerror(APLOG_MARK, APLOG_WARNING, 0, r, APLOGNO(03368)
+- "h2_proxy_session, engine init, no ctx found");
+- return APR_ENOTIMPL;
+- }
+-
+- ctx->pool = pool;
+- ctx->engine = engine;
+- ctx->engine_id = id;
+- ctx->engine_type = type;
+- ctx->engine_pool = pool;
+- ctx->req_buffer_size = req_buffer_size;
+- ctx->capacity = H2MIN(100, h2_proxy_fifo_capacity(ctx->requests));
+-
+- *pconsumed = out_consumed;
+- *pctx = ctx;
+- return APR_SUCCESS;
+-}
+-
+ static apr_status_t add_request(h2_proxy_session *session, request_rec *r)
+ {
+ h2_proxy_ctx *ctx = session->user_data;
+@@ -252,7 +193,7 @@
+ url = apr_table_get(r->notes, H2_PROXY_REQ_URL_NOTE);
+ apr_table_setn(r->notes, "proxy-source-port", apr_psprintf(r->pool, "%hu",
+ ctx->p_conn->connection->local_addr->port));
+- status = h2_proxy_session_submit(session, url, r, ctx->standalone);
++ status = h2_proxy_session_submit(session, url, r, 1);
+ if (status != APR_SUCCESS) {
+ ap_log_cerror(APLOG_MARK, APLOG_ERR, status, r->connection, APLOGNO(03351)
+ "pass request body failed to %pI (%s) from %s (%s)",
+@@ -266,43 +207,15 @@
+ static void request_done(h2_proxy_ctx *ctx, request_rec *r,
+ apr_status_t status, int touched)
+ {
+- const char *task_id = apr_table_get(r->connection->notes, H2_TASK_ID_NOTE);
+-
+- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, r->connection,
+- "h2_proxy_session(%s): request done %s, touched=%d",
+- ctx->engine_id, task_id, touched);
+- if (status != APR_SUCCESS) {
+- if (!touched) {
+- /* untouched request, need rescheduling */
+- status = h2_proxy_fifo_push(ctx->requests, r);
+- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, r->connection,
+- APLOGNO(03369)
+- "h2_proxy_session(%s): rescheduled request %s",
+- ctx->engine_id, task_id);
+- return;
+- }
+- else {
+- const char *uri;
+- uri = apr_uri_unparse(r->pool, &r->parsed_uri, 0);
+- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, r->connection,
+- APLOGNO(03471) "h2_proxy_session(%s): request %s -> %s "
+- "not complete, cannot repeat",
+- ctx->engine_id, task_id, uri);
+- }
+- }
+-
+- if (r == ctx->rbase) {
++ if (r == ctx->r) {
++ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, r->connection,
++ "h2_proxy_session(%s): request done, touched=%d",
++ ctx->id, touched);
++ ctx->r_done = 1;
++ if (touched) ctx->r_may_retry = 0;
+ ctx->r_status = ((status == APR_SUCCESS)? APR_SUCCESS
+ : HTTP_SERVICE_UNAVAILABLE);
+ }
+-
+- if (req_engine_done && ctx->engine) {
+- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, r->connection,
+- APLOGNO(03370)
+- "h2_proxy_session(%s): finished request %s",
+- ctx->engine_id, task_id);
+- req_engine_done(ctx->engine, r->connection, status);
+- }
+ }
+
+ static void session_req_done(h2_proxy_session *session, request_rec *r,
+@@ -311,43 +224,15 @@
+ request_done(session->user_data, r, status, touched);
+ }
+
+-static apr_status_t next_request(h2_proxy_ctx *ctx, int before_leave)
+-{
+- if (h2_proxy_fifo_count(ctx->requests) > 0) {
+- return APR_SUCCESS;
+- }
+- else if (req_engine_pull && ctx->engine) {
+- apr_status_t status;
+- request_rec *r = NULL;
+-
+- status = req_engine_pull(ctx->engine, before_leave?
+- APR_BLOCK_READ: APR_NONBLOCK_READ,
+- ctx->capacity, &r);
+- if (status == APR_SUCCESS && r) {
+- ap_log_cerror(APLOG_MARK, APLOG_TRACE3, status, ctx->owner,
+- "h2_proxy_engine(%s): pulled request (%s) %s",
+- ctx->engine_id,
+- before_leave? "before leave" : "regular",
+- r->the_request);
+- h2_proxy_fifo_push(ctx->requests, r);
+- }
+- return APR_STATUS_IS_EAGAIN(status)? APR_SUCCESS : status;
+- }
+- return APR_EOF;
+-}
+-
+-static apr_status_t proxy_engine_run(h2_proxy_ctx *ctx) {
++static apr_status_t ctx_run(h2_proxy_ctx *ctx) {
+ apr_status_t status = OK;
+ int h2_front;
+- request_rec *r;
+
+ /* Step Four: Send the Request in a new HTTP/2 stream and
+ * loop until we got the response or encounter errors.
+ */
+- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, ctx->owner,
+- "eng(%s): setup session", ctx->engine_id);
+ h2_front = is_h2? is_h2(ctx->owner) : 0;
+- ctx->session = h2_proxy_session_setup(ctx->engine_id, ctx->p_conn, ctx->conf,
++ ctx->session = h2_proxy_session_setup(ctx->id, ctx->p_conn, ctx->conf,
+ h2_front, 30,
+ h2_proxy_log2((int)ctx->req_buffer_size),
+ session_req_done);
+@@ -358,105 +243,45 @@
+ }
+
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, ctx->owner, APLOGNO(03373)
+- "eng(%s): run session %s", ctx->engine_id, ctx->session->id);
++ "eng(%s): run session %s", ctx->id, ctx->session->id);
+ ctx->session->user_data = ctx;
+
+- while (!ctx->owner->aborted) {
+- if (APR_SUCCESS == h2_proxy_fifo_try_pull(ctx->requests, (void**)&r)) {
+- add_request(ctx->session, r);
+- }
+-
++ ctx->r_done = 0;
++ add_request(ctx->session, ctx->r);
++
++ while (!ctx->master->aborted && !ctx->r_done) {
++
+ status = h2_proxy_session_process(ctx->session);
+-
+- if (status == APR_SUCCESS) {
+- apr_status_t s2;
+- /* ongoing processing, call again */
+- if (ctx->session->remote_max_concurrent > 0
+- && ctx->session->remote_max_concurrent != ctx->capacity) {
+- ctx->capacity = H2MIN((int)ctx->session->remote_max_concurrent,
+- h2_proxy_fifo_capacity(ctx->requests));
+- }
+- s2 = next_request(ctx, 0);
+- if (s2 == APR_ECONNABORTED) {
+- /* master connection gone */
+- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, s2, ctx->owner,
+- APLOGNO(03374) "eng(%s): pull request",
+- ctx->engine_id);
+- /* give notice that we're leaving and cancel all ongoing
+- * streams. */
+- next_request(ctx, 1);
+- h2_proxy_session_cancel_all(ctx->session);
+- h2_proxy_session_process(ctx->session);
+- status = ctx->r_status = APR_SUCCESS;
+- break;
+- }
+- if ((h2_proxy_fifo_count(ctx->requests) == 0)
+- && h2_proxy_ihash_empty(ctx->session->streams)) {
+- break;
+- }
+- }
+- else {
+- /* end of processing, maybe error */
++ if (status != APR_SUCCESS) {
++ /* Encountered an error during session processing */
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, ctx->owner,
+ APLOGNO(03375) "eng(%s): end of session %s",
+- ctx->engine_id, ctx->session->id);
+- /*
+- * Any open stream of that session needs to
++ ctx->id, ctx->session->id);
++ /* Any open stream of that session needs to
+ * a) be reopened on the new session iff safe to do so
+ * b) reported as done (failed) otherwise
+ */
+ h2_proxy_session_cleanup(ctx->session, session_req_done);
+- break;
++ goto out;
+ }
+ }
+
+- ctx->session->user_data = NULL;
+- ctx->session = NULL;
+-
+- return status;
+-}
+-
+-static apr_status_t push_request_somewhere(h2_proxy_ctx *ctx, request_rec *r)
+-{
+- conn_rec *c = ctx->owner;
+- const char *engine_type, *hostname;
+-
+- hostname = (ctx->p_conn->ssl_hostname?
+- ctx->p_conn->ssl_hostname : ctx->p_conn->hostname);
+- engine_type = apr_psprintf(ctx->pool, "proxy_http2 %s%s", hostname,
+- ctx->server_portstr);
+-
+- if (c->master && req_engine_push && r && is_h2 && is_h2(c)) {
+- /* If we are have req_engine capabilities, push the handling of this
+- * request (e.g. slave connection) to a proxy_http2 engine which
+- * uses the same backend. We may be called to create an engine
+- * ourself. */
+- if (req_engine_push(engine_type, r, proxy_engine_init) == APR_SUCCESS) {
+- if (ctx->engine == NULL) {
+- /* request has been assigned to an engine in another thread */
+- return SUSPENDED;
+- }
++out:
++ if (ctx->master->aborted) {
++ /* master connection gone */
++ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, ctx->owner,
++ APLOGNO(03374) "eng(%s): master connection gone", ctx->id);
++ /* cancel all ongoing requests */
++ h2_proxy_session_cancel_all(ctx->session);
++ h2_proxy_session_process(ctx->session);
++ if (!ctx->master->aborted) {
++ status = ctx->r_status = APR_SUCCESS;
+ }
+ }
+
+- if (!ctx->engine) {
+- /* No engine was available or has been initialized, handle this
+- * request just by ourself. */
+- ctx->engine_id = apr_psprintf(ctx->pool, "eng-proxy-%ld", c->id);
+- ctx->engine_type = engine_type;
+- ctx->engine_pool = ctx->pool;
+- ctx->req_buffer_size = (32*1024);
+- ctx->standalone = 1;
+- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c,
+- "h2_proxy_http2(%ld): setup standalone engine for type %s",
+- c->id, engine_type);
+- }
+- else {
+- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c,
+- "H2: hosting engine %s", ctx->engine_id);
+- }
+-
+- return h2_proxy_fifo_push(ctx->requests, r);
++ ctx->session->user_data = NULL;
++ ctx->session = NULL;
++ return status;
+ }
+
+ static int proxy_http2_handler(request_rec *r,
+@@ -466,7 +291,7 @@
+ const char *proxyname,
+ apr_port_t proxyport)
+ {
+- const char *proxy_func;
++ const char *proxy_func, *task_id;
+ char *locurl = url, *u;
+ apr_size_t slen;
+ int is_ssl = 0;
+@@ -498,29 +323,35 @@
+ default:
+ return DECLINED;
+ }
++
++ task_id = apr_table_get(r->connection->notes, H2_TASK_ID_NOTE);
+
+ ctx = apr_pcalloc(r->pool, sizeof(*ctx));
+- ctx->owner = r->connection;
+- ctx->pool = r->pool;
+- ctx->rbase = r;
+- ctx->server = r->server;
++ ctx->master = r->connection->master? r->connection->master : r->connection;
++ ctx->id = task_id? task_id : apr_psprintf(r->pool, "%ld", (long)ctx->master->id);
++ ctx->owner = r->connection;
++ ctx->pool = r->pool;
++ ctx->server = r->server;
+ ctx->proxy_func = proxy_func;
+- ctx->is_ssl = is_ssl;
+- ctx->worker = worker;
+- ctx->conf = conf;
+- ctx->flushall = apr_table_get(r->subprocess_env, "proxy-flushall")? 1 : 0;
+- ctx->r_status = HTTP_SERVICE_UNAVAILABLE;
+-
+- h2_proxy_fifo_set_create(&ctx->requests, ctx->pool, 100);
++ ctx->is_ssl = is_ssl;
++ ctx->worker = worker;
++ ctx->conf = conf;
++ ctx->req_buffer_size = (32*1024);
++ ctx->r = r;
++ ctx->r_status = status = HTTP_SERVICE_UNAVAILABLE;
++ ctx->r_done = 0;
++ ctx->r_may_retry = 1;
+
+ ap_set_module_config(ctx->owner->conn_config, &proxy_http2_module, ctx);
+
+ /* scheme says, this is for us. */
+- apr_table_setn(ctx->rbase->notes, H2_PROXY_REQ_URL_NOTE, url);
+- ap_log_rerror(APLOG_MARK, APLOG_TRACE1, 0, ctx->rbase,
++ apr_table_setn(ctx->r->notes, H2_PROXY_REQ_URL_NOTE, url);
++ ap_log_rerror(APLOG_MARK, APLOG_TRACE1, 0, ctx->r,
+ "H2: serving URL %s", url);
+
+ run_connect:
++ if (ctx->master->aborted) goto cleanup;
++
+ /* Get a proxy_conn_rec from the worker, might be a new one, might
+ * be one still open from another request, or it might fail if the
+ * worker is stopped or in error. */
+@@ -530,25 +361,11 @@
+ }
+
+ ctx->p_conn->is_ssl = ctx->is_ssl;
+- if (ctx->is_ssl && ctx->p_conn->connection) {
+- /* If there are some metadata on the connection (e.g. TLS alert),
+- * let mod_ssl detect them, and create a new connection below.
+- */
+- apr_bucket_brigade *tmp_bb;
+- tmp_bb = apr_brigade_create(ctx->rbase->pool,
+- ctx->rbase->connection->bucket_alloc);
+- status = ap_get_brigade(ctx->p_conn->connection->input_filters, tmp_bb,
+- AP_MODE_SPECULATIVE, APR_NONBLOCK_READ, 1);
+- if (status != APR_SUCCESS && !APR_STATUS_IS_EAGAIN(status)) {
+- ctx->p_conn->close = 1;
+- }
+- apr_brigade_cleanup(tmp_bb);
+- }
+
+ /* Step One: Determine the URL to connect to (might be a proxy),
+ * initialize the backend accordingly and determine the server
+ * port string we can expect in responses. */
+- if ((status = ap_proxy_determine_connection(ctx->pool, ctx->rbase, conf, worker,
++ if ((status = ap_proxy_determine_connection(ctx->pool, ctx->r, conf, worker,
+ ctx->p_conn, &uri, &locurl,
+ proxyname, proxyport,
+ ctx->server_portstr,
+@@ -556,17 +373,6 @@
+ goto cleanup;
+ }
+
+- /* If we are not already hosting an engine, try to push the request
+- * to an already existing engine or host a new engine here. */
+- if (r && !ctx->engine) {
+- ctx->r_status = push_request_somewhere(ctx, r);
+- r = NULL;
+- if (ctx->r_status == SUSPENDED) {
+- /* request was pushed to another thread, leave processing here */
+- goto cleanup;
+- }
+- }
+-
+ /* Step Two: Make the Connection (or check that an already existing
+ * socket is still usable). On success, we have a socket connected to
+ * backend->hostname. */
+@@ -575,70 +381,56 @@
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, ctx->owner, APLOGNO(03352)
+ "H2: failed to make connection to backend: %s",
+ ctx->p_conn->hostname);
+- goto reconnect;
++ goto cleanup;
+ }
+
+ /* Step Three: Create conn_rec for the socket we have open now. */
+- if (!ctx->p_conn->connection) {
+- status = ap_proxy_connection_create_ex(ctx->proxy_func,
+- ctx->p_conn, ctx->rbase);
+- if (status != OK) {
+- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, ctx->owner, APLOGNO(03353)
+- "setup new connection: is_ssl=%d %s %s %s",
+- ctx->p_conn->is_ssl, ctx->p_conn->ssl_hostname,
+- locurl, ctx->p_conn->hostname);
+- goto reconnect;
+- }
+-
+- if (!ctx->p_conn->data) {
+- /* New conection: set a note on the connection what CN is
+- * requested and what protocol we want */
+- if (ctx->p_conn->ssl_hostname) {
+- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, ctx->owner,
+- "set SNI to %s for (%s)",
+- ctx->p_conn->ssl_hostname,
+- ctx->p_conn->hostname);
+- apr_table_setn(ctx->p_conn->connection->notes,
+- "proxy-request-hostname", ctx->p_conn->ssl_hostname);
+- }
+- if (ctx->is_ssl) {
+- apr_table_setn(ctx->p_conn->connection->notes,
+- "proxy-request-alpn-protos", "h2");
+- }
+- }
++ status = ap_proxy_connection_create_ex(ctx->proxy_func, ctx->p_conn, ctx->r);
++ if (status != OK) {
++ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, ctx->owner, APLOGNO(03353)
++ "setup new connection: is_ssl=%d %s %s %s",
++ ctx->p_conn->is_ssl, ctx->p_conn->ssl_hostname,
++ locurl, ctx->p_conn->hostname);
++ ctx->r_status = status;
++ goto cleanup;
+ }
+-
+-run_session:
+- status = proxy_engine_run(ctx);
+- if (status == APR_SUCCESS) {
+- /* session and connection still ok */
+- if (next_request(ctx, 1) == APR_SUCCESS) {
+- /* more requests, run again */
+- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, ctx->owner, APLOGNO(03376)
+- "run_session, again");
+- goto run_session;
++
++ if (!ctx->p_conn->data && ctx->is_ssl) {
++ /* New SSL connection: set a note on the connection about what
++ * protocol we want.
++ */
++ apr_table_setn(ctx->p_conn->connection->notes,
++ "proxy-request-alpn-protos", "h2");
++ if (ctx->p_conn->ssl_hostname) {
++ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, ctx->owner,
++ "set SNI to %s for (%s)",
++ ctx->p_conn->ssl_hostname,
++ ctx->p_conn->hostname);
++ apr_table_setn(ctx->p_conn->connection->notes,
++ "proxy-request-hostname", ctx->p_conn->ssl_hostname);
+ }
+- /* done */
+- ctx->engine = NULL;
+ }
+
+-reconnect:
+- if (next_request(ctx, 1) == APR_SUCCESS) {
+- /* Still more to do, tear down old conn and start over */
++ if (ctx->master->aborted) goto cleanup;
++ status = ctx_run(ctx);
++
++ if (ctx->r_status != APR_SUCCESS && ctx->r_may_retry && !ctx->master->aborted) {
++ /* Not successfully processed, but may retry, tear down old conn and start over */
+ if (ctx->p_conn) {
+ ctx->p_conn->close = 1;
+- /*only in trunk so far */
+- /*proxy_run_detach_backend(r, ctx->p_conn);*/
++#if AP_MODULE_MAGIC_AT_LEAST(20140207, 2)
++ proxy_run_detach_backend(r, ctx->p_conn);
++#endif
+ ap_proxy_release_connection(ctx->proxy_func, ctx->p_conn, ctx->server);
+ ctx->p_conn = NULL;
+ }
+ ++reconnects;
+- if (reconnects < 5 && !ctx->owner->aborted) {
++ if (reconnects < 5) {
+ goto run_connect;
+ }
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, ctx->owner, APLOGNO(10023)
+- "giving up after %d reconnects, %d requests todo",
+- reconnects, h2_proxy_fifo_count(ctx->requests));
++ "giving up after %d reconnects, request-done=%d",
++ reconnects, ctx->r_done);
+ }
+
+ cleanup:
+@@ -647,17 +439,13 @@
+ /* close socket when errors happened or session shut down (EOF) */
+ ctx->p_conn->close = 1;
+ }
+- /*only in trunk so far */
+- /*proxy_run_detach_backend(ctx->rbase, ctx->p_conn);*/
++#if AP_MODULE_MAGIC_AT_LEAST(20140207, 2)
++ proxy_run_detach_backend(ctx->r, ctx->p_conn);
++#endif
+ ap_proxy_release_connection(ctx->proxy_func, ctx->p_conn, ctx->server);
+ ctx->p_conn = NULL;
+ }
+
+- /* Any requests will still have need to fail */
+- while (APR_SUCCESS == h2_proxy_fifo_try_pull(ctx->requests, (void**)&r)) {
+- request_done(ctx, r, HTTP_SERVICE_UNAVAILABLE, 1);
+- }
+-
+ ap_set_module_config(ctx->owner->conn_config, &proxy_http2_module, NULL);
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, ctx->owner,
+ APLOGNO(03377) "leaving handler");
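
Note: the hunks above replace mod_proxy_http2's old request-engine pooling with a
simpler per-request flow: connect, run the single request, and on a retryable
failure (unless the master connection has aborted) release the backend connection
and reconnect, capped at five attempts. A self-contained toy model of that loop;
try_request() and the success condition are invented purely for illustration,
this is not the module code:

    #include <stdio.h>

    static int try_request(int attempt)
    {
        return attempt >= 3; /* pretend the backend succeeds on the 4th try */
    }

    int main(void)
    {
        int reconnects = 0;
        int done = 0;
        int master_aborted = 0; /* ctx->master->aborted in the real handler */

        while (!done && !master_aborted) {
            done = try_request(reconnects);
            if (!done && ++reconnects >= 5) {  /* same cap as the patch */
                printf("giving up after %d reconnects\n", reconnects);
                return 1;
            }
        }
        printf("request done after %d reconnects\n", reconnects);
        return 0;
    }
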
diff --git a/debian/patches/no_LD_LIBRARY_PATH.patch b/debian/patches/no_LD_LIBRARY_PATH.patch
new file mode 100644
index 0000000..85966fd
--- /dev/null
+++ b/debian/patches/no_LD_LIBRARY_PATH.patch
@@ -0,0 +1,18 @@
+Description: Remove LD_LIBRARY_PATH from envvars-std
+Forwarded: no
+Author: Adam Conrad <adconrad@0c3.net>
+Last-Update: 2012-04-15
+--- a/support/envvars-std.in
++++ b/support/envvars-std.in
+@@ -18,11 +18,4 @@
+ #
+ # This file is generated from envvars-std.in
+ #
+-if test "x$@SHLIBPATH_VAR@" != "x" ; then
+- @SHLIBPATH_VAR@="@exp_libdir@:$@SHLIBPATH_VAR@"
+-else
+- @SHLIBPATH_VAR@="@exp_libdir@"
+-fi
+-export @SHLIBPATH_VAR@
+-#
+ @OS_SPECIFIC_VARS@
diff --git a/debian/patches/reproducible_builds.diff b/debian/patches/reproducible_builds.diff
new file mode 100644
index 0000000..36f71e2
--- /dev/null
+++ b/debian/patches/reproducible_builds.diff
@@ -0,0 +1,40 @@
+Description: Make builds reproducible
+ Don't embed __DATE__ and __TIME__; use the changelog date instead.
+ Sort the exported symbols so their order does not depend on the build locale.
+Author: Jean-Michel Vourgère <nirgal@debian.org>
+Forwarded: no
+Last-Update: 2015-08-11
+
+--- a/server/buildmark.c
++++ b/server/buildmark.c
+@@ -17,11 +17,7 @@
+ #include "ap_config.h"
+ #include "httpd.h"
+
+-#if defined(__DATE__) && defined(__TIME__)
+-static const char server_built[] = __DATE__ " " __TIME__;
+-#else
+-static const char server_built[] = "unknown";
+-#endif
++static const char server_built[] = BUILD_DATETIME;
+
+ AP_DECLARE(const char *) ap_get_server_built()
+ {
+--- a/server/Makefile.in
++++ b/server/Makefile.in
+@@ -1,3 +1,4 @@
++export LC_ALL = C
+
+ CLEAN_TARGETS = gen_test_char test_char.h \
+ ApacheCoreOS2.def httpd.exp export_files \
+@@ -82,8 +83,8 @@
+ @echo "#! ." > $@
+ @echo "* This file was AUTOGENERATED at build time." >> $@
+ @echo "* Please do not edit by hand." >> $@
+- $(CPP) $(ALL_CPPFLAGS) $(ALL_INCLUDES) exports.c | grep "ap_hack_" | grep -v apr_ | sed -e 's/^.*[)]\(.*\);$$/\1/' >> $@
+- $(CPP) $(ALL_CPPFLAGS) $(ALL_INCLUDES) export_vars.h | grep -v apr_ | sed -e 's/^\#[^!]*//' | sed -e '/^$$/d' >> $@
++ $(CPP) $(ALL_CPPFLAGS) $(ALL_INCLUDES) exports.c | grep "ap_hack_" | grep -v apr_ | sed -e 's/^.*[)]\(.*\);$$/\1/' | sort >> $@
++ $(CPP) $(ALL_CPPFLAGS) $(ALL_INCLUDES) export_vars.h | grep -v apr_ | sed -e 's/^\#[^!]*//' | sed -e '/^$$/d' | sort >> $@
+
+
+ # developer stuff
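
Note: reproducible_builds.diff works because __DATE__ and __TIME__ expand to the
compiler's wall clock, so rebuilding identical sources yields different binaries,
whereas BUILD_DATETIME is injected by the build system (the patch only shows
server/buildmark.c consuming the macro; where Debian defines it is not shown
here, so the -D flag below is an assumption). The LC_ALL=C export likewise pins
sort's collation so the generated export list is byte-identical across build
locales. A standalone sketch of the buildmark change:

    #include <stdio.h>

    /* Fallback so this compiles on its own; the real build is expected to
     * pass -DBUILD_DATETIME="..." (an assumption, not shown in the patch). */
    #ifndef BUILD_DATETIME
    #define BUILD_DATETIME "1970-01-01T00:00:00"
    #endif

    static const char built_bad[]  = __DATE__ " " __TIME__; /* varies per build */
    static const char built_good[] = BUILD_DATETIME;        /* fixed input */

    int main(void)
    {
        printf("non-reproducible: %s\nreproducible:     %s\n",
               built_bad, built_good);
        return 0;
    }

Built with e.g. cc -DBUILD_DATETIME='"2015-08-11T00:00:00"' demo.c, the second
string depends only on the flag, never on the clock.
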
diff --git a/debian/patches/series b/debian/patches/series
new file mode 100644
index 0000000..5ac5730
--- /dev/null
+++ b/debian/patches/series
@@ -0,0 +1,51 @@
+fhs_compliance.patch
+no_LD_LIBRARY_PATH.patch
+suexec-CVE-2007-1742.patch
+customize_apxs.patch
+build_suexec-custom.patch
+reproducible_builds.diff
+
+# This patch is applied manually
+#suexec-custom.patch
+spelling-errors.patch
+
+CVE-2019-0196.patch
+CVE-2019-0211.patch
+CVE-2019-0215.patch
+CVE-2019-0217.patch
+CVE-2019-0220-1.patch
+CVE-2019-0220-2.patch
+CVE-2019-0220-3.patch
+CVE-2019-0197.patch
+CVE-2019-10092.patch
+CVE-2019-10097.patch
+CVE-2019-10098.patch
+import-http2-module-from-2.4.46.patch
+CVE-2020-11984.patch
+CVE-2020-1927.patch
+CVE-2020-1934.patch
+CVE-2021-31618.patch
+CVE-2021-30641.patch
+CVE-2021-26691.patch
+CVE-2021-26690.patch
+CVE-2020-35452.patch
+CVE-2021-34798.patch
+CVE-2021-36160.patch
+CVE-2021-39275.patch
+CVE-2021-40438.patch
+CVE-2021-44224-1.patch
+CVE-2021-44224-2.patch
+CVE-2021-44790.patch
+CVE-2021-36160-2.patch
+CVE-2022-22719.patch
+CVE-2022-22720.patch
+CVE-2022-22721.patch
+CVE-2022-23943-1.patch
+CVE-2022-23943-2.patch
+CVE-2022-26377.patch
+CVE-2022-28614.patch
+CVE-2022-28615.patch
+CVE-2022-29404.patch
+CVE-2022-30522.patch
+CVE-2022-30556.patch
+CVE-2022-31813.patch
diff --git a/debian/patches/spelling-errors.patch b/debian/patches/spelling-errors.patch
new file mode 100644
index 0000000..d42ec00
--- /dev/null
+++ b/debian/patches/spelling-errors.patch
@@ -0,0 +1,196 @@
+Description: Fix spelling errors
+Author: Xavier Guimard <yadd@debian.org>
+Forwarded: https://bz.apache.org/bugzilla/show_bug.cgi?id=62960
+Last-Update: 2018-11-28
+
+--- a/LICENSE
++++ b/LICENSE
+@@ -516,7 +516,7 @@
+ This program may be used and copied freely providing this copyright notice
+ is not removed.
+
+-This software is provided "as is" and any express or implied waranties,
++This software is provided "as is" and any express or implied warranties,
+ including but not limited to, the implied warranties of merchantability and
+ fitness for a particular purpose are disclaimed. In no event shall
+ Zeus Technology Ltd. be liable for any direct, indirect, incidental, special,
+--- a/docs/man/httxt2dbm.1
++++ b/docs/man/httxt2dbm.1
+@@ -50,7 +50,7 @@
+ Specify the DBM type to be used for the output\&. If not specified, will use the APR Default\&. Available types are: \fBGDBM\fR for GDBM files, \fBSDBM\fR for SDBM files, \fBDB\fR for berkeley DB files, \fBNDBM\fR for NDBM files, \fBdefault\fR for the default DBM type\&.
+ .TP
+ \fB-i \fISOURCE_TXT\fR\fR
+-Input file from which the dbm is to be created\&. The file should be formated with one record per line, of the form: \fBkey value\fR\&. See the documentation for RewriteMap for further details of this file's format and meaning\&.
++Input file from which the dbm is to be created\&. The file should be formatted with one record per line, of the form: \fBkey value\fR\&. See the documentation for RewriteMap for further details of this file's format and meaning\&.
+ .TP
+ \fB-o \fIOUTPUT_DBM\fR\fR
+ Name of the output dbm files\&.
+--- a/docs/manual/howto/htaccess.html.ja.utf8
++++ b/docs/manual/howto/htaccess.html.ja.utf8
+@@ -247,7 +247,7 @@
+
+ <p>As discussed in the documentation on <a href="../sections.html">Configuration Sections</a>,
+ <code>.htaccess</code> files can override the <code class="directive"><a href="../mod/core.html#directory">&lt;Directory&gt;</a></code> sections for
+- the corresponding directory, but will be overriden by other types
++ the corresponding directory, but will be overridden by other types
+ of configuration sections from the main configuration files. This
+ fact can be used to enforce certain configurations, even in the
+ presence of a liberal <code class="directive"><a href="../mod/core.html#allowoverride">AllowOverride</a></code> setting. For example, to
+@@ -414,4 +414,4 @@
+ prettyPrint();
+ }
+ //--><!]]></script>
+-</body></html>
+\ No newline at end of file
++</body></html>
+--- a/docs/manual/mod/core.html.es
++++ b/docs/manual/mod/core.html.es
+@@ -1211,7 +1211,7 @@
+ error rather than masking it. More information is available in
+ Microsoft Knowledge Base article <a href="http://support.microsoft.com/default.aspx?scid=kb;en-us;Q294807">Q294807</a>.</p>
+
+- <p>Although most error messages can be overriden, there are certain
++ <p>Although most error messages can be overridden, there are certain
+ circumstances where the internal messages are used regardless of the
+ setting of <code class="directive"><a href="#errordocument">ErrorDocument</a></code>. In
+ particular, if a malformed request is detected, normal request processing
+@@ -4524,4 +4524,4 @@
+ prettyPrint();
+ }
+ //--><!]]></script>
+-</body></html>
+\ No newline at end of file
++</body></html>
+--- a/docs/manual/programs/httxt2dbm.html.en
++++ b/docs/manual/programs/httxt2dbm.html.en
+@@ -66,7 +66,7 @@
+ </dd>
+
+ <dt><code>-i <var>SOURCE_TXT</var></code></dt>
+- <dd>Input file from which the dbm is to be created. The file should be formated
++ <dd>Input file from which the dbm is to be created. The file should be formatted
+ with one record per line, of the form: <code>key value</code>.
+ See the documentation for <code class="directive"><a href="../mod/mod_rewrite.html#rewritemap">RewriteMap</a></code> for
+ further details of this file's format and meaning.
+@@ -111,4 +111,4 @@
+ prettyPrint();
+ }
+ //--><!]]></script>
+-</body></html>
+\ No newline at end of file
++</body></html>
+--- a/modules/http/http_request.c
++++ b/modules/http/http_request.c
+@@ -376,7 +376,7 @@
+
+ /* The EOR bucket has either been handled by an output filter (eg.
+ * deleted or moved to a buffered_bb => no more in bb), or an error
+- * occured before that (eg. c->aborted => still in bb) and we ought
++ * occurred before that (eg. c->aborted => still in bb) and we ought
+ * to destroy it now. So cleanup any remaining bucket along with
+ * the orphan request (if any).
+ */
+@@ -779,7 +779,7 @@
+
+ AP_INTERNAL_REDIRECT(r->uri, new_uri);
+
+- /* ap_die was already called, if an error occured */
++ /* ap_die was already called, if an error occurred */
+ if (!new) {
+ return;
+ }
+@@ -803,7 +803,7 @@
+ int access_status;
+ request_rec *new = internal_internal_redirect(new_uri, r);
+
+- /* ap_die was already called, if an error occured */
++ /* ap_die was already called, if an error occurred */
+ if (!new) {
+ return;
+ }
+--- a/modules/http2/h2_config.c
++++ b/modules/http2/h2_config.c
+@@ -419,7 +419,7 @@
+ else if (!strcasecmp("BEFORE", sdependency)) {
+ dependency = H2_DEPENDANT_BEFORE;
+ if (sweight) {
+- return "dependecy 'Before' does not allow a weight";
++ return "dependency 'Before' does not allow a weight";
+ }
+ }
+ else if (!strcasecmp("INTERLEAVED", sdependency)) {
+--- a/modules/http2/h2_ngn_shed.c
++++ b/modules/http2/h2_ngn_shed.c
+@@ -281,7 +281,7 @@
+ if (H2_REQ_ENTRIES_EMPTY(&ngn->entries)) {
+ if (want_shutdown) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, shed->c,
+- "h2_ngn_shed(%ld): emtpy queue, shutdown engine %s",
++ "h2_ngn_shed(%ld): empty queue, shutdown engine %s",
+ shed->c->id, ngn->id);
+ ngn->shutdown = 1;
+ }
+--- a/modules/mappers/mod_imagemap.c
++++ b/modules/mappers/mod_imagemap.c
+@@ -858,7 +858,7 @@
+ /* There's not much else we can do ... we've already sent the headers
+ * to the client.
+ */
+- ap_rputs("\n\n[an internal server error occured]\n", r);
++ ap_rputs("\n\n[an internal server error occurred]\n", r);
+ menu_footer(r);
+ return OK;
+ }
+--- a/modules/md/md_acme_authz.c
++++ b/modules/md/md_acme_authz.c
+@@ -239,7 +239,7 @@
+
+ if (md_log_is_level(p, log_level)) {
+ md_log_perror(MD_LOG_MARK, log_level, rv, p, "ACME server authz: %s for %s at %s. "
+- "Exact repsonse was: %s", err? err : "", authz->domain, authz->location,
++ "Exact response was: %s", err? err : "", authz->domain, authz->location,
+ json? md_json_writep(json, p, MD_JSON_FMT_COMPACT) : "not available");
+ }
+
+--- a/modules/metadata/mod_remoteip.c
++++ b/modules/metadata/mod_remoteip.c
+@@ -393,7 +393,7 @@
+
+ ap_log_error(APLOG_MARK, APLOG_NOTICE, 0, new, APLOGNO(03491)
+ "RemoteIPProxyProtocol: previous setting for %s:%hu from virtual "
+- "host {%s:%hu in %s} is being overriden by virtual host "
++ "host {%s:%hu in %s} is being overridden by virtual host "
+ "{%s:%hu in %s}; new setting is '%s'",
+ buf, prev->addr->port, prev->source->server_hostname,
+ prev->source->addrs->host_port, prev->source->defn_name,
+--- a/modules/ssl/mod_ssl.c
++++ b/modules/ssl/mod_ssl.c
+@@ -94,7 +94,7 @@
+ "Enable FIPS-140 mode "
+ "(`on', `off')")
+ SSL_CMD_ALL(CipherSuite, TAKE12,
+- "Colon-delimited list of permitted SSL Ciphers, optional preceeded "
++ "Colon-delimited list of permitted SSL Ciphers, optional preceded "
+ "by protocol identifier ('XXX:...:XXX' - see manual)")
+ SSL_CMD_SRV(CertificateFile, TAKE1,
+ "SSL Server Certificate file "
+@@ -187,7 +187,7 @@
+ "('[+-][" SSL_PROTOCOLS "] ...' - see manual)")
+ SSL_CMD_PXY(ProxyCipherSuite, TAKE12,
+ "SSL Proxy: colon-delimited list of permitted SSL ciphers "
+- ", optionally preceeded by protocol specifier ('XXX:...:XXX' - see manual)")
++ ", optionally preceded by protocol specifier ('XXX:...:XXX' - see manual)")
+ SSL_CMD_PXY(ProxyVerify, TAKE1,
+ "SSL Proxy: whether to verify the remote certificate "
+ "('on' or 'off')")
+--- a/support/ab.c
++++ b/support/ab.c
+@@ -18,7 +18,7 @@
+ ** This program is based on ZeusBench V1.0 written by Adam Twiss
+ ** which is Copyright (c) 1996 by Zeus Technology Ltd. http://www.zeustech.net/
+ **
+- ** This software is provided "as is" and any express or implied waranties,
++ ** This software is provided "as is" and any express or implied warranties,
+ ** including but not limited to, the implied warranties of merchantability and
+ ** fitness for a particular purpose are disclaimed. In no event shall
+ ** Zeus Technology Ltd. be liable for any direct, indirect, incidental, special,
diff --git a/debian/patches/suexec-CVE-2007-1742.patch b/debian/patches/suexec-CVE-2007-1742.patch
new file mode 100644
index 0000000..a348cbc
--- /dev/null
+++ b/debian/patches/suexec-CVE-2007-1742.patch
@@ -0,0 +1,66 @@
+Description: Fix race condition with chdir
+ Fix /var/www* being accepted as docroot instead of /var/www/*
+ (the same for public_html* instead of public_html/*)
+Author: Stefan Fritsch <sf@debian.org>
+Last-Update: 2014-05-29
+Bug: https://issues.apache.org/bugzilla/show_bug.cgi?id=44752
+--- a/support/suexec.c
++++ b/support/suexec.c
+@@ -42,6 +42,7 @@
+ #if APR_HAVE_UNISTD_H
+ #include <unistd.h>
+ #endif
++#include <fcntl.h>
+
+ #include <stdio.h>
+ #include <stdarg.h>
+@@ -281,11 +282,12 @@
+ char *actual_gname; /* actual group name */
+ char *cmd; /* command to be executed */
+ char cwd[AP_MAXPATH]; /* current working directory */
+- char dwd[AP_MAXPATH]; /* docroot working directory */
++ char dwd[AP_MAXPATH+1]; /* docroot working directory */
+ struct passwd *pw; /* password entry holder */
+ struct group *gr; /* group entry holder */
+ struct stat dir_info; /* directory info holder */
+ struct stat prg_info; /* program info holder */
++ int cwdh; /* handle to cwd */
+
+ /*
+ * Start with a "clean" environment
+@@ -529,11 +531,16 @@
+ exit(111);
+ }
+
++ if ( (cwdh = open(".", O_RDONLY)) == -1 ) {
++ log_err("cannot open current working directory\n");
++ exit(111);
++ }
++
+ if (userdir) {
+ if (((chdir(target_homedir)) != 0) ||
+ ((chdir(AP_USERDIR_SUFFIX)) != 0) ||
+ ((getcwd(dwd, AP_MAXPATH)) == NULL) ||
+- ((chdir(cwd)) != 0)) {
++ ((fchdir(cwdh)) != 0)) {
+ log_err("cannot get docroot information (%s)\n", target_homedir);
+ exit(112);
+ }
+@@ -541,12 +548,16 @@
+ else {
+ if (((chdir(AP_DOC_ROOT)) != 0) ||
+ ((getcwd(dwd, AP_MAXPATH)) == NULL) ||
+- ((chdir(cwd)) != 0)) {
++ ((fchdir(cwdh)) != 0)) {
+ log_err("cannot get docroot information (%s)\n", AP_DOC_ROOT);
+ exit(113);
+ }
+ }
+
++ close(cwdh);
++
++ if (strlen(cwd) > strlen(dwd))
++ strncat(dwd, "/", 1);
+ if ((strncmp(cwd, dwd, strlen(dwd))) != 0) {
+ log_err("command not in docroot (%s/%s)\n", cwd, cmd);
+ exit(114);
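
Note: the CVE-2007-1742 change above has two parts. The prefix check appends a
'/' to the computed docroot before strncmp(), so /var/www2 no longer passes as
being under /var/www; and the return to the starting directory goes through a
saved descriptor and fchdir() rather than chdir(cwd), closing the window in
which directories could be swapped between the two path-based chdir calls. A
minimal standalone demonstration of the fchdir idiom (the /tmp probe stands in
for the docroot walk; exit codes loosely follow suexec's):

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
        char before[4096], after[4096];
        int cwdh = open(".", O_RDONLY);   /* handle to cwd, as in the patch */

        if (cwdh == -1 || !getcwd(before, sizeof(before)))
            return 111;
        if (chdir("/tmp") != 0)           /* stand-in for the docroot probe */
            return 113;
        if (fchdir(cwdh) != 0)            /* race-free return; a second
                                           * chdir(before) by path is not */
            return 113;
        close(cwdh);
        if (!getcwd(after, sizeof(after)))
            return 111;
        printf("started in %s, back in %s\n", before, after);
        return 0;
    }
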
diff --git a/debian/patches/suexec-custom.patch b/debian/patches/suexec-custom.patch
new file mode 100644
index 0000000..9c7a732
--- /dev/null
+++ b/debian/patches/suexec-custom.patch
@@ -0,0 +1,192 @@
+Description: Make suexec-custom read its settings from a per-user config file
+Forwarded: not-needed
+Author: Stefan Fritsch <sf@debian.org>
+Last-Update: 2018-07-17
+diff --git a/support/suexec-custom.c b/support/suexec-custom.c
+index f3811a97..30bb644b 100644
+--- a/support/suexec-custom.c
++++ b/support/suexec-custom.c
+@@ -29,6 +29,7 @@
+ *
+ *
+ */
++#define SUEXEC_CONFIG_DIR "/etc/apache2/suexec/"
+
+ #include "apr.h"
+ #include "ap_config.h"
+@@ -39,6 +40,7 @@
+ #include <sys/types.h>
+ #include <string.h>
+ #include <time.h>
++#include <ctype.h>
+ #if APR_HAVE_UNISTD_H
+ #include <unistd.h>
+ #endif
+@@ -222,6 +224,26 @@ static void log_no_err(const char *fmt,...)
+ return;
+ }
+
++static int read_line(char *buf, FILE *file) {
++ char *p;
++ p = fgets(buf, AP_MAXPATH+1, file);
++ if (!p) return 0;
++ if (*p == '\0') return 1;
++
++ p = buf;
++ while (*p)
++ p++;
++ p--;
++
++ /* remove trailing space and slash */
++ while ( isspace(*p) && p >= buf )
++ *p-- = '\0';
++ while ( *p == '/' && p >= buf )
++ *p-- = '\0';
++
++ return 1;
++}
++
+ static void clean_env(void)
+ {
+ char pathbuf[512];
+@@ -288,6 +310,11 @@ int main(int argc, char *argv[])
+ struct stat dir_info; /* directory info holder */
+ struct stat prg_info; /* program info holder */
+ int cwdh; /* handle to cwd */
++ char *suexec_docroot = NULL;
++ char *suexec_userdir_suffix = NULL;
++ char *filename = NULL;
++ FILE *configfile;
++
+
+ /*
+ * Start with a "clean" environment
+@@ -317,15 +344,10 @@ int main(int argc, char *argv[])
+ || (! strcmp(AP_HTTPD_USER, pw->pw_name)))
+ #endif /* _OSD_POSIX */
+ ) {
+-#ifdef AP_DOC_ROOT
+- fprintf(stderr, " -D AP_DOC_ROOT=\"%s\"\n", AP_DOC_ROOT);
+-#endif
++ fprintf(stderr, " -D SUEXEC_CONFIG_DIR=%s\n", SUEXEC_CONFIG_DIR);
+ #ifdef AP_GID_MIN
+ fprintf(stderr, " -D AP_GID_MIN=%d\n", AP_GID_MIN);
+ #endif
+-#ifdef AP_HTTPD_USER
+- fprintf(stderr, " -D AP_HTTPD_USER=\"%s\"\n", AP_HTTPD_USER);
+-#endif
+ #if defined(AP_LOG_SYSLOG)
+ fprintf(stderr, " -D AP_LOG_SYSLOG\n");
+ #elif defined(AP_LOG_EXEC)
+@@ -339,9 +361,6 @@ int main(int argc, char *argv[])
+ #endif
+ #ifdef AP_UID_MIN
+ fprintf(stderr, " -D AP_UID_MIN=%d\n", AP_UID_MIN);
+-#endif
+-#ifdef AP_USERDIR_SUFFIX
+- fprintf(stderr, " -D AP_USERDIR_SUFFIX=\"%s\"\n", AP_USERDIR_SUFFIX);
+ #endif
+ exit(0);
+ }
+@@ -357,23 +376,6 @@ int main(int argc, char *argv[])
+ target_gname = argv[2];
+ cmd = argv[3];
+
+- /*
+- * Check to see if the user running this program
+- * is the user allowed to do so as defined in
+- * suexec.h. If not the allowed user, error out.
+- */
+-#ifdef _OSD_POSIX
+- /* User name comparisons are case insensitive on BS2000/OSD */
+- if (strcasecmp(AP_HTTPD_USER, pw->pw_name)) {
+- log_err("user mismatch (%s instead of %s)\n", pw->pw_name, AP_HTTPD_USER);
+- exit(103);
+- }
+-#else /*_OSD_POSIX*/
+- if (strcmp(AP_HTTPD_USER, pw->pw_name)) {
+- log_err("user mismatch (%s instead of %s)\n", pw->pw_name, AP_HTTPD_USER);
+- exit(103);
+- }
+-#endif /*_OSD_POSIX*/
+
+ /*
+ * Check for a leading '/' (absolute path) in the command to be executed,
+@@ -397,6 +399,59 @@ int main(int argc, char *argv[])
+ userdir = 1;
+ }
+
++ /*
++ * Check to see if the user running this program
++ * is the user allowed to do so as defined in
++ * SUEXEC_CONFIG_DIR/username
++ * If not, error out.
++ */
++ suexec_docroot = malloc(AP_MAXPATH+1);
++ suexec_userdir_suffix = malloc(AP_MAXPATH+1);
++ if (!suexec_docroot || !suexec_userdir_suffix ||
++ asprintf(&filename, SUEXEC_CONFIG_DIR "%s", pw->pw_name) == -1) {
++ log_err("malloc failed\n");
++ exit(120);
++ }
++
++ configfile = fopen(filename, "r");
++ if (!configfile) {
++ log_err("User %s not allowed: Could not open config file %s\n", pw->pw_name, filename);
++ exit(123);
++ }
++
++ if (!read_line(suexec_docroot, configfile)) {
++ log_err("Could not read docroot from %s\n", filename);
++ exit(124);
++ }
++
++ if (!read_line(suexec_userdir_suffix, configfile)) {
++ log_err("Could not read userdir suffix from %s\n", filename);
++ exit(125);
++ }
++
++ fclose(configfile);
++
++ if (userdir) {
++ if ( !isalnum(*suexec_userdir_suffix) && suexec_userdir_suffix[0] != '.') {
++ log_err("userdir suffix disabled in %s\n", filename);
++ exit(126);
++ }
++ }
++ else {
++ if (suexec_docroot[0] != '/') {
++ log_err("docroot disabled in %s\n", filename);
++ exit(127);
++ }
++
++ if (suexec_docroot[1] == '/' ||
++ suexec_docroot[1] == '.' ||
++ suexec_docroot[1] == '\0' )
++ {
++ log_err("invalid docroot %s in %s\n", suexec_docroot, filename);
++ exit(128);
++ }
++ }
++
+ /*
+ * Error out if the target username is invalid.
+ */
+@@ -538,7 +593,7 @@ int main(int argc, char *argv[])
+
+ if (userdir) {
+ if (((chdir(target_homedir)) != 0) ||
+- ((chdir(AP_USERDIR_SUFFIX)) != 0) ||
++ ((chdir(suexec_userdir_suffix)) != 0) ||
+ ((getcwd(dwd, AP_MAXPATH)) == NULL) ||
+ ((fchdir(cwdh)) != 0)) {
+ log_err("cannot get docroot information (%s)\n", target_homedir);
+@@ -546,7 +601,7 @@ int main(int argc, char *argv[])
+ }
+ }
+ else {
+- if (((chdir(AP_DOC_ROOT)) != 0) ||
++ if (((chdir(suexec_docroot)) != 0) ||
+ ((getcwd(dwd, AP_MAXPATH)) == NULL) ||
+ ((fchdir(cwdh)) != 0)) {
+ log_err("cannot get docroot information (%s)\n", AP_DOC_ROOT);
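
Note: per the hunks above, suexec-custom takes its policy from
/etc/apache2/suexec/<username>: the first line is the document root, the second
the userdir suffix, and read_line() strips trailing whitespace and slashes from
each. A standalone adaptation for experimenting with the format (the www-data
path is only an example, and the bounds check here is reordered to test
p >= buf before dereferencing, which the patch does the other way around):

    #include <ctype.h>
    #include <stdio.h>
    #include <string.h>

    #define AP_MAXPATH 4096   /* assumed; the real value comes from httpd */

    static int read_line(char *buf, FILE *file)
    {
        char *p = fgets(buf, AP_MAXPATH + 1, file);
        if (!p) return 0;
        if (*p == '\0') return 1;
        p = buf + strlen(buf) - 1;
        while (p >= buf && isspace((unsigned char)*p)) /* trailing space */
            *p-- = '\0';
        while (p >= buf && *p == '/')                  /* trailing slash */
            *p-- = '\0';
        return 1;
    }

    int main(void)
    {
        char docroot[AP_MAXPATH + 1], suffix[AP_MAXPATH + 1];
        FILE *f = fopen("/etc/apache2/suexec/www-data", "r"); /* example */

        if (!f) return 123;                     /* patch: no config file */
        if (!read_line(docroot, f)) return 124; /* patch: no docroot line */
        if (!read_line(suffix, f)) return 125;  /* patch: no suffix line */
        fclose(f);
        printf("docroot=%s userdir-suffix=%s\n", docroot, suffix);
        return 0;
    }
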