author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-07 02:04:07 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-07 02:04:07 +0000
commit     1221c736f9a90756d47ea6d28320b6b83602dd2a (patch)
tree       b453ba7b1393205258c9b098a773b4330984672f /debian/patches/CVE-2022-31813.patch
parent     Adding upstream version 2.4.38. (diff)
download   apache2-1221c736f9a90756d47ea6d28320b6b83602dd2a.tar.xz
           apache2-1221c736f9a90756d47ea6d28320b6b83602dd2a.zip
Adding debian version 2.4.38-3+deb10u8. (tag: debian/2.4.38-3+deb10u8, branch: debian)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'debian/patches/CVE-2022-31813.patch')
-rw-r--r--  debian/patches/CVE-2022-31813.patch | 242
1 file changed, 242 insertions(+), 0 deletions(-)
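
Context for the patch below: CVE-2022-31813 concerns ap_proxy_create_hdrbrgd() in mod_proxy. A client could list X-Forwarded-* headers in its own Connection header, and because the function previously added X-Forwarded-* before clearing hop-by-hop headers, ap_proxy_clear_connection() would strip the proxy's own headers again, which can defeat IP-based restrictions on the backend. The fix (merged from trunk r1901461) clears hop-by-hop headers first and runs proxy fixups last. The standalone C sketch that follows is not part of the patch; it only illustrates the ordering problem with plain APR tables. strip_hop_by_hop() and build_outgoing() are hypothetical stand-ins for ap_proxy_clear_connection() and the header-brigade assembly, and the pkg-config build line is an assumption.

/*
 * Illustrative sketch only (not part of the shipped patch): why the order
 * of operations matters for CVE-2022-31813.  A client that sends
 * "Connection: X-Forwarded-For" nominates that header as hop-by-hop; if
 * the proxy strips hop-by-hop headers *after* adding its own
 * X-Forwarded-For, the backend never sees the real client address.
 *
 * Assumed build line: gcc demo.c $(pkg-config --cflags --libs apr-1)
 */
#include <stdio.h>
#include <apr_general.h>
#include <apr_pools.h>
#include <apr_strings.h>
#include <apr_tables.h>

/* Hypothetical stand-in for ap_proxy_clear_connection(): drop every
 * header the client listed in its Connection header. */
static void strip_hop_by_hop(apr_pool_t *p, apr_table_t *headers)
{
    const char *conn = apr_table_get(headers, "Connection");
    char *list, *tok, *last = NULL;

    if (!conn) {
        return;
    }
    list = apr_pstrdup(p, conn);
    for (tok = apr_strtok(list, ", ", &last); tok;
         tok = apr_strtok(NULL, ", ", &last)) {
        apr_table_unset(headers, tok);
    }
    apr_table_unset(headers, "Connection");
}

/* Build the header set sent to the backend, with either ordering. */
static void build_outgoing(apr_pool_t *p, const apr_table_t *in, int clear_first)
{
    apr_table_t *out = apr_table_copy(p, in);
    const char *xff;

    if (clear_first) {
        strip_hop_by_hop(p, out);     /* ordering after the patch */
    }
    apr_table_mergen(out, "X-Forwarded-For", "203.0.113.7");
    if (!clear_first) {
        strip_hop_by_hop(p, out);     /* ordering before the patch */
    }
    xff = apr_table_get(out, "X-Forwarded-For");
    printf("clear_first=%d  X-Forwarded-For: %s\n",
           clear_first, xff ? xff : "(dropped)");
}

int main(void)
{
    apr_pool_t *p;
    apr_table_t *req;

    apr_initialize();
    apr_pool_create(&p, NULL);

    req = apr_table_make(p, 4);
    apr_table_setn(req, "Host", "backend.example");
    /* Malicious client declares X-Forwarded-For as hop-by-hop. */
    apr_table_setn(req, "Connection", "X-Forwarded-For");

    build_outgoing(p, req, 0);   /* old order: proxy header vanishes      */
    build_outgoing(p, req, 1);   /* patched order: header reaches backend */

    apr_pool_destroy(p);
    apr_terminate();
    return 0;
}

With the pre-patch ordering the sketch prints "(dropped)", i.e. the backend would not see the client address; with the patched ordering the header survives because client-nominated hop-by-hop headers are removed before the proxy adds its own.
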
diff --git a/debian/patches/CVE-2022-31813.patch b/debian/patches/CVE-2022-31813.patch
new file mode 100644
index 0000000..d2bd341
--- /dev/null
+++ b/debian/patches/CVE-2022-31813.patch
@@ -0,0 +1,242 @@
+From 956f708b094698ac9ad570d640d4f30eb0df7305 Mon Sep 17 00:00:00 2001
+From: Stefan Eissing <icing@apache.org>
+Date: Wed, 1 Jun 2022 07:51:04 +0000
+Subject: [PATCH] Merge r1901461 from trunk via #320:
+
+ *) mod_proxy: ap_proxy_create_hdrbrgd() to clear hop-by-hop first and fixup last.
+
+
+
+git-svn-id: https://svn.apache.org/repos/asf/httpd/httpd/branches/2.4.x@1901480 13f79535-47bb-0310-9956-ffa450edef68
+Origin: https://github.com/apache/httpd/commit/956f708b094698ac9ad570d640d4f30eb0df7305
+---
+ modules/proxy/proxy_util.c | 153 ++++++++++++++++++++++-----------------------
+ 1 file changed, 77 insertions(+), 76 deletions(-)
+
+--- a/modules/proxy/proxy_util.c
++++ b/modules/proxy/proxy_util.c
+@@ -3396,12 +3396,14 @@
+ char **old_cl_val,
+ char **old_te_val)
+ {
++ int rc = OK;
+ conn_rec *c = r->connection;
+ int counter;
+ char *buf;
++ apr_table_t *saved_headers_in = r->headers_in;
++ const char *saved_host = apr_table_get(saved_headers_in, "Host");
+ const apr_array_header_t *headers_in_array;
+ const apr_table_entry_t *headers_in;
+- apr_table_t *saved_headers_in;
+ apr_bucket *e;
+ int do_100_continue;
+ conn_rec *origin = p_conn->connection;
+@@ -3437,6 +3439,52 @@
+ ap_xlate_proto_to_ascii(buf, strlen(buf));
+ e = apr_bucket_pool_create(buf, strlen(buf), p, c->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(header_brigade, e);
++
++ /*
++ * Make a copy on r->headers_in for the request we make to the backend,
++ * modify the copy in place according to our configuration and connection
++ * handling, use it to fill in the forwarded headers' brigade, and finally
++ * restore the saved/original ones in r->headers_in.
++ *
++ * Note: We need to take r->pool for apr_table_copy as the key / value
++ * pairs in r->headers_in have been created out of r->pool and
++ * p might be (and actually is) a longer living pool.
++ * This would trigger the bad pool ancestry abort in apr_table_copy if
++ * apr is compiled with APR_POOL_DEBUG.
++ *
++ * icing: if p indeed lives longer than r->pool, we should allocate
++ * all new header values from r->pool as well and avoid leakage.
++ */
++ r->headers_in = apr_table_copy(r->pool, saved_headers_in);
++
++ /* Return the original Transfer-Encoding and/or Content-Length values
++ * then drop the headers, they must be set by the proxy handler based
++ * on the actual body being forwarded.
++ */
++ if ((*old_te_val = (char *)apr_table_get(r->headers_in,
++ "Transfer-Encoding"))) {
++ apr_table_unset(r->headers_in, "Transfer-Encoding");
++ }
++ if ((*old_cl_val = (char *)apr_table_get(r->headers_in,
++ "Content-Length"))) {
++ apr_table_unset(r->headers_in, "Content-Length");
++ }
++
++ /* Clear out hop-by-hop request headers not to forward */
++ if (ap_proxy_clear_connection(r, r->headers_in) < 0) {
++ rc = HTTP_BAD_REQUEST;
++ goto cleanup;
++ }
++
++ /* RFC2616 13.5.1 says we should strip these */
++ apr_table_unset(r->headers_in, "Keep-Alive");
++ apr_table_unset(r->headers_in, "Upgrade");
++ apr_table_unset(r->headers_in, "Trailer");
++ apr_table_unset(r->headers_in, "TE");
++
++ /* We used to send `Host: ` always first, so let's keep it that
++ * way. No telling which legacy backend is relying on this.
++ */
+ if (dconf->preserve_host == 0) {
+ if (ap_strchr_c(uri->hostname, ':')) { /* if literal IPv6 address */
+ if (uri->port_str && uri->port != DEFAULT_HTTP_PORT) {
+@@ -3458,7 +3506,7 @@
+ /* don't want to use r->hostname, as the incoming header might have a
+ * port attached
+ */
+- const char* hostname = apr_table_get(r->headers_in,"Host");
++ const char* hostname = saved_host;
+ if (!hostname) {
+ hostname = r->server->server_hostname;
+ ap_log_rerror(APLOG_MARK, APLOG_WARNING, 0, r, APLOGNO(01092)
+@@ -3472,21 +3520,7 @@
+ ap_xlate_proto_to_ascii(buf, strlen(buf));
+ e = apr_bucket_pool_create(buf, strlen(buf), p, c->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(header_brigade, e);
+-
+- /*
+- * Save the original headers in here and restore them when leaving, since
+- * we will apply proxy purpose only modifications (eg. clearing hop-by-hop
+- * headers, add Via or X-Forwarded-* or Expect...), whereas the originals
+- * will be needed later to prepare the correct response and logging.
+- *
+- * Note: We need to take r->pool for apr_table_copy as the key / value
+- * pairs in r->headers_in have been created out of r->pool and
+- * p might be (and actually is) a longer living pool.
+- * This would trigger the bad pool ancestry abort in apr_table_copy if
+- * apr is compiled with APR_POOL_DEBUG.
+- */
+- saved_headers_in = r->headers_in;
+- r->headers_in = apr_table_copy(r->pool, saved_headers_in);
++ apr_table_unset(r->headers_in, "Host");
+
+ /* handle Via */
+ if (conf->viaopt == via_block) {
+@@ -3561,8 +3595,6 @@
+ */
+ if (dconf->add_forwarded_headers) {
+ if (PROXYREQ_REVERSE == r->proxyreq) {
+- const char *buf;
+-
+ /* Add X-Forwarded-For: so that the upstream has a chance to
+ * determine, where the original request came from.
+ */
+@@ -3572,8 +3604,9 @@
+ /* Add X-Forwarded-Host: so that upstream knows what the
+ * original request hostname was.
+ */
+- if ((buf = apr_table_get(r->headers_in, "Host"))) {
+- apr_table_mergen(r->headers_in, "X-Forwarded-Host", buf);
++ if (saved_host) {
++ apr_table_mergen(r->headers_in, "X-Forwarded-Host",
++ saved_host);
+ }
+
+ /* Add X-Forwarded-Server: so that upstream knows what the
+@@ -3585,67 +3618,37 @@
+ }
+ }
+
+- proxy_run_fixups(r);
+- if (ap_proxy_clear_connection(r, r->headers_in) < 0) {
+- return HTTP_BAD_REQUEST;
++ /* Do we want to strip Proxy-Authorization ?
++ * If we haven't used it, then NO
++ * If we have used it then MAYBE: RFC2616 says we MAY propagate it.
++ * So let's make it configurable by env.
++ */
++ if (r->user != NULL /* we've authenticated */
++ && !apr_table_get(r->subprocess_env, "Proxy-Chain-Auth")) {
++ apr_table_unset(r->headers_in, "Proxy-Authorization");
+ }
+
++ /* for sub-requests, ignore freshness/expiry headers */
++ if (r->main) {
++ apr_table_unset(r->headers_in, "If-Match");
++ apr_table_unset(r->headers_in, "If-Modified-Since");
++ apr_table_unset(r->headers_in, "If-Range");
++ apr_table_unset(r->headers_in, "If-Unmodified-Since");
++ apr_table_unset(r->headers_in, "If-None-Match");
++ }
++
++ /* run hook to fixup the request we are about to send */
++ proxy_run_fixups(r);
++
+ /* send request headers */
+ headers_in_array = apr_table_elts(r->headers_in);
+ headers_in = (const apr_table_entry_t *) headers_in_array->elts;
+ for (counter = 0; counter < headers_in_array->nelts; counter++) {
+ if (headers_in[counter].key == NULL
+- || headers_in[counter].val == NULL
+-
+- /* Already sent */
+- || !strcasecmp(headers_in[counter].key, "Host")
+-
+- /* Clear out hop-by-hop request headers not to send
+- * RFC2616 13.5.1 says we should strip these headers
+- */
+- || !strcasecmp(headers_in[counter].key, "Keep-Alive")
+- || !strcasecmp(headers_in[counter].key, "TE")
+- || !strcasecmp(headers_in[counter].key, "Trailer")
+- || !strcasecmp(headers_in[counter].key, "Upgrade")
+-
+- ) {
+- continue;
+- }
+- /* Do we want to strip Proxy-Authorization ?
+- * If we haven't used it, then NO
+- * If we have used it then MAYBE: RFC2616 says we MAY propagate it.
+- * So let's make it configurable by env.
+- */
+- if (!strcasecmp(headers_in[counter].key,"Proxy-Authorization")) {
+- if (r->user != NULL) { /* we've authenticated */
+- if (!apr_table_get(r->subprocess_env, "Proxy-Chain-Auth")) {
+- continue;
+- }
+- }
+- }
+-
+- /* Skip Transfer-Encoding and Content-Length for now.
+- */
+- if (!strcasecmp(headers_in[counter].key, "Transfer-Encoding")) {
+- *old_te_val = headers_in[counter].val;
+- continue;
+- }
+- if (!strcasecmp(headers_in[counter].key, "Content-Length")) {
+- *old_cl_val = headers_in[counter].val;
++ || headers_in[counter].val == NULL) {
+ continue;
+ }
+
+- /* for sub-requests, ignore freshness/expiry headers */
+- if (r->main) {
+- if ( !strcasecmp(headers_in[counter].key, "If-Match")
+- || !strcasecmp(headers_in[counter].key, "If-Modified-Since")
+- || !strcasecmp(headers_in[counter].key, "If-Range")
+- || !strcasecmp(headers_in[counter].key, "If-Unmodified-Since")
+- || !strcasecmp(headers_in[counter].key, "If-None-Match")) {
+- continue;
+- }
+- }
+-
+ buf = apr_pstrcat(p, headers_in[counter].key, ": ",
+ headers_in[counter].val, CRLF,
+ NULL);
+@@ -3654,11 +3657,9 @@
+ APR_BRIGADE_INSERT_TAIL(header_brigade, e);
+ }
+
+- /* Restore the original headers in (see comment above),
+- * we won't modify them anymore.
+- */
++cleanup:
+ r->headers_in = saved_headers_in;
+- return OK;
++ return rc;
+ }
+
+ PROXY_DECLARE(int) ap_proxy_pass_brigade(apr_bucket_alloc_t *bucket_alloc,
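
A second, smaller change in the patch above is the error handling: the early "return HTTP_BAD_REQUEST" becomes "rc = HTTP_BAD_REQUEST; goto cleanup;" so that the saved r->headers_in is restored on every exit path, not only on success. The following minimal sketch of that restore-on-exit idiom is an assumption for illustration; demo_request and demo_build_headers are made-up names, not httpd's API.

/* Minimal sketch (not httpd code) of the restore-on-every-exit idiom the
 * patch introduces.
 * Assumed build line: gcc demo2.c $(pkg-config --cflags --libs apr-1) */
#include <stdio.h>
#include <apr_general.h>
#include <apr_pools.h>
#include <apr_tables.h>

#define DEMO_OK          0
#define DEMO_BAD_REQUEST 400

typedef struct {
    apr_pool_t  *pool;
    apr_table_t *headers_in;
} demo_request;

static int demo_build_headers(demo_request *r)
{
    int rc = DEMO_OK;
    apr_table_t *saved_headers_in = r->headers_in;

    /* Work on a scratch copy; the caller keeps the original headers. */
    r->headers_in = apr_table_copy(r->pool, saved_headers_in);

    /* Illustrative error path only (the real patch bails out when
     * ap_proxy_clear_connection() fails): even here we fall through to
     * cleanup instead of returning directly. */
    if (apr_table_get(r->headers_in, "Content-Length")
        && apr_table_get(r->headers_in, "Transfer-Encoding")) {
        rc = DEMO_BAD_REQUEST;
        goto cleanup;
    }

    /* Proxy-only modifications happen on the copy... */
    apr_table_unset(r->headers_in, "Keep-Alive");
    /* ... and the outgoing header brigade would be built from it here. */

cleanup:
    r->headers_in = saved_headers_in;   /* always restore the original */
    return rc;
}

int main(void)
{
    apr_pool_t *p;
    demo_request r;
    int rc;

    apr_initialize();
    apr_pool_create(&p, NULL);
    r.pool = p;
    r.headers_in = apr_table_make(p, 4);
    apr_table_setn(r.headers_in, "Keep-Alive", "timeout=5");

    rc = demo_build_headers(&r);
    printf("rc=%d, caller still sees Keep-Alive: %s\n", rc,
           apr_table_get(r.headers_in, "Keep-Alive") ? "yes" : "no");

    apr_pool_destroy(p);
    apr_terminate();
    return 0;
}

Running the sketch prints rc=0 and confirms the caller still sees its original Keep-Alive header, since only the scratch copy was modified before the restore in cleanup.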