Diffstat
-rw-r--r--modules/http/.indent.pro54
-rw-r--r--modules/http/Makefile.in3
-rw-r--r--modules/http/byterange_filter.c610
-rw-r--r--modules/http/chunk_filter.c197
-rw-r--r--modules/http/config.m420
-rw-r--r--modules/http/http_core.c326
-rw-r--r--modules/http/http_etag.c413
-rw-r--r--modules/http/http_filters.c1941
-rw-r--r--modules/http/http_protocol.c1671
-rw-r--r--modules/http/http_request.c861
-rw-r--r--modules/http/mod_mime.c1037
-rw-r--r--modules/http/mod_mime.dep55
-rw-r--r--modules/http/mod_mime.dsp111
-rw-r--r--modules/http/mod_mime.exp1
-rw-r--r--modules/http/mod_mime.mak353
-rw-r--r--modules/http2/Makefile.in20
-rw-r--r--modules/http2/NWGNUmakefile246
-rw-r--r--modules/http2/NWGNUmod_http2395
-rw-r--r--modules/http2/NWGNUproxyht2288
-rw-r--r--modules/http2/README.h270
-rw-r--r--modules/http2/config2.m4238
-rw-r--r--modules/http2/h2.h192
-rw-r--r--modules/http2/h2_bucket_beam.c825
-rw-r--r--modules/http2/h2_bucket_beam.h248
-rw-r--r--modules/http2/h2_bucket_eos.c112
-rw-r--r--modules/http2/h2_bucket_eos.h32
-rw-r--r--modules/http2/h2_c1.c323
-rw-r--r--modules/http2/h2_c1.h83
-rw-r--r--modules/http2/h2_c1_io.c545
-rw-r--r--modules/http2/h2_c1_io.h100
-rw-r--r--modules/http2/h2_c2.c864
-rw-r--r--modules/http2/h2_c2.h57
-rw-r--r--modules/http2/h2_c2_filter.c1034
-rw-r--r--modules/http2/h2_c2_filter.h68
-rw-r--r--modules/http2/h2_config.c943
-rw-r--r--modules/http2/h2_config.h98
-rw-r--r--modules/http2/h2_conn_ctx.c123
-rw-r--r--modules/http2/h2_conn_ctx.h98
-rw-r--r--modules/http2/h2_headers.c207
-rw-r--r--modules/http2/h2_headers.h107
-rw-r--r--modules/http2/h2_mplx.c1191
-rw-r--r--modules/http2/h2_mplx.h218
-rw-r--r--modules/http2/h2_private.h28
-rw-r--r--modules/http2/h2_protocol.c485
-rw-r--r--modules/http2/h2_protocol.h56
-rw-r--r--modules/http2/h2_proxy_session.c1719
-rw-r--r--modules/http2/h2_proxy_session.h133
-rw-r--r--modules/http2/h2_proxy_util.c1355
-rw-r--r--modules/http2/h2_proxy_util.h257
-rw-r--r--modules/http2/h2_push.c876
-rw-r--r--modules/http2/h2_push.h158
-rw-r--r--modules/http2/h2_request.c519
-rw-r--r--modules/http2/h2_request.h59
-rw-r--r--modules/http2/h2_session.c1991
-rw-r--r--modules/http2/h2_session.h205
-rw-r--r--modules/http2/h2_stream.c1712
-rw-r--r--modules/http2/h2_stream.h326
-rw-r--r--modules/http2/h2_switch.c232
-rw-r--r--modules/http2/h2_switch.h30
-rw-r--r--modules/http2/h2_util.c1929
-rw-r--r--modules/http2/h2_util.h519
-rw-r--r--modules/http2/h2_version.h41
-rw-r--r--modules/http2/h2_workers.c626
-rw-r--r--modules/http2/h2_workers.h129
-rw-r--r--modules/http2/mod_http2.c349
-rw-r--r--modules/http2/mod_http2.dep1431
-rw-r--r--modules/http2/mod_http2.dsp187
-rw-r--r--modules/http2/mod_http2.h79
-rw-r--r--modules/http2/mod_http2.mak533
-rw-r--r--modules/http2/mod_proxy_http2.c470
-rw-r--r--modules/http2/mod_proxy_http2.dep208
-rw-r--r--modules/http2/mod_proxy_http2.dsp119
-rw-r--r--modules/http2/mod_proxy_http2.h21
-rw-r--r--modules/http2/mod_proxy_http2.mak427
74 files changed, 33587 insertions, 0 deletions
diff --git a/modules/http/.indent.pro b/modules/http/.indent.pro
new file mode 100644
index 0000000..a9fbe9f
--- /dev/null
+++ b/modules/http/.indent.pro
@@ -0,0 +1,54 @@
+-i4 -npsl -di0 -br -nce -d0 -cli0 -npcs -nfc1
+-TBUFF
+-TFILE
+-TTRANS
+-TUINT4
+-T_trans
+-Tallow_options_t
+-Tapache_sfio
+-Tarray_header
+-Tbool_int
+-Tbuf_area
+-Tbuff_struct
+-Tbuffy
+-Tcmd_how
+-Tcmd_parms
+-Tcommand_rec
+-Tcommand_struct
+-Tconn_rec
+-Tcore_dir_config
+-Tcore_server_config
+-Tdir_maker_func
+-Tevent
+-Tglobals_s
+-Thandler_func
+-Thandler_rec
+-Tjoblist_s
+-Tlisten_rec
+-Tmerger_func
+-Tmode_t
+-Tmodule
+-Tmodule_struct
+-Tmutex
+-Tn_long
+-Tother_child_rec
+-Toverrides_t
+-Tparent_score
+-Tpid_t
+-Tpiped_log
+-Tpool
+-Trequest_rec
+-Trequire_line
+-Trlim_t
+-Tscoreboard
+-Tsemaphore
+-Tserver_addr_rec
+-Tserver_rec
+-Tserver_rec_chain
+-Tshort_score
+-Ttable
+-Ttable_entry
+-Tthread
+-Tu_wide_int
+-Tvtime_t
+-Twide_int
diff --git a/modules/http/Makefile.in b/modules/http/Makefile.in
new file mode 100644
index 0000000..167b343
--- /dev/null
+++ b/modules/http/Makefile.in
@@ -0,0 +1,3 @@
+
+include $(top_srcdir)/build/special.mk
+
diff --git a/modules/http/byterange_filter.c b/modules/http/byterange_filter.c
new file mode 100644
index 0000000..5ebe853
--- /dev/null
+++ b/modules/http/byterange_filter.c
@@ -0,0 +1,610 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * byterange_filter.c --- HTTP byterange filter and friends.
+ */
+
+#include "apr.h"
+
+#include "apr_strings.h"
+#include "apr_buckets.h"
+#include "apr_lib.h"
+#include "apr_signal.h"
+
+#define APR_WANT_STDIO /* for sscanf */
+#define APR_WANT_STRFUNC
+#define APR_WANT_MEMFUNC
+#include "apr_want.h"
+
+#include "util_filter.h"
+#include "ap_config.h"
+#include "httpd.h"
+#include "http_config.h"
+#include "http_core.h"
+#include "http_protocol.h"
+#include "http_main.h"
+#include "http_request.h"
+#include "http_vhost.h"
+#include "http_log.h" /* For errors detected in basic auth common
+ * support code... */
+#include "apr_date.h" /* For apr_date_parse_http and APR_DATE_BAD */
+#include "util_charset.h"
+#include "util_ebcdic.h"
+#include "util_time.h"
+
+#include "mod_core.h"
+
+#if APR_HAVE_STDARG_H
+#include <stdarg.h>
+#endif
+#if APR_HAVE_UNISTD_H
+#include <unistd.h>
+#endif
+
+#ifndef AP_DEFAULT_MAX_RANGES
+#define AP_DEFAULT_MAX_RANGES 200
+#endif
+#ifndef AP_DEFAULT_MAX_OVERLAPS
+#define AP_DEFAULT_MAX_OVERLAPS 20
+#endif
+#ifndef AP_DEFAULT_MAX_REVERSALS
+#define AP_DEFAULT_MAX_REVERSALS 20
+#endif
+
+#define MAX_PREALLOC_RANGES 100
+
+APLOG_USE_MODULE(http);
+
+typedef struct indexes_t {
+ apr_off_t start;
+ apr_off_t end;
+} indexes_t;
+
+/*
+ * Returns: number of ranges (merged) or -1 for no-good
+ */
+static int ap_set_byterange(request_rec *r, apr_off_t clength,
+ apr_array_header_t **indexes,
+ int *overlaps, int *reversals)
+{
+ const char *range;
+ const char *ct;
+ char *cur;
+ apr_array_header_t *merged;
+ int num_ranges = 0, unsatisfiable = 0;
+ apr_off_t ostart = 0, oend = 0, sum_lengths = 0;
+ int in_merge = 0;
+ indexes_t *idx;
+ int ranges = 1;
+ int i;
+ const char *it;
+
+ *overlaps = 0;
+ *reversals = 0;
+
+ if (r->assbackwards) {
+ return 0;
+ }
+
+ /*
+ * Check for Range request-header (HTTP/1.1) or Request-Range for
+ * backwards-compatibility with second-draft Luotonen/Franks
+ * byte-ranges (e.g. Netscape Navigator 2-3).
+ *
+ * We support this form, with Request-Range, and (farther down) we
+ * send multipart/x-byteranges instead of multipart/byteranges for
+ * Request-Range based requests to work around a bug in Netscape
+ * Navigator 2-3 and MSIE 3.
+ */
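+
+    /*
+     * Illustrative header forms handled below (values are examples only):
+     *   Range: bytes=0-499,600-     -> [0,499] and [600,clength-1]
+     *   Range: bytes=-300           -> the final 300 bytes of the resource
+     *   Request-Range: bytes=0-499  -> same syntax, legacy header name
+     */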
+
+ if (!(range = apr_table_get(r->headers_in, "Range"))) {
+ range = apr_table_get(r->headers_in, "Request-Range");
+ }
+
+ if (!range || strncasecmp(range, "bytes=", 6) || r->status != HTTP_OK) {
+ return 0;
+ }
+
+ /* is content already a single range? */
+ if (apr_table_get(r->headers_out, "Content-Range")) {
+ return 0;
+ }
+
+ /* is content already a multiple range? */
+ if ((ct = apr_table_get(r->headers_out, "Content-Type"))
+ && (!strncasecmp(ct, "multipart/byteranges", 20)
+ || !strncasecmp(ct, "multipart/x-byteranges", 22))) {
+ return 0;
+ }
+
+ /*
+ * Check the If-Range header for Etag or Date.
+ */
+ if (AP_CONDITION_NOMATCH == ap_condition_if_range(r, r->headers_out)) {
+ return 0;
+ }
+
+ range += 6;
+ it = range;
+ while (*it) {
+ if (*it++ == ',') {
+ ranges++;
+ }
+ }
+ it = range;
+ if (ranges > MAX_PREALLOC_RANGES) {
+ ranges = MAX_PREALLOC_RANGES;
+ }
+ *indexes = apr_array_make(r->pool, ranges, sizeof(indexes_t));
+ while ((cur = ap_getword(r->pool, &range, ','))) {
+ char *dash;
+ apr_off_t number, start, end;
+
+ if (!*cur)
+ break;
+
+ /*
+ * Per RFC 2616 14.35.1: If there is at least one syntactically invalid
+ * byte-range-spec, we must ignore the whole header.
+ */
+
+ if (!(dash = strchr(cur, '-'))) {
+ return 0;
+ }
+
+ if (dash == cur) {
+ /* In the form "-5" */
+ if (!ap_parse_strict_length(&number, dash+1)) {
+ return 0;
+ }
+ if (number < 1) {
+ return 0;
+ }
+ start = clength - number;
+ end = clength - 1;
+ }
+ else {
+ *dash++ = '\0';
+ if (!ap_parse_strict_length(&number, cur)) {
+ return 0;
+ }
+ start = number;
+ if (*dash) {
+ if (!ap_parse_strict_length(&number, dash)) {
+ return 0;
+ }
+ end = number;
+ if (start > end) {
+ return 0;
+ }
+ }
+ else { /* "5-" */
+ end = clength - 1;
+ /*
+ * special case: 0-
+ * ignore all other ranges provided
+ * return as a single range: 0-
+ */
+ if (start == 0) {
+ num_ranges = 0;
+ sum_lengths = 0;
+ in_merge = 1;
+ oend = end;
+ ostart = start;
+ apr_array_clear(*indexes);
+ break;
+ }
+ }
+ }
+
+ if (start < 0) {
+ start = 0;
+ }
+ if (start >= clength) {
+ unsatisfiable = 1;
+ continue;
+ }
+ if (end >= clength) {
+ end = clength - 1;
+ }
+
+ if (!in_merge) {
+ /* new set */
+ ostart = start;
+ oend = end;
+ in_merge = 1;
+ continue;
+ }
+ in_merge = 0;
+
+ if (start >= ostart && end <= oend) {
+ in_merge = 1;
+ }
+
+ if (start < ostart && end >= ostart-1) {
+ ostart = start;
+ ++*reversals;
+ in_merge = 1;
+ }
+ if (end >= oend && start <= oend+1 ) {
+ oend = end;
+ in_merge = 1;
+ }
+
+ if (in_merge) {
+ ++*overlaps;
+ continue;
+ } else {
+ idx = (indexes_t *)apr_array_push(*indexes);
+ idx->start = ostart;
+ idx->end = oend;
+ sum_lengths += oend - ostart + 1;
+ /* new set again */
+ in_merge = 1;
+ ostart = start;
+ oend = end;
+ num_ranges++;
+ }
+ }
+
+ if (in_merge) {
+ idx = (indexes_t *)apr_array_push(*indexes);
+ idx->start = ostart;
+ idx->end = oend;
+ sum_lengths += oend - ostart + 1;
+ num_ranges++;
+ }
+ else if (num_ranges == 0 && unsatisfiable) {
+ /* If all ranges are unsatisfiable, we should return 416 */
+ return -1;
+ }
+ if (sum_lengths > clength) {
+ ap_log_rerror(APLOG_MARK, APLOG_TRACE1, 0, r,
+ "Sum of ranges larger than file, ignoring.");
+ return 0;
+ }
+
+ /*
+ * create the merged table now, now that we know we need it
+ */
+ merged = apr_array_make(r->pool, num_ranges, sizeof(char *));
+ idx = (indexes_t *)(*indexes)->elts;
+ for (i = 0; i < (*indexes)->nelts; i++, idx++) {
+ char **new = (char **)apr_array_push(merged);
+ *new = apr_psprintf(r->pool, "%" APR_OFF_T_FMT "-%" APR_OFF_T_FMT,
+ idx->start, idx->end);
+ }
+
+ r->status = HTTP_PARTIAL_CONTENT;
+ r->range = apr_array_pstrcat(r->pool, merged, ',');
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(01583)
+ "Range: %s | %s (%d : %d : %"APR_OFF_T_FMT")",
+ it, r->range, *overlaps, *reversals, clength);
+
+ return num_ranges;
+}
+
+/*
+ * Here we try to be compatible with clients that want multipart/x-byteranges
+ * instead of multipart/byteranges (also see above), as per HTTP/1.1. We
+ * look for the Request-Range header (e.g. Netscape 2 and 3) as an indication
+ * that the browser supports an older protocol. We also check User-Agent
+ * for Microsoft Internet Explorer 3, which needs this as well.
+ */
+static int use_range_x(request_rec *r)
+{
+ const char *ua;
+ return (apr_table_get(r->headers_in, "Request-Range")
+ || ((ua = apr_table_get(r->headers_in, "User-Agent"))
+ && ap_strstr_c(ua, "MSIE 3")));
+}
+
+#define BYTERANGE_FMT "%" APR_OFF_T_FMT "-%" APR_OFF_T_FMT "/%" APR_OFF_T_FMT
+
+static apr_status_t copy_brigade_range(apr_bucket_brigade *bb,
+ apr_bucket_brigade *bbout,
+ apr_off_t start,
+ apr_off_t end)
+{
+ apr_bucket *first = NULL, *last = NULL, *out_first = NULL, *e;
+ apr_uint64_t pos = 0, off_first = 0, off_last = 0;
+ apr_status_t rv;
+ apr_uint64_t start64, end64;
+ apr_off_t pofft = 0;
+
+ /*
+     * Once we know that start and end are >= 0, convert everything to
+     * apr_uint64_t. See the comments in apr_brigade_partition() for why.
+     * In short, apr_off_t (for values >= 0) and apr_size_t fit into apr_uint64_t.
+ */
+ start64 = (apr_uint64_t)start;
+ end64 = (apr_uint64_t)end;
+
+ if (start < 0 || end < 0 || start64 > end64)
+ return APR_EINVAL;
+
+ for (e = APR_BRIGADE_FIRST(bb);
+ e != APR_BRIGADE_SENTINEL(bb);
+ e = APR_BUCKET_NEXT(e))
+ {
+ apr_uint64_t elen64;
+ /* we know that no bucket has undefined length (-1) */
+ AP_DEBUG_ASSERT(e->length != (apr_size_t)(-1));
+ elen64 = (apr_uint64_t)e->length;
+ if (!first && (elen64 + pos > start64)) {
+ first = e;
+ off_first = pos;
+ }
+ if (elen64 + pos > end64) {
+ last = e;
+ off_last = pos;
+ break;
+ }
+ pos += elen64;
+ }
+ if (!first || !last)
+ return APR_EINVAL;
+
+ e = first;
+ while (1)
+ {
+ apr_bucket *copy;
+ AP_DEBUG_ASSERT(e != APR_BRIGADE_SENTINEL(bb));
+ rv = apr_bucket_copy(e, &copy);
+ if (rv != APR_SUCCESS) {
+ apr_brigade_cleanup(bbout);
+ return rv;
+ }
+
+ APR_BRIGADE_INSERT_TAIL(bbout, copy);
+ if (e == first) {
+ if (off_first != start64) {
+ rv = apr_bucket_split(copy, (apr_size_t)(start64 - off_first));
+ if (rv != APR_SUCCESS) {
+ apr_brigade_cleanup(bbout);
+ return rv;
+ }
+ out_first = APR_BUCKET_NEXT(copy);
+ apr_bucket_delete(copy);
+ }
+ else {
+ out_first = copy;
+ }
+ }
+ if (e == last) {
+ if (e == first) {
+ off_last += start64 - off_first;
+ copy = out_first;
+ }
+ if (end64 - off_last != (apr_uint64_t)e->length) {
+ rv = apr_bucket_split(copy, (apr_size_t)(end64 + 1 - off_last));
+ if (rv != APR_SUCCESS) {
+ apr_brigade_cleanup(bbout);
+ return rv;
+ }
+ copy = APR_BUCKET_NEXT(copy);
+ if (copy != APR_BRIGADE_SENTINEL(bbout)) {
+ apr_bucket_delete(copy);
+ }
+ }
+ break;
+ }
+ e = APR_BUCKET_NEXT(e);
+ }
+
+ AP_DEBUG_ASSERT(APR_SUCCESS == apr_brigade_length(bbout, 1, &pofft));
+ pos = (apr_uint64_t)pofft;
+ AP_DEBUG_ASSERT(pos == end64 - start64 + 1);
+ return APR_SUCCESS;
+}
+
+static apr_status_t send_416(ap_filter_t *f, apr_bucket_brigade *tmpbb)
+{
+ apr_bucket *e;
+ conn_rec *c = f->r->connection;
+ ap_remove_output_filter(f);
+ f->r->status = HTTP_OK;
+ e = ap_bucket_error_create(HTTP_RANGE_NOT_SATISFIABLE, NULL,
+ f->r->pool, c->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(tmpbb, e);
+ e = apr_bucket_eos_create(c->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(tmpbb, e);
+ return ap_pass_brigade(f->next, tmpbb);
+}
+
+AP_CORE_DECLARE_NONSTD(apr_status_t) ap_byterange_filter(ap_filter_t *f,
+ apr_bucket_brigade *bb)
+{
+ request_rec *r = f->r;
+ conn_rec *c = r->connection;
+ apr_bucket *e;
+ apr_bucket_brigade *bsend;
+ apr_bucket_brigade *tmpbb;
+ apr_off_t range_start;
+ apr_off_t range_end;
+ apr_off_t clength = 0;
+ apr_status_t rv;
+ int found = 0;
+ int num_ranges;
+ char *bound_head = NULL;
+ apr_array_header_t *indexes;
+ indexes_t *idx;
+ int i;
+ int original_status;
+ int max_ranges, max_overlaps, max_reversals;
+ int overlaps = 0, reversals = 0;
+ core_dir_config *core_conf = ap_get_core_module_config(r->per_dir_config);
+
+ max_ranges = ( (core_conf->max_ranges >= 0 || core_conf->max_ranges == AP_MAXRANGES_UNLIMITED)
+ ? core_conf->max_ranges
+ : AP_DEFAULT_MAX_RANGES );
+ max_overlaps = ( (core_conf->max_overlaps >= 0 || core_conf->max_overlaps == AP_MAXRANGES_UNLIMITED)
+ ? core_conf->max_overlaps
+ : AP_DEFAULT_MAX_OVERLAPS );
+ max_reversals = ( (core_conf->max_reversals >= 0 || core_conf->max_reversals == AP_MAXRANGES_UNLIMITED)
+ ? core_conf->max_reversals
+ : AP_DEFAULT_MAX_REVERSALS );
+ /*
+ * Iterate through the brigade until reaching EOS or a bucket with
+ * unknown length.
+ */
+ for (e = APR_BRIGADE_FIRST(bb);
+ (e != APR_BRIGADE_SENTINEL(bb) && !APR_BUCKET_IS_EOS(e)
+ && e->length != (apr_size_t)-1);
+ e = APR_BUCKET_NEXT(e)) {
+ clength += e->length;
+ }
+
+ /*
+ * Don't attempt to do byte range work if this brigade doesn't
+ * contain an EOS, or if any of the buckets has an unknown length;
+ * this avoids the cases where it is expensive to perform
+ * byteranging (i.e. may require arbitrary amounts of memory).
+ */
+ if (!APR_BUCKET_IS_EOS(e) || clength <= 0) {
+ ap_remove_output_filter(f);
+ return ap_pass_brigade(f->next, bb);
+ }
+
+ original_status = r->status;
+ num_ranges = ap_set_byterange(r, clength, &indexes, &overlaps, &reversals);
+
+ /* No Ranges or we hit a limit? We have nothing to do, get out of the way. */
+ if (num_ranges == 0 ||
+ (max_ranges >= 0 && num_ranges > max_ranges) ||
+ (max_overlaps >= 0 && overlaps > max_overlaps) ||
+ (max_reversals >= 0 && reversals > max_reversals)) {
+ r->status = original_status;
+ ap_remove_output_filter(f);
+ return ap_pass_brigade(f->next, bb);
+ }
+
+ /* this brigade holds what we will be sending */
+ bsend = apr_brigade_create(r->pool, c->bucket_alloc);
+
+ if (num_ranges < 0)
+ return send_416(f, bsend);
+
+ if (num_ranges > 1) {
+ /* Is ap_make_content_type required here? */
+ const char *orig_ct = ap_make_content_type(r, r->content_type);
+
+ ap_set_content_type(r, apr_pstrcat(r->pool, "multipart",
+ use_range_x(r) ? "/x-" : "/",
+ "byteranges; boundary=",
+ ap_multipart_boundary, NULL));
+
+ if (orig_ct) {
+ bound_head = apr_pstrcat(r->pool,
+ CRLF "--", ap_multipart_boundary,
+ CRLF "Content-type: ",
+ orig_ct,
+ CRLF "Content-range: bytes ",
+ NULL);
+ }
+ else {
+ /* if we have no type for the content, do our best */
+ bound_head = apr_pstrcat(r->pool,
+ CRLF "--", ap_multipart_boundary,
+ CRLF "Content-range: bytes ",
+ NULL);
+ }
+ ap_xlate_proto_to_ascii(bound_head, strlen(bound_head));
+ }
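+
+    /*
+     * Illustrative shape of the multipart body assembled below, for two
+     * ranges of a 1000-byte text/plain resource (the boundary value is
+     * hypothetical):
+     *
+     *   CRLF --4876e93e CRLF Content-type: text/plain
+     *   CRLF Content-range: bytes 0-499/1000 CRLF CRLF <500 bytes>
+     *   CRLF --4876e93e CRLF Content-type: text/plain
+     *   CRLF Content-range: bytes 600-999/1000 CRLF CRLF <400 bytes>
+     *   CRLF --4876e93e-- CRLF
+     */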
+
+ tmpbb = apr_brigade_create(r->pool, c->bucket_alloc);
+
+ idx = (indexes_t *)indexes->elts;
+ for (i = 0; i < indexes->nelts; i++, idx++) {
+ range_start = idx->start;
+ range_end = idx->end;
+
+ rv = copy_brigade_range(bb, tmpbb, range_start, range_end);
+ if (rv != APR_SUCCESS ) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01584)
+ "copy_brigade_range() failed [%" APR_OFF_T_FMT
+ "-%" APR_OFF_T_FMT ",%" APR_OFF_T_FMT "]",
+ range_start, range_end, clength);
+ continue;
+ }
+ found = 1;
+
+ /*
+ * For single range requests, we must produce Content-Range header.
+ * Otherwise, we need to produce the multipart boundaries.
+ */
+ if (num_ranges == 1) {
+ apr_table_setn(r->headers_out, "Content-Range",
+ apr_psprintf(r->pool, "bytes " BYTERANGE_FMT,
+ range_start, range_end, clength));
+ }
+ else {
+ char *ts;
+
+ e = apr_bucket_pool_create(bound_head, strlen(bound_head),
+ r->pool, c->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(bsend, e);
+
+ ts = apr_psprintf(r->pool, BYTERANGE_FMT CRLF CRLF,
+ range_start, range_end, clength);
+ ap_xlate_proto_to_ascii(ts, strlen(ts));
+ e = apr_bucket_pool_create(ts, strlen(ts), r->pool,
+ c->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(bsend, e);
+ }
+
+ APR_BRIGADE_CONCAT(bsend, tmpbb);
+ if (i && !(i & 0x1F)) {
+ /*
+ * Every now and then, pass what we have down the filter chain.
+ * In this case, the content-length filter cannot calculate and
+ * set the content length and we must remove any Content-Length
+ * header already present.
+ */
+ apr_table_unset(r->headers_out, "Content-Length");
+ if ((rv = ap_pass_brigade(f->next, bsend)) != APR_SUCCESS)
+ return rv;
+ apr_brigade_cleanup(bsend);
+ }
+ }
+
+ if (found == 0) {
+ /* bsend is assumed to be empty if we get here. */
+ return send_416(f, bsend);
+ }
+
+ if (num_ranges > 1) {
+ char *end;
+
+ /* add the final boundary */
+ end = apr_pstrcat(r->pool, CRLF "--", ap_multipart_boundary, "--" CRLF,
+ NULL);
+ ap_xlate_proto_to_ascii(end, strlen(end));
+ e = apr_bucket_pool_create(end, strlen(end), r->pool, c->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(bsend, e);
+ }
+
+ e = apr_bucket_eos_create(c->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(bsend, e);
+
+ /* we're done with the original content - all of our data is in bsend. */
+ apr_brigade_cleanup(bb);
+ apr_brigade_destroy(tmpbb);
+
+ /* send our multipart output */
+ return ap_pass_brigade(f->next, bsend);
+}
diff --git a/modules/http/chunk_filter.c b/modules/http/chunk_filter.c
new file mode 100644
index 0000000..cb1501a
--- /dev/null
+++ b/modules/http/chunk_filter.c
@@ -0,0 +1,197 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * chunk_filter.c --- HTTP/1.1 chunked transfer encoding filter.
+ */
+
+#include "apr_strings.h"
+#include "apr_thread_proc.h" /* for RLIMIT stuff */
+
+#define APR_WANT_STRFUNC
+#include "apr_want.h"
+
+#include "httpd.h"
+#include "http_config.h"
+#include "http_connection.h"
+#include "http_core.h"
+#include "http_protocol.h" /* For index_of_response(). Grump. */
+#include "http_request.h"
+
+#include "util_filter.h"
+#include "util_ebcdic.h"
+#include "ap_mpm.h"
+#include "scoreboard.h"
+
+#include "mod_core.h"
+
+/*
+ * The address of this variable is stored in the filter context to remember
+ * that a bad gateway error bucket has been seen; it serves only as a unique
+ * sentinel pointer.
+ */
+static char bad_gateway_seen;
+
+apr_status_t ap_http_chunk_filter(ap_filter_t *f, apr_bucket_brigade *b)
+{
+#define ASCII_CRLF "\015\012"
+#define ASCII_ZERO "\060"
+ conn_rec *c = f->r->connection;
+ apr_bucket_brigade *more, *tmp;
+ apr_bucket *e;
+ apr_status_t rv;
+
+ for (more = tmp = NULL; b; b = more, more = NULL) {
+ apr_off_t bytes = 0;
+ apr_bucket *eos = NULL;
+ apr_bucket *flush = NULL;
+ /* XXX: chunk_hdr must remain at this scope since it is used in a
+ * transient bucket.
+ */
+ char chunk_hdr[20]; /* enough space for the snprintf below */
+
+
+ for (e = APR_BRIGADE_FIRST(b);
+ e != APR_BRIGADE_SENTINEL(b);
+ e = APR_BUCKET_NEXT(e))
+ {
+ if (APR_BUCKET_IS_EOS(e)) {
+ /* there shouldn't be anything after the eos */
+ ap_remove_output_filter(f);
+ eos = e;
+ break;
+ }
+ if (AP_BUCKET_IS_ERROR(e)
+ && (((ap_bucket_error *)(e->data))->status
+ == HTTP_BAD_GATEWAY)) {
+ /*
+ * We had a broken backend. Memorize this in the filter
+ * context.
+ */
+ f->ctx = &bad_gateway_seen;
+ continue;
+ }
+ if (APR_BUCKET_IS_FLUSH(e)) {
+ flush = e;
+ if (e != APR_BRIGADE_LAST(b)) {
+ more = apr_brigade_split_ex(b, APR_BUCKET_NEXT(e), tmp);
+ }
+ break;
+ }
+ else if (e->length == (apr_size_t)-1) {
+ /* unknown amount of data (e.g. a pipe) */
+ const char *data;
+ apr_size_t len;
+
+ rv = apr_bucket_read(e, &data, &len, APR_BLOCK_READ);
+ if (rv != APR_SUCCESS) {
+ return rv;
+ }
+ if (len > 0) {
+ /*
+ * There may be a new next bucket representing the
+ * rest of the data stream on which a read() may
+ * block so we pass down what we have so far.
+ */
+ bytes += len;
+ more = apr_brigade_split_ex(b, APR_BUCKET_NEXT(e), tmp);
+ break;
+ }
+ else {
+ /* If there was nothing in this bucket then we can
+ * safely move on to the next one without pausing
+ * to pass down what we have counted up so far.
+ */
+ continue;
+ }
+ }
+ else {
+ bytes += e->length;
+ }
+ }
+
+ /*
+ * XXX: if there aren't very many bytes at this point it may
+ * be a good idea to set them aside and return for more,
+ * unless we haven't finished counting this brigade yet.
+ */
+ /* if there are content bytes, then wrap them in a chunk */
+ if (bytes > 0) {
+ apr_size_t hdr_len;
+ /*
+ * Insert the chunk header, specifying the number of bytes in
+ * the chunk.
+ */
+ hdr_len = apr_snprintf(chunk_hdr, sizeof(chunk_hdr),
+ "%" APR_UINT64_T_HEX_FMT CRLF, (apr_uint64_t)bytes);
+ ap_xlate_proto_to_ascii(chunk_hdr, hdr_len);
+ e = apr_bucket_transient_create(chunk_hdr, hdr_len,
+ c->bucket_alloc);
+ APR_BRIGADE_INSERT_HEAD(b, e);
+
+ /*
+ * Insert the end-of-chunk CRLF before an EOS or
+ * FLUSH bucket, or appended to the brigade
+ */
+ e = apr_bucket_immortal_create(ASCII_CRLF, 2, c->bucket_alloc);
+ if (eos != NULL) {
+ APR_BUCKET_INSERT_BEFORE(eos, e);
+ }
+ else if (flush != NULL) {
+ APR_BUCKET_INSERT_BEFORE(flush, e);
+ }
+ else {
+ APR_BRIGADE_INSERT_TAIL(b, e);
+ }
+ }
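+
+        /*
+         * Illustrative on-the-wire result of the wrapping above for a
+         * brigade carrying 8192 bytes, followed at EOS by the last-chunk
+         * marker added further down:
+         *
+         *   2000 CRLF <8192 bytes of data> CRLF
+         *   0 CRLF CRLF
+         */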
+
+ /* RFC 2616, Section 3.6.1
+ *
+ * If there is an EOS bucket, then prefix it with:
+ * 1) the last-chunk marker ("0" CRLF)
+ * 2) the trailer
+ * 3) the end-of-chunked body CRLF
+ *
+ * We only do this if we have not seen an error bucket with
+         * status HTTP_BAD_GATEWAY; seeing such a bucket is remembered in
+         * the filter context (see bad_gateway_seen above).
+ * The error bucket with status HTTP_BAD_GATEWAY indicates that the
+ * connection to the backend (mod_proxy) broke in the middle of the
+ * response. In order to signal the client that something went wrong
+ * we do not create the last-chunk marker and set c->keepalive to
+ * AP_CONN_CLOSE in the core output filter.
+ *
+ * XXX: it would be nice to combine this with the end-of-chunk
+ * marker above, but this is a bit more straight-forward for
+ * now.
+ */
+ if (eos && !f->ctx) {
+ /* XXX: (2) trailers ... does not yet exist */
+ e = apr_bucket_immortal_create(ASCII_ZERO ASCII_CRLF
+ /* <trailers> */
+ ASCII_CRLF, 5, c->bucket_alloc);
+ APR_BUCKET_INSERT_BEFORE(eos, e);
+ }
+
+ /* pass the brigade to the next filter. */
+ rv = ap_pass_brigade(f->next, b);
+ apr_brigade_cleanup(b);
+ if (rv != APR_SUCCESS || eos != NULL) {
+ return rv;
+ }
+ tmp = b;
+ }
+ return APR_SUCCESS;
+}
diff --git a/modules/http/config.m4 b/modules/http/config.m4
new file mode 100644
index 0000000..6496007
--- /dev/null
+++ b/modules/http/config.m4
@@ -0,0 +1,20 @@
+dnl modules enabled in this directory by default
+
+APACHE_MODPATH_INIT(http)
+
+http_objects="http_core.lo http_protocol.lo http_request.lo http_filters.lo chunk_filter.lo byterange_filter.lo http_etag.lo"
+
+dnl mod_http should only be built as a static module for now.
+dnl this will hopefully be "fixed" at some point in the future by
+dnl refactoring mod_http and moving some things to the core and
+dnl vice versa so that the core does not depend upon mod_http.
+if test "$enable_http" = "yes"; then
+ enable_http="static"
+elif test "$enable_http" = "shared"; then
+ AC_MSG_ERROR([mod_http can not be built as a shared DSO])
+fi
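+
+dnl For example, "./configure --enable-http" is coerced to a static build by
+dnl the test above, while "./configure --enable-http=shared" aborts with the
+dnl error above.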
+
+APACHE_MODULE(http,[HTTP protocol handling. The http module is a basic one that enables the server to function as an HTTP server. It is only useful to disable it if you want to use another protocol module instead. Don't disable this module unless you are really sure what you are doing. Note: This module will always be linked statically.], $http_objects, , static)
+APACHE_MODULE(mime, mapping of file-extension to MIME. Disabling this module is normally not recommended., , , yes)
+
+APACHE_MODPATH_FINISH
diff --git a/modules/http/http_core.c b/modules/http/http_core.c
new file mode 100644
index 0000000..c6cb473
--- /dev/null
+++ b/modules/http/http_core.c
@@ -0,0 +1,326 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "apr_strings.h"
+#include "apr_thread_proc.h" /* for RLIMIT stuff */
+
+#define APR_WANT_STRFUNC
+#include "apr_want.h"
+
+#include "httpd.h"
+#include "http_config.h"
+#include "http_connection.h"
+#include "http_core.h"
+#include "http_protocol.h" /* For index_of_response(). Grump. */
+#include "http_request.h"
+
+#include "util_filter.h"
+#include "util_ebcdic.h"
+#include "ap_mpm.h"
+#include "scoreboard.h"
+
+#include "mod_core.h"
+
+/* Handles for core filters */
+AP_DECLARE_DATA ap_filter_rec_t *ap_http_input_filter_handle;
+AP_DECLARE_DATA ap_filter_rec_t *ap_http_header_filter_handle;
+AP_DECLARE_DATA ap_filter_rec_t *ap_chunk_filter_handle;
+AP_DECLARE_DATA ap_filter_rec_t *ap_http_outerror_filter_handle;
+AP_DECLARE_DATA ap_filter_rec_t *ap_byterange_filter_handle;
+
+AP_DECLARE_DATA const char *ap_multipart_boundary;
+
+/* If we are using an MPM that supports async connections,
+ * use a different processing function.
+ */
+static int async_mpm = 0;
+
+static const char *set_keep_alive_timeout(cmd_parms *cmd, void *dummy,
+ const char *arg)
+{
+ apr_interval_time_t timeout;
+ const char *err = ap_check_cmd_context(cmd, NOT_IN_DIR_CONTEXT);
+ if (err != NULL) {
+ return err;
+ }
+
+ /* Stolen from mod_proxy.c */
+ if (ap_timeout_parameter_parse(arg, &timeout, "s") != APR_SUCCESS)
+ return "KeepAliveTimeout has wrong format";
+ cmd->server->keep_alive_timeout = timeout;
+
+ /* We don't want to take into account whether or not KeepAliveTimeout is
+ * set for the main server, because if no http_module directive is used
+ * for a vhost, it will inherit the http_srv_cfg from the main server.
+ * However keep_alive_timeout_set helps determine whether the vhost should
+ * use its own configured timeout or the one from the vhost declared first
+ * on the same IP:port (ie. c->base_server, and the legacy behaviour).
+ */
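+    /*
+     * Illustrative case: "KeepAliveTimeout 5" in the main server and no
+     * KeepAliveTimeout in a name-based vhost leaves that vhost's
+     * keep_alive_timeout_set at 0, so requests to it fall back to the
+     * timeout of the first vhost on the same IP:port (c->base_server) in
+     * ap_process_http_sync_connection().
+     */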
+ if (cmd->server->is_virtual) {
+ cmd->server->keep_alive_timeout_set = 1;
+ }
+ return NULL;
+}
+
+static const char *set_keep_alive(cmd_parms *cmd, void *dummy,
+ int arg)
+{
+ const char *err = ap_check_cmd_context(cmd, NOT_IN_DIR_CONTEXT);
+ if (err != NULL) {
+ return err;
+ }
+
+ cmd->server->keep_alive = arg;
+ return NULL;
+}
+
+static const char *set_keep_alive_max(cmd_parms *cmd, void *dummy,
+ const char *arg)
+{
+ const char *err = ap_check_cmd_context(cmd, NOT_IN_DIR_CONTEXT);
+ if (err != NULL) {
+ return err;
+ }
+
+ cmd->server->keep_alive_max = atoi(arg);
+ return NULL;
+}
+
+static const command_rec http_cmds[] = {
+ AP_INIT_TAKE1("KeepAliveTimeout", set_keep_alive_timeout, NULL, RSRC_CONF,
+ "Keep-Alive timeout duration (sec)"),
+ AP_INIT_TAKE1("MaxKeepAliveRequests", set_keep_alive_max, NULL, RSRC_CONF,
+ "Maximum number of Keep-Alive requests per connection, "
+ "or 0 for infinite"),
+ AP_INIT_FLAG("KeepAlive", set_keep_alive, NULL, RSRC_CONF,
+ "Whether persistent connections should be On or Off"),
+ { NULL }
+};
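+
+/*
+ * Typical httpd.conf usage of the directives above (the values shown are
+ * the stock defaults, given for illustration):
+ *
+ *   KeepAlive On
+ *   MaxKeepAliveRequests 100
+ *   KeepAliveTimeout 5
+ */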
+
+static const char *http_scheme(const request_rec *r)
+{
+ /*
+ * The http module shouldn't return anything other than
+ * "http" (the default) or "https".
+ */
+ if (r->server->server_scheme &&
+ (strcmp(r->server->server_scheme, "https") == 0))
+ return "https";
+
+ return "http";
+}
+
+static apr_port_t http_port(const request_rec *r)
+{
+ if (r->server->server_scheme &&
+ (strcmp(r->server->server_scheme, "https") == 0))
+ return DEFAULT_HTTPS_PORT;
+
+ return DEFAULT_HTTP_PORT;
+}
+
+static int ap_process_http_async_connection(conn_rec *c)
+{
+ request_rec *r = NULL;
+ conn_state_t *cs = c->cs;
+
+ AP_DEBUG_ASSERT(cs != NULL);
+ AP_DEBUG_ASSERT(cs->state == CONN_STATE_READ_REQUEST_LINE);
+
+ if (cs->state == CONN_STATE_READ_REQUEST_LINE) {
+ ap_update_child_status_from_conn(c->sbh, SERVER_BUSY_READ, c);
+ if (ap_extended_status) {
+ ap_set_conn_count(c->sbh, r, c->keepalives);
+ }
+ if ((r = ap_read_request(c))) {
+ if (r->status == HTTP_OK) {
+ cs->state = CONN_STATE_HANDLER;
+ if (ap_extended_status) {
+ ap_set_conn_count(c->sbh, r, c->keepalives + 1);
+ }
+ ap_update_child_status(c->sbh, SERVER_BUSY_WRITE, r);
+ ap_process_async_request(r);
+ /* After the call to ap_process_request, the
+ * request pool may have been deleted. We set
+ * r=NULL here to ensure that any dereference
+ * of r that might be added later in this function
+ * will result in a segfault immediately instead
+ * of nondeterministic failures later.
+ */
+ r = NULL;
+ }
+
+ if (cs->state != CONN_STATE_WRITE_COMPLETION &&
+ cs->state != CONN_STATE_SUSPENDED) {
+ /* Something went wrong; close the connection */
+ cs->state = CONN_STATE_LINGER;
+ }
+ }
+ else { /* ap_read_request failed - client may have closed */
+ cs->state = CONN_STATE_LINGER;
+ }
+ }
+
+ return OK;
+}
+
+static int ap_process_http_sync_connection(conn_rec *c)
+{
+ request_rec *r;
+ conn_state_t *cs = c->cs;
+ apr_socket_t *csd = NULL;
+ int mpm_state = 0;
+
+ /*
+ * Read and process each request found on our connection
+ * until no requests are left or we decide to close.
+ */
+
+ ap_update_child_status_from_conn(c->sbh, SERVER_BUSY_READ, c);
+ while ((r = ap_read_request(c)) != NULL) {
+ apr_interval_time_t keep_alive_timeout = r->server->keep_alive_timeout;
+
+ /* To preserve legacy behaviour, use the keepalive timeout from the
+ * base server (first on this IP:port) when none is explicitly
+ * configured on this server.
+ */
+ if (!r->server->keep_alive_timeout_set) {
+ keep_alive_timeout = c->base_server->keep_alive_timeout;
+ }
+
+ if (r->status == HTTP_OK) {
+ if (cs)
+ cs->state = CONN_STATE_HANDLER;
+ ap_update_child_status(c->sbh, SERVER_BUSY_WRITE, r);
+ ap_process_request(r);
+ /* After the call to ap_process_request, the
+ * request pool will have been deleted. We set
+ * r=NULL here to ensure that any dereference
+ * of r that might be added later in this function
+ * will result in a segfault immediately instead
+ * of nondeterministic failures later.
+ */
+ r = NULL;
+ }
+
+ if (c->keepalive != AP_CONN_KEEPALIVE || c->aborted)
+ break;
+
+ ap_update_child_status(c->sbh, SERVER_BUSY_KEEPALIVE, NULL);
+
+ if (ap_mpm_query(AP_MPMQ_MPM_STATE, &mpm_state)) {
+ break;
+ }
+
+ if (mpm_state == AP_MPMQ_STOPPING) {
+ break;
+ }
+
+ if (!csd) {
+ csd = ap_get_conn_socket(c);
+ }
+ apr_socket_opt_set(csd, APR_INCOMPLETE_READ, 1);
+ apr_socket_timeout_set(csd, keep_alive_timeout);
+ /* Go straight to select() to wait for the next request */
+ }
+
+ return OK;
+}
+
+static int ap_process_http_connection(conn_rec *c)
+{
+ if (async_mpm && !c->clogging_input_filters) {
+ return ap_process_http_async_connection(c);
+ }
+ else {
+ return ap_process_http_sync_connection(c);
+ }
+}
+
+static int http_create_request(request_rec *r)
+{
+ if (!r->main && !r->prev) {
+ ap_add_output_filter_handle(ap_byterange_filter_handle,
+ NULL, r, r->connection);
+ ap_add_output_filter_handle(ap_content_length_filter_handle,
+ NULL, r, r->connection);
+ ap_add_output_filter_handle(ap_http_header_filter_handle,
+ NULL, r, r->connection);
+ ap_add_output_filter_handle(ap_http_outerror_filter_handle,
+ NULL, r, r->connection);
+ }
+
+ return OK;
+}
+
+static int http_send_options(request_rec *r)
+{
+ if ((r->method_number == M_OPTIONS) && r->uri && (r->uri[0] == '*') &&
+ (r->uri[1] == '\0')) {
+ return DONE; /* Send HTTP pong, without Allow header */
+ }
+ return DECLINED;
+}
+
+static int http_post_config(apr_pool_t *p, apr_pool_t *plog, apr_pool_t *ptemp, server_rec *s)
+{
+ apr_uint64_t val;
+ if (ap_mpm_query(AP_MPMQ_IS_ASYNC, &async_mpm) != APR_SUCCESS) {
+ async_mpm = 0;
+ }
+ ap_random_insecure_bytes(&val, sizeof(val));
+ ap_multipart_boundary = apr_psprintf(p, "%0" APR_UINT64_T_HEX_FMT, val);
+
+ return OK;
+}
+
+static void register_hooks(apr_pool_t *p)
+{
+ ap_hook_post_config(http_post_config, NULL, NULL, APR_HOOK_MIDDLE);
+ ap_hook_process_connection(ap_process_http_connection, NULL, NULL,
+ APR_HOOK_REALLY_LAST);
+ ap_hook_map_to_storage(ap_send_http_trace,NULL,NULL,APR_HOOK_MIDDLE);
+ ap_hook_map_to_storage(http_send_options,NULL,NULL,APR_HOOK_MIDDLE);
+ ap_hook_http_scheme(http_scheme,NULL,NULL,APR_HOOK_REALLY_LAST);
+ ap_hook_default_port(http_port,NULL,NULL,APR_HOOK_REALLY_LAST);
+ ap_hook_create_request(http_create_request, NULL, NULL, APR_HOOK_REALLY_LAST);
+ ap_http_input_filter_handle =
+ ap_register_input_filter("HTTP_IN", ap_http_filter,
+ NULL, AP_FTYPE_PROTOCOL);
+ ap_http_header_filter_handle =
+ ap_register_output_filter("HTTP_HEADER", ap_http_header_filter,
+ NULL, AP_FTYPE_PROTOCOL);
+ ap_chunk_filter_handle =
+ ap_register_output_filter("CHUNK", ap_http_chunk_filter,
+ NULL, AP_FTYPE_TRANSCODE);
+ ap_http_outerror_filter_handle =
+ ap_register_output_filter("HTTP_OUTERROR", ap_http_outerror_filter,
+ NULL, AP_FTYPE_PROTOCOL);
+ ap_byterange_filter_handle =
+ ap_register_output_filter("BYTERANGE", ap_byterange_filter,
+ NULL, AP_FTYPE_PROTOCOL);
+ ap_method_registry_init(p);
+}
+
+AP_DECLARE_MODULE(http) = {
+ STANDARD20_MODULE_STUFF,
+ NULL, /* create per-directory config structure */
+ NULL, /* merge per-directory config structures */
+ NULL, /* create per-server config structure */
+ NULL, /* merge per-server config structures */
+ http_cmds, /* command apr_table_t */
+ register_hooks /* register hooks */
+};
diff --git a/modules/http/http_etag.c b/modules/http/http_etag.c
new file mode 100644
index 0000000..af74549
--- /dev/null
+++ b/modules/http/http_etag.c
@@ -0,0 +1,413 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "apr_strings.h"
+#include "apr_thread_proc.h" /* for RLIMIT stuff */
+#include "apr_sha1.h"
+#include "apr_base64.h"
+#include "apr_buckets.h"
+
+#define APR_WANT_STRFUNC
+#include "apr_want.h"
+
+#include "httpd.h"
+#include "http_config.h"
+#include "http_connection.h"
+#include "http_core.h"
+#include "http_log.h"
+#include "http_protocol.h" /* For index_of_response(). Grump. */
+#include "http_request.h"
+
+#if APR_HAS_MMAP
+#include "apr_mmap.h"
+#endif /* APR_HAS_MMAP */
+
+#define SHA1_DIGEST_BASE64_LEN 4*(APR_SHA1_DIGESTSIZE/3)
+
+/* Generate the human-readable hex representation of an apr_uint64_t
+ * (basically a faster version of 'sprintf("%llx")')
+ */
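+/* e.g. (illustrative) 0 -> "0", 0x1a2b00 -> "1a2b00"; leading zero nibbles
+ * are skipped, the least significant nibble is always emitted */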
+#define HEX_DIGITS "0123456789abcdef"
+static char *etag_uint64_to_hex(char *next, apr_uint64_t u)
+{
+ int printing = 0;
+ int shift = sizeof(apr_uint64_t) * 8 - 4;
+ do {
+ unsigned short next_digit = (unsigned short)
+ ((u >> shift) & (apr_uint64_t)0xf);
+ if (next_digit) {
+ *next++ = HEX_DIGITS[next_digit];
+ printing = 1;
+ }
+ else if (printing) {
+ *next++ = HEX_DIGITS[next_digit];
+ }
+ shift -= 4;
+ } while (shift);
+ *next++ = HEX_DIGITS[u & (apr_uint64_t)0xf];
+ return next;
+}
+
+#define ETAG_WEAK "W/"
+#define CHARS_PER_UINT64 (sizeof(apr_uint64_t) * 2)
+
+static void etag_start(char *etag, const char *weak, char **next)
+{
+ if (weak) {
+ while (*weak) {
+ *etag++ = *weak++;
+ }
+ }
+ *etag++ = '"';
+
+ *next = etag;
+}
+
+static void etag_end(char *next, const char *vlv, apr_size_t vlv_len)
+{
+ if (vlv) {
+ *next++ = ';';
+ apr_cpystrn(next, vlv, vlv_len);
+ }
+ else {
+ *next++ = '"';
+ *next = '\0';
+ }
+}
+
+/*
+ * Construct a strong ETag by creating a SHA1 hash across the file content.
+ */
+static char *make_digest_etag(request_rec *r, etag_rec *er, char *vlv,
+ apr_size_t vlv_len, char *weak, apr_size_t weak_len)
+{
+ apr_sha1_ctx_t context;
+ unsigned char digest[APR_SHA1_DIGESTSIZE];
+ apr_file_t *fd = NULL;
+ core_dir_config *cfg;
+ char *etag, *next;
+ apr_bucket_brigade *bb;
+ apr_bucket *e;
+
+ apr_size_t nbytes;
+ apr_off_t offset = 0, zero = 0, len = 0;
+ apr_status_t status;
+
+ cfg = (core_dir_config *)ap_get_core_module_config(r->per_dir_config);
+
+ if (er->fd) {
+ fd = er->fd;
+ }
+ else if (er->pathname) {
+ if ((status = apr_file_open(&fd, er->pathname, APR_READ | APR_BINARY,
+ 0, r->pool)) != APR_SUCCESS) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, status, r, APLOGNO(10251)
+ "Make etag: could not open %s", er->pathname);
+ return "";
+ }
+ }
+ if (!fd) {
+ return "";
+ }
+
+ if ((status = apr_file_seek(fd, APR_CUR, &offset)) != APR_SUCCESS) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, status, r, APLOGNO(10252)
+ "Make etag: could not seek");
+ if (er->pathname) {
+ apr_file_close(fd);
+ }
+ return "";
+ }
+
+ if ((status = apr_file_seek(fd, APR_END, &len)) != APR_SUCCESS) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, status, r, APLOGNO(10258)
+ "Make etag: could not seek");
+ if (er->pathname) {
+ apr_file_close(fd);
+ }
+ return "";
+ }
+
+ if ((status = apr_file_seek(fd, APR_SET, &zero)) != APR_SUCCESS) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, status, r, APLOGNO(10253)
+ "Make etag: could not seek");
+ if (er->pathname) {
+ apr_file_close(fd);
+ }
+ return "";
+ }
+
+ bb = apr_brigade_create(r->pool, r->connection->bucket_alloc);
+
+ e = apr_brigade_insert_file(bb, fd, 0, len, r->pool);
+
+#if APR_HAS_MMAP
+ if (cfg->enable_mmap == ENABLE_MMAP_OFF) {
+ (void)apr_bucket_file_enable_mmap(e, 0);
+ }
+#endif
+
+ apr_sha1_init(&context);
+ while (!APR_BRIGADE_EMPTY(bb))
+ {
+ const char *str;
+
+ e = APR_BRIGADE_FIRST(bb);
+
+ if ((status = apr_bucket_read(e, &str, &nbytes, APR_BLOCK_READ)) != APR_SUCCESS) {
+ apr_brigade_destroy(bb);
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, status, r, APLOGNO(10254)
+ "Make etag: could not read");
+ if (er->pathname) {
+ apr_file_close(fd);
+ }
+ return "";
+ }
+
+ apr_sha1_update(&context, str, nbytes);
+ apr_bucket_delete(e);
+ }
+
+ if ((status = apr_file_seek(fd, APR_SET, &offset)) != APR_SUCCESS) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, status, r, APLOGNO(10255)
+ "Make etag: could not seek");
+ if (er->pathname) {
+ apr_file_close(fd);
+ }
+ return "";
+ }
+ apr_sha1_final(digest, &context);
+
+ etag = apr_palloc(r->pool, weak_len + sizeof("\"\"") +
+ SHA1_DIGEST_BASE64_LEN + vlv_len + 4);
+
+ etag_start(etag, weak, &next);
+ next += apr_base64_encode_binary(next, digest, APR_SHA1_DIGESTSIZE) - 1;
+ etag_end(next, vlv, vlv_len);
+
+ if (er->pathname) {
+ apr_file_close(fd);
+ }
+
+ return etag;
+}
+
+/*
+ * Construct an entity tag (ETag) from resource information. If it's a real
+ * file, build in some of the file characteristics. If the modification time
+ * is newer than (request-time minus 1 second), mark the ETag as weak - it
+ * could be modified again in as short an interval.
+ */
+AP_DECLARE(char *) ap_make_etag_ex(request_rec *r, etag_rec *er)
+{
+ char *weak = NULL;
+ apr_size_t weak_len = 0, vlv_len = 0;
+ char *etag, *next, *vlv;
+ core_dir_config *cfg;
+ etag_components_t etag_bits;
+ etag_components_t bits_added;
+
+ cfg = (core_dir_config *)ap_get_core_module_config(r->per_dir_config);
+ etag_bits = (cfg->etag_bits & (~ cfg->etag_remove)) | cfg->etag_add;
+
+ if (er->force_weak) {
+ weak = ETAG_WEAK;
+ weak_len = sizeof(ETAG_WEAK);
+ }
+
+ if (r->vlist_validator) {
+
+ /* If we have a variant list validator (vlv) due to the
+ * response being negotiated, then we create a structured
+ * entity tag which merges the variant etag with the variant
+ * list validator (vlv). This merging makes revalidation
+ * somewhat safer, ensures that caches which can deal with
+ * Vary will (eventually) be updated if the set of variants is
+ * changed, and is also a protocol requirement for transparent
+ * content negotiation.
+ */
+
+ /* if the variant list validator is weak, we make the whole
+ * structured etag weak. If we would not, then clients could
+ * have problems merging range responses if we have different
+ * variants with the same non-globally-unique strong etag.
+ */
+
+ vlv = r->vlist_validator;
+ if (vlv[0] == 'W') {
+ vlv += 3;
+ weak = ETAG_WEAK;
+ weak_len = sizeof(ETAG_WEAK);
+ }
+ else {
+ vlv++;
+ }
+ vlv_len = strlen(vlv);
+
+ }
+ else {
+ vlv = NULL;
+ vlv_len = 0;
+ }
+
+ /*
+ * Did a module flag the need for a strong etag, or did the
+ * configuration tell us to generate a digest?
+ */
+ if (er->finfo->filetype == APR_REG &&
+ (AP_REQUEST_IS_STRONG_ETAG(r) || (etag_bits & ETAG_DIGEST))) {
+
+ return make_digest_etag(r, er, vlv, vlv_len, weak, weak_len);
+ }
+
+ /*
+ * If it's a file (or we wouldn't be here) and no ETags
+ * should be set for files, return an empty string and
+ * note it for the header-sender to ignore.
+ */
+ if (etag_bits & ETAG_NONE) {
+ return "";
+ }
+
+ if (etag_bits == ETAG_UNSET) {
+ etag_bits = ETAG_BACKWARD;
+ }
+ /*
+ * Make an ETag header out of various pieces of information. We use
+ * the last-modified date and, if we have a real file, the
+ * length and inode number - note that this doesn't have to match
+ * the content-length (i.e. includes), it just has to be unique
+ * for the file.
+ *
+ * If the request was made within a second of the last-modified date,
+ * we send a weak tag instead of a strong one, since it could
+ * be modified again later in the second, and the validation
+ * would be incorrect.
+ */
+ if ((er->request_time - er->finfo->mtime < (1 * APR_USEC_PER_SEC))) {
+ weak = ETAG_WEAK;
+ weak_len = sizeof(ETAG_WEAK);
+ }
+
+ if (er->finfo->filetype != APR_NOFILE) {
+ /*
+ * ETag gets set to [W/]"inode-size-mtime", modulo any
+ * FileETag keywords.
+ */
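+        /*
+         * Illustrative results (hex values are hypothetical):
+         *   FileETag INode Size MTime  ->  ETag: "1a2b-58f-5c3e1a2b4d5e6"
+         *   FileETag Size MTime        ->  ETag: "58f-5c3e1a2b4d5e6"
+         * each prefixed with W/ when the tag is weak.
+         */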
+ etag = apr_palloc(r->pool, weak_len + sizeof("\"--\"") +
+ 3 * CHARS_PER_UINT64 + vlv_len + 2);
+
+ etag_start(etag, weak, &next);
+
+ bits_added = 0;
+ if (etag_bits & ETAG_INODE) {
+ next = etag_uint64_to_hex(next, er->finfo->inode);
+ bits_added |= ETAG_INODE;
+ }
+ if (etag_bits & ETAG_SIZE) {
+ if (bits_added != 0) {
+ *next++ = '-';
+ }
+ next = etag_uint64_to_hex(next, er->finfo->size);
+ bits_added |= ETAG_SIZE;
+ }
+ if (etag_bits & ETAG_MTIME) {
+ if (bits_added != 0) {
+ *next++ = '-';
+ }
+ next = etag_uint64_to_hex(next, er->finfo->mtime);
+ }
+
+ etag_end(next, vlv, vlv_len);
+
+ }
+ else {
+ /*
+ * Not a file document, so just use the mtime: [W/]"mtime"
+ */
+ etag = apr_palloc(r->pool, weak_len + sizeof("\"\"") +
+ CHARS_PER_UINT64 + vlv_len + 2);
+
+ etag_start(etag, weak, &next);
+ next = etag_uint64_to_hex(next, er->finfo->mtime);
+ etag_end(next, vlv, vlv_len);
+
+ }
+
+ return etag;
+}
+
+AP_DECLARE(char *) ap_make_etag(request_rec *r, int force_weak)
+{
+ etag_rec er;
+
+ er.vlist_validator = NULL;
+ er.request_time = r->request_time;
+ er.finfo = &r->finfo;
+ er.pathname = r->filename;
+ er.fd = NULL;
+ er.force_weak = force_weak;
+
+ return ap_make_etag_ex(r, &er);
+}
+
+AP_DECLARE(void) ap_set_etag(request_rec *r)
+{
+ char *etag;
+
+ etag_rec er;
+
+ er.vlist_validator = r->vlist_validator;
+ er.request_time = r->request_time;
+ er.finfo = &r->finfo;
+ er.pathname = r->filename;
+ er.fd = NULL;
+ er.force_weak = 0;
+
+ etag = ap_make_etag_ex(r, &er);
+
+ if (etag && etag[0]) {
+ apr_table_setn(r->headers_out, "ETag", etag);
+ }
+ else {
+ apr_table_setn(r->notes, "no-etag", "omit");
+ }
+
+}
+
+AP_DECLARE(void) ap_set_etag_fd(request_rec *r, apr_file_t *fd)
+{
+ char *etag;
+
+ etag_rec er;
+
+ er.vlist_validator = r->vlist_validator;
+ er.request_time = r->request_time;
+ er.finfo = &r->finfo;
+ er.pathname = NULL;
+ er.fd = fd;
+ er.force_weak = 0;
+
+ etag = ap_make_etag_ex(r, &er);
+
+ if (etag && etag[0]) {
+ apr_table_setn(r->headers_out, "ETag", etag);
+ }
+ else {
+ apr_table_setn(r->notes, "no-etag", "omit");
+ }
+
+}
diff --git a/modules/http/http_filters.c b/modules/http/http_filters.c
new file mode 100644
index 0000000..1a8df34
--- /dev/null
+++ b/modules/http/http_filters.c
@@ -0,0 +1,1941 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * http_filters.c --- HTTP routines which either are filters or deal with filters.
+ */
+
+#include "apr.h"
+#include "apr_strings.h"
+#include "apr_buckets.h"
+#include "apr_lib.h"
+#include "apr_signal.h"
+
+#define APR_WANT_STDIO /* for sscanf */
+#define APR_WANT_STRFUNC
+#define APR_WANT_MEMFUNC
+#include "apr_want.h"
+
+#include "util_filter.h"
+#include "ap_config.h"
+#include "httpd.h"
+#include "http_config.h"
+#include "http_core.h"
+#include "http_protocol.h"
+#include "http_main.h"
+#include "http_request.h"
+#include "http_vhost.h"
+#include "http_connection.h"
+#include "http_log.h" /* For errors detected in basic auth common
+ * support code... */
+#include "apr_date.h" /* For apr_date_parse_http and APR_DATE_BAD */
+#include "util_charset.h"
+#include "util_ebcdic.h"
+#include "util_time.h"
+
+#include "mod_core.h"
+
+#if APR_HAVE_STDARG_H
+#include <stdarg.h>
+#endif
+#if APR_HAVE_UNISTD_H
+#include <unistd.h>
+#endif
+
+APLOG_USE_MODULE(http);
+
+typedef struct http_filter_ctx
+{
+ apr_off_t remaining;
+ apr_off_t limit;
+ apr_off_t limit_used;
+ apr_int32_t chunk_used;
+ apr_int32_t chunk_bws;
+ apr_int32_t chunkbits;
+ enum
+ {
+ BODY_NONE, /* streamed data */
+ BODY_LENGTH, /* data constrained by content length */
+ BODY_CHUNK, /* chunk expected */
+ BODY_CHUNK_PART, /* chunk digits */
+ BODY_CHUNK_EXT, /* chunk extension */
+ BODY_CHUNK_CR, /* got space(s) after digits, expect [CR]LF or ext */
+ BODY_CHUNK_LF, /* got CR after digits or ext, expect LF */
+ BODY_CHUNK_DATA, /* data constrained by chunked encoding */
+ BODY_CHUNK_END, /* chunked data terminating CRLF */
+ BODY_CHUNK_END_LF, /* got CR after data, expect LF */
+ BODY_CHUNK_TRAILER /* trailers */
+ } state;
+ unsigned int eos_sent :1,
+ seen_data:1;
+ apr_bucket_brigade *bb;
+} http_ctx_t;
+
+/* bail out if some error in the HTTP input filter happens */
+static apr_status_t bail_out_on_error(http_ctx_t *ctx,
+ ap_filter_t *f,
+ int http_error)
+{
+ apr_bucket *e;
+ apr_bucket_brigade *bb = ctx->bb;
+
+ apr_brigade_cleanup(bb);
+
+ if (f->r->proxyreq == PROXYREQ_RESPONSE) {
+ switch (http_error) {
+ case HTTP_REQUEST_ENTITY_TOO_LARGE:
+ return APR_ENOSPC;
+
+ case HTTP_REQUEST_TIME_OUT:
+ return APR_INCOMPLETE;
+
+ case HTTP_NOT_IMPLEMENTED:
+ return APR_ENOTIMPL;
+
+ default:
+ return APR_EGENERAL;
+ }
+ }
+
+ e = ap_bucket_error_create(http_error,
+ NULL, f->r->pool,
+ f->c->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(bb, e);
+ e = apr_bucket_eos_create(f->c->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(bb, e);
+ ctx->eos_sent = 1;
+ /* If chunked encoding / content-length are corrupt, we may treat parts
+ * of this request's body as the next one's headers.
+ * To be safe, disable keep-alive.
+ */
+ f->r->connection->keepalive = AP_CONN_CLOSE;
+ return ap_pass_brigade(f->r->output_filters, bb);
+}
+
+/**
+ * Parse a chunk line with optional extension, detect overflow.
+ * There are several error cases:
+ * 1) If the chunk line is malformed, APR_EINVAL is returned.
+ * 2) If the conversion would require too many bits, APR_EGENERAL is returned.
+ * 3) If the conversion used the correct number of bits, but an overflow
+ * caused only the sign bit to flip, then APR_ENOSPC is returned.
+ * A negative chunk length always indicates an overflow error.
+ */
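+/*
+ * Illustrative inputs (CR/LF shown escaped):
+ *   "1a2b\r\n"          -> remaining = 0x1a2b, state BODY_CHUNK_DATA
+ *   "a;name=value\r\n"  -> remaining = 0xa, extension skipped
+ *   "0\r\n"             -> remaining = 0, state BODY_CHUNK_TRAILER
+ *   "\r\n" or "-5\r\n"  -> APR_EINVAL
+ *   17+ significant hex digits -> overflow error
+ */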
+static apr_status_t parse_chunk_size(http_ctx_t *ctx, const char *buffer,
+ apr_size_t len, int linelimit, int strict)
+{
+ apr_size_t i = 0;
+
+ while (i < len) {
+ char c = buffer[i];
+
+ ap_xlate_proto_from_ascii(&c, 1);
+
+ /* handle CRLF after the chunk */
+ if (ctx->state == BODY_CHUNK_END
+ || ctx->state == BODY_CHUNK_END_LF) {
+ if (c == LF) {
+ if (strict && (ctx->state != BODY_CHUNK_END_LF)) {
+ /*
+ * CR missing before LF.
+ */
+ return APR_EINVAL;
+ }
+ ctx->state = BODY_CHUNK;
+ }
+ else if (c == CR && ctx->state == BODY_CHUNK_END) {
+ ctx->state = BODY_CHUNK_END_LF;
+ }
+ else {
+ /*
+ * CRLF expected.
+ */
+ return APR_EINVAL;
+ }
+ i++;
+ continue;
+ }
+
+ /* handle start of the chunk */
+ if (ctx->state == BODY_CHUNK) {
+ if (!apr_isxdigit(c)) {
+ /*
+ * Detect invalid character at beginning. This also works for
+ * empty chunk size lines.
+ */
+ return APR_EINVAL;
+ }
+ else {
+ ctx->state = BODY_CHUNK_PART;
+ }
+ ctx->remaining = 0;
+ ctx->chunkbits = sizeof(apr_off_t) * 8;
+ ctx->chunk_used = 0;
+ ctx->chunk_bws = 0;
+ }
+
+ if (c == LF) {
+ if (strict && (ctx->state != BODY_CHUNK_LF)) {
+ /*
+ * CR missing before LF.
+ */
+ return APR_EINVAL;
+ }
+ if (ctx->remaining) {
+ ctx->state = BODY_CHUNK_DATA;
+ }
+ else {
+ ctx->state = BODY_CHUNK_TRAILER;
+ }
+ }
+ else if (ctx->state == BODY_CHUNK_LF) {
+ /*
+ * LF expected.
+ */
+ return APR_EINVAL;
+ }
+ else if (c == CR) {
+ ctx->state = BODY_CHUNK_LF;
+ }
+ else if (c == ';') {
+ ctx->state = BODY_CHUNK_EXT;
+ }
+ else if (ctx->state == BODY_CHUNK_EXT) {
+ /*
+ * Control chars (excluding tabs) are invalid.
+ * TODO: more precisely limit input
+ */
+ if (c != '\t' && apr_iscntrl(c)) {
+ return APR_EINVAL;
+ }
+ }
+ else if (c == ' ' || c == '\t') {
+ /* Be lenient up to 10 implied *LWS, a legacy of RFC 2616,
+ * and noted as errata to RFC7230;
+ * https://www.rfc-editor.org/errata_search.php?rfc=7230&eid=4667
+ */
+ ctx->state = BODY_CHUNK_CR;
+ if (++ctx->chunk_bws > 10) {
+ return APR_EINVAL;
+ }
+ }
+ else if (ctx->state == BODY_CHUNK_CR) {
+ /*
+ * ';', CR or LF expected.
+ */
+ return APR_EINVAL;
+ }
+ else if (ctx->state == BODY_CHUNK_PART) {
+ int xvalue;
+
+ /* ignore leading zeros */
+ if (!ctx->remaining && c == '0') {
+ i++;
+ continue;
+ }
+
+ ctx->chunkbits -= 4;
+ if (ctx->chunkbits < 0) {
+ /* overflow */
+ return APR_ENOSPC;
+ }
+
+ if (c >= '0' && c <= '9') {
+ xvalue = c - '0';
+ }
+ else if (c >= 'A' && c <= 'F') {
+ xvalue = c - 'A' + 0xa;
+ }
+ else if (c >= 'a' && c <= 'f') {
+ xvalue = c - 'a' + 0xa;
+ }
+ else {
+ /* bogus character */
+ return APR_EINVAL;
+ }
+
+ ctx->remaining = (ctx->remaining << 4) | xvalue;
+ if (ctx->remaining < 0) {
+ /* overflow */
+ return APR_ENOSPC;
+ }
+ }
+ else {
+ /* Should not happen */
+ return APR_EGENERAL;
+ }
+
+ i++;
+ }
+
+ /* sanity check */
+ ctx->chunk_used += len;
+ if (ctx->chunk_used < 0 || ctx->chunk_used > linelimit) {
+ return APR_ENOSPC;
+ }
+
+ return APR_SUCCESS;
+}
+
+static apr_status_t read_chunked_trailers(http_ctx_t *ctx, ap_filter_t *f,
+ apr_bucket_brigade *b, int merge)
+{
+ int rv;
+ apr_bucket *e;
+ request_rec *r = f->r;
+ apr_table_t *saved_headers_in = r->headers_in;
+ int saved_status = r->status;
+
+ r->status = HTTP_OK;
+ r->headers_in = r->trailers_in;
+ apr_table_clear(r->headers_in);
+ ap_get_mime_headers(r);
+
+ if(r->status == HTTP_OK) {
+ r->status = saved_status;
+ e = apr_bucket_eos_create(f->c->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(b, e);
+ ctx->eos_sent = 1;
+ rv = APR_SUCCESS;
+ }
+ else {
+ const char *error_notes = apr_table_get(r->notes,
+ "error-notes");
+ ap_log_rerror(APLOG_MARK, APLOG_INFO, 0, r, APLOGNO(02656)
+ "Error while reading HTTP trailer: %i%s%s",
+ r->status, error_notes ? ": " : "",
+ error_notes ? error_notes : "");
+ rv = APR_EINVAL;
+ }
+
+ if(!merge) {
+ r->headers_in = saved_headers_in;
+ }
+ else {
+ r->headers_in = apr_table_overlay(r->pool, saved_headers_in,
+ r->trailers_in);
+ }
+
+ return rv;
+}
+
+/* This is the HTTP_INPUT filter for HTTP requests and responses from
+ * proxied servers (mod_proxy). It handles chunked and content-length
+ * bodies. This can only be inserted/used after the headers
+ * are successfully parsed.
+ */
+apr_status_t ap_http_filter(ap_filter_t *f, apr_bucket_brigade *b,
+ ap_input_mode_t mode, apr_read_type_e block,
+ apr_off_t readbytes)
+{
+ core_server_config *conf =
+ (core_server_config *) ap_get_module_config(f->r->server->module_config,
+ &core_module);
+ int strict = (conf->http_conformance != AP_HTTP_CONFORMANCE_UNSAFE);
+ apr_bucket *e;
+ http_ctx_t *ctx = f->ctx;
+ apr_status_t rv;
+ int http_error = HTTP_REQUEST_ENTITY_TOO_LARGE;
+ int again;
+
+ /* just get out of the way of things we don't want. */
+ if (mode != AP_MODE_READBYTES && mode != AP_MODE_GETLINE) {
+ return ap_get_brigade(f->next, b, mode, block, readbytes);
+ }
+
+ if (!ctx) {
+ const char *tenc, *lenp;
+ f->ctx = ctx = apr_pcalloc(f->r->pool, sizeof(*ctx));
+ ctx->state = BODY_NONE;
+ ctx->bb = apr_brigade_create(f->r->pool, f->c->bucket_alloc);
+
+ /* LimitRequestBody does not apply to proxied responses.
+ * Consider implementing this check in its own filter.
+ * Would adding a directive to limit the size of proxied
+ * responses be useful?
+ */
+ if (!f->r->proxyreq) {
+ ctx->limit = ap_get_limit_req_body(f->r);
+ }
+ else {
+ ctx->limit = 0;
+ }
+
+ tenc = apr_table_get(f->r->headers_in, "Transfer-Encoding");
+ lenp = apr_table_get(f->r->headers_in, "Content-Length");
+
+ if (tenc) {
+ if (ap_is_chunked(f->r->pool, tenc)) {
+ ctx->state = BODY_CHUNK;
+ }
+ else if (f->r->proxyreq == PROXYREQ_RESPONSE) {
+ /* http://tools.ietf.org/html/draft-ietf-httpbis-p1-messaging-23
+ * Section 3.3.3.3: "If a Transfer-Encoding header field is
+ * present in a response and the chunked transfer coding is not
+ * the final encoding, the message body length is determined by
+ * reading the connection until it is closed by the server."
+ */
+ ap_log_rerror(APLOG_MARK, APLOG_INFO, 0, f->r, APLOGNO(02555)
+ "Unknown Transfer-Encoding: %s; "
+ "using read-until-close", tenc);
+ tenc = NULL;
+ }
+ else {
+ /* Something that isn't an HTTP request, unless some future
+ * edition defines new transfer encodings, is unsupported.
+ */
+ ap_log_rerror(APLOG_MARK, APLOG_INFO, 0, f->r, APLOGNO(01585)
+ "Unknown Transfer-Encoding: %s", tenc);
+ return bail_out_on_error(ctx, f, HTTP_BAD_REQUEST);
+ }
+ lenp = NULL;
+ }
+ if (lenp) {
+ ctx->state = BODY_LENGTH;
+
+ /* Protects against over/underflow, non-digit chars in the
+ * string, leading plus/minus signs, trailing characters and
+ * a negative number.
+ */
+ if (!ap_parse_strict_length(&ctx->remaining, lenp)) {
+ ctx->remaining = 0;
+ ap_log_rerror(APLOG_MARK, APLOG_INFO, 0, f->r, APLOGNO(01587)
+ "Invalid Content-Length");
+
+ return bail_out_on_error(ctx, f, HTTP_BAD_REQUEST);
+ }
+
+ /* If we have a limit in effect and we know the C-L ahead of
+ * time, stop it here if it is invalid.
+ */
+ if (ctx->limit && ctx->limit < ctx->remaining) {
+ ap_log_rerror(APLOG_MARK, APLOG_INFO, 0, f->r, APLOGNO(01588)
+ "Requested content-length of %" APR_OFF_T_FMT
+ " is larger than the configured limit"
+ " of %" APR_OFF_T_FMT, ctx->remaining, ctx->limit);
+ return bail_out_on_error(ctx, f, HTTP_REQUEST_ENTITY_TOO_LARGE);
+ }
+ }
+
+ /* If we don't have a request entity indicated by the headers, EOS.
+ * (BODY_NONE is a valid intermediate state due to trailers,
+ * but it isn't a valid starting state.)
+ *
+ * RFC 2616 Section 4.4 note 5 states that connection-close
+ * is invalid for a request entity - request bodies must be
+ * denoted by C-L or T-E: chunked.
+ *
+ * Note that since the proxy uses this filter to handle the
+ * proxied *response*, proxy responses MUST be exempt.
+ */
+ if (ctx->state == BODY_NONE && f->r->proxyreq != PROXYREQ_RESPONSE) {
+ e = apr_bucket_eos_create(f->c->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(b, e);
+ ctx->eos_sent = 1;
+ return APR_SUCCESS;
+ }
+ }
+
+ /* Since we're about to read data, send 100-Continue if needed.
+ * Only valid on chunked and C-L bodies where the C-L is > 0.
+ *
+ * If the read is to be nonblocking, though, the caller may not want to
+ * handle this just now (e.g. mod_proxy_http) and is prepared to read
+ * nothing if the client is really waiting for the 100 (Continue); so we
+ * don't send it now and instead wait for a later blocking read.
+ *
+ * In any case, even if r->expecting remains set at the end of the
+ * request handling, ap_set_keepalive() will finally do the right
+ * thing (i.e. "Connection: close" the connection).
+ */
+ if (block == APR_BLOCK_READ
+ && (ctx->state == BODY_CHUNK
+ || (ctx->state == BODY_LENGTH && ctx->remaining > 0))
+ && f->r->expecting_100 && f->r->proto_num >= HTTP_VERSION(1,1)
+ && !(ctx->eos_sent || f->r->eos_sent || f->r->bytes_sent)) {
+ if (!ap_is_HTTP_SUCCESS(f->r->status)) {
+ ctx->state = BODY_NONE;
+ ctx->eos_sent = 1; /* send EOS below */
+ }
+ else if (!ctx->seen_data) {
+ int saved_status = f->r->status;
+ const char *saved_status_line = f->r->status_line;
+ f->r->status = HTTP_CONTINUE;
+ f->r->status_line = NULL;
+ ap_send_interim_response(f->r, 0);
+ AP_DEBUG_ASSERT(!f->r->expecting_100);
+ f->r->status_line = saved_status_line;
+ f->r->status = saved_status;
+ }
+ else {
+ /* https://tools.ietf.org/html/rfc7231#section-5.1.1
+ * A server MAY omit sending a 100 (Continue) response if it
+ * has already received some or all of the message body for
+ * the corresponding request [...]
+ */
+ f->r->expecting_100 = 0;
+ }
+ }
+
+ /* sanity check in case we're read twice */
+ if (ctx->eos_sent) {
+ e = apr_bucket_eos_create(f->c->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(b, e);
+ return APR_SUCCESS;
+ }
+
+ do {
+ apr_brigade_cleanup(b);
+ again = 0; /* until further notice */
+
+ /* read and handle the brigade */
+ switch (ctx->state) {
+ case BODY_CHUNK:
+ case BODY_CHUNK_PART:
+ case BODY_CHUNK_EXT:
+ case BODY_CHUNK_CR:
+ case BODY_CHUNK_LF:
+ case BODY_CHUNK_END:
+ case BODY_CHUNK_END_LF: {
+
+ rv = ap_get_brigade(f->next, b, AP_MODE_GETLINE, block, 0);
+
+ /* for timeout */
+ if (block == APR_NONBLOCK_READ
+ && ((rv == APR_SUCCESS && APR_BRIGADE_EMPTY(b))
+ || (APR_STATUS_IS_EAGAIN(rv)))) {
+ return APR_EAGAIN;
+ }
+
+ if (rv == APR_EOF) {
+ return APR_INCOMPLETE;
+ }
+
+ if (rv != APR_SUCCESS) {
+ return rv;
+ }
+
+ e = APR_BRIGADE_FIRST(b);
+ while (e != APR_BRIGADE_SENTINEL(b)) {
+ const char *buffer;
+ apr_size_t len;
+
+ if (!APR_BUCKET_IS_METADATA(e)) {
+ int parsing = 0;
+
+ rv = apr_bucket_read(e, &buffer, &len, APR_BLOCK_READ);
+ if (rv == APR_SUCCESS) {
+ parsing = 1;
+ if (len > 0) {
+ ctx->seen_data = 1;
+ }
+ rv = parse_chunk_size(ctx, buffer, len,
+ f->r->server->limit_req_fieldsize, strict);
+ }
+ if (rv != APR_SUCCESS) {
+ ap_log_rerror(APLOG_MARK, APLOG_INFO, rv, f->r, APLOGNO(01590)
+ "Error reading/parsing chunk %s ",
+ (APR_ENOSPC == rv) ? "(overflow)" : "");
+ if (parsing) {
+ if (rv != APR_ENOSPC) {
+ http_error = HTTP_BAD_REQUEST;
+ }
+ return bail_out_on_error(ctx, f, http_error);
+ }
+ return rv;
+ }
+ }
+
+ apr_bucket_delete(e);
+ e = APR_BRIGADE_FIRST(b);
+ }
+ again = 1; /* come around again */
+
+ if (ctx->state == BODY_CHUNK_TRAILER) {
+ /* Treat UNSET as DISABLE - trailers aren't merged by default */
+ return read_chunked_trailers(ctx, f, b,
+ conf->merge_trailers == AP_MERGE_TRAILERS_ENABLE);
+ }
+
+ break;
+ }
+ case BODY_NONE:
+ case BODY_LENGTH:
+ case BODY_CHUNK_DATA: {
+
+ /* Ensure that the caller can not go over our boundary point. */
+ if (ctx->state != BODY_NONE && ctx->remaining < readbytes) {
+ readbytes = ctx->remaining;
+ }
+ if (readbytes > 0) {
+ apr_off_t totalread;
+
+ rv = ap_get_brigade(f->next, b, mode, block, readbytes);
+
+ /* for timeout */
+ if (block == APR_NONBLOCK_READ
+ && ((rv == APR_SUCCESS && APR_BRIGADE_EMPTY(b))
+ || (APR_STATUS_IS_EAGAIN(rv)))) {
+ return APR_EAGAIN;
+ }
+
+ if (rv == APR_EOF && ctx->state != BODY_NONE
+ && ctx->remaining > 0) {
+ return APR_INCOMPLETE;
+ }
+
+ if (rv != APR_SUCCESS) {
+ return rv;
+ }
+
+ /* How many bytes did we just read? */
+ apr_brigade_length(b, 0, &totalread);
+ if (totalread > 0) {
+ ctx->seen_data = 1;
+ }
+
+ /* If this happens, we have a bucket of unknown length. Die because
+ * it means our assumptions have changed. */
+ AP_DEBUG_ASSERT(totalread >= 0);
+
+ if (ctx->state != BODY_NONE) {
+ ctx->remaining -= totalread;
+ if (ctx->remaining > 0) {
+ e = APR_BRIGADE_LAST(b);
+ if (APR_BUCKET_IS_EOS(e)) {
+ apr_bucket_delete(e);
+ return APR_INCOMPLETE;
+ }
+ }
+ else if (ctx->state == BODY_CHUNK_DATA) {
+ /* next chunk please */
+ ctx->state = BODY_CHUNK_END;
+ ctx->chunk_used = 0;
+ }
+ }
+
+ /* We have a limit in effect. */
+ if (ctx->limit) {
+ /* FIXME: Note that we might get slightly confused on
+ * chunked inputs as we'd need to compensate for the chunk
+ * lengths which may not really count. This seems to be up
+ * for interpretation.
+ */
+ ctx->limit_used += totalread;
+ if (ctx->limit < ctx->limit_used) {
+ ap_log_rerror(APLOG_MARK, APLOG_INFO, 0, f->r,
+ APLOGNO(01591) "Read content length of "
+ "%" APR_OFF_T_FMT " is larger than the "
+ "configured limit of %" APR_OFF_T_FMT,
+ ctx->limit_used, ctx->limit);
+ return bail_out_on_error(ctx, f,
+ HTTP_REQUEST_ENTITY_TOO_LARGE);
+ }
+ }
+ }
+
+ /* If we have no more bytes remaining on a C-L request,
+ * save the caller a round trip to discover EOS.
+ */
+ if (ctx->state == BODY_LENGTH && ctx->remaining == 0) {
+ e = apr_bucket_eos_create(f->c->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(b, e);
+ ctx->eos_sent = 1;
+ }
+
+ break;
+ }
+ case BODY_CHUNK_TRAILER: {
+
+ rv = ap_get_brigade(f->next, b, mode, block, readbytes);
+
+ /* for timeout */
+ if (block == APR_NONBLOCK_READ
+ && ((rv == APR_SUCCESS && APR_BRIGADE_EMPTY(b))
+ || (APR_STATUS_IS_EAGAIN(rv)))) {
+ return APR_EAGAIN;
+ }
+
+ if (rv != APR_SUCCESS) {
+ return rv;
+ }
+
+ break;
+ }
+ default: {
+ /* Should not happen */
+ ap_log_rerror(APLOG_MARK, APLOG_INFO, 0, f->r, APLOGNO(02901)
+ "Unexpected body state (%i)", (int)ctx->state);
+ return APR_EGENERAL;
+ }
+ }
+
+ } while (again);
+
+ return APR_SUCCESS;
+}
+
+struct check_header_ctx {
+ request_rec *r;
+ int strict;
+};
+
+/* check a single header, to be used with apr_table_do() */
+static int check_header(struct check_header_ctx *ctx,
+ const char *name, const char **val)
+{
+ const char *pos, *end;
+ char *dst = NULL;
+
+ if (name[0] == '\0') {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, ctx->r, APLOGNO(02428)
+ "Empty response header name, aborting request");
+ return 0;
+ }
+
+ if (ctx->strict) {
+ end = ap_scan_http_token(name);
+ }
+ else {
+ end = ap_scan_vchar_obstext(name);
+ }
+ if (*end) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, ctx->r, APLOGNO(02429)
+ "Response header name '%s' contains invalid "
+ "characters, aborting request",
+ name);
+ return 0;
+ }
+
+ for (pos = *val; *pos; pos = end) {
+ end = ap_scan_http_field_content(pos);
+ if (*end) {
+ if (end[0] != CR || end[1] != LF || (end[2] != ' ' &&
+ end[2] != '\t')) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, ctx->r, APLOGNO(02430)
+ "Response header '%s' value of '%s' contains "
+ "invalid characters, aborting request",
+ name, pos);
+ return 0;
+ }
+ if (!dst) {
+ *val = dst = apr_palloc(ctx->r->pool, strlen(*val) + 1);
+ }
+ }
+ if (dst) {
+ memcpy(dst, pos, end - pos);
+ dst += end - pos;
+ if (*end) {
+ /* skip folding and replace with a single space */
+ end += 3 + strspn(end + 3, "\t ");
+ *dst++ = ' ';
+ }
+ }
+ }
+ if (dst) {
+ *dst = '\0';
+ }
+ return 1;
+}
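+
+/* For example (illustrative value): a response header value received with
+ * an obsolete line fold, "foo=bar;" CR LF " path=/", is rewritten in place
+ * by the loop above to "foo=bar; path=/"; the CRLF and the leading
+ * whitespace of the continuation line collapse into a single space, while
+ * any other control character in the value aborts the request.
+ */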
+
+static int check_headers_table(apr_table_t *t, struct check_header_ctx *ctx)
+{
+ const apr_array_header_t *headers = apr_table_elts(t);
+ apr_table_entry_t *header;
+ int i;
+
+ for (i = 0; i < headers->nelts; ++i) {
+ header = &APR_ARRAY_IDX(headers, i, apr_table_entry_t);
+ if (!header->key) {
+ continue;
+ }
+ if (!check_header(ctx, header->key, (const char **)&header->val)) {
+ return 0;
+ }
+ }
+ return 1;
+}
+
+/**
+ * Check headers for HTTP conformance
+ * @return 1 if ok, 0 if bad
+ */
+static APR_INLINE int check_headers(request_rec *r)
+{
+ struct check_header_ctx ctx;
+ core_server_config *conf =
+ ap_get_core_module_config(r->server->module_config);
+
+ ctx.r = r;
+ ctx.strict = (conf->http_conformance != AP_HTTP_CONFORMANCE_UNSAFE);
+ return check_headers_table(r->headers_out, &ctx) &&
+ check_headers_table(r->err_headers_out, &ctx);
+}
+
+static int check_headers_recursion(request_rec *r)
+{
+ void *check = NULL;
+ apr_pool_userdata_get(&check, "check_headers_recursion", r->pool);
+ if (check) {
+ return 1;
+ }
+ apr_pool_userdata_setn("true", "check_headers_recursion", NULL, r->pool);
+ return 0;
+}
+
+typedef struct header_struct {
+ apr_pool_t *pool;
+ apr_bucket_brigade *bb;
+} header_struct;
+
+/* Send a single HTTP header field to the client. Note that this function
+ * is used in calls to apr_table_do(), so don't change its interface.
+ * It returns true unless there was a write error of some kind.
+ */
+static int form_header_field(header_struct *h,
+ const char *fieldname, const char *fieldval)
+{
+#if APR_CHARSET_EBCDIC
+ char *headfield;
+ apr_size_t len;
+
+ headfield = apr_pstrcat(h->pool, fieldname, ": ", fieldval, CRLF, NULL);
+ len = strlen(headfield);
+
+ ap_xlate_proto_to_ascii(headfield, len);
+ apr_brigade_write(h->bb, NULL, NULL, headfield, len);
+#else
+ struct iovec vec[4];
+ struct iovec *v = vec;
+ v->iov_base = (void *)fieldname;
+ v->iov_len = strlen(fieldname);
+ v++;
+ v->iov_base = ": ";
+ v->iov_len = sizeof(": ") - 1;
+ v++;
+ v->iov_base = (void *)fieldval;
+ v->iov_len = strlen(fieldval);
+ v++;
+ v->iov_base = CRLF;
+ v->iov_len = sizeof(CRLF) - 1;
+ apr_brigade_writev(h->bb, NULL, NULL, vec, 4);
+#endif /* !APR_CHARSET_EBCDIC */
+ return 1;
+}
+
+/* This routine is called by apr_table_do and merges all instances of
+ * the passed field values into a single array that will be further
+ * processed by some later routine. Originally intended to help split
+ * and recombine multiple Vary fields, though it is generic to any field
+ * consisting of comma/space-separated tokens.
+ */
+static int uniq_field_values(void *d, const char *key, const char *val)
+{
+ apr_array_header_t *values;
+ char *start;
+ char *e;
+ char **strpp;
+ int i;
+
+ values = (apr_array_header_t *)d;
+
+ e = apr_pstrdup(values->pool, val);
+
+ do {
+ /* Find a non-empty fieldname */
+
+ while (*e == ',' || apr_isspace(*e)) {
+ ++e;
+ }
+ if (*e == '\0') {
+ break;
+ }
+ start = e;
+ while (*e != '\0' && *e != ',' && !apr_isspace(*e)) {
+ ++e;
+ }
+ if (*e != '\0') {
+ *e++ = '\0';
+ }
+
+ /* Now add it to values if it isn't already represented.
+ * Could be replaced by an ap_array_strcasecmp() if we had one.
+ */
+ for (i = 0, strpp = (char **) values->elts; i < values->nelts;
+ ++i, ++strpp) {
+ if (*strpp && ap_cstr_casecmp(*strpp, start) == 0) {
+ break;
+ }
+ }
+ if (i == values->nelts) { /* if not found */
+ *(char **)apr_array_push(values) = start;
+ }
+ } while (*e != '\0');
+
+ return 1;
+}
+
+/*
+ * Since some clients choke violently on multiple Vary fields, or
+ * Vary fields with duplicate tokens, combine any multiples and remove
+ * any duplicates.
+ */
+static void fixup_vary(request_rec *r)
+{
+ apr_array_header_t *varies;
+
+ varies = apr_array_make(r->pool, 5, sizeof(char *));
+
+ /* Extract all Vary fields from the headers_out, separate each into
+ * its comma-separated fieldname values, and then add them to varies
+ * if not already present in the array.
+ */
+ apr_table_do(uniq_field_values, varies, r->headers_out, "Vary", NULL);
+
+ /* If we found any, replace old Vary fields with unique-ified value */
+
+ if (varies->nelts > 0) {
+ apr_table_setn(r->headers_out, "Vary",
+ apr_array_pstrcat(r->pool, varies, ','));
+ }
+}
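+
+/* For instance (illustrative values): the two response fields
+ *
+ *   Vary: Accept-Encoding
+ *   Vary: accept-encoding, User-Agent
+ *
+ * collapse into the single field "Vary: Accept-Encoding,User-Agent"; the
+ * case-insensitive duplicate is dropped and the first spelling is kept.
+ */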
+
+/* Send a request's HTTP response headers to the client.
+ */
+static apr_status_t send_all_header_fields(header_struct *h,
+ const request_rec *r)
+{
+ const apr_array_header_t *elts;
+ const apr_table_entry_t *t_elt;
+ const apr_table_entry_t *t_end;
+ struct iovec *vec;
+ struct iovec *vec_next;
+
+ elts = apr_table_elts(r->headers_out);
+ if (elts->nelts == 0) {
+ return APR_SUCCESS;
+ }
+ t_elt = (const apr_table_entry_t *)(elts->elts);
+ t_end = t_elt + elts->nelts;
+ vec = (struct iovec *)apr_palloc(h->pool, 4 * elts->nelts *
+ sizeof(struct iovec));
+ vec_next = vec;
+
+ /* For each field, generate
+ * name ": " value CRLF
+ */
+ do {
+ vec_next->iov_base = (void*)(t_elt->key);
+ vec_next->iov_len = strlen(t_elt->key);
+ vec_next++;
+ vec_next->iov_base = ": ";
+ vec_next->iov_len = sizeof(": ") - 1;
+ vec_next++;
+ vec_next->iov_base = (void*)(t_elt->val);
+ vec_next->iov_len = strlen(t_elt->val);
+ vec_next++;
+ vec_next->iov_base = CRLF;
+ vec_next->iov_len = sizeof(CRLF) - 1;
+ vec_next++;
+ t_elt++;
+ } while (t_elt < t_end);
+
+ if (APLOGrtrace4(r)) {
+ t_elt = (const apr_table_entry_t *)(elts->elts);
+ do {
+ ap_log_rerror(APLOG_MARK, APLOG_TRACE4, 0, r, " %s: %s",
+ ap_escape_logitem(r->pool, t_elt->key),
+ ap_escape_logitem(r->pool, t_elt->val));
+ t_elt++;
+ } while (t_elt < t_end);
+ }
+
+#if APR_CHARSET_EBCDIC
+ {
+ apr_size_t len;
+ char *tmp = apr_pstrcatv(r->pool, vec, vec_next - vec, &len);
+ ap_xlate_proto_to_ascii(tmp, len);
+ return apr_brigade_write(h->bb, NULL, NULL, tmp, len);
+ }
+#else
+ return apr_brigade_writev(h->bb, NULL, NULL, vec, vec_next - vec);
+#endif
+}
+
+/* Confirm that the status line is well-formed and matches r->status.
+ * If they don't match, a filter may have negated the status line set by a
+ * handler.
+ * Zap r->status_line if bad.
+ */
+static apr_status_t validate_status_line(request_rec *r)
+{
+ char *end;
+
+ if (r->status_line) {
+ int len = strlen(r->status_line);
+ if (len < 3
+ || apr_strtoi64(r->status_line, &end, 10) != r->status
+ || (end - 3) != r->status_line
+ || (len >= 4 && ! apr_isspace(r->status_line[3]))) {
+ r->status_line = NULL;
+ return APR_EGENERAL;
+ }
+ /* Since we passed the above check, we know that a length of three
+ * means the status line is only a 3-digit numeric HTTP status.
+ * RFC 2616 mandates a trailing space, so add it.
+ */
+ if (len == 3) {
+ r->status_line = apr_pstrcat(r->pool, r->status_line, " ", NULL);
+ return APR_EGENERAL;
+ }
+ return APR_SUCCESS;
+ }
+ return APR_EGENERAL;
+}
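+
+/* Examples (illustrative, assuming r->status == 200):
+ *
+ *   r->status_line "200 OK"         kept as is, returns APR_SUCCESS
+ *   r->status_line "200"            padded to "200 ", returns APR_EGENERAL so
+ *                                   the caller may prefer its own reason phrase
+ *   r->status_line "404 Not Found"  mismatch, zapped to NULL, APR_EGENERAL
+ */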
+
+/*
+ * Determine the protocol to use for the response. Potentially downgrade
+ * to HTTP/1.0 in some situations and/or turn off keepalives.
+ *
+ * Also prepare r->status_line.
+ */
+static void basic_http_header_check(request_rec *r,
+ const char **protocol)
+{
+ apr_status_t rv;
+
+ if (r->assbackwards) {
+ /* no such thing as a response protocol */
+ return;
+ }
+
+ rv = validate_status_line(r);
+
+ if (!r->status_line) {
+ r->status_line = ap_get_status_line(r->status);
+ } else if (rv != APR_SUCCESS) {
+ /* Status line is OK but our own reason phrase
+ * would be preferred if defined
+ */
+ const char *tmp = ap_get_status_line(r->status);
+ if (!strncmp(tmp, r->status_line, 3)) {
+ r->status_line = tmp;
+ }
+ }
+
+ /* Note that we must downgrade before checking for force responses. */
+ if (r->proto_num > HTTP_VERSION(1,0)
+ && apr_table_get(r->subprocess_env, "downgrade-1.0")) {
+ r->proto_num = HTTP_VERSION(1,0);
+ }
+
+ /* kludge around broken browsers when indicated by force-response-1.0
+ */
+ if (r->proto_num == HTTP_VERSION(1,0)
+ && apr_table_get(r->subprocess_env, "force-response-1.0")) {
+ *protocol = "HTTP/1.0";
+ r->connection->keepalive = AP_CONN_CLOSE;
+ }
+ else {
+ *protocol = AP_SERVER_PROTOCOL;
+ }
+
+}
+
+/* fill "bb" with a barebones/initial HTTP response header */
+static void basic_http_header(request_rec *r, apr_bucket_brigade *bb,
+ const char *protocol)
+{
+ char *date = NULL;
+ const char *proxy_date = NULL;
+ const char *server = NULL;
+ const char *us = ap_get_server_banner();
+ header_struct h;
+ struct iovec vec[4];
+
+ if (r->assbackwards) {
+ /* there are no headers to send */
+ return;
+ }
+
+ /* Output the HTTP/1.x Status-Line and the Date and Server fields */
+
+ vec[0].iov_base = (void *)protocol;
+ vec[0].iov_len = strlen(protocol);
+ vec[1].iov_base = (void *)" ";
+ vec[1].iov_len = sizeof(" ") - 1;
+ vec[2].iov_base = (void *)(r->status_line);
+ vec[2].iov_len = strlen(r->status_line);
+ vec[3].iov_base = (void *)CRLF;
+ vec[3].iov_len = sizeof(CRLF) - 1;
+#if APR_CHARSET_EBCDIC
+ {
+ char *tmp;
+ apr_size_t len;
+ tmp = apr_pstrcatv(r->pool, vec, 4, &len);
+ ap_xlate_proto_to_ascii(tmp, len);
+ apr_brigade_write(bb, NULL, NULL, tmp, len);
+ }
+#else
+ apr_brigade_writev(bb, NULL, NULL, vec, 4);
+#endif
+
+ h.pool = r->pool;
+ h.bb = bb;
+
+ /*
+ * keep the set-by-proxy Server and Date headers; otherwise
+ * generate a new Server header / Date header
+ */
+ if (r->proxyreq != PROXYREQ_NONE) {
+ proxy_date = apr_table_get(r->headers_out, "Date");
+ if (!proxy_date) {
+ /*
+ * proxy_date needs to be const. So use date for the creation of
+ * our own Date header and pass it over to proxy_date later to
+ * avoid a compiler warning.
+ */
+ date = apr_palloc(r->pool, APR_RFC822_DATE_LEN);
+ ap_recent_rfc822_date(date, r->request_time);
+ }
+ server = apr_table_get(r->headers_out, "Server");
+ }
+ else {
+ date = apr_palloc(r->pool, APR_RFC822_DATE_LEN);
+ ap_recent_rfc822_date(date, r->request_time);
+ }
+
+ form_header_field(&h, "Date", proxy_date ? proxy_date : date );
+
+ if (!server && *us)
+ server = us;
+ if (server)
+ form_header_field(&h, "Server", server);
+
+ if (APLOGrtrace3(r)) {
+ ap_log_rerror(APLOG_MARK, APLOG_TRACE3, 0, r,
+ "Response sent with status %d%s",
+ r->status,
+ APLOGrtrace4(r) ? ", headers:" : "");
+
+ /*
+ * Date and Server are less interesting; use TRACE5 for them while
+ * using TRACE4 for the other headers.
+ */
+ ap_log_rerror(APLOG_MARK, APLOG_TRACE5, 0, r, " Date: %s",
+ proxy_date ? proxy_date : date );
+ if (server)
+ ap_log_rerror(APLOG_MARK, APLOG_TRACE5, 0, r, " Server: %s",
+ server);
+ }
+
+
+ /* unset so we don't send them again */
+ apr_table_unset(r->headers_out, "Date"); /* Avoid bogosity */
+ if (server) {
+ apr_table_unset(r->headers_out, "Server");
+ }
+}
+
+AP_DECLARE(void) ap_basic_http_header(request_rec *r, apr_bucket_brigade *bb)
+{
+ const char *protocol = NULL;
+
+ basic_http_header_check(r, &protocol);
+ basic_http_header(r, bb, protocol);
+}
+
+static void terminate_header(apr_bucket_brigade *bb)
+{
+ char crlf[] = CRLF;
+ apr_size_t buflen;
+
+ buflen = strlen(crlf);
+ ap_xlate_proto_to_ascii(crlf, buflen);
+ apr_brigade_write(bb, NULL, NULL, crlf, buflen);
+}
+
+AP_DECLARE_NONSTD(int) ap_send_http_trace(request_rec *r)
+{
+ core_server_config *conf;
+ int rv;
+ apr_bucket_brigade *bb;
+ header_struct h;
+ apr_bucket *b;
+ int body;
+ char *bodyread = NULL, *bodyoff;
+ apr_size_t bodylen = 0;
+ apr_size_t bodybuf;
+ long res = -1; /* init to avoid gcc -Wall warning */
+
+ if (r->method_number != M_TRACE) {
+ return DECLINED;
+ }
+
+ /* Get the original request */
+ while (r->prev) {
+ r = r->prev;
+ }
+ conf = ap_get_core_module_config(r->server->module_config);
+
+ if (conf->trace_enable == AP_TRACE_DISABLE) {
+ apr_table_setn(r->notes, "error-notes",
+ "TRACE denied by server configuration");
+ return HTTP_METHOD_NOT_ALLOWED;
+ }
+
+ if (conf->trace_enable == AP_TRACE_EXTENDED)
+ /* XXX: should be = REQUEST_CHUNKED_PASS */
+ body = REQUEST_CHUNKED_DECHUNK;
+ else
+ body = REQUEST_NO_BODY;
+
+ if ((rv = ap_setup_client_block(r, body))) {
+ if (rv == HTTP_REQUEST_ENTITY_TOO_LARGE)
+ apr_table_setn(r->notes, "error-notes",
+ "TRACE with a request body is not allowed");
+ return rv;
+ }
+
+ if (ap_should_client_block(r)) {
+
+ if (r->remaining > 0) {
+ if (r->remaining > 65536) {
+ apr_table_setn(r->notes, "error-notes",
+ "Extended TRACE request bodies cannot exceed 64k\n");
+ return HTTP_REQUEST_ENTITY_TOO_LARGE;
+ }
+ /* always 32 extra bytes to catch chunk header exceptions */
+ bodybuf = (apr_size_t)r->remaining + 32;
+ }
+ else {
+ /* Add an extra 8192 for chunk headers */
+ bodybuf = 73730;
+ }
+
+ bodyoff = bodyread = apr_palloc(r->pool, bodybuf);
+
+ /* only while we have enough for a chunked header */
+ while ((!bodylen || bodybuf >= 32) &&
+ (res = ap_get_client_block(r, bodyoff, bodybuf)) > 0) {
+ bodylen += res;
+ bodybuf -= res;
+ bodyoff += res;
+ }
+ if (res > 0 && bodybuf < 32) {
+ /* discard_rest_of_request_body into our buffer */
+ while (ap_get_client_block(r, bodyread, bodylen) > 0)
+ ;
+ apr_table_setn(r->notes, "error-notes",
+ "Extended TRACE request bodies cannot exceed 64k\n");
+ return HTTP_REQUEST_ENTITY_TOO_LARGE;
+ }
+
+ if (res < 0) {
+ return HTTP_BAD_REQUEST;
+ }
+ }
+
+ ap_set_content_type(r, "message/http");
+
+ /* Now we recreate the request, and echo it back */
+
+ bb = apr_brigade_create(r->pool, r->connection->bucket_alloc);
+#if APR_CHARSET_EBCDIC
+ {
+ char *tmp;
+ apr_size_t len;
+ len = strlen(r->the_request);
+ tmp = apr_pmemdup(r->pool, r->the_request, len);
+ ap_xlate_proto_to_ascii(tmp, len);
+ apr_brigade_putstrs(bb, NULL, NULL, tmp, CRLF_ASCII, NULL);
+ }
+#else
+ apr_brigade_putstrs(bb, NULL, NULL, r->the_request, CRLF, NULL);
+#endif
+ h.pool = r->pool;
+ h.bb = bb;
+ apr_table_do((int (*) (void *, const char *, const char *))
+ form_header_field, (void *) &h, r->headers_in, NULL);
+ apr_brigade_puts(bb, NULL, NULL, CRLF_ASCII);
+
+ /* If configured to accept a body, echo the body */
+ if (bodylen) {
+ b = apr_bucket_pool_create(bodyread, bodylen,
+ r->pool, bb->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(bb, b);
+ }
+
+ ap_pass_brigade(r->output_filters, bb);
+
+ return DONE;
+}
+
+typedef struct header_filter_ctx {
+ int headers_sent;
+} header_filter_ctx;
+
+AP_CORE_DECLARE_NONSTD(apr_status_t) ap_http_header_filter(ap_filter_t *f,
+ apr_bucket_brigade *b)
+{
+ request_rec *r = f->r;
+ conn_rec *c = r->connection;
+ const char *clheader;
+ int header_only = (r->header_only || AP_STATUS_IS_HEADER_ONLY(r->status));
+ const char *protocol = NULL;
+ apr_bucket *e;
+ apr_bucket_brigade *b2;
+ header_struct h;
+ header_filter_ctx *ctx = f->ctx;
+ const char *ctype;
+ ap_bucket_error *eb = NULL;
+ apr_status_t rv = APR_SUCCESS;
+ int recursive_error = 0;
+
+ AP_DEBUG_ASSERT(!r->main);
+
+ if (!ctx) {
+ ctx = f->ctx = apr_pcalloc(r->pool, sizeof(header_filter_ctx));
+ }
+ else if (ctx->headers_sent) {
+ /* Eat body if response must not have one. */
+ if (header_only) {
+ /* Next filters may still be waiting for EOS, so pass it on (alone)
+ * when encountered and be done with this filter.
+ */
+ e = APR_BRIGADE_LAST(b);
+ if (e != APR_BRIGADE_SENTINEL(b) && APR_BUCKET_IS_EOS(e)) {
+ APR_BUCKET_REMOVE(e);
+ apr_brigade_cleanup(b);
+ APR_BRIGADE_INSERT_HEAD(b, e);
+ ap_remove_output_filter(f);
+ rv = ap_pass_brigade(f->next, b);
+ }
+ apr_brigade_cleanup(b);
+ return rv;
+ }
+ }
+
+ for (e = APR_BRIGADE_FIRST(b);
+ e != APR_BRIGADE_SENTINEL(b);
+ e = APR_BUCKET_NEXT(e))
+ {
+ if (AP_BUCKET_IS_ERROR(e) && !eb) {
+ eb = e->data;
+ continue;
+ }
+ /*
+ * If we see an EOC bucket, it is a signal that we should get out
+ * of the way and do nothing.
+ */
+ if (AP_BUCKET_IS_EOC(e)) {
+ ap_remove_output_filter(f);
+ return ap_pass_brigade(f->next, b);
+ }
+ }
+
+ if (!ctx->headers_sent && !check_headers(r)) {
+ /* We may come back here from ap_die() below,
+ * so clear anything from this response.
+ */
+ apr_table_clear(r->headers_out);
+ apr_table_clear(r->err_headers_out);
+ apr_brigade_cleanup(b);
+
+ /* Don't recall ap_die() if we come back here (from its own internal
+ * redirect or error response), otherwise we can end up in infinite
+ * recursion; better fall through with 500, minimal headers and an
+ * empty body (EOS only).
+ */
+ if (!check_headers_recursion(r)) {
+ ap_die(HTTP_INTERNAL_SERVER_ERROR, r);
+ return AP_FILTER_ERROR;
+ }
+ r->status = HTTP_INTERNAL_SERVER_ERROR;
+ e = ap_bucket_eoc_create(c->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(b, e);
+ e = apr_bucket_eos_create(c->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(b, e);
+ r->content_type = r->content_encoding = NULL;
+ r->content_languages = NULL;
+ ap_set_content_length(r, 0);
+ recursive_error = 1;
+ }
+ else if (eb) {
+ int status;
+ status = eb->status;
+ apr_brigade_cleanup(b);
+ ap_die(status, r);
+ return AP_FILTER_ERROR;
+ }
+
+ if (r->assbackwards) {
+ r->sent_bodyct = 1;
+ ap_remove_output_filter(f);
+ rv = ap_pass_brigade(f->next, b);
+ goto out;
+ }
+
+ /*
+ * Now that we are ready to send a response, we need to combine the two
+ * header field tables into a single table. If we don't do this, our
+ * later attempts to set or unset a given fieldname might be bypassed.
+ */
+ if (!apr_is_empty_table(r->err_headers_out)) {
+ r->headers_out = apr_table_overlay(r->pool, r->err_headers_out,
+ r->headers_out);
+ }
+
+ /*
+ * Remove the 'Vary' header field if the client can't handle it.
+ * Since this will have nasty effects on HTTP/1.1 caches, force
+ * the response into HTTP/1.0 mode.
+ *
+ * Note: the force-response-1.0 should come before the call to
+ * basic_http_header_check()
+ */
+ if (apr_table_get(r->subprocess_env, "force-no-vary") != NULL) {
+ apr_table_unset(r->headers_out, "Vary");
+ r->proto_num = HTTP_VERSION(1,0);
+ apr_table_setn(r->subprocess_env, "force-response-1.0", "1");
+ }
+ else {
+ fixup_vary(r);
+ }
+
+ /*
+ * Now remove any ETag response header field if earlier processing
+ * says so (such as a 'FileETag None' directive).
+ */
+ if (apr_table_get(r->notes, "no-etag") != NULL) {
+ apr_table_unset(r->headers_out, "ETag");
+ }
+
+ /* determine the protocol and whether we should use keepalives. */
+ basic_http_header_check(r, &protocol);
+ ap_set_keepalive(r);
+
+ if (AP_STATUS_IS_HEADER_ONLY(r->status)) {
+ apr_table_unset(r->headers_out, "Transfer-Encoding");
+ apr_table_unset(r->headers_out, "Content-Length");
+ r->content_type = r->content_encoding = NULL;
+ r->content_languages = NULL;
+ r->clength = r->chunked = 0;
+ }
+ else if (r->chunked) {
+ apr_table_mergen(r->headers_out, "Transfer-Encoding", "chunked");
+ apr_table_unset(r->headers_out, "Content-Length");
+ }
+
+ ctype = ap_make_content_type(r, r->content_type);
+ if (ctype) {
+ apr_table_setn(r->headers_out, "Content-Type", ctype);
+ }
+
+ if (r->content_encoding) {
+ apr_table_setn(r->headers_out, "Content-Encoding",
+ r->content_encoding);
+ }
+
+ if (!apr_is_empty_array(r->content_languages)) {
+ int i;
+ char *token;
+ char **languages = (char **)(r->content_languages->elts);
+ const char *field = apr_table_get(r->headers_out, "Content-Language");
+
+ while (field && (token = ap_get_list_item(r->pool, &field)) != NULL) {
+ for (i = 0; i < r->content_languages->nelts; ++i) {
+ if (!ap_cstr_casecmp(token, languages[i]))
+ break;
+ }
+ if (i == r->content_languages->nelts) {
+ *((char **) apr_array_push(r->content_languages)) = token;
+ }
+ }
+
+ field = apr_array_pstrcat(r->pool, r->content_languages, ',');
+ apr_table_setn(r->headers_out, "Content-Language", field);
+ }
+
+ /*
+ * Control cacheability for non-cacheable responses if not already set by
+ * some other part of the server configuration.
+ */
+ if (r->no_cache && !apr_table_get(r->headers_out, "Expires")) {
+ char *date = apr_palloc(r->pool, APR_RFC822_DATE_LEN);
+ ap_recent_rfc822_date(date, r->request_time);
+ apr_table_addn(r->headers_out, "Expires", date);
+ }
+
+ /* This is a hack, but I can't find any way around it. The idea is that
+ * we don't want to send out 0 Content-Lengths if it is a head request.
+ * This happens when modules try to outsmart the server, and return
+ * if they see a HEAD request. Apache 1.3 handlers were supposed to
+ * just return in that situation, and the core handled the HEAD. In
+ * 2.0, if a handler returns, then the core sends an EOS bucket down
+ * the filter stack, and the content-length filter computes a C-L of
+ * zero and that gets put in the headers, and we end up sending a
+ * zero C-L to the client. We can't just remove the C-L filter,
+ * because well-behaved 2.0 handlers will send their data down the stack,
+ * and we will compute a real C-L for the head request. RBB
+ */
+ if (r->header_only
+ && (clheader = apr_table_get(r->headers_out, "Content-Length"))
+ && !strcmp(clheader, "0")) {
+ apr_table_unset(r->headers_out, "Content-Length");
+ }
+
+ b2 = apr_brigade_create(r->pool, c->bucket_alloc);
+ basic_http_header(r, b2, protocol);
+
+ h.pool = r->pool;
+ h.bb = b2;
+
+ send_all_header_fields(&h, r);
+
+ terminate_header(b2);
+
+ if (header_only) {
+ e = APR_BRIGADE_LAST(b);
+ if (e != APR_BRIGADE_SENTINEL(b) && APR_BUCKET_IS_EOS(e)) {
+ APR_BUCKET_REMOVE(e);
+ APR_BRIGADE_INSERT_TAIL(b2, e);
+ ap_remove_output_filter(f);
+ }
+ apr_brigade_cleanup(b);
+ }
+
+ rv = ap_pass_brigade(f->next, b2);
+ apr_brigade_cleanup(b2);
+ ctx->headers_sent = 1;
+
+ if (rv != APR_SUCCESS || header_only) {
+ goto out;
+ }
+
+ r->sent_bodyct = 1; /* Whatever follows is real body stuff... */
+
+ if (r->chunked) {
+ /* We can't add this filter until we have already sent the headers.
+ * If we add it before this point, then the headers will be chunked
+ * as well, and that is just wrong.
+ */
+ ap_add_output_filter("CHUNK", NULL, r, r->connection);
+ }
+
+ /* Don't remove this filter until after we have added the CHUNK filter.
+ * Otherwise, f->next won't be the CHUNK filter and thus the first
+ * brigade won't be chunked properly.
+ */
+ ap_remove_output_filter(f);
+ rv = ap_pass_brigade(f->next, b);
+out:
+ if (recursive_error) {
+ return AP_FILTER_ERROR;
+ }
+ return rv;
+}
+
+/*
+ * Map specific APR codes returned by the filter stack to HTTP error
+ * codes, or the default status code provided. Use it as follows:
+ *
+ * return ap_map_http_request_error(rv, HTTP_BAD_REQUEST);
+ *
+ * If the filter has already handled the error, AP_FILTER_ERROR will
+ * be returned, which is cleanly passed through.
+ *
+ * These mappings imply that the filter stack is reading from the
+ * downstream client; the proxy will map these codes differently.
+ */
+AP_DECLARE(int) ap_map_http_request_error(apr_status_t rv, int status)
+{
+ switch (rv) {
+ case AP_FILTER_ERROR:
+ return AP_FILTER_ERROR;
+
+ case APR_ENOSPC:
+ return HTTP_REQUEST_ENTITY_TOO_LARGE;
+
+ case APR_ENOTIMPL:
+ return HTTP_NOT_IMPLEMENTED;
+
+ case APR_TIMEUP:
+ case APR_ETIMEDOUT:
+ return HTTP_REQUEST_TIME_OUT;
+
+ default:
+ return status;
+ }
+}
+
+/* In HTTP/1.1, any method can have a body. However, most GET handlers
+ * wouldn't know what to do with a request body if they received one.
+ * This helper routine tests for and reads any message body in the request,
+ * simply discarding whatever it receives. We need to do this because
+ * failing to read the request body would cause it to be interpreted
+ * as the next request on a persistent connection.
+ *
+ * Since we return an error status if the request is malformed, this
+ * routine should be called at the beginning of a no-body handler, e.g.,
+ *
+ * if ((retval = ap_discard_request_body(r)) != OK) {
+ * return retval;
+ * }
+ */
+AP_DECLARE(int) ap_discard_request_body(request_rec *r)
+{
+ int rc = OK;
+ conn_rec *c = r->connection;
+ apr_bucket_brigade *bb;
+
+ /* Sometimes we'll get in a state where the input handling has
+ * detected an error where we want to drop the connection, so if
+ * that's the case, don't read the data as that is what we're trying
+ * to avoid.
+ *
+ * This function is also a no-op on a subrequest.
+ */
+ if (r->main || c->keepalive == AP_CONN_CLOSE) {
+ return OK;
+ }
+ if (ap_status_drops_connection(r->status)) {
+ c->keepalive = AP_CONN_CLOSE;
+ return OK;
+ }
+
+ bb = apr_brigade_create(r->pool, r->connection->bucket_alloc);
+ for (;;) {
+ apr_status_t rv;
+
+ rv = ap_get_brigade(r->input_filters, bb, AP_MODE_READBYTES,
+ APR_BLOCK_READ, HUGE_STRING_LEN);
+ if (rv != APR_SUCCESS) {
+ rc = ap_map_http_request_error(rv, HTTP_BAD_REQUEST);
+ goto cleanup;
+ }
+
+ while (!APR_BRIGADE_EMPTY(bb)) {
+ apr_bucket *b = APR_BRIGADE_FIRST(bb);
+
+ if (APR_BUCKET_IS_EOS(b)) {
+ goto cleanup;
+ }
+
+ /* There is no need to read empty or metadata buckets or
+ * buckets of known length, but we MUST read buckets of
+ * unknown length in order to exhaust them.
+ */
+ if (b->length == (apr_size_t)-1) {
+ apr_size_t len;
+ const char *data;
+
+ rv = apr_bucket_read(b, &data, &len, APR_BLOCK_READ);
+ if (rv != APR_SUCCESS) {
+ rc = HTTP_BAD_REQUEST;
+ goto cleanup;
+ }
+ }
+
+ apr_bucket_delete(b);
+ }
+ }
+
+cleanup:
+ apr_brigade_cleanup(bb);
+ if (rc != OK) {
+ c->keepalive = AP_CONN_CLOSE;
+ }
+ return rc;
+}
+
+/* Here we deal with getting the request message body from the client.
+ * Whether or not the request contains a body is signaled by the presence
+ * of a non-zero Content-Length or by a Transfer-Encoding: chunked.
+ *
+ * Note that this is more complicated than it was in Apache 1.1 and prior
+ * versions, because chunked support means that the module does less.
+ *
+ * The proper procedure is this:
+ *
+ * 1. Call ap_setup_client_block() near the beginning of the request
+ * handler. This will set up all the necessary properties, and will
+ * return either OK, or an error code. If the latter, the module should
+ * return that error code. The second parameter selects the policy to
+ * apply if the request message indicates a body, and how a chunked
+ * transfer-coding should be interpreted. Choose one of
+ *
+ * REQUEST_NO_BODY Send 413 error if message has any body
+ * REQUEST_CHUNKED_ERROR Send 411 error if body without Content-Length
+ * REQUEST_CHUNKED_DECHUNK If chunked, remove the chunks for me.
+ * REQUEST_CHUNKED_PASS If chunked, pass the chunk headers with body.
+ *
+ * In order to use the last two options, the caller MUST provide a buffer
+ * large enough to hold a chunk-size line, including any extensions.
+ *
+ * 2. When you are ready to read a body (if any), call ap_should_client_block().
+ * This will tell the module whether or not to read input. If it is 0,
+ * the module should assume that there is no message body to read.
+ *
+ * 3. Finally, call ap_get_client_block in a loop. Pass it a buffer and its size.
+ * It will put data into the buffer (not necessarily a full buffer), and
+ * return the length of the input block. When it is done reading, it will
+ * return 0 if EOF, or -1 if there was an error.
+ * If an error occurs on input, we force an end to keepalive.
+ *
+ * This step also sends a 100 Continue response to HTTP/1.1 clients if appropriate.
+ */
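+
+/* A minimal sketch of the procedure above (illustrative handler code, not
+ * part of this file; consume_bytes() is a hypothetical consumer of the data):
+ *
+ *   static int read_body(request_rec *r)
+ *   {
+ *       char buf[HUGE_STRING_LEN];
+ *       long len;
+ *       int rc;
+ *
+ *       if ((rc = ap_setup_client_block(r, REQUEST_CHUNKED_DECHUNK)) != OK) {
+ *           return rc;
+ *       }
+ *       if (ap_should_client_block(r)) {
+ *           while ((len = ap_get_client_block(r, buf, sizeof(buf))) > 0) {
+ *               consume_bytes(buf, (apr_size_t)len);
+ *           }
+ *           if (len < 0) {
+ *               return HTTP_BAD_REQUEST;  // keepalive already forced closed
+ *           }
+ *       }
+ *       return OK;
+ *   }
+ */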
+
+AP_DECLARE(int) ap_setup_client_block(request_rec *r, int read_policy)
+{
+ const char *tenc = apr_table_get(r->headers_in, "Transfer-Encoding");
+ const char *lenp = apr_table_get(r->headers_in, "Content-Length");
+ apr_off_t limit_req_body = ap_get_limit_req_body(r);
+
+ r->read_body = read_policy;
+ r->read_chunked = 0;
+ r->remaining = 0;
+
+ if (tenc) {
+ if (ap_cstr_casecmp(tenc, "chunked")) {
+ ap_log_rerror(APLOG_MARK, APLOG_INFO, 0, r, APLOGNO(01592)
+ "Unknown Transfer-Encoding %s", tenc);
+ return HTTP_NOT_IMPLEMENTED;
+ }
+ if (r->read_body == REQUEST_CHUNKED_ERROR) {
+ ap_log_rerror(APLOG_MARK, APLOG_INFO, 0, r, APLOGNO(01593)
+ "chunked Transfer-Encoding forbidden: %s", r->uri);
+ return (lenp) ? HTTP_BAD_REQUEST : HTTP_LENGTH_REQUIRED;
+ }
+
+ r->read_chunked = 1;
+ }
+ else if (lenp) {
+ if (!ap_parse_strict_length(&r->remaining, lenp)) {
+ r->remaining = 0;
+ ap_log_rerror(APLOG_MARK, APLOG_INFO, 0, r, APLOGNO(01594)
+ "Invalid Content-Length '%s'", lenp);
+ return HTTP_BAD_REQUEST;
+ }
+ }
+
+ if ((r->read_body == REQUEST_NO_BODY)
+ && (r->read_chunked || (r->remaining > 0))) {
+ ap_log_rerror(APLOG_MARK, APLOG_INFO, 0, r, APLOGNO(01595)
+ "%s with body is not allowed for %s", r->method, r->uri);
+ return HTTP_REQUEST_ENTITY_TOO_LARGE;
+ }
+
+ if (limit_req_body > 0 && (r->remaining > limit_req_body)) {
+ /* will be logged when the body is discarded */
+ return HTTP_REQUEST_ENTITY_TOO_LARGE;
+ }
+
+#ifdef AP_DEBUG
+ {
+ /* Make sure ap_getline() didn't leave any droppings. */
+ core_request_config *req_cfg =
+ (core_request_config *)ap_get_core_module_config(r->request_config);
+ AP_DEBUG_ASSERT(APR_BRIGADE_EMPTY(req_cfg->bb));
+ }
+#endif
+
+ return OK;
+}
+
+AP_DECLARE(int) ap_should_client_block(request_rec *r)
+{
+ /* First check if we have already read the request body */
+
+ if (r->read_length || (!r->read_chunked && (r->remaining <= 0))) {
+ return 0;
+ }
+
+ return 1;
+}
+
+/* get_client_block is called in a loop to get the request message body.
+ * This is quite simple if the client includes a content-length
+ * (the normal case), but gets messy if the body is chunked. Note that
+ * r->remaining is used to maintain state across calls and that
+ * r->read_length is the total number of bytes given to the caller
+ * across all invocations. It is messy because we have to be careful not
+ * to read past the data provided by the client, since these reads block.
+ * Returns 0 on End-of-body, -1 on error or premature chunk end.
+ *
+ */
+AP_DECLARE(long) ap_get_client_block(request_rec *r, char *buffer,
+ apr_size_t bufsiz)
+{
+ apr_status_t rv;
+ apr_bucket_brigade *bb;
+
+ if (r->remaining < 0 || (!r->read_chunked && r->remaining == 0)) {
+ return 0;
+ }
+
+ bb = apr_brigade_create(r->pool, r->connection->bucket_alloc);
+ if (bb == NULL) {
+ r->connection->keepalive = AP_CONN_CLOSE;
+ return -1;
+ }
+
+ rv = ap_get_brigade(r->input_filters, bb, AP_MODE_READBYTES,
+ APR_BLOCK_READ, bufsiz);
+
+ /* We lose the failure code here. This is why ap_get_client_block should
+ * not be used.
+ */
+ if (rv == AP_FILTER_ERROR) {
+ /* AP_FILTER_ERROR means a filter has responded already,
+ * we are DONE.
+ */
+ apr_brigade_destroy(bb);
+ return -1;
+ }
+ if (rv != APR_SUCCESS) {
+ /* if we actually fail here, we want to just return and
+ * stop trying to read data from the client.
+ */
+ r->connection->keepalive = AP_CONN_CLOSE;
+ apr_brigade_destroy(bb);
+ return -1;
+ }
+
+ /* If this fails, it means that a filter is written incorrectly and that
+ * it needs to learn how to properly handle APR_BLOCK_READ requests by
+ * returning data when requested.
+ */
+ AP_DEBUG_ASSERT(!APR_BRIGADE_EMPTY(bb));
+
+ /* Check to see if EOS in the brigade.
+ *
+ * If so, we have to leave a nugget for the *next* ap_get_client_block
+ * call to return 0.
+ */
+ if (APR_BUCKET_IS_EOS(APR_BRIGADE_LAST(bb))) {
+ if (r->read_chunked) {
+ r->remaining = -1;
+ }
+ else {
+ r->remaining = 0;
+ }
+ }
+
+ rv = apr_brigade_flatten(bb, buffer, &bufsiz);
+ if (rv != APR_SUCCESS) {
+ apr_brigade_destroy(bb);
+ return -1;
+ }
+
+ /* XXX yank me? */
+ r->read_length += bufsiz;
+
+ apr_brigade_destroy(bb);
+ return bufsiz;
+}
+
+/* Context struct for ap_http_outerror_filter */
+typedef struct {
+ int seen_eoc;
+ int first_error;
+} outerror_filter_ctx_t;
+
+/* Filter to handle any error buckets on output */
+apr_status_t ap_http_outerror_filter(ap_filter_t *f,
+ apr_bucket_brigade *b)
+{
+ request_rec *r = f->r;
+ outerror_filter_ctx_t *ctx = (outerror_filter_ctx_t *)(f->ctx);
+ apr_bucket *e;
+
+ /* Create context if none is present */
+ if (!ctx) {
+ ctx = apr_pcalloc(r->pool, sizeof(outerror_filter_ctx_t));
+ f->ctx = ctx;
+ }
+ for (e = APR_BRIGADE_FIRST(b);
+ e != APR_BRIGADE_SENTINEL(b);
+ e = APR_BUCKET_NEXT(e))
+ {
+ if (AP_BUCKET_IS_ERROR(e)) {
+ /*
+ * Start of error handling state tree. Just one condition
+ * right now :)
+ */
+ if (((ap_bucket_error *)(e->data))->status == HTTP_BAD_GATEWAY) {
+ /* stream aborted and we have not ended it yet */
+ r->connection->keepalive = AP_CONN_CLOSE;
+ }
+ /*
+ * Memorize the status code of the first error bucket for possible
+ * later use.
+ */
+ if (!ctx->first_error) {
+ ctx->first_error = ((ap_bucket_error *)(e->data))->status;
+ }
+ continue;
+ }
+ /* Detect EOC buckets and memorize this in the context. */
+ if (AP_BUCKET_IS_EOC(e)) {
+ r->connection->keepalive = AP_CONN_CLOSE;
+ ctx->seen_eoc = 1;
+ }
+ }
+ /*
+ * Remove all data buckets that are in a brigade after an EOC bucket
+ * was seen, as an EOC bucket tells us that no (further) resource
+ * and protocol data should go out to the client. OTOH meta buckets
+ * are still welcome as they might trigger needed actions down in
+ * the chain (e.g. in network filters like SSL).
+ * Remark 1: We need to dump ALL data buckets in the brigade
+ * since a filter in between might have inserted data
+ * buckets BEFORE the EOC bucket sent by the original
+ * sender, and we do NOT want this data to be sent.
+ * Remark 2: Dumping all data buckets here does not necessarily mean
+ * that no further data is sent to the client, as:
+ * 1. Network filters like SSL can still be triggered via
+ * meta buckets to talk with the client e.g. for a
+ * clean shutdown.
+ * 2. There could be still data that was buffered before
+ * down in the chain that gets flushed by a FLUSH or an
+ * EOS bucket.
+ */
+ if (ctx->seen_eoc) {
+ /*
+ * Set the request status to the status of the first error bucket.
+ * This should ensure that we log an appropriate status code in
+ * the access log.
+ * We need to set r->status on each call after we noticed an EOC as
+ * data bucket generators like ap_die might have changed the status
+ * code. But we know better in this case and insist on the status
+ * code that we have seen in the error bucket.
+ */
+ if (ctx->first_error) {
+ r->status = ctx->first_error;
+ }
+ for (e = APR_BRIGADE_FIRST(b);
+ e != APR_BRIGADE_SENTINEL(b);
+ e = APR_BUCKET_NEXT(e))
+ {
+ if (!APR_BUCKET_IS_METADATA(e)) {
+ APR_BUCKET_REMOVE(e);
+ }
+ }
+ }
+
+ return ap_pass_brigade(f->next, b);
+}
diff --git a/modules/http/http_protocol.c b/modules/http/http_protocol.c
new file mode 100644
index 0000000..d031f24
--- /dev/null
+++ b/modules/http/http_protocol.c
@@ -0,0 +1,1671 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * http_protocol.c --- routines which directly communicate with the client.
+ *
+ * Code originally by Rob McCool; much redone by Robert S. Thau
+ * and the Apache Software Foundation.
+ */
+
+#include "apr.h"
+#include "apr_strings.h"
+#include "apr_buckets.h"
+#include "apr_lib.h"
+#include "apr_signal.h"
+
+#define APR_WANT_STDIO /* for sscanf */
+#define APR_WANT_STRFUNC
+#define APR_WANT_MEMFUNC
+#include "apr_want.h"
+
+#include "util_filter.h"
+#include "ap_config.h"
+#include "httpd.h"
+#include "http_config.h"
+#include "http_core.h"
+#include "http_protocol.h"
+#include "http_main.h"
+#include "http_request.h"
+#include "http_vhost.h"
+#include "http_log.h" /* For errors detected in basic auth common
+ * support code... */
+#include "apr_date.h" /* For apr_date_parse_http and APR_DATE_BAD */
+#include "util_charset.h"
+#include "util_ebcdic.h"
+#include "util_time.h"
+#include "ap_mpm.h"
+
+#include "mod_core.h"
+
+#if APR_HAVE_STDARG_H
+#include <stdarg.h>
+#endif
+#if APR_HAVE_UNISTD_H
+#include <unistd.h>
+#endif
+
+APLOG_USE_MODULE(http);
+
+/* New Apache routine to map status codes into array indices
+ * e.g. 100 -> 0, 101 -> 1, 200 -> 2 ...
+ * The number of status lines must equal the value of
+ * RESPONSE_CODES (httpd.h) and must be listed in order.
+ * No gaps are allowed between X00 and the largest Xnn
+ * for any X (see ap_index_of_response).
+ * When adding a new code here, add a define to httpd.h
+ * as well.
+ */
+
+static const char * const status_lines[RESPONSE_CODES] =
+{
+ "100 Continue",
+ "101 Switching Protocols",
+ "102 Processing",
+#define LEVEL_200 3
+ "200 OK",
+ "201 Created",
+ "202 Accepted",
+ "203 Non-Authoritative Information",
+ "204 No Content",
+ "205 Reset Content",
+ "206 Partial Content",
+ "207 Multi-Status",
+ "208 Already Reported",
+ NULL, /* 209 */
+ NULL, /* 210 */
+ NULL, /* 211 */
+ NULL, /* 212 */
+ NULL, /* 213 */
+ NULL, /* 214 */
+ NULL, /* 215 */
+ NULL, /* 216 */
+ NULL, /* 217 */
+ NULL, /* 218 */
+ NULL, /* 219 */
+ NULL, /* 220 */
+ NULL, /* 221 */
+ NULL, /* 222 */
+ NULL, /* 223 */
+ NULL, /* 224 */
+ NULL, /* 225 */
+ "226 IM Used",
+#define LEVEL_300 30
+ "300 Multiple Choices",
+ "301 Moved Permanently",
+ "302 Found",
+ "303 See Other",
+ "304 Not Modified",
+ "305 Use Proxy",
+ NULL, /* 306 */
+ "307 Temporary Redirect",
+ "308 Permanent Redirect",
+#define LEVEL_400 39
+ "400 Bad Request",
+ "401 Unauthorized",
+ "402 Payment Required",
+ "403 Forbidden",
+ "404 Not Found",
+ "405 Method Not Allowed",
+ "406 Not Acceptable",
+ "407 Proxy Authentication Required",
+ "408 Request Timeout",
+ "409 Conflict",
+ "410 Gone",
+ "411 Length Required",
+ "412 Precondition Failed",
+ "413 Request Entity Too Large",
+ "414 Request-URI Too Long",
+ "415 Unsupported Media Type",
+ "416 Requested Range Not Satisfiable",
+ "417 Expectation Failed",
+ NULL, /* 418 */
+ NULL, /* 419 */
+ NULL, /* 420 */
+ "421 Misdirected Request",
+ "422 Unprocessable Entity",
+ "423 Locked",
+ "424 Failed Dependency",
+ NULL, /* 425 */
+ "426 Upgrade Required",
+ NULL, /* 427 */
+ "428 Precondition Required",
+ "429 Too Many Requests",
+ NULL, /* 430 */
+ "431 Request Header Fields Too Large",
+ NULL, /* 432 */
+ NULL, /* 433 */
+ NULL, /* 434 */
+ NULL, /* 435 */
+ NULL, /* 436 */
+ NULL, /* 437 */
+ NULL, /* 438 */
+ NULL, /* 439 */
+ NULL, /* 440 */
+ NULL, /* 441 */
+ NULL, /* 442 */
+ NULL, /* 443 */
+ NULL, /* 444 */
+ NULL, /* 445 */
+ NULL, /* 446 */
+ NULL, /* 447 */
+ NULL, /* 448 */
+ NULL, /* 449 */
+ NULL, /* 450 */
+ "451 Unavailable For Legal Reasons",
+#define LEVEL_500 91
+ "500 Internal Server Error",
+ "501 Not Implemented",
+ "502 Bad Gateway",
+ "503 Service Unavailable",
+ "504 Gateway Timeout",
+ "505 HTTP Version Not Supported",
+ "506 Variant Also Negotiates",
+ "507 Insufficient Storage",
+ "508 Loop Detected",
+ NULL, /* 509 */
+ "510 Not Extended",
+ "511 Network Authentication Required"
+};
+
+APR_HOOK_STRUCT(
+ APR_HOOK_LINK(insert_error_filter)
+)
+
+AP_IMPLEMENT_HOOK_VOID(insert_error_filter, (request_rec *r), (r))
+
+/* The index of the first bit field that is used to index into a limit
+ * bitmask. M_INVALID + 1 to METHOD_NUMBER_LAST.
+ */
+#define METHOD_NUMBER_FIRST (M_INVALID + 1)
+
+/* The max method number. Method numbers are used to shift bitmasks,
+ * so this cannot exceed 63, and all bits high is equal to -1, which is a
+ * special flag, so the last bit used has index 62.
+ */
+#define METHOD_NUMBER_LAST 62
+
+static int is_mpm_running(void)
+{
+ int mpm_state = 0;
+
+ if (ap_mpm_query(AP_MPMQ_MPM_STATE, &mpm_state)) {
+ return 0;
+ }
+
+ if (mpm_state == AP_MPMQ_STOPPING) {
+ return 0;
+ }
+
+ return 1;
+}
+
+
+AP_DECLARE(int) ap_set_keepalive(request_rec *r)
+{
+ int ka_sent = 0;
+ int left = r->server->keep_alive_max - r->connection->keepalives;
+ int wimpy = ap_find_token(r->pool,
+ apr_table_get(r->headers_out, "Connection"),
+ "close");
+ const char *conn = apr_table_get(r->headers_in, "Connection");
+
+ /* The following convoluted conditional determines whether or not
+ * the current connection should remain persistent after this response
+ * (a.k.a. HTTP Keep-Alive) and whether or not the output message
+ * body should use the HTTP/1.1 chunked transfer-coding. In English,
+ *
+ * IF we have not marked this connection as errored;
+ * and the client isn't expecting 100-continue (PR47087 - more
+ * input here could be the client continuing when we're
+ * closing the request).
+ * and the response body has a defined length due to the status code
+ * being 304 or 204, the request method being HEAD, already
+ * having defined Content-Length or Transfer-Encoding: chunked, or
+ * the request version being HTTP/1.1 and thus capable of being set
+ * as chunked [we know the (r->chunked = 1) side-effect is ugly];
+ * and the server configuration enables keep-alive;
+ * and the server configuration has a reasonable inter-request timeout;
+ * and there is no maximum # requests or the max hasn't been reached;
+ * and the response status does not require a close;
+ * and the response generator has not already indicated close;
+ * and the client did not request non-persistence (Connection: close);
+ * and we haven't been configured to ignore the buggy twit
+ * or they're a buggy twit coming through an HTTP/1.1 proxy
+ * and the client is requesting an HTTP/1.0-style keep-alive
+ * or the client claims to be HTTP/1.1 compliant (perhaps a proxy);
+ * and this MPM process is not already exiting
+ * THEN we can be persistent, which requires more headers be output.
+ *
+ * Note that the condition evaluation order is extremely important.
+ */
+ if ((r->connection->keepalive != AP_CONN_CLOSE)
+ && !r->expecting_100
+ && (r->header_only
+ || AP_STATUS_IS_HEADER_ONLY(r->status)
+ || apr_table_get(r->headers_out, "Content-Length")
+ || ap_is_chunked(r->pool,
+ apr_table_get(r->headers_out,
+ "Transfer-Encoding"))
+ || ((r->proto_num >= HTTP_VERSION(1,1))
+ && (r->chunked = 1))) /* THIS CODE IS CORRECT, see above. */
+ && r->server->keep_alive
+ && (r->server->keep_alive_timeout > 0)
+ && ((r->server->keep_alive_max == 0)
+ || (left > 0))
+ && !ap_status_drops_connection(r->status)
+ && !wimpy
+ && !ap_find_token(r->pool, conn, "close")
+ && (!apr_table_get(r->subprocess_env, "nokeepalive")
+ || apr_table_get(r->headers_in, "Via"))
+ && ((ka_sent = ap_find_token(r->pool, conn, "keep-alive"))
+ || (r->proto_num >= HTTP_VERSION(1,1)))
+ && is_mpm_running()) {
+
+ r->connection->keepalive = AP_CONN_KEEPALIVE;
+ r->connection->keepalives++;
+
+ /* If they sent a Keep-Alive token, send one back */
+ if (ka_sent) {
+ if (r->server->keep_alive_max) {
+ apr_table_setn(r->headers_out, "Keep-Alive",
+ apr_psprintf(r->pool, "timeout=%d, max=%d",
+ (int)apr_time_sec(r->server->keep_alive_timeout),
+ left));
+ }
+ else {
+ apr_table_setn(r->headers_out, "Keep-Alive",
+ apr_psprintf(r->pool, "timeout=%d",
+ (int)apr_time_sec(r->server->keep_alive_timeout)));
+ }
+ apr_table_mergen(r->headers_out, "Connection", "Keep-Alive");
+ }
+
+ return 1;
+ }
+
+ /* Otherwise, we need to indicate that we will be closing this
+ * connection immediately after the current response.
+ *
+ * We only really need to send "close" to HTTP/1.1 clients, but we
+ * always send it anyway, because a broken proxy may identify itself
+ * as HTTP/1.0, but pass our request along with our HTTP/1.1 tag
+ * to an HTTP/1.1 client. Better safe than sorry.
+ */
+ if (!wimpy) {
+ apr_table_mergen(r->headers_out, "Connection", "close");
+ }
+
+ /*
+ * If we had previously been a keepalive connection and this
+ * is the last one, then bump up the number of keepalives
+ * we've had
+ */
+ if ((r->connection->keepalive != AP_CONN_CLOSE)
+ && r->server->keep_alive_max
+ && !left) {
+ r->connection->keepalives++;
+ }
+ r->connection->keepalive = AP_CONN_CLOSE;
+
+ return 0;
+}
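+
+/* Illustrative outcome (values depend on configuration): when the keep-alive
+ * conditions above hold for an HTTP/1.0 client that sent
+ * "Connection: Keep-Alive", with KeepAliveTimeout 5 and
+ * MaxKeepAliveRequests 100, the response gains
+ *
+ *   Keep-Alive: timeout=5, max=100
+ *   Connection: Keep-Alive
+ *
+ * whereas a connection failing any condition gets "Connection: close"
+ * merged into the response instead.
+ */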
+
+AP_DECLARE(ap_condition_e) ap_condition_if_match(request_rec *r,
+ apr_table_t *headers)
+{
+ const char *if_match, *etag;
+
+ /* A server MUST use the strong comparison function (see section 13.3.3)
+ * to compare the entity tags in If-Match.
+ */
+ if ((if_match = apr_table_get(r->headers_in, "If-Match")) != NULL) {
+ if (if_match[0] == '*'
+ || ((etag = apr_table_get(headers, "ETag")) != NULL
+ && ap_find_etag_strong(r->pool, if_match, etag))) {
+ return AP_CONDITION_STRONG;
+ }
+ else {
+ return AP_CONDITION_NOMATCH;
+ }
+ }
+
+ return AP_CONDITION_NONE;
+}
+
+AP_DECLARE(ap_condition_e) ap_condition_if_unmodified_since(request_rec *r,
+ apr_table_t *headers)
+{
+ const char *if_unmodified;
+
+ if_unmodified = apr_table_get(r->headers_in, "If-Unmodified-Since");
+ if (if_unmodified) {
+ apr_int64_t mtime, reqtime;
+
+ apr_time_t ius = apr_time_sec(apr_date_parse_http(if_unmodified));
+
+ /* All of our comparisons must be in seconds, because that's the
+ * highest time resolution the HTTP specification allows.
+ */
+ mtime = apr_time_sec(apr_date_parse_http(
+ apr_table_get(headers, "Last-Modified")));
+ if (mtime == APR_DATE_BAD) {
+ mtime = apr_time_sec(r->mtime ? r->mtime : apr_time_now());
+ }
+
+ reqtime = apr_time_sec(apr_date_parse_http(
+ apr_table_get(headers, "Date")));
+ if (!reqtime) {
+ reqtime = apr_time_sec(r->request_time);
+ }
+
+ if ((ius != APR_DATE_BAD) && (mtime > ius)) {
+ if (reqtime < mtime + 60) {
+ if (apr_table_get(r->headers_in, "Range")) {
+ /* weak matches not allowed with Range requests */
+ return AP_CONDITION_NOMATCH;
+ }
+ else {
+ return AP_CONDITION_WEAK;
+ }
+ }
+ else {
+ return AP_CONDITION_STRONG;
+ }
+ }
+ else {
+ return AP_CONDITION_NOMATCH;
+ }
+ }
+
+ return AP_CONDITION_NONE;
+}
+
+AP_DECLARE(ap_condition_e) ap_condition_if_none_match(request_rec *r,
+ apr_table_t *headers)
+{
+ const char *if_nonematch, *etag;
+
+ if_nonematch = apr_table_get(r->headers_in, "If-None-Match");
+ if (if_nonematch != NULL) {
+
+ if (if_nonematch[0] == '*') {
+ return AP_CONDITION_STRONG;
+ }
+
+        /* See section 13.3.3 for rules on how to determine if two entity tags
+         * match. The weak comparison function can only be used with GET or HEAD
+ * requests.
+ */
+ if (r->method_number == M_GET) {
+ if ((etag = apr_table_get(headers, "ETag")) != NULL) {
+ if (apr_table_get(r->headers_in, "Range")) {
+ if (ap_find_etag_strong(r->pool, if_nonematch, etag)) {
+ return AP_CONDITION_STRONG;
+ }
+ }
+ else {
+ if (ap_find_etag_weak(r->pool, if_nonematch, etag)) {
+ return AP_CONDITION_WEAK;
+ }
+ }
+ }
+ }
+
+ else if ((etag = apr_table_get(headers, "ETag")) != NULL
+ && ap_find_etag_strong(r->pool, if_nonematch, etag)) {
+ return AP_CONDITION_STRONG;
+ }
+ return AP_CONDITION_NOMATCH;
+ }
+
+ return AP_CONDITION_NONE;
+}
+
+AP_DECLARE(ap_condition_e) ap_condition_if_modified_since(request_rec *r,
+ apr_table_t *headers)
+{
+ const char *if_modified_since;
+
+ if ((if_modified_since = apr_table_get(r->headers_in, "If-Modified-Since"))
+ != NULL) {
+ apr_int64_t mtime;
+ apr_int64_t ims, reqtime;
+
+ /* All of our comparisons must be in seconds, because that's the
+ * highest time resolution the HTTP specification allows.
+ */
+
+ mtime = apr_time_sec(apr_date_parse_http(
+ apr_table_get(headers, "Last-Modified")));
+ if (mtime == APR_DATE_BAD) {
+ mtime = apr_time_sec(r->mtime ? r->mtime : apr_time_now());
+ }
+
+ reqtime = apr_time_sec(apr_date_parse_http(
+ apr_table_get(headers, "Date")));
+ if (!reqtime) {
+ reqtime = apr_time_sec(r->request_time);
+ }
+
+ ims = apr_time_sec(apr_date_parse_http(if_modified_since));
+
+ if (ims >= mtime && ims <= reqtime) {
+ if (reqtime < mtime + 60) {
+ if (apr_table_get(r->headers_in, "Range")) {
+ /* weak matches not allowed with Range requests */
+ return AP_CONDITION_NOMATCH;
+ }
+ else {
+ return AP_CONDITION_WEAK;
+ }
+ }
+ else {
+ return AP_CONDITION_STRONG;
+ }
+ }
+ else {
+ return AP_CONDITION_NOMATCH;
+ }
+ }
+
+ return AP_CONDITION_NONE;
+}
+
+AP_DECLARE(ap_condition_e) ap_condition_if_range(request_rec *r,
+ apr_table_t *headers)
+{
+ const char *if_range, *etag;
+
+ if ((if_range = apr_table_get(r->headers_in, "If-Range"))
+ && apr_table_get(r->headers_in, "Range")) {
+ if (if_range[0] == '"') {
+
+ if ((etag = apr_table_get(headers, "ETag"))
+ && !strcmp(if_range, etag)) {
+ return AP_CONDITION_STRONG;
+ }
+ else {
+ return AP_CONDITION_NOMATCH;
+ }
+
+ }
+ else {
+ apr_int64_t mtime;
+ apr_int64_t rtime, reqtime;
+
+ /* All of our comparisons must be in seconds, because that's the
+ * highest time resolution the HTTP specification allows.
+ */
+
+ mtime = apr_time_sec(apr_date_parse_http(
+ apr_table_get(headers, "Last-Modified")));
+ if (mtime == APR_DATE_BAD) {
+ mtime = apr_time_sec(r->mtime ? r->mtime : apr_time_now());
+ }
+
+ reqtime = apr_time_sec(apr_date_parse_http(
+ apr_table_get(headers, "Date")));
+ if (!reqtime) {
+ reqtime = apr_time_sec(r->request_time);
+ }
+
+ rtime = apr_time_sec(apr_date_parse_http(if_range));
+
+ if (rtime == mtime) {
+ if (reqtime < mtime + 60) {
+ /* weak matches not allowed with Range requests */
+ return AP_CONDITION_NOMATCH;
+ }
+ else {
+ return AP_CONDITION_STRONG;
+ }
+ }
+ else {
+ return AP_CONDITION_NOMATCH;
+ }
+ }
+ }
+
+ return AP_CONDITION_NONE;
+}
+
+AP_DECLARE(int) ap_meets_conditions(request_rec *r)
+{
+ int not_modified = -1; /* unset by default */
+ ap_condition_e cond;
+
+ /* Check for conditional requests --- note that we only want to do
+ * this if we are successful so far and we are not processing a
+ * subrequest or an ErrorDocument.
+ *
+ * The order of the checks is important, since ETag checks are supposed
+ * to be more accurate than checks relative to the modification time.
+ * However, not all documents are guaranteed to *have* ETags, and some
+ * might have Last-Modified values w/o ETags, so this gets a little
+ * complicated.
+ */
+
+ if (!ap_is_HTTP_SUCCESS(r->status) || r->no_local_copy) {
+ return OK;
+ }
+
+ /* If an If-Match request-header field was given
+ * AND the field value is not "*" (meaning match anything)
+ * AND if our strong ETag does not match any entity tag in that field,
+ * respond with a status of 412 (Precondition Failed).
+ */
+ cond = ap_condition_if_match(r, r->headers_out);
+ if (AP_CONDITION_NOMATCH == cond) {
+ return HTTP_PRECONDITION_FAILED;
+ }
+
+ /* Else if a valid If-Unmodified-Since request-header field was given
+ * AND the requested resource has been modified since the time
+ * specified in this field, then the server MUST
+ * respond with a status of 412 (Precondition Failed).
+ */
+ cond = ap_condition_if_unmodified_since(r, r->headers_out);
+ if (AP_CONDITION_NOMATCH == cond) {
+ not_modified = 0;
+ }
+ else if (cond >= AP_CONDITION_WEAK) {
+ return HTTP_PRECONDITION_FAILED;
+ }
+
+ /* If an If-None-Match request-header field was given
+ * AND the field value is "*" (meaning match anything)
+ * OR our ETag matches any of the entity tags in that field, fail.
+ *
+ * If the request method was GET or HEAD, failure means the server
+ * SHOULD respond with a 304 (Not Modified) response.
+ * For all other request methods, failure means the server MUST
+ * respond with a status of 412 (Precondition Failed).
+ *
+ * GET or HEAD allow weak etag comparison, all other methods require
+ * strong comparison. We can only use weak if it's not a range request.
+ */
+ cond = ap_condition_if_none_match(r, r->headers_out);
+ if (AP_CONDITION_NOMATCH == cond) {
+ not_modified = 0;
+ }
+ else if (cond >= AP_CONDITION_WEAK) {
+ if (r->method_number == M_GET) {
+ if (not_modified) {
+ not_modified = 1;
+ }
+ }
+ else {
+ return HTTP_PRECONDITION_FAILED;
+ }
+ }
+
+ /* If a valid If-Modified-Since request-header field was given
+ * AND it is a GET or HEAD request
+ * AND the requested resource has not been modified since the time
+ * specified in this field, then the server MUST
+ * respond with a status of 304 (Not Modified).
+ * A date later than the server's current request time is invalid.
+ */
+ cond = ap_condition_if_modified_since(r, r->headers_out);
+ if (AP_CONDITION_NOMATCH == cond) {
+ not_modified = 0;
+ }
+ else if (cond >= AP_CONDITION_WEAK) {
+ if (r->method_number == M_GET) {
+ if (not_modified) {
+ not_modified = 1;
+ }
+ }
+ }
+
+    /* If both an If-Range and a Range header are present, we must return
+     * 200 OK here; the byterange filter will convert it to a range response.
+ */
+ cond = ap_condition_if_range(r, r->headers_out);
+ if (cond > AP_CONDITION_NONE) {
+ return OK;
+ }
+
+ if (not_modified == 1) {
+ return HTTP_NOT_MODIFIED;
+ }
+
+ return OK;
+}
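+
+/* Minimal usage sketch (hypothetical handler code, not part of this file):
+ * a content handler typically sets its validators and then honours the
+ * conditional headers before generating a body, roughly:
+ *
+ *     ap_set_last_modified(r);
+ *     ap_set_etag(r);
+ *     if ((errstatus = ap_meets_conditions(r)) != OK) {
+ *         return errstatus;              // e.g. 304 or 412
+ *     }
+ *     // ... generate and send the response body ...
+ */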
+
+/**
+ * Singleton registry of additional methods. This maps new method names
+ * such as "MYGET" to methnums, which are int offsets into bitmasks.
+ *
+ * This follows the same technique as standard M_GET, M_POST, etc. These
+ * are dynamically assigned when modules are loaded and <Limit GET MYGET>
+ * directives are processed.
+ */
+static apr_hash_t *methods_registry = NULL;
+static int cur_method_number = METHOD_NUMBER_FIRST;
+
+/* internal function to register one method/number pair */
+static void register_one_method(apr_pool_t *p, const char *methname,
+ int methnum)
+{
+ int *pnum = apr_palloc(p, sizeof(*pnum));
+
+ *pnum = methnum;
+ apr_hash_set(methods_registry, methname, APR_HASH_KEY_STRING, pnum);
+}
+
+/* This internal function is used to clear the method registry
+ * and reset the cur_method_number counter.
+ */
+static apr_status_t ap_method_registry_destroy(void *notused)
+{
+ methods_registry = NULL;
+ cur_method_number = METHOD_NUMBER_FIRST;
+ return APR_SUCCESS;
+}
+
+AP_DECLARE(void) ap_method_registry_init(apr_pool_t *p)
+{
+ methods_registry = apr_hash_make(p);
+ apr_pool_cleanup_register(p, NULL,
+ ap_method_registry_destroy,
+ apr_pool_cleanup_null);
+
+ /* put all the standard methods into the registry hash to ease the
+ * mapping operations between name and number
+ * HEAD is a special-instance of the GET method and shares the same ID
+ */
+ register_one_method(p, "GET", M_GET);
+ register_one_method(p, "HEAD", M_GET);
+ register_one_method(p, "PUT", M_PUT);
+ register_one_method(p, "POST", M_POST);
+ register_one_method(p, "DELETE", M_DELETE);
+ register_one_method(p, "CONNECT", M_CONNECT);
+ register_one_method(p, "OPTIONS", M_OPTIONS);
+ register_one_method(p, "TRACE", M_TRACE);
+ register_one_method(p, "PATCH", M_PATCH);
+ register_one_method(p, "PROPFIND", M_PROPFIND);
+ register_one_method(p, "PROPPATCH", M_PROPPATCH);
+ register_one_method(p, "MKCOL", M_MKCOL);
+ register_one_method(p, "COPY", M_COPY);
+ register_one_method(p, "MOVE", M_MOVE);
+ register_one_method(p, "LOCK", M_LOCK);
+ register_one_method(p, "UNLOCK", M_UNLOCK);
+ register_one_method(p, "VERSION-CONTROL", M_VERSION_CONTROL);
+ register_one_method(p, "CHECKOUT", M_CHECKOUT);
+ register_one_method(p, "UNCHECKOUT", M_UNCHECKOUT);
+ register_one_method(p, "CHECKIN", M_CHECKIN);
+ register_one_method(p, "UPDATE", M_UPDATE);
+ register_one_method(p, "LABEL", M_LABEL);
+ register_one_method(p, "REPORT", M_REPORT);
+ register_one_method(p, "MKWORKSPACE", M_MKWORKSPACE);
+ register_one_method(p, "MKACTIVITY", M_MKACTIVITY);
+ register_one_method(p, "BASELINE-CONTROL", M_BASELINE_CONTROL);
+ register_one_method(p, "MERGE", M_MERGE);
+}
+
+AP_DECLARE(int) ap_method_register(apr_pool_t *p, const char *methname)
+{
+ int *methnum;
+
+ if (methods_registry == NULL) {
+ ap_method_registry_init(p);
+ }
+
+ if (methname == NULL) {
+ return M_INVALID;
+ }
+
+ /* Check if the method was previously registered. If it was
+ * return the associated method number.
+ */
+ methnum = (int *)apr_hash_get(methods_registry, methname,
+ APR_HASH_KEY_STRING);
+ if (methnum != NULL)
+ return *methnum;
+
+ if (cur_method_number > METHOD_NUMBER_LAST) {
+ /* The method registry has run out of dynamically
+ * assignable method numbers. Log this and return M_INVALID.
+ */
+ ap_log_perror(APLOG_MARK, APLOG_ERR, 0, p, APLOGNO(01610)
+ "Maximum new request methods %d reached while "
+ "registering method %s.",
+ METHOD_NUMBER_LAST, methname);
+ return M_INVALID;
+ }
+
+ register_one_method(p, methname, cur_method_number);
+ return cur_method_number++;
+}
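+
+/* Usage sketch (hypothetical module code): a module wanting its own method
+ * number would typically register it once from a configuration-time hook
+ * using the config pool, e.g.
+ *
+ *     static int M_MYMETHOD;                       // illustrative name
+ *     ...
+ *     M_MYMETHOD = ap_method_register(pconf, "MYMETHOD");
+ *     ...
+ *     if (r->method_number == M_MYMETHOD) { ... }
+ */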
+
+#define UNKNOWN_METHOD (-1)
+
+static int lookup_builtin_method(const char *method, apr_size_t len)
+{
+ /* Note: the following code was generated by the "shilka" tool from
+ the "cocom" parsing/compilation toolkit. It is an optimized lookup
+ based on analysis of the input keywords. Postprocessing was done
+       on the shilka output, but the basic structure and analysis are
+       from there. Should new HTTP methods be added, they can either be
+       inserted into this code manually, or the shilka tool can simply be
+       re-run on the appropriate input. */
+
+ /* Note: it is also quite reasonable to just use our method_registry,
+ but I'm assuming (probably incorrectly) we want more speed here
+ (based on the optimizations the previous code was doing). */
+
+ switch (len)
+ {
+ case 3:
+ switch (method[0])
+ {
+ case 'P':
+ return (method[1] == 'U'
+ && method[2] == 'T'
+ ? M_PUT : UNKNOWN_METHOD);
+ case 'G':
+ return (method[1] == 'E'
+ && method[2] == 'T'
+ ? M_GET : UNKNOWN_METHOD);
+ default:
+ return UNKNOWN_METHOD;
+ }
+
+ case 4:
+ switch (method[0])
+ {
+ case 'H':
+ return (method[1] == 'E'
+ && method[2] == 'A'
+ && method[3] == 'D'
+ ? M_GET : UNKNOWN_METHOD);
+ case 'P':
+ return (method[1] == 'O'
+ && method[2] == 'S'
+ && method[3] == 'T'
+ ? M_POST : UNKNOWN_METHOD);
+ case 'M':
+ return (method[1] == 'O'
+ && method[2] == 'V'
+ && method[3] == 'E'
+ ? M_MOVE : UNKNOWN_METHOD);
+ case 'L':
+ return (method[1] == 'O'
+ && method[2] == 'C'
+ && method[3] == 'K'
+ ? M_LOCK : UNKNOWN_METHOD);
+ case 'C':
+ return (method[1] == 'O'
+ && method[2] == 'P'
+ && method[3] == 'Y'
+ ? M_COPY : UNKNOWN_METHOD);
+ default:
+ return UNKNOWN_METHOD;
+ }
+
+ case 5:
+ switch (method[2])
+ {
+ case 'T':
+ return (memcmp(method, "PATCH", 5) == 0
+ ? M_PATCH : UNKNOWN_METHOD);
+ case 'R':
+ return (memcmp(method, "MERGE", 5) == 0
+ ? M_MERGE : UNKNOWN_METHOD);
+ case 'C':
+ return (memcmp(method, "MKCOL", 5) == 0
+ ? M_MKCOL : UNKNOWN_METHOD);
+ case 'B':
+ return (memcmp(method, "LABEL", 5) == 0
+ ? M_LABEL : UNKNOWN_METHOD);
+ case 'A':
+ return (memcmp(method, "TRACE", 5) == 0
+ ? M_TRACE : UNKNOWN_METHOD);
+ default:
+ return UNKNOWN_METHOD;
+ }
+
+ case 6:
+ switch (method[0])
+ {
+ case 'U':
+ switch (method[5])
+ {
+ case 'K':
+ return (memcmp(method, "UNLOCK", 6) == 0
+ ? M_UNLOCK : UNKNOWN_METHOD);
+ case 'E':
+ return (memcmp(method, "UPDATE", 6) == 0
+ ? M_UPDATE : UNKNOWN_METHOD);
+ default:
+ return UNKNOWN_METHOD;
+ }
+ case 'R':
+ return (memcmp(method, "REPORT", 6) == 0
+ ? M_REPORT : UNKNOWN_METHOD);
+ case 'D':
+ return (memcmp(method, "DELETE", 6) == 0
+ ? M_DELETE : UNKNOWN_METHOD);
+ default:
+ return UNKNOWN_METHOD;
+ }
+
+ case 7:
+ switch (method[1])
+ {
+ case 'P':
+ return (memcmp(method, "OPTIONS", 7) == 0
+ ? M_OPTIONS : UNKNOWN_METHOD);
+ case 'O':
+ return (memcmp(method, "CONNECT", 7) == 0
+ ? M_CONNECT : UNKNOWN_METHOD);
+ case 'H':
+ return (memcmp(method, "CHECKIN", 7) == 0
+ ? M_CHECKIN : UNKNOWN_METHOD);
+ default:
+ return UNKNOWN_METHOD;
+ }
+
+ case 8:
+ switch (method[0])
+ {
+ case 'P':
+ return (memcmp(method, "PROPFIND", 8) == 0
+ ? M_PROPFIND : UNKNOWN_METHOD);
+ case 'C':
+ return (memcmp(method, "CHECKOUT", 8) == 0
+ ? M_CHECKOUT : UNKNOWN_METHOD);
+ default:
+ return UNKNOWN_METHOD;
+ }
+
+ case 9:
+ return (memcmp(method, "PROPPATCH", 9) == 0
+ ? M_PROPPATCH : UNKNOWN_METHOD);
+
+ case 10:
+ switch (method[0])
+ {
+ case 'U':
+ return (memcmp(method, "UNCHECKOUT", 10) == 0
+ ? M_UNCHECKOUT : UNKNOWN_METHOD);
+ case 'M':
+ return (memcmp(method, "MKACTIVITY", 10) == 0
+ ? M_MKACTIVITY : UNKNOWN_METHOD);
+ default:
+ return UNKNOWN_METHOD;
+ }
+
+ case 11:
+ return (memcmp(method, "MKWORKSPACE", 11) == 0
+ ? M_MKWORKSPACE : UNKNOWN_METHOD);
+
+ case 15:
+ return (memcmp(method, "VERSION-CONTROL", 15) == 0
+ ? M_VERSION_CONTROL : UNKNOWN_METHOD);
+
+ case 16:
+ return (memcmp(method, "BASELINE-CONTROL", 16) == 0
+ ? M_BASELINE_CONTROL : UNKNOWN_METHOD);
+
+ default:
+ return UNKNOWN_METHOD;
+ }
+
+ /* NOTREACHED */
+}
+
+/* Get the method number associated with the given string, assumed to
+ * contain an HTTP method. Returns M_INVALID if not recognized.
+ *
+ * This is the first step toward placing method names in a configurable
+ * list. Hopefully it (and other routines) can eventually be moved to
+ * something like a mod_http_methods.c, complete with config stuff.
+ */
+AP_DECLARE(int) ap_method_number_of(const char *method)
+{
+ int len = strlen(method);
+ int which = lookup_builtin_method(method, len);
+
+ if (which != UNKNOWN_METHOD)
+ return which;
+
+ /* check if the method has been dynamically registered */
+ if (methods_registry != NULL) {
+ int *methnum = apr_hash_get(methods_registry, method, len);
+
+ if (methnum != NULL) {
+ return *methnum;
+ }
+ }
+
+ return M_INVALID;
+}
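+
+/* For example, ap_method_number_of("PROPFIND") yields M_PROPFIND via the
+ * built-in lookup above, while an unknown method such as "BREW" yields
+ * M_INVALID unless a module has registered it with ap_method_register().
+ */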
+
+/*
+ * Turn a known method number into a name.
+ */
+AP_DECLARE(const char *) ap_method_name_of(apr_pool_t *p, int methnum)
+{
+ apr_hash_index_t *hi = apr_hash_first(p, methods_registry);
+
+ /* scan through the hash table, looking for a value that matches
+ the provided method number. */
+ for (; hi; hi = apr_hash_next(hi)) {
+ const void *key;
+ void *val;
+
+ apr_hash_this(hi, &key, NULL, &val);
+ if (*(int *)val == methnum)
+ return key;
+ }
+
+ /* it wasn't found in the hash */
+ return NULL;
+}
+
+/* The index is found by its offset from the x00 code of each level.
+ * Although this is fast, it will need to be replaced if some nutcase
+ * decides to define a high-numbered code before the lower numbers.
+ * If that sad event occurs, replace the code below with a linear search
+ * from status_lines[shortcut[i]] to status_lines[shortcut[i+1]-1];
+ * or use NULL to fill the gaps.
+ */
+static int index_of_response(int status)
+{
+ static int shortcut[6] = {0, LEVEL_200, LEVEL_300, LEVEL_400, LEVEL_500,
+ RESPONSE_CODES};
+ int i, pos;
+
+ if (status < 100) { /* Below 100 is illegal for HTTP status */
+ return -1;
+ }
+ if (status > 999) { /* Above 999 is also illegal for HTTP status */
+ return -1;
+ }
+
+ for (i = 0; i < 5; i++) {
+ status -= 100;
+ if (status < 100) {
+ pos = (status + shortcut[i]);
+ if (pos < shortcut[i + 1] && status_lines[pos] != NULL) {
+ return pos;
+ }
+ else {
+ break;
+ }
+ }
+ }
+ return -2; /* Status unknown (falls in gap) or above 600 */
+}
+
+AP_DECLARE(int) ap_index_of_response(int status)
+{
+ int index = index_of_response(status);
+ return (index < 0) ? LEVEL_500 : index;
+}
+
+AP_DECLARE(const char *) ap_get_status_line_ex(apr_pool_t *p, int status)
+{
+ int index = index_of_response(status);
+ if (index >= 0) {
+ return status_lines[index];
+ }
+ else if (index == -2) {
+ return apr_psprintf(p, "%i Status %i", status, status);
+ }
+ return status_lines[LEVEL_500];
+}
+
+AP_DECLARE(const char *) ap_get_status_line(int status)
+{
+ return status_lines[ap_index_of_response(status)];
+}
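+
+/* For example, ap_get_status_line(404) returns "404 Not Found", while an
+ * unregistered code such as 299 yields the synthesized "299 Status 299"
+ * from ap_get_status_line_ex(); ap_get_status_line() itself falls back to
+ * the generic 500 line for codes it does not know.
+ */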
+
+/* Build the Allow field-value from the request handler method mask.
+ */
+static char *make_allow(request_rec *r)
+{
+ apr_int64_t mask;
+ apr_array_header_t *allow = apr_array_make(r->pool, 10, sizeof(char *));
+ apr_hash_index_t *hi = apr_hash_first(r->pool, methods_registry);
+ /* For TRACE below */
+ core_server_config *conf =
+ ap_get_core_module_config(r->server->module_config);
+
+ mask = r->allowed_methods->method_mask;
+
+ for (; hi; hi = apr_hash_next(hi)) {
+ const void *key;
+ void *val;
+
+ apr_hash_this(hi, &key, NULL, &val);
+ if ((mask & (AP_METHOD_BIT << *(int *)val)) != 0) {
+ APR_ARRAY_PUSH(allow, const char *) = key;
+ }
+ }
+
+ /* TRACE is tested on a per-server basis */
+ if (conf->trace_enable != AP_TRACE_DISABLE)
+ *(const char **)apr_array_push(allow) = "TRACE";
+
+ /* ### this is rather annoying. we should enforce registration of
+ ### these methods */
+ if ((mask & (AP_METHOD_BIT << M_INVALID))
+ && (r->allowed_methods->method_list != NULL)
+ && (r->allowed_methods->method_list->nelts != 0)) {
+ apr_array_cat(allow, r->allowed_methods->method_list);
+ }
+
+ return apr_array_pstrcat(r->pool, allow, ',');
+}
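+
+/* Note: the value built above is a comma-separated list without spaces,
+ * e.g. "GET,HEAD,OPTIONS,TRACE"; the order of the registered methods is
+ * whatever the hash iteration yields, so it is not deterministic.
+ */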
+
+AP_DECLARE(int) ap_send_http_options(request_rec *r)
+{
+ if (r->assbackwards) {
+ return DECLINED;
+ }
+
+ apr_table_setn(r->headers_out, "Allow", make_allow(r));
+
+ /* the request finalization will send an EOS, which will flush all
+ * the headers out (including the Allow header)
+ */
+
+ return OK;
+}
+
+AP_DECLARE(void) ap_set_content_type(request_rec *r, const char *ct)
+{
+ if (!ct) {
+ r->content_type = NULL;
+ }
+ else if (!r->content_type || strcmp(r->content_type, ct)) {
+ r->content_type = ct;
+ }
+}
+
+AP_DECLARE(void) ap_set_accept_ranges(request_rec *r)
+{
+ core_dir_config *d = ap_get_core_module_config(r->per_dir_config);
+ apr_table_setn(r->headers_out, "Accept-Ranges",
+ (d->max_ranges == AP_MAXRANGES_NORANGES) ? "none"
+ : "bytes");
+}
+
+static const char *add_optional_notes(request_rec *r,
+ const char *prefix,
+ const char *key,
+ const char *suffix)
+{
+ const char *notes, *result;
+
+ if ((notes = apr_table_get(r->notes, key)) == NULL) {
+ result = apr_pstrcat(r->pool, prefix, suffix, NULL);
+ }
+ else {
+ result = apr_pstrcat(r->pool, prefix, notes, suffix, NULL);
+ }
+
+ return result;
+}
+
+/* construct and return the default error message for a given
+ * HTTP defined error code
+ */
+static const char *get_canned_error_string(int status,
+ request_rec *r,
+ const char *location)
+{
+ apr_pool_t *p = r->pool;
+ const char *error_notes, *h1, *s1;
+
+ switch (status) {
+ case HTTP_MOVED_PERMANENTLY:
+ case HTTP_MOVED_TEMPORARILY:
+ case HTTP_TEMPORARY_REDIRECT:
+ case HTTP_PERMANENT_REDIRECT:
+ return(apr_pstrcat(p,
+ "<p>The document has moved <a href=\"",
+ ap_escape_html(r->pool, location),
+ "\">here</a>.</p>\n",
+ NULL));
+ case HTTP_SEE_OTHER:
+ return(apr_pstrcat(p,
+ "<p>The answer to your request is located "
+ "<a href=\"",
+ ap_escape_html(r->pool, location),
+ "\">here</a>.</p>\n",
+ NULL));
+ case HTTP_USE_PROXY:
+ return("<p>This resource is only accessible "
+ "through the proxy\n"
+ "<br />\nYou will need to configure "
+ "your client to use that proxy.</p>\n");
+ case HTTP_PROXY_AUTHENTICATION_REQUIRED:
+ case HTTP_UNAUTHORIZED:
+ return("<p>This server could not verify that you\n"
+ "are authorized to access the document\n"
+ "requested. Either you supplied the wrong\n"
+ "credentials (e.g., bad password), or your\n"
+ "browser doesn't understand how to supply\n"
+ "the credentials required.</p>\n");
+ case HTTP_BAD_REQUEST:
+ return(add_optional_notes(r,
+ "<p>Your browser sent a request that "
+ "this server could not understand.<br />\n",
+ "error-notes",
+ "</p>\n"));
+ case HTTP_FORBIDDEN:
+ return(add_optional_notes(r, "<p>You don't have permission to access this resource.", "error-notes", "</p>\n"));
+ case HTTP_NOT_FOUND:
+ return("<p>The requested URL was not found on this server.</p>\n");
+ case HTTP_METHOD_NOT_ALLOWED:
+ return(apr_pstrcat(p,
+ "<p>The requested method ",
+ ap_escape_html(r->pool, r->method),
+ " is not allowed for this URL.</p>\n",
+ NULL));
+ case HTTP_NOT_ACCEPTABLE:
+ return(add_optional_notes(r,
+ "<p>An appropriate representation of the requested resource "
+ "could not be found on this server.</p>\n",
+ "variant-list", ""));
+ case HTTP_MULTIPLE_CHOICES:
+ return(add_optional_notes(r, "", "variant-list", ""));
+ case HTTP_LENGTH_REQUIRED:
+ s1 = apr_pstrcat(p,
+ "<p>A request of the requested method ",
+ ap_escape_html(r->pool, r->method),
+ " requires a valid Content-length.<br />\n",
+ NULL);
+ return(add_optional_notes(r, s1, "error-notes", "</p>\n"));
+ case HTTP_PRECONDITION_FAILED:
+ return("<p>The precondition on the request "
+ "for this URL evaluated to false.</p>\n");
+ case HTTP_NOT_IMPLEMENTED:
+ s1 = apr_pstrcat(p,
+ "<p>",
+ ap_escape_html(r->pool, r->method),
+ " not supported for current URL.<br />\n",
+ NULL);
+ return(add_optional_notes(r, s1, "error-notes", "</p>\n"));
+ case HTTP_BAD_GATEWAY:
+ s1 = "<p>The proxy server received an invalid" CRLF
+ "response from an upstream server.<br />" CRLF;
+ return(add_optional_notes(r, s1, "error-notes", "</p>\n"));
+ case HTTP_VARIANT_ALSO_VARIES:
+ return("<p>A variant for the requested "
+ "resource\n<pre>\n"
+ "\n</pre>\nis itself a negotiable resource. "
+ "This indicates a configuration error.</p>\n");
+ case HTTP_REQUEST_TIME_OUT:
+ return("<p>Server timeout waiting for the HTTP request from the client.</p>\n");
+ case HTTP_GONE:
+ return("<p>The requested resource is no longer available on this server"
+ " and there is no forwarding address.\n"
+ "Please remove all references to this resource.</p>\n");
+ case HTTP_REQUEST_ENTITY_TOO_LARGE:
+ return(apr_pstrcat(p,
+ "The requested resource does not allow request data with ",
+ ap_escape_html(r->pool, r->method),
+ " requests, or the amount of data provided in\n"
+ "the request exceeds the capacity limit.\n",
+ NULL));
+ case HTTP_REQUEST_URI_TOO_LARGE:
+ s1 = "<p>The requested URL's length exceeds the capacity\n"
+ "limit for this server.<br />\n";
+ return(add_optional_notes(r, s1, "error-notes", "</p>\n"));
+ case HTTP_UNSUPPORTED_MEDIA_TYPE:
+ return("<p>The supplied request data is not in a format\n"
+ "acceptable for processing by this resource.</p>\n");
+ case HTTP_RANGE_NOT_SATISFIABLE:
+ return("<p>None of the range-specifier values in the Range\n"
+ "request-header field overlap the current extent\n"
+ "of the selected resource.</p>\n");
+ case HTTP_EXPECTATION_FAILED:
+ s1 = apr_table_get(r->headers_in, "Expect");
+ if (s1)
+ s1 = apr_pstrcat(p,
+ "<p>The expectation given in the Expect request-header\n"
+ "field could not be met by this server.\n"
+ "The client sent<pre>\n Expect: ",
+ ap_escape_html(r->pool, s1), "\n</pre>\n",
+ NULL);
+ else
+ s1 = "<p>No expectation was seen, the Expect request-header \n"
+ "field was not presented by the client.\n";
+ return add_optional_notes(r, s1, "error-notes", "</p>"
+ "<p>Only the 100-continue expectation is supported.</p>\n");
+ case HTTP_UNPROCESSABLE_ENTITY:
+ return("<p>The server understands the media type of the\n"
+ "request entity, but was unable to process the\n"
+ "contained instructions.</p>\n");
+ case HTTP_LOCKED:
+ return("<p>The requested resource is currently locked.\n"
+ "The lock must be released or proper identification\n"
+ "given before the method can be applied.</p>\n");
+ case HTTP_FAILED_DEPENDENCY:
+ return("<p>The method could not be performed on the resource\n"
+ "because the requested action depended on another\n"
+ "action and that other action failed.</p>\n");
+ case HTTP_UPGRADE_REQUIRED:
+ return("<p>The requested resource can only be retrieved\n"
+ "using SSL. The server is willing to upgrade the current\n"
+ "connection to SSL, but your client doesn't support it.\n"
+ "Either upgrade your client, or try requesting the page\n"
+ "using https://\n");
+ case HTTP_PRECONDITION_REQUIRED:
+ return("<p>The request is required to be conditional.</p>\n");
+ case HTTP_TOO_MANY_REQUESTS:
+ return("<p>The user has sent too many requests\n"
+ "in a given amount of time.</p>\n");
+ case HTTP_REQUEST_HEADER_FIELDS_TOO_LARGE:
+ return("<p>The server refused this request because\n"
+ "the request header fields are too large.</p>\n");
+ case HTTP_INSUFFICIENT_STORAGE:
+ return("<p>The method could not be performed on the resource\n"
+ "because the server is unable to store the\n"
+ "representation needed to successfully complete the\n"
+ "request. There is insufficient free space left in\n"
+ "your storage allocation.</p>\n");
+ case HTTP_SERVICE_UNAVAILABLE:
+ return("<p>The server is temporarily unable to service your\n"
+ "request due to maintenance downtime or capacity\n"
+ "problems. Please try again later.</p>\n");
+ case HTTP_GATEWAY_TIME_OUT:
+ return("<p>The gateway did not receive a timely response\n"
+ "from the upstream server or application.</p>\n");
+ case HTTP_LOOP_DETECTED:
+ return("<p>The server terminated an operation because\n"
+ "it encountered an infinite loop.</p>\n");
+ case HTTP_NOT_EXTENDED:
+ return("<p>A mandatory extension policy in the request is not\n"
+ "accepted by the server for this resource.</p>\n");
+ case HTTP_NETWORK_AUTHENTICATION_REQUIRED:
+ return("<p>The client needs to authenticate to gain\n"
+ "network access.</p>\n");
+ case HTTP_MISDIRECTED_REQUEST:
+ return("<p>The client needs a new connection for this\n"
+ "request as the requested host name does not match\n"
+ "the Server Name Indication (SNI) in use for this\n"
+ "connection.</p>\n");
+ case HTTP_UNAVAILABLE_FOR_LEGAL_REASONS:
+ return(add_optional_notes(r,
+ "<p>Access to this URL has been denied for legal reasons.<br />\n",
+ "error-notes", "</p>\n"));
+ default: /* HTTP_INTERNAL_SERVER_ERROR */
+ /*
+ * This comparison to expose error-notes could be modified to
+ * use a configuration directive and export based on that
+ * directive. For now "*" is used to designate an error-notes
+ * that is totally safe for any user to see (ie lacks paths,
+ * database passwords, etc.)
+ */
+ if (((error_notes = apr_table_get(r->notes,
+ "error-notes")) != NULL)
+ && (h1 = apr_table_get(r->notes, "verbose-error-to")) != NULL
+ && (strcmp(h1, "*") == 0)) {
+ return(apr_pstrcat(p, error_notes, "<p />\n", NULL));
+ }
+ else {
+ return(apr_pstrcat(p,
+ "<p>The server encountered an internal "
+ "error or\n"
+ "misconfiguration and was unable to complete\n"
+ "your request.</p>\n"
+ "<p>Please contact the server "
+ "administrator at \n ",
+ ap_escape_html(r->pool,
+ r->server->server_admin),
+ " to inform them of the time this "
+ "error occurred,\n"
+ " and the actions you performed just before "
+ "this error.</p>\n"
+ "<p>More information about this error "
+ "may be available\n"
+ "in the server error log.</p>\n",
+ NULL));
+ }
+ /*
+ * It would be nice to give the user the information they need to
+ * fix the problem directly since many users don't have access to
+ * the error_log (think University sites) even though they can easily
+ * get this error by misconfiguring an htaccess file. However, the
+     * error notes tend to include the real file pathname in this case,
+ * which some people consider to be a breach of privacy. Until we
+ * can figure out a way to remove the pathname, leave this commented.
+ *
+ * if ((error_notes = apr_table_get(r->notes,
+ * "error-notes")) != NULL) {
+ * return(apr_pstrcat(p, error_notes, "<p />\n", NULL);
+ * }
+ * else {
+ * return "";
+ * }
+ */
+ }
+}
+
+/* We should have named this send_canned_response, since it is used for any
+ * response that can be generated by the server from the request record.
+ * This includes all 204 (no content), 3xx (redirect), 4xx (client error),
+ * and 5xx (server error) messages that have not been redirected to another
+ * handler via the ErrorDocument feature.
+ */
+AP_DECLARE(void) ap_send_error_response(request_rec *r, int recursive_error)
+{
+ int status = r->status;
+ int idx = ap_index_of_response(status);
+ char *custom_response;
+ const char *location = apr_table_get(r->headers_out, "Location");
+
+ /* At this point, we are starting the response over, so we have to reset
+ * this value.
+ */
+ r->eos_sent = 0;
+
+ /* and we need to get rid of any RESOURCE filters that might be lurking
+ * around, thinking they are in the middle of the original request
+ */
+
+ r->output_filters = r->proto_output_filters;
+
+ ap_run_insert_error_filter(r);
+
+ /* We need to special-case the handling of 204 and 304 responses,
+ * since they have specific HTTP requirements and do not include a
+ * message body. Note that being assbackwards here is not an option.
+ */
+ if (AP_STATUS_IS_HEADER_ONLY(status)) {
+ ap_finalize_request_protocol(r);
+ return;
+ }
+
+ /*
+ * It's possible that the Location field might be in r->err_headers_out
+ * instead of r->headers_out; use the latter if possible, else the
+ * former.
+ */
+ if (location == NULL) {
+ location = apr_table_get(r->err_headers_out, "Location");
+ }
+
+ if (!r->assbackwards) {
+ apr_table_t *tmp = r->headers_out;
+
+ /* For all HTTP/1.x responses for which we generate the message,
+ * we need to avoid inheriting the "normal status" header fields
+ * that may have been set by the request handler before the
+ * error or redirect, except for Location on external redirects.
+ */
+ r->headers_out = r->err_headers_out;
+ r->err_headers_out = tmp;
+ apr_table_clear(r->err_headers_out);
+
+ if (ap_is_HTTP_REDIRECT(status) || (status == HTTP_CREATED)) {
+ if ((location != NULL) && *location) {
+ apr_table_setn(r->headers_out, "Location", location);
+ }
+ else {
+ location = ""; /* avoids coredump when printing, below */
+ }
+ }
+
+ r->content_languages = NULL;
+ r->content_encoding = NULL;
+ r->clength = 0;
+
+ if (apr_table_get(r->subprocess_env,
+ "suppress-error-charset") != NULL) {
+ core_request_config *request_conf =
+ ap_get_core_module_config(r->request_config);
+ request_conf->suppress_charset = 1; /* avoid adding default
+ * charset later
+ */
+ ap_set_content_type(r, "text/html");
+ }
+ else {
+ ap_set_content_type(r, "text/html; charset=iso-8859-1");
+ }
+
+ if ((status == HTTP_METHOD_NOT_ALLOWED)
+ || (status == HTTP_NOT_IMPLEMENTED)) {
+ apr_table_setn(r->headers_out, "Allow", make_allow(r));
+ }
+
+ if (r->header_only) {
+ ap_finalize_request_protocol(r);
+ return;
+ }
+ }
+
+ if ((custom_response = ap_response_code_string(r, idx))) {
+ /*
+ * We have a custom response output. This should only be
+ * a text-string to write back. But if the ErrorDocument
+ * was a local redirect and the requested resource failed
+ * for any reason, the custom_response will still hold the
+ * redirect URL. We don't really want to output this URL
+ * as a text message, so first check the custom response
+ * string to ensure that it is a text-string (using the
+ * same test used in ap_die(), i.e. does it start with a ").
+ *
+ * If it's not a text string, we've got a recursive error or
+ * an external redirect. If it's a recursive error, ap_die passes
+ * us the second error code so we can write both, and has already
+ * backed up to the original error. If it's an external redirect,
+ * it hasn't happened yet; we may never know if it fails.
+ */
+ if (custom_response[0] == '\"') {
+ ap_rputs(custom_response + 1, r);
+ ap_finalize_request_protocol(r);
+ return;
+ }
+ }
+ {
+ const char *title = status_lines[idx];
+ const char *h1;
+
+ /* Accept a status_line set by a module, but only if it begins
+ * with the correct 3 digit status code
+ */
+ if (r->status_line) {
+ char *end;
+ int len = strlen(r->status_line);
+ if (len >= 3
+ && apr_strtoi64(r->status_line, &end, 10) == r->status
+ && (end - 3) == r->status_line
+ && (len < 4 || apr_isspace(r->status_line[3]))
+ && (len < 5 || apr_isalnum(r->status_line[4]))) {
+ /* Since we passed the above check, we know that length three
+ * is equivalent to only a 3 digit numeric http status.
+ * RFC2616 mandates a trailing space, let's add it.
+ * If we have an empty reason phrase, we also add "Unknown Reason".
+ */
+ if (len == 3) {
+ r->status_line = apr_pstrcat(r->pool, r->status_line, " Unknown Reason", NULL);
+ } else if (len == 4) {
+ r->status_line = apr_pstrcat(r->pool, r->status_line, "Unknown Reason", NULL);
+ }
+ title = r->status_line;
+ }
+ }
+
+ /* folks decided they didn't want the error code in the H1 text */
+ h1 = &title[4];
+
+ /* can't count on a charset filter being in place here,
+ * so do ebcdic->ascii translation explicitly (if needed)
+ */
+
+ ap_rvputs_proto_in_ascii(r,
+ DOCTYPE_HTML_2_0
+ "<html><head>\n<title>", title,
+ "</title>\n</head><body>\n<h1>", h1, "</h1>\n",
+ NULL);
+
+ ap_rvputs_proto_in_ascii(r,
+ get_canned_error_string(status, r, location),
+ NULL);
+
+ if (recursive_error) {
+ ap_rvputs_proto_in_ascii(r, "<p>Additionally, a ",
+ status_lines[ap_index_of_response(recursive_error)],
+ "\nerror was encountered while trying to use an "
+ "ErrorDocument to handle the request.</p>\n", NULL);
+ }
+ ap_rvputs_proto_in_ascii(r, ap_psignature("<hr>\n", r), NULL);
+ ap_rvputs_proto_in_ascii(r, "</body></html>\n", NULL);
+ }
+ ap_finalize_request_protocol(r);
+}
+
+/*
+ * Create a new method list with the specified number of preallocated
+ * extension slots.
+ */
+AP_DECLARE(ap_method_list_t *) ap_make_method_list(apr_pool_t *p, int nelts)
+{
+ ap_method_list_t *ml;
+
+ ml = (ap_method_list_t *) apr_palloc(p, sizeof(ap_method_list_t));
+ ml->method_mask = 0;
+ ml->method_list = apr_array_make(p, nelts, sizeof(char *));
+ return ml;
+}
+
+/*
+ * Make a copy of a method list (primarily for subrequests that may
+ * subsequently change it; don't want them changing the parent's, too!).
+ */
+AP_DECLARE(void) ap_copy_method_list(ap_method_list_t *dest,
+ ap_method_list_t *src)
+{
+ int i;
+ char **imethods;
+ char **omethods;
+
+ dest->method_mask = src->method_mask;
+ imethods = (char **) src->method_list->elts;
+ for (i = 0; i < src->method_list->nelts; ++i) {
+ omethods = (char **) apr_array_push(dest->method_list);
+ *omethods = apr_pstrdup(dest->method_list->pool, imethods[i]);
+ }
+}
+
+/*
+ * Return true if the specified HTTP method is in the provided
+ * method list.
+ */
+AP_DECLARE(int) ap_method_in_list(ap_method_list_t *l, const char *method)
+{
+ int methnum;
+
+ /*
+ * If it's one of our known methods, use the shortcut and check the
+ * bitmask.
+ */
+ methnum = ap_method_number_of(method);
+ if (methnum != M_INVALID) {
+ return !!(l->method_mask & (AP_METHOD_BIT << methnum));
+ }
+ /*
+ * Otherwise, see if the method name is in the array of string names.
+ */
+ if ((l->method_list == NULL) || (l->method_list->nelts == 0)) {
+ return 0;
+ }
+
+ return ap_array_str_contains(l->method_list, method);
+}
+
+/*
+ * Add the specified method to a method list (if it isn't already there).
+ */
+AP_DECLARE(void) ap_method_list_add(ap_method_list_t *l, const char *method)
+{
+ int methnum;
+ const char **xmethod;
+
+ /*
+     * If it's one of our known methods, use the shortcut and set the
+     * bit in the bitmask.
+ */
+ methnum = ap_method_number_of(method);
+ if (methnum != M_INVALID) {
+ l->method_mask |= (AP_METHOD_BIT << methnum);
+ return;
+ }
+ /*
+ * Otherwise, see if the method name is in the array of string names.
+ */
+ if (ap_array_str_contains(l->method_list, method)) {
+ return;
+ }
+
+ xmethod = (const char **) apr_array_push(l->method_list);
+ *xmethod = method;
+}
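+
+/* Usage sketch (hypothetical handler code): a handler refusing a method
+ * usually rebuilds r->allowed_methods so the Allow header stays accurate:
+ *
+ *     ap_clear_method_list(r->allowed_methods);
+ *     ap_method_list_add(r->allowed_methods, "GET");
+ *     ap_method_list_add(r->allowed_methods, "OPTIONS");
+ *     ap_method_list_add(r->allowed_methods, "REPORT");
+ *     return HTTP_METHOD_NOT_ALLOWED;
+ */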
+
+/*
+ * Remove the specified method from a method list.
+ */
+AP_DECLARE(void) ap_method_list_remove(ap_method_list_t *l,
+ const char *method)
+{
+ int methnum;
+ char **methods;
+
+ /*
+     * If it's a known method, either built-in or registered
+     * by a module, use the bitmask.
+ */
+ methnum = ap_method_number_of(method);
+ if (methnum != M_INVALID) {
+ l->method_mask &= ~(AP_METHOD_BIT << methnum);
+ return;
+ }
+ /*
+ * Otherwise, see if the method name is in the array of string names.
+ */
+ if (l->method_list->nelts != 0) {
+ int i, j, k;
+ methods = (char **)l->method_list->elts;
+ for (i = 0; i < l->method_list->nelts; ) {
+ if (strcmp(method, methods[i]) == 0) {
+ for (j = i, k = i + 1; k < l->method_list->nelts; ++j, ++k) {
+ methods[j] = methods[k];
+ }
+ --l->method_list->nelts;
+ }
+ else {
+ ++i;
+ }
+ }
+ }
+}
+
+/*
+ * Reset a method list to be completely empty.
+ */
+AP_DECLARE(void) ap_clear_method_list(ap_method_list_t *l)
+{
+ l->method_mask = 0;
+ l->method_list->nelts = 0;
+}
+
diff --git a/modules/http/http_request.c b/modules/http/http_request.c
new file mode 100644
index 0000000..d59cfe2
--- /dev/null
+++ b/modules/http/http_request.c
@@ -0,0 +1,861 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * http_request.c: functions to get and process requests
+ *
+ * Rob McCool 3/21/93
+ *
+ * Thoroughly revamped by rst for Apache. NB this file reads
+ * best from the bottom up.
+ *
+ */
+
+#include "apr_strings.h"
+#include "apr_file_io.h"
+#include "apr_fnmatch.h"
+
+#define APR_WANT_STRFUNC
+#include "apr_want.h"
+
+#include "ap_config.h"
+#include "httpd.h"
+#include "http_config.h"
+#include "http_request.h"
+#include "http_core.h"
+#include "http_protocol.h"
+#include "http_log.h"
+#include "http_main.h"
+#include "util_filter.h"
+#include "util_charset.h"
+#include "scoreboard.h"
+
+#include "mod_core.h"
+
+#if APR_HAVE_STDARG_H
+#include <stdarg.h>
+#endif
+
+APLOG_USE_MODULE(http);
+
+/*****************************************************************
+ *
+ * Mainline request processing...
+ */
+
+/* XXX A cleaner and faster way to do this might be to pass the request_rec
+ * down the filter chain as a parameter. It would need to change for
+ * subrequest vs. main request filters; perhaps the subrequest filter could
+ * make the switch.
+ */
+static void update_r_in_filters(ap_filter_t *f,
+ request_rec *from,
+ request_rec *to)
+{
+ while (f) {
+ if (f->r == from) {
+ f->r = to;
+ }
+ f = f->next;
+ }
+}
+
+static void ap_die_r(int type, request_rec *r, int recursive_error)
+{
+ char *custom_response;
+ request_rec *r_1st_err = r;
+
+ if (type == OK || type == DONE) {
+ ap_finalize_request_protocol(r);
+ return;
+ }
+
+ if (!ap_is_HTTP_VALID_RESPONSE(type)) {
+ ap_filter_t *next;
+
+ /*
+         * Check if we still have the ap_http_header_filter in place. If
+         * this is the case, we should not ignore the error here, because
+         * it means that we have not sent any response at all and never
+         * will. This is bad. Send an internal server error instead.
+ */
+ next = r->output_filters;
+ while (next && (next->frec != ap_http_header_filter_handle)) {
+ next = next->next;
+ }
+
+ /*
+ * If next != NULL then we left the while above because of
+ * next->frec == ap_http_header_filter
+ */
+ if (next) {
+ if (type != AP_FILTER_ERROR) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(01579)
+ "Invalid response status %i", type);
+ }
+ else {
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(02831)
+ "Response from AP_FILTER_ERROR");
+ }
+ type = HTTP_INTERNAL_SERVER_ERROR;
+ }
+ else {
+ return;
+ }
+ }
+
+ /*
+ * The following takes care of Apache redirects to custom response URLs
+ * Note that if we are already dealing with the response to some other
+ * error condition, we just report on the original error, and give up on
+ * any attempt to handle the other thing "intelligently"...
+ */
+ if (recursive_error != HTTP_OK) {
+ while (r_1st_err->prev && (r_1st_err->prev->status != HTTP_OK))
+ r_1st_err = r_1st_err->prev; /* Get back to original error */
+
+ if (r_1st_err != r) {
+ /* The recursive error was caused by an ErrorDocument specifying
+ * an internal redirect to a bad URI. ap_internal_redirect has
+ * changed the filter chains to point to the ErrorDocument's
+ * request_rec. Back out those changes so we can safely use the
+ * original failing request_rec to send the canned error message.
+ *
+ * ap_send_error_response gets rid of existing resource filters
+ * on the output side, so we can skip those.
+ */
+ update_r_in_filters(r_1st_err->proto_output_filters, r, r_1st_err);
+ update_r_in_filters(r_1st_err->input_filters, r, r_1st_err);
+ }
+
+ custom_response = NULL; /* Do NOT retry the custom thing! */
+ }
+ else {
+ int error_index = ap_index_of_response(type);
+ custom_response = ap_response_code_string(r, error_index);
+ recursive_error = 0;
+ }
+
+ r->status = type;
+
+ /*
+ * This test is done here so that none of the auth modules needs to know
+ * about proxy authentication. They treat it like normal auth, and then
+ * we tweak the status.
+ */
+ if (HTTP_UNAUTHORIZED == r->status && PROXYREQ_PROXY == r->proxyreq) {
+ r->status = HTTP_PROXY_AUTHENTICATION_REQUIRED;
+ }
+
+ /* If we don't want to keep the connection, make sure we mark that the
+ * connection is not eligible for keepalive. If we want to keep the
+ * connection, be sure that the request body (if any) has been read.
+ */
+ if (ap_status_drops_connection(r->status)) {
+ r->connection->keepalive = AP_CONN_CLOSE;
+ }
+
+ /*
+ * Two types of custom redirects --- plain text, and URLs. Plain text has
+ * a leading '"', so the URL code, here, is triggered on its absence
+ */
+
+ if (custom_response && custom_response[0] != '"') {
+
+ if (ap_is_url(custom_response)) {
+ /*
+            * The URL isn't local, so let's drop through the rest of this
+            * Apache code and continue with the usual REDIRECT handler.
+ * But note that the client will ultimately see the wrong
+ * status...
+ */
+ r->status = HTTP_MOVED_TEMPORARILY;
+ apr_table_setn(r->headers_out, "Location", custom_response);
+ }
+ else if (custom_response[0] == '/') {
+ const char *error_notes, *original_method;
+ int original_method_number;
+ r->no_local_copy = 1; /* Do NOT send HTTP_NOT_MODIFIED for
+ * error documents! */
+ /*
+ * This redirect needs to be a GET no matter what the original
+ * method was.
+ */
+ apr_table_setn(r->subprocess_env, "REQUEST_METHOD", r->method);
+
+ /*
+ * Provide a special method for modules to communicate
+ * more informative (than the plain canned) messages to us.
+ * Propagate them to ErrorDocuments via the ERROR_NOTES variable:
+ */
+ if ((error_notes = apr_table_get(r->notes,
+ "error-notes")) != NULL) {
+ apr_table_setn(r->subprocess_env, "ERROR_NOTES", error_notes);
+ }
+ original_method = r->method;
+ original_method_number = r->method_number;
+ r->method = "GET";
+ r->method_number = M_GET;
+ ap_internal_redirect(custom_response, r);
+ /* preserve ability to see %<m in the access log */
+ r->method = original_method;
+ r->method_number = original_method_number;
+ return;
+ }
+ else {
+ /*
+ * Dumb user has given us a bad url to redirect to --- fake up
+ * dying with a recursive server error...
+ */
+ recursive_error = HTTP_INTERNAL_SERVER_ERROR;
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(01580)
+ "Invalid error redirection directive: %s",
+ custom_response);
+ }
+ }
+ ap_send_error_response(r_1st_err, recursive_error);
+}
+
+AP_DECLARE(void) ap_die(int type, request_rec *r)
+{
+ ap_die_r(type, r, r->status);
+}
+
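+/* Summary of the return values below (derived from the code, roughly):
+ * APR_SUCCESS means pipelined request data is already available,
+ * APR_EAGAIN means the connection is up but currently idle, APR_NOTFOUND
+ * means more than max_blank_lines leading blank lines were seen (the
+ * connection is marked for closing), and other errors likewise mark the
+ * connection AP_CONN_CLOSE.
+ */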
+AP_DECLARE(apr_status_t) ap_check_pipeline(conn_rec *c, apr_bucket_brigade *bb,
+ unsigned int max_blank_lines)
+{
+ apr_status_t rv = APR_EOF;
+ ap_input_mode_t mode = AP_MODE_SPECULATIVE;
+ unsigned int num_blank_lines = 0;
+ apr_size_t cr = 0;
+ char buf[2];
+
+ while (c->keepalive != AP_CONN_CLOSE && !c->aborted) {
+ apr_size_t len = cr + 1;
+
+ apr_brigade_cleanup(bb);
+ rv = ap_get_brigade(c->input_filters, bb, mode,
+ APR_NONBLOCK_READ, len);
+ if (rv != APR_SUCCESS || APR_BRIGADE_EMPTY(bb)) {
+ if (mode == AP_MODE_READBYTES) {
+ /* Unexpected error, stop with this connection */
+ ap_log_cerror(APLOG_MARK, APLOG_ERR, rv, c, APLOGNO(02967)
+ "Can't consume pipelined empty lines");
+ c->keepalive = AP_CONN_CLOSE;
+ rv = APR_EGENERAL;
+ }
+ else if (rv != APR_SUCCESS && !APR_STATUS_IS_EAGAIN(rv)) {
+ /* Pipe is dead */
+ c->keepalive = AP_CONN_CLOSE;
+ }
+ else {
+ /* Pipe is up and empty */
+ rv = APR_EAGAIN;
+ }
+ break;
+ }
+ if (!max_blank_lines) {
+ apr_off_t n = 0;
+ /* Single read asked, (non-meta-)data available? */
+ rv = apr_brigade_length(bb, 0, &n);
+ if (rv == APR_SUCCESS && n <= 0) {
+ rv = APR_EAGAIN;
+ }
+ break;
+ }
+
+ /* Lookup and consume blank lines */
+ rv = apr_brigade_flatten(bb, buf, &len);
+ if (rv != APR_SUCCESS || len != cr + 1) {
+ int log_level;
+ if (mode == AP_MODE_READBYTES) {
+ /* Unexpected error, stop with this connection */
+ c->keepalive = AP_CONN_CLOSE;
+ log_level = APLOG_ERR;
+ rv = APR_EGENERAL;
+ }
+ else {
+ /* Let outside (non-speculative/blocking) read determine
+ * where this possible failure comes from (metadata,
+ * morphed EOF socket, ...). Debug only here.
+ */
+ log_level = APLOG_DEBUG;
+ rv = APR_SUCCESS;
+ }
+ ap_log_cerror(APLOG_MARK, log_level, rv, c, APLOGNO(02968)
+ "Can't check pipelined data");
+ break;
+ }
+
+ if (mode == AP_MODE_READBYTES) {
+ /* [CR]LF consumed, try next */
+ mode = AP_MODE_SPECULATIVE;
+ cr = 0;
+ }
+ else if (cr) {
+ AP_DEBUG_ASSERT(len == 2 && buf[0] == APR_ASCII_CR);
+ if (buf[1] == APR_ASCII_LF) {
+ /* consume this CRLF */
+ mode = AP_MODE_READBYTES;
+ num_blank_lines++;
+ }
+ else {
+ /* CR(?!LF) is data */
+ break;
+ }
+ }
+ else {
+ if (buf[0] == APR_ASCII_LF) {
+ /* consume this LF */
+ mode = AP_MODE_READBYTES;
+ num_blank_lines++;
+ }
+ else if (buf[0] == APR_ASCII_CR) {
+ cr = 1;
+ }
+ else {
+ /* Not [CR]LF, some data */
+ break;
+ }
+ }
+ if (num_blank_lines > max_blank_lines) {
+ /* Enough blank lines with this connection,
+ * stop and don't recycle it.
+ */
+ c->keepalive = AP_CONN_CLOSE;
+ rv = APR_NOTFOUND;
+ break;
+ }
+ }
+
+ return rv;
+}
+
+#define RETRIEVE_BRIGADE_FROM_POOL(bb, key, pool, allocator) do { \
+ apr_pool_userdata_get((void **)&bb, key, pool); \
+ if (bb == NULL) { \
+ bb = apr_brigade_create(pool, allocator); \
+ apr_pool_userdata_setn((const void *)bb, key, NULL, pool); \
+ } \
+ else { \
+ apr_brigade_cleanup(bb); \
+ } \
+} while(0)
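+
+/* Note: the macro above keeps one brigade per key in the connection pool's
+ * userdata, so the same brigade is re-used for every request handled on the
+ * connection rather than being created anew; an existing brigade is simply
+ * cleaned before re-use.
+ */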
+
+AP_DECLARE(void) ap_process_request_after_handler(request_rec *r)
+{
+ apr_bucket_brigade *bb;
+ apr_bucket *b;
+ conn_rec *c = r->connection;
+ apr_status_t rv;
+
+ /* Send an EOR bucket through the output filter chain. When
+ * this bucket is destroyed, the request will be logged and
+ * its pool will be freed
+ */
+ RETRIEVE_BRIGADE_FROM_POOL(bb, "ap_process_request_after_handler_brigade",
+ c->pool, c->bucket_alloc);
+ b = ap_bucket_eor_create(c->bucket_alloc, r);
+ APR_BRIGADE_INSERT_HEAD(bb, b);
+
+ ap_pass_brigade(c->output_filters, bb);
+
+ /* The EOR bucket has either been handled by an output filter (eg.
+ * deleted or moved to a buffered_bb => no more in bb), or an error
+     * occurred before that (e.g. c->aborted => still in bb) and we ought
+     * to destroy it now. So clean up any remaining bucket along with
+ * the orphan request (if any).
+ */
+ apr_brigade_cleanup(bb);
+
+ /* From here onward, it is no longer safe to reference r
+ * or r->pool, because r->pool may have been destroyed
+ * already by the EOR bucket's cleanup function.
+ */
+
+ /* Check pipeline consuming blank lines, they must not be interpreted as
+ * the next pipelined request, otherwise we would block on the next read
+ * without flushing data, and hence possibly delay pending response(s)
+ * until the next/real request comes in or the keepalive timeout expires.
+ */
+ rv = ap_check_pipeline(c, bb, DEFAULT_LIMIT_BLANK_LINES);
+ c->data_in_input_filters = (rv == APR_SUCCESS);
+ apr_brigade_cleanup(bb);
+
+ if (c->cs)
+ c->cs->state = (c->aborted) ? CONN_STATE_LINGER
+ : CONN_STATE_WRITE_COMPLETION;
+ AP_PROCESS_REQUEST_RETURN((uintptr_t)r, r->uri, r->status);
+ if (ap_extended_status) {
+ ap_time_process_request(c->sbh, STOP_PREQUEST);
+ }
+}
+
+void ap_process_async_request(request_rec *r)
+{
+ conn_rec *c = r->connection;
+ int access_status;
+
+ /* Give quick handlers a shot at serving the request on the fast
+ * path, bypassing all of the other Apache hooks.
+ *
+ * This hook was added to enable serving files out of a URI keyed
+ * content cache ( e.g., Mike Abbott's Quick Shortcut Cache,
+ * described here: http://oss.sgi.com/projects/apache/mod_qsc.html )
+ *
+ * It may have other uses as well, such as routing requests directly to
+ * content handlers that have the ability to grok HTTP and do their
+ * own access checking, etc (e.g. servlet engines).
+ *
+ * Use this hook with extreme care and only if you know what you are
+ * doing.
+ */
+ AP_PROCESS_REQUEST_ENTRY((uintptr_t)r, r->uri);
+ if (ap_extended_status) {
+ ap_time_process_request(r->connection->sbh, START_PREQUEST);
+ }
+
+ if (APLOGrtrace4(r)) {
+ int i;
+ const apr_array_header_t *t_h = apr_table_elts(r->headers_in);
+ const apr_table_entry_t *t_elt = (apr_table_entry_t *)t_h->elts;
+ ap_log_rerror(APLOG_MARK, APLOG_TRACE4, 0, r,
+ "Headers received from client:");
+ for (i = 0; i < t_h->nelts; i++, t_elt++) {
+ ap_log_rerror(APLOG_MARK, APLOG_TRACE4, 0, r, " %s: %s",
+ ap_escape_logitem(r->pool, t_elt->key),
+ ap_escape_logitem(r->pool, t_elt->val));
+ }
+ }
+
+#if APR_HAS_THREADS
+ apr_thread_mutex_create(&r->invoke_mtx, APR_THREAD_MUTEX_DEFAULT, r->pool);
+ apr_thread_mutex_lock(r->invoke_mtx);
+#endif
+ access_status = ap_run_quick_handler(r, 0); /* Not a look-up request */
+ if (access_status == DECLINED) {
+ access_status = ap_process_request_internal(r);
+ if (access_status == OK) {
+ access_status = ap_invoke_handler(r);
+ }
+ }
+
+ if (access_status == SUSPENDED) {
+ /* TODO: Should move these steps into a generic function, so modules
+ * working on a suspended request can also call _ENTRY again.
+ */
+ AP_PROCESS_REQUEST_RETURN((uintptr_t)r, r->uri, access_status);
+ if (ap_extended_status) {
+ ap_time_process_request(c->sbh, STOP_PREQUEST);
+ }
+ if (c->cs)
+ c->cs->state = CONN_STATE_SUSPENDED;
+#if APR_HAS_THREADS
+ apr_thread_mutex_unlock(r->invoke_mtx);
+#endif
+ return;
+ }
+#if APR_HAS_THREADS
+ apr_thread_mutex_unlock(r->invoke_mtx);
+#endif
+
+ ap_die_r(access_status, r, HTTP_OK);
+
+ ap_process_request_after_handler(r);
+}
+
+AP_DECLARE(void) ap_process_request(request_rec *r)
+{
+ apr_bucket_brigade *bb;
+ apr_bucket *b;
+ conn_rec *c = r->connection;
+ apr_status_t rv;
+
+ ap_process_async_request(r);
+
+ if (!c->data_in_input_filters) {
+ RETRIEVE_BRIGADE_FROM_POOL(bb, "ap_process_request_brigade",
+ c->pool, c->bucket_alloc);
+ b = apr_bucket_flush_create(c->bucket_alloc);
+ APR_BRIGADE_INSERT_HEAD(bb, b);
+ rv = ap_pass_brigade(c->output_filters, bb);
+ if (APR_STATUS_IS_TIMEUP(rv)) {
+ /*
+ * Notice a timeout as an error message. This might be
+ * valuable for detecting clients with broken network
+ * connections or possible DoS attacks.
+ */
+ ap_log_cerror(APLOG_MARK, APLOG_INFO, rv, c, APLOGNO(01581)
+ "flushing data to the client");
+ }
+ apr_brigade_cleanup(bb);
+ }
+ if (ap_extended_status) {
+ ap_time_process_request(c->sbh, STOP_PREQUEST);
+ }
+}
+
+static apr_table_t *rename_original_env(apr_pool_t *p, apr_table_t *t)
+{
+ const apr_array_header_t *env_arr = apr_table_elts(t);
+ const apr_table_entry_t *elts = (const apr_table_entry_t *) env_arr->elts;
+ apr_table_t *new = apr_table_make(p, env_arr->nalloc);
+ int i;
+
+ for (i = 0; i < env_arr->nelts; ++i) {
+ if (!elts[i].key)
+ continue;
+ apr_table_setn(new, apr_pstrcat(p, "REDIRECT_", elts[i].key, NULL),
+ elts[i].val);
+ }
+
+ return new;
+}
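+
+/* For example, a REMOTE_USER entry in the original environment becomes
+ * REDIRECT_REMOTE_USER in the table returned above, which is what an
+ * ErrorDocument or internally redirected script ends up seeing.
+ */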
+
+static request_rec *internal_internal_redirect(const char *new_uri,
+ request_rec *r) {
+ int access_status;
+ request_rec *new;
+ const char *vary_header;
+
+ if (ap_is_recursion_limit_exceeded(r)) {
+ ap_die(HTTP_INTERNAL_SERVER_ERROR, r);
+ return NULL;
+ }
+
+ new = (request_rec *) apr_pcalloc(r->pool, sizeof(request_rec));
+
+ new->connection = r->connection;
+ new->server = r->server;
+ new->pool = r->pool;
+
+ /*
+ * A whole lot of this really ought to be shared with http_protocol.c...
+ * another missing cleanup. It's particularly inappropriate to be
+ * setting header_only, etc., here.
+ */
+
+ new->method = r->method;
+ new->method_number = r->method_number;
+ new->allowed_methods = ap_make_method_list(new->pool, 2);
+ ap_parse_uri(new, new_uri);
+ new->parsed_uri.port_str = r->parsed_uri.port_str;
+ new->parsed_uri.port = r->parsed_uri.port;
+
+ new->request_config = ap_create_request_config(r->pool);
+
+ new->per_dir_config = r->server->lookup_defaults;
+
+ new->prev = r;
+ r->next = new;
+
+ new->useragent_addr = r->useragent_addr;
+ new->useragent_ip = r->useragent_ip;
+
+ /* Must have prev and next pointers set before calling create_request
+ * hook.
+ */
+ ap_run_create_request(new);
+
+ /* Inherit the rest of the protocol info... */
+
+ new->the_request = r->the_request;
+
+ new->allowed = r->allowed;
+
+ new->status = r->status;
+ new->assbackwards = r->assbackwards;
+ new->header_only = r->header_only;
+ new->protocol = r->protocol;
+ new->proto_num = r->proto_num;
+ new->hostname = r->hostname;
+ new->request_time = r->request_time;
+ new->main = r->main;
+
+ new->headers_in = r->headers_in;
+ new->trailers_in = r->trailers_in;
+ new->headers_out = apr_table_make(r->pool, 12);
+ if (ap_is_HTTP_REDIRECT(new->status)) {
+ const char *location = apr_table_get(r->headers_out, "Location");
+ if (location)
+ apr_table_setn(new->headers_out, "Location", location);
+ }
+
+ /* A module (like mod_rewrite) can force an internal redirect
+ * to carry over the Vary header (if present).
+ */
+ if (apr_table_get(r->notes, "redirect-keeps-vary")) {
+        if ((vary_header = apr_table_get(r->headers_out, "Vary"))) {
+ apr_table_setn(new->headers_out, "Vary", vary_header);
+ }
+ }
+
+ new->err_headers_out = r->err_headers_out;
+ new->trailers_out = apr_table_make(r->pool, 5);
+ new->subprocess_env = rename_original_env(r->pool, r->subprocess_env);
+ new->notes = apr_table_make(r->pool, 5);
+
+ new->htaccess = r->htaccess;
+ new->no_cache = r->no_cache;
+ new->expecting_100 = r->expecting_100;
+ new->no_local_copy = r->no_local_copy;
+ new->read_length = r->read_length; /* We can only read it once */
+ new->vlist_validator = r->vlist_validator;
+
+ new->proto_output_filters = r->proto_output_filters;
+ new->proto_input_filters = r->proto_input_filters;
+
+ new->input_filters = new->proto_input_filters;
+
+ if (new->main) {
+ ap_filter_t *f, *nextf;
+
+ /* If this is a subrequest, the filter chain may contain a
+ * mixture of filters specific to the old request (r), and
+ * some inherited from r->main. Here, inherit that filter
+ * chain, and remove all those which are specific to the old
+ * request; ensuring the subreq filter is left in place. */
+ new->output_filters = r->output_filters;
+
+ f = new->output_filters;
+ do {
+ nextf = f->next;
+
+ if (f->r == r && f->frec != ap_subreq_core_filter_handle) {
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(01582)
+ "dropping filter '%s' in internal redirect from %s to %s",
+ f->frec->name, r->unparsed_uri, new_uri);
+
+ /* To remove the filter, first set f->r to the *new*
+ * request_rec, so that ->output_filters on 'new' is
+ * changed (if necessary) when removing the filter. */
+ f->r = new;
+ ap_remove_output_filter(f);
+ }
+
+ f = nextf;
+
+ /* Stop at the protocol filters. If a protocol filter has
+ * been newly installed for this resource, better leave it
+ * in place, though it's probably a misconfiguration or
+ * filter bug to get into this state. */
+ } while (f && f != new->proto_output_filters);
+ }
+ else {
+ /* If this is not a subrequest, clear out all
+ * resource-specific filters. */
+ new->output_filters = new->proto_output_filters;
+ }
+
+ update_r_in_filters(new->input_filters, r, new);
+ update_r_in_filters(new->output_filters, r, new);
+
+ apr_table_setn(new->subprocess_env, "REDIRECT_STATUS",
+ apr_itoa(r->pool, r->status));
+
+ /* Begin by presuming any module can make its own path_info assumptions,
+ * until some module interjects and changes the value.
+ */
+ new->used_path_info = AP_REQ_DEFAULT_PATH_INFO;
+
+#if APR_HAS_THREADS
+ new->invoke_mtx = r->invoke_mtx;
+#endif
+
+ /*
+ * XXX: hmm. This is because mod_setenvif and mod_unique_id really need
+ * to do their thing on internal redirects as well. Perhaps this is a
+ * misnamed function.
+ */
+ if ((access_status = ap_post_read_request(new))) {
+ ap_die(access_status, new);
+ return NULL;
+ }
+
+ return new;
+}
+
+/* XXX: Is this function so bogus and fragile that we should deep-6 it? */
+AP_DECLARE(void) ap_internal_fast_redirect(request_rec *rr, request_rec *r)
+{
+ /* We need to tell POOL_DEBUG that we're guaranteeing that rr->pool
+ * will exist as long as r->pool. Otherwise we run into troubles because
+ * some values in this request will be allocated in r->pool, and others in
+ * rr->pool.
+ */
+ apr_pool_join(r->pool, rr->pool);
+ r->proxyreq = rr->proxyreq;
+ r->no_cache = (r->no_cache && rr->no_cache);
+ r->no_local_copy = (r->no_local_copy && rr->no_local_copy);
+ r->mtime = rr->mtime;
+ r->uri = rr->uri;
+ r->filename = rr->filename;
+ r->canonical_filename = rr->canonical_filename;
+ r->path_info = rr->path_info;
+ r->args = rr->args;
+ r->finfo = rr->finfo;
+ r->handler = rr->handler;
+ ap_set_content_type(r, rr->content_type);
+ r->content_encoding = rr->content_encoding;
+ r->content_languages = rr->content_languages;
+ r->per_dir_config = rr->per_dir_config;
+ /* copy output headers from subrequest, but leave negotiation headers */
+ r->notes = apr_table_overlay(r->pool, rr->notes, r->notes);
+ r->headers_out = apr_table_overlay(r->pool, rr->headers_out,
+ r->headers_out);
+ r->err_headers_out = apr_table_overlay(r->pool, rr->err_headers_out,
+ r->err_headers_out);
+ r->trailers_out = apr_table_overlay(r->pool, rr->trailers_out,
+ r->trailers_out);
+ r->subprocess_env = apr_table_overlay(r->pool, rr->subprocess_env,
+ r->subprocess_env);
+
+ r->output_filters = rr->output_filters;
+ r->input_filters = rr->input_filters;
+
+ /* If any filters pointed at the now-defunct rr, we must point them
+ * at our "new" instance of r. In particular, some of rr's structures
+ * will now be bogus (say rr->headers_out). If a filter tried to modify
+ * its f->r structure while it points to rr, the real request_rec
+ * will not get updated. Fix that here.
+ */
+ update_r_in_filters(r->input_filters, rr, r);
+ update_r_in_filters(r->output_filters, rr, r);
+
+ if (r->main) {
+ ap_filter_t *next = r->output_filters;
+ while (next && (next != r->proto_output_filters)) {
+ if (next->frec == ap_subreq_core_filter_handle) {
+ break;
+ }
+ next = next->next;
+ }
+ if (!next || next == r->proto_output_filters) {
+ ap_add_output_filter_handle(ap_subreq_core_filter_handle,
+ NULL, r, r->connection);
+ }
+ }
+ else {
+ /*
+ * We need to check whether we now have the SUBREQ_CORE filter in our
+ * filter chain. If that is the case, we need to remove it, since we are
+ * NOT a subrequest. But we need to keep in mind that the SUBREQ_CORE filter
+ * does not necessarily need to be the first filter in our chain. So we
+ * need to go through the chain. But we only need to walk up the chain
+ * until the proto_output_filters as the SUBREQ_CORE filter is below the
+ * protocol filters.
+ */
+ ap_filter_t *next;
+
+ next = r->output_filters;
+ while (next && (next->frec != ap_subreq_core_filter_handle)
+ && (next != r->proto_output_filters)) {
+ next = next->next;
+ }
+ if (next && (next->frec == ap_subreq_core_filter_handle)) {
+ ap_remove_output_filter(next);
+ }
+ }
+}
+
+AP_DECLARE(void) ap_internal_redirect(const char *new_uri, request_rec *r)
+{
+ int access_status;
+ request_rec *new = internal_internal_redirect(new_uri, r);
+
+ AP_INTERNAL_REDIRECT(r->uri, new_uri);
+
+ /* ap_die was already called if an error occurred */
+ if (!new) {
+ return;
+ }
+
+ access_status = ap_run_quick_handler(new, 0); /* Not a look-up request */
+ if (access_status == DECLINED) {
+ access_status = ap_process_request_internal(new);
+ if (access_status == OK) {
+ access_status = ap_invoke_handler(new);
+ }
+ }
+ ap_die(access_status, new);
+}
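
For context, a minimal usage sketch (not part of the patch) of how a module handler might drive ap_internal_redirect(); the handler name and the "/error-page" target URI are illustrative assumptions.

#include <string.h>
#include "httpd.h"
#include "http_request.h"

static int example_redirect_handler(request_rec *r)
{
    if (strcmp(r->handler, "example-redirect") != 0) {
        return DECLINED;
    }

    /* Hand processing off to another URI.  ap_internal_redirect() runs the
     * quick handler, the internal request processing and the handler for
     * the new request, and calls ap_die() itself, so the caller only
     * reports that the request has been handled. */
    ap_internal_redirect("/error-page", r);
    return OK;
}
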
+
+/* This function is designed for things like actions or CGI scripts, when
+ * using AddHandler, and you want to preserve the content type across
+ * an internal redirect.
+ */
+AP_DECLARE(void) ap_internal_redirect_handler(const char *new_uri, request_rec *r)
+{
+ int access_status;
+ request_rec *new = internal_internal_redirect(new_uri, r);
+
+ /* ap_die was already called if an error occurred */
+ if (!new) {
+ return;
+ }
+
+ if (r->handler)
+ ap_set_content_type(new, r->content_type);
+ access_status = ap_process_request_internal(new);
+ if (access_status == OK) {
+ access_status = ap_invoke_handler(new);
+ }
+ ap_die(access_status, new);
+}
+
+AP_DECLARE(void) ap_allow_methods(request_rec *r, int reset, ...)
+{
+ const char *method;
+ va_list methods;
+
+ /*
+ * Get rid of any current settings if requested; not just the
+ * well-known methods but any extensions as well.
+ */
+ if (reset) {
+ ap_clear_method_list(r->allowed_methods);
+ }
+
+ va_start(methods, reset);
+ while ((method = va_arg(methods, const char *)) != NULL) {
+ ap_method_list_add(r->allowed_methods, method);
+ }
+ va_end(methods);
+}
+
+AP_DECLARE(void) ap_allow_standard_methods(request_rec *r, int reset, ...)
+{
+ int method;
+ va_list methods;
+ apr_int64_t mask;
+
+ /*
+ * Get rid of any current settings if requested; not just the
+ * well-known methods but any extensions as well.
+ */
+ if (reset) {
+ ap_clear_method_list(r->allowed_methods);
+ }
+
+ mask = 0;
+ va_start(methods, reset);
+ while ((method = va_arg(methods, int)) != -1) {
+ mask |= (AP_METHOD_BIT << method);
+ }
+ va_end(methods);
+
+ r->allowed_methods->method_mask |= mask;
+}
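
A minimal sketch of how the two functions above are typically used to populate the Allow list before returning 405. The handler, its method policy and the "PURGE" extension method are invented for illustration.

#include "httpd.h"
#include "http_protocol.h"

static int example_limit_methods(request_rec *r)
{
    if (r->method_number == M_GET || r->method_number == M_POST) {
        return DECLINED;                  /* methods this handler serves */
    }

    /* Reset the list, advertise the standard methods we accept (list
     * terminated by -1), then merge in one extension method by name (list
     * terminated by NULL), so the 405 response can carry an accurate
     * Allow header. */
    ap_allow_standard_methods(r, REPLACE_ALLOW, M_GET, M_POST, M_OPTIONS, -1);
    ap_allow_methods(r, MERGE_ALLOW, "PURGE", NULL);
    return HTTP_METHOD_NOT_ALLOWED;
}
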
diff --git a/modules/http/mod_mime.c b/modules/http/mod_mime.c
new file mode 100644
index 0000000..700f824
--- /dev/null
+++ b/modules/http/mod_mime.c
@@ -0,0 +1,1037 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * http_mime.c: Sends/gets MIME headers for requests
+ *
+ * Rob McCool
+ *
+ */
+
+#include "apr.h"
+#include "apr_strings.h"
+#include "apr_lib.h"
+#include "apr_hash.h"
+
+#define APR_WANT_STRFUNC
+#include "apr_want.h"
+
+#include "ap_config.h"
+#include "httpd.h"
+#include "http_config.h"
+#include "http_log.h"
+#include "http_request.h"
+#include "http_protocol.h"
+
+/* XXXX - fix me / EBCDIC
+ * there was a kludge here which used its
+ * own version of apr_isascii(), indicating that
+ * on some platforms that might be needed.
+ *
+ * #define OS_ASC(c) (c) -- for mere mortals
+ * or
+ * #define OS_ASC(c) (ebcdic2ascii[c]) -- for dino's
+ *
+ * #define apr_isascii(c) ((OS_ASC(c) & 0x80) == 0)
+ */
+
+/* XXXXX - fix me - See note with NOT_PROXY
+ */
+
+typedef struct attrib_info {
+ char *name;
+ int offset;
+} attrib_info;
+
+/* Information to which an extension can be mapped
+ */
+typedef struct extension_info {
+ char *forced_type; /* Additional AddTyped stuff */
+ char *encoding_type; /* Added with AddEncoding... */
+ char *language_type; /* Added with AddLanguage... */
+ char *handler; /* Added with AddHandler... */
+ char *charset_type; /* Added with AddCharset... */
+ char *input_filters; /* Added with AddInputFilter... */
+ char *output_filters; /* Added with AddOutputFilter... */
+} extension_info;
+
+#define MULTIMATCH_UNSET 0
+#define MULTIMATCH_ANY 1
+#define MULTIMATCH_NEGOTIATED 2
+#define MULTIMATCH_HANDLERS 4
+#define MULTIMATCH_FILTERS 8
+
+typedef struct {
+ apr_hash_t *extension_mappings; /* Map from extension name to
+ * extension_info structure */
+
+ apr_array_header_t *remove_mappings; /* A simple list, walked once */
+
+ char *default_language; /* Language if no AddLanguage ext found */
+
+ int multimatch; /* Extensions to include in multiview matching
+ * for filenames, e.g. Filters and Handlers
+ */
+ int use_path_info; /* If set to 0, only use filename.
+ * If set to 1, append PATH_INFO to filename for
+ * lookups.
+ * If set to 2, this value is unset and is
+ * effectively 0.
+ */
+} mime_dir_config;
+
+typedef struct param_s {
+ char *attr;
+ char *val;
+ struct param_s *next;
+} param;
+
+typedef struct {
+ const char *type;
+ apr_size_t type_len;
+ const char *subtype;
+ apr_size_t subtype_len;
+ param *param;
+} content_type;
+
+static char tspecial[] = {
+ '(', ')', '<', '>', '@', ',', ';', ':',
+ '\\', '"', '/', '[', ']', '?', '=',
+ '\0'
+};
+
+module AP_MODULE_DECLARE_DATA mime_module;
+
+static void *create_mime_dir_config(apr_pool_t *p, char *dummy)
+{
+ mime_dir_config *new = apr_palloc(p, sizeof(mime_dir_config));
+
+ new->extension_mappings = NULL;
+ new->remove_mappings = NULL;
+
+ new->default_language = NULL;
+
+ new->multimatch = MULTIMATCH_UNSET;
+
+ new->use_path_info = 2;
+
+ return new;
+}
+/*
+ * Overlay one hash table of extension_mappings onto another
+ */
+static void *overlay_extension_mappings(apr_pool_t *p,
+ const void *key,
+ apr_ssize_t klen,
+ const void *overlay_val,
+ const void *base_val,
+ const void *data)
+{
+ const extension_info *overlay_info = (const extension_info *)overlay_val;
+ const extension_info *base_info = (const extension_info *)base_val;
+ extension_info *new_info = apr_pmemdup(p, base_info, sizeof(extension_info));
+
+ if (overlay_info->forced_type) {
+ new_info->forced_type = overlay_info->forced_type;
+ }
+ if (overlay_info->encoding_type) {
+ new_info->encoding_type = overlay_info->encoding_type;
+ }
+ if (overlay_info->language_type) {
+ new_info->language_type = overlay_info->language_type;
+ }
+ if (overlay_info->handler) {
+ new_info->handler = overlay_info->handler;
+ }
+ if (overlay_info->charset_type) {
+ new_info->charset_type = overlay_info->charset_type;
+ }
+ if (overlay_info->input_filters) {
+ new_info->input_filters = overlay_info->input_filters;
+ }
+ if (overlay_info->output_filters) {
+ new_info->output_filters = overlay_info->output_filters;
+ }
+
+ return new_info;
+}
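
A small standalone sketch (assumed to be built against APR; plain strings stand in for extension_info structures) of the apr_hash_merge() pattern used above: the merger callback decides how a key present in both tables is combined.

#include <stdio.h>
#include "apr_general.h"
#include "apr_pools.h"
#include "apr_hash.h"

static void *prefer_overlay(apr_pool_t *p, const void *key, apr_ssize_t klen,
                            const void *overlay_val, const void *base_val,
                            const void *data)
{
    /* mod_mime copies the base entry and overrides the fields that are set
     * in the overlay; for plain strings the overlay value simply wins. */
    return (void *)overlay_val;
}

int main(void)
{
    apr_pool_t *p;
    apr_hash_t *base, *overlay, *merged;

    apr_initialize();
    apr_pool_create(&p, NULL);

    base = apr_hash_make(p);
    overlay = apr_hash_make(p);
    apr_hash_set(base, "html", APR_HASH_KEY_STRING, "text/html");
    apr_hash_set(overlay, "html", APR_HASH_KEY_STRING, "application/xhtml+xml");

    merged = apr_hash_merge(p, overlay, base, prefer_overlay, NULL);
    printf("html -> %s\n",
           (const char *)apr_hash_get(merged, "html", APR_HASH_KEY_STRING));

    apr_terminate();
    return 0;
}
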
+
+/* Member is the offset within an extension_info of the pointer to reset
+ */
+static void remove_items(apr_pool_t *p, apr_array_header_t *remove,
+ apr_hash_t *mappings)
+{
+ attrib_info *suffix = (attrib_info *) remove->elts;
+ int i;
+ for (i = 0; i < remove->nelts; i++) {
+ extension_info *exinfo = apr_hash_get(mappings,
+ suffix[i].name,
+ APR_HASH_KEY_STRING);
+ if (exinfo && *(const char**)((char *)exinfo + suffix[i].offset)) {
+ extension_info *copyinfo = exinfo;
+ exinfo = apr_pmemdup(p, copyinfo, sizeof(*exinfo));
+ apr_hash_set(mappings, suffix[i].name,
+ APR_HASH_KEY_STRING, exinfo);
+
+ *(const char**)((char *)exinfo + suffix[i].offset) = NULL;
+ }
+ }
+}
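
The offset arithmetic above can look opaque, so here is a standalone sketch of the same technique in plain C (the struct and values are invented): the member to clear is identified only by its byte offset, which the command table records via APR_OFFSETOF.

#include <stdio.h>
#include <stddef.h>

struct exinfo {
    const char *forced_type;
    const char *handler;
};

int main(void)
{
    struct exinfo e = { "text/html", "server-parsed" };
    size_t offset = offsetof(struct exinfo, handler);

    /* Same pattern as remove_items(): treat the struct as raw bytes and
     * reach the chosen member through its recorded offset. */
    const char **member = (const char **)((char *)&e + offset);
    printf("before: %s\n", *member);
    *member = NULL;                      /* "remove" the handler mapping */
    printf("after: %s\n", *member ? *member : "(null)");
    return 0;
}
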
+
+static void *merge_mime_dir_configs(apr_pool_t *p, void *basev, void *addv)
+{
+ mime_dir_config *base = (mime_dir_config *)basev;
+ mime_dir_config *add = (mime_dir_config *)addv;
+ mime_dir_config *new = apr_palloc(p, sizeof(mime_dir_config));
+
+ if (base->extension_mappings && add->extension_mappings) {
+ new->extension_mappings = apr_hash_merge(p, add->extension_mappings,
+ base->extension_mappings,
+ overlay_extension_mappings,
+ NULL);
+ }
+ else {
+ if (base->extension_mappings == NULL) {
+ new->extension_mappings = add->extension_mappings;
+ }
+ else {
+ new->extension_mappings = base->extension_mappings;
+ }
+ /* We may not be merging the tables, but if we might change
+ * an exinfo member, then we are about to trounce it anyway.
+ * We must keep a copy for safety.
+ */
+ if (new->extension_mappings && add->remove_mappings) {
+ new->extension_mappings =
+ apr_hash_copy(p, new->extension_mappings);
+ }
+ }
+
+ if (new->extension_mappings) {
+ if (add->remove_mappings)
+ remove_items(p, add->remove_mappings, new->extension_mappings);
+ }
+ new->remove_mappings = NULL;
+
+ new->default_language = add->default_language ?
+ add->default_language : base->default_language;
+
+ new->multimatch = (add->multimatch != MULTIMATCH_UNSET) ?
+ add->multimatch : base->multimatch;
+
+ if ((add->use_path_info & 2) == 0) {
+ new->use_path_info = add->use_path_info;
+ }
+ else {
+ new->use_path_info = base->use_path_info;
+ }
+
+ return new;
+}
+
+static const char *add_extension_info(cmd_parms *cmd, void *m_,
+ const char *value_, const char* ext)
+{
+ mime_dir_config *m=m_;
+ extension_info *exinfo;
+ int offset = (int) (long) cmd->info;
+ char *key = apr_pstrdup(cmd->temp_pool, ext);
+ char *value = apr_pstrdup(cmd->pool, value_);
+ ap_str_tolower(value);
+ ap_str_tolower(key);
+
+ if (*key == '.') {
+ ++key;
+ }
+ if (!m->extension_mappings) {
+ m->extension_mappings = apr_hash_make(cmd->pool);
+ exinfo = NULL;
+ }
+ else {
+ exinfo = (extension_info*)apr_hash_get(m->extension_mappings, key,
+ APR_HASH_KEY_STRING);
+ }
+ if (!exinfo) {
+ exinfo = apr_pcalloc(cmd->pool, sizeof(extension_info));
+ key = apr_pstrdup(cmd->pool, key);
+ apr_hash_set(m->extension_mappings, key, APR_HASH_KEY_STRING, exinfo);
+ }
+ *(const char**)((char *)exinfo + offset) = value;
+ return NULL;
+}
+
+/*
+ * As RemoveType should also override the info from TypesConfig, we add an
+ * empty string as type instead of actually removing the type.
+ */
+static const char *remove_extension_type(cmd_parms *cmd, void *m_,
+ const char *ext)
+{
+ return add_extension_info(cmd, m_, "", ext);
+}
+
+/*
+ * Note that handler names are un-added with each per_dir_config merge.
+ * This keeps the association from being inherited, but does not
+ * prevent it from being re-added at a subordinate level.
+ */
+static const char *remove_extension_info(cmd_parms *cmd, void *m_,
+ const char *ext)
+{
+ mime_dir_config *m = (mime_dir_config *) m_;
+ attrib_info *suffix;
+ if (*ext == '.') {
+ ++ext;
+ }
+ if (!m->remove_mappings) {
+ m->remove_mappings = apr_array_make(cmd->pool, 4, sizeof(*suffix));
+ }
+ suffix = (attrib_info *)apr_array_push(m->remove_mappings);
+ suffix->name = apr_pstrdup(cmd->pool, ext);
+ ap_str_tolower(suffix->name);
+ suffix->offset = (int) (long) cmd->info;
+ return NULL;
+}
+
+/* The sole bit of server configuration that the MIME module has is
+ * the name of its config file, so...
+ */
+
+static const char *set_types_config(cmd_parms *cmd, void *dummy,
+ const char *arg)
+{
+ ap_set_module_config(cmd->server->module_config, &mime_module,
+ (void *)arg);
+ return NULL;
+}
+
+static const char *multiviews_match(cmd_parms *cmd, void *m_,
+ const char *include)
+{
+ mime_dir_config *m = (mime_dir_config *) m_;
+ const char *errmsg;
+
+ errmsg = ap_check_cmd_context(cmd, NOT_IN_LOCATION);
+ if (errmsg != NULL) {
+ return errmsg;
+ }
+
+ if (strcasecmp(include, "Any") == 0) {
+ if (m->multimatch && (m->multimatch & ~MULTIMATCH_ANY)) {
+ return "Any is incompatible with NegotiatedOnly, "
+ "Filters and Handlers";
+ }
+ m->multimatch |= MULTIMATCH_ANY;
+ }
+ else if (strcasecmp(include, "NegotiatedOnly") == 0) {
+ if (m->multimatch && (m->multimatch & ~MULTIMATCH_NEGOTIATED)) {
+ return "NegotiatedOnly is incompatible with Any, "
+ "Filters and Handlers";
+ }
+ m->multimatch |= MULTIMATCH_NEGOTIATED;
+ }
+ else if (strcasecmp(include, "Filters") == 0) {
+ if (m->multimatch && (m->multimatch & (MULTIMATCH_NEGOTIATED
+ | MULTIMATCH_ANY))) {
+ return "Filters is incompatible with Any and NegotiatedOnly";
+ }
+ m->multimatch |= MULTIMATCH_FILTERS;
+ }
+ else if (strcasecmp(include, "Handlers") == 0) {
+ if (m->multimatch && (m->multimatch & (MULTIMATCH_NEGOTIATED
+ | MULTIMATCH_ANY))) {
+ return "Handlers is incompatible with Any and NegotiatedOnly";
+ }
+ m->multimatch |= MULTIMATCH_HANDLERS;
+ }
+ else {
+ return apr_psprintf(cmd->pool, "Unrecognized option '%s'", include);
+ }
+
+ return NULL;
+}
+
+static const command_rec mime_cmds[] =
+{
+ AP_INIT_ITERATE2("AddCharset", add_extension_info,
+ (void *)APR_OFFSETOF(extension_info, charset_type), OR_FILEINFO,
+ "a charset (e.g., iso-2022-jp), followed by one or more "
+ "file extensions"),
+ AP_INIT_ITERATE2("AddEncoding", add_extension_info,
+ (void *)APR_OFFSETOF(extension_info, encoding_type), OR_FILEINFO,
+ "an encoding (e.g., gzip), followed by one or more file extensions"),
+ AP_INIT_ITERATE2("AddHandler", add_extension_info,
+ (void *)APR_OFFSETOF(extension_info, handler), OR_FILEINFO,
+ "a handler name followed by one or more file extensions"),
+ AP_INIT_ITERATE2("AddInputFilter", add_extension_info,
+ (void *)APR_OFFSETOF(extension_info, input_filters), OR_FILEINFO,
+ "input filter name (or ; delimited names) followed by one or "
+ "more file extensions"),
+ AP_INIT_ITERATE2("AddLanguage", add_extension_info,
+ (void *)APR_OFFSETOF(extension_info, language_type), OR_FILEINFO,
+ "a language (e.g., fr), followed by one or more file extensions"),
+ AP_INIT_ITERATE2("AddOutputFilter", add_extension_info,
+ (void *)APR_OFFSETOF(extension_info, output_filters), OR_FILEINFO,
+ "output filter name (or ; delimited names) followed by one or "
+ "more file extensions"),
+ AP_INIT_ITERATE2("AddType", add_extension_info,
+ (void *)APR_OFFSETOF(extension_info, forced_type), OR_FILEINFO,
+ "a mime type followed by one or more file extensions"),
+ AP_INIT_TAKE1("DefaultLanguage", ap_set_string_slot,
+ (void*)APR_OFFSETOF(mime_dir_config, default_language), OR_FILEINFO,
+ "language to use for documents with no other language file extension"),
+ AP_INIT_ITERATE("MultiviewsMatch", multiviews_match, NULL, OR_FILEINFO,
+ "NegotiatedOnly (default), Handlers and/or Filters, or Any"),
+ AP_INIT_ITERATE("RemoveCharset", remove_extension_info,
+ (void *)APR_OFFSETOF(extension_info, charset_type), OR_FILEINFO,
+ "one or more file extensions"),
+ AP_INIT_ITERATE("RemoveEncoding", remove_extension_info,
+ (void *)APR_OFFSETOF(extension_info, encoding_type), OR_FILEINFO,
+ "one or more file extensions"),
+ AP_INIT_ITERATE("RemoveHandler", remove_extension_info,
+ (void *)APR_OFFSETOF(extension_info, handler), OR_FILEINFO,
+ "one or more file extensions"),
+ AP_INIT_ITERATE("RemoveInputFilter", remove_extension_info,
+ (void *)APR_OFFSETOF(extension_info, input_filters), OR_FILEINFO,
+ "one or more file extensions"),
+ AP_INIT_ITERATE("RemoveLanguage", remove_extension_info,
+ (void *)APR_OFFSETOF(extension_info, language_type), OR_FILEINFO,
+ "one or more file extensions"),
+ AP_INIT_ITERATE("RemoveOutputFilter", remove_extension_info,
+ (void *)APR_OFFSETOF(extension_info, output_filters), OR_FILEINFO,
+ "one or more file extensions"),
+ AP_INIT_ITERATE("RemoveType", remove_extension_type,
+ (void *)APR_OFFSETOF(extension_info, forced_type), OR_FILEINFO,
+ "one or more file extensions"),
+ AP_INIT_TAKE1("TypesConfig", set_types_config, NULL, RSRC_CONF,
+ "the MIME types config file"),
+ AP_INIT_FLAG("ModMimeUsePathInfo", ap_set_flag_slot,
+ (void *)APR_OFFSETOF(mime_dir_config, use_path_info), ACCESS_CONF,
+ "Set to 'yes' to allow mod_mime to use path info for type checking"),
+ {NULL}
+};
+
+static apr_hash_t *mime_type_extensions;
+
+static int mime_post_config(apr_pool_t *p, apr_pool_t *plog, apr_pool_t *ptemp, server_rec *s)
+{
+ ap_configfile_t *f;
+ char l[MAX_STRING_LEN];
+ const char *types_confname = ap_get_module_config(s->module_config,
+ &mime_module);
+ apr_status_t status;
+
+ if (!types_confname) {
+ types_confname = AP_TYPES_CONFIG_FILE;
+ }
+
+ types_confname = ap_server_root_relative(p, types_confname);
+ if (!types_confname) {
+ ap_log_error(APLOG_MARK, APLOG_ERR, APR_EBADPATH, s, APLOGNO(01596)
+ "Invalid mime types config path %s",
+ (const char *)ap_get_module_config(s->module_config,
+ &mime_module));
+ return HTTP_INTERNAL_SERVER_ERROR;
+ }
+ if ((status = ap_pcfg_openfile(&f, ptemp, types_confname))
+ != APR_SUCCESS) {
+ ap_log_error(APLOG_MARK, APLOG_ERR, status, s, APLOGNO(01597)
+ "could not open mime types config file %s.",
+ types_confname);
+ return HTTP_INTERNAL_SERVER_ERROR;
+ }
+
+ mime_type_extensions = apr_hash_make(p);
+
+ while (!(ap_cfg_getline(l, MAX_STRING_LEN, f))) {
+ const char *ll = l, *ct;
+
+ if (l[0] == '#') {
+ continue;
+ }
+ ct = ap_getword_conf(p, &ll);
+
+ while (ll[0]) {
+ char *ext = ap_getword_conf(p, &ll);
+ ap_str_tolower(ext);
+ apr_hash_set(mime_type_extensions, ext, APR_HASH_KEY_STRING, ct);
+ }
+ }
+ ap_cfg_closefile(f);
+ return OK;
+}
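
For reference, a standalone sketch of how the TypesConfig loop above consumes one mime.types line: the first word is the media type and every following word is an extension mapped to it. The sample line is an assumption, and strtok_r() merely stands in for ap_getword_conf().

#include <stdio.h>
#include <string.h>

int main(void)
{
    char line[] = "text/html\thtml htm";
    char *save = NULL;
    const char *type = strtok_r(line, " \t", &save);
    const char *ext;

    while ((ext = strtok_r(NULL, " \t", &save)) != NULL) {
        /* mod_mime lower-cases ext and stores type keyed by ext. */
        printf("%s -> %s\n", ext, type);
    }
    return 0;
}
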
+
+static const char *zap_sp(const char *s)
+{
+ if (s == NULL) {
+ return (NULL);
+ }
+ if (*s == '\0') {
+ return (s);
+ }
+
+ /* skip prefixed white space */
+ for (; *s == ' ' || *s == '\t' || *s == '\n'; s++)
+ ;
+
+ return (s);
+}
+
+static char *zap_sp_and_dup(apr_pool_t *p, const char *start,
+ const char *end, apr_size_t *len)
+{
+ while ((start < end) && apr_isspace(*start)) {
+ start++;
+ }
+ while ((end > start) && apr_isspace(*(end - 1))) {
+ end--;
+ }
+ if (len) {
+ *len = end - start;
+ }
+ return apr_pstrmemdup(p, start, end - start);
+}
+
+static int is_token(char c)
+{
+ int res;
+
+ res = (apr_isascii(c) && apr_isgraph(c)
+ && (strchr(tspecial, c) == NULL)) ? 1 : -1;
+ return res;
+}
+
+static int is_qtext(char c)
+{
+ int res;
+
+ res = (apr_isascii(c) && (c != '"') && (c != '\\') && (c != '\n'))
+ ? 1 : -1;
+ return res;
+}
+
+static int is_quoted_pair(const char *s)
+{
+ int res = -1;
+ int c;
+
+ if (*s == '\\') {
+ c = (int) *(s + 1);
+ if (c && apr_isascii(c)) {
+ res = 1;
+ }
+ }
+ return (res);
+}
+
+static content_type *analyze_ct(request_rec *r, const char *s)
+{
+ const char *cp, *mp;
+ char *attribute, *value;
+ int quoted = 0;
+ server_rec * ss = r->server;
+ apr_pool_t * p = r->pool;
+
+ content_type *ctp;
+ param *pp, *npp;
+
+ /* initialize ctp */
+ ctp = (content_type *)apr_palloc(p, sizeof(content_type));
+ ctp->type = NULL;
+ ctp->subtype = NULL;
+ ctp->param = NULL;
+
+ mp = s;
+
+ /* getting a type */
+ cp = mp;
+ while (apr_isspace(*cp)) {
+ cp++;
+ }
+ if (!*cp) {
+ ap_log_error(APLOG_MARK, APLOG_WARNING, 0, ss, APLOGNO(01598)
+ "mod_mime: analyze_ct: cannot get media type from '%s'",
+ (const char *) mp);
+ return (NULL);
+ }
+ ctp->type = cp;
+ do {
+ cp++;
+ } while (*cp && (*cp != '/') && !apr_isspace(*cp) && (*cp != ';'));
+ if (!*cp || (*cp == ';')) {
+ ap_log_error(APLOG_MARK, APLOG_WARNING, 0, ss, APLOGNO(01599)
+ "Cannot get media type from '%s'",
+ (const char *) mp);
+ return (NULL);
+ }
+ while (apr_isspace(*cp)) {
+ cp++;
+ }
+ if (*cp != '/') {
+ ap_log_error(APLOG_MARK, APLOG_WARNING, 0, ss, APLOGNO(01600)
+ "mod_mime: analyze_ct: cannot get media type from '%s'",
+ (const char *) mp);
+ return (NULL);
+ }
+ ctp->type_len = cp - ctp->type;
+
+ cp++; /* skip the '/' */
+
+ /* getting a subtype */
+ while (apr_isspace(*cp)) {
+ cp++;
+ }
+ if (!*cp) {
+ ap_log_error(APLOG_MARK, APLOG_WARNING, 0, ss, APLOGNO(01601)
+ "Cannot get media subtype.");
+ return (NULL);
+ }
+ ctp->subtype = cp;
+ do {
+ cp++;
+ } while (*cp && !apr_isspace(*cp) && (*cp != ';'));
+ ctp->subtype_len = cp - ctp->subtype;
+ while (apr_isspace(*cp)) {
+ cp++;
+ }
+
+ if (*cp == '\0') {
+ return (ctp);
+ }
+
+ /* getting parameters */
+ cp++; /* skip the ';' */
+ cp = zap_sp(cp);
+ if (cp == NULL || *cp == '\0') {
+ ap_log_error(APLOG_MARK, APLOG_WARNING, 0, ss, APLOGNO(01602)
+ "Cannot get media parameter.");
+ return (NULL);
+ }
+ mp = cp;
+ attribute = NULL;
+ value = NULL;
+
+ while (cp != NULL && *cp != '\0') {
+ if (attribute == NULL) {
+ if (is_token(*cp) > 0) {
+ cp++;
+ continue;
+ }
+ else if (*cp == ' ' || *cp == '\t' || *cp == '\n') {
+ cp++;
+ continue;
+ }
+ else if (*cp == '=') {
+ attribute = zap_sp_and_dup(p, mp, cp, NULL);
+ if (attribute == NULL || *attribute == '\0') {
+ ap_log_error(APLOG_MARK, APLOG_WARNING, 0, ss, APLOGNO(01603)
+ "Cannot get media parameter.");
+ return (NULL);
+ }
+ cp++;
+ cp = zap_sp(cp);
+ if (cp == NULL || *cp == '\0') {
+ ap_log_error(APLOG_MARK, APLOG_WARNING, 0, ss, APLOGNO(01604)
+ "Cannot get media parameter.");
+ return (NULL);
+ }
+ mp = cp;
+ continue;
+ }
+ else {
+ ap_log_error(APLOG_MARK, APLOG_WARNING, 0, ss, APLOGNO(01605)
+ "Cannot get media parameter.");
+ return (NULL);
+ }
+ }
+ else {
+ if (mp == cp) {
+ if (*cp == '"') {
+ quoted = 1;
+ cp++;
+ }
+ else {
+ quoted = 0;
+ }
+ }
+ if (quoted > 0) {
+ while (quoted && *cp != '\0') {
+ if (is_qtext(*cp) > 0) {
+ cp++;
+ }
+ else if (is_quoted_pair(cp) > 0) {
+ cp += 2;
+ }
+ else if (*cp == '"') {
+ cp++;
+ while (*cp == ' ' || *cp == '\t' || *cp == '\n') {
+ cp++;
+ }
+ if (*cp != ';' && *cp != '\0') {
+ ap_log_error(APLOG_MARK, APLOG_WARNING, 0, ss, APLOGNO(01606)
+ "Cannot get media parameter.");
+ return(NULL);
+ }
+ quoted = 0;
+ }
+ else {
+ ap_log_error(APLOG_MARK, APLOG_WARNING, 0, ss, APLOGNO(01607)
+ "Cannot get media parameter.");
+ return (NULL);
+ }
+ }
+ }
+ else {
+ while (1) {
+ if (is_token(*cp) > 0) {
+ cp++;
+ }
+ else if (*cp == '\0' || *cp == ';') {
+ break;
+ }
+ else {
+ ap_log_error(APLOG_MARK, APLOG_WARNING, 0, ss, APLOGNO(01608)
+ "Cannot get media parameter.");
+ return (NULL);
+ }
+ }
+ }
+ value = zap_sp_and_dup(p, mp, cp, NULL);
+ if (value == NULL || *value == '\0') {
+ ap_log_error(APLOG_MARK, APLOG_WARNING, 0, ss, APLOGNO(01609)
+ "Cannot get media parameter.");
+ return (NULL);
+ }
+
+ pp = apr_palloc(p, sizeof(param));
+ pp->attr = attribute;
+ pp->val = value;
+ pp->next = NULL;
+
+ if (ctp->param == NULL) {
+ ctp->param = pp;
+ }
+ else {
+ npp = ctp->param;
+ while (npp->next) {
+ npp = npp->next;
+ }
+ npp->next = pp;
+ }
+ quoted = 0;
+ attribute = NULL;
+ value = NULL;
+ if (*cp == '\0') {
+ break;
+ }
+ cp++;
+ mp = cp;
+ }
+ }
+ return (ctp);
+}
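
A hedged in-file sketch of what the parser yields for a typical Content-Type value; it assumes placement in this translation unit (so the static analyze_ct() is visible) and the sample header value is purely illustrative. Note that type and subtype point into the original string, hence the length-limited formats.

static void log_ct_parts(request_rec *r)
{
    content_type *ctp = analyze_ct(r, "text/html; charset=ISO-8859-4");
    param *pp;

    if (!ctp) {
        return;
    }
    ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r,
                  "type=%.*s subtype=%.*s",
                  (int)ctp->type_len, ctp->type,
                  (int)ctp->subtype_len, ctp->subtype);
    for (pp = ctp->param; pp; pp = pp->next) {
        ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r,
                      "param %s=%s", pp->attr, pp->val);
    }
}
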
+
+/*
+ * find_ct is the hook routine for determining content-type and other
+ * MIME-related metadata. It assumes that r->filename has already been
+ * set and stat has been called for r->finfo. It also assumes that the
+ * non-path base file name is not the empty string unless it is a dir.
+ */
+static int find_ct(request_rec *r)
+{
+ mime_dir_config *conf;
+ apr_array_header_t *exception_list;
+ char *ext;
+ const char *fn, *fntmp, *type, *charset = NULL, *resource_name, *qm;
+ int found_metadata = 0;
+
+ if (r->finfo.filetype == APR_DIR) {
+ ap_set_content_type(r, DIR_MAGIC_TYPE);
+ return OK;
+ }
+
+ if (!r->filename) {
+ return DECLINED;
+ }
+
+ conf = (mime_dir_config *)ap_get_module_config(r->per_dir_config,
+ &mime_module);
+ exception_list = apr_array_make(r->pool, 2, sizeof(char *));
+
+ /* If use_path_info is explicitly set to on (value & 1 == 1), append. */
+ if (conf->use_path_info & 1) {
+ resource_name = apr_pstrcat(r->pool, r->filename, r->path_info, NULL);
+ }
+ /*
+ * In the reverse proxy case r->filename might contain a query string if
+ * the nocanon option was used with ProxyPass.
+ * If this is the case, cut off the query string, as the last parameter in
+ * the query string might end in an extension we care about, but
+ * we only want to match against path components, not against query
+ * parameters.
+ */
+ else if ((r->proxyreq == PROXYREQ_REVERSE)
+ && (apr_table_get(r->notes, "proxy-nocanon"))
+ && ((qm = ap_strchr_c(r->filename, '?')) != NULL)) {
+ resource_name = apr_pstrmemdup(r->pool, r->filename, qm - r->filename);
+ }
+ else {
+ resource_name = r->filename;
+ }
+
+ /* Always drop the path leading up to the file name.
+ */
+ if ((fn = ap_strrchr_c(resource_name, '/')) == NULL) {
+ fn = resource_name;
+ }
+ else {
+ ++fn;
+ }
+
+ /* The exception list keeps track of those filename components that
+ * are not associated with extensions indicating metadata.
+ * The base name is always the first exception (i.e., "txt.html" has
+ * a basename of "txt" even though it might look like an extension).
+ * Leading dots are considered to be part of the base name (a file named
+ * ".png" is likely not a png file but just a hidden file called png).
+ */
+ fntmp = fn;
+ while (*fntmp == '.')
+ fntmp++;
+ fntmp = ap_strchr_c(fntmp, '.');
+ if (fntmp) {
+ ext = apr_pstrmemdup(r->pool, fn, fntmp - fn);
+ fn = fntmp + 1;
+ }
+ else {
+ ext = apr_pstrdup(r->pool, fn);
+ fn += strlen(fn);
+ }
+
+ *((const char **)apr_array_push(exception_list)) = ext;
+
+ /* Parse filename extensions which can be in any order
+ */
+ while (*fn && (ext = ap_getword(r->pool, &fn, '.'))) {
+ const extension_info *exinfo = NULL;
+ int found;
+ char *extcase;
+
+ if (*ext == '\0') { /* ignore empty extensions "bad..html" */
+ continue;
+ }
+
+ found = 0;
+
+ /* Save the ext in extcase before converting it to lower case.
+ */
+ extcase = apr_pstrdup(r->pool, ext);
+ ap_str_tolower(ext);
+
+ if (conf->extension_mappings != NULL) {
+ exinfo = (extension_info*)apr_hash_get(conf->extension_mappings,
+ ext, APR_HASH_KEY_STRING);
+ }
+
+ if (exinfo == NULL || !exinfo->forced_type) {
+ if ((type = apr_hash_get(mime_type_extensions, ext,
+ APR_HASH_KEY_STRING)) != NULL) {
+ ap_set_content_type(r, (char*) type);
+ found = 1;
+ }
+ }
+
+ if (exinfo != NULL) {
+
+ /* empty string is treated as special case for RemoveType */
+ if (exinfo->forced_type && *exinfo->forced_type) {
+ ap_set_content_type(r, exinfo->forced_type);
+ found = 1;
+ }
+
+ if (exinfo->charset_type) {
+ charset = exinfo->charset_type;
+ found = 1;
+ }
+ if (exinfo->language_type) {
+ if (!r->content_languages) {
+ r->content_languages = apr_array_make(r->pool, 2,
+ sizeof(char *));
+ }
+ *((const char **)apr_array_push(r->content_languages))
+ = exinfo->language_type;
+ found = 1;
+ }
+ if (exinfo->encoding_type) {
+ if (!r->content_encoding) {
+ r->content_encoding = exinfo->encoding_type;
+ }
+ else {
+ /* XXX should eliminate duplicate entities
+ *
+ * ah no. Order is important and double encoding is neither
+ * forbidden nor impossible. -- nd
+ */
+ r->content_encoding = apr_pstrcat(r->pool,
+ r->content_encoding,
+ ", ",
+ exinfo->encoding_type,
+ NULL);
+ }
+ found = 1;
+ }
+ /* The following extensions are not 'Found'. That is, they don't
+ * make any contribution to metadata negotiation, so they must have
+ * been explicitly requested by name.
+ */
+ if (exinfo->handler && r->proxyreq == PROXYREQ_NONE) {
+ r->handler = exinfo->handler;
+ if (conf->multimatch & MULTIMATCH_HANDLERS) {
+ found = 1;
+ }
+ }
+ /* XXX Two significant problems: 1) we don't check whether we are
+ * setting redundant filters; 2) we insert these in the types
+ * config hook, which may be too early (dunno).
+ */
+ if (exinfo->input_filters) {
+ const char *filter, *filters = exinfo->input_filters;
+ while (*filters
+ && (filter = ap_getword(r->pool, &filters, ';'))) {
+ ap_add_input_filter(filter, NULL, r, r->connection);
+ }
+ if (conf->multimatch & MULTIMATCH_FILTERS) {
+ found = 1;
+ }
+ }
+ if (exinfo->output_filters) {
+ const char *filter, *filters = exinfo->output_filters;
+ while (*filters
+ && (filter = ap_getword(r->pool, &filters, ';'))) {
+ ap_add_output_filter(filter, NULL, r, r->connection);
+ }
+ if (conf->multimatch & MULTIMATCH_FILTERS) {
+ found = 1;
+ }
+ }
+ }
+
+ if (found || (conf->multimatch & MULTIMATCH_ANY)) {
+ found_metadata = 1;
+ }
+ else {
+ *((const char **) apr_array_push(exception_list)) = extcase;
+ }
+ }
+
+ /*
+ * Need to set a notes entry on r for unrecognized elements.
+ * Somebody better claim them! If we did absolutely nothing,
+ * skip the notes to alert mod_negotiation we are clueless.
+ */
+ if (found_metadata) {
+ apr_table_setn(r->notes, "ap-mime-exceptions-list",
+ (void *)exception_list);
+ }
+
+ if (r->content_type) {
+ content_type *ctp;
+ int override = 0;
+
+ if ((ctp = analyze_ct(r, r->content_type))) {
+ param *pp = ctp->param;
+ char *base_content_type = apr_palloc(r->pool, ctp->type_len +
+ ctp->subtype_len +
+ sizeof("/"));
+ char *tmp = base_content_type;
+ memcpy(tmp, ctp->type, ctp->type_len);
+ tmp += ctp->type_len;
+ *tmp++ = '/';
+ memcpy(tmp, ctp->subtype, ctp->subtype_len);
+ tmp += ctp->subtype_len;
+ *tmp = 0;
+ ap_set_content_type(r, base_content_type);
+ while (pp != NULL) {
+ if (charset && !strcmp(pp->attr, "charset")) {
+ if (!override) {
+ ap_set_content_type(r,
+ apr_pstrcat(r->pool,
+ r->content_type,
+ "; charset=",
+ charset,
+ NULL));
+ override = 1;
+ }
+ }
+ else {
+ ap_set_content_type(r,
+ apr_pstrcat(r->pool,
+ r->content_type,
+ "; ", pp->attr,
+ "=", pp->val,
+ NULL));
+ }
+ pp = pp->next;
+ }
+ if (charset && !override) {
+ ap_set_content_type(r, apr_pstrcat(r->pool, r->content_type,
+ "; charset=", charset,
+ NULL));
+ }
+ }
+ }
+
+ /* Set default language, if none was specified by the extensions
+ * and we have a DefaultLanguage setting in force
+ */
+
+ if (!r->content_languages && conf->default_language) {
+ const char **new;
+
+ r->content_languages = apr_array_make(r->pool, 2, sizeof(char *));
+ new = (const char **)apr_array_push(r->content_languages);
+ *new = conf->default_language;
+ }
+
+ if (!r->content_type) {
+ return DECLINED;
+ }
+
+ return OK;
+}
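
To make the extension walk above concrete, a standalone plain-C sketch (the filename is an assumption): everything up to the first dot past any leading dots is the base name, and each remaining dot-separated piece is consulted independently, which is why "index.en.html" and "index.html.en" yield the same metadata.

#include <stdio.h>
#include <string.h>

int main(void)
{
    char name[] = "index.en.html";
    char *p = name;

    while (*p == '.') {          /* leading dots belong to the base name */
        p++;
    }
    p = strchr(p, '.');
    if (p) {
        *p++ = '\0';
    }
    printf("base name: %s\n", name);

    while (p && *p) {
        char *next = strchr(p, '.');
        if (next) {
            *next++ = '\0';
        }
        if (*p) {                /* empty pieces ("bad..html") are skipped */
            printf("consult extension: %s\n", p);
        }
        p = next;
    }
    return 0;
}
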
+
+static void register_hooks(apr_pool_t *p)
+{
+ ap_hook_post_config(mime_post_config,NULL,NULL,APR_HOOK_MIDDLE);
+ ap_hook_type_checker(find_ct,NULL,NULL,APR_HOOK_MIDDLE);
+ /*
+ * this hook seems redundant ... is there any reason a type checker isn't
+ * allowed to do this already? I'd think that fixups in general would be
+ * the last opportunity to get the filters right.
+ * ap_hook_insert_filter(mime_insert_filters,NULL,NULL,APR_HOOK_MIDDLE);
+ */
+}
+
+AP_DECLARE_MODULE(mime) = {
+ STANDARD20_MODULE_STUFF,
+ create_mime_dir_config, /* create per-directory config structure */
+ merge_mime_dir_configs, /* merge per-directory config structures */
+ NULL, /* create per-server config structure */
+ NULL, /* merge per-server config structures */
+ mime_cmds, /* command apr_table_t */
+ register_hooks /* register hooks */
+};
diff --git a/modules/http/mod_mime.dep b/modules/http/mod_mime.dep
new file mode 100644
index 0000000..7a195a1
--- /dev/null
+++ b/modules/http/mod_mime.dep
@@ -0,0 +1,55 @@
+# Microsoft Developer Studio Generated Dependency File, included by mod_mime.mak
+
+..\..\build\win32\httpd.rc : \
+ "..\..\include\ap_release.h"\
+
+
+.\mod_mime.c : \
+ "..\..\include\ap_config.h"\
+ "..\..\include\ap_config_layout.h"\
+ "..\..\include\ap_hooks.h"\
+ "..\..\include\ap_mmn.h"\
+ "..\..\include\ap_regex.h"\
+ "..\..\include\ap_release.h"\
+ "..\..\include\apache_noprobes.h"\
+ "..\..\include\http_config.h"\
+ "..\..\include\http_log.h"\
+ "..\..\include\http_protocol.h"\
+ "..\..\include\http_request.h"\
+ "..\..\include\httpd.h"\
+ "..\..\include\os.h"\
+ "..\..\include\util_cfgtree.h"\
+ "..\..\include\util_filter.h"\
+ "..\..\srclib\apr-util\include\apr_buckets.h"\
+ "..\..\srclib\apr-util\include\apr_hooks.h"\
+ "..\..\srclib\apr-util\include\apr_optional.h"\
+ "..\..\srclib\apr-util\include\apr_optional_hooks.h"\
+ "..\..\srclib\apr-util\include\apr_uri.h"\
+ "..\..\srclib\apr-util\include\apu.h"\
+ "..\..\srclib\apr\include\apr.h"\
+ "..\..\srclib\apr\include\apr_allocator.h"\
+ "..\..\srclib\apr\include\apr_dso.h"\
+ "..\..\srclib\apr\include\apr_errno.h"\
+ "..\..\srclib\apr\include\apr_file_info.h"\
+ "..\..\srclib\apr\include\apr_file_io.h"\
+ "..\..\srclib\apr\include\apr_general.h"\
+ "..\..\srclib\apr\include\apr_global_mutex.h"\
+ "..\..\srclib\apr\include\apr_hash.h"\
+ "..\..\srclib\apr\include\apr_inherit.h"\
+ "..\..\srclib\apr\include\apr_lib.h"\
+ "..\..\srclib\apr\include\apr_mmap.h"\
+ "..\..\srclib\apr\include\apr_network_io.h"\
+ "..\..\srclib\apr\include\apr_poll.h"\
+ "..\..\srclib\apr\include\apr_pools.h"\
+ "..\..\srclib\apr\include\apr_portable.h"\
+ "..\..\srclib\apr\include\apr_proc_mutex.h"\
+ "..\..\srclib\apr\include\apr_ring.h"\
+ "..\..\srclib\apr\include\apr_shm.h"\
+ "..\..\srclib\apr\include\apr_strings.h"\
+ "..\..\srclib\apr\include\apr_tables.h"\
+ "..\..\srclib\apr\include\apr_thread_mutex.h"\
+ "..\..\srclib\apr\include\apr_thread_proc.h"\
+ "..\..\srclib\apr\include\apr_time.h"\
+ "..\..\srclib\apr\include\apr_user.h"\
+ "..\..\srclib\apr\include\apr_want.h"\
+
diff --git a/modules/http/mod_mime.dsp b/modules/http/mod_mime.dsp
new file mode 100644
index 0000000..71ba2ad
--- /dev/null
+++ b/modules/http/mod_mime.dsp
@@ -0,0 +1,111 @@
+# Microsoft Developer Studio Project File - Name="mod_mime" - Package Owner=<4>
+# Microsoft Developer Studio Generated Build File, Format Version 6.00
+# ** DO NOT EDIT **
+
+# TARGTYPE "Win32 (x86) Dynamic-Link Library" 0x0102
+
+CFG=mod_mime - Win32 Release
+!MESSAGE This is not a valid makefile. To build this project using NMAKE,
+!MESSAGE use the Export Makefile command and run
+!MESSAGE
+!MESSAGE NMAKE /f "mod_mime.mak".
+!MESSAGE
+!MESSAGE You can specify a configuration when running NMAKE
+!MESSAGE by defining the macro CFG on the command line. For example:
+!MESSAGE
+!MESSAGE NMAKE /f "mod_mime.mak" CFG="mod_mime - Win32 Release"
+!MESSAGE
+!MESSAGE Possible choices for configuration are:
+!MESSAGE
+!MESSAGE "mod_mime - Win32 Release" (based on "Win32 (x86) Dynamic-Link Library")
+!MESSAGE "mod_mime - Win32 Debug" (based on "Win32 (x86) Dynamic-Link Library")
+!MESSAGE
+
+# Begin Project
+# PROP AllowPerConfigDependencies 0
+# PROP Scc_ProjName ""
+# PROP Scc_LocalPath ""
+CPP=cl.exe
+MTL=midl.exe
+RSC=rc.exe
+
+!IF "$(CFG)" == "mod_mime - Win32 Release"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "Release"
+# PROP BASE Intermediate_Dir "Release"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release"
+# PROP Intermediate_Dir "Release"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MD /W3 /O2 /D "WIN32" /D "NDEBUG" /D "_WINDOWS" /FD /c
+# ADD CPP /nologo /MD /W3 /O2 /Oy- /Zi /I "../../include" /I "../../srclib/apr/include" /I "../../srclib/apr-util/include" /D "NDEBUG" /D "WIN32" /D "_WINDOWS" /Fd"Release\mod_mime_src" /FD /c
+# ADD BASE MTL /nologo /D "NDEBUG" /win32
+# ADD MTL /nologo /D "NDEBUG" /mktyplib203 /win32
+# ADD BASE RSC /l 0x409 /d "NDEBUG"
+# ADD RSC /l 0x409 /fo"Release/mod_mime.res" /i "../../include" /i "../../srclib/apr/include" /d "NDEBUG" /d BIN_NAME="mod_mime.so" /d LONG_NAME="mime_module for Apache"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib /nologo /subsystem:windows /dll /out:".\Release\mod_mime.so" /base:@..\..\os\win32\BaseAddr.ref,mod_mime.so
+# ADD LINK32 kernel32.lib /nologo /subsystem:windows /dll /incremental:no /debug /out:".\Release\mod_mime.so" /base:@..\..\os\win32\BaseAddr.ref,mod_mime.so /opt:ref
+# Begin Special Build Tool
+TargetPath=.\Release\mod_mime.so
+SOURCE="$(InputPath)"
+PostBuild_Desc=Embed .manifest
+PostBuild_Cmds=if exist $(TargetPath).manifest mt.exe -manifest $(TargetPath).manifest -outputresource:$(TargetPath);2
+# End Special Build Tool
+
+!ELSEIF "$(CFG)" == "mod_mime - Win32 Debug"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "Debug"
+# PROP BASE Intermediate_Dir "Debug"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug"
+# PROP Intermediate_Dir "Debug"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MDd /W3 /EHsc /Zi /Od /D "WIN32" /D "_DEBUG" /D "_WINDOWS" /FD /c
+# ADD CPP /nologo /MDd /W3 /EHsc /Zi /Od /I "../../include" /I "../../srclib/apr/include" /I "../../srclib/apr-util/include" /D "_DEBUG" /D "WIN32" /D "_WINDOWS" /Fd"Debug\mod_mime_src" /FD /c
+# ADD BASE MTL /nologo /D "_DEBUG" /win32
+# ADD MTL /nologo /D "_DEBUG" /mktyplib203 /win32
+# ADD BASE RSC /l 0x409 /d "_DEBUG"
+# ADD RSC /l 0x409 /fo"Debug/mod_mime.res" /i "../../include" /i "../../srclib/apr/include" /d "_DEBUG" /d BIN_NAME="mod_mime.so" /d LONG_NAME="mime_module for Apache"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib /nologo /subsystem:windows /dll /incremental:no /debug /out:".\Debug\mod_mime.so" /base:@..\..\os\win32\BaseAddr.ref,mod_mime.so
+# ADD LINK32 kernel32.lib /nologo /subsystem:windows /dll /incremental:no /debug /out:".\Debug\mod_mime.so" /base:@..\..\os\win32\BaseAddr.ref,mod_mime.so
+# Begin Special Build Tool
+TargetPath=.\Debug\mod_mime.so
+SOURCE="$(InputPath)"
+PostBuild_Desc=Embed .manifest
+PostBuild_Cmds=if exist $(TargetPath).manifest mt.exe -manifest $(TargetPath).manifest -outputresource:$(TargetPath);2
+# End Special Build Tool
+
+!ENDIF
+
+# Begin Target
+
+# Name "mod_mime - Win32 Release"
+# Name "mod_mime - Win32 Debug"
+# Begin Source File
+
+SOURCE=.\mod_mime.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\..\build\win32\httpd.rc
+# End Source File
+# End Target
+# End Project
diff --git a/modules/http/mod_mime.exp b/modules/http/mod_mime.exp
new file mode 100644
index 0000000..f2e38db
--- /dev/null
+++ b/modules/http/mod_mime.exp
@@ -0,0 +1 @@
+mime_module
diff --git a/modules/http/mod_mime.mak b/modules/http/mod_mime.mak
new file mode 100644
index 0000000..14d106f
--- /dev/null
+++ b/modules/http/mod_mime.mak
@@ -0,0 +1,353 @@
+# Microsoft Developer Studio Generated NMAKE File, Based on mod_mime.dsp
+!IF "$(CFG)" == ""
+CFG=mod_mime - Win32 Release
+!MESSAGE No configuration specified. Defaulting to mod_mime - Win32 Release.
+!ENDIF
+
+!IF "$(CFG)" != "mod_mime - Win32 Release" && "$(CFG)" != "mod_mime - Win32 Debug"
+!MESSAGE Invalid configuration "$(CFG)" specified.
+!MESSAGE You can specify a configuration when running NMAKE
+!MESSAGE by defining the macro CFG on the command line. For example:
+!MESSAGE
+!MESSAGE NMAKE /f "mod_mime.mak" CFG="mod_mime - Win32 Release"
+!MESSAGE
+!MESSAGE Possible choices for configuration are:
+!MESSAGE
+!MESSAGE "mod_mime - Win32 Release" (based on "Win32 (x86) Dynamic-Link Library")
+!MESSAGE "mod_mime - Win32 Debug" (based on "Win32 (x86) Dynamic-Link Library")
+!MESSAGE
+!ERROR An invalid configuration is specified.
+!ENDIF
+
+!IF "$(OS)" == "Windows_NT"
+NULL=
+!ELSE
+NULL=nul
+!ENDIF
+
+!IF "$(CFG)" == "mod_mime - Win32 Release"
+
+OUTDIR=.\Release
+INTDIR=.\Release
+DS_POSTBUILD_DEP=$(INTDIR)\postbld.dep
+# Begin Custom Macros
+OutDir=.\Release
+# End Custom Macros
+
+!IF "$(RECURSE)" == "0"
+
+ALL : "$(OUTDIR)\mod_mime.so" "$(DS_POSTBUILD_DEP)"
+
+!ELSE
+
+ALL : "libhttpd - Win32 Release" "libaprutil - Win32 Release" "libapr - Win32 Release" "$(OUTDIR)\mod_mime.so" "$(DS_POSTBUILD_DEP)"
+
+!ENDIF
+
+!IF "$(RECURSE)" == "1"
+CLEAN :"libapr - Win32 ReleaseCLEAN" "libaprutil - Win32 ReleaseCLEAN" "libhttpd - Win32 ReleaseCLEAN"
+!ELSE
+CLEAN :
+!ENDIF
+ -@erase "$(INTDIR)\mod_mime.obj"
+ -@erase "$(INTDIR)\mod_mime.res"
+ -@erase "$(INTDIR)\mod_mime_src.idb"
+ -@erase "$(INTDIR)\mod_mime_src.pdb"
+ -@erase "$(OUTDIR)\mod_mime.exp"
+ -@erase "$(OUTDIR)\mod_mime.lib"
+ -@erase "$(OUTDIR)\mod_mime.pdb"
+ -@erase "$(OUTDIR)\mod_mime.so"
+
+"$(OUTDIR)" :
+ if not exist "$(OUTDIR)/$(NULL)" mkdir "$(OUTDIR)"
+
+CPP=cl.exe
+CPP_PROJ=/nologo /MD /W3 /Zi /O2 /Oy- /I "../../include" /I "../../srclib/apr/include" /I "../../srclib/apr-util/include" /D "NDEBUG" /D "WIN32" /D "_WINDOWS" /Fo"$(INTDIR)\\" /Fd"$(INTDIR)\mod_mime_src" /FD /c
+
+.c{$(INTDIR)}.obj::
+ $(CPP) @<<
+ $(CPP_PROJ) $<
+<<
+
+.cpp{$(INTDIR)}.obj::
+ $(CPP) @<<
+ $(CPP_PROJ) $<
+<<
+
+.cxx{$(INTDIR)}.obj::
+ $(CPP) @<<
+ $(CPP_PROJ) $<
+<<
+
+.c{$(INTDIR)}.sbr::
+ $(CPP) @<<
+ $(CPP_PROJ) $<
+<<
+
+.cpp{$(INTDIR)}.sbr::
+ $(CPP) @<<
+ $(CPP_PROJ) $<
+<<
+
+.cxx{$(INTDIR)}.sbr::
+ $(CPP) @<<
+ $(CPP_PROJ) $<
+<<
+
+MTL=midl.exe
+MTL_PROJ=/nologo /D "NDEBUG" /mktyplib203 /win32
+RSC=rc.exe
+RSC_PROJ=/l 0x409 /fo"$(INTDIR)\mod_mime.res" /i "../../include" /i "../../srclib/apr/include" /d "NDEBUG" /d BIN_NAME="mod_mime.so" /d LONG_NAME="mime_module for Apache"
+BSC32=bscmake.exe
+BSC32_FLAGS=/nologo /o"$(OUTDIR)\mod_mime.bsc"
+BSC32_SBRS= \
+
+LINK32=link.exe
+LINK32_FLAGS=kernel32.lib /nologo /subsystem:windows /dll /incremental:no /pdb:"$(OUTDIR)\mod_mime.pdb" /debug /out:"$(OUTDIR)\mod_mime.so" /implib:"$(OUTDIR)\mod_mime.lib" /base:@..\..\os\win32\BaseAddr.ref,mod_mime.so /opt:ref
+LINK32_OBJS= \
+ "$(INTDIR)\mod_mime.obj" \
+ "$(INTDIR)\mod_mime.res" \
+ "..\..\srclib\apr\Release\libapr-1.lib" \
+ "..\..\srclib\apr-util\Release\libaprutil-1.lib" \
+ "..\..\Release\libhttpd.lib"
+
+"$(OUTDIR)\mod_mime.so" : "$(OUTDIR)" $(DEF_FILE) $(LINK32_OBJS)
+ $(LINK32) @<<
+ $(LINK32_FLAGS) $(LINK32_OBJS)
+<<
+
+TargetPath=.\Release\mod_mime.so
+SOURCE="$(InputPath)"
+PostBuild_Desc=Embed .manifest
+DS_POSTBUILD_DEP=$(INTDIR)\postbld.dep
+
+# Begin Custom Macros
+OutDir=.\Release
+# End Custom Macros
+
+"$(DS_POSTBUILD_DEP)" : "$(OUTDIR)\mod_mime.so"
+ if exist .\Release\mod_mime.so.manifest mt.exe -manifest .\Release\mod_mime.so.manifest -outputresource:.\Release\mod_mime.so;2
+ echo Helper for Post-build step > "$(DS_POSTBUILD_DEP)"
+
+!ELSEIF "$(CFG)" == "mod_mime - Win32 Debug"
+
+OUTDIR=.\Debug
+INTDIR=.\Debug
+DS_POSTBUILD_DEP=$(INTDIR)\postbld.dep
+# Begin Custom Macros
+OutDir=.\Debug
+# End Custom Macros
+
+!IF "$(RECURSE)" == "0"
+
+ALL : "$(OUTDIR)\mod_mime.so" "$(DS_POSTBUILD_DEP)"
+
+!ELSE
+
+ALL : "libhttpd - Win32 Debug" "libaprutil - Win32 Debug" "libapr - Win32 Debug" "$(OUTDIR)\mod_mime.so" "$(DS_POSTBUILD_DEP)"
+
+!ENDIF
+
+!IF "$(RECURSE)" == "1"
+CLEAN :"libapr - Win32 DebugCLEAN" "libaprutil - Win32 DebugCLEAN" "libhttpd - Win32 DebugCLEAN"
+!ELSE
+CLEAN :
+!ENDIF
+ -@erase "$(INTDIR)\mod_mime.obj"
+ -@erase "$(INTDIR)\mod_mime.res"
+ -@erase "$(INTDIR)\mod_mime_src.idb"
+ -@erase "$(INTDIR)\mod_mime_src.pdb"
+ -@erase "$(OUTDIR)\mod_mime.exp"
+ -@erase "$(OUTDIR)\mod_mime.lib"
+ -@erase "$(OUTDIR)\mod_mime.pdb"
+ -@erase "$(OUTDIR)\mod_mime.so"
+
+"$(OUTDIR)" :
+ if not exist "$(OUTDIR)/$(NULL)" mkdir "$(OUTDIR)"
+
+CPP=cl.exe
+CPP_PROJ=/nologo /MDd /W3 /Zi /Od /I "../../include" /I "../../srclib/apr/include" /I "../../srclib/apr-util/include" /D "_DEBUG" /D "WIN32" /D "_WINDOWS" /Fo"$(INTDIR)\\" /Fd"$(INTDIR)\mod_mime_src" /FD /EHsc /c
+
+.c{$(INTDIR)}.obj::
+ $(CPP) @<<
+ $(CPP_PROJ) $<
+<<
+
+.cpp{$(INTDIR)}.obj::
+ $(CPP) @<<
+ $(CPP_PROJ) $<
+<<
+
+.cxx{$(INTDIR)}.obj::
+ $(CPP) @<<
+ $(CPP_PROJ) $<
+<<
+
+.c{$(INTDIR)}.sbr::
+ $(CPP) @<<
+ $(CPP_PROJ) $<
+<<
+
+.cpp{$(INTDIR)}.sbr::
+ $(CPP) @<<
+ $(CPP_PROJ) $<
+<<
+
+.cxx{$(INTDIR)}.sbr::
+ $(CPP) @<<
+ $(CPP_PROJ) $<
+<<
+
+MTL=midl.exe
+MTL_PROJ=/nologo /D "_DEBUG" /mktyplib203 /win32
+RSC=rc.exe
+RSC_PROJ=/l 0x409 /fo"$(INTDIR)\mod_mime.res" /i "../../include" /i "../../srclib/apr/include" /d "_DEBUG" /d BIN_NAME="mod_mime.so" /d LONG_NAME="mime_module for Apache"
+BSC32=bscmake.exe
+BSC32_FLAGS=/nologo /o"$(OUTDIR)\mod_mime.bsc"
+BSC32_SBRS= \
+
+LINK32=link.exe
+LINK32_FLAGS=kernel32.lib /nologo /subsystem:windows /dll /incremental:no /pdb:"$(OUTDIR)\mod_mime.pdb" /debug /out:"$(OUTDIR)\mod_mime.so" /implib:"$(OUTDIR)\mod_mime.lib" /base:@..\..\os\win32\BaseAddr.ref,mod_mime.so
+LINK32_OBJS= \
+ "$(INTDIR)\mod_mime.obj" \
+ "$(INTDIR)\mod_mime.res" \
+ "..\..\srclib\apr\Debug\libapr-1.lib" \
+ "..\..\srclib\apr-util\Debug\libaprutil-1.lib" \
+ "..\..\Debug\libhttpd.lib"
+
+"$(OUTDIR)\mod_mime.so" : "$(OUTDIR)" $(DEF_FILE) $(LINK32_OBJS)
+ $(LINK32) @<<
+ $(LINK32_FLAGS) $(LINK32_OBJS)
+<<
+
+TargetPath=.\Debug\mod_mime.so
+SOURCE="$(InputPath)"
+PostBuild_Desc=Embed .manifest
+DS_POSTBUILD_DEP=$(INTDIR)\postbld.dep
+
+# Begin Custom Macros
+OutDir=.\Debug
+# End Custom Macros
+
+"$(DS_POSTBUILD_DEP)" : "$(OUTDIR)\mod_mime.so"
+ if exist .\Debug\mod_mime.so.manifest mt.exe -manifest .\Debug\mod_mime.so.manifest -outputresource:.\Debug\mod_mime.so;2
+ echo Helper for Post-build step > "$(DS_POSTBUILD_DEP)"
+
+!ENDIF
+
+
+!IF "$(NO_EXTERNAL_DEPS)" != "1"
+!IF EXISTS("mod_mime.dep")
+!INCLUDE "mod_mime.dep"
+!ELSE
+!MESSAGE Warning: cannot find "mod_mime.dep"
+!ENDIF
+!ENDIF
+
+
+!IF "$(CFG)" == "mod_mime - Win32 Release" || "$(CFG)" == "mod_mime - Win32 Debug"
+
+!IF "$(CFG)" == "mod_mime - Win32 Release"
+
+"libapr - Win32 Release" :
+ cd ".\..\..\srclib\apr"
+ $(MAKE) /$(MAKEFLAGS) /F ".\libapr.mak" CFG="libapr - Win32 Release"
+ cd "..\..\modules\http"
+
+"libapr - Win32 ReleaseCLEAN" :
+ cd ".\..\..\srclib\apr"
+ $(MAKE) /$(MAKEFLAGS) /F ".\libapr.mak" CFG="libapr - Win32 Release" RECURSE=1 CLEAN
+ cd "..\..\modules\http"
+
+!ELSEIF "$(CFG)" == "mod_mime - Win32 Debug"
+
+"libapr - Win32 Debug" :
+ cd ".\..\..\srclib\apr"
+ $(MAKE) /$(MAKEFLAGS) /F ".\libapr.mak" CFG="libapr - Win32 Debug"
+ cd "..\..\modules\http"
+
+"libapr - Win32 DebugCLEAN" :
+ cd ".\..\..\srclib\apr"
+ $(MAKE) /$(MAKEFLAGS) /F ".\libapr.mak" CFG="libapr - Win32 Debug" RECURSE=1 CLEAN
+ cd "..\..\modules\http"
+
+!ENDIF
+
+!IF "$(CFG)" == "mod_mime - Win32 Release"
+
+"libaprutil - Win32 Release" :
+ cd ".\..\..\srclib\apr-util"
+ $(MAKE) /$(MAKEFLAGS) /F ".\libaprutil.mak" CFG="libaprutil - Win32 Release"
+ cd "..\..\modules\http"
+
+"libaprutil - Win32 ReleaseCLEAN" :
+ cd ".\..\..\srclib\apr-util"
+ $(MAKE) /$(MAKEFLAGS) /F ".\libaprutil.mak" CFG="libaprutil - Win32 Release" RECURSE=1 CLEAN
+ cd "..\..\modules\http"
+
+!ELSEIF "$(CFG)" == "mod_mime - Win32 Debug"
+
+"libaprutil - Win32 Debug" :
+ cd ".\..\..\srclib\apr-util"
+ $(MAKE) /$(MAKEFLAGS) /F ".\libaprutil.mak" CFG="libaprutil - Win32 Debug"
+ cd "..\..\modules\http"
+
+"libaprutil - Win32 DebugCLEAN" :
+ cd ".\..\..\srclib\apr-util"
+ $(MAKE) /$(MAKEFLAGS) /F ".\libaprutil.mak" CFG="libaprutil - Win32 Debug" RECURSE=1 CLEAN
+ cd "..\..\modules\http"
+
+!ENDIF
+
+!IF "$(CFG)" == "mod_mime - Win32 Release"
+
+"libhttpd - Win32 Release" :
+ cd ".\..\.."
+ $(MAKE) /$(MAKEFLAGS) /F ".\libhttpd.mak" CFG="libhttpd - Win32 Release"
+ cd ".\modules\http"
+
+"libhttpd - Win32 ReleaseCLEAN" :
+ cd ".\..\.."
+ $(MAKE) /$(MAKEFLAGS) /F ".\libhttpd.mak" CFG="libhttpd - Win32 Release" RECURSE=1 CLEAN
+ cd ".\modules\http"
+
+!ELSEIF "$(CFG)" == "mod_mime - Win32 Debug"
+
+"libhttpd - Win32 Debug" :
+ cd ".\..\.."
+ $(MAKE) /$(MAKEFLAGS) /F ".\libhttpd.mak" CFG="libhttpd - Win32 Debug"
+ cd ".\modules\http"
+
+"libhttpd - Win32 DebugCLEAN" :
+ cd ".\..\.."
+ $(MAKE) /$(MAKEFLAGS) /F ".\libhttpd.mak" CFG="libhttpd - Win32 Debug" RECURSE=1 CLEAN
+ cd ".\modules\http"
+
+!ENDIF
+
+SOURCE=..\..\build\win32\httpd.rc
+
+!IF "$(CFG)" == "mod_mime - Win32 Release"
+
+
+"$(INTDIR)\mod_mime.res" : $(SOURCE) "$(INTDIR)"
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_mime.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "NDEBUG" /d BIN_NAME="mod_mime.so" /d LONG_NAME="mime_module for Apache" $(SOURCE)
+
+
+!ELSEIF "$(CFG)" == "mod_mime - Win32 Debug"
+
+
+"$(INTDIR)\mod_mime.res" : $(SOURCE) "$(INTDIR)"
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_mime.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "_DEBUG" /d BIN_NAME="mod_mime.so" /d LONG_NAME="mime_module for Apache" $(SOURCE)
+
+
+!ENDIF
+
+SOURCE=.\mod_mime.c
+
+"$(INTDIR)\mod_mime.obj" : $(SOURCE) "$(INTDIR)"
+
+
+
+!ENDIF
+
diff --git a/modules/http2/Makefile.in b/modules/http2/Makefile.in
new file mode 100644
index 0000000..4395bc3
--- /dev/null
+++ b/modules/http2/Makefile.in
@@ -0,0 +1,20 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+#
+# standard stuff
+#
+
+include $(top_srcdir)/build/special.mk
diff --git a/modules/http2/NWGNUmakefile b/modules/http2/NWGNUmakefile
new file mode 100644
index 0000000..d4a51ed
--- /dev/null
+++ b/modules/http2/NWGNUmakefile
@@ -0,0 +1,246 @@
+#
+# Declare the sub-directories to be built here
+#
+
+SUBDIRS = \
+ $(EOLIST)
+
+#
+# Get the 'head' of the build environment. This includes default targets and
+# paths to tools
+#
+
+include $(AP_WORK)/build/NWGNUhead.inc
+
+#
+# build this level's files
+
+#
+# Make sure all needed macros are defined
+#
+
+#
+# These directories will be at the beginning of the include list, followed by
+# INCDIRS
+#
+XINCDIRS += \
+ $(EOLIST)
+
+#
+# These flags will come after CFLAGS
+#
+XCFLAGS += \
+ $(EOLIST)
+
+#
+# These defines will come after DEFINES
+#
+XDEFINES += \
+ $(EOLIST)
+
+#
+# These flags will be added to the link.opt file
+#
+XLFLAGS += \
+ $(EOLIST)
+
+#
+# These values will be appended to the correct variables based on the value of
+# RELEASE
+#
+ifeq "$(RELEASE)" "debug"
+XINCDIRS += \
+ $(EOLIST)
+
+XCFLAGS += \
+ $(EOLIST)
+
+XDEFINES += \
+ $(EOLIST)
+
+XLFLAGS += \
+ $(EOLIST)
+endif
+
+ifeq "$(RELEASE)" "noopt"
+XINCDIRS += \
+ $(EOLIST)
+
+XCFLAGS += \
+ $(EOLIST)
+
+XDEFINES += \
+ $(EOLIST)
+
+XLFLAGS += \
+ $(EOLIST)
+endif
+
+ifeq "$(RELEASE)" "release"
+XINCDIRS += \
+ $(EOLIST)
+
+XCFLAGS += \
+ $(EOLIST)
+
+XDEFINES += \
+ $(EOLIST)
+
+XLFLAGS += \
+ $(EOLIST)
+endif
+
+#
+# These are used by the link target if an NLM is being generated
+# This is used by the link 'name' directive to name the nlm. If left blank
+# TARGET_nlm (see below) will be used.
+#
+NLM_NAME =
+
+#
+# This is used by the link '-desc ' directive.
+# If left blank, NLM_NAME will be used.
+#
+NLM_DESCRIPTION =
+
+#
+# This is used by the '-threadname' directive. If left blank,
+# NLM_NAME Thread will be used.
+#
+NLM_THREAD_NAME =
+
+#
+# If this is specified, it will override VERSION value in
+# $(AP_WORK)/build/NWGNUenvironment.inc
+#
+NLM_VERSION =
+
+#
+# If this is specified, it will override the default of 64K
+#
+NLM_STACK_SIZE =
+
+
+#
+# If this is specified it will be used by the link '-entry' directive
+#
+NLM_ENTRY_SYM =
+
+#
+# If this is specified it will be used by the link '-exit' directive
+#
+NLM_EXIT_SYM =
+
+#
+# If this is specified it will be used by the link '-check' directive
+#
+NLM_CHECK_SYM =
+
+#
+# If this is specified it will be used by the link '-flags' directive
+#
+NLM_FLAGS =
+
+#
+# If this is specified it will be linked in with the XDCData option in the def
+# file instead of the default of $(NWOS)/apache.xdc. XDCData can be disabled
+# by setting APACHE_UNIPROC in the environment
+#
+XDCDATA =
+
+#
+# If there is an NLM target, put it here
+#
+TARGET_nlm = \
+ $(OBJDIR)/mod_http2.nlm \
+ $(OBJDIR)/proxyht2.nlm \
+ $(EOLIST)
+
+#
+# If there is an LIB target, put it here
+#
+TARGET_lib = \
+ $(EOLIST)
+
+#
+# These are the OBJ files needed to create the NLM target above.
+# Paths must all use the '/' character
+#
+FILES_nlm_objs = \
+ $(EOLIST)
+
+#
+# These are the LIB files needed to create the NLM target above.
+# These will be added as a library command in the link.opt file.
+#
+FILES_nlm_libs = \
+ $(EOLIST)
+
+#
+# These are the modules that the above NLM target depends on to load.
+# These will be added as a module command in the link.opt file.
+#
+FILES_nlm_modules = \
+ $(EOLIST)
+
+#
+# If the nlm has a msg file, put its path here
+#
+FILE_nlm_msg =
+
+#
+# If the nlm has a hlp file, put its path here
+#
+FILE_nlm_hlp =
+
+#
+# If this is specified, it will override $(NWOS)\copyright.txt.
+#
+FILE_nlm_copyright =
+
+#
+# Any additional imports go here
+#
+FILES_nlm_Ximports = \
+ $(EOLIST)
+
+#
+# Any symbols exported to here
+#
+FILES_nlm_exports = \
+ $(EOLIST)
+
+#
+# These are the OBJ files needed to create the LIB target above.
+# Paths must all use the '/' character
+#
+FILES_lib_objs = \
+ $(EOLIST)
+
+#
+# implement targets and dependencies (leave this section alone)
+#
+
+libs :: $(OBJDIR) $(TARGET_lib)
+
+nlms :: libs $(TARGET_nlm)
+
+#
+# Updated this target to create necessary directories and copy files to the
+# correct place. (See $(AP_WORK)/build/NWGNUhead.inc for examples)
+#
+install :: nlms FORCE
+ $(call COPY,$(OBJDIR)/*.nlm, $(INSTALLBASE)/modules/)
+
+#
+# Any specialized rules here
+#
+
+#
+# Include the 'tail' makefile that has targets that depend on variables defined
+# in this makefile
+#
+
+include $(APBUILD)/NWGNUtail.inc
+
+
diff --git a/modules/http2/NWGNUmod_http2 b/modules/http2/NWGNUmod_http2
new file mode 100644
index 0000000..f6d4a38
--- /dev/null
+++ b/modules/http2/NWGNUmod_http2
@@ -0,0 +1,395 @@
+#
+# This Makefile requires the environment var NGH2SRC
+# pointing to the base directory of nghttp2 source tree.
+#
+
+#
+# Declare the sub-directories to be built here
+#
+
+SUBDIRS = \
+ $(EOLIST)
+
+#
+# Get the 'head' of the build environment. This includes default targets and
+# paths to tools
+#
+
+include $(AP_WORK)/build/NWGNUhead.inc
+
+#
+# build this level's files
+#
+# Make sure all needed macros are defined
+#
+
+#
+# These directories will be at the beginning of the include list, followed by
+# INCDIRS
+#
+XINCDIRS += \
+ $(APR)/include \
+ $(APRUTIL)/include \
+ $(SRC)/include \
+ $(NGH2SRC)/lib/ \
+ $(NGH2SRC)/lib/includes \
+ $(SERVER)/mpm/NetWare \
+ $(STDMOD)/ssl \
+ $(NWOS) \
+ $(EOLIST)
+
+#
+# These flags will come after CFLAGS
+#
+XCFLAGS += \
+ $(EOLIST)
+
+#
+# These defines will come after DEFINES
+#
+XDEFINES += \
+ -DHAVE_CONFIG_H \
+ $(EOLIST)
+
+#
+# These flags will be added to the link.opt file
+#
+XLFLAGS += \
+ -L$(OBJDIR) \
+ $(EOLIST)
+
+#
+# These values will be appended to the correct variables based on the value of
+# RELEASE
+#
+ifeq "$(RELEASE)" "debug"
+XINCDIRS += \
+ $(EOLIST)
+
+XCFLAGS += \
+ $(EOLIST)
+
+XDEFINES += \
+ $(EOLIST)
+
+XLFLAGS += \
+ $(EOLIST)
+endif
+
+ifeq "$(RELEASE)" "noopt"
+XINCDIRS += \
+ $(EOLIST)
+
+XCFLAGS += \
+ $(EOLIST)
+
+XDEFINES += \
+ $(EOLIST)
+
+XLFLAGS += \
+ $(EOLIST)
+endif
+
+ifeq "$(RELEASE)" "release"
+XINCDIRS += \
+ $(EOLIST)
+
+XCFLAGS += \
+ $(EOLIST)
+
+XDEFINES += \
+ $(EOLIST)
+
+XLFLAGS += \
+ $(EOLIST)
+endif
+
+#
+# These are used by the link target if an NLM is being generated
+# This is used by the link 'name' directive to name the nlm. If left blank
+# TARGET_nlm (see below) will be used.
+#
+NLM_NAME = mod_http2
+
+#
+# This is used by the link '-desc ' directive.
+# If left blank, NLM_NAME will be used.
+#
+NLM_DESCRIPTION = Apache $(VERSION_STR) HTTP2 Support module (w/ NGHTTP2 Lib)
+
+#
+# This is used by the '-threadname' directive. If left blank,
+# NLM_NAME Thread will be used.
+#
+NLM_THREAD_NAME = $(NLM_NAME)
+
+#
+# If this is specified, it will override VERSION value in
+# $(AP_WORK)/build/NWGNUenvironment.inc
+#
+NLM_VERSION =
+
+#
+# If this is specified, it will override the default of 64K
+#
+NLM_STACK_SIZE = 65536
+
+#
+# If this is specified it will be used by the link '-entry' directive
+#
+NLM_ENTRY_SYM =
+
+#
+# If this is specified it will be used by the link '-exit' directive
+#
+NLM_EXIT_SYM =
+
+#
+# If this is specified it will be used by the link '-check' directive
+#
+NLM_CHECK_SYM =
+
+#
+# If this is specified it will be used by the link '-flags' directive
+#
+NLM_FLAGS =
+
+#
+# If this is specified it will be linked in with the XDCData option in the def
+# file instead of the default of $(NWOS)/apache.xdc. XDCData can be disabled
+# by setting APACHE_UNIPROC in the environment
+#
+XDCDATA =
+
+#
+# Declare all target files (you must add your files here)
+#
+
+#
+# If there is an NLM target, put it here
+#
+TARGET_nlm = \
+ $(OBJDIR)/$(NLM_NAME).nlm \
+ $(EOLIST)
+
+#
+# If there is an LIB target, put it here
+#
+TARGET_lib = \
+ $(OBJDIR)/nghttp2.lib \
+ $(EOLIST)
+
+#
+# These are the OBJ files needed to create the NLM target above.
+# Paths must all use the '/' character
+#
+FILES_nlm_objs = \
+ $(OBJDIR)/h2_alt_svc.o \
+ $(OBJDIR)/h2_bucket_beam.o \
+ $(OBJDIR)/h2_bucket_eos.o \
+ $(OBJDIR)/h2_config.o \
+ $(OBJDIR)/h2_conn.o \
+ $(OBJDIR)/h2_conn_io.o \
+ $(OBJDIR)/h2_ctx.o \
+ $(OBJDIR)/h2_filter.o \
+ $(OBJDIR)/h2_from_h1.o \
+ $(OBJDIR)/h2_h2.o \
+ $(OBJDIR)/h2_mplx.o \
+ $(OBJDIR)/h2_ngn_shed.o \
+ $(OBJDIR)/h2_push.o \
+ $(OBJDIR)/h2_request.o \
+ $(OBJDIR)/h2_headers.o \
+ $(OBJDIR)/h2_session.o \
+ $(OBJDIR)/h2_stream.o \
+ $(OBJDIR)/h2_switch.o \
+ $(OBJDIR)/h2_task.o \
+ $(OBJDIR)/h2_util.o \
+ $(OBJDIR)/h2_workers.o \
+ $(OBJDIR)/mod_http2.o \
+ $(EOLIST)
+
+#
+# These are the LIB files needed to create the NLM target above.
+# These will be added as a library command in the link.opt file.
+#
+FILES_nlm_libs = \
+ $(PRELUDE) \
+ $(OBJDIR)/nghttp2.lib \
+ $(EOLIST)
+
+#
+# These are the modules that the above NLM target depends on to load.
+# These will be added as a module command in the link.opt file.
+#
+FILES_nlm_modules = \
+ Libc \
+ Apache2 \
+ $(EOLIST)
+
+#
+# If the nlm has a msg file, put its path here
+#
+FILE_nlm_msg =
+
+#
+# If the nlm has a hlp file, put its path here
+#
+FILE_nlm_hlp =
+
+#
+# If this is specified, it will override $(NWOS)\copyright.txt.
+#
+FILE_nlm_copyright =
+
+#
+# Any additional imports go here
+#
+FILES_nlm_Ximports = \
+ @libc.imp \
+ @aprlib.imp \
+ @httpd.imp \
+ $(EOLIST)
+
+#
+# Any symbols to be exported go here
+#
+FILES_nlm_exports = \
+ @$(OBJDIR)/mod_http2.imp \
+ $(EOLIST)
+
+#
+# These are the OBJ files needed to create the LIB target above.
+# Paths must all use the '/' character
+#
+FILES_lib_objs := $(sort $(patsubst $(NGH2SRC)/lib/%.c,$(OBJDIR)/%.o,$(wildcard $(NGH2SRC)/lib/*.c)))
+#
+# implement targets and dependencies (leave this section alone)
+#
+
+libs :: $(OBJDIR) $(NGH2SRC)/lib/config.h $(TARGET_lib)
+
+nlms :: libs $(OBJDIR)/mod_http2.imp $(TARGET_nlm)
+
+#
+# Updated this target to create necessary directories and copy files to the
+# correct place. (See $(AP_WORK)/build/NWGNUhead.inc for examples)
+#
+install :: nlms FORCE
+ $(call COPY,$(OBJDIR)/*.nlm, $(INSTALLBASE)/modules/)
+
+clean ::
+ $(call DEL,$(NGH2SRC)/lib/config.h)
+#
+# Any specialized rules here
+#
+vpath %.c $(NGH2SRC)/lib
+
+$(NGH2SRC)/lib/config.h : NWGNUmod_http2
+ @-$(RM) $@
+ @echo $(DL)GEN $@$(DL)
+ @echo $(DL)/* For NetWare target.$(DL) > $@
+ @echo $(DL)** Do not edit - created by Make!$(DL) >> $@
+ @echo $(DL)*/$(DL) >> $@
+ @echo $(DL)#ifndef NGH2_CONFIG_H$(DL) >> $@
+ @echo $(DL)#define NGH2_CONFIG_H$(DL) >> $@
+ @echo #define HAVE_ARPA_INET_H 1 >> $@
+ @echo #define HAVE_CHOWN 1 >> $@
+ @echo #define HAVE_DECL_STRERROR_R 1 >> $@
+ @echo #define HAVE_DLFCN_H 1 >> $@
+ @echo #define HAVE_DUP2 1 >> $@
+ @echo #define HAVE_FCNTL_H 1 >> $@
+ @echo #define HAVE_GETCWD 1 >> $@
+ @echo #define HAVE_INTTYPES_H 1 >> $@
+ @echo #define HAVE_LIMITS_H 1 >> $@
+ @echo #define HAVE_LOCALTIME_R 1 >> $@
+ @echo #define HAVE_MALLOC 1 >> $@
+ @echo #define HAVE_MEMCHR 1 >> $@
+ @echo #define HAVE_MEMMOVE 1 >> $@
+ @echo #define HAVE_MEMORY_H 1 >> $@
+ @echo #define HAVE_MEMSET 1 >> $@
+ @echo #define HAVE_NETDB_H 1 >> $@
+ @echo #define HAVE_NETINET_IN_H 1 >> $@
+ @echo #define HAVE_PTRDIFF_T 1 >> $@
+ @echo #define HAVE_PWD_H 1 >> $@
+ @echo #define HAVE_SOCKET 1 >> $@
+ @echo #define HAVE_SQRT 1 >> $@
+ @echo #define HAVE_STDDEF_H 1 >> $@
+ @echo #define HAVE_STDINT_H 1 >> $@
+ @echo #define HAVE_STDLIB_H 1 >> $@
+ @echo #define HAVE_STRCHR 1 >> $@
+ @echo #define HAVE_STRDUP 1 >> $@
+ @echo #define HAVE_STRERROR 1 >> $@
+ @echo #define HAVE_STRERROR_R 1 >> $@
+ @echo #define HAVE_STRINGS_H 1 >> $@
+ @echo #define HAVE_STRING_H 1 >> $@
+ @echo #define HAVE_STRSTR 1 >> $@
+ @echo #define HAVE_STRTOL 1 >> $@
+ @echo #define HAVE_STRTOUL 1 >> $@
+ @echo #define HAVE_SYSLOG_H 1 >> $@
+ @echo #define HAVE_SYS_SOCKET_H 1 >> $@
+ @echo #define HAVE_SYS_STAT_H 1 >> $@
+ @echo #define HAVE_SYS_TIME_H 1 >> $@
+ @echo #define HAVE_SYS_TYPES_H 1 >> $@
+ @echo #define HAVE_TIME_H 1 >> $@
+ @echo #define HAVE_UNISTD_H 1 >> $@
+
+ @echo #define SIZEOF_INT_P 4 >> $@
+ @echo #define STDC_HEADERS 1 >> $@
+ @echo #define STRERROR_R_CHAR_P 4 >> $@
+
+# Hint to compiler a function parameter is not used
+ @echo #define _U_ >> $@
+
+ @echo #ifndef __cplusplus >> $@
+ @echo #define inline __inline >> $@
+ @echo #endif >> $@
+
+ @echo $(DL)#endif /* NGH2_CONFIG_H */$(DL) >> $@
+
+#
+# Exports from mod_http2 for mod_proxy_http2
+$(OBJDIR)/mod_http2.imp : NWGNUmod_http2
+ @-$(RM) $@
+ @echo $(DL)GEN $@$(DL)
+ @echo $(DL) (HTTP2)$(DL) > $@
+ @echo $(DL) http2_module,$(DL) >> $@
+ @echo $(DL) nghttp2_is_fatal,$(DL) >> $@
+ @echo $(DL) nghttp2_option_del,$(DL) >> $@
+ @echo $(DL) nghttp2_option_new,$(DL) >> $@
+ @echo $(DL) nghttp2_option_set_no_auto_window_update,$(DL) >> $@
+ @echo $(DL) nghttp2_option_set_peer_max_concurrent_streams,$(DL) >> $@
+ @echo $(DL) nghttp2_session_callbacks_del,$(DL) >> $@
+ @echo $(DL) nghttp2_session_callbacks_new,$(DL) >> $@
+ @echo $(DL) nghttp2_session_callbacks_set_before_frame_send_callback,$(DL) >> $@
+ @echo $(DL) nghttp2_session_callbacks_set_on_data_chunk_recv_callback,$(DL) >> $@
+ @echo $(DL) nghttp2_session_callbacks_set_on_frame_recv_callback,$(DL) >> $@
+ @echo $(DL) nghttp2_session_callbacks_set_on_header_callback,$(DL) >> $@
+ @echo $(DL) nghttp2_session_callbacks_set_on_stream_close_callback,$(DL) >> $@
+ @echo $(DL) nghttp2_session_callbacks_set_send_callback,$(DL) >> $@
+ @echo $(DL) nghttp2_session_client_new2,$(DL) >> $@
+ @echo $(DL) nghttp2_session_consume,$(DL) >> $@
+ @echo $(DL) nghttp2_session_consume_connection,$(DL) >> $@
+ @echo $(DL) nghttp2_session_del,$(DL) >> $@
+ @echo $(DL) nghttp2_session_get_remote_settings,$(DL) >> $@
+ @echo $(DL) nghttp2_session_get_stream_user_data,$(DL) >> $@
+ @echo $(DL) nghttp2_session_mem_recv,$(DL) >> $@
+ @echo $(DL) nghttp2_session_resume_data,$(DL) >> $@
+ @echo $(DL) nghttp2_session_send,$(DL) >> $@
+ @echo $(DL) nghttp2_session_want_read,$(DL) >> $@
+ @echo $(DL) nghttp2_session_want_write,$(DL) >> $@
+ @echo $(DL) nghttp2_strerror,$(DL) >> $@
+ @echo $(DL) nghttp2_submit_goaway,$(DL) >> $@
+ @echo $(DL) nghttp2_submit_ping,$(DL) >> $@
+ @echo $(DL) nghttp2_submit_request,$(DL) >> $@
+ @echo $(DL) nghttp2_submit_rst_stream,$(DL) >> $@
+ @echo $(DL) nghttp2_submit_settings,$(DL) >> $@
+ @echo $(DL) nghttp2_submit_window_update,$(DL) >> $@
+ @echo $(DL) nghttp2_version$(DL) >> $@
+
+# Include the 'tail' makefile that has targets that depend on variables defined
+# in this makefile
+#
+
+include $(APBUILD)/NWGNUtail.inc
+
+
diff --git a/modules/http2/NWGNUproxyht2 b/modules/http2/NWGNUproxyht2
new file mode 100644
index 0000000..ca44ad7
--- /dev/null
+++ b/modules/http2/NWGNUproxyht2
@@ -0,0 +1,288 @@
+#
+# This Makefile requires the environment var NGH2SRC
+# pointing to the base directory of nghttp2 source tree.
+#
+
+#
+# Declare the sub-directories to be built here
+#
+
+SUBDIRS = \
+ $(EOLIST)
+
+#
+# Get the 'head' of the build environment. This includes default targets and
+# paths to tools
+#
+
+include $(AP_WORK)/build/NWGNUhead.inc
+
+#
+# build this level's files
+#
+# Make sure all needed macros are defined
+#
+
+#
+# These directories will be at the beginning of the include list, followed by
+# INCDIRS
+#
+XINCDIRS += \
+ $(APR)/include \
+ $(APRUTIL)/include \
+ $(SRC)/include \
+ $(NGH2SRC)/lib/includes \
+ $(STDMOD)/proxy \
+ $(SERVER)/mpm/NetWare \
+ $(NWOS) \
+ $(EOLIST)
+
+#
+# These flags will come after CFLAGS
+#
+XCFLAGS += \
+ $(EOLIST)
+
+#
+# These defines will come after DEFINES
+#
+XDEFINES += \
+ $(EOLIST)
+
+#
+# These flags will be added to the link.opt file
+#
+XLFLAGS += \
+ -L$(OBJDIR) \
+ $(EOLIST)
+
+#
+# These values will be appended to the correct variables based on the value of
+# RELEASE
+#
+ifeq "$(RELEASE)" "debug"
+XINCDIRS += \
+ $(EOLIST)
+
+XCFLAGS += \
+ $(EOLIST)
+
+XDEFINES += \
+ $(EOLIST)
+
+XLFLAGS += \
+ $(EOLIST)
+endif
+
+ifeq "$(RELEASE)" "noopt"
+XINCDIRS += \
+ $(EOLIST)
+
+XCFLAGS += \
+ $(EOLIST)
+
+XDEFINES += \
+ $(EOLIST)
+
+XLFLAGS += \
+ $(EOLIST)
+endif
+
+ifeq "$(RELEASE)" "release"
+XINCDIRS += \
+ $(EOLIST)
+
+XCFLAGS += \
+ $(EOLIST)
+
+XDEFINES += \
+ $(EOLIST)
+
+XLFLAGS += \
+ $(EOLIST)
+endif
+
+#
+# These are used by the link target if an NLM is being generated
+# This is used by the link 'name' directive to name the nlm. If left blank
+# TARGET_nlm (see below) will be used.
+#
+NLM_NAME = proxyht2
+
+#
+# This is used by the link '-desc ' directive.
+# If left blank, NLM_NAME will be used.
+#
+NLM_DESCRIPTION = Apache $(VERSION_STR) HTTP2 Proxy module
+#
+# This is used by the '-threadname' directive. If left blank,
+# NLM_NAME Thread will be used.
+#
+NLM_THREAD_NAME = $(NLM_NAME)
+
+#
+# If this is specified, it will override VERSION value in
+# $(AP_WORK)/build/NWGNUenvironment.inc
+#
+NLM_VERSION =
+
+#
+# If this is specified, it will override the default of 64K
+#
+NLM_STACK_SIZE = 65536
+
+#
+# If this is specified it will be used by the link '-entry' directive
+#
+NLM_ENTRY_SYM =
+
+#
+# If this is specified it will be used by the link '-exit' directive
+#
+NLM_EXIT_SYM =
+
+#
+# If this is specified it will be used by the link '-check' directive
+#
+NLM_CHECK_SYM =
+
+#
+# If this is specified it will be used by the link '-flags' directive
+#
+NLM_FLAGS =
+
+#
+# If this is specified it will be linked in with the XDCData option in the def
+# file instead of the default of $(NWOS)/apache.xdc. XDCData can be disabled
+# by setting APACHE_UNIPROC in the environment
+#
+XDCDATA =
+
+#
+# Declare all target files (you must add your files here)
+#
+
+#
+# If there is an NLM target, put it here
+#
+TARGET_nlm = \
+ $(OBJDIR)/$(NLM_NAME).nlm \
+ $(EOLIST)
+
+#
+# If there is an LIB target, put it here
+#
+TARGET_lib = \
+ $(EOLIST)
+
+#
+# These are the OBJ files needed to create the NLM target above.
+# Paths must all use the '/' character
+#
+FILES_nlm_objs = \
+ $(OBJDIR)/mod_proxy_http2.o \
+ $(OBJDIR)/h2_proxy_session.o \
+ $(OBJDIR)/h2_proxy_util.o \
+ $(EOLIST)
+
+#
+# These are the LIB files needed to create the NLM target above.
+# These will be added as a library command in the link.opt file.
+#
+FILES_nlm_libs = \
+ $(PRELUDE) \
+ $(EOLIST)
+
+#
+# These are the modules that the above NLM target depends on to load.
+# These will be added as a module command in the link.opt file.
+#
+FILES_nlm_modules = \
+ Libc \
+ Apache2 \
+ mod_proxy \
+ mod_http2 \
+ $(EOLIST)
+
+#
+# If the nlm has a msg file, put its path here
+#
+FILE_nlm_msg =
+
+#
+# If the nlm has a hlp file, put its path here
+#
+FILE_nlm_hlp =
+
+#
+# If this is specified, it will override $(NWOS)\copyright.txt.
+#
+FILE_nlm_copyright =
+
+#
+# Any additional imports go here
+#
+FILES_nlm_Ximports = \
+ @libc.imp \
+ @aprlib.imp \
+ @httpd.imp \
+ @$(OBJDIR)/mod_http2.imp \
+ ap_proxy_acquire_connection \
+ ap_proxy_canon_netloc \
+ ap_proxy_canonenc \
+ ap_proxy_connect_backend \
+ ap_proxy_connection_create \
+ ap_proxy_cookie_reverse_map \
+ ap_proxy_determine_connection \
+ ap_proxy_location_reverse_map \
+ ap_proxy_port_of_scheme \
+ ap_proxy_release_connection \
+ ap_proxy_ssl_connection_cleanup \
+ ap_sock_disable_nagle \
+ proxy_hook_canon_handler \
+ proxy_hook_scheme_handler \
+ proxy_module \
+ proxy_run_detach_backend \
+ $(EOLIST)
+
+#
+# Any symbols to be exported go here
+#
+FILES_nlm_exports = \
+ proxy_http2_module \
+ $(EOLIST)
+
+#
+# These are the OBJ files needed to create the LIB target above.
+# Paths must all use the '/' character
+#
+FILES_lib_objs :=
+#
+# implement targets and dependencies (leave this section alone)
+#
+
+libs :: $(OBJDIR) $(TARGET_lib)
+
+nlms :: libs $(TARGET_nlm)
+
+#
+# Updated this target to create necessary directories and copy files to the
+# correct place. (See $(AP_WORK)/build/NWGNUhead.inc for examples)
+#
+install :: nlms FORCE
+ $(call COPY,$(OBJDIR)/*.nlm, $(INSTALLBASE)/modules/)
+
+clean ::
+
+#
+# Any specialized rules here
+#
+
+#
+# Include the 'tail' makefile that has targets that depend on variables defined
+# in this makefile
+#
+
+include $(APBUILD)/NWGNUtail.inc
+
+
diff --git a/modules/http2/README.h2 b/modules/http2/README.h2
new file mode 100644
index 0000000..f2956f3
--- /dev/null
+++ b/modules/http2/README.h2
@@ -0,0 +1,70 @@
+The http2 module adds support for the HTTP/2 protocol to the server.
+
+Specifically, it supports the protocols "h2" (HTTP2 over TLS) and "h2c"
+(HTTP2 over plain HTTP connections via Upgrade). Additionally it offers
+the "direct" mode for both encrypted and unencrypted connections.
+
+You may enable it for the whole server or specific virtual hosts only.
+
+
+BUILD
+
+If you have libnghttp2 (https://nghttp2.org) installed on your system, simply
+add
+
+ --enable-http2
+
+to your httpd ./configure invocation. Should libnghttp2 reside in an unusual
+location, add
+
+ --with-nghttp2=<path>
+
+to ./configure. <path> is expected to be the installation prefix, so there
+should be a <path>/lib/libnghttp2.*. If your system supports pkg-config,
+<path>/lib/pkgconfig/libnghttp2.pc will be inspected.
+
+If you want to link nghttp2 statically into the mod_http2 module, you may,
+as with mod_ssl, add
+
+ --enable-nghttp2-staticlib-deps
+
+For this, the lib directory should contain only the static libnghttp2.a, not
+its shared cousins.
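+
+For example, assuming a (hypothetical) installation prefix of /opt/nghttp2,
+the build could then be configured with
+
+  ./configure --enable-http2 --with-nghttp2=/opt/nghttp2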
+
+
+CONFIGURATION
+
+Whether mod_http2 is enabled for a site depends on the new "Protocols"
+directive. This directive lists all protocols enabled for a server or
+virtual host.
+
+If you do not specify "Protocols", all available protocols are enabled. For
+sites using TLS, the protocol supported by mod_http2 is "h2". For cleartext
+http:, the offered protocol is "h2c".
+
+The following is an example of a server that only supports http/1.1 in
+general and offers h2 for a specific virtual host.
+
+ ...
+ Protocols http/1.1
+ <virtualhost *:443>
+ Protocols h2 http/1.1
+ ...
+ </virtualhost>
+
+Please see the documentation of mod_http2 for a complete list and explanation
+of all options.
+
+
+TLS CONFIGURATION
+
+If you want to use HTTP/2 with a browser, most modern browsers will support
+it without further configuration. However, browsers so far only support
+HTTP/2 over TLS and are especially picky about the certificate and
+encryption ciphers used.
+
+Server admins may look for up-to-date information about "modern" TLS
+compatibility under:
+
+ https://wiki.mozilla.org/Security/Server_Side_TLS#Modern_compatibility
+
diff --git a/modules/http2/config2.m4 b/modules/http2/config2.m4
new file mode 100644
index 0000000..f89f5ba
--- /dev/null
+++ b/modules/http2/config2.m4
@@ -0,0 +1,238 @@
+dnl Licensed to the Apache Software Foundation (ASF) under one or more
+dnl contributor license agreements. See the NOTICE file distributed with
+dnl this work for additional information regarding copyright ownership.
+dnl The ASF licenses this file to You under the Apache License, Version 2.0
+dnl (the "License"); you may not use this file except in compliance with
+dnl the License. You may obtain a copy of the License at
+dnl
+dnl http://www.apache.org/licenses/LICENSE-2.0
+dnl
+dnl Unless required by applicable law or agreed to in writing, software
+dnl distributed under the License is distributed on an "AS IS" BASIS,
+dnl WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+dnl See the License for the specific language governing permissions and
+dnl limitations under the License.
+
+dnl # start of module specific part
+APACHE_MODPATH_INIT(http2)
+
+dnl # list of module object files
+http2_objs="dnl
+mod_http2.lo dnl
+h2_bucket_beam.lo dnl
+h2_bucket_eos.lo dnl
+h2_c1.lo dnl
+h2_c1_io.lo dnl
+h2_c2.lo dnl
+h2_c2_filter.lo dnl
+h2_config.lo dnl
+h2_conn_ctx.lo dnl
+h2_headers.lo dnl
+h2_mplx.lo dnl
+h2_protocol.lo dnl
+h2_push.lo dnl
+h2_request.lo dnl
+h2_session.lo dnl
+h2_stream.lo dnl
+h2_switch.lo dnl
+h2_util.lo dnl
+h2_workers.lo dnl
+"
+
+dnl
+dnl APACHE_CHECK_NGHTTP2
+dnl
+dnl Configure for nghttp2, giving preference to
+dnl "--with-nghttp2=<path>" if it was specified.
+dnl
+AC_DEFUN([APACHE_CHECK_NGHTTP2],[
+ AC_CACHE_CHECK([for nghttp2], [ac_cv_nghttp2], [
+ dnl initialise the variables we use
+ ac_cv_nghttp2=no
+ ap_nghttp2_found=""
+ ap_nghttp2_base=""
+ ap_nghttp2_libs=""
+
+ dnl Determine the nghttp2 base directory, if any
+ AC_MSG_CHECKING([for user-provided nghttp2 base directory])
+ AC_ARG_WITH(nghttp2, APACHE_HELP_STRING(--with-nghttp2=PATH, nghttp2 installation directory), [
+ dnl If --with-nghttp2 specifies a directory, we use that directory
+ if test "x$withval" != "xyes" -a "x$withval" != "x"; then
+ dnl This ensures $withval is actually a directory and that it is absolute
+ ap_nghttp2_base="`cd $withval ; pwd`"
+ fi
+ ])
+ if test "x$ap_nghttp2_base" = "x"; then
+ AC_MSG_RESULT(none)
+ else
+ AC_MSG_RESULT($ap_nghttp2_base)
+ fi
+
+ dnl Run header and version checks
+ saved_CPPFLAGS="$CPPFLAGS"
+ saved_LIBS="$LIBS"
+ saved_LDFLAGS="$LDFLAGS"
+
+ dnl Before doing anything else, load in pkg-config variables
+ if test -n "$PKGCONFIG"; then
+ saved_PKG_CONFIG_PATH="$PKG_CONFIG_PATH"
+ AC_MSG_CHECKING([for pkg-config along $PKG_CONFIG_PATH])
+ if test "x$ap_nghttp2_base" != "x" ; then
+ if test -f "${ap_nghttp2_base}/lib/pkgconfig/libnghttp2.pc"; then
+ dnl Ensure that the given path is used by pkg-config too, otherwise
+ dnl the system libnghttp2.pc might be picked up instead.
+ PKG_CONFIG_PATH="${ap_nghttp2_base}/lib/pkgconfig${PKG_CONFIG_PATH+:}${PKG_CONFIG_PATH}"
+ export PKG_CONFIG_PATH
+ elif test -f "${ap_nghttp2_base}/lib64/pkgconfig/libnghttp2.pc"; then
+ dnl Ensure that the given path is used by pkg-config too, otherwise
+ dnl the system libnghttp2.pc might be picked up instead.
+ PKG_CONFIG_PATH="${ap_nghttp2_base}/lib64/pkgconfig${PKG_CONFIG_PATH+:}${PKG_CONFIG_PATH}"
+ export PKG_CONFIG_PATH
+ fi
+ fi
+ AC_ARG_ENABLE(nghttp2-staticlib-deps,APACHE_HELP_STRING(--enable-nghttp2-staticlib-deps,[link mod_http2 with dependencies of libnghttp2's static libraries (as indicated by "pkg-config --static"). Must be specified in addition to --enable-http2.]), [
+ if test "$enableval" = "yes"; then
+ PKGCONFIG_LIBOPTS="--static"
+ fi
+ ])
+ ap_nghttp2_libs="`$PKGCONFIG $PKGCONFIG_LIBOPTS --libs-only-l --silence-errors libnghttp2`"
+ if test $? -eq 0; then
+ ap_nghttp2_found="yes"
+ pkglookup="`$PKGCONFIG --cflags-only-I libnghttp2`"
+ APR_ADDTO(CPPFLAGS, [$pkglookup])
+ APR_ADDTO(MOD_CFLAGS, [$pkglookup])
+ pkglookup="`$PKGCONFIG $PKGCONFIG_LIBOPTS --libs-only-L libnghttp2`"
+ APR_ADDTO(LDFLAGS, [$pkglookup])
+ APR_ADDTO(MOD_LDFLAGS, [$pkglookup])
+ pkglookup="`$PKGCONFIG $PKGCONFIG_LIBOPTS --libs-only-other libnghttp2`"
+ APR_ADDTO(LDFLAGS, [$pkglookup])
+ APR_ADDTO(MOD_LDFLAGS, [$pkglookup])
+ fi
+ PKG_CONFIG_PATH="$saved_PKG_CONFIG_PATH"
+ fi
+
+ dnl fall back to the user-supplied directory if not found via pkg-config
+ if test "x$ap_nghttp2_base" != "x" -a "x$ap_nghttp2_found" = "x"; then
+ APR_ADDTO(CPPFLAGS, [-I$ap_nghttp2_base/include])
+ APR_ADDTO(MOD_CFLAGS, [-I$ap_nghttp2_base/include])
+ APR_ADDTO(LDFLAGS, [-L$ap_nghttp2_base/lib])
+ APR_ADDTO(MOD_LDFLAGS, [-L$ap_nghttp2_base/lib])
+ if test "x$ap_platform_runtime_link_flag" != "x"; then
+ APR_ADDTO(LDFLAGS, [$ap_platform_runtime_link_flag$ap_nghttp2_base/lib])
+ APR_ADDTO(MOD_LDFLAGS, [$ap_platform_runtime_link_flag$ap_nghttp2_base/lib])
+ fi
+ fi
+
+ AC_MSG_CHECKING([for nghttp2 version >= 1.2.1])
+ AC_TRY_COMPILE([#include <nghttp2/nghttp2ver.h>],[
+#if !defined(NGHTTP2_VERSION_NUM)
+#error "Missing nghttp2 version"
+#endif
+#if NGHTTP2_VERSION_NUM < 0x010201
+#error "Unsupported nghttp2 version " NGHTTP2_VERSION_TEXT
+#endif],
+ [AC_MSG_RESULT(OK)
+ ac_cv_nghttp2=yes],
+ [AC_MSG_RESULT(FAILED)])
+
+ if test "x$ac_cv_nghttp2" = "xyes"; then
+ ap_nghttp2_libs="${ap_nghttp2_libs:--lnghttp2} `$apr_config --libs`"
+ APR_ADDTO(MOD_LDFLAGS, [$ap_nghttp2_libs])
+ APR_ADDTO(LIBS, [$ap_nghttp2_libs])
+
+ dnl Run library and function checks
+ liberrors=""
+ AC_CHECK_HEADERS([nghttp2/nghttp2.h])
+ AC_CHECK_FUNCS([nghttp2_session_server_new2], [], [liberrors="yes"])
+ if test "x$liberrors" != "x"; then
+ AC_MSG_WARN([nghttp2 library is unusable])
+ fi
+dnl # nghttp2 >= 1.3.0: access to stream weights
+ AC_CHECK_FUNCS([nghttp2_stream_get_weight], [], [liberrors="yes"])
+ if test "x$liberrors" != "x"; then
+ AC_MSG_WARN([nghttp2 version >= 1.3.0 is required])
+ fi
+dnl # nghttp2 >= 1.5.0: changing stream priorities
+ AC_CHECK_FUNCS([nghttp2_session_change_stream_priority],
+ [APR_ADDTO(MOD_CPPFLAGS, ["-DH2_NG2_CHANGE_PRIO"])], [])
+dnl # nghttp2 >= 1.14.0: invalid header callback
+ AC_CHECK_FUNCS([nghttp2_session_callbacks_set_on_invalid_header_callback],
+ [APR_ADDTO(MOD_CPPFLAGS, ["-DH2_NG2_INVALID_HEADER_CB"])], [])
+dnl # nghttp2 >= 1.15.0: get/set stream window sizes
+ AC_CHECK_FUNCS([nghttp2_session_get_stream_local_window_size],
+ [APR_ADDTO(MOD_CPPFLAGS, ["-DH2_NG2_LOCAL_WIN_SIZE"])], [])
+dnl # nghttp2 >= 1.15.0: don't keep info on closed streams
+ AC_CHECK_FUNCS([nghttp2_option_set_no_closed_streams],
+ [APR_ADDTO(MOD_CPPFLAGS, ["-DH2_NG2_NO_CLOSED_STREAMS"])], [])
+dnl # nghttp2 >= 1.50.0: rfc9113 leading/trailing whitespace strictness
+ AC_CHECK_FUNCS([nghttp2_option_set_no_rfc9113_leading_and_trailing_ws_validation],
+ [APR_ADDTO(MOD_CPPFLAGS, ["-DH2_NG2_RFC9113_STRICTNESS"])], [])
+ else
+ AC_MSG_WARN([nghttp2 version is too old])
+ fi
+
+ dnl restore
+ CPPFLAGS="$saved_CPPFLAGS"
+ LIBS="$saved_LIBS"
+ LDFLAGS="$saved_LDFLAGS"
+ ])
+ if test "x$ac_cv_nghttp2" = "xyes"; then
+ AC_DEFINE(HAVE_NGHTTP2, 1, [Define if nghttp2 is available])
+ fi
+])
+
+
+dnl # hook module into the Autoconf mechanism (--enable-http2)
+APACHE_MODULE(http2, [HTTP/2 protocol handling in addition to HTTP protocol
+handling. Implemented by mod_http2. This module requires a libnghttp2 installation.
+See --with-nghttp2 on how to manage non-standard locations. This module
+is usually linked shared and requires loading. ], $http2_objs, , most, [
+ APACHE_CHECK_OPENSSL
+ if test "$ac_cv_openssl" = "yes" ; then
+ APR_ADDTO(MOD_CPPFLAGS, ["-DH2_OPENSSL"])
+ fi
+
+ APACHE_CHECK_NGHTTP2
+ if test "$ac_cv_nghttp2" = "yes" ; then
+ if test "x$enable_http2" = "xshared"; then
+ # The only symbol which needs to be exported is the module
+ # structure, so ask libtool to hide everything else:
+ APR_ADDTO(MOD_HTTP2_LDADD, [-export-symbols-regex http2_module])
+ fi
+ else
+ enable_http2=no
+ fi
+])
+
+# Ensure that other modules can pick up mod_http2.h
+# icing: hold back for now until it is more stable
+#APR_ADDTO(INCLUDES, [-I\$(top_srcdir)/$modpath_current])
+
+
+
+dnl # list of module object files
+proxy_http2_objs="dnl
+mod_proxy_http2.lo dnl
+h2_proxy_session.lo dnl
+h2_proxy_util.lo dnl
+"
+
+dnl # hook module into the Autoconf mechanism (--enable-proxy_http2)
+APACHE_MODULE(proxy_http2, [HTTP/2 proxy module. This module requires a libnghttp2 installation.
+See --with-nghttp2 on how to manage non-standard locations. Also requires --enable-proxy.], $proxy_http2_objs, , no, [
+ APACHE_CHECK_NGHTTP2
+ if test "$ac_cv_nghttp2" = "yes" ; then
+ if test "x$enable_http2" = "xshared"; then
+ # The only symbol which needs to be exported is the module
+ # structure, so ask libtool to hide everything else:
+ APR_ADDTO(MOD_PROXY_HTTP2_LDADD, [-export-symbols-regex proxy_http2_module])
+ fi
+ else
+ enable_proxy_http2=no
+ fi
+], proxy)
+
+
+dnl # end of module specific part
+APACHE_MODPATH_FINISH
+
diff --git a/modules/http2/h2.h b/modules/http2/h2.h
new file mode 100644
index 0000000..250e726
--- /dev/null
+++ b/modules/http2/h2.h
@@ -0,0 +1,192 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __mod_h2__h2__
+#define __mod_h2__h2__
+
+#include <apr_version.h>
+#include <ap_mmn.h>
+
+struct h2_session;
+struct h2_stream;
+
+/*
+ * When apr pollsets can poll file descriptors (e.g. pipes),
+ * we use it for polling stream input/output.
+ */
+#ifdef H2_NO_PIPES
+#define H2_USE_PIPES 0
+#else
+#define H2_USE_PIPES (APR_FILES_AS_SOCKETS && APR_VERSION_AT_LEAST(1,6,0))
+#endif
+
+/**
+ * The magic PRIamble of RFC 7540 that is always sent when starting
+ * an h2 communication.
+ */
+extern const char *H2_MAGIC_TOKEN;
+
+#define H2_ERR_NO_ERROR (0x00)
+#define H2_ERR_PROTOCOL_ERROR (0x01)
+#define H2_ERR_INTERNAL_ERROR (0x02)
+#define H2_ERR_FLOW_CONTROL_ERROR (0x03)
+#define H2_ERR_SETTINGS_TIMEOUT (0x04)
+#define H2_ERR_STREAM_CLOSED (0x05)
+#define H2_ERR_FRAME_SIZE_ERROR (0x06)
+#define H2_ERR_REFUSED_STREAM (0x07)
+#define H2_ERR_CANCEL (0x08)
+#define H2_ERR_COMPRESSION_ERROR (0x09)
+#define H2_ERR_CONNECT_ERROR (0x0a)
+#define H2_ERR_ENHANCE_YOUR_CALM (0x0b)
+#define H2_ERR_INADEQUATE_SECURITY (0x0c)
+#define H2_ERR_HTTP_1_1_REQUIRED (0x0d)
+
+#define H2_HEADER_METHOD ":method"
+#define H2_HEADER_METHOD_LEN 7
+#define H2_HEADER_SCHEME ":scheme"
+#define H2_HEADER_SCHEME_LEN 7
+#define H2_HEADER_AUTH ":authority"
+#define H2_HEADER_AUTH_LEN 10
+#define H2_HEADER_PATH ":path"
+#define H2_HEADER_PATH_LEN 5
+#define H2_CRLF "\r\n"
+
+/* Size of the frame header itself in HTTP/2 */
+#define H2_FRAME_HDR_LEN 9
+
+/* Max data size to write so it fits inside a TLS record */
+#define H2_DATA_CHUNK_SIZE ((16*1024) - 100 - H2_FRAME_HDR_LEN)
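+/* With the 9 byte frame header this amounts to 16*1024 - 100 - 9 = 16275
+ * bytes of DATA payload, leaving roughly 100 bytes of headroom for TLS
+ * record overhead (a TLS record carries at most 16K of plaintext). */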
+
+/* Maximum number of padding bytes in a frame, rfc7540 */
+#define H2_MAX_PADLEN 256
+/* Initial default window size, RFC 7540 ch. 6.5.2 */
+#define H2_INITIAL_WINDOW_SIZE ((64*1024)-1)
+
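+/* Odd stream ids are initiated by the client, even ones by the server
+ * (RFC 7540, ch. 5.1.1). */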
+#define H2_STREAM_CLIENT_INITIATED(id) (id&0x01)
+
+#define H2_ALEN(a) (sizeof(a)/sizeof((a)[0]))
+
+#define H2MAX(x,y) ((x) > (y) ? (x) : (y))
+#define H2MIN(x,y) ((x) < (y) ? (x) : (y))
+
+typedef enum {
+ H2_DEPENDANT_AFTER,
+ H2_DEPENDANT_INTERLEAVED,
+ H2_DEPENDANT_BEFORE,
+} h2_dependency;
+
+typedef struct h2_priority {
+ h2_dependency dependency;
+ int weight;
+} h2_priority;
+
+typedef enum {
+ H2_PUSH_NONE,
+ H2_PUSH_DEFAULT,
+ H2_PUSH_HEAD,
+ H2_PUSH_FAST_LOAD,
+} h2_push_policy;
+
+typedef enum {
+ H2_SESSION_ST_INIT, /* send initial SETTINGS, etc. */
+ H2_SESSION_ST_DONE, /* finished, connection close */
+ H2_SESSION_ST_IDLE, /* nothing to write, expecting incoming data */
+ H2_SESSION_ST_BUSY, /* read/write without stop */
+ H2_SESSION_ST_WAIT, /* waiting for c1 incoming + c2s output */
+ H2_SESSION_ST_CLEANUP, /* pool is being cleaned up */
+} h2_session_state;
+
+typedef struct h2_session_props {
+ int accepted_max; /* the highest remote stream id that was/will be handled */
+ int completed_max; /* the highest remote stream completed */
+ int emitted_count; /* the number of local streams sent */
+ int emitted_max; /* the highest local stream id sent */
+ int error; /* the last session error encountered */
+ const char *error_msg; /* the short message given on the error */
+ unsigned int accepting : 1; /* if the session is accepting new streams */
+ unsigned int shutdown : 1; /* if the final GOAWAY has been sent */
+} h2_session_props;
+
+typedef enum h2_stream_state_t {
+ H2_SS_IDLE,
+ H2_SS_RSVD_R,
+ H2_SS_RSVD_L,
+ H2_SS_OPEN,
+ H2_SS_CLOSED_R,
+ H2_SS_CLOSED_L,
+ H2_SS_CLOSED,
+ H2_SS_CLEANUP,
+ H2_SS_MAX
+} h2_stream_state_t;
+
+typedef enum {
+ H2_SEV_CLOSED_L,
+ H2_SEV_CLOSED_R,
+ H2_SEV_CANCELLED,
+ H2_SEV_EOS_SENT,
+ H2_SEV_IN_ERROR,
+ H2_SEV_IN_DATA_PENDING,
+ H2_SEV_OUT_C1_BLOCK,
+} h2_stream_event_t;
+
+
+/* h2_request is the transformer of HTTP2 streams into HTTP/1.1 internal
+ * format that will be fed to various httpd input filters to finally
+ * become a request_rec to be handled by someone.
+ */
+typedef struct h2_request h2_request;
+struct h2_request {
+ const char *method; /* pseudo header values, see ch. 8.1.2.3 */
+ const char *scheme;
+ const char *authority;
+ const char *path;
+ apr_table_t *headers;
+
+ apr_time_t request_time;
+ apr_off_t raw_bytes; /* RAW network bytes that generated this request - if known. */
+ int http_status; /* Store a possible HTTP status code that gets
+ * defined before creating the dummy HTTP/1.1
+ * request e.g. due to an error already
+ * detected.
+ */
+};
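+
+/* For illustration: a request like "GET https://example.org/index.html"
+ * arrives as the pseudo header fields :method "GET", :scheme "https",
+ * :authority "example.org" and :path "/index.html"; all regular header
+ * fields end up in the 'headers' table above. */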
+
+/*
+ * A possible HTTP status code is not defined yet. See the http_status field
+ * in struct h2_request above for further explanation.
+ */
+#define H2_HTTP_STATUS_UNSET (0)
+
+typedef apr_status_t h2_io_data_cb(void *ctx, const char *data, apr_off_t len);
+
+typedef int h2_stream_pri_cmp_fn(int stream_id1, int stream_id2, void *session);
+typedef struct h2_stream *h2_stream_get_fn(struct h2_session *session, int stream_id);
+
+/* Note key to attach stream id to conn_rec/request_rec instances */
+#define H2_HDR_CONFORMANCE "http2-hdr-conformance"
+#define H2_HDR_CONFORMANCE_UNSAFE "unsafe"
+#define H2_PUSH_MODE_NOTE "http2-push-mode"
+
+
+#if AP_MODULE_MAGIC_AT_LEAST(20211221, 6)
+#define AP_HAS_RESPONSE_BUCKETS 1
+
+#else /* AP_MODULE_MAGIC_AT_LEAST(20211221, 6) */
+#define AP_HAS_RESPONSE_BUCKETS 0
+
+#endif /* else AP_MODULE_MAGIC_AT_LEAST(20211221, 6) */
+
+#endif /* defined(__mod_h2__h2__) */
diff --git a/modules/http2/h2_bucket_beam.c b/modules/http2/h2_bucket_beam.c
new file mode 100644
index 0000000..cbf7f34
--- /dev/null
+++ b/modules/http2/h2_bucket_beam.c
@@ -0,0 +1,825 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <apr_lib.h>
+#include <apr_atomic.h>
+#include <apr_strings.h>
+#include <apr_time.h>
+#include <apr_buckets.h>
+#include <apr_thread_mutex.h>
+#include <apr_thread_cond.h>
+
+#include <httpd.h>
+#include <http_protocol.h>
+#include <http_log.h>
+
+#include "h2_private.h"
+#include "h2_conn_ctx.h"
+#include "h2_headers.h"
+#include "h2_util.h"
+#include "h2_bucket_beam.h"
+
+
+#define H2_BLIST_INIT(b) APR_RING_INIT(&(b)->list, apr_bucket, link);
+#define H2_BLIST_SENTINEL(b) APR_RING_SENTINEL(&(b)->list, apr_bucket, link)
+#define H2_BLIST_EMPTY(b) APR_RING_EMPTY(&(b)->list, apr_bucket, link)
+#define H2_BLIST_FIRST(b) APR_RING_FIRST(&(b)->list)
+#define H2_BLIST_LAST(b) APR_RING_LAST(&(b)->list)
+#define H2_BLIST_INSERT_HEAD(b, e) do { \
+ apr_bucket *ap__b = (e); \
+ APR_RING_INSERT_HEAD(&(b)->list, ap__b, apr_bucket, link); \
+ } while (0)
+#define H2_BLIST_INSERT_TAIL(b, e) do { \
+ apr_bucket *ap__b = (e); \
+ APR_RING_INSERT_TAIL(&(b)->list, ap__b, apr_bucket, link); \
+ } while (0)
+#define H2_BLIST_CONCAT(a, b) do { \
+ APR_RING_CONCAT(&(a)->list, &(b)->list, apr_bucket, link); \
+ } while (0)
+#define H2_BLIST_PREPEND(a, b) do { \
+ APR_RING_PREPEND(&(a)->list, &(b)->list, apr_bucket, link); \
+ } while (0)
+
+
+static int buffer_is_empty(h2_bucket_beam *beam);
+static apr_off_t get_buffered_data_len(h2_bucket_beam *beam);
+
+static int h2_blist_count(h2_blist *blist)
+{
+ apr_bucket *b;
+ int count = 0;
+
+ for (b = H2_BLIST_FIRST(blist); b != H2_BLIST_SENTINEL(blist);
+ b = APR_BUCKET_NEXT(b)) {
+ ++count;
+ }
+ return count;
+}
+
+#define H2_BEAM_LOG(beam, c, level, rv, msg, bb) \
+ do { \
+ if (APLOG_C_IS_LEVEL((c),(level))) { \
+ char buffer[4 * 1024]; \
+ apr_size_t len, bmax = sizeof(buffer)/sizeof(buffer[0]); \
+ len = bb? h2_util_bb_print(buffer, bmax, "", "", bb) : 0; \
+ ap_log_cerror(APLOG_MARK, (level), rv, (c), \
+ "BEAM[%s,%s%sdata=%ld,buckets(send/consumed)=%d/%d]: %s %s", \
+ (beam)->name, \
+ (beam)->aborted? "aborted," : "", \
+ buffer_is_empty(beam)? "empty," : "", \
+ (long)get_buffered_data_len(beam), \
+ h2_blist_count(&(beam)->buckets_to_send), \
+ h2_blist_count(&(beam)->buckets_consumed), \
+ (msg), len? buffer : ""); \
+ } \
+ } while (0)
+
+
+static int bucket_is_mmap(apr_bucket *b)
+{
+#if APR_HAS_MMAP
+ return APR_BUCKET_IS_MMAP(b);
+#else
+ /* if it is not defined as enabled, it should always be no */
+ return 0;
+#endif
+}
+
+static apr_off_t bucket_mem_used(apr_bucket *b)
+{
+ if (APR_BUCKET_IS_FILE(b) || bucket_is_mmap(b)) {
+ return 0;
+ }
+ else {
+ /* should all have determinate length */
+ return (apr_off_t)b->length;
+ }
+}
+
+static int report_consumption(h2_bucket_beam *beam, int locked)
+{
+ int rv = 0;
+ apr_off_t len = beam->recv_bytes - beam->recv_bytes_reported;
+ h2_beam_io_callback *cb = beam->cons_io_cb;
+
+ if (len > 0) {
+ if (cb) {
+ void *ctx = beam->cons_ctx;
+
+ if (locked) apr_thread_mutex_unlock(beam->lock);
+ cb(ctx, beam, len);
+ if (locked) apr_thread_mutex_lock(beam->lock);
+ rv = 1;
+ }
+ beam->recv_bytes_reported += len;
+ }
+ return rv;
+}
+
+static apr_size_t calc_buffered(h2_bucket_beam *beam)
+{
+ apr_size_t len = 0;
+ apr_bucket *b;
+ for (b = H2_BLIST_FIRST(&beam->buckets_to_send);
+ b != H2_BLIST_SENTINEL(&beam->buckets_to_send);
+ b = APR_BUCKET_NEXT(b)) {
+ if (b->length == ((apr_size_t)-1)) {
+ /* do not count */
+ }
+ else if (APR_BUCKET_IS_FILE(b) || bucket_is_mmap(b)) {
+ /* if unread, has no real mem footprint. */
+ }
+ else {
+ len += b->length;
+ }
+ }
+ return len;
+}
+
+static void purge_consumed_buckets(h2_bucket_beam *beam)
+{
+ apr_bucket *b;
+ /* delete all sender buckets in purge brigade, needs to be called
+ * from sender thread only */
+ while (!H2_BLIST_EMPTY(&beam->buckets_consumed)) {
+ b = H2_BLIST_FIRST(&beam->buckets_consumed);
+ apr_bucket_delete(b);
+ }
+}
+
+static apr_size_t calc_space_left(h2_bucket_beam *beam)
+{
+ if (beam->max_buf_size > 0) {
+ apr_size_t len = calc_buffered(beam);
+ return (beam->max_buf_size > len? (beam->max_buf_size - len) : 0);
+ }
+ return APR_SIZE_MAX;
+}
+
+static int buffer_is_empty(h2_bucket_beam *beam)
+{
+ return H2_BLIST_EMPTY(&beam->buckets_to_send);
+}
+
+static apr_status_t wait_not_empty(h2_bucket_beam *beam, conn_rec *c, apr_read_type_e block)
+{
+ apr_status_t rv = APR_SUCCESS;
+
+ while (buffer_is_empty(beam) && APR_SUCCESS == rv) {
+ if (beam->aborted) {
+ rv = APR_ECONNABORTED;
+ }
+ else if (beam->closed) {
+ rv = APR_EOF;
+ }
+ else if (APR_BLOCK_READ != block) {
+ rv = APR_EAGAIN;
+ }
+ else if (beam->timeout > 0) {
+ H2_BEAM_LOG(beam, c, APLOG_TRACE2, rv, "wait_not_empty, timeout", NULL);
+ rv = apr_thread_cond_timedwait(beam->change, beam->lock, beam->timeout);
+ }
+ else {
+ H2_BEAM_LOG(beam, c, APLOG_TRACE2, rv, "wait_not_empty, forever", NULL);
+ rv = apr_thread_cond_wait(beam->change, beam->lock);
+ }
+ }
+ return rv;
+}
+
+static apr_status_t wait_not_full(h2_bucket_beam *beam, conn_rec *c,
+ apr_read_type_e block,
+ apr_size_t *pspace_left)
+{
+ apr_status_t rv = APR_SUCCESS;
+ apr_size_t left;
+
+ while (0 == (left = calc_space_left(beam)) && APR_SUCCESS == rv) {
+ if (beam->aborted) {
+ rv = APR_ECONNABORTED;
+ }
+ else if (block != APR_BLOCK_READ) {
+ rv = APR_EAGAIN;
+ }
+ else {
+ if (beam->timeout > 0) {
+ H2_BEAM_LOG(beam, c, APLOG_TRACE2, rv, "wait_not_full, timeout", NULL);
+ rv = apr_thread_cond_timedwait(beam->change, beam->lock, beam->timeout);
+ }
+ else {
+ H2_BEAM_LOG(beam, c, APLOG_TRACE2, rv, "wait_not_full, forever", NULL);
+ rv = apr_thread_cond_wait(beam->change, beam->lock);
+ }
+ }
+ }
+ *pspace_left = left;
+ return rv;
+}
+
+static void h2_blist_cleanup(h2_blist *bl)
+{
+ apr_bucket *e;
+
+ while (!H2_BLIST_EMPTY(bl)) {
+ e = H2_BLIST_FIRST(bl);
+ apr_bucket_delete(e);
+ }
+}
+
+static void beam_shutdown(h2_bucket_beam *beam, apr_shutdown_how_e how)
+{
+ if (!beam->pool) {
+ /* pool being cleared already */
+ return;
+ }
+
+ /* shutdown both receiver and sender? */
+ if (how == APR_SHUTDOWN_READWRITE) {
+ beam->cons_io_cb = NULL;
+ beam->recv_cb = NULL;
+ }
+
+ /* shutdown sender (or both)? */
+ if (how != APR_SHUTDOWN_READ) {
+ h2_blist_cleanup(&beam->buckets_to_send);
+ purge_consumed_buckets(beam);
+ }
+}
+
+static apr_status_t beam_cleanup(void *data)
+{
+ h2_bucket_beam *beam = data;
+ beam_shutdown(beam, APR_SHUTDOWN_READWRITE);
+ beam->pool = NULL; /* the pool is clearing now */
+ return APR_SUCCESS;
+}
+
+apr_status_t h2_beam_destroy(h2_bucket_beam *beam, conn_rec *c)
+{
+ if (beam->pool) {
+ H2_BEAM_LOG(beam, c, APLOG_TRACE2, 0, "destroy", NULL);
+ apr_pool_cleanup_run(beam->pool, beam, beam_cleanup);
+ }
+ H2_BEAM_LOG(beam, c, APLOG_TRACE2, 0, "destroyed", NULL);
+ return APR_SUCCESS;
+}
+
+apr_status_t h2_beam_create(h2_bucket_beam **pbeam, conn_rec *from,
+ apr_pool_t *pool, int id, const char *tag,
+ apr_size_t max_buf_size,
+ apr_interval_time_t timeout)
+{
+ h2_bucket_beam *beam;
+ h2_conn_ctx_t *conn_ctx = h2_conn_ctx_get(from);
+ apr_status_t rv;
+
+ beam = apr_pcalloc(pool, sizeof(*beam));
+ beam->pool = pool;
+ beam->from = from;
+ beam->id = id;
+ beam->name = apr_psprintf(pool, "%s-%d-%s",
+ conn_ctx->id, id, tag);
+
+ H2_BLIST_INIT(&beam->buckets_to_send);
+ H2_BLIST_INIT(&beam->buckets_consumed);
+ beam->tx_mem_limits = 1;
+ beam->max_buf_size = max_buf_size;
+ beam->timeout = timeout;
+
+ rv = apr_thread_mutex_create(&beam->lock, APR_THREAD_MUTEX_DEFAULT, pool);
+ if (APR_SUCCESS != rv) goto cleanup;
+ rv = apr_thread_cond_create(&beam->change, pool);
+ if (APR_SUCCESS != rv) goto cleanup;
+ apr_pool_pre_cleanup_register(pool, beam, beam_cleanup);
+
+cleanup:
+ H2_BEAM_LOG(beam, from, APLOG_TRACE2, rv, "created", NULL);
+ *pbeam = (APR_SUCCESS == rv)? beam : NULL;
+ return rv;
+}
+
+void h2_beam_buffer_size_set(h2_bucket_beam *beam, apr_size_t buffer_size)
+{
+ apr_thread_mutex_lock(beam->lock);
+ beam->max_buf_size = buffer_size;
+ apr_thread_mutex_unlock(beam->lock);
+}
+
+void h2_beam_set_copy_files(h2_bucket_beam * beam, int enabled)
+{
+ apr_thread_mutex_lock(beam->lock);
+ beam->copy_files = enabled;
+ apr_thread_mutex_unlock(beam->lock);
+}
+
+apr_size_t h2_beam_buffer_size_get(h2_bucket_beam *beam)
+{
+ apr_size_t buffer_size = 0;
+
+ apr_thread_mutex_lock(beam->lock);
+ buffer_size = beam->max_buf_size;
+ apr_thread_mutex_unlock(beam->lock);
+ return buffer_size;
+}
+
+apr_interval_time_t h2_beam_timeout_get(h2_bucket_beam *beam)
+{
+ apr_interval_time_t timeout;
+
+ apr_thread_mutex_lock(beam->lock);
+ timeout = beam->timeout;
+ apr_thread_mutex_unlock(beam->lock);
+ return timeout;
+}
+
+void h2_beam_timeout_set(h2_bucket_beam *beam, apr_interval_time_t timeout)
+{
+ apr_thread_mutex_lock(beam->lock);
+ beam->timeout = timeout;
+ apr_thread_mutex_unlock(beam->lock);
+}
+
+void h2_beam_abort(h2_bucket_beam *beam, conn_rec *c)
+{
+ apr_thread_mutex_lock(beam->lock);
+ beam->aborted = 1;
+ if (c == beam->from) {
+ /* sender aborts */
+ if (beam->send_cb) {
+ beam->send_cb(beam->send_ctx, beam);
+ }
+ if (beam->was_empty_cb && buffer_is_empty(beam)) {
+ beam->was_empty_cb(beam->was_empty_ctx, beam);
+ }
+ /* no more consumption reporting to sender */
+ report_consumption(beam, 1);
+ beam->cons_ctx = NULL;
+
+ beam_shutdown(beam, APR_SHUTDOWN_WRITE);
+ }
+ else {
+ /* receiver aborts */
+ beam_shutdown(beam, APR_SHUTDOWN_READ);
+ }
+ apr_thread_cond_broadcast(beam->change);
+ apr_thread_mutex_unlock(beam->lock);
+}
+
+void h2_beam_close(h2_bucket_beam *beam, conn_rec *c)
+{
+ apr_thread_mutex_lock(beam->lock);
+ if (!beam->closed) {
+ /* should only be called from sender */
+ ap_assert(c == beam->from);
+ beam->closed = 1;
+ if (beam->send_cb) {
+ beam->send_cb(beam->send_ctx, beam);
+ }
+ if (beam->was_empty_cb && buffer_is_empty(beam)) {
+ beam->was_empty_cb(beam->was_empty_ctx, beam);
+ }
+ apr_thread_cond_broadcast(beam->change);
+ }
+ apr_thread_mutex_unlock(beam->lock);
+}
+
+static apr_status_t append_bucket(h2_bucket_beam *beam,
+ apr_bucket_brigade *bb,
+ apr_read_type_e block,
+ apr_size_t *pspace_left,
+ apr_off_t *pwritten)
+{
+ apr_bucket *b;
+ const char *data;
+ apr_size_t len;
+ apr_status_t rv = APR_SUCCESS;
+ int can_beam = 0;
+
+ (void)block;
+ if (beam->aborted) {
+ rv = APR_ECONNABORTED;
+ goto cleanup;
+ }
+
+ ap_assert(beam->pool);
+
+ b = APR_BRIGADE_FIRST(bb);
+ if (APR_BUCKET_IS_METADATA(b)) {
+ APR_BUCKET_REMOVE(b);
+ apr_bucket_setaside(b, beam->pool);
+ H2_BLIST_INSERT_TAIL(&beam->buckets_to_send, b);
+ goto cleanup;
+ }
+ /* non meta bucket */
+
+ /* in case of indeterminate length, we need to read the bucket,
+ * so that it transforms itself into something stable. */
+ if (b->length == ((apr_size_t)-1)) {
+ rv = apr_bucket_read(b, &data, &len, APR_BLOCK_READ);
+ if (rv != APR_SUCCESS) goto cleanup;
+ }
+
+ if (APR_BUCKET_IS_FILE(b)) {
+ /* For file buckets the problem is their internal readpool that
+ * is used on the first read to allocate buffer/mmap.
+ * Since setting aside a file bucket will de-register the
+ * file cleanup function from the previous pool, we need to
+ * call that only from the sender thread.
+ *
+ * Currently, we do not handle file buckets with refcount > 1, as
+ * the beam is then not in complete control of the file's lifetime,
+ * which results in the bug that a file gets closed by the receiver
+ * while the sender or the beam still has buckets using it.
+ *
+ * Additionally, we allow callbacks to prevent beaming file
+ * handles across. The use case for this is to limit the number
+ * of open file handles and rather use a less efficient beam
+ * transport. */
+ apr_bucket_file *bf = b->data;
+ can_beam = !beam->copy_files && (bf->refcount.refcount == 1);
+ }
+ else if (bucket_is_mmap(b)) {
+ can_beam = !beam->copy_files;
+ }
+
+ if (b->length == 0) {
+ apr_bucket_delete(b);
+ rv = APR_SUCCESS;
+ goto cleanup;
+ }
+
+ if (!*pspace_left) {
+ rv = APR_EAGAIN;
+ goto cleanup;
+ }
+
+ /* bucket is accepted and added to beam->buckets_to_send */
+ if (APR_BUCKET_IS_HEAP(b)) {
+ /* For heap buckets, a read from a receiver thread is fine. The
+ * data will be there and live until the bucket itself is
+ * destroyed. */
+ rv = apr_bucket_setaside(b, beam->pool);
+ if (rv != APR_SUCCESS) goto cleanup;
+ }
+ else if (can_beam && (APR_BUCKET_IS_FILE(b) || bucket_is_mmap(b))) {
+ rv = apr_bucket_setaside(b, beam->pool);
+ if (rv != APR_SUCCESS) goto cleanup;
+ }
+ else {
+ /* we know of no special shortcut to transfer the bucket to
+ * another pool without copying. So we make it a heap bucket. */
+ apr_bucket *b2;
+
+ rv = apr_bucket_read(b, &data, &len, APR_BLOCK_READ);
+ if (rv != APR_SUCCESS) goto cleanup;
+ /* this allocates and copies data */
+ b2 = apr_bucket_heap_create(data, len, NULL, bb->bucket_alloc);
+ apr_bucket_delete(b);
+ b = b2;
+ APR_BRIGADE_INSERT_HEAD(bb, b);
+ }
+
+ APR_BUCKET_REMOVE(b);
+ H2_BLIST_INSERT_TAIL(&beam->buckets_to_send, b);
+ *pwritten += (apr_off_t)b->length;
+ if (b->length > *pspace_left) {
+ *pspace_left = 0;
+ }
+ else {
+ *pspace_left -= b->length;
+ }
+
+cleanup:
+ return rv;
+}
+
+apr_status_t h2_beam_send(h2_bucket_beam *beam, conn_rec *from,
+ apr_bucket_brigade *sender_bb,
+ apr_read_type_e block,
+ apr_off_t *pwritten)
+{
+ apr_status_t rv = APR_SUCCESS;
+ apr_size_t space_left = 0;
+ int was_empty;
+
+ ap_assert(beam->pool);
+
+ /* Called from the sender thread to add buckets to the beam */
+ apr_thread_mutex_lock(beam->lock);
+ ap_assert(beam->from == from);
+ ap_assert(sender_bb);
+ H2_BEAM_LOG(beam, from, APLOG_TRACE2, rv, "start send", sender_bb);
+ purge_consumed_buckets(beam);
+ *pwritten = 0;
+ was_empty = buffer_is_empty(beam);
+
+ space_left = calc_space_left(beam);
+ while (!APR_BRIGADE_EMPTY(sender_bb) && APR_SUCCESS == rv) {
+ rv = append_bucket(beam, sender_bb, block, &space_left, pwritten);
+ if (beam->aborted) {
+ goto cleanup;
+ }
+ else if (APR_EAGAIN == rv) {
+ /* bucket was not added, as beam buffer has no space left.
+ * Trigger event callbacks, so receiver can know there is something
+ * to receive before we do a conditional wait. */
+ purge_consumed_buckets(beam);
+ if (beam->send_cb) {
+ beam->send_cb(beam->send_ctx, beam);
+ }
+ if (was_empty && beam->was_empty_cb) {
+ beam->was_empty_cb(beam->was_empty_ctx, beam);
+ }
+ rv = wait_not_full(beam, from, block, &space_left);
+ if (APR_SUCCESS != rv) {
+ break;
+ }
+ was_empty = buffer_is_empty(beam);
+ }
+ }
+
+cleanup:
+ if (beam->send_cb && !buffer_is_empty(beam)) {
+ beam->send_cb(beam->send_ctx, beam);
+ }
+ if (was_empty && beam->was_empty_cb && !buffer_is_empty(beam)) {
+ beam->was_empty_cb(beam->was_empty_ctx, beam);
+ }
+ apr_thread_cond_broadcast(beam->change);
+
+ report_consumption(beam, 1);
+ if (beam->aborted) {
+ rv = APR_ECONNABORTED;
+ }
+ H2_BEAM_LOG(beam, from, APLOG_TRACE2, rv, "end send", sender_bb);
+ apr_thread_mutex_unlock(beam->lock);
+ return rv;
+}
+
+apr_status_t h2_beam_receive(h2_bucket_beam *beam,
+ conn_rec *to,
+ apr_bucket_brigade *bb,
+ apr_read_type_e block,
+ apr_off_t readbytes)
+{
+ apr_bucket *bsender, *brecv, *ng;
+ int transferred = 0;
+ apr_status_t rv = APR_SUCCESS;
+ apr_off_t remain;
+ int consumed_buckets = 0;
+
+ apr_thread_mutex_lock(beam->lock);
+ H2_BEAM_LOG(beam, to, APLOG_TRACE2, 0, "start receive", bb);
+ if (readbytes <= 0) {
+ readbytes = (apr_off_t)APR_SIZE_MAX;
+ }
+ remain = readbytes;
+
+transfer:
+ if (beam->aborted) {
+ beam_shutdown(beam, APR_SHUTDOWN_READ);
+ rv = APR_ECONNABORTED;
+ goto leave;
+ }
+
+ ap_assert(beam->pool);
+
+ /* transfer from our sender brigade, transforming sender buckets to
+ * receiver ones until we have enough */
+ while (remain >= 0 && !H2_BLIST_EMPTY(&beam->buckets_to_send)) {
+
+ brecv = NULL;
+ bsender = H2_BLIST_FIRST(&beam->buckets_to_send);
+ if (bsender->length > 0 && remain <= 0) {
+ break;
+ }
+
+ if (APR_BUCKET_IS_METADATA(bsender)) {
+ /* we need a real copy into the receiver's bucket_alloc */
+ if (APR_BUCKET_IS_EOS(bsender)) {
+ /* this closes the beam */
+ beam->closed = 1;
+ brecv = apr_bucket_eos_create(bb->bucket_alloc);
+ }
+ else if (APR_BUCKET_IS_FLUSH(bsender)) {
+ brecv = apr_bucket_flush_create(bb->bucket_alloc);
+ }
+#if AP_HAS_RESPONSE_BUCKETS
+ else if (AP_BUCKET_IS_RESPONSE(bsender)) {
+ brecv = ap_bucket_response_clone(bsender, bb->p, bb->bucket_alloc);
+ }
+ else if (AP_BUCKET_IS_REQUEST(bsender)) {
+ brecv = ap_bucket_request_clone(bsender, bb->p, bb->bucket_alloc);
+ }
+ else if (AP_BUCKET_IS_HEADERS(bsender)) {
+ brecv = ap_bucket_headers_clone(bsender, bb->p, bb->bucket_alloc);
+ }
+#else
+ else if (H2_BUCKET_IS_HEADERS(bsender)) {
+ brecv = h2_bucket_headers_clone(bsender, bb->p, bb->bucket_alloc);
+ }
+#endif /* AP_HAS_RESPONSE_BUCKETS */
+ else if (AP_BUCKET_IS_ERROR(bsender)) {
+ ap_bucket_error *eb = bsender->data;
+ brecv = ap_bucket_error_create(eb->status, eb->data,
+ bb->p, bb->bucket_alloc);
+ }
+ }
+ else if (bsender->length == 0) {
+ /* nop */
+ }
+#if APR_HAS_MMAP
+ else if (APR_BUCKET_IS_MMAP(bsender)) {
+ apr_bucket_mmap *bmmap = bsender->data;
+ apr_mmap_t *mmap;
+ rv = apr_mmap_dup(&mmap, bmmap->mmap, bb->p);
+ if (rv != APR_SUCCESS) goto leave;
+ brecv = apr_bucket_mmap_create(mmap, bsender->start, bsender->length, bb->bucket_alloc);
+ }
+#endif
+ else if (APR_BUCKET_IS_FILE(bsender)) {
+ /* This is setaside into the target brigade pool so that
+ * any read operation messes with that pool and not
+ * the sender one. */
+ apr_bucket_file *f = (apr_bucket_file *)bsender->data;
+ apr_file_t *fd = f->fd;
+ int setaside = (f->readpool != bb->p);
+
+ if (setaside) {
+ rv = apr_file_setaside(&fd, fd, bb->p);
+ if (rv != APR_SUCCESS) goto leave;
+ }
+ ng = apr_brigade_insert_file(bb, fd, bsender->start, (apr_off_t)bsender->length,
+ bb->p);
+#if APR_HAS_MMAP
+ /* disable mmap handling as this leads to segfaults when
+ * the underlying file is changed while memory pointer has
+ * been handed out. See also PR 59348 */
+ apr_bucket_file_enable_mmap(ng, 0);
+#endif
+ remain -= bsender->length;
+ ++transferred;
+ }
+ else {
+ const char *data;
+ apr_size_t dlen;
+ /* we did that when the bucket was added, so this should
+ * give us the same data as before without changing the bucket
+ * or anything (pool) connected to it. */
+ rv = apr_bucket_read(bsender, &data, &dlen, APR_BLOCK_READ);
+ if (rv != APR_SUCCESS) goto leave;
+ rv = apr_brigade_write(bb, NULL, NULL, data, dlen);
+ if (rv != APR_SUCCESS) goto leave;
+
+ remain -= dlen;
+ ++transferred;
+ }
+
+ if (brecv) {
+ /* we have a proxy that we can give the receiver */
+ APR_BRIGADE_INSERT_TAIL(bb, brecv);
+ remain -= brecv->length;
+ ++transferred;
+ }
+ APR_BUCKET_REMOVE(bsender);
+ H2_BLIST_INSERT_TAIL(&beam->buckets_consumed, bsender);
+ beam->recv_bytes += bsender->length;
+ ++consumed_buckets;
+ }
+
+ if (beam->recv_cb && consumed_buckets > 0) {
+ beam->recv_cb(beam->recv_ctx, beam);
+ }
+
+ if (transferred) {
+ apr_thread_cond_broadcast(beam->change);
+ rv = APR_SUCCESS;
+ }
+ else if (beam->aborted) {
+ rv = APR_ECONNABORTED;
+ }
+ else if (beam->closed) {
+ rv = APR_EOF;
+ }
+ else {
+ rv = wait_not_empty(beam, to, block);
+ if (rv != APR_SUCCESS) {
+ goto leave;
+ }
+ goto transfer;
+ }
+
+leave:
+ H2_BEAM_LOG(beam, to, APLOG_TRACE2, rv, "end receive", bb);
+ apr_thread_mutex_unlock(beam->lock);
+ return rv;
+}
+
+void h2_beam_on_consumed(h2_bucket_beam *beam,
+ h2_beam_io_callback *io_cb, void *ctx)
+{
+ apr_thread_mutex_lock(beam->lock);
+ beam->cons_io_cb = io_cb;
+ beam->cons_ctx = ctx;
+ apr_thread_mutex_unlock(beam->lock);
+}
+
+void h2_beam_on_received(h2_bucket_beam *beam,
+ h2_beam_ev_callback *recv_cb, void *ctx)
+{
+ apr_thread_mutex_lock(beam->lock);
+ beam->recv_cb = recv_cb;
+ beam->recv_ctx = ctx;
+ apr_thread_mutex_unlock(beam->lock);
+}
+
+void h2_beam_on_send(h2_bucket_beam *beam,
+ h2_beam_ev_callback *send_cb, void *ctx)
+{
+ apr_thread_mutex_lock(beam->lock);
+ beam->send_cb = send_cb;
+ beam->send_ctx = ctx;
+ apr_thread_mutex_unlock(beam->lock);
+}
+
+void h2_beam_on_was_empty(h2_bucket_beam *beam,
+ h2_beam_ev_callback *was_empty_cb, void *ctx)
+{
+ apr_thread_mutex_lock(beam->lock);
+ beam->was_empty_cb = was_empty_cb;
+ beam->was_empty_ctx = ctx;
+ apr_thread_mutex_unlock(beam->lock);
+}
+
+
+static apr_off_t get_buffered_data_len(h2_bucket_beam *beam)
+{
+ apr_bucket *b;
+ apr_off_t l = 0;
+
+ for (b = H2_BLIST_FIRST(&beam->buckets_to_send);
+ b != H2_BLIST_SENTINEL(&beam->buckets_to_send);
+ b = APR_BUCKET_NEXT(b)) {
+ /* should all have determinate length */
+ l += b->length;
+ }
+ return l;
+}
+
+apr_off_t h2_beam_get_buffered(h2_bucket_beam *beam)
+{
+ apr_off_t l = 0;
+
+ apr_thread_mutex_lock(beam->lock);
+ l = get_buffered_data_len(beam);
+ apr_thread_mutex_unlock(beam->lock);
+ return l;
+}
+
+apr_off_t h2_beam_get_mem_used(h2_bucket_beam *beam)
+{
+ apr_bucket *b;
+ apr_off_t l = 0;
+
+ apr_thread_mutex_lock(beam->lock);
+ for (b = H2_BLIST_FIRST(&beam->buckets_to_send);
+ b != H2_BLIST_SENTINEL(&beam->buckets_to_send);
+ b = APR_BUCKET_NEXT(b)) {
+ l += bucket_mem_used(b);
+ }
+ apr_thread_mutex_unlock(beam->lock);
+ return l;
+}
+
+int h2_beam_empty(h2_bucket_beam *beam)
+{
+ int empty = 1;
+
+ apr_thread_mutex_lock(beam->lock);
+ empty = buffer_is_empty(beam);
+ apr_thread_mutex_unlock(beam->lock);
+ return empty;
+}
+
+int h2_beam_report_consumption(h2_bucket_beam *beam)
+{
+ int rv = 0;
+
+ apr_thread_mutex_lock(beam->lock);
+ rv = report_consumption(beam, 1);
+ apr_thread_mutex_unlock(beam->lock);
+ return rv;
+}
diff --git a/modules/http2/h2_bucket_beam.h b/modules/http2/h2_bucket_beam.h
new file mode 100644
index 0000000..2a9d5f0
--- /dev/null
+++ b/modules/http2/h2_bucket_beam.h
@@ -0,0 +1,248 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef h2_bucket_beam_h
+#define h2_bucket_beam_h
+
+#include "h2_conn_ctx.h"
+
+struct apr_thread_mutex_t;
+struct apr_thread_cond_t;
+
+/**
+ * A h2_bucket_beam solves the task of transferring buckets, esp. their data,
+ * across threads with as little copying as possible.
+ */
+
+typedef struct h2_bucket_beam h2_bucket_beam;
+
+typedef void h2_beam_io_callback(void *ctx, h2_bucket_beam *beam,
+ apr_off_t bytes);
+typedef void h2_beam_ev_callback(void *ctx, h2_bucket_beam *beam);
+
+/**
+ * h2_blist can hold a list of buckets just like apr_bucket_brigade, but
+ * does not do any allocations or related features.
+ */
+typedef struct {
+ APR_RING_HEAD(h2_bucket_list, apr_bucket) list;
+} h2_blist;
+
+struct h2_bucket_beam {
+ int id;
+ const char *name;
+ conn_rec *from;
+ apr_pool_t *pool;
+ h2_blist buckets_to_send;
+ h2_blist buckets_consumed;
+
+ apr_size_t max_buf_size;
+ apr_interval_time_t timeout;
+
+ int aborted;
+ int closed;
+ int tx_mem_limits; /* only memory size counts on transfers */
+ int copy_files;
+
+ struct apr_thread_mutex_t *lock;
+ struct apr_thread_cond_t *change;
+
+ h2_beam_ev_callback *was_empty_cb; /* event: beam changed to non-empty in h2_beam_send() */
+ void *was_empty_ctx;
+    h2_beam_ev_callback *recv_cb;         /* event: buckets were transferred in h2_beam_receive() */
+ void *recv_ctx;
+ h2_beam_ev_callback *send_cb; /* event: buckets were added in h2_beam_send() */
+ void *send_ctx;
+
+ apr_off_t recv_bytes; /* amount of bytes transferred in h2_beam_receive() */
+ apr_off_t recv_bytes_reported; /* amount of bytes reported as received via callback */
+ h2_beam_io_callback *cons_io_cb; /* report: recv_bytes deltas for sender */
+ void *cons_ctx;
+};
+
+/**
+ * Creates a new bucket beam for transfer of buckets across threads.
+ *
+ * The pool the beam is created with will be used from multiple threads
+ * and is protected by the beam's internal mutex. It needs a pool allocator
+ * that is only used while that mutex is held.
+ *
+ * @param pbeam will hold the created beam on return
+ * @param from connection from which buckets are sent
+ * @param pool pool owning the beam, beam will cleanup when pool released
+ * @param id identifier of the beam
+ * @param tag tag identifying beam for logging
+ * @param buffer_size maximum memory footprint of buckets buffered in beam, or
+ * 0 for no limitation
+ * @param timeout timeout for blocking operations
+ */
+apr_status_t h2_beam_create(h2_bucket_beam **pbeam,
+ conn_rec *from,
+ apr_pool_t *pool,
+ int id, const char *tag,
+ apr_size_t buffer_size,
+ apr_interval_time_t timeout);
+
+/**
+ * Destroys the beam immediately without cleanup.
+ */
+apr_status_t h2_beam_destroy(h2_bucket_beam *beam, conn_rec *c);
+
+/**
+ * Switch copying of file buckets on/off.
+ */
+void h2_beam_set_copy_files(h2_bucket_beam * beam, int enabled);
+
+/**
+ * Send buckets from the given brigade through the beam.
+ * This can block if the amount of buffered bucket data is above the buffer limit.
+ * @param beam the beam to add buckets to
+ * @param from the connection the sender operates on, must be the same as
+ * used to create the beam
+ * @param bb the brigade to take buckets from
+ * @param block if the sending should block when the buffer is full
+ * @param pwritten on return, contains the number of data bytes sent
+ * @return APR_SUCCESS when buckets were added to the beam. This can be
+ * a partial transfer and other buckets may still remain in bb
+ * APR_EAGAIN on non-blocking send when the buffer is full
+ *         APR_TIMEUP on blocking sends that time out
+ * APR_ECONNABORTED when beam has been aborted
+ */
+apr_status_t h2_beam_send(h2_bucket_beam *beam, conn_rec *from,
+ apr_bucket_brigade *bb,
+ apr_read_type_e block,
+ apr_off_t *pwritten);
+
+/**
+ * Receive buckets from the beam into the given brigade. The caller is
+ * operating on connection `to`.
+ * @param beam the beam to receive buckets from
+ * @param to the connection the receiver is working with
+ * @param bb the bucket brigade to append to
+ * @param block if the read should block when buckets are unavailable
+ * @param readbytes the amount of data the receiver wants
+ * @return APR_SUCCESS when buckets were appended
+ * APR_EAGAIN on non-blocking read when no buckets are available
+ * APR_TIMEUP on blocking reads that time out
+ * APR_ECONNABORTED when beam has been aborted
+ */
+apr_status_t h2_beam_receive(h2_bucket_beam *beam, conn_rec *to,
+ apr_bucket_brigade *bb,
+ apr_read_type_e block,
+ apr_off_t readbytes);
+
+/**
+ * Determine if beam is empty.
+ */
+int h2_beam_empty(h2_bucket_beam *beam);
+
+/**
+ * Abort the beam, either from receiving or sending side.
+ *
+ * @param beam the beam to abort
+ * @param c the connection the caller is working with
+ */
+void h2_beam_abort(h2_bucket_beam *beam, conn_rec *c);
+
+/**
+ * Close the beam. Make certain an EOS is sent.
+ *
+ * @param beam the beam to close
+ * @param c the connection the caller is working with
+ */
+void h2_beam_close(h2_bucket_beam *beam, conn_rec *c);
+
+/**
+ * Set/get the timeout for blocking send/receive operations.
+ */
+void h2_beam_timeout_set(h2_bucket_beam *beam,
+ apr_interval_time_t timeout);
+
+apr_interval_time_t h2_beam_timeout_get(h2_bucket_beam *beam);
+
+/**
+ * Set/get the maximum buffer size for beam data (memory footprint).
+ */
+void h2_beam_buffer_size_set(h2_bucket_beam *beam,
+ apr_size_t buffer_size);
+apr_size_t h2_beam_buffer_size_get(h2_bucket_beam *beam);
+
+/**
+ * Register a callback to be invoked on the sender side with the
+ * amount of bytes that have been consumed by the receiver, since the
+ * last callback invocation or reset.
+ * @param beam the beam to set the callback on
+ * @param io_cb the callback or NULL, called on sender with bytes consumed
+ * @param ctx the context to use in callback invocation
+ *
+ * Call from the sender side; io callbacks are invoked on the sender side,
+ * ev callbacks may come from either side.
+ */
+void h2_beam_on_consumed(h2_bucket_beam *beam,
+ h2_beam_io_callback *io_cb, void *ctx);
+
+/**
+ * Register a callback to be invoked on the receiver side whenever
+ * buckets have been transferred in a h2_beam_receive() call.
+ * @param beam the beam to set the callback on
+ * @param recv_cb the callback or NULL, called when buckets are received
+ * @param ctx the context to use in callback invocation
+ */
+void h2_beam_on_received(h2_bucket_beam *beam,
+ h2_beam_ev_callback *recv_cb, void *ctx);
+
+/**
+ * Register a callback on the sender side to be invoked when a send
+ * has added buckets to the beam.
+ * Unregister by passing a NULL on_send_cb.
+ * @param beam the beam to set the callback on
+ * @param on_send_cb the callback to invoke after buckets were added
+ * @param ctx the context to use in callback invocation
+ */
+void h2_beam_on_send(h2_bucket_beam *beam,
+ h2_beam_ev_callback *on_send_cb, void *ctx);
+
+/**
+ * Register a callback on the sender side to be invoked when a send
+ * has added buckets to a previously empty beam.
+ * Unregister by passing a NULL was_empty_cb.
+ * @param beam the beam to set the callback on
+ * @param was_empty_cb the callback to invoke when a send filled an empty beam
+ * @param ctx the context to use in callback invocation
+ */
+void h2_beam_on_was_empty(h2_bucket_beam *beam,
+ h2_beam_ev_callback *was_empty_cb, void *ctx);
+
+/**
+ * Call any registered consumed handler, if any changes have happened
+ * since the last invocation.
+ * @return !=0 iff a handler has been called
+ *
+ * Needs to be invoked from the sending side.
+ */
+int h2_beam_report_consumption(h2_bucket_beam *beam);
+
+/**
+ * Get the amount of bytes currently buffered in the beam (unread).
+ */
+apr_off_t h2_beam_get_buffered(h2_bucket_beam *beam);
+
+/**
+ * Get the memory used by the buffered buckets, approximately.
+ */
+apr_off_t h2_beam_get_mem_used(h2_bucket_beam *beam);
+
+#endif /* h2_bucket_beam_h */
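
A minimal usage sketch of the send/receive API declared above; the connections, pool and brigades are assumed to exist, and the buffer limit and timeout values are arbitrary:

#include <apr_time.h>
#include "h2_bucket_beam.h"

static apr_status_t sender_side(conn_rec *c_from, apr_pool_t *pool,
                                apr_bucket_brigade *bb_out,
                                h2_bucket_beam **pbeam)
{
    apr_off_t written = 0;
    apr_status_t rv;

    rv = h2_beam_create(pbeam, c_from, pool, 1, "example",
                        64 * 1024, apr_time_from_sec(30));
    if (rv != APR_SUCCESS) return rv;
    /* may block until the receiver has drained enough of the buffer */
    return h2_beam_send(*pbeam, c_from, bb_out, APR_BLOCK_READ, &written);
}

static apr_status_t receiver_side(h2_bucket_beam *beam, conn_rec *c_to,
                                  apr_bucket_brigade *bb_in)
{
    /* appends up to 32k of buckets, APR_EAGAIN when none are available */
    return h2_beam_receive(beam, c_to, bb_in, APR_NONBLOCK_READ, 32 * 1024);
}
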
diff --git a/modules/http2/h2_bucket_eos.c b/modules/http2/h2_bucket_eos.c
new file mode 100644
index 0000000..fa46a30
--- /dev/null
+++ b/modules/http2/h2_bucket_eos.c
@@ -0,0 +1,112 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <assert.h>
+#include <stddef.h>
+
+#include <httpd.h>
+#include <http_core.h>
+#include <http_connection.h>
+#include <http_log.h>
+#include <http_protocol.h>
+
+#include "h2_private.h"
+#include "h2.h"
+#include "h2_mplx.h"
+#include "h2_stream.h"
+#include "h2_bucket_eos.h"
+
+typedef struct {
+ apr_bucket_refcount refcount;
+ h2_stream *stream;
+} h2_bucket_eos;
+
+static apr_status_t bucket_cleanup(void *data)
+{
+ h2_stream **pstream = data;
+
+ if (*pstream) {
+ /* If bucket_destroy is called after us, this prevents
+ * bucket_destroy from trying to destroy the stream again. */
+ *pstream = NULL;
+ }
+ return APR_SUCCESS;
+}
+
+static apr_status_t bucket_read(apr_bucket *b, const char **str,
+ apr_size_t *len, apr_read_type_e block)
+{
+ (void)b;
+ (void)block;
+ *str = NULL;
+ *len = 0;
+ return APR_SUCCESS;
+}
+
+apr_bucket *h2_bucket_eos_make(apr_bucket *b, h2_stream *stream)
+{
+ h2_bucket_eos *h;
+
+ h = apr_bucket_alloc(sizeof(*h), b->list);
+ h->stream = stream;
+
+ b = apr_bucket_shared_make(b, h, 0, 0);
+ b->type = &h2_bucket_type_eos;
+
+ return b;
+}
+
+apr_bucket *h2_bucket_eos_create(apr_bucket_alloc_t *list,
+ h2_stream *stream)
+{
+ apr_bucket *b = apr_bucket_alloc(sizeof(*b), list);
+
+ APR_BUCKET_INIT(b);
+ b->free = apr_bucket_free;
+ b->list = list;
+ b = h2_bucket_eos_make(b, stream);
+ if (stream) {
+ h2_bucket_eos *h = b->data;
+ apr_pool_pre_cleanup_register(stream->pool, &h->stream, bucket_cleanup);
+ }
+ return b;
+}
+
+static void bucket_destroy(void *data)
+{
+ h2_bucket_eos *h = data;
+
+ if (apr_bucket_shared_destroy(h)) {
+ h2_stream *stream = h->stream;
+ if (stream && stream->pool) {
+ apr_pool_cleanup_kill(stream->pool, &h->stream, bucket_cleanup);
+ }
+ apr_bucket_free(h);
+ if (stream) {
+ h2_stream_dispatch(stream, H2_SEV_EOS_SENT);
+ }
+ }
+}
+
+const apr_bucket_type_t h2_bucket_type_eos = {
+ "H2EOS", 5, APR_BUCKET_METADATA,
+ bucket_destroy,
+ bucket_read,
+ apr_bucket_setaside_noop,
+ apr_bucket_split_notimpl,
+ apr_bucket_shared_copy
+};
+
diff --git a/modules/http2/h2_bucket_eos.h b/modules/http2/h2_bucket_eos.h
new file mode 100644
index 0000000..04e32e3
--- /dev/null
+++ b/modules/http2/h2_bucket_eos.h
@@ -0,0 +1,32 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef mod_http2_h2_bucket_stream_eos_h
+#define mod_http2_h2_bucket_stream_eos_h
+
+struct h2_stream;
+
+/** End Of HTTP/2 STREAM (H2EOS) bucket */
+extern const apr_bucket_type_t h2_bucket_type_eos;
+
+#define H2_BUCKET_IS_H2EOS(e) (e->type == &h2_bucket_type_eos)
+
+apr_bucket *h2_bucket_eos_make(apr_bucket *b, struct h2_stream *stream);
+
+apr_bucket *h2_bucket_eos_create(apr_bucket_alloc_t *list,
+ struct h2_stream *stream);
+
+#endif /* mod_http2_h2_bucket_stream_eos_h */
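
A minimal sketch of appending and detecting the H2EOS metadata bucket declared above; the helper names are invented for the illustration, the stream and brigade come from the caller:

#include <apr_buckets.h>
#include "h2_bucket_eos.h"

static void append_stream_eos(apr_bucket_brigade *bb, struct h2_stream *stream)
{
    apr_bucket *b = h2_bucket_eos_create(bb->bucket_alloc, stream);
    APR_BRIGADE_INSERT_TAIL(bb, b);
}

static int brigade_ends_stream(apr_bucket_brigade *bb)
{
    apr_bucket *b;

    for (b = APR_BRIGADE_FIRST(bb);
         b != APR_BRIGADE_SENTINEL(bb);
         b = APR_BUCKET_NEXT(b)) {
        if (H2_BUCKET_IS_H2EOS(b)) {
            return 1;
        }
    }
    return 0;
}
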
diff --git a/modules/http2/h2_c1.c b/modules/http2/h2_c1.c
new file mode 100644
index 0000000..afb26fc
--- /dev/null
+++ b/modules/http2/h2_c1.c
@@ -0,0 +1,323 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <assert.h>
+#include <apr_strings.h>
+
+#include <ap_mpm.h>
+#include <ap_mmn.h>
+
+#include <httpd.h>
+#include <http_core.h>
+#include <http_config.h>
+#include <http_log.h>
+#include <http_connection.h>
+#include <http_protocol.h>
+#include <http_request.h>
+#include <http_ssl.h>
+
+#include <mpm_common.h>
+
+#include "h2_private.h"
+#include "h2.h"
+#include "h2_bucket_beam.h"
+#include "h2_config.h"
+#include "h2_conn_ctx.h"
+#include "h2_mplx.h"
+#include "h2_session.h"
+#include "h2_stream.h"
+#include "h2_protocol.h"
+#include "h2_workers.h"
+#include "h2_c1.h"
+#include "h2_version.h"
+#include "h2_util.h"
+
+static struct h2_workers *workers;
+
+static int async_mpm;
+
+APR_OPTIONAL_FN_TYPE(ap_logio_add_bytes_in) *h2_c_logio_add_bytes_in;
+APR_OPTIONAL_FN_TYPE(ap_logio_add_bytes_out) *h2_c_logio_add_bytes_out;
+
+apr_status_t h2_c1_child_init(apr_pool_t *pool, server_rec *s)
+{
+ apr_status_t status = APR_SUCCESS;
+ int minw, maxw;
+ apr_time_t idle_limit;
+
+ status = ap_mpm_query(AP_MPMQ_IS_ASYNC, &async_mpm);
+ if (status != APR_SUCCESS) {
+        /* some MPMs do not implement this */
+ async_mpm = 0;
+ status = APR_SUCCESS;
+ }
+
+ h2_config_init(pool);
+
+ h2_get_workers_config(s, &minw, &maxw, &idle_limit);
+ workers = h2_workers_create(s, pool, maxw, minw, idle_limit);
+
+ h2_c_logio_add_bytes_in = APR_RETRIEVE_OPTIONAL_FN(ap_logio_add_bytes_in);
+ h2_c_logio_add_bytes_out = APR_RETRIEVE_OPTIONAL_FN(ap_logio_add_bytes_out);
+
+ return h2_mplx_c1_child_init(pool, s);
+}
+
+void h2_c1_child_stopping(apr_pool_t *pool, int graceful)
+{
+ if (workers) {
+ h2_workers_shutdown(workers, graceful);
+ }
+}
+
+
+apr_status_t h2_c1_setup(conn_rec *c, request_rec *r, server_rec *s)
+{
+ h2_session *session;
+ h2_conn_ctx_t *ctx;
+ apr_status_t rv;
+
+ if (!workers) {
+ ap_log_cerror(APLOG_MARK, APLOG_ERR, 0, c, APLOGNO(02911)
+ "workers not initialized");
+ rv = APR_EGENERAL;
+ goto cleanup;
+ }
+
+ rv = h2_session_create(&session, c, r, s, workers);
+ if (APR_SUCCESS != rv) goto cleanup;
+
+ ctx = h2_conn_ctx_get(c);
+ ap_assert(ctx);
+ h2_conn_ctx_assign_session(ctx, session);
+    /* Remove the input filter of mod_reqtimeout, now that the connection
+     * is established and we have switched to h2. reqtimeout has supervised
+     * any configured handshake timeouts and needs to get out of the way
+     * now, since the rest of its state handling assumes http/1.x processing. */
+ ap_remove_input_filter_byhandle(c->input_filters, "reqtimeout");
+
+cleanup:
+ return rv;
+}
+
+apr_status_t h2_c1_run(conn_rec *c)
+{
+ apr_status_t status;
+ int mpm_state = 0;
+ h2_conn_ctx_t *conn_ctx = h2_conn_ctx_get(c);
+
+ ap_assert(conn_ctx);
+ ap_assert(conn_ctx->session);
+ do {
+ if (c->cs) {
+ c->cs->sense = CONN_SENSE_DEFAULT;
+ c->cs->state = CONN_STATE_HANDLER;
+ }
+
+ status = h2_session_process(conn_ctx->session, async_mpm);
+
+ if (APR_STATUS_IS_EOF(status)) {
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, c,
+ H2_SSSN_LOG(APLOGNO(03045), conn_ctx->session,
+ "process, closing conn"));
+ c->keepalive = AP_CONN_CLOSE;
+ }
+ else {
+ c->keepalive = AP_CONN_KEEPALIVE;
+ }
+
+ if (ap_mpm_query(AP_MPMQ_MPM_STATE, &mpm_state)) {
+ break;
+ }
+ } while (!async_mpm
+ && c->keepalive == AP_CONN_KEEPALIVE
+ && mpm_state != AP_MPMQ_STOPPING);
+
+ if (c->cs) {
+ switch (conn_ctx->session->state) {
+ case H2_SESSION_ST_INIT:
+ case H2_SESSION_ST_IDLE:
+ case H2_SESSION_ST_BUSY:
+ case H2_SESSION_ST_WAIT:
+ c->cs->state = CONN_STATE_WRITE_COMPLETION;
+ if (c->cs && !conn_ctx->session->remote.emitted_count) {
+ /* let the MPM know that we are not done and want
+ * the Timeout behaviour instead of a KeepAliveTimeout
+ * See PR 63534.
+ */
+ c->cs->sense = CONN_SENSE_WANT_READ;
+ }
+ break;
+ case H2_SESSION_ST_CLEANUP:
+ case H2_SESSION_ST_DONE:
+ default:
+ c->cs->state = CONN_STATE_LINGER;
+ break;
+ }
+ }
+
+ return APR_SUCCESS;
+}
+
+apr_status_t h2_c1_pre_close(struct h2_conn_ctx_t *ctx, conn_rec *c)
+{
+ h2_conn_ctx_t *conn_ctx = h2_conn_ctx_get(c);
+
+ if (conn_ctx && conn_ctx->session) {
+ apr_status_t status = h2_session_pre_close(conn_ctx->session, async_mpm);
+ return (status == APR_SUCCESS)? DONE : status;
+ }
+ return DONE;
+}
+
+int h2_c1_allows_direct(conn_rec *c)
+{
+ if (!c->master) {
+ int is_tls = ap_ssl_conn_is_ssl(c);
+ const char *needed_protocol = is_tls? "h2" : "h2c";
+ int h2_direct = h2_config_cgeti(c, H2_CONF_DIRECT);
+
+ if (h2_direct < 0) {
+ h2_direct = is_tls? 0 : 1;
+ }
+ return (h2_direct && ap_is_allowed_protocol(c, NULL, NULL, needed_protocol));
+ }
+ return 0;
+}
+
+int h2_c1_can_upgrade(request_rec *r)
+{
+ if (!r->connection->master) {
+ int h2_upgrade = h2_config_rgeti(r, H2_CONF_UPGRADE);
+ return h2_upgrade > 0 || (h2_upgrade < 0 && !ap_ssl_conn_is_ssl(r->connection));
+ }
+ return 0;
+}
+
+static int h2_c1_hook_process_connection(conn_rec* c)
+{
+ apr_status_t status;
+ h2_conn_ctx_t *ctx;
+
+ if (c->master) goto declined;
+ ctx = h2_conn_ctx_get(c);
+
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c, "h2_h2, process_conn");
+ if (!ctx && c->keepalives == 0) {
+ const char *proto = ap_get_protocol(c);
+
+ if (APLOGctrace1(c)) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c, "h2_h2, process_conn, "
+ "new connection using protocol '%s', direct=%d, "
+ "tls acceptable=%d", proto, h2_c1_allows_direct(c),
+ h2_protocol_is_acceptable_c1(c, NULL, 1));
+ }
+
+ if (!strcmp(AP_PROTOCOL_HTTP1, proto)
+ && h2_c1_allows_direct(c)
+ && h2_protocol_is_acceptable_c1(c, NULL, 1)) {
+            /* The fresh connection is still on http/1.1, H2Direct is enabled
+             * and the connection is in an otherwise acceptable state.
+ * -> peek at the first 24 incoming bytes
+ */
+ apr_bucket_brigade *temp;
+ char *peek = NULL;
+ apr_size_t peeklen;
+
+ temp = apr_brigade_create(c->pool, c->bucket_alloc);
+ status = ap_get_brigade(c->input_filters, temp,
+ AP_MODE_SPECULATIVE, APR_BLOCK_READ, 24);
+
+ if (status != APR_SUCCESS) {
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, c, APLOGNO(03054)
+ "h2_h2, error reading 24 bytes speculative");
+ apr_brigade_destroy(temp);
+ return DECLINED;
+ }
+
+ apr_brigade_pflatten(temp, &peek, &peeklen, c->pool);
+ if ((peeklen >= 24) && !memcmp(H2_MAGIC_TOKEN, peek, 24)) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c,
+ "h2_h2, direct mode detected");
+ ctx = h2_conn_ctx_create_for_c1(c, c->base_server,
+ ap_ssl_conn_is_ssl(c)? "h2" : "h2c");
+ }
+ else if (APLOGctrace2(c)) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, c,
+ "h2_h2, not detected in %d bytes(base64): %s",
+ (int)peeklen, h2_util_base64url_encode(peek, peeklen, c->pool));
+ }
+ apr_brigade_destroy(temp);
+ }
+ }
+
+ if (!ctx) goto declined;
+
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c, "process_conn");
+ if (!ctx->session) {
+ status = h2_c1_setup(c, NULL, ctx->server? ctx->server : c->base_server);
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, c, "conn_setup");
+ if (status != APR_SUCCESS) {
+ h2_conn_ctx_detach(c);
+ return !OK;
+ }
+ }
+ h2_c1_run(c);
+ return OK;
+
+declined:
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c, "h2_h2, declined");
+ return DECLINED;
+}
+
+static int h2_c1_hook_pre_close(conn_rec *c)
+{
+ h2_conn_ctx_t *ctx;
+
+ /* secondary connection? */
+ if (c->master) {
+ return DECLINED;
+ }
+
+ ctx = h2_conn_ctx_get(c);
+ if (ctx) {
+ /* If the session has been closed correctly already, we will not
+ * find a h2_conn_ctx_there. The presence indicates that the session
+ * is still ongoing. */
+ return h2_c1_pre_close(ctx, c);
+ }
+ return DECLINED;
+}
+
+static const char* const mod_ssl[] = { "mod_ssl.c", NULL};
+static const char* const mod_reqtimeout[] = { "mod_ssl.c", "mod_reqtimeout.c", NULL};
+
+void h2_c1_register_hooks(void)
+{
+ /* Our main processing needs to run quite late. Definitely after mod_ssl,
+ * as we need its connection filters, but also before reqtimeout as its
+ * method of timeouts is specific to HTTP/1.1 (as of now).
+     * The core HTTP/1 processing runs as REALLY_LAST, so we will have
+ * a chance to take over before it.
+ */
+ ap_hook_process_connection(h2_c1_hook_process_connection,
+ mod_reqtimeout, NULL, APR_HOOK_LAST);
+
+ /* One last chance to properly say goodbye if we have not done so
+ * already. */
+ ap_hook_pre_close_connection(h2_c1_hook_pre_close, NULL, mod_ssl, APR_HOOK_LAST);
+}
+
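
A minimal sketch of the same predecessor-list mechanism used in h2_c1_register_hooks() above, from the point of view of a hypothetical third-party module that wants its process_connection hook to run only after mod_http2:

#include <httpd.h>
#include <http_config.h>
#include <http_connection.h>

static int example_process_connection(conn_rec *c)
{
    (void)c;
    return DECLINED;    /* let later handlers, e.g. core HTTP/1, take over */
}

static void example_register_hooks(apr_pool_t *p)
{
    static const char *const after_h2[] = { "mod_http2.c", NULL };

    (void)p;
    /* run only after mod_http2 had its chance to take over the connection */
    ap_hook_process_connection(example_process_connection,
                               after_h2, NULL, APR_HOOK_LAST);
}
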
diff --git a/modules/http2/h2_c1.h b/modules/http2/h2_c1.h
new file mode 100644
index 0000000..41527f6
--- /dev/null
+++ b/modules/http2/h2_c1.h
@@ -0,0 +1,83 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __mod_h2__h2_c1__
+#define __mod_h2__h2_c1__
+
+#include <http_core.h>
+
+struct h2_conn_ctx_t;
+
+extern APR_OPTIONAL_FN_TYPE(ap_logio_add_bytes_in) *h2_c_logio_add_bytes_in;
+extern APR_OPTIONAL_FN_TYPE(ap_logio_add_bytes_out) *h2_c_logio_add_bytes_out;
+
+/* Initialize this child process for h2 primary connection work,
+ * to be called once during child init before multi processing
+ * starts.
+ */
+apr_status_t h2_c1_child_init(apr_pool_t *pool, server_rec *s);
+
+/**
+ * Setup the primary connection and our context for HTTP/2 processing
+ *
+ * @param c the connection HTTP/2 is starting on
+ * @param r the upgrade request that still awaits an answer, optional
+ * @param s the server selected for this connection (can be != c->base_server)
+ */
+apr_status_t h2_c1_setup(conn_rec *c, request_rec *r, server_rec *s);
+
+/**
+ * Run the HTTP/2 primary connection in synchronous fashion.
+ * Returns when the HTTP/2 session is done and the connection
+ * will close, or when a fatal error occurred.
+ *
+ * @param c the http2 connection to run
+ * @return APR_SUCCESS when session is done.
+ */
+apr_status_t h2_c1_run(conn_rec *c);
+
+/**
+ * The primary connection is about to close. If we have not sent a GOAWAY
+ * yet, this is the last chance.
+ */
+apr_status_t h2_c1_pre_close(struct h2_conn_ctx_t *ctx, conn_rec *c);
+
+/**
+ * Check if the connection allows a direct detection of HTTP/2,
+ * as configurable by the H2Direct directive.
+ * @param c the connection to check on
+ * @return != 0 if direct detection is enabled
+ */
+int h2_c1_allows_direct(conn_rec *c);
+
+/**
+ * Check if the "Upgrade" HTTP/1.1 mode of protocol switching is enabled
+ * for the given request.
+ * @param r the request to check
+ * @return != 0 iff Upgrade switching is enabled
+ */
+int h2_c1_can_upgrade(request_rec *r);
+
+/* Register hooks for h2 handling on primary connections.
+ */
+void h2_c1_register_hooks(void);
+
+/**
+ * Child is about to be stopped, release unused resources
+ */
+void h2_c1_child_stopping(apr_pool_t *pool, int graceful);
+
+#endif /* defined(__mod_h2__h2_c1__) */
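
A condensed sketch of the call order of the functions above, following h2_c1_hook_process_connection() in h2_c1.c; the direct-detection peek and the creation of the h2_conn_ctx_t are omitted here and assumed to have happened earlier:

#include <httpd.h>
#include "h2_c1.h"
#include "h2_conn_ctx.h"

static int example_process_connection(conn_rec *c)
{
    h2_conn_ctx_t *ctx = h2_conn_ctx_get(c);

    if (c->master || !ctx) {
        return DECLINED;      /* not a primary connection selected for h2 */
    }
    if (!ctx->session
        && h2_c1_setup(c, NULL, c->base_server) != APR_SUCCESS) {
        return DECLINED;
    }
    h2_c1_run(c);             /* returns when the HTTP/2 session is done */
    return OK;
}
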
diff --git a/modules/http2/h2_c1_io.c b/modules/http2/h2_c1_io.c
new file mode 100644
index 0000000..ade8836
--- /dev/null
+++ b/modules/http2/h2_c1_io.c
@@ -0,0 +1,545 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <assert.h>
+#include <apr_strings.h>
+#include <ap_mpm.h>
+#include <mpm_common.h>
+
+#include <httpd.h>
+#include <http_core.h>
+#include <http_log.h>
+#include <http_connection.h>
+#include <http_protocol.h>
+#include <http_request.h>
+#include <http_ssl.h>
+
+#include "h2_private.h"
+#include "h2_bucket_eos.h"
+#include "h2_config.h"
+#include "h2_c1.h"
+#include "h2_c1_io.h"
+#include "h2_protocol.h"
+#include "h2_session.h"
+#include "h2_util.h"
+
+#define TLS_DATA_MAX (16*1024)
+
+/* Calculated like this: assuming MTU 1500 bytes
+ * 1500 - 40 (IP) - 20 (TCP) - 40 (TCP options)
+ * - TLS overhead (60-100)
+ * ~= 1300 bytes */
+#define WRITE_SIZE_INITIAL 1300
+
+/* The maximum we'd like to write in one chunk is
+ * the max size of a TLS record. When pushing
+ * many frames down the h2 connection, this might
+ * align differently because of headers and other
+ * frames, or simply because not enough data is
+ * in a response body.
+ * However, keeping frames at or below this limit
+ * should make optimizations at the layer that writes
+ * to TLS easier.
+ */
+#define WRITE_SIZE_MAX (TLS_DATA_MAX)
+
+#define BUF_REMAIN ((apr_size_t)(bmax-off))
+
+static void h2_c1_io_bb_log(conn_rec *c, int stream_id, int level,
+ const char *tag, apr_bucket_brigade *bb)
+{
+ char buffer[16 * 1024];
+ const char *line = "(null)";
+ int bmax = sizeof(buffer)/sizeof(buffer[0]);
+ int off = 0;
+ apr_bucket *b;
+
+ (void)stream_id;
+ if (bb) {
+ memset(buffer, 0, bmax--);
+ for (b = APR_BRIGADE_FIRST(bb);
+ bmax && (b != APR_BRIGADE_SENTINEL(bb));
+ b = APR_BUCKET_NEXT(b)) {
+
+ if (APR_BUCKET_IS_METADATA(b)) {
+ if (APR_BUCKET_IS_EOS(b)) {
+ off += apr_snprintf(buffer+off, BUF_REMAIN, "eos ");
+ }
+ else if (APR_BUCKET_IS_FLUSH(b)) {
+ off += apr_snprintf(buffer+off, BUF_REMAIN, "flush ");
+ }
+ else if (AP_BUCKET_IS_EOR(b)) {
+ off += apr_snprintf(buffer+off, BUF_REMAIN, "eor ");
+ }
+ else if (H2_BUCKET_IS_H2EOS(b)) {
+ off += apr_snprintf(buffer+off, BUF_REMAIN, "h2eos ");
+ }
+ else {
+ off += apr_snprintf(buffer+off, BUF_REMAIN, "meta(unknown) ");
+ }
+ }
+ else {
+ const char *btype = "data";
+ if (APR_BUCKET_IS_FILE(b)) {
+ btype = "file";
+ }
+ else if (APR_BUCKET_IS_PIPE(b)) {
+ btype = "pipe";
+ }
+ else if (APR_BUCKET_IS_SOCKET(b)) {
+ btype = "socket";
+ }
+ else if (APR_BUCKET_IS_HEAP(b)) {
+ btype = "heap";
+ }
+ else if (APR_BUCKET_IS_TRANSIENT(b)) {
+ btype = "transient";
+ }
+ else if (APR_BUCKET_IS_IMMORTAL(b)) {
+ btype = "immortal";
+ }
+#if APR_HAS_MMAP
+ else if (APR_BUCKET_IS_MMAP(b)) {
+ btype = "mmap";
+ }
+#endif
+ else if (APR_BUCKET_IS_POOL(b)) {
+ btype = "pool";
+ }
+
+ off += apr_snprintf(buffer+off, BUF_REMAIN, "%s[%ld] ",
+ btype,
+ (long)(b->length == ((apr_size_t)-1)? -1UL : b->length));
+ }
+ }
+ line = *buffer? buffer : "(empty)";
+ }
+ /* Intentional no APLOGNO */
+ ap_log_cerror(APLOG_MARK, level, 0, c, "h2_session(%ld)-%s: %s",
+ c->id, tag, line);
+
+}
+#define C1_IO_BB_LOG(c, stream_id, level, tag, bb) \
+ if (APLOG_C_IS_LEVEL(c, level)) { \
+ h2_c1_io_bb_log((c), (stream_id), (level), (tag), (bb)); \
+ }
+
+
+apr_status_t h2_c1_io_init(h2_c1_io *io, h2_session *session)
+{
+ conn_rec *c = session->c1;
+
+ io->session = session;
+ io->output = apr_brigade_create(c->pool, c->bucket_alloc);
+ io->is_tls = ap_ssl_conn_is_ssl(session->c1);
+ io->buffer_output = io->is_tls;
+ io->flush_threshold = 4 * (apr_size_t)h2_config_sgeti64(session->s, H2_CONF_STREAM_MAX_MEM);
+
+ if (io->buffer_output) {
+ /* This is what we start with,
+ * see https://issues.apache.org/jira/browse/TS-2503
+ */
+ io->warmup_size = h2_config_sgeti64(session->s, H2_CONF_TLS_WARMUP_SIZE);
+ io->cooldown_usecs = (h2_config_sgeti(session->s, H2_CONF_TLS_COOLDOWN_SECS)
+ * APR_USEC_PER_SEC);
+ io->cooldown_usecs = 0;
+ io->write_size = (io->cooldown_usecs > 0?
+ WRITE_SIZE_INITIAL : WRITE_SIZE_MAX);
+ }
+ else {
+ io->warmup_size = 0;
+ io->cooldown_usecs = 0;
+ io->write_size = 0;
+ }
+
+ if (APLOGctrace1(c)) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE4, 0, c,
+ "h2_c1_io(%ld): init, buffering=%d, warmup_size=%ld, "
+ "cd_secs=%f", c->id, io->buffer_output,
+ (long)io->warmup_size,
+ ((double)io->cooldown_usecs/APR_USEC_PER_SEC));
+ }
+
+ return APR_SUCCESS;
+}
+
+static void append_scratch(h2_c1_io *io)
+{
+ if (io->scratch && io->slen > 0) {
+ apr_bucket *b = apr_bucket_heap_create(io->scratch, io->slen,
+ apr_bucket_free,
+ io->session->c1->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(io->output, b);
+ io->buffered_len += io->slen;
+ io->scratch = NULL;
+ io->slen = io->ssize = 0;
+ }
+}
+
+static apr_size_t assure_scratch_space(h2_c1_io *io) {
+ apr_size_t remain = io->ssize - io->slen;
+ if (io->scratch && remain == 0) {
+ append_scratch(io);
+ }
+ if (!io->scratch) {
+ /* we control the size and it is larger than what buckets usually
+ * allocate. */
+ io->scratch = apr_bucket_alloc(io->write_size, io->session->c1->bucket_alloc);
+ io->ssize = io->write_size;
+ io->slen = 0;
+ remain = io->ssize;
+ }
+ return remain;
+}
+
+static apr_status_t read_to_scratch(h2_c1_io *io, apr_bucket *b)
+{
+ apr_status_t status;
+ const char *data;
+ apr_size_t len;
+
+ if (!b->length) {
+ return APR_SUCCESS;
+ }
+
+ ap_assert(b->length <= (io->ssize - io->slen));
+ if (APR_BUCKET_IS_FILE(b)) {
+ apr_bucket_file *f = (apr_bucket_file *)b->data;
+ apr_file_t *fd = f->fd;
+ apr_off_t offset = b->start;
+
+ len = b->length;
+ /* file buckets will read 8000 byte chunks and split
+ * themselves. However, we do know *exactly* how many
+ * bytes we need where. So we read the file directly to
+ * where we need it.
+ */
+ status = apr_file_seek(fd, APR_SET, &offset);
+ if (status != APR_SUCCESS) {
+ return status;
+ }
+ status = apr_file_read(fd, io->scratch + io->slen, &len);
+ if (status != APR_SUCCESS && status != APR_EOF) {
+ return status;
+ }
+ io->slen += len;
+ }
+ else if (APR_BUCKET_IS_MMAP(b)) {
+ ap_log_cerror(APLOG_MARK, APLOG_INFO, 0, io->session->c1,
+ "h2_c1_io(%ld): seeing mmap bucket of size %ld, scratch remain=%ld",
+ io->session->c1->id, (long)b->length, (long)(io->ssize - io->slen));
+ status = apr_bucket_read(b, &data, &len, APR_BLOCK_READ);
+ if (status == APR_SUCCESS) {
+ memcpy(io->scratch+io->slen, data, len);
+ io->slen += len;
+ }
+ }
+ else {
+ status = apr_bucket_read(b, &data, &len, APR_BLOCK_READ);
+ if (status == APR_SUCCESS) {
+ memcpy(io->scratch+io->slen, data, len);
+ io->slen += len;
+ }
+ }
+ return status;
+}
+
+static apr_status_t pass_output(h2_c1_io *io, int flush)
+{
+ conn_rec *c = io->session->c1;
+ apr_off_t bblen;
+ apr_status_t rv;
+
+ append_scratch(io);
+ if (flush) {
+ if (!APR_BUCKET_IS_FLUSH(APR_BRIGADE_LAST(io->output))) {
+ apr_bucket *b = apr_bucket_flush_create(c->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(io->output, b);
+ }
+ }
+ if (APR_BRIGADE_EMPTY(io->output)) {
+ return APR_SUCCESS;
+ }
+
+ io->unflushed = !APR_BUCKET_IS_FLUSH(APR_BRIGADE_LAST(io->output));
+ apr_brigade_length(io->output, 0, &bblen);
+ C1_IO_BB_LOG(c, 0, APLOG_TRACE2, "out", io->output);
+
+ rv = ap_pass_brigade(c->output_filters, io->output);
+ if (APR_SUCCESS != rv) goto cleanup;
+
+ io->buffered_len = 0;
+ io->bytes_written += (apr_size_t)bblen;
+
+ if (io->write_size < WRITE_SIZE_MAX
+ && io->bytes_written >= io->warmup_size) {
+ /* connection is hot, use max size */
+ io->write_size = WRITE_SIZE_MAX;
+ }
+ else if (io->cooldown_usecs > 0
+ && io->write_size > WRITE_SIZE_INITIAL) {
+ apr_time_t now = apr_time_now();
+ if ((now - io->last_write) >= io->cooldown_usecs) {
+ /* long time not written, reset write size */
+ io->write_size = WRITE_SIZE_INITIAL;
+ io->bytes_written = 0;
+ }
+ else {
+ io->last_write = now;
+ }
+ }
+
+cleanup:
+ if (APR_SUCCESS != rv) {
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, rv, c, APLOGNO(03044)
+ "h2_c1_io(%ld): pass_out brigade %ld bytes",
+ c->id, (long)bblen);
+ }
+ apr_brigade_cleanup(io->output);
+ return rv;
+}
+
+int h2_c1_io_needs_flush(h2_c1_io *io)
+{
+ return io->buffered_len >= io->flush_threshold;
+}
+
+int h2_c1_io_pending(h2_c1_io *io)
+{
+ return !APR_BRIGADE_EMPTY(io->output) || (io->scratch && io->slen > 0);
+}
+
+apr_status_t h2_c1_io_pass(h2_c1_io *io)
+{
+ apr_status_t rv = APR_SUCCESS;
+
+ if (h2_c1_io_pending(io)) {
+ rv = pass_output(io, 0);
+ }
+ return rv;
+}
+
+apr_status_t h2_c1_io_assure_flushed(h2_c1_io *io)
+{
+ apr_status_t rv = APR_SUCCESS;
+
+ if (h2_c1_io_pending(io) || io->unflushed) {
+ rv = pass_output(io, 1);
+ if (APR_SUCCESS != rv) goto cleanup;
+ }
+cleanup:
+ return rv;
+}
+
+apr_status_t h2_c1_io_add_data(h2_c1_io *io, const char *data, size_t length)
+{
+ apr_status_t status = APR_SUCCESS;
+ apr_size_t remain;
+
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, io->session->c1,
+ "h2_c1_io(%ld): adding %ld data bytes",
+ io->session->c1->id, (long)length);
+ if (io->buffer_output) {
+ while (length > 0) {
+ remain = assure_scratch_space(io);
+ if (remain >= length) {
+ memcpy(io->scratch + io->slen, data, length);
+ io->slen += length;
+ length = 0;
+ }
+ else {
+ memcpy(io->scratch + io->slen, data, remain);
+ io->slen += remain;
+ data += remain;
+ length -= remain;
+ }
+ }
+ }
+ else {
+ status = apr_brigade_write(io->output, NULL, NULL, data, length);
+ io->buffered_len += length;
+ }
+ return status;
+}
+
+apr_status_t h2_c1_io_append(h2_c1_io *io, apr_bucket_brigade *bb)
+{
+ apr_bucket *b;
+ apr_status_t rv = APR_SUCCESS;
+
+ while (!APR_BRIGADE_EMPTY(bb)) {
+ b = APR_BRIGADE_FIRST(bb);
+ if (APR_BUCKET_IS_METADATA(b) || APR_BUCKET_IS_MMAP(b)) {
+ /* need to finish any open scratch bucket, as meta data
+             * needs to be forwarded "in order". */
+ append_scratch(io);
+ APR_BUCKET_REMOVE(b);
+ APR_BRIGADE_INSERT_TAIL(io->output, b);
+ }
+ else if (io->buffer_output) {
+ apr_size_t remain = assure_scratch_space(io);
+ if (b->length > remain) {
+ apr_bucket_split(b, remain);
+ if (io->slen == 0) {
+ /* complete write_size bucket, append unchanged */
+ APR_BUCKET_REMOVE(b);
+ APR_BRIGADE_INSERT_TAIL(io->output, b);
+ io->buffered_len += b->length;
+ continue;
+ }
+ }
+ else {
+ /* bucket fits in remain, copy to scratch */
+ rv = read_to_scratch(io, b);
+ apr_bucket_delete(b);
+ if (APR_SUCCESS != rv) goto cleanup;
+ continue;
+ }
+ }
+ else {
+ /* no buffering, forward buckets setaside on flush */
+ apr_bucket_setaside(b, io->session->c1->pool);
+ APR_BUCKET_REMOVE(b);
+ APR_BRIGADE_INSERT_TAIL(io->output, b);
+ io->buffered_len += b->length;
+ }
+ }
+cleanup:
+ return rv;
+}
+
+static apr_status_t c1_in_feed_bucket(h2_session *session,
+ apr_bucket *b, apr_ssize_t *inout_len)
+{
+ apr_status_t rv = APR_SUCCESS;
+ apr_size_t len;
+ const char *data;
+ ssize_t n;
+
+ rv = apr_bucket_read(b, &data, &len, APR_BLOCK_READ);
+ while (APR_SUCCESS == rv && len > 0) {
+ n = nghttp2_session_mem_recv(session->ngh2, (const uint8_t *)data, len);
+
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE4, 0, session->c1,
+ H2_SSSN_MSG(session, "fed %ld bytes to nghttp2, %ld read"),
+ (long)len, (long)n);
+ if (n < 0) {
+ if (nghttp2_is_fatal((int)n)) {
+ h2_session_event(session, H2_SESSION_EV_PROTO_ERROR,
+ (int)n, nghttp2_strerror((int)n));
+ rv = APR_EGENERAL;
+ }
+ }
+ else {
+ *inout_len += n;
+ if ((apr_ssize_t)len <= n) {
+ break;
+ }
+ len -= (apr_size_t)n;
+ data += n;
+ }
+ }
+
+ return rv;
+}
+
+static apr_status_t c1_in_feed_brigade(h2_session *session,
+ apr_bucket_brigade *bb,
+ apr_ssize_t *inout_len)
+{
+ apr_status_t rv = APR_SUCCESS;
+ apr_bucket* b;
+
+ *inout_len = 0;
+ while (!APR_BRIGADE_EMPTY(bb)) {
+ b = APR_BRIGADE_FIRST(bb);
+ if (!APR_BUCKET_IS_METADATA(b)) {
+ rv = c1_in_feed_bucket(session, b, inout_len);
+ if (APR_SUCCESS != rv) goto cleanup;
+ }
+ apr_bucket_delete(b);
+ }
+cleanup:
+ apr_brigade_cleanup(bb);
+ return rv;
+}
+
+static apr_status_t read_and_feed(h2_session *session)
+{
+ apr_ssize_t bytes_fed, bytes_requested;
+ apr_status_t rv;
+
+ bytes_requested = H2MAX(APR_BUCKET_BUFF_SIZE, session->max_stream_mem * 4);
+ rv = ap_get_brigade(session->c1->input_filters,
+ session->bbtmp, AP_MODE_READBYTES,
+ APR_NONBLOCK_READ, bytes_requested);
+
+ if (APR_SUCCESS == rv) {
+ if (!APR_BRIGADE_EMPTY(session->bbtmp)) {
+ h2_util_bb_log(session->c1, session->id, APLOG_TRACE2, "c1 in",
+ session->bbtmp);
+ rv = c1_in_feed_brigade(session, session->bbtmp, &bytes_fed);
+ session->io.bytes_read += bytes_fed;
+ }
+ else {
+ rv = APR_EAGAIN;
+ }
+ }
+ return rv;
+}
+
+apr_status_t h2_c1_read(h2_session *session)
+{
+ apr_status_t rv;
+
+ /* H2_IN filter handles all incoming data against the session.
+ * We just pull at the filter chain to make it happen */
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, session->c1,
+ H2_SSSN_MSG(session, "session_read start"));
+ rv = read_and_feed(session);
+
+ if (APR_SUCCESS == rv) {
+ h2_session_dispatch_event(session, H2_SESSION_EV_INPUT_PENDING, 0, NULL);
+ }
+ else if (APR_STATUS_IS_EAGAIN(rv)) {
+ /* Signal that we have exhausted the input momentarily.
+ * This might switch to polling the socket */
+ h2_session_dispatch_event(session, H2_SESSION_EV_INPUT_EXHAUSTED, 0, NULL);
+ }
+ else if (APR_SUCCESS != rv) {
+ if (APR_STATUS_IS_ETIMEDOUT(rv)
+ || APR_STATUS_IS_ECONNABORTED(rv)
+ || APR_STATUS_IS_ECONNRESET(rv)
+ || APR_STATUS_IS_EOF(rv)
+ || APR_STATUS_IS_EBADF(rv)) {
+ /* common status for a client that has left */
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, rv, session->c1,
+ H2_SSSN_MSG(session, "input gone"));
+ }
+ else {
+ /* uncommon status, log on INFO so that we see this */
+ ap_log_cerror( APLOG_MARK, APLOG_DEBUG, rv, session->c1,
+ H2_SSSN_LOG(APLOGNO(02950), session,
+ "error reading, terminating"));
+ }
+ h2_session_dispatch_event(session, H2_SESSION_EV_CONN_ERROR, 0, NULL);
+ }
+
+ apr_brigade_cleanup(session->bbtmp);
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, rv, session->c1,
+ H2_SSSN_MSG(session, "session_read done"));
+ return rv;
+}
diff --git a/modules/http2/h2_c1_io.h b/modules/http2/h2_c1_io.h
new file mode 100644
index 0000000..d891ffb
--- /dev/null
+++ b/modules/http2/h2_c1_io.h
@@ -0,0 +1,100 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __mod_h2__h2_c1_io__
+#define __mod_h2__h2_c1_io__
+
+struct h2_config;
+struct h2_session;
+
+/* h2_c1_io is the I/O handler of a primary (c1) httpd connection. It keeps
+ * an output brigade and works with the installed connection filters.
+ * Input is read via h2_c1_read() and fed directly to nghttp2, so that it
+ * can be processed without extra copying.
+ */
+typedef struct {
+ struct h2_session *session;
+ apr_bucket_brigade *output;
+
+ int is_tls;
+ int unflushed;
+ apr_time_t cooldown_usecs;
+ apr_int64_t warmup_size;
+
+ apr_size_t write_size;
+ apr_time_t last_write;
+ apr_int64_t bytes_read;
+ apr_int64_t bytes_written;
+
+ int buffer_output;
+ apr_off_t buffered_len;
+ apr_off_t flush_threshold;
+ unsigned int is_flushed : 1;
+
+ char *scratch;
+ apr_size_t ssize;
+ apr_size_t slen;
+} h2_c1_io;
+
+apr_status_t h2_c1_io_init(h2_c1_io *io, struct h2_session *session);
+
+/**
+ * Append data to the buffered output.
+ * @param buf the data to append
+ * @param length the length of the data to append
+ */
+apr_status_t h2_c1_io_add_data(h2_c1_io *io,
+ const char *buf,
+ size_t length);
+
+apr_status_t h2_c1_io_add(h2_c1_io *io, apr_bucket *b);
+
+apr_status_t h2_c1_io_append(h2_c1_io *io, apr_bucket_brigade *bb);
+
+/**
+ * Pass any buffered data on to the connection output filters.
+ * @param io the connection io
+ */
+apr_status_t h2_c1_io_pass(h2_c1_io *io);
+
+/**
+ * If there is any data pending, or any data was sent
+ * since the last FLUSH, send out a FLUSH now.
+ */
+apr_status_t h2_c1_io_assure_flushed(h2_c1_io *io);
+
+/**
+ * Check if the buffered amount of data needs flushing.
+ */
+int h2_c1_io_needs_flush(h2_c1_io *io);
+
+/**
+ * Check if we have output pending.
+ */
+int h2_c1_io_pending(h2_c1_io *io);
+
+struct h2_session;
+
+/**
+ * Read c1 input and pass it on to nghttp2.
+ * @param session the session
+ */
+apr_status_t h2_c1_read(struct h2_session *session);
+
+#endif /* defined(__mod_h2__h2_c1_io__) */
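
A minimal sketch of the typical write pattern against this API; write_frame() is invented for the illustration and `io` is assumed to have been initialized with h2_c1_io_init():

#include "h2_c1_io.h"

static apr_status_t write_frame(h2_c1_io *io, const char *data, size_t len,
                                int end_of_burst)
{
    apr_status_t rv = h2_c1_io_add_data(io, data, len);

    if (rv != APR_SUCCESS) return rv;
    if (end_of_burst) {
        /* force everything out, including a FLUSH bucket */
        return h2_c1_io_assure_flushed(io);
    }
    if (h2_c1_io_needs_flush(io)) {
        /* more than the flush threshold is buffered, pass it on now */
        return h2_c1_io_pass(io);
    }
    return APR_SUCCESS;
}
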
diff --git a/modules/http2/h2_c2.c b/modules/http2/h2_c2.c
new file mode 100644
index 0000000..44a08d0
--- /dev/null
+++ b/modules/http2/h2_c2.c
@@ -0,0 +1,864 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <assert.h>
+#include <stddef.h>
+
+#include <apr_atomic.h>
+#include <apr_strings.h>
+
+#include <httpd.h>
+#include <http_core.h>
+#include <http_config.h>
+#include <http_connection.h>
+#include <http_protocol.h>
+#include <http_request.h>
+#include <http_log.h>
+#include <http_vhost.h>
+#include <util_filter.h>
+#include <ap_mmn.h>
+#include <ap_mpm.h>
+#include <mpm_common.h>
+#include <mod_core.h>
+#include <scoreboard.h>
+
+#include "h2_private.h"
+#include "h2.h"
+#include "h2_bucket_beam.h"
+#include "h2_c1.h"
+#include "h2_config.h"
+#include "h2_conn_ctx.h"
+#include "h2_c2_filter.h"
+#include "h2_protocol.h"
+#include "h2_mplx.h"
+#include "h2_request.h"
+#include "h2_headers.h"
+#include "h2_session.h"
+#include "h2_stream.h"
+#include "h2_c2.h"
+#include "h2_util.h"
+
+
+static module *mpm_module;
+static int mpm_supported = 1;
+static apr_socket_t *dummy_socket;
+
+#if AP_HAS_RESPONSE_BUCKETS
+
+static ap_filter_rec_t *c2_net_in_filter_handle;
+static ap_filter_rec_t *c2_net_out_filter_handle;
+static ap_filter_rec_t *c2_request_in_filter_handle;
+static ap_filter_rec_t *c2_notes_out_filter_handle;
+
+#endif /* AP_HAS_RESPONSE_BUCKETS */
+
+static void check_modules(int force)
+{
+ static int checked = 0;
+ int i;
+
+ if (force || !checked) {
+ for (i = 0; ap_loaded_modules[i]; ++i) {
+ module *m = ap_loaded_modules[i];
+
+ if (!strcmp("event.c", m->name)) {
+ mpm_module = m;
+ break;
+ }
+ else if (!strcmp("motorz.c", m->name)) {
+ mpm_module = m;
+ break;
+ }
+ else if (!strcmp("mpm_netware.c", m->name)) {
+ mpm_module = m;
+ break;
+ }
+ else if (!strcmp("prefork.c", m->name)) {
+ mpm_module = m;
+                /* While http2 can work really well on prefork, it collides with
+                 * today's use case for prefork: running single-threaded app engines
+                 * like php. If we restrict h2_workers to 1 per process, php will
+                 * work fine, but browsers will be limited to 1 active request at a
+ * time. */
+ mpm_supported = 0;
+ break;
+ }
+ else if (!strcmp("simple_api.c", m->name)) {
+ mpm_module = m;
+ mpm_supported = 0;
+ break;
+ }
+ else if (!strcmp("mpm_winnt.c", m->name)) {
+ mpm_module = m;
+ break;
+ }
+ else if (!strcmp("worker.c", m->name)) {
+ mpm_module = m;
+ break;
+ }
+ }
+ checked = 1;
+ }
+}
+
+const char *h2_conn_mpm_name(void)
+{
+ check_modules(0);
+ return mpm_module? mpm_module->name : "unknown";
+}
+
+int h2_mpm_supported(void)
+{
+ check_modules(0);
+ return mpm_supported;
+}
+
+apr_status_t h2_c2_child_init(apr_pool_t *pool, server_rec *s)
+{
+ check_modules(1);
+ return apr_socket_create(&dummy_socket, APR_INET, SOCK_STREAM,
+ APR_PROTO_TCP, pool);
+}
+
+void h2_c2_destroy(conn_rec *c2)
+{
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE3, 0, c2,
+ "h2_c2(%s): destroy", c2->log_id);
+ apr_pool_destroy(c2->pool);
+}
+
+void h2_c2_abort(conn_rec *c2, conn_rec *from)
+{
+ h2_conn_ctx_t *conn_ctx = h2_conn_ctx_get(c2);
+
+ AP_DEBUG_ASSERT(conn_ctx);
+ AP_DEBUG_ASSERT(conn_ctx->stream_id);
+ if (conn_ctx->beam_in) {
+ h2_beam_abort(conn_ctx->beam_in, from);
+ }
+ if (conn_ctx->beam_out) {
+ h2_beam_abort(conn_ctx->beam_out, from);
+ }
+ c2->aborted = 1;
+}
+
+typedef struct {
+ apr_bucket_brigade *bb; /* c2: data in holding area */
+} h2_c2_fctx_in_t;
+
+static apr_status_t h2_c2_filter_in(ap_filter_t* f,
+ apr_bucket_brigade* bb,
+ ap_input_mode_t mode,
+ apr_read_type_e block,
+ apr_off_t readbytes)
+{
+ h2_conn_ctx_t *conn_ctx;
+ h2_c2_fctx_in_t *fctx = f->ctx;
+ apr_status_t status = APR_SUCCESS;
+ apr_bucket *b;
+ apr_off_t bblen;
+ apr_size_t rmax = (readbytes < APR_INT32_MAX)?
+ (apr_size_t)readbytes : APR_INT32_MAX;
+
+ conn_ctx = h2_conn_ctx_get(f->c);
+ AP_DEBUG_ASSERT(conn_ctx);
+
+ if (mode == AP_MODE_INIT) {
+ return ap_get_brigade(f->c->input_filters, bb, mode, block, readbytes);
+ }
+
+ if (f->c->aborted) {
+ return APR_ECONNABORTED;
+ }
+
+ if (APLOGctrace3(f->c)) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE3, 0, f->c,
+ "h2_c2_in(%s-%d): read, mode=%d, block=%d, readbytes=%ld",
+ conn_ctx->id, conn_ctx->stream_id, mode, block,
+ (long)readbytes);
+ }
+
+ if (!fctx) {
+ fctx = apr_pcalloc(f->c->pool, sizeof(*fctx));
+ f->ctx = fctx;
+ fctx->bb = apr_brigade_create(f->c->pool, f->c->bucket_alloc);
+ if (!conn_ctx->beam_in) {
+ b = apr_bucket_eos_create(f->c->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(fctx->bb, b);
+ }
+ }
+
+ while (APR_BRIGADE_EMPTY(fctx->bb)) {
+ /* Get more input data for our request. */
+ if (APLOGctrace2(f->c)) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, status, f->c,
+ "h2_c2_in(%s-%d): get more data from mplx, block=%d, "
+ "readbytes=%ld",
+ conn_ctx->id, conn_ctx->stream_id, block, (long)readbytes);
+ }
+ if (conn_ctx->beam_in) {
+ if (conn_ctx->pipe_in[H2_PIPE_OUT]) {
+receive:
+ status = h2_beam_receive(conn_ctx->beam_in, f->c, fctx->bb, APR_NONBLOCK_READ,
+ conn_ctx->mplx->stream_max_mem);
+ if (APR_STATUS_IS_EAGAIN(status) && APR_BLOCK_READ == block) {
+ status = h2_util_wait_on_pipe(conn_ctx->pipe_in[H2_PIPE_OUT]);
+ if (APR_SUCCESS == status) {
+ goto receive;
+ }
+ }
+ }
+ else {
+ status = h2_beam_receive(conn_ctx->beam_in, f->c, fctx->bb, block,
+ conn_ctx->mplx->stream_max_mem);
+ }
+ }
+ else {
+ status = APR_EOF;
+ }
+
+ if (APLOGctrace3(f->c)) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE3, status, f->c,
+ "h2_c2_in(%s-%d): read returned",
+ conn_ctx->id, conn_ctx->stream_id);
+ }
+ if (APR_STATUS_IS_EAGAIN(status)
+ && (mode == AP_MODE_GETLINE || block == APR_BLOCK_READ)) {
+ /* chunked input handling does not seem to like it if we
+ * return with APR_EAGAIN from a GETLINE read...
+ * upload 100k test on test-ser.example.org hangs */
+ status = APR_SUCCESS;
+ }
+ else if (APR_STATUS_IS_EOF(status)) {
+ break;
+ }
+ else if (status != APR_SUCCESS) {
+ conn_ctx->last_err = status;
+ return status;
+ }
+
+ if (APLOGctrace3(f->c)) {
+ h2_util_bb_log(f->c, conn_ctx->stream_id, APLOG_TRACE3,
+ "c2 input recv raw", fctx->bb);
+ }
+ if (h2_c_logio_add_bytes_in) {
+ apr_brigade_length(bb, 0, &bblen);
+ h2_c_logio_add_bytes_in(f->c, bblen);
+ }
+ }
+
+ /* Nothing there, no more data to get. Return. */
+ if (status == APR_EOF && APR_BRIGADE_EMPTY(fctx->bb)) {
+ return status;
+ }
+
+ if (APLOGctrace3(f->c)) {
+ h2_util_bb_log(f->c, conn_ctx->stream_id, APLOG_TRACE3,
+ "c2 input.bb", fctx->bb);
+ }
+
+ if (APR_BRIGADE_EMPTY(fctx->bb)) {
+ if (APLOGctrace3(f->c)) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE3, 0, f->c,
+ "h2_c2_in(%s-%d): no data",
+ conn_ctx->id, conn_ctx->stream_id);
+ }
+ return (block == APR_NONBLOCK_READ)? APR_EAGAIN : APR_EOF;
+ }
+
+ if (mode == AP_MODE_EXHAUSTIVE) {
+ /* return all we have */
+ APR_BRIGADE_CONCAT(bb, fctx->bb);
+ }
+ else if (mode == AP_MODE_READBYTES) {
+ status = h2_brigade_concat_length(bb, fctx->bb, rmax);
+ }
+ else if (mode == AP_MODE_SPECULATIVE) {
+ status = h2_brigade_copy_length(bb, fctx->bb, rmax);
+ }
+ else if (mode == AP_MODE_GETLINE) {
+        /* we are reading a single LF line, e.g. the HTTP headers.
+         * This has the nasty side effect of splitting the bucket, even
+         * though it ends with CRLF, and of creating a 0 length bucket */
+ status = apr_brigade_split_line(bb, fctx->bb, block,
+ HUGE_STRING_LEN);
+ if (APLOGctrace3(f->c)) {
+ char buffer[1024];
+ apr_size_t len = sizeof(buffer)-1;
+ apr_brigade_flatten(bb, buffer, &len);
+ buffer[len] = 0;
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE3, status, f->c,
+ "h2_c2_in(%s-%d): getline: %s",
+ conn_ctx->id, conn_ctx->stream_id, buffer);
+ }
+ }
+ else {
+ /* Hmm, well. There is mode AP_MODE_EATCRLF, but we chose not
+ * to support it. Seems to work. */
+ ap_log_cerror(APLOG_MARK, APLOG_ERR, APR_ENOTIMPL, f->c,
+ APLOGNO(03472)
+ "h2_c2_in(%s-%d), unsupported READ mode %d",
+ conn_ctx->id, conn_ctx->stream_id, mode);
+ status = APR_ENOTIMPL;
+ }
+
+ if (APLOGctrace3(f->c)) {
+ apr_brigade_length(bb, 0, &bblen);
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE3, status, f->c,
+ "h2_c2_in(%s-%d): %ld data bytes",
+ conn_ctx->id, conn_ctx->stream_id, (long)bblen);
+ }
+ return status;
+}
+
+static apr_status_t beam_out(conn_rec *c2, h2_conn_ctx_t *conn_ctx, apr_bucket_brigade* bb)
+{
+ apr_off_t written, header_len = 0;
+ apr_status_t rv;
+
+ if (h2_c_logio_add_bytes_out) {
+ /* mod_logio wants to report the number of bytes written in a
+ * response, including header and footer fields. Since h2 converts
+ * those during c1 processing into the HPACKed h2 HEADER frames,
+ * we need to give mod_logio something here and count just the
+ * raw lengths of all headers in the buckets. */
+ apr_bucket *b;
+ for (b = APR_BRIGADE_FIRST(bb);
+ b != APR_BRIGADE_SENTINEL(bb);
+ b = APR_BUCKET_NEXT(b)) {
+#if AP_HAS_RESPONSE_BUCKETS
+ if (AP_BUCKET_IS_RESPONSE(b)) {
+ header_len += (apr_off_t)response_length_estimate(b->data);
+ }
+ if (AP_BUCKET_IS_HEADERS(b)) {
+ header_len += (apr_off_t)headers_length_estimate(b->data);
+ }
+#else
+ if (H2_BUCKET_IS_HEADERS(b)) {
+ header_len += (apr_off_t)h2_bucket_headers_headers_length(b);
+ }
+#endif /* AP_HAS_RESPONSE_BUCKETS */
+ }
+ }
+
+ rv = h2_beam_send(conn_ctx->beam_out, c2, bb, APR_BLOCK_READ, &written);
+
+ if (APR_STATUS_IS_EAGAIN(rv)) {
+ rv = APR_SUCCESS;
+ }
+ if (written && h2_c_logio_add_bytes_out) {
+ h2_c_logio_add_bytes_out(c2, written + header_len);
+ }
+ return rv;
+}
+
+static apr_status_t h2_c2_filter_out(ap_filter_t* f, apr_bucket_brigade* bb)
+{
+ h2_conn_ctx_t *conn_ctx = h2_conn_ctx_get(f->c);
+ apr_status_t rv;
+
+ ap_assert(conn_ctx);
+#if AP_HAS_RESPONSE_BUCKETS
+ if (!conn_ctx->has_final_response) {
+ apr_bucket *e;
+
+ for (e = APR_BRIGADE_FIRST(bb);
+ e != APR_BRIGADE_SENTINEL(bb);
+ e = APR_BUCKET_NEXT(e))
+ {
+ if (AP_BUCKET_IS_RESPONSE(e)) {
+ ap_bucket_response *resp = e->data;
+ if (resp->status >= 200) {
+ conn_ctx->has_final_response = 1;
+ break;
+ }
+ }
+ if (APR_BUCKET_IS_EOS(e)) {
+ break;
+ }
+ }
+ }
+#endif /* AP_HAS_RESPONSE_BUCKETS */
+ rv = beam_out(f->c, conn_ctx, bb);
+
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, rv, f->c,
+ "h2_c2(%s-%d): output leave",
+ conn_ctx->id, conn_ctx->stream_id);
+ if (APR_SUCCESS != rv) {
+ h2_c2_abort(f->c, f->c);
+ }
+ return rv;
+}
+
+static void check_push(request_rec *r, const char *tag)
+{
+ apr_array_header_t *push_list = h2_config_push_list(r);
+
+ if (!r->expecting_100 && push_list && push_list->nelts > 0) {
+ int i, old_status;
+ const char *old_line;
+
+ ap_log_rerror(APLOG_MARK, APLOG_TRACE1, 0, r,
+ "%s, early announcing %d resources for push",
+ tag, push_list->nelts);
+ for (i = 0; i < push_list->nelts; ++i) {
+ h2_push_res *push = &APR_ARRAY_IDX(push_list, i, h2_push_res);
+ apr_table_add(r->headers_out, "Link",
+ apr_psprintf(r->pool, "<%s>; rel=preload%s",
+ push->uri_ref, push->critical? "; critical" : ""));
+ }
+ old_status = r->status;
+ old_line = r->status_line;
+ r->status = 103;
+ r->status_line = "103 Early Hints";
+ ap_send_interim_response(r, 1);
+ r->status = old_status;
+ r->status_line = old_line;
+ }
+}
+
+static int c2_hook_fixups(request_rec *r)
+{
+ conn_rec *c2 = r->connection;
+ h2_conn_ctx_t *conn_ctx;
+
+ if (!c2->master || !(conn_ctx = h2_conn_ctx_get(c2)) || !conn_ctx->stream_id) {
+ return DECLINED;
+ }
+
+ check_push(r, "late_fixup");
+
+ return DECLINED;
+}
+
+#if AP_HAS_RESPONSE_BUCKETS
+
+static void c2_pre_read_request(request_rec *r, conn_rec *c2)
+{
+ h2_conn_ctx_t *conn_ctx;
+
+ if (!c2->master || !(conn_ctx = h2_conn_ctx_get(c2)) || !conn_ctx->stream_id) {
+ return;
+ }
+ ap_log_rerror(APLOG_MARK, APLOG_TRACE3, 0, r,
+ "h2_c2(%s-%d): adding request filters",
+ conn_ctx->id, conn_ctx->stream_id);
+ ap_add_input_filter_handle(c2_request_in_filter_handle, NULL, r, r->connection);
+ ap_add_output_filter_handle(c2_notes_out_filter_handle, NULL, r, r->connection);
+}
+
+static int c2_post_read_request(request_rec *r)
+{
+ h2_conn_ctx_t *conn_ctx;
+ conn_rec *c2 = r->connection;
+ apr_time_t timeout;
+
+ if (!c2->master || !(conn_ctx = h2_conn_ctx_get(c2)) || !conn_ctx->stream_id) {
+ return DECLINED;
+ }
+ /* Now that the request_rec is fully initialized, set relevant params */
+ conn_ctx->server = r->server;
+ timeout = h2_config_geti64(r, r->server, H2_CONF_STREAM_TIMEOUT);
+ if (timeout <= 0) {
+ timeout = r->server->timeout;
+ }
+ h2_conn_ctx_set_timeout(conn_ctx, timeout);
+ /* We only handle this one request on the connection and tell everyone
+ * that there is no need to keep it "clean" if something fails. Also,
+ * this prevents mod_reqtimeout from doing funny business with monitoring
+ * keepalive timeouts.
+ */
+ r->connection->keepalive = AP_CONN_CLOSE;
+
+ if (conn_ctx->beam_in && !apr_table_get(r->headers_in, "Content-Length")) {
+ r->body_indeterminate = 1;
+ }
+
+ if (h2_config_sgeti(conn_ctx->server, H2_CONF_COPY_FILES)) {
+ ap_log_rerror(APLOG_MARK, APLOG_TRACE1, 0, r,
+ "h2_mplx(%s-%d): copy_files in output",
+ conn_ctx->id, conn_ctx->stream_id);
+ h2_beam_set_copy_files(conn_ctx->beam_out, 1);
+ }
+
+    /* Add the raw bytes of the request (e.g. header frame lengths) to
+     * the logio for this request. */
+ if (conn_ctx->request->raw_bytes && h2_c_logio_add_bytes_in) {
+ h2_c_logio_add_bytes_in(c2, conn_ctx->request->raw_bytes);
+ }
+ return OK;
+}
+
+static int c2_hook_pre_connection(conn_rec *c2, void *csd)
+{
+ h2_conn_ctx_t *conn_ctx;
+
+ if (!c2->master || !(conn_ctx = h2_conn_ctx_get(c2)) || !conn_ctx->stream_id) {
+ return DECLINED;
+ }
+
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, c2,
+ "h2_c2(%s-%d), adding filters",
+ conn_ctx->id, conn_ctx->stream_id);
+ ap_add_input_filter_handle(c2_net_in_filter_handle, NULL, NULL, c2);
+ ap_add_output_filter_handle(c2_net_out_filter_handle, NULL, NULL, c2);
+ if (c2->keepalives == 0) {
+        /* Simulate that we already had a request on this connection. Some
+         * hooks trigger special behaviour when keepalives is 0.
+         * (Not necessarily in pre_connection, but later. Set it here, so it
+         * is in place.) */
+        c2->keepalives = 1;
+        /* We signal that this connection will be closed after the request.
+         * This is true in the sense that we throw away all traffic data
+         * on this c2 connection after each request, although we might
+         * reuse internal structures like memory pools.
+         * The desired effect is that httpd does not try to clean up
+         * any dangling data on this connection when a request is done,
+         * which is unnecessary on an h2 stream.
+         */
+ c2->keepalive = AP_CONN_CLOSE;
+ }
+ return OK;
+}
+
+void h2_c2_register_hooks(void)
+{
+ /* When the connection processing actually starts, we might
+ * take over, if the connection is for a h2 stream.
+ */
+ ap_hook_pre_connection(c2_hook_pre_connection,
+ NULL, NULL, APR_HOOK_MIDDLE);
+
+ /* We need to manipulate the standard HTTP/1.1 protocol filters and
+ * install our own. This needs to be done very early. */
+ ap_hook_pre_read_request(c2_pre_read_request, NULL, NULL, APR_HOOK_MIDDLE);
+ ap_hook_post_read_request(c2_post_read_request, NULL, NULL, APR_HOOK_REALLY_FIRST);
+ ap_hook_fixups(c2_hook_fixups, NULL, NULL, APR_HOOK_LAST);
+
+ c2_net_in_filter_handle =
+ ap_register_input_filter("H2_C2_NET_IN", h2_c2_filter_in,
+ NULL, AP_FTYPE_NETWORK);
+ c2_net_out_filter_handle =
+ ap_register_output_filter("H2_C2_NET_OUT", h2_c2_filter_out,
+ NULL, AP_FTYPE_NETWORK);
+ c2_request_in_filter_handle =
+ ap_register_input_filter("H2_C2_REQUEST_IN", h2_c2_filter_request_in,
+ NULL, AP_FTYPE_PROTOCOL);
+ c2_notes_out_filter_handle =
+ ap_register_output_filter("H2_C2_NOTES_OUT", h2_c2_filter_notes_out,
+ NULL, AP_FTYPE_PROTOCOL);
+}
+
+#else /* AP_HAS_RESPONSE_BUCKETS */
+
+static apr_status_t c2_run_pre_connection(conn_rec *c2, apr_socket_t *csd)
+{
+ if (c2->keepalives == 0) {
+        /* Simulate that we already had a request on this connection. Some
+         * hooks trigger special behaviour when keepalives is 0.
+         * (Not necessarily in pre_connection, but later. Set it here, so it
+         * is in place.) */
+        c2->keepalives = 1;
+        /* We signal that this connection will be closed after the request.
+         * This is true in the sense that we throw away all traffic data
+         * on this c2 connection after each request, although we might
+         * reuse internal structures like memory pools.
+         * The desired effect is that httpd does not try to clean up
+         * any dangling data on this connection when a request is done,
+         * which is unnecessary on an h2 stream.
+         */
+ c2->keepalive = AP_CONN_CLOSE;
+ return ap_run_pre_connection(c2, csd);
+ }
+ ap_assert(c2->output_filters);
+ return APR_SUCCESS;
+}
+
+apr_status_t h2_c2_process(conn_rec *c2, apr_thread_t *thread, int worker_id)
+{
+ h2_conn_ctx_t *conn_ctx = h2_conn_ctx_get(c2);
+
+ ap_assert(conn_ctx);
+ ap_assert(conn_ctx->mplx);
+
+ /* See the discussion at <https://github.com/icing/mod_h2/issues/195>
+ *
+ * Each conn_rec->id is supposed to be unique at a point in time. Since
+ * some modules (and maybe external code) uses this id as an identifier
+ * for the request_rec they handle, it needs to be unique for secondary
+ * connections also.
+ *
+     * The MPM module assigns the connection ids and mod_unique_id uses
+     * them to generate identifiers for requests. While that works for
+     * HTTP/1.x, the parallel execution of several requests per
+     * connection will generate duplicate identifiers under load.
+ *
+ * The original implementation for secondary connection identifiers used
+ * to shift the master connection id up and assign the stream id to the
+ * lower bits. This was cramped on 32 bit systems, but on 64bit there was
+ * enough space.
+ *
+     * As issue 195 showed, mod_unique_id only uses the lower 32 bits of the
+     * connection id, even on 64 bit systems, which leads to collisions in
+     * request ids.
+ *
+ * The way master connection ids are generated, there is some space "at the
+     * top" of the lower 32 bits on almost all systems. If you have a setup
+ * with 64k threads per child and 255 child processes, you live on the edge.
+ *
+     * The new implementation shifts 8 bits and XORs in the worker
+     * id. This can still collide with more than 256 h2 workers under heavy
+     * load, but there seems to be no way to solve this in all possible
+     * configurations by mod_h2 alone.
+ */
+ c2->id = (c2->master->id << 8)^worker_id;
+
+ if (!conn_ctx->pre_conn_done) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, c2,
+ "h2_c2(%s-%d), adding filters",
+ conn_ctx->id, conn_ctx->stream_id);
+ ap_add_input_filter("H2_C2_NET_IN", NULL, NULL, c2);
+ ap_add_output_filter("H2_C2_NET_CATCH_H1", NULL, NULL, c2);
+ ap_add_output_filter("H2_C2_NET_OUT", NULL, NULL, c2);
+
+ c2_run_pre_connection(c2, ap_get_conn_socket(c2));
+ conn_ctx->pre_conn_done = 1;
+ }
+
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c2,
+ "h2_c2(%s-%d): process connection",
+ conn_ctx->id, conn_ctx->stream_id);
+
+ c2->current_thread = thread;
+ ap_run_process_connection(c2);
+
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c2,
+ "h2_c2(%s-%d): processing done",
+ conn_ctx->id, conn_ctx->stream_id);
+
+ return APR_SUCCESS;
+}
+
+static apr_status_t c2_process(h2_conn_ctx_t *conn_ctx, conn_rec *c)
+{
+ const h2_request *req = conn_ctx->request;
+ conn_state_t *cs = c->cs;
+ request_rec *r;
+ const char *tenc;
+ apr_time_t timeout;
+
+ r = h2_create_request_rec(conn_ctx->request, c, conn_ctx->beam_in == NULL);
+ if (!r) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c,
+ "h2_c2(%s-%d): create request_rec failed, r=NULL",
+ conn_ctx->id, conn_ctx->stream_id);
+ goto cleanup;
+ }
+ if (r->status != HTTP_OK) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c,
+ "h2_c2(%s-%d): create request_rec failed, r->status=%d",
+ conn_ctx->id, conn_ctx->stream_id, r->status);
+ goto cleanup;
+ }
+
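+    /* Remember whether the client used chunked transfer encoding; the
+     * request input filter then re-chunks the stream data so that standard
+     * HTTP/1 processing sees a well-formed chunked body. */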
+ tenc = apr_table_get(r->headers_in, "Transfer-Encoding");
+ conn_ctx->input_chunked = tenc && ap_is_chunked(r->pool, tenc);
+
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c,
+ "h2_c2(%s-%d): created request_rec for %s",
+ conn_ctx->id, conn_ctx->stream_id, r->the_request);
+ conn_ctx->server = r->server;
+ timeout = h2_config_geti64(r, r->server, H2_CONF_STREAM_TIMEOUT);
+ if (timeout <= 0) {
+ timeout = r->server->timeout;
+ }
+ h2_conn_ctx_set_timeout(conn_ctx, timeout);
+
+ if (h2_config_sgeti(conn_ctx->server, H2_CONF_COPY_FILES)) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c,
+ "h2_mplx(%s-%d): copy_files in output",
+ conn_ctx->id, conn_ctx->stream_id);
+ h2_beam_set_copy_files(conn_ctx->beam_out, 1);
+ }
+
+ ap_update_child_status(c->sbh, SERVER_BUSY_WRITE, r);
+ if (cs) {
+ cs->state = CONN_STATE_HANDLER;
+ }
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c,
+ "h2_c2(%s-%d): start process_request",
+ conn_ctx->id, conn_ctx->stream_id);
+
+    /* Add the raw bytes of the request (e.g. header frame lengths) to
+     * the logio for this request. */
+ if (req->raw_bytes && h2_c_logio_add_bytes_in) {
+ h2_c_logio_add_bytes_in(c, req->raw_bytes);
+ }
+
+ ap_process_request(r);
+ /* After the call to ap_process_request, the
+ * request pool may have been deleted. */
+ r = NULL;
+ if (conn_ctx->beam_out) {
+ h2_beam_close(conn_ctx->beam_out, c);
+ }
+
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c,
+ "h2_c2(%s-%d): process_request done",
+ conn_ctx->id, conn_ctx->stream_id);
+ if (cs)
+ cs->state = CONN_STATE_WRITE_COMPLETION;
+
+cleanup:
+ return APR_SUCCESS;
+}
+
+conn_rec *h2_c2_create(conn_rec *c1, apr_pool_t *parent,
+ apr_bucket_alloc_t *buckt_alloc)
+{
+ apr_pool_t *pool;
+ conn_rec *c2;
+ void *cfg;
+
+ ap_assert(c1);
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE3, 0, c1,
+ "h2_c2: create for c1(%ld)", c1->id);
+
+ /* We create a pool with its own allocator to be used for
+ * processing a request. This is the only way to have the processing
+ * independent of its parent pool in the sense that it can work in
+ * another thread.
+ */
+ apr_pool_create(&pool, parent);
+ apr_pool_tag(pool, "h2_c2_conn");
+
+ c2 = (conn_rec *) apr_palloc(pool, sizeof(conn_rec));
+ memcpy(c2, c1, sizeof(conn_rec));
+
+ c2->master = c1;
+ c2->pool = pool;
+ c2->conn_config = ap_create_conn_config(pool);
+ c2->notes = apr_table_make(pool, 5);
+ c2->input_filters = NULL;
+ c2->output_filters = NULL;
+ c2->keepalives = 0;
+#if AP_MODULE_MAGIC_AT_LEAST(20180903, 1)
+ c2->filter_conn_ctx = NULL;
+#endif
+ c2->bucket_alloc = apr_bucket_alloc_create(pool);
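+    /* The secondary connection gets its own bucket allocator, so buckets can
+     * be created and destroyed in the worker thread processing this stream. */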
+#if !AP_MODULE_MAGIC_AT_LEAST(20180720, 1)
+ c2->data_in_input_filters = 0;
+ c2->data_in_output_filters = 0;
+#endif
+ /* prevent mpm_event from making wrong assumptions about this connection,
+ * like e.g. using its socket for an async read check. */
+ c2->clogging_input_filters = 1;
+ c2->log = NULL;
+ c2->aborted = 0;
+ /* We cannot install the master connection socket on the secondary, as
+ * modules mess with timeouts/blocking of the socket, with
+ * unwanted side effects to the master connection processing.
+ * Fortunately, since we never use the secondary socket, we can just install
+ * a single, process-wide dummy and everyone is happy.
+ */
+ ap_set_module_config(c2->conn_config, &core_module, dummy_socket);
+ /* TODO: these should be unique to this thread */
+ c2->sbh = NULL; /*c1->sbh;*/
+ /* TODO: not all mpm modules have learned about secondary connections yet.
+ * copy their config from master to secondary.
+ */
+ if (mpm_module) {
+ cfg = ap_get_module_config(c1->conn_config, mpm_module);
+ ap_set_module_config(c2->conn_config, mpm_module, cfg);
+ }
+
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE3, 0, c2,
+ "h2_c2(%s): created", c2->log_id);
+ return c2;
+}
+
+static int h2_c2_hook_post_read_request(request_rec *r)
+{
+ h2_conn_ctx_t *conn_ctx = h2_conn_ctx_get(r->connection);
+
+ if (conn_ctx && conn_ctx->stream_id) {
+
+ ap_log_rerror(APLOG_MARK, APLOG_TRACE3, 0, r,
+ "h2_c2(%s-%d): adding request filters",
+ conn_ctx->id, conn_ctx->stream_id);
+
+ /* setup the correct filters to process the request for h2 */
+ ap_add_input_filter("H2_C2_REQUEST_IN", NULL, r, r->connection);
+
+ /* replace the core http filter that formats response headers
+ * in HTTP/1 with our own that collects status and headers */
+ ap_remove_output_filter_byhandle(r->output_filters, "HTTP_HEADER");
+
+ ap_add_output_filter("H2_C2_RESPONSE_OUT", NULL, r, r->connection);
+ ap_add_output_filter("H2_C2_TRAILERS_OUT", NULL, r, r->connection);
+ }
+ return DECLINED;
+}
+
+static int h2_c2_hook_process(conn_rec* c)
+{
+ h2_conn_ctx_t *ctx;
+
+ if (!c->master) {
+ return DECLINED;
+ }
+
+ ctx = h2_conn_ctx_get(c);
+ if (ctx->stream_id) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c,
+ "h2_h2, processing request directly");
+ c2_process(ctx, c);
+ return DONE;
+ }
+ else {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c,
+                      "secondary_conn(%ld): no h2 stream assigned?", c->id);
+ }
+ return DECLINED;
+}
+
+void h2_c2_register_hooks(void)
+{
+ /* When the connection processing actually starts, we might
+ * take over, if the connection is for a h2 stream.
+ */
+ ap_hook_process_connection(h2_c2_hook_process,
+ NULL, NULL, APR_HOOK_FIRST);
+ /* We need to manipulate the standard HTTP/1.1 protocol filters and
+ * install our own. This needs to be done very early. */
+ ap_hook_post_read_request(h2_c2_hook_post_read_request, NULL, NULL, APR_HOOK_REALLY_FIRST);
+ ap_hook_fixups(c2_hook_fixups, NULL, NULL, APR_HOOK_LAST);
+
+ ap_register_input_filter("H2_C2_NET_IN", h2_c2_filter_in,
+ NULL, AP_FTYPE_NETWORK);
+ ap_register_output_filter("H2_C2_NET_OUT", h2_c2_filter_out,
+ NULL, AP_FTYPE_NETWORK);
+ ap_register_output_filter("H2_C2_NET_CATCH_H1", h2_c2_filter_catch_h1_out,
+ NULL, AP_FTYPE_NETWORK);
+
+ ap_register_input_filter("H2_C2_REQUEST_IN", h2_c2_filter_request_in,
+ NULL, AP_FTYPE_PROTOCOL);
+ ap_register_output_filter("H2_C2_RESPONSE_OUT", h2_c2_filter_response_out,
+ NULL, AP_FTYPE_PROTOCOL);
+ ap_register_output_filter("H2_C2_TRAILERS_OUT", h2_c2_filter_trailers_out,
+ NULL, AP_FTYPE_PROTOCOL);
+}
+
+#endif /* else AP_HAS_RESPONSE_BUCKETS */
diff --git a/modules/http2/h2_c2.h b/modules/http2/h2_c2.h
new file mode 100644
index 0000000..f278382
--- /dev/null
+++ b/modules/http2/h2_c2.h
@@ -0,0 +1,57 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __mod_h2__h2_c2__
+#define __mod_h2__h2_c2__
+
+#include <http_core.h>
+
+#include "h2.h"
+
+const char *h2_conn_mpm_name(void);
+int h2_mpm_supported(void);
+
+/* Initialize this child process for h2 secondary connection work,
+ * to be called once during child init before multi processing
+ * starts.
+ */
+apr_status_t h2_c2_child_init(apr_pool_t *pool, server_rec *s);
+
+#if !AP_HAS_RESPONSE_BUCKETS
+
+conn_rec *h2_c2_create(conn_rec *c1, apr_pool_t *parent,
+ apr_bucket_alloc_t *buckt_alloc);
+
+/**
+ * Process a secondary connection for an HTTP/2 stream request.
+ */
+apr_status_t h2_c2_process(conn_rec *c, apr_thread_t *thread, int worker_id);
+
+#endif /* !AP_HAS_RESPONSE_BUCKETS */
+
+void h2_c2_destroy(conn_rec *c2);
+
+/**
+ * Abort the I/O processing of a secondary connection. Any
+ * in-/output beams will return errors and c2->aborted is set.
+ * @param c2 the secondary connection to abort
+ * @param from the connection this is invoked from
+ */
+void h2_c2_abort(conn_rec *c2, conn_rec *from);
+
+void h2_c2_register_hooks(void);
+
+#endif /* defined(__mod_h2__h2_c2__) */
diff --git a/modules/http2/h2_c2_filter.c b/modules/http2/h2_c2_filter.c
new file mode 100644
index 0000000..37254fc
--- /dev/null
+++ b/modules/http2/h2_c2_filter.c
@@ -0,0 +1,1034 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <assert.h>
+#include <stdio.h>
+
+#include <apr_date.h>
+#include <apr_lib.h>
+#include <apr_strings.h>
+
+#include <httpd.h>
+#include <http_core.h>
+#include <http_log.h>
+#include <http_connection.h>
+#include <http_protocol.h>
+#include <http_request.h>
+#include <util_time.h>
+
+#include "h2_private.h"
+#include "h2.h"
+#include "h2_config.h"
+#include "h2_conn_ctx.h"
+#include "h2_headers.h"
+#include "h2_c1.h"
+#include "h2_c2_filter.h"
+#include "h2_c2.h"
+#include "h2_mplx.h"
+#include "h2_request.h"
+#include "h2_util.h"
+
+
+#if AP_HAS_RESPONSE_BUCKETS
+
+apr_status_t h2_c2_filter_notes_out(ap_filter_t *f, apr_bucket_brigade *bb)
+{
+ apr_bucket *b;
+ request_rec *r_prev;
+ ap_bucket_response *resp;
+ const char *err;
+
+ if (!f->r) {
+ goto pass;
+ }
+
+ for (b = APR_BRIGADE_FIRST(bb);
+ b != APR_BRIGADE_SENTINEL(bb);
+ b = APR_BUCKET_NEXT(b))
+ {
+ if (AP_BUCKET_IS_RESPONSE(b)) {
+ resp = b->data;
+ if (resp->status >= 400 && f->r->prev) {
+ /* Error responses are commonly handled via internal
+ * redirects to error documents. That creates a new
+ * request_rec with 'prev' set to the original.
+                 * Each of these has its own 'notes'.
+                 * We'd like to copy interesting ones into the current 'r->notes',
+                 * since we then reset the HTTP/2 stream with H2-specific error codes.
+ */
+ for (r_prev = f->r; r_prev != NULL; r_prev = r_prev->prev) {
+ if ((err = apr_table_get(r_prev->notes, "ssl-renegotiate-forbidden"))) {
+ if (r_prev != f->r) {
+ apr_table_setn(resp->notes, "ssl-renegotiate-forbidden", err);
+ }
+ break;
+ }
+ }
+ }
+ else if (h2_config_rgeti(f->r, H2_CONF_PUSH) == 0
+ && h2_config_sgeti(f->r->server, H2_CONF_PUSH) != 0) {
+ /* location configuration turns off H2 PUSH handling */
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, f->c,
+ "h2_c2_filter_notes_out, turning PUSH off");
+ apr_table_setn(resp->notes, H2_PUSH_MODE_NOTE, "0");
+ }
+ }
+ }
+pass:
+ return ap_pass_brigade(f->next, bb);
+}
+
+apr_status_t h2_c2_filter_request_in(ap_filter_t *f,
+ apr_bucket_brigade *bb,
+ ap_input_mode_t mode,
+ apr_read_type_e block,
+ apr_off_t readbytes)
+{
+ h2_conn_ctx_t *conn_ctx;
+ apr_bucket *b;
+
+ /* just get out of the way for things we don't want to handle. */
+ if (mode != AP_MODE_READBYTES && mode != AP_MODE_GETLINE) {
+ return ap_get_brigade(f->next, bb, mode, block, readbytes);
+ }
+
+ /* This filter is a one-time wonder */
+ ap_remove_input_filter(f);
+
+ if (f->c->master && (conn_ctx = h2_conn_ctx_get(f->c)) && conn_ctx->stream_id) {
+ if (conn_ctx->request->http_status != H2_HTTP_STATUS_UNSET) {
+ /* error was encountered preparing this request */
+ b = ap_bucket_error_create(conn_ctx->request->http_status, NULL, f->r->pool,
+ f->c->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(bb, b);
+ return APR_SUCCESS;
+ }
+ b = h2_request_create_bucket(conn_ctx->request, f->r);
+ APR_BRIGADE_INSERT_TAIL(bb, b);
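+        /* Without an input beam the request carries no body; add EOS right away. */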
+ if (!conn_ctx->beam_in) {
+ b = apr_bucket_eos_create(f->c->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(bb, b);
+ }
+ return APR_SUCCESS;
+ }
+
+ return ap_get_brigade(f->next, bb, mode, block, readbytes);
+}
+
+#else /* AP_HAS_RESPONSE_BUCKETS */
+
+#define H2_FILTER_LOG(name, c, level, rv, msg, bb) \
+ do { \
+ if (APLOG_C_IS_LEVEL((c),(level))) { \
+ char buffer[4 * 1024]; \
+ apr_size_t len, bmax = sizeof(buffer)/sizeof(buffer[0]); \
+ len = h2_util_bb_print(buffer, bmax, "", "", (bb)); \
+ ap_log_cerror(APLOG_MARK, (level), rv, (c), \
+ "FILTER[%s]: %s %s", \
+ (name), (msg), len? buffer : ""); \
+ } \
+ } while (0)
+
+
+/* This routine is called by apr_table_do and merges all instances of
+ * the passed field values into a single array that will be further
+ * processed by some later routine. Originally intended to help split
+ * and recombine multiple Vary fields, though it is generic to any field
+ * consisting of comma/space-separated tokens.
+ */
+static int uniq_field_values(void *d, const char *key, const char *val)
+{
+ apr_array_header_t *values;
+ char *start;
+ char *e;
+ char **strpp;
+ int i;
+
+ (void)key;
+ values = (apr_array_header_t *)d;
+
+ e = apr_pstrdup(values->pool, val);
+
+ do {
+ /* Find a non-empty fieldname */
+
+ while (*e == ',' || apr_isspace(*e)) {
+ ++e;
+ }
+ if (*e == '\0') {
+ break;
+ }
+ start = e;
+ while (*e != '\0' && *e != ',' && !apr_isspace(*e)) {
+ ++e;
+ }
+ if (*e != '\0') {
+ *e++ = '\0';
+ }
+
+ /* Now add it to values if it isn't already represented.
+     * Could be replaced by an ap_array_strcasecmp() if we had one.
+ */
+ for (i = 0, strpp = (char **) values->elts; i < values->nelts;
+ ++i, ++strpp) {
+ if (*strpp && apr_strnatcasecmp(*strpp, start) == 0) {
+ break;
+ }
+ }
+ if (i == values->nelts) { /* if not found */
+ *(char **)apr_array_push(values) = start;
+ }
+ } while (*e != '\0');
+
+ return 1;
+}
+
+/*
+ * Since some clients choke violently on multiple Vary fields, or
+ * Vary fields with duplicate tokens, combine any multiples and remove
+ * any duplicates.
+ */
+static void fix_vary(request_rec *r)
+{
+ apr_array_header_t *varies;
+
+ varies = apr_array_make(r->pool, 5, sizeof(char *));
+
+ /* Extract all Vary fields from the headers_out, separate each into
+ * its comma-separated fieldname values, and then add them to varies
+ * if not already present in the array.
+ */
+ apr_table_do(uniq_field_values, varies, r->headers_out, "Vary", NULL);
+
+ /* If we found any, replace old Vary fields with unique-ified value */
+
+ if (varies->nelts > 0) {
+ apr_table_setn(r->headers_out, "Vary",
+ apr_array_pstrcat(r->pool, varies, ','));
+ }
+}
+
+static h2_headers *create_response(request_rec *r)
+{
+ const char *clheader;
+ const char *ctype;
+
+ /*
+ * Now that we are ready to send a response, we need to combine the two
+ * header field tables into a single table. If we don't do this, our
+ * later attempts to set or unset a given fieldname might be bypassed.
+ */
+ if (!apr_is_empty_table(r->err_headers_out)) {
+ r->headers_out = apr_table_overlay(r->pool, r->err_headers_out,
+ r->headers_out);
+ apr_table_clear(r->err_headers_out);
+ }
+
+ /*
+ * Remove the 'Vary' header field if the client can't handle it.
+ * Since this will have nasty effects on HTTP/1.1 caches, force
+ * the response into HTTP/1.0 mode.
+ */
+ if (apr_table_get(r->subprocess_env, "force-no-vary") != NULL) {
+ apr_table_unset(r->headers_out, "Vary");
+ r->proto_num = HTTP_VERSION(1,0);
+ apr_table_setn(r->subprocess_env, "force-response-1.0", "1");
+ }
+ else {
+ fix_vary(r);
+ }
+
+ /*
+ * Now remove any ETag response header field if earlier processing
+ * says so (such as a 'FileETag None' directive).
+ */
+ if (apr_table_get(r->notes, "no-etag") != NULL) {
+ apr_table_unset(r->headers_out, "ETag");
+ }
+
+ /* determine the protocol and whether we should use keepalives. */
+ ap_set_keepalive(r);
+
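+    /* Header-only status codes (e.g. 204, 304) must not carry a body; drop
+     * body-related headers and reset the content metadata. */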
+ if (AP_STATUS_IS_HEADER_ONLY(r->status)) {
+ apr_table_unset(r->headers_out, "Transfer-Encoding");
+ apr_table_unset(r->headers_out, "Content-Length");
+ r->content_type = r->content_encoding = NULL;
+ r->content_languages = NULL;
+ r->clength = r->chunked = 0;
+ }
+ else if (r->chunked) {
+ apr_table_mergen(r->headers_out, "Transfer-Encoding", "chunked");
+ apr_table_unset(r->headers_out, "Content-Length");
+ }
+
+ ctype = ap_make_content_type(r, r->content_type);
+ if (ctype) {
+ apr_table_setn(r->headers_out, "Content-Type", ctype);
+ }
+
+ if (r->content_encoding) {
+ apr_table_setn(r->headers_out, "Content-Encoding",
+ r->content_encoding);
+ }
+
+ if (!apr_is_empty_array(r->content_languages)) {
+ int i;
+ char *token;
+ char **languages = (char **)(r->content_languages->elts);
+ const char *field = apr_table_get(r->headers_out, "Content-Language");
+
+ while (field && (token = ap_get_list_item(r->pool, &field)) != NULL) {
+ for (i = 0; i < r->content_languages->nelts; ++i) {
+ if (!apr_strnatcasecmp(token, languages[i]))
+ break;
+ }
+ if (i == r->content_languages->nelts) {
+ *((char **) apr_array_push(r->content_languages)) = token;
+ }
+ }
+
+ field = apr_array_pstrcat(r->pool, r->content_languages, ',');
+ apr_table_setn(r->headers_out, "Content-Language", field);
+ }
+
+ /*
+     * Control cacheability for non-cacheable responses if not already set by
+ * some other part of the server configuration.
+ */
+ if (r->no_cache && !apr_table_get(r->headers_out, "Expires")) {
+ char *date = apr_palloc(r->pool, APR_RFC822_DATE_LEN);
+ ap_recent_rfc822_date(date, r->request_time);
+ apr_table_add(r->headers_out, "Expires", date);
+ }
+
+    /* This is a hack, but I can't find any way around it. The idea is that
+ * we don't want to send out 0 Content-Lengths if it is a head request.
+ * This happens when modules try to outsmart the server, and return
+ * if they see a HEAD request. Apache 1.3 handlers were supposed to
+ * just return in that situation, and the core handled the HEAD. In
+ * 2.0, if a handler returns, then the core sends an EOS bucket down
+ * the filter stack, and the content-length filter computes a C-L of
+ * zero and that gets put in the headers, and we end up sending a
+ * zero C-L to the client. We can't just remove the C-L filter,
+ * because well behaved 2.0 handlers will send their data down the stack,
+ * and we will compute a real C-L for the head request. RBB
+ */
+ if (r->header_only
+ && (clheader = apr_table_get(r->headers_out, "Content-Length"))
+ && !strcmp(clheader, "0")) {
+ apr_table_unset(r->headers_out, "Content-Length");
+ }
+
+ /*
+ * keep the set-by-proxy server and date headers, otherwise
+ * generate a new server header / date header
+ */
+ if (r->proxyreq == PROXYREQ_NONE
+ || !apr_table_get(r->headers_out, "Date")) {
+ char *date = apr_palloc(r->pool, APR_RFC822_DATE_LEN);
+ ap_recent_rfc822_date(date, r->request_time);
+ apr_table_setn(r->headers_out, "Date", date );
+ }
+ if (r->proxyreq == PROXYREQ_NONE
+ || !apr_table_get(r->headers_out, "Server")) {
+ const char *us = ap_get_server_banner();
+ if (us && *us) {
+ apr_table_setn(r->headers_out, "Server", us);
+ }
+ }
+
+ return h2_headers_rcreate(r, r->status, r->headers_out, r->pool);
+}
+
+typedef enum {
+ H2_RP_STATUS_LINE,
+ H2_RP_HEADER_LINE,
+ H2_RP_DONE
+} h2_rp_state_t;
+
+typedef struct h2_response_parser h2_response_parser;
+struct h2_response_parser {
+ const char *id;
+ h2_rp_state_t state;
+ conn_rec *c;
+ apr_pool_t *pool;
+ int http_status;
+ apr_array_header_t *hlines;
+ apr_bucket_brigade *tmp;
+ apr_bucket_brigade *saveto;
+};
+
+static apr_status_t parse_header(h2_response_parser *parser, char *line) {
+ const char *hline;
+ if (line[0] == ' ' || line[0] == '\t') {
+ char **plast;
+ /* continuation line from the header before this */
+ while (line[0] == ' ' || line[0] == '\t') {
+ ++line;
+ }
+
+ plast = apr_array_pop(parser->hlines);
+ if (plast == NULL) {
+ /* not well formed */
+ return APR_EINVAL;
+ }
+ hline = apr_psprintf(parser->pool, "%s %s", *plast, line);
+ }
+ else {
+ /* new header line */
+ hline = apr_pstrdup(parser->pool, line);
+ }
+ APR_ARRAY_PUSH(parser->hlines, const char*) = hline;
+ return APR_SUCCESS;
+}
+
+static apr_status_t get_line(h2_response_parser *parser, apr_bucket_brigade *bb,
+ char *line, apr_size_t len)
+{
+ apr_status_t status;
+
+ if (!parser->tmp) {
+ parser->tmp = apr_brigade_create(parser->pool, parser->c->bucket_alloc);
+ }
+ status = apr_brigade_split_line(parser->tmp, bb, APR_BLOCK_READ,
+ len);
+ if (status == APR_SUCCESS) {
+ --len;
+ status = apr_brigade_flatten(parser->tmp, line, &len);
+ if (status == APR_SUCCESS) {
+            /* we assume a line containing no 0 bytes and remove the trailing CRLF. */
+ line[len] = '\0';
+ /*
+ * XXX: What to do if there is an LF but no CRLF?
+ * Should we error out?
+ */
+ if (len >= 2 && !strcmp(H2_CRLF, line + len - 2)) {
+ len -= 2;
+ line[len] = '\0';
+ apr_brigade_cleanup(parser->tmp);
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, parser->c,
+ "h2_c2(%s): read response line: %s",
+ parser->id, line);
+ }
+ else {
+ apr_off_t brigade_length;
+
+ /*
+ * If the brigade parser->tmp becomes longer than our buffer
+ * for flattening we never have a chance to get a complete
+ * line. This can happen if we are called multiple times after
+ * previous calls did not find a H2_CRLF and we returned
+ * APR_EAGAIN. In this case parser->tmp (correctly) grows
+ * with each call to apr_brigade_split_line.
+ *
+ * XXX: Currently a stack based buffer of HUGE_STRING_LEN is
+ * used. This means we cannot cope with lines larger than
+ * HUGE_STRING_LEN which might be an issue.
+ */
+ status = apr_brigade_length(parser->tmp, 0, &brigade_length);
+ if ((status != APR_SUCCESS) || (brigade_length > (apr_off_t)len)) {
+ ap_log_cerror(APLOG_MARK, APLOG_ERR, 0, parser->c, APLOGNO(10257)
+ "h2_c2(%s): read response, line too long",
+ parser->id);
+ return APR_ENOSPC;
+ }
+ /* this does not look like a complete line yet */
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, parser->c,
+ "h2_c2(%s): read response, incomplete line: %s",
+ parser->id, line);
+ if (!parser->saveto) {
+ parser->saveto = apr_brigade_create(parser->pool,
+ parser->c->bucket_alloc);
+ }
+ /*
+             * Be on the safe side and save the parser->tmp brigade
+ * as it could contain transient buckets which could be
+ * invalid next time we are here.
+ *
+ * NULL for the filter parameter is ok since we
+ * provide our own brigade as second parameter
+ * and ap_save_brigade does not need to create one.
+ */
+ ap_save_brigade(NULL, &(parser->saveto), &(parser->tmp),
+ parser->tmp->p);
+ APR_BRIGADE_CONCAT(parser->tmp, parser->saveto);
+ return APR_EAGAIN;
+ }
+ }
+ }
+ apr_brigade_cleanup(parser->tmp);
+ return status;
+}
+
+static apr_table_t *make_table(h2_response_parser *parser)
+{
+ apr_array_header_t *hlines = parser->hlines;
+ if (hlines) {
+ apr_table_t *headers = apr_table_make(parser->pool, hlines->nelts);
+ int i;
+
+ for (i = 0; i < hlines->nelts; ++i) {
+ char *hline = ((char **)hlines->elts)[i];
+ char *sep = ap_strchr(hline, ':');
+ if (!sep) {
+ ap_log_cerror(APLOG_MARK, APLOG_WARNING, APR_EINVAL, parser->c,
+ APLOGNO(02955) "h2_c2(%s): invalid header[%d] '%s'",
+ parser->id, i, (char*)hline);
+ /* not valid format, abort */
+ return NULL;
+ }
+ (*sep++) = '\0';
+ while (*sep == ' ' || *sep == '\t') {
+ ++sep;
+ }
+
+ if (!h2_util_ignore_resp_header(hline)) {
+ apr_table_merge(headers, hline, sep);
+ }
+ }
+ return headers;
+ }
+ else {
+ return apr_table_make(parser->pool, 0);
+ }
+}
+
+static apr_status_t pass_response(h2_conn_ctx_t *conn_ctx, ap_filter_t *f,
+ h2_response_parser *parser)
+{
+ apr_bucket *b;
+ apr_status_t status;
+
+ h2_headers *response = h2_headers_create(parser->http_status,
+ make_table(parser),
+ NULL, 0, parser->pool);
+ apr_brigade_cleanup(parser->tmp);
+ b = h2_bucket_headers_create(parser->c->bucket_alloc, response);
+ APR_BRIGADE_INSERT_TAIL(parser->tmp, b);
+ b = apr_bucket_flush_create(parser->c->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(parser->tmp, b);
+ status = ap_pass_brigade(f->next, parser->tmp);
+ apr_brigade_cleanup(parser->tmp);
+
+ /* reset parser for possible next response */
+ parser->state = H2_RP_STATUS_LINE;
+ apr_array_clear(parser->hlines);
+
+ if (response->status >= 200) {
+ conn_ctx->has_final_response = 1;
+ }
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, parser->c,
+ APLOGNO(03197) "h2_c2(%s): passed response %d",
+ parser->id, response->status);
+ return status;
+}
+
+static apr_status_t parse_status(h2_response_parser *parser, char *line)
+{
+ int sindex = (apr_date_checkmask(line, "HTTP/#.# ###*")? 9 :
+ (apr_date_checkmask(line, "HTTP/# ###*")? 7 : 0));
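+    /* e.g. "HTTP/1.1 100 Continue" matches the first mask and its 3-digit
+     * status starts at offset 9; "HTTP/2 200 ..." would start at offset 7. */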
+ if (sindex > 0) {
+ int k = sindex + 3;
+ char keepchar = line[k];
+ line[k] = '\0';
+ parser->http_status = atoi(&line[sindex]);
+ line[k] = keepchar;
+ parser->state = H2_RP_HEADER_LINE;
+
+ return APR_SUCCESS;
+ }
+ /* Seems like there is garbage on the connection. May be a leftover
+ * from a previous proxy request.
+ * This should only happen if the H2_RESPONSE filter is not yet in
+     * place (post_read_request has not been reached) and the handler wants
+     * to write something, probably just the interim response we are
+ * waiting for. But if there is other data hanging around before
+ * that, this needs to fail. */
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, parser->c, APLOGNO(03467)
+ "h2_c2(%s): unable to parse status line: %s",
+ parser->id, line);
+ return APR_EINVAL;
+}
+
+static apr_status_t parse_response(h2_response_parser *parser,
+ h2_conn_ctx_t *conn_ctx,
+ ap_filter_t* f, apr_bucket_brigade *bb)
+{
+ char line[HUGE_STRING_LEN];
+ apr_status_t status = APR_SUCCESS;
+
+ while (!APR_BRIGADE_EMPTY(bb) && status == APR_SUCCESS) {
+ switch (parser->state) {
+ case H2_RP_STATUS_LINE:
+ case H2_RP_HEADER_LINE:
+ status = get_line(parser, bb, line, sizeof(line));
+ if (status == APR_EAGAIN) {
+ /* need more data */
+ return APR_SUCCESS;
+ }
+ else if (status != APR_SUCCESS) {
+ return status;
+ }
+ if (parser->state == H2_RP_STATUS_LINE) {
+ /* instead of parsing, just take it directly */
+ status = parse_status(parser, line);
+ }
+ else if (line[0] == '\0') {
+ /* end of headers, pass response onward */
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, parser->c,
+ "h2_c2(%s): end of response", parser->id);
+ return pass_response(conn_ctx, f, parser);
+ }
+ else {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, parser->c,
+ "h2_c2(%s): response header %s", parser->id, line);
+ status = parse_header(parser, line);
+ }
+ break;
+
+ default:
+ return status;
+ }
+ }
+ return status;
+}
+
+apr_status_t h2_c2_filter_catch_h1_out(ap_filter_t* f, apr_bucket_brigade* bb)
+{
+ h2_conn_ctx_t *conn_ctx = h2_conn_ctx_get(f->c);
+ h2_response_parser *parser = f->ctx;
+ apr_status_t rv;
+
+ ap_assert(conn_ctx);
+ H2_FILTER_LOG("c2_catch_h1_out", f->c, APLOG_TRACE2, 0, "check", bb);
+
+ if (!f->c->aborted && !conn_ctx->has_final_response) {
+ if (!parser) {
+ parser = apr_pcalloc(f->c->pool, sizeof(*parser));
+ parser->id = apr_psprintf(f->c->pool, "%s-%d", conn_ctx->id, conn_ctx->stream_id);
+ parser->pool = f->c->pool;
+ parser->c = f->c;
+ parser->state = H2_RP_STATUS_LINE;
+ parser->hlines = apr_array_make(parser->pool, 10, sizeof(char *));
+ f->ctx = parser;
+ }
+
+ if (!APR_BRIGADE_EMPTY(bb)) {
+ apr_bucket *b = APR_BRIGADE_FIRST(bb);
+ if (AP_BUCKET_IS_EOR(b)) {
+ /* TODO: Yikes, this happens when errors are encountered on input
+                 * before anything from the response has been processed. The
+ * ap_die_r() call will do nothing in certain conditions.
+ */
+ int result = ap_map_http_request_error(conn_ctx->last_err,
+ HTTP_INTERNAL_SERVER_ERROR);
+ request_rec *r = h2_create_request_rec(conn_ctx->request, f->c, 1);
+ ap_die((result >= 400)? result : HTTP_INTERNAL_SERVER_ERROR, r);
+ b = ap_bucket_eor_create(f->c->bucket_alloc, r);
+ APR_BRIGADE_INSERT_TAIL(bb, b);
+ }
+ }
+ /* There are cases where we need to parse a serialized http/1.1 response.
+ * One example is a 100-continue answer via a mod_proxy setup. */
+ while (bb && !f->c->aborted && !conn_ctx->has_final_response) {
+ rv = parse_response(parser, conn_ctx, f, bb);
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, rv, f->c,
+ "h2_c2(%s): parsed response", parser->id);
+ if (APR_BRIGADE_EMPTY(bb) || APR_SUCCESS != rv) {
+ return rv;
+ }
+ }
+ }
+
+ return ap_pass_brigade(f->next, bb);
+}
+
+apr_status_t h2_c2_filter_response_out(ap_filter_t *f, apr_bucket_brigade *bb)
+{
+ h2_conn_ctx_t *conn_ctx = h2_conn_ctx_get(f->c);
+ request_rec *r = f->r;
+ apr_bucket *b, *bresp, *body_bucket = NULL, *next;
+ ap_bucket_error *eb = NULL;
+ h2_headers *response = NULL;
+ int headers_passing = 0;
+
+ H2_FILTER_LOG("c2_response_out", f->c, APLOG_TRACE1, 0, "called with", bb);
+
+ if (f->c->aborted || !conn_ctx || conn_ctx->has_final_response) {
+ return ap_pass_brigade(f->next, bb);
+ }
+
+ if (!conn_ctx->has_final_response) {
+ /* check, if we need to send the response now. Until we actually
+ * see a DATA bucket or some EOS/EOR, we do not do so. */
+ for (b = APR_BRIGADE_FIRST(bb);
+ b != APR_BRIGADE_SENTINEL(bb);
+ b = APR_BUCKET_NEXT(b))
+ {
+ if (AP_BUCKET_IS_ERROR(b) && !eb) {
+ eb = b->data;
+ }
+ else if (AP_BUCKET_IS_EOC(b)) {
+                /* If we see an EOC bucket, it is a signal that we should get out
+                 * of the way and do nothing.
+                 */
+ ap_remove_output_filter(f);
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, f->c,
+ "h2_c2(%s): eoc bucket passed", conn_ctx->id);
+ return ap_pass_brigade(f->next, bb);
+ }
+ else if (H2_BUCKET_IS_HEADERS(b)) {
+ headers_passing = 1;
+ }
+ else if (!APR_BUCKET_IS_FLUSH(b)) {
+ body_bucket = b;
+ break;
+ }
+ }
+
+ if (eb) {
+ int st = eb->status;
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, f->c, APLOGNO(03047)
+ "h2_c2(%s): err bucket status=%d",
+ conn_ctx->id, st);
+ /* throw everything away and replace it with the error response
+ * generated by ap_die() */
+ apr_brigade_cleanup(bb);
+ ap_die(st, r);
+ return AP_FILTER_ERROR;
+ }
+
+ if (body_bucket || !headers_passing) {
+            /* time to insert the response bucket: before the body, or at the
+             * brigade head if no h2_headers bucket is passed, e.g. when the
+             * response is empty */
+ response = create_response(r);
+ if (response == NULL) {
+ ap_log_cerror(APLOG_MARK, APLOG_NOTICE, 0, f->c, APLOGNO(03048)
+ "h2_c2(%s): unable to create response", conn_ctx->id);
+ return APR_ENOMEM;
+ }
+
+ bresp = h2_bucket_headers_create(f->c->bucket_alloc, response);
+ if (body_bucket) {
+ APR_BUCKET_INSERT_BEFORE(body_bucket, bresp);
+ }
+ else {
+ APR_BRIGADE_INSERT_HEAD(bb, bresp);
+ }
+ conn_ctx->has_final_response = 1;
+ r->sent_bodyct = 1;
+ ap_remove_output_filter_byhandle(f->r->output_filters, "H2_C2_NET_CATCH_H1");
+ }
+ }
+
+ if (r->header_only || AP_STATUS_IS_HEADER_ONLY(r->status)) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, f->c,
+ "h2_c2(%s): headers only, cleanup output brigade", conn_ctx->id);
+ b = body_bucket? body_bucket : APR_BRIGADE_FIRST(bb);
+ while (b != APR_BRIGADE_SENTINEL(bb)) {
+ next = APR_BUCKET_NEXT(b);
+ if (APR_BUCKET_IS_EOS(b) || AP_BUCKET_IS_EOR(b)) {
+ break;
+ }
+ if (!H2_BUCKET_IS_HEADERS(b)) {
+ APR_BUCKET_REMOVE(b);
+ apr_bucket_destroy(b);
+ }
+ b = next;
+ }
+ }
+ if (conn_ctx->has_final_response) {
+ /* lets get out of the way, our task is done */
+ ap_remove_output_filter(f);
+ }
+ return ap_pass_brigade(f->next, bb);
+}
+
+
+struct h2_chunk_filter_t {
+ const char *id;
+ int eos_chunk_added;
+ apr_bucket_brigade *bbchunk;
+ apr_off_t chunked_total;
+};
+typedef struct h2_chunk_filter_t h2_chunk_filter_t;
+
+
+static void make_chunk(conn_rec *c, h2_chunk_filter_t *fctx, apr_bucket_brigade *bb,
+ apr_bucket *first, apr_off_t chunk_len,
+ apr_bucket *tail)
+{
+ /* Surround the buckets [first, tail[ with new buckets carrying the
+ * HTTP/1.1 chunked encoding format. If tail is NULL, the chunk extends
+ * to the end of the brigade. */
+ char buffer[128];
+ apr_bucket *b;
+ apr_size_t len;
+
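+    /* HTTP/1.1 chunk framing: a hex length line, the data, then CRLF;
+     * e.g. a 26 byte chunk becomes "1a\r\n" + <26 data bytes> + "\r\n". */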
+ len = (apr_size_t)apr_snprintf(buffer, H2_ALEN(buffer),
+ "%"APR_UINT64_T_HEX_FMT"\r\n", (apr_uint64_t)chunk_len);
+ b = apr_bucket_heap_create(buffer, len, NULL, bb->bucket_alloc);
+ APR_BUCKET_INSERT_BEFORE(first, b);
+ b = apr_bucket_immortal_create("\r\n", 2, bb->bucket_alloc);
+ if (tail) {
+ APR_BUCKET_INSERT_BEFORE(tail, b);
+ }
+ else {
+ APR_BRIGADE_INSERT_TAIL(bb, b);
+ }
+ fctx->chunked_total += chunk_len;
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, c,
+ "h2_c2(%s): added chunk %ld, total %ld",
+ fctx->id, (long)chunk_len, (long)fctx->chunked_total);
+}
+
+static int ser_header(void *ctx, const char *name, const char *value)
+{
+ apr_bucket_brigade *bb = ctx;
+ apr_brigade_printf(bb, NULL, NULL, "%s: %s\r\n", name, value);
+ return 1;
+}
+
+static apr_status_t read_and_chunk(ap_filter_t *f, h2_conn_ctx_t *conn_ctx,
+ apr_read_type_e block) {
+ h2_chunk_filter_t *fctx = f->ctx;
+ request_rec *r = f->r;
+ apr_status_t status = APR_SUCCESS;
+
+ if (!fctx->bbchunk) {
+ fctx->bbchunk = apr_brigade_create(r->pool, f->c->bucket_alloc);
+ }
+
+ if (APR_BRIGADE_EMPTY(fctx->bbchunk)) {
+ apr_bucket *b, *next, *first_data = NULL;
+ apr_bucket_brigade *tmp;
+ apr_off_t bblen = 0;
+
+ /* get more data from the lower layer filters. Always do this
+         * in larger pieces, since we handle the read modes ourselves. */
+ status = ap_get_brigade(f->next, fctx->bbchunk,
+ AP_MODE_READBYTES, block, conn_ctx->mplx->stream_max_mem);
+ if (status != APR_SUCCESS) {
+ return status;
+ }
+
+ for (b = APR_BRIGADE_FIRST(fctx->bbchunk);
+ b != APR_BRIGADE_SENTINEL(fctx->bbchunk);
+ b = next) {
+ next = APR_BUCKET_NEXT(b);
+ if (APR_BUCKET_IS_METADATA(b)) {
+ if (first_data) {
+ make_chunk(f->c, fctx, fctx->bbchunk, first_data, bblen, b);
+ first_data = NULL;
+ }
+
+ if (H2_BUCKET_IS_HEADERS(b)) {
+ h2_headers *headers = h2_bucket_headers_get(b);
+
+ ap_assert(headers);
+ ap_log_rerror(APLOG_MARK, APLOG_TRACE2, 0, r,
+ "h2_c2(%s-%d): receiving trailers",
+ conn_ctx->id, conn_ctx->stream_id);
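+                    /* Serialize as the HTTP/1.1 last-chunk: "0\r\n", then any
+                     * trailer fields, then the terminating CRLF. */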
+ tmp = apr_brigade_split_ex(fctx->bbchunk, b, NULL);
+ if (!apr_is_empty_table(headers->headers)) {
+ status = apr_brigade_puts(fctx->bbchunk, NULL, NULL, "0\r\n");
+ apr_table_do(ser_header, fctx->bbchunk, headers->headers, NULL);
+ status = apr_brigade_puts(fctx->bbchunk, NULL, NULL, "\r\n");
+ }
+ else {
+ status = apr_brigade_puts(fctx->bbchunk, NULL, NULL, "0\r\n\r\n");
+ }
+ r->trailers_in = apr_table_clone(r->pool, headers->headers);
+ APR_BUCKET_REMOVE(b);
+ apr_bucket_destroy(b);
+ APR_BRIGADE_CONCAT(fctx->bbchunk, tmp);
+ apr_brigade_destroy(tmp);
+ fctx->eos_chunk_added = 1;
+ }
+ else if (APR_BUCKET_IS_EOS(b)) {
+ ap_log_rerror(APLOG_MARK, APLOG_TRACE2, 0, r,
+ "h2_c2(%s-%d): receiving eos",
+ conn_ctx->id, conn_ctx->stream_id);
+ if (!fctx->eos_chunk_added) {
+ tmp = apr_brigade_split_ex(fctx->bbchunk, b, NULL);
+ status = apr_brigade_puts(fctx->bbchunk, NULL, NULL, "0\r\n\r\n");
+ APR_BRIGADE_CONCAT(fctx->bbchunk, tmp);
+ apr_brigade_destroy(tmp);
+ }
+ fctx->eos_chunk_added = 0;
+ }
+ }
+ else if (b->length == 0) {
+ APR_BUCKET_REMOVE(b);
+ apr_bucket_destroy(b);
+ }
+ else {
+ if (!first_data) {
+ first_data = b;
+ bblen = 0;
+ }
+ bblen += b->length;
+ }
+ }
+
+ if (first_data) {
+ make_chunk(f->c, fctx, fctx->bbchunk, first_data, bblen, NULL);
+ }
+ }
+ return status;
+}
+
+apr_status_t h2_c2_filter_request_in(ap_filter_t* f,
+ apr_bucket_brigade* bb,
+ ap_input_mode_t mode,
+ apr_read_type_e block,
+ apr_off_t readbytes)
+{
+ h2_conn_ctx_t *conn_ctx = h2_conn_ctx_get(f->c);
+ h2_chunk_filter_t *fctx = f->ctx;
+ request_rec *r = f->r;
+ apr_status_t status = APR_SUCCESS;
+ apr_bucket *b, *next;
+ core_server_config *conf =
+ (core_server_config *) ap_get_module_config(r->server->module_config,
+ &core_module);
+ ap_assert(conn_ctx);
+
+ if (!fctx) {
+ fctx = apr_pcalloc(r->pool, sizeof(*fctx));
+ fctx->id = apr_psprintf(r->pool, "%s-%d", conn_ctx->id, conn_ctx->stream_id);
+ f->ctx = fctx;
+ }
+
+ ap_log_rerror(APLOG_MARK, APLOG_TRACE2, 0, f->r,
+ "h2_c2(%s-%d): request input, mode=%d, block=%d, "
+ "readbytes=%ld, exp=%d",
+ conn_ctx->id, conn_ctx->stream_id, mode, block,
+ (long)readbytes, r->expecting_100);
+ if (!conn_ctx->input_chunked) {
+ status = ap_get_brigade(f->next, bb, mode, block, readbytes);
+ /* pipe data through, just take care of trailers */
+ for (b = APR_BRIGADE_FIRST(bb);
+ b != APR_BRIGADE_SENTINEL(bb); b = next) {
+ next = APR_BUCKET_NEXT(b);
+ if (H2_BUCKET_IS_HEADERS(b)) {
+ h2_headers *headers = h2_bucket_headers_get(b);
+ ap_assert(headers);
+ ap_log_rerror(APLOG_MARK, APLOG_TRACE2, 0, r,
+ "h2_c2(%s-%d): receiving trailers",
+ conn_ctx->id, conn_ctx->stream_id);
+ r->trailers_in = headers->headers;
+ if (conf && conf->merge_trailers == AP_MERGE_TRAILERS_ENABLE) {
+ r->headers_in = apr_table_overlay(r->pool, r->headers_in,
+ r->trailers_in);
+ }
+ APR_BUCKET_REMOVE(b);
+ apr_bucket_destroy(b);
+ ap_remove_input_filter(f);
+
+ if (headers->raw_bytes && h2_c_logio_add_bytes_in) {
+ h2_c_logio_add_bytes_in(f->c, headers->raw_bytes);
+ }
+ break;
+ }
+ }
+ return status;
+ }
+
+ /* Things are more complicated. The standard HTTP input filter, which
+     * does a lot of what we do not want to duplicate, also cares about chunked
+ * transfer encoding and trailers.
+ * We need to simulate chunked encoding for it to be happy.
+ */
+ if ((status = read_and_chunk(f, conn_ctx, block)) != APR_SUCCESS) {
+ return status;
+ }
+
+ if (mode == AP_MODE_EXHAUSTIVE) {
+ /* return all we have */
+ APR_BRIGADE_CONCAT(bb, fctx->bbchunk);
+ }
+ else if (mode == AP_MODE_READBYTES) {
+ status = h2_brigade_concat_length(bb, fctx->bbchunk, readbytes);
+ }
+ else if (mode == AP_MODE_SPECULATIVE) {
+ status = h2_brigade_copy_length(bb, fctx->bbchunk, readbytes);
+ }
+ else if (mode == AP_MODE_GETLINE) {
+        /* we are reading a single LF line, e.g. the HTTP headers.
+         * this has the nasty side effect of splitting the bucket, even
+         * though it ends with CRLF, creating a 0 length bucket */
+ status = apr_brigade_split_line(bb, fctx->bbchunk, block, HUGE_STRING_LEN);
+ if (APLOGctrace1(f->c)) {
+ char buffer[1024];
+ apr_size_t len = sizeof(buffer)-1;
+ apr_brigade_flatten(bb, buffer, &len);
+ buffer[len] = 0;
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, f->c,
+ "h2_c2(%s-%d): getline: %s",
+ conn_ctx->id, conn_ctx->stream_id, buffer);
+ }
+ }
+ else {
+ /* Hmm, well. There is mode AP_MODE_EATCRLF, but we chose not
+ * to support it. Seems to work. */
+ ap_log_cerror(APLOG_MARK, APLOG_ERR, APR_ENOTIMPL, f->c,
+ APLOGNO(02942)
+ "h2_c2, unsupported READ mode %d", mode);
+ status = APR_ENOTIMPL;
+ }
+
+ h2_util_bb_log(f->c, conn_ctx->stream_id, APLOG_TRACE2, "returning input", bb);
+ return status;
+}
+
+apr_status_t h2_c2_filter_trailers_out(ap_filter_t *f, apr_bucket_brigade *bb)
+{
+ h2_conn_ctx_t *conn_ctx = h2_conn_ctx_get(f->c);
+ request_rec *r = f->r;
+ apr_bucket *b, *e;
+
+ if (conn_ctx && r) {
+ /* Detect the EOS/EOR bucket and forward any trailers that may have
+ * been set to our h2_headers.
+ */
+ for (b = APR_BRIGADE_FIRST(bb);
+ b != APR_BRIGADE_SENTINEL(bb);
+ b = APR_BUCKET_NEXT(b))
+ {
+ if ((APR_BUCKET_IS_EOS(b) || AP_BUCKET_IS_EOR(b))
+ && r->trailers_out && !apr_is_empty_table(r->trailers_out)) {
+ h2_headers *headers;
+ apr_table_t *trailers;
+
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, f->c, APLOGNO(03049)
+ "h2_c2(%s-%d): sending trailers",
+ conn_ctx->id, conn_ctx->stream_id);
+ trailers = apr_table_clone(r->pool, r->trailers_out);
+ headers = h2_headers_rcreate(r, HTTP_OK, trailers, r->pool);
+ e = h2_bucket_headers_create(bb->bucket_alloc, headers);
+ APR_BUCKET_INSERT_BEFORE(b, e);
+ apr_table_clear(r->trailers_out);
+ ap_remove_output_filter(f);
+ break;
+ }
+ }
+ }
+
+ return ap_pass_brigade(f->next, bb);
+}
+
+#endif /* else #if AP_HAS_RESPONSE_BUCKETS */
diff --git a/modules/http2/h2_c2_filter.h b/modules/http2/h2_c2_filter.h
new file mode 100644
index 0000000..c6f50dd
--- /dev/null
+++ b/modules/http2/h2_c2_filter.h
@@ -0,0 +1,68 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __mod_h2__h2_c2_filter__
+#define __mod_h2__h2_c2_filter__
+
+#include "h2.h"
+
+/**
+ * Input filter on secondary connections that inserts the REQUEST bucket
+ * with the request to perform and then removes itself.
+ */
+apr_status_t h2_c2_filter_request_in(ap_filter_t *f,
+ apr_bucket_brigade *bb,
+ ap_input_mode_t mode,
+ apr_read_type_e block,
+ apr_off_t readbytes);
+
+#if AP_HAS_RESPONSE_BUCKETS
+
+/**
+ * Output filter that inspects the request_rec->notes of the request
+ * itself and possible internal redirects to detect conditions that
+ * merit specific HTTP/2 response codes, such as 421.
+ */
+apr_status_t h2_c2_filter_notes_out(ap_filter_t *f, apr_bucket_brigade *bb);
+
+#else /* AP_HAS_RESPONSE_BUCKETS */
+
+/**
+ * h2_from_h1 parses an HTTP/1.1 response into
+ * - response status
+ * - a list of header values
+ * - a series of bytes that represent the response body alone, without
+ * any meta data, such as inserted by chunked transfer encoding.
+ *
+ * All data is allocated from the stream memory pool.
+ *
+ * Again, see comments in h2_request: ideally we would take the headers
+ * and status from the httpd structures instead of parsing them here, but
+ * we need to have all handlers and filters involved in request/response
+ * processing, so this seems to be the way for now.
+ */
+struct h2_headers;
+struct h2_response_parser;
+
+apr_status_t h2_c2_filter_catch_h1_out(ap_filter_t* f, apr_bucket_brigade* bb);
+
+apr_status_t h2_c2_filter_response_out(ap_filter_t *f, apr_bucket_brigade *bb);
+
+apr_status_t h2_c2_filter_trailers_out(ap_filter_t *f, apr_bucket_brigade *bb);
+
+#endif /* else AP_HAS_RESPONSE_BUCKETS */
+
+#endif /* defined(__mod_h2__h2_c2_filter__) */
diff --git a/modules/http2/h2_config.c b/modules/http2/h2_config.c
new file mode 100644
index 0000000..eea4be2
--- /dev/null
+++ b/modules/http2/h2_config.c
@@ -0,0 +1,943 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <assert.h>
+
+#include <apr_hash.h>
+#include <apr_lib.h>
+
+#include <httpd.h>
+#include <http_core.h>
+#include <http_config.h>
+#include <http_log.h>
+#include <http_vhost.h>
+
+#include <ap_mpm.h>
+
+#include <apr_strings.h>
+
+#include "h2.h"
+#include "h2_conn_ctx.h"
+#include "h2_c1.h"
+#include "h2_config.h"
+#include "h2_protocol.h"
+#include "h2_private.h"
+
+#define DEF_VAL (-1)
+
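+/* Read setting 'n' from config (a) if set there explicitly, otherwise fall
+ * back to config (b); DEF_VAL marks "not configured". */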
+#define H2_CONFIG_GET(a, b, n) \
+ (((a)->n == DEF_VAL)? (b) : (a))->n
+
+#define H2_CONFIG_SET(a, n, v) \
+ ((a)->n = v)
+
+#define CONFIG_CMD_SET(cmd,dir,var,val) \
+ h2_config_seti(((cmd)->path? (dir) : NULL), h2_config_sget((cmd)->server), var, val)
+
+#define CONFIG_CMD_SET64(cmd,dir,var,val) \
+ h2_config_seti64(((cmd)->path? (dir) : NULL), h2_config_sget((cmd)->server), var, val)
+
+/* Apache httpd module configuration for h2. */
+typedef struct h2_config {
+ const char *name;
+ int h2_max_streams; /* max concurrent # streams (http2) */
+ int h2_window_size; /* stream window size (http2) */
+ int min_workers; /* min # of worker threads/child */
+ int max_workers; /* max # of worker threads/child */
+ apr_interval_time_t idle_limit; /* max duration for idle workers */
+ int stream_max_mem_size; /* max # bytes held in memory/stream */
+ int h2_direct; /* if mod_h2 is active directly */
+ int modern_tls_only; /* Accept only modern TLS in HTTP/2 connections */
+ int h2_upgrade; /* Allow HTTP/1 upgrade to h2/h2c */
+ apr_int64_t tls_warmup_size; /* Amount of TLS data to send before going full write size */
+ int tls_cooldown_secs; /* Seconds of idle time before going back to small TLS records */
+ int h2_push; /* if HTTP/2 server push is enabled */
+ struct apr_hash_t *priorities; /* map of content-type to h2_priority records */
+
+ int push_diary_size; /* # of entries in push diary */
+ int copy_files; /* if files shall be copied vs setaside on output */
+ apr_array_header_t *push_list; /* list of h2_push_res configurations */
+ int early_hints; /* support status code 103 */
+ int padding_bits;
+ int padding_always;
+ int output_buffered;
+ apr_interval_time_t stream_timeout;/* beam timeout */
+} h2_config;
+
+typedef struct h2_dir_config {
+ const char *name;
+ int h2_upgrade; /* Allow HTTP/1 upgrade to h2/h2c */
+ int h2_push; /* if HTTP/2 server push is enabled */
+ apr_array_header_t *push_list; /* list of h2_push_res configurations */
+ int early_hints; /* support status code 103 */
+ apr_interval_time_t stream_timeout;/* beam timeout */
+} h2_dir_config;
+
+
+static h2_config defconf = {
+ "default",
+ 100, /* max_streams */
+ H2_INITIAL_WINDOW_SIZE, /* window_size */
+ -1, /* min workers */
+ -1, /* max workers */
+ apr_time_from_sec(10 * 60), /* workers idle limit */
+ 32 * 1024, /* stream max mem size */
+ -1, /* h2 direct mode */
+ 1, /* modern TLS only */
+ -1, /* HTTP/1 Upgrade support */
+ 1024*1024, /* TLS warmup size */
+ 1, /* TLS cooldown secs */
+ 1, /* HTTP/2 server push enabled */
+ NULL, /* map of content-type to priorities */
+ 256, /* push diary size */
+ 0, /* copy files across threads */
+ NULL, /* push list */
+ 0, /* early hints, http status 103 */
+ 0, /* padding bits */
+ 1, /* padding always */
+ 1, /* stream output buffered */
+ -1, /* beam timeout */
+};
+
+static h2_dir_config defdconf = {
+ "default",
+ -1, /* HTTP/1 Upgrade support */
+ -1, /* HTTP/2 server push enabled */
+ NULL, /* push list */
+ -1, /* early hints, http status 103 */
+ -1, /* beam timeout */
+};
+
+void h2_config_init(apr_pool_t *pool)
+{
+ (void)pool;
+}
+
+void *h2_config_create_svr(apr_pool_t *pool, server_rec *s)
+{
+ h2_config *conf = (h2_config *)apr_pcalloc(pool, sizeof(h2_config));
+ char *name = apr_pstrcat(pool, "srv[", s->defn_name, "]", NULL);
+
+ conf->name = name;
+ conf->h2_max_streams = DEF_VAL;
+ conf->h2_window_size = DEF_VAL;
+ conf->min_workers = DEF_VAL;
+ conf->max_workers = DEF_VAL;
+ conf->idle_limit = DEF_VAL;
+ conf->stream_max_mem_size = DEF_VAL;
+ conf->h2_direct = DEF_VAL;
+ conf->modern_tls_only = DEF_VAL;
+ conf->h2_upgrade = DEF_VAL;
+ conf->tls_warmup_size = DEF_VAL;
+ conf->tls_cooldown_secs = DEF_VAL;
+ conf->h2_push = DEF_VAL;
+ conf->priorities = NULL;
+ conf->push_diary_size = DEF_VAL;
+ conf->copy_files = DEF_VAL;
+ conf->push_list = NULL;
+ conf->early_hints = DEF_VAL;
+ conf->padding_bits = DEF_VAL;
+ conf->padding_always = DEF_VAL;
+ conf->output_buffered = DEF_VAL;
+ conf->stream_timeout = DEF_VAL;
+ return conf;
+}
+
+static void *h2_config_merge(apr_pool_t *pool, void *basev, void *addv)
+{
+ h2_config *base = (h2_config *)basev;
+ h2_config *add = (h2_config *)addv;
+ h2_config *n = (h2_config *)apr_pcalloc(pool, sizeof(h2_config));
+ char *name = apr_pstrcat(pool, "merged[", add->name, ", ", base->name, "]", NULL);
+ n->name = name;
+
+ n->h2_max_streams = H2_CONFIG_GET(add, base, h2_max_streams);
+ n->h2_window_size = H2_CONFIG_GET(add, base, h2_window_size);
+ n->min_workers = H2_CONFIG_GET(add, base, min_workers);
+ n->max_workers = H2_CONFIG_GET(add, base, max_workers);
+ n->idle_limit = H2_CONFIG_GET(add, base, idle_limit);
+ n->stream_max_mem_size = H2_CONFIG_GET(add, base, stream_max_mem_size);
+ n->h2_direct = H2_CONFIG_GET(add, base, h2_direct);
+ n->modern_tls_only = H2_CONFIG_GET(add, base, modern_tls_only);
+ n->h2_upgrade = H2_CONFIG_GET(add, base, h2_upgrade);
+ n->tls_warmup_size = H2_CONFIG_GET(add, base, tls_warmup_size);
+ n->tls_cooldown_secs = H2_CONFIG_GET(add, base, tls_cooldown_secs);
+ n->h2_push = H2_CONFIG_GET(add, base, h2_push);
+ if (add->priorities && base->priorities) {
+ n->priorities = apr_hash_overlay(pool, add->priorities, base->priorities);
+ }
+ else {
+ n->priorities = add->priorities? add->priorities : base->priorities;
+ }
+ n->push_diary_size = H2_CONFIG_GET(add, base, push_diary_size);
+ n->copy_files = H2_CONFIG_GET(add, base, copy_files);
+ n->output_buffered = H2_CONFIG_GET(add, base, output_buffered);
+ if (add->push_list && base->push_list) {
+ n->push_list = apr_array_append(pool, base->push_list, add->push_list);
+ }
+ else {
+ n->push_list = add->push_list? add->push_list : base->push_list;
+ }
+ n->early_hints = H2_CONFIG_GET(add, base, early_hints);
+ n->padding_bits = H2_CONFIG_GET(add, base, padding_bits);
+ n->padding_always = H2_CONFIG_GET(add, base, padding_always);
+ n->stream_timeout = H2_CONFIG_GET(add, base, stream_timeout);
+ return n;
+}
+
+void *h2_config_merge_svr(apr_pool_t *pool, void *basev, void *addv)
+{
+ return h2_config_merge(pool, basev, addv);
+}
+
+void *h2_config_create_dir(apr_pool_t *pool, char *x)
+{
+ h2_dir_config *conf = (h2_dir_config *)apr_pcalloc(pool, sizeof(h2_dir_config));
+ const char *s = x? x : "unknown";
+ char *name = apr_pstrcat(pool, "dir[", s, "]", NULL);
+
+ conf->name = name;
+ conf->h2_upgrade = DEF_VAL;
+ conf->h2_push = DEF_VAL;
+ conf->early_hints = DEF_VAL;
+ conf->stream_timeout = DEF_VAL;
+ return conf;
+}
+
+void *h2_config_merge_dir(apr_pool_t *pool, void *basev, void *addv)
+{
+ h2_dir_config *base = (h2_dir_config *)basev;
+ h2_dir_config *add = (h2_dir_config *)addv;
+ h2_dir_config *n = (h2_dir_config *)apr_pcalloc(pool, sizeof(h2_dir_config));
+
+ n->name = apr_pstrcat(pool, "merged[", add->name, ", ", base->name, "]", NULL);
+ n->h2_upgrade = H2_CONFIG_GET(add, base, h2_upgrade);
+ n->h2_push = H2_CONFIG_GET(add, base, h2_push);
+ if (add->push_list && base->push_list) {
+ n->push_list = apr_array_append(pool, base->push_list, add->push_list);
+ }
+ else {
+ n->push_list = add->push_list? add->push_list : base->push_list;
+ }
+ n->early_hints = H2_CONFIG_GET(add, base, early_hints);
+ n->stream_timeout = H2_CONFIG_GET(add, base, stream_timeout);
+ return n;
+}
+
+static apr_int64_t h2_srv_config_geti64(const h2_config *conf, h2_config_var_t var)
+{
+ switch(var) {
+ case H2_CONF_MAX_STREAMS:
+ return H2_CONFIG_GET(conf, &defconf, h2_max_streams);
+ case H2_CONF_WIN_SIZE:
+ return H2_CONFIG_GET(conf, &defconf, h2_window_size);
+ case H2_CONF_MIN_WORKERS:
+ return H2_CONFIG_GET(conf, &defconf, min_workers);
+ case H2_CONF_MAX_WORKERS:
+ return H2_CONFIG_GET(conf, &defconf, max_workers);
+ case H2_CONF_MAX_WORKER_IDLE_LIMIT:
+ return H2_CONFIG_GET(conf, &defconf, idle_limit);
+ case H2_CONF_STREAM_MAX_MEM:
+ return H2_CONFIG_GET(conf, &defconf, stream_max_mem_size);
+ case H2_CONF_MODERN_TLS_ONLY:
+ return H2_CONFIG_GET(conf, &defconf, modern_tls_only);
+ case H2_CONF_UPGRADE:
+ return H2_CONFIG_GET(conf, &defconf, h2_upgrade);
+ case H2_CONF_DIRECT:
+ return H2_CONFIG_GET(conf, &defconf, h2_direct);
+ case H2_CONF_TLS_WARMUP_SIZE:
+ return H2_CONFIG_GET(conf, &defconf, tls_warmup_size);
+ case H2_CONF_TLS_COOLDOWN_SECS:
+ return H2_CONFIG_GET(conf, &defconf, tls_cooldown_secs);
+ case H2_CONF_PUSH:
+ return H2_CONFIG_GET(conf, &defconf, h2_push);
+ case H2_CONF_PUSH_DIARY_SIZE:
+ return H2_CONFIG_GET(conf, &defconf, push_diary_size);
+ case H2_CONF_COPY_FILES:
+ return H2_CONFIG_GET(conf, &defconf, copy_files);
+ case H2_CONF_EARLY_HINTS:
+ return H2_CONFIG_GET(conf, &defconf, early_hints);
+ case H2_CONF_PADDING_BITS:
+ return H2_CONFIG_GET(conf, &defconf, padding_bits);
+ case H2_CONF_PADDING_ALWAYS:
+ return H2_CONFIG_GET(conf, &defconf, padding_always);
+ case H2_CONF_OUTPUT_BUFFER:
+ return H2_CONFIG_GET(conf, &defconf, output_buffered);
+ case H2_CONF_STREAM_TIMEOUT:
+ return H2_CONFIG_GET(conf, &defconf, stream_timeout);
+ default:
+ return DEF_VAL;
+ }
+}
+
+static void h2_srv_config_seti(h2_config *conf, h2_config_var_t var, int val)
+{
+ switch(var) {
+ case H2_CONF_MAX_STREAMS:
+ H2_CONFIG_SET(conf, h2_max_streams, val);
+ break;
+ case H2_CONF_WIN_SIZE:
+ H2_CONFIG_SET(conf, h2_window_size, val);
+ break;
+ case H2_CONF_MIN_WORKERS:
+ H2_CONFIG_SET(conf, min_workers, val);
+ break;
+ case H2_CONF_MAX_WORKERS:
+ H2_CONFIG_SET(conf, max_workers, val);
+ break;
+ case H2_CONF_STREAM_MAX_MEM:
+ H2_CONFIG_SET(conf, stream_max_mem_size, val);
+ break;
+ case H2_CONF_MODERN_TLS_ONLY:
+ H2_CONFIG_SET(conf, modern_tls_only, val);
+ break;
+ case H2_CONF_UPGRADE:
+ H2_CONFIG_SET(conf, h2_upgrade, val);
+ break;
+ case H2_CONF_DIRECT:
+ H2_CONFIG_SET(conf, h2_direct, val);
+ break;
+ case H2_CONF_TLS_WARMUP_SIZE:
+ H2_CONFIG_SET(conf, tls_warmup_size, val);
+ break;
+ case H2_CONF_TLS_COOLDOWN_SECS:
+ H2_CONFIG_SET(conf, tls_cooldown_secs, val);
+ break;
+ case H2_CONF_PUSH:
+ H2_CONFIG_SET(conf, h2_push, val);
+ break;
+ case H2_CONF_PUSH_DIARY_SIZE:
+ H2_CONFIG_SET(conf, push_diary_size, val);
+ break;
+ case H2_CONF_COPY_FILES:
+ H2_CONFIG_SET(conf, copy_files, val);
+ break;
+ case H2_CONF_EARLY_HINTS:
+ H2_CONFIG_SET(conf, early_hints, val);
+ break;
+ case H2_CONF_PADDING_BITS:
+ H2_CONFIG_SET(conf, padding_bits, val);
+ break;
+ case H2_CONF_PADDING_ALWAYS:
+ H2_CONFIG_SET(conf, padding_always, val);
+ break;
+ case H2_CONF_OUTPUT_BUFFER:
+ H2_CONFIG_SET(conf, output_buffered, val);
+ break;
+ default:
+ break;
+ }
+}
+
+static void h2_srv_config_seti64(h2_config *conf, h2_config_var_t var, apr_int64_t val)
+{
+ switch(var) {
+ case H2_CONF_TLS_WARMUP_SIZE:
+ H2_CONFIG_SET(conf, tls_warmup_size, val);
+ break;
+ case H2_CONF_STREAM_TIMEOUT:
+ H2_CONFIG_SET(conf, stream_timeout, val);
+ break;
+ case H2_CONF_MAX_WORKER_IDLE_LIMIT:
+ H2_CONFIG_SET(conf, idle_limit, val);
+ break;
+ default:
+ h2_srv_config_seti(conf, var, (int)val);
+ break;
+ }
+}
+
+static h2_config *h2_config_sget(server_rec *s)
+{
+ h2_config *cfg = (h2_config *)ap_get_module_config(s->module_config,
+ &http2_module);
+ ap_assert(cfg);
+ return cfg;
+}
+
+static const h2_dir_config *h2_config_rget(request_rec *r)
+{
+ h2_dir_config *cfg = (h2_dir_config *)ap_get_module_config(r->per_dir_config,
+ &http2_module);
+ ap_assert(cfg);
+ return cfg;
+}
+
+static apr_int64_t h2_dir_config_geti64(const h2_dir_config *conf, h2_config_var_t var)
+{
+ switch(var) {
+ case H2_CONF_UPGRADE:
+ return H2_CONFIG_GET(conf, &defdconf, h2_upgrade);
+ case H2_CONF_PUSH:
+ return H2_CONFIG_GET(conf, &defdconf, h2_push);
+ case H2_CONF_EARLY_HINTS:
+ return H2_CONFIG_GET(conf, &defdconf, early_hints);
+ case H2_CONF_STREAM_TIMEOUT:
+ return H2_CONFIG_GET(conf, &defdconf, stream_timeout);
+
+ default:
+ return DEF_VAL;
+ }
+}
+
+static void h2_config_seti(h2_dir_config *dconf, h2_config *conf, h2_config_var_t var, int val)
+{
+ int set_srv = !dconf;
+ if (dconf) {
+ switch(var) {
+ case H2_CONF_UPGRADE:
+ H2_CONFIG_SET(dconf, h2_upgrade, val);
+ break;
+ case H2_CONF_PUSH:
+ H2_CONFIG_SET(dconf, h2_push, val);
+ break;
+ case H2_CONF_EARLY_HINTS:
+ H2_CONFIG_SET(dconf, early_hints, val);
+ break;
+ default:
+ /* not handled in dir_conf */
+ set_srv = 1;
+ break;
+ }
+ }
+
+ if (set_srv) {
+ h2_srv_config_seti(conf, var, val);
+ }
+}
+
+static void h2_config_seti64(h2_dir_config *dconf, h2_config *conf, h2_config_var_t var, apr_int64_t val)
+{
+ int set_srv = !dconf;
+ if (dconf) {
+ switch(var) {
+ case H2_CONF_STREAM_TIMEOUT:
+ H2_CONFIG_SET(dconf, stream_timeout, val);
+ break;
+ default:
+ /* not handled in dir_conf */
+ set_srv = 1;
+ break;
+ }
+ }
+
+ if (set_srv) {
+ h2_srv_config_seti64(conf, var, val);
+ }
+}
+
+static const h2_config *h2_config_get(conn_rec *c)
+{
+ h2_conn_ctx_t *conn_ctx = h2_conn_ctx_get(c);
+
+ if (conn_ctx && conn_ctx->server) {
+ return h2_config_sget(conn_ctx->server);
+ }
+ return h2_config_sget(c->base_server);
+}
+
+int h2_config_cgeti(conn_rec *c, h2_config_var_t var)
+{
+ return (int)h2_srv_config_geti64(h2_config_get(c), var);
+}
+
+apr_int64_t h2_config_cgeti64(conn_rec *c, h2_config_var_t var)
+{
+ return h2_srv_config_geti64(h2_config_get(c), var);
+}
+
+int h2_config_sgeti(server_rec *s, h2_config_var_t var)
+{
+ return (int)h2_srv_config_geti64(h2_config_sget(s), var);
+}
+
+apr_int64_t h2_config_sgeti64(server_rec *s, h2_config_var_t var)
+{
+ return h2_srv_config_geti64(h2_config_sget(s), var);
+}
+
+int h2_config_geti(request_rec *r, server_rec *s, h2_config_var_t var)
+{
+ return (int)h2_config_geti64(r, s, var);
+}
+
+apr_int64_t h2_config_geti64(request_rec *r, server_rec *s, h2_config_var_t var)
+{
+    apr_int64_t mode = r? h2_dir_config_geti64(h2_config_rget(r), var) : DEF_VAL;
+ return (mode != DEF_VAL)? mode : h2_config_sgeti64(s, var);
+}
+
+int h2_config_rgeti(request_rec *r, h2_config_var_t var)
+{
+ return h2_config_geti(r, r->server, var);
+}
+
+apr_int64_t h2_config_rgeti64(request_rec *r, h2_config_var_t var)
+{
+ return h2_config_geti64(r, r->server, var);
+}
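+
+/* A minimal usage sketch (hypothetical caller, not part of this change):
+ * per-request lookups consult the <Directory>/<Location> config first and
+ * fall back to the server config, as implemented in h2_config_geti64().
+ *
+ *   if (h2_config_rgeti(r, H2_CONF_PUSH)) {
+ *       ... server push is permitted for this request ...
+ *   }
+ */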
+
+apr_array_header_t *h2_config_push_list(request_rec *r)
+{
+ const h2_config *sconf;
+ const h2_dir_config *conf = h2_config_rget(r);
+
+ if (conf && conf->push_list) {
+ return conf->push_list;
+ }
+ sconf = h2_config_sget(r->server);
+ return sconf? sconf->push_list : NULL;
+}
+
+const struct h2_priority *h2_cconfig_get_priority(conn_rec *c, const char *content_type)
+{
+ const h2_config *conf = h2_config_get(c);
+ if (content_type && conf->priorities) {
+ apr_ssize_t len = (apr_ssize_t)strcspn(content_type, "; \t");
+ h2_priority *prio = apr_hash_get(conf->priorities, content_type, len);
+ return prio? prio : apr_hash_get(conf->priorities, "*", 1);
+ }
+ return NULL;
+}
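+
+/* Illustration (directive values are examples only): with
+ *
+ *   H2PushPriority text/css               Before
+ *   H2PushPriority application/javascript Interleaved 256
+ *   H2PushPriority *                      After 16
+ *
+ * a lookup for "text/css;charset=utf-8" strips the parameters via strcspn()
+ * and matches the "text/css" entry; unknown content types fall back to the
+ * "*" entry, and NULL is returned when no priorities are configured.
+ */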
+
+static const char *h2_conf_set_max_streams(cmd_parms *cmd,
+ void *dirconf, const char *value)
+{
+    apr_int64_t ival = apr_atoi64(value);
+ if (ival < 1) {
+ return "value must be > 0";
+ }
+ CONFIG_CMD_SET64(cmd, dirconf, H2_CONF_MAX_STREAMS, ival);
+ return NULL;
+}
+
+static const char *h2_conf_set_window_size(cmd_parms *cmd,
+ void *dirconf, const char *value)
+{
+ int val = (int)apr_atoi64(value);
+ if (val < 1024) {
+ return "value must be >= 1024";
+ }
+ CONFIG_CMD_SET(cmd, dirconf, H2_CONF_WIN_SIZE, val);
+ return NULL;
+}
+
+static const char *h2_conf_set_min_workers(cmd_parms *cmd,
+ void *dirconf, const char *value)
+{
+ int val = (int)apr_atoi64(value);
+ if (val < 1) {
+ return "value must be > 0";
+ }
+ CONFIG_CMD_SET(cmd, dirconf, H2_CONF_MIN_WORKERS, val);
+ return NULL;
+}
+
+static const char *h2_conf_set_max_workers(cmd_parms *cmd,
+ void *dirconf, const char *value)
+{
+ int val = (int)apr_atoi64(value);
+ if (val < 1) {
+ return "value must be > 0";
+ }
+ CONFIG_CMD_SET(cmd, dirconf, H2_CONF_MAX_WORKERS, val);
+ return NULL;
+}
+
+static const char *h2_conf_set_max_worker_idle_limit(cmd_parms *cmd,
+ void *dirconf, const char *value)
+{
+ apr_interval_time_t timeout;
+ apr_status_t rv = ap_timeout_parameter_parse(value, &timeout, "s");
+ if (rv != APR_SUCCESS) {
+ return "Invalid idle limit value";
+ }
+ if (timeout <= 0) {
+ timeout = DEF_VAL;
+ }
+ CONFIG_CMD_SET64(cmd, dirconf, H2_CONF_MAX_WORKER_IDLE_LIMIT, timeout);
+ return NULL;
+}
+
+static const char *h2_conf_set_stream_max_mem_size(cmd_parms *cmd,
+ void *dirconf, const char *value)
+{
+ int val = (int)apr_atoi64(value);
+ if (val < 1024) {
+ return "value must be >= 1024";
+ }
+ CONFIG_CMD_SET(cmd, dirconf, H2_CONF_STREAM_MAX_MEM, val);
+ return NULL;
+}
+
+static const char *h2_conf_set_session_extra_files(cmd_parms *cmd,
+ void *dirconf, const char *value)
+{
+ /* deprecated, ignore */
+ (void)dirconf;
+ (void)value;
+ ap_log_perror(APLOG_MARK, APLOG_WARNING, 0, cmd->pool, /* NO LOGNO */
+ "H2SessionExtraFiles is obsolete and will be ignored");
+ return NULL;
+}
+
+static const char *h2_conf_set_serialize_headers(cmd_parms *parms,
+ void *dirconf, const char *value)
+{
+ if (!strcasecmp(value, "On")) {
+ ap_log_error(APLOG_MARK, APLOG_WARNING, 0, parms->server, APLOGNO(10307)
+ "%s: this feature has been disabled and the directive "
+ "to enable it is ignored.", parms->cmd->name);
+ }
+ return NULL;
+}
+
+static const char *h2_conf_set_direct(cmd_parms *cmd,
+ void *dirconf, const char *value)
+{
+ if (!strcasecmp(value, "On")) {
+ CONFIG_CMD_SET(cmd, dirconf, H2_CONF_DIRECT, 1);
+ return NULL;
+ }
+ else if (!strcasecmp(value, "Off")) {
+ CONFIG_CMD_SET(cmd, dirconf, H2_CONF_DIRECT, 0);
+ return NULL;
+ }
+ return "value must be On or Off";
+}
+
+static const char *h2_conf_set_push(cmd_parms *cmd, void *dirconf, const char *value)
+{
+ if (!strcasecmp(value, "On")) {
+ CONFIG_CMD_SET(cmd, dirconf, H2_CONF_PUSH, 1);
+ return NULL;
+ }
+ else if (!strcasecmp(value, "Off")) {
+ CONFIG_CMD_SET(cmd, dirconf, H2_CONF_PUSH, 0);
+ return NULL;
+ }
+ return "value must be On or Off";
+}
+
+static const char *h2_conf_add_push_priority(cmd_parms *cmd, void *_cfg,
+ const char *ctype, const char *sdependency,
+ const char *sweight)
+{
+ h2_config *cfg = (h2_config *)h2_config_sget(cmd->server);
+ const char *sdefweight = "16"; /* default AFTER weight */
+ h2_dependency dependency;
+ h2_priority *priority;
+ int weight;
+
+ (void)_cfg;
+ if (!*ctype) {
+ return "1st argument must be a mime-type, like 'text/css' or '*'";
+ }
+
+ if (!sweight) {
+ /* 2 args only, but which one? */
+ if (apr_isdigit(sdependency[0])) {
+ sweight = sdependency;
+ sdependency = "AFTER"; /* default dependency */
+ }
+ }
+
+ if (!strcasecmp("AFTER", sdependency)) {
+ dependency = H2_DEPENDANT_AFTER;
+ }
+ else if (!strcasecmp("BEFORE", sdependency)) {
+ dependency = H2_DEPENDANT_BEFORE;
+ if (sweight) {
+ return "dependency 'Before' does not allow a weight";
+ }
+ }
+ else if (!strcasecmp("INTERLEAVED", sdependency)) {
+ dependency = H2_DEPENDANT_INTERLEAVED;
+ sdefweight = "256"; /* default INTERLEAVED weight */
+ }
+ else {
+ return "dependency must be one of 'After', 'Before' or 'Interleaved'";
+ }
+
+ weight = (int)apr_atoi64(sweight? sweight : sdefweight);
+ if (weight < NGHTTP2_MIN_WEIGHT) {
+ return apr_psprintf(cmd->pool, "weight must be a number >= %d",
+ NGHTTP2_MIN_WEIGHT);
+ }
+
+ priority = apr_pcalloc(cmd->pool, sizeof(*priority));
+ priority->dependency = dependency;
+ priority->weight = weight;
+
+ if (!cfg->priorities) {
+ cfg->priorities = apr_hash_make(cmd->pool);
+ }
+ apr_hash_set(cfg->priorities, ctype, (apr_ssize_t)strlen(ctype), priority);
+ return NULL;
+}
+
+static const char *h2_conf_set_modern_tls_only(cmd_parms *cmd,
+ void *dirconf, const char *value)
+{
+ if (!strcasecmp(value, "On")) {
+ CONFIG_CMD_SET(cmd, dirconf, H2_CONF_MODERN_TLS_ONLY, 1);
+ return NULL;
+ }
+ else if (!strcasecmp(value, "Off")) {
+ CONFIG_CMD_SET(cmd, dirconf, H2_CONF_MODERN_TLS_ONLY, 0);
+ return NULL;
+ }
+ return "value must be On or Off";
+}
+
+static const char *h2_conf_set_upgrade(cmd_parms *cmd,
+ void *dirconf, const char *value)
+{
+ if (!strcasecmp(value, "On")) {
+ CONFIG_CMD_SET(cmd, dirconf, H2_CONF_UPGRADE, 1);
+ return NULL;
+ }
+ else if (!strcasecmp(value, "Off")) {
+ CONFIG_CMD_SET(cmd, dirconf, H2_CONF_UPGRADE, 0);
+ return NULL;
+ }
+ return "value must be On or Off";
+}
+
+static const char *h2_conf_set_tls_warmup_size(cmd_parms *cmd,
+ void *dirconf, const char *value)
+{
+ apr_int64_t val = apr_atoi64(value);
+ CONFIG_CMD_SET64(cmd, dirconf, H2_CONF_TLS_WARMUP_SIZE, val);
+ return NULL;
+}
+
+static const char *h2_conf_set_tls_cooldown_secs(cmd_parms *cmd,
+ void *dirconf, const char *value)
+{
+    apr_int64_t val = apr_atoi64(value);
+ CONFIG_CMD_SET64(cmd, dirconf, H2_CONF_TLS_COOLDOWN_SECS, val);
+ return NULL;
+}
+
+static const char *h2_conf_set_push_diary_size(cmd_parms *cmd,
+ void *dirconf, const char *value)
+{
+ int val = (int)apr_atoi64(value);
+ if (val < 0) {
+ return "value must be >= 0";
+ }
+    if (val > 0 && (val & (val-1))) {
+        /* a non-zero value with more than one bit set is not a power of 2 */
+        return "value must be a power of 2";
+    }
+    if (val > (1 << 15)) {
+        return "value must be <= 65536";
+ }
+ CONFIG_CMD_SET(cmd, dirconf, H2_CONF_PUSH_DIARY_SIZE, val);
+ return NULL;
+}
+
+static const char *h2_conf_set_copy_files(cmd_parms *cmd,
+ void *dirconf, const char *value)
+{
+ if (!strcasecmp(value, "On")) {
+ CONFIG_CMD_SET(cmd, dirconf, H2_CONF_COPY_FILES, 1);
+ return NULL;
+ }
+ else if (!strcasecmp(value, "Off")) {
+ CONFIG_CMD_SET(cmd, dirconf, H2_CONF_COPY_FILES, 0);
+ return NULL;
+ }
+ return "value must be On or Off";
+}
+
+static void add_push(apr_array_header_t **plist, apr_pool_t *pool, h2_push_res *push)
+{
+ h2_push_res *new;
+ if (!*plist) {
+ *plist = apr_array_make(pool, 10, sizeof(*push));
+ }
+ new = apr_array_push(*plist);
+ new->uri_ref = push->uri_ref;
+ new->critical = push->critical;
+}
+
+static const char *h2_conf_add_push_res(cmd_parms *cmd, void *dirconf,
+ const char *arg1, const char *arg2,
+ const char *arg3)
+{
+ h2_push_res push;
+ const char *last = arg3;
+
+ memset(&push, 0, sizeof(push));
+ if (!strcasecmp("add", arg1)) {
+ push.uri_ref = arg2;
+ }
+ else {
+ push.uri_ref = arg1;
+ last = arg2;
+ if (arg3) {
+ return "too many parameter";
+ }
+ }
+
+ if (last) {
+ if (!strcasecmp("critical", last)) {
+ push.critical = 1;
+ }
+ else {
+ return "unknown last parameter";
+ }
+ }
+
+ if (cmd->path) {
+ add_push(&(((h2_dir_config*)dirconf)->push_list), cmd->pool, &push);
+ }
+ else {
+ add_push(&(h2_config_sget(cmd->server)->push_list), cmd->pool, &push);
+ }
+ return NULL;
+}
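+
+/* Illustration (paths are examples only): the parser above accepts
+ *
+ *   H2PushResource /assets/site.css
+ *   H2PushResource /assets/site.css critical
+ *   H2PushResource add /assets/site.css critical
+ *
+ * where the optional trailing 'critical' sets push.critical on the entry.
+ */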
+
+static const char *h2_conf_set_early_hints(cmd_parms *cmd,
+ void *dirconf, const char *value)
+{
+ int val;
+
+ if (!strcasecmp(value, "On")) val = 1;
+ else if (!strcasecmp(value, "Off")) val = 0;
+ else return "value must be On or Off";
+
+ CONFIG_CMD_SET(cmd, dirconf, H2_CONF_EARLY_HINTS, val);
+ if (cmd->path) {
+ ap_log_perror(APLOG_MARK, APLOG_WARNING, 0, cmd->pool,
+ "H2EarlyHints = %d on path %s", val, cmd->path);
+ }
+ return NULL;
+}
+
+static const char *h2_conf_set_padding(cmd_parms *cmd, void *dirconf, const char *value)
+{
+ int val;
+
+ val = (int)apr_atoi64(value);
+ if (val < 0) {
+ return "number of bits must be >= 0";
+ }
+ if (val > 8) {
+ return "number of bits must be <= 8";
+ }
+ CONFIG_CMD_SET(cmd, dirconf, H2_CONF_PADDING_BITS, val);
+ return NULL;
+}
+
+static const char *h2_conf_set_output_buffer(cmd_parms *cmd,
+ void *dirconf, const char *value)
+{
+ if (!strcasecmp(value, "On")) {
+ CONFIG_CMD_SET(cmd, dirconf, H2_CONF_OUTPUT_BUFFER, 1);
+ return NULL;
+ }
+ else if (!strcasecmp(value, "Off")) {
+ CONFIG_CMD_SET(cmd, dirconf, H2_CONF_OUTPUT_BUFFER, 0);
+ return NULL;
+ }
+ return "value must be On or Off";
+}
+
+static const char *h2_conf_set_stream_timeout(cmd_parms *cmd,
+ void *dirconf, const char *value)
+{
+ apr_status_t rv;
+ apr_interval_time_t timeout;
+
+ rv = ap_timeout_parameter_parse(value, &timeout, "s");
+ if (rv != APR_SUCCESS) {
+ return "Invalid timeout value";
+ }
+ CONFIG_CMD_SET64(cmd, dirconf, H2_CONF_STREAM_TIMEOUT, timeout);
+ return NULL;
+}
+
+void h2_get_workers_config(server_rec *s, int *pminw, int *pmaxw,
+ apr_time_t *pidle_limit)
+{
+ int threads_per_child = 0;
+
+ *pminw = h2_config_sgeti(s, H2_CONF_MIN_WORKERS);
+ *pmaxw = h2_config_sgeti(s, H2_CONF_MAX_WORKERS);
+
+ ap_mpm_query(AP_MPMQ_MAX_THREADS, &threads_per_child);
+ if (*pminw <= 0) {
+ *pminw = threads_per_child;
+ }
+ if (*pmaxw <= 0) {
+ *pmaxw = H2MAX(4, 3 * (*pminw) / 2);
+ }
+ *pidle_limit = h2_config_sgeti64(s, H2_CONF_MAX_WORKER_IDLE_LIMIT);
+}
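+
+/* Worked example (illustrative numbers): with an MPM reporting 25 threads
+ * per child and neither H2MinWorkers nor H2MaxWorkers configured, the
+ * defaults above give *pminw = 25 and *pmaxw = H2MAX(4, 3 * 25 / 2) = 37.
+ */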
+
+#define AP_END_CMD AP_INIT_TAKE1(NULL, NULL, NULL, RSRC_CONF, NULL)
+
+const command_rec h2_cmds[] = {
+ AP_INIT_TAKE1("H2MaxSessionStreams", h2_conf_set_max_streams, NULL,
+ RSRC_CONF, "maximum number of open streams per session"),
+ AP_INIT_TAKE1("H2WindowSize", h2_conf_set_window_size, NULL,
+ RSRC_CONF, "window size on client DATA"),
+ AP_INIT_TAKE1("H2MinWorkers", h2_conf_set_min_workers, NULL,
+ RSRC_CONF, "minimum number of worker threads per child"),
+ AP_INIT_TAKE1("H2MaxWorkers", h2_conf_set_max_workers, NULL,
+ RSRC_CONF, "maximum number of worker threads per child"),
+ AP_INIT_TAKE1("H2MaxWorkerIdleSeconds", h2_conf_set_max_worker_idle_limit, NULL,
+ RSRC_CONF, "maximum number of idle seconds before a worker shuts down"),
+ AP_INIT_TAKE1("H2StreamMaxMemSize", h2_conf_set_stream_max_mem_size, NULL,
+ RSRC_CONF, "maximum number of bytes buffered in memory for a stream"),
+ AP_INIT_TAKE1("H2SerializeHeaders", h2_conf_set_serialize_headers, NULL,
+ RSRC_CONF, "disabled, this directive has no longer an effect."),
+ AP_INIT_TAKE1("H2ModernTLSOnly", h2_conf_set_modern_tls_only, NULL,
+ RSRC_CONF, "off to not impose RFC 7540 restrictions on TLS"),
+ AP_INIT_TAKE1("H2Upgrade", h2_conf_set_upgrade, NULL,
+ RSRC_CONF|OR_AUTHCFG, "on to allow HTTP/1 Upgrades to h2/h2c"),
+ AP_INIT_TAKE1("H2Direct", h2_conf_set_direct, NULL,
+ RSRC_CONF, "on to enable direct HTTP/2 mode"),
+ AP_INIT_TAKE1("H2SessionExtraFiles", h2_conf_set_session_extra_files, NULL,
+ RSRC_CONF, "number of extra file a session might keep open (obsolete)"),
+ AP_INIT_TAKE1("H2TLSWarmUpSize", h2_conf_set_tls_warmup_size, NULL,
+ RSRC_CONF, "number of bytes on TLS connection before doing max writes"),
+ AP_INIT_TAKE1("H2TLSCoolDownSecs", h2_conf_set_tls_cooldown_secs, NULL,
+ RSRC_CONF, "seconds of idle time on TLS before shrinking writes"),
+ AP_INIT_TAKE1("H2Push", h2_conf_set_push, NULL,
+ RSRC_CONF|OR_AUTHCFG, "off to disable HTTP/2 server push"),
+ AP_INIT_TAKE23("H2PushPriority", h2_conf_add_push_priority, NULL,
+ RSRC_CONF, "define priority of PUSHed resources per content type"),
+ AP_INIT_TAKE1("H2PushDiarySize", h2_conf_set_push_diary_size, NULL,
+ RSRC_CONF, "size of push diary"),
+ AP_INIT_TAKE1("H2CopyFiles", h2_conf_set_copy_files, NULL,
+ OR_FILEINFO, "on to perform copy of file data"),
+ AP_INIT_TAKE123("H2PushResource", h2_conf_add_push_res, NULL,
+ OR_FILEINFO|OR_AUTHCFG, "add a resource to be pushed in this location/on this server."),
+ AP_INIT_TAKE1("H2EarlyHints", h2_conf_set_early_hints, NULL,
+ RSRC_CONF, "on to enable interim status 103 responses"),
+ AP_INIT_TAKE1("H2Padding", h2_conf_set_padding, NULL,
+ RSRC_CONF, "set payload padding"),
+ AP_INIT_TAKE1("H2OutputBuffering", h2_conf_set_output_buffer, NULL,
+ RSRC_CONF, "set stream output buffer on/off"),
+ AP_INIT_TAKE1("H2StreamTimeout", h2_conf_set_stream_timeout, NULL,
+ RSRC_CONF, "set stream timeout"),
+ AP_END_CMD
+};
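+
+/* Illustrative configuration using the directives registered above
+ * (values chosen for the example only):
+ *
+ *   <IfModule http2_module>
+ *       Protocols h2 http/1.1
+ *       H2MaxSessionStreams 100
+ *       H2Push              on
+ *       H2PushPriority      text/css Before
+ *       H2EarlyHints        on
+ *   </IfModule>
+ */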
+
+
diff --git a/modules/http2/h2_config.h b/modules/http2/h2_config.h
new file mode 100644
index 0000000..6d2e65f
--- /dev/null
+++ b/modules/http2/h2_config.h
@@ -0,0 +1,98 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __mod_h2__h2_config_h__
+#define __mod_h2__h2_config_h__
+
+#undef PACKAGE_VERSION
+#undef PACKAGE_TARNAME
+#undef PACKAGE_STRING
+#undef PACKAGE_NAME
+#undef PACKAGE_BUGREPORT
+
+typedef enum {
+ H2_CONF_MAX_STREAMS,
+ H2_CONF_WIN_SIZE,
+ H2_CONF_MIN_WORKERS,
+ H2_CONF_MAX_WORKERS,
+ H2_CONF_MAX_WORKER_IDLE_LIMIT,
+ H2_CONF_STREAM_MAX_MEM,
+ H2_CONF_DIRECT,
+ H2_CONF_MODERN_TLS_ONLY,
+ H2_CONF_UPGRADE,
+ H2_CONF_TLS_WARMUP_SIZE,
+ H2_CONF_TLS_COOLDOWN_SECS,
+ H2_CONF_PUSH,
+ H2_CONF_PUSH_DIARY_SIZE,
+ H2_CONF_COPY_FILES,
+ H2_CONF_EARLY_HINTS,
+ H2_CONF_PADDING_BITS,
+ H2_CONF_PADDING_ALWAYS,
+ H2_CONF_OUTPUT_BUFFER,
+ H2_CONF_STREAM_TIMEOUT,
+} h2_config_var_t;
+
+struct apr_hash_t;
+struct h2_priority;
+struct h2_push_res;
+
+typedef struct h2_push_res {
+ const char *uri_ref;
+ int critical;
+} h2_push_res;
+
+
+void *h2_config_create_dir(apr_pool_t *pool, char *x);
+void *h2_config_merge_dir(apr_pool_t *pool, void *basev, void *addv);
+void *h2_config_create_svr(apr_pool_t *pool, server_rec *s);
+void *h2_config_merge_svr(apr_pool_t *pool, void *basev, void *addv);
+
+extern const command_rec h2_cmds[];
+
+int h2_config_geti(request_rec *r, server_rec *s, h2_config_var_t var);
+apr_int64_t h2_config_geti64(request_rec *r, server_rec *s, h2_config_var_t var);
+
+/**
+ * Get the configured value for variable <var> at the given connection.
+ */
+int h2_config_cgeti(conn_rec *c, h2_config_var_t var);
+apr_int64_t h2_config_cgeti64(conn_rec *c, h2_config_var_t var);
+
+/**
+ * Get the configured value for variable <var> at the given server.
+ */
+int h2_config_sgeti(server_rec *s, h2_config_var_t var);
+apr_int64_t h2_config_sgeti64(server_rec *s, h2_config_var_t var);
+
+/**
+ * Get the configured value for variable <var> at the given request,
+ * if configured for the request location.
+ * Fallback to request server config otherwise.
+ */
+int h2_config_rgeti(request_rec *r, h2_config_var_t var);
+apr_int64_t h2_config_rgeti64(request_rec *r, h2_config_var_t var);
+
+apr_array_header_t *h2_config_push_list(request_rec *r);
+
+
+void h2_get_workers_config(server_rec *s, int *pminw, int *pmaxw,
+ apr_time_t *pidle_limit);
+void h2_config_init(apr_pool_t *pool);
+
+const struct h2_priority *h2_cconfig_get_priority(conn_rec *c, const char *content_type);
+
+#endif /* __mod_h2__h2_config_h__ */
+
diff --git a/modules/http2/h2_conn_ctx.c b/modules/http2/h2_conn_ctx.c
new file mode 100644
index 0000000..b8a0fb3
--- /dev/null
+++ b/modules/http2/h2_conn_ctx.c
@@ -0,0 +1,123 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <assert.h>
+#include <apr_strings.h>
+#include <apr_atomic.h>
+
+#include <httpd.h>
+#include <http_core.h>
+#include <http_config.h>
+#include <http_log.h>
+#include <http_protocol.h>
+
+#include "h2_private.h"
+#include "h2_session.h"
+#include "h2_bucket_beam.h"
+#include "h2_c2.h"
+#include "h2_mplx.h"
+#include "h2_stream.h"
+#include "h2_util.h"
+#include "h2_conn_ctx.h"
+
+
+void h2_conn_ctx_detach(conn_rec *c)
+{
+ ap_set_module_config(c->conn_config, &http2_module, NULL);
+}
+
+static h2_conn_ctx_t *ctx_create(conn_rec *c, const char *id)
+{
+ h2_conn_ctx_t *conn_ctx = apr_pcalloc(c->pool, sizeof(*conn_ctx));
+ conn_ctx->id = id;
+ conn_ctx->server = c->base_server;
+ apr_atomic_set32(&conn_ctx->started, 1);
+ conn_ctx->started_at = apr_time_now();
+
+ ap_set_module_config(c->conn_config, &http2_module, conn_ctx);
+ return conn_ctx;
+}
+
+h2_conn_ctx_t *h2_conn_ctx_create_for_c1(conn_rec *c1, server_rec *s, const char *protocol)
+{
+ h2_conn_ctx_t *ctx;
+
+ ctx = ctx_create(c1, apr_psprintf(c1->pool, "%ld", c1->id));
+ ctx->server = s;
+ ctx->protocol = apr_pstrdup(c1->pool, protocol);
+
+ ctx->pfd.desc_type = APR_POLL_SOCKET;
+ ctx->pfd.desc.s = ap_get_conn_socket(c1);
+ ctx->pfd.reqevents = APR_POLLIN | APR_POLLERR | APR_POLLHUP;
+ ctx->pfd.client_data = ctx;
+ apr_socket_opt_set(ctx->pfd.desc.s, APR_SO_NONBLOCK, 1);
+
+ return ctx;
+}
+
+void h2_conn_ctx_assign_session(h2_conn_ctx_t *ctx, struct h2_session *session)
+{
+ ctx->session = session;
+ ctx->id = apr_psprintf(session->pool, "%d-%lu", session->child_num, (unsigned long)session->id);
+}
+
+apr_status_t h2_conn_ctx_init_for_c2(h2_conn_ctx_t **pctx, conn_rec *c2,
+ struct h2_mplx *mplx, struct h2_stream *stream,
+ struct h2_c2_transit *transit)
+{
+ h2_conn_ctx_t *conn_ctx;
+ apr_status_t rv = APR_SUCCESS;
+
+ ap_assert(c2->master);
+ conn_ctx = h2_conn_ctx_get(c2);
+ if (!conn_ctx) {
+ h2_conn_ctx_t *c1_ctx;
+
+ c1_ctx = h2_conn_ctx_get(c2->master);
+ ap_assert(c1_ctx);
+ ap_assert(c1_ctx->session);
+
+ conn_ctx = ctx_create(c2, c1_ctx->id);
+ conn_ctx->server = c2->master->base_server;
+ }
+
+ conn_ctx->mplx = mplx;
+ conn_ctx->transit = transit;
+ conn_ctx->stream_id = stream->id;
+ apr_pool_create(&conn_ctx->req_pool, c2->pool);
+ apr_pool_tag(conn_ctx->req_pool, "H2_C2_REQ");
+ conn_ctx->request = stream->request;
+ apr_atomic_set32(&conn_ctx->started, 1);
+ conn_ctx->started_at = apr_time_now();
+ conn_ctx->done = 0;
+ conn_ctx->done_at = 0;
+
+ *pctx = conn_ctx;
+ return rv;
+}
+
+void h2_conn_ctx_set_timeout(h2_conn_ctx_t *conn_ctx, apr_interval_time_t timeout)
+{
+ if (conn_ctx->beam_out) {
+ h2_beam_timeout_set(conn_ctx->beam_out, timeout);
+ }
+ if (conn_ctx->beam_in) {
+ h2_beam_timeout_set(conn_ctx->beam_in, timeout);
+ }
+ if (conn_ctx->pipe_in[H2_PIPE_OUT]) {
+ apr_file_pipe_timeout_set(conn_ctx->pipe_in[H2_PIPE_OUT], timeout);
+ }
+}
diff --git a/modules/http2/h2_conn_ctx.h b/modules/http2/h2_conn_ctx.h
new file mode 100644
index 0000000..35987bc
--- /dev/null
+++ b/modules/http2/h2_conn_ctx.h
@@ -0,0 +1,98 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __mod_h2__h2_conn_ctx__
+#define __mod_h2__h2_conn_ctx__
+
+#include "h2.h"
+
+struct h2_session;
+struct h2_stream;
+struct h2_mplx;
+struct h2_bucket_beam;
+struct h2_response_parser;
+struct h2_c2_transit;
+
+#define H2_PIPE_OUT 0
+#define H2_PIPE_IN 1
+
+/**
+ * The h2 module context associated with a connection.
+ *
+ * It keeps track of the different types of connections:
+ * - those from clients that use HTTP/2 protocol
+ * - those from clients that do not use HTTP/2
+ * - those created by ourselves to perform work on HTTP/2 streams
+ */
+struct h2_conn_ctx_t {
+ const char *id; /* c*: our identifier of this connection */
+ server_rec *server; /* c*: httpd server selected. */
+ const char *protocol; /* c1: the protocol negotiated */
+ struct h2_session *session; /* c1: the h2 session established */
+ struct h2_mplx *mplx; /* c2: the multiplexer */
+ struct h2_c2_transit *transit; /* c2: transit pool and bucket_alloc */
+
+#if !AP_HAS_RESPONSE_BUCKETS
+ int pre_conn_done; /* has pre_connection setup run? */
+#endif
+ int stream_id; /* c1: 0, c2: stream id processed */
+ apr_pool_t *req_pool; /* c2: a c2 child pool for a request */
+ const struct h2_request *request; /* c2: the request to process */
+ struct h2_bucket_beam *beam_out; /* c2: data out, created from req_pool */
+ struct h2_bucket_beam *beam_in; /* c2: data in or NULL, borrowed from request stream */
+ unsigned int input_chunked; /* c2: if input needs HTTP/1.1 chunking applied */
+
+ apr_file_t *pipe_in[2]; /* c2: input produced notification pipe */
+    apr_pollfd_t pfd;                /* c1: poll socket input, c2: not used */
+
+ int has_final_response; /* final HTTP response passed on out */
+    apr_status_t last_err;           /* APR_SUCCESS or last error encountered in filters */
+
+ /* atomic */ apr_uint32_t started; /* c2: processing was started */
+ apr_time_t started_at; /* c2: when processing started */
+ /* atomic */ apr_uint32_t done; /* c2: processing has finished */
+ apr_time_t done_at; /* c2: when processing was done */
+};
+typedef struct h2_conn_ctx_t h2_conn_ctx_t;
+
+/**
+ * Get the h2 connection context.
+ * @param c the connection to look at
+ * @return h2 context of this connection
+ */
+#define h2_conn_ctx_get(c) \
+ ((c)? (h2_conn_ctx_t*)ap_get_module_config((c)->conn_config, &http2_module) : NULL)
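+
+/* Usage sketch (hypothetical caller): a connection hook can test whether a
+ * connection is handled by this module before acting on it.
+ *
+ *   h2_conn_ctx_t *ctx = h2_conn_ctx_get(c);
+ *   if (ctx && ctx->session) {
+ *       ... c is an HTTP/2 master (c1) connection ...
+ *   }
+ */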
+
+/**
+ * Create the h2 connection context.
+ * @param c the connection to create it at
+ * @param s the server in use
+ * @param protocol the protocol selected
+ * @return created h2 context of this connection
+ */
+h2_conn_ctx_t *h2_conn_ctx_create_for_c1(conn_rec *c, server_rec *s, const char *protocol);
+
+void h2_conn_ctx_assign_session(h2_conn_ctx_t *ctx, struct h2_session *session);
+
+apr_status_t h2_conn_ctx_init_for_c2(h2_conn_ctx_t **pctx, conn_rec *c,
+ struct h2_mplx *mplx, struct h2_stream *stream,
+ struct h2_c2_transit *transit);
+
+void h2_conn_ctx_detach(conn_rec *c);
+
+void h2_conn_ctx_set_timeout(h2_conn_ctx_t *conn_ctx, apr_interval_time_t timeout);
+
+#endif /* defined(__mod_h2__h2_conn_ctx__) */
diff --git a/modules/http2/h2_headers.c b/modules/http2/h2_headers.c
new file mode 100644
index 0000000..cbc7b01
--- /dev/null
+++ b/modules/http2/h2_headers.c
@@ -0,0 +1,207 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <assert.h>
+#include <stdio.h>
+
+#include <apr_strings.h>
+
+#include <httpd.h>
+#include <http_core.h>
+#include <http_log.h>
+#include <util_time.h>
+
+#include <nghttp2/nghttp2.h>
+
+#include "h2_private.h"
+#include "h2_protocol.h"
+#include "h2_config.h"
+#include "h2_util.h"
+#include "h2_request.h"
+#include "h2_headers.h"
+
+#if !AP_HAS_RESPONSE_BUCKETS
+
+static int is_unsafe(server_rec *s)
+{
+ core_server_config *conf = ap_get_core_module_config(s->module_config);
+ return (conf->http_conformance == AP_HTTP_CONFORMANCE_UNSAFE);
+}
+
+typedef struct {
+ apr_bucket_refcount refcount;
+ h2_headers *headers;
+} h2_bucket_headers;
+
+static apr_status_t bucket_read(apr_bucket *b, const char **str,
+ apr_size_t *len, apr_read_type_e block)
+{
+ (void)b;
+ (void)block;
+ *str = NULL;
+ *len = 0;
+ return APR_SUCCESS;
+}
+
+apr_bucket * h2_bucket_headers_make(apr_bucket *b, h2_headers *r)
+{
+ h2_bucket_headers *br;
+
+ br = apr_bucket_alloc(sizeof(*br), b->list);
+ br->headers = r;
+
+ b = apr_bucket_shared_make(b, br, 0, 0);
+ b->type = &h2_bucket_type_headers;
+ b->length = 0;
+
+ return b;
+}
+
+apr_bucket * h2_bucket_headers_create(apr_bucket_alloc_t *list,
+ h2_headers *r)
+{
+ apr_bucket *b = apr_bucket_alloc(sizeof(*b), list);
+
+ APR_BUCKET_INIT(b);
+ b->free = apr_bucket_free;
+ b->list = list;
+ b = h2_bucket_headers_make(b, r);
+ return b;
+}
+
+h2_headers *h2_bucket_headers_get(apr_bucket *b)
+{
+ if (H2_BUCKET_IS_HEADERS(b)) {
+ return ((h2_bucket_headers *)b->data)->headers;
+ }
+ return NULL;
+}
+
+const apr_bucket_type_t h2_bucket_type_headers = {
+ "H2HEADERS", 5, APR_BUCKET_METADATA,
+ apr_bucket_destroy_noop,
+ bucket_read,
+ apr_bucket_setaside_noop,
+ apr_bucket_split_notimpl,
+ apr_bucket_shared_copy
+};
+
+apr_bucket *h2_bucket_headers_clone(apr_bucket *b, apr_pool_t *pool,
+ apr_bucket_alloc_t *list)
+{
+ h2_headers *hdrs = ((h2_bucket_headers *)b->data)->headers;
+ return h2_bucket_headers_create(list, h2_headers_clone(pool, hdrs));
+}
+
+
+h2_headers *h2_headers_create(int status, const apr_table_t *headers_in,
+ const apr_table_t *notes, apr_off_t raw_bytes,
+ apr_pool_t *pool)
+{
+ h2_headers *headers = apr_pcalloc(pool, sizeof(h2_headers));
+ headers->status = status;
+ headers->headers = (headers_in? apr_table_clone(pool, headers_in)
+ : apr_table_make(pool, 5));
+ headers->notes = (notes? apr_table_clone(pool, notes)
+ : apr_table_make(pool, 5));
+ return headers;
+}
+
+static int add_header_lengths(void *ctx, const char *name, const char *value)
+{
+ apr_size_t *plen = ctx;
+ *plen += strlen(name) + strlen(value);
+ return 1;
+}
+
+apr_size_t h2_headers_length(h2_headers *headers)
+{
+ apr_size_t len = 0;
+ apr_table_do(add_header_lengths, &len, headers->headers, NULL);
+ return len;
+}
+
+apr_size_t h2_bucket_headers_headers_length(apr_bucket *b)
+{
+ h2_headers *h = h2_bucket_headers_get(b);
+ return h? h2_headers_length(h) : 0;
+}
+
+h2_headers *h2_headers_rcreate(request_rec *r, int status,
+ const apr_table_t *header, apr_pool_t *pool)
+{
+ h2_headers *headers = h2_headers_create(status, header, r->notes, 0, pool);
+ if (headers->status == HTTP_FORBIDDEN) {
+ request_rec *r_prev;
+ for (r_prev = r; r_prev != NULL; r_prev = r_prev->prev) {
+ const char *cause = apr_table_get(r_prev->notes, "ssl-renegotiate-forbidden");
+ if (cause) {
+ /* This request triggered a TLS renegotiation that is not allowed
+ * in HTTP/2. Tell the client that it should use HTTP/1.1 for this.
+ */
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, headers->status, r,
+ APLOGNO(10399)
+ "h2_headers(%ld): renegotiate forbidden, cause: %s",
+ (long)r->connection->id, cause);
+ headers->status = H2_ERR_HTTP_1_1_REQUIRED;
+ break;
+ }
+ }
+ }
+ if (is_unsafe(r->server)) {
+ apr_table_setn(headers->notes, H2_HDR_CONFORMANCE, H2_HDR_CONFORMANCE_UNSAFE);
+ }
+ if (h2_config_rgeti(r, H2_CONF_PUSH) == 0 && h2_config_sgeti(r->server, H2_CONF_PUSH) != 0) {
+ apr_table_setn(headers->notes, H2_PUSH_MODE_NOTE, "0");
+ }
+ return headers;
+}
+
+h2_headers *h2_headers_copy(apr_pool_t *pool, h2_headers *h)
+{
+ return h2_headers_create(h->status, h->headers, h->notes, h->raw_bytes, pool);
+}
+
+h2_headers *h2_headers_clone(apr_pool_t *pool, h2_headers *h)
+{
+ return h2_headers_create(h->status, h->headers, h->notes, h->raw_bytes, pool);
+}
+
+h2_headers *h2_headers_die(apr_status_t type,
+ const h2_request *req, apr_pool_t *pool)
+{
+ h2_headers *headers;
+ char *date;
+
+ headers = apr_pcalloc(pool, sizeof(h2_headers));
+ headers->status = (type >= 200 && type < 600)? type : 500;
+ headers->headers = apr_table_make(pool, 5);
+ headers->notes = apr_table_make(pool, 5);
+
+ date = apr_palloc(pool, APR_RFC822_DATE_LEN);
+ ap_recent_rfc822_date(date, req? req->request_time : apr_time_now());
+ apr_table_setn(headers->headers, "Date", date);
+ apr_table_setn(headers->headers, "Server", ap_get_server_banner());
+
+ return headers;
+}
+
+int h2_headers_are_final_response(h2_headers *headers)
+{
+ return headers->status >= 200;
+}
+
+#endif /* !AP_HAS_RESPONSE_BUCKETS */
diff --git a/modules/http2/h2_headers.h b/modules/http2/h2_headers.h
new file mode 100644
index 0000000..3d78dc3
--- /dev/null
+++ b/modules/http2/h2_headers.h
@@ -0,0 +1,107 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __mod_h2__h2_headers__
+#define __mod_h2__h2_headers__
+
+#include "h2.h"
+
+#if !AP_HAS_RESPONSE_BUCKETS
+
+struct h2_bucket_beam;
+
+typedef struct h2_headers h2_headers;
+struct h2_headers {
+ int status;
+ apr_table_t *headers;
+ apr_table_t *notes;
+ apr_off_t raw_bytes; /* RAW network bytes that generated this request - if known. */
+};
+
+
+extern const apr_bucket_type_t h2_bucket_type_headers;
+
+#define H2_BUCKET_IS_HEADERS(e) (e->type == &h2_bucket_type_headers)
+
+apr_bucket * h2_bucket_headers_make(apr_bucket *b, h2_headers *r);
+
+apr_bucket * h2_bucket_headers_create(apr_bucket_alloc_t *list,
+ h2_headers *r);
+
+h2_headers *h2_bucket_headers_get(apr_bucket *b);
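+
+/* Usage sketch (hypothetical filter code): scan a brigade for a headers
+ * bucket and read its status.
+ *
+ *   for (b = APR_BRIGADE_FIRST(bb); b != APR_BRIGADE_SENTINEL(bb);
+ *        b = APR_BUCKET_NEXT(b)) {
+ *       if (H2_BUCKET_IS_HEADERS(b)) {
+ *           h2_headers *h = h2_bucket_headers_get(b);
+ *           ... h->status holds the HTTP status code ...
+ *       }
+ *   }
+ */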
+
+/**
+ * Create the headers from the given status and header fields.
+ * @param status the response status
+ * @param header the header fields
+ * @param notes the notes carried along with the headers
+ * @param raw_bytes the raw network bytes (if known) used to transmit these
+ * @param pool the memory pool to use
+ */
+h2_headers *h2_headers_create(int status, const apr_table_t *header,
+ const apr_table_t *notes, apr_off_t raw_bytes,
+ apr_pool_t *pool);
+
+/**
+ * Create the headers from the given request_rec.
+ * @param r the request record which was processed
+ * @param status the response status
+ * @param header the header fields
+ * @param pool the memory pool to use
+ */
+h2_headers *h2_headers_rcreate(request_rec *r, int status,
+ const apr_table_t *header, apr_pool_t *pool);
+
+/**
+ * Copy the headers into another pool. This will not copy any
+ * header strings.
+ */
+h2_headers *h2_headers_copy(apr_pool_t *pool, h2_headers *h);
+
+/**
+ * Clone the headers into another pool. This will also clone any
+ * header strings.
+ */
+h2_headers *h2_headers_clone(apr_pool_t *pool, h2_headers *h);
+
+/**
+ * Create the headers for the given error.
+ * @param type the error code
+ * @param req the original h2_request
+ * @param pool the memory pool to use
+ */
+h2_headers *h2_headers_die(apr_status_t type,
+ const struct h2_request *req, apr_pool_t *pool);
+
+int h2_headers_are_final_response(h2_headers *headers);
+
+/**
+ * Give the number of bytes of all contained header strings.
+ */
+apr_size_t h2_headers_length(h2_headers *headers);
+
+/**
+ * For H2HEADER buckets, return the length of all contained header strings.
+ * For all other buckets, return 0.
+ */
+apr_size_t h2_bucket_headers_headers_length(apr_bucket *b);
+
+apr_bucket *h2_bucket_headers_clone(apr_bucket *b, apr_pool_t *pool,
+ apr_bucket_alloc_t *list);
+
+#endif /* !AP_HAS_RESPONSE_BUCKETS */
+
+#endif /* defined(__mod_h2__h2_headers__) */
diff --git a/modules/http2/h2_mplx.c b/modules/http2/h2_mplx.c
new file mode 100644
index 0000000..99c47ea
--- /dev/null
+++ b/modules/http2/h2_mplx.c
@@ -0,0 +1,1191 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <assert.h>
+#include <stddef.h>
+#include <stdlib.h>
+
+#include <apr_atomic.h>
+#include <apr_thread_mutex.h>
+#include <apr_thread_cond.h>
+#include <apr_strings.h>
+#include <apr_time.h>
+
+#include <httpd.h>
+#include <http_core.h>
+#include <http_connection.h>
+#include <http_log.h>
+#include <http_protocol.h>
+
+#include <mpm_common.h>
+
+#include "mod_http2.h"
+
+#include "h2.h"
+#include "h2_private.h"
+#include "h2_bucket_beam.h"
+#include "h2_config.h"
+#include "h2_c1.h"
+#include "h2_conn_ctx.h"
+#include "h2_protocol.h"
+#include "h2_mplx.h"
+#include "h2_request.h"
+#include "h2_stream.h"
+#include "h2_session.h"
+#include "h2_c2.h"
+#include "h2_workers.h"
+#include "h2_util.h"
+
+
+/* utility for iterating over ihash stream sets */
+typedef struct {
+ h2_mplx *m;
+ h2_stream *stream;
+ apr_time_t now;
+ apr_size_t count;
+} stream_iter_ctx;
+
+static conn_rec *c2_prod_next(void *baton, int *phas_more);
+static void c2_prod_done(void *baton, conn_rec *c2);
+static void workers_shutdown(void *baton, int graceful);
+
+static void s_mplx_be_happy(h2_mplx *m, conn_rec *c, h2_conn_ctx_t *conn_ctx);
+static void m_be_annoyed(h2_mplx *m);
+
+static apr_status_t mplx_pollset_create(h2_mplx *m);
+static apr_status_t mplx_pollset_poll(h2_mplx *m, apr_interval_time_t timeout,
+ stream_ev_callback *on_stream_input,
+ stream_ev_callback *on_stream_output,
+ void *on_ctx);
+
+static apr_pool_t *pchild;
+
+/* APR callback invoked if allocation fails. */
+static int abort_on_oom(int retcode)
+{
+ ap_abort_on_oom();
+ return retcode; /* unreachable, hopefully. */
+}
+
+apr_status_t h2_mplx_c1_child_init(apr_pool_t *pool, server_rec *s)
+{
+ pchild = pool;
+ return APR_SUCCESS;
+}
+
+#define H2_MPLX_ENTER(m) \
+ do { apr_status_t rv_lock; if ((rv_lock = apr_thread_mutex_lock(m->lock)) != APR_SUCCESS) {\
+ return rv_lock;\
+ } } while(0)
+
+#define H2_MPLX_LEAVE(m) \
+ apr_thread_mutex_unlock(m->lock)
+
+#define H2_MPLX_ENTER_ALWAYS(m) \
+ apr_thread_mutex_lock(m->lock)
+
+#define H2_MPLX_ENTER_MAYBE(m, dolock) \
+ if (dolock) apr_thread_mutex_lock(m->lock)
+
+#define H2_MPLX_LEAVE_MAYBE(m, dolock) \
+ if (dolock) apr_thread_mutex_unlock(m->lock)
+
+static void c1_input_consumed(void *ctx, h2_bucket_beam *beam, apr_off_t length)
+{
+ h2_stream_in_consumed(ctx, length);
+}
+
+static int stream_is_running(h2_stream *stream)
+{
+ h2_conn_ctx_t *conn_ctx = h2_conn_ctx_get(stream->c2);
+ return conn_ctx && apr_atomic_read32(&conn_ctx->started) != 0
+ && apr_atomic_read32(&conn_ctx->done) == 0;
+}
+
+int h2_mplx_c1_stream_is_running(h2_mplx *m, h2_stream *stream)
+{
+ int rv;
+
+ H2_MPLX_ENTER(m);
+ rv = stream_is_running(stream);
+ H2_MPLX_LEAVE(m);
+ return rv;
+}
+
+static void c1c2_stream_joined(h2_mplx *m, h2_stream *stream)
+{
+ ap_assert(!stream_is_running(stream));
+
+ h2_ihash_remove(m->shold, stream->id);
+ APR_ARRAY_PUSH(m->spurge, h2_stream *) = stream;
+}
+
+static void m_stream_cleanup(h2_mplx *m, h2_stream *stream)
+{
+ h2_conn_ctx_t *c2_ctx = h2_conn_ctx_get(stream->c2);
+
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, m->c1,
+ H2_STRM_MSG(stream, "cleanup, unsubscribing from beam events"));
+ if (c2_ctx) {
+ if (c2_ctx->beam_out) {
+ h2_beam_on_was_empty(c2_ctx->beam_out, NULL, NULL);
+ }
+ if (c2_ctx->beam_in) {
+ h2_beam_on_send(c2_ctx->beam_in, NULL, NULL);
+ h2_beam_on_received(c2_ctx->beam_in, NULL, NULL);
+ h2_beam_on_consumed(c2_ctx->beam_in, NULL, NULL);
+ }
+ }
+
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, m->c1,
+ H2_STRM_MSG(stream, "cleanup, removing from registries"));
+ ap_assert(stream->state == H2_SS_CLEANUP);
+ h2_stream_cleanup(stream);
+ h2_ihash_remove(m->streams, stream->id);
+ h2_iq_remove(m->q, stream->id);
+
+ if (c2_ctx) {
+ if (!stream_is_running(stream)) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, m->c1,
+ H2_STRM_MSG(stream, "cleanup, c2 is done, move to spurge"));
+ /* processing has finished */
+ APR_ARRAY_PUSH(m->spurge, h2_stream *) = stream;
+ }
+ else {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, m->c1,
+ H2_STRM_MSG(stream, "cleanup, c2 is running, abort"));
+ /* c2 is still running */
+ h2_c2_abort(stream->c2, m->c1);
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, m->c1,
+ H2_STRM_MSG(stream, "cleanup, c2 is done, move to shold"));
+ h2_ihash_add(m->shold, stream);
+ }
+ }
+ else {
+ /* never started */
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, m->c1,
+ H2_STRM_MSG(stream, "cleanup, never started, move to spurge"));
+ APR_ARRAY_PUSH(m->spurge, h2_stream *) = stream;
+ }
+}
+
+static h2_c2_transit *c2_transit_create(h2_mplx *m)
+{
+ apr_allocator_t *allocator;
+ apr_pool_t *ptrans;
+ h2_c2_transit *transit;
+ apr_status_t rv;
+
+ /* We create a pool with its own allocator to be used for
+ * processing a request. This is the only way to have the processing
+ * independent of its parent pool in the sense that it can work in
+ * another thread.
+ */
+
+ rv = apr_allocator_create(&allocator);
+ if (rv == APR_SUCCESS) {
+ apr_allocator_max_free_set(allocator, ap_max_mem_free);
+ rv = apr_pool_create_ex(&ptrans, m->pool, NULL, allocator);
+ }
+ if (rv != APR_SUCCESS) {
+ /* maybe the log goes through, maybe not. */
+ ap_log_cerror(APLOG_MARK, APLOG_ERR, rv, m->c1,
+ APLOGNO(10004) "h2_mplx: create transit pool");
+ ap_abort_on_oom();
+ return NULL; /* should never be reached. */
+ }
+
+ apr_allocator_owner_set(allocator, ptrans);
+ apr_pool_abort_set(abort_on_oom, ptrans);
+ apr_pool_tag(ptrans, "h2_c2_transit");
+
+ transit = apr_pcalloc(ptrans, sizeof(*transit));
+ transit->pool = ptrans;
+ transit->bucket_alloc = apr_bucket_alloc_create(ptrans);
+ return transit;
+}
+
+static void c2_transit_destroy(h2_c2_transit *transit)
+{
+ apr_pool_destroy(transit->pool);
+}
+
+static h2_c2_transit *c2_transit_get(h2_mplx *m)
+{
+ h2_c2_transit **ptransit = apr_array_pop(m->c2_transits);
+ if (ptransit) {
+ return *ptransit;
+ }
+ return c2_transit_create(m);
+}
+
+static void c2_transit_recycle(h2_mplx *m, h2_c2_transit *transit)
+{
+ if (m->c2_transits->nelts >= APR_INT32_MAX ||
+ (apr_uint32_t)m->c2_transits->nelts >= m->max_spare_transits) {
+ c2_transit_destroy(transit);
+ }
+ else {
+ APR_ARRAY_PUSH(m->c2_transits, h2_c2_transit*) = transit;
+ }
+}
+
+/**
+ * A h2_mplx needs to be thread-safe, as it will be called by
+ * the h2_session thread *and* the h2_worker threads. Therefore:
+ * - calls are protected by a mutex lock, m->lock
+ * - the pool needs its own allocator, since apr_allocator_t is
+ * not re-entrant. The separate allocator works without a
+ * separate lock since we already protect h2_mplx itself.
+ * Since HTTP/2 connections can be expected to live longer than
+ * their HTTP/1 cousins, the separate allocator seems to work better
+ * than protecting a shared h2_session one with its own lock.
+ */
+h2_mplx *h2_mplx_c1_create(int child_num, apr_uint32_t id, h2_stream *stream0,
+ server_rec *s, apr_pool_t *parent,
+ h2_workers *workers)
+{
+ h2_conn_ctx_t *conn_ctx;
+ apr_status_t status = APR_SUCCESS;
+ apr_allocator_t *allocator;
+ apr_thread_mutex_t *mutex = NULL;
+ h2_mplx *m = NULL;
+
+ m = apr_pcalloc(parent, sizeof(h2_mplx));
+ m->stream0 = stream0;
+ m->c1 = stream0->c2;
+ m->s = s;
+ m->child_num = child_num;
+ m->id = id;
+
+ /* We create a pool with its own allocator to be used for
+ * processing secondary connections. This is the only way to have the
+ * processing independent of its parent pool in the sense that it
+ * can work in another thread. Also, the new allocator needs its own
+ * mutex to synchronize sub-pools.
+ */
+ status = apr_allocator_create(&allocator);
+ if (status != APR_SUCCESS) {
+ allocator = NULL;
+ goto failure;
+ }
+
+ apr_allocator_max_free_set(allocator, ap_max_mem_free);
+ apr_pool_create_ex(&m->pool, parent, NULL, allocator);
+ if (!m->pool) goto failure;
+
+ apr_pool_tag(m->pool, "h2_mplx");
+ apr_allocator_owner_set(allocator, m->pool);
+
+ status = apr_thread_mutex_create(&mutex, APR_THREAD_MUTEX_DEFAULT,
+ m->pool);
+ if (APR_SUCCESS != status) goto failure;
+ apr_allocator_mutex_set(allocator, mutex);
+
+ status = apr_thread_mutex_create(&m->lock, APR_THREAD_MUTEX_DEFAULT,
+ m->pool);
+ if (APR_SUCCESS != status) goto failure;
+
+ m->max_streams = h2_config_sgeti(s, H2_CONF_MAX_STREAMS);
+ m->stream_max_mem = h2_config_sgeti(s, H2_CONF_STREAM_MAX_MEM);
+
+ m->streams = h2_ihash_create(m->pool, offsetof(h2_stream,id));
+ m->shold = h2_ihash_create(m->pool, offsetof(h2_stream,id));
+ m->spurge = apr_array_make(m->pool, 10, sizeof(h2_stream*));
+ m->q = h2_iq_create(m->pool, m->max_streams);
+
+ m->workers = workers;
+ m->processing_max = H2MIN(h2_workers_get_max_workers(workers), m->max_streams);
+ m->processing_limit = 6; /* the original h1 max parallel connections */
+ m->last_mood_change = apr_time_now();
+ m->mood_update_interval = apr_time_from_msec(100);
+
+ status = mplx_pollset_create(m);
+ if (APR_SUCCESS != status) {
+ ap_log_cerror(APLOG_MARK, APLOG_ERR, status, m->c1, APLOGNO(10308)
+ "nghttp2: could not create pollset");
+ goto failure;
+ }
+ m->streams_ev_in = apr_array_make(m->pool, 10, sizeof(h2_stream*));
+ m->streams_ev_out = apr_array_make(m->pool, 10, sizeof(h2_stream*));
+
+ m->streams_input_read = h2_iq_create(m->pool, 10);
+ m->streams_output_written = h2_iq_create(m->pool, 10);
+ status = apr_thread_mutex_create(&m->poll_lock, APR_THREAD_MUTEX_DEFAULT,
+ m->pool);
+ if (APR_SUCCESS != status) goto failure;
+
+ conn_ctx = h2_conn_ctx_get(m->c1);
+ if (conn_ctx->pfd.reqevents) {
+ apr_pollset_add(m->pollset, &conn_ctx->pfd);
+ }
+
+ m->scratch_r = apr_pcalloc(m->pool, sizeof(*m->scratch_r));
+ m->max_spare_transits = 3;
+ m->c2_transits = apr_array_make(m->pool, (int)m->max_spare_transits,
+ sizeof(h2_c2_transit*));
+
+ m->producer = h2_workers_register(workers, m->pool,
+ apr_psprintf(m->pool, "h2-%u",
+ (unsigned int)m->id),
+ c2_prod_next, c2_prod_done,
+ workers_shutdown, m);
+ return m;
+
+failure:
+ if (m->pool) {
+ apr_pool_destroy(m->pool);
+ }
+ else if (allocator) {
+ apr_allocator_destroy(allocator);
+ }
+ return NULL;
+}
+
+int h2_mplx_c1_shutdown(h2_mplx *m)
+{
+ int max_stream_id_started = 0;
+
+ H2_MPLX_ENTER(m);
+
+ max_stream_id_started = m->max_stream_id_started;
+ /* Clear schedule queue, disabling existing streams from starting */
+ h2_iq_clear(m->q);
+
+ H2_MPLX_LEAVE(m);
+ return max_stream_id_started;
+}
+
+typedef struct {
+ h2_mplx_stream_cb *cb;
+ void *ctx;
+} stream_iter_ctx_t;
+
+static int m_stream_iter_wrap(void *ctx, void *stream)
+{
+ stream_iter_ctx_t *x = ctx;
+ return x->cb(stream, x->ctx);
+}
+
+apr_status_t h2_mplx_c1_streams_do(h2_mplx *m, h2_mplx_stream_cb *cb, void *ctx)
+{
+ stream_iter_ctx_t x;
+
+ H2_MPLX_ENTER(m);
+
+ x.cb = cb;
+ x.ctx = ctx;
+ h2_ihash_iter(m->streams, m_stream_iter_wrap, &x);
+
+ H2_MPLX_LEAVE(m);
+ return APR_SUCCESS;
+}
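+
+/* Usage sketch (hypothetical callback, assuming h2_mplx_stream_cb has the
+ * shape int (*)(h2_stream *, void *) as used by m_stream_iter_wrap above):
+ *
+ *   static int count_ready(h2_stream *stream, void *ctx)
+ *   {
+ *       int *pcount = ctx;
+ *       if (h2_stream_is_ready(stream)) ++(*pcount);
+ *       return 1;        (non-zero continues the iteration)
+ *   }
+ *
+ *   int n = 0;
+ *   h2_mplx_c1_streams_do(m, count_ready, &n);
+ */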
+
+static int m_report_stream_iter(void *ctx, void *val) {
+ h2_mplx *m = ctx;
+ h2_stream *stream = val;
+ h2_conn_ctx_t *conn_ctx = h2_conn_ctx_get(stream->c2);
+ ap_log_cerror(APLOG_MARK, APLOG_WARNING, 0, m->c1,
+ H2_STRM_MSG(stream, "started=%d, scheduled=%d, ready=%d, out_buffer=%ld"),
+ !!stream->c2, stream->scheduled, h2_stream_is_ready(stream),
+ (long)(stream->output? h2_beam_get_buffered(stream->output) : -1));
+ if (conn_ctx) {
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, m->c1, /* NO APLOGNO */
+ H2_STRM_MSG(stream, "->03198: %s %s %s"
+ "[started=%u/done=%u]"),
+ conn_ctx->request->method, conn_ctx->request->authority,
+ conn_ctx->request->path,
+ apr_atomic_read32(&conn_ctx->started),
+ apr_atomic_read32(&conn_ctx->done));
+ }
+ else {
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, m->c1, /* NO APLOGNO */
+ H2_STRM_MSG(stream, "->03198: not started"));
+ }
+ return 1;
+}
+
+static int m_unexpected_stream_iter(void *ctx, void *val) {
+ h2_mplx *m = ctx;
+ h2_stream *stream = val;
+ ap_log_cerror(APLOG_MARK, APLOG_WARNING, 0, m->c1, /* NO APLOGNO */
+ H2_STRM_MSG(stream, "unexpected, started=%d, scheduled=%d, ready=%d"),
+ !!stream->c2, stream->scheduled, h2_stream_is_ready(stream));
+ return 1;
+}
+
+static int m_stream_cancel_iter(void *ctx, void *val) {
+ h2_mplx *m = ctx;
+ h2_stream *stream = val;
+
+ /* take over event monitoring */
+ h2_stream_set_monitor(stream, NULL);
+ /* Reset, should transit to CLOSED state */
+ h2_stream_rst(stream, H2_ERR_NO_ERROR);
+ /* All connection data has been sent, simulate cleanup */
+ h2_stream_dispatch(stream, H2_SEV_EOS_SENT);
+ m_stream_cleanup(m, stream);
+ return 0;
+}
+
+void h2_mplx_c1_destroy(h2_mplx *m)
+{
+ apr_status_t status;
+ unsigned int i, wait_secs = 60;
+ int old_aborted;
+
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, m->c1,
+ H2_MPLX_MSG(m, "start release"));
+ /* How to shut down a h2 connection:
+ * 0. abort and tell the workers that no more work will come from us */
+ m->shutdown = m->aborted = 1;
+
+ H2_MPLX_ENTER_ALWAYS(m);
+
+ /* While really terminating any c2 connections, treat the master
+ * connection as aborted. It's not as if we could send any more data
+ * at this point. */
+ old_aborted = m->c1->aborted;
+ m->c1->aborted = 1;
+
+ /* How to shut down a h2 connection:
+ * 1. cancel all streams still active */
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c1,
+ H2_MPLX_MSG(m, "release, %u/%u/%d streams (total/hold/purge), %d streams"),
+ h2_ihash_count(m->streams),
+ h2_ihash_count(m->shold),
+ m->spurge->nelts, m->processing_count);
+ while (!h2_ihash_iter(m->streams, m_stream_cancel_iter, m)) {
+ /* until empty */
+ }
+
+ /* 2. no more streams should be scheduled or in the active set */
+ ap_assert(h2_ihash_empty(m->streams));
+ ap_assert(h2_iq_empty(m->q));
+
+ /* 3. while workers are busy on this connection, meaning they
+ * are processing streams from this connection, wait on them finishing
+ * in order to wake us and let us check again.
+ * Eventually, this has to succeed. */
+ if (!m->join_wait) {
+ apr_thread_cond_create(&m->join_wait, m->pool);
+ }
+
+ for (i = 0; h2_ihash_count(m->shold) > 0; ++i) {
+ status = apr_thread_cond_timedwait(m->join_wait, m->lock, apr_time_from_sec(wait_secs));
+
+ if (APR_STATUS_IS_TIMEUP(status)) {
+ /* This can happen if we have very long running requests
+ * that do not time out on IO. */
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, m->c1, APLOGNO(03198)
+ H2_MPLX_MSG(m, "waited %u sec for %u streams"),
+ i*wait_secs, h2_ihash_count(m->shold));
+ h2_ihash_iter(m->shold, m_report_stream_iter, m);
+ }
+ }
+
+ H2_MPLX_LEAVE(m);
+ h2_workers_join(m->workers, m->producer);
+ H2_MPLX_ENTER_ALWAYS(m);
+
+ /* 4. With all workers done, all streams should be in spurge */
+ ap_assert(m->processing_count == 0);
+ if (!h2_ihash_empty(m->shold)) {
+ ap_log_cerror(APLOG_MARK, APLOG_WARNING, 0, m->c1, APLOGNO(03516)
+ H2_MPLX_MSG(m, "unexpected %u streams in hold"),
+ h2_ihash_count(m->shold));
+ h2_ihash_iter(m->shold, m_unexpected_stream_iter, m);
+ }
+
+ m->c1->aborted = old_aborted;
+ H2_MPLX_LEAVE(m);
+
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c1,
+ H2_MPLX_MSG(m, "released"));
+}
+
+apr_status_t h2_mplx_c1_stream_cleanup(h2_mplx *m, h2_stream *stream,
+ unsigned int *pstream_count)
+{
+ H2_MPLX_ENTER(m);
+
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, m->c1,
+ H2_STRM_MSG(stream, "cleanup"));
+ m_stream_cleanup(m, stream);
+ *pstream_count = h2_ihash_count(m->streams);
+ H2_MPLX_LEAVE(m);
+ return APR_SUCCESS;
+}
+
+const h2_stream *h2_mplx_c2_stream_get(h2_mplx *m, int stream_id)
+{
+ h2_stream *s = NULL;
+
+ H2_MPLX_ENTER_ALWAYS(m);
+ s = h2_ihash_get(m->streams, stream_id);
+ H2_MPLX_LEAVE(m);
+
+ return s;
+}
+
+
+static void c1_update_scoreboard(h2_mplx *m, h2_stream *stream)
+{
+ if (stream->c2) {
+ m->scratch_r->connection = stream->c2;
+ m->scratch_r->bytes_sent = stream->out_frame_octets;
+ ap_increment_counts(m->c1->sbh, m->scratch_r);
+ m->scratch_r->connection = NULL;
+ }
+}
+
+static void c1_purge_streams(h2_mplx *m)
+{
+ h2_stream *stream;
+ int i;
+
+ for (i = 0; i < m->spurge->nelts; ++i) {
+ stream = APR_ARRAY_IDX(m->spurge, i, h2_stream*);
+ ap_assert(stream->state == H2_SS_CLEANUP);
+
+ c1_update_scoreboard(m, stream);
+
+ if (stream->input) {
+ h2_beam_destroy(stream->input, m->c1);
+ stream->input = NULL;
+ }
+ if (stream->c2) {
+ conn_rec *c2 = stream->c2;
+ h2_conn_ctx_t *c2_ctx = h2_conn_ctx_get(c2);
+ h2_c2_transit *transit;
+
+ stream->c2 = NULL;
+ ap_assert(c2_ctx);
+ transit = c2_ctx->transit;
+ h2_c2_destroy(c2); /* c2_ctx is gone as well */
+ if (transit) {
+ c2_transit_recycle(m, transit);
+ }
+ }
+ h2_stream_destroy(stream);
+ }
+ apr_array_clear(m->spurge);
+}
+
+apr_status_t h2_mplx_c1_poll(h2_mplx *m, apr_interval_time_t timeout,
+ stream_ev_callback *on_stream_input,
+ stream_ev_callback *on_stream_output,
+ void *on_ctx)
+{
+ apr_status_t rv;
+
+ H2_MPLX_ENTER(m);
+
+ if (m->aborted) {
+ rv = APR_ECONNABORTED;
+ goto cleanup;
+ }
+ /* Purge (destroy) streams outside of pollset processing.
+ * Streams that are registered in the pollset are removed
+ * when they are destroyed, but the pollset works on copies
+ * of these registrations. So, if we destroy streams while
+ * processing pollset events, we might access freed memory.
+ */
+ if (m->spurge->nelts) {
+ c1_purge_streams(m);
+ }
+ rv = mplx_pollset_poll(m, timeout, on_stream_input, on_stream_output, on_ctx);
+
+cleanup:
+ H2_MPLX_LEAVE(m);
+ return rv;
+}
+
+apr_status_t h2_mplx_c1_reprioritize(h2_mplx *m, h2_stream_pri_cmp_fn *cmp,
+ h2_session *session)
+{
+ apr_status_t status;
+
+ H2_MPLX_ENTER(m);
+
+ if (m->aborted) {
+ status = APR_ECONNABORTED;
+ }
+ else {
+ h2_iq_sort(m->q, cmp, session);
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c1,
+ H2_MPLX_MSG(m, "reprioritize streams"));
+ status = APR_SUCCESS;
+ }
+
+ H2_MPLX_LEAVE(m);
+ return status;
+}
+
+static apr_status_t c1_process_stream(h2_mplx *m,
+ h2_stream *stream,
+ h2_stream_pri_cmp_fn *cmp,
+ h2_session *session)
+{
+ apr_status_t rv = APR_SUCCESS;
+
+ if (m->aborted) {
+ rv = APR_ECONNABORTED;
+ goto cleanup;
+ }
+ if (!stream->request) {
+ rv = APR_EINVAL;
+ goto cleanup;
+ }
+ if (APLOGctrace1(m->c1)) {
+ const h2_request *r = stream->request;
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c1,
+ H2_STRM_MSG(stream, "process %s %s://%s%s"),
+ r->method, r->scheme, r->authority, r->path);
+ }
+
+ stream->scheduled = 1;
+ h2_ihash_add(m->streams, stream);
+ if (h2_stream_is_ready(stream)) {
+ /* already have a response */
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c1,
+ H2_STRM_MSG(stream, "process, ready already"));
+ }
+ else {
+ /* last chance to set anything up before stream is processed
+ * by worker threads. */
+ rv = h2_stream_prepare_processing(stream);
+ if (APR_SUCCESS != rv) goto cleanup;
+ h2_iq_add(m->q, stream->id, cmp, session);
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c1,
+ H2_STRM_MSG(stream, "process, added to q"));
+ }
+
+cleanup:
+ return rv;
+}
+
+void h2_mplx_c1_process(h2_mplx *m,
+ h2_iqueue *ready_to_process,
+ h2_stream_get_fn *get_stream,
+ h2_stream_pri_cmp_fn *stream_pri_cmp,
+ h2_session *session,
+ unsigned int *pstream_count)
+{
+ apr_status_t rv;
+ int sid;
+
+ H2_MPLX_ENTER_ALWAYS(m);
+
+ while ((sid = h2_iq_shift(ready_to_process)) > 0) {
+ h2_stream *stream = get_stream(session, sid);
+ if (stream) {
+ ap_assert(!stream->scheduled);
+ rv = c1_process_stream(session->mplx, stream, stream_pri_cmp, session);
+ if (APR_SUCCESS != rv) {
+ h2_stream_rst(stream, H2_ERR_INTERNAL_ERROR);
+ }
+ }
+ else {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c1,
+ H2_MPLX_MSG(m, "stream %d not found to process"), sid);
+ }
+ }
+ if ((m->processing_count < m->processing_limit) && !h2_iq_empty(m->q)) {
+ H2_MPLX_LEAVE(m);
+ rv = h2_workers_activate(m->workers, m->producer);
+ H2_MPLX_ENTER_ALWAYS(m);
+ if (rv != APR_SUCCESS) {
+ ap_log_cerror(APLOG_MARK, APLOG_ERR, rv, m->c1, APLOGNO(10021)
+ H2_MPLX_MSG(m, "activate at workers"));
+ }
+ }
+ *pstream_count = h2_ihash_count(m->streams);
+
+#if APR_POOL_DEBUG
+ do {
+ apr_size_t mem_g, mem_m, mem_s, mem_c1;
+
+ mem_g = pchild? apr_pool_num_bytes(pchild, 1) : 0;
+ mem_m = apr_pool_num_bytes(m->pool, 1);
+ mem_s = apr_pool_num_bytes(session->pool, 1);
+ mem_c1 = apr_pool_num_bytes(m->c1->pool, 1);
+ ap_log_cerror(APLOG_MARK, APLOG_INFO, 0, m->c1,
+ H2_MPLX_MSG(m, "child mem=%ld, mplx mem=%ld, session mem=%ld, c1=%ld"),
+ (long)mem_g, (long)mem_m, (long)mem_s, (long)mem_c1);
+
+ } while (0);
+#endif
+
+ H2_MPLX_LEAVE(m);
+}
+
+static void c2_beam_input_write_notify(void *ctx, h2_bucket_beam *beam)
+{
+ conn_rec *c = ctx;
+ h2_conn_ctx_t *conn_ctx = h2_conn_ctx_get(c);
+
+ (void)beam;
+ if (conn_ctx && conn_ctx->stream_id && conn_ctx->pipe_in[H2_PIPE_IN]) {
+ apr_file_putc(1, conn_ctx->pipe_in[H2_PIPE_IN]);
+ }
+}
+
+static void add_stream_poll_event(h2_mplx *m, int stream_id, h2_iqueue *q)
+{
+ apr_thread_mutex_lock(m->poll_lock);
+ if (h2_iq_append(q, stream_id) && h2_iq_count(q) == 1) {
+ /* newly added first */
+ apr_pollset_wakeup(m->pollset);
+ }
+ apr_thread_mutex_unlock(m->poll_lock);
+}
+
+static void c2_beam_input_read_notify(void *ctx, h2_bucket_beam *beam)
+{
+ conn_rec *c = ctx;
+ h2_conn_ctx_t *conn_ctx = h2_conn_ctx_get(c);
+
+ if (conn_ctx && conn_ctx->stream_id) {
+ add_stream_poll_event(conn_ctx->mplx, conn_ctx->stream_id,
+ conn_ctx->mplx->streams_input_read);
+ }
+}
+
+static void c2_beam_output_write_notify(void *ctx, h2_bucket_beam *beam)
+{
+ conn_rec *c = ctx;
+ h2_conn_ctx_t *conn_ctx = h2_conn_ctx_get(c);
+
+ if (conn_ctx && conn_ctx->stream_id) {
+ add_stream_poll_event(conn_ctx->mplx, conn_ctx->stream_id,
+ conn_ctx->mplx->streams_output_written);
+ }
+}
+
+static apr_status_t c2_setup_io(h2_mplx *m, conn_rec *c2, h2_stream *stream, h2_c2_transit *transit)
+{
+ h2_conn_ctx_t *conn_ctx;
+ apr_status_t rv = APR_SUCCESS;
+ const char *action = "init";
+
+ rv = h2_conn_ctx_init_for_c2(&conn_ctx, c2, m, stream, transit);
+ if (APR_SUCCESS != rv) goto cleanup;
+
+ if (!conn_ctx->beam_out) {
+ action = "create output beam";
+ rv = h2_beam_create(&conn_ctx->beam_out, c2, conn_ctx->req_pool,
+ stream->id, "output", 0, c2->base_server->timeout);
+ if (APR_SUCCESS != rv) goto cleanup;
+
+ h2_beam_buffer_size_set(conn_ctx->beam_out, m->stream_max_mem);
+ h2_beam_on_was_empty(conn_ctx->beam_out, c2_beam_output_write_notify, c2);
+ }
+
+ memset(&conn_ctx->pipe_in, 0, sizeof(conn_ctx->pipe_in));
+ if (stream->input) {
+ conn_ctx->beam_in = stream->input;
+ h2_beam_on_send(stream->input, c2_beam_input_write_notify, c2);
+ h2_beam_on_received(stream->input, c2_beam_input_read_notify, c2);
+ h2_beam_on_consumed(stream->input, c1_input_consumed, stream);
+#if H2_USE_PIPES
+ action = "create input write pipe";
+ rv = apr_file_pipe_create_pools(&conn_ctx->pipe_in[H2_PIPE_OUT],
+ &conn_ctx->pipe_in[H2_PIPE_IN],
+ APR_READ_BLOCK,
+ c2->pool, c2->pool);
+ if (APR_SUCCESS != rv) goto cleanup;
+#endif
+ }
+
+cleanup:
+ stream->output = (APR_SUCCESS == rv)? conn_ctx->beam_out : NULL;
+ if (APR_SUCCESS != rv) {
+ ap_log_cerror(APLOG_MARK, APLOG_ERR, rv, c2,
+ H2_STRM_LOG(APLOGNO(10309), stream,
+ "error %s"), action);
+ }
+ return rv;
+}
+
+static conn_rec *s_next_c2(h2_mplx *m)
+{
+ h2_stream *stream = NULL;
+ apr_status_t rv = APR_SUCCESS;
+ apr_uint32_t sid;
+ conn_rec *c2 = NULL;
+ h2_c2_transit *transit = NULL;
+
+ while (!m->aborted && !stream && (m->processing_count < m->processing_limit)
+ && (sid = h2_iq_shift(m->q)) > 0) {
+ stream = h2_ihash_get(m->streams, sid);
+ }
+
+ if (!stream) {
+ if (m->processing_count >= m->processing_limit && !h2_iq_empty(m->q)) {
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, m->c1,
+ H2_MPLX_MSG(m, "delaying request processing. "
+ "Current limit is %d and %d workers are in use."),
+ m->processing_limit, m->processing_count);
+ }
+ goto cleanup;
+ }
+
+ if (sid > m->max_stream_id_started) {
+ m->max_stream_id_started = sid;
+ }
+
+ transit = c2_transit_get(m);
+#if AP_HAS_RESPONSE_BUCKETS
+ c2 = ap_create_secondary_connection(transit->pool, m->c1, transit->bucket_alloc);
+#else
+ c2 = h2_c2_create(m->c1, transit->pool, transit->bucket_alloc);
+#endif
+ if (!c2) goto cleanup;
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE3, 0, m->c1,
+ H2_STRM_MSG(stream, "created new c2"));
+
+ rv = c2_setup_io(m, c2, stream, transit);
+ if (APR_SUCCESS != rv) goto cleanup;
+
+ stream->c2 = c2;
+ ++m->processing_count;
+
+cleanup:
+ if (APR_SUCCESS != rv && c2) {
+ h2_c2_destroy(c2);
+ c2 = NULL;
+ }
+ if (transit && !c2) {
+ c2_transit_recycle(m, transit);
+ }
+ return c2;
+}
+
+static conn_rec *c2_prod_next(void *baton, int *phas_more)
+{
+ h2_mplx *m = baton;
+ conn_rec *c = NULL;
+
+ H2_MPLX_ENTER_ALWAYS(m);
+ if (!m->aborted) {
+ c = s_next_c2(m);
+ *phas_more = (c != NULL && !h2_iq_empty(m->q));
+ }
+ H2_MPLX_LEAVE(m);
+ return c;
+}
+
+static void s_c2_done(h2_mplx *m, conn_rec *c2, h2_conn_ctx_t *conn_ctx)
+{
+ h2_stream *stream;
+
+ ap_assert(conn_ctx);
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c2,
+ "h2_mplx(%s-%d): c2 done", conn_ctx->id, conn_ctx->stream_id);
+
+ AP_DEBUG_ASSERT(apr_atomic_read32(&conn_ctx->done) == 0);
+ apr_atomic_set32(&conn_ctx->done, 1);
+ conn_ctx->done_at = apr_time_now();
+ ++c2->keepalives;
+ /* From here on, the final handling of c2 is done by c1 processing,
+ * which means we can give it c1's scoreboard handle for updates. */
+ c2->sbh = m->c1->sbh;
+
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, c2,
+ "h2_mplx(%s-%d): request done, %f ms elapsed",
+ conn_ctx->id, conn_ctx->stream_id,
+ (conn_ctx->done_at - conn_ctx->started_at) / 1000.0);
+
+ if (!conn_ctx->has_final_response) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, conn_ctx->last_err, c2,
+ "h2_c2(%s-%d): processing finished without final response",
+ conn_ctx->id, conn_ctx->stream_id);
+ c2->aborted = 1;
+ }
+ else if (!c2->aborted) {
+ s_mplx_be_happy(m, c2, conn_ctx);
+ }
+
+ stream = h2_ihash_get(m->streams, conn_ctx->stream_id);
+ if (stream) {
+ /* Stream not done yet. Trigger a potential polling on the output,
+ * since nothing more will happen here. */
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, c2,
+ H2_STRM_MSG(stream, "c2_done, stream open"));
+ c2_beam_output_write_notify(c2, NULL);
+ }
+ else if ((stream = h2_ihash_get(m->shold, conn_ctx->stream_id)) != NULL) {
+ /* stream is done, was just waiting for this. */
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, c2,
+ H2_STRM_MSG(stream, "c2_done, in hold"));
+ c1c2_stream_joined(m, stream);
+ }
+ else {
+ int i;
+
+ for (i = 0; i < m->spurge->nelts; ++i) {
+ if (stream == APR_ARRAY_IDX(m->spurge, i, h2_stream*)) {
+ ap_log_cerror(APLOG_MARK, APLOG_WARNING, 0, c2,
+ H2_STRM_LOG(APLOGNO(03517), stream, "already in spurge"));
+ ap_assert("stream should not be in spurge" == NULL);
+ return;
+ }
+ }
+
+ ap_log_cerror(APLOG_MARK, APLOG_WARNING, 0, c2, APLOGNO(03518)
+ "h2_mplx(%s-%d): c2_done, stream not found",
+ conn_ctx->id, conn_ctx->stream_id);
+ ap_assert("stream should still be available" == NULL);
+ }
+}
+
+static void c2_prod_done(void *baton, conn_rec *c2)
+{
+ h2_mplx *m = baton;
+ h2_conn_ctx_t *conn_ctx = h2_conn_ctx_get(c2);
+
+ AP_DEBUG_ASSERT(conn_ctx);
+ H2_MPLX_ENTER_ALWAYS(m);
+
+ --m->processing_count;
+ s_c2_done(m, c2, conn_ctx);
+ if (m->join_wait) apr_thread_cond_signal(m->join_wait);
+
+ H2_MPLX_LEAVE(m);
+}
+
+static void workers_shutdown(void *baton, int graceful)
+{
+ h2_mplx *m = baton;
+
+ apr_thread_mutex_lock(m->poll_lock);
+ /* time to wake up and assess what to do */
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, m->c1,
+ H2_MPLX_MSG(m, "workers shutdown, waking pollset"));
+ m->shutdown = 1;
+ if (!graceful) {
+ m->aborted = 1;
+ }
+ apr_pollset_wakeup(m->pollset);
+ apr_thread_mutex_unlock(m->poll_lock);
+}
+
+/*******************************************************************************
+ * h2_mplx DoS protection
+ ******************************************************************************/
+
+static void s_mplx_be_happy(h2_mplx *m, conn_rec *c, h2_conn_ctx_t *conn_ctx)
+{
+ apr_time_t now;
+
+ if (m->processing_limit < m->processing_max
+ && conn_ctx->started_at > m->last_mood_change) {
+ --m->irritations_since;
+ if (m->processing_limit < m->processing_max
+ && ((now = apr_time_now()) - m->last_mood_change >= m->mood_update_interval
+ || m->irritations_since < -m->processing_limit)) {
+ m->processing_limit = H2MIN(m->processing_limit * 2, m->processing_max);
+ m->last_mood_change = now;
+ m->irritations_since = 0;
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c,
+ H2_MPLX_MSG(m, "mood update, increasing worker limit to %d"),
+ m->processing_limit);
+ }
+ }
+}
+
+static void m_be_annoyed(h2_mplx *m)
+{
+ apr_time_t now;
+
+ if (m->processing_limit > 2) {
+ ++m->irritations_since;
+ if (((now = apr_time_now()) - m->last_mood_change >= m->mood_update_interval)
+ || (m->irritations_since >= m->processing_limit)) {
+
+ if (m->processing_limit > 16) {
+ m->processing_limit = 16;
+ }
+ else if (m->processing_limit > 8) {
+ m->processing_limit = 8;
+ }
+ else if (m->processing_limit > 4) {
+ m->processing_limit = 4;
+ }
+ else if (m->processing_limit > 2) {
+ m->processing_limit = 2;
+ }
+ m->last_mood_change = now;
+ m->irritations_since = 0;
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c1,
+ H2_MPLX_MSG(m, "mood update, decreasing worker limit to %d"),
+ m->processing_limit);
+ }
+ }
+}
+
+/*******************************************************************************
+ * mplx master events dispatching
+ ******************************************************************************/
+
+static int reset_is_acceptable(h2_stream *stream)
+{
+ /* A client may terminate a stream via an H2 RST_STREAM message at any time.
+ * This is annoying when we have committed resources (e.g. worker threads)
+ * to it, so our mood (e.g. willingness to commit resources on this
+ * connection in the future) goes down.
+ *
+ * This is a DoS protection. We do not want to make it too easy for
+ * a client to eat up server resources.
+ *
+ * However, there are cases where a RST_STREAM is the only way to end
+ * a request. This includes WebSockets and server-sent event (SSE) streams,
+ * whose responses would otherwise continue forever.
+ */
+ if (!stream_is_running(stream)) return 1;
+ if (!(stream->id & 0x01)) return 1; /* stream initiated by us. acceptable. */
+ if (!stream->response) return 0; /* no response headers produced yet. bad. */
+ if (!stream->out_data_frames) return 0; /* no response body data sent yet. bad. */
+ return 1; /* otherwise, be forgiving */
+}
+
+apr_status_t h2_mplx_c1_client_rst(h2_mplx *m, int stream_id)
+{
+ h2_stream *stream;
+ apr_status_t status = APR_SUCCESS;
+
+ H2_MPLX_ENTER_ALWAYS(m);
+ stream = h2_ihash_get(m->streams, stream_id);
+ if (stream && !reset_is_acceptable(stream)) {
+ m_be_annoyed(m);
+ }
+ H2_MPLX_LEAVE(m);
+ return status;
+}
+
+static apr_status_t mplx_pollset_create(h2_mplx *m)
+{
+ /* stream0 output only */
+ return apr_pollset_create(&m->pollset, 1, m->pool,
+ APR_POLLSET_WAKEABLE);
+}
+
+static apr_status_t mplx_pollset_poll(h2_mplx *m, apr_interval_time_t timeout,
+ stream_ev_callback *on_stream_input,
+ stream_ev_callback *on_stream_output,
+ void *on_ctx)
+{
+ apr_status_t rv;
+ const apr_pollfd_t *results, *pfd;
+ apr_int32_t nresults, i;
+ h2_conn_ctx_t *conn_ctx;
+ h2_stream *stream;
+
+ /* Make sure we are not called recursively. */
+ ap_assert(!m->polling);
+ m->polling = 1;
+ do {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, m->c1,
+ H2_MPLX_MSG(m, "enter polling timeout=%d"),
+ (int)apr_time_sec(timeout));
+
+ apr_array_clear(m->streams_ev_in);
+ apr_array_clear(m->streams_ev_out);
+
+ do {
+ /* add streams we started processing in the meantime */
+ apr_thread_mutex_lock(m->poll_lock);
+ if (!h2_iq_empty(m->streams_input_read)
+ || !h2_iq_empty(m->streams_output_written)) {
+ while ((i = h2_iq_shift(m->streams_input_read))) {
+ stream = h2_ihash_get(m->streams, i);
+ if (stream) {
+ APR_ARRAY_PUSH(m->streams_ev_in, h2_stream*) = stream;
+ }
+ }
+ while ((i = h2_iq_shift(m->streams_output_written))) {
+ stream = h2_ihash_get(m->streams, i);
+ if (stream) {
+ APR_ARRAY_PUSH(m->streams_ev_out, h2_stream*) = stream;
+ }
+ }
+ nresults = 0;
+ rv = APR_SUCCESS;
+ apr_thread_mutex_unlock(m->poll_lock);
+ break;
+ }
+ apr_thread_mutex_unlock(m->poll_lock);
+
+ H2_MPLX_LEAVE(m);
+ rv = apr_pollset_poll(m->pollset, timeout >= 0? timeout : -1, &nresults, &results);
+ H2_MPLX_ENTER_ALWAYS(m);
+ if (APR_STATUS_IS_EINTR(rv) && m->shutdown) {
+ if (!m->aborted) {
+ rv = APR_SUCCESS;
+ }
+ goto cleanup;
+ }
+ } while (APR_STATUS_IS_EINTR(rv));
+
+ if (APR_SUCCESS != rv) {
+ if (APR_STATUS_IS_TIMEUP(rv)) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, m->c1,
+ H2_MPLX_MSG(m, "polling timed out "));
+ }
+ else {
+ ap_log_cerror(APLOG_MARK, APLOG_ERR, rv, m->c1, APLOGNO(10310) \
+ H2_MPLX_MSG(m, "polling failed"));
+ }
+ goto cleanup;
+ }
+
+ for (i = 0; i < nresults; i++) {
+ pfd = &results[i];
+ conn_ctx = pfd->client_data;
+
+ AP_DEBUG_ASSERT(conn_ctx);
+ if (conn_ctx->stream_id == 0) {
+ if (on_stream_input) {
+ APR_ARRAY_PUSH(m->streams_ev_in, h2_stream*) = m->stream0;
+ }
+ continue;
+ }
+ }
+
+ if (on_stream_input && m->streams_ev_in->nelts) {
+ H2_MPLX_LEAVE(m);
+ for (i = 0; i < m->streams_ev_in->nelts; ++i) {
+ on_stream_input(on_ctx, APR_ARRAY_IDX(m->streams_ev_in, i, h2_stream*));
+ }
+ H2_MPLX_ENTER_ALWAYS(m);
+ }
+ if (on_stream_output && m->streams_ev_out->nelts) {
+ H2_MPLX_LEAVE(m);
+ for (i = 0; i < m->streams_ev_out->nelts; ++i) {
+ on_stream_output(on_ctx, APR_ARRAY_IDX(m->streams_ev_out, i, h2_stream*));
+ }
+ H2_MPLX_ENTER_ALWAYS(m);
+ }
+ break;
+ } while(1);
+
+cleanup:
+ m->polling = 0;
+ return rv;
+}
+
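A minimal sketch of how the primary connection (c1) side might drive the poll loop implemented above; the callback and wrapper names are illustrative assumptions, not part of this patch:

    #include <apr_time.h>
    #include "h2_mplx.h"

    /* Sketch only: assumed c1-side callbacks, not from the patch. */
    static void my_on_stream_input(void *ctx, struct h2_stream *stream)
    {
        /* feed more request body data into the stream's input beam */
    }

    static void my_on_stream_output(void *ctx, struct h2_stream *stream)
    {
        /* pull response data from the stream's output beam and send frames */
    }

    static apr_status_t my_c1_run(h2_mplx *m, void *session)
    {
        apr_status_t rv;

        do {
            /* waits up to 1 second or until a stream event is dispatched;
             * a real caller would also check its own shutdown conditions */
            rv = h2_mplx_c1_poll(m, apr_time_from_sec(1),
                                 my_on_stream_input, my_on_stream_output,
                                 session);
        } while (rv == APR_SUCCESS || APR_STATUS_IS_TIMEUP(rv));
        return rv;
    }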
diff --git a/modules/http2/h2_mplx.h b/modules/http2/h2_mplx.h
new file mode 100644
index 0000000..1f79aa8
--- /dev/null
+++ b/modules/http2/h2_mplx.h
@@ -0,0 +1,218 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __mod_h2__h2_mplx__
+#define __mod_h2__h2_mplx__
+
+/**
+ * The stream multiplexer. It handles communication between the
+ * primary HTTP/2 connection (c1) and the secondary connections (c2)
+ * that process the requests, i.e. the HTTP/2 streams.
+ *
+ * There is one h2_mplx instance for each h2_session.
+ *
+ * Naming Convention:
+ * "h2_mplx_c1_" are methods only to be called by the primary connection
+ * "h2_mplx_c2_" are methods only to be called by a secondary connection
+ * "h2_mplx_worker_" are methods only to be called by a h2 worker thread
+ */
+
+struct apr_pool_t;
+struct apr_thread_mutex_t;
+struct apr_thread_cond_t;
+struct h2_bucket_beam;
+struct h2_config;
+struct h2_ihash_t;
+struct h2_stream;
+struct h2_request;
+struct apr_thread_cond_t;
+struct h2_workers;
+struct h2_iqueue;
+
+#include <apr_queue.h>
+
+#include "h2_workers.h"
+
+typedef struct h2_c2_transit h2_c2_transit;
+
+struct h2_c2_transit {
+ apr_pool_t *pool;
+ apr_bucket_alloc_t *bucket_alloc;
+};
+
+typedef struct h2_mplx h2_mplx;
+
+struct h2_mplx {
+ int child_num; /* child this runs in */
+ apr_uint32_t id; /* id unique per child */
+ conn_rec *c1; /* the main connection */
+ apr_pool_t *pool;
+ struct h2_stream *stream0; /* HTTP/2's stream 0 */
+ server_rec *s; /* server for master conn */
+
+ int shutdown; /* we are shutting down */
+ int aborted; /* we need to get out of here asap */
+ int polling; /* is waiting/processing pollset events */
+ ap_conn_producer_t *producer; /* registered producer at h2_workers */
+
+ struct h2_ihash_t *streams; /* all streams active */
+ struct h2_ihash_t *shold; /* streams done on c1, with c2 processing still ongoing */
+ apr_array_header_t *spurge; /* all streams done, ready for destroy */
+
+ struct h2_iqueue *q; /* all stream ids that need to be started */
+
+ apr_size_t stream_max_mem; /* max memory to buffer for a stream */
+ apr_uint32_t max_streams; /* max # of concurrent streams */
+ apr_uint32_t max_stream_id_started; /* highest stream id that started processing */
+
+ apr_uint32_t processing_count; /* # of c2 working for this mplx */
+ apr_uint32_t processing_limit; /* current limit on processing c2s, dynamic */
+ apr_uint32_t processing_max; /* max, hard limit of processing c2s */
+
+ apr_time_t last_mood_change; /* last time the processing limit changed */
+ apr_interval_time_t mood_update_interval; /* how frequently we update, at most */
+ apr_uint32_t irritations_since; /* irritations (>0) or happy events (<0) since last mood change */
+
+ apr_thread_mutex_t *lock;
+ struct apr_thread_cond_t *join_wait;
+
+ apr_pollset_t *pollset; /* pollset for c1/c2 IO events */
+ apr_array_header_t *streams_ev_in;
+ apr_array_header_t *streams_ev_out;
+
+ apr_thread_mutex_t *poll_lock; /* protect modifications of queues below */
+ struct h2_iqueue *streams_input_read; /* streams whose input has been read from */
+ struct h2_iqueue *streams_output_written; /* streams whose output has been written to */
+
+ struct h2_workers *workers; /* h2 workers process wide instance */
+
+ request_rec *scratch_r; /* pseudo request_rec for scoreboard reporting */
+
+ apr_uint32_t max_spare_transits; /* max number of transit pools idling */
+ apr_array_header_t *c2_transits; /* base pools for running c2 connections */
+};
+
+apr_status_t h2_mplx_c1_child_init(apr_pool_t *pool, server_rec *s);
+
+/**
+ * Create the multiplexer for the given HTTP/2 session.
+ */
+h2_mplx *h2_mplx_c1_create(int child_id, apr_uint32_t id,
+ struct h2_stream *stream0,
+ server_rec *s, apr_pool_t *master,
+ struct h2_workers *workers);
+
+/**
+ * Destroy the mplx, shutting down all ongoing processing.
+ * @param m the mplx to destroy
+ */
+void h2_mplx_c1_destroy(h2_mplx *m);
+
+/**
+ * Shut down the multiplexer gracefully. Will no longer schedule new streams
+ * but let the ongoing ones finish normally.
+ * @return the highest stream id that is being or has been processed
+ */
+int h2_mplx_c1_shutdown(h2_mplx *m);
+
+/**
+ * Notifies mplx that a stream has been completely handled on the main
+ * connection and is ready for cleanup.
+ *
+ * @param m the mplx itself
+ * @param stream the stream ready for cleanup
+ * @param pstream_count return the number of streams active
+ */
+apr_status_t h2_mplx_c1_stream_cleanup(h2_mplx *m, struct h2_stream *stream,
+ unsigned int *pstream_count);
+
+int h2_mplx_c1_stream_is_running(h2_mplx *m, struct h2_stream *stream);
+
+/**
+ * Process the streams that have become ready for processing.
+ *
+ * @param m the multiplexer
+ * @param ready_to_process queue of stream ids ready for processing
+ * @param get_stream callback to look up a stream by id in the session
+ * @param cmp the stream priority compare function
+ * @param session the session owning the streams
+ * @param pstream_count on return, the number of streams active in the mplx
+ */
+void h2_mplx_c1_process(h2_mplx *m,
+ struct h2_iqueue *ready_to_process,
+ h2_stream_get_fn *get_stream,
+ h2_stream_pri_cmp_fn *cmp,
+ struct h2_session *session,
+ unsigned int *pstream_count);
+
+/**
+ * Stream priorities have changed, reschedule pending requests.
+ *
+ * @param m the multiplexer
+ * @param cmp the stream priority compare function
+ * @param session the session passed to the compare function
+ */
+apr_status_t h2_mplx_c1_reprioritize(h2_mplx *m, h2_stream_pri_cmp_fn *cmp,
+ struct h2_session *session);
+
+typedef void stream_ev_callback(void *ctx, struct h2_stream *stream);
+
+/**
+ * Poll the primary connection for input and the active streams for output.
+ * Invoke the callback for any stream where an event happened.
+ */
+apr_status_t h2_mplx_c1_poll(h2_mplx *m, apr_interval_time_t timeout,
+ stream_ev_callback *on_stream_input,
+ stream_ev_callback *on_stream_output,
+ void *on_ctx);
+
+void h2_mplx_c2_input_read(h2_mplx *m, conn_rec *c2);
+void h2_mplx_c2_output_written(h2_mplx *m, conn_rec *c2);
+
+typedef int h2_mplx_stream_cb(struct h2_stream *s, void *userdata);
+
+/**
+ * Iterate over all streams known to mplx from the primary connection.
+ * @param m the mplx
+ * @param cb the callback to invoke on each stream
+ * @param ctx userdata passed to the callback
+ */
+apr_status_t h2_mplx_c1_streams_do(h2_mplx *m, h2_mplx_stream_cb *cb, void *ctx);
+
+/**
+ * A stream has been RST_STREAM by the client. Abort
+ * any processing going on and remove from processing
+ * queue.
+ */
+apr_status_t h2_mplx_c1_client_rst(h2_mplx *m, int stream_id);
+
+/**
+ * Get read-only access to a stream for a secondary connection.
+ */
+const struct h2_stream *h2_mplx_c2_stream_get(h2_mplx *m, int stream_id);
+
+/**
+ * An h2 worker asks for a secondary connection to process.
+ * @param out_c2 non-NULL pointer in which to receive the next
+ * secondary connection to process.
+ */
+apr_status_t h2_mplx_worker_pop_c2(h2_mplx *m, conn_rec **out_c2);
+
+#define H2_MPLX_MSG(m, msg) \
+ "h2_mplx(%d-%lu): "msg, m->child_num, (unsigned long)m->id
+
+#endif /* defined(__mod_h2__h2_mplx__) */
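As a small, hedged usage note: the H2_MPLX_MSG() macro above merely prefixes a log format with the child number and mplx id, so it can be dropped into any ap_log_cerror() call. The helper name and counter below are made up for illustration:

    #include <http_log.h>
    #include "h2_mplx.h"

    /* Illustration only: log with the mplx's child/id prefix, as h2_mplx.c
     * does throughout. */
    static void my_log_stream_count(h2_mplx *m, unsigned int n)
    {
        ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c1,
                      H2_MPLX_MSG(m, "currently %u streams active"), n);
    }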
diff --git a/modules/http2/h2_private.h b/modules/http2/h2_private.h
new file mode 100644
index 0000000..516be13
--- /dev/null
+++ b/modules/http2/h2_private.h
@@ -0,0 +1,28 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef mod_h2_h2_private_h
+#define mod_h2_h2_private_h
+
+#include <apr_time.h>
+
+#include <nghttp2/nghttp2.h>
+
+extern module AP_MODULE_DECLARE_DATA http2_module;
+
+APLOG_USE_MODULE(http2);
+
+#endif
diff --git a/modules/http2/h2_protocol.c b/modules/http2/h2_protocol.c
new file mode 100644
index 0000000..874753e
--- /dev/null
+++ b/modules/http2/h2_protocol.c
@@ -0,0 +1,485 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <assert.h>
+
+#include <apr_strings.h>
+#include <apr_optional.h>
+#include <apr_optional_hooks.h>
+
+#include <httpd.h>
+#include <http_core.h>
+#include <http_config.h>
+#include <http_connection.h>
+#include <http_protocol.h>
+#include <http_request.h>
+#include <http_ssl.h>
+#include <http_log.h>
+
+#include "mod_http2.h"
+#include "h2_private.h"
+
+#include "h2_bucket_beam.h"
+#include "h2_stream.h"
+#include "h2_c2.h"
+#include "h2_config.h"
+#include "h2_conn_ctx.h"
+#include "h2_c1.h"
+#include "h2_request.h"
+#include "h2_headers.h"
+#include "h2_session.h"
+#include "h2_util.h"
+#include "h2_protocol.h"
+#include "mod_http2.h"
+
+const char *h2_protocol_ids_tls[] = {
+ "h2", NULL
+};
+
+const char *h2_protocol_ids_clear[] = {
+ "h2c", NULL
+};
+
+const char *H2_MAGIC_TOKEN = "PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n";
+
+/*******************************************************************************
+ * HTTP/2 error stuff
+ */
+static const char *h2_err_descr[] = {
+ "no error", /* 0x0 */
+ "protocol error",
+ "internal error",
+ "flow control error",
+ "settings timeout",
+ "stream closed", /* 0x5 */
+ "frame size error",
+ "refused stream",
+ "cancel",
+ "compression error",
+ "connect error", /* 0xa */
+ "enhance your calm",
+ "inadequate security",
+ "http/1.1 required",
+};
+
+const char *h2_protocol_err_description(unsigned int h2_error)
+{
+ if (h2_error < (sizeof(h2_err_descr)/sizeof(h2_err_descr[0]))) {
+ return h2_err_descr[h2_error];
+ }
+ return "unknown http/2 error code";
+}
+
+/*******************************************************************************
+ * Check connection security requirements of RFC 7540
+ */
+
+/*
+ * Blacklisted ciphers from RFC 7540 Appendix A
+ */
+static const char *RFC7540_names[] = {
+ /* ciphers with NULL encryption */
+ "NULL-MD5", /* TLS_NULL_WITH_NULL_NULL */
+ /* same */ /* TLS_RSA_WITH_NULL_MD5 */
+ "NULL-SHA", /* TLS_RSA_WITH_NULL_SHA */
+ "NULL-SHA256", /* TLS_RSA_WITH_NULL_SHA256 */
+ "PSK-NULL-SHA", /* TLS_PSK_WITH_NULL_SHA */
+ "DHE-PSK-NULL-SHA", /* TLS_DHE_PSK_WITH_NULL_SHA */
+ "RSA-PSK-NULL-SHA", /* TLS_RSA_PSK_WITH_NULL_SHA */
+ "PSK-NULL-SHA256", /* TLS_PSK_WITH_NULL_SHA256 */
+ "PSK-NULL-SHA384", /* TLS_PSK_WITH_NULL_SHA384 */
+ "DHE-PSK-NULL-SHA256", /* TLS_DHE_PSK_WITH_NULL_SHA256 */
+ "DHE-PSK-NULL-SHA384", /* TLS_DHE_PSK_WITH_NULL_SHA384 */
+ "RSA-PSK-NULL-SHA256", /* TLS_RSA_PSK_WITH_NULL_SHA256 */
+ "RSA-PSK-NULL-SHA384", /* TLS_RSA_PSK_WITH_NULL_SHA384 */
+ "ECDH-ECDSA-NULL-SHA", /* TLS_ECDH_ECDSA_WITH_NULL_SHA */
+ "ECDHE-ECDSA-NULL-SHA", /* TLS_ECDHE_ECDSA_WITH_NULL_SHA */
+ "ECDH-RSA-NULL-SHA", /* TLS_ECDH_RSA_WITH_NULL_SHA */
+ "ECDHE-RSA-NULL-SHA", /* TLS_ECDHE_RSA_WITH_NULL_SHA */
+ "AECDH-NULL-SHA", /* TLS_ECDH_anon_WITH_NULL_SHA */
+ "ECDHE-PSK-NULL-SHA", /* TLS_ECDHE_PSK_WITH_NULL_SHA */
+ "ECDHE-PSK-NULL-SHA256", /* TLS_ECDHE_PSK_WITH_NULL_SHA256 */
+ "ECDHE-PSK-NULL-SHA384", /* TLS_ECDHE_PSK_WITH_NULL_SHA384 */
+
+ /* DES/3DES ciphers */
+ "PSK-3DES-EDE-CBC-SHA", /* TLS_PSK_WITH_3DES_EDE_CBC_SHA */
+ "DHE-PSK-3DES-EDE-CBC-SHA", /* TLS_DHE_PSK_WITH_3DES_EDE_CBC_SHA */
+ "RSA-PSK-3DES-EDE-CBC-SHA", /* TLS_RSA_PSK_WITH_3DES_EDE_CBC_SHA */
+ "ECDH-ECDSA-DES-CBC3-SHA", /* TLS_ECDH_ECDSA_WITH_3DES_EDE_CBC_SHA */
+ "ECDHE-ECDSA-DES-CBC3-SHA", /* TLS_ECDHE_ECDSA_WITH_3DES_EDE_CBC_SHA */
+ "ECDH-RSA-DES-CBC3-SHA", /* TLS_ECDH_RSA_WITH_3DES_EDE_CBC_SHA */
+ "ECDHE-RSA-DES-CBC3-SHA", /* TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA */
+ "AECDH-DES-CBC3-SHA", /* TLS_ECDH_anon_WITH_3DES_EDE_CBC_SHA */
+ "SRP-3DES-EDE-CBC-SHA", /* TLS_SRP_SHA_WITH_3DES_EDE_CBC_SHA */
+ "SRP-RSA-3DES-EDE-CBC-SHA", /* TLS_SRP_SHA_RSA_WITH_3DES_EDE_CBC_SHA */
+ "SRP-DSS-3DES-EDE-CBC-SHA", /* TLS_SRP_SHA_DSS_WITH_3DES_EDE_CBC_SHA */
+ "ECDHE-PSK-3DES-EDE-CBC-SHA", /* TLS_ECDHE_PSK_WITH_3DES_EDE_CBC_SHA */
+ "DES-CBC-SHA", /* TLS_RSA_WITH_DES_CBC_SHA */
+ "DES-CBC3-SHA", /* TLS_RSA_WITH_3DES_EDE_CBC_SHA */
+ "DHE-DSS-DES-CBC3-SHA", /* TLS_DHE_DSS_WITH_3DES_EDE_CBC_SHA */
+ "DHE-RSA-DES-CBC-SHA", /* TLS_DHE_RSA_WITH_DES_CBC_SHA */
+ "DHE-RSA-DES-CBC3-SHA", /* TLS_DHE_RSA_WITH_3DES_EDE_CBC_SHA */
+ "ADH-DES-CBC-SHA", /* TLS_DH_anon_WITH_DES_CBC_SHA */
+ "ADH-DES-CBC3-SHA", /* TLS_DH_anon_WITH_3DES_EDE_CBC_SHA */
+ "EXP-DH-DSS-DES-CBC-SHA", /* TLS_DH_DSS_EXPORT_WITH_DES40_CBC_SHA */
+ "DH-DSS-DES-CBC-SHA", /* TLS_DH_DSS_WITH_DES_CBC_SHA */
+ "DH-DSS-DES-CBC3-SHA", /* TLS_DH_DSS_WITH_3DES_EDE_CBC_SHA */
+ "EXP-DH-RSA-DES-CBC-SHA", /* TLS_DH_RSA_EXPORT_WITH_DES40_CBC_SHA */
+ "DH-RSA-DES-CBC-SHA", /* TLS_DH_RSA_WITH_DES_CBC_SHA */
+ "DH-RSA-DES-CBC3-SHA", /* TLS_DH_RSA_WITH_3DES_EDE_CBC_SHA */
+
+ /* blacklisted EXPORT ciphers */
+ "EXP-RC4-MD5", /* TLS_RSA_EXPORT_WITH_RC4_40_MD5 */
+ "EXP-RC2-CBC-MD5", /* TLS_RSA_EXPORT_WITH_RC2_CBC_40_MD5 */
+ "EXP-DES-CBC-SHA", /* TLS_RSA_EXPORT_WITH_DES40_CBC_SHA */
+ "EXP-DHE-DSS-DES-CBC-SHA", /* TLS_DHE_DSS_EXPORT_WITH_DES40_CBC_SHA */
+ "EXP-DHE-RSA-DES-CBC-SHA", /* TLS_DHE_RSA_EXPORT_WITH_DES40_CBC_SHA */
+ "EXP-ADH-DES-CBC-SHA", /* TLS_DH_anon_EXPORT_WITH_DES40_CBC_SHA */
+ "EXP-ADH-RC4-MD5", /* TLS_DH_anon_EXPORT_WITH_RC4_40_MD5 */
+
+ /* blacklisted RC4 encryption */
+ "RC4-MD5", /* TLS_RSA_WITH_RC4_128_MD5 */
+ "RC4-SHA", /* TLS_RSA_WITH_RC4_128_SHA */
+ "ADH-RC4-MD5", /* TLS_DH_anon_WITH_RC4_128_MD5 */
+ "KRB5-RC4-SHA", /* TLS_KRB5_WITH_RC4_128_SHA */
+ "KRB5-RC4-MD5", /* TLS_KRB5_WITH_RC4_128_MD5 */
+ "EXP-KRB5-RC4-SHA", /* TLS_KRB5_EXPORT_WITH_RC4_40_SHA */
+ "EXP-KRB5-RC4-MD5", /* TLS_KRB5_EXPORT_WITH_RC4_40_MD5 */
+ "PSK-RC4-SHA", /* TLS_PSK_WITH_RC4_128_SHA */
+ "DHE-PSK-RC4-SHA", /* TLS_DHE_PSK_WITH_RC4_128_SHA */
+ "RSA-PSK-RC4-SHA", /* TLS_RSA_PSK_WITH_RC4_128_SHA */
+ "ECDH-ECDSA-RC4-SHA", /* TLS_ECDH_ECDSA_WITH_RC4_128_SHA */
+ "ECDHE-ECDSA-RC4-SHA", /* TLS_ECDHE_ECDSA_WITH_RC4_128_SHA */
+ "ECDH-RSA-RC4-SHA", /* TLS_ECDH_RSA_WITH_RC4_128_SHA */
+ "ECDHE-RSA-RC4-SHA", /* TLS_ECDHE_RSA_WITH_RC4_128_SHA */
+ "AECDH-RC4-SHA", /* TLS_ECDH_anon_WITH_RC4_128_SHA */
+ "ECDHE-PSK-RC4-SHA", /* TLS_ECDHE_PSK_WITH_RC4_128_SHA */
+
+ /* blacklisted AES128 encryption ciphers */
+ "AES128-SHA256", /* TLS_RSA_WITH_AES_128_CBC_SHA */
+ "DH-DSS-AES128-SHA", /* TLS_DH_DSS_WITH_AES_128_CBC_SHA */
+ "DH-RSA-AES128-SHA", /* TLS_DH_RSA_WITH_AES_128_CBC_SHA */
+ "DHE-DSS-AES128-SHA", /* TLS_DHE_DSS_WITH_AES_128_CBC_SHA */
+ "DHE-RSA-AES128-SHA", /* TLS_DHE_RSA_WITH_AES_128_CBC_SHA */
+ "ADH-AES128-SHA", /* TLS_DH_anon_WITH_AES_128_CBC_SHA */
+ "AES128-SHA256", /* TLS_RSA_WITH_AES_128_CBC_SHA256 */
+ "DH-DSS-AES128-SHA256", /* TLS_DH_DSS_WITH_AES_128_CBC_SHA256 */
+ "DH-RSA-AES128-SHA256", /* TLS_DH_RSA_WITH_AES_128_CBC_SHA256 */
+ "DHE-DSS-AES128-SHA256", /* TLS_DHE_DSS_WITH_AES_128_CBC_SHA256 */
+ "DHE-RSA-AES128-SHA256", /* TLS_DHE_RSA_WITH_AES_128_CBC_SHA256 */
+ "ECDH-ECDSA-AES128-SHA", /* TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA */
+ "ECDHE-ECDSA-AES128-SHA", /* TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA */
+ "ECDH-RSA-AES128-SHA", /* TLS_ECDH_RSA_WITH_AES_128_CBC_SHA */
+ "ECDHE-RSA-AES128-SHA", /* TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA */
+ "AECDH-AES128-SHA", /* TLS_ECDH_anon_WITH_AES_128_CBC_SHA */
+ "ECDHE-ECDSA-AES128-SHA256", /* TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256 */
+ "ECDH-ECDSA-AES128-SHA256", /* TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA256 */
+ "ECDHE-RSA-AES128-SHA256", /* TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256 */
+ "ECDH-RSA-AES128-SHA256", /* TLS_ECDH_RSA_WITH_AES_128_CBC_SHA256 */
+ "ADH-AES128-SHA256", /* TLS_DH_anon_WITH_AES_128_CBC_SHA256 */
+ "PSK-AES128-CBC-SHA", /* TLS_PSK_WITH_AES_128_CBC_SHA */
+ "DHE-PSK-AES128-CBC-SHA", /* TLS_DHE_PSK_WITH_AES_128_CBC_SHA */
+ "RSA-PSK-AES128-CBC-SHA", /* TLS_RSA_PSK_WITH_AES_128_CBC_SHA */
+ "PSK-AES128-CBC-SHA256", /* TLS_PSK_WITH_AES_128_CBC_SHA256 */
+ "DHE-PSK-AES128-CBC-SHA256", /* TLS_DHE_PSK_WITH_AES_128_CBC_SHA256 */
+ "RSA-PSK-AES128-CBC-SHA256", /* TLS_RSA_PSK_WITH_AES_128_CBC_SHA256 */
+ "ECDHE-PSK-AES128-CBC-SHA", /* TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA */
+ "ECDHE-PSK-AES128-CBC-SHA256", /* TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA256 */
+ "AES128-CCM", /* TLS_RSA_WITH_AES_128_CCM */
+ "AES128-CCM8", /* TLS_RSA_WITH_AES_128_CCM_8 */
+ "PSK-AES128-CCM", /* TLS_PSK_WITH_AES_128_CCM */
+ "PSK-AES128-CCM8", /* TLS_PSK_WITH_AES_128_CCM_8 */
+ "AES128-GCM-SHA256", /* TLS_RSA_WITH_AES_128_GCM_SHA256 */
+ "DH-RSA-AES128-GCM-SHA256", /* TLS_DH_RSA_WITH_AES_128_GCM_SHA256 */
+ "DH-DSS-AES128-GCM-SHA256", /* TLS_DH_DSS_WITH_AES_128_GCM_SHA256 */
+ "ADH-AES128-GCM-SHA256", /* TLS_DH_anon_WITH_AES_128_GCM_SHA256 */
+ "PSK-AES128-GCM-SHA256", /* TLS_PSK_WITH_AES_128_GCM_SHA256 */
+ "RSA-PSK-AES128-GCM-SHA256", /* TLS_RSA_PSK_WITH_AES_128_GCM_SHA256 */
+ "ECDH-ECDSA-AES128-GCM-SHA256", /* TLS_ECDH_ECDSA_WITH_AES_128_GCM_SHA256 */
+ "ECDH-RSA-AES128-GCM-SHA256", /* TLS_ECDH_RSA_WITH_AES_128_GCM_SHA256 */
+ "SRP-AES-128-CBC-SHA", /* TLS_SRP_SHA_WITH_AES_128_CBC_SHA */
+ "SRP-RSA-AES-128-CBC-SHA", /* TLS_SRP_SHA_RSA_WITH_AES_128_CBC_SHA */
+ "SRP-DSS-AES-128-CBC-SHA", /* TLS_SRP_SHA_DSS_WITH_AES_128_CBC_SHA */
+
+ /* blacklisted AES256 encryption ciphers */
+ "AES256-SHA", /* TLS_RSA_WITH_AES_256_CBC_SHA */
+ "DH-DSS-AES256-SHA", /* TLS_DH_DSS_WITH_AES_256_CBC_SHA */
+ "DH-RSA-AES256-SHA", /* TLS_DH_RSA_WITH_AES_256_CBC_SHA */
+ "DHE-DSS-AES256-SHA", /* TLS_DHE_DSS_WITH_AES_256_CBC_SHA */
+ "DHE-RSA-AES256-SHA", /* TLS_DHE_RSA_WITH_AES_256_CBC_SHA */
+ "ADH-AES256-SHA", /* TLS_DH_anon_WITH_AES_256_CBC_SHA */
+ "AES256-SHA256", /* TLS_RSA_WITH_AES_256_CBC_SHA256 */
+ "DH-DSS-AES256-SHA256", /* TLS_DH_DSS_WITH_AES_256_CBC_SHA256 */
+ "DH-RSA-AES256-SHA256", /* TLS_DH_RSA_WITH_AES_256_CBC_SHA256 */
+ "DHE-DSS-AES256-SHA256", /* TLS_DHE_DSS_WITH_AES_256_CBC_SHA256 */
+ "DHE-RSA-AES256-SHA256", /* TLS_DHE_RSA_WITH_AES_256_CBC_SHA256 */
+ "ADH-AES256-SHA256", /* TLS_DH_anon_WITH_AES_256_CBC_SHA256 */
+ "ECDH-ECDSA-AES256-SHA", /* TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA */
+ "ECDHE-ECDSA-AES256-SHA", /* TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA */
+ "ECDH-RSA-AES256-SHA", /* TLS_ECDH_RSA_WITH_AES_256_CBC_SHA */
+ "ECDHE-RSA-AES256-SHA", /* TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA */
+ "AECDH-AES256-SHA", /* TLS_ECDH_anon_WITH_AES_256_CBC_SHA */
+ "ECDHE-ECDSA-AES256-SHA384", /* TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384 */
+ "ECDH-ECDSA-AES256-SHA384", /* TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA384 */
+ "ECDHE-RSA-AES256-SHA384", /* TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384 */
+ "ECDH-RSA-AES256-SHA384", /* TLS_ECDH_RSA_WITH_AES_256_CBC_SHA384 */
+ "PSK-AES256-CBC-SHA", /* TLS_PSK_WITH_AES_256_CBC_SHA */
+ "DHE-PSK-AES256-CBC-SHA", /* TLS_DHE_PSK_WITH_AES_256_CBC_SHA */
+ "RSA-PSK-AES256-CBC-SHA", /* TLS_RSA_PSK_WITH_AES_256_CBC_SHA */
+ "PSK-AES256-CBC-SHA384", /* TLS_PSK_WITH_AES_256_CBC_SHA384 */
+ "DHE-PSK-AES256-CBC-SHA384", /* TLS_DHE_PSK_WITH_AES_256_CBC_SHA384 */
+ "RSA-PSK-AES256-CBC-SHA384", /* TLS_RSA_PSK_WITH_AES_256_CBC_SHA384 */
+ "ECDHE-PSK-AES256-CBC-SHA", /* TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA */
+ "ECDHE-PSK-AES256-CBC-SHA384", /* TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA384 */
+ "SRP-AES-256-CBC-SHA", /* TLS_SRP_SHA_WITH_AES_256_CBC_SHA */
+ "SRP-RSA-AES-256-CBC-SHA", /* TLS_SRP_SHA_RSA_WITH_AES_256_CBC_SHA */
+ "SRP-DSS-AES-256-CBC-SHA", /* TLS_SRP_SHA_DSS_WITH_AES_256_CBC_SHA */
+ "AES256-CCM", /* TLS_RSA_WITH_AES_256_CCM */
+ "AES256-CCM8", /* TLS_RSA_WITH_AES_256_CCM_8 */
+ "PSK-AES256-CCM", /* TLS_PSK_WITH_AES_256_CCM */
+ "PSK-AES256-CCM8", /* TLS_PSK_WITH_AES_256_CCM_8 */
+ "AES256-GCM-SHA384", /* TLS_RSA_WITH_AES_256_GCM_SHA384 */
+ "DH-RSA-AES256-GCM-SHA384", /* TLS_DH_RSA_WITH_AES_256_GCM_SHA384 */
+ "DH-DSS-AES256-GCM-SHA384", /* TLS_DH_DSS_WITH_AES_256_GCM_SHA384 */
+ "ADH-AES256-GCM-SHA384", /* TLS_DH_anon_WITH_AES_256_GCM_SHA384 */
+ "PSK-AES256-GCM-SHA384", /* TLS_PSK_WITH_AES_256_GCM_SHA384 */
+ "RSA-PSK-AES256-GCM-SHA384", /* TLS_RSA_PSK_WITH_AES_256_GCM_SHA384 */
+ "ECDH-ECDSA-AES256-GCM-SHA384", /* TLS_ECDH_ECDSA_WITH_AES_256_GCM_SHA384 */
+ "ECDH-RSA-AES256-GCM-SHA384", /* TLS_ECDH_RSA_WITH_AES_256_GCM_SHA384 */
+
+ /* blacklisted CAMELLIA128 encryption ciphers */
+ "CAMELLIA128-SHA", /* TLS_RSA_WITH_CAMELLIA_128_CBC_SHA */
+ "DH-DSS-CAMELLIA128-SHA", /* TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA */
+ "DH-RSA-CAMELLIA128-SHA", /* TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA */
+ "DHE-DSS-CAMELLIA128-SHA", /* TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA */
+ "DHE-RSA-CAMELLIA128-SHA", /* TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA */
+ "ADH-CAMELLIA128-SHA", /* TLS_DH_anon_WITH_CAMELLIA_128_CBC_SHA */
+ "ECDHE-ECDSA-CAMELLIA128-SHA256", /* TLS_ECDHE_ECDSA_WITH_CAMELLIA_128_CBC_SHA256 */
+ "ECDH-ECDSA-CAMELLIA128-SHA256", /* TLS_ECDH_ECDSA_WITH_CAMELLIA_128_CBC_SHA256 */
+ "ECDHE-RSA-CAMELLIA128-SHA256", /* TLS_ECDHE_RSA_WITH_CAMELLIA_128_CBC_SHA256 */
+ "ECDH-RSA-CAMELLIA128-SHA256", /* TLS_ECDH_RSA_WITH_CAMELLIA_128_CBC_SHA256 */
+ "PSK-CAMELLIA128-SHA256", /* TLS_PSK_WITH_CAMELLIA_128_CBC_SHA256 */
+ "DHE-PSK-CAMELLIA128-SHA256", /* TLS_DHE_PSK_WITH_CAMELLIA_128_CBC_SHA256 */
+ "RSA-PSK-CAMELLIA128-SHA256", /* TLS_RSA_PSK_WITH_CAMELLIA_128_CBC_SHA256 */
+ "ECDHE-PSK-CAMELLIA128-SHA256", /* TLS_ECDHE_PSK_WITH_CAMELLIA_128_CBC_SHA256 */
+ "CAMELLIA128-GCM-SHA256", /* TLS_RSA_WITH_CAMELLIA_128_GCM_SHA256 */
+ "DH-RSA-CAMELLIA128-GCM-SHA256", /* TLS_DH_RSA_WITH_CAMELLIA_128_GCM_SHA256 */
+ "DH-DSS-CAMELLIA128-GCM-SHA256", /* TLS_DH_DSS_WITH_CAMELLIA_128_GCM_SHA256 */
+ "ADH-CAMELLIA128-GCM-SHA256", /* TLS_DH_anon_WITH_CAMELLIA_128_GCM_SHA256 */
+ "ECDH-ECDSA-CAMELLIA128-GCM-SHA256",/* TLS_ECDH_ECDSA_WITH_CAMELLIA_128_GCM_SHA256 */
+ "ECDH-RSA-CAMELLIA128-GCM-SHA256", /* TLS_ECDH_RSA_WITH_CAMELLIA_128_GCM_SHA256 */
+ "PSK-CAMELLIA128-GCM-SHA256", /* TLS_PSK_WITH_CAMELLIA_128_GCM_SHA256 */
+ "RSA-PSK-CAMELLIA128-GCM-SHA256", /* TLS_RSA_PSK_WITH_CAMELLIA_128_GCM_SHA256 */
+ "CAMELLIA128-SHA256", /* TLS_RSA_WITH_CAMELLIA_128_CBC_SHA256 */
+ "DH-DSS-CAMELLIA128-SHA256", /* TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA256 */
+ "DH-RSA-CAMELLIA128-SHA256", /* TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA256 */
+ "DHE-DSS-CAMELLIA128-SHA256", /* TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA256 */
+ "DHE-RSA-CAMELLIA128-SHA256", /* TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA256 */
+ "ADH-CAMELLIA128-SHA256", /* TLS_DH_anon_WITH_CAMELLIA_128_CBC_SHA256 */
+
+ /* blacklisted CAMELLIA256 encryption ciphers */
+ "CAMELLIA256-SHA", /* TLS_RSA_WITH_CAMELLIA_256_CBC_SHA */
+ "DH-RSA-CAMELLIA256-SHA", /* TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA */
+ "DH-DSS-CAMELLIA256-SHA", /* TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA */
+ "DHE-DSS-CAMELLIA256-SHA", /* TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA */
+ "DHE-RSA-CAMELLIA256-SHA", /* TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA */
+ "ADH-CAMELLIA256-SHA", /* TLS_DH_anon_WITH_CAMELLIA_256_CBC_SHA */
+ "ECDHE-ECDSA-CAMELLIA256-SHA384", /* TLS_ECDHE_ECDSA_WITH_CAMELLIA_256_CBC_SHA384 */
+ "ECDH-ECDSA-CAMELLIA256-SHA384", /* TLS_ECDH_ECDSA_WITH_CAMELLIA_256_CBC_SHA384 */
+ "ECDHE-RSA-CAMELLIA256-SHA384", /* TLS_ECDHE_RSA_WITH_CAMELLIA_256_CBC_SHA384 */
+ "ECDH-RSA-CAMELLIA256-SHA384", /* TLS_ECDH_RSA_WITH_CAMELLIA_256_CBC_SHA384 */
+ "PSK-CAMELLIA256-SHA384", /* TLS_PSK_WITH_CAMELLIA_256_CBC_SHA384 */
+ "DHE-PSK-CAMELLIA256-SHA384", /* TLS_DHE_PSK_WITH_CAMELLIA_256_CBC_SHA384 */
+ "RSA-PSK-CAMELLIA256-SHA384", /* TLS_RSA_PSK_WITH_CAMELLIA_256_CBC_SHA384 */
+ "ECDHE-PSK-CAMELLIA256-SHA384", /* TLS_ECDHE_PSK_WITH_CAMELLIA_256_CBC_SHA384 */
+ "CAMELLIA256-SHA256", /* TLS_RSA_WITH_CAMELLIA_256_CBC_SHA256 */
+ "DH-DSS-CAMELLIA256-SHA256", /* TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA256 */
+ "DH-RSA-CAMELLIA256-SHA256", /* TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA256 */
+ "DHE-DSS-CAMELLIA256-SHA256", /* TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA256 */
+ "DHE-RSA-CAMELLIA256-SHA256", /* TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA256 */
+ "ADH-CAMELLIA256-SHA256", /* TLS_DH_anon_WITH_CAMELLIA_256_CBC_SHA256 */
+ "CAMELLIA256-GCM-SHA384", /* TLS_RSA_WITH_CAMELLIA_256_GCM_SHA384 */
+ "DH-RSA-CAMELLIA256-GCM-SHA384", /* TLS_DH_RSA_WITH_CAMELLIA_256_GCM_SHA384 */
+ "DH-DSS-CAMELLIA256-GCM-SHA384", /* TLS_DH_DSS_WITH_CAMELLIA_256_GCM_SHA384 */
+ "ADH-CAMELLIA256-GCM-SHA384", /* TLS_DH_anon_WITH_CAMELLIA_256_GCM_SHA384 */
+ "ECDH-ECDSA-CAMELLIA256-GCM-SHA384",/* TLS_ECDH_ECDSA_WITH_CAMELLIA_256_GCM_SHA384 */
+ "ECDH-RSA-CAMELLIA256-GCM-SHA384", /* TLS_ECDH_RSA_WITH_CAMELLIA_256_GCM_SHA384 */
+ "PSK-CAMELLIA256-GCM-SHA384", /* TLS_PSK_WITH_CAMELLIA_256_GCM_SHA384 */
+ "RSA-PSK-CAMELLIA256-GCM-SHA384", /* TLS_RSA_PSK_WITH_CAMELLIA_256_GCM_SHA384 */
+
+ /* blacklisted ARIA encryption ciphers */
+ "ARIA128-SHA256", /* TLS_RSA_WITH_ARIA_128_CBC_SHA256 */
+ "ARIA256-SHA384", /* TLS_RSA_WITH_ARIA_256_CBC_SHA384 */
+ "DH-DSS-ARIA128-SHA256", /* TLS_DH_DSS_WITH_ARIA_128_CBC_SHA256 */
+ "DH-DSS-ARIA256-SHA384", /* TLS_DH_DSS_WITH_ARIA_256_CBC_SHA384 */
+ "DH-RSA-ARIA128-SHA256", /* TLS_DH_RSA_WITH_ARIA_128_CBC_SHA256 */
+ "DH-RSA-ARIA256-SHA384", /* TLS_DH_RSA_WITH_ARIA_256_CBC_SHA384 */
+ "DHE-DSS-ARIA128-SHA256", /* TLS_DHE_DSS_WITH_ARIA_128_CBC_SHA256 */
+ "DHE-DSS-ARIA256-SHA384", /* TLS_DHE_DSS_WITH_ARIA_256_CBC_SHA384 */
+ "DHE-RSA-ARIA128-SHA256", /* TLS_DHE_RSA_WITH_ARIA_128_CBC_SHA256 */
+ "DHE-RSA-ARIA256-SHA384", /* TLS_DHE_RSA_WITH_ARIA_256_CBC_SHA384 */
+ "ADH-ARIA128-SHA256", /* TLS_DH_anon_WITH_ARIA_128_CBC_SHA256 */
+ "ADH-ARIA256-SHA384", /* TLS_DH_anon_WITH_ARIA_256_CBC_SHA384 */
+ "ECDHE-ECDSA-ARIA128-SHA256", /* TLS_ECDHE_ECDSA_WITH_ARIA_128_CBC_SHA256 */
+ "ECDHE-ECDSA-ARIA256-SHA384", /* TLS_ECDHE_ECDSA_WITH_ARIA_256_CBC_SHA384 */
+ "ECDH-ECDSA-ARIA128-SHA256", /* TLS_ECDH_ECDSA_WITH_ARIA_128_CBC_SHA256 */
+ "ECDH-ECDSA-ARIA256-SHA384", /* TLS_ECDH_ECDSA_WITH_ARIA_256_CBC_SHA384 */
+ "ECDHE-RSA-ARIA128-SHA256", /* TLS_ECDHE_RSA_WITH_ARIA_128_CBC_SHA256 */
+ "ECDHE-RSA-ARIA256-SHA384", /* TLS_ECDHE_RSA_WITH_ARIA_256_CBC_SHA384 */
+ "ECDH-RSA-ARIA128-SHA256", /* TLS_ECDH_RSA_WITH_ARIA_128_CBC_SHA256 */
+ "ECDH-RSA-ARIA256-SHA384", /* TLS_ECDH_RSA_WITH_ARIA_256_CBC_SHA384 */
+ "ARIA128-GCM-SHA256", /* TLS_RSA_WITH_ARIA_128_GCM_SHA256 */
+ "ARIA256-GCM-SHA384", /* TLS_RSA_WITH_ARIA_256_GCM_SHA384 */
+ "DH-DSS-ARIA128-GCM-SHA256", /* TLS_DH_DSS_WITH_ARIA_128_GCM_SHA256 */
+ "DH-DSS-ARIA256-GCM-SHA384", /* TLS_DH_DSS_WITH_ARIA_256_GCM_SHA384 */
+ "DH-RSA-ARIA128-GCM-SHA256", /* TLS_DH_RSA_WITH_ARIA_128_GCM_SHA256 */
+ "DH-RSA-ARIA256-GCM-SHA384", /* TLS_DH_RSA_WITH_ARIA_256_GCM_SHA384 */
+ "ADH-ARIA128-GCM-SHA256", /* TLS_DH_anon_WITH_ARIA_128_GCM_SHA256 */
+ "ADH-ARIA256-GCM-SHA384", /* TLS_DH_anon_WITH_ARIA_256_GCM_SHA384 */
+ "ECDH-ECDSA-ARIA128-GCM-SHA256", /* TLS_ECDH_ECDSA_WITH_ARIA_128_GCM_SHA256 */
+ "ECDH-ECDSA-ARIA256-GCM-SHA384", /* TLS_ECDH_ECDSA_WITH_ARIA_256_GCM_SHA384 */
+ "ECDH-RSA-ARIA128-GCM-SHA256", /* TLS_ECDH_RSA_WITH_ARIA_128_GCM_SHA256 */
+ "ECDH-RSA-ARIA256-GCM-SHA384", /* TLS_ECDH_RSA_WITH_ARIA_256_GCM_SHA384 */
+ "PSK-ARIA128-SHA256", /* TLS_PSK_WITH_ARIA_128_CBC_SHA256 */
+ "PSK-ARIA256-SHA384", /* TLS_PSK_WITH_ARIA_256_CBC_SHA384 */
+ "DHE-PSK-ARIA128-SHA256", /* TLS_DHE_PSK_WITH_ARIA_128_CBC_SHA256 */
+ "DHE-PSK-ARIA256-SHA384", /* TLS_DHE_PSK_WITH_ARIA_256_CBC_SHA384 */
+ "RSA-PSK-ARIA128-SHA256", /* TLS_RSA_PSK_WITH_ARIA_128_CBC_SHA256 */
+ "RSA-PSK-ARIA256-SHA384", /* TLS_RSA_PSK_WITH_ARIA_256_CBC_SHA384 */
+ "ARIA128-GCM-SHA256", /* TLS_PSK_WITH_ARIA_128_GCM_SHA256 */
+ "ARIA256-GCM-SHA384", /* TLS_PSK_WITH_ARIA_256_GCM_SHA384 */
+ "RSA-PSK-ARIA128-GCM-SHA256", /* TLS_RSA_PSK_WITH_ARIA_128_GCM_SHA256 */
+ "RSA-PSK-ARIA256-GCM-SHA384", /* TLS_RSA_PSK_WITH_ARIA_256_GCM_SHA384 */
+ "ECDHE-PSK-ARIA128-SHA256", /* TLS_ECDHE_PSK_WITH_ARIA_128_CBC_SHA256 */
+ "ECDHE-PSK-ARIA256-SHA384", /* TLS_ECDHE_PSK_WITH_ARIA_256_CBC_SHA384 */
+
+ /* blacklisted SEED encryptions */
+ "SEED-SHA", /*TLS_RSA_WITH_SEED_CBC_SHA */
+ "DH-DSS-SEED-SHA", /* TLS_DH_DSS_WITH_SEED_CBC_SHA */
+ "DH-RSA-SEED-SHA", /* TLS_DH_RSA_WITH_SEED_CBC_SHA */
+ "DHE-DSS-SEED-SHA", /* TLS_DHE_DSS_WITH_SEED_CBC_SHA */
+ "DHE-RSA-SEED-SHA", /* TLS_DHE_RSA_WITH_SEED_CBC_SHA */
+ "ADH-SEED-SHA", /* TLS_DH_anon_WITH_SEED_CBC_SHA */
+
+ /* blacklisted KRB5 ciphers */
+ "KRB5-DES-CBC-SHA", /* TLS_KRB5_WITH_DES_CBC_SHA */
+ "KRB5-DES-CBC3-SHA", /* TLS_KRB5_WITH_3DES_EDE_CBC_SHA */
+ "KRB5-IDEA-CBC-SHA", /* TLS_KRB5_WITH_IDEA_CBC_SHA */
+ "KRB5-DES-CBC-MD5", /* TLS_KRB5_WITH_DES_CBC_MD5 */
+ "KRB5-DES-CBC3-MD5", /* TLS_KRB5_WITH_3DES_EDE_CBC_MD5 */
+ "KRB5-IDEA-CBC-MD5", /* TLS_KRB5_WITH_IDEA_CBC_MD5 */
+ "EXP-KRB5-DES-CBC-SHA", /* TLS_KRB5_EXPORT_WITH_DES_CBC_40_SHA */
+ "EXP-KRB5-DES-CBC-MD5", /* TLS_KRB5_EXPORT_WITH_DES_CBC_40_MD5 */
+ "EXP-KRB5-RC2-CBC-SHA", /* TLS_KRB5_EXPORT_WITH_RC2_CBC_40_SHA */
+ "EXP-KRB5-RC2-CBC-MD5", /* TLS_KRB5_EXPORT_WITH_RC2_CBC_40_MD5 */
+
+ /* blacklisted exoticas */
+ "DHE-DSS-CBC-SHA", /* TLS_DHE_DSS_WITH_DES_CBC_SHA */
+ "IDEA-CBC-SHA", /* TLS_RSA_WITH_IDEA_CBC_SHA */
+
+ /* not really sure if the following names are correct */
+ "SSL3_CK_SCSV", /* TLS_EMPTY_RENEGOTIATION_INFO_SCSV */
+ "SSL3_CK_FALLBACK_SCSV"
+};
+static size_t RFC7540_names_LEN = sizeof(RFC7540_names)/sizeof(RFC7540_names[0]);
+
+
+static apr_hash_t *BLCNames;
+
+static void cipher_init(apr_pool_t *pool)
+{
+ apr_hash_t *hash = apr_hash_make(pool);
+ const char *source;
+ unsigned int i;
+
+ source = "rfc7540";
+ for (i = 0; i < RFC7540_names_LEN; ++i) {
+ apr_hash_set(hash, RFC7540_names[i], APR_HASH_KEY_STRING, source);
+ }
+
+ BLCNames = hash;
+}
+
+static int cipher_is_blacklisted(const char *cipher, const char **psource)
+{
+ *psource = apr_hash_get(BLCNames, cipher, APR_HASH_KEY_STRING);
+ return !!*psource;
+}
+
+apr_status_t h2_protocol_init(apr_pool_t *pool, server_rec *s)
+{
+ (void)pool;
+ ap_log_error(APLOG_MARK, APLOG_TRACE1, 0, s, "h2_h2, child_init");
+ cipher_init(pool);
+
+ return APR_SUCCESS;
+}
+
+int h2_protocol_is_acceptable_c1(conn_rec *c, request_rec *r, int require_all)
+{
+ int is_tls = ap_ssl_conn_is_ssl(c);
+
+ if (is_tls && h2_config_cgeti(c, H2_CONF_MODERN_TLS_ONLY) > 0) {
+ /* Check TLS connection for modern TLS parameters, as defined in
+ * RFC 7540 and https://wiki.mozilla.org/Security/Server_Side_TLS#Modern_compatibility
+ */
+ apr_pool_t *pool = c->pool;
+ server_rec *s = c->base_server;
+ const char *val;
+
+ /* Need TLSv1.2 or higher, RFC 7540, ch. 9.2 */
+ val = ap_ssl_var_lookup(pool, s, c, NULL, "SSL_PROTOCOL");
+ if (val && *val) {
+ if (strncmp("TLS", val, 3)
+ || !strcmp("TLSv1", val)
+ || !strcmp("TLSv1.1", val)) {
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, c, APLOGNO(03050)
+ "h2_h2(%ld): tls protocol not suitable: %s",
+ (long)c->id, val);
+ return 0;
+ }
+ }
+ else if (require_all) {
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, c, APLOGNO(03051)
+ "h2_h2(%ld): tls protocol is indetermined", (long)c->id);
+ return 0;
+ }
+
+ if (val && !strcmp("TLSv1.2", val)) {
+ /* The TLS cipher blacklist was defined before TLSv1.3, so we
+ * only check it for TLSv1.2 */
+ val = ap_ssl_var_lookup(pool, s, c, NULL, "SSL_CIPHER");
+ if (val && *val) {
+ const char *source;
+ if (cipher_is_blacklisted(val, &source)) {
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, c, APLOGNO(03052)
+ "h2_h2(%ld): tls cipher %s blacklisted by %s",
+ (long)c->id, val, source);
+ return 0;
+ }
+ }
+ else if (require_all) {
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, c, APLOGNO(03053)
+ "h2_h2(%ld): tls cipher is indetermined", (long)c->id);
+ return 0;
+ }
+ }
+ }
+ return 1;
+}
+
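h2_protocol_is_acceptable_c1() is the entry point for the RFC 7540 checks above. A hedged sketch of how a caller could use it to decide whether to offer HTTP/2 at all; the wrapper function is a hypothetical stand-in, not part of this patch:

    #include <httpd.h>
    #include "h2_protocol.h"

    /* Hypothetical caller: decide whether "h2" may be offered on this
     * connection. */
    static int my_may_offer_h2(conn_rec *c, request_rec *r)
    {
        /* require_all == 0: during the TLS handshake the cipher may not be
         * selected yet, so indeterminate values do not fail the check. */
        if (!h2_protocol_is_acceptable_c1(c, r, 0)) {
            return DECLINED;   /* stay on HTTP/1.1 */
        }
        return OK;             /* HTTP/2 may be proposed */
    }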
diff --git a/modules/http2/h2_protocol.h b/modules/http2/h2_protocol.h
new file mode 100644
index 0000000..ed48e89
--- /dev/null
+++ b/modules/http2/h2_protocol.h
@@ -0,0 +1,56 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __mod_h2__h2_protocol__
+#define __mod_h2__h2_protocol__
+
+/**
+ * List of protocol identifiers that we support in cleartext
+ * negotiations. NULL terminated.
+ */
+extern const char *h2_protocol_ids_clear[];
+
+/**
+ * List of protocol identifiers that we support in TLS encrypted
+ * negotiations (ALPN). NULL terminated.
+ */
+extern const char *h2_protocol_ids_tls[];
+
+/**
+ * Provide a user-readable description of an HTTP/2 error code.
+ * @param h2_error http/2 error code, as in rfc 7540, ch. 7
+ * @return textual description of code or that it is unknown.
+ */
+const char *h2_protocol_err_description(unsigned int h2_error);
+
+/*
+ * One time, post config initialization.
+ */
+apr_status_t h2_protocol_init(apr_pool_t *pool, server_rec *s);
+
+/**
+ * Check if the given primary connection fulfills the protocol
+ * requirements for HTTP/2.
+ * @param c the connection
+ * @param require_all != 0 iff any missing connection properties make
+ * the test fail. For example, a cipher might not have been selected while
+ * the handshake is still ongoing.
+ * @return != 0 iff protocol requirements are met
+ */
+int h2_protocol_is_acceptable_c1(conn_rec *c, request_rec *r, int require_all);
+
+
+#endif /* defined(__mod_h2__h2_protocol__) */
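A brief, hedged usage sketch for h2_protocol_err_description(); the tracing helper and the error_code variable are assumptions for illustration only:

    #include <http_log.h>
    #include "h2_protocol.h"

    /* Illustration only: turn an RFC 7540 error code into readable text. */
    static void my_trace_stream_error(conn_rec *c, unsigned int error_code)
    {
        ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, c,
                      "stream closed with error '%s' (0x%x)",
                      h2_protocol_err_description(error_code), error_code);
    }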
diff --git a/modules/http2/h2_proxy_session.c b/modules/http2/h2_proxy_session.c
new file mode 100644
index 0000000..c3f2ff3
--- /dev/null
+++ b/modules/http2/h2_proxy_session.c
@@ -0,0 +1,1719 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <stddef.h>
+#include <apr_strings.h>
+#include <nghttp2/nghttp2.h>
+
+#include <mpm_common.h>
+#include <httpd.h>
+#include <http_protocol.h>
+#include <mod_proxy.h>
+
+#include "mod_http2.h"
+#include "h2.h"
+#include "h2_proxy_util.h"
+#include "h2_proxy_session.h"
+
+APLOG_USE_MODULE(proxy_http2);
+
+typedef struct h2_proxy_stream {
+ int id;
+ apr_pool_t *pool;
+ h2_proxy_session *session;
+
+ const char *url;
+ request_rec *r;
+ h2_proxy_request *req;
+ const char *real_server_uri;
+ const char *p_server_uri;
+ int standalone;
+
+ h2_proxy_stream_state_t state;
+ unsigned int suspended : 1;
+ unsigned int waiting_on_100 : 1;
+ unsigned int waiting_on_ping : 1;
+ unsigned int headers_ended : 1;
+ uint32_t error_code;
+
+ apr_bucket_brigade *input;
+ apr_off_t data_sent;
+ apr_bucket_brigade *output;
+ apr_off_t data_received;
+
+ apr_table_t *saves;
+} h2_proxy_stream;
+
+
+static void dispatch_event(h2_proxy_session *session, h2_proxys_event_t ev,
+ int arg, const char *msg);
+static void ping_arrived(h2_proxy_session *session);
+static apr_status_t check_suspended(h2_proxy_session *session);
+static void stream_resume(h2_proxy_stream *stream);
+static apr_status_t submit_trailers(h2_proxy_stream *stream);
+
+/*
+ * The H2_PING connection sub-state: a state independent of the H2_SESSION state
+ * of the connection:
+ * - H2_PING_ST_NONE: no interference with request handling, ProxyTimeout in effect.
+ * When entered, all suspended streams are unsuspended again.
+ * - H2_PING_ST_AWAIT_ANY: new requests are suspended, a possibly configured "ping"
+ * timeout is in effect. Any frame received transits to H2_PING_ST_NONE.
+ * - H2_PING_ST_AWAIT_PING: same as above, but only a PING frame transits
+ * to H2_PING_ST_NONE.
+ *
+ * An AWAIT state is entered on a new connection or when re-using a connection and
+ * the last frame received was some time ago. The latter sends a PING frame
+ * and insists on an answer, the former is satisfied by any frame received from the
+ * backend.
+ *
+ * This works for new connections as there is always at least one SETTINGS frame
+ * that the backend sends. When re-using a connection, we send a PING and insist on
+ * receiving one back, as there might be frames in our connection buffers from
+ * some time ago. Since some servers have protections against PING flooding, we
+ * only ever have one PING unanswered.
+ *
+ * Requests are suspended while in a PING state, as we do not want to send data
+ * before we can be reasonably sure that the connection is working (at least on
+ * the h2 protocol level). This also means that the session can do blocking reads
+ * when expecting PING answers.
+ */
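+
+/* Put the backend socket on the configured ping timeout, saving the current
+ * socket timeout so that unset_ping_timeout() can restore it later. */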
+static void set_ping_timeout(h2_proxy_session *session)
+{
+ if (session->ping_timeout != -1 && session->save_timeout == -1) {
+ apr_socket_t *socket = NULL;
+
+ socket = ap_get_conn_socket(session->c);
+ if (socket) {
+ apr_socket_timeout_get(socket, &session->save_timeout);
+ apr_socket_timeout_set(socket, session->ping_timeout);
+ }
+ }
+}
+
+static void unset_ping_timeout(h2_proxy_session *session)
+{
+ if (session->save_timeout != -1) {
+ apr_socket_t *socket = NULL;
+
+ socket = ap_get_conn_socket(session->c);
+ if (socket) {
+ apr_socket_timeout_set(socket, session->save_timeout);
+ session->save_timeout = -1;
+ }
+ }
+}
+
+static void enter_ping_state(h2_proxy_session *session, h2_ping_state_t state)
+{
+ if (session->ping_state == state) return;
+ switch (session->ping_state) {
+ case H2_PING_ST_NONE:
+            /* leaving NONE: enforce the ping timeout, maybe send a PING frame */
+ if (H2_PING_ST_AWAIT_PING == state) {
+ unset_ping_timeout(session);
+ nghttp2_submit_ping(session->ngh2, 0, (const uint8_t *)"nevergonnagiveyouup");
+ }
+ set_ping_timeout(session);
+ session->ping_state = state;
+ break;
+ default:
+ /* no switching between the != NONE states */
+ if (H2_PING_ST_NONE == state) {
+ session->ping_state = state;
+ unset_ping_timeout(session);
+ ping_arrived(session);
+ }
+ break;
+ }
+}
+
+static void ping_new_session(h2_proxy_session *session, proxy_conn_rec *p_conn)
+{
+ session->save_timeout = -1;
+ session->ping_timeout = (p_conn->worker->s->ping_timeout_set?
+ p_conn->worker->s->ping_timeout : -1);
+ session->ping_state = H2_PING_ST_NONE;
+ enter_ping_state(session, H2_PING_ST_AWAIT_ANY);
+}
+
+static void ping_reuse_session(h2_proxy_session *session)
+{
+ if (H2_PING_ST_NONE == session->ping_state) {
+ apr_interval_time_t age = apr_time_now() - session->last_frame_received;
+ if (age > apr_time_from_sec(1)) {
+ enter_ping_state(session, H2_PING_ST_AWAIT_PING);
+ }
+ }
+}
+
+static void ping_ev_frame_received(h2_proxy_session *session, const nghttp2_frame *frame)
+{
+ session->last_frame_received = apr_time_now();
+ switch (session->ping_state) {
+ case H2_PING_ST_NONE:
+ /* nop */
+ break;
+ case H2_PING_ST_AWAIT_ANY:
+ enter_ping_state(session, H2_PING_ST_NONE);
+ break;
+ case H2_PING_ST_AWAIT_PING:
+ if (NGHTTP2_PING == frame->hd.type) {
+ enter_ping_state(session, H2_PING_ST_NONE);
+ }
+ /* we may receive many other frames while we are waiting for the
+         * PING answer. They may all come from our connection buffers and
+ * say nothing about the current state of the backend. */
+ break;
+ }
+}
+
+static apr_status_t proxy_session_pre_close(void *theconn)
+{
+ proxy_conn_rec *p_conn = (proxy_conn_rec *)theconn;
+ h2_proxy_session *session = p_conn->data;
+
+ if (session && session->ngh2) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, session->c,
+ "proxy_session(%s): pool cleanup, state=%d, streams=%d",
+ session->id, session->state,
+ (int)h2_proxy_ihash_count(session->streams));
+ session->aborted = 1;
+ dispatch_event(session, H2_PROXYS_EV_PRE_CLOSE, 0, NULL);
+ nghttp2_session_del(session->ngh2);
+ session->ngh2 = NULL;
+ p_conn->data = NULL;
+ }
+ return APR_SUCCESS;
+}
+
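+/* Pass the brigade to the backend connection's output filters, optionally
+ * appending a FLUSH bucket first. Transferred bytes are accounted to the
+ * proxy worker and the brigade is cleaned up regardless of the outcome. */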
+static int proxy_pass_brigade(apr_bucket_alloc_t *bucket_alloc,
+ proxy_conn_rec *p_conn,
+ conn_rec *origin, apr_bucket_brigade *bb,
+ int flush)
+{
+ apr_status_t status;
+ apr_off_t transferred;
+
+ if (flush) {
+ apr_bucket *e = apr_bucket_flush_create(bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(bb, e);
+ }
+ apr_brigade_length(bb, 0, &transferred);
+    if (transferred != -1) {
+        p_conn->worker->s->transferred += transferred;
+    }
+ status = ap_pass_brigade(origin->output_filters, bb);
+ /* Cleanup the brigade now to avoid buckets lifetime
+ * issues in case of error returned below. */
+ apr_brigade_cleanup(bb);
+ if (status != APR_SUCCESS) {
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, origin, APLOGNO(03357)
+ "pass output failed to %pI (%s)",
+ p_conn->addr, p_conn->hostname);
+ }
+ return status;
+}
+
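+/* nghttp2 send callback: wrap the serialized frame bytes in a transient
+ * bucket and flush them to the backend via proxy_pass_brigade(). */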
+static ssize_t raw_send(nghttp2_session *ngh2, const uint8_t *data,
+ size_t length, int flags, void *user_data)
+{
+ h2_proxy_session *session = user_data;
+ apr_bucket *b;
+ apr_status_t status;
+ int flush = 1;
+
+ if (data) {
+ b = apr_bucket_transient_create((const char*)data, length,
+ session->c->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(session->output, b);
+ }
+
+ status = proxy_pass_brigade(session->c->bucket_alloc,
+ session->p_conn, session->c,
+ session->output, flush);
+    ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, session->c,
+                  "h2_proxy_session(%s): raw_send %d bytes, flush=%d",
+ session->id, (int)length, flush);
+ if (status != APR_SUCCESS) {
+ return NGHTTP2_ERR_CALLBACK_FAILURE;
+ }
+ return length;
+}
+
+static int on_frame_recv(nghttp2_session *ngh2, const nghttp2_frame *frame,
+ void *user_data)
+{
+ h2_proxy_session *session = user_data;
+ h2_proxy_stream *stream;
+ request_rec *r;
+ int n;
+
+ if (APLOGcdebug(session->c)) {
+ char buffer[256];
+
+ h2_proxy_util_frame_print(frame, buffer, sizeof(buffer)/sizeof(buffer[0]));
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, APLOGNO(03341)
+ "h2_proxy_session(%s): recv FRAME[%s]",
+ session->id, buffer);
+ }
+
+ ping_ev_frame_received(session, frame);
+ /* Action for frame types: */
+ switch (frame->hd.type) {
+ case NGHTTP2_HEADERS:
+ stream = nghttp2_session_get_stream_user_data(ngh2, frame->hd.stream_id);
+ if (!stream) {
+ return NGHTTP2_ERR_CALLBACK_FAILURE;
+ }
+ r = stream->r;
+ if (r->status >= 100 && r->status < 200) {
+ /* By default, we will forward all interim responses when
+             * we are sitting on an HTTP/2 connection to the client */
+ int forward = session->h2_front;
+ switch(r->status) {
+ case 100:
+ if (stream->waiting_on_100) {
+ stream->waiting_on_100 = 0;
+ r->status_line = ap_get_status_line(r->status);
+ forward = 1;
+ }
+ break;
+ case 103:
+                /* Workaround until we get this into the http protocol base
+                 * parts; without this, unknown status codes are converted
+                 * to 500... */
+ r->status_line = "103 Early Hints";
+ break;
+ default:
+ r->status_line = ap_get_status_line(r->status);
+ break;
+ }
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(03487)
+ "h2_proxy_session(%s): got interim HEADERS, "
+ "status=%d, will forward=%d",
+ session->id, r->status, forward);
+ if (forward) {
+ ap_send_interim_response(r, 1);
+ }
+ }
+ stream_resume(stream);
+ break;
+ case NGHTTP2_PING:
+ break;
+ case NGHTTP2_PUSH_PROMISE:
+ break;
+ case NGHTTP2_SETTINGS:
+ if (frame->settings.niv > 0) {
+ n = nghttp2_session_get_remote_settings(ngh2, NGHTTP2_SETTINGS_MAX_CONCURRENT_STREAMS);
+ if (n > 0) {
+ session->remote_max_concurrent = n;
+ }
+ }
+ break;
+ case NGHTTP2_GOAWAY:
+ /* we expect the remote server to tell us the highest stream id
+ * that it has started processing. */
+ session->last_stream_id = frame->goaway.last_stream_id;
+ dispatch_event(session, H2_PROXYS_EV_REMOTE_GOAWAY, 0, NULL);
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+
+static int before_frame_send(nghttp2_session *ngh2,
+ const nghttp2_frame *frame, void *user_data)
+{
+ h2_proxy_session *session = user_data;
+ if (APLOGcdebug(session->c)) {
+ char buffer[256];
+
+ h2_proxy_util_frame_print(frame, buffer, sizeof(buffer)/sizeof(buffer[0]));
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, APLOGNO(03343)
+ "h2_proxy_session(%s): sent FRAME[%s]",
+ session->id, buffer);
+ }
+ return 0;
+}
+
+static int add_header(void *table, const char *n, const char *v)
+{
+ apr_table_add(table, n, v);
+ return 1;
+}
+
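+/* Add a response header, applying the usual proxy reverse mappings for
+ * location-like headers and cookies unless ProxyPreserveHost is on; "Link"
+ * headers get the h2 specific mapping that knows the backend URIs. */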
+static void process_proxy_header(apr_table_t *headers, h2_proxy_stream *stream,
+ const char *n, const char *v)
+{
+ static const struct {
+ const char *name;
+ ap_proxy_header_reverse_map_fn func;
+ } transform_hdrs[] = {
+ { "Location", ap_proxy_location_reverse_map },
+ { "Content-Location", ap_proxy_location_reverse_map },
+ { "URI", ap_proxy_location_reverse_map },
+ { "Destination", ap_proxy_location_reverse_map },
+ { "Set-Cookie", ap_proxy_cookie_reverse_map },
+ { NULL, NULL }
+ };
+ request_rec *r = stream->r;
+ proxy_dir_conf *dconf;
+ int i;
+
+ dconf = ap_get_module_config(r->per_dir_config, &proxy_module);
+ if (!dconf->preserve_host) {
+ for (i = 0; transform_hdrs[i].name; ++i) {
+ if (!ap_cstr_casecmp(transform_hdrs[i].name, n)) {
+ apr_table_add(headers, n, (*transform_hdrs[i].func)(r, dconf, v));
+ return;
+ }
+ }
+ if (!ap_cstr_casecmp("Link", n)) {
+ dconf = ap_get_module_config(r->per_dir_config, &proxy_module);
+ apr_table_add(headers, n, h2_proxy_link_reverse_map(r, dconf,
+ stream->real_server_uri, stream->p_server_uri, v));
+ return;
+ }
+ }
+ apr_table_add(headers, n, v);
+}
+
+static apr_status_t h2_proxy_stream_add_header_out(h2_proxy_stream *stream,
+ const char *n, apr_size_t nlen,
+ const char *v, apr_size_t vlen)
+{
+ if (n[0] == ':') {
+ if (!stream->data_received && !strncmp(":status", n, nlen)) {
+ char *s = apr_pstrndup(stream->r->pool, v, vlen);
+
+ apr_table_setn(stream->r->notes, "proxy-status", s);
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, stream->session->c,
+ "h2_proxy_stream(%s-%d): got status %s",
+ stream->session->id, stream->id, s);
+ stream->r->status = (int)apr_atoi64(s);
+ if (stream->r->status <= 0) {
+ stream->r->status = 500;
+ return APR_EGENERAL;
+ }
+ }
+ return APR_SUCCESS;
+ }
+
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, stream->session->c,
+ "h2_proxy_stream(%s-%d): on_header %s: %s",
+ stream->session->id, stream->id, n, v);
+ if (!h2_proxy_res_ignore_header(n, nlen)) {
+ char *hname, *hvalue;
+ apr_table_t *headers = (stream->headers_ended?
+ stream->r->trailers_out : stream->r->headers_out);
+
+ hname = apr_pstrndup(stream->pool, n, nlen);
+ h2_proxy_util_camel_case_header(hname, nlen);
+ hvalue = apr_pstrndup(stream->pool, v, vlen);
+
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, stream->session->c,
+ "h2_proxy_stream(%s-%d): got header %s: %s",
+ stream->session->id, stream->id, hname, hvalue);
+ process_proxy_header(headers, stream, hname, hvalue);
+ }
+ return APR_SUCCESS;
+}
+
+static int log_header(void *ctx, const char *key, const char *value)
+{
+ h2_proxy_stream *stream = ctx;
+ ap_log_rerror(APLOG_MARK, APLOG_TRACE2, 0, stream->r,
+ "h2_proxy_stream(%s-%d), header_out %s: %s",
+ stream->session->id, stream->id, key, value);
+ return 1;
+}
+
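+/* Final fix-up once all response headers have arrived: merge the previously
+ * saved Set-Cookie values, re-apply the Content-Type and add a "Via" header
+ * according to the ProxyVia configuration. */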
+static void h2_proxy_stream_end_headers_out(h2_proxy_stream *stream)
+{
+ h2_proxy_session *session = stream->session;
+ request_rec *r = stream->r;
+ apr_pool_t *p = r->pool;
+ const char *buf;
+
+ /* Now, add in the cookies from the response to the ones already saved */
+ apr_table_do(add_header, stream->saves, r->headers_out, "Set-Cookie", NULL);
+
+ /* and now load 'em all in */
+ if (!apr_is_empty_table(stream->saves)) {
+ apr_table_unset(r->headers_out, "Set-Cookie");
+ r->headers_out = apr_table_overlay(p, r->headers_out, stream->saves);
+ }
+
+ if ((buf = apr_table_get(r->headers_out, "Content-Type"))) {
+ ap_set_content_type(r, apr_pstrdup(p, buf));
+ }
+
+ /* handle Via header in response */
+ if (session->conf->viaopt != via_off
+ && session->conf->viaopt != via_block) {
+ const char *server_name = ap_get_server_name(stream->r);
+ apr_port_t port = ap_get_server_port(stream->r);
+ char portstr[32];
+
+ /* If USE_CANONICAL_NAME_OFF was configured for the proxy virtual host,
+ * then the server name returned by ap_get_server_name() is the
+ * origin server name (which doesn't make sense with Via: headers)
+ * so we use the proxy vhost's name instead.
+ */
+ if (server_name == stream->r->hostname) {
+ server_name = stream->r->server->server_hostname;
+ }
+ if (ap_is_default_port(port, stream->r)) {
+ portstr[0] = '\0';
+ }
+ else {
+ apr_snprintf(portstr, sizeof(portstr), ":%d", port);
+ }
+
+ /* create a "Via:" response header entry and merge it */
+ apr_table_add(r->headers_out, "Via",
+ (session->conf->viaopt == via_full)
+ ? apr_psprintf(p, "%d.%d %s%s (%s)",
+ HTTP_VERSION_MAJOR(r->proto_num),
+ HTTP_VERSION_MINOR(r->proto_num),
+ server_name, portstr,
+ AP_SERVER_BASEVERSION)
+ : apr_psprintf(p, "%d.%d %s%s",
+ HTTP_VERSION_MAJOR(r->proto_num),
+ HTTP_VERSION_MINOR(r->proto_num),
+ server_name, portstr)
+ );
+ }
+ if (r->status >= 200) stream->headers_ended = 1;
+
+ if (APLOGrtrace2(stream->r)) {
+ ap_log_rerror(APLOG_MARK, APLOG_TRACE2, 0, stream->r,
+ "h2_proxy_stream(%s-%d), header_out after merging",
+ stream->session->id, stream->id);
+ apr_table_do(log_header, stream, stream->r->headers_out, NULL);
+ }
+}
+
+static int stream_response_data(nghttp2_session *ngh2, uint8_t flags,
+ int32_t stream_id, const uint8_t *data,
+ size_t len, void *user_data)
+{
+ h2_proxy_session *session = user_data;
+ h2_proxy_stream *stream;
+ apr_bucket *b;
+ apr_status_t status;
+
+ stream = nghttp2_session_get_stream_user_data(ngh2, stream_id);
+ if (!stream) {
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, ap_server_conf, APLOGNO(03358)
+ "h2_proxy_session(%s): recv data chunk for "
+ "unknown stream %d, ignored",
+ session->id, stream_id);
+ return 0;
+ }
+
+ if (!stream->data_received) {
+ /* last chance to manipulate response headers.
+ * after this, only trailers */
+ h2_proxy_stream_end_headers_out(stream);
+ }
+ stream->data_received += len;
+
+ b = apr_bucket_transient_create((const char*)data, len,
+ stream->r->connection->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(stream->output, b);
+ /* always flush after a DATA frame, as we have no other indication
+ * of buffer use */
+ b = apr_bucket_flush_create(stream->r->connection->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(stream->output, b);
+
+ status = ap_pass_brigade(stream->r->output_filters, stream->output);
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, status, stream->r, APLOGNO(03359)
+ "h2_proxy_session(%s): stream=%d, response DATA %ld, %ld"
+ " total", session->id, stream_id, (long)len,
+ (long)stream->data_received);
+ if (status != APR_SUCCESS) {
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, session->c, APLOGNO(03344)
+ "h2_proxy_session(%s): passing output on stream %d",
+ session->id, stream->id);
+ nghttp2_submit_rst_stream(ngh2, NGHTTP2_FLAG_NONE,
+ stream_id, NGHTTP2_STREAM_CLOSED);
+ return NGHTTP2_ERR_STREAM_CLOSING;
+ }
+ return 0;
+}
+
+static int on_stream_close(nghttp2_session *ngh2, int32_t stream_id,
+ uint32_t error_code, void *user_data)
+{
+ h2_proxy_session *session = user_data;
+ h2_proxy_stream *stream;
+ if (!session->aborted) {
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, APLOGNO(03360)
+ "h2_proxy_session(%s): stream=%d, closed, err=%d",
+ session->id, stream_id, error_code);
+ stream = h2_proxy_ihash_get(session->streams, stream_id);
+ if (stream) {
+ stream->error_code = error_code;
+ }
+ dispatch_event(session, H2_PROXYS_EV_STREAM_DONE, stream_id, NULL);
+ }
+ return 0;
+}
+
+static int on_header(nghttp2_session *ngh2, const nghttp2_frame *frame,
+ const uint8_t *namearg, size_t nlen,
+ const uint8_t *valuearg, size_t vlen, uint8_t flags,
+ void *user_data)
+{
+ h2_proxy_session *session = user_data;
+ h2_proxy_stream *stream;
+ const char *n = (const char*)namearg;
+ const char *v = (const char*)valuearg;
+
+ (void)session;
+ if (frame->hd.type == NGHTTP2_HEADERS && nlen) {
+ stream = nghttp2_session_get_stream_user_data(ngh2, frame->hd.stream_id);
+ if (stream) {
+ if (h2_proxy_stream_add_header_out(stream, n, nlen, v, vlen)) {
+ return NGHTTP2_ERR_CALLBACK_FAILURE;
+ }
+ }
+ }
+ else if (frame->hd.type == NGHTTP2_PUSH_PROMISE) {
+ }
+
+ return 0;
+}
+
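+/* nghttp2 read callback for the request body: while we still wait on a
+ * backend PING or a 100-continue answer, the stream is deferred. Otherwise
+ * copy up to `length` bytes from the input brigade into `buf`, splitting
+ * partially consumed buckets, and submit any trailers when EOS is reached. */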
+static ssize_t stream_request_data(nghttp2_session *ngh2, int32_t stream_id,
+ uint8_t *buf, size_t length,
+ uint32_t *data_flags,
+ nghttp2_data_source *source, void *user_data)
+{
+ h2_proxy_stream *stream;
+ apr_status_t status = APR_SUCCESS;
+
+ *data_flags = 0;
+ stream = nghttp2_session_get_stream_user_data(ngh2, stream_id);
+ if (!stream) {
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, ap_server_conf, APLOGNO(03361)
+ "h2_proxy_stream(NULL): data_read, stream %d not found",
+ stream_id);
+ return NGHTTP2_ERR_CALLBACK_FAILURE;
+ }
+
+ if (stream->session->ping_state != H2_PING_ST_NONE) {
+ /* suspend until we hear from the other side */
+ stream->waiting_on_ping = 1;
+ status = APR_EAGAIN;
+ }
+ else if (stream->r->expecting_100) {
+ /* suspend until the answer comes */
+ stream->waiting_on_100 = 1;
+ status = APR_EAGAIN;
+ }
+ else if (APR_BRIGADE_EMPTY(stream->input)) {
+ status = ap_get_brigade(stream->r->input_filters, stream->input,
+ AP_MODE_READBYTES, APR_NONBLOCK_READ,
+ H2MAX(APR_BUCKET_BUFF_SIZE, length));
+ ap_log_rerror(APLOG_MARK, APLOG_TRACE2, status, stream->r,
+ "h2_proxy_stream(%s-%d): request body read",
+ stream->session->id, stream->id);
+ }
+
+ if (status == APR_SUCCESS) {
+ size_t readlen = 0;
+ while (status == APR_SUCCESS
+ && (readlen < length)
+ && !APR_BRIGADE_EMPTY(stream->input)) {
+ apr_bucket* b = APR_BRIGADE_FIRST(stream->input);
+ if (APR_BUCKET_IS_METADATA(b)) {
+ if (APR_BUCKET_IS_EOS(b)) {
+ *data_flags |= NGHTTP2_DATA_FLAG_EOF;
+ }
+ else {
+ /* we do nothing more regarding any meta here */
+ }
+ }
+ else {
+ const char *bdata = NULL;
+ apr_size_t blen = 0;
+ status = apr_bucket_read(b, &bdata, &blen, APR_BLOCK_READ);
+
+ if (status == APR_SUCCESS && blen > 0) {
+ size_t copylen = H2MIN(length - readlen, blen);
+ memcpy(buf, bdata, copylen);
+ buf += copylen;
+ readlen += copylen;
+ if (copylen < blen) {
+ /* We have data left in the bucket. Split it. */
+ status = apr_bucket_split(b, copylen);
+ }
+ }
+ }
+ apr_bucket_delete(b);
+ }
+
+ stream->data_sent += readlen;
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, status, stream->r, APLOGNO(03468)
+ "h2_proxy_stream(%d): request DATA %ld, %ld"
+ " total, flags=%d", stream->id, (long)readlen, (long)stream->data_sent,
+ (int)*data_flags);
+ if ((*data_flags & NGHTTP2_DATA_FLAG_EOF) && !apr_is_empty_table(stream->r->trailers_in)) {
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, status, stream->r, APLOGNO(10179)
+ "h2_proxy_stream(%d): submit trailers", stream->id);
+ *data_flags |= NGHTTP2_DATA_FLAG_NO_END_STREAM;
+ submit_trailers(stream);
+ }
+ return readlen;
+ }
+ else if (APR_STATUS_IS_EAGAIN(status)) {
+ /* suspended stream, needs to be re-awakened */
+ ap_log_rerror(APLOG_MARK, APLOG_TRACE2, status, stream->r,
+ "h2_proxy_stream(%s-%d): suspending",
+ stream->session->id, stream_id);
+ stream->suspended = 1;
+ h2_proxy_iq_add(stream->session->suspended, stream->id, NULL, NULL);
+ return NGHTTP2_ERR_DEFERRED;
+ }
+ else {
+ nghttp2_submit_rst_stream(ngh2, NGHTTP2_FLAG_NONE,
+ stream_id, NGHTTP2_STREAM_CLOSED);
+ return NGHTTP2_ERR_STREAM_CLOSING;
+ }
+}
+
+#ifdef H2_NG2_INVALID_HEADER_CB
+static int on_invalid_header_cb(nghttp2_session *ngh2,
+ const nghttp2_frame *frame,
+ const uint8_t *name, size_t namelen,
+ const uint8_t *value, size_t valuelen,
+ uint8_t flags, void *user_data)
+{
+ h2_proxy_session *session = user_data;
+ if (APLOGcdebug(session->c)) {
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, APLOGNO(03469)
+ "h2_proxy_session(%s-%d): denying stream with invalid header "
+ "'%s: %s'", session->id, (int)frame->hd.stream_id,
+ apr_pstrndup(session->pool, (const char *)name, namelen),
+ apr_pstrndup(session->pool, (const char *)value, valuelen));
+ }
+ return nghttp2_submit_rst_stream(session->ngh2, NGHTTP2_FLAG_NONE,
+ frame->hd.stream_id,
+ NGHTTP2_PROTOCOL_ERROR);
+}
+#endif
+
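+/* Return the h2 proxy session for this backend connection: created lazily
+ * on the connection's scpool with all nghttp2 callbacks registered, or
+ * reused with a fresh ping check when one already exists. */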
+h2_proxy_session *h2_proxy_session_setup(const char *id, proxy_conn_rec *p_conn,
+ proxy_server_conf *conf,
+ int h2_front,
+ unsigned char window_bits_connection,
+ unsigned char window_bits_stream,
+ h2_proxy_request_done *done)
+{
+ if (!p_conn->data) {
+ apr_pool_t *pool = p_conn->scpool;
+ h2_proxy_session *session;
+ nghttp2_session_callbacks *cbs;
+ nghttp2_option *option;
+
+ session = apr_pcalloc(pool, sizeof(*session));
+ apr_pool_pre_cleanup_register(pool, p_conn, proxy_session_pre_close);
+ p_conn->data = session;
+
+ session->id = apr_pstrdup(p_conn->scpool, id);
+ session->c = p_conn->connection;
+ session->p_conn = p_conn;
+ session->conf = conf;
+ session->pool = p_conn->scpool;
+ session->state = H2_PROXYS_ST_INIT;
+ session->h2_front = h2_front;
+ session->window_bits_stream = window_bits_stream;
+ session->window_bits_connection = window_bits_connection;
+ session->streams = h2_proxy_ihash_create(pool, offsetof(h2_proxy_stream, id));
+ session->suspended = h2_proxy_iq_create(pool, 5);
+ session->done = done;
+
+ session->input = apr_brigade_create(session->pool, session->c->bucket_alloc);
+ session->output = apr_brigade_create(session->pool, session->c->bucket_alloc);
+
+ nghttp2_session_callbacks_new(&cbs);
+ nghttp2_session_callbacks_set_on_frame_recv_callback(cbs, on_frame_recv);
+ nghttp2_session_callbacks_set_on_data_chunk_recv_callback(cbs, stream_response_data);
+ nghttp2_session_callbacks_set_on_stream_close_callback(cbs, on_stream_close);
+ nghttp2_session_callbacks_set_on_header_callback(cbs, on_header);
+ nghttp2_session_callbacks_set_before_frame_send_callback(cbs, before_frame_send);
+ nghttp2_session_callbacks_set_send_callback(cbs, raw_send);
+#ifdef H2_NG2_INVALID_HEADER_CB
+ nghttp2_session_callbacks_set_on_invalid_header_callback(cbs, on_invalid_header_cb);
+#endif
+ nghttp2_option_new(&option);
+ nghttp2_option_set_peer_max_concurrent_streams(option, 100);
+ nghttp2_option_set_no_auto_window_update(option, 0);
+
+ nghttp2_session_client_new2(&session->ngh2, cbs, session, option);
+
+ nghttp2_option_del(option);
+ nghttp2_session_callbacks_del(cbs);
+
+ ping_new_session(session, p_conn);
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, APLOGNO(03362)
+ "setup session for %s", p_conn->hostname);
+ }
+ else {
+ h2_proxy_session *session = p_conn->data;
+ ping_reuse_session(session);
+ }
+ return p_conn->data;
+}
+
+static apr_status_t session_start(h2_proxy_session *session)
+{
+ nghttp2_settings_entry settings[2];
+ int rv, add_conn_window;
+ apr_socket_t *s;
+
+ s = ap_get_conn_socket(session->c);
+#if (!defined(WIN32) && !defined(NETWARE)) || defined(DOXYGEN)
+ if (s) {
+ ap_sock_disable_nagle(s);
+ }
+#endif
+
+ settings[0].settings_id = NGHTTP2_SETTINGS_ENABLE_PUSH;
+ settings[0].value = 0;
+ settings[1].settings_id = NGHTTP2_SETTINGS_INITIAL_WINDOW_SIZE;
+ settings[1].value = (1 << session->window_bits_stream) - 1;
+
+ rv = nghttp2_submit_settings(session->ngh2, NGHTTP2_FLAG_NONE, settings,
+ H2_ALEN(settings));
+
+ /* If the connection window is larger than our default, trigger a WINDOW_UPDATE */
+ add_conn_window = ((1 << session->window_bits_connection) - 1 -
+ NGHTTP2_INITIAL_CONNECTION_WINDOW_SIZE);
+ if (!rv && add_conn_window != 0) {
+ rv = nghttp2_submit_window_update(session->ngh2, NGHTTP2_FLAG_NONE, 0, add_conn_window);
+ }
+ return rv? APR_EGENERAL : APR_SUCCESS;
+}
+
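+/* Build the proxy stream for a request: parse the URL, derive scheme and
+ * :authority (honouring ProxyPreserveHost), create the HTTP/2 request
+ * headers, optionally add X-Forwarded-* headers, and stash any Set-Cookie
+ * response headers already present for later merging. */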
+static apr_status_t open_stream(h2_proxy_session *session, const char *url,
+ request_rec *r, int standalone,
+ h2_proxy_stream **pstream)
+{
+ h2_proxy_stream *stream;
+ apr_uri_t puri;
+ const char *authority, *scheme, *path;
+ apr_status_t status;
+ proxy_dir_conf *dconf;
+
+ stream = apr_pcalloc(r->pool, sizeof(*stream));
+
+ stream->pool = r->pool;
+ stream->url = url;
+ stream->r = r;
+ stream->standalone = standalone;
+ stream->session = session;
+ stream->state = H2_STREAM_ST_IDLE;
+
+ stream->input = apr_brigade_create(stream->pool, session->c->bucket_alloc);
+ stream->output = apr_brigade_create(stream->pool, session->c->bucket_alloc);
+
+ stream->req = h2_proxy_req_create(1, stream->pool);
+
+ status = apr_uri_parse(stream->pool, url, &puri);
+ if (status != APR_SUCCESS)
+ return status;
+
+ scheme = (strcmp(puri.scheme, "h2")? "http" : "https");
+
+ dconf = ap_get_module_config(r->per_dir_config, &proxy_module);
+ if (dconf->preserve_host) {
+ authority = apr_table_get(r->headers_in, "Host");
+ if (authority == NULL) {
+ authority = r->hostname;
+ }
+ }
+ else {
+ authority = puri.hostname;
+ if (!ap_strchr_c(authority, ':') && puri.port
+ && apr_uri_port_of_scheme(scheme) != puri.port) {
+ /* port info missing and port is not default for scheme: append */
+ authority = apr_psprintf(stream->pool, "%s:%d", authority, puri.port);
+ }
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, session->c,
+ "authority=%s from uri.hostname=%s and uri.port=%d",
+ authority, puri.hostname, puri.port);
+ }
+ /* See #235, we use only :authority when available and remove Host:
+ * since differing values are not acceptable, see RFC 9113 ch. 8.3.1 */
+ if (authority && strlen(authority)) {
+ apr_table_unset(r->headers_in, "Host");
+ }
+
+ /* we need this for mapping relative uris in headers ("Link") back
+ * to local uris */
+ stream->real_server_uri = apr_psprintf(stream->pool, "%s://%s", scheme, authority);
+ stream->p_server_uri = apr_psprintf(stream->pool, "%s://%s", puri.scheme, authority);
+ path = apr_uri_unparse(stream->pool, &puri, APR_URI_UNP_OMITSITEPART);
+
+ h2_proxy_req_make(stream->req, stream->pool, r->method, scheme,
+ authority, path, r->headers_in);
+
+ if (dconf->add_forwarded_headers) {
+ if (PROXYREQ_REVERSE == r->proxyreq) {
+ const char *buf;
+
+ /* Add X-Forwarded-For: so that the upstream has a chance to
+ * determine, where the original request came from.
+ */
+ apr_table_mergen(stream->req->headers, "X-Forwarded-For",
+ r->useragent_ip);
+
+ /* Add X-Forwarded-Host: so that upstream knows what the
+ * original request hostname was.
+ */
+ if ((buf = apr_table_get(r->headers_in, "Host"))) {
+ apr_table_mergen(stream->req->headers, "X-Forwarded-Host", buf);
+ }
+
+ /* Add X-Forwarded-Server: so that upstream knows what the
+ * name of this proxy server is (if there are more than one)
+ * XXX: This duplicates Via: - do we strictly need it?
+ */
+ apr_table_mergen(stream->req->headers, "X-Forwarded-Server",
+ r->server->server_hostname);
+ }
+ }
+
+ /* Tuck away all already existing cookies */
+ stream->saves = apr_table_make(r->pool, 2);
+ apr_table_do(add_header, stream->saves, r->headers_out, "Set-Cookie", NULL);
+
+ *pstream = stream;
+
+ return APR_SUCCESS;
+}
+
+static apr_status_t submit_stream(h2_proxy_session *session, h2_proxy_stream *stream)
+{
+ h2_proxy_ngheader *hd;
+ nghttp2_data_provider *pp = NULL;
+ nghttp2_data_provider provider;
+ int rv, may_have_request_body = 1;
+ apr_status_t status;
+
+ hd = h2_proxy_util_nghd_make_req(stream->pool, stream->req);
+
+ /* If we expect a 100-continue response, we must refrain from reading
+       any input until we get it. Reading the input will possibly trigger
+       the HTTP_IN filter to generate the 100-continue itself. */
+ if (stream->waiting_on_100 || stream->waiting_on_ping) {
+ /* make a small test if we get an EOF/EOS immediately */
+ status = ap_get_brigade(stream->r->input_filters, stream->input,
+ AP_MODE_READBYTES, APR_NONBLOCK_READ,
+ APR_BUCKET_BUFF_SIZE);
+ may_have_request_body = APR_STATUS_IS_EAGAIN(status)
+ || (status == APR_SUCCESS
+ && !APR_BUCKET_IS_EOS(APR_BRIGADE_FIRST(stream->input)));
+ }
+
+ if (may_have_request_body) {
+ provider.source.fd = 0;
+ provider.source.ptr = NULL;
+ provider.read_callback = stream_request_data;
+ pp = &provider;
+ }
+
+ rv = nghttp2_submit_request(session->ngh2, NULL,
+ hd->nv, hd->nvlen, pp, stream);
+
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, APLOGNO(03363)
+ "h2_proxy_session(%s): submit %s%s -> %d",
+ session->id, stream->req->authority, stream->req->path,
+ rv);
+ if (rv > 0) {
+ stream->id = rv;
+ stream->state = H2_STREAM_ST_OPEN;
+ h2_proxy_ihash_add(session->streams, stream);
+ dispatch_event(session, H2_PROXYS_EV_STREAM_SUBMITTED, rv, NULL);
+
+ return APR_SUCCESS;
+ }
+ return APR_EGENERAL;
+}
+
+static apr_status_t submit_trailers(h2_proxy_stream *stream)
+{
+ h2_proxy_ngheader *hd;
+ int rv;
+
+ hd = h2_proxy_util_nghd_make(stream->pool, stream->r->trailers_in);
+ rv = nghttp2_submit_trailer(stream->session->ngh2, stream->id, hd->nv, hd->nvlen);
+ return rv == 0? APR_SUCCESS: APR_EGENERAL;
+}
+
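+/* Feed raw connection input to nghttp2_session_mem_recv(); buckets that are
+ * only partially consumed are split so the remainder stays in the brigade.
+ * Returns APR_EAGAIN when nothing was fed. */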
+static apr_status_t feed_brigade(h2_proxy_session *session, apr_bucket_brigade *bb)
+{
+ apr_status_t status = APR_SUCCESS;
+ apr_size_t readlen = 0;
+ ssize_t n;
+
+ while (status == APR_SUCCESS && !APR_BRIGADE_EMPTY(bb)) {
+ apr_bucket* b = APR_BRIGADE_FIRST(bb);
+
+ if (APR_BUCKET_IS_METADATA(b)) {
+ /* nop */
+ }
+ else {
+ const char *bdata = NULL;
+ apr_size_t blen = 0;
+
+ status = apr_bucket_read(b, &bdata, &blen, APR_BLOCK_READ);
+ if (status == APR_SUCCESS && blen > 0) {
+ n = nghttp2_session_mem_recv(session->ngh2, (const uint8_t *)bdata, blen);
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, session->c,
+ "h2_proxy_session(%s): feeding %ld bytes -> %ld",
+ session->id, (long)blen, (long)n);
+ if (n < 0) {
+ if (nghttp2_is_fatal((int)n)) {
+ status = APR_EGENERAL;
+ }
+ }
+ else {
+ size_t rlen = (size_t)n;
+ readlen += rlen;
+ if (rlen < blen) {
+ apr_bucket_split(b, rlen);
+ }
+ }
+ }
+ }
+ apr_bucket_delete(b);
+ }
+
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, session->c,
+ "h2_proxy_session(%s): fed %ld bytes of input to session",
+ session->id, (long)readlen);
+ if (readlen == 0 && status == APR_SUCCESS) {
+ return APR_EAGAIN;
+ }
+ return status;
+}
+
+static apr_status_t h2_proxy_session_read(h2_proxy_session *session, int block,
+ apr_interval_time_t timeout)
+{
+ apr_status_t status = APR_SUCCESS;
+
+ if (APR_BRIGADE_EMPTY(session->input)) {
+ apr_socket_t *socket = NULL;
+ apr_time_t save_timeout = -1;
+
+ if (block && timeout > 0) {
+ socket = ap_get_conn_socket(session->c);
+ if (socket) {
+ apr_socket_timeout_get(socket, &save_timeout);
+ apr_socket_timeout_set(socket, timeout);
+ }
+ else {
+ /* cannot block on timeout */
+ ap_log_cerror(APLOG_MARK, APLOG_WARNING, 0, session->c, APLOGNO(03379)
+ "h2_proxy_session(%s): unable to get conn socket",
+ session->id);
+ return APR_ENOTIMPL;
+ }
+ }
+
+ status = ap_get_brigade(session->c->input_filters, session->input,
+ AP_MODE_READBYTES,
+ block? APR_BLOCK_READ : APR_NONBLOCK_READ,
+ 64 * 1024);
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE3, status, session->c,
+ "h2_proxy_session(%s): read from conn", session->id);
+ if (socket && save_timeout != -1) {
+ apr_socket_timeout_set(socket, save_timeout);
+ }
+ }
+
+ if (status == APR_SUCCESS) {
+ status = feed_brigade(session, session->input);
+ }
+ else if (APR_STATUS_IS_TIMEUP(status)) {
+ /* nop */
+ }
+ else if (!APR_STATUS_IS_EAGAIN(status)) {
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, session->c, APLOGNO(03380)
+ "h2_proxy_session(%s): read error", session->id);
+ dispatch_event(session, H2_PROXYS_EV_CONN_ERROR, status, NULL);
+ }
+
+ return status;
+}
+
+apr_status_t h2_proxy_session_submit(h2_proxy_session *session,
+ const char *url, request_rec *r,
+ int standalone)
+{
+ h2_proxy_stream *stream;
+ apr_status_t status;
+
+ status = open_stream(session, url, r, standalone, &stream);
+ if (status == APR_SUCCESS) {
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(03381)
+ "process stream(%d): %s %s%s, original: %s",
+ stream->id, stream->req->method,
+ stream->req->authority, stream->req->path,
+ r->the_request);
+ status = submit_stream(session, stream);
+ }
+ return status;
+}
+
+static void stream_resume(h2_proxy_stream *stream)
+{
+ h2_proxy_session *session = stream->session;
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, session->c,
+ "h2_proxy_stream(%s-%d): resuming",
+ session->id, stream->id);
+ stream->suspended = 0;
+ h2_proxy_iq_remove(session->suspended, stream->id);
+ nghttp2_session_resume_data(session->ngh2, stream->id);
+ dispatch_event(session, H2_PROXYS_EV_STREAM_RESUMED, 0, NULL);
+}
+
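+/* True while we are in a PING wait state, or when no stream is suspended
+ * and there is nothing to write although nghttp2 still expects to read
+ * from the backend. */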
+static int is_waiting_for_backend(h2_proxy_session *session)
+{
+ return ((session->ping_state != H2_PING_ST_NONE)
+ || ((session->suspended->nelts <= 0)
+ && !nghttp2_session_want_write(session->ngh2)
+ && nghttp2_session_want_read(session->ngh2)));
+}
+
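+/* Poll the suspended streams for newly readable input and resume the first
+ * one that has data (or whose read failed); the scan then restarts over the
+ * remaining queue. Returns APR_EAGAIN when nothing could be resumed. */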
+static apr_status_t check_suspended(h2_proxy_session *session)
+{
+ h2_proxy_stream *stream;
+ int i, stream_id;
+ apr_status_t status;
+
+ for (i = 0; i < session->suspended->nelts; ++i) {
+ stream_id = session->suspended->elts[i];
+ stream = nghttp2_session_get_stream_user_data(session->ngh2, stream_id);
+ if (stream) {
+ if (stream->waiting_on_100 || stream->waiting_on_ping) {
+ status = APR_EAGAIN;
+ }
+ else {
+ status = ap_get_brigade(stream->r->input_filters, stream->input,
+ AP_MODE_READBYTES, APR_NONBLOCK_READ,
+ APR_BUCKET_BUFF_SIZE);
+ }
+ if (status == APR_SUCCESS && !APR_BRIGADE_EMPTY(stream->input)) {
+ stream_resume(stream);
+ check_suspended(session);
+ return APR_SUCCESS;
+ }
+ else if (status != APR_SUCCESS && !APR_STATUS_IS_EAGAIN(status)) {
+ ap_log_cerror(APLOG_MARK, APLOG_WARNING, status, session->c,
+ APLOGNO(03382) "h2_proxy_stream(%s-%d): check input",
+ session->id, stream_id);
+ stream_resume(stream);
+ check_suspended(session);
+ return APR_SUCCESS;
+ }
+ }
+ else {
+ /* gone? */
+ h2_proxy_iq_remove(session->suspended, stream_id);
+ check_suspended(session);
+ return APR_SUCCESS;
+ }
+ }
+ return APR_EAGAIN;
+}
+
+static apr_status_t session_shutdown(h2_proxy_session *session, int reason,
+ const char *msg)
+{
+ apr_status_t status = APR_SUCCESS;
+ const char *err = msg;
+
+ ap_assert(session);
+ if (!err && reason) {
+ err = nghttp2_strerror(reason);
+ }
+ nghttp2_submit_goaway(session->ngh2, NGHTTP2_FLAG_NONE, 0,
+ reason, (uint8_t*)err, err? strlen(err):0);
+ status = nghttp2_session_send(session->ngh2);
+ dispatch_event(session, H2_PROXYS_EV_LOCAL_GOAWAY, reason, err);
+ return status;
+}
+
+
+static const char *StateNames[] = {
+ "INIT", /* H2_PROXYS_ST_INIT */
+ "DONE", /* H2_PROXYS_ST_DONE */
+ "IDLE", /* H2_PROXYS_ST_IDLE */
+ "BUSY", /* H2_PROXYS_ST_BUSY */
+ "WAIT", /* H2_PROXYS_ST_WAIT */
+ "LSHUTDOWN", /* H2_PROXYS_ST_LOCAL_SHUTDOWN */
+ "RSHUTDOWN", /* H2_PROXYS_ST_REMOTE_SHUTDOWN */
+};
+
+static const char *state_name(h2_proxys_state state)
+{
+ if (state >= (sizeof(StateNames)/sizeof(StateNames[0]))) {
+ return "unknown";
+ }
+ return StateNames[state];
+}
+
+static int is_accepting_streams(h2_proxy_session *session)
+{
+ switch (session->state) {
+ case H2_PROXYS_ST_IDLE:
+ case H2_PROXYS_ST_BUSY:
+ case H2_PROXYS_ST_WAIT:
+ return 1;
+ default:
+ return 0;
+ }
+}
+
+static void transit(h2_proxy_session *session, const char *action,
+ h2_proxys_state nstate)
+{
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, APLOGNO(03345)
+ "h2_proxy_session(%s): transit [%s] -- %s --> [%s]", session->id,
+ state_name(session->state), action, state_name(nstate));
+ session->state = nstate;
+}
+
+static void ev_init(h2_proxy_session *session, int arg, const char *msg)
+{
+ switch (session->state) {
+ case H2_PROXYS_ST_INIT:
+ if (h2_proxy_ihash_empty(session->streams)) {
+ transit(session, "init", H2_PROXYS_ST_IDLE);
+ }
+ else {
+ transit(session, "init", H2_PROXYS_ST_BUSY);
+ }
+ break;
+
+ default:
+ /* nop */
+ break;
+ }
+}
+
+static void ev_local_goaway(h2_proxy_session *session, int arg, const char *msg)
+{
+ switch (session->state) {
+ case H2_PROXYS_ST_LOCAL_SHUTDOWN:
+ /* already did that? */
+ break;
+ case H2_PROXYS_ST_IDLE:
+ case H2_PROXYS_ST_REMOTE_SHUTDOWN:
+ /* all done */
+ transit(session, "local goaway", H2_PROXYS_ST_DONE);
+ break;
+ default:
+ transit(session, "local goaway", H2_PROXYS_ST_LOCAL_SHUTDOWN);
+ break;
+ }
+}
+
+static void ev_remote_goaway(h2_proxy_session *session, int arg, const char *msg)
+{
+ switch (session->state) {
+ case H2_PROXYS_ST_REMOTE_SHUTDOWN:
+ /* already received that? */
+ break;
+ case H2_PROXYS_ST_IDLE:
+ case H2_PROXYS_ST_LOCAL_SHUTDOWN:
+ /* all done */
+ transit(session, "remote goaway", H2_PROXYS_ST_DONE);
+ break;
+ default:
+ transit(session, "remote goaway", H2_PROXYS_ST_REMOTE_SHUTDOWN);
+ break;
+ }
+}
+
+static void ev_conn_error(h2_proxy_session *session, int arg, const char *msg)
+{
+ switch (session->state) {
+ case H2_PROXYS_ST_INIT:
+ case H2_PROXYS_ST_DONE:
+ case H2_PROXYS_ST_LOCAL_SHUTDOWN:
+ /* just leave */
+ transit(session, "conn error", H2_PROXYS_ST_DONE);
+ break;
+
+ default:
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, arg, session->c,
+ "h2_proxy_session(%s): conn error -> shutdown", session->id);
+ session_shutdown(session, arg, msg);
+ break;
+ }
+}
+
+static void ev_proto_error(h2_proxy_session *session, int arg, const char *msg)
+{
+ switch (session->state) {
+ case H2_PROXYS_ST_DONE:
+ case H2_PROXYS_ST_LOCAL_SHUTDOWN:
+ /* just leave */
+ transit(session, "proto error", H2_PROXYS_ST_DONE);
+ break;
+
+ default:
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, session->c,
+ "h2_proxy_session(%s): proto error -> shutdown", session->id);
+ session_shutdown(session, arg, msg);
+ break;
+ }
+}
+
+static void ev_conn_timeout(h2_proxy_session *session, int arg, const char *msg)
+{
+ switch (session->state) {
+ case H2_PROXYS_ST_LOCAL_SHUTDOWN:
+ transit(session, "conn timeout", H2_PROXYS_ST_DONE);
+ break;
+ default:
+ session_shutdown(session, arg, msg);
+ transit(session, "conn timeout", H2_PROXYS_ST_DONE);
+ break;
+ }
+}
+
+static void ev_no_io(h2_proxy_session *session, int arg, const char *msg)
+{
+ switch (session->state) {
+ case H2_PROXYS_ST_BUSY:
+ case H2_PROXYS_ST_LOCAL_SHUTDOWN:
+ case H2_PROXYS_ST_REMOTE_SHUTDOWN:
+ /* nothing for input and output to do. If we remain
+ * in this state, we go into a tight loop and suck up
+ * CPU cycles. Ideally, we'd like to do a blocking read, but that
+ * is not possible if we have scheduled tasks and wait
+ * for them to produce something. */
+ if (h2_proxy_ihash_empty(session->streams)) {
+ if (!is_accepting_streams(session)) {
+ /* We are no longer accepting new streams and have
+ * finished processing existing ones. Time to leave. */
+ session_shutdown(session, arg, msg);
+ transit(session, "no io", H2_PROXYS_ST_DONE);
+ }
+ else {
+ /* When we have no streams, no task events are possible,
+ * switch to blocking reads */
+ transit(session, "no io", H2_PROXYS_ST_IDLE);
+ }
+ }
+ else {
+ /* Unable to do blocking reads, as we wait on events from
+ * task processing in other threads. Do a busy wait with
+ * backoff timer. */
+ transit(session, "no io", H2_PROXYS_ST_WAIT);
+ }
+ break;
+ default:
+ /* nop */
+ break;
+ }
+}
+
+static void ev_stream_submitted(h2_proxy_session *session, int stream_id,
+ const char *msg)
+{
+ switch (session->state) {
+ case H2_PROXYS_ST_IDLE:
+ case H2_PROXYS_ST_WAIT:
+ transit(session, "stream submitted", H2_PROXYS_ST_BUSY);
+ break;
+ default:
+ /* nop */
+ break;
+ }
+}
+
+static void ev_stream_done(h2_proxy_session *session, int stream_id,
+ const char *msg)
+{
+ h2_proxy_stream *stream;
+ apr_bucket *b;
+
+ stream = nghttp2_session_get_stream_user_data(session->ngh2, stream_id);
+ if (stream) {
+ /* if the stream's connection is aborted, do not send anything
+ * more on it. */
+ apr_status_t status = (stream->error_code == 0)? APR_SUCCESS : APR_EINVAL;
+ int touched = (stream->data_sent ||
+ stream_id <= session->last_stream_id);
+ if (!session->c->aborted) {
+            ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, APLOGNO(03364)
+                          "h2_proxy_session(%s): stream(%d) closed "
+ "(touched=%d, error=%d)",
+ session->id, stream_id, touched, stream->error_code);
+
+ if (status != APR_SUCCESS) {
+ b = ap_bucket_error_create(HTTP_SERVICE_UNAVAILABLE, NULL, stream->r->pool,
+ stream->r->connection->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(stream->output, b);
+ b = apr_bucket_eos_create(stream->r->connection->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(stream->output, b);
+ ap_pass_brigade(stream->r->output_filters, stream->output);
+ }
+ else if (!stream->data_received) {
+ /* if the response had no body, this is the time to flush
+ * an empty brigade which will also write the response headers */
+ h2_proxy_stream_end_headers_out(stream);
+ stream->data_received = 1;
+ b = apr_bucket_flush_create(stream->r->connection->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(stream->output, b);
+ b = apr_bucket_eos_create(stream->r->connection->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(stream->output, b);
+ ap_pass_brigade(stream->r->output_filters, stream->output);
+ }
+ }
+
+ stream->state = H2_STREAM_ST_CLOSED;
+ h2_proxy_ihash_remove(session->streams, stream_id);
+ h2_proxy_iq_remove(session->suspended, stream_id);
+ if (session->done) {
+ session->done(session, stream->r, status, touched);
+ }
+ }
+
+ switch (session->state) {
+ default:
+ /* nop */
+ break;
+ }
+}
+
+static void ev_stream_resumed(h2_proxy_session *session, int arg, const char *msg)
+{
+ switch (session->state) {
+ case H2_PROXYS_ST_WAIT:
+ transit(session, "stream resumed", H2_PROXYS_ST_BUSY);
+ break;
+ default:
+ /* nop */
+ break;
+ }
+}
+
+static void ev_data_read(h2_proxy_session *session, int arg, const char *msg)
+{
+ switch (session->state) {
+ case H2_PROXYS_ST_IDLE:
+ case H2_PROXYS_ST_WAIT:
+ transit(session, "data read", H2_PROXYS_ST_BUSY);
+ break;
+ default:
+ /* nop */
+ break;
+ }
+}
+
+static void ev_ngh2_done(h2_proxy_session *session, int arg, const char *msg)
+{
+ switch (session->state) {
+ case H2_PROXYS_ST_DONE:
+ /* nop */
+ break;
+ default:
+ transit(session, "nghttp2 done", H2_PROXYS_ST_DONE);
+ break;
+ }
+}
+
+static void ev_pre_close(h2_proxy_session *session, int arg, const char *msg)
+{
+ switch (session->state) {
+ case H2_PROXYS_ST_DONE:
+ case H2_PROXYS_ST_LOCAL_SHUTDOWN:
+ /* nop */
+ break;
+ default:
+ session_shutdown(session, arg, msg);
+ break;
+ }
+}
+
+static void dispatch_event(h2_proxy_session *session, h2_proxys_event_t ev,
+ int arg, const char *msg)
+{
+ switch (ev) {
+ case H2_PROXYS_EV_INIT:
+ ev_init(session, arg, msg);
+ break;
+ case H2_PROXYS_EV_LOCAL_GOAWAY:
+ ev_local_goaway(session, arg, msg);
+ break;
+ case H2_PROXYS_EV_REMOTE_GOAWAY:
+ ev_remote_goaway(session, arg, msg);
+ break;
+ case H2_PROXYS_EV_CONN_ERROR:
+ ev_conn_error(session, arg, msg);
+ break;
+ case H2_PROXYS_EV_PROTO_ERROR:
+ ev_proto_error(session, arg, msg);
+ break;
+ case H2_PROXYS_EV_CONN_TIMEOUT:
+ ev_conn_timeout(session, arg, msg);
+ break;
+ case H2_PROXYS_EV_NO_IO:
+ ev_no_io(session, arg, msg);
+ break;
+ case H2_PROXYS_EV_STREAM_SUBMITTED:
+ ev_stream_submitted(session, arg, msg);
+ break;
+ case H2_PROXYS_EV_STREAM_DONE:
+ ev_stream_done(session, arg, msg);
+ break;
+ case H2_PROXYS_EV_STREAM_RESUMED:
+ ev_stream_resumed(session, arg, msg);
+ break;
+ case H2_PROXYS_EV_DATA_READ:
+ ev_data_read(session, arg, msg);
+ break;
+ case H2_PROXYS_EV_NGH2_DONE:
+ ev_ngh2_done(session, arg, msg);
+ break;
+ case H2_PROXYS_EV_PRE_CLOSE:
+ ev_pre_close(session, arg, msg);
+ break;
+ default:
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, session->c,
+ "h2_proxy_session(%s): unknown event %d",
+ session->id, ev);
+ break;
+ }
+}
+
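+/* Let nghttp2 serialize pending frames onto the connection. Returns 1 after
+ * a send attempt, 0 when nothing was pending or the attempt failed fatally. */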
+static int send_loop(h2_proxy_session *session)
+{
+ while (nghttp2_session_want_write(session->ngh2)) {
+ int rv = nghttp2_session_send(session->ngh2);
+ if (rv < 0 && nghttp2_is_fatal(rv)) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, session->c,
+ "h2_proxy_session(%s): write, rv=%d", session->id, rv);
+ dispatch_event(session, H2_PROXYS_EV_CONN_ERROR, rv, NULL);
+ break;
+ }
+ return 1;
+ }
+ return 0;
+}
+
+apr_status_t h2_proxy_session_process(h2_proxy_session *session)
+{
+ apr_status_t status;
+ int have_written = 0, have_read = 0;
+
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, session->c,
+ "h2_proxy_session(%s): process", session->id);
+
+run_loop:
+ switch (session->state) {
+ case H2_PROXYS_ST_INIT:
+ status = session_start(session);
+ if (status == APR_SUCCESS) {
+ dispatch_event(session, H2_PROXYS_EV_INIT, 0, NULL);
+ goto run_loop;
+ }
+ else {
+ dispatch_event(session, H2_PROXYS_EV_CONN_ERROR, status, NULL);
+ }
+ break;
+
+ case H2_PROXYS_ST_BUSY:
+ case H2_PROXYS_ST_LOCAL_SHUTDOWN:
+ case H2_PROXYS_ST_REMOTE_SHUTDOWN:
+ have_written = send_loop(session);
+
+ if (nghttp2_session_want_read(session->ngh2)) {
+ status = h2_proxy_session_read(session, 0, 0);
+ if (status == APR_SUCCESS) {
+ have_read = 1;
+ }
+ }
+
+ if (!have_written && !have_read
+ && !nghttp2_session_want_write(session->ngh2)) {
+ dispatch_event(session, H2_PROXYS_EV_NO_IO, 0, NULL);
+ goto run_loop;
+ }
+ break;
+
+ case H2_PROXYS_ST_WAIT:
+ if (is_waiting_for_backend(session)) {
+ /* we can do a blocking read with the default timeout (as
+             * configured via ProxyTimeout) on our socket. There is
+ * nothing we want to send or check until we get more data
+ * from the backend. */
+ status = h2_proxy_session_read(session, 1, 0);
+ if (status == APR_SUCCESS) {
+ have_read = 1;
+ dispatch_event(session, H2_PROXYS_EV_DATA_READ, 0, NULL);
+ }
+ else {
+ dispatch_event(session, H2_PROXYS_EV_CONN_ERROR, status, NULL);
+ return status;
+ }
+ }
+ else if (check_suspended(session) == APR_EAGAIN) {
+ /* no stream has become resumed. Do a blocking read with
+ * ever increasing timeouts... */
+ if (session->wait_timeout < 25) {
+ session->wait_timeout = 25;
+ }
+ else {
+ session->wait_timeout = H2MIN(apr_time_from_msec(100),
+ 2*session->wait_timeout);
+ }
+
+ status = h2_proxy_session_read(session, 1, session->wait_timeout);
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE3, status, session->c,
+ APLOGNO(03365)
+ "h2_proxy_session(%s): WAIT read, timeout=%fms",
+ session->id, session->wait_timeout/1000.0);
+ if (status == APR_SUCCESS) {
+ have_read = 1;
+ dispatch_event(session, H2_PROXYS_EV_DATA_READ, 0, NULL);
+ }
+ else if (APR_STATUS_IS_TIMEUP(status)
+ || APR_STATUS_IS_EAGAIN(status)) {
+ /* go back to checking all inputs again */
+ transit(session, "wait cycle", H2_PROXYS_ST_BUSY);
+ }
+ }
+ break;
+
+ case H2_PROXYS_ST_IDLE:
+ break;
+
+ case H2_PROXYS_ST_DONE: /* done, session terminated */
+ return APR_EOF;
+
+ default:
+ ap_log_cerror(APLOG_MARK, APLOG_ERR, APR_EGENERAL, session->c,
+ APLOGNO(03346)"h2_proxy_session(%s): unknown state %d",
+ session->id, session->state);
+ dispatch_event(session, H2_PROXYS_EV_PROTO_ERROR, 0, NULL);
+ break;
+ }
+
+
+ if (have_read || have_written) {
+ session->wait_timeout = 0;
+ }
+
+ if (!nghttp2_session_want_read(session->ngh2)
+ && !nghttp2_session_want_write(session->ngh2)) {
+ dispatch_event(session, H2_PROXYS_EV_NGH2_DONE, 0, NULL);
+ }
+
+ return APR_SUCCESS; /* needs to be called again */
+}
+
+typedef struct {
+ h2_proxy_session *session;
+ h2_proxy_request_done *done;
+} cleanup_iter_ctx;
+
+static int cancel_iter(void *udata, void *val)
+{
+ cleanup_iter_ctx *ctx = udata;
+ h2_proxy_stream *stream = val;
+ nghttp2_submit_rst_stream(ctx->session->ngh2, NGHTTP2_FLAG_NONE,
+ stream->id, 0);
+ return 1;
+}
+
+void h2_proxy_session_cancel_all(h2_proxy_session *session)
+{
+ if (!h2_proxy_ihash_empty(session->streams)) {
+ cleanup_iter_ctx ctx;
+ ctx.session = session;
+ ctx.done = session->done;
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, APLOGNO(03366)
+ "h2_proxy_session(%s): cancel %d streams",
+ session->id, (int)h2_proxy_ihash_count(session->streams));
+ h2_proxy_ihash_iter(session->streams, cancel_iter, &ctx);
+ session_shutdown(session, 0, NULL);
+ }
+}
+
+static int done_iter(void *udata, void *val)
+{
+ cleanup_iter_ctx *ctx = udata;
+ h2_proxy_stream *stream = val;
+ int touched = (stream->data_sent ||
+ stream->id <= ctx->session->last_stream_id);
+ ctx->done(ctx->session, stream->r, APR_ECONNABORTED, touched);
+ return 1;
+}
+
+void h2_proxy_session_cleanup(h2_proxy_session *session,
+ h2_proxy_request_done *done)
+{
+ if (!h2_proxy_ihash_empty(session->streams)) {
+ cleanup_iter_ctx ctx;
+ ctx.session = session;
+ ctx.done = done;
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, APLOGNO(03519)
+ "h2_proxy_session(%s): terminated, %d streams unfinished",
+ session->id, (int)h2_proxy_ihash_count(session->streams));
+ h2_proxy_ihash_iter(session->streams, done_iter, &ctx);
+ h2_proxy_ihash_clear(session->streams);
+ }
+}
+
+static int ping_arrived_iter(void *udata, void *val)
+{
+ h2_proxy_stream *stream = val;
+ if (stream->waiting_on_ping) {
+ stream->waiting_on_ping = 0;
+ stream_resume(stream);
+ }
+ return 1;
+}
+
+static void ping_arrived(h2_proxy_session *session)
+{
+ if (!h2_proxy_ihash_empty(session->streams)) {
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, APLOGNO(03470)
+ "h2_proxy_session(%s): ping arrived, unblocking streams",
+ session->id);
+ h2_proxy_ihash_iter(session->streams, ping_arrived_iter, &session);
+ }
+}
+
+typedef struct {
+ h2_proxy_session *session;
+ conn_rec *c;
+ apr_off_t bytes;
+ int updated;
+} win_update_ctx;
+
diff --git a/modules/http2/h2_proxy_session.h b/modules/http2/h2_proxy_session.h
new file mode 100644
index 0000000..f40e5ee
--- /dev/null
+++ b/modules/http2/h2_proxy_session.h
@@ -0,0 +1,133 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef h2_proxy_session_h
+#define h2_proxy_session_h
+
+#define H2_ALEN(a) (sizeof(a)/sizeof((a)[0]))
+
+#include <nghttp2/nghttp2.h>
+
+struct h2_proxy_iqueue;
+struct h2_proxy_ihash_t;
+
+typedef enum {
+ H2_STREAM_ST_IDLE,
+ H2_STREAM_ST_OPEN,
+ H2_STREAM_ST_RESV_LOCAL,
+ H2_STREAM_ST_RESV_REMOTE,
+ H2_STREAM_ST_CLOSED_INPUT,
+ H2_STREAM_ST_CLOSED_OUTPUT,
+ H2_STREAM_ST_CLOSED,
+} h2_proxy_stream_state_t;
+
+typedef enum {
+ H2_PROXYS_ST_INIT, /* send initial SETTINGS, etc. */
+ H2_PROXYS_ST_DONE, /* finished, connection close */
+ H2_PROXYS_ST_IDLE, /* no streams to process */
+ H2_PROXYS_ST_BUSY, /* read/write without stop */
+ H2_PROXYS_ST_WAIT, /* waiting for tasks reporting back */
+ H2_PROXYS_ST_LOCAL_SHUTDOWN, /* we announced GOAWAY */
+ H2_PROXYS_ST_REMOTE_SHUTDOWN, /* client announced GOAWAY */
+} h2_proxys_state;
+
+typedef enum {
+ H2_PROXYS_EV_INIT, /* session was initialized */
+    H2_PROXYS_EV_LOCAL_GOAWAY,  /* we sent a GOAWAY */
+    H2_PROXYS_EV_REMOTE_GOAWAY, /* remote sent us a GOAWAY */
+ H2_PROXYS_EV_CONN_ERROR, /* connection error */
+ H2_PROXYS_EV_PROTO_ERROR, /* protocol error */
+ H2_PROXYS_EV_CONN_TIMEOUT, /* connection timeout */
+ H2_PROXYS_EV_NO_IO, /* nothing has been read or written */
+ H2_PROXYS_EV_STREAM_SUBMITTED, /* stream has been submitted */
+ H2_PROXYS_EV_STREAM_DONE, /* stream has been finished */
+ H2_PROXYS_EV_STREAM_RESUMED, /* stream signalled availability of headers/data */
+ H2_PROXYS_EV_DATA_READ, /* connection data has been read */
+    H2_PROXYS_EV_NGH2_DONE,        /* nghttp2 wants to neither read nor write anything */
+ H2_PROXYS_EV_PRE_CLOSE, /* connection will close after this */
+} h2_proxys_event_t;
+
+typedef enum {
+ H2_PING_ST_NONE, /* normal connection mode, ProxyTimeout rules */
+ H2_PING_ST_AWAIT_ANY, /* waiting for any frame from backend */
+ H2_PING_ST_AWAIT_PING, /* waiting for PING frame from backend */
+} h2_ping_state_t;
+
+typedef struct h2_proxy_session h2_proxy_session;
+typedef void h2_proxy_request_done(h2_proxy_session *s, request_rec *r,
+ apr_status_t status, int touched);
+
+struct h2_proxy_session {
+ const char *id;
+ conn_rec *c;
+ proxy_conn_rec *p_conn;
+ proxy_server_conf *conf;
+ apr_pool_t *pool;
+ nghttp2_session *ngh2; /* the nghttp2 session itself */
+
+ unsigned int aborted : 1;
+ unsigned int h2_front : 1; /* if front-end connection is HTTP/2 */
+
+ h2_proxy_request_done *done;
+ void *user_data;
+
+ unsigned char window_bits_stream;
+ unsigned char window_bits_connection;
+
+ h2_proxys_state state;
+ apr_interval_time_t wait_timeout;
+
+ struct h2_proxy_ihash_t *streams;
+ struct h2_proxy_iqueue *suspended;
+ apr_size_t remote_max_concurrent;
+ int last_stream_id; /* last stream id processed by backend, or 0 */
+ apr_time_t last_frame_received;
+
+ apr_bucket_brigade *input;
+ apr_bucket_brigade *output;
+
+ h2_ping_state_t ping_state;
+ apr_time_t ping_timeout;
+ apr_time_t save_timeout;
+};
+
+h2_proxy_session *h2_proxy_session_setup(const char *id, proxy_conn_rec *p_conn,
+ proxy_server_conf *conf,
+ int h2_front,
+ unsigned char window_bits_connection,
+ unsigned char window_bits_stream,
+ h2_proxy_request_done *done);
+
+apr_status_t h2_proxy_session_submit(h2_proxy_session *s, const char *url,
+ request_rec *r, int standalone);
+
+/**
+ * Perform a step in processing the proxy session. Will return after
+ * one read/write cycle and indicate the session status via the return code.
+ * @param s the session to process
+ * @return APR_EAGAIN when processing needs to be invoked again
+ * APR_SUCCESS when all streams have been processed, session still live
+ * APR_EOF when the session has been terminated
+ */
+apr_status_t h2_proxy_session_process(h2_proxy_session *s);
+
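+/*
+ * Minimal usage sketch (illustrative only; the window bit values, the
+ * request_done_cb callback and the standalone flag are caller supplied,
+ * and real callers add error handling around every step):
+ *
+ *   session = h2_proxy_session_setup(id, p_conn, conf, h2_front,
+ *                                    conn_window_bits, stream_window_bits,
+ *                                    request_done_cb);
+ *   h2_proxy_session_submit(session, url, r, standalone);
+ *   do {
+ *       status = h2_proxy_session_process(session);
+ *   } while (status == APR_SUCCESS || APR_STATUS_IS_EAGAIN(status));
+ *   h2_proxy_session_cleanup(session, request_done_cb);
+ */
+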
+void h2_proxy_session_cancel_all(h2_proxy_session *s);
+
+void h2_proxy_session_cleanup(h2_proxy_session *s, h2_proxy_request_done *done);
+
+#define H2_PROXY_REQ_URL_NOTE "h2-proxy-req-url"
+
+#endif /* h2_proxy_session_h */
diff --git a/modules/http2/h2_proxy_util.c b/modules/http2/h2_proxy_util.c
new file mode 100644
index 0000000..dc69ec0
--- /dev/null
+++ b/modules/http2/h2_proxy_util.c
@@ -0,0 +1,1355 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <assert.h>
+#include <apr_lib.h>
+#include <apr_strings.h>
+#include <apr_thread_mutex.h>
+#include <apr_thread_cond.h>
+
+#include <httpd.h>
+#include <http_core.h>
+#include <http_log.h>
+#include <http_request.h>
+#include <mod_proxy.h>
+
+#include <nghttp2/nghttp2.h>
+
+#include "h2.h"
+#include "h2_proxy_util.h"
+
+APLOG_USE_MODULE(proxy_http2);
+
+/* h2_proxy_log2(n): the exact log2(n) when n is a power of 2, floor(log2(n)) otherwise */
+unsigned char h2_proxy_log2(int n)
+{
+ int lz = 0;
+ if (!n) {
+ return 0;
+ }
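+    /* count leading zeros with a binary search over halves of the bit width */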
+ if (!(n & 0xffff0000u)) {
+ lz += 16;
+ n = (n << 16);
+ }
+ if (!(n & 0xff000000u)) {
+ lz += 8;
+ n = (n << 8);
+ }
+ if (!(n & 0xf0000000u)) {
+ lz += 4;
+ n = (n << 4);
+ }
+ if (!(n & 0xc0000000u)) {
+ lz += 2;
+ n = (n << 2);
+ }
+ if (!(n & 0x80000000u)) {
+ lz += 1;
+ }
+
+ return 31 - lz;
+}
+
+/*******************************************************************************
+ * ihash - hash for structs with int identifier
+ ******************************************************************************/
+struct h2_proxy_ihash_t {
+ apr_hash_t *hash;
+ size_t ioff;
+};
+
+static unsigned int ihash(const char *key, apr_ssize_t *klen)
+{
+ return (unsigned int)(*((int*)key));
+}
+
+h2_proxy_ihash_t *h2_proxy_ihash_create(apr_pool_t *pool, size_t offset_of_int)
+{
+ h2_proxy_ihash_t *ih = apr_pcalloc(pool, sizeof(h2_proxy_ihash_t));
+ ih->hash = apr_hash_make_custom(pool, ihash);
+ ih->ioff = offset_of_int;
+ return ih;
+}
+
+size_t h2_proxy_ihash_count(h2_proxy_ihash_t *ih)
+{
+ return apr_hash_count(ih->hash);
+}
+
+int h2_proxy_ihash_empty(h2_proxy_ihash_t *ih)
+{
+ return apr_hash_count(ih->hash) == 0;
+}
+
+void *h2_proxy_ihash_get(h2_proxy_ihash_t *ih, int id)
+{
+ return apr_hash_get(ih->hash, &id, sizeof(id));
+}
+
+typedef struct {
+ h2_proxy_ihash_iter_t *iter;
+ void *ctx;
+} iter_ctx;
+
+static int ihash_iter(void *ctx, const void *key, apr_ssize_t klen,
+ const void *val)
+{
+ iter_ctx *ictx = ctx;
+ return ictx->iter(ictx->ctx, (void*)val); /* why is this passed const?*/
+}
+
+int h2_proxy_ihash_iter(h2_proxy_ihash_t *ih, h2_proxy_ihash_iter_t *fn, void *ctx)
+{
+ iter_ctx ictx;
+ ictx.iter = fn;
+ ictx.ctx = ctx;
+ return apr_hash_do(ihash_iter, &ictx, ih->hash);
+}
+
+void h2_proxy_ihash_add(h2_proxy_ihash_t *ih, void *val)
+{
+ apr_hash_set(ih->hash, ((char *)val + ih->ioff), sizeof(int), val);
+}
+
+void h2_proxy_ihash_remove(h2_proxy_ihash_t *ih, int id)
+{
+ apr_hash_set(ih->hash, &id, sizeof(id), NULL);
+}
+
+void h2_proxy_ihash_remove_val(h2_proxy_ihash_t *ih, void *val)
+{
+ int id = *((int*)((char *)val + ih->ioff));
+ apr_hash_set(ih->hash, &id, sizeof(id), NULL);
+}
+
+
+void h2_proxy_ihash_clear(h2_proxy_ihash_t *ih)
+{
+ apr_hash_clear(ih->hash);
+}
+
+typedef struct {
+ h2_proxy_ihash_t *ih;
+ void **buffer;
+ size_t max;
+ size_t len;
+} collect_ctx;
+
+static int collect_iter(void *x, void *val)
+{
+ collect_ctx *ctx = x;
+ if (ctx->len < ctx->max) {
+ ctx->buffer[ctx->len++] = val;
+ return 1;
+ }
+ return 0;
+}
+
+size_t h2_proxy_ihash_shift(h2_proxy_ihash_t *ih, void **buffer, size_t max)
+{
+ collect_ctx ctx;
+ size_t i;
+
+ ctx.ih = ih;
+ ctx.buffer = buffer;
+ ctx.max = max;
+ ctx.len = 0;
+ h2_proxy_ihash_iter(ih, collect_iter, &ctx);
+ for (i = 0; i < ctx.len; ++i) {
+ h2_proxy_ihash_remove_val(ih, buffer[i]);
+ }
+ return ctx.len;
+}
+
+typedef struct {
+ h2_proxy_ihash_t *ih;
+ int *buffer;
+ size_t max;
+ size_t len;
+} icollect_ctx;
+
+static int icollect_iter(void *x, void *val)
+{
+ icollect_ctx *ctx = x;
+ if (ctx->len < ctx->max) {
+ ctx->buffer[ctx->len++] = *((int*)((char *)val + ctx->ih->ioff));
+ return 1;
+ }
+ return 0;
+}
+
+size_t h2_proxy_ihash_ishift(h2_proxy_ihash_t *ih, int *buffer, size_t max)
+{
+ icollect_ctx ctx;
+ size_t i;
+
+ ctx.ih = ih;
+ ctx.buffer = buffer;
+ ctx.max = max;
+ ctx.len = 0;
+ h2_proxy_ihash_iter(ih, icollect_iter, &ctx);
+ for (i = 0; i < ctx.len; ++i) {
+ h2_proxy_ihash_remove(ih, buffer[i]);
+ }
+ return ctx.len;
+}
+
+/*******************************************************************************
+ * iqueue - sorted list of int
+ ******************************************************************************/
+
+static void iq_grow(h2_proxy_iqueue *q, int nlen);
+static void iq_swap(h2_proxy_iqueue *q, int i, int j);
+static int iq_bubble_up(h2_proxy_iqueue *q, int i, int top,
+ h2_proxy_iq_cmp *cmp, void *ctx);
+static int iq_bubble_down(h2_proxy_iqueue *q, int i, int bottom,
+ h2_proxy_iq_cmp *cmp, void *ctx);
+
+h2_proxy_iqueue *h2_proxy_iq_create(apr_pool_t *pool, int capacity)
+{
+ h2_proxy_iqueue *q = apr_pcalloc(pool, sizeof(h2_proxy_iqueue));
+ if (q) {
+ q->pool = pool;
+ iq_grow(q, capacity);
+ q->nelts = 0;
+ }
+ return q;
+}
+
+int h2_proxy_iq_empty(h2_proxy_iqueue *q)
+{
+ return q->nelts == 0;
+}
+
+int h2_proxy_iq_count(h2_proxy_iqueue *q)
+{
+ return q->nelts;
+}
+
+
+void h2_proxy_iq_add(h2_proxy_iqueue *q, int sid, h2_proxy_iq_cmp *cmp, void *ctx)
+{
+ int i;
+
+ if (q->nelts >= q->nalloc) {
+ iq_grow(q, q->nalloc * 2);
+ }
+
+ i = (q->head + q->nelts) % q->nalloc;
+ q->elts[i] = sid;
+ ++q->nelts;
+
+ if (cmp) {
+ /* bubble it to the front of the queue */
+ iq_bubble_up(q, i, q->head, cmp, ctx);
+ }
+}
+
+int h2_proxy_iq_remove(h2_proxy_iqueue *q, int sid)
+{
+ int i;
+ for (i = 0; i < q->nelts; ++i) {
+ if (sid == q->elts[(q->head + i) % q->nalloc]) {
+ break;
+ }
+ }
+
+ if (i < q->nelts) {
+ ++i;
+ for (; i < q->nelts; ++i) {
+ q->elts[(q->head+i-1)%q->nalloc] = q->elts[(q->head+i)%q->nalloc];
+ }
+ --q->nelts;
+ return 1;
+ }
+ return 0;
+}
+
+void h2_proxy_iq_clear(h2_proxy_iqueue *q)
+{
+ q->nelts = 0;
+}
+
+void h2_proxy_iq_sort(h2_proxy_iqueue *q, h2_proxy_iq_cmp *cmp, void *ctx)
+{
+ /* Assume that changes in ordering are minimal. This needs,
+ * best case, q->nelts - 1 comparisons to check that nothing
+ * changed.
+ */
+ if (q->nelts > 0) {
+ int i, ni, prev, last;
+
+ /* Start at the end of the queue and create a tail of sorted
+ * entries. Make that tail one element longer in each iteration.
+ */
+ last = i = (q->head + q->nelts - 1) % q->nalloc;
+ while (i != q->head) {
+ prev = (q->nalloc + i - 1) % q->nalloc;
+
+ ni = iq_bubble_up(q, i, prev, cmp, ctx);
+ if (ni == prev) {
+ /* i bubbled one up, bubble the new i down, which
+ * keeps all tasks below i sorted. */
+ iq_bubble_down(q, i, last, cmp, ctx);
+ }
+ i = prev;
+ };
+ }
+}
+
+
+int h2_proxy_iq_shift(h2_proxy_iqueue *q)
+{
+ int sid;
+
+ if (q->nelts <= 0) {
+ return 0;
+ }
+
+ sid = q->elts[q->head];
+ q->head = (q->head + 1) % q->nalloc;
+ q->nelts--;
+
+ return sid;
+}
+
+static void iq_grow(h2_proxy_iqueue *q, int nlen)
+{
+ if (nlen > q->nalloc) {
+ int *nq = apr_pcalloc(q->pool, sizeof(int) * nlen);
+ if (q->nelts > 0) {
+ int l = ((q->head + q->nelts) % q->nalloc) - q->head;
+
+ memmove(nq, q->elts + q->head, sizeof(int) * l);
+ if (l < q->nelts) {
+ /* elts wrapped, append elts in [0, remain] to nq */
+ int remain = q->nelts - l;
+ memmove(nq + l, q->elts, sizeof(int) * remain);
+ }
+ }
+ q->elts = nq;
+ q->nalloc = nlen;
+ q->head = 0;
+ }
+}
+
+static void iq_swap(h2_proxy_iqueue *q, int i, int j)
+{
+ int x = q->elts[i];
+ q->elts[i] = q->elts[j];
+ q->elts[j] = x;
+}
+
+static int iq_bubble_up(h2_proxy_iqueue *q, int i, int top,
+ h2_proxy_iq_cmp *cmp, void *ctx)
+{
+ int prev;
+ while (((prev = (q->nalloc + i - 1) % q->nalloc), i != top)
+ && (*cmp)(q->elts[i], q->elts[prev], ctx) < 0) {
+ iq_swap(q, prev, i);
+ i = prev;
+ }
+ return i;
+}
+
+static int iq_bubble_down(h2_proxy_iqueue *q, int i, int bottom,
+ h2_proxy_iq_cmp *cmp, void *ctx)
+{
+ int next;
+ while (((next = (q->nalloc + i + 1) % q->nalloc), i != bottom)
+ && (*cmp)(q->elts[i], q->elts[next], ctx) > 0) {
+ iq_swap(q, next, i);
+ i = next;
+ }
+ return i;
+}
+
+/*******************************************************************************
+ * h2_proxy_ngheader
+ ******************************************************************************/
+#define H2_HD_MATCH_LIT_CS(l, name) \
+ ((strlen(name) == sizeof(l) - 1) && !apr_strnatcasecmp(l, name))
+
+static int h2_util_ignore_header(const char *name)
+{
+    /* never forward, RFC 7540 ch. 8.1.2.2 */
+ return (H2_HD_MATCH_LIT_CS("connection", name)
+ || H2_HD_MATCH_LIT_CS("proxy-connection", name)
+ || H2_HD_MATCH_LIT_CS("upgrade", name)
+ || H2_HD_MATCH_LIT_CS("keep-alive", name)
+ || H2_HD_MATCH_LIT_CS("transfer-encoding", name));
+}
+
+static int count_header(void *ctx, const char *key, const char *value)
+{
+ if (!h2_util_ignore_header(key)) {
+ (*((size_t*)ctx))++;
+ }
+ return 1;
+}
+
+#define NV_ADD_LIT_CS(nv, k, v) add_header(nv, k, sizeof(k) - 1, v, strlen(v))
+#define NV_ADD_CS_CS(nv, k, v) add_header(nv, k, strlen(k), v, strlen(v))
+
+static int add_header(h2_proxy_ngheader *ngh,
+ const char *key, size_t key_len,
+ const char *value, size_t val_len)
+{
+ nghttp2_nv *nv = &ngh->nv[ngh->nvlen++];
+
+ nv->name = (uint8_t*)key;
+ nv->namelen = key_len;
+ nv->value = (uint8_t*)value;
+ nv->valuelen = val_len;
+ return 1;
+}
+
+static int add_table_header(void *ctx, const char *key, const char *value)
+{
+ if (!h2_util_ignore_header(key)) {
+ add_header(ctx, key, strlen(key), value, strlen(value));
+ }
+ return 1;
+}
+
+h2_proxy_ngheader *h2_proxy_util_nghd_make_req(apr_pool_t *p,
+ const h2_proxy_request *req)
+{
+
+ h2_proxy_ngheader *ngh;
+ size_t n;
+
+ ap_assert(req);
+ ap_assert(req->scheme);
+ ap_assert(req->authority);
+ ap_assert(req->path);
+ ap_assert(req->method);
+
+ n = 4;
+ apr_table_do(count_header, &n, req->headers, NULL);
+
+ ngh = apr_pcalloc(p, sizeof(h2_proxy_ngheader));
+ ngh->nv = apr_pcalloc(p, n * sizeof(nghttp2_nv));
+ NV_ADD_LIT_CS(ngh, ":scheme", req->scheme);
+ NV_ADD_LIT_CS(ngh, ":authority", req->authority);
+ NV_ADD_LIT_CS(ngh, ":path", req->path);
+ NV_ADD_LIT_CS(ngh, ":method", req->method);
+ apr_table_do(add_table_header, ngh, req->headers, NULL);
+
+ return ngh;
+}
+
+h2_proxy_ngheader *h2_proxy_util_nghd_make(apr_pool_t *p, apr_table_t *headers)
+{
+
+ h2_proxy_ngheader *ngh;
+ size_t n;
+
+ n = 0;
+ apr_table_do(count_header, &n, headers, NULL);
+
+ ngh = apr_pcalloc(p, sizeof(h2_proxy_ngheader));
+ ngh->nv = apr_pcalloc(p, n * sizeof(nghttp2_nv));
+ apr_table_do(add_table_header, ngh, headers, NULL);
+
+ return ngh;
+}
+
+/*******************************************************************************
+ * header HTTP/1 <-> HTTP/2 conversions
+ ******************************************************************************/
+
+typedef struct {
+ const char *name;
+ size_t len;
+} literal;
+
+#define H2_DEF_LITERAL(n) { (n), (sizeof(n)-1) }
+#define H2_LIT_ARGS(a) (a),H2_ALEN(a)
+
+static literal IgnoredRequestHeaders[] = {
+ H2_DEF_LITERAL("upgrade"),
+ H2_DEF_LITERAL("connection"),
+ H2_DEF_LITERAL("keep-alive"),
+ H2_DEF_LITERAL("http2-settings"),
+ H2_DEF_LITERAL("proxy-connection"),
+ H2_DEF_LITERAL("transfer-encoding"),
+};
+static literal IgnoredProxyRespHds[] = {
+ H2_DEF_LITERAL("alt-svc"),
+};
+
+static int ignore_header(const literal *lits, size_t llen,
+ const char *name, size_t nlen)
+{
+ const literal *lit;
+ size_t i;
+
+ for (i = 0; i < llen; ++i) {
+ lit = &lits[i];
+ if (lit->len == nlen && !apr_strnatcasecmp(lit->name, name)) {
+ return 1;
+ }
+ }
+ return 0;
+}
+
+static int h2_proxy_req_ignore_header(const char *name, size_t len)
+{
+ return ignore_header(H2_LIT_ARGS(IgnoredRequestHeaders), name, len);
+}
+
+int h2_proxy_res_ignore_header(const char *name, size_t len)
+{
+ return (h2_proxy_req_ignore_header(name, len)
+ || ignore_header(H2_LIT_ARGS(IgnoredProxyRespHds), name, len));
+}
+
+void h2_proxy_util_camel_case_header(char *s, size_t len)
+{
+ size_t start = 1;
+ size_t i;
+ for (i = 0; i < len; ++i) {
+ if (start) {
+ if (s[i] >= 'a' && s[i] <= 'z') {
+ s[i] -= 'a' - 'A';
+ }
+
+ start = 0;
+ }
+ else if (s[i] == '-') {
+ start = 1;
+ }
+ }
+}
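+
+/* e.g. "content-length" becomes "Content-Length" and "x-forwarded-for"
+ * becomes "X-Forwarded-For"; only ASCII lower case letters at the start
+ * or following a '-' are changed.
+ */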
+
+/*******************************************************************************
+ * h2 request handling
+ ******************************************************************************/
+
+/** Match a header value against a string constant, case insensitive */
+#define H2_HD_MATCH_LIT(l, name, nlen) \
+ ((nlen == sizeof(l) - 1) && !apr_strnatcasecmp(l, name))
+
+static apr_status_t h2_headers_add_h1(apr_table_t *headers, apr_pool_t *pool,
+ const char *name, size_t nlen,
+ const char *value, size_t vlen)
+{
+ char *hname, *hvalue;
+
+ if (h2_proxy_req_ignore_header(name, nlen)) {
+ return APR_SUCCESS;
+ }
+ else if (H2_HD_MATCH_LIT("cookie", name, nlen)) {
+ const char *existing = apr_table_get(headers, "cookie");
+ if (existing) {
+ char *nval;
+
+            /* Cookie headers come separately in HTTP/2, but need
+             * to be merged with "; " (instead of the default ", ")
+             */
+ hvalue = apr_pstrndup(pool, value, vlen);
+ nval = apr_psprintf(pool, "%s; %s", existing, hvalue);
+ apr_table_setn(headers, "Cookie", nval);
+ return APR_SUCCESS;
+ }
+ }
+ else if (H2_HD_MATCH_LIT("host", name, nlen)) {
+ if (apr_table_get(headers, "Host")) {
+ return APR_SUCCESS; /* ignore duplicate */
+ }
+ }
+
+ hname = apr_pstrndup(pool, name, nlen);
+ hvalue = apr_pstrndup(pool, value, vlen);
+ h2_proxy_util_camel_case_header(hname, nlen);
+ apr_table_mergen(headers, hname, hvalue);
+
+ return APR_SUCCESS;
+}
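+
+/* Illustrative effect (hypothetical header values): the HTTP/2 fields
+ *   cookie: a=1   and   cookie: b=2
+ * are merged into a single "Cookie: a=1; b=2", a second "host" is dropped,
+ * hop-by-hop fields like "transfer-encoding" are ignored entirely and all
+ * other names are camel-cased before apr_table_mergen() adds them.
+ */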
+
+static h2_proxy_request *h2_proxy_req_createn(int id, apr_pool_t *pool, const char *method,
+ const char *scheme, const char *authority,
+ const char *path, apr_table_t *header)
+{
+ h2_proxy_request *req = apr_pcalloc(pool, sizeof(h2_proxy_request));
+
+ req->method = method;
+ req->scheme = scheme;
+ req->authority = authority;
+ req->path = path;
+ req->headers = header? header : apr_table_make(pool, 10);
+ req->request_time = apr_time_now();
+
+ return req;
+}
+
+h2_proxy_request *h2_proxy_req_create(int id, apr_pool_t *pool)
+{
+ return h2_proxy_req_createn(id, pool, NULL, NULL, NULL, NULL, NULL);
+}
+
+typedef struct {
+ apr_table_t *headers;
+ apr_pool_t *pool;
+} h1_ctx;
+
+static int set_h1_header(void *ctx, const char *key, const char *value)
+{
+ h1_ctx *x = ctx;
+ size_t klen = strlen(key);
+ if (!h2_proxy_req_ignore_header(key, klen)) {
+ h2_headers_add_h1(x->headers, x->pool, key, klen, value, strlen(value));
+ }
+ return 1;
+}
+
+apr_status_t h2_proxy_req_make(h2_proxy_request *req, apr_pool_t *pool,
+ const char *method, const char *scheme,
+ const char *authority, const char *path,
+ apr_table_t *headers)
+{
+ h1_ctx x;
+ const char *val;
+
+ req->method = method;
+ req->scheme = scheme;
+ req->authority = authority;
+ req->path = path;
+
+ ap_assert(req->scheme);
+ ap_assert(req->authority);
+ ap_assert(req->path);
+ ap_assert(req->method);
+
+ x.pool = pool;
+ x.headers = req->headers;
+ apr_table_do(set_h1_header, &x, headers, NULL);
+ if ((val = apr_table_get(headers, "TE")) && ap_find_token(pool, val, "trailers")) {
+ /* client accepts trailers, forward this information */
+ apr_table_addn(req->headers, "TE", "trailers");
+ }
+ apr_table_setn(req->headers, "te", "trailers");
+ return APR_SUCCESS;
+}
+
+/*******************************************************************************
+ * frame logging
+ ******************************************************************************/
+
+int h2_proxy_util_frame_print(const nghttp2_frame *frame, char *buffer, size_t maxlen)
+{
+ char scratch[128];
+ size_t s_len = sizeof(scratch)/sizeof(scratch[0]);
+
+ switch (frame->hd.type) {
+ case NGHTTP2_DATA: {
+ return apr_snprintf(buffer, maxlen,
+ "DATA[length=%d, flags=%d, stream=%d, padlen=%d]",
+ (int)frame->hd.length, frame->hd.flags,
+ frame->hd.stream_id, (int)frame->data.padlen);
+ }
+ case NGHTTP2_HEADERS: {
+ return apr_snprintf(buffer, maxlen,
+ "HEADERS[length=%d, hend=%d, stream=%d, eos=%d]",
+ (int)frame->hd.length,
+ !!(frame->hd.flags & NGHTTP2_FLAG_END_HEADERS),
+ frame->hd.stream_id,
+ !!(frame->hd.flags & NGHTTP2_FLAG_END_STREAM));
+ }
+ case NGHTTP2_PRIORITY: {
+ return apr_snprintf(buffer, maxlen,
+ "PRIORITY[length=%d, flags=%d, stream=%d]",
+ (int)frame->hd.length,
+ frame->hd.flags, frame->hd.stream_id);
+ }
+ case NGHTTP2_RST_STREAM: {
+ return apr_snprintf(buffer, maxlen,
+ "RST_STREAM[length=%d, flags=%d, stream=%d]",
+ (int)frame->hd.length,
+ frame->hd.flags, frame->hd.stream_id);
+ }
+ case NGHTTP2_SETTINGS: {
+ if (frame->hd.flags & NGHTTP2_FLAG_ACK) {
+ return apr_snprintf(buffer, maxlen,
+ "SETTINGS[ack=1, stream=%d]",
+ frame->hd.stream_id);
+ }
+ return apr_snprintf(buffer, maxlen,
+ "SETTINGS[length=%d, stream=%d]",
+ (int)frame->hd.length, frame->hd.stream_id);
+ }
+ case NGHTTP2_PUSH_PROMISE: {
+ return apr_snprintf(buffer, maxlen,
+ "PUSH_PROMISE[length=%d, hend=%d, stream=%d]",
+ (int)frame->hd.length,
+ !!(frame->hd.flags & NGHTTP2_FLAG_END_HEADERS),
+ frame->hd.stream_id);
+ }
+ case NGHTTP2_PING: {
+ return apr_snprintf(buffer, maxlen,
+ "PING[length=%d, ack=%d, stream=%d]",
+ (int)frame->hd.length,
+ frame->hd.flags&NGHTTP2_FLAG_ACK,
+ frame->hd.stream_id);
+ }
+ case NGHTTP2_GOAWAY: {
+ size_t len = (frame->goaway.opaque_data_len < s_len)?
+ frame->goaway.opaque_data_len : s_len-1;
+ memcpy(scratch, frame->goaway.opaque_data, len);
+ scratch[len] = '\0';
+ return apr_snprintf(buffer, maxlen, "GOAWAY[error=%d, reason='%s', "
+ "last_stream=%d]", frame->goaway.error_code,
+ scratch, frame->goaway.last_stream_id);
+ }
+ case NGHTTP2_WINDOW_UPDATE: {
+ return apr_snprintf(buffer, maxlen,
+ "WINDOW_UPDATE[stream=%d, incr=%d]",
+ frame->hd.stream_id,
+ frame->window_update.window_size_increment);
+ }
+ default:
+ return apr_snprintf(buffer, maxlen,
+ "type=%d[length=%d, flags=%d, stream=%d]",
+ frame->hd.type, (int)frame->hd.length,
+ frame->hd.flags, frame->hd.stream_id);
+ }
+}
+
+/*******************************************************************************
+ * link header handling
+ ******************************************************************************/
+
+typedef struct {
+ apr_pool_t *pool;
+ request_rec *r;
+ proxy_dir_conf *conf;
+ const char *s;
+ int slen;
+ int i;
+ const char *server_uri;
+ int su_len;
+ const char *real_backend_uri;
+ int rbu_len;
+ const char *p_server_uri;
+ int psu_len;
+ int link_start;
+ int link_end;
+} link_ctx;
+
+static int attr_char(char c)
+{
+ switch (c) {
+ case '!':
+ case '#':
+ case '$':
+ case '&':
+ case '+':
+ case '-':
+ case '.':
+ case '^':
+ case '_':
+ case '`':
+ case '|':
+ case '~':
+ return 1;
+ default:
+ return apr_isalnum(c);
+ }
+}
+
+static int ptoken_char(char c)
+{
+ switch (c) {
+ case '!':
+ case '#':
+ case '$':
+ case '&':
+ case '\'':
+ case '(':
+ case ')':
+ case '*':
+ case '+':
+ case '-':
+ case '.':
+ case '/':
+ case ':':
+ case '<':
+ case '=':
+ case '>':
+ case '?':
+ case '@':
+ case '[':
+ case ']':
+ case '^':
+ case '_':
+ case '`':
+ case '{':
+ case '|':
+ case '}':
+ case '~':
+ return 1;
+ default:
+ return apr_isalnum(c);
+ }
+}
+
+static int skip_ws(link_ctx *ctx)
+{
+ char c;
+ while (ctx->i < ctx->slen
+ && (((c = ctx->s[ctx->i]) == ' ') || (c == '\t'))) {
+ ++ctx->i;
+ }
+ return (ctx->i < ctx->slen);
+}
+
+static int find_chr(link_ctx *ctx, char c, int *pidx)
+{
+ int j;
+ for (j = ctx->i; j < ctx->slen; ++j) {
+ if (ctx->s[j] == c) {
+ *pidx = j;
+ return 1;
+ }
+ }
+ return 0;
+}
+
+static int read_chr(link_ctx *ctx, char c)
+{
+ if (ctx->i < ctx->slen && ctx->s[ctx->i] == c) {
+ ++ctx->i;
+ return 1;
+ }
+ return 0;
+}
+
+static int skip_qstring(link_ctx *ctx)
+{
+ if (skip_ws(ctx) && read_chr(ctx, '\"')) {
+ int end;
+ if (find_chr(ctx, '\"', &end)) {
+ ctx->i = end + 1;
+ return 1;
+ }
+ }
+ return 0;
+}
+
+static int skip_ptoken(link_ctx *ctx)
+{
+ if (skip_ws(ctx)) {
+ int i;
+ for (i = ctx->i; i < ctx->slen && ptoken_char(ctx->s[i]); ++i) {
+ /* nop */
+ }
+ if (i > ctx->i) {
+ ctx->i = i;
+ return 1;
+ }
+ }
+ return 0;
+}
+
+
+static int read_link(link_ctx *ctx)
+{
+ ctx->link_start = ctx->link_end = 0;
+ if (skip_ws(ctx) && read_chr(ctx, '<')) {
+ int end;
+ if (find_chr(ctx, '>', &end)) {
+ ctx->link_start = ctx->i;
+ ctx->link_end = end;
+ ctx->i = end + 1;
+ return 1;
+ }
+ }
+ return 0;
+}
+
+static int skip_pname(link_ctx *ctx)
+{
+ if (skip_ws(ctx)) {
+ int i;
+ for (i = ctx->i; i < ctx->slen && attr_char(ctx->s[i]); ++i) {
+ /* nop */
+ }
+ if (i > ctx->i) {
+ ctx->i = i;
+ return 1;
+ }
+ }
+ return 0;
+}
+
+static int skip_pvalue(link_ctx *ctx)
+{
+ if (skip_ws(ctx) && read_chr(ctx, '=')) {
+ if (skip_qstring(ctx) || skip_ptoken(ctx)) {
+ return 1;
+ }
+ }
+ return 0;
+}
+
+static int skip_param(link_ctx *ctx)
+{
+ if (skip_ws(ctx) && read_chr(ctx, ';')) {
+ if (skip_pname(ctx)) {
+ skip_pvalue(ctx); /* value is optional */
+ return 1;
+ }
+ }
+ return 0;
+}
+
+static int read_sep(link_ctx *ctx)
+{
+ if (skip_ws(ctx) && read_chr(ctx, ',')) {
+ return 1;
+ }
+ return 0;
+}
+
+static size_t subst_str(link_ctx *ctx, int start, int end, const char *ns)
+{
+ int olen, nlen, plen;
+ int delta;
+ char *p;
+
+ olen = end - start;
+ nlen = (int)strlen(ns);
+ delta = nlen - olen;
+ plen = ctx->slen + delta + 1;
+ p = apr_palloc(ctx->pool, plen);
+ memcpy(p, ctx->s, start);
+ memcpy(p + start, ns, nlen);
+ strcpy(p + start + nlen, ctx->s + end);
+ ctx->s = p;
+ ctx->slen = plen - 1; /* (int)strlen(p) */
+ if (ctx->i >= end) {
+ ctx->i += delta;
+ }
+ return nlen;
+}
+
+static void map_link(link_ctx *ctx)
+{
+ if (ctx->link_start < ctx->link_end) {
+ char buffer[HUGE_STRING_LEN];
+ size_t need_len, link_len, buffer_len, prepend_p_server;
+ const char *mapped;
+
+ buffer[0] = '\0';
+ buffer_len = 0;
+ link_len = ctx->link_end - ctx->link_start;
+ need_len = link_len + 1;
+ prepend_p_server = (ctx->s[ctx->link_start] == '/');
+ if (prepend_p_server) {
+            /* it is common to use relative uris in link headers; for the
+             * mappings to work, we need to prefix the backend server uri */
+ need_len += ctx->psu_len;
+ apr_cpystrn(buffer, ctx->p_server_uri, sizeof(buffer));
+ buffer_len = ctx->psu_len;
+ }
+ if (need_len > sizeof(buffer)) {
+ ap_log_rerror(APLOG_MARK, APLOG_WARNING, 0, ctx->r, APLOGNO(03482)
+ "link_reverse_map uri too long, skipped: %s", ctx->s);
+ return;
+ }
+ apr_cpystrn(buffer + buffer_len, ctx->s + ctx->link_start, link_len + 1);
+ if (!prepend_p_server
+ && strcmp(ctx->real_backend_uri, ctx->p_server_uri)
+ && !strncmp(buffer, ctx->real_backend_uri, ctx->rbu_len)) {
+            /* the real backend uri and the local proxy uri we use differ; for
+             * the mapping to work, we need to use the proxy uri */
+ int path_start = ctx->link_start + ctx->rbu_len;
+ link_len -= ctx->rbu_len;
+ memcpy(buffer, ctx->p_server_uri, ctx->psu_len);
+ memcpy(buffer + ctx->psu_len, ctx->s + path_start, link_len);
+ buffer_len = ctx->psu_len + link_len;
+ buffer[buffer_len] = '\0';
+ }
+ mapped = ap_proxy_location_reverse_map(ctx->r, ctx->conf, buffer);
+ ap_log_rerror(APLOG_MARK, APLOG_TRACE2, 0, ctx->r,
+ "reverse_map[%s] %s --> %s", ctx->p_server_uri, buffer, mapped);
+ if (mapped != buffer) {
+ if (prepend_p_server) {
+ if (ctx->server_uri == NULL) {
+ ctx->server_uri = ap_construct_url(ctx->pool, "", ctx->r);
+ ctx->su_len = (int)strlen(ctx->server_uri);
+ }
+ if (!strncmp(mapped, ctx->server_uri, ctx->su_len)) {
+ mapped += ctx->su_len;
+ }
+ }
+ subst_str(ctx, ctx->link_start, ctx->link_end, mapped);
+ }
+ }
+}
+
+/* RFC 5988 <https://tools.ietf.org/html/rfc5988#section-6.2.1>
+ Link = "Link" ":" #link-value
+ link-value = "<" URI-Reference ">" *( ";" link-param )
+ link-param = ( ( "rel" "=" relation-types )
+ | ( "anchor" "=" <"> URI-Reference <"> )
+ | ( "rev" "=" relation-types )
+ | ( "hreflang" "=" Language-Tag )
+ | ( "media" "=" ( MediaDesc | ( <"> MediaDesc <"> ) ) )
+ | ( "title" "=" quoted-string )
+ | ( "title*" "=" ext-value )
+ | ( "type" "=" ( media-type | quoted-mt ) )
+ | ( link-extension ) )
+ link-extension = ( parmname [ "=" ( ptoken | quoted-string ) ] )
+ | ( ext-name-star "=" ext-value )
+ ext-name-star = parmname "*" ; reserved for RFC2231-profiled
+ ; extensions. Whitespace NOT
+ ; allowed in between.
+ ptoken = 1*ptokenchar
+ ptokenchar = "!" | "#" | "$" | "%" | "&" | "'" | "("
+ | ")" | "*" | "+" | "-" | "." | "/" | DIGIT
+ | ":" | "<" | "=" | ">" | "?" | "@" | ALPHA
+ | "[" | "]" | "^" | "_" | "`" | "{" | "|"
+ | "}" | "~"
+ media-type = type-name "/" subtype-name
+ quoted-mt = <"> media-type <">
+ relation-types = relation-type
+ | <"> relation-type *( 1*SP relation-type ) <">
+ relation-type = reg-rel-type | ext-rel-type
+ reg-rel-type = LOALPHA *( LOALPHA | DIGIT | "." | "-" )
+ ext-rel-type = URI
+
+ and from <https://tools.ietf.org/html/rfc5987>
+ parmname = 1*attr-char
+ attr-char = ALPHA / DIGIT
+ / "!" / "#" / "$" / "&" / "+" / "-" / "."
+ / "^" / "_" / "`" / "|" / "~"
+ */
+
+const char *h2_proxy_link_reverse_map(request_rec *r,
+ proxy_dir_conf *conf,
+ const char *real_backend_uri,
+ const char *proxy_server_uri,
+ const char *s)
+{
+ link_ctx ctx;
+
+ if (r->proxyreq != PROXYREQ_REVERSE) {
+ return s;
+ }
+ memset(&ctx, 0, sizeof(ctx));
+ ctx.r = r;
+ ctx.pool = r->pool;
+ ctx.conf = conf;
+ ctx.real_backend_uri = real_backend_uri;
+ ctx.rbu_len = (int)strlen(ctx.real_backend_uri);
+ ctx.p_server_uri = proxy_server_uri;
+ ctx.psu_len = (int)strlen(ctx.p_server_uri);
+ ctx.s = s;
+ ctx.slen = (int)strlen(s);
+ while (read_link(&ctx)) {
+ while (skip_param(&ctx)) {
+ /* nop */
+ }
+ map_link(&ctx);
+ if (!read_sep(&ctx)) {
+ break;
+ }
+ }
+ ap_log_rerror(APLOG_MARK, APLOG_TRACE2, 0, r,
+ "link_reverse_map %s --> %s", s, ctx.s);
+ return ctx.s;
+}
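+
+/* Sketch of the intended effect (hypothetical values): given a backend
+ * response header
+ *   Link: </assets/site.css>; rel=preload
+ * the relative URI is prefixed with proxy_server_uri so that
+ * ap_proxy_location_reverse_map() can match it against the configured
+ * ProxyPassReverse mappings; if the mapping changes the URI, the new value
+ * is substituted back into the header before it is sent to the client.
+ */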
+
+/*******************************************************************************
+ * FIFO queue
+ ******************************************************************************/
+
+struct h2_proxy_fifo {
+ void **elems;
+ int nelems;
+ int set;
+ int head;
+ int count;
+ int aborted;
+ apr_thread_mutex_t *lock;
+ apr_thread_cond_t *not_empty;
+ apr_thread_cond_t *not_full;
+};
+
+static int nth_index(h2_proxy_fifo *fifo, int n)
+{
+ return (fifo->head + n) % fifo->nelems;
+}
+
+static apr_status_t fifo_destroy(void *data)
+{
+ h2_proxy_fifo *fifo = data;
+
+ apr_thread_cond_destroy(fifo->not_empty);
+ apr_thread_cond_destroy(fifo->not_full);
+ apr_thread_mutex_destroy(fifo->lock);
+
+ return APR_SUCCESS;
+}
+
+static int index_of(h2_proxy_fifo *fifo, void *elem)
+{
+ int i;
+
+ for (i = 0; i < fifo->count; ++i) {
+ if (elem == fifo->elems[nth_index(fifo, i)]) {
+ return i;
+ }
+ }
+ return -1;
+}
+
+static apr_status_t create_int(h2_proxy_fifo **pfifo, apr_pool_t *pool,
+ int capacity, int as_set)
+{
+ apr_status_t rv;
+ h2_proxy_fifo *fifo;
+
+ fifo = apr_pcalloc(pool, sizeof(*fifo));
+ if (fifo == NULL) {
+ return APR_ENOMEM;
+ }
+
+ rv = apr_thread_mutex_create(&fifo->lock,
+ APR_THREAD_MUTEX_UNNESTED, pool);
+ if (rv != APR_SUCCESS) {
+ return rv;
+ }
+
+ rv = apr_thread_cond_create(&fifo->not_empty, pool);
+ if (rv != APR_SUCCESS) {
+ return rv;
+ }
+
+ rv = apr_thread_cond_create(&fifo->not_full, pool);
+ if (rv != APR_SUCCESS) {
+ return rv;
+ }
+
+ fifo->elems = apr_pcalloc(pool, capacity * sizeof(void*));
+ if (fifo->elems == NULL) {
+ return APR_ENOMEM;
+ }
+ fifo->nelems = capacity;
+ fifo->set = as_set;
+
+ *pfifo = fifo;
+ apr_pool_cleanup_register(pool, fifo, fifo_destroy, apr_pool_cleanup_null);
+
+ return APR_SUCCESS;
+}
+
+apr_status_t h2_proxy_fifo_create(h2_proxy_fifo **pfifo, apr_pool_t *pool, int capacity)
+{
+ return create_int(pfifo, pool, capacity, 0);
+}
+
+apr_status_t h2_proxy_fifo_set_create(h2_proxy_fifo **pfifo, apr_pool_t *pool, int capacity)
+{
+ return create_int(pfifo, pool, capacity, 1);
+}
+
+apr_status_t h2_proxy_fifo_term(h2_proxy_fifo *fifo)
+{
+ apr_status_t rv;
+ if ((rv = apr_thread_mutex_lock(fifo->lock)) == APR_SUCCESS) {
+ fifo->aborted = 1;
+ apr_thread_mutex_unlock(fifo->lock);
+ }
+ return rv;
+}
+
+apr_status_t h2_proxy_fifo_interrupt(h2_proxy_fifo *fifo)
+{
+ apr_status_t rv;
+ if ((rv = apr_thread_mutex_lock(fifo->lock)) == APR_SUCCESS) {
+ apr_thread_cond_broadcast(fifo->not_empty);
+ apr_thread_cond_broadcast(fifo->not_full);
+ apr_thread_mutex_unlock(fifo->lock);
+ }
+ return rv;
+}
+
+int h2_proxy_fifo_count(h2_proxy_fifo *fifo)
+{
+ return fifo->count;
+}
+
+int h2_proxy_fifo_capacity(h2_proxy_fifo *fifo)
+{
+ return fifo->nelems;
+}
+
+static apr_status_t check_not_empty(h2_proxy_fifo *fifo, int block)
+{
+ if (fifo->count == 0) {
+ if (!block) {
+ return APR_EAGAIN;
+ }
+ while (fifo->count == 0) {
+ if (fifo->aborted) {
+ return APR_EOF;
+ }
+ apr_thread_cond_wait(fifo->not_empty, fifo->lock);
+ }
+ }
+ return APR_SUCCESS;
+}
+
+static apr_status_t fifo_push(h2_proxy_fifo *fifo, void *elem, int block)
+{
+ apr_status_t rv;
+
+ if (fifo->aborted) {
+ return APR_EOF;
+ }
+
+ if ((rv = apr_thread_mutex_lock(fifo->lock)) == APR_SUCCESS) {
+ if (fifo->set && index_of(fifo, elem) >= 0) {
+ /* set mode, elem already member */
+ apr_thread_mutex_unlock(fifo->lock);
+ return APR_EEXIST;
+ }
+ else if (fifo->count == fifo->nelems) {
+ if (block) {
+ while (fifo->count == fifo->nelems) {
+ if (fifo->aborted) {
+ apr_thread_mutex_unlock(fifo->lock);
+ return APR_EOF;
+ }
+ apr_thread_cond_wait(fifo->not_full, fifo->lock);
+ }
+ }
+ else {
+ apr_thread_mutex_unlock(fifo->lock);
+ return APR_EAGAIN;
+ }
+ }
+
+ ap_assert(fifo->count < fifo->nelems);
+ fifo->elems[nth_index(fifo, fifo->count)] = elem;
+ ++fifo->count;
+ if (fifo->count == 1) {
+ apr_thread_cond_broadcast(fifo->not_empty);
+ }
+ apr_thread_mutex_unlock(fifo->lock);
+ }
+ return rv;
+}
+
+apr_status_t h2_proxy_fifo_push(h2_proxy_fifo *fifo, void *elem)
+{
+ return fifo_push(fifo, elem, 1);
+}
+
+apr_status_t h2_proxy_fifo_try_push(h2_proxy_fifo *fifo, void *elem)
+{
+ return fifo_push(fifo, elem, 0);
+}
+
+static void *pull_head(h2_proxy_fifo *fifo)
+{
+ void *elem;
+
+ ap_assert(fifo->count > 0);
+ elem = fifo->elems[fifo->head];
+ --fifo->count;
+ if (fifo->count > 0) {
+ fifo->head = nth_index(fifo, 1);
+ if (fifo->count+1 == fifo->nelems) {
+ apr_thread_cond_broadcast(fifo->not_full);
+ }
+ }
+ return elem;
+}
+
+static apr_status_t fifo_pull(h2_proxy_fifo *fifo, void **pelem, int block)
+{
+ apr_status_t rv;
+
+ if (fifo->aborted) {
+ return APR_EOF;
+ }
+
+ if ((rv = apr_thread_mutex_lock(fifo->lock)) == APR_SUCCESS) {
+ if ((rv = check_not_empty(fifo, block)) != APR_SUCCESS) {
+ apr_thread_mutex_unlock(fifo->lock);
+ *pelem = NULL;
+ return rv;
+ }
+
+ ap_assert(fifo->count > 0);
+ *pelem = pull_head(fifo);
+
+ apr_thread_mutex_unlock(fifo->lock);
+ }
+ return rv;
+}
+
+apr_status_t h2_proxy_fifo_pull(h2_proxy_fifo *fifo, void **pelem)
+{
+ return fifo_pull(fifo, pelem, 1);
+}
+
+apr_status_t h2_proxy_fifo_try_pull(h2_proxy_fifo *fifo, void **pelem)
+{
+ return fifo_pull(fifo, pelem, 0);
+}
+
+apr_status_t h2_proxy_fifo_remove(h2_proxy_fifo *fifo, void *elem)
+{
+ apr_status_t rv;
+
+ if (fifo->aborted) {
+ return APR_EOF;
+ }
+
+ if ((rv = apr_thread_mutex_lock(fifo->lock)) == APR_SUCCESS) {
+ int i, rc;
+ void *e;
+
+ rc = 0;
+ for (i = 0; i < fifo->count; ++i) {
+ e = fifo->elems[nth_index(fifo, i)];
+ if (e == elem) {
+ ++rc;
+ }
+ else if (rc) {
+ fifo->elems[nth_index(fifo, i-rc)] = e;
+ }
+ }
+ if (rc) {
+ fifo->count -= rc;
+ if (fifo->count + rc == fifo->nelems) {
+ apr_thread_cond_broadcast(fifo->not_full);
+ }
+ rv = APR_SUCCESS;
+ }
+ else {
+ rv = APR_EAGAIN;
+ }
+
+ apr_thread_mutex_unlock(fifo->lock);
+ }
+ return rv;
+}
diff --git a/modules/http2/h2_proxy_util.h b/modules/http2/h2_proxy_util.h
new file mode 100644
index 0000000..202363d
--- /dev/null
+++ b/modules/http2/h2_proxy_util.h
@@ -0,0 +1,257 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __mod_h2__h2_proxy_util__
+#define __mod_h2__h2_proxy_util__
+
+/*******************************************************************************
+ * some debugging/format helpers
+ ******************************************************************************/
+struct h2_proxy_request;
+struct nghttp2_frame;
+
+int h2_proxy_util_frame_print(const nghttp2_frame *frame, char *buffer, size_t maxlen);
+
+/*******************************************************************************
+ * ihash - hash for structs with int identifier
+ ******************************************************************************/
+typedef struct h2_proxy_ihash_t h2_proxy_ihash_t;
+typedef int h2_proxy_ihash_iter_t(void *ctx, void *val);
+
+/**
+ * Create a hash for structures that have an identifying int member.
+ * @param pool the pool to use
+ * @param offset_of_int the offsetof() the int member in the struct
+ */
+h2_proxy_ihash_t *h2_proxy_ihash_create(apr_pool_t *pool, size_t offset_of_int);
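+
+/* Usage sketch (the struct below is made up for illustration):
+ *
+ *     typedef struct { int id; const char *name; } my_item;
+ *     h2_proxy_ihash_t *ih = h2_proxy_ihash_create(pool, offsetof(my_item, id));
+ *     h2_proxy_ihash_add(ih, item);              // keyed by item->id
+ *     my_item *found = h2_proxy_ihash_get(ih, item->id);
+ */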
+
+size_t h2_proxy_ihash_count(h2_proxy_ihash_t *ih);
+int h2_proxy_ihash_empty(h2_proxy_ihash_t *ih);
+void *h2_proxy_ihash_get(h2_proxy_ihash_t *ih, int id);
+
+/**
+ * Iterate over the hash members (without defined order) and invoke
+ * fn for each member until 0 is returned.
+ * @param ih the hash to iterate over
+ * @param fn the function to invoke on each member
+ * @param ctx user supplied data passed into each iteration call
+ * @return 0 if one iteration returned 0, otherwise != 0
+ */
+int h2_proxy_ihash_iter(h2_proxy_ihash_t *ih, h2_proxy_ihash_iter_t *fn, void *ctx);
+
+void h2_proxy_ihash_add(h2_proxy_ihash_t *ih, void *val);
+void h2_proxy_ihash_remove(h2_proxy_ihash_t *ih, int id);
+void h2_proxy_ihash_remove_val(h2_proxy_ihash_t *ih, void *val);
+void h2_proxy_ihash_clear(h2_proxy_ihash_t *ih);
+
+size_t h2_proxy_ihash_shift(h2_proxy_ihash_t *ih, void **buffer, size_t max);
+size_t h2_proxy_ihash_ishift(h2_proxy_ihash_t *ih, int *buffer, size_t max);
+
+/*******************************************************************************
+ * iqueue - sorted list of int with user defined ordering
+ ******************************************************************************/
+typedef struct h2_proxy_iqueue {
+ int *elts;
+ int head;
+ int nelts;
+ int nalloc;
+ apr_pool_t *pool;
+} h2_proxy_iqueue;
+
+/**
+ * Comparator for two int to determine their order.
+ *
+ * @param i1 first int to compare
+ * @param i2 second int to compare
+ * @param ctx provided user data
+ * @return value is the same as for strcmp() and has the effect:
+ *         == 0: i1 and i2 are treated equal in ordering
+ *          < 0: i1 should be sorted before i2
+ *          > 0: i2 should be sorted before i1
+ */
+typedef int h2_proxy_iq_cmp(int i1, int i2, void *ctx);
+
+/**
+ * Allocate a new queue from the pool and initialize.
+ * @param pool the memory pool
+ * @param capacity the initial capacity of the queue
+ */
+h2_proxy_iqueue *h2_proxy_iq_create(apr_pool_t *pool, int capacity);
+
+/**
+ * Return != 0 iff there are no tasks in the queue.
+ * @param q the queue to check
+ */
+int h2_proxy_iq_empty(h2_proxy_iqueue *q);
+
+/**
+ * Return the number of int in the queue.
+ * @param q the queue to get size on
+ */
+int h2_proxy_iq_count(h2_proxy_iqueue *q);
+
+/**
+ * Add a stream id to the queue.
+ *
+ * @param q the queue to append the task to
+ * @param sid the stream id to add
+ * @param cmp the comparator for sorting
+ * @param ctx user data for comparator
+ */
+void h2_proxy_iq_add(h2_proxy_iqueue *q, int sid, h2_proxy_iq_cmp *cmp, void *ctx);
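+
+/* Sketch of a comparator and its use (illustrative only):
+ *
+ *     static int cmp_sid(int i1, int i2, void *ctx) {
+ *         (void)ctx;
+ *         return i1 - i2;                        // ascending stream ids
+ *     }
+ *
+ *     h2_proxy_iq_add(q, sid, cmp_sid, NULL);    // insert, keeping q sorted
+ */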
+
+/**
+ * Remove the stream id from the queue.
+ * @param q the task queue
+ * @param sid the stream id to remove
+ * @return != 0 iff the stream id was found in the queue
+ */
+int h2_proxy_iq_remove(h2_proxy_iqueue *q, int sid);
+
+/**
+ * Remove all entries in the queue.
+ */
+void h2_proxy_iq_clear(h2_proxy_iqueue *q);
+
+/**
+ * Sort the stream id queue again. Call if the task ordering
+ * has changed.
+ *
+ * @param q the queue to sort
+ * @param cmp the comparator for sorting
+ * @param ctx user data for the comparator
+ */
+void h2_proxy_iq_sort(h2_proxy_iqueue *q, h2_proxy_iq_cmp *cmp, void *ctx);
+
+/**
+ * Get the first stream id from the queue, or 0 if the queue is empty.
+ * The id will be removed from the queue.
+ *
+ * @param q the queue to get the first task from
+ * @return the first stream id of the queue, 0 if empty
+ */
+int h2_proxy_iq_shift(h2_proxy_iqueue *q);
+
+/*******************************************************************************
+ * common helpers
+ ******************************************************************************/
+/* h2_proxy_log2(n) returns floor(log2(n)); exact iff n is a power of 2 */
+unsigned char h2_proxy_log2(int n);
+
+/*******************************************************************************
+ * HTTP/2 header helpers
+ ******************************************************************************/
+void h2_proxy_util_camel_case_header(char *s, size_t len);
+int h2_proxy_res_ignore_header(const char *name, size_t len);
+
+/*******************************************************************************
+ * nghttp2 helpers
+ ******************************************************************************/
+typedef struct h2_proxy_ngheader {
+ nghttp2_nv *nv;
+ apr_size_t nvlen;
+} h2_proxy_ngheader;
+h2_proxy_ngheader *h2_proxy_util_nghd_make_req(apr_pool_t *p,
+ const struct h2_proxy_request *req);
+
+h2_proxy_ngheader *h2_proxy_util_nghd_make(apr_pool_t *p, apr_table_t *headers);
+
+/*******************************************************************************
+ * h2_proxy_request helpers
+ ******************************************************************************/
+typedef struct h2_proxy_request h2_proxy_request;
+
+struct h2_proxy_request {
+    const char *method; /* pseudo header values, see RFC 7540 ch. 8.1.2.3 */
+ const char *scheme;
+ const char *authority;
+ const char *path;
+
+ apr_table_t *headers;
+
+ apr_time_t request_time;
+
+ int chunked; /* iff request body needs to be forwarded as chunked */
+};
+
+h2_proxy_request *h2_proxy_req_create(int id, apr_pool_t *pool);
+apr_status_t h2_proxy_req_make(h2_proxy_request *req, apr_pool_t *pool,
+ const char *method, const char *scheme,
+ const char *authority, const char *path,
+ apr_table_t *headers);
+
+/*******************************************************************************
+ * reverse mapping for link headers
+ ******************************************************************************/
+const char *h2_proxy_link_reverse_map(request_rec *r,
+ proxy_dir_conf *conf,
+ const char *real_server_uri,
+ const char *proxy_server_uri,
+ const char *s);
+
+/*******************************************************************************
+ * FIFO queue
+ ******************************************************************************/
+
+/**
+ * A thread-safe FIFO queue with some extra bells and whistles. If you
+ * do not need anything special, better use 'apr_queue'.
+ */
+typedef struct h2_proxy_fifo h2_proxy_fifo;
+
+/**
+ * Create a FIFO queue that can hold up to capacity elements. Elements can
+ * appear several times.
+ */
+apr_status_t h2_proxy_fifo_create(h2_proxy_fifo **pfifo, apr_pool_t *pool, int capacity);
+
+/**
+ * Create a FIFO set that can hold up to capacity elements. Elements only
+ * appear once. Pushing an element already present does not change the
+ * queue and is successful.
+ */
+apr_status_t h2_proxy_fifo_set_create(h2_proxy_fifo **pfifo, apr_pool_t *pool, int capacity);
+
+apr_status_t h2_proxy_fifo_term(h2_proxy_fifo *fifo);
+apr_status_t h2_proxy_fifo_interrupt(h2_proxy_fifo *fifo);
+
+int h2_proxy_fifo_capacity(h2_proxy_fifo *fifo);
+int h2_proxy_fifo_count(h2_proxy_fifo *fifo);
+
+/**
+ * Push an element into the queue. Blocks if there is no capacity left.
+ *
+ * @param fifo the FIFO queue
+ * @param elem the element to push
+ * @return APR_SUCCESS on push, APR_EAGAIN on try_push on a full queue,
+ * APR_EEXIST when in set mode and elem already there.
+ */
+apr_status_t h2_proxy_fifo_push(h2_proxy_fifo *fifo, void *elem);
+apr_status_t h2_proxy_fifo_try_push(h2_proxy_fifo *fifo, void *elem);
+
+apr_status_t h2_proxy_fifo_pull(h2_proxy_fifo *fifo, void **pelem);
+apr_status_t h2_proxy_fifo_try_pull(h2_proxy_fifo *fifo, void **pelem);
+
+/**
+ * Remove the element from the queue; all of its occurrences are removed.
+ * @param elem the element to remove
+ * @return APR_SUCCESS iff > 0 elems were removed, APR_EAGAIN otherwise.
+ */
+apr_status_t h2_proxy_fifo_remove(h2_proxy_fifo *fifo, void *elem);
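+
+/* Minimal usage sketch (illustrative; "thing" is a placeholder and return
+ * codes are left unchecked):
+ *
+ *     h2_proxy_fifo *fifo;
+ *     void *elem;
+ *     h2_proxy_fifo_create(&fifo, pool, 16);
+ *     h2_proxy_fifo_push(fifo, thing);           // blocks while full
+ *     if (h2_proxy_fifo_try_pull(fifo, &elem) == APR_SUCCESS) {
+ *         // use elem
+ *     }
+ *     h2_proxy_fifo_term(fifo);                  // mark as terminated
+ *     h2_proxy_fifo_interrupt(fifo);             // wake any blocked push/pull
+ */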
+
+
+#endif /* defined(__mod_h2__h2_proxy_util__) */
diff --git a/modules/http2/h2_push.c b/modules/http2/h2_push.c
new file mode 100644
index 0000000..462c470
--- /dev/null
+++ b/modules/http2/h2_push.c
@@ -0,0 +1,876 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <assert.h>
+#include <stdio.h>
+
+#include <apr_lib.h>
+#include <apr_strings.h>
+#include <apr_hash.h>
+#include <apr_time.h>
+
+#ifdef H2_OPENSSL
+#include <openssl/evp.h>
+#endif
+
+#include <httpd.h>
+#include <http_core.h>
+#include <http_log.h>
+#include <http_protocol.h>
+
+#include "h2_private.h"
+#include "h2_protocol.h"
+#include "h2_util.h"
+#include "h2_push.h"
+#include "h2_request.h"
+#include "h2_session.h"
+#include "h2_stream.h"
+
+/*******************************************************************************
+ * link header handling
+ ******************************************************************************/
+
+static const char *policy_str(h2_push_policy policy)
+{
+ switch (policy) {
+ case H2_PUSH_NONE:
+ return "none";
+ case H2_PUSH_FAST_LOAD:
+ return "fast-load";
+ case H2_PUSH_HEAD:
+ return "head";
+ default:
+ return "default";
+ }
+}
+
+typedef struct {
+ const h2_request *req;
+ apr_uint32_t push_policy;
+ apr_pool_t *pool;
+ apr_array_header_t *pushes;
+ const char *s;
+ size_t slen;
+ size_t i;
+
+ const char *link;
+ apr_table_t *params;
+ char b[4096];
+} link_ctx;
+
+static int attr_char(char c)
+{
+ switch (c) {
+ case '!':
+ case '#':
+ case '$':
+ case '&':
+ case '+':
+ case '-':
+ case '.':
+ case '^':
+ case '_':
+ case '`':
+ case '|':
+ case '~':
+ return 1;
+ default:
+ return apr_isalnum(c);
+ }
+}
+
+static int ptoken_char(char c)
+{
+ switch (c) {
+ case '!':
+ case '#':
+ case '$':
+ case '&':
+ case '\'':
+ case '(':
+ case ')':
+ case '*':
+ case '+':
+ case '-':
+ case '.':
+ case '/':
+ case ':':
+ case '<':
+ case '=':
+ case '>':
+ case '?':
+ case '@':
+ case '[':
+ case ']':
+ case '^':
+ case '_':
+ case '`':
+ case '{':
+ case '|':
+ case '}':
+ case '~':
+ return 1;
+ default:
+ return apr_isalnum(c);
+ }
+}
+
+static int skip_ws(link_ctx *ctx)
+{
+ char c;
+ while (ctx->i < ctx->slen
+ && (((c = ctx->s[ctx->i]) == ' ') || (c == '\t'))) {
+ ++ctx->i;
+ }
+ return (ctx->i < ctx->slen);
+}
+
+static int find_chr(link_ctx *ctx, char c, size_t *pidx)
+{
+ size_t j;
+ for (j = ctx->i; j < ctx->slen; ++j) {
+ if (ctx->s[j] == c) {
+ *pidx = j;
+ return 1;
+ }
+ }
+ return 0;
+}
+
+static int read_chr(link_ctx *ctx, char c)
+{
+ if (ctx->i < ctx->slen && ctx->s[ctx->i] == c) {
+ ++ctx->i;
+ return 1;
+ }
+ return 0;
+}
+
+static char *mk_str(link_ctx *ctx, size_t end)
+{
+ if (ctx->i < end) {
+ return apr_pstrndup(ctx->pool, ctx->s + ctx->i, end - ctx->i);
+ }
+ return (char*)"";
+}
+
+static int read_qstring(link_ctx *ctx, const char **ps)
+{
+ if (skip_ws(ctx) && read_chr(ctx, '\"')) {
+ size_t end;
+ if (find_chr(ctx, '\"', &end)) {
+ *ps = mk_str(ctx, end);
+ ctx->i = end + 1;
+ return 1;
+ }
+ }
+ return 0;
+}
+
+static int read_ptoken(link_ctx *ctx, const char **ps)
+{
+ if (skip_ws(ctx)) {
+ size_t i;
+ for (i = ctx->i; i < ctx->slen && ptoken_char(ctx->s[i]); ++i) {
+ /* nop */
+ }
+ if (i > ctx->i) {
+ *ps = mk_str(ctx, i);
+ ctx->i = i;
+ return 1;
+ }
+ }
+ return 0;
+}
+
+
+static int read_link(link_ctx *ctx)
+{
+ if (skip_ws(ctx) && read_chr(ctx, '<')) {
+ size_t end;
+ if (find_chr(ctx, '>', &end)) {
+ ctx->link = mk_str(ctx, end);
+ ctx->i = end + 1;
+ return 1;
+ }
+ }
+ return 0;
+}
+
+static int read_pname(link_ctx *ctx, const char **pname)
+{
+ if (skip_ws(ctx)) {
+ size_t i;
+ for (i = ctx->i; i < ctx->slen && attr_char(ctx->s[i]); ++i) {
+ /* nop */
+ }
+ if (i > ctx->i) {
+ *pname = mk_str(ctx, i);
+ ctx->i = i;
+ return 1;
+ }
+ }
+ return 0;
+}
+
+static int read_pvalue(link_ctx *ctx, const char **pvalue)
+{
+ if (skip_ws(ctx) && read_chr(ctx, '=')) {
+ if (read_qstring(ctx, pvalue) || read_ptoken(ctx, pvalue)) {
+ return 1;
+ }
+ }
+ return 0;
+}
+
+static int read_param(link_ctx *ctx)
+{
+ if (skip_ws(ctx) && read_chr(ctx, ';')) {
+ const char *name, *value = "";
+ if (read_pname(ctx, &name)) {
+ read_pvalue(ctx, &value); /* value is optional */
+ apr_table_setn(ctx->params, name, value);
+ return 1;
+ }
+ }
+ return 0;
+}
+
+static int read_sep(link_ctx *ctx)
+{
+ if (skip_ws(ctx) && read_chr(ctx, ',')) {
+ return 1;
+ }
+ return 0;
+}
+
+static void init_params(link_ctx *ctx)
+{
+ if (!ctx->params) {
+ ctx->params = apr_table_make(ctx->pool, 5);
+ }
+ else {
+ apr_table_clear(ctx->params);
+ }
+}
+
+static int same_authority(const h2_request *req, const apr_uri_t *uri)
+{
+ if (uri->scheme != NULL && strcmp(uri->scheme, req->scheme)) {
+ return 0;
+ }
+ if (uri->hostinfo != NULL && strcmp(uri->hostinfo, req->authority)) {
+ return 0;
+ }
+ return 1;
+}
+
+static int set_push_header(void *ctx, const char *key, const char *value)
+{
+ size_t klen = strlen(key);
+ if (H2_HD_MATCH_LIT("User-Agent", key, klen)
+ || H2_HD_MATCH_LIT("Accept", key, klen)
+ || H2_HD_MATCH_LIT("Accept-Encoding", key, klen)
+ || H2_HD_MATCH_LIT("Accept-Language", key, klen)
+ || H2_HD_MATCH_LIT("Cache-Control", key, klen)) {
+ apr_table_setn(ctx, key, value);
+ }
+ return 1;
+}
+
+static int has_param(link_ctx *ctx, const char *param)
+{
+ const char *p = apr_table_get(ctx->params, param);
+ return !!p;
+}
+
+static int has_relation(link_ctx *ctx, const char *rel)
+{
+ const char *s, *val = apr_table_get(ctx->params, "rel");
+ if (val) {
+ if (!strcmp(rel, val)) {
+ return 1;
+ }
+ s = ap_strstr_c(val, rel);
+ if (s && (s == val || s[-1] == ' ')) {
+ s += strlen(rel);
+ if (!*s || *s == ' ') {
+ return 1;
+ }
+ }
+ }
+ return 0;
+}
+
+static int add_push(link_ctx *ctx)
+{
+ /* so, we have read a Link header and need to decide
+ * if we transform it into a push.
+ */
+ if (has_relation(ctx, "preload") && !has_param(ctx, "nopush")) {
+ apr_uri_t uri;
+ if (apr_uri_parse(ctx->pool, ctx->link, &uri) == APR_SUCCESS) {
+ if (uri.path && same_authority(ctx->req, &uri)) {
+ char *path;
+ const char *method;
+ apr_table_t *headers;
+ h2_request *req;
+ h2_push *push;
+
+                /* We only want to generate pushes for resources in the
+                 * same authority as the original request.
+ * icing: i think that is wise, otherwise we really need to
+ * check that the vhost/server is available and uses the same
+ * TLS (if any) parameters.
+ */
+ path = apr_uri_unparse(ctx->pool, &uri, APR_URI_UNP_OMITSITEPART);
+ push = apr_pcalloc(ctx->pool, sizeof(*push));
+ switch (ctx->push_policy) {
+ case H2_PUSH_HEAD:
+ method = "HEAD";
+ break;
+ default:
+ method = "GET";
+ break;
+ }
+ headers = apr_table_make(ctx->pool, 5);
+ apr_table_do(set_push_header, headers, ctx->req->headers, NULL);
+ req = h2_request_create(0, ctx->pool, method, ctx->req->scheme,
+ ctx->req->authority, path, headers);
+ /* atm, we do not push on pushes */
+ h2_request_end_headers(req, ctx->pool, 0);
+ push->req = req;
+ if (has_param(ctx, "critical")) {
+ h2_priority *prio = apr_pcalloc(ctx->pool, sizeof(*prio));
+ prio->dependency = H2_DEPENDANT_BEFORE;
+ push->priority = prio;
+ }
+ if (!ctx->pushes) {
+ ctx->pushes = apr_array_make(ctx->pool, 5, sizeof(h2_push*));
+ }
+ APR_ARRAY_PUSH(ctx->pushes, h2_push*) = push;
+ }
+ }
+ }
+ return 0;
+}
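+
+/* Illustrative header values: for a request to https://example.org/, a
+ * response header
+ *   Link: </css/site.css>; rel=preload
+ * yields a push candidate (GET, or HEAD under H2_PUSH_HEAD policy) for
+ * /css/site.css on the same authority, while
+ *   Link: </css/site.css>; rel=preload; nopush
+ * or a link pointing to a different authority is skipped.
+ */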
+
+static void inspect_link(link_ctx *ctx, const char *s, size_t slen)
+{
+ /* RFC 5988 <https://tools.ietf.org/html/rfc5988#section-6.2.1>
+ Link = "Link" ":" #link-value
+ link-value = "<" URI-Reference ">" *( ";" link-param )
+ link-param = ( ( "rel" "=" relation-types )
+ | ( "anchor" "=" <"> URI-Reference <"> )
+ | ( "rev" "=" relation-types )
+ | ( "hreflang" "=" Language-Tag )
+ | ( "media" "=" ( MediaDesc | ( <"> MediaDesc <"> ) ) )
+ | ( "title" "=" quoted-string )
+ | ( "title*" "=" ext-value )
+ | ( "type" "=" ( media-type | quoted-mt ) )
+ | ( link-extension ) )
+ link-extension = ( parmname [ "=" ( ptoken | quoted-string ) ] )
+ | ( ext-name-star "=" ext-value )
+ ext-name-star = parmname "*" ; reserved for RFC2231-profiled
+ ; extensions. Whitespace NOT
+ ; allowed in between.
+ ptoken = 1*ptokenchar
+ ptokenchar = "!" | "#" | "$" | "%" | "&" | "'" | "("
+ | ")" | "*" | "+" | "-" | "." | "/" | DIGIT
+ | ":" | "<" | "=" | ">" | "?" | "@" | ALPHA
+ | "[" | "]" | "^" | "_" | "`" | "{" | "|"
+ | "}" | "~"
+ media-type = type-name "/" subtype-name
+ quoted-mt = <"> media-type <">
+ relation-types = relation-type
+ | <"> relation-type *( 1*SP relation-type ) <">
+ relation-type = reg-rel-type | ext-rel-type
+ reg-rel-type = LOALPHA *( LOALPHA | DIGIT | "." | "-" )
+ ext-rel-type = URI
+
+ and from <https://tools.ietf.org/html/rfc5987>
+ parmname = 1*attr-char
+ attr-char = ALPHA / DIGIT
+ / "!" / "#" / "$" / "&" / "+" / "-" / "."
+ / "^" / "_" / "`" / "|" / "~"
+ */
+
+ ctx->s = s;
+ ctx->slen = slen;
+ ctx->i = 0;
+
+ while (read_link(ctx)) {
+ init_params(ctx);
+ while (read_param(ctx)) {
+ /* nop */
+ }
+ add_push(ctx);
+ if (!read_sep(ctx)) {
+ break;
+ }
+ }
+}
+
+static int head_iter(void *ctx, const char *key, const char *value)
+{
+ if (!apr_strnatcasecmp("link", key)) {
+ inspect_link(ctx, value, strlen(value));
+ }
+ return 1;
+}
+
+#if AP_HAS_RESPONSE_BUCKETS
+apr_array_header_t *h2_push_collect(apr_pool_t *p,
+ const struct h2_request *req,
+ apr_uint32_t push_policy,
+ const ap_bucket_response *res)
+#else
+apr_array_header_t *h2_push_collect(apr_pool_t *p,
+ const struct h2_request *req,
+ apr_uint32_t push_policy,
+ const struct h2_headers *res)
+#endif
+{
+ if (req && push_policy != H2_PUSH_NONE) {
+ /* Collect push candidates from the request/response pair.
+ *
+ * One source for pushes are "rel=preload" link headers
+ * in the response.
+ *
+ * TODO: This may be extended in the future by hooks or callbacks
+ * where other modules can provide push information directly.
+ */
+ if (res->headers) {
+ link_ctx ctx;
+
+ memset(&ctx, 0, sizeof(ctx));
+ ctx.req = req;
+ ctx.push_policy = push_policy;
+ ctx.pool = p;
+
+ apr_table_do(head_iter, &ctx, res->headers, NULL);
+ if (ctx.pushes) {
+ apr_table_setn(res->headers, "push-policy",
+ policy_str(push_policy));
+ }
+ return ctx.pushes;
+ }
+ }
+ return NULL;
+}
+
+#define GCSLOG_LEVEL APLOG_TRACE1
+
+typedef struct h2_push_diary_entry {
+ apr_uint64_t hash;
+} h2_push_diary_entry;
+
+
+#ifdef H2_OPENSSL
+static void sha256_update(EVP_MD_CTX *ctx, const char *s)
+{
+ EVP_DigestUpdate(ctx, s, strlen(s));
+}
+
+static void calc_sha256_hash(h2_push_diary *diary, apr_uint64_t *phash, h2_push *push)
+{
+ EVP_MD_CTX *md;
+ apr_uint64_t val;
+ unsigned char hash[EVP_MAX_MD_SIZE];
+ unsigned len, i;
+
+ md = EVP_MD_CTX_create();
+ ap_assert(md != NULL);
+
+ i = EVP_DigestInit_ex(md, EVP_sha256(), NULL);
+ ap_assert(i == 1);
+ sha256_update(md, push->req->scheme);
+ sha256_update(md, "://");
+ sha256_update(md, push->req->authority);
+ sha256_update(md, push->req->path);
+ EVP_DigestFinal(md, hash, &len);
+
+ val = 0;
+ for (i = 0; i != len; ++i)
+ val = val * 256 + hash[i];
+ *phash = val >> (64 - diary->mask_bits);
+}
+#endif
+
+
+static unsigned int val_apr_hash(const char *str)
+{
+ apr_ssize_t len = (apr_ssize_t)strlen(str);
+ return apr_hashfunc_default(str, &len);
+}
+
+static void calc_apr_hash(h2_push_diary *diary, apr_uint64_t *phash, h2_push *push)
+{
+ apr_uint64_t val;
+ (void)diary;
+#if APR_UINT64_MAX > UINT_MAX
+ val = ((apr_uint64_t)(val_apr_hash(push->req->scheme)) << 32);
+ val ^= ((apr_uint64_t)(val_apr_hash(push->req->authority)) << 16);
+ val ^= val_apr_hash(push->req->path);
+#else
+ val = val_apr_hash(push->req->scheme);
+ val ^= val_apr_hash(push->req->authority);
+ val ^= val_apr_hash(push->req->path);
+#endif
+ *phash = val;
+}
+
+static apr_int32_t ceil_power_of_2(apr_int32_t n)
+{
+ if (n <= 2) return 2;
+ --n;
+ n |= n >> 1;
+ n |= n >> 2;
+ n |= n >> 4;
+ n |= n >> 8;
+ n |= n >> 16;
+ return ++n;
+}
+
+static h2_push_diary *diary_create(apr_pool_t *p, h2_push_digest_type dtype,
+ int N)
+{
+ h2_push_diary *diary = NULL;
+
+ if (N > 0) {
+ diary = apr_pcalloc(p, sizeof(*diary));
+
+ diary->NMax = ceil_power_of_2(N);
+ diary->N = diary->NMax;
+ /* the mask we use in value comparison depends on where we got
+ * the values from. If we calculate them ourselves, we can use
+ * the full 64 bits.
+         * If we set the diary via a compressed golomb set, we have fewer
+         * relevant bits and need to use a smaller mask. */
+ diary->mask_bits = 64;
+ /* grows by doubling, start with a power of 2 */
+ diary->entries = apr_array_make(p, 16, sizeof(h2_push_diary_entry));
+
+ switch (dtype) {
+#ifdef H2_OPENSSL
+ case H2_PUSH_DIGEST_SHA256:
+ diary->dtype = H2_PUSH_DIGEST_SHA256;
+ diary->dcalc = calc_sha256_hash;
+ break;
+#endif /* ifdef H2_OPENSSL */
+ default:
+ diary->dtype = H2_PUSH_DIGEST_APR_HASH;
+ diary->dcalc = calc_apr_hash;
+ break;
+ }
+ }
+
+ return diary;
+}
+
+h2_push_diary *h2_push_diary_create(apr_pool_t *p, int N)
+{
+ return diary_create(p, H2_PUSH_DIGEST_SHA256, N);
+}
+
+static int h2_push_diary_find(h2_push_diary *diary, apr_uint64_t hash)
+{
+ if (diary) {
+ h2_push_diary_entry *e;
+ int i;
+
+ /* search from the end, where the last accessed digests are */
+ for (i = diary->entries->nelts-1; i >= 0; --i) {
+ e = &APR_ARRAY_IDX(diary->entries, i, h2_push_diary_entry);
+ if (e->hash == hash) {
+ return i;
+ }
+ }
+ }
+ return -1;
+}
+
+static void move_to_last(h2_push_diary *diary, apr_size_t idx)
+{
+ h2_push_diary_entry *entries = (h2_push_diary_entry*)diary->entries->elts;
+ h2_push_diary_entry e;
+ apr_size_t lastidx;
+
+ /* Move an existing entry to the last place */
+ if (diary->entries->nelts <= 0)
+ return;
+
+ /* move entry[idx] to the end */
+ lastidx = diary->entries->nelts - 1;
+ if (idx < lastidx) {
+ e = entries[idx];
+ memmove(entries+idx, entries+idx+1, sizeof(h2_push_diary_entry) * (lastidx - idx));
+ entries[lastidx] = e;
+ }
+}
+
+static void remove_first(h2_push_diary *diary)
+{
+ h2_push_diary_entry *entries = (h2_push_diary_entry*)diary->entries->elts;
+ int lastidx;
+
+ /* move remaining entries to index 0 */
+ lastidx = diary->entries->nelts - 1;
+ if (lastidx > 0) {
+ --diary->entries->nelts;
+ memmove(entries, entries+1, sizeof(h2_push_diary_entry) * diary->entries->nelts);
+ }
+}
+
+static void h2_push_diary_append(h2_push_diary *diary, h2_push_diary_entry *e)
+{
+ while (diary->entries->nelts >= diary->N) {
+ remove_first(diary);
+ }
+ /* append a new diary entry at the end */
+ APR_ARRAY_PUSH(diary->entries, h2_push_diary_entry) = *e;
+ /* Intentional no APLOGNO */
+ ap_log_perror(APLOG_MARK, GCSLOG_LEVEL, 0, diary->entries->pool,
+ "push_diary_append: %"APR_UINT64_T_HEX_FMT, e->hash);
+}
+
+apr_array_header_t *h2_push_diary_update(h2_session *session, apr_array_header_t *pushes)
+{
+ apr_array_header_t *npushes = pushes;
+ h2_push_diary_entry e;
+ int i, idx;
+
+ if (session->push_diary && pushes) {
+ npushes = NULL;
+
+ for (i = 0; i < pushes->nelts; ++i) {
+ h2_push *push;
+
+ push = APR_ARRAY_IDX(pushes, i, h2_push*);
+ session->push_diary->dcalc(session->push_diary, &e.hash, push);
+ idx = h2_push_diary_find(session->push_diary, e.hash);
+ if (idx >= 0) {
+ /* Intentional no APLOGNO */
+ ap_log_cerror(APLOG_MARK, GCSLOG_LEVEL, 0, session->c1,
+ "push_diary_update: already there PUSH %s", push->req->path);
+ move_to_last(session->push_diary, (apr_size_t)idx);
+ }
+ else {
+ /* Intentional no APLOGNO */
+ ap_log_cerror(APLOG_MARK, GCSLOG_LEVEL, 0, session->c1,
+ "push_diary_update: adding PUSH %s", push->req->path);
+ if (!npushes) {
+ npushes = apr_array_make(pushes->pool, 5, sizeof(h2_push_diary_entry*));
+ }
+ APR_ARRAY_PUSH(npushes, h2_push*) = push;
+ h2_push_diary_append(session->push_diary, &e);
+ }
+ }
+ }
+ return npushes;
+}
+
+#if AP_HAS_RESPONSE_BUCKETS
+apr_array_header_t *h2_push_collect_update(struct h2_stream *stream,
+ const struct h2_request *req,
+ const ap_bucket_response *res)
+#else
+apr_array_header_t *h2_push_collect_update(struct h2_stream *stream,
+ const struct h2_request *req,
+ const struct h2_headers *res)
+#endif
+{
+ apr_array_header_t *pushes;
+
+ pushes = h2_push_collect(stream->pool, req, stream->push_policy, res);
+ return h2_push_diary_update(stream->session, pushes);
+}
+
+typedef struct {
+ h2_push_diary *diary;
+ unsigned char log2p;
+ int mask_bits;
+ int delta_bits;
+ int fixed_bits;
+ apr_uint64_t fixed_mask;
+ apr_pool_t *pool;
+ unsigned char *data;
+ apr_size_t datalen;
+ apr_size_t offset;
+ unsigned int bit;
+ apr_uint64_t last;
+} gset_encoder;
+
+static int cmp_puint64(const void *p1, const void *p2)
+{
+ const apr_uint64_t *pu1 = p1, *pu2 = p2;
+ return (*pu1 > *pu2)? 1 : ((*pu1 == *pu2)? 0 : -1);
+}
+
+/* in golomb bit stream encoding, bit 0 is the most significant bit of the
+ * first char, or more generally:
+ * char(bit/8) & cbit_mask[(bit % 8)]
+ */
+static unsigned char cbit_mask[] = {
+ 0x80u,
+ 0x40u,
+ 0x20u,
+ 0x10u,
+ 0x08u,
+ 0x04u,
+ 0x02u,
+ 0x01u,
+};
+
+static apr_status_t gset_encode_bit(gset_encoder *encoder, int bit)
+{
+ if (++encoder->bit >= 8) {
+ if (++encoder->offset >= encoder->datalen) {
+ apr_size_t nlen = encoder->datalen*2;
+ unsigned char *ndata = apr_pcalloc(encoder->pool, nlen);
+ if (!ndata) {
+ return APR_ENOMEM;
+ }
+ memcpy(ndata, encoder->data, encoder->datalen);
+ encoder->data = ndata;
+ encoder->datalen = nlen;
+ }
+ encoder->bit = 0;
+ encoder->data[encoder->offset] = 0xffu;
+ }
+ if (!bit) {
+ encoder->data[encoder->offset] &= ~cbit_mask[encoder->bit];
+ }
+ return APR_SUCCESS;
+}
+
+static apr_status_t gset_encode_next(gset_encoder *encoder, apr_uint64_t pval)
+{
+ apr_uint64_t delta, flex_bits;
+ apr_status_t status = APR_SUCCESS;
+ int i;
+
+ delta = pval - encoder->last;
+ encoder->last = pval;
+ flex_bits = (delta >> encoder->fixed_bits);
+ /* Intentional no APLOGNO */
+ ap_log_perror(APLOG_MARK, GCSLOG_LEVEL, 0, encoder->pool,
+ "h2_push_diary_enc: val=%"APR_UINT64_T_HEX_FMT", delta=%"
+                  APR_UINT64_T_HEX_FMT" flex_bits=%"APR_UINT64_T_FMT
+ ", fixed_bits=%d, fixed_val=%"APR_UINT64_T_HEX_FMT,
+ pval, delta, flex_bits, encoder->fixed_bits, delta&encoder->fixed_mask);
+ for (; flex_bits != 0; --flex_bits) {
+ status = gset_encode_bit(encoder, 1);
+ if (status != APR_SUCCESS) {
+ return status;
+ }
+ }
+ status = gset_encode_bit(encoder, 0);
+ if (status != APR_SUCCESS) {
+ return status;
+ }
+
+ for (i = encoder->fixed_bits-1; i >= 0; --i) {
+ status = gset_encode_bit(encoder, (delta >> i) & 1);
+ if (status != APR_SUCCESS) {
+ return status;
+ }
+ }
+ return APR_SUCCESS;
+}
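+/* Worked example for gset_encode_next(): with fixed_bits=3 and a delta of
+ * 13 (binary 1101), the quotient 13 >> 3 = 1 is emitted as a single '1'
+ * followed by the '0' stop bit, then the three low bits '101' follow,
+ * giving the bit string "10101". Sorted hashes produce small deltas and
+ * therefore short codes. */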
+
+/**
+ * Get a cache digest as described in
+ * https://datatracker.ietf.org/doc/draft-kazuho-h2-cache-digest/
+ * from the contents of the push diary.
+ *
+ * @param diary the diary to calculate the digest from
+ * @param p the pool to use
+ * @param pdata on successful return, the binary cache digest
+ * @param plen on successful return, the length of the binary data
+ */
+apr_status_t h2_push_diary_digest_get(h2_push_diary *diary, apr_pool_t *pool,
+ int maxP, const char *authority,
+ const char **pdata, apr_size_t *plen)
+{
+ int nelts, N;
+ unsigned char log2n, log2pmax;
+ gset_encoder encoder;
+ apr_uint64_t *hashes;
+ apr_size_t hash_count, i;
+
+ nelts = diary->entries->nelts;
+ N = ceil_power_of_2(nelts);
+ log2n = h2_log2(N);
+
+ /* Now log2p is the max number of relevant bits, so that
+ * log2p + log2n == mask_bits. We can use a lower log2p
+ * and have a shorter set encoding...
+ */
+ log2pmax = h2_log2(ceil_power_of_2(maxP));
+
+ memset(&encoder, 0, sizeof(encoder));
+ encoder.diary = diary;
+ encoder.log2p = H2MIN(diary->mask_bits - log2n, log2pmax);
+ encoder.mask_bits = log2n + encoder.log2p;
+ encoder.delta_bits = diary->mask_bits - encoder.mask_bits;
+ encoder.fixed_bits = encoder.log2p;
+ encoder.fixed_mask = 1;
+ encoder.fixed_mask = (encoder.fixed_mask << encoder.fixed_bits) - 1;
+ encoder.pool = pool;
+ encoder.datalen = 512;
+ encoder.data = apr_pcalloc(encoder.pool, encoder.datalen);
+
+ encoder.data[0] = log2n;
+ encoder.data[1] = encoder.log2p;
+ encoder.offset = 1;
+ encoder.bit = 8;
+ encoder.last = 0;
+
+ /* Intentional no APLOGNO */
+ ap_log_perror(APLOG_MARK, GCSLOG_LEVEL, 0, pool,
+ "h2_push_diary_digest_get: %d entries, N=%d, log2n=%d, "
+ "mask_bits=%d, enc.mask_bits=%d, delta_bits=%d, enc.log2p=%d, authority=%s",
+ (int)nelts, (int)N, (int)log2n, diary->mask_bits,
+ (int)encoder.mask_bits, (int)encoder.delta_bits,
+ (int)encoder.log2p, authority);
+
+ if (!authority || !diary->authority
+ || !strcmp("*", authority) || !strcmp(diary->authority, authority)) {
+ hash_count = diary->entries->nelts;
+        hashes = apr_pcalloc(encoder.pool, hash_count * sizeof(apr_uint64_t));
+ for (i = 0; i < hash_count; ++i) {
+ hashes[i] = ((&APR_ARRAY_IDX(diary->entries, i, h2_push_diary_entry))->hash
+ >> encoder.delta_bits);
+ }
+
+ qsort(hashes, hash_count, sizeof(apr_uint64_t), cmp_puint64);
+ for (i = 0; i < hash_count; ++i) {
+ if (!i || (hashes[i] != hashes[i-1])) {
+ gset_encode_next(&encoder, hashes[i]);
+ }
+ }
+ /* Intentional no APLOGNO */
+ ap_log_perror(APLOG_MARK, GCSLOG_LEVEL, 0, pool,
+ "h2_push_diary_digest_get: golomb compressed hashes, %d bytes",
+ (int)encoder.offset + 1);
+ }
+ *pdata = (const char *)encoder.data;
+ *plen = encoder.offset + 1;
+
+ return APR_SUCCESS;
+}
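+/* Layout of the digest produced above: data[0] carries log2n, data[1]
+ * carries log2p, and the golomb coded deltas of the sorted, de-duplicated
+ * hash values start at data[2]. If nothing gets encoded (empty diary or a
+ * non-matching authority), only these two header bytes are returned. */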
+
diff --git a/modules/http2/h2_push.h b/modules/http2/h2_push.h
new file mode 100644
index 0000000..947b73b
--- /dev/null
+++ b/modules/http2/h2_push.h
@@ -0,0 +1,158 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __mod_h2__h2_push__
+#define __mod_h2__h2_push__
+
+#include <http_protocol.h>
+
+#include "h2.h"
+#include "h2_headers.h"
+
+struct h2_request;
+struct h2_ngheader;
+struct h2_session;
+struct h2_stream;
+
+typedef struct h2_push {
+ const struct h2_request *req;
+ h2_priority *priority;
+} h2_push;
+
+typedef enum {
+ H2_PUSH_DIGEST_APR_HASH,
+ H2_PUSH_DIGEST_SHA256
+} h2_push_digest_type;
+
+/*******************************************************************************
+ * push diary
+ *
+ * - The push diary keeps track of resources already PUSHed via HTTP/2 on this
+ * connection. It records a hash value from the absolute URL of the resource
+ * pushed.
+ * - With openssl available, it uses SHA256 to calculate the hash value;
+ *   lacking openssl, it falls back to apr_hashfunc_default().
+ * - whatever the method to generate the hash, the diary keeps a maximum of 64
+ * bits per hash, limiting the memory consumption to about
+ * H2PushDiarySize * 8
+ * bytes. Entries are sorted by most recently used and oldest entries are
+ * forgotten first.
+ * - While useful by itself to avoid duplicated PUSHes on the same connection,
+ * the original idea was that clients provided a 'Cache-Digest' header with
+ * the values of *their own* cached resources. This was described in
+ * <https://datatracker.ietf.org/doc/draft-kazuho-h2-cache-digest/>
+ * and some subsequent revisions that tweaked values but kept the overall idea.
+ * - The draft was abandoned by the IETF http-wg, as support from major clients,
+ * e.g. browsers, was lacking for various reasons.
+ * - For these reasons, mod_h2 abandoned its support for client supplied values
+ * but keeps the diary. It seems to provide value for applications using PUSH,
+ * is configurable in size and defaults to a very moderate amount of memory
+ * used.
+ * - The cache digest header is a Golomb Coded Set of hash values, but it may
+ * limit the amount of bits per hash value even further. For a good description
+ * of GCS, read here:
+ * <http://giovanni.bajo.it/post/47119962313/golomb-coded-sets-smaller-than-bloom-filters>
+ ******************************************************************************/
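+/*
+ * Rough memory math, as an illustration of the H2PushDiarySize note above:
+ * each diary entry keeps one 64 bit hash, so a diary sized at, say, 256
+ * entries needs in the order of 256 * 8 = 2048 bytes per connection, plus
+ * the apr_array bookkeeping.
+ */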
+
+
+/*
+ * The push diary is based on the abandoned draft
+ * <https://datatracker.ietf.org/doc/draft-kazuho-h2-cache-digest/>
+ * that describes how to use golomb filters.
+ */
+
+typedef struct h2_push_diary h2_push_diary;
+
+typedef void h2_push_digest_calc(h2_push_diary *diary, apr_uint64_t *phash, h2_push *push);
+
+struct h2_push_diary {
+ apr_array_header_t *entries;
+ int NMax; /* Maximum for N, should size change be necessary */
+ int N; /* Current maximum number of entries, power of 2 */
+ apr_uint64_t mask; /* mask for relevant bits */
+ unsigned int mask_bits; /* number of relevant bits */
+ const char *authority;
+ h2_push_digest_type dtype;
+ h2_push_digest_calc *dcalc;
+};
+
+/**
+ * Determine the list of h2_push'es to send to the client on behalf of
+ * the given request/response pair.
+ *
+ * @param p the pool to use
+ * @param req the request from the client
+ * @param res the response from the server
+ * @return array of h2_push addresses or NULL
+ */
+#if AP_HAS_RESPONSE_BUCKETS
+apr_array_header_t *h2_push_collect(apr_pool_t *p,
+ const struct h2_request *req,
+ apr_uint32_t push_policy,
+ const ap_bucket_response *res);
+#else
+apr_array_header_t *h2_push_collect(apr_pool_t *p,
+ const struct h2_request *req,
+ apr_uint32_t push_policy,
+ const struct h2_headers *res);
+#endif
+
+/**
+ * Create a new push diary for the given maximum number of entries.
+ *
+ * @param p the pool to use
+ * @param N the max number of entries, rounded up to 2^x
+ * @return the created diary, might be NULL if N is 0
+ */
+h2_push_diary *h2_push_diary_create(apr_pool_t *p, int N);
+
+/**
+ * Filters the given pushes against the diary and returns only those pushes
+ * that were newly entered in the diary.
+ */
+apr_array_header_t *h2_push_diary_update(struct h2_session *session, apr_array_header_t *pushes);
+
+/**
+ * Collect pushes for the given request/response pair, enter them into the
+ * diary and return those pushes newly entered.
+ */
+#if AP_HAS_RESPONSE_BUCKETS
+apr_array_header_t *h2_push_collect_update(struct h2_stream *stream,
+ const struct h2_request *req,
+ const ap_bucket_response *res);
+#else
+apr_array_header_t *h2_push_collect_update(struct h2_stream *stream,
+ const struct h2_request *req,
+ const struct h2_headers *res);
+#endif
+
+/**
+ * Get a cache digest as described in
+ * https://datatracker.ietf.org/doc/draft-kazuho-h2-cache-digest/
+ * from the contents of the push diary.
+ *
+ * @param diary the diary to calculate the digest from
+ * @param p the pool to use
+ * @param authority the authority to get the data for, use NULL/"*" for all
+ * @param pdata on successful return, the binary cache digest
+ * @param plen on successful return, the length of the binary data
+ */
+apr_status_t h2_push_diary_digest_get(h2_push_diary *diary, apr_pool_t *p,
+ int maxP, const char *authority,
+ const char **pdata, apr_size_t *plen);
+
+#endif /* defined(__mod_h2__h2_push__) */
diff --git a/modules/http2/h2_request.c b/modules/http2/h2_request.c
new file mode 100644
index 0000000..20e94cd
--- /dev/null
+++ b/modules/http2/h2_request.c
@@ -0,0 +1,519 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <assert.h>
+
+#include "apr.h"
+#include "apr_strings.h"
+#include "apr_lib.h"
+#include "apr_strmatch.h"
+
+#include <ap_mmn.h>
+
+#include <httpd.h>
+#include <http_core.h>
+#include <http_connection.h>
+#include <http_protocol.h>
+#include <http_request.h>
+#include <http_log.h>
+#include <http_ssl.h>
+#include <http_vhost.h>
+#include <util_filter.h>
+#include <ap_mpm.h>
+#include <mod_core.h>
+#include <scoreboard.h>
+
+#include "h2_private.h"
+#include "h2_config.h"
+#include "h2_push.h"
+#include "h2_request.h"
+#include "h2_util.h"
+
+
+h2_request *h2_request_create(int id, apr_pool_t *pool, const char *method,
+ const char *scheme, const char *authority,
+ const char *path, apr_table_t *header)
+{
+ h2_request *req = apr_pcalloc(pool, sizeof(h2_request));
+
+ req->method = method;
+ req->scheme = scheme;
+ req->authority = authority;
+ req->path = path;
+ req->headers = header? header : apr_table_make(pool, 10);
+ req->request_time = apr_time_now();
+
+ return req;
+}
+
+typedef struct {
+ apr_table_t *headers;
+ apr_pool_t *pool;
+ apr_status_t status;
+} h1_ctx;
+
+static int set_h1_header(void *ctx, const char *key, const char *value)
+{
+ h1_ctx *x = ctx;
+ int was_added;
+ h2_req_add_header(x->headers, x->pool, key, strlen(key), value, strlen(value), 0, &was_added);
+ return 1;
+}
+
+apr_status_t h2_request_rcreate(h2_request **preq, apr_pool_t *pool,
+ request_rec *r)
+{
+ h2_request *req;
+ const char *scheme, *authority, *path;
+ h1_ctx x;
+
+ *preq = NULL;
+ scheme = apr_pstrdup(pool, r->parsed_uri.scheme? r->parsed_uri.scheme
+ : ap_http_scheme(r));
+ authority = apr_pstrdup(pool, r->hostname);
+ path = apr_uri_unparse(pool, &r->parsed_uri, APR_URI_UNP_OMITSITEPART);
+
+ if (!r->method || !scheme || !r->hostname || !path) {
+ return APR_EINVAL;
+ }
+
+ /* The authority we carry in h2_request is the 'authority' part of
+ * the URL for the request. r->hostname has stripped any port info that
+ * might have been present. Do we need to add it?
+ */
+ if (!ap_strchr_c(authority, ':')) {
+ if (r->parsed_uri.port_str) {
+ /* Yes, it was there, add it again. */
+ authority = apr_pstrcat(pool, authority, ":", r->parsed_uri.port_str, NULL);
+ }
+ else if (!r->parsed_uri.hostname && r->server && r->server->port) {
+ /* If there was no hostname in the parsed URL, the URL was relative.
+ * In that case, we restore port from our server->port, if it
+ * is known and not the default port for the scheme. */
+ apr_port_t defport = apr_uri_port_of_scheme(scheme);
+ if (defport != r->server->port) {
+ /* port info missing and port is not default for scheme: append */
+ authority = apr_psprintf(pool, "%s:%d", authority,
+ (int)r->server->port);
+ }
+ }
+ }
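+    /* Example (hostnames purely illustrative): a relative request on a
+     * server listening on port 8443 with scheme "https" turns a hostname of
+     * "test.example.org" into the authority "test.example.org:8443", while
+     * on the scheme's default port 443 it stays "test.example.org". */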
+
+ req = apr_pcalloc(pool, sizeof(*req));
+ req->method = apr_pstrdup(pool, r->method);
+ req->scheme = scheme;
+ req->authority = authority;
+ req->path = path;
+ req->headers = apr_table_make(pool, 10);
+ req->http_status = H2_HTTP_STATUS_UNSET;
+
+ x.pool = pool;
+ x.headers = req->headers;
+ x.status = APR_SUCCESS;
+ apr_table_do(set_h1_header, &x, r->headers_in, NULL);
+
+ *preq = req;
+ return x.status;
+}
+
+apr_status_t h2_request_add_header(h2_request *req, apr_pool_t *pool,
+ const char *name, size_t nlen,
+ const char *value, size_t vlen,
+ size_t max_field_len, int *pwas_added)
+{
+ apr_status_t status = APR_SUCCESS;
+
+ *pwas_added = 0;
+ if (nlen <= 0) {
+ return status;
+ }
+
+ if (name[0] == ':') {
+        /* pseudo header, see ch. 8.1.2.3, should always come first */
+ if (!apr_is_empty_table(req->headers)) {
+ ap_log_perror(APLOG_MARK, APLOG_ERR, 0, pool,
+ APLOGNO(02917)
+ "h2_request: pseudo header after request start");
+ return APR_EGENERAL;
+ }
+
+ if (H2_HEADER_METHOD_LEN == nlen
+ && !strncmp(H2_HEADER_METHOD, name, nlen)) {
+ req->method = apr_pstrndup(pool, value, vlen);
+ }
+ else if (H2_HEADER_SCHEME_LEN == nlen
+ && !strncmp(H2_HEADER_SCHEME, name, nlen)) {
+ req->scheme = apr_pstrndup(pool, value, vlen);
+ }
+ else if (H2_HEADER_PATH_LEN == nlen
+ && !strncmp(H2_HEADER_PATH, name, nlen)) {
+ req->path = apr_pstrndup(pool, value, vlen);
+ }
+ else if (H2_HEADER_AUTH_LEN == nlen
+ && !strncmp(H2_HEADER_AUTH, name, nlen)) {
+ req->authority = apr_pstrndup(pool, value, vlen);
+ }
+ else {
+ char buffer[32];
+ memset(buffer, 0, 32);
+ strncpy(buffer, name, (nlen > 31)? 31 : nlen);
+ ap_log_perror(APLOG_MARK, APLOG_WARNING, 0, pool,
+ APLOGNO(02954)
+ "h2_request: ignoring unknown pseudo header %s",
+ buffer);
+ }
+ }
+ else {
+ /* non-pseudo header, add to table */
+ status = h2_req_add_header(req->headers, pool, name, nlen, value, vlen,
+ max_field_len, pwas_added);
+ }
+
+ return status;
+}
+
+apr_status_t h2_request_end_headers(h2_request *req, apr_pool_t *pool,
+ size_t raw_bytes)
+{
+ /* rfc7540, ch. 8.1.2.3: without :authority, Host: must be there */
+ if (req->authority && !strlen(req->authority)) {
+ req->authority = NULL;
+ }
+ if (!req->authority) {
+ const char *host = apr_table_get(req->headers, "Host");
+ if (!host) {
+ return APR_BADARG;
+ }
+ req->authority = host;
+ }
+ else {
+ apr_table_setn(req->headers, "Host", req->authority);
+ }
+ req->raw_bytes += raw_bytes;
+
+ return APR_SUCCESS;
+}
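+/* In other words: an empty :authority is treated as absent; without any
+ * :authority the Host: header must supply the authority or APR_BADARG is
+ * returned; with a non-empty :authority any existing Host: header is
+ * overwritten to match it. */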
+
+h2_request *h2_request_clone(apr_pool_t *p, const h2_request *src)
+{
+ h2_request *dst = apr_pmemdup(p, src, sizeof(*dst));
+ dst->method = apr_pstrdup(p, src->method);
+ dst->scheme = apr_pstrdup(p, src->scheme);
+ dst->authority = apr_pstrdup(p, src->authority);
+ dst->path = apr_pstrdup(p, src->path);
+ dst->headers = apr_table_clone(p, src->headers);
+ return dst;
+}
+
+#if !AP_MODULE_MAGIC_AT_LEAST(20120211, 106)
+static request_rec *my_ap_create_request(conn_rec *c)
+{
+ apr_pool_t *p;
+ request_rec *r;
+
+ apr_pool_create(&p, c->pool);
+ apr_pool_tag(p, "request");
+ r = apr_pcalloc(p, sizeof(request_rec));
+ AP_READ_REQUEST_ENTRY((intptr_t)r, (uintptr_t)c);
+ r->pool = p;
+ r->connection = c;
+ r->server = c->base_server;
+
+ r->user = NULL;
+ r->ap_auth_type = NULL;
+
+ r->allowed_methods = ap_make_method_list(p, 2);
+
+ r->headers_in = apr_table_make(r->pool, 5);
+ r->trailers_in = apr_table_make(r->pool, 5);
+ r->subprocess_env = apr_table_make(r->pool, 25);
+ r->headers_out = apr_table_make(r->pool, 12);
+ r->err_headers_out = apr_table_make(r->pool, 5);
+ r->trailers_out = apr_table_make(r->pool, 5);
+ r->notes = apr_table_make(r->pool, 5);
+
+ r->request_config = ap_create_request_config(r->pool);
+ /* Must be set before we run create request hook */
+
+ r->proto_output_filters = c->output_filters;
+ r->output_filters = r->proto_output_filters;
+ r->proto_input_filters = c->input_filters;
+ r->input_filters = r->proto_input_filters;
+ ap_run_create_request(r);
+ r->per_dir_config = r->server->lookup_defaults;
+
+ r->sent_bodyct = 0; /* bytect isn't for body */
+
+ r->read_length = 0;
+ r->read_body = REQUEST_NO_BODY;
+
+ r->status = HTTP_OK; /* Until further notice */
+ r->header_only = 0;
+ r->the_request = NULL;
+
+ /* Begin by presuming any module can make its own path_info assumptions,
+ * until some module interjects and changes the value.
+ */
+ r->used_path_info = AP_REQ_DEFAULT_PATH_INFO;
+
+ r->useragent_addr = c->client_addr;
+ r->useragent_ip = c->client_ip;
+ return r;
+}
+#endif
+
+#if AP_HAS_RESPONSE_BUCKETS
+apr_bucket *h2_request_create_bucket(const h2_request *req, request_rec *r)
+{
+ conn_rec *c = r->connection;
+ apr_table_t *headers = apr_table_clone(r->pool, req->headers);
+ const char *uri = req->path;
+
+ AP_DEBUG_ASSERT(req->authority);
+ if (req->scheme && (ap_cstr_casecmp(req->scheme,
+ ap_ssl_conn_is_ssl(c->master? c->master : c)? "https" : "http")
+ || !ap_cstr_casecmp("CONNECT", req->method))) {
+ /* Client sent a non-matching ':scheme' pseudo header or CONNECT.
+ * In this case, we use an absolute URI.
+ */
+ uri = apr_psprintf(r->pool, "%s://%s%s",
+ req->scheme, req->authority, req->path ? req->path : "");
+ }
+
+ return ap_bucket_request_create(req->method, uri, "HTTP/2.0", headers,
+ r->pool, c->bucket_alloc);
+}
+#endif
+
+static void assign_headers(request_rec *r, const h2_request *req,
+ int no_body)
+{
+ const char *cl;
+
+ r->headers_in = apr_table_clone(r->pool, req->headers);
+ if (req->authority) {
+ /* for internal handling, we have to simulate that :authority
+         * came in as Host:. RFC 9113 says that mismatches between
+ * :authority and Host: SHOULD be rejected as malformed. However,
+ * we are more lenient and just replace any Host: if we have
+ * an :authority.
+ */
+ const char *orig_host = apr_table_get(req->headers, "Host");
+ if (orig_host && strcmp(req->authority, orig_host)) {
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(10401)
+ "overwriting 'Host: %s' with :authority: %s'",
+ orig_host, req->authority);
+ apr_table_setn(r->subprocess_env, "H2_ORIGINAL_HOST", orig_host);
+ }
+ apr_table_setn(r->headers_in, "Host", req->authority);
+ ap_log_rerror(APLOG_MARK, APLOG_TRACE1, 0, r,
+ "set 'Host: %s' from :authority", req->authority);
+ }
+
+ cl = apr_table_get(req->headers, "Content-Length");
+ if (no_body) {
+ if (!cl && apr_table_get(req->headers, "Content-Type")) {
+ /* If we have a content-type, but already seen eos, no more
+ * data will come. Signal a zero content length explicitly.
+ */
+ apr_table_setn(req->headers, "Content-Length", "0");
+ }
+ }
+#if !AP_HAS_RESPONSE_BUCKETS
+ else if (!cl) {
+ /* there may be a body and we have internal HTTP/1.1 processing.
+ * If the Content-Length is unspecified, we MUST simulate
+ * chunked Transfer-Encoding.
+ *
+ * HTTP/2 does not need a Content-Length for framing. Ideally
+ * all clients set the EOS flag on the header frame if they
+         * do not intend to send a body. However, forwarding proxies
+         * might just not know that at the time and send an empty DATA
+ * frame with EOS much later.
+ */
+ apr_table_mergen(r->headers_in, "Transfer-Encoding", "chunked");
+ }
+#endif /* else AP_HAS_RESPONSE_BUCKETS */
+}
+
+request_rec *h2_create_request_rec(const h2_request *req, conn_rec *c,
+ int no_body)
+{
+ int access_status = HTTP_OK;
+
+#if AP_MODULE_MAGIC_AT_LEAST(20120211, 106)
+ request_rec *r = ap_create_request(c);
+#else
+ request_rec *r = my_ap_create_request(c);
+#endif
+
+#if AP_MODULE_MAGIC_AT_LEAST(20120211, 107)
+ assign_headers(r, req, no_body);
+ ap_run_pre_read_request(r, c);
+
+ /* Time to populate r with the data we have. */
+ r->request_time = req->request_time;
+ AP_DEBUG_ASSERT(req->authority);
+ if (req->scheme && (ap_cstr_casecmp(req->scheme,
+ ap_ssl_conn_is_ssl(c->master? c->master : c)? "https" : "http")
+ || !ap_cstr_casecmp("CONNECT", req->method))) {
+ /* Client sent a non-matching ':scheme' pseudo header. Forward this
+ * via an absolute URI in the request line.
+ */
+ r->the_request = apr_psprintf(r->pool, "%s %s://%s%s HTTP/2.0",
+ req->method, req->scheme, req->authority,
+ req->path ? req->path : "");
+ }
+ else if (req->path) {
+ r->the_request = apr_psprintf(r->pool, "%s %s HTTP/2.0",
+ req->method, req->path);
+ }
+ else {
+ /* We should only come here on a request that is errored already.
+         * Create a request line that passes parsing; we'll die anyway.
+ */
+ AP_DEBUG_ASSERT(req->http_status != H2_HTTP_STATUS_UNSET);
+ r->the_request = apr_psprintf(r->pool, "%s / HTTP/2.0", req->method);
+ }
+
+ /* Start with r->hostname = NULL, ap_check_request_header() will get it
+     * from the Host: header, otherwise we get complaints about port numbers.
+ */
+ r->hostname = NULL;
+
+ /* Validate HTTP/1 request and select vhost. */
+ if (!ap_parse_request_line(r) || !ap_check_request_header(r)) {
+ /* we may have switched to another server still */
+ r->per_dir_config = r->server->lookup_defaults;
+ if (req->http_status != H2_HTTP_STATUS_UNSET) {
+ access_status = req->http_status;
+ /* Be safe and close the connection */
+ c->keepalive = AP_CONN_CLOSE;
+ }
+ else {
+ access_status = r->status;
+ }
+ r->status = HTTP_OK;
+ goto die;
+ }
+#else
+ {
+ const char *s;
+
+ assign_headers(r, req, no_body);
+ ap_run_pre_read_request(r, c);
+
+ /* Time to populate r with the data we have. */
+ r->request_time = req->request_time;
+ r->method = apr_pstrdup(r->pool, req->method);
+ /* Provide quick information about the request method as soon as known */
+ r->method_number = ap_method_number_of(r->method);
+ if (r->method_number == M_GET && r->method[0] == 'H') {
+ r->header_only = 1;
+ }
+ ap_parse_uri(r, req->path ? req->path : "");
+ r->protocol = (char*)"HTTP/2.0";
+ r->proto_num = HTTP_VERSION(2, 0);
+ r->the_request = apr_psprintf(r->pool, "%s %s HTTP/2.0",
+ r->method, req->path ? req->path : "");
+
+ /* Start with r->hostname = NULL, ap_check_request_header() will get it
+         * from the Host: header, otherwise we get complaints about port numbers.
+ */
+ r->hostname = NULL;
+ ap_update_vhost_from_headers(r);
+
+ /* we may have switched to another server */
+ r->per_dir_config = r->server->lookup_defaults;
+
+ s = apr_table_get(r->headers_in, "Expect");
+ if (s && s[0]) {
+ if (ap_cstr_casecmp(s, "100-continue") == 0) {
+ r->expecting_100 = 1;
+ }
+ else {
+ r->status = HTTP_EXPECTATION_FAILED;
+ access_status = r->status;
+ goto die;
+ }
+ }
+ }
+#endif
+
+ /* we may have switched to another server */
+ r->per_dir_config = r->server->lookup_defaults;
+
+ if (req->http_status != H2_HTTP_STATUS_UNSET) {
+ access_status = req->http_status;
+ r->status = HTTP_OK;
+ /* Be safe and close the connection */
+ c->keepalive = AP_CONN_CLOSE;
+ goto die;
+ }
+
+ /*
+ * Add the HTTP_IN filter here to ensure that ap_discard_request_body
+ * called by ap_die and by ap_send_error_response works correctly on
+ * status codes that do not cause the connection to be dropped and
+ * in situations where the connection should be kept alive.
+ */
+ ap_add_input_filter_handle(ap_http_input_filter_handle,
+ NULL, r, r->connection);
+
+ if ((access_status = ap_post_read_request(r))) {
+ /* Request check post hooks failed. An example of this would be a
+ * request for a vhost where h2 is disabled --> 421.
+ */
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, c, APLOGNO(03367)
+ "h2_request: access_status=%d, request_create failed",
+ access_status);
+ goto die;
+ }
+
+ AP_READ_REQUEST_SUCCESS((uintptr_t)r, (char *)r->method,
+ (char *)r->uri, (char *)r->server->defn_name,
+ r->status);
+ return r;
+
+die:
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, c,
+ "ap_die(%d) for %s", access_status, r->the_request);
+ ap_die(access_status, r);
+
+ /* ap_die() sent the response through the output filters, we must now
+ * end the request with an EOR bucket for stream/pipeline accounting.
+ */
+ {
+ apr_bucket_brigade *eor_bb;
+#if AP_MODULE_MAGIC_AT_LEAST(20180905, 1)
+ eor_bb = ap_acquire_brigade(c);
+ APR_BRIGADE_INSERT_TAIL(eor_bb,
+ ap_bucket_eor_create(c->bucket_alloc, r));
+ ap_pass_brigade(c->output_filters, eor_bb);
+ ap_release_brigade(c, eor_bb);
+#else
+ eor_bb = apr_brigade_create(c->pool, c->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(eor_bb,
+ ap_bucket_eor_create(c->bucket_alloc, r));
+ ap_pass_brigade(c->output_filters, eor_bb);
+ apr_brigade_destroy(eor_bb);
+#endif
+ }
+
+ r = NULL;
+ AP_READ_REQUEST_FAILURE((uintptr_t)r);
+ return NULL;
+}
diff --git a/modules/http2/h2_request.h b/modules/http2/h2_request.h
new file mode 100644
index 0000000..7e20b69
--- /dev/null
+++ b/modules/http2/h2_request.h
@@ -0,0 +1,59 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __mod_h2__h2_request__
+#define __mod_h2__h2_request__
+
+#include "h2.h"
+
+h2_request *h2_request_create(int id, apr_pool_t *pool, const char *method,
+ const char *scheme, const char *authority,
+ const char *path, apr_table_t *header);
+
+apr_status_t h2_request_rcreate(h2_request **preq, apr_pool_t *pool,
+ request_rec *r);
+
+apr_status_t h2_request_add_header(h2_request *req, apr_pool_t *pool,
+ const char *name, size_t nlen,
+ const char *value, size_t vlen,
+ size_t max_field_len, int *pwas_added);
+
+apr_status_t h2_request_add_trailer(h2_request *req, apr_pool_t *pool,
+ const char *name, size_t nlen,
+ const char *value, size_t vlen);
+
+apr_status_t h2_request_end_headers(h2_request *req, apr_pool_t *pool,
+ size_t raw_bytes);
+
+h2_request *h2_request_clone(apr_pool_t *p, const h2_request *src);
+
+/**
+ * Create a request_rec representing the h2_request to be
+ * processed on the given connection.
+ *
+ * @param req the h2 request to process
+ * @param conn the connection to process the request on
+ * @param no_body != 0 iff the request is known to have no body
+ * @return the request_rec representing the request
+ */
+request_rec *h2_create_request_rec(const h2_request *req, conn_rec *conn,
+ int no_body);
+
+#if AP_HAS_RESPONSE_BUCKETS
+apr_bucket *h2_request_create_bucket(const h2_request *req, request_rec *r);
+#endif
+
+#endif /* defined(__mod_h2__h2_request__) */
diff --git a/modules/http2/h2_session.c b/modules/http2/h2_session.c
new file mode 100644
index 0000000..7ba49cf
--- /dev/null
+++ b/modules/http2/h2_session.c
@@ -0,0 +1,1991 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <assert.h>
+#include <stddef.h>
+#include <apr_thread_cond.h>
+#include <apr_atomic.h>
+#include <apr_base64.h>
+#include <apr_strings.h>
+
+#include <ap_mpm.h>
+
+#include <httpd.h>
+#include <http_core.h>
+#include <http_config.h>
+#include <http_log.h>
+#include <http_protocol.h>
+#include <scoreboard.h>
+
+#include <mpm_common.h>
+
+#if APR_HAVE_UNISTD_H
+#include <unistd.h> /* for getpid() */
+#endif
+
+#include "h2_private.h"
+#include "h2.h"
+#include "h2_bucket_beam.h"
+#include "h2_bucket_eos.h"
+#include "h2_config.h"
+#include "h2_conn_ctx.h"
+#include "h2_protocol.h"
+#include "h2_mplx.h"
+#include "h2_push.h"
+#include "h2_request.h"
+#include "h2_headers.h"
+#include "h2_stream.h"
+#include "h2_c2.h"
+#include "h2_session.h"
+#include "h2_util.h"
+#include "h2_version.h"
+#include "h2_workers.h"
+
+
+static void transit(h2_session *session, const char *action,
+ h2_session_state nstate);
+
+static void on_stream_state_enter(void *ctx, h2_stream *stream);
+static void on_stream_state_event(void *ctx, h2_stream *stream, h2_stream_event_t ev);
+static void on_stream_event(void *ctx, h2_stream *stream, h2_stream_event_t ev);
+
+static int h2_session_status_from_apr_status(apr_status_t rv)
+{
+ if (rv == APR_SUCCESS) {
+ return NGHTTP2_NO_ERROR;
+ }
+ else if (APR_STATUS_IS_EAGAIN(rv)) {
+ return NGHTTP2_ERR_WOULDBLOCK;
+ }
+ else if (APR_STATUS_IS_EOF(rv)) {
+ return NGHTTP2_ERR_EOF;
+ }
+ return NGHTTP2_ERR_PROTO;
+}
+
+static h2_stream *get_stream(h2_session *session, int stream_id)
+{
+ return nghttp2_session_get_stream_user_data(session->ngh2, stream_id);
+}
+
+void h2_session_event(h2_session *session, h2_session_event_t ev,
+ int err, const char *msg)
+{
+ h2_session_dispatch_event(session, ev, err, msg);
+}
+
+static int rst_unprocessed_stream(h2_stream *stream, void *ctx)
+{
+ int unprocessed = (!h2_stream_is_at_or_past(stream, H2_SS_CLOSED)
+ && (H2_STREAM_CLIENT_INITIATED(stream->id)?
+ (!stream->session->local.accepting
+ && stream->id > stream->session->local.accepted_max)
+ :
+ (!stream->session->remote.accepting
+ && stream->id > stream->session->remote.accepted_max))
+ );
+ if (unprocessed) {
+ h2_stream_rst(stream, H2_ERR_NO_ERROR);
+ return 0;
+ }
+ return 1;
+}
+
+static void cleanup_unprocessed_streams(h2_session *session)
+{
+ h2_mplx_c1_streams_do(session->mplx, rst_unprocessed_stream, session);
+}
+
+static h2_stream *h2_session_open_stream(h2_session *session, int stream_id,
+ int initiated_on)
+{
+ h2_stream * stream;
+ apr_pool_t *stream_pool;
+
+ apr_pool_create(&stream_pool, session->pool);
+ apr_pool_tag(stream_pool, "h2_stream");
+
+ stream = h2_stream_create(stream_id, stream_pool, session,
+ session->monitor, initiated_on);
+ if (stream) {
+ nghttp2_session_set_stream_user_data(session->ngh2, stream_id, stream);
+ }
+ return stream;
+}
+
+/**
+ * Determine the priority order of streams.
+ * - if both streams depend on the same one, compare their weights
+ * - if one stream is closer to the root, prioritize that one
+ * - if both are on the same level, use the weight of their root
+ * level ancestors
+ */
+static int spri_cmp(int sid1, nghttp2_stream *s1,
+ int sid2, nghttp2_stream *s2, h2_session *session)
+{
+ nghttp2_stream *p1, *p2;
+
+ p1 = nghttp2_stream_get_parent(s1);
+ p2 = nghttp2_stream_get_parent(s2);
+
+ if (p1 == p2) {
+ int32_t w1, w2;
+
+ w1 = nghttp2_stream_get_weight(s1);
+ w2 = nghttp2_stream_get_weight(s2);
+ return w2 - w1;
+ }
+ else if (!p1) {
+ /* stream 1 closer to root */
+ return -1;
+ }
+ else if (!p2) {
+ /* stream 2 closer to root */
+ return 1;
+ }
+ return spri_cmp(sid1, p1, sid2, p2, session);
+}
+
+static int stream_pri_cmp(int sid1, int sid2, void *ctx)
+{
+ h2_session *session = ctx;
+ nghttp2_stream *s1, *s2;
+
+ s1 = nghttp2_session_find_stream(session->ngh2, sid1);
+ s2 = nghttp2_session_find_stream(session->ngh2, sid2);
+
+ if (s1 == s2) {
+ return 0;
+ }
+ else if (!s1) {
+ return 1;
+ }
+ else if (!s2) {
+ return -1;
+ }
+ return spri_cmp(sid1, s1, sid2, s2, session);
+}
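+/* Example: two streams that both depend on the same parent, with weights
+ * 32 and 16, compare as 16 - 32 = -16, so the heavier stream sorts first
+ * in an ascending sort. */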
+
+/*
+ * Callback when nghttp2 wants to send bytes back to the client.
+ */
+static ssize_t send_cb(nghttp2_session *ngh2,
+ const uint8_t *data, size_t length,
+ int flags, void *userp)
+{
+ h2_session *session = (h2_session *)userp;
+ apr_status_t rv;
+ (void)ngh2;
+ (void)flags;
+
+ if (h2_c1_io_needs_flush(&session->io)) {
+ return NGHTTP2_ERR_WOULDBLOCK;
+ }
+
+ rv = h2_c1_io_add_data(&session->io, (const char *)data, length);
+ if (APR_SUCCESS == rv) {
+ return length;
+ }
+ else if (APR_STATUS_IS_EAGAIN(rv)) {
+ return NGHTTP2_ERR_WOULDBLOCK;
+ }
+ else {
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, rv, session->c1,
+ APLOGNO(03062) "h2_session: send error");
+ return h2_session_status_from_apr_status(rv);
+ }
+}
+
+static int on_invalid_frame_recv_cb(nghttp2_session *ngh2,
+ const nghttp2_frame *frame,
+ int error, void *userp)
+{
+ h2_session *session = (h2_session *)userp;
+ (void)ngh2;
+
+ if (APLOGcdebug(session->c1)) {
+ char buffer[256];
+
+ h2_util_frame_print(frame, buffer, sizeof(buffer)/sizeof(buffer[0]));
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c1,
+ H2_SSSN_LOG(APLOGNO(03063), session,
+ "recv invalid FRAME[%s], frames=%ld/%ld (r/s)"),
+ buffer, (long)session->frames_received,
+ (long)session->frames_sent);
+ }
+ return 0;
+}
+
+static int on_data_chunk_recv_cb(nghttp2_session *ngh2, uint8_t flags,
+ int32_t stream_id,
+ const uint8_t *data, size_t len, void *userp)
+{
+ h2_session *session = (h2_session *)userp;
+ apr_status_t status = APR_EINVAL;
+ h2_stream * stream;
+ int rv = 0;
+
+ stream = get_stream(session, stream_id);
+ if (stream) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, session->c1,
+ H2_SSSN_STRM_MSG(session, stream_id, "write %ld bytes of DATA"),
+ (long)len);
+ status = h2_stream_recv_DATA(stream, flags, data, len);
+ }
+ else {
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c1, APLOGNO(03064)
+ H2_SSSN_STRM_MSG(session, stream_id,
+ "on_data_chunk for unknown stream"));
+ rv = NGHTTP2_ERR_CALLBACK_FAILURE;
+ }
+
+ if (status != APR_SUCCESS) {
+ /* count this as consumed explicitly as no one will read it */
+ nghttp2_session_consume(session->ngh2, stream_id, len);
+ }
+ return rv;
+}
+
+static int on_stream_close_cb(nghttp2_session *ngh2, int32_t stream_id,
+ uint32_t error_code, void *userp)
+{
+ h2_session *session = (h2_session *)userp;
+ h2_stream *stream;
+
+ (void)ngh2;
+ stream = get_stream(session, stream_id);
+ if (stream) {
+ if (error_code) {
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c1,
+ H2_STRM_LOG(APLOGNO(03065), stream,
+ "closing with err=%d %s"),
+ (int)error_code, h2_protocol_err_description(error_code));
+ h2_stream_rst(stream, error_code);
+ }
+ }
+ return 0;
+}
+
+static int on_begin_headers_cb(nghttp2_session *ngh2,
+ const nghttp2_frame *frame, void *userp)
+{
+ h2_session *session = (h2_session *)userp;
+ h2_stream *s = NULL;
+
+    /* We may see HEADERs at the start of a stream or, after all DATA
+     * frames, carrying trailers. */
+ (void)ngh2;
+ s = get_stream(session, frame->hd.stream_id);
+ if (s) {
+ /* nop */
+ }
+ else if (session->local.accepting) {
+ s = h2_session_open_stream(userp, frame->hd.stream_id, 0);
+ }
+ return s? 0 : NGHTTP2_ERR_START_STREAM_NOT_ALLOWED;
+}
+
+static int on_header_cb(nghttp2_session *ngh2, const nghttp2_frame *frame,
+ const uint8_t *name, size_t namelen,
+ const uint8_t *value, size_t valuelen,
+ uint8_t flags,
+ void *userp)
+{
+ h2_session *session = (h2_session *)userp;
+ h2_stream * stream;
+ apr_status_t status;
+
+ (void)flags;
+ stream = get_stream(session, frame->hd.stream_id);
+ if (!stream) {
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c1, APLOGNO(02920)
+ H2_SSSN_STRM_MSG(session, frame->hd.stream_id,
+ "on_header unknown stream"));
+ return NGHTTP2_ERR_TEMPORAL_CALLBACK_FAILURE;
+ }
+
+ status = h2_stream_add_header(stream, (const char *)name, namelen,
+ (const char *)value, valuelen);
+ if (status != APR_SUCCESS
+ && (!stream->rtmp
+ || stream->rtmp->http_status == H2_HTTP_STATUS_UNSET)) {
+ return NGHTTP2_ERR_TEMPORAL_CALLBACK_FAILURE;
+ }
+ return 0;
+}
+
+/**
+ * nghttp2 session has received a complete frame. Most are used by nghttp2
+ * for processing of internal state. Some, like HEADER and DATA frames,
+ * we need to act on.
+ */
+static int on_frame_recv_cb(nghttp2_session *ng2s,
+ const nghttp2_frame *frame,
+ void *userp)
+{
+ h2_session *session = (h2_session *)userp;
+ h2_stream *stream;
+ apr_status_t rv = APR_SUCCESS;
+
+ stream = frame->hd.stream_id? get_stream(session, frame->hd.stream_id) : NULL;
+ if (APLOGcdebug(session->c1)) {
+ char buffer[256];
+
+ h2_util_frame_print(frame, buffer, sizeof(buffer)/sizeof(buffer[0]));
+ if (stream) {
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c1,
+ H2_STRM_LOG(APLOGNO(10302), stream,
+ "recv FRAME[%s], frames=%ld/%ld (r/s)"),
+ buffer, (long)session->frames_received,
+ (long)session->frames_sent);
+ }
+ else {
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c1,
+ H2_SSSN_LOG(APLOGNO(03066), session,
+ "recv FRAME[%s], frames=%ld/%ld (r/s)"),
+ buffer, (long)session->frames_received,
+ (long)session->frames_sent);
+ }
+ }
+
+ ++session->frames_received;
+ switch (frame->hd.type) {
+ case NGHTTP2_HEADERS:
+ /* This can be HEADERS for a new stream, defining the request,
+ * or HEADER may come after DATA at the end of a stream as in
+ * trailers */
+ if (stream) {
+ rv = h2_stream_recv_frame(stream, NGHTTP2_HEADERS, frame->hd.flags,
+ frame->hd.length + H2_FRAME_HDR_LEN);
+ }
+ break;
+ case NGHTTP2_DATA:
+ if (stream) {
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c1,
+ H2_STRM_LOG(APLOGNO(02923), stream,
+ "DATA, len=%ld, flags=%d"),
+ (long)frame->hd.length, frame->hd.flags);
+ rv = h2_stream_recv_frame(stream, NGHTTP2_DATA, frame->hd.flags,
+ frame->hd.length + H2_FRAME_HDR_LEN);
+ }
+ break;
+ case NGHTTP2_PRIORITY:
+ session->reprioritize = 1;
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, session->c1,
+ H2_SSSN_STRM_MSG(session, frame->hd.stream_id, "PRIORITY frame "
+ " weight=%d, dependsOn=%d, exclusive=%d"),
+ frame->priority.pri_spec.weight,
+ frame->priority.pri_spec.stream_id,
+ frame->priority.pri_spec.exclusive);
+ break;
+ case NGHTTP2_WINDOW_UPDATE:
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, session->c1,
+ H2_SSSN_STRM_MSG(session, frame->hd.stream_id,
+ "WINDOW_UPDATE incr=%d"),
+ frame->window_update.window_size_increment);
+ break;
+ case NGHTTP2_RST_STREAM:
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c1, APLOGNO(03067)
+ H2_SSSN_STRM_MSG(session, frame->hd.stream_id,
+ "RST_STREAM by client, error=%d"),
+ (int)frame->rst_stream.error_code);
+ if (stream && stream->initiated_on) {
+            /* A stream reset on a push request we sent. Normal, when the
+             * client does not want it. */
+ ++session->pushes_reset;
+ }
+ else {
+            /* A stream reset on a request the client sent us. Could happen
+             * in a browser when the user navigates away or cancels
+             * loading - maybe. */
+ h2_mplx_c1_client_rst(session->mplx, frame->hd.stream_id);
+ }
+ ++session->streams_reset;
+ break;
+ case NGHTTP2_GOAWAY:
+ if (frame->goaway.error_code == 0
+ && frame->goaway.last_stream_id == ((1u << 31) - 1)) {
+ /* shutdown notice. Should not come from a client... */
+ session->remote.accepting = 0;
+ }
+ else {
+ session->remote.accepted_max = frame->goaway.last_stream_id;
+ h2_session_dispatch_event(session, H2_SESSION_EV_REMOTE_GOAWAY,
+ frame->goaway.error_code, NULL);
+ }
+ break;
+ case NGHTTP2_SETTINGS:
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, session->c1,
+ H2_SSSN_MSG(session, "SETTINGS, len=%ld"), (long)frame->hd.length);
+ break;
+ default:
+ if (APLOGctrace2(session->c1)) {
+ char buffer[256];
+
+ h2_util_frame_print(frame, buffer,
+ sizeof(buffer)/sizeof(buffer[0]));
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, session->c1,
+ H2_SSSN_MSG(session, "on_frame_rcv %s"), buffer);
+ }
+ break;
+ }
+
+ if (session->state == H2_SESSION_ST_IDLE) {
+ /* We received a frame, but session is in state IDLE. That means the frame
+ * did not really progress any of the (possibly) open streams. It was a meta
+ * frame, e.g. SETTINGS/WINDOW_UPDATE/unknown/etc.
+ * Remember: IDLE means we cannot send because either there are no streams open or
+ * all open streams are blocked on exhausted WINDOWs for outgoing data.
+ * The more frames we receive that do not change this, the less interested we
+ * become in serving this connection. This is expressed in increasing "idle_delays".
+ * Eventually, the connection will timeout and we'll close it. */
+ session->idle_frames = H2MIN(session->idle_frames + 1, session->frames_received);
+ ap_log_cerror( APLOG_MARK, APLOG_TRACE2, 0, session->c1,
+ H2_SSSN_MSG(session, "session has %ld idle frames"),
+ (long)session->idle_frames);
+ if (session->idle_frames > 10) {
+ apr_size_t busy_frames = H2MAX(session->frames_received - session->idle_frames, 1);
+ int idle_ratio = (int)(session->idle_frames / busy_frames);
+ if (idle_ratio > 100) {
+ session->idle_delay = apr_time_from_msec(H2MIN(1000, idle_ratio));
+ }
+ else if (idle_ratio > 10) {
+ session->idle_delay = apr_time_from_msec(10);
+ }
+ else if (idle_ratio > 1) {
+ session->idle_delay = apr_time_from_msec(1);
+ }
+ else {
+ session->idle_delay = 0;
+ }
+ }
+ }
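+    /* Example: 500 idle frames against 2 busy ones give an idle_ratio of
+     * 250 and an idle_delay of min(1000, 250) = 250 ms. */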
+
+ if (APR_SUCCESS != rv) return NGHTTP2_ERR_PROTO;
+ return 0;
+}
+
+static char immortal_zeros[H2_MAX_PADLEN];
+
+static int on_send_data_cb(nghttp2_session *ngh2,
+ nghttp2_frame *frame,
+ const uint8_t *framehd,
+ size_t length,
+ nghttp2_data_source *source,
+ void *userp)
+{
+ apr_status_t status = APR_SUCCESS;
+ h2_session *session = (h2_session *)userp;
+ int stream_id = (int)frame->hd.stream_id;
+ unsigned char padlen;
+ int eos;
+ h2_stream *stream;
+ apr_bucket *b;
+ apr_off_t len = length;
+
+ (void)ngh2;
+ (void)source;
+ ap_assert(frame->data.padlen <= (H2_MAX_PADLEN+1));
+ padlen = (unsigned char)frame->data.padlen;
+
+ stream = get_stream(session, stream_id);
+ if (!stream) {
+ ap_log_cerror(APLOG_MARK, APLOG_ERR, APR_NOTFOUND, session->c1,
+ APLOGNO(02924)
+ H2_SSSN_STRM_MSG(session, stream_id, "send_data, stream not found"));
+ return NGHTTP2_ERR_CALLBACK_FAILURE;
+ }
+
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, session->c1,
+ H2_STRM_MSG(stream, "send_data_cb for %ld bytes"),
+ (long)length);
+
+ status = h2_c1_io_add_data(&session->io, (const char *)framehd, H2_FRAME_HDR_LEN);
+ if (padlen && status == APR_SUCCESS) {
+ --padlen;
+ status = h2_c1_io_add_data(&session->io, (const char *)&padlen, 1);
+ }
+
+ if (status != APR_SUCCESS) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, session->c1,
+ H2_STRM_MSG(stream, "writing frame header"));
+ return NGHTTP2_ERR_CALLBACK_FAILURE;
+ }
+
+ status = h2_stream_read_to(stream, session->bbtmp, &len, &eos);
+ if (status != APR_SUCCESS) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, session->c1,
+ H2_STRM_MSG(stream, "send_data_cb, reading stream"));
+ apr_brigade_cleanup(session->bbtmp);
+ return NGHTTP2_ERR_CALLBACK_FAILURE;
+ }
+ else if (len != (apr_off_t)length) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, session->c1,
+ H2_STRM_MSG(stream, "send_data_cb, wanted %ld bytes, "
+ "got %ld from stream"), (long)length, (long)len);
+ apr_brigade_cleanup(session->bbtmp);
+ return NGHTTP2_ERR_CALLBACK_FAILURE;
+ }
+
+ if (padlen) {
+ b = apr_bucket_immortal_create(immortal_zeros, padlen,
+ session->c1->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(session->bbtmp, b);
+ }
+
+ status = h2_c1_io_append(&session->io, session->bbtmp);
+ apr_brigade_cleanup(session->bbtmp);
+
+ if (status == APR_SUCCESS) {
+ stream->out_data_frames++;
+ stream->out_data_octets += length;
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, session->c1,
+ H2_STRM_MSG(stream, "sent data length=%ld, total=%ld"),
+ (long)length, (long)stream->out_data_octets);
+ return 0;
+ }
+ else {
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, session->c1,
+ H2_STRM_LOG(APLOGNO(02925), stream, "failed send_data_cb"));
+ return NGHTTP2_ERR_CALLBACK_FAILURE;
+ }
+}
+
+static int on_frame_send_cb(nghttp2_session *ngh2,
+ const nghttp2_frame *frame,
+ void *user_data)
+{
+ h2_session *session = user_data;
+ h2_stream *stream;
+ int stream_id = frame->hd.stream_id;
+
+ ++session->frames_sent;
+ switch (frame->hd.type) {
+ case NGHTTP2_PUSH_PROMISE:
+ /* PUSH_PROMISE we report on the promised stream */
+ stream_id = frame->push_promise.promised_stream_id;
+ break;
+ default:
+ break;
+ }
+
+ stream = get_stream(session, stream_id);
+ if (APLOGcdebug(session->c1)) {
+ char buffer[256];
+
+ h2_util_frame_print(frame, buffer, sizeof(buffer)/sizeof(buffer[0]));
+ if (stream) {
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c1,
+ H2_STRM_LOG(APLOGNO(10303), stream,
+ "sent FRAME[%s], frames=%ld/%ld (r/s)"),
+ buffer, (long)session->frames_received,
+ (long)session->frames_sent);
+ }
+ else {
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c1,
+ H2_SSSN_LOG(APLOGNO(03068), session,
+ "sent FRAME[%s], frames=%ld/%ld (r/s)"),
+ buffer, (long)session->frames_received,
+ (long)session->frames_sent);
+ }
+ }
+
+ if (stream) {
+ h2_stream_send_frame(stream, frame->hd.type, frame->hd.flags,
+ frame->hd.length + H2_FRAME_HDR_LEN);
+ }
+ return 0;
+}
+
+#ifdef H2_NG2_INVALID_HEADER_CB
+static int on_invalid_header_cb(nghttp2_session *ngh2,
+ const nghttp2_frame *frame,
+ const uint8_t *name, size_t namelen,
+ const uint8_t *value, size_t valuelen,
+ uint8_t flags, void *user_data)
+{
+ h2_session *session = user_data;
+ h2_stream *stream;
+
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c1, APLOGNO(03456)
+ H2_SSSN_STRM_MSG(session, frame->hd.stream_id,
+ "invalid header '%s: %s'"),
+ apr_pstrndup(session->pool, (const char *)name, namelen),
+ apr_pstrndup(session->pool, (const char *)value, valuelen));
+ stream = get_stream(session, frame->hd.stream_id);
+ if (stream) {
+ h2_stream_rst(stream, NGHTTP2_PROTOCOL_ERROR);
+ }
+ return 0;
+}
+#endif
+
+static ssize_t select_padding_cb(nghttp2_session *ngh2,
+ const nghttp2_frame *frame,
+ size_t max_payloadlen, void *user_data)
+{
+ h2_session *session = user_data;
+ size_t frame_len = frame->hd.length + H2_FRAME_HDR_LEN; /* the total length without padding */
+ size_t padded_len = frame_len;
+
+    /* Determine the number of padding bytes to append to the frame. Unless
+     * session->padding_always is set, the number may be capped by the
+     * io.write_size that currently applies.
+ */
+ if (session->padding_max) {
+ int n = ap_random_pick(0, session->padding_max);
+ padded_len = H2MIN(max_payloadlen + H2_FRAME_HDR_LEN, frame_len + n);
+ }
+
+ if (padded_len != frame_len) {
+ if (!session->padding_always && session->io.write_size
+ && (padded_len > session->io.write_size)
+ && (frame_len <= session->io.write_size)) {
+ padded_len = session->io.write_size;
+ }
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, session->c1,
+ "select padding from [%d, %d]: %d (frame length: 0x%04x, write size: %d)",
+ (int)frame_len, (int)max_payloadlen+H2_FRAME_HDR_LEN,
+ (int)(padded_len - frame_len), (int)padded_len, (int)session->io.write_size);
+ return padded_len - H2_FRAME_HDR_LEN;
+ }
+ return frame->hd.length;
+}
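+/* Example: with padding_max = 15, a frame_len of 100 and a random pick of
+ * 7, padded_len becomes 107 and 107 - H2_FRAME_HDR_LEN is returned as the
+ * new payload length, i.e. 7 bytes of padding are added unless the
+ * io.write_size cap above applies. */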
+
+#define NGH2_SET_CALLBACK(callbacks, name, fn)\
+nghttp2_session_callbacks_set_##name##_callback(callbacks, fn)
+
+static apr_status_t init_callbacks(conn_rec *c, nghttp2_session_callbacks **pcb)
+{
+ int rv = nghttp2_session_callbacks_new(pcb);
+ if (rv != 0) {
+ ap_log_cerror(APLOG_MARK, APLOG_ERR, 0, c,
+ APLOGNO(02926) "nghttp2_session_callbacks_new: %s",
+ nghttp2_strerror(rv));
+ return APR_EGENERAL;
+ }
+
+ NGH2_SET_CALLBACK(*pcb, send, send_cb);
+ NGH2_SET_CALLBACK(*pcb, on_frame_recv, on_frame_recv_cb);
+ NGH2_SET_CALLBACK(*pcb, on_invalid_frame_recv, on_invalid_frame_recv_cb);
+ NGH2_SET_CALLBACK(*pcb, on_data_chunk_recv, on_data_chunk_recv_cb);
+ NGH2_SET_CALLBACK(*pcb, on_stream_close, on_stream_close_cb);
+ NGH2_SET_CALLBACK(*pcb, on_begin_headers, on_begin_headers_cb);
+ NGH2_SET_CALLBACK(*pcb, on_header, on_header_cb);
+ NGH2_SET_CALLBACK(*pcb, send_data, on_send_data_cb);
+ NGH2_SET_CALLBACK(*pcb, on_frame_send, on_frame_send_cb);
+#ifdef H2_NG2_INVALID_HEADER_CB
+ NGH2_SET_CALLBACK(*pcb, on_invalid_header, on_invalid_header_cb);
+#endif
+ NGH2_SET_CALLBACK(*pcb, select_padding, select_padding_cb);
+ return APR_SUCCESS;
+}
+
+static void update_child_status(h2_session *session, int status,
+ const char *msg, const h2_stream *stream)
+{
+    /* Assume that status code/msg only change when something really
+     * happened, and avoid updating the scoreboard in between. */
+ if (session->last_status_code != status
+ || session->last_status_msg != msg) {
+ char sbuffer[1024];
+ sbuffer[0] = '\0';
+ if (stream) {
+ apr_snprintf(sbuffer, sizeof(sbuffer),
+ ": stream %d, %s %s",
+ stream->id,
+ stream->request? stream->request->method : "",
+ stream->request? stream->request->path : "");
+ }
+ apr_snprintf(session->status, sizeof(session->status),
+ "[%d/%d] %s%s",
+ (int)(session->remote.emitted_count + session->pushes_submitted),
+ (int)session->streams_done,
+ msg? msg : "-", sbuffer);
+ ap_update_child_status_from_server(session->c1->sbh, status,
+ session->c1, session->s);
+ ap_update_child_status_descr(session->c1->sbh, status, session->status);
+ }
+}
+
+static apr_status_t h2_session_shutdown_notice(h2_session *session)
+{
+ apr_status_t status;
+
+ ap_assert(session);
+ if (!session->local.accepting) {
+ return APR_SUCCESS;
+ }
+
+ nghttp2_submit_shutdown_notice(session->ngh2);
+ session->local.accepting = 0;
+ status = nghttp2_session_send(session->ngh2);
+ if (status == APR_SUCCESS) {
+ status = h2_c1_io_assure_flushed(&session->io);
+ }
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c1,
+ H2_SSSN_LOG(APLOGNO(03457), session, "sent shutdown notice"));
+ return status;
+}
+
+static apr_status_t h2_session_shutdown(h2_session *session, int error,
+ const char *msg, int force_close)
+{
+ apr_status_t status = APR_SUCCESS;
+
+ ap_assert(session);
+ if (session->local.shutdown) {
+ return APR_SUCCESS;
+ }
+
+ if (error && !msg) {
+ if (APR_STATUS_IS_EPIPE(error)) {
+ msg = "remote close";
+ }
+ }
+
+ if (error || force_close) {
+ /* not a graceful shutdown, we want to leave...
+ * Do not start further streams that are waiting to be scheduled.
+         * Find out the max stream id that we have processed or
+ * are still actively working on.
+ * Remove all streams greater than this number without submitting
+ * a RST_STREAM frame, since that should be clear from the GOAWAY
+ * we send. */
+ session->local.accepted_max = h2_mplx_c1_shutdown(session->mplx);
+ session->local.error = error;
+ session->local.error_msg = msg;
+ }
+ else {
+ /* graceful shutdown. we will continue processing all streams
+ * we have, but no longer accept new ones. Report the max stream
+ * we have received and discard all new ones. */
+ }
+
+ session->local.accepting = 0;
+ session->local.shutdown = 1;
+ if (!session->c1->aborted) {
+ nghttp2_submit_goaway(session->ngh2, NGHTTP2_FLAG_NONE,
+ session->local.accepted_max,
+ error, (uint8_t*)msg, msg? strlen(msg):0);
+ status = nghttp2_session_send(session->ngh2);
+ if (status == APR_SUCCESS) {
+ status = h2_c1_io_assure_flushed(&session->io);
+ }
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c1,
+ H2_SSSN_LOG(APLOGNO(03069), session,
+ "sent GOAWAY, err=%d, msg=%s"), error, msg? msg : "");
+ }
+ h2_session_dispatch_event(session, H2_SESSION_EV_LOCAL_GOAWAY, error, msg);
+ return status;
+}
+
+static apr_status_t session_cleanup(h2_session *session, const char *trigger)
+{
+ conn_rec *c = session->c1;
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c,
+ H2_SSSN_MSG(session, "pool_cleanup"));
+
+ if (session->state != H2_SESSION_ST_DONE
+ && session->state != H2_SESSION_ST_INIT) {
+ /* Not good. The connection is being torn down and we have
+ * not sent a goaway. This is considered a protocol error and
+ * the client has to assume that any streams "in flight" may have
+ * been processed and are not safe to retry.
+         * As clients with an idle connection may only learn about a closed
+ * connection when sending the next request, this has the effect
+ * that at least this one request will fail.
+ */
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, c,
+ H2_SSSN_LOG(APLOGNO(03199), session,
+ "connection disappeared without proper "
+ "goodbye, clients will be confused, should not happen"));
+ }
+
+ transit(session, trigger, H2_SESSION_ST_CLEANUP);
+ h2_mplx_c1_destroy(session->mplx);
+ session->mplx = NULL;
+
+ ap_assert(session->ngh2);
+ nghttp2_session_del(session->ngh2);
+ session->ngh2 = NULL;
+ h2_conn_ctx_detach(c);
+
+ return APR_SUCCESS;
+}
+
+static apr_status_t session_pool_cleanup(void *data)
+{
+ conn_rec *c = data;
+ h2_conn_ctx_t *conn_ctx = h2_conn_ctx_get(c);
+ h2_session *session = conn_ctx? conn_ctx->session : NULL;
+
+ if (session) {
+ int mpm_state = 0;
+ int level;
+
+ ap_mpm_query(AP_MPMQ_MPM_STATE, &mpm_state);
+ level = (AP_MPMQ_STOPPING == mpm_state)? APLOG_DEBUG : APLOG_WARNING;
+ /* if the session is still there, now is the last chance
+ * to perform cleanup. Normally, cleanup should have happened
+ * earlier in the connection pre_close.
+         * However, when the server is stopping, it may shut down connections
+         * without running the pre_close hooks. Do not warn about that. */
+ ap_log_cerror(APLOG_MARK, level, 0, c,
+ H2_SSSN_LOG(APLOGNO(10020), session,
+ "session cleanup triggered by pool cleanup. "
+ "this should have happened earlier already."));
+ return session_cleanup(session, "pool cleanup");
+ }
+ return APR_SUCCESS;
+}
+
+static /* atomic */ apr_uint32_t next_id;
+
+apr_status_t h2_session_create(h2_session **psession, conn_rec *c, request_rec *r,
+ server_rec *s, h2_workers *workers)
+{
+ nghttp2_session_callbacks *callbacks = NULL;
+ nghttp2_option *options = NULL;
+ uint32_t n;
+ int thread_num;
+ apr_pool_t *pool = NULL;
+ h2_session *session;
+ h2_stream *stream0;
+ apr_status_t status;
+ int rv;
+
+ *psession = NULL;
+ apr_pool_create(&pool, c->pool);
+ apr_pool_tag(pool, "h2_session");
+ session = apr_pcalloc(pool, sizeof(h2_session));
+ if (!session) {
+ return APR_ENOMEM;
+ }
+
+ *psession = session;
+ /* c->id does not give a unique id for the lifetime of the session.
+ * mpms like event change c->id when re-activating a keepalive
+ * connection based on the child_num+thread_num of the worker
+ * processing it.
+     * We'd like to have an id that remains constant and unique because
+     * h2 streams can live through keepalive periods. While a duplicate id
+     * will not lead to processing failures, it will confuse log analysis.
+ */
+#if AP_MODULE_MAGIC_AT_LEAST(20211221, 8)
+ ap_sb_get_child_thread(c->sbh, &session->child_num, &thread_num);
+#else
+ (void)thread_num;
+ session->child_num = (int)getpid();
+#endif
+ session->id = apr_atomic_inc32(&next_id);
+ session->c1 = c;
+ session->r = r;
+ session->s = s;
+ session->pool = pool;
+ session->workers = workers;
+
+ session->state = H2_SESSION_ST_INIT;
+ session->local.accepting = 1;
+ session->remote.accepting = 1;
+
+ session->max_stream_count = h2_config_sgeti(s, H2_CONF_MAX_STREAMS);
+ session->max_stream_mem = h2_config_sgeti(s, H2_CONF_STREAM_MAX_MEM);
+
+ session->out_c1_blocked = h2_iq_create(session->pool, (int)session->max_stream_count);
+ session->ready_to_process = h2_iq_create(session->pool, (int)session->max_stream_count);
+
+ session->monitor = apr_pcalloc(pool, sizeof(h2_stream_monitor));
+ session->monitor->ctx = session;
+ session->monitor->on_state_enter = on_stream_state_enter;
+ session->monitor->on_state_event = on_stream_state_event;
+ session->monitor->on_event = on_stream_event;
+
+ stream0 = h2_stream_create(0, session->pool, session, NULL, 0);
+ stream0->c2 = session->c1; /* stream0's connection is the main connection */
+ session->mplx = h2_mplx_c1_create(session->child_num, session->id,
+ stream0, s, session->pool, workers);
+ if (!session->mplx) {
+ apr_pool_destroy(pool);
+ return APR_ENOTIMPL;
+ }
+
+ h2_c1_io_init(&session->io, session);
+ session->padding_max = h2_config_sgeti(s, H2_CONF_PADDING_BITS);
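+ /* the configured value is a number of bits; translate it into the
+ * maximum number of padding bytes, (2^bits - 1) */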
+ if (session->padding_max) {
+ session->padding_max = (0x01 << session->padding_max) - 1;
+ }
+ session->padding_always = h2_config_sgeti(s, H2_CONF_PADDING_ALWAYS);
+ session->bbtmp = apr_brigade_create(session->pool, c->bucket_alloc);
+
+ status = init_callbacks(c, &callbacks);
+ if (status != APR_SUCCESS) {
+ ap_log_cerror(APLOG_MARK, APLOG_ERR, status, c, APLOGNO(02927)
+ "nghttp2: error in init_callbacks");
+ apr_pool_destroy(pool);
+ return status;
+ }
+
+ rv = nghttp2_option_new(&options);
+ if (rv != 0) {
+ ap_log_cerror(APLOG_MARK, APLOG_ERR, APR_EGENERAL, c,
+ APLOGNO(02928) "nghttp2_option_new: %s",
+ nghttp2_strerror(rv));
+ apr_pool_destroy(pool);
+ return APR_EGENERAL;
+ }
+ nghttp2_option_set_peer_max_concurrent_streams(options, (uint32_t)session->max_stream_count);
+ /* We need to handle window updates ourselves, otherwise we
+ * get flooded by nghttp2. */
+ nghttp2_option_set_no_auto_window_update(options, 1);
+#ifdef H2_NG2_NO_CLOSED_STREAMS
+ /* We do not want nghttp2 to keep information about closed streams as
+ * that accumulates memory on long connections. As a consequence, PRIORITY
+ * settings that refer to older, closed streams no longer work. */
+ nghttp2_option_set_no_closed_streams(options, 1);
+#endif
+#ifdef H2_NG2_RFC9113_STRICTNESS
+ /* nghttp2 v1.50.0 introduced the RFC 9113 strictness checks on
+ * leading/trailing whitespace in fields. By default, it RSTs streams
+ * carrying such whitespace. We do not want that; we want to strip the
+ * whitespace and handle the fields, just like the HTTP/1.1 parser does. */
+ nghttp2_option_set_no_rfc9113_leading_and_trailing_ws_validation(options, 1);
+#endif
+ rv = nghttp2_session_server_new2(&session->ngh2, callbacks,
+ session, options);
+ nghttp2_session_callbacks_del(callbacks);
+ nghttp2_option_del(options);
+
+ if (rv != 0) {
+ ap_log_cerror(APLOG_MARK, APLOG_ERR, APR_EGENERAL, c,
+ APLOGNO(02929) "nghttp2_session_server_new: %s",
+ nghttp2_strerror(rv));
+ apr_pool_destroy(pool);
+ return APR_ENOMEM;
+ }
+
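+ /* the push diary remembers pushes already promised, so duplicates can be avoided */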
+ n = h2_config_sgeti(s, H2_CONF_PUSH_DIARY_SIZE);
+ session->push_diary = h2_push_diary_create(session->pool, n);
+
+ if (APLOGcdebug(c)) {
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, c,
+ H2_SSSN_LOG(APLOGNO(03200), session,
+ "created, max_streams=%d, stream_mem=%d, "
+ "workers_limit=%d, workers_max=%d, "
+ "push_diary(type=%d,N=%d)"),
+ (int)session->max_stream_count,
+ (int)session->max_stream_mem,
+ session->mplx->processing_limit,
+ session->mplx->processing_max,
+ session->push_diary->dtype,
+ (int)session->push_diary->N);
+ }
+
+ apr_pool_pre_cleanup_register(pool, c, session_pool_cleanup);
+
+ return APR_SUCCESS;
+}
+
+static apr_status_t h2_session_start(h2_session *session, int *rv)
+{
+ apr_status_t status = APR_SUCCESS;
+ nghttp2_settings_entry settings[3];
+ size_t slen;
+ int win_size;
+
+ ap_assert(session);
+ /* Start the conversation by submitting our SETTINGS frame */
+ *rv = 0;
+ if (session->r) {
+ const char *s, *cs;
+ apr_size_t dlen;
+ h2_stream * stream;
+
+ /* 'h2c' mode: we should have a 'HTTP2-Settings' header with
+ * base64 encoded client settings. */
+ s = apr_table_get(session->r->headers_in, "HTTP2-Settings");
+ if (!s) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, APR_EINVAL, session->r,
+ APLOGNO(02931)
+ "HTTP2-Settings header missing in request");
+ return APR_EINVAL;
+ }
+ cs = NULL;
+ dlen = h2_util_base64url_decode(&cs, s, session->pool);
+
+ if (APLOGrdebug(session->r)) {
+ char buffer[128];
+ h2_util_hex_dump(buffer, 128, (char*)cs, dlen);
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, session->r, APLOGNO(03070)
+ "upgrading h2c session with HTTP2-Settings: %s -> %s (%d)",
+ s, buffer, (int)dlen);
+ }
+
+ *rv = nghttp2_session_upgrade(session->ngh2, (uint8_t*)cs, dlen, NULL);
+ if (*rv != 0) {
+ status = APR_EINVAL;
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, status, session->r,
+ APLOGNO(02932) "nghttp2_session_upgrade: %s",
+ nghttp2_strerror(*rv));
+ return status;
+ }
+
+ /* Now we need to auto-open stream 1 for the request we got. */
+ stream = h2_session_open_stream(session, 1, 0);
+ if (!stream) {
+ status = APR_EGENERAL;
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, status, session->r,
+ APLOGNO(02933) "open stream 1: %s",
+ nghttp2_strerror(*rv));
+ return status;
+ }
+
+ status = h2_stream_set_request_rec(stream, session->r, 1);
+ if (status != APR_SUCCESS) {
+ return status;
+ }
+ }
+
+ slen = 0;
+ settings[slen].settings_id = NGHTTP2_SETTINGS_MAX_CONCURRENT_STREAMS;
+ settings[slen].value = (uint32_t)session->max_stream_count;
+ ++slen;
+ win_size = h2_config_sgeti(session->s, H2_CONF_WIN_SIZE);
+ if (win_size != H2_INITIAL_WINDOW_SIZE) {
+ settings[slen].settings_id = NGHTTP2_SETTINGS_INITIAL_WINDOW_SIZE;
+ settings[slen].value = win_size;
+ ++slen;
+ }
+
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, session->c1,
+ H2_SSSN_LOG(APLOGNO(03201), session,
+ "start, INITIAL_WINDOW_SIZE=%ld, MAX_CONCURRENT_STREAMS=%d"),
+ (long)win_size, (int)session->max_stream_count);
+ *rv = nghttp2_submit_settings(session->ngh2, NGHTTP2_FLAG_NONE,
+ settings, slen);
+ if (*rv != 0) {
+ status = APR_EGENERAL;
+ ap_log_cerror(APLOG_MARK, APLOG_ERR, status, session->c1,
+ H2_SSSN_LOG(APLOGNO(02935), session,
+ "nghttp2_submit_settings: %s"), nghttp2_strerror(*rv));
+ }
+ else {
+ /* use maximum possible value for connection window size. We are only
+ * interested in per-stream flow control, which has the initial window
+ * size configured above.
+ * Therefore, for our use, the connection window can only get in the
+ * way. Example: if we allow 100 streams with a 32KB window each, we
+ * buffer up to 3.2 MB of data. Unless we do separate connection window
+ * interim updates, any smaller connection window will lead to blocking
+ * in DATA flow.
+ */
+ *rv = nghttp2_submit_window_update(session->ngh2, NGHTTP2_FLAG_NONE,
+ 0, NGHTTP2_MAX_WINDOW_SIZE - win_size);
+ if (*rv != 0) {
+ status = APR_EGENERAL;
+ ap_log_cerror(APLOG_MARK, APLOG_ERR, status, session->c1,
+ H2_SSSN_LOG(APLOGNO(02970), session,
+ "nghttp2_submit_window_update: %s"),
+ nghttp2_strerror(*rv));
+ }
+ }
+
+ return status;
+}
+
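+/* Submit a push promise for `push` on the initiating stream `is` and open a
+ * new, server-initiated stream to process it. Returns NULL on failure. */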
+struct h2_stream *h2_session_push(h2_session *session, h2_stream *is,
+ h2_push *push)
+{
+ h2_stream *stream;
+ h2_ngheader *ngh;
+ apr_status_t status;
+ int nid = 0;
+
+ status = h2_req_create_ngheader(&ngh, is->pool, push->req);
+ if (status == APR_SUCCESS) {
+ nid = nghttp2_submit_push_promise(session->ngh2, 0, is->id,
+ ngh->nv, ngh->nvlen, NULL);
+ }
+ if (status != APR_SUCCESS || nid <= 0) {
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, session->c1,
+ H2_STRM_LOG(APLOGNO(03075), is,
+ "submitting push promise fail: %s"), nghttp2_strerror(nid));
+ return NULL;
+ }
+ ++session->pushes_promised;
+
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c1,
+ H2_STRM_LOG(APLOGNO(03076), is, "SERVER_PUSH %d for %s %s on %d"),
+ nid, push->req->method, push->req->path, is->id);
+
+ stream = h2_session_open_stream(session, nid, is->id);
+ if (!stream) {
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c1,
+ H2_STRM_LOG(APLOGNO(03077), is,
+ "failed to create stream obj %d"), nid);
+ /* kill the push_promise */
+ nghttp2_submit_rst_stream(session->ngh2, NGHTTP2_FLAG_NONE, nid,
+ NGHTTP2_INTERNAL_ERROR);
+ return NULL;
+ }
+
+ h2_session_set_prio(session, stream, push->priority);
+ h2_stream_set_request(stream, push->req);
+ return stream;
+}
+
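+/* clamp a computed weight into nghttp2's valid [NGHTTP2_MIN_WEIGHT, NGHTTP2_MAX_WEIGHT] range */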
+static int valid_weight(float f)
+{
+ int w = (int)f;
+ return (w < NGHTTP2_MIN_WEIGHT? NGHTTP2_MIN_WEIGHT :
+ (w > NGHTTP2_MAX_WEIGHT)? NGHTTP2_MAX_WEIGHT : w);
+}
+
+apr_status_t h2_session_set_prio(h2_session *session, h2_stream *stream,
+ const h2_priority *prio)
+{
+ apr_status_t status = APR_SUCCESS;
+ nghttp2_stream *s_grandpa, *s_parent, *s;
+
+ if (prio == NULL) {
+ /* we treat this as a NOP */
+ return APR_SUCCESS;
+ }
+ s = nghttp2_session_find_stream(session->ngh2, stream->id);
+ if (!s) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, session->c1,
+ H2_STRM_MSG(stream, "lookup of nghttp2_stream failed"));
+ return APR_EINVAL;
+ }
+
+ s_parent = nghttp2_stream_get_parent(s);
+ if (s_parent) {
+ nghttp2_priority_spec ps;
+ int id_parent, id_grandpa, w_parent, w;
+ int rv = 0;
+ const char *ptype = "AFTER";
+ h2_dependency dep = prio->dependency;
+
+ id_parent = nghttp2_stream_get_stream_id(s_parent);
+ s_grandpa = nghttp2_stream_get_parent(s_parent);
+ if (s_grandpa) {
+ id_grandpa = nghttp2_stream_get_stream_id(s_grandpa);
+ }
+ else {
+ /* parent of parent does not exist,
+ * only possible if parent == root */
+ dep = H2_DEPENDANT_AFTER;
+ }
+
+ switch (dep) {
+ case H2_DEPENDANT_INTERLEAVED:
+ /* PUSHed stream is to be interleaved with initiating stream.
+ * It is made a sibling of the initiating stream and gets a
+ * proportional weight [1, MAX_WEIGHT] of the initiating
+ * stream weight.
+ */
+ ptype = "INTERLEAVED";
+ w_parent = nghttp2_stream_get_weight(s_parent);
+ w = valid_weight(w_parent * ((float)prio->weight / NGHTTP2_MAX_WEIGHT));
+ nghttp2_priority_spec_init(&ps, id_grandpa, w, 0);
+ break;
+
+ case H2_DEPENDANT_BEFORE:
+ /* PUSHed stream is to be sent BEFORE the initiating stream.
+ * It gets the same weight as the initiating stream, replaces
+ * that stream in the dependency tree and has the initiating
+ * stream as child.
+ */
+ ptype = "BEFORE";
+ w = w_parent = nghttp2_stream_get_weight(s_parent);
+ nghttp2_priority_spec_init(&ps, stream->id, w_parent, 0);
+ id_grandpa = nghttp2_stream_get_stream_id(s_grandpa);
+ rv = nghttp2_session_change_stream_priority(session->ngh2, id_parent, &ps);
+ if (rv < 0) {
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c1, APLOGNO(03202)
+ H2_SSSN_STRM_MSG(session, id_parent,
+ "PUSH BEFORE, weight=%d, depends=%d, returned=%d"),
+ ps.weight, ps.stream_id, rv);
+ return APR_EGENERAL;
+ }
+ nghttp2_priority_spec_init(&ps, id_grandpa, w, 0);
+ break;
+
+ case H2_DEPENDANT_AFTER:
+ /* The PUSHed stream is to be sent after the initiating stream.
+ * Give it the specified weight and let it depend on the initiating
+ * stream.
+ */
+ /* fall through, it's the default */
+ default:
+ nghttp2_priority_spec_init(&ps, id_parent, valid_weight(prio->weight), 0);
+ break;
+ }
+
+
+ rv = nghttp2_session_change_stream_priority(session->ngh2, stream->id, &ps);
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c1,
+ H2_STRM_LOG(APLOGNO(03203), stream,
+ "PUSH %s, weight=%d, depends=%d, returned=%d"),
+ ptype, ps.weight, ps.stream_id, rv);
+ status = (rv < 0)? APR_EGENERAL : APR_SUCCESS;
+ }
+
+ return status;
+}
+
+int h2_session_push_enabled(h2_session *session)
+{
+ /* push is enabled iff we can, the client can and the client wants it */
+ return (session->remote.accepting /* no remote GOAWAY received yet */
+ && h2_config_sgeti(session->s, H2_CONF_PUSH)
+ && nghttp2_session_get_remote_settings(session->ngh2,
+ NGHTTP2_SETTINGS_ENABLE_PUSH));
+}
+
+static int h2_session_want_send(h2_session *session)
+{
+ return nghttp2_session_want_write(session->ngh2)
+ || h2_c1_io_pending(&session->io);
+}
+
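+/* Let nghttp2 generate all frames it wants to send and hand them to the c1
+ * output; flush when the io layer asks for it, otherwise pass the collected
+ * data at the end. Errors are dispatched as session events. */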
+static apr_status_t h2_session_send(h2_session *session)
+{
+ int ngrv, pending = 0;
+ apr_status_t rv = APR_SUCCESS;
+
+ while (nghttp2_session_want_write(session->ngh2)) {
+ ngrv = nghttp2_session_send(session->ngh2);
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, session->c1,
+ "nghttp2_session_send: %d", (int)ngrv);
+ pending = 1;
+ if (ngrv != 0 && ngrv != NGHTTP2_ERR_WOULDBLOCK) {
+ if (nghttp2_is_fatal(ngrv)) {
+ h2_session_dispatch_event(session, H2_SESSION_EV_PROTO_ERROR,
+ ngrv, nghttp2_strerror(ngrv));
+ rv = APR_EGENERAL;
+ goto cleanup;
+ }
+ }
+ if (h2_c1_io_needs_flush(&session->io)) {
+ rv = h2_c1_io_assure_flushed(&session->io);
+ pending = 0;
+ }
+ }
+ if (pending) {
+ rv = h2_c1_io_pass(&session->io);
+ }
+cleanup:
+ if (rv != APR_SUCCESS) {
+ h2_session_dispatch_event(session, H2_SESSION_EV_CONN_ERROR, rv, NULL);
+ }
+ return rv;
+}
+
+/**
+ * A stream's input state has changed.
+ */
+static void on_stream_input(void *ctx, h2_stream *stream)
+{
+ h2_session *session = ctx;
+
+ ap_assert(stream);
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, session->c1,
+ H2_STRM_MSG(stream, "on_input change"));
+ update_child_status(session, SERVER_BUSY_READ, "read", stream);
+ if (stream->id == 0) {
+ /* input on primary connection available? read */
+ h2_c1_read(session);
+ }
+ else {
+ h2_stream_on_input_change(stream);
+ }
+}
+
+/**
+ * A stream's output state has changed.
+ */
+static void on_stream_output(void *ctx, h2_stream *stream)
+{
+ h2_session *session = ctx;
+
+ ap_assert(stream);
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, session->c1,
+ H2_STRM_MSG(stream, "on_output change"));
+ if (stream->id != 0) {
+ update_child_status(session, SERVER_BUSY_WRITE, "write", stream);
+ h2_stream_on_output_change(stream);
+ }
+}
+
+
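+/* Short names for h2_session_state values, indexed by state
+ * (see h2_session_state_str()). */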
+static const char *StateNames[] = {
+ "INIT", /* H2_SESSION_ST_INIT */
+ "DONE", /* H2_SESSION_ST_DONE */
+ "IDLE", /* H2_SESSION_ST_IDLE */
+ "BUSY", /* H2_SESSION_ST_BUSY */
+ "WAIT", /* H2_SESSION_ST_WAIT */
+ "CLEANUP", /* H2_SESSION_ST_CLEANUP */
+};
+
+const char *h2_session_state_str(h2_session_state state)
+{
+ if (state >= (sizeof(StateNames)/sizeof(StateNames[0]))) {
+ return "unknown";
+ }
+ return StateNames[state];
+}
+
+static void transit(h2_session *session, const char *action, h2_session_state nstate)
+{
+ int ostate;
+
+ if (session->state != nstate) {
+ ostate = session->state;
+
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c1,
+ H2_SSSN_LOG(APLOGNO(03078), session,
+ "transit [%s] -- %s --> [%s]"),
+ h2_session_state_str(ostate), action,
+ h2_session_state_str(nstate));
+
+ switch (session->state) {
+ case H2_SESSION_ST_IDLE:
+ if (!session->remote.emitted_count) {
+ /* on fresh connections, with async mpm, do not return
+ * chance to arrive (and the connection to leave the IDLE state).
+ * chance to arrive (und connection leaving IDLE state).
+ * If we return to mpm right away, this connection has the
+ * same chance of being cleaned up by the mpm as connections
+ * that already served requests - not fair. */
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, session->c1,
+ H2_SSSN_LOG("", session, "enter idle"));
+ }
+ else {
+ /* normal keepalive setup */
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, session->c1,
+ H2_SSSN_LOG("", session, "enter keepalive"));
+ }
+ session->state = nstate;
+ break;
+ case H2_SESSION_ST_DONE:
+ break;
+ default:
+ /* nop */
+ session->state = nstate;
+ break;
+ }
+ }
+}
+
+static void h2_session_ev_init(h2_session *session, int arg, const char *msg)
+{
+ switch (session->state) {
+ case H2_SESSION_ST_INIT:
+ transit(session, "init", H2_SESSION_ST_BUSY);
+ break;
+ default:
+ /* nop */
+ break;
+ }
+}
+
+static void h2_session_ev_input_pending(h2_session *session, int arg, const char *msg)
+{
+ switch (session->state) {
+ case H2_SESSION_ST_INIT:
+ case H2_SESSION_ST_IDLE:
+ case H2_SESSION_ST_WAIT:
+ transit(session, "input read", H2_SESSION_ST_BUSY);
+ break;
+ default:
+ break;
+ }
+}
+
+static void h2_session_ev_input_exhausted(h2_session *session, int arg, const char *msg)
+{
+ switch (session->state) {
+ case H2_SESSION_ST_BUSY:
+ if (!h2_session_want_send(session)) {
+ if (session->open_streams == 0) {
+ transit(session, "input exhausted, no streams", H2_SESSION_ST_IDLE);
+ }
+ else {
+ transit(session, "input exhausted", H2_SESSION_ST_WAIT);
+ }
+ }
+ break;
+ case H2_SESSION_ST_WAIT:
+ if (session->open_streams == 0) {
+ transit(session, "input exhausted, no streams", H2_SESSION_ST_IDLE);
+ }
+ break;
+ default:
+ break;
+ }
+}
+
+static void h2_session_ev_local_goaway(h2_session *session, int arg, const char *msg)
+{
+ cleanup_unprocessed_streams(session);
+ transit(session, "local goaway", H2_SESSION_ST_DONE);
+}
+
+static void h2_session_ev_remote_goaway(h2_session *session, int arg, const char *msg)
+{
+ if (!session->remote.shutdown) {
+ session->remote.error = arg;
+ session->remote.accepting = 0;
+ session->remote.shutdown = 1;
+ cleanup_unprocessed_streams(session);
+ transit(session, "remote goaway", H2_SESSION_ST_DONE);
+ }
+}
+
+static void h2_session_ev_conn_error(h2_session *session, int arg, const char *msg)
+{
+ switch (session->state) {
+ case H2_SESSION_ST_INIT:
+ case H2_SESSION_ST_DONE:
+ /* just leave */
+ transit(session, "conn error", H2_SESSION_ST_DONE);
+ break;
+
+ default:
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c1,
+ H2_SSSN_LOG(APLOGNO(03401), session,
+ "conn error -> shutdown"));
+ h2_session_shutdown(session, arg, msg, 0);
+ break;
+ }
+}
+
+static void h2_session_ev_proto_error(h2_session *session, int arg, const char *msg)
+{
+ if (!session->local.shutdown) {
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c1,
+ H2_SSSN_LOG(APLOGNO(03402), session,
+ "proto error -> shutdown"));
+ h2_session_shutdown(session, arg, msg, 0);
+ }
+}
+
+static void h2_session_ev_conn_timeout(h2_session *session, int arg, const char *msg)
+{
+ transit(session, msg, H2_SESSION_ST_DONE);
+ if (!session->local.shutdown) {
+ h2_session_shutdown(session, arg, msg, 1);
+ }
+}
+
+static void h2_session_ev_ngh2_done(h2_session *session, int arg, const char *msg)
+{
+ switch (session->state) {
+ case H2_SESSION_ST_DONE:
+ /* nop */
+ break;
+ default:
+ transit(session, "nghttp2 done", H2_SESSION_ST_DONE);
+ break;
+ }
+}
+
+static void h2_session_ev_mpm_stopping(h2_session *session, int arg, const char *msg)
+{
+ switch (session->state) {
+ case H2_SESSION_ST_DONE:
+ /* nop */
+ break;
+ default:
+ h2_session_shutdown_notice(session);
+#if !AP_MODULE_MAGIC_AT_LEAST(20120211, 110)
+ h2_workers_graceful_shutdown(session->workers);
+#endif
+ break;
+ }
+}
+
+static void h2_session_ev_pre_close(h2_session *session, int arg, const char *msg)
+{
+ h2_session_shutdown(session, arg, msg, 1);
+}
+
+static void h2_session_ev_no_more_streams(h2_session *session)
+{
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c1,
+ H2_SSSN_LOG(APLOGNO(10304), session, "no more streams"));
+ switch (session->state) {
+ case H2_SESSION_ST_BUSY:
+ case H2_SESSION_ST_WAIT:
+ if (!h2_session_want_send(session)) {
+ if (session->local.accepting) {
+ /* We wait for new frames on c1 only. */
+ transit(session, "all streams done", H2_SESSION_ST_IDLE);
+ }
+ else {
+ /* We are no longer accepting new streams.
+ * Time to leave. */
+ h2_session_shutdown(session, 0, "done", 0);
+ transit(session, "c1 done after goaway", H2_SESSION_ST_DONE);
+ }
+ }
+ else {
+ transit(session, "no more streams", H2_SESSION_ST_WAIT);
+ }
+ break;
+ default:
+ /* nop */
+ break;
+ }
+}
+
+static void ev_stream_created(h2_session *session, h2_stream *stream)
+{
+ /* nop */
+}
+
+static void ev_stream_open(h2_session *session, h2_stream *stream)
+{
+ if (H2_STREAM_CLIENT_INITIATED(stream->id)) {
+ ++session->remote.emitted_count;
+ if (stream->id > session->remote.emitted_max) {
+ session->remote.emitted_max = stream->id;
+ session->local.accepted_max = stream->id;
+ }
+ }
+ else {
+ if (stream->id > session->local.emitted_max) {
+ ++session->local.emitted_count;
+ session->remote.emitted_max = stream->id;
+ }
+ }
+ /* Stream state OPEN means we have received all request headers
+ * and can start processing the stream. */
+ h2_iq_append(session->ready_to_process, stream->id);
+ update_child_status(session, SERVER_BUSY_READ, "schedule", stream);
+}
+
+static void ev_stream_closed(h2_session *session, h2_stream *stream)
+{
+ apr_bucket *b;
+
+ if (H2_STREAM_CLIENT_INITIATED(stream->id)
+ && (stream->id > session->local.completed_max)) {
+ session->local.completed_max = stream->id;
+ }
+ /* The stream might have data in the buffers of the main connection.
+ * We can only free the allocated resources once all had been written.
+ * Send a special bucket on the connection that gets destroyed when
+ * all preceding data has been handled. On its destruction, it is safe
+ * to purge all resources of the stream. */
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, session->c1,
+ H2_STRM_MSG(stream, "adding h2_eos to c1 out"));
+ b = h2_bucket_eos_create(session->c1->bucket_alloc, stream);
+ APR_BRIGADE_INSERT_TAIL(session->bbtmp, b);
+ h2_c1_io_append(&session->io, session->bbtmp);
+ apr_brigade_cleanup(session->bbtmp);
+}
+
+static void on_stream_state_enter(void *ctx, h2_stream *stream)
+{
+ h2_session *session = ctx;
+
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, session->c1,
+ H2_STRM_MSG(stream, "entered state"));
+ switch (stream->state) {
+ case H2_SS_IDLE: /* stream was created */
+ ev_stream_created(session, stream);
+ break;
+ case H2_SS_OPEN: /* stream has request headers */
+ case H2_SS_RSVD_L:
+ ev_stream_open(session, stream);
+ break;
+ case H2_SS_CLOSED_L: /* stream output was closed, but remote end is not */
+ /* If the stream is still being processed, it could still be reading
+ * its input (theoretically; HTTP request handling normally does not).
+ * But when processing is done, we need to cancel the stream as no
+ * one is consuming the input any longer.
+ * This happens, for example, on a large POST when the response
+ * is ready early due to the POST being denied. */
+ if (!h2_mplx_c1_stream_is_running(session->mplx, stream)) {
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c1,
+ H2_STRM_LOG(APLOGNO(10305), stream, "remote close missing"));
+ nghttp2_submit_rst_stream(session->ngh2, NGHTTP2_FLAG_NONE,
+ stream->id, H2_ERR_NO_ERROR);
+ }
+ break;
+ case H2_SS_CLOSED_R: /* stream input was closed */
+ break;
+ case H2_SS_CLOSED: /* stream in+out were closed */
+ ev_stream_closed(session, stream);
+ break;
+ case H2_SS_CLEANUP:
+ nghttp2_session_set_stream_user_data(session->ngh2, stream->id, NULL);
+ h2_mplx_c1_stream_cleanup(session->mplx, stream, &session->open_streams);
+ ++session->streams_done;
+ update_child_status(session, SERVER_BUSY_WRITE, "done", stream);
+ if (session->open_streams == 0) {
+ h2_session_dispatch_event(session, H2_SESSION_EV_NO_MORE_STREAMS,
+ 0, "stream done");
+ }
+ break;
+ default:
+ break;
+ }
+}
+
+static void on_stream_event(void *ctx, h2_stream *stream, h2_stream_event_t ev)
+{
+ h2_session *session = ctx;
+ switch (ev) {
+ case H2_SEV_IN_DATA_PENDING:
+ session->input_flushed = 1;
+ break;
+ case H2_SEV_OUT_C1_BLOCK:
+ h2_iq_append(session->out_c1_blocked, stream->id);
+ break;
+ default:
+ /* NOP */
+ break;
+ }
+}
+
+static void on_stream_state_event(void *ctx, h2_stream *stream,
+ h2_stream_event_t ev)
+{
+ h2_session *session = ctx;
+ switch (ev) {
+ case H2_SEV_CANCELLED:
+ if (session->state != H2_SESSION_ST_DONE) {
+ nghttp2_submit_rst_stream(session->ngh2, NGHTTP2_FLAG_NONE,
+ stream->id, stream->rst_error);
+ }
+ break;
+ default:
+ /* NOP */
+ break;
+ }
+}
+
+void h2_session_dispatch_event(h2_session *session, h2_session_event_t ev,
+ apr_status_t arg, const char *msg)
+{
+ switch (ev) {
+ case H2_SESSION_EV_INIT:
+ h2_session_ev_init(session, arg, msg);
+ break;
+ case H2_SESSION_EV_INPUT_PENDING:
+ h2_session_ev_input_pending(session, arg, msg);
+ break;
+ case H2_SESSION_EV_INPUT_EXHAUSTED:
+ h2_session_ev_input_exhausted(session, arg, msg);
+ break;
+ case H2_SESSION_EV_LOCAL_GOAWAY:
+ h2_session_ev_local_goaway(session, arg, msg);
+ break;
+ case H2_SESSION_EV_REMOTE_GOAWAY:
+ h2_session_ev_remote_goaway(session, arg, msg);
+ break;
+ case H2_SESSION_EV_CONN_ERROR:
+ h2_session_ev_conn_error(session, arg, msg);
+ break;
+ case H2_SESSION_EV_PROTO_ERROR:
+ h2_session_ev_proto_error(session, arg, msg);
+ break;
+ case H2_SESSION_EV_CONN_TIMEOUT:
+ h2_session_ev_conn_timeout(session, arg, msg);
+ break;
+ case H2_SESSION_EV_NGH2_DONE:
+ h2_session_ev_ngh2_done(session, arg, msg);
+ break;
+ case H2_SESSION_EV_MPM_STOPPING:
+ h2_session_ev_mpm_stopping(session, arg, msg);
+ break;
+ case H2_SESSION_EV_PRE_CLOSE:
+ h2_session_ev_pre_close(session, arg, msg);
+ break;
+ case H2_SESSION_EV_NO_MORE_STREAMS:
+ h2_session_ev_no_more_streams(session);
+ break;
+ default:
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, session->c1,
+ H2_SSSN_MSG(session, "unknown event %d"), ev);
+ break;
+ }
+}
+
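+/* Resume nghttp2 data generation for all streams whose output had been
+ * blocked on a full c1 buffer. */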
+static void unblock_c1_out(h2_session *session) {
+ int sid;
+
+ while ((sid = h2_iq_shift(session->out_c1_blocked)) > 0) {
+ nghttp2_session_resume_data(session->ngh2, sid);
+ }
+}
+
+apr_status_t h2_session_process(h2_session *session, int async)
+{
+ apr_status_t status = APR_SUCCESS;
+ conn_rec *c = session->c1;
+ int rv, mpm_state, trace = APLOGctrace3(c);
+
+ if (trace) {
+ ap_log_cerror( APLOG_MARK, APLOG_TRACE3, status, c,
+ H2_SSSN_MSG(session, "process start, async=%d"), async);
+ }
+
+ if (H2_SESSION_ST_INIT == session->state) {
+ if (!h2_protocol_is_acceptable_c1(c, session->r, 1)) {
+ const char *msg = nghttp2_strerror(NGHTTP2_INADEQUATE_SECURITY);
+ update_child_status(session, SERVER_BUSY_READ, msg, NULL);
+ h2_session_shutdown(session, APR_EINVAL, msg, 1);
+ }
+ else {
+ update_child_status(session, SERVER_BUSY_READ, "init", NULL);
+ status = h2_session_start(session, &rv);
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, c,
+ H2_SSSN_LOG(APLOGNO(03079), session,
+ "started on %s:%d"),
+ session->s->server_hostname,
+ c->local_addr->port);
+ if (status != APR_SUCCESS) {
+ h2_session_dispatch_event(session,
+ H2_SESSION_EV_CONN_ERROR, status, NULL);
+ }
+ else {
+ h2_session_dispatch_event(session, H2_SESSION_EV_INIT, 0, NULL);
+ }
+ }
+ }
+
+ while (session->state != H2_SESSION_ST_DONE) {
+
+ /* PR65731: we may get a new connection to process while the
+ * MPM is already stopping, for example due to having reached
+ * the MaxRequestsPerChild limit.
+ * Since this is supposed to handle things gracefully, we need to:
+ * a) fully initialize the session before GOAWAYing
+ * b) give the client the chance to submit at least one request
+ */
+ if (session->state != H2_SESSION_ST_INIT /* no longer initializing */
+ && session->local.accepted_max > 0 /* have gotten at least one stream */
+ && session->local.accepting /* have not already locally shut down */
+ && !ap_mpm_query(AP_MPMQ_MPM_STATE, &mpm_state)) {
+ if (mpm_state == AP_MPMQ_STOPPING) {
+ h2_session_dispatch_event(session, H2_SESSION_EV_MPM_STOPPING, 0, NULL);
+ }
+ }
+
+ session->status[0] = '\0';
+
+ if (h2_session_want_send(session)) {
+ h2_session_send(session);
+ }
+ else if (!nghttp2_session_want_read(session->ngh2)) {
+ h2_session_dispatch_event(session, H2_SESSION_EV_NGH2_DONE, 0, NULL);
+ }
+
+ if (!h2_iq_empty(session->ready_to_process)) {
+ h2_mplx_c1_process(session->mplx, session->ready_to_process,
+ get_stream, stream_pri_cmp, session,
+ &session->open_streams);
+ transit(session, "scheduled stream", H2_SESSION_ST_BUSY);
+ }
+
+ if (session->input_flushed) {
+ transit(session, "forwarded input", H2_SESSION_ST_BUSY);
+ session->input_flushed = 0;
+ }
+
+ if (!h2_iq_empty(session->out_c1_blocked)) {
+ unblock_c1_out(session);
+ transit(session, "unblocked output", H2_SESSION_ST_BUSY);
+ }
+
+ if (session->reprioritize) {
+ h2_mplx_c1_reprioritize(session->mplx, stream_pri_cmp, session);
+ session->reprioritize = 0;
+ }
+
+ if (h2_session_want_send(session)) {
+ h2_session_send(session);
+ }
+
+ status = h2_c1_io_assure_flushed(&session->io);
+ if (APR_SUCCESS != status) {
+ h2_session_dispatch_event(session, H2_SESSION_EV_CONN_ERROR, status, NULL);
+ }
+
+ switch (session->state) {
+ case H2_SESSION_ST_INIT:
+ ap_assert(0);
+ h2_c1_read(session);
+ break;
+
+ case H2_SESSION_ST_IDLE:
+ ap_assert(session->open_streams == 0);
+ ap_assert(nghttp2_session_want_read(session->ngh2));
+ if (!h2_session_want_send(session)) {
+ /* Give any new incoming request a short grace period to
+ * arrive while we are still hot and return to the mpm
+ * connection handling when nothing really happened. */
+ h2_c1_read(session);
+ if (H2_SESSION_ST_IDLE == session->state) {
+ if (async) {
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, c,
+ H2_SSSN_LOG(APLOGNO(10306), session,
+ "returning to mpm c1 monitoring"));
+ goto leaving;
+ }
+ else {
+ /* Not an async mpm, we must continue waiting
+ * for client data to arrive until the configured
+ * server Timeout/KeepAliveTimeout happens */
+ apr_time_t timeout = (session->open_streams == 0)?
+ session->s->keep_alive_timeout :
+ session->s->timeout;
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, status, c,
+ H2_SSSN_MSG(session, "polling timeout=%d"),
+ (int)apr_time_sec(timeout));
+ status = h2_mplx_c1_poll(session->mplx, timeout,
+ on_stream_input,
+ on_stream_output, session);
+ if (APR_STATUS_IS_TIMEUP(status)) {
+ if (session->open_streams == 0) {
+ h2_session_dispatch_event(session,
+ H2_SESSION_EV_CONN_TIMEOUT, status, NULL);
+ break;
+ }
+ }
+ else if (APR_SUCCESS != status) {
+ h2_session_dispatch_event(session,
+ H2_SESSION_EV_CONN_ERROR, status, NULL);
+ break;
+ }
+ }
+ }
+ }
+ else {
+ transit(session, "c1 io pending", H2_SESSION_ST_BUSY);
+ }
+ break;
+
+ case H2_SESSION_ST_BUSY:
+ /* IO happening in and out. Make sure we react to c2 events
+ * inbetween send and receive. */
+ status = h2_mplx_c1_poll(session->mplx, 0,
+ on_stream_input, on_stream_output, session);
+ if (APR_SUCCESS != status && !APR_STATUS_IS_TIMEUP(status)) {
+ h2_session_dispatch_event(session, H2_SESSION_EV_CONN_ERROR, status, NULL);
+ break;
+ }
+ h2_c1_read(session);
+ break;
+
+ case H2_SESSION_ST_WAIT:
+ status = h2_c1_io_assure_flushed(&session->io);
+ if (APR_SUCCESS != status) {
+ h2_session_dispatch_event(session, H2_SESSION_EV_CONN_ERROR, status, NULL);
+ break;
+ }
+ if (session->open_streams == 0) {
+ h2_session_dispatch_event(session, H2_SESSION_EV_NO_MORE_STREAMS,
+ 0, "streams really done");
+ if (session->state != H2_SESSION_ST_WAIT) {
+ break;
+ }
+ }
+ /* No IO happening and input is exhausted. Make sure we have
+ * flushed any possibly pending output and then wait with
+ * the c1 connection timeout for something to happen on our c1/c2 sockets/pipes */
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, status, c,
+ H2_SSSN_MSG(session, "polling timeout=%d, open_streams=%d"),
+ (int)apr_time_sec(session->s->timeout), session->open_streams);
+ status = h2_mplx_c1_poll(session->mplx, session->s->timeout,
+ on_stream_input, on_stream_output, session);
+ if (APR_STATUS_IS_TIMEUP(status)) {
+ if (session->open_streams == 0) {
+ h2_session_dispatch_event(session, H2_SESSION_EV_CONN_TIMEOUT, status, NULL);
+ break;
+ }
+ }
+ else if (APR_SUCCESS != status) {
+ h2_session_dispatch_event(session, H2_SESSION_EV_CONN_ERROR, status, NULL);
+ break;
+ }
+ break;
+
+ case H2_SESSION_ST_DONE:
+ h2_c1_read(session);
+ break;
+
+ default:
+ ap_log_cerror(APLOG_MARK, APLOG_ERR, APR_EGENERAL, c,
+ H2_SSSN_LOG(APLOGNO(03080), session,
+ "unknown state"));
+ h2_session_dispatch_event(session, H2_SESSION_EV_PROTO_ERROR, APR_EGENERAL, NULL);
+ break;
+ }
+ }
+
+leaving:
+ if (trace) {
+ ap_log_cerror( APLOG_MARK, APLOG_TRACE3, status, c,
+ H2_SSSN_MSG(session, "process returns"));
+ }
+
+ if (session->state == H2_SESSION_ST_DONE) {
+ if (session->local.error) {
+ char buffer[128];
+ const char *msg;
+ if (session->local.error_msg) {
+ msg = session->local.error_msg;
+ }
+ else {
+ msg = apr_strerror(session->local.error, buffer, sizeof(buffer));
+ }
+ update_child_status(session, SERVER_CLOSING, msg, NULL);
+ }
+ else {
+ update_child_status(session, SERVER_CLOSING, "done", NULL);
+ }
+ }
+ else if (APR_STATUS_IS_EOF(status)
+ || APR_STATUS_IS_ECONNRESET(status)
+ || APR_STATUS_IS_ECONNABORTED(status)) {
+ h2_session_dispatch_event(session, H2_SESSION_EV_CONN_ERROR, status, NULL);
+ update_child_status(session, SERVER_CLOSING, "error", NULL);
+ }
+
+ return (session->state == H2_SESSION_ST_DONE)? APR_EOF : APR_SUCCESS;
+}
+
+apr_status_t h2_session_pre_close(h2_session *session, int async)
+{
+ apr_status_t status;
+
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, session->c1,
+ H2_SSSN_MSG(session, "pre_close"));
+ h2_session_dispatch_event(session, H2_SESSION_EV_PRE_CLOSE, 0,
+ (session->state == H2_SESSION_ST_IDLE)? "timeout" : NULL);
+ status = session_cleanup(session, "pre_close");
+ if (status == APR_SUCCESS) {
+ /* no one should hold a reference to this session any longer and
+ * the h2_conn_ctx_t was removed from the connection.
+ * Take the pool (and thus all subpools etc.) down now, instead of
+ * during cleanup of the main connection pool. */
+ apr_pool_destroy(session->pool);
+ }
+ return status;
+}
diff --git a/modules/http2/h2_session.h b/modules/http2/h2_session.h
new file mode 100644
index 0000000..fbddfdd
--- /dev/null
+++ b/modules/http2/h2_session.h
@@ -0,0 +1,205 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __mod_h2__h2_session__
+#define __mod_h2__h2_session__
+
+#include "h2_c1_io.h"
+
+/**
+ * An HTTP/2 connection, a session with a specific client.
+ *
+ * h2_session sits on top of an httpd conn_rec* instance and takes complete
+ * control of the connection data. It receives protocol frames from the
+ * client. For new HTTP/2 streams it creates secondary connections
+ * to execute the requests in h2 workers.
+ */
+
+#include "h2.h"
+
+struct apr_thread_mutex_t;
+struct apr_thread_cond_t;
+struct h2_ctx;
+struct h2_config;
+struct h2_ihash_t;
+struct h2_mplx;
+struct h2_priority;
+struct h2_push;
+struct h2_push_diary;
+struct h2_session;
+struct h2_stream;
+struct h2_stream_monitor;
+struct h2_workers;
+
+struct nghttp2_session;
+
+typedef enum {
+ H2_SESSION_EV_INIT, /* session was initialized */
+ H2_SESSION_EV_INPUT_PENDING, /* c1 input may have data pending */
+ H2_SESSION_EV_INPUT_EXHAUSTED, /* c1 input exhausted */
+ H2_SESSION_EV_LOCAL_GOAWAY, /* we send a GOAWAY */
+ H2_SESSION_EV_REMOTE_GOAWAY, /* remote sent us a GOAWAY */
+ H2_SESSION_EV_CONN_ERROR, /* connection error */
+ H2_SESSION_EV_PROTO_ERROR, /* protocol error */
+ H2_SESSION_EV_CONN_TIMEOUT, /* connection timeout */
+ H2_SESSION_EV_NGH2_DONE, /* nghttp2 wants to neither read nor write anything */
+ H2_SESSION_EV_MPM_STOPPING, /* the process is stopping */
+ H2_SESSION_EV_PRE_CLOSE, /* connection will close after this */
+ H2_SESSION_EV_NO_MORE_STREAMS, /* no more streams to process */
+} h2_session_event_t;
+
+typedef struct h2_session {
+ int child_num; /* child number this session runs in */
+ apr_uint32_t id; /* identifier of this session, unique per child */
+ conn_rec *c1; /* the main connection this session serves */
+ request_rec *r; /* the request that started this in case
+ * of 'h2c', NULL otherwise */
+ server_rec *s; /* server/vhost we're starting on */
+ apr_pool_t *pool; /* pool to use in session */
+ struct h2_mplx *mplx; /* multiplexer for stream data */
+ struct h2_workers *workers; /* for executing streams */
+ struct h2_c1_io_in_ctx_t *cin; /* connection input filter context */
+ h2_c1_io io; /* io on httpd conn filters */
+ unsigned int padding_max; /* max number of padding bytes */
+ int padding_always; /* padding has precedence over I/O optimizations */
+ struct nghttp2_session *ngh2; /* the nghttp2 session (internal use) */
+
+ h2_session_state state; /* state session is in */
+
+ h2_session_props local; /* properties of local session */
+ h2_session_props remote; /* properties of remote session */
+
+ unsigned int reprioritize : 1; /* scheduled streams priority changed */
+ unsigned int flush : 1; /* flushing output necessary */
+ apr_interval_time_t wait_us; /* timeout during BUSY_WAIT state, micro secs */
+
+ struct h2_push_diary *push_diary; /* remember pushes, avoid duplicates */
+
+ struct h2_stream_monitor *monitor;/* monitor callbacks for streams */
+ unsigned int open_streams; /* number of streams processing */
+
+ unsigned int streams_done; /* number of http/2 streams handled */
+ unsigned int responses_submitted; /* number of http/2 responses submitted */
+ unsigned int streams_reset; /* number of http/2 streams reset by client */
+ unsigned int pushes_promised; /* number of http/2 push promises submitted */
+ unsigned int pushes_submitted; /* number of http/2 pushed responses submitted */
+ unsigned int pushes_reset; /* number of http/2 pushed reset by client */
+
+ apr_size_t frames_received; /* number of http/2 frames received */
+ apr_size_t frames_sent; /* number of http/2 frames sent */
+
+ apr_size_t max_stream_count; /* max number of open streams */
+ apr_size_t max_stream_mem; /* max buffer memory for a single stream */
+
+ apr_size_t idle_frames; /* number of rcvd frames that kept session in idle state */
+ apr_interval_time_t idle_delay; /* Time we delay processing rcvd frames in idle state */
+
+ apr_bucket_brigade *bbtmp; /* brigade for keeping temporary data */
+
+ char status[64]; /* status message for scoreboard */
+ int last_status_code; /* the one already reported */
+ const char *last_status_msg; /* the one already reported */
+
+ int input_flushed; /* stream input was flushed */
+ struct h2_iqueue *out_c1_blocked; /* all streams with output blocked on c1 buffer full */
+ struct h2_iqueue *ready_to_process; /* all streams ready for processing */
+
+} h2_session;
+
+const char *h2_session_state_str(h2_session_state state);
+
+/**
+ * Create a new h2_session for the given connection.
+ * The session will apply the configured parameters.
+ * @param psession pointer receiving the created session on success or NULL
+ * @param c the connection to work on
+ * @param r optional request when the protocol was upgraded
+ * @param s the server/vhost the session is started on
+ * @param workers the worker pool to use
+ * @return APR_SUCCESS when the session was created
+ */
+apr_status_t h2_session_create(h2_session **psession,
+ conn_rec *c, request_rec *r, server_rec *,
+ struct h2_workers *workers);
+
+void h2_session_event(h2_session *session, h2_session_event_t ev,
+ int err, const char *msg);
+
+/**
+ * Process the given HTTP/2 session until it ends or a fatal
+ * error occurs.
+ *
+ * @param session the session to process
+ */
+apr_status_t h2_session_process(h2_session *session, int async);
+
+/**
+ * Last chance to do anything before the connection is closed.
+ */
+apr_status_t h2_session_pre_close(h2_session *session, int async);
+
+/**
+ * Called when a serious error occurred and the session needs to terminate
+ * without further connection io.
+ * @param session the session to abort
+ * @param reason the apache status that caused the abort
+ */
+void h2_session_abort(h2_session *session, apr_status_t reason);
+
+/**
+ * Returns whether the client settings have push enabled.
+ * @return != 0 iff push is enabled in the client settings
+ */
+int h2_session_push_enabled(h2_session *session);
+
+/**
+ * Submit a push promise on the stream and schedule the new stream for
+ * processing.
+ *
+ * @param session the session to work in
+ * @param is the stream initiating the push
+ * @param push the push to promise
+ * @return the new promised stream or NULL
+ */
+struct h2_stream *h2_session_push(h2_session *session,
+ struct h2_stream *is, struct h2_push *push);
+
+apr_status_t h2_session_set_prio(h2_session *session,
+ struct h2_stream *stream,
+ const struct h2_priority *prio);
+
+/**
+ * Dispatch an event that happened during session processing.
+ * @param session the session
+ * @param ev the event that happened
+ * @param arg integer argument (event type dependent)
+ * @param msg descriptive message
+ */
+void h2_session_dispatch_event(h2_session *session, h2_session_event_t ev,
+ int arg, const char *msg);
+
+
+#define H2_SSSN_MSG(s, msg) \
+ "h2_session(%d-%lu,%s,%d): "msg, s->child_num, (unsigned long)s->id, \
+ h2_session_state_str(s->state), \
+ s->open_streams
+
+#define H2_SSSN_LOG(aplogno, s, msg) aplogno H2_SSSN_MSG(s, msg)
+
+#define H2_SSSN_STRM_MSG(s, stream_id, msg) \
+ "h2_stream(%d-%lu-%d): "msg, s->child_num, (unsigned long)s->id, stream_id
+
+#endif /* defined(__mod_h2__h2_session__) */
diff --git a/modules/http2/h2_stream.c b/modules/http2/h2_stream.c
new file mode 100644
index 0000000..cf6f798
--- /dev/null
+++ b/modules/http2/h2_stream.c
@@ -0,0 +1,1712 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <assert.h>
+#include <stddef.h>
+
+#include "apr.h"
+#include "apr_strings.h"
+#include "apr_lib.h"
+#include "apr_strmatch.h"
+
+#include <httpd.h>
+#include <http_core.h>
+#include <http_connection.h>
+#include <http_log.h>
+#include <http_protocol.h>
+#include <http_ssl.h>
+
+#include <nghttp2/nghttp2.h>
+
+#include "h2_private.h"
+#include "h2.h"
+#include "h2_bucket_beam.h"
+#include "h2_c1.h"
+#include "h2_config.h"
+#include "h2_protocol.h"
+#include "h2_mplx.h"
+#include "h2_push.h"
+#include "h2_request.h"
+#include "h2_headers.h"
+#include "h2_session.h"
+#include "h2_stream.h"
+#include "h2_c2.h"
+#include "h2_conn_ctx.h"
+#include "h2_c2.h"
+#include "h2_util.h"
+
+
+static const char *h2_ss_str(const h2_stream_state_t state)
+{
+ switch (state) {
+ case H2_SS_IDLE:
+ return "IDLE";
+ case H2_SS_RSVD_L:
+ return "RESERVED_LOCAL";
+ case H2_SS_RSVD_R:
+ return "RESERVED_REMOTE";
+ case H2_SS_OPEN:
+ return "OPEN";
+ case H2_SS_CLOSED_L:
+ return "HALF_CLOSED_LOCAL";
+ case H2_SS_CLOSED_R:
+ return "HALF_CLOSED_REMOTE";
+ case H2_SS_CLOSED:
+ return "CLOSED";
+ case H2_SS_CLEANUP:
+ return "CLEANUP";
+ default:
+ return "UNKNOWN";
+ }
+}
+
+const char *h2_stream_state_str(const h2_stream *stream)
+{
+ return h2_ss_str(stream->state);
+}
+
+/* Abbreviations for stream transit tables */
+#define S_XXX (-2) /* Programming Error */
+#define S_ERR (-1) /* Protocol Error */
+#define S_NOP (0) /* No Change */
+#define S_IDL (H2_SS_IDL + 1)
+#define S_RS_L (H2_SS_RSVD_L + 1)
+#define S_RS_R (H2_SS_RSVD_R + 1)
+#define S_OPEN (H2_SS_OPEN + 1)
+#define S_CL_L (H2_SS_CLOSED_L + 1)
+#define S_CL_R (H2_SS_CLOSED_R + 1)
+#define S_CLS (H2_SS_CLOSED + 1)
+#define S_CLN (H2_SS_CLEANUP + 1)
+
+/* state transitions when certain frame types are sent */
+static int trans_on_send[][H2_SS_MAX] = {
+/*S_IDLE,S_RS_R, S_RS_L, S_OPEN, S_CL_R, S_CL_L, S_CLS, S_CLN, */
+{ S_ERR, S_ERR, S_ERR, S_NOP, S_NOP, S_ERR, S_NOP, S_NOP, },/* DATA */
+{ S_ERR, S_ERR, S_CL_R, S_NOP, S_NOP, S_ERR, S_NOP, S_NOP, },/* HEADERS */
+{ S_NOP, S_NOP, S_NOP, S_NOP, S_NOP, S_NOP, S_NOP, S_NOP, },/* PRIORITY */
+{ S_CLS, S_CLS, S_CLS, S_CLS, S_CLS, S_CLS, S_NOP, S_NOP, },/* RST_STREAM */
+{ S_ERR, S_ERR, S_ERR, S_ERR, S_ERR, S_ERR, S_ERR, S_ERR, },/* SETTINGS */
+{ S_RS_L,S_ERR, S_ERR, S_ERR, S_ERR, S_ERR, S_ERR, S_ERR, },/* PUSH_PROMISE */
+{ S_ERR, S_ERR, S_ERR, S_ERR, S_ERR, S_ERR, S_ERR, S_ERR, },/* PING */
+{ S_ERR, S_ERR, S_ERR, S_ERR, S_ERR, S_ERR, S_ERR, S_ERR, },/* GOAWAY */
+{ S_NOP, S_NOP, S_NOP, S_NOP, S_NOP, S_NOP, S_NOP, S_NOP, },/* WINDOW_UPDATE */
+{ S_NOP, S_NOP, S_NOP, S_NOP, S_NOP, S_NOP, S_NOP, S_NOP, },/* CONT */
+};
+/* state transitions when certain frame types are received */
+static int trans_on_recv[][H2_SS_MAX] = {
+/*S_IDLE,S_RS_R, S_RS_L, S_OPEN, S_CL_R, S_CL_L, S_CLS, S_CLN, */
+{ S_ERR, S_ERR, S_ERR, S_NOP, S_ERR, S_NOP, S_NOP, S_NOP, },/* DATA */
+{ S_OPEN,S_CL_L, S_ERR, S_NOP, S_ERR, S_NOP, S_NOP, S_NOP, },/* HEADERS */
+{ S_NOP, S_NOP, S_NOP, S_NOP, S_NOP, S_NOP, S_NOP, S_NOP, },/* PRIORITY */
+{ S_ERR, S_CLS, S_CLS, S_CLS, S_CLS, S_CLS, S_NOP, S_NOP, },/* RST_STREAM */
+{ S_ERR, S_ERR, S_ERR, S_ERR, S_ERR, S_ERR, S_ERR, S_ERR, },/* SETTINGS */
+{ S_RS_R,S_ERR, S_ERR, S_ERR, S_ERR, S_ERR, S_ERR, S_ERR, },/* PUSH_PROMISE */
+{ S_ERR, S_ERR, S_ERR, S_ERR, S_ERR, S_ERR, S_ERR, S_ERR, },/* PING */
+{ S_ERR, S_ERR, S_ERR, S_ERR, S_ERR, S_ERR, S_ERR, S_ERR, },/* GOAWAY */
+{ S_NOP, S_NOP, S_NOP, S_NOP, S_NOP, S_NOP, S_NOP, S_NOP, },/* WINDOW_UPDATE */
+{ S_NOP, S_NOP, S_NOP, S_NOP, S_NOP, S_NOP, S_NOP, S_NOP, },/* CONT */
+};
+/* state transitions when certain events happen */
+static int trans_on_event[][H2_SS_MAX] = {
+/*S_IDLE,S_RS_R, S_RS_L, S_OPEN, S_CL_R, S_CL_L, S_CLS, S_CLN, */
+{ S_XXX, S_ERR, S_ERR, S_CL_L, S_CLS, S_XXX, S_XXX, S_XXX, },/* EV_CLOSED_L*/
+{ S_ERR, S_ERR, S_ERR, S_CL_R, S_ERR, S_CLS, S_NOP, S_NOP, },/* EV_CLOSED_R*/
+{ S_CLS, S_CLS, S_CLS, S_CLS, S_CLS, S_CLS, S_NOP, S_NOP, },/* EV_CANCELLED*/
+{ S_NOP, S_XXX, S_XXX, S_XXX, S_XXX, S_CLS, S_CLN, S_XXX, },/* EV_EOS_SENT*/
+{ S_NOP, S_XXX, S_CLS, S_XXX, S_XXX, S_CLS, S_XXX, S_XXX, },/* EV_IN_ERROR*/
+};
+
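+/* Decode a transition table entry: S_NOP keeps the current state, S_XXX and
+ * S_ERR are returned as is, and target states are stored with an offset of +1
+ * (see the S_* defines above). */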
+static int on_map(h2_stream_state_t state, int map[H2_SS_MAX])
+{
+ int op = map[state];
+ switch (op) {
+ case S_XXX:
+ case S_ERR:
+ return op;
+ case S_NOP:
+ return state;
+ default:
+ return op-1;
+ }
+}
+
+static int on_frame(h2_stream_state_t state, int frame_type,
+ int frame_map[][H2_SS_MAX], apr_size_t maxlen)
+{
+ ap_assert(frame_type >= 0);
+ ap_assert(state >= 0);
+ if ((apr_size_t)frame_type >= maxlen) {
+ return state; /* NOP, ignore unknown frame types */
+ }
+ return on_map(state, frame_map[frame_type]);
+}
+
+static int on_frame_send(h2_stream_state_t state, int frame_type)
+{
+ return on_frame(state, frame_type, trans_on_send, H2_ALEN(trans_on_send));
+}
+
+static int on_frame_recv(h2_stream_state_t state, int frame_type)
+{
+ return on_frame(state, frame_type, trans_on_recv, H2_ALEN(trans_on_recv));
+}
+
+static int on_event(h2_stream* stream, h2_stream_event_t ev)
+{
+ if (stream->monitor && stream->monitor->on_event) {
+ stream->monitor->on_event(stream->monitor->ctx, stream, ev);
+ }
+ if (ev < H2_ALEN(trans_on_event)) {
+ return on_map(stream->state, trans_on_event[ev]);
+ }
+ return stream->state;
+}
+
+static ssize_t stream_data_cb(nghttp2_session *ng2s,
+ int32_t stream_id,
+ uint8_t *buf,
+ size_t length,
+ uint32_t *data_flags,
+ nghttp2_data_source *source,
+ void *puser);
+
+static void H2_STREAM_OUT_LOG(int lvl, h2_stream *s, const char *tag)
+{
+ if (APLOG_C_IS_LEVEL(s->session->c1, lvl)) {
+ conn_rec *c = s->session->c1;
+ char buffer[4 * 1024];
+ apr_size_t len, bmax = sizeof(buffer)/sizeof(buffer[0]);
+
+ len = h2_util_bb_print(buffer, bmax, tag, "", s->out_buffer);
+ ap_log_cerror(APLOG_MARK, lvl, 0, c,
+ H2_STRM_MSG(s, "out-buffer(%s)"), len? buffer : "empty");
+ }
+}
+
+static void stream_setup_input(h2_stream *stream)
+{
+ if (stream->input != NULL) return;
+ ap_assert(!stream->input_closed);
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, stream->session->c1,
+ H2_STRM_MSG(stream, "setup input beam"));
+ h2_beam_create(&stream->input, stream->session->c1,
+ stream->pool, stream->id,
+ "input", 0, stream->session->s->timeout);
+}
+
+apr_status_t h2_stream_prepare_processing(h2_stream *stream)
+{
+ /* Right before processing starts, last chance to decide if
+ * there is need for an input beam. */
+ if (!stream->input_closed) {
+ stream_setup_input(stream);
+ }
+ return APR_SUCCESS;
+}
+
+static int input_buffer_is_empty(h2_stream *stream)
+{
+ return !stream->in_buffer || APR_BRIGADE_EMPTY(stream->in_buffer);
+}
+
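+/* Pass anything collected in the stream's in_buffer through the input beam
+ * to the processing side (c2). */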
+static apr_status_t input_flush(h2_stream *stream)
+{
+ apr_status_t status = APR_SUCCESS;
+ apr_off_t written;
+
+ if (input_buffer_is_empty(stream)) goto cleanup;
+
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, stream->session->c1,
+ H2_STRM_MSG(stream, "flush input"));
+ status = h2_beam_send(stream->input, stream->session->c1,
+ stream->in_buffer, APR_BLOCK_READ, &written);
+ stream->in_last_write = apr_time_now();
+ if (APR_SUCCESS != status && h2_stream_is_at(stream, H2_SS_CLOSED_L)) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, status, stream->session->c1,
+ H2_STRM_MSG(stream, "send input error"));
+ h2_stream_dispatch(stream, H2_SEV_IN_ERROR);
+ }
+cleanup:
+ return status;
+}
+
+static void input_append_bucket(h2_stream *stream, apr_bucket *b)
+{
+ if (!stream->in_buffer) {
+ stream_setup_input(stream);
+ stream->in_buffer = apr_brigade_create(
+ stream->pool, stream->session->c1->bucket_alloc);
+ }
+ APR_BRIGADE_INSERT_TAIL(stream->in_buffer, b);
+}
+
+static void input_append_data(h2_stream *stream, const char *data, apr_size_t len)
+{
+ if (!stream->in_buffer) {
+ stream_setup_input(stream);
+ stream->in_buffer = apr_brigade_create(
+ stream->pool, stream->session->c1->bucket_alloc);
+ }
+ apr_brigade_write(stream->in_buffer, NULL, NULL, data, len);
+}
+
+
+static apr_status_t close_input(h2_stream *stream)
+{
+ conn_rec *c = stream->session->c1;
+ apr_status_t rv = APR_SUCCESS;
+ apr_bucket *b;
+
+ if (stream->input_closed) goto cleanup;
+
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, stream->session->c1,
+ H2_STRM_MSG(stream, "closing input"));
+ if (!stream->rst_error
+ && stream->trailers_in
+ && !apr_is_empty_table(stream->trailers_in)) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, stream->session->c1,
+ H2_STRM_MSG(stream, "adding trailers"));
+#if AP_HAS_RESPONSE_BUCKETS
+ b = ap_bucket_headers_create(stream->trailers_in,
+ stream->pool, c->bucket_alloc);
+#else
+ b = h2_bucket_headers_create(c->bucket_alloc,
+ h2_headers_create(HTTP_OK, stream->trailers_in, NULL,
+ stream->in_trailer_octets, stream->pool));
+#endif
+ input_append_bucket(stream, b);
+ stream->trailers_in = NULL;
+ }
+
+ stream->input_closed = 1;
+ if (stream->input) {
+ b = apr_bucket_eos_create(c->bucket_alloc);
+ input_append_bucket(stream, b);
+ input_flush(stream);
+ h2_stream_dispatch(stream, H2_SEV_IN_DATA_PENDING);
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, stream->session->c1,
+ H2_STRM_MSG(stream, "input flush + EOS"));
+ }
+
+cleanup:
+ return rv;
+}
+
+static void on_state_enter(h2_stream *stream)
+{
+ if (stream->monitor && stream->monitor->on_state_enter) {
+ stream->monitor->on_state_enter(stream->monitor->ctx, stream);
+ }
+}
+
+static void on_state_event(h2_stream *stream, h2_stream_event_t ev)
+{
+ if (stream->monitor && stream->monitor->on_state_event) {
+ stream->monitor->on_state_event(stream->monitor->ctx, stream, ev);
+ }
+}
+
+static void on_state_invalid(h2_stream *stream)
+{
+ if (stream->monitor && stream->monitor->on_state_invalid) {
+ stream->monitor->on_state_invalid(stream->monitor->ctx, stream);
+ }
+ /* stream got an event/frame invalid in its state */
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, stream->session->c1,
+ H2_STRM_MSG(stream, "invalid state event"));
+ switch (stream->state) {
+ case H2_SS_OPEN:
+ case H2_SS_RSVD_L:
+ case H2_SS_RSVD_R:
+ case H2_SS_CLOSED_L:
+ case H2_SS_CLOSED_R:
+ h2_stream_rst(stream, H2_ERR_INTERNAL_ERROR);
+ break;
+ default:
+ break;
+ }
+}
+
+static apr_status_t transit(h2_stream *stream, int new_state)
+{
+ if ((h2_stream_state_t)new_state == stream->state) {
+ return APR_SUCCESS;
+ }
+ else if (new_state < 0) {
+ ap_log_cerror(APLOG_MARK, APLOG_WARNING, 0, stream->session->c1,
+ H2_STRM_LOG(APLOGNO(03081), stream, "invalid transition"));
+ on_state_invalid(stream);
+ return APR_EINVAL;
+ }
+
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, stream->session->c1,
+ H2_STRM_MSG(stream, "transit to [%s]"), h2_ss_str(new_state));
+ stream->state = new_state;
+ switch (new_state) {
+ case H2_SS_IDLE:
+ break;
+ case H2_SS_RSVD_L:
+ close_input(stream);
+ break;
+ case H2_SS_RSVD_R:
+ break;
+ case H2_SS_OPEN:
+ break;
+ case H2_SS_CLOSED_L:
+ break;
+ case H2_SS_CLOSED_R:
+ close_input(stream);
+ break;
+ case H2_SS_CLOSED:
+ close_input(stream);
+ if (stream->out_buffer) {
+ apr_brigade_cleanup(stream->out_buffer);
+ }
+ break;
+ case H2_SS_CLEANUP:
+ break;
+ }
+ on_state_enter(stream);
+ return APR_SUCCESS;
+}
+
+void h2_stream_set_monitor(h2_stream *stream, h2_stream_monitor *monitor)
+{
+ stream->monitor = monitor;
+}
+
+void h2_stream_dispatch(h2_stream *stream, h2_stream_event_t ev)
+{
+ int new_state;
+
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, stream->session->c1,
+ H2_STRM_MSG(stream, "dispatch event %d"), ev);
+ new_state = on_event(stream, ev);
+ if (new_state < 0) {
+ ap_log_cerror(APLOG_MARK, APLOG_WARNING, 0, stream->session->c1,
+ H2_STRM_LOG(APLOGNO(10002), stream, "invalid event %d"), ev);
+ on_state_invalid(stream);
+ AP_DEBUG_ASSERT(new_state > S_XXX);
+ return;
+ }
+ else if ((h2_stream_state_t)new_state == stream->state) {
+ /* nop */
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, stream->session->c1,
+ H2_STRM_MSG(stream, "non-state event %d"), ev);
+ return;
+ }
+ else {
+ on_state_event(stream, ev);
+ transit(stream, new_state);
+ }
+}
+
+static void set_policy_for(h2_stream *stream, h2_request *r)
+{
+ int enabled = h2_session_push_enabled(stream->session);
+ stream->push_policy = h2_push_policy_determine(r->headers, stream->pool, enabled);
+}
+
+apr_status_t h2_stream_send_frame(h2_stream *stream, int ftype, int flags, size_t frame_len)
+{
+ apr_status_t status = APR_SUCCESS;
+ int new_state, eos = 0;
+
+ new_state = on_frame_send(stream->state, ftype);
+ if (new_state < 0) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, stream->session->c1,
+ H2_STRM_MSG(stream, "invalid frame %d send"), ftype);
+ AP_DEBUG_ASSERT(new_state > S_XXX);
+ return transit(stream, new_state);
+ }
+
+ ++stream->out_frames;
+ stream->out_frame_octets += frame_len;
+ switch (ftype) {
+ case NGHTTP2_DATA:
+ eos = (flags & NGHTTP2_FLAG_END_STREAM);
+ break;
+
+ case NGHTTP2_HEADERS:
+ eos = (flags & NGHTTP2_FLAG_END_STREAM);
+ break;
+
+ case NGHTTP2_PUSH_PROMISE:
+ /* start pushed stream */
+ ap_assert(stream->request == NULL);
+ ap_assert(stream->rtmp != NULL);
+ status = h2_stream_end_headers(stream, 1, 0);
+ if (status != APR_SUCCESS) goto leave;
+ break;
+
+ default:
+ break;
+ }
+ status = transit(stream, new_state);
+ if (status == APR_SUCCESS && eos) {
+ status = transit(stream, on_event(stream, H2_SEV_CLOSED_L));
+ }
+leave:
+ return status;
+}
+
+apr_status_t h2_stream_recv_frame(h2_stream *stream, int ftype, int flags, size_t frame_len)
+{
+ apr_status_t status = APR_SUCCESS;
+ int new_state, eos = 0;
+
+ new_state = on_frame_recv(stream->state, ftype);
+ if (new_state < 0) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, stream->session->c1,
+ H2_STRM_MSG(stream, "invalid frame %d recv"), ftype);
+ AP_DEBUG_ASSERT(new_state > S_XXX);
+ return transit(stream, new_state);
+ }
+
+ switch (ftype) {
+ case NGHTTP2_DATA:
+ eos = (flags & NGHTTP2_FLAG_END_STREAM);
+ break;
+
+ case NGHTTP2_HEADERS:
+ eos = (flags & NGHTTP2_FLAG_END_STREAM);
+ if (h2_stream_is_at_or_past(stream, H2_SS_OPEN)) {
+ /* trailer HEADER */
+ if (!eos) {
+ h2_stream_rst(stream, H2_ERR_PROTOCOL_ERROR);
+ }
+ stream->in_trailer_octets += frame_len;
+ }
+ else {
+ /* request HEADER */
+ ap_assert(stream->request == NULL);
+ if (stream->rtmp == NULL) {
+ /* This can only happen if the stream has received no header
+ * name/value pairs at all. The latest nghttp2 versions have become
+ * pretty good at detecting this early. In any case, we have
+ * to abort the connection here, since this is clearly a protocol error */
+ return APR_EINVAL;
+ }
+ status = h2_stream_end_headers(stream, eos, frame_len);
+ if (status != APR_SUCCESS) goto leave;
+ }
+ break;
+
+ default:
+ break;
+ }
+ status = transit(stream, new_state);
+ if (status == APR_SUCCESS && eos) {
+ status = transit(stream, on_event(stream, H2_SEV_CLOSED_R));
+ }
+leave:
+ return status;
+}
+
+apr_status_t h2_stream_recv_DATA(h2_stream *stream, uint8_t flags,
+ const uint8_t *data, size_t len)
+{
+ h2_session *session = stream->session;
+ apr_status_t status = APR_SUCCESS;
+
+ stream->in_data_frames++;
+ if (len > 0) {
+ if (APLOGctrace3(session->c1)) {
+ const char *load = apr_pstrndup(stream->pool, (const char *)data, len);
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE3, 0, session->c1,
+ H2_STRM_MSG(stream, "recv DATA, len=%d: -->%s<--"),
+ (int)len, load);
+ }
+ else {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, status, session->c1,
+ H2_STRM_MSG(stream, "recv DATA, len=%d"), (int)len);
+ }
+ stream->in_data_octets += len;
+ input_append_data(stream, (const char*)data, len);
+ input_flush(stream);
+ h2_stream_dispatch(stream, H2_SEV_IN_DATA_PENDING);
+ }
+ return status;
+}
+
+h2_stream *h2_stream_create(int id, apr_pool_t *pool, h2_session *session,
+ h2_stream_monitor *monitor, int initiated_on)
+{
+ h2_stream *stream = apr_pcalloc(pool, sizeof(h2_stream));
+
+ stream->id = id;
+ stream->initiated_on = initiated_on;
+ stream->created = apr_time_now();
+ stream->state = H2_SS_IDLE;
+ stream->pool = pool;
+ stream->session = session;
+ stream->monitor = monitor;
+
+#ifdef H2_NG2_LOCAL_WIN_SIZE
+ if (id) {
+ stream->in_window_size =
+ nghttp2_session_get_stream_local_window_size(
+ stream->session->ngh2, stream->id);
+ }
+#endif
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c1,
+ H2_STRM_LOG(APLOGNO(03082), stream, "created"));
+ on_state_enter(stream);
+ return stream;
+}
+
+void h2_stream_cleanup(h2_stream *stream)
+{
+    /* The stream is done on c1. Processing on a c2 may still be
+     * going on. The input/output beams get aborted and the stream's
+     * ends of the in/out notifications get closed.
+     */
+ ap_assert(stream);
+ if (stream->out_buffer) {
+ apr_brigade_cleanup(stream->out_buffer);
+ }
+}
+
+void h2_stream_destroy(h2_stream *stream)
+{
+ ap_assert(stream);
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE3, 0, stream->session->c1,
+ H2_STRM_MSG(stream, "destroy"));
+ apr_pool_destroy(stream->pool);
+}
+
+void h2_stream_rst(h2_stream *stream, int error_code)
+{
+ stream->rst_error = error_code;
+ if (stream->c2) {
+ h2_c2_abort(stream->c2, stream->session->c1);
+ }
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, stream->session->c1,
+ H2_STRM_MSG(stream, "reset, error=%d"), error_code);
+ h2_stream_dispatch(stream, H2_SEV_CANCELLED);
+}
+
+apr_status_t h2_stream_set_request_rec(h2_stream *stream,
+ request_rec *r, int eos)
+{
+ h2_request *req;
+ apr_status_t status;
+
+ ap_assert(stream->request == NULL);
+ ap_assert(stream->rtmp == NULL);
+ if (stream->rst_error) {
+ return APR_ECONNRESET;
+ }
+ status = h2_request_rcreate(&req, stream->pool, r);
+ if (status == APR_SUCCESS) {
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, status, r,
+ H2_STRM_LOG(APLOGNO(03058), stream,
+ "set_request_rec %s host=%s://%s%s"),
+ req->method, req->scheme, req->authority, req->path);
+ stream->rtmp = req;
+ /* simulate the frames that led to this */
+ return h2_stream_recv_frame(stream, NGHTTP2_HEADERS,
+ NGHTTP2_FLAG_END_STREAM, 0);
+ }
+ return status;
+}
+
+void h2_stream_set_request(h2_stream *stream, const h2_request *r)
+{
+ ap_assert(stream->request == NULL);
+ ap_assert(stream->rtmp == NULL);
+ stream->rtmp = h2_request_clone(stream->pool, r);
+}
+
+static void set_error_response(h2_stream *stream, int http_status)
+{
+ if (!h2_stream_is_ready(stream) && stream->rtmp) {
+ stream->rtmp->http_status = http_status;
+ }
+}
+
+static apr_status_t add_trailer(h2_stream *stream,
+ const char *name, size_t nlen,
+ const char *value, size_t vlen,
+ size_t max_field_len, int *pwas_added)
+{
+ conn_rec *c = stream->session->c1;
+ char *hname, *hvalue;
+ const char *existing;
+
+ *pwas_added = 0;
+ if (nlen == 0 || name[0] == ':') {
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, APR_EINVAL, c,
+ H2_STRM_LOG(APLOGNO(03060), stream,
+ "pseudo header in trailer"));
+ return APR_EINVAL;
+ }
+ if (h2_ignore_req_trailer(name, nlen)) {
+ return APR_SUCCESS;
+ }
+ if (!stream->trailers_in) {
+ stream->trailers_in = apr_table_make(stream->pool, 5);
+ }
+ hname = apr_pstrndup(stream->pool, name, nlen);
+ h2_util_camel_case_header(hname, nlen);
+ existing = apr_table_get(stream->trailers_in, hname);
+ if (max_field_len
+ && ((existing? strlen(existing)+2 : 0) + vlen + nlen + 2 > max_field_len)) {
+ /* "key: (oldval, )?nval" is too long */
+ return APR_EINVAL;
+ }
+ if (!existing) *pwas_added = 1;
+ hvalue = apr_pstrndup(stream->pool, value, vlen);
+ apr_table_mergen(stream->trailers_in, hname, hvalue);
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, c,
+ H2_STRM_MSG(stream, "added trailer '%s: %s'"), hname, hvalue);
+
+ return APR_SUCCESS;
+}
+
+apr_status_t h2_stream_add_header(h2_stream *stream,
+ const char *name, size_t nlen,
+ const char *value, size_t vlen)
+{
+ h2_session *session = stream->session;
+ int error = 0, was_added = 0;
+ apr_status_t status = APR_SUCCESS;
+
+ if (stream->response) {
+ return APR_EINVAL;
+ }
+
+ if (name[0] == ':') {
+ if (vlen > APR_INT32_MAX || (int)vlen > session->s->limit_req_line) {
+ /* pseudo header: approximation of request line size check */
+ if (!h2_stream_is_ready(stream)) {
+ ap_log_cerror(APLOG_MARK, APLOG_INFO, 0, session->c1,
+ H2_STRM_LOG(APLOGNO(10178), stream,
+ "Request pseudo header exceeds "
+ "LimitRequestFieldSize: %s"), name);
+ }
+ error = HTTP_REQUEST_URI_TOO_LARGE;
+ goto cleanup;
+ }
+ }
+
+ if (session->s->limit_req_fields > 0
+ && stream->request_headers_added > session->s->limit_req_fields) {
+ /* already over limit, count this attempt, but do not take it in */
+ ++stream->request_headers_added;
+ }
+ else if (H2_SS_IDLE == stream->state) {
+ if (!stream->rtmp) {
+ stream->rtmp = h2_request_create(stream->id, stream->pool,
+ NULL, NULL, NULL, NULL, NULL);
+ }
+ status = h2_request_add_header(stream->rtmp, stream->pool,
+ name, nlen, value, vlen,
+ session->s->limit_req_fieldsize, &was_added);
+ if (was_added) ++stream->request_headers_added;
+ }
+ else if (H2_SS_OPEN == stream->state) {
+ status = add_trailer(stream, name, nlen, value, vlen,
+ session->s->limit_req_fieldsize, &was_added);
+ if (was_added) ++stream->request_headers_added;
+ }
+ else {
+ status = APR_EINVAL;
+ goto cleanup;
+ }
+
+ if (APR_EINVAL == status) {
+ /* header too long */
+ if (!h2_stream_is_ready(stream)) {
+ ap_log_cerror(APLOG_MARK, APLOG_INFO, 0, session->c1,
+ H2_STRM_LOG(APLOGNO(10180), stream,"Request header exceeds "
+ "LimitRequestFieldSize: %.*s"),
+ (int)H2MIN(nlen, 80), name);
+ }
+ error = HTTP_REQUEST_HEADER_FIELDS_TOO_LARGE;
+ goto cleanup;
+ }
+
+ if (session->s->limit_req_fields > 0
+ && stream->request_headers_added > session->s->limit_req_fields) {
+ /* too many header lines */
+ if (stream->request_headers_added > session->s->limit_req_fields + 100) {
+ /* yeah, right, this request is way over the limit, say goodbye */
+ h2_stream_rst(stream, H2_ERR_ENHANCE_YOUR_CALM);
+ return APR_ECONNRESET;
+ }
+ if (!h2_stream_is_ready(stream)) {
+ ap_log_cerror(APLOG_MARK, APLOG_INFO, 0, session->c1,
+ H2_STRM_LOG(APLOGNO(10181), stream, "Number of request headers "
+ "exceeds LimitRequestFields"));
+ }
+ error = HTTP_REQUEST_HEADER_FIELDS_TOO_LARGE;
+ goto cleanup;
+ }
+
+cleanup:
+ if (error) {
+ set_error_response(stream, error);
+ return APR_EINVAL;
+ }
+ else if (status != APR_SUCCESS) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, session->c1,
+ H2_STRM_MSG(stream, "header %s not accepted"), name);
+ h2_stream_dispatch(stream, H2_SEV_CANCELLED);
+ }
+ return status;
+}
+
+typedef struct {
+ apr_size_t maxlen;
+ const char *failed_key;
+} val_len_check_ctx;
+
+static int table_check_val_len(void *baton, const char *key, const char *value)
+{
+ val_len_check_ctx *ctx = baton;
+
+ if (strlen(value) <= ctx->maxlen) return 1;
+ ctx->failed_key = key;
+ return 0;
+}
+
+apr_status_t h2_stream_end_headers(h2_stream *stream, int eos, size_t raw_bytes)
+{
+ apr_status_t status;
+ val_len_check_ctx ctx;
+ int is_http_or_https;
+ h2_request *req = stream->rtmp;
+
+ status = h2_request_end_headers(req, stream->pool, raw_bytes);
+ if (APR_SUCCESS != status || req->http_status != H2_HTTP_STATUS_UNSET) {
+ goto cleanup;
+ }
+
+    /* keep on returning APR_SUCCESS for error responses, so that we
+     * send them and do not RST the stream.
+ */
+ set_policy_for(stream, req);
+
+ ctx.maxlen = stream->session->s->limit_req_fieldsize;
+ ctx.failed_key = NULL;
+ apr_table_do(table_check_val_len, &ctx, req->headers, NULL);
+ if (ctx.failed_key) {
+ if (!h2_stream_is_ready(stream)) {
+ ap_log_cerror(APLOG_MARK, APLOG_INFO, 0, stream->session->c1,
+ H2_STRM_LOG(APLOGNO(10230), stream,"Request header exceeds "
+ "LimitRequestFieldSize: %.*s"),
+ (int)H2MIN(strlen(ctx.failed_key), 80), ctx.failed_key);
+ }
+ set_error_response(stream, HTTP_REQUEST_HEADER_FIELDS_TOO_LARGE);
+ goto cleanup;
+ }
+
+ /* http(s) scheme. rfc7540, ch. 8.1.2.3:
+ * This [:path] pseudo-header field MUST NOT be empty for "http" or "https"
+ * URIs; "http" or "https" URIs that do not contain a path component
+ * MUST include a value of '/'. The exception to this rule is an
+ * OPTIONS request for an "http" or "https" URI that does not include
+ * a path component; these MUST include a ":path" pseudo-header field
+ * with a value of '*'
+ *
+ * All HTTP/2 requests MUST include exactly one valid value for the
+ * ":method", ":scheme", and ":path" pseudo-header fields, unless it is
+ * a CONNECT request.
+ */
+ is_http_or_https = (!req->scheme
+ || !(ap_cstr_casecmpn(req->scheme, "http", 4) != 0
+ || (req->scheme[4] != '\0'
+ && (apr_tolower(req->scheme[4]) != 's'
+ || req->scheme[5] != '\0'))));
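+    /* For illustration (derived from the check above): a NULL scheme and
+     * schemes like "http", "HTTP", "https" or "httpS" count as http(s);
+     * schemes like "ftp", "httpx" or "httpss" do not. */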
+
+ /* CONNECT. rfc7540, ch. 8.3:
+ * In HTTP/2, the CONNECT method is used to establish a tunnel over a
+ * single HTTP/2 stream to a remote host for similar purposes. The HTTP
+ * header field mapping works as defined in Section 8.1.2.3 ("Request
+ * Pseudo-Header Fields"), with a few differences. Specifically:
+ * o The ":method" pseudo-header field is set to "CONNECT".
+ * o The ":scheme" and ":path" pseudo-header fields MUST be omitted.
+ * o The ":authority" pseudo-header field contains the host and port to
+ * connect to (equivalent to the authority-form of the request-target
+ * of CONNECT requests (see [RFC7230], Section 5.3)).
+ */
+ if (!ap_cstr_casecmp(req->method, "CONNECT")) {
+ if (req->scheme || req->path) {
+ ap_log_cerror(APLOG_MARK, APLOG_INFO, 0, stream->session->c1,
+ H2_STRM_LOG(APLOGNO(10384), stream, "Request to CONNECT "
+ "with :scheme or :path specified, sending 400 answer"));
+ set_error_response(stream, HTTP_BAD_REQUEST);
+ goto cleanup;
+ }
+ }
+ else if (is_http_or_https) {
+ if (!req->path) {
+ ap_log_cerror(APLOG_MARK, APLOG_INFO, 0, stream->session->c1,
+ H2_STRM_LOG(APLOGNO(10385), stream, "Request for http(s) "
+ "resource without :path, sending 400 answer"));
+ set_error_response(stream, HTTP_BAD_REQUEST);
+ goto cleanup;
+ }
+ if (!req->scheme) {
+ req->scheme = ap_ssl_conn_is_ssl(stream->session->c1)? "https" : "http";
+ }
+ }
+
+ if (req->scheme && (req->path && req->path[0] != '/')) {
+ /* We still have a scheme, which means we need to pass an absolute URI into
+ * our HTTP protocol handling and the missing '/' at the start will prevent
+ * us from doing so (as it then confuses path and authority). */
+ ap_log_cerror(APLOG_MARK, APLOG_INFO, 0, stream->session->c1,
+ H2_STRM_LOG(APLOGNO(10379), stream, "Request :scheme '%s' and "
+ "path '%s' do not allow creating an absolute URL. Failing "
+ "request with 400."), req->scheme, req->path);
+ set_error_response(stream, HTTP_BAD_REQUEST);
+ goto cleanup;
+ }
+
+cleanup:
+ if (APR_SUCCESS == status) {
+ stream->request = req;
+ stream->rtmp = NULL;
+
+ if (APLOGctrace4(stream->session->c1)) {
+ int i;
+ const apr_array_header_t *t_h = apr_table_elts(req->headers);
+ const apr_table_entry_t *t_elt = (apr_table_entry_t *)t_h->elts;
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE4, 0, stream->session->c1,
+ H2_STRM_MSG(stream,"headers received from client:"));
+ for (i = 0; i < t_h->nelts; i++, t_elt++) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE4, 0, stream->session->c1,
+ H2_STRM_MSG(stream, " %s: %s"),
+ ap_escape_logitem(stream->pool, t_elt->key),
+ ap_escape_logitem(stream->pool, t_elt->val));
+ }
+ }
+ }
+ return status;
+}
+
+static apr_bucket *get_first_response_bucket(apr_bucket_brigade *bb)
+{
+ if (bb) {
+ apr_bucket *b = APR_BRIGADE_FIRST(bb);
+ while (b != APR_BRIGADE_SENTINEL(bb)) {
+#if AP_HAS_RESPONSE_BUCKETS
+ if (AP_BUCKET_IS_RESPONSE(b)) {
+ return b;
+ }
+#else
+ if (H2_BUCKET_IS_HEADERS(b)) {
+ return b;
+ }
+#endif
+ b = APR_BUCKET_NEXT(b);
+ }
+ }
+ return NULL;
+}
+
+static void stream_do_error_bucket(h2_stream *stream, apr_bucket *b)
+{
+ int err = ((ap_bucket_error *)(b->data))->status;
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, stream->session->c1,
+ H2_STRM_MSG(stream, "error bucket received, err=%d"), err);
+ if (err >= 500) {
+ err = NGHTTP2_INTERNAL_ERROR;
+ }
+ else if (err >= 400) {
+ err = NGHTTP2_STREAM_CLOSED;
+ }
+ else {
+ err = NGHTTP2_PROTOCOL_ERROR;
+ }
+ h2_stream_rst(stream, err);
+}
+
+static apr_status_t buffer_output_receive(h2_stream *stream)
+{
+ apr_status_t rv = APR_EAGAIN;
+ apr_off_t buf_len;
+ conn_rec *c1 = stream->session->c1;
+ apr_bucket *b, *e;
+
+ if (!stream->output) {
+ goto cleanup;
+ }
+ if (stream->rst_error) {
+ rv = APR_ECONNRESET;
+ goto cleanup;
+ }
+
+ if (!stream->out_buffer) {
+ stream->out_buffer = apr_brigade_create(stream->pool, c1->bucket_alloc);
+ buf_len = 0;
+ }
+ else {
+        /* if the brigade contains a file bucket, the length it reports
+         * might be megabytes, but the memory actually used is tiny. For
+         * buffering, we are only interested in the memory footprint. */
+ buf_len = h2_brigade_mem_size(stream->out_buffer);
+ }
+
+ if (buf_len > APR_INT32_MAX
+ || (apr_size_t)buf_len >= stream->session->max_stream_mem) {
+        /* we have buffered enough. No need to read more.
+         * However, we now have output pending for which we may not
+         * receive another poll event. We need to make sure that this
+         * stream is not suspended, so we keep on processing output.
+         */
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, rv, c1,
+ H2_STRM_MSG(stream, "out_buffer, already has %ld length"),
+ (long)buf_len);
+ rv = APR_SUCCESS;
+ goto cleanup;
+ }
+
+ if (stream->output_eos) {
+ rv = APR_BRIGADE_EMPTY(stream->out_buffer)? APR_EOF : APR_SUCCESS;
+ }
+ else {
+ H2_STREAM_OUT_LOG(APLOG_TRACE2, stream, "pre");
+ rv = h2_beam_receive(stream->output, stream->session->c1, stream->out_buffer,
+ APR_NONBLOCK_READ, stream->session->max_stream_mem - buf_len);
+ if (APR_SUCCESS != rv) {
+ if (APR_EAGAIN != rv) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, rv, c1,
+ H2_STRM_MSG(stream, "out_buffer, receive unsuccessful"));
+ }
+ }
+ }
+
+ /* get rid of buckets we have no need for */
+ if (!APR_BRIGADE_EMPTY(stream->out_buffer)) {
+ b = APR_BRIGADE_FIRST(stream->out_buffer);
+ while (b != APR_BRIGADE_SENTINEL(stream->out_buffer)) {
+ e = APR_BUCKET_NEXT(b);
+ if (APR_BUCKET_IS_METADATA(b)) {
+ if (APR_BUCKET_IS_FLUSH(b)) { /* we flush any c1 data already */
+ APR_BUCKET_REMOVE(b);
+ apr_bucket_destroy(b);
+ }
+ else if (APR_BUCKET_IS_EOS(b)) {
+ stream->output_eos = 1;
+ }
+ else if (AP_BUCKET_IS_ERROR(b)) {
+ stream_do_error_bucket(stream, b);
+ break;
+ }
+ }
+ else if (b->length == 0) { /* zero length data */
+ APR_BUCKET_REMOVE(b);
+ apr_bucket_destroy(b);
+ }
+ b = e;
+ }
+ }
+ H2_STREAM_OUT_LOG(APLOG_TRACE2, stream, "out_buffer, after receive");
+
+cleanup:
+ return rv;
+}
+
+static int bucket_pass_to_c1(apr_bucket *b)
+{
+#if AP_HAS_RESPONSE_BUCKETS
+ return !AP_BUCKET_IS_RESPONSE(b)
+ && !AP_BUCKET_IS_HEADERS(b)
+ && !APR_BUCKET_IS_EOS(b);
+#else
+ return !H2_BUCKET_IS_HEADERS(b) && !APR_BUCKET_IS_EOS(b);
+#endif
+}
+
+apr_status_t h2_stream_read_to(h2_stream *stream, apr_bucket_brigade *bb,
+ apr_off_t *plen, int *peos)
+{
+ apr_status_t rv = APR_SUCCESS;
+
+ if (stream->rst_error) {
+ return APR_ECONNRESET;
+ }
+ rv = h2_append_brigade(bb, stream->out_buffer, plen, peos, bucket_pass_to_c1);
+ if (APR_SUCCESS == rv && !*peos && !*plen) {
+ rv = APR_EAGAIN;
+ }
+ return rv;
+}
+
+static apr_status_t stream_do_trailers(h2_stream *stream)
+{
+ conn_rec *c1 = stream->session->c1;
+ int ngrv;
+ h2_ngheader *nh = NULL;
+ apr_bucket *b, *e;
+#if AP_HAS_RESPONSE_BUCKETS
+ ap_bucket_headers *headers = NULL;
+#else
+ h2_headers *headers = NULL;
+#endif
+ apr_status_t rv;
+
+ ap_assert(stream->response);
+ ap_assert(stream->out_buffer);
+
+ b = APR_BRIGADE_FIRST(stream->out_buffer);
+ while (b != APR_BRIGADE_SENTINEL(stream->out_buffer)) {
+ e = APR_BUCKET_NEXT(b);
+ if (APR_BUCKET_IS_METADATA(b)) {
+#if AP_HAS_RESPONSE_BUCKETS
+ if (AP_BUCKET_IS_HEADERS(b)) {
+ headers = b->data;
+#else /* AP_HAS_RESPONSE_BUCKETS */
+ if (H2_BUCKET_IS_HEADERS(b)) {
+ headers = h2_bucket_headers_get(b);
+#endif /* else AP_HAS_RESPONSE_BUCKETS */
+ APR_BUCKET_REMOVE(b);
+ apr_bucket_destroy(b);
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c1,
+ H2_STRM_MSG(stream, "process trailers"));
+ break;
+ }
+ else if (APR_BUCKET_IS_EOS(b)) {
+ break;
+ }
+ }
+ else {
+ break;
+ }
+ b = e;
+ }
+
+ if (!headers) {
+ rv = APR_EAGAIN;
+ goto cleanup;
+ }
+
+ rv = h2_res_create_ngtrailer(&nh, stream->pool, headers);
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, rv, c1,
+ H2_STRM_LOG(APLOGNO(03072), stream, "submit %d trailers"),
+ (int)nh->nvlen);
+ if (APR_SUCCESS != rv) {
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, rv, c1,
+ H2_STRM_LOG(APLOGNO(10024), stream, "invalid trailers"));
+ h2_stream_rst(stream, NGHTTP2_PROTOCOL_ERROR);
+ goto cleanup;
+ }
+
+ ngrv = nghttp2_submit_trailer(stream->session->ngh2, stream->id, nh->nv, nh->nvlen);
+ if (nghttp2_is_fatal(ngrv)) {
+ rv = APR_EGENERAL;
+ h2_session_dispatch_event(stream->session,
+ H2_SESSION_EV_PROTO_ERROR, ngrv, nghttp2_strerror(rv));
+ ap_log_cerror(APLOG_MARK, APLOG_ERR, rv, c1,
+ APLOGNO(02940) "submit_response: %s",
+ nghttp2_strerror(rv));
+ }
+ stream->sent_trailers = 1;
+
+cleanup:
+ return rv;
+}
+
+#if AP_HAS_RESPONSE_BUCKETS
+apr_status_t h2_stream_submit_pushes(h2_stream *stream, ap_bucket_response *response)
+#else
+apr_status_t h2_stream_submit_pushes(h2_stream *stream, h2_headers *response)
+#endif
+{
+ apr_status_t status = APR_SUCCESS;
+ apr_array_header_t *pushes;
+ int i;
+
+ pushes = h2_push_collect_update(stream, stream->request, response);
+ if (pushes && !apr_is_empty_array(pushes)) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, stream->session->c1,
+ H2_STRM_MSG(stream, "found %d push candidates"),
+ pushes->nelts);
+ for (i = 0; i < pushes->nelts; ++i) {
+ h2_push *push = APR_ARRAY_IDX(pushes, i, h2_push*);
+ h2_stream *s = h2_session_push(stream->session, stream, push);
+ if (!s) {
+ status = APR_ECONNRESET;
+ break;
+ }
+ }
+ }
+ return status;
+}
+
+apr_table_t *h2_stream_get_trailers(h2_stream *stream)
+{
+ return NULL;
+}
+
+#if AP_HAS_RESPONSE_BUCKETS
+const h2_priority *h2_stream_get_priority(h2_stream *stream,
+ ap_bucket_response *response)
+#else
+const h2_priority *h2_stream_get_priority(h2_stream *stream,
+ h2_headers *response)
+#endif
+{
+ if (response && stream->initiated_on) {
+ const char *ctype = apr_table_get(response->headers, "content-type");
+ if (ctype) {
+ /* FIXME: Not good enough, config needs to come from request->server */
+ return h2_cconfig_get_priority(stream->session->c1, ctype);
+ }
+ }
+ return NULL;
+}
+
+int h2_stream_is_ready(h2_stream *stream)
+{
+ /* Have we sent a response or do we have the response in our buffer? */
+ if (stream->response) {
+ return 1;
+ }
+ else if (stream->out_buffer && get_first_response_bucket(stream->out_buffer)) {
+ return 1;
+ }
+ return 0;
+}
+
+int h2_stream_is_at(const h2_stream *stream, h2_stream_state_t state)
+{
+ return stream->state == state;
+}
+
+int h2_stream_is_at_or_past(const h2_stream *stream, h2_stream_state_t state)
+{
+ switch (state) {
+ case H2_SS_IDLE:
+ return 1; /* by definition */
+ case H2_SS_RSVD_R: /*fall through*/
+ case H2_SS_RSVD_L: /*fall through*/
+ case H2_SS_OPEN:
+ return stream->state == state || stream->state >= H2_SS_OPEN;
+ case H2_SS_CLOSED_R: /*fall through*/
+ case H2_SS_CLOSED_L: /*fall through*/
+ case H2_SS_CLOSED:
+ return stream->state == state || stream->state >= H2_SS_CLOSED;
+ case H2_SS_CLEANUP:
+ return stream->state == state;
+ default:
+ return 0;
+ }
+}
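+/* For illustration, assuming the h2_stream_state_t values in h2.h are
+ * ordered IDLE < RSVD_* < OPEN < CLOSED_* < CLOSED < CLEANUP: a stream
+ * in H2_SS_CLOSED_R is "at or past" H2_SS_OPEN, but not yet "at or past"
+ * H2_SS_CLOSED. */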
+
+apr_status_t h2_stream_in_consumed(h2_stream *stream, apr_off_t amount)
+{
+ h2_session *session = stream->session;
+
+ if (amount > 0) {
+ apr_off_t consumed = amount;
+
+ while (consumed > 0) {
+ int len = (consumed > INT_MAX)? INT_MAX : (int)consumed;
+ nghttp2_session_consume(session->ngh2, stream->id, len);
+ consumed -= len;
+ }
+
+#ifdef H2_NG2_LOCAL_WIN_SIZE
+ if (1) {
+ int cur_size = nghttp2_session_get_stream_local_window_size(
+ session->ngh2, stream->id);
+ int win = stream->in_window_size;
+ int thigh = win * 8/10;
+ int tlow = win * 2/10;
+ const int win_max = 2*1024*1024;
+ const int win_min = 32*1024;
+
+            /* Work in progress, we should probably add directives for these
+             * values once this stabilizes somewhat. The general idea is
+             * to adapt stream window sizes when the input window changes
+             * a) very quickly (< a good RTT) from full to empty, or
+             * b) only a little bit (> a bad RTT),
+             * where in case a) the window grows and in case b) it shrinks again.
+             */
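+            /* Illustration with the constants above: for win = 128k,
+             * thigh is ~102k and tlow is ~25k. When a single consumption
+             * report exceeds thigh and the available window is above thigh
+             * again within < 40ms of the last write, win grows by 64k
+             * (capped at 2MB). When report and window both stay below tlow
+             * for > 700ms, win shrinks by 32k (floored at 32k). */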
+ if (cur_size > thigh && amount > thigh && win < win_max) {
+ /* almost empty again with one reported consumption, how
+ * long did this take? */
+ long ms = apr_time_msec(apr_time_now() - stream->in_last_write);
+ if (ms < 40) {
+ win = H2MIN(win_max, win + (64*1024));
+ }
+ }
+ else if (cur_size < tlow && amount < tlow && win > win_min) {
+ /* staying full, for how long already? */
+ long ms = apr_time_msec(apr_time_now() - stream->in_last_write);
+ if (ms > 700) {
+ win = H2MAX(win_min, win - (32*1024));
+ }
+ }
+
+ if (win != stream->in_window_size) {
+ stream->in_window_size = win;
+ nghttp2_session_set_local_window_size(session->ngh2,
+ NGHTTP2_FLAG_NONE, stream->id, win);
+ }
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, session->c1,
+ H2_STRM_MSG(stream, "consumed %ld bytes, window now %d/%d"),
+ (long)amount, cur_size, stream->in_window_size);
+ }
+#endif /* #ifdef H2_NG2_LOCAL_WIN_SIZE */
+ }
+ return APR_SUCCESS;
+}
+
+static apr_off_t output_data_buffered(h2_stream *stream, int *peos, int *pheader_blocked)
+{
+ /* How much data do we have in our buffers that we can write? */
+ apr_off_t buf_len = 0;
+ apr_bucket *b;
+
+ *peos = *pheader_blocked = 0;
+ if (stream->out_buffer) {
+ b = APR_BRIGADE_FIRST(stream->out_buffer);
+ while (b != APR_BRIGADE_SENTINEL(stream->out_buffer)) {
+ if (APR_BUCKET_IS_METADATA(b)) {
+ if (APR_BUCKET_IS_EOS(b)) {
+ *peos = 1;
+ break;
+ }
+#if AP_HAS_RESPONSE_BUCKETS
+ else if (AP_BUCKET_IS_RESPONSE(b)) {
+ break;
+ }
+ else if (AP_BUCKET_IS_HEADERS(b)) {
+ *pheader_blocked = 1;
+ break;
+ }
+#else
+ else if (H2_BUCKET_IS_HEADERS(b)) {
+ *pheader_blocked = 1;
+ break;
+ }
+#endif
+ }
+ else {
+ buf_len += b->length;
+ }
+ b = APR_BUCKET_NEXT(b);
+ }
+ }
+ return buf_len;
+}
+
+static ssize_t stream_data_cb(nghttp2_session *ng2s,
+ int32_t stream_id,
+ uint8_t *buf,
+ size_t length,
+ uint32_t *data_flags,
+ nghttp2_data_source *source,
+ void *puser)
+{
+ h2_session *session = (h2_session *)puser;
+ conn_rec *c1 = session->c1;
+ apr_off_t buf_len;
+ int eos, header_blocked;
+ apr_status_t rv;
+ h2_stream *stream;
+
+    /* nghttp2 wants to send more DATA for the stream.
+     * We should have submitted the final response by this time,
+     * after receiving output via stream_do_responses(). */
+ ap_assert(session);
+ (void)ng2s;
+ (void)buf;
+ (void)source;
+ stream = nghttp2_session_get_stream_user_data(session->ngh2, stream_id);
+
+ if (!stream) {
+ ap_log_cerror(APLOG_MARK, APLOG_ERR, 0, c1,
+ APLOGNO(02937)
+ H2_SSSN_STRM_MSG(session, stream_id, "data_cb, stream not found"));
+ return NGHTTP2_ERR_CALLBACK_FAILURE;
+ }
+ if (!stream->output || !stream->response || !stream->out_buffer) {
+ return NGHTTP2_ERR_DEFERRED;
+ }
+ if (stream->rst_error) {
+ return NGHTTP2_ERR_DEFERRED;
+ }
+ if (h2_c1_io_needs_flush(&session->io)) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c1,
+ H2_SSSN_STRM_MSG(session, stream_id, "suspending on c1 out needs flush"));
+ h2_stream_dispatch(stream, H2_SEV_OUT_C1_BLOCK);
+ return NGHTTP2_ERR_DEFERRED;
+ }
+
+    /* determine how much we'd like to send. We cannot send more than
+     * is requested. But we can reduce the size in case the master
+     * connection operates in smaller chunks (TLS warmup). */
+ if (stream->session->io.write_size > 0) {
+ apr_size_t chunk_len = stream->session->io.write_size - H2_FRAME_HDR_LEN;
+ if (length > chunk_len) {
+ length = chunk_len;
+ }
+ }
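+    /* For illustration, assuming the usual 9 octet HTTP/2 frame header
+     * for H2_FRAME_HDR_LEN: with a c1 write size of 1300 bytes, DATA
+     * payloads get limited to 1291 bytes here. */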
+
+ /* How much data do we have in our buffers that we can write?
+ * if not enough, receive more. */
+ buf_len = output_data_buffered(stream, &eos, &header_blocked);
+ if (buf_len < (apr_off_t)length && !eos
+ && !header_blocked && !stream->rst_error) {
+ /* read more? */
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, c1,
+ H2_SSSN_STRM_MSG(session, stream_id,
+ "need more (read len=%ld, %ld in buffer)"),
+ (long)length, (long)buf_len);
+ rv = buffer_output_receive(stream);
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, rv, c1,
+ H2_SSSN_STRM_MSG(session, stream_id,
+ "buffer_output_received"));
+ if (APR_STATUS_IS_EAGAIN(rv)) {
+ /* currently, no more is available */
+ }
+ else if (APR_SUCCESS == rv) {
+ /* got some, re-assess */
+ buf_len = output_data_buffered(stream, &eos, &header_blocked);
+ }
+ else if (APR_EOF == rv) {
+ if (!stream->output_eos) {
+                /* Seeing APR_EOF without having received an EOS bucket before
+                 * indicates that the stream output is incomplete. Commonly, we
+                 * expect an ERROR bucket to have been generated. But faulty
+                 * handlers may not have generated one.
+                 * We need to RST the stream because otherwise the client
+                 * thinks it is all fine. */
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, rv, c1,
+ H2_SSSN_STRM_MSG(session, stream_id, "rst stream"));
+ h2_stream_rst(stream, H2_ERR_INTERNAL_ERROR);
+ return NGHTTP2_ERR_CALLBACK_FAILURE;
+ }
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, rv, c1,
+ H2_SSSN_STRM_MSG(session, stream_id,
+ "eof on receive (read len=%ld, %ld in buffer)"),
+ (long)length, (long)buf_len);
+ eos = 1;
+ rv = APR_SUCCESS;
+ }
+ else {
+ ap_log_cerror(APLOG_MARK, APLOG_ERR, rv, c1,
+ H2_STRM_LOG(APLOGNO(02938), stream, "data_cb, reading data"));
+ return NGHTTP2_ERR_CALLBACK_FAILURE;
+ }
+ }
+
+ if (stream->rst_error) {
+ return NGHTTP2_ERR_DEFERRED;
+ }
+
+ if (buf_len == 0 && header_blocked) {
+ rv = stream_do_trailers(stream);
+ if (APR_SUCCESS != rv && !APR_STATUS_IS_EAGAIN(rv)) {
+ ap_log_cerror(APLOG_MARK, APLOG_ERR, rv, c1,
+ H2_STRM_LOG(APLOGNO(10300), stream,
+ "data_cb, error processing trailers"));
+ return NGHTTP2_ERR_CALLBACK_FAILURE;
+ }
+ length = 0;
+ eos = 0;
+ }
+ else if (buf_len > (apr_off_t)length) {
+ eos = 0; /* Any EOS we have in the buffer does not apply yet */
+ }
+ else {
+ length = (size_t)buf_len;
+ }
+
+ if (length) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, c1,
+ H2_STRM_MSG(stream, "data_cb, sending len=%ld, eos=%d"),
+ (long)length, eos);
+ *data_flags |= NGHTTP2_DATA_FLAG_NO_COPY;
+ }
+ else if (!eos && !stream->sent_trailers) {
+ /* We have not reached the end of DATA yet, DEFER sending */
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, c1,
+ H2_STRM_LOG(APLOGNO(03071), stream, "data_cb, suspending"));
+ return NGHTTP2_ERR_DEFERRED;
+ }
+
+ if (eos) {
+ *data_flags |= NGHTTP2_DATA_FLAG_EOF;
+ }
+ return length;
+}
+
+static apr_status_t stream_do_response(h2_stream *stream)
+{
+ conn_rec *c1 = stream->session->c1;
+ apr_status_t rv = APR_EAGAIN;
+ int ngrv, is_empty = 0;
+ h2_ngheader *nh = NULL;
+ apr_bucket *b, *e;
+#if AP_HAS_RESPONSE_BUCKETS
+ ap_bucket_response *resp = NULL;
+#else
+ h2_headers *resp = NULL;
+#endif
+ nghttp2_data_provider provider, *pprovider = NULL;
+
+ ap_assert(!stream->response);
+ ap_assert(stream->out_buffer);
+
+ b = APR_BRIGADE_FIRST(stream->out_buffer);
+ while (b != APR_BRIGADE_SENTINEL(stream->out_buffer)) {
+ e = APR_BUCKET_NEXT(b);
+ if (APR_BUCKET_IS_METADATA(b)) {
+#if AP_HAS_RESPONSE_BUCKETS
+ if (AP_BUCKET_IS_RESPONSE(b)) {
+ resp = b->data;
+#else /* AP_HAS_RESPONSE_BUCKETS */
+ if (H2_BUCKET_IS_HEADERS(b)) {
+ resp = h2_bucket_headers_get(b);
+#endif /* else AP_HAS_RESPONSE_BUCKETS */
+ APR_BUCKET_REMOVE(b);
+ apr_bucket_destroy(b);
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c1,
+ H2_STRM_MSG(stream, "process response %d"),
+ resp->status);
+ is_empty = (e != APR_BRIGADE_SENTINEL(stream->out_buffer)
+ && APR_BUCKET_IS_EOS(e));
+ break;
+ }
+ else if (APR_BUCKET_IS_EOS(b)) {
+ h2_stream_rst(stream, H2_ERR_INTERNAL_ERROR);
+ rv = APR_EINVAL;
+ goto cleanup;
+ }
+ else if (AP_BUCKET_IS_ERROR(b)) {
+ stream_do_error_bucket(stream, b);
+ rv = APR_EINVAL;
+ goto cleanup;
+ }
+ }
+ else {
+ /* data buckets before response headers, an error */
+ h2_stream_rst(stream, H2_ERR_INTERNAL_ERROR);
+ rv = APR_EINVAL;
+ goto cleanup;
+ }
+ b = e;
+ }
+
+ if (!resp) {
+ rv = APR_EAGAIN;
+ goto cleanup;
+ }
+
+ if (resp->status < 100) {
+ h2_stream_rst(stream, resp->status);
+ goto cleanup;
+ }
+
+ if (resp->status == HTTP_FORBIDDEN && resp->notes) {
+ const char *cause = apr_table_get(resp->notes, "ssl-renegotiate-forbidden");
+ if (cause) {
+ /* This request triggered a TLS renegotiation that is not allowed
+ * in HTTP/2. Tell the client that it should use HTTP/1.1 for this.
+ */
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, resp->status, c1,
+ H2_STRM_LOG(APLOGNO(03061), stream,
+ "renegotiate forbidden, cause: %s"), cause);
+ h2_stream_rst(stream, H2_ERR_HTTP_1_1_REQUIRED);
+ goto cleanup;
+ }
+ }
+
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, c1,
+ H2_STRM_LOG(APLOGNO(03073), stream,
+ "submit response %d"), resp->status);
+
+    /* If this stream is not a pushed one itself,
+     * and HTTP/2 server push is enabled here,
+     * and the response HTTP status is not something >= 400,
+     * and the remote side has pushing enabled,
+     * -> find and perform any pushes on this stream
+     *    *before* we submit the stream response itself.
+     * This helps clients avoid opening new streams for Link
+     * header resources that get pushed right afterwards.
+     *
+     * *) the response code is relevant, as we do not want to
+     *    make pushes on 401 or 403 codes and friends.
+     *    And if we see a 304, we do not push either,
+     *    as the client, having this resource in its cache, might
+     *    have the pushed ones as well.
+     */
+ if (!stream->initiated_on
+ && !stream->response
+ && stream->request && stream->request->method
+ && !strcmp("GET", stream->request->method)
+ && (resp->status < 400)
+ && (resp->status != 304)
+ && h2_session_push_enabled(stream->session)) {
+        /* PUSH is possible and enabled on the server; unless the request
+         * denies it, submit resources to push */
+ const char *s = apr_table_get(resp->notes, H2_PUSH_MODE_NOTE);
+ if (!s || strcmp(s, "0")) {
+ h2_stream_submit_pushes(stream, resp);
+ }
+ }
+
+ if (!stream->pref_priority) {
+ stream->pref_priority = h2_stream_get_priority(stream, resp);
+ }
+ h2_session_set_prio(stream->session, stream, stream->pref_priority);
+
+ if (resp->status == 103
+ && !h2_config_sgeti(stream->session->s, H2_CONF_EARLY_HINTS)) {
+        /* suppress sending this to the client; it might have triggered
+         * pushes and has served its purpose nevertheless */
+ rv = APR_SUCCESS;
+ goto cleanup;
+ }
+ if (resp->status >= 200) {
+ stream->response = resp;
+ }
+
+ if (!is_empty) {
+ memset(&provider, 0, sizeof(provider));
+ provider.source.fd = stream->id;
+ provider.read_callback = stream_data_cb;
+ pprovider = &provider;
+ }
+
+ rv = h2_res_create_ngheader(&nh, stream->pool, resp);
+ if (APR_SUCCESS != rv) {
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, rv, c1,
+ H2_STRM_LOG(APLOGNO(10025), stream, "invalid response"));
+ h2_stream_rst(stream, NGHTTP2_PROTOCOL_ERROR);
+ goto cleanup;
+ }
+
+ ngrv = nghttp2_submit_response(stream->session->ngh2, stream->id,
+ nh->nv, nh->nvlen, pprovider);
+ if (nghttp2_is_fatal(ngrv)) {
+ rv = APR_EGENERAL;
+ h2_session_dispatch_event(stream->session,
+ H2_SESSION_EV_PROTO_ERROR, ngrv, nghttp2_strerror(rv));
+ ap_log_cerror(APLOG_MARK, APLOG_ERR, rv, c1,
+ APLOGNO(10402) "submit_response: %s",
+ nghttp2_strerror(rv));
+ goto cleanup;
+ }
+
+ if (stream->initiated_on) {
+ ++stream->session->pushes_submitted;
+ }
+ else {
+ ++stream->session->responses_submitted;
+ }
+
+cleanup:
+ return rv;
+}
+
+static void stream_do_responses(h2_stream *stream)
+{
+ h2_session *session = stream->session;
+ conn_rec *c1 = session->c1;
+ apr_status_t rv;
+
+ ap_assert(!stream->response);
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, c1,
+ H2_STRM_MSG(stream, "do_response"));
+ rv = buffer_output_receive(stream);
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, rv, c1,
+ H2_SSSN_STRM_MSG(session, stream->id,
+ "buffer_output_received2"));
+ if (APR_SUCCESS != rv && APR_EAGAIN != rv) {
+ h2_stream_rst(stream, NGHTTP2_PROTOCOL_ERROR);
+ }
+ else {
+ /* process all headers sitting at the buffer head. */
+ do {
+ rv = stream_do_response(stream);
+ } while (APR_SUCCESS == rv
+ && !stream->rst_error
+ && !stream->response);
+ }
+}
+
+void h2_stream_on_output_change(h2_stream *stream)
+{
+ conn_rec *c1 = stream->session->c1;
+ apr_status_t rv = APR_EAGAIN;
+
+    /* stream->pout_recv_write signalled a change. Check what has happened,
+     * read from it and act on seeing a response/data. */
+ if (!stream->output) {
+ /* c2 has not assigned the output beam to the stream (yet). */
+ ap_log_cerror(APLOG_MARK, APLOG_WARNING, 0, c1,
+ H2_STRM_MSG(stream, "read_output, no output beam registered"));
+ }
+ else if (h2_stream_is_at_or_past(stream, H2_SS_CLOSED)) {
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, rv, c1,
+ H2_STRM_LOG(APLOGNO(10301), stream, "already closed"));
+ }
+ else if (h2_stream_is_at(stream, H2_SS_CLOSED_L)) {
+        /* We have delivered a response to a stream that was not closed
+         * by the client. This could be a POST with a body that we did not
+         * read, and we need to RST_STREAM to end it. */
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, c1,
+ H2_STRM_LOG(APLOGNO(10313), stream, "remote close missing"));
+ h2_stream_rst(stream, H2_ERR_NO_ERROR);
+ }
+ else {
+ /* stream is not closed, a change in output happened. There are
+ * two modes of operation here:
+ * 1) the final response has been submitted. nghttp2 is invoking
+ * stream_data_cb() to progress the stream. This handles DATA,
+ * trailers, EOS and ERRORs.
+ * When stream_data_cb() runs out of things to send, it returns
+ * NGHTTP2_ERR_DEFERRED and nghttp2 *suspends* further processing
+ * until we tell it to resume.
+         * 2) We have not seen the *final* response yet. The stream cannot
+         *    send any response DATA. The nghttp2 stream_data_cb() is not
+         *    invoked. We need to receive output, expecting not DATA but
+         *    RESPONSEs (interim responses may arrive), and submit those. On
+ * the final response, nghttp2 will start calling stream_data_cb().
+ */
+ if (stream->response) {
+ nghttp2_session_resume_data(stream->session->ngh2, stream->id);
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, c1,
+ H2_STRM_MSG(stream, "resumed"));
+ }
+ else {
+ stream_do_responses(stream);
+ if (!stream->rst_error) {
+ nghttp2_session_resume_data(stream->session->ngh2, stream->id);
+ }
+ }
+ }
+}
+
+void h2_stream_on_input_change(h2_stream *stream)
+{
+ ap_assert(stream->input);
+ h2_beam_report_consumption(stream->input);
+ if (h2_stream_is_at(stream, H2_SS_CLOSED_L)
+ && !h2_mplx_c1_stream_is_running(stream->session->mplx, stream)) {
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, stream->session->c1,
+ H2_STRM_LOG(APLOGNO(10026), stream, "remote close missing"));
+ h2_stream_rst(stream, H2_ERR_NO_ERROR);
+ }
+}
diff --git a/modules/http2/h2_stream.h b/modules/http2/h2_stream.h
new file mode 100644
index 0000000..695d56a
--- /dev/null
+++ b/modules/http2/h2_stream.h
@@ -0,0 +1,326 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __mod_h2__h2_stream__
+#define __mod_h2__h2_stream__
+
+#include <http_protocol.h>
+
+#include "h2.h"
+#include "h2_headers.h"
+
+/**
+ * An HTTP/2 stream, e.g. a client request+response in HTTP/1.1 terms.
+ *
+ * A stream always belongs to a h2_session, the one managing the
+ * connection to the client. The h2_session writes to the h2_stream,
+ * adding HEADERS and DATA and finally an EOS. When headers are done,
+ * h2_stream is scheduled for handling, which is expected to produce
+ * h2_headers/RESPONSE buckets.
+ *
+ * The h2_headers may be followed by more h2_headers (interim responses) and
+ * by DATA frames read from the h2_stream until EOS is reached. Trailers
+ * are sent when a last h2_headers is received. This always closes the stream
+ * output.
+ */
+
+struct h2_mplx;
+struct h2_priority;
+struct h2_request;
+struct h2_session;
+struct h2_bucket_beam;
+
+typedef struct h2_stream h2_stream;
+
+typedef void h2_stream_state_cb(void *ctx, h2_stream *stream);
+typedef void h2_stream_event_cb(void *ctx, h2_stream *stream,
+ h2_stream_event_t ev);
+
+/**
+ * Callback structure for events and stream state transitions
+ */
+typedef struct h2_stream_monitor {
+ void *ctx;
+ h2_stream_state_cb *on_state_enter; /* called when a state is entered */
+ h2_stream_state_cb *on_state_invalid; /* called when an invalid state change
+ was detected */
+ h2_stream_event_cb *on_state_event; /* called right before the given event
+                                           results in a new stream state  */
+ h2_stream_event_cb *on_event; /* called for events that do not
+ trigger a state change */
+} h2_stream_monitor;
+
+struct h2_stream {
+ int id; /* http2 stream identifier */
+ int initiated_on; /* initiating stream id (PUSH) or 0 */
+ apr_pool_t *pool; /* the memory pool for this stream */
+ struct h2_session *session; /* the session this stream belongs to */
+ h2_stream_state_t state; /* state of this stream */
+
+ apr_time_t created; /* when stream was created */
+
+ const struct h2_request *request; /* the request made in this stream */
+ struct h2_request *rtmp; /* request being assembled */
+ apr_table_t *trailers_in; /* optional, incoming trailers */
+ int request_headers_added; /* number of request headers added */
+
+#if AP_HAS_RESPONSE_BUCKETS
+ ap_bucket_response *response; /* the final, non-interim response or NULL */
+#else
+ struct h2_headers *response; /* the final, non-interim response or NULL */
+#endif
+
+ struct h2_bucket_beam *input;
+ apr_bucket_brigade *in_buffer;
+ int in_window_size;
+ apr_time_t in_last_write;
+
+ struct h2_bucket_beam *output;
+ apr_bucket_brigade *out_buffer;
+
+ int rst_error; /* stream error for RST_STREAM */
+ unsigned int aborted : 1; /* was aborted */
+ unsigned int scheduled : 1; /* stream has been scheduled */
+ unsigned int input_closed : 1; /* no more request data/trailers coming */
+ unsigned int push_policy; /* which push policy to use for this request */
+ unsigned int sent_trailers : 1; /* trailers have been submitted */
+ unsigned int output_eos : 1; /* output EOS in buffer/sent */
+
+ conn_rec *c2; /* connection processing stream */
+
+ const h2_priority *pref_priority; /* preferred priority for this stream */
+ apr_off_t out_frames; /* # of frames sent out */
+ apr_off_t out_frame_octets; /* # of RAW frame octets sent out */
+ apr_off_t out_data_frames; /* # of DATA frames sent */
+ apr_off_t out_data_octets; /* # of DATA octets (payload) sent */
+ apr_off_t in_data_frames; /* # of DATA frames received */
+ apr_off_t in_data_octets; /* # of DATA octets (payload) received */
+ apr_off_t in_trailer_octets; /* # of HEADER octets (payload) received in trailers */
+
+ h2_stream_monitor *monitor; /* optional monitor for stream states */
+};
+
+
+#define H2_STREAM_RST(s, def) (s->rst_error? s->rst_error : (def))
+
+/**
+ * Create a stream in H2_SS_IDLE state.
+ * @param id the stream identifier
+ * @param pool the memory pool to use for this stream
+ * @param session the session this stream belongs to
+ * @param monitor an optional monitor to be called for events and
+ *                state transitions
+ * @param initiated_on the id of the stream this one was initiated on (PUSH)
+ *
+ * @return the newly opened stream
+ */
+h2_stream *h2_stream_create(int id, apr_pool_t *pool,
+ struct h2_session *session,
+ h2_stream_monitor *monitor,
+ int initiated_on);
+
+/**
+ * Destroy memory pool if still owned by the stream.
+ */
+void h2_stream_destroy(h2_stream *stream);
+
+/**
+ * Perform any late initialization before stream starts processing.
+ */
+apr_status_t h2_stream_prepare_processing(h2_stream *stream);
+
+/*
+ * Set a new monitor for this stream, replacing any existing one. Can
+ * be called with NULL to have no monitor installed.
+ */
+void h2_stream_set_monitor(h2_stream *stream, h2_stream_monitor *monitor);
+
+/**
+ * Dispatch (handle) an event on the given stream.
+ * @param stream the stream the event happened on
+ * @param ev the type of event
+ */
+void h2_stream_dispatch(h2_stream *stream, h2_stream_event_t ev);
+
+/**
+ * Determine if stream is at given state.
+ * @param stream the stream to check
+ * @param state the state to look for
+ * @return != 0 iff stream is at given state.
+ */
+int h2_stream_is_at(const h2_stream *stream, h2_stream_state_t state);
+
+/**
+ * Determine if the stream has reached the given state or is past it.
+ * @param stream the stream to check
+ * @param state the state to look for
+ * @return != 0 iff stream is at or past given state.
+ */
+int h2_stream_is_at_or_past(const h2_stream *stream, h2_stream_state_t state);
+
+/**
+ * Cleanup references into request processing.
+ *
+ * @param stream the stream to cleanup
+ */
+void h2_stream_cleanup(h2_stream *stream);
+
+/**
+ * Notify the stream that amount bytes have been consumed of its input
+ * since the last invocation of this method (delta amount).
+ */
+apr_status_t h2_stream_in_consumed(h2_stream *stream, apr_off_t amount);
+
+/**
+ * Set complete stream headers from given h2_request.
+ *
+ * @param stream stream to write request to
+ * @param r the request with all the meta data
+ */
+void h2_stream_set_request(h2_stream *stream, const h2_request *r);
+
+/**
+ * Set complete stream header from given request_rec.
+ *
+ * @param stream stream to write request to
+ * @param r the request with all the meta data
+ * @param eos != 0 iff stream input is closed
+ */
+apr_status_t h2_stream_set_request_rec(h2_stream *stream,
+ request_rec *r, int eos);
+
+/*
+ * Add an HTTP/2 header (including pseudo headers) or trailer
+ * to the given stream, depending on stream state.
+ *
+ * @param stream stream to write the header to
+ * @param name the name of the HTTP/2 header
+ * @param nlen the number of characters in name
+ * @param value the header value
+ * @param vlen the number of characters in value
+ */
+apr_status_t h2_stream_add_header(h2_stream *stream,
+ const char *name, size_t nlen,
+ const char *value, size_t vlen);
+
+/* End the construction of request headers */
+apr_status_t h2_stream_end_headers(h2_stream *stream, int eos, size_t raw_bytes);
+
+
+apr_status_t h2_stream_send_frame(h2_stream *stream, int frame_type, int flags, size_t frame_len);
+apr_status_t h2_stream_recv_frame(h2_stream *stream, int frame_type, int flags, size_t frame_len);
+
+/*
+ * Process a frame of received DATA.
+ *
+ * @param stream stream to write the data to
+ * @param flags the frame flags
+ * @param data the beginning of the bytes to write
+ * @param len the number of bytes to write
+ */
+apr_status_t h2_stream_recv_DATA(h2_stream *stream, uint8_t flags,
+ const uint8_t *data, size_t len);
+
+/**
+ * Reset the stream. Stream write/reads will return errors afterwards.
+ *
+ * @param stream the stream to reset
+ * @param error_code the HTTP/2 error code
+ */
+void h2_stream_rst(h2_stream *stream, int error_code);
+
+/**
+ * Stream input signals change. Take necessary actions.
+ * @param stream the stream whose input has changed
+ */
+void h2_stream_on_input_change(h2_stream *stream);
+
+/**
+ * Stream output signals change. Take necessary actions.
+ * @param stream the stream to read output for
+ */
+void h2_stream_on_output_change(h2_stream *stream);
+
+/**
+ * Read a maximum number of bytes into the bucket brigade.
+ *
+ * @param stream the stream to read from
+ * @param bb the brigade to append output to
+ * @param plen (in-/out) max. number of bytes to append and on return actual
+ * number of bytes appended to brigade
+ * @param peos (out) != 0 iff end of stream has been reached while reading
+ * @return APR_SUCCESS if output information was computed successfully.
+ *         APR_EAGAIN if no data is available and end of stream has not been
+ * reached yet.
+ */
+apr_status_t h2_stream_read_to(h2_stream *stream, apr_bucket_brigade *bb,
+ apr_off_t *plen, int *peos);
+
+/**
+ * Get optional trailers for this stream, may be NULL. Meaningful
+ * results can only be expected when the end of the response body has
+ * been reached.
+ *
+ * @param stream to ask for trailers
+ * @return trailers or NULL
+ */
+apr_table_t *h2_stream_get_trailers(h2_stream *stream);
+
+/**
+ * Submit any server push promises on this stream and schedule
+ * the streams for these.
+ *
+ * @param stream the stream for which to submit
+ */
+#if AP_HAS_RESPONSE_BUCKETS
+apr_status_t h2_stream_submit_pushes(h2_stream *stream,
+ ap_bucket_response *response);
+#else
+apr_status_t h2_stream_submit_pushes(h2_stream *stream,
+ struct h2_headers *response);
+#endif
+
+/**
+ * Get priority information set for this stream.
+ */
+#if AP_HAS_RESPONSE_BUCKETS
+const struct h2_priority *h2_stream_get_priority(h2_stream *stream,
+ ap_bucket_response *response);
+#else
+const struct h2_priority *h2_stream_get_priority(h2_stream *stream,
+ struct h2_headers *response);
+#endif
+
+/**
+ * Return a textual representation of the stream state as in RFC 7540
+ * nomenclature, all caps, underscores.
+ */
+const char *h2_stream_state_str(const h2_stream *stream);
+
+/**
+ * Determine if stream is ready for submitting a response or a RST
+ * @param stream the stream to check
+ */
+int h2_stream_is_ready(h2_stream *stream);
+
+#define H2_STRM_MSG(s, msg) \
+ "h2_stream(%d-%lu-%d,%s): "msg, s->session->child_num, \
+ (unsigned long)s->session->id, s->id, h2_stream_state_str(s)
+
+#define H2_STRM_LOG(aplogno, s, msg) aplogno H2_STRM_MSG(s, msg)
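+/* For illustration, a log line built via H2_STRM_MSG() starts with
+ * something like "h2_stream(1-23-5,OPEN): ", i.e. the child number,
+ * session id, stream id and the current state name. */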
+
+#endif /* defined(__mod_h2__h2_stream__) */
diff --git a/modules/http2/h2_switch.c b/modules/http2/h2_switch.c
new file mode 100644
index 0000000..a30f27c
--- /dev/null
+++ b/modules/http2/h2_switch.c
@@ -0,0 +1,232 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <assert.h>
+
+#include <apr_strings.h>
+#include <apr_optional.h>
+#include <apr_optional_hooks.h>
+
+#include <httpd.h>
+#include <http_core.h>
+#include <http_config.h>
+#include <http_connection.h>
+#include <http_protocol.h>
+#include <http_ssl.h>
+#include <http_log.h>
+
+#include "h2_private.h"
+#include "h2.h"
+
+#include "h2_config.h"
+#include "h2_conn_ctx.h"
+#include "h2_c1.h"
+#include "h2_c2.h"
+#include "h2_protocol.h"
+#include "h2_switch.h"
+
+/*******************************************************************************
+ * Once per lifetime init, retrieve optional functions
+ */
+apr_status_t h2_switch_init(apr_pool_t *pool, server_rec *s)
+{
+ (void)pool;
+ ap_log_error(APLOG_MARK, APLOG_TRACE1, 0, s, "h2_switch init");
+
+ return APR_SUCCESS;
+}
+
+static int h2_protocol_propose(conn_rec *c, request_rec *r,
+ server_rec *s,
+ const apr_array_header_t *offers,
+ apr_array_header_t *proposals)
+{
+ int proposed = 0;
+ int is_tls = ap_ssl_conn_is_ssl(c);
+ const char **protos = is_tls? h2_protocol_ids_tls : h2_protocol_ids_clear;
+
+ if (!h2_mpm_supported()) {
+ return DECLINED;
+ }
+
+ if (strcmp(AP_PROTOCOL_HTTP1, ap_get_protocol(c))) {
+        /* We do not know how to switch from anything other than http/1.1.
+ */
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, c, APLOGNO(03083)
+ "protocol switch: current proto != http/1.1, declined");
+ return DECLINED;
+ }
+
+ if (!h2_protocol_is_acceptable_c1(c, r, 0)) {
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, c, APLOGNO(03084)
+ "protocol propose: connection requirements not met");
+ return DECLINED;
+ }
+
+ if (r) {
+ /* So far, this indicates an HTTP/1 Upgrade header initiated
+ * protocol switch. For that, the HTTP2-Settings header needs
+ * to be present and valid for the connection.
+ */
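+        /* For illustration, an h2c upgrade request (RFC 7540, ch. 3.2)
+         * carries headers like:
+         *   GET / HTTP/1.1
+         *   Host: server.example.com
+         *   Connection: Upgrade, HTTP2-Settings
+         *   Upgrade: h2c
+         *   HTTP2-Settings: <base64url encoding of HTTP/2 SETTINGS payload>
+         */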
+ const char *p;
+
+ if (!h2_c1_can_upgrade(r)) {
+ return DECLINED;
+ }
+
+ p = apr_table_get(r->headers_in, "HTTP2-Settings");
+ if (!p) {
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(03085)
+ "upgrade without HTTP2-Settings declined");
+ return DECLINED;
+ }
+
+ p = apr_table_get(r->headers_in, "Connection");
+ if (!ap_find_token(r->pool, p, "http2-settings")) {
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(03086)
+ "upgrade without HTTP2-Settings declined");
+ return DECLINED;
+ }
+
+ /* We also allow switching only for requests that have no body.
+ */
+ p = apr_table_get(r->headers_in, "Content-Length");
+ if (p && strcmp(p, "0")) {
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(03087)
+ "upgrade with content-length: %s, declined", p);
+ return DECLINED;
+ }
+ }
+
+ while (*protos) {
+ /* Add all protocols we know (tls or clear) and that
+ * are part of the offerings (if there have been any).
+ */
+ if (!offers || ap_array_str_contains(offers, *protos)) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c,
+ "proposing protocol '%s'", *protos);
+ APR_ARRAY_PUSH(proposals, const char*) = *protos;
+ proposed = 1;
+ }
+ ++protos;
+ }
+ return proposed? DECLINED : OK;
+}
+
+#if AP_HAS_RESPONSE_BUCKETS
+static void remove_output_filters_below(ap_filter_t *f, ap_filter_type ftype)
+{
+ ap_filter_t *fnext;
+
+ while (f && f->frec->ftype < ftype) {
+ fnext = f->next;
+ ap_remove_output_filter(f);
+ f = fnext;
+ }
+}
+
+static void remove_input_filters_below(ap_filter_t *f, ap_filter_type ftype)
+{
+ ap_filter_t *fnext;
+
+ while (f && f->frec->ftype < ftype) {
+ fnext = f->next;
+ ap_remove_input_filter(f);
+ f = fnext;
+ }
+}
+#endif
+
+static int h2_protocol_switch(conn_rec *c, request_rec *r, server_rec *s,
+ const char *protocol)
+{
+ int found = 0;
+ const char **protos = ap_ssl_conn_is_ssl(c)? h2_protocol_ids_tls : h2_protocol_ids_clear;
+ const char **p = protos;
+
+ (void)s;
+ if (!h2_mpm_supported()) {
+ return DECLINED;
+ }
+
+ while (*p) {
+ if (!strcmp(*p, protocol)) {
+ found = 1;
+ break;
+ }
+ p++;
+ }
+
+ if (found) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c,
+ "switching protocol to '%s'", protocol);
+ h2_conn_ctx_create_for_c1(c, s, protocol);
+
+ if (r != NULL) {
+ apr_status_t status;
+#if AP_HAS_RESPONSE_BUCKETS
+ /* Switching in the middle of a request means that
+ * we have to send out the response to this one in h2
+ * format. So we need to take over the connection
+ * and remove all old filters with type up to the
+             * CONNECTION/NETWORK ones.
+ */
+ remove_input_filters_below(r->input_filters, AP_FTYPE_CONNECTION);
+ remove_output_filters_below(r->output_filters, AP_FTYPE_CONNECTION);
+#else
+ /* Switching in the middle of a request means that
+ * we have to send out the response to this one in h2
+ * format. So we need to take over the connection
+ * right away.
+ */
+ ap_remove_input_filter_byhandle(r->input_filters, "http_in");
+ ap_remove_output_filter_byhandle(r->output_filters, "HTTP_HEADER");
+#endif
+ /* Ok, start an h2_conn on this one. */
+ status = h2_c1_setup(c, r, s);
+
+ if (status != APR_SUCCESS) {
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, status, r, APLOGNO(03088)
+ "session setup");
+ h2_conn_ctx_detach(c);
+ return !OK;
+ }
+
+ h2_c1_run(c);
+ }
+ return OK;
+ }
+
+ return DECLINED;
+}
+
+static const char *h2_protocol_get(const conn_rec *c)
+{
+ h2_conn_ctx_t *ctx;
+
+ if (c->master) {
+ c = c->master;
+ }
+ ctx = h2_conn_ctx_get(c);
+ return ctx? ctx->protocol : NULL;
+}
+
+void h2_switch_register_hooks(void)
+{
+ ap_hook_protocol_propose(h2_protocol_propose, NULL, NULL, APR_HOOK_MIDDLE);
+ ap_hook_protocol_switch(h2_protocol_switch, NULL, NULL, APR_HOOK_MIDDLE);
+ ap_hook_protocol_get(h2_protocol_get, NULL, NULL, APR_HOOK_MIDDLE);
+}
diff --git a/modules/http2/h2_switch.h b/modules/http2/h2_switch.h
new file mode 100644
index 0000000..7be8a23
--- /dev/null
+++ b/modules/http2/h2_switch.h
@@ -0,0 +1,30 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __mod_h2__h2_switch__
+#define __mod_h2__h2_switch__
+
+/*
+ * One time, post config initialization.
+ */
+apr_status_t h2_switch_init(apr_pool_t *pool, server_rec *s);
+
+/* Register apache hooks for protocol switching
+ */
+void h2_switch_register_hooks(void);
+
+
+#endif /* defined(__mod_h2__h2_switch__) */
diff --git a/modules/http2/h2_util.c b/modules/http2/h2_util.c
new file mode 100644
index 0000000..728cee9
--- /dev/null
+++ b/modules/http2/h2_util.c
@@ -0,0 +1,1929 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <assert.h>
+#include <apr_strings.h>
+#include <apr_thread_mutex.h>
+#include <apr_thread_cond.h>
+
+#include <httpd.h>
+#include <http_core.h>
+#include <http_log.h>
+#include <http_protocol.h>
+#include <http_request.h>
+
+#include <nghttp2/nghttp2.h>
+
+#include "h2.h"
+#include "h2_headers.h"
+#include "h2_util.h"
+
+/* Returns floor(log2(n)); equals log2(n) exactly iff n is a power of 2 */
+unsigned char h2_log2(int n)
+{
+ int lz = 0;
+ if (!n) {
+ return 0;
+ }
+ if (!(n & 0xffff0000u)) {
+ lz += 16;
+ n = (n << 16);
+ }
+ if (!(n & 0xff000000u)) {
+ lz += 8;
+ n = (n << 8);
+ }
+ if (!(n & 0xf0000000u)) {
+ lz += 4;
+ n = (n << 4);
+ }
+ if (!(n & 0xc0000000u)) {
+ lz += 2;
+ n = (n << 2);
+ }
+ if (!(n & 0x80000000u)) {
+ lz += 1;
+ }
+
+ return 31 - lz;
+}
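+/* For illustration: h2_log2(1) == 0, h2_log2(16) == 4, h2_log2(17) == 4,
+ * and h2_log2(0) == 0 by the special case above. */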
+
+size_t h2_util_hex_dump(char *buffer, size_t maxlen,
+ const char *data, size_t datalen)
+{
+ size_t offset = 0;
+ size_t maxoffset = (maxlen-4);
+ size_t i;
+ for (i = 0; i < datalen && offset < maxoffset; ++i) {
+ const char *sep = (i && i % 16 == 0)? "\n" : " ";
+ int n = apr_snprintf(buffer+offset, maxoffset-offset,
+ "%2x%s", ((unsigned int)data[i]&0xff), sep);
+ offset += n;
+ }
+ strcpy(buffer+offset, (i<datalen)? "..." : "");
+ return strlen(buffer);
+}
+
+void h2_util_camel_case_header(char *s, size_t len)
+{
+ size_t start = 1;
+ size_t i;
+ for (i = 0; i < len; ++i) {
+ if (start) {
+ if (s[i] >= 'a' && s[i] <= 'z') {
+ s[i] -= 'a' - 'A';
+ }
+
+ start = 0;
+ }
+ else if (s[i] == '-') {
+ start = 1;
+ }
+ }
+}
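/* Illustrative sketch, not part of this patch: camel-casing a lowercase
 * HTTP/2 header name before handing it to the HTTP/1 side; relies only on
 * the declarations and includes of this file. */
static void camel_case_example(apr_pool_t *pool)
{
    char *name = apr_pstrdup(pool, "content-type");
    h2_util_camel_case_header(name, strlen(name));
    /* name now reads "Content-Type" */
}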
+
+/* base64 url encoding */
+
+#define N6 (unsigned int)-1
+
+static const unsigned int BASE64URL_UINT6[] = {
+/* 0 1 2 3 4 5 6 7 8 9 a b c d e f */
+ N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, /* 0 */
+ N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, /* 1 */
+ N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, 62, N6, N6, /* 2 */
+ 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, N6, N6, N6, N6, N6, N6, /* 3 */
+ N6, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, /* 4 */
+ 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, N6, N6, N6, N6, 63, /* 5 */
+ N6, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, /* 6 */
+ 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, N6, N6, N6, N6, N6, /* 7 */
+ N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, /* 8 */
+ N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, /* 9 */
+ N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, /* a */
+ N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, /* b */
+ N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, /* c */
+ N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, /* d */
+ N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, /* e */
+ N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6 /* f */
+};
+static const unsigned char BASE64URL_CHARS[] = {
+ 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', /* 0 - 9 */
+ 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', /* 10 - 19 */
+ 'U', 'V', 'W', 'X', 'Y', 'Z', 'a', 'b', 'c', 'd', /* 20 - 29 */
+ 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', /* 30 - 39 */
+ 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', /* 40 - 49 */
+ 'y', 'z', '0', '1', '2', '3', '4', '5', '6', '7', /* 50 - 59 */
+ '8', '9', '-', '_', ' ', ' ', ' ', ' ', ' ', ' ', /* 60 - 69 */
+};
+
+#define BASE64URL_CHAR(x) BASE64URL_CHARS[ (unsigned int)(x) & 0x3fu ]
+
+apr_size_t h2_util_base64url_decode(const char **decoded, const char *encoded,
+ apr_pool_t *pool)
+{
+ const unsigned char *e = (const unsigned char *)encoded;
+ const unsigned char *p = e;
+ unsigned char *d;
+ unsigned int n;
+ long len, mlen, remain, i;
+
+ while (*p && BASE64URL_UINT6[ *p ] != N6) {
+ ++p;
+ }
+ len = (int)(p - e);
+ mlen = (len/4)*4;
+ *decoded = apr_pcalloc(pool, (apr_size_t)len + 1);
+
+ i = 0;
+ d = (unsigned char*)*decoded;
+ for (; i < mlen; i += 4) {
+ n = ((BASE64URL_UINT6[ e[i+0] ] << 18) +
+ (BASE64URL_UINT6[ e[i+1] ] << 12) +
+ (BASE64URL_UINT6[ e[i+2] ] << 6) +
+ (BASE64URL_UINT6[ e[i+3] ]));
+ *d++ = (unsigned char)(n >> 16);
+ *d++ = (unsigned char)(n >> 8 & 0xffu);
+ *d++ = (unsigned char)(n & 0xffu);
+ }
+ remain = len - mlen;
+ switch (remain) {
+ case 2:
+ n = ((BASE64URL_UINT6[ e[mlen+0] ] << 18) +
+ (BASE64URL_UINT6[ e[mlen+1] ] << 12));
+ *d++ = (unsigned char)(n >> 16);
+ remain = 1;
+ break;
+ case 3:
+ n = ((BASE64URL_UINT6[ e[mlen+0] ] << 18) +
+ (BASE64URL_UINT6[ e[mlen+1] ] << 12) +
+ (BASE64URL_UINT6[ e[mlen+2] ] << 6));
+ *d++ = (unsigned char)(n >> 16);
+ *d++ = (unsigned char)(n >> 8 & 0xffu);
+ remain = 2;
+ break;
+ default: /* do nothing */
+ break;
+ }
+ return (apr_size_t)(mlen/4*3 + remain);
+}
+
+const char *h2_util_base64url_encode(const char *data,
+ apr_size_t dlen, apr_pool_t *pool)
+{
+ int i, len = (int)dlen;
+ apr_size_t slen = ((dlen+2)/3)*4 + 1; /* 0 terminated */
+ const unsigned char *udata = (const unsigned char*)data;
+ unsigned char *enc, *p = apr_pcalloc(pool, slen);
+
+ enc = p;
+ for (i = 0; i < len-2; i+= 3) {
+ *p++ = BASE64URL_CHAR( (udata[i] >> 2) );
+ *p++ = BASE64URL_CHAR( (udata[i] << 4) + (udata[i+1] >> 4) );
+ *p++ = BASE64URL_CHAR( (udata[i+1] << 2) + (udata[i+2] >> 6) );
+ *p++ = BASE64URL_CHAR( (udata[i+2]) );
+ }
+
+ if (i < len) {
+ *p++ = BASE64URL_CHAR( (udata[i] >> 2) );
+ if (i == (len - 1)) {
+ *p++ = BASE64URL_CHARS[ ((unsigned int)udata[i] << 4) & 0x3fu ];
+ }
+ else {
+ *p++ = BASE64URL_CHAR( (udata[i] << 4) + (udata[i+1] >> 4) );
+ *p++ = BASE64URL_CHAR( (udata[i+1] << 2) );
+ }
+ }
+ *p++ = '\0';
+ return (char *)enc;
+}
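/* Illustrative sketch, not part of this patch: round-tripping a small
 * binary buffer through the base64url helpers (RFC 4648, section 5,
 * unpadded). The function name is hypothetical. */
static void base64url_example(apr_pool_t *pool)
{
    const char data[] = { 0x01, 0x02, 0x03, 0x04, 0x05 };
    const char *encoded, *decoded;
    apr_size_t dlen;

    encoded = h2_util_base64url_encode(data, sizeof(data), pool);
    dlen = h2_util_base64url_decode(&decoded, encoded, pool);
    /* dlen == sizeof(data) and decoded holds the original bytes */
    (void)dlen;
}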
+
+/*******************************************************************************
+ * ihash - hash for structs with int identifier
+ ******************************************************************************/
+struct h2_ihash_t {
+ apr_hash_t *hash;
+ size_t ioff;
+};
+
+static unsigned int ihash(const char *key, apr_ssize_t *klen)
+{
+ return (unsigned int)(*((int*)key));
+}
+
+h2_ihash_t *h2_ihash_create(apr_pool_t *pool, size_t offset_of_int)
+{
+ h2_ihash_t *ih = apr_pcalloc(pool, sizeof(h2_ihash_t));
+ ih->hash = apr_hash_make_custom(pool, ihash);
+ ih->ioff = offset_of_int;
+ return ih;
+}
+
+unsigned int h2_ihash_count(h2_ihash_t *ih)
+{
+ return apr_hash_count(ih->hash);
+}
+
+int h2_ihash_empty(h2_ihash_t *ih)
+{
+ return apr_hash_count(ih->hash) == 0;
+}
+
+void *h2_ihash_get(h2_ihash_t *ih, int id)
+{
+ return apr_hash_get(ih->hash, &id, sizeof(id));
+}
+
+typedef struct {
+ h2_ihash_iter_t *iter;
+ void *ctx;
+} iter_ctx;
+
+static int ihash_iter(void *ctx, const void *key, apr_ssize_t klen,
+ const void *val)
+{
+ iter_ctx *ictx = ctx;
+ return ictx->iter(ictx->ctx, (void*)val); /* why is this passed const? */
+}
+
+int h2_ihash_iter(h2_ihash_t *ih, h2_ihash_iter_t *fn, void *ctx)
+{
+ iter_ctx ictx;
+ ictx.iter = fn;
+ ictx.ctx = ctx;
+ return apr_hash_do(ihash_iter, &ictx, ih->hash);
+}
+
+void h2_ihash_add(h2_ihash_t *ih, void *val)
+{
+ apr_hash_set(ih->hash, ((char *)val + ih->ioff), sizeof(int), val);
+}
+
+void h2_ihash_remove(h2_ihash_t *ih, int id)
+{
+ apr_hash_set(ih->hash, &id, sizeof(id), NULL);
+}
+
+void h2_ihash_remove_val(h2_ihash_t *ih, void *val)
+{
+ int id = *((int*)((char *)val + ih->ioff));
+ apr_hash_set(ih->hash, &id, sizeof(id), NULL);
+}
+
+
+void h2_ihash_clear(h2_ihash_t *ih)
+{
+ apr_hash_clear(ih->hash);
+}
+
+typedef struct {
+ h2_ihash_t *ih;
+ void **buffer;
+ size_t max;
+ size_t len;
+} collect_ctx;
+
+static int collect_iter(void *x, void *val)
+{
+ collect_ctx *ctx = x;
+ if (ctx->len < ctx->max) {
+ ctx->buffer[ctx->len++] = val;
+ return 1;
+ }
+ return 0;
+}
+
+size_t h2_ihash_shift(h2_ihash_t *ih, void **buffer, size_t max)
+{
+ collect_ctx ctx;
+ size_t i;
+
+ ctx.ih = ih;
+ ctx.buffer = buffer;
+ ctx.max = max;
+ ctx.len = 0;
+ h2_ihash_iter(ih, collect_iter, &ctx);
+ for (i = 0; i < ctx.len; ++i) {
+ h2_ihash_remove_val(ih, buffer[i]);
+ }
+ return ctx.len;
+}
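/* Illustrative sketch, not part of this patch: an ihash keyed on the int
 * member of a hypothetical struct, located via offsetof() (from <stddef.h>). */
typedef struct { int id; const char *name; } ihash_example_item;

static void ihash_example(apr_pool_t *pool)
{
    h2_ihash_t *ih = h2_ihash_create(pool, offsetof(ihash_example_item, id));
    ihash_example_item item = { 42, "answer" };

    h2_ihash_add(ih, &item);
    /* h2_ihash_get(ih, 42) == &item, h2_ihash_count(ih) == 1 */
    h2_ihash_remove(ih, 42);
}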
+
+/*******************************************************************************
+ * iqueue - sorted list of int
+ ******************************************************************************/
+
+static void iq_grow(h2_iqueue *q, int nlen);
+static void iq_swap(h2_iqueue *q, int i, int j);
+static int iq_bubble_up(h2_iqueue *q, int i, int top,
+ h2_iq_cmp *cmp, void *ctx);
+static int iq_bubble_down(h2_iqueue *q, int i, int bottom,
+ h2_iq_cmp *cmp, void *ctx);
+
+h2_iqueue *h2_iq_create(apr_pool_t *pool, int capacity)
+{
+ h2_iqueue *q = apr_pcalloc(pool, sizeof(h2_iqueue));
+ q->pool = pool;
+ iq_grow(q, capacity);
+ q->nelts = 0;
+ return q;
+}
+
+int h2_iq_empty(h2_iqueue *q)
+{
+ return q->nelts == 0;
+}
+
+int h2_iq_count(h2_iqueue *q)
+{
+ return q->nelts;
+}
+
+
+int h2_iq_add(h2_iqueue *q, int sid, h2_iq_cmp *cmp, void *ctx)
+{
+ int i;
+
+ if (h2_iq_contains(q, sid)) {
+ return 0;
+ }
+ if (q->nelts >= q->nalloc) {
+ iq_grow(q, q->nalloc * 2);
+ }
+ i = (q->head + q->nelts) % q->nalloc;
+ q->elts[i] = sid;
+ ++q->nelts;
+
+ if (cmp) {
+ /* bubble it to the front of the queue */
+ iq_bubble_up(q, i, q->head, cmp, ctx);
+ }
+ return 1;
+}
+
+int h2_iq_append(h2_iqueue *q, int sid)
+{
+ return h2_iq_add(q, sid, NULL, NULL);
+}
+
+int h2_iq_remove(h2_iqueue *q, int sid)
+{
+ int i;
+ for (i = 0; i < q->nelts; ++i) {
+ if (sid == q->elts[(q->head + i) % q->nalloc]) {
+ break;
+ }
+ }
+
+ if (i < q->nelts) {
+ ++i;
+ for (; i < q->nelts; ++i) {
+ q->elts[(q->head+i-1)%q->nalloc] = q->elts[(q->head+i)%q->nalloc];
+ }
+ --q->nelts;
+ return 1;
+ }
+ return 0;
+}
+
+void h2_iq_clear(h2_iqueue *q)
+{
+ q->nelts = 0;
+}
+
+void h2_iq_sort(h2_iqueue *q, h2_iq_cmp *cmp, void *ctx)
+{
+ /* Assume that changes in ordering are minimal. This needs,
+ * best case, q->nelts - 1 comparisons to check that nothing
+ * changed.
+ */
+ if (q->nelts > 0) {
+ int i, ni, prev, last;
+
+ /* Start at the end of the queue and create a tail of sorted
+ * entries. Make that tail one element longer in each iteration.
+ */
+ last = i = (q->head + q->nelts - 1) % q->nalloc;
+ while (i != q->head) {
+ prev = (q->nalloc + i - 1) % q->nalloc;
+
+ ni = iq_bubble_up(q, i, prev, cmp, ctx);
+ if (ni == prev) {
+ /* i bubbled one up, bubble the new i down, which
+ * keeps all ints below i sorted. */
+ iq_bubble_down(q, i, last, cmp, ctx);
+ }
+ i = prev;
+ };
+ }
+}
+
+
+int h2_iq_shift(h2_iqueue *q)
+{
+ int sid;
+
+ if (q->nelts <= 0) {
+ return 0;
+ }
+
+ sid = q->elts[q->head];
+ q->head = (q->head + 1) % q->nalloc;
+ q->nelts--;
+
+ return sid;
+}
+
+size_t h2_iq_mshift(h2_iqueue *q, int *pint, size_t max)
+{
+ size_t i;
+ for (i = 0; i < max; ++i) {
+ pint[i] = h2_iq_shift(q);
+ if (pint[i] == 0) {
+ break;
+ }
+ }
+ return i;
+}
+
+static void iq_grow(h2_iqueue *q, int nlen)
+{
+ if (nlen > q->nalloc) {
+ int *nq = apr_pcalloc(q->pool, sizeof(int) * nlen);
+ if (q->nelts > 0) {
+ int l = ((q->head + q->nelts) % q->nalloc) - q->head;
+
+ memmove(nq, q->elts + q->head, sizeof(int) * l);
+ if (l < q->nelts) {
+ /* elts wrapped, append elts in [0, remain] to nq */
+ int remain = q->nelts - l;
+ memmove(nq + l, q->elts, sizeof(int) * remain);
+ }
+ }
+ q->elts = nq;
+ q->nalloc = nlen;
+ q->head = 0;
+ }
+}
+
+static void iq_swap(h2_iqueue *q, int i, int j)
+{
+ int x = q->elts[i];
+ q->elts[i] = q->elts[j];
+ q->elts[j] = x;
+}
+
+static int iq_bubble_up(h2_iqueue *q, int i, int top,
+ h2_iq_cmp *cmp, void *ctx)
+{
+ int prev;
+ while (((prev = (q->nalloc + i - 1) % q->nalloc), i != top)
+ && (*cmp)(q->elts[i], q->elts[prev], ctx) < 0) {
+ iq_swap(q, prev, i);
+ i = prev;
+ }
+ return i;
+}
+
+static int iq_bubble_down(h2_iqueue *q, int i, int bottom,
+ h2_iq_cmp *cmp, void *ctx)
+{
+ int next;
+ while (((next = (q->nalloc + i + 1) % q->nalloc), i != bottom)
+ && (*cmp)(q->elts[i], q->elts[next], ctx) > 0) {
+ iq_swap(q, next, i);
+ i = next;
+ }
+ return i;
+}
+
+int h2_iq_contains(h2_iqueue *q, int sid)
+{
+ int i;
+ for (i = 0; i < q->nelts; ++i) {
+ if (sid == q->elts[(q->head + i) % q->nalloc]) {
+ return 1;
+ }
+ }
+ return 0;
+}
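/* Illustrative sketch, not part of this patch: a sorted int queue with an
 * ascending comparator, in the way stream ids are kept ordered.
 * Both function names below are hypothetical. */
static int iq_cmp_ascending(int i1, int i2, void *ctx)
{
    (void)ctx;
    return i1 - i2;
}

static void iq_example(apr_pool_t *pool)
{
    h2_iqueue *q = h2_iq_create(pool, 4);

    h2_iq_add(q, 7, iq_cmp_ascending, NULL);
    h2_iq_add(q, 3, iq_cmp_ascending, NULL);
    h2_iq_add(q, 5, iq_cmp_ascending, NULL);
    /* h2_iq_shift(q) yields 3, 5, 7 and then 0 once the queue is empty */
}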
+
+/*******************************************************************************
+ * FIFO queue
+ ******************************************************************************/
+
+struct h2_fifo {
+ void **elems;
+ int capacity;
+ int set;
+ int in;
+ int out;
+ int count;
+ int aborted;
+ apr_thread_mutex_t *lock;
+ apr_thread_cond_t *not_empty;
+ apr_thread_cond_t *not_full;
+};
+
+static apr_status_t fifo_destroy(void *data)
+{
+ h2_fifo *fifo = data;
+
+ apr_thread_cond_destroy(fifo->not_empty);
+ apr_thread_cond_destroy(fifo->not_full);
+ apr_thread_mutex_destroy(fifo->lock);
+
+ return APR_SUCCESS;
+}
+
+static int index_of(h2_fifo *fifo, void *elem)
+{
+ int i;
+
+ for (i = fifo->out; i != fifo->in; i = (i + 1) % fifo->capacity) {
+ if (elem == fifo->elems[i]) {
+ return i;
+ }
+ }
+ return -1;
+}
+
+static apr_status_t create_int(h2_fifo **pfifo, apr_pool_t *pool,
+ int capacity, int as_set)
+{
+ apr_status_t rv;
+ h2_fifo *fifo;
+
+ fifo = apr_pcalloc(pool, sizeof(*fifo));
+ if (fifo == NULL) {
+ return APR_ENOMEM;
+ }
+
+ rv = apr_thread_mutex_create(&fifo->lock,
+ APR_THREAD_MUTEX_UNNESTED, pool);
+ if (rv != APR_SUCCESS) {
+ return rv;
+ }
+
+ rv = apr_thread_cond_create(&fifo->not_empty, pool);
+ if (rv != APR_SUCCESS) {
+ return rv;
+ }
+
+ rv = apr_thread_cond_create(&fifo->not_full, pool);
+ if (rv != APR_SUCCESS) {
+ return rv;
+ }
+
+ fifo->elems = apr_pcalloc(pool, capacity * sizeof(void*));
+ if (fifo->elems == NULL) {
+ return APR_ENOMEM;
+ }
+ fifo->capacity = capacity;
+ fifo->set = as_set;
+
+ *pfifo = fifo;
+ apr_pool_cleanup_register(pool, fifo, fifo_destroy, apr_pool_cleanup_null);
+
+ return APR_SUCCESS;
+}
+
+apr_status_t h2_fifo_create(h2_fifo **pfifo, apr_pool_t *pool, int capacity)
+{
+ return create_int(pfifo, pool, capacity, 0);
+}
+
+apr_status_t h2_fifo_set_create(h2_fifo **pfifo, apr_pool_t *pool, int capacity)
+{
+ return create_int(pfifo, pool, capacity, 1);
+}
+
+apr_status_t h2_fifo_term(h2_fifo *fifo)
+{
+ apr_status_t rv;
+ if ((rv = apr_thread_mutex_lock(fifo->lock)) == APR_SUCCESS) {
+ fifo->aborted = 1;
+ apr_thread_cond_broadcast(fifo->not_empty);
+ apr_thread_cond_broadcast(fifo->not_full);
+ apr_thread_mutex_unlock(fifo->lock);
+ }
+ return rv;
+}
+
+int h2_fifo_count(h2_fifo *fifo)
+{
+ int n;
+
+ apr_thread_mutex_lock(fifo->lock);
+ n = fifo->count;
+ apr_thread_mutex_unlock(fifo->lock);
+ return n;
+}
+
+static apr_status_t check_not_empty(h2_fifo *fifo, int block)
+{
+ while (fifo->count == 0) {
+ if (!block) {
+ return APR_EAGAIN;
+ }
+ if (fifo->aborted) {
+ return APR_EOF;
+ }
+ apr_thread_cond_wait(fifo->not_empty, fifo->lock);
+ }
+ return APR_SUCCESS;
+}
+
+static apr_status_t fifo_push_int(h2_fifo *fifo, void *elem, int block)
+{
+ if (fifo->aborted) {
+ return APR_EOF;
+ }
+
+ if (fifo->set && index_of(fifo, elem) >= 0) {
+ /* set mode, elem already member */
+ return APR_EEXIST;
+ }
+ else if (fifo->count == fifo->capacity) {
+ if (block) {
+ while (fifo->count == fifo->capacity) {
+ if (fifo->aborted) {
+ return APR_EOF;
+ }
+ apr_thread_cond_wait(fifo->not_full, fifo->lock);
+ }
+ }
+ else {
+ return APR_EAGAIN;
+ }
+ }
+
+ fifo->elems[fifo->in++] = elem;
+ if (fifo->in >= fifo->capacity) {
+ fifo->in -= fifo->capacity;
+ }
+ ++fifo->count;
+ if (fifo->count == 1) {
+ apr_thread_cond_signal(fifo->not_empty);
+ }
+ return APR_SUCCESS;
+}
+
+static apr_status_t fifo_push(h2_fifo *fifo, void *elem, int block)
+{
+ apr_status_t rv;
+
+ if ((rv = apr_thread_mutex_lock(fifo->lock)) == APR_SUCCESS) {
+ rv = fifo_push_int(fifo, elem, block);
+ apr_thread_mutex_unlock(fifo->lock);
+ }
+ return rv;
+}
+
+apr_status_t h2_fifo_push(h2_fifo *fifo, void *elem)
+{
+ return fifo_push(fifo, elem, 1);
+}
+
+apr_status_t h2_fifo_try_push(h2_fifo *fifo, void *elem)
+{
+ return fifo_push(fifo, elem, 0);
+}
+
+static apr_status_t pull_head(h2_fifo *fifo, void **pelem, int block)
+{
+ apr_status_t rv;
+ int was_full;
+
+ if ((rv = check_not_empty(fifo, block)) != APR_SUCCESS) {
+ *pelem = NULL;
+ return rv;
+ }
+ *pelem = fifo->elems[fifo->out++];
+ if (fifo->out >= fifo->capacity) {
+ fifo->out -= fifo->capacity;
+ }
+ was_full = (fifo->count == fifo->capacity);
+ --fifo->count;
+ if (was_full) {
+ apr_thread_cond_broadcast(fifo->not_full);
+ }
+ return APR_SUCCESS;
+}
+
+static apr_status_t fifo_pull(h2_fifo *fifo, void **pelem, int block)
+{
+ apr_status_t rv;
+
+ if ((rv = apr_thread_mutex_lock(fifo->lock)) == APR_SUCCESS) {
+ rv = pull_head(fifo, pelem, block);
+ apr_thread_mutex_unlock(fifo->lock);
+ }
+ return rv;
+}
+
+apr_status_t h2_fifo_pull(h2_fifo *fifo, void **pelem)
+{
+ return fifo_pull(fifo, pelem, 1);
+}
+
+apr_status_t h2_fifo_try_pull(h2_fifo *fifo, void **pelem)
+{
+ return fifo_pull(fifo, pelem, 0);
+}
+
+static apr_status_t fifo_peek(h2_fifo *fifo, h2_fifo_peek_fn *fn, void *ctx, int block)
+{
+ apr_status_t rv;
+ void *elem;
+
+ if (fifo->aborted) {
+ return APR_EOF;
+ }
+
+ if (APR_SUCCESS == (rv = apr_thread_mutex_lock(fifo->lock))) {
+ if (APR_SUCCESS == (rv = pull_head(fifo, &elem, block))) {
+ switch (fn(elem, ctx)) {
+ case H2_FIFO_OP_PULL:
+ break;
+ case H2_FIFO_OP_REPUSH:
+ rv = fifo_push_int(fifo, elem, block);
+ break;
+ }
+ }
+ apr_thread_mutex_unlock(fifo->lock);
+ }
+ return rv;
+}
+
+apr_status_t h2_fifo_peek(h2_fifo *fifo, h2_fifo_peek_fn *fn, void *ctx)
+{
+ return fifo_peek(fifo, fn, ctx, 1);
+}
+
+apr_status_t h2_fifo_try_peek(h2_fifo *fifo, h2_fifo_peek_fn *fn, void *ctx)
+{
+ return fifo_peek(fifo, fn, ctx, 0);
+}
+
+apr_status_t h2_fifo_remove(h2_fifo *fifo, void *elem)
+{
+ apr_status_t rv;
+
+ if (fifo->aborted) {
+ return APR_EOF;
+ }
+
+ if ((rv = apr_thread_mutex_lock(fifo->lock)) == APR_SUCCESS) {
+ int i, last_count = fifo->count;
+
+ for (i = fifo->out; i != fifo->in; i = (i + 1) % fifo->capacity) {
+ if (fifo->elems[i] == elem) {
+ --fifo->count;
+ if (fifo->count == 0) {
+ fifo->out = fifo->in = 0;
+ }
+ else if (i == fifo->out) {
+ /* first element */
+ ++fifo->out;
+ if (fifo->out >= fifo->capacity) {
+ fifo->out -= fifo->capacity;
+ }
+ }
+ else if (((i + 1) % fifo->capacity) == fifo->in) {
+ /* last element */
+ --fifo->in;
+ if (fifo->in < 0) {
+ fifo->in += fifo->capacity;
+ }
+ }
+ else if (i > fifo->out) {
+ /* between out and in/capacity, move elements below up */
+ memmove(&fifo->elems[fifo->out+1], &fifo->elems[fifo->out],
+ (i - fifo->out) * sizeof(void*));
+ ++fifo->out;
+ if (fifo->out >= fifo->capacity) {
+ fifo->out -= fifo->capacity;
+ }
+ }
+ else {
+ /* we wrapped around, move elements above down */
+ AP_DEBUG_ASSERT((fifo->in - i - 1) > 0);
+ AP_DEBUG_ASSERT((fifo->in - i - 1) < fifo->capacity);
+ memmove(&fifo->elems[i], &fifo->elems[i + 1],
+ (fifo->in - i - 1) * sizeof(void*));
+ --fifo->in;
+ if (fifo->in < 0) {
+ fifo->in += fifo->capacity;
+ }
+ }
+ }
+ }
+ if (fifo->count != last_count) {
+ if (last_count == fifo->capacity) {
+ apr_thread_cond_broadcast(fifo->not_full);
+ }
+ rv = APR_SUCCESS;
+ }
+ else {
+ rv = APR_EAGAIN;
+ }
+
+ apr_thread_mutex_unlock(fifo->lock);
+ }
+ return rv;
+}
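/* Illustrative sketch, not part of this patch: basic producer/consumer use
 * of the void* FIFO; error handling omitted for brevity. */
static void fifo_example(apr_pool_t *pool)
{
    h2_fifo *fifo;
    void *elem;

    h2_fifo_create(&fifo, pool, 16);
    h2_fifo_push(fifo, "work");      /* blocks while the queue is full */
    h2_fifo_try_pull(fifo, &elem);   /* APR_EAGAIN when the queue is empty */
    h2_fifo_term(fifo);              /* wakes up all blocked callers */
}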
+
+/*******************************************************************************
+ * FIFO int queue
+ ******************************************************************************/
+
+struct h2_ififo {
+ int *elems;
+ int capacity;
+ int set;
+ int head;
+ int count;
+ int aborted;
+ apr_thread_mutex_t *lock;
+ apr_thread_cond_t *not_empty;
+ apr_thread_cond_t *not_full;
+};
+
+static int inth_index(h2_ififo *fifo, int n)
+{
+ return (fifo->head + n) % fifo->capacity;
+}
+
+static apr_status_t ififo_destroy(void *data)
+{
+ h2_ififo *fifo = data;
+
+ apr_thread_cond_destroy(fifo->not_empty);
+ apr_thread_cond_destroy(fifo->not_full);
+ apr_thread_mutex_destroy(fifo->lock);
+
+ return APR_SUCCESS;
+}
+
+static int iindex_of(h2_ififo *fifo, int id)
+{
+ int i;
+
+ for (i = 0; i < fifo->count; ++i) {
+ if (id == fifo->elems[inth_index(fifo, i)]) {
+ return i;
+ }
+ }
+ return -1;
+}
+
+static apr_status_t icreate_int(h2_ififo **pfifo, apr_pool_t *pool,
+ int capacity, int as_set)
+{
+ apr_status_t rv;
+ h2_ififo *fifo;
+
+ fifo = apr_pcalloc(pool, sizeof(*fifo));
+ if (fifo == NULL) {
+ return APR_ENOMEM;
+ }
+
+ rv = apr_thread_mutex_create(&fifo->lock,
+ APR_THREAD_MUTEX_UNNESTED, pool);
+ if (rv != APR_SUCCESS) {
+ return rv;
+ }
+
+ rv = apr_thread_cond_create(&fifo->not_empty, pool);
+ if (rv != APR_SUCCESS) {
+ return rv;
+ }
+
+ rv = apr_thread_cond_create(&fifo->not_full, pool);
+ if (rv != APR_SUCCESS) {
+ return rv;
+ }
+
+ fifo->elems = apr_pcalloc(pool, capacity * sizeof(int));
+ if (fifo->elems == NULL) {
+ return APR_ENOMEM;
+ }
+ fifo->capacity = capacity;
+ fifo->set = as_set;
+
+ *pfifo = fifo;
+ apr_pool_cleanup_register(pool, fifo, ififo_destroy, apr_pool_cleanup_null);
+
+ return APR_SUCCESS;
+}
+
+apr_status_t h2_ififo_create(h2_ififo **pfifo, apr_pool_t *pool, int capacity)
+{
+ return icreate_int(pfifo, pool, capacity, 0);
+}
+
+apr_status_t h2_ififo_set_create(h2_ififo **pfifo, apr_pool_t *pool, int capacity)
+{
+ return icreate_int(pfifo, pool, capacity, 1);
+}
+
+apr_status_t h2_ififo_term(h2_ififo *fifo)
+{
+ apr_status_t rv;
+ if ((rv = apr_thread_mutex_lock(fifo->lock)) == APR_SUCCESS) {
+ fifo->aborted = 1;
+ apr_thread_cond_broadcast(fifo->not_empty);
+ apr_thread_cond_broadcast(fifo->not_full);
+ apr_thread_mutex_unlock(fifo->lock);
+ }
+ return rv;
+}
+
+int h2_ififo_count(h2_ififo *fifo)
+{
+ return fifo->count;
+}
+
+static apr_status_t icheck_not_empty(h2_ififo *fifo, int block)
+{
+ while (fifo->count == 0) {
+ if (!block) {
+ return APR_EAGAIN;
+ }
+ if (fifo->aborted) {
+ return APR_EOF;
+ }
+ apr_thread_cond_wait(fifo->not_empty, fifo->lock);
+ }
+ return APR_SUCCESS;
+}
+
+static apr_status_t ififo_push_int(h2_ififo *fifo, int id, int block)
+{
+ if (fifo->aborted) {
+ return APR_EOF;
+ }
+
+ if (fifo->set && iindex_of(fifo, id) >= 0) {
+ /* set mode, elem already member */
+ return APR_EEXIST;
+ }
+ else if (fifo->count == fifo->capacity) {
+ if (block) {
+ while (fifo->count == fifo->capacity) {
+ if (fifo->aborted) {
+ return APR_EOF;
+ }
+ apr_thread_cond_wait(fifo->not_full, fifo->lock);
+ }
+ }
+ else {
+ return APR_EAGAIN;
+ }
+ }
+
+ ap_assert(fifo->count < fifo->capacity);
+ fifo->elems[inth_index(fifo, fifo->count)] = id;
+ ++fifo->count;
+ if (fifo->count == 1) {
+ apr_thread_cond_broadcast(fifo->not_empty);
+ }
+ return APR_SUCCESS;
+}
+
+static apr_status_t ififo_push(h2_ififo *fifo, int id, int block)
+{
+ apr_status_t rv;
+
+ if ((rv = apr_thread_mutex_lock(fifo->lock)) == APR_SUCCESS) {
+ rv = ififo_push_int(fifo, id, block);
+ apr_thread_mutex_unlock(fifo->lock);
+ }
+ return rv;
+}
+
+apr_status_t h2_ififo_push(h2_ififo *fifo, int id)
+{
+ return ififo_push(fifo, id, 1);
+}
+
+apr_status_t h2_ififo_try_push(h2_ififo *fifo, int id)
+{
+ return ififo_push(fifo, id, 0);
+}
+
+static apr_status_t ipull_head(h2_ififo *fifo, int *pi, int block)
+{
+ apr_status_t rv;
+
+ if ((rv = icheck_not_empty(fifo, block)) != APR_SUCCESS) {
+ *pi = 0;
+ return rv;
+ }
+ *pi = fifo->elems[fifo->head];
+ --fifo->count;
+ if (fifo->count > 0) {
+ fifo->head = inth_index(fifo, 1);
+ if (fifo->count+1 == fifo->capacity) {
+ apr_thread_cond_broadcast(fifo->not_full);
+ }
+ }
+ return APR_SUCCESS;
+}
+
+static apr_status_t ififo_pull(h2_ififo *fifo, int *pi, int block)
+{
+ apr_status_t rv;
+
+ if ((rv = apr_thread_mutex_lock(fifo->lock)) == APR_SUCCESS) {
+ rv = ipull_head(fifo, pi, block);
+ apr_thread_mutex_unlock(fifo->lock);
+ }
+ return rv;
+}
+
+apr_status_t h2_ififo_pull(h2_ififo *fifo, int *pi)
+{
+ return ififo_pull(fifo, pi, 1);
+}
+
+apr_status_t h2_ififo_try_pull(h2_ififo *fifo, int *pi)
+{
+ return ififo_pull(fifo, pi, 0);
+}
+
+static apr_status_t ififo_peek(h2_ififo *fifo, h2_ififo_peek_fn *fn, void *ctx, int block)
+{
+ apr_status_t rv;
+ int id;
+
+ if (APR_SUCCESS == (rv = apr_thread_mutex_lock(fifo->lock))) {
+ if (APR_SUCCESS == (rv = ipull_head(fifo, &id, block))) {
+ switch (fn(id, ctx)) {
+ case H2_FIFO_OP_PULL:
+ break;
+ case H2_FIFO_OP_REPUSH:
+ rv = ififo_push_int(fifo, id, block);
+ break;
+ }
+ }
+ apr_thread_mutex_unlock(fifo->lock);
+ }
+ return rv;
+}
+
+apr_status_t h2_ififo_peek(h2_ififo *fifo, h2_ififo_peek_fn *fn, void *ctx)
+{
+ return ififo_peek(fifo, fn, ctx, 1);
+}
+
+apr_status_t h2_ififo_try_peek(h2_ififo *fifo, h2_ififo_peek_fn *fn, void *ctx)
+{
+ return ififo_peek(fifo, fn, ctx, 0);
+}
+
+static apr_status_t ififo_remove(h2_ififo *fifo, int id)
+{
+ int rc, i;
+
+ if (fifo->aborted) {
+ return APR_EOF;
+ }
+
+ rc = 0;
+ for (i = 0; i < fifo->count; ++i) {
+ int e = fifo->elems[inth_index(fifo, i)];
+ if (e == id) {
+ ++rc;
+ }
+ else if (rc) {
+ fifo->elems[inth_index(fifo, i-rc)] = e;
+ }
+ }
+ if (!rc) {
+ return APR_EAGAIN;
+ }
+ fifo->count -= rc;
+ if (fifo->count + rc == fifo->capacity) {
+ apr_thread_cond_broadcast(fifo->not_full);
+ }
+ return APR_SUCCESS;
+}
+
+apr_status_t h2_ififo_remove(h2_ififo *fifo, int id)
+{
+ apr_status_t rv;
+
+ if ((rv = apr_thread_mutex_lock(fifo->lock)) == APR_SUCCESS) {
+ rv = ififo_remove(fifo, id);
+ apr_thread_mutex_unlock(fifo->lock);
+ }
+ return rv;
+}
+
+/*******************************************************************************
+ * h2_util for apr_table_t
+ ******************************************************************************/
+
+typedef struct {
+ apr_size_t bytes;
+ apr_size_t pair_extra;
+} table_bytes_ctx;
+
+static int count_bytes(void *x, const char *key, const char *value)
+{
+ table_bytes_ctx *ctx = x;
+ if (key) {
+ ctx->bytes += strlen(key);
+ }
+ if (value) {
+ ctx->bytes += strlen(value);
+ }
+ ctx->bytes += ctx->pair_extra;
+ return 1;
+}
+
+apr_size_t h2_util_table_bytes(apr_table_t *t, apr_size_t pair_extra)
+{
+ table_bytes_ctx ctx;
+
+ ctx.bytes = 0;
+ ctx.pair_extra = pair_extra;
+ apr_table_do(count_bytes, &ctx, t, NULL);
+ return ctx.bytes;
+}
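/* Illustrative sketch, not part of this patch: estimating the wire size of
 * a header table, adding 4 extra bytes per pair for ": " and CRLF.
 * The wrapper name is hypothetical. */
static apr_size_t headers_size_estimate(apr_table_t *headers)
{
    return h2_util_table_bytes(headers, 4);
}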
+
+
+/*******************************************************************************
+ * h2_util for bucket brigades
+ ******************************************************************************/
+
+static void fit_bucket_into(apr_bucket *b, apr_off_t *plen)
+{
+ /* signed apr_off_t is at least as large as unsigned apr_size_t.
+ * Problems may arise when they are both the same size. Then
+ * the bucket length *may* be larger than a value we can hold
+ * in apr_off_t. Before casting b->length to apr_off_t we must
+ * check the limitations.
+ * After we have resized the bucket, it is safe to cast and subtract.
+ */
+ if ((sizeof(apr_off_t) == sizeof(apr_int64_t)
+ && b->length > APR_INT64_MAX)
+ || (sizeof(apr_off_t) == sizeof(apr_int32_t)
+ && b->length > APR_INT32_MAX)
+ || *plen < (apr_off_t)b->length) {
+ /* bucket is longer than *plen */
+ apr_bucket_split(b, *plen);
+ }
+ *plen -= (apr_off_t)b->length;
+}
+
+apr_status_t h2_brigade_concat_length(apr_bucket_brigade *dest,
+ apr_bucket_brigade *src,
+ apr_off_t length)
+{
+ apr_bucket *b;
+ apr_off_t remain = length;
+ apr_status_t status = APR_SUCCESS;
+
+ while (!APR_BRIGADE_EMPTY(src)) {
+ b = APR_BRIGADE_FIRST(src);
+
+ if (APR_BUCKET_IS_METADATA(b)) {
+ APR_BUCKET_REMOVE(b);
+ APR_BRIGADE_INSERT_TAIL(dest, b);
+ }
+ else {
+ if (remain <= 0) {
+ return status;
+ }
+ if (b->length == ((apr_size_t)-1)) {
+ const char *ign;
+ apr_size_t ilen;
+ status = apr_bucket_read(b, &ign, &ilen, APR_BLOCK_READ);
+ if (status != APR_SUCCESS) {
+ return status;
+ }
+ }
+ fit_bucket_into(b, &remain);
+ APR_BUCKET_REMOVE(b);
+ APR_BRIGADE_INSERT_TAIL(dest, b);
+ }
+ }
+ return status;
+}
+
+apr_status_t h2_brigade_copy_length(apr_bucket_brigade *dest,
+ apr_bucket_brigade *src,
+ apr_off_t length)
+{
+ apr_bucket *b, *next;
+ apr_off_t remain = length;
+ apr_status_t status = APR_SUCCESS;
+
+ for (b = APR_BRIGADE_FIRST(src);
+ b != APR_BRIGADE_SENTINEL(src);
+ b = next) {
+ next = APR_BUCKET_NEXT(b);
+
+ if (APR_BUCKET_IS_METADATA(b)) {
+ /* fall through */
+ }
+ else {
+ if (remain <= 0) {
+ return status;
+ }
+ if (b->length == ((apr_size_t)-1)) {
+ const char *ign;
+ apr_size_t ilen;
+ status = apr_bucket_read(b, &ign, &ilen, APR_BLOCK_READ);
+ if (status != APR_SUCCESS) {
+ return status;
+ }
+ }
+ fit_bucket_into(b, &remain);
+ }
+ status = apr_bucket_copy(b, &b);
+ if (status != APR_SUCCESS) {
+ return status;
+ }
+ APR_BRIGADE_INSERT_TAIL(dest, b);
+ }
+ return status;
+}
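/* Illustrative sketch, not part of this patch: moving at most 16 KB from an
 * input brigade to an output brigade, leaving the remainder in place.
 * The wrapper name is hypothetical. */
static apr_status_t move_up_to_16k(apr_bucket_brigade *out,
                                   apr_bucket_brigade *in)
{
    return h2_brigade_concat_length(out, in, 16 * 1024);
}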
+
+apr_size_t h2_util_bucket_print(char *buffer, apr_size_t bmax,
+ apr_bucket *b, const char *sep)
+{
+ apr_size_t off = 0;
+ if (sep && *sep) {
+ off += apr_snprintf(buffer+off, bmax-off, "%s", sep);
+ }
+
+ if (bmax <= off) {
+ return off;
+ }
+ else if (APR_BUCKET_IS_METADATA(b)) {
+ off += apr_snprintf(buffer+off, bmax-off, "%s", b->type->name);
+ }
+ else if (bmax > off) {
+ off += apr_snprintf(buffer+off, bmax-off, "%s[%ld]",
+ b->type->name,
+ (long)(b->length == ((apr_size_t)-1)?
+ -1 : b->length));
+ }
+ return off;
+}
+
+apr_size_t h2_util_bb_print(char *buffer, apr_size_t bmax,
+ const char *tag, const char *sep,
+ apr_bucket_brigade *bb)
+{
+ apr_size_t off = 0;
+ const char *sp = "";
+ apr_bucket *b;
+
+ if (bmax > 1) {
+ if (bb) {
+ memset(buffer, 0, bmax--);
+ off += apr_snprintf(buffer+off, bmax-off, "%s(", tag);
+ for (b = APR_BRIGADE_FIRST(bb);
+ (bmax > off) && (b != APR_BRIGADE_SENTINEL(bb));
+ b = APR_BUCKET_NEXT(b)) {
+
+ off += h2_util_bucket_print(buffer+off, bmax-off, b, sp);
+ sp = " ";
+ }
+ if (bmax > off) {
+ off += apr_snprintf(buffer+off, bmax-off, ")%s", sep);
+ }
+ }
+ else {
+ off += apr_snprintf(buffer+off, bmax-off, "%s(null)%s", tag, sep);
+ }
+ }
+ return off;
+}
+
+apr_status_t h2_append_brigade(apr_bucket_brigade *to,
+ apr_bucket_brigade *from,
+ apr_off_t *plen,
+ int *peos,
+ h2_bucket_gate *should_append)
+{
+ apr_bucket *e;
+ apr_off_t start, remain;
+ apr_status_t rv;
+
+ *peos = 0;
+ start = remain = *plen;
+
+ while (!APR_BRIGADE_EMPTY(from)) {
+ e = APR_BRIGADE_FIRST(from);
+
+ if (!should_append(e)) {
+ goto leave;
+ }
+ else if (APR_BUCKET_IS_METADATA(e)) {
+ if (APR_BUCKET_IS_EOS(e)) {
+ *peos = 1;
+ apr_bucket_delete(e);
+ continue;
+ }
+ }
+ else {
+ if (remain <= 0) {
+ goto leave;
+ }
+ if (e->length == ((apr_size_t)-1)) {
+ const char *ign;
+ apr_size_t ilen;
+ rv = apr_bucket_read(e, &ign, &ilen, APR_BLOCK_READ);
+ if (rv != APR_SUCCESS) {
+ return rv;
+ }
+ }
+ fit_bucket_into(e, &remain);
+ }
+ APR_BUCKET_REMOVE(e);
+ APR_BRIGADE_INSERT_TAIL(to, e);
+ }
+leave:
+ *plen = start - remain;
+ return APR_SUCCESS;
+}
+
+apr_off_t h2_brigade_mem_size(apr_bucket_brigade *bb)
+{
+ apr_bucket *b;
+ apr_off_t total = 0;
+
+ for (b = APR_BRIGADE_FIRST(bb);
+ b != APR_BRIGADE_SENTINEL(bb);
+ b = APR_BUCKET_NEXT(b))
+ {
+ total += sizeof(*b);
+ if (b->length > 0) {
+ if (APR_BUCKET_IS_HEAP(b)
+ || APR_BUCKET_IS_POOL(b)) {
+ total += b->length;
+ }
+ }
+ }
+ return total;
+}
+
+
+/*******************************************************************************
+ * h2_ngheader
+ ******************************************************************************/
+
+static int count_header(void *ctx, const char *key, const char *value)
+{
+ if (!h2_util_ignore_resp_header(key)) {
+ (*((size_t*)ctx))++;
+ }
+ return 1;
+}
+
+static const char *inv_field_name_chr(const char *token)
+{
+ const char *p = ap_scan_http_token(token);
+ if (p == token && *p == ':') {
+ p = ap_scan_http_token(++p);
+ }
+ return (p && *p)? p : NULL;
+}
+
+static const char *inv_field_value_chr(const char *token)
+{
+ const char *p = ap_scan_http_field_content(token);
+ return (p && *p)? p : NULL;
+}
+
+static void strip_field_value_ws(nghttp2_nv *nv)
+{
+ while(nv->valuelen && (nv->value[0] == ' ' || nv->value[0] == '\t')) {
+ nv->value++; nv->valuelen--;
+ }
+ while(nv->valuelen && (nv->value[nv->valuelen-1] == ' '
+ || nv->value[nv->valuelen-1] == '\t')) {
+ nv->valuelen--;
+ }
+}
+
+typedef struct ngh_ctx {
+ apr_pool_t *p;
+ int unsafe;
+ h2_ngheader *ngh;
+ apr_status_t status;
+} ngh_ctx;
+
+static int add_header(ngh_ctx *ctx, const char *key, const char *value)
+{
+ nghttp2_nv *nv = &(ctx->ngh)->nv[(ctx->ngh)->nvlen++];
+ const char *p;
+
+ if (!ctx->unsafe) {
+ if ((p = inv_field_name_chr(key))) {
+ ap_log_perror(APLOG_MARK, APLOG_TRACE1, APR_EINVAL, ctx->p,
+ "h2_request: head field '%s: %s' has invalid char %s",
+ key, value, p);
+ ctx->status = APR_EINVAL;
+ return 0;
+ }
+ if ((p = inv_field_value_chr(value))) {
+ ap_log_perror(APLOG_MARK, APLOG_TRACE1, APR_EINVAL, ctx->p,
+ "h2_request: head field '%s: %s' has invalid char %s",
+ key, value, p);
+ ctx->status = APR_EINVAL;
+ return 0;
+ }
+ }
+ nv->name = (uint8_t*)key;
+ nv->namelen = strlen(key);
+ nv->value = (uint8_t*)value;
+ nv->valuelen = strlen(value);
+ strip_field_value_ws(nv);
+
+ return 1;
+}
+
+static int add_table_header(void *ctx, const char *key, const char *value)
+{
+ if (!h2_util_ignore_resp_header(key)) {
+ add_header(ctx, key, value);
+ }
+ return 1;
+}
+
+static apr_status_t ngheader_create(h2_ngheader **ph, apr_pool_t *p,
+ int unsafe, size_t key_count,
+ const char *keys[], const char *values[],
+ apr_table_t *headers)
+{
+ ngh_ctx ctx;
+ size_t n, i;
+
+ ctx.p = p;
+ ctx.unsafe = unsafe;
+
+ n = key_count;
+ apr_table_do(count_header, &n, headers, NULL);
+
+ *ph = ctx.ngh = apr_pcalloc(p, sizeof(h2_ngheader));
+ if (!ctx.ngh) {
+ return APR_ENOMEM;
+ }
+
+ ctx.ngh->nv = apr_pcalloc(p, n * sizeof(nghttp2_nv));
+ if (!ctx.ngh->nv) {
+ return APR_ENOMEM;
+ }
+
+ ctx.status = APR_SUCCESS;
+ for (i = 0; i < key_count; ++i) {
+ if (!add_header(&ctx, keys[i], values[i])) {
+ return ctx.status;
+ }
+ }
+
+ apr_table_do(add_table_header, &ctx, headers, NULL);
+
+ return ctx.status;
+}
+
+#if AP_HAS_RESPONSE_BUCKETS
+
+static int is_unsafe(ap_bucket_response *h)
+{
+ const char *v = h->notes? apr_table_get(h->notes, H2_HDR_CONFORMANCE) : NULL;
+ return (v && !strcmp(v, H2_HDR_CONFORMANCE_UNSAFE));
+}
+
+apr_status_t h2_res_create_ngtrailer(h2_ngheader **ph, apr_pool_t *p,
+ ap_bucket_headers *headers)
+{
+ return ngheader_create(ph, p, 0,
+ 0, NULL, NULL, headers->headers);
+}
+
+apr_status_t h2_res_create_ngheader(h2_ngheader **ph, apr_pool_t *p,
+ ap_bucket_response *response)
+{
+ const char *keys[] = {
+ ":status"
+ };
+ const char *values[] = {
+ apr_psprintf(p, "%d", response->status)
+ };
+ return ngheader_create(ph, p, is_unsafe(response),
+ H2_ALEN(keys), keys, values, response->headers);
+}
+
+#else /* AP_HAS_RESPONSE_BUCKETS */
+
+static int is_unsafe(h2_headers *h)
+{
+ const char *v = h->notes? apr_table_get(h->notes, H2_HDR_CONFORMANCE) : NULL;
+ return (v && !strcmp(v, H2_HDR_CONFORMANCE_UNSAFE));
+}
+
+apr_status_t h2_res_create_ngtrailer(h2_ngheader **ph, apr_pool_t *p,
+ h2_headers *headers)
+{
+ return ngheader_create(ph, p, is_unsafe(headers),
+ 0, NULL, NULL, headers->headers);
+}
+
+apr_status_t h2_res_create_ngheader(h2_ngheader **ph, apr_pool_t *p,
+ h2_headers *headers)
+{
+ const char *keys[] = {
+ ":status"
+ };
+ const char *values[] = {
+ apr_psprintf(p, "%d", headers->status)
+ };
+ return ngheader_create(ph, p, is_unsafe(headers),
+ H2_ALEN(keys), keys, values, headers->headers);
+}
+
+#endif /* else AP_HAS_RESPONSE_BUCKETS */
+
+apr_status_t h2_req_create_ngheader(h2_ngheader **ph, apr_pool_t *p,
+ const struct h2_request *req)
+{
+
+ const char *keys[] = {
+ ":scheme",
+ ":authority",
+ ":path",
+ ":method",
+ };
+ const char *values[] = {
+ req->scheme,
+ req->authority,
+ req->path,
+ req->method,
+ };
+
+ ap_assert(req->scheme);
+ ap_assert(req->authority);
+ ap_assert(req->path);
+ ap_assert(req->method);
+
+ return ngheader_create(ph, p, 0, H2_ALEN(keys), keys, values, req->headers);
+}
+
+/*******************************************************************************
+ * header HTTP/1 <-> HTTP/2 conversions
+ ******************************************************************************/
+
+
+typedef struct {
+ const char *name;
+ size_t len;
+} literal;
+
+#define H2_DEF_LITERAL(n) { (n), (sizeof(n)-1) }
+#define H2_LIT_ARGS(a) (a),H2_ALEN(a)
+
+static literal IgnoredRequestHeaders[] = {
+ H2_DEF_LITERAL("upgrade"),
+ H2_DEF_LITERAL("connection"),
+ H2_DEF_LITERAL("keep-alive"),
+ H2_DEF_LITERAL("http2-settings"),
+ H2_DEF_LITERAL("proxy-connection"),
+ H2_DEF_LITERAL("transfer-encoding"),
+};
+static literal IgnoredRequestTrailers[] = { /* Ignore, see rfc7230, ch. 4.1.2 */
+ H2_DEF_LITERAL("te"),
+ H2_DEF_LITERAL("host"),
+ H2_DEF_LITERAL("range"),
+ H2_DEF_LITERAL("cookie"),
+ H2_DEF_LITERAL("expect"),
+ H2_DEF_LITERAL("pragma"),
+ H2_DEF_LITERAL("max-forwards"),
+ H2_DEF_LITERAL("cache-control"),
+ H2_DEF_LITERAL("authorization"),
+ H2_DEF_LITERAL("content-length"),
+ H2_DEF_LITERAL("proxy-authorization"),
+};
+static literal IgnoredResponseHeaders[] = {
+ H2_DEF_LITERAL("upgrade"),
+ H2_DEF_LITERAL("connection"),
+ H2_DEF_LITERAL("keep-alive"),
+ H2_DEF_LITERAL("transfer-encoding"),
+};
+static literal IgnoredResponseTrailers[] = {
+ H2_DEF_LITERAL("age"),
+ H2_DEF_LITERAL("date"),
+ H2_DEF_LITERAL("vary"),
+ H2_DEF_LITERAL("cookie"),
+ H2_DEF_LITERAL("expires"),
+ H2_DEF_LITERAL("warning"),
+ H2_DEF_LITERAL("location"),
+ H2_DEF_LITERAL("retry-after"),
+ H2_DEF_LITERAL("cache-control"),
+ H2_DEF_LITERAL("www-authenticate"),
+ H2_DEF_LITERAL("proxy-authenticate"),
+};
+
+static int contains_name(const literal *lits, size_t llen, nghttp2_nv *nv)
+{
+ const literal *lit;
+ size_t i;
+
+ for (i = 0; i < llen; ++i) {
+ lit = &lits[i];
+ if (lit->len == nv->namelen
+ && !apr_strnatcasecmp(lit->name, (const char *)nv->name)) {
+ return 1;
+ }
+ }
+ return 0;
+}
+
+int h2_util_ignore_resp_header(const char *name)
+{
+ nghttp2_nv nv;
+
+ nv.name = (uint8_t*)name;
+ nv.namelen = strlen(name);
+ return contains_name(H2_LIT_ARGS(IgnoredResponseHeaders), &nv);
+}
+
+
+static int h2_req_ignore_header(nghttp2_nv *nv)
+{
+ return contains_name(H2_LIT_ARGS(IgnoredRequestHeaders), nv);
+}
+
+int h2_ignore_req_trailer(const char *name, size_t len)
+{
+ nghttp2_nv nv;
+
+ nv.name = (uint8_t*)name;
+ nv.namelen = strlen(name);
+ return (h2_req_ignore_header(&nv)
+ || contains_name(H2_LIT_ARGS(IgnoredRequestTrailers), &nv));
+}
+
+int h2_ignore_resp_trailer(const char *name, size_t len)
+{
+ nghttp2_nv nv;
+
+ nv.name = (uint8_t*)name;
+ nv.namelen = strlen(name);
+ return (contains_name(H2_LIT_ARGS(IgnoredResponseHeaders), &nv)
+ || contains_name(H2_LIT_ARGS(IgnoredResponseTrailers), &nv));
+}
+
+static apr_status_t req_add_header(apr_table_t *headers, apr_pool_t *pool,
+ nghttp2_nv *nv, size_t max_field_len,
+ int *pwas_added)
+{
+ char *hname, *hvalue;
+ const char *existing;
+
+ *pwas_added = 0;
+ strip_field_value_ws(nv);
+
+ if (h2_req_ignore_header(nv)) {
+ return APR_SUCCESS;
+ }
+ else if (nv->namelen == sizeof("cookie")-1
+ && !apr_strnatcasecmp("cookie", (const char *)nv->name)) {
+ existing = apr_table_get(headers, "cookie");
+ if (existing) {
+ /* Cookie headers come separately in HTTP/2, but need
+ * to be merged with "; " (instead of the default ", ")
+ */
+ if (max_field_len
+ && strlen(existing) + nv->valuelen + nv->namelen + 4
+ > max_field_len) {
+ /* "key: oldval, nval" is too long */
+ return APR_EINVAL;
+ }
+ hvalue = apr_pstrndup(pool, (const char*)nv->value, nv->valuelen);
+ apr_table_setn(headers, "Cookie",
+ apr_psprintf(pool, "%s; %s", existing, hvalue));
+ return APR_SUCCESS;
+ }
+ }
+ else if (nv->namelen == sizeof("host")-1
+ && !apr_strnatcasecmp("host", (const char *)nv->name)) {
+ if (apr_table_get(headers, "Host")) {
+ return APR_SUCCESS; /* ignore duplicate */
+ }
+ }
+
+ hname = apr_pstrndup(pool, (const char*)nv->name, nv->namelen);
+ h2_util_camel_case_header(hname, nv->namelen);
+ existing = apr_table_get(headers, hname);
+ if (max_field_len) {
+ if ((existing? strlen(existing)+2 : 0) + nv->valuelen + nv->namelen + 2
+ > max_field_len) {
+ /* "key: (oldval, )?nval" is too long */
+ return APR_EINVAL;
+ }
+ }
+ if (!existing) *pwas_added = 1;
+ hvalue = apr_pstrndup(pool, (const char*)nv->value, nv->valuelen);
+ apr_table_mergen(headers, hname, hvalue);
+
+ return APR_SUCCESS;
+}
+
+apr_status_t h2_req_add_header(apr_table_t *headers, apr_pool_t *pool,
+ const char *name, size_t nlen,
+ const char *value, size_t vlen,
+ size_t max_field_len, int *pwas_added)
+{
+ nghttp2_nv nv;
+
+ nv.name = (uint8_t*)name;
+ nv.namelen = nlen;
+ nv.value = (uint8_t*)value;
+ nv.valuelen = vlen;
+ return req_add_header(headers, pool, &nv, max_field_len, pwas_added);
+}
+
+/*******************************************************************************
+ * frame logging
+ ******************************************************************************/
+
+int h2_util_frame_print(const nghttp2_frame *frame, char *buffer, size_t maxlen)
+{
+ char scratch[128];
+ size_t s_len = sizeof(scratch)/sizeof(scratch[0]);
+
+ switch (frame->hd.type) {
+ case NGHTTP2_DATA: {
+ return apr_snprintf(buffer, maxlen,
+ "DATA[length=%d, flags=%d, stream=%d, padlen=%d]",
+ (int)frame->hd.length, frame->hd.flags,
+ frame->hd.stream_id, (int)frame->data.padlen);
+ }
+ case NGHTTP2_HEADERS: {
+ return apr_snprintf(buffer, maxlen,
+ "HEADERS[length=%d, hend=%d, stream=%d, eos=%d]",
+ (int)frame->hd.length,
+ !!(frame->hd.flags & NGHTTP2_FLAG_END_HEADERS),
+ frame->hd.stream_id,
+ !!(frame->hd.flags & NGHTTP2_FLAG_END_STREAM));
+ }
+ case NGHTTP2_PRIORITY: {
+ return apr_snprintf(buffer, maxlen,
+ "PRIORITY[length=%d, flags=%d, stream=%d]",
+ (int)frame->hd.length,
+ frame->hd.flags, frame->hd.stream_id);
+ }
+ case NGHTTP2_RST_STREAM: {
+ return apr_snprintf(buffer, maxlen,
+ "RST_STREAM[length=%d, flags=%d, stream=%d]",
+ (int)frame->hd.length,
+ frame->hd.flags, frame->hd.stream_id);
+ }
+ case NGHTTP2_SETTINGS: {
+ if (frame->hd.flags & NGHTTP2_FLAG_ACK) {
+ return apr_snprintf(buffer, maxlen,
+ "SETTINGS[ack=1, stream=%d]",
+ frame->hd.stream_id);
+ }
+ return apr_snprintf(buffer, maxlen,
+ "SETTINGS[length=%d, stream=%d]",
+ (int)frame->hd.length, frame->hd.stream_id);
+ }
+ case NGHTTP2_PUSH_PROMISE: {
+ return apr_snprintf(buffer, maxlen,
+ "PUSH_PROMISE[length=%d, hend=%d, stream=%d]",
+ (int)frame->hd.length,
+ !!(frame->hd.flags & NGHTTP2_FLAG_END_HEADERS),
+ frame->hd.stream_id);
+ }
+ case NGHTTP2_PING: {
+ return apr_snprintf(buffer, maxlen,
+ "PING[length=%d, ack=%d, stream=%d]",
+ (int)frame->hd.length,
+ frame->hd.flags&NGHTTP2_FLAG_ACK,
+ frame->hd.stream_id);
+ }
+ case NGHTTP2_GOAWAY: {
+ size_t len = (frame->goaway.opaque_data_len < s_len)?
+ frame->goaway.opaque_data_len : s_len-1;
+ if (len)
+ memcpy(scratch, frame->goaway.opaque_data, len);
+ scratch[len] = '\0';
+ return apr_snprintf(buffer, maxlen, "GOAWAY[error=%d, reason='%s', "
+ "last_stream=%d]", frame->goaway.error_code,
+ scratch, frame->goaway.last_stream_id);
+ }
+ case NGHTTP2_WINDOW_UPDATE: {
+ return apr_snprintf(buffer, maxlen,
+ "WINDOW_UPDATE[stream=%d, incr=%d]",
+ frame->hd.stream_id,
+ frame->window_update.window_size_increment);
+ }
+ default:
+ return apr_snprintf(buffer, maxlen,
+ "type=%d[length=%d, flags=%d, stream=%d]",
+ frame->hd.type, (int)frame->hd.length,
+ frame->hd.flags, frame->hd.stream_id);
+ }
+}
+
+/*******************************************************************************
+ * push policy
+ ******************************************************************************/
+int h2_push_policy_determine(apr_table_t *headers, apr_pool_t *p, int push_enabled)
+{
+ h2_push_policy policy = H2_PUSH_NONE;
+ if (push_enabled) {
+ const char *val = apr_table_get(headers, "accept-push-policy");
+ if (val) {
+ if (ap_find_token(p, val, "fast-load")) {
+ policy = H2_PUSH_FAST_LOAD;
+ }
+ else if (ap_find_token(p, val, "head")) {
+ policy = H2_PUSH_HEAD;
+ }
+ else if (ap_find_token(p, val, "default")) {
+ policy = H2_PUSH_DEFAULT;
+ }
+ else if (ap_find_token(p, val, "none")) {
+ policy = H2_PUSH_NONE;
+ }
+ else {
+ /* no known policy found in this header, use the default */
+ policy = H2_PUSH_DEFAULT;
+ }
+ }
+ else {
+ policy = H2_PUSH_DEFAULT;
+ }
+ }
+ return policy;
+}
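/* Illustrative sketch, not part of this patch: a request carrying
 * "accept-push-policy: head" maps to H2_PUSH_HEAD when push is enabled.
 * The function name is hypothetical. */
static void push_policy_example(apr_pool_t *pool)
{
    apr_table_t *headers = apr_table_make(pool, 1);
    int policy;

    apr_table_setn(headers, "accept-push-policy", "head");
    policy = h2_push_policy_determine(headers, pool, 1);
    /* policy == H2_PUSH_HEAD; with push disabled it stays H2_PUSH_NONE */
    (void)policy;
}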
+
+void h2_util_drain_pipe(apr_file_t *pipe)
+{
+ char rb[512];
+ apr_size_t nr = sizeof(rb);
+
+ while (apr_file_read(pipe, rb, &nr) == APR_SUCCESS) {
+ /* Although we write just one byte to the other end of the pipe
+ * during wakeup, multiple threads could call the wakeup.
+ * So simply drain all the data from the input side of the pipe.
+ */
+ if (nr != sizeof(rb))
+ break;
+ }
+}
+
+apr_status_t h2_util_wait_on_pipe(apr_file_t *pipe)
+{
+ char rb[512];
+ apr_size_t nr = sizeof(rb);
+
+ return apr_file_read(pipe, rb, &nr);
+}
+
+#if AP_HAS_RESPONSE_BUCKETS
+
+static int add_header_lengths(void *ctx, const char *name, const char *value)
+{
+ apr_size_t *plen = ctx;
+ *plen += strlen(name) + strlen(value);
+ return 1;
+}
+
+apr_size_t headers_length_estimate(ap_bucket_headers *hdrs)
+{
+ apr_size_t len = 0;
+ apr_table_do(add_header_lengths, &len, hdrs->headers, NULL);
+ return len;
+}
+
+apr_size_t response_length_estimate(ap_bucket_response *resp)
+{
+ apr_size_t len = 3 + 1 + 8 + (resp->reason? strlen(resp->reason) : 10);
+ apr_table_do(add_header_lengths, &len, resp->headers, NULL);
+ return len;
+}
+
+#endif /* AP_HAS_RESPONSE_BUCKETS */
diff --git a/modules/http2/h2_util.h b/modules/http2/h2_util.h
new file mode 100644
index 0000000..d2e6548
--- /dev/null
+++ b/modules/http2/h2_util.h
@@ -0,0 +1,519 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __mod_h2__h2_util__
+#define __mod_h2__h2_util__
+
+#include <nghttp2/nghttp2.h>
+#include <http_protocol.h>
+
+#include "h2.h"
+#include "h2_headers.h"
+
+/*******************************************************************************
+ * some debugging/format helpers
+ ******************************************************************************/
+struct h2_request;
+struct nghttp2_frame;
+
+size_t h2_util_hex_dump(char *buffer, size_t maxlen,
+ const char *data, size_t datalen);
+
+void h2_util_camel_case_header(char *s, size_t len);
+
+int h2_util_frame_print(const nghttp2_frame *frame, char *buffer, size_t maxlen);
+
+/*******************************************************************************
+ * ihash - hash for structs with int identifier
+ ******************************************************************************/
+typedef struct h2_ihash_t h2_ihash_t;
+typedef int h2_ihash_iter_t(void *ctx, void *val);
+
+/**
+ * Create a hash for structures that have an identifying int member.
+ * @param pool the pool to use
+ * @param offset_of_int the offsetof() of the int member in the struct
+ */
+h2_ihash_t *h2_ihash_create(apr_pool_t *pool, size_t offset_of_int);
+
+unsigned int h2_ihash_count(h2_ihash_t *ih);
+int h2_ihash_empty(h2_ihash_t *ih);
+void *h2_ihash_get(h2_ihash_t *ih, int id);
+
+/**
+ * Iterate over the hash members (without defined order) and invoke
+ * fn for each member until 0 is returned.
+ * @param ih the hash to iterate over
+ * @param fn the function to invoke on each member
+ * @param ctx user supplied data passed into each iteration call
+ * @return 0 if one iteration returned 0, otherwise != 0
+ */
+int h2_ihash_iter(h2_ihash_t *ih, h2_ihash_iter_t *fn, void *ctx);
+
+void h2_ihash_add(h2_ihash_t *ih, void *val);
+void h2_ihash_remove(h2_ihash_t *ih, int id);
+void h2_ihash_remove_val(h2_ihash_t *ih, void *val);
+void h2_ihash_clear(h2_ihash_t *ih);
+
+size_t h2_ihash_shift(h2_ihash_t *ih, void **buffer, size_t max);
+
+/*******************************************************************************
+ * iqueue - sorted list of int with user defined ordering
+ ******************************************************************************/
+typedef struct h2_iqueue {
+ int *elts;
+ int head;
+ int nelts;
+ int nalloc;
+ apr_pool_t *pool;
+} h2_iqueue;
+
+/**
+ * Comparator for two int to determine their order.
+ *
+ * @param i1 first int to compare
+ * @param i2 second int to compare
+ * @param ctx provided user data
+ * @return value is the same as for strcmp() and has the effect:
+ * == 0: s1 and s2 are treated equal in ordering
+ * < 0: s1 should be sorted before s2
+ * > 0: s2 should be sorted before s1
+ */
+typedef int h2_iq_cmp(int i1, int i2, void *ctx);
+
+/**
+ * Allocate a new queue from the pool and initialize.
+ * @param pool the memory pool
+ * @param capacity the initial capacity of the queue
+ */
+h2_iqueue *h2_iq_create(apr_pool_t *pool, int capacity);
+
+/**
+ * Return != 0 iff there are no ints in the queue.
+ * @param q the queue to check
+ */
+int h2_iq_empty(h2_iqueue *q);
+
+/**
+ * Return the number of int in the queue.
+ * @param q the queue to get size on
+ */
+int h2_iq_count(h2_iqueue *q);
+
+/**
+ * Add a stream id to the queue.
+ *
+ * @param q the queue to append the id to
+ * @param sid the stream id to add
+ * @param cmp the comparator for sorting
+ * @param ctx user data for comparator
+ * @return != 0 iff id was not already there
+ */
+int h2_iq_add(h2_iqueue *q, int sid, h2_iq_cmp *cmp, void *ctx);
+
+/**
+ * Append the id to the queue if not already present.
+ *
+ * @param q the queue to append the id to
+ * @param sid the id to append
+ * @return != 0 iff id was not already there
+ */
+int h2_iq_append(h2_iqueue *q, int sid);
+
+/**
+ * Remove the int from the queue. Return != 0 iff it was found.
+ * @param q the queue
+ * @param sid the stream id to remove
+ * @return != 0 iff int was found in queue
+ */
+int h2_iq_remove(h2_iqueue *q, int sid);
+
+/**
+ * Remove all entries in the queue.
+ */
+void h2_iq_clear(h2_iqueue *q);
+
+/**
+ * Sort the stream id queue again. Call if the int ordering
+ * has changed.
+ *
+ * @param q the queue to sort
+ * @param cmp the comparator for sorting
+ * @param ctx user data for the comparator
+ */
+void h2_iq_sort(h2_iqueue *q, h2_iq_cmp *cmp, void *ctx);
+
+/**
+ * Get the first id from the queue or 0 if the queue is empty.
+ * The id is removed from the queue.
+ *
+ * @param q the queue to get the first id from
+ * @return the first id of the queue, 0 if empty
+ */
+int h2_iq_shift(h2_iqueue *q);
+
+/**
+ * Get the first max ids from the queue. All these ids will be removed.
+ *
+ * @param q the queue to get the first ids from
+ * @param pint the int array to receive the values
+ * @param max the maximum number of ids to shift
+ * @return the actual number of ids shifted
+ */
+size_t h2_iq_mshift(h2_iqueue *q, int *pint, size_t max);
+
+/**
+ * Determine if int is in the queue already
+ *
+ * @param q the queue
+ * @param sid the integer id to check for
+ * @return != 0 iff sid is already in the queue
+ */
+int h2_iq_contains(h2_iqueue *q, int sid);
+
+/*******************************************************************************
+ * FIFO queue (void* elements)
+ ******************************************************************************/
+
+/**
+ * A thread-safe FIFO queue with some extra bells and whistles. If you
+ * do not need anything special, better use 'apr_queue'.
+ */
+typedef struct h2_fifo h2_fifo;
+
+/**
+ * Create a FIFO queue that can hold up to capacity elements. Elements can
+ * appear several times.
+ */
+apr_status_t h2_fifo_create(h2_fifo **pfifo, apr_pool_t *pool, int capacity);
+
+/**
+ * Create a FIFO set that can hold up to capacity elements. Elements only
+ * appear once. Pushing an element already present does not change the
+ * queue and is successful.
+ */
+apr_status_t h2_fifo_set_create(h2_fifo **pfifo, apr_pool_t *pool, int capacity);
+
+apr_status_t h2_fifo_term(h2_fifo *fifo);
+
+int h2_fifo_count(h2_fifo *fifo);
+
+/**
+ * Push an element into the queue. Blocks if there is no capacity left.
+ *
+ * @param fifo the FIFO queue
+ * @param elem the element to push
+ * @return APR_SUCCESS on push, APR_EAGAIN on try_push on a full queue,
+ * APR_EEXIST when in set mode and elem already there.
+ */
+apr_status_t h2_fifo_push(h2_fifo *fifo, void *elem);
+apr_status_t h2_fifo_try_push(h2_fifo *fifo, void *elem);
+
+apr_status_t h2_fifo_pull(h2_fifo *fifo, void **pelem);
+apr_status_t h2_fifo_try_pull(h2_fifo *fifo, void **pelem);
+
+typedef enum {
+ H2_FIFO_OP_PULL, /* pull the element from the queue, ie discard it */
+ H2_FIFO_OP_REPUSH, /* pull and immediately re-push it */
+} h2_fifo_op_t;
+
+typedef h2_fifo_op_t h2_fifo_peek_fn(void *head, void *ctx);
+
+/**
+ * Call given function on the head of the queue, once it exists, and
+ * perform the returned operation on it. The queue will hold its lock during
+ * this time, so no other operations on the queue are possible.
+ * @param fifo the queue to peek at
+ * @param fn the function to call on the head, once available
+ * @param ctx context to pass in call to function
+ */
+apr_status_t h2_fifo_peek(h2_fifo *fifo, h2_fifo_peek_fn *fn, void *ctx);
+
+/**
+ * Non-blocking version of h2_fifo_peek.
+ */
+apr_status_t h2_fifo_try_peek(h2_fifo *fifo, h2_fifo_peek_fn *fn, void *ctx);
+
+/**
+ * Remove the elem from the queue; all appearances will be removed.
+ * @param elem the element to remove
+ * @return APR_SUCCESS iff > 0 elems were removed, APR_EAGAIN otherwise.
+ */
+apr_status_t h2_fifo_remove(h2_fifo *fifo, void *elem);
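/* Illustrative sketch, not part of this patch: a peek callback that discards
 * the head element only when it matches the supplied context pointer.
 * The callback name is hypothetical. */
static h2_fifo_op_t pull_if_match(void *head, void *ctx)
{
    return (head == ctx)? H2_FIFO_OP_PULL : H2_FIFO_OP_REPUSH;
}
/* usage: h2_fifo_try_peek(fifo, pull_if_match, wanted_elem); */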
+
+/*******************************************************************************
+ * iFIFO queue (int elements)
+ ******************************************************************************/
+
+/**
+ * A thread-safe FIFO queue with some extra bells and whistles. If you
+ * do not need anything special, better use 'apr_queue'.
+ */
+typedef struct h2_ififo h2_ififo;
+
+/**
+ * Create a FIFO queue that can hold up to capacity ints. Ints can
+ * appear several times.
+ */
+apr_status_t h2_ififo_create(h2_ififo **pfifo, apr_pool_t *pool, int capacity);
+
+/**
+ * Create a FIFO set that can hold up to capacity integers. Ints only
+ * appear once. Pushing an int already present does not change the
+ * queue and is successful.
+ */
+apr_status_t h2_ififo_set_create(h2_ififo **pfifo, apr_pool_t *pool, int capacity);
+
+apr_status_t h2_ififo_term(h2_ififo *fifo);
+
+int h2_ififo_count(h2_ififo *fifo);
+
+/**
+ * Push an int into the queue. Blocks if there is no capacity left.
+ *
+ * @param fifo the FIFO queue
+ * @param id the int to push
+ * @return APR_SUCCESS on push, APR_EAGAIN on try_push on a full queue,
+ * APR_EEXIST when in set mode and elem already there.
+ */
+apr_status_t h2_ififo_push(h2_ififo *fifo, int id);
+apr_status_t h2_ififo_try_push(h2_ififo *fifo, int id);
+
+apr_status_t h2_ififo_pull(h2_ififo *fifo, int *pi);
+apr_status_t h2_ififo_try_pull(h2_ififo *fifo, int *pi);
+
+typedef h2_fifo_op_t h2_ififo_peek_fn(int head, void *ctx);
+
+/**
+ * Call given function on the head of the queue, once it exists, and
+ * perform the returned operation on it. The queue will hold its lock during
+ * this time, so no other operations on the queue are possible.
+ * @param fifo the queue to peek at
+ * @param fn the function to call on the head, once available
+ * @param ctx context to pass in call to function
+ */
+apr_status_t h2_ififo_peek(h2_ififo *fifo, h2_ififo_peek_fn *fn, void *ctx);
+
+/**
+ * Non-blocking version of h2_ififo_peek.
+ */
+apr_status_t h2_ififo_try_peek(h2_ififo *fifo, h2_ififo_peek_fn *fn, void *ctx);
+
+/**
+ * Remove the integer from the queue; all appearances will be removed.
+ * @param id the integer to remove
+ * @return APR_SUCCESS iff > 0 ints were removed, APR_EAGAIN otherwise.
+ */
+apr_status_t h2_ififo_remove(h2_ififo *fifo, int id);
+
+/*******************************************************************************
+ * common helpers
+ ******************************************************************************/
+/* h2_log2(n) iff n is a power of 2 */
+unsigned char h2_log2(int n);
+
+/**
+ * Count the bytes that all key/value pairs in a table have
+ * in length (excluding terminating 0s), plus pair_extra bytes per pair.
+ *
+ * @param t the table to inspect
+ * @param pair_extra the extra amount to add per pair
+ * @return the number of bytes all key/value pairs have
+ */
+apr_size_t h2_util_table_bytes(apr_table_t *t, apr_size_t pair_extra);
+
+/** Match a header name against a string literal, case insensitive */
+#define H2_HD_MATCH_LIT(l, name, nlen) \
+ ((nlen == sizeof(l) - 1) && !apr_strnatcasecmp(l, name))
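/* Illustrative sketch, not part of this patch: matching a NUL-terminated
 * header name of known length against a literal, case-insensitively.
 * The wrapper name is hypothetical. */
static int is_cookie_header(const char *name, size_t nlen)
{
    return H2_HD_MATCH_LIT("cookie", name, nlen);
}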
+
+/*******************************************************************************
+ * HTTP/2 header helpers
+ ******************************************************************************/
+int h2_ignore_req_trailer(const char *name, size_t len);
+int h2_ignore_resp_trailer(const char *name, size_t len);
+
+/**
+ * Set the push policy for the given request. Takes request headers into
+ * account, see draft https://tools.ietf.org/html/draft-ruellan-http-accept-push-policy-00
+ * for details.
+ *
+ * @param headers the http headers to inspect
+ * @param p the pool to use
+ * @param push_enabled if HTTP/2 server push is generally enabled for this request
+ * @return the push policy desired
+ */
+int h2_push_policy_determine(apr_table_t *headers, apr_pool_t *p, int push_enabled);
+
+/*******************************************************************************
+ * base64 url encoding, different table from normal base64
+ ******************************************************************************/
+/**
+ * I always wanted to write my own base64url decoder...not. See
+ * https://tools.ietf.org/html/rfc4648#section-5 for description.
+ */
+apr_size_t h2_util_base64url_decode(const char **decoded,
+ const char *encoded,
+ apr_pool_t *pool);
+const char *h2_util_base64url_encode(const char *data,
+ apr_size_t len, apr_pool_t *pool);
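+/*
+ * Editor's sketch of an encode/decode round trip, assuming a pool `p`;
+ * the input data is illustrative only.
+ *
+ *     const char *encoded = h2_util_base64url_encode("hello", 5, p);
+ *     const char *decoded;
+ *     apr_size_t dlen = h2_util_base64url_decode(&decoded, encoded, p);
+ *     (expected: dlen == 5 and decoded holds the original bytes)
+ */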
+
+/*******************************************************************************
+ * nghttp2 helpers
+ ******************************************************************************/
+
+int h2_util_ignore_resp_header(const char *name);
+
+typedef struct h2_ngheader {
+ nghttp2_nv *nv;
+ apr_size_t nvlen;
+} h2_ngheader;
+
+#if AP_HAS_RESPONSE_BUCKETS
+apr_status_t h2_res_create_ngtrailer(h2_ngheader **ph, apr_pool_t *p,
+ ap_bucket_headers *headers);
+apr_status_t h2_res_create_ngheader(h2_ngheader **ph, apr_pool_t *p,
+ ap_bucket_response *response);
+apr_status_t h2_req_create_ngheader(h2_ngheader **ph, apr_pool_t *p,
+ const struct h2_request *req);
+#else
+apr_status_t h2_res_create_ngtrailer(h2_ngheader **ph, apr_pool_t *p,
+ struct h2_headers *headers);
+apr_status_t h2_res_create_ngheader(h2_ngheader **ph, apr_pool_t *p,
+ struct h2_headers *headers);
+apr_status_t h2_req_create_ngheader(h2_ngheader **ph, apr_pool_t *p,
+ const struct h2_request *req);
+#endif
+
+/**
+ * Add an HTTP/2 header to the table, unless it is one that must be ignored;
+ * *pwas_added reports whether it really was added.
+ */
+apr_status_t h2_req_add_header(apr_table_t *headers, apr_pool_t *pool,
+ const char *name, size_t nlen,
+ const char *value, size_t vlen,
+ size_t max_field_len, int *pwas_added);
+
+/*******************************************************************************
+ * apr brigade helpers
+ ******************************************************************************/
+
+/**
+ * Concatenate at most length bytes from src to dest brigade, splitting
+ * buckets if necessary and reading buckets of indeterminate length.
+ */
+apr_status_t h2_brigade_concat_length(apr_bucket_brigade *dest,
+ apr_bucket_brigade *src,
+ apr_off_t length);
+
+/**
+ * Copy at most length bytes from src to dest brigade, splitting
+ * buckets if necessary and reading buckets of indeterminate length.
+ */
+apr_status_t h2_brigade_copy_length(apr_bucket_brigade *dest,
+ apr_bucket_brigade *src,
+ apr_off_t length);
+
+typedef apr_status_t h2_util_pass_cb(void *ctx,
+ const char *data, apr_off_t len);
+
+/**
+ * Print a bucket's meta data (type and length) to the buffer.
+ * @return number of characters printed
+ */
+apr_size_t h2_util_bucket_print(char *buffer, apr_size_t bmax,
+ apr_bucket *b, const char *sep);
+
+/**
+ * Prints the brigade bucket types and lengths into the given buffer
+ * up to bmax.
+ * @return number of characters printed
+ */
+apr_size_t h2_util_bb_print(char *buffer, apr_size_t bmax,
+ const char *tag, const char *sep,
+ apr_bucket_brigade *bb);
+/**
+ * Log the bucket brigade (bucket types and their lengths) to the log
+ * at the given level.
+ * @param c the connection to log for
+ * @param sid the stream identifier this brigade belongs to
+ * @param level the log level (as in APLOG_*)
+ * @param tag a short message text about the context
+ * @param bb the brigade to log
+ */
+#define h2_util_bb_log(c, sid, level, tag, bb) \
+if (APLOG_C_IS_LEVEL(c, level)) { \
+ do { \
+ char buffer[4 * 1024]; \
+ const char *line = "(null)"; \
+ apr_size_t len, bmax = sizeof(buffer)/sizeof(buffer[0]); \
+ len = h2_util_bb_print(buffer, bmax, (tag), "", (bb)); \
+ ap_log_cerror(APLOG_MARK, level, 0, (c), "bb_dump(%ld): %s", \
+ ((c)->master? (c)->master->id : (c)->id), (len? buffer : line)); \
+ } while(0); \
+}
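+/*
+ * Example (editor's sketch): log a brigade at TRACE2 for a connection `c`;
+ * `stream_id` and `bb` are assumed to be in scope.
+ *
+ *     h2_util_bb_log(c, stream_id, APLOG_TRACE2, "on_out", bb);
+ */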
+
+
+typedef int h2_bucket_gate(apr_bucket *b);
+/**
+ * Transfer buckets from one brigade to another with a limit on the
+ * maximum amount of bytes transferred. Does no setaside magic; the lifetime
+ * of the brigades must fit.
+ * @param to brigade to transfer buckets to
+ * @param from brigade to remove buckets from
+ * @param plen maximum bytes to transfer on call, actual bytes transferred on return
+ * @param peos set iff an EOS bucket was transferred
+ * @param should_append gate deciding which buckets may be transferred
+ */
+apr_status_t h2_append_brigade(apr_bucket_brigade *to,
+ apr_bucket_brigade *from,
+ apr_off_t *plen,
+ int *peos,
+ h2_bucket_gate *should_append);
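+/*
+ * Editor's sketch of a gate function: let buckets pass until a FLUSH bucket
+ * is seen. The exact stop-vs-skip semantics of the gate are an assumption
+ * here; consult the implementation in h2_util.c.
+ *
+ *     static int until_flush(apr_bucket *b)
+ *     {
+ *         return !APR_BUCKET_IS_FLUSH(b);
+ *     }
+ *
+ *     h2_append_brigade(to, from, &len, &eos, until_flush);
+ */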
+
+/**
+ * Get an approximation of the memory footprint of the given
+ * brigade. This differs from apr_brigade_length in that
+ * - no buckets are ever read
+ * - only buckets known to allocate memory (HEAP+POOL) are counted
+ * - the bucket struct itself is counted
+ */
+apr_off_t h2_brigade_mem_size(apr_bucket_brigade *bb);
+
+/**
+ * Drain a pipe used for notification.
+ */
+void h2_util_drain_pipe(apr_file_t *pipe);
+
+/**
+ * Wait on data arriving on a pipe.
+ */
+apr_status_t h2_util_wait_on_pipe(apr_file_t *pipe);
+
+
+#if AP_HAS_RESPONSE_BUCKETS
+/**
+ * Give an estimate of the length of the header fields,
+ * without compression or other formatting decorations.
+ */
+apr_size_t headers_length_estimate(ap_bucket_headers *hdrs);
+
+/**
+ * Give an estimate of the size of the response meta data,
+ * without compression or other formatting decorations.
+ */
+apr_size_t response_length_estimate(ap_bucket_response *resp);
+#endif /* AP_HAS_RESPONSE_BUCKETS */
+
+#endif /* defined(__mod_h2__h2_util__) */
diff --git a/modules/http2/h2_version.h b/modules/http2/h2_version.h
new file mode 100644
index 0000000..c961089
--- /dev/null
+++ b/modules/http2/h2_version.h
@@ -0,0 +1,41 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef mod_h2_h2_version_h
+#define mod_h2_h2_version_h
+
+#undef PACKAGE_VERSION
+#undef PACKAGE_TARNAME
+#undef PACKAGE_STRING
+#undef PACKAGE_NAME
+#undef PACKAGE_BUGREPORT
+
+/**
+ * @macro
+ * Version number of the http2 module as a C string
+ */
+#define MOD_HTTP2_VERSION "2.0.11"
+
+/**
+ * @macro
+ * Numerical representation of the version number of the http2 module
+ * release. This is a 24 bit number with 8 bits for major number, 8 bits
+ * for minor and 8 bits for patch. Version 1.2.3 becomes 0x010203.
+ */
+#define MOD_HTTP2_VERSION_NUM 0x02000b
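+/* Worked example (editor's note): version "2.0.11" packs as
+ * (2 << 16) | (0 << 8) | 11 == 0x02000b, matching MOD_HTTP2_VERSION above. */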
+
+
+#endif /* mod_h2_h2_version_h */
diff --git a/modules/http2/h2_workers.c b/modules/http2/h2_workers.c
new file mode 100644
index 0000000..e7e2039
--- /dev/null
+++ b/modules/http2/h2_workers.c
@@ -0,0 +1,626 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <assert.h>
+#include <apr_ring.h>
+#include <apr_thread_mutex.h>
+#include <apr_thread_cond.h>
+
+#include <mpm_common.h>
+#include <httpd.h>
+#include <http_connection.h>
+#include <http_core.h>
+#include <http_log.h>
+#include <http_protocol.h>
+
+#include "h2.h"
+#include "h2_private.h"
+#include "h2_mplx.h"
+#include "h2_c2.h"
+#include "h2_workers.h"
+#include "h2_util.h"
+
+typedef enum {
+ PROD_IDLE,
+ PROD_ACTIVE,
+ PROD_JOINED,
+} prod_state_t;
+
+struct ap_conn_producer_t {
+ APR_RING_ENTRY(ap_conn_producer_t) link;
+ const char *name;
+ void *baton;
+ ap_conn_producer_next *fn_next;
+ ap_conn_producer_done *fn_done;
+ ap_conn_producer_shutdown *fn_shutdown;
+ volatile prod_state_t state;
+ volatile int conns_active;
+};
+
+
+typedef enum {
+ H2_SLOT_FREE,
+ H2_SLOT_RUN,
+ H2_SLOT_ZOMBIE,
+} h2_slot_state_t;
+
+typedef struct h2_slot h2_slot;
+struct h2_slot {
+ APR_RING_ENTRY(h2_slot) link;
+ apr_uint32_t id;
+ apr_pool_t *pool;
+ h2_slot_state_t state;
+ volatile int should_shutdown;
+ volatile int is_idle;
+ h2_workers *workers;
+ ap_conn_producer_t *prod;
+ apr_thread_t *thread;
+ struct apr_thread_cond_t *more_work;
+ int activations;
+};
+
+struct h2_workers {
+ server_rec *s;
+ apr_pool_t *pool;
+
+ apr_uint32_t max_slots;
+ apr_uint32_t min_active;
+ volatile apr_time_t idle_limit;
+ volatile int aborted;
+ volatile int shutdown;
+ int dynamic;
+
+ volatile apr_uint32_t active_slots;
+ volatile apr_uint32_t idle_slots;
+
+ apr_threadattr_t *thread_attr;
+ h2_slot *slots;
+
+ APR_RING_HEAD(h2_slots_free, h2_slot) free;
+ APR_RING_HEAD(h2_slots_idle, h2_slot) idle;
+ APR_RING_HEAD(h2_slots_busy, h2_slot) busy;
+ APR_RING_HEAD(h2_slots_zombie, h2_slot) zombie;
+
+ APR_RING_HEAD(ap_conn_producer_active, ap_conn_producer_t) prod_active;
+ APR_RING_HEAD(ap_conn_producer_idle, ap_conn_producer_t) prod_idle;
+
+ struct apr_thread_mutex_t *lock;
+ struct apr_thread_cond_t *prod_done;
+ struct apr_thread_cond_t *all_done;
+};
+
+
+static void* APR_THREAD_FUNC slot_run(apr_thread_t *thread, void *wctx);
+
+static apr_status_t activate_slot(h2_workers *workers)
+{
+ h2_slot *slot;
+ apr_pool_t *pool;
+ apr_status_t rv;
+
+ if (APR_RING_EMPTY(&workers->free, h2_slot, link)) {
+ return APR_EAGAIN;
+ }
+ slot = APR_RING_FIRST(&workers->free);
+ ap_assert(slot->state == H2_SLOT_FREE);
+ APR_RING_REMOVE(slot, link);
+
+ ap_log_error(APLOG_MARK, APLOG_TRACE3, 0, workers->s,
+ "h2_workers: activate slot %d", slot->id);
+
+ slot->state = H2_SLOT_RUN;
+ slot->should_shutdown = 0;
+ slot->is_idle = 0;
+ slot->pool = NULL;
+ ++workers->active_slots;
+ rv = apr_pool_create(&pool, workers->pool);
+ if (APR_SUCCESS != rv) goto cleanup;
+ apr_pool_tag(pool, "h2_worker_slot");
+ slot->pool = pool;
+
+ rv = ap_thread_create(&slot->thread, workers->thread_attr,
+ slot_run, slot, slot->pool);
+
+cleanup:
+ if (rv != APR_SUCCESS) {
+ AP_DEBUG_ASSERT(0);
+ slot->state = H2_SLOT_FREE;
+ if (slot->pool) {
+ apr_pool_destroy(slot->pool);
+ slot->pool = NULL;
+ }
+ APR_RING_INSERT_TAIL(&workers->free, slot, h2_slot, link);
+ --workers->active_slots;
+ }
+ return rv;
+}
+
+static void join_zombies(h2_workers *workers)
+{
+ h2_slot *slot;
+ apr_status_t status;
+
+ while (!APR_RING_EMPTY(&workers->zombie, h2_slot, link)) {
+ slot = APR_RING_FIRST(&workers->zombie);
+ APR_RING_REMOVE(slot, link);
+ ap_assert(slot->state == H2_SLOT_ZOMBIE);
+ ap_assert(slot->thread != NULL);
+
+ apr_thread_mutex_unlock(workers->lock);
+ apr_thread_join(&status, slot->thread);
+ apr_thread_mutex_lock(workers->lock);
+
+ slot->thread = NULL;
+ slot->state = H2_SLOT_FREE;
+ if (slot->pool) {
+ apr_pool_destroy(slot->pool);
+ slot->pool = NULL;
+ }
+ APR_RING_INSERT_TAIL(&workers->free, slot, h2_slot, link);
+ }
+}
+
+static void wake_idle_worker(h2_workers *workers, ap_conn_producer_t *prod)
+{
+ if (!APR_RING_EMPTY(&workers->idle, h2_slot, link)) {
+ h2_slot *slot;
+ for (slot = APR_RING_FIRST(&workers->idle);
+ slot != APR_RING_SENTINEL(&workers->idle, h2_slot, link);
+ slot = APR_RING_NEXT(slot, link)) {
+ if (slot->is_idle && !slot->should_shutdown) {
+ apr_thread_cond_signal(slot->more_work);
+ slot->is_idle = 0;
+ return;
+ }
+ }
+ }
+ if (workers->dynamic && !workers->shutdown
+ && (workers->active_slots < workers->max_slots)) {
+ activate_slot(workers);
+ }
+}
+
+/**
+ * Get the next connection to work on.
+ */
+static conn_rec *get_next(h2_slot *slot)
+{
+ h2_workers *workers = slot->workers;
+ conn_rec *c = NULL;
+ ap_conn_producer_t *prod;
+ int has_more;
+
+ slot->prod = NULL;
+ if (!APR_RING_EMPTY(&workers->prod_active, ap_conn_producer_t, link)) {
+ slot->prod = prod = APR_RING_FIRST(&workers->prod_active);
+ APR_RING_REMOVE(prod, link);
+ AP_DEBUG_ASSERT(PROD_ACTIVE == prod->state);
+
+ c = prod->fn_next(prod->baton, &has_more);
+ if (c && has_more) {
+ APR_RING_INSERT_TAIL(&workers->prod_active, prod, ap_conn_producer_t, link);
+ wake_idle_worker(workers, slot->prod);
+ }
+ else {
+ prod->state = PROD_IDLE;
+ APR_RING_INSERT_TAIL(&workers->prod_idle, prod, ap_conn_producer_t, link);
+ }
+ if (c) {
+ ++prod->conns_active;
+ }
+ }
+
+ return c;
+}
+
+static void* APR_THREAD_FUNC slot_run(apr_thread_t *thread, void *wctx)
+{
+ h2_slot *slot = wctx;
+ h2_workers *workers = slot->workers;
+ conn_rec *c;
+ apr_status_t rv;
+
+ apr_thread_mutex_lock(workers->lock);
+ slot->state = H2_SLOT_RUN;
+ ++slot->activations;
+ APR_RING_ELEM_INIT(slot, link);
+ for(;;) {
+ if (APR_RING_NEXT(slot, link) != slot) {
+ /* slot is part of the idle ring from the last loop */
+ APR_RING_REMOVE(slot, link);
+ --workers->idle_slots;
+ }
+ slot->is_idle = 0;
+
+ if (!workers->aborted && !slot->should_shutdown) {
+ APR_RING_INSERT_TAIL(&workers->busy, slot, h2_slot, link);
+ do {
+ c = get_next(slot);
+ if (!c) {
+ break;
+ }
+ apr_thread_mutex_unlock(workers->lock);
+ /* See the discussion at <https://github.com/icing/mod_h2/issues/195>
+ *
+                 * Each conn_rec->id is supposed to be unique at a point in time. Since
+                 * some modules (and maybe external code) use this id as an identifier
+                 * for the request_rec they handle, it needs to be unique for secondary
+                 * connections as well.
+                 *
+                 * The MPM module assigns the connection ids and mod_unique_id uses
+                 * them to generate identifiers for requests. While that works for
+                 * HTTP/1.x, the parallel execution of several requests per connection
+                 * will generate duplicate identifiers under load.
+                 *
+                 * The original implementation for secondary connection identifiers used
+                 * to shift the master connection id up and assign the stream id to the
+                 * lower bits. This was cramped on 32 bit systems, but on 64 bit there was
+                 * enough space.
+                 *
+                 * As issue 195 showed, mod_unique_id only uses the lower 32 bits of the
+                 * connection id, even on 64 bit systems, which leads to collisions in
+                 * request ids.
+                 *
+                 * The way master connection ids are generated, there is some space "at the
+                 * top" of the lower 32 bits on almost all systems. If you have a setup
+                 * with 64k threads per child and 255 child processes, you live on the edge.
+                 *
+                 * The new implementation shifts the master id by 8 bits and XORs in the
+                 * worker id. This can still produce collisions with more than 256 h2
+                 * workers under heavy load. There seems to be no way to solve this in
+                 * all possible configurations by mod_h2 alone.
+ */
+ if (c->master) {
+ c->id = (c->master->id << 8)^slot->id;
+ }
+ c->current_thread = thread;
+ AP_DEBUG_ASSERT(slot->prod);
+
+#if AP_HAS_RESPONSE_BUCKETS
+ ap_process_connection(c, ap_get_conn_socket(c));
+#else
+ h2_c2_process(c, thread, slot->id);
+#endif
+ slot->prod->fn_done(slot->prod->baton, c);
+
+ apr_thread_mutex_lock(workers->lock);
+ if (--slot->prod->conns_active <= 0) {
+ apr_thread_cond_broadcast(workers->prod_done);
+ }
+ if (slot->prod->state == PROD_IDLE) {
+ APR_RING_REMOVE(slot->prod, link);
+ slot->prod->state = PROD_ACTIVE;
+ APR_RING_INSERT_TAIL(&workers->prod_active, slot->prod, ap_conn_producer_t, link);
+ }
+
+ } while (!workers->aborted && !slot->should_shutdown);
+ APR_RING_REMOVE(slot, link); /* no longer busy */
+ }
+
+ if (workers->aborted || slot->should_shutdown) {
+ break;
+ }
+
+ join_zombies(workers);
+
+ /* we are idle */
+ APR_RING_INSERT_TAIL(&workers->idle, slot, h2_slot, link);
+ ++workers->idle_slots;
+ slot->is_idle = 1;
+ if (slot->id >= workers->min_active && workers->idle_limit > 0) {
+ rv = apr_thread_cond_timedwait(slot->more_work, workers->lock,
+ workers->idle_limit);
+ if (APR_TIMEUP == rv) {
+ APR_RING_REMOVE(slot, link);
+ --workers->idle_slots;
+ ap_log_error(APLOG_MARK, APLOG_TRACE1, 0, workers->s,
+ "h2_workers: idle timeout slot %d in state %d (%d activations)",
+ slot->id, slot->state, slot->activations);
+ break;
+ }
+ }
+ else {
+ apr_thread_cond_wait(slot->more_work, workers->lock);
+ }
+ }
+
+ ap_log_error(APLOG_MARK, APLOG_TRACE3, 0, workers->s,
+ "h2_workers: terminate slot %d in state %d (%d activations)",
+ slot->id, slot->state, slot->activations);
+ slot->is_idle = 0;
+ slot->state = H2_SLOT_ZOMBIE;
+ slot->should_shutdown = 0;
+ APR_RING_INSERT_TAIL(&workers->zombie, slot, h2_slot, link);
+ --workers->active_slots;
+ if (workers->active_slots <= 0) {
+ apr_thread_cond_broadcast(workers->all_done);
+ }
+ apr_thread_mutex_unlock(workers->lock);
+
+ apr_thread_exit(thread, APR_SUCCESS);
+ return NULL;
+}
+
+static void wake_all_idles(h2_workers *workers)
+{
+ h2_slot *slot;
+ for (slot = APR_RING_FIRST(&workers->idle);
+ slot != APR_RING_SENTINEL(&workers->idle, h2_slot, link);
+ slot = APR_RING_NEXT(slot, link))
+ {
+ apr_thread_cond_signal(slot->more_work);
+ }
+}
+
+static apr_status_t workers_pool_cleanup(void *data)
+{
+ h2_workers *workers = data;
+ apr_time_t end, timeout = apr_time_from_sec(1);
+ apr_status_t rv;
+ int n = 0, wait_sec = 5;
+
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, workers->s,
+ "h2_workers: cleanup %d workers (%d idle)",
+ workers->active_slots, workers->idle_slots);
+ apr_thread_mutex_lock(workers->lock);
+ workers->shutdown = 1;
+ workers->aborted = 1;
+ wake_all_idles(workers);
+ apr_thread_mutex_unlock(workers->lock);
+
+ /* wait for all the workers to become zombies and join them.
+ * this gets called after the mpm shuts down and all connections
+ * have either been handled (graceful) or we are forced exiting
+     * (ungraceful). Either way, we show limited patience. */
+ end = apr_time_now() + apr_time_from_sec(wait_sec);
+ while (apr_time_now() < end) {
+ apr_thread_mutex_lock(workers->lock);
+ if (!(n = workers->active_slots)) {
+ apr_thread_mutex_unlock(workers->lock);
+ break;
+ }
+ wake_all_idles(workers);
+ rv = apr_thread_cond_timedwait(workers->all_done, workers->lock, timeout);
+ apr_thread_mutex_unlock(workers->lock);
+
+ if (APR_TIMEUP == rv) {
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, workers->s,
+ APLOGNO(10290) "h2_workers: waiting for workers to close, "
+ "still seeing %d workers (%d idle) living",
+ workers->active_slots, workers->idle_slots);
+ }
+ }
+ if (n) {
+ ap_log_error(APLOG_MARK, APLOG_WARNING, 0, workers->s,
+ APLOGNO(10291) "h2_workers: cleanup, %d workers (%d idle) "
+ "did not exit after %d seconds.",
+ n, workers->idle_slots, wait_sec);
+ }
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, workers->s,
+ "h2_workers: cleanup all workers terminated");
+ apr_thread_mutex_lock(workers->lock);
+ join_zombies(workers);
+ apr_thread_mutex_unlock(workers->lock);
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, workers->s,
+ "h2_workers: cleanup zombie workers joined");
+
+ return APR_SUCCESS;
+}
+
+h2_workers *h2_workers_create(server_rec *s, apr_pool_t *pchild,
+ int max_slots, int min_active,
+ apr_time_t idle_limit)
+{
+ apr_status_t rv;
+ h2_workers *workers;
+ apr_pool_t *pool;
+ apr_allocator_t *allocator;
+ int locked = 0;
+ apr_uint32_t i;
+
+ ap_assert(s);
+ ap_assert(pchild);
+ ap_assert(idle_limit > 0);
+
+ /* let's have our own pool that will be parent to all h2_worker
+ * instances we create. This happens in various threads, but always
+ * guarded by our lock. Without this pool, all subpool creations would
+ * happen on the pool handed to us, which we do not guard.
+ */
+ rv = apr_allocator_create(&allocator);
+ if (rv != APR_SUCCESS) {
+ goto cleanup;
+ }
+ rv = apr_pool_create_ex(&pool, pchild, NULL, allocator);
+ if (rv != APR_SUCCESS) {
+ apr_allocator_destroy(allocator);
+ goto cleanup;
+ }
+ apr_allocator_owner_set(allocator, pool);
+ apr_pool_tag(pool, "h2_workers");
+ workers = apr_pcalloc(pool, sizeof(h2_workers));
+ if (!workers) {
+ return NULL;
+ }
+
+ workers->s = s;
+ workers->pool = pool;
+ workers->min_active = min_active;
+ workers->max_slots = max_slots;
+ workers->idle_limit = idle_limit;
+ workers->dynamic = (workers->min_active < workers->max_slots);
+
+ ap_log_error(APLOG_MARK, APLOG_INFO, 0, s,
+ "h2_workers: created with min=%d max=%d idle_ms=%d",
+ workers->min_active, workers->max_slots,
+ (int)apr_time_as_msec(idle_limit));
+
+ APR_RING_INIT(&workers->idle, h2_slot, link);
+ APR_RING_INIT(&workers->busy, h2_slot, link);
+ APR_RING_INIT(&workers->free, h2_slot, link);
+ APR_RING_INIT(&workers->zombie, h2_slot, link);
+
+ APR_RING_INIT(&workers->prod_active, ap_conn_producer_t, link);
+ APR_RING_INIT(&workers->prod_idle, ap_conn_producer_t, link);
+
+ rv = apr_threadattr_create(&workers->thread_attr, workers->pool);
+ if (rv != APR_SUCCESS) goto cleanup;
+
+ if (ap_thread_stacksize != 0) {
+ apr_threadattr_stacksize_set(workers->thread_attr,
+ ap_thread_stacksize);
+ ap_log_error(APLOG_MARK, APLOG_TRACE3, 0, s,
+ "h2_workers: using stacksize=%ld",
+ (long)ap_thread_stacksize);
+ }
+
+ rv = apr_thread_mutex_create(&workers->lock,
+ APR_THREAD_MUTEX_DEFAULT,
+ workers->pool);
+ if (rv != APR_SUCCESS) goto cleanup;
+ rv = apr_thread_cond_create(&workers->all_done, workers->pool);
+ if (rv != APR_SUCCESS) goto cleanup;
+ rv = apr_thread_cond_create(&workers->prod_done, workers->pool);
+ if (rv != APR_SUCCESS) goto cleanup;
+
+ apr_thread_mutex_lock(workers->lock);
+ locked = 1;
+
+ /* create the slots and put them on the free list */
+ workers->slots = apr_pcalloc(workers->pool, workers->max_slots * sizeof(h2_slot));
+
+ for (i = 0; i < workers->max_slots; ++i) {
+ workers->slots[i].id = i;
+ workers->slots[i].state = H2_SLOT_FREE;
+ workers->slots[i].workers = workers;
+ APR_RING_ELEM_INIT(&workers->slots[i], link);
+ APR_RING_INSERT_TAIL(&workers->free, &workers->slots[i], h2_slot, link);
+ rv = apr_thread_cond_create(&workers->slots[i].more_work, workers->pool);
+ if (rv != APR_SUCCESS) goto cleanup;
+ }
+
+ /* activate the min amount of workers */
+ for (i = 0; i < workers->min_active; ++i) {
+ rv = activate_slot(workers);
+ if (rv != APR_SUCCESS) goto cleanup;
+ }
+
+cleanup:
+ if (locked) {
+ apr_thread_mutex_unlock(workers->lock);
+ }
+ if (rv == APR_SUCCESS) {
+ /* Stop/join the workers threads when the MPM child exits (pchild is
+ * destroyed), and as a pre_cleanup of pchild thus before the threads
+ * pools (children of workers->pool) so that they are not destroyed
+ * before/under us.
+ */
+ apr_pool_pre_cleanup_register(pchild, workers, workers_pool_cleanup);
+ return workers;
+ }
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, rv, s,
+ "h2_workers: errors initializing");
+ return NULL;
+}
+
+apr_uint32_t h2_workers_get_max_workers(h2_workers *workers)
+{
+ return workers->max_slots;
+}
+
+void h2_workers_shutdown(h2_workers *workers, int graceful)
+{
+ ap_conn_producer_t *prod;
+
+ apr_thread_mutex_lock(workers->lock);
+ ap_log_error(APLOG_MARK, APLOG_TRACE1, 0, workers->s,
+ "h2_workers: shutdown graceful=%d", graceful);
+ workers->shutdown = 1;
+ workers->idle_limit = apr_time_from_sec(1);
+ wake_all_idles(workers);
+ for (prod = APR_RING_FIRST(&workers->prod_idle);
+ prod != APR_RING_SENTINEL(&workers->prod_idle, ap_conn_producer_t, link);
+ prod = APR_RING_NEXT(prod, link)) {
+ if (prod->fn_shutdown) {
+ prod->fn_shutdown(prod->baton, graceful);
+ }
+ }
+ apr_thread_mutex_unlock(workers->lock);
+}
+
+ap_conn_producer_t *h2_workers_register(h2_workers *workers,
+ apr_pool_t *producer_pool,
+ const char *name,
+ ap_conn_producer_next *fn_next,
+ ap_conn_producer_done *fn_done,
+ ap_conn_producer_shutdown *fn_shutdown,
+ void *baton)
+{
+ ap_conn_producer_t *prod;
+
+ prod = apr_pcalloc(producer_pool, sizeof(*prod));
+ APR_RING_ELEM_INIT(prod, link);
+ prod->name = name;
+ prod->fn_next = fn_next;
+ prod->fn_done = fn_done;
+ prod->fn_shutdown = fn_shutdown;
+ prod->baton = baton;
+
+ apr_thread_mutex_lock(workers->lock);
+ prod->state = PROD_IDLE;
+ APR_RING_INSERT_TAIL(&workers->prod_idle, prod, ap_conn_producer_t, link);
+ apr_thread_mutex_unlock(workers->lock);
+
+ return prod;
+}
+
+apr_status_t h2_workers_join(h2_workers *workers, ap_conn_producer_t *prod)
+{
+ apr_status_t rv = APR_SUCCESS;
+
+ apr_thread_mutex_lock(workers->lock);
+ if (PROD_JOINED == prod->state) {
+ AP_DEBUG_ASSERT(APR_RING_NEXT(prod, link) == prod); /* should be in no ring */
+ rv = APR_EINVAL;
+ }
+ else {
+ AP_DEBUG_ASSERT(PROD_ACTIVE == prod->state || PROD_IDLE == prod->state);
+ APR_RING_REMOVE(prod, link);
+ prod->state = PROD_JOINED; /* prevent further activations */
+ while (prod->conns_active > 0) {
+ apr_thread_cond_wait(workers->prod_done, workers->lock);
+ }
+ APR_RING_ELEM_INIT(prod, link); /* make it link to itself */
+ }
+ apr_thread_mutex_unlock(workers->lock);
+ return rv;
+}
+
+apr_status_t h2_workers_activate(h2_workers *workers, ap_conn_producer_t *prod)
+{
+ apr_status_t rv = APR_SUCCESS;
+ apr_thread_mutex_lock(workers->lock);
+ if (PROD_IDLE == prod->state) {
+ APR_RING_REMOVE(prod, link);
+ prod->state = PROD_ACTIVE;
+ APR_RING_INSERT_TAIL(&workers->prod_active, prod, ap_conn_producer_t, link);
+ wake_idle_worker(workers, prod);
+ }
+ else if (PROD_JOINED == prod->state) {
+ rv = APR_EINVAL;
+ }
+ apr_thread_mutex_unlock(workers->lock);
+ return rv;
+}
diff --git a/modules/http2/h2_workers.h b/modules/http2/h2_workers.h
new file mode 100644
index 0000000..c219304
--- /dev/null
+++ b/modules/http2/h2_workers.h
@@ -0,0 +1,129 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __mod_h2__h2_workers__
+#define __mod_h2__h2_workers__
+
+/* Thread pool specific to executing secondary connections.
+ * It maintains a minimum and maximum number of workers: it starts with
+ * the minimum, adds workers under load, and reduces the number again
+ * when they sit idle.
+ */
+struct apr_thread_mutex_t;
+struct apr_thread_cond_t;
+struct h2_mplx;
+struct h2_request;
+struct h2_fifo;
+
+typedef struct h2_workers h2_workers;
+
+
+/**
+ * Create a worker set with a maximum number of 'slots', i.e. worker
+ * threads to run. Always keep `min_active` workers running. Shut down
+ * any additional workers after they have been idle for `idle_limit`.
+ *
+ * @param s the base server
+ * @param pool for allocations
+ * @param min_active minimum number of workers to keep running
+ * @param max_slots maximum number of worker slots
+ * @param idle_limit the idle duration after which a non-minimal slot shuts down
+ */
+h2_workers *h2_workers_create(server_rec *s, apr_pool_t *pool,
+ int max_slots, int min_active, apr_time_t idle_limit);
+
+/**
+ * Shut down processing.
+ */
+void h2_workers_shutdown(h2_workers *workers, int graceful);
+
+/**
+ * Get the maximum number of workers.
+ */
+apr_uint32_t h2_workers_get_max_workers(h2_workers *workers);
+
+/**
+ * ap_conn_producer_t is the source of connections (conn_rec*) to run.
+ *
+ * Active producers are queried by idle workers for connections.
+ * If they do not hand one back, they become inactive and are not
+ * queried further. `h2_workers_activate()` places them on the active
+ * list again.
+ *
+ * A producer finishing MUST call `h2_workers_join()` which removes
+ * it completely from workers processing and waits for all ongoing
+ * work for this producer to be done.
+ */
+typedef struct ap_conn_producer_t ap_conn_producer_t;
+
+/**
+ * Ask a producer for the next connection to process.
+ * @param baton value from producer registration
+ * @param pmore set if the producer has more connections that may be retrieved
+ * @return the connection to process, or NULL if none is available at this time
+ */
+typedef conn_rec *ap_conn_producer_next(void *baton, int *pmore);
+
+/**
+ * Tell the producer that processing the connection is done.
+ * @param baton value from producer registration
+ * @param conn the connection that has been processed.
+ */
+typedef void ap_conn_producer_done(void *baton, conn_rec *conn);
+
+/**
+ * Tell the producer that the workers are shutting down.
+ * @param baton value from producer registration
+ * @param graceful != 0 iff shutdown is graceful
+ */
+typedef void ap_conn_producer_shutdown(void *baton, int graceful);
+
+/**
+ * Register a new producer with the given `baton` and callback functions.
+ * Will allocate internal structures from the given pool (but make no use
+ * of the pool after registration).
+ * Producers are inactive on registration. See `h2_workers_activate()`.
+ * @param producer_pool to allocate the producer from
+ * @param name descriptive name of the producer, need not be unique
+ * @param fn_next callback for retrieving connections to process
+ * @param fn_done callback invoked for processed connections
+ * @param fn_shutdown callback to notify the producer of a workers shutdown
+ * @param baton provided value passed on in callbacks
+ * @return the producer instance created
+ */
+ap_conn_producer_t *h2_workers_register(h2_workers *workers,
+ apr_pool_t *producer_pool,
+ const char *name,
+ ap_conn_producer_next *fn_next,
+ ap_conn_producer_done *fn_done,
+ ap_conn_producer_shutdown *fn_shutdown,
+ void *baton);
+
+/**
+ * Stop retrieving more connections from the producer and wait
+ * for all ongoing work from that producer to be done.
+ */
+apr_status_t h2_workers_join(h2_workers *workers, ap_conn_producer_t *producer);
+
+/**
+ * Activate a producer. A worker will query the producer for a connection
+ * to process once one becomes available.
+ * This may be called regardless of whether the producer is currently
+ * active or inactive.
+ */
+apr_status_t h2_workers_activate(h2_workers *workers, ap_conn_producer_t *producer);
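+
+/*
+ * Editor's sketch of the producer lifecycle. Only the h2_workers_* functions
+ * and the ap_conn_producer_* typedefs above are real; the callback and
+ * variable names here are hypothetical.
+ *
+ *     static conn_rec *my_next(void *baton, int *pmore)  { ... }
+ *     static void my_done(void *baton, conn_rec *c)      { ... }
+ *     static void my_shutdown(void *baton, int graceful) { ... }
+ *
+ *     ap_conn_producer_t *prod;
+ *     prod = h2_workers_register(workers, pool, "my-producer",
+ *                                my_next, my_done, my_shutdown, my_baton);
+ *     h2_workers_activate(workers, prod);   (workers start calling my_next)
+ *     ...
+ *     h2_workers_join(workers, prod);       (stop and wait for ongoing work)
+ */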
+
+#endif /* defined(__mod_h2__h2_workers__) */
diff --git a/modules/http2/mod_http2.c b/modules/http2/mod_http2.c
new file mode 100644
index 0000000..8a1ee3f
--- /dev/null
+++ b/modules/http2/mod_http2.c
@@ -0,0 +1,349 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <apr_optional.h>
+#include <apr_optional_hooks.h>
+#include <apr_strings.h>
+#include <apr_time.h>
+#include <apr_want.h>
+
+#include <httpd.h>
+#include <http_protocol.h>
+#include <http_request.h>
+#include <http_log.h>
+#include <mpm_common.h>
+
+#include "mod_http2.h"
+
+#include <nghttp2/nghttp2.h>
+#include "h2_stream.h"
+#include "h2_c1.h"
+#include "h2_c2.h"
+#include "h2_session.h"
+#include "h2_config.h"
+#include "h2_conn_ctx.h"
+#include "h2_protocol.h"
+#include "h2_mplx.h"
+#include "h2_push.h"
+#include "h2_request.h"
+#include "h2_switch.h"
+#include "h2_version.h"
+#include "h2_bucket_beam.h"
+
+
+static void h2_hooks(apr_pool_t *pool);
+
+AP_DECLARE_MODULE(http2) = {
+ STANDARD20_MODULE_STUFF,
+ h2_config_create_dir, /* func to create per dir config */
+ h2_config_merge_dir, /* func to merge per dir config */
+ h2_config_create_svr, /* func to create per server config */
+ h2_config_merge_svr, /* func to merge per server config */
+ h2_cmds, /* command handlers */
+ h2_hooks,
+#if defined(AP_MODULE_FLAG_NONE)
+ AP_MODULE_FLAG_ALWAYS_MERGE
+#endif
+};
+
+static int h2_h2_fixups(request_rec *r);
+
+typedef struct {
+ unsigned int change_prio : 1;
+ unsigned int sha256 : 1;
+ unsigned int inv_headers : 1;
+ unsigned int dyn_windows : 1;
+} features;
+
+static features myfeats;
+static int mpm_warned;
+
+/* The module initialization. Called once as an apache hook, before any multi
+ * processing (threaded or not) happens. It is typically called at least twice,
+ * see
+ *   http://wiki.apache.org/httpd/ModuleLife
+ * Since the first run is just a "practice" run, we want to initialize for real
+ * only on the second try. This defeats the purpose of the first dry run a bit,
+ * since apache wants to verify that a new configuration will actually work.
+ * So if we have trouble with the configuration, it will only be detected
+ * once the server has already switched.
+ * On the other hand, when we initialize libnghttp2, all sorts of crazy things
+ * might happen and this might even eat threads. So, better to init on the real
+ * invocation, for now at least.
+ */
+static int h2_post_config(apr_pool_t *p, apr_pool_t *plog,
+ apr_pool_t *ptemp, server_rec *s)
+{
+ void *data = NULL;
+ const char *mod_h2_init_key = "mod_http2_init_counter";
+ nghttp2_info *ngh2;
+ apr_status_t status;
+
+ (void)plog;(void)ptemp;
+#ifdef H2_NG2_CHANGE_PRIO
+ myfeats.change_prio = 1;
+#endif
+#ifdef H2_OPENSSL
+ myfeats.sha256 = 1;
+#endif
+#ifdef H2_NG2_INVALID_HEADER_CB
+ myfeats.inv_headers = 1;
+#endif
+#ifdef H2_NG2_LOCAL_WIN_SIZE
+ myfeats.dyn_windows = 1;
+#endif
+
+ apr_pool_userdata_get(&data, mod_h2_init_key, s->process->pool);
+ if ( data == NULL ) {
+ ap_log_error( APLOG_MARK, APLOG_DEBUG, 0, s, APLOGNO(03089)
+ "initializing post config dry run");
+ apr_pool_userdata_set((const void *)1, mod_h2_init_key,
+ apr_pool_cleanup_null, s->process->pool);
+ return APR_SUCCESS;
+ }
+
+ ngh2 = nghttp2_version(0);
+ ap_log_error( APLOG_MARK, APLOG_INFO, 0, s, APLOGNO(03090)
+ "mod_http2 (v%s, feats=%s%s%s%s, nghttp2 %s), initializing...",
+ MOD_HTTP2_VERSION,
+ myfeats.change_prio? "CHPRIO" : "",
+ myfeats.sha256? "+SHA256" : "",
+ myfeats.inv_headers? "+INVHD" : "",
+ myfeats.dyn_windows? "+DWINS" : "",
+ ngh2? ngh2->version_str : "unknown");
+
+ if (!h2_mpm_supported() && !mpm_warned) {
+ mpm_warned = 1;
+ ap_log_error(APLOG_MARK, APLOG_WARNING, 0, s, APLOGNO(10034)
+ "The mpm module (%s) is not supported by mod_http2. The mpm determines "
+ "how things are processed in your server. HTTP/2 has more demands in "
+ "this regard and the currently selected mpm will just not do. "
+ "This is an advisory warning. Your server will continue to work, but "
+ "the HTTP/2 protocol will be inactive.",
+ h2_conn_mpm_name());
+ }
+
+ status = h2_protocol_init(p, s);
+ if (status == APR_SUCCESS) {
+ status = h2_switch_init(p, s);
+ }
+
+ return status;
+}
+
+static char *http2_var_lookup(apr_pool_t *, server_rec *,
+ conn_rec *, request_rec *, char *name);
+static int http2_is_h2(conn_rec *);
+
+static void http2_get_num_workers(server_rec *s, int *minw, int *maxw)
+{
+ apr_time_t tdummy;
+
+ h2_get_workers_config(s, minw, maxw, &tdummy);
+}
+
+/* Runs once per created child process. Perform any process
+ * related initialization here.
+ */
+static void h2_child_init(apr_pool_t *pchild, server_rec *s)
+{
+ apr_status_t rv;
+
+ /* Set up our connection processing */
+ rv = h2_c1_child_init(pchild, s);
+ if (APR_SUCCESS == rv) {
+ rv = h2_c2_child_init(pchild, s);
+ }
+ if (APR_SUCCESS != rv) {
+ ap_log_error(APLOG_MARK, APLOG_ERR, rv, s,
+ APLOGNO(02949) "initializing connection handling");
+ }
+}
+
+/* Install this module into the apache2 infrastructure.
+ */
+static void h2_hooks(apr_pool_t *pool)
+{
+ static const char *const mod_ssl[] = { "mod_ssl.c", NULL};
+
+ APR_REGISTER_OPTIONAL_FN(http2_is_h2);
+ APR_REGISTER_OPTIONAL_FN(http2_var_lookup);
+ APR_REGISTER_OPTIONAL_FN(http2_get_num_workers);
+
+ ap_log_perror(APLOG_MARK, APLOG_TRACE1, 0, pool, "installing hooks");
+
+ /* Run once after configuration is set, but before mpm children initialize.
+ */
+ ap_hook_post_config(h2_post_config, mod_ssl, NULL, APR_HOOK_MIDDLE);
+
+ /* Run once after a child process has been created.
+ */
+ ap_hook_child_init(h2_child_init, NULL, NULL, APR_HOOK_MIDDLE);
+#if AP_MODULE_MAGIC_AT_LEAST(20120211, 110)
+ ap_hook_child_stopping(h2_c1_child_stopping, NULL, NULL, APR_HOOK_MIDDLE);
+#endif
+
+ h2_c1_register_hooks();
+ h2_switch_register_hooks();
+ h2_c2_register_hooks();
+
+ /* Setup subprocess env for certain variables
+ */
+ ap_hook_fixups(h2_h2_fixups, NULL,NULL, APR_HOOK_MIDDLE);
+}
+
+static const char *val_HTTP2(apr_pool_t *p, server_rec *s,
+ conn_rec *c, request_rec *r, h2_conn_ctx_t *ctx)
+{
+ return ctx? "on" : "off";
+}
+
+static const char *val_H2_PUSH(apr_pool_t *p, server_rec *s,
+ conn_rec *c, request_rec *r,
+ h2_conn_ctx_t *conn_ctx)
+{
+ if (conn_ctx) {
+ if (r) {
+ if (conn_ctx->stream_id) {
+ const h2_stream *stream = h2_mplx_c2_stream_get(conn_ctx->mplx, conn_ctx->stream_id);
+ if (stream && stream->push_policy != H2_PUSH_NONE) {
+ return "on";
+ }
+ }
+ }
+ else if (c && h2_session_push_enabled(conn_ctx->session)) {
+ return "on";
+ }
+ }
+ else if (s) {
+ if (h2_config_geti(r, s, H2_CONF_PUSH)) {
+ return "on";
+ }
+ }
+ return "off";
+}
+
+static const char *val_H2_PUSHED(apr_pool_t *p, server_rec *s,
+ conn_rec *c, request_rec *r,
+ h2_conn_ctx_t *conn_ctx)
+{
+ if (conn_ctx) {
+ if (conn_ctx->stream_id && !H2_STREAM_CLIENT_INITIATED(conn_ctx->stream_id)) {
+ return "PUSHED";
+ }
+ }
+ return "";
+}
+
+static const char *val_H2_PUSHED_ON(apr_pool_t *p, server_rec *s,
+ conn_rec *c, request_rec *r,
+ h2_conn_ctx_t *conn_ctx)
+{
+ if (conn_ctx) {
+ if (conn_ctx->stream_id && !H2_STREAM_CLIENT_INITIATED(conn_ctx->stream_id)) {
+ const h2_stream *stream = h2_mplx_c2_stream_get(conn_ctx->mplx, conn_ctx->stream_id);
+ if (stream) {
+ return apr_itoa(p, stream->initiated_on);
+ }
+ }
+ }
+ return "";
+}
+
+static const char *val_H2_STREAM_TAG(apr_pool_t *p, server_rec *s,
+ conn_rec *c, request_rec *r, h2_conn_ctx_t *ctx)
+{
+ if (c) {
+ h2_conn_ctx_t *conn_ctx = h2_conn_ctx_get(c);
+ if (conn_ctx) {
+ return conn_ctx->stream_id == 0? conn_ctx->id
+ : apr_psprintf(p, "%s-%d", conn_ctx->id, conn_ctx->stream_id);
+ }
+ }
+ return "";
+}
+
+static const char *val_H2_STREAM_ID(apr_pool_t *p, server_rec *s,
+ conn_rec *c, request_rec *r, h2_conn_ctx_t *ctx)
+{
+ const char *cp = val_H2_STREAM_TAG(p, s, c, r, ctx);
+ if (cp && (cp = ap_strrchr_c(cp, '-'))) {
+ return ++cp;
+ }
+ return NULL;
+}
+
+typedef const char *h2_var_lookup(apr_pool_t *p, server_rec *s,
+ conn_rec *c, request_rec *r, h2_conn_ctx_t *ctx);
+typedef struct h2_var_def {
+ const char *name;
+ h2_var_lookup *lookup;
+ unsigned int subprocess : 1; /* should be set in r->subprocess_env */
+} h2_var_def;
+
+static h2_var_def H2_VARS[] = {
+ { "HTTP2", val_HTTP2, 1 },
+ { "H2PUSH", val_H2_PUSH, 1 },
+ { "H2_PUSH", val_H2_PUSH, 1 },
+ { "H2_PUSHED", val_H2_PUSHED, 1 },
+ { "H2_PUSHED_ON", val_H2_PUSHED_ON, 1 },
+ { "H2_STREAM_ID", val_H2_STREAM_ID, 1 },
+ { "H2_STREAM_TAG", val_H2_STREAM_TAG, 1 },
+};
+
+#ifndef H2_ALEN
+#define H2_ALEN(a) (sizeof(a)/sizeof((a)[0]))
+#endif
+
+
+static int http2_is_h2(conn_rec *c)
+{
+ return h2_conn_ctx_get(c->master? c->master : c) != NULL;
+}
+
+static char *http2_var_lookup(apr_pool_t *p, server_rec *s,
+ conn_rec *c, request_rec *r, char *name)
+{
+ unsigned int i;
+    /* If the # of vars grows, we need to put the definitions in a hash */
+ for (i = 0; i < H2_ALEN(H2_VARS); ++i) {
+ h2_var_def *vdef = &H2_VARS[i];
+ if (!strcmp(vdef->name, name)) {
+ h2_conn_ctx_t *ctx = (r? h2_conn_ctx_get(c) :
+ h2_conn_ctx_get(c->master? c->master : c));
+ return (char *)vdef->lookup(p, s, c, r, ctx);
+ }
+ }
+ return (char*)"";
+}
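+
+/*
+ * Editor's sketch: how another module might consume the optional functions
+ * registered in h2_hooks() above, assuming mod_http2.h declares them via
+ * APR_DECLARE_OPTIONAL_FN (not shown in this file).
+ *
+ *     APR_OPTIONAL_FN_TYPE(http2_is_h2) *is_h2;
+ *     is_h2 = APR_RETRIEVE_OPTIONAL_FN(http2_is_h2);
+ *     if (is_h2 && is_h2(c)) {
+ *         ... connection c belongs to an HTTP/2 session ...
+ *     }
+ */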
+
+static int h2_h2_fixups(request_rec *r)
+{
+ if (r->connection->master) {
+ h2_conn_ctx_t *ctx = h2_conn_ctx_get(r->connection);
+ unsigned int i;
+
+ for (i = 0; ctx && i < H2_ALEN(H2_VARS); ++i) {
+ h2_var_def *vdef = &H2_VARS[i];
+ if (vdef->subprocess) {
+ apr_table_setn(r->subprocess_env, vdef->name,
+ vdef->lookup(r->pool, r->server, r->connection,
+ r, ctx));
+ }
+ }
+ }
+ return DECLINED;
+}
diff --git a/modules/http2/mod_http2.dep b/modules/http2/mod_http2.dep
new file mode 100644
index 0000000..25c0ede
--- /dev/null
+++ b/modules/http2/mod_http2.dep
@@ -0,0 +1,1431 @@
+# Microsoft Developer Studio Generated Dependency File, included by mod_http2.mak
+
+./h2_alt_svc.c : \
+ "..\..\include\ap_config.h"\
+ "..\..\include\ap_config_layout.h"\
+ "..\..\include\ap_expr.h"\
+ "..\..\include\ap_hooks.h"\
+ "..\..\include\ap_mmn.h"\
+ "..\..\include\ap_regex.h"\
+ "..\..\include\ap_release.h"\
+ "..\..\include\apache_noprobes.h"\
+ "..\..\include\http_config.h"\
+ "..\..\include\http_connection.h"\
+ "..\..\include\http_core.h"\
+ "..\..\include\http_log.h"\
+ "..\..\include\http_protocol.h"\
+ "..\..\include\httpd.h"\
+ "..\..\include\os.h"\
+ "..\..\include\util_cfgtree.h"\
+ "..\..\include\util_filter.h"\
+ "..\..\srclib\apr-util\include\apr_buckets.h"\
+ "..\..\srclib\apr-util\include\apr_hooks.h"\
+ "..\..\srclib\apr-util\include\apr_optional.h"\
+ "..\..\srclib\apr-util\include\apr_optional_hooks.h"\
+ "..\..\srclib\apr-util\include\apr_uri.h"\
+ "..\..\srclib\apr-util\include\apu.h"\
+ "..\..\srclib\apr\include\apr.h"\
+ "..\..\srclib\apr\include\apr_allocator.h"\
+ "..\..\srclib\apr\include\apr_dso.h"\
+ "..\..\srclib\apr\include\apr_errno.h"\
+ "..\..\srclib\apr\include\apr_file_info.h"\
+ "..\..\srclib\apr\include\apr_file_io.h"\
+ "..\..\srclib\apr\include\apr_general.h"\
+ "..\..\srclib\apr\include\apr_global_mutex.h"\
+ "..\..\srclib\apr\include\apr_hash.h"\
+ "..\..\srclib\apr\include\apr_inherit.h"\
+ "..\..\srclib\apr\include\apr_mmap.h"\
+ "..\..\srclib\apr\include\apr_network_io.h"\
+ "..\..\srclib\apr\include\apr_poll.h"\
+ "..\..\srclib\apr\include\apr_pools.h"\
+ "..\..\srclib\apr\include\apr_portable.h"\
+ "..\..\srclib\apr\include\apr_proc_mutex.h"\
+ "..\..\srclib\apr\include\apr_ring.h"\
+ "..\..\srclib\apr\include\apr_shm.h"\
+ "..\..\srclib\apr\include\apr_strings.h"\
+ "..\..\srclib\apr\include\apr_tables.h"\
+ "..\..\srclib\apr\include\apr_thread_mutex.h"\
+ "..\..\srclib\apr\include\apr_thread_proc.h"\
+ "..\..\srclib\apr\include\apr_time.h"\
+ "..\..\srclib\apr\include\apr_user.h"\
+ "..\..\srclib\apr\include\apr_want.h"\
+ ".\h2_alt_svc.h"\
+ ".\h2_config.h"\
+ ".\h2_ctx.h"\
+ ".\h2_h2.h"\
+ ".\h2_private.h"\
+ ".\h2_util.h"\
+
+
+./h2_bucket_eos.c : \
+ "..\..\include\ap_config.h"\
+ "..\..\include\ap_config_layout.h"\
+ "..\..\include\ap_expr.h"\
+ "..\..\include\ap_hooks.h"\
+ "..\..\include\ap_mmn.h"\
+ "..\..\include\ap_regex.h"\
+ "..\..\include\ap_release.h"\
+ "..\..\include\apache_noprobes.h"\
+ "..\..\include\http_config.h"\
+ "..\..\include\http_connection.h"\
+ "..\..\include\http_core.h"\
+ "..\..\include\http_log.h"\
+ "..\..\include\httpd.h"\
+ "..\..\include\os.h"\
+ "..\..\include\util_cfgtree.h"\
+ "..\..\include\util_filter.h"\
+ "..\..\srclib\apr-util\include\apr_buckets.h"\
+ "..\..\srclib\apr-util\include\apr_hooks.h"\
+ "..\..\srclib\apr-util\include\apr_optional.h"\
+ "..\..\srclib\apr-util\include\apr_optional_hooks.h"\
+ "..\..\srclib\apr-util\include\apr_queue.h"\
+ "..\..\srclib\apr-util\include\apr_uri.h"\
+ "..\..\srclib\apr-util\include\apu.h"\
+ "..\..\srclib\apr\include\apr.h"\
+ "..\..\srclib\apr\include\apr_allocator.h"\
+ "..\..\srclib\apr\include\apr_errno.h"\
+ "..\..\srclib\apr\include\apr_file_info.h"\
+ "..\..\srclib\apr\include\apr_file_io.h"\
+ "..\..\srclib\apr\include\apr_general.h"\
+ "..\..\srclib\apr\include\apr_hash.h"\
+ "..\..\srclib\apr\include\apr_inherit.h"\
+ "..\..\srclib\apr\include\apr_mmap.h"\
+ "..\..\srclib\apr\include\apr_network_io.h"\
+ "..\..\srclib\apr\include\apr_poll.h"\
+ "..\..\srclib\apr\include\apr_pools.h"\
+ "..\..\srclib\apr\include\apr_ring.h"\
+ "..\..\srclib\apr\include\apr_tables.h"\
+ "..\..\srclib\apr\include\apr_thread_mutex.h"\
+ "..\..\srclib\apr\include\apr_thread_proc.h"\
+ "..\..\srclib\apr\include\apr_time.h"\
+ "..\..\srclib\apr\include\apr_user.h"\
+ "..\..\srclib\apr\include\apr_want.h"\
+ ".\h2.h"\
+ ".\h2_bucket_eos.h"\
+ ".\h2_mplx.h"\
+ ".\h2_private.h"\
+ ".\h2_stream.h"\
+
+
+./h2_config.c : \
+ "..\..\include\ap_config.h"\
+ "..\..\include\ap_config_layout.h"\
+ "..\..\include\ap_expr.h"\
+ "..\..\include\ap_hooks.h"\
+ "..\..\include\ap_mmn.h"\
+ "..\..\include\ap_mpm.h"\
+ "..\..\include\ap_regex.h"\
+ "..\..\include\ap_release.h"\
+ "..\..\include\apache_noprobes.h"\
+ "..\..\include\http_config.h"\
+ "..\..\include\http_core.h"\
+ "..\..\include\http_log.h"\
+ "..\..\include\http_vhost.h"\
+ "..\..\include\httpd.h"\
+ "..\..\include\os.h"\
+ "..\..\include\scoreboard.h"\
+ "..\..\include\util_cfgtree.h"\
+ "..\..\include\util_filter.h"\
+ "..\..\srclib\apr-util\include\apr_buckets.h"\
+ "..\..\srclib\apr-util\include\apr_hooks.h"\
+ "..\..\srclib\apr-util\include\apr_optional.h"\
+ "..\..\srclib\apr-util\include\apr_optional_hooks.h"\
+ "..\..\srclib\apr-util\include\apr_uri.h"\
+ "..\..\srclib\apr-util\include\apu.h"\
+ "..\..\srclib\apr\include\apr.h"\
+ "..\..\srclib\apr\include\apr_allocator.h"\
+ "..\..\srclib\apr\include\apr_dso.h"\
+ "..\..\srclib\apr\include\apr_errno.h"\
+ "..\..\srclib\apr\include\apr_file_info.h"\
+ "..\..\srclib\apr\include\apr_file_io.h"\
+ "..\..\srclib\apr\include\apr_general.h"\
+ "..\..\srclib\apr\include\apr_global_mutex.h"\
+ "..\..\srclib\apr\include\apr_hash.h"\
+ "..\..\srclib\apr\include\apr_inherit.h"\
+ "..\..\srclib\apr\include\apr_lib.h"\
+ "..\..\srclib\apr\include\apr_mmap.h"\
+ "..\..\srclib\apr\include\apr_network_io.h"\
+ "..\..\srclib\apr\include\apr_poll.h"\
+ "..\..\srclib\apr\include\apr_pools.h"\
+ "..\..\srclib\apr\include\apr_portable.h"\
+ "..\..\srclib\apr\include\apr_proc_mutex.h"\
+ "..\..\srclib\apr\include\apr_ring.h"\
+ "..\..\srclib\apr\include\apr_shm.h"\
+ "..\..\srclib\apr\include\apr_strings.h"\
+ "..\..\srclib\apr\include\apr_tables.h"\
+ "..\..\srclib\apr\include\apr_thread_mutex.h"\
+ "..\..\srclib\apr\include\apr_thread_proc.h"\
+ "..\..\srclib\apr\include\apr_time.h"\
+ "..\..\srclib\apr\include\apr_user.h"\
+ "..\..\srclib\apr\include\apr_want.h"\
+ ".\h2.h"\
+ ".\h2_alt_svc.h"\
+ ".\h2_config.h"\
+ ".\h2_conn.h"\
+ ".\h2_ctx.h"\
+ ".\h2_h2.h"\
+ ".\h2_private.h"\
+
+
+./h2_conn.c : \
+ "..\..\include\ap_config.h"\
+ "..\..\include\ap_config_layout.h"\
+ "..\..\include\ap_expr.h"\
+ "..\..\include\ap_hooks.h"\
+ "..\..\include\ap_mmn.h"\
+ "..\..\include\ap_mpm.h"\
+ "..\..\include\ap_regex.h"\
+ "..\..\include\ap_release.h"\
+ "..\..\include\apache_noprobes.h"\
+ "..\..\include\http_config.h"\
+ "..\..\include\http_connection.h"\
+ "..\..\include\http_core.h"\
+ "..\..\include\http_log.h"\
+ "..\..\include\http_protocol.h"\
+ "..\..\include\http_request.h"\
+ "..\..\include\httpd.h"\
+ "..\..\include\os.h"\
+ "..\..\include\scoreboard.h"\
+ "..\..\include\util_cfgtree.h"\
+ "..\..\include\util_filter.h"\
+ "..\..\srclib\apr-util\include\apr_buckets.h"\
+ "..\..\srclib\apr-util\include\apr_hooks.h"\
+ "..\..\srclib\apr-util\include\apr_optional.h"\
+ "..\..\srclib\apr-util\include\apr_optional_hooks.h"\
+ "..\..\srclib\apr-util\include\apr_queue.h"\
+ "..\..\srclib\apr-util\include\apr_uri.h"\
+ "..\..\srclib\apr-util\include\apu.h"\
+ "..\..\srclib\apr\include\apr.h"\
+ "..\..\srclib\apr\include\apr_allocator.h"\
+ "..\..\srclib\apr\include\apr_dso.h"\
+ "..\..\srclib\apr\include\apr_errno.h"\
+ "..\..\srclib\apr\include\apr_file_info.h"\
+ "..\..\srclib\apr\include\apr_file_io.h"\
+ "..\..\srclib\apr\include\apr_general.h"\
+ "..\..\srclib\apr\include\apr_global_mutex.h"\
+ "..\..\srclib\apr\include\apr_hash.h"\
+ "..\..\srclib\apr\include\apr_inherit.h"\
+ "..\..\srclib\apr\include\apr_mmap.h"\
+ "..\..\srclib\apr\include\apr_network_io.h"\
+ "..\..\srclib\apr\include\apr_poll.h"\
+ "..\..\srclib\apr\include\apr_pools.h"\
+ "..\..\srclib\apr\include\apr_portable.h"\
+ "..\..\srclib\apr\include\apr_proc_mutex.h"\
+ "..\..\srclib\apr\include\apr_ring.h"\
+ "..\..\srclib\apr\include\apr_shm.h"\
+ "..\..\srclib\apr\include\apr_tables.h"\
+ "..\..\srclib\apr\include\apr_thread_mutex.h"\
+ "..\..\srclib\apr\include\apr_thread_proc.h"\
+ "..\..\srclib\apr\include\apr_time.h"\
+ "..\..\srclib\apr\include\apr_user.h"\
+ "..\..\srclib\apr\include\apr_want.h"\
+ ".\h2.h"\
+ ".\h2_config.h"\
+ ".\h2_conn.h"\
+ ".\h2_conn_io.h"\
+ ".\h2_ctx.h"\
+ ".\h2_filter.h"\
+ ".\h2_h2.h"\
+ ".\h2_mplx.h"\
+ ".\h2_private.h"\
+ ".\h2_session.h"\
+ ".\h2_stream.h"\
+ ".\h2_task.h"\
+ ".\h2_version.h"\
+ ".\h2_workers.h"\
+
+
+./h2_conn_io.c : \
+ "..\..\include\ap_config.h"\
+ "..\..\include\ap_config_layout.h"\
+ "..\..\include\ap_expr.h"\
+ "..\..\include\ap_hooks.h"\
+ "..\..\include\ap_mmn.h"\
+ "..\..\include\ap_mpm.h"\
+ "..\..\include\ap_regex.h"\
+ "..\..\include\ap_release.h"\
+ "..\..\include\apache_noprobes.h"\
+ "..\..\include\http_config.h"\
+ "..\..\include\http_connection.h"\
+ "..\..\include\http_core.h"\
+ "..\..\include\http_log.h"\
+ "..\..\include\http_request.h"\
+ "..\..\include\httpd.h"\
+ "..\..\include\os.h"\
+ "..\..\include\scoreboard.h"\
+ "..\..\include\util_cfgtree.h"\
+ "..\..\include\util_filter.h"\
+ "..\..\srclib\apr-util\include\apr_buckets.h"\
+ "..\..\srclib\apr-util\include\apr_hooks.h"\
+ "..\..\srclib\apr-util\include\apr_optional.h"\
+ "..\..\srclib\apr-util\include\apr_optional_hooks.h"\
+ "..\..\srclib\apr-util\include\apr_uri.h"\
+ "..\..\srclib\apr-util\include\apu.h"\
+ "..\..\srclib\apr\include\apr.h"\
+ "..\..\srclib\apr\include\apr_allocator.h"\
+ "..\..\srclib\apr\include\apr_dso.h"\
+ "..\..\srclib\apr\include\apr_errno.h"\
+ "..\..\srclib\apr\include\apr_file_info.h"\
+ "..\..\srclib\apr\include\apr_file_io.h"\
+ "..\..\srclib\apr\include\apr_general.h"\
+ "..\..\srclib\apr\include\apr_global_mutex.h"\
+ "..\..\srclib\apr\include\apr_hash.h"\
+ "..\..\srclib\apr\include\apr_inherit.h"\
+ "..\..\srclib\apr\include\apr_mmap.h"\
+ "..\..\srclib\apr\include\apr_network_io.h"\
+ "..\..\srclib\apr\include\apr_poll.h"\
+ "..\..\srclib\apr\include\apr_pools.h"\
+ "..\..\srclib\apr\include\apr_portable.h"\
+ "..\..\srclib\apr\include\apr_proc_mutex.h"\
+ "..\..\srclib\apr\include\apr_ring.h"\
+ "..\..\srclib\apr\include\apr_shm.h"\
+ "..\..\srclib\apr\include\apr_strings.h"\
+ "..\..\srclib\apr\include\apr_tables.h"\
+ "..\..\srclib\apr\include\apr_thread_mutex.h"\
+ "..\..\srclib\apr\include\apr_thread_proc.h"\
+ "..\..\srclib\apr\include\apr_time.h"\
+ "..\..\srclib\apr\include\apr_user.h"\
+ "..\..\srclib\apr\include\apr_want.h"\
+ ".\h2.h"\
+ ".\h2_bucket_eos.h"\
+ ".\h2_config.h"\
+ ".\h2_conn_io.h"\
+ ".\h2_h2.h"\
+ ".\h2_private.h"\
+ ".\h2_session.h"\
+ ".\h2_util.h"\
+
+
+./h2_ctx.c : \
+ "..\..\include\ap_config.h"\
+ "..\..\include\ap_config_layout.h"\
+ "..\..\include\ap_expr.h"\
+ "..\..\include\ap_hooks.h"\
+ "..\..\include\ap_mmn.h"\
+ "..\..\include\ap_regex.h"\
+ "..\..\include\ap_release.h"\
+ "..\..\include\apache_noprobes.h"\
+ "..\..\include\http_config.h"\
+ "..\..\include\http_core.h"\
+ "..\..\include\httpd.h"\
+ "..\..\include\os.h"\
+ "..\..\include\util_cfgtree.h"\
+ "..\..\include\util_filter.h"\
+ "..\..\srclib\apr-util\include\apr_buckets.h"\
+ "..\..\srclib\apr-util\include\apr_hooks.h"\
+ "..\..\srclib\apr-util\include\apr_optional.h"\
+ "..\..\srclib\apr-util\include\apr_optional_hooks.h"\
+ "..\..\srclib\apr-util\include\apr_uri.h"\
+ "..\..\srclib\apr-util\include\apu.h"\
+ "..\..\srclib\apr\include\apr.h"\
+ "..\..\srclib\apr\include\apr_allocator.h"\
+ "..\..\srclib\apr\include\apr_errno.h"\
+ "..\..\srclib\apr\include\apr_file_info.h"\
+ "..\..\srclib\apr\include\apr_file_io.h"\
+ "..\..\srclib\apr\include\apr_general.h"\
+ "..\..\srclib\apr\include\apr_hash.h"\
+ "..\..\srclib\apr\include\apr_inherit.h"\
+ "..\..\srclib\apr\include\apr_mmap.h"\
+ "..\..\srclib\apr\include\apr_network_io.h"\
+ "..\..\srclib\apr\include\apr_poll.h"\
+ "..\..\srclib\apr\include\apr_pools.h"\
+ "..\..\srclib\apr\include\apr_ring.h"\
+ "..\..\srclib\apr\include\apr_tables.h"\
+ "..\..\srclib\apr\include\apr_thread_mutex.h"\
+ "..\..\srclib\apr\include\apr_thread_proc.h"\
+ "..\..\srclib\apr\include\apr_time.h"\
+ "..\..\srclib\apr\include\apr_user.h"\
+ "..\..\srclib\apr\include\apr_want.h"\
+ ".\h2.h"\
+ ".\h2_conn_io.h"\
+ ".\h2_ctx.h"\
+ ".\h2_private.h"\
+ ".\h2_session.h"\
+ ".\h2_task.h"\
+
+
+./h2_filter.c : \
+ "..\..\include\ap_config.h"\
+ "..\..\include\ap_config_layout.h"\
+ "..\..\include\ap_expr.h"\
+ "..\..\include\ap_hooks.h"\
+ "..\..\include\ap_mmn.h"\
+ "..\..\include\ap_regex.h"\
+ "..\..\include\ap_release.h"\
+ "..\..\include\apache_noprobes.h"\
+ "..\..\include\http_config.h"\
+ "..\..\include\http_connection.h"\
+ "..\..\include\http_core.h"\
+ "..\..\include\http_log.h"\
+ "..\..\include\httpd.h"\
+ "..\..\include\os.h"\
+ "..\..\include\scoreboard.h"\
+ "..\..\include\util_cfgtree.h"\
+ "..\..\include\util_filter.h"\
+ "..\..\srclib\apr-util\include\apr_buckets.h"\
+ "..\..\srclib\apr-util\include\apr_hooks.h"\
+ "..\..\srclib\apr-util\include\apr_optional.h"\
+ "..\..\srclib\apr-util\include\apr_optional_hooks.h"\
+ "..\..\srclib\apr-util\include\apr_queue.h"\
+ "..\..\srclib\apr-util\include\apr_uri.h"\
+ "..\..\srclib\apr-util\include\apu.h"\
+ "..\..\srclib\apr\include\apr.h"\
+ "..\..\srclib\apr\include\apr_allocator.h"\
+ "..\..\srclib\apr\include\apr_dso.h"\
+ "..\..\srclib\apr\include\apr_errno.h"\
+ "..\..\srclib\apr\include\apr_file_info.h"\
+ "..\..\srclib\apr\include\apr_file_io.h"\
+ "..\..\srclib\apr\include\apr_general.h"\
+ "..\..\srclib\apr\include\apr_global_mutex.h"\
+ "..\..\srclib\apr\include\apr_hash.h"\
+ "..\..\srclib\apr\include\apr_inherit.h"\
+ "..\..\srclib\apr\include\apr_mmap.h"\
+ "..\..\srclib\apr\include\apr_network_io.h"\
+ "..\..\srclib\apr\include\apr_poll.h"\
+ "..\..\srclib\apr\include\apr_pools.h"\
+ "..\..\srclib\apr\include\apr_portable.h"\
+ "..\..\srclib\apr\include\apr_proc_mutex.h"\
+ "..\..\srclib\apr\include\apr_ring.h"\
+ "..\..\srclib\apr\include\apr_shm.h"\
+ "..\..\srclib\apr\include\apr_tables.h"\
+ "..\..\srclib\apr\include\apr_thread_mutex.h"\
+ "..\..\srclib\apr\include\apr_thread_proc.h"\
+ "..\..\srclib\apr\include\apr_time.h"\
+ "..\..\srclib\apr\include\apr_user.h"\
+ "..\..\srclib\apr\include\apr_want.h"\
+ ".\h2.h"\
+ ".\h2_conn_io.h"\
+ ".\h2_ctx.h"\
+ ".\h2_filter.h"\
+ ".\h2_mplx.h"\
+ ".\h2_private.h"\
+ ".\h2_push.h"\
+ ".\h2_request.h"\
+ ".\h2_session.h"\
+ ".\h2_stream.h"\
+ ".\h2_task.h"\
+ ".\h2_util.h"\
+ ".\h2_version.h"\
+
+
+./h2_from_h1.c : \
+ "..\..\include\ap_config.h"\
+ "..\..\include\ap_config_layout.h"\
+ "..\..\include\ap_expr.h"\
+ "..\..\include\ap_hooks.h"\
+ "..\..\include\ap_mmn.h"\
+ "..\..\include\ap_regex.h"\
+ "..\..\include\ap_release.h"\
+ "..\..\include\apache_noprobes.h"\
+ "..\..\include\http_config.h"\
+ "..\..\include\http_connection.h"\
+ "..\..\include\http_core.h"\
+ "..\..\include\http_log.h"\
+ "..\..\include\http_protocol.h"\
+ "..\..\include\http_request.h"\
+ "..\..\include\httpd.h"\
+ "..\..\include\os.h"\
+ "..\..\include\util_cfgtree.h"\
+ "..\..\include\util_filter.h"\
+ "..\..\include\util_time.h"\
+ "..\..\srclib\apr-util\include\apr_buckets.h"\
+ "..\..\srclib\apr-util\include\apr_hooks.h"\
+ "..\..\srclib\apr-util\include\apr_optional.h"\
+ "..\..\srclib\apr-util\include\apr_optional_hooks.h"\
+ "..\..\srclib\apr-util\include\apr_uri.h"\
+ "..\..\srclib\apr-util\include\apu.h"\
+ "..\..\srclib\apr\include\apr.h"\
+ "..\..\srclib\apr\include\apr_allocator.h"\
+ "..\..\srclib\apr\include\apr_dso.h"\
+ "..\..\srclib\apr\include\apr_errno.h"\
+ "..\..\srclib\apr\include\apr_file_info.h"\
+ "..\..\srclib\apr\include\apr_file_io.h"\
+ "..\..\srclib\apr\include\apr_general.h"\
+ "..\..\srclib\apr\include\apr_global_mutex.h"\
+ "..\..\srclib\apr\include\apr_hash.h"\
+ "..\..\srclib\apr\include\apr_inherit.h"\
+ "..\..\srclib\apr\include\apr_lib.h"\
+ "..\..\srclib\apr\include\apr_mmap.h"\
+ "..\..\srclib\apr\include\apr_network_io.h"\
+ "..\..\srclib\apr\include\apr_poll.h"\
+ "..\..\srclib\apr\include\apr_pools.h"\
+ "..\..\srclib\apr\include\apr_portable.h"\
+ "..\..\srclib\apr\include\apr_proc_mutex.h"\
+ "..\..\srclib\apr\include\apr_ring.h"\
+ "..\..\srclib\apr\include\apr_shm.h"\
+ "..\..\srclib\apr\include\apr_strings.h"\
+ "..\..\srclib\apr\include\apr_tables.h"\
+ "..\..\srclib\apr\include\apr_thread_mutex.h"\
+ "..\..\srclib\apr\include\apr_thread_proc.h"\
+ "..\..\srclib\apr\include\apr_time.h"\
+ "..\..\srclib\apr\include\apr_user.h"\
+ "..\..\srclib\apr\include\apr_want.h"\
+ ".\h2.h"\
+ ".\h2_from_h1.h"\
+ ".\h2_private.h"\
+ ".\h2_task.h"\
+ ".\h2_util.h"\
+
+
+./h2_h2.c : \
+ "..\..\include\ap_config.h"\
+ "..\..\include\ap_config_layout.h"\
+ "..\..\include\ap_expr.h"\
+ "..\..\include\ap_hooks.h"\
+ "..\..\include\ap_mmn.h"\
+ "..\..\include\ap_regex.h"\
+ "..\..\include\ap_release.h"\
+ "..\..\include\apache_noprobes.h"\
+ "..\..\include\http_config.h"\
+ "..\..\include\http_connection.h"\
+ "..\..\include\http_core.h"\
+ "..\..\include\http_log.h"\
+ "..\..\include\http_protocol.h"\
+ "..\..\include\http_request.h"\
+ "..\..\include\httpd.h"\
+ "..\..\include\os.h"\
+ "..\..\include\util_cfgtree.h"\
+ "..\..\include\util_filter.h"\
+ "..\..\srclib\apr-util\include\apr_buckets.h"\
+ "..\..\srclib\apr-util\include\apr_hooks.h"\
+ "..\..\srclib\apr-util\include\apr_optional.h"\
+ "..\..\srclib\apr-util\include\apr_optional_hooks.h"\
+ "..\..\srclib\apr-util\include\apr_uri.h"\
+ "..\..\srclib\apr-util\include\apu.h"\
+ "..\..\srclib\apr\include\apr.h"\
+ "..\..\srclib\apr\include\apr_allocator.h"\
+ "..\..\srclib\apr\include\apr_dso.h"\
+ "..\..\srclib\apr\include\apr_errno.h"\
+ "..\..\srclib\apr\include\apr_file_info.h"\
+ "..\..\srclib\apr\include\apr_file_io.h"\
+ "..\..\srclib\apr\include\apr_general.h"\
+ "..\..\srclib\apr\include\apr_global_mutex.h"\
+ "..\..\srclib\apr\include\apr_hash.h"\
+ "..\..\srclib\apr\include\apr_inherit.h"\
+ "..\..\srclib\apr\include\apr_mmap.h"\
+ "..\..\srclib\apr\include\apr_network_io.h"\
+ "..\..\srclib\apr\include\apr_poll.h"\
+ "..\..\srclib\apr\include\apr_pools.h"\
+ "..\..\srclib\apr\include\apr_portable.h"\
+ "..\..\srclib\apr\include\apr_proc_mutex.h"\
+ "..\..\srclib\apr\include\apr_ring.h"\
+ "..\..\srclib\apr\include\apr_shm.h"\
+ "..\..\srclib\apr\include\apr_strings.h"\
+ "..\..\srclib\apr\include\apr_tables.h"\
+ "..\..\srclib\apr\include\apr_thread_mutex.h"\
+ "..\..\srclib\apr\include\apr_thread_proc.h"\
+ "..\..\srclib\apr\include\apr_time.h"\
+ "..\..\srclib\apr\include\apr_user.h"\
+ "..\..\srclib\apr\include\apr_want.h"\
+ "..\ssl\mod_ssl.h"\
+ ".\h2.h"\
+ ".\h2_config.h"\
+ ".\h2_conn.h"\
+ ".\h2_conn_io.h"\
+ ".\h2_ctx.h"\
+ ".\h2_h2.h"\
+ ".\h2_private.h"\
+ ".\h2_request.h"\
+ ".\h2_session.h"\
+ ".\h2_stream.h"\
+ ".\h2_task.h"\
+ ".\h2_util.h"\
+ ".\mod_http2.h"\
+
+
+./h2_int_queue.c : \
+ "..\..\srclib\apr\include\apr.h"\
+ "..\..\srclib\apr\include\apr_allocator.h"\
+ "..\..\srclib\apr\include\apr_errno.h"\
+ "..\..\srclib\apr\include\apr_general.h"\
+ "..\..\srclib\apr\include\apr_pools.h"\
+ "..\..\srclib\apr\include\apr_thread_mutex.h"\
+ "..\..\srclib\apr\include\apr_want.h"\
+
+
+./h2_io.c : \
+ "..\..\include\ap_config.h"\
+ "..\..\include\ap_config_layout.h"\
+ "..\..\include\ap_expr.h"\
+ "..\..\include\ap_hooks.h"\
+ "..\..\include\ap_mmn.h"\
+ "..\..\include\ap_regex.h"\
+ "..\..\include\ap_release.h"\
+ "..\..\include\apache_noprobes.h"\
+ "..\..\include\http_config.h"\
+ "..\..\include\http_connection.h"\
+ "..\..\include\http_core.h"\
+ "..\..\include\http_log.h"\
+ "..\..\include\http_request.h"\
+ "..\..\include\httpd.h"\
+ "..\..\include\os.h"\
+ "..\..\include\util_cfgtree.h"\
+ "..\..\include\util_filter.h"\
+ "..\..\srclib\apr-util\include\apr_buckets.h"\
+ "..\..\srclib\apr-util\include\apr_hooks.h"\
+ "..\..\srclib\apr-util\include\apr_optional.h"\
+ "..\..\srclib\apr-util\include\apr_optional_hooks.h"\
+ "..\..\srclib\apr-util\include\apr_queue.h"\
+ "..\..\srclib\apr-util\include\apr_uri.h"\
+ "..\..\srclib\apr-util\include\apu.h"\
+ "..\..\srclib\apr\include\apr.h"\
+ "..\..\srclib\apr\include\apr_allocator.h"\
+ "..\..\srclib\apr\include\apr_errno.h"\
+ "..\..\srclib\apr\include\apr_file_info.h"\
+ "..\..\srclib\apr\include\apr_file_io.h"\
+ "..\..\srclib\apr\include\apr_general.h"\
+ "..\..\srclib\apr\include\apr_hash.h"\
+ "..\..\srclib\apr\include\apr_inherit.h"\
+ "..\..\srclib\apr\include\apr_mmap.h"\
+ "..\..\srclib\apr\include\apr_network_io.h"\
+ "..\..\srclib\apr\include\apr_poll.h"\
+ "..\..\srclib\apr\include\apr_pools.h"\
+ "..\..\srclib\apr\include\apr_ring.h"\
+ "..\..\srclib\apr\include\apr_tables.h"\
+ "..\..\srclib\apr\include\apr_thread_cond.h"\
+ "..\..\srclib\apr\include\apr_thread_mutex.h"\
+ "..\..\srclib\apr\include\apr_thread_proc.h"\
+ "..\..\srclib\apr\include\apr_time.h"\
+ "..\..\srclib\apr\include\apr_user.h"\
+ "..\..\srclib\apr\include\apr_want.h"\
+ ".\h2.h"\
+ ".\h2_h2.h"\
+ ".\h2_mplx.h"\
+ ".\h2_private.h"\
+ ".\h2_request.h"\
+ ".\h2_task.h"\
+ ".\h2_util.h"\
+
+
+./h2_io_set.c : \
+ "..\..\include\ap_config.h"\
+ "..\..\include\ap_config_layout.h"\
+ "..\..\include\ap_expr.h"\
+ "..\..\include\ap_hooks.h"\
+ "..\..\include\ap_mmn.h"\
+ "..\..\include\ap_regex.h"\
+ "..\..\include\ap_release.h"\
+ "..\..\include\apache_noprobes.h"\
+ "..\..\include\http_config.h"\
+ "..\..\include\http_connection.h"\
+ "..\..\include\http_core.h"\
+ "..\..\include\http_log.h"\
+ "..\..\include\httpd.h"\
+ "..\..\include\os.h"\
+ "..\..\include\util_cfgtree.h"\
+ "..\..\include\util_filter.h"\
+ "..\..\srclib\apr-util\include\apr_buckets.h"\
+ "..\..\srclib\apr-util\include\apr_hooks.h"\
+ "..\..\srclib\apr-util\include\apr_optional.h"\
+ "..\..\srclib\apr-util\include\apr_optional_hooks.h"\
+ "..\..\srclib\apr-util\include\apr_uri.h"\
+ "..\..\srclib\apr-util\include\apu.h"\
+ "..\..\srclib\apr\include\apr.h"\
+ "..\..\srclib\apr\include\apr_allocator.h"\
+ "..\..\srclib\apr\include\apr_errno.h"\
+ "..\..\srclib\apr\include\apr_file_info.h"\
+ "..\..\srclib\apr\include\apr_file_io.h"\
+ "..\..\srclib\apr\include\apr_general.h"\
+ "..\..\srclib\apr\include\apr_hash.h"\
+ "..\..\srclib\apr\include\apr_inherit.h"\
+ "..\..\srclib\apr\include\apr_mmap.h"\
+ "..\..\srclib\apr\include\apr_network_io.h"\
+ "..\..\srclib\apr\include\apr_poll.h"\
+ "..\..\srclib\apr\include\apr_pools.h"\
+ "..\..\srclib\apr\include\apr_ring.h"\
+ "..\..\srclib\apr\include\apr_strings.h"\
+ "..\..\srclib\apr\include\apr_tables.h"\
+ "..\..\srclib\apr\include\apr_thread_mutex.h"\
+ "..\..\srclib\apr\include\apr_thread_proc.h"\
+ "..\..\srclib\apr\include\apr_time.h"\
+ "..\..\srclib\apr\include\apr_user.h"\
+ "..\..\srclib\apr\include\apr_want.h"\
+ ".\h2_io_set.h"\
+ ".\h2_private.h"\
+
+
+./h2_mplx.c : \
+ "..\..\include\ap_config.h"\
+ "..\..\include\ap_config_layout.h"\
+ "..\..\include\ap_expr.h"\
+ "..\..\include\ap_hooks.h"\
+ "..\..\include\ap_mmn.h"\
+ "..\..\include\ap_regex.h"\
+ "..\..\include\ap_release.h"\
+ "..\..\include\apache_noprobes.h"\
+ "..\..\include\http_config.h"\
+ "..\..\include\http_core.h"\
+ "..\..\include\http_log.h"\
+ "..\..\include\httpd.h"\
+ "..\..\include\os.h"\
+ "..\..\include\util_cfgtree.h"\
+ "..\..\include\util_filter.h"\
+ "..\..\srclib\apr-util\include\apr_buckets.h"\
+ "..\..\srclib\apr-util\include\apr_hooks.h"\
+ "..\..\srclib\apr-util\include\apr_optional.h"\
+ "..\..\srclib\apr-util\include\apr_optional_hooks.h"\
+ "..\..\srclib\apr-util\include\apr_queue.h"\
+ "..\..\srclib\apr-util\include\apr_uri.h"\
+ "..\..\srclib\apr-util\include\apu.h"\
+ "..\..\srclib\apr\include\apr.h"\
+ "..\..\srclib\apr\include\apr_allocator.h"\
+ "..\..\srclib\apr\include\apr_errno.h"\
+ "..\..\srclib\apr\include\apr_file_info.h"\
+ "..\..\srclib\apr\include\apr_file_io.h"\
+ "..\..\srclib\apr\include\apr_general.h"\
+ "..\..\srclib\apr\include\apr_hash.h"\
+ "..\..\srclib\apr\include\apr_inherit.h"\
+ "..\..\srclib\apr\include\apr_mmap.h"\
+ "..\..\srclib\apr\include\apr_network_io.h"\
+ "..\..\srclib\apr\include\apr_poll.h"\
+ "..\..\srclib\apr\include\apr_pools.h"\
+ "..\..\srclib\apr\include\apr_ring.h"\
+ "..\..\srclib\apr\include\apr_strings.h"\
+ "..\..\srclib\apr\include\apr_tables.h"\
+ "..\..\srclib\apr\include\apr_thread_cond.h"\
+ "..\..\srclib\apr\include\apr_thread_mutex.h"\
+ "..\..\srclib\apr\include\apr_thread_proc.h"\
+ "..\..\srclib\apr\include\apr_time.h"\
+ "..\..\srclib\apr\include\apr_user.h"\
+ "..\..\srclib\apr\include\apr_want.h"\
+ ".\h2.h"\
+ ".\h2_config.h"\
+ ".\h2_conn.h"\
+ ".\h2_ctx.h"\
+ ".\h2_h2.h"\
+ ".\h2_mplx.h"\
+ ".\h2_private.h"\
+ ".\h2_request.h"\
+ ".\h2_stream.h"\
+ ".\h2_task.h"\
+ ".\h2_util.h"\
+ ".\h2_workers.h"\
+ ".\mod_http2.h"\
+
+
+./h2_ngn_shed.c : \
+ "..\..\include\ap_config.h"\
+ "..\..\include\ap_config_layout.h"\
+ "..\..\include\ap_expr.h"\
+ "..\..\include\ap_hooks.h"\
+ "..\..\include\ap_mmn.h"\
+ "..\..\include\ap_regex.h"\
+ "..\..\include\ap_release.h"\
+ "..\..\include\apache_noprobes.h"\
+ "..\..\include\http_config.h"\
+ "..\..\include\http_core.h"\
+ "..\..\include\http_log.h"\
+ "..\..\include\httpd.h"\
+ "..\..\include\os.h"\
+ "..\..\include\util_cfgtree.h"\
+ "..\..\include\util_filter.h"\
+ "..\..\srclib\apr-util\include\apr_buckets.h"\
+ "..\..\srclib\apr-util\include\apr_hooks.h"\
+ "..\..\srclib\apr-util\include\apr_optional.h"\
+ "..\..\srclib\apr-util\include\apr_optional_hooks.h"\
+ "..\..\srclib\apr-util\include\apr_queue.h"\
+ "..\..\srclib\apr-util\include\apr_uri.h"\
+ "..\..\srclib\apr-util\include\apu.h"\
+ "..\..\srclib\apr\include\apr.h"\
+ "..\..\srclib\apr\include\apr_allocator.h"\
+ "..\..\srclib\apr\include\apr_errno.h"\
+ "..\..\srclib\apr\include\apr_file_info.h"\
+ "..\..\srclib\apr\include\apr_file_io.h"\
+ "..\..\srclib\apr\include\apr_general.h"\
+ "..\..\srclib\apr\include\apr_hash.h"\
+ "..\..\srclib\apr\include\apr_inherit.h"\
+ "..\..\srclib\apr\include\apr_mmap.h"\
+ "..\..\srclib\apr\include\apr_network_io.h"\
+ "..\..\srclib\apr\include\apr_poll.h"\
+ "..\..\srclib\apr\include\apr_pools.h"\
+ "..\..\srclib\apr\include\apr_ring.h"\
+ "..\..\srclib\apr\include\apr_strings.h"\
+ "..\..\srclib\apr\include\apr_tables.h"\
+ "..\..\srclib\apr\include\apr_thread_cond.h"\
+ "..\..\srclib\apr\include\apr_thread_mutex.h"\
+ "..\..\srclib\apr\include\apr_thread_proc.h"\
+ "..\..\srclib\apr\include\apr_time.h"\
+ "..\..\srclib\apr\include\apr_user.h"\
+ "..\..\srclib\apr\include\apr_want.h"\
+ ".\h2.h"\
+ ".\h2_config.h"\
+ ".\h2_conn.h"\
+ ".\h2_ctx.h"\
+ ".\h2_h2.h"\
+ ".\h2_mplx.h"\
+ ".\h2_private.h"\
+ ".\h2_request.h"\
+ ".\h2_task.h"\
+ ".\h2_util.h"\
+ ".\mod_http2.h"\
+
+
+./h2_push.c : \
+ "..\..\include\ap_config.h"\
+ "..\..\include\ap_config_layout.h"\
+ "..\..\include\ap_expr.h"\
+ "..\..\include\ap_hooks.h"\
+ "..\..\include\ap_mmn.h"\
+ "..\..\include\ap_regex.h"\
+ "..\..\include\ap_release.h"\
+ "..\..\include\apache_noprobes.h"\
+ "..\..\include\http_config.h"\
+ "..\..\include\http_core.h"\
+ "..\..\include\http_log.h"\
+ "..\..\include\httpd.h"\
+ "..\..\include\os.h"\
+ "..\..\include\util_cfgtree.h"\
+ "..\..\include\util_filter.h"\
+ "..\..\srclib\apr-util\include\apr_buckets.h"\
+ "..\..\srclib\apr-util\include\apr_hooks.h"\
+ "..\..\srclib\apr-util\include\apr_optional.h"\
+ "..\..\srclib\apr-util\include\apr_optional_hooks.h"\
+ "..\..\srclib\apr-util\include\apr_uri.h"\
+ "..\..\srclib\apr-util\include\apu.h"\
+ "..\..\srclib\apr\include\apr.h"\
+ "..\..\srclib\apr\include\apr_allocator.h"\
+ "..\..\srclib\apr\include\apr_errno.h"\
+ "..\..\srclib\apr\include\apr_file_info.h"\
+ "..\..\srclib\apr\include\apr_file_io.h"\
+ "..\..\srclib\apr\include\apr_general.h"\
+ "..\..\srclib\apr\include\apr_hash.h"\
+ "..\..\srclib\apr\include\apr_inherit.h"\
+ "..\..\srclib\apr\include\apr_lib.h"\
+ "..\..\srclib\apr\include\apr_mmap.h"\
+ "..\..\srclib\apr\include\apr_network_io.h"\
+ "..\..\srclib\apr\include\apr_poll.h"\
+ "..\..\srclib\apr\include\apr_pools.h"\
+ "..\..\srclib\apr\include\apr_ring.h"\
+ "..\..\srclib\apr\include\apr_strings.h"\
+ "..\..\srclib\apr\include\apr_tables.h"\
+ "..\..\srclib\apr\include\apr_thread_mutex.h"\
+ "..\..\srclib\apr\include\apr_thread_proc.h"\
+ "..\..\srclib\apr\include\apr_time.h"\
+ "..\..\srclib\apr\include\apr_user.h"\
+ "..\..\srclib\apr\include\apr_want.h"\
+ ".\h2.h"\
+ ".\h2_conn_io.h"\
+ ".\h2_h2.h"\
+ ".\h2_private.h"\
+ ".\h2_push.h"\
+ ".\h2_request.h"\
+ ".\h2_session.h"\
+ ".\h2_stream.h"\
+ ".\h2_util.h"\
+
+
+./h2_request.c : \
+ "..\..\include\ap_config.h"\
+ "..\..\include\ap_config_layout.h"\
+ "..\..\include\ap_expr.h"\
+ "..\..\include\ap_hooks.h"\
+ "..\..\include\ap_mmn.h"\
+ "..\..\include\ap_mpm.h"\
+ "..\..\include\ap_regex.h"\
+ "..\..\include\ap_release.h"\
+ "..\..\include\apache_noprobes.h"\
+ "..\..\include\http_config.h"\
+ "..\..\include\http_connection.h"\
+ "..\..\include\http_core.h"\
+ "..\..\include\http_log.h"\
+ "..\..\include\http_protocol.h"\
+ "..\..\include\http_request.h"\
+ "..\..\include\http_vhost.h"\
+ "..\..\include\httpd.h"\
+ "..\..\include\mod_core.h"\
+ "..\..\include\os.h"\
+ "..\..\include\scoreboard.h"\
+ "..\..\include\util_cfgtree.h"\
+ "..\..\include\util_filter.h"\
+ "..\..\srclib\apr-util\include\apr_buckets.h"\
+ "..\..\srclib\apr-util\include\apr_hooks.h"\
+ "..\..\srclib\apr-util\include\apr_optional.h"\
+ "..\..\srclib\apr-util\include\apr_optional_hooks.h"\
+ "..\..\srclib\apr-util\include\apr_uri.h"\
+ "..\..\srclib\apr-util\include\apu.h"\
+ "..\..\srclib\apr\include\apr.h"\
+ "..\..\srclib\apr\include\apr_allocator.h"\
+ "..\..\srclib\apr\include\apr_dso.h"\
+ "..\..\srclib\apr\include\apr_errno.h"\
+ "..\..\srclib\apr\include\apr_file_info.h"\
+ "..\..\srclib\apr\include\apr_file_io.h"\
+ "..\..\srclib\apr\include\apr_general.h"\
+ "..\..\srclib\apr\include\apr_global_mutex.h"\
+ "..\..\srclib\apr\include\apr_hash.h"\
+ "..\..\srclib\apr\include\apr_inherit.h"\
+ "..\..\srclib\apr\include\apr_mmap.h"\
+ "..\..\srclib\apr\include\apr_network_io.h"\
+ "..\..\srclib\apr\include\apr_poll.h"\
+ "..\..\srclib\apr\include\apr_pools.h"\
+ "..\..\srclib\apr\include\apr_portable.h"\
+ "..\..\srclib\apr\include\apr_proc_mutex.h"\
+ "..\..\srclib\apr\include\apr_ring.h"\
+ "..\..\srclib\apr\include\apr_shm.h"\
+ "..\..\srclib\apr\include\apr_strings.h"\
+ "..\..\srclib\apr\include\apr_tables.h"\
+ "..\..\srclib\apr\include\apr_thread_mutex.h"\
+ "..\..\srclib\apr\include\apr_thread_proc.h"\
+ "..\..\srclib\apr\include\apr_time.h"\
+ "..\..\srclib\apr\include\apr_user.h"\
+ "..\..\srclib\apr\include\apr_want.h"\
+ ".\h2.h"\
+ ".\h2_private.h"\
+ ".\h2_push.h"\
+ ".\h2_request.h"\
+ ".\h2_util.h"\
+
+
+./h2_session.c : \
+ "..\..\include\ap_config.h"\
+ "..\..\include\ap_config_layout.h"\
+ "..\..\include\ap_expr.h"\
+ "..\..\include\ap_hooks.h"\
+ "..\..\include\ap_mmn.h"\
+ "..\..\include\ap_mpm.h"\
+ "..\..\include\ap_regex.h"\
+ "..\..\include\ap_release.h"\
+ "..\..\include\apache_noprobes.h"\
+ "..\..\include\http_config.h"\
+ "..\..\include\http_core.h"\
+ "..\..\include\http_log.h"\
+ "..\..\include\httpd.h"\
+ "..\..\include\os.h"\
+ "..\..\include\scoreboard.h"\
+ "..\..\include\util_cfgtree.h"\
+ "..\..\include\util_filter.h"\
+ "..\..\srclib\apr-util\include\apr_base64.h"\
+ "..\..\srclib\apr-util\include\apr_buckets.h"\
+ "..\..\srclib\apr-util\include\apr_hooks.h"\
+ "..\..\srclib\apr-util\include\apr_optional.h"\
+ "..\..\srclib\apr-util\include\apr_optional_hooks.h"\
+ "..\..\srclib\apr-util\include\apr_queue.h"\
+ "..\..\srclib\apr-util\include\apr_uri.h"\
+ "..\..\srclib\apr-util\include\apu.h"\
+ "..\..\srclib\apr\include\apr.h"\
+ "..\..\srclib\apr\include\apr_allocator.h"\
+ "..\..\srclib\apr\include\apr_dso.h"\
+ "..\..\srclib\apr\include\apr_errno.h"\
+ "..\..\srclib\apr\include\apr_file_info.h"\
+ "..\..\srclib\apr\include\apr_file_io.h"\
+ "..\..\srclib\apr\include\apr_general.h"\
+ "..\..\srclib\apr\include\apr_global_mutex.h"\
+ "..\..\srclib\apr\include\apr_hash.h"\
+ "..\..\srclib\apr\include\apr_inherit.h"\
+ "..\..\srclib\apr\include\apr_mmap.h"\
+ "..\..\srclib\apr\include\apr_network_io.h"\
+ "..\..\srclib\apr\include\apr_poll.h"\
+ "..\..\srclib\apr\include\apr_pools.h"\
+ "..\..\srclib\apr\include\apr_portable.h"\
+ "..\..\srclib\apr\include\apr_proc_mutex.h"\
+ "..\..\srclib\apr\include\apr_ring.h"\
+ "..\..\srclib\apr\include\apr_shm.h"\
+ "..\..\srclib\apr\include\apr_strings.h"\
+ "..\..\srclib\apr\include\apr_tables.h"\
+ "..\..\srclib\apr\include\apr_thread_cond.h"\
+ "..\..\srclib\apr\include\apr_thread_mutex.h"\
+ "..\..\srclib\apr\include\apr_thread_proc.h"\
+ "..\..\srclib\apr\include\apr_time.h"\
+ "..\..\srclib\apr\include\apr_user.h"\
+ "..\..\srclib\apr\include\apr_want.h"\
+ ".\h2.h"\
+ ".\h2_bucket_eos.h"\
+ ".\h2_config.h"\
+ ".\h2_conn_io.h"\
+ ".\h2_ctx.h"\
+ ".\h2_filter.h"\
+ ".\h2_from_h1.h"\
+ ".\h2_h2.h"\
+ ".\h2_mplx.h"\
+ ".\h2_private.h"\
+ ".\h2_push.h"\
+ ".\h2_request.h"\
+ ".\h2_session.h"\
+ ".\h2_stream.h"\
+ ".\h2_task.h"\
+ ".\h2_util.h"\
+ ".\h2_version.h"\
+ ".\h2_workers.h"\
+
+
+./h2_stream.c : \
+ "..\..\include\ap_config.h"\
+ "..\..\include\ap_config_layout.h"\
+ "..\..\include\ap_expr.h"\
+ "..\..\include\ap_hooks.h"\
+ "..\..\include\ap_mmn.h"\
+ "..\..\include\ap_regex.h"\
+ "..\..\include\ap_release.h"\
+ "..\..\include\apache_noprobes.h"\
+ "..\..\include\http_config.h"\
+ "..\..\include\http_connection.h"\
+ "..\..\include\http_core.h"\
+ "..\..\include\http_log.h"\
+ "..\..\include\httpd.h"\
+ "..\..\include\os.h"\
+ "..\..\include\util_cfgtree.h"\
+ "..\..\include\util_filter.h"\
+ "..\..\srclib\apr-util\include\apr_buckets.h"\
+ "..\..\srclib\apr-util\include\apr_hooks.h"\
+ "..\..\srclib\apr-util\include\apr_optional.h"\
+ "..\..\srclib\apr-util\include\apr_optional_hooks.h"\
+ "..\..\srclib\apr-util\include\apr_queue.h"\
+ "..\..\srclib\apr-util\include\apr_uri.h"\
+ "..\..\srclib\apr-util\include\apu.h"\
+ "..\..\srclib\apr\include\apr.h"\
+ "..\..\srclib\apr\include\apr_allocator.h"\
+ "..\..\srclib\apr\include\apr_errno.h"\
+ "..\..\srclib\apr\include\apr_file_info.h"\
+ "..\..\srclib\apr\include\apr_file_io.h"\
+ "..\..\srclib\apr\include\apr_general.h"\
+ "..\..\srclib\apr\include\apr_hash.h"\
+ "..\..\srclib\apr\include\apr_inherit.h"\
+ "..\..\srclib\apr\include\apr_mmap.h"\
+ "..\..\srclib\apr\include\apr_network_io.h"\
+ "..\..\srclib\apr\include\apr_poll.h"\
+ "..\..\srclib\apr\include\apr_pools.h"\
+ "..\..\srclib\apr\include\apr_ring.h"\
+ "..\..\srclib\apr\include\apr_tables.h"\
+ "..\..\srclib\apr\include\apr_thread_mutex.h"\
+ "..\..\srclib\apr\include\apr_thread_proc.h"\
+ "..\..\srclib\apr\include\apr_time.h"\
+ "..\..\srclib\apr\include\apr_user.h"\
+ "..\..\srclib\apr\include\apr_want.h"\
+ ".\h2.h"\
+ ".\h2_config.h"\
+ ".\h2_conn.h"\
+ ".\h2_conn_io.h"\
+ ".\h2_ctx.h"\
+ ".\h2_filter.h"\
+ ".\h2_h2.h"\
+ ".\h2_mplx.h"\
+ ".\h2_private.h"\
+ ".\h2_push.h"\
+ ".\h2_request.h"\
+ ".\h2_session.h"\
+ ".\h2_stream.h"\
+ ".\h2_task.h"\
+ ".\h2_util.h"\
+
+
+./h2_switch.c : \
+ "..\..\include\ap_config.h"\
+ "..\..\include\ap_config_layout.h"\
+ "..\..\include\ap_expr.h"\
+ "..\..\include\ap_hooks.h"\
+ "..\..\include\ap_mmn.h"\
+ "..\..\include\ap_regex.h"\
+ "..\..\include\ap_release.h"\
+ "..\..\include\apache_noprobes.h"\
+ "..\..\include\http_config.h"\
+ "..\..\include\http_connection.h"\
+ "..\..\include\http_core.h"\
+ "..\..\include\http_log.h"\
+ "..\..\include\http_protocol.h"\
+ "..\..\include\httpd.h"\
+ "..\..\include\os.h"\
+ "..\..\include\util_cfgtree.h"\
+ "..\..\include\util_filter.h"\
+ "..\..\srclib\apr-util\include\apr_buckets.h"\
+ "..\..\srclib\apr-util\include\apr_hooks.h"\
+ "..\..\srclib\apr-util\include\apr_optional.h"\
+ "..\..\srclib\apr-util\include\apr_optional_hooks.h"\
+ "..\..\srclib\apr-util\include\apr_uri.h"\
+ "..\..\srclib\apr-util\include\apu.h"\
+ "..\..\srclib\apr\include\apr.h"\
+ "..\..\srclib\apr\include\apr_allocator.h"\
+ "..\..\srclib\apr\include\apr_dso.h"\
+ "..\..\srclib\apr\include\apr_errno.h"\
+ "..\..\srclib\apr\include\apr_file_info.h"\
+ "..\..\srclib\apr\include\apr_file_io.h"\
+ "..\..\srclib\apr\include\apr_general.h"\
+ "..\..\srclib\apr\include\apr_global_mutex.h"\
+ "..\..\srclib\apr\include\apr_hash.h"\
+ "..\..\srclib\apr\include\apr_inherit.h"\
+ "..\..\srclib\apr\include\apr_mmap.h"\
+ "..\..\srclib\apr\include\apr_network_io.h"\
+ "..\..\srclib\apr\include\apr_poll.h"\
+ "..\..\srclib\apr\include\apr_pools.h"\
+ "..\..\srclib\apr\include\apr_portable.h"\
+ "..\..\srclib\apr\include\apr_proc_mutex.h"\
+ "..\..\srclib\apr\include\apr_ring.h"\
+ "..\..\srclib\apr\include\apr_shm.h"\
+ "..\..\srclib\apr\include\apr_strings.h"\
+ "..\..\srclib\apr\include\apr_tables.h"\
+ "..\..\srclib\apr\include\apr_thread_mutex.h"\
+ "..\..\srclib\apr\include\apr_thread_proc.h"\
+ "..\..\srclib\apr\include\apr_time.h"\
+ "..\..\srclib\apr\include\apr_user.h"\
+ "..\..\srclib\apr\include\apr_want.h"\
+ ".\h2_config.h"\
+ ".\h2_conn.h"\
+ ".\h2_ctx.h"\
+ ".\h2_h2.h"\
+ ".\h2_private.h"\
+ ".\h2_switch.h"\
+
+
+./h2_task.c : \
+ "..\..\include\ap_config.h"\
+ "..\..\include\ap_config_layout.h"\
+ "..\..\include\ap_expr.h"\
+ "..\..\include\ap_hooks.h"\
+ "..\..\include\ap_mmn.h"\
+ "..\..\include\ap_mpm.h"\
+ "..\..\include\ap_regex.h"\
+ "..\..\include\ap_release.h"\
+ "..\..\include\apache_noprobes.h"\
+ "..\..\include\http_config.h"\
+ "..\..\include\http_connection.h"\
+ "..\..\include\http_core.h"\
+ "..\..\include\http_log.h"\
+ "..\..\include\http_protocol.h"\
+ "..\..\include\http_request.h"\
+ "..\..\include\http_vhost.h"\
+ "..\..\include\httpd.h"\
+ "..\..\include\mod_core.h"\
+ "..\..\include\os.h"\
+ "..\..\include\scoreboard.h"\
+ "..\..\include\util_cfgtree.h"\
+ "..\..\include\util_filter.h"\
+ "..\..\srclib\apr-util\include\apr_buckets.h"\
+ "..\..\srclib\apr-util\include\apr_hooks.h"\
+ "..\..\srclib\apr-util\include\apr_optional.h"\
+ "..\..\srclib\apr-util\include\apr_optional_hooks.h"\
+ "..\..\srclib\apr-util\include\apr_queue.h"\
+ "..\..\srclib\apr-util\include\apr_uri.h"\
+ "..\..\srclib\apr-util\include\apu.h"\
+ "..\..\srclib\apr\include\apr.h"\
+ "..\..\srclib\apr\include\apr_allocator.h"\
+ "..\..\srclib\apr\include\apr_atomic.h"\
+ "..\..\srclib\apr\include\apr_dso.h"\
+ "..\..\srclib\apr\include\apr_errno.h"\
+ "..\..\srclib\apr\include\apr_file_info.h"\
+ "..\..\srclib\apr\include\apr_file_io.h"\
+ "..\..\srclib\apr\include\apr_general.h"\
+ "..\..\srclib\apr\include\apr_global_mutex.h"\
+ "..\..\srclib\apr\include\apr_hash.h"\
+ "..\..\srclib\apr\include\apr_inherit.h"\
+ "..\..\srclib\apr\include\apr_mmap.h"\
+ "..\..\srclib\apr\include\apr_network_io.h"\
+ "..\..\srclib\apr\include\apr_poll.h"\
+ "..\..\srclib\apr\include\apr_pools.h"\
+ "..\..\srclib\apr\include\apr_portable.h"\
+ "..\..\srclib\apr\include\apr_proc_mutex.h"\
+ "..\..\srclib\apr\include\apr_ring.h"\
+ "..\..\srclib\apr\include\apr_shm.h"\
+ "..\..\srclib\apr\include\apr_strings.h"\
+ "..\..\srclib\apr\include\apr_tables.h"\
+ "..\..\srclib\apr\include\apr_thread_cond.h"\
+ "..\..\srclib\apr\include\apr_thread_mutex.h"\
+ "..\..\srclib\apr\include\apr_thread_proc.h"\
+ "..\..\srclib\apr\include\apr_time.h"\
+ "..\..\srclib\apr\include\apr_user.h"\
+ "..\..\srclib\apr\include\apr_want.h"\
+ ".\h2.h"\
+ ".\h2_config.h"\
+ ".\h2_conn.h"\
+ ".\h2_conn_io.h"\
+ ".\h2_ctx.h"\
+ ".\h2_from_h1.h"\
+ ".\h2_h2.h"\
+ ".\h2_mplx.h"\
+ ".\h2_private.h"\
+ ".\h2_request.h"\
+ ".\h2_session.h"\
+ ".\h2_stream.h"\
+ ".\h2_task.h"\
+
+
+./h2_task_input.c : \
+ "..\..\include\ap_config.h"\
+ "..\..\include\ap_config_layout.h"\
+ "..\..\include\ap_expr.h"\
+ "..\..\include\ap_hooks.h"\
+ "..\..\include\ap_mmn.h"\
+ "..\..\include\ap_regex.h"\
+ "..\..\include\ap_release.h"\
+ "..\..\include\apache_noprobes.h"\
+ "..\..\include\http_config.h"\
+ "..\..\include\http_connection.h"\
+ "..\..\include\http_core.h"\
+ "..\..\include\http_log.h"\
+ "..\..\include\httpd.h"\
+ "..\..\include\os.h"\
+ "..\..\include\util_cfgtree.h"\
+ "..\..\include\util_filter.h"\
+ "..\..\srclib\apr-util\include\apr_buckets.h"\
+ "..\..\srclib\apr-util\include\apr_hooks.h"\
+ "..\..\srclib\apr-util\include\apr_optional.h"\
+ "..\..\srclib\apr-util\include\apr_optional_hooks.h"\
+ "..\..\srclib\apr-util\include\apr_queue.h"\
+ "..\..\srclib\apr-util\include\apr_uri.h"\
+ "..\..\srclib\apr-util\include\apu.h"\
+ "..\..\srclib\apr\include\apr.h"\
+ "..\..\srclib\apr\include\apr_allocator.h"\
+ "..\..\srclib\apr\include\apr_errno.h"\
+ "..\..\srclib\apr\include\apr_file_info.h"\
+ "..\..\srclib\apr\include\apr_file_io.h"\
+ "..\..\srclib\apr\include\apr_general.h"\
+ "..\..\srclib\apr\include\apr_hash.h"\
+ "..\..\srclib\apr\include\apr_inherit.h"\
+ "..\..\srclib\apr\include\apr_mmap.h"\
+ "..\..\srclib\apr\include\apr_network_io.h"\
+ "..\..\srclib\apr\include\apr_poll.h"\
+ "..\..\srclib\apr\include\apr_pools.h"\
+ "..\..\srclib\apr\include\apr_ring.h"\
+ "..\..\srclib\apr\include\apr_tables.h"\
+ "..\..\srclib\apr\include\apr_thread_mutex.h"\
+ "..\..\srclib\apr\include\apr_thread_proc.h"\
+ "..\..\srclib\apr\include\apr_time.h"\
+ "..\..\srclib\apr\include\apr_user.h"\
+ "..\..\srclib\apr\include\apr_want.h"\
+ ".\h2.h"\
+ ".\h2_conn.h"\
+ ".\h2_conn_io.h"\
+ ".\h2_mplx.h"\
+ ".\h2_private.h"\
+ ".\h2_request.h"\
+ ".\h2_session.h"\
+ ".\h2_stream.h"\
+ ".\h2_task.h"\
+ ".\h2_util.h"\
+
+
+./h2_task_output.c : \
+ "..\..\include\ap_config.h"\
+ "..\..\include\ap_config_layout.h"\
+ "..\..\include\ap_expr.h"\
+ "..\..\include\ap_hooks.h"\
+ "..\..\include\ap_mmn.h"\
+ "..\..\include\ap_regex.h"\
+ "..\..\include\ap_release.h"\
+ "..\..\include\apache_noprobes.h"\
+ "..\..\include\http_config.h"\
+ "..\..\include\http_connection.h"\
+ "..\..\include\http_core.h"\
+ "..\..\include\http_log.h"\
+ "..\..\include\http_request.h"\
+ "..\..\include\httpd.h"\
+ "..\..\include\os.h"\
+ "..\..\include\util_cfgtree.h"\
+ "..\..\include\util_filter.h"\
+ "..\..\srclib\apr-util\include\apr_buckets.h"\
+ "..\..\srclib\apr-util\include\apr_hooks.h"\
+ "..\..\srclib\apr-util\include\apr_optional.h"\
+ "..\..\srclib\apr-util\include\apr_optional_hooks.h"\
+ "..\..\srclib\apr-util\include\apr_queue.h"\
+ "..\..\srclib\apr-util\include\apr_uri.h"\
+ "..\..\srclib\apr-util\include\apu.h"\
+ "..\..\srclib\apr\include\apr.h"\
+ "..\..\srclib\apr\include\apr_allocator.h"\
+ "..\..\srclib\apr\include\apr_errno.h"\
+ "..\..\srclib\apr\include\apr_file_info.h"\
+ "..\..\srclib\apr\include\apr_file_io.h"\
+ "..\..\srclib\apr\include\apr_general.h"\
+ "..\..\srclib\apr\include\apr_hash.h"\
+ "..\..\srclib\apr\include\apr_inherit.h"\
+ "..\..\srclib\apr\include\apr_mmap.h"\
+ "..\..\srclib\apr\include\apr_network_io.h"\
+ "..\..\srclib\apr\include\apr_poll.h"\
+ "..\..\srclib\apr\include\apr_pools.h"\
+ "..\..\srclib\apr\include\apr_ring.h"\
+ "..\..\srclib\apr\include\apr_tables.h"\
+ "..\..\srclib\apr\include\apr_thread_cond.h"\
+ "..\..\srclib\apr\include\apr_thread_mutex.h"\
+ "..\..\srclib\apr\include\apr_thread_proc.h"\
+ "..\..\srclib\apr\include\apr_time.h"\
+ "..\..\srclib\apr\include\apr_user.h"\
+ "..\..\srclib\apr\include\apr_want.h"\
+ ".\h2.h"\
+ ".\h2_conn.h"\
+ ".\h2_conn_io.h"\
+ ".\h2_from_h1.h"\
+ ".\h2_mplx.h"\
+ ".\h2_private.h"\
+ ".\h2_request.h"\
+ ".\h2_session.h"\
+ ".\h2_stream.h"\
+ ".\h2_task.h"\
+ ".\h2_util.h"\
+
+
+./h2_util.c : \
+ "..\..\include\ap_config.h"\
+ "..\..\include\ap_config_layout.h"\
+ "..\..\include\ap_expr.h"\
+ "..\..\include\ap_hooks.h"\
+ "..\..\include\ap_mmn.h"\
+ "..\..\include\ap_regex.h"\
+ "..\..\include\ap_release.h"\
+ "..\..\include\apache_noprobes.h"\
+ "..\..\include\http_config.h"\
+ "..\..\include\http_core.h"\
+ "..\..\include\http_log.h"\
+ "..\..\include\http_request.h"\
+ "..\..\include\httpd.h"\
+ "..\..\include\os.h"\
+ "..\..\include\util_cfgtree.h"\
+ "..\..\include\util_filter.h"\
+ "..\..\srclib\apr-util\include\apr_buckets.h"\
+ "..\..\srclib\apr-util\include\apr_hooks.h"\
+ "..\..\srclib\apr-util\include\apr_optional.h"\
+ "..\..\srclib\apr-util\include\apr_optional_hooks.h"\
+ "..\..\srclib\apr-util\include\apr_uri.h"\
+ "..\..\srclib\apr-util\include\apu.h"\
+ "..\..\srclib\apr\include\apr.h"\
+ "..\..\srclib\apr\include\apr_allocator.h"\
+ "..\..\srclib\apr\include\apr_errno.h"\
+ "..\..\srclib\apr\include\apr_file_info.h"\
+ "..\..\srclib\apr\include\apr_file_io.h"\
+ "..\..\srclib\apr\include\apr_general.h"\
+ "..\..\srclib\apr\include\apr_hash.h"\
+ "..\..\srclib\apr\include\apr_inherit.h"\
+ "..\..\srclib\apr\include\apr_mmap.h"\
+ "..\..\srclib\apr\include\apr_network_io.h"\
+ "..\..\srclib\apr\include\apr_poll.h"\
+ "..\..\srclib\apr\include\apr_pools.h"\
+ "..\..\srclib\apr\include\apr_ring.h"\
+ "..\..\srclib\apr\include\apr_strings.h"\
+ "..\..\srclib\apr\include\apr_tables.h"\
+ "..\..\srclib\apr\include\apr_thread_mutex.h"\
+ "..\..\srclib\apr\include\apr_thread_proc.h"\
+ "..\..\srclib\apr\include\apr_time.h"\
+ "..\..\srclib\apr\include\apr_user.h"\
+ "..\..\srclib\apr\include\apr_want.h"\
+ ".\h2.h"\
+ ".\h2_private.h"\
+ ".\h2_request.h"\
+ ".\h2_util.h"\
+
+
+./h2_workers.c : \
+ "..\..\include\ap_config.h"\
+ "..\..\include\ap_config_layout.h"\
+ "..\..\include\ap_expr.h"\
+ "..\..\include\ap_hooks.h"\
+ "..\..\include\ap_mmn.h"\
+ "..\..\include\ap_mpm.h"\
+ "..\..\include\ap_regex.h"\
+ "..\..\include\ap_release.h"\
+ "..\..\include\apache_noprobes.h"\
+ "..\..\include\http_config.h"\
+ "..\..\include\http_core.h"\
+ "..\..\include\http_log.h"\
+ "..\..\include\httpd.h"\
+ "..\..\include\mpm_common.h"\
+ "..\..\include\os.h"\
+ "..\..\include\scoreboard.h"\
+ "..\..\include\util_cfgtree.h"\
+ "..\..\include\util_filter.h"\
+ "..\..\srclib\apr-util\include\apr_buckets.h"\
+ "..\..\srclib\apr-util\include\apr_hooks.h"\
+ "..\..\srclib\apr-util\include\apr_optional.h"\
+ "..\..\srclib\apr-util\include\apr_optional_hooks.h"\
+ "..\..\srclib\apr-util\include\apr_queue.h"\
+ "..\..\srclib\apr-util\include\apr_uri.h"\
+ "..\..\srclib\apr-util\include\apu.h"\
+ "..\..\srclib\apr\include\apr.h"\
+ "..\..\srclib\apr\include\apr_allocator.h"\
+ "..\..\srclib\apr\include\apr_atomic.h"\
+ "..\..\srclib\apr\include\apr_dso.h"\
+ "..\..\srclib\apr\include\apr_errno.h"\
+ "..\..\srclib\apr\include\apr_file_info.h"\
+ "..\..\srclib\apr\include\apr_file_io.h"\
+ "..\..\srclib\apr\include\apr_general.h"\
+ "..\..\srclib\apr\include\apr_global_mutex.h"\
+ "..\..\srclib\apr\include\apr_hash.h"\
+ "..\..\srclib\apr\include\apr_inherit.h"\
+ "..\..\srclib\apr\include\apr_mmap.h"\
+ "..\..\srclib\apr\include\apr_network_io.h"\
+ "..\..\srclib\apr\include\apr_poll.h"\
+ "..\..\srclib\apr\include\apr_pools.h"\
+ "..\..\srclib\apr\include\apr_portable.h"\
+ "..\..\srclib\apr\include\apr_proc_mutex.h"\
+ "..\..\srclib\apr\include\apr_ring.h"\
+ "..\..\srclib\apr\include\apr_shm.h"\
+ "..\..\srclib\apr\include\apr_tables.h"\
+ "..\..\srclib\apr\include\apr_thread_cond.h"\
+ "..\..\srclib\apr\include\apr_thread_mutex.h"\
+ "..\..\srclib\apr\include\apr_thread_proc.h"\
+ "..\..\srclib\apr\include\apr_time.h"\
+ "..\..\srclib\apr\include\apr_user.h"\
+ "..\..\srclib\apr\include\apr_want.h"\
+ ".\h2.h"\
+ ".\h2_mplx.h"\
+ ".\h2_private.h"\
+ ".\h2_task.h"\
+ ".\h2_workers.h"\
+
+
+..\..\build\win32\httpd.rc : \
+ "..\..\include\ap_release.h"\
+
+
+./mod_http2.c : \
+ "..\..\include\ap_config.h"\
+ "..\..\include\ap_config_layout.h"\
+ "..\..\include\ap_expr.h"\
+ "..\..\include\ap_hooks.h"\
+ "..\..\include\ap_mmn.h"\
+ "..\..\include\ap_regex.h"\
+ "..\..\include\ap_release.h"\
+ "..\..\include\apache_noprobes.h"\
+ "..\..\include\http_config.h"\
+ "..\..\include\http_core.h"\
+ "..\..\include\http_log.h"\
+ "..\..\include\http_protocol.h"\
+ "..\..\include\http_request.h"\
+ "..\..\include\httpd.h"\
+ "..\..\include\os.h"\
+ "..\..\include\util_cfgtree.h"\
+ "..\..\include\util_filter.h"\
+ "..\..\srclib\apr-util\include\apr_buckets.h"\
+ "..\..\srclib\apr-util\include\apr_hooks.h"\
+ "..\..\srclib\apr-util\include\apr_optional.h"\
+ "..\..\srclib\apr-util\include\apr_optional_hooks.h"\
+ "..\..\srclib\apr-util\include\apr_queue.h"\
+ "..\..\srclib\apr-util\include\apr_uri.h"\
+ "..\..\srclib\apr-util\include\apu.h"\
+ "..\..\srclib\apr\include\apr.h"\
+ "..\..\srclib\apr\include\apr_allocator.h"\
+ "..\..\srclib\apr\include\apr_dso.h"\
+ "..\..\srclib\apr\include\apr_errno.h"\
+ "..\..\srclib\apr\include\apr_file_info.h"\
+ "..\..\srclib\apr\include\apr_file_io.h"\
+ "..\..\srclib\apr\include\apr_general.h"\
+ "..\..\srclib\apr\include\apr_global_mutex.h"\
+ "..\..\srclib\apr\include\apr_hash.h"\
+ "..\..\srclib\apr\include\apr_inherit.h"\
+ "..\..\srclib\apr\include\apr_mmap.h"\
+ "..\..\srclib\apr\include\apr_network_io.h"\
+ "..\..\srclib\apr\include\apr_poll.h"\
+ "..\..\srclib\apr\include\apr_pools.h"\
+ "..\..\srclib\apr\include\apr_portable.h"\
+ "..\..\srclib\apr\include\apr_proc_mutex.h"\
+ "..\..\srclib\apr\include\apr_ring.h"\
+ "..\..\srclib\apr\include\apr_shm.h"\
+ "..\..\srclib\apr\include\apr_strings.h"\
+ "..\..\srclib\apr\include\apr_tables.h"\
+ "..\..\srclib\apr\include\apr_thread_mutex.h"\
+ "..\..\srclib\apr\include\apr_thread_proc.h"\
+ "..\..\srclib\apr\include\apr_time.h"\
+ "..\..\srclib\apr\include\apr_user.h"\
+ "..\..\srclib\apr\include\apr_want.h"\
+ ".\h2.h"\
+ ".\h2_alt_svc.h"\
+ ".\h2_config.h"\
+ ".\h2_conn.h"\
+ ".\h2_conn_io.h"\
+ ".\h2_ctx.h"\
+ ".\h2_filter.h"\
+ ".\h2_h2.h"\
+ ".\h2_mplx.h"\
+ ".\h2_push.h"\
+ ".\h2_request.h"\
+ ".\h2_session.h"\
+ ".\h2_stream.h"\
+ ".\h2_switch.h"\
+ ".\h2_task.h"\
+ ".\h2_version.h"\
+ ".\mod_http2.h"\
+
diff --git a/modules/http2/mod_http2.dsp b/modules/http2/mod_http2.dsp
new file mode 100644
index 0000000..d9ff222
--- /dev/null
+++ b/modules/http2/mod_http2.dsp
@@ -0,0 +1,187 @@
+# Microsoft Developer Studio Project File - Name="mod_http2" - Package Owner=<4>
+# Microsoft Developer Studio Generated Build File, Format Version 6.00
+# ** DO NOT EDIT **
+
+# TARGTYPE "Win32 (x86) Dynamic-Link Library" 0x0102
+
+CFG=mod_http2 - Win32 Release
+!MESSAGE This is not a valid makefile. To build this project using NMAKE,
+!MESSAGE use the Export Makefile command and run
+!MESSAGE
+!MESSAGE NMAKE /f "mod_http2.mak".
+!MESSAGE
+!MESSAGE You can specify a configuration when running NMAKE
+!MESSAGE by defining the macro CFG on the command line. For example:
+!MESSAGE
+!MESSAGE NMAKE /f "mod_http2.mak" CFG="mod_http2 - Win32 Release"
+!MESSAGE
+!MESSAGE Possible choices for configuration are:
+!MESSAGE
+!MESSAGE "mod_http2 - Win32 Release" (based on "Win32 (x86) Dynamic-Link Library")
+!MESSAGE "mod_http2 - Win32 Debug" (based on "Win32 (x86) Dynamic-Link Library")
+!MESSAGE
+
+# Begin Project
+# PROP AllowPerConfigDependencies 0
+# PROP Scc_ProjName ""
+# PROP Scc_LocalPath ""
+CPP=cl.exe
+MTL=midl.exe
+RSC=rc.exe
+
+!IF "$(CFG)" == "mod_http2 - Win32 Release"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "Release"
+# PROP BASE Intermediate_Dir "Release"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release"
+# PROP Intermediate_Dir "Release"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MD /W3 /O2 /D "WIN32" /D "NDEBUG" /D "_WINDOWS" /D "ssize_t=long" /FD /c
+# ADD CPP /nologo /MD /W3 /O2 /Oy- /Zi /I "../ssl" /I "../../include" /I "../../srclib/apr/include" /I "../../srclib/apr-util/include" /I "../../srclib/nghttp2/lib/includes" /D "NDEBUG" /D "WIN32" /D "_WINDOWS" /D "ssize_t=long" /Fd"Release\mod_http2_src" /FD /c
+# ADD BASE MTL /nologo /D "NDEBUG" /win32
+# ADD MTL /nologo /D "NDEBUG" /mktyplib203 /win32
+# ADD BASE RSC /l 0x409 /d "NDEBUG"
+# ADD RSC /l 0x409 /fo"Release/mod_http2.res" /i "../../include" /i "../../srclib/apr/include" /d "NDEBUG" /d BIN_NAME="mod_http2.so" /d LONG_NAME="http2_module for Apache"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib nghttp2.lib /nologo /subsystem:windows /dll /libpath:"..\..\srclib\nghttp2\lib\MSVC_obj" /out:".\Release\mod_http2.so" /base:@..\..\os\win32\BaseAddr.ref,mod_http2.so
+# ADD LINK32 kernel32.lib nghttp2.lib /nologo /subsystem:windows /dll /libpath:"..\..\srclib\nghttp2\lib\MSVC_obj" /incremental:no /debug /out:".\Release\mod_http2.so" /base:@..\..\os\win32\BaseAddr.ref,mod_http2.so /opt:ref
+# Begin Special Build Tool
+TargetPath=.\Release\mod_http2.so
+SOURCE="$(InputPath)"
+PostBuild_Desc=Embed .manifest
+PostBuild_Cmds=if exist $(TargetPath).manifest mt.exe -manifest $(TargetPath).manifest -outputresource:$(TargetPath);2
+# End Special Build Tool
+
+!ELSEIF "$(CFG)" == "mod_http2 - Win32 Debug"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "Debug"
+# PROP BASE Intermediate_Dir "Debug"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug"
+# PROP Intermediate_Dir "Debug"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MDd /W3 /EHsc /Zi /Od /D "WIN32" /D "_DEBUG" /D "_WINDOWS" /D "ssize_t=long" /FD /c
+# ADD CPP /nologo /MDd /W3 /EHsc /Zi /Od /I "../ssl" /I "../../include" /I "../../srclib/apr/include" /I "../../srclib/apr-util/include" /I "../../srclib/nghttp2/lib/includes" /D "_DEBUG" /D "WIN32" /D "_WINDOWS" /D "ssize_t=long" /Fd"Debug\mod_http2_src" /FD /c
+# ADD BASE MTL /nologo /D "_DEBUG" /win32
+# ADD MTL /nologo /D "_DEBUG" /mktyplib203 /win32
+# ADD BASE RSC /l 0x409 /d "_DEBUG"
+# ADD RSC /l 0x409 /fo"Debug/mod_http2.res" /i "../../include" /i "../../srclib/apr/include" /d "_DEBUG" /d BIN_NAME="mod_http2.so" /d LONG_NAME="http2_module for Apache"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib nghttp2d.lib /nologo /subsystem:windows /dll /libpath:"..\..\srclib\nghttp2\lib\MSVC_obj" /incremental:no /debug /out:".\Debug\mod_http2.so" /base:@..\..\os\win32\BaseAddr.ref,mod_http2.so
+# ADD LINK32 kernel32.lib nghttp2d.lib /nologo /subsystem:windows /dll /libpath:"..\..\srclib\nghttp2\lib\MSVC_obj" /incremental:no /debug /out:".\Debug\mod_http2.so" /base:@..\..\os\win32\BaseAddr.ref,mod_http2.so
+# Begin Special Build Tool
+TargetPath=.\Debug\mod_http2.so
+SOURCE="$(InputPath)"
+PostBuild_Desc=Embed .manifest
+PostBuild_Cmds=if exist $(TargetPath).manifest mt.exe -manifest $(TargetPath).manifest -outputresource:$(TargetPath);2
+# End Special Build Tool
+
+!ENDIF
+
+# Begin Target
+
+# Name "mod_http2 - Win32 Release"
+# Name "mod_http2 - Win32 Debug"
+# Begin Source File
+
+SOURCE=./h2_bucket_beam.c
+# End Source File
+# Begin Source File
+
+SOURCE=./h2_bucket_eos.c
+# End Source File
+# Begin Source File
+
+SOURCE=./h2_c1.c
+# End Source File
+# Begin Source File
+
+SOURCE=./h2_c1_io.c
+# End Source File
+# Begin Source File
+
+SOURCE=./h2_c2.c
+# End Source File
+# Begin Source File
+
+SOURCE=./h2_c2_filter.c
+# End Source File
+# Begin Source File
+
+SOURCE=./h2_config.c
+# End Source File
+# Begin Source File
+
+SOURCE=./h2_conn_ctx.c
+# End Source File
+# Begin Source File
+
+SOURCE=./h2_headers.c
+# End Source File
+# Begin Source File
+
+SOURCE=./h2_mplx.c
+# End Source File
+# Begin Source File
+
+SOURCE=./h2_protocol.c
+# End Source File
+# Begin Source File
+
+SOURCE=./h2_push.c
+# End Source File
+# Begin Source File
+
+SOURCE=./h2_request.c
+# End Source File
+# Begin Source File
+
+SOURCE=./h2_session.c
+# End Source File
+# Begin Source File
+
+SOURCE=./h2_stream.c
+# End Source File
+# Begin Source File
+
+SOURCE=./h2_switch.c
+# End Source File
+# Begin Source File
+
+SOURCE=./h2_util.c
+# End Source File
+# Begin Source File
+
+SOURCE=./h2_workers.c
+# End Source File
+# Begin Source File
+
+SOURCE=./mod_http2.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\..\build\win32\httpd.rc
+# End Source File
+# End Target
+# End Project
diff --git a/modules/http2/mod_http2.h b/modules/http2/mod_http2.h
new file mode 100644
index 0000000..f68edcd
--- /dev/null
+++ b/modules/http2/mod_http2.h
@@ -0,0 +1,79 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __MOD_HTTP2_H__
+#define __MOD_HTTP2_H__
+
+/** The http2_var_lookup() optional function retrieves HTTP2 environment
+ * variables. */
+APR_DECLARE_OPTIONAL_FN(char *,
+ http2_var_lookup, (apr_pool_t *, server_rec *,
+ conn_rec *, request_rec *, char *));
+
+/** An optional function which returns non-zero if the given connection
+ * or its master connection is using HTTP/2. */
+APR_DECLARE_OPTIONAL_FN(int,
+ http2_is_h2, (conn_rec *));
+
+APR_DECLARE_OPTIONAL_FN(void,
+ http2_get_num_workers, (server_rec *s,
+ int *minw, int *max));
+
+/*******************************************************************************
+ * START HTTP/2 request engines (DEPRECATED)
+ ******************************************************************************/
+
+/* The following functions were introduced for the experimental mod_proxy_http2
+ * support, but have since been abandoned.
+ * They are still declared here for backward compatibility, in case someone
+ * tries to build an old mod_proxy_http2 against this header, but they will
+ * be removed completely at some point in the future.
+ */
+
+struct apr_thread_cond_t;
+typedef struct h2_req_engine h2_req_engine;
+typedef void http2_output_consumed(void *ctx, conn_rec *c, apr_off_t consumed);
+
+typedef apr_status_t http2_req_engine_init(h2_req_engine *engine,
+ const char *id,
+ const char *type,
+ apr_pool_t *pool,
+ apr_size_t req_buffer_size,
+ request_rec *r,
+ http2_output_consumed **pconsumed,
+ void **pbaton);
+
+APR_DECLARE_OPTIONAL_FN(apr_status_t,
+ http2_req_engine_push, (const char *engine_type,
+ request_rec *r,
+ http2_req_engine_init *einit));
+
+APR_DECLARE_OPTIONAL_FN(apr_status_t,
+ http2_req_engine_pull, (h2_req_engine *engine,
+ apr_read_type_e block,
+ int capacity,
+ request_rec **pr));
+APR_DECLARE_OPTIONAL_FN(void,
+ http2_req_engine_done, (h2_req_engine *engine,
+ conn_rec *rconn,
+ apr_status_t status));
+
+
+/*******************************************************************************
+ * END HTTP/2 request engines (DEPRECATED)
+ ******************************************************************************/
+
+#endif
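
The three optional functions declared above are retrieved at run time through APR's optional-function mechanism, so a caller degrades gracefully (the pointers simply stay NULL) when mod_http2 is not loaded. A minimal consumer sketch follows; it is not part of the patch, and the module name h2_example, the choice of the fixups hook, the MY_OVER_H2 variable and the "HTTP2" lookup key are assumptions made purely for illustration.

/* Minimal sketch of a module consuming the mod_http2 optional functions
 * (assumptions as noted above). */
#include "httpd.h"
#include "http_config.h"
#include "http_request.h"
#include "apr_optional.h"
#include "mod_http2.h"

static APR_OPTIONAL_FN_TYPE(http2_is_h2)      *my_is_h2;
static APR_OPTIONAL_FN_TYPE(http2_var_lookup) *my_h2_var;

/* Look the functions up after all modules have registered theirs;
 * post_config is a safe place for that. */
static int my_post_config(apr_pool_t *pconf, apr_pool_t *plog,
                          apr_pool_t *ptemp, server_rec *s)
{
    (void)pconf; (void)plog; (void)ptemp; (void)s;
    my_is_h2  = APR_RETRIEVE_OPTIONAL_FN(http2_is_h2);
    my_h2_var = APR_RETRIEVE_OPTIONAL_FN(http2_var_lookup);
    return OK;
}

/* Both pointers remain NULL when mod_http2 is absent, so always test them. */
static int my_fixups(request_rec *r)
{
    if (my_is_h2 && my_is_h2(r->connection)) {
        char *val = my_h2_var
            ? my_h2_var(r->pool, r->server, r->connection, r, (char *)"HTTP2")
            : NULL;
        /* expose the information to CGI/SSI via the environment */
        apr_table_setn(r->subprocess_env, "MY_OVER_H2", val ? val : "on");
    }
    return DECLINED;
}

static void my_register_hooks(apr_pool_t *p)
{
    (void)p;
    ap_hook_post_config(my_post_config, NULL, NULL, APR_HOOK_MIDDLE);
    ap_hook_fixups(my_fixups, NULL, NULL, APR_HOOK_MIDDLE);
}

AP_DECLARE_MODULE(h2_example) = {
    STANDARD20_MODULE_STUFF,
    NULL, NULL, NULL, NULL, NULL,
    my_register_hooks
};
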
diff --git a/modules/http2/mod_http2.mak b/modules/http2/mod_http2.mak
new file mode 100644
index 0000000..26611c7
--- /dev/null
+++ b/modules/http2/mod_http2.mak
@@ -0,0 +1,533 @@
+# Microsoft Developer Studio Generated NMAKE File, Based on mod_http2.dsp
+!IF "$(CFG)" == ""
+CFG=mod_http2 - Win32 Release
+!MESSAGE No configuration specified. Defaulting to mod_http2 - Win32 Release.
+!ENDIF
+
+!IF "$(CFG)" != "mod_http2 - Win32 Release" && "$(CFG)" != "mod_http2 - Win32 Debug"
+!MESSAGE Invalid configuration "$(CFG)" specified.
+!MESSAGE You can specify a configuration when running NMAKE
+!MESSAGE by defining the macro CFG on the command line. For example:
+!MESSAGE
+!MESSAGE NMAKE /f "mod_http2.mak" CFG="mod_http2 - Win32 Release"
+!MESSAGE
+!MESSAGE Possible choices for configuration are:
+!MESSAGE
+!MESSAGE "mod_http2 - Win32 Release" (based on "Win32 (x86) Dynamic-Link Library")
+!MESSAGE "mod_http2 - Win32 Debug" (based on "Win32 (x86) Dynamic-Link Library")
+!MESSAGE
+!ERROR An invalid configuration is specified.
+!ENDIF
+
+!IF "$(OS)" == "Windows_NT"
+NULL=
+!ELSE
+NULL=nul
+!ENDIF
+
+!IF "$(CFG)" == "mod_http2 - Win32 Release"
+
+OUTDIR=.\Release
+INTDIR=.\Release
+DS_POSTBUILD_DEP=$(INTDIR)\postbld.dep
+# Begin Custom Macros
+OutDir=.\Release
+# End Custom Macros
+
+!IF "$(RECURSE)" == "0"
+
+ALL : "$(OUTDIR)\mod_http2.so" "$(DS_POSTBUILD_DEP)"
+
+!ELSE
+
+ALL : "libhttpd - Win32 Release" "libaprutil - Win32 Release" "libapr - Win32 Release" "$(OUTDIR)\mod_http2.so" "$(DS_POSTBUILD_DEP)"
+
+!ENDIF
+
+!IF "$(RECURSE)" == "1"
+CLEAN :"libapr - Win32 ReleaseCLEAN" "libaprutil - Win32 ReleaseCLEAN" "libhttpd - Win32 ReleaseCLEAN"
+!ELSE
+CLEAN :
+!ENDIF
+ -@erase "$(INTDIR)\h2_alt_svc.obj"
+ -@erase "$(INTDIR)\h2_bucket_beam.obj"
+ -@erase "$(INTDIR)\h2_bucket_eos.obj"
+ -@erase "$(INTDIR)\h2_config.obj"
+ -@erase "$(INTDIR)\h2_conn.obj"
+ -@erase "$(INTDIR)\h2_conn_io.obj"
+ -@erase "$(INTDIR)\h2_ctx.obj"
+ -@erase "$(INTDIR)\h2_filter.obj"
+ -@erase "$(INTDIR)\h2_from_h1.obj"
+ -@erase "$(INTDIR)\h2_h2.obj"
+ -@erase "$(INTDIR)\h2_headers.obj"
+ -@erase "$(INTDIR)\h2_mplx.obj"
+ -@erase "$(INTDIR)\h2_push.obj"
+ -@erase "$(INTDIR)\h2_request.obj"
+ -@erase "$(INTDIR)\h2_session.obj"
+ -@erase "$(INTDIR)\h2_stream.obj"
+ -@erase "$(INTDIR)\h2_switch.obj"
+ -@erase "$(INTDIR)\h2_task.obj"
+ -@erase "$(INTDIR)\h2_util.obj"
+ -@erase "$(INTDIR)\h2_workers.obj"
+ -@erase "$(INTDIR)\mod_http2.obj"
+ -@erase "$(INTDIR)\mod_http2.res"
+ -@erase "$(INTDIR)\mod_http2_src.idb"
+ -@erase "$(INTDIR)\mod_http2_src.pdb"
+ -@erase "$(OUTDIR)\mod_http2.exp"
+ -@erase "$(OUTDIR)\mod_http2.lib"
+ -@erase "$(OUTDIR)\mod_http2.pdb"
+ -@erase "$(OUTDIR)\mod_http2.so"
+
+"$(OUTDIR)" :
+ if not exist "$(OUTDIR)/$(NULL)" mkdir "$(OUTDIR)"
+
+CPP=cl.exe
+CPP_PROJ=/nologo /MD /W3 /Zi /O2 /Oy- /I "../ssl" /I "../../include" /I "../../srclib/apr/include" /I "../../srclib/apr-util/include" /I "../../srclib/nghttp2/lib/includes" /D "NDEBUG" /D "WIN32" /D "_WINDOWS" /D ssize_t=long /Fo"$(INTDIR)\\" /Fd"$(INTDIR)\mod_http2_src" /FD /c
+
+.c{$(INTDIR)}.obj::
+ $(CPP) @<<
+ $(CPP_PROJ) $<
+<<
+
+.cpp{$(INTDIR)}.obj::
+ $(CPP) @<<
+ $(CPP_PROJ) $<
+<<
+
+.cxx{$(INTDIR)}.obj::
+ $(CPP) @<<
+ $(CPP_PROJ) $<
+<<
+
+.c{$(INTDIR)}.sbr::
+ $(CPP) @<<
+ $(CPP_PROJ) $<
+<<
+
+.cpp{$(INTDIR)}.sbr::
+ $(CPP) @<<
+ $(CPP_PROJ) $<
+<<
+
+.cxx{$(INTDIR)}.sbr::
+ $(CPP) @<<
+ $(CPP_PROJ) $<
+<<
+
+MTL=midl.exe
+MTL_PROJ=/nologo /D "NDEBUG" /mktyplib203 /win32
+RSC=rc.exe
+RSC_PROJ=/l 0x409 /fo"$(INTDIR)\mod_http2.res" /i "../../include" /i "../../srclib/apr/include" /d "NDEBUG" /d BIN_NAME="mod_http2.so" /d LONG_NAME="http2_module for Apache"
+BSC32=bscmake.exe
+BSC32_FLAGS=/nologo /o"$(OUTDIR)\mod_http2.bsc"
+BSC32_SBRS= \
+
+LINK32=link.exe
+LINK32_FLAGS=kernel32.lib nghttp2.lib /nologo /subsystem:windows /dll /incremental:no /pdb:"$(OUTDIR)\mod_http2.pdb" /debug /out:"$(OUTDIR)\mod_http2.so" /implib:"$(OUTDIR)\mod_http2.lib" /libpath:"..\..\srclib\nghttp2\lib\MSVC_obj" /base:@..\..\os\win32\BaseAddr.ref,mod_http2.so /opt:ref
+LINK32_OBJS= \
+ "$(INTDIR)\h2_alt_svc.obj" \
+ "$(INTDIR)\h2_bucket_beam.obj" \
+ "$(INTDIR)\h2_bucket_eos.obj" \
+ "$(INTDIR)\h2_config.obj" \
+ "$(INTDIR)\h2_conn.obj" \
+ "$(INTDIR)\h2_conn_io.obj" \
+ "$(INTDIR)\h2_ctx.obj" \
+ "$(INTDIR)\h2_filter.obj" \
+ "$(INTDIR)\h2_from_h1.obj" \
+ "$(INTDIR)\h2_h2.obj" \
+ "$(INTDIR)\h2_headers.obj" \
+ "$(INTDIR)\h2_mplx.obj" \
+ "$(INTDIR)\h2_push.obj" \
+ "$(INTDIR)\h2_request.obj" \
+ "$(INTDIR)\h2_session.obj" \
+ "$(INTDIR)\h2_stream.obj" \
+ "$(INTDIR)\h2_switch.obj" \
+ "$(INTDIR)\h2_task.obj" \
+ "$(INTDIR)\h2_util.obj" \
+ "$(INTDIR)\h2_workers.obj" \
+ "$(INTDIR)\mod_http2.obj" \
+ "$(INTDIR)\mod_http2.res" \
+ "..\..\srclib\apr\Release\libapr-1.lib" \
+ "..\..\srclib\apr-util\Release\libaprutil-1.lib" \
+ "..\..\Release\libhttpd.lib"
+
+"$(OUTDIR)\mod_http2.so" : "$(OUTDIR)" $(DEF_FILE) $(LINK32_OBJS)
+ $(LINK32) @<<
+ $(LINK32_FLAGS) $(LINK32_OBJS)
+<<
+
+TargetPath=.\Release\mod_http2.so
+SOURCE="$(InputPath)"
+PostBuild_Desc=Embed .manifest
+DS_POSTBUILD_DEP=$(INTDIR)\postbld.dep
+
+# Begin Custom Macros
+OutDir=.\Release
+# End Custom Macros
+
+"$(DS_POSTBUILD_DEP)" : "$(OUTDIR)\mod_http2.so"
+ if exist .\Release\mod_http2.so.manifest mt.exe -manifest .\Release\mod_http2.so.manifest -outputresource:.\Release\mod_http2.so;2
+ echo Helper for Post-build step > "$(DS_POSTBUILD_DEP)"
+
+!ELSEIF "$(CFG)" == "mod_http2 - Win32 Debug"
+
+OUTDIR=.\Debug
+INTDIR=.\Debug
+DS_POSTBUILD_DEP=$(INTDIR)\postbld.dep
+# Begin Custom Macros
+OutDir=.\Debug
+# End Custom Macros
+
+!IF "$(RECURSE)" == "0"
+
+ALL : "$(OUTDIR)\mod_http2.so" "$(DS_POSTBUILD_DEP)"
+
+!ELSE
+
+ALL : "libhttpd - Win32 Debug" "libaprutil - Win32 Debug" "libapr - Win32 Debug" "$(OUTDIR)\mod_http2.so" "$(DS_POSTBUILD_DEP)"
+
+!ENDIF
+
+!IF "$(RECURSE)" == "1"
+CLEAN :"libapr - Win32 DebugCLEAN" "libaprutil - Win32 DebugCLEAN" "libhttpd - Win32 DebugCLEAN"
+!ELSE
+CLEAN :
+!ENDIF
+ -@erase "$(INTDIR)\h2_alt_svc.obj"
+ -@erase "$(INTDIR)\h2_bucket_beam.obj"
+ -@erase "$(INTDIR)\h2_bucket_eos.obj"
+ -@erase "$(INTDIR)\h2_config.obj"
+ -@erase "$(INTDIR)\h2_conn.obj"
+ -@erase "$(INTDIR)\h2_conn_io.obj"
+ -@erase "$(INTDIR)\h2_ctx.obj"
+ -@erase "$(INTDIR)\h2_filter.obj"
+ -@erase "$(INTDIR)\h2_from_h1.obj"
+ -@erase "$(INTDIR)\h2_h2.obj"
+ -@erase "$(INTDIR)\h2_headers.obj"
+ -@erase "$(INTDIR)\h2_mplx.obj"
+ -@erase "$(INTDIR)\h2_push.obj"
+ -@erase "$(INTDIR)\h2_request.obj"
+ -@erase "$(INTDIR)\h2_session.obj"
+ -@erase "$(INTDIR)\h2_stream.obj"
+ -@erase "$(INTDIR)\h2_switch.obj"
+ -@erase "$(INTDIR)\h2_task.obj"
+ -@erase "$(INTDIR)\h2_util.obj"
+ -@erase "$(INTDIR)\h2_workers.obj"
+ -@erase "$(INTDIR)\mod_http2.obj"
+ -@erase "$(INTDIR)\mod_http2.res"
+ -@erase "$(INTDIR)\mod_http2_src.idb"
+ -@erase "$(INTDIR)\mod_http2_src.pdb"
+ -@erase "$(OUTDIR)\mod_http2.exp"
+ -@erase "$(OUTDIR)\mod_http2.lib"
+ -@erase "$(OUTDIR)\mod_http2.pdb"
+ -@erase "$(OUTDIR)\mod_http2.so"
+
+"$(OUTDIR)" :
+ if not exist "$(OUTDIR)/$(NULL)" mkdir "$(OUTDIR)"
+
+CPP=cl.exe
+CPP_PROJ=/nologo /MDd /W3 /Zi /Od /I "../ssl" /I "../../include" /I "../../srclib/apr/include" /I "../../srclib/apr-util/include" /I "../../srclib/nghttp2/lib/includes" /D "_DEBUG" /D "WIN32" /D "_WINDOWS" /D ssize_t=long /Fo"$(INTDIR)\\" /Fd"$(INTDIR)\mod_http2_src" /FD /EHsc /c
+
+.c{$(INTDIR)}.obj::
+ $(CPP) @<<
+ $(CPP_PROJ) $<
+<<
+
+.cpp{$(INTDIR)}.obj::
+ $(CPP) @<<
+ $(CPP_PROJ) $<
+<<
+
+.cxx{$(INTDIR)}.obj::
+ $(CPP) @<<
+ $(CPP_PROJ) $<
+<<
+
+.c{$(INTDIR)}.sbr::
+ $(CPP) @<<
+ $(CPP_PROJ) $<
+<<
+
+.cpp{$(INTDIR)}.sbr::
+ $(CPP) @<<
+ $(CPP_PROJ) $<
+<<
+
+.cxx{$(INTDIR)}.sbr::
+ $(CPP) @<<
+ $(CPP_PROJ) $<
+<<
+
+MTL=midl.exe
+MTL_PROJ=/nologo /D "_DEBUG" /mktyplib203 /win32
+RSC=rc.exe
+RSC_PROJ=/l 0x409 /fo"$(INTDIR)\mod_http2.res" /i "../../include" /i "../../srclib/apr/include" /d "_DEBUG" /d BIN_NAME="mod_http2.so" /d LONG_NAME="http2_module for Apache"
+BSC32=bscmake.exe
+BSC32_FLAGS=/nologo /o"$(OUTDIR)\mod_http2.bsc"
+BSC32_SBRS= \
+
+LINK32=link.exe
+LINK32_FLAGS=kernel32.lib nghttp2d.lib /nologo /subsystem:windows /dll /incremental:no /pdb:"$(OUTDIR)\mod_http2.pdb" /debug /out:"$(OUTDIR)\mod_http2.so" /implib:"$(OUTDIR)\mod_http2.lib" /libpath:"..\..\srclib\nghttp2\lib\MSVC_obj" /base:@..\..\os\win32\BaseAddr.ref,mod_http2.so
+LINK32_OBJS= \
+ "$(INTDIR)\h2_alt_svc.obj" \
+ "$(INTDIR)\h2_bucket_beam.obj" \
+ "$(INTDIR)\h2_bucket_eos.obj" \
+ "$(INTDIR)\h2_config.obj" \
+ "$(INTDIR)\h2_conn.obj" \
+ "$(INTDIR)\h2_conn_io.obj" \
+ "$(INTDIR)\h2_ctx.obj" \
+ "$(INTDIR)\h2_filter.obj" \
+ "$(INTDIR)\h2_from_h1.obj" \
+ "$(INTDIR)\h2_h2.obj" \
+ "$(INTDIR)\h2_headers.obj" \
+ "$(INTDIR)\h2_mplx.obj" \
+ "$(INTDIR)\h2_push.obj" \
+ "$(INTDIR)\h2_request.obj" \
+ "$(INTDIR)\h2_session.obj" \
+ "$(INTDIR)\h2_stream.obj" \
+ "$(INTDIR)\h2_switch.obj" \
+ "$(INTDIR)\h2_task.obj" \
+ "$(INTDIR)\h2_util.obj" \
+ "$(INTDIR)\h2_workers.obj" \
+ "$(INTDIR)\mod_http2.obj" \
+ "$(INTDIR)\mod_http2.res" \
+ "..\..\srclib\apr\Debug\libapr-1.lib" \
+ "..\..\srclib\apr-util\Debug\libaprutil-1.lib" \
+ "..\..\Debug\libhttpd.lib"
+
+"$(OUTDIR)\mod_http2.so" : "$(OUTDIR)" $(DEF_FILE) $(LINK32_OBJS)
+ $(LINK32) @<<
+ $(LINK32_FLAGS) $(LINK32_OBJS)
+<<
+
+TargetPath=.\Debug\mod_http2.so
+SOURCE="$(InputPath)"
+PostBuild_Desc=Embed .manifest
+DS_POSTBUILD_DEP=$(INTDIR)\postbld.dep
+
+# Begin Custom Macros
+OutDir=.\Debug
+# End Custom Macros
+
+"$(DS_POSTBUILD_DEP)" : "$(OUTDIR)\mod_http2.so"
+ if exist .\Debug\mod_http2.so.manifest mt.exe -manifest .\Debug\mod_http2.so.manifest -outputresource:.\Debug\mod_http2.so;2
+ echo Helper for Post-build step > "$(DS_POSTBUILD_DEP)"
+
+!ENDIF
+
+
+!IF "$(NO_EXTERNAL_DEPS)" != "1"
+!IF EXISTS("mod_http2.dep")
+!INCLUDE "mod_http2.dep"
+!ELSE
+!MESSAGE Warning: cannot find "mod_http2.dep"
+!ENDIF
+!ENDIF
+
+
+!IF "$(CFG)" == "mod_http2 - Win32 Release" || "$(CFG)" == "mod_http2 - Win32 Debug"
+
+!IF "$(CFG)" == "mod_http2 - Win32 Release"
+
+"libapr - Win32 Release" :
+ cd ".\..\..\srclib\apr"
+ $(MAKE) /$(MAKEFLAGS) /F ".\libapr.mak" CFG="libapr - Win32 Release"
+ cd "..\..\modules\http2"
+
+"libapr - Win32 ReleaseCLEAN" :
+ cd ".\..\..\srclib\apr"
+ $(MAKE) /$(MAKEFLAGS) /F ".\libapr.mak" CFG="libapr - Win32 Release" RECURSE=1 CLEAN
+ cd "..\..\modules\http2"
+
+!ELSEIF "$(CFG)" == "mod_http2 - Win32 Debug"
+
+"libapr - Win32 Debug" :
+ cd ".\..\..\srclib\apr"
+ $(MAKE) /$(MAKEFLAGS) /F ".\libapr.mak" CFG="libapr - Win32 Debug"
+ cd "..\..\modules\http2"
+
+"libapr - Win32 DebugCLEAN" :
+ cd ".\..\..\srclib\apr"
+ $(MAKE) /$(MAKEFLAGS) /F ".\libapr.mak" CFG="libapr - Win32 Debug" RECURSE=1 CLEAN
+ cd "..\..\modules\http2"
+
+!ENDIF
+
+!IF "$(CFG)" == "mod_http2 - Win32 Release"
+
+"libaprutil - Win32 Release" :
+ cd ".\..\..\srclib\apr-util"
+ $(MAKE) /$(MAKEFLAGS) /F ".\libaprutil.mak" CFG="libaprutil - Win32 Release"
+ cd "..\..\modules\http2"
+
+"libaprutil - Win32 ReleaseCLEAN" :
+ cd ".\..\..\srclib\apr-util"
+ $(MAKE) /$(MAKEFLAGS) /F ".\libaprutil.mak" CFG="libaprutil - Win32 Release" RECURSE=1 CLEAN
+ cd "..\..\modules\http2"
+
+!ELSEIF "$(CFG)" == "mod_http2 - Win32 Debug"
+
+"libaprutil - Win32 Debug" :
+ cd ".\..\..\srclib\apr-util"
+ $(MAKE) /$(MAKEFLAGS) /F ".\libaprutil.mak" CFG="libaprutil - Win32 Debug"
+ cd "..\..\modules\http2"
+
+"libaprutil - Win32 DebugCLEAN" :
+ cd ".\..\..\srclib\apr-util"
+ $(MAKE) /$(MAKEFLAGS) /F ".\libaprutil.mak" CFG="libaprutil - Win32 Debug" RECURSE=1 CLEAN
+ cd "..\..\modules\http2"
+
+!ENDIF
+
+!IF "$(CFG)" == "mod_http2 - Win32 Release"
+
+"libhttpd - Win32 Release" :
+ cd ".\..\.."
+ $(MAKE) /$(MAKEFLAGS) /F ".\libhttpd.mak" CFG="libhttpd - Win32 Release"
+ cd ".\modules\http2"
+
+"libhttpd - Win32 ReleaseCLEAN" :
+ cd ".\..\.."
+ $(MAKE) /$(MAKEFLAGS) /F ".\libhttpd.mak" CFG="libhttpd - Win32 Release" RECURSE=1 CLEAN
+ cd ".\modules\http2"
+
+!ELSEIF "$(CFG)" == "mod_http2 - Win32 Debug"
+
+"libhttpd - Win32 Debug" :
+ cd ".\..\.."
+ $(MAKE) /$(MAKEFLAGS) /F ".\libhttpd.mak" CFG="libhttpd - Win32 Debug"
+ cd ".\modules\http2"
+
+"libhttpd - Win32 DebugCLEAN" :
+ cd ".\..\.."
+ $(MAKE) /$(MAKEFLAGS) /F ".\libhttpd.mak" CFG="libhttpd - Win32 Debug" RECURSE=1 CLEAN
+ cd ".\modules\http2"
+
+!ENDIF
+
+SOURCE=./h2_alt_svc.c
+
+"$(INTDIR)\h2_alt_svc.obj" : $(SOURCE) "$(INTDIR)"
+
+
+SOURCE=./h2_bucket_beam.c
+
+"$(INTDIR)/h2_bucket_beam.obj" : $(SOURCE) "$(INTDIR)"
+
+
+SOURCE=./h2_bucket_eos.c
+
+"$(INTDIR)\h2_bucket_eos.obj" : $(SOURCE) "$(INTDIR)"
+
+
+SOURCE=./h2_config.c
+
+"$(INTDIR)\h2_config.obj" : $(SOURCE) "$(INTDIR)"
+
+
+SOURCE=./h2_conn.c
+
+"$(INTDIR)\h2_conn.obj" : $(SOURCE) "$(INTDIR)"
+
+
+SOURCE=./h2_conn_io.c
+
+"$(INTDIR)\h2_conn_io.obj" : $(SOURCE) "$(INTDIR)"
+
+
+SOURCE=./h2_ctx.c
+
+"$(INTDIR)\h2_ctx.obj" : $(SOURCE) "$(INTDIR)"
+
+
+SOURCE=./h2_filter.c
+
+"$(INTDIR)\h2_filter.obj" : $(SOURCE) "$(INTDIR)"
+
+
+SOURCE=./h2_from_h1.c
+
+"$(INTDIR)\h2_from_h1.obj" : $(SOURCE) "$(INTDIR)"
+
+
+SOURCE=./h2_h2.c
+
+"$(INTDIR)\h2_h2.obj" : $(SOURCE) "$(INTDIR)"
+
+
+SOURCE=./h2_headers.c
+
+"$(INTDIR)\h2_headers.obj" : $(SOURCE) "$(INTDIR)"
+
+
+SOURCE=./h2_mplx.c
+
+"$(INTDIR)\h2_mplx.obj" : $(SOURCE) "$(INTDIR)"
+
+
+SOURCE=./h2_push.c
+
+"$(INTDIR)\h2_push.obj" : $(SOURCE) "$(INTDIR)"
+
+
+SOURCE=./h2_request.c
+
+"$(INTDIR)\h2_request.obj" : $(SOURCE) "$(INTDIR)"
+
+
+SOURCE=./h2_session.c
+
+"$(INTDIR)\h2_session.obj" : $(SOURCE) "$(INTDIR)"
+
+
+SOURCE=./h2_stream.c
+
+"$(INTDIR)\h2_stream.obj" : $(SOURCE) "$(INTDIR)"
+
+
+SOURCE=./h2_switch.c
+
+"$(INTDIR)\h2_switch.obj" : $(SOURCE) "$(INTDIR)"
+
+
+SOURCE=./h2_task.c
+
+"$(INTDIR)\h2_task.obj" : $(SOURCE) "$(INTDIR)"
+
+
+SOURCE=./h2_util.c
+
+"$(INTDIR)\h2_util.obj" : $(SOURCE) "$(INTDIR)"
+
+
+SOURCE=./h2_workers.c
+
+"$(INTDIR)\h2_workers.obj" : $(SOURCE) "$(INTDIR)"
+
+
+SOURCE=..\..\build\win32\httpd.rc
+
+!IF "$(CFG)" == "mod_http2 - Win32 Release"
+
+
+"$(INTDIR)\mod_http2.res" : $(SOURCE) "$(INTDIR)"
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_http2.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "NDEBUG" /d BIN_NAME="mod_http2.so" /d LONG_NAME="http2_module for Apache" $(SOURCE)
+
+
+!ELSEIF "$(CFG)" == "mod_http2 - Win32 Debug"
+
+
+"$(INTDIR)\mod_http2.res" : $(SOURCE) "$(INTDIR)"
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_http2.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "_DEBUG" /d BIN_NAME="mod_http2.so" /d LONG_NAME="http2_module for Apache" $(SOURCE)
+
+
+!ENDIF
+
+SOURCE=./mod_http2.c
+
+"$(INTDIR)\mod_http2.obj" : $(SOURCE) "$(INTDIR)"
+
+
+
+!ENDIF
+
diff --git a/modules/http2/mod_proxy_http2.c b/modules/http2/mod_proxy_http2.c
new file mode 100644
index 0000000..5abccab
--- /dev/null
+++ b/modules/http2/mod_proxy_http2.c
@@ -0,0 +1,470 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <nghttp2/nghttp2.h>
+
+#include <ap_mmn.h>
+#include <httpd.h>
+#include <mod_proxy.h>
+#include "mod_http2.h"
+
+
+#include "mod_proxy_http2.h"
+#include "h2.h"
+#include "h2_proxy_util.h"
+#include "h2_version.h"
+#include "h2_proxy_session.h"
+
+#define H2MIN(x,y) ((x) < (y) ? (x) : (y))
+
+static void register_hook(apr_pool_t *p);
+
+AP_DECLARE_MODULE(proxy_http2) = {
+ STANDARD20_MODULE_STUFF,
+ NULL, /* create per-directory config structure */
+ NULL, /* merge per-directory config structures */
+ NULL, /* create per-server config structure */
+ NULL, /* merge per-server config structures */
+ NULL, /* command apr_table_t */
+ register_hook, /* register hooks */
+#if defined(AP_MODULE_FLAG_NONE)
+ AP_MODULE_FLAG_ALWAYS_MERGE
+#endif
+};
+
+/* Optional functions from mod_http2 */
+static int (*is_h2)(conn_rec *c);
+
+typedef struct h2_proxy_ctx {
+ const char *id;
+ conn_rec *master;
+ conn_rec *owner;
+ apr_pool_t *pool;
+ server_rec *server;
+ const char *proxy_func;
+ char server_portstr[32];
+ proxy_conn_rec *p_conn;
+ proxy_worker *worker;
+ proxy_server_conf *conf;
+
+ apr_size_t req_buffer_size;
+ int capacity;
+
+ unsigned is_ssl : 1;
+
+ request_rec *r; /* the request processed in this ctx */
+ apr_status_t r_status; /* status of request work */
+ int r_done; /* request was processed, not necessarily successfully */
+ int r_may_retry; /* request may be retried */
+ h2_proxy_session *session; /* current http2 session against backend */
+} h2_proxy_ctx;
+
+static int h2_proxy_post_config(apr_pool_t *p, apr_pool_t *plog,
+ apr_pool_t *ptemp, server_rec *s)
+{
+ void *data = NULL;
+ const char *init_key = "mod_proxy_http2_init_counter";
+ nghttp2_info *ngh2;
+ apr_status_t status = APR_SUCCESS;
+ (void)plog;(void)ptemp;
+
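+    /* post_config runs twice during startup (config pre-flight and the
+     * actual start); only set a marker on the first pass and do the real
+     * initialization on the second. */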
+ apr_pool_userdata_get(&data, init_key, s->process->pool);
+ if ( data == NULL ) {
+ apr_pool_userdata_set((const void *)1, init_key,
+ apr_pool_cleanup_null, s->process->pool);
+ return APR_SUCCESS;
+ }
+
+ ngh2 = nghttp2_version(0);
+ ap_log_error( APLOG_MARK, APLOG_INFO, 0, s, APLOGNO(03349)
+ "mod_proxy_http2 (v%s, nghttp2 %s), initializing...",
+ MOD_HTTP2_VERSION, ngh2? ngh2->version_str : "unknown");
+
+ is_h2 = APR_RETRIEVE_OPTIONAL_FN(http2_is_h2);
+
+ return status;
+}
+
+/**
+ * Canonicalize the URL into the request, if it is meant for us.
+ * Slightly modified copy from mod_proxy_http.
+ */
+static int proxy_http2_canon(request_rec *r, char *url)
+{
+ char *host, *path, sport[7];
+ char *search = NULL;
+ const char *err;
+ const char *scheme;
+ const char *http_scheme;
+ apr_port_t port, def_port;
+
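+    /* Map the proxy scheme onto the underlying HTTP scheme: "h2c:" is
+     * cleartext (http), "h2:" runs over TLS (https). */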
+ /* ap_port_of_scheme() */
+ if (ap_cstr_casecmpn(url, "h2c:", 4) == 0) {
+ url += 4;
+ scheme = "h2c";
+ http_scheme = "http";
+ }
+ else if (ap_cstr_casecmpn(url, "h2:", 3) == 0) {
+ url += 3;
+ scheme = "h2";
+ http_scheme = "https";
+ }
+ else {
+ return DECLINED;
+ }
+ port = def_port = ap_proxy_port_of_scheme(http_scheme);
+
+ ap_log_rerror(APLOG_MARK, APLOG_TRACE1, 0, r,
+ "HTTP2: canonicalising URL %s", url);
+
+    /* do syntactic check.
+ * We break the URL into host, port, path, search
+ */
+ err = ap_proxy_canon_netloc(r->pool, &url, NULL, NULL, &host, &port);
+ if (err) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(03350)
+ "error parsing URL %s: %s", url, err);
+ return HTTP_BAD_REQUEST;
+ }
+
+ /*
+ * now parse path/search args, according to rfc1738:
+ * process the path.
+ *
+ * In a reverse proxy, our URL has been processed, so canonicalise
+     * unless proxy-nocanon is set to say it's raw.
+     * In a forward proxy, we have the original URL and MUST NOT MANGLE it.
+ */
+ switch (r->proxyreq) {
+    default: /* unexpected proxyreq type; fall through to reverse proxy handling */
+ case PROXYREQ_REVERSE:
+ if (apr_table_get(r->notes, "proxy-nocanon")) {
+ path = url; /* this is the raw path */
+ }
+ else if (apr_table_get(r->notes, "proxy-noencode")) {
+ path = url; /* this is the encoded path already */
+ search = r->args;
+ }
+ else {
+ core_dir_config *d = ap_get_core_module_config(r->per_dir_config);
+ int flags = d->allow_encoded_slashes && !d->decode_encoded_slashes ? PROXY_CANONENC_NOENCODEDSLASHENCODING : 0;
+
+ path = ap_proxy_canonenc_ex(r->pool, url, (int)strlen(url),
+ enc_path, flags, r->proxyreq);
+ if (!path) {
+ return HTTP_BAD_REQUEST;
+ }
+ search = r->args;
+ }
+ break;
+ case PROXYREQ_PROXY:
+ path = url;
+ break;
+ }
+ /*
+     * If we have a raw control character or a space in the nocanon path
+     * or in r->args, correct encoding was missed.
+ */
+ if (path == url && *ap_scan_vchar_obstext(path)) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(10420)
+ "To be forwarded path contains control "
+ "characters or spaces");
+ return HTTP_FORBIDDEN;
+ }
+ if (search && *ap_scan_vchar_obstext(search)) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(10412)
+ "To be forwarded query string contains control "
+ "characters or spaces");
+ return HTTP_FORBIDDEN;
+ }
+
+ if (port != def_port) {
+ apr_snprintf(sport, sizeof(sport), ":%d", port);
+ }
+ else {
+ sport[0] = '\0';
+ }
+
+ if (ap_strchr_c(host, ':')) { /* if literal IPv6 address */
+ host = apr_pstrcat(r->pool, "[", host, "]", NULL);
+ }
+ r->filename = apr_pstrcat(r->pool, "proxy:", scheme, "://", host, sport,
+ "/", path, (search) ? "?" : "", (search) ? search : "", NULL);
+ return OK;
+}
+
+static apr_status_t add_request(h2_proxy_session *session, request_rec *r)
+{
+ h2_proxy_ctx *ctx = session->user_data;
+ const char *url;
+ apr_status_t status;
+
+ url = apr_table_get(r->notes, H2_PROXY_REQ_URL_NOTE);
+ apr_table_setn(r->notes, "proxy-source-port", apr_psprintf(r->pool, "%hu",
+ ctx->p_conn->connection->local_addr->port));
+ status = h2_proxy_session_submit(session, url, r, 1);
+ if (status != APR_SUCCESS) {
+ ap_log_cerror(APLOG_MARK, APLOG_ERR, status, r->connection, APLOGNO(03351)
+ "pass request body failed to %pI (%s) from %s (%s)",
+ ctx->p_conn->addr, ctx->p_conn->hostname ?
+ ctx->p_conn->hostname: "", session->c->client_ip,
+ session->c->remote_host ? session->c->remote_host: "");
+ }
+ return status;
+}
+
+static void request_done(h2_proxy_ctx *ctx, request_rec *r,
+ apr_status_t status, int touched)
+{
+ if (r == ctx->r) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, r->connection,
+ "h2_proxy_session(%s): request done, touched=%d",
+ ctx->id, touched);
+ ctx->r_done = 1;
+ if (touched) ctx->r_may_retry = 0;
+ ctx->r_status = ((status == APR_SUCCESS)? APR_SUCCESS
+ : HTTP_SERVICE_UNAVAILABLE);
+ }
+}
+
+static void session_req_done(h2_proxy_session *session, request_rec *r,
+ apr_status_t status, int touched)
+{
+ request_done(session->user_data, r, status, touched);
+}
+
+static apr_status_t ctx_run(h2_proxy_ctx *ctx) {
+ apr_status_t status = OK;
+ int h2_front;
+
+ /* Step Four: Send the Request in a new HTTP/2 stream and
+     * loop until we get the response or encounter an error.
+ */
+ h2_front = is_h2? is_h2(ctx->owner) : 0;
+ ctx->session = h2_proxy_session_setup(ctx->id, ctx->p_conn, ctx->conf,
+ h2_front, 30,
+ h2_proxy_log2((int)ctx->req_buffer_size),
+ session_req_done);
+ if (!ctx->session) {
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, ctx->owner,
+ APLOGNO(03372) "session unavailable");
+ return HTTP_SERVICE_UNAVAILABLE;
+ }
+
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, ctx->owner, APLOGNO(03373)
+ "eng(%s): run session %s", ctx->id, ctx->session->id);
+ ctx->session->user_data = ctx;
+
+ ctx->r_done = 0;
+ add_request(ctx->session, ctx->r);
+
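+    /* Drive the backend session until the request is reported done via
+     * session_req_done() or the fronting connection is aborted. */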
+ while (!ctx->owner->aborted && !ctx->r_done) {
+
+ status = h2_proxy_session_process(ctx->session);
+ if (status != APR_SUCCESS) {
+ /* Encountered an error during session processing */
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, ctx->owner,
+ APLOGNO(03375) "eng(%s): end of session %s",
+ ctx->id, ctx->session->id);
+ /* Any open stream of that session needs to
+             * a) be reopened on the new session, if it is safe to do so, or
+             * b) be reported as done (failed) otherwise.
+ */
+ h2_proxy_session_cleanup(ctx->session, session_req_done);
+ goto out;
+ }
+ }
+
+out:
+ if (ctx->owner->aborted) {
+ /* master connection gone */
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, ctx->owner,
+ APLOGNO(03374) "eng(%s): master connection gone", ctx->id);
+ /* cancel all ongoing requests */
+ h2_proxy_session_cancel_all(ctx->session);
+ h2_proxy_session_process(ctx->session);
+ }
+
+ ctx->session->user_data = NULL;
+ ctx->session = NULL;
+ return status;
+}
+
+static int proxy_http2_handler(request_rec *r,
+ proxy_worker *worker,
+ proxy_server_conf *conf,
+ char *url,
+ const char *proxyname,
+ apr_port_t proxyport)
+{
+ const char *proxy_func;
+ char *locurl = url, *u;
+ apr_size_t slen;
+ int is_ssl = 0;
+ apr_status_t status;
+ h2_proxy_ctx *ctx;
+ apr_uri_t uri;
+ int reconnects = 0;
+
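+    /* Only "h2:" and "h2c:" proxy URLs are handled here; anything else
+     * is declined so other proxy scheme handlers get a chance. */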
+ /* find the scheme */
+ if ((url[0] != 'h' && url[0] != 'H') || url[1] != '2') {
+ return DECLINED;
+ }
+ u = strchr(url, ':');
+ if (u == NULL || u[1] != '/' || u[2] != '/' || u[3] == '\0') {
+ return DECLINED;
+ }
+ slen = (u - url);
+ switch(slen) {
+ case 2:
+ proxy_func = "H2";
+ is_ssl = 1;
+ break;
+ case 3:
+ if (url[2] != 'c' && url[2] != 'C') {
+ return DECLINED;
+ }
+ proxy_func = "H2C";
+ break;
+ default:
+ return DECLINED;
+ }
+
+ ctx = apr_pcalloc(r->pool, sizeof(*ctx));
+ ctx->master = r->connection->master? r->connection->master : r->connection;
+ ctx->id = apr_psprintf(r->pool, "%ld", (long)ctx->master->id);
+ ctx->owner = r->connection;
+ ctx->pool = r->pool;
+ ctx->server = r->server;
+ ctx->proxy_func = proxy_func;
+ ctx->is_ssl = is_ssl;
+ ctx->worker = worker;
+ ctx->conf = conf;
+ ctx->req_buffer_size = (32*1024);
+ ctx->r = r;
+ ctx->r_status = status = HTTP_SERVICE_UNAVAILABLE;
+ ctx->r_done = 0;
+ ctx->r_may_retry = 1;
+
+ ap_set_module_config(ctx->owner->conn_config, &proxy_http2_module, ctx);
+
+ /* scheme says, this is for us. */
+ apr_table_setn(ctx->r->notes, H2_PROXY_REQ_URL_NOTE, url);
+ ap_log_rerror(APLOG_MARK, APLOG_TRACE1, 0, ctx->r,
+ "H2: serving URL %s", url);
+
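+    /* Connection setup below may be retried: on a retryable failure the
+     * backend connection is torn down and we jump back to run_connect,
+     * at most once more (see the reconnects counter). */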
+run_connect:
+ if (ctx->owner->aborted) goto cleanup;
+
+    /* Get a proxy_conn_rec from the worker; it might be a new one, one
+     * still open from another request, or the call might fail if the
+     * worker is stopped or in error. */
+ if ((status = ap_proxy_acquire_connection(ctx->proxy_func, &ctx->p_conn,
+ ctx->worker, ctx->server)) != OK) {
+ goto cleanup;
+ }
+
+ ctx->p_conn->is_ssl = ctx->is_ssl;
+
+ /* Step One: Determine the URL to connect to (might be a proxy),
+ * initialize the backend accordingly and determine the server
+ * port string we can expect in responses. */
+ if ((status = ap_proxy_determine_connection(ctx->pool, ctx->r, conf, worker,
+ ctx->p_conn, &uri, &locurl,
+ proxyname, proxyport,
+ ctx->server_portstr,
+ sizeof(ctx->server_portstr))) != OK) {
+ goto cleanup;
+ }
+
+ /* Step Two: Make the Connection (or check that an already existing
+ * socket is still usable). On success, we have a socket connected to
+ * backend->hostname. */
+ if (ap_proxy_connect_backend(ctx->proxy_func, ctx->p_conn, ctx->worker,
+ ctx->server)) {
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, ctx->owner, APLOGNO(03352)
+ "H2: failed to make connection to backend: %s",
+ ctx->p_conn->hostname);
+ goto cleanup;
+ }
+
+ /* Step Three: Create conn_rec for the socket we have open now. */
+ status = ap_proxy_connection_create_ex(ctx->proxy_func, ctx->p_conn, ctx->r);
+ if (status != OK) {
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, ctx->owner, APLOGNO(03353)
+ "setup new connection: is_ssl=%d %s %s %s",
+ ctx->p_conn->is_ssl, ctx->p_conn->ssl_hostname,
+ locurl, ctx->p_conn->hostname);
+ ctx->r_status = status;
+ goto cleanup;
+ }
+
+ if (!ctx->p_conn->data && ctx->is_ssl) {
+ /* New SSL connection: set a note on the connection about what
+ * protocol we need. */
+ apr_table_setn(ctx->p_conn->connection->notes,
+ "proxy-request-alpn-protos", "h2");
+ }
+
+ if (ctx->owner->aborted) goto cleanup;
+ status = ctx_run(ctx);
+
+ if (ctx->r_status != APR_SUCCESS && ctx->r_may_retry && !ctx->owner->aborted) {
+        /* Not successfully processed, but we may retry: tear down the old connection and start over */
+ if (ctx->p_conn) {
+ ctx->p_conn->close = 1;
+#if AP_MODULE_MAGIC_AT_LEAST(20140207, 2)
+ proxy_run_detach_backend(r, ctx->p_conn);
+#endif
+ ap_proxy_release_connection(ctx->proxy_func, ctx->p_conn, ctx->server);
+ ctx->p_conn = NULL;
+ }
+ ++reconnects;
+ if (reconnects < 2) {
+ goto run_connect;
+ }
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, ctx->owner, APLOGNO(10023)
+ "giving up after %d reconnects, request-done=%d",
+ reconnects, ctx->r_done);
+ }
+
+cleanup:
+ if (ctx->p_conn) {
+ if (status != APR_SUCCESS) {
+            /* close the socket when an error occurred or the session shut down (EOF) */
+ ctx->p_conn->close = 1;
+ }
+#if AP_MODULE_MAGIC_AT_LEAST(20140207, 2)
+ proxy_run_detach_backend(ctx->r, ctx->p_conn);
+#endif
+ ap_proxy_release_connection(ctx->proxy_func, ctx->p_conn, ctx->server);
+ ctx->p_conn = NULL;
+ }
+
+ ap_set_module_config(ctx->owner->conn_config, &proxy_http2_module, NULL);
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, ctx->owner,
+ APLOGNO(03377) "leaving handler");
+ return ctx->r_status;
+}
+
+static void register_hook(apr_pool_t *p)
+{
+ ap_hook_post_config(h2_proxy_post_config, NULL, NULL, APR_HOOK_MIDDLE);
+
+ proxy_hook_scheme_handler(proxy_http2_handler, NULL, NULL, APR_HOOK_FIRST);
+ proxy_hook_canon_handler(proxy_http2_canon, NULL, NULL, APR_HOOK_FIRST);
+}
+
diff --git a/modules/http2/mod_proxy_http2.dep b/modules/http2/mod_proxy_http2.dep
new file mode 100644
index 0000000..641fca6
--- /dev/null
+++ b/modules/http2/mod_proxy_http2.dep
@@ -0,0 +1,208 @@
+# Microsoft Developer Studio Generated Dependency File, included by mod_proxy_http2.mak
+
+./h2_proxy_session.c : \
+ "..\..\include\ap_config.h"\
+ "..\..\include\ap_config_layout.h"\
+ "..\..\include\ap_expr.h"\
+ "..\..\include\ap_hooks.h"\
+ "..\..\include\ap_mmn.h"\
+ "..\..\include\ap_mpm.h"\
+ "..\..\include\ap_provider.h"\
+ "..\..\include\ap_regex.h"\
+ "..\..\include\ap_release.h"\
+ "..\..\include\ap_slotmem.h"\
+ "..\..\include\apache_noprobes.h"\
+ "..\..\include\http_config.h"\
+ "..\..\include\http_connection.h"\
+ "..\..\include\http_core.h"\
+ "..\..\include\http_log.h"\
+ "..\..\include\http_main.h"\
+ "..\..\include\http_protocol.h"\
+ "..\..\include\http_request.h"\
+ "..\..\include\http_vhost.h"\
+ "..\..\include\httpd.h"\
+ "..\..\include\mod_proxy.h"\
+ "..\..\include\mpm_common.h"\
+ "..\..\include\os.h"\
+ "..\..\include\scoreboard.h"\
+ "..\..\include\util_cfgtree.h"\
+ "..\..\include\util_charset.h"\
+ "..\..\include\util_ebcdic.h"\
+ "..\..\include\util_filter.h"\
+ "..\..\include\util_mutex.h"\
+ "..\..\srclib\apr-util\include\apr_buckets.h"\
+ "..\..\srclib\apr-util\include\apr_date.h"\
+ "..\..\srclib\apr-util\include\apr_hooks.h"\
+ "..\..\srclib\apr-util\include\apr_md5.h"\
+ "..\..\srclib\apr-util\include\apr_optional.h"\
+ "..\..\srclib\apr-util\include\apr_optional_hooks.h"\
+ "..\..\srclib\apr-util\include\apr_reslist.h"\
+ "..\..\srclib\apr-util\include\apr_strmatch.h"\
+ "..\..\srclib\apr-util\include\apr_uri.h"\
+ "..\..\srclib\apr-util\include\apr_uuid.h"\
+ "..\..\srclib\apr-util\include\apr_xlate.h"\
+ "..\..\srclib\apr-util\include\apu.h"\
+ "..\..\srclib\apr\include\apr.h"\
+ "..\..\srclib\apr\include\apr_allocator.h"\
+ "..\..\srclib\apr\include\apr_dso.h"\
+ "..\..\srclib\apr\include\apr_errno.h"\
+ "..\..\srclib\apr\include\apr_file_info.h"\
+ "..\..\srclib\apr\include\apr_file_io.h"\
+ "..\..\srclib\apr\include\apr_fnmatch.h"\
+ "..\..\srclib\apr\include\apr_general.h"\
+ "..\..\srclib\apr\include\apr_global_mutex.h"\
+ "..\..\srclib\apr\include\apr_hash.h"\
+ "..\..\srclib\apr\include\apr_inherit.h"\
+ "..\..\srclib\apr\include\apr_lib.h"\
+ "..\..\srclib\apr\include\apr_mmap.h"\
+ "..\..\srclib\apr\include\apr_network_io.h"\
+ "..\..\srclib\apr\include\apr_poll.h"\
+ "..\..\srclib\apr\include\apr_pools.h"\
+ "..\..\srclib\apr\include\apr_portable.h"\
+ "..\..\srclib\apr\include\apr_proc_mutex.h"\
+ "..\..\srclib\apr\include\apr_ring.h"\
+ "..\..\srclib\apr\include\apr_shm.h"\
+ "..\..\srclib\apr\include\apr_strings.h"\
+ "..\..\srclib\apr\include\apr_tables.h"\
+ "..\..\srclib\apr\include\apr_thread_mutex.h"\
+ "..\..\srclib\apr\include\apr_thread_proc.h"\
+ "..\..\srclib\apr\include\apr_time.h"\
+ "..\..\srclib\apr\include\apr_user.h"\
+ "..\..\srclib\apr\include\apr_want.h"\
+ "..\..\srclib\nghttp2\lib\includes\nghttp2\nghttp2.h"\
+ "..\..\srclib\nghttp2\lib\includes\nghttp2\nghttp2ver.h"\
+ ".\h2.h"\
+ ".\h2_proxy_session.h"\
+ ".\h2_proxy_util.h"\
+ ".\mod_http2.h"\
+
+
+./h2_proxy_util.c : \
+ "..\..\include\ap_config.h"\
+ "..\..\include\ap_config_layout.h"\
+ "..\..\include\ap_expr.h"\
+ "..\..\include\ap_hooks.h"\
+ "..\..\include\ap_mmn.h"\
+ "..\..\include\ap_regex.h"\
+ "..\..\include\ap_release.h"\
+ "..\..\include\apache_noprobes.h"\
+ "..\..\include\http_config.h"\
+ "..\..\include\http_core.h"\
+ "..\..\include\http_log.h"\
+ "..\..\include\http_request.h"\
+ "..\..\include\httpd.h"\
+ "..\..\include\os.h"\
+ "..\..\include\util_cfgtree.h"\
+ "..\..\include\util_filter.h"\
+ "..\..\srclib\apr-util\include\apr_buckets.h"\
+ "..\..\srclib\apr-util\include\apr_hooks.h"\
+ "..\..\srclib\apr-util\include\apr_optional.h"\
+ "..\..\srclib\apr-util\include\apr_optional_hooks.h"\
+ "..\..\srclib\apr-util\include\apr_uri.h"\
+ "..\..\srclib\apr-util\include\apu.h"\
+ "..\..\srclib\apr\include\apr.h"\
+ "..\..\srclib\apr\include\apr_allocator.h"\
+ "..\..\srclib\apr\include\apr_errno.h"\
+ "..\..\srclib\apr\include\apr_file_info.h"\
+ "..\..\srclib\apr\include\apr_file_io.h"\
+ "..\..\srclib\apr\include\apr_general.h"\
+ "..\..\srclib\apr\include\apr_hash.h"\
+ "..\..\srclib\apr\include\apr_inherit.h"\
+ "..\..\srclib\apr\include\apr_mmap.h"\
+ "..\..\srclib\apr\include\apr_network_io.h"\
+ "..\..\srclib\apr\include\apr_poll.h"\
+ "..\..\srclib\apr\include\apr_pools.h"\
+ "..\..\srclib\apr\include\apr_ring.h"\
+ "..\..\srclib\apr\include\apr_strings.h"\
+ "..\..\srclib\apr\include\apr_tables.h"\
+ "..\..\srclib\apr\include\apr_thread_mutex.h"\
+ "..\..\srclib\apr\include\apr_thread_proc.h"\
+ "..\..\srclib\apr\include\apr_time.h"\
+ "..\..\srclib\apr\include\apr_user.h"\
+ "..\..\srclib\apr\include\apr_want.h"\
+ "..\..\srclib\nghttp2\lib\includes\nghttp2\nghttp2.h"\
+ "..\..\srclib\nghttp2\lib\includes\nghttp2\nghttp2ver.h"\
+ ".\h2.h"\
+ ".\h2_proxy_util.h"\
+
+
+..\..\build\win32\httpd.rc : \
+ "..\..\include\ap_release.h"\
+
+
+./mod_proxy_http2.c : \
+ "..\..\include\ap_config.h"\
+ "..\..\include\ap_config_layout.h"\
+ "..\..\include\ap_expr.h"\
+ "..\..\include\ap_hooks.h"\
+ "..\..\include\ap_mmn.h"\
+ "..\..\include\ap_provider.h"\
+ "..\..\include\ap_regex.h"\
+ "..\..\include\ap_release.h"\
+ "..\..\include\ap_slotmem.h"\
+ "..\..\include\apache_noprobes.h"\
+ "..\..\include\http_config.h"\
+ "..\..\include\http_connection.h"\
+ "..\..\include\http_core.h"\
+ "..\..\include\http_log.h"\
+ "..\..\include\http_main.h"\
+ "..\..\include\http_protocol.h"\
+ "..\..\include\http_request.h"\
+ "..\..\include\http_vhost.h"\
+ "..\..\include\httpd.h"\
+ "..\..\include\mod_proxy.h"\
+ "..\..\include\os.h"\
+ "..\..\include\util_cfgtree.h"\
+ "..\..\include\util_charset.h"\
+ "..\..\include\util_ebcdic.h"\
+ "..\..\include\util_filter.h"\
+ "..\..\include\util_mutex.h"\
+ "..\..\srclib\apr-util\include\apr_buckets.h"\
+ "..\..\srclib\apr-util\include\apr_date.h"\
+ "..\..\srclib\apr-util\include\apr_hooks.h"\
+ "..\..\srclib\apr-util\include\apr_md5.h"\
+ "..\..\srclib\apr-util\include\apr_optional.h"\
+ "..\..\srclib\apr-util\include\apr_optional_hooks.h"\
+ "..\..\srclib\apr-util\include\apr_reslist.h"\
+ "..\..\srclib\apr-util\include\apr_strmatch.h"\
+ "..\..\srclib\apr-util\include\apr_uri.h"\
+ "..\..\srclib\apr-util\include\apr_uuid.h"\
+ "..\..\srclib\apr-util\include\apr_xlate.h"\
+ "..\..\srclib\apr-util\include\apu.h"\
+ "..\..\srclib\apr\include\apr.h"\
+ "..\..\srclib\apr\include\apr_allocator.h"\
+ "..\..\srclib\apr\include\apr_dso.h"\
+ "..\..\srclib\apr\include\apr_errno.h"\
+ "..\..\srclib\apr\include\apr_file_info.h"\
+ "..\..\srclib\apr\include\apr_file_io.h"\
+ "..\..\srclib\apr\include\apr_fnmatch.h"\
+ "..\..\srclib\apr\include\apr_general.h"\
+ "..\..\srclib\apr\include\apr_global_mutex.h"\
+ "..\..\srclib\apr\include\apr_hash.h"\
+ "..\..\srclib\apr\include\apr_inherit.h"\
+ "..\..\srclib\apr\include\apr_lib.h"\
+ "..\..\srclib\apr\include\apr_mmap.h"\
+ "..\..\srclib\apr\include\apr_network_io.h"\
+ "..\..\srclib\apr\include\apr_poll.h"\
+ "..\..\srclib\apr\include\apr_pools.h"\
+ "..\..\srclib\apr\include\apr_portable.h"\
+ "..\..\srclib\apr\include\apr_proc_mutex.h"\
+ "..\..\srclib\apr\include\apr_ring.h"\
+ "..\..\srclib\apr\include\apr_shm.h"\
+ "..\..\srclib\apr\include\apr_strings.h"\
+ "..\..\srclib\apr\include\apr_tables.h"\
+ "..\..\srclib\apr\include\apr_thread_mutex.h"\
+ "..\..\srclib\apr\include\apr_thread_proc.h"\
+ "..\..\srclib\apr\include\apr_time.h"\
+ "..\..\srclib\apr\include\apr_user.h"\
+ "..\..\srclib\apr\include\apr_want.h"\
+ "..\..\srclib\nghttp2\lib\includes\nghttp2\nghttp2.h"\
+ "..\..\srclib\nghttp2\lib\includes\nghttp2\nghttp2ver.h"\
+ ".\h2.h"\
+ ".\h2_proxy_session.h"\
+ ".\h2_request.h"\
+ ".\h2_proxy_util.h"\
+ ".\h2_version.h"\
+ ".\mod_http2.h"\
+ ".\mod_proxy_http2.h"\
+
diff --git a/modules/http2/mod_proxy_http2.dsp b/modules/http2/mod_proxy_http2.dsp
new file mode 100644
index 0000000..5d6305f
--- /dev/null
+++ b/modules/http2/mod_proxy_http2.dsp
@@ -0,0 +1,119 @@
+# Microsoft Developer Studio Project File - Name="mod_proxy_http2" - Package Owner=<4>
+# Microsoft Developer Studio Generated Build File, Format Version 6.00
+# ** DO NOT EDIT **
+
+# TARGTYPE "Win32 (x86) Dynamic-Link Library" 0x0102
+
+CFG=mod_proxy_http2 - Win32 Release
+!MESSAGE This is not a valid makefile. To build this project using NMAKE,
+!MESSAGE use the Export Makefile command and run
+!MESSAGE
+!MESSAGE NMAKE /f "mod_proxy_http2.mak".
+!MESSAGE
+!MESSAGE You can specify a configuration when running NMAKE
+!MESSAGE by defining the macro CFG on the command line. For example:
+!MESSAGE
+!MESSAGE NMAKE /f "mod_proxy_http2.mak" CFG="mod_proxy_http2 - Win32 Release"
+!MESSAGE
+!MESSAGE Possible choices for configuration are:
+!MESSAGE
+!MESSAGE "mod_proxy_http2 - Win32 Release" (based on "Win32 (x86) Dynamic-Link Library")
+!MESSAGE "mod_proxy_http2 - Win32 Debug" (based on "Win32 (x86) Dynamic-Link Library")
+!MESSAGE
+
+# Begin Project
+# PROP AllowPerConfigDependencies 0
+# PROP Scc_ProjName ""
+# PROP Scc_LocalPath ""
+CPP=cl.exe
+MTL=midl.exe
+RSC=rc.exe
+
+!IF "$(CFG)" == "mod_proxy_http2 - Win32 Release"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "Release"
+# PROP BASE Intermediate_Dir "Release"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release"
+# PROP Intermediate_Dir "Release"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MD /W3 /O2 /D "WIN32" /D "NDEBUG" /D "_WINDOWS" /D "ssize_t=long" /FD /c
+# ADD CPP /nologo /MD /W3 /O2 /Oy- /Zi /I "../ssl" /I "../../include" /I "../../srclib/apr/include" /I "../../srclib/apr-util/include" /I "../../srclib/nghttp2/lib/includes" /D "NDEBUG" /D "WIN32" /D "_WINDOWS" /D "ssize_t=long" /Fd"Release\mod_proxy_http2_src" /FD /c
+# ADD BASE MTL /nologo /D "NDEBUG" /win32
+# ADD MTL /nologo /D "NDEBUG" /mktyplib203 /win32
+# ADD BASE RSC /l 0x409 /d "NDEBUG"
+# ADD RSC /l 0x409 /fo"Release/mod_proxy_http2.res" /i "../../include" /i "../../srclib/apr/include" /d "NDEBUG" /d BIN_NAME="mod_proxy_http2.so" /d LONG_NAME="http2_module for Apache"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib nghttp2.lib /nologo /subsystem:windows /dll /libpath:"..\..\srclib\nghttp2\lib\MSVC_obj" /out:".\Release\mod_proxy_http2.so" /base:@..\..\os\win32\BaseAddr.ref,mod_proxy_http2.so
+# ADD LINK32 kernel32.lib nghttp2.lib /nologo /subsystem:windows /dll /libpath:"..\..\srclib\nghttp2\lib\MSVC_obj" /incremental:no /debug /out:".\Release\mod_proxy_http2.so" /base:@..\..\os\win32\BaseAddr.ref,mod_proxy_http2.so /opt:ref
+# Begin Special Build Tool
+TargetPath=.\Release\mod_proxy_http2.so
+SOURCE="$(InputPath)"
+PostBuild_Desc=Embed .manifest
+PostBuild_Cmds=if exist $(TargetPath).manifest mt.exe -manifest $(TargetPath).manifest -outputresource:$(TargetPath);2
+# End Special Build Tool
+
+!ELSEIF "$(CFG)" == "mod_proxy_http2 - Win32 Debug"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "Debug"
+# PROP BASE Intermediate_Dir "Debug"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug"
+# PROP Intermediate_Dir "Debug"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MDd /W3 /EHsc /Zi /Od /D "WIN32" /D "_DEBUG" /D "_WINDOWS" /D "ssize_t=long" /FD /c
+# ADD CPP /nologo /MDd /W3 /EHsc /Zi /Od /I "../ssl" /I "../../include" /I "../../srclib/apr/include" /I "../../srclib/apr-util/include" /I "../../srclib/nghttp2/lib/includes" /D "_DEBUG" /D "WIN32" /D "_WINDOWS" /D "ssize_t=long" /Fd"Debug\mod_proxy_http2_src" /FD /c
+# ADD BASE MTL /nologo /D "_DEBUG" /win32
+# ADD MTL /nologo /D "_DEBUG" /mktyplib203 /win32
+# ADD BASE RSC /l 0x409 /d "_DEBUG"
+# ADD RSC /l 0x409 /fo"Debug/mod_proxy_http2.res" /i "../../include" /i "../../srclib/apr/include" /d "_DEBUG" /d BIN_NAME="mod_proxy_http2.so" /d LONG_NAME="http2_module for Apache"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib nghttp2d.lib /nologo /subsystem:windows /dll /libpath:"..\..\srclib\nghttp2\lib\MSVC_obj" /incremental:no /debug /out:".\Debug\mod_proxy_http2.so" /base:@..\..\os\win32\BaseAddr.ref,mod_proxy_http2.so
+# ADD LINK32 kernel32.lib nghttp2d.lib /nologo /subsystem:windows /dll /libpath:"..\..\srclib\nghttp2\lib\MSVC_obj" /incremental:no /debug /out:".\Debug\mod_proxy_http2.so" /base:@..\..\os\win32\BaseAddr.ref,mod_proxy_http2.so
+# Begin Special Build Tool
+TargetPath=.\Debug\mod_proxy_http2.so
+SOURCE="$(InputPath)"
+PostBuild_Desc=Embed .manifest
+PostBuild_Cmds=if exist $(TargetPath).manifest mt.exe -manifest $(TargetPath).manifest -outputresource:$(TargetPath);2
+# End Special Build Tool
+
+!ENDIF
+
+# Begin Target
+
+# Name "mod_proxy_http2 - Win32 Release"
+# Name "mod_proxy_http2 - Win32 Debug"
+# Begin Source File
+
+SOURCE=./h2_proxy_session.c
+# End Source File
+# Begin Source File
+
+SOURCE=./h2_proxy_util.c
+# End Source File
+# Begin Source File
+
+SOURCE=./mod_proxy_http2.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\..\build\win32\httpd.rc
+# End Source File
+# End Target
+# End Project
diff --git a/modules/http2/mod_proxy_http2.h b/modules/http2/mod_proxy_http2.h
new file mode 100644
index 0000000..0048ed9
--- /dev/null
+++ b/modules/http2/mod_proxy_http2.h
@@ -0,0 +1,21 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __MOD_PROXY_HTTP2_H__
+#define __MOD_PROXY_HTTP2_H__
+
+
+#endif
diff --git a/modules/http2/mod_proxy_http2.mak b/modules/http2/mod_proxy_http2.mak
new file mode 100644
index 0000000..e8e0624
--- /dev/null
+++ b/modules/http2/mod_proxy_http2.mak
@@ -0,0 +1,427 @@
+# Microsoft Developer Studio Generated NMAKE File, Based on mod_proxy_http2.dsp
+!IF "$(CFG)" == ""
+CFG=mod_proxy_http2 - Win32 Release
+!MESSAGE No configuration specified. Defaulting to mod_proxy_http2 - Win32 Release.
+!ENDIF
+
+!IF "$(CFG)" != "mod_proxy_http2 - Win32 Release" && "$(CFG)" != "mod_proxy_http2 - Win32 Debug"
+!MESSAGE Invalid configuration "$(CFG)" specified.
+!MESSAGE You can specify a configuration when running NMAKE
+!MESSAGE by defining the macro CFG on the command line. For example:
+!MESSAGE
+!MESSAGE NMAKE /f "mod_proxy_http2.mak" CFG="mod_proxy_http2 - Win32 Release"
+!MESSAGE
+!MESSAGE Possible choices for configuration are:
+!MESSAGE
+!MESSAGE "mod_proxy_http2 - Win32 Release" (based on "Win32 (x86) Dynamic-Link Library")
+!MESSAGE "mod_proxy_http2 - Win32 Debug" (based on "Win32 (x86) Dynamic-Link Library")
+!MESSAGE
+!ERROR An invalid configuration is specified.
+!ENDIF
+
+!IF "$(OS)" == "Windows_NT"
+NULL=
+!ELSE
+NULL=nul
+!ENDIF
+
+!IF "$(CFG)" == "mod_proxy_http2 - Win32 Release"
+
+OUTDIR=.\Release
+INTDIR=.\Release
+DS_POSTBUILD_DEP=$(INTDIR)\postbld.dep
+# Begin Custom Macros
+OutDir=.\Release
+# End Custom Macros
+
+!IF "$(RECURSE)" == "0"
+
+ALL : "$(OUTDIR)\mod_proxy_http2.so" "$(DS_POSTBUILD_DEP)"
+
+!ELSE
+
+ALL : "mod_proxy - Win32 Release" "mod_http2 - Win32 Release" "libhttpd - Win32 Release" "libaprutil - Win32 Release" "libapr - Win32 Release" "$(OUTDIR)\mod_proxy_http2.so" "$(DS_POSTBUILD_DEP)"
+
+!ENDIF
+
+!IF "$(RECURSE)" == "1"
+CLEAN :"libapr - Win32 ReleaseCLEAN" "libaprutil - Win32 ReleaseCLEAN" "libhttpd - Win32 ReleaseCLEAN" "mod_http2 - Win32 ReleaseCLEAN" "mod_proxy - Win32 ReleaseCLEAN"
+!ELSE
+CLEAN :
+!ENDIF
+ -@erase "$(INTDIR)\h2_proxy_session.obj"
+ -@erase "$(INTDIR)\h2_proxy_util.obj"
+ -@erase "$(INTDIR)\mod_proxy_http2.obj"
+ -@erase "$(INTDIR)\mod_proxy_http2.res"
+ -@erase "$(INTDIR)\mod_proxy_http2_src.idb"
+ -@erase "$(INTDIR)\mod_proxy_http2_src.pdb"
+ -@erase "$(OUTDIR)\mod_proxy_http2.exp"
+ -@erase "$(OUTDIR)\mod_proxy_http2.lib"
+ -@erase "$(OUTDIR)\mod_proxy_http2.pdb"
+ -@erase "$(OUTDIR)\mod_proxy_http2.so"
+
+"$(OUTDIR)" :
+ if not exist "$(OUTDIR)/$(NULL)" mkdir "$(OUTDIR)"
+
+CPP=cl.exe
+CPP_PROJ=/nologo /MD /W3 /Zi /O2 /Oy- /I "../ssl" /I "../../include" /I "../../srclib/apr/include" /I "../../srclib/apr-util/include" /I "../../srclib/nghttp2/lib/includes" /D "NDEBUG" /D "WIN32" /D "_WINDOWS" /D ssize_t=long /Fo"$(INTDIR)\\" /Fd"$(INTDIR)\mod_proxy_http2_src" /FD /c
+
+.c{$(INTDIR)}.obj::
+ $(CPP) @<<
+ $(CPP_PROJ) $<
+<<
+
+.cpp{$(INTDIR)}.obj::
+ $(CPP) @<<
+ $(CPP_PROJ) $<
+<<
+
+.cxx{$(INTDIR)}.obj::
+ $(CPP) @<<
+ $(CPP_PROJ) $<
+<<
+
+.c{$(INTDIR)}.sbr::
+ $(CPP) @<<
+ $(CPP_PROJ) $<
+<<
+
+.cpp{$(INTDIR)}.sbr::
+ $(CPP) @<<
+ $(CPP_PROJ) $<
+<<
+
+.cxx{$(INTDIR)}.sbr::
+ $(CPP) @<<
+ $(CPP_PROJ) $<
+<<
+
+MTL=midl.exe
+MTL_PROJ=/nologo /D "NDEBUG" /mktyplib203 /win32
+RSC=rc.exe
+RSC_PROJ=/l 0x409 /fo"$(INTDIR)\mod_proxy_http2.res" /i "../../include" /i "../../srclib/apr/include" /d "NDEBUG" /d BIN_NAME="mod_proxy_http2.so" /d LONG_NAME="http2_module for Apache"
+BSC32=bscmake.exe
+BSC32_FLAGS=/nologo /o"$(OUTDIR)\mod_proxy_http2.bsc"
+BSC32_SBRS= \
+
+LINK32=link.exe
+LINK32_FLAGS=kernel32.lib nghttp2.lib /nologo /subsystem:windows /dll /incremental:no /pdb:"$(OUTDIR)\mod_proxy_http2.pdb" /debug /out:"$(OUTDIR)\mod_proxy_http2.so" /implib:"$(OUTDIR)\mod_proxy_http2.lib" /libpath:"..\..\srclib\nghttp2\lib\MSVC_obj" /base:@..\..\os\win32\BaseAddr.ref,mod_proxy_http2.so /opt:ref
+LINK32_OBJS= \
+ "$(INTDIR)\h2_proxy_session.obj" \
+ "$(INTDIR)\h2_proxy_util.obj" \
+ "$(INTDIR)\mod_proxy_http2.obj" \
+ "$(INTDIR)\mod_proxy_http2.res" \
+ "..\..\srclib\apr\Release\libapr-1.lib" \
+ "..\..\srclib\apr-util\Release\libaprutil-1.lib" \
+ "..\..\Release\libhttpd.lib" \
+ "$(OUTDIR)\mod_http2.lib" \
+ "..\proxy\Release\mod_proxy.lib"
+
+"$(OUTDIR)\mod_proxy_http2.so" : "$(OUTDIR)" $(DEF_FILE) $(LINK32_OBJS)
+ $(LINK32) @<<
+ $(LINK32_FLAGS) $(LINK32_OBJS)
+<<
+
+TargetPath=.\Release\mod_proxy_http2.so
+SOURCE="$(InputPath)"
+PostBuild_Desc=Embed .manifest
+DS_POSTBUILD_DEP=$(INTDIR)\postbld.dep
+
+# Begin Custom Macros
+OutDir=.\Release
+# End Custom Macros
+
+"$(DS_POSTBUILD_DEP)" : "$(OUTDIR)\mod_proxy_http2.so"
+ if exist .\Release\mod_proxy_http2.so.manifest mt.exe -manifest .\Release\mod_proxy_http2.so.manifest -outputresource:.\Release\mod_proxy_http2.so;2
+ echo Helper for Post-build step > "$(DS_POSTBUILD_DEP)"
+
+!ELSEIF "$(CFG)" == "mod_proxy_http2 - Win32 Debug"
+
+OUTDIR=.\Debug
+INTDIR=.\Debug
+DS_POSTBUILD_DEP=$(INTDIR)\postbld.dep
+# Begin Custom Macros
+OutDir=.\Debug
+# End Custom Macros
+
+!IF "$(RECURSE)" == "0"
+
+ALL : "$(OUTDIR)\mod_proxy_http2.so" "$(DS_POSTBUILD_DEP)"
+
+!ELSE
+
+ALL : "mod_proxy - Win32 Debug" "mod_http2 - Win32 Debug" "libhttpd - Win32 Debug" "libaprutil - Win32 Debug" "libapr - Win32 Debug" "$(OUTDIR)\mod_proxy_http2.so" "$(DS_POSTBUILD_DEP)"
+
+!ENDIF
+
+!IF "$(RECURSE)" == "1"
+CLEAN :"libapr - Win32 DebugCLEAN" "libaprutil - Win32 DebugCLEAN" "libhttpd - Win32 DebugCLEAN" "mod_http2 - Win32 DebugCLEAN" "mod_proxy - Win32 DebugCLEAN"
+!ELSE
+CLEAN :
+!ENDIF
+ -@erase "$(INTDIR)\h2_proxy_session.obj"
+ -@erase "$(INTDIR)\h2_proxy_util.obj"
+ -@erase "$(INTDIR)\mod_proxy_http2.obj"
+ -@erase "$(INTDIR)\mod_proxy_http2.res"
+ -@erase "$(INTDIR)\mod_proxy_http2_src.idb"
+ -@erase "$(INTDIR)\mod_proxy_http2_src.pdb"
+ -@erase "$(OUTDIR)\mod_proxy_http2.exp"
+ -@erase "$(OUTDIR)\mod_proxy_http2.lib"
+ -@erase "$(OUTDIR)\mod_proxy_http2.pdb"
+ -@erase "$(OUTDIR)\mod_proxy_http2.so"
+
+"$(OUTDIR)" :
+ if not exist "$(OUTDIR)/$(NULL)" mkdir "$(OUTDIR)"
+
+CPP=cl.exe
+CPP_PROJ=/nologo /MDd /W3 /Zi /Od /I "../ssl" /I "../../include" /I "../../srclib/apr/include" /I "../../srclib/apr-util/include" /I "../../srclib/nghttp2/lib/includes" /D "_DEBUG" /D "WIN32" /D "_WINDOWS" /D ssize_t=long /Fo"$(INTDIR)\\" /Fd"$(INTDIR)\mod_proxy_http2_src" /FD /EHsc /c
+
+.c{$(INTDIR)}.obj::
+ $(CPP) @<<
+ $(CPP_PROJ) $<
+<<
+
+.cpp{$(INTDIR)}.obj::
+ $(CPP) @<<
+ $(CPP_PROJ) $<
+<<
+
+.cxx{$(INTDIR)}.obj::
+ $(CPP) @<<
+ $(CPP_PROJ) $<
+<<
+
+.c{$(INTDIR)}.sbr::
+ $(CPP) @<<
+ $(CPP_PROJ) $<
+<<
+
+.cpp{$(INTDIR)}.sbr::
+ $(CPP) @<<
+ $(CPP_PROJ) $<
+<<
+
+.cxx{$(INTDIR)}.sbr::
+ $(CPP) @<<
+ $(CPP_PROJ) $<
+<<
+
+MTL=midl.exe
+MTL_PROJ=/nologo /D "_DEBUG" /mktyplib203 /win32
+RSC=rc.exe
+RSC_PROJ=/l 0x409 /fo"$(INTDIR)\mod_proxy_http2.res" /i "../../include" /i "../../srclib/apr/include" /d "_DEBUG" /d BIN_NAME="mod_proxy_http2.so" /d LONG_NAME="http2_module for Apache"
+BSC32=bscmake.exe
+BSC32_FLAGS=/nologo /o"$(OUTDIR)\mod_proxy_http2.bsc"
+BSC32_SBRS= \
+
+LINK32=link.exe
+LINK32_FLAGS=kernel32.lib nghttp2d.lib /nologo /subsystem:windows /dll /incremental:no /pdb:"$(OUTDIR)\mod_proxy_http2.pdb" /debug /out:"$(OUTDIR)\mod_proxy_http2.so" /implib:"$(OUTDIR)\mod_proxy_http2.lib" /libpath:"..\..\srclib\nghttp2\lib\MSVC_obj" /base:@..\..\os\win32\BaseAddr.ref,mod_proxy_http2.so
+LINK32_OBJS= \
+ "$(INTDIR)\h2_proxy_session.obj" \
+ "$(INTDIR)\h2_proxy_util.obj" \
+ "$(INTDIR)\mod_proxy_http2.obj" \
+ "$(INTDIR)\mod_proxy_http2.res" \
+ "..\..\srclib\apr\Debug\libapr-1.lib" \
+ "..\..\srclib\apr-util\Debug\libaprutil-1.lib" \
+ "..\..\Debug\libhttpd.lib" \
+ "$(OUTDIR)\mod_http2.lib" \
+ "..\proxy\Debug\mod_proxy.lib"
+
+"$(OUTDIR)\mod_proxy_http2.so" : "$(OUTDIR)" $(DEF_FILE) $(LINK32_OBJS)
+ $(LINK32) @<<
+ $(LINK32_FLAGS) $(LINK32_OBJS)
+<<
+
+TargetPath=.\Debug\mod_proxy_http2.so
+SOURCE="$(InputPath)"
+PostBuild_Desc=Embed .manifest
+DS_POSTBUILD_DEP=$(INTDIR)\postbld.dep
+
+# Begin Custom Macros
+OutDir=.\Debug
+# End Custom Macros
+
+"$(DS_POSTBUILD_DEP)" : "$(OUTDIR)\mod_proxy_http2.so"
+ if exist .\Debug\mod_proxy_http2.so.manifest mt.exe -manifest .\Debug\mod_proxy_http2.so.manifest -outputresource:.\Debug\mod_proxy_http2.so;2
+ echo Helper for Post-build step > "$(DS_POSTBUILD_DEP)"
+
+!ENDIF
+
+
+!IF "$(NO_EXTERNAL_DEPS)" != "1"
+!IF EXISTS("mod_proxy_http2.dep")
+!INCLUDE "mod_proxy_http2.dep"
+!ELSE
+!MESSAGE Warning: cannot find "mod_proxy_http2.dep"
+!ENDIF
+!ENDIF
+
+
+!IF "$(CFG)" == "mod_proxy_http2 - Win32 Release" || "$(CFG)" == "mod_proxy_http2 - Win32 Debug"
+
+!IF "$(CFG)" == "mod_proxy_http2 - Win32 Release"
+
+"libapr - Win32 Release" :
+ cd ".\..\..\srclib\apr"
+ $(MAKE) /$(MAKEFLAGS) /F ".\libapr.mak" CFG="libapr - Win32 Release"
+ cd "..\..\modules\http2"
+
+"libapr - Win32 ReleaseCLEAN" :
+ cd ".\..\..\srclib\apr"
+ $(MAKE) /$(MAKEFLAGS) /F ".\libapr.mak" CFG="libapr - Win32 Release" RECURSE=1 CLEAN
+ cd "..\..\modules\http2"
+
+!ELSEIF "$(CFG)" == "mod_proxy_http2 - Win32 Debug"
+
+"libapr - Win32 Debug" :
+ cd ".\..\..\srclib\apr"
+ $(MAKE) /$(MAKEFLAGS) /F ".\libapr.mak" CFG="libapr - Win32 Debug"
+ cd "..\..\modules\http2"
+
+"libapr - Win32 DebugCLEAN" :
+ cd ".\..\..\srclib\apr"
+ $(MAKE) /$(MAKEFLAGS) /F ".\libapr.mak" CFG="libapr - Win32 Debug" RECURSE=1 CLEAN
+ cd "..\..\modules\http2"
+
+!ENDIF
+
+!IF "$(CFG)" == "mod_proxy_http2 - Win32 Release"
+
+"libaprutil - Win32 Release" :
+ cd ".\..\..\srclib\apr-util"
+ $(MAKE) /$(MAKEFLAGS) /F ".\libaprutil.mak" CFG="libaprutil - Win32 Release"
+ cd "..\..\modules\http2"
+
+"libaprutil - Win32 ReleaseCLEAN" :
+ cd ".\..\..\srclib\apr-util"
+ $(MAKE) /$(MAKEFLAGS) /F ".\libaprutil.mak" CFG="libaprutil - Win32 Release" RECURSE=1 CLEAN
+ cd "..\..\modules\http2"
+
+!ELSEIF "$(CFG)" == "mod_proxy_http2 - Win32 Debug"
+
+"libaprutil - Win32 Debug" :
+ cd ".\..\..\srclib\apr-util"
+ $(MAKE) /$(MAKEFLAGS) /F ".\libaprutil.mak" CFG="libaprutil - Win32 Debug"
+ cd "..\..\modules\http2"
+
+"libaprutil - Win32 DebugCLEAN" :
+ cd ".\..\..\srclib\apr-util"
+ $(MAKE) /$(MAKEFLAGS) /F ".\libaprutil.mak" CFG="libaprutil - Win32 Debug" RECURSE=1 CLEAN
+ cd "..\..\modules\http2"
+
+!ENDIF
+
+!IF "$(CFG)" == "mod_proxy_http2 - Win32 Release"
+
+"libhttpd - Win32 Release" :
+ cd ".\..\.."
+ $(MAKE) /$(MAKEFLAGS) /F ".\libhttpd.mak" CFG="libhttpd - Win32 Release"
+ cd ".\modules\http2"
+
+"libhttpd - Win32 ReleaseCLEAN" :
+ cd ".\..\.."
+ $(MAKE) /$(MAKEFLAGS) /F ".\libhttpd.mak" CFG="libhttpd - Win32 Release" RECURSE=1 CLEAN
+ cd ".\modules\http2"
+
+!ELSEIF "$(CFG)" == "mod_proxy_http2 - Win32 Debug"
+
+"libhttpd - Win32 Debug" :
+ cd ".\..\.."
+ $(MAKE) /$(MAKEFLAGS) /F ".\libhttpd.mak" CFG="libhttpd - Win32 Debug"
+ cd ".\modules\http2"
+
+"libhttpd - Win32 DebugCLEAN" :
+ cd ".\..\.."
+ $(MAKE) /$(MAKEFLAGS) /F ".\libhttpd.mak" CFG="libhttpd - Win32 Debug" RECURSE=1 CLEAN
+ cd ".\modules\http2"
+
+!ENDIF
+
+!IF "$(CFG)" == "mod_proxy_http2 - Win32 Release"
+
+"mod_http2 - Win32 Release" :
+ cd "."
+ $(MAKE) /$(MAKEFLAGS) /F ".\mod_http2.mak" CFG="mod_http2 - Win32 Release"
+ cd "."
+
+"mod_http2 - Win32 ReleaseCLEAN" :
+ cd "."
+ $(MAKE) /$(MAKEFLAGS) /F ".\mod_http2.mak" CFG="mod_http2 - Win32 Release" RECURSE=1 CLEAN
+ cd "."
+
+!ELSEIF "$(CFG)" == "mod_proxy_http2 - Win32 Debug"
+
+"mod_http2 - Win32 Debug" :
+ cd "."
+ $(MAKE) /$(MAKEFLAGS) /F ".\mod_http2.mak" CFG="mod_http2 - Win32 Debug"
+ cd "."
+
+"mod_http2 - Win32 DebugCLEAN" :
+ cd "."
+ $(MAKE) /$(MAKEFLAGS) /F ".\mod_http2.mak" CFG="mod_http2 - Win32 Debug" RECURSE=1 CLEAN
+ cd "."
+
+!ENDIF
+
+!IF "$(CFG)" == "mod_proxy_http2 - Win32 Release"
+
+"mod_proxy - Win32 Release" :
+ cd ".\..\proxy"
+ $(MAKE) /$(MAKEFLAGS) /F ".\mod_proxy.mak" CFG="mod_proxy - Win32 Release"
+ cd "..\http2"
+
+"mod_proxy - Win32 ReleaseCLEAN" :
+ cd ".\..\proxy"
+ $(MAKE) /$(MAKEFLAGS) /F ".\mod_proxy.mak" CFG="mod_proxy - Win32 Release" RECURSE=1 CLEAN
+ cd "..\http2"
+
+!ELSEIF "$(CFG)" == "mod_proxy_http2 - Win32 Debug"
+
+"mod_proxy - Win32 Debug" :
+ cd ".\..\proxy"
+ $(MAKE) /$(MAKEFLAGS) /F ".\mod_proxy.mak" CFG="mod_proxy - Win32 Debug"
+ cd "..\http2"
+
+"mod_proxy - Win32 DebugCLEAN" :
+ cd ".\..\proxy"
+ $(MAKE) /$(MAKEFLAGS) /F ".\mod_proxy.mak" CFG="mod_proxy - Win32 Debug" RECURSE=1 CLEAN
+ cd "..\http2"
+
+!ENDIF
+
+SOURCE=./h2_proxy_session.c
+
+"$(INTDIR)\h2_proxy_session.obj" : $(SOURCE) "$(INTDIR)"
+
+
+SOURCE=./h2_proxy_util.c
+
+"$(INTDIR)\h2_proxy_util.obj" : $(SOURCE) "$(INTDIR)"
+
+
+SOURCE=..\..\build\win32\httpd.rc
+
+!IF "$(CFG)" == "mod_proxy_http2 - Win32 Release"
+
+
+"$(INTDIR)\mod_proxy_http2.res" : $(SOURCE) "$(INTDIR)"
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_proxy_http2.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "NDEBUG" /d BIN_NAME="mod_proxy_http2.so" /d LONG_NAME="http2_module for Apache" $(SOURCE)
+
+
+!ELSEIF "$(CFG)" == "mod_proxy_http2 - Win32 Debug"
+
+
+"$(INTDIR)\mod_proxy_http2.res" : $(SOURCE) "$(INTDIR)"
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_proxy_http2.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "_DEBUG" /d BIN_NAME="mod_proxy_http2.so" /d LONG_NAME="http2_module for Apache" $(SOURCE)
+
+
+!ENDIF
+
+SOURCE=./mod_proxy_http2.c
+
+"$(INTDIR)\mod_proxy_http2.obj" : $(SOURCE) "$(INTDIR)"
+
+
+
+!ENDIF
+