Diffstat
-rw-r--r--  modules/http/.indent.pro | 54
-rw-r--r--  modules/http/Makefile.in | 3
-rw-r--r--  modules/http/byterange_filter.c | 611
-rw-r--r--  modules/http/chunk_filter.c | 197
-rw-r--r--  modules/http/config.m4 | 20
-rw-r--r--  modules/http/http_core.c | 328
-rw-r--r--  modules/http/http_etag.c | 220
-rw-r--r--  modules/http/http_filters.c | 1921
-rw-r--r--  modules/http/http_protocol.c | 1685
-rw-r--r--  modules/http/http_request.c | 862
-rw-r--r--  modules/http/mod_mime.c | 1026
-rw-r--r--  modules/http/mod_mime.dep | 55
-rw-r--r--  modules/http/mod_mime.dsp | 111
-rw-r--r--  modules/http/mod_mime.exp | 1
-rw-r--r--  modules/http/mod_mime.mak | 353
-rw-r--r--  modules/http2/.gitignore | 35
-rw-r--r--  modules/http2/Makefile.in | 20
-rw-r--r--  modules/http2/NWGNUmakefile | 246
-rw-r--r--  modules/http2/NWGNUmod_http2 | 395
-rw-r--r--  modules/http2/NWGNUproxyht2 | 288
-rw-r--r--  modules/http2/README.h2 | 70
-rw-r--r--  modules/http2/config2.m4 | 235
-rw-r--r--  modules/http2/h2.h | 166
-rw-r--r--  modules/http2/h2_alt_svc.c | 131
-rw-r--r--  modules/http2/h2_alt_svc.h | 40
-rw-r--r--  modules/http2/h2_bucket_beam.c | 1283
-rw-r--r--  modules/http2/h2_bucket_beam.h | 406
-rw-r--r--  modules/http2/h2_bucket_eos.c | 127
-rw-r--r--  modules/http2/h2_bucket_eos.h | 32
-rw-r--r--  modules/http2/h2_config.c | 702
-rw-r--r--  modules/http2/h2_config.h | 106
-rw-r--r--  modules/http2/h2_conn.c | 370
-rw-r--r--  modules/http2/h2_conn.h | 77
-rw-r--r--  modules/http2/h2_conn_io.c | 389
-rw-r--r--  modules/http2/h2_conn_io.h | 77
-rw-r--r--  modules/http2/h2_ctx.c | 121
-rw-r--r--  modules/http2/h2_ctx.h | 78
-rw-r--r--  modules/http2/h2_filter.c | 568
-rw-r--r--  modules/http2/h2_filter.h | 73
-rw-r--r--  modules/http2/h2_from_h1.c | 875
-rw-r--r--  modules/http2/h2_from_h1.h | 50
-rw-r--r--  modules/http2/h2_h2.c | 765
-rw-r--r--  modules/http2/h2_h2.h | 79
-rw-r--r--  modules/http2/h2_headers.c | 178
-rw-r--r--  modules/http2/h2_headers.h | 79
-rw-r--r--  modules/http2/h2_mplx.c | 1282
-rw-r--r--  modules/http2/h2_mplx.h | 330
-rw-r--r--  modules/http2/h2_ngn_shed.c | 392
-rw-r--r--  modules/http2/h2_ngn_shed.h | 79
-rw-r--r--  modules/http2/h2_private.h | 28
-rw-r--r--  modules/http2/h2_proxy_session.c | 1584
-rw-r--r--  modules/http2/h2_proxy_session.h | 128
-rw-r--r--  modules/http2/h2_proxy_util.c | 1335
-rw-r--r--  modules/http2/h2_proxy_util.h | 256
-rw-r--r--  modules/http2/h2_push.c | 1060
-rw-r--r--  modules/http2/h2_push.h | 120
-rw-r--r--  modules/http2/h2_request.c | 339
-rw-r--r--  modules/http2/h2_request.h | 48
-rw-r--r--  modules/http2/h2_session.c | 2360
-rw-r--r--  modules/http2/h2_session.h | 228
-rw-r--r--  modules/http2/h2_stream.c | 1078
-rw-r--r--  modules/http2/h2_stream.h | 309
-rw-r--r--  modules/http2/h2_switch.c | 194
-rw-r--r--  modules/http2/h2_switch.h | 30
-rw-r--r--  modules/http2/h2_task.c | 769
-rw-r--r--  modules/http2/h2_task.h | 127
-rw-r--r--  modules/http2/h2_util.c | 2015
-rw-r--r--  modules/http2/h2_util.h | 544
-rw-r--r--  modules/http2/h2_version.h | 41
-rw-r--r--  modules/http2/h2_workers.c | 383
-rw-r--r--  modules/http2/h2_workers.h | 82
-rw-r--r--  modules/http2/mod_http2.c | 393
-rw-r--r--  modules/http2/mod_http2.dep | 1433
-rw-r--r--  modules/http2/mod_http2.dsp | 195
-rw-r--r--  modules/http2/mod_http2.h | 101
-rw-r--r--  modules/http2/mod_http2.mak | 542
-rw-r--r--  modules/http2/mod_proxy_http2.c | 674
-rw-r--r--  modules/http2/mod_proxy_http2.dep | 208
-rw-r--r--  modules/http2/mod_proxy_http2.dsp | 119
-rw-r--r--  modules/http2/mod_proxy_http2.h | 21
-rw-r--r--  modules/http2/mod_proxy_http2.mak | 427
81 files changed, 34762 insertions, 0 deletions
diff --git a/modules/http/.indent.pro b/modules/http/.indent.pro
new file mode 100644
index 0000000..a9fbe9f
--- /dev/null
+++ b/modules/http/.indent.pro
@@ -0,0 +1,54 @@
+-i4 -npsl -di0 -br -nce -d0 -cli0 -npcs -nfc1
+-TBUFF
+-TFILE
+-TTRANS
+-TUINT4
+-T_trans
+-Tallow_options_t
+-Tapache_sfio
+-Tarray_header
+-Tbool_int
+-Tbuf_area
+-Tbuff_struct
+-Tbuffy
+-Tcmd_how
+-Tcmd_parms
+-Tcommand_rec
+-Tcommand_struct
+-Tconn_rec
+-Tcore_dir_config
+-Tcore_server_config
+-Tdir_maker_func
+-Tevent
+-Tglobals_s
+-Thandler_func
+-Thandler_rec
+-Tjoblist_s
+-Tlisten_rec
+-Tmerger_func
+-Tmode_t
+-Tmodule
+-Tmodule_struct
+-Tmutex
+-Tn_long
+-Tother_child_rec
+-Toverrides_t
+-Tparent_score
+-Tpid_t
+-Tpiped_log
+-Tpool
+-Trequest_rec
+-Trequire_line
+-Trlim_t
+-Tscoreboard
+-Tsemaphore
+-Tserver_addr_rec
+-Tserver_rec
+-Tserver_rec_chain
+-Tshort_score
+-Ttable
+-Ttable_entry
+-Tthread
+-Tu_wide_int
+-Tvtime_t
+-Twide_int
diff --git a/modules/http/Makefile.in b/modules/http/Makefile.in
new file mode 100644
index 0000000..167b343
--- /dev/null
+++ b/modules/http/Makefile.in
@@ -0,0 +1,3 @@
+
+include $(top_srcdir)/build/special.mk
+
diff --git a/modules/http/byterange_filter.c b/modules/http/byterange_filter.c
new file mode 100644
index 0000000..de585c5
--- /dev/null
+++ b/modules/http/byterange_filter.c
@@ -0,0 +1,611 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * byterange_filter.c --- HTTP byterange filter and friends.
+ */
+
+#include "apr.h"
+
+#include "apr_strings.h"
+#include "apr_buckets.h"
+#include "apr_lib.h"
+#include "apr_signal.h"
+
+#define APR_WANT_STDIO /* for sscanf */
+#define APR_WANT_STRFUNC
+#define APR_WANT_MEMFUNC
+#include "apr_want.h"
+
+#include "util_filter.h"
+#include "ap_config.h"
+#include "httpd.h"
+#include "http_config.h"
+#include "http_core.h"
+#include "http_protocol.h"
+#include "http_main.h"
+#include "http_request.h"
+#include "http_vhost.h"
+#include "http_log.h" /* For errors detected in basic auth common
+ * support code... */
+#include "apr_date.h" /* For apr_date_parse_http and APR_DATE_BAD */
+#include "util_charset.h"
+#include "util_ebcdic.h"
+#include "util_time.h"
+
+#include "mod_core.h"
+
+#if APR_HAVE_STDARG_H
+#include <stdarg.h>
+#endif
+#if APR_HAVE_UNISTD_H
+#include <unistd.h>
+#endif
+
+#ifndef AP_DEFAULT_MAX_RANGES
+#define AP_DEFAULT_MAX_RANGES 200
+#endif
+#ifndef AP_DEFAULT_MAX_OVERLAPS
+#define AP_DEFAULT_MAX_OVERLAPS 20
+#endif
+#ifndef AP_DEFAULT_MAX_REVERSALS
+#define AP_DEFAULT_MAX_REVERSALS 20
+#endif
+
+#define MAX_PREALLOC_RANGES 100
+
+APLOG_USE_MODULE(http);
+
+typedef struct indexes_t {
+ apr_off_t start;
+ apr_off_t end;
+} indexes_t;
+
+/*
+ * Returns: number of ranges (merged), or -1 if no range is satisfiable
+ */
+static int ap_set_byterange(request_rec *r, apr_off_t clength,
+ apr_array_header_t **indexes,
+ int *overlaps, int *reversals)
+{
+ const char *range;
+ const char *ct;
+ char *cur;
+ apr_array_header_t *merged;
+ int num_ranges = 0, unsatisfiable = 0;
+ apr_off_t ostart = 0, oend = 0, sum_lengths = 0;
+ int in_merge = 0;
+ indexes_t *idx;
+ int ranges = 1;
+ int i;
+ const char *it;
+
+ *overlaps = 0;
+ *reversals = 0;
+
+ if (r->assbackwards) {
+ return 0;
+ }
+
+ /*
+ * Check for Range request-header (HTTP/1.1) or Request-Range for
+ * backwards-compatibility with second-draft Luotonen/Franks
+ * byte-ranges (e.g. Netscape Navigator 2-3).
+ *
+ * We support this form, with Request-Range, and (farther down) we
+ * send multipart/x-byteranges instead of multipart/byteranges for
+ * Request-Range based requests to work around a bug in Netscape
+ * Navigator 2-3 and MSIE 3.
+ */
+
+ if (!(range = apr_table_get(r->headers_in, "Range"))) {
+ range = apr_table_get(r->headers_in, "Request-Range");
+ }
+
+ if (!range || strncasecmp(range, "bytes=", 6) || r->status != HTTP_OK) {
+ return 0;
+ }
+
+ /* is content already a single range? */
+ if (apr_table_get(r->headers_out, "Content-Range")) {
+ return 0;
+ }
+
+ /* is content already a multiple range? */
+ if ((ct = apr_table_get(r->headers_out, "Content-Type"))
+ && (!strncasecmp(ct, "multipart/byteranges", 20)
+ || !strncasecmp(ct, "multipart/x-byteranges", 22))) {
+ return 0;
+ }
+
+ /*
+ * Check the If-Range header for Etag or Date.
+ */
+ if (AP_CONDITION_NOMATCH == ap_condition_if_range(r, r->headers_out)) {
+ return 0;
+ }
+
+ range += 6;
+ it = range;
+ while (*it) {
+ if (*it++ == ',') {
+ ranges++;
+ }
+ }
+ it = range;
+ if (ranges > MAX_PREALLOC_RANGES) {
+ ranges = MAX_PREALLOC_RANGES;
+ }
+ *indexes = apr_array_make(r->pool, ranges, sizeof(indexes_t));
+ while ((cur = ap_getword(r->pool, &range, ','))) {
+ char *dash;
+ char *errp;
+ apr_off_t number, start, end;
+
+ if (!*cur)
+ break;
+
+ /*
+ * Per RFC 2616 14.35.1: If there is at least one syntactically invalid
+ * byte-range-spec, we must ignore the whole header.
+ */
+
+ if (!(dash = strchr(cur, '-'))) {
+ return 0;
+ }
+
+ if (dash == cur) {
+ /* In the form "-5" */
+ if (apr_strtoff(&number, dash+1, &errp, 10) || *errp) {
+ return 0;
+ }
+ if (number < 1) {
+ return 0;
+ }
+ start = clength - number;
+ end = clength - 1;
+ }
+ else {
+ *dash++ = '\0';
+ if (apr_strtoff(&number, cur, &errp, 10) || *errp) {
+ return 0;
+ }
+ start = number;
+ if (*dash) {
+ if (apr_strtoff(&number, dash, &errp, 10) || *errp) {
+ return 0;
+ }
+ end = number;
+ if (start > end) {
+ return 0;
+ }
+ }
+ else { /* "5-" */
+ end = clength - 1;
+ /*
+ * special case: 0-
+ * ignore all other ranges provided
+ * return as a single range: 0-
+ */
+ if (start == 0) {
+ num_ranges = 0;
+ sum_lengths = 0;
+ in_merge = 1;
+ oend = end;
+ ostart = start;
+ apr_array_clear(*indexes);
+ break;
+ }
+ }
+ }
+
+ if (start < 0) {
+ start = 0;
+ }
+ if (start >= clength) {
+ unsatisfiable = 1;
+ continue;
+ }
+ if (end >= clength) {
+ end = clength - 1;
+ }
+
+ if (!in_merge) {
+ /* new set */
+ ostart = start;
+ oend = end;
+ in_merge = 1;
+ continue;
+ }
+ in_merge = 0;
+
+ if (start >= ostart && end <= oend) {
+ in_merge = 1;
+ }
+
+ if (start < ostart && end >= ostart-1) {
+ ostart = start;
+ ++*reversals;
+ in_merge = 1;
+ }
+ if (end >= oend && start <= oend+1 ) {
+ oend = end;
+ in_merge = 1;
+ }
+
+ if (in_merge) {
+ ++*overlaps;
+ continue;
+ } else {
+ idx = (indexes_t *)apr_array_push(*indexes);
+ idx->start = ostart;
+ idx->end = oend;
+ sum_lengths += oend - ostart + 1;
+ /* new set again */
+ in_merge = 1;
+ ostart = start;
+ oend = end;
+ num_ranges++;
+ }
+ }
+
+ if (in_merge) {
+ idx = (indexes_t *)apr_array_push(*indexes);
+ idx->start = ostart;
+ idx->end = oend;
+ sum_lengths += oend - ostart + 1;
+ num_ranges++;
+ }
+ else if (num_ranges == 0 && unsatisfiable) {
+ /* If all ranges are unsatisfiable, we should return 416 */
+ return -1;
+ }
+ if (sum_lengths > clength) {
+ ap_log_rerror(APLOG_MARK, APLOG_TRACE1, 0, r,
+ "Sum of ranges larger than file, ignoring.");
+ return 0;
+ }
+
+ /*
+     * now that we know we need it, create the merged table
+ */
+ merged = apr_array_make(r->pool, num_ranges, sizeof(char *));
+ idx = (indexes_t *)(*indexes)->elts;
+ for (i = 0; i < (*indexes)->nelts; i++, idx++) {
+ char **new = (char **)apr_array_push(merged);
+ *new = apr_psprintf(r->pool, "%" APR_OFF_T_FMT "-%" APR_OFF_T_FMT,
+ idx->start, idx->end);
+ }
+
+ r->status = HTTP_PARTIAL_CONTENT;
+ r->range = apr_array_pstrcat(r->pool, merged, ',');
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(01583)
+ "Range: %s | %s (%d : %d : %"APR_OFF_T_FMT")",
+ it, r->range, *overlaps, *reversals, clength);
+
+ return num_ranges;
+}
+
+/*
+ * Here we try to be compatible with clients that want multipart/x-byteranges
+ * instead of multipart/byteranges (also see above), as per HTTP/1.1. We
+ * look for the Request-Range header (e.g. Netscape 2 and 3) as an indication
+ * that the browser supports an older protocol. We also check User-Agent
+ * for Microsoft Internet Explorer 3, which needs this as well.
+ */
+static int use_range_x(request_rec *r)
+{
+ const char *ua;
+ return (apr_table_get(r->headers_in, "Request-Range")
+ || ((ua = apr_table_get(r->headers_in, "User-Agent"))
+ && ap_strstr_c(ua, "MSIE 3")));
+}
+
+#define BYTERANGE_FMT "%" APR_OFF_T_FMT "-%" APR_OFF_T_FMT "/%" APR_OFF_T_FMT
+
+static apr_status_t copy_brigade_range(apr_bucket_brigade *bb,
+ apr_bucket_brigade *bbout,
+ apr_off_t start,
+ apr_off_t end)
+{
+ apr_bucket *first = NULL, *last = NULL, *out_first = NULL, *e;
+ apr_uint64_t pos = 0, off_first = 0, off_last = 0;
+ apr_status_t rv;
+ apr_uint64_t start64, end64;
+ apr_off_t pofft = 0;
+
+    /*
+     * Once we know that start and end are >= 0, convert everything to
+     * apr_uint64_t. See the comments in apr_brigade_partition for why.
+     * In short, apr_off_t (for values >= 0) and apr_size_t both fit into
+     * apr_uint64_t.
+     */
+ start64 = (apr_uint64_t)start;
+ end64 = (apr_uint64_t)end;
+
+ if (start < 0 || end < 0 || start64 > end64)
+ return APR_EINVAL;
+
+ for (e = APR_BRIGADE_FIRST(bb);
+ e != APR_BRIGADE_SENTINEL(bb);
+ e = APR_BUCKET_NEXT(e))
+ {
+ apr_uint64_t elen64;
+ /* we know that no bucket has undefined length (-1) */
+ AP_DEBUG_ASSERT(e->length != (apr_size_t)(-1));
+ elen64 = (apr_uint64_t)e->length;
+ if (!first && (elen64 + pos > start64)) {
+ first = e;
+ off_first = pos;
+ }
+ if (elen64 + pos > end64) {
+ last = e;
+ off_last = pos;
+ break;
+ }
+ pos += elen64;
+ }
+ if (!first || !last)
+ return APR_EINVAL;
+
+ e = first;
+ while (1)
+ {
+ apr_bucket *copy;
+ AP_DEBUG_ASSERT(e != APR_BRIGADE_SENTINEL(bb));
+ rv = apr_bucket_copy(e, &copy);
+ if (rv != APR_SUCCESS) {
+ apr_brigade_cleanup(bbout);
+ return rv;
+ }
+
+ APR_BRIGADE_INSERT_TAIL(bbout, copy);
+ if (e == first) {
+ if (off_first != start64) {
+ rv = apr_bucket_split(copy, (apr_size_t)(start64 - off_first));
+ if (rv != APR_SUCCESS) {
+ apr_brigade_cleanup(bbout);
+ return rv;
+ }
+ out_first = APR_BUCKET_NEXT(copy);
+ apr_bucket_delete(copy);
+ }
+ else {
+ out_first = copy;
+ }
+ }
+ if (e == last) {
+ if (e == first) {
+ off_last += start64 - off_first;
+ copy = out_first;
+ }
+ if (end64 - off_last != (apr_uint64_t)e->length) {
+ rv = apr_bucket_split(copy, (apr_size_t)(end64 + 1 - off_last));
+ if (rv != APR_SUCCESS) {
+ apr_brigade_cleanup(bbout);
+ return rv;
+ }
+ copy = APR_BUCKET_NEXT(copy);
+ if (copy != APR_BRIGADE_SENTINEL(bbout)) {
+ apr_bucket_delete(copy);
+ }
+ }
+ break;
+ }
+ e = APR_BUCKET_NEXT(e);
+ }
+
+ AP_DEBUG_ASSERT(APR_SUCCESS == apr_brigade_length(bbout, 1, &pofft));
+ pos = (apr_uint64_t)pofft;
+ AP_DEBUG_ASSERT(pos == end64 - start64 + 1);
+ return APR_SUCCESS;
+}
+
+static apr_status_t send_416(ap_filter_t *f, apr_bucket_brigade *tmpbb)
+{
+ apr_bucket *e;
+ conn_rec *c = f->r->connection;
+ ap_remove_output_filter(f);
+ f->r->status = HTTP_OK;
+ e = ap_bucket_error_create(HTTP_RANGE_NOT_SATISFIABLE, NULL,
+ f->r->pool, c->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(tmpbb, e);
+ e = apr_bucket_eos_create(c->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(tmpbb, e);
+ return ap_pass_brigade(f->next, tmpbb);
+}
+
+AP_CORE_DECLARE_NONSTD(apr_status_t) ap_byterange_filter(ap_filter_t *f,
+ apr_bucket_brigade *bb)
+{
+ request_rec *r = f->r;
+ conn_rec *c = r->connection;
+ apr_bucket *e;
+ apr_bucket_brigade *bsend;
+ apr_bucket_brigade *tmpbb;
+ apr_off_t range_start;
+ apr_off_t range_end;
+ apr_off_t clength = 0;
+ apr_status_t rv;
+ int found = 0;
+ int num_ranges;
+ char *bound_head = NULL;
+ apr_array_header_t *indexes;
+ indexes_t *idx;
+ int i;
+ int original_status;
+ int max_ranges, max_overlaps, max_reversals;
+ int overlaps = 0, reversals = 0;
+ core_dir_config *core_conf = ap_get_core_module_config(r->per_dir_config);
+
+ max_ranges = ( (core_conf->max_ranges >= 0 || core_conf->max_ranges == AP_MAXRANGES_UNLIMITED)
+ ? core_conf->max_ranges
+ : AP_DEFAULT_MAX_RANGES );
+ max_overlaps = ( (core_conf->max_overlaps >= 0 || core_conf->max_overlaps == AP_MAXRANGES_UNLIMITED)
+ ? core_conf->max_overlaps
+ : AP_DEFAULT_MAX_OVERLAPS );
+ max_reversals = ( (core_conf->max_reversals >= 0 || core_conf->max_reversals == AP_MAXRANGES_UNLIMITED)
+ ? core_conf->max_reversals
+ : AP_DEFAULT_MAX_REVERSALS );
+ /*
+ * Iterate through the brigade until reaching EOS or a bucket with
+ * unknown length.
+ */
+ for (e = APR_BRIGADE_FIRST(bb);
+ (e != APR_BRIGADE_SENTINEL(bb) && !APR_BUCKET_IS_EOS(e)
+ && e->length != (apr_size_t)-1);
+ e = APR_BUCKET_NEXT(e)) {
+ clength += e->length;
+ }
+
+ /*
+ * Don't attempt to do byte range work if this brigade doesn't
+ * contain an EOS, or if any of the buckets has an unknown length;
+ * this avoids the cases where it is expensive to perform
+ * byteranging (i.e. may require arbitrary amounts of memory).
+ */
+ if (!APR_BUCKET_IS_EOS(e) || clength <= 0) {
+ ap_remove_output_filter(f);
+ return ap_pass_brigade(f->next, bb);
+ }
+
+ original_status = r->status;
+ num_ranges = ap_set_byterange(r, clength, &indexes, &overlaps, &reversals);
+
+ /* No Ranges or we hit a limit? We have nothing to do, get out of the way. */
+ if (num_ranges == 0 ||
+ (max_ranges >= 0 && num_ranges > max_ranges) ||
+ (max_overlaps >= 0 && overlaps > max_overlaps) ||
+ (max_reversals >= 0 && reversals > max_reversals)) {
+ r->status = original_status;
+ ap_remove_output_filter(f);
+ return ap_pass_brigade(f->next, bb);
+ }
+
+ /* this brigade holds what we will be sending */
+ bsend = apr_brigade_create(r->pool, c->bucket_alloc);
+
+ if (num_ranges < 0)
+ return send_416(f, bsend);
+
+ if (num_ranges > 1) {
+ /* Is ap_make_content_type required here? */
+ const char *orig_ct = ap_make_content_type(r, r->content_type);
+
+ ap_set_content_type(r, apr_pstrcat(r->pool, "multipart",
+ use_range_x(r) ? "/x-" : "/",
+ "byteranges; boundary=",
+ ap_multipart_boundary, NULL));
+
+ if (orig_ct) {
+ bound_head = apr_pstrcat(r->pool,
+ CRLF "--", ap_multipart_boundary,
+ CRLF "Content-type: ",
+ orig_ct,
+ CRLF "Content-range: bytes ",
+ NULL);
+ }
+ else {
+ /* if we have no type for the content, do our best */
+ bound_head = apr_pstrcat(r->pool,
+ CRLF "--", ap_multipart_boundary,
+ CRLF "Content-range: bytes ",
+ NULL);
+ }
+ ap_xlate_proto_to_ascii(bound_head, strlen(bound_head));
+ }
+
+ tmpbb = apr_brigade_create(r->pool, c->bucket_alloc);
+
+ idx = (indexes_t *)indexes->elts;
+ for (i = 0; i < indexes->nelts; i++, idx++) {
+ range_start = idx->start;
+ range_end = idx->end;
+
+ rv = copy_brigade_range(bb, tmpbb, range_start, range_end);
+ if (rv != APR_SUCCESS ) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01584)
+ "copy_brigade_range() failed [%" APR_OFF_T_FMT
+ "-%" APR_OFF_T_FMT ",%" APR_OFF_T_FMT "]",
+ range_start, range_end, clength);
+ continue;
+ }
+ found = 1;
+
+ /*
+         * For single-range requests, we must produce a Content-Range header.
+ * Otherwise, we need to produce the multipart boundaries.
+ */
+ if (num_ranges == 1) {
+ apr_table_setn(r->headers_out, "Content-Range",
+ apr_psprintf(r->pool, "bytes " BYTERANGE_FMT,
+ range_start, range_end, clength));
+ }
+ else {
+ char *ts;
+
+ e = apr_bucket_pool_create(bound_head, strlen(bound_head),
+ r->pool, c->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(bsend, e);
+
+ ts = apr_psprintf(r->pool, BYTERANGE_FMT CRLF CRLF,
+ range_start, range_end, clength);
+ ap_xlate_proto_to_ascii(ts, strlen(ts));
+ e = apr_bucket_pool_create(ts, strlen(ts), r->pool,
+ c->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(bsend, e);
+ }
+
+ APR_BRIGADE_CONCAT(bsend, tmpbb);
+ if (i && !(i & 0x1F)) {
+ /*
+ * Every now and then, pass what we have down the filter chain.
+ * In this case, the content-length filter cannot calculate and
+ * set the content length and we must remove any Content-Length
+ * header already present.
+ */
+ apr_table_unset(r->headers_out, "Content-Length");
+ if ((rv = ap_pass_brigade(f->next, bsend)) != APR_SUCCESS)
+ return rv;
+ apr_brigade_cleanup(bsend);
+ }
+ }
+
+ if (found == 0) {
+ /* bsend is assumed to be empty if we get here. */
+ return send_416(f, bsend);
+ }
+
+ if (num_ranges > 1) {
+ char *end;
+
+ /* add the final boundary */
+ end = apr_pstrcat(r->pool, CRLF "--", ap_multipart_boundary, "--" CRLF,
+ NULL);
+ ap_xlate_proto_to_ascii(end, strlen(end));
+ e = apr_bucket_pool_create(end, strlen(end), r->pool, c->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(bsend, e);
+ }
+
+ e = apr_bucket_eos_create(c->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(bsend, e);
+
+ /* we're done with the original content - all of our data is in bsend. */
+ apr_brigade_cleanup(bb);
+ apr_brigade_destroy(tmpbb);
+
+ /* send our multipart output */
+ return ap_pass_brigade(f->next, bsend);
+}
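
The merge logic in ap_set_byterange() above boils each byte-range-spec down to a start/end pair clamped to the content length before the filter partitions the brigade. Below is a minimal standalone sketch of that per-spec parsing, using only APR; the helper name parse_one_range and the sample specs are illustrative and not part of the patch:

    #include <stdio.h>
    #include <string.h>

    #include "apr_general.h"
    #include "apr_pools.h"
    #include "apr_strings.h"

    /* Illustrative helper (not in the patch): parse one byte-range-spec
     * ("first-last", "first-" or "-suffix") against a known content length,
     * following the same rules as ap_set_byterange(). Returns 0 on success,
     * -1 on a syntax error. */
    static int parse_one_range(apr_pool_t *p, const char *spec,
                               apr_off_t clength,
                               apr_off_t *start, apr_off_t *end)
    {
        char *cur = apr_pstrdup(p, spec);
        char *dash, *errp;
        apr_off_t number;

        if (!(dash = strchr(cur, '-'))) {
            return -1;
        }
        if (dash == cur) {                     /* "-500": the last 500 bytes */
            if (apr_strtoff(&number, dash + 1, &errp, 10) || *errp || number < 1) {
                return -1;
            }
            *start = clength - number;
            *end = clength - 1;
        }
        else {
            *dash++ = '\0';
            if (apr_strtoff(&number, cur, &errp, 10) || *errp) {
                return -1;
            }
            *start = number;
            if (*dash) {                       /* "0-499" */
                if (apr_strtoff(&number, dash, &errp, 10) || *errp
                    || number < *start) {
                    return -1;
                }
                *end = number;
            }
            else {                             /* "9500-": from 9500 to the end */
                *end = clength - 1;
            }
        }
        if (*start < 0) {
            *start = 0;
        }
        if (*end >= clength) {
            *end = clength - 1;
        }
        return 0;
    }

    int main(void)
    {
        const char *specs[] = { "0-499", "-500", "9500-" };
        apr_off_t start, end;
        apr_pool_t *p;
        int i;

        apr_initialize();
        apr_pool_create(&p, NULL);
        for (i = 0; i < 3; i++) {
            if (parse_one_range(p, specs[i], 10000, &start, &end) == 0) {
                printf("%s -> bytes %" APR_OFF_T_FMT "-%" APR_OFF_T_FMT "\n",
                       specs[i], start, end);
            }
        }
        apr_pool_destroy(p);
        apr_terminate();
        return 0;
    }

Against a 10000-byte resource, the three specs resolve to 0-499, 9500-9999 and 9500-9999; the real filter would then merge the overlapping pair as counted by *overlaps.
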
diff --git a/modules/http/chunk_filter.c b/modules/http/chunk_filter.c
new file mode 100644
index 0000000..cb1501a
--- /dev/null
+++ b/modules/http/chunk_filter.c
@@ -0,0 +1,197 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * chunk_filter.c --- HTTP/1.1 chunked transfer encoding filter.
+ */
+
+#include "apr_strings.h"
+#include "apr_thread_proc.h" /* for RLIMIT stuff */
+
+#define APR_WANT_STRFUNC
+#include "apr_want.h"
+
+#include "httpd.h"
+#include "http_config.h"
+#include "http_connection.h"
+#include "http_core.h"
+#include "http_protocol.h" /* For index_of_response(). Grump. */
+#include "http_request.h"
+
+#include "util_filter.h"
+#include "util_ebcdic.h"
+#include "ap_mpm.h"
+#include "scoreboard.h"
+
+#include "mod_core.h"
+
+/*
+ * A pointer to this is stored in the filter context to record that a bad
+ * gateway error bucket has been seen; its address merely serves as a
+ * unique sentinel value.
+ */
+static char bad_gateway_seen;
+
+apr_status_t ap_http_chunk_filter(ap_filter_t *f, apr_bucket_brigade *b)
+{
+#define ASCII_CRLF "\015\012"
+#define ASCII_ZERO "\060"
+ conn_rec *c = f->r->connection;
+ apr_bucket_brigade *more, *tmp;
+ apr_bucket *e;
+ apr_status_t rv;
+
+ for (more = tmp = NULL; b; b = more, more = NULL) {
+ apr_off_t bytes = 0;
+ apr_bucket *eos = NULL;
+ apr_bucket *flush = NULL;
+ /* XXX: chunk_hdr must remain at this scope since it is used in a
+ * transient bucket.
+ */
+ char chunk_hdr[20]; /* enough space for the snprintf below */
+
+
+ for (e = APR_BRIGADE_FIRST(b);
+ e != APR_BRIGADE_SENTINEL(b);
+ e = APR_BUCKET_NEXT(e))
+ {
+ if (APR_BUCKET_IS_EOS(e)) {
+ /* there shouldn't be anything after the eos */
+ ap_remove_output_filter(f);
+ eos = e;
+ break;
+ }
+ if (AP_BUCKET_IS_ERROR(e)
+ && (((ap_bucket_error *)(e->data))->status
+ == HTTP_BAD_GATEWAY)) {
+ /*
+                 * We had a broken backend. Record this in the filter
+ * context.
+ */
+ f->ctx = &bad_gateway_seen;
+ continue;
+ }
+ if (APR_BUCKET_IS_FLUSH(e)) {
+ flush = e;
+ if (e != APR_BRIGADE_LAST(b)) {
+ more = apr_brigade_split_ex(b, APR_BUCKET_NEXT(e), tmp);
+ }
+ break;
+ }
+ else if (e->length == (apr_size_t)-1) {
+ /* unknown amount of data (e.g. a pipe) */
+ const char *data;
+ apr_size_t len;
+
+ rv = apr_bucket_read(e, &data, &len, APR_BLOCK_READ);
+ if (rv != APR_SUCCESS) {
+ return rv;
+ }
+ if (len > 0) {
+ /*
+ * There may be a new next bucket representing the
+                     * rest of the data stream, on which a read() may
+                     * block, so we pass down what we have so far.
+ */
+ bytes += len;
+ more = apr_brigade_split_ex(b, APR_BUCKET_NEXT(e), tmp);
+ break;
+ }
+ else {
+ /* If there was nothing in this bucket then we can
+ * safely move on to the next one without pausing
+ * to pass down what we have counted up so far.
+ */
+ continue;
+ }
+ }
+ else {
+ bytes += e->length;
+ }
+ }
+
+ /*
+ * XXX: if there aren't very many bytes at this point it may
+ * be a good idea to set them aside and return for more,
+ * unless we haven't finished counting this brigade yet.
+ */
+ /* if there are content bytes, then wrap them in a chunk */
+ if (bytes > 0) {
+ apr_size_t hdr_len;
+ /*
+ * Insert the chunk header, specifying the number of bytes in
+ * the chunk.
+ */
+ hdr_len = apr_snprintf(chunk_hdr, sizeof(chunk_hdr),
+ "%" APR_UINT64_T_HEX_FMT CRLF, (apr_uint64_t)bytes);
+ ap_xlate_proto_to_ascii(chunk_hdr, hdr_len);
+ e = apr_bucket_transient_create(chunk_hdr, hdr_len,
+ c->bucket_alloc);
+ APR_BRIGADE_INSERT_HEAD(b, e);
+
+ /*
+ * Insert the end-of-chunk CRLF before an EOS or
+ * FLUSH bucket, or appended to the brigade
+ */
+ e = apr_bucket_immortal_create(ASCII_CRLF, 2, c->bucket_alloc);
+ if (eos != NULL) {
+ APR_BUCKET_INSERT_BEFORE(eos, e);
+ }
+ else if (flush != NULL) {
+ APR_BUCKET_INSERT_BEFORE(flush, e);
+ }
+ else {
+ APR_BRIGADE_INSERT_TAIL(b, e);
+ }
+ }
+
+ /* RFC 2616, Section 3.6.1
+ *
+ * If there is an EOS bucket, then prefix it with:
+ * 1) the last-chunk marker ("0" CRLF)
+ * 2) the trailer
+ * 3) the end-of-chunked body CRLF
+ *
+ * We only do this if we have not seen an error bucket with
+         * status HTTP_BAD_GATEWAY. Seeing such a bucket is recorded in
+         * the filter context.
+ * The error bucket with status HTTP_BAD_GATEWAY indicates that the
+ * connection to the backend (mod_proxy) broke in the middle of the
+ * response. In order to signal the client that something went wrong
+ * we do not create the last-chunk marker and set c->keepalive to
+ * AP_CONN_CLOSE in the core output filter.
+ *
+ * XXX: it would be nice to combine this with the end-of-chunk
+         * marker above, but this is a bit more straightforward for
+ * now.
+ */
+ if (eos && !f->ctx) {
+ /* XXX: (2) trailers ... does not yet exist */
+ e = apr_bucket_immortal_create(ASCII_ZERO ASCII_CRLF
+ /* <trailers> */
+ ASCII_CRLF, 5, c->bucket_alloc);
+ APR_BUCKET_INSERT_BEFORE(eos, e);
+ }
+
+ /* pass the brigade to the next filter. */
+ rv = ap_pass_brigade(f->next, b);
+ apr_brigade_cleanup(b);
+ if (rv != APR_SUCCESS || eos != NULL) {
+ return rv;
+ }
+ tmp = b;
+ }
+ return APR_SUCCESS;
+}
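
On the wire, the framing produced by ap_http_chunk_filter() above is simply "<hex-size> CRLF <data> CRLF" per chunk, terminated by a zero-length last chunk. A minimal standalone sketch of that framing for a single fragment, reusing the same apr_snprintf format as the filter; the sample data and the local CRLF define are illustrative, not part of the patch:

    #include <stdio.h>
    #include <string.h>

    #include "apr_general.h"
    #include "apr_strings.h"

    #define CRLF "\r\n"   /* normally provided by httpd.h */

    int main(void)
    {
        /* Frame one body fragment the way ap_http_chunk_filter() does:
         * "<hex-size> CRLF <data> CRLF", then the last-chunk marker
         * "0" CRLF CRLF once EOS is reached (trailers omitted). */
        const char *data = "Hello, chunked world.";
        char chunk_hdr[20];            /* enough space, as in the filter */
        apr_size_t hdr_len;

        hdr_len = apr_snprintf(chunk_hdr, sizeof(chunk_hdr),
                               "%" APR_UINT64_T_HEX_FMT CRLF,
                               (apr_uint64_t)strlen(data));

        fwrite(chunk_hdr, 1, hdr_len, stdout);   /* "15\r\n" for 21 bytes */
        fwrite(data, 1, strlen(data), stdout);   /* the chunk payload */
        fwrite(CRLF, 1, 2, stdout);              /* end-of-chunk CRLF */
        fwrite("0" CRLF CRLF, 1, 5, stdout);     /* last chunk, no trailers */
        return 0;
    }
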
diff --git a/modules/http/config.m4 b/modules/http/config.m4
new file mode 100644
index 0000000..6496007
--- /dev/null
+++ b/modules/http/config.m4
@@ -0,0 +1,20 @@
+dnl modules enabled in this directory by default
+
+APACHE_MODPATH_INIT(http)
+
+http_objects="http_core.lo http_protocol.lo http_request.lo http_filters.lo chunk_filter.lo byterange_filter.lo http_etag.lo"
+
+dnl mod_http should only be built as a static module for now.
+dnl this will hopefully be "fixed" at some point in the future by
+dnl refactoring mod_http and moving some things to the core and
+dnl vice versa so that the core does not depend upon mod_http.
+if test "$enable_http" = "yes"; then
+ enable_http="static"
+elif test "$enable_http" = "shared"; then
+ AC_MSG_ERROR([mod_http can not be built as a shared DSO])
+fi
+
+APACHE_MODULE(http,[HTTP protocol handling. The http module is a basic one that enables the server to function as an HTTP server. It is only useful to disable it if you want to use another protocol module instead. Don't disable this module unless you are really sure what you are doing. Note: This module will always be linked statically.], $http_objects, , static)
+APACHE_MODULE(mime, mapping of file-extension to MIME. Disabling this module is normally not recommended., , , yes)
+
+APACHE_MODPATH_FINISH
diff --git a/modules/http/http_core.c b/modules/http/http_core.c
new file mode 100644
index 0000000..35869b4
--- /dev/null
+++ b/modules/http/http_core.c
@@ -0,0 +1,328 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "apr_strings.h"
+#include "apr_thread_proc.h" /* for RLIMIT stuff */
+
+#define APR_WANT_STRFUNC
+#include "apr_want.h"
+
+#include "httpd.h"
+#include "http_config.h"
+#include "http_connection.h"
+#include "http_core.h"
+#include "http_protocol.h" /* For index_of_response(). Grump. */
+#include "http_request.h"
+
+#include "util_filter.h"
+#include "util_ebcdic.h"
+#include "ap_mpm.h"
+#include "scoreboard.h"
+
+#include "mod_core.h"
+
+/* Handles for core filters */
+AP_DECLARE_DATA ap_filter_rec_t *ap_http_input_filter_handle;
+AP_DECLARE_DATA ap_filter_rec_t *ap_http_header_filter_handle;
+AP_DECLARE_DATA ap_filter_rec_t *ap_chunk_filter_handle;
+AP_DECLARE_DATA ap_filter_rec_t *ap_http_outerror_filter_handle;
+AP_DECLARE_DATA ap_filter_rec_t *ap_byterange_filter_handle;
+
+AP_DECLARE_DATA const char *ap_multipart_boundary;
+
+/* If we are using an MPM that supports async connections,
+ * use a different processing function.
+ */
+static int async_mpm = 0;
+
+static const char *set_keep_alive_timeout(cmd_parms *cmd, void *dummy,
+ const char *arg)
+{
+ apr_interval_time_t timeout;
+ const char *err = ap_check_cmd_context(cmd, NOT_IN_DIR_CONTEXT);
+ if (err != NULL) {
+ return err;
+ }
+
+ /* Stolen from mod_proxy.c */
+ if (ap_timeout_parameter_parse(arg, &timeout, "s") != APR_SUCCESS)
+ return "KeepAliveTimeout has wrong format";
+ cmd->server->keep_alive_timeout = timeout;
+
+ /* We don't want to take into account whether or not KeepAliveTimeout is
+ * set for the main server, because if no http_module directive is used
+ * for a vhost, it will inherit the http_srv_cfg from the main server.
+ * However keep_alive_timeout_set helps determine whether the vhost should
+ * use its own configured timeout or the one from the vhost declared first
+ * on the same IP:port (ie. c->base_server, and the legacy behaviour).
+ */
+ if (cmd->server->is_virtual) {
+ cmd->server->keep_alive_timeout_set = 1;
+ }
+ return NULL;
+}
+
+static const char *set_keep_alive(cmd_parms *cmd, void *dummy,
+ int arg)
+{
+ const char *err = ap_check_cmd_context(cmd, NOT_IN_DIR_CONTEXT);
+ if (err != NULL) {
+ return err;
+ }
+
+ cmd->server->keep_alive = arg;
+ return NULL;
+}
+
+static const char *set_keep_alive_max(cmd_parms *cmd, void *dummy,
+ const char *arg)
+{
+ const char *err = ap_check_cmd_context(cmd, NOT_IN_DIR_CONTEXT);
+ if (err != NULL) {
+ return err;
+ }
+
+ cmd->server->keep_alive_max = atoi(arg);
+ return NULL;
+}
+
+static const command_rec http_cmds[] = {
+ AP_INIT_TAKE1("KeepAliveTimeout", set_keep_alive_timeout, NULL, RSRC_CONF,
+ "Keep-Alive timeout duration (sec)"),
+ AP_INIT_TAKE1("MaxKeepAliveRequests", set_keep_alive_max, NULL, RSRC_CONF,
+ "Maximum number of Keep-Alive requests per connection, "
+ "or 0 for infinite"),
+ AP_INIT_FLAG("KeepAlive", set_keep_alive, NULL, RSRC_CONF,
+ "Whether persistent connections should be On or Off"),
+ { NULL }
+};
+
+static const char *http_scheme(const request_rec *r)
+{
+ /*
+ * The http module shouldn't return anything other than
+ * "http" (the default) or "https".
+ */
+ if (r->server->server_scheme &&
+ (strcmp(r->server->server_scheme, "https") == 0))
+ return "https";
+
+ return "http";
+}
+
+static apr_port_t http_port(const request_rec *r)
+{
+ if (r->server->server_scheme &&
+ (strcmp(r->server->server_scheme, "https") == 0))
+ return DEFAULT_HTTPS_PORT;
+
+ return DEFAULT_HTTP_PORT;
+}
+
+static int ap_process_http_async_connection(conn_rec *c)
+{
+ request_rec *r = NULL;
+ conn_state_t *cs = c->cs;
+
+ AP_DEBUG_ASSERT(cs != NULL);
+ AP_DEBUG_ASSERT(cs->state == CONN_STATE_READ_REQUEST_LINE);
+
+ while (cs->state == CONN_STATE_READ_REQUEST_LINE) {
+ ap_update_child_status_from_conn(c->sbh, SERVER_BUSY_READ, c);
+
+ if ((r = ap_read_request(c))) {
+
+ c->keepalive = AP_CONN_UNKNOWN;
+ /* process the request if it was read without error */
+
+ if (r->status == HTTP_OK) {
+ cs->state = CONN_STATE_HANDLER;
+ ap_update_child_status(c->sbh, SERVER_BUSY_WRITE, r);
+ ap_process_async_request(r);
+ /* After the call to ap_process_request, the
+ * request pool may have been deleted. We set
+ * r=NULL here to ensure that any dereference
+ * of r that might be added later in this function
+ * will result in a segfault immediately instead
+ * of nondeterministic failures later.
+ */
+ r = NULL;
+ }
+
+ if (cs->state != CONN_STATE_WRITE_COMPLETION &&
+ cs->state != CONN_STATE_SUSPENDED) {
+ /* Something went wrong; close the connection */
+ cs->state = CONN_STATE_LINGER;
+ }
+ }
+ else { /* ap_read_request failed - client may have closed */
+ cs->state = CONN_STATE_LINGER;
+ }
+ }
+
+ return OK;
+}
+
+static int ap_process_http_sync_connection(conn_rec *c)
+{
+ request_rec *r;
+ conn_state_t *cs = c->cs;
+ apr_socket_t *csd = NULL;
+ int mpm_state = 0;
+
+ /*
+ * Read and process each request found on our connection
+ * until no requests are left or we decide to close.
+ */
+
+ ap_update_child_status_from_conn(c->sbh, SERVER_BUSY_READ, c);
+ while ((r = ap_read_request(c)) != NULL) {
+ apr_interval_time_t keep_alive_timeout = r->server->keep_alive_timeout;
+
+ /* To preserve legacy behaviour, use the keepalive timeout from the
+ * base server (first on this IP:port) when none is explicitly
+ * configured on this server.
+ */
+ if (!r->server->keep_alive_timeout_set) {
+ keep_alive_timeout = c->base_server->keep_alive_timeout;
+ }
+
+ c->keepalive = AP_CONN_UNKNOWN;
+ /* process the request if it was read without error */
+
+ if (r->status == HTTP_OK) {
+ if (cs)
+ cs->state = CONN_STATE_HANDLER;
+ ap_update_child_status(c->sbh, SERVER_BUSY_WRITE, r);
+ ap_process_request(r);
+ /* After the call to ap_process_request, the
+ * request pool will have been deleted. We set
+ * r=NULL here to ensure that any dereference
+ * of r that might be added later in this function
+ * will result in a segfault immediately instead
+ * of nondeterministic failures later.
+ */
+ r = NULL;
+ }
+
+ if (c->keepalive != AP_CONN_KEEPALIVE || c->aborted)
+ break;
+
+ ap_update_child_status(c->sbh, SERVER_BUSY_KEEPALIVE, NULL);
+
+ if (ap_mpm_query(AP_MPMQ_MPM_STATE, &mpm_state)) {
+ break;
+ }
+
+ if (mpm_state == AP_MPMQ_STOPPING) {
+ break;
+ }
+
+ if (!csd) {
+ csd = ap_get_conn_socket(c);
+ }
+ apr_socket_opt_set(csd, APR_INCOMPLETE_READ, 1);
+ apr_socket_timeout_set(csd, keep_alive_timeout);
+ /* Go straight to select() to wait for the next request */
+ }
+
+ return OK;
+}
+
+static int ap_process_http_connection(conn_rec *c)
+{
+ if (async_mpm && !c->clogging_input_filters) {
+ return ap_process_http_async_connection(c);
+ }
+ else {
+ return ap_process_http_sync_connection(c);
+ }
+}
+
+static int http_create_request(request_rec *r)
+{
+ if (!r->main && !r->prev) {
+ ap_add_output_filter_handle(ap_byterange_filter_handle,
+ NULL, r, r->connection);
+ ap_add_output_filter_handle(ap_content_length_filter_handle,
+ NULL, r, r->connection);
+ ap_add_output_filter_handle(ap_http_header_filter_handle,
+ NULL, r, r->connection);
+ ap_add_output_filter_handle(ap_http_outerror_filter_handle,
+ NULL, r, r->connection);
+ }
+
+ return OK;
+}
+
+static int http_send_options(request_rec *r)
+{
+ if ((r->method_number == M_OPTIONS) && r->uri && (r->uri[0] == '*') &&
+ (r->uri[1] == '\0')) {
+ return DONE; /* Send HTTP pong, without Allow header */
+ }
+ return DECLINED;
+}
+
+static int http_post_config(apr_pool_t *p, apr_pool_t *plog, apr_pool_t *ptemp, server_rec *s)
+{
+ apr_uint64_t val;
+ if (ap_mpm_query(AP_MPMQ_IS_ASYNC, &async_mpm) != APR_SUCCESS) {
+ async_mpm = 0;
+ }
+ ap_random_insecure_bytes(&val, sizeof(val));
+ ap_multipart_boundary = apr_psprintf(p, "%0" APR_UINT64_T_HEX_FMT, val);
+
+ return OK;
+}
+
+static void register_hooks(apr_pool_t *p)
+{
+ ap_hook_post_config(http_post_config, NULL, NULL, APR_HOOK_MIDDLE);
+ ap_hook_process_connection(ap_process_http_connection, NULL, NULL,
+ APR_HOOK_REALLY_LAST);
+ ap_hook_map_to_storage(ap_send_http_trace,NULL,NULL,APR_HOOK_MIDDLE);
+ ap_hook_map_to_storage(http_send_options,NULL,NULL,APR_HOOK_MIDDLE);
+ ap_hook_http_scheme(http_scheme,NULL,NULL,APR_HOOK_REALLY_LAST);
+ ap_hook_default_port(http_port,NULL,NULL,APR_HOOK_REALLY_LAST);
+ ap_hook_create_request(http_create_request, NULL, NULL, APR_HOOK_REALLY_LAST);
+ ap_http_input_filter_handle =
+ ap_register_input_filter("HTTP_IN", ap_http_filter,
+ NULL, AP_FTYPE_PROTOCOL);
+ ap_http_header_filter_handle =
+ ap_register_output_filter("HTTP_HEADER", ap_http_header_filter,
+ NULL, AP_FTYPE_PROTOCOL);
+ ap_chunk_filter_handle =
+ ap_register_output_filter("CHUNK", ap_http_chunk_filter,
+ NULL, AP_FTYPE_TRANSCODE);
+ ap_http_outerror_filter_handle =
+ ap_register_output_filter("HTTP_OUTERROR", ap_http_outerror_filter,
+ NULL, AP_FTYPE_PROTOCOL);
+ ap_byterange_filter_handle =
+ ap_register_output_filter("BYTERANGE", ap_byterange_filter,
+ NULL, AP_FTYPE_PROTOCOL);
+ ap_method_registry_init(p);
+}
+
+AP_DECLARE_MODULE(http) = {
+ STANDARD20_MODULE_STUFF,
+ NULL, /* create per-directory config structure */
+ NULL, /* merge per-directory config structures */
+ NULL, /* create per-server config structure */
+ NULL, /* merge per-server config structures */
+ http_cmds, /* command apr_table_t */
+ register_hooks /* register hooks */
+};
diff --git a/modules/http/http_etag.c b/modules/http/http_etag.c
new file mode 100644
index 0000000..7f3c6d9
--- /dev/null
+++ b/modules/http/http_etag.c
@@ -0,0 +1,220 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "apr_strings.h"
+#include "apr_thread_proc.h" /* for RLIMIT stuff */
+
+#define APR_WANT_STRFUNC
+#include "apr_want.h"
+
+#include "httpd.h"
+#include "http_config.h"
+#include "http_connection.h"
+#include "http_core.h"
+#include "http_protocol.h" /* For index_of_response(). Grump. */
+#include "http_request.h"
+
+/* Generate the human-readable hex representation of an apr_uint64_t
+ * (basically a faster version of 'sprintf("%llx")')
+ */
+#define HEX_DIGITS "0123456789abcdef"
+static char *etag_uint64_to_hex(char *next, apr_uint64_t u)
+{
+ int printing = 0;
+ int shift = sizeof(apr_uint64_t) * 8 - 4;
+ do {
+ unsigned short next_digit = (unsigned short)
+ ((u >> shift) & (apr_uint64_t)0xf);
+ if (next_digit) {
+ *next++ = HEX_DIGITS[next_digit];
+ printing = 1;
+ }
+ else if (printing) {
+ *next++ = HEX_DIGITS[next_digit];
+ }
+ shift -= 4;
+ } while (shift);
+ *next++ = HEX_DIGITS[u & (apr_uint64_t)0xf];
+ return next;
+}
+
+#define ETAG_WEAK "W/"
+#define CHARS_PER_UINT64 (sizeof(apr_uint64_t) * 2)
+/*
+ * Construct an entity tag (ETag) from resource information. If it's a real
+ * file, build in some of the file characteristics. If the modification time
+ * is newer than (request-time minus 1 second), mark the ETag as weak - it
+ * could be modified again in as short an interval. We rationalize the
+ * modification time we're given to keep it from being in the future.
+ */
+AP_DECLARE(char *) ap_make_etag(request_rec *r, int force_weak)
+{
+ char *weak;
+ apr_size_t weak_len;
+ char *etag;
+ char *next;
+ core_dir_config *cfg;
+ etag_components_t etag_bits;
+ etag_components_t bits_added;
+
+ cfg = (core_dir_config *)ap_get_core_module_config(r->per_dir_config);
+ etag_bits = (cfg->etag_bits & (~ cfg->etag_remove)) | cfg->etag_add;
+
+ /*
+ * If it's a file (or we wouldn't be here) and no ETags
+ * should be set for files, return an empty string and
+ * note it for the header-sender to ignore.
+ */
+ if (etag_bits & ETAG_NONE) {
+ apr_table_setn(r->notes, "no-etag", "omit");
+ return "";
+ }
+
+ if (etag_bits == ETAG_UNSET) {
+ etag_bits = ETAG_BACKWARD;
+ }
+ /*
+ * Make an ETag header out of various pieces of information. We use
+ * the last-modified date and, if we have a real file, the
+ * length and inode number - note that this doesn't have to match
+ * the content-length (i.e. includes), it just has to be unique
+ * for the file.
+ *
+ * If the request was made within a second of the last-modified date,
+ * we send a weak tag instead of a strong one, since it could
+ * be modified again later in the second, and the validation
+ * would be incorrect.
+ */
+ if ((r->request_time - r->mtime > (1 * APR_USEC_PER_SEC)) &&
+ !force_weak) {
+ weak = NULL;
+ weak_len = 0;
+ }
+ else {
+ weak = ETAG_WEAK;
+ weak_len = sizeof(ETAG_WEAK);
+ }
+
+ if (r->finfo.filetype != APR_NOFILE) {
+ /*
+ * ETag gets set to [W/]"inode-size-mtime", modulo any
+ * FileETag keywords.
+ */
+ etag = apr_palloc(r->pool, weak_len + sizeof("\"--\"") +
+ 3 * CHARS_PER_UINT64 + 1);
+ next = etag;
+ if (weak) {
+ while (*weak) {
+ *next++ = *weak++;
+ }
+ }
+ *next++ = '"';
+ bits_added = 0;
+ if (etag_bits & ETAG_INODE) {
+ next = etag_uint64_to_hex(next, r->finfo.inode);
+ bits_added |= ETAG_INODE;
+ }
+ if (etag_bits & ETAG_SIZE) {
+ if (bits_added != 0) {
+ *next++ = '-';
+ }
+ next = etag_uint64_to_hex(next, r->finfo.size);
+ bits_added |= ETAG_SIZE;
+ }
+ if (etag_bits & ETAG_MTIME) {
+ if (bits_added != 0) {
+ *next++ = '-';
+ }
+ next = etag_uint64_to_hex(next, r->mtime);
+ }
+ *next++ = '"';
+ *next = '\0';
+ }
+ else {
+ /*
+ * Not a file document, so just use the mtime: [W/]"mtime"
+ */
+ etag = apr_palloc(r->pool, weak_len + sizeof("\"\"") +
+ CHARS_PER_UINT64 + 1);
+ next = etag;
+ if (weak) {
+ while (*weak) {
+ *next++ = *weak++;
+ }
+ }
+ *next++ = '"';
+ next = etag_uint64_to_hex(next, r->mtime);
+ *next++ = '"';
+ *next = '\0';
+ }
+
+ return etag;
+}
+
+AP_DECLARE(void) ap_set_etag(request_rec *r)
+{
+ char *etag;
+ char *variant_etag, *vlv;
+ int vlv_weak;
+
+ if (!r->vlist_validator) {
+ etag = ap_make_etag(r, 0);
+
+ /* If we get a blank etag back, don't set the header. */
+ if (!etag[0]) {
+ return;
+ }
+ }
+ else {
+ /* If we have a variant list validator (vlv) due to the
+ * response being negotiated, then we create a structured
+ * entity tag which merges the variant etag with the variant
+ * list validator (vlv). This merging makes revalidation
+ * somewhat safer, ensures that caches which can deal with
+ * Vary will (eventually) be updated if the set of variants is
+ * changed, and is also a protocol requirement for transparent
+ * content negotiation.
+ */
+
+ /* if the variant list validator is weak, we make the whole
+         * structured etag weak. If we did not, then clients could
+ * have problems merging range responses if we have different
+ * variants with the same non-globally-unique strong etag.
+ */
+
+ vlv = r->vlist_validator;
+ vlv_weak = (vlv[0] == 'W');
+
+ variant_etag = ap_make_etag(r, vlv_weak);
+
+ /* If we get a blank etag back, don't append vlv and stop now. */
+ if (!variant_etag[0]) {
+ return;
+ }
+
+ /* merge variant_etag and vlv into a structured etag */
+ variant_etag[strlen(variant_etag) - 1] = '\0';
+ if (vlv_weak) {
+ vlv += 3;
+ }
+ else {
+ vlv++;
+ }
+ etag = apr_pstrcat(r->pool, variant_etag, ";", vlv, NULL);
+ }
+
+ apr_table_setn(r->headers_out, "ETag", etag);
+}
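
For a plain file, ap_make_etag() above yields a tag of the form [W/]"inode-size-mtime" with each field in lowercase hex. A minimal standalone sketch of that shape, with apr_snprintf standing in for etag_uint64_to_hex and assumed example values that are not taken from the patch:

    #include <stdio.h>

    #include "apr.h"
    #include "apr_strings.h"

    int main(void)
    {
        /* Assumed example values; a real ETag uses the file's inode, size
         * and mtime as gathered by ap_make_etag(). */
        apr_uint64_t inode = 0x2c81a, size = 0x1b58, mtime = 0x5a3f9cc0e4d20;
        char etag[3 * 16 + sizeof("\"--\"")];

        /* Strong form: "inode-size-mtime", each field in lowercase hex
         * (etag_uint64_to_hex is essentially a faster %llx). A weak tag
         * is the same string prefixed with W/. */
        apr_snprintf(etag, sizeof(etag),
                     "\"%" APR_UINT64_T_HEX_FMT "-%" APR_UINT64_T_HEX_FMT
                     "-%" APR_UINT64_T_HEX_FMT "\"", inode, size, mtime);
        printf("ETag: %s\n", etag);   /* ETag: "2c81a-1b58-5a3f9cc0e4d20" */
        return 0;
    }
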
diff --git a/modules/http/http_filters.c b/modules/http/http_filters.c
new file mode 100644
index 0000000..9828cdf
--- /dev/null
+++ b/modules/http/http_filters.c
@@ -0,0 +1,1921 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * http_filters.c --- HTTP routines which either are filters or deal with filters.
+ */
+
+#include "apr.h"
+#include "apr_strings.h"
+#include "apr_buckets.h"
+#include "apr_lib.h"
+#include "apr_signal.h"
+
+#define APR_WANT_STDIO /* for sscanf */
+#define APR_WANT_STRFUNC
+#define APR_WANT_MEMFUNC
+#include "apr_want.h"
+
+#include "util_filter.h"
+#include "ap_config.h"
+#include "httpd.h"
+#include "http_config.h"
+#include "http_core.h"
+#include "http_protocol.h"
+#include "http_main.h"
+#include "http_request.h"
+#include "http_vhost.h"
+#include "http_connection.h"
+#include "http_log.h" /* For errors detected in basic auth common
+ * support code... */
+#include "apr_date.h" /* For apr_date_parse_http and APR_DATE_BAD */
+#include "util_charset.h"
+#include "util_ebcdic.h"
+#include "util_time.h"
+
+#include "mod_core.h"
+
+#if APR_HAVE_STDARG_H
+#include <stdarg.h>
+#endif
+#if APR_HAVE_UNISTD_H
+#include <unistd.h>
+#endif
+
+APLOG_USE_MODULE(http);
+
+typedef struct http_filter_ctx
+{
+ apr_off_t remaining;
+ apr_off_t limit;
+ apr_off_t limit_used;
+ apr_int32_t chunk_used;
+ apr_int32_t chunk_bws;
+ apr_int32_t chunkbits;
+ enum
+ {
+ BODY_NONE, /* streamed data */
+ BODY_LENGTH, /* data constrained by content length */
+ BODY_CHUNK, /* chunk expected */
+ BODY_CHUNK_PART, /* chunk digits */
+ BODY_CHUNK_EXT, /* chunk extension */
+ BODY_CHUNK_CR, /* got space(s) after digits, expect [CR]LF or ext */
+ BODY_CHUNK_LF, /* got CR after digits or ext, expect LF */
+ BODY_CHUNK_DATA, /* data constrained by chunked encoding */
+ BODY_CHUNK_END, /* chunked data terminating CRLF */
+ BODY_CHUNK_END_LF, /* got CR after data, expect LF */
+ BODY_CHUNK_TRAILER /* trailers */
+ } state;
+ unsigned int eos_sent :1;
+ apr_bucket_brigade *bb;
+} http_ctx_t;
+
+/* bail out if some error in the HTTP input filter happens */
+static apr_status_t bail_out_on_error(http_ctx_t *ctx,
+ ap_filter_t *f,
+ int http_error)
+{
+ apr_bucket *e;
+ apr_bucket_brigade *bb = ctx->bb;
+
+ apr_brigade_cleanup(bb);
+
+ if (f->r->proxyreq == PROXYREQ_RESPONSE) {
+ switch (http_error) {
+ case HTTP_REQUEST_ENTITY_TOO_LARGE:
+ return APR_ENOSPC;
+
+ case HTTP_REQUEST_TIME_OUT:
+ return APR_INCOMPLETE;
+
+ case HTTP_NOT_IMPLEMENTED:
+ return APR_ENOTIMPL;
+
+ default:
+ return APR_EGENERAL;
+ }
+ }
+
+ e = ap_bucket_error_create(http_error,
+ NULL, f->r->pool,
+ f->c->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(bb, e);
+ e = apr_bucket_eos_create(f->c->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(bb, e);
+ ctx->eos_sent = 1;
+ /* If chunked encoding / content-length are corrupt, we may treat parts
+ * of this request's body as the next one's headers.
+ * To be safe, disable keep-alive.
+ */
+ f->r->connection->keepalive = AP_CONN_CLOSE;
+ return ap_pass_brigade(f->r->output_filters, bb);
+}
+
+/**
+ * Parse a chunk line with optional extension, detect overflow.
+ * There are several error cases:
+ *  1) If the chunk line is malformed, APR_EINVAL is returned.
+ * 2) If the conversion would require too many bits, APR_EGENERAL is returned.
+ * 3) If the conversion used the correct number of bits, but an overflow
+ * caused only the sign bit to flip, then APR_ENOSPC is returned.
+ * A negative chunk length always indicates an overflow error.
+ */
+static apr_status_t parse_chunk_size(http_ctx_t *ctx, const char *buffer,
+ apr_size_t len, int linelimit, int strict)
+{
+ apr_size_t i = 0;
+
+ while (i < len) {
+ char c = buffer[i];
+
+ ap_xlate_proto_from_ascii(&c, 1);
+
+ /* handle CRLF after the chunk */
+ if (ctx->state == BODY_CHUNK_END
+ || ctx->state == BODY_CHUNK_END_LF) {
+ if (c == LF) {
+ if (strict && (ctx->state != BODY_CHUNK_END_LF)) {
+ /*
+ * CR missing before LF.
+ */
+ return APR_EINVAL;
+ }
+ ctx->state = BODY_CHUNK;
+ }
+ else if (c == CR && ctx->state == BODY_CHUNK_END) {
+ ctx->state = BODY_CHUNK_END_LF;
+ }
+ else {
+ /*
+ * CRLF expected.
+ */
+ return APR_EINVAL;
+ }
+ i++;
+ continue;
+ }
+
+ /* handle start of the chunk */
+ if (ctx->state == BODY_CHUNK) {
+ if (!apr_isxdigit(c)) {
+ /*
+ * Detect invalid character at beginning. This also works for
+ * empty chunk size lines.
+ */
+ return APR_EINVAL;
+ }
+ else {
+ ctx->state = BODY_CHUNK_PART;
+ }
+ ctx->remaining = 0;
+ ctx->chunkbits = sizeof(apr_off_t) * 8;
+ ctx->chunk_used = 0;
+ ctx->chunk_bws = 0;
+ }
+
+ if (c == LF) {
+ if (strict && (ctx->state != BODY_CHUNK_LF)) {
+ /*
+ * CR missing before LF.
+ */
+ return APR_EINVAL;
+ }
+ if (ctx->remaining) {
+ ctx->state = BODY_CHUNK_DATA;
+ }
+ else {
+ ctx->state = BODY_CHUNK_TRAILER;
+ }
+ }
+ else if (ctx->state == BODY_CHUNK_LF) {
+ /*
+ * LF expected.
+ */
+ return APR_EINVAL;
+ }
+ else if (c == CR) {
+ ctx->state = BODY_CHUNK_LF;
+ }
+ else if (c == ';') {
+ ctx->state = BODY_CHUNK_EXT;
+ }
+ else if (ctx->state == BODY_CHUNK_EXT) {
+ /*
+ * Control chars (excluding tabs) are invalid.
+ * TODO: more precisely limit input
+ */
+ if (c != '\t' && apr_iscntrl(c)) {
+ return APR_EINVAL;
+ }
+ }
+ else if (c == ' ' || c == '\t') {
+ /* Be lenient up to 10 implied *LWS, a legacy of RFC 2616,
+ * and noted as errata to RFC7230;
+ * https://www.rfc-editor.org/errata_search.php?rfc=7230&eid=4667
+ */
+ ctx->state = BODY_CHUNK_CR;
+ if (++ctx->chunk_bws > 10) {
+ return APR_EINVAL;
+ }
+ }
+ else if (ctx->state == BODY_CHUNK_CR) {
+ /*
+ * ';', CR or LF expected.
+ */
+ return APR_EINVAL;
+ }
+ else if (ctx->state == BODY_CHUNK_PART) {
+ int xvalue;
+
+ /* ignore leading zeros */
+ if (!ctx->remaining && c == '0') {
+ i++;
+ continue;
+ }
+
+ ctx->chunkbits -= 4;
+ if (ctx->chunkbits < 0) {
+ /* overflow */
+ return APR_ENOSPC;
+ }
+
+ if (c >= '0' && c <= '9') {
+ xvalue = c - '0';
+ }
+ else if (c >= 'A' && c <= 'F') {
+ xvalue = c - 'A' + 0xa;
+ }
+ else if (c >= 'a' && c <= 'f') {
+ xvalue = c - 'a' + 0xa;
+ }
+ else {
+ /* bogus character */
+ return APR_EINVAL;
+ }
+
+ ctx->remaining = (ctx->remaining << 4) | xvalue;
+ if (ctx->remaining < 0) {
+ /* overflow */
+ return APR_ENOSPC;
+ }
+ }
+ else {
+ /* Should not happen */
+ return APR_EGENERAL;
+ }
+
+ i++;
+ }
+
+ /* sanity check */
+ ctx->chunk_used += len;
+ if (ctx->chunk_used < 0 || ctx->chunk_used > linelimit) {
+ return APR_ENOSPC;
+ }
+
+ return APR_SUCCESS;
+}
+
+static apr_status_t read_chunked_trailers(http_ctx_t *ctx, ap_filter_t *f,
+ apr_bucket_brigade *b, int merge)
+{
+ int rv;
+ apr_bucket *e;
+ request_rec *r = f->r;
+ apr_table_t *saved_headers_in = r->headers_in;
+ int saved_status = r->status;
+
+ r->status = HTTP_OK;
+ r->headers_in = r->trailers_in;
+ apr_table_clear(r->headers_in);
+ ap_get_mime_headers(r);
+
+ if(r->status == HTTP_OK) {
+ r->status = saved_status;
+ e = apr_bucket_eos_create(f->c->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(b, e);
+ ctx->eos_sent = 1;
+ rv = APR_SUCCESS;
+ }
+ else {
+ const char *error_notes = apr_table_get(r->notes,
+ "error-notes");
+ ap_log_rerror(APLOG_MARK, APLOG_INFO, 0, r, APLOGNO(02656)
+ "Error while reading HTTP trailer: %i%s%s",
+ r->status, error_notes ? ": " : "",
+ error_notes ? error_notes : "");
+ rv = APR_EINVAL;
+ }
+
+ if(!merge) {
+ r->headers_in = saved_headers_in;
+ }
+ else {
+ r->headers_in = apr_table_overlay(r->pool, saved_headers_in,
+ r->trailers_in);
+ }
+
+ return rv;
+}
+
+/* This is the HTTP_INPUT filter for HTTP requests and responses from
+ * proxied servers (mod_proxy). It handles chunked and content-length
+ * bodies. This can only be inserted/used after the headers
+ * are successfully parsed.
+ */
+apr_status_t ap_http_filter(ap_filter_t *f, apr_bucket_brigade *b,
+ ap_input_mode_t mode, apr_read_type_e block,
+ apr_off_t readbytes)
+{
+ core_server_config *conf =
+ (core_server_config *) ap_get_module_config(f->r->server->module_config,
+ &core_module);
+ int strict = (conf->http_conformance != AP_HTTP_CONFORMANCE_UNSAFE);
+ apr_bucket *e;
+ http_ctx_t *ctx = f->ctx;
+ apr_status_t rv;
+ int http_error = HTTP_REQUEST_ENTITY_TOO_LARGE;
+ apr_bucket_brigade *bb;
+ int again;
+
+ /* just get out of the way of things we don't want. */
+ if (mode != AP_MODE_READBYTES && mode != AP_MODE_GETLINE) {
+ return ap_get_brigade(f->next, b, mode, block, readbytes);
+ }
+
+ if (!ctx) {
+ const char *tenc, *lenp;
+ f->ctx = ctx = apr_pcalloc(f->r->pool, sizeof(*ctx));
+ ctx->state = BODY_NONE;
+ ctx->bb = apr_brigade_create(f->r->pool, f->c->bucket_alloc);
+ bb = ctx->bb;
+
+ /* LimitRequestBody does not apply to proxied responses.
+ * Consider implementing this check in its own filter.
+ * Would adding a directive to limit the size of proxied
+ * responses be useful?
+ */
+ if (!f->r->proxyreq) {
+ ctx->limit = ap_get_limit_req_body(f->r);
+ }
+ else {
+ ctx->limit = 0;
+ }
+
+ tenc = apr_table_get(f->r->headers_in, "Transfer-Encoding");
+ lenp = apr_table_get(f->r->headers_in, "Content-Length");
+
+ if (tenc) {
+ if (strcasecmp(tenc, "chunked") == 0 /* fast path */
+ || ap_find_last_token(f->r->pool, tenc, "chunked")) {
+ ctx->state = BODY_CHUNK;
+ }
+ else if (f->r->proxyreq == PROXYREQ_RESPONSE) {
+ /* http://tools.ietf.org/html/draft-ietf-httpbis-p1-messaging-23
+ * Section 3.3.3.3: "If a Transfer-Encoding header field is
+ * present in a response and the chunked transfer coding is not
+ * the final encoding, the message body length is determined by
+ * reading the connection until it is closed by the server."
+ */
+ ap_log_rerror(APLOG_MARK, APLOG_INFO, 0, f->r, APLOGNO(02555)
+ "Unknown Transfer-Encoding: %s; "
+ "using read-until-close", tenc);
+ tenc = NULL;
+ }
+ else {
+                /* A Transfer-Encoding that isn't defined by HTTP, unless some
+                 * future edition defines new transfer encodings, is unsupported.
+ */
+ ap_log_rerror(APLOG_MARK, APLOG_INFO, 0, f->r, APLOGNO(01585)
+ "Unknown Transfer-Encoding: %s", tenc);
+ return bail_out_on_error(ctx, f, HTTP_BAD_REQUEST);
+ }
+ lenp = NULL;
+ }
+ if (lenp) {
+ char *endstr;
+
+ ctx->state = BODY_LENGTH;
+
+ /* Protects against over/underflow, non-digit chars in the
+ * string (excluding leading space) (the endstr checks)
+ * and a negative number. */
+ if (apr_strtoff(&ctx->remaining, lenp, &endstr, 10)
+ || endstr == lenp || *endstr || ctx->remaining < 0) {
+
+ ctx->remaining = 0;
+ ap_log_rerror(APLOG_MARK, APLOG_INFO, 0, f->r, APLOGNO(01587)
+ "Invalid Content-Length");
+
+ return bail_out_on_error(ctx, f, HTTP_BAD_REQUEST);
+ }
+
+ /* If we have a limit in effect and we know the C-L ahead of
+ * time, stop it here if it is invalid.
+ */
+ if (ctx->limit && ctx->limit < ctx->remaining) {
+ ap_log_rerror(APLOG_MARK, APLOG_INFO, 0, f->r, APLOGNO(01588)
+ "Requested content-length of %" APR_OFF_T_FMT
+ " is larger than the configured limit"
+ " of %" APR_OFF_T_FMT, ctx->remaining, ctx->limit);
+ return bail_out_on_error(ctx, f, HTTP_REQUEST_ENTITY_TOO_LARGE);
+ }
+ }
+
+ /* If we don't have a request entity indicated by the headers, EOS.
+ * (BODY_NONE is a valid intermediate state due to trailers,
+ * but it isn't a valid starting state.)
+ *
+ * RFC 2616 Section 4.4 note 5 states that connection-close
+ * is invalid for a request entity - request bodies must be
+ * denoted by C-L or T-E: chunked.
+ *
+ * Note that since the proxy uses this filter to handle the
+ * proxied *response*, proxy responses MUST be exempt.
+ */
+ if (ctx->state == BODY_NONE && f->r->proxyreq != PROXYREQ_RESPONSE) {
+ e = apr_bucket_eos_create(f->c->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(b, e);
+ ctx->eos_sent = 1;
+ return APR_SUCCESS;
+ }
+
+ /* Since we're about to read data, send 100-Continue if needed.
+ * Only valid on chunked and C-L bodies where the C-L is > 0. */
+ if ((ctx->state == BODY_CHUNK ||
+ (ctx->state == BODY_LENGTH && ctx->remaining > 0)) &&
+ f->r->expecting_100 && f->r->proto_num >= HTTP_VERSION(1,1) &&
+ !(f->r->eos_sent || f->r->bytes_sent)) {
+ if (!ap_is_HTTP_SUCCESS(f->r->status)) {
+ ctx->state = BODY_NONE;
+ ctx->eos_sent = 1;
+ }
+ else {
+ char *tmp;
+ int len;
+
+ /* if we send an interim response, we're no longer
+ * in a state of expecting one.
+ */
+ f->r->expecting_100 = 0;
+ tmp = apr_pstrcat(f->r->pool, AP_SERVER_PROTOCOL " ",
+ ap_get_status_line(HTTP_CONTINUE), CRLF CRLF,
+ NULL);
+ len = strlen(tmp);
+ ap_xlate_proto_to_ascii(tmp, len);
+ apr_brigade_cleanup(bb);
+ e = apr_bucket_pool_create(tmp, len, f->r->pool,
+ f->c->bucket_alloc);
+ APR_BRIGADE_INSERT_HEAD(bb, e);
+ e = apr_bucket_flush_create(f->c->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(bb, e);
+
+ rv = ap_pass_brigade(f->c->output_filters, bb);
+ if (rv != APR_SUCCESS) {
+ return AP_FILTER_ERROR;
+ }
+ }
+ }
+ }
+
+ /* sanity check in case we're read twice */
+ if (ctx->eos_sent) {
+ e = apr_bucket_eos_create(f->c->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(b, e);
+ return APR_SUCCESS;
+ }
+
+ do {
+ apr_brigade_cleanup(b);
+ again = 0; /* until further notice */
+
+ /* read and handle the brigade */
+ switch (ctx->state) {
+ case BODY_CHUNK:
+ case BODY_CHUNK_PART:
+ case BODY_CHUNK_EXT:
+ case BODY_CHUNK_CR:
+ case BODY_CHUNK_LF:
+ case BODY_CHUNK_END:
+ case BODY_CHUNK_END_LF: {
+
+ rv = ap_get_brigade(f->next, b, AP_MODE_GETLINE, block, 0);
+
+ /* for timeout */
+ if (block == APR_NONBLOCK_READ
+ && ((rv == APR_SUCCESS && APR_BRIGADE_EMPTY(b))
+ || (APR_STATUS_IS_EAGAIN(rv)))) {
+ return APR_EAGAIN;
+ }
+
+ if (rv == APR_EOF) {
+ return APR_INCOMPLETE;
+ }
+
+ if (rv != APR_SUCCESS) {
+ return rv;
+ }
+
+ e = APR_BRIGADE_FIRST(b);
+ while (e != APR_BRIGADE_SENTINEL(b)) {
+ const char *buffer;
+ apr_size_t len;
+
+ if (!APR_BUCKET_IS_METADATA(e)) {
+ int parsing = 0;
+
+ rv = apr_bucket_read(e, &buffer, &len, APR_BLOCK_READ);
+
+ if (rv == APR_SUCCESS) {
+ parsing = 1;
+ rv = parse_chunk_size(ctx, buffer, len,
+ f->r->server->limit_req_fieldsize, strict);
+ }
+ if (rv != APR_SUCCESS) {
+ ap_log_rerror(APLOG_MARK, APLOG_INFO, rv, f->r, APLOGNO(01590)
+ "Error reading/parsing chunk %s ",
+ (APR_ENOSPC == rv) ? "(overflow)" : "");
+ if (parsing) {
+ if (rv != APR_ENOSPC) {
+ http_error = HTTP_BAD_REQUEST;
+ }
+ return bail_out_on_error(ctx, f, http_error);
+ }
+ return rv;
+ }
+ }
+
+ apr_bucket_delete(e);
+ e = APR_BRIGADE_FIRST(b);
+ }
+ again = 1; /* come around again */
+
+ if (ctx->state == BODY_CHUNK_TRAILER) {
+ /* Treat UNSET as DISABLE - trailers aren't merged by default */
+ return read_chunked_trailers(ctx, f, b,
+ conf->merge_trailers == AP_MERGE_TRAILERS_ENABLE);
+ }
+
+ break;
+ }
+ case BODY_NONE:
+ case BODY_LENGTH:
+ case BODY_CHUNK_DATA: {
+
+ /* Ensure that the caller can not go over our boundary point. */
+ if (ctx->state != BODY_NONE && ctx->remaining < readbytes) {
+ readbytes = ctx->remaining;
+ }
+ if (readbytes > 0) {
+ apr_off_t totalread;
+
+ rv = ap_get_brigade(f->next, b, mode, block, readbytes);
+
+ /* for timeout */
+ if (block == APR_NONBLOCK_READ
+ && ((rv == APR_SUCCESS && APR_BRIGADE_EMPTY(b))
+ || (APR_STATUS_IS_EAGAIN(rv)))) {
+ return APR_EAGAIN;
+ }
+
+ if (rv == APR_EOF && ctx->state != BODY_NONE
+ && ctx->remaining > 0) {
+ return APR_INCOMPLETE;
+ }
+
+ if (rv != APR_SUCCESS) {
+ return rv;
+ }
+
+ /* How many bytes did we just read? */
+ apr_brigade_length(b, 0, &totalread);
+
+ /* If this happens, we have a bucket of unknown length. Die because
+ * it means our assumptions have changed. */
+ AP_DEBUG_ASSERT(totalread >= 0);
+
+ if (ctx->state != BODY_NONE) {
+ ctx->remaining -= totalread;
+ if (ctx->remaining > 0) {
+ e = APR_BRIGADE_LAST(b);
+ if (APR_BUCKET_IS_EOS(e)) {
+ apr_bucket_delete(e);
+ return APR_INCOMPLETE;
+ }
+ }
+ else if (ctx->state == BODY_CHUNK_DATA) {
+ /* next chunk please */
+ ctx->state = BODY_CHUNK_END;
+ ctx->chunk_used = 0;
+ }
+ }
+
+ /* We have a limit in effect. */
+ if (ctx->limit) {
+ /* FIXME: Note that we might get slightly confused on
+ * chunked inputs as we'd need to compensate for the chunk
+ * lengths which may not really count. This seems to be up
+ * for interpretation.
+ */
+ ctx->limit_used += totalread;
+ if (ctx->limit < ctx->limit_used) {
+ ap_log_rerror(APLOG_MARK, APLOG_INFO, 0, f->r,
+ APLOGNO(01591) "Read content length of "
+ "%" APR_OFF_T_FMT " is larger than the "
+ "configured limit of %" APR_OFF_T_FMT,
+ ctx->limit_used, ctx->limit);
+ return bail_out_on_error(ctx, f,
+ HTTP_REQUEST_ENTITY_TOO_LARGE);
+ }
+ }
+ }
+
+ /* If we have no more bytes remaining on a C-L request,
+ * save the caller a round trip to discover EOS.
+ */
+ if (ctx->state == BODY_LENGTH && ctx->remaining == 0) {
+ e = apr_bucket_eos_create(f->c->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(b, e);
+ ctx->eos_sent = 1;
+ }
+
+ break;
+ }
+ case BODY_CHUNK_TRAILER: {
+
+ rv = ap_get_brigade(f->next, b, mode, block, readbytes);
+
+ /* for timeout */
+ if (block == APR_NONBLOCK_READ
+ && ((rv == APR_SUCCESS && APR_BRIGADE_EMPTY(b))
+ || (APR_STATUS_IS_EAGAIN(rv)))) {
+ return APR_EAGAIN;
+ }
+
+ if (rv != APR_SUCCESS) {
+ return rv;
+ }
+
+ break;
+ }
+ default: {
+ /* Should not happen */
+ ap_log_rerror(APLOG_MARK, APLOG_INFO, 0, f->r, APLOGNO(02901)
+ "Unexpected body state (%i)", (int)ctx->state);
+ return APR_EGENERAL;
+ }
+ }
+
+ } while (again);
+
+ return APR_SUCCESS;
+}
+
+struct check_header_ctx {
+ request_rec *r;
+ int strict;
+};
+
+/* check a single header, to be used with apr_table_do() */
+static int check_header(struct check_header_ctx *ctx,
+ const char *name, const char **val)
+{
+ const char *pos, *end;
+ char *dst = NULL;
+
+ if (name[0] == '\0') {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, ctx->r, APLOGNO(02428)
+ "Empty response header name, aborting request");
+ return 0;
+ }
+
+ if (ctx->strict) {
+ end = ap_scan_http_token(name);
+ }
+ else {
+ end = ap_scan_vchar_obstext(name);
+ }
+ if (*end) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, ctx->r, APLOGNO(02429)
+ "Response header name '%s' contains invalid "
+ "characters, aborting request",
+ name);
+ return 0;
+ }
+
+ for (pos = *val; *pos; pos = end) {
+ end = ap_scan_http_field_content(pos);
+ if (*end) {
+ if (end[0] != CR || end[1] != LF || (end[2] != ' ' &&
+ end[2] != '\t')) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, ctx->r, APLOGNO(02430)
+ "Response header '%s' value of '%s' contains "
+ "invalid characters, aborting request",
+ name, pos);
+ return 0;
+ }
+ if (!dst) {
+ *val = dst = apr_palloc(ctx->r->pool, strlen(*val) + 1);
+ }
+ }
+ if (dst) {
+ memcpy(dst, pos, end - pos);
+ dst += end - pos;
+ if (*end) {
+ /* skip folding and replace with a single space */
+ end += 3 + strspn(end + 3, "\t ");
+ *dst++ = ' ';
+ }
+ }
+ }
+ if (dst) {
+ *dst = '\0';
+ }
+ return 1;
+}
+
+static int check_headers_table(apr_table_t *t, struct check_header_ctx *ctx)
+{
+ const apr_array_header_t *headers = apr_table_elts(t);
+ apr_table_entry_t *header;
+ int i;
+
+ for (i = 0; i < headers->nelts; ++i) {
+ header = &APR_ARRAY_IDX(headers, i, apr_table_entry_t);
+ if (!header->key) {
+ continue;
+ }
+ if (!check_header(ctx, header->key, (const char **)&header->val)) {
+ return 0;
+ }
+ }
+ return 1;
+}
+
+/**
+ * Check headers for HTTP conformance
+ * @return 1 if ok, 0 if bad
+ */
+static APR_INLINE int check_headers(request_rec *r)
+{
+ struct check_header_ctx ctx;
+ core_server_config *conf =
+ ap_get_core_module_config(r->server->module_config);
+
+ ctx.r = r;
+ ctx.strict = (conf->http_conformance != AP_HTTP_CONFORMANCE_UNSAFE);
+ return check_headers_table(r->headers_out, &ctx) &&
+ check_headers_table(r->err_headers_out, &ctx);
+}
+
+static int check_headers_recursion(request_rec *r)
+{
+ void *check = NULL;
+ apr_pool_userdata_get(&check, "check_headers_recursion", r->pool);
+ if (check) {
+ return 1;
+ }
+ apr_pool_userdata_setn("true", "check_headers_recursion", NULL, r->pool);
+ return 0;
+}
+
+typedef struct header_struct {
+ apr_pool_t *pool;
+ apr_bucket_brigade *bb;
+} header_struct;
+
+/* Send a single HTTP header field to the client. Note that this function
+ * is used in calls to apr_table_do(), so don't change its interface.
+ * It returns true unless there was a write error of some kind.
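+ *
+ * For example, the TRACE handler later in this file drives it with:
+ *
+ *   apr_table_do((int (*) (void *, const char *, const char *))
+ *                form_header_field, (void *) &h, r->headers_in, NULL);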
+ */
+static int form_header_field(header_struct *h,
+ const char *fieldname, const char *fieldval)
+{
+#if APR_CHARSET_EBCDIC
+ char *headfield;
+ apr_size_t len;
+
+ headfield = apr_pstrcat(h->pool, fieldname, ": ", fieldval, CRLF, NULL);
+ len = strlen(headfield);
+
+ ap_xlate_proto_to_ascii(headfield, len);
+ apr_brigade_write(h->bb, NULL, NULL, headfield, len);
+#else
+ struct iovec vec[4];
+ struct iovec *v = vec;
+ v->iov_base = (void *)fieldname;
+ v->iov_len = strlen(fieldname);
+ v++;
+ v->iov_base = ": ";
+ v->iov_len = sizeof(": ") - 1;
+ v++;
+ v->iov_base = (void *)fieldval;
+ v->iov_len = strlen(fieldval);
+ v++;
+ v->iov_base = CRLF;
+ v->iov_len = sizeof(CRLF) - 1;
+ apr_brigade_writev(h->bb, NULL, NULL, vec, 4);
+#endif /* !APR_CHARSET_EBCDIC */
+ return 1;
+}
+
+/* This routine is called by apr_table_do and merges all instances of
+ * the passed field values into a single array that will be further
+ * processed by some later routine. Originally intended to help split
+ * and recombine multiple Vary fields, though it is generic to any field
+ * consisting of comma/space-separated tokens.
+ */
+static int uniq_field_values(void *d, const char *key, const char *val)
+{
+ apr_array_header_t *values;
+ char *start;
+ char *e;
+ char **strpp;
+ int i;
+
+ values = (apr_array_header_t *)d;
+
+ e = apr_pstrdup(values->pool, val);
+
+ do {
+ /* Find a non-empty fieldname */
+
+ while (*e == ',' || apr_isspace(*e)) {
+ ++e;
+ }
+ if (*e == '\0') {
+ break;
+ }
+ start = e;
+ while (*e != '\0' && *e != ',' && !apr_isspace(*e)) {
+ ++e;
+ }
+ if (*e != '\0') {
+ *e++ = '\0';
+ }
+
+ /* Now add it to values if it isn't already represented.
+         * Could be replaced by an ap_array_strcasecmp() if we had one.
+ */
+ for (i = 0, strpp = (char **) values->elts; i < values->nelts;
+ ++i, ++strpp) {
+ if (*strpp && strcasecmp(*strpp, start) == 0) {
+ break;
+ }
+ }
+ if (i == values->nelts) { /* if not found */
+ *(char **)apr_array_push(values) = start;
+ }
+ } while (*e != '\0');
+
+ return 1;
+}
+
+/*
+ * Since some clients choke violently on multiple Vary fields, or
+ * Vary fields with duplicate tokens, combine any multiples and remove
+ * any duplicates.
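+ *
+ * For example, the two fields
+ *
+ *   Vary: Accept-Encoding
+ *   Vary: Cookie, Accept-Encoding
+ *
+ * are collapsed into the single field "Vary: Accept-Encoding,Cookie".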
+ */
+static void fixup_vary(request_rec *r)
+{
+ apr_array_header_t *varies;
+
+ varies = apr_array_make(r->pool, 5, sizeof(char *));
+
+ /* Extract all Vary fields from the headers_out, separate each into
+ * its comma-separated fieldname values, and then add them to varies
+ * if not already present in the array.
+ */
+ apr_table_do(uniq_field_values, varies, r->headers_out, "Vary", NULL);
+
+ /* If we found any, replace old Vary fields with unique-ified value */
+
+ if (varies->nelts > 0) {
+ apr_table_setn(r->headers_out, "Vary",
+ apr_array_pstrcat(r->pool, varies, ','));
+ }
+}
+
+/* Send a request's HTTP response headers to the client.
+ */
+static apr_status_t send_all_header_fields(header_struct *h,
+ const request_rec *r)
+{
+ const apr_array_header_t *elts;
+ const apr_table_entry_t *t_elt;
+ const apr_table_entry_t *t_end;
+ struct iovec *vec;
+ struct iovec *vec_next;
+
+ elts = apr_table_elts(r->headers_out);
+ if (elts->nelts == 0) {
+ return APR_SUCCESS;
+ }
+ t_elt = (const apr_table_entry_t *)(elts->elts);
+ t_end = t_elt + elts->nelts;
+ vec = (struct iovec *)apr_palloc(h->pool, 4 * elts->nelts *
+ sizeof(struct iovec));
+ vec_next = vec;
+
+ /* For each field, generate
+ * name ": " value CRLF
+ */
+ do {
+ vec_next->iov_base = (void*)(t_elt->key);
+ vec_next->iov_len = strlen(t_elt->key);
+ vec_next++;
+ vec_next->iov_base = ": ";
+ vec_next->iov_len = sizeof(": ") - 1;
+ vec_next++;
+ vec_next->iov_base = (void*)(t_elt->val);
+ vec_next->iov_len = strlen(t_elt->val);
+ vec_next++;
+ vec_next->iov_base = CRLF;
+ vec_next->iov_len = sizeof(CRLF) - 1;
+ vec_next++;
+ t_elt++;
+ } while (t_elt < t_end);
+
+ if (APLOGrtrace4(r)) {
+ t_elt = (const apr_table_entry_t *)(elts->elts);
+ do {
+ ap_log_rerror(APLOG_MARK, APLOG_TRACE4, 0, r, " %s: %s",
+ ap_escape_logitem(r->pool, t_elt->key),
+ ap_escape_logitem(r->pool, t_elt->val));
+ t_elt++;
+ } while (t_elt < t_end);
+ }
+
+#if APR_CHARSET_EBCDIC
+ {
+ apr_size_t len;
+ char *tmp = apr_pstrcatv(r->pool, vec, vec_next - vec, &len);
+ ap_xlate_proto_to_ascii(tmp, len);
+ return apr_brigade_write(h->bb, NULL, NULL, tmp, len);
+ }
+#else
+ return apr_brigade_writev(h->bb, NULL, NULL, vec, vec_next - vec);
+#endif
+}
+
+/* Confirm that the status line is well-formed and matches r->status.
+ * If they don't match, a filter may have negated the status line set by a
+ * handler.
+ * Zap r->status_line if bad.
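+ *
+ * For example (illustrative), with r->status == 404 the line
+ * "404 Not Found" passes, while a bare "404" has a trailing space appended
+ * and APR_EGENERAL returned so the caller may prefer the canonical
+ * reason phrase.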
+ */
+static apr_status_t validate_status_line(request_rec *r)
+{
+ char *end;
+
+ if (r->status_line) {
+ int len = strlen(r->status_line);
+ if (len < 3
+ || apr_strtoi64(r->status_line, &end, 10) != r->status
+ || (end - 3) != r->status_line
+ || (len >= 4 && ! apr_isspace(r->status_line[3]))) {
+ r->status_line = NULL;
+ return APR_EGENERAL;
+ }
+        /* Since we passed the above check, we know that length three
+         * is equivalent to only a 3-digit numeric HTTP status.
+         * RFC2616 mandates a trailing space; let's add it.
+ */
+ if (len == 3) {
+ r->status_line = apr_pstrcat(r->pool, r->status_line, " ", NULL);
+ return APR_EGENERAL;
+ }
+ return APR_SUCCESS;
+ }
+ return APR_EGENERAL;
+}
+
+/*
+ * Determine the protocol to use for the response. Potentially downgrade
+ * to HTTP/1.0 in some situations and/or turn off keepalives.
+ *
+ * also prepare r->status_line.
+ */
+static void basic_http_header_check(request_rec *r,
+ const char **protocol)
+{
+ apr_status_t rv;
+
+ if (r->assbackwards) {
+ /* no such thing as a response protocol */
+ return;
+ }
+
+ rv = validate_status_line(r);
+
+ if (!r->status_line) {
+ r->status_line = ap_get_status_line(r->status);
+ } else if (rv != APR_SUCCESS) {
+ /* Status line is OK but our own reason phrase
+ * would be preferred if defined
+ */
+ const char *tmp = ap_get_status_line(r->status);
+ if (!strncmp(tmp, r->status_line, 3)) {
+ r->status_line = tmp;
+ }
+ }
+
+ /* Note that we must downgrade before checking for force responses. */
+ if (r->proto_num > HTTP_VERSION(1,0)
+ && apr_table_get(r->subprocess_env, "downgrade-1.0")) {
+ r->proto_num = HTTP_VERSION(1,0);
+ }
+
+ /* kludge around broken browsers when indicated by force-response-1.0
+ */
+ if (r->proto_num == HTTP_VERSION(1,0)
+ && apr_table_get(r->subprocess_env, "force-response-1.0")) {
+ *protocol = "HTTP/1.0";
+ r->connection->keepalive = AP_CONN_CLOSE;
+ }
+ else {
+ *protocol = AP_SERVER_PROTOCOL;
+ }
+
+}
+
+/* fill "bb" with a barebones/initial HTTP response header */
+static void basic_http_header(request_rec *r, apr_bucket_brigade *bb,
+ const char *protocol)
+{
+ char *date = NULL;
+ const char *proxy_date = NULL;
+ const char *server = NULL;
+ const char *us = ap_get_server_banner();
+ header_struct h;
+ struct iovec vec[4];
+
+ if (r->assbackwards) {
+ /* there are no headers to send */
+ return;
+ }
+
+ /* Output the HTTP/1.x Status-Line and the Date and Server fields */
+
+ vec[0].iov_base = (void *)protocol;
+ vec[0].iov_len = strlen(protocol);
+ vec[1].iov_base = (void *)" ";
+ vec[1].iov_len = sizeof(" ") - 1;
+ vec[2].iov_base = (void *)(r->status_line);
+ vec[2].iov_len = strlen(r->status_line);
+ vec[3].iov_base = (void *)CRLF;
+ vec[3].iov_len = sizeof(CRLF) - 1;
+#if APR_CHARSET_EBCDIC
+ {
+ char *tmp;
+ apr_size_t len;
+ tmp = apr_pstrcatv(r->pool, vec, 4, &len);
+ ap_xlate_proto_to_ascii(tmp, len);
+ apr_brigade_write(bb, NULL, NULL, tmp, len);
+ }
+#else
+ apr_brigade_writev(bb, NULL, NULL, vec, 4);
+#endif
+
+ h.pool = r->pool;
+ h.bb = bb;
+
+ /*
+     * Keep the set-by-proxy Server and Date headers; otherwise
+     * generate our own Server and Date headers.
+ */
+ if (r->proxyreq != PROXYREQ_NONE) {
+ proxy_date = apr_table_get(r->headers_out, "Date");
+ if (!proxy_date) {
+ /*
+ * proxy_date needs to be const. So use date for the creation of
+ * our own Date header and pass it over to proxy_date later to
+ * avoid a compiler warning.
+ */
+ date = apr_palloc(r->pool, APR_RFC822_DATE_LEN);
+ ap_recent_rfc822_date(date, r->request_time);
+ }
+ server = apr_table_get(r->headers_out, "Server");
+ }
+ else {
+ date = apr_palloc(r->pool, APR_RFC822_DATE_LEN);
+ ap_recent_rfc822_date(date, r->request_time);
+ }
+
+ form_header_field(&h, "Date", proxy_date ? proxy_date : date );
+
+ if (!server && *us)
+ server = us;
+ if (server)
+ form_header_field(&h, "Server", server);
+
+ if (APLOGrtrace3(r)) {
+ ap_log_rerror(APLOG_MARK, APLOG_TRACE3, 0, r,
+ "Response sent with status %d%s",
+ r->status,
+ APLOGrtrace4(r) ? ", headers:" : "");
+
+ /*
+         * Date and Server are less interesting; use TRACE5 for them while
+ * using TRACE4 for the other headers.
+ */
+ ap_log_rerror(APLOG_MARK, APLOG_TRACE5, 0, r, " Date: %s",
+ proxy_date ? proxy_date : date );
+ if (server)
+ ap_log_rerror(APLOG_MARK, APLOG_TRACE5, 0, r, " Server: %s",
+ server);
+ }
+
+
+ /* unset so we don't send them again */
+ apr_table_unset(r->headers_out, "Date"); /* Avoid bogosity */
+ if (server) {
+ apr_table_unset(r->headers_out, "Server");
+ }
+}
+
+AP_DECLARE(void) ap_basic_http_header(request_rec *r, apr_bucket_brigade *bb)
+{
+ const char *protocol = NULL;
+
+ basic_http_header_check(r, &protocol);
+ basic_http_header(r, bb, protocol);
+}
+
+static void terminate_header(apr_bucket_brigade *bb)
+{
+ char crlf[] = CRLF;
+ apr_size_t buflen;
+
+ buflen = strlen(crlf);
+ ap_xlate_proto_to_ascii(crlf, buflen);
+ apr_brigade_write(bb, NULL, NULL, crlf, buflen);
+}
+
+AP_DECLARE_NONSTD(int) ap_send_http_trace(request_rec *r)
+{
+ core_server_config *conf;
+ int rv;
+ apr_bucket_brigade *bb;
+ header_struct h;
+ apr_bucket *b;
+ int body;
+ char *bodyread = NULL, *bodyoff;
+ apr_size_t bodylen = 0;
+ apr_size_t bodybuf;
+ long res = -1; /* init to avoid gcc -Wall warning */
+
+ if (r->method_number != M_TRACE) {
+ return DECLINED;
+ }
+
+ /* Get the original request */
+ while (r->prev) {
+ r = r->prev;
+ }
+ conf = ap_get_core_module_config(r->server->module_config);
+
+ if (conf->trace_enable == AP_TRACE_DISABLE) {
+ apr_table_setn(r->notes, "error-notes",
+ "TRACE denied by server configuration");
+ return HTTP_METHOD_NOT_ALLOWED;
+ }
+
+ if (conf->trace_enable == AP_TRACE_EXTENDED)
+ /* XXX: should be = REQUEST_CHUNKED_PASS */
+ body = REQUEST_CHUNKED_DECHUNK;
+ else
+ body = REQUEST_NO_BODY;
+
+ if ((rv = ap_setup_client_block(r, body))) {
+ if (rv == HTTP_REQUEST_ENTITY_TOO_LARGE)
+ apr_table_setn(r->notes, "error-notes",
+ "TRACE with a request body is not allowed");
+ return rv;
+ }
+
+ if (ap_should_client_block(r)) {
+
+ if (r->remaining > 0) {
+ if (r->remaining > 65536) {
+ apr_table_setn(r->notes, "error-notes",
+ "Extended TRACE request bodies cannot exceed 64k\n");
+ return HTTP_REQUEST_ENTITY_TOO_LARGE;
+ }
+ /* always 32 extra bytes to catch chunk header exceptions */
+ bodybuf = (apr_size_t)r->remaining + 32;
+ }
+ else {
+ /* Add an extra 8192 for chunk headers */
+ bodybuf = 73730;
+ }
+
+ bodyoff = bodyread = apr_palloc(r->pool, bodybuf);
+
+ /* only while we have enough for a chunked header */
+ while ((!bodylen || bodybuf >= 32) &&
+ (res = ap_get_client_block(r, bodyoff, bodybuf)) > 0) {
+ bodylen += res;
+ bodybuf -= res;
+ bodyoff += res;
+ }
+ if (res > 0 && bodybuf < 32) {
+ /* discard_rest_of_request_body into our buffer */
+ while (ap_get_client_block(r, bodyread, bodylen) > 0)
+ ;
+ apr_table_setn(r->notes, "error-notes",
+ "Extended TRACE request bodies cannot exceed 64k\n");
+ return HTTP_REQUEST_ENTITY_TOO_LARGE;
+ }
+
+ if (res < 0) {
+ return HTTP_BAD_REQUEST;
+ }
+ }
+
+ ap_set_content_type(r, "message/http");
+
+ /* Now we recreate the request, and echo it back */
+
+ bb = apr_brigade_create(r->pool, r->connection->bucket_alloc);
+#if APR_CHARSET_EBCDIC
+ {
+ char *tmp;
+ apr_size_t len;
+ len = strlen(r->the_request);
+ tmp = apr_pmemdup(r->pool, r->the_request, len);
+ ap_xlate_proto_to_ascii(tmp, len);
+ apr_brigade_putstrs(bb, NULL, NULL, tmp, CRLF_ASCII, NULL);
+ }
+#else
+ apr_brigade_putstrs(bb, NULL, NULL, r->the_request, CRLF, NULL);
+#endif
+ h.pool = r->pool;
+ h.bb = bb;
+ apr_table_do((int (*) (void *, const char *, const char *))
+ form_header_field, (void *) &h, r->headers_in, NULL);
+ apr_brigade_puts(bb, NULL, NULL, CRLF_ASCII);
+
+ /* If configured to accept a body, echo the body */
+ if (bodylen) {
+ b = apr_bucket_pool_create(bodyread, bodylen,
+ r->pool, bb->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(bb, b);
+ }
+
+ ap_pass_brigade(r->output_filters, bb);
+
+ return DONE;
+}
+
+typedef struct header_filter_ctx {
+ int headers_sent;
+} header_filter_ctx;
+
+AP_CORE_DECLARE_NONSTD(apr_status_t) ap_http_header_filter(ap_filter_t *f,
+ apr_bucket_brigade *b)
+{
+ request_rec *r = f->r;
+ conn_rec *c = r->connection;
+ const char *clheader;
+ const char *protocol = NULL;
+ apr_bucket *e;
+ apr_bucket_brigade *b2;
+ header_struct h;
+ header_filter_ctx *ctx = f->ctx;
+ const char *ctype;
+ ap_bucket_error *eb = NULL;
+ apr_status_t rv = APR_SUCCESS;
+ int recursive_error = 0;
+
+ AP_DEBUG_ASSERT(!r->main);
+
+ if (!ctx) {
+ ctx = f->ctx = apr_pcalloc(r->pool, sizeof(header_filter_ctx));
+ }
+ else if (ctx->headers_sent) {
+ /* Eat body if response must not have one. */
+ if (r->header_only || AP_STATUS_IS_HEADER_ONLY(r->status)) {
+ /* Still next filters may be waiting for EOS, so pass it (alone)
+ * when encountered and be done with this filter.
+ */
+ e = APR_BRIGADE_LAST(b);
+ if (e != APR_BRIGADE_SENTINEL(b) && APR_BUCKET_IS_EOS(e)) {
+ APR_BUCKET_REMOVE(e);
+ apr_brigade_cleanup(b);
+ APR_BRIGADE_INSERT_HEAD(b, e);
+ ap_remove_output_filter(f);
+ rv = ap_pass_brigade(f->next, b);
+ }
+ apr_brigade_cleanup(b);
+ return rv;
+ }
+ }
+
+ for (e = APR_BRIGADE_FIRST(b);
+ e != APR_BRIGADE_SENTINEL(b);
+ e = APR_BUCKET_NEXT(e))
+ {
+ if (AP_BUCKET_IS_ERROR(e) && !eb) {
+ eb = e->data;
+ continue;
+ }
+ /*
+         * If we see an EOC bucket, it is a signal that we should get out
+         * of the way and do nothing.
+ */
+ if (AP_BUCKET_IS_EOC(e)) {
+ ap_remove_output_filter(f);
+ return ap_pass_brigade(f->next, b);
+ }
+ }
+
+ if (!ctx->headers_sent && !check_headers(r)) {
+ /* We may come back here from ap_die() below,
+ * so clear anything from this response.
+ */
+ apr_table_clear(r->headers_out);
+ apr_table_clear(r->err_headers_out);
+ apr_brigade_cleanup(b);
+
+ /* Don't recall ap_die() if we come back here (from its own internal
+ * redirect or error response), otherwise we can end up in infinite
+ * recursion; better fall through with 500, minimal headers and an
+ * empty body (EOS only).
+ */
+ if (!check_headers_recursion(r)) {
+ ap_die(HTTP_INTERNAL_SERVER_ERROR, r);
+ return AP_FILTER_ERROR;
+ }
+ r->status = HTTP_INTERNAL_SERVER_ERROR;
+ e = ap_bucket_eoc_create(c->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(b, e);
+ e = apr_bucket_eos_create(c->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(b, e);
+ r->content_type = r->content_encoding = NULL;
+ r->content_languages = NULL;
+ ap_set_content_length(r, 0);
+ recursive_error = 1;
+ }
+ else if (eb) {
+ int status;
+ status = eb->status;
+ apr_brigade_cleanup(b);
+ ap_die(status, r);
+ return AP_FILTER_ERROR;
+ }
+
+ if (r->assbackwards) {
+ r->sent_bodyct = 1;
+ ap_remove_output_filter(f);
+ rv = ap_pass_brigade(f->next, b);
+ goto out;
+ }
+
+ /*
+ * Now that we are ready to send a response, we need to combine the two
+ * header field tables into a single table. If we don't do this, our
+ * later attempts to set or unset a given fieldname might be bypassed.
+ */
+ if (!apr_is_empty_table(r->err_headers_out)) {
+ r->headers_out = apr_table_overlay(r->pool, r->err_headers_out,
+ r->headers_out);
+ }
+
+ /*
+ * Remove the 'Vary' header field if the client can't handle it.
+ * Since this will have nasty effects on HTTP/1.1 caches, force
+ * the response into HTTP/1.0 mode.
+ *
+ * Note: the force-response-1.0 should come before the call to
+ * basic_http_header_check()
+ */
+ if (apr_table_get(r->subprocess_env, "force-no-vary") != NULL) {
+ apr_table_unset(r->headers_out, "Vary");
+ r->proto_num = HTTP_VERSION(1,0);
+ apr_table_setn(r->subprocess_env, "force-response-1.0", "1");
+ }
+ else {
+ fixup_vary(r);
+ }
+
+ /*
+ * Now remove any ETag response header field if earlier processing
+ * says so (such as a 'FileETag None' directive).
+ */
+ if (apr_table_get(r->notes, "no-etag") != NULL) {
+ apr_table_unset(r->headers_out, "ETag");
+ }
+
+ /* determine the protocol and whether we should use keepalives. */
+ basic_http_header_check(r, &protocol);
+ ap_set_keepalive(r);
+
+ if (AP_STATUS_IS_HEADER_ONLY(r->status)) {
+ apr_table_unset(r->headers_out, "Transfer-Encoding");
+ apr_table_unset(r->headers_out, "Content-Length");
+ r->content_type = r->content_encoding = NULL;
+ r->content_languages = NULL;
+ r->clength = r->chunked = 0;
+ }
+ else if (r->chunked) {
+ apr_table_mergen(r->headers_out, "Transfer-Encoding", "chunked");
+ apr_table_unset(r->headers_out, "Content-Length");
+ }
+
+ ctype = ap_make_content_type(r, r->content_type);
+ if (ctype) {
+ apr_table_setn(r->headers_out, "Content-Type", ctype);
+ }
+
+ if (r->content_encoding) {
+ apr_table_setn(r->headers_out, "Content-Encoding",
+ r->content_encoding);
+ }
+
+ if (!apr_is_empty_array(r->content_languages)) {
+ int i;
+ char *token;
+ char **languages = (char **)(r->content_languages->elts);
+ const char *field = apr_table_get(r->headers_out, "Content-Language");
+
+ while (field && (token = ap_get_list_item(r->pool, &field)) != NULL) {
+ for (i = 0; i < r->content_languages->nelts; ++i) {
+ if (!strcasecmp(token, languages[i]))
+ break;
+ }
+ if (i == r->content_languages->nelts) {
+ *((char **) apr_array_push(r->content_languages)) = token;
+ }
+ }
+
+ field = apr_array_pstrcat(r->pool, r->content_languages, ',');
+ apr_table_setn(r->headers_out, "Content-Language", field);
+ }
+
+ /*
+     * Control cacheability for non-cacheable responses if not already set by
+ * some other part of the server configuration.
+ */
+ if (r->no_cache && !apr_table_get(r->headers_out, "Expires")) {
+ char *date = apr_palloc(r->pool, APR_RFC822_DATE_LEN);
+ ap_recent_rfc822_date(date, r->request_time);
+ apr_table_addn(r->headers_out, "Expires", date);
+ }
+
+    /* This is a hack, but I can't find any way around it. The idea is that
+ * we don't want to send out 0 Content-Lengths if it is a head request.
+ * This happens when modules try to outsmart the server, and return
+ * if they see a HEAD request. Apache 1.3 handlers were supposed to
+ * just return in that situation, and the core handled the HEAD. In
+ * 2.0, if a handler returns, then the core sends an EOS bucket down
+ * the filter stack, and the content-length filter computes a C-L of
+ * zero and that gets put in the headers, and we end up sending a
+ * zero C-L to the client. We can't just remove the C-L filter,
+     * because well-behaved 2.0 handlers will send their data down the stack,
+ * and we will compute a real C-L for the head request. RBB
+ */
+ if (r->header_only
+ && (clheader = apr_table_get(r->headers_out, "Content-Length"))
+ && !strcmp(clheader, "0")) {
+ apr_table_unset(r->headers_out, "Content-Length");
+ }
+
+ b2 = apr_brigade_create(r->pool, c->bucket_alloc);
+ basic_http_header(r, b2, protocol);
+
+ h.pool = r->pool;
+ h.bb = b2;
+
+ if (r->status == HTTP_NOT_MODIFIED) {
+ apr_table_do((int (*)(void *, const char *, const char *)) form_header_field,
+ (void *) &h, r->headers_out,
+ "Connection",
+ "Keep-Alive",
+ "ETag",
+ "Content-Location",
+ "Expires",
+ "Cache-Control",
+ "Vary",
+ "Warning",
+ "WWW-Authenticate",
+ "Proxy-Authenticate",
+ "Set-Cookie",
+ "Set-Cookie2",
+ NULL);
+ }
+ else {
+ send_all_header_fields(&h, r);
+ }
+
+ terminate_header(b2);
+
+ rv = ap_pass_brigade(f->next, b2);
+ if (rv != APR_SUCCESS) {
+ goto out;
+ }
+ ctx->headers_sent = 1;
+
+ if (r->header_only || AP_STATUS_IS_HEADER_ONLY(r->status)) {
+ apr_brigade_cleanup(b);
+ goto out;
+ }
+
+ r->sent_bodyct = 1; /* Whatever follows is real body stuff... */
+
+ if (r->chunked) {
+ /* We can't add this filter until we have already sent the headers.
+ * If we add it before this point, then the headers will be chunked
+ * as well, and that is just wrong.
+ */
+ ap_add_output_filter("CHUNK", NULL, r, r->connection);
+ }
+
+ /* Don't remove this filter until after we have added the CHUNK filter.
+ * Otherwise, f->next won't be the CHUNK filter and thus the first
+ * brigade won't be chunked properly.
+ */
+ ap_remove_output_filter(f);
+ rv = ap_pass_brigade(f->next, b);
+out:
+ if (recursive_error) {
+ return AP_FILTER_ERROR;
+ }
+ return rv;
+}
+
+/*
+ * Map specific APR codes returned by the filter stack to HTTP error
+ * codes, or the default status code provided. Use it as follows:
+ *
+ * return ap_map_http_request_error(rv, HTTP_BAD_REQUEST);
+ *
+ * If the filter has already handled the error, AP_FILTER_ERROR will
+ * be returned, which is cleanly passed through.
+ *
+ * These mappings imply that the filter stack is reading from the
+ * downstream client; the proxy will map these codes differently.
+ */
+AP_DECLARE(int) ap_map_http_request_error(apr_status_t rv, int status)
+{
+ switch (rv) {
+ case AP_FILTER_ERROR:
+ return AP_FILTER_ERROR;
+
+ case APR_ENOSPC:
+ return HTTP_REQUEST_ENTITY_TOO_LARGE;
+
+ case APR_ENOTIMPL:
+ return HTTP_NOT_IMPLEMENTED;
+
+ case APR_TIMEUP:
+ case APR_ETIMEDOUT:
+ return HTTP_REQUEST_TIME_OUT;
+
+ default:
+ return status;
+ }
+}
+
+/* In HTTP/1.1, any method can have a body. However, most GET handlers
+ * wouldn't know what to do with a request body if they received one.
+ * This helper routine tests for and reads any message body in the request,
+ * simply discarding whatever it receives. We need to do this because
+ * failing to read the request body would cause it to be interpreted
+ * as the next request on a persistent connection.
+ *
+ * Since we return an error status if the request is malformed, this
+ * routine should be called at the beginning of a no-body handler, e.g.,
+ *
+ * if ((retval = ap_discard_request_body(r)) != OK) {
+ * return retval;
+ * }
+ */
+AP_DECLARE(int) ap_discard_request_body(request_rec *r)
+{
+ apr_bucket_brigade *bb;
+ int seen_eos;
+ apr_status_t rv;
+
+    /* Sometimes we'll get in a state where the input handling has
+     * detected an error and we want to drop the connection. In that
+     * case, don't read the data, as that is exactly what we're trying
+     * to avoid.
+ *
+ * This function is also a no-op on a subrequest.
+ */
+ if (r->main || r->connection->keepalive == AP_CONN_CLOSE ||
+ ap_status_drops_connection(r->status)) {
+ return OK;
+ }
+
+ bb = apr_brigade_create(r->pool, r->connection->bucket_alloc);
+ seen_eos = 0;
+ do {
+ apr_bucket *bucket;
+
+ rv = ap_get_brigade(r->input_filters, bb, AP_MODE_READBYTES,
+ APR_BLOCK_READ, HUGE_STRING_LEN);
+
+ if (rv != APR_SUCCESS) {
+ apr_brigade_destroy(bb);
+ return ap_map_http_request_error(rv, HTTP_BAD_REQUEST);
+ }
+
+ for (bucket = APR_BRIGADE_FIRST(bb);
+ bucket != APR_BRIGADE_SENTINEL(bb);
+ bucket = APR_BUCKET_NEXT(bucket))
+ {
+ const char *data;
+ apr_size_t len;
+
+ if (APR_BUCKET_IS_EOS(bucket)) {
+ seen_eos = 1;
+ break;
+ }
+
+ /* These are metadata buckets. */
+ if (bucket->length == 0) {
+ continue;
+ }
+
+            /* We MUST read because, if we have an unknown-length
+             * bucket or one that morphs, we want to exhaust it.
+ */
+ rv = apr_bucket_read(bucket, &data, &len, APR_BLOCK_READ);
+ if (rv != APR_SUCCESS) {
+ apr_brigade_destroy(bb);
+ return HTTP_BAD_REQUEST;
+ }
+ }
+ apr_brigade_cleanup(bb);
+ } while (!seen_eos);
+
+ return OK;
+}
+
+/* Here we deal with getting the request message body from the client.
+ * Whether or not the request contains a body is signaled by the presence
+ * of a non-zero Content-Length or by a Transfer-Encoding: chunked.
+ *
+ * Note that this is more complicated than it was in Apache 1.1 and prior
+ * versions, because chunked support means that the module does less.
+ *
+ * The proper procedure is this:
+ *
+ * 1. Call ap_setup_client_block() near the beginning of the request
+ * handler. This will set up all the necessary properties, and will
+ * return either OK, or an error code. If the latter, the module should
+ * return that error code. The second parameter selects the policy to
+ * apply if the request message indicates a body, and how a chunked
+ * transfer-coding should be interpreted. Choose one of
+ *
+ * REQUEST_NO_BODY Send 413 error if message has any body
+ * REQUEST_CHUNKED_ERROR Send 411 error if body without Content-Length
+ * REQUEST_CHUNKED_DECHUNK If chunked, remove the chunks for me.
+ * REQUEST_CHUNKED_PASS If chunked, pass the chunk headers with body.
+ *
+ * In order to use the last two options, the caller MUST provide a buffer
+ * large enough to hold a chunk-size line, including any extensions.
+ *
+ * 2. When you are ready to read a body (if any), call ap_should_client_block().
+ * This will tell the module whether or not to read input. If it is 0,
+ * the module should assume that there is no message body to read.
+ *
+ * 3. Finally, call ap_get_client_block in a loop. Pass it a buffer and its size.
+ * It will put data into the buffer (not necessarily a full buffer), and
+ * return the length of the input block. When it is done reading, it will
+ * return 0 if EOF, or -1 if there was an error.
+ * If an error occurs on input, we force an end to keepalive.
+ *
+ * This step also sends a 100 Continue response to HTTP/1.1 clients if appropriate.
+ */
+
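+/* An illustrative sketch of the three steps above (not part of the original
+ * sources; the read policy, buffer size and error handling are assumptions):
+ *
+ *     char buf[HUGE_STRING_LEN];
+ *     long len;
+ *     int retval;
+ *
+ *     if ((retval = ap_setup_client_block(r, REQUEST_CHUNKED_DECHUNK)) != OK) {
+ *         return retval;
+ *     }
+ *     if (ap_should_client_block(r)) {
+ *         while ((len = ap_get_client_block(r, buf, sizeof(buf))) > 0) {
+ *             ... consume len bytes of buf ...
+ *         }
+ *         if (len < 0) {
+ *             return HTTP_BAD_REQUEST;
+ *         }
+ *     }
+ */
+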
+AP_DECLARE(int) ap_setup_client_block(request_rec *r, int read_policy)
+{
+ const char *tenc = apr_table_get(r->headers_in, "Transfer-Encoding");
+ const char *lenp = apr_table_get(r->headers_in, "Content-Length");
+
+ r->read_body = read_policy;
+ r->read_chunked = 0;
+ r->remaining = 0;
+
+ if (tenc) {
+ if (strcasecmp(tenc, "chunked")) {
+ ap_log_rerror(APLOG_MARK, APLOG_INFO, 0, r, APLOGNO(01592)
+ "Unknown Transfer-Encoding %s", tenc);
+ return HTTP_NOT_IMPLEMENTED;
+ }
+ if (r->read_body == REQUEST_CHUNKED_ERROR) {
+ ap_log_rerror(APLOG_MARK, APLOG_INFO, 0, r, APLOGNO(01593)
+ "chunked Transfer-Encoding forbidden: %s", r->uri);
+ return (lenp) ? HTTP_BAD_REQUEST : HTTP_LENGTH_REQUIRED;
+ }
+
+ r->read_chunked = 1;
+ }
+ else if (lenp) {
+ char *endstr;
+
+ if (apr_strtoff(&r->remaining, lenp, &endstr, 10)
+ || *endstr || r->remaining < 0) {
+ r->remaining = 0;
+ ap_log_rerror(APLOG_MARK, APLOG_INFO, 0, r, APLOGNO(01594)
+ "Invalid Content-Length");
+ return HTTP_BAD_REQUEST;
+ }
+ }
+
+ if ((r->read_body == REQUEST_NO_BODY)
+ && (r->read_chunked || (r->remaining > 0))) {
+ ap_log_rerror(APLOG_MARK, APLOG_INFO, 0, r, APLOGNO(01595)
+ "%s with body is not allowed for %s", r->method, r->uri);
+ return HTTP_REQUEST_ENTITY_TOO_LARGE;
+ }
+
+#ifdef AP_DEBUG
+ {
+ /* Make sure ap_getline() didn't leave any droppings. */
+ core_request_config *req_cfg =
+ (core_request_config *)ap_get_core_module_config(r->request_config);
+ AP_DEBUG_ASSERT(APR_BRIGADE_EMPTY(req_cfg->bb));
+ }
+#endif
+
+ return OK;
+}
+
+AP_DECLARE(int) ap_should_client_block(request_rec *r)
+{
+ /* First check if we have already read the request body */
+
+ if (r->read_length || (!r->read_chunked && (r->remaining <= 0))) {
+ return 0;
+ }
+
+ return 1;
+}
+
+/* get_client_block is called in a loop to get the request message body.
+ * This is quite simple if the client includes a content-length
+ * (the normal case), but gets messy if the body is chunked. Note that
+ * r->remaining is used to maintain state across calls and that
+ * r->read_length is the total number of bytes given to the caller
+ * across all invocations. It is messy because we have to be careful not
+ * to read past the data provided by the client, since these reads block.
+ * Returns 0 on End-of-body, -1 on error or premature chunk end.
+ *
+ */
+AP_DECLARE(long) ap_get_client_block(request_rec *r, char *buffer,
+ apr_size_t bufsiz)
+{
+ apr_status_t rv;
+ apr_bucket_brigade *bb;
+
+ if (r->remaining < 0 || (!r->read_chunked && r->remaining == 0)) {
+ return 0;
+ }
+
+ bb = apr_brigade_create(r->pool, r->connection->bucket_alloc);
+ if (bb == NULL) {
+ r->connection->keepalive = AP_CONN_CLOSE;
+ return -1;
+ }
+
+ rv = ap_get_brigade(r->input_filters, bb, AP_MODE_READBYTES,
+ APR_BLOCK_READ, bufsiz);
+
+ /* We lose the failure code here. This is why ap_get_client_block should
+ * not be used.
+ */
+ if (rv == AP_FILTER_ERROR) {
+ /* AP_FILTER_ERROR means a filter has responded already,
+ * we are DONE.
+ */
+ apr_brigade_destroy(bb);
+ return -1;
+ }
+ if (rv != APR_SUCCESS) {
+ /* if we actually fail here, we want to just return and
+ * stop trying to read data from the client.
+ */
+ r->connection->keepalive = AP_CONN_CLOSE;
+ apr_brigade_destroy(bb);
+ return -1;
+ }
+
+ /* If this fails, it means that a filter is written incorrectly and that
+ * it needs to learn how to properly handle APR_BLOCK_READ requests by
+ * returning data when requested.
+ */
+ AP_DEBUG_ASSERT(!APR_BRIGADE_EMPTY(bb));
+
+    /* Check to see if there is an EOS bucket in the brigade.
+ *
+ * If so, we have to leave a nugget for the *next* ap_get_client_block
+ * call to return 0.
+ */
+ if (APR_BUCKET_IS_EOS(APR_BRIGADE_LAST(bb))) {
+ if (r->read_chunked) {
+ r->remaining = -1;
+ }
+ else {
+ r->remaining = 0;
+ }
+ }
+
+ rv = apr_brigade_flatten(bb, buffer, &bufsiz);
+ if (rv != APR_SUCCESS) {
+ apr_brigade_destroy(bb);
+ return -1;
+ }
+
+ /* XXX yank me? */
+ r->read_length += bufsiz;
+
+ apr_brigade_destroy(bb);
+ return bufsiz;
+}
+
+/* Context struct for ap_http_outerror_filter */
+typedef struct {
+ int seen_eoc;
+} outerror_filter_ctx_t;
+
+/* Filter to handle any error buckets on output */
+apr_status_t ap_http_outerror_filter(ap_filter_t *f,
+ apr_bucket_brigade *b)
+{
+ request_rec *r = f->r;
+ outerror_filter_ctx_t *ctx = (outerror_filter_ctx_t *)(f->ctx);
+ apr_bucket *e;
+
+ /* Create context if none is present */
+ if (!ctx) {
+ ctx = apr_pcalloc(r->pool, sizeof(outerror_filter_ctx_t));
+ f->ctx = ctx;
+ }
+ for (e = APR_BRIGADE_FIRST(b);
+ e != APR_BRIGADE_SENTINEL(b);
+ e = APR_BUCKET_NEXT(e))
+ {
+ if (AP_BUCKET_IS_ERROR(e)) {
+ /*
+ * Start of error handling state tree. Just one condition
+ * right now :)
+ */
+ if (((ap_bucket_error *)(e->data))->status == HTTP_BAD_GATEWAY) {
+ /* stream aborted and we have not ended it yet */
+ r->connection->keepalive = AP_CONN_CLOSE;
+ }
+ continue;
+ }
+ /* Detect EOC buckets and memorize this in the context. */
+ if (AP_BUCKET_IS_EOC(e)) {
+ ctx->seen_eoc = 1;
+ }
+ }
+ /*
+ * Remove all data buckets that are in a brigade after an EOC bucket
+ * was seen, as an EOC bucket tells us that no (further) resource
+ * and protocol data should go out to the client. OTOH meta buckets
+ * are still welcome as they might trigger needed actions down in
+ * the chain (e.g. in network filters like SSL).
+     * Remark 1: ALL data buckets in the brigade need to be dumped,
+     *           since a filter in between might have inserted data
+ * buckets BEFORE the EOC bucket sent by the original
+ * sender and we do NOT want this data to be sent.
+ * Remark 2: Dumping all data buckets here does not necessarily mean
+     *           that no further data is sent to the client as:
+ * 1. Network filters like SSL can still be triggered via
+ * meta buckets to talk with the client e.g. for a
+ * clean shutdown.
+ * 2. There could be still data that was buffered before
+ * down in the chain that gets flushed by a FLUSH or an
+ * EOS bucket.
+ */
+ if (ctx->seen_eoc) {
+ for (e = APR_BRIGADE_FIRST(b);
+ e != APR_BRIGADE_SENTINEL(b);
+ e = APR_BUCKET_NEXT(e))
+ {
+ if (!APR_BUCKET_IS_METADATA(e)) {
+ APR_BUCKET_REMOVE(e);
+ }
+ }
+ }
+
+ return ap_pass_brigade(f->next, b);
+}
diff --git a/modules/http/http_protocol.c b/modules/http/http_protocol.c
new file mode 100644
index 0000000..e419eb6
--- /dev/null
+++ b/modules/http/http_protocol.c
@@ -0,0 +1,1685 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * http_protocol.c --- routines which directly communicate with the client.
+ *
+ * Code originally by Rob McCool; much redone by Robert S. Thau
+ * and the Apache Software Foundation.
+ */
+
+#include "apr.h"
+#include "apr_strings.h"
+#include "apr_buckets.h"
+#include "apr_lib.h"
+#include "apr_signal.h"
+
+#define APR_WANT_STDIO /* for sscanf */
+#define APR_WANT_STRFUNC
+#define APR_WANT_MEMFUNC
+#include "apr_want.h"
+
+#include "util_filter.h"
+#include "ap_config.h"
+#include "httpd.h"
+#include "http_config.h"
+#include "http_core.h"
+#include "http_protocol.h"
+#include "http_main.h"
+#include "http_request.h"
+#include "http_vhost.h"
+#include "http_log.h" /* For errors detected in basic auth common
+ * support code... */
+#include "apr_date.h" /* For apr_date_parse_http and APR_DATE_BAD */
+#include "util_charset.h"
+#include "util_ebcdic.h"
+#include "util_time.h"
+#include "ap_mpm.h"
+
+#include "mod_core.h"
+
+#if APR_HAVE_STDARG_H
+#include <stdarg.h>
+#endif
+#if APR_HAVE_UNISTD_H
+#include <unistd.h>
+#endif
+
+APLOG_USE_MODULE(http);
+
+/* New Apache routine to map status codes into array indices
+ * e.g. 100 -> 0, 101 -> 1, 200 -> 2 ...
+ * The number of status lines must equal the value of
+ * RESPONSE_CODES (httpd.h) and must be listed in order.
+ * No gaps are allowed between X00 and the largest Xnn
+ * for any X (see ap_index_of_response).
+ * When adding a new code here, add a define to httpd.h
+ * as well.
+ */
+
+static const char * const status_lines[RESPONSE_CODES] =
+{
+ "100 Continue",
+ "101 Switching Protocols",
+ "102 Processing",
+#define LEVEL_200 3
+ "200 OK",
+ "201 Created",
+ "202 Accepted",
+ "203 Non-Authoritative Information",
+ "204 No Content",
+ "205 Reset Content",
+ "206 Partial Content",
+ "207 Multi-Status",
+ "208 Already Reported",
+ NULL, /* 209 */
+ NULL, /* 210 */
+ NULL, /* 211 */
+ NULL, /* 212 */
+ NULL, /* 213 */
+ NULL, /* 214 */
+ NULL, /* 215 */
+ NULL, /* 216 */
+ NULL, /* 217 */
+ NULL, /* 218 */
+ NULL, /* 219 */
+ NULL, /* 220 */
+ NULL, /* 221 */
+ NULL, /* 222 */
+ NULL, /* 223 */
+ NULL, /* 224 */
+ NULL, /* 225 */
+ "226 IM Used",
+#define LEVEL_300 30
+ "300 Multiple Choices",
+ "301 Moved Permanently",
+ "302 Found",
+ "303 See Other",
+ "304 Not Modified",
+ "305 Use Proxy",
+ NULL, /* 306 */
+ "307 Temporary Redirect",
+ "308 Permanent Redirect",
+#define LEVEL_400 39
+ "400 Bad Request",
+ "401 Unauthorized",
+ "402 Payment Required",
+ "403 Forbidden",
+ "404 Not Found",
+ "405 Method Not Allowed",
+ "406 Not Acceptable",
+ "407 Proxy Authentication Required",
+ "408 Request Timeout",
+ "409 Conflict",
+ "410 Gone",
+ "411 Length Required",
+ "412 Precondition Failed",
+ "413 Request Entity Too Large",
+ "414 Request-URI Too Long",
+ "415 Unsupported Media Type",
+ "416 Requested Range Not Satisfiable",
+ "417 Expectation Failed",
+ NULL, /* 418 */
+ NULL, /* 419 */
+ NULL, /* 420 */
+ "421 Misdirected Request",
+ "422 Unprocessable Entity",
+ "423 Locked",
+ "424 Failed Dependency",
+ NULL, /* 425 */
+ "426 Upgrade Required",
+ NULL, /* 427 */
+ "428 Precondition Required",
+ "429 Too Many Requests",
+ NULL, /* 430 */
+ "431 Request Header Fields Too Large",
+ NULL, /* 432 */
+ NULL, /* 433 */
+ NULL, /* 434 */
+ NULL, /* 435 */
+ NULL, /* 436 */
+ NULL, /* 437 */
+ NULL, /* 438 */
+ NULL, /* 439 */
+ NULL, /* 440 */
+ NULL, /* 441 */
+ NULL, /* 442 */
+ NULL, /* 443 */
+ NULL, /* 444 */
+ NULL, /* 445 */
+ NULL, /* 446 */
+ NULL, /* 447 */
+ NULL, /* 448 */
+ NULL, /* 449 */
+ NULL, /* 450 */
+ "451 Unavailable For Legal Reasons",
+#define LEVEL_500 91
+ "500 Internal Server Error",
+ "501 Not Implemented",
+ "502 Bad Gateway",
+ "503 Service Unavailable",
+ "504 Gateway Timeout",
+ "505 HTTP Version Not Supported",
+ "506 Variant Also Negotiates",
+ "507 Insufficient Storage",
+ "508 Loop Detected",
+ NULL, /* 509 */
+ "510 Not Extended",
+ "511 Network Authentication Required"
+};
+
+APR_HOOK_STRUCT(
+ APR_HOOK_LINK(insert_error_filter)
+)
+
+AP_IMPLEMENT_HOOK_VOID(insert_error_filter, (request_rec *r), (r))
+
+/* The index of the first bit field that is used to index into a limit
+ * bitmask. M_INVALID + 1 to METHOD_NUMBER_LAST.
+ */
+#define METHOD_NUMBER_FIRST (M_INVALID + 1)
+
+/* The max method number. Method numbers are used to shift bitmasks,
+ * so this cannot exceed 63, and all bits high is equal to -1, which is a
+ * special flag, so the last bit used has index 62.
+ */
+#define METHOD_NUMBER_LAST 62
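+
+/* Illustrative note (not in the original sources): a method number shifts a
+ * single bit into a 64-bit limit mask, so a typical membership test looks
+ * like (mask & (AP_METHOD_BIT << methnum)), with AP_METHOD_BIT from httpd.h.
+ */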
+
+static int is_mpm_running(void)
+{
+ int mpm_state = 0;
+
+ if (ap_mpm_query(AP_MPMQ_MPM_STATE, &mpm_state)) {
+ return 0;
+ }
+
+ if (mpm_state == AP_MPMQ_STOPPING) {
+ return 0;
+ }
+
+ return 1;
+}
+
+
+AP_DECLARE(int) ap_set_keepalive(request_rec *r)
+{
+ int ka_sent = 0;
+ int left = r->server->keep_alive_max - r->connection->keepalives;
+ int wimpy = ap_find_token(r->pool,
+ apr_table_get(r->headers_out, "Connection"),
+ "close");
+ const char *conn = apr_table_get(r->headers_in, "Connection");
+
+ /* The following convoluted conditional determines whether or not
+ * the current connection should remain persistent after this response
+ * (a.k.a. HTTP Keep-Alive) and whether or not the output message
+ * body should use the HTTP/1.1 chunked transfer-coding. In English,
+ *
+ * IF we have not marked this connection as errored;
+ * and the client isn't expecting 100-continue (PR47087 - more
+ * input here could be the client continuing when we're
+ * closing the request).
+ * and the response body has a defined length due to the status code
+ * being 304 or 204, the request method being HEAD, already
+ * having defined Content-Length or Transfer-Encoding: chunked, or
+ * the request version being HTTP/1.1 and thus capable of being set
+ * as chunked [we know the (r->chunked = 1) side-effect is ugly];
+ * and the server configuration enables keep-alive;
+ * and the server configuration has a reasonable inter-request timeout;
+ * and there is no maximum # requests or the max hasn't been reached;
+ * and the response status does not require a close;
+ * and the response generator has not already indicated close;
+ * and the client did not request non-persistence (Connection: close);
+ * and we haven't been configured to ignore the buggy twit
+     *      or they're a buggy twit coming through an HTTP/1.1 proxy
+ * and the client is requesting an HTTP/1.0-style keep-alive
+ * or the client claims to be HTTP/1.1 compliant (perhaps a proxy);
+ * and this MPM process is not already exiting
+ * THEN we can be persistent, which requires more headers be output.
+ *
+ * Note that the condition evaluation order is extremely important.
+ */
+ if ((r->connection->keepalive != AP_CONN_CLOSE)
+ && !r->expecting_100
+ && (r->header_only
+ || AP_STATUS_IS_HEADER_ONLY(r->status)
+ || apr_table_get(r->headers_out, "Content-Length")
+ || ap_find_last_token(r->pool,
+ apr_table_get(r->headers_out,
+ "Transfer-Encoding"),
+ "chunked")
+ || ((r->proto_num >= HTTP_VERSION(1,1))
+ && (r->chunked = 1))) /* THIS CODE IS CORRECT, see above. */
+ && r->server->keep_alive
+ && (r->server->keep_alive_timeout > 0)
+ && ((r->server->keep_alive_max == 0)
+ || (left > 0))
+ && !ap_status_drops_connection(r->status)
+ && !wimpy
+ && !ap_find_token(r->pool, conn, "close")
+ && (!apr_table_get(r->subprocess_env, "nokeepalive")
+ || apr_table_get(r->headers_in, "Via"))
+ && ((ka_sent = ap_find_token(r->pool, conn, "keep-alive"))
+ || (r->proto_num >= HTTP_VERSION(1,1)))
+ && is_mpm_running()) {
+
+ r->connection->keepalive = AP_CONN_KEEPALIVE;
+ r->connection->keepalives++;
+
+ /* If they sent a Keep-Alive token, send one back */
+ if (ka_sent) {
+ if (r->server->keep_alive_max) {
+ apr_table_setn(r->headers_out, "Keep-Alive",
+ apr_psprintf(r->pool, "timeout=%d, max=%d",
+ (int)apr_time_sec(r->server->keep_alive_timeout),
+ left));
+ }
+ else {
+ apr_table_setn(r->headers_out, "Keep-Alive",
+ apr_psprintf(r->pool, "timeout=%d",
+ (int)apr_time_sec(r->server->keep_alive_timeout)));
+ }
+ apr_table_mergen(r->headers_out, "Connection", "Keep-Alive");
+ }
+
+ return 1;
+ }
+
+ /* Otherwise, we need to indicate that we will be closing this
+ * connection immediately after the current response.
+ *
+ * We only really need to send "close" to HTTP/1.1 clients, but we
+ * always send it anyway, because a broken proxy may identify itself
+ * as HTTP/1.0, but pass our request along with our HTTP/1.1 tag
+     * to an HTTP/1.1 client. Better safe than sorry.
+ */
+ if (!wimpy) {
+ apr_table_mergen(r->headers_out, "Connection", "close");
+ }
+
+ /*
+ * If we had previously been a keepalive connection and this
+ * is the last one, then bump up the number of keepalives
+ * we've had
+ */
+ if ((r->connection->keepalive != AP_CONN_CLOSE)
+ && r->server->keep_alive_max
+ && !left) {
+ r->connection->keepalives++;
+ }
+ r->connection->keepalive = AP_CONN_CLOSE;
+
+ return 0;
+}
+
+AP_DECLARE(ap_condition_e) ap_condition_if_match(request_rec *r,
+ apr_table_t *headers)
+{
+ const char *if_match, *etag;
+
+ /* A server MUST use the strong comparison function (see section 13.3.3)
+ * to compare the entity tags in If-Match.
+ */
+ if ((if_match = apr_table_get(r->headers_in, "If-Match")) != NULL) {
+ if (if_match[0] == '*'
+ || ((etag = apr_table_get(headers, "ETag")) != NULL
+ && ap_find_etag_strong(r->pool, if_match, etag))) {
+ return AP_CONDITION_STRONG;
+ }
+ else {
+ return AP_CONDITION_NOMATCH;
+ }
+ }
+
+ return AP_CONDITION_NONE;
+}
+
+AP_DECLARE(ap_condition_e) ap_condition_if_unmodified_since(request_rec *r,
+ apr_table_t *headers)
+{
+ const char *if_unmodified;
+
+ if_unmodified = apr_table_get(r->headers_in, "If-Unmodified-Since");
+ if (if_unmodified) {
+ apr_int64_t mtime, reqtime;
+
+ apr_time_t ius = apr_time_sec(apr_date_parse_http(if_unmodified));
+
+ /* All of our comparisons must be in seconds, because that's the
+ * highest time resolution the HTTP specification allows.
+ */
+ mtime = apr_time_sec(apr_date_parse_http(
+ apr_table_get(headers, "Last-Modified")));
+ if (mtime == APR_DATE_BAD) {
+ mtime = apr_time_sec(r->mtime ? r->mtime : apr_time_now());
+ }
+
+ reqtime = apr_time_sec(apr_date_parse_http(
+ apr_table_get(headers, "Date")));
+ if (!reqtime) {
+ reqtime = apr_time_sec(r->request_time);
+ }
+
+ if ((ius != APR_DATE_BAD) && (mtime > ius)) {
+ if (reqtime < mtime + 60) {
+ if (apr_table_get(r->headers_in, "Range")) {
+ /* weak matches not allowed with Range requests */
+ return AP_CONDITION_NOMATCH;
+ }
+ else {
+ return AP_CONDITION_WEAK;
+ }
+ }
+ else {
+ return AP_CONDITION_STRONG;
+ }
+ }
+ else {
+ return AP_CONDITION_NOMATCH;
+ }
+ }
+
+ return AP_CONDITION_NONE;
+}
+
+AP_DECLARE(ap_condition_e) ap_condition_if_none_match(request_rec *r,
+ apr_table_t *headers)
+{
+ const char *if_nonematch, *etag;
+
+ if_nonematch = apr_table_get(r->headers_in, "If-None-Match");
+ if (if_nonematch != NULL) {
+
+ if (if_nonematch[0] == '*') {
+ return AP_CONDITION_STRONG;
+ }
+
+        /* See section 13.3.3 for rules on how to determine if two entity tags
+ * match. The weak comparison function can only be used with GET or HEAD
+ * requests.
+ */
+ if (r->method_number == M_GET) {
+ if ((etag = apr_table_get(headers, "ETag")) != NULL) {
+ if (apr_table_get(r->headers_in, "Range")) {
+ if (ap_find_etag_strong(r->pool, if_nonematch, etag)) {
+ return AP_CONDITION_STRONG;
+ }
+ }
+ else {
+ if (ap_find_etag_weak(r->pool, if_nonematch, etag)) {
+ return AP_CONDITION_WEAK;
+ }
+ }
+ }
+ }
+
+ else if ((etag = apr_table_get(headers, "ETag")) != NULL
+ && ap_find_etag_strong(r->pool, if_nonematch, etag)) {
+ return AP_CONDITION_STRONG;
+ }
+ return AP_CONDITION_NOMATCH;
+ }
+
+ return AP_CONDITION_NONE;
+}
+
+AP_DECLARE(ap_condition_e) ap_condition_if_modified_since(request_rec *r,
+ apr_table_t *headers)
+{
+ const char *if_modified_since;
+
+ if ((if_modified_since = apr_table_get(r->headers_in, "If-Modified-Since"))
+ != NULL) {
+ apr_int64_t mtime;
+ apr_int64_t ims, reqtime;
+
+ /* All of our comparisons must be in seconds, because that's the
+ * highest time resolution the HTTP specification allows.
+ */
+
+ mtime = apr_time_sec(apr_date_parse_http(
+ apr_table_get(headers, "Last-Modified")));
+ if (mtime == APR_DATE_BAD) {
+ mtime = apr_time_sec(r->mtime ? r->mtime : apr_time_now());
+ }
+
+ reqtime = apr_time_sec(apr_date_parse_http(
+ apr_table_get(headers, "Date")));
+ if (!reqtime) {
+ reqtime = apr_time_sec(r->request_time);
+ }
+
+ ims = apr_time_sec(apr_date_parse_http(if_modified_since));
+
+ if (ims >= mtime && ims <= reqtime) {
+ if (reqtime < mtime + 60) {
+ if (apr_table_get(r->headers_in, "Range")) {
+ /* weak matches not allowed with Range requests */
+ return AP_CONDITION_NOMATCH;
+ }
+ else {
+ return AP_CONDITION_WEAK;
+ }
+ }
+ else {
+ return AP_CONDITION_STRONG;
+ }
+ }
+ else {
+ return AP_CONDITION_NOMATCH;
+ }
+ }
+
+ return AP_CONDITION_NONE;
+}
+
+AP_DECLARE(ap_condition_e) ap_condition_if_range(request_rec *r,
+ apr_table_t *headers)
+{
+ const char *if_range, *etag;
+
+ if ((if_range = apr_table_get(r->headers_in, "If-Range"))
+ && apr_table_get(r->headers_in, "Range")) {
+ if (if_range[0] == '"') {
+
+ if ((etag = apr_table_get(headers, "ETag"))
+ && !strcmp(if_range, etag)) {
+ return AP_CONDITION_STRONG;
+ }
+ else {
+ return AP_CONDITION_NOMATCH;
+ }
+
+ }
+ else {
+ apr_int64_t mtime;
+ apr_int64_t rtime, reqtime;
+
+ /* All of our comparisons must be in seconds, because that's the
+ * highest time resolution the HTTP specification allows.
+ */
+
+ mtime = apr_time_sec(apr_date_parse_http(
+ apr_table_get(headers, "Last-Modified")));
+ if (mtime == APR_DATE_BAD) {
+ mtime = apr_time_sec(r->mtime ? r->mtime : apr_time_now());
+ }
+
+ reqtime = apr_time_sec(apr_date_parse_http(
+ apr_table_get(headers, "Date")));
+ if (!reqtime) {
+ reqtime = apr_time_sec(r->request_time);
+ }
+
+ rtime = apr_time_sec(apr_date_parse_http(if_range));
+
+ if (rtime == mtime) {
+ if (reqtime < mtime + 60) {
+ /* weak matches not allowed with Range requests */
+ return AP_CONDITION_NOMATCH;
+ }
+ else {
+ return AP_CONDITION_STRONG;
+ }
+ }
+ else {
+ return AP_CONDITION_NOMATCH;
+ }
+ }
+ }
+
+ return AP_CONDITION_NONE;
+}
+
+AP_DECLARE(int) ap_meets_conditions(request_rec *r)
+{
+ int not_modified = -1; /* unset by default */
+ ap_condition_e cond;
+
+ /* Check for conditional requests --- note that we only want to do
+ * this if we are successful so far and we are not processing a
+ * subrequest or an ErrorDocument.
+ *
+ * The order of the checks is important, since ETag checks are supposed
+ * to be more accurate than checks relative to the modification time.
+ * However, not all documents are guaranteed to *have* ETags, and some
+ * might have Last-Modified values w/o ETags, so this gets a little
+ * complicated.
+ */
+
+ if (!ap_is_HTTP_SUCCESS(r->status) || r->no_local_copy) {
+ return OK;
+ }
+
+ /* If an If-Match request-header field was given
+ * AND the field value is not "*" (meaning match anything)
+ * AND if our strong ETag does not match any entity tag in that field,
+ * respond with a status of 412 (Precondition Failed).
+ */
+ cond = ap_condition_if_match(r, r->headers_out);
+ if (AP_CONDITION_NOMATCH == cond) {
+ return HTTP_PRECONDITION_FAILED;
+ }
+
+ /* Else if a valid If-Unmodified-Since request-header field was given
+ * AND the requested resource has been modified since the time
+ * specified in this field, then the server MUST
+ * respond with a status of 412 (Precondition Failed).
+ */
+ cond = ap_condition_if_unmodified_since(r, r->headers_out);
+ if (AP_CONDITION_NOMATCH == cond) {
+ not_modified = 0;
+ }
+ else if (cond >= AP_CONDITION_WEAK) {
+ return HTTP_PRECONDITION_FAILED;
+ }
+
+ /* If an If-None-Match request-header field was given
+ * AND the field value is "*" (meaning match anything)
+ * OR our ETag matches any of the entity tags in that field, fail.
+ *
+ * If the request method was GET or HEAD, failure means the server
+ * SHOULD respond with a 304 (Not Modified) response.
+ * For all other request methods, failure means the server MUST
+ * respond with a status of 412 (Precondition Failed).
+ *
+ * GET or HEAD allow weak etag comparison, all other methods require
+ * strong comparison. We can only use weak if it's not a range request.
+ */
+ cond = ap_condition_if_none_match(r, r->headers_out);
+ if (AP_CONDITION_NOMATCH == cond) {
+ not_modified = 0;
+ }
+ else if (cond >= AP_CONDITION_WEAK) {
+ if (r->method_number == M_GET) {
+ if (not_modified) {
+ not_modified = 1;
+ }
+ }
+ else {
+ return HTTP_PRECONDITION_FAILED;
+ }
+ }
+
+ /* If a valid If-Modified-Since request-header field was given
+ * AND it is a GET or HEAD request
+ * AND the requested resource has not been modified since the time
+ * specified in this field, then the server MUST
+ * respond with a status of 304 (Not Modified).
+ * A date later than the server's current request time is invalid.
+ */
+ cond = ap_condition_if_modified_since(r, r->headers_out);
+ if (AP_CONDITION_NOMATCH == cond) {
+ not_modified = 0;
+ }
+ else if (cond >= AP_CONDITION_WEAK) {
+ if (r->method_number == M_GET) {
+ if (not_modified) {
+ not_modified = 1;
+ }
+ }
+ }
+
+    /* If an If-Range and a Range header are present, we must return
+ * 200 OK. The byterange filter will convert it to a range response.
+ */
+ cond = ap_condition_if_range(r, r->headers_out);
+ if (cond > AP_CONDITION_NONE) {
+ return OK;
+ }
+
+ if (not_modified == 1) {
+ return HTTP_NOT_MODIFIED;
+ }
+
+ return OK;
+}
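+
+/* A minimal, illustrative sketch of how a content handler typically uses
+ * ap_meets_conditions(): set the response validators first, then let the
+ * function decide whether a 304/412 short-circuits the response.
+ * (Hypothetical handler code, not part of this file's API.)
+ *
+ *     int errstatus;
+ *
+ *     ap_set_last_modified(r);
+ *     ap_set_etag(r);
+ *     if ((errstatus = ap_meets_conditions(r)) != OK) {
+ *         return errstatus;    (e.g. HTTP_NOT_MODIFIED or 412)
+ *     }
+ *     (otherwise generate and send the full response body)
+ */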
+
+/**
+ * Singleton registry of additional methods. This maps new method names
+ * such as "MYGET" to methnums, which are int offsets into bitmasks.
+ *
+ * This follows the same technique as standard M_GET, M_POST, etc. These
+ * are dynamically assigned when modules are loaded and <Limit GET MYGET>
+ * directives are processed.
+ */
+static apr_hash_t *methods_registry = NULL;
+static int cur_method_number = METHOD_NUMBER_FIRST;
+
+/* internal function to register one method/number pair */
+static void register_one_method(apr_pool_t *p, const char *methname,
+ int methnum)
+{
+ int *pnum = apr_palloc(p, sizeof(*pnum));
+
+ *pnum = methnum;
+ apr_hash_set(methods_registry, methname, APR_HASH_KEY_STRING, pnum);
+}
+
+/* This internal function is used to clear the method registry
+ * and reset the cur_method_number counter.
+ */
+static apr_status_t ap_method_registry_destroy(void *notused)
+{
+ methods_registry = NULL;
+ cur_method_number = METHOD_NUMBER_FIRST;
+ return APR_SUCCESS;
+}
+
+AP_DECLARE(void) ap_method_registry_init(apr_pool_t *p)
+{
+ methods_registry = apr_hash_make(p);
+ apr_pool_cleanup_register(p, NULL,
+ ap_method_registry_destroy,
+ apr_pool_cleanup_null);
+
+ /* put all the standard methods into the registry hash to ease the
+ * mapping operations between name and number
+     * HEAD is a special instance of the GET method and shares the same ID
+ */
+ register_one_method(p, "GET", M_GET);
+ register_one_method(p, "HEAD", M_GET);
+ register_one_method(p, "PUT", M_PUT);
+ register_one_method(p, "POST", M_POST);
+ register_one_method(p, "DELETE", M_DELETE);
+ register_one_method(p, "CONNECT", M_CONNECT);
+ register_one_method(p, "OPTIONS", M_OPTIONS);
+ register_one_method(p, "TRACE", M_TRACE);
+ register_one_method(p, "PATCH", M_PATCH);
+ register_one_method(p, "PROPFIND", M_PROPFIND);
+ register_one_method(p, "PROPPATCH", M_PROPPATCH);
+ register_one_method(p, "MKCOL", M_MKCOL);
+ register_one_method(p, "COPY", M_COPY);
+ register_one_method(p, "MOVE", M_MOVE);
+ register_one_method(p, "LOCK", M_LOCK);
+ register_one_method(p, "UNLOCK", M_UNLOCK);
+ register_one_method(p, "VERSION-CONTROL", M_VERSION_CONTROL);
+ register_one_method(p, "CHECKOUT", M_CHECKOUT);
+ register_one_method(p, "UNCHECKOUT", M_UNCHECKOUT);
+ register_one_method(p, "CHECKIN", M_CHECKIN);
+ register_one_method(p, "UPDATE", M_UPDATE);
+ register_one_method(p, "LABEL", M_LABEL);
+ register_one_method(p, "REPORT", M_REPORT);
+ register_one_method(p, "MKWORKSPACE", M_MKWORKSPACE);
+ register_one_method(p, "MKACTIVITY", M_MKACTIVITY);
+ register_one_method(p, "BASELINE-CONTROL", M_BASELINE_CONTROL);
+ register_one_method(p, "MERGE", M_MERGE);
+}
+
+AP_DECLARE(int) ap_method_register(apr_pool_t *p, const char *methname)
+{
+ int *methnum;
+
+ if (methods_registry == NULL) {
+ ap_method_registry_init(p);
+ }
+
+ if (methname == NULL) {
+ return M_INVALID;
+ }
+
+    /* Check if the method was previously registered. If it was,
+ * return the associated method number.
+ */
+ methnum = (int *)apr_hash_get(methods_registry, methname,
+ APR_HASH_KEY_STRING);
+ if (methnum != NULL)
+ return *methnum;
+
+ if (cur_method_number > METHOD_NUMBER_LAST) {
+ /* The method registry has run out of dynamically
+ * assignable method numbers. Log this and return M_INVALID.
+ */
+ ap_log_perror(APLOG_MARK, APLOG_ERR, 0, p, APLOGNO(01610)
+ "Maximum new request methods %d reached while "
+ "registering method %s.",
+ METHOD_NUMBER_LAST, methname);
+ return M_INVALID;
+ }
+
+ register_one_method(p, methname, cur_method_number);
+ return cur_method_number++;
+}
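+
+/* Illustrative sketch (hypothetical module code): a module that needs its
+ * own method registers it once, typically against its configuration pool,
+ * and compares the returned number against r->method_number at request
+ * time:
+ *
+ *     static int mymeth_number;
+ *
+ *     mymeth_number = ap_method_register(pconf, "MYGET");
+ *     ...
+ *     if (r->method_number == mymeth_number) {
+ *         (handle the custom method)
+ *     }
+ */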
+
+#define UNKNOWN_METHOD (-1)
+
+static int lookup_builtin_method(const char *method, apr_size_t len)
+{
+ /* Note: the following code was generated by the "shilka" tool from
+ the "cocom" parsing/compilation toolkit. It is an optimized lookup
+ based on analysis of the input keywords. Postprocessing was done
+       on the shilka output, but the basic structure and analysis are
+ from there. Should new HTTP methods be added, then manual insertion
+ into this code is fine, or simply re-running the shilka tool on
+ the appropriate input. */
+
+ /* Note: it is also quite reasonable to just use our method_registry,
+ but I'm assuming (probably incorrectly) we want more speed here
+ (based on the optimizations the previous code was doing). */
+
+ switch (len)
+ {
+ case 3:
+ switch (method[0])
+ {
+ case 'P':
+ return (method[1] == 'U'
+ && method[2] == 'T'
+ ? M_PUT : UNKNOWN_METHOD);
+ case 'G':
+ return (method[1] == 'E'
+ && method[2] == 'T'
+ ? M_GET : UNKNOWN_METHOD);
+ default:
+ return UNKNOWN_METHOD;
+ }
+
+ case 4:
+ switch (method[0])
+ {
+ case 'H':
+ return (method[1] == 'E'
+ && method[2] == 'A'
+ && method[3] == 'D'
+ ? M_GET : UNKNOWN_METHOD);
+ case 'P':
+ return (method[1] == 'O'
+ && method[2] == 'S'
+ && method[3] == 'T'
+ ? M_POST : UNKNOWN_METHOD);
+ case 'M':
+ return (method[1] == 'O'
+ && method[2] == 'V'
+ && method[3] == 'E'
+ ? M_MOVE : UNKNOWN_METHOD);
+ case 'L':
+ return (method[1] == 'O'
+ && method[2] == 'C'
+ && method[3] == 'K'
+ ? M_LOCK : UNKNOWN_METHOD);
+ case 'C':
+ return (method[1] == 'O'
+ && method[2] == 'P'
+ && method[3] == 'Y'
+ ? M_COPY : UNKNOWN_METHOD);
+ default:
+ return UNKNOWN_METHOD;
+ }
+
+ case 5:
+ switch (method[2])
+ {
+ case 'T':
+ return (memcmp(method, "PATCH", 5) == 0
+ ? M_PATCH : UNKNOWN_METHOD);
+ case 'R':
+ return (memcmp(method, "MERGE", 5) == 0
+ ? M_MERGE : UNKNOWN_METHOD);
+ case 'C':
+ return (memcmp(method, "MKCOL", 5) == 0
+ ? M_MKCOL : UNKNOWN_METHOD);
+ case 'B':
+ return (memcmp(method, "LABEL", 5) == 0
+ ? M_LABEL : UNKNOWN_METHOD);
+ case 'A':
+ return (memcmp(method, "TRACE", 5) == 0
+ ? M_TRACE : UNKNOWN_METHOD);
+ default:
+ return UNKNOWN_METHOD;
+ }
+
+ case 6:
+ switch (method[0])
+ {
+ case 'U':
+ switch (method[5])
+ {
+ case 'K':
+ return (memcmp(method, "UNLOCK", 6) == 0
+ ? M_UNLOCK : UNKNOWN_METHOD);
+ case 'E':
+ return (memcmp(method, "UPDATE", 6) == 0
+ ? M_UPDATE : UNKNOWN_METHOD);
+ default:
+ return UNKNOWN_METHOD;
+ }
+ case 'R':
+ return (memcmp(method, "REPORT", 6) == 0
+ ? M_REPORT : UNKNOWN_METHOD);
+ case 'D':
+ return (memcmp(method, "DELETE", 6) == 0
+ ? M_DELETE : UNKNOWN_METHOD);
+ default:
+ return UNKNOWN_METHOD;
+ }
+
+ case 7:
+ switch (method[1])
+ {
+ case 'P':
+ return (memcmp(method, "OPTIONS", 7) == 0
+ ? M_OPTIONS : UNKNOWN_METHOD);
+ case 'O':
+ return (memcmp(method, "CONNECT", 7) == 0
+ ? M_CONNECT : UNKNOWN_METHOD);
+ case 'H':
+ return (memcmp(method, "CHECKIN", 7) == 0
+ ? M_CHECKIN : UNKNOWN_METHOD);
+ default:
+ return UNKNOWN_METHOD;
+ }
+
+ case 8:
+ switch (method[0])
+ {
+ case 'P':
+ return (memcmp(method, "PROPFIND", 8) == 0
+ ? M_PROPFIND : UNKNOWN_METHOD);
+ case 'C':
+ return (memcmp(method, "CHECKOUT", 8) == 0
+ ? M_CHECKOUT : UNKNOWN_METHOD);
+ default:
+ return UNKNOWN_METHOD;
+ }
+
+ case 9:
+ return (memcmp(method, "PROPPATCH", 9) == 0
+ ? M_PROPPATCH : UNKNOWN_METHOD);
+
+ case 10:
+ switch (method[0])
+ {
+ case 'U':
+ return (memcmp(method, "UNCHECKOUT", 10) == 0
+ ? M_UNCHECKOUT : UNKNOWN_METHOD);
+ case 'M':
+ return (memcmp(method, "MKACTIVITY", 10) == 0
+ ? M_MKACTIVITY : UNKNOWN_METHOD);
+ default:
+ return UNKNOWN_METHOD;
+ }
+
+ case 11:
+ return (memcmp(method, "MKWORKSPACE", 11) == 0
+ ? M_MKWORKSPACE : UNKNOWN_METHOD);
+
+ case 15:
+ return (memcmp(method, "VERSION-CONTROL", 15) == 0
+ ? M_VERSION_CONTROL : UNKNOWN_METHOD);
+
+ case 16:
+ return (memcmp(method, "BASELINE-CONTROL", 16) == 0
+ ? M_BASELINE_CONTROL : UNKNOWN_METHOD);
+
+ default:
+ return UNKNOWN_METHOD;
+ }
+
+ /* NOTREACHED */
+}
+
+/* Get the method number associated with the given string, assumed to
+ * contain an HTTP method. Returns M_INVALID if not recognized.
+ *
+ * This is the first step toward placing method names in a configurable
+ * list. Hopefully it (and other routines) can eventually be moved to
+ * something like a mod_http_methods.c, complete with config stuff.
+ */
+AP_DECLARE(int) ap_method_number_of(const char *method)
+{
+ int len = strlen(method);
+ int which = lookup_builtin_method(method, len);
+
+ if (which != UNKNOWN_METHOD)
+ return which;
+
+ /* check if the method has been dynamically registered */
+ if (methods_registry != NULL) {
+ int *methnum = apr_hash_get(methods_registry, method, len);
+
+ if (methnum != NULL) {
+ return *methnum;
+ }
+ }
+
+ return M_INVALID;
+}
+
+/*
+ * Turn a known method number into a name.
+ */
+AP_DECLARE(const char *) ap_method_name_of(apr_pool_t *p, int methnum)
+{
+ apr_hash_index_t *hi = apr_hash_first(p, methods_registry);
+
+ /* scan through the hash table, looking for a value that matches
+ the provided method number. */
+ for (; hi; hi = apr_hash_next(hi)) {
+ const void *key;
+ void *val;
+
+ apr_hash_this(hi, &key, NULL, &val);
+ if (*(int *)val == methnum)
+ return key;
+ }
+
+ /* it wasn't found in the hash */
+ return NULL;
+}
+
+/* The index is found by its offset from the x00 code of each level.
+ * Although this is fast, it will need to be replaced if some nutcase
+ * decides to define a high-numbered code before the lower numbers.
+ * If that sad event occurs, replace the code below with a linear search
+ * from status_lines[shortcut[i]] to status_lines[shortcut[i+1]-1];
+ * or use NULL to fill the gaps.
+ */
+AP_DECLARE(int) ap_index_of_response(int status)
+{
+ static int shortcut[6] = {0, LEVEL_200, LEVEL_300, LEVEL_400,
+ LEVEL_500, RESPONSE_CODES};
+ int i, pos;
+
+ if (status < 100) { /* Below 100 is illegal for HTTP status */
+ return LEVEL_500;
+ }
+
+ for (i = 0; i < 5; i++) {
+ status -= 100;
+ if (status < 100) {
+ pos = (status + shortcut[i]);
+ if (pos < shortcut[i + 1] && status_lines[pos] != NULL) {
+ return pos;
+ }
+ else {
+ return LEVEL_500; /* status unknown (falls in gap) */
+ }
+ }
+ }
+ return LEVEL_500; /* 600 or above is also illegal */
+}
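+
+/* Worked example (illustrative): for status 404 the loop above subtracts
+ * 100 four times (ending at i == 3), leaving status == 4, so the index
+ * returned is LEVEL_400 + 4, i.e. the "404 Not Found" slot of
+ * status_lines.
+ */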
+
+AP_DECLARE(const char *) ap_get_status_line(int status)
+{
+ return status_lines[ap_index_of_response(status)];
+}
+
+/* Build the Allow field-value from the request handler method mask.
+ */
+static char *make_allow(request_rec *r)
+{
+ apr_int64_t mask;
+ apr_array_header_t *allow = apr_array_make(r->pool, 10, sizeof(char *));
+ apr_hash_index_t *hi = apr_hash_first(r->pool, methods_registry);
+ /* For TRACE below */
+ core_server_config *conf =
+ ap_get_core_module_config(r->server->module_config);
+
+ mask = r->allowed_methods->method_mask;
+
+ for (; hi; hi = apr_hash_next(hi)) {
+ const void *key;
+ void *val;
+
+ apr_hash_this(hi, &key, NULL, &val);
+ if ((mask & (AP_METHOD_BIT << *(int *)val)) != 0) {
+ APR_ARRAY_PUSH(allow, const char *) = key;
+ }
+ }
+
+ /* TRACE is tested on a per-server basis */
+ if (conf->trace_enable != AP_TRACE_DISABLE)
+ *(const char **)apr_array_push(allow) = "TRACE";
+
+ /* ### this is rather annoying. we should enforce registration of
+ ### these methods */
+ if ((mask & (AP_METHOD_BIT << M_INVALID))
+ && (r->allowed_methods->method_list != NULL)
+ && (r->allowed_methods->method_list->nelts != 0)) {
+ apr_array_cat(allow, r->allowed_methods->method_list);
+ }
+
+ return apr_array_pstrcat(r->pool, allow, ',');
+}
+
+AP_DECLARE(int) ap_send_http_options(request_rec *r)
+{
+ if (r->assbackwards) {
+ return DECLINED;
+ }
+
+ apr_table_setn(r->headers_out, "Allow", make_allow(r));
+
+ /* the request finalization will send an EOS, which will flush all
+ * the headers out (including the Allow header)
+ */
+
+ return OK;
+}
+
+AP_DECLARE(void) ap_set_content_type(request_rec *r, const char *ct)
+{
+ if (!ct) {
+ r->content_type = NULL;
+ }
+ else if (!r->content_type || strcmp(r->content_type, ct)) {
+ r->content_type = ct;
+ }
+}
+
+AP_DECLARE(void) ap_set_accept_ranges(request_rec *r)
+{
+ core_dir_config *d = ap_get_core_module_config(r->per_dir_config);
+ apr_table_setn(r->headers_out, "Accept-Ranges",
+ (d->max_ranges == AP_MAXRANGES_NORANGES) ? "none"
+ : "bytes");
+}
+
+static const char *add_optional_notes(request_rec *r,
+ const char *prefix,
+ const char *key,
+ const char *suffix)
+{
+ const char *notes, *result;
+
+ if ((notes = apr_table_get(r->notes, key)) == NULL) {
+ result = apr_pstrcat(r->pool, prefix, suffix, NULL);
+ }
+ else {
+ result = apr_pstrcat(r->pool, prefix, notes, suffix, NULL);
+ }
+
+ return result;
+}
+
+/* construct and return the default error message for a given
+ * HTTP defined error code
+ */
+static const char *get_canned_error_string(int status,
+ request_rec *r,
+ const char *location)
+{
+ apr_pool_t *p = r->pool;
+ const char *error_notes, *h1, *s1;
+
+ switch (status) {
+ case HTTP_MOVED_PERMANENTLY:
+ case HTTP_MOVED_TEMPORARILY:
+ case HTTP_TEMPORARY_REDIRECT:
+ case HTTP_PERMANENT_REDIRECT:
+ return(apr_pstrcat(p,
+ "<p>The document has moved <a href=\"",
+ ap_escape_html(r->pool, location),
+ "\">here</a>.</p>\n",
+ NULL));
+ case HTTP_SEE_OTHER:
+ return(apr_pstrcat(p,
+ "<p>The answer to your request is located "
+ "<a href=\"",
+ ap_escape_html(r->pool, location),
+ "\">here</a>.</p>\n",
+ NULL));
+ case HTTP_USE_PROXY:
+ return(apr_pstrcat(p,
+ "<p>This resource is only accessible "
+ "through the proxy\n",
+ ap_escape_html(r->pool, location),
+ "<br />\nYou will need to configure "
+ "your client to use that proxy.</p>\n",
+ NULL));
+ case HTTP_PROXY_AUTHENTICATION_REQUIRED:
+ case HTTP_UNAUTHORIZED:
+ return("<p>This server could not verify that you\n"
+ "are authorized to access the document\n"
+ "requested. Either you supplied the wrong\n"
+ "credentials (e.g., bad password), or your\n"
+ "browser doesn't understand how to supply\n"
+ "the credentials required.</p>\n");
+ case HTTP_BAD_REQUEST:
+ return(add_optional_notes(r,
+ "<p>Your browser sent a request that "
+ "this server could not understand.<br />\n",
+ "error-notes",
+ "</p>\n"));
+ case HTTP_FORBIDDEN:
+ s1 = apr_pstrcat(p,
+ "<p>You don't have permission to access ",
+ ap_escape_html(r->pool, r->uri),
+ "\non this server.<br />\n",
+ NULL);
+ return(add_optional_notes(r, s1, "error-notes", "</p>\n"));
+ case HTTP_NOT_FOUND:
+ return(apr_pstrcat(p,
+ "<p>The requested URL ",
+ ap_escape_html(r->pool, r->uri),
+ " was not found on this server.</p>\n",
+ NULL));
+ case HTTP_METHOD_NOT_ALLOWED:
+ return(apr_pstrcat(p,
+ "<p>The requested method ",
+ ap_escape_html(r->pool, r->method),
+ " is not allowed for the URL ",
+ ap_escape_html(r->pool, r->uri),
+ ".</p>\n",
+ NULL));
+ case HTTP_NOT_ACCEPTABLE:
+ s1 = apr_pstrcat(p,
+ "<p>An appropriate representation of the "
+ "requested resource ",
+ ap_escape_html(r->pool, r->uri),
+ " could not be found on this server.</p>\n",
+ NULL);
+ return(add_optional_notes(r, s1, "variant-list", ""));
+ case HTTP_MULTIPLE_CHOICES:
+ return(add_optional_notes(r, "", "variant-list", ""));
+ case HTTP_LENGTH_REQUIRED:
+ s1 = apr_pstrcat(p,
+ "<p>A request of the requested method ",
+ ap_escape_html(r->pool, r->method),
+ " requires a valid Content-length.<br />\n",
+ NULL);
+ return(add_optional_notes(r, s1, "error-notes", "</p>\n"));
+ case HTTP_PRECONDITION_FAILED:
+ return(apr_pstrcat(p,
+ "<p>The precondition on the request "
+ "for the URL ",
+ ap_escape_html(r->pool, r->uri),
+ " evaluated to false.</p>\n",
+ NULL));
+ case HTTP_NOT_IMPLEMENTED:
+ s1 = apr_pstrcat(p,
+ "<p>",
+ ap_escape_html(r->pool, r->method), " to ",
+ ap_escape_html(r->pool, r->uri),
+ " not supported.<br />\n",
+ NULL);
+ return(add_optional_notes(r, s1, "error-notes", "</p>\n"));
+ case HTTP_BAD_GATEWAY:
+ s1 = "<p>The proxy server received an invalid" CRLF
+ "response from an upstream server.<br />" CRLF;
+ return(add_optional_notes(r, s1, "error-notes", "</p>\n"));
+ case HTTP_VARIANT_ALSO_VARIES:
+ return(apr_pstrcat(p,
+ "<p>A variant for the requested "
+ "resource\n<pre>\n",
+ ap_escape_html(r->pool, r->uri),
+ "\n</pre>\nis itself a negotiable resource. "
+ "This indicates a configuration error.</p>\n",
+ NULL));
+ case HTTP_REQUEST_TIME_OUT:
+ return("<p>Server timeout waiting for the HTTP request from the client.</p>\n");
+ case HTTP_GONE:
+ return(apr_pstrcat(p,
+ "<p>The requested resource<br />",
+ ap_escape_html(r->pool, r->uri),
+ "<br />\nis no longer available on this server "
+ "and there is no forwarding address.\n"
+ "Please remove all references to this "
+ "resource.</p>\n",
+ NULL));
+ case HTTP_REQUEST_ENTITY_TOO_LARGE:
+ return(apr_pstrcat(p,
+ "The requested resource<br />",
+ ap_escape_html(r->pool, r->uri), "<br />\n",
+ "does not allow request data with ",
+ ap_escape_html(r->pool, r->method),
+ " requests, or the amount of data provided in\n"
+ "the request exceeds the capacity limit.\n",
+ NULL));
+ case HTTP_REQUEST_URI_TOO_LARGE:
+ s1 = "<p>The requested URL's length exceeds the capacity\n"
+ "limit for this server.<br />\n";
+ return(add_optional_notes(r, s1, "error-notes", "</p>\n"));
+ case HTTP_UNSUPPORTED_MEDIA_TYPE:
+ return("<p>The supplied request data is not in a format\n"
+ "acceptable for processing by this resource.</p>\n");
+ case HTTP_RANGE_NOT_SATISFIABLE:
+ return("<p>None of the range-specifier values in the Range\n"
+ "request-header field overlap the current extent\n"
+ "of the selected resource.</p>\n");
+ case HTTP_EXPECTATION_FAILED:
+ s1 = apr_table_get(r->headers_in, "Expect");
+ if (s1)
+ s1 = apr_pstrcat(p,
+ "<p>The expectation given in the Expect request-header\n"
+ "field could not be met by this server.\n"
+ "The client sent<pre>\n Expect: ",
+ ap_escape_html(r->pool, s1), "\n</pre>\n",
+ NULL);
+ else
+ s1 = "<p>No expectation was seen, the Expect request-header \n"
+ "field was not presented by the client.\n";
+ return add_optional_notes(r, s1, "error-notes", "</p>"
+ "<p>Only the 100-continue expectation is supported.</p>\n");
+ case HTTP_UNPROCESSABLE_ENTITY:
+ return("<p>The server understands the media type of the\n"
+ "request entity, but was unable to process the\n"
+ "contained instructions.</p>\n");
+ case HTTP_LOCKED:
+ return("<p>The requested resource is currently locked.\n"
+ "The lock must be released or proper identification\n"
+ "given before the method can be applied.</p>\n");
+ case HTTP_FAILED_DEPENDENCY:
+ return("<p>The method could not be performed on the resource\n"
+ "because the requested action depended on another\n"
+ "action and that other action failed.</p>\n");
+ case HTTP_UPGRADE_REQUIRED:
+ return("<p>The requested resource can only be retrieved\n"
+ "using SSL. The server is willing to upgrade the current\n"
+ "connection to SSL, but your client doesn't support it.\n"
+ "Either upgrade your client, or try requesting the page\n"
+ "using https://\n");
+ case HTTP_PRECONDITION_REQUIRED:
+ return("<p>The request is required to be conditional.</p>\n");
+ case HTTP_TOO_MANY_REQUESTS:
+ return("<p>The user has sent too many requests\n"
+ "in a given amount of time.</p>\n");
+ case HTTP_REQUEST_HEADER_FIELDS_TOO_LARGE:
+ return("<p>The server refused this request because\n"
+ "the request header fields are too large.</p>\n");
+ case HTTP_INSUFFICIENT_STORAGE:
+ return("<p>The method could not be performed on the resource\n"
+ "because the server is unable to store the\n"
+ "representation needed to successfully complete the\n"
+ "request. There is insufficient free space left in\n"
+ "your storage allocation.</p>\n");
+ case HTTP_SERVICE_UNAVAILABLE:
+ return("<p>The server is temporarily unable to service your\n"
+ "request due to maintenance downtime or capacity\n"
+ "problems. Please try again later.</p>\n");
+ case HTTP_GATEWAY_TIME_OUT:
+ return("<p>The gateway did not receive a timely response\n"
+ "from the upstream server or application.</p>\n");
+ case HTTP_LOOP_DETECTED:
+ return("<p>The server terminated an operation because\n"
+ "it encountered an infinite loop.</p>\n");
+ case HTTP_NOT_EXTENDED:
+ return("<p>A mandatory extension policy in the request is not\n"
+ "accepted by the server for this resource.</p>\n");
+ case HTTP_NETWORK_AUTHENTICATION_REQUIRED:
+ return("<p>The client needs to authenticate to gain\n"
+ "network access.</p>\n");
+ case HTTP_MISDIRECTED_REQUEST:
+ return("<p>The client needs a new connection for this\n"
+ "request as the requested host name does not match\n"
+ "the Server Name Indication (SNI) in use for this\n"
+ "connection.</p>\n");
+ case HTTP_UNAVAILABLE_FOR_LEGAL_REASONS:
+ s1 = apr_pstrcat(p,
+ "<p>Access to ", ap_escape_html(r->pool, r->uri),
+ "\nhas been denied for legal reasons.<br />\n",
+ NULL);
+ return(add_optional_notes(r, s1, "error-notes", "</p>\n"));
+ default: /* HTTP_INTERNAL_SERVER_ERROR */
+ /*
+ * This comparison to expose error-notes could be modified to
+ * use a configuration directive and export based on that
+ * directive. For now "*" is used to designate an error-notes
+ * that is totally safe for any user to see (ie lacks paths,
+ * database passwords, etc.)
+ */
+ if (((error_notes = apr_table_get(r->notes,
+ "error-notes")) != NULL)
+ && (h1 = apr_table_get(r->notes, "verbose-error-to")) != NULL
+ && (strcmp(h1, "*") == 0)) {
+ return(apr_pstrcat(p, error_notes, "<p />\n", NULL));
+ }
+ else {
+ return(apr_pstrcat(p,
+ "<p>The server encountered an internal "
+ "error or\n"
+ "misconfiguration and was unable to complete\n"
+ "your request.</p>\n"
+ "<p>Please contact the server "
+ "administrator at \n ",
+ ap_escape_html(r->pool,
+ r->server->server_admin),
+ " to inform them of the time this "
+ "error occurred,\n"
+ " and the actions you performed just before "
+ "this error.</p>\n"
+ "<p>More information about this error "
+ "may be available\n"
+ "in the server error log.</p>\n",
+ NULL));
+ }
+ /*
+ * It would be nice to give the user the information they need to
+ * fix the problem directly since many users don't have access to
+ * the error_log (think University sites) even though they can easily
+ * get this error by misconfiguring an htaccess file. However, the
+     * error notes tend to include the real file pathname in this case,
+ * which some people consider to be a breach of privacy. Until we
+ * can figure out a way to remove the pathname, leave this commented.
+ *
+ * if ((error_notes = apr_table_get(r->notes,
+ * "error-notes")) != NULL) {
+     *     return(apr_pstrcat(p, error_notes, "<p />\n", NULL));
+ * }
+ * else {
+ * return "";
+ * }
+ */
+ }
+}
+
+/* We should have named this send_canned_response, since it is used for any
+ * response that can be generated by the server from the request record.
+ * This includes all 204 (no content), 3xx (redirect), 4xx (client error),
+ * and 5xx (server error) messages that have not been redirected to another
+ * handler via the ErrorDocument feature.
+ */
+AP_DECLARE(void) ap_send_error_response(request_rec *r, int recursive_error)
+{
+ int status = r->status;
+ int idx = ap_index_of_response(status);
+ char *custom_response;
+ const char *location = apr_table_get(r->headers_out, "Location");
+
+ /* At this point, we are starting the response over, so we have to reset
+ * this value.
+ */
+ r->eos_sent = 0;
+
+ /* and we need to get rid of any RESOURCE filters that might be lurking
+ * around, thinking they are in the middle of the original request
+ */
+
+ r->output_filters = r->proto_output_filters;
+
+ ap_run_insert_error_filter(r);
+
+ /* We need to special-case the handling of 204 and 304 responses,
+ * since they have specific HTTP requirements and do not include a
+ * message body. Note that being assbackwards here is not an option.
+ */
+ if (AP_STATUS_IS_HEADER_ONLY(status)) {
+ ap_finalize_request_protocol(r);
+ return;
+ }
+
+ /*
+ * It's possible that the Location field might be in r->err_headers_out
+ * instead of r->headers_out; use the latter if possible, else the
+ * former.
+ */
+ if (location == NULL) {
+ location = apr_table_get(r->err_headers_out, "Location");
+ }
+
+ if (!r->assbackwards) {
+ apr_table_t *tmp = r->headers_out;
+
+ /* For all HTTP/1.x responses for which we generate the message,
+ * we need to avoid inheriting the "normal status" header fields
+ * that may have been set by the request handler before the
+ * error or redirect, except for Location on external redirects.
+ */
+ r->headers_out = r->err_headers_out;
+ r->err_headers_out = tmp;
+ apr_table_clear(r->err_headers_out);
+
+ if (ap_is_HTTP_REDIRECT(status) || (status == HTTP_CREATED)) {
+ if ((location != NULL) && *location) {
+ apr_table_setn(r->headers_out, "Location", location);
+ }
+ else {
+ location = ""; /* avoids coredump when printing, below */
+ }
+ }
+
+ r->content_languages = NULL;
+ r->content_encoding = NULL;
+ r->clength = 0;
+
+ if (apr_table_get(r->subprocess_env,
+ "suppress-error-charset") != NULL) {
+ core_request_config *request_conf =
+ ap_get_core_module_config(r->request_config);
+ request_conf->suppress_charset = 1; /* avoid adding default
+ * charset later
+ */
+ ap_set_content_type(r, "text/html");
+ }
+ else {
+ ap_set_content_type(r, "text/html; charset=iso-8859-1");
+ }
+
+ if ((status == HTTP_METHOD_NOT_ALLOWED)
+ || (status == HTTP_NOT_IMPLEMENTED)) {
+ apr_table_setn(r->headers_out, "Allow", make_allow(r));
+ }
+
+ if (r->header_only) {
+ ap_finalize_request_protocol(r);
+ return;
+ }
+ }
+
+ if ((custom_response = ap_response_code_string(r, idx))) {
+ /*
+ * We have a custom response output. This should only be
+ * a text-string to write back. But if the ErrorDocument
+ * was a local redirect and the requested resource failed
+ * for any reason, the custom_response will still hold the
+ * redirect URL. We don't really want to output this URL
+ * as a text message, so first check the custom response
+ * string to ensure that it is a text-string (using the
+ * same test used in ap_die(), i.e. does it start with a ").
+ *
+ * If it's not a text string, we've got a recursive error or
+ * an external redirect. If it's a recursive error, ap_die passes
+ * us the second error code so we can write both, and has already
+ * backed up to the original error. If it's an external redirect,
+ * it hasn't happened yet; we may never know if it fails.
+ */
+ if (custom_response[0] == '\"') {
+ ap_rputs(custom_response + 1, r);
+ ap_finalize_request_protocol(r);
+ return;
+ }
+ }
+ {
+ const char *title = status_lines[idx];
+ const char *h1;
+
+ /* Accept a status_line set by a module, but only if it begins
+ * with the correct 3 digit status code
+ */
+ if (r->status_line) {
+ char *end;
+ int len = strlen(r->status_line);
+ if (len >= 3
+ && apr_strtoi64(r->status_line, &end, 10) == r->status
+ && (end - 3) == r->status_line
+ && (len < 4 || apr_isspace(r->status_line[3]))
+ && (len < 5 || apr_isalnum(r->status_line[4]))) {
+ /* Since we passed the above check, we know that length three
+ * is equivalent to only a 3 digit numeric http status.
+                 * RFC2616 mandates a trailing space; let's add it.
+ * If we have an empty reason phrase, we also add "Unknown Reason".
+ */
+ if (len == 3) {
+ r->status_line = apr_pstrcat(r->pool, r->status_line, " Unknown Reason", NULL);
+ } else if (len == 4) {
+ r->status_line = apr_pstrcat(r->pool, r->status_line, "Unknown Reason", NULL);
+ }
+ title = r->status_line;
+ }
+ }
+
+ /* folks decided they didn't want the error code in the H1 text */
+ h1 = &title[4];
+
+ /* can't count on a charset filter being in place here,
+ * so do ebcdic->ascii translation explicitly (if needed)
+ */
+
+ ap_rvputs_proto_in_ascii(r,
+ DOCTYPE_HTML_2_0
+ "<html><head>\n<title>", title,
+ "</title>\n</head><body>\n<h1>", h1, "</h1>\n",
+ NULL);
+
+ ap_rvputs_proto_in_ascii(r,
+ get_canned_error_string(status, r, location),
+ NULL);
+
+ if (recursive_error) {
+ ap_rvputs_proto_in_ascii(r, "<p>Additionally, a ",
+ status_lines[ap_index_of_response(recursive_error)],
+ "\nerror was encountered while trying to use an "
+ "ErrorDocument to handle the request.</p>\n", NULL);
+ }
+ ap_rvputs_proto_in_ascii(r, ap_psignature("<hr>\n", r), NULL);
+ ap_rvputs_proto_in_ascii(r, "</body></html>\n", NULL);
+ }
+ ap_finalize_request_protocol(r);
+}
+
+/*
+ * Create a new method list with the specified number of preallocated
+ * extension slots.
+ */
+AP_DECLARE(ap_method_list_t *) ap_make_method_list(apr_pool_t *p, int nelts)
+{
+ ap_method_list_t *ml;
+
+ ml = (ap_method_list_t *) apr_palloc(p, sizeof(ap_method_list_t));
+ ml->method_mask = 0;
+ ml->method_list = apr_array_make(p, nelts, sizeof(char *));
+ return ml;
+}
+
+/*
+ * Make a copy of a method list (primarily for subrequests that may
+ * subsequently change it; don't want them changing the parent's, too!).
+ */
+AP_DECLARE(void) ap_copy_method_list(ap_method_list_t *dest,
+ ap_method_list_t *src)
+{
+ int i;
+ char **imethods;
+ char **omethods;
+
+ dest->method_mask = src->method_mask;
+ imethods = (char **) src->method_list->elts;
+ for (i = 0; i < src->method_list->nelts; ++i) {
+ omethods = (char **) apr_array_push(dest->method_list);
+ *omethods = apr_pstrdup(dest->method_list->pool, imethods[i]);
+ }
+}
+
+/*
+ * Return true if the specified HTTP method is in the provided
+ * method list.
+ */
+AP_DECLARE(int) ap_method_in_list(ap_method_list_t *l, const char *method)
+{
+ int methnum;
+
+ /*
+ * If it's one of our known methods, use the shortcut and check the
+ * bitmask.
+ */
+ methnum = ap_method_number_of(method);
+ if (methnum != M_INVALID) {
+ return !!(l->method_mask & (AP_METHOD_BIT << methnum));
+ }
+ /*
+ * Otherwise, see if the method name is in the array of string names.
+ */
+ if ((l->method_list == NULL) || (l->method_list->nelts == 0)) {
+ return 0;
+ }
+
+ return ap_array_str_contains(l->method_list, method);
+}
+
+/*
+ * Add the specified method to a method list (if it isn't already there).
+ */
+AP_DECLARE(void) ap_method_list_add(ap_method_list_t *l, const char *method)
+{
+ int methnum;
+ const char **xmethod;
+
+ /*
+     * If it's one of our known methods, use the shortcut and set the bit in the
+ * bitmask.
+ */
+ methnum = ap_method_number_of(method);
+ if (methnum != M_INVALID) {
+ l->method_mask |= (AP_METHOD_BIT << methnum);
+ return;
+ }
+ /*
+ * Otherwise, see if the method name is in the array of string names.
+ */
+ if (ap_array_str_contains(l->method_list, method)) {
+ return;
+ }
+
+ xmethod = (const char **) apr_array_push(l->method_list);
+ *xmethod = method;
+}
+
+/*
+ * Remove the specified method from a method list.
+ */
+AP_DECLARE(void) ap_method_list_remove(ap_method_list_t *l,
+ const char *method)
+{
+ int methnum;
+ char **methods;
+
+ /*
+     * If it's a known method, either builtin or registered
+ * by a module, use the bitmask.
+ */
+ methnum = ap_method_number_of(method);
+ if (methnum != M_INVALID) {
+ l->method_mask &= ~(AP_METHOD_BIT << methnum);
+ return;
+ }
+ /*
+ * Otherwise, see if the method name is in the array of string names.
+ */
+ if (l->method_list->nelts != 0) {
+ int i, j, k;
+ methods = (char **)l->method_list->elts;
+ for (i = 0; i < l->method_list->nelts; ) {
+ if (strcmp(method, methods[i]) == 0) {
+ for (j = i, k = i + 1; k < l->method_list->nelts; ++j, ++k) {
+ methods[j] = methods[k];
+ }
+ --l->method_list->nelts;
+ }
+ else {
+ ++i;
+ }
+ }
+ }
+}
+
+/*
+ * Reset a method list to be completely empty.
+ */
+AP_DECLARE(void) ap_clear_method_list(ap_method_list_t *l)
+{
+ l->method_mask = 0;
+ l->method_list->nelts = 0;
+}
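+
+/* Illustrative sketch of the method-list API above (hypothetical caller
+ * code): known methods are tracked in the bitmask, extension methods in
+ * the string array, and ap_method_in_list() checks both:
+ *
+ *     ap_method_list_t *ml = ap_make_method_list(r->pool, 2);
+ *
+ *     ap_method_list_add(ml, "GET");
+ *     ap_method_list_add(ml, "MYGET");
+ *     if (ap_method_in_list(ml, "MYGET")) {
+ *         ...
+ *     }
+ */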
+
diff --git a/modules/http/http_request.c b/modules/http/http_request.c
new file mode 100644
index 0000000..9e7c4db
--- /dev/null
+++ b/modules/http/http_request.c
@@ -0,0 +1,862 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * http_request.c: functions to get and process requests
+ *
+ * Rob McCool 3/21/93
+ *
+ * Thoroughly revamped by rst for Apache. NB this file reads
+ * best from the bottom up.
+ *
+ */
+
+#include "apr_strings.h"
+#include "apr_file_io.h"
+#include "apr_fnmatch.h"
+
+#define APR_WANT_STRFUNC
+#include "apr_want.h"
+
+#include "ap_config.h"
+#include "httpd.h"
+#include "http_config.h"
+#include "http_request.h"
+#include "http_core.h"
+#include "http_protocol.h"
+#include "http_log.h"
+#include "http_main.h"
+#include "util_filter.h"
+#include "util_charset.h"
+#include "scoreboard.h"
+
+#include "mod_core.h"
+
+#if APR_HAVE_STDARG_H
+#include <stdarg.h>
+#endif
+
+APLOG_USE_MODULE(http);
+
+/*****************************************************************
+ *
+ * Mainline request processing...
+ */
+
+/* XXX A cleaner and faster way to do this might be to pass the request_rec
+ * down the filter chain as a parameter. It would need to change for
+ * subrequest vs. main request filters; perhaps the subrequest filter could
+ * make the switch.
+ */
+static void update_r_in_filters(ap_filter_t *f,
+ request_rec *from,
+ request_rec *to)
+{
+ while (f) {
+ if (f->r == from) {
+ f->r = to;
+ }
+ f = f->next;
+ }
+}
+
+static void ap_die_r(int type, request_rec *r, int recursive_error)
+{
+ char *custom_response;
+ request_rec *r_1st_err = r;
+
+ if (type == OK || type == DONE) {
+ ap_finalize_request_protocol(r);
+ return;
+ }
+
+ if (!ap_is_HTTP_VALID_RESPONSE(type)) {
+ ap_filter_t *next;
+
+ /*
+ * Check if we still have the ap_http_header_filter in place. If
+ * this is the case we should not ignore the error here because
+ * it means that we have not sent any response at all and never
+         * will. This is bad. Send an internal server error instead.
+ */
+ next = r->output_filters;
+ while (next && (next->frec != ap_http_header_filter_handle)) {
+ next = next->next;
+ }
+
+ /*
+ * If next != NULL then we left the while above because of
+ * next->frec == ap_http_header_filter
+ */
+ if (next) {
+ if (type != AP_FILTER_ERROR) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(01579)
+ "Invalid response status %i", type);
+ }
+ else {
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(02831)
+ "Response from AP_FILTER_ERROR");
+ }
+ type = HTTP_INTERNAL_SERVER_ERROR;
+ }
+ else {
+ return;
+ }
+ }
+
+ /*
+     * The following takes care of Apache redirects to custom response URLs.
+ * Note that if we are already dealing with the response to some other
+ * error condition, we just report on the original error, and give up on
+ * any attempt to handle the other thing "intelligently"...
+ */
+ if (recursive_error != HTTP_OK) {
+ while (r_1st_err->prev && (r_1st_err->prev->status != HTTP_OK))
+ r_1st_err = r_1st_err->prev; /* Get back to original error */
+
+ if (r_1st_err != r) {
+ /* The recursive error was caused by an ErrorDocument specifying
+ * an internal redirect to a bad URI. ap_internal_redirect has
+ * changed the filter chains to point to the ErrorDocument's
+ * request_rec. Back out those changes so we can safely use the
+ * original failing request_rec to send the canned error message.
+ *
+ * ap_send_error_response gets rid of existing resource filters
+ * on the output side, so we can skip those.
+ */
+ update_r_in_filters(r_1st_err->proto_output_filters, r, r_1st_err);
+ update_r_in_filters(r_1st_err->input_filters, r, r_1st_err);
+ }
+
+ custom_response = NULL; /* Do NOT retry the custom thing! */
+ }
+ else {
+ int error_index = ap_index_of_response(type);
+ custom_response = ap_response_code_string(r, error_index);
+ recursive_error = 0;
+ }
+
+ r->status = type;
+
+ /*
+ * This test is done here so that none of the auth modules needs to know
+ * about proxy authentication. They treat it like normal auth, and then
+ * we tweak the status.
+ */
+ if (HTTP_UNAUTHORIZED == r->status && PROXYREQ_PROXY == r->proxyreq) {
+ r->status = HTTP_PROXY_AUTHENTICATION_REQUIRED;
+ }
+
+ /* If we don't want to keep the connection, make sure we mark that the
+ * connection is not eligible for keepalive. If we want to keep the
+ * connection, be sure that the request body (if any) has been read.
+ */
+ if (ap_status_drops_connection(r->status)) {
+ r->connection->keepalive = AP_CONN_CLOSE;
+ }
+
+ /*
+ * Two types of custom redirects --- plain text, and URLs. Plain text has
+ * a leading '"', so the URL code, here, is triggered on its absence
+ */
+
+ if (custom_response && custom_response[0] != '"') {
+
+ if (ap_is_url(custom_response)) {
+ /*
+             * The URL isn't local, so let's drop through the rest of this
+ * apache code, and continue with the usual REDIRECT handler.
+ * But note that the client will ultimately see the wrong
+ * status...
+ */
+ r->status = HTTP_MOVED_TEMPORARILY;
+ apr_table_setn(r->headers_out, "Location", custom_response);
+ }
+ else if (custom_response[0] == '/') {
+ const char *error_notes, *original_method;
+ int original_method_number;
+ r->no_local_copy = 1; /* Do NOT send HTTP_NOT_MODIFIED for
+ * error documents! */
+ /*
+ * This redirect needs to be a GET no matter what the original
+ * method was.
+ */
+ apr_table_setn(r->subprocess_env, "REQUEST_METHOD", r->method);
+
+ /*
+ * Provide a special method for modules to communicate
+ * more informative (than the plain canned) messages to us.
+ * Propagate them to ErrorDocuments via the ERROR_NOTES variable:
+ */
+ if ((error_notes = apr_table_get(r->notes,
+ "error-notes")) != NULL) {
+ apr_table_setn(r->subprocess_env, "ERROR_NOTES", error_notes);
+ }
+ original_method = r->method;
+ original_method_number = r->method_number;
+ r->method = "GET";
+ r->method_number = M_GET;
+ ap_internal_redirect(custom_response, r);
+ /* preserve ability to see %<m in the access log */
+ r->method = original_method;
+ r->method_number = original_method_number;
+ return;
+ }
+ else {
+ /*
+ * Dumb user has given us a bad url to redirect to --- fake up
+ * dying with a recursive server error...
+ */
+ recursive_error = HTTP_INTERNAL_SERVER_ERROR;
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(01580)
+ "Invalid error redirection directive: %s",
+ custom_response);
+ }
+ }
+ ap_send_error_response(r_1st_err, recursive_error);
+}
+
+AP_DECLARE(void) ap_die(int type, request_rec *r)
+{
+ ap_die_r(type, r, r->status);
+}
+
+AP_DECLARE(apr_status_t) ap_check_pipeline(conn_rec *c, apr_bucket_brigade *bb,
+ unsigned int max_blank_lines)
+{
+ apr_status_t rv = APR_EOF;
+ ap_input_mode_t mode = AP_MODE_SPECULATIVE;
+ unsigned int num_blank_lines = 0;
+ apr_size_t cr = 0;
+ char buf[2];
+
+ while (c->keepalive != AP_CONN_CLOSE && !c->aborted) {
+ apr_size_t len = cr + 1;
+
+ apr_brigade_cleanup(bb);
+ rv = ap_get_brigade(c->input_filters, bb, mode,
+ APR_NONBLOCK_READ, len);
+ if (rv != APR_SUCCESS || APR_BRIGADE_EMPTY(bb) || !max_blank_lines) {
+ if (mode == AP_MODE_READBYTES) {
+ /* Unexpected error, stop with this connection */
+ ap_log_cerror(APLOG_MARK, APLOG_ERR, rv, c, APLOGNO(02967)
+ "Can't consume pipelined empty lines");
+ c->keepalive = AP_CONN_CLOSE;
+ rv = APR_EGENERAL;
+ }
+ else if (rv != APR_SUCCESS || APR_BRIGADE_EMPTY(bb)) {
+ if (rv != APR_SUCCESS && !APR_STATUS_IS_EAGAIN(rv)) {
+ /* Pipe is dead */
+ c->keepalive = AP_CONN_CLOSE;
+ }
+ else {
+ /* Pipe is up and empty */
+ rv = APR_EAGAIN;
+ }
+ }
+ else {
+ apr_off_t n = 0;
+ /* Single read asked, (non-meta-)data available? */
+ rv = apr_brigade_length(bb, 0, &n);
+ if (rv == APR_SUCCESS && n <= 0) {
+ rv = APR_EAGAIN;
+ }
+ }
+ break;
+ }
+
+ /* Lookup and consume blank lines */
+ rv = apr_brigade_flatten(bb, buf, &len);
+ if (rv != APR_SUCCESS || len != cr + 1) {
+ int log_level;
+ if (mode == AP_MODE_READBYTES) {
+ /* Unexpected error, stop with this connection */
+ c->keepalive = AP_CONN_CLOSE;
+ log_level = APLOG_ERR;
+ rv = APR_EGENERAL;
+ }
+ else {
+ /* Let outside (non-speculative/blocking) read determine
+ * where this possible failure comes from (metadata,
+ * morphed EOF socket, ...). Debug only here.
+ */
+ log_level = APLOG_DEBUG;
+ rv = APR_SUCCESS;
+ }
+ ap_log_cerror(APLOG_MARK, log_level, rv, c, APLOGNO(02968)
+ "Can't check pipelined data");
+ break;
+ }
+
+ if (mode == AP_MODE_READBYTES) {
+ /* [CR]LF consumed, try next */
+ mode = AP_MODE_SPECULATIVE;
+ cr = 0;
+ }
+ else if (cr) {
+ AP_DEBUG_ASSERT(len == 2 && buf[0] == APR_ASCII_CR);
+ if (buf[1] == APR_ASCII_LF) {
+ /* consume this CRLF */
+ mode = AP_MODE_READBYTES;
+ num_blank_lines++;
+ }
+ else {
+ /* CR(?!LF) is data */
+ break;
+ }
+ }
+ else {
+ if (buf[0] == APR_ASCII_LF) {
+ /* consume this LF */
+ mode = AP_MODE_READBYTES;
+ num_blank_lines++;
+ }
+ else if (buf[0] == APR_ASCII_CR) {
+ cr = 1;
+ }
+ else {
+ /* Not [CR]LF, some data */
+ break;
+ }
+ }
+ if (num_blank_lines > max_blank_lines) {
+ /* Enough blank lines with this connection,
+ * stop and don't recycle it.
+ */
+ c->keepalive = AP_CONN_CLOSE;
+ rv = APR_NOTFOUND;
+ break;
+ }
+ }
+
+ return rv;
+}
+
+#define RETRIEVE_BRIGADE_FROM_POOL(bb, key, pool, allocator) do { \
+ apr_pool_userdata_get((void **)&bb, key, pool); \
+ if (bb == NULL) { \
+ bb = apr_brigade_create(pool, allocator); \
+ apr_pool_userdata_setn((const void *)bb, key, NULL, pool); \
+ } \
+ else { \
+ apr_brigade_cleanup(bb); \
+ } \
+} while(0)
+
+AP_DECLARE(void) ap_process_request_after_handler(request_rec *r)
+{
+ apr_bucket_brigade *bb;
+ apr_bucket *b;
+ conn_rec *c = r->connection;
+ apr_status_t rv;
+
+ /* Send an EOR bucket through the output filter chain. When
+ * this bucket is destroyed, the request will be logged and
+ * its pool will be freed
+ */
+ RETRIEVE_BRIGADE_FROM_POOL(bb, "ap_process_request_after_handler_brigade",
+ c->pool, c->bucket_alloc);
+ b = ap_bucket_eor_create(c->bucket_alloc, r);
+ APR_BRIGADE_INSERT_HEAD(bb, b);
+
+ ap_pass_brigade(c->output_filters, bb);
+
+    /* The EOR bucket has either been handled by an output filter (e.g.
+     * deleted or moved to a buffered_bb => no more in bb), or an error
+     * occurred before that (e.g. c->aborted => still in bb) and we ought
+     * to destroy it now. So clean up any remaining bucket along with
+     * the orphan request (if any).
+ */
+ apr_brigade_cleanup(bb);
+
+ /* From here onward, it is no longer safe to reference r
+ * or r->pool, because r->pool may have been destroyed
+ * already by the EOR bucket's cleanup function.
+ */
+
+    /* Check the pipeline, consuming blank lines; they must not be interpreted as
+ * the next pipelined request, otherwise we would block on the next read
+ * without flushing data, and hence possibly delay pending response(s)
+ * until the next/real request comes in or the keepalive timeout expires.
+ */
+ rv = ap_check_pipeline(c, bb, DEFAULT_LIMIT_BLANK_LINES);
+ c->data_in_input_filters = (rv == APR_SUCCESS);
+ apr_brigade_cleanup(bb);
+
+ if (c->cs)
+ c->cs->state = (c->aborted) ? CONN_STATE_LINGER
+ : CONN_STATE_WRITE_COMPLETION;
+ AP_PROCESS_REQUEST_RETURN((uintptr_t)r, r->uri, r->status);
+ if (ap_extended_status) {
+ ap_time_process_request(c->sbh, STOP_PREQUEST);
+ }
+}
+
+void ap_process_async_request(request_rec *r)
+{
+ conn_rec *c = r->connection;
+ int access_status;
+
+ /* Give quick handlers a shot at serving the request on the fast
+ * path, bypassing all of the other Apache hooks.
+ *
+ * This hook was added to enable serving files out of a URI keyed
+ * content cache ( e.g., Mike Abbott's Quick Shortcut Cache,
+ * described here: http://oss.sgi.com/projects/apache/mod_qsc.html )
+ *
+ * It may have other uses as well, such as routing requests directly to
+ * content handlers that have the ability to grok HTTP and do their
+ * own access checking, etc (e.g. servlet engines).
+ *
+ * Use this hook with extreme care and only if you know what you are
+ * doing.
+ */
+ AP_PROCESS_REQUEST_ENTRY((uintptr_t)r, r->uri);
+ if (ap_extended_status) {
+ ap_time_process_request(r->connection->sbh, START_PREQUEST);
+ }
+
+ if (APLOGrtrace4(r)) {
+ int i;
+ const apr_array_header_t *t_h = apr_table_elts(r->headers_in);
+ const apr_table_entry_t *t_elt = (apr_table_entry_t *)t_h->elts;
+ ap_log_rerror(APLOG_MARK, APLOG_TRACE4, 0, r,
+ "Headers received from client:");
+ for (i = 0; i < t_h->nelts; i++, t_elt++) {
+ ap_log_rerror(APLOG_MARK, APLOG_TRACE4, 0, r, " %s: %s",
+ ap_escape_logitem(r->pool, t_elt->key),
+ ap_escape_logitem(r->pool, t_elt->val));
+ }
+ }
+
+#if APR_HAS_THREADS
+ apr_thread_mutex_create(&r->invoke_mtx, APR_THREAD_MUTEX_DEFAULT, r->pool);
+ apr_thread_mutex_lock(r->invoke_mtx);
+#endif
+ access_status = ap_run_quick_handler(r, 0); /* Not a look-up request */
+ if (access_status == DECLINED) {
+ access_status = ap_process_request_internal(r);
+ if (access_status == OK) {
+ access_status = ap_invoke_handler(r);
+ }
+ }
+
+ if (access_status == SUSPENDED) {
+ /* TODO: Should move these steps into a generic function, so modules
+ * working on a suspended request can also call _ENTRY again.
+ */
+ AP_PROCESS_REQUEST_RETURN((uintptr_t)r, r->uri, access_status);
+ if (ap_extended_status) {
+ ap_time_process_request(c->sbh, STOP_PREQUEST);
+ }
+ if (c->cs)
+ c->cs->state = CONN_STATE_SUSPENDED;
+#if APR_HAS_THREADS
+ apr_thread_mutex_unlock(r->invoke_mtx);
+#endif
+ return;
+ }
+#if APR_HAS_THREADS
+ apr_thread_mutex_unlock(r->invoke_mtx);
+#endif
+
+ ap_die_r(access_status, r, HTTP_OK);
+
+ ap_process_request_after_handler(r);
+}
+
+AP_DECLARE(void) ap_process_request(request_rec *r)
+{
+ apr_bucket_brigade *bb;
+ apr_bucket *b;
+ conn_rec *c = r->connection;
+ apr_status_t rv;
+
+ ap_process_async_request(r);
+
+ if (!c->data_in_input_filters) {
+ RETRIEVE_BRIGADE_FROM_POOL(bb, "ap_process_request_brigade",
+ c->pool, c->bucket_alloc);
+ b = apr_bucket_flush_create(c->bucket_alloc);
+ APR_BRIGADE_INSERT_HEAD(bb, b);
+ rv = ap_pass_brigade(c->output_filters, bb);
+ if (APR_STATUS_IS_TIMEUP(rv)) {
+ /*
+ * Notice a timeout as an error message. This might be
+ * valuable for detecting clients with broken network
+ * connections or possible DoS attacks.
+ */
+ ap_log_cerror(APLOG_MARK, APLOG_INFO, rv, c, APLOGNO(01581)
+ "flushing data to the client");
+ }
+ apr_brigade_cleanup(bb);
+ }
+ if (ap_extended_status) {
+ ap_time_process_request(c->sbh, STOP_PREQUEST);
+ }
+}
+
+static apr_table_t *rename_original_env(apr_pool_t *p, apr_table_t *t)
+{
+ const apr_array_header_t *env_arr = apr_table_elts(t);
+ const apr_table_entry_t *elts = (const apr_table_entry_t *) env_arr->elts;
+ apr_table_t *new = apr_table_make(p, env_arr->nalloc);
+ int i;
+
+ for (i = 0; i < env_arr->nelts; ++i) {
+ if (!elts[i].key)
+ continue;
+ apr_table_setn(new, apr_pstrcat(p, "REDIRECT_", elts[i].key, NULL),
+ elts[i].val);
+ }
+
+ return new;
+}
+
+static request_rec *internal_internal_redirect(const char *new_uri,
+ request_rec *r) {
+ int access_status;
+ request_rec *new;
+ const char *vary_header;
+
+ if (ap_is_recursion_limit_exceeded(r)) {
+ ap_die(HTTP_INTERNAL_SERVER_ERROR, r);
+ return NULL;
+ }
+
+ new = (request_rec *) apr_pcalloc(r->pool, sizeof(request_rec));
+
+ new->connection = r->connection;
+ new->server = r->server;
+ new->pool = r->pool;
+
+ /*
+ * A whole lot of this really ought to be shared with http_protocol.c...
+ * another missing cleanup. It's particularly inappropriate to be
+ * setting header_only, etc., here.
+ */
+
+ new->method = r->method;
+ new->method_number = r->method_number;
+ new->allowed_methods = ap_make_method_list(new->pool, 2);
+ ap_parse_uri(new, new_uri);
+ new->parsed_uri.port_str = r->parsed_uri.port_str;
+ new->parsed_uri.port = r->parsed_uri.port;
+
+ new->request_config = ap_create_request_config(r->pool);
+
+ new->per_dir_config = r->server->lookup_defaults;
+
+ new->prev = r;
+ r->next = new;
+
+ new->useragent_addr = r->useragent_addr;
+ new->useragent_ip = r->useragent_ip;
+
+ /* Must have prev and next pointers set before calling create_request
+ * hook.
+ */
+ ap_run_create_request(new);
+
+ /* Inherit the rest of the protocol info... */
+
+ new->the_request = r->the_request;
+
+ new->allowed = r->allowed;
+
+ new->status = r->status;
+ new->assbackwards = r->assbackwards;
+ new->header_only = r->header_only;
+ new->protocol = r->protocol;
+ new->proto_num = r->proto_num;
+ new->hostname = r->hostname;
+ new->request_time = r->request_time;
+ new->main = r->main;
+
+ new->headers_in = r->headers_in;
+ new->trailers_in = r->trailers_in;
+ new->headers_out = apr_table_make(r->pool, 12);
+ if (ap_is_HTTP_REDIRECT(new->status)) {
+ const char *location = apr_table_get(r->headers_out, "Location");
+ if (location)
+ apr_table_setn(new->headers_out, "Location", location);
+ }
+
+ /* A module (like mod_rewrite) can force an internal redirect
+ * to carry over the Vary header (if present).
+ */
+ if (apr_table_get(r->notes, "redirect-keeps-vary")) {
+ if((vary_header = apr_table_get(r->headers_out, "Vary"))) {
+ apr_table_setn(new->headers_out, "Vary", vary_header);
+ }
+ }
+
+ new->err_headers_out = r->err_headers_out;
+ new->trailers_out = apr_table_make(r->pool, 5);
+ new->subprocess_env = rename_original_env(r->pool, r->subprocess_env);
+ new->notes = apr_table_make(r->pool, 5);
+
+ new->htaccess = r->htaccess;
+ new->no_cache = r->no_cache;
+ new->expecting_100 = r->expecting_100;
+ new->no_local_copy = r->no_local_copy;
+ new->read_length = r->read_length; /* We can only read it once */
+ new->vlist_validator = r->vlist_validator;
+
+ new->proto_output_filters = r->proto_output_filters;
+ new->proto_input_filters = r->proto_input_filters;
+
+ new->input_filters = new->proto_input_filters;
+
+ if (new->main) {
+ ap_filter_t *f, *nextf;
+
+ /* If this is a subrequest, the filter chain may contain a
+ * mixture of filters specific to the old request (r), and
+ * some inherited from r->main. Here, inherit that filter
+ * chain, and remove all those which are specific to the old
+ * request; ensuring the subreq filter is left in place. */
+ new->output_filters = r->output_filters;
+
+ f = new->output_filters;
+ do {
+ nextf = f->next;
+
+ if (f->r == r && f->frec != ap_subreq_core_filter_handle) {
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(01582)
+ "dropping filter '%s' in internal redirect from %s to %s",
+ f->frec->name, r->unparsed_uri, new_uri);
+
+ /* To remove the filter, first set f->r to the *new*
+ * request_rec, so that ->output_filters on 'new' is
+ * changed (if necessary) when removing the filter. */
+ f->r = new;
+ ap_remove_output_filter(f);
+ }
+
+ f = nextf;
+
+ /* Stop at the protocol filters. If a protocol filter has
+ * been newly installed for this resource, better leave it
+ * in place, though it's probably a misconfiguration or
+ * filter bug to get into this state. */
+ } while (f && f != new->proto_output_filters);
+ }
+ else {
+ /* If this is not a subrequest, clear out all
+ * resource-specific filters. */
+ new->output_filters = new->proto_output_filters;
+ }
+
+ update_r_in_filters(new->input_filters, r, new);
+ update_r_in_filters(new->output_filters, r, new);
+
+ apr_table_setn(new->subprocess_env, "REDIRECT_STATUS",
+ apr_itoa(r->pool, r->status));
+
+ /* Begin by presuming any module can make its own path_info assumptions,
+ * until some module interjects and changes the value.
+ */
+ new->used_path_info = AP_REQ_DEFAULT_PATH_INFO;
+
+#if APR_HAS_THREADS
+ new->invoke_mtx = r->invoke_mtx;
+#endif
+
+ /*
+ * XXX: hmm. This is because mod_setenvif and mod_unique_id really need
+ * to do their thing on internal redirects as well. Perhaps this is a
+ * misnamed function.
+ */
+ if ((access_status = ap_run_post_read_request(new))) {
+ ap_die(access_status, new);
+ return NULL;
+ }
+
+ return new;
+}
+
+/* XXX: Is this function so bogus and fragile that we should deep-6 it? */
+AP_DECLARE(void) ap_internal_fast_redirect(request_rec *rr, request_rec *r)
+{
+ /* We need to tell POOL_DEBUG that we're guaranteeing that rr->pool
+ * will exist as long as r->pool. Otherwise we run into troubles because
+ * some values in this request will be allocated in r->pool, and others in
+ * rr->pool.
+ */
+ apr_pool_join(r->pool, rr->pool);
+ r->proxyreq = rr->proxyreq;
+ r->no_cache = (r->no_cache && rr->no_cache);
+ r->no_local_copy = (r->no_local_copy && rr->no_local_copy);
+ r->mtime = rr->mtime;
+ r->uri = rr->uri;
+ r->filename = rr->filename;
+ r->canonical_filename = rr->canonical_filename;
+ r->path_info = rr->path_info;
+ r->args = rr->args;
+ r->finfo = rr->finfo;
+ r->handler = rr->handler;
+ ap_set_content_type(r, rr->content_type);
+ r->content_encoding = rr->content_encoding;
+ r->content_languages = rr->content_languages;
+ r->per_dir_config = rr->per_dir_config;
+ /* copy output headers from subrequest, but leave negotiation headers */
+ r->notes = apr_table_overlay(r->pool, rr->notes, r->notes);
+ r->headers_out = apr_table_overlay(r->pool, rr->headers_out,
+ r->headers_out);
+ r->err_headers_out = apr_table_overlay(r->pool, rr->err_headers_out,
+ r->err_headers_out);
+ r->trailers_out = apr_table_overlay(r->pool, rr->trailers_out,
+ r->trailers_out);
+ r->subprocess_env = apr_table_overlay(r->pool, rr->subprocess_env,
+ r->subprocess_env);
+
+ r->output_filters = rr->output_filters;
+ r->input_filters = rr->input_filters;
+
+ /* If any filters pointed at the now-defunct rr, we must point them
+ * at our "new" instance of r. In particular, some of rr's structures
+ * will now be bogus (say rr->headers_out). If a filter tried to modify
+     * its f->r structure while it is pointing to rr, the real request_rec
+ * will not get updated. Fix that here.
+ */
+ update_r_in_filters(r->input_filters, rr, r);
+ update_r_in_filters(r->output_filters, rr, r);
+
+ if (r->main) {
+ ap_filter_t *next = r->output_filters;
+ while (next && (next != r->proto_output_filters)) {
+ if (next->frec == ap_subreq_core_filter_handle) {
+ break;
+ }
+ next = next->next;
+ }
+ if (!next || next == r->proto_output_filters) {
+ ap_add_output_filter_handle(ap_subreq_core_filter_handle,
+ NULL, r, r->connection);
+ }
+ }
+ else {
+ /*
+ * We need to check if we now have the SUBREQ_CORE filter in our filter
+         * chain. If this is the case we need to remove it since we are NOT
+         * a subrequest. But we need to keep in mind that the SUBREQ_CORE filter
+ * does not necessarily need to be the first filter in our chain. So we
+ * need to go through the chain. But we only need to walk up the chain
+ * until the proto_output_filters as the SUBREQ_CORE filter is below the
+ * protocol filters.
+ */
+ ap_filter_t *next;
+
+ next = r->output_filters;
+ while (next && (next->frec != ap_subreq_core_filter_handle)
+ && (next != r->proto_output_filters)) {
+ next = next->next;
+ }
+ if (next && (next->frec == ap_subreq_core_filter_handle)) {
+ ap_remove_output_filter(next);
+ }
+ }
+}
+
+AP_DECLARE(void) ap_internal_redirect(const char *new_uri, request_rec *r)
+{
+ int access_status;
+ request_rec *new = internal_internal_redirect(new_uri, r);
+
+ AP_INTERNAL_REDIRECT(r->uri, new_uri);
+
+    /* ap_die was already called if an error occurred */
+ if (!new) {
+ return;
+ }
+
+ access_status = ap_run_quick_handler(new, 0); /* Not a look-up request */
+ if (access_status == DECLINED) {
+ access_status = ap_process_request_internal(new);
+ if (access_status == OK) {
+ access_status = ap_invoke_handler(new);
+ }
+ }
+ ap_die(access_status, new);
+}
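+
+/* Illustrative sketch (hypothetical handler code): once a handler calls
+ * ap_internal_redirect(), the redirected request owns the response, so the
+ * caller must produce no further output of its own:
+ *
+ *     if (need_other_resource) {
+ *         ap_internal_redirect("/other/uri", r);
+ *         return OK;
+ *     }
+ */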
+
+/* This function is designed for things like actions or CGI scripts, when
+ * using AddHandler, and you want to preserve the content type across
+ * an internal redirect.
+ */
+AP_DECLARE(void) ap_internal_redirect_handler(const char *new_uri, request_rec *r)
+{
+ int access_status;
+ request_rec *new = internal_internal_redirect(new_uri, r);
+
+ /* ap_die was already called if an error occurred */
+ if (!new) {
+ return;
+ }
+
+ if (r->handler)
+ ap_set_content_type(new, r->content_type);
+ access_status = ap_process_request_internal(new);
+ if (access_status == OK) {
+ access_status = ap_invoke_handler(new);
+ }
+ ap_die(access_status, new);
+}
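+/* Usage sketch (illustrative only, not part of this file): a content
+ * handler that re-runs the request internally while keeping the content
+ * type negotiated for the original resource. The handler name and target
+ * URI are assumptions made for this example.
+ */
+static int example_redirect_handler(request_rec *r)
+{
+    if (!r->handler || strcmp(r->handler, "example-redirect") != 0) {
+        return DECLINED;
+    }
+    /* The new request inherits r->content_type because r->handler is set. */
+    ap_internal_redirect_handler("/result.html", r);
+    return OK;
+}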
+
+AP_DECLARE(void) ap_allow_methods(request_rec *r, int reset, ...)
+{
+ const char *method;
+ va_list methods;
+
+ /*
+ * Get rid of any current settings if requested; not just the
+ * well-known methods but any extensions as well.
+ */
+ if (reset) {
+ ap_clear_method_list(r->allowed_methods);
+ }
+
+ va_start(methods, reset);
+ while ((method = va_arg(methods, const char *)) != NULL) {
+ ap_method_list_add(r->allowed_methods, method);
+ }
+ va_end(methods);
+}
+
+AP_DECLARE(void) ap_allow_standard_methods(request_rec *r, int reset, ...)
+{
+ int method;
+ va_list methods;
+ apr_int64_t mask;
+
+ /*
+ * Get rid of any current settings if requested; not just the
+ * well-known methods but any extensions as well.
+ */
+ if (reset) {
+ ap_clear_method_list(r->allowed_methods);
+ }
+
+ mask = 0;
+ va_start(methods, reset);
+ while ((method = va_arg(methods, int)) != -1) {
+ mask |= (AP_METHOD_BIT << method);
+ }
+ va_end(methods);
+
+ r->allowed_methods->method_mask |= mask;
+}
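+/* Usage sketch (illustrative only, not part of this file): filling in
+ * r->allowed_methods before answering 405, using the two helpers above.
+ * The function name and the extension method "PURGE" are assumptions for
+ * this example; reset == 1 clears the current list first (named constants
+ * for the reset values may also be available in http_protocol.h).
+ */
+static int example_send_405(request_rec *r)
+{
+    /* Replace the list with GET and OPTIONS, then add one extension method. */
+    ap_allow_standard_methods(r, 1, M_GET, M_OPTIONS, -1);
+    ap_allow_methods(r, 0, "PURGE", NULL);
+    return HTTP_METHOD_NOT_ALLOWED;
+}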
diff --git a/modules/http/mod_mime.c b/modules/http/mod_mime.c
new file mode 100644
index 0000000..28c53be
--- /dev/null
+++ b/modules/http/mod_mime.c
@@ -0,0 +1,1026 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * http_mime.c: Sends/gets MIME headers for requests
+ *
+ * Rob McCool
+ *
+ */
+
+#include "apr.h"
+#include "apr_strings.h"
+#include "apr_lib.h"
+#include "apr_hash.h"
+
+#define APR_WANT_STRFUNC
+#include "apr_want.h"
+
+#include "ap_config.h"
+#include "httpd.h"
+#include "http_config.h"
+#include "http_log.h"
+#include "http_request.h"
+#include "http_protocol.h"
+
+/* XXXX - fix me / EBCDIC
+ * there was a kludge here which would use its
+ * own version of apr_isascii(), indicating that
+ * on some platforms that might be needed:
+ *
+ * #define OS_ASC(c) (c) -- for mere mortals
+ * or
+ * #define OS_ASC(c) (ebcdic2ascii[c]) -- for dinos
+ *
+ * #define apr_isascii(c) ((OS_ASC(c) & 0x80) == 0)
+ */
+
+/* XXXXX - fix me - See note with NOT_PROXY
+ */
+
+typedef struct attrib_info {
+ char *name;
+ int offset;
+} attrib_info;
+
+/* Information to which an extension can be mapped
+ */
+typedef struct extension_info {
+ char *forced_type; /* Added with AddType... */
+ char *encoding_type; /* Added with AddEncoding... */
+ char *language_type; /* Added with AddLanguage... */
+ char *handler; /* Added with AddHandler... */
+ char *charset_type; /* Added with AddCharset... */
+ char *input_filters; /* Added with AddInputFilter... */
+ char *output_filters; /* Added with AddOutputFilter... */
+} extension_info;
+
+#define MULTIMATCH_UNSET 0
+#define MULTIMATCH_ANY 1
+#define MULTIMATCH_NEGOTIATED 2
+#define MULTIMATCH_HANDLERS 4
+#define MULTIMATCH_FILTERS 8
+
+typedef struct {
+ apr_hash_t *extension_mappings; /* Map from extension name to
+ * extension_info structure */
+
+ apr_array_header_t *remove_mappings; /* A simple list, walked once */
+
+ char *default_language; /* Language if no AddLanguage ext found */
+
+ int multimatch; /* Extensions to include in multiview matching
+ * for filenames, e.g. Filters and Handlers
+ */
+ int use_path_info; /* If set to 0, only use filename.
+ * If set to 1, append PATH_INFO to filename for
+ * lookups.
+ * If set to 2, this value is unset and is
+ * effectively 0.
+ */
+} mime_dir_config;
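+/* Configuration sketch (illustrative only): the directives, registered in
+ * mime_cmds[] further down, that populate this structure. The values shown
+ * are examples, not defaults.
+ *
+ *     DefaultLanguage    en               (-> default_language)
+ *     MultiviewsMatch    Handlers Filters (-> multimatch bits)
+ *     ModMimeUsePathInfo On               (-> use_path_info)
+ */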
+
+typedef struct param_s {
+ char *attr;
+ char *val;
+ struct param_s *next;
+} param;
+
+typedef struct {
+ const char *type;
+ apr_size_t type_len;
+ const char *subtype;
+ apr_size_t subtype_len;
+ param *param;
+} content_type;
+
+static char tspecial[] = {
+ '(', ')', '<', '>', '@', ',', ';', ':',
+ '\\', '"', '/', '[', ']', '?', '=',
+ '\0'
+};
+
+module AP_MODULE_DECLARE_DATA mime_module;
+
+static void *create_mime_dir_config(apr_pool_t *p, char *dummy)
+{
+ mime_dir_config *new = apr_palloc(p, sizeof(mime_dir_config));
+
+ new->extension_mappings = NULL;
+ new->remove_mappings = NULL;
+
+ new->default_language = NULL;
+
+ new->multimatch = MULTIMATCH_UNSET;
+
+ new->use_path_info = 2;
+
+ return new;
+}
+/*
+ * Overlay one hash table of extension_mappings onto another
+ */
+static void *overlay_extension_mappings(apr_pool_t *p,
+ const void *key,
+ apr_ssize_t klen,
+ const void *overlay_val,
+ const void *base_val,
+ const void *data)
+{
+ const extension_info *overlay_info = (const extension_info *)overlay_val;
+ const extension_info *base_info = (const extension_info *)base_val;
+ extension_info *new_info = apr_pmemdup(p, base_info, sizeof(extension_info));
+
+ if (overlay_info->forced_type) {
+ new_info->forced_type = overlay_info->forced_type;
+ }
+ if (overlay_info->encoding_type) {
+ new_info->encoding_type = overlay_info->encoding_type;
+ }
+ if (overlay_info->language_type) {
+ new_info->language_type = overlay_info->language_type;
+ }
+ if (overlay_info->handler) {
+ new_info->handler = overlay_info->handler;
+ }
+ if (overlay_info->charset_type) {
+ new_info->charset_type = overlay_info->charset_type;
+ }
+ if (overlay_info->input_filters) {
+ new_info->input_filters = overlay_info->input_filters;
+ }
+ if (overlay_info->output_filters) {
+ new_info->output_filters = overlay_info->output_filters;
+ }
+
+ return new_info;
+}
+
+/* Each entry's offset member gives the offset within an extension_info of
+ * the pointer to reset.
+ */
+static void remove_items(apr_pool_t *p, apr_array_header_t *remove,
+ apr_hash_t *mappings)
+{
+ attrib_info *suffix = (attrib_info *) remove->elts;
+ int i;
+ for (i = 0; i < remove->nelts; i++) {
+ extension_info *exinfo = apr_hash_get(mappings,
+ suffix[i].name,
+ APR_HASH_KEY_STRING);
+ if (exinfo && *(const char**)((char *)exinfo + suffix[i].offset)) {
+ extension_info *copyinfo = exinfo;
+ exinfo = apr_pmemdup(p, copyinfo, sizeof(*exinfo));
+ apr_hash_set(mappings, suffix[i].name,
+ APR_HASH_KEY_STRING, exinfo);
+
+ *(const char**)((char *)exinfo + suffix[i].offset) = NULL;
+ }
+ }
+}
+
+static void *merge_mime_dir_configs(apr_pool_t *p, void *basev, void *addv)
+{
+ mime_dir_config *base = (mime_dir_config *)basev;
+ mime_dir_config *add = (mime_dir_config *)addv;
+ mime_dir_config *new = apr_palloc(p, sizeof(mime_dir_config));
+
+ if (base->extension_mappings && add->extension_mappings) {
+ new->extension_mappings = apr_hash_merge(p, add->extension_mappings,
+ base->extension_mappings,
+ overlay_extension_mappings,
+ NULL);
+ }
+ else {
+ if (base->extension_mappings == NULL) {
+ new->extension_mappings = add->extension_mappings;
+ }
+ else {
+ new->extension_mappings = base->extension_mappings;
+ }
+ /* We may not be merging the tables, but if we might change
+ * an exinfo member, then we are about to trounce it anyway.
+ * We must have a copy for safety.
+ */
+ if (new->extension_mappings && add->remove_mappings) {
+ new->extension_mappings =
+ apr_hash_copy(p, new->extension_mappings);
+ }
+ }
+
+ if (new->extension_mappings) {
+ if (add->remove_mappings)
+ remove_items(p, add->remove_mappings, new->extension_mappings);
+ }
+ new->remove_mappings = NULL;
+
+ new->default_language = add->default_language ?
+ add->default_language : base->default_language;
+
+ new->multimatch = (add->multimatch != MULTIMATCH_UNSET) ?
+ add->multimatch : base->multimatch;
+
+ if ((add->use_path_info & 2) == 0) {
+ new->use_path_info = add->use_path_info;
+ }
+ else {
+ new->use_path_info = base->use_path_info;
+ }
+
+ return new;
+}
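+/* Merge sketch (illustrative only): with "AddHandler cgi-script .cgi" in a
+ * parent directory and "RemoveHandler .cgi" in a subdirectory, the merge
+ * above keeps the inherited hash, takes a safety copy, and remove_items()
+ * then nulls the handler slot of the "cgi" entry, so the association is
+ * not inherited below that point.
+ */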
+
+static const char *add_extension_info(cmd_parms *cmd, void *m_,
+ const char *value_, const char* ext)
+{
+ mime_dir_config *m=m_;
+ extension_info *exinfo;
+ int offset = (int) (long) cmd->info;
+ char *key = apr_pstrdup(cmd->temp_pool, ext);
+ char *value = apr_pstrdup(cmd->pool, value_);
+ ap_str_tolower(value);
+ ap_str_tolower(key);
+
+ if (*key == '.') {
+ ++key;
+ }
+ if (!m->extension_mappings) {
+ m->extension_mappings = apr_hash_make(cmd->pool);
+ exinfo = NULL;
+ }
+ else {
+ exinfo = (extension_info*)apr_hash_get(m->extension_mappings, key,
+ APR_HASH_KEY_STRING);
+ }
+ if (!exinfo) {
+ exinfo = apr_pcalloc(cmd->pool, sizeof(extension_info));
+ key = apr_pstrdup(cmd->pool, key);
+ apr_hash_set(m->extension_mappings, key, APR_HASH_KEY_STRING, exinfo);
+ }
+ *(const char**)((char *)exinfo + offset) = value;
+ return NULL;
+}
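+/* Sketch of the offset mechanism above (illustrative only): for
+ * "AddHandler cgi-script .cgi", cmd->info carries
+ * APR_OFFSETOF(extension_info, handler), so
+ *     *(const char**)((char *)exinfo + offset) = value;
+ * amounts to exinfo->handler = "cgi-script" for the "cgi" key. The same
+ * helper therefore serves AddType, AddEncoding, AddLanguage, AddCharset
+ * and the filter directives, differing only in the offset each directive
+ * registers in mime_cmds[] below.
+ */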
+
+/*
+ * As RemoveType should also override the info from TypesConfig, we add an
+ * empty string as type instead of actually removing the type.
+ */
+static const char *remove_extension_type(cmd_parms *cmd, void *m_,
+ const char *ext)
+{
+ return add_extension_info(cmd, m_, "", ext);
+}
+
+/*
+ * Note handler names are un-added with each per_dir_config merge.
+ * This keeps the association from being inherited, but not
+ * from being re-added at a subordinate level.
+ */
+static const char *remove_extension_info(cmd_parms *cmd, void *m_,
+ const char *ext)
+{
+ mime_dir_config *m = (mime_dir_config *) m_;
+ attrib_info *suffix;
+ if (*ext == '.') {
+ ++ext;
+ }
+ if (!m->remove_mappings) {
+ m->remove_mappings = apr_array_make(cmd->pool, 4, sizeof(*suffix));
+ }
+ suffix = (attrib_info *)apr_array_push(m->remove_mappings);
+ suffix->name = apr_pstrdup(cmd->pool, ext);
+ ap_str_tolower(suffix->name);
+ suffix->offset = (int) (long) cmd->info;
+ return NULL;
+}
+
+/* The sole bit of server configuration that the MIME module has is
+ * the name of its config file, so...
+ */
+
+static const char *set_types_config(cmd_parms *cmd, void *dummy,
+ const char *arg)
+{
+ ap_set_module_config(cmd->server->module_config, &mime_module,
+ (void *)arg);
+ return NULL;
+}
+
+static const char *multiviews_match(cmd_parms *cmd, void *m_,
+ const char *include)
+{
+ mime_dir_config *m = (mime_dir_config *) m_;
+ const char *errmsg;
+
+ errmsg = ap_check_cmd_context(cmd, NOT_IN_LOCATION);
+ if (errmsg != NULL) {
+ return errmsg;
+ }
+
+ if (strcasecmp(include, "Any") == 0) {
+ if (m->multimatch && (m->multimatch & ~MULTIMATCH_ANY)) {
+ return "Any is incompatible with NegotiatedOnly, "
+ "Filters and Handlers";
+ }
+ m->multimatch |= MULTIMATCH_ANY;
+ }
+ else if (strcasecmp(include, "NegotiatedOnly") == 0) {
+ if (m->multimatch && (m->multimatch & ~MULTIMATCH_NEGOTIATED)) {
+ return "NegotiatedOnly is incompatible with Any, "
+ "Filters and Handlers";
+ }
+ m->multimatch |= MULTIMATCH_NEGOTIATED;
+ }
+ else if (strcasecmp(include, "Filters") == 0) {
+ if (m->multimatch && (m->multimatch & (MULTIMATCH_NEGOTIATED
+ | MULTIMATCH_ANY))) {
+ return "Filters is incompatible with Any and NegotiatedOnly";
+ }
+ m->multimatch |= MULTIMATCH_FILTERS;
+ }
+ else if (strcasecmp(include, "Handlers") == 0) {
+ if (m->multimatch && (m->multimatch & (MULTIMATCH_NEGOTIATED
+ | MULTIMATCH_ANY))) {
+ return "Handlers is incompatible with Any and NegotiatedOnly";
+ }
+ m->multimatch |= MULTIMATCH_HANDLERS;
+ }
+ else {
+ return apr_psprintf(cmd->pool, "Unrecognized option '%s'", include);
+ }
+
+ return NULL;
+}
+
+static const command_rec mime_cmds[] =
+{
+ AP_INIT_ITERATE2("AddCharset", add_extension_info,
+ (void *)APR_OFFSETOF(extension_info, charset_type), OR_FILEINFO,
+ "a charset (e.g., iso-2022-jp), followed by one or more "
+ "file extensions"),
+ AP_INIT_ITERATE2("AddEncoding", add_extension_info,
+ (void *)APR_OFFSETOF(extension_info, encoding_type), OR_FILEINFO,
+ "an encoding (e.g., gzip), followed by one or more file extensions"),
+ AP_INIT_ITERATE2("AddHandler", add_extension_info,
+ (void *)APR_OFFSETOF(extension_info, handler), OR_FILEINFO,
+ "a handler name followed by one or more file extensions"),
+ AP_INIT_ITERATE2("AddInputFilter", add_extension_info,
+ (void *)APR_OFFSETOF(extension_info, input_filters), OR_FILEINFO,
+ "input filter name (or ; delimited names) followed by one or "
+ "more file extensions"),
+ AP_INIT_ITERATE2("AddLanguage", add_extension_info,
+ (void *)APR_OFFSETOF(extension_info, language_type), OR_FILEINFO,
+ "a language (e.g., fr), followed by one or more file extensions"),
+ AP_INIT_ITERATE2("AddOutputFilter", add_extension_info,
+ (void *)APR_OFFSETOF(extension_info, output_filters), OR_FILEINFO,
+ "output filter name (or ; delimited names) followed by one or "
+ "more file extensions"),
+ AP_INIT_ITERATE2("AddType", add_extension_info,
+ (void *)APR_OFFSETOF(extension_info, forced_type), OR_FILEINFO,
+ "a mime type followed by one or more file extensions"),
+ AP_INIT_TAKE1("DefaultLanguage", ap_set_string_slot,
+ (void*)APR_OFFSETOF(mime_dir_config, default_language), OR_FILEINFO,
+ "language to use for documents with no other language file extension"),
+ AP_INIT_ITERATE("MultiviewsMatch", multiviews_match, NULL, OR_FILEINFO,
+ "NegotiatedOnly (default), Handlers and/or Filters, or Any"),
+ AP_INIT_ITERATE("RemoveCharset", remove_extension_info,
+ (void *)APR_OFFSETOF(extension_info, charset_type), OR_FILEINFO,
+ "one or more file extensions"),
+ AP_INIT_ITERATE("RemoveEncoding", remove_extension_info,
+ (void *)APR_OFFSETOF(extension_info, encoding_type), OR_FILEINFO,
+ "one or more file extensions"),
+ AP_INIT_ITERATE("RemoveHandler", remove_extension_info,
+ (void *)APR_OFFSETOF(extension_info, handler), OR_FILEINFO,
+ "one or more file extensions"),
+ AP_INIT_ITERATE("RemoveInputFilter", remove_extension_info,
+ (void *)APR_OFFSETOF(extension_info, input_filters), OR_FILEINFO,
+ "one or more file extensions"),
+ AP_INIT_ITERATE("RemoveLanguage", remove_extension_info,
+ (void *)APR_OFFSETOF(extension_info, language_type), OR_FILEINFO,
+ "one or more file extensions"),
+ AP_INIT_ITERATE("RemoveOutputFilter", remove_extension_info,
+ (void *)APR_OFFSETOF(extension_info, output_filters), OR_FILEINFO,
+ "one or more file extensions"),
+ AP_INIT_ITERATE("RemoveType", remove_extension_type,
+ (void *)APR_OFFSETOF(extension_info, forced_type), OR_FILEINFO,
+ "one or more file extensions"),
+ AP_INIT_TAKE1("TypesConfig", set_types_config, NULL, RSRC_CONF,
+ "the MIME types config file"),
+ AP_INIT_FLAG("ModMimeUsePathInfo", ap_set_flag_slot,
+ (void *)APR_OFFSETOF(mime_dir_config, use_path_info), ACCESS_CONF,
+ "Set to 'yes' to allow mod_mime to use path info for type checking"),
+ {NULL}
+};
+
+static apr_hash_t *mime_type_extensions;
+
+static int mime_post_config(apr_pool_t *p, apr_pool_t *plog, apr_pool_t *ptemp, server_rec *s)
+{
+ ap_configfile_t *f;
+ char l[MAX_STRING_LEN];
+ const char *types_confname = ap_get_module_config(s->module_config,
+ &mime_module);
+ apr_status_t status;
+
+ if (!types_confname) {
+ types_confname = AP_TYPES_CONFIG_FILE;
+ }
+
+ types_confname = ap_server_root_relative(p, types_confname);
+ if (!types_confname) {
+ ap_log_error(APLOG_MARK, APLOG_ERR, APR_EBADPATH, s, APLOGNO(01596)
+ "Invalid mime types config path %s",
+ (const char *)ap_get_module_config(s->module_config,
+ &mime_module));
+ return HTTP_INTERNAL_SERVER_ERROR;
+ }
+ if ((status = ap_pcfg_openfile(&f, ptemp, types_confname))
+ != APR_SUCCESS) {
+ ap_log_error(APLOG_MARK, APLOG_ERR, status, s, APLOGNO(01597)
+ "could not open mime types config file %s.",
+ types_confname);
+ return HTTP_INTERNAL_SERVER_ERROR;
+ }
+
+ mime_type_extensions = apr_hash_make(p);
+
+ while (!(ap_cfg_getline(l, MAX_STRING_LEN, f))) {
+ const char *ll = l, *ct;
+
+ if (l[0] == '#') {
+ continue;
+ }
+ ct = ap_getword_conf(p, &ll);
+
+ while (ll[0]) {
+ char *ext = ap_getword_conf(p, &ll);
+ ap_str_tolower(ext);
+ apr_hash_set(mime_type_extensions, ext, APR_HASH_KEY_STRING, ct);
+ }
+ }
+ ap_cfg_closefile(f);
+ return OK;
+}
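+/* Sketch of the file format parsed above (illustrative only): each
+ * non-comment line of the TypesConfig file is a media type followed by
+ * zero or more extensions, e.g.
+ *
+ *     text/html    html htm
+ *     image/png    png
+ *
+ * so mime_type_extensions ends up mapping "html" and "htm" to "text/html"
+ * and "png" to "image/png".
+ */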
+
+static const char *zap_sp(const char *s)
+{
+ if (s == NULL) {
+ return (NULL);
+ }
+ if (*s == '\0') {
+ return (s);
+ }
+
+ /* skip prefixed white space */
+ for (; *s == ' ' || *s == '\t' || *s == '\n'; s++)
+ ;
+
+ return (s);
+}
+
+static char *zap_sp_and_dup(apr_pool_t *p, const char *start,
+ const char *end, apr_size_t *len)
+{
+ while ((start < end) && apr_isspace(*start)) {
+ start++;
+ }
+ while ((end > start) && apr_isspace(*(end - 1))) {
+ end--;
+ }
+ if (len) {
+ *len = end - start;
+ }
+ return apr_pstrmemdup(p, start, end - start);
+}
+
+static int is_token(char c)
+{
+ int res;
+
+ res = (apr_isascii(c) && apr_isgraph(c)
+ && (strchr(tspecial, c) == NULL)) ? 1 : -1;
+ return res;
+}
+
+static int is_qtext(char c)
+{
+ int res;
+
+ res = (apr_isascii(c) && (c != '"') && (c != '\\') && (c != '\n'))
+ ? 1 : -1;
+ return res;
+}
+
+static int is_quoted_pair(const char *s)
+{
+ int res = -1;
+ int c;
+
+ if (*s == '\\') {
+ c = (int) *(s + 1);
+ if (c && apr_isascii(c)) {
+ res = 1;
+ }
+ }
+ return (res);
+}
+
+static content_type *analyze_ct(request_rec *r, const char *s)
+{
+ const char *cp, *mp;
+ char *attribute, *value;
+ int quoted = 0;
+ server_rec * ss = r->server;
+ apr_pool_t * p = r->pool;
+
+ content_type *ctp;
+ param *pp, *npp;
+
+ /* initialize ctp */
+ ctp = (content_type *)apr_palloc(p, sizeof(content_type));
+ ctp->type = NULL;
+ ctp->subtype = NULL;
+ ctp->param = NULL;
+
+ mp = s;
+
+ /* getting a type */
+ cp = mp;
+ while (apr_isspace(*cp)) {
+ cp++;
+ }
+ if (!*cp) {
+ ap_log_error(APLOG_MARK, APLOG_WARNING, 0, ss, APLOGNO(01598)
+ "mod_mime: analyze_ct: cannot get media type from '%s'",
+ (const char *) mp);
+ return (NULL);
+ }
+ ctp->type = cp;
+ do {
+ cp++;
+ } while (*cp && (*cp != '/') && !apr_isspace(*cp) && (*cp != ';'));
+ if (!*cp || (*cp == ';')) {
+ ap_log_error(APLOG_MARK, APLOG_WARNING, 0, ss, APLOGNO(01599)
+ "Cannot get media type from '%s'",
+ (const char *) mp);
+ return (NULL);
+ }
+ while (apr_isspace(*cp)) {
+ cp++;
+ }
+ if (*cp != '/') {
+ ap_log_error(APLOG_MARK, APLOG_WARNING, 0, ss, APLOGNO(01600)
+ "mod_mime: analyze_ct: cannot get media type from '%s'",
+ (const char *) mp);
+ return (NULL);
+ }
+ ctp->type_len = cp - ctp->type;
+
+ cp++; /* skip the '/' */
+
+ /* getting a subtype */
+ while (apr_isspace(*cp)) {
+ cp++;
+ }
+ if (!*cp) {
+ ap_log_error(APLOG_MARK, APLOG_WARNING, 0, ss, APLOGNO(01601)
+ "Cannot get media subtype.");
+ return (NULL);
+ }
+ ctp->subtype = cp;
+ do {
+ cp++;
+ } while (*cp && !apr_isspace(*cp) && (*cp != ';'));
+ ctp->subtype_len = cp - ctp->subtype;
+ while (apr_isspace(*cp)) {
+ cp++;
+ }
+
+ if (*cp == '\0') {
+ return (ctp);
+ }
+
+ /* getting parameters */
+ cp++; /* skip the ';' */
+ cp = zap_sp(cp);
+ if (cp == NULL || *cp == '\0') {
+ ap_log_error(APLOG_MARK, APLOG_WARNING, 0, ss, APLOGNO(01602)
+ "Cannot get media parameter.");
+ return (NULL);
+ }
+ mp = cp;
+ attribute = NULL;
+ value = NULL;
+
+ while (cp != NULL && *cp != '\0') {
+ if (attribute == NULL) {
+ if (is_token(*cp) > 0) {
+ cp++;
+ continue;
+ }
+ else if (*cp == ' ' || *cp == '\t' || *cp == '\n') {
+ cp++;
+ continue;
+ }
+ else if (*cp == '=') {
+ attribute = zap_sp_and_dup(p, mp, cp, NULL);
+ if (attribute == NULL || *attribute == '\0') {
+ ap_log_error(APLOG_MARK, APLOG_WARNING, 0, ss, APLOGNO(01603)
+ "Cannot get media parameter.");
+ return (NULL);
+ }
+ cp++;
+ cp = zap_sp(cp);
+ if (cp == NULL || *cp == '\0') {
+ ap_log_error(APLOG_MARK, APLOG_WARNING, 0, ss, APLOGNO(01604)
+ "Cannot get media parameter.");
+ return (NULL);
+ }
+ mp = cp;
+ continue;
+ }
+ else {
+ ap_log_error(APLOG_MARK, APLOG_WARNING, 0, ss, APLOGNO(01605)
+ "Cannot get media parameter.");
+ return (NULL);
+ }
+ }
+ else {
+ if (mp == cp) {
+ if (*cp == '"') {
+ quoted = 1;
+ cp++;
+ }
+ else {
+ quoted = 0;
+ }
+ }
+ if (quoted > 0) {
+ while (quoted && *cp != '\0') {
+ if (is_qtext(*cp) > 0) {
+ cp++;
+ }
+ else if (is_quoted_pair(cp) > 0) {
+ cp += 2;
+ }
+ else if (*cp == '"') {
+ cp++;
+ while (*cp == ' ' || *cp == '\t' || *cp == '\n') {
+ cp++;
+ }
+ if (*cp != ';' && *cp != '\0') {
+ ap_log_error(APLOG_MARK, APLOG_WARNING, 0, ss, APLOGNO(01606)
+ "Cannot get media parameter.");
+ return(NULL);
+ }
+ quoted = 0;
+ }
+ else {
+ ap_log_error(APLOG_MARK, APLOG_WARNING, 0, ss, APLOGNO(01607)
+ "Cannot get media parameter.");
+ return (NULL);
+ }
+ }
+ }
+ else {
+ while (1) {
+ if (is_token(*cp) > 0) {
+ cp++;
+ }
+ else if (*cp == '\0' || *cp == ';') {
+ break;
+ }
+ else {
+ ap_log_error(APLOG_MARK, APLOG_WARNING, 0, ss, APLOGNO(01608)
+ "Cannot get media parameter.");
+ return (NULL);
+ }
+ }
+ }
+ value = zap_sp_and_dup(p, mp, cp, NULL);
+ if (value == NULL || *value == '\0') {
+ ap_log_error(APLOG_MARK, APLOG_WARNING, 0, ss, APLOGNO(01609)
+ "Cannot get media parameter.");
+ return (NULL);
+ }
+
+ pp = apr_palloc(p, sizeof(param));
+ pp->attr = attribute;
+ pp->val = value;
+ pp->next = NULL;
+
+ if (ctp->param == NULL) {
+ ctp->param = pp;
+ }
+ else {
+ npp = ctp->param;
+ while (npp->next) {
+ npp = npp->next;
+ }
+ npp->next = pp;
+ }
+ quoted = 0;
+ attribute = NULL;
+ value = NULL;
+ if (*cp == '\0') {
+ break;
+ }
+ cp++;
+ mp = cp;
+ }
+ }
+ return (ctp);
+}
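+/* Result sketch (illustrative only): for the header value
+ *     text/html; charset=ISO-8859-1
+ * analyze_ct() returns type "text" (type_len 4), subtype "html"
+ * (subtype_len 4), and one param with attr "charset" and val
+ * "ISO-8859-1". find_ct() below uses this to rebuild the content type
+ * around a possibly overridden charset.
+ */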
+
+/*
+ * find_ct is the hook routine for determining content-type and other
+ * MIME-related metadata. It assumes that r->filename has already been
+ * set and stat has been called for r->finfo. It also assumes that the
+ * non-path base file name is not the empty string unless it is a dir.
+ */
+static int find_ct(request_rec *r)
+{
+ mime_dir_config *conf;
+ apr_array_header_t *exception_list;
+ char *ext;
+ const char *fn, *fntmp, *type, *charset = NULL, *resource_name;
+ int found_metadata = 0;
+
+ if (r->finfo.filetype == APR_DIR) {
+ ap_set_content_type(r, DIR_MAGIC_TYPE);
+ return OK;
+ }
+
+ if (!r->filename) {
+ return DECLINED;
+ }
+
+ conf = (mime_dir_config *)ap_get_module_config(r->per_dir_config,
+ &mime_module);
+ exception_list = apr_array_make(r->pool, 2, sizeof(char *));
+
+ /* If use_path_info is explicitly set to on (value & 1 == 1), append. */
+ if (conf->use_path_info & 1) {
+ resource_name = apr_pstrcat(r->pool, r->filename, r->path_info, NULL);
+ }
+ else {
+ resource_name = r->filename;
+ }
+
+ /* Always drop the path leading up to the file name.
+ */
+ if ((fn = ap_strrchr_c(resource_name, '/')) == NULL) {
+ fn = resource_name;
+ }
+ else {
+ ++fn;
+ }
+
+
+ /* The exception list keeps track of those filename components that
+ * are not associated with extensions indicating metadata.
+ * The base name is always the first exception (i.e., "txt.html" has
+ * a basename of "txt" even though it might look like an extension).
+ * Leading dots are considered to be part of the base name (a file named
+ * ".png" is likely not a png file but just a hidden file called png).
+ */
+ fntmp = fn;
+ while (*fntmp == '.')
+ fntmp++;
+ fntmp = ap_strchr_c(fntmp, '.');
+ if (fntmp) {
+ ext = apr_pstrmemdup(r->pool, fn, fntmp - fn);
+ fn = fntmp + 1;
+ }
+ else {
+ ext = apr_pstrdup(r->pool, fn);
+ fn += strlen(fn);
+ }
+
+ *((const char **)apr_array_push(exception_list)) = ext;
+
+ /* Parse filename extensions which can be in any order
+ */
+ while (*fn && (ext = ap_getword(r->pool, &fn, '.'))) {
+ const extension_info *exinfo = NULL;
+ int found;
+ char *extcase;
+
+ if (*ext == '\0') { /* ignore empty extensions, e.g. "bad..html" */
+ continue;
+ }
+
+ found = 0;
+
+ /* Save the ext in extcase before converting it to lower case.
+ */
+ extcase = apr_pstrdup(r->pool, ext);
+ ap_str_tolower(ext);
+
+ if (conf->extension_mappings != NULL) {
+ exinfo = (extension_info*)apr_hash_get(conf->extension_mappings,
+ ext, APR_HASH_KEY_STRING);
+ }
+
+ if (exinfo == NULL || !exinfo->forced_type) {
+ if ((type = apr_hash_get(mime_type_extensions, ext,
+ APR_HASH_KEY_STRING)) != NULL) {
+ ap_set_content_type(r, (char*) type);
+ found = 1;
+ }
+ }
+
+ if (exinfo != NULL) {
+
+ /* empty string is treated as special case for RemoveType */
+ if (exinfo->forced_type && *exinfo->forced_type) {
+ ap_set_content_type(r, exinfo->forced_type);
+ found = 1;
+ }
+
+ if (exinfo->charset_type) {
+ charset = exinfo->charset_type;
+ found = 1;
+ }
+ if (exinfo->language_type) {
+ if (!r->content_languages) {
+ r->content_languages = apr_array_make(r->pool, 2,
+ sizeof(char *));
+ }
+ *((const char **)apr_array_push(r->content_languages))
+ = exinfo->language_type;
+ found = 1;
+ }
+ if (exinfo->encoding_type) {
+ if (!r->content_encoding) {
+ r->content_encoding = exinfo->encoding_type;
+ }
+ else {
+ /* XXX should eliminate duplicate entities
+ *
+ * ah no. Order is important and double encoding is neither
+ * forbidden nor impossible. -- nd
+ */
+ r->content_encoding = apr_pstrcat(r->pool,
+ r->content_encoding,
+ ", ",
+ exinfo->encoding_type,
+ NULL);
+ }
+ found = 1;
+ }
+ /* The following extensions are not 'Found'. That is, they don't
+ * make any contribution to metadata negotiation, so they must have
+ * been explicitly requested by name.
+ */
+ if (exinfo->handler && r->proxyreq == PROXYREQ_NONE) {
+ r->handler = exinfo->handler;
+ if (conf->multimatch & MULTIMATCH_HANDLERS) {
+ found = 1;
+ }
+ }
+ /* XXX Two significant problems: 1) we don't check whether we are
+ * setting redundant filters; 2) we insert these in the type checker
+ * hook, which may be too early (dunno).
+ */
+ if (exinfo->input_filters) {
+ const char *filter, *filters = exinfo->input_filters;
+ while (*filters
+ && (filter = ap_getword(r->pool, &filters, ';'))) {
+ ap_add_input_filter(filter, NULL, r, r->connection);
+ }
+ if (conf->multimatch & MULTIMATCH_FILTERS) {
+ found = 1;
+ }
+ }
+ if (exinfo->output_filters) {
+ const char *filter, *filters = exinfo->output_filters;
+ while (*filters
+ && (filter = ap_getword(r->pool, &filters, ';'))) {
+ ap_add_output_filter(filter, NULL, r, r->connection);
+ }
+ if (conf->multimatch & MULTIMATCH_FILTERS) {
+ found = 1;
+ }
+ }
+ }
+
+ if (found || (conf->multimatch & MULTIMATCH_ANY)) {
+ found_metadata = 1;
+ }
+ else {
+ *((const char **) apr_array_push(exception_list)) = extcase;
+ }
+ }
+
+ /*
+ * Need to set a notes entry on r for unrecognized elements.
+ * Somebody better claim them! If we did absolutely nothing,
+ * skip the notes to alert mod_negotiation we are clueless.
+ */
+ if (found_metadata) {
+ apr_table_setn(r->notes, "ap-mime-exceptions-list",
+ (void *)exception_list);
+ }
+
+ if (r->content_type) {
+ content_type *ctp;
+ int override = 0;
+
+ if ((ctp = analyze_ct(r, r->content_type))) {
+ param *pp = ctp->param;
+ char *base_content_type = apr_palloc(r->pool, ctp->type_len +
+ ctp->subtype_len +
+ sizeof("/"));
+ char *tmp = base_content_type;
+ memcpy(tmp, ctp->type, ctp->type_len);
+ tmp += ctp->type_len;
+ *tmp++ = '/';
+ memcpy(tmp, ctp->subtype, ctp->subtype_len);
+ tmp += ctp->subtype_len;
+ *tmp = 0;
+ ap_set_content_type(r, base_content_type);
+ while (pp != NULL) {
+ if (charset && !strcmp(pp->attr, "charset")) {
+ if (!override) {
+ ap_set_content_type(r,
+ apr_pstrcat(r->pool,
+ r->content_type,
+ "; charset=",
+ charset,
+ NULL));
+ override = 1;
+ }
+ }
+ else {
+ ap_set_content_type(r,
+ apr_pstrcat(r->pool,
+ r->content_type,
+ "; ", pp->attr,
+ "=", pp->val,
+ NULL));
+ }
+ pp = pp->next;
+ }
+ if (charset && !override) {
+ ap_set_content_type(r, apr_pstrcat(r->pool, r->content_type,
+ "; charset=", charset,
+ NULL));
+ }
+ }
+ }
+
+ /* Set default language, if none was specified by the extensions
+ * and we have a DefaultLanguage setting in force
+ */
+
+ if (!r->content_languages && conf->default_language) {
+ const char **new;
+
+ if (!r->content_languages) {
+ r->content_languages = apr_array_make(r->pool, 2, sizeof(char *));
+ }
+ new = (const char **)apr_array_push(r->content_languages);
+ *new = conf->default_language;
+ }
+
+ if (!r->content_type) {
+ return DECLINED;
+ }
+
+ return OK;
+}
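+/* Worked example (illustrative only), assuming "AddLanguage fr .fr" and
+ * "AddEncoding gzip .gz" are in effect: for a request mapped to
+ * /docs/manual.fr.html.gz, the base name "manual" becomes the first
+ * exception-list entry, then the remaining extensions are scanned in
+ * order: "fr" adds a content language, "html" sets the content type from
+ * TypesConfig, and "gz" appends "gzip" to r->content_encoding. Extensions
+ * matching nothing would be pushed onto the exception list instead.
+ */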
+
+static void register_hooks(apr_pool_t *p)
+{
+ ap_hook_post_config(mime_post_config,NULL,NULL,APR_HOOK_MIDDLE);
+ ap_hook_type_checker(find_ct,NULL,NULL,APR_HOOK_MIDDLE);
+ /*
+ * this hook seems redundant ... is there any reason a type checker isn't
+ * allowed to do this already? I'd think that fixups in general would be
+ * the last opportunity to get the filters right.
+ * ap_hook_insert_filter(mime_insert_filters,NULL,NULL,APR_HOOK_MIDDLE);
+ */
+}
+
+AP_DECLARE_MODULE(mime) = {
+ STANDARD20_MODULE_STUFF,
+ create_mime_dir_config, /* create per-directory config structure */
+ merge_mime_dir_configs, /* merge per-directory config structures */
+ NULL, /* create per-server config structure */
+ NULL, /* merge per-server config structures */
+ mime_cmds, /* command apr_table_t */
+ register_hooks /* register hooks */
+};
diff --git a/modules/http/mod_mime.dep b/modules/http/mod_mime.dep
new file mode 100644
index 0000000..7a195a1
--- /dev/null
+++ b/modules/http/mod_mime.dep
@@ -0,0 +1,55 @@
+# Microsoft Developer Studio Generated Dependency File, included by mod_mime.mak
+
+..\..\build\win32\httpd.rc : \
+ "..\..\include\ap_release.h"\
+
+
+.\mod_mime.c : \
+ "..\..\include\ap_config.h"\
+ "..\..\include\ap_config_layout.h"\
+ "..\..\include\ap_hooks.h"\
+ "..\..\include\ap_mmn.h"\
+ "..\..\include\ap_regex.h"\
+ "..\..\include\ap_release.h"\
+ "..\..\include\apache_noprobes.h"\
+ "..\..\include\http_config.h"\
+ "..\..\include\http_log.h"\
+ "..\..\include\http_protocol.h"\
+ "..\..\include\http_request.h"\
+ "..\..\include\httpd.h"\
+ "..\..\include\os.h"\
+ "..\..\include\util_cfgtree.h"\
+ "..\..\include\util_filter.h"\
+ "..\..\srclib\apr-util\include\apr_buckets.h"\
+ "..\..\srclib\apr-util\include\apr_hooks.h"\
+ "..\..\srclib\apr-util\include\apr_optional.h"\
+ "..\..\srclib\apr-util\include\apr_optional_hooks.h"\
+ "..\..\srclib\apr-util\include\apr_uri.h"\
+ "..\..\srclib\apr-util\include\apu.h"\
+ "..\..\srclib\apr\include\apr.h"\
+ "..\..\srclib\apr\include\apr_allocator.h"\
+ "..\..\srclib\apr\include\apr_dso.h"\
+ "..\..\srclib\apr\include\apr_errno.h"\
+ "..\..\srclib\apr\include\apr_file_info.h"\
+ "..\..\srclib\apr\include\apr_file_io.h"\
+ "..\..\srclib\apr\include\apr_general.h"\
+ "..\..\srclib\apr\include\apr_global_mutex.h"\
+ "..\..\srclib\apr\include\apr_hash.h"\
+ "..\..\srclib\apr\include\apr_inherit.h"\
+ "..\..\srclib\apr\include\apr_lib.h"\
+ "..\..\srclib\apr\include\apr_mmap.h"\
+ "..\..\srclib\apr\include\apr_network_io.h"\
+ "..\..\srclib\apr\include\apr_poll.h"\
+ "..\..\srclib\apr\include\apr_pools.h"\
+ "..\..\srclib\apr\include\apr_portable.h"\
+ "..\..\srclib\apr\include\apr_proc_mutex.h"\
+ "..\..\srclib\apr\include\apr_ring.h"\
+ "..\..\srclib\apr\include\apr_shm.h"\
+ "..\..\srclib\apr\include\apr_strings.h"\
+ "..\..\srclib\apr\include\apr_tables.h"\
+ "..\..\srclib\apr\include\apr_thread_mutex.h"\
+ "..\..\srclib\apr\include\apr_thread_proc.h"\
+ "..\..\srclib\apr\include\apr_time.h"\
+ "..\..\srclib\apr\include\apr_user.h"\
+ "..\..\srclib\apr\include\apr_want.h"\
+
diff --git a/modules/http/mod_mime.dsp b/modules/http/mod_mime.dsp
new file mode 100644
index 0000000..71ba2ad
--- /dev/null
+++ b/modules/http/mod_mime.dsp
@@ -0,0 +1,111 @@
+# Microsoft Developer Studio Project File - Name="mod_mime" - Package Owner=<4>
+# Microsoft Developer Studio Generated Build File, Format Version 6.00
+# ** DO NOT EDIT **
+
+# TARGTYPE "Win32 (x86) Dynamic-Link Library" 0x0102
+
+CFG=mod_mime - Win32 Release
+!MESSAGE This is not a valid makefile. To build this project using NMAKE,
+!MESSAGE use the Export Makefile command and run
+!MESSAGE
+!MESSAGE NMAKE /f "mod_mime.mak".
+!MESSAGE
+!MESSAGE You can specify a configuration when running NMAKE
+!MESSAGE by defining the macro CFG on the command line. For example:
+!MESSAGE
+!MESSAGE NMAKE /f "mod_mime.mak" CFG="mod_mime - Win32 Release"
+!MESSAGE
+!MESSAGE Possible choices for configuration are:
+!MESSAGE
+!MESSAGE "mod_mime - Win32 Release" (based on "Win32 (x86) Dynamic-Link Library")
+!MESSAGE "mod_mime - Win32 Debug" (based on "Win32 (x86) Dynamic-Link Library")
+!MESSAGE
+
+# Begin Project
+# PROP AllowPerConfigDependencies 0
+# PROP Scc_ProjName ""
+# PROP Scc_LocalPath ""
+CPP=cl.exe
+MTL=midl.exe
+RSC=rc.exe
+
+!IF "$(CFG)" == "mod_mime - Win32 Release"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "Release"
+# PROP BASE Intermediate_Dir "Release"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release"
+# PROP Intermediate_Dir "Release"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MD /W3 /O2 /D "WIN32" /D "NDEBUG" /D "_WINDOWS" /FD /c
+# ADD CPP /nologo /MD /W3 /O2 /Oy- /Zi /I "../../include" /I "../../srclib/apr/include" /I "../../srclib/apr-util/include" /D "NDEBUG" /D "WIN32" /D "_WINDOWS" /Fd"Release\mod_mime_src" /FD /c
+# ADD BASE MTL /nologo /D "NDEBUG" /win32
+# ADD MTL /nologo /D "NDEBUG" /mktyplib203 /win32
+# ADD BASE RSC /l 0x409 /d "NDEBUG"
+# ADD RSC /l 0x409 /fo"Release/mod_mime.res" /i "../../include" /i "../../srclib/apr/include" /d "NDEBUG" /d BIN_NAME="mod_mime.so" /d LONG_NAME="mime_module for Apache"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib /nologo /subsystem:windows /dll /out:".\Release\mod_mime.so" /base:@..\..\os\win32\BaseAddr.ref,mod_mime.so
+# ADD LINK32 kernel32.lib /nologo /subsystem:windows /dll /incremental:no /debug /out:".\Release\mod_mime.so" /base:@..\..\os\win32\BaseAddr.ref,mod_mime.so /opt:ref
+# Begin Special Build Tool
+TargetPath=.\Release\mod_mime.so
+SOURCE="$(InputPath)"
+PostBuild_Desc=Embed .manifest
+PostBuild_Cmds=if exist $(TargetPath).manifest mt.exe -manifest $(TargetPath).manifest -outputresource:$(TargetPath);2
+# End Special Build Tool
+
+!ELSEIF "$(CFG)" == "mod_mime - Win32 Debug"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "Debug"
+# PROP BASE Intermediate_Dir "Debug"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug"
+# PROP Intermediate_Dir "Debug"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MDd /W3 /EHsc /Zi /Od /D "WIN32" /D "_DEBUG" /D "_WINDOWS" /FD /c
+# ADD CPP /nologo /MDd /W3 /EHsc /Zi /Od /I "../../include" /I "../../srclib/apr/include" /I "../../srclib/apr-util/include" /D "_DEBUG" /D "WIN32" /D "_WINDOWS" /Fd"Debug\mod_mime_src" /FD /c
+# ADD BASE MTL /nologo /D "_DEBUG" /win32
+# ADD MTL /nologo /D "_DEBUG" /mktyplib203 /win32
+# ADD BASE RSC /l 0x409 /d "_DEBUG"
+# ADD RSC /l 0x409 /fo"Debug/mod_mime.res" /i "../../include" /i "../../srclib/apr/include" /d "_DEBUG" /d BIN_NAME="mod_mime.so" /d LONG_NAME="mime_module for Apache"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib /nologo /subsystem:windows /dll /incremental:no /debug /out:".\Debug\mod_mime.so" /base:@..\..\os\win32\BaseAddr.ref,mod_mime.so
+# ADD LINK32 kernel32.lib /nologo /subsystem:windows /dll /incremental:no /debug /out:".\Debug\mod_mime.so" /base:@..\..\os\win32\BaseAddr.ref,mod_mime.so
+# Begin Special Build Tool
+TargetPath=.\Debug\mod_mime.so
+SOURCE="$(InputPath)"
+PostBuild_Desc=Embed .manifest
+PostBuild_Cmds=if exist $(TargetPath).manifest mt.exe -manifest $(TargetPath).manifest -outputresource:$(TargetPath);2
+# End Special Build Tool
+
+!ENDIF
+
+# Begin Target
+
+# Name "mod_mime - Win32 Release"
+# Name "mod_mime - Win32 Debug"
+# Begin Source File
+
+SOURCE=.\mod_mime.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\..\build\win32\httpd.rc
+# End Source File
+# End Target
+# End Project
diff --git a/modules/http/mod_mime.exp b/modules/http/mod_mime.exp
new file mode 100644
index 0000000..f2e38db
--- /dev/null
+++ b/modules/http/mod_mime.exp
@@ -0,0 +1 @@
+mime_module
diff --git a/modules/http/mod_mime.mak b/modules/http/mod_mime.mak
new file mode 100644
index 0000000..14d106f
--- /dev/null
+++ b/modules/http/mod_mime.mak
@@ -0,0 +1,353 @@
+# Microsoft Developer Studio Generated NMAKE File, Based on mod_mime.dsp
+!IF "$(CFG)" == ""
+CFG=mod_mime - Win32 Release
+!MESSAGE No configuration specified. Defaulting to mod_mime - Win32 Release.
+!ENDIF
+
+!IF "$(CFG)" != "mod_mime - Win32 Release" && "$(CFG)" != "mod_mime - Win32 Debug"
+!MESSAGE Invalid configuration "$(CFG)" specified.
+!MESSAGE You can specify a configuration when running NMAKE
+!MESSAGE by defining the macro CFG on the command line. For example:
+!MESSAGE
+!MESSAGE NMAKE /f "mod_mime.mak" CFG="mod_mime - Win32 Release"
+!MESSAGE
+!MESSAGE Possible choices for configuration are:
+!MESSAGE
+!MESSAGE "mod_mime - Win32 Release" (based on "Win32 (x86) Dynamic-Link Library")
+!MESSAGE "mod_mime - Win32 Debug" (based on "Win32 (x86) Dynamic-Link Library")
+!MESSAGE
+!ERROR An invalid configuration is specified.
+!ENDIF
+
+!IF "$(OS)" == "Windows_NT"
+NULL=
+!ELSE
+NULL=nul
+!ENDIF
+
+!IF "$(CFG)" == "mod_mime - Win32 Release"
+
+OUTDIR=.\Release
+INTDIR=.\Release
+DS_POSTBUILD_DEP=$(INTDIR)\postbld.dep
+# Begin Custom Macros
+OutDir=.\Release
+# End Custom Macros
+
+!IF "$(RECURSE)" == "0"
+
+ALL : "$(OUTDIR)\mod_mime.so" "$(DS_POSTBUILD_DEP)"
+
+!ELSE
+
+ALL : "libhttpd - Win32 Release" "libaprutil - Win32 Release" "libapr - Win32 Release" "$(OUTDIR)\mod_mime.so" "$(DS_POSTBUILD_DEP)"
+
+!ENDIF
+
+!IF "$(RECURSE)" == "1"
+CLEAN :"libapr - Win32 ReleaseCLEAN" "libaprutil - Win32 ReleaseCLEAN" "libhttpd - Win32 ReleaseCLEAN"
+!ELSE
+CLEAN :
+!ENDIF
+ -@erase "$(INTDIR)\mod_mime.obj"
+ -@erase "$(INTDIR)\mod_mime.res"
+ -@erase "$(INTDIR)\mod_mime_src.idb"
+ -@erase "$(INTDIR)\mod_mime_src.pdb"
+ -@erase "$(OUTDIR)\mod_mime.exp"
+ -@erase "$(OUTDIR)\mod_mime.lib"
+ -@erase "$(OUTDIR)\mod_mime.pdb"
+ -@erase "$(OUTDIR)\mod_mime.so"
+
+"$(OUTDIR)" :
+ if not exist "$(OUTDIR)/$(NULL)" mkdir "$(OUTDIR)"
+
+CPP=cl.exe
+CPP_PROJ=/nologo /MD /W3 /Zi /O2 /Oy- /I "../../include" /I "../../srclib/apr/include" /I "../../srclib/apr-util/include" /D "NDEBUG" /D "WIN32" /D "_WINDOWS" /Fo"$(INTDIR)\\" /Fd"$(INTDIR)\mod_mime_src" /FD /c
+
+.c{$(INTDIR)}.obj::
+ $(CPP) @<<
+ $(CPP_PROJ) $<
+<<
+
+.cpp{$(INTDIR)}.obj::
+ $(CPP) @<<
+ $(CPP_PROJ) $<
+<<
+
+.cxx{$(INTDIR)}.obj::
+ $(CPP) @<<
+ $(CPP_PROJ) $<
+<<
+
+.c{$(INTDIR)}.sbr::
+ $(CPP) @<<
+ $(CPP_PROJ) $<
+<<
+
+.cpp{$(INTDIR)}.sbr::
+ $(CPP) @<<
+ $(CPP_PROJ) $<
+<<
+
+.cxx{$(INTDIR)}.sbr::
+ $(CPP) @<<
+ $(CPP_PROJ) $<
+<<
+
+MTL=midl.exe
+MTL_PROJ=/nologo /D "NDEBUG" /mktyplib203 /win32
+RSC=rc.exe
+RSC_PROJ=/l 0x409 /fo"$(INTDIR)\mod_mime.res" /i "../../include" /i "../../srclib/apr/include" /d "NDEBUG" /d BIN_NAME="mod_mime.so" /d LONG_NAME="mime_module for Apache"
+BSC32=bscmake.exe
+BSC32_FLAGS=/nologo /o"$(OUTDIR)\mod_mime.bsc"
+BSC32_SBRS= \
+
+LINK32=link.exe
+LINK32_FLAGS=kernel32.lib /nologo /subsystem:windows /dll /incremental:no /pdb:"$(OUTDIR)\mod_mime.pdb" /debug /out:"$(OUTDIR)\mod_mime.so" /implib:"$(OUTDIR)\mod_mime.lib" /base:@..\..\os\win32\BaseAddr.ref,mod_mime.so /opt:ref
+LINK32_OBJS= \
+ "$(INTDIR)\mod_mime.obj" \
+ "$(INTDIR)\mod_mime.res" \
+ "..\..\srclib\apr\Release\libapr-1.lib" \
+ "..\..\srclib\apr-util\Release\libaprutil-1.lib" \
+ "..\..\Release\libhttpd.lib"
+
+"$(OUTDIR)\mod_mime.so" : "$(OUTDIR)" $(DEF_FILE) $(LINK32_OBJS)
+ $(LINK32) @<<
+ $(LINK32_FLAGS) $(LINK32_OBJS)
+<<
+
+TargetPath=.\Release\mod_mime.so
+SOURCE="$(InputPath)"
+PostBuild_Desc=Embed .manifest
+DS_POSTBUILD_DEP=$(INTDIR)\postbld.dep
+
+# Begin Custom Macros
+OutDir=.\Release
+# End Custom Macros
+
+"$(DS_POSTBUILD_DEP)" : "$(OUTDIR)\mod_mime.so"
+ if exist .\Release\mod_mime.so.manifest mt.exe -manifest .\Release\mod_mime.so.manifest -outputresource:.\Release\mod_mime.so;2
+ echo Helper for Post-build step > "$(DS_POSTBUILD_DEP)"
+
+!ELSEIF "$(CFG)" == "mod_mime - Win32 Debug"
+
+OUTDIR=.\Debug
+INTDIR=.\Debug
+DS_POSTBUILD_DEP=$(INTDIR)\postbld.dep
+# Begin Custom Macros
+OutDir=.\Debug
+# End Custom Macros
+
+!IF "$(RECURSE)" == "0"
+
+ALL : "$(OUTDIR)\mod_mime.so" "$(DS_POSTBUILD_DEP)"
+
+!ELSE
+
+ALL : "libhttpd - Win32 Debug" "libaprutil - Win32 Debug" "libapr - Win32 Debug" "$(OUTDIR)\mod_mime.so" "$(DS_POSTBUILD_DEP)"
+
+!ENDIF
+
+!IF "$(RECURSE)" == "1"
+CLEAN :"libapr - Win32 DebugCLEAN" "libaprutil - Win32 DebugCLEAN" "libhttpd - Win32 DebugCLEAN"
+!ELSE
+CLEAN :
+!ENDIF
+ -@erase "$(INTDIR)\mod_mime.obj"
+ -@erase "$(INTDIR)\mod_mime.res"
+ -@erase "$(INTDIR)\mod_mime_src.idb"
+ -@erase "$(INTDIR)\mod_mime_src.pdb"
+ -@erase "$(OUTDIR)\mod_mime.exp"
+ -@erase "$(OUTDIR)\mod_mime.lib"
+ -@erase "$(OUTDIR)\mod_mime.pdb"
+ -@erase "$(OUTDIR)\mod_mime.so"
+
+"$(OUTDIR)" :
+ if not exist "$(OUTDIR)/$(NULL)" mkdir "$(OUTDIR)"
+
+CPP=cl.exe
+CPP_PROJ=/nologo /MDd /W3 /Zi /Od /I "../../include" /I "../../srclib/apr/include" /I "../../srclib/apr-util/include" /D "_DEBUG" /D "WIN32" /D "_WINDOWS" /Fo"$(INTDIR)\\" /Fd"$(INTDIR)\mod_mime_src" /FD /EHsc /c
+
+.c{$(INTDIR)}.obj::
+ $(CPP) @<<
+ $(CPP_PROJ) $<
+<<
+
+.cpp{$(INTDIR)}.obj::
+ $(CPP) @<<
+ $(CPP_PROJ) $<
+<<
+
+.cxx{$(INTDIR)}.obj::
+ $(CPP) @<<
+ $(CPP_PROJ) $<
+<<
+
+.c{$(INTDIR)}.sbr::
+ $(CPP) @<<
+ $(CPP_PROJ) $<
+<<
+
+.cpp{$(INTDIR)}.sbr::
+ $(CPP) @<<
+ $(CPP_PROJ) $<
+<<
+
+.cxx{$(INTDIR)}.sbr::
+ $(CPP) @<<
+ $(CPP_PROJ) $<
+<<
+
+MTL=midl.exe
+MTL_PROJ=/nologo /D "_DEBUG" /mktyplib203 /win32
+RSC=rc.exe
+RSC_PROJ=/l 0x409 /fo"$(INTDIR)\mod_mime.res" /i "../../include" /i "../../srclib/apr/include" /d "_DEBUG" /d BIN_NAME="mod_mime.so" /d LONG_NAME="mime_module for Apache"
+BSC32=bscmake.exe
+BSC32_FLAGS=/nologo /o"$(OUTDIR)\mod_mime.bsc"
+BSC32_SBRS= \
+
+LINK32=link.exe
+LINK32_FLAGS=kernel32.lib /nologo /subsystem:windows /dll /incremental:no /pdb:"$(OUTDIR)\mod_mime.pdb" /debug /out:"$(OUTDIR)\mod_mime.so" /implib:"$(OUTDIR)\mod_mime.lib" /base:@..\..\os\win32\BaseAddr.ref,mod_mime.so
+LINK32_OBJS= \
+ "$(INTDIR)\mod_mime.obj" \
+ "$(INTDIR)\mod_mime.res" \
+ "..\..\srclib\apr\Debug\libapr-1.lib" \
+ "..\..\srclib\apr-util\Debug\libaprutil-1.lib" \
+ "..\..\Debug\libhttpd.lib"
+
+"$(OUTDIR)\mod_mime.so" : "$(OUTDIR)" $(DEF_FILE) $(LINK32_OBJS)
+ $(LINK32) @<<
+ $(LINK32_FLAGS) $(LINK32_OBJS)
+<<
+
+TargetPath=.\Debug\mod_mime.so
+SOURCE="$(InputPath)"
+PostBuild_Desc=Embed .manifest
+DS_POSTBUILD_DEP=$(INTDIR)\postbld.dep
+
+# Begin Custom Macros
+OutDir=.\Debug
+# End Custom Macros
+
+"$(DS_POSTBUILD_DEP)" : "$(OUTDIR)\mod_mime.so"
+ if exist .\Debug\mod_mime.so.manifest mt.exe -manifest .\Debug\mod_mime.so.manifest -outputresource:.\Debug\mod_mime.so;2
+ echo Helper for Post-build step > "$(DS_POSTBUILD_DEP)"
+
+!ENDIF
+
+
+!IF "$(NO_EXTERNAL_DEPS)" != "1"
+!IF EXISTS("mod_mime.dep")
+!INCLUDE "mod_mime.dep"
+!ELSE
+!MESSAGE Warning: cannot find "mod_mime.dep"
+!ENDIF
+!ENDIF
+
+
+!IF "$(CFG)" == "mod_mime - Win32 Release" || "$(CFG)" == "mod_mime - Win32 Debug"
+
+!IF "$(CFG)" == "mod_mime - Win32 Release"
+
+"libapr - Win32 Release" :
+ cd ".\..\..\srclib\apr"
+ $(MAKE) /$(MAKEFLAGS) /F ".\libapr.mak" CFG="libapr - Win32 Release"
+ cd "..\..\modules\http"
+
+"libapr - Win32 ReleaseCLEAN" :
+ cd ".\..\..\srclib\apr"
+ $(MAKE) /$(MAKEFLAGS) /F ".\libapr.mak" CFG="libapr - Win32 Release" RECURSE=1 CLEAN
+ cd "..\..\modules\http"
+
+!ELSEIF "$(CFG)" == "mod_mime - Win32 Debug"
+
+"libapr - Win32 Debug" :
+ cd ".\..\..\srclib\apr"
+ $(MAKE) /$(MAKEFLAGS) /F ".\libapr.mak" CFG="libapr - Win32 Debug"
+ cd "..\..\modules\http"
+
+"libapr - Win32 DebugCLEAN" :
+ cd ".\..\..\srclib\apr"
+ $(MAKE) /$(MAKEFLAGS) /F ".\libapr.mak" CFG="libapr - Win32 Debug" RECURSE=1 CLEAN
+ cd "..\..\modules\http"
+
+!ENDIF
+
+!IF "$(CFG)" == "mod_mime - Win32 Release"
+
+"libaprutil - Win32 Release" :
+ cd ".\..\..\srclib\apr-util"
+ $(MAKE) /$(MAKEFLAGS) /F ".\libaprutil.mak" CFG="libaprutil - Win32 Release"
+ cd "..\..\modules\http"
+
+"libaprutil - Win32 ReleaseCLEAN" :
+ cd ".\..\..\srclib\apr-util"
+ $(MAKE) /$(MAKEFLAGS) /F ".\libaprutil.mak" CFG="libaprutil - Win32 Release" RECURSE=1 CLEAN
+ cd "..\..\modules\http"
+
+!ELSEIF "$(CFG)" == "mod_mime - Win32 Debug"
+
+"libaprutil - Win32 Debug" :
+ cd ".\..\..\srclib\apr-util"
+ $(MAKE) /$(MAKEFLAGS) /F ".\libaprutil.mak" CFG="libaprutil - Win32 Debug"
+ cd "..\..\modules\http"
+
+"libaprutil - Win32 DebugCLEAN" :
+ cd ".\..\..\srclib\apr-util"
+ $(MAKE) /$(MAKEFLAGS) /F ".\libaprutil.mak" CFG="libaprutil - Win32 Debug" RECURSE=1 CLEAN
+ cd "..\..\modules\http"
+
+!ENDIF
+
+!IF "$(CFG)" == "mod_mime - Win32 Release"
+
+"libhttpd - Win32 Release" :
+ cd ".\..\.."
+ $(MAKE) /$(MAKEFLAGS) /F ".\libhttpd.mak" CFG="libhttpd - Win32 Release"
+ cd ".\modules\http"
+
+"libhttpd - Win32 ReleaseCLEAN" :
+ cd ".\..\.."
+ $(MAKE) /$(MAKEFLAGS) /F ".\libhttpd.mak" CFG="libhttpd - Win32 Release" RECURSE=1 CLEAN
+ cd ".\modules\http"
+
+!ELSEIF "$(CFG)" == "mod_mime - Win32 Debug"
+
+"libhttpd - Win32 Debug" :
+ cd ".\..\.."
+ $(MAKE) /$(MAKEFLAGS) /F ".\libhttpd.mak" CFG="libhttpd - Win32 Debug"
+ cd ".\modules\http"
+
+"libhttpd - Win32 DebugCLEAN" :
+ cd ".\..\.."
+ $(MAKE) /$(MAKEFLAGS) /F ".\libhttpd.mak" CFG="libhttpd - Win32 Debug" RECURSE=1 CLEAN
+ cd ".\modules\http"
+
+!ENDIF
+
+SOURCE=..\..\build\win32\httpd.rc
+
+!IF "$(CFG)" == "mod_mime - Win32 Release"
+
+
+"$(INTDIR)\mod_mime.res" : $(SOURCE) "$(INTDIR)"
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_mime.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "NDEBUG" /d BIN_NAME="mod_mime.so" /d LONG_NAME="mime_module for Apache" $(SOURCE)
+
+
+!ELSEIF "$(CFG)" == "mod_mime - Win32 Debug"
+
+
+"$(INTDIR)\mod_mime.res" : $(SOURCE) "$(INTDIR)"
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_mime.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "_DEBUG" /d BIN_NAME="mod_mime.so" /d LONG_NAME="mime_module for Apache" $(SOURCE)
+
+
+!ENDIF
+
+SOURCE=.\mod_mime.c
+
+"$(INTDIR)\mod_mime.obj" : $(SOURCE) "$(INTDIR)"
+
+
+
+!ENDIF
+
diff --git a/modules/http2/.gitignore b/modules/http2/.gitignore
new file mode 100644
index 0000000..ca49620
--- /dev/null
+++ b/modules/http2/.gitignore
@@ -0,0 +1,35 @@
+*.xcuserstate
+sandbox/httpd/packages/httpd-2.4.x.tar.gz
+sandbox/test/conf/sites/mod-h2.greenbytes.de.conf
+*.o
+*.slo
+*.lo
+*.la
+*.pcap
+.libs
+.configured
+.deps
+compile
+aclocal.m4
+autom4te.cache
+autoscan.log
+config.guess
+config.log
+config.status
+config.sub
+config.h
+config.h.in
+config.h.in~
+configure
+configure.scan
+depcomp
+install-sh
+libtool
+ltmain.sh
+missing
+stamp-h1
+Makefile.in
+Makefile
+mod_h2-*.tar.gz
+mod_h2/h2_version.h
+m4
diff --git a/modules/http2/Makefile.in b/modules/http2/Makefile.in
new file mode 100644
index 0000000..4395bc3
--- /dev/null
+++ b/modules/http2/Makefile.in
@@ -0,0 +1,20 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+#
+# standard stuff
+#
+
+include $(top_srcdir)/build/special.mk
diff --git a/modules/http2/NWGNUmakefile b/modules/http2/NWGNUmakefile
new file mode 100644
index 0000000..d4a51ed
--- /dev/null
+++ b/modules/http2/NWGNUmakefile
@@ -0,0 +1,246 @@
+#
+# Declare the sub-directories to be built here
+#
+
+SUBDIRS = \
+ $(EOLIST)
+
+#
+# Get the 'head' of the build environment. This includes default targets and
+# paths to tools
+#
+
+include $(AP_WORK)/build/NWGNUhead.inc
+
+#
+# build this level's files
+
+#
+# Make sure all needed macros are defined
+#
+
+#
+# These directories will be at the beginning of the include list, followed by
+# INCDIRS
+#
+XINCDIRS += \
+ $(EOLIST)
+
+#
+# These flags will come after CFLAGS
+#
+XCFLAGS += \
+ $(EOLIST)
+
+#
+# These defines will come after DEFINES
+#
+XDEFINES += \
+ $(EOLIST)
+
+#
+# These flags will be added to the link.opt file
+#
+XLFLAGS += \
+ $(EOLIST)
+
+#
+# These values will be appended to the correct variables based on the value of
+# RELEASE
+#
+ifeq "$(RELEASE)" "debug"
+XINCDIRS += \
+ $(EOLIST)
+
+XCFLAGS += \
+ $(EOLIST)
+
+XDEFINES += \
+ $(EOLIST)
+
+XLFLAGS += \
+ $(EOLIST)
+endif
+
+ifeq "$(RELEASE)" "noopt"
+XINCDIRS += \
+ $(EOLIST)
+
+XCFLAGS += \
+ $(EOLIST)
+
+XDEFINES += \
+ $(EOLIST)
+
+XLFLAGS += \
+ $(EOLIST)
+endif
+
+ifeq "$(RELEASE)" "release"
+XINCDIRS += \
+ $(EOLIST)
+
+XCFLAGS += \
+ $(EOLIST)
+
+XDEFINES += \
+ $(EOLIST)
+
+XLFLAGS += \
+ $(EOLIST)
+endif
+
+#
+# These are used by the link target if an NLM is being generated
+# This is used by the link 'name' directive to name the nlm. If left blank
+# TARGET_nlm (see below) will be used.
+#
+NLM_NAME =
+
+#
+# This is used by the link '-desc ' directive.
+# If left blank, NLM_NAME will be used.
+#
+NLM_DESCRIPTION =
+
+#
+# This is used by the '-threadname' directive. If left blank,
+# NLM_NAME Thread will be used.
+#
+NLM_THREAD_NAME =
+
+#
+# If this is specified, it will override VERSION value in
+# $(AP_WORK)/build/NWGNUenvironment.inc
+#
+NLM_VERSION =
+
+#
+# If this is specified, it will override the default of 64K
+#
+NLM_STACK_SIZE =
+
+
+#
+# If this is specified it will be used by the link '-entry' directive
+#
+NLM_ENTRY_SYM =
+
+#
+# If this is specified it will be used by the link '-exit' directive
+#
+NLM_EXIT_SYM =
+
+#
+# If this is specified it will be used by the link '-check' directive
+#
+NLM_CHECK_SYM =
+
+#
+# If this is specified it will be used by the link '-flags' directive
+#
+NLM_FLAGS =
+
+#
+# If this is specified it will be linked in with the XDCData option in the def
+# file instead of the default of $(NWOS)/apache.xdc. XDCData can be disabled
+# by setting APACHE_UNIPROC in the environment
+#
+XDCDATA =
+
+#
+# If there is an NLM target, put it here
+#
+TARGET_nlm = \
+ $(OBJDIR)/mod_http2.nlm \
+ $(OBJDIR)/proxyht2.nlm \
+ $(EOLIST)
+
+#
+# If there is an LIB target, put it here
+#
+TARGET_lib = \
+ $(EOLIST)
+
+#
+# These are the OBJ files needed to create the NLM target above.
+# Paths must all use the '/' character
+#
+FILES_nlm_objs = \
+ $(EOLIST)
+
+#
+# These are the LIB files needed to create the NLM target above.
+# These will be added as a library command in the link.opt file.
+#
+FILES_nlm_libs = \
+ $(EOLIST)
+
+#
+# These are the modules that the above NLM target depends on to load.
+# These will be added as a module command in the link.opt file.
+#
+FILES_nlm_modules = \
+ $(EOLIST)
+
+#
+# If the nlm has a msg file, put its path here
+#
+FILE_nlm_msg =
+
+#
+# If the nlm has a hlp file, put its path here
+#
+FILE_nlm_hlp =
+
+#
+# If this is specified, it will override $(NWOS)\copyright.txt.
+#
+FILE_nlm_copyright =
+
+#
+# Any additional imports go here
+#
+FILES_nlm_Ximports = \
+ $(EOLIST)
+
+#
+# Any symbols exported go here
+#
+FILES_nlm_exports = \
+ $(EOLIST)
+
+#
+# These are the OBJ files needed to create the LIB target above.
+# Paths must all use the '/' character
+#
+FILES_lib_objs = \
+ $(EOLIST)
+
+#
+# implement targets and dependencies (leave this section alone)
+#
+
+libs :: $(OBJDIR) $(TARGET_lib)
+
+nlms :: libs $(TARGET_nlm)
+
+#
+# Updated this target to create necessary directories and copy files to the
+# correct place. (See $(AP_WORK)/build/NWGNUhead.inc for examples)
+#
+install :: nlms FORCE
+ $(call COPY,$(OBJDIR)/*.nlm, $(INSTALLBASE)/modules/)
+
+#
+# Any specialized rules here
+#
+
+#
+# Include the 'tail' makefile that has targets that depend on variables defined
+# in this makefile
+#
+
+include $(APBUILD)/NWGNUtail.inc
+
+
diff --git a/modules/http2/NWGNUmod_http2 b/modules/http2/NWGNUmod_http2
new file mode 100644
index 0000000..f6d4a38
--- /dev/null
+++ b/modules/http2/NWGNUmod_http2
@@ -0,0 +1,395 @@
+#
+# This Makefile requires the environment var NGH2SRC
+# pointing to the base directory of the nghttp2 source tree.
+#
+
+#
+# Declare the sub-directories to be built here
+#
+
+SUBDIRS = \
+ $(EOLIST)
+
+#
+# Get the 'head' of the build environment. This includes default targets and
+# paths to tools
+#
+
+include $(AP_WORK)/build/NWGNUhead.inc
+
+#
+# build this level's files
+#
+# Make sure all needed macros are defined
+#
+
+#
+# These directories will be at the beginning of the include list, followed by
+# INCDIRS
+#
+XINCDIRS += \
+ $(APR)/include \
+ $(APRUTIL)/include \
+ $(SRC)/include \
+ $(NGH2SRC)/lib/ \
+ $(NGH2SRC)/lib/includes \
+ $(SERVER)/mpm/NetWare \
+ $(STDMOD)/ssl \
+ $(NWOS) \
+ $(EOLIST)
+
+#
+# These flags will come after CFLAGS
+#
+XCFLAGS += \
+ $(EOLIST)
+
+#
+# These defines will come after DEFINES
+#
+XDEFINES += \
+ -DHAVE_CONFIG_H \
+ $(EOLIST)
+
+#
+# These flags will be added to the link.opt file
+#
+XLFLAGS += \
+ -L$(OBJDIR) \
+ $(EOLIST)
+
+#
+# These values will be appended to the correct variables based on the value of
+# RELEASE
+#
+ifeq "$(RELEASE)" "debug"
+XINCDIRS += \
+ $(EOLIST)
+
+XCFLAGS += \
+ $(EOLIST)
+
+XDEFINES += \
+ $(EOLIST)
+
+XLFLAGS += \
+ $(EOLIST)
+endif
+
+ifeq "$(RELEASE)" "noopt"
+XINCDIRS += \
+ $(EOLIST)
+
+XCFLAGS += \
+ $(EOLIST)
+
+XDEFINES += \
+ $(EOLIST)
+
+XLFLAGS += \
+ $(EOLIST)
+endif
+
+ifeq "$(RELEASE)" "release"
+XINCDIRS += \
+ $(EOLIST)
+
+XCFLAGS += \
+ $(EOLIST)
+
+XDEFINES += \
+ $(EOLIST)
+
+XLFLAGS += \
+ $(EOLIST)
+endif
+
+#
+# These are used by the link target if an NLM is being generated
+# This is used by the link 'name' directive to name the nlm. If left blank
+# TARGET_nlm (see below) will be used.
+#
+NLM_NAME = mod_http2
+
+#
+# This is used by the link '-desc ' directive.
+# If left blank, NLM_NAME will be used.
+#
+NLM_DESCRIPTION = Apache $(VERSION_STR) HTTP2 Support module (w/ NGHTTP2 Lib)
+
+#
+# This is used by the '-threadname' directive. If left blank,
+# NLM_NAME Thread will be used.
+#
+NLM_THREAD_NAME = $(NLM_NAME)
+
+#
+# If this is specified, it will override VERSION value in
+# $(AP_WORK)/build/NWGNUenvironment.inc
+#
+NLM_VERSION =
+
+#
+# If this is specified, it will override the default of 64K
+#
+NLM_STACK_SIZE = 65536
+
+#
+# If this is specified it will be used by the link '-entry' directive
+#
+NLM_ENTRY_SYM =
+
+#
+# If this is specified it will be used by the link '-exit' directive
+#
+NLM_EXIT_SYM =
+
+#
+# If this is specified it will be used by the link '-check' directive
+#
+NLM_CHECK_SYM =
+
+#
+# If this is specified it will be used by the link '-flags' directive
+#
+NLM_FLAGS =
+
+#
+# If this is specified it will be linked in with the XDCData option in the def
+# file instead of the default of $(NWOS)/apache.xdc. XDCData can be disabled
+# by setting APACHE_UNIPROC in the environment
+#
+XDCDATA =
+
+#
+# Declare all target files (you must add your files here)
+#
+
+#
+# If there is an NLM target, put it here
+#
+TARGET_nlm = \
+ $(OBJDIR)/$(NLM_NAME).nlm \
+ $(EOLIST)
+
+#
+# If there is an LIB target, put it here
+#
+TARGET_lib = \
+ $(OBJDIR)/nghttp2.lib \
+ $(EOLIST)
+
+#
+# These are the OBJ files needed to create the NLM target above.
+# Paths must all use the '/' character
+#
+FILES_nlm_objs = \
+ $(OBJDIR)/h2_alt_svc.o \
+ $(OBJDIR)/h2_bucket_beam.o \
+ $(OBJDIR)/h2_bucket_eos.o \
+ $(OBJDIR)/h2_config.o \
+ $(OBJDIR)/h2_conn.o \
+ $(OBJDIR)/h2_conn_io.o \
+ $(OBJDIR)/h2_ctx.o \
+ $(OBJDIR)/h2_filter.o \
+ $(OBJDIR)/h2_from_h1.o \
+ $(OBJDIR)/h2_h2.o \
+ $(OBJDIR)/h2_mplx.o \
+ $(OBJDIR)/h2_ngn_shed.o \
+ $(OBJDIR)/h2_push.o \
+ $(OBJDIR)/h2_request.o \
+ $(OBJDIR)/h2_headers.o \
+ $(OBJDIR)/h2_session.o \
+ $(OBJDIR)/h2_stream.o \
+ $(OBJDIR)/h2_switch.o \
+ $(OBJDIR)/h2_task.o \
+ $(OBJDIR)/h2_util.o \
+ $(OBJDIR)/h2_workers.o \
+ $(OBJDIR)/mod_http2.o \
+ $(EOLIST)
+
+#
+# These are the LIB files needed to create the NLM target above.
+# These will be added as a library command in the link.opt file.
+#
+FILES_nlm_libs = \
+ $(PRELUDE) \
+ $(OBJDIR)/nghttp2.lib \
+ $(EOLIST)
+
+#
+# These are the modules that the above NLM target depends on to load.
+# These will be added as a module command in the link.opt file.
+#
+FILES_nlm_modules = \
+ Libc \
+ Apache2 \
+ $(EOLIST)
+
+#
+# If the nlm has a msg file, put its path here
+#
+FILE_nlm_msg =
+
+#
+# If the nlm has a hlp file, put its path here
+#
+FILE_nlm_hlp =
+
+#
+# If this is specified, it will override $(NWOS)\copyright.txt.
+#
+FILE_nlm_copyright =
+
+#
+# Any additional imports go here
+#
+FILES_nlm_Ximports = \
+ @libc.imp \
+ @aprlib.imp \
+ @httpd.imp \
+ $(EOLIST)
+
+#
+# Any symbols exported go here
+#
+FILES_nlm_exports = \
+ @$(OBJDIR)/mod_http2.imp \
+ $(EOLIST)
+
+#
+# These are the OBJ files needed to create the LIB target above.
+# Paths must all use the '/' character
+#
+FILES_lib_objs := $(sort $(patsubst $(NGH2SRC)/lib/%.c,$(OBJDIR)/%.o,$(wildcard $(NGH2SRC)/lib/*.c)))
+#
+# implement targets and dependencies (leave this section alone)
+#
+
+libs :: $(OBJDIR) $(NGH2SRC)/lib/config.h $(TARGET_lib)
+
+nlms :: libs $(OBJDIR)/mod_http2.imp $(TARGET_nlm)
+
+#
+# Update this target to create necessary directories and copy files to the
+# correct place. (See $(AP_WORK)/build/NWGNUhead.inc for examples)
+#
+install :: nlms FORCE
+ $(call COPY,$(OBJDIR)/*.nlm, $(INSTALLBASE)/modules/)
+
+clean ::
+ $(call DEL,$(NGH2SRC)/lib/config.h)
+#
+# Any specialized rules here
+#
+vpath %.c $(NGH2SRC)/lib
+
+$(NGH2SRC)/lib/config.h : NWGNUmod_http2
+ @-$(RM) $@
+ @echo $(DL)GEN $@$(DL)
+ @echo $(DL)/* For NetWare target.$(DL) > $@
+ @echo $(DL)** Do not edit - created by Make!$(DL) >> $@
+ @echo $(DL)*/$(DL) >> $@
+ @echo $(DL)#ifndef NGH2_CONFIG_H$(DL) >> $@
+ @echo $(DL)#define NGH2_CONFIG_H$(DL) >> $@
+ @echo #define HAVE_ARPA_INET_H 1 >> $@
+ @echo #define HAVE_CHOWN 1 >> $@
+ @echo #define HAVE_DECL_STRERROR_R 1 >> $@
+ @echo #define HAVE_DLFCN_H 1 >> $@
+ @echo #define HAVE_DUP2 1 >> $@
+ @echo #define HAVE_FCNTL_H 1 >> $@
+ @echo #define HAVE_GETCWD 1 >> $@
+ @echo #define HAVE_INTTYPES_H 1 >> $@
+ @echo #define HAVE_LIMITS_H 1 >> $@
+ @echo #define HAVE_LOCALTIME_R 1 >> $@
+ @echo #define HAVE_MALLOC 1 >> $@
+ @echo #define HAVE_MEMCHR 1 >> $@
+ @echo #define HAVE_MEMMOVE 1 >> $@
+ @echo #define HAVE_MEMORY_H 1 >> $@
+ @echo #define HAVE_MEMSET 1 >> $@
+ @echo #define HAVE_NETDB_H 1 >> $@
+ @echo #define HAVE_NETINET_IN_H 1 >> $@
+ @echo #define HAVE_PTRDIFF_T 1 >> $@
+ @echo #define HAVE_PWD_H 1 >> $@
+ @echo #define HAVE_SOCKET 1 >> $@
+ @echo #define HAVE_SQRT 1 >> $@
+ @echo #define HAVE_STDDEF_H 1 >> $@
+ @echo #define HAVE_STDINT_H 1 >> $@
+ @echo #define HAVE_STDLIB_H 1 >> $@
+ @echo #define HAVE_STRCHR 1 >> $@
+ @echo #define HAVE_STRDUP 1 >> $@
+ @echo #define HAVE_STRERROR 1 >> $@
+ @echo #define HAVE_STRERROR_R 1 >> $@
+ @echo #define HAVE_STRINGS_H 1 >> $@
+ @echo #define HAVE_STRING_H 1 >> $@
+ @echo #define HAVE_STRSTR 1 >> $@
+ @echo #define HAVE_STRTOL 1 >> $@
+ @echo #define HAVE_STRTOUL 1 >> $@
+ @echo #define HAVE_SYSLOG_H 1 >> $@
+ @echo #define HAVE_SYS_SOCKET_H 1 >> $@
+ @echo #define HAVE_SYS_STAT_H 1 >> $@
+ @echo #define HAVE_SYS_TIME_H 1 >> $@
+ @echo #define HAVE_SYS_TYPES_H 1 >> $@
+ @echo #define HAVE_TIME_H 1 >> $@
+ @echo #define HAVE_UNISTD_H 1 >> $@
+
+ @echo #define SIZEOF_INT_P 4 >> $@
+ @echo #define STDC_HEADERS 1 >> $@
+ @echo #define STRERROR_R_CHAR_P 4 >> $@
+
+# Hint to compiler a function parameter is not used
+ @echo #define _U_ >> $@
+
+ @echo #ifndef __cplusplus >> $@
+ @echo #define inline __inline >> $@
+ @echo #endif >> $@
+
+ @echo $(DL)#endif /* NGH2_CONFIG_H */$(DL) >> $@
+
+#
+# Exports from mod_http2 for mod_proxy_http2
+$(OBJDIR)/mod_http2.imp : NWGNUmod_http2
+ @-$(RM) $@
+ @echo $(DL)GEN $@$(DL)
+ @echo $(DL) (HTTP2)$(DL) > $@
+ @echo $(DL) http2_module,$(DL) >> $@
+ @echo $(DL) nghttp2_is_fatal,$(DL) >> $@
+ @echo $(DL) nghttp2_option_del,$(DL) >> $@
+ @echo $(DL) nghttp2_option_new,$(DL) >> $@
+ @echo $(DL) nghttp2_option_set_no_auto_window_update,$(DL) >> $@
+ @echo $(DL) nghttp2_option_set_peer_max_concurrent_streams,$(DL) >> $@
+ @echo $(DL) nghttp2_session_callbacks_del,$(DL) >> $@
+ @echo $(DL) nghttp2_session_callbacks_new,$(DL) >> $@
+ @echo $(DL) nghttp2_session_callbacks_set_before_frame_send_callback,$(DL) >> $@
+ @echo $(DL) nghttp2_session_callbacks_set_on_data_chunk_recv_callback,$(DL) >> $@
+ @echo $(DL) nghttp2_session_callbacks_set_on_frame_recv_callback,$(DL) >> $@
+ @echo $(DL) nghttp2_session_callbacks_set_on_header_callback,$(DL) >> $@
+ @echo $(DL) nghttp2_session_callbacks_set_on_stream_close_callback,$(DL) >> $@
+ @echo $(DL) nghttp2_session_callbacks_set_send_callback,$(DL) >> $@
+ @echo $(DL) nghttp2_session_client_new2,$(DL) >> $@
+ @echo $(DL) nghttp2_session_consume,$(DL) >> $@
+ @echo $(DL) nghttp2_session_consume_connection,$(DL) >> $@
+ @echo $(DL) nghttp2_session_del,$(DL) >> $@
+ @echo $(DL) nghttp2_session_get_remote_settings,$(DL) >> $@
+ @echo $(DL) nghttp2_session_get_stream_user_data,$(DL) >> $@
+ @echo $(DL) nghttp2_session_mem_recv,$(DL) >> $@
+ @echo $(DL) nghttp2_session_resume_data,$(DL) >> $@
+ @echo $(DL) nghttp2_session_send,$(DL) >> $@
+ @echo $(DL) nghttp2_session_want_read,$(DL) >> $@
+ @echo $(DL) nghttp2_session_want_write,$(DL) >> $@
+ @echo $(DL) nghttp2_strerror,$(DL) >> $@
+ @echo $(DL) nghttp2_submit_goaway,$(DL) >> $@
+ @echo $(DL) nghttp2_submit_ping,$(DL) >> $@
+ @echo $(DL) nghttp2_submit_request,$(DL) >> $@
+ @echo $(DL) nghttp2_submit_rst_stream,$(DL) >> $@
+ @echo $(DL) nghttp2_submit_settings,$(DL) >> $@
+ @echo $(DL) nghttp2_submit_window_update,$(DL) >> $@
+ @echo $(DL) nghttp2_version$(DL) >> $@
+
+# Include the 'tail' makefile that has targets that depend on variables defined
+# in this makefile
+#
+
+include $(APBUILD)/NWGNUtail.inc
+
+
diff --git a/modules/http2/NWGNUproxyht2 b/modules/http2/NWGNUproxyht2
new file mode 100644
index 0000000..ca44ad7
--- /dev/null
+++ b/modules/http2/NWGNUproxyht2
@@ -0,0 +1,288 @@
+#
+# This Makefile requires the environment var NGH2SRC
+# pointing to the base directory of nghttp2 source tree.
+#
+
+#
+# Declare the sub-directories to be built here
+#
+
+SUBDIRS = \
+ $(EOLIST)
+
+#
+# Get the 'head' of the build environment. This includes default targets and
+# paths to tools
+#
+
+include $(AP_WORK)/build/NWGNUhead.inc
+
+#
+# build this level's files
+#
+# Make sure all needed macro's are defined
+#
+
+#
+# These directories will be at the beginning of the include list, followed by
+# INCDIRS
+#
+XINCDIRS += \
+ $(APR)/include \
+ $(APRUTIL)/include \
+ $(SRC)/include \
+ $(NGH2SRC)/lib/includes \
+ $(STDMOD)/proxy \
+ $(SERVER)/mpm/NetWare \
+ $(NWOS) \
+ $(EOLIST)
+
+#
+# These flags will come after CFLAGS
+#
+XCFLAGS += \
+ $(EOLIST)
+
+#
+# These defines will come after DEFINES
+#
+XDEFINES += \
+ $(EOLIST)
+
+#
+# These flags will be added to the link.opt file
+#
+XLFLAGS += \
+ -L$(OBJDIR) \
+ $(EOLIST)
+
+#
+# These values will be appended to the correct variables based on the value of
+# RELEASE
+#
+ifeq "$(RELEASE)" "debug"
+XINCDIRS += \
+ $(EOLIST)
+
+XCFLAGS += \
+ $(EOLIST)
+
+XDEFINES += \
+ $(EOLIST)
+
+XLFLAGS += \
+ $(EOLIST)
+endif
+
+ifeq "$(RELEASE)" "noopt"
+XINCDIRS += \
+ $(EOLIST)
+
+XCFLAGS += \
+ $(EOLIST)
+
+XDEFINES += \
+ $(EOLIST)
+
+XLFLAGS += \
+ $(EOLIST)
+endif
+
+ifeq "$(RELEASE)" "release"
+XINCDIRS += \
+ $(EOLIST)
+
+XCFLAGS += \
+ $(EOLIST)
+
+XDEFINES += \
+ $(EOLIST)
+
+XLFLAGS += \
+ $(EOLIST)
+endif
+
+#
+# These are used by the link target if an NLM is being generated
+# This is used by the link 'name' directive to name the nlm. If left blank
+# TARGET_nlm (see below) will be used.
+#
+NLM_NAME = proxyht2
+
+#
+# This is used by the link '-desc ' directive.
+# If left blank, NLM_NAME will be used.
+#
+NLM_DESCRIPTION = Apache $(VERSION_STR) HTTP2 Proxy module
+
+#
+# This is used by the '-threadname' directive. If left blank,
+# NLM_NAME Thread will be used.
+#
+NLM_THREAD_NAME = $(NLM_NAME)
+
+#
+# If this is specified, it will override VERSION value in
+# $(AP_WORK)/build/NWGNUenvironment.inc
+#
+NLM_VERSION =
+
+#
+# If this is specified, it will override the default of 64K
+#
+NLM_STACK_SIZE = 65536
+
+#
+# If this is specified it will be used by the link '-entry' directive
+#
+NLM_ENTRY_SYM =
+
+#
+# If this is specified it will be used by the link '-exit' directive
+#
+NLM_EXIT_SYM =
+
+#
+# If this is specified it will be used by the link '-check' directive
+#
+NLM_CHECK_SYM =
+
+#
+# If this is specified it will be used by the link '-flags' directive
+#
+NLM_FLAGS =
+
+#
+# If this is specified it will be linked in with the XDCData option in the def
+# file instead of the default of $(NWOS)/apache.xdc. XDCData can be disabled
+# by setting APACHE_UNIPROC in the environment
+#
+XDCDATA =
+
+#
+# Declare all target files (you must add your files here)
+#
+
+#
+# If there is an NLM target, put it here
+#
+TARGET_nlm = \
+ $(OBJDIR)/$(NLM_NAME).nlm \
+ $(EOLIST)
+
+#
+# If there is an LIB target, put it here
+#
+TARGET_lib = \
+ $(EOLIST)
+
+#
+# These are the OBJ files needed to create the NLM target above.
+# Paths must all use the '/' character
+#
+FILES_nlm_objs = \
+ $(OBJDIR)/mod_proxy_http2.o \
+ $(OBJDIR)/h2_proxy_session.o \
+ $(OBJDIR)/h2_proxy_util.o \
+ $(EOLIST)
+
+#
+# These are the LIB files needed to create the NLM target above.
+# These will be added as a library command in the link.opt file.
+#
+FILES_nlm_libs = \
+ $(PRELUDE) \
+ $(EOLIST)
+
+#
+# These are the modules that the above NLM target depends on to load.
+# These will be added as a module command in the link.opt file.
+#
+FILES_nlm_modules = \
+ Libc \
+ Apache2 \
+ mod_proxy \
+ mod_http2 \
+ $(EOLIST)
+
+#
+# If the nlm has a msg file, put its path here
+#
+FILE_nlm_msg =
+
+#
+# If the nlm has a hlp file, put its path here
+#
+FILE_nlm_hlp =
+
+#
+# If this is specified, it will override $(NWOS)\copyright.txt.
+#
+FILE_nlm_copyright =
+
+#
+# Any additional imports go here
+#
+FILES_nlm_Ximports = \
+ @libc.imp \
+ @aprlib.imp \
+ @httpd.imp \
+ @$(OBJDIR)/mod_http2.imp \
+ ap_proxy_acquire_connection \
+ ap_proxy_canon_netloc \
+ ap_proxy_canonenc \
+ ap_proxy_connect_backend \
+ ap_proxy_connection_create \
+ ap_proxy_cookie_reverse_map \
+ ap_proxy_determine_connection \
+ ap_proxy_location_reverse_map \
+ ap_proxy_port_of_scheme \
+ ap_proxy_release_connection \
+ ap_proxy_ssl_connection_cleanup \
+ ap_sock_disable_nagle \
+ proxy_hook_canon_handler \
+ proxy_hook_scheme_handler \
+ proxy_module \
+ proxy_run_detach_backend \
+ $(EOLIST)
+
+#
+# Any symbols exported go here
+#
+FILES_nlm_exports = \
+ proxy_http2_module \
+ $(EOLIST)
+
+#
+# These are the OBJ files needed to create the LIB target above.
+# Paths must all use the '/' character
+#
+FILES_lib_objs :=
+#
+# implement targets and dependencies (leave this section alone)
+#
+
+libs :: $(OBJDIR) $(TARGET_lib)
+
+nlms :: libs $(TARGET_nlm)
+
+#
+# Update this target to create necessary directories and copy files to the
+# correct place. (See $(AP_WORK)/build/NWGNUhead.inc for examples)
+#
+install :: nlms FORCE
+ $(call COPY,$(OBJDIR)/*.nlm, $(INSTALLBASE)/modules/)
+
+clean ::
+
+#
+# Any specialized rules here
+#
+
+#
+# Include the 'tail' makefile that has targets that depend on variables defined
+# in this makefile
+#
+
+include $(APBUILD)/NWGNUtail.inc
+
+
diff --git a/modules/http2/README.h2 b/modules/http2/README.h2
new file mode 100644
index 0000000..f2956f3
--- /dev/null
+++ b/modules/http2/README.h2
@@ -0,0 +1,70 @@
+The http2 module adds support for the HTTP/2 protocol to the server.
+
+Specifically, it supports the protocols "h2" (HTTP2 over TLS) and "h2c"
+(HTTP2 over plain HTTP connections via Upgrade). Additionally, it offers
+the "direct" mode for both encrypted and unencrypted connections.
+
+You may enable it for the whole server or specific virtual hosts only.
+
+
+BUILD
+
+If you have libnghttp2 (https://nghttp2.org) installed on your system, simply
+add
+
+ --enable-http2
+
+to your httpd ./configure invocation. Should libnghttp2 reside in an unusual
+location, add
+
+ --with-nghttp2=<path>
+
+to ./configure. <path> is expected to be the installation prefix, so there
+should be a <path>/lib/libnghttp2.*. If your system supports pkg-config,
+<path>/lib/pkgconfig/libnghttp2.pc will be inspected.
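+
+For example, assuming libnghttp2 was installed under the hypothetical prefix
+/opt/nghttp2, the two options might be combined as:
+
+ ./configure --enable-http2 --with-nghttp2=/opt/nghttp2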
+
+If you want to link nghttp2 statically into the mod_http2 module, you may,
+similarly to mod_ssl, add
+
+ --enable-nghttp2-staticlib-deps
+
+For this, the lib directory should only contain libnghttp2.a, not its
+shared cousins.
+
+
+CONFIGURATION
+
+Whether mod_http2 is enabled for a site depends on the new "Protocols"
+directive. This directive lists all protocols enabled for a server or
+virtual host.
+
+If you do not specify "Protocols", all available protocols are enabled. For
+sites using TLS, the protocol supported by mod_http2 is "h2". For cleartext
+http:, the offered protocol is "h2c".
+
+The following is an example of a server that only supports http/1.1 in
+general and offers h2 for a specific virtual host.
+
+ ...
+ Protocols http/1.1
+ <virtualhost *:443>
+ Protocols h2 http/1.1
+ ...
+ </virtualhost>
+
+Please see the documentation of mod_http2 for a complete list and explanation
+of all options.
+
+
+TLS CONFIGURATION
+
+If you want to use HTTP/2 with a browser, most modern browsers will support
+it without further configuration. However, browsers so far only support
+HTTP/2 over TLS and are especially picky about the certificate and
+encryption ciphers used.
+
+Server admins may look for up-to-date information about "modern" TLS
+compatibility under:
+
+ https://wiki.mozilla.org/Security/Server_Side_TLS#Modern_compatibility
+
diff --git a/modules/http2/config2.m4 b/modules/http2/config2.m4
new file mode 100644
index 0000000..e8cefe3
--- /dev/null
+++ b/modules/http2/config2.m4
@@ -0,0 +1,235 @@
+dnl Licensed to the Apache Software Foundation (ASF) under one or more
+dnl contributor license agreements. See the NOTICE file distributed with
+dnl this work for additional information regarding copyright ownership.
+dnl The ASF licenses this file to You under the Apache License, Version 2.0
+dnl (the "License"); you may not use this file except in compliance with
+dnl the License. You may obtain a copy of the License at
+dnl
+dnl http://www.apache.org/licenses/LICENSE-2.0
+dnl
+dnl Unless required by applicable law or agreed to in writing, software
+dnl distributed under the License is distributed on an "AS IS" BASIS,
+dnl WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+dnl See the License for the specific language governing permissions and
+dnl limitations under the License.
+
+dnl # start of module specific part
+APACHE_MODPATH_INIT(http2)
+
+dnl # list of module object files
+http2_objs="dnl
+mod_http2.lo dnl
+h2_alt_svc.lo dnl
+h2_bucket_beam.lo dnl
+h2_bucket_eos.lo dnl
+h2_config.lo dnl
+h2_conn.lo dnl
+h2_conn_io.lo dnl
+h2_ctx.lo dnl
+h2_filter.lo dnl
+h2_from_h1.lo dnl
+h2_h2.lo dnl
+h2_headers.lo dnl
+h2_mplx.lo dnl
+h2_ngn_shed.lo dnl
+h2_push.lo dnl
+h2_request.lo dnl
+h2_session.lo dnl
+h2_stream.lo dnl
+h2_switch.lo dnl
+h2_task.lo dnl
+h2_util.lo dnl
+h2_workers.lo dnl
+"
+
+dnl
+dnl APACHE_CHECK_NGHTTP2
+dnl
+dnl Configure for nghttp2, giving preference to
+dnl "--with-nghttp2=<path>" if it was specified.
+dnl
+AC_DEFUN([APACHE_CHECK_NGHTTP2],[
+ AC_CACHE_CHECK([for nghttp2], [ac_cv_nghttp2], [
+ dnl initialise the variables we use
+ ac_cv_nghttp2=no
+ ap_nghttp2_found=""
+ ap_nghttp2_base=""
+ ap_nghttp2_libs=""
+
+ dnl Determine the nghttp2 base directory, if any
+ AC_MSG_CHECKING([for user-provided nghttp2 base directory])
+ AC_ARG_WITH(nghttp2, APACHE_HELP_STRING(--with-nghttp2=PATH, nghttp2 installation directory), [
+ dnl If --with-nghttp2 specifies a directory, we use that directory
+ if test "x$withval" != "xyes" -a "x$withval" != "x"; then
+ dnl This ensures $withval is actually a directory and that it is absolute
+ ap_nghttp2_base="`cd $withval ; pwd`"
+ fi
+ ])
+ if test "x$ap_nghttp2_base" = "x"; then
+ AC_MSG_RESULT(none)
+ else
+ AC_MSG_RESULT($ap_nghttp2_base)
+ fi
+
+ dnl Run header and version checks
+ saved_CPPFLAGS="$CPPFLAGS"
+ saved_LIBS="$LIBS"
+ saved_LDFLAGS="$LDFLAGS"
+
+ dnl Before doing anything else, load in pkg-config variables
+ if test -n "$PKGCONFIG"; then
+ saved_PKG_CONFIG_PATH="$PKG_CONFIG_PATH"
+ AC_MSG_CHECKING([for pkg-config along $PKG_CONFIG_PATH])
+ if test "x$ap_nghttp2_base" != "x" ; then
+ if test -f "${ap_nghttp2_base}/lib/pkgconfig/libnghttp2.pc"; then
+ dnl Ensure that the given path is used by pkg-config too, otherwise
+ dnl the system libnghttp2.pc might be picked up instead.
+ PKG_CONFIG_PATH="${ap_nghttp2_base}/lib/pkgconfig${PKG_CONFIG_PATH+:}${PKG_CONFIG_PATH}"
+ export PKG_CONFIG_PATH
+ elif test -f "${ap_nghttp2_base}/lib64/pkgconfig/libnghttp2.pc"; then
+ dnl Ensure that the given path is used by pkg-config too, otherwise
+ dnl the system libnghttp2.pc might be picked up instead.
+ PKG_CONFIG_PATH="${ap_nghttp2_base}/lib64/pkgconfig${PKG_CONFIG_PATH+:}${PKG_CONFIG_PATH}"
+ export PKG_CONFIG_PATH
+ fi
+ fi
+ AC_ARG_ENABLE(nghttp2-staticlib-deps,APACHE_HELP_STRING(--enable-nghttp2-staticlib-deps,[link mod_http2 with dependencies of libnghttp2's static libraries (as indicated by "pkg-config --static"). Must be specified in addition to --enable-http2.]), [
+ if test "$enableval" = "yes"; then
+ PKGCONFIG_LIBOPTS="--static"
+ fi
+ ])
+ ap_nghttp2_libs="`$PKGCONFIG $PKGCONFIG_LIBOPTS --libs-only-l --silence-errors libnghttp2`"
+ if test $? -eq 0; then
+ ap_nghttp2_found="yes"
+ pkglookup="`$PKGCONFIG --cflags-only-I libnghttp2`"
+ APR_ADDTO(CPPFLAGS, [$pkglookup])
+ APR_ADDTO(MOD_CFLAGS, [$pkglookup])
+ pkglookup="`$PKGCONFIG $PKGCONFIG_LIBOPTS --libs-only-L libnghttp2`"
+ APR_ADDTO(LDFLAGS, [$pkglookup])
+ APR_ADDTO(MOD_LDFLAGS, [$pkglookup])
+ pkglookup="`$PKGCONFIG $PKGCONFIG_LIBOPTS --libs-only-other libnghttp2`"
+ APR_ADDTO(LDFLAGS, [$pkglookup])
+ APR_ADDTO(MOD_LDFLAGS, [$pkglookup])
+ fi
+ PKG_CONFIG_PATH="$saved_PKG_CONFIG_PATH"
+ fi
+
+ dnl fall back to the user-supplied directory if not found via pkg-config
+ if test "x$ap_nghttp2_base" != "x" -a "x$ap_nghttp2_found" = "x"; then
+ APR_ADDTO(CPPFLAGS, [-I$ap_nghttp2_base/include])
+ APR_ADDTO(MOD_CFLAGS, [-I$ap_nghttp2_base/include])
+ APR_ADDTO(LDFLAGS, [-L$ap_nghttp2_base/lib])
+ APR_ADDTO(MOD_LDFLAGS, [-L$ap_nghttp2_base/lib])
+ if test "x$ap_platform_runtime_link_flag" != "x"; then
+ APR_ADDTO(LDFLAGS, [$ap_platform_runtime_link_flag$ap_nghttp2_base/lib])
+ APR_ADDTO(MOD_LDFLAGS, [$ap_platform_runtime_link_flag$ap_nghttp2_base/lib])
+ fi
+ fi
+
+ AC_MSG_CHECKING([for nghttp2 version >= 1.2.1])
+ AC_TRY_COMPILE([#include <nghttp2/nghttp2ver.h>],[
+#if !defined(NGHTTP2_VERSION_NUM)
+#error "Missing nghttp2 version"
+#endif
+#if NGHTTP2_VERSION_NUM < 0x010201
+#error "Unsupported nghttp2 version " NGHTTP2_VERSION_TEXT
+#endif],
+ [AC_MSG_RESULT(OK)
+ ac_cv_nghttp2=yes],
+ [AC_MSG_RESULT(FAILED)])
+
+ if test "x$ac_cv_nghttp2" = "xyes"; then
+ ap_nghttp2_libs="${ap_nghttp2_libs:--lnghttp2} `$apr_config --libs`"
+ APR_ADDTO(MOD_LDFLAGS, [$ap_nghttp2_libs])
+ APR_ADDTO(LIBS, [$ap_nghttp2_libs])
+
+ dnl Run library and function checks
+ liberrors=""
+ AC_CHECK_HEADERS([nghttp2/nghttp2.h])
+ AC_CHECK_FUNCS([nghttp2_session_server_new2], [], [liberrors="yes"])
+ if test "x$liberrors" != "x"; then
+ AC_MSG_WARN([nghttp2 library is unusable])
+ fi
+dnl # nghttp2 >= 1.3.0: access to stream weights
+ AC_CHECK_FUNCS([nghttp2_stream_get_weight], [], [liberrors="yes"])
+ if test "x$liberrors" != "x"; then
+ AC_MSG_WARN([nghttp2 version >= 1.3.0 is required])
+ fi
+dnl # nghttp2 >= 1.5.0: changing stream priorities
+ AC_CHECK_FUNCS([nghttp2_session_change_stream_priority],
+ [APR_ADDTO(MOD_CPPFLAGS, ["-DH2_NG2_CHANGE_PRIO"])], [])
+dnl # nghttp2 >= 1.14.0: invalid header callback
+ AC_CHECK_FUNCS([nghttp2_session_callbacks_set_on_invalid_header_callback],
+ [APR_ADDTO(MOD_CPPFLAGS, ["-DH2_NG2_INVALID_HEADER_CB"])], [])
+dnl # nghttp2 >= 1.15.0: get/set stream window sizes
+ AC_CHECK_FUNCS([nghttp2_session_get_stream_local_window_size],
+ [APR_ADDTO(MOD_CPPFLAGS, ["-DH2_NG2_LOCAL_WIN_SIZE"])], [])
+ else
+ AC_MSG_WARN([nghttp2 version is too old])
+ fi
+
+ dnl restore
+ CPPFLAGS="$saved_CPPFLAGS"
+ LIBS="$saved_LIBS"
+ LDFLAGS="$saved_LDFLAGS"
+ ])
+ if test "x$ac_cv_nghttp2" = "xyes"; then
+ AC_DEFINE(HAVE_NGHTTP2, 1, [Define if nghttp2 is available])
+ fi
+])
+
+
+dnl # hook module into the Autoconf mechanism (--enable-http2)
+APACHE_MODULE(http2, [HTTP/2 protocol handling in addition to HTTP protocol
+handling. Implemented by mod_http2. This module requires a libnghttp2 installation.
+See --with-nghttp2 on how to manage non-standard locations. This module
+is usually linked shared and requires loading. ], $http2_objs, , most, [
+ APACHE_CHECK_OPENSSL
+ if test "$ac_cv_openssl" = "yes" ; then
+ APR_ADDTO(MOD_CPPFLAGS, ["-DH2_OPENSSL"])
+ fi
+
+ APACHE_CHECK_NGHTTP2
+ if test "$ac_cv_nghttp2" = "yes" ; then
+ if test "x$enable_http2" = "xshared"; then
+ # The only symbol which needs to be exported is the module
+ # structure, so ask libtool to hide everything else:
+ APR_ADDTO(MOD_HTTP2_LDADD, [-export-symbols-regex http2_module])
+ fi
+ else
+ enable_http2=no
+ fi
+])
+
+# Ensure that other modules can pick up mod_http2.h
+# icing: hold back for now until it is more stable
+#APR_ADDTO(INCLUDES, [-I\$(top_srcdir)/$modpath_current])
+
+
+
+dnl # list of module object files
+proxy_http2_objs="dnl
+mod_proxy_http2.lo dnl
+h2_proxy_session.lo dnl
+h2_proxy_util.lo dnl
+"
+
+dnl # hook module into the Autoconf mechanism (--enable-proxy_http2)
+APACHE_MODULE(proxy_http2, [HTTP/2 proxy module. This module requires a libnghttp2 installation.
+See --with-nghttp2 on how to manage non-standard locations. Also requires --enable-proxy.], $proxy_http2_objs, , no, [
+ APACHE_CHECK_NGHTTP2
+ if test "$ac_cv_nghttp2" = "yes" ; then
+ if test "x$enable_http2" = "xshared"; then
+ # The only symbol which needs to be exported is the module
+ # structure, so ask libtool to hide everything else:
+ APR_ADDTO(MOD_PROXY_HTTP2_LDADD, [-export-symbols-regex proxy_http2_module])
+ fi
+ else
+ enable_proxy_http2=no
+ fi
+], proxy)
+
+
+dnl # end of module specific part
+APACHE_MODPATH_FINISH
+
diff --git a/modules/http2/h2.h b/modules/http2/h2.h
new file mode 100644
index 0000000..38b4019
--- /dev/null
+++ b/modules/http2/h2.h
@@ -0,0 +1,166 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __mod_h2__h2__
+#define __mod_h2__h2__
+
+/**
+ * The magic PRIamble of RFC 7540 that is always sent when starting
+ * an h2 communication.
+ */
+extern const char *H2_MAGIC_TOKEN;
+
+#define H2_ERR_NO_ERROR (0x00)
+#define H2_ERR_PROTOCOL_ERROR (0x01)
+#define H2_ERR_INTERNAL_ERROR (0x02)
+#define H2_ERR_FLOW_CONTROL_ERROR (0x03)
+#define H2_ERR_SETTINGS_TIMEOUT (0x04)
+#define H2_ERR_STREAM_CLOSED (0x05)
+#define H2_ERR_FRAME_SIZE_ERROR (0x06)
+#define H2_ERR_REFUSED_STREAM (0x07)
+#define H2_ERR_CANCEL (0x08)
+#define H2_ERR_COMPRESSION_ERROR (0x09)
+#define H2_ERR_CONNECT_ERROR (0x0a)
+#define H2_ERR_ENHANCE_YOUR_CALM (0x0b)
+#define H2_ERR_INADEQUATE_SECURITY (0x0c)
+#define H2_ERR_HTTP_1_1_REQUIRED (0x0d)
+
+#define H2_HEADER_METHOD ":method"
+#define H2_HEADER_METHOD_LEN 7
+#define H2_HEADER_SCHEME ":scheme"
+#define H2_HEADER_SCHEME_LEN 7
+#define H2_HEADER_AUTH ":authority"
+#define H2_HEADER_AUTH_LEN 10
+#define H2_HEADER_PATH ":path"
+#define H2_HEADER_PATH_LEN 5
+#define H2_CRLF "\r\n"
+
+/* Max data size to write so it fits inside a TLS record: 16K minus some
+ * headroom and the 9 byte HTTP/2 frame header */
+#define H2_DATA_CHUNK_SIZE ((16*1024) - 100 - 9)
+
+/* Size of the frame header itself in HTTP/2 */
+#define H2_FRAME_HDR_LEN 9
+
+/* Maximum number of padding bytes in a frame, rfc7540 */
+#define H2_MAX_PADLEN 256
+/* Initial default window size, RFC 7540 ch. 6.5.2 */
+#define H2_INITIAL_WINDOW_SIZE ((64*1024)-1)
+
+#define H2_STREAM_CLIENT_INITIATED(id) (id&0x01)
+
+#define H2_ALEN(a) (sizeof(a)/sizeof((a)[0]))
+
+#define H2MAX(x,y) ((x) > (y) ? (x) : (y))
+#define H2MIN(x,y) ((x) < (y) ? (x) : (y))
+
+typedef enum {
+ H2_DEPENDANT_AFTER,
+ H2_DEPENDANT_INTERLEAVED,
+ H2_DEPENDANT_BEFORE,
+} h2_dependency;
+
+typedef struct h2_priority {
+ h2_dependency dependency;
+ int weight;
+} h2_priority;
+
+typedef enum {
+ H2_PUSH_NONE,
+ H2_PUSH_DEFAULT,
+ H2_PUSH_HEAD,
+ H2_PUSH_FAST_LOAD,
+} h2_push_policy;
+
+typedef enum {
+ H2_SESSION_ST_INIT, /* send initial SETTINGS, etc. */
+ H2_SESSION_ST_DONE, /* finished, connection close */
+    H2_SESSION_ST_IDLE,     /* nothing to write, expecting incoming data */
+ H2_SESSION_ST_BUSY, /* read/write without stop */
+ H2_SESSION_ST_WAIT, /* waiting for tasks reporting back */
+ H2_SESSION_ST_CLEANUP, /* pool is being cleaned up */
+} h2_session_state;
+
+typedef struct h2_session_props {
+    int accepted_max;      /* the highest remote stream id that was/will be handled */
+ int completed_max; /* the highest remote stream completed */
+ int emitted_count; /* the number of local streams sent */
+ int emitted_max; /* the highest local stream id sent */
+ int error; /* the last session error encountered */
+ unsigned int accepting : 1; /* if the session is accepting new streams */
+ unsigned int shutdown : 1; /* if the final GOAWAY has been sent */
+} h2_session_props;
+
+typedef enum h2_stream_state_t {
+ H2_SS_IDLE,
+ H2_SS_RSVD_R,
+ H2_SS_RSVD_L,
+ H2_SS_OPEN,
+ H2_SS_CLOSED_R,
+ H2_SS_CLOSED_L,
+ H2_SS_CLOSED,
+ H2_SS_CLEANUP,
+ H2_SS_MAX
+} h2_stream_state_t;
+
+typedef enum {
+ H2_SEV_CLOSED_L,
+ H2_SEV_CLOSED_R,
+ H2_SEV_CANCELLED,
+ H2_SEV_EOS_SENT,
+ H2_SEV_IN_DATA_PENDING,
+} h2_stream_event_t;
+
+
+/* h2_request is the transformer of HTTP2 streams into HTTP/1.1 internal
+ * format that will be fed to various httpd input filters to finally
+ * become a request_rec to be handled by someone.
+ */
+typedef struct h2_request h2_request;
+
+struct h2_request {
+ const char *method; /* pseudo header values, see ch. 8.1.2.3 */
+ const char *scheme;
+ const char *authority;
+ const char *path;
+ apr_table_t *headers;
+
+ apr_time_t request_time;
+    unsigned int chunked : 1;   /* iff request body needs to be forwarded as chunked */
+ unsigned int serialize : 1; /* iff this request is written in HTTP/1.1 serialization */
+ apr_off_t raw_bytes; /* RAW network bytes that generated this request - if known. */
+};
+
+typedef struct h2_headers h2_headers;
+
+struct h2_headers {
+ int status;
+ apr_table_t *headers;
+ apr_table_t *notes;
+ apr_off_t raw_bytes; /* RAW network bytes that generated this request - if known. */
+};
+
+typedef apr_status_t h2_io_data_cb(void *ctx, const char *data, apr_off_t len);
+
+typedef int h2_stream_pri_cmp(int stream_id1, int stream_id2, void *ctx);
+
+/* Note key to attach connection task id to conn_rec/request_rec instances */
+
+#define H2_TASK_ID_NOTE "http2-task-id"
+#define H2_FILTER_DEBUG_NOTE "http2-debug"
+#define H2_HDR_CONFORMANCE "http2-hdr-conformance"
+#define H2_HDR_CONFORMANCE_UNSAFE "unsafe"
+
+#endif /* defined(__mod_h2__h2__) */
diff --git a/modules/http2/h2_alt_svc.c b/modules/http2/h2_alt_svc.c
new file mode 100644
index 0000000..295a16d
--- /dev/null
+++ b/modules/http2/h2_alt_svc.c
@@ -0,0 +1,131 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <apr_strings.h>
+#include <httpd.h>
+#include <http_core.h>
+#include <http_connection.h>
+#include <http_protocol.h>
+#include <http_log.h>
+
+#include "h2_private.h"
+#include "h2_alt_svc.h"
+#include "h2_ctx.h"
+#include "h2_config.h"
+#include "h2_h2.h"
+#include "h2_util.h"
+
+static int h2_alt_svc_handler(request_rec *r);
+
+void h2_alt_svc_register_hooks(void)
+{
+ ap_hook_post_read_request(h2_alt_svc_handler, NULL, NULL, APR_HOOK_MIDDLE);
+}
+
+/**
+ * Parse an Alt-Svc specifier as described in "HTTP Alternative Services"
+ * (https://tools.ietf.org/html/draft-ietf-httpbis-alt-svc-04)
+ * with the following changes:
+ * - do not percent encode token values
+ * - do not use quotation marks
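+ *
+ * The accepted form is "<alpn>=[host]:port"; purely illustrative examples:
+ * "h2=alt.example.org:443" (with a host) or "h2c=:8080" (port only).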
+ */
+h2_alt_svc *h2_alt_svc_parse(const char *s, apr_pool_t *pool)
+{
+ const char *sep = ap_strchr_c(s, '=');
+ if (sep) {
+ const char *alpn = apr_pstrmemdup(pool, s, sep - s);
+ const char *host = NULL;
+ int port = 0;
+ s = sep + 1;
+ sep = ap_strchr_c(s, ':'); /* mandatory : */
+ if (sep) {
+ if (sep != s) { /* optional host */
+ host = apr_pstrmemdup(pool, s, sep - s);
+ }
+ s = sep + 1;
+ if (*s) { /* must be a port number */
+ port = (int)apr_atoi64(s);
+ if (port > 0 && port < (0x1 << 16)) {
+ h2_alt_svc *as = apr_pcalloc(pool, sizeof(*as));
+ as->alpn = alpn;
+ as->host = host;
+ as->port = port;
+ return as;
+ }
+ }
+ }
+ }
+ return NULL;
+}
+
+#define h2_alt_svc_IDX(list, i) ((h2_alt_svc**)(list)->elts)[i]
+
+static int h2_alt_svc_handler(request_rec *r)
+{
+ const h2_config *cfg;
+ int i;
+
+ if (r->connection->keepalives > 0) {
+ /* Only announce Alt-Svc on the first response */
+ return DECLINED;
+ }
+
+ if (h2_ctx_rget(r)) {
+ return DECLINED;
+ }
+
+ cfg = h2_config_sget(r->server);
+ if (r->hostname && cfg && cfg->alt_svcs && cfg->alt_svcs->nelts > 0) {
+ const char *alt_svc_used = apr_table_get(r->headers_in, "Alt-Svc-Used");
+ if (!alt_svc_used) {
+            /* We have alt-svcs defined and the client is not already using
+             * one, so announce the services that were configured and match.
+             * Whether this connection is secure determines if we may announce
+             * alternative host names or only alternative ports on this host.
+ */
+ const char *alt_svc = "";
+ const char *svc_ma = "";
+ int secure = h2_h2_is_tls(r->connection);
+ int ma = h2_config_geti(cfg, H2_CONF_ALT_SVC_MAX_AGE);
+ if (ma >= 0) {
+ svc_ma = apr_psprintf(r->pool, "; ma=%d", ma);
+ }
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(03043)
+ "h2_alt_svc: announce %s for %s:%d",
+ (secure? "secure" : "insecure"),
+ r->hostname, (int)r->server->port);
+ for (i = 0; i < cfg->alt_svcs->nelts; ++i) {
+ h2_alt_svc *as = h2_alt_svc_IDX(cfg->alt_svcs, i);
+ const char *ahost = as->host;
+ if (ahost && !apr_strnatcasecmp(ahost, r->hostname)) {
+ ahost = NULL;
+ }
+ if (secure || !ahost) {
+ alt_svc = apr_psprintf(r->pool, "%s%s%s=\"%s:%d\"%s",
+ alt_svc,
+ (*alt_svc? ", " : ""), as->alpn,
+ ahost? ahost : "", as->port,
+ svc_ma);
+ }
+ }
+ if (*alt_svc) {
+ apr_table_setn(r->headers_out, "Alt-Svc", alt_svc);
+ }
+ }
+ }
+
+ return DECLINED;
+}
diff --git a/modules/http2/h2_alt_svc.h b/modules/http2/h2_alt_svc.h
new file mode 100644
index 0000000..479e4d1
--- /dev/null
+++ b/modules/http2/h2_alt_svc.h
@@ -0,0 +1,40 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __mod_h2__h2_alt_svc__
+#define __mod_h2__h2_alt_svc__
+
+typedef struct h2_alt_svc h2_alt_svc;
+
+struct h2_alt_svc {
+ const char *alpn;
+ const char *host;
+ int port;
+};
+
+void h2_alt_svc_register_hooks(void);
+
+/**
+ * Parse an Alt-Svc specifier as described in "HTTP Alternative Services"
+ * (https://tools.ietf.org/html/draft-ietf-httpbis-alt-svc-04)
+ * with the following changes:
+ * - do not percent encode token values
+ * - do not use quotation marks
+ */
+h2_alt_svc *h2_alt_svc_parse(const char *s, apr_pool_t *pool);
+
+
+#endif /* defined(__mod_h2__h2_alt_svc__) */
diff --git a/modules/http2/h2_bucket_beam.c b/modules/http2/h2_bucket_beam.c
new file mode 100644
index 0000000..f79cbe3
--- /dev/null
+++ b/modules/http2/h2_bucket_beam.c
@@ -0,0 +1,1283 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <apr_lib.h>
+#include <apr_atomic.h>
+#include <apr_strings.h>
+#include <apr_time.h>
+#include <apr_buckets.h>
+#include <apr_thread_mutex.h>
+#include <apr_thread_cond.h>
+
+#include <httpd.h>
+#include <http_protocol.h>
+#include <http_log.h>
+
+#include "h2_private.h"
+#include "h2_util.h"
+#include "h2_bucket_beam.h"
+
+static void h2_beam_emitted(h2_bucket_beam *beam, h2_beam_proxy *proxy);
+
+#define H2_BPROXY_NEXT(e) APR_RING_NEXT((e), link)
+#define H2_BPROXY_PREV(e) APR_RING_PREV((e), link)
+#define H2_BPROXY_REMOVE(e) APR_RING_REMOVE((e), link)
+
+#define H2_BPROXY_LIST_INIT(b) APR_RING_INIT(&(b)->list, h2_beam_proxy, link);
+#define H2_BPROXY_LIST_SENTINEL(b) APR_RING_SENTINEL(&(b)->list, h2_beam_proxy, link)
+#define H2_BPROXY_LIST_EMPTY(b) APR_RING_EMPTY(&(b)->list, h2_beam_proxy, link)
+#define H2_BPROXY_LIST_FIRST(b) APR_RING_FIRST(&(b)->list)
+#define H2_BPROXY_LIST_LAST(b) APR_RING_LAST(&(b)->list)
+#define H2_PROXY_BLIST_INSERT_HEAD(b, e) do { \
+ h2_beam_proxy *ap__b = (e); \
+ APR_RING_INSERT_HEAD(&(b)->list, ap__b, h2_beam_proxy, link); \
+ } while (0)
+#define H2_BPROXY_LIST_INSERT_TAIL(b, e) do { \
+ h2_beam_proxy *ap__b = (e); \
+ APR_RING_INSERT_TAIL(&(b)->list, ap__b, h2_beam_proxy, link); \
+ } while (0)
+#define H2_BPROXY_LIST_CONCAT(a, b) do { \
+ APR_RING_CONCAT(&(a)->list, &(b)->list, h2_beam_proxy, link); \
+ } while (0)
+#define H2_BPROXY_LIST_PREPEND(a, b) do { \
+ APR_RING_PREPEND(&(a)->list, &(b)->list, h2_beam_proxy, link); \
+ } while (0)
+
+
+/*******************************************************************************
+ * beam bucket with reference to beam and bucket it represents
+ ******************************************************************************/
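+
+/* An h2_beam_proxy is the receiver-side stand-in for a sender bucket: reading
+ * it delegates to the wrapped sender bucket, and destroying its last reference
+ * calls h2_beam_emitted() so the beam can move the sender bucket from its hold
+ * list to its purge list. */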
+
+const apr_bucket_type_t h2_bucket_type_beam;
+
+#define H2_BUCKET_IS_BEAM(e) (e->type == &h2_bucket_type_beam)
+
+struct h2_beam_proxy {
+ apr_bucket_refcount refcount;
+ APR_RING_ENTRY(h2_beam_proxy) link;
+ h2_bucket_beam *beam;
+ apr_bucket *bsender;
+ apr_size_t n;
+};
+
+static const char Dummy = '\0';
+
+static apr_status_t beam_bucket_read(apr_bucket *b, const char **str,
+ apr_size_t *len, apr_read_type_e block)
+{
+ h2_beam_proxy *d = b->data;
+ if (d->bsender) {
+ const char *data;
+ apr_status_t status = apr_bucket_read(d->bsender, &data, len, block);
+ if (status == APR_SUCCESS) {
+ *str = data + b->start;
+ *len = b->length;
+ }
+ return status;
+ }
+ *str = &Dummy;
+ *len = 0;
+ return APR_ECONNRESET;
+}
+
+static void beam_bucket_destroy(void *data)
+{
+ h2_beam_proxy *d = data;
+
+ if (apr_bucket_shared_destroy(d)) {
+ /* When the beam gets destroyed before this bucket, it will
+ * NULLify its reference here. This is not protected by a mutex,
+ * so it will not help with race conditions.
+         * But it lets us shut down memory pools with circular beam
+ * references. */
+ if (d->beam) {
+ h2_beam_emitted(d->beam, d);
+ }
+ apr_bucket_free(d);
+ }
+}
+
+static apr_bucket * h2_beam_bucket_make(apr_bucket *b,
+ h2_bucket_beam *beam,
+ apr_bucket *bsender, apr_size_t n)
+{
+ h2_beam_proxy *d;
+
+ d = apr_bucket_alloc(sizeof(*d), b->list);
+ H2_BPROXY_LIST_INSERT_TAIL(&beam->proxies, d);
+ d->beam = beam;
+ d->bsender = bsender;
+ d->n = n;
+
+ b = apr_bucket_shared_make(b, d, 0, bsender? bsender->length : 0);
+ b->type = &h2_bucket_type_beam;
+
+ return b;
+}
+
+static apr_bucket *h2_beam_bucket_create(h2_bucket_beam *beam,
+ apr_bucket *bsender,
+ apr_bucket_alloc_t *list,
+ apr_size_t n)
+{
+ apr_bucket *b = apr_bucket_alloc(sizeof(*b), list);
+
+ APR_BUCKET_INIT(b);
+ b->free = apr_bucket_free;
+ b->list = list;
+ return h2_beam_bucket_make(b, beam, bsender, n);
+}
+
+const apr_bucket_type_t h2_bucket_type_beam = {
+ "BEAM", 5, APR_BUCKET_DATA,
+ beam_bucket_destroy,
+ beam_bucket_read,
+ apr_bucket_setaside_noop,
+ apr_bucket_shared_split,
+ apr_bucket_shared_copy
+};
+
+/*******************************************************************************
+ * h2_blist, a brigade without allocations
+ ******************************************************************************/
+
+static apr_array_header_t *beamers;
+
+static apr_status_t cleanup_beamers(void *dummy)
+{
+ (void)dummy;
+ beamers = NULL;
+ return APR_SUCCESS;
+}
+
+void h2_register_bucket_beamer(h2_bucket_beamer *beamer)
+{
+ if (!beamers) {
+ apr_pool_cleanup_register(apr_hook_global_pool, NULL,
+ cleanup_beamers, apr_pool_cleanup_null);
+ beamers = apr_array_make(apr_hook_global_pool, 10,
+ sizeof(h2_bucket_beamer*));
+ }
+ APR_ARRAY_PUSH(beamers, h2_bucket_beamer*) = beamer;
+}
+
+static apr_bucket *h2_beam_bucket(h2_bucket_beam *beam,
+ apr_bucket_brigade *dest,
+ const apr_bucket *src)
+{
+ apr_bucket *b = NULL;
+ int i;
+ if (beamers) {
+ for (i = 0; i < beamers->nelts && b == NULL; ++i) {
+ h2_bucket_beamer *beamer;
+
+ beamer = APR_ARRAY_IDX(beamers, i, h2_bucket_beamer*);
+ b = beamer(beam, dest, src);
+ }
+ }
+ return b;
+}
+
+
+/*******************************************************************************
+ * bucket beam that can transport buckets across threads
+ ******************************************************************************/
+
+static void mutex_leave(void *ctx, apr_thread_mutex_t *lock)
+{
+ apr_thread_mutex_unlock(lock);
+}
+
+static apr_status_t mutex_enter(void *ctx, h2_beam_lock *pbl)
+{
+ h2_bucket_beam *beam = ctx;
+ pbl->mutex = beam->lock;
+ pbl->leave = mutex_leave;
+ return apr_thread_mutex_lock(pbl->mutex);
+}
+
+static apr_status_t enter_yellow(h2_bucket_beam *beam, h2_beam_lock *pbl)
+{
+ return mutex_enter(beam, pbl);
+}
+
+static void leave_yellow(h2_bucket_beam *beam, h2_beam_lock *pbl)
+{
+ if (pbl->leave) {
+ pbl->leave(pbl->leave_ctx, pbl->mutex);
+ }
+}
+
+static apr_off_t bucket_mem_used(apr_bucket *b)
+{
+ if (APR_BUCKET_IS_FILE(b)) {
+ return 0;
+ }
+ else {
+ /* should all have determinate length */
+ return b->length;
+ }
+}
+
+static int report_consumption(h2_bucket_beam *beam, h2_beam_lock *pbl)
+{
+ int rv = 0;
+ apr_off_t len = beam->received_bytes - beam->cons_bytes_reported;
+ h2_beam_io_callback *cb = beam->cons_io_cb;
+
+ if (len > 0) {
+ if (cb) {
+ void *ctx = beam->cons_ctx;
+
+ if (pbl) leave_yellow(beam, pbl);
+ cb(ctx, beam, len);
+ if (pbl) enter_yellow(beam, pbl);
+ rv = 1;
+ }
+ beam->cons_bytes_reported += len;
+ }
+ return rv;
+}
+
+static void report_prod_io(h2_bucket_beam *beam, int force, h2_beam_lock *pbl)
+{
+ apr_off_t len = beam->sent_bytes - beam->prod_bytes_reported;
+ if (force || len > 0) {
+ h2_beam_io_callback *cb = beam->prod_io_cb;
+ if (cb) {
+ void *ctx = beam->prod_ctx;
+
+ leave_yellow(beam, pbl);
+ cb(ctx, beam, len);
+ enter_yellow(beam, pbl);
+ }
+ beam->prod_bytes_reported += len;
+ }
+}
+
+static apr_size_t calc_buffered(h2_bucket_beam *beam)
+{
+ apr_size_t len = 0;
+ apr_bucket *b;
+ for (b = H2_BLIST_FIRST(&beam->send_list);
+ b != H2_BLIST_SENTINEL(&beam->send_list);
+ b = APR_BUCKET_NEXT(b)) {
+ if (b->length == ((apr_size_t)-1)) {
+ /* do not count */
+ }
+ else if (APR_BUCKET_IS_FILE(b)) {
+ /* if unread, has no real mem footprint. */
+ }
+ else {
+ len += b->length;
+ }
+ }
+ return len;
+}
+
+static void r_purge_sent(h2_bucket_beam *beam)
+{
+ apr_bucket *b;
+ /* delete all sender buckets in purge brigade, needs to be called
+ * from sender thread only */
+ while (!H2_BLIST_EMPTY(&beam->purge_list)) {
+ b = H2_BLIST_FIRST(&beam->purge_list);
+ apr_bucket_delete(b);
+ }
+}
+
+static apr_size_t calc_space_left(h2_bucket_beam *beam)
+{
+ if (beam->max_buf_size > 0) {
+ apr_off_t len = calc_buffered(beam);
+ return (beam->max_buf_size > len? (beam->max_buf_size - len) : 0);
+ }
+ return APR_SIZE_MAX;
+}
+
+static int buffer_is_empty(h2_bucket_beam *beam)
+{
+ return ((!beam->recv_buffer || APR_BRIGADE_EMPTY(beam->recv_buffer))
+ && H2_BLIST_EMPTY(&beam->send_list));
+}
+
+static apr_status_t wait_empty(h2_bucket_beam *beam, apr_read_type_e block,
+ apr_thread_mutex_t *lock)
+{
+ apr_status_t rv = APR_SUCCESS;
+
+ while (!buffer_is_empty(beam) && APR_SUCCESS == rv) {
+ if (APR_BLOCK_READ != block || !lock) {
+ rv = APR_EAGAIN;
+ }
+ else if (beam->timeout > 0) {
+ rv = apr_thread_cond_timedwait(beam->change, lock, beam->timeout);
+ }
+ else {
+ rv = apr_thread_cond_wait(beam->change, lock);
+ }
+ }
+ return rv;
+}
+
+static apr_status_t wait_not_empty(h2_bucket_beam *beam, apr_read_type_e block,
+ apr_thread_mutex_t *lock)
+{
+ apr_status_t rv = APR_SUCCESS;
+
+ while (buffer_is_empty(beam) && APR_SUCCESS == rv) {
+ if (beam->aborted) {
+ rv = APR_ECONNABORTED;
+ }
+ else if (beam->closed) {
+ rv = APR_EOF;
+ }
+ else if (APR_BLOCK_READ != block || !lock) {
+ rv = APR_EAGAIN;
+ }
+ else if (beam->timeout > 0) {
+ rv = apr_thread_cond_timedwait(beam->change, lock, beam->timeout);
+ }
+ else {
+ rv = apr_thread_cond_wait(beam->change, lock);
+ }
+ }
+ return rv;
+}
+
+static apr_status_t wait_not_full(h2_bucket_beam *beam, apr_read_type_e block,
+ apr_size_t *pspace_left, h2_beam_lock *bl)
+{
+ apr_status_t rv = APR_SUCCESS;
+ apr_size_t left;
+
+ while (0 == (left = calc_space_left(beam)) && APR_SUCCESS == rv) {
+ if (beam->aborted) {
+ rv = APR_ECONNABORTED;
+ }
+ else if (block != APR_BLOCK_READ || !bl->mutex) {
+ rv = APR_EAGAIN;
+ }
+ else {
+ if (beam->timeout > 0) {
+ rv = apr_thread_cond_timedwait(beam->change, bl->mutex, beam->timeout);
+ }
+ else {
+ rv = apr_thread_cond_wait(beam->change, bl->mutex);
+ }
+ }
+ }
+ *pspace_left = left;
+ return rv;
+}
+
+static void h2_beam_emitted(h2_bucket_beam *beam, h2_beam_proxy *proxy)
+{
+ h2_beam_lock bl;
+ apr_bucket *b, *next;
+
+ if (enter_yellow(beam, &bl) == APR_SUCCESS) {
+ /* even when beam buckets are split, only the one where
+ * refcount drops to 0 will call us */
+ H2_BPROXY_REMOVE(proxy);
+ /* invoked from receiver thread, the last beam bucket for the send
+ * bucket is about to be destroyed.
+ * remove it from the hold, where it should be now */
+ if (proxy->bsender) {
+ for (b = H2_BLIST_FIRST(&beam->hold_list);
+ b != H2_BLIST_SENTINEL(&beam->hold_list);
+ b = APR_BUCKET_NEXT(b)) {
+ if (b == proxy->bsender) {
+ break;
+ }
+ }
+ if (b != H2_BLIST_SENTINEL(&beam->hold_list)) {
+ /* bucket is in hold as it should be, mark this one
+ * and all before it for purging. We might have placed meta
+                 * buckets without a receiver proxy into the hold before it;
+                 * those are scheduled for purging now as well */
+ for (b = H2_BLIST_FIRST(&beam->hold_list);
+ b != H2_BLIST_SENTINEL(&beam->hold_list);
+ b = next) {
+ next = APR_BUCKET_NEXT(b);
+ if (b == proxy->bsender) {
+ APR_BUCKET_REMOVE(b);
+ H2_BLIST_INSERT_TAIL(&beam->purge_list, b);
+ break;
+ }
+ else if (APR_BUCKET_IS_METADATA(b)) {
+ APR_BUCKET_REMOVE(b);
+ H2_BLIST_INSERT_TAIL(&beam->purge_list, b);
+ }
+ else {
+ /* another data bucket before this one in hold. this
+ * is normal since DATA buckets need not be destroyed
+ * in order */
+ }
+ }
+
+ proxy->bsender = NULL;
+ }
+ else {
+ /* it should be there unless we screwed up */
+ ap_log_perror(APLOG_MARK, APLOG_WARNING, 0, beam->send_pool,
+ APLOGNO(03384) "h2_beam(%d-%s): emitted bucket not "
+ "in hold, n=%d", beam->id, beam->tag,
+ (int)proxy->n);
+ ap_assert(!proxy->bsender);
+ }
+ }
+ /* notify anyone waiting on space to become available */
+ if (!bl.mutex) {
+ r_purge_sent(beam);
+ }
+ else {
+ apr_thread_cond_broadcast(beam->change);
+ }
+ leave_yellow(beam, &bl);
+ }
+}
+
+static void h2_blist_cleanup(h2_blist *bl)
+{
+ apr_bucket *e;
+
+ while (!H2_BLIST_EMPTY(bl)) {
+ e = H2_BLIST_FIRST(bl);
+ apr_bucket_delete(e);
+ }
+}
+
+static apr_status_t beam_close(h2_bucket_beam *beam)
+{
+ if (!beam->closed) {
+ beam->closed = 1;
+ apr_thread_cond_broadcast(beam->change);
+ }
+ return APR_SUCCESS;
+}
+
+int h2_beam_is_closed(h2_bucket_beam *beam)
+{
+ return beam->closed;
+}
+
+static int pool_register(h2_bucket_beam *beam, apr_pool_t *pool,
+ apr_status_t (*cleanup)(void *))
+{
+ if (pool && pool != beam->pool) {
+ apr_pool_pre_cleanup_register(pool, beam, cleanup);
+ return 1;
+ }
+ return 0;
+}
+
+static int pool_kill(h2_bucket_beam *beam, apr_pool_t *pool,
+ apr_status_t (*cleanup)(void *)) {
+ if (pool && pool != beam->pool) {
+ apr_pool_cleanup_kill(pool, beam, cleanup);
+ return 1;
+ }
+ return 0;
+}
+
+static apr_status_t beam_recv_cleanup(void *data)
+{
+ h2_bucket_beam *beam = data;
+ /* receiver pool has gone away, clear references */
+ beam->recv_buffer = NULL;
+ beam->recv_pool = NULL;
+ return APR_SUCCESS;
+}
+
+static apr_status_t beam_send_cleanup(void *data)
+{
+ h2_bucket_beam *beam = data;
+ /* sender is going away, clear up all references to its memory */
+ r_purge_sent(beam);
+ h2_blist_cleanup(&beam->send_list);
+ report_consumption(beam, NULL);
+ while (!H2_BPROXY_LIST_EMPTY(&beam->proxies)) {
+ h2_beam_proxy *proxy = H2_BPROXY_LIST_FIRST(&beam->proxies);
+ H2_BPROXY_REMOVE(proxy);
+ proxy->beam = NULL;
+ proxy->bsender = NULL;
+ }
+ h2_blist_cleanup(&beam->purge_list);
+ h2_blist_cleanup(&beam->hold_list);
+ beam->send_pool = NULL;
+ return APR_SUCCESS;
+}
+
+static void beam_set_send_pool(h2_bucket_beam *beam, apr_pool_t *pool)
+{
+ if (beam->send_pool != pool) {
+ if (beam->send_pool && beam->send_pool != beam->pool) {
+ pool_kill(beam, beam->send_pool, beam_send_cleanup);
+ beam_send_cleanup(beam);
+ }
+ beam->send_pool = pool;
+ pool_register(beam, beam->send_pool, beam_send_cleanup);
+ }
+}
+
+static void recv_buffer_cleanup(h2_bucket_beam *beam, h2_beam_lock *bl)
+{
+ if (beam->recv_buffer && !APR_BRIGADE_EMPTY(beam->recv_buffer)) {
+ apr_bucket_brigade *bb = beam->recv_buffer;
+ apr_off_t bblen = 0;
+
+ beam->recv_buffer = NULL;
+ apr_brigade_length(bb, 0, &bblen);
+ beam->received_bytes += bblen;
+
+ /* need to do this unlocked since bucket destroy might
+ * call this beam again. */
+ if (bl) leave_yellow(beam, bl);
+ apr_brigade_destroy(bb);
+ if (bl) enter_yellow(beam, bl);
+
+ apr_thread_cond_broadcast(beam->change);
+ if (beam->cons_ev_cb) {
+ beam->cons_ev_cb(beam->cons_ctx, beam);
+ }
+ }
+}
+
+static apr_status_t beam_cleanup(h2_bucket_beam *beam, int from_pool)
+{
+ apr_status_t status = APR_SUCCESS;
+ int safe_send = (beam->owner == H2_BEAM_OWNER_SEND);
+ int safe_recv = (beam->owner == H2_BEAM_OWNER_RECV);
+
+ /*
+ * Owner of the beam is going away, depending on which side it owns,
+ * cleanup strategies will differ.
+ *
+ * In general, receiver holds references to memory from sender.
+ * Clean up receiver first, if safe, then cleanup sender, if safe.
+ */
+
+ /* When called from pool destroy, io callbacks are disabled */
+ if (from_pool) {
+ beam->cons_io_cb = NULL;
+ }
+
+    /* When modifying the send side is not safe, we still have multi-thread
+ * protection and the owner is receiving the buckets. If the sending
+ * side has not gone away, this means we could have dangling buckets
+ * in our lists that never get destroyed. This should not happen. */
+ ap_assert(safe_send || !beam->send_pool);
+ if (!H2_BLIST_EMPTY(&beam->send_list)) {
+ ap_assert(beam->send_pool);
+ }
+
+ if (safe_recv) {
+ if (beam->recv_pool) {
+ pool_kill(beam, beam->recv_pool, beam_recv_cleanup);
+ beam->recv_pool = NULL;
+ }
+ recv_buffer_cleanup(beam, NULL);
+ }
+ else {
+ beam->recv_buffer = NULL;
+ beam->recv_pool = NULL;
+ }
+
+ if (safe_send && beam->send_pool) {
+ pool_kill(beam, beam->send_pool, beam_send_cleanup);
+ status = beam_send_cleanup(beam);
+ }
+
+ if (safe_recv) {
+ ap_assert(H2_BPROXY_LIST_EMPTY(&beam->proxies));
+ ap_assert(H2_BLIST_EMPTY(&beam->send_list));
+ ap_assert(H2_BLIST_EMPTY(&beam->hold_list));
+ ap_assert(H2_BLIST_EMPTY(&beam->purge_list));
+ }
+ return status;
+}
+
+static apr_status_t beam_pool_cleanup(void *data)
+{
+ return beam_cleanup(data, 1);
+}
+
+apr_status_t h2_beam_destroy(h2_bucket_beam *beam)
+{
+ apr_pool_cleanup_kill(beam->pool, beam, beam_pool_cleanup);
+ return beam_cleanup(beam, 0);
+}
+
+apr_status_t h2_beam_create(h2_bucket_beam **pbeam, apr_pool_t *pool,
+ int id, const char *tag,
+ h2_beam_owner_t owner,
+ apr_size_t max_buf_size,
+ apr_interval_time_t timeout)
+{
+ h2_bucket_beam *beam;
+ apr_status_t rv = APR_SUCCESS;
+
+ beam = apr_pcalloc(pool, sizeof(*beam));
+ if (!beam) {
+ return APR_ENOMEM;
+ }
+
+ beam->id = id;
+ beam->tag = tag;
+ beam->pool = pool;
+ beam->owner = owner;
+ H2_BLIST_INIT(&beam->send_list);
+ H2_BLIST_INIT(&beam->hold_list);
+ H2_BLIST_INIT(&beam->purge_list);
+ H2_BPROXY_LIST_INIT(&beam->proxies);
+ beam->tx_mem_limits = 1;
+ beam->max_buf_size = max_buf_size;
+ beam->timeout = timeout;
+
+ rv = apr_thread_mutex_create(&beam->lock, APR_THREAD_MUTEX_DEFAULT, pool);
+ if (APR_SUCCESS == rv) {
+ rv = apr_thread_cond_create(&beam->change, pool);
+ if (APR_SUCCESS == rv) {
+ apr_pool_pre_cleanup_register(pool, beam, beam_pool_cleanup);
+ *pbeam = beam;
+ }
+ }
+ return rv;
+}
+
+void h2_beam_buffer_size_set(h2_bucket_beam *beam, apr_size_t buffer_size)
+{
+ h2_beam_lock bl;
+
+ if (enter_yellow(beam, &bl) == APR_SUCCESS) {
+ beam->max_buf_size = buffer_size;
+ leave_yellow(beam, &bl);
+ }
+}
+
+apr_size_t h2_beam_buffer_size_get(h2_bucket_beam *beam)
+{
+ h2_beam_lock bl;
+ apr_size_t buffer_size = 0;
+
+ if (beam && enter_yellow(beam, &bl) == APR_SUCCESS) {
+ buffer_size = beam->max_buf_size;
+ leave_yellow(beam, &bl);
+ }
+ return buffer_size;
+}
+
+void h2_beam_timeout_set(h2_bucket_beam *beam, apr_interval_time_t timeout)
+{
+ h2_beam_lock bl;
+
+ if (enter_yellow(beam, &bl) == APR_SUCCESS) {
+ beam->timeout = timeout;
+ leave_yellow(beam, &bl);
+ }
+}
+
+apr_interval_time_t h2_beam_timeout_get(h2_bucket_beam *beam)
+{
+ h2_beam_lock bl;
+ apr_interval_time_t timeout = 0;
+
+ if (enter_yellow(beam, &bl) == APR_SUCCESS) {
+ timeout = beam->timeout;
+ leave_yellow(beam, &bl);
+ }
+ return timeout;
+}
+
+void h2_beam_abort(h2_bucket_beam *beam)
+{
+ h2_beam_lock bl;
+
+ if (beam && enter_yellow(beam, &bl) == APR_SUCCESS) {
+ beam->aborted = 1;
+ r_purge_sent(beam);
+ h2_blist_cleanup(&beam->send_list);
+ report_consumption(beam, &bl);
+ apr_thread_cond_broadcast(beam->change);
+ leave_yellow(beam, &bl);
+ }
+}
+
+apr_status_t h2_beam_close(h2_bucket_beam *beam)
+{
+ h2_beam_lock bl;
+
+ if (beam && enter_yellow(beam, &bl) == APR_SUCCESS) {
+ r_purge_sent(beam);
+ beam_close(beam);
+ report_consumption(beam, &bl);
+ leave_yellow(beam, &bl);
+ }
+ return beam->aborted? APR_ECONNABORTED : APR_SUCCESS;
+}
+
+apr_status_t h2_beam_leave(h2_bucket_beam *beam)
+{
+ h2_beam_lock bl;
+
+ if (beam && enter_yellow(beam, &bl) == APR_SUCCESS) {
+ recv_buffer_cleanup(beam, &bl);
+ beam->aborted = 1;
+ beam_close(beam);
+ leave_yellow(beam, &bl);
+ }
+ return APR_SUCCESS;
+}
+
+apr_status_t h2_beam_wait_empty(h2_bucket_beam *beam, apr_read_type_e block)
+{
+ apr_status_t status;
+ h2_beam_lock bl;
+
+ if ((status = enter_yellow(beam, &bl)) == APR_SUCCESS) {
+ status = wait_empty(beam, block, bl.mutex);
+ leave_yellow(beam, &bl);
+ }
+ return status;
+}
+
+static void move_to_hold(h2_bucket_beam *beam,
+ apr_bucket_brigade *sender_bb)
+{
+ apr_bucket *b;
+ while (sender_bb && !APR_BRIGADE_EMPTY(sender_bb)) {
+ b = APR_BRIGADE_FIRST(sender_bb);
+ APR_BUCKET_REMOVE(b);
+ H2_BLIST_INSERT_TAIL(&beam->send_list, b);
+ }
+}
+
+static apr_status_t append_bucket(h2_bucket_beam *beam,
+ apr_bucket *b,
+ apr_read_type_e block,
+ apr_size_t *pspace_left,
+ h2_beam_lock *pbl)
+{
+ const char *data;
+ apr_size_t len;
+ apr_status_t status;
+ int can_beam = 0, check_len;
+
+ if (beam->aborted) {
+ return APR_ECONNABORTED;
+ }
+
+ if (APR_BUCKET_IS_METADATA(b)) {
+ if (APR_BUCKET_IS_EOS(b)) {
+ beam->closed = 1;
+ }
+ APR_BUCKET_REMOVE(b);
+ H2_BLIST_INSERT_TAIL(&beam->send_list, b);
+ return APR_SUCCESS;
+ }
+ else if (APR_BUCKET_IS_FILE(b)) {
+ /* For file buckets the problem is their internal readpool that
+ * is used on the first read to allocate buffer/mmap.
+ * Since setting aside a file bucket will de-register the
+ * file cleanup function from the previous pool, we need to
+ * call that only from the sender thread.
+ *
+         * Currently, we do not handle file buckets with refcount > 1, as
+         * the beam is then not in complete control of the file's lifetime.
+         * That would result in the bug that a file gets closed by the receiver
+         * while the sender or the beam still have buckets using it.
+ *
+ * Additionally, we allow callbacks to prevent beaming file
+ * handles across. The use case for this is to limit the number
+ * of open file handles and rather use a less efficient beam
+ * transport. */
+ apr_bucket_file *bf = b->data;
+ apr_file_t *fd = bf->fd;
+ can_beam = (bf->refcount.refcount == 1);
+ if (can_beam && beam->can_beam_fn) {
+ can_beam = beam->can_beam_fn(beam->can_beam_ctx, beam, fd);
+ }
+ check_len = !can_beam;
+ }
+ else {
+ if (b->length == ((apr_size_t)-1)) {
+ const char *data;
+ status = apr_bucket_read(b, &data, &len, APR_BLOCK_READ);
+ if (status != APR_SUCCESS) {
+ return status;
+ }
+ }
+ check_len = 1;
+ }
+
+ if (check_len) {
+ if (b->length > *pspace_left) {
+ apr_bucket_split(b, *pspace_left);
+ }
+ *pspace_left -= b->length;
+ }
+
+ /* The fundamental problem is that reading a sender bucket from
+ * a receiver thread is a total NO GO, because the bucket might use
+     * its pool/bucket_alloc from a foreign thread and that would
+     * corrupt memory. */
+ status = APR_ENOTIMPL;
+ if (APR_BUCKET_IS_TRANSIENT(b)) {
+ /* this takes care of transient buckets and converts them
+ * into heap ones. Other bucket types might or might not be
+ * affected by this. */
+ status = apr_bucket_setaside(b, beam->send_pool);
+ }
+ else if (APR_BUCKET_IS_HEAP(b)) {
+        /* For heap buckets, a read from a receiver thread is fine. The
+ * data will be there and live until the bucket itself is
+ * destroyed. */
+ status = APR_SUCCESS;
+ }
+ else if (APR_BUCKET_IS_POOL(b)) {
+ /* pool buckets are bastards that register at pool cleanup
+ * to morph themselves into heap buckets. That may happen anytime,
+ * even after the bucket data pointer has been read. So at
+ * any time inside the receiver thread, the pool bucket memory
+ * may disappear. yikes. */
+ status = apr_bucket_read(b, &data, &len, APR_BLOCK_READ);
+ if (status == APR_SUCCESS) {
+ apr_bucket_heap_make(b, data, len, NULL);
+ }
+ }
+ else if (APR_BUCKET_IS_FILE(b) && can_beam) {
+ status = apr_bucket_setaside(b, beam->send_pool);
+ }
+
+ if (status == APR_ENOTIMPL) {
+ /* we have no knowledge about the internals of this bucket,
+ * but hope that after read, its data stays immutable for the
+ * lifetime of the bucket. (see pool bucket handling above for
+ * a counter example).
+ * We do the read while in the sender thread, so that the bucket may
+ * use pools/allocators safely. */
+ status = apr_bucket_read(b, &data, &len, APR_BLOCK_READ);
+ if (status == APR_SUCCESS) {
+ status = apr_bucket_setaside(b, beam->send_pool);
+ }
+ }
+
+ if (status != APR_SUCCESS && status != APR_ENOTIMPL) {
+ return status;
+ }
+
+ APR_BUCKET_REMOVE(b);
+ H2_BLIST_INSERT_TAIL(&beam->send_list, b);
+ beam->sent_bytes += b->length;
+
+ return APR_SUCCESS;
+}
+
+void h2_beam_send_from(h2_bucket_beam *beam, apr_pool_t *p)
+{
+ h2_beam_lock bl;
+ /* Called from the sender thread to add buckets to the beam */
+ if (enter_yellow(beam, &bl) == APR_SUCCESS) {
+ r_purge_sent(beam);
+ beam_set_send_pool(beam, p);
+ leave_yellow(beam, &bl);
+ }
+}
+
+apr_status_t h2_beam_send(h2_bucket_beam *beam,
+ apr_bucket_brigade *sender_bb,
+ apr_read_type_e block)
+{
+ apr_bucket *b;
+ apr_status_t rv = APR_SUCCESS;
+ apr_size_t space_left = 0;
+ h2_beam_lock bl;
+
+ /* Called from the sender thread to add buckets to the beam */
+ if (enter_yellow(beam, &bl) == APR_SUCCESS) {
+ ap_assert(beam->send_pool);
+ r_purge_sent(beam);
+
+ if (beam->aborted) {
+ move_to_hold(beam, sender_bb);
+ rv = APR_ECONNABORTED;
+ }
+ else if (sender_bb) {
+ int force_report = !APR_BRIGADE_EMPTY(sender_bb);
+
+ space_left = calc_space_left(beam);
+ while (!APR_BRIGADE_EMPTY(sender_bb) && APR_SUCCESS == rv) {
+ if (space_left <= 0) {
+ report_prod_io(beam, force_report, &bl);
+ r_purge_sent(beam);
+ rv = wait_not_full(beam, block, &space_left, &bl);
+ if (APR_SUCCESS != rv) {
+ break;
+ }
+ }
+ b = APR_BRIGADE_FIRST(sender_bb);
+ rv = append_bucket(beam, b, block, &space_left, &bl);
+ }
+
+ report_prod_io(beam, force_report, &bl);
+ apr_thread_cond_broadcast(beam->change);
+ }
+ report_consumption(beam, &bl);
+ leave_yellow(beam, &bl);
+ }
+ return rv;
+}
+
+apr_status_t h2_beam_receive(h2_bucket_beam *beam,
+ apr_bucket_brigade *bb,
+ apr_read_type_e block,
+ apr_off_t readbytes)
+{
+ h2_beam_lock bl;
+ apr_bucket *bsender, *brecv, *ng;
+ int transferred = 0;
+ apr_status_t status = APR_SUCCESS;
+ apr_off_t remain;
+ int transferred_buckets = 0;
+
+ /* Called from the receiver thread to take buckets from the beam */
+ if (enter_yellow(beam, &bl) == APR_SUCCESS) {
+ if (readbytes <= 0) {
+ readbytes = APR_SIZE_MAX;
+ }
+ remain = readbytes;
+
+transfer:
+ if (beam->aborted) {
+ recv_buffer_cleanup(beam, &bl);
+ status = APR_ECONNABORTED;
+ goto leave;
+ }
+
+ /* transfer enough buckets from our receiver brigade, if we have one */
+ while (remain >= 0
+ && beam->recv_buffer
+ && !APR_BRIGADE_EMPTY(beam->recv_buffer)) {
+
+ brecv = APR_BRIGADE_FIRST(beam->recv_buffer);
+ if (brecv->length > 0 && remain <= 0) {
+ break;
+ }
+ APR_BUCKET_REMOVE(brecv);
+ APR_BRIGADE_INSERT_TAIL(bb, brecv);
+ remain -= brecv->length;
+ ++transferred;
+ }
+
+ /* transfer from our sender brigade, transforming sender buckets to
+ * receiver ones until we have enough */
+ while (remain >= 0 && !H2_BLIST_EMPTY(&beam->send_list)) {
+
+ brecv = NULL;
+ bsender = H2_BLIST_FIRST(&beam->send_list);
+ if (bsender->length > 0 && remain <= 0) {
+ break;
+ }
+
+ if (APR_BUCKET_IS_METADATA(bsender)) {
+ if (APR_BUCKET_IS_EOS(bsender)) {
+ brecv = apr_bucket_eos_create(bb->bucket_alloc);
+ beam->close_sent = 1;
+ }
+ else if (APR_BUCKET_IS_FLUSH(bsender)) {
+ brecv = apr_bucket_flush_create(bb->bucket_alloc);
+ }
+ else if (AP_BUCKET_IS_ERROR(bsender)) {
+ ap_bucket_error *eb = (ap_bucket_error *)bsender;
+ brecv = ap_bucket_error_create(eb->status, eb->data,
+ bb->p, bb->bucket_alloc);
+ }
+ }
+ else if (bsender->length == 0) {
+ APR_BUCKET_REMOVE(bsender);
+ H2_BLIST_INSERT_TAIL(&beam->hold_list, bsender);
+ continue;
+ }
+ else if (APR_BUCKET_IS_FILE(bsender)) {
+ /* This is set aside into the target brigade pool so that
+ * any read operation messes with that pool and not
+ * the sender one. */
+ apr_bucket_file *f = (apr_bucket_file *)bsender->data;
+ apr_file_t *fd = f->fd;
+ int setaside = (f->readpool != bb->p);
+
+ if (setaside) {
+ status = apr_file_setaside(&fd, fd, bb->p);
+ if (status != APR_SUCCESS) {
+ goto leave;
+ }
+ ++beam->files_beamed;
+ }
+ ng = apr_brigade_insert_file(bb, fd, bsender->start, bsender->length,
+ bb->p);
+#if APR_HAS_MMAP
+ /* disable mmap handling as this leads to segfaults when
+ * the underlying file is changed while memory pointer has
+ * been handed out. See also PR 59348 */
+ apr_bucket_file_enable_mmap(ng, 0);
+#endif
+ APR_BUCKET_REMOVE(bsender);
+ H2_BLIST_INSERT_TAIL(&beam->hold_list, bsender);
+
+ remain -= bsender->length;
+ ++transferred;
+ ++transferred_buckets;
+ continue;
+ }
+ else {
+ /* create a "receiver" standin bucket. we took care about the
+ * underlying sender bucket and its data when we placed it into
+ * the sender brigade.
+ * the beam bucket will notify us on destruction that bsender is
+ * no longer needed. */
+ brecv = h2_beam_bucket_create(beam, bsender, bb->bucket_alloc,
+ beam->buckets_sent++);
+ }
+
+ /* Place the sender bucket into our hold, to be destroyed when no
+ * receiver bucket references it any more. */
+ APR_BUCKET_REMOVE(bsender);
+ H2_BLIST_INSERT_TAIL(&beam->hold_list, bsender);
+
+ beam->received_bytes += bsender->length;
+ ++transferred_buckets;
+
+ if (brecv) {
+ APR_BRIGADE_INSERT_TAIL(bb, brecv);
+ remain -= brecv->length;
+ ++transferred;
+ }
+ else {
+ /* let outside hook determine how bucket is beamed */
+ leave_yellow(beam, &bl);
+ brecv = h2_beam_bucket(beam, bb, bsender);
+ enter_yellow(beam, &bl);
+
+ while (brecv && brecv != APR_BRIGADE_SENTINEL(bb)) {
+ ++transferred;
+ remain -= brecv->length;
+ brecv = APR_BUCKET_NEXT(brecv);
+ }
+ }
+ }
+
+ if (remain < 0) {
+ /* too much, put some back into our recv_buffer */
+ remain = readbytes;
+ for (brecv = APR_BRIGADE_FIRST(bb);
+ brecv != APR_BRIGADE_SENTINEL(bb);
+ brecv = APR_BUCKET_NEXT(brecv)) {
+ remain -= (beam->tx_mem_limits? bucket_mem_used(brecv)
+ : brecv->length);
+ if (remain < 0) {
+ apr_bucket_split(brecv, brecv->length+remain);
+ beam->recv_buffer = apr_brigade_split_ex(bb,
+ APR_BUCKET_NEXT(brecv),
+ beam->recv_buffer);
+ break;
+ }
+ }
+ }
+
+ if (beam->closed && buffer_is_empty(beam)) {
+ /* beam is closed and we have nothing more to receive */
+ if (!beam->close_sent) {
+ apr_bucket *b = apr_bucket_eos_create(bb->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(bb, b);
+ beam->close_sent = 1;
+ ++transferred;
+ status = APR_SUCCESS;
+ }
+ }
+
+ if (transferred_buckets > 0) {
+ if (beam->cons_ev_cb) {
+ beam->cons_ev_cb(beam->cons_ctx, beam);
+ }
+ }
+
+ if (transferred) {
+ apr_thread_cond_broadcast(beam->change);
+ status = APR_SUCCESS;
+ }
+ else {
+ status = wait_not_empty(beam, block, bl.mutex);
+ if (status != APR_SUCCESS) {
+ goto leave;
+ }
+ goto transfer;
+ }
+leave:
+ leave_yellow(beam, &bl);
+ }
+ return status;
+}
+
+void h2_beam_on_consumed(h2_bucket_beam *beam,
+ h2_beam_ev_callback *ev_cb,
+ h2_beam_io_callback *io_cb, void *ctx)
+{
+ h2_beam_lock bl;
+ if (enter_yellow(beam, &bl) == APR_SUCCESS) {
+ beam->cons_ev_cb = ev_cb;
+ beam->cons_io_cb = io_cb;
+ beam->cons_ctx = ctx;
+ leave_yellow(beam, &bl);
+ }
+}
+
+void h2_beam_on_produced(h2_bucket_beam *beam,
+ h2_beam_io_callback *io_cb, void *ctx)
+{
+ h2_beam_lock bl;
+ if (enter_yellow(beam, &bl) == APR_SUCCESS) {
+ beam->prod_io_cb = io_cb;
+ beam->prod_ctx = ctx;
+ leave_yellow(beam, &bl);
+ }
+}
+
+void h2_beam_on_file_beam(h2_bucket_beam *beam,
+ h2_beam_can_beam_callback *cb, void *ctx)
+{
+ h2_beam_lock bl;
+
+ if (enter_yellow(beam, &bl) == APR_SUCCESS) {
+ beam->can_beam_fn = cb;
+ beam->can_beam_ctx = ctx;
+ leave_yellow(beam, &bl);
+ }
+}
+
+
+apr_off_t h2_beam_get_buffered(h2_bucket_beam *beam)
+{
+ apr_bucket *b;
+ apr_off_t l = 0;
+ h2_beam_lock bl;
+
+ if (beam && enter_yellow(beam, &bl) == APR_SUCCESS) {
+ for (b = H2_BLIST_FIRST(&beam->send_list);
+ b != H2_BLIST_SENTINEL(&beam->send_list);
+ b = APR_BUCKET_NEXT(b)) {
+ /* should all have determinate length */
+ l += b->length;
+ }
+ leave_yellow(beam, &bl);
+ }
+ return l;
+}
+
+apr_off_t h2_beam_get_mem_used(h2_bucket_beam *beam)
+{
+ apr_bucket *b;
+ apr_off_t l = 0;
+ h2_beam_lock bl;
+
+ if (beam && enter_yellow(beam, &bl) == APR_SUCCESS) {
+ for (b = H2_BLIST_FIRST(&beam->send_list);
+ b != H2_BLIST_SENTINEL(&beam->send_list);
+ b = APR_BUCKET_NEXT(b)) {
+ l += bucket_mem_used(b);
+ }
+ leave_yellow(beam, &bl);
+ }
+ return l;
+}
+
+int h2_beam_empty(h2_bucket_beam *beam)
+{
+ int empty = 1;
+ h2_beam_lock bl;
+
+ if (beam && enter_yellow(beam, &bl) == APR_SUCCESS) {
+ empty = (H2_BLIST_EMPTY(&beam->send_list)
+ && (!beam->recv_buffer || APR_BRIGADE_EMPTY(beam->recv_buffer)));
+ leave_yellow(beam, &bl);
+ }
+ return empty;
+}
+
+int h2_beam_holds_proxies(h2_bucket_beam *beam)
+{
+ int has_proxies = 1;
+ h2_beam_lock bl;
+
+ if (beam && enter_yellow(beam, &bl) == APR_SUCCESS) {
+ has_proxies = !H2_BPROXY_LIST_EMPTY(&beam->proxies);
+ leave_yellow(beam, &bl);
+ }
+ return has_proxies;
+}
+
+int h2_beam_was_received(h2_bucket_beam *beam)
+{
+ int happened = 0;
+ h2_beam_lock bl;
+
+ if (beam && enter_yellow(beam, &bl) == APR_SUCCESS) {
+ happened = (beam->received_bytes > 0);
+ leave_yellow(beam, &bl);
+ }
+ return happened;
+}
+
+apr_size_t h2_beam_get_files_beamed(h2_bucket_beam *beam)
+{
+ apr_size_t n = 0;
+ h2_beam_lock bl;
+
+ if (beam && enter_yellow(beam, &bl) == APR_SUCCESS) {
+ n = beam->files_beamed;
+ leave_yellow(beam, &bl);
+ }
+ return n;
+}
+
+int h2_beam_no_files(void *ctx, h2_bucket_beam *beam, apr_file_t *file)
+{
+ return 0;
+}
+
+int h2_beam_report_consumption(h2_bucket_beam *beam)
+{
+ h2_beam_lock bl;
+ int rv = 0;
+ if (enter_yellow(beam, &bl) == APR_SUCCESS) {
+ rv = report_consumption(beam, &bl);
+ leave_yellow(beam, &bl);
+ }
+ return rv;
+}
+
+void h2_beam_log(h2_bucket_beam *beam, conn_rec *c, int level, const char *msg)
+{
+ if (beam && APLOG_C_IS_LEVEL(c,level)) {
+ ap_log_cerror(APLOG_MARK, level, 0, c,
+ "beam(%ld-%d,%s,closed=%d,aborted=%d,empty=%d,buf=%ld): %s",
+ (c->master? c->master->id : c->id), beam->id, beam->tag,
+ beam->closed, beam->aborted, h2_beam_empty(beam),
+ (long)h2_beam_get_buffered(beam), msg);
+ }
+}
+
+
diff --git a/modules/http2/h2_bucket_beam.h b/modules/http2/h2_bucket_beam.h
new file mode 100644
index 0000000..f260762
--- /dev/null
+++ b/modules/http2/h2_bucket_beam.h
@@ -0,0 +1,406 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef h2_bucket_beam_h
+#define h2_bucket_beam_h
+
+struct apr_thread_mutex_t;
+struct apr_thread_cond_t;
+
+/*******************************************************************************
+ * apr_bucket list without bells and whistles
+ ******************************************************************************/
+
+/**
+ * h2_blist can hold a list of buckets just like apr_bucket_brigade, but
+ * does not do any allocations or provide related features.
+ */
+typedef struct {
+ APR_RING_HEAD(h2_bucket_list, apr_bucket) list;
+} h2_blist;
+
+#define H2_BLIST_INIT(b) APR_RING_INIT(&(b)->list, apr_bucket, link);
+#define H2_BLIST_SENTINEL(b) APR_RING_SENTINEL(&(b)->list, apr_bucket, link)
+#define H2_BLIST_EMPTY(b) APR_RING_EMPTY(&(b)->list, apr_bucket, link)
+#define H2_BLIST_FIRST(b) APR_RING_FIRST(&(b)->list)
+#define H2_BLIST_LAST(b) APR_RING_LAST(&(b)->list)
+#define H2_BLIST_INSERT_HEAD(b, e) do { \
+ apr_bucket *ap__b = (e); \
+ APR_RING_INSERT_HEAD(&(b)->list, ap__b, apr_bucket, link); \
+ } while (0)
+#define H2_BLIST_INSERT_TAIL(b, e) do { \
+ apr_bucket *ap__b = (e); \
+ APR_RING_INSERT_TAIL(&(b)->list, ap__b, apr_bucket, link); \
+ } while (0)
+#define H2_BLIST_CONCAT(a, b) do { \
+ APR_RING_CONCAT(&(a)->list, &(b)->list, apr_bucket, link); \
+ } while (0)
+#define H2_BLIST_PREPEND(a, b) do { \
+ APR_RING_PREPEND(&(a)->list, &(b)->list, apr_bucket, link); \
+ } while (0)
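+
+/*
+ * Illustrative sketch (editorial addition, not part of the upstream header):
+ * moving a bucket from a brigade into an h2_blist and emptying the list
+ * again. `bb` and `hold` are placeholder names.
+ *
+ *     h2_blist hold;
+ *     apr_bucket *b;
+ *
+ *     H2_BLIST_INIT(&hold);
+ *     b = APR_BRIGADE_FIRST(bb);
+ *     APR_BUCKET_REMOVE(b);
+ *     H2_BLIST_INSERT_TAIL(&hold, b);
+ *
+ *     while (!H2_BLIST_EMPTY(&hold)) {
+ *         b = H2_BLIST_FIRST(&hold);
+ *         apr_bucket_delete(b);
+ *     }
+ */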
+
+/*******************************************************************************
+ * h2_bucket_beam
+ ******************************************************************************/
+
+/**
+ * An h2_bucket_beam solves the task of transferring buckets, esp. their data,
+ * across threads with zero buffer copies.
+ *
+ * When a thread, let's call it the sender thread, wants to send buckets to
+ * another, the green (receiver) thread, it creates an h2_bucket_beam and adds
+ * buckets via h2_beam_send(). It gives the beam to the green thread, which then
+ * can receive buckets into its own brigade via h2_beam_receive().
+ *
+ * Sending and receiving can happen concurrently.
+ *
+ * The beam can limit the amount of data it accepts via the buffer_size. This
+ * can also be adjusted during its lifetime. Sends and receives can be done blocking.
+ * A timeout can be set for such blocks.
+ *
+ * Care needs to be taken when terminating the beam. The beam registers a
+ * cleanup at the pool it was created with and will clean up after itself.
+ * However, if received buckets still exist at that point, already freed
+ * memory might be accessed. The beam asserts on this condition.
+ *
+ * The proper way of shutting down a beam is to first make sure there are no
+ * more green buckets out there, then clean up the beam to purge any still
+ * existing sender buckets and then, possibly, terminate the beam itself
+ * (or the pool it was created with).
+ *
+ * The following restrictions apply to bucket transport:
+ * - only EOS, FLUSH and error meta buckets are copied through. All other
+ * meta buckets are kept in the beam's hold.
+ * - all kind of data buckets are transported through:
+ * - transient buckets are converted to heap ones on send
+ * - heap and pool buckets require no extra handling
+ * - buckets with indeterminate length are read on send
+ * - file buckets will transfer the file itself into a new bucket, if allowed
+ * - all other buckets are read on send to make sure data is present
+ *
+ * This ensures that when the sender thread sends its sender buckets, the data
+ * is made accessible while still on the sender side. The sender bucket then
+ * enters the beam's hold storage.
+ * When the green thread calls receive, sender buckets in the hold are wrapped
+ * into special beam buckets. Beam buckets on read present the data directly
+ * from the internal sender one, but otherwise live on the green side. When a
+ * beam bucket gets destroyed, it notifies its beam that the corresponding
+ * sender bucket from the hold may be destroyed.
+ * Since the destruction of green buckets happens in the green thread, any
+ * corresponding sender bucket can not immediately be destroyed, as that would
+ * result in race conditions.
+ * Instead, the beam transfers such sender buckets from the hold to the purge
+ * storage. Next time there is a call from the sender side, the buckets in
+ * purge will be deleted.
+ *
+ * There are callbacks that can be registered with a beam:
+ * - a "consumed" callback that gets called on the sender side with the
+ * amount of data that has been received by the green side. The amount
+ * is a delta from the last callback invocation. The sender side can trigger
+ * these callbacks by calling h2_beam_send() with a NULL brigade.
+ * - a "can_beam_file" callback that can prohibit the transfer of file handles
+ * through the beam. This will cause file buckets to be read on send and
+ * their data buffers will then be transported just like a heap bucket's.
+ * When no callback is registered, no restrictions apply and all files are
+ * passed through.
+ * File handles transferred to the green side will stay there until the
+ * receiving brigade's pool is destroyed/cleared. If the pool lives very
+ * long or if many different files are beamed, the process might run out
+ * of available file handles.
+ *
+ * The name "beam" of course is inspired by good old transporter
+ * technology where humans are kept inside the transporter's memory
+ * buffers until the transmission is complete. Star gates use a similar trick.
+ */
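+
+/*
+ * Illustrative usage sketch (editorial addition, not part of the upstream
+ * header), assuming one sender and one receiver thread sharing the beam;
+ * the pools, brigades, id and buffer size are placeholders:
+ *
+ *     h2_bucket_beam *beam;
+ *     h2_beam_create(&beam, pool, 1, "output", H2_BEAM_OWNER_SEND,
+ *                    64 * 1024, apr_time_from_sec(5));
+ *
+ *     // sender thread: register the pool buckets come from, then send
+ *     h2_beam_send_from(beam, sender_pool);
+ *     h2_beam_send(beam, sender_bb, APR_BLOCK_READ);
+ *
+ *     // receiver thread: pull up to 16k into its own brigade
+ *     h2_beam_receive(beam, receiver_bb, APR_NONBLOCK_READ, 16 * 1024);
+ */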
+
+typedef void h2_beam_mutex_leave(void *ctx, struct apr_thread_mutex_t *lock);
+
+typedef struct {
+ apr_thread_mutex_t *mutex;
+ h2_beam_mutex_leave *leave;
+ void *leave_ctx;
+} h2_beam_lock;
+
+typedef struct h2_bucket_beam h2_bucket_beam;
+
+typedef apr_status_t h2_beam_mutex_enter(void *ctx, h2_beam_lock *pbl);
+
+typedef void h2_beam_io_callback(void *ctx, h2_bucket_beam *beam,
+ apr_off_t bytes);
+typedef void h2_beam_ev_callback(void *ctx, h2_bucket_beam *beam);
+
+typedef struct h2_beam_proxy h2_beam_proxy;
+typedef struct {
+ APR_RING_HEAD(h2_beam_proxy_list, h2_beam_proxy) list;
+} h2_bproxy_list;
+
+typedef int h2_beam_can_beam_callback(void *ctx, h2_bucket_beam *beam,
+ apr_file_t *file);
+
+typedef enum {
+ H2_BEAM_OWNER_SEND,
+ H2_BEAM_OWNER_RECV
+} h2_beam_owner_t;
+
+/**
+ * Will deny all transfer of apr_file_t across the beam and force
+ * a data copy instead.
+ */
+int h2_beam_no_files(void *ctx, h2_bucket_beam *beam, apr_file_t *file);
+
+struct h2_bucket_beam {
+ int id;
+ const char *tag;
+ apr_pool_t *pool;
+ h2_beam_owner_t owner;
+ h2_blist send_list;
+ h2_blist hold_list;
+ h2_blist purge_list;
+ apr_bucket_brigade *recv_buffer;
+ h2_bproxy_list proxies;
+ apr_pool_t *send_pool;
+ apr_pool_t *recv_pool;
+
+ apr_size_t max_buf_size;
+ apr_interval_time_t timeout;
+
+ apr_off_t sent_bytes; /* amount of bytes sent */
+ apr_off_t received_bytes; /* amount of bytes received */
+
+ apr_size_t buckets_sent; /* # of beam buckets sent */
+ apr_size_t files_beamed; /* how many file handles have been set aside */
+
+ unsigned int aborted : 1;
+ unsigned int closed : 1;
+ unsigned int close_sent : 1;
+ unsigned int tx_mem_limits : 1; /* only memory size counts on transfers */
+
+ struct apr_thread_mutex_t *lock;
+ struct apr_thread_cond_t *change;
+
+ apr_off_t cons_bytes_reported; /* amount of bytes reported as consumed */
+ h2_beam_ev_callback *cons_ev_cb;
+ h2_beam_io_callback *cons_io_cb;
+ void *cons_ctx;
+
+ apr_off_t prod_bytes_reported; /* amount of bytes reported as produced */
+ h2_beam_io_callback *prod_io_cb;
+ void *prod_ctx;
+
+ h2_beam_can_beam_callback *can_beam_fn;
+ void *can_beam_ctx;
+};
+
+/**
+ * Creates a new bucket beam for transfer of buckets across threads.
+ *
+ * The pool the beam is created with will be protected by the given
+ * mutex and will be used in multiple threads. It needs a pool allocator
+ * that is only used inside that same mutex.
+ *
+ * @param pbeam will hold the created beam on return
+ * @param pool pool owning the beam, beam will cleanup when pool released
+ * @param id identifier of the beam
+ * @param tag tag identifying beam for logging
+ * @param owner if the beam is owned by the sender or receiver, e.g. if
+ * the pool owner is using this beam for sending or receiving
+ * @param buffer_size maximum memory footprint of buckets buffered in beam, or
+ * 0 for no limitation
+ * @param timeout timeout for blocking operations
+ */
+apr_status_t h2_beam_create(h2_bucket_beam **pbeam,
+ apr_pool_t *pool,
+ int id, const char *tag,
+ h2_beam_owner_t owner,
+ apr_size_t buffer_size,
+ apr_interval_time_t timeout);
+
+/**
+ * Destroys the beam immediately without cleanup.
+ */
+apr_status_t h2_beam_destroy(h2_bucket_beam *beam);
+
+/**
+ * Send buckets from the given brigade through the beam. Will hold buckets
+ * internally as long as they have not been processed by the receiving side.
+ * All accepted buckets are removed from the given brigade. Will return with
+ * APR_EAGAIN on non-blocking sends when not all buckets could be accepted.
+ *
+ * Call from the sender side only.
+ */
+apr_status_t h2_beam_send(h2_bucket_beam *beam,
+ apr_bucket_brigade *bb,
+ apr_read_type_e block);
+
+/**
+ * Register the pool from which future buckets are sent. This defines
+ * the lifetime of the buckets, i.e. the pool should not be cleared/destroyed
+ * until the data is no longer needed (or has been received).
+ */
+void h2_beam_send_from(h2_bucket_beam *beam, apr_pool_t *p);
+
+/**
+ * Receive buckets from the beam into the given brigade. Will return APR_EOF
+ * when reading past an EOS bucket. Reads can be blocking until data is
+ * available or the beam has been closed. Non-blocking calls return APR_EAGAIN
+ * if no data is available.
+ *
+ * Call from the receiver side only.
+ */
+apr_status_t h2_beam_receive(h2_bucket_beam *beam,
+ apr_bucket_brigade *green_buckets,
+ apr_read_type_e block,
+ apr_off_t readbytes);
+
+/**
+ * Determine if beam is empty.
+ */
+int h2_beam_empty(h2_bucket_beam *beam);
+
+/**
+ * Determine if beam has handed out proxy buckets that are not destroyed.
+ */
+int h2_beam_holds_proxies(h2_bucket_beam *beam);
+
+/**
+ * Abort the beam. Will cleanup any buffered buckets and answer all send
+ * and receives with APR_ECONNABORTED.
+ *
+ * Call from the sender side only.
+ */
+void h2_beam_abort(h2_bucket_beam *beam);
+
+/**
+ * Close the beam. Sending an EOS bucket serves the same purpose.
+ *
+ * Call from the sender side only.
+ */
+apr_status_t h2_beam_close(h2_bucket_beam *beam);
+
+/**
+ * Receiver leaves the beam, i.e. it will no longer read. This will
+ * interrupt any sender blocked on writing and fail future sends.
+ *
+ * Call from the receiver side only.
+ */
+apr_status_t h2_beam_leave(h2_bucket_beam *beam);
+
+int h2_beam_is_closed(h2_bucket_beam *beam);
+
+/**
+ * Return APR_SUCCESS when all buckets in transit have been handled.
+ * When called with APR_BLOCK_READ and a mutex set, will wait until the green
+ * side has consumed all data. Otherwise APR_EAGAIN is returned.
+ * If a timeout is set on the beam, waiting might also time out and
+ * return APR_TIMEUP.
+ *
+ * Call from the sender side only.
+ */
+apr_status_t h2_beam_wait_empty(h2_bucket_beam *beam, apr_read_type_e block);
+
+/**
+ * Set/get the timeout for blocking read/write operations. Only works
+ * if a mutex has been set for the beam.
+ */
+void h2_beam_timeout_set(h2_bucket_beam *beam,
+ apr_interval_time_t timeout);
+apr_interval_time_t h2_beam_timeout_get(h2_bucket_beam *beam);
+
+/**
+ * Set/get the maximum buffer size for beam data (memory footprint).
+ */
+void h2_beam_buffer_size_set(h2_bucket_beam *beam,
+ apr_size_t buffer_size);
+apr_size_t h2_beam_buffer_size_get(h2_bucket_beam *beam);
+
+/**
+ * Register a callback to be invoked on the sender side with the
+ * amount of bytes that have been consumed by the receiver, since the
+ * last callback invocation or reset.
+ * @param beam the beam to set the callback on
+ * @param ev_cb the callback or NULL, called when bytes are consumed
+ * @param io_cb the callback or NULL, called on sender with bytes consumed
+ * @param ctx the context to use in callback invocation
+ *
+ * Call from the sender side, io callbacks invoked on sender side, ev callback
+ * from any side.
+ */
+void h2_beam_on_consumed(h2_bucket_beam *beam,
+ h2_beam_ev_callback *ev_cb,
+ h2_beam_io_callback *io_cb, void *ctx);
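+
+/*
+ * Editorial sketch (not part of the upstream header) of registering a
+ * consumption callback; `on_consumed` and `stream_ctx` are placeholder names:
+ *
+ *     static void on_consumed(void *ctx, h2_bucket_beam *beam, apr_off_t bytes)
+ *     {
+ *         // e.g. enlarge the stream's flow control window by `bytes`
+ *     }
+ *
+ *     h2_beam_on_consumed(beam, NULL, on_consumed, stream_ctx);
+ */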
+
+/**
+ * Call any registered consumed handler, if any changes have happened
+ * since the last invocation.
+ * @return !=0 iff a handler has been called
+ *
+ * Needs to be invoked from the sending side.
+ */
+int h2_beam_report_consumption(h2_bucket_beam *beam);
+
+/**
+ * Register a callback to be invoked on the receiver side with the
+ * amount of bytes that have been produced by the sender, since the
+ * last callback invocation or reset.
+ * @param beam the beam to set the callback on
+ * @param io_cb the callback or NULL, called on receiver with bytes produced
+ * @param ctx the context to use in callback invocation
+ *
+ * Call from the receiver side, callbacks invoked on either side.
+ */
+void h2_beam_on_produced(h2_bucket_beam *beam,
+ h2_beam_io_callback *io_cb, void *ctx);
+
+/**
+ * Register a callback that may prevent a file from being beamed as a
+ * file handle, forcing the file content to be copied. When no callback
+ * is set (NULL), file handles are transferred directly.
+ * @param beam the beam to set the callback on
+ * @param cb the callback or NULL, invoked before a file handle is beamed
+ * @param ctx the context to use in callback invocation
+ *
+ * Call from the receiver side, callbacks invoked on either side.
+ */
+void h2_beam_on_file_beam(h2_bucket_beam *beam,
+ h2_beam_can_beam_callback *cb, void *ctx);
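+
+/*
+ * Editorial example (not part of the upstream header): forcing data copies
+ * instead of file handle transfers by registering the predefined callback:
+ *
+ *     h2_beam_on_file_beam(beam, h2_beam_no_files, NULL);
+ */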
+
+/**
+ * Get the amount of bytes currently buffered in the beam (unread).
+ */
+apr_off_t h2_beam_get_buffered(h2_bucket_beam *beam);
+
+/**
+ * Get the memory used by the buffered buckets, approximately.
+ */
+apr_off_t h2_beam_get_mem_used(h2_bucket_beam *beam);
+
+/**
+ * Return != 0 iff (some) data from the beam has been received.
+ */
+int h2_beam_was_received(h2_bucket_beam *beam);
+
+apr_size_t h2_beam_get_files_beamed(h2_bucket_beam *beam);
+
+typedef apr_bucket *h2_bucket_beamer(h2_bucket_beam *beam,
+ apr_bucket_brigade *dest,
+ const apr_bucket *src);
+
+void h2_register_bucket_beamer(h2_bucket_beamer *beamer);
+
+void h2_beam_log(h2_bucket_beam *beam, conn_rec *c, int level, const char *msg);
+
+#endif /* h2_bucket_beam_h */
diff --git a/modules/http2/h2_bucket_eos.c b/modules/http2/h2_bucket_eos.c
new file mode 100644
index 0000000..c89d499
--- /dev/null
+++ b/modules/http2/h2_bucket_eos.c
@@ -0,0 +1,127 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <assert.h>
+#include <stddef.h>
+
+#include <httpd.h>
+#include <http_core.h>
+#include <http_connection.h>
+#include <http_log.h>
+
+#include "h2_private.h"
+#include "h2.h"
+#include "h2_mplx.h"
+#include "h2_stream.h"
+#include "h2_bucket_eos.h"
+
+typedef struct {
+ apr_bucket_refcount refcount;
+ h2_stream *stream;
+} h2_bucket_eos;
+
+static apr_status_t bucket_cleanup(void *data)
+{
+ h2_stream **pstream = data;
+
+ if (*pstream) {
+ /* If bucket_destroy is called after us, this prevents
+ * bucket_destroy from trying to destroy the stream again. */
+ *pstream = NULL;
+ }
+ return APR_SUCCESS;
+}
+
+static apr_status_t bucket_read(apr_bucket *b, const char **str,
+ apr_size_t *len, apr_read_type_e block)
+{
+ (void)b;
+ (void)block;
+ *str = NULL;
+ *len = 0;
+ return APR_SUCCESS;
+}
+
+apr_bucket *h2_bucket_eos_make(apr_bucket *b, h2_stream *stream)
+{
+ h2_bucket_eos *h;
+
+ h = apr_bucket_alloc(sizeof(*h), b->list);
+ h->stream = stream;
+
+ b = apr_bucket_shared_make(b, h, 0, 0);
+ b->type = &h2_bucket_type_eos;
+
+ return b;
+}
+
+apr_bucket *h2_bucket_eos_create(apr_bucket_alloc_t *list,
+ h2_stream *stream)
+{
+ apr_bucket *b = apr_bucket_alloc(sizeof(*b), list);
+
+ APR_BUCKET_INIT(b);
+ b->free = apr_bucket_free;
+ b->list = list;
+ b = h2_bucket_eos_make(b, stream);
+ if (stream) {
+ h2_bucket_eos *h = b->data;
+ apr_pool_pre_cleanup_register(stream->pool, &h->stream, bucket_cleanup);
+ }
+ return b;
+}
+
+static void bucket_destroy(void *data)
+{
+ h2_bucket_eos *h = data;
+
+ if (apr_bucket_shared_destroy(h)) {
+ h2_stream *stream = h->stream;
+ if (stream && stream->pool) {
+ apr_pool_cleanup_kill(stream->pool, &h->stream, bucket_cleanup);
+ }
+ apr_bucket_free(h);
+ if (stream) {
+ h2_stream_dispatch(stream, H2_SEV_EOS_SENT);
+ }
+ }
+}
+
+const apr_bucket_type_t h2_bucket_type_eos = {
+ "H2EOS", 5, APR_BUCKET_METADATA,
+ bucket_destroy,
+ bucket_read,
+ apr_bucket_setaside_noop,
+ apr_bucket_split_notimpl,
+ apr_bucket_shared_copy
+};
+
diff --git a/modules/http2/h2_bucket_eos.h b/modules/http2/h2_bucket_eos.h
new file mode 100644
index 0000000..04e32e3
--- /dev/null
+++ b/modules/http2/h2_bucket_eos.h
@@ -0,0 +1,32 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef mod_http2_h2_bucket_stream_eos_h
+#define mod_http2_h2_bucket_stream_eos_h
+
+struct h2_stream;
+
+/** End Of HTTP/2 STREAM (H2EOS) bucket */
+extern const apr_bucket_type_t h2_bucket_type_eos;
+
+#define H2_BUCKET_IS_H2EOS(e) (e->type == &h2_bucket_type_eos)
+
+apr_bucket *h2_bucket_eos_make(apr_bucket *b, struct h2_stream *stream);
+
+apr_bucket *h2_bucket_eos_create(apr_bucket_alloc_t *list,
+ struct h2_stream *stream);
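+
+/*
+ * Editorial sketch (not part of the upstream header): appending an H2EOS
+ * bucket to a stream's output brigade; `bb` and `stream` are placeholders.
+ *
+ *     apr_bucket *b = h2_bucket_eos_create(bb->bucket_alloc, stream);
+ *     APR_BRIGADE_INSERT_TAIL(bb, b);
+ */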
+
+#endif /* mod_http2_h2_bucket_stream_eos_h */
diff --git a/modules/http2/h2_config.c b/modules/http2/h2_config.c
new file mode 100644
index 0000000..8766355
--- /dev/null
+++ b/modules/http2/h2_config.c
@@ -0,0 +1,702 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <assert.h>
+
+#include <apr_hash.h>
+#include <apr_lib.h>
+
+#include <httpd.h>
+#include <http_core.h>
+#include <http_config.h>
+#include <http_log.h>
+#include <http_vhost.h>
+
+#include <ap_mpm.h>
+
+#include <apr_strings.h>
+
+#include "h2.h"
+#include "h2_alt_svc.h"
+#include "h2_ctx.h"
+#include "h2_conn.h"
+#include "h2_config.h"
+#include "h2_h2.h"
+#include "h2_private.h"
+
+#define DEF_VAL (-1)
+
+#define H2_CONFIG_GET(a, b, n) \
+ (((a)->n == DEF_VAL)? (b) : (a))->n
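+
+/* Editorial note (not part of the upstream source): for example,
+ * H2_CONFIG_GET(conf, &defconf, h2_push) expands to
+ * ((conf->h2_push == DEF_VAL)? &defconf : conf)->h2_push, i.e. the
+ * configured value is used unless it is still DEF_VAL, in which case
+ * the compiled-in default applies. */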
+
+static h2_config defconf = {
+ "default",
+ 100, /* max_streams */
+ H2_INITIAL_WINDOW_SIZE, /* window_size */
+ -1, /* min workers */
+ -1, /* max workers */
+ 10 * 60, /* max workers idle secs */
+ 32 * 1024, /* stream max mem size */
+ NULL, /* no alt-svcs */
+ -1, /* alt-svc max age */
+ 0, /* serialize headers */
+ -1, /* h2 direct mode */
+ 1, /* modern TLS only */
+ -1, /* HTTP/1 Upgrade support */
+ 1024*1024, /* TLS warmup size */
+ 1, /* TLS cooldown secs */
+ 1, /* HTTP/2 server push enabled */
+ NULL, /* map of content-type to priorities */
+ 256, /* push diary size */
+ 0, /* copy files across threads */
+ NULL, /* push list */
+ 0, /* early hints, http status 103 */
+};
+
+void h2_config_init(apr_pool_t *pool)
+{
+ (void)pool;
+}
+
+static void *h2_config_create(apr_pool_t *pool,
+ const char *prefix, const char *x)
+{
+ h2_config *conf = (h2_config *)apr_pcalloc(pool, sizeof(h2_config));
+ const char *s = x? x : "unknown";
+ char *name = apr_pstrcat(pool, prefix, "[", s, "]", NULL);
+
+ conf->name = name;
+ conf->h2_max_streams = DEF_VAL;
+ conf->h2_window_size = DEF_VAL;
+ conf->min_workers = DEF_VAL;
+ conf->max_workers = DEF_VAL;
+ conf->max_worker_idle_secs = DEF_VAL;
+ conf->stream_max_mem_size = DEF_VAL;
+ conf->alt_svc_max_age = DEF_VAL;
+ conf->serialize_headers = DEF_VAL;
+ conf->h2_direct = DEF_VAL;
+ conf->modern_tls_only = DEF_VAL;
+ conf->h2_upgrade = DEF_VAL;
+ conf->tls_warmup_size = DEF_VAL;
+ conf->tls_cooldown_secs = DEF_VAL;
+ conf->h2_push = DEF_VAL;
+ conf->priorities = NULL;
+ conf->push_diary_size = DEF_VAL;
+ conf->copy_files = DEF_VAL;
+ conf->push_list = NULL;
+ conf->early_hints = DEF_VAL;
+ return conf;
+}
+
+void *h2_config_create_svr(apr_pool_t *pool, server_rec *s)
+{
+ return h2_config_create(pool, "srv", s->defn_name);
+}
+
+void *h2_config_create_dir(apr_pool_t *pool, char *x)
+{
+ return h2_config_create(pool, "dir", x);
+}
+
+static void *h2_config_merge(apr_pool_t *pool, void *basev, void *addv)
+{
+ h2_config *base = (h2_config *)basev;
+ h2_config *add = (h2_config *)addv;
+ h2_config *n = (h2_config *)apr_pcalloc(pool, sizeof(h2_config));
+ char *name = apr_pstrcat(pool, "merged[", add->name, ", ", base->name, "]", NULL);
+ n->name = name;
+
+ n->h2_max_streams = H2_CONFIG_GET(add, base, h2_max_streams);
+ n->h2_window_size = H2_CONFIG_GET(add, base, h2_window_size);
+ n->min_workers = H2_CONFIG_GET(add, base, min_workers);
+ n->max_workers = H2_CONFIG_GET(add, base, max_workers);
+ n->max_worker_idle_secs = H2_CONFIG_GET(add, base, max_worker_idle_secs);
+ n->stream_max_mem_size = H2_CONFIG_GET(add, base, stream_max_mem_size);
+ n->alt_svcs = add->alt_svcs? add->alt_svcs : base->alt_svcs;
+ n->alt_svc_max_age = H2_CONFIG_GET(add, base, alt_svc_max_age);
+ n->serialize_headers = H2_CONFIG_GET(add, base, serialize_headers);
+ n->h2_direct = H2_CONFIG_GET(add, base, h2_direct);
+ n->modern_tls_only = H2_CONFIG_GET(add, base, modern_tls_only);
+ n->h2_upgrade = H2_CONFIG_GET(add, base, h2_upgrade);
+ n->tls_warmup_size = H2_CONFIG_GET(add, base, tls_warmup_size);
+ n->tls_cooldown_secs = H2_CONFIG_GET(add, base, tls_cooldown_secs);
+ n->h2_push = H2_CONFIG_GET(add, base, h2_push);
+ if (add->priorities && base->priorities) {
+ n->priorities = apr_hash_overlay(pool, add->priorities, base->priorities);
+ }
+ else {
+ n->priorities = add->priorities? add->priorities : base->priorities;
+ }
+ n->push_diary_size = H2_CONFIG_GET(add, base, push_diary_size);
+ n->copy_files = H2_CONFIG_GET(add, base, copy_files);
+ if (add->push_list && base->push_list) {
+ n->push_list = apr_array_append(pool, base->push_list, add->push_list);
+ }
+ else {
+ n->push_list = add->push_list? add->push_list : base->push_list;
+ }
+ n->early_hints = H2_CONFIG_GET(add, base, early_hints);
+ return n;
+}
+
+void *h2_config_merge_dir(apr_pool_t *pool, void *basev, void *addv)
+{
+ return h2_config_merge(pool, basev, addv);
+}
+
+void *h2_config_merge_svr(apr_pool_t *pool, void *basev, void *addv)
+{
+ return h2_config_merge(pool, basev, addv);
+}
+
+int h2_config_geti(const h2_config *conf, h2_config_var_t var)
+{
+ return (int)h2_config_geti64(conf, var);
+}
+
+apr_int64_t h2_config_geti64(const h2_config *conf, h2_config_var_t var)
+{
+ switch(var) {
+ case H2_CONF_MAX_STREAMS:
+ return H2_CONFIG_GET(conf, &defconf, h2_max_streams);
+ case H2_CONF_WIN_SIZE:
+ return H2_CONFIG_GET(conf, &defconf, h2_window_size);
+ case H2_CONF_MIN_WORKERS:
+ return H2_CONFIG_GET(conf, &defconf, min_workers);
+ case H2_CONF_MAX_WORKERS:
+ return H2_CONFIG_GET(conf, &defconf, max_workers);
+ case H2_CONF_MAX_WORKER_IDLE_SECS:
+ return H2_CONFIG_GET(conf, &defconf, max_worker_idle_secs);
+ case H2_CONF_STREAM_MAX_MEM:
+ return H2_CONFIG_GET(conf, &defconf, stream_max_mem_size);
+ case H2_CONF_ALT_SVC_MAX_AGE:
+ return H2_CONFIG_GET(conf, &defconf, alt_svc_max_age);
+ case H2_CONF_SER_HEADERS:
+ return H2_CONFIG_GET(conf, &defconf, serialize_headers);
+ case H2_CONF_MODERN_TLS_ONLY:
+ return H2_CONFIG_GET(conf, &defconf, modern_tls_only);
+ case H2_CONF_UPGRADE:
+ return H2_CONFIG_GET(conf, &defconf, h2_upgrade);
+ case H2_CONF_DIRECT:
+ return H2_CONFIG_GET(conf, &defconf, h2_direct);
+ case H2_CONF_TLS_WARMUP_SIZE:
+ return H2_CONFIG_GET(conf, &defconf, tls_warmup_size);
+ case H2_CONF_TLS_COOLDOWN_SECS:
+ return H2_CONFIG_GET(conf, &defconf, tls_cooldown_secs);
+ case H2_CONF_PUSH:
+ return H2_CONFIG_GET(conf, &defconf, h2_push);
+ case H2_CONF_PUSH_DIARY_SIZE:
+ return H2_CONFIG_GET(conf, &defconf, push_diary_size);
+ case H2_CONF_COPY_FILES:
+ return H2_CONFIG_GET(conf, &defconf, copy_files);
+ case H2_CONF_EARLY_HINTS:
+ return H2_CONFIG_GET(conf, &defconf, early_hints);
+ default:
+ return DEF_VAL;
+ }
+}
+
+const h2_config *h2_config_sget(server_rec *s)
+{
+ h2_config *cfg = (h2_config *)ap_get_module_config(s->module_config,
+ &http2_module);
+ ap_assert(cfg);
+ return cfg;
+}
+
+const struct h2_priority *h2_config_get_priority(const h2_config *conf,
+ const char *content_type)
+{
+ if (content_type && conf->priorities) {
+ size_t len = strcspn(content_type, "; \t");
+ h2_priority *prio = apr_hash_get(conf->priorities, content_type, len);
+ return prio? prio : apr_hash_get(conf->priorities, "*", 1);
+ }
+ return NULL;
+}
+
+static const char *h2_conf_set_max_streams(cmd_parms *parms,
+ void *arg, const char *value)
+{
+ h2_config *cfg = (h2_config *)h2_config_sget(parms->server);
+ cfg->h2_max_streams = (int)apr_atoi64(value);
+ (void)arg;
+ if (cfg->h2_max_streams < 1) {
+ return "value must be > 0";
+ }
+ return NULL;
+}
+
+static const char *h2_conf_set_window_size(cmd_parms *parms,
+ void *arg, const char *value)
+{
+ h2_config *cfg = (h2_config *)h2_config_sget(parms->server);
+ cfg->h2_window_size = (int)apr_atoi64(value);
+ (void)arg;
+ if (cfg->h2_window_size < 1024) {
+ return "value must be >= 1024";
+ }
+ return NULL;
+}
+
+static const char *h2_conf_set_min_workers(cmd_parms *parms,
+ void *arg, const char *value)
+{
+ h2_config *cfg = (h2_config *)h2_config_sget(parms->server);
+ cfg->min_workers = (int)apr_atoi64(value);
+ (void)arg;
+ if (cfg->min_workers < 1) {
+ return "value must be > 0";
+ }
+ return NULL;
+}
+
+static const char *h2_conf_set_max_workers(cmd_parms *parms,
+ void *arg, const char *value)
+{
+ h2_config *cfg = (h2_config *)h2_config_sget(parms->server);
+ cfg->max_workers = (int)apr_atoi64(value);
+ (void)arg;
+ if (cfg->max_workers < 1) {
+ return "value must be > 0";
+ }
+ return NULL;
+}
+
+static const char *h2_conf_set_max_worker_idle_secs(cmd_parms *parms,
+ void *arg, const char *value)
+{
+ h2_config *cfg = (h2_config *)h2_config_sget(parms->server);
+ cfg->max_worker_idle_secs = (int)apr_atoi64(value);
+ (void)arg;
+ if (cfg->max_worker_idle_secs < 1) {
+ return "value must be > 0";
+ }
+ return NULL;
+}
+
+static const char *h2_conf_set_stream_max_mem_size(cmd_parms *parms,
+ void *arg, const char *value)
+{
+ h2_config *cfg = (h2_config *)h2_config_sget(parms->server);
+
+ cfg->stream_max_mem_size = (int)apr_atoi64(value);
+ (void)arg;
+ if (cfg->stream_max_mem_size < 1024) {
+ return "value must be >= 1024";
+ }
+ return NULL;
+}
+
+static const char *h2_add_alt_svc(cmd_parms *parms,
+ void *arg, const char *value)
+{
+ if (value && *value) {
+ h2_config *cfg = (h2_config *)h2_config_sget(parms->server);
+ h2_alt_svc *as = h2_alt_svc_parse(value, parms->pool);
+ if (!as) {
+ return "unable to parse alt-svc specifier";
+ }
+ if (!cfg->alt_svcs) {
+ cfg->alt_svcs = apr_array_make(parms->pool, 5, sizeof(h2_alt_svc*));
+ }
+ APR_ARRAY_PUSH(cfg->alt_svcs, h2_alt_svc*) = as;
+ }
+ (void)arg;
+ return NULL;
+}
+
+static const char *h2_conf_set_alt_svc_max_age(cmd_parms *parms,
+ void *arg, const char *value)
+{
+ h2_config *cfg = (h2_config *)h2_config_sget(parms->server);
+ cfg->alt_svc_max_age = (int)apr_atoi64(value);
+ (void)arg;
+ return NULL;
+}
+
+static const char *h2_conf_set_session_extra_files(cmd_parms *parms,
+ void *arg, const char *value)
+{
+ /* deprecated, ignore */
+ (void)arg;
+ (void)value;
+ ap_log_perror(APLOG_MARK, APLOG_WARNING, 0, parms->pool, /* NO LOGNO */
+ "H2SessionExtraFiles is obsolete and will be ignored");
+ return NULL;
+}
+
+static const char *h2_conf_set_serialize_headers(cmd_parms *parms,
+ void *arg, const char *value)
+{
+ h2_config *cfg = (h2_config *)h2_config_sget(parms->server);
+ if (!strcasecmp(value, "On")) {
+ cfg->serialize_headers = 1;
+ return NULL;
+ }
+ else if (!strcasecmp(value, "Off")) {
+ cfg->serialize_headers = 0;
+ return NULL;
+ }
+
+ (void)arg;
+ return "value must be On or Off";
+}
+
+static const char *h2_conf_set_direct(cmd_parms *parms,
+ void *arg, const char *value)
+{
+ h2_config *cfg = (h2_config *)h2_config_sget(parms->server);
+ if (!strcasecmp(value, "On")) {
+ cfg->h2_direct = 1;
+ return NULL;
+ }
+ else if (!strcasecmp(value, "Off")) {
+ cfg->h2_direct = 0;
+ return NULL;
+ }
+
+ (void)arg;
+ return "value must be On or Off";
+}
+
+static const char *h2_conf_set_push(cmd_parms *parms,
+ void *arg, const char *value)
+{
+ h2_config *cfg = (h2_config *)h2_config_sget(parms->server);
+ if (!strcasecmp(value, "On")) {
+ cfg->h2_push = 1;
+ return NULL;
+ }
+ else if (!strcasecmp(value, "Off")) {
+ cfg->h2_push = 0;
+ return NULL;
+ }
+
+ (void)arg;
+ return "value must be On or Off";
+}
+
+static const char *h2_conf_add_push_priority(cmd_parms *cmd, void *_cfg,
+ const char *ctype, const char *sdependency,
+ const char *sweight)
+{
+ h2_config *cfg = (h2_config *)h2_config_sget(cmd->server);
+ const char *sdefweight = "16"; /* default AFTER weight */
+ h2_dependency dependency;
+ h2_priority *priority;
+ int weight;
+
+ if (!*ctype) {
+ return "1st argument must be a mime-type, like 'text/css' or '*'";
+ }
+
+ if (!sweight) {
+ /* 2 args only, but which one? */
+ if (apr_isdigit(sdependency[0])) {
+ sweight = sdependency;
+ sdependency = "AFTER"; /* default dependency */
+ }
+ }
+
+ if (!strcasecmp("AFTER", sdependency)) {
+ dependency = H2_DEPENDANT_AFTER;
+ }
+ else if (!strcasecmp("BEFORE", sdependency)) {
+ dependency = H2_DEPENDANT_BEFORE;
+ if (sweight) {
+ return "dependecy 'Before' does not allow a weight";
+ }
+ }
+ else if (!strcasecmp("INTERLEAVED", sdependency)) {
+ dependency = H2_DEPENDANT_INTERLEAVED;
+ sdefweight = "256"; /* default INTERLEAVED weight */
+ }
+ else {
+ return "dependency must be one of 'After', 'Before' or 'Interleaved'";
+ }
+
+ weight = (int)apr_atoi64(sweight? sweight : sdefweight);
+ if (weight < NGHTTP2_MIN_WEIGHT) {
+ return apr_psprintf(cmd->pool, "weight must be a number >= %d",
+ NGHTTP2_MIN_WEIGHT);
+ }
+
+ priority = apr_pcalloc(cmd->pool, sizeof(*priority));
+ priority->dependency = dependency;
+ priority->weight = weight;
+
+ if (!cfg->priorities) {
+ cfg->priorities = apr_hash_make(cmd->pool);
+ }
+ apr_hash_set(cfg->priorities, ctype, strlen(ctype), priority);
+ return NULL;
+}
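+
+/* Editorial examples (not part of the upstream source) of directive lines
+ * the parser above accepts:
+ *
+ *     H2PushPriority * After 16
+ *     H2PushPriority text/css Interleaved 256
+ *     H2PushPriority image/jpeg 32    (weight only, dependency defaults to After)
+ */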
+
+static const char *h2_conf_set_modern_tls_only(cmd_parms *parms,
+ void *arg, const char *value)
+{
+ h2_config *cfg = (h2_config *)h2_config_sget(parms->server);
+ if (!strcasecmp(value, "On")) {
+ cfg->modern_tls_only = 1;
+ return NULL;
+ }
+ else if (!strcasecmp(value, "Off")) {
+ cfg->modern_tls_only = 0;
+ return NULL;
+ }
+
+ (void)arg;
+ return "value must be On or Off";
+}
+
+static const char *h2_conf_set_upgrade(cmd_parms *parms,
+ void *arg, const char *value)
+{
+ h2_config *cfg = (h2_config *)h2_config_sget(parms->server);
+ if (!strcasecmp(value, "On")) {
+ cfg->h2_upgrade = 1;
+ return NULL;
+ }
+ else if (!strcasecmp(value, "Off")) {
+ cfg->h2_upgrade = 0;
+ return NULL;
+ }
+
+ (void)arg;
+ return "value must be On or Off";
+}
+
+static const char *h2_conf_set_tls_warmup_size(cmd_parms *parms,
+ void *arg, const char *value)
+{
+ h2_config *cfg = (h2_config *)h2_config_sget(parms->server);
+ cfg->tls_warmup_size = apr_atoi64(value);
+ (void)arg;
+ return NULL;
+}
+
+static const char *h2_conf_set_tls_cooldown_secs(cmd_parms *parms,
+ void *arg, const char *value)
+{
+ h2_config *cfg = (h2_config *)h2_config_sget(parms->server);
+ cfg->tls_cooldown_secs = (int)apr_atoi64(value);
+ (void)arg;
+ return NULL;
+}
+
+static const char *h2_conf_set_push_diary_size(cmd_parms *parms,
+ void *arg, const char *value)
+{
+ h2_config *cfg = (h2_config *)h2_config_sget(parms->server);
+ (void)arg;
+ cfg->push_diary_size = (int)apr_atoi64(value);
+ if (cfg->push_diary_size < 0) {
+ return "value must be >= 0";
+ }
+ if (cfg->push_diary_size > 0 && (cfg->push_diary_size & (cfg->push_diary_size-1))) {
+ return "value must a power of 2";
+ }
+ if (cfg->push_diary_size > (1 << 15)) {
+ return "value must <= 65536";
+ }
+ return NULL;
+}
+
+static const char *h2_conf_set_copy_files(cmd_parms *parms,
+ void *arg, const char *value)
+{
+ h2_config *cfg = (h2_config *)arg;
+ if (!strcasecmp(value, "On")) {
+ cfg->copy_files = 1;
+ return NULL;
+ }
+ else if (!strcasecmp(value, "Off")) {
+ cfg->copy_files = 0;
+ return NULL;
+ }
+
+ (void)arg;
+ return "value must be On or Off";
+}
+
+static void add_push(apr_pool_t *pool, h2_config *conf, h2_push_res *push)
+{
+ h2_push_res *new;
+ if (!conf->push_list) {
+ conf->push_list = apr_array_make(pool, 10, sizeof(*push));
+ }
+ new = apr_array_push(conf->push_list);
+ new->uri_ref = push->uri_ref;
+ new->critical = push->critical;
+}
+
+static const char *h2_conf_add_push_res(cmd_parms *cmd, void *dirconf,
+ const char *arg1, const char *arg2,
+ const char *arg3)
+{
+ h2_config *dconf = (h2_config*)dirconf;
+ h2_config *sconf = (h2_config*)h2_config_sget(cmd->server);
+ h2_push_res push;
+ const char *last = arg3;
+
+ memset(&push, 0, sizeof(push));
+ if (!strcasecmp("add", arg1)) {
+ push.uri_ref = arg2;
+ }
+ else {
+ push.uri_ref = arg1;
+ last = arg2;
+ if (arg3) {
+ return "too many parameter";
+ }
+ }
+
+ if (last) {
+ if (!strcasecmp("critical", last)) {
+ push.critical = 1;
+ }
+ else {
+ return "unknown last parameter";
+ }
+ }
+
+ /* server command? set both */
+ if (cmd->path == NULL) {
+ add_push(cmd->pool, sconf, &push);
+ add_push(cmd->pool, dconf, &push);
+ }
+ else {
+ add_push(cmd->pool, dconf, &push);
+ }
+
+ return NULL;
+}
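+
+/* Editorial examples (not part of the upstream source) of directive lines
+ * the parser above accepts:
+ *
+ *     H2PushResource /css/site.css
+ *     H2PushResource /css/site.css critical
+ *     H2PushResource add /js/app.js
+ */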
+
+static const char *h2_conf_set_early_hints(cmd_parms *parms,
+ void *arg, const char *value)
+{
+ h2_config *cfg = (h2_config *)h2_config_sget(parms->server);
+ if (!strcasecmp(value, "On")) {
+ cfg->early_hints = 1;
+ return NULL;
+ }
+ else if (!strcasecmp(value, "Off")) {
+ cfg->early_hints = 0;
+ return NULL;
+ }
+
+ (void)arg;
+ return "value must be On or Off";
+}
+
+void h2_get_num_workers(server_rec *s, int *minw, int *maxw)
+{
+ int threads_per_child = 0;
+ const h2_config *config = h2_config_sget(s);
+
+ *minw = h2_config_geti(config, H2_CONF_MIN_WORKERS);
+ *maxw = h2_config_geti(config, H2_CONF_MAX_WORKERS);
+ ap_mpm_query(AP_MPMQ_MAX_THREADS, &threads_per_child);
+
+ if (*minw <= 0) {
+ *minw = threads_per_child;
+ }
+ if (*maxw <= 0) {
+ /* As a default, this seems to work quite well under mpm_event.
+ * For people enabling http2 under mpm_prefork, start 4 threads unless
+ * configured otherwise. People get unhappy if their http2 requests are
+ * blocking each other. */
+ *maxw = 3 * (*minw) / 2;
+ if (*maxw < 4) {
+ *maxw = 4;
+ }
+ }
+}
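+
+/* Editorial worked example (not part of the upstream source), assuming the
+ * MPM reports its ThreadsPerChild via AP_MPMQ_MAX_THREADS: with mpm_event
+ * and ThreadsPerChild 25 and no H2MinWorkers/H2MaxWorkers set, this yields
+ * minw = 25 and maxw = 3 * 25 / 2 = 37; under mpm_prefork (one thread per
+ * child) it yields minw = 1 and maxw = 4 via the floor applied above. */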
+
+#define AP_END_CMD AP_INIT_TAKE1(NULL, NULL, NULL, RSRC_CONF, NULL)
+
+const command_rec h2_cmds[] = {
+ AP_INIT_TAKE1("H2MaxSessionStreams", h2_conf_set_max_streams, NULL,
+ RSRC_CONF, "maximum number of open streams per session"),
+ AP_INIT_TAKE1("H2WindowSize", h2_conf_set_window_size, NULL,
+ RSRC_CONF, "window size on client DATA"),
+ AP_INIT_TAKE1("H2MinWorkers", h2_conf_set_min_workers, NULL,
+ RSRC_CONF, "minimum number of worker threads per child"),
+ AP_INIT_TAKE1("H2MaxWorkers", h2_conf_set_max_workers, NULL,
+ RSRC_CONF, "maximum number of worker threads per child"),
+ AP_INIT_TAKE1("H2MaxWorkerIdleSeconds", h2_conf_set_max_worker_idle_secs, NULL,
+ RSRC_CONF, "maximum number of idle seconds before a worker shuts down"),
+ AP_INIT_TAKE1("H2StreamMaxMemSize", h2_conf_set_stream_max_mem_size, NULL,
+ RSRC_CONF, "maximum number of bytes buffered in memory for a stream"),
+ AP_INIT_TAKE1("H2AltSvc", h2_add_alt_svc, NULL,
+ RSRC_CONF, "adds an Alt-Svc for this server"),
+ AP_INIT_TAKE1("H2AltSvcMaxAge", h2_conf_set_alt_svc_max_age, NULL,
+ RSRC_CONF, "set the maximum age (in seconds) that client can rely on alt-svc information"),
+ AP_INIT_TAKE1("H2SerializeHeaders", h2_conf_set_serialize_headers, NULL,
+ RSRC_CONF, "on to enable header serialization for compatibility"),
+ AP_INIT_TAKE1("H2ModernTLSOnly", h2_conf_set_modern_tls_only, NULL,
+ RSRC_CONF, "off to not impose RFC 7540 restrictions on TLS"),
+ AP_INIT_TAKE1("H2Upgrade", h2_conf_set_upgrade, NULL,
+ RSRC_CONF, "on to allow HTTP/1 Upgrades to h2/h2c"),
+ AP_INIT_TAKE1("H2Direct", h2_conf_set_direct, NULL,
+ RSRC_CONF, "on to enable direct HTTP/2 mode"),
+ AP_INIT_TAKE1("H2SessionExtraFiles", h2_conf_set_session_extra_files, NULL,
+ RSRC_CONF, "number of extra file a session might keep open (obsolete)"),
+ AP_INIT_TAKE1("H2TLSWarmUpSize", h2_conf_set_tls_warmup_size, NULL,
+ RSRC_CONF, "number of bytes on TLS connection before doing max writes"),
+ AP_INIT_TAKE1("H2TLSCoolDownSecs", h2_conf_set_tls_cooldown_secs, NULL,
+ RSRC_CONF, "seconds of idle time on TLS before shrinking writes"),
+ AP_INIT_TAKE1("H2Push", h2_conf_set_push, NULL,
+ RSRC_CONF, "off to disable HTTP/2 server push"),
+ AP_INIT_TAKE23("H2PushPriority", h2_conf_add_push_priority, NULL,
+ RSRC_CONF, "define priority of PUSHed resources per content type"),
+ AP_INIT_TAKE1("H2PushDiarySize", h2_conf_set_push_diary_size, NULL,
+ RSRC_CONF, "size of push diary"),
+ AP_INIT_TAKE1("H2CopyFiles", h2_conf_set_copy_files, NULL,
+ OR_FILEINFO, "on to perform copy of file data"),
+ AP_INIT_TAKE123("H2PushResource", h2_conf_add_push_res, NULL,
+ OR_FILEINFO, "add a resource to be pushed in this location/on this server."),
+ AP_INIT_TAKE1("H2EarlyHints", h2_conf_set_early_hints, NULL,
+ RSRC_CONF, "on to enable interim status 103 responses"),
+ AP_END_CMD
+};
+
+
+const h2_config *h2_config_rget(request_rec *r)
+{
+ h2_config *cfg = (h2_config *)ap_get_module_config(r->per_dir_config,
+ &http2_module);
+ return cfg? cfg : h2_config_sget(r->server);
+}
+
+const h2_config *h2_config_get(conn_rec *c)
+{
+ h2_ctx *ctx = h2_ctx_get(c, 0);
+
+ if (ctx) {
+ if (ctx->config) {
+ return ctx->config;
+ }
+ else if (ctx->server) {
+ ctx->config = h2_config_sget(ctx->server);
+ return ctx->config;
+ }
+ }
+
+ return h2_config_sget(c->base_server);
+}
diff --git a/modules/http2/h2_config.h b/modules/http2/h2_config.h
new file mode 100644
index 0000000..17d75d6
--- /dev/null
+++ b/modules/http2/h2_config.h
@@ -0,0 +1,106 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __mod_h2__h2_config_h__
+#define __mod_h2__h2_config_h__
+
+#undef PACKAGE_VERSION
+#undef PACKAGE_TARNAME
+#undef PACKAGE_STRING
+#undef PACKAGE_NAME
+#undef PACKAGE_BUGREPORT
+
+typedef enum {
+ H2_CONF_MAX_STREAMS,
+ H2_CONF_WIN_SIZE,
+ H2_CONF_MIN_WORKERS,
+ H2_CONF_MAX_WORKERS,
+ H2_CONF_MAX_WORKER_IDLE_SECS,
+ H2_CONF_STREAM_MAX_MEM,
+ H2_CONF_ALT_SVCS,
+ H2_CONF_ALT_SVC_MAX_AGE,
+ H2_CONF_SER_HEADERS,
+ H2_CONF_DIRECT,
+ H2_CONF_MODERN_TLS_ONLY,
+ H2_CONF_UPGRADE,
+ H2_CONF_TLS_WARMUP_SIZE,
+ H2_CONF_TLS_COOLDOWN_SECS,
+ H2_CONF_PUSH,
+ H2_CONF_PUSH_DIARY_SIZE,
+ H2_CONF_COPY_FILES,
+ H2_CONF_EARLY_HINTS,
+} h2_config_var_t;
+
+struct apr_hash_t;
+struct h2_priority;
+struct h2_push_res;
+
+typedef struct h2_push_res {
+ const char *uri_ref;
+ int critical;
+} h2_push_res;
+
+/* Apache httpd module configuration for h2. */
+typedef struct h2_config {
+ const char *name;
+ int h2_max_streams; /* max concurrent # streams (http2) */
+ int h2_window_size; /* stream window size (http2) */
+ int min_workers; /* min # of worker threads/child */
+ int max_workers; /* max # of worker threads/child */
+ int max_worker_idle_secs; /* max # of idle seconds for worker */
+ int stream_max_mem_size; /* max # bytes held in memory/stream */
+ apr_array_header_t *alt_svcs; /* h2_alt_svc specs for this server */
+ int alt_svc_max_age; /* seconds clients can rely on alt-svc info*/
+ int serialize_headers; /* Use serialized HTTP/1.1 headers for
+ processing, better compatibility */
+ int h2_direct; /* if mod_h2 is active directly */
+ int modern_tls_only; /* Accept only modern TLS in HTTP/2 connections */
+ int h2_upgrade; /* Allow HTTP/1 upgrade to h2/h2c */
+ apr_int64_t tls_warmup_size; /* Amount of TLS data to send before going full write size */
+ int tls_cooldown_secs; /* Seconds of idle time before going back to small TLS records */
+ int h2_push; /* if HTTP/2 server push is enabled */
+ struct apr_hash_t *priorities;/* map of content-type to h2_priority records */
+
+ int push_diary_size; /* # of entries in push diary */
+ int copy_files; /* if files shall be copied vs setaside on output */
+ apr_array_header_t *push_list;/* list of h2_push_res configurations */
+ int early_hints; /* support status code 103 */
+} h2_config;
+
+
+void *h2_config_create_dir(apr_pool_t *pool, char *x);
+void *h2_config_merge_dir(apr_pool_t *pool, void *basev, void *addv);
+void *h2_config_create_svr(apr_pool_t *pool, server_rec *s);
+void *h2_config_merge_svr(apr_pool_t *pool, void *basev, void *addv);
+
+extern const command_rec h2_cmds[];
+
+const h2_config *h2_config_get(conn_rec *c);
+const h2_config *h2_config_sget(server_rec *s);
+const h2_config *h2_config_rget(request_rec *r);
+
+int h2_config_geti(const h2_config *conf, h2_config_var_t var);
+apr_int64_t h2_config_geti64(const h2_config *conf, h2_config_var_t var);
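+
+/* Editorial example (not part of the upstream header): looking up a setting
+ * for a connection, falling back to the compiled-in default where unset:
+ *
+ *     const h2_config *conf = h2_config_get(c);
+ *     int push_enabled = h2_config_geti(conf, H2_CONF_PUSH);
+ */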
+
+void h2_get_num_workers(server_rec *s, int *minw, int *maxw);
+
+void h2_config_init(apr_pool_t *pool);
+
+const struct h2_priority *h2_config_get_priority(const h2_config *conf,
+ const char *content_type);
+
+#endif /* __mod_h2__h2_config_h__ */
+
diff --git a/modules/http2/h2_conn.c b/modules/http2/h2_conn.c
new file mode 100644
index 0000000..88da2ba
--- /dev/null
+++ b/modules/http2/h2_conn.c
@@ -0,0 +1,370 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <assert.h>
+#include <apr_strings.h>
+
+#include <ap_mpm.h>
+
+#include <httpd.h>
+#include <http_core.h>
+#include <http_config.h>
+#include <http_log.h>
+#include <http_connection.h>
+#include <http_protocol.h>
+#include <http_request.h>
+
+#include <mpm_common.h>
+
+#include "h2_private.h"
+#include "h2.h"
+#include "h2_config.h"
+#include "h2_ctx.h"
+#include "h2_filter.h"
+#include "h2_mplx.h"
+#include "h2_session.h"
+#include "h2_stream.h"
+#include "h2_h2.h"
+#include "h2_task.h"
+#include "h2_workers.h"
+#include "h2_conn.h"
+#include "h2_version.h"
+
+static struct h2_workers *workers;
+
+static h2_mpm_type_t mpm_type = H2_MPM_UNKNOWN;
+static module *mpm_module;
+static int async_mpm;
+static int mpm_supported = 1;
+static apr_socket_t *dummy_socket;
+
+static void check_modules(int force)
+{
+ static int checked = 0;
+ int i;
+
+ if (force || !checked) {
+ for (i = 0; ap_loaded_modules[i]; ++i) {
+ module *m = ap_loaded_modules[i];
+
+ if (!strcmp("event.c", m->name)) {
+ mpm_type = H2_MPM_EVENT;
+ mpm_module = m;
+ break;
+ }
+ else if (!strcmp("motorz.c", m->name)) {
+ mpm_type = H2_MPM_MOTORZ;
+ mpm_module = m;
+ break;
+ }
+ else if (!strcmp("mpm_netware.c", m->name)) {
+ mpm_type = H2_MPM_NETWARE;
+ mpm_module = m;
+ break;
+ }
+ else if (!strcmp("prefork.c", m->name)) {
+ mpm_type = H2_MPM_PREFORK;
+ mpm_module = m;
+ /* While http2 can work really well on prefork, it collides with
+ * today's use case for prefork: running single-threaded app engines
+ * like php. If we restrict h2_workers to 1 per process, php will
+ * work fine, but browsers will be limited to 1 active request at a
+ * time. */
+ mpm_supported = 0;
+ break;
+ }
+ else if (!strcmp("simple_api.c", m->name)) {
+ mpm_type = H2_MPM_SIMPLE;
+ mpm_module = m;
+ mpm_supported = 0;
+ break;
+ }
+ else if (!strcmp("mpm_winnt.c", m->name)) {
+ mpm_type = H2_MPM_WINNT;
+ mpm_module = m;
+ break;
+ }
+ else if (!strcmp("worker.c", m->name)) {
+ mpm_type = H2_MPM_WORKER;
+ mpm_module = m;
+ break;
+ }
+ }
+ checked = 1;
+ }
+}
+
+apr_status_t h2_conn_child_init(apr_pool_t *pool, server_rec *s)
+{
+ const h2_config *config = h2_config_sget(s);
+ apr_status_t status = APR_SUCCESS;
+ int minw, maxw;
+ int max_threads_per_child = 0;
+ int idle_secs = 0;
+
+ check_modules(1);
+ ap_mpm_query(AP_MPMQ_MAX_THREADS, &max_threads_per_child);
+
+ status = ap_mpm_query(AP_MPMQ_IS_ASYNC, &async_mpm);
+ if (status != APR_SUCCESS) {
+ /* some MPMs do not implement this */
+ async_mpm = 0;
+ status = APR_SUCCESS;
+ }
+
+ h2_config_init(pool);
+
+ h2_get_num_workers(s, &minw, &maxw);
+
+ idle_secs = h2_config_geti(config, H2_CONF_MAX_WORKER_IDLE_SECS);
+ ap_log_error(APLOG_MARK, APLOG_TRACE3, 0, s,
+ "h2_workers: min=%d max=%d, mthrpchild=%d, idle_secs=%d",
+ minw, maxw, max_threads_per_child, idle_secs);
+ workers = h2_workers_create(s, pool, minw, maxw, idle_secs);
+
+ ap_register_input_filter("H2_IN", h2_filter_core_input,
+ NULL, AP_FTYPE_CONNECTION);
+
+ status = h2_mplx_child_init(pool, s);
+
+ if (status == APR_SUCCESS) {
+ status = apr_socket_create(&dummy_socket, APR_INET, SOCK_STREAM,
+ APR_PROTO_TCP, pool);
+ }
+
+ return status;
+}
+
+h2_mpm_type_t h2_conn_mpm_type(void)
+{
+ check_modules(0);
+ return mpm_type;
+}
+
+const char *h2_conn_mpm_name(void)
+{
+ check_modules(0);
+ return mpm_module? mpm_module->name : "unknown";
+}
+
+int h2_mpm_supported(void)
+{
+ check_modules(0);
+ return mpm_supported;
+}
+
+static module *h2_conn_mpm_module(void)
+{
+ check_modules(0);
+ return mpm_module;
+}
+
+apr_status_t h2_conn_setup(h2_ctx *ctx, conn_rec *c, request_rec *r)
+{
+ h2_session *session;
+ apr_status_t status;
+
+ if (!workers) {
+ ap_log_cerror(APLOG_MARK, APLOG_ERR, 0, c, APLOGNO(02911)
+ "workers not initialized");
+ return APR_EGENERAL;
+ }
+
+ if (r) {
+ status = h2_session_rcreate(&session, r, ctx, workers);
+ }
+ else {
+ status = h2_session_create(&session, c, ctx, workers);
+ }
+
+ if (status == APR_SUCCESS) {
+ h2_ctx_session_set(ctx, session);
+ }
+ return status;
+}
+
+apr_status_t h2_conn_run(struct h2_ctx *ctx, conn_rec *c)
+{
+ apr_status_t status;
+ int mpm_state = 0;
+ h2_session *session = h2_ctx_session_get(ctx);
+
+ ap_assert(session);
+ do {
+ if (c->cs) {
+ c->cs->sense = CONN_SENSE_DEFAULT;
+ c->cs->state = CONN_STATE_HANDLER;
+ }
+
+ status = h2_session_process(session, async_mpm);
+
+ if (APR_STATUS_IS_EOF(status)) {
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, c,
+ H2_SSSN_LOG(APLOGNO(03045), session,
+ "process, closing conn"));
+ c->keepalive = AP_CONN_CLOSE;
+ }
+ else {
+ c->keepalive = AP_CONN_KEEPALIVE;
+ }
+
+ if (ap_mpm_query(AP_MPMQ_MPM_STATE, &mpm_state)) {
+ break;
+ }
+ } while (!async_mpm
+ && c->keepalive == AP_CONN_KEEPALIVE
+ && mpm_state != AP_MPMQ_STOPPING);
+
+ if (c->cs) {
+ switch (session->state) {
+ case H2_SESSION_ST_INIT:
+ case H2_SESSION_ST_IDLE:
+ case H2_SESSION_ST_BUSY:
+ case H2_SESSION_ST_WAIT:
+ c->cs->state = CONN_STATE_WRITE_COMPLETION;
+ break;
+ case H2_SESSION_ST_CLEANUP:
+ case H2_SESSION_ST_DONE:
+ default:
+ c->cs->state = CONN_STATE_LINGER;
+ break;
+ }
+ }
+
+ return APR_SUCCESS;
+}
+
+apr_status_t h2_conn_pre_close(struct h2_ctx *ctx, conn_rec *c)
+{
+ h2_session *session = h2_ctx_session_get(ctx);
+ if (session) {
+ apr_status_t status = h2_session_pre_close(session, async_mpm);
+ return (status == APR_SUCCESS)? DONE : status;
+ }
+ return DONE;
+}
+
+conn_rec *h2_slave_create(conn_rec *master, int slave_id, apr_pool_t *parent)
+{
+ apr_allocator_t *allocator;
+ apr_status_t status;
+ apr_pool_t *pool;
+ conn_rec *c;
+ void *cfg;
+ module *mpm;
+
+ ap_assert(master);
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE3, 0, master,
+ "h2_stream(%ld-%d): create slave", master->id, slave_id);
+
+ /* We create a pool with its own allocator to be used for
+ * processing a request. This is the only way to have the processing
+ * independent of its parent pool in the sense that it can work in
+ * another thread. Also, the new allocator needs its own mutex to
+ * synchronize sub-pools.
+ */
+ apr_allocator_create(&allocator);
+ apr_allocator_max_free_set(allocator, ap_max_mem_free);
+ status = apr_pool_create_ex(&pool, parent, NULL, allocator);
+ if (status != APR_SUCCESS) {
+ ap_log_cerror(APLOG_MARK, APLOG_ERR, status, master,
+ APLOGNO(10004) "h2_session(%ld-%d): create slave pool",
+ master->id, slave_id);
+ return NULL;
+ }
+ apr_allocator_owner_set(allocator, pool);
+ apr_pool_tag(pool, "h2_slave_conn");
+
+ c = (conn_rec *) apr_palloc(pool, sizeof(conn_rec));
+ if (c == NULL) {
+ ap_log_cerror(APLOG_MARK, APLOG_ERR, APR_ENOMEM, master,
+ APLOGNO(02913) "h2_session(%ld-%d): create slave",
+ master->id, slave_id);
+ apr_pool_destroy(pool);
+ return NULL;
+ }
+
+ memcpy(c, master, sizeof(conn_rec));
+
+ c->master = master;
+ c->pool = pool;
+ c->conn_config = ap_create_conn_config(pool);
+ c->notes = apr_table_make(pool, 5);
+ c->input_filters = NULL;
+ c->output_filters = NULL;
+ c->bucket_alloc = apr_bucket_alloc_create(pool);
+ c->data_in_input_filters = 0;
+ c->data_in_output_filters = 0;
+ /* prevent mpm_event from making wrong assumptions about this connection,
+ * e.g. using its socket for an async read check. */
+ c->clogging_input_filters = 1;
+ c->log = NULL;
+ c->log_id = apr_psprintf(pool, "%ld-%d",
+ master->id, slave_id);
+ c->aborted = 0;
+ /* We cannot install the master connection socket on the slaves, as
+ * modules mess with timeouts/blocking of the socket, with
+ * unwanted side effects to the master connection processing.
+ * Fortunately, since we never use the slave socket, we can just install
+ * a single, process-wide dummy and everyone is happy.
+ */
+ ap_set_module_config(c->conn_config, &core_module, dummy_socket);
+ /* TODO: these should be unique to this thread */
+ c->sbh = master->sbh;
+ /* TODO: not all mpm modules have learned about slave connections yet.
+ * copy their config from master to slave.
+ */
+ if ((mpm = h2_conn_mpm_module()) != NULL) {
+ cfg = ap_get_module_config(master->conn_config, mpm);
+ ap_set_module_config(c->conn_config, mpm, cfg);
+ }
+
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, c,
+ "h2_stream(%ld-%d): created slave", master->id, slave_id);
+ return c;
+}
+
+void h2_slave_destroy(conn_rec *slave)
+{
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, slave,
+ "h2_stream(%s): destroy slave",
+ apr_table_get(slave->notes, H2_TASK_ID_NOTE));
+ slave->sbh = NULL;
+ apr_pool_destroy(slave->pool);
+}
+
+apr_status_t h2_slave_run_pre_connection(conn_rec *slave, apr_socket_t *csd)
+{
+ if (slave->keepalives == 0) {
+ /* Simulate that we already had a request on this connection. Some
+ * hooks trigger special behaviour when keepalives is 0.
+ * (Not necessarily in pre_connection, but later. Set it here, so it
+ * is in place.) */
+ slave->keepalives = 1;
+ /* We signal that this connection will be closed after the request.
+ * This is true in the sense that we throw away all traffic data
+ * on this slave connection after each request, although we might
+ * reuse internal structures like memory pools.
+ * The wanted effect of this is that httpd does not try to clean up
+ * any dangling data on this connection when a request is done, which
+ * is unnecessary on an h2 stream.
+ */
+ slave->keepalive = AP_CONN_CLOSE;
+ return ap_run_pre_connection(slave, csd);
+ }
+ return APR_SUCCESS;
+}
+
diff --git a/modules/http2/h2_conn.h b/modules/http2/h2_conn.h
new file mode 100644
index 0000000..e45ff31
--- /dev/null
+++ b/modules/http2/h2_conn.h
@@ -0,0 +1,77 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __mod_h2__h2_conn__
+#define __mod_h2__h2_conn__
+
+struct h2_ctx;
+struct h2_task;
+
+/**
+ * Setup the connection and our context for HTTP/2 processing
+ *
+ * @param ctx the http2 context to setup
+ * @param c the connection HTTP/2 is starting on
+ * @param r the upgrade request that still awaits an answer, optional
+ */
+apr_status_t h2_conn_setup(struct h2_ctx *ctx, conn_rec *c, request_rec *r);
+
+/**
+ * Run the HTTP/2 connection in synchronous fashion.
+ * Returns when the HTTP/2 session is done
+ * and the connection will close, or when a fatal error occurred.
+ *
+ * @param ctx the http2 context to run
+ * @return APR_SUCCESS when session is done.
+ */
+apr_status_t h2_conn_run(struct h2_ctx *ctx, conn_rec *c);
+
+/**
+ * The connection is about to close. If we have not sent a GOAWAY
+ * yet, this is the last chance.
+ */
+apr_status_t h2_conn_pre_close(struct h2_ctx *ctx, conn_rec *c);
+
+/* Initialize this child process for h2 connection work,
+ * to be called once during child init before multiprocessing
+ * starts.
+ */
+apr_status_t h2_conn_child_init(apr_pool_t *pool, server_rec *s);
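+/* Illustrative sketch (not part of this patch): how a process_connection
+ * hook might drive the lifecycle functions above, assuming a hypothetical
+ * my_process_connection() and an h2_ctx obtained via h2_ctx_get() from
+ * h2_ctx.h; h2_conn_pre_close() would be called from the matching
+ * pre_close hook:
+ *
+ *   static int my_process_connection(conn_rec *c)
+ *   {
+ *       h2_ctx *ctx = h2_ctx_get(c, 1);
+ *       if (h2_conn_setup(ctx, c, NULL) == APR_SUCCESS) {
+ *           h2_conn_run(ctx, c);
+ *           return OK;
+ *       }
+ *       return DECLINED;
+ *   }
+ */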
+
+
+typedef enum {
+ H2_MPM_UNKNOWN,
+ H2_MPM_WORKER,
+ H2_MPM_EVENT,
+ H2_MPM_PREFORK,
+ H2_MPM_MOTORZ,
+ H2_MPM_SIMPLE,
+ H2_MPM_NETWARE,
+ H2_MPM_WINNT,
+} h2_mpm_type_t;
+
+/* Returns the type of MPM module detected */
+h2_mpm_type_t h2_conn_mpm_type(void);
+const char *h2_conn_mpm_name(void);
+int h2_mpm_supported(void);
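+/* Illustrative sketch (not part of this patch): a caller refusing to
+ * enable HTTP/2 on an unsuitable MPM, logging the detected module name:
+ *
+ *   if (!h2_mpm_supported()) {
+ *       ap_log_error(APLOG_MARK, APLOG_WARNING, 0, s,
+ *                    "HTTP/2 not supported on MPM %s", h2_conn_mpm_name());
+ *       return DECLINED;
+ *   }
+ */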
+
+conn_rec *h2_slave_create(conn_rec *master, int slave_id, apr_pool_t *parent);
+void h2_slave_destroy(conn_rec *slave);
+
+apr_status_t h2_slave_run_pre_connection(conn_rec *slave, apr_socket_t *csd);
+void h2_slave_run_connection(conn_rec *slave);
+
+#endif /* defined(__mod_h2__h2_conn__) */
diff --git a/modules/http2/h2_conn_io.c b/modules/http2/h2_conn_io.c
new file mode 100644
index 0000000..eb6ec92
--- /dev/null
+++ b/modules/http2/h2_conn_io.c
@@ -0,0 +1,389 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <assert.h>
+#include <apr_strings.h>
+#include <ap_mpm.h>
+
+#include <httpd.h>
+#include <http_core.h>
+#include <http_log.h>
+#include <http_connection.h>
+#include <http_request.h>
+
+#include "h2_private.h"
+#include "h2_bucket_eos.h"
+#include "h2_config.h"
+#include "h2_conn_io.h"
+#include "h2_h2.h"
+#include "h2_session.h"
+#include "h2_util.h"
+
+#define TLS_DATA_MAX (16*1024)
+
+/* Calculated like this: assuming MTU 1500 bytes
+ * 1500 - 40 (IP) - 20 (TCP) - 40 (TCP options)
+ * - TLS overhead (60-100)
+ * ~= 1300 bytes */
+#define WRITE_SIZE_INITIAL 1300
+
+/* Calculated like this: max TLS record size 16*1024
+ * - 40 (IP) - 20 (TCP) - 40 (TCP options)
+ * - TLS overhead (60-100)
+ * which seems to create fewer TCP packets overall
+ */
+#define WRITE_SIZE_MAX (TLS_DATA_MAX - 100)
+
+
+static void h2_conn_io_bb_log(conn_rec *c, int stream_id, int level,
+ const char *tag, apr_bucket_brigade *bb)
+{
+ char buffer[16 * 1024];
+ const char *line = "(null)";
+ apr_size_t bmax = sizeof(buffer)/sizeof(buffer[0]);
+ int off = 0;
+ apr_bucket *b;
+
+ if (bb) {
+ memset(buffer, 0, bmax--);
+ for (b = APR_BRIGADE_FIRST(bb);
+ bmax && (b != APR_BRIGADE_SENTINEL(bb));
+ b = APR_BUCKET_NEXT(b)) {
+
+ if (APR_BUCKET_IS_METADATA(b)) {
+ if (APR_BUCKET_IS_EOS(b)) {
+ off += apr_snprintf(buffer+off, bmax-off, "eos ");
+ }
+ else if (APR_BUCKET_IS_FLUSH(b)) {
+ off += apr_snprintf(buffer+off, bmax-off, "flush ");
+ }
+ else if (AP_BUCKET_IS_EOR(b)) {
+ off += apr_snprintf(buffer+off, bmax-off, "eor ");
+ }
+ else if (H2_BUCKET_IS_H2EOS(b)) {
+ off += apr_snprintf(buffer+off, bmax-off, "h2eos ");
+ }
+ else {
+ off += apr_snprintf(buffer+off, bmax-off, "meta(unknown) ");
+ }
+ }
+ else {
+ const char *btype = "data";
+ if (APR_BUCKET_IS_FILE(b)) {
+ btype = "file";
+ }
+ else if (APR_BUCKET_IS_PIPE(b)) {
+ btype = "pipe";
+ }
+ else if (APR_BUCKET_IS_SOCKET(b)) {
+ btype = "socket";
+ }
+ else if (APR_BUCKET_IS_HEAP(b)) {
+ btype = "heap";
+ }
+ else if (APR_BUCKET_IS_TRANSIENT(b)) {
+ btype = "transient";
+ }
+ else if (APR_BUCKET_IS_IMMORTAL(b)) {
+ btype = "immortal";
+ }
+#if APR_HAS_MMAP
+ else if (APR_BUCKET_IS_MMAP(b)) {
+ btype = "mmap";
+ }
+#endif
+ else if (APR_BUCKET_IS_POOL(b)) {
+ btype = "pool";
+ }
+
+ off += apr_snprintf(buffer+off, bmax-off, "%s[%ld] ",
+ btype,
+ (long)(b->length == ((apr_size_t)-1)?
+ -1 : b->length));
+ }
+ }
+ line = *buffer? buffer : "(empty)";
+ }
+ /* Intentionally no APLOGNO */
+ ap_log_cerror(APLOG_MARK, level, 0, c, "h2_session(%ld)-%s: %s",
+ c->id, tag, line);
+
+}
+
+apr_status_t h2_conn_io_init(h2_conn_io *io, conn_rec *c,
+ const h2_config *cfg)
+{
+ io->c = c;
+ io->output = apr_brigade_create(c->pool, c->bucket_alloc);
+ io->is_tls = h2_h2_is_tls(c);
+ io->buffer_output = io->is_tls;
+ io->flush_threshold = (apr_size_t)h2_config_geti64(cfg, H2_CONF_STREAM_MAX_MEM);
+
+ if (io->is_tls) {
+ /* This is what we start with,
+ * see https://issues.apache.org/jira/browse/TS-2503
+ */
+ io->warmup_size = h2_config_geti64(cfg, H2_CONF_TLS_WARMUP_SIZE);
+ io->cooldown_usecs = (h2_config_geti(cfg, H2_CONF_TLS_COOLDOWN_SECS)
+ * APR_USEC_PER_SEC);
+ io->write_size = (io->cooldown_usecs > 0?
+ WRITE_SIZE_INITIAL : WRITE_SIZE_MAX);
+ }
+ else {
+ io->warmup_size = 0;
+ io->cooldown_usecs = 0;
+ io->write_size = 0;
+ }
+
+ if (APLOGctrace1(c)) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE4, 0, io->c,
+ "h2_conn_io(%ld): init, buffering=%d, warmup_size=%ld, "
+ "cd_secs=%f", io->c->id, io->buffer_output,
+ (long)io->warmup_size,
+ ((float)io->cooldown_usecs/APR_USEC_PER_SEC));
+ }
+
+ return APR_SUCCESS;
+}
+
+static void append_scratch(h2_conn_io *io)
+{
+ if (io->scratch && io->slen > 0) {
+ apr_bucket *b = apr_bucket_heap_create(io->scratch, io->slen,
+ apr_bucket_free,
+ io->c->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(io->output, b);
+ io->scratch = NULL;
+ io->slen = io->ssize = 0;
+ }
+}
+
+static apr_size_t assure_scratch_space(h2_conn_io *io)
+{
+ apr_size_t remain = io->ssize - io->slen;
+ if (io->scratch && remain == 0) {
+ append_scratch(io);
+ }
+ if (!io->scratch) {
+ /* we control the size and it is larger than what buckets usually
+ * allocate. */
+ io->scratch = apr_bucket_alloc(io->write_size, io->c->bucket_alloc);
+ io->ssize = io->write_size;
+ io->slen = 0;
+ remain = io->ssize;
+ }
+ return remain;
+}
+
+static apr_status_t read_to_scratch(h2_conn_io *io, apr_bucket *b)
+{
+ apr_status_t status;
+ const char *data;
+ apr_size_t len;
+
+ if (!b->length) {
+ return APR_SUCCESS;
+ }
+
+ ap_assert(b->length <= (io->ssize - io->slen));
+ if (APR_BUCKET_IS_FILE(b)) {
+ apr_bucket_file *f = (apr_bucket_file *)b->data;
+ apr_file_t *fd = f->fd;
+ apr_off_t offset = b->start;
+ apr_size_t len = b->length;
+
+ /* file buckets will either mmap (which we do not want) or
+ * read 8000 byte chunks and split themselves. However, we do
+ * know *exactly* how many bytes we need, and where.
+ */
+ status = apr_file_seek(fd, APR_SET, &offset);
+ if (status != APR_SUCCESS) {
+ return status;
+ }
+ status = apr_file_read(fd, io->scratch + io->slen, &len);
+ if (status != APR_SUCCESS && status != APR_EOF) {
+ return status;
+ }
+ io->slen += len;
+ }
+ else {
+ status = apr_bucket_read(b, &data, &len, APR_BLOCK_READ);
+ if (status == APR_SUCCESS) {
+ memcpy(io->scratch+io->slen, data, len);
+ io->slen += len;
+ }
+ }
+ return status;
+}
+
+static void check_write_size(h2_conn_io *io)
+{
+ if (io->write_size > WRITE_SIZE_INITIAL
+ && (io->cooldown_usecs > 0)
+ && (apr_time_now() - io->last_write) >= io->cooldown_usecs) {
+ /* not written to for a long time, reset write size */
+ io->write_size = WRITE_SIZE_INITIAL;
+ io->bytes_written = 0;
+ }
+ else if (io->write_size < WRITE_SIZE_MAX
+ && io->bytes_written >= io->warmup_size) {
+ /* connection is hot, use max size */
+ io->write_size = WRITE_SIZE_MAX;
+ }
+}
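+/* Illustrative example (not part of this patch): with warmup_size set to
+ * 1 MB and cooldown_usecs to 1 second, a fresh or long-idle TLS connection
+ * sends ~1300 byte records (WRITE_SIZE_INITIAL) until 1 MB has been
+ * written, then switches to ~16 KB records (WRITE_SIZE_MAX); after one
+ * second without a write it drops back to the small size. */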
+
+static apr_status_t pass_output(h2_conn_io *io, int flush)
+{
+ conn_rec *c = io->c;
+ apr_bucket_brigade *bb = io->output;
+ apr_bucket *b;
+ apr_off_t bblen;
+ apr_status_t status;
+
+ append_scratch(io);
+ if (flush && !io->is_flushed) {
+ b = apr_bucket_flush_create(c->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(bb, b);
+ }
+
+ if (APR_BRIGADE_EMPTY(bb)) {
+ return APR_SUCCESS;
+ }
+
+ ap_update_child_status(c->sbh, SERVER_BUSY_WRITE, NULL);
+ apr_brigade_length(bb, 0, &bblen);
+ h2_conn_io_bb_log(c, 0, APLOG_TRACE2, "out", bb);
+
+ status = ap_pass_brigade(c->output_filters, bb);
+ if (status == APR_SUCCESS) {
+ io->bytes_written += (apr_size_t)bblen;
+ io->last_write = apr_time_now();
+ if (flush) {
+ io->is_flushed = 1;
+ }
+ }
+ apr_brigade_cleanup(bb);
+
+ if (status != APR_SUCCESS) {
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, c, APLOGNO(03044)
+ "h2_conn_io(%ld): pass_out brigade %ld bytes",
+ c->id, (long)bblen);
+ }
+ return status;
+}
+
+int h2_conn_io_needs_flush(h2_conn_io *io)
+{
+ if (!io->is_flushed) {
+ apr_off_t len = h2_brigade_mem_size(io->output);
+ if (len > io->flush_threshold) {
+ return 1;
+ }
+ /* if the memory-held part does not exceed the flush threshold,
+ * we still want to flush once the total brigade length exceeds
+ * four times that amount. */
+ apr_brigade_length(io->output, 0, &len);
+ return len > (4 * io->flush_threshold);
+ }
+ return 0;
+}
+
+apr_status_t h2_conn_io_flush(h2_conn_io *io)
+{
+ apr_status_t status;
+ status = pass_output(io, 1);
+ check_write_size(io);
+ return status;
+}
+
+apr_status_t h2_conn_io_write(h2_conn_io *io, const char *data, size_t length)
+{
+ apr_status_t status = APR_SUCCESS;
+ apr_size_t remain;
+
+ if (length > 0) {
+ io->is_flushed = 0;
+ }
+
+ if (io->buffer_output) {
+ while (length > 0) {
+ remain = assure_scratch_space(io);
+ if (remain >= length) {
+ memcpy(io->scratch + io->slen, data, length);
+ io->slen += length;
+ length = 0;
+ }
+ else {
+ memcpy(io->scratch + io->slen, data, remain);
+ io->slen += remain;
+ data += remain;
+ length -= remain;
+ }
+ }
+ }
+ else {
+ status = apr_brigade_write(io->output, NULL, NULL, data, length);
+ }
+ return status;
+}
+
+apr_status_t h2_conn_io_pass(h2_conn_io *io, apr_bucket_brigade *bb)
+{
+ apr_bucket *b;
+ apr_status_t status = APR_SUCCESS;
+
+ if (!APR_BRIGADE_EMPTY(bb)) {
+ io->is_flushed = 0;
+ }
+
+ while (!APR_BRIGADE_EMPTY(bb) && status == APR_SUCCESS) {
+ b = APR_BRIGADE_FIRST(bb);
+
+ if (APR_BUCKET_IS_METADATA(b)) {
+ /* need to finish any open scratch bucket, as meta data
+ * needs to be forwarded "in order". */
+ append_scratch(io);
+ APR_BUCKET_REMOVE(b);
+ APR_BRIGADE_INSERT_TAIL(io->output, b);
+ }
+ else if (io->buffer_output) {
+ apr_size_t remain = assure_scratch_space(io);
+ if (b->length > remain) {
+ apr_bucket_split(b, remain);
+ if (io->slen == 0) {
+ /* complete write_size bucket, append unchanged */
+ APR_BUCKET_REMOVE(b);
+ APR_BRIGADE_INSERT_TAIL(io->output, b);
+ continue;
+ }
+ }
+ else {
+ /* bucket fits in remain, copy to scratch */
+ status = read_to_scratch(io, b);
+ apr_bucket_delete(b);
+ continue;
+ }
+ }
+ else {
+ /* no buffering: set aside transient buckets and forward everything as is */
+ if (APR_BUCKET_IS_TRANSIENT(b)) {
+ apr_bucket_setaside(b, io->c->pool);
+ }
+ APR_BUCKET_REMOVE(b);
+ APR_BRIGADE_INSERT_TAIL(io->output, b);
+ }
+ }
+ return status;
+}
+
diff --git a/modules/http2/h2_conn_io.h b/modules/http2/h2_conn_io.h
new file mode 100644
index 0000000..2c3be1c
--- /dev/null
+++ b/modules/http2/h2_conn_io.h
@@ -0,0 +1,77 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __mod_h2__h2_conn_io__
+#define __mod_h2__h2_conn_io__
+
+struct h2_config;
+struct h2_session;
+
+/* h2_conn_io handles the output side of an httpd connection for HTTP/2.
+ * It buffers outgoing data in an output brigade (and, on TLS connections,
+ * in a scratch buffer) and passes it on through the installed connection
+ * output filters.
+ */
+typedef struct {
+ conn_rec *c;
+ apr_bucket_brigade *output;
+
+ int is_tls;
+ apr_time_t cooldown_usecs;
+ apr_int64_t warmup_size;
+
+ apr_size_t write_size;
+ apr_time_t last_write;
+ apr_int64_t bytes_read;
+ apr_int64_t bytes_written;
+
+ int buffer_output;
+ apr_size_t flush_threshold;
+ unsigned int is_flushed : 1;
+
+ char *scratch;
+ apr_size_t ssize;
+ apr_size_t slen;
+} h2_conn_io;
+
+apr_status_t h2_conn_io_init(h2_conn_io *io, conn_rec *c,
+ const struct h2_config *cfg);
+
+/**
+ * Append data to the buffered output.
+ * @param buf the data to append
+ * @param length the length of the data to append
+ */
+apr_status_t h2_conn_io_write(h2_conn_io *io,
+ const char *buf,
+ size_t length);
+
+apr_status_t h2_conn_io_pass(h2_conn_io *io, apr_bucket_brigade *bb);
+
+/**
+ * Pass any buffered data on to the connection output filters,
+ * appending a flush bucket to the output.
+ * @param io the connection io
+ */
+apr_status_t h2_conn_io_flush(h2_conn_io *io);
+
+/**
+ * Check if the buffered amount of data needs flushing.
+ */
+int h2_conn_io_needs_flush(h2_conn_io *io);
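+/* Illustrative sketch (not part of this patch): a typical buffered write
+ * cycle on the master connection, assuming conn_rec *c and h2_config *cfg
+ * are already available:
+ *
+ *   h2_conn_io io;
+ *   h2_conn_io_init(&io, c, cfg);
+ *   h2_conn_io_write(&io, data, len);      // buffered in scratch/brigade
+ *   if (h2_conn_io_needs_flush(&io)) {
+ *       h2_conn_io_flush(&io);             // pass buffered data to filters
+ *   }
+ */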
+
+#endif /* defined(__mod_h2__h2_conn_io__) */
diff --git a/modules/http2/h2_ctx.c b/modules/http2/h2_ctx.c
new file mode 100644
index 0000000..d5ccc24
--- /dev/null
+++ b/modules/http2/h2_ctx.c
@@ -0,0 +1,121 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <assert.h>
+
+#include <httpd.h>
+#include <http_core.h>
+#include <http_config.h>
+
+#include "h2_private.h"
+#include "h2_session.h"
+#include "h2_task.h"
+#include "h2_ctx.h"
+
+static h2_ctx *h2_ctx_create(const conn_rec *c)
+{
+ h2_ctx *ctx = apr_pcalloc(c->pool, sizeof(h2_ctx));
+ ap_assert(ctx);
+ ap_set_module_config(c->conn_config, &http2_module, ctx);
+ h2_ctx_server_set(ctx, c->base_server);
+ return ctx;
+}
+
+void h2_ctx_clear(const conn_rec *c)
+{
+ ap_assert(c);
+ ap_set_module_config(c->conn_config, &http2_module, NULL);
+}
+
+h2_ctx *h2_ctx_create_for(const conn_rec *c, h2_task *task)
+{
+ h2_ctx *ctx = h2_ctx_create(c);
+ if (ctx) {
+ ctx->task = task;
+ }
+ return ctx;
+}
+
+h2_ctx *h2_ctx_get(const conn_rec *c, int create)
+{
+ h2_ctx *ctx = (h2_ctx*)ap_get_module_config(c->conn_config, &http2_module);
+ if (ctx == NULL && create) {
+ ctx = h2_ctx_create(c);
+ }
+ return ctx;
+}
+
+h2_ctx *h2_ctx_rget(const request_rec *r)
+{
+ return h2_ctx_get(r->connection, 0);
+}
+
+const char *h2_ctx_protocol_get(const conn_rec *c)
+{
+ h2_ctx *ctx;
+ if (c->master) {
+ c = c->master;
+ }
+ ctx = (h2_ctx*)ap_get_module_config(c->conn_config, &http2_module);
+ return ctx? ctx->protocol : NULL;
+}
+
+h2_ctx *h2_ctx_protocol_set(h2_ctx *ctx, const char *proto)
+{
+ ctx->protocol = proto;
+ return ctx;
+}
+
+h2_session *h2_ctx_session_get(h2_ctx *ctx)
+{
+ return ctx? ctx->session : NULL;
+}
+
+void h2_ctx_session_set(h2_ctx *ctx, struct h2_session *session)
+{
+ ctx->session = session;
+}
+
+server_rec *h2_ctx_server_get(h2_ctx *ctx)
+{
+ return ctx? ctx->server : NULL;
+}
+
+h2_ctx *h2_ctx_server_set(h2_ctx *ctx, server_rec *s)
+{
+ ctx->server = s;
+ return ctx;
+}
+
+int h2_ctx_is_task(h2_ctx *ctx)
+{
+ return ctx && ctx->task;
+}
+
+h2_task *h2_ctx_get_task(h2_ctx *ctx)
+{
+ return ctx? ctx->task : NULL;
+}
+
+h2_task *h2_ctx_cget_task(conn_rec *c)
+{
+ return h2_ctx_get_task(h2_ctx_get(c, 0));
+}
+
+h2_task *h2_ctx_rget_task(request_rec *r)
+{
+ return h2_ctx_get_task(h2_ctx_rget(r));
+}
diff --git a/modules/http2/h2_ctx.h b/modules/http2/h2_ctx.h
new file mode 100644
index 0000000..cb111c9
--- /dev/null
+++ b/modules/http2/h2_ctx.h
@@ -0,0 +1,78 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __mod_h2__h2_ctx__
+#define __mod_h2__h2_ctx__
+
+struct h2_session;
+struct h2_task;
+struct h2_config;
+
+/**
+ * The h2 module context associated with a connection.
+ *
+ * It keeps track of the different types of connections:
+ * - those from clients that use HTTP/2 protocol
+ * - those from clients that do not use HTTP/2
+ * - those created by ourselves to perform work on HTTP/2 streams
+ */
+typedef struct h2_ctx {
+ const char *protocol; /* the protocol negotiated */
+ struct h2_session *session; /* the session established */
+ struct h2_task *task; /* the h2_task executing or NULL */
+ const char *hostname; /* hostname negotiated via SNI, optional */
+ server_rec *server; /* httpd server config selected. */
+ const struct h2_config *config; /* effective config in this context */
+} h2_ctx;
+
+/**
+ * Get (or create) a h2 context record for this connection.
+ * @param c the connection to look at
+ * @param create != 0 iff missing context shall be created
+ * @return h2 context of this connection
+ */
+h2_ctx *h2_ctx_get(const conn_rec *c, int create);
+void h2_ctx_clear(const conn_rec *c);
+
+h2_ctx *h2_ctx_rget(const request_rec *r);
+h2_ctx *h2_ctx_create_for(const conn_rec *c, struct h2_task *task);
+
+
+/* Set the h2 protocol established on this connection context or
+ * NULL when other protocols are in place.
+ */
+h2_ctx *h2_ctx_protocol_set(h2_ctx *ctx, const char *proto);
+
+/* Set the server_rec relevant for this context.
+ */
+h2_ctx *h2_ctx_server_set(h2_ctx *ctx, server_rec *s);
+server_rec *h2_ctx_server_get(h2_ctx *ctx);
+
+struct h2_session *h2_ctx_session_get(h2_ctx *ctx);
+void h2_ctx_session_set(h2_ctx *ctx, struct h2_session *session);
+
+/**
+ * Get the h2 protocol negotiated for this connection, or NULL.
+ */
+const char *h2_ctx_protocol_get(const conn_rec *c);
+
+int h2_ctx_is_task(h2_ctx *ctx);
+
+struct h2_task *h2_ctx_get_task(h2_ctx *ctx);
+struct h2_task *h2_ctx_cget_task(conn_rec *c);
+struct h2_task *h2_ctx_rget_task(request_rec *r);
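+/* Illustrative sketch (not part of this patch): a connection hook deciding
+ * whether it runs on a master HTTP/2 connection or on one of the slave
+ * (task) connections created for stream processing:
+ *
+ *   h2_ctx *ctx = h2_ctx_get(c, 0);
+ *   if (ctx && h2_ctx_is_task(ctx)) {
+ *       struct h2_task *task = h2_ctx_get_task(ctx);
+ *       // c is a slave connection processing a single stream
+ *   }
+ *   else if (ctx && h2_ctx_session_get(ctx)) {
+ *       // c is a master connection with an established HTTP/2 session
+ *   }
+ */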
+
+#endif /* defined(__mod_h2__h2_ctx__) */
diff --git a/modules/http2/h2_filter.c b/modules/http2/h2_filter.c
new file mode 100644
index 0000000..8b254b1
--- /dev/null
+++ b/modules/http2/h2_filter.c
@@ -0,0 +1,568 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <assert.h>
+
+#include <apr_strings.h>
+#include <httpd.h>
+#include <http_core.h>
+#include <http_protocol.h>
+#include <http_log.h>
+#include <http_connection.h>
+#include <scoreboard.h>
+
+#include "h2_private.h"
+#include "h2.h"
+#include "h2_config.h"
+#include "h2_conn_io.h"
+#include "h2_ctx.h"
+#include "h2_mplx.h"
+#include "h2_push.h"
+#include "h2_task.h"
+#include "h2_stream.h"
+#include "h2_request.h"
+#include "h2_headers.h"
+#include "h2_stream.h"
+#include "h2_session.h"
+#include "h2_util.h"
+#include "h2_version.h"
+
+#include "h2_filter.h"
+
+#define UNSET -1
+#define H2MIN(x,y) ((x) < (y) ? (x) : (y))
+
+static apr_status_t recv_RAW_DATA(conn_rec *c, h2_filter_cin *cin,
+ apr_bucket *b, apr_read_type_e block)
+{
+ h2_session *session = cin->session;
+ apr_status_t status = APR_SUCCESS;
+ apr_size_t len;
+ const char *data;
+ ssize_t n;
+
+ status = apr_bucket_read(b, &data, &len, block);
+
+ while (status == APR_SUCCESS && len > 0) {
+ n = nghttp2_session_mem_recv(session->ngh2, (const uint8_t *)data, len);
+
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, session->c,
+ H2_SSSN_MSG(session, "fed %ld bytes to nghttp2, %ld read"),
+ (long)len, (long)n);
+ if (n < 0) {
+ if (nghttp2_is_fatal((int)n)) {
+ h2_session_event(session, H2_SESSION_EV_PROTO_ERROR,
+ (int)n, nghttp2_strerror((int)n));
+ status = APR_EGENERAL;
+ }
+ }
+ else {
+ session->io.bytes_read += n;
+ if (len <= n) {
+ break;
+ }
+ len -= n;
+ data += n;
+ }
+ }
+
+ return status;
+}
+
+static apr_status_t recv_RAW_brigade(conn_rec *c, h2_filter_cin *cin,
+ apr_bucket_brigade *bb,
+ apr_read_type_e block)
+{
+ apr_status_t status = APR_SUCCESS;
+ apr_bucket* b;
+ int consumed = 0;
+
+ h2_util_bb_log(c, c->id, APLOG_TRACE2, "RAW_in", bb);
+ while (status == APR_SUCCESS && !APR_BRIGADE_EMPTY(bb)) {
+ b = APR_BRIGADE_FIRST(bb);
+
+ if (APR_BUCKET_IS_METADATA(b)) {
+ /* nop */
+ }
+ else {
+ status = recv_RAW_DATA(c, cin, b, block);
+ }
+ consumed = 1;
+ apr_bucket_delete(b);
+ }
+
+ if (!consumed && status == APR_SUCCESS && block == APR_NONBLOCK_READ) {
+ return APR_EAGAIN;
+ }
+ return status;
+}
+
+h2_filter_cin *h2_filter_cin_create(h2_session *session)
+{
+ h2_filter_cin *cin;
+
+ cin = apr_pcalloc(session->pool, sizeof(*cin));
+ if (!cin) {
+ return NULL;
+ }
+ cin->session = session;
+ return cin;
+}
+
+void h2_filter_cin_timeout_set(h2_filter_cin *cin, apr_interval_time_t timeout)
+{
+ cin->timeout = timeout;
+}
+
+apr_status_t h2_filter_core_input(ap_filter_t* f,
+ apr_bucket_brigade* brigade,
+ ap_input_mode_t mode,
+ apr_read_type_e block,
+ apr_off_t readbytes)
+{
+ h2_filter_cin *cin = f->ctx;
+ apr_status_t status = APR_SUCCESS;
+ apr_interval_time_t saved_timeout = UNSET;
+ const int trace1 = APLOGctrace1(f->c);
+
+ if (trace1) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, f->c,
+ "h2_session(%ld): read, %s, mode=%d, readbytes=%ld",
+ (long)f->c->id, (block == APR_BLOCK_READ)?
+ "BLOCK_READ" : "NONBLOCK_READ", mode, (long)readbytes);
+ }
+
+ if (mode == AP_MODE_INIT || mode == AP_MODE_SPECULATIVE) {
+ return ap_get_brigade(f->next, brigade, mode, block, readbytes);
+ }
+
+ if (mode != AP_MODE_READBYTES) {
+ return (block == APR_BLOCK_READ)? APR_SUCCESS : APR_EAGAIN;
+ }
+
+ if (!cin->bb) {
+ cin->bb = apr_brigade_create(cin->session->pool, f->c->bucket_alloc);
+ }
+
+ if (!cin->socket) {
+ cin->socket = ap_get_conn_socket(f->c);
+ }
+
+ if (APR_BRIGADE_EMPTY(cin->bb)) {
+ /* We only do a blocking read when we have no streams to process. So,
+ * in httpd scoreboard lingo, we are in a KEEPALIVE connection state.
+ */
+ if (block == APR_BLOCK_READ) {
+ if (cin->timeout > 0) {
+ apr_socket_timeout_get(cin->socket, &saved_timeout);
+ apr_socket_timeout_set(cin->socket, cin->timeout);
+ }
+ }
+ status = ap_get_brigade(f->next, cin->bb, AP_MODE_READBYTES,
+ block, readbytes);
+ if (saved_timeout != UNSET) {
+ apr_socket_timeout_set(cin->socket, saved_timeout);
+ }
+ }
+
+ switch (status) {
+ case APR_SUCCESS:
+ status = recv_RAW_brigade(f->c, cin, cin->bb, block);
+ break;
+ case APR_EOF:
+ case APR_EAGAIN:
+ case APR_TIMEUP:
+ if (trace1) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, f->c,
+ "h2_session(%ld): read", f->c->id);
+ }
+ break;
+ default:
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, f->c, APLOGNO(03046)
+ "h2_session(%ld): error reading", f->c->id);
+ break;
+ }
+ return status;
+}
+
+/*******************************************************************************
+ * http2 connection status handler + stream out source
+ ******************************************************************************/
+
+typedef struct {
+ apr_bucket_refcount refcount;
+ h2_bucket_event_cb *cb;
+ void *ctx;
+} h2_bucket_observer;
+
+static apr_status_t bucket_read(apr_bucket *b, const char **str,
+ apr_size_t *len, apr_read_type_e block)
+{
+ (void)b;
+ (void)block;
+ *str = NULL;
+ *len = 0;
+ return APR_SUCCESS;
+}
+
+static void bucket_destroy(void *data)
+{
+ h2_bucket_observer *h = data;
+ if (apr_bucket_shared_destroy(h)) {
+ if (h->cb) {
+ h->cb(h->ctx, H2_BUCKET_EV_BEFORE_DESTROY, NULL);
+ }
+ apr_bucket_free(h);
+ }
+}
+
+apr_bucket * h2_bucket_observer_make(apr_bucket *b, h2_bucket_event_cb *cb,
+ void *ctx)
+{
+ h2_bucket_observer *br;
+
+ br = apr_bucket_alloc(sizeof(*br), b->list);
+ br->cb = cb;
+ br->ctx = ctx;
+
+ b = apr_bucket_shared_make(b, br, 0, 0);
+ b->type = &h2_bucket_type_observer;
+ return b;
+}
+
+apr_bucket * h2_bucket_observer_create(apr_bucket_alloc_t *list,
+ h2_bucket_event_cb *cb, void *ctx)
+{
+ apr_bucket *b = apr_bucket_alloc(sizeof(*b), list);
+
+ APR_BUCKET_INIT(b);
+ b->free = apr_bucket_free;
+ b->list = list;
+ b = h2_bucket_observer_make(b, cb, ctx);
+ return b;
+}
+
+apr_status_t h2_bucket_observer_fire(apr_bucket *b, h2_bucket_event event)
+{
+ if (H2_BUCKET_IS_OBSERVER(b)) {
+ h2_bucket_observer *l = (h2_bucket_observer *)b->data;
+ return l->cb(l->ctx, event, b);
+ }
+ return APR_EINVAL;
+}
+
+const apr_bucket_type_t h2_bucket_type_observer = {
+ "H2OBS", 5, APR_BUCKET_METADATA,
+ bucket_destroy,
+ bucket_read,
+ apr_bucket_setaside_noop,
+ apr_bucket_split_notimpl,
+ apr_bucket_shared_copy
+};
+
+apr_bucket *h2_bucket_observer_beam(struct h2_bucket_beam *beam,
+ apr_bucket_brigade *dest,
+ const apr_bucket *src)
+{
+ if (H2_BUCKET_IS_OBSERVER(src)) {
+ h2_bucket_observer *l = (h2_bucket_observer *)src->data;
+ apr_bucket *b = h2_bucket_observer_create(dest->bucket_alloc,
+ l->cb, l->ctx);
+ APR_BRIGADE_INSERT_TAIL(dest, b);
+ l->cb = NULL;
+ l->ctx = NULL;
+ h2_bucket_observer_fire(b, H2_BUCKET_EV_BEFORE_MASTER_SEND);
+ return b;
+ }
+ return NULL;
+}
+
+static apr_status_t bbout(apr_bucket_brigade *bb, const char *fmt, ...)
+ __attribute__((format(printf,2,3)));
+static apr_status_t bbout(apr_bucket_brigade *bb, const char *fmt, ...)
+{
+ va_list args;
+ apr_status_t rv;
+
+ va_start(args, fmt);
+ rv = apr_brigade_vprintf(bb, NULL, NULL, fmt, args);
+ va_end(args);
+
+ return rv;
+}
+
+static void add_settings(apr_bucket_brigade *bb, h2_session *s, int last)
+{
+ h2_mplx *m = s->mplx;
+
+ bbout(bb, " \"settings\": {\n");
+ bbout(bb, " \"SETTINGS_MAX_CONCURRENT_STREAMS\": %d,\n", m->max_streams);
+ bbout(bb, " \"SETTINGS_MAX_FRAME_SIZE\": %d,\n", 16*1024);
+ bbout(bb, " \"SETTINGS_INITIAL_WINDOW_SIZE\": %d,\n",
+ h2_config_geti(s->config, H2_CONF_WIN_SIZE));
+ bbout(bb, " \"SETTINGS_ENABLE_PUSH\": %d\n", h2_session_push_enabled(s));
+ bbout(bb, " }%s\n", last? "" : ",");
+}
+
+static void add_peer_settings(apr_bucket_brigade *bb, h2_session *s, int last)
+{
+ bbout(bb, " \"peerSettings\": {\n");
+ bbout(bb, " \"SETTINGS_MAX_CONCURRENT_STREAMS\": %d,\n",
+ nghttp2_session_get_remote_settings(s->ngh2, NGHTTP2_SETTINGS_MAX_CONCURRENT_STREAMS));
+ bbout(bb, " \"SETTINGS_MAX_FRAME_SIZE\": %d,\n",
+ nghttp2_session_get_remote_settings(s->ngh2, NGHTTP2_SETTINGS_MAX_FRAME_SIZE));
+ bbout(bb, " \"SETTINGS_INITIAL_WINDOW_SIZE\": %d,\n",
+ nghttp2_session_get_remote_settings(s->ngh2, NGHTTP2_SETTINGS_INITIAL_WINDOW_SIZE));
+ bbout(bb, " \"SETTINGS_ENABLE_PUSH\": %d,\n",
+ nghttp2_session_get_remote_settings(s->ngh2, NGHTTP2_SETTINGS_ENABLE_PUSH));
+ bbout(bb, " \"SETTINGS_HEADER_TABLE_SIZE\": %d,\n",
+ nghttp2_session_get_remote_settings(s->ngh2, NGHTTP2_SETTINGS_HEADER_TABLE_SIZE));
+ bbout(bb, " \"SETTINGS_MAX_HEADER_LIST_SIZE\": %d\n",
+ nghttp2_session_get_remote_settings(s->ngh2, NGHTTP2_SETTINGS_MAX_HEADER_LIST_SIZE));
+ bbout(bb, " }%s\n", last? "" : ",");
+}
+
+typedef struct {
+ apr_bucket_brigade *bb;
+ h2_session *s;
+ int idx;
+} stream_ctx_t;
+
+static int add_stream(h2_stream *stream, void *ctx)
+{
+ stream_ctx_t *x = ctx;
+ int32_t flowIn, flowOut;
+
+ flowIn = nghttp2_session_get_stream_effective_local_window_size(x->s->ngh2, stream->id);
+ flowOut = nghttp2_session_get_stream_remote_window_size(x->s->ngh2, stream->id);
+ bbout(x->bb, "%s\n \"%d\": {\n", (x->idx? "," : ""), stream->id);
+ bbout(x->bb, " \"state\": \"%s\",\n", h2_stream_state_str(stream));
+ bbout(x->bb, " \"created\": %f,\n", ((double)stream->created)/APR_USEC_PER_SEC);
+ bbout(x->bb, " \"flowIn\": %d,\n", flowIn);
+ bbout(x->bb, " \"flowOut\": %d,\n", flowOut);
+ bbout(x->bb, " \"dataIn\": %"APR_OFF_T_FMT",\n", stream->in_data_octets);
+ bbout(x->bb, " \"dataOut\": %"APR_OFF_T_FMT"\n", stream->out_data_octets);
+ bbout(x->bb, " }");
+
+ ++x->idx;
+ return 1;
+}
+
+static void add_streams(apr_bucket_brigade *bb, h2_session *s, int last)
+{
+ stream_ctx_t x;
+
+ x.bb = bb;
+ x.s = s;
+ x.idx = 0;
+ bbout(bb, " \"streams\": {");
+ h2_mplx_stream_do(s->mplx, add_stream, &x);
+ bbout(bb, "\n }%s\n", last? "" : ",");
+}
+
+static void add_push(apr_bucket_brigade *bb, h2_session *s,
+ h2_stream *stream, int last)
+{
+ h2_push_diary *diary;
+ apr_status_t status;
+
+ bbout(bb, " \"push\": {\n");
+ diary = s->push_diary;
+ if (diary) {
+ const char *data;
+ const char *base64_digest;
+ apr_size_t len;
+
+ status = h2_push_diary_digest_get(diary, bb->p, 256,
+ stream->request->authority,
+ &data, &len);
+ if (status == APR_SUCCESS) {
+ base64_digest = h2_util_base64url_encode(data, len, bb->p);
+ bbout(bb, " \"cacheDigest\": \"%s\",\n", base64_digest);
+ }
+ }
+ bbout(bb, " \"promises\": %d,\n", s->pushes_promised);
+ bbout(bb, " \"submits\": %d,\n", s->pushes_submitted);
+ bbout(bb, " \"resets\": %d\n", s->pushes_reset);
+ bbout(bb, " }%s\n", last? "" : ",");
+}
+
+static void add_in(apr_bucket_brigade *bb, h2_session *s, int last)
+{
+ bbout(bb, " \"in\": {\n");
+ bbout(bb, " \"requests\": %d,\n", s->remote.emitted_count);
+ bbout(bb, " \"resets\": %d, \n", s->streams_reset);
+ bbout(bb, " \"frames\": %ld,\n", (long)s->frames_received);
+ bbout(bb, " \"octets\": %"APR_UINT64_T_FMT"\n", s->io.bytes_read);
+ bbout(bb, " }%s\n", last? "" : ",");
+}
+
+static void add_out(apr_bucket_brigade *bb, h2_session *s, int last)
+{
+ bbout(bb, " \"out\": {\n");
+ bbout(bb, " \"responses\": %d,\n", s->responses_submitted);
+ bbout(bb, " \"frames\": %ld,\n", (long)s->frames_sent);
+ bbout(bb, " \"octets\": %"APR_UINT64_T_FMT"\n", s->io.bytes_written);
+ bbout(bb, " }%s\n", last? "" : ",");
+}
+
+static void add_stats(apr_bucket_brigade *bb, h2_session *s,
+ h2_stream *stream, int last)
+{
+ bbout(bb, " \"stats\": {\n");
+ add_in(bb, s, 0);
+ add_out(bb, s, 0);
+ add_push(bb, s, stream, 1);
+ bbout(bb, " }%s\n", last? "" : ",");
+}
+
+static apr_status_t h2_status_insert(h2_task *task, apr_bucket *b)
+{
+ conn_rec *c = task->c->master;
+ h2_ctx *h2ctx = h2_ctx_get(c, 0);
+ h2_session *session;
+ h2_stream *stream;
+ apr_bucket_brigade *bb;
+ apr_bucket *e;
+ int32_t connFlowIn, connFlowOut;
+
+
+ if (!h2ctx || (session = h2_ctx_session_get(h2ctx)) == NULL) {
+ return APR_SUCCESS;
+ }
+
+ stream = h2_session_stream_get(session, task->stream_id);
+ if (!stream) {
+ /* stream already done */
+ return APR_SUCCESS;
+ }
+
+ bb = apr_brigade_create(stream->pool, c->bucket_alloc);
+
+ connFlowIn = nghttp2_session_get_effective_local_window_size(session->ngh2);
+ connFlowOut = nghttp2_session_get_remote_window_size(session->ngh2);
+
+ bbout(bb, "{\n");
+ bbout(bb, " \"version\": \"draft-01\",\n");
+ add_settings(bb, session, 0);
+ add_peer_settings(bb, session, 0);
+ bbout(bb, " \"connFlowIn\": %d,\n", connFlowIn);
+ bbout(bb, " \"connFlowOut\": %d,\n", connFlowOut);
+ bbout(bb, " \"sentGoAway\": %d,\n", session->local.shutdown);
+
+ add_streams(bb, session, 0);
+
+ add_stats(bb, session, stream, 1);
+ bbout(bb, "}\n");
+
+ while ((e = APR_BRIGADE_FIRST(bb)) != APR_BRIGADE_SENTINEL(bb)) {
+ APR_BUCKET_REMOVE(e);
+ APR_BUCKET_INSERT_AFTER(b, e);
+ b = e;
+ }
+ apr_brigade_destroy(bb);
+
+ return APR_SUCCESS;
+}
+
+static apr_status_t status_event(void *ctx, h2_bucket_event event,
+ apr_bucket *b)
+{
+ h2_task *task = ctx;
+
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE3, 0, task->c->master,
+ "status_event(%s): %d", task->id, event);
+ switch (event) {
+ case H2_BUCKET_EV_BEFORE_MASTER_SEND:
+ h2_status_insert(task, b);
+ break;
+ default:
+ break;
+ }
+ return APR_SUCCESS;
+}
+
+int h2_filter_h2_status_handler(request_rec *r)
+{
+ h2_ctx *ctx = h2_ctx_rget(r);
+ conn_rec *c = r->connection;
+ h2_task *task;
+ apr_bucket_brigade *bb;
+ apr_bucket *b;
+ apr_status_t status;
+
+ if (strcmp(r->handler, "http2-status")) {
+ return DECLINED;
+ }
+ if (r->method_number != M_GET && r->method_number != M_POST) {
+ return DECLINED;
+ }
+
+ task = ctx? h2_ctx_get_task(ctx) : NULL;
+ if (task) {
+
+ if ((status = ap_discard_request_body(r)) != OK) {
+ return status;
+ }
+
+ /* We need to handle the actual output on the main thread, as
+ * we need to access h2_session information. */
+ r->status = 200;
+ r->clength = -1;
+ r->chunked = 1;
+ apr_table_unset(r->headers_out, "Content-Length");
+ /* Discourage content-encodings */
+ apr_table_unset(r->headers_out, "Content-Encoding");
+ apr_table_setn(r->subprocess_env, "no-brotli", "1");
+ apr_table_setn(r->subprocess_env, "no-gzip", "1");
+
+ ap_set_content_type(r, "application/json");
+ apr_table_setn(r->notes, H2_FILTER_DEBUG_NOTE, "on");
+
+ bb = apr_brigade_create(r->pool, c->bucket_alloc);
+ b = h2_bucket_observer_create(c->bucket_alloc, status_event, task);
+ APR_BRIGADE_INSERT_TAIL(bb, b);
+ b = apr_bucket_eos_create(c->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(bb, b);
+
+ ap_log_rerror(APLOG_MARK, APLOG_TRACE2, 0, r,
+ "status_handler(%s): checking for incoming trailers",
+ task->id);
+ if (r->trailers_in && !apr_is_empty_table(r->trailers_in)) {
+ ap_log_rerror(APLOG_MARK, APLOG_TRACE2, 0, r,
+ "status_handler(%s): seeing incoming trailers",
+ task->id);
+ apr_table_setn(r->trailers_out, "h2-trailers-in",
+ apr_itoa(r->pool, 1));
+ }
+
+ status = ap_pass_brigade(r->output_filters, bb);
+ if (status == APR_SUCCESS
+ || r->status != HTTP_OK
+ || c->aborted) {
+ return OK;
+ }
+ else {
+ /* no way to know what type of error occurred */
+ ap_log_rerror(APLOG_MARK, APLOG_TRACE1, status, r,
+ "status_handler(%s): ap_pass_brigade failed",
+ task->id);
+ return AP_FILTER_ERROR;
+ }
+ }
+ return DECLINED;
+}
+
diff --git a/modules/http2/h2_filter.h b/modules/http2/h2_filter.h
new file mode 100644
index 0000000..12810d8
--- /dev/null
+++ b/modules/http2/h2_filter.h
@@ -0,0 +1,73 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __mod_h2__h2_filter__
+#define __mod_h2__h2_filter__
+
+struct h2_bucket_beam;
+struct h2_headers;
+struct h2_stream;
+struct h2_session;
+
+typedef struct h2_filter_cin {
+ apr_pool_t *pool;
+ apr_socket_t *socket;
+ apr_interval_time_t timeout;
+ apr_bucket_brigade *bb;
+ struct h2_session *session;
+ apr_bucket *cur;
+} h2_filter_cin;
+
+h2_filter_cin *h2_filter_cin_create(struct h2_session *session);
+
+void h2_filter_cin_timeout_set(h2_filter_cin *cin, apr_interval_time_t timeout);
+
+apr_status_t h2_filter_core_input(ap_filter_t* filter,
+ apr_bucket_brigade* brigade,
+ ap_input_mode_t mode,
+ apr_read_type_e block,
+ apr_off_t readbytes);
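+/* Illustrative sketch (not part of this patch): installing the core input
+ * filter on the master connection, assuming an established h2_session
+ * *session and a hypothetical timeout value; "H2_IN" is the filter name
+ * registered in h2_conn_child_init():
+ *
+ *   h2_filter_cin *cin = h2_filter_cin_create(session);
+ *   h2_filter_cin_timeout_set(cin, timeout);
+ *   ap_add_input_filter("H2_IN", cin, NULL, c);
+ */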
+
+/******* observer bucket ******************************************************/
+
+typedef enum {
+ H2_BUCKET_EV_BEFORE_DESTROY,
+ H2_BUCKET_EV_BEFORE_MASTER_SEND
+} h2_bucket_event;
+
+extern const apr_bucket_type_t h2_bucket_type_observer;
+
+typedef apr_status_t h2_bucket_event_cb(void *ctx, h2_bucket_event event, apr_bucket *b);
+
+#define H2_BUCKET_IS_OBSERVER(e) (e->type == &h2_bucket_type_observer)
+
+apr_bucket * h2_bucket_observer_make(apr_bucket *b, h2_bucket_event_cb *cb,
+ void *ctx);
+
+apr_bucket * h2_bucket_observer_create(apr_bucket_alloc_t *list,
+ h2_bucket_event_cb *cb, void *ctx);
+
+apr_status_t h2_bucket_observer_fire(apr_bucket *b, h2_bucket_event event);
+
+apr_bucket *h2_bucket_observer_beam(struct h2_bucket_beam *beam,
+ apr_bucket_brigade *dest,
+ const apr_bucket *src);
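+/* Illustrative sketch (not part of this patch): a handler that wants a
+ * callback right before the master connection sends its output, assuming
+ * a hypothetical my_event() callback and context my_ctx:
+ *
+ *   static apr_status_t my_event(void *ctx, h2_bucket_event ev, apr_bucket *b)
+ *   {
+ *       if (ev == H2_BUCKET_EV_BEFORE_MASTER_SEND) {
+ *           // insert generated data after b
+ *       }
+ *       return APR_SUCCESS;
+ *   }
+ *
+ *   apr_bucket *b = h2_bucket_observer_create(c->bucket_alloc, my_event, my_ctx);
+ *   APR_BRIGADE_INSERT_TAIL(bb, b);
+ */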
+
+/******* /.well-known/h2/state handler ****************************************/
+
+int h2_filter_h2_status_handler(request_rec *r);
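+/* Illustrative configuration sketch (not part of this patch): the handler
+ * declines unless r->handler equals "http2-status", so it is typically
+ * mapped via something like:
+ *
+ *   <Location "/.well-known/h2/state">
+ *       SetHandler http2-status
+ *   </Location>
+ */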
+
+#endif /* __mod_h2__h2_filter__ */
diff --git a/modules/http2/h2_from_h1.c b/modules/http2/h2_from_h1.c
new file mode 100644
index 0000000..d69c53c
--- /dev/null
+++ b/modules/http2/h2_from_h1.c
@@ -0,0 +1,875 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <assert.h>
+#include <stdio.h>
+
+#include <apr_date.h>
+#include <apr_lib.h>
+#include <apr_strings.h>
+
+#include <httpd.h>
+#include <http_core.h>
+#include <http_log.h>
+#include <http_connection.h>
+#include <http_protocol.h>
+#include <http_request.h>
+#include <util_time.h>
+
+#include "h2_private.h"
+#include "h2_headers.h"
+#include "h2_from_h1.h"
+#include "h2_task.h"
+#include "h2_util.h"
+
+
+/* This routine is called by apr_table_do and merges all instances of
+ * the passed field values into a single array that will be further
+ * processed by some later routine. Originally intended to help split
+ * and recombine multiple Vary fields, though it is generic to any field
+ * consisting of comma/space-separated tokens.
+ */
+static int uniq_field_values(void *d, const char *key, const char *val)
+{
+ apr_array_header_t *values;
+ char *start;
+ char *e;
+ char **strpp;
+ int i;
+
+ (void)key;
+ values = (apr_array_header_t *)d;
+
+ e = apr_pstrdup(values->pool, val);
+
+ do {
+ /* Find a non-empty fieldname */
+
+ while (*e == ',' || apr_isspace(*e)) {
+ ++e;
+ }
+ if (*e == '\0') {
+ break;
+ }
+ start = e;
+ while (*e != '\0' && *e != ',' && !apr_isspace(*e)) {
+ ++e;
+ }
+ if (*e != '\0') {
+ *e++ = '\0';
+ }
+
+ /* Now add it to values if it isn't already represented.
+ * Could be replaced by an ap_array_strcasecmp() if we had one.
+ */
+ for (i = 0, strpp = (char **) values->elts; i < values->nelts;
+ ++i, ++strpp) {
+ if (*strpp && apr_strnatcasecmp(*strpp, start) == 0) {
+ break;
+ }
+ }
+ if (i == values->nelts) { /* if not found */
+ *(char **)apr_array_push(values) = start;
+ }
+ } while (*e != '\0');
+
+ return 1;
+}
+
+/*
+ * Since some clients choke violently on multiple Vary fields, or
+ * Vary fields with duplicate tokens, combine any multiples and remove
+ * any duplicates.
+ */
+static void fix_vary(request_rec *r)
+{
+ apr_array_header_t *varies;
+
+ varies = apr_array_make(r->pool, 5, sizeof(char *));
+
+ /* Extract all Vary fields from the headers_out, separate each into
+ * its comma-separated fieldname values, and then add them to varies
+ * if not already present in the array.
+ */
+ apr_table_do(uniq_field_values, varies, r->headers_out, "Vary", NULL);
+
+ /* If we found any, replace old Vary fields with unique-ified value */
+
+ if (varies->nelts > 0) {
+ apr_table_setn(r->headers_out, "Vary",
+ apr_array_pstrcat(r->pool, varies, ','));
+ }
+}
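+/* Illustrative example (not part of this patch): the two response fields
+ *   Vary: Accept-Encoding
+ *   Vary: accept-encoding, Cookie
+ * are combined into the single field
+ *   Vary: Accept-Encoding,Cookie
+ * with the duplicate token removed case-insensitively.
+ */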
+
+static void set_basic_http_header(apr_table_t *headers, request_rec *r,
+ apr_pool_t *pool)
+{
+ char *date = NULL;
+ const char *proxy_date = NULL;
+ const char *server = NULL;
+ const char *us = ap_get_server_banner();
+
+ /*
+ * keep the set-by-proxy server and date headers, otherwise
+ * generate a new server header / date header
+ */
+ if (r && r->proxyreq != PROXYREQ_NONE) {
+ proxy_date = apr_table_get(r->headers_out, "Date");
+ if (!proxy_date) {
+ /*
+ * proxy_date needs to be const. So use date for the creation of
+ * our own Date header and pass it over to proxy_date later to
+ * avoid a compiler warning.
+ */
+ date = apr_palloc(pool, APR_RFC822_DATE_LEN);
+ ap_recent_rfc822_date(date, r->request_time);
+ }
+ server = apr_table_get(r->headers_out, "Server");
+ }
+ else {
+ date = apr_palloc(pool, APR_RFC822_DATE_LEN);
+ ap_recent_rfc822_date(date, r? r->request_time : apr_time_now());
+ }
+
+ apr_table_setn(headers, "Date", proxy_date ? proxy_date : date );
+ if (r) {
+ apr_table_unset(r->headers_out, "Date");
+ }
+
+ if (!server && *us) {
+ server = us;
+ }
+ if (server) {
+ apr_table_setn(headers, "Server", server);
+ if (r) {
+ apr_table_unset(r->headers_out, "Server");
+ }
+ }
+}
+
+static int copy_header(void *ctx, const char *name, const char *value)
+{
+ apr_table_t *headers = ctx;
+
+ apr_table_add(headers, name, value);
+ return 1;
+}
+
+static h2_headers *create_response(h2_task *task, request_rec *r)
+{
+ const char *clheader;
+ const char *ctype;
+ apr_table_t *headers;
+ /*
+ * Now that we are ready to send a response, we need to combine the two
+ * header field tables into a single table. If we don't do this, our
+ * later attempts to set or unset a given fieldname might be bypassed.
+ */
+ if (!apr_is_empty_table(r->err_headers_out)) {
+ r->headers_out = apr_table_overlay(r->pool, r->err_headers_out,
+ r->headers_out);
+ apr_table_clear(r->err_headers_out);
+ }
+
+ /*
+ * Remove the 'Vary' header field if the client can't handle it.
+ * Since this will have nasty effects on HTTP/1.1 caches, force
+ * the response into HTTP/1.0 mode.
+ */
+ if (apr_table_get(r->subprocess_env, "force-no-vary") != NULL) {
+ apr_table_unset(r->headers_out, "Vary");
+ r->proto_num = HTTP_VERSION(1,0);
+ apr_table_setn(r->subprocess_env, "force-response-1.0", "1");
+ }
+ else {
+ fix_vary(r);
+ }
+
+ /*
+ * Now remove any ETag response header field if earlier processing
+ * says so (such as a 'FileETag None' directive).
+ */
+ if (apr_table_get(r->notes, "no-etag") != NULL) {
+ apr_table_unset(r->headers_out, "ETag");
+ }
+
+ /* determine the protocol and whether we should use keepalives. */
+ ap_set_keepalive(r);
+
+ if (AP_STATUS_IS_HEADER_ONLY(r->status)) {
+ apr_table_unset(r->headers_out, "Transfer-Encoding");
+ apr_table_unset(r->headers_out, "Content-Length");
+ r->content_type = r->content_encoding = NULL;
+ r->content_languages = NULL;
+ r->clength = r->chunked = 0;
+ }
+ else if (r->chunked) {
+ apr_table_mergen(r->headers_out, "Transfer-Encoding", "chunked");
+ apr_table_unset(r->headers_out, "Content-Length");
+ }
+
+ ctype = ap_make_content_type(r, r->content_type);
+ if (ctype) {
+ apr_table_setn(r->headers_out, "Content-Type", ctype);
+ }
+
+ if (r->content_encoding) {
+ apr_table_setn(r->headers_out, "Content-Encoding",
+ r->content_encoding);
+ }
+
+ if (!apr_is_empty_array(r->content_languages)) {
+ unsigned int i;
+ char *token;
+ char **languages = (char **)(r->content_languages->elts);
+ const char *field = apr_table_get(r->headers_out, "Content-Language");
+
+ while (field && (token = ap_get_list_item(r->pool, &field)) != NULL) {
+ for (i = 0; i < r->content_languages->nelts; ++i) {
+ if (!apr_strnatcasecmp(token, languages[i]))
+ break;
+ }
+ if (i == r->content_languages->nelts) {
+ *((char **) apr_array_push(r->content_languages)) = token;
+ }
+ }
+
+ field = apr_array_pstrcat(r->pool, r->content_languages, ',');
+ apr_table_setn(r->headers_out, "Content-Language", field);
+ }
+
+ /*
+     * Control cacheability for non-cacheable responses if not already set by
+ * some other part of the server configuration.
+ */
+ if (r->no_cache && !apr_table_get(r->headers_out, "Expires")) {
+ char *date = apr_palloc(r->pool, APR_RFC822_DATE_LEN);
+ ap_recent_rfc822_date(date, r->request_time);
+ apr_table_add(r->headers_out, "Expires", date);
+ }
+
+    /* This is a hack, but I can't find any way around it. The idea is that
+ * we don't want to send out 0 Content-Lengths if it is a head request.
+ * This happens when modules try to outsmart the server, and return
+ * if they see a HEAD request. Apache 1.3 handlers were supposed to
+ * just return in that situation, and the core handled the HEAD. In
+ * 2.0, if a handler returns, then the core sends an EOS bucket down
+ * the filter stack, and the content-length filter computes a C-L of
+ * zero and that gets put in the headers, and we end up sending a
+ * zero C-L to the client. We can't just remove the C-L filter,
+ * because well behaved 2.0 handlers will send their data down the stack,
+ * and we will compute a real C-L for the head request. RBB
+ */
+ if (r->header_only
+ && (clheader = apr_table_get(r->headers_out, "Content-Length"))
+ && !strcmp(clheader, "0")) {
+ apr_table_unset(r->headers_out, "Content-Length");
+ }
+
+ headers = apr_table_make(r->pool, 10);
+
+ set_basic_http_header(headers, r, r->pool);
+ if (r->status == HTTP_NOT_MODIFIED) {
+ apr_table_do(copy_header, headers, r->headers_out,
+ "ETag",
+ "Content-Location",
+ "Expires",
+ "Cache-Control",
+ "Vary",
+ "Warning",
+ "WWW-Authenticate",
+ "Proxy-Authenticate",
+ "Set-Cookie",
+ "Set-Cookie2",
+ NULL);
+ }
+ else {
+ apr_table_do(copy_header, headers, r->headers_out, NULL);
+ }
+
+ return h2_headers_rcreate(r, r->status, headers, r->pool);
+}
+
+typedef enum {
+ H2_RP_STATUS_LINE,
+ H2_RP_HEADER_LINE,
+ H2_RP_DONE
+} h2_rp_state_t;
+
+typedef struct h2_response_parser {
+ h2_rp_state_t state;
+ h2_task *task;
+ int http_status;
+ apr_array_header_t *hlines;
+ apr_bucket_brigade *tmp;
+} h2_response_parser;
+
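+/* parse_header below collects raw header lines; a line starting with SP or
+ * HTAB is an obs-fold continuation and is appended to the previous line.
+ * Illustrative example (header name made up): "x-note: first" followed by
+ * "\t second" is stored as the single line "x-note: first second". */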
+static apr_status_t parse_header(h2_response_parser *parser, char *line) {
+ const char *hline;
+ if (line[0] == ' ' || line[0] == '\t') {
+ char **plast;
+ /* continuation line from the header before this */
+ while (line[0] == ' ' || line[0] == '\t') {
+ ++line;
+ }
+
+ plast = apr_array_pop(parser->hlines);
+ if (plast == NULL) {
+ /* not well formed */
+ return APR_EINVAL;
+ }
+ hline = apr_psprintf(parser->task->pool, "%s %s", *plast, line);
+ }
+ else {
+ /* new header line */
+ hline = apr_pstrdup(parser->task->pool, line);
+ }
+ APR_ARRAY_PUSH(parser->hlines, const char*) = hline;
+ return APR_SUCCESS;
+}
+
+static apr_status_t get_line(h2_response_parser *parser, apr_bucket_brigade *bb,
+ char *line, apr_size_t len)
+{
+ h2_task *task = parser->task;
+ apr_status_t status;
+
+ if (!parser->tmp) {
+ parser->tmp = apr_brigade_create(task->pool, task->c->bucket_alloc);
+ }
+ status = apr_brigade_split_line(parser->tmp, bb, APR_BLOCK_READ,
+ HUGE_STRING_LEN);
+ if (status == APR_SUCCESS) {
+ --len;
+ status = apr_brigade_flatten(parser->tmp, line, &len);
+ if (status == APR_SUCCESS) {
+            /* we assume a non-empty line and remove the trailing CRLF. */
+ line[len] = '\0';
+ if (len >= 2 && !strcmp(H2_CRLF, line + len - 2)) {
+ len -= 2;
+ line[len] = '\0';
+ apr_brigade_cleanup(parser->tmp);
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, task->c,
+ "h2_task(%s): read response line: %s",
+ task->id, line);
+ }
+ else {
+ /* this does not look like a complete line yet */
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, task->c,
+ "h2_task(%s): read response, incomplete line: %s",
+ task->id, line);
+ return APR_EAGAIN;
+ }
+ }
+ }
+ apr_brigade_cleanup(parser->tmp);
+ return status;
+}
+
+static apr_table_t *make_table(h2_response_parser *parser)
+{
+ h2_task *task = parser->task;
+ apr_array_header_t *hlines = parser->hlines;
+ if (hlines) {
+ apr_table_t *headers = apr_table_make(task->pool, hlines->nelts);
+ int i;
+
+ for (i = 0; i < hlines->nelts; ++i) {
+ char *hline = ((char **)hlines->elts)[i];
+ char *sep = ap_strchr(hline, ':');
+ if (!sep) {
+ ap_log_cerror(APLOG_MARK, APLOG_WARNING, APR_EINVAL, task->c,
+ APLOGNO(02955) "h2_task(%s): invalid header[%d] '%s'",
+ task->id, i, (char*)hline);
+ /* not valid format, abort */
+ return NULL;
+ }
+ (*sep++) = '\0';
+ while (*sep == ' ' || *sep == '\t') {
+ ++sep;
+ }
+
+ if (!h2_util_ignore_header(hline)) {
+ apr_table_merge(headers, hline, sep);
+ }
+ }
+ return headers;
+ }
+ else {
+ return apr_table_make(task->pool, 0);
+ }
+}
+
+static apr_status_t pass_response(h2_task *task, ap_filter_t *f,
+ h2_response_parser *parser)
+{
+ apr_bucket *b;
+ apr_status_t status;
+
+ h2_headers *response = h2_headers_create(parser->http_status,
+ make_table(parser),
+ NULL, 0, task->pool);
+ apr_brigade_cleanup(parser->tmp);
+ b = h2_bucket_headers_create(task->c->bucket_alloc, response);
+ APR_BRIGADE_INSERT_TAIL(parser->tmp, b);
+ b = apr_bucket_flush_create(task->c->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(parser->tmp, b);
+ status = ap_pass_brigade(f->next, parser->tmp);
+ apr_brigade_cleanup(parser->tmp);
+
+ /* reset parser for possible next response */
+ parser->state = H2_RP_STATUS_LINE;
+ apr_array_clear(parser->hlines);
+
+ if (response->status >= 200) {
+ task->output.sent_response = 1;
+ }
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, task->c,
+ APLOGNO(03197) "h2_task(%s): passed response %d",
+ task->id, response->status);
+ return status;
+}
+
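+/* Status lines accepted by the checkmasks in parse_status (examples are
+ * illustrative): "HTTP/1.1 503 Service Unavailable" (status digits start at
+ * offset 9) or "HTTP/2 200 OK" (offset 7); anything else is rejected with
+ * APR_EINVAL. */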
+static apr_status_t parse_status(h2_task *task, char *line)
+{
+ h2_response_parser *parser = task->output.rparser;
+ int sindex = (apr_date_checkmask(line, "HTTP/#.# ###*")? 9 :
+ (apr_date_checkmask(line, "HTTP/# ###*")? 7 : 0));
+ if (sindex > 0) {
+ int k = sindex + 3;
+ char keepchar = line[k];
+ line[k] = '\0';
+ parser->http_status = atoi(&line[sindex]);
+ line[k] = keepchar;
+ parser->state = H2_RP_HEADER_LINE;
+
+ return APR_SUCCESS;
+ }
+    /* Seems like there is garbage on the connection, maybe a leftover
+     * from a previous proxy request.
+     * This should only happen if the H2_RESPONSE filter is not yet in
+     * place (post_read_request has not been reached) and the handler wants
+     * to write something. Probably just the interim response we are
+     * waiting for. But if there is other data hanging around before
+     * that, this needs to fail. */
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, task->c, APLOGNO(03467)
+ "h2_task(%s): unable to parse status line: %s",
+ task->id, line);
+ return APR_EINVAL;
+}
+
+apr_status_t h2_from_h1_parse_response(h2_task *task, ap_filter_t *f,
+ apr_bucket_brigade *bb)
+{
+ h2_response_parser *parser = task->output.rparser;
+ char line[HUGE_STRING_LEN];
+ apr_status_t status = APR_SUCCESS;
+
+ if (!parser) {
+ parser = apr_pcalloc(task->pool, sizeof(*parser));
+ parser->task = task;
+ parser->state = H2_RP_STATUS_LINE;
+ parser->hlines = apr_array_make(task->pool, 10, sizeof(char *));
+ task->output.rparser = parser;
+ }
+
+ while (!APR_BRIGADE_EMPTY(bb) && status == APR_SUCCESS) {
+ switch (parser->state) {
+ case H2_RP_STATUS_LINE:
+ case H2_RP_HEADER_LINE:
+ status = get_line(parser, bb, line, sizeof(line));
+ if (status == APR_EAGAIN) {
+ /* need more data */
+ return APR_SUCCESS;
+ }
+ else if (status != APR_SUCCESS) {
+ return status;
+ }
+ if (parser->state == H2_RP_STATUS_LINE) {
+ /* instead of parsing, just take it directly */
+ status = parse_status(task, line);
+ }
+ else if (line[0] == '\0') {
+ /* end of headers, pass response onward */
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, f->c,
+ "h2_task(%s): end of response", task->id);
+ return pass_response(task, f, parser);
+ }
+ else {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, f->c,
+ "h2_task(%s): response header %s", task->id, line);
+ status = parse_header(parser, line);
+ }
+ break;
+
+ default:
+ return status;
+ }
+ }
+ return status;
+}
+
+apr_status_t h2_filter_headers_out(ap_filter_t *f, apr_bucket_brigade *bb)
+{
+ h2_task *task = f->ctx;
+ request_rec *r = f->r;
+ apr_bucket *b, *bresp, *body_bucket = NULL, *next;
+ ap_bucket_error *eb = NULL;
+ h2_headers *response = NULL;
+ int headers_passing = 0;
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, f->c,
+ "h2_task(%s): output_filter called", task->id);
+
+ if (!task->output.sent_response && !f->c->aborted) {
+        /* Check whether we need to send the response now. Until we actually
+         * see a DATA bucket or some EOS/EOR, we do not do so. */
+ for (b = APR_BRIGADE_FIRST(bb);
+ b != APR_BRIGADE_SENTINEL(bb);
+ b = APR_BUCKET_NEXT(b))
+ {
+ if (AP_BUCKET_IS_ERROR(b) && !eb) {
+ eb = b->data;
+ }
+ else if (AP_BUCKET_IS_EOC(b)) {
+                /* If we see an EOC bucket, it is a signal that we should get
+                 * out of the way and do nothing.
+ */
+ ap_remove_output_filter(f);
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, f->c,
+ "h2_task(%s): eoc bucket passed", task->id);
+ return ap_pass_brigade(f->next, bb);
+ }
+ else if (H2_BUCKET_IS_HEADERS(b)) {
+ headers_passing = 1;
+ }
+ else if (!APR_BUCKET_IS_FLUSH(b)) {
+ body_bucket = b;
+ break;
+ }
+ }
+
+ if (eb) {
+ int st = eb->status;
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, f->c, APLOGNO(03047)
+ "h2_task(%s): err bucket status=%d", task->id, st);
+ /* throw everything away and replace it with the error response
+ * generated by ap_die() */
+ apr_brigade_cleanup(bb);
+ ap_die(st, r);
+ return AP_FILTER_ERROR;
+ }
+
+ if (body_bucket || !headers_passing) {
+            /* Time to insert the response bucket: before the body, or at the
+             * head of the brigade if no h2_headers bucket is being passed,
+             * e.g. when the response is empty. */
+ response = create_response(task, r);
+ if (response == NULL) {
+ ap_log_cerror(APLOG_MARK, APLOG_NOTICE, 0, f->c, APLOGNO(03048)
+ "h2_task(%s): unable to create response", task->id);
+ return APR_ENOMEM;
+ }
+
+ bresp = h2_bucket_headers_create(f->c->bucket_alloc, response);
+ if (body_bucket) {
+ APR_BUCKET_INSERT_BEFORE(body_bucket, bresp);
+ }
+ else {
+ APR_BRIGADE_INSERT_HEAD(bb, bresp);
+ }
+ task->output.sent_response = 1;
+ r->sent_bodyct = 1;
+ }
+ }
+
+ if (r->header_only) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, f->c,
+ "h2_task(%s): header_only, cleanup output brigade",
+ task->id);
+ b = body_bucket? body_bucket : APR_BRIGADE_FIRST(bb);
+ while (b != APR_BRIGADE_SENTINEL(bb)) {
+ next = APR_BUCKET_NEXT(b);
+ if (APR_BUCKET_IS_EOS(b) || AP_BUCKET_IS_EOR(b)) {
+ break;
+ }
+ APR_BUCKET_REMOVE(b);
+ apr_bucket_destroy(b);
+ b = next;
+ }
+ }
+ else if (task->output.sent_response) {
+        /* let's get out of the way, our task is done */
+ ap_remove_output_filter(f);
+ }
+ return ap_pass_brigade(f->next, bb);
+}
+
+static void make_chunk(h2_task *task, apr_bucket_brigade *bb,
+ apr_bucket *first, apr_off_t chunk_len,
+ apr_bucket *tail)
+{
+ /* Surround the buckets [first, tail[ with new buckets carrying the
+ * HTTP/1.1 chunked encoding format. If tail is NULL, the chunk extends
+ * to the end of the brigade. */
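+    /* Sketch of the resulting framing (sizes illustrative): a 26 byte chunk
+     * becomes
+     *     1a\r\n <26 data bytes> \r\n
+     * i.e. the hex length line is inserted before 'first' and the closing
+     * CRLF before 'tail' (or at the brigade's end). */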
+ char buffer[128];
+ apr_bucket *c;
+ int len;
+
+ len = apr_snprintf(buffer, H2_ALEN(buffer),
+ "%"APR_UINT64_T_HEX_FMT"\r\n", (apr_uint64_t)chunk_len);
+ c = apr_bucket_heap_create(buffer, len, NULL, bb->bucket_alloc);
+ APR_BUCKET_INSERT_BEFORE(first, c);
+ c = apr_bucket_heap_create("\r\n", 2, NULL, bb->bucket_alloc);
+ if (tail) {
+ APR_BUCKET_INSERT_BEFORE(tail, c);
+ }
+ else {
+ APR_BRIGADE_INSERT_TAIL(bb, c);
+ }
+ task->input.chunked_total += chunk_len;
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, task->c,
+ "h2_task(%s): added chunk %ld, total %ld",
+ task->id, (long)chunk_len, (long)task->input.chunked_total);
+}
+
+static int ser_header(void *ctx, const char *name, const char *value)
+{
+ apr_bucket_brigade *bb = ctx;
+ apr_brigade_printf(bb, NULL, NULL, "%s: %s\r\n", name, value);
+ return 1;
+}
+
+static apr_status_t read_and_chunk(ap_filter_t *f, h2_task *task,
+ apr_read_type_e block) {
+ request_rec *r = f->r;
+ apr_status_t status = APR_SUCCESS;
+ apr_bucket_brigade *bb = task->input.bbchunk;
+
+ if (!bb) {
+ bb = apr_brigade_create(r->pool, f->c->bucket_alloc);
+ task->input.bbchunk = bb;
+ }
+
+ if (APR_BRIGADE_EMPTY(bb)) {
+ apr_bucket *b, *next, *first_data = NULL;
+ apr_bucket_brigade *tmp;
+ apr_off_t bblen = 0;
+
+ /* get more data from the lower layer filters. Always do this
+         * in larger pieces, since we handle the read modes ourselves. */
+ status = ap_get_brigade(f->next, bb,
+ AP_MODE_READBYTES, block, 32*1024);
+ if (status == APR_EOF) {
+ if (!task->input.eos) {
+ status = apr_brigade_puts(bb, NULL, NULL, "0\r\n\r\n");
+ task->input.eos = 1;
+ return APR_SUCCESS;
+ }
+ ap_remove_input_filter(f);
+ return status;
+
+ }
+ else if (status != APR_SUCCESS) {
+ return status;
+ }
+
+ for (b = APR_BRIGADE_FIRST(bb);
+ b != APR_BRIGADE_SENTINEL(bb) && !task->input.eos;
+ b = next) {
+ next = APR_BUCKET_NEXT(b);
+ if (APR_BUCKET_IS_METADATA(b)) {
+ if (first_data) {
+ make_chunk(task, bb, first_data, bblen, b);
+ first_data = NULL;
+ }
+
+ if (H2_BUCKET_IS_HEADERS(b)) {
+ h2_headers *headers = h2_bucket_headers_get(b);
+
+ ap_assert(headers);
+ ap_log_rerror(APLOG_MARK, APLOG_TRACE2, 0, r,
+ "h2_task(%s): receiving trailers", task->id);
+ tmp = apr_brigade_split_ex(bb, b, NULL);
+ if (!apr_is_empty_table(headers->headers)) {
+ status = apr_brigade_puts(bb, NULL, NULL, "0\r\n");
+ apr_table_do(ser_header, bb, headers->headers, NULL);
+ status = apr_brigade_puts(bb, NULL, NULL, "\r\n");
+ }
+ else {
+ status = apr_brigade_puts(bb, NULL, NULL, "0\r\n\r\n");
+ }
+ r->trailers_in = apr_table_clone(r->pool, headers->headers);
+ APR_BUCKET_REMOVE(b);
+ apr_bucket_destroy(b);
+ APR_BRIGADE_CONCAT(bb, tmp);
+ apr_brigade_destroy(tmp);
+ task->input.eos = 1;
+ }
+ else if (APR_BUCKET_IS_EOS(b)) {
+ tmp = apr_brigade_split_ex(bb, b, NULL);
+ status = apr_brigade_puts(bb, NULL, NULL, "0\r\n\r\n");
+ APR_BRIGADE_CONCAT(bb, tmp);
+ apr_brigade_destroy(tmp);
+ task->input.eos = 1;
+ }
+ }
+ else if (b->length == 0) {
+ APR_BUCKET_REMOVE(b);
+ apr_bucket_destroy(b);
+ }
+ else {
+ if (!first_data) {
+ first_data = b;
+ bblen = 0;
+ }
+ bblen += b->length;
+ }
+ }
+
+ if (first_data) {
+ make_chunk(task, bb, first_data, bblen, NULL);
+ }
+ }
+ return status;
+}
+
+apr_status_t h2_filter_request_in(ap_filter_t* f,
+ apr_bucket_brigade* bb,
+ ap_input_mode_t mode,
+ apr_read_type_e block,
+ apr_off_t readbytes)
+{
+ h2_task *task = f->ctx;
+ request_rec *r = f->r;
+ apr_status_t status = APR_SUCCESS;
+ apr_bucket *b, *next;
+ core_server_config *conf =
+ (core_server_config *) ap_get_module_config(r->server->module_config,
+ &core_module);
+
+ ap_log_rerror(APLOG_MARK, APLOG_TRACE2, 0, f->r,
+ "h2_task(%s): request filter, exp=%d", task->id, r->expecting_100);
+ if (!task->request->chunked) {
+ status = ap_get_brigade(f->next, bb, mode, block, readbytes);
+ /* pipe data through, just take care of trailers */
+ for (b = APR_BRIGADE_FIRST(bb);
+ b != APR_BRIGADE_SENTINEL(bb); b = next) {
+ next = APR_BUCKET_NEXT(b);
+ if (H2_BUCKET_IS_HEADERS(b)) {
+ h2_headers *headers = h2_bucket_headers_get(b);
+ ap_assert(headers);
+ ap_log_rerror(APLOG_MARK, APLOG_TRACE2, 0, r,
+ "h2_task(%s): receiving trailers", task->id);
+ r->trailers_in = headers->headers;
+ if (conf && conf->merge_trailers == AP_MERGE_TRAILERS_ENABLE) {
+ r->headers_in = apr_table_overlay(r->pool, r->headers_in,
+ r->trailers_in);
+ }
+ APR_BUCKET_REMOVE(b);
+ apr_bucket_destroy(b);
+ ap_remove_input_filter(f);
+
+ if (headers->raw_bytes && h2_task_logio_add_bytes_in) {
+ h2_task_logio_add_bytes_in(task->c, headers->raw_bytes);
+ }
+ break;
+ }
+ }
+ return status;
+ }
+
+ /* Things are more complicated. The standard HTTP input filter, which
+     * does a lot of what we do not want to duplicate, also cares about chunked
+ * transfer encoding and trailers.
+ * We need to simulate chunked encoding for it to be happy.
+ */
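+    /* Illustration (trailer name made up): a 5 byte body followed by a
+     * trailer "x-sum: abc" is handed upward roughly as
+     *     5\r\n<5 bytes>\r\n0\r\nx-sum: abc\r\n\r\n
+     * while a body without trailers ends with the bare last-chunk
+     * "0\r\n\r\n" (see read_and_chunk() above). */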
+ if ((status = read_and_chunk(f, task, block)) != APR_SUCCESS) {
+ return status;
+ }
+
+ if (mode == AP_MODE_EXHAUSTIVE) {
+ /* return all we have */
+ APR_BRIGADE_CONCAT(bb, task->input.bbchunk);
+ }
+ else if (mode == AP_MODE_READBYTES) {
+ status = h2_brigade_concat_length(bb, task->input.bbchunk, readbytes);
+ }
+ else if (mode == AP_MODE_SPECULATIVE) {
+ status = h2_brigade_copy_length(bb, task->input.bbchunk, readbytes);
+ }
+ else if (mode == AP_MODE_GETLINE) {
+        /* We are reading a single LF-terminated line, e.g. the HTTP headers.
+         * This has the nasty side effect of splitting the bucket, even
+         * though it ends with CRLF, and of creating a 0 length bucket. */
+ status = apr_brigade_split_line(bb, task->input.bbchunk, block,
+ HUGE_STRING_LEN);
+ if (APLOGctrace1(f->c)) {
+ char buffer[1024];
+ apr_size_t len = sizeof(buffer)-1;
+ apr_brigade_flatten(bb, buffer, &len);
+ buffer[len] = 0;
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, f->c,
+ "h2_task(%s): getline: %s",
+ task->id, buffer);
+ }
+ }
+ else {
+ /* Hmm, well. There is mode AP_MODE_EATCRLF, but we chose not
+ * to support it. Seems to work. */
+ ap_log_cerror(APLOG_MARK, APLOG_ERR, APR_ENOTIMPL, f->c,
+ APLOGNO(02942)
+ "h2_task, unsupported READ mode %d", mode);
+ status = APR_ENOTIMPL;
+ }
+
+ h2_util_bb_log(f->c, task->stream_id, APLOG_TRACE2, "forwarding input", bb);
+ return status;
+}
+
+apr_status_t h2_filter_trailers_out(ap_filter_t *f, apr_bucket_brigade *bb)
+{
+ h2_task *task = f->ctx;
+ request_rec *r = f->r;
+ apr_bucket *b, *e;
+
+ if (task && r) {
+ /* Detect the EOS/EOR bucket and forward any trailers that may have
+ * been set to our h2_headers.
+ */
+ for (b = APR_BRIGADE_FIRST(bb);
+ b != APR_BRIGADE_SENTINEL(bb);
+ b = APR_BUCKET_NEXT(b))
+ {
+ if ((APR_BUCKET_IS_EOS(b) || AP_BUCKET_IS_EOR(b))
+ && r->trailers_out && !apr_is_empty_table(r->trailers_out)) {
+ h2_headers *headers;
+ apr_table_t *trailers;
+
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, f->c, APLOGNO(03049)
+ "h2_task(%s): sending trailers", task->id);
+ trailers = apr_table_clone(r->pool, r->trailers_out);
+ headers = h2_headers_rcreate(r, HTTP_OK, trailers, r->pool);
+ e = h2_bucket_headers_create(bb->bucket_alloc, headers);
+ APR_BUCKET_INSERT_BEFORE(b, e);
+ apr_table_clear(r->trailers_out);
+ ap_remove_output_filter(f);
+ break;
+ }
+ }
+ }
+
+ return ap_pass_brigade(f->next, bb);
+}
+
diff --git a/modules/http2/h2_from_h1.h b/modules/http2/h2_from_h1.h
new file mode 100644
index 0000000..68a24fd
--- /dev/null
+++ b/modules/http2/h2_from_h1.h
@@ -0,0 +1,50 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __mod_h2__h2_from_h1__
+#define __mod_h2__h2_from_h1__
+
+/**
+ * h2_from_h1 parses an HTTP/1.1 response into
+ * - the response status
+ * - a list of header values
+ * - a series of bytes that represent the response body alone, without
+ *   any metadata, such as that inserted by chunked transfer encoding.
+ *
+ * All data is allocated from the stream memory pool.
+ *
+ * Again, see comments in h2_request: ideally we would take the headers
+ * and status from the httpd structures instead of parsing them here, but
+ * we need to have all handlers and filters involved in request/response
+ * processing, so this seems to be the way for now.
+ */
+struct h2_headers;
+struct h2_task;
+
+apr_status_t h2_from_h1_parse_response(struct h2_task *task, ap_filter_t *f,
+ apr_bucket_brigade *bb);
+
+apr_status_t h2_filter_headers_out(ap_filter_t *f, apr_bucket_brigade *bb);
+
+apr_status_t h2_filter_request_in(ap_filter_t* f,
+ apr_bucket_brigade* brigade,
+ ap_input_mode_t mode,
+ apr_read_type_e block,
+ apr_off_t readbytes);
+
+apr_status_t h2_filter_trailers_out(ap_filter_t *f, apr_bucket_brigade *bb);
+
+#endif /* defined(__mod_h2__h2_from_h1__) */
diff --git a/modules/http2/h2_h2.c b/modules/http2/h2_h2.c
new file mode 100644
index 0000000..5580cef
--- /dev/null
+++ b/modules/http2/h2_h2.c
@@ -0,0 +1,765 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <assert.h>
+
+#include <apr_strings.h>
+#include <apr_optional.h>
+#include <apr_optional_hooks.h>
+
+#include <httpd.h>
+#include <http_core.h>
+#include <http_config.h>
+#include <http_connection.h>
+#include <http_protocol.h>
+#include <http_request.h>
+#include <http_log.h>
+
+#include "mod_ssl.h"
+
+#include "mod_http2.h"
+#include "h2_private.h"
+
+#include "h2_bucket_beam.h"
+#include "h2_stream.h"
+#include "h2_task.h"
+#include "h2_config.h"
+#include "h2_ctx.h"
+#include "h2_conn.h"
+#include "h2_filter.h"
+#include "h2_request.h"
+#include "h2_headers.h"
+#include "h2_session.h"
+#include "h2_util.h"
+#include "h2_h2.h"
+#include "mod_http2.h"
+
+const char *h2_tls_protos[] = {
+ "h2", NULL
+};
+
+const char *h2_clear_protos[] = {
+ "h2c", NULL
+};
+
+const char *H2_MAGIC_TOKEN = "PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n";
+
+/*******************************************************************************
+ * The optional mod_ssl functions we need.
+ */
+static APR_OPTIONAL_FN_TYPE(ssl_is_https) *opt_ssl_is_https;
+static APR_OPTIONAL_FN_TYPE(ssl_var_lookup) *opt_ssl_var_lookup;
+
+
+/*******************************************************************************
+ * HTTP/2 error stuff
+ */
+static const char *h2_err_descr[] = {
+ "no error", /* 0x0 */
+ "protocol error",
+ "internal error",
+ "flow control error",
+ "settings timeout",
+ "stream closed", /* 0x5 */
+ "frame size error",
+ "refused stream",
+ "cancel",
+ "compression error",
+ "connect error", /* 0xa */
+ "enhance your calm",
+ "inadequate security",
+ "http/1.1 required",
+};
+
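+/* For example (values per the table above): h2_h2_err_description(0x7)
+ * yields "refused stream", while any code beyond the table yields
+ * "unknown http/2 error code". */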
+const char *h2_h2_err_description(unsigned int h2_error)
+{
+ if (h2_error < (sizeof(h2_err_descr)/sizeof(h2_err_descr[0]))) {
+ return h2_err_descr[h2_error];
+ }
+ return "unknown http/2 error code";
+}
+
+/*******************************************************************************
+ * Check connection security requirements of RFC 7540
+ */
+
+/*
+ * Blacklisted ciphers from RFC 7540, Appendix A
+ *
+ */
+static const char *RFC7540_names[] = {
+    /* ciphers with NULL encryption */
+ "NULL-MD5", /* TLS_NULL_WITH_NULL_NULL */
+ /* same */ /* TLS_RSA_WITH_NULL_MD5 */
+ "NULL-SHA", /* TLS_RSA_WITH_NULL_SHA */
+ "NULL-SHA256", /* TLS_RSA_WITH_NULL_SHA256 */
+ "PSK-NULL-SHA", /* TLS_PSK_WITH_NULL_SHA */
+ "DHE-PSK-NULL-SHA", /* TLS_DHE_PSK_WITH_NULL_SHA */
+ "RSA-PSK-NULL-SHA", /* TLS_RSA_PSK_WITH_NULL_SHA */
+ "PSK-NULL-SHA256", /* TLS_PSK_WITH_NULL_SHA256 */
+ "PSK-NULL-SHA384", /* TLS_PSK_WITH_NULL_SHA384 */
+ "DHE-PSK-NULL-SHA256", /* TLS_DHE_PSK_WITH_NULL_SHA256 */
+ "DHE-PSK-NULL-SHA384", /* TLS_DHE_PSK_WITH_NULL_SHA384 */
+ "RSA-PSK-NULL-SHA256", /* TLS_RSA_PSK_WITH_NULL_SHA256 */
+ "RSA-PSK-NULL-SHA384", /* TLS_RSA_PSK_WITH_NULL_SHA384 */
+ "ECDH-ECDSA-NULL-SHA", /* TLS_ECDH_ECDSA_WITH_NULL_SHA */
+ "ECDHE-ECDSA-NULL-SHA", /* TLS_ECDHE_ECDSA_WITH_NULL_SHA */
+ "ECDH-RSA-NULL-SHA", /* TLS_ECDH_RSA_WITH_NULL_SHA */
+ "ECDHE-RSA-NULL-SHA", /* TLS_ECDHE_RSA_WITH_NULL_SHA */
+ "AECDH-NULL-SHA", /* TLS_ECDH_anon_WITH_NULL_SHA */
+ "ECDHE-PSK-NULL-SHA", /* TLS_ECDHE_PSK_WITH_NULL_SHA */
+ "ECDHE-PSK-NULL-SHA256", /* TLS_ECDHE_PSK_WITH_NULL_SHA256 */
+ "ECDHE-PSK-NULL-SHA384", /* TLS_ECDHE_PSK_WITH_NULL_SHA384 */
+
+ /* DES/3DES ciphers */
+ "PSK-3DES-EDE-CBC-SHA", /* TLS_PSK_WITH_3DES_EDE_CBC_SHA */
+ "DHE-PSK-3DES-EDE-CBC-SHA", /* TLS_DHE_PSK_WITH_3DES_EDE_CBC_SHA */
+ "RSA-PSK-3DES-EDE-CBC-SHA", /* TLS_RSA_PSK_WITH_3DES_EDE_CBC_SHA */
+ "ECDH-ECDSA-DES-CBC3-SHA", /* TLS_ECDH_ECDSA_WITH_3DES_EDE_CBC_SHA */
+ "ECDHE-ECDSA-DES-CBC3-SHA", /* TLS_ECDHE_ECDSA_WITH_3DES_EDE_CBC_SHA */
+ "ECDH-RSA-DES-CBC3-SHA", /* TLS_ECDH_RSA_WITH_3DES_EDE_CBC_SHA */
+ "ECDHE-RSA-DES-CBC3-SHA", /* TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA */
+ "AECDH-DES-CBC3-SHA", /* TLS_ECDH_anon_WITH_3DES_EDE_CBC_SHA */
+ "SRP-3DES-EDE-CBC-SHA", /* TLS_SRP_SHA_WITH_3DES_EDE_CBC_SHA */
+ "SRP-RSA-3DES-EDE-CBC-SHA", /* TLS_SRP_SHA_RSA_WITH_3DES_EDE_CBC_SHA */
+ "SRP-DSS-3DES-EDE-CBC-SHA", /* TLS_SRP_SHA_DSS_WITH_3DES_EDE_CBC_SHA */
+ "ECDHE-PSK-3DES-EDE-CBC-SHA", /* TLS_ECDHE_PSK_WITH_3DES_EDE_CBC_SHA */
+ "DES-CBC-SHA", /* TLS_RSA_WITH_DES_CBC_SHA */
+ "DES-CBC3-SHA", /* TLS_RSA_WITH_3DES_EDE_CBC_SHA */
+ "DHE-DSS-DES-CBC3-SHA", /* TLS_DHE_DSS_WITH_3DES_EDE_CBC_SHA */
+ "DHE-RSA-DES-CBC-SHA", /* TLS_DHE_RSA_WITH_DES_CBC_SHA */
+ "DHE-RSA-DES-CBC3-SHA", /* TLS_DHE_RSA_WITH_3DES_EDE_CBC_SHA */
+ "ADH-DES-CBC-SHA", /* TLS_DH_anon_WITH_DES_CBC_SHA */
+ "ADH-DES-CBC3-SHA", /* TLS_DH_anon_WITH_3DES_EDE_CBC_SHA */
+ "EXP-DH-DSS-DES-CBC-SHA", /* TLS_DH_DSS_EXPORT_WITH_DES40_CBC_SHA */
+ "DH-DSS-DES-CBC-SHA", /* TLS_DH_DSS_WITH_DES_CBC_SHA */
+ "DH-DSS-DES-CBC3-SHA", /* TLS_DH_DSS_WITH_3DES_EDE_CBC_SHA */
+ "EXP-DH-RSA-DES-CBC-SHA", /* TLS_DH_RSA_EXPORT_WITH_DES40_CBC_SHA */
+ "DH-RSA-DES-CBC-SHA", /* TLS_DH_RSA_WITH_DES_CBC_SHA */
+ "DH-RSA-DES-CBC3-SHA", /* TLS_DH_RSA_WITH_3DES_EDE_CBC_SHA */
+
+ /* blacklisted EXPORT ciphers */
+ "EXP-RC4-MD5", /* TLS_RSA_EXPORT_WITH_RC4_40_MD5 */
+ "EXP-RC2-CBC-MD5", /* TLS_RSA_EXPORT_WITH_RC2_CBC_40_MD5 */
+ "EXP-DES-CBC-SHA", /* TLS_RSA_EXPORT_WITH_DES40_CBC_SHA */
+ "EXP-DHE-DSS-DES-CBC-SHA", /* TLS_DHE_DSS_EXPORT_WITH_DES40_CBC_SHA */
+ "EXP-DHE-RSA-DES-CBC-SHA", /* TLS_DHE_RSA_EXPORT_WITH_DES40_CBC_SHA */
+ "EXP-ADH-DES-CBC-SHA", /* TLS_DH_anon_EXPORT_WITH_DES40_CBC_SHA */
+ "EXP-ADH-RC4-MD5", /* TLS_DH_anon_EXPORT_WITH_RC4_40_MD5 */
+
+ /* blacklisted RC4 encryption */
+ "RC4-MD5", /* TLS_RSA_WITH_RC4_128_MD5 */
+ "RC4-SHA", /* TLS_RSA_WITH_RC4_128_SHA */
+ "ADH-RC4-MD5", /* TLS_DH_anon_WITH_RC4_128_MD5 */
+ "KRB5-RC4-SHA", /* TLS_KRB5_WITH_RC4_128_SHA */
+ "KRB5-RC4-MD5", /* TLS_KRB5_WITH_RC4_128_MD5 */
+ "EXP-KRB5-RC4-SHA", /* TLS_KRB5_EXPORT_WITH_RC4_40_SHA */
+ "EXP-KRB5-RC4-MD5", /* TLS_KRB5_EXPORT_WITH_RC4_40_MD5 */
+ "PSK-RC4-SHA", /* TLS_PSK_WITH_RC4_128_SHA */
+ "DHE-PSK-RC4-SHA", /* TLS_DHE_PSK_WITH_RC4_128_SHA */
+ "RSA-PSK-RC4-SHA", /* TLS_RSA_PSK_WITH_RC4_128_SHA */
+ "ECDH-ECDSA-RC4-SHA", /* TLS_ECDH_ECDSA_WITH_RC4_128_SHA */
+ "ECDHE-ECDSA-RC4-SHA", /* TLS_ECDHE_ECDSA_WITH_RC4_128_SHA */
+ "ECDH-RSA-RC4-SHA", /* TLS_ECDH_RSA_WITH_RC4_128_SHA */
+ "ECDHE-RSA-RC4-SHA", /* TLS_ECDHE_RSA_WITH_RC4_128_SHA */
+ "AECDH-RC4-SHA", /* TLS_ECDH_anon_WITH_RC4_128_SHA */
+ "ECDHE-PSK-RC4-SHA", /* TLS_ECDHE_PSK_WITH_RC4_128_SHA */
+
+    /* blacklisted AES128 encryption ciphers */
+    "AES128-SHA",                    /* TLS_RSA_WITH_AES_128_CBC_SHA */
+ "DH-DSS-AES128-SHA", /* TLS_DH_DSS_WITH_AES_128_CBC_SHA */
+ "DH-RSA-AES128-SHA", /* TLS_DH_RSA_WITH_AES_128_CBC_SHA */
+ "DHE-DSS-AES128-SHA", /* TLS_DHE_DSS_WITH_AES_128_CBC_SHA */
+ "DHE-RSA-AES128-SHA", /* TLS_DHE_RSA_WITH_AES_128_CBC_SHA */
+ "ADH-AES128-SHA", /* TLS_DH_anon_WITH_AES_128_CBC_SHA */
+ "AES128-SHA256", /* TLS_RSA_WITH_AES_128_CBC_SHA256 */
+ "DH-DSS-AES128-SHA256", /* TLS_DH_DSS_WITH_AES_128_CBC_SHA256 */
+ "DH-RSA-AES128-SHA256", /* TLS_DH_RSA_WITH_AES_128_CBC_SHA256 */
+ "DHE-DSS-AES128-SHA256", /* TLS_DHE_DSS_WITH_AES_128_CBC_SHA256 */
+ "DHE-RSA-AES128-SHA256", /* TLS_DHE_RSA_WITH_AES_128_CBC_SHA256 */
+ "ECDH-ECDSA-AES128-SHA", /* TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA */
+ "ECDHE-ECDSA-AES128-SHA", /* TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA */
+ "ECDH-RSA-AES128-SHA", /* TLS_ECDH_RSA_WITH_AES_128_CBC_SHA */
+ "ECDHE-RSA-AES128-SHA", /* TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA */
+ "AECDH-AES128-SHA", /* TLS_ECDH_anon_WITH_AES_128_CBC_SHA */
+ "ECDHE-ECDSA-AES128-SHA256", /* TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256 */
+ "ECDH-ECDSA-AES128-SHA256", /* TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA256 */
+ "ECDHE-RSA-AES128-SHA256", /* TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256 */
+ "ECDH-RSA-AES128-SHA256", /* TLS_ECDH_RSA_WITH_AES_128_CBC_SHA256 */
+ "ADH-AES128-SHA256", /* TLS_DH_anon_WITH_AES_128_CBC_SHA256 */
+ "PSK-AES128-CBC-SHA", /* TLS_PSK_WITH_AES_128_CBC_SHA */
+ "DHE-PSK-AES128-CBC-SHA", /* TLS_DHE_PSK_WITH_AES_128_CBC_SHA */
+ "RSA-PSK-AES128-CBC-SHA", /* TLS_RSA_PSK_WITH_AES_128_CBC_SHA */
+ "PSK-AES128-CBC-SHA256", /* TLS_PSK_WITH_AES_128_CBC_SHA256 */
+ "DHE-PSK-AES128-CBC-SHA256", /* TLS_DHE_PSK_WITH_AES_128_CBC_SHA256 */
+ "RSA-PSK-AES128-CBC-SHA256", /* TLS_RSA_PSK_WITH_AES_128_CBC_SHA256 */
+ "ECDHE-PSK-AES128-CBC-SHA", /* TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA */
+ "ECDHE-PSK-AES128-CBC-SHA256", /* TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA256 */
+ "AES128-CCM", /* TLS_RSA_WITH_AES_128_CCM */
+ "AES128-CCM8", /* TLS_RSA_WITH_AES_128_CCM_8 */
+ "PSK-AES128-CCM", /* TLS_PSK_WITH_AES_128_CCM */
+ "PSK-AES128-CCM8", /* TLS_PSK_WITH_AES_128_CCM_8 */
+ "AES128-GCM-SHA256", /* TLS_RSA_WITH_AES_128_GCM_SHA256 */
+ "DH-RSA-AES128-GCM-SHA256", /* TLS_DH_RSA_WITH_AES_128_GCM_SHA256 */
+ "DH-DSS-AES128-GCM-SHA256", /* TLS_DH_DSS_WITH_AES_128_GCM_SHA256 */
+ "ADH-AES128-GCM-SHA256", /* TLS_DH_anon_WITH_AES_128_GCM_SHA256 */
+ "PSK-AES128-GCM-SHA256", /* TLS_PSK_WITH_AES_128_GCM_SHA256 */
+ "RSA-PSK-AES128-GCM-SHA256", /* TLS_RSA_PSK_WITH_AES_128_GCM_SHA256 */
+ "ECDH-ECDSA-AES128-GCM-SHA256", /* TLS_ECDH_ECDSA_WITH_AES_128_GCM_SHA256 */
+ "ECDH-RSA-AES128-GCM-SHA256", /* TLS_ECDH_RSA_WITH_AES_128_GCM_SHA256 */
+ "SRP-AES-128-CBC-SHA", /* TLS_SRP_SHA_WITH_AES_128_CBC_SHA */
+ "SRP-RSA-AES-128-CBC-SHA", /* TLS_SRP_SHA_RSA_WITH_AES_128_CBC_SHA */
+ "SRP-DSS-AES-128-CBC-SHA", /* TLS_SRP_SHA_DSS_WITH_AES_128_CBC_SHA */
+
+    /* blacklisted AES256 encryption ciphers */
+ "AES256-SHA", /* TLS_RSA_WITH_AES_256_CBC_SHA */
+ "DH-DSS-AES256-SHA", /* TLS_DH_DSS_WITH_AES_256_CBC_SHA */
+ "DH-RSA-AES256-SHA", /* TLS_DH_RSA_WITH_AES_256_CBC_SHA */
+ "DHE-DSS-AES256-SHA", /* TLS_DHE_DSS_WITH_AES_256_CBC_SHA */
+ "DHE-RSA-AES256-SHA", /* TLS_DHE_RSA_WITH_AES_256_CBC_SHA */
+ "ADH-AES256-SHA", /* TLS_DH_anon_WITH_AES_256_CBC_SHA */
+ "AES256-SHA256", /* TLS_RSA_WITH_AES_256_CBC_SHA256 */
+ "DH-DSS-AES256-SHA256", /* TLS_DH_DSS_WITH_AES_256_CBC_SHA256 */
+ "DH-RSA-AES256-SHA256", /* TLS_DH_RSA_WITH_AES_256_CBC_SHA256 */
+ "DHE-DSS-AES256-SHA256", /* TLS_DHE_DSS_WITH_AES_256_CBC_SHA256 */
+ "DHE-RSA-AES256-SHA256", /* TLS_DHE_RSA_WITH_AES_256_CBC_SHA256 */
+ "ADH-AES256-SHA256", /* TLS_DH_anon_WITH_AES_256_CBC_SHA256 */
+ "ECDH-ECDSA-AES256-SHA", /* TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA */
+ "ECDHE-ECDSA-AES256-SHA", /* TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA */
+ "ECDH-RSA-AES256-SHA", /* TLS_ECDH_RSA_WITH_AES_256_CBC_SHA */
+ "ECDHE-RSA-AES256-SHA", /* TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA */
+ "AECDH-AES256-SHA", /* TLS_ECDH_anon_WITH_AES_256_CBC_SHA */
+ "ECDHE-ECDSA-AES256-SHA384", /* TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384 */
+ "ECDH-ECDSA-AES256-SHA384", /* TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA384 */
+ "ECDHE-RSA-AES256-SHA384", /* TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384 */
+ "ECDH-RSA-AES256-SHA384", /* TLS_ECDH_RSA_WITH_AES_256_CBC_SHA384 */
+ "PSK-AES256-CBC-SHA", /* TLS_PSK_WITH_AES_256_CBC_SHA */
+ "DHE-PSK-AES256-CBC-SHA", /* TLS_DHE_PSK_WITH_AES_256_CBC_SHA */
+ "RSA-PSK-AES256-CBC-SHA", /* TLS_RSA_PSK_WITH_AES_256_CBC_SHA */
+ "PSK-AES256-CBC-SHA384", /* TLS_PSK_WITH_AES_256_CBC_SHA384 */
+ "DHE-PSK-AES256-CBC-SHA384", /* TLS_DHE_PSK_WITH_AES_256_CBC_SHA384 */
+ "RSA-PSK-AES256-CBC-SHA384", /* TLS_RSA_PSK_WITH_AES_256_CBC_SHA384 */
+ "ECDHE-PSK-AES256-CBC-SHA", /* TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA */
+ "ECDHE-PSK-AES256-CBC-SHA384", /* TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA384 */
+ "SRP-AES-256-CBC-SHA", /* TLS_SRP_SHA_WITH_AES_256_CBC_SHA */
+ "SRP-RSA-AES-256-CBC-SHA", /* TLS_SRP_SHA_RSA_WITH_AES_256_CBC_SHA */
+ "SRP-DSS-AES-256-CBC-SHA", /* TLS_SRP_SHA_DSS_WITH_AES_256_CBC_SHA */
+ "AES256-CCM", /* TLS_RSA_WITH_AES_256_CCM */
+ "AES256-CCM8", /* TLS_RSA_WITH_AES_256_CCM_8 */
+ "PSK-AES256-CCM", /* TLS_PSK_WITH_AES_256_CCM */
+ "PSK-AES256-CCM8", /* TLS_PSK_WITH_AES_256_CCM_8 */
+ "AES256-GCM-SHA384", /* TLS_RSA_WITH_AES_256_GCM_SHA384 */
+ "DH-RSA-AES256-GCM-SHA384", /* TLS_DH_RSA_WITH_AES_256_GCM_SHA384 */
+ "DH-DSS-AES256-GCM-SHA384", /* TLS_DH_DSS_WITH_AES_256_GCM_SHA384 */
+ "ADH-AES256-GCM-SHA384", /* TLS_DH_anon_WITH_AES_256_GCM_SHA384 */
+ "PSK-AES256-GCM-SHA384", /* TLS_PSK_WITH_AES_256_GCM_SHA384 */
+ "RSA-PSK-AES256-GCM-SHA384", /* TLS_RSA_PSK_WITH_AES_256_GCM_SHA384 */
+ "ECDH-ECDSA-AES256-GCM-SHA384", /* TLS_ECDH_ECDSA_WITH_AES_256_GCM_SHA384 */
+ "ECDH-RSA-AES256-GCM-SHA384", /* TLS_ECDH_RSA_WITH_AES_256_GCM_SHA384 */
+
+    /* blacklisted CAMELLIA128 encryption ciphers */
+ "CAMELLIA128-SHA", /* TLS_RSA_WITH_CAMELLIA_128_CBC_SHA */
+ "DH-DSS-CAMELLIA128-SHA", /* TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA */
+ "DH-RSA-CAMELLIA128-SHA", /* TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA */
+ "DHE-DSS-CAMELLIA128-SHA", /* TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA */
+ "DHE-RSA-CAMELLIA128-SHA", /* TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA */
+ "ADH-CAMELLIA128-SHA", /* TLS_DH_anon_WITH_CAMELLIA_128_CBC_SHA */
+ "ECDHE-ECDSA-CAMELLIA128-SHA256", /* TLS_ECDHE_ECDSA_WITH_CAMELLIA_128_CBC_SHA256 */
+ "ECDH-ECDSA-CAMELLIA128-SHA256", /* TLS_ECDH_ECDSA_WITH_CAMELLIA_128_CBC_SHA256 */
+ "ECDHE-RSA-CAMELLIA128-SHA256", /* TLS_ECDHE_RSA_WITH_CAMELLIA_128_CBC_SHA256 */
+ "ECDH-RSA-CAMELLIA128-SHA256", /* TLS_ECDH_RSA_WITH_CAMELLIA_128_CBC_SHA256 */
+ "PSK-CAMELLIA128-SHA256", /* TLS_PSK_WITH_CAMELLIA_128_CBC_SHA256 */
+ "DHE-PSK-CAMELLIA128-SHA256", /* TLS_DHE_PSK_WITH_CAMELLIA_128_CBC_SHA256 */
+ "RSA-PSK-CAMELLIA128-SHA256", /* TLS_RSA_PSK_WITH_CAMELLIA_128_CBC_SHA256 */
+ "ECDHE-PSK-CAMELLIA128-SHA256", /* TLS_ECDHE_PSK_WITH_CAMELLIA_128_CBC_SHA256 */
+ "CAMELLIA128-GCM-SHA256", /* TLS_RSA_WITH_CAMELLIA_128_GCM_SHA256 */
+ "DH-RSA-CAMELLIA128-GCM-SHA256", /* TLS_DH_RSA_WITH_CAMELLIA_128_GCM_SHA256 */
+ "DH-DSS-CAMELLIA128-GCM-SHA256", /* TLS_DH_DSS_WITH_CAMELLIA_128_GCM_SHA256 */
+ "ADH-CAMELLIA128-GCM-SHA256", /* TLS_DH_anon_WITH_CAMELLIA_128_GCM_SHA256 */
+ "ECDH-ECDSA-CAMELLIA128-GCM-SHA256",/* TLS_ECDH_ECDSA_WITH_CAMELLIA_128_GCM_SHA256 */
+ "ECDH-RSA-CAMELLIA128-GCM-SHA256", /* TLS_ECDH_RSA_WITH_CAMELLIA_128_GCM_SHA256 */
+ "PSK-CAMELLIA128-GCM-SHA256", /* TLS_PSK_WITH_CAMELLIA_128_GCM_SHA256 */
+ "RSA-PSK-CAMELLIA128-GCM-SHA256", /* TLS_RSA_PSK_WITH_CAMELLIA_128_GCM_SHA256 */
+ "CAMELLIA128-SHA256", /* TLS_RSA_WITH_CAMELLIA_128_CBC_SHA256 */
+ "DH-DSS-CAMELLIA128-SHA256", /* TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA256 */
+ "DH-RSA-CAMELLIA128-SHA256", /* TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA256 */
+ "DHE-DSS-CAMELLIA128-SHA256", /* TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA256 */
+ "DHE-RSA-CAMELLIA128-SHA256", /* TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA256 */
+ "ADH-CAMELLIA128-SHA256", /* TLS_DH_anon_WITH_CAMELLIA_128_CBC_SHA256 */
+
+    /* blacklisted CAMELLIA256 encryption ciphers */
+ "CAMELLIA256-SHA", /* TLS_RSA_WITH_CAMELLIA_256_CBC_SHA */
+ "DH-RSA-CAMELLIA256-SHA", /* TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA */
+ "DH-DSS-CAMELLIA256-SHA", /* TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA */
+ "DHE-DSS-CAMELLIA256-SHA", /* TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA */
+ "DHE-RSA-CAMELLIA256-SHA", /* TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA */
+ "ADH-CAMELLIA256-SHA", /* TLS_DH_anon_WITH_CAMELLIA_256_CBC_SHA */
+ "ECDHE-ECDSA-CAMELLIA256-SHA384", /* TLS_ECDHE_ECDSA_WITH_CAMELLIA_256_CBC_SHA384 */
+ "ECDH-ECDSA-CAMELLIA256-SHA384", /* TLS_ECDH_ECDSA_WITH_CAMELLIA_256_CBC_SHA384 */
+ "ECDHE-RSA-CAMELLIA256-SHA384", /* TLS_ECDHE_RSA_WITH_CAMELLIA_256_CBC_SHA384 */
+ "ECDH-RSA-CAMELLIA256-SHA384", /* TLS_ECDH_RSA_WITH_CAMELLIA_256_CBC_SHA384 */
+ "PSK-CAMELLIA256-SHA384", /* TLS_PSK_WITH_CAMELLIA_256_CBC_SHA384 */
+ "DHE-PSK-CAMELLIA256-SHA384", /* TLS_DHE_PSK_WITH_CAMELLIA_256_CBC_SHA384 */
+ "RSA-PSK-CAMELLIA256-SHA384", /* TLS_RSA_PSK_WITH_CAMELLIA_256_CBC_SHA384 */
+ "ECDHE-PSK-CAMELLIA256-SHA384", /* TLS_ECDHE_PSK_WITH_CAMELLIA_256_CBC_SHA384 */
+ "CAMELLIA256-SHA256", /* TLS_RSA_WITH_CAMELLIA_256_CBC_SHA256 */
+ "DH-DSS-CAMELLIA256-SHA256", /* TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA256 */
+ "DH-RSA-CAMELLIA256-SHA256", /* TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA256 */
+ "DHE-DSS-CAMELLIA256-SHA256", /* TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA256 */
+ "DHE-RSA-CAMELLIA256-SHA256", /* TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA256 */
+ "ADH-CAMELLIA256-SHA256", /* TLS_DH_anon_WITH_CAMELLIA_256_CBC_SHA256 */
+ "CAMELLIA256-GCM-SHA384", /* TLS_RSA_WITH_CAMELLIA_256_GCM_SHA384 */
+ "DH-RSA-CAMELLIA256-GCM-SHA384", /* TLS_DH_RSA_WITH_CAMELLIA_256_GCM_SHA384 */
+ "DH-DSS-CAMELLIA256-GCM-SHA384", /* TLS_DH_DSS_WITH_CAMELLIA_256_GCM_SHA384 */
+ "ADH-CAMELLIA256-GCM-SHA384", /* TLS_DH_anon_WITH_CAMELLIA_256_GCM_SHA384 */
+ "ECDH-ECDSA-CAMELLIA256-GCM-SHA384",/* TLS_ECDH_ECDSA_WITH_CAMELLIA_256_GCM_SHA384 */
+ "ECDH-RSA-CAMELLIA256-GCM-SHA384", /* TLS_ECDH_RSA_WITH_CAMELLIA_256_GCM_SHA384 */
+ "PSK-CAMELLIA256-GCM-SHA384", /* TLS_PSK_WITH_CAMELLIA_256_GCM_SHA384 */
+ "RSA-PSK-CAMELLIA256-GCM-SHA384", /* TLS_RSA_PSK_WITH_CAMELLIA_256_GCM_SHA384 */
+
+    /* blacklisted ARIA encryption ciphers */
+ "ARIA128-SHA256", /* TLS_RSA_WITH_ARIA_128_CBC_SHA256 */
+ "ARIA256-SHA384", /* TLS_RSA_WITH_ARIA_256_CBC_SHA384 */
+ "DH-DSS-ARIA128-SHA256", /* TLS_DH_DSS_WITH_ARIA_128_CBC_SHA256 */
+ "DH-DSS-ARIA256-SHA384", /* TLS_DH_DSS_WITH_ARIA_256_CBC_SHA384 */
+ "DH-RSA-ARIA128-SHA256", /* TLS_DH_RSA_WITH_ARIA_128_CBC_SHA256 */
+ "DH-RSA-ARIA256-SHA384", /* TLS_DH_RSA_WITH_ARIA_256_CBC_SHA384 */
+ "DHE-DSS-ARIA128-SHA256", /* TLS_DHE_DSS_WITH_ARIA_128_CBC_SHA256 */
+ "DHE-DSS-ARIA256-SHA384", /* TLS_DHE_DSS_WITH_ARIA_256_CBC_SHA384 */
+ "DHE-RSA-ARIA128-SHA256", /* TLS_DHE_RSA_WITH_ARIA_128_CBC_SHA256 */
+ "DHE-RSA-ARIA256-SHA384", /* TLS_DHE_RSA_WITH_ARIA_256_CBC_SHA384 */
+ "ADH-ARIA128-SHA256", /* TLS_DH_anon_WITH_ARIA_128_CBC_SHA256 */
+ "ADH-ARIA256-SHA384", /* TLS_DH_anon_WITH_ARIA_256_CBC_SHA384 */
+ "ECDHE-ECDSA-ARIA128-SHA256", /* TLS_ECDHE_ECDSA_WITH_ARIA_128_CBC_SHA256 */
+ "ECDHE-ECDSA-ARIA256-SHA384", /* TLS_ECDHE_ECDSA_WITH_ARIA_256_CBC_SHA384 */
+ "ECDH-ECDSA-ARIA128-SHA256", /* TLS_ECDH_ECDSA_WITH_ARIA_128_CBC_SHA256 */
+ "ECDH-ECDSA-ARIA256-SHA384", /* TLS_ECDH_ECDSA_WITH_ARIA_256_CBC_SHA384 */
+ "ECDHE-RSA-ARIA128-SHA256", /* TLS_ECDHE_RSA_WITH_ARIA_128_CBC_SHA256 */
+ "ECDHE-RSA-ARIA256-SHA384", /* TLS_ECDHE_RSA_WITH_ARIA_256_CBC_SHA384 */
+ "ECDH-RSA-ARIA128-SHA256", /* TLS_ECDH_RSA_WITH_ARIA_128_CBC_SHA256 */
+ "ECDH-RSA-ARIA256-SHA384", /* TLS_ECDH_RSA_WITH_ARIA_256_CBC_SHA384 */
+ "ARIA128-GCM-SHA256", /* TLS_RSA_WITH_ARIA_128_GCM_SHA256 */
+ "ARIA256-GCM-SHA384", /* TLS_RSA_WITH_ARIA_256_GCM_SHA384 */
+ "DH-DSS-ARIA128-GCM-SHA256", /* TLS_DH_DSS_WITH_ARIA_128_GCM_SHA256 */
+ "DH-DSS-ARIA256-GCM-SHA384", /* TLS_DH_DSS_WITH_ARIA_256_GCM_SHA384 */
+ "DH-RSA-ARIA128-GCM-SHA256", /* TLS_DH_RSA_WITH_ARIA_128_GCM_SHA256 */
+ "DH-RSA-ARIA256-GCM-SHA384", /* TLS_DH_RSA_WITH_ARIA_256_GCM_SHA384 */
+ "ADH-ARIA128-GCM-SHA256", /* TLS_DH_anon_WITH_ARIA_128_GCM_SHA256 */
+ "ADH-ARIA256-GCM-SHA384", /* TLS_DH_anon_WITH_ARIA_256_GCM_SHA384 */
+ "ECDH-ECDSA-ARIA128-GCM-SHA256", /* TLS_ECDH_ECDSA_WITH_ARIA_128_GCM_SHA256 */
+ "ECDH-ECDSA-ARIA256-GCM-SHA384", /* TLS_ECDH_ECDSA_WITH_ARIA_256_GCM_SHA384 */
+ "ECDH-RSA-ARIA128-GCM-SHA256", /* TLS_ECDH_RSA_WITH_ARIA_128_GCM_SHA256 */
+ "ECDH-RSA-ARIA256-GCM-SHA384", /* TLS_ECDH_RSA_WITH_ARIA_256_GCM_SHA384 */
+ "PSK-ARIA128-SHA256", /* TLS_PSK_WITH_ARIA_128_CBC_SHA256 */
+ "PSK-ARIA256-SHA384", /* TLS_PSK_WITH_ARIA_256_CBC_SHA384 */
+ "DHE-PSK-ARIA128-SHA256", /* TLS_DHE_PSK_WITH_ARIA_128_CBC_SHA256 */
+ "DHE-PSK-ARIA256-SHA384", /* TLS_DHE_PSK_WITH_ARIA_256_CBC_SHA384 */
+ "RSA-PSK-ARIA128-SHA256", /* TLS_RSA_PSK_WITH_ARIA_128_CBC_SHA256 */
+ "RSA-PSK-ARIA256-SHA384", /* TLS_RSA_PSK_WITH_ARIA_256_CBC_SHA384 */
+ "ARIA128-GCM-SHA256", /* TLS_PSK_WITH_ARIA_128_GCM_SHA256 */
+ "ARIA256-GCM-SHA384", /* TLS_PSK_WITH_ARIA_256_GCM_SHA384 */
+ "RSA-PSK-ARIA128-GCM-SHA256", /* TLS_RSA_PSK_WITH_ARIA_128_GCM_SHA256 */
+ "RSA-PSK-ARIA256-GCM-SHA384", /* TLS_RSA_PSK_WITH_ARIA_256_GCM_SHA384 */
+ "ECDHE-PSK-ARIA128-SHA256", /* TLS_ECDHE_PSK_WITH_ARIA_128_CBC_SHA256 */
+ "ECDHE-PSK-ARIA256-SHA384", /* TLS_ECDHE_PSK_WITH_ARIA_256_CBC_SHA384 */
+
+ /* blacklisted SEED encryptions */
+ "SEED-SHA", /*TLS_RSA_WITH_SEED_CBC_SHA */
+ "DH-DSS-SEED-SHA", /* TLS_DH_DSS_WITH_SEED_CBC_SHA */
+ "DH-RSA-SEED-SHA", /* TLS_DH_RSA_WITH_SEED_CBC_SHA */
+ "DHE-DSS-SEED-SHA", /* TLS_DHE_DSS_WITH_SEED_CBC_SHA */
+ "DHE-RSA-SEED-SHA", /* TLS_DHE_RSA_WITH_SEED_CBC_SHA */
+ "ADH-SEED-SHA", /* TLS_DH_anon_WITH_SEED_CBC_SHA */
+
+ /* blacklisted KRB5 ciphers */
+ "KRB5-DES-CBC-SHA", /* TLS_KRB5_WITH_DES_CBC_SHA */
+ "KRB5-DES-CBC3-SHA", /* TLS_KRB5_WITH_3DES_EDE_CBC_SHA */
+ "KRB5-IDEA-CBC-SHA", /* TLS_KRB5_WITH_IDEA_CBC_SHA */
+ "KRB5-DES-CBC-MD5", /* TLS_KRB5_WITH_DES_CBC_MD5 */
+ "KRB5-DES-CBC3-MD5", /* TLS_KRB5_WITH_3DES_EDE_CBC_MD5 */
+ "KRB5-IDEA-CBC-MD5", /* TLS_KRB5_WITH_IDEA_CBC_MD5 */
+ "EXP-KRB5-DES-CBC-SHA", /* TLS_KRB5_EXPORT_WITH_DES_CBC_40_SHA */
+ "EXP-KRB5-DES-CBC-MD5", /* TLS_KRB5_EXPORT_WITH_DES_CBC_40_MD5 */
+ "EXP-KRB5-RC2-CBC-SHA", /* TLS_KRB5_EXPORT_WITH_RC2_CBC_40_SHA */
+ "EXP-KRB5-RC2-CBC-MD5", /* TLS_KRB5_EXPORT_WITH_RC2_CBC_40_MD5 */
+
+ /* blacklisted exoticas */
+ "DHE-DSS-CBC-SHA", /* TLS_DHE_DSS_WITH_DES_CBC_SHA */
+ "IDEA-CBC-SHA", /* TLS_RSA_WITH_IDEA_CBC_SHA */
+
+ /* not really sure if the following names are correct */
+ "SSL3_CK_SCSV", /* TLS_EMPTY_RENEGOTIATION_INFO_SCSV */
+ "SSL3_CK_FALLBACK_SCSV"
+};
+static size_t RFC7540_names_LEN = sizeof(RFC7540_names)/sizeof(RFC7540_names[0]);
+
+
+static apr_hash_t *BLCNames;
+
+static void cipher_init(apr_pool_t *pool)
+{
+ apr_hash_t *hash = apr_hash_make(pool);
+ const char *source;
+ unsigned int i;
+
+ source = "rfc7540";
+ for (i = 0; i < RFC7540_names_LEN; ++i) {
+ apr_hash_set(hash, RFC7540_names[i], APR_HASH_KEY_STRING, source);
+ }
+
+ BLCNames = hash;
+}
+
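+/* Usage sketch (assuming cipher_init() has run): passing a name from the
+ * list above, e.g. "ECDHE-RSA-AES128-SHA", returns non-zero and sets
+ * *psource to "rfc7540"; unknown cipher names return 0. */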
+static int cipher_is_blacklisted(const char *cipher, const char **psource)
+{
+ *psource = apr_hash_get(BLCNames, cipher, APR_HASH_KEY_STRING);
+ return !!*psource;
+}
+
+/*******************************************************************************
+ * Hooks for processing incoming connections:
+ * - process_conn take over connection in case of h2
+ */
+static int h2_h2_process_conn(conn_rec* c);
+static int h2_h2_pre_close_conn(conn_rec* c);
+static int h2_h2_post_read_req(request_rec *r);
+static int h2_h2_late_fixups(request_rec *r);
+
+/*******************************************************************************
+ * Once per lifetime init, retrieve optional functions
+ */
+apr_status_t h2_h2_init(apr_pool_t *pool, server_rec *s)
+{
+ (void)pool;
+ ap_log_error(APLOG_MARK, APLOG_TRACE1, 0, s, "h2_h2, child_init");
+ opt_ssl_is_https = APR_RETRIEVE_OPTIONAL_FN(ssl_is_https);
+ opt_ssl_var_lookup = APR_RETRIEVE_OPTIONAL_FN(ssl_var_lookup);
+
+ if (!opt_ssl_is_https || !opt_ssl_var_lookup) {
+ ap_log_error(APLOG_MARK, APLOG_WARNING, 0, s,
+ APLOGNO(02951) "mod_ssl does not seem to be enabled");
+ }
+
+ cipher_init(pool);
+
+ return APR_SUCCESS;
+}
+
+int h2_h2_is_tls(conn_rec *c)
+{
+ return opt_ssl_is_https && opt_ssl_is_https(c);
+}
+
+int h2_is_acceptable_connection(conn_rec *c, int require_all)
+{
+ int is_tls = h2_h2_is_tls(c);
+ const h2_config *cfg = h2_config_get(c);
+
+ if (is_tls && h2_config_geti(cfg, H2_CONF_MODERN_TLS_ONLY) > 0) {
+ /* Check TLS connection for modern TLS parameters, as defined in
+ * RFC 7540 and https://wiki.mozilla.org/Security/Server_Side_TLS#Modern_compatibility
+ */
+ apr_pool_t *pool = c->pool;
+ server_rec *s = c->base_server;
+ char *val;
+
+ if (!opt_ssl_var_lookup) {
+ /* unable to check */
+ return 0;
+ }
+
+        /* Need TLSv1.2 or higher, RFC 7540, ch. 9.2
+ */
+ val = opt_ssl_var_lookup(pool, s, c, NULL, (char*)"SSL_PROTOCOL");
+ if (val && *val) {
+ if (strncmp("TLS", val, 3)
+ || !strcmp("TLSv1", val)
+ || !strcmp("TLSv1.1", val)) {
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, c, APLOGNO(03050)
+ "h2_h2(%ld): tls protocol not suitable: %s",
+ (long)c->id, val);
+ return 0;
+ }
+ }
+ else if (require_all) {
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, c, APLOGNO(03051)
+ "h2_h2(%ld): tls protocol is indetermined", (long)c->id);
+ return 0;
+ }
+
+ /* Check TLS cipher blacklist
+ */
+ val = opt_ssl_var_lookup(pool, s, c, NULL, (char*)"SSL_CIPHER");
+ if (val && *val) {
+ const char *source;
+ if (cipher_is_blacklisted(val, &source)) {
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, c, APLOGNO(03052)
+ "h2_h2(%ld): tls cipher %s blacklisted by %s",
+ (long)c->id, val, source);
+ return 0;
+ }
+ }
+ else if (require_all) {
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, c, APLOGNO(03053)
+ "h2_h2(%ld): tls cipher is indetermined", (long)c->id);
+ return 0;
+ }
+ }
+ return 1;
+}
+
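+/* When H2Direct is not configured (h2_config_geti() returns < 0), the code
+ * below defaults it to enabled on cleartext (h2c) connections and disabled
+ * on TLS connections. */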
+int h2_allows_h2_direct(conn_rec *c)
+{
+ const h2_config *cfg = h2_config_get(c);
+ int is_tls = h2_h2_is_tls(c);
+ const char *needed_protocol = is_tls? "h2" : "h2c";
+ int h2_direct = h2_config_geti(cfg, H2_CONF_DIRECT);
+
+ if (h2_direct < 0) {
+ h2_direct = is_tls? 0 : 1;
+ }
+ return (h2_direct
+ && ap_is_allowed_protocol(c, NULL, NULL, needed_protocol));
+}
+
+int h2_allows_h2_upgrade(conn_rec *c)
+{
+ const h2_config *cfg = h2_config_get(c);
+ int h2_upgrade = h2_config_geti(cfg, H2_CONF_UPGRADE);
+
+ return h2_upgrade > 0 || (h2_upgrade < 0 && !h2_h2_is_tls(c));
+}
+
+/*******************************************************************************
+ * Register various hooks
+ */
+static const char* const mod_ssl[] = { "mod_ssl.c", NULL};
+static const char* const mod_reqtimeout[] = { "mod_reqtimeout.c", NULL};
+
+void h2_h2_register_hooks(void)
+{
+ /* Our main processing needs to run quite late. Definitely after mod_ssl,
+     * as we need its connection filters, but also before mod_reqtimeout, as
+     * its method of timeouts is specific to HTTP/1.1 (as of now).
+     * The core HTTP/1 processing runs as REALLY_LAST, so we will have
+ * a chance to take over before it.
+ */
+ ap_hook_process_connection(h2_h2_process_conn,
+ mod_ssl, mod_reqtimeout, APR_HOOK_LAST);
+
+ /* One last chance to properly say goodbye if we have not done so
+ * already. */
+ ap_hook_pre_close_connection(h2_h2_pre_close_conn, NULL, mod_ssl, APR_HOOK_LAST);
+
+ /* With "H2SerializeHeaders On", we install the filter in this hook
+ * that parses the response. This needs to happen before any other post
+ * read function terminates the request with an error. Otherwise we will
+ * never see the response.
+ */
+ ap_hook_post_read_request(h2_h2_post_read_req, NULL, NULL, APR_HOOK_REALLY_FIRST);
+ ap_hook_fixups(h2_h2_late_fixups, NULL, NULL, APR_HOOK_LAST);
+
+ /* special bucket type transfer through a h2_bucket_beam */
+ h2_register_bucket_beamer(h2_bucket_headers_beam);
+ h2_register_bucket_beamer(h2_bucket_observer_beam);
+}
+
+int h2_h2_process_conn(conn_rec* c)
+{
+ apr_status_t status;
+ h2_ctx *ctx;
+
+ if (c->master) {
+ return DECLINED;
+ }
+
+ ctx = h2_ctx_get(c, 0);
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c, "h2_h2, process_conn");
+ if (h2_ctx_is_task(ctx)) {
+ /* our stream pseudo connection */
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, c, "h2_h2, task, declined");
+ return DECLINED;
+ }
+
+ if (!ctx && c->keepalives == 0) {
+ const char *proto = ap_get_protocol(c);
+
+ if (APLOGctrace1(c)) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c, "h2_h2, process_conn, "
+ "new connection using protocol '%s', direct=%d, "
+ "tls acceptable=%d", proto, h2_allows_h2_direct(c),
+ h2_is_acceptable_connection(c, 1));
+ }
+
+ if (!strcmp(AP_PROTOCOL_HTTP1, proto)
+ && h2_allows_h2_direct(c)
+ && h2_is_acceptable_connection(c, 1)) {
+            /* The fresh connection is still on HTTP/1.1 and H2Direct is
+             * enabled; the connection is otherwise in a fully acceptable state.
+             * -> peek at the first 24 incoming bytes
+ */
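+            /* (These 24 bytes are the HTTP/2 client connection preface,
+             * H2_MAGIC_TOKEN defined above.) */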
+ apr_bucket_brigade *temp;
+ char *s = NULL;
+ apr_size_t slen;
+
+ temp = apr_brigade_create(c->pool, c->bucket_alloc);
+ status = ap_get_brigade(c->input_filters, temp,
+ AP_MODE_SPECULATIVE, APR_BLOCK_READ, 24);
+
+ if (status != APR_SUCCESS) {
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, c, APLOGNO(03054)
+ "h2_h2, error reading 24 bytes speculative");
+ apr_brigade_destroy(temp);
+ return DECLINED;
+ }
+
+ apr_brigade_pflatten(temp, &s, &slen, c->pool);
+ if ((slen >= 24) && !memcmp(H2_MAGIC_TOKEN, s, 24)) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c,
+ "h2_h2, direct mode detected");
+ if (!ctx) {
+ ctx = h2_ctx_get(c, 1);
+ }
+ h2_ctx_protocol_set(ctx, h2_h2_is_tls(c)? "h2" : "h2c");
+ }
+ else if (APLOGctrace2(c)) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, c,
+ "h2_h2, not detected in %d bytes(base64): %s",
+ (int)slen, h2_util_base64url_encode(s, slen, c->pool));
+ }
+
+ apr_brigade_destroy(temp);
+ }
+ }
+
+ if (ctx) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c, "process_conn");
+ if (!h2_ctx_session_get(ctx)) {
+ status = h2_conn_setup(ctx, c, NULL);
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, c, "conn_setup");
+ if (status != APR_SUCCESS) {
+ h2_ctx_clear(c);
+ return !OK;
+ }
+ }
+ h2_conn_run(ctx, c);
+ return OK;
+ }
+
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c, "h2_h2, declined");
+ return DECLINED;
+}
+
+static int h2_h2_pre_close_conn(conn_rec *c)
+{
+ h2_ctx *ctx;
+
+ /* slave connection? */
+ if (c->master) {
+ return DECLINED;
+ }
+
+ ctx = h2_ctx_get(c, 0);
+ if (ctx) {
+        /* If the session has been closed correctly already, we will not
+         * find an h2_ctx here. Its presence indicates that the session
+         * is still ongoing. */
+ return h2_conn_pre_close(ctx, c);
+ }
+ return DECLINED;
+}
+
+static void check_push(request_rec *r, const char *tag)
+{
+ const h2_config *conf = h2_config_rget(r);
+ if (!r->expecting_100
+ && conf && conf->push_list && conf->push_list->nelts > 0) {
+ int i, old_status;
+ const char *old_line;
+ ap_log_rerror(APLOG_MARK, APLOG_TRACE1, 0, r,
+ "%s, early announcing %d resources for push",
+ tag, conf->push_list->nelts);
+ for (i = 0; i < conf->push_list->nelts; ++i) {
+ h2_push_res *push = &APR_ARRAY_IDX(conf->push_list, i, h2_push_res);
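+            /* For a configured push resource "/css/site.css" marked critical
+             * (path illustrative), this adds the header
+             *     Link: </css/site.css>; rel=preload; critical */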
+ apr_table_add(r->headers_out, "Link",
+ apr_psprintf(r->pool, "<%s>; rel=preload%s",
+ push->uri_ref, push->critical? "; critical" : ""));
+ }
+ old_status = r->status;
+ old_line = r->status_line;
+ r->status = 103;
+ r->status_line = "103 Early Hints";
+ ap_send_interim_response(r, 1);
+ r->status = old_status;
+ r->status_line = old_line;
+ }
+}
+
+static int h2_h2_post_read_req(request_rec *r)
+{
+ /* slave connection? */
+ if (r->connection->master) {
+ h2_ctx *ctx = h2_ctx_rget(r);
+ struct h2_task *task = h2_ctx_get_task(ctx);
+ /* This hook will get called twice on internal redirects. Take care
+ * that we manipulate filters only once. */
+ if (task && !task->filters_set) {
+ ap_filter_t *f;
+ ap_log_rerror(APLOG_MARK, APLOG_TRACE3, 0, r,
+ "h2_task(%s): adding request filters", task->id);
+
+ /* setup the correct filters to process the request for h2 */
+ ap_add_input_filter("H2_REQUEST", task, r, r->connection);
+
+ /* replace the core http filter that formats response headers
+ * in HTTP/1 with our own that collects status and headers */
+ ap_remove_output_filter_byhandle(r->output_filters, "HTTP_HEADER");
+ ap_add_output_filter("H2_RESPONSE", task, r, r->connection);
+
+ for (f = r->input_filters; f; f = f->next) {
+ if (!strcmp("H2_SLAVE_IN", f->frec->name)) {
+ f->r = r;
+ break;
+ }
+ }
+ ap_add_output_filter("H2_TRAILERS_OUT", task, r, r->connection);
+ task->filters_set = 1;
+ }
+ }
+ return DECLINED;
+}
+
+static int h2_h2_late_fixups(request_rec *r)
+{
+ /* slave connection? */
+ if (r->connection->master) {
+ h2_ctx *ctx = h2_ctx_rget(r);
+ struct h2_task *task = h2_ctx_get_task(ctx);
+ if (task) {
+ /* check if we copy vs. setaside files in this location */
+ task->output.copy_files = h2_config_geti(h2_config_rget(r),
+ H2_CONF_COPY_FILES);
+ if (task->output.copy_files) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, task->c,
+ "h2_slave_out(%s): copy_files on", task->id);
+ h2_beam_on_file_beam(task->output.beam, h2_beam_no_files, NULL);
+ }
+ check_push(r, "late_fixup");
+ }
+ }
+ return DECLINED;
+}
+
diff --git a/modules/http2/h2_h2.h b/modules/http2/h2_h2.h
new file mode 100644
index 0000000..367823d
--- /dev/null
+++ b/modules/http2/h2_h2.h
@@ -0,0 +1,79 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __mod_h2__h2_h2__
+#define __mod_h2__h2_h2__
+
+/**
+ * List of ALPN protocol identifiers that we support in cleartext
+ * negotiations. NULL terminated.
+ */
+extern const char *h2_clear_protos[];
+
+/**
+ * List of ALPN protocol identifiers that we support in TLS encrypted
+ * negotiations. NULL terminated.
+ */
+extern const char *h2_tls_protos[];
+
+/**
+ * Provide a user-readable description of the HTTP/2 error code.
+ * @param h2_error http/2 error code, as in RFC 7540, ch. 7
+ * @return textual description of code or that it is unknown.
+ */
+const char *h2_h2_err_description(unsigned int h2_error);
+
+/*
+ * One-time, post-config initialization.
+ */
+apr_status_t h2_h2_init(apr_pool_t *pool, server_rec *s);
+
+/* Is the connection a TLS connection?
+ */
+int h2_h2_is_tls(conn_rec *c);
+
+/* Register apache hooks for h2 protocol
+ */
+void h2_h2_register_hooks(void);
+
+/**
+ * Check if the given connection fulfills the requirements as configured.
+ * @param c the connection
+ * @param require_all != 0 iff any missing connection properties make
+ * the test fail. For example, a cipher might not have been selected while
+ * the handshake is still ongoing.
+ * @return != 0 iff connection requirements are met
+ */
+int h2_is_acceptable_connection(conn_rec *c, int require_all);
+
+/**
+ * Check if the "direct" HTTP/2 mode of protocol handling is enabled
+ * for the given connection.
+ * @param c the connection to check
+ * @return != 0 iff direct mode is enabled
+ */
+int h2_allows_h2_direct(conn_rec *c);
+
+/**
+ * Check if the "Upgrade" HTTP/1.1 mode of protocol switching is enabled
+ * for the given connection.
+ * @param c the connection to check
+ * @return != 0 iff Upgrade switching is enabled
+ */
+int h2_allows_h2_upgrade(conn_rec *c);
+
+
+#endif /* defined(__mod_h2__h2_h2__) */
diff --git a/modules/http2/h2_headers.c b/modules/http2/h2_headers.c
new file mode 100644
index 0000000..8b7add6
--- /dev/null
+++ b/modules/http2/h2_headers.c
@@ -0,0 +1,178 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <assert.h>
+#include <stdio.h>
+
+#include <apr_strings.h>
+
+#include <httpd.h>
+#include <http_core.h>
+#include <http_log.h>
+#include <util_time.h>
+
+#include <nghttp2/nghttp2.h>
+
+#include "h2_private.h"
+#include "h2_h2.h"
+#include "h2_util.h"
+#include "h2_request.h"
+#include "h2_headers.h"
+
+
+static int is_unsafe(server_rec *s)
+{
+ core_server_config *conf = ap_get_core_module_config(s->module_config);
+ return (conf->http_conformance == AP_HTTP_CONFORMANCE_UNSAFE);
+}
+
+typedef struct {
+ apr_bucket_refcount refcount;
+ h2_headers *headers;
+} h2_bucket_headers;
+
+static apr_status_t bucket_read(apr_bucket *b, const char **str,
+ apr_size_t *len, apr_read_type_e block)
+{
+ (void)b;
+ (void)block;
+ *str = NULL;
+ *len = 0;
+ return APR_SUCCESS;
+}
+
+apr_bucket * h2_bucket_headers_make(apr_bucket *b, h2_headers *r)
+{
+ h2_bucket_headers *br;
+
+ br = apr_bucket_alloc(sizeof(*br), b->list);
+ br->headers = r;
+
+ b = apr_bucket_shared_make(b, br, 0, 0);
+ b->type = &h2_bucket_type_headers;
+
+ return b;
+}
+
+apr_bucket * h2_bucket_headers_create(apr_bucket_alloc_t *list,
+ h2_headers *r)
+{
+ apr_bucket *b = apr_bucket_alloc(sizeof(*b), list);
+
+ APR_BUCKET_INIT(b);
+ b->free = apr_bucket_free;
+ b->list = list;
+ b = h2_bucket_headers_make(b, r);
+ return b;
+}
+
+h2_headers *h2_bucket_headers_get(apr_bucket *b)
+{
+ if (H2_BUCKET_IS_HEADERS(b)) {
+ return ((h2_bucket_headers *)b->data)->headers;
+ }
+ return NULL;
+}
+
+const apr_bucket_type_t h2_bucket_type_headers = {
+ "H2HEADERS", 5, APR_BUCKET_METADATA,
+ apr_bucket_destroy_noop,
+ bucket_read,
+ apr_bucket_setaside_noop,
+ apr_bucket_split_notimpl,
+ apr_bucket_shared_copy
+};
+
+apr_bucket *h2_bucket_headers_beam(struct h2_bucket_beam *beam,
+ apr_bucket_brigade *dest,
+ const apr_bucket *src)
+{
+ if (H2_BUCKET_IS_HEADERS(src)) {
+ h2_headers *r = ((h2_bucket_headers *)src->data)->headers;
+ apr_bucket *b = h2_bucket_headers_create(dest->bucket_alloc, r);
+ APR_BRIGADE_INSERT_TAIL(dest, b);
+ return b;
+ }
+ return NULL;
+}
+
+
+h2_headers *h2_headers_create(int status, apr_table_t *headers_in,
+ apr_table_t *notes, apr_off_t raw_bytes,
+ apr_pool_t *pool)
+{
+ h2_headers *headers = apr_pcalloc(pool, sizeof(h2_headers));
+ headers->status = status;
+ headers->headers = (headers_in? apr_table_clone(pool, headers_in)
+ : apr_table_make(pool, 5));
+ headers->notes = (notes? apr_table_clone(pool, notes)
+ : apr_table_make(pool, 5));
+ return headers;
+}
+
+h2_headers *h2_headers_rcreate(request_rec *r, int status,
+ apr_table_t *header, apr_pool_t *pool)
+{
+ h2_headers *headers = h2_headers_create(status, header, r->notes, 0, pool);
+ if (headers->status == HTTP_FORBIDDEN) {
+ const char *cause = apr_table_get(r->notes, "ssl-renegotiate-forbidden");
+ if (cause) {
+            /* This request triggered a TLS renegotiation that is not allowed
+ * in HTTP/2. Tell the client that it should use HTTP/1.1 for this.
+ */
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, headers->status, r,
+ APLOGNO(03061)
+ "h2_headers(%ld): renegotiate forbidden, cause: %s",
+ (long)r->connection->id, cause);
+ headers->status = H2_ERR_HTTP_1_1_REQUIRED;
+ }
+ }
+ if (is_unsafe(r->server)) {
+ apr_table_setn(headers->notes, H2_HDR_CONFORMANCE,
+ H2_HDR_CONFORMANCE_UNSAFE);
+ }
+ return headers;
+}
+
+h2_headers *h2_headers_copy(apr_pool_t *pool, h2_headers *h)
+{
+ return h2_headers_create(h->status, h->headers, h->notes, h->raw_bytes, pool);
+}
+
+h2_headers *h2_headers_die(apr_status_t type,
+ const h2_request *req, apr_pool_t *pool)
+{
+ h2_headers *headers;
+ char *date;
+
+ headers = apr_pcalloc(pool, sizeof(h2_headers));
+ headers->status = (type >= 200 && type < 600)? type : 500;
+ headers->headers = apr_table_make(pool, 5);
+ headers->notes = apr_table_make(pool, 5);
+
+ date = apr_palloc(pool, APR_RFC822_DATE_LEN);
+ ap_recent_rfc822_date(date, req? req->request_time : apr_time_now());
+ apr_table_setn(headers->headers, "Date", date);
+ apr_table_setn(headers->headers, "Server", ap_get_server_banner());
+
+ return headers;
+}
+
+int h2_headers_are_response(h2_headers *headers)
+{
+ return headers->status >= 200;
+}
+
diff --git a/modules/http2/h2_headers.h b/modules/http2/h2_headers.h
new file mode 100644
index 0000000..840e8c4
--- /dev/null
+++ b/modules/http2/h2_headers.h
@@ -0,0 +1,79 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __mod_h2__h2_headers__
+#define __mod_h2__h2_headers__
+
+#include "h2.h"
+
+struct h2_bucket_beam;
+
+extern const apr_bucket_type_t h2_bucket_type_headers;
+
+#define H2_BUCKET_IS_HEADERS(e) (e->type == &h2_bucket_type_headers)
+
+apr_bucket * h2_bucket_headers_make(apr_bucket *b, h2_headers *r);
+
+apr_bucket * h2_bucket_headers_create(apr_bucket_alloc_t *list,
+ h2_headers *r);
+
+h2_headers *h2_bucket_headers_get(apr_bucket *b);
+
+apr_bucket *h2_bucket_headers_beam(struct h2_bucket_beam *beam,
+ apr_bucket_brigade *dest,
+ const apr_bucket *src);
+
+/**
+ * Create a h2_headers instance from the given status and header table.
+ * @param status the HTTP status code
+ * @param header the header fields
+ * @param notes the notes carried by the headers
+ * @param raw_bytes the raw network bytes (if known) used to transmit these
+ * @param pool the memory pool to use
+ */
+h2_headers *h2_headers_create(int status, apr_table_t *header,
+ apr_table_t *notes, apr_off_t raw_bytes,
+ apr_pool_t *pool);
+
+/**
+ * Create a h2_headers instance from the given request_rec.
+ * @param r the request record which was processed
+ * @param status the HTTP status code
+ * @param header the header fields
+ * @param pool the memory pool to use
+ */
+h2_headers *h2_headers_rcreate(request_rec *r, int status,
+ apr_table_t *header, apr_pool_t *pool);
+
+/**
+ * Clone the headers into another pool. This will not copy any
+ * header strings.
+ */
+h2_headers *h2_headers_copy(apr_pool_t *pool, h2_headers *h);
+
+/**
+ * Create the headers for the given error condition.
+ * @param type the error code; values in the 200-599 range become the
+ *             response status, anything else maps to 500
+ * @param req the original h2_request
+ * @param pool the memory pool to use
+ */
+h2_headers *h2_headers_die(apr_status_t type,
+ const struct h2_request *req, apr_pool_t *pool);
+
+int h2_headers_are_response(h2_headers *headers);
+
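+/* Illustrative sketch only (placeholder pool and brigade names, not part of
+ * this header): building a minimal response header set and passing it along
+ * as a metadata bucket, then reading it back on the receiving side.
+ *
+ *   h2_headers *h = h2_headers_create(200, NULL, NULL, 0, pool);
+ *   apr_table_setn(h->headers, "content-type", "text/plain");
+ *   apr_bucket *b = h2_bucket_headers_create(bb->bucket_alloc, h);
+ *   APR_BRIGADE_INSERT_TAIL(bb, b);
+ *
+ *   if (H2_BUCKET_IS_HEADERS(b)) {
+ *       h2_headers *resp = h2_bucket_headers_get(b);
+ *       if (h2_headers_are_response(resp)) {
+ *           ... handle the response headers ...
+ *       }
+ *   }
+ */
+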
+#endif /* defined(__mod_h2__h2_headers__) */
diff --git a/modules/http2/h2_mplx.c b/modules/http2/h2_mplx.c
new file mode 100644
index 0000000..0fae117
--- /dev/null
+++ b/modules/http2/h2_mplx.c
@@ -0,0 +1,1282 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <assert.h>
+#include <stddef.h>
+#include <stdlib.h>
+
+#include <apr_atomic.h>
+#include <apr_thread_mutex.h>
+#include <apr_thread_cond.h>
+#include <apr_strings.h>
+#include <apr_time.h>
+
+#include <httpd.h>
+#include <http_core.h>
+#include <http_log.h>
+
+#include <mpm_common.h>
+
+#include "mod_http2.h"
+
+#include "h2.h"
+#include "h2_private.h"
+#include "h2_bucket_beam.h"
+#include "h2_config.h"
+#include "h2_conn.h"
+#include "h2_ctx.h"
+#include "h2_h2.h"
+#include "h2_mplx.h"
+#include "h2_ngn_shed.h"
+#include "h2_request.h"
+#include "h2_stream.h"
+#include "h2_session.h"
+#include "h2_task.h"
+#include "h2_workers.h"
+#include "h2_util.h"
+
+
+/* utility for iterating over ihash stream sets */
+typedef struct {
+ h2_mplx *m;
+ h2_stream *stream;
+ apr_time_t now;
+} stream_iter_ctx;
+
+apr_status_t h2_mplx_child_init(apr_pool_t *pool, server_rec *s)
+{
+ return APR_SUCCESS;
+}
+
+#define H2_MPLX_ENTER(m) \
+ do { apr_status_t rv; if ((rv = apr_thread_mutex_lock(m->lock)) != APR_SUCCESS) {\
+ return rv;\
+ } } while(0)
+
+#define H2_MPLX_LEAVE(m) \
+ apr_thread_mutex_unlock(m->lock)
+
+#define H2_MPLX_ENTER_ALWAYS(m) \
+ apr_thread_mutex_lock(m->lock)
+
+#define H2_MPLX_ENTER_MAYBE(m, lock) \
+ if (lock) apr_thread_mutex_lock(m->lock)
+
+#define H2_MPLX_LEAVE_MAYBE(m, lock) \
+ if (lock) apr_thread_mutex_unlock(m->lock)
+
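+/* Note on the macros above: H2_MPLX_ENTER() expands to an early "return rv;"
+ * when the mutex cannot be taken, so it can only appear in functions that
+ * return an apr_status_t. A typical entry point then follows this pattern
+ * (sketch, do_work() is a placeholder):
+ *
+ *   apr_status_t h2_mplx_something(h2_mplx *m)
+ *   {
+ *       apr_status_t status;
+ *       H2_MPLX_ENTER(m);
+ *       status = do_work(m);
+ *       H2_MPLX_LEAVE(m);
+ *       return status;
+ *   }
+ */
+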
+static void check_data_for(h2_mplx *m, h2_stream *stream, int lock);
+
+static void stream_output_consumed(void *ctx,
+ h2_bucket_beam *beam, apr_off_t length)
+{
+ h2_stream *stream = ctx;
+ h2_task *task = stream->task;
+
+ if (length > 0 && task && task->assigned) {
+ h2_req_engine_out_consumed(task->assigned, task->c, length);
+ }
+}
+
+static void stream_input_ev(void *ctx, h2_bucket_beam *beam)
+{
+ h2_stream *stream = ctx;
+ h2_mplx *m = stream->session->mplx;
+ apr_atomic_set32(&m->event_pending, 1);
+}
+
+static void stream_input_consumed(void *ctx, h2_bucket_beam *beam, apr_off_t length)
+{
+ h2_stream_in_consumed(ctx, length);
+}
+
+static void stream_joined(h2_mplx *m, h2_stream *stream)
+{
+ ap_assert(!stream->task || stream->task->worker_done);
+
+ h2_ihash_remove(m->shold, stream->id);
+ h2_ihash_add(m->spurge, stream);
+}
+
+static void stream_cleanup(h2_mplx *m, h2_stream *stream)
+{
+ ap_assert(stream->state == H2_SS_CLEANUP);
+
+ if (stream->input) {
+ h2_beam_on_consumed(stream->input, NULL, NULL, NULL);
+ h2_beam_abort(stream->input);
+ }
+ if (stream->output) {
+ h2_beam_on_produced(stream->output, NULL, NULL);
+ h2_beam_leave(stream->output);
+ }
+
+ h2_stream_cleanup(stream);
+
+ h2_ihash_remove(m->streams, stream->id);
+ h2_iq_remove(m->q, stream->id);
+ h2_ififo_remove(m->readyq, stream->id);
+ h2_ihash_add(m->shold, stream);
+
+ if (!stream->task || stream->task->worker_done) {
+ stream_joined(m, stream);
+ }
+ else if (stream->task) {
+ stream->task->c->aborted = 1;
+ apr_thread_cond_broadcast(m->task_thawed);
+ }
+}
+
+/**
+ * A h2_mplx needs to be thread-safe since it will be called by
+ * the h2_session thread *and* the h2_worker threads. Therefore:
+ * - calls are protected by a mutex lock, m->lock
+ * - the pool needs its own allocator, since apr_allocator_t is
+ *   not re-entrant. The separate allocator works without a
+ *   separate lock since we already protect h2_mplx itself.
+ *   Since HTTP/2 connections can be expected to live longer than
+ *   their HTTP/1 cousins, the separate allocator seems to work better
+ *   than protecting a shared h2_session one with its own lock.
+ */
+h2_mplx *h2_mplx_create(conn_rec *c, apr_pool_t *parent,
+ const h2_config *conf,
+ h2_workers *workers)
+{
+ apr_status_t status = APR_SUCCESS;
+ apr_allocator_t *allocator;
+ apr_thread_mutex_t *mutex;
+ h2_mplx *m;
+ h2_ctx *ctx = h2_ctx_get(c, 0);
+ ap_assert(conf);
+
+ m = apr_pcalloc(parent, sizeof(h2_mplx));
+ if (m) {
+ m->id = c->id;
+ m->c = c;
+ m->s = (ctx? h2_ctx_server_get(ctx) : NULL);
+ if (!m->s) {
+ m->s = c->base_server;
+ }
+
+ /* We create a pool with its own allocator to be used for
+ * processing slave connections. This is the only way to have the
+         * processing independent of its parent pool in the sense that it
+ * can work in another thread. Also, the new allocator needs its own
+ * mutex to synchronize sub-pools.
+ */
+ status = apr_allocator_create(&allocator);
+ if (status != APR_SUCCESS) {
+ return NULL;
+ }
+ apr_allocator_max_free_set(allocator, ap_max_mem_free);
+ apr_pool_create_ex(&m->pool, parent, NULL, allocator);
+ if (!m->pool) {
+ apr_allocator_destroy(allocator);
+ return NULL;
+ }
+ apr_pool_tag(m->pool, "h2_mplx");
+ apr_allocator_owner_set(allocator, m->pool);
+ status = apr_thread_mutex_create(&mutex, APR_THREAD_MUTEX_DEFAULT,
+ m->pool);
+ if (status != APR_SUCCESS) {
+ apr_pool_destroy(m->pool);
+ return NULL;
+ }
+ apr_allocator_mutex_set(allocator, mutex);
+
+ status = apr_thread_mutex_create(&m->lock, APR_THREAD_MUTEX_DEFAULT,
+ m->pool);
+ if (status != APR_SUCCESS) {
+ apr_pool_destroy(m->pool);
+ return NULL;
+ }
+
+ status = apr_thread_cond_create(&m->task_thawed, m->pool);
+ if (status != APR_SUCCESS) {
+ apr_pool_destroy(m->pool);
+ return NULL;
+ }
+
+ m->max_streams = h2_config_geti(conf, H2_CONF_MAX_STREAMS);
+ m->stream_max_mem = h2_config_geti(conf, H2_CONF_STREAM_MAX_MEM);
+
+ m->streams = h2_ihash_create(m->pool, offsetof(h2_stream,id));
+ m->sredo = h2_ihash_create(m->pool, offsetof(h2_stream,id));
+ m->shold = h2_ihash_create(m->pool, offsetof(h2_stream,id));
+ m->spurge = h2_ihash_create(m->pool, offsetof(h2_stream,id));
+ m->q = h2_iq_create(m->pool, m->max_streams);
+
+ status = h2_ififo_set_create(&m->readyq, m->pool, m->max_streams);
+ if (status != APR_SUCCESS) {
+ apr_pool_destroy(m->pool);
+ return NULL;
+ }
+
+ m->workers = workers;
+ m->max_active = workers->max_workers;
+ m->limit_active = 6; /* the original h1 max parallel connections */
+ m->last_limit_change = m->last_idle_block = apr_time_now();
+ m->limit_change_interval = apr_time_from_msec(100);
+
+ m->spare_slaves = apr_array_make(m->pool, 10, sizeof(conn_rec*));
+
+ m->ngn_shed = h2_ngn_shed_create(m->pool, m->c, m->max_streams,
+ m->stream_max_mem);
+        h2_ngn_shed_set_ctx(m->ngn_shed, m);
+ }
+ return m;
+}
+
+int h2_mplx_shutdown(h2_mplx *m)
+{
+ int max_stream_started = 0;
+
+ H2_MPLX_ENTER(m);
+
+ max_stream_started = m->max_stream_started;
+    /* Clear the schedule queue, preventing queued streams from starting */
+ h2_iq_clear(m->q);
+
+ H2_MPLX_LEAVE(m);
+ return max_stream_started;
+}
+
+static int input_consumed_signal(h2_mplx *m, h2_stream *stream)
+{
+ if (stream->input) {
+ return h2_beam_report_consumption(stream->input);
+ }
+ return 0;
+}
+
+static int report_consumption_iter(void *ctx, void *val)
+{
+ h2_stream *stream = val;
+ h2_mplx *m = ctx;
+
+ input_consumed_signal(m, stream);
+ if (stream->state == H2_SS_CLOSED_L
+ && (!stream->task || stream->task->worker_done)) {
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, m->c,
+ H2_STRM_LOG(APLOGNO(10026), stream, "remote close missing"));
+ nghttp2_submit_rst_stream(stream->session->ngh2, NGHTTP2_FLAG_NONE,
+ stream->id, NGHTTP2_NO_ERROR);
+ }
+ return 1;
+}
+
+static int output_consumed_signal(h2_mplx *m, h2_task *task)
+{
+ if (task->output.beam) {
+ return h2_beam_report_consumption(task->output.beam);
+ }
+ return 0;
+}
+
+static int stream_destroy_iter(void *ctx, void *val)
+{
+ h2_mplx *m = ctx;
+ h2_stream *stream = val;
+
+ h2_ihash_remove(m->spurge, stream->id);
+ ap_assert(stream->state == H2_SS_CLEANUP);
+
+ if (stream->input) {
+ /* Process outstanding events before destruction */
+ input_consumed_signal(m, stream);
+ h2_beam_log(stream->input, m->c, APLOG_TRACE2, "stream_destroy");
+ h2_beam_destroy(stream->input);
+ stream->input = NULL;
+ }
+
+ if (stream->task) {
+ h2_task *task = stream->task;
+ conn_rec *slave;
+ int reuse_slave = 0;
+
+ stream->task = NULL;
+ slave = task->c;
+ if (slave) {
+            /* On non-serialized requests, the IO logging has not accounted for any
+             * meta data sent over the network: response headers and h2 frame headers. We
+             * counted this on the stream and need to add it now.
+ * This is supposed to happen before the EOR bucket triggers the
+ * logging of the transaction. *fingers crossed* */
+ if (task->request && !task->request->serialize && h2_task_logio_add_bytes_out) {
+ apr_off_t unaccounted = stream->out_frame_octets - stream->out_data_octets;
+ if (unaccounted > 0) {
+ h2_task_logio_add_bytes_out(slave, unaccounted);
+ }
+ }
+
+ if (m->s->keep_alive_max == 0 || slave->keepalives < m->s->keep_alive_max) {
+ reuse_slave = ((m->spare_slaves->nelts < (m->limit_active * 3 / 2))
+ && !task->rst_error);
+ }
+
+ if (reuse_slave && slave->keepalive == AP_CONN_KEEPALIVE) {
+ h2_beam_log(task->output.beam, m->c, APLOG_DEBUG,
+ APLOGNO(03385) "h2_task_destroy, reuse slave");
+ h2_task_destroy(task);
+ APR_ARRAY_PUSH(m->spare_slaves, conn_rec*) = slave;
+ }
+ else {
+ h2_beam_log(task->output.beam, m->c, APLOG_TRACE1,
+ "h2_task_destroy, destroy slave");
+ h2_slave_destroy(slave);
+ }
+ }
+ }
+ h2_stream_destroy(stream);
+ return 0;
+}
+
+static void purge_streams(h2_mplx *m, int lock)
+{
+ if (!h2_ihash_empty(m->spurge)) {
+ H2_MPLX_ENTER_MAYBE(m, lock);
+ while (!h2_ihash_iter(m->spurge, stream_destroy_iter, m)) {
+ /* repeat until empty */
+ }
+ H2_MPLX_LEAVE_MAYBE(m, lock);
+ }
+}
+
+typedef struct {
+ h2_mplx_stream_cb *cb;
+ void *ctx;
+} stream_iter_ctx_t;
+
+static int stream_iter_wrap(void *ctx, void *stream)
+{
+ stream_iter_ctx_t *x = ctx;
+ return x->cb(stream, x->ctx);
+}
+
+apr_status_t h2_mplx_stream_do(h2_mplx *m, h2_mplx_stream_cb *cb, void *ctx)
+{
+ stream_iter_ctx_t x;
+
+ H2_MPLX_ENTER(m);
+
+ x.cb = cb;
+ x.ctx = ctx;
+ h2_ihash_iter(m->streams, stream_iter_wrap, &x);
+
+ H2_MPLX_LEAVE(m);
+ return APR_SUCCESS;
+}
+
+static int report_stream_iter(void *ctx, void *val) {
+ h2_mplx *m = ctx;
+ h2_stream *stream = val;
+ h2_task *task = stream->task;
+ if (APLOGctrace1(m->c)) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c,
+ H2_STRM_MSG(stream, "started=%d, scheduled=%d, ready=%d, out_buffer=%ld"),
+ !!stream->task, stream->scheduled, h2_stream_is_ready(stream),
+ (long)h2_beam_get_buffered(stream->output));
+ }
+ if (task) {
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, m->c, /* NO APLOGNO */
+ H2_STRM_MSG(stream, "->03198: %s %s %s"
+ "[started=%d/done=%d/frozen=%d]"),
+ task->request->method, task->request->authority,
+ task->request->path, task->worker_started,
+ task->worker_done, task->frozen);
+ }
+ else {
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, m->c, /* NO APLOGNO */
+ H2_STRM_MSG(stream, "->03198: no task"));
+ }
+ return 1;
+}
+
+static int unexpected_stream_iter(void *ctx, void *val) {
+ h2_mplx *m = ctx;
+ h2_stream *stream = val;
+ ap_log_cerror(APLOG_MARK, APLOG_WARNING, 0, m->c, /* NO APLOGNO */
+ H2_STRM_MSG(stream, "unexpected, started=%d, scheduled=%d, ready=%d"),
+ !!stream->task, stream->scheduled, h2_stream_is_ready(stream));
+ return 1;
+}
+
+static int stream_cancel_iter(void *ctx, void *val) {
+ h2_mplx *m = ctx;
+ h2_stream *stream = val;
+
+    /* disable input consumed reporting */
+ if (stream->input) {
+ h2_beam_on_consumed(stream->input, NULL, NULL, NULL);
+ }
+ /* take over event monitoring */
+ h2_stream_set_monitor(stream, NULL);
+ /* Reset, should transit to CLOSED state */
+ h2_stream_rst(stream, H2_ERR_NO_ERROR);
+ /* All connection data has been sent, simulate cleanup */
+ h2_stream_dispatch(stream, H2_SEV_EOS_SENT);
+ stream_cleanup(m, stream);
+ return 0;
+}
+
+void h2_mplx_release_and_join(h2_mplx *m, apr_thread_cond_t *wait)
+{
+ apr_status_t status;
+ int i, wait_secs = 60;
+
+ /* How to shut down a h2 connection:
+ * 0. abort and tell the workers that no more tasks will come from us */
+ m->aborted = 1;
+ h2_workers_unregister(m->workers, m);
+
+ H2_MPLX_ENTER_ALWAYS(m);
+
+ /* How to shut down a h2 connection:
+ * 1. cancel all streams still active */
+ while (!h2_ihash_iter(m->streams, stream_cancel_iter, m)) {
+ /* until empty */
+ }
+
+ /* 2. terminate ngn_shed, no more streams
+ * should be scheduled or in the active set */
+ h2_ngn_shed_abort(m->ngn_shed);
+ ap_assert(h2_ihash_empty(m->streams));
+ ap_assert(h2_iq_empty(m->q));
+
+ /* 3. while workers are busy on this connection, meaning they
+     *    are processing tasks from this connection, wait for their
+     *    completion to wake us up and let us check again.
+ * Eventually, this has to succeed. */
+ m->join_wait = wait;
+ for (i = 0; h2_ihash_count(m->shold) > 0; ++i) {
+ status = apr_thread_cond_timedwait(wait, m->lock, apr_time_from_sec(wait_secs));
+
+ if (APR_STATUS_IS_TIMEUP(status)) {
+ /* This can happen if we have very long running requests
+ * that do not time out on IO. */
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, m->c, APLOGNO(03198)
+ "h2_mplx(%ld): waited %d sec for %d tasks",
+ m->id, i*wait_secs, (int)h2_ihash_count(m->shold));
+ h2_ihash_iter(m->shold, report_stream_iter, m);
+ }
+ }
+ ap_assert(m->tasks_active == 0);
+ m->join_wait = NULL;
+
+    /* 4. close the h2_req_engine shed */
+ h2_ngn_shed_destroy(m->ngn_shed);
+ m->ngn_shed = NULL;
+
+    /* 5. With all workers done, all streams should be in spurge */
+ if (!h2_ihash_empty(m->shold)) {
+ ap_log_cerror(APLOG_MARK, APLOG_WARNING, 0, m->c, APLOGNO(03516)
+ "h2_mplx(%ld): unexpected %d streams in hold",
+ m->id, (int)h2_ihash_count(m->shold));
+ h2_ihash_iter(m->shold, unexpected_stream_iter, m);
+ }
+
+ H2_MPLX_LEAVE(m);
+
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c,
+ "h2_mplx(%ld): released", m->id);
+}
+
+apr_status_t h2_mplx_stream_cleanup(h2_mplx *m, h2_stream *stream)
+{
+ H2_MPLX_ENTER(m);
+
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, m->c,
+ H2_STRM_MSG(stream, "cleanup"));
+ stream_cleanup(m, stream);
+
+ H2_MPLX_LEAVE(m);
+ return APR_SUCCESS;
+}
+
+h2_stream *h2_mplx_stream_get(h2_mplx *m, int id)
+{
+ h2_stream *s = NULL;
+
+ H2_MPLX_ENTER_ALWAYS(m);
+
+ s = h2_ihash_get(m->streams, id);
+
+ H2_MPLX_LEAVE(m);
+ return s;
+}
+
+static void output_produced(void *ctx, h2_bucket_beam *beam, apr_off_t bytes)
+{
+ h2_stream *stream = ctx;
+ h2_mplx *m = stream->session->mplx;
+
+ check_data_for(m, stream, 1);
+}
+
+static apr_status_t out_open(h2_mplx *m, int stream_id, h2_bucket_beam *beam)
+{
+ apr_status_t status = APR_SUCCESS;
+ h2_stream *stream = h2_ihash_get(m->streams, stream_id);
+
+ if (!stream || !stream->task || m->aborted) {
+ return APR_ECONNABORTED;
+ }
+
+ ap_assert(stream->output == NULL);
+ stream->output = beam;
+
+ if (APLOGctrace2(m->c)) {
+ h2_beam_log(beam, m->c, APLOG_TRACE2, "out_open");
+ }
+ else {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, m->c,
+ "h2_mplx(%s): out open", stream->task->id);
+ }
+
+ h2_beam_on_consumed(stream->output, NULL, stream_output_consumed, stream);
+ h2_beam_on_produced(stream->output, output_produced, stream);
+ if (stream->task->output.copy_files) {
+ h2_beam_on_file_beam(stream->output, h2_beam_no_files, NULL);
+ }
+
+ /* we might see some file buckets in the output, see
+ * if we have enough handles reserved. */
+ check_data_for(m, stream, 0);
+ return status;
+}
+
+apr_status_t h2_mplx_out_open(h2_mplx *m, int stream_id, h2_bucket_beam *beam)
+{
+ apr_status_t status;
+
+ H2_MPLX_ENTER(m);
+
+ if (m->aborted) {
+ status = APR_ECONNABORTED;
+ }
+ else {
+ status = out_open(m, stream_id, beam);
+ }
+
+ H2_MPLX_LEAVE(m);
+ return status;
+}
+
+static apr_status_t out_close(h2_mplx *m, h2_task *task)
+{
+ apr_status_t status = APR_SUCCESS;
+ h2_stream *stream;
+
+ if (!task) {
+ return APR_ECONNABORTED;
+ }
+ if (task->c) {
+ ++task->c->keepalives;
+ }
+
+ stream = h2_ihash_get(m->streams, task->stream_id);
+ if (!stream) {
+ return APR_ECONNABORTED;
+ }
+
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, status, m->c,
+ "h2_mplx(%s): close", task->id);
+ status = h2_beam_close(task->output.beam);
+ h2_beam_log(task->output.beam, m->c, APLOG_TRACE2, "out_close");
+ output_consumed_signal(m, task);
+ check_data_for(m, stream, 0);
+ return status;
+}
+
+apr_status_t h2_mplx_out_trywait(h2_mplx *m, apr_interval_time_t timeout,
+ apr_thread_cond_t *iowait)
+{
+ apr_status_t status;
+
+ H2_MPLX_ENTER(m);
+
+ if (m->aborted) {
+ status = APR_ECONNABORTED;
+ }
+ else if (h2_mplx_has_master_events(m)) {
+ status = APR_SUCCESS;
+ }
+ else {
+ purge_streams(m, 0);
+ h2_ihash_iter(m->streams, report_consumption_iter, m);
+ m->added_output = iowait;
+ status = apr_thread_cond_timedwait(m->added_output, m->lock, timeout);
+ if (APLOGctrace2(m->c)) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, m->c,
+ "h2_mplx(%ld): trywait on data for %f ms)",
+ m->id, timeout/1000.0);
+ }
+ m->added_output = NULL;
+ }
+
+ H2_MPLX_LEAVE(m);
+ return status;
+}
+
+static void check_data_for(h2_mplx *m, h2_stream *stream, int lock)
+{
+ if (h2_ififo_push(m->readyq, stream->id) == APR_SUCCESS) {
+ apr_atomic_set32(&m->event_pending, 1);
+ H2_MPLX_ENTER_MAYBE(m, lock);
+ if (m->added_output) {
+ apr_thread_cond_signal(m->added_output);
+ }
+ H2_MPLX_LEAVE_MAYBE(m, lock);
+ }
+}
+
+apr_status_t h2_mplx_reprioritize(h2_mplx *m, h2_stream_pri_cmp *cmp, void *ctx)
+{
+ apr_status_t status;
+
+ H2_MPLX_ENTER(m);
+
+ if (m->aborted) {
+ status = APR_ECONNABORTED;
+ }
+ else {
+ h2_iq_sort(m->q, cmp, ctx);
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c,
+ "h2_mplx(%ld): reprioritize tasks", m->id);
+ status = APR_SUCCESS;
+ }
+
+ H2_MPLX_LEAVE(m);
+ return status;
+}
+
+static void register_if_needed(h2_mplx *m)
+{
+ if (!m->aborted && !m->is_registered && !h2_iq_empty(m->q)) {
+ apr_status_t status = h2_workers_register(m->workers, m);
+ if (status == APR_SUCCESS) {
+ m->is_registered = 1;
+ }
+ else {
+ ap_log_cerror(APLOG_MARK, APLOG_ERR, status, m->c, APLOGNO(10021)
+ "h2_mplx(%ld): register at workers", m->id);
+ }
+ }
+}
+
+apr_status_t h2_mplx_process(h2_mplx *m, struct h2_stream *stream,
+ h2_stream_pri_cmp *cmp, void *ctx)
+{
+ apr_status_t status;
+
+ H2_MPLX_ENTER(m);
+
+ if (m->aborted) {
+ status = APR_ECONNABORTED;
+ }
+ else {
+ status = APR_SUCCESS;
+ h2_ihash_add(m->streams, stream);
+ if (h2_stream_is_ready(stream)) {
+ /* already have a response */
+ check_data_for(m, stream, 0);
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c,
+ H2_STRM_MSG(stream, "process, add to readyq"));
+ }
+ else {
+ h2_iq_add(m->q, stream->id, cmp, ctx);
+ register_if_needed(m);
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c,
+ H2_STRM_MSG(stream, "process, added to q"));
+ }
+ }
+
+ H2_MPLX_LEAVE(m);
+ return status;
+}
+
+static h2_task *next_stream_task(h2_mplx *m)
+{
+ h2_stream *stream;
+ int sid;
+ while (!m->aborted && (m->tasks_active < m->limit_active)
+ && (sid = h2_iq_shift(m->q)) > 0) {
+
+ stream = h2_ihash_get(m->streams, sid);
+ if (stream) {
+ conn_rec *slave, **pslave;
+
+ pslave = (conn_rec **)apr_array_pop(m->spare_slaves);
+ if (pslave) {
+ slave = *pslave;
+ slave->aborted = 0;
+ }
+ else {
+ slave = h2_slave_create(m->c, stream->id, m->pool);
+ }
+
+ if (!stream->task) {
+
+ if (sid > m->max_stream_started) {
+ m->max_stream_started = sid;
+ }
+ if (stream->input) {
+ h2_beam_on_consumed(stream->input, stream_input_ev,
+ stream_input_consumed, stream);
+ }
+
+ stream->task = h2_task_create(slave, stream->id,
+ stream->request, m, stream->input,
+ stream->session->s->timeout,
+ m->stream_max_mem);
+ if (!stream->task) {
+ ap_log_cerror(APLOG_MARK, APLOG_ERR, APR_ENOMEM, slave,
+ H2_STRM_LOG(APLOGNO(02941), stream,
+ "create task"));
+ return NULL;
+ }
+
+ }
+
+ ++m->tasks_active;
+ return stream->task;
+ }
+ }
+ return NULL;
+}
+
+apr_status_t h2_mplx_pop_task(h2_mplx *m, h2_task **ptask)
+{
+ apr_status_t rv = APR_EOF;
+
+ *ptask = NULL;
+ ap_assert(m);
+ ap_assert(m->lock);
+
+ if (APR_SUCCESS != (rv = apr_thread_mutex_lock(m->lock))) {
+ return rv;
+ }
+
+ if (m->aborted) {
+ rv = APR_EOF;
+ }
+ else {
+ *ptask = next_stream_task(m);
+ rv = (*ptask != NULL && !h2_iq_empty(m->q))? APR_EAGAIN : APR_SUCCESS;
+ }
+ if (APR_EAGAIN != rv) {
+ m->is_registered = 0; /* h2_workers will discard this mplx */
+ }
+ H2_MPLX_LEAVE(m);
+ return rv;
+}
+
+static void task_done(h2_mplx *m, h2_task *task, h2_req_engine *ngn)
+{
+ h2_stream *stream;
+
+ if (task->frozen) {
+ /* this task was handed over to an engine for processing
+ * and the original worker has finished. That means the
+ * engine may start processing now. */
+ h2_task_thaw(task);
+ apr_thread_cond_broadcast(m->task_thawed);
+ return;
+ }
+
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c,
+ "h2_mplx(%ld): task(%s) done", m->id, task->id);
+ out_close(m, task);
+
+ if (ngn) {
+ apr_off_t bytes = 0;
+ h2_beam_send(task->output.beam, NULL, APR_NONBLOCK_READ);
+ bytes += h2_beam_get_buffered(task->output.beam);
+ if (bytes > 0) {
+ /* we need to report consumed and current buffered output
+ * to the engine. The request will be streamed out or cancelled,
+ * no more data is coming from it and the engine should update
+ * its calculations before we destroy this information. */
+ h2_req_engine_out_consumed(ngn, task->c, bytes);
+ }
+ }
+
+ if (task->engine) {
+ if (!m->aborted && !task->c->aborted
+ && !h2_req_engine_is_shutdown(task->engine)) {
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, m->c, APLOGNO(10022)
+ "h2_mplx(%ld): task(%s) has not-shutdown "
+ "engine(%s)", m->id, task->id,
+ h2_req_engine_get_id(task->engine));
+ }
+ h2_ngn_shed_done_ngn(m->ngn_shed, task->engine);
+ }
+
+ task->worker_done = 1;
+ task->done_at = apr_time_now();
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, m->c,
+ "h2_mplx(%s): request done, %f ms elapsed", task->id,
+ (task->done_at - task->started_at) / 1000.0);
+
+ if (task->started_at > m->last_idle_block) {
+ /* this task finished without causing an 'idle block', e.g.
+ * a block by flow control.
+ */
+        if (task->done_at - m->last_limit_change >= m->limit_change_interval
+ && m->limit_active < m->max_active) {
+ /* Well behaving stream, allow it more workers */
+ m->limit_active = H2MIN(m->limit_active * 2,
+ m->max_active);
+ m->last_limit_change = task->done_at;
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c,
+ "h2_mplx(%ld): increase worker limit to %d",
+ m->id, m->limit_active);
+ }
+ }
+
+ stream = h2_ihash_get(m->streams, task->stream_id);
+ if (stream) {
+ /* stream not done yet. */
+ if (!m->aborted && h2_ihash_get(m->sredo, stream->id)) {
+ /* reset and schedule again */
+ h2_task_redo(task);
+ h2_ihash_remove(m->sredo, stream->id);
+ h2_iq_add(m->q, stream->id, NULL, NULL);
+ }
+ else {
+ /* stream not cleaned up, stay around */
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, m->c,
+ H2_STRM_MSG(stream, "task_done, stream open"));
+ if (stream->input) {
+ h2_beam_leave(stream->input);
+ }
+
+ /* more data will not arrive, resume the stream */
+ check_data_for(m, stream, 0);
+ }
+ }
+ else if ((stream = h2_ihash_get(m->shold, task->stream_id)) != NULL) {
+ /* stream is done, was just waiting for this. */
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, m->c,
+ H2_STRM_MSG(stream, "task_done, in hold"));
+ if (stream->input) {
+ h2_beam_leave(stream->input);
+ }
+ stream_joined(m, stream);
+ }
+ else if ((stream = h2_ihash_get(m->spurge, task->stream_id)) != NULL) {
+ ap_log_cerror(APLOG_MARK, APLOG_WARNING, 0, m->c,
+ H2_STRM_LOG(APLOGNO(03517), stream, "already in spurge"));
+ ap_assert("stream should not be in spurge" == NULL);
+ }
+ else {
+ ap_log_cerror(APLOG_MARK, APLOG_WARNING, 0, m->c, APLOGNO(03518)
+ "h2_mplx(%s): task_done, stream not found",
+ task->id);
+ ap_assert("stream should still be available" == NULL);
+ }
+}
+
+void h2_mplx_task_done(h2_mplx *m, h2_task *task, h2_task **ptask)
+{
+ H2_MPLX_ENTER_ALWAYS(m);
+
+ task_done(m, task, NULL);
+ --m->tasks_active;
+
+ if (m->join_wait) {
+ apr_thread_cond_signal(m->join_wait);
+ }
+ if (ptask) {
+ /* caller wants another task */
+ *ptask = next_stream_task(m);
+ }
+ register_if_needed(m);
+
+ H2_MPLX_LEAVE(m);
+}
+
+/*******************************************************************************
+ * h2_mplx DoS protection
+ ******************************************************************************/
+
+static int latest_repeatable_unsubmitted_iter(void *data, void *val)
+{
+ stream_iter_ctx *ctx = data;
+ h2_stream *stream = val;
+
+ if (stream->task && !stream->task->worker_done
+ && h2_task_can_redo(stream->task)
+ && !h2_ihash_get(ctx->m->sredo, stream->id)) {
+ if (!h2_stream_is_ready(stream)) {
+ /* this task occupies a worker, the response has not been submitted
+ * yet, not been cancelled and it is a repeatable request
+ * -> it can be re-scheduled later */
+ if (!ctx->stream
+ || (ctx->stream->task->started_at < stream->task->started_at)) {
+ /* we did not have one or this one was started later */
+ ctx->stream = stream;
+ }
+ }
+ }
+ return 1;
+}
+
+static h2_stream *get_latest_repeatable_unsubmitted_stream(h2_mplx *m)
+{
+ stream_iter_ctx ctx;
+ ctx.m = m;
+ ctx.stream = NULL;
+ h2_ihash_iter(m->streams, latest_repeatable_unsubmitted_iter, &ctx);
+ return ctx.stream;
+}
+
+static int timed_out_busy_iter(void *data, void *val)
+{
+ stream_iter_ctx *ctx = data;
+ h2_stream *stream = val;
+ if (stream->task && !stream->task->worker_done
+ && (ctx->now - stream->task->started_at) > stream->task->timeout) {
+ /* timed out stream occupying a worker, found */
+ ctx->stream = stream;
+ return 0;
+ }
+ return 1;
+}
+
+static h2_stream *get_timed_out_busy_stream(h2_mplx *m)
+{
+ stream_iter_ctx ctx;
+ ctx.m = m;
+ ctx.stream = NULL;
+ ctx.now = apr_time_now();
+ h2_ihash_iter(m->streams, timed_out_busy_iter, &ctx);
+ return ctx.stream;
+}
+
+static apr_status_t unschedule_slow_tasks(h2_mplx *m)
+{
+ h2_stream *stream;
+ int n;
+
+ /* Try to get rid of streams that occupy workers. Look for safe requests
+ * that are repeatable. If none found, fail the connection.
+ */
+ n = (m->tasks_active - m->limit_active - (int)h2_ihash_count(m->sredo));
+ while (n > 0 && (stream = get_latest_repeatable_unsubmitted_stream(m))) {
+ h2_task_rst(stream->task, H2_ERR_CANCEL);
+ h2_ihash_add(m->sredo, stream);
+ --n;
+ }
+
+ if ((m->tasks_active - h2_ihash_count(m->sredo)) > m->limit_active) {
+ h2_stream *stream = get_timed_out_busy_stream(m);
+ if (stream) {
+ /* Too many busy workers, unable to cancel enough streams
+ * and with a busy, timed out stream, we tell the client
+ * to go away... */
+ return APR_TIMEUP;
+ }
+ }
+ return APR_SUCCESS;
+}
+
+apr_status_t h2_mplx_idle(h2_mplx *m)
+{
+ apr_status_t status = APR_SUCCESS;
+ apr_time_t now;
+ apr_size_t scount;
+
+ H2_MPLX_ENTER(m);
+
+ scount = h2_ihash_count(m->streams);
+ if (scount > 0) {
+ if (m->tasks_active) {
+            /* We have streams while the connection is in state 'IDLE':
+             * all streams are ready to send data out, but lack
+             * WINDOW_UPDATEs.
+             *
+             * This is ok, unless we have streams that still occupy
+             * h2 workers. As worker threads are a scarce resource,
+             * we need to take measures so that we do not get DoSed.
+             *
+             * This is what we call an 'idle block'. Limit the number
+             * of busy workers we allow for this connection until it
+             * behaves well.
+             */
+ now = apr_time_now();
+ m->last_idle_block = now;
+ if (m->limit_active > 2
+ && now - m->last_limit_change >= m->limit_change_interval) {
+ if (m->limit_active > 16) {
+ m->limit_active = 16;
+ }
+ else if (m->limit_active > 8) {
+ m->limit_active = 8;
+ }
+ else if (m->limit_active > 4) {
+ m->limit_active = 4;
+ }
+ else if (m->limit_active > 2) {
+ m->limit_active = 2;
+ }
+ m->last_limit_change = now;
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c,
+ "h2_mplx(%ld): decrease worker limit to %d",
+ m->id, m->limit_active);
+ }
+
+ if (m->tasks_active > m->limit_active) {
+ status = unschedule_slow_tasks(m);
+ }
+ }
+ else if (!h2_iq_empty(m->q)) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c,
+ "h2_mplx(%ld): idle, but %d streams to process",
+ m->id, (int)h2_iq_count(m->q));
+ status = APR_EAGAIN;
+ }
+ else {
+ /* idle, have streams, but no tasks active. what are we waiting for?
+ * WINDOW_UPDATEs from client? */
+ h2_stream *stream = NULL;
+
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c,
+ "h2_mplx(%ld): idle, no tasks ongoing, %d streams",
+ m->id, (int)h2_ihash_count(m->streams));
+ h2_ihash_shift(m->streams, (void**)&stream, 1);
+ if (stream) {
+ h2_ihash_add(m->streams, stream);
+ if (stream->output && !stream->out_checked) {
+ /* FIXME: this looks like a race between the session thinking
+ * it is idle and the EOF on a stream not being sent.
+ * Signal to caller to leave IDLE state.
+ */
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, m->c,
+ H2_STRM_MSG(stream, "output closed=%d, mplx idle"
+ ", out has %ld bytes buffered"),
+ h2_beam_is_closed(stream->output),
+ (long)h2_beam_get_buffered(stream->output));
+ h2_ihash_add(m->streams, stream);
+ check_data_for(m, stream, 0);
+ stream->out_checked = 1;
+ status = APR_EAGAIN;
+ }
+ }
+ }
+ }
+ register_if_needed(m);
+
+ H2_MPLX_LEAVE(m);
+ return status;
+}
+
+/*******************************************************************************
+ * HTTP/2 request engines
+ ******************************************************************************/
+
+typedef struct {
+ h2_mplx * m;
+ h2_req_engine *ngn;
+ int streams_updated;
+} ngn_update_ctx;
+
+static int ngn_update_window(void *ctx, void *val)
+{
+ ngn_update_ctx *uctx = ctx;
+ h2_stream *stream = val;
+ if (stream->task && stream->task->assigned == uctx->ngn
+ && output_consumed_signal(uctx->m, stream->task)) {
+ ++uctx->streams_updated;
+ }
+ return 1;
+}
+
+static apr_status_t ngn_out_update_windows(h2_mplx *m, h2_req_engine *ngn)
+{
+ ngn_update_ctx ctx;
+
+ ctx.m = m;
+ ctx.ngn = ngn;
+ ctx.streams_updated = 0;
+ h2_ihash_iter(m->streams, ngn_update_window, &ctx);
+
+ return ctx.streams_updated? APR_SUCCESS : APR_EAGAIN;
+}
+
+apr_status_t h2_mplx_req_engine_push(const char *ngn_type,
+ request_rec *r,
+ http2_req_engine_init *einit)
+{
+ apr_status_t status;
+ h2_mplx *m;
+ h2_task *task;
+ h2_stream *stream;
+
+ task = h2_ctx_rget_task(r);
+ if (!task) {
+ return APR_ECONNABORTED;
+ }
+ m = task->mplx;
+
+ H2_MPLX_ENTER(m);
+
+ stream = h2_ihash_get(m->streams, task->stream_id);
+ if (stream) {
+ status = h2_ngn_shed_push_request(m->ngn_shed, ngn_type, r, einit);
+ }
+ else {
+ status = APR_ECONNABORTED;
+ }
+
+ H2_MPLX_LEAVE(m);
+ return status;
+}
+
+apr_status_t h2_mplx_req_engine_pull(h2_req_engine *ngn,
+ apr_read_type_e block,
+ int capacity,
+ request_rec **pr)
+{
+ h2_ngn_shed *shed = h2_ngn_shed_get_shed(ngn);
+ h2_mplx *m = h2_ngn_shed_get_ctx(shed);
+ apr_status_t status;
+ int want_shutdown;
+
+ H2_MPLX_ENTER(m);
+
+ want_shutdown = (block == APR_BLOCK_READ);
+
+    /* Take this opportunity to update output consumption
+ * for this engine */
+ ngn_out_update_windows(m, ngn);
+
+ if (want_shutdown && !h2_iq_empty(m->q)) {
+        /* For a blocking read, first check whether requests are
+         * available and, if not, wait a short while before doing the
+         * blocking (and, if unsuccessful, terminating) read.
+ */
+ status = h2_ngn_shed_pull_request(shed, ngn, capacity, 1, pr);
+ if (APR_STATUS_IS_EAGAIN(status)) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c,
+ "h2_mplx(%ld): start block engine pull", m->id);
+ apr_thread_cond_timedwait(m->task_thawed, m->lock,
+ apr_time_from_msec(20));
+ status = h2_ngn_shed_pull_request(shed, ngn, capacity, 1, pr);
+ }
+ }
+ else {
+ status = h2_ngn_shed_pull_request(shed, ngn, capacity,
+ want_shutdown, pr);
+ }
+
+ H2_MPLX_LEAVE(m);
+ return status;
+}
+
+void h2_mplx_req_engine_done(h2_req_engine *ngn, conn_rec *r_conn,
+ apr_status_t status)
+{
+ h2_task *task = h2_ctx_cget_task(r_conn);
+
+ if (task) {
+ h2_mplx *m = task->mplx;
+ h2_stream *stream;
+
+ H2_MPLX_ENTER_ALWAYS(m);
+
+ stream = h2_ihash_get(m->streams, task->stream_id);
+
+ ngn_out_update_windows(m, ngn);
+ h2_ngn_shed_done_task(m->ngn_shed, ngn, task);
+
+ if (status != APR_SUCCESS && stream
+ && h2_task_can_redo(task)
+ && !h2_ihash_get(m->sredo, stream->id)) {
+ h2_ihash_add(m->sredo, stream);
+ }
+
+ if (task->engine) {
+ /* cannot report that as done until engine returns */
+ }
+ else {
+ task_done(m, task, ngn);
+ }
+
+ H2_MPLX_LEAVE(m);
+ }
+}
+
+/*******************************************************************************
+ * mplx master events dispatching
+ ******************************************************************************/
+
+int h2_mplx_has_master_events(h2_mplx *m)
+{
+ return apr_atomic_read32(&m->event_pending) > 0;
+}
+
+apr_status_t h2_mplx_dispatch_master_events(h2_mplx *m,
+ stream_ev_callback *on_resume,
+ void *on_ctx)
+{
+ h2_stream *stream;
+ int n, id;
+
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, m->c,
+ "h2_mplx(%ld): dispatch events", m->id);
+ apr_atomic_set32(&m->event_pending, 0);
+
+ /* update input windows for streams */
+ h2_ihash_iter(m->streams, report_consumption_iter, m);
+ purge_streams(m, 1);
+
+ n = h2_ififo_count(m->readyq);
+ while (n > 0
+ && (h2_ififo_try_pull(m->readyq, &id) == APR_SUCCESS)) {
+ --n;
+ stream = h2_ihash_get(m->streams, id);
+ if (stream) {
+ on_resume(on_ctx, stream);
+ }
+ }
+
+ return APR_SUCCESS;
+}
+
+apr_status_t h2_mplx_keep_active(h2_mplx *m, h2_stream *stream)
+{
+ check_data_for(m, stream, 1);
+ return APR_SUCCESS;
+}
+
+int h2_mplx_awaits_data(h2_mplx *m)
+{
+ int waiting = 1;
+
+ H2_MPLX_ENTER_ALWAYS(m);
+
+ if (h2_ihash_empty(m->streams)) {
+ waiting = 0;
+ }
+ else if (!m->tasks_active && !h2_ififo_count(m->readyq)
+ && h2_iq_empty(m->q)) {
+ waiting = 0;
+ }
+
+ H2_MPLX_LEAVE(m);
+ return waiting;
+}
diff --git a/modules/http2/h2_mplx.h b/modules/http2/h2_mplx.h
new file mode 100644
index 0000000..2890b98
--- /dev/null
+++ b/modules/http2/h2_mplx.h
@@ -0,0 +1,330 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __mod_h2__h2_mplx__
+#define __mod_h2__h2_mplx__
+
+/**
+ * The stream multiplexer. It pushes buckets from the connection
+ * thread to the stream threads and vice versa. It's thread-safe
+ * to use.
+ *
+ * There is one h2_mplx instance for each h2_session, which sits on top
+ * of a particular httpd conn_rec. Input goes from the connection to
+ * the stream tasks. Output goes from the stream tasks to the connection,
+ * e.g. the client.
+ *
+ * For each stream, there can be at most "H2StreamMaxMemSize" output bytes
+ * queued in the multiplexer. If a task thread tries to write more
+ * data, it is blocked until space becomes available.
+ *
+ * Writing input is never blocked. In order to use flow control on the input,
+ * the mplx can be polled for input data consumption.
+ */
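+
+/* Illustrative sketch only (assumed wiring; session_pool, stream_pri_cmp,
+ * on_stream_resume and iowait_cond are placeholder names): the life cycle as
+ * seen from the session/master connection, using only functions declared in
+ * this header.
+ *
+ *   m = h2_mplx_create(c, session_pool, conf, workers);
+ *
+ *   for each new stream:
+ *       h2_mplx_process(m, stream, stream_pri_cmp, session);
+ *
+ *   while serving:
+ *       if (h2_mplx_has_master_events(m)) {
+ *           h2_mplx_dispatch_master_events(m, on_stream_resume, session);
+ *       }
+ *
+ *   at shutdown:
+ *       h2_mplx_release_and_join(m, iowait_cond);
+ */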
+
+struct apr_pool_t;
+struct apr_thread_mutex_t;
+struct apr_thread_cond_t;
+struct h2_bucket_beam;
+struct h2_config;
+struct h2_ihash_t;
+struct h2_task;
+struct h2_stream;
+struct h2_request;
+struct apr_thread_cond_t;
+struct h2_workers;
+struct h2_iqueue;
+struct h2_ngn_shed;
+struct h2_req_engine;
+
+#include <apr_queue.h>
+
+typedef struct h2_mplx h2_mplx;
+
+struct h2_mplx {
+ long id;
+ conn_rec *c;
+ apr_pool_t *pool;
+ server_rec *s; /* server for master conn */
+
+ unsigned int event_pending;
+ unsigned int aborted;
+ unsigned int is_registered; /* is registered at h2_workers */
+
+ struct h2_ihash_t *streams; /* all streams currently processing */
+ struct h2_ihash_t *sredo; /* all streams that need to be re-started */
+ struct h2_ihash_t *shold; /* all streams done with task ongoing */
+ struct h2_ihash_t *spurge; /* all streams done, ready for destroy */
+
+ struct h2_iqueue *q; /* all stream ids that need to be started */
+ struct h2_ififo *readyq; /* all stream ids ready for output */
+
+ struct h2_ihash_t *redo_tasks; /* all tasks that need to be redone */
+
+ int max_streams; /* max # of concurrent streams */
+ int max_stream_started; /* highest stream id that started processing */
+ int tasks_active; /* # of tasks being processed from this mplx */
+ int limit_active; /* current limit on active tasks, dynamic */
+ int max_active; /* max, hard limit # of active tasks in a process */
+ apr_time_t last_idle_block; /* last time, this mplx entered IDLE while
+ * streams were ready */
+ apr_time_t last_limit_change; /* last time, worker limit changed */
+ apr_interval_time_t limit_change_interval;
+
+ apr_thread_mutex_t *lock;
+ struct apr_thread_cond_t *added_output;
+ struct apr_thread_cond_t *task_thawed;
+ struct apr_thread_cond_t *join_wait;
+
+ apr_size_t stream_max_mem;
+
+ apr_pool_t *spare_io_pool;
+ apr_array_header_t *spare_slaves; /* spare slave connections */
+
+ struct h2_workers *workers;
+
+ struct h2_ngn_shed *ngn_shed;
+};
+
+
+
+/*******************************************************************************
+ * Object lifecycle and information.
+ ******************************************************************************/
+
+apr_status_t h2_mplx_child_init(apr_pool_t *pool, server_rec *s);
+
+/**
+ * Create the multiplexer for the given HTTP2 session.
+ * Implicitly has reference count 1.
+ */
+h2_mplx *h2_mplx_create(conn_rec *c, apr_pool_t *master,
+ const struct h2_config *conf,
+ struct h2_workers *workers);
+
+/**
+ * Decreases the reference counter of this mplx, waits for it
+ * to reach 0 and then destroys the mplx.
+ * This is to be called from the thread that created the mplx in
+ * the first place.
+ * @param m the mplx to be released and destroyed
+ * @param wait condition var to wait on for ref counter == 0
+ */
+void h2_mplx_release_and_join(h2_mplx *m, struct apr_thread_cond_t *wait);
+
+apr_status_t h2_mplx_pop_task(h2_mplx *m, struct h2_task **ptask);
+
+void h2_mplx_task_done(h2_mplx *m, struct h2_task *task, struct h2_task **ptask);
+
+/**
+ * Shut down the multiplexer gracefully. Will no longer schedule new streams
+ * but let the ongoing ones finish normally.
+ * @return the highest stream id being/been processed
+ */
+int h2_mplx_shutdown(h2_mplx *m);
+
+int h2_mplx_is_busy(h2_mplx *m);
+
+/*******************************************************************************
+ * IO lifetime of streams.
+ ******************************************************************************/
+
+struct h2_stream *h2_mplx_stream_get(h2_mplx *m, int id);
+
+/**
+ * Notifies mplx that a stream has been completely handled on the main
+ * connection and is ready for cleanup.
+ *
+ * @param m the mplx itself
+ * @param stream the stream ready for cleanup
+ */
+apr_status_t h2_mplx_stream_cleanup(h2_mplx *m, struct h2_stream *stream);
+
+/**
+ * Waits on output data from any stream in this session to become available.
+ * Returns APR_TIMEUP if no data arrived in the given time.
+ */
+apr_status_t h2_mplx_out_trywait(h2_mplx *m, apr_interval_time_t timeout,
+ struct apr_thread_cond_t *iowait);
+
+apr_status_t h2_mplx_keep_active(h2_mplx *m, struct h2_stream *stream);
+
+/*******************************************************************************
+ * Stream processing.
+ ******************************************************************************/
+
+/**
+ * Process a stream request.
+ *
+ * @param m the multiplexer
+ * @param stream the stream to process
+ * @param cmp the stream priority compare function
+ * @param ctx context data for the compare function
+ */
+apr_status_t h2_mplx_process(h2_mplx *m, struct h2_stream *stream,
+ h2_stream_pri_cmp *cmp, void *ctx);
+
+/**
+ * Stream priorities have changed, reschedule pending requests.
+ *
+ * @param m the multiplexer
+ * @param cmp the stream priority compare function
+ * @param ctx context data for the compare function
+ */
+apr_status_t h2_mplx_reprioritize(h2_mplx *m, h2_stream_pri_cmp *cmp, void *ctx);
+
+typedef apr_status_t stream_ev_callback(void *ctx, struct h2_stream *stream);
+
+/**
+ * Check if the multiplexer has events for the master connection pending.
+ * @return != 0 iff there are events pending
+ */
+int h2_mplx_has_master_events(h2_mplx *m);
+
+/**
+ * Dispatch events for the master connection, such as newly available
+ * output data on a suspended stream.
+ * @param m the multiplexer
+ * @param on_resume callback invoked when output data becomes available for a suspended stream
+ * @param ctx user supplied argument passed to the callback
+ */
+apr_status_t h2_mplx_dispatch_master_events(h2_mplx *m,
+ stream_ev_callback *on_resume,
+ void *ctx);
+
+int h2_mplx_awaits_data(h2_mplx *m);
+
+typedef int h2_mplx_stream_cb(struct h2_stream *s, void *ctx);
+
+apr_status_t h2_mplx_stream_do(h2_mplx *m, h2_mplx_stream_cb *cb, void *ctx);
+
+/*******************************************************************************
+ * Output handling of streams.
+ ******************************************************************************/
+
+/**
+ * Opens the output for the given stream with the specified response.
+ */
+apr_status_t h2_mplx_out_open(h2_mplx *mplx, int stream_id,
+ struct h2_bucket_beam *beam);
+
+/*******************************************************************************
+ * h2_mplx list Manipulation.
+ ******************************************************************************/
+
+/**
+ * The magic pointer value that indicates the head of a h2_mplx list
+ * @param b The mplx list
+ * @return The magic pointer value
+ */
+#define H2_MPLX_LIST_SENTINEL(b) APR_RING_SENTINEL((b), h2_mplx, link)
+
+/**
+ * Determine if the mplx list is empty
+ * @param b The list to check
+ * @return true or false
+ */
+#define H2_MPLX_LIST_EMPTY(b) APR_RING_EMPTY((b), h2_mplx, link)
+
+/**
+ * Return the first mplx in a list
+ * @param b The list to query
+ * @return The first mplx in the list
+ */
+#define H2_MPLX_LIST_FIRST(b) APR_RING_FIRST(b)
+
+/**
+ * Return the last mplx in a list
+ * @param b The list to query
+ * @return The last mplx in the list
+ */
+#define H2_MPLX_LIST_LAST(b) APR_RING_LAST(b)
+
+/**
+ * Insert a single mplx at the front of a list
+ * @param b The list to add to
+ * @param e The mplx to insert
+ */
+#define H2_MPLX_LIST_INSERT_HEAD(b, e) do { \
+h2_mplx *ap__b = (e); \
+APR_RING_INSERT_HEAD((b), ap__b, h2_mplx, link); \
+} while (0)
+
+/**
+ * Insert a single mplx at the end of a list
+ * @param b The list to add to
+ * @param e The mplx to insert
+ */
+#define H2_MPLX_LIST_INSERT_TAIL(b, e) do { \
+h2_mplx *ap__b = (e); \
+APR_RING_INSERT_TAIL((b), ap__b, h2_mplx, link); \
+} while (0)
+
+/**
+ * Get the next mplx in the list
+ * @param e The current mplx
+ * @return The next mplx
+ */
+#define H2_MPLX_NEXT(e) APR_RING_NEXT((e), link)
+/**
+ * Get the previous mplx in the list
+ * @param e The current mplx
+ * @return The previous mplx
+ */
+#define H2_MPLX_PREV(e) APR_RING_PREV((e), link)
+
+/**
+ * Remove a mplx from its list
+ * @param e The mplx to remove
+ */
+#define H2_MPLX_REMOVE(e) APR_RING_REMOVE((e), link)
+
+/*******************************************************************************
+ * h2_mplx DoS protection
+ ******************************************************************************/
+
+/**
+ * Master connection has entered idle mode.
+ * @param m the mplx instance of the master connection
+ * @return != SUCCESS iff connection should be terminated
+ */
+apr_status_t h2_mplx_idle(h2_mplx *m);
+
+/*******************************************************************************
+ * h2_req_engine handling
+ ******************************************************************************/
+
+typedef void h2_output_consumed(void *ctx, conn_rec *c, apr_off_t consumed);
+typedef apr_status_t h2_mplx_req_engine_init(struct h2_req_engine *engine,
+ const char *id,
+ const char *type,
+ apr_pool_t *pool,
+ apr_size_t req_buffer_size,
+ request_rec *r,
+ h2_output_consumed **pconsumed,
+ void **pbaton);
+
+apr_status_t h2_mplx_req_engine_push(const char *ngn_type,
+ request_rec *r,
+ h2_mplx_req_engine_init *einit);
+apr_status_t h2_mplx_req_engine_pull(struct h2_req_engine *ngn,
+ apr_read_type_e block,
+ int capacity,
+ request_rec **pr);
+void h2_mplx_req_engine_done(struct h2_req_engine *ngn, conn_rec *r_conn,
+ apr_status_t status);
+
+#endif /* defined(__mod_h2__h2_mplx__) */
diff --git a/modules/http2/h2_ngn_shed.c b/modules/http2/h2_ngn_shed.c
new file mode 100644
index 0000000..fb85776
--- /dev/null
+++ b/modules/http2/h2_ngn_shed.c
@@ -0,0 +1,392 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <assert.h>
+#include <stddef.h>
+#include <stdlib.h>
+
+#include <apr_thread_mutex.h>
+#include <apr_thread_cond.h>
+#include <apr_strings.h>
+#include <apr_time.h>
+
+#include <httpd.h>
+#include <http_core.h>
+#include <http_log.h>
+
+#include "mod_http2.h"
+
+#include "h2_private.h"
+#include "h2.h"
+#include "h2_config.h"
+#include "h2_conn.h"
+#include "h2_ctx.h"
+#include "h2_h2.h"
+#include "h2_mplx.h"
+#include "h2_request.h"
+#include "h2_task.h"
+#include "h2_util.h"
+#include "h2_ngn_shed.h"
+
+
+typedef struct h2_ngn_entry h2_ngn_entry;
+struct h2_ngn_entry {
+ APR_RING_ENTRY(h2_ngn_entry) link;
+ h2_task *task;
+ request_rec *r;
+};
+
+#define H2_NGN_ENTRY_NEXT(e) APR_RING_NEXT((e), link)
+#define H2_NGN_ENTRY_PREV(e) APR_RING_PREV((e), link)
+#define H2_NGN_ENTRY_REMOVE(e) APR_RING_REMOVE((e), link)
+
+#define H2_REQ_ENTRIES_SENTINEL(b) APR_RING_SENTINEL((b), h2_ngn_entry, link)
+#define H2_REQ_ENTRIES_EMPTY(b) APR_RING_EMPTY((b), h2_ngn_entry, link)
+#define H2_REQ_ENTRIES_FIRST(b) APR_RING_FIRST(b)
+#define H2_REQ_ENTRIES_LAST(b) APR_RING_LAST(b)
+
+#define H2_REQ_ENTRIES_INSERT_HEAD(b, e) do { \
+h2_ngn_entry *ap__b = (e); \
+APR_RING_INSERT_HEAD((b), ap__b, h2_ngn_entry, link); \
+} while (0)
+
+#define H2_REQ_ENTRIES_INSERT_TAIL(b, e) do { \
+h2_ngn_entry *ap__b = (e); \
+APR_RING_INSERT_TAIL((b), ap__b, h2_ngn_entry, link); \
+} while (0)
+
+struct h2_req_engine {
+ const char *id; /* identifier */
+ const char *type; /* name of the engine type */
+ apr_pool_t *pool; /* pool for engine specific allocations */
+ conn_rec *c; /* connection this engine is assigned to */
+ h2_task *task; /* the task this engine is based on, running in */
+ h2_ngn_shed *shed;
+
+ unsigned int shutdown : 1; /* engine is being shut down */
+ unsigned int done : 1; /* engine has finished */
+
+ APR_RING_HEAD(h2_req_entries, h2_ngn_entry) entries;
+ int capacity; /* maximum concurrent requests */
+ int no_assigned; /* # of assigned requests */
+    int no_live;                /* # of live requests */
+    int no_finished;            /* # of finished requests */
+
+ h2_output_consumed *out_consumed;
+ void *out_consumed_ctx;
+};
+
+const char *h2_req_engine_get_id(h2_req_engine *engine)
+{
+ return engine->id;
+}
+
+int h2_req_engine_is_shutdown(h2_req_engine *engine)
+{
+ return engine->shutdown;
+}
+
+void h2_req_engine_out_consumed(h2_req_engine *engine, conn_rec *c,
+ apr_off_t bytes)
+{
+ if (engine->out_consumed) {
+ engine->out_consumed(engine->out_consumed_ctx, c, bytes);
+ }
+}
+
+h2_ngn_shed *h2_ngn_shed_create(apr_pool_t *pool, conn_rec *c,
+ int default_capacity,
+ apr_size_t req_buffer_size)
+{
+ h2_ngn_shed *shed;
+
+ shed = apr_pcalloc(pool, sizeof(*shed));
+ shed->c = c;
+ shed->pool = pool;
+ shed->default_capacity = default_capacity;
+ shed->req_buffer_size = req_buffer_size;
+ shed->ngns = apr_hash_make(pool);
+
+ return shed;
+}
+
+void h2_ngn_shed_set_ctx(h2_ngn_shed *shed, void *user_ctx)
+{
+ shed->user_ctx = user_ctx;
+}
+
+void *h2_ngn_shed_get_ctx(h2_ngn_shed *shed)
+{
+ return shed->user_ctx;
+}
+
+h2_ngn_shed *h2_ngn_shed_get_shed(h2_req_engine *ngn)
+{
+ return ngn->shed;
+}
+
+void h2_ngn_shed_abort(h2_ngn_shed *shed)
+{
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, shed->c, APLOGNO(03394)
+ "h2_ngn_shed(%ld): abort", shed->c->id);
+ shed->aborted = 1;
+}
+
+static void ngn_add_task(h2_req_engine *ngn, h2_task *task, request_rec *r)
+{
+ h2_ngn_entry *entry = apr_pcalloc(task->pool, sizeof(*entry));
+ APR_RING_ELEM_INIT(entry, link);
+ entry->task = task;
+ entry->r = r;
+ H2_REQ_ENTRIES_INSERT_TAIL(&ngn->entries, entry);
+ ngn->no_assigned++;
+}
+
+
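+/* Offer a request to the shed: if the task already hosts an engine, queue
+ * it there; otherwise queue it on an existing engine of the requested type
+ * (freezing the task), or create a new engine via the einit callback. */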
+apr_status_t h2_ngn_shed_push_request(h2_ngn_shed *shed, const char *ngn_type,
+ request_rec *r,
+ http2_req_engine_init *einit)
+{
+ h2_req_engine *ngn;
+ h2_task *task = h2_ctx_rget_task(r);
+
+ ap_assert(task);
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, shed->c,
+ "h2_ngn_shed(%ld): PUSHing request (task=%s)", shed->c->id,
+ task->id);
+ if (task->request->serialize) {
+ /* Request runs in maximum-compatibility (serialized) mode,
+ * do not hand it to an engine */
+ return APR_EOF;
+ }
+
+ if (task->assigned) {
+ --task->assigned->no_assigned;
+ --task->assigned->no_live;
+ task->assigned = NULL;
+ }
+
+ if (task->engine) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, task->c,
+ "h2_ngn_shed(%ld): push task(%s) hosting engine %s "
+ "already with %d tasks",
+ shed->c->id, task->id, task->engine->id,
+ task->engine->no_assigned);
+ task->assigned = task->engine;
+ ngn_add_task(task->engine, task, r);
+ return APR_SUCCESS;
+ }
+
+ ngn = apr_hash_get(shed->ngns, ngn_type, APR_HASH_KEY_STRING);
+ if (ngn && !ngn->shutdown) {
+ /* this task will be processed in another thread,
+ * freeze any I/O for the time being. */
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, task->c,
+ "h2_ngn_shed(%ld): pushing request %s to %s",
+ shed->c->id, task->id, ngn->id);
+ if (!h2_task_has_thawed(task)) {
+ h2_task_freeze(task);
+ }
+ ngn_add_task(ngn, task, r);
+ return APR_SUCCESS;
+ }
+
+ /* no existing engine or being shut down, start a new one */
+ if (einit) {
+ apr_status_t status;
+ apr_pool_t *pool = task->pool;
+ h2_req_engine *newngn;
+
+ newngn = apr_pcalloc(pool, sizeof(*ngn));
+ newngn->pool = pool;
+ newngn->id = apr_psprintf(pool, "ngn-%s", task->id);
+ newngn->type = apr_pstrdup(pool, ngn_type);
+ newngn->c = task->c;
+ newngn->shed = shed;
+ newngn->capacity = shed->default_capacity;
+ newngn->no_assigned = 1;
+ newngn->no_live = 1;
+ APR_RING_INIT(&newngn->entries, h2_ngn_entry, link);
+
+ status = einit(newngn, newngn->id, newngn->type, newngn->pool,
+ shed->req_buffer_size, r,
+ &newngn->out_consumed, &newngn->out_consumed_ctx);
+
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, task->c, APLOGNO(03395)
+ "h2_ngn_shed(%ld): create engine %s (%s)",
+ shed->c->id, newngn->id, newngn->type);
+ if (status == APR_SUCCESS) {
+ newngn->task = task;
+ task->engine = newngn;
+ task->assigned = newngn;
+ apr_hash_set(shed->ngns, newngn->type, APR_HASH_KEY_STRING, newngn);
+ }
+ return status;
+ }
+ return APR_EOF;
+}
+
+static h2_ngn_entry *pop_detached(h2_req_engine *ngn)
+{
+ h2_ngn_entry *entry;
+ for (entry = H2_REQ_ENTRIES_FIRST(&ngn->entries);
+ entry != H2_REQ_ENTRIES_SENTINEL(&ngn->entries);
+ entry = H2_NGN_ENTRY_NEXT(entry)) {
+ if (h2_task_has_thawed(entry->task)
+ || (entry->task->engine == ngn)) {
+ /* The task hosting this engine can always be pulled by it.
+ * Other tasks need to be detached first, i.e. no longer
+ * assigned to another worker. */
+ H2_NGN_ENTRY_REMOVE(entry);
+ return entry;
+ }
+ }
+ return NULL;
+}
+
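+/* Called by an engine to fetch its next request. Returns APR_EAGAIN when
+ * nothing can be handed out yet, APR_EOF once the engine is to shut down,
+ * APR_ECONNABORTED when the shed has been aborted. */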
+apr_status_t h2_ngn_shed_pull_request(h2_ngn_shed *shed,
+ h2_req_engine *ngn,
+ int capacity,
+ int want_shutdown,
+ request_rec **pr)
+{
+ h2_ngn_entry *entry;
+
+ ap_assert(ngn);
+ *pr = NULL;
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE3, 0, shed->c, APLOGNO(03396)
+ "h2_ngn_shed(%ld): pull task for engine %s, shutdown=%d",
+ shed->c->id, ngn->id, want_shutdown);
+ if (shed->aborted) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, shed->c, APLOGNO(03397)
+ "h2_ngn_shed(%ld): abort while pulling requests %s",
+ shed->c->id, ngn->id);
+ ngn->shutdown = 1;
+ return APR_ECONNABORTED;
+ }
+
+ ngn->capacity = capacity;
+ if (H2_REQ_ENTRIES_EMPTY(&ngn->entries)) {
+ if (want_shutdown) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, shed->c,
+ "h2_ngn_shed(%ld): emtpy queue, shutdown engine %s",
+ shed->c->id, ngn->id);
+ ngn->shutdown = 1;
+ }
+ return ngn->shutdown? APR_EOF : APR_EAGAIN;
+ }
+
+ if ((entry = pop_detached(ngn))) {
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, entry->task->c, APLOGNO(03398)
+ "h2_ngn_shed(%ld): pulled request %s for engine %s",
+ shed->c->id, entry->task->id, ngn->id);
+ ngn->no_live++;
+ *pr = entry->r;
+ entry->task->assigned = ngn;
+ /* task will now run in ngn's own thread. Modules like lua
+ * seem to require the correct thread set in the conn_rec.
+ * See PR 59542. */
+ if (entry->task->c && ngn->c) {
+ entry->task->c->current_thread = ngn->c->current_thread;
+ }
+ if (entry->task->engine == ngn) {
+ /* If an engine pushes its own base task, and then pulls
+ * it back to itself again, it needs to be thawed.
+ */
+ h2_task_thaw(entry->task);
+ }
+ return APR_SUCCESS;
+ }
+
+ if (1) {
+ h2_ngn_entry *entry = H2_REQ_ENTRIES_FIRST(&ngn->entries);
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, shed->c, APLOGNO(03399)
+ "h2_ngn_shed(%ld): pull task, nothing, first task %s",
+ shed->c->id, entry->task->id);
+ }
+ return APR_EAGAIN;
+}
+
+static apr_status_t ngn_done_task(h2_ngn_shed *shed, h2_req_engine *ngn,
+ h2_task *task, int waslive, int aborted)
+{
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, shed->c, APLOGNO(03400)
+ "h2_ngn_shed(%ld): task %s %s by %s",
+ shed->c->id, task->id, aborted? "aborted":"done", ngn->id);
+ ngn->no_finished++;
+ if (waslive) ngn->no_live--;
+ ngn->no_assigned--;
+ task->assigned = NULL;
+
+ return APR_SUCCESS;
+}
+
+apr_status_t h2_ngn_shed_done_task(h2_ngn_shed *shed,
+ struct h2_req_engine *ngn, h2_task *task)
+{
+ return ngn_done_task(shed, ngn, task, 1, 0);
+}
+
+void h2_ngn_shed_done_ngn(h2_ngn_shed *shed, struct h2_req_engine *ngn)
+{
+ if (ngn->done) {
+ return;
+ }
+
+ if (!shed->aborted && !H2_REQ_ENTRIES_EMPTY(&ngn->entries)) {
+ h2_ngn_entry *entry;
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, shed->c,
+ "h2_ngn_shed(%ld): exit engine %s (%s), "
+ "has still requests queued, shutdown=%d,"
+ "assigned=%ld, live=%ld, finished=%ld",
+ shed->c->id, ngn->id, ngn->type,
+ ngn->shutdown,
+ (long)ngn->no_assigned, (long)ngn->no_live,
+ (long)ngn->no_finished);
+ for (entry = H2_REQ_ENTRIES_FIRST(&ngn->entries);
+ entry != H2_REQ_ENTRIES_SENTINEL(&ngn->entries);
+ entry = H2_NGN_ENTRY_NEXT(entry)) {
+ h2_task *task = entry->task;
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, shed->c,
+ "h2_ngn_shed(%ld): engine %s has queued task %s, "
+ "frozen=%d, aborting",
+ shed->c->id, ngn->id, task->id, task->frozen);
+ ngn_done_task(shed, ngn, task, 0, 1);
+ task->engine = task->assigned = NULL;
+ }
+ }
+ if (!shed->aborted && (ngn->no_assigned > 1 || ngn->no_live > 1)) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, shed->c,
+ "h2_ngn_shed(%ld): exit engine %s (%s), "
+ "assigned=%ld, live=%ld, finished=%ld",
+ shed->c->id, ngn->id, ngn->type,
+ (long)ngn->no_assigned, (long)ngn->no_live,
+ (long)ngn->no_finished);
+ }
+ else {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, shed->c,
+ "h2_ngn_shed(%ld): exit engine %s",
+ shed->c->id, ngn->id);
+ }
+
+ apr_hash_set(shed->ngns, ngn->type, APR_HASH_KEY_STRING, NULL);
+ ngn->done = 1;
+}
+
+void h2_ngn_shed_destroy(h2_ngn_shed *shed)
+{
+ ap_assert(apr_hash_count(shed->ngns) == 0);
+}
+
diff --git a/modules/http2/h2_ngn_shed.h b/modules/http2/h2_ngn_shed.h
new file mode 100644
index 0000000..7764c18
--- /dev/null
+++ b/modules/http2/h2_ngn_shed.h
@@ -0,0 +1,79 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef h2_req_shed_h
+#define h2_req_shed_h
+
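+/* The engine "shed" keeps track of the request processing engines on a
+ * master connection and hands queued requests out to them. */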
+struct h2_req_engine;
+struct h2_task;
+
+typedef struct h2_ngn_shed h2_ngn_shed;
+struct h2_ngn_shed {
+ conn_rec *c;
+ apr_pool_t *pool;
+ apr_hash_t *ngns;
+ void *user_ctx;
+
+ unsigned int aborted : 1;
+
+ int default_capacity;
+ apr_size_t req_buffer_size; /* preferred buffer size for responses */
+};
+
+const char *h2_req_engine_get_id(h2_req_engine *engine);
+int h2_req_engine_is_shutdown(h2_req_engine *engine);
+
+void h2_req_engine_out_consumed(h2_req_engine *engine, conn_rec *c,
+ apr_off_t bytes);
+
+typedef apr_status_t h2_shed_ngn_init(h2_req_engine *engine,
+ const char *id,
+ const char *type,
+ apr_pool_t *pool,
+ apr_size_t req_buffer_size,
+ request_rec *r,
+ h2_output_consumed **pconsumed,
+ void **pbaton);
+
+h2_ngn_shed *h2_ngn_shed_create(apr_pool_t *pool, conn_rec *c,
+ int default_capacity,
+ apr_size_t req_buffer_size);
+
+void h2_ngn_shed_destroy(h2_ngn_shed *shed);
+
+void h2_ngn_shed_set_ctx(h2_ngn_shed *shed, void *user_ctx);
+void *h2_ngn_shed_get_ctx(h2_ngn_shed *shed);
+
+h2_ngn_shed *h2_ngn_shed_get_shed(struct h2_req_engine *ngn);
+
+void h2_ngn_shed_abort(h2_ngn_shed *shed);
+
+apr_status_t h2_ngn_shed_push_request(h2_ngn_shed *shed, const char *ngn_type,
+ request_rec *r,
+ h2_shed_ngn_init *init_cb);
+
+apr_status_t h2_ngn_shed_pull_request(h2_ngn_shed *shed, h2_req_engine *pub_ngn,
+ int capacity,
+ int want_shutdown, request_rec **pr);
+
+apr_status_t h2_ngn_shed_done_task(h2_ngn_shed *shed,
+ struct h2_req_engine *ngn,
+ struct h2_task *task);
+
+void h2_ngn_shed_done_ngn(h2_ngn_shed *shed, struct h2_req_engine *ngn);
+
+
+#endif /* h2_req_shed_h */
diff --git a/modules/http2/h2_private.h b/modules/http2/h2_private.h
new file mode 100644
index 0000000..516be13
--- /dev/null
+++ b/modules/http2/h2_private.h
@@ -0,0 +1,28 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef mod_h2_h2_private_h
+#define mod_h2_h2_private_h
+
+#include <apr_time.h>
+
+#include <nghttp2/nghttp2.h>
+
+extern module AP_MODULE_DECLARE_DATA http2_module;
+
+APLOG_USE_MODULE(http2);
+
+#endif
diff --git a/modules/http2/h2_proxy_session.c b/modules/http2/h2_proxy_session.c
new file mode 100644
index 0000000..8389c7c
--- /dev/null
+++ b/modules/http2/h2_proxy_session.c
@@ -0,0 +1,1584 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <stddef.h>
+#include <apr_strings.h>
+#include <nghttp2/nghttp2.h>
+
+#include <mpm_common.h>
+#include <httpd.h>
+#include <mod_proxy.h>
+
+#include "mod_http2.h"
+#include "h2.h"
+#include "h2_proxy_util.h"
+#include "h2_proxy_session.h"
+
+APLOG_USE_MODULE(proxy_http2);
+
+typedef struct h2_proxy_stream {
+ int id;
+ apr_pool_t *pool;
+ h2_proxy_session *session;
+
+ const char *url;
+ request_rec *r;
+ h2_proxy_request *req;
+ const char *real_server_uri;
+ const char *p_server_uri;
+ int standalone;
+
+ h2_proxy_stream_state_t state;
+ unsigned int suspended : 1;
+ unsigned int waiting_on_100 : 1;
+ unsigned int waiting_on_ping : 1;
+ uint32_t error_code;
+
+ apr_bucket_brigade *input;
+ apr_off_t data_sent;
+ apr_bucket_brigade *output;
+ apr_off_t data_received;
+
+ apr_table_t *saves;
+} h2_proxy_stream;
+
+
+static void dispatch_event(h2_proxy_session *session, h2_proxys_event_t ev,
+ int arg, const char *msg);
+static void ping_arrived(h2_proxy_session *session);
+static apr_status_t check_suspended(h2_proxy_session *session);
+static void stream_resume(h2_proxy_stream *stream);
+
+
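+/* Pool pre-cleanup: when the proxy connection is closed or recycled,
+ * terminate the nghttp2 session before its memory goes away. */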
+static apr_status_t proxy_session_pre_close(void *theconn)
+{
+ proxy_conn_rec *p_conn = (proxy_conn_rec *)theconn;
+ h2_proxy_session *session = p_conn->data;
+
+ if (session && session->ngh2) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, session->c,
+ "proxy_session(%s): pool cleanup, state=%d, streams=%d",
+ session->id, session->state,
+ (int)h2_proxy_ihash_count(session->streams));
+ session->aborted = 1;
+ dispatch_event(session, H2_PROXYS_EV_PRE_CLOSE, 0, NULL);
+ nghttp2_session_del(session->ngh2);
+ session->ngh2 = NULL;
+ p_conn->data = NULL;
+ }
+ return APR_SUCCESS;
+}
+
+static int proxy_pass_brigade(apr_bucket_alloc_t *bucket_alloc,
+ proxy_conn_rec *p_conn,
+ conn_rec *origin, apr_bucket_brigade *bb,
+ int flush)
+{
+ apr_status_t status;
+ apr_off_t transferred;
+
+ if (flush) {
+ apr_bucket *e = apr_bucket_flush_create(bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(bb, e);
+ }
+ apr_brigade_length(bb, 0, &transferred);
+ if (transferred != -1)
+ p_conn->worker->s->transferred += transferred;
+ status = ap_pass_brigade(origin->output_filters, bb);
+ /* Cleanup the brigade now to avoid buckets lifetime
+ * issues in case of error returned below. */
+ apr_brigade_cleanup(bb);
+ if (status != APR_SUCCESS) {
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, origin, APLOGNO(03357)
+ "pass output failed to %pI (%s)",
+ p_conn->addr, p_conn->hostname);
+ }
+ return status;
+}
+
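+/* nghttp2 send callback: wrap the serialized frame bytes in a transient
+ * bucket and flush them out to the backend connection. */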
+static ssize_t raw_send(nghttp2_session *ngh2, const uint8_t *data,
+ size_t length, int flags, void *user_data)
+{
+ h2_proxy_session *session = user_data;
+ apr_bucket *b;
+ apr_status_t status;
+ int flush = 1;
+
+ if (data) {
+ b = apr_bucket_transient_create((const char*)data, length,
+ session->c->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(session->output, b);
+ }
+
+ status = proxy_pass_brigade(session->c->bucket_alloc,
+ session->p_conn, session->c,
+ session->output, flush);
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, session->c,
+ "h2_proxy_sesssion(%s): raw_send %d bytes, flush=%d",
+ session->id, (int)length, flush);
+ if (status != APR_SUCCESS) {
+ return NGHTTP2_ERR_CALLBACK_FAILURE;
+ }
+ return length;
+}
+
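+/* nghttp2 frame callback: interim HEADERS are forwarded where appropriate,
+ * PING replies unblock waiting streams, SETTINGS update the concurrency
+ * limit and GOAWAY triggers session shutdown. */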
+static int on_frame_recv(nghttp2_session *ngh2, const nghttp2_frame *frame,
+ void *user_data)
+{
+ h2_proxy_session *session = user_data;
+ h2_proxy_stream *stream;
+ request_rec *r;
+ int n;
+
+ if (APLOGcdebug(session->c)) {
+ char buffer[256];
+
+ h2_proxy_util_frame_print(frame, buffer, sizeof(buffer)/sizeof(buffer[0]));
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, APLOGNO(03341)
+ "h2_proxy_session(%s): recv FRAME[%s]",
+ session->id, buffer);
+ }
+
+ session->last_frame_received = apr_time_now();
+ switch (frame->hd.type) {
+ case NGHTTP2_HEADERS:
+ stream = nghttp2_session_get_stream_user_data(ngh2, frame->hd.stream_id);
+ if (!stream) {
+ return NGHTTP2_ERR_CALLBACK_FAILURE;
+ }
+ r = stream->r;
+ if (r->status >= 100 && r->status < 200) {
+ /* By default, we will forward all interim responses when
+ * we are sitting on an HTTP/2 connection to the client */
+ int forward = session->h2_front;
+ switch(r->status) {
+ case 100:
+ if (stream->waiting_on_100) {
+ stream->waiting_on_100 = 0;
+ r->status_line = ap_get_status_line(r->status);
+ forward = 1;
+ }
+ break;
+ case 103:
+ /* Workaround until this is handled in the HTTP protocol
+ * base parts; without it, unknown status codes get
+ * converted to 500... */
+ r->status_line = "103 Early Hints";
+ break;
+ default:
+ r->status_line = ap_get_status_line(r->status);
+ break;
+ }
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(03487)
+ "h2_proxy_session(%s): got interim HEADERS, "
+ "status=%d, will forward=%d",
+ session->id, r->status, forward);
+ if (forward) {
+ ap_send_interim_response(r, 1);
+ }
+ }
+ stream_resume(stream);
+ break;
+ case NGHTTP2_PING:
+ if (session->check_ping) {
+ session->check_ping = 0;
+ ping_arrived(session);
+ }
+ break;
+ case NGHTTP2_PUSH_PROMISE:
+ break;
+ case NGHTTP2_SETTINGS:
+ if (frame->settings.niv > 0) {
+ n = nghttp2_session_get_remote_settings(ngh2, NGHTTP2_SETTINGS_MAX_CONCURRENT_STREAMS);
+ if (n > 0) {
+ session->remote_max_concurrent = n;
+ }
+ }
+ break;
+ case NGHTTP2_GOAWAY:
+ /* we expect the remote server to tell us the highest stream id
+ * that it has started processing. */
+ session->last_stream_id = frame->goaway.last_stream_id;
+ dispatch_event(session, H2_PROXYS_EV_REMOTE_GOAWAY, 0, NULL);
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+
+static int before_frame_send(nghttp2_session *ngh2,
+ const nghttp2_frame *frame, void *user_data)
+{
+ h2_proxy_session *session = user_data;
+ if (APLOGcdebug(session->c)) {
+ char buffer[256];
+
+ h2_proxy_util_frame_print(frame, buffer, sizeof(buffer)/sizeof(buffer[0]));
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, APLOGNO(03343)
+ "h2_proxy_session(%s): sent FRAME[%s]",
+ session->id, buffer);
+ }
+ return 0;
+}
+
+static int add_header(void *table, const char *n, const char *v)
+{
+ apr_table_add(table, n, v);
+ return 1;
+}
+
+static void process_proxy_header(h2_proxy_stream *stream, const char *n, const char *v)
+{
+ static const struct {
+ const char *name;
+ ap_proxy_header_reverse_map_fn func;
+ } transform_hdrs[] = {
+ { "Location", ap_proxy_location_reverse_map },
+ { "Content-Location", ap_proxy_location_reverse_map },
+ { "URI", ap_proxy_location_reverse_map },
+ { "Destination", ap_proxy_location_reverse_map },
+ { "Set-Cookie", ap_proxy_cookie_reverse_map },
+ { NULL, NULL }
+ };
+ request_rec *r = stream->r;
+ proxy_dir_conf *dconf;
+ int i;
+
+ dconf = ap_get_module_config(r->per_dir_config, &proxy_module);
+ if (!dconf->preserve_host) {
+ for (i = 0; transform_hdrs[i].name; ++i) {
+ if (!ap_cstr_casecmp(transform_hdrs[i].name, n)) {
+ apr_table_add(r->headers_out, n,
+ (*transform_hdrs[i].func)(r, dconf, v));
+ return;
+ }
+ }
+ if (!ap_cstr_casecmp("Link", n)) {
+ dconf = ap_get_module_config(r->per_dir_config, &proxy_module);
+ apr_table_add(r->headers_out, n,
+ h2_proxy_link_reverse_map(r, dconf,
+ stream->real_server_uri, stream->p_server_uri, v));
+ return;
+ }
+ }
+ apr_table_add(r->headers_out, n, v);
+}
+
+static apr_status_t h2_proxy_stream_add_header_out(h2_proxy_stream *stream,
+ const char *n, apr_size_t nlen,
+ const char *v, apr_size_t vlen)
+{
+ if (n[0] == ':') {
+ if (!stream->data_received && !strncmp(":status", n, nlen)) {
+ char *s = apr_pstrndup(stream->r->pool, v, vlen);
+
+ apr_table_setn(stream->r->notes, "proxy-status", s);
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, stream->session->c,
+ "h2_proxy_stream(%s-%d): got status %s",
+ stream->session->id, stream->id, s);
+ stream->r->status = (int)apr_atoi64(s);
+ if (stream->r->status <= 0) {
+ stream->r->status = 500;
+ return APR_EGENERAL;
+ }
+ }
+ return APR_SUCCESS;
+ }
+
+ if (!h2_proxy_res_ignore_header(n, nlen)) {
+ char *hname, *hvalue;
+
+ hname = apr_pstrndup(stream->pool, n, nlen);
+ h2_proxy_util_camel_case_header(hname, nlen);
+ hvalue = apr_pstrndup(stream->pool, v, vlen);
+
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, stream->session->c,
+ "h2_proxy_stream(%s-%d): got header %s: %s",
+ stream->session->id, stream->id, hname, hvalue);
+ process_proxy_header(stream, hname, hvalue);
+ }
+ return APR_SUCCESS;
+}
+
+static int log_header(void *ctx, const char *key, const char *value)
+{
+ h2_proxy_stream *stream = ctx;
+ ap_log_rerror(APLOG_MARK, APLOG_TRACE2, 0, stream->r,
+ "h2_proxy_stream(%s-%d), header_out %s: %s",
+ stream->session->id, stream->id, key, value);
+ return 1;
+}
+
+static void h2_proxy_stream_end_headers_out(h2_proxy_stream *stream)
+{
+ h2_proxy_session *session = stream->session;
+ request_rec *r = stream->r;
+ apr_pool_t *p = r->pool;
+
+ /* Now, add in the cookies from the response to the ones already saved */
+ apr_table_do(add_header, stream->saves, r->headers_out, "Set-Cookie", NULL);
+
+ /* and now load 'em all in */
+ if (!apr_is_empty_table(stream->saves)) {
+ apr_table_unset(r->headers_out, "Set-Cookie");
+ r->headers_out = apr_table_overlay(p, r->headers_out, stream->saves);
+ }
+
+ /* handle Via header in response */
+ if (session->conf->viaopt != via_off
+ && session->conf->viaopt != via_block) {
+ const char *server_name = ap_get_server_name(stream->r);
+ apr_port_t port = ap_get_server_port(stream->r);
+ char portstr[32];
+
+ /* If USE_CANONICAL_NAME_OFF was configured for the proxy virtual host,
+ * then the server name returned by ap_get_server_name() is the
+ * origin server name (which doesn't make sense with Via: headers)
+ * so we use the proxy vhost's name instead.
+ */
+ if (server_name == stream->r->hostname) {
+ server_name = stream->r->server->server_hostname;
+ }
+ if (ap_is_default_port(port, stream->r)) {
+ portstr[0] = '\0';
+ }
+ else {
+ apr_snprintf(portstr, sizeof(portstr), ":%d", port);
+ }
+
+ /* create a "Via:" response header entry and merge it */
+ apr_table_add(r->headers_out, "Via",
+ (session->conf->viaopt == via_full)
+ ? apr_psprintf(p, "%d.%d %s%s (%s)",
+ HTTP_VERSION_MAJOR(r->proto_num),
+ HTTP_VERSION_MINOR(r->proto_num),
+ server_name, portstr,
+ AP_SERVER_BASEVERSION)
+ : apr_psprintf(p, "%d.%d %s%s",
+ HTTP_VERSION_MAJOR(r->proto_num),
+ HTTP_VERSION_MINOR(r->proto_num),
+ server_name, portstr)
+ );
+ }
+
+ if (APLOGrtrace2(stream->r)) {
+ ap_log_rerror(APLOG_MARK, APLOG_TRACE2, 0, stream->r,
+ "h2_proxy_stream(%s-%d), header_out after merging",
+ stream->session->id, stream->id);
+ apr_table_do(log_header, stream, stream->r->headers_out, NULL);
+ }
+}
+
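+/* nghttp2 DATA callback: pass response body chunks down the frontend
+ * request's output filters, flushing after every frame. */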
+static int stream_response_data(nghttp2_session *ngh2, uint8_t flags,
+ int32_t stream_id, const uint8_t *data,
+ size_t len, void *user_data)
+{
+ h2_proxy_session *session = user_data;
+ h2_proxy_stream *stream;
+ apr_bucket *b;
+ apr_status_t status;
+
+ stream = nghttp2_session_get_stream_user_data(ngh2, stream_id);
+ if (!stream) {
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, ap_server_conf, APLOGNO(03358)
+ "h2_proxy_session(%s): recv data chunk for "
+ "unknown stream %d, ignored",
+ session->id, stream_id);
+ return 0;
+ }
+
+ if (!stream->data_received) {
+ /* last chance to manipulate response headers.
+ * after this, only trailers */
+ h2_proxy_stream_end_headers_out(stream);
+ }
+ stream->data_received += len;
+
+ b = apr_bucket_transient_create((const char*)data, len,
+ stream->r->connection->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(stream->output, b);
+ /* always flush after a DATA frame, as we have no other indication
+ * of buffer use */
+ b = apr_bucket_flush_create(stream->r->connection->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(stream->output, b);
+
+ status = ap_pass_brigade(stream->r->output_filters, stream->output);
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, status, stream->r, APLOGNO(03359)
+ "h2_proxy_session(%s): stream=%d, response DATA %ld, %ld"
+ " total", session->id, stream_id, (long)len,
+ (long)stream->data_received);
+ if (status != APR_SUCCESS) {
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, session->c, APLOGNO(03344)
+ "h2_proxy_session(%s): passing output on stream %d",
+ session->id, stream->id);
+ nghttp2_submit_rst_stream(ngh2, NGHTTP2_FLAG_NONE,
+ stream_id, NGHTTP2_STREAM_CLOSED);
+ return NGHTTP2_ERR_STREAM_CLOSING;
+ }
+ if (stream->standalone) {
+ nghttp2_session_consume(ngh2, stream_id, len);
+ ap_log_rerror(APLOG_MARK, APLOG_TRACE2, 0, stream->r,
+ "h2_proxy_session(%s): stream %d, win_update %d bytes",
+ session->id, stream_id, (int)len);
+ }
+ return 0;
+}
+
+static int on_stream_close(nghttp2_session *ngh2, int32_t stream_id,
+ uint32_t error_code, void *user_data)
+{
+ h2_proxy_session *session = user_data;
+ h2_proxy_stream *stream;
+ if (!session->aborted) {
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, APLOGNO(03360)
+ "h2_proxy_session(%s): stream=%d, closed, err=%d",
+ session->id, stream_id, error_code);
+ stream = h2_proxy_ihash_get(session->streams, stream_id);
+ if (stream) {
+ stream->error_code = error_code;
+ }
+ dispatch_event(session, H2_PROXYS_EV_STREAM_DONE, stream_id, NULL);
+ }
+ return 0;
+}
+
+static int on_header(nghttp2_session *ngh2, const nghttp2_frame *frame,
+ const uint8_t *namearg, size_t nlen,
+ const uint8_t *valuearg, size_t vlen, uint8_t flags,
+ void *user_data)
+{
+ h2_proxy_session *session = user_data;
+ h2_proxy_stream *stream;
+ const char *n = (const char*)namearg;
+ const char *v = (const char*)valuearg;
+
+ (void)session;
+ if (frame->hd.type == NGHTTP2_HEADERS && nlen) {
+ stream = nghttp2_session_get_stream_user_data(ngh2, frame->hd.stream_id);
+ if (stream) {
+ if (h2_proxy_stream_add_header_out(stream, n, nlen, v, vlen)) {
+ return NGHTTP2_ERR_CALLBACK_FAILURE;
+ }
+ }
+ }
+ else if (frame->hd.type == NGHTTP2_PUSH_PROMISE) {
+ }
+
+ return 0;
+}
+
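+/* nghttp2 data source callback: copy request body data from the stream's
+ * input brigade into buf; the stream is deferred while waiting for a
+ * 100-continue answer or the initial PING reply. */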
+static ssize_t stream_request_data(nghttp2_session *ngh2, int32_t stream_id,
+ uint8_t *buf, size_t length,
+ uint32_t *data_flags,
+ nghttp2_data_source *source, void *user_data)
+{
+ h2_proxy_stream *stream;
+ apr_status_t status = APR_SUCCESS;
+
+ *data_flags = 0;
+ stream = nghttp2_session_get_stream_user_data(ngh2, stream_id);
+ if (!stream) {
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, ap_server_conf, APLOGNO(03361)
+ "h2_proxy_stream(%s): data_read, stream %d not found",
+ stream->session->id, stream_id);
+ return NGHTTP2_ERR_CALLBACK_FAILURE;
+ }
+
+ if (stream->session->check_ping) {
+ /* suspend until we hear from the other side */
+ stream->waiting_on_ping = 1;
+ status = APR_EAGAIN;
+ }
+ else if (stream->r->expecting_100) {
+ /* suspend until the answer comes */
+ stream->waiting_on_100 = 1;
+ status = APR_EAGAIN;
+ }
+ else if (APR_BRIGADE_EMPTY(stream->input)) {
+ status = ap_get_brigade(stream->r->input_filters, stream->input,
+ AP_MODE_READBYTES, APR_NONBLOCK_READ,
+ H2MAX(APR_BUCKET_BUFF_SIZE, length));
+ ap_log_rerror(APLOG_MARK, APLOG_TRACE2, status, stream->r,
+ "h2_proxy_stream(%s-%d): request body read",
+ stream->session->id, stream->id);
+ }
+
+ if (status == APR_SUCCESS) {
+ ssize_t readlen = 0;
+ while (status == APR_SUCCESS
+ && (readlen < length)
+ && !APR_BRIGADE_EMPTY(stream->input)) {
+ apr_bucket* b = APR_BRIGADE_FIRST(stream->input);
+ if (APR_BUCKET_IS_METADATA(b)) {
+ if (APR_BUCKET_IS_EOS(b)) {
+ *data_flags |= NGHTTP2_DATA_FLAG_EOF;
+ }
+ else {
+ /* we do nothing more regarding any meta here */
+ }
+ }
+ else {
+ const char *bdata = NULL;
+ apr_size_t blen = 0;
+ status = apr_bucket_read(b, &bdata, &blen, APR_BLOCK_READ);
+
+ if (status == APR_SUCCESS && blen > 0) {
+ ssize_t copylen = H2MIN(length - readlen, blen);
+ memcpy(buf, bdata, copylen);
+ buf += copylen;
+ readlen += copylen;
+ if (copylen < blen) {
+ /* We have data left in the bucket. Split it. */
+ status = apr_bucket_split(b, copylen);
+ }
+ }
+ }
+ apr_bucket_delete(b);
+ }
+
+ stream->data_sent += readlen;
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, status, stream->r, APLOGNO(03468)
+ "h2_proxy_stream(%d): request DATA %ld, %ld"
+ " total, flags=%d",
+ stream->id, (long)readlen, (long)stream->data_sent,
+ (int)*data_flags);
+ return readlen;
+ }
+ else if (APR_STATUS_IS_EAGAIN(status)) {
+ /* suspended stream, needs to be re-awakened */
+ ap_log_rerror(APLOG_MARK, APLOG_TRACE2, status, stream->r,
+ "h2_proxy_stream(%s-%d): suspending",
+ stream->session->id, stream_id);
+ stream->suspended = 1;
+ h2_proxy_iq_add(stream->session->suspended, stream->id, NULL, NULL);
+ return NGHTTP2_ERR_DEFERRED;
+ }
+ else {
+ nghttp2_submit_rst_stream(ngh2, NGHTTP2_FLAG_NONE,
+ stream_id, NGHTTP2_STREAM_CLOSED);
+ return NGHTTP2_ERR_STREAM_CLOSING;
+ }
+}
+
+#ifdef H2_NG2_INVALID_HEADER_CB
+static int on_invalid_header_cb(nghttp2_session *ngh2,
+ const nghttp2_frame *frame,
+ const uint8_t *name, size_t namelen,
+ const uint8_t *value, size_t valuelen,
+ uint8_t flags, void *user_data)
+{
+ h2_proxy_session *session = user_data;
+ if (APLOGcdebug(session->c)) {
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, APLOGNO(03469)
+ "h2_proxy_session(%s-%d): denying stream with invalid header "
+ "'%s: %s'", session->id, (int)frame->hd.stream_id,
+ apr_pstrndup(session->pool, (const char *)name, namelen),
+ apr_pstrndup(session->pool, (const char *)value, valuelen));
+ }
+ return nghttp2_submit_rst_stream(session->ngh2, NGHTTP2_FLAG_NONE,
+ frame->hd.stream_id,
+ NGHTTP2_PROTOCOL_ERROR);
+}
+#endif
+
+h2_proxy_session *h2_proxy_session_setup(const char *id, proxy_conn_rec *p_conn,
+ proxy_server_conf *conf,
+ int h2_front,
+ unsigned char window_bits_connection,
+ unsigned char window_bits_stream,
+ h2_proxy_request_done *done)
+{
+ if (!p_conn->data) {
+ apr_pool_t *pool = p_conn->scpool;
+ h2_proxy_session *session;
+ nghttp2_session_callbacks *cbs;
+ nghttp2_option *option;
+
+ session = apr_pcalloc(pool, sizeof(*session));
+ apr_pool_pre_cleanup_register(pool, p_conn, proxy_session_pre_close);
+ p_conn->data = session;
+
+ session->id = apr_pstrdup(p_conn->scpool, id);
+ session->c = p_conn->connection;
+ session->p_conn = p_conn;
+ session->conf = conf;
+ session->pool = p_conn->scpool;
+ session->state = H2_PROXYS_ST_INIT;
+ session->h2_front = h2_front;
+ session->window_bits_stream = window_bits_stream;
+ session->window_bits_connection = window_bits_connection;
+ session->streams = h2_proxy_ihash_create(pool, offsetof(h2_proxy_stream, id));
+ session->suspended = h2_proxy_iq_create(pool, 5);
+ session->done = done;
+
+ session->input = apr_brigade_create(session->pool, session->c->bucket_alloc);
+ session->output = apr_brigade_create(session->pool, session->c->bucket_alloc);
+
+ nghttp2_session_callbacks_new(&cbs);
+ nghttp2_session_callbacks_set_on_frame_recv_callback(cbs, on_frame_recv);
+ nghttp2_session_callbacks_set_on_data_chunk_recv_callback(cbs, stream_response_data);
+ nghttp2_session_callbacks_set_on_stream_close_callback(cbs, on_stream_close);
+ nghttp2_session_callbacks_set_on_header_callback(cbs, on_header);
+ nghttp2_session_callbacks_set_before_frame_send_callback(cbs, before_frame_send);
+ nghttp2_session_callbacks_set_send_callback(cbs, raw_send);
+#ifdef H2_NG2_INVALID_HEADER_CB
+ nghttp2_session_callbacks_set_on_invalid_header_callback(cbs, on_invalid_header_cb);
+#endif
+
+ nghttp2_option_new(&option);
+ nghttp2_option_set_peer_max_concurrent_streams(option, 100);
+ nghttp2_option_set_no_auto_window_update(option, 1);
+
+ nghttp2_session_client_new2(&session->ngh2, cbs, session, option);
+
+ nghttp2_option_del(option);
+ nghttp2_session_callbacks_del(cbs);
+
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, APLOGNO(03362)
+ "setup session for %s", p_conn->hostname);
+ }
+ else {
+ h2_proxy_session *session = p_conn->data;
+ apr_interval_time_t age = apr_time_now() - session->last_frame_received;
+ if (age > apr_time_from_sec(1)) {
+ session->check_ping = 1;
+ nghttp2_submit_ping(session->ngh2, 0, (const uint8_t *)"nevergonnagiveyouup");
+ }
+ }
+ return p_conn->data;
+}
+
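+/* Announce our initial SETTINGS (push disabled, stream window size) and
+ * enlarge the connection window if configured larger than the default. */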
+static apr_status_t session_start(h2_proxy_session *session)
+{
+ nghttp2_settings_entry settings[2];
+ int rv, add_conn_window;
+ apr_socket_t *s;
+
+ s = ap_get_conn_socket(session->c);
+#if (!defined(WIN32) && !defined(NETWARE)) || defined(DOXYGEN)
+ if (s) {
+ ap_sock_disable_nagle(s);
+ }
+#endif
+
+ settings[0].settings_id = NGHTTP2_SETTINGS_ENABLE_PUSH;
+ settings[0].value = 0;
+ settings[1].settings_id = NGHTTP2_SETTINGS_INITIAL_WINDOW_SIZE;
+ settings[1].value = (1 << session->window_bits_stream) - 1;
+
+ rv = nghttp2_submit_settings(session->ngh2, NGHTTP2_FLAG_NONE, settings,
+ H2_ALEN(settings));
+
+ /* If the connection window is larger than our default, trigger a WINDOW_UPDATE */
+ add_conn_window = ((1 << session->window_bits_connection) - 1 -
+ NGHTTP2_INITIAL_CONNECTION_WINDOW_SIZE);
+ if (!rv && add_conn_window != 0) {
+ rv = nghttp2_submit_window_update(session->ngh2, NGHTTP2_FLAG_NONE, 0, add_conn_window);
+ }
+ return rv? APR_EGENERAL : APR_SUCCESS;
+}
+
+static apr_status_t open_stream(h2_proxy_session *session, const char *url,
+ request_rec *r, int standalone,
+ h2_proxy_stream **pstream)
+{
+ h2_proxy_stream *stream;
+ apr_uri_t puri;
+ const char *authority, *scheme, *path;
+ apr_status_t status;
+ proxy_dir_conf *dconf;
+
+ stream = apr_pcalloc(r->pool, sizeof(*stream));
+
+ stream->pool = r->pool;
+ stream->url = url;
+ stream->r = r;
+ stream->standalone = standalone;
+ stream->session = session;
+ stream->state = H2_STREAM_ST_IDLE;
+
+ stream->input = apr_brigade_create(stream->pool, session->c->bucket_alloc);
+ stream->output = apr_brigade_create(stream->pool, session->c->bucket_alloc);
+
+ stream->req = h2_proxy_req_create(1, stream->pool, 0);
+
+ status = apr_uri_parse(stream->pool, url, &puri);
+ if (status != APR_SUCCESS)
+ return status;
+
+ scheme = (strcmp(puri.scheme, "h2")? "http" : "https");
+
+ dconf = ap_get_module_config(r->per_dir_config, &proxy_module);
+ if (dconf->preserve_host) {
+ authority = r->hostname;
+ }
+ else {
+ authority = puri.hostname;
+ if (!ap_strchr_c(authority, ':') && puri.port
+ && apr_uri_port_of_scheme(scheme) != puri.port) {
+ /* port info missing and port is not default for scheme: append */
+ authority = apr_psprintf(stream->pool, "%s:%d", authority, puri.port);
+ }
+ }
+
+ /* we need this for mapping relative uris in headers ("Link") back
+ * to local uris */
+ stream->real_server_uri = apr_psprintf(stream->pool, "%s://%s", scheme, authority);
+ stream->p_server_uri = apr_psprintf(stream->pool, "%s://%s", puri.scheme, authority);
+ path = apr_uri_unparse(stream->pool, &puri, APR_URI_UNP_OMITSITEPART);
+ h2_proxy_req_make(stream->req, stream->pool, r->method, scheme,
+ authority, path, r->headers_in);
+
+ if (dconf->add_forwarded_headers) {
+ if (PROXYREQ_REVERSE == r->proxyreq) {
+ const char *buf;
+
+ /* Add X-Forwarded-For: so that the upstream has a chance to
+ * determine where the original request came from.
+ */
+ apr_table_mergen(stream->req->headers, "X-Forwarded-For",
+ r->useragent_ip);
+
+ /* Add X-Forwarded-Host: so that upstream knows what the
+ * original request hostname was.
+ */
+ if ((buf = apr_table_get(r->headers_in, "Host"))) {
+ apr_table_mergen(stream->req->headers, "X-Forwarded-Host", buf);
+ }
+
+ /* Add X-Forwarded-Server: so that upstream knows what the
+ * name of this proxy server is (if there are more than one)
+ * XXX: This duplicates Via: - do we strictly need it?
+ */
+ apr_table_mergen(stream->req->headers, "X-Forwarded-Server",
+ r->server->server_hostname);
+ }
+ }
+
+ /* Tuck away all already existing cookies */
+ stream->saves = apr_table_make(r->pool, 2);
+ apr_table_do(add_header, stream->saves, r->headers_out, "Set-Cookie", NULL);
+
+ *pstream = stream;
+
+ return APR_SUCCESS;
+}
+
+static apr_status_t submit_stream(h2_proxy_session *session, h2_proxy_stream *stream)
+{
+ h2_proxy_ngheader *hd;
+ nghttp2_data_provider *pp = NULL;
+ nghttp2_data_provider provider;
+ int rv, may_have_request_body = 1;
+ apr_status_t status;
+
+ hd = h2_proxy_util_nghd_make_req(stream->pool, stream->req);
+
+ /* If we expect a 100-continue response, we must refrain from reading
+ any input until we get it. Reading the input will possibly trigger the
+ HTTP_IN filter to generate the 100-continue itself. */
+ if (stream->waiting_on_100 || stream->waiting_on_ping) {
+ /* make a small test if we get an EOF/EOS immediately */
+ status = ap_get_brigade(stream->r->input_filters, stream->input,
+ AP_MODE_READBYTES, APR_NONBLOCK_READ,
+ APR_BUCKET_BUFF_SIZE);
+ may_have_request_body = APR_STATUS_IS_EAGAIN(status)
+ || (status == APR_SUCCESS
+ && !APR_BUCKET_IS_EOS(APR_BRIGADE_FIRST(stream->input)));
+ }
+
+ if (may_have_request_body) {
+ provider.source.fd = 0;
+ provider.source.ptr = NULL;
+ provider.read_callback = stream_request_data;
+ pp = &provider;
+ }
+
+ rv = nghttp2_submit_request(session->ngh2, NULL,
+ hd->nv, hd->nvlen, pp, stream);
+
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, APLOGNO(03363)
+ "h2_proxy_session(%s): submit %s%s -> %d",
+ session->id, stream->req->authority, stream->req->path,
+ rv);
+ if (rv > 0) {
+ stream->id = rv;
+ stream->state = H2_STREAM_ST_OPEN;
+ h2_proxy_ihash_add(session->streams, stream);
+ dispatch_event(session, H2_PROXYS_EV_STREAM_SUBMITTED, rv, NULL);
+
+ return APR_SUCCESS;
+ }
+ return APR_EGENERAL;
+}
+
+static apr_status_t feed_brigade(h2_proxy_session *session, apr_bucket_brigade *bb)
+{
+ apr_status_t status = APR_SUCCESS;
+ apr_size_t readlen = 0;
+ ssize_t n;
+
+ while (status == APR_SUCCESS && !APR_BRIGADE_EMPTY(bb)) {
+ apr_bucket* b = APR_BRIGADE_FIRST(bb);
+
+ if (APR_BUCKET_IS_METADATA(b)) {
+ /* nop */
+ }
+ else {
+ const char *bdata = NULL;
+ apr_size_t blen = 0;
+
+ status = apr_bucket_read(b, &bdata, &blen, APR_BLOCK_READ);
+ if (status == APR_SUCCESS && blen > 0) {
+ n = nghttp2_session_mem_recv(session->ngh2, (const uint8_t *)bdata, blen);
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, session->c,
+ "h2_proxy_session(%s): feeding %ld bytes -> %ld",
+ session->id, (long)blen, (long)n);
+ if (n < 0) {
+ if (nghttp2_is_fatal((int)n)) {
+ status = APR_EGENERAL;
+ }
+ }
+ else {
+ readlen += n;
+ if (n < blen) {
+ apr_bucket_split(b, n);
+ }
+ }
+ }
+ }
+ apr_bucket_delete(b);
+ }
+
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, session->c,
+ "h2_proxy_session(%s): fed %ld bytes of input to session",
+ session->id, (long)readlen);
+ if (readlen == 0 && status == APR_SUCCESS) {
+ return APR_EAGAIN;
+ }
+ return status;
+}
+
+static apr_status_t h2_proxy_session_read(h2_proxy_session *session, int block,
+ apr_interval_time_t timeout)
+{
+ apr_status_t status = APR_SUCCESS;
+
+ if (APR_BRIGADE_EMPTY(session->input)) {
+ apr_socket_t *socket = NULL;
+ apr_time_t save_timeout = -1;
+
+ if (block) {
+ socket = ap_get_conn_socket(session->c);
+ if (socket) {
+ apr_socket_timeout_get(socket, &save_timeout);
+ apr_socket_timeout_set(socket, timeout);
+ }
+ else {
+ /* cannot block on timeout */
+ ap_log_cerror(APLOG_MARK, APLOG_WARNING, 0, session->c, APLOGNO(03379)
+ "h2_proxy_session(%s): unable to get conn socket",
+ session->id);
+ return APR_ENOTIMPL;
+ }
+ }
+
+ status = ap_get_brigade(session->c->input_filters, session->input,
+ AP_MODE_READBYTES,
+ block? APR_BLOCK_READ : APR_NONBLOCK_READ,
+ 64 * 1024);
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE3, status, session->c,
+ "h2_proxy_session(%s): read from conn", session->id);
+ if (socket && save_timeout != -1) {
+ apr_socket_timeout_set(socket, save_timeout);
+ }
+ }
+
+ if (status == APR_SUCCESS) {
+ status = feed_brigade(session, session->input);
+ }
+ else if (APR_STATUS_IS_TIMEUP(status)) {
+ /* nop */
+ }
+ else if (!APR_STATUS_IS_EAGAIN(status)) {
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, session->c, APLOGNO(03380)
+ "h2_proxy_session(%s): read error", session->id);
+ dispatch_event(session, H2_PROXYS_EV_CONN_ERROR, status, NULL);
+ }
+
+ return status;
+}
+
+apr_status_t h2_proxy_session_submit(h2_proxy_session *session,
+ const char *url, request_rec *r,
+ int standalone)
+{
+ h2_proxy_stream *stream;
+ apr_status_t status;
+
+ status = open_stream(session, url, r, standalone, &stream);
+ if (status == APR_SUCCESS) {
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(03381)
+ "process stream(%d): %s %s%s, original: %s",
+ stream->id, stream->req->method,
+ stream->req->authority, stream->req->path,
+ r->the_request);
+ status = submit_stream(session, stream);
+ }
+ return status;
+}
+
+static void stream_resume(h2_proxy_stream *stream)
+{
+ h2_proxy_session *session = stream->session;
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, session->c,
+ "h2_proxy_stream(%s-%d): resuming",
+ session->id, stream->id);
+ stream->suspended = 0;
+ h2_proxy_iq_remove(session->suspended, stream->id);
+ nghttp2_session_resume_data(session->ngh2, stream->id);
+ dispatch_event(session, H2_PROXYS_EV_STREAM_RESUMED, 0, NULL);
+}
+
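+/* Check all suspended streams for newly readable request body data and
+ * resume the first one that has input (or whose read failed). */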
+static apr_status_t check_suspended(h2_proxy_session *session)
+{
+ h2_proxy_stream *stream;
+ int i, stream_id;
+ apr_status_t status;
+
+ for (i = 0; i < session->suspended->nelts; ++i) {
+ stream_id = session->suspended->elts[i];
+ stream = nghttp2_session_get_stream_user_data(session->ngh2, stream_id);
+ if (stream) {
+ if (stream->waiting_on_100 || stream->waiting_on_ping) {
+ status = APR_EAGAIN;
+ }
+ else {
+ status = ap_get_brigade(stream->r->input_filters, stream->input,
+ AP_MODE_READBYTES, APR_NONBLOCK_READ,
+ APR_BUCKET_BUFF_SIZE);
+ }
+ if (status == APR_SUCCESS && !APR_BRIGADE_EMPTY(stream->input)) {
+ stream_resume(stream);
+ check_suspended(session);
+ return APR_SUCCESS;
+ }
+ else if (status != APR_SUCCESS && !APR_STATUS_IS_EAGAIN(status)) {
+ ap_log_cerror(APLOG_MARK, APLOG_WARNING, status, session->c,
+ APLOGNO(03382) "h2_proxy_stream(%s-%d): check input",
+ session->id, stream_id);
+ stream_resume(stream);
+ check_suspended(session);
+ return APR_SUCCESS;
+ }
+ }
+ else {
+ /* gone? */
+ h2_proxy_iq_remove(session->suspended, stream_id);
+ check_suspended(session);
+ return APR_SUCCESS;
+ }
+ }
+ return APR_EAGAIN;
+}
+
+static apr_status_t session_shutdown(h2_proxy_session *session, int reason,
+ const char *msg)
+{
+ apr_status_t status = APR_SUCCESS;
+ const char *err = msg;
+
+ ap_assert(session);
+ if (!err && reason) {
+ err = nghttp2_strerror(reason);
+ }
+ nghttp2_submit_goaway(session->ngh2, NGHTTP2_FLAG_NONE, 0,
+ reason, (uint8_t*)err, err? strlen(err):0);
+ status = nghttp2_session_send(session->ngh2);
+ dispatch_event(session, H2_PROXYS_EV_LOCAL_GOAWAY, reason, err);
+ return status;
+}
+
+
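+/* Names for the session state machine; order must match h2_proxys_state. */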
+static const char *StateNames[] = {
+ "INIT", /* H2_PROXYS_ST_INIT */
+ "DONE", /* H2_PROXYS_ST_DONE */
+ "IDLE", /* H2_PROXYS_ST_IDLE */
+ "BUSY", /* H2_PROXYS_ST_BUSY */
+ "WAIT", /* H2_PROXYS_ST_WAIT */
+ "LSHUTDOWN", /* H2_PROXYS_ST_LOCAL_SHUTDOWN */
+ "RSHUTDOWN", /* H2_PROXYS_ST_REMOTE_SHUTDOWN */
+};
+
+static const char *state_name(h2_proxys_state state)
+{
+ if (state >= (sizeof(StateNames)/sizeof(StateNames[0]))) {
+ return "unknown";
+ }
+ return StateNames[state];
+}
+
+static int is_accepting_streams(h2_proxy_session *session)
+{
+ switch (session->state) {
+ case H2_PROXYS_ST_IDLE:
+ case H2_PROXYS_ST_BUSY:
+ case H2_PROXYS_ST_WAIT:
+ return 1;
+ default:
+ return 0;
+ }
+}
+
+static void transit(h2_proxy_session *session, const char *action,
+ h2_proxys_state nstate)
+{
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, APLOGNO(03345)
+ "h2_proxy_session(%s): transit [%s] -- %s --> [%s]", session->id,
+ state_name(session->state), action, state_name(nstate));
+ session->state = nstate;
+}
+
+static void ev_init(h2_proxy_session *session, int arg, const char *msg)
+{
+ switch (session->state) {
+ case H2_PROXYS_ST_INIT:
+ if (h2_proxy_ihash_empty(session->streams)) {
+ transit(session, "init", H2_PROXYS_ST_IDLE);
+ }
+ else {
+ transit(session, "init", H2_PROXYS_ST_BUSY);
+ }
+ break;
+
+ default:
+ /* nop */
+ break;
+ }
+}
+
+static void ev_local_goaway(h2_proxy_session *session, int arg, const char *msg)
+{
+ switch (session->state) {
+ case H2_PROXYS_ST_LOCAL_SHUTDOWN:
+ /* already did that? */
+ break;
+ case H2_PROXYS_ST_IDLE:
+ case H2_PROXYS_ST_REMOTE_SHUTDOWN:
+ /* all done */
+ transit(session, "local goaway", H2_PROXYS_ST_DONE);
+ break;
+ default:
+ transit(session, "local goaway", H2_PROXYS_ST_LOCAL_SHUTDOWN);
+ break;
+ }
+}
+
+static void ev_remote_goaway(h2_proxy_session *session, int arg, const char *msg)
+{
+ switch (session->state) {
+ case H2_PROXYS_ST_REMOTE_SHUTDOWN:
+ /* already received that? */
+ break;
+ case H2_PROXYS_ST_IDLE:
+ case H2_PROXYS_ST_LOCAL_SHUTDOWN:
+ /* all done */
+ transit(session, "remote goaway", H2_PROXYS_ST_DONE);
+ break;
+ default:
+ transit(session, "remote goaway", H2_PROXYS_ST_REMOTE_SHUTDOWN);
+ break;
+ }
+}
+
+static void ev_conn_error(h2_proxy_session *session, int arg, const char *msg)
+{
+ switch (session->state) {
+ case H2_PROXYS_ST_INIT:
+ case H2_PROXYS_ST_DONE:
+ case H2_PROXYS_ST_LOCAL_SHUTDOWN:
+ /* just leave */
+ transit(session, "conn error", H2_PROXYS_ST_DONE);
+ break;
+
+ default:
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, arg, session->c,
+ "h2_proxy_session(%s): conn error -> shutdown", session->id);
+ session_shutdown(session, arg, msg);
+ break;
+ }
+}
+
+static void ev_proto_error(h2_proxy_session *session, int arg, const char *msg)
+{
+ switch (session->state) {
+ case H2_PROXYS_ST_DONE:
+ case H2_PROXYS_ST_LOCAL_SHUTDOWN:
+ /* just leave */
+ transit(session, "proto error", H2_PROXYS_ST_DONE);
+ break;
+
+ default:
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, session->c,
+ "h2_proxy_session(%s): proto error -> shutdown", session->id);
+ session_shutdown(session, arg, msg);
+ break;
+ }
+}
+
+static void ev_conn_timeout(h2_proxy_session *session, int arg, const char *msg)
+{
+ switch (session->state) {
+ case H2_PROXYS_ST_LOCAL_SHUTDOWN:
+ transit(session, "conn timeout", H2_PROXYS_ST_DONE);
+ break;
+ default:
+ session_shutdown(session, arg, msg);
+ transit(session, "conn timeout", H2_PROXYS_ST_DONE);
+ break;
+ }
+}
+
+static void ev_no_io(h2_proxy_session *session, int arg, const char *msg)
+{
+ switch (session->state) {
+ case H2_PROXYS_ST_BUSY:
+ case H2_PROXYS_ST_LOCAL_SHUTDOWN:
+ case H2_PROXYS_ST_REMOTE_SHUTDOWN:
+ /* nothing for input and output to do. If we remain
+ * in this state, we go into a tight loop and suck up
+ * CPU cycles. Ideally, we'd like to do a blocking read, but that
+ * is not possible if we have scheduled tasks and wait
+ * for them to produce something. */
+ if (h2_proxy_ihash_empty(session->streams)) {
+ if (!is_accepting_streams(session)) {
+ /* We are no longer accepting new streams and have
+ * finished processing existing ones. Time to leave. */
+ session_shutdown(session, arg, msg);
+ transit(session, "no io", H2_PROXYS_ST_DONE);
+ }
+ else {
+ /* When we have no streams, no task events are possible,
+ * switch to blocking reads */
+ transit(session, "no io", H2_PROXYS_ST_IDLE);
+ }
+ }
+ else {
+ /* Unable to do blocking reads, as we wait on events from
+ * task processing in other threads. Do a busy wait with
+ * backoff timer. */
+ transit(session, "no io", H2_PROXYS_ST_WAIT);
+ }
+ break;
+ default:
+ /* nop */
+ break;
+ }
+}
+
+static void ev_stream_submitted(h2_proxy_session *session, int stream_id,
+ const char *msg)
+{
+ switch (session->state) {
+ case H2_PROXYS_ST_IDLE:
+ case H2_PROXYS_ST_WAIT:
+ transit(session, "stream submitted", H2_PROXYS_ST_BUSY);
+ break;
+ default:
+ /* nop */
+ break;
+ }
+}
+
+static void ev_stream_done(h2_proxy_session *session, int stream_id,
+ const char *msg)
+{
+ h2_proxy_stream *stream;
+
+ stream = nghttp2_session_get_stream_user_data(session->ngh2, stream_id);
+ if (stream) {
+ int touched = (stream->data_sent ||
+ stream_id <= session->last_stream_id);
+ apr_status_t status = (stream->error_code == 0)? APR_SUCCESS : APR_EINVAL;
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, APLOGNO(03364)
+ "h2_proxy_sesssion(%s): stream(%d) closed "
+ "(touched=%d, error=%d)",
+ session->id, stream_id, touched, stream->error_code);
+
+ if (status != APR_SUCCESS) {
+ stream->r->status = 500;
+ }
+ else if (!stream->data_received) {
+ apr_bucket *b;
+ /* if the response had no body, this is the time to flush
+ * an empty brigade, which will also write the response
+ * headers */
+ h2_proxy_stream_end_headers_out(stream);
+ stream->data_received = 1;
+ b = apr_bucket_flush_create(stream->r->connection->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(stream->output, b);
+ b = apr_bucket_eos_create(stream->r->connection->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(stream->output, b);
+ ap_pass_brigade(stream->r->output_filters, stream->output);
+ }
+
+ stream->state = H2_STREAM_ST_CLOSED;
+ h2_proxy_ihash_remove(session->streams, stream_id);
+ h2_proxy_iq_remove(session->suspended, stream_id);
+ if (session->done) {
+ session->done(session, stream->r, status, touched);
+ }
+ }
+
+ switch (session->state) {
+ default:
+ /* nop */
+ break;
+ }
+}
+
+static void ev_stream_resumed(h2_proxy_session *session, int arg, const char *msg)
+{
+ switch (session->state) {
+ case H2_PROXYS_ST_WAIT:
+ transit(session, "stream resumed", H2_PROXYS_ST_BUSY);
+ break;
+ default:
+ /* nop */
+ break;
+ }
+}
+
+static void ev_data_read(h2_proxy_session *session, int arg, const char *msg)
+{
+ switch (session->state) {
+ case H2_PROXYS_ST_IDLE:
+ case H2_PROXYS_ST_WAIT:
+ transit(session, "data read", H2_PROXYS_ST_BUSY);
+ break;
+ default:
+ /* nop */
+ break;
+ }
+}
+
+static void ev_ngh2_done(h2_proxy_session *session, int arg, const char *msg)
+{
+ switch (session->state) {
+ case H2_PROXYS_ST_DONE:
+ /* nop */
+ break;
+ default:
+ transit(session, "nghttp2 done", H2_PROXYS_ST_DONE);
+ break;
+ }
+}
+
+static void ev_pre_close(h2_proxy_session *session, int arg, const char *msg)
+{
+ switch (session->state) {
+ case H2_PROXYS_ST_DONE:
+ case H2_PROXYS_ST_LOCAL_SHUTDOWN:
+ /* nop */
+ break;
+ default:
+ session_shutdown(session, arg, msg);
+ break;
+ }
+}
+
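+/* Central event dispatch of the proxy session state machine. */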
+static void dispatch_event(h2_proxy_session *session, h2_proxys_event_t ev,
+ int arg, const char *msg)
+{
+ switch (ev) {
+ case H2_PROXYS_EV_INIT:
+ ev_init(session, arg, msg);
+ break;
+ case H2_PROXYS_EV_LOCAL_GOAWAY:
+ ev_local_goaway(session, arg, msg);
+ break;
+ case H2_PROXYS_EV_REMOTE_GOAWAY:
+ ev_remote_goaway(session, arg, msg);
+ break;
+ case H2_PROXYS_EV_CONN_ERROR:
+ ev_conn_error(session, arg, msg);
+ break;
+ case H2_PROXYS_EV_PROTO_ERROR:
+ ev_proto_error(session, arg, msg);
+ break;
+ case H2_PROXYS_EV_CONN_TIMEOUT:
+ ev_conn_timeout(session, arg, msg);
+ break;
+ case H2_PROXYS_EV_NO_IO:
+ ev_no_io(session, arg, msg);
+ break;
+ case H2_PROXYS_EV_STREAM_SUBMITTED:
+ ev_stream_submitted(session, arg, msg);
+ break;
+ case H2_PROXYS_EV_STREAM_DONE:
+ ev_stream_done(session, arg, msg);
+ break;
+ case H2_PROXYS_EV_STREAM_RESUMED:
+ ev_stream_resumed(session, arg, msg);
+ break;
+ case H2_PROXYS_EV_DATA_READ:
+ ev_data_read(session, arg, msg);
+ break;
+ case H2_PROXYS_EV_NGH2_DONE:
+ ev_ngh2_done(session, arg, msg);
+ break;
+ case H2_PROXYS_EV_PRE_CLOSE:
+ ev_pre_close(session, arg, msg);
+ break;
+ default:
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, session->c,
+ "h2_proxy_session(%s): unknown event %d",
+ session->id, ev);
+ break;
+ }
+}
+
+static int send_loop(h2_proxy_session *session)
+{
+ while (nghttp2_session_want_write(session->ngh2)) {
+ int rv = nghttp2_session_send(session->ngh2);
+ if (rv < 0 && nghttp2_is_fatal(rv)) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, session->c,
+ "h2_proxy_session(%s): write, rv=%d", session->id, rv);
+ dispatch_event(session, H2_PROXYS_EV_CONN_ERROR, rv, NULL);
+ break;
+ }
+ return 1;
+ }
+ return 0;
+}
+
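+/* Run one iteration of the session: send pending frames, read available
+ * input and transition states. Returns APR_SUCCESS to be called again,
+ * APR_EOF once the session is done. */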
+apr_status_t h2_proxy_session_process(h2_proxy_session *session)
+{
+ apr_status_t status;
+ int have_written = 0, have_read = 0;
+
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, session->c,
+ "h2_proxy_session(%s): process", session->id);
+
+run_loop:
+ switch (session->state) {
+ case H2_PROXYS_ST_INIT:
+ status = session_start(session);
+ if (status == APR_SUCCESS) {
+ dispatch_event(session, H2_PROXYS_EV_INIT, 0, NULL);
+ goto run_loop;
+ }
+ else {
+ dispatch_event(session, H2_PROXYS_EV_CONN_ERROR, status, NULL);
+ }
+ break;
+
+ case H2_PROXYS_ST_BUSY:
+ case H2_PROXYS_ST_LOCAL_SHUTDOWN:
+ case H2_PROXYS_ST_REMOTE_SHUTDOWN:
+ have_written = send_loop(session);
+
+ if (nghttp2_session_want_read(session->ngh2)) {
+ status = h2_proxy_session_read(session, 0, 0);
+ if (status == APR_SUCCESS) {
+ have_read = 1;
+ }
+ }
+
+ if (!have_written && !have_read
+ && !nghttp2_session_want_write(session->ngh2)) {
+ dispatch_event(session, H2_PROXYS_EV_NO_IO, 0, NULL);
+ goto run_loop;
+ }
+ break;
+
+ case H2_PROXYS_ST_WAIT:
+ if (check_suspended(session) == APR_EAGAIN) {
+ /* no stream has been resumed. Do a blocking read with
+ * ever-increasing timeouts... */
+ if (session->wait_timeout < 25) {
+ session->wait_timeout = 25;
+ }
+ else {
+ session->wait_timeout = H2MIN(apr_time_from_msec(100),
+ 2*session->wait_timeout);
+ }
+
+ status = h2_proxy_session_read(session, 1, session->wait_timeout);
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE3, status, session->c,
+ APLOGNO(03365)
+ "h2_proxy_session(%s): WAIT read, timeout=%fms",
+ session->id, (float)session->wait_timeout/1000.0);
+ if (status == APR_SUCCESS) {
+ have_read = 1;
+ dispatch_event(session, H2_PROXYS_EV_DATA_READ, 0, NULL);
+ }
+ else if (APR_STATUS_IS_TIMEUP(status)
+ || APR_STATUS_IS_EAGAIN(status)) {
+ /* go back to checking all inputs again */
+ transit(session, "wait cycle", H2_PROXYS_ST_BUSY);
+ }
+ }
+ break;
+
+ case H2_PROXYS_ST_IDLE:
+ break;
+
+ case H2_PROXYS_ST_DONE: /* done, session terminated */
+ return APR_EOF;
+
+ default:
+ ap_log_cerror(APLOG_MARK, APLOG_ERR, APR_EGENERAL, session->c,
+ APLOGNO(03346)"h2_proxy_session(%s): unknown state %d",
+ session->id, session->state);
+ dispatch_event(session, H2_PROXYS_EV_PROTO_ERROR, 0, NULL);
+ break;
+ }
+
+
+ if (have_read || have_written) {
+ session->wait_timeout = 0;
+ }
+
+ if (!nghttp2_session_want_read(session->ngh2)
+ && !nghttp2_session_want_write(session->ngh2)) {
+ dispatch_event(session, H2_PROXYS_EV_NGH2_DONE, 0, NULL);
+ }
+
+ return APR_SUCCESS; /* needs to be called again */
+}
+
+typedef struct {
+ h2_proxy_session *session;
+ h2_proxy_request_done *done;
+} cleanup_iter_ctx;
+
+static int cancel_iter(void *udata, void *val)
+{
+ cleanup_iter_ctx *ctx = udata;
+ h2_proxy_stream *stream = val;
+ nghttp2_submit_rst_stream(ctx->session->ngh2, NGHTTP2_FLAG_NONE,
+ stream->id, 0);
+ return 1;
+}
+
+void h2_proxy_session_cancel_all(h2_proxy_session *session)
+{
+ if (!h2_proxy_ihash_empty(session->streams)) {
+ cleanup_iter_ctx ctx;
+ ctx.session = session;
+ ctx.done = session->done;
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, APLOGNO(03366)
+ "h2_proxy_session(%s): cancel %d streams",
+ session->id, (int)h2_proxy_ihash_count(session->streams));
+ h2_proxy_ihash_iter(session->streams, cancel_iter, &ctx);
+ session_shutdown(session, 0, NULL);
+ }
+}
+
+static int done_iter(void *udata, void *val)
+{
+ cleanup_iter_ctx *ctx = udata;
+ h2_proxy_stream *stream = val;
+ int touched = (stream->data_sent ||
+ stream->id <= ctx->session->last_stream_id);
+ ctx->done(ctx->session, stream->r, APR_ECONNABORTED, touched);
+ return 1;
+}
+
+void h2_proxy_session_cleanup(h2_proxy_session *session,
+ h2_proxy_request_done *done)
+{
+ if (!h2_proxy_ihash_empty(session->streams)) {
+ cleanup_iter_ctx ctx;
+ ctx.session = session;
+ ctx.done = done;
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, APLOGNO(03519)
+ "h2_proxy_session(%s): terminated, %d streams unfinished",
+ session->id, (int)h2_proxy_ihash_count(session->streams));
+ h2_proxy_ihash_iter(session->streams, done_iter, &ctx);
+ h2_proxy_ihash_clear(session->streams);
+ }
+}
+
+static int ping_arrived_iter(void *udata, void *val)
+{
+ h2_proxy_stream *stream = val;
+ if (stream->waiting_on_ping) {
+ stream->waiting_on_ping = 0;
+ stream_resume(stream);
+ }
+ return 1;
+}
+
+static void ping_arrived(h2_proxy_session *session)
+{
+ if (!h2_proxy_ihash_empty(session->streams)) {
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, APLOGNO(03470)
+ "h2_proxy_session(%s): ping arrived, unblocking streams",
+ session->id);
+ h2_proxy_ihash_iter(session->streams, ping_arrived_iter, &session);
+ }
+}
+
+typedef struct {
+ h2_proxy_session *session;
+ conn_rec *c;
+ apr_off_t bytes;
+ int updated;
+} win_update_ctx;
+
+static int win_update_iter(void *udata, void *val)
+{
+ win_update_ctx *ctx = udata;
+ h2_proxy_stream *stream = val;
+
+ if (stream->r && stream->r->connection == ctx->c) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, ctx->session->c,
+ "h2_proxy_session(%s-%d): win_update %ld bytes",
+ ctx->session->id, (int)stream->id, (long)ctx->bytes);
+ nghttp2_session_consume(ctx->session->ngh2, stream->id, ctx->bytes);
+ ctx->updated = 1;
+ return 0;
+ }
+ return 1;
+}
+
+
+void h2_proxy_session_update_window(h2_proxy_session *session,
+ conn_rec *c, apr_off_t bytes)
+{
+ if (!h2_proxy_ihash_empty(session->streams)) {
+ win_update_ctx ctx;
+ ctx.session = session;
+ ctx.c = c;
+ ctx.bytes = bytes;
+ ctx.updated = 0;
+ h2_proxy_ihash_iter(session->streams, win_update_iter, &ctx);
+
+ if (!ctx.updated) {
+ /* could not find the stream any more, possibly closed, update
+ * the connection window at least */
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, session->c,
+ "h2_proxy_session(%s): win_update conn %ld bytes",
+ session->id, (long)bytes);
+ nghttp2_session_consume_connection(session->ngh2, (size_t)bytes);
+ }
+ }
+}
+
diff --git a/modules/http2/h2_proxy_session.h b/modules/http2/h2_proxy_session.h
new file mode 100644
index 0000000..ecebb61
--- /dev/null
+++ b/modules/http2/h2_proxy_session.h
@@ -0,0 +1,128 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef h2_proxy_session_h
+#define h2_proxy_session_h
+
+#define H2_ALEN(a) (sizeof(a)/sizeof((a)[0]))
+
+#include <nghttp2/nghttp2.h>
+
+struct h2_proxy_iqueue;
+struct h2_proxy_ihash_t;
+
+typedef enum {
+ H2_STREAM_ST_IDLE,
+ H2_STREAM_ST_OPEN,
+ H2_STREAM_ST_RESV_LOCAL,
+ H2_STREAM_ST_RESV_REMOTE,
+ H2_STREAM_ST_CLOSED_INPUT,
+ H2_STREAM_ST_CLOSED_OUTPUT,
+ H2_STREAM_ST_CLOSED,
+} h2_proxy_stream_state_t;
+
+typedef enum {
+ H2_PROXYS_ST_INIT, /* send initial SETTINGS, etc. */
+ H2_PROXYS_ST_DONE, /* finished, connection close */
+ H2_PROXYS_ST_IDLE, /* no streams to process */
+ H2_PROXYS_ST_BUSY, /* read/write without stop */
+ H2_PROXYS_ST_WAIT, /* waiting for tasks reporting back */
+ H2_PROXYS_ST_LOCAL_SHUTDOWN, /* we announced GOAWAY */
+ H2_PROXYS_ST_REMOTE_SHUTDOWN, /* client announced GOAWAY */
+} h2_proxys_state;
+
+typedef enum {
+ H2_PROXYS_EV_INIT, /* session was initialized */
+    H2_PROXYS_EV_LOCAL_GOAWAY, /* we sent a GOAWAY */
+    H2_PROXYS_EV_REMOTE_GOAWAY, /* remote sent us a GOAWAY */
+ H2_PROXYS_EV_CONN_ERROR, /* connection error */
+ H2_PROXYS_EV_PROTO_ERROR, /* protocol error */
+ H2_PROXYS_EV_CONN_TIMEOUT, /* connection timeout */
+ H2_PROXYS_EV_NO_IO, /* nothing has been read or written */
+ H2_PROXYS_EV_STREAM_SUBMITTED, /* stream has been submitted */
+ H2_PROXYS_EV_STREAM_DONE, /* stream has been finished */
+ H2_PROXYS_EV_STREAM_RESUMED, /* stream signalled availability of headers/data */
+ H2_PROXYS_EV_DATA_READ, /* connection data has been read */
+    H2_PROXYS_EV_NGH2_DONE, /* nghttp2 wants neither to read nor to write anything */
+ H2_PROXYS_EV_PRE_CLOSE, /* connection will close after this */
+} h2_proxys_event_t;
+
+
+typedef struct h2_proxy_session h2_proxy_session;
+typedef void h2_proxy_request_done(h2_proxy_session *s, request_rec *r,
+ apr_status_t status, int touched);
+
+struct h2_proxy_session {
+ const char *id;
+ conn_rec *c;
+ proxy_conn_rec *p_conn;
+ proxy_server_conf *conf;
+ apr_pool_t *pool;
+ nghttp2_session *ngh2; /* the nghttp2 session itself */
+
+ unsigned int aborted : 1;
+ unsigned int check_ping : 1;
+ unsigned int h2_front : 1; /* if front-end connection is HTTP/2 */
+
+ h2_proxy_request_done *done;
+ void *user_data;
+
+ unsigned char window_bits_stream;
+ unsigned char window_bits_connection;
+
+ h2_proxys_state state;
+ apr_interval_time_t wait_timeout;
+
+ struct h2_proxy_ihash_t *streams;
+ struct h2_proxy_iqueue *suspended;
+ apr_size_t remote_max_concurrent;
+ int last_stream_id; /* last stream id processed by backend, or 0 */
+ apr_time_t last_frame_received;
+
+ apr_bucket_brigade *input;
+ apr_bucket_brigade *output;
+};
+
+h2_proxy_session *h2_proxy_session_setup(const char *id, proxy_conn_rec *p_conn,
+ proxy_server_conf *conf,
+ int h2_front,
+ unsigned char window_bits_connection,
+ unsigned char window_bits_stream,
+ h2_proxy_request_done *done);
+
+apr_status_t h2_proxy_session_submit(h2_proxy_session *s, const char *url,
+ request_rec *r, int standalone);
+
+/**
+ * Perform a step in processing the proxy session. Will return after
+ * one read/write cycle and indicate the session status via the return code.
+ * @param s the session to process
+ * @return APR_EAGAIN when processing needs to be invoked again
+ * APR_SUCCESS when all streams have been processed, session still live
+ * APR_EOF when the session has been terminated
+ */
+apr_status_t h2_proxy_session_process(h2_proxy_session *s);
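+
+/* Illustrative driver loop (a sketch only, not part of this API): a caller
+ * would typically keep stepping the session while it reports progress and
+ * stop once it signals termination; how errors other than APR_EOF are
+ * handled is up to the caller.
+ *
+ *     apr_status_t status;
+ *     do {
+ *         status = h2_proxy_session_process(session);
+ *     } while (status == APR_SUCCESS || APR_STATUS_IS_EAGAIN(status));
+ *     if (status != APR_EOF) {
+ *         ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, session->c,
+ *                       "proxy session ended with error");
+ *     }
+ */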
+
+void h2_proxy_session_cancel_all(h2_proxy_session *s);
+
+void h2_proxy_session_cleanup(h2_proxy_session *s, h2_proxy_request_done *done);
+
+void h2_proxy_session_update_window(h2_proxy_session *s,
+ conn_rec *c, apr_off_t bytes);
+
+#define H2_PROXY_REQ_URL_NOTE "h2-proxy-req-url"
+
+#endif /* h2_proxy_session_h */
diff --git a/modules/http2/h2_proxy_util.c b/modules/http2/h2_proxy_util.c
new file mode 100644
index 0000000..bd45294
--- /dev/null
+++ b/modules/http2/h2_proxy_util.c
@@ -0,0 +1,1335 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <assert.h>
+#include <apr_lib.h>
+#include <apr_strings.h>
+#include <apr_thread_mutex.h>
+#include <apr_thread_cond.h>
+
+#include <httpd.h>
+#include <http_core.h>
+#include <http_log.h>
+#include <http_request.h>
+#include <mod_proxy.h>
+
+#include <nghttp2/nghttp2.h>
+
+#include "h2.h"
+#include "h2_proxy_util.h"
+
+APLOG_USE_MODULE(proxy_http2);
+
+/* h2_proxy_log2(n) iff n is a power of 2 */
+unsigned char h2_proxy_log2(int n)
+{
+ int lz = 0;
+ if (!n) {
+ return 0;
+ }
+ if (!(n & 0xffff0000u)) {
+ lz += 16;
+ n = (n << 16);
+ }
+ if (!(n & 0xff000000u)) {
+ lz += 8;
+ n = (n << 8);
+ }
+ if (!(n & 0xf0000000u)) {
+ lz += 4;
+ n = (n << 4);
+ }
+ if (!(n & 0xc0000000u)) {
+ lz += 2;
+ n = (n << 2);
+ }
+ if (!(n & 0x80000000u)) {
+ lz += 1;
+ }
+
+ return 31 - lz;
+}
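+
+/* Examples (illustrative): the result is the floor of log2(n), so it equals
+ * log2(n) exactly only for powers of two:
+ *     h2_proxy_log2(1)  == 0
+ *     h2_proxy_log2(16) == 4
+ *     h2_proxy_log2(17) == 4
+ *     h2_proxy_log2(0)  == 0   (special-cased above)
+ */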
+
+/*******************************************************************************
+ * ihash - hash for structs with int identifier
+ ******************************************************************************/
+struct h2_proxy_ihash_t {
+ apr_hash_t *hash;
+ size_t ioff;
+};
+
+static unsigned int ihash(const char *key, apr_ssize_t *klen)
+{
+ return (unsigned int)(*((int*)key));
+}
+
+h2_proxy_ihash_t *h2_proxy_ihash_create(apr_pool_t *pool, size_t offset_of_int)
+{
+ h2_proxy_ihash_t *ih = apr_pcalloc(pool, sizeof(h2_proxy_ihash_t));
+ ih->hash = apr_hash_make_custom(pool, ihash);
+ ih->ioff = offset_of_int;
+ return ih;
+}
+
+size_t h2_proxy_ihash_count(h2_proxy_ihash_t *ih)
+{
+ return apr_hash_count(ih->hash);
+}
+
+int h2_proxy_ihash_empty(h2_proxy_ihash_t *ih)
+{
+ return apr_hash_count(ih->hash) == 0;
+}
+
+void *h2_proxy_ihash_get(h2_proxy_ihash_t *ih, int id)
+{
+ return apr_hash_get(ih->hash, &id, sizeof(id));
+}
+
+typedef struct {
+ h2_proxy_ihash_iter_t *iter;
+ void *ctx;
+} iter_ctx;
+
+static int ihash_iter(void *ctx, const void *key, apr_ssize_t klen,
+ const void *val)
+{
+ iter_ctx *ictx = ctx;
+ return ictx->iter(ictx->ctx, (void*)val); /* why is this passed const?*/
+}
+
+int h2_proxy_ihash_iter(h2_proxy_ihash_t *ih, h2_proxy_ihash_iter_t *fn, void *ctx)
+{
+ iter_ctx ictx;
+ ictx.iter = fn;
+ ictx.ctx = ctx;
+ return apr_hash_do(ihash_iter, &ictx, ih->hash);
+}
+
+void h2_proxy_ihash_add(h2_proxy_ihash_t *ih, void *val)
+{
+ apr_hash_set(ih->hash, ((char *)val + ih->ioff), sizeof(int), val);
+}
+
+void h2_proxy_ihash_remove(h2_proxy_ihash_t *ih, int id)
+{
+ apr_hash_set(ih->hash, &id, sizeof(id), NULL);
+}
+
+void h2_proxy_ihash_remove_val(h2_proxy_ihash_t *ih, void *val)
+{
+ int id = *((int*)((char *)val + ih->ioff));
+ apr_hash_set(ih->hash, &id, sizeof(id), NULL);
+}
+
+
+void h2_proxy_ihash_clear(h2_proxy_ihash_t *ih)
+{
+ apr_hash_clear(ih->hash);
+}
+
+typedef struct {
+ h2_proxy_ihash_t *ih;
+ void **buffer;
+ size_t max;
+ size_t len;
+} collect_ctx;
+
+static int collect_iter(void *x, void *val)
+{
+ collect_ctx *ctx = x;
+ if (ctx->len < ctx->max) {
+ ctx->buffer[ctx->len++] = val;
+ return 1;
+ }
+ return 0;
+}
+
+size_t h2_proxy_ihash_shift(h2_proxy_ihash_t *ih, void **buffer, size_t max)
+{
+ collect_ctx ctx;
+ size_t i;
+
+ ctx.ih = ih;
+ ctx.buffer = buffer;
+ ctx.max = max;
+ ctx.len = 0;
+ h2_proxy_ihash_iter(ih, collect_iter, &ctx);
+ for (i = 0; i < ctx.len; ++i) {
+ h2_proxy_ihash_remove_val(ih, buffer[i]);
+ }
+ return ctx.len;
+}
+
+typedef struct {
+ h2_proxy_ihash_t *ih;
+ int *buffer;
+ size_t max;
+ size_t len;
+} icollect_ctx;
+
+static int icollect_iter(void *x, void *val)
+{
+ icollect_ctx *ctx = x;
+ if (ctx->len < ctx->max) {
+ ctx->buffer[ctx->len++] = *((int*)((char *)val + ctx->ih->ioff));
+ return 1;
+ }
+ return 0;
+}
+
+size_t h2_proxy_ihash_ishift(h2_proxy_ihash_t *ih, int *buffer, size_t max)
+{
+ icollect_ctx ctx;
+ size_t i;
+
+ ctx.ih = ih;
+ ctx.buffer = buffer;
+ ctx.max = max;
+ ctx.len = 0;
+ h2_proxy_ihash_iter(ih, icollect_iter, &ctx);
+ for (i = 0; i < ctx.len; ++i) {
+ h2_proxy_ihash_remove(ih, buffer[i]);
+ }
+ return ctx.len;
+}
+
+/*******************************************************************************
+ * iqueue - sorted list of int
+ ******************************************************************************/
+
+static void iq_grow(h2_proxy_iqueue *q, int nlen);
+static void iq_swap(h2_proxy_iqueue *q, int i, int j);
+static int iq_bubble_up(h2_proxy_iqueue *q, int i, int top,
+ h2_proxy_iq_cmp *cmp, void *ctx);
+static int iq_bubble_down(h2_proxy_iqueue *q, int i, int bottom,
+ h2_proxy_iq_cmp *cmp, void *ctx);
+
+h2_proxy_iqueue *h2_proxy_iq_create(apr_pool_t *pool, int capacity)
+{
+ h2_proxy_iqueue *q = apr_pcalloc(pool, sizeof(h2_proxy_iqueue));
+ if (q) {
+ q->pool = pool;
+ iq_grow(q, capacity);
+ q->nelts = 0;
+ }
+ return q;
+}
+
+int h2_proxy_iq_empty(h2_proxy_iqueue *q)
+{
+ return q->nelts == 0;
+}
+
+int h2_proxy_iq_count(h2_proxy_iqueue *q)
+{
+ return q->nelts;
+}
+
+
+void h2_proxy_iq_add(h2_proxy_iqueue *q, int sid, h2_proxy_iq_cmp *cmp, void *ctx)
+{
+ int i;
+
+ if (q->nelts >= q->nalloc) {
+ iq_grow(q, q->nalloc * 2);
+ }
+
+ i = (q->head + q->nelts) % q->nalloc;
+ q->elts[i] = sid;
+ ++q->nelts;
+
+ if (cmp) {
+ /* bubble it to the front of the queue */
+ iq_bubble_up(q, i, q->head, cmp, ctx);
+ }
+}
+
+int h2_proxy_iq_remove(h2_proxy_iqueue *q, int sid)
+{
+ int i;
+ for (i = 0; i < q->nelts; ++i) {
+ if (sid == q->elts[(q->head + i) % q->nalloc]) {
+ break;
+ }
+ }
+
+ if (i < q->nelts) {
+ ++i;
+ for (; i < q->nelts; ++i) {
+ q->elts[(q->head+i-1)%q->nalloc] = q->elts[(q->head+i)%q->nalloc];
+ }
+ --q->nelts;
+ return 1;
+ }
+ return 0;
+}
+
+void h2_proxy_iq_clear(h2_proxy_iqueue *q)
+{
+ q->nelts = 0;
+}
+
+void h2_proxy_iq_sort(h2_proxy_iqueue *q, h2_proxy_iq_cmp *cmp, void *ctx)
+{
+ /* Assume that changes in ordering are minimal. This needs,
+ * best case, q->nelts - 1 comparisons to check that nothing
+ * changed.
+ */
+ if (q->nelts > 0) {
+ int i, ni, prev, last;
+
+ /* Start at the end of the queue and create a tail of sorted
+ * entries. Make that tail one element longer in each iteration.
+ */
+ last = i = (q->head + q->nelts - 1) % q->nalloc;
+ while (i != q->head) {
+ prev = (q->nalloc + i - 1) % q->nalloc;
+
+ ni = iq_bubble_up(q, i, prev, cmp, ctx);
+ if (ni == prev) {
+ /* i bubbled one up, bubble the new i down, which
+ * keeps all tasks below i sorted. */
+ iq_bubble_down(q, i, last, cmp, ctx);
+ }
+ i = prev;
+ };
+ }
+}
+
+
+int h2_proxy_iq_shift(h2_proxy_iqueue *q)
+{
+ int sid;
+
+ if (q->nelts <= 0) {
+ return 0;
+ }
+
+ sid = q->elts[q->head];
+ q->head = (q->head + 1) % q->nalloc;
+ q->nelts--;
+
+ return sid;
+}
+
+static void iq_grow(h2_proxy_iqueue *q, int nlen)
+{
+ if (nlen > q->nalloc) {
+ int *nq = apr_pcalloc(q->pool, sizeof(int) * nlen);
+ if (q->nelts > 0) {
+ int l = ((q->head + q->nelts) % q->nalloc) - q->head;
+
+ memmove(nq, q->elts + q->head, sizeof(int) * l);
+ if (l < q->nelts) {
+ /* elts wrapped, append elts in [0, remain] to nq */
+ int remain = q->nelts - l;
+ memmove(nq + l, q->elts, sizeof(int) * remain);
+ }
+ }
+ q->elts = nq;
+ q->nalloc = nlen;
+ q->head = 0;
+ }
+}
+
+static void iq_swap(h2_proxy_iqueue *q, int i, int j)
+{
+ int x = q->elts[i];
+ q->elts[i] = q->elts[j];
+ q->elts[j] = x;
+}
+
+static int iq_bubble_up(h2_proxy_iqueue *q, int i, int top,
+ h2_proxy_iq_cmp *cmp, void *ctx)
+{
+ int prev;
+ while (((prev = (q->nalloc + i - 1) % q->nalloc), i != top)
+ && (*cmp)(q->elts[i], q->elts[prev], ctx) < 0) {
+ iq_swap(q, prev, i);
+ i = prev;
+ }
+ return i;
+}
+
+static int iq_bubble_down(h2_proxy_iqueue *q, int i, int bottom,
+ h2_proxy_iq_cmp *cmp, void *ctx)
+{
+ int next;
+ while (((next = (q->nalloc + i + 1) % q->nalloc), i != bottom)
+ && (*cmp)(q->elts[i], q->elts[next], ctx) > 0) {
+ iq_swap(q, next, i);
+ i = next;
+ }
+ return i;
+}
+
+/*******************************************************************************
+ * h2_proxy_ngheader
+ ******************************************************************************/
+#define H2_HD_MATCH_LIT_CS(l, name) \
+ ((strlen(name) == sizeof(l) - 1) && !apr_strnatcasecmp(l, name))
+
+static int h2_util_ignore_header(const char *name)
+{
+ /* never forward, ch. 8.1.2.2 */
+ return (H2_HD_MATCH_LIT_CS("connection", name)
+ || H2_HD_MATCH_LIT_CS("proxy-connection", name)
+ || H2_HD_MATCH_LIT_CS("upgrade", name)
+ || H2_HD_MATCH_LIT_CS("keep-alive", name)
+ || H2_HD_MATCH_LIT_CS("transfer-encoding", name));
+}
+
+static int count_header(void *ctx, const char *key, const char *value)
+{
+ if (!h2_util_ignore_header(key)) {
+ (*((size_t*)ctx))++;
+ }
+ return 1;
+}
+
+#define NV_ADD_LIT_CS(nv, k, v) add_header(nv, k, sizeof(k) - 1, v, strlen(v))
+#define NV_ADD_CS_CS(nv, k, v) add_header(nv, k, strlen(k), v, strlen(v))
+
+static int add_header(h2_proxy_ngheader *ngh,
+ const char *key, size_t key_len,
+ const char *value, size_t val_len)
+{
+ nghttp2_nv *nv = &ngh->nv[ngh->nvlen++];
+
+ nv->name = (uint8_t*)key;
+ nv->namelen = key_len;
+ nv->value = (uint8_t*)value;
+ nv->valuelen = val_len;
+ return 1;
+}
+
+static int add_table_header(void *ctx, const char *key, const char *value)
+{
+ if (!h2_util_ignore_header(key)) {
+ add_header(ctx, key, strlen(key), value, strlen(value));
+ }
+ return 1;
+}
+
+h2_proxy_ngheader *h2_proxy_util_nghd_make_req(apr_pool_t *p,
+ const h2_proxy_request *req)
+{
+ h2_proxy_ngheader *ngh;
+ size_t n;
+
+ ap_assert(req);
+ ap_assert(req->scheme);
+ ap_assert(req->authority);
+ ap_assert(req->path);
+ ap_assert(req->method);
+
+ n = 4;
+ apr_table_do(count_header, &n, req->headers, NULL);
+
+ ngh = apr_pcalloc(p, sizeof(h2_proxy_ngheader));
+ ngh->nv = apr_pcalloc(p, n * sizeof(nghttp2_nv));
+ NV_ADD_LIT_CS(ngh, ":scheme", req->scheme);
+ NV_ADD_LIT_CS(ngh, ":authority", req->authority);
+ NV_ADD_LIT_CS(ngh, ":path", req->path);
+ NV_ADD_LIT_CS(ngh, ":method", req->method);
+ apr_table_do(add_table_header, ngh, req->headers, NULL);
+
+ return ngh;
+}
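+
+/* Example (illustrative): for a request of "https://example.org/index.html"
+ * with method GET, the resulting name/value list starts with the pseudo
+ * headers
+ *     :scheme: https
+ *     :authority: example.org
+ *     :path: /index.html
+ *     :method: GET
+ * followed by the regular request headers, minus the hop-by-hop ones
+ * filtered out by h2_util_ignore_header(). */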
+
+/*******************************************************************************
+ * header HTTP/1 <-> HTTP/2 conversions
+ ******************************************************************************/
+
+typedef struct {
+ const char *name;
+ size_t len;
+} literal;
+
+#define H2_DEF_LITERAL(n) { (n), (sizeof(n)-1) }
+#define H2_LIT_ARGS(a) (a),H2_ALEN(a)
+
+static literal IgnoredRequestHeaders[] = {
+ H2_DEF_LITERAL("upgrade"),
+ H2_DEF_LITERAL("connection"),
+ H2_DEF_LITERAL("keep-alive"),
+ H2_DEF_LITERAL("http2-settings"),
+ H2_DEF_LITERAL("proxy-connection"),
+ H2_DEF_LITERAL("transfer-encoding"),
+};
+static literal IgnoredProxyRespHds[] = {
+ H2_DEF_LITERAL("alt-svc"),
+};
+
+static int ignore_header(const literal *lits, size_t llen,
+ const char *name, size_t nlen)
+{
+ const literal *lit;
+ int i;
+
+ for (i = 0; i < llen; ++i) {
+ lit = &lits[i];
+ if (lit->len == nlen && !apr_strnatcasecmp(lit->name, name)) {
+ return 1;
+ }
+ }
+ return 0;
+}
+
+static int h2_proxy_req_ignore_header(const char *name, size_t len)
+{
+ return ignore_header(H2_LIT_ARGS(IgnoredRequestHeaders), name, len);
+}
+
+int h2_proxy_res_ignore_header(const char *name, size_t len)
+{
+ return (h2_proxy_req_ignore_header(name, len)
+ || ignore_header(H2_LIT_ARGS(IgnoredProxyRespHds), name, len));
+}
+
+void h2_proxy_util_camel_case_header(char *s, size_t len)
+{
+ size_t start = 1;
+ size_t i;
+ for (i = 0; i < len; ++i) {
+ if (start) {
+ if (s[i] >= 'a' && s[i] <= 'z') {
+ s[i] -= 'a' - 'A';
+ }
+
+ start = 0;
+ }
+ else if (s[i] == '-') {
+ start = 1;
+ }
+ }
+}
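+
+/* Example (illustrative): the name is rewritten in place, so "content-length"
+ * becomes "Content-Length" and "etag" becomes "Etag". */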
+
+/*******************************************************************************
+ * h2 request handling
+ ******************************************************************************/
+
+/** Match a header name against a string constant, case insensitively */
+#define H2_HD_MATCH_LIT(l, name, nlen) \
+ ((nlen == sizeof(l) - 1) && !apr_strnatcasecmp(l, name))
+
+static apr_status_t h2_headers_add_h1(apr_table_t *headers, apr_pool_t *pool,
+ const char *name, size_t nlen,
+ const char *value, size_t vlen)
+{
+ char *hname, *hvalue;
+
+ if (h2_proxy_req_ignore_header(name, nlen)) {
+ return APR_SUCCESS;
+ }
+ else if (H2_HD_MATCH_LIT("cookie", name, nlen)) {
+ const char *existing = apr_table_get(headers, "cookie");
+ if (existing) {
+ char *nval;
+
+            /* Cookie headers arrive separately in HTTP/2, but need
+             * to be merged with "; " (instead of the default ", ")
+             */
+ hvalue = apr_pstrndup(pool, value, vlen);
+ nval = apr_psprintf(pool, "%s; %s", existing, hvalue);
+ apr_table_setn(headers, "Cookie", nval);
+ return APR_SUCCESS;
+ }
+ }
+ else if (H2_HD_MATCH_LIT("host", name, nlen)) {
+ if (apr_table_get(headers, "Host")) {
+ return APR_SUCCESS; /* ignore duplicate */
+ }
+ }
+
+ hname = apr_pstrndup(pool, name, nlen);
+ hvalue = apr_pstrndup(pool, value, vlen);
+ h2_proxy_util_camel_case_header(hname, nlen);
+ apr_table_mergen(headers, hname, hvalue);
+
+ return APR_SUCCESS;
+}
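+
+/* Example (illustrative): two HTTP/2 header fields
+ *     cookie: a=1
+ *     cookie: b=2
+ * end up in the HTTP/1 header set as a single
+ *     Cookie: a=1; b=2
+ * while other repeated headers are merged with the default ", " separator
+ * by apr_table_mergen(). */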
+
+static h2_proxy_request *h2_proxy_req_createn(int id, apr_pool_t *pool, const char *method,
+ const char *scheme, const char *authority,
+ const char *path, apr_table_t *header,
+ int serialize)
+{
+ h2_proxy_request *req = apr_pcalloc(pool, sizeof(h2_proxy_request));
+
+ req->method = method;
+ req->scheme = scheme;
+ req->authority = authority;
+ req->path = path;
+ req->headers = header? header : apr_table_make(pool, 10);
+ req->request_time = apr_time_now();
+ req->serialize = serialize;
+
+ return req;
+}
+
+h2_proxy_request *h2_proxy_req_create(int id, apr_pool_t *pool, int serialize)
+{
+ return h2_proxy_req_createn(id, pool, NULL, NULL, NULL, NULL, NULL, serialize);
+}
+
+typedef struct {
+ apr_table_t *headers;
+ apr_pool_t *pool;
+} h1_ctx;
+
+static int set_h1_header(void *ctx, const char *key, const char *value)
+{
+ h1_ctx *x = ctx;
+ size_t klen = strlen(key);
+ if (!h2_proxy_req_ignore_header(key, klen)) {
+ h2_headers_add_h1(x->headers, x->pool, key, klen, value, strlen(value));
+ }
+ return 1;
+}
+
+apr_status_t h2_proxy_req_make(h2_proxy_request *req, apr_pool_t *pool,
+ const char *method, const char *scheme,
+ const char *authority, const char *path,
+ apr_table_t *headers)
+{
+ h1_ctx x;
+
+ req->method = method;
+ req->scheme = scheme;
+ req->authority = authority;
+ req->path = path;
+
+ ap_assert(req->scheme);
+ ap_assert(req->authority);
+ ap_assert(req->path);
+ ap_assert(req->method);
+
+ x.pool = pool;
+ x.headers = req->headers;
+ apr_table_do(set_h1_header, &x, headers, NULL);
+ return APR_SUCCESS;
+}
+
+/*******************************************************************************
+ * frame logging
+ ******************************************************************************/
+
+int h2_proxy_util_frame_print(const nghttp2_frame *frame, char *buffer, size_t maxlen)
+{
+ char scratch[128];
+ size_t s_len = sizeof(scratch)/sizeof(scratch[0]);
+
+ switch (frame->hd.type) {
+ case NGHTTP2_DATA: {
+ return apr_snprintf(buffer, maxlen,
+ "DATA[length=%d, flags=%d, stream=%d, padlen=%d]",
+ (int)frame->hd.length, frame->hd.flags,
+ frame->hd.stream_id, (int)frame->data.padlen);
+ }
+ case NGHTTP2_HEADERS: {
+ return apr_snprintf(buffer, maxlen,
+ "HEADERS[length=%d, hend=%d, stream=%d, eos=%d]",
+ (int)frame->hd.length,
+ !!(frame->hd.flags & NGHTTP2_FLAG_END_HEADERS),
+ frame->hd.stream_id,
+ !!(frame->hd.flags & NGHTTP2_FLAG_END_STREAM));
+ }
+ case NGHTTP2_PRIORITY: {
+ return apr_snprintf(buffer, maxlen,
+ "PRIORITY[length=%d, flags=%d, stream=%d]",
+ (int)frame->hd.length,
+ frame->hd.flags, frame->hd.stream_id);
+ }
+ case NGHTTP2_RST_STREAM: {
+ return apr_snprintf(buffer, maxlen,
+ "RST_STREAM[length=%d, flags=%d, stream=%d]",
+ (int)frame->hd.length,
+ frame->hd.flags, frame->hd.stream_id);
+ }
+ case NGHTTP2_SETTINGS: {
+ if (frame->hd.flags & NGHTTP2_FLAG_ACK) {
+ return apr_snprintf(buffer, maxlen,
+ "SETTINGS[ack=1, stream=%d]",
+ frame->hd.stream_id);
+ }
+ return apr_snprintf(buffer, maxlen,
+ "SETTINGS[length=%d, stream=%d]",
+ (int)frame->hd.length, frame->hd.stream_id);
+ }
+ case NGHTTP2_PUSH_PROMISE: {
+ return apr_snprintf(buffer, maxlen,
+ "PUSH_PROMISE[length=%d, hend=%d, stream=%d]",
+ (int)frame->hd.length,
+ !!(frame->hd.flags & NGHTTP2_FLAG_END_HEADERS),
+ frame->hd.stream_id);
+ }
+ case NGHTTP2_PING: {
+ return apr_snprintf(buffer, maxlen,
+ "PING[length=%d, ack=%d, stream=%d]",
+ (int)frame->hd.length,
+ frame->hd.flags&NGHTTP2_FLAG_ACK,
+ frame->hd.stream_id);
+ }
+ case NGHTTP2_GOAWAY: {
+ size_t len = (frame->goaway.opaque_data_len < s_len)?
+ frame->goaway.opaque_data_len : s_len-1;
+ memcpy(scratch, frame->goaway.opaque_data, len);
+ scratch[len] = '\0';
+ return apr_snprintf(buffer, maxlen, "GOAWAY[error=%d, reason='%s', "
+ "last_stream=%d]", frame->goaway.error_code,
+ scratch, frame->goaway.last_stream_id);
+ }
+ case NGHTTP2_WINDOW_UPDATE: {
+ return apr_snprintf(buffer, maxlen,
+ "WINDOW_UPDATE[stream=%d, incr=%d]",
+ frame->hd.stream_id,
+ frame->window_update.window_size_increment);
+ }
+ default:
+ return apr_snprintf(buffer, maxlen,
+ "type=%d[length=%d, flags=%d, stream=%d]",
+ frame->hd.type, (int)frame->hd.length,
+ frame->hd.flags, frame->hd.stream_id);
+ }
+}
+
+/*******************************************************************************
+ * link header handling
+ ******************************************************************************/
+
+typedef struct {
+ apr_pool_t *pool;
+ request_rec *r;
+ proxy_dir_conf *conf;
+ const char *s;
+ int slen;
+ int i;
+ const char *server_uri;
+ int su_len;
+ const char *real_backend_uri;
+ int rbu_len;
+ const char *p_server_uri;
+ int psu_len;
+ int link_start;
+ int link_end;
+} link_ctx;
+
+static int attr_char(char c)
+{
+ switch (c) {
+ case '!':
+ case '#':
+ case '$':
+ case '&':
+ case '+':
+ case '-':
+ case '.':
+ case '^':
+ case '_':
+ case '`':
+ case '|':
+ case '~':
+ return 1;
+ default:
+ return apr_isalnum(c);
+ }
+}
+
+static int ptoken_char(char c)
+{
+ switch (c) {
+ case '!':
+ case '#':
+ case '$':
+ case '&':
+ case '\'':
+ case '(':
+ case ')':
+ case '*':
+ case '+':
+ case '-':
+ case '.':
+ case '/':
+ case ':':
+ case '<':
+ case '=':
+ case '>':
+ case '?':
+ case '@':
+ case '[':
+ case ']':
+ case '^':
+ case '_':
+ case '`':
+ case '{':
+ case '|':
+ case '}':
+ case '~':
+ return 1;
+ default:
+ return apr_isalnum(c);
+ }
+}
+
+static int skip_ws(link_ctx *ctx)
+{
+ char c;
+ while (ctx->i < ctx->slen
+ && (((c = ctx->s[ctx->i]) == ' ') || (c == '\t'))) {
+ ++ctx->i;
+ }
+ return (ctx->i < ctx->slen);
+}
+
+static int find_chr(link_ctx *ctx, char c, int *pidx)
+{
+ int j;
+ for (j = ctx->i; j < ctx->slen; ++j) {
+ if (ctx->s[j] == c) {
+ *pidx = j;
+ return 1;
+ }
+ }
+ return 0;
+}
+
+static int read_chr(link_ctx *ctx, char c)
+{
+ if (ctx->i < ctx->slen && ctx->s[ctx->i] == c) {
+ ++ctx->i;
+ return 1;
+ }
+ return 0;
+}
+
+static int skip_qstring(link_ctx *ctx)
+{
+ if (skip_ws(ctx) && read_chr(ctx, '\"')) {
+ int end;
+ if (find_chr(ctx, '\"', &end)) {
+ ctx->i = end + 1;
+ return 1;
+ }
+ }
+ return 0;
+}
+
+static int skip_ptoken(link_ctx *ctx)
+{
+ if (skip_ws(ctx)) {
+ int i;
+ for (i = ctx->i; i < ctx->slen && ptoken_char(ctx->s[i]); ++i) {
+ /* nop */
+ }
+ if (i > ctx->i) {
+ ctx->i = i;
+ return 1;
+ }
+ }
+ return 0;
+}
+
+
+static int read_link(link_ctx *ctx)
+{
+ ctx->link_start = ctx->link_end = 0;
+ if (skip_ws(ctx) && read_chr(ctx, '<')) {
+ int end;
+ if (find_chr(ctx, '>', &end)) {
+ ctx->link_start = ctx->i;
+ ctx->link_end = end;
+ ctx->i = end + 1;
+ return 1;
+ }
+ }
+ return 0;
+}
+
+static int skip_pname(link_ctx *ctx)
+{
+ if (skip_ws(ctx)) {
+ int i;
+ for (i = ctx->i; i < ctx->slen && attr_char(ctx->s[i]); ++i) {
+ /* nop */
+ }
+ if (i > ctx->i) {
+ ctx->i = i;
+ return 1;
+ }
+ }
+ return 0;
+}
+
+static int skip_pvalue(link_ctx *ctx)
+{
+ if (skip_ws(ctx) && read_chr(ctx, '=')) {
+ if (skip_qstring(ctx) || skip_ptoken(ctx)) {
+ return 1;
+ }
+ }
+ return 0;
+}
+
+static int skip_param(link_ctx *ctx)
+{
+ if (skip_ws(ctx) && read_chr(ctx, ';')) {
+ if (skip_pname(ctx)) {
+ skip_pvalue(ctx); /* value is optional */
+ return 1;
+ }
+ }
+ return 0;
+}
+
+static int read_sep(link_ctx *ctx)
+{
+ if (skip_ws(ctx) && read_chr(ctx, ',')) {
+ return 1;
+ }
+ return 0;
+}
+
+static size_t subst_str(link_ctx *ctx, int start, int end, const char *ns)
+{
+ int olen, nlen, plen;
+ int delta;
+ char *p;
+
+ olen = end - start;
+ nlen = (int)strlen(ns);
+ delta = nlen - olen;
+ plen = ctx->slen + delta + 1;
+ p = apr_pcalloc(ctx->pool, plen);
+ memcpy(p, ctx->s, start);
+ memcpy(p + start, ns, nlen);
+ strcpy(p + start + nlen, ctx->s + end);
+ ctx->s = p;
+ ctx->slen = (int)strlen(p);
+ if (ctx->i >= end) {
+ ctx->i += delta;
+ }
+ return nlen;
+}
+
+static void map_link(link_ctx *ctx)
+{
+ if (ctx->link_start < ctx->link_end) {
+ char buffer[HUGE_STRING_LEN];
+ int need_len, link_len, buffer_len, prepend_p_server;
+ const char *mapped;
+
+ buffer[0] = '\0';
+ buffer_len = 0;
+ link_len = ctx->link_end - ctx->link_start;
+ need_len = link_len + 1;
+ prepend_p_server = (ctx->s[ctx->link_start] == '/');
+ if (prepend_p_server) {
+            /* it is common to use relative uris in link headers; for mappings
+             * to work, we need to prefix the backend server uri */
+ need_len += ctx->psu_len;
+ apr_cpystrn(buffer, ctx->p_server_uri, sizeof(buffer));
+ buffer_len = ctx->psu_len;
+ }
+ if (need_len > sizeof(buffer)) {
+ ap_log_rerror(APLOG_MARK, APLOG_WARNING, 0, ctx->r, APLOGNO(03482)
+ "link_reverse_map uri too long, skipped: %s", ctx->s);
+ return;
+ }
+ apr_cpystrn(buffer + buffer_len, ctx->s + ctx->link_start, link_len + 1);
+ if (!prepend_p_server
+ && strcmp(ctx->real_backend_uri, ctx->p_server_uri)
+ && !strncmp(buffer, ctx->real_backend_uri, ctx->rbu_len)) {
+            /* the real backend uri and the proxy uri we use differ; for the
+             * mapping to work, we need to use the proxy uri */
+ int path_start = ctx->link_start + ctx->rbu_len;
+ link_len -= ctx->rbu_len;
+ memcpy(buffer, ctx->p_server_uri, ctx->psu_len);
+ memcpy(buffer + ctx->psu_len, ctx->s + path_start, link_len);
+ buffer_len = ctx->psu_len + link_len;
+ buffer[buffer_len] = '\0';
+ }
+ mapped = ap_proxy_location_reverse_map(ctx->r, ctx->conf, buffer);
+ ap_log_rerror(APLOG_MARK, APLOG_TRACE2, 0, ctx->r,
+ "reverse_map[%s] %s --> %s", ctx->p_server_uri, buffer, mapped);
+ if (mapped != buffer) {
+ if (prepend_p_server) {
+ if (ctx->server_uri == NULL) {
+ ctx->server_uri = ap_construct_url(ctx->pool, "", ctx->r);
+ ctx->su_len = (int)strlen(ctx->server_uri);
+ }
+ if (!strncmp(mapped, ctx->server_uri, ctx->su_len)) {
+ mapped += ctx->su_len;
+ }
+ }
+ subst_str(ctx, ctx->link_start, ctx->link_end, mapped);
+ }
+ }
+}
+
+/* RFC 5988 <https://tools.ietf.org/html/rfc5988#section-6.2.1>
+ Link = "Link" ":" #link-value
+ link-value = "<" URI-Reference ">" *( ";" link-param )
+ link-param = ( ( "rel" "=" relation-types )
+ | ( "anchor" "=" <"> URI-Reference <"> )
+ | ( "rev" "=" relation-types )
+ | ( "hreflang" "=" Language-Tag )
+ | ( "media" "=" ( MediaDesc | ( <"> MediaDesc <"> ) ) )
+ | ( "title" "=" quoted-string )
+ | ( "title*" "=" ext-value )
+ | ( "type" "=" ( media-type | quoted-mt ) )
+ | ( link-extension ) )
+ link-extension = ( parmname [ "=" ( ptoken | quoted-string ) ] )
+ | ( ext-name-star "=" ext-value )
+ ext-name-star = parmname "*" ; reserved for RFC2231-profiled
+ ; extensions. Whitespace NOT
+ ; allowed in between.
+ ptoken = 1*ptokenchar
+ ptokenchar = "!" | "#" | "$" | "%" | "&" | "'" | "("
+ | ")" | "*" | "+" | "-" | "." | "/" | DIGIT
+ | ":" | "<" | "=" | ">" | "?" | "@" | ALPHA
+ | "[" | "]" | "^" | "_" | "`" | "{" | "|"
+ | "}" | "~"
+ media-type = type-name "/" subtype-name
+ quoted-mt = <"> media-type <">
+ relation-types = relation-type
+ | <"> relation-type *( 1*SP relation-type ) <">
+ relation-type = reg-rel-type | ext-rel-type
+ reg-rel-type = LOALPHA *( LOALPHA | DIGIT | "." | "-" )
+ ext-rel-type = URI
+
+ and from <https://tools.ietf.org/html/rfc5987>
+ parmname = 1*attr-char
+ attr-char = ALPHA / DIGIT
+ / "!" / "#" / "$" / "&" / "+" / "-" / "."
+ / "^" / "_" / "`" / "|" / "~"
+ */
+
+const char *h2_proxy_link_reverse_map(request_rec *r,
+ proxy_dir_conf *conf,
+ const char *real_backend_uri,
+ const char *proxy_server_uri,
+ const char *s)
+{
+ link_ctx ctx;
+
+ if (r->proxyreq != PROXYREQ_REVERSE) {
+ return s;
+ }
+ memset(&ctx, 0, sizeof(ctx));
+ ctx.r = r;
+ ctx.pool = r->pool;
+ ctx.conf = conf;
+ ctx.real_backend_uri = real_backend_uri;
+ ctx.rbu_len = (int)strlen(ctx.real_backend_uri);
+ ctx.p_server_uri = proxy_server_uri;
+ ctx.psu_len = (int)strlen(ctx.p_server_uri);
+ ctx.s = s;
+ ctx.slen = (int)strlen(s);
+ while (read_link(&ctx)) {
+ while (skip_param(&ctx)) {
+ /* nop */
+ }
+ map_link(&ctx);
+ if (!read_sep(&ctx)) {
+ break;
+ }
+ }
+ ap_log_rerror(APLOG_MARK, APLOG_TRACE2, 0, r,
+ "link_reverse_map %s --> %s", s, ctx.s);
+ return ctx.s;
+}
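+
+/* Example (illustrative, all names made up): with a reverse proxy mapping
+ * "https://backend.example.org/" to the local path "/app/", a backend
+ * response header such as
+ *     Link: </styles/main.css>; rel=preload
+ * would typically be rewritten to
+ *     Link: </app/styles/main.css>; rel=preload
+ * before the response is forwarded to the client. */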
+
+/*******************************************************************************
+ * FIFO queue
+ ******************************************************************************/
+
+struct h2_proxy_fifo {
+ void **elems;
+ int nelems;
+ int set;
+ int head;
+ int count;
+ int aborted;
+ apr_thread_mutex_t *lock;
+ apr_thread_cond_t *not_empty;
+ apr_thread_cond_t *not_full;
+};
+
+static int nth_index(h2_proxy_fifo *fifo, int n)
+{
+ return (fifo->head + n) % fifo->nelems;
+}
+
+static apr_status_t fifo_destroy(void *data)
+{
+ h2_proxy_fifo *fifo = data;
+
+ apr_thread_cond_destroy(fifo->not_empty);
+ apr_thread_cond_destroy(fifo->not_full);
+ apr_thread_mutex_destroy(fifo->lock);
+
+ return APR_SUCCESS;
+}
+
+static int index_of(h2_proxy_fifo *fifo, void *elem)
+{
+ int i;
+
+ for (i = 0; i < fifo->count; ++i) {
+ if (elem == fifo->elems[nth_index(fifo, i)]) {
+ return i;
+ }
+ }
+ return -1;
+}
+
+static apr_status_t create_int(h2_proxy_fifo **pfifo, apr_pool_t *pool,
+ int capacity, int as_set)
+{
+ apr_status_t rv;
+ h2_proxy_fifo *fifo;
+
+ fifo = apr_pcalloc(pool, sizeof(*fifo));
+ if (fifo == NULL) {
+ return APR_ENOMEM;
+ }
+
+ rv = apr_thread_mutex_create(&fifo->lock,
+ APR_THREAD_MUTEX_UNNESTED, pool);
+ if (rv != APR_SUCCESS) {
+ return rv;
+ }
+
+ rv = apr_thread_cond_create(&fifo->not_empty, pool);
+ if (rv != APR_SUCCESS) {
+ return rv;
+ }
+
+ rv = apr_thread_cond_create(&fifo->not_full, pool);
+ if (rv != APR_SUCCESS) {
+ return rv;
+ }
+
+ fifo->elems = apr_pcalloc(pool, capacity * sizeof(void*));
+ if (fifo->elems == NULL) {
+ return APR_ENOMEM;
+ }
+ fifo->nelems = capacity;
+ fifo->set = as_set;
+
+ *pfifo = fifo;
+ apr_pool_cleanup_register(pool, fifo, fifo_destroy, apr_pool_cleanup_null);
+
+ return APR_SUCCESS;
+}
+
+apr_status_t h2_proxy_fifo_create(h2_proxy_fifo **pfifo, apr_pool_t *pool, int capacity)
+{
+ return create_int(pfifo, pool, capacity, 0);
+}
+
+apr_status_t h2_proxy_fifo_set_create(h2_proxy_fifo **pfifo, apr_pool_t *pool, int capacity)
+{
+ return create_int(pfifo, pool, capacity, 1);
+}
+
+apr_status_t h2_proxy_fifo_term(h2_proxy_fifo *fifo)
+{
+ apr_status_t rv;
+ if ((rv = apr_thread_mutex_lock(fifo->lock)) == APR_SUCCESS) {
+ fifo->aborted = 1;
+ apr_thread_mutex_unlock(fifo->lock);
+ }
+ return rv;
+}
+
+apr_status_t h2_proxy_fifo_interrupt(h2_proxy_fifo *fifo)
+{
+ apr_status_t rv;
+ if ((rv = apr_thread_mutex_lock(fifo->lock)) == APR_SUCCESS) {
+ apr_thread_cond_broadcast(fifo->not_empty);
+ apr_thread_cond_broadcast(fifo->not_full);
+ apr_thread_mutex_unlock(fifo->lock);
+ }
+ return rv;
+}
+
+int h2_proxy_fifo_count(h2_proxy_fifo *fifo)
+{
+ return fifo->count;
+}
+
+int h2_proxy_fifo_capacity(h2_proxy_fifo *fifo)
+{
+ return fifo->nelems;
+}
+
+static apr_status_t check_not_empty(h2_proxy_fifo *fifo, int block)
+{
+ if (fifo->count == 0) {
+ if (!block) {
+ return APR_EAGAIN;
+ }
+ while (fifo->count == 0) {
+ if (fifo->aborted) {
+ return APR_EOF;
+ }
+ apr_thread_cond_wait(fifo->not_empty, fifo->lock);
+ }
+ }
+ return APR_SUCCESS;
+}
+
+static apr_status_t fifo_push(h2_proxy_fifo *fifo, void *elem, int block)
+{
+ apr_status_t rv;
+
+ if (fifo->aborted) {
+ return APR_EOF;
+ }
+
+ if ((rv = apr_thread_mutex_lock(fifo->lock)) == APR_SUCCESS) {
+ if (fifo->set && index_of(fifo, elem) >= 0) {
+ /* set mode, elem already member */
+ apr_thread_mutex_unlock(fifo->lock);
+ return APR_EEXIST;
+ }
+ else if (fifo->count == fifo->nelems) {
+ if (block) {
+ while (fifo->count == fifo->nelems) {
+ if (fifo->aborted) {
+ apr_thread_mutex_unlock(fifo->lock);
+ return APR_EOF;
+ }
+ apr_thread_cond_wait(fifo->not_full, fifo->lock);
+ }
+ }
+ else {
+ apr_thread_mutex_unlock(fifo->lock);
+ return APR_EAGAIN;
+ }
+ }
+
+ ap_assert(fifo->count < fifo->nelems);
+ fifo->elems[nth_index(fifo, fifo->count)] = elem;
+ ++fifo->count;
+ if (fifo->count == 1) {
+ apr_thread_cond_broadcast(fifo->not_empty);
+ }
+ apr_thread_mutex_unlock(fifo->lock);
+ }
+ return rv;
+}
+
+apr_status_t h2_proxy_fifo_push(h2_proxy_fifo *fifo, void *elem)
+{
+ return fifo_push(fifo, elem, 1);
+}
+
+apr_status_t h2_proxy_fifo_try_push(h2_proxy_fifo *fifo, void *elem)
+{
+ return fifo_push(fifo, elem, 0);
+}
+
+static void *pull_head(h2_proxy_fifo *fifo)
+{
+ void *elem;
+
+ ap_assert(fifo->count > 0);
+ elem = fifo->elems[fifo->head];
+ --fifo->count;
+ if (fifo->count > 0) {
+ fifo->head = nth_index(fifo, 1);
+ if (fifo->count+1 == fifo->nelems) {
+ apr_thread_cond_broadcast(fifo->not_full);
+ }
+ }
+ return elem;
+}
+
+static apr_status_t fifo_pull(h2_proxy_fifo *fifo, void **pelem, int block)
+{
+ apr_status_t rv;
+
+ if (fifo->aborted) {
+ return APR_EOF;
+ }
+
+ if ((rv = apr_thread_mutex_lock(fifo->lock)) == APR_SUCCESS) {
+ if ((rv = check_not_empty(fifo, block)) != APR_SUCCESS) {
+ apr_thread_mutex_unlock(fifo->lock);
+ *pelem = NULL;
+ return rv;
+ }
+
+ ap_assert(fifo->count > 0);
+ *pelem = pull_head(fifo);
+
+ apr_thread_mutex_unlock(fifo->lock);
+ }
+ return rv;
+}
+
+apr_status_t h2_proxy_fifo_pull(h2_proxy_fifo *fifo, void **pelem)
+{
+ return fifo_pull(fifo, pelem, 1);
+}
+
+apr_status_t h2_proxy_fifo_try_pull(h2_proxy_fifo *fifo, void **pelem)
+{
+ return fifo_pull(fifo, pelem, 0);
+}
+
+apr_status_t h2_proxy_fifo_remove(h2_proxy_fifo *fifo, void *elem)
+{
+ apr_status_t rv;
+
+ if (fifo->aborted) {
+ return APR_EOF;
+ }
+
+ if ((rv = apr_thread_mutex_lock(fifo->lock)) == APR_SUCCESS) {
+ int i, rc;
+ void *e;
+
+ rc = 0;
+ for (i = 0; i < fifo->count; ++i) {
+ e = fifo->elems[nth_index(fifo, i)];
+ if (e == elem) {
+ ++rc;
+ }
+ else if (rc) {
+ fifo->elems[nth_index(fifo, i-rc)] = e;
+ }
+ }
+ if (rc) {
+ fifo->count -= rc;
+ if (fifo->count + rc == fifo->nelems) {
+ apr_thread_cond_broadcast(fifo->not_full);
+ }
+ rv = APR_SUCCESS;
+ }
+ else {
+ rv = APR_EAGAIN;
+ }
+
+ apr_thread_mutex_unlock(fifo->lock);
+ }
+ return rv;
+}
diff --git a/modules/http2/h2_proxy_util.h b/modules/http2/h2_proxy_util.h
new file mode 100644
index 0000000..a88fb7e
--- /dev/null
+++ b/modules/http2/h2_proxy_util.h
@@ -0,0 +1,256 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __mod_h2__h2_proxy_util__
+#define __mod_h2__h2_proxy_util__
+
+/*******************************************************************************
+ * some debugging/format helpers
+ ******************************************************************************/
+struct h2_proxy_request;
+struct nghttp2_frame;
+
+int h2_proxy_util_frame_print(const nghttp2_frame *frame, char *buffer, size_t maxlen);
+
+/*******************************************************************************
+ * ihash - hash for structs with int identifier
+ ******************************************************************************/
+typedef struct h2_proxy_ihash_t h2_proxy_ihash_t;
+typedef int h2_proxy_ihash_iter_t(void *ctx, void *val);
+
+/**
+ * Create a hash for structures that have an identifying int member.
+ * @param pool the pool to use
+ * @param offset_of_int the offsetof() the int member in the struct
+ */
+h2_proxy_ihash_t *h2_proxy_ihash_create(apr_pool_t *pool, size_t offset_of_int);
+
+size_t h2_proxy_ihash_count(h2_proxy_ihash_t *ih);
+int h2_proxy_ihash_empty(h2_proxy_ihash_t *ih);
+void *h2_proxy_ihash_get(h2_proxy_ihash_t *ih, int id);
+
+/**
+ * Iterate over the hash members (without defined order) and invoke
+ * fn for each member until 0 is returned.
+ * @param ih the hash to iterate over
+ * @param fn the function to invoke on each member
+ * @param ctx user supplied data passed into each iteration call
+ * @return 0 if one iteration returned 0, otherwise != 0
+ */
+int h2_proxy_ihash_iter(h2_proxy_ihash_t *ih, h2_proxy_ihash_iter_t *fn, void *ctx);
+
+void h2_proxy_ihash_add(h2_proxy_ihash_t *ih, void *val);
+void h2_proxy_ihash_remove(h2_proxy_ihash_t *ih, int id);
+void h2_proxy_ihash_remove_val(h2_proxy_ihash_t *ih, void *val);
+void h2_proxy_ihash_clear(h2_proxy_ihash_t *ih);
+
+size_t h2_proxy_ihash_shift(h2_proxy_ihash_t *ih, void **buffer, size_t max);
+size_t h2_proxy_ihash_ishift(h2_proxy_ihash_t *ih, int *buffer, size_t max);
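+
+/* Usage sketch (illustrative): the hash keys off an int member at a fixed
+ * offset inside the stored struct, e.g.
+ *
+ *     typedef struct { int id; const char *name; } item;
+ *
+ *     h2_proxy_ihash_t *ih = h2_proxy_ihash_create(pool, offsetof(item, id));
+ *     h2_proxy_ihash_add(ih, my_item);            // keyed by my_item->id
+ *     item *found = h2_proxy_ihash_get(ih, 42);   // NULL if no such id
+ */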
+
+/*******************************************************************************
+ * iqueue - sorted list of int with user defined ordering
+ ******************************************************************************/
+typedef struct h2_proxy_iqueue {
+ int *elts;
+ int head;
+ int nelts;
+ int nalloc;
+ apr_pool_t *pool;
+} h2_proxy_iqueue;
+
+/**
+ * Comparator for two int to determine their order.
+ *
+ * @param i1 first int to compare
+ * @param i2 second int to compare
+ * @param ctx provided user data
+ * @return value is the same as for strcmp() and has the effect:
+ * == 0: s1 and s2 are treated equal in ordering
+ * < 0: s1 should be sorted before s2
+ * > 0: s2 should be sorted before s1
+ */
+typedef int h2_proxy_iq_cmp(int i1, int i2, void *ctx);
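+
+/* Example comparator (illustrative): sort stream ids in plain ascending
+ * order, ignoring the user data.
+ *
+ *     static int cmp_asc(int i1, int i2, void *ctx)
+ *     {
+ *         (void)ctx;
+ *         return i1 - i2;
+ *     }
+ *
+ *     h2_proxy_iq_add(q, stream_id, cmp_asc, NULL);
+ */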
+
+/**
+ * Allocate a new queue from the pool and initialize.
+ * @param id the identifier of the queue
+ * @param pool the memory pool
+ */
+h2_proxy_iqueue *h2_proxy_iq_create(apr_pool_t *pool, int capacity);
+
+/**
+ * Return != 0 iff there are no tasks in the queue.
+ * @param q the queue to check
+ */
+int h2_proxy_iq_empty(h2_proxy_iqueue *q);
+
+/**
+ * Return the number of int in the queue.
+ * @param q the queue to get size on
+ */
+int h2_proxy_iq_count(h2_proxy_iqueue *q);
+
+/**
+ * Add a stream id to the queue.
+ *
+ * @param q the queue to append the task to
+ * @param sid the stream id to add
+ * @param cmp the comparator for sorting
+ * @param ctx user data for comparator
+ */
+void h2_proxy_iq_add(h2_proxy_iqueue *q, int sid, h2_proxy_iq_cmp *cmp, void *ctx);
+
+/**
+ * Remove the stream id from the queue. Return != 0 iff the id
+ * was found in the queue.
+ * @param q the queue
+ * @param sid the stream id to remove
+ * @return != 0 iff the id was found in the queue
+ */
+int h2_proxy_iq_remove(h2_proxy_iqueue *q, int sid);
+
+/**
+ * Remove all entries in the queue.
+ */
+void h2_proxy_iq_clear(h2_proxy_iqueue *q);
+
+/**
+ * Sort the stream id queue again. Call this when the ordering
+ * has changed.
+ *
+ * @param q the queue to sort
+ * @param cmp the comparator for sorting
+ * @param ctx user data for the comparator
+ */
+void h2_proxy_iq_sort(h2_proxy_iqueue *q, h2_proxy_iq_cmp *cmp, void *ctx);
+
+/**
+ * Get the first stream id from the queue, or 0 if the queue is empty.
+ * The id is removed from the queue.
+ *
+ * @param q the queue to get the first task from
+ * @return the first stream id of the queue, 0 if empty
+ */
+int h2_proxy_iq_shift(h2_proxy_iqueue *q);
+
+/*******************************************************************************
+ * common helpers
+ ******************************************************************************/
+/* h2_proxy_log2(n) iff n is a power of 2 */
+unsigned char h2_proxy_log2(int n);
+
+/*******************************************************************************
+ * HTTP/2 header helpers
+ ******************************************************************************/
+void h2_proxy_util_camel_case_header(char *s, size_t len);
+int h2_proxy_res_ignore_header(const char *name, size_t len);
+
+/*******************************************************************************
+ * nghttp2 helpers
+ ******************************************************************************/
+typedef struct h2_proxy_ngheader {
+ nghttp2_nv *nv;
+ apr_size_t nvlen;
+} h2_proxy_ngheader;
+h2_proxy_ngheader *h2_proxy_util_nghd_make_req(apr_pool_t *p,
+ const struct h2_proxy_request *req);
+
+/*******************************************************************************
+ * h2_proxy_request helpers
+ ******************************************************************************/
+typedef struct h2_proxy_request h2_proxy_request;
+
+struct h2_proxy_request {
+ const char *method; /* pseudo header values, see ch. 8.1.2.3 */
+ const char *scheme;
+ const char *authority;
+ const char *path;
+
+ apr_table_t *headers;
+
+ apr_time_t request_time;
+
+    unsigned int chunked : 1; /* iff request body needs to be forwarded as chunked */
+ unsigned int serialize : 1; /* iff this request is written in HTTP/1.1 serialization */
+};
+
+h2_proxy_request *h2_proxy_req_create(int id, apr_pool_t *pool, int serialize);
+apr_status_t h2_proxy_req_make(h2_proxy_request *req, apr_pool_t *pool,
+ const char *method, const char *scheme,
+ const char *authority, const char *path,
+ apr_table_t *headers);
+
+/*******************************************************************************
+ * reverse mapping for link headers
+ ******************************************************************************/
+const char *h2_proxy_link_reverse_map(request_rec *r,
+ proxy_dir_conf *conf,
+ const char *real_server_uri,
+ const char *proxy_server_uri,
+ const char *s);
+
+/*******************************************************************************
+ * FIFO queue
+ ******************************************************************************/
+
+/**
+ * A thread-safe FIFO queue with some extra bells and whistles; if you
+ * do not need anything special, 'apr_queue' is the better choice.
+ */
+typedef struct h2_proxy_fifo h2_proxy_fifo;
+
+/**
+ * Create a FIFO queue that can hold up to capacity elements. Elements can
+ * appear several times.
+ */
+apr_status_t h2_proxy_fifo_create(h2_proxy_fifo **pfifo, apr_pool_t *pool, int capacity);
+
+/**
+ * Create a FIFO set that can hold up to capacity elements. Elements only
+ * appear once. Pushing an element that is already present does not change
+ * the queue; the push returns APR_EEXIST.
+ */
+apr_status_t h2_proxy_fifo_set_create(h2_proxy_fifo **pfifo, apr_pool_t *pool, int capacity);
+
+apr_status_t h2_proxy_fifo_term(h2_proxy_fifo *fifo);
+apr_status_t h2_proxy_fifo_interrupt(h2_proxy_fifo *fifo);
+
+int h2_proxy_fifo_capacity(h2_proxy_fifo *fifo);
+int h2_proxy_fifo_count(h2_proxy_fifo *fifo);
+
+/**
+ * Push an element into the queue. Blocks if there is no capacity left.
+ *
+ * @param fifo the FIFO queue
+ * @param elem the element to push
+ * @return APR_SUCCESS on push, APR_EAGAIN on try_push on a full queue,
+ * APR_EEXIST when in set mode and elem already there.
+ */
+apr_status_t h2_proxy_fifo_push(h2_proxy_fifo *fifo, void *elem);
+apr_status_t h2_proxy_fifo_try_push(h2_proxy_fifo *fifo, void *elem);
+
+apr_status_t h2_proxy_fifo_pull(h2_proxy_fifo *fifo, void **pelem);
+apr_status_t h2_proxy_fifo_try_pull(h2_proxy_fifo *fifo, void **pelem);
+
+/**
+ * Remove the element from the queue; removes multiple occurrences if present.
+ * @param elem the element to remove
+ * @return APR_SUCCESS iff > 0 elems were removed, APR_EAGAIN otherwise.
+ */
+apr_status_t h2_proxy_fifo_remove(h2_proxy_fifo *fifo, void *elem);
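+
+/* Usage sketch (illustrative): a bounded hand-off between a producer and a
+ * consumer.
+ *
+ *     h2_proxy_fifo *fifo;
+ *     void *next;
+ *
+ *     h2_proxy_fifo_create(&fifo, pool, 16);
+ *     h2_proxy_fifo_push(fifo, elem);          // blocks while the queue is full
+ *     if (h2_proxy_fifo_try_pull(fifo, &next) == APR_SUCCESS) {
+ *         // process next
+ *     }
+ *     h2_proxy_fifo_term(fifo);                // mark as terminated ...
+ *     h2_proxy_fifo_interrupt(fifo);           // ... and wake blocked callers
+ */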
+
+
+#endif /* defined(__mod_h2__h2_proxy_util__) */
diff --git a/modules/http2/h2_push.c b/modules/http2/h2_push.c
new file mode 100644
index 0000000..9a3b19b
--- /dev/null
+++ b/modules/http2/h2_push.c
@@ -0,0 +1,1060 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <assert.h>
+#include <stdio.h>
+
+#include <apr_lib.h>
+#include <apr_strings.h>
+#include <apr_hash.h>
+#include <apr_time.h>
+
+#ifdef H2_OPENSSL
+#include <openssl/sha.h>
+#endif
+
+#include <httpd.h>
+#include <http_core.h>
+#include <http_log.h>
+
+#include "h2_private.h"
+#include "h2_h2.h"
+#include "h2_util.h"
+#include "h2_push.h"
+#include "h2_request.h"
+#include "h2_headers.h"
+#include "h2_session.h"
+#include "h2_stream.h"
+
+/*******************************************************************************
+ * link header handling
+ ******************************************************************************/
+
+static const char *policy_str(h2_push_policy policy)
+{
+ switch (policy) {
+ case H2_PUSH_NONE:
+ return "none";
+ case H2_PUSH_FAST_LOAD:
+ return "fast-load";
+ case H2_PUSH_HEAD:
+ return "head";
+ default:
+ return "default";
+ }
+}
+
+typedef struct {
+ const h2_request *req;
+ int push_policy;
+ apr_pool_t *pool;
+ apr_array_header_t *pushes;
+ const char *s;
+ size_t slen;
+ size_t i;
+
+ const char *link;
+ apr_table_t *params;
+ char b[4096];
+} link_ctx;
+
+static int attr_char(char c)
+{
+ switch (c) {
+ case '!':
+ case '#':
+ case '$':
+ case '&':
+ case '+':
+ case '-':
+ case '.':
+ case '^':
+ case '_':
+ case '`':
+ case '|':
+ case '~':
+ return 1;
+ default:
+ return apr_isalnum(c);
+ }
+}
+
+static int ptoken_char(char c)
+{
+ switch (c) {
+ case '!':
+ case '#':
+ case '$':
+ case '&':
+ case '\'':
+ case '(':
+ case ')':
+ case '*':
+ case '+':
+ case '-':
+ case '.':
+ case '/':
+ case ':':
+ case '<':
+ case '=':
+ case '>':
+ case '?':
+ case '@':
+ case '[':
+ case ']':
+ case '^':
+ case '_':
+ case '`':
+ case '{':
+ case '|':
+ case '}':
+ case '~':
+ return 1;
+ default:
+ return apr_isalnum(c);
+ }
+}
+
+static int skip_ws(link_ctx *ctx)
+{
+ char c;
+ while (ctx->i < ctx->slen
+ && (((c = ctx->s[ctx->i]) == ' ') || (c == '\t'))) {
+ ++ctx->i;
+ }
+ return (ctx->i < ctx->slen);
+}
+
+static int find_chr(link_ctx *ctx, char c, size_t *pidx)
+{
+ size_t j;
+ for (j = ctx->i; j < ctx->slen; ++j) {
+ if (ctx->s[j] == c) {
+ *pidx = j;
+ return 1;
+ }
+ }
+ return 0;
+}
+
+static int read_chr(link_ctx *ctx, char c)
+{
+ if (ctx->i < ctx->slen && ctx->s[ctx->i] == c) {
+ ++ctx->i;
+ return 1;
+ }
+ return 0;
+}
+
+static char *mk_str(link_ctx *ctx, size_t end)
+{
+ if (ctx->i < end) {
+ return apr_pstrndup(ctx->pool, ctx->s + ctx->i, end - ctx->i);
+ }
+ return (char*)"";
+}
+
+static int read_qstring(link_ctx *ctx, const char **ps)
+{
+ if (skip_ws(ctx) && read_chr(ctx, '\"')) {
+ size_t end;
+ if (find_chr(ctx, '\"', &end)) {
+ *ps = mk_str(ctx, end);
+ ctx->i = end + 1;
+ return 1;
+ }
+ }
+ return 0;
+}
+
+static int read_ptoken(link_ctx *ctx, const char **ps)
+{
+ if (skip_ws(ctx)) {
+ size_t i;
+ for (i = ctx->i; i < ctx->slen && ptoken_char(ctx->s[i]); ++i) {
+ /* nop */
+ }
+ if (i > ctx->i) {
+ *ps = mk_str(ctx, i);
+ ctx->i = i;
+ return 1;
+ }
+ }
+ return 0;
+}
+
+
+static int read_link(link_ctx *ctx)
+{
+ if (skip_ws(ctx) && read_chr(ctx, '<')) {
+ size_t end;
+ if (find_chr(ctx, '>', &end)) {
+ ctx->link = mk_str(ctx, end);
+ ctx->i = end + 1;
+ return 1;
+ }
+ }
+ return 0;
+}
+
+static int read_pname(link_ctx *ctx, const char **pname)
+{
+ if (skip_ws(ctx)) {
+ size_t i;
+ for (i = ctx->i; i < ctx->slen && attr_char(ctx->s[i]); ++i) {
+ /* nop */
+ }
+ if (i > ctx->i) {
+ *pname = mk_str(ctx, i);
+ ctx->i = i;
+ return 1;
+ }
+ }
+ return 0;
+}
+
+static int read_pvalue(link_ctx *ctx, const char **pvalue)
+{
+ if (skip_ws(ctx) && read_chr(ctx, '=')) {
+ if (read_qstring(ctx, pvalue) || read_ptoken(ctx, pvalue)) {
+ return 1;
+ }
+ }
+ return 0;
+}
+
+static int read_param(link_ctx *ctx)
+{
+ if (skip_ws(ctx) && read_chr(ctx, ';')) {
+ const char *name, *value = "";
+ if (read_pname(ctx, &name)) {
+ read_pvalue(ctx, &value); /* value is optional */
+ apr_table_setn(ctx->params, name, value);
+ return 1;
+ }
+ }
+ return 0;
+}
+
+static int read_sep(link_ctx *ctx)
+{
+ if (skip_ws(ctx) && read_chr(ctx, ',')) {
+ return 1;
+ }
+ return 0;
+}
+
+static void init_params(link_ctx *ctx)
+{
+ if (!ctx->params) {
+ ctx->params = apr_table_make(ctx->pool, 5);
+ }
+ else {
+ apr_table_clear(ctx->params);
+ }
+}
+
+static int same_authority(const h2_request *req, const apr_uri_t *uri)
+{
+ if (uri->scheme != NULL && strcmp(uri->scheme, req->scheme)) {
+ return 0;
+ }
+ if (uri->hostinfo != NULL && strcmp(uri->hostinfo, req->authority)) {
+ return 0;
+ }
+ return 1;
+}
+
+static int set_push_header(void *ctx, const char *key, const char *value)
+{
+ size_t klen = strlen(key);
+ if (H2_HD_MATCH_LIT("User-Agent", key, klen)
+ || H2_HD_MATCH_LIT("Accept", key, klen)
+ || H2_HD_MATCH_LIT("Accept-Encoding", key, klen)
+ || H2_HD_MATCH_LIT("Accept-Language", key, klen)
+ || H2_HD_MATCH_LIT("Cache-Control", key, klen)) {
+ apr_table_setn(ctx, key, value);
+ }
+ return 1;
+}
+
+static int has_param(link_ctx *ctx, const char *param)
+{
+ const char *p = apr_table_get(ctx->params, param);
+ return !!p;
+}
+
+static int has_relation(link_ctx *ctx, const char *rel)
+{
+ const char *s, *val = apr_table_get(ctx->params, "rel");
+ if (val) {
+ if (!strcmp(rel, val)) {
+ return 1;
+ }
+ s = ap_strstr_c(val, rel);
+ if (s && (s == val || s[-1] == ' ')) {
+ s += strlen(rel);
+ if (!*s || *s == ' ') {
+ return 1;
+ }
+ }
+ }
+ return 0;
+}
+
+static int add_push(link_ctx *ctx)
+{
+ /* so, we have read a Link header and need to decide
+ * if we transform it into a push.
+ */
+ if (has_relation(ctx, "preload") && !has_param(ctx, "nopush")) {
+ apr_uri_t uri;
+ if (apr_uri_parse(ctx->pool, ctx->link, &uri) == APR_SUCCESS) {
+ if (uri.path && same_authority(ctx->req, &uri)) {
+ char *path;
+ const char *method;
+ apr_table_t *headers;
+ h2_request *req;
+ h2_push *push;
+
+ /* We only want to generate pushes for resources in the
+                 * same authority as the original request.
+ * icing: i think that is wise, otherwise we really need to
+ * check that the vhost/server is available and uses the same
+ * TLS (if any) parameters.
+ */
+ path = apr_uri_unparse(ctx->pool, &uri, APR_URI_UNP_OMITSITEPART);
+ push = apr_pcalloc(ctx->pool, sizeof(*push));
+ switch (ctx->push_policy) {
+ case H2_PUSH_HEAD:
+ method = "HEAD";
+ break;
+ default:
+ method = "GET";
+ break;
+ }
+ headers = apr_table_make(ctx->pool, 5);
+ apr_table_do(set_push_header, headers, ctx->req->headers, NULL);
+ req = h2_req_create(0, ctx->pool, method, ctx->req->scheme,
+ ctx->req->authority, path, headers,
+ ctx->req->serialize);
+ /* atm, we do not push on pushes */
+ h2_request_end_headers(req, ctx->pool, 1, 0);
+ push->req = req;
+ if (has_param(ctx, "critical")) {
+ h2_priority *prio = apr_pcalloc(ctx->pool, sizeof(*prio));
+ prio->dependency = H2_DEPENDANT_BEFORE;
+ push->priority = prio;
+ }
+ if (!ctx->pushes) {
+ ctx->pushes = apr_array_make(ctx->pool, 5, sizeof(h2_push*));
+ }
+ APR_ARRAY_PUSH(ctx->pushes, h2_push*) = push;
+ }
+ }
+ }
+ return 0;
+}
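+
+/* Example (illustrative): on a request for https://example.org/index.html, a
+ * response header
+ *     Link: </assets/app.css>; rel=preload
+ * produces a push candidate for GET /assets/app.css (HEAD under the "head"
+ * push policy), while
+ *     Link: </assets/app.css>; rel=preload; nopush
+ * or a link pointing to another authority is skipped. */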
+
+static void inspect_link(link_ctx *ctx, const char *s, size_t slen)
+{
+ /* RFC 5988 <https://tools.ietf.org/html/rfc5988#section-6.2.1>
+ Link = "Link" ":" #link-value
+ link-value = "<" URI-Reference ">" *( ";" link-param )
+ link-param = ( ( "rel" "=" relation-types )
+ | ( "anchor" "=" <"> URI-Reference <"> )
+ | ( "rev" "=" relation-types )
+ | ( "hreflang" "=" Language-Tag )
+ | ( "media" "=" ( MediaDesc | ( <"> MediaDesc <"> ) ) )
+ | ( "title" "=" quoted-string )
+ | ( "title*" "=" ext-value )
+ | ( "type" "=" ( media-type | quoted-mt ) )
+ | ( link-extension ) )
+ link-extension = ( parmname [ "=" ( ptoken | quoted-string ) ] )
+ | ( ext-name-star "=" ext-value )
+ ext-name-star = parmname "*" ; reserved for RFC2231-profiled
+ ; extensions. Whitespace NOT
+ ; allowed in between.
+ ptoken = 1*ptokenchar
+ ptokenchar = "!" | "#" | "$" | "%" | "&" | "'" | "("
+ | ")" | "*" | "+" | "-" | "." | "/" | DIGIT
+ | ":" | "<" | "=" | ">" | "?" | "@" | ALPHA
+ | "[" | "]" | "^" | "_" | "`" | "{" | "|"
+ | "}" | "~"
+ media-type = type-name "/" subtype-name
+ quoted-mt = <"> media-type <">
+ relation-types = relation-type
+ | <"> relation-type *( 1*SP relation-type ) <">
+ relation-type = reg-rel-type | ext-rel-type
+ reg-rel-type = LOALPHA *( LOALPHA | DIGIT | "." | "-" )
+ ext-rel-type = URI
+
+ and from <https://tools.ietf.org/html/rfc5987>
+ parmname = 1*attr-char
+ attr-char = ALPHA / DIGIT
+ / "!" / "#" / "$" / "&" / "+" / "-" / "."
+ / "^" / "_" / "`" / "|" / "~"
+ */
+
+ ctx->s = s;
+ ctx->slen = slen;
+ ctx->i = 0;
+
+ while (read_link(ctx)) {
+ init_params(ctx);
+ while (read_param(ctx)) {
+ /* nop */
+ }
+ add_push(ctx);
+ if (!read_sep(ctx)) {
+ break;
+ }
+ }
+}
+
+static int head_iter(void *ctx, const char *key, const char *value)
+{
+ if (!apr_strnatcasecmp("link", key)) {
+ inspect_link(ctx, value, strlen(value));
+ }
+ return 1;
+}
+
+apr_array_header_t *h2_push_collect(apr_pool_t *p, const h2_request *req,
+ int push_policy, const h2_headers *res)
+{
+ if (req && push_policy != H2_PUSH_NONE) {
+ /* Collect push candidates from the request/response pair.
+ *
+ * One source for pushes are "rel=preload" link headers
+ * in the response.
+ *
+ * TODO: This may be extended in the future by hooks or callbacks
+ * where other modules can provide push information directly.
+ */
+ if (res->headers) {
+ link_ctx ctx;
+
+ memset(&ctx, 0, sizeof(ctx));
+ ctx.req = req;
+ ctx.push_policy = push_policy;
+ ctx.pool = p;
+
+ apr_table_do(head_iter, &ctx, res->headers, NULL);
+ if (ctx.pushes) {
+ apr_table_setn(res->headers, "push-policy",
+ policy_str(push_policy));
+ }
+ return ctx.pushes;
+ }
+ }
+ return NULL;
+}
+
+/*******************************************************************************
+ * push diary
+ *
+ * - The push diary keeps track of resources already PUSHed via HTTP/2 on this
+ * connection. It records a hash value from the absolute URL of the resource
+ * pushed.
+ * - Lacking openssl, it uses 'apr_hashfunc_default' for the value
+ * - with openssl, it uses SHA256 to calculate the hash value
+ * - whatever the method to generate the hash, the diary keeps a maximum of 64
+ * bits per hash, limiting the memory consumption to about
+ * H2PushDiarySize * 8
+ * bytes. Entries are sorted by most recently used and oldest entries are
+ * forgotten first.
+ * - Clients can initialize/replace the push diary by sending a 'Cache-Digest'
+ * header. Currently, this is the base64url encoded value of the cache digest
+ * as specified in https://datatracker.ietf.org/doc/draft-kazuho-h2-cache-digest/
+ * This draft can be expected to evolve and the definition of the header
+ *   The draft is expected to evolve, and the definition of the header
+ *   will be refined there.
+ * limit the amount of bits per hash value even further. For a good description
+ * of GCS, read here:
+ * http://giovanni.bajo.it/post/47119962313/golomb-coded-sets-smaller-than-bloom-filters
+ * - This means that the push diary might be initialized with hash values of far
+ *   fewer than 64 bits, leading to more false positives, but a smaller digest size.
+ ******************************************************************************/
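+
+/* A rough feel for the numbers involved (informal example, not normative):
+ * with H2PushDiarySize set to 256, the diary keeps at most 256 hashes of
+ * 8 bytes each, i.e. roughly 2 KB of hash storage per connection. A client
+ * digest announcing log2n=8 and log2p=7 carries 8+7 = 15 relevant bits per
+ * hash; with such a digest the diary operates on far fewer than 64 bits and
+ * false positives become more likely, as noted above.
+ */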
+
+
+#define GCSLOG_LEVEL APLOG_TRACE1
+
+typedef struct h2_push_diary_entry {
+ apr_uint64_t hash;
+} h2_push_diary_entry;
+
+
+#ifdef H2_OPENSSL
+static void sha256_update(SHA256_CTX *ctx, const char *s)
+{
+ SHA256_Update(ctx, s, strlen(s));
+}
+
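+/* Hash a push: SHA-256 over scheme "://" authority path, then fold the first
+ * 8 digest bytes (big-endian) into a 64 bit value and keep its upper
+ * diary->mask_bits bits. */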
+static void calc_sha256_hash(h2_push_diary *diary, apr_uint64_t *phash, h2_push *push)
+{
+ SHA256_CTX sha256;
+ apr_uint64_t val;
+ unsigned char hash[SHA256_DIGEST_LENGTH];
+ int i;
+
+ SHA256_Init(&sha256);
+ sha256_update(&sha256, push->req->scheme);
+ sha256_update(&sha256, "://");
+ sha256_update(&sha256, push->req->authority);
+ sha256_update(&sha256, push->req->path);
+ SHA256_Final(hash, &sha256);
+
+ val = 0;
+ for (i = 0; i != sizeof(val); ++i)
+ val = val * 256 + hash[i];
+ *phash = val >> (64 - diary->mask_bits);
+}
+#endif
+
+
+static unsigned int val_apr_hash(const char *str)
+{
+ apr_ssize_t len = strlen(str);
+ return apr_hashfunc_default(str, &len);
+}
+
+static void calc_apr_hash(h2_push_diary *diary, apr_uint64_t *phash, h2_push *push)
+{
+ apr_uint64_t val;
+#if APR_UINT64_MAX > UINT_MAX
+ val = ((apr_uint64_t)(val_apr_hash(push->req->scheme)) << 32);
+ val ^= ((apr_uint64_t)(val_apr_hash(push->req->authority)) << 16);
+ val ^= val_apr_hash(push->req->path);
+#else
+ val = val_apr_hash(push->req->scheme);
+ val ^= val_apr_hash(push->req->authority);
+ val ^= val_apr_hash(push->req->path);
+#endif
+ *phash = val;
+}
+
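+/* Round n up to the next power of two, with a minimum of 2; e.g. 5 -> 8 and
+ * 8 -> 8. Works by smearing the highest set bit into all lower bit positions
+ * and adding one. */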
+static apr_int32_t ceil_power_of_2(apr_int32_t n)
+{
+ if (n <= 2) return 2;
+ --n;
+ n |= n >> 1;
+ n |= n >> 2;
+ n |= n >> 4;
+ n |= n >> 8;
+ n |= n >> 16;
+ return ++n;
+}
+
+static h2_push_diary *diary_create(apr_pool_t *p, h2_push_digest_type dtype,
+ int N)
+{
+ h2_push_diary *diary = NULL;
+
+ if (N > 0) {
+ diary = apr_pcalloc(p, sizeof(*diary));
+
+ diary->NMax = ceil_power_of_2(N);
+ diary->N = diary->NMax;
+ /* the mask we use in value comparison depends on where we got
+ * the values from. If we calculate them ourselves, we can use
+ * the full 64 bits.
+         * If we set the diary via a compressed golomb set, we have fewer
+         * relevant bits and need to use a smaller mask. */
+ diary->mask_bits = 64;
+ /* grows by doubling, start with a power of 2 */
+ diary->entries = apr_array_make(p, 16, sizeof(h2_push_diary_entry));
+
+ switch (dtype) {
+#ifdef H2_OPENSSL
+ case H2_PUSH_DIGEST_SHA256:
+ diary->dtype = H2_PUSH_DIGEST_SHA256;
+ diary->dcalc = calc_sha256_hash;
+ break;
+#endif /* ifdef H2_OPENSSL */
+ default:
+ diary->dtype = H2_PUSH_DIGEST_APR_HASH;
+ diary->dcalc = calc_apr_hash;
+ break;
+ }
+ }
+
+ return diary;
+}
+
+h2_push_diary *h2_push_diary_create(apr_pool_t *p, int N)
+{
+ return diary_create(p, H2_PUSH_DIGEST_SHA256, N);
+}
+
+static int h2_push_diary_find(h2_push_diary *diary, apr_uint64_t hash)
+{
+ if (diary) {
+ h2_push_diary_entry *e;
+ int i;
+
+ /* search from the end, where the last accessed digests are */
+ for (i = diary->entries->nelts-1; i >= 0; --i) {
+ e = &APR_ARRAY_IDX(diary->entries, i, h2_push_diary_entry);
+ if (e->hash == hash) {
+ return i;
+ }
+ }
+ }
+ return -1;
+}
+
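+/* Keep the diary in LRU order: the entry at index idx is moved to the end of
+ * the array (the most recently used position) and a pointer to its new slot
+ * is returned. */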
+static h2_push_diary_entry *move_to_last(h2_push_diary *diary, apr_size_t idx)
+{
+ h2_push_diary_entry *entries = (h2_push_diary_entry*)diary->entries->elts;
+ h2_push_diary_entry e;
+ apr_size_t lastidx = diary->entries->nelts-1;
+
+ /* move entry[idx] to the end */
+ if (idx < lastidx) {
+ e = entries[idx];
+ memmove(entries+idx, entries+idx+1, sizeof(e) * (lastidx - idx));
+ entries[lastidx] = e;
+ }
+ return &entries[lastidx];
+}
+
+static void h2_push_diary_append(h2_push_diary *diary, h2_push_diary_entry *e)
+{
+ h2_push_diary_entry *ne;
+
+ if (diary->entries->nelts < diary->N) {
+ /* append a new diary entry at the end */
+ APR_ARRAY_PUSH(diary->entries, h2_push_diary_entry) = *e;
+ ne = &APR_ARRAY_IDX(diary->entries, diary->entries->nelts-1, h2_push_diary_entry);
+ }
+ else {
+ /* replace content with new digest. keeps memory usage constant once diary is full */
+ ne = move_to_last(diary, 0);
+ *ne = *e;
+ }
+ /* Intentional no APLOGNO */
+ ap_log_perror(APLOG_MARK, GCSLOG_LEVEL, 0, diary->entries->pool,
+ "push_diary_append: %"APR_UINT64_T_HEX_FMT, ne->hash);
+}
+
+apr_array_header_t *h2_push_diary_update(h2_session *session, apr_array_header_t *pushes)
+{
+ apr_array_header_t *npushes = pushes;
+ h2_push_diary_entry e;
+ int i, idx;
+
+ if (session->push_diary && pushes) {
+ npushes = NULL;
+
+ for (i = 0; i < pushes->nelts; ++i) {
+ h2_push *push;
+
+ push = APR_ARRAY_IDX(pushes, i, h2_push*);
+ session->push_diary->dcalc(session->push_diary, &e.hash, push);
+ idx = h2_push_diary_find(session->push_diary, e.hash);
+ if (idx >= 0) {
+ /* Intentional no APLOGNO */
+ ap_log_cerror(APLOG_MARK, GCSLOG_LEVEL, 0, session->c,
+ "push_diary_update: already there PUSH %s", push->req->path);
+ move_to_last(session->push_diary, idx);
+ }
+ else {
+ /* Intentional no APLOGNO */
+ ap_log_cerror(APLOG_MARK, GCSLOG_LEVEL, 0, session->c,
+ "push_diary_update: adding PUSH %s", push->req->path);
+ if (!npushes) {
+ npushes = apr_array_make(pushes->pool, 5, sizeof(h2_push_diary_entry*));
+ }
+ APR_ARRAY_PUSH(npushes, h2_push*) = push;
+ h2_push_diary_append(session->push_diary, &e);
+ }
+ }
+ }
+ return npushes;
+}
+
+apr_array_header_t *h2_push_collect_update(h2_stream *stream,
+ const struct h2_request *req,
+ const struct h2_headers *res)
+{
+ h2_session *session = stream->session;
+ const char *cache_digest = apr_table_get(req->headers, "Cache-Digest");
+ apr_array_header_t *pushes;
+ apr_status_t status;
+
+ if (cache_digest && session->push_diary) {
+ status = h2_push_diary_digest64_set(session->push_diary, req->authority,
+ cache_digest, stream->pool);
+ if (status != APR_SUCCESS) {
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, session->c,
+ H2_SSSN_LOG(APLOGNO(03057), session,
+ "push diary set from Cache-Digest: %s"), cache_digest);
+ }
+ }
+ pushes = h2_push_collect(stream->pool, req, stream->push_policy, res);
+ return h2_push_diary_update(stream->session, pushes);
+}
+
+static apr_int32_t h2_log2inv(unsigned char log2)
+{
+ return log2? (1 << log2) : 1;
+}
+
+
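+/* State for writing a Golomb coded set: 'data'/'datalen' is the growing
+ * output buffer, 'offset' and 'bit' track the current write position,
+ * 'last' holds the previously encoded value (only deltas are written) and
+ * 'fixed_bits' is the number of binary low bits per value (equal to log2p). */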
+typedef struct {
+ h2_push_diary *diary;
+ unsigned char log2p;
+ int mask_bits;
+ int delta_bits;
+ int fixed_bits;
+ apr_uint64_t fixed_mask;
+ apr_pool_t *pool;
+ unsigned char *data;
+ apr_size_t datalen;
+ apr_size_t offset;
+ unsigned int bit;
+ apr_uint64_t last;
+} gset_encoder;
+
+static int cmp_puint64(const void *p1, const void *p2)
+{
+ const apr_uint64_t *pu1 = p1, *pu2 = p2;
+ return (*pu1 > *pu2)? 1 : ((*pu1 == *pu2)? 0 : -1);
+}
+
+/* In golomb bit stream encoding, bit 0 is the most significant bit of the
+ * first char, or more generally:
+ * char(bit/8) & cbit_mask[(bit % 8)]
+ */
+static unsigned char cbit_mask[] = {
+ 0x80u,
+ 0x40u,
+ 0x20u,
+ 0x10u,
+ 0x08u,
+ 0x04u,
+ 0x02u,
+ 0x01u,
+};
+
+static apr_status_t gset_encode_bit(gset_encoder *encoder, int bit)
+{
+ if (++encoder->bit >= 8) {
+ if (++encoder->offset >= encoder->datalen) {
+ apr_size_t nlen = encoder->datalen*2;
+ unsigned char *ndata = apr_pcalloc(encoder->pool, nlen);
+ if (!ndata) {
+ return APR_ENOMEM;
+ }
+ memcpy(ndata, encoder->data, encoder->datalen);
+ encoder->data = ndata;
+ encoder->datalen = nlen;
+ }
+ encoder->bit = 0;
+ encoder->data[encoder->offset] = 0xffu;
+ }
+ if (!bit) {
+ encoder->data[encoder->offset] &= ~cbit_mask[encoder->bit];
+ }
+ return APR_SUCCESS;
+}
+
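+/* Golomb-Rice encode the next value as a delta to the previously written
+ * one: the high part of the delta (delta >> fixed_bits) goes out in unary,
+ * one 1-bit per unit terminated by a 0-bit, followed by the low fixed_bits
+ * of the delta in binary. Small sketch: with fixed_bits=2 a delta of 6
+ * (binary 110) is written as "10" (unary 1, then the stop bit) followed by
+ * "10" (the two fixed bits). */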
+static apr_status_t gset_encode_next(gset_encoder *encoder, apr_uint64_t pval)
+{
+ apr_uint64_t delta, flex_bits;
+ apr_status_t status = APR_SUCCESS;
+ int i;
+
+ delta = pval - encoder->last;
+ encoder->last = pval;
+ flex_bits = (delta >> encoder->fixed_bits);
+ /* Intentional no APLOGNO */
+ ap_log_perror(APLOG_MARK, GCSLOG_LEVEL, 0, encoder->pool,
+ "h2_push_diary_enc: val=%"APR_UINT64_T_HEX_FMT", delta=%"
+                  APR_UINT64_T_HEX_FMT" flex_bits=%"APR_UINT64_T_FMT
+                  ", fixed_bits=%d, fixed_val=%"APR_UINT64_T_HEX_FMT,
+ pval, delta, flex_bits, encoder->fixed_bits, delta&encoder->fixed_mask);
+ for (; flex_bits != 0; --flex_bits) {
+ status = gset_encode_bit(encoder, 1);
+ if (status != APR_SUCCESS) {
+ return status;
+ }
+ }
+ status = gset_encode_bit(encoder, 0);
+ if (status != APR_SUCCESS) {
+ return status;
+ }
+
+ for (i = encoder->fixed_bits-1; i >= 0; --i) {
+ status = gset_encode_bit(encoder, (delta >> i) & 1);
+ if (status != APR_SUCCESS) {
+ return status;
+ }
+ }
+ return APR_SUCCESS;
+}
+
+/**
+ * Get a cache digest as described in
+ * https://datatracker.ietf.org/doc/draft-kazuho-h2-cache-digest/
+ * from the contents of the push diary.
+ *
+ * @param diary the diary to calculate the digest from
+ * @param pool the pool to use
+ * @param maxP the maximum precision (P) to encode into the digest
+ * @param authority the authority to get the data for, use NULL/"*" for all
+ * @param pdata on successful return, the binary cache digest
+ * @param plen on successful return, the length of the binary data
+ */
+apr_status_t h2_push_diary_digest_get(h2_push_diary *diary, apr_pool_t *pool,
+ int maxP, const char *authority,
+ const char **pdata, apr_size_t *plen)
+{
+ int nelts, N, i;
+ unsigned char log2n, log2pmax;
+ gset_encoder encoder;
+ apr_uint64_t *hashes;
+ apr_size_t hash_count;
+
+ nelts = diary->entries->nelts;
+
+ if (nelts > APR_UINT32_MAX) {
+ /* should not happen */
+ return APR_ENOTIMPL;
+ }
+ N = ceil_power_of_2(nelts);
+ log2n = h2_log2(N);
+
+ /* Now log2p is the max number of relevant bits, so that
+     * log2p + log2n == mask_bits. We can use a lower log2p
+ * and have a shorter set encoding...
+ */
+ log2pmax = h2_log2(ceil_power_of_2(maxP));
+
+ memset(&encoder, 0, sizeof(encoder));
+ encoder.diary = diary;
+ encoder.log2p = H2MIN(diary->mask_bits - log2n, log2pmax);
+ encoder.mask_bits = log2n + encoder.log2p;
+ encoder.delta_bits = diary->mask_bits - encoder.mask_bits;
+ encoder.fixed_bits = encoder.log2p;
+ encoder.fixed_mask = 1;
+ encoder.fixed_mask = (encoder.fixed_mask << encoder.fixed_bits) - 1;
+ encoder.pool = pool;
+ encoder.datalen = 512;
+ encoder.data = apr_pcalloc(encoder.pool, encoder.datalen);
+
+ encoder.data[0] = log2n;
+ encoder.data[1] = encoder.log2p;
+ encoder.offset = 1;
+ encoder.bit = 8;
+ encoder.last = 0;
+
+ /* Intentional no APLOGNO */
+ ap_log_perror(APLOG_MARK, GCSLOG_LEVEL, 0, pool,
+ "h2_push_diary_digest_get: %d entries, N=%d, log2n=%d, "
+ "mask_bits=%d, enc.mask_bits=%d, delta_bits=%d, enc.log2p=%d, authority=%s",
+ (int)nelts, (int)N, (int)log2n, diary->mask_bits,
+ (int)encoder.mask_bits, (int)encoder.delta_bits,
+ (int)encoder.log2p, authority);
+
+ if (!authority || !diary->authority
+ || !strcmp("*", authority) || !strcmp(diary->authority, authority)) {
+ hash_count = diary->entries->nelts;
+        hashes = apr_pcalloc(encoder.pool, hash_count * sizeof(apr_uint64_t));
+ for (i = 0; i < hash_count; ++i) {
+ hashes[i] = ((&APR_ARRAY_IDX(diary->entries, i, h2_push_diary_entry))->hash
+ >> encoder.delta_bits);
+ }
+
+ qsort(hashes, hash_count, sizeof(apr_uint64_t), cmp_puint64);
+ for (i = 0; i < hash_count; ++i) {
+ if (!i || (hashes[i] != hashes[i-1])) {
+ gset_encode_next(&encoder, hashes[i]);
+ }
+ }
+ /* Intentional no APLOGNO */
+ ap_log_perror(APLOG_MARK, GCSLOG_LEVEL, 0, pool,
+ "h2_push_diary_digest_get: golomb compressed hashes, %d bytes",
+ (int)encoder.offset + 1);
+ }
+ *pdata = (const char *)encoder.data;
+ *plen = encoder.offset + 1;
+
+ return APR_SUCCESS;
+}
+
+typedef struct {
+ h2_push_diary *diary;
+ apr_pool_t *pool;
+ unsigned char log2p;
+ const unsigned char *data;
+ apr_size_t datalen;
+ apr_size_t offset;
+ unsigned int bit;
+ apr_uint64_t last_val;
+} gset_decoder;
+
+static int gset_decode_next_bit(gset_decoder *decoder)
+{
+ if (++decoder->bit >= 8) {
+ if (++decoder->offset >= decoder->datalen) {
+ return -1;
+ }
+ decoder->bit = 0;
+ }
+ return (decoder->data[decoder->offset] & cbit_mask[decoder->bit])? 1 : 0;
+}
+
+static apr_status_t gset_decode_next(gset_decoder *decoder, apr_uint64_t *phash)
+{
+ apr_uint64_t flex = 0, fixed = 0, delta;
+ int i;
+
+    /* read 1-bits until we encounter a 0, then read log2p fixed bits.
+     * On a malformed bit-string, this will not fail, but may produce
+     * results that are far too large. Luckily, the diary will modulo the hash.
+ */
+ while (1) {
+ int bit = gset_decode_next_bit(decoder);
+ if (bit == -1) {
+ return APR_EINVAL;
+ }
+ if (!bit) {
+ break;
+ }
+ ++flex;
+ }
+
+ for (i = 0; i < decoder->log2p; ++i) {
+ int bit = gset_decode_next_bit(decoder);
+ if (bit == -1) {
+ return APR_EINVAL;
+ }
+ fixed = (fixed << 1) | bit;
+ }
+
+ delta = (flex << decoder->log2p) | fixed;
+ *phash = delta + decoder->last_val;
+ decoder->last_val = *phash;
+
+ /* Intentional no APLOGNO */
+ ap_log_perror(APLOG_MARK, GCSLOG_LEVEL, 0, decoder->pool,
+ "h2_push_diary_digest_dec: val=%"APR_UINT64_T_HEX_FMT", delta=%"
+ APR_UINT64_T_HEX_FMT", flex=%d, fixed=%"APR_UINT64_T_HEX_FMT,
+ *phash, delta, (int)flex, fixed);
+
+ return APR_SUCCESS;
+}
+
+/**
+ * Initialize the push diary by a cache digest as described in
+ * https://datatracker.ietf.org/doc/draft-kazuho-h2-cache-digest/
+ *
+ * @param diary the diary to set the digest into
+ * @param authority the authority to set the data for
+ * @param data the binary cache digest
+ * @param len the length of the cache digest
+ * @return APR_EINVAL if digest was not successfully parsed
+ */
+apr_status_t h2_push_diary_digest_set(h2_push_diary *diary, const char *authority,
+ const char *data, apr_size_t len)
+{
+ gset_decoder decoder;
+ unsigned char log2n, log2p;
+ int N, i;
+ apr_pool_t *pool = diary->entries->pool;
+ h2_push_diary_entry e;
+ apr_status_t status = APR_SUCCESS;
+
+ if (len < 2) {
+ /* at least this should be there */
+ return APR_EINVAL;
+ }
+ log2n = data[0];
+ log2p = data[1];
+ diary->mask_bits = log2n + log2p;
+ if (diary->mask_bits > 64) {
+ /* cannot handle */
+ return APR_ENOTIMPL;
+ }
+
+ /* whatever is in the digest, it replaces the diary entries */
+ apr_array_clear(diary->entries);
+ if (!authority || !strcmp("*", authority)) {
+ diary->authority = NULL;
+ }
+ else if (!diary->authority || strcmp(diary->authority, authority)) {
+ diary->authority = apr_pstrdup(diary->entries->pool, authority);
+ }
+
+ N = h2_log2inv(log2n + log2p);
+
+ decoder.diary = diary;
+ decoder.pool = pool;
+ decoder.log2p = log2p;
+ decoder.data = (const unsigned char*)data;
+ decoder.datalen = len;
+ decoder.offset = 1;
+ decoder.bit = 8;
+ decoder.last_val = 0;
+
+ diary->N = N;
+ /* Determine effective N we use for storage */
+ if (!N) {
+ /* a totally empty cache digest. someone tells us that she has no
+ * entries in the cache at all. Use our own preferences for N+mask
+ */
+ diary->N = diary->NMax;
+ return APR_SUCCESS;
+ }
+ else if (N > diary->NMax) {
+        /* Store no more than the diary is configured to hold. We would open
+         * ourselves up to DoS attacks otherwise. */
+ diary->N = diary->NMax;
+ }
+
+ /* Intentional no APLOGNO */
+ ap_log_perror(APLOG_MARK, GCSLOG_LEVEL, 0, pool,
+ "h2_push_diary_digest_set: N=%d, log2n=%d, "
+ "diary->mask_bits=%d, dec.log2p=%d",
+ (int)diary->N, (int)log2n, diary->mask_bits,
+ (int)decoder.log2p);
+
+ for (i = 0; i < diary->N; ++i) {
+ if (gset_decode_next(&decoder, &e.hash) != APR_SUCCESS) {
+ /* the data may have less than N values */
+ break;
+ }
+ h2_push_diary_append(diary, &e);
+ }
+
+ /* Intentional no APLOGNO */
+ ap_log_perror(APLOG_MARK, GCSLOG_LEVEL, 0, pool,
+ "h2_push_diary_digest_set: diary now with %d entries, mask_bits=%d",
+ (int)diary->entries->nelts, diary->mask_bits);
+ return status;
+}
+
+apr_status_t h2_push_diary_digest64_set(h2_push_diary *diary, const char *authority,
+ const char *data64url, apr_pool_t *pool)
+{
+ const char *data;
+ apr_size_t len = h2_util_base64url_decode(&data, data64url, pool);
+ /* Intentional no APLOGNO */
+ ap_log_perror(APLOG_MARK, GCSLOG_LEVEL, 0, pool,
+ "h2_push_diary_digest64_set: digest=%s, dlen=%d",
+ data64url, (int)len);
+ return h2_push_diary_digest_set(diary, authority, data, len);
+}
+
diff --git a/modules/http2/h2_push.h b/modules/http2/h2_push.h
new file mode 100644
index 0000000..bc24e68
--- /dev/null
+++ b/modules/http2/h2_push.h
@@ -0,0 +1,120 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __mod_h2__h2_push__
+#define __mod_h2__h2_push__
+
+#include "h2.h"
+
+struct h2_request;
+struct h2_headers;
+struct h2_ngheader;
+struct h2_session;
+struct h2_stream;
+
+typedef struct h2_push {
+ const struct h2_request *req;
+ h2_priority *priority;
+} h2_push;
+
+typedef enum {
+ H2_PUSH_DIGEST_APR_HASH,
+ H2_PUSH_DIGEST_SHA256
+} h2_push_digest_type;
+
+typedef struct h2_push_diary h2_push_diary;
+
+typedef void h2_push_digest_calc(h2_push_diary *diary, apr_uint64_t *phash, h2_push *push);
+
+struct h2_push_diary {
+ apr_array_header_t *entries;
+ int NMax; /* Maximum for N, should size change be necessary */
+ int N; /* Current maximum number of entries, power of 2 */
+ apr_uint64_t mask; /* mask for relevant bits */
+ unsigned int mask_bits; /* number of relevant bits */
+ const char *authority;
+ h2_push_digest_type dtype;
+ h2_push_digest_calc *dcalc;
+};
+
+/**
+ * Determine the list of h2_push'es to send to the client on behalf of
+ * the given request/response pair.
+ *
+ * @param p the pool to use
+ * @param req the request from the client
+ * @param push_policy the push policy in effect for this request
+ * @param res the response from the server
+ * @return array of h2_push addresses or NULL
+ */
+apr_array_header_t *h2_push_collect(apr_pool_t *p,
+ const struct h2_request *req,
+ int push_policy,
+ const struct h2_headers *res);
+
+/**
+ * Create a new push diary for the given maximum number of entries.
+ *
+ * @param p the pool to use
+ * @param N the max number of entries, rounded up to 2^x
+ * @return the created diary, might be NULL if N is 0
+ */
+h2_push_diary *h2_push_diary_create(apr_pool_t *p, int N);
+
+/**
+ * Filters the given pushes against the diary and returns only those pushes
+ * that were newly entered in the diary.
+ */
+apr_array_header_t *h2_push_diary_update(struct h2_session *session, apr_array_header_t *pushes);
+
+/**
+ * Collect pushes for the given request/response pair, enter them into the
+ * diary and return those pushes newly entered.
+ */
+apr_array_header_t *h2_push_collect_update(struct h2_stream *stream,
+ const struct h2_request *req,
+ const struct h2_headers *res);
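+
+/*
+ * Rough usage sketch (simplified): a session creates its diary once via
+ * h2_push_diary_create(pool, N). For each request/response pair, the stream
+ * then calls h2_push_collect_update(stream, req, res) and submits the
+ * returned pushes; resources already recorded in the diary are filtered out
+ * and not returned again.
+ */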
+/**
+ * Get a cache digest as described in
+ * https://datatracker.ietf.org/doc/draft-kazuho-h2-cache-digest/
+ * from the contents of the push diary.
+ *
+ * @param diary the diary to calculate the digest from
+ * @param p the pool to use
+ * @param maxP the maximum precision (P) to encode into the digest
+ * @param authority the authority to get the data for, use NULL/"*" for all
+ * @param pdata on successful return, the binary cache digest
+ * @param plen on successful return, the length of the binary data
+ */
+apr_status_t h2_push_diary_digest_get(h2_push_diary *diary, apr_pool_t *p,
+ int maxP, const char *authority,
+ const char **pdata, apr_size_t *plen);
+
+/**
+ * Initialize the push diary by a cache digest as described in
+ * https://datatracker.ietf.org/doc/draft-kazuho-h2-cache-digest/
+ *
+ * @param diary the diary to set the digest into
+ * @param authority the authority to set the data for
+ * @param data the binary cache digest
+ * @param len the length of the cache digest
+ * @return APR_EINVAL if digest was not successfully parsed
+ */
+apr_status_t h2_push_diary_digest_set(h2_push_diary *diary, const char *authority,
+ const char *data, apr_size_t len);
+
+apr_status_t h2_push_diary_digest64_set(h2_push_diary *diary, const char *authority,
+ const char *data64url, apr_pool_t *pool);
+
+#endif /* defined(__mod_h2__h2_push__) */
diff --git a/modules/http2/h2_request.c b/modules/http2/h2_request.c
new file mode 100644
index 0000000..8899c4f
--- /dev/null
+++ b/modules/http2/h2_request.c
@@ -0,0 +1,339 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <assert.h>
+
+#include <apr_strings.h>
+
+#include <httpd.h>
+#include <http_core.h>
+#include <http_connection.h>
+#include <http_protocol.h>
+#include <http_request.h>
+#include <http_log.h>
+#include <http_vhost.h>
+#include <util_filter.h>
+#include <ap_mpm.h>
+#include <mod_core.h>
+#include <scoreboard.h>
+
+#include "h2_private.h"
+#include "h2_config.h"
+#include "h2_push.h"
+#include "h2_request.h"
+#include "h2_util.h"
+
+
+typedef struct {
+ apr_table_t *headers;
+ apr_pool_t *pool;
+ apr_status_t status;
+} h1_ctx;
+
+static int set_h1_header(void *ctx, const char *key, const char *value)
+{
+ h1_ctx *x = ctx;
+ x->status = h2_req_add_header(x->headers, x->pool, key, strlen(key),
+ value, strlen(value));
+ return (x->status == APR_SUCCESS)? 1 : 0;
+}
+
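+/* Build an h2_request from an already parsed request_rec: copy method,
+ * scheme, authority and path, append the port to the authority when it is
+ * not the default for the scheme, and copy all incoming headers. */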
+apr_status_t h2_request_rcreate(h2_request **preq, apr_pool_t *pool,
+ request_rec *r)
+{
+ h2_request *req;
+ const char *scheme, *authority, *path;
+ h1_ctx x;
+
+ *preq = NULL;
+ scheme = apr_pstrdup(pool, r->parsed_uri.scheme? r->parsed_uri.scheme
+ : ap_http_scheme(r));
+ authority = apr_pstrdup(pool, r->hostname);
+ path = apr_uri_unparse(pool, &r->parsed_uri, APR_URI_UNP_OMITSITEPART);
+
+ if (!r->method || !scheme || !r->hostname || !path) {
+ return APR_EINVAL;
+ }
+
+ if (!ap_strchr_c(authority, ':') && r->server && r->server->port) {
+ apr_port_t defport = apr_uri_port_of_scheme(scheme);
+ if (defport != r->server->port) {
+ /* port info missing and port is not default for scheme: append */
+ authority = apr_psprintf(pool, "%s:%d", authority,
+ (int)r->server->port);
+ }
+ }
+
+ req = apr_pcalloc(pool, sizeof(*req));
+ req->method = apr_pstrdup(pool, r->method);
+ req->scheme = scheme;
+ req->authority = authority;
+ req->path = path;
+ req->headers = apr_table_make(pool, 10);
+ if (r->server) {
+ req->serialize = h2_config_geti(h2_config_sget(r->server),
+ H2_CONF_SER_HEADERS);
+ }
+
+ x.pool = pool;
+ x.headers = req->headers;
+ x.status = APR_SUCCESS;
+ apr_table_do(set_h1_header, &x, r->headers_in, NULL);
+
+ *preq = req;
+ return x.status;
+}
+
+apr_status_t h2_request_add_header(h2_request *req, apr_pool_t *pool,
+ const char *name, size_t nlen,
+ const char *value, size_t vlen)
+{
+ apr_status_t status = APR_SUCCESS;
+
+ if (nlen <= 0) {
+ return status;
+ }
+
+ if (name[0] == ':') {
+        /* pseudo header, see RFC 7540 ch. 8.1.2.3, must always come first */
+ if (!apr_is_empty_table(req->headers)) {
+ ap_log_perror(APLOG_MARK, APLOG_ERR, 0, pool,
+ APLOGNO(02917)
+ "h2_request: pseudo header after request start");
+ return APR_EGENERAL;
+ }
+
+ if (H2_HEADER_METHOD_LEN == nlen
+ && !strncmp(H2_HEADER_METHOD, name, nlen)) {
+ req->method = apr_pstrndup(pool, value, vlen);
+ }
+ else if (H2_HEADER_SCHEME_LEN == nlen
+ && !strncmp(H2_HEADER_SCHEME, name, nlen)) {
+ req->scheme = apr_pstrndup(pool, value, vlen);
+ }
+ else if (H2_HEADER_PATH_LEN == nlen
+ && !strncmp(H2_HEADER_PATH, name, nlen)) {
+ req->path = apr_pstrndup(pool, value, vlen);
+ }
+ else if (H2_HEADER_AUTH_LEN == nlen
+ && !strncmp(H2_HEADER_AUTH, name, nlen)) {
+ req->authority = apr_pstrndup(pool, value, vlen);
+ }
+ else {
+ char buffer[32];
+ memset(buffer, 0, 32);
+ strncpy(buffer, name, (nlen > 31)? 31 : nlen);
+ ap_log_perror(APLOG_MARK, APLOG_WARNING, 0, pool,
+ APLOGNO(02954)
+ "h2_request: ignoring unknown pseudo header %s",
+ buffer);
+ }
+ }
+ else {
+ /* non-pseudo header, append to work bucket of stream */
+ status = h2_req_add_header(req->headers, pool, name, nlen, value, vlen);
+ }
+
+ return status;
+}
+
+apr_status_t h2_request_end_headers(h2_request *req, apr_pool_t *pool, int eos, size_t raw_bytes)
+{
+ const char *s;
+
+ /* rfc7540, ch. 8.1.2.3:
+ * - if we have :authority, it overrides any Host header
+     * - :authority MUST be omitted when converting h1->h2, so we
+     *   might get a stream without it, but then Host needs to be there */
+ if (!req->authority) {
+ const char *host = apr_table_get(req->headers, "Host");
+ if (!host) {
+ return APR_BADARG;
+ }
+ req->authority = host;
+ }
+ else {
+ apr_table_setn(req->headers, "Host", req->authority);
+ }
+
+ s = apr_table_get(req->headers, "Content-Length");
+ if (!s) {
+ /* HTTP/2 does not need a Content-Length for framing, but our
+         * internal request processing is accustomed to HTTP/1.1, so we
+ * need to either add a Content-Length or a Transfer-Encoding
+ * if any content can be expected. */
+ if (!eos) {
+ /* We have not seen a content-length and have no eos,
+ * simulate a chunked encoding for our HTTP/1.1 infrastructure,
+ * in case we have "H2SerializeHeaders on" here
+ */
+ req->chunked = 1;
+ apr_table_mergen(req->headers, "Transfer-Encoding", "chunked");
+ }
+ else if (apr_table_get(req->headers, "Content-Type")) {
+ /* If we have a content-type, but already seen eos, no more
+ * data will come. Signal a zero content length explicitly.
+ */
+ apr_table_setn(req->headers, "Content-Length", "0");
+ }
+ }
+ req->raw_bytes += raw_bytes;
+
+ return APR_SUCCESS;
+}
+
+h2_request *h2_request_clone(apr_pool_t *p, const h2_request *src)
+{
+ h2_request *dst = apr_pmemdup(p, src, sizeof(*dst));
+ dst->method = apr_pstrdup(p, src->method);
+ dst->scheme = apr_pstrdup(p, src->scheme);
+ dst->authority = apr_pstrdup(p, src->authority);
+ dst->path = apr_pstrdup(p, src->path);
+ dst->headers = apr_table_clone(p, src->headers);
+ return dst;
+}
+
+request_rec *h2_request_create_rec(const h2_request *req, conn_rec *c)
+{
+ int access_status = HTTP_OK;
+ const char *rpath;
+ apr_pool_t *p;
+ request_rec *r;
+ const char *s;
+
+ apr_pool_create(&p, c->pool);
+ apr_pool_tag(p, "request");
+ r = apr_pcalloc(p, sizeof(request_rec));
+ AP_READ_REQUEST_ENTRY((intptr_t)r, (uintptr_t)c);
+ r->pool = p;
+ r->connection = c;
+ r->server = c->base_server;
+
+ r->user = NULL;
+ r->ap_auth_type = NULL;
+
+ r->allowed_methods = ap_make_method_list(p, 2);
+
+ r->headers_in = apr_table_clone(r->pool, req->headers);
+ r->trailers_in = apr_table_make(r->pool, 5);
+ r->subprocess_env = apr_table_make(r->pool, 25);
+ r->headers_out = apr_table_make(r->pool, 12);
+ r->err_headers_out = apr_table_make(r->pool, 5);
+ r->trailers_out = apr_table_make(r->pool, 5);
+ r->notes = apr_table_make(r->pool, 5);
+
+ r->request_config = ap_create_request_config(r->pool);
+ /* Must be set before we run create request hook */
+
+ r->proto_output_filters = c->output_filters;
+ r->output_filters = r->proto_output_filters;
+ r->proto_input_filters = c->input_filters;
+ r->input_filters = r->proto_input_filters;
+ ap_run_create_request(r);
+ r->per_dir_config = r->server->lookup_defaults;
+
+ r->sent_bodyct = 0; /* bytect isn't for body */
+
+ r->read_length = 0;
+ r->read_body = REQUEST_NO_BODY;
+
+ r->status = HTTP_OK; /* Until further notice */
+ r->header_only = 0;
+ r->the_request = NULL;
+
+ /* Begin by presuming any module can make its own path_info assumptions,
+ * until some module interjects and changes the value.
+ */
+ r->used_path_info = AP_REQ_DEFAULT_PATH_INFO;
+
+ r->useragent_addr = c->client_addr;
+ r->useragent_ip = c->client_ip;
+
+ ap_run_pre_read_request(r, c);
+
+ /* Time to populate r with the data we have. */
+ r->request_time = req->request_time;
+ r->method = req->method;
+ /* Provide quick information about the request method as soon as known */
+ r->method_number = ap_method_number_of(r->method);
+ if (r->method_number == M_GET && r->method[0] == 'H') {
+ r->header_only = 1;
+ }
+
+ rpath = (req->path ? req->path : "");
+ ap_parse_uri(r, rpath);
+ r->protocol = (char*)"HTTP/2.0";
+ r->proto_num = HTTP_VERSION(2, 0);
+
+ r->the_request = apr_psprintf(r->pool, "%s %s %s",
+ r->method, rpath, r->protocol);
+
+ /* update what we think the virtual host is based on the headers we've
+ * now read. may update status.
+     * Leave r->hostname empty, the vhost lookup will parse it from our Host:
+     * header, otherwise we get complaints about port numbers.
+ */
+ r->hostname = NULL;
+ ap_update_vhost_from_headers(r);
+
+ /* we may have switched to another server */
+ r->per_dir_config = r->server->lookup_defaults;
+
+ s = apr_table_get(r->headers_in, "Expect");
+ if (s && s[0]) {
+ if (ap_cstr_casecmp(s, "100-continue") == 0) {
+ r->expecting_100 = 1;
+ }
+ else {
+ r->status = HTTP_EXPECTATION_FAILED;
+ ap_send_error_response(r, 0);
+ }
+ }
+
+ /*
+ * Add the HTTP_IN filter here to ensure that ap_discard_request_body
+ * called by ap_die and by ap_send_error_response works correctly on
+ * status codes that do not cause the connection to be dropped and
+ * in situations where the connection should be kept alive.
+ */
+ ap_add_input_filter_handle(ap_http_input_filter_handle,
+ NULL, r, r->connection);
+
+ if (access_status != HTTP_OK
+ || (access_status = ap_run_post_read_request(r))) {
+ /* Request check post hooks failed. An example of this would be a
+ * request for a vhost where h2 is disabled --> 421.
+ */
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, c, APLOGNO(03367)
+ "h2_request: access_status=%d, request_create failed",
+ access_status);
+ ap_die(access_status, r);
+ ap_update_child_status(c->sbh, SERVER_BUSY_LOG, r);
+ ap_run_log_transaction(r);
+ r = NULL;
+ goto traceout;
+ }
+
+ AP_READ_REQUEST_SUCCESS((uintptr_t)r, (char *)r->method,
+ (char *)r->uri, (char *)r->server->defn_name,
+ r->status);
+ return r;
+traceout:
+ AP_READ_REQUEST_FAILURE((uintptr_t)r);
+ return r;
+}
+
+
diff --git a/modules/http2/h2_request.h b/modules/http2/h2_request.h
new file mode 100644
index 0000000..48aee09
--- /dev/null
+++ b/modules/http2/h2_request.h
@@ -0,0 +1,48 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __mod_h2__h2_request__
+#define __mod_h2__h2_request__
+
+#include "h2.h"
+
+apr_status_t h2_request_rcreate(h2_request **preq, apr_pool_t *pool,
+ request_rec *r);
+
+apr_status_t h2_request_add_header(h2_request *req, apr_pool_t *pool,
+ const char *name, size_t nlen,
+ const char *value, size_t vlen);
+
+apr_status_t h2_request_add_trailer(h2_request *req, apr_pool_t *pool,
+ const char *name, size_t nlen,
+ const char *value, size_t vlen);
+
+apr_status_t h2_request_end_headers(h2_request *req, apr_pool_t *pool, int eos, size_t raw_bytes);
+
+h2_request *h2_request_clone(apr_pool_t *p, const h2_request *src);
+
+/**
+ * Create a request_rec representing the h2_request to be
+ * processed on the given connection.
+ *
+ * @param req the h2 request to process
+ * @param conn the connection to process the request on
+ * @return the request_rec representing the request
+ */
+request_rec *h2_request_create_rec(const h2_request *req, conn_rec *conn);
+
+
+#endif /* defined(__mod_h2__h2_request__) */
diff --git a/modules/http2/h2_session.c b/modules/http2/h2_session.c
new file mode 100644
index 0000000..ed96cf0
--- /dev/null
+++ b/modules/http2/h2_session.c
@@ -0,0 +1,2360 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <assert.h>
+#include <stddef.h>
+#include <apr_thread_cond.h>
+#include <apr_base64.h>
+#include <apr_strings.h>
+
+#include <ap_mpm.h>
+
+#include <httpd.h>
+#include <http_core.h>
+#include <http_config.h>
+#include <http_log.h>
+#include <scoreboard.h>
+
+#include <mpm_common.h>
+
+#include "h2_private.h"
+#include "h2.h"
+#include "h2_bucket_beam.h"
+#include "h2_bucket_eos.h"
+#include "h2_config.h"
+#include "h2_ctx.h"
+#include "h2_filter.h"
+#include "h2_h2.h"
+#include "h2_mplx.h"
+#include "h2_push.h"
+#include "h2_request.h"
+#include "h2_headers.h"
+#include "h2_stream.h"
+#include "h2_task.h"
+#include "h2_session.h"
+#include "h2_util.h"
+#include "h2_version.h"
+#include "h2_workers.h"
+
+
+static apr_status_t dispatch_master(h2_session *session);
+static apr_status_t h2_session_read(h2_session *session, int block);
+static void transit(h2_session *session, const char *action,
+ h2_session_state nstate);
+
+static void on_stream_state_enter(void *ctx, h2_stream *stream);
+static void on_stream_state_event(void *ctx, h2_stream *stream, h2_stream_event_t ev);
+static void on_stream_event(void *ctx, h2_stream *stream, h2_stream_event_t ev);
+
+static int h2_session_status_from_apr_status(apr_status_t rv)
+{
+ if (rv == APR_SUCCESS) {
+ return NGHTTP2_NO_ERROR;
+ }
+ else if (APR_STATUS_IS_EAGAIN(rv)) {
+ return NGHTTP2_ERR_WOULDBLOCK;
+ }
+ else if (APR_STATUS_IS_EOF(rv)) {
+ return NGHTTP2_ERR_EOF;
+ }
+ return NGHTTP2_ERR_PROTO;
+}
+
+h2_stream *h2_session_stream_get(h2_session *session, int stream_id)
+{
+ return nghttp2_session_get_stream_user_data(session->ngh2, stream_id);
+}
+
+static void dispatch_event(h2_session *session, h2_session_event_t ev,
+ int err, const char *msg);
+
+void h2_session_event(h2_session *session, h2_session_event_t ev,
+ int err, const char *msg)
+{
+ dispatch_event(session, ev, err, msg);
+}
+
+static int rst_unprocessed_stream(h2_stream *stream, void *ctx)
+{
+ int unprocessed = (!h2_stream_was_closed(stream)
+ && (H2_STREAM_CLIENT_INITIATED(stream->id)?
+ (!stream->session->local.accepting
+ && stream->id > stream->session->local.accepted_max)
+ :
+ (!stream->session->remote.accepting
+ && stream->id > stream->session->remote.accepted_max))
+ );
+ if (unprocessed) {
+ h2_stream_rst(stream, H2_ERR_NO_ERROR);
+ return 0;
+ }
+ return 1;
+}
+
+static void cleanup_unprocessed_streams(h2_session *session)
+{
+ h2_mplx_stream_do(session->mplx, rst_unprocessed_stream, session);
+}
+
+static h2_stream *h2_session_open_stream(h2_session *session, int stream_id,
+ int initiated_on)
+{
+ h2_stream * stream;
+ apr_pool_t *stream_pool;
+
+ apr_pool_create(&stream_pool, session->pool);
+ apr_pool_tag(stream_pool, "h2_stream");
+
+ stream = h2_stream_create(stream_id, stream_pool, session,
+ session->monitor, initiated_on);
+ if (stream) {
+ nghttp2_session_set_stream_user_data(session->ngh2, stream_id, stream);
+ }
+ return stream;
+}
+
+/**
+ * Determine the importance of streams when scheduling tasks.
+ * - if both streams depend on the same one, compare weights
+ * - if one stream is closer to the root, prioritize that one
+ * - if both are on the same level, use the weight of their root
+ * level ancestors
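+ *   Example: two streams hanging off the same parent are ordered purely by
+ *   their weights, while a stream sitting directly at the root level is
+ *   preferred over one that sits deeper in the dependency tree.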
+ */
+static int spri_cmp(int sid1, nghttp2_stream *s1,
+ int sid2, nghttp2_stream *s2, h2_session *session)
+{
+ nghttp2_stream *p1, *p2;
+
+ p1 = nghttp2_stream_get_parent(s1);
+ p2 = nghttp2_stream_get_parent(s2);
+
+ if (p1 == p2) {
+ int32_t w1, w2;
+
+ w1 = nghttp2_stream_get_weight(s1);
+ w2 = nghttp2_stream_get_weight(s2);
+ return w2 - w1;
+ }
+ else if (!p1) {
+ /* stream 1 closer to root */
+ return -1;
+ }
+ else if (!p2) {
+ /* stream 2 closer to root */
+ return 1;
+ }
+ return spri_cmp(sid1, p1, sid2, p2, session);
+}
+
+static int stream_pri_cmp(int sid1, int sid2, void *ctx)
+{
+ h2_session *session = ctx;
+ nghttp2_stream *s1, *s2;
+
+ s1 = nghttp2_session_find_stream(session->ngh2, sid1);
+ s2 = nghttp2_session_find_stream(session->ngh2, sid2);
+
+ if (s1 == s2) {
+ return 0;
+ }
+ else if (!s1) {
+ return 1;
+ }
+ else if (!s2) {
+ return -1;
+ }
+ return spri_cmp(sid1, s1, sid2, s2, session);
+}
+
+/*
+ * Callback when nghttp2 wants to send bytes back to the client.
+ */
+static ssize_t send_cb(nghttp2_session *ngh2,
+ const uint8_t *data, size_t length,
+ int flags, void *userp)
+{
+ h2_session *session = (h2_session *)userp;
+ apr_status_t status;
+ (void)ngh2;
+ (void)flags;
+
+ status = h2_conn_io_write(&session->io, (const char *)data, length);
+ if (status == APR_SUCCESS) {
+ return length;
+ }
+ if (APR_STATUS_IS_EAGAIN(status)) {
+ return NGHTTP2_ERR_WOULDBLOCK;
+ }
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, session->c, APLOGNO(03062)
+ "h2_session: send error");
+ return h2_session_status_from_apr_status(status);
+}
+
+static int on_invalid_frame_recv_cb(nghttp2_session *ngh2,
+ const nghttp2_frame *frame,
+ int error, void *userp)
+{
+ h2_session *session = (h2_session *)userp;
+ (void)ngh2;
+
+ if (APLOGcdebug(session->c)) {
+ char buffer[256];
+
+ h2_util_frame_print(frame, buffer, sizeof(buffer)/sizeof(buffer[0]));
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c,
+ H2_SSSN_LOG(APLOGNO(03063), session,
+ "recv invalid FRAME[%s], frames=%ld/%ld (r/s)"),
+ buffer, (long)session->frames_received,
+ (long)session->frames_sent);
+ }
+ return 0;
+}
+
+static int on_data_chunk_recv_cb(nghttp2_session *ngh2, uint8_t flags,
+ int32_t stream_id,
+ const uint8_t *data, size_t len, void *userp)
+{
+ h2_session *session = (h2_session *)userp;
+ apr_status_t status = APR_EINVAL;
+ h2_stream * stream;
+ int rv = 0;
+
+ stream = h2_session_stream_get(session, stream_id);
+ if (stream) {
+ status = h2_stream_recv_DATA(stream, flags, data, len);
+ dispatch_event(session, H2_SESSION_EV_STREAM_CHANGE, 0, "stream data rcvd");
+ }
+ else {
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, APLOGNO(03064)
+ "h2_stream(%ld-%d): on_data_chunk for unknown stream",
+ session->id, (int)stream_id);
+ rv = NGHTTP2_ERR_CALLBACK_FAILURE;
+ }
+
+ if (status != APR_SUCCESS) {
+ /* count this as consumed explicitly as no one will read it */
+ nghttp2_session_consume(session->ngh2, stream_id, len);
+ }
+ return rv;
+}
+
+static int on_stream_close_cb(nghttp2_session *ngh2, int32_t stream_id,
+ uint32_t error_code, void *userp)
+{
+ h2_session *session = (h2_session *)userp;
+ h2_stream *stream;
+
+ (void)ngh2;
+ stream = h2_session_stream_get(session, stream_id);
+ if (stream) {
+ if (error_code) {
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c,
+ H2_STRM_LOG(APLOGNO(03065), stream,
+ "closing with err=%d %s"),
+ (int)error_code, h2_h2_err_description(error_code));
+ h2_stream_rst(stream, error_code);
+ }
+ }
+ return 0;
+}
+
+static int on_begin_headers_cb(nghttp2_session *ngh2,
+ const nghttp2_frame *frame, void *userp)
+{
+ h2_session *session = (h2_session *)userp;
+ h2_stream *s;
+
+ /* We may see HEADERs at the start of a stream or after all DATA
+     * frames of a stream, carrying trailers. */
+ (void)ngh2;
+ s = h2_session_stream_get(session, frame->hd.stream_id);
+ if (s) {
+ /* nop */
+ }
+ else {
+ s = h2_session_open_stream(userp, frame->hd.stream_id, 0);
+ }
+ return s? 0 : NGHTTP2_ERR_START_STREAM_NOT_ALLOWED;
+}
+
+static int on_header_cb(nghttp2_session *ngh2, const nghttp2_frame *frame,
+ const uint8_t *name, size_t namelen,
+ const uint8_t *value, size_t valuelen,
+ uint8_t flags,
+ void *userp)
+{
+ h2_session *session = (h2_session *)userp;
+ h2_stream * stream;
+ apr_status_t status;
+
+ (void)flags;
+ stream = h2_session_stream_get(session, frame->hd.stream_id);
+ if (!stream) {
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, APLOGNO(02920)
+ "h2_stream(%ld-%d): on_header unknown stream",
+ session->id, (int)frame->hd.stream_id);
+ return NGHTTP2_ERR_TEMPORAL_CALLBACK_FAILURE;
+ }
+
+ status = h2_stream_add_header(stream, (const char *)name, namelen,
+ (const char *)value, valuelen);
+ if (status != APR_SUCCESS && !h2_stream_is_ready(stream)) {
+ return NGHTTP2_ERR_TEMPORAL_CALLBACK_FAILURE;
+ }
+ return 0;
+}
+
+/**
+ * nghttp2 session has received a complete frame. Most are used by nghttp2
+ * for processing of internal state. Some, like HEADER and DATA frames,
+ * we need to act on.
+ */
+static int on_frame_recv_cb(nghttp2_session *ng2s,
+ const nghttp2_frame *frame,
+ void *userp)
+{
+ h2_session *session = (h2_session *)userp;
+ h2_stream *stream;
+ apr_status_t rv = APR_SUCCESS;
+
+ if (APLOGcdebug(session->c)) {
+ char buffer[256];
+
+ h2_util_frame_print(frame, buffer, sizeof(buffer)/sizeof(buffer[0]));
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c,
+ H2_SSSN_LOG(APLOGNO(03066), session,
+ "recv FRAME[%s], frames=%ld/%ld (r/s)"),
+ buffer, (long)session->frames_received,
+ (long)session->frames_sent);
+ }
+
+ ++session->frames_received;
+ switch (frame->hd.type) {
+ case NGHTTP2_HEADERS:
+ /* This can be HEADERS for a new stream, defining the request,
+             * or HEADERS may come after DATA at the end of a stream as in
+ * trailers */
+ stream = h2_session_stream_get(session, frame->hd.stream_id);
+ if (stream) {
+ rv = h2_stream_recv_frame(stream, NGHTTP2_HEADERS, frame->hd.flags,
+ frame->hd.length + H2_FRAME_HDR_LEN);
+ }
+ break;
+ case NGHTTP2_DATA:
+ stream = h2_session_stream_get(session, frame->hd.stream_id);
+ if (stream) {
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c,
+ H2_STRM_LOG(APLOGNO(02923), stream,
+ "DATA, len=%ld, flags=%d"),
+ (long)frame->hd.length, frame->hd.flags);
+ rv = h2_stream_recv_frame(stream, NGHTTP2_DATA, frame->hd.flags,
+ frame->hd.length + H2_FRAME_HDR_LEN);
+ }
+ break;
+ case NGHTTP2_PRIORITY:
+ session->reprioritize = 1;
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, session->c,
+ "h2_stream(%ld-%d): PRIORITY frame "
+ " weight=%d, dependsOn=%d, exclusive=%d",
+ session->id, (int)frame->hd.stream_id,
+ frame->priority.pri_spec.weight,
+ frame->priority.pri_spec.stream_id,
+ frame->priority.pri_spec.exclusive);
+ break;
+ case NGHTTP2_WINDOW_UPDATE:
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, session->c,
+ "h2_stream(%ld-%d): WINDOW_UPDATE incr=%d",
+ session->id, (int)frame->hd.stream_id,
+ frame->window_update.window_size_increment);
+ if (nghttp2_session_want_write(session->ngh2)) {
+ dispatch_event(session, H2_SESSION_EV_FRAME_RCVD, 0, "window update");
+ }
+ break;
+ case NGHTTP2_RST_STREAM:
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, APLOGNO(03067)
+                          "h2_stream(%ld-%d): RST_STREAM by client, error=%d",
+ session->id, (int)frame->hd.stream_id,
+ (int)frame->rst_stream.error_code);
+ stream = h2_session_stream_get(session, frame->hd.stream_id);
+ if (stream && stream->initiated_on) {
+ ++session->pushes_reset;
+ }
+ else {
+ ++session->streams_reset;
+ }
+ break;
+ case NGHTTP2_GOAWAY:
+ if (frame->goaway.error_code == 0
+ && frame->goaway.last_stream_id == ((1u << 31) - 1)) {
+ /* shutdown notice. Should not come from a client... */
+ session->remote.accepting = 0;
+ }
+ else {
+ session->remote.accepted_max = frame->goaway.last_stream_id;
+ dispatch_event(session, H2_SESSION_EV_REMOTE_GOAWAY,
+ frame->goaway.error_code, NULL);
+ }
+ break;
+ case NGHTTP2_SETTINGS:
+ if (APLOGctrace2(session->c)) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, session->c,
+ H2_SSSN_MSG(session, "SETTINGS, len=%ld"), (long)frame->hd.length);
+ }
+ break;
+ default:
+ if (APLOGctrace2(session->c)) {
+ char buffer[256];
+
+ h2_util_frame_print(frame, buffer,
+ sizeof(buffer)/sizeof(buffer[0]));
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, session->c,
+ H2_SSSN_MSG(session, "on_frame_rcv %s"), buffer);
+ }
+ break;
+ }
+
+ if (session->state == H2_SESSION_ST_IDLE) {
+ /* We received a frame, but session is in state IDLE. That means the frame
+ * did not really progress any of the (possibly) open streams. It was a meta
+ * frame, e.g. SETTINGS/WINDOW_UPDATE/unknown/etc.
+ * Remember: IDLE means we cannot send because either there are no streams open or
+ * all open streams are blocked on exhausted WINDOWs for outgoing data.
+ * The more frames we receive that do not change this, the less interested we
+ * become in serving this connection. This is expressed in increasing "idle_delays".
+ * Eventually, the connection will timeout and we'll close it. */
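+        /* For example: 500 idle frames against 4 busy ones give a ratio of
+         * 125, so idle_delay becomes 125 msec (ratios above 1000 are capped
+         * at 1 second). */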
+ session->idle_frames = H2MIN(session->idle_frames + 1, session->frames_received);
+ ap_log_cerror( APLOG_MARK, APLOG_TRACE2, 0, session->c,
+ H2_SSSN_MSG(session, "session has %ld idle frames"),
+ (long)session->idle_frames);
+ if (session->idle_frames > 10) {
+ apr_size_t busy_frames = H2MAX(session->frames_received - session->idle_frames, 1);
+ int idle_ratio = (int)(session->idle_frames / busy_frames);
+ if (idle_ratio > 100) {
+ session->idle_delay = apr_time_from_msec(H2MIN(1000, idle_ratio));
+ }
+ else if (idle_ratio > 10) {
+ session->idle_delay = apr_time_from_msec(10);
+ }
+ else if (idle_ratio > 1) {
+ session->idle_delay = apr_time_from_msec(1);
+ }
+ else {
+ session->idle_delay = 0;
+ }
+ }
+ }
+
+ if (APR_SUCCESS != rv) return NGHTTP2_ERR_PROTO;
+ return 0;
+}
+
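+/* Returns non-zero iff it is ok to keep sending DATA to the client right now,
+ * i.e. there are no pending master events and the connection io does not
+ * need a flush first. */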
+static int h2_session_continue_data(h2_session *session) {
+ if (h2_mplx_has_master_events(session->mplx)) {
+ return 0;
+ }
+ if (h2_conn_io_needs_flush(&session->io)) {
+ return 0;
+ }
+ return 1;
+}
+
+static char immortal_zeros[H2_MAX_PADLEN];
+
+static int on_send_data_cb(nghttp2_session *ngh2,
+ nghttp2_frame *frame,
+ const uint8_t *framehd,
+ size_t length,
+ nghttp2_data_source *source,
+ void *userp)
+{
+ apr_status_t status = APR_SUCCESS;
+ h2_session *session = (h2_session *)userp;
+ int stream_id = (int)frame->hd.stream_id;
+ unsigned char padlen;
+ int eos;
+ h2_stream *stream;
+ apr_bucket *b;
+ apr_off_t len = length;
+
+ (void)ngh2;
+ (void)source;
+ if (!h2_session_continue_data(session)) {
+ return NGHTTP2_ERR_WOULDBLOCK;
+ }
+
+ if (frame->data.padlen > H2_MAX_PADLEN) {
+ return NGHTTP2_ERR_PROTO;
+ }
+ padlen = (unsigned char)frame->data.padlen;
+
+ stream = h2_session_stream_get(session, stream_id);
+ if (!stream) {
+ ap_log_cerror(APLOG_MARK, APLOG_ERR, APR_NOTFOUND, session->c,
+ APLOGNO(02924)
+ "h2_stream(%ld-%d): send_data, stream not found",
+ session->id, (int)stream_id);
+ return NGHTTP2_ERR_CALLBACK_FAILURE;
+ }
+
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, session->c,
+ H2_STRM_MSG(stream, "send_data_cb for %ld bytes"),
+ (long)length);
+
+ status = h2_conn_io_write(&session->io, (const char *)framehd, 9);
+ if (padlen && status == APR_SUCCESS) {
+ status = h2_conn_io_write(&session->io, (const char *)&padlen, 1);
+ }
+
+ if (status != APR_SUCCESS) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, session->c,
+ H2_STRM_MSG(stream, "writing frame header"));
+ return NGHTTP2_ERR_CALLBACK_FAILURE;
+ }
+
+ status = h2_stream_read_to(stream, session->bbtmp, &len, &eos);
+ if (status != APR_SUCCESS) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, session->c,
+ H2_STRM_MSG(stream, "send_data_cb, reading stream"));
+ apr_brigade_cleanup(session->bbtmp);
+ return NGHTTP2_ERR_CALLBACK_FAILURE;
+ }
+ else if (len != length) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, session->c,
+ H2_STRM_MSG(stream, "send_data_cb, wanted %ld bytes, "
+ "got %ld from stream"), (long)length, (long)len);
+ apr_brigade_cleanup(session->bbtmp);
+ return NGHTTP2_ERR_CALLBACK_FAILURE;
+ }
+
+ if (padlen) {
+ b = apr_bucket_immortal_create(immortal_zeros, padlen,
+ session->c->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(session->bbtmp, b);
+ }
+
+ status = h2_conn_io_pass(&session->io, session->bbtmp);
+ apr_brigade_cleanup(session->bbtmp);
+
+ if (status == APR_SUCCESS) {
+ stream->out_data_frames++;
+ stream->out_data_octets += length;
+ return 0;
+ }
+ else {
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, session->c,
+ H2_STRM_LOG(APLOGNO(02925), stream, "failed send_data_cb"));
+ return NGHTTP2_ERR_CALLBACK_FAILURE;
+ }
+}
+
+static int on_frame_send_cb(nghttp2_session *ngh2,
+ const nghttp2_frame *frame,
+ void *user_data)
+{
+ h2_session *session = user_data;
+ h2_stream *stream;
+ int stream_id = frame->hd.stream_id;
+
+ ++session->frames_sent;
+ switch (frame->hd.type) {
+ case NGHTTP2_PUSH_PROMISE:
+ /* PUSH_PROMISE we report on the promised stream */
+ stream_id = frame->push_promise.promised_stream_id;
+ break;
+ default:
+ break;
+ }
+
+ if (APLOGcdebug(session->c)) {
+ char buffer[256];
+
+ h2_util_frame_print(frame, buffer, sizeof(buffer)/sizeof(buffer[0]));
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c,
+ H2_SSSN_LOG(APLOGNO(03068), session,
+ "sent FRAME[%s], frames=%ld/%ld (r/s)"),
+ buffer, (long)session->frames_received,
+ (long)session->frames_sent);
+ }
+
+ stream = h2_session_stream_get(session, stream_id);
+ if (stream) {
+ h2_stream_send_frame(stream, frame->hd.type, frame->hd.flags,
+ frame->hd.length + H2_FRAME_HDR_LEN);
+ }
+ return 0;
+}
+
+#ifdef H2_NG2_INVALID_HEADER_CB
+static int on_invalid_header_cb(nghttp2_session *ngh2,
+ const nghttp2_frame *frame,
+ const uint8_t *name, size_t namelen,
+ const uint8_t *value, size_t valuelen,
+ uint8_t flags, void *user_data)
+{
+ h2_session *session = user_data;
+ h2_stream *stream;
+
+ if (APLOGcdebug(session->c)) {
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, APLOGNO(03456)
+ "h2_stream(%ld-%d): invalid header '%s: %s'",
+ session->id, (int)frame->hd.stream_id,
+ apr_pstrndup(session->pool, (const char *)name, namelen),
+ apr_pstrndup(session->pool, (const char *)value, valuelen));
+ }
+ stream = h2_session_stream_get(session, frame->hd.stream_id);
+ if (stream) {
+ h2_stream_rst(stream, NGHTTP2_PROTOCOL_ERROR);
+ }
+ return 0;
+}
+#endif
+
+#define NGH2_SET_CALLBACK(callbacks, name, fn)\
+nghttp2_session_callbacks_set_##name##_callback(callbacks, fn)
+
+static apr_status_t init_callbacks(conn_rec *c, nghttp2_session_callbacks **pcb)
+{
+ int rv = nghttp2_session_callbacks_new(pcb);
+ if (rv != 0) {
+ ap_log_cerror(APLOG_MARK, APLOG_ERR, 0, c,
+ APLOGNO(02926) "nghttp2_session_callbacks_new: %s",
+ nghttp2_strerror(rv));
+ return APR_EGENERAL;
+ }
+
+ NGH2_SET_CALLBACK(*pcb, send, send_cb);
+ NGH2_SET_CALLBACK(*pcb, on_frame_recv, on_frame_recv_cb);
+ NGH2_SET_CALLBACK(*pcb, on_invalid_frame_recv, on_invalid_frame_recv_cb);
+ NGH2_SET_CALLBACK(*pcb, on_data_chunk_recv, on_data_chunk_recv_cb);
+ NGH2_SET_CALLBACK(*pcb, on_stream_close, on_stream_close_cb);
+ NGH2_SET_CALLBACK(*pcb, on_begin_headers, on_begin_headers_cb);
+ NGH2_SET_CALLBACK(*pcb, on_header, on_header_cb);
+ NGH2_SET_CALLBACK(*pcb, send_data, on_send_data_cb);
+ NGH2_SET_CALLBACK(*pcb, on_frame_send, on_frame_send_cb);
+#ifdef H2_NG2_INVALID_HEADER_CB
+ NGH2_SET_CALLBACK(*pcb, on_invalid_header, on_invalid_header_cb);
+#endif
+ return APR_SUCCESS;
+}
+
+static apr_status_t h2_session_shutdown_notice(h2_session *session)
+{
+ apr_status_t status;
+
+ ap_assert(session);
+ if (!session->local.accepting) {
+ return APR_SUCCESS;
+ }
+
+ nghttp2_submit_shutdown_notice(session->ngh2);
+ session->local.accepting = 0;
+ status = nghttp2_session_send(session->ngh2);
+ if (status == APR_SUCCESS) {
+ status = h2_conn_io_flush(&session->io);
+ }
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c,
+ H2_SSSN_LOG(APLOGNO(03457), session, "sent shutdown notice"));
+ return status;
+}
+
+static apr_status_t h2_session_shutdown(h2_session *session, int error,
+ const char *msg, int force_close)
+{
+ apr_status_t status = APR_SUCCESS;
+
+ ap_assert(session);
+ if (session->local.shutdown) {
+ return APR_SUCCESS;
+ }
+ if (!msg && error) {
+ msg = nghttp2_strerror(error);
+ }
+
+ if (error || force_close) {
+ /* not a graceful shutdown, we want to leave...
+ * Do not start further streams that are waiting to be scheduled.
+         * Find out the max stream id that we have processed or
+ * are still actively working on.
+ * Remove all streams greater than this number without submitting
+ * a RST_STREAM frame, since that should be clear from the GOAWAY
+ * we send. */
+ session->local.accepted_max = h2_mplx_shutdown(session->mplx);
+ session->local.error = error;
+ }
+ else {
+ /* graceful shutdown. we will continue processing all streams
+ * we have, but no longer accept new ones. Report the max stream
+ * we have received and discard all new ones. */
+ }
+
+ session->local.accepting = 0;
+ session->local.shutdown = 1;
+ if (!session->c->aborted) {
+ nghttp2_submit_goaway(session->ngh2, NGHTTP2_FLAG_NONE,
+ session->local.accepted_max,
+ error, (uint8_t*)msg, msg? strlen(msg):0);
+ status = nghttp2_session_send(session->ngh2);
+ if (status == APR_SUCCESS) {
+ status = h2_conn_io_flush(&session->io);
+ }
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c,
+ H2_SSSN_LOG(APLOGNO(03069), session,
+ "sent GOAWAY, err=%d, msg=%s"), error, msg? msg : "");
+ }
+ dispatch_event(session, H2_SESSION_EV_LOCAL_GOAWAY, error, msg);
+ return status;
+}
+
+static apr_status_t session_cleanup(h2_session *session, const char *trigger)
+{
+ conn_rec *c = session->c;
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c,
+ H2_SSSN_MSG(session, "pool_cleanup"));
+
+ if (session->state != H2_SESSION_ST_DONE
+ && session->state != H2_SESSION_ST_INIT) {
+ /* Not good. The connection is being torn down and we have
+ * not sent a goaway. This is considered a protocol error and
+ * the client has to assume that any streams "in flight" may have
+ * been processed and are not safe to retry.
+ * As clients with idle connection may only learn about a closed
+         * As clients with idle connections may only learn about a closed
+ * that at least this one request will fail.
+ */
+ ap_log_cerror(APLOG_MARK, APLOG_WARNING, 0, c,
+ H2_SSSN_LOG(APLOGNO(03199), session,
+ "connection disappeared without proper "
+ "goodbye, clients will be confused, should not happen"));
+ }
+
+ transit(session, trigger, H2_SESSION_ST_CLEANUP);
+ h2_mplx_release_and_join(session->mplx, session->iowait);
+ session->mplx = NULL;
+
+ ap_assert(session->ngh2);
+ nghttp2_session_del(session->ngh2);
+ session->ngh2 = NULL;
+ h2_ctx_clear(c);
+
+ return APR_SUCCESS;
+}
+
+static apr_status_t session_pool_cleanup(void *data)
+{
+ conn_rec *c = data;
+ h2_session *session;
+ h2_ctx *ctx = h2_ctx_get(c, 0);
+
+ if (ctx && (session = h2_ctx_session_get(ctx))) {
+ /* if the session is still there, now is the last chance
+ * to perform cleanup. Normally, cleanup should have happened
+         * earlier in the connection pre_close. The main reason is that
+ * any ongoing requests on slave connections might still access
+ * data which has, at this time, already been freed. An example
+ * is mod_ssl that uses request hooks. */
+ ap_log_cerror(APLOG_MARK, APLOG_WARNING, 0, c,
+ H2_SSSN_LOG(APLOGNO(10020), session,
+ "session cleanup triggered by pool cleanup. "
+ "this should have happened earlier already."));
+ return session_cleanup(session, "pool cleanup");
+ }
+ return APR_SUCCESS;
+}
+
+static apr_status_t h2_session_create_int(h2_session **psession,
+ conn_rec *c,
+ request_rec *r,
+ h2_ctx *ctx,
+ h2_workers *workers)
+{
+ nghttp2_session_callbacks *callbacks = NULL;
+ nghttp2_option *options = NULL;
+ apr_allocator_t *allocator;
+ apr_thread_mutex_t *mutex;
+ uint32_t n;
+ apr_pool_t *pool = NULL;
+ h2_session *session;
+ apr_status_t status;
+ int rv;
+
+ *psession = NULL;
+ status = apr_allocator_create(&allocator);
+ if (status != APR_SUCCESS) {
+ return status;
+ }
+ apr_allocator_max_free_set(allocator, ap_max_mem_free);
+ apr_pool_create_ex(&pool, c->pool, NULL, allocator);
+ if (!pool) {
+ apr_allocator_destroy(allocator);
+ return APR_ENOMEM;
+ }
+ apr_pool_tag(pool, "h2_session");
+ apr_allocator_owner_set(allocator, pool);
+ status = apr_thread_mutex_create(&mutex, APR_THREAD_MUTEX_DEFAULT, pool);
+ if (status != APR_SUCCESS) {
+ apr_pool_destroy(pool);
+ return APR_ENOMEM;
+ }
+ apr_allocator_mutex_set(allocator, mutex);
+
+ session = apr_pcalloc(pool, sizeof(h2_session));
+ if (!session) {
+ return APR_ENOMEM;
+ }
+
+ *psession = session;
+ session->id = c->id;
+ session->c = c;
+ session->r = r;
+ session->s = h2_ctx_server_get(ctx);
+ session->pool = pool;
+ session->config = h2_config_sget(session->s);
+ session->workers = workers;
+
+ session->state = H2_SESSION_ST_INIT;
+ session->local.accepting = 1;
+ session->remote.accepting = 1;
+
+ session->max_stream_count = h2_config_geti(session->config,
+ H2_CONF_MAX_STREAMS);
+ session->max_stream_mem = h2_config_geti(session->config,
+ H2_CONF_STREAM_MAX_MEM);
+
+ status = apr_thread_cond_create(&session->iowait, session->pool);
+ if (status != APR_SUCCESS) {
+ apr_pool_destroy(pool);
+ return status;
+ }
+
+ session->in_pending = h2_iq_create(session->pool, (int)session->max_stream_count);
+ if (session->in_pending == NULL) {
+ apr_pool_destroy(pool);
+ return APR_ENOMEM;
+ }
+
+ session->in_process = h2_iq_create(session->pool, (int)session->max_stream_count);
+ if (session->in_process == NULL) {
+ apr_pool_destroy(pool);
+ return APR_ENOMEM;
+ }
+
+ session->monitor = apr_pcalloc(pool, sizeof(h2_stream_monitor));
+ if (session->monitor == NULL) {
+ apr_pool_destroy(pool);
+ return APR_ENOMEM;
+ }
+ session->monitor->ctx = session;
+ session->monitor->on_state_enter = on_stream_state_enter;
+ session->monitor->on_state_event = on_stream_state_event;
+ session->monitor->on_event = on_stream_event;
+
+ session->mplx = h2_mplx_create(c, session->pool, session->config,
+ workers);
+
+ /* connection input filter that feeds the session */
+ session->cin = h2_filter_cin_create(session);
+ ap_add_input_filter("H2_IN", session->cin, r, c);
+
+ h2_conn_io_init(&session->io, c, session->config);
+ session->bbtmp = apr_brigade_create(session->pool, c->bucket_alloc);
+
+ status = init_callbacks(c, &callbacks);
+ if (status != APR_SUCCESS) {
+ ap_log_cerror(APLOG_MARK, APLOG_ERR, status, c, APLOGNO(02927)
+ "nghttp2: error in init_callbacks");
+ apr_pool_destroy(pool);
+ return status;
+ }
+
+ rv = nghttp2_option_new(&options);
+ if (rv != 0) {
+ ap_log_cerror(APLOG_MARK, APLOG_ERR, APR_EGENERAL, c,
+ APLOGNO(02928) "nghttp2_option_new: %s",
+ nghttp2_strerror(rv));
+ apr_pool_destroy(pool);
+        return APR_EGENERAL;
+ }
+ nghttp2_option_set_peer_max_concurrent_streams(
+ options, (uint32_t)session->max_stream_count);
+ /* We need to handle window updates ourself, otherwise we
+ * get flooded by nghttp2. */
+ nghttp2_option_set_no_auto_window_update(options, 1);
+
+ rv = nghttp2_session_server_new2(&session->ngh2, callbacks,
+ session, options);
+ nghttp2_session_callbacks_del(callbacks);
+ nghttp2_option_del(options);
+
+ if (rv != 0) {
+ ap_log_cerror(APLOG_MARK, APLOG_ERR, APR_EGENERAL, c,
+ APLOGNO(02929) "nghttp2_session_server_new: %s",
+ nghttp2_strerror(rv));
+ apr_pool_destroy(pool);
+ return APR_ENOMEM;
+ }
+
+ n = h2_config_geti(session->config, H2_CONF_PUSH_DIARY_SIZE);
+ session->push_diary = h2_push_diary_create(session->pool, n);
+
+ if (APLOGcdebug(c)) {
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, c,
+ H2_SSSN_LOG(APLOGNO(03200), session,
+ "created, max_streams=%d, stream_mem=%d, "
+ "workers_limit=%d, workers_max=%d, "
+ "push_diary(type=%d,N=%d)"),
+ (int)session->max_stream_count,
+ (int)session->max_stream_mem,
+ session->mplx->limit_active,
+ session->mplx->max_active,
+ session->push_diary->dtype,
+ (int)session->push_diary->N);
+ }
+
+ apr_pool_pre_cleanup_register(pool, c, session_pool_cleanup);
+ return APR_SUCCESS;
+}
+
+apr_status_t h2_session_create(h2_session **psession,
+ conn_rec *c, h2_ctx *ctx, h2_workers *workers)
+{
+ return h2_session_create_int(psession, c, NULL, ctx, workers);
+}
+
+apr_status_t h2_session_rcreate(h2_session **psession,
+ request_rec *r, h2_ctx *ctx, h2_workers *workers)
+{
+ return h2_session_create_int(psession, r->connection, r, ctx, workers);
+}
+
+static apr_status_t h2_session_start(h2_session *session, int *rv)
+{
+ apr_status_t status = APR_SUCCESS;
+ nghttp2_settings_entry settings[3];
+ size_t slen;
+ int win_size;
+
+ ap_assert(session);
+ /* Start the conversation by submitting our SETTINGS frame */
+ *rv = 0;
+ if (session->r) {
+ const char *s, *cs;
+ apr_size_t dlen;
+ h2_stream * stream;
+
+ /* 'h2c' mode: we should have a 'HTTP2-Settings' header with
+ * base64 encoded client settings. */
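+        /* For illustration only (hypothetical values, not produced by this
+         * module): such an upgrade request, per RFC 7540, looks roughly like
+         *
+         *   GET / HTTP/1.1
+         *   Host: example.org
+         *   Connection: Upgrade, HTTP2-Settings
+         *   Upgrade: h2c
+         *   HTTP2-Settings: <base64url of a SETTINGS frame payload>
+         *
+         * The decoded payload is handed to nghttp2_session_upgrade() below. */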
+ s = apr_table_get(session->r->headers_in, "HTTP2-Settings");
+ if (!s) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, APR_EINVAL, session->r,
+ APLOGNO(02931)
+ "HTTP2-Settings header missing in request");
+ return APR_EINVAL;
+ }
+ cs = NULL;
+ dlen = h2_util_base64url_decode(&cs, s, session->pool);
+
+ if (APLOGrdebug(session->r)) {
+ char buffer[128];
+ h2_util_hex_dump(buffer, 128, (char*)cs, dlen);
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, session->r, APLOGNO(03070)
+ "upgrading h2c session with HTTP2-Settings: %s -> %s (%d)",
+ s, buffer, (int)dlen);
+ }
+
+ *rv = nghttp2_session_upgrade(session->ngh2, (uint8_t*)cs, dlen, NULL);
+ if (*rv != 0) {
+ status = APR_EINVAL;
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, status, session->r,
+ APLOGNO(02932) "nghttp2_session_upgrade: %s",
+ nghttp2_strerror(*rv));
+ return status;
+ }
+
+ /* Now we need to auto-open stream 1 for the request we got. */
+ stream = h2_session_open_stream(session, 1, 0);
+ if (!stream) {
+ status = APR_EGENERAL;
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, status, session->r,
+ APLOGNO(02933) "open stream 1: %s",
+ nghttp2_strerror(*rv));
+ return status;
+ }
+
+ status = h2_stream_set_request_rec(stream, session->r, 1);
+ if (status != APR_SUCCESS) {
+ return status;
+ }
+ }
+
+ slen = 0;
+ settings[slen].settings_id = NGHTTP2_SETTINGS_MAX_CONCURRENT_STREAMS;
+ settings[slen].value = (uint32_t)session->max_stream_count;
+ ++slen;
+ win_size = h2_config_geti(session->config, H2_CONF_WIN_SIZE);
+ if (win_size != H2_INITIAL_WINDOW_SIZE) {
+ settings[slen].settings_id = NGHTTP2_SETTINGS_INITIAL_WINDOW_SIZE;
+ settings[slen].value = win_size;
+ ++slen;
+ }
+
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, session->c,
+ H2_SSSN_LOG(APLOGNO(03201), session,
+ "start, INITIAL_WINDOW_SIZE=%ld, MAX_CONCURRENT_STREAMS=%d"),
+ (long)win_size, (int)session->max_stream_count);
+ *rv = nghttp2_submit_settings(session->ngh2, NGHTTP2_FLAG_NONE,
+ settings, slen);
+ if (*rv != 0) {
+ status = APR_EGENERAL;
+ ap_log_cerror(APLOG_MARK, APLOG_ERR, status, session->c,
+ H2_SSSN_LOG(APLOGNO(02935), session,
+ "nghttp2_submit_settings: %s"), nghttp2_strerror(*rv));
+ }
+ else {
+ /* use maximum possible value for connection window size. We are only
+         * interested in per-stream flow control, which has the initial window
+ * size configured above.
+ * Therefore, for our use, the connection window can only get in the
+ * way. Example: if we allow 100 streams with a 32KB window each, we
+ * buffer up to 3.2 MB of data. Unless we do separate connection window
+ * interim updates, any smaller connection window will lead to blocking
+ * in DATA flow.
+ */
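+        /* A short gloss on the call below: stream id 0 addresses the
+         * connection-level window, and the increment of
+         * NGHTTP2_MAX_WINDOW_SIZE - win_size (NGHTTP2_MAX_WINDOW_SIZE being
+         * 2^31-1 per RFC 7540) pushes that window near its maximum, so that
+         * in practice only the per-stream windows limit buffering. */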
+ *rv = nghttp2_submit_window_update(session->ngh2, NGHTTP2_FLAG_NONE,
+ 0, NGHTTP2_MAX_WINDOW_SIZE - win_size);
+ if (*rv != 0) {
+ status = APR_EGENERAL;
+ ap_log_cerror(APLOG_MARK, APLOG_ERR, status, session->c,
+ H2_SSSN_LOG(APLOGNO(02970), session,
+ "nghttp2_submit_window_update: %s"),
+ nghttp2_strerror(*rv));
+ }
+ }
+
+ return status;
+}
+
+static apr_status_t on_stream_headers(h2_session *session, h2_stream *stream,
+ h2_headers *headers, apr_off_t len,
+ int eos);
+
+static ssize_t stream_data_cb(nghttp2_session *ng2s,
+ int32_t stream_id,
+ uint8_t *buf,
+ size_t length,
+ uint32_t *data_flags,
+ nghttp2_data_source *source,
+ void *puser)
+{
+ h2_session *session = (h2_session *)puser;
+ apr_off_t nread = length;
+ int eos = 0;
+ apr_status_t status;
+ h2_stream *stream;
+ ap_assert(session);
+
+ /* The session wants to send more DATA for the stream. We need
+ * to find out how much of the requested length we can send without
+ * blocking.
+ * Indicate EOS when we encounter it or DEFERRED if the stream
+ * should be suspended. Beware of trailers.
+ */
+
+ (void)ng2s;
+ (void)buf;
+ (void)source;
+ stream = h2_session_stream_get(session, stream_id);
+ if (!stream) {
+ ap_log_cerror(APLOG_MARK, APLOG_ERR, 0, session->c,
+ APLOGNO(02937)
+ "h2_stream(%ld-%d): data_cb, stream not found",
+ session->id, (int)stream_id);
+ return NGHTTP2_ERR_CALLBACK_FAILURE;
+ }
+
+ status = h2_stream_out_prepare(stream, &nread, &eos, NULL);
+ if (nread) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, session->c,
+ H2_STRM_MSG(stream, "prepared no_copy, len=%ld, eos=%d"),
+ (long)nread, eos);
+ *data_flags |= NGHTTP2_DATA_FLAG_NO_COPY;
+ }
+
+ switch (status) {
+ case APR_SUCCESS:
+ break;
+
+ case APR_EOF:
+ eos = 1;
+ break;
+
+ case APR_ECONNRESET:
+ case APR_ECONNABORTED:
+ return NGHTTP2_ERR_CALLBACK_FAILURE;
+
+ case APR_EAGAIN:
+            /* If there is no data available, the nghttp2 session will
+             * automatically suspend this stream and not ask for more data
+             * until we explicitly resume it.
+ */
+ nread = 0;
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c,
+ H2_STRM_LOG(APLOGNO(03071), stream, "suspending"));
+ return NGHTTP2_ERR_DEFERRED;
+
+ default:
+ nread = 0;
+ ap_log_cerror(APLOG_MARK, APLOG_ERR, status, session->c,
+ H2_STRM_LOG(APLOGNO(02938), stream, "reading data"));
+ return NGHTTP2_ERR_CALLBACK_FAILURE;
+ }
+
+ if (eos) {
+ *data_flags |= NGHTTP2_DATA_FLAG_EOF;
+ }
+ return (ssize_t)nread;
+}
+
+struct h2_stream *h2_session_push(h2_session *session, h2_stream *is,
+ h2_push *push)
+{
+ h2_stream *stream;
+ h2_ngheader *ngh;
+ apr_status_t status;
+ int nid = 0;
+
+ status = h2_req_create_ngheader(&ngh, is->pool, push->req);
+ if (status == APR_SUCCESS) {
+ nid = nghttp2_submit_push_promise(session->ngh2, 0, is->id,
+ ngh->nv, ngh->nvlen, NULL);
+ }
+ if (status != APR_SUCCESS || nid <= 0) {
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, session->c,
+ H2_STRM_LOG(APLOGNO(03075), is,
+ "submitting push promise fail: %s"), nghttp2_strerror(nid));
+ return NULL;
+ }
+ ++session->pushes_promised;
+
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c,
+ H2_STRM_LOG(APLOGNO(03076), is, "SERVER_PUSH %d for %s %s on %d"),
+ nid, push->req->method, push->req->path, is->id);
+
+ stream = h2_session_open_stream(session, nid, is->id);
+ if (!stream) {
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c,
+                      H2_STRM_LOG(APLOGNO(03077), is,
+ "failed to create stream obj %d"), nid);
+ /* kill the push_promise */
+ nghttp2_submit_rst_stream(session->ngh2, NGHTTP2_FLAG_NONE, nid,
+ NGHTTP2_INTERNAL_ERROR);
+ return NULL;
+ }
+
+ h2_session_set_prio(session, stream, push->priority);
+ h2_stream_set_request(stream, push->req);
+ ++session->unsent_promises;
+ return stream;
+}
+
+static int valid_weight(float f)
+{
+ int w = (int)f;
+ return (w < NGHTTP2_MIN_WEIGHT? NGHTTP2_MIN_WEIGHT :
+ (w > NGHTTP2_MAX_WEIGHT)? NGHTTP2_MAX_WEIGHT : w);
+}
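+/* Example values (assuming nghttp2's documented bounds NGHTTP2_MIN_WEIGHT=1
+ * and NGHTTP2_MAX_WEIGHT=256): valid_weight(0.4f) -> 1,
+ * valid_weight(128.9f) -> 128, valid_weight(300.0f) -> 256.
+ * The fraction is truncated before clamping. */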
+
+apr_status_t h2_session_set_prio(h2_session *session, h2_stream *stream,
+ const h2_priority *prio)
+{
+ apr_status_t status = APR_SUCCESS;
+#ifdef H2_NG2_CHANGE_PRIO
+ nghttp2_stream *s_grandpa, *s_parent, *s;
+
+ if (prio == NULL) {
+ /* we treat this as a NOP */
+ return APR_SUCCESS;
+ }
+ s = nghttp2_session_find_stream(session->ngh2, stream->id);
+ if (!s) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, session->c,
+ H2_STRM_MSG(stream, "lookup of nghttp2_stream failed"));
+ return APR_EINVAL;
+ }
+
+ s_parent = nghttp2_stream_get_parent(s);
+ if (s_parent) {
+ nghttp2_priority_spec ps;
+ int id_parent, id_grandpa, w_parent, w;
+ int rv = 0;
+ const char *ptype = "AFTER";
+ h2_dependency dep = prio->dependency;
+
+ id_parent = nghttp2_stream_get_stream_id(s_parent);
+ s_grandpa = nghttp2_stream_get_parent(s_parent);
+ if (s_grandpa) {
+ id_grandpa = nghttp2_stream_get_stream_id(s_grandpa);
+ }
+ else {
+ /* parent of parent does not exist,
+ * only possible if parent == root */
+ dep = H2_DEPENDANT_AFTER;
+ }
+
+ switch (dep) {
+ case H2_DEPENDANT_INTERLEAVED:
+ /* PUSHed stream is to be interleaved with initiating stream.
+ * It is made a sibling of the initiating stream and gets a
+             * proportional weight [1, MAX_WEIGHT] of the initiating
+ * stream weight.
+ */
+ ptype = "INTERLEAVED";
+ w_parent = nghttp2_stream_get_weight(s_parent);
+ w = valid_weight(w_parent * ((float)prio->weight / NGHTTP2_MAX_WEIGHT));
+ nghttp2_priority_spec_init(&ps, id_grandpa, w, 0);
+ break;
+
+ case H2_DEPENDANT_BEFORE:
+            /* PUSHed stream is to be sent BEFORE the initiating stream.
+ * It gets the same weight as the initiating stream, replaces
+ * that stream in the dependency tree and has the initiating
+ * stream as child.
+ */
+ ptype = "BEFORE";
+ w = w_parent = nghttp2_stream_get_weight(s_parent);
+ nghttp2_priority_spec_init(&ps, stream->id, w_parent, 0);
+ id_grandpa = nghttp2_stream_get_stream_id(s_grandpa);
+ rv = nghttp2_session_change_stream_priority(session->ngh2, id_parent, &ps);
+ if (rv < 0) {
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, APLOGNO(03202)
+ "h2_stream(%ld-%d): PUSH BEFORE, weight=%d, "
+ "depends=%d, returned=%d",
+ session->id, id_parent, ps.weight, ps.stream_id, rv);
+ return APR_EGENERAL;
+ }
+ nghttp2_priority_spec_init(&ps, id_grandpa, w, 0);
+ break;
+
+ case H2_DEPENDANT_AFTER:
+ /* The PUSHed stream is to be sent after the initiating stream.
+             * Give it the specified weight and let it depend on the initiating
+ * stream.
+ */
+ /* fall through, it's the default */
+ default:
+ nghttp2_priority_spec_init(&ps, id_parent, valid_weight(prio->weight), 0);
+ break;
+ }
+
+
+ rv = nghttp2_session_change_stream_priority(session->ngh2, stream->id, &ps);
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c,
+ ""H2_STRM_LOG(APLOGNO(03203), stream,
+ "PUSH %s, weight=%d, depends=%d, returned=%d"),
+ ptype, ps.weight, ps.stream_id, rv);
+ status = (rv < 0)? APR_EGENERAL : APR_SUCCESS;
+ }
+#else
+ (void)session;
+ (void)stream;
+ (void)prio;
+ (void)valid_weight;
+#endif
+ return status;
+}
+
+int h2_session_push_enabled(h2_session *session)
+{
+ /* iff we can and they can and want */
+    return (session->remote.accepting /* no remote GOAWAY received yet */
+ && h2_config_geti(session->config, H2_CONF_PUSH)
+ && nghttp2_session_get_remote_settings(session->ngh2,
+ NGHTTP2_SETTINGS_ENABLE_PUSH));
+}
+
+static apr_status_t h2_session_send(h2_session *session)
+{
+ apr_interval_time_t saved_timeout;
+ int rv;
+ apr_socket_t *socket;
+
+ socket = ap_get_conn_socket(session->c);
+ if (socket) {
+ apr_socket_timeout_get(socket, &saved_timeout);
+ apr_socket_timeout_set(socket, session->s->timeout);
+ }
+
+ rv = nghttp2_session_send(session->ngh2);
+
+ if (socket) {
+ apr_socket_timeout_set(socket, saved_timeout);
+ }
+ session->have_written = 1;
+ if (rv != 0 && rv != NGHTTP2_ERR_WOULDBLOCK) {
+ if (nghttp2_is_fatal(rv)) {
+ dispatch_event(session, H2_SESSION_EV_PROTO_ERROR, rv, nghttp2_strerror(rv));
+ return APR_EGENERAL;
+ }
+ }
+
+ session->unsent_promises = 0;
+ session->unsent_submits = 0;
+
+ return APR_SUCCESS;
+}
+
+/**
+ * headers for the stream are ready.
+ */
+static apr_status_t on_stream_headers(h2_session *session, h2_stream *stream,
+ h2_headers *headers, apr_off_t len,
+ int eos)
+{
+ apr_status_t status = APR_SUCCESS;
+ int rv = 0;
+
+ ap_assert(session);
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, session->c,
+ H2_STRM_MSG(stream, "on_headers"));
+ if (headers->status < 100) {
+ h2_stream_rst(stream, headers->status);
+ goto leave;
+ }
+ else if (stream->has_response) {
+ h2_ngheader *nh;
+
+ status = h2_res_create_ngtrailer(&nh, stream->pool, headers);
+
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, session->c,
+ H2_STRM_LOG(APLOGNO(03072), stream, "submit %d trailers"),
+ (int)nh->nvlen);
+ if (status == APR_SUCCESS) {
+ rv = nghttp2_submit_trailer(session->ngh2, stream->id,
+ nh->nv, nh->nvlen);
+ }
+ else {
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, session->c,
+ H2_STRM_LOG(APLOGNO(10024), stream, "invalid trailers"));
+ h2_stream_rst(stream, NGHTTP2_PROTOCOL_ERROR);
+ }
+ goto leave;
+ }
+ else {
+ nghttp2_data_provider provider, *pprovider = NULL;
+ h2_ngheader *ngh;
+ const char *note;
+
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c,
+ H2_STRM_LOG(APLOGNO(03073), stream, "submit response %d, REMOTE_WINDOW_SIZE=%u"),
+ headers->status,
+ (unsigned int)nghttp2_session_get_stream_remote_window_size(session->ngh2, stream->id));
+
+ if (!eos || len > 0) {
+ memset(&provider, 0, sizeof(provider));
+ provider.source.fd = stream->id;
+ provider.read_callback = stream_data_cb;
+ pprovider = &provider;
+ }
+
+ /* If this stream is not a pushed one itself,
+ * and HTTP/2 server push is enabled here,
+         * and the response HTTP status is not something >= 400,
+ * and the remote side has pushing enabled,
+ * -> find and perform any pushes on this stream
+ * *before* we submit the stream response itself.
+ * This helps clients avoid opening new streams on Link
+ * headers that get pushed right afterwards.
+ *
+ * *) the response code is relevant, as we do not want to
+ * make pushes on 401 or 403 codes and friends.
+ * And if we see a 304, we do not push either
+ * as the client, having this resource in its cache, might
+         * have the pushed ones as well.
+ */
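+        /* For example (hypothetical response header, not generated here):
+         * a handler answering with
+         *     Link: </assets/style.css>; rel=preload; as=style
+         * would have that resource considered for a push by
+         * h2_stream_submit_pushes() below, provided the conditions above hold. */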
+ if (!stream->initiated_on
+ && !stream->has_response
+ && stream->request && stream->request->method
+ && !strcmp("GET", stream->request->method)
+ && (headers->status < 400)
+ && (headers->status != 304)
+ && h2_session_push_enabled(session)) {
+
+ h2_stream_submit_pushes(stream, headers);
+ }
+
+ if (!stream->pref_priority) {
+ stream->pref_priority = h2_stream_get_priority(stream, headers);
+ }
+ h2_session_set_prio(session, stream, stream->pref_priority);
+
+ note = apr_table_get(headers->notes, H2_FILTER_DEBUG_NOTE);
+ if (note && !strcmp("on", note)) {
+ int32_t connFlowIn, connFlowOut;
+
+ connFlowIn = nghttp2_session_get_effective_local_window_size(session->ngh2);
+ connFlowOut = nghttp2_session_get_remote_window_size(session->ngh2);
+ headers = h2_headers_copy(stream->pool, headers);
+ apr_table_setn(headers->headers, "conn-flow-in",
+ apr_itoa(stream->pool, connFlowIn));
+ apr_table_setn(headers->headers, "conn-flow-out",
+ apr_itoa(stream->pool, connFlowOut));
+ }
+
+ if (headers->status == 103
+ && !h2_config_geti(session->config, H2_CONF_EARLY_HINTS)) {
+ /* suppress sending this to the client, it might have triggered
+ * pushes and served its purpose nevertheless */
+ rv = 0;
+ goto leave;
+ }
+
+ status = h2_res_create_ngheader(&ngh, stream->pool, headers);
+ if (status == APR_SUCCESS) {
+ rv = nghttp2_submit_response(session->ngh2, stream->id,
+ ngh->nv, ngh->nvlen, pprovider);
+ stream->has_response = h2_headers_are_response(headers);
+ session->have_written = 1;
+
+ if (stream->initiated_on) {
+ ++session->pushes_submitted;
+ }
+ else {
+ ++session->responses_submitted;
+ }
+ }
+ else {
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, session->c,
+ H2_STRM_LOG(APLOGNO(10025), stream, "invalid response"));
+ h2_stream_rst(stream, NGHTTP2_PROTOCOL_ERROR);
+ }
+ }
+
+leave:
+ if (nghttp2_is_fatal(rv)) {
+ status = APR_EGENERAL;
+ dispatch_event(session, H2_SESSION_EV_PROTO_ERROR, rv, nghttp2_strerror(rv));
+ ap_log_cerror(APLOG_MARK, APLOG_ERR, status, session->c,
+ APLOGNO(02940) "submit_response: %s",
+ nghttp2_strerror(rv));
+ }
+
+ ++session->unsent_submits;
+
+ /* Unsent push promises are written immediately, as nghttp2
+ * 1.5.0 realizes internal stream data structures only on
+ * send and we might need them for other submits.
+ * Also, to conserve memory, we send at least every 10 submits
+ * so that nghttp2 does not buffer all outbound items too
+ * long.
+ */
+ if (status == APR_SUCCESS
+ && (session->unsent_promises || session->unsent_submits > 10)) {
+ status = h2_session_send(session);
+ }
+ return status;
+}
+
+/**
+ * A stream was resumed as new response/output data arrived.
+ */
+static apr_status_t on_stream_resume(void *ctx, h2_stream *stream)
+{
+ h2_session *session = ctx;
+ apr_status_t status = APR_EAGAIN;
+ int rv;
+ apr_off_t len = 0;
+ int eos = 0;
+ h2_headers *headers;
+
+ ap_assert(stream);
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, session->c,
+ H2_STRM_MSG(stream, "on_resume"));
+
+send_headers:
+ headers = NULL;
+ status = h2_stream_out_prepare(stream, &len, &eos, &headers);
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, status, session->c,
+ H2_STRM_MSG(stream, "prepared len=%ld, eos=%d"),
+ (long)len, eos);
+ if (headers) {
+ status = on_stream_headers(session, stream, headers, len, eos);
+ if (status != APR_SUCCESS || stream->rst_error) {
+ return status;
+ }
+ goto send_headers;
+ }
+ else if (status != APR_EAGAIN) {
+ /* we have DATA to send */
+ if (!stream->has_response) {
+ /* but no response */
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c,
+ H2_STRM_LOG(APLOGNO(03466), stream,
+ "no response, RST_STREAM"));
+ h2_stream_rst(stream, H2_ERR_PROTOCOL_ERROR);
+ return APR_SUCCESS;
+ }
+ rv = nghttp2_session_resume_data(session->ngh2, stream->id);
+ session->have_written = 1;
+ ap_log_cerror(APLOG_MARK, nghttp2_is_fatal(rv)?
+ APLOG_ERR : APLOG_DEBUG, 0, session->c,
+ H2_STRM_LOG(APLOGNO(02936), stream, "resumed"));
+ }
+ return status;
+}
+
+static void h2_session_in_flush(h2_session *session)
+{
+ int id;
+
+ while ((id = h2_iq_shift(session->in_process)) > 0) {
+ h2_stream *stream = h2_session_stream_get(session, id);
+ if (stream) {
+ ap_assert(!stream->scheduled);
+ if (h2_stream_prep_processing(stream) == APR_SUCCESS) {
+ h2_mplx_process(session->mplx, stream, stream_pri_cmp, session);
+ }
+ else {
+ h2_stream_rst(stream, H2_ERR_INTERNAL_ERROR);
+ }
+ }
+ }
+
+ while ((id = h2_iq_shift(session->in_pending)) > 0) {
+ h2_stream *stream = h2_session_stream_get(session, id);
+ if (stream) {
+ h2_stream_flush_input(stream);
+ }
+ }
+}
+
+static apr_status_t session_read(h2_session *session, apr_size_t readlen, int block)
+{
+ apr_status_t status, rstatus = APR_EAGAIN;
+ conn_rec *c = session->c;
+ apr_off_t read_start = session->io.bytes_read;
+
+ while (1) {
+ /* H2_IN filter handles all incoming data against the session.
+ * We just pull at the filter chain to make it happen */
+ status = ap_get_brigade(c->input_filters,
+ session->bbtmp, AP_MODE_READBYTES,
+ block? APR_BLOCK_READ : APR_NONBLOCK_READ,
+ H2MAX(APR_BUCKET_BUFF_SIZE, readlen));
+ /* get rid of any possible data we do not expect to get */
+ apr_brigade_cleanup(session->bbtmp);
+
+ switch (status) {
+ case APR_SUCCESS:
+ /* successful read, reset our idle timers */
+ rstatus = APR_SUCCESS;
+ if (block) {
+ /* successful blocked read, try unblocked to
+ * get more. */
+ block = 0;
+ }
+ break;
+ case APR_EAGAIN:
+ return rstatus;
+ case APR_TIMEUP:
+ return status;
+ default:
+ if (session->io.bytes_read == read_start) {
+ /* first attempt failed */
+ if (APR_STATUS_IS_ETIMEDOUT(status)
+ || APR_STATUS_IS_ECONNABORTED(status)
+ || APR_STATUS_IS_ECONNRESET(status)
+ || APR_STATUS_IS_EOF(status)
+ || APR_STATUS_IS_EBADF(status)) {
+ /* common status for a client that has left */
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, c,
+ H2_SSSN_MSG(session, "input gone"));
+ }
+ else {
+ /* uncommon status, log on INFO so that we see this */
+ ap_log_cerror( APLOG_MARK, APLOG_DEBUG, status, c,
+ H2_SSSN_LOG(APLOGNO(02950), session,
+ "error reading, terminating"));
+ }
+ return status;
+ }
+ /* subsequent failure after success(es), return initial
+ * status. */
+ return rstatus;
+ }
+ if ((session->io.bytes_read - read_start) > readlen) {
+ /* read enough in one go, give write a chance */
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, status, c,
+ H2_SSSN_MSG(session, "read enough, returning"));
+ break;
+ }
+ }
+ return rstatus;
+}
+
+static apr_status_t h2_session_read(h2_session *session, int block)
+{
+ apr_status_t status = session_read(session, session->max_stream_mem
+ * H2MAX(2, session->open_streams),
+ block);
+ h2_session_in_flush(session);
+ return status;
+}
+
+static const char *StateNames[] = {
+ "INIT", /* H2_SESSION_ST_INIT */
+ "DONE", /* H2_SESSION_ST_DONE */
+ "IDLE", /* H2_SESSION_ST_IDLE */
+ "BUSY", /* H2_SESSION_ST_BUSY */
+ "WAIT", /* H2_SESSION_ST_WAIT */
+ "CLEANUP", /* H2_SESSION_ST_CLEANUP */
+};
+
+const char *h2_session_state_str(h2_session_state state)
+{
+ if (state >= (sizeof(StateNames)/sizeof(StateNames[0]))) {
+ return "unknown";
+ }
+ return StateNames[state];
+}
+
+static void update_child_status(h2_session *session, int status, const char *msg)
+{
+    /* Only update the scoreboard when the status code or message has actually
+     * changed; this avoids needless updates in between. */
+ if (session->last_status_code != status
+ || session->last_status_msg != msg) {
+ apr_snprintf(session->status, sizeof(session->status),
+ "%s, streams: %d/%d/%d/%d/%d (open/recv/resp/push/rst)",
+ msg? msg : "-",
+ (int)session->open_streams,
+ (int)session->remote.emitted_count,
+ (int)session->responses_submitted,
+ (int)session->pushes_submitted,
+ (int)session->pushes_reset + session->streams_reset);
+ ap_update_child_status_descr(session->c->sbh, status, session->status);
+ }
+}
+
+static void transit(h2_session *session, const char *action, h2_session_state nstate)
+{
+ apr_time_t timeout;
+ int ostate, loglvl;
+ const char *s;
+
+ if (session->state != nstate) {
+ ostate = session->state;
+ session->state = nstate;
+
+ loglvl = APLOG_DEBUG;
+ if ((ostate == H2_SESSION_ST_BUSY && nstate == H2_SESSION_ST_WAIT)
+ || (ostate == H2_SESSION_ST_WAIT && nstate == H2_SESSION_ST_BUSY)){
+ loglvl = APLOG_TRACE1;
+ }
+ ap_log_cerror(APLOG_MARK, loglvl, 0, session->c,
+ H2_SSSN_LOG(APLOGNO(03078), session,
+ "transit [%s] -- %s --> [%s]"),
+ h2_session_state_str(ostate), action,
+ h2_session_state_str(nstate));
+
+ switch (session->state) {
+ case H2_SESSION_ST_IDLE:
+ if (!session->remote.emitted_count) {
+ /* on fresh connections, with async mpm, do not return
+ * to mpm for a second. This gives the first request a better
+                 * chance to arrive (and the connection leaving IDLE state).
+ * If we return to mpm right away, this connection has the
+ * same chance of being cleaned up by the mpm as connections
+ * that already served requests - not fair. */
+ session->idle_sync_until = apr_time_now() + apr_time_from_sec(1);
+ s = "timeout";
+ timeout = H2MAX(session->s->timeout, session->s->keep_alive_timeout);
+ update_child_status(session, SERVER_BUSY_READ, "idle");
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, session->c,
+ H2_SSSN_LOG("", session, "enter idle, timeout = %d sec"),
+ (int)apr_time_sec(H2MAX(session->s->timeout, session->s->keep_alive_timeout)));
+ }
+ else if (session->open_streams) {
+ s = "timeout";
+ timeout = session->s->keep_alive_timeout;
+ update_child_status(session, SERVER_BUSY_KEEPALIVE, "idle");
+ }
+ else {
+ /* normal keepalive setup */
+ s = "keepalive";
+ timeout = session->s->keep_alive_timeout;
+ update_child_status(session, SERVER_BUSY_KEEPALIVE, "idle");
+ }
+ session->idle_until = apr_time_now() + timeout;
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, session->c,
+ H2_SSSN_LOG("", session, "enter idle, %s = %d sec"),
+ s, (int)apr_time_sec(timeout));
+ break;
+ case H2_SESSION_ST_DONE:
+ update_child_status(session, SERVER_CLOSING, "done");
+ break;
+ default:
+ /* nop */
+ break;
+ }
+ }
+}
+
+static void h2_session_ev_init(h2_session *session, int arg, const char *msg)
+{
+ switch (session->state) {
+ case H2_SESSION_ST_INIT:
+ transit(session, "init", H2_SESSION_ST_BUSY);
+ break;
+ default:
+ /* nop */
+ break;
+ }
+}
+
+static void h2_session_ev_local_goaway(h2_session *session, int arg, const char *msg)
+{
+ cleanup_unprocessed_streams(session);
+ if (!session->remote.shutdown) {
+ update_child_status(session, SERVER_CLOSING, "local goaway");
+ }
+ transit(session, "local goaway", H2_SESSION_ST_DONE);
+}
+
+static void h2_session_ev_remote_goaway(h2_session *session, int arg, const char *msg)
+{
+ if (!session->remote.shutdown) {
+ session->remote.error = arg;
+ session->remote.accepting = 0;
+ session->remote.shutdown = 1;
+ cleanup_unprocessed_streams(session);
+ update_child_status(session, SERVER_CLOSING, "remote goaway");
+ transit(session, "remote goaway", H2_SESSION_ST_DONE);
+ }
+}
+
+static void h2_session_ev_conn_error(h2_session *session, int arg, const char *msg)
+{
+ switch (session->state) {
+ case H2_SESSION_ST_INIT:
+ case H2_SESSION_ST_DONE:
+ /* just leave */
+ transit(session, "conn error", H2_SESSION_ST_DONE);
+ break;
+
+ default:
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c,
+ H2_SSSN_LOG(APLOGNO(03401), session,
+ "conn error -> shutdown"));
+ h2_session_shutdown(session, arg, msg, 0);
+ break;
+ }
+}
+
+static void h2_session_ev_proto_error(h2_session *session, int arg, const char *msg)
+{
+ if (!session->local.shutdown) {
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c,
+ H2_SSSN_LOG(APLOGNO(03402), session,
+ "proto error -> shutdown"));
+ h2_session_shutdown(session, arg, msg, 0);
+ }
+}
+
+static void h2_session_ev_conn_timeout(h2_session *session, int arg, const char *msg)
+{
+ transit(session, msg, H2_SESSION_ST_DONE);
+ if (!session->local.shutdown) {
+ h2_session_shutdown(session, arg, msg, 1);
+ }
+}
+
+static void h2_session_ev_no_io(h2_session *session, int arg, const char *msg)
+{
+ switch (session->state) {
+ case H2_SESSION_ST_BUSY:
+ /* Nothing to READ, nothing to WRITE on the master connection.
+ * Possible causes:
+         * - we wait for the client to send us something
+         * - we wait for started tasks to produce output
+         * - we have finished all streams and the client has sent GOAWAY
+ */
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, session->c,
+ H2_SSSN_MSG(session, "NO_IO event, %d streams open"),
+ session->open_streams);
+ h2_conn_io_flush(&session->io);
+ if (session->open_streams > 0) {
+ if (h2_mplx_awaits_data(session->mplx)) {
+ /* waiting for at least one stream to produce data */
+ transit(session, "no io", H2_SESSION_ST_WAIT);
+ }
+ else {
+ /* we have streams open, and all are submitted and none
+ * is suspended. The only thing keeping us from WRITEing
+ * more must be the flow control.
+ * This means we only wait for WINDOW_UPDATE from the
+ * client and can block on READ. */
+ transit(session, "no io (flow wait)", H2_SESSION_ST_IDLE);
+ /* Make sure we have flushed all previously written output
+ * so that the client will react. */
+ if (h2_conn_io_flush(&session->io) != APR_SUCCESS) {
+ dispatch_event(session, H2_SESSION_EV_CONN_ERROR, 0, NULL);
+ return;
+ }
+ }
+ }
+ else if (session->local.accepting) {
+ /* When we have no streams, but accept new, switch to idle */
+ transit(session, "no io (keepalive)", H2_SESSION_ST_IDLE);
+ }
+ else {
+ /* We are no longer accepting new streams and there are
+ * none left. Time to leave. */
+ h2_session_shutdown(session, arg, msg, 0);
+ transit(session, "no io", H2_SESSION_ST_DONE);
+ }
+ break;
+ default:
+ /* nop */
+ break;
+ }
+}
+
+static void h2_session_ev_frame_rcvd(h2_session *session, int arg, const char *msg)
+{
+ switch (session->state) {
+ case H2_SESSION_ST_IDLE:
+ case H2_SESSION_ST_WAIT:
+ transit(session, "frame received", H2_SESSION_ST_BUSY);
+ break;
+ default:
+ /* nop */
+ break;
+ }
+}
+
+static void h2_session_ev_stream_change(h2_session *session, int arg, const char *msg)
+{
+ switch (session->state) {
+ case H2_SESSION_ST_IDLE:
+ case H2_SESSION_ST_WAIT:
+ transit(session, "stream change", H2_SESSION_ST_BUSY);
+ break;
+ default:
+ /* nop */
+ break;
+ }
+}
+
+static void h2_session_ev_ngh2_done(h2_session *session, int arg, const char *msg)
+{
+ switch (session->state) {
+ case H2_SESSION_ST_DONE:
+ /* nop */
+ break;
+ default:
+ transit(session, "nghttp2 done", H2_SESSION_ST_DONE);
+ break;
+ }
+}
+
+static void h2_session_ev_mpm_stopping(h2_session *session, int arg, const char *msg)
+{
+ switch (session->state) {
+ case H2_SESSION_ST_DONE:
+ /* nop */
+ break;
+ default:
+ h2_session_shutdown_notice(session);
+ break;
+ }
+}
+
+static void h2_session_ev_pre_close(h2_session *session, int arg, const char *msg)
+{
+ h2_session_shutdown(session, arg, msg, 1);
+}
+
+static void ev_stream_open(h2_session *session, h2_stream *stream)
+{
+ h2_iq_append(session->in_process, stream->id);
+}
+
+static void ev_stream_closed(h2_session *session, h2_stream *stream)
+{
+ apr_bucket *b;
+
+ if (H2_STREAM_CLIENT_INITIATED(stream->id)
+ && (stream->id > session->local.completed_max)) {
+ session->local.completed_max = stream->id;
+ }
+ switch (session->state) {
+ case H2_SESSION_ST_IDLE:
+ break;
+ default:
+ break;
+ }
+
+ /* The stream might have data in the buffers of the main connection.
+     * We can only free the allocated resources once all of it has been written.
+     * Send a special bucket on the connection that gets destroyed when
+ * all preceding data has been handled. On its destruction, it is safe
+ * to purge all resources of the stream. */
+ b = h2_bucket_eos_create(session->c->bucket_alloc, stream);
+ APR_BRIGADE_INSERT_TAIL(session->bbtmp, b);
+ h2_conn_io_pass(&session->io, session->bbtmp);
+ apr_brigade_cleanup(session->bbtmp);
+}
+
+static void on_stream_state_enter(void *ctx, h2_stream *stream)
+{
+ h2_session *session = ctx;
+ /* stream entered a new state */
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, session->c,
+ H2_STRM_MSG(stream, "entered state"));
+ switch (stream->state) {
+ case H2_SS_IDLE: /* stream was created */
+ ++session->open_streams;
+ if (H2_STREAM_CLIENT_INITIATED(stream->id)) {
+ ++session->remote.emitted_count;
+ if (stream->id > session->remote.emitted_max) {
+ session->remote.emitted_max = stream->id;
+ session->local.accepted_max = stream->id;
+ }
+ }
+ else {
+ if (stream->id > session->local.emitted_max) {
+ ++session->local.emitted_count;
+ session->remote.emitted_max = stream->id;
+ }
+ }
+ break;
+ case H2_SS_OPEN: /* stream has request headers */
+ case H2_SS_RSVD_L: /* stream has request headers */
+ ev_stream_open(session, stream);
+ break;
+ case H2_SS_CLOSED_L: /* stream output was closed */
+ break;
+ case H2_SS_CLOSED_R: /* stream input was closed */
+ break;
+ case H2_SS_CLOSED: /* stream in+out were closed */
+ --session->open_streams;
+ ev_stream_closed(session, stream);
+ break;
+ case H2_SS_CLEANUP:
+ h2_mplx_stream_cleanup(session->mplx, stream);
+ break;
+ default:
+ break;
+ }
+ dispatch_event(session, H2_SESSION_EV_STREAM_CHANGE, 0, "stream state change");
+}
+
+static void on_stream_event(void *ctx, h2_stream *stream,
+ h2_stream_event_t ev)
+{
+ h2_session *session = ctx;
+ switch (ev) {
+ case H2_SEV_IN_DATA_PENDING:
+ h2_iq_append(session->in_pending, stream->id);
+ break;
+ default:
+ /* NOP */
+ break;
+ }
+}
+
+static void on_stream_state_event(void *ctx, h2_stream *stream,
+ h2_stream_event_t ev)
+{
+ h2_session *session = ctx;
+ switch (ev) {
+ case H2_SEV_CANCELLED:
+ if (session->state != H2_SESSION_ST_DONE) {
+ nghttp2_submit_rst_stream(session->ngh2, NGHTTP2_FLAG_NONE,
+ stream->id, stream->rst_error);
+ }
+ break;
+ default:
+ /* NOP */
+ break;
+ }
+}
+
+static void dispatch_event(h2_session *session, h2_session_event_t ev,
+ int arg, const char *msg)
+{
+ switch (ev) {
+ case H2_SESSION_EV_INIT:
+ h2_session_ev_init(session, arg, msg);
+ break;
+ case H2_SESSION_EV_LOCAL_GOAWAY:
+ h2_session_ev_local_goaway(session, arg, msg);
+ break;
+ case H2_SESSION_EV_REMOTE_GOAWAY:
+ h2_session_ev_remote_goaway(session, arg, msg);
+ break;
+ case H2_SESSION_EV_CONN_ERROR:
+ h2_session_ev_conn_error(session, arg, msg);
+ break;
+ case H2_SESSION_EV_PROTO_ERROR:
+ h2_session_ev_proto_error(session, arg, msg);
+ break;
+ case H2_SESSION_EV_CONN_TIMEOUT:
+ h2_session_ev_conn_timeout(session, arg, msg);
+ break;
+ case H2_SESSION_EV_NO_IO:
+ h2_session_ev_no_io(session, arg, msg);
+ break;
+ case H2_SESSION_EV_FRAME_RCVD:
+ h2_session_ev_frame_rcvd(session, arg, msg);
+ break;
+ case H2_SESSION_EV_NGH2_DONE:
+ h2_session_ev_ngh2_done(session, arg, msg);
+ break;
+ case H2_SESSION_EV_MPM_STOPPING:
+ h2_session_ev_mpm_stopping(session, arg, msg);
+ break;
+ case H2_SESSION_EV_PRE_CLOSE:
+ h2_session_ev_pre_close(session, arg, msg);
+ break;
+ case H2_SESSION_EV_STREAM_CHANGE:
+ h2_session_ev_stream_change(session, arg, msg);
+ break;
+ default:
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, session->c,
+ H2_SSSN_MSG(session, "unknown event %d"), ev);
+ break;
+ }
+}
+
+/* trigger window updates, stream resumes and submits */
+static apr_status_t dispatch_master(h2_session *session) {
+ apr_status_t status;
+
+ status = h2_mplx_dispatch_master_events(session->mplx,
+ on_stream_resume, session);
+ if (status == APR_EAGAIN) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE3, status, session->c,
+ H2_SSSN_MSG(session, "no master event available"));
+ }
+ else if (status != APR_SUCCESS) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE3, status, session->c,
+ H2_SSSN_MSG(session, "dispatch error"));
+ dispatch_event(session, H2_SESSION_EV_CONN_ERROR,
+ H2_ERR_INTERNAL_ERROR, "dispatch error");
+ }
+ return status;
+}
+
+static const int MAX_WAIT_MICROS = 200 * 1000;
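+/* A sketch of the backoff this constant caps (see the H2_SESSION_ST_WAIT case
+ * in h2_session_process below): wait_us starts at 10 microseconds and doubles
+ * on every pass without progress (10, 20, 40, ... us) until capped at
+ * MAX_WAIT_MICROS (200 ms); it resets to 0 as soon as output arrives. */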
+
+apr_status_t h2_session_process(h2_session *session, int async)
+{
+ apr_status_t status = APR_SUCCESS;
+ conn_rec *c = session->c;
+ int rv, mpm_state, trace = APLOGctrace3(c);
+ apr_time_t now;
+
+ if (trace) {
+ ap_log_cerror( APLOG_MARK, APLOG_TRACE3, status, c,
+ H2_SSSN_MSG(session, "process start, async=%d"), async);
+ }
+
+ while (session->state != H2_SESSION_ST_DONE) {
+ now = apr_time_now();
+ session->have_read = session->have_written = 0;
+
+ if (session->local.accepting
+ && !ap_mpm_query(AP_MPMQ_MPM_STATE, &mpm_state)) {
+ if (mpm_state == AP_MPMQ_STOPPING) {
+ dispatch_event(session, H2_SESSION_EV_MPM_STOPPING, 0, NULL);
+ }
+ }
+
+ session->status[0] = '\0';
+
+ switch (session->state) {
+ case H2_SESSION_ST_INIT:
+ ap_update_child_status_from_conn(c->sbh, SERVER_BUSY_READ, c);
+ if (!h2_is_acceptable_connection(c, 1)) {
+ update_child_status(session, SERVER_BUSY_READ,
+ "inadequate security");
+ h2_session_shutdown(session,
+ NGHTTP2_INADEQUATE_SECURITY, NULL, 1);
+ }
+ else {
+ update_child_status(session, SERVER_BUSY_READ, "init");
+ status = h2_session_start(session, &rv);
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, c,
+ H2_SSSN_LOG(APLOGNO(03079), session,
+ "started on %s:%d"),
+ session->s->server_hostname,
+ c->local_addr->port);
+ if (status != APR_SUCCESS) {
+ dispatch_event(session,
+ H2_SESSION_EV_CONN_ERROR, 0, NULL);
+ }
+ dispatch_event(session, H2_SESSION_EV_INIT, 0, NULL);
+ }
+ break;
+
+ case H2_SESSION_ST_IDLE:
+ if (session->idle_until && (apr_time_now() + session->idle_delay) > session->idle_until) {
+ ap_log_cerror( APLOG_MARK, APLOG_TRACE1, status, c,
+ H2_SSSN_MSG(session, "idle, timeout reached, closing"));
+ if (session->idle_delay) {
+ apr_table_setn(session->c->notes, "short-lingering-close", "1");
+ }
+ dispatch_event(session, H2_SESSION_EV_CONN_TIMEOUT, 0, "timeout");
+ goto out;
+ }
+
+ if (session->idle_delay) {
+ /* we are less interested in spending time on this connection */
+ ap_log_cerror( APLOG_MARK, APLOG_TRACE2, status, c,
+ H2_SSSN_MSG(session, "session is idle (%ld ms), idle wait %ld sec left"),
+ (long)apr_time_as_msec(session->idle_delay),
+ (long)apr_time_sec(session->idle_until - now));
+ apr_sleep(session->idle_delay);
+ session->idle_delay = 0;
+ }
+
+ h2_conn_io_flush(&session->io);
+ if (async && !session->r && (now > session->idle_sync_until)) {
+ if (trace) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE3, status, c,
+ H2_SSSN_MSG(session,
+ "nonblock read, %d streams open"),
+ session->open_streams);
+ }
+ status = h2_session_read(session, 0);
+
+ if (status == APR_SUCCESS) {
+ session->have_read = 1;
+ }
+ else if (APR_STATUS_IS_EAGAIN(status) || APR_STATUS_IS_TIMEUP(status)) {
+ status = APR_EAGAIN;
+ goto out;
+ }
+ else {
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, c,
+ H2_SSSN_LOG(APLOGNO(03403), session,
+ "no data, error"));
+ dispatch_event(session,
+ H2_SESSION_EV_CONN_ERROR, 0, "timeout");
+ }
+ }
+ else {
+ /* make certain, we send everything before we idle */
+ if (trace) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE3, status, c,
+ H2_SSSN_MSG(session,
+ "sync, stutter 1-sec, %d streams open"),
+ session->open_streams);
+ }
+ /* We wait in smaller increments, using a 1 second timeout.
+ * That gives us the chance to check for MPMQ_STOPPING often.
+ */
+ status = h2_mplx_idle(session->mplx);
+ if (status == APR_EAGAIN) {
+ break;
+ }
+ else if (status != APR_SUCCESS) {
+ dispatch_event(session, H2_SESSION_EV_CONN_ERROR,
+ H2_ERR_ENHANCE_YOUR_CALM, "less is more");
+ }
+ h2_filter_cin_timeout_set(session->cin, apr_time_from_sec(1));
+ status = h2_session_read(session, 1);
+ if (status == APR_SUCCESS) {
+ session->have_read = 1;
+ }
+ else if (status == APR_EAGAIN) {
+ /* nothing to read */
+ }
+ else if (APR_STATUS_IS_TIMEUP(status)) {
+ /* continue reading handling */
+ }
+ else if (APR_STATUS_IS_ECONNABORTED(status)
+ || APR_STATUS_IS_ECONNRESET(status)
+ || APR_STATUS_IS_EOF(status)
+ || APR_STATUS_IS_EBADF(status)) {
+ ap_log_cerror( APLOG_MARK, APLOG_TRACE3, status, c,
+ H2_SSSN_MSG(session, "input gone"));
+ dispatch_event(session, H2_SESSION_EV_CONN_ERROR, 0, NULL);
+ }
+ else {
+ ap_log_cerror( APLOG_MARK, APLOG_TRACE3, status, c,
+ H2_SSSN_MSG(session,
+ "(1 sec timeout) read failed"));
+ dispatch_event(session, H2_SESSION_EV_CONN_ERROR, 0, "error");
+ }
+ }
+ if (nghttp2_session_want_write(session->ngh2)) {
+ ap_update_child_status(session->c->sbh, SERVER_BUSY_WRITE, NULL);
+ status = h2_session_send(session);
+ if (status == APR_SUCCESS) {
+ status = h2_conn_io_flush(&session->io);
+ }
+ if (status != APR_SUCCESS) {
+ dispatch_event(session, H2_SESSION_EV_CONN_ERROR,
+ H2_ERR_INTERNAL_ERROR, "writing");
+ break;
+ }
+ }
+ break;
+
+ case H2_SESSION_ST_BUSY:
+ if (nghttp2_session_want_read(session->ngh2)) {
+ ap_update_child_status(session->c->sbh, SERVER_BUSY_READ, NULL);
+ h2_filter_cin_timeout_set(session->cin, session->s->timeout);
+ status = h2_session_read(session, 0);
+ if (status == APR_SUCCESS) {
+ session->have_read = 1;
+ }
+ else if (status == APR_EAGAIN) {
+ /* nothing to read */
+ }
+ else if (APR_STATUS_IS_TIMEUP(status)) {
+ dispatch_event(session, H2_SESSION_EV_CONN_TIMEOUT, 0, NULL);
+ break;
+ }
+ else {
+ dispatch_event(session, H2_SESSION_EV_CONN_ERROR, 0, NULL);
+ }
+ }
+
+ status = dispatch_master(session);
+ if (status != APR_SUCCESS && status != APR_EAGAIN) {
+ break;
+ }
+
+ if (nghttp2_session_want_write(session->ngh2)) {
+ ap_update_child_status(session->c->sbh, SERVER_BUSY_WRITE, NULL);
+ status = h2_session_send(session);
+ if (status == APR_SUCCESS) {
+ status = h2_conn_io_flush(&session->io);
+ }
+ if (status != APR_SUCCESS) {
+ dispatch_event(session, H2_SESSION_EV_CONN_ERROR,
+ H2_ERR_INTERNAL_ERROR, "writing");
+ break;
+ }
+ }
+
+ if (session->have_read || session->have_written) {
+ if (session->wait_us) {
+ session->wait_us = 0;
+ }
+ }
+ else if (!nghttp2_session_want_write(session->ngh2)) {
+ dispatch_event(session, H2_SESSION_EV_NO_IO, 0, NULL);
+ }
+ break;
+
+ case H2_SESSION_ST_WAIT:
+ if (session->wait_us <= 0) {
+ session->wait_us = 10;
+ if (h2_conn_io_flush(&session->io) != APR_SUCCESS) {
+ dispatch_event(session, H2_SESSION_EV_CONN_ERROR, 0, NULL);
+ break;
+ }
+ }
+ else {
+ /* repeating, increase timer for graceful backoff */
+ session->wait_us = H2MIN(session->wait_us*2, MAX_WAIT_MICROS);
+ }
+
+ if (trace) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE3, 0, c,
+ "h2_session: wait for data, %ld micros",
+ (long)session->wait_us);
+ }
+ status = h2_mplx_out_trywait(session->mplx, session->wait_us,
+ session->iowait);
+ if (status == APR_SUCCESS) {
+ session->wait_us = 0;
+ dispatch_event(session, H2_SESSION_EV_STREAM_CHANGE, 0, NULL);
+ }
+ else if (APR_STATUS_IS_TIMEUP(status)) {
+ /* go back to checking all inputs again */
+ transit(session, "wait cycle", session->local.shutdown?
+ H2_SESSION_ST_DONE : H2_SESSION_ST_BUSY);
+ }
+ else if (APR_STATUS_IS_ECONNRESET(status)
+ || APR_STATUS_IS_ECONNABORTED(status)) {
+ dispatch_event(session, H2_SESSION_EV_CONN_ERROR, 0, NULL);
+ }
+ else {
+ ap_log_cerror(APLOG_MARK, APLOG_WARNING, status, c,
+ H2_SSSN_LOG(APLOGNO(03404), session,
+ "waiting on conditional"));
+ h2_session_shutdown(session, H2_ERR_INTERNAL_ERROR,
+ "cond wait error", 0);
+ }
+ break;
+
+ default:
+ ap_log_cerror(APLOG_MARK, APLOG_ERR, APR_EGENERAL, c,
+ H2_SSSN_LOG(APLOGNO(03080), session,
+ "unknown state"));
+ dispatch_event(session, H2_SESSION_EV_PROTO_ERROR, 0, NULL);
+ break;
+ }
+
+ if (!nghttp2_session_want_read(session->ngh2)
+ && !nghttp2_session_want_write(session->ngh2)) {
+ dispatch_event(session, H2_SESSION_EV_NGH2_DONE, 0, NULL);
+ }
+ if (session->reprioritize) {
+ h2_mplx_reprioritize(session->mplx, stream_pri_cmp, session);
+ session->reprioritize = 0;
+ }
+ }
+
+out:
+ if (trace) {
+ ap_log_cerror( APLOG_MARK, APLOG_TRACE3, status, c,
+ H2_SSSN_MSG(session, "process returns"));
+ }
+
+ if ((session->state != H2_SESSION_ST_DONE)
+ && (APR_STATUS_IS_EOF(status)
+ || APR_STATUS_IS_ECONNRESET(status)
+ || APR_STATUS_IS_ECONNABORTED(status))) {
+ dispatch_event(session, H2_SESSION_EV_CONN_ERROR, 0, NULL);
+ }
+
+ return (session->state == H2_SESSION_ST_DONE)? APR_EOF : APR_SUCCESS;
+}
+
+apr_status_t h2_session_pre_close(h2_session *session, int async)
+{
+ apr_status_t status;
+
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, session->c,
+ H2_SSSN_MSG(session, "pre_close"));
+ dispatch_event(session, H2_SESSION_EV_PRE_CLOSE, 0,
+ (session->state == H2_SESSION_ST_IDLE)? "timeout" : NULL);
+ status = session_cleanup(session, "pre_close");
+ if (status == APR_SUCCESS) {
+ /* no one should hold a reference to this session any longer and
+ * the h2_ctx was removed from the connection.
+         * Take the pool (and thus all subpools etc.) down now, instead of
+         * during cleanup of the main connection pool. */
+ apr_pool_destroy(session->pool);
+ }
+ return status;
+}
diff --git a/modules/http2/h2_session.h b/modules/http2/h2_session.h
new file mode 100644
index 0000000..df2a862
--- /dev/null
+++ b/modules/http2/h2_session.h
@@ -0,0 +1,228 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __mod_h2__h2_session__
+#define __mod_h2__h2_session__
+
+#include "h2_conn_io.h"
+
+/**
+ * An HTTP/2 connection, a session with a specific client.
+ *
+ * h2_session sits on top of a httpd conn_rec* instance and takes complete
+ * control of the connection data. It receives protocol frames from the
+ * client. For new HTTP/2 streams it creates h2_task(s) that are sent
+ * via callback to a dispatcher (see h2_conn.c).
+ * h2_session keeps h2_io's for each ongoing stream which buffer the
+ * payload for that stream.
+ *
+ * New incoming HEADER frames are converted into a h2_stream+h2_task instance
+ * that both represent a HTTP/2 stream, but may have separate lifetimes. This
+ * allows h2_task to be scheduled in other threads without semaphores
+ * all over the place. It allows task memory to be freed independently of the
+ * session lifetime, and sessions may close down while tasks are still running.
+ *
+ *
+ */
+
+#include "h2.h"
+
+struct apr_thread_mutex_t;
+struct apr_thread_cond_t;
+struct h2_ctx;
+struct h2_config;
+struct h2_filter_cin;
+struct h2_ihash_t;
+struct h2_mplx;
+struct h2_priority;
+struct h2_push;
+struct h2_push_diary;
+struct h2_session;
+struct h2_stream;
+struct h2_stream_monitor;
+struct h2_task;
+struct h2_workers;
+
+struct nghttp2_session;
+
+typedef enum {
+ H2_SESSION_EV_INIT, /* session was initialized */
+ H2_SESSION_EV_LOCAL_GOAWAY, /* we send a GOAWAY */
+    H2_SESSION_EV_REMOTE_GOAWAY, /* remote sent us a GOAWAY */
+ H2_SESSION_EV_CONN_ERROR, /* connection error */
+ H2_SESSION_EV_PROTO_ERROR, /* protocol error */
+ H2_SESSION_EV_CONN_TIMEOUT, /* connection timeout */
+ H2_SESSION_EV_NO_IO, /* nothing has been read or written */
+ H2_SESSION_EV_FRAME_RCVD, /* a frame has been received */
+ H2_SESSION_EV_NGH2_DONE, /* nghttp2 wants neither read nor write anything */
+ H2_SESSION_EV_MPM_STOPPING, /* the process is stopping */
+ H2_SESSION_EV_PRE_CLOSE, /* connection will close after this */
+ H2_SESSION_EV_STREAM_CHANGE, /* a stream (state/input/output) changed */
+} h2_session_event_t;
+
+typedef struct h2_session {
+ long id; /* identifier of this session, unique
+ * inside a httpd process */
+ conn_rec *c; /* the connection this session serves */
+ request_rec *r; /* the request that started this in case
+ * of 'h2c', NULL otherwise */
+ server_rec *s; /* server/vhost we're starting on */
+ const struct h2_config *config; /* Relevant config for this session */
+ apr_pool_t *pool; /* pool to use in session */
+ struct h2_mplx *mplx; /* multiplexer for stream data */
+ struct h2_workers *workers; /* for executing stream tasks */
+ struct h2_filter_cin *cin; /* connection input filter context */
+ h2_conn_io io; /* io on httpd conn filters */
+ struct nghttp2_session *ngh2; /* the nghttp2 session (internal use) */
+
+ h2_session_state state; /* state session is in */
+
+ h2_session_props local; /* properties of local session */
+    h2_session_props remote; /* properties of remote session */
+
+ unsigned int reprioritize : 1; /* scheduled streams priority changed */
+ unsigned int flush : 1; /* flushing output necessary */
+ unsigned int have_read : 1; /* session has read client data */
+ unsigned int have_written : 1; /* session did write data to client */
+ apr_interval_time_t wait_us; /* timeout during BUSY_WAIT state, micro secs */
+
+ struct h2_push_diary *push_diary; /* remember pushes, avoid duplicates */
+
+ struct h2_stream_monitor *monitor;/* monitor callbacks for streams */
+ int open_streams; /* number of client streams open */
+ int unsent_submits; /* number of submitted, but not yet written responses. */
+ int unsent_promises; /* number of submitted, but not yet written push promises */
+
+ int responses_submitted; /* number of http/2 responses submitted */
+ int streams_reset; /* number of http/2 streams reset by client */
+ int pushes_promised; /* number of http/2 push promises submitted */
+ int pushes_submitted; /* number of http/2 pushed responses submitted */
+ int pushes_reset; /* number of http/2 pushed reset by client */
+
+ apr_size_t frames_received; /* number of http/2 frames received */
+ apr_size_t frames_sent; /* number of http/2 frames sent */
+
+ apr_size_t max_stream_count; /* max number of open streams */
+ apr_size_t max_stream_mem; /* max buffer memory for a single stream */
+
+ apr_time_t idle_until; /* Time we shut down due to sheer boredom */
+ apr_time_t idle_sync_until; /* Time we sync wait until keepalive handling kicks in */
+ apr_size_t idle_frames; /* number of rcvd frames that kept session in idle state */
+ apr_interval_time_t idle_delay; /* Time we delay processing rcvd frames in idle state */
+
+ apr_bucket_brigade *bbtmp; /* brigade for keeping temporary data */
+ struct apr_thread_cond_t *iowait; /* our cond when trywaiting for data */
+
+ char status[64]; /* status message for scoreboard */
+ int last_status_code; /* the one already reported */
+ const char *last_status_msg; /* the one already reported */
+
+ struct h2_iqueue *in_pending; /* all streams with input pending */
+ struct h2_iqueue *in_process; /* all streams ready for processing on slave */
+
+} h2_session;
+
+const char *h2_session_state_str(h2_session_state state);
+
+/**
+ * Create a new h2_session for the given connection.
+ * The session will apply the configured parameters.
+ * @param psession pointer receiving the created session on success or NULL
+ * @param c the connection to work on
+ * @param ctx the h2 context of the connection
+ * @param workers the worker pool to use
+ * @return APR_SUCCESS with the created session in *psession, an error status otherwise
+ */
+apr_status_t h2_session_create(h2_session **psession,
+ conn_rec *c, struct h2_ctx *ctx,
+ struct h2_workers *workers);
+
+/**
+ * Create a new h2_session for the given request.
+ * The session will apply the configured parameters.
+ * @param psession pointer receiving the created session on success or NULL
+ * @param r the request that was upgraded
+ * @param ctx the h2 context of the connection
+ * @param workers the worker pool to use
+ * @return APR_SUCCESS with the created session in *psession, an error status otherwise
+ */
+apr_status_t h2_session_rcreate(h2_session **psession,
+ request_rec *r, struct h2_ctx *ctx,
+ struct h2_workers *workers);
+
+void h2_session_event(h2_session *session, h2_session_event_t ev,
+ int err, const char *msg);
+
+/**
+ * Process the given HTTP/2 session until it is ended or a fatal
+ * error occurred.
+ *
+ * @param session the session to process
+ */
+apr_status_t h2_session_process(h2_session *session, int async);
+
+/**
+ * Last chance to do anything before the connection is closed.
+ */
+apr_status_t h2_session_pre_close(h2_session *session, int async);
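+
+/* A rough sketch (for orientation only; the real driver lives in h2_conn.c and
+ * differs in detail and error handling) of the intended call order:
+ *
+ *   h2_session *session;
+ *   h2_session_create(&session, c, ctx, workers);   -- or h2_session_rcreate() after an h2c upgrade
+ *   h2_session_process(session, async_mpm);         -- possibly invoked repeatedly
+ *   h2_session_pre_close(session, async_mpm);       -- from the connection pre_close hook
+ */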
+
+/**
+ * Called when a serious error occurred and the session needs to terminate
+ * without further connection io.
+ * @param session the session to abort
+ * @param reason the apache status that caused the abort
+ */
+void h2_session_abort(h2_session *session, apr_status_t reason);
+
+/**
+ * Close and deallocate the given session.
+ */
+void h2_session_close(h2_session *session);
+
+/**
+ * Returns whether the client settings have push enabled.
+ * @return != 0 iff push is enabled in client settings
+ */
+int h2_session_push_enabled(h2_session *session);
+
+/**
+ * Look up the stream in this session with the given id.
+ */
+struct h2_stream *h2_session_stream_get(h2_session *session, int stream_id);
+
+/**
+ * Submit a push promise on the stream and schedule the new stream for
+ * processing.
+ *
+ * @param session the session to work in
+ * @param is the stream initiating the push
+ * @param push the push to promise
+ * @return the new promised stream or NULL
+ */
+struct h2_stream *h2_session_push(h2_session *session,
+ struct h2_stream *is, struct h2_push *push);
+
+apr_status_t h2_session_set_prio(h2_session *session,
+ struct h2_stream *stream,
+ const struct h2_priority *prio);
+
+#define H2_SSSN_MSG(s, msg) \
+ "h2_session(%ld,%s,%d): "msg, s->id, h2_session_state_str(s->state), \
+ s->open_streams
+
+#define H2_SSSN_LOG(aplogno, s, msg) aplogno H2_SSSN_MSG(s, msg)
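+
+/*
+ * Illustrative use of the log helpers above; the log number is a placeholder:
+ *
+ *     ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c,
+ *                   H2_SSSN_LOG(APLOGNO(xxxxx), session, "shutting down"));
+ *
+ * This expands the format to "h2_session(%ld,%s,%d): shutting down" with the
+ * session id, state name and open stream count as arguments.
+ */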
+
+#endif /* defined(__mod_h2__h2_session__) */
diff --git a/modules/http2/h2_stream.c b/modules/http2/h2_stream.c
new file mode 100644
index 0000000..24ebc56
--- /dev/null
+++ b/modules/http2/h2_stream.c
@@ -0,0 +1,1078 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <assert.h>
+#include <stddef.h>
+
+#include <apr_strings.h>
+
+#include <httpd.h>
+#include <http_core.h>
+#include <http_connection.h>
+#include <http_log.h>
+
+#include <nghttp2/nghttp2.h>
+
+#include "h2_private.h"
+#include "h2.h"
+#include "h2_bucket_beam.h"
+#include "h2_conn.h"
+#include "h2_config.h"
+#include "h2_h2.h"
+#include "h2_mplx.h"
+#include "h2_push.h"
+#include "h2_request.h"
+#include "h2_headers.h"
+#include "h2_session.h"
+#include "h2_stream.h"
+#include "h2_task.h"
+#include "h2_ctx.h"
+#include "h2_task.h"
+#include "h2_util.h"
+
+
+static const char *h2_ss_str(h2_stream_state_t state)
+{
+ switch (state) {
+ case H2_SS_IDLE:
+ return "IDLE";
+ case H2_SS_RSVD_L:
+ return "RESERVED_LOCAL";
+ case H2_SS_RSVD_R:
+ return "RESERVED_REMOTE";
+ case H2_SS_OPEN:
+ return "OPEN";
+ case H2_SS_CLOSED_L:
+ return "HALF_CLOSED_LOCAL";
+ case H2_SS_CLOSED_R:
+ return "HALF_CLOSED_REMOTE";
+ case H2_SS_CLOSED:
+ return "CLOSED";
+ case H2_SS_CLEANUP:
+ return "CLEANUP";
+ default:
+ return "UNKNOWN";
+ }
+}
+
+const char *h2_stream_state_str(h2_stream *stream)
+{
+ return h2_ss_str(stream->state);
+}
+
+/* Abbreviations for stream transit tables */
+#define S_XXX (-2) /* Programming Error */
+#define S_ERR (-1) /* Protocol Error */
+#define S_NOP (0) /* No Change */
+#define S_IDL (H2_SS_IDLE + 1)
+#define S_RS_L (H2_SS_RSVD_L + 1)
+#define S_RS_R (H2_SS_RSVD_R + 1)
+#define S_OPEN (H2_SS_OPEN + 1)
+#define S_CL_L (H2_SS_CLOSED_L + 1)
+#define S_CL_R (H2_SS_CLOSED_R + 1)
+#define S_CLS (H2_SS_CLOSED + 1)
+#define S_CLN (H2_SS_CLEANUP + 1)
+
+/* state transitions when certain frame types are sent */
+static int trans_on_send[][H2_SS_MAX] = {
+/*S_IDLE,S_RS_R, S_RS_L, S_OPEN, S_CL_R, S_CL_L, S_CLS, S_CLN, */
+{ S_ERR, S_ERR, S_ERR, S_NOP, S_NOP, S_ERR, S_NOP, S_NOP, },/* DATA */
+{ S_ERR, S_ERR, S_CL_R, S_NOP, S_NOP, S_ERR, S_NOP, S_NOP, },/* HEADERS */
+{ S_NOP, S_NOP, S_NOP, S_NOP, S_NOP, S_NOP, S_NOP, S_NOP, },/* PRIORITY */
+{ S_CLS, S_CLS, S_CLS, S_CLS, S_CLS, S_CLS, S_NOP, S_NOP, },/* RST_STREAM */
+{ S_ERR, S_ERR, S_ERR, S_ERR, S_ERR, S_ERR, S_ERR, S_ERR, },/* SETTINGS */
+{ S_RS_L,S_ERR, S_ERR, S_ERR, S_ERR, S_ERR, S_ERR, S_ERR, },/* PUSH_PROMISE */
+{ S_ERR, S_ERR, S_ERR, S_ERR, S_ERR, S_ERR, S_ERR, S_ERR, },/* PING */
+{ S_ERR, S_ERR, S_ERR, S_ERR, S_ERR, S_ERR, S_ERR, S_ERR, },/* GOAWAY */
+{ S_NOP, S_NOP, S_NOP, S_NOP, S_NOP, S_NOP, S_NOP, S_NOP, },/* WINDOW_UPDATE */
+{ S_NOP, S_NOP, S_NOP, S_NOP, S_NOP, S_NOP, S_NOP, S_NOP, },/* CONT */
+};
+/* state transitions when certain frame types are received */
+static int trans_on_recv[][H2_SS_MAX] = {
+/*S_IDLE,S_RS_R, S_RS_L, S_OPEN, S_CL_R, S_CL_L, S_CLS, S_CLN, */
+{ S_ERR, S_ERR, S_ERR, S_NOP, S_ERR, S_NOP, S_NOP, S_NOP, },/* DATA */
+{ S_OPEN,S_CL_L, S_ERR, S_NOP, S_ERR, S_NOP, S_NOP, S_NOP, },/* HEADERS */
+{ S_NOP, S_NOP, S_NOP, S_NOP, S_NOP, S_NOP, S_NOP, S_NOP, },/* PRIORITY */
+{ S_ERR, S_CLS, S_CLS, S_CLS, S_CLS, S_CLS, S_NOP, S_NOP, },/* RST_STREAM */
+{ S_ERR, S_ERR, S_ERR, S_ERR, S_ERR, S_ERR, S_ERR, S_ERR, },/* SETTINGS */
+{ S_RS_R,S_ERR, S_ERR, S_ERR, S_ERR, S_ERR, S_ERR, S_ERR, },/* PUSH_PROMISE */
+{ S_ERR, S_ERR, S_ERR, S_ERR, S_ERR, S_ERR, S_ERR, S_ERR, },/* PING */
+{ S_ERR, S_ERR, S_ERR, S_ERR, S_ERR, S_ERR, S_ERR, S_ERR, },/* GOAWAY */
+{ S_NOP, S_NOP, S_NOP, S_NOP, S_NOP, S_NOP, S_NOP, S_NOP, },/* WINDOW_UPDATE */
+{ S_NOP, S_NOP, S_NOP, S_NOP, S_NOP, S_NOP, S_NOP, S_NOP, },/* CONT */
+};
+/* state transitions when certain events happen */
+static int trans_on_event[][H2_SS_MAX] = {
+/*S_IDLE,S_RS_R, S_RS_L, S_OPEN, S_CL_R, S_CL_L, S_CLS, S_CLN, */
+{ S_XXX, S_ERR, S_ERR, S_CL_L, S_CLS, S_XXX, S_XXX, S_XXX, },/* EV_CLOSED_L*/
+{ S_ERR, S_ERR, S_ERR, S_CL_R, S_ERR, S_CLS, S_NOP, S_NOP, },/* EV_CLOSED_R*/
+{ S_CLS, S_CLS, S_CLS, S_CLS, S_CLS, S_CLS, S_NOP, S_NOP, },/* EV_CANCELLED*/
+{ S_NOP, S_XXX, S_XXX, S_XXX, S_XXX, S_CLS, S_CLN, S_XXX, },/* EV_EOS_SENT*/
+};
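+
+/* Worked example (for illustration): a HEADERS frame received on a stream in
+ * H2_SS_IDLE selects trans_on_recv[NGHTTP2_HEADERS][H2_SS_IDLE] == S_OPEN,
+ * which on_map() below turns into (S_OPEN - 1) == H2_SS_OPEN. S_NOP keeps the
+ * current state, S_ERR flags a protocol error and S_XXX a programming error. */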
+
+static int on_map(h2_stream_state_t state, int map[H2_SS_MAX])
+{
+ int op = map[state];
+ switch (op) {
+ case S_XXX:
+ case S_ERR:
+ return op;
+ case S_NOP:
+ return state;
+ default:
+ return op-1;
+ }
+}
+
+static int on_frame(h2_stream_state_t state, int frame_type,
+ int frame_map[][H2_SS_MAX], apr_size_t maxlen)
+{
+ ap_assert(frame_type >= 0);
+ ap_assert(state >= 0);
+ if (frame_type >= maxlen) {
+ return state; /* NOP, ignore unknown frame types */
+ }
+ return on_map(state, frame_map[frame_type]);
+}
+
+static int on_frame_send(h2_stream_state_t state, int frame_type)
+{
+ return on_frame(state, frame_type, trans_on_send, H2_ALEN(trans_on_send));
+}
+
+static int on_frame_recv(h2_stream_state_t state, int frame_type)
+{
+ return on_frame(state, frame_type, trans_on_recv, H2_ALEN(trans_on_recv));
+}
+
+static int on_event(h2_stream* stream, h2_stream_event_t ev)
+{
+ if (stream->monitor && stream->monitor->on_event) {
+ stream->monitor->on_event(stream->monitor->ctx, stream, ev);
+ }
+ if (ev < H2_ALEN(trans_on_event)) {
+ return on_map(stream->state, trans_on_event[ev]);
+ }
+ return stream->state;
+}
+
+static void H2_STREAM_OUT_LOG(int lvl, h2_stream *s, const char *tag)
+{
+ if (APLOG_C_IS_LEVEL(s->session->c, lvl)) {
+ conn_rec *c = s->session->c;
+ char buffer[4 * 1024];
+ apr_size_t len, bmax = sizeof(buffer)/sizeof(buffer[0]);
+
+ len = h2_util_bb_print(buffer, bmax, tag, "", s->out_buffer);
+ ap_log_cerror(APLOG_MARK, lvl, 0, c,
+ H2_STRM_MSG(s, "out-buffer(%s)"), len? buffer : "empty");
+ }
+}
+
+static apr_status_t setup_input(h2_stream *stream) {
+ if (stream->input == NULL) {
+ int empty = (stream->input_eof
+ && (!stream->in_buffer
+ || APR_BRIGADE_EMPTY(stream->in_buffer)));
+ if (!empty) {
+ h2_beam_create(&stream->input, stream->pool, stream->id,
+ "input", H2_BEAM_OWNER_SEND, 0,
+ stream->session->s->timeout);
+ h2_beam_send_from(stream->input, stream->pool);
+ }
+ }
+ return APR_SUCCESS;
+}
+
+static apr_status_t close_input(h2_stream *stream)
+{
+ conn_rec *c = stream->session->c;
+ apr_status_t status = APR_SUCCESS;
+
+ stream->input_eof = 1;
+ if (stream->input && h2_beam_is_closed(stream->input)) {
+ return APR_SUCCESS;
+ }
+
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, stream->session->c,
+ H2_STRM_MSG(stream, "closing input"));
+ if (stream->rst_error) {
+ return APR_ECONNRESET;
+ }
+
+ if (stream->trailers && !apr_is_empty_table(stream->trailers)) {
+ apr_bucket *b;
+ h2_headers *r;
+
+ if (!stream->in_buffer) {
+ stream->in_buffer = apr_brigade_create(stream->pool, c->bucket_alloc);
+ }
+
+ r = h2_headers_create(HTTP_OK, stream->trailers, NULL,
+ stream->in_trailer_octets, stream->pool);
+ stream->trailers = NULL;
+ b = h2_bucket_headers_create(c->bucket_alloc, r);
+ APR_BRIGADE_INSERT_TAIL(stream->in_buffer, b);
+
+ b = apr_bucket_eos_create(c->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(stream->in_buffer, b);
+
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, stream->session->c,
+ H2_STRM_MSG(stream, "added trailers"));
+ h2_stream_dispatch(stream, H2_SEV_IN_DATA_PENDING);
+ }
+ if (stream->input) {
+ h2_stream_flush_input(stream);
+ return h2_beam_close(stream->input);
+ }
+ return status;
+}
+
+static apr_status_t close_output(h2_stream *stream)
+{
+ if (!stream->output || h2_beam_is_closed(stream->output)) {
+ return APR_SUCCESS;
+ }
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, stream->session->c,
+ H2_STRM_MSG(stream, "closing output"));
+ return h2_beam_leave(stream->output);
+}
+
+static void on_state_enter(h2_stream *stream)
+{
+ if (stream->monitor && stream->monitor->on_state_enter) {
+ stream->monitor->on_state_enter(stream->monitor->ctx, stream);
+ }
+}
+
+static void on_state_event(h2_stream *stream, h2_stream_event_t ev)
+{
+ if (stream->monitor && stream->monitor->on_state_event) {
+ stream->monitor->on_state_event(stream->monitor->ctx, stream, ev);
+ }
+}
+
+static void on_state_invalid(h2_stream *stream)
+{
+ if (stream->monitor && stream->monitor->on_state_invalid) {
+ stream->monitor->on_state_invalid(stream->monitor->ctx, stream);
+ }
+ /* stream got an event/frame invalid in its state */
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, stream->session->c,
+ H2_STRM_MSG(stream, "invalid state event"));
+ switch (stream->state) {
+ case H2_SS_OPEN:
+ case H2_SS_RSVD_L:
+ case H2_SS_RSVD_R:
+ case H2_SS_CLOSED_L:
+ case H2_SS_CLOSED_R:
+ h2_stream_rst(stream, H2_ERR_INTERNAL_ERROR);
+ break;
+ default:
+ break;
+ }
+}
+
+static apr_status_t transit(h2_stream *stream, int new_state)
+{
+ if (new_state == stream->state) {
+ return APR_SUCCESS;
+ }
+ else if (new_state < 0) {
+ ap_log_cerror(APLOG_MARK, APLOG_WARNING, 0, stream->session->c,
+ H2_STRM_LOG(APLOGNO(03081), stream, "invalid transition"));
+ on_state_invalid(stream);
+ return APR_EINVAL;
+ }
+
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, stream->session->c,
+ H2_STRM_MSG(stream, "transit to [%s]"), h2_ss_str(new_state));
+ stream->state = new_state;
+ switch (new_state) {
+ case H2_SS_IDLE:
+ break;
+ case H2_SS_RSVD_L:
+ close_input(stream);
+ break;
+ case H2_SS_RSVD_R:
+ break;
+ case H2_SS_OPEN:
+ break;
+ case H2_SS_CLOSED_L:
+ close_output(stream);
+ break;
+ case H2_SS_CLOSED_R:
+ close_input(stream);
+ break;
+ case H2_SS_CLOSED:
+ close_input(stream);
+ close_output(stream);
+ if (stream->out_buffer) {
+ apr_brigade_cleanup(stream->out_buffer);
+ }
+ break;
+ case H2_SS_CLEANUP:
+ break;
+ }
+ on_state_enter(stream);
+ return APR_SUCCESS;
+}
+
+void h2_stream_set_monitor(h2_stream *stream, h2_stream_monitor *monitor)
+{
+ stream->monitor = monitor;
+}
+
+void h2_stream_dispatch(h2_stream *stream, h2_stream_event_t ev)
+{
+ int new_state;
+
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, stream->session->c,
+ H2_STRM_MSG(stream, "dispatch event %d"), ev);
+ new_state = on_event(stream, ev);
+ if (new_state < 0) {
+ ap_log_cerror(APLOG_MARK, APLOG_WARNING, 0, stream->session->c,
+ H2_STRM_LOG(APLOGNO(10002), stream, "invalid event %d"), ev);
+ on_state_invalid(stream);
+ AP_DEBUG_ASSERT(new_state > S_XXX);
+ return;
+ }
+ else if (new_state == stream->state) {
+ /* nop */
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, stream->session->c,
+ H2_STRM_MSG(stream, "non-state event %d"), ev);
+ return;
+ }
+ else {
+ on_state_event(stream, ev);
+ transit(stream, new_state);
+ }
+}
+
+static void set_policy_for(h2_stream *stream, h2_request *r)
+{
+ int enabled = h2_session_push_enabled(stream->session);
+ stream->push_policy = h2_push_policy_determine(r->headers, stream->pool,
+ enabled);
+ r->serialize = h2_config_geti(stream->session->config, H2_CONF_SER_HEADERS);
+}
+
+apr_status_t h2_stream_send_frame(h2_stream *stream, int ftype, int flags, size_t frame_len)
+{
+ apr_status_t status = APR_SUCCESS;
+ int new_state, eos = 0;
+
+ new_state = on_frame_send(stream->state, ftype);
+ if (new_state < 0) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, stream->session->c,
+ H2_STRM_MSG(stream, "invalid frame %d send"), ftype);
+ AP_DEBUG_ASSERT(new_state > S_XXX);
+ return transit(stream, new_state);
+ }
+
+ ++stream->out_frames;
+ stream->out_frame_octets += frame_len;
+ switch (ftype) {
+ case NGHTTP2_DATA:
+ eos = (flags & NGHTTP2_FLAG_END_STREAM);
+ break;
+
+ case NGHTTP2_HEADERS:
+ eos = (flags & NGHTTP2_FLAG_END_STREAM);
+ break;
+
+ case NGHTTP2_PUSH_PROMISE:
+ /* start pushed stream */
+ ap_assert(stream->request == NULL);
+ ap_assert(stream->rtmp != NULL);
+ status = h2_request_end_headers(stream->rtmp, stream->pool, 1, 0);
+ if (status != APR_SUCCESS) {
+ return status;
+ }
+ set_policy_for(stream, stream->rtmp);
+ stream->request = stream->rtmp;
+ stream->rtmp = NULL;
+ break;
+
+ default:
+ break;
+ }
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, stream->session->c,
+ H2_STRM_MSG(stream, "send frame %d, eos=%d"), ftype, eos);
+ status = transit(stream, new_state);
+ if (status == APR_SUCCESS && eos) {
+ status = transit(stream, on_event(stream, H2_SEV_CLOSED_L));
+ }
+ return status;
+}
+
+apr_status_t h2_stream_recv_frame(h2_stream *stream, int ftype, int flags, size_t frame_len)
+{
+ apr_status_t status = APR_SUCCESS;
+ int new_state, eos = 0;
+
+ new_state = on_frame_recv(stream->state, ftype);
+ if (new_state < 0) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, stream->session->c,
+ H2_STRM_MSG(stream, "invalid frame %d recv"), ftype);
+ AP_DEBUG_ASSERT(new_state > S_XXX);
+ return transit(stream, new_state);
+ }
+
+ switch (ftype) {
+ case NGHTTP2_DATA:
+ eos = (flags & NGHTTP2_FLAG_END_STREAM);
+ break;
+
+ case NGHTTP2_HEADERS:
+ eos = (flags & NGHTTP2_FLAG_END_STREAM);
+ if (stream->state == H2_SS_OPEN) {
+ /* trailer HEADER */
+ if (!eos) {
+ h2_stream_rst(stream, H2_ERR_PROTOCOL_ERROR);
+ }
+ stream->in_trailer_octets += frame_len;
+ }
+ else {
+ /* request HEADER */
+ ap_assert(stream->request == NULL);
+ if (stream->rtmp == NULL) {
+ /* This can only happen if the stream has received no header
+ * name/value pairs at all. The latest nghttp2 versions have become
+ * pretty good at detecting this early. In any case, we have
+ * to abort the connection here, since this is clearly a protocol error */
+ return APR_EINVAL;
+ }
+ status = h2_request_end_headers(stream->rtmp, stream->pool, eos, frame_len);
+ if (status != APR_SUCCESS) {
+ return status;
+ }
+ set_policy_for(stream, stream->rtmp);
+ stream->request = stream->rtmp;
+ stream->rtmp = NULL;
+ }
+ break;
+
+ default:
+ break;
+ }
+ status = transit(stream, new_state);
+ if (status == APR_SUCCESS && eos) {
+ status = transit(stream, on_event(stream, H2_SEV_CLOSED_R));
+ }
+ return status;
+}
+
+apr_status_t h2_stream_flush_input(h2_stream *stream)
+{
+ apr_status_t status = APR_SUCCESS;
+
+ if (stream->in_buffer && !APR_BRIGADE_EMPTY(stream->in_buffer)) {
+ setup_input(stream);
+ status = h2_beam_send(stream->input, stream->in_buffer, APR_BLOCK_READ);
+ stream->in_last_write = apr_time_now();
+ }
+ if (stream->input_eof
+ && stream->input && !h2_beam_is_closed(stream->input)) {
+ status = h2_beam_close(stream->input);
+ }
+ return status;
+}
+
+apr_status_t h2_stream_recv_DATA(h2_stream *stream, uint8_t flags,
+ const uint8_t *data, size_t len)
+{
+ h2_session *session = stream->session;
+ apr_status_t status = APR_SUCCESS;
+
+ stream->in_data_frames++;
+ if (len > 0) {
+ if (APLOGctrace3(session->c)) {
+ const char *load = apr_pstrndup(stream->pool, (const char *)data, len);
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE3, 0, session->c,
+ H2_STRM_MSG(stream, "recv DATA, len=%d: -->%s<--"),
+ (int)len, load);
+ }
+ else {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, status, session->c,
+ H2_STRM_MSG(stream, "recv DATA, len=%d"), (int)len);
+ }
+ stream->in_data_octets += len;
+ if (!stream->in_buffer) {
+ stream->in_buffer = apr_brigade_create(stream->pool,
+ session->c->bucket_alloc);
+ }
+ apr_brigade_write(stream->in_buffer, NULL, NULL, (const char *)data, len);
+ h2_stream_dispatch(stream, H2_SEV_IN_DATA_PENDING);
+ }
+ return status;
+}
+
+static void prep_output(h2_stream *stream) {
+ conn_rec *c = stream->session->c;
+ if (!stream->out_buffer) {
+ stream->out_buffer = apr_brigade_create(stream->pool, c->bucket_alloc);
+ }
+}
+
+h2_stream *h2_stream_create(int id, apr_pool_t *pool, h2_session *session,
+ h2_stream_monitor *monitor, int initiated_on)
+{
+ h2_stream *stream = apr_pcalloc(pool, sizeof(h2_stream));
+
+ stream->id = id;
+ stream->initiated_on = initiated_on;
+ stream->created = apr_time_now();
+ stream->state = H2_SS_IDLE;
+ stream->pool = pool;
+ stream->session = session;
+ stream->monitor = monitor;
+ stream->max_mem = session->max_stream_mem;
+
+#ifdef H2_NG2_LOCAL_WIN_SIZE
+ stream->in_window_size =
+ nghttp2_session_get_stream_local_window_size(
+ stream->session->ngh2, stream->id);
+#endif
+
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c,
+ H2_STRM_LOG(APLOGNO(03082), stream, "created"));
+ on_state_enter(stream);
+ return stream;
+}
+
+void h2_stream_cleanup(h2_stream *stream)
+{
+ apr_status_t status;
+
+ ap_assert(stream);
+ if (stream->out_buffer) {
+ /* remove any left over output buckets that may still have
+ * references into request pools */
+ apr_brigade_cleanup(stream->out_buffer);
+ }
+ if (stream->input) {
+ h2_beam_abort(stream->input);
+ status = h2_beam_wait_empty(stream->input, APR_NONBLOCK_READ);
+ if (status == APR_EAGAIN) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, stream->session->c,
+ H2_STRM_MSG(stream, "wait on input drain"));
+ status = h2_beam_wait_empty(stream->input, APR_BLOCK_READ);
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, status, stream->session->c,
+ H2_STRM_MSG(stream, "input drain returned"));
+ }
+ }
+}
+
+void h2_stream_destroy(h2_stream *stream)
+{
+ ap_assert(stream);
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE3, 0, stream->session->c,
+ H2_STRM_MSG(stream, "destroy"));
+ apr_pool_destroy(stream->pool);
+}
+
+apr_status_t h2_stream_prep_processing(h2_stream *stream)
+{
+ if (stream->request) {
+ const h2_request *r = stream->request;
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, stream->session->c,
+ H2_STRM_MSG(stream, "schedule %s %s://%s%s chunked=%d"),
+ r->method, r->scheme, r->authority, r->path, r->chunked);
+ setup_input(stream);
+ stream->scheduled = 1;
+ return APR_SUCCESS;
+ }
+ return APR_EINVAL;
+}
+
+void h2_stream_rst(h2_stream *stream, int error_code)
+{
+ stream->rst_error = error_code;
+ if (stream->input) {
+ h2_beam_abort(stream->input);
+ }
+ if (stream->output) {
+ h2_beam_leave(stream->output);
+ }
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, stream->session->c,
+ H2_STRM_MSG(stream, "reset, error=%d"), error_code);
+ h2_stream_dispatch(stream, H2_SEV_CANCELLED);
+}
+
+apr_status_t h2_stream_set_request_rec(h2_stream *stream,
+ request_rec *r, int eos)
+{
+ h2_request *req;
+ apr_status_t status;
+
+ ap_assert(stream->request == NULL);
+ ap_assert(stream->rtmp == NULL);
+ if (stream->rst_error) {
+ return APR_ECONNRESET;
+ }
+ status = h2_request_rcreate(&req, stream->pool, r);
+ if (status == APR_SUCCESS) {
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, status, r,
+ H2_STRM_LOG(APLOGNO(03058), stream,
+ "set_request_rec %s host=%s://%s%s"),
+ req->method, req->scheme, req->authority, req->path);
+ stream->rtmp = req;
+ /* simulate the frames that led to this */
+ return h2_stream_recv_frame(stream, NGHTTP2_HEADERS,
+ NGHTTP2_FLAG_END_STREAM, 0);
+ }
+ return status;
+}
+
+void h2_stream_set_request(h2_stream *stream, const h2_request *r)
+{
+ ap_assert(stream->request == NULL);
+ ap_assert(stream->rtmp == NULL);
+ stream->rtmp = h2_request_clone(stream->pool, r);
+}
+
+static void set_error_response(h2_stream *stream, int http_status)
+{
+ if (!h2_stream_is_ready(stream)) {
+ conn_rec *c = stream->session->c;
+ apr_bucket *b;
+ h2_headers *response;
+
+ response = h2_headers_die(http_status, stream->request, stream->pool);
+ prep_output(stream);
+ b = apr_bucket_eos_create(c->bucket_alloc);
+ APR_BRIGADE_INSERT_HEAD(stream->out_buffer, b);
+ b = h2_bucket_headers_create(c->bucket_alloc, response);
+ APR_BRIGADE_INSERT_HEAD(stream->out_buffer, b);
+ }
+}
+
+static apr_status_t add_trailer(h2_stream *stream,
+ const char *name, size_t nlen,
+ const char *value, size_t vlen)
+{
+ conn_rec *c = stream->session->c;
+ char *hname, *hvalue;
+
+ if (nlen == 0 || name[0] == ':') {
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, APR_EINVAL, c,
+ H2_STRM_LOG(APLOGNO(03060), stream,
+ "pseudo header in trailer"));
+ return APR_EINVAL;
+ }
+ if (h2_req_ignore_trailer(name, nlen)) {
+ return APR_SUCCESS;
+ }
+ if (!stream->trailers) {
+ stream->trailers = apr_table_make(stream->pool, 5);
+ }
+ hname = apr_pstrndup(stream->pool, name, nlen);
+ hvalue = apr_pstrndup(stream->pool, value, vlen);
+ h2_util_camel_case_header(hname, nlen);
+ apr_table_mergen(stream->trailers, hname, hvalue);
+
+ return APR_SUCCESS;
+}
+
+apr_status_t h2_stream_add_header(h2_stream *stream,
+ const char *name, size_t nlen,
+ const char *value, size_t vlen)
+{
+ h2_session *session = stream->session;
+ int error = 0;
+ apr_status_t status;
+
+ if (stream->has_response) {
+ return APR_EINVAL;
+ }
+ ++stream->request_headers_added;
+ if (name[0] == ':') {
+ if ((vlen) > session->s->limit_req_line) {
+ /* pseudo header: approximation of request line size check */
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, session->c,
+ H2_STRM_MSG(stream, "pseudo %s too long"), name);
+ error = HTTP_REQUEST_URI_TOO_LARGE;
+ }
+ }
+ else if ((nlen + 2 + vlen) > session->s->limit_req_fieldsize) {
+ /* header too long */
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, session->c,
+ H2_STRM_MSG(stream, "header %s too long"), name);
+ error = HTTP_REQUEST_HEADER_FIELDS_TOO_LARGE;
+ }
+
+ if (stream->request_headers_added > session->s->limit_req_fields + 4) {
+ /* too many header lines, allowing for the 4 pseudo headers */
+ if (stream->request_headers_added
+ > session->s->limit_req_fields + 4 + 100) {
+ /* yeah, right */
+ h2_stream_rst(stream, H2_ERR_ENHANCE_YOUR_CALM);
+ return APR_ECONNRESET;
+ }
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, session->c,
+ H2_STRM_MSG(stream, "too many header lines"));
+ error = HTTP_REQUEST_HEADER_FIELDS_TOO_LARGE;
+ }
+
+ if (error) {
+ set_error_response(stream, error);
+ return APR_EINVAL;
+ }
+ else if (H2_SS_IDLE == stream->state) {
+ if (!stream->rtmp) {
+ stream->rtmp = h2_req_create(stream->id, stream->pool,
+ NULL, NULL, NULL, NULL, NULL, 0);
+ }
+ status = h2_request_add_header(stream->rtmp, stream->pool,
+ name, nlen, value, vlen);
+ }
+ else if (H2_SS_OPEN == stream->state) {
+ status = add_trailer(stream, name, nlen, value, vlen);
+ }
+ else {
+ status = APR_EINVAL;
+ }
+
+ if (status != APR_SUCCESS) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, session->c,
+ H2_STRM_MSG(stream, "header %s not accepted"), name);
+ h2_stream_dispatch(stream, H2_SEV_CANCELLED);
+ }
+ return status;
+}
+
+static apr_bucket *get_first_headers_bucket(apr_bucket_brigade *bb)
+{
+ if (bb) {
+ apr_bucket *b = APR_BRIGADE_FIRST(bb);
+ while (b != APR_BRIGADE_SENTINEL(bb)) {
+ if (H2_BUCKET_IS_HEADERS(b)) {
+ return b;
+ }
+ b = APR_BUCKET_NEXT(b);
+ }
+ }
+ return NULL;
+}
+
+static apr_status_t add_buffered_data(h2_stream *stream, apr_off_t requested,
+ apr_off_t *plen, int *peos, int *is_all,
+ h2_headers **pheaders)
+{
+ apr_bucket *b, *e;
+
+ *peos = 0;
+ *plen = 0;
+ *is_all = 0;
+ if (pheaders) {
+ *pheaders = NULL;
+ }
+
+ H2_STREAM_OUT_LOG(APLOG_TRACE2, stream, "add_buffered_data");
+ b = APR_BRIGADE_FIRST(stream->out_buffer);
+ while (b != APR_BRIGADE_SENTINEL(stream->out_buffer)) {
+ e = APR_BUCKET_NEXT(b);
+ if (APR_BUCKET_IS_METADATA(b)) {
+ if (APR_BUCKET_IS_FLUSH(b)) {
+ APR_BUCKET_REMOVE(b);
+ apr_bucket_destroy(b);
+ }
+ else if (APR_BUCKET_IS_EOS(b)) {
+ *peos = 1;
+ return APR_SUCCESS;
+ }
+ else if (H2_BUCKET_IS_HEADERS(b)) {
+ if (*plen > 0) {
+ /* data before the response, can only return up to here */
+ return APR_SUCCESS;
+ }
+ else if (pheaders) {
+ *pheaders = h2_bucket_headers_get(b);
+ APR_BUCKET_REMOVE(b);
+ apr_bucket_destroy(b);
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, stream->session->c,
+ H2_STRM_MSG(stream, "prep, -> response %d"),
+ (*pheaders)->status);
+ return APR_SUCCESS;
+ }
+ else {
+ return APR_EAGAIN;
+ }
+ }
+ }
+ else if (b->length == 0) {
+ APR_BUCKET_REMOVE(b);
+ apr_bucket_destroy(b);
+ }
+ else {
+ ap_assert(b->length != (apr_size_t)-1);
+ *plen += b->length;
+ if (*plen >= requested) {
+ *plen = requested;
+ return APR_SUCCESS;
+ }
+ }
+ b = e;
+ }
+ *is_all = 1;
+ return APR_SUCCESS;
+}
+
+apr_status_t h2_stream_out_prepare(h2_stream *stream, apr_off_t *plen,
+ int *peos, h2_headers **pheaders)
+{
+ apr_status_t status = APR_SUCCESS;
+ apr_off_t requested, missing, max_chunk = H2_DATA_CHUNK_SIZE;
+ conn_rec *c;
+ int complete;
+
+ ap_assert(stream);
+
+ if (stream->rst_error) {
+ *plen = 0;
+ *peos = 1;
+ return APR_ECONNRESET;
+ }
+
+ c = stream->session->c;
+ prep_output(stream);
+
+ /* determine how much we'd like to send. We cannot send more than
+ * is requested. But we can reduce the size in case the master
+ * connection operates in smaller chunks. (TLS warmup) */
+ if (stream->session->io.write_size > 0) {
+ max_chunk = stream->session->io.write_size - 9; /* header bits */
+ }
+ requested = (*plen > 0)? H2MIN(*plen, max_chunk) : max_chunk;
+
+ /* count the buffered data until eos or a headers bucket */
+ status = add_buffered_data(stream, requested, plen, peos, &complete, pheaders);
+
+ if (status == APR_EAGAIN) {
+ /* TODO: ugly, someone needs to retrieve the response first */
+ h2_mplx_keep_active(stream->session->mplx, stream);
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c,
+ H2_STRM_MSG(stream, "prep, response eagain"));
+ return status;
+ }
+ else if (status != APR_SUCCESS) {
+ return status;
+ }
+
+ if (pheaders && *pheaders) {
+ return APR_SUCCESS;
+ }
+
+ /* If we do not have enough buffered data to satisfy the requested
+ * length *and* we counted the _complete_ buffer (and did not stop in the middle
+ * because of meta data there), let's see if we can read more from the
+ * output beam */
+ missing = H2MIN(requested, stream->max_mem) - *plen;
+ if (complete && !*peos && missing > 0) {
+ apr_status_t rv = APR_EOF;
+
+ if (stream->output) {
+ H2_STREAM_OUT_LOG(APLOG_TRACE2, stream, "pre");
+ rv = h2_beam_receive(stream->output, stream->out_buffer,
+ APR_NONBLOCK_READ, stream->max_mem - *plen);
+ H2_STREAM_OUT_LOG(APLOG_TRACE2, stream, "post");
+ }
+
+ if (rv == APR_SUCCESS) {
+ /* count the buffer again, now that we have read output */
+ status = add_buffered_data(stream, requested, plen, peos, &complete, pheaders);
+ }
+ else if (APR_STATUS_IS_EOF(rv)) {
+ apr_bucket *eos = apr_bucket_eos_create(c->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(stream->out_buffer, eos);
+ *peos = 1;
+ }
+ else if (APR_STATUS_IS_EAGAIN(rv)) {
+ /* we set this as the status of this call only if there
+ * is no buffered data, see the check below */
+ }
+ else {
+ /* real error reading. Give this back directly, even though
+ * we may have something buffered. */
+ status = rv;
+ }
+ }
+
+ if (status == APR_SUCCESS) {
+ if (*peos || *plen) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c,
+ H2_STRM_MSG(stream, "prepare, len=%ld eos=%d"),
+ (long)*plen, *peos);
+ }
+ else {
+ status = (stream->output && h2_beam_is_closed(stream->output))? APR_EOF : APR_EAGAIN;
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c,
+ H2_STRM_MSG(stream, "prepare, no data"));
+ }
+ }
+ return status;
+}
+
+static int is_not_headers(apr_bucket *b)
+{
+ return !H2_BUCKET_IS_HEADERS(b);
+}
+
+apr_status_t h2_stream_read_to(h2_stream *stream, apr_bucket_brigade *bb,
+ apr_off_t *plen, int *peos)
+{
+ conn_rec *c = stream->session->c;
+ apr_status_t status = APR_SUCCESS;
+
+ if (stream->rst_error) {
+ return APR_ECONNRESET;
+ }
+ status = h2_append_brigade(bb, stream->out_buffer, plen, peos, is_not_headers);
+ if (status == APR_SUCCESS && !*peos && !*plen) {
+ status = APR_EAGAIN;
+ }
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, status, c,
+ H2_STRM_MSG(stream, "read_to, len=%ld eos=%d"),
+ (long)*plen, *peos);
+ return status;
+}
+
+
+apr_status_t h2_stream_submit_pushes(h2_stream *stream, h2_headers *response)
+{
+ apr_status_t status = APR_SUCCESS;
+ apr_array_header_t *pushes;
+ int i;
+
+ pushes = h2_push_collect_update(stream, stream->request, response);
+ if (pushes && !apr_is_empty_array(pushes)) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, stream->session->c,
+ H2_STRM_MSG(stream, "found %d push candidates"),
+ pushes->nelts);
+ for (i = 0; i < pushes->nelts; ++i) {
+ h2_push *push = APR_ARRAY_IDX(pushes, i, h2_push*);
+ h2_stream *s = h2_session_push(stream->session, stream, push);
+ if (!s) {
+ status = APR_ECONNRESET;
+ break;
+ }
+ }
+ }
+ return status;
+}
+
+apr_table_t *h2_stream_get_trailers(h2_stream *stream)
+{
+ return NULL;
+}
+
+const h2_priority *h2_stream_get_priority(h2_stream *stream,
+ h2_headers *response)
+{
+ if (response && stream->initiated_on) {
+ const char *ctype = apr_table_get(response->headers, "content-type");
+ if (ctype) {
+ /* FIXME: Not good enough, config needs to come from request->server */
+ return h2_config_get_priority(stream->session->config, ctype);
+ }
+ }
+ return NULL;
+}
+
+int h2_stream_is_ready(h2_stream *stream)
+{
+ if (stream->has_response) {
+ return 1;
+ }
+ else if (stream->out_buffer && get_first_headers_bucket(stream->out_buffer)) {
+ return 1;
+ }
+ return 0;
+}
+
+int h2_stream_was_closed(const h2_stream *stream)
+{
+ switch (stream->state) {
+ case H2_SS_CLOSED:
+ case H2_SS_CLEANUP:
+ return 1;
+ default:
+ return 0;
+ }
+}
+
+apr_status_t h2_stream_in_consumed(h2_stream *stream, apr_off_t amount)
+{
+ h2_session *session = stream->session;
+
+ if (amount > 0) {
+ apr_off_t consumed = amount;
+
+ while (consumed > 0) {
+ int len = (consumed > INT_MAX)? INT_MAX : (int)consumed;
+ nghttp2_session_consume(session->ngh2, stream->id, len);
+ consumed -= len;
+ }
+
+#ifdef H2_NG2_LOCAL_WIN_SIZE
+ if (1) {
+ int cur_size = nghttp2_session_get_stream_local_window_size(
+ session->ngh2, stream->id);
+ int win = stream->in_window_size;
+ int thigh = win * 8/10;
+ int tlow = win * 2/10;
+ const int win_max = 2*1024*1024;
+ const int win_min = 32*1024;
+
+ /* Work in progress, probably should add directives for these
+ * values once this stabilizes somewhat. The general idea is
+ * to adapt stream window sizes if the input window changes
+ * a) very quickly (< good RTT) from full to empty
+ * b) only a little bit (> bad RTT)
+ * where in a) it grows and in b) it shrinks again.
+ */
+ if (cur_size > thigh && amount > thigh && win < win_max) {
+ /* almost empty again with one reported consumption, how
+ * long did this take? */
+ long ms = apr_time_msec(apr_time_now() - stream->in_last_write);
+ if (ms < 40) {
+ win = H2MIN(win_max, win + (64*1024));
+ }
+ }
+ else if (cur_size < tlow && amount < tlow && win > win_min) {
+ /* staying full, for how long already? */
+ long ms = apr_time_msec(apr_time_now() - stream->in_last_write);
+ if (ms > 700) {
+ win = H2MAX(win_min, win - (32*1024));
+ }
+ }
+
+ if (win != stream->in_window_size) {
+ stream->in_window_size = win;
+ nghttp2_session_set_local_window_size(session->ngh2,
+ NGHTTP2_FLAG_NONE, stream->id, win);
+ }
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, session->c,
+ "h2_stream(%ld-%d): consumed %ld bytes, window now %d/%d",
+ session->id, stream->id, (long)amount,
+ cur_size, stream->in_window_size);
+ }
+#endif
+ }
+ return APR_SUCCESS;
+}
+
diff --git a/modules/http2/h2_stream.h b/modules/http2/h2_stream.h
new file mode 100644
index 0000000..7ecc0ad
--- /dev/null
+++ b/modules/http2/h2_stream.h
@@ -0,0 +1,309 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __mod_h2__h2_stream__
+#define __mod_h2__h2_stream__
+
+#include "h2.h"
+
+/**
+ * An HTTP/2 stream, i.e. a client request+response in HTTP/1.1 terms.
+ *
+ * A stream always belongs to a h2_session, the one managing the
+ * connection to the client. The h2_session writes to the h2_stream,
+ * adding HEADERS and DATA and finally an EOS. When headers are done,
+ * h2_stream is scheduled for handling, which is expected to produce
+ * a response h2_headers at least.
+ *
+ * The h2_headers may be followed by more h2_headers (interim responses) and
+ * by DATA frames read from the h2_stream until EOS is reached. Trailers
+ * are sent when a final h2_headers is received. This always closes the stream
+ * output.
+ */
+
+struct h2_mplx;
+struct h2_priority;
+struct h2_request;
+struct h2_headers;
+struct h2_session;
+struct h2_task;
+struct h2_bucket_beam;
+
+typedef struct h2_stream h2_stream;
+
+typedef void h2_stream_state_cb(void *ctx, h2_stream *stream);
+typedef void h2_stream_event_cb(void *ctx, h2_stream *stream,
+ h2_stream_event_t ev);
+
+/**
+ * Callback structure for events and stream state transitions
+ */
+typedef struct h2_stream_monitor {
+ void *ctx;
+ h2_stream_state_cb *on_state_enter; /* called when a state is entered */
+ h2_stream_state_cb *on_state_invalid; /* called when an invalid state change
+ was detected */
+ h2_stream_event_cb *on_state_event; /* called right before the given event
+ results in a new stream state */
+ h2_stream_event_cb *on_event; /* called for events that do not
+ trigger a state change */
+} h2_stream_monitor;
+
+struct h2_stream {
+ int id; /* http2 stream identifier */
+ int initiated_on; /* initiating stream id (PUSH) or 0 */
+ apr_pool_t *pool; /* the memory pool for this stream */
+ struct h2_session *session; /* the session this stream belongs to */
+ h2_stream_state_t state; /* state of this stream */
+
+ apr_time_t created; /* when stream was created */
+
+ const struct h2_request *request; /* the request made in this stream */
+ struct h2_request *rtmp; /* request being assembled */
+ apr_table_t *trailers; /* optional incoming trailers */
+ int request_headers_added; /* number of request headers added */
+
+ struct h2_bucket_beam *input;
+ apr_bucket_brigade *in_buffer;
+ int in_window_size;
+ apr_time_t in_last_write;
+
+ struct h2_bucket_beam *output;
+ apr_bucket_brigade *out_buffer;
+ apr_size_t max_mem; /* maximum amount of data buffered */
+
+ int rst_error; /* stream error for RST_STREAM */
+ unsigned int aborted : 1; /* was aborted */
+ unsigned int scheduled : 1; /* stream has been scheduled */
+ unsigned int has_response : 1; /* response headers are known */
+ unsigned int input_eof : 1; /* no more request data coming */
+ unsigned int out_checked : 1; /* output eof was double checked */
+ unsigned int push_policy; /* which push policy to use for this request */
+
+ struct h2_task *task; /* assigned task to fulfill the request */
+
+ const h2_priority *pref_priority; /* preferred priority for this stream */
+ apr_off_t out_frames; /* # of frames sent out */
+ apr_off_t out_frame_octets; /* # of RAW frame octets sent out */
+ apr_off_t out_data_frames; /* # of DATA frames sent */
+ apr_off_t out_data_octets; /* # of DATA octets (payload) sent */
+ apr_off_t in_data_frames; /* # of DATA frames received */
+ apr_off_t in_data_octets; /* # of DATA octets (payload) received */
+ apr_off_t in_trailer_octets; /* # of HEADER octets (payload) received in trailers */
+
+ h2_stream_monitor *monitor; /* optional monitor for stream states */
+};
+
+
+#define H2_STREAM_RST(s, def) (s->rst_error? s->rst_error : (def))
+
+/**
+ * Create a stream in H2_SS_IDLE state.
+ * @param id the stream identifier
+ * @param pool the memory pool to use for this stream
+ * @param session the session this stream belongs to
+ * @param monitor an optional monitor to be called for events and
+ * state transitions
+ * @param initiated_on the id of the stream this one was initiated on (PUSH)
+ *
+ * @return the newly opened stream
+ */
+h2_stream *h2_stream_create(int id, apr_pool_t *pool,
+ struct h2_session *session,
+ h2_stream_monitor *monitor,
+ int initiated_on);
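+
+/*
+ * Illustrative sketch, assuming the caller creates a sub-pool for the stream
+ * and already holds the owning session:
+ *
+ *     apr_pool_t *stream_pool;
+ *     apr_pool_create(&stream_pool, session_pool);
+ *     h2_stream *stream = h2_stream_create(stream_id, stream_pool, session,
+ *                                          NULL, 0);
+ *
+ * A NULL monitor and initiated_on == 0 give an ordinary client-initiated
+ * stream starting in H2_SS_IDLE.
+ */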
+
+/**
+ * Destroy memory pool if still owned by the stream.
+ */
+void h2_stream_destroy(h2_stream *stream);
+
+/**
+ * Prepare the stream so that processing may start.
+ *
+ * This is the time to allocate resources not needed earlier.
+ *
+ * @param stream the stream to prep
+ */
+apr_status_t h2_stream_prep_processing(h2_stream *stream);
+
+/*
+ * Set a new monitor for this stream, replacing any existing one. Can
+ * be called with NULL to have no monitor installed.
+ */
+void h2_stream_set_monitor(h2_stream *stream, h2_stream_monitor *monitor);
+
+/**
+ * Dispatch (handle) an event on the given stream.
+ * @param stream the stream the event happened on
+ * @param ev the type of event
+ */
+void h2_stream_dispatch(h2_stream *stream, h2_stream_event_t ev);
+
+/**
+ * Clean up references into request processing.
+ *
+ * @param stream the stream to cleanup
+ */
+void h2_stream_cleanup(h2_stream *stream);
+
+/**
+ * Notify the stream that amount bytes have been consumed of its input
+ * since the last invocation of this method (delta amount).
+ */
+apr_status_t h2_stream_in_consumed(h2_stream *stream, apr_off_t amount);
+
+/**
+ * Set complete stream headers from the given h2_request.
+ *
+ * @param stream stream to write request to
+ * @param r the request with all the meta data
+ */
+void h2_stream_set_request(h2_stream *stream, const h2_request *r);
+
+/**
+ * Set complete stream header from given request_rec.
+ *
+ * @param stream stream to write request to
+ * @param r the request with all the meta data
+ * @param eos != 0 iff stream input is closed
+ */
+apr_status_t h2_stream_set_request_rec(h2_stream *stream,
+ request_rec *r, int eos);
+
+/*
+ * Add an HTTP/2 header (including pseudo headers) or trailer
+ * to the given stream, depending on stream state.
+ *
+ * @param stream stream to write the header to
+ * @param name the name of the HTTP/2 header
+ * @param nlen the number of characters in name
+ * @param value the header value
+ * @param vlen the number of characters in value
+ */
+apr_status_t h2_stream_add_header(h2_stream *stream,
+ const char *name, size_t nlen,
+ const char *value, size_t vlen);
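+
+/*
+ * Hedged sketch of how a hypothetical nghttp2 on_header callback could feed
+ * this function; lookup_stream() is an assumed helper that finds the stream
+ * for frame->hd.stream_id in the session:
+ *
+ *     static int on_header_cb(nghttp2_session *ngh2, const nghttp2_frame *frame,
+ *                             const uint8_t *name, size_t namelen,
+ *                             const uint8_t *value, size_t valuelen,
+ *                             uint8_t flags, void *userp)
+ *     {
+ *         h2_stream *stream = lookup_stream(userp, frame->hd.stream_id);
+ *         apr_status_t rv = h2_stream_add_header(stream, (const char *)name,
+ *                                                namelen, (const char *)value,
+ *                                                valuelen);
+ *         return (rv == APR_SUCCESS)? 0 : NGHTTP2_ERR_TEMPORAL_CALLBACK_FAILURE;
+ *     }
+ */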
+
+apr_status_t h2_stream_send_frame(h2_stream *stream, int frame_type, int flags, size_t frame_len);
+apr_status_t h2_stream_recv_frame(h2_stream *stream, int frame_type, int flags, size_t frame_len);
+
+/*
+ * Process a frame of received DATA.
+ *
+ * @param stream stream to write the data to
+ * @param flags the frame flags
+ * @param data the beginning of the bytes to write
+ * @param len the number of bytes to write
+ */
+apr_status_t h2_stream_recv_DATA(h2_stream *stream, uint8_t flags,
+ const uint8_t *data, size_t len);
+
+apr_status_t h2_stream_flush_input(h2_stream *stream);
+
+/**
+ * Reset the stream. Stream write/reads will return errors afterwards.
+ *
+ * @param stream the stream to reset
+ * @param error_code the HTTP/2 error code
+ */
+void h2_stream_rst(h2_stream *stream, int error_code);
+
+/**
+ * Determine if stream was closed already. This is true for
+ * states H2_SS_CLOSED, H2_SS_CLEANUP. But not true
+ * for H2_SS_CLOSED_L and H2_SS_CLOSED_R.
+ *
+ * @param stream the stream to check on
+ * @return != 0 iff stream has been closed
+ */
+int h2_stream_was_closed(const h2_stream *stream);
+
+/**
+ * Do a speculative read on the stream output to determine the
+ * amount of data that can be read.
+ *
+ * @param stream the stream to speculatively read from
+ * @param plen (in-/out) number of bytes requested and on return amount of bytes that
+ * may be read without blocking
+ * @param peos (out) != 0 iff end of stream will be reached when reading plen
+ * bytes (out value).
+ * @param presponse (out) the response if one became available
+ * @return APR_SUCCESS if out information was computed successfully.
+ * APR_EAGAIN if no data is available and end of stream has not been
+ * reached yet.
+ */
+apr_status_t h2_stream_out_prepare(h2_stream *stream, apr_off_t *plen,
+ int *peos, h2_headers **presponse);
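+
+/*
+ * Illustrative sketch of the speculative read: ask how much could be sent
+ * right now (the enclosing send logic in the session is assumed):
+ *
+ *     apr_off_t len = 16 * 1024;     // how much we would like to send
+ *     int eos = 0;
+ *     h2_headers *headers = NULL;
+ *     apr_status_t rv = h2_stream_out_prepare(stream, &len, &eos, &headers);
+ *     if (rv == APR_SUCCESS && headers) {
+ *         // a response became available, submit its headers first
+ *     }
+ *     else if (rv == APR_SUCCESS) {
+ *         // up to len bytes may be read without blocking, eos says whether
+ *         // the stream output ends with them
+ *     }
+ */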
+
+/**
+ * Read a maximum number of bytes into the bucket brigade.
+ *
+ * @param stream the stream to read from
+ * @param bb the brigade to append output to
+ * @param plen (in-/out) max. number of bytes to append and on return actual
+ * number of bytes appended to brigade
+ * @param peos (out) != 0 iff end of stream has been reached while reading
+ * @return APR_SUCCESS if out information was computed successfully.
+ * APR_EAGAIN if no data is available and end of stream has not been
+ * reached yet.
+ */
+apr_status_t h2_stream_read_to(h2_stream *stream, apr_bucket_brigade *bb,
+ apr_off_t *plen, int *peos);
+
+/**
+ * Get optional trailers for this stream, may be NULL. Meaningful
+ * results can only be expected when the end of the response body has
+ * been reached.
+ *
+ * @param stream to ask for trailers
+ * @return the trailers, or NULL
+ */
+apr_table_t *h2_stream_get_trailers(h2_stream *stream);
+
+/**
+ * Submit any server push promises on this stream and schedule
+ * the tasks connected with these.
+ *
+ * @param stream the stream for which to submit
+ */
+apr_status_t h2_stream_submit_pushes(h2_stream *stream, h2_headers *response);
+
+/**
+ * Get priority information set for this stream.
+ */
+const struct h2_priority *h2_stream_get_priority(h2_stream *stream,
+ h2_headers *response);
+
+/**
+ * Return a textual representation of the stream state as in RFC 7540
+ * nomenclature, all caps, underscores.
+ */
+const char *h2_stream_state_str(h2_stream *stream);
+
+/**
+ * Determine if stream is ready for submitting a response or a RST
+ * @param stream the stream to check
+ */
+int h2_stream_is_ready(h2_stream *stream);
+
+#define H2_STRM_MSG(s, msg) \
+ "h2_stream(%ld-%d,%s): "msg, s->session->id, s->id, h2_stream_state_str(s)
+
+#define H2_STRM_LOG(aplogno, s, msg) aplogno H2_STRM_MSG(s, msg)
+
+#endif /* defined(__mod_h2__h2_stream__) */
diff --git a/modules/http2/h2_switch.c b/modules/http2/h2_switch.c
new file mode 100644
index 0000000..5e73568
--- /dev/null
+++ b/modules/http2/h2_switch.c
@@ -0,0 +1,194 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <assert.h>
+
+#include <apr_strings.h>
+#include <apr_optional.h>
+#include <apr_optional_hooks.h>
+
+#include <httpd.h>
+#include <http_core.h>
+#include <http_config.h>
+#include <http_connection.h>
+#include <http_protocol.h>
+#include <http_log.h>
+
+#include "h2_private.h"
+
+#include "h2_config.h"
+#include "h2_ctx.h"
+#include "h2_conn.h"
+#include "h2_h2.h"
+#include "h2_switch.h"
+
+/*******************************************************************************
+ * Once per lifetime init, retrieve optional functions
+ */
+apr_status_t h2_switch_init(apr_pool_t *pool, server_rec *s)
+{
+ (void)pool;
+ ap_log_error(APLOG_MARK, APLOG_TRACE1, 0, s, "h2_switch init");
+
+ return APR_SUCCESS;
+}
+
+static int h2_protocol_propose(conn_rec *c, request_rec *r,
+ server_rec *s,
+ const apr_array_header_t *offers,
+ apr_array_header_t *proposals)
+{
+ int proposed = 0;
+ int is_tls = h2_h2_is_tls(c);
+ const char **protos = is_tls? h2_tls_protos : h2_clear_protos;
+
+ (void)s;
+ if (!h2_mpm_supported()) {
+ return DECLINED;
+ }
+
+ if (strcmp(AP_PROTOCOL_HTTP1, ap_get_protocol(c))) {
+ /* We do not know how to switch from anything else but http/1.1.
+ */
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, c, APLOGNO(03083)
+ "protocol switch: current proto != http/1.1, declined");
+ return DECLINED;
+ }
+
+ if (!h2_is_acceptable_connection(c, 0)) {
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, c, APLOGNO(03084)
+ "protocol propose: connection requirements not met");
+ return DECLINED;
+ }
+
+ if (r) {
+ /* So far, this indicates an HTTP/1 Upgrade header initiated
+ * protocol switch. For that, the HTTP2-Settings header needs
+ * to be present and valid for the connection.
+ */
+ const char *p;
+
+ if (!h2_allows_h2_upgrade(c)) {
+ return DECLINED;
+ }
+
+ p = apr_table_get(r->headers_in, "HTTP2-Settings");
+ if (!p) {
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(03085)
+ "upgrade without HTTP2-Settings declined");
+ return DECLINED;
+ }
+
+ p = apr_table_get(r->headers_in, "Connection");
+ if (!ap_find_token(r->pool, p, "http2-settings")) {
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(03086)
+ "upgrade without HTTP2-Settings declined");
+ return DECLINED;
+ }
+
+ /* We also allow switching only for requests that have no body.
+ */
+ p = apr_table_get(r->headers_in, "Content-Length");
+ if (p && strcmp(p, "0")) {
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(03087)
+ "upgrade with content-length: %s, declined", p);
+ return DECLINED;
+ }
+ }
+
+ while (*protos) {
+ /* Add all protocols we know (tls or clear) and that
+ * are part of the offerings (if there have been any).
+ */
+ if (!offers || ap_array_str_contains(offers, *protos)) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c,
+ "proposing protocol '%s'", *protos);
+ APR_ARRAY_PUSH(proposals, const char*) = *protos;
+ proposed = 1;
+ }
+ ++protos;
+ }
+ return proposed? DECLINED : OK;
+}
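+
+/*
+ * For illustration, a cleartext upgrade request that satisfies the checks in
+ * h2_protocol_propose() above (header values are example data):
+ *
+ *     GET / HTTP/1.1
+ *     Host: example.org
+ *     Connection: Upgrade, HTTP2-Settings
+ *     Upgrade: h2c
+ *     HTTP2-Settings: <base64url encoding of a SETTINGS payload>
+ *
+ * A request carrying a body (non-zero Content-Length) is declined.
+ */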
+
+static int h2_protocol_switch(conn_rec *c, request_rec *r, server_rec *s,
+ const char *protocol)
+{
+ int found = 0;
+ const char **protos = h2_h2_is_tls(c)? h2_tls_protos : h2_clear_protos;
+ const char **p = protos;
+
+ (void)s;
+ if (!h2_mpm_supported()) {
+ return DECLINED;
+ }
+
+ while (*p) {
+ if (!strcmp(*p, protocol)) {
+ found = 1;
+ break;
+ }
+ p++;
+ }
+
+ if (found) {
+ h2_ctx *ctx = h2_ctx_get(c, 1);
+
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c,
+ "switching protocol to '%s'", protocol);
+ h2_ctx_protocol_set(ctx, protocol);
+ h2_ctx_server_set(ctx, s);
+
+ if (r != NULL) {
+ apr_status_t status;
+ /* Switching in the middle of a request means that
+ * we have to send out the response to this one in h2
+ * format. So we need to take over the connection
+ * right away.
+ */
+ ap_remove_input_filter_byhandle(r->input_filters, "http_in");
+ ap_remove_input_filter_byhandle(r->input_filters, "reqtimeout");
+ ap_remove_output_filter_byhandle(r->output_filters, "HTTP_HEADER");
+
+ /* Ok, start an h2_conn on this one. */
+ h2_ctx_server_set(ctx, r->server);
+ status = h2_conn_setup(ctx, r->connection, r);
+ if (status != APR_SUCCESS) {
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, status, r, APLOGNO(03088)
+ "session setup");
+ h2_ctx_clear(c);
+ return !OK;
+ }
+
+ h2_conn_run(ctx, c);
+ }
+ return OK;
+ }
+
+ return DECLINED;
+}
+
+static const char *h2_protocol_get(const conn_rec *c)
+{
+ return h2_ctx_protocol_get(c);
+}
+
+void h2_switch_register_hooks(void)
+{
+ ap_hook_protocol_propose(h2_protocol_propose, NULL, NULL, APR_HOOK_MIDDLE);
+ ap_hook_protocol_switch(h2_protocol_switch, NULL, NULL, APR_HOOK_MIDDLE);
+ ap_hook_protocol_get(h2_protocol_get, NULL, NULL, APR_HOOK_MIDDLE);
+}
diff --git a/modules/http2/h2_switch.h b/modules/http2/h2_switch.h
new file mode 100644
index 0000000..7be8a23
--- /dev/null
+++ b/modules/http2/h2_switch.h
@@ -0,0 +1,30 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __mod_h2__h2_switch__
+#define __mod_h2__h2_switch__
+
+/*
+ * One time, post config initialization.
+ */
+apr_status_t h2_switch_init(apr_pool_t *pool, server_rec *s);
+
+/* Register apache hooks for protocol switching
+ */
+void h2_switch_register_hooks(void);
+
+
+#endif /* defined(__mod_h2__h2_switch__) */
diff --git a/modules/http2/h2_task.c b/modules/http2/h2_task.c
new file mode 100644
index 0000000..86fb026
--- /dev/null
+++ b/modules/http2/h2_task.c
@@ -0,0 +1,769 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <assert.h>
+#include <stddef.h>
+
+#include <apr_atomic.h>
+#include <apr_strings.h>
+
+#include <httpd.h>
+#include <http_core.h>
+#include <http_connection.h>
+#include <http_protocol.h>
+#include <http_request.h>
+#include <http_log.h>
+#include <http_vhost.h>
+#include <util_filter.h>
+#include <ap_mpm.h>
+#include <mod_core.h>
+#include <scoreboard.h>
+
+#include "h2_private.h"
+#include "h2.h"
+#include "h2_bucket_beam.h"
+#include "h2_conn.h"
+#include "h2_config.h"
+#include "h2_ctx.h"
+#include "h2_from_h1.h"
+#include "h2_h2.h"
+#include "h2_mplx.h"
+#include "h2_request.h"
+#include "h2_headers.h"
+#include "h2_session.h"
+#include "h2_stream.h"
+#include "h2_task.h"
+#include "h2_util.h"
+
+static void H2_TASK_OUT_LOG(int lvl, h2_task *task, apr_bucket_brigade *bb,
+ const char *tag)
+{
+ if (APLOG_C_IS_LEVEL(task->c, lvl)) {
+ conn_rec *c = task->c;
+ char buffer[4 * 1024];
+ const char *line = "(null)";
+ apr_size_t len, bmax = sizeof(buffer)/sizeof(buffer[0]);
+
+ len = h2_util_bb_print(buffer, bmax, tag, "", bb);
+ ap_log_cerror(APLOG_MARK, lvl, 0, c, "bb_dump(%s): %s",
+ task->id, len? buffer : line);
+ }
+}
+
+/*******************************************************************************
+ * task input handling
+ ******************************************************************************/
+
+static int input_ser_header(void *ctx, const char *name, const char *value)
+{
+ h2_task *task = ctx;
+ apr_brigade_printf(task->input.bb, NULL, NULL, "%s: %s\r\n", name, value);
+ return 1;
+}
+
+/*******************************************************************************
+ * task output handling
+ ******************************************************************************/
+
+static apr_status_t open_output(h2_task *task)
+{
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, task->c, APLOGNO(03348)
+ "h2_task(%s): open output to %s %s %s",
+ task->id, task->request->method,
+ task->request->authority,
+ task->request->path);
+ task->output.opened = 1;
+ return h2_mplx_out_open(task->mplx, task->stream_id, task->output.beam);
+}
+
+static apr_status_t send_out(h2_task *task, apr_bucket_brigade* bb, int block)
+{
+ apr_off_t written, left;
+ apr_status_t status;
+
+ apr_brigade_length(bb, 0, &written);
+ H2_TASK_OUT_LOG(APLOG_TRACE2, task, bb, "h2_task send_out");
+ h2_beam_log(task->output.beam, task->c, APLOG_TRACE2, "send_out(before)");
+ /* engines send non-blocking */
+ status = h2_beam_send(task->output.beam, bb,
+ block? APR_BLOCK_READ : APR_NONBLOCK_READ);
+ h2_beam_log(task->output.beam, task->c, APLOG_TRACE2, "send_out(after)");
+
+ if (APR_STATUS_IS_EAGAIN(status)) {
+ apr_brigade_length(bb, 0, &left);
+ written -= left;
+ status = APR_SUCCESS;
+ }
+ if (status == APR_SUCCESS) {
+ if (h2_task_logio_add_bytes_out) {
+ h2_task_logio_add_bytes_out(task->c, written);
+ }
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, task->c,
+ "h2_task(%s): send_out done", task->id);
+ }
+ else {
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, task->c,
+ "h2_task(%s): send_out (%ld bytes)",
+ task->id, (long)written);
+ }
+ return status;
+}
+
+/* Bring the data from the brigade (which represents the result of the
+ * request_rec out filter chain) into the h2_mplx for further sending
+ * on the master connection.
+ */
+static apr_status_t slave_out(h2_task *task, ap_filter_t* f,
+ apr_bucket_brigade* bb)
+{
+ apr_bucket *b;
+ apr_status_t rv = APR_SUCCESS;
+ int flush = 0, blocking;
+
+ if (task->frozen) {
+ h2_util_bb_log(task->c, task->stream_id, APLOG_TRACE2,
+ "frozen task output write, ignored", bb);
+ while (!APR_BRIGADE_EMPTY(bb)) {
+ b = APR_BRIGADE_FIRST(bb);
+ if (AP_BUCKET_IS_EOR(b)) {
+ APR_BUCKET_REMOVE(b);
+ task->eor = b;
+ }
+ else {
+ apr_bucket_delete(b);
+ }
+ }
+ return APR_SUCCESS;
+ }
+
+send:
+ /* we send blocking once we have opened the output, i.e. someone is there
+ * reading it, *and* the task is not assigned to an h2_req_engine */
+ blocking = (!task->assigned && task->output.opened);
+ for (b = APR_BRIGADE_FIRST(bb);
+ b != APR_BRIGADE_SENTINEL(bb);
+ b = APR_BUCKET_NEXT(b)) {
+ if (APR_BUCKET_IS_FLUSH(b) || APR_BUCKET_IS_EOS(b) || AP_BUCKET_IS_EOR(b)) {
+ flush = 1;
+ break;
+ }
+ }
+
+ if (task->output.bb && !APR_BRIGADE_EMPTY(task->output.bb)) {
+ /* still have data buffered from previous attempt.
+ * setaside and append new data and try to pass the complete data */
+ if (!APR_BRIGADE_EMPTY(bb)) {
+ if (APR_SUCCESS != (rv = ap_save_brigade(f, &task->output.bb, &bb, task->pool))) {
+ goto out;
+ }
+ }
+ rv = send_out(task, task->output.bb, blocking);
+ }
+ else {
+ /* no data buffered previously, pass brigade directly */
+ rv = send_out(task, bb, blocking);
+
+ if (APR_SUCCESS == rv && !APR_BRIGADE_EMPTY(bb)) {
+ /* output refused to buffer it all, time to open? */
+ if (!task->output.opened && APR_SUCCESS == (rv = open_output(task))) {
+ /* Make another attempt to send the data. With the output open,
+ * the call might be blocking and send all data, so we do not need
+ * to save the brigade */
+ goto send;
+ }
+ else if (blocking && flush) {
+ /* Need to keep on doing this. */
+ goto send;
+ }
+
+ if (APR_SUCCESS == rv) {
+ /* could not write all, buffer the rest */
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, rv, task->c, APLOGNO(03405)
+ "h2_slave_out(%s): saving brigade", task->id);
+ ap_assert(NULL);
+ rv = ap_save_brigade(f, &task->output.bb, &bb, task->pool);
+ flush = 1;
+ }
+ }
+ }
+
+ if (APR_SUCCESS == rv && !task->output.opened && flush) {
+ /* got a flush or could not write all, time to tell someone to read */
+ rv = open_output(task);
+ }
+out:
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, rv, task->c,
+ "h2_slave_out(%s): slave_out leave", task->id);
+ return rv;
+}
+
+static apr_status_t output_finish(h2_task *task)
+{
+ if (!task->output.opened) {
+ return open_output(task);
+ }
+ return APR_SUCCESS;
+}
+
+/*******************************************************************************
+ * task slave connection filters
+ ******************************************************************************/
+
+static apr_status_t h2_filter_slave_in(ap_filter_t* f,
+ apr_bucket_brigade* bb,
+ ap_input_mode_t mode,
+ apr_read_type_e block,
+ apr_off_t readbytes)
+{
+ h2_task *task;
+ apr_status_t status = APR_SUCCESS;
+ apr_bucket *b, *next;
+ apr_off_t bblen;
+ const int trace1 = APLOGctrace1(f->c);
+ apr_size_t rmax = ((readbytes <= APR_SIZE_MAX)?
+ (apr_size_t)readbytes : APR_SIZE_MAX);
+
+ task = h2_ctx_cget_task(f->c);
+ ap_assert(task);
+
+ if (trace1) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, f->c,
+ "h2_slave_in(%s): read, mode=%d, block=%d, readbytes=%ld",
+ task->id, mode, block, (long)readbytes);
+ }
+
+ if (mode == AP_MODE_INIT) {
+ return ap_get_brigade(f->c->input_filters, bb, mode, block, readbytes);
+ }
+
+ if (f->c->aborted) {
+ return APR_ECONNABORTED;
+ }
+
+ if (!task->input.bb) {
+ return APR_EOF;
+ }
+
+    /* Clean up the brigade, removing those nasty 0 length non-meta buckets
+     * that apr_brigade_split_line() sometimes produces. */
+ for (b = APR_BRIGADE_FIRST(task->input.bb);
+ b != APR_BRIGADE_SENTINEL(task->input.bb); b = next) {
+ next = APR_BUCKET_NEXT(b);
+ if (b->length == 0 && !APR_BUCKET_IS_METADATA(b)) {
+ apr_bucket_delete(b);
+ }
+ }
+
+ while (APR_BRIGADE_EMPTY(task->input.bb)) {
+ /* Get more input data for our request. */
+ if (trace1) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, f->c,
+ "h2_slave_in(%s): get more data from mplx, block=%d, "
+ "readbytes=%ld", task->id, block, (long)readbytes);
+ }
+ if (task->input.beam) {
+ status = h2_beam_receive(task->input.beam, task->input.bb, block,
+ 128*1024);
+ }
+ else {
+ status = APR_EOF;
+ }
+
+ if (trace1) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, status, f->c,
+ "h2_slave_in(%s): read returned", task->id);
+ }
+ if (APR_STATUS_IS_EAGAIN(status)
+ && (mode == AP_MODE_GETLINE || block == APR_BLOCK_READ)) {
+ /* chunked input handling does not seem to like it if we
+ * return with APR_EAGAIN from a GETLINE read...
+ * upload 100k test on test-ser.example.org hangs */
+ status = APR_SUCCESS;
+ }
+ else if (APR_STATUS_IS_EOF(status)) {
+ break;
+ }
+ else if (status != APR_SUCCESS) {
+ return status;
+ }
+
+ if (trace1) {
+ h2_util_bb_log(f->c, task->stream_id, APLOG_TRACE2,
+ "input.beam recv raw", task->input.bb);
+ }
+ if (h2_task_logio_add_bytes_in) {
+ apr_brigade_length(bb, 0, &bblen);
+ h2_task_logio_add_bytes_in(f->c, bblen);
+ }
+ }
+
+ /* Nothing there, no more data to get. Return APR_EAGAIN on
+ * speculative reads, this is ap_check_pipeline()'s trick to
+ * see if the connection needs closing. */
+ if (status == APR_EOF && APR_BRIGADE_EMPTY(task->input.bb)) {
+ return (mode == AP_MODE_SPECULATIVE)? APR_EAGAIN : APR_EOF;
+ }
+
+ if (trace1) {
+ h2_util_bb_log(f->c, task->stream_id, APLOG_TRACE2,
+ "task_input.bb", task->input.bb);
+ }
+
+ if (APR_BRIGADE_EMPTY(task->input.bb)) {
+ if (trace1) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, f->c,
+ "h2_slave_in(%s): no data", task->id);
+ }
+ return (block == APR_NONBLOCK_READ)? APR_EAGAIN : APR_EOF;
+ }
+
+ if (mode == AP_MODE_EXHAUSTIVE) {
+ /* return all we have */
+ APR_BRIGADE_CONCAT(bb, task->input.bb);
+ }
+ else if (mode == AP_MODE_READBYTES) {
+ status = h2_brigade_concat_length(bb, task->input.bb, rmax);
+ }
+ else if (mode == AP_MODE_SPECULATIVE) {
+ status = h2_brigade_copy_length(bb, task->input.bb, rmax);
+ }
+ else if (mode == AP_MODE_GETLINE) {
+ /* we are reading a single LF line, e.g. the HTTP headers.
+         * this has the nasty side effect of splitting the bucket, even
+         * though it ends with CRLF, and of leaving a 0 length bucket behind */
+ status = apr_brigade_split_line(bb, task->input.bb, block,
+ HUGE_STRING_LEN);
+ if (APLOGctrace1(f->c)) {
+ char buffer[1024];
+ apr_size_t len = sizeof(buffer)-1;
+ apr_brigade_flatten(bb, buffer, &len);
+ buffer[len] = 0;
+ if (trace1) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, f->c,
+ "h2_slave_in(%s): getline: %s",
+ task->id, buffer);
+ }
+ }
+ }
+ else {
+ /* Hmm, well. There is mode AP_MODE_EATCRLF, but we chose not
+ * to support it. Seems to work. */
+ ap_log_cerror(APLOG_MARK, APLOG_ERR, APR_ENOTIMPL, f->c,
+ APLOGNO(03472)
+ "h2_slave_in(%s), unsupported READ mode %d",
+ task->id, mode);
+ status = APR_ENOTIMPL;
+ }
+
+ if (trace1) {
+ apr_brigade_length(bb, 0, &bblen);
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, f->c,
+ "h2_slave_in(%s): %ld data bytes", task->id, (long)bblen);
+ }
+ return status;
+}
+
+static apr_status_t h2_filter_slave_output(ap_filter_t* filter,
+ apr_bucket_brigade* brigade)
+{
+ h2_task *task = h2_ctx_cget_task(filter->c);
+ apr_status_t status;
+
+ ap_assert(task);
+ status = slave_out(task, filter, brigade);
+ if (status != APR_SUCCESS) {
+ h2_task_rst(task, H2_ERR_INTERNAL_ERROR);
+ }
+ return status;
+}
+
+static apr_status_t h2_filter_parse_h1(ap_filter_t* f, apr_bucket_brigade* bb)
+{
+ h2_task *task = h2_ctx_cget_task(f->c);
+ apr_status_t status;
+
+ ap_assert(task);
+ /* There are cases where we need to parse a serialized http/1.1
+ * response. One example is a 100-continue answer in serialized mode
+ * or via a mod_proxy setup */
+ while (bb && !task->output.sent_response) {
+ status = h2_from_h1_parse_response(task, f, bb);
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, status, f->c,
+ "h2_task(%s): parsed response", task->id);
+ if (APR_BRIGADE_EMPTY(bb) || status != APR_SUCCESS) {
+ return status;
+ }
+ }
+
+ return ap_pass_brigade(f->next, bb);
+}
+
+/*******************************************************************************
+ * task things
+ ******************************************************************************/
+
+int h2_task_can_redo(h2_task *task) {
+ if (task->input.beam && h2_beam_was_received(task->input.beam)) {
+ /* cannot repeat that. */
+ return 0;
+ }
+ return (!strcmp("GET", task->request->method)
+ || !strcmp("HEAD", task->request->method)
+ || !strcmp("OPTIONS", task->request->method));
+}
+
+void h2_task_redo(h2_task *task)
+{
+ task->rst_error = 0;
+}
+
+void h2_task_rst(h2_task *task, int error)
+{
+ task->rst_error = error;
+ if (task->input.beam) {
+ h2_beam_leave(task->input.beam);
+ }
+ if (!task->worker_done) {
+ h2_beam_abort(task->output.beam);
+ }
+ if (task->c) {
+ task->c->aborted = 1;
+ }
+}
+
+/*******************************************************************************
+ * Register various hooks
+ */
+static const char *const mod_ssl[] = { "mod_ssl.c", NULL};
+static int h2_task_pre_conn(conn_rec* c, void *arg);
+static int h2_task_process_conn(conn_rec* c);
+
+APR_OPTIONAL_FN_TYPE(ap_logio_add_bytes_in) *h2_task_logio_add_bytes_in;
+APR_OPTIONAL_FN_TYPE(ap_logio_add_bytes_out) *h2_task_logio_add_bytes_out;
+
+void h2_task_register_hooks(void)
+{
+ /* This hook runs on new connections before mod_ssl has a say.
+ * Its purpose is to prevent mod_ssl from touching our pseudo-connections
+ * for streams.
+ */
+ ap_hook_pre_connection(h2_task_pre_conn,
+ NULL, mod_ssl, APR_HOOK_FIRST);
+ /* When the connection processing actually starts, we might
+ * take over, if the connection is for a task.
+ */
+ ap_hook_process_connection(h2_task_process_conn,
+ NULL, NULL, APR_HOOK_FIRST);
+
+ ap_register_input_filter("H2_SLAVE_IN", h2_filter_slave_in,
+ NULL, AP_FTYPE_NETWORK);
+ ap_register_output_filter("H2_SLAVE_OUT", h2_filter_slave_output,
+ NULL, AP_FTYPE_NETWORK);
+ ap_register_output_filter("H2_PARSE_H1", h2_filter_parse_h1,
+ NULL, AP_FTYPE_NETWORK);
+
+ ap_register_input_filter("H2_REQUEST", h2_filter_request_in,
+ NULL, AP_FTYPE_PROTOCOL);
+ ap_register_output_filter("H2_RESPONSE", h2_filter_headers_out,
+ NULL, AP_FTYPE_PROTOCOL);
+ ap_register_output_filter("H2_TRAILERS_OUT", h2_filter_trailers_out,
+ NULL, AP_FTYPE_PROTOCOL);
+}
+
+/* post config init */
+apr_status_t h2_task_init(apr_pool_t *pool, server_rec *s)
+{
+ h2_task_logio_add_bytes_in = APR_RETRIEVE_OPTIONAL_FN(ap_logio_add_bytes_in);
+ h2_task_logio_add_bytes_out = APR_RETRIEVE_OPTIONAL_FN(ap_logio_add_bytes_out);
+
+ return APR_SUCCESS;
+}
+
+static int h2_task_pre_conn(conn_rec* c, void *arg)
+{
+ h2_ctx *ctx;
+
+ if (!c->master) {
+ return OK;
+ }
+
+ ctx = h2_ctx_get(c, 0);
+ (void)arg;
+ if (h2_ctx_is_task(ctx)) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, c,
+ "h2_h2, pre_connection, found stream task");
+ ap_add_input_filter("H2_SLAVE_IN", NULL, NULL, c);
+ ap_add_output_filter("H2_PARSE_H1", NULL, NULL, c);
+ ap_add_output_filter("H2_SLAVE_OUT", NULL, NULL, c);
+ }
+ return OK;
+}
+
+h2_task *h2_task_create(conn_rec *slave, int stream_id,
+ const h2_request *req, h2_mplx *m,
+ h2_bucket_beam *input,
+ apr_interval_time_t timeout,
+ apr_size_t output_max_mem)
+{
+ apr_pool_t *pool;
+ h2_task *task;
+
+ ap_assert(slave);
+ ap_assert(req);
+
+ apr_pool_create(&pool, slave->pool);
+ task = apr_pcalloc(pool, sizeof(h2_task));
+ if (task == NULL) {
+ return NULL;
+ }
+ task->id = "000";
+ task->stream_id = stream_id;
+ task->c = slave;
+ task->mplx = m;
+ task->pool = pool;
+ task->request = req;
+ task->timeout = timeout;
+ task->input.beam = input;
+ task->output.max_buffer = output_max_mem;
+
+ return task;
+}
+
+void h2_task_destroy(h2_task *task)
+{
+ if (task->output.beam) {
+ h2_beam_log(task->output.beam, task->c, APLOG_TRACE2, "task_destroy");
+ h2_beam_destroy(task->output.beam);
+ task->output.beam = NULL;
+ }
+
+ if (task->eor) {
+ apr_bucket_destroy(task->eor);
+ }
+ if (task->pool) {
+ apr_pool_destroy(task->pool);
+ }
+}
+
+apr_status_t h2_task_do(h2_task *task, apr_thread_t *thread, int worker_id)
+{
+ conn_rec *c;
+
+ ap_assert(task);
+ c = task->c;
+ task->worker_started = 1;
+ task->started_at = apr_time_now();
+
+ if (c->master) {
+ /* Each conn_rec->id is supposed to be unique at a point in time. Since
+         * some modules (and maybe external code) use this id as an identifier
+         * for the request_rec they handle, it needs to be unique for slave
+         * connections as well.
+ * The connection id is generated by the MPM and most MPMs use the formula
+ * id := (child_num * max_threads) + thread_num
+ * which means that there is a maximum id of about
+ * idmax := max_child_count * max_threads
+ * If we assume 2024 child processes with 2048 threads max, we get
+ * idmax ~= 2024 * 2048 = 2 ** 22
+ * On 32 bit systems, we have not much space left, but on 64 bit systems
+ * (and higher?) we can use the upper 32 bits without fear of collision.
+ * 32 bits is just what we need, since a connection can only handle so
+ * many streams.
+ */
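+        /* A concrete illustration (hypothetical numbers): on a 64 bit system
+         * with c->master->id == 7 and task->stream_id == 5, the code below
+         * yields task->c->id == (7 << 32) ^ 5 == 0x700000005, i.e. the master
+         * id in the upper 32 bits and the stream id in the lower 32 bits. */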
+ int slave_id, free_bits;
+
+ task->id = apr_psprintf(task->pool, "%ld-%d", c->master->id,
+ task->stream_id);
+ if (sizeof(unsigned long) >= 8) {
+ free_bits = 32;
+ slave_id = task->stream_id;
+ }
+ else {
+ /* Assume we have a more limited number of threads/processes
+ * and h2 workers on a 32-bit system. Use the worker instead
+ * of the stream id. */
+ free_bits = 8;
+ slave_id = worker_id;
+ }
+ task->c->id = (c->master->id << free_bits)^slave_id;
+ c->keepalive = AP_CONN_KEEPALIVE;
+ }
+
+ h2_beam_create(&task->output.beam, c->pool, task->stream_id, "output",
+ H2_BEAM_OWNER_SEND, 0, task->timeout);
+ if (!task->output.beam) {
+ return APR_ENOMEM;
+ }
+
+ h2_beam_buffer_size_set(task->output.beam, task->output.max_buffer);
+ h2_beam_send_from(task->output.beam, task->pool);
+
+ h2_ctx_create_for(c, task);
+ apr_table_setn(c->notes, H2_TASK_ID_NOTE, task->id);
+
+ h2_slave_run_pre_connection(c, ap_get_conn_socket(c));
+
+ task->input.bb = apr_brigade_create(task->pool, c->bucket_alloc);
+ if (task->request->serialize) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c,
+ "h2_task(%s): serialize request %s %s",
+ task->id, task->request->method, task->request->path);
+ apr_brigade_printf(task->input.bb, NULL,
+ NULL, "%s %s HTTP/1.1\r\n",
+ task->request->method, task->request->path);
+ apr_table_do(input_ser_header, task, task->request->headers, NULL);
+ apr_brigade_puts(task->input.bb, NULL, NULL, "\r\n");
+ }
+
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c,
+ "h2_task(%s): process connection", task->id);
+
+ task->c->current_thread = thread;
+ ap_run_process_connection(c);
+
+ if (task->frozen) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c,
+ "h2_task(%s): process_conn returned frozen task",
+ task->id);
+ /* cleanup delayed */
+ return APR_EAGAIN;
+ }
+ else {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c,
+ "h2_task(%s): processing done", task->id);
+ return output_finish(task);
+ }
+}
+
+static apr_status_t h2_task_process_request(h2_task *task, conn_rec *c)
+{
+ const h2_request *req = task->request;
+ conn_state_t *cs = c->cs;
+ request_rec *r;
+
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c,
+ "h2_task(%s): create request_rec", task->id);
+ r = h2_request_create_rec(req, c);
+ if (r && (r->status == HTTP_OK)) {
+ /* set timeouts for virtual host of request */
+ if (task->timeout != r->server->timeout) {
+ task->timeout = r->server->timeout;
+ h2_beam_timeout_set(task->output.beam, task->timeout);
+ if (task->input.beam) {
+ h2_beam_timeout_set(task->input.beam, task->timeout);
+ }
+ }
+
+ ap_update_child_status(c->sbh, SERVER_BUSY_WRITE, r);
+
+ if (cs) {
+ cs->state = CONN_STATE_HANDLER;
+ }
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c,
+ "h2_task(%s): start process_request", task->id);
+
+        /* Add the raw bytes of the request (e.g. header frame lengths) to
+         * the logio for this request. */
+ if (req->raw_bytes && h2_task_logio_add_bytes_in) {
+ h2_task_logio_add_bytes_in(c, req->raw_bytes);
+ }
+
+ ap_process_request(r);
+
+ if (task->frozen) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c,
+ "h2_task(%s): process_request frozen", task->id);
+ }
+ else {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c,
+ "h2_task(%s): process_request done", task->id);
+ }
+
+ /* After the call to ap_process_request, the
+ * request pool may have been deleted. We set
+ * r=NULL here to ensure that any dereference
+ * of r that might be added later in this function
+ * will result in a segfault immediately instead
+ * of nondeterministic failures later.
+ */
+ if (cs)
+ cs->state = CONN_STATE_WRITE_COMPLETION;
+ r = NULL;
+ }
+ else if (!r) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c,
+ "h2_task(%s): create request_rec failed, r=NULL", task->id);
+ }
+ else {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c,
+ "h2_task(%s): create request_rec failed, r->status=%d",
+ task->id, r->status);
+ }
+
+ return APR_SUCCESS;
+}
+
+static int h2_task_process_conn(conn_rec* c)
+{
+ h2_ctx *ctx;
+
+ if (!c->master) {
+ return DECLINED;
+ }
+
+ ctx = h2_ctx_get(c, 0);
+ if (h2_ctx_is_task(ctx)) {
+ if (!ctx->task->request->serialize) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c,
+ "h2_h2, processing request directly");
+ h2_task_process_request(ctx->task, c);
+ return DONE;
+ }
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c,
+ "h2_task(%s), serialized handling", ctx->task->id);
+ }
+ else {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c,
+ "slave_conn(%ld): has no task", c->id);
+ }
+ return DECLINED;
+}
+
+apr_status_t h2_task_freeze(h2_task *task)
+{
+ if (!task->frozen) {
+ task->frozen = 1;
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, task->c, APLOGNO(03406)
+ "h2_task(%s), frozen", task->id);
+ }
+ return APR_SUCCESS;
+}
+
+apr_status_t h2_task_thaw(h2_task *task)
+{
+ if (task->frozen) {
+ task->frozen = 0;
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, task->c, APLOGNO(03407)
+ "h2_task(%s), thawed", task->id);
+ }
+ task->thawed = 1;
+ return APR_SUCCESS;
+}
+
+int h2_task_has_thawed(h2_task *task)
+{
+ return task->thawed;
+}
diff --git a/modules/http2/h2_task.h b/modules/http2/h2_task.h
new file mode 100644
index 0000000..ab6a746
--- /dev/null
+++ b/modules/http2/h2_task.h
@@ -0,0 +1,127 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __mod_h2__h2_task__
+#define __mod_h2__h2_task__
+
+#include <http_core.h>
+
+/**
+ * A h2_task fakes a HTTP/1.1 request from the data in a HTTP/2 stream
+ * (HEADER+CONT.+DATA) the module receives.
+ *
+ * In order to answer a HTTP/2 stream, we want all Apache httpd infrastructure
+ * to be involved as usual, as if this stream had arrived as a separate HTTP/1.1
+ * request. The basic trickery to do so was derived from google's mod_spdy
+ * source. Basically, we fake a new conn_rec object, even with its own
+ * socket and give it to ap_process_connection().
+ *
+ * Since h2_task instances are executed in separate threads, we may have
+ * different lifetimes than our h2_stream or h2_session instances. Basically,
+ * we would like to be as standalone as possible.
+ *
+ * Finally, to keep certain connection level filters, such as ourselves and
+ * especially mod_ssl ones, from messing with our data, we need a filter
+ * of our own to disable those.
+ */
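+
+/* For orientation only, a rough and entirely hypothetical sketch of how a
+ * worker thread might drive a task with the API declared below (error
+ * handling and the h2_mplx wiring are omitted, local names are made up):
+ *
+ *     h2_task *task = h2_task_create(slave_c, stream_id, req, mplx,
+ *                                    input_beam, timeout, max_mem);
+ *     apr_status_t rv = h2_task_do(task, worker_thread, worker_id);
+ *     ...
+ *     h2_task_destroy(task);
+ */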
+
+struct h2_bucket_beam;
+struct h2_conn;
+struct h2_mplx;
+struct h2_task;
+struct h2_req_engine;
+struct h2_request;
+struct h2_response_parser;
+struct h2_stream;
+struct h2_worker;
+
+typedef struct h2_task h2_task;
+
+struct h2_task {
+ const char *id;
+ int stream_id;
+ conn_rec *c;
+ apr_pool_t *pool;
+
+ const struct h2_request *request;
+ apr_interval_time_t timeout;
+ int rst_error; /* h2 related stream abort error */
+
+ struct {
+ struct h2_bucket_beam *beam;
+ unsigned int eos : 1;
+ apr_bucket_brigade *bb;
+ apr_bucket_brigade *bbchunk;
+ apr_off_t chunked_total;
+ } input;
+ struct {
+ struct h2_bucket_beam *beam;
+ unsigned int opened : 1;
+ unsigned int sent_response : 1;
+ unsigned int copy_files : 1;
+ struct h2_response_parser *rparser;
+ apr_bucket_brigade *bb;
+ apr_size_t max_buffer;
+ } output;
+
+ struct h2_mplx *mplx;
+
+ unsigned int filters_set : 1;
+ unsigned int frozen : 1;
+ unsigned int thawed : 1;
+ unsigned int worker_started : 1; /* h2_worker started processing */
+ unsigned int worker_done : 1; /* h2_worker finished */
+
+ apr_time_t started_at; /* when processing started */
+ apr_time_t done_at; /* when processing was done */
+ apr_bucket *eor;
+
+ struct h2_req_engine *engine; /* engine hosted by this task */
+ struct h2_req_engine *assigned; /* engine that task has been assigned to */
+};
+
+h2_task *h2_task_create(conn_rec *slave, int stream_id,
+ const h2_request *req, struct h2_mplx *m,
+ struct h2_bucket_beam *input,
+ apr_interval_time_t timeout,
+ apr_size_t output_max_mem);
+
+void h2_task_destroy(h2_task *task);
+
+apr_status_t h2_task_do(h2_task *task, apr_thread_t *thread, int worker_id);
+
+void h2_task_redo(h2_task *task);
+int h2_task_can_redo(h2_task *task);
+
+/**
+ * Reset the task with the given error code, resets all input/output.
+ */
+void h2_task_rst(h2_task *task, int error);
+
+void h2_task_register_hooks(void);
+/*
+ * One time, post config initialization.
+ */
+apr_status_t h2_task_init(apr_pool_t *pool, server_rec *s);
+
+extern APR_OPTIONAL_FN_TYPE(ap_logio_add_bytes_in) *h2_task_logio_add_bytes_in;
+extern APR_OPTIONAL_FN_TYPE(ap_logio_add_bytes_out) *h2_task_logio_add_bytes_out;
+
+apr_status_t h2_task_freeze(h2_task *task);
+apr_status_t h2_task_thaw(h2_task *task);
+int h2_task_has_thawed(h2_task *task);
+
+#endif /* defined(__mod_h2__h2_task__) */
diff --git a/modules/http2/h2_util.c b/modules/http2/h2_util.c
new file mode 100644
index 0000000..9dacd8b
--- /dev/null
+++ b/modules/http2/h2_util.c
@@ -0,0 +1,2015 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <assert.h>
+#include <apr_strings.h>
+#include <apr_thread_mutex.h>
+#include <apr_thread_cond.h>
+
+#include <httpd.h>
+#include <http_core.h>
+#include <http_log.h>
+#include <http_request.h>
+
+#include <nghttp2/nghttp2.h>
+
+#include "h2.h"
+#include "h2_util.h"
+
+/* Position of the highest set bit in n, i.e. log2(n) when n is a power of 2 */
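+/* For example, h2_log2(16) == 4 and h2_log2(256) == 8; h2_log2(0) returns 0. */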
+unsigned char h2_log2(int n)
+{
+ int lz = 0;
+ if (!n) {
+ return 0;
+ }
+ if (!(n & 0xffff0000u)) {
+ lz += 16;
+ n = (n << 16);
+ }
+ if (!(n & 0xff000000u)) {
+ lz += 8;
+ n = (n << 8);
+ }
+ if (!(n & 0xf0000000u)) {
+ lz += 4;
+ n = (n << 4);
+ }
+ if (!(n & 0xc0000000u)) {
+ lz += 2;
+ n = (n << 2);
+ }
+ if (!(n & 0x80000000u)) {
+ lz += 1;
+ }
+
+ return 31 - lz;
+}
+
+size_t h2_util_hex_dump(char *buffer, size_t maxlen,
+ const char *data, size_t datalen)
+{
+ size_t offset = 0;
+ size_t maxoffset = (maxlen-4);
+ size_t i;
+ for (i = 0; i < datalen && offset < maxoffset; ++i) {
+ const char *sep = (i && i % 16 == 0)? "\n" : " ";
+ int n = apr_snprintf(buffer+offset, maxoffset-offset,
+ "%2x%s", ((unsigned int)data[i]&0xff), sep);
+ offset += n;
+ }
+ strcpy(buffer+offset, (i<datalen)? "..." : "");
+ return strlen(buffer);
+}
+
+size_t h2_util_header_print(char *buffer, size_t maxlen,
+ const char *name, size_t namelen,
+ const char *value, size_t valuelen)
+{
+ size_t offset = 0;
+ size_t i;
+ for (i = 0; i < namelen && offset < maxlen; ++i, ++offset) {
+ buffer[offset] = name[i];
+ }
+ for (i = 0; i < 2 && offset < maxlen; ++i, ++offset) {
+ buffer[offset] = ": "[i];
+ }
+ for (i = 0; i < valuelen && offset < maxlen; ++i, ++offset) {
+ buffer[offset] = value[i];
+ }
+ buffer[offset] = '\0';
+ return offset;
+}
+
+
+void h2_util_camel_case_header(char *s, size_t len)
+{
+ size_t start = 1;
+ size_t i;
+ for (i = 0; i < len; ++i) {
+ if (start) {
+ if (s[i] >= 'a' && s[i] <= 'z') {
+ s[i] -= 'a' - 'A';
+ }
+
+ start = 0;
+ }
+ else if (s[i] == '-') {
+ start = 1;
+ }
+ }
+}
+
+/* base64 url encoding ****************************************************************************/
+
+#define N6 (unsigned int)-1
+
+static const unsigned int BASE64URL_UINT6[] = {
+/* 0 1 2 3 4 5 6 7 8 9 a b c d e f */
+ N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, /* 0 */
+ N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, /* 1 */
+ N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, 62, N6, N6, /* 2 */
+ 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, N6, N6, N6, N6, N6, N6, /* 3 */
+ N6, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, /* 4 */
+ 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, N6, N6, N6, N6, 63, /* 5 */
+ N6, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, /* 6 */
+ 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, N6, N6, N6, N6, N6, /* 7 */
+ N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, /* 8 */
+ N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, /* 9 */
+ N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, /* a */
+ N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, /* b */
+ N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, /* c */
+ N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, /* d */
+ N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, /* e */
+ N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6 /* f */
+};
+static const unsigned char BASE64URL_CHARS[] = {
+ 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', /* 0 - 9 */
+ 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', /* 10 - 19 */
+ 'U', 'V', 'W', 'X', 'Y', 'Z', 'a', 'b', 'c', 'd', /* 20 - 29 */
+ 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', /* 30 - 39 */
+ 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', /* 40 - 49 */
+ 'y', 'z', '0', '1', '2', '3', '4', '5', '6', '7', /* 50 - 59 */
+ '8', '9', '-', '_', ' ', ' ', ' ', ' ', ' ', ' ', /* 60 - 69 */
+};
+
+#define BASE64URL_CHAR(x) BASE64URL_CHARS[ (unsigned int)(x) & 0x3fu ]
+
+apr_size_t h2_util_base64url_decode(const char **decoded, const char *encoded,
+ apr_pool_t *pool)
+{
+ const unsigned char *e = (const unsigned char *)encoded;
+ const unsigned char *p = e;
+ unsigned char *d;
+ unsigned int n;
+ long len, mlen, remain, i;
+
+ while (*p && BASE64URL_UINT6[ *p ] != N6) {
+ ++p;
+ }
+ len = (int)(p - e);
+ mlen = (len/4)*4;
+ *decoded = apr_pcalloc(pool, (apr_size_t)len + 1);
+
+ i = 0;
+ d = (unsigned char*)*decoded;
+ for (; i < mlen; i += 4) {
+ n = ((BASE64URL_UINT6[ e[i+0] ] << 18) +
+ (BASE64URL_UINT6[ e[i+1] ] << 12) +
+ (BASE64URL_UINT6[ e[i+2] ] << 6) +
+ (BASE64URL_UINT6[ e[i+3] ]));
+ *d++ = (unsigned char)(n >> 16);
+ *d++ = (unsigned char)(n >> 8 & 0xffu);
+ *d++ = (unsigned char)(n & 0xffu);
+ }
+ remain = len - mlen;
+ switch (remain) {
+ case 2:
+ n = ((BASE64URL_UINT6[ e[mlen+0] ] << 18) +
+ (BASE64URL_UINT6[ e[mlen+1] ] << 12));
+ *d++ = (unsigned char)(n >> 16);
+ remain = 1;
+ break;
+ case 3:
+ n = ((BASE64URL_UINT6[ e[mlen+0] ] << 18) +
+ (BASE64URL_UINT6[ e[mlen+1] ] << 12) +
+ (BASE64URL_UINT6[ e[mlen+2] ] << 6));
+ *d++ = (unsigned char)(n >> 16);
+ *d++ = (unsigned char)(n >> 8 & 0xffu);
+ remain = 2;
+ break;
+ default: /* do nothing */
+ break;
+ }
+ return (apr_size_t)(mlen/4*3 + remain);
+}
+
+const char *h2_util_base64url_encode(const char *data,
+ apr_size_t dlen, apr_pool_t *pool)
+{
+ int i, len = (int)dlen;
+ apr_size_t slen = ((dlen+2)/3)*4 + 1; /* 0 terminated */
+ const unsigned char *udata = (const unsigned char*)data;
+ unsigned char *enc, *p = apr_pcalloc(pool, slen);
+
+ enc = p;
+ for (i = 0; i < len-2; i+= 3) {
+ *p++ = BASE64URL_CHAR( (udata[i] >> 2) );
+ *p++ = BASE64URL_CHAR( (udata[i] << 4) + (udata[i+1] >> 4) );
+ *p++ = BASE64URL_CHAR( (udata[i+1] << 2) + (udata[i+2] >> 6) );
+ *p++ = BASE64URL_CHAR( (udata[i+2]) );
+ }
+
+ if (i < len) {
+ *p++ = BASE64URL_CHAR( (udata[i] >> 2) );
+ if (i == (len - 1)) {
+ *p++ = BASE64URL_CHARS[ ((unsigned int)udata[i] << 4) & 0x3fu ];
+ }
+ else {
+ *p++ = BASE64URL_CHAR( (udata[i] << 4) + (udata[i+1] >> 4) );
+ *p++ = BASE64URL_CHAR( (udata[i+1] << 2) );
+ }
+ }
+ *p++ = '\0';
+ return (char *)enc;
+}
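+
+/* A minimal usage sketch (assuming an existing apr_pool_t *pool): encoding
+ * the three bytes "Man" yields the unpadded base64url string "TWFu", and
+ * decoding it again recovers the original bytes:
+ *
+ *     const char *enc = h2_util_base64url_encode("Man", 3, pool);
+ *     const char *dec;
+ *     apr_size_t n = h2_util_base64url_decode(&dec, enc, pool);
+ *
+ * after which enc is "TWFu", n is 3 and dec holds 'M', 'a', 'n'. */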
+
+/*******************************************************************************
+ * ihash - hash for structs with int identifier
+ ******************************************************************************/
+struct h2_ihash_t {
+ apr_hash_t *hash;
+ size_t ioff;
+};
+
+static unsigned int ihash(const char *key, apr_ssize_t *klen)
+{
+ return (unsigned int)(*((int*)key));
+}
+
+h2_ihash_t *h2_ihash_create(apr_pool_t *pool, size_t offset_of_int)
+{
+ h2_ihash_t *ih = apr_pcalloc(pool, sizeof(h2_ihash_t));
+ ih->hash = apr_hash_make_custom(pool, ihash);
+ ih->ioff = offset_of_int;
+ return ih;
+}
+
+size_t h2_ihash_count(h2_ihash_t *ih)
+{
+ return apr_hash_count(ih->hash);
+}
+
+int h2_ihash_empty(h2_ihash_t *ih)
+{
+ return apr_hash_count(ih->hash) == 0;
+}
+
+void *h2_ihash_get(h2_ihash_t *ih, int id)
+{
+ return apr_hash_get(ih->hash, &id, sizeof(id));
+}
+
+typedef struct {
+ h2_ihash_iter_t *iter;
+ void *ctx;
+} iter_ctx;
+
+static int ihash_iter(void *ctx, const void *key, apr_ssize_t klen,
+ const void *val)
+{
+ iter_ctx *ictx = ctx;
+ return ictx->iter(ictx->ctx, (void*)val); /* why is this passed const?*/
+}
+
+int h2_ihash_iter(h2_ihash_t *ih, h2_ihash_iter_t *fn, void *ctx)
+{
+ iter_ctx ictx;
+ ictx.iter = fn;
+ ictx.ctx = ctx;
+ return apr_hash_do(ihash_iter, &ictx, ih->hash);
+}
+
+void h2_ihash_add(h2_ihash_t *ih, void *val)
+{
+ apr_hash_set(ih->hash, ((char *)val + ih->ioff), sizeof(int), val);
+}
+
+void h2_ihash_remove(h2_ihash_t *ih, int id)
+{
+ apr_hash_set(ih->hash, &id, sizeof(id), NULL);
+}
+
+void h2_ihash_remove_val(h2_ihash_t *ih, void *val)
+{
+ int id = *((int*)((char *)val + ih->ioff));
+ apr_hash_set(ih->hash, &id, sizeof(id), NULL);
+}
+
+
+void h2_ihash_clear(h2_ihash_t *ih)
+{
+ apr_hash_clear(ih->hash);
+}
+
+typedef struct {
+ h2_ihash_t *ih;
+ void **buffer;
+ size_t max;
+ size_t len;
+} collect_ctx;
+
+static int collect_iter(void *x, void *val)
+{
+ collect_ctx *ctx = x;
+ if (ctx->len < ctx->max) {
+ ctx->buffer[ctx->len++] = val;
+ return 1;
+ }
+ return 0;
+}
+
+size_t h2_ihash_shift(h2_ihash_t *ih, void **buffer, size_t max)
+{
+ collect_ctx ctx;
+ size_t i;
+
+ ctx.ih = ih;
+ ctx.buffer = buffer;
+ ctx.max = max;
+ ctx.len = 0;
+ h2_ihash_iter(ih, collect_iter, &ctx);
+ for (i = 0; i < ctx.len; ++i) {
+ h2_ihash_remove_val(ih, buffer[i]);
+ }
+ return ctx.len;
+}
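+
+/* A minimal usage sketch (struct and pool are hypothetical): any struct
+ * carrying an int id can be indexed by that id, keyed via the field offset:
+ *
+ *     typedef struct { int id; const char *name; } thing;
+ *     h2_ihash_t *index = h2_ihash_create(pool, offsetof(thing, id));
+ *     h2_ihash_add(index, &my_thing);
+ *     thing *t = h2_ihash_get(index, my_thing.id);
+ */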
+
+/*******************************************************************************
+ * iqueue - sorted list of int
+ ******************************************************************************/
+
+static void iq_grow(h2_iqueue *q, int nlen);
+static void iq_swap(h2_iqueue *q, int i, int j);
+static int iq_bubble_up(h2_iqueue *q, int i, int top,
+ h2_iq_cmp *cmp, void *ctx);
+static int iq_bubble_down(h2_iqueue *q, int i, int bottom,
+ h2_iq_cmp *cmp, void *ctx);
+
+h2_iqueue *h2_iq_create(apr_pool_t *pool, int capacity)
+{
+ h2_iqueue *q = apr_pcalloc(pool, sizeof(h2_iqueue));
+ if (q) {
+ q->pool = pool;
+ iq_grow(q, capacity);
+ q->nelts = 0;
+ }
+ return q;
+}
+
+int h2_iq_empty(h2_iqueue *q)
+{
+ return q->nelts == 0;
+}
+
+int h2_iq_count(h2_iqueue *q)
+{
+ return q->nelts;
+}
+
+
+int h2_iq_add(h2_iqueue *q, int sid, h2_iq_cmp *cmp, void *ctx)
+{
+ int i;
+
+ if (h2_iq_contains(q, sid)) {
+ return 0;
+ }
+ if (q->nelts >= q->nalloc) {
+ iq_grow(q, q->nalloc * 2);
+ }
+ i = (q->head + q->nelts) % q->nalloc;
+ q->elts[i] = sid;
+ ++q->nelts;
+
+ if (cmp) {
+ /* bubble it to the front of the queue */
+ iq_bubble_up(q, i, q->head, cmp, ctx);
+ }
+ return 1;
+}
+
+int h2_iq_append(h2_iqueue *q, int sid)
+{
+ return h2_iq_add(q, sid, NULL, NULL);
+}
+
+int h2_iq_remove(h2_iqueue *q, int sid)
+{
+ int i;
+ for (i = 0; i < q->nelts; ++i) {
+ if (sid == q->elts[(q->head + i) % q->nalloc]) {
+ break;
+ }
+ }
+
+ if (i < q->nelts) {
+ ++i;
+ for (; i < q->nelts; ++i) {
+ q->elts[(q->head+i-1)%q->nalloc] = q->elts[(q->head+i)%q->nalloc];
+ }
+ --q->nelts;
+ return 1;
+ }
+ return 0;
+}
+
+void h2_iq_clear(h2_iqueue *q)
+{
+ q->nelts = 0;
+}
+
+void h2_iq_sort(h2_iqueue *q, h2_iq_cmp *cmp, void *ctx)
+{
+ /* Assume that changes in ordering are minimal. This needs,
+     * best case, q->nelts - 1 comparisons to check that nothing
+ * changed.
+ */
+ if (q->nelts > 0) {
+ int i, ni, prev, last;
+
+ /* Start at the end of the queue and create a tail of sorted
+ * entries. Make that tail one element longer in each iteration.
+ */
+ last = i = (q->head + q->nelts - 1) % q->nalloc;
+ while (i != q->head) {
+ prev = (q->nalloc + i - 1) % q->nalloc;
+
+ ni = iq_bubble_up(q, i, prev, cmp, ctx);
+ if (ni == prev) {
+ /* i bubbled one up, bubble the new i down, which
+ * keeps all tasks below i sorted. */
+ iq_bubble_down(q, i, last, cmp, ctx);
+ }
+ i = prev;
+ };
+ }
+}
+
+
+int h2_iq_shift(h2_iqueue *q)
+{
+ int sid;
+
+ if (q->nelts <= 0) {
+ return 0;
+ }
+
+ sid = q->elts[q->head];
+ q->head = (q->head + 1) % q->nalloc;
+ q->nelts--;
+
+ return sid;
+}
+
+size_t h2_iq_mshift(h2_iqueue *q, int *pint, size_t max)
+{
+ int i;
+ for (i = 0; i < max; ++i) {
+ pint[i] = h2_iq_shift(q);
+ if (pint[i] == 0) {
+ break;
+ }
+ }
+ return i;
+}
+
+static void iq_grow(h2_iqueue *q, int nlen)
+{
+ if (nlen > q->nalloc) {
+ int *nq = apr_pcalloc(q->pool, sizeof(int) * nlen);
+ if (q->nelts > 0) {
+ int l = ((q->head + q->nelts) % q->nalloc) - q->head;
+
+ memmove(nq, q->elts + q->head, sizeof(int) * l);
+ if (l < q->nelts) {
+ /* elts wrapped, append elts in [0, remain] to nq */
+ int remain = q->nelts - l;
+ memmove(nq + l, q->elts, sizeof(int) * remain);
+ }
+ }
+ q->elts = nq;
+ q->nalloc = nlen;
+ q->head = 0;
+ }
+}
+
+static void iq_swap(h2_iqueue *q, int i, int j)
+{
+ int x = q->elts[i];
+ q->elts[i] = q->elts[j];
+ q->elts[j] = x;
+}
+
+static int iq_bubble_up(h2_iqueue *q, int i, int top,
+ h2_iq_cmp *cmp, void *ctx)
+{
+ int prev;
+ while (((prev = (q->nalloc + i - 1) % q->nalloc), i != top)
+ && (*cmp)(q->elts[i], q->elts[prev], ctx) < 0) {
+ iq_swap(q, prev, i);
+ i = prev;
+ }
+ return i;
+}
+
+static int iq_bubble_down(h2_iqueue *q, int i, int bottom,
+ h2_iq_cmp *cmp, void *ctx)
+{
+ int next;
+ while (((next = (q->nalloc + i + 1) % q->nalloc), i != bottom)
+ && (*cmp)(q->elts[i], q->elts[next], ctx) > 0) {
+ iq_swap(q, next, i);
+ i = next;
+ }
+ return i;
+}
+
+int h2_iq_contains(h2_iqueue *q, int sid)
+{
+ int i;
+ for (i = 0; i < q->nelts; ++i) {
+ if (sid == q->elts[(q->head + i) % q->nalloc]) {
+ return 1;
+ }
+ }
+ return 0;
+}
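+
+/* A minimal usage sketch (comparator and pool are hypothetical): stream ids
+ * are kept ordered by whatever the comparator decides, e.g. ascending:
+ *
+ *     static int by_id(int a, int b, void *ctx) { return a - b; }
+ *     h2_iqueue *q = h2_iq_create(pool, 8);
+ *     h2_iq_add(q, stream_id, by_id, NULL);
+ *     int next = h2_iq_shift(q);    (returns 0 once the queue is empty)
+ */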
+
+/*******************************************************************************
+ * FIFO queue
+ ******************************************************************************/
+
+struct h2_fifo {
+ void **elems;
+ int nelems;
+ int set;
+ int head;
+ int count;
+ int aborted;
+ apr_thread_mutex_t *lock;
+ apr_thread_cond_t *not_empty;
+ apr_thread_cond_t *not_full;
+};
+
+static int nth_index(h2_fifo *fifo, int n)
+{
+ return (fifo->head + n) % fifo->nelems;
+}
+
+static apr_status_t fifo_destroy(void *data)
+{
+ h2_fifo *fifo = data;
+
+ apr_thread_cond_destroy(fifo->not_empty);
+ apr_thread_cond_destroy(fifo->not_full);
+ apr_thread_mutex_destroy(fifo->lock);
+
+ return APR_SUCCESS;
+}
+
+static int index_of(h2_fifo *fifo, void *elem)
+{
+ int i;
+
+ for (i = 0; i < fifo->count; ++i) {
+ if (elem == fifo->elems[nth_index(fifo, i)]) {
+ return i;
+ }
+ }
+ return -1;
+}
+
+static apr_status_t create_int(h2_fifo **pfifo, apr_pool_t *pool,
+ int capacity, int as_set)
+{
+ apr_status_t rv;
+ h2_fifo *fifo;
+
+ fifo = apr_pcalloc(pool, sizeof(*fifo));
+ if (fifo == NULL) {
+ return APR_ENOMEM;
+ }
+
+ rv = apr_thread_mutex_create(&fifo->lock,
+ APR_THREAD_MUTEX_UNNESTED, pool);
+ if (rv != APR_SUCCESS) {
+ return rv;
+ }
+
+ rv = apr_thread_cond_create(&fifo->not_empty, pool);
+ if (rv != APR_SUCCESS) {
+ return rv;
+ }
+
+ rv = apr_thread_cond_create(&fifo->not_full, pool);
+ if (rv != APR_SUCCESS) {
+ return rv;
+ }
+
+ fifo->elems = apr_pcalloc(pool, capacity * sizeof(void*));
+ if (fifo->elems == NULL) {
+ return APR_ENOMEM;
+ }
+ fifo->nelems = capacity;
+ fifo->set = as_set;
+
+ *pfifo = fifo;
+ apr_pool_cleanup_register(pool, fifo, fifo_destroy, apr_pool_cleanup_null);
+
+ return APR_SUCCESS;
+}
+
+apr_status_t h2_fifo_create(h2_fifo **pfifo, apr_pool_t *pool, int capacity)
+{
+ return create_int(pfifo, pool, capacity, 0);
+}
+
+apr_status_t h2_fifo_set_create(h2_fifo **pfifo, apr_pool_t *pool, int capacity)
+{
+ return create_int(pfifo, pool, capacity, 1);
+}
+
+apr_status_t h2_fifo_term(h2_fifo *fifo)
+{
+ apr_status_t rv;
+ if ((rv = apr_thread_mutex_lock(fifo->lock)) == APR_SUCCESS) {
+ fifo->aborted = 1;
+ apr_thread_mutex_unlock(fifo->lock);
+ }
+ return rv;
+}
+
+apr_status_t h2_fifo_interrupt(h2_fifo *fifo)
+{
+ apr_status_t rv;
+ if ((rv = apr_thread_mutex_lock(fifo->lock)) == APR_SUCCESS) {
+ apr_thread_cond_broadcast(fifo->not_empty);
+ apr_thread_cond_broadcast(fifo->not_full);
+ apr_thread_mutex_unlock(fifo->lock);
+ }
+ return rv;
+}
+
+int h2_fifo_count(h2_fifo *fifo)
+{
+ return fifo->count;
+}
+
+static apr_status_t check_not_empty(h2_fifo *fifo, int block)
+{
+ while (fifo->count == 0) {
+ if (!block) {
+ return APR_EAGAIN;
+ }
+ if (fifo->aborted) {
+ return APR_EOF;
+ }
+ apr_thread_cond_wait(fifo->not_empty, fifo->lock);
+ }
+ return APR_SUCCESS;
+}
+
+static apr_status_t fifo_push_int(h2_fifo *fifo, void *elem, int block)
+{
+ if (fifo->aborted) {
+ return APR_EOF;
+ }
+
+ if (fifo->set && index_of(fifo, elem) >= 0) {
+ /* set mode, elem already member */
+ return APR_EEXIST;
+ }
+ else if (fifo->count == fifo->nelems) {
+ if (block) {
+ while (fifo->count == fifo->nelems) {
+ if (fifo->aborted) {
+ return APR_EOF;
+ }
+ apr_thread_cond_wait(fifo->not_full, fifo->lock);
+ }
+ }
+ else {
+ return APR_EAGAIN;
+ }
+ }
+
+ ap_assert(fifo->count < fifo->nelems);
+ fifo->elems[nth_index(fifo, fifo->count)] = elem;
+ ++fifo->count;
+ if (fifo->count == 1) {
+ apr_thread_cond_broadcast(fifo->not_empty);
+ }
+ return APR_SUCCESS;
+}
+
+static apr_status_t fifo_push(h2_fifo *fifo, void *elem, int block)
+{
+ apr_status_t rv;
+
+ if (fifo->aborted) {
+ return APR_EOF;
+ }
+
+ if ((rv = apr_thread_mutex_lock(fifo->lock)) == APR_SUCCESS) {
+ rv = fifo_push_int(fifo, elem, block);
+ apr_thread_mutex_unlock(fifo->lock);
+ }
+ return rv;
+}
+
+apr_status_t h2_fifo_push(h2_fifo *fifo, void *elem)
+{
+ return fifo_push(fifo, elem, 1);
+}
+
+apr_status_t h2_fifo_try_push(h2_fifo *fifo, void *elem)
+{
+ return fifo_push(fifo, elem, 0);
+}
+
+static apr_status_t pull_head(h2_fifo *fifo, void **pelem, int block)
+{
+ apr_status_t rv;
+
+ if ((rv = check_not_empty(fifo, block)) != APR_SUCCESS) {
+ *pelem = NULL;
+ return rv;
+ }
+ *pelem = fifo->elems[fifo->head];
+ --fifo->count;
+ if (fifo->count > 0) {
+ fifo->head = nth_index(fifo, 1);
+ if (fifo->count+1 == fifo->nelems) {
+ apr_thread_cond_broadcast(fifo->not_full);
+ }
+ }
+ return APR_SUCCESS;
+}
+
+static apr_status_t fifo_pull(h2_fifo *fifo, void **pelem, int block)
+{
+ apr_status_t rv;
+
+ if (fifo->aborted) {
+ return APR_EOF;
+ }
+
+ if ((rv = apr_thread_mutex_lock(fifo->lock)) == APR_SUCCESS) {
+ rv = pull_head(fifo, pelem, block);
+ apr_thread_mutex_unlock(fifo->lock);
+ }
+ return rv;
+}
+
+apr_status_t h2_fifo_pull(h2_fifo *fifo, void **pelem)
+{
+ return fifo_pull(fifo, pelem, 1);
+}
+
+apr_status_t h2_fifo_try_pull(h2_fifo *fifo, void **pelem)
+{
+ return fifo_pull(fifo, pelem, 0);
+}
+
+static apr_status_t fifo_peek(h2_fifo *fifo, h2_fifo_peek_fn *fn, void *ctx, int block)
+{
+ apr_status_t rv;
+ void *elem;
+
+ if (fifo->aborted) {
+ return APR_EOF;
+ }
+
+ if (APR_SUCCESS == (rv = apr_thread_mutex_lock(fifo->lock))) {
+ if (APR_SUCCESS == (rv = pull_head(fifo, &elem, block))) {
+ switch (fn(elem, ctx)) {
+ case H2_FIFO_OP_PULL:
+ break;
+ case H2_FIFO_OP_REPUSH:
+ rv = fifo_push_int(fifo, elem, block);
+ break;
+ }
+ }
+ apr_thread_mutex_unlock(fifo->lock);
+ }
+ return rv;
+}
+
+apr_status_t h2_fifo_peek(h2_fifo *fifo, h2_fifo_peek_fn *fn, void *ctx)
+{
+ return fifo_peek(fifo, fn, ctx, 1);
+}
+
+apr_status_t h2_fifo_try_peek(h2_fifo *fifo, h2_fifo_peek_fn *fn, void *ctx)
+{
+ return fifo_peek(fifo, fn, ctx, 0);
+}
+
+apr_status_t h2_fifo_remove(h2_fifo *fifo, void *elem)
+{
+ apr_status_t rv;
+
+ if (fifo->aborted) {
+ return APR_EOF;
+ }
+
+ if ((rv = apr_thread_mutex_lock(fifo->lock)) == APR_SUCCESS) {
+ int i, rc;
+ void *e;
+
+ rc = 0;
+ for (i = 0; i < fifo->count; ++i) {
+ e = fifo->elems[nth_index(fifo, i)];
+ if (e == elem) {
+ ++rc;
+ }
+ else if (rc) {
+ fifo->elems[nth_index(fifo, i-rc)] = e;
+ }
+ }
+ if (rc) {
+ fifo->count -= rc;
+ if (fifo->count + rc == fifo->nelems) {
+ apr_thread_cond_broadcast(fifo->not_full);
+ }
+ rv = APR_SUCCESS;
+ }
+ else {
+ rv = APR_EAGAIN;
+ }
+
+ apr_thread_mutex_unlock(fifo->lock);
+ }
+ return rv;
+}
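+
+/* A minimal usage sketch (element and pool are hypothetical): producers push
+ * into the fifo, consumers pull from it; the plain variants block on the
+ * not_full/not_empty conditions, the try_* variants return APR_EAGAIN:
+ *
+ *     h2_fifo *q;
+ *     h2_fifo_create(&q, pool, 16);
+ *     h2_fifo_push(q, my_elem);      (blocks while the fifo is full)
+ *     void *elem;
+ *     h2_fifo_pull(q, &elem);        (blocks while the fifo is empty)
+ */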
+
+/*******************************************************************************
+ * FIFO int queue
+ ******************************************************************************/
+
+struct h2_ififo {
+ int *elems;
+ int nelems;
+ int set;
+ int head;
+ int count;
+ int aborted;
+ apr_thread_mutex_t *lock;
+ apr_thread_cond_t *not_empty;
+ apr_thread_cond_t *not_full;
+};
+
+static int inth_index(h2_ififo *fifo, int n)
+{
+ return (fifo->head + n) % fifo->nelems;
+}
+
+static apr_status_t ififo_destroy(void *data)
+{
+ h2_ififo *fifo = data;
+
+ apr_thread_cond_destroy(fifo->not_empty);
+ apr_thread_cond_destroy(fifo->not_full);
+ apr_thread_mutex_destroy(fifo->lock);
+
+ return APR_SUCCESS;
+}
+
+static int iindex_of(h2_ififo *fifo, int id)
+{
+ int i;
+
+ for (i = 0; i < fifo->count; ++i) {
+ if (id == fifo->elems[inth_index(fifo, i)]) {
+ return i;
+ }
+ }
+ return -1;
+}
+
+static apr_status_t icreate_int(h2_ififo **pfifo, apr_pool_t *pool,
+ int capacity, int as_set)
+{
+ apr_status_t rv;
+ h2_ififo *fifo;
+
+ fifo = apr_pcalloc(pool, sizeof(*fifo));
+ if (fifo == NULL) {
+ return APR_ENOMEM;
+ }
+
+ rv = apr_thread_mutex_create(&fifo->lock,
+ APR_THREAD_MUTEX_UNNESTED, pool);
+ if (rv != APR_SUCCESS) {
+ return rv;
+ }
+
+ rv = apr_thread_cond_create(&fifo->not_empty, pool);
+ if (rv != APR_SUCCESS) {
+ return rv;
+ }
+
+ rv = apr_thread_cond_create(&fifo->not_full, pool);
+ if (rv != APR_SUCCESS) {
+ return rv;
+ }
+
+ fifo->elems = apr_pcalloc(pool, capacity * sizeof(int));
+ if (fifo->elems == NULL) {
+ return APR_ENOMEM;
+ }
+ fifo->nelems = capacity;
+ fifo->set = as_set;
+
+ *pfifo = fifo;
+ apr_pool_cleanup_register(pool, fifo, ififo_destroy, apr_pool_cleanup_null);
+
+ return APR_SUCCESS;
+}
+
+apr_status_t h2_ififo_create(h2_ififo **pfifo, apr_pool_t *pool, int capacity)
+{
+ return icreate_int(pfifo, pool, capacity, 0);
+}
+
+apr_status_t h2_ififo_set_create(h2_ififo **pfifo, apr_pool_t *pool, int capacity)
+{
+ return icreate_int(pfifo, pool, capacity, 1);
+}
+
+apr_status_t h2_ififo_term(h2_ififo *fifo)
+{
+ apr_status_t rv;
+ if ((rv = apr_thread_mutex_lock(fifo->lock)) == APR_SUCCESS) {
+ fifo->aborted = 1;
+ apr_thread_mutex_unlock(fifo->lock);
+ }
+ return rv;
+}
+
+apr_status_t h2_ififo_interrupt(h2_ififo *fifo)
+{
+ apr_status_t rv;
+ if ((rv = apr_thread_mutex_lock(fifo->lock)) == APR_SUCCESS) {
+ apr_thread_cond_broadcast(fifo->not_empty);
+ apr_thread_cond_broadcast(fifo->not_full);
+ apr_thread_mutex_unlock(fifo->lock);
+ }
+ return rv;
+}
+
+int h2_ififo_count(h2_ififo *fifo)
+{
+ return fifo->count;
+}
+
+static apr_status_t icheck_not_empty(h2_ififo *fifo, int block)
+{
+ while (fifo->count == 0) {
+ if (!block) {
+ return APR_EAGAIN;
+ }
+ if (fifo->aborted) {
+ return APR_EOF;
+ }
+ apr_thread_cond_wait(fifo->not_empty, fifo->lock);
+ }
+ return APR_SUCCESS;
+}
+
+static apr_status_t ififo_push_int(h2_ififo *fifo, int id, int block)
+{
+ if (fifo->aborted) {
+ return APR_EOF;
+ }
+
+ if (fifo->set && iindex_of(fifo, id) >= 0) {
+ /* set mode, elem already member */
+ return APR_EEXIST;
+ }
+ else if (fifo->count == fifo->nelems) {
+ if (block) {
+ while (fifo->count == fifo->nelems) {
+ if (fifo->aborted) {
+ return APR_EOF;
+ }
+ apr_thread_cond_wait(fifo->not_full, fifo->lock);
+ }
+ }
+ else {
+ return APR_EAGAIN;
+ }
+ }
+
+ ap_assert(fifo->count < fifo->nelems);
+ fifo->elems[inth_index(fifo, fifo->count)] = id;
+ ++fifo->count;
+ if (fifo->count == 1) {
+ apr_thread_cond_broadcast(fifo->not_empty);
+ }
+ return APR_SUCCESS;
+}
+
+static apr_status_t ififo_push(h2_ififo *fifo, int id, int block)
+{
+ apr_status_t rv;
+
+ if (fifo->aborted) {
+ return APR_EOF;
+ }
+
+ if ((rv = apr_thread_mutex_lock(fifo->lock)) == APR_SUCCESS) {
+ rv = ififo_push_int(fifo, id, block);
+ apr_thread_mutex_unlock(fifo->lock);
+ }
+ return rv;
+}
+
+apr_status_t h2_ififo_push(h2_ififo *fifo, int id)
+{
+ return ififo_push(fifo, id, 1);
+}
+
+apr_status_t h2_ififo_try_push(h2_ififo *fifo, int id)
+{
+ return ififo_push(fifo, id, 0);
+}
+
+static apr_status_t ipull_head(h2_ififo *fifo, int *pi, int block)
+{
+ apr_status_t rv;
+
+ if ((rv = icheck_not_empty(fifo, block)) != APR_SUCCESS) {
+ *pi = 0;
+ return rv;
+ }
+ *pi = fifo->elems[fifo->head];
+ --fifo->count;
+ if (fifo->count > 0) {
+ fifo->head = inth_index(fifo, 1);
+ if (fifo->count+1 == fifo->nelems) {
+ apr_thread_cond_broadcast(fifo->not_full);
+ }
+ }
+ return APR_SUCCESS;
+}
+
+static apr_status_t ififo_pull(h2_ififo *fifo, int *pi, int block)
+{
+ apr_status_t rv;
+
+ if (fifo->aborted) {
+ return APR_EOF;
+ }
+
+ if ((rv = apr_thread_mutex_lock(fifo->lock)) == APR_SUCCESS) {
+ rv = ipull_head(fifo, pi, block);
+ apr_thread_mutex_unlock(fifo->lock);
+ }
+ return rv;
+}
+
+apr_status_t h2_ififo_pull(h2_ififo *fifo, int *pi)
+{
+ return ififo_pull(fifo, pi, 1);
+}
+
+apr_status_t h2_ififo_try_pull(h2_ififo *fifo, int *pi)
+{
+ return ififo_pull(fifo, pi, 0);
+}
+
+static apr_status_t ififo_peek(h2_ififo *fifo, h2_ififo_peek_fn *fn, void *ctx, int block)
+{
+ apr_status_t rv;
+ int id;
+
+ if (fifo->aborted) {
+ return APR_EOF;
+ }
+
+ if (APR_SUCCESS == (rv = apr_thread_mutex_lock(fifo->lock))) {
+ if (APR_SUCCESS == (rv = ipull_head(fifo, &id, block))) {
+ switch (fn(id, ctx)) {
+ case H2_FIFO_OP_PULL:
+ break;
+ case H2_FIFO_OP_REPUSH:
+ rv = ififo_push_int(fifo, id, block);
+ break;
+ }
+ }
+ apr_thread_mutex_unlock(fifo->lock);
+ }
+ return rv;
+}
+
+apr_status_t h2_ififo_peek(h2_ififo *fifo, h2_ififo_peek_fn *fn, void *ctx)
+{
+ return ififo_peek(fifo, fn, ctx, 1);
+}
+
+apr_status_t h2_ififo_try_peek(h2_ififo *fifo, h2_ififo_peek_fn *fn, void *ctx)
+{
+ return ififo_peek(fifo, fn, ctx, 0);
+}
+
+apr_status_t h2_ififo_remove(h2_ififo *fifo, int id)
+{
+ apr_status_t rv;
+
+ if (fifo->aborted) {
+ return APR_EOF;
+ }
+
+ if ((rv = apr_thread_mutex_lock(fifo->lock)) == APR_SUCCESS) {
+ int i, rc;
+ int e;
+
+ rc = 0;
+ for (i = 0; i < fifo->count; ++i) {
+ e = fifo->elems[inth_index(fifo, i)];
+ if (e == id) {
+ ++rc;
+ }
+ else if (rc) {
+ fifo->elems[inth_index(fifo, i-rc)] = e;
+ }
+ }
+ if (rc) {
+ fifo->count -= rc;
+ if (fifo->count + rc == fifo->nelems) {
+ apr_thread_cond_broadcast(fifo->not_full);
+ }
+ rv = APR_SUCCESS;
+ }
+ else {
+ rv = APR_EAGAIN;
+ }
+
+ apr_thread_mutex_unlock(fifo->lock);
+ }
+ return rv;
+}
+
+/*******************************************************************************
+ * h2_util for apr_table_t
+ ******************************************************************************/
+
+typedef struct {
+ apr_size_t bytes;
+ apr_size_t pair_extra;
+} table_bytes_ctx;
+
+static int count_bytes(void *x, const char *key, const char *value)
+{
+ table_bytes_ctx *ctx = x;
+ if (key) {
+ ctx->bytes += strlen(key);
+ }
+ if (value) {
+ ctx->bytes += strlen(value);
+ }
+ ctx->bytes += ctx->pair_extra;
+ return 1;
+}
+
+apr_size_t h2_util_table_bytes(apr_table_t *t, apr_size_t pair_extra)
+{
+ table_bytes_ctx ctx;
+
+ ctx.bytes = 0;
+ ctx.pair_extra = pair_extra;
+ apr_table_do(count_bytes, &ctx, t, NULL);
+ return ctx.bytes;
+}
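+/* For example, a caller that wants to estimate the HTTP/1.1 serialization
+ * size of a header table might pass pair_extra == 4 to account for the
+ * ": " and CRLF added per key/value pair (a hypothetical use, not a fixed
+ * convention of this function). */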
+
+
+/*******************************************************************************
+ * h2_util for bucket brigades
+ ******************************************************************************/
+
+static apr_status_t last_not_included(apr_bucket_brigade *bb,
+ apr_off_t maxlen,
+ int same_alloc,
+ apr_size_t *pfile_buckets_allowed,
+ apr_bucket **pend)
+{
+ apr_bucket *b;
+ apr_status_t status = APR_SUCCESS;
+ int files_allowed = pfile_buckets_allowed? (int)*pfile_buckets_allowed : 0;
+
+ if (maxlen >= 0) {
+ /* Find the bucket, up to which we reach maxlen/mem bytes */
+ for (b = APR_BRIGADE_FIRST(bb);
+ (b != APR_BRIGADE_SENTINEL(bb));
+ b = APR_BUCKET_NEXT(b)) {
+
+ if (APR_BUCKET_IS_METADATA(b)) {
+ /* included */
+ }
+ else {
+ if (b->length == ((apr_size_t)-1)) {
+ const char *ign;
+ apr_size_t ilen;
+ status = apr_bucket_read(b, &ign, &ilen, APR_BLOCK_READ);
+ if (status != APR_SUCCESS) {
+ return status;
+ }
+ }
+
+ if (maxlen == 0 && b->length > 0) {
+ *pend = b;
+ return status;
+ }
+
+ if (same_alloc && APR_BUCKET_IS_FILE(b)) {
+                /* we like to move it, always */
+ }
+ else if (files_allowed > 0 && APR_BUCKET_IS_FILE(b)) {
+ /* this has no memory footprint really unless
+ * it is read, disregard it in length count,
+ * unless we do not move the file buckets */
+ --files_allowed;
+ }
+ else if (maxlen < (apr_off_t)b->length) {
+ apr_bucket_split(b, (apr_size_t)maxlen);
+ maxlen = 0;
+ }
+ else {
+ maxlen -= b->length;
+ }
+ }
+ }
+ }
+ *pend = APR_BRIGADE_SENTINEL(bb);
+ return status;
+}
+
+apr_status_t h2_brigade_concat_length(apr_bucket_brigade *dest,
+ apr_bucket_brigade *src,
+ apr_off_t length)
+{
+ apr_bucket *b;
+ apr_off_t remain = length;
+ apr_status_t status = APR_SUCCESS;
+
+ while (!APR_BRIGADE_EMPTY(src)) {
+ b = APR_BRIGADE_FIRST(src);
+
+ if (APR_BUCKET_IS_METADATA(b)) {
+ APR_BUCKET_REMOVE(b);
+ APR_BRIGADE_INSERT_TAIL(dest, b);
+ }
+ else {
+ if (remain == b->length) {
+ /* fall through */
+ }
+ else if (remain <= 0) {
+ return status;
+ }
+ else {
+ if (b->length == ((apr_size_t)-1)) {
+ const char *ign;
+ apr_size_t ilen;
+ status = apr_bucket_read(b, &ign, &ilen, APR_BLOCK_READ);
+ if (status != APR_SUCCESS) {
+ return status;
+ }
+ }
+
+ if (remain < b->length) {
+ apr_bucket_split(b, remain);
+ }
+ }
+ APR_BUCKET_REMOVE(b);
+ APR_BRIGADE_INSERT_TAIL(dest, b);
+ remain -= b->length;
+ }
+ }
+ return status;
+}
+
+apr_status_t h2_brigade_copy_length(apr_bucket_brigade *dest,
+ apr_bucket_brigade *src,
+ apr_off_t length)
+{
+ apr_bucket *b, *next;
+ apr_off_t remain = length;
+ apr_status_t status = APR_SUCCESS;
+
+ for (b = APR_BRIGADE_FIRST(src);
+ b != APR_BRIGADE_SENTINEL(src);
+ b = next) {
+ next = APR_BUCKET_NEXT(b);
+
+ if (APR_BUCKET_IS_METADATA(b)) {
+ /* fall through */
+ }
+ else {
+ if (remain == b->length) {
+ /* fall through */
+ }
+ else if (remain <= 0) {
+ return status;
+ }
+ else {
+ if (b->length == ((apr_size_t)-1)) {
+ const char *ign;
+ apr_size_t ilen;
+ status = apr_bucket_read(b, &ign, &ilen, APR_BLOCK_READ);
+ if (status != APR_SUCCESS) {
+ return status;
+ }
+ }
+
+ if (remain < b->length) {
+ apr_bucket_split(b, remain);
+ }
+ }
+ }
+ status = apr_bucket_copy(b, &b);
+ if (status != APR_SUCCESS) {
+ return status;
+ }
+ APR_BRIGADE_INSERT_TAIL(dest, b);
+ remain -= b->length;
+ }
+ return status;
+}
+
+int h2_util_has_eos(apr_bucket_brigade *bb, apr_off_t len)
+{
+ apr_bucket *b, *end;
+
+ apr_status_t status = last_not_included(bb, len, 0, 0, &end);
+ if (status != APR_SUCCESS) {
+ return status;
+ }
+
+ for (b = APR_BRIGADE_FIRST(bb);
+ b != APR_BRIGADE_SENTINEL(bb) && b != end;
+ b = APR_BUCKET_NEXT(b))
+ {
+ if (APR_BUCKET_IS_EOS(b)) {
+ return 1;
+ }
+ }
+ return 0;
+}
+
+apr_status_t h2_util_bb_avail(apr_bucket_brigade *bb,
+ apr_off_t *plen, int *peos)
+{
+ apr_status_t status;
+ apr_off_t blen = 0;
+
+ /* test read to determine available length */
+ status = apr_brigade_length(bb, 1, &blen);
+ if (status != APR_SUCCESS) {
+ return status;
+ }
+ else if (blen == 0) {
+        /* brigade without data, does it have an EOS bucket somewhere? */
+ *plen = 0;
+ *peos = h2_util_has_eos(bb, -1);
+ }
+ else {
+ /* data in the brigade, limit the length returned. Check for EOS
+ * bucket only if we indicate data. This is required since plen == 0
+     * means "the whole brigade" for h2_util_has_eos()
+ */
+ if (blen < *plen || *plen < 0) {
+ *plen = blen;
+ }
+ *peos = h2_util_has_eos(bb, *plen);
+ }
+ return APR_SUCCESS;
+}
+
+apr_status_t h2_util_bb_readx(apr_bucket_brigade *bb,
+ h2_util_pass_cb *cb, void *ctx,
+ apr_off_t *plen, int *peos)
+{
+ apr_status_t status = APR_SUCCESS;
+ int consume = (cb != NULL);
+ apr_off_t written = 0;
+ apr_off_t avail = *plen;
+ apr_bucket *next, *b;
+
+ /* Pass data in our brigade through the callback until the length
+ * is satisfied or we encounter an EOS.
+ */
+ *peos = 0;
+ for (b = APR_BRIGADE_FIRST(bb);
+ (status == APR_SUCCESS) && (b != APR_BRIGADE_SENTINEL(bb));
+ b = next) {
+
+ if (APR_BUCKET_IS_METADATA(b)) {
+ if (APR_BUCKET_IS_EOS(b)) {
+ *peos = 1;
+ }
+ else {
+ /* ignore */
+ }
+ }
+ else if (avail <= 0) {
+ break;
+ }
+ else {
+ const char *data = NULL;
+ apr_size_t data_len;
+
+ if (b->length == ((apr_size_t)-1)) {
+ /* read to determine length */
+ status = apr_bucket_read(b, &data, &data_len, APR_NONBLOCK_READ);
+ }
+ else {
+ data_len = b->length;
+ }
+
+ if (data_len > avail) {
+ apr_bucket_split(b, avail);
+ data_len = (apr_size_t)avail;
+ }
+
+ if (consume) {
+ if (!data) {
+ status = apr_bucket_read(b, &data, &data_len,
+ APR_NONBLOCK_READ);
+ }
+ if (status == APR_SUCCESS) {
+ status = cb(ctx, data, data_len);
+ }
+ }
+ else {
+ data_len = b->length;
+ }
+ avail -= data_len;
+ written += data_len;
+ }
+
+ next = APR_BUCKET_NEXT(b);
+ if (consume) {
+ apr_bucket_delete(b);
+ }
+ }
+
+ *plen = written;
+ if (status == APR_SUCCESS && !*peos && !*plen) {
+ return APR_EAGAIN;
+ }
+ return status;
+}
+
+apr_size_t h2_util_bucket_print(char *buffer, apr_size_t bmax,
+ apr_bucket *b, const char *sep)
+{
+ apr_size_t off = 0;
+ if (sep && *sep) {
+ off += apr_snprintf(buffer+off, bmax-off, "%s", sep);
+ }
+
+ if (bmax <= off) {
+ return off;
+ }
+ else if (APR_BUCKET_IS_METADATA(b)) {
+ off += apr_snprintf(buffer+off, bmax-off, "%s", b->type->name);
+ }
+ else if (bmax > off) {
+ off += apr_snprintf(buffer+off, bmax-off, "%s[%ld]",
+ b->type->name,
+ (long)(b->length == ((apr_size_t)-1)?
+ -1 : b->length));
+ }
+ return off;
+}
+
+apr_size_t h2_util_bb_print(char *buffer, apr_size_t bmax,
+ const char *tag, const char *sep,
+ apr_bucket_brigade *bb)
+{
+ apr_size_t off = 0;
+ const char *sp = "";
+ apr_bucket *b;
+
+ if (bmax > 1) {
+ if (bb) {
+ memset(buffer, 0, bmax--);
+ off += apr_snprintf(buffer+off, bmax-off, "%s(", tag);
+ for (b = APR_BRIGADE_FIRST(bb);
+ (bmax > off) && (b != APR_BRIGADE_SENTINEL(bb));
+ b = APR_BUCKET_NEXT(b)) {
+
+ off += h2_util_bucket_print(buffer+off, bmax-off, b, sp);
+ sp = " ";
+ }
+ if (bmax > off) {
+ off += apr_snprintf(buffer+off, bmax-off, ")%s", sep);
+ }
+ }
+ else {
+ off += apr_snprintf(buffer+off, bmax-off, "%s(null)%s", tag, sep);
+ }
+ }
+ return off;
+}
+
+apr_status_t h2_append_brigade(apr_bucket_brigade *to,
+ apr_bucket_brigade *from,
+ apr_off_t *plen,
+ int *peos,
+ h2_bucket_gate *should_append)
+{
+ apr_bucket *e;
+ apr_off_t len = 0, remain = *plen;
+ apr_status_t rv;
+
+ *peos = 0;
+
+ while (!APR_BRIGADE_EMPTY(from)) {
+ e = APR_BRIGADE_FIRST(from);
+
+ if (!should_append(e)) {
+ goto leave;
+ }
+ else if (APR_BUCKET_IS_METADATA(e)) {
+ if (APR_BUCKET_IS_EOS(e)) {
+ *peos = 1;
+ apr_bucket_delete(e);
+ continue;
+ }
+ }
+ else {
+ if (remain > 0 && e->length == ((apr_size_t)-1)) {
+ const char *ign;
+ apr_size_t ilen;
+ rv = apr_bucket_read(e, &ign, &ilen, APR_BLOCK_READ);
+ if (rv != APR_SUCCESS) {
+ return rv;
+ }
+ }
+
+ if (remain < e->length) {
+ if (remain <= 0) {
+ goto leave;
+ }
+ apr_bucket_split(e, (apr_size_t)remain);
+ }
+ }
+
+ APR_BUCKET_REMOVE(e);
+ APR_BRIGADE_INSERT_TAIL(to, e);
+ len += e->length;
+ remain -= e->length;
+ }
+leave:
+ *plen = len;
+ return APR_SUCCESS;
+}
+
+apr_off_t h2_brigade_mem_size(apr_bucket_brigade *bb)
+{
+ apr_bucket *b;
+ apr_off_t total = 0;
+
+ for (b = APR_BRIGADE_FIRST(bb);
+ b != APR_BRIGADE_SENTINEL(bb);
+ b = APR_BUCKET_NEXT(b))
+ {
+ total += sizeof(*b);
+ if (b->length > 0) {
+ if (APR_BUCKET_IS_HEAP(b)
+ || APR_BUCKET_IS_POOL(b)) {
+ total += b->length;
+ }
+ }
+ }
+ return total;
+}
+
+
+/*******************************************************************************
+ * h2_ngheader
+ ******************************************************************************/
+
+int h2_util_ignore_header(const char *name)
+{
+    /* never forward these, see RFC 7540, ch. 8.1.2.2 */
+ return (H2_HD_MATCH_LIT_CS("connection", name)
+ || H2_HD_MATCH_LIT_CS("proxy-connection", name)
+ || H2_HD_MATCH_LIT_CS("upgrade", name)
+ || H2_HD_MATCH_LIT_CS("keep-alive", name)
+ || H2_HD_MATCH_LIT_CS("transfer-encoding", name));
+}
+
+static int count_header(void *ctx, const char *key, const char *value)
+{
+ if (!h2_util_ignore_header(key)) {
+ (*((size_t*)ctx))++;
+ }
+ return 1;
+}
+
+static const char *inv_field_name_chr(const char *token)
+{
+ const char *p = ap_scan_http_token(token);
+ if (p == token && *p == ':') {
+ p = ap_scan_http_token(++p);
+ }
+ return (p && *p)? p : NULL;
+}
+
+static const char *inv_field_value_chr(const char *token)
+{
+ const char *p = ap_scan_http_field_content(token);
+ return (p && *p)? p : NULL;
+}
+
+typedef struct ngh_ctx {
+ apr_pool_t *p;
+ int unsafe;
+ h2_ngheader *ngh;
+ apr_status_t status;
+} ngh_ctx;
+
+static int add_header(ngh_ctx *ctx, const char *key, const char *value)
+{
+ nghttp2_nv *nv = &(ctx->ngh)->nv[(ctx->ngh)->nvlen++];
+ const char *p;
+
+ if (!ctx->unsafe) {
+ if ((p = inv_field_name_chr(key))) {
+ ap_log_perror(APLOG_MARK, APLOG_TRACE1, APR_EINVAL, ctx->p,
+ "h2_request: head field '%s: %s' has invalid char %s",
+ key, value, p);
+ ctx->status = APR_EINVAL;
+ return 0;
+ }
+ if ((p = inv_field_value_chr(value))) {
+ ap_log_perror(APLOG_MARK, APLOG_TRACE1, APR_EINVAL, ctx->p,
+ "h2_request: head field '%s: %s' has invalid char %s",
+ key, value, p);
+ ctx->status = APR_EINVAL;
+ return 0;
+ }
+ }
+ nv->name = (uint8_t*)key;
+ nv->namelen = strlen(key);
+ nv->value = (uint8_t*)value;
+ nv->valuelen = strlen(value);
+
+ return 1;
+}
+
+static int add_table_header(void *ctx, const char *key, const char *value)
+{
+ if (!h2_util_ignore_header(key)) {
+ add_header(ctx, key, value);
+ }
+ return 1;
+}
+
+static apr_status_t ngheader_create(h2_ngheader **ph, apr_pool_t *p,
+ int unsafe, size_t key_count,
+ const char *keys[], const char *values[],
+ apr_table_t *headers)
+{
+ ngh_ctx ctx;
+ size_t n, i;
+
+ ctx.p = p;
+ ctx.unsafe = unsafe;
+
+ n = key_count;
+ apr_table_do(count_header, &n, headers, NULL);
+
+ *ph = ctx.ngh = apr_pcalloc(p, sizeof(h2_ngheader));
+ if (!ctx.ngh) {
+ return APR_ENOMEM;
+ }
+
+ ctx.ngh->nv = apr_pcalloc(p, n * sizeof(nghttp2_nv));
+ if (!ctx.ngh->nv) {
+ return APR_ENOMEM;
+ }
+
+ ctx.status = APR_SUCCESS;
+ for (i = 0; i < key_count; ++i) {
+ if (!add_header(&ctx, keys[i], values[i])) {
+ return ctx.status;
+ }
+ }
+
+ apr_table_do(add_table_header, &ctx, headers, NULL);
+
+ return ctx.status;
+}
+
+static int is_unsafe(h2_headers *h)
+{
+ const char *v = apr_table_get(h->notes, H2_HDR_CONFORMANCE);
+ return (v && !strcmp(v, H2_HDR_CONFORMANCE_UNSAFE));
+}
+
+apr_status_t h2_res_create_ngtrailer(h2_ngheader **ph, apr_pool_t *p,
+ h2_headers *headers)
+{
+ return ngheader_create(ph, p, is_unsafe(headers),
+ 0, NULL, NULL, headers->headers);
+}
+
+apr_status_t h2_res_create_ngheader(h2_ngheader **ph, apr_pool_t *p,
+ h2_headers *headers)
+{
+ const char *keys[] = {
+ ":status"
+ };
+ const char *values[] = {
+ apr_psprintf(p, "%d", headers->status)
+ };
+ return ngheader_create(ph, p, is_unsafe(headers),
+ H2_ALEN(keys), keys, values, headers->headers);
+}
+
+apr_status_t h2_req_create_ngheader(h2_ngheader **ph, apr_pool_t *p,
+ const struct h2_request *req)
+{
+
+ const char *keys[] = {
+ ":scheme",
+ ":authority",
+ ":path",
+ ":method",
+ };
+ const char *values[] = {
+ req->scheme,
+ req->authority,
+ req->path,
+ req->method,
+ };
+
+ ap_assert(req->scheme);
+ ap_assert(req->authority);
+ ap_assert(req->path);
+ ap_assert(req->method);
+
+ return ngheader_create(ph, p, 0, H2_ALEN(keys), keys, values, req->headers);
+}
+
+/*******************************************************************************
+ * header HTTP/1 <-> HTTP/2 conversions
+ ******************************************************************************/
+
+
+typedef struct {
+ const char *name;
+ size_t len;
+} literal;
+
+#define H2_DEF_LITERAL(n) { (n), (sizeof(n)-1) }
+#define H2_LIT_ARGS(a) (a),H2_ALEN(a)
+
+static literal IgnoredRequestHeaders[] = {
+ H2_DEF_LITERAL("upgrade"),
+ H2_DEF_LITERAL("connection"),
+ H2_DEF_LITERAL("keep-alive"),
+ H2_DEF_LITERAL("http2-settings"),
+ H2_DEF_LITERAL("proxy-connection"),
+ H2_DEF_LITERAL("transfer-encoding"),
+};
+static literal IgnoredRequestTrailers[] = { /* Ignore, see rfc7230, ch. 4.1.2 */
+ H2_DEF_LITERAL("te"),
+ H2_DEF_LITERAL("host"),
+ H2_DEF_LITERAL("range"),
+ H2_DEF_LITERAL("cookie"),
+ H2_DEF_LITERAL("expect"),
+ H2_DEF_LITERAL("pragma"),
+ H2_DEF_LITERAL("max-forwards"),
+ H2_DEF_LITERAL("cache-control"),
+ H2_DEF_LITERAL("authorization"),
+ H2_DEF_LITERAL("content-length"),
+ H2_DEF_LITERAL("proxy-authorization"),
+};
+static literal IgnoredResponseTrailers[] = {
+ H2_DEF_LITERAL("age"),
+ H2_DEF_LITERAL("date"),
+ H2_DEF_LITERAL("vary"),
+ H2_DEF_LITERAL("cookie"),
+ H2_DEF_LITERAL("expires"),
+ H2_DEF_LITERAL("warning"),
+ H2_DEF_LITERAL("location"),
+ H2_DEF_LITERAL("retry-after"),
+ H2_DEF_LITERAL("cache-control"),
+ H2_DEF_LITERAL("www-authenticate"),
+ H2_DEF_LITERAL("proxy-authenticate"),
+};
+
+static int ignore_header(const literal *lits, size_t llen,
+ const char *name, size_t nlen)
+{
+ const literal *lit;
+ size_t i;
+
+ for (i = 0; i < llen; ++i) {
+ lit = &lits[i];
+ if (lit->len == nlen && !apr_strnatcasecmp(lit->name, name)) {
+ return 1;
+ }
+ }
+ return 0;
+}
+
+int h2_req_ignore_header(const char *name, size_t len)
+{
+ return ignore_header(H2_LIT_ARGS(IgnoredRequestHeaders), name, len);
+}
+
+int h2_req_ignore_trailer(const char *name, size_t len)
+{
+ return (h2_req_ignore_header(name, len)
+ || ignore_header(H2_LIT_ARGS(IgnoredRequestTrailers), name, len));
+}
+
+int h2_res_ignore_trailer(const char *name, size_t len)
+{
+ return ignore_header(H2_LIT_ARGS(IgnoredResponseTrailers), name, len);
+}
+
+apr_status_t h2_req_add_header(apr_table_t *headers, apr_pool_t *pool,
+ const char *name, size_t nlen,
+ const char *value, size_t vlen)
+{
+ char *hname, *hvalue;
+
+ if (h2_req_ignore_header(name, nlen)) {
+ return APR_SUCCESS;
+ }
+ else if (H2_HD_MATCH_LIT("cookie", name, nlen)) {
+ const char *existing = apr_table_get(headers, "cookie");
+ if (existing) {
+ char *nval;
+
+            /* Cookie headers arrive as separate fields in HTTP/2, but need
+             * to be merged with "; " (instead of the default ", ")
+             */
+ hvalue = apr_pstrndup(pool, value, vlen);
+ nval = apr_psprintf(pool, "%s; %s", existing, hvalue);
+ apr_table_setn(headers, "Cookie", nval);
+ return APR_SUCCESS;
+ }
+ }
+ else if (H2_HD_MATCH_LIT("host", name, nlen)) {
+ if (apr_table_get(headers, "Host")) {
+ return APR_SUCCESS; /* ignore duplicate */
+ }
+ }
+
+ hname = apr_pstrndup(pool, name, nlen);
+ hvalue = apr_pstrndup(pool, value, vlen);
+ h2_util_camel_case_header(hname, nlen);
+ apr_table_mergen(headers, hname, hvalue);
+
+ return APR_SUCCESS;
+}
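+
+/* Usage sketch (illustrative only): two HTTP/2 "cookie" fields that arrive
+ * as separate header entries end up merged with "; ", matching HTTP/1.1
+ * cookie semantics. The pool is assumed to exist.
+ *
+ *     apr_table_t *headers = apr_table_make(pool, 5);
+ *     h2_req_add_header(headers, pool, "cookie", 6, "a=1", 3);
+ *     h2_req_add_header(headers, pool, "cookie", 6, "b=2", 3);
+ *
+ * Afterwards, apr_table_get(headers, "Cookie") yields "a=1; b=2".
+ */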
+
+/*******************************************************************************
+ * h2 request handling
+ ******************************************************************************/
+
+h2_request *h2_req_create(int id, apr_pool_t *pool, const char *method,
+ const char *scheme, const char *authority,
+ const char *path, apr_table_t *header, int serialize)
+{
+ h2_request *req = apr_pcalloc(pool, sizeof(h2_request));
+
+ req->method = method;
+ req->scheme = scheme;
+ req->authority = authority;
+ req->path = path;
+ req->headers = header? header : apr_table_make(pool, 10);
+ req->request_time = apr_time_now();
+ req->serialize = serialize;
+
+ return req;
+}
+
+/*******************************************************************************
+ * frame logging
+ ******************************************************************************/
+
+int h2_util_frame_print(const nghttp2_frame *frame, char *buffer, size_t maxlen)
+{
+ char scratch[128];
+ size_t s_len = sizeof(scratch)/sizeof(scratch[0]);
+
+ switch (frame->hd.type) {
+ case NGHTTP2_DATA: {
+ return apr_snprintf(buffer, maxlen,
+ "DATA[length=%d, flags=%d, stream=%d, padlen=%d]",
+ (int)frame->hd.length, frame->hd.flags,
+ frame->hd.stream_id, (int)frame->data.padlen);
+ }
+ case NGHTTP2_HEADERS: {
+ return apr_snprintf(buffer, maxlen,
+ "HEADERS[length=%d, hend=%d, stream=%d, eos=%d]",
+ (int)frame->hd.length,
+ !!(frame->hd.flags & NGHTTP2_FLAG_END_HEADERS),
+ frame->hd.stream_id,
+ !!(frame->hd.flags & NGHTTP2_FLAG_END_STREAM));
+ }
+ case NGHTTP2_PRIORITY: {
+ return apr_snprintf(buffer, maxlen,
+ "PRIORITY[length=%d, flags=%d, stream=%d]",
+ (int)frame->hd.length,
+ frame->hd.flags, frame->hd.stream_id);
+ }
+ case NGHTTP2_RST_STREAM: {
+ return apr_snprintf(buffer, maxlen,
+ "RST_STREAM[length=%d, flags=%d, stream=%d]",
+ (int)frame->hd.length,
+ frame->hd.flags, frame->hd.stream_id);
+ }
+ case NGHTTP2_SETTINGS: {
+ if (frame->hd.flags & NGHTTP2_FLAG_ACK) {
+ return apr_snprintf(buffer, maxlen,
+ "SETTINGS[ack=1, stream=%d]",
+ frame->hd.stream_id);
+ }
+ return apr_snprintf(buffer, maxlen,
+ "SETTINGS[length=%d, stream=%d]",
+ (int)frame->hd.length, frame->hd.stream_id);
+ }
+ case NGHTTP2_PUSH_PROMISE: {
+ return apr_snprintf(buffer, maxlen,
+ "PUSH_PROMISE[length=%d, hend=%d, stream=%d]",
+ (int)frame->hd.length,
+ !!(frame->hd.flags & NGHTTP2_FLAG_END_HEADERS),
+ frame->hd.stream_id);
+ }
+ case NGHTTP2_PING: {
+ return apr_snprintf(buffer, maxlen,
+ "PING[length=%d, ack=%d, stream=%d]",
+ (int)frame->hd.length,
+ frame->hd.flags&NGHTTP2_FLAG_ACK,
+ frame->hd.stream_id);
+ }
+ case NGHTTP2_GOAWAY: {
+ size_t len = (frame->goaway.opaque_data_len < s_len)?
+ frame->goaway.opaque_data_len : s_len-1;
+ memcpy(scratch, frame->goaway.opaque_data, len);
+ scratch[len] = '\0';
+ return apr_snprintf(buffer, maxlen, "GOAWAY[error=%d, reason='%s', "
+ "last_stream=%d]", frame->goaway.error_code,
+ scratch, frame->goaway.last_stream_id);
+ }
+ case NGHTTP2_WINDOW_UPDATE: {
+ return apr_snprintf(buffer, maxlen,
+ "WINDOW_UPDATE[stream=%d, incr=%d]",
+ frame->hd.stream_id,
+ frame->window_update.window_size_increment);
+ }
+ default:
+ return apr_snprintf(buffer, maxlen,
+ "type=%d[length=%d, flags=%d, stream=%d]",
+ frame->hd.type, (int)frame->hd.length,
+ frame->hd.flags, frame->hd.stream_id);
+ }
+}
+
+/*******************************************************************************
+ * push policy
+ ******************************************************************************/
+int h2_push_policy_determine(apr_table_t *headers, apr_pool_t *p, int push_enabled)
+{
+ h2_push_policy policy = H2_PUSH_NONE;
+ if (push_enabled) {
+ const char *val = apr_table_get(headers, "accept-push-policy");
+ if (val) {
+ if (ap_find_token(p, val, "fast-load")) {
+ policy = H2_PUSH_FAST_LOAD;
+ }
+ else if (ap_find_token(p, val, "head")) {
+ policy = H2_PUSH_HEAD;
+ }
+ else if (ap_find_token(p, val, "default")) {
+ policy = H2_PUSH_DEFAULT;
+ }
+ else if (ap_find_token(p, val, "none")) {
+ policy = H2_PUSH_NONE;
+ }
+ else {
+                /* no known token found in this header, use the default */
+ policy = H2_PUSH_DEFAULT;
+ }
+ }
+ else {
+ policy = H2_PUSH_DEFAULT;
+ }
+ }
+ return policy;
+}
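+
+/* Usage sketch (illustrative only): with push enabled, a request carrying
+ * "accept-push-policy: head" maps to H2_PUSH_HEAD. Without the header, or
+ * with an unrecognized value, the result is H2_PUSH_DEFAULT; with push
+ * disabled it is always H2_PUSH_NONE. The pool p is assumed to exist.
+ *
+ *     apr_table_t *h = apr_table_make(p, 1);
+ *     apr_table_setn(h, "accept-push-policy", "head");
+ *     int policy = h2_push_policy_determine(h, p, 1);
+ *
+ * Here policy == H2_PUSH_HEAD.
+ */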
+
diff --git a/modules/http2/h2_util.h b/modules/http2/h2_util.h
new file mode 100644
index 0000000..1eb262d
--- /dev/null
+++ b/modules/http2/h2_util.h
@@ -0,0 +1,544 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __mod_h2__h2_util__
+#define __mod_h2__h2_util__
+
+#include <nghttp2/nghttp2.h>
+
+/*******************************************************************************
+ * some debugging/format helpers
+ ******************************************************************************/
+struct h2_request;
+struct nghttp2_frame;
+
+size_t h2_util_hex_dump(char *buffer, size_t maxlen,
+ const char *data, size_t datalen);
+
+size_t h2_util_header_print(char *buffer, size_t maxlen,
+ const char *name, size_t namelen,
+ const char *value, size_t valuelen);
+
+void h2_util_camel_case_header(char *s, size_t len);
+
+int h2_util_frame_print(const nghttp2_frame *frame, char *buffer, size_t maxlen);
+
+/*******************************************************************************
+ * ihash - hash for structs with int identifier
+ ******************************************************************************/
+typedef struct h2_ihash_t h2_ihash_t;
+typedef int h2_ihash_iter_t(void *ctx, void *val);
+
+/**
+ * Create a hash for structures that have an identifying int member.
+ * @param pool the pool to use
+ * @param offset_of_int the offsetof() of the int member in the struct
+ */
+h2_ihash_t *h2_ihash_create(apr_pool_t *pool, size_t offset_of_int);
+
+size_t h2_ihash_count(h2_ihash_t *ih);
+int h2_ihash_empty(h2_ihash_t *ih);
+void *h2_ihash_get(h2_ihash_t *ih, int id);
+
+/**
+ * Iterate over the hash members (without defined order) and invoke
+ * fn for each member until 0 is returned.
+ * @param ih the hash to iterate over
+ * @param fn the function to invoke on each member
+ * @param ctx user supplied data passed into each iteration call
+ * @return 0 if one iteration returned 0, otherwise != 0
+ */
+int h2_ihash_iter(h2_ihash_t *ih, h2_ihash_iter_t *fn, void *ctx);
+
+void h2_ihash_add(h2_ihash_t *ih, void *val);
+void h2_ihash_remove(h2_ihash_t *ih, int id);
+void h2_ihash_remove_val(h2_ihash_t *ih, void *val);
+void h2_ihash_clear(h2_ihash_t *ih);
+
+size_t h2_ihash_shift(h2_ihash_t *ih, void **buffer, size_t max);
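+
+/* Usage sketch (illustrative only, the item type is hypothetical): index
+ * structs by an int member via its offsetof(); <stddef.h> provides offsetof.
+ *
+ *     typedef struct { int id; const char *name; } item;
+ *
+ *     h2_ihash_t *map = h2_ihash_create(pool, offsetof(item, id));
+ *     item *it = apr_pcalloc(pool, sizeof(item));
+ *     it->id = 42;
+ *     h2_ihash_add(map, it);
+ *
+ * h2_ihash_get(map, 42) then returns the entry just added.
+ */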
+
+/*******************************************************************************
+ * iqueue - sorted list of int with user defined ordering
+ ******************************************************************************/
+typedef struct h2_iqueue {
+ int *elts;
+ int head;
+ int nelts;
+ int nalloc;
+ apr_pool_t *pool;
+} h2_iqueue;
+
+/**
+ * Comparator for two int to determine their order.
+ *
+ * @param i1 first int to compare
+ * @param i2 second int to compare
+ * @param ctx provided user data
+ * @return value is the same as for strcmp() and has the effect:
+ *         == 0: i1 and i2 are treated as equal in the ordering
+ *         < 0: i1 should be sorted before i2
+ *         > 0: i2 should be sorted before i1
+ */
+typedef int h2_iq_cmp(int i1, int i2, void *ctx);
+
+/**
+ * Allocate a new queue from the pool and initialize.
+ * @param pool the memory pool to allocate from
+ * @param capacity the initial capacity of the queue
+ */
+h2_iqueue *h2_iq_create(apr_pool_t *pool, int capacity);
+
+/**
+ * Return != 0 iff there are no ids in the queue.
+ * @param q the queue to check
+ */
+int h2_iq_empty(h2_iqueue *q);
+
+/**
+ * Return the number of int in the queue.
+ * @param q the queue to get size on
+ */
+int h2_iq_count(h2_iqueue *q);
+
+/**
+ * Add a stream id to the queue.
+ *
+ * @param q the queue to append the id to
+ * @param sid the stream id to add
+ * @param cmp the comparator for sorting
+ * @param ctx user data for comparator
+ * @return != 0 iff id was not already there
+ */
+int h2_iq_add(h2_iqueue *q, int sid, h2_iq_cmp *cmp, void *ctx);
+
+/**
+ * Append the id to the queue if not already present.
+ *
+ * @param q the queue to append the id to
+ * @param sid the id to append
+ * @return != 0 iff id was not already there
+ */
+int h2_iq_append(h2_iqueue *q, int sid);
+
+/**
+ * Remove the stream id from the queue.
+ * @param q the queue
+ * @param sid the stream id to remove
+ * @return != 0 iff the id was found in the queue
+ */
+int h2_iq_remove(h2_iqueue *q, int sid);
+
+/**
+ * Remove all entries in the queue.
+ */
+void h2_iq_clear(h2_iqueue *q);
+
+/**
+ * Sort the stream id queue again. Call this if the ordering
+ * criteria have changed.
+ *
+ * @param q the queue to sort
+ * @param cmp the comparator for sorting
+ * @param ctx user data for the comparator
+ */
+void h2_iq_sort(h2_iqueue *q, h2_iq_cmp *cmp, void *ctx);
+
+/**
+ * Get the first id from the queue or 0 if the queue is empty.
+ * The returned id is removed from the queue.
+ *
+ * @param q the queue to get the first id from
+ * @return the first id of the queue, 0 if empty
+ */
+int h2_iq_shift(h2_iqueue *q);
+
+/**
+ * Get up to max ids from the head of the queue. All returned ids are removed.
+ *
+ * @param q the queue to get the first task from
+ * @param pint the int array to receive the values
+ * @param max the maximum number of ids to shift
+ * @return the actual number of ids shifted
+ */
+size_t h2_iq_mshift(h2_iqueue *q, int *pint, size_t max);
+
+/**
+ * Determine if int is in the queue already
+ *
+ * @parm q the queue
+ * @param sid the integer id to check for
+ * @return != 0 iff sid is already in the queue
+ */
+int h2_iq_contains(h2_iqueue *q, int sid);
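+
+/* Usage sketch (illustrative only): keep stream ids sorted ascending so
+ * that the smallest id is shifted first. The comparator context is unused
+ * here; "pool" is assumed to exist.
+ *
+ *     static int cmp_asc(int i1, int i2, void *ctx)
+ *     {
+ *         (void)ctx;
+ *         return i1 - i2;
+ *     }
+ *
+ *     h2_iqueue *q = h2_iq_create(pool, 16);
+ *     h2_iq_add(q, 5, cmp_asc, NULL);
+ *     h2_iq_add(q, 3, cmp_asc, NULL);
+ *
+ * h2_iq_shift(q) then returns 3, then 5, then 0 once the queue is empty.
+ */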
+
+/*******************************************************************************
+ * FIFO queue (void* elements)
+ ******************************************************************************/
+
+/**
+ * A thread-safe FIFO queue with some extra bells and whistles. If you
+ * do not need anything special, you are better off using 'apr_queue'.
+ */
+typedef struct h2_fifo h2_fifo;
+
+/**
+ * Create a FIFO queue that can hold up to capacity elements. Elements can
+ * appear several times.
+ */
+apr_status_t h2_fifo_create(h2_fifo **pfifo, apr_pool_t *pool, int capacity);
+
+/**
+ * Create a FIFO set that can hold up to capacity elements. Elements only
+ * appear once. Pushing an element already present does not change the
+ * queue and is successful.
+ */
+apr_status_t h2_fifo_set_create(h2_fifo **pfifo, apr_pool_t *pool, int capacity);
+
+apr_status_t h2_fifo_term(h2_fifo *fifo);
+apr_status_t h2_fifo_interrupt(h2_fifo *fifo);
+
+int h2_fifo_count(h2_fifo *fifo);
+
+/**
+ * Push an element into the queue. Blocks if there is no capacity left.
+ *
+ * @param fifo the FIFO queue
+ * @param elem the element to push
+ * @return APR_SUCCESS on push, APR_EAGAIN on try_push on a full queue,
+ * APR_EEXIST when in set mode and elem already there.
+ */
+apr_status_t h2_fifo_push(h2_fifo *fifo, void *elem);
+apr_status_t h2_fifo_try_push(h2_fifo *fifo, void *elem);
+
+apr_status_t h2_fifo_pull(h2_fifo *fifo, void **pelem);
+apr_status_t h2_fifo_try_pull(h2_fifo *fifo, void **pelem);
+
+typedef enum {
+    H2_FIFO_OP_PULL,   /* pull the element from the queue, i.e. discard it */
+    H2_FIFO_OP_REPUSH, /* pull and immediately re-push it */
+} h2_fifo_op_t;
+
+typedef h2_fifo_op_t h2_fifo_peek_fn(void *head, void *ctx);
+
+/**
+ * Call given function on the head of the queue, once it exists, and
+ * perform the returned operation on it. The queue will hold its lock during
+ * this time, so no other operations on the queue are possible.
+ * @param fifo the queue to peek at
+ * @param fn the function to call on the head, once available
+ * @param ctx context to pass in call to function
+ */
+apr_status_t h2_fifo_peek(h2_fifo *fifo, h2_fifo_peek_fn *fn, void *ctx);
+
+/**
+ * Non-blocking version of h2_fifo_peek.
+ */
+apr_status_t h2_fifo_try_peek(h2_fifo *fifo, h2_fifo_peek_fn *fn, void *ctx);
+
+/**
+ * Remove the element from the queue; all of its appearances are removed.
+ * @param elem the element to remove
+ * @return APR_SUCCESS iff > 0 elems were removed, APR_EAGAIN otherwise.
+ */
+apr_status_t h2_fifo_remove(h2_fifo *fifo, void *elem);
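+
+/* Usage sketch (illustrative only): look at the head element and either
+ * consume it or push it back to the end of the queue. The element type
+ * my_job and job_is_ready() are hypothetical.
+ *
+ *     static h2_fifo_op_t take_if_ready(void *head, void *ctx)
+ *     {
+ *         my_job *job = head;
+ *         (void)ctx;
+ *         return job_is_ready(job)? H2_FIFO_OP_PULL : H2_FIFO_OP_REPUSH;
+ *     }
+ *
+ *     apr_status_t rv = h2_fifo_try_peek(fifo, take_if_ready, NULL);
+ */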
+
+/*******************************************************************************
+ * iFIFO queue (int elements)
+ ******************************************************************************/
+
+/**
+ * A thread-safe FIFO queue with some extra bells and whistles. If you
+ * do not need anything special, you are better off using 'apr_queue'.
+ */
+typedef struct h2_ififo h2_ififo;
+
+/**
+ * Create a FIFO queue that can hold up to capacity integers. Integers
+ * can appear several times.
+ */
+apr_status_t h2_ififo_create(h2_ififo **pfifo, apr_pool_t *pool, int capacity);
+
+/**
+ * Create a FIFO set that can hold up to capacity integers. Ints only
+ * appear once. Pushing an int already present does not change the
+ * queue and is successful.
+ */
+apr_status_t h2_ififo_set_create(h2_ififo **pfifo, apr_pool_t *pool, int capacity);
+
+apr_status_t h2_ififo_term(h2_ififo *fifo);
+apr_status_t h2_ififo_interrupt(h2_ififo *fifo);
+
+int h2_ififo_count(h2_ififo *fifo);
+
+/**
+ * Push an int into the queue. Blocks if there is no capacity left.
+ *
+ * @param fifo the FIFO queue
+ * @param id the int to push
+ * @return APR_SUCCESS on push, APR_EAGAIN on try_push on a full queue,
+ * APR_EEXIST when in set mode and elem already there.
+ */
+apr_status_t h2_ififo_push(h2_ififo *fifo, int id);
+apr_status_t h2_ififo_try_push(h2_ififo *fifo, int id);
+
+apr_status_t h2_ififo_pull(h2_ififo *fifo, int *pi);
+apr_status_t h2_ififo_try_pull(h2_ififo *fifo, int *pi);
+
+typedef h2_fifo_op_t h2_ififo_peek_fn(int head, void *ctx);
+
+/**
+ * Call given function on the head of the queue, once it exists, and
+ * perform the returned operation on it. The queue will hold its lock during
+ * this time, so no other operations on the queue are possible.
+ * @param fifo the queue to peek at
+ * @param fn the function to call on the head, once available
+ * @param ctx context to pass in call to function
+ */
+apr_status_t h2_ififo_peek(h2_ififo *fifo, h2_ififo_peek_fn *fn, void *ctx);
+
+/**
+ * Non-blocking version of h2_ififo_peek.
+ */
+apr_status_t h2_ififo_try_peek(h2_ififo *fifo, h2_ififo_peek_fn *fn, void *ctx);
+
+/**
+ * Remove the integer from the queue; all of its appearances are removed.
+ * @param id the integer to remove
+ * @return APR_SUCCESS iff > 0 ints were removed, APR_EAGAIN otherwise.
+ */
+apr_status_t h2_ififo_remove(h2_ififo *fifo, int id);
+
+/*******************************************************************************
+ * common helpers
+ ******************************************************************************/
+/* h2_log2(n) equals log2(n) exactly iff n is a power of 2 */
+unsigned char h2_log2(int n);
+
+/**
+ * Count the bytes that all key/value pairs in a table have
+ * in length (excluding terminating 0s), plus an additional extra per pair.
+ *
+ * @param t the table to inspect
+ * @param pair_extra the extra amount to add per pair
+ * @return the number of bytes all key/value pairs have
+ */
+apr_size_t h2_util_table_bytes(apr_table_t *t, apr_size_t pair_extra);
+
+/** Match a header name against a string constant, case insensitive */
+#define H2_HD_MATCH_LIT(l, name, nlen) \
+ ((nlen == sizeof(l) - 1) && !apr_strnatcasecmp(l, name))
+
+/*******************************************************************************
+ * HTTP/2 header helpers
+ ******************************************************************************/
+int h2_req_ignore_header(const char *name, size_t len);
+int h2_req_ignore_trailer(const char *name, size_t len);
+int h2_res_ignore_trailer(const char *name, size_t len);
+
+/**
+ * Set the push policy for the given request. Takes request headers into
+ * account, see draft https://tools.ietf.org/html/draft-ruellan-http-accept-push-policy-00
+ * for details.
+ *
+ * @param headers the http headers to inspect
+ * @param p the pool to use
+ * @param push_enabled if HTTP/2 server push is generally enabled for this request
+ * @return the push policy desired
+ */
+int h2_push_policy_determine(apr_table_t *headers, apr_pool_t *p, int push_enabled);
+
+/*******************************************************************************
+ * base64 url encoding, different table from normal base64
+ ******************************************************************************/
+/**
+ * I always wanted to write my own base64url decoder...not. See
+ * https://tools.ietf.org/html/rfc4648#section-5 for description.
+ */
+apr_size_t h2_util_base64url_decode(const char **decoded,
+ const char *encoded,
+ apr_pool_t *pool);
+const char *h2_util_base64url_encode(const char *data,
+ apr_size_t len, apr_pool_t *pool);
+
+/*******************************************************************************
+ * nghttp2 helpers
+ ******************************************************************************/
+
+#define H2_HD_MATCH_LIT_CS(l, name) \
+ ((strlen(name) == sizeof(l) - 1) && !apr_strnatcasecmp(l, name))
+
+#define H2_CREATE_NV_LIT_CS(nv, NAME, VALUE) nv->name = (uint8_t *)NAME; \
+ nv->namelen = sizeof(NAME) - 1; \
+ nv->value = (uint8_t *)VALUE; \
+ nv->valuelen = strlen(VALUE)
+
+#define H2_CREATE_NV_CS_LIT(nv, NAME, VALUE) nv->name = (uint8_t *)NAME; \
+ nv->namelen = strlen(NAME); \
+ nv->value = (uint8_t *)VALUE; \
+ nv->valuelen = sizeof(VALUE) - 1
+
+#define H2_CREATE_NV_CS_CS(nv, NAME, VALUE) nv->name = (uint8_t *)NAME; \
+ nv->namelen = strlen(NAME); \
+ nv->value = (uint8_t *)VALUE; \
+ nv->valuelen = strlen(VALUE)
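+
+/* Usage sketch (illustrative only): fill nghttp2_nv entries; the _LIT
+ * variants take the length from sizeof() of a string literal, the _CS
+ * variants call strlen() on a runtime C string. status_str, name_str and
+ * value_str are hypothetical variables.
+ *
+ *     nghttp2_nv nva[2], *nv = nva;
+ *     H2_CREATE_NV_LIT_CS(nv, ":status", status_str);
+ *     nv++;
+ *     H2_CREATE_NV_CS_CS(nv, name_str, value_str);
+ */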
+
+int h2_util_ignore_header(const char *name);
+
+struct h2_headers;
+
+typedef struct h2_ngheader {
+ nghttp2_nv *nv;
+ apr_size_t nvlen;
+} h2_ngheader;
+
+apr_status_t h2_res_create_ngtrailer(h2_ngheader **ph, apr_pool_t *p,
+ struct h2_headers *headers);
+apr_status_t h2_res_create_ngheader(h2_ngheader **ph, apr_pool_t *p,
+ struct h2_headers *headers);
+apr_status_t h2_req_create_ngheader(h2_ngheader **ph, apr_pool_t *p,
+ const struct h2_request *req);
+
+apr_status_t h2_req_add_header(apr_table_t *headers, apr_pool_t *pool,
+ const char *name, size_t nlen,
+ const char *value, size_t vlen);
+
+/*******************************************************************************
+ * h2_request helpers
+ ******************************************************************************/
+
+struct h2_request *h2_req_create(int id, apr_pool_t *pool, const char *method,
+ const char *scheme, const char *authority,
+ const char *path, apr_table_t *header,
+ int serialize);
+
+/*******************************************************************************
+ * apr brigade helpers
+ ******************************************************************************/
+
+/**
+ * Concatenate at most length bytes from src to dest brigade, splitting
+ * buckets if necessary and reading buckets of indeterminate length.
+ */
+apr_status_t h2_brigade_concat_length(apr_bucket_brigade *dest,
+ apr_bucket_brigade *src,
+ apr_off_t length);
+
+/**
+ * Copy at most length bytes from src to dest brigade, splitting
+ * buckets if necessary and reading buckets of indeterminate length.
+ */
+apr_status_t h2_brigade_copy_length(apr_bucket_brigade *dest,
+ apr_bucket_brigade *src,
+ apr_off_t length);
+
+/**
+ * Return != 0 iff there is a FLUSH or EOS bucket in the brigade.
+ * @param bb the brigade to check on
+ * @return != 0 iff brigade holds FLUSH or EOS bucket (or both)
+ */
+int h2_util_has_eos(apr_bucket_brigade *bb, apr_off_t len);
+
+/**
+ * Check how many bytes of the desired amount are available and if the
+ * end of stream is reached by that amount.
+ * @param bb the brigade to check
+ * @param plen the desired length and, on return, the available length
+ * @param peos on return, != 0 iff the end of stream has been reached
+ */
+apr_status_t h2_util_bb_avail(apr_bucket_brigade *bb,
+ apr_off_t *plen, int *peos);
+
+typedef apr_status_t h2_util_pass_cb(void *ctx,
+ const char *data, apr_off_t len);
+
+/**
+ * Read at most *plen bytes from the brigade and pass them into the
+ * given callback. If cb is NULL, just return the amount of data that
+ * could have been read.
+ * If an EOS was/would be encountered, set *peos != 0.
+ * @param bb the brigade to read from
+ * @param cb the callback to invoke for the read data
+ * @param ctx optional data passed to callback
+ * @param plen inout, as input gives the maximum number of bytes to read,
+ * on return specifies the actual/would be number of bytes
+ * @param peos != 0 iff an EOS bucket was/would be encountered.
+ */
+apr_status_t h2_util_bb_readx(apr_bucket_brigade *bb,
+ h2_util_pass_cb *cb, void *ctx,
+ apr_off_t *plen, int *peos);
+
+/**
+ * Print a bucket's metadata (type and length) to the buffer.
+ * @return number of characters printed
+ */
+apr_size_t h2_util_bucket_print(char *buffer, apr_size_t bmax,
+ apr_bucket *b, const char *sep);
+
+/**
+ * Prints the brigade bucket types and lengths into the given buffer
+ * up to bmax.
+ * @return number of characters printed
+ */
+apr_size_t h2_util_bb_print(char *buffer, apr_size_t bmax,
+ const char *tag, const char *sep,
+ apr_bucket_brigade *bb);
+/**
+ * Logs the bucket brigade (which bucket types with what length)
+ * to the log at the given level.
+ * @param c the connection to log for
+ * @param sid the stream identifier this brigade belongs to
+ * @param level the log level (as in APLOG_*)
+ * @param tag a short message text about the context
+ * @param bb the brigade to log
+ */
+#define h2_util_bb_log(c, sid, level, tag, bb) \
+do { \
+ char buffer[4 * 1024]; \
+ const char *line = "(null)"; \
+ apr_size_t len, bmax = sizeof(buffer)/sizeof(buffer[0]); \
+ len = h2_util_bb_print(buffer, bmax, (tag), "", (bb)); \
+ ap_log_cerror(APLOG_MARK, level, 0, (c), "bb_dump(%ld): %s", \
+ ((c)->master? (c)->master->id : (c)->id), (len? buffer : line)); \
+} while(0)
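+
+/* Usage sketch (illustrative only), assuming c, stream_id and bb are in
+ * scope of a filter or session function:
+ *
+ *     h2_util_bb_log(c, stream_id, APLOG_TRACE2, "session send", bb);
+ */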
+
+
+typedef int h2_bucket_gate(apr_bucket *b);
+/**
+ * Transfer buckets from one brigade to another with a limit on the
+ * maximum amount of bytes transferred. Does no setaside magic; the
+ * lifetimes of both brigades must be compatible.
+ * @param to brigade to transfer buckets to
+ * @param from brigade to remove buckets from
+ * @param plen maximum bytes to transfer, on return the bytes actually transferred
+ * @param peos on return, != 0 iff an EOS bucket was transferred
+ * @param should_append callback deciding per bucket whether it may be moved
+ */
+apr_status_t h2_append_brigade(apr_bucket_brigade *to,
+ apr_bucket_brigade *from,
+ apr_off_t *plen,
+ int *peos,
+ h2_bucket_gate *should_append);
+
+/**
+ * Get an approximation of the memory footprint of the given
+ * brigade. This differs from apr_brigade_length() in that
+ * - no buckets are ever read
+ * - only buckets known to allocate memory (HEAP+POOL) are counted
+ * - the bucket struct itself is counted
+ */
+apr_off_t h2_brigade_mem_size(apr_bucket_brigade *bb);
+
+#endif /* defined(__mod_h2__h2_util__) */
diff --git a/modules/http2/h2_version.h b/modules/http2/h2_version.h
new file mode 100644
index 0000000..7079437
--- /dev/null
+++ b/modules/http2/h2_version.h
@@ -0,0 +1,41 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef mod_h2_h2_version_h
+#define mod_h2_h2_version_h
+
+#undef PACKAGE_VERSION
+#undef PACKAGE_TARNAME
+#undef PACKAGE_STRING
+#undef PACKAGE_NAME
+#undef PACKAGE_BUGREPORT
+
+/**
+ * @macro
+ * Version number of the http2 module as c string
+ */
+#define MOD_HTTP2_VERSION "1.11.4"
+
+/**
+ * @macro
+ * Numerical representation of the version number of the http2 module
+ * release. This is a 24 bit number with 8 bits for major number, 8 bits
+ * for minor and 8 bits for patch. Version 1.2.3 becomes 0x010203.
+ */
+#define MOD_HTTP2_VERSION_NUM 0x010b04
+
+
+#endif /* mod_h2_h2_version_h */
diff --git a/modules/http2/h2_workers.c b/modules/http2/h2_workers.c
new file mode 100644
index 0000000..699f533
--- /dev/null
+++ b/modules/http2/h2_workers.c
@@ -0,0 +1,383 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <assert.h>
+#include <apr_atomic.h>
+#include <apr_thread_mutex.h>
+#include <apr_thread_cond.h>
+
+#include <mpm_common.h>
+#include <httpd.h>
+#include <http_core.h>
+#include <http_log.h>
+
+#include "h2.h"
+#include "h2_private.h"
+#include "h2_mplx.h"
+#include "h2_task.h"
+#include "h2_workers.h"
+#include "h2_util.h"
+
+typedef struct h2_slot h2_slot;
+struct h2_slot {
+ int id;
+ h2_slot *next;
+ h2_workers *workers;
+ int aborted;
+ int sticks;
+ h2_task *task;
+ apr_thread_t *thread;
+ apr_thread_mutex_t *lock;
+ apr_thread_cond_t *not_idle;
+};
+
+static h2_slot *pop_slot(h2_slot **phead)
+{
+ /* Atomically pop a slot from the list */
+ for (;;) {
+ h2_slot *first = *phead;
+ if (first == NULL) {
+ return NULL;
+ }
+ if (apr_atomic_casptr((void*)phead, first->next, first) == first) {
+ first->next = NULL;
+ return first;
+ }
+ }
+}
+
+static void push_slot(h2_slot **phead, h2_slot *slot)
+{
+ /* Atomically push a slot to the list */
+ ap_assert(!slot->next);
+ for (;;) {
+ h2_slot *next = slot->next = *phead;
+ if (apr_atomic_casptr((void*)phead, slot, next) == next) {
+ return;
+ }
+ }
+}
+
+static void* APR_THREAD_FUNC slot_run(apr_thread_t *thread, void *wctx);
+
+static apr_status_t activate_slot(h2_workers *workers, h2_slot *slot)
+{
+ apr_status_t status;
+
+ slot->workers = workers;
+ slot->aborted = 0;
+ slot->task = NULL;
+
+ if (!slot->lock) {
+ status = apr_thread_mutex_create(&slot->lock,
+ APR_THREAD_MUTEX_DEFAULT,
+ workers->pool);
+ if (status != APR_SUCCESS) {
+ push_slot(&workers->free, slot);
+ return status;
+ }
+ }
+
+ if (!slot->not_idle) {
+ status = apr_thread_cond_create(&slot->not_idle, workers->pool);
+ if (status != APR_SUCCESS) {
+ push_slot(&workers->free, slot);
+ return status;
+ }
+ }
+
+ ap_log_error(APLOG_MARK, APLOG_TRACE2, 0, workers->s,
+ "h2_workers: new thread for slot %d", slot->id);
+ /* thread will either immediately start work or add itself
+ * to the idle queue */
+ apr_thread_create(&slot->thread, workers->thread_attr, slot_run, slot,
+ workers->pool);
+ if (!slot->thread) {
+ push_slot(&workers->free, slot);
+ return APR_ENOMEM;
+ }
+
+ apr_atomic_inc32(&workers->worker_count);
+ return APR_SUCCESS;
+}
+
+static apr_status_t add_worker(h2_workers *workers)
+{
+ h2_slot *slot = pop_slot(&workers->free);
+ if (slot) {
+ return activate_slot(workers, slot);
+ }
+ return APR_EAGAIN;
+}
+
+static void wake_idle_worker(h2_workers *workers)
+{
+ h2_slot *slot = pop_slot(&workers->idle);
+ if (slot) {
+ apr_thread_mutex_lock(slot->lock);
+ apr_thread_cond_signal(slot->not_idle);
+ apr_thread_mutex_unlock(slot->lock);
+ }
+ else if (workers->dynamic) {
+ add_worker(workers);
+ }
+}
+
+static void cleanup_zombies(h2_workers *workers)
+{
+ h2_slot *slot;
+ while ((slot = pop_slot(&workers->zombies))) {
+ if (slot->thread) {
+ apr_status_t status;
+ apr_thread_join(&status, slot->thread);
+ slot->thread = NULL;
+ }
+ apr_atomic_dec32(&workers->worker_count);
+ slot->next = NULL;
+ push_slot(&workers->free, slot);
+ }
+}
+
+static apr_status_t slot_pull_task(h2_slot *slot, h2_mplx *m)
+{
+ apr_status_t rv;
+
+ rv = h2_mplx_pop_task(m, &slot->task);
+ if (slot->task) {
+        /* Ok, we got something to give back to the worker for execution.
+         * If we still have idle workers, we let the worker be sticky,
+         * i.e. have it poll the task's h2_mplx instance for more work
+         * before coming back here to ask. */
+ slot->sticks = slot->workers->max_workers;
+ return rv;
+ }
+ slot->sticks = 0;
+ return APR_EOF;
+}
+
+static h2_fifo_op_t mplx_peek(void *head, void *ctx)
+{
+ h2_mplx *m = head;
+ h2_slot *slot = ctx;
+
+ if (slot_pull_task(slot, m) == APR_EAGAIN) {
+ wake_idle_worker(slot->workers);
+ return H2_FIFO_OP_REPUSH;
+ }
+ return H2_FIFO_OP_PULL;
+}
+
+/**
+ * Get the next task for the given worker. Will block until a task arrives
+ * or the max_wait timer expires and more than min workers exist.
+ */
+static apr_status_t get_next(h2_slot *slot)
+{
+ h2_workers *workers = slot->workers;
+ apr_status_t status;
+
+ slot->task = NULL;
+ while (!slot->aborted) {
+ if (!slot->task) {
+ status = h2_fifo_try_peek(workers->mplxs, mplx_peek, slot);
+ if (status == APR_EOF) {
+ return status;
+ }
+ }
+
+ if (slot->task) {
+ return APR_SUCCESS;
+ }
+
+ cleanup_zombies(workers);
+
+ apr_thread_mutex_lock(slot->lock);
+ push_slot(&workers->idle, slot);
+ apr_thread_cond_wait(slot->not_idle, slot->lock);
+ apr_thread_mutex_unlock(slot->lock);
+ }
+ return APR_EOF;
+}
+
+static void slot_done(h2_slot *slot)
+{
+ push_slot(&(slot->workers->zombies), slot);
+}
+
+
+static void* APR_THREAD_FUNC slot_run(apr_thread_t *thread, void *wctx)
+{
+ h2_slot *slot = wctx;
+
+ while (!slot->aborted) {
+
+ /* Get a h2_task from the mplxs queue. */
+ get_next(slot);
+ while (slot->task) {
+
+ h2_task_do(slot->task, thread, slot->id);
+
+            /* Report the task as done. If stickiness is left, offer the
+ * mplx the opportunity to give us back a new task right away.
+ */
+ if (!slot->aborted && (--slot->sticks > 0)) {
+ h2_mplx_task_done(slot->task->mplx, slot->task, &slot->task);
+ }
+ else {
+ h2_mplx_task_done(slot->task->mplx, slot->task, NULL);
+ slot->task = NULL;
+ }
+ }
+ }
+
+ slot_done(slot);
+ return NULL;
+}
+
+static apr_status_t workers_pool_cleanup(void *data)
+{
+ h2_workers *workers = data;
+ h2_slot *slot;
+
+ if (!workers->aborted) {
+ workers->aborted = 1;
+ /* abort all idle slots */
+ for (;;) {
+ slot = pop_slot(&workers->idle);
+ if (slot) {
+ apr_thread_mutex_lock(slot->lock);
+ slot->aborted = 1;
+ apr_thread_cond_signal(slot->not_idle);
+ apr_thread_mutex_unlock(slot->lock);
+ }
+ else {
+ break;
+ }
+ }
+
+ h2_fifo_term(workers->mplxs);
+ h2_fifo_interrupt(workers->mplxs);
+
+ cleanup_zombies(workers);
+ }
+ return APR_SUCCESS;
+}
+
+h2_workers *h2_workers_create(server_rec *s, apr_pool_t *server_pool,
+ int min_workers, int max_workers,
+ int idle_secs)
+{
+ apr_status_t status;
+ h2_workers *workers;
+ apr_pool_t *pool;
+ int i, n;
+
+ ap_assert(s);
+ ap_assert(server_pool);
+
+ /* let's have our own pool that will be parent to all h2_worker
+ * instances we create. This happens in various threads, but always
+ * guarded by our lock. Without this pool, all subpool creations would
+ * happen on the pool handed to us, which we do not guard.
+ */
+ apr_pool_create(&pool, server_pool);
+ apr_pool_tag(pool, "h2_workers");
+ workers = apr_pcalloc(pool, sizeof(h2_workers));
+ if (!workers) {
+ return NULL;
+ }
+
+ workers->s = s;
+ workers->pool = pool;
+ workers->min_workers = min_workers;
+ workers->max_workers = max_workers;
+ workers->max_idle_secs = (idle_secs > 0)? idle_secs : 10;
+
+ /* FIXME: the fifo set we use here has limited capacity. Once the
+ * set is full, connections with new requests do a wait. Unfortunately,
+     * we have optimizations in place there that make such waiting "unfair"
+ * in the sense that it may take connections a looong time to get scheduled.
+ *
+ * Need to rewrite this to use one of our double-linked lists and a mutex
+ * to have unlimited capacity and fair scheduling.
+ *
+ * For now, we just make enough room to have many connections inside one
+ * process.
+ */
+ status = h2_fifo_set_create(&workers->mplxs, pool, 8 * 1024);
+ if (status != APR_SUCCESS) {
+ return NULL;
+ }
+
+ status = apr_threadattr_create(&workers->thread_attr, workers->pool);
+ if (status != APR_SUCCESS) {
+ return NULL;
+ }
+
+ if (ap_thread_stacksize != 0) {
+ apr_threadattr_stacksize_set(workers->thread_attr,
+ ap_thread_stacksize);
+ ap_log_error(APLOG_MARK, APLOG_TRACE3, 0, s,
+ "h2_workers: using stacksize=%ld",
+ (long)ap_thread_stacksize);
+ }
+
+ status = apr_thread_mutex_create(&workers->lock,
+ APR_THREAD_MUTEX_DEFAULT,
+ workers->pool);
+ if (status == APR_SUCCESS) {
+ n = workers->nslots = workers->max_workers;
+ workers->slots = apr_pcalloc(workers->pool, n * sizeof(h2_slot));
+ if (workers->slots == NULL) {
+ workers->nslots = 0;
+ status = APR_ENOMEM;
+ }
+ for (i = 0; i < n; ++i) {
+ workers->slots[i].id = i;
+ }
+ }
+ if (status == APR_SUCCESS) {
+ /* we activate all for now, TODO: support min_workers again.
+ * do this in reverse for vanity reasons so slot 0 will most
+ * likely be at head of idle queue. */
+ n = workers->max_workers;
+ for (i = n-1; i >= 0; --i) {
+ status = activate_slot(workers, &workers->slots[i]);
+ }
+ /* the rest of the slots go on the free list */
+ for(i = n; i < workers->nslots; ++i) {
+ push_slot(&workers->free, &workers->slots[i]);
+ }
+ workers->dynamic = (workers->worker_count < workers->max_workers);
+ }
+ if (status == APR_SUCCESS) {
+ apr_pool_pre_cleanup_register(pool, workers, workers_pool_cleanup);
+ return workers;
+ }
+ return NULL;
+}
+
+apr_status_t h2_workers_register(h2_workers *workers, struct h2_mplx *m)
+{
+ apr_status_t status = h2_fifo_push(workers->mplxs, m);
+ wake_idle_worker(workers);
+ return status;
+}
+
+apr_status_t h2_workers_unregister(h2_workers *workers, struct h2_mplx *m)
+{
+ return h2_fifo_remove(workers->mplxs, m);
+}
diff --git a/modules/http2/h2_workers.h b/modules/http2/h2_workers.h
new file mode 100644
index 0000000..3561582
--- /dev/null
+++ b/modules/http2/h2_workers.h
@@ -0,0 +1,82 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __mod_h2__h2_workers__
+#define __mod_h2__h2_workers__
+
+/* Thread pool specific to executing h2_tasks. Has a minimum and maximum
+ * number of workers it creates. Starts with the minimum number of workers
+ * and adds more under load, reducing the number again when idle.
+ */
+struct apr_thread_mutex_t;
+struct apr_thread_cond_t;
+struct h2_mplx;
+struct h2_request;
+struct h2_task;
+struct h2_fifo;
+
+struct h2_slot;
+
+typedef struct h2_workers h2_workers;
+
+struct h2_workers {
+ server_rec *s;
+ apr_pool_t *pool;
+
+ int next_worker_id;
+ int min_workers;
+ int max_workers;
+ int max_idle_secs;
+
+ int aborted;
+ int dynamic;
+
+ apr_threadattr_t *thread_attr;
+ int nslots;
+ struct h2_slot *slots;
+
+ volatile apr_uint32_t worker_count;
+
+ struct h2_slot *free;
+ struct h2_slot *idle;
+ struct h2_slot *zombies;
+
+ struct h2_fifo *mplxs;
+
+ struct apr_thread_mutex_t *lock;
+};
+
+
+/* Create a worker pool with the given minimum and maximum number of
+ * threads.
+ */
+h2_workers *h2_workers_create(server_rec *s, apr_pool_t *pool,
+ int min_size, int max_size, int idle_secs);
+
+/**
+ * Registers an h2_mplx for task scheduling. If this h2_mplx runs
+ * out of tasks, it will automatically be unregistered. Should
+ * new tasks arrive, it needs to be registered again.
+ */
+apr_status_t h2_workers_register(h2_workers *workers, struct h2_mplx *m);
+
+/**
+ * Remove a h2_mplx from the worker registry.
+ */
+apr_status_t h2_workers_unregister(h2_workers *workers, struct h2_mplx *m);
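+
+/* Usage sketch (illustrative only, the numbers are arbitrary examples):
+ * create the worker pool once per child process and register an h2_mplx
+ * whenever it has tasks pending. It is unregistered automatically once it
+ * runs out of tasks and must be registered again when new tasks arrive.
+ *
+ *     h2_workers *workers = h2_workers_create(s, child_pool, 4, 25, 10);
+ *     h2_workers_register(workers, mplx);
+ */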
+
+#endif /* defined(__mod_h2__h2_workers__) */
diff --git a/modules/http2/mod_http2.c b/modules/http2/mod_http2.c
new file mode 100644
index 0000000..3d278e9
--- /dev/null
+++ b/modules/http2/mod_http2.c
@@ -0,0 +1,393 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <apr_optional.h>
+#include <apr_optional_hooks.h>
+#include <apr_strings.h>
+#include <apr_time.h>
+#include <apr_want.h>
+
+#include <httpd.h>
+#include <http_protocol.h>
+#include <http_request.h>
+#include <http_log.h>
+
+#include "mod_http2.h"
+
+#include <nghttp2/nghttp2.h>
+#include "h2_stream.h"
+#include "h2_alt_svc.h"
+#include "h2_conn.h"
+#include "h2_filter.h"
+#include "h2_task.h"
+#include "h2_session.h"
+#include "h2_config.h"
+#include "h2_ctx.h"
+#include "h2_h2.h"
+#include "h2_mplx.h"
+#include "h2_push.h"
+#include "h2_request.h"
+#include "h2_switch.h"
+#include "h2_version.h"
+
+
+static void h2_hooks(apr_pool_t *pool);
+
+AP_DECLARE_MODULE(http2) = {
+ STANDARD20_MODULE_STUFF,
+ h2_config_create_dir, /* func to create per dir config */
+ h2_config_merge_dir, /* func to merge per dir config */
+ h2_config_create_svr, /* func to create per server config */
+ h2_config_merge_svr, /* func to merge per server config */
+ h2_cmds, /* command handlers */
+ h2_hooks,
+#if defined(AP_MODULE_FLAG_NONE)
+ AP_MODULE_FLAG_ALWAYS_MERGE
+#endif
+};
+
+static int h2_h2_fixups(request_rec *r);
+
+typedef struct {
+ unsigned int change_prio : 1;
+ unsigned int sha256 : 1;
+ unsigned int inv_headers : 1;
+ unsigned int dyn_windows : 1;
+} features;
+
+static features myfeats;
+static int mpm_warned;
+
+/* The module initialization. Called once as an Apache hook, before any
+ * multi-processing (threaded or not) happens. It is typically called at
+ * least twice, see
+ * http://wiki.apache.org/httpd/ModuleLife
+ * Since the first run is just a "practice" run, we want to initialize for real
+ * only on the second try. This defeats the purpose of the first dry run a bit,
+ * since Apache wants to verify that a new configuration will actually work.
+ * So if we have trouble with the configuration, this will only be detected
+ * when the server has already switched.
+ * On the other hand, when we initialize libnghttp2, all sorts of crazy things
+ * might happen, and this might even eat threads. So, better to init on the real
+ * invocation, for now at least.
+ */
+static int h2_post_config(apr_pool_t *p, apr_pool_t *plog,
+ apr_pool_t *ptemp, server_rec *s)
+{
+ void *data = NULL;
+ const char *mod_h2_init_key = "mod_http2_init_counter";
+ nghttp2_info *ngh2;
+ apr_status_t status;
+
+ (void)plog;(void)ptemp;
+#ifdef H2_NG2_CHANGE_PRIO
+ myfeats.change_prio = 1;
+#endif
+#ifdef H2_OPENSSL
+ myfeats.sha256 = 1;
+#endif
+#ifdef H2_NG2_INVALID_HEADER_CB
+ myfeats.inv_headers = 1;
+#endif
+#ifdef H2_NG2_LOCAL_WIN_SIZE
+ myfeats.dyn_windows = 1;
+#endif
+
+ apr_pool_userdata_get(&data, mod_h2_init_key, s->process->pool);
+ if ( data == NULL ) {
+ ap_log_error( APLOG_MARK, APLOG_DEBUG, 0, s, APLOGNO(03089)
+ "initializing post config dry run");
+ apr_pool_userdata_set((const void *)1, mod_h2_init_key,
+ apr_pool_cleanup_null, s->process->pool);
+ return APR_SUCCESS;
+ }
+
+ ngh2 = nghttp2_version(0);
+ ap_log_error( APLOG_MARK, APLOG_INFO, 0, s, APLOGNO(03090)
+ "mod_http2 (v%s, feats=%s%s%s%s, nghttp2 %s), initializing...",
+ MOD_HTTP2_VERSION,
+ myfeats.change_prio? "CHPRIO" : "",
+ myfeats.sha256? "+SHA256" : "",
+ myfeats.inv_headers? "+INVHD" : "",
+ myfeats.dyn_windows? "+DWINS" : "",
+ ngh2? ngh2->version_str : "unknown");
+
+ switch (h2_conn_mpm_type()) {
+ case H2_MPM_SIMPLE:
+ case H2_MPM_MOTORZ:
+ case H2_MPM_NETWARE:
+ case H2_MPM_WINNT:
+ /* not sure we need something extra for those. */
+ break;
+ case H2_MPM_EVENT:
+ case H2_MPM_WORKER:
+ /* all fine, we know these ones */
+ break;
+ case H2_MPM_PREFORK:
+ /* ok, we now know how to handle that one */
+ break;
+ case H2_MPM_UNKNOWN:
+ /* ??? */
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s, APLOGNO(03091)
+ "post_config: mpm type unknown");
+ break;
+ }
+
+ if (!h2_mpm_supported() && !mpm_warned) {
+ mpm_warned = 1;
+ ap_log_error(APLOG_MARK, APLOG_WARNING, 0, s, APLOGNO(10034)
+ "The mpm module (%s) is not supported by mod_http2. The mpm determines "
+ "how things are processed in your server. HTTP/2 has more demands in "
+ "this regard and the currently selected mpm will just not do. "
+ "This is an advisory warning. Your server will continue to work, but "
+ "the HTTP/2 protocol will be inactive.",
+ h2_conn_mpm_name());
+ }
+
+ status = h2_h2_init(p, s);
+ if (status == APR_SUCCESS) {
+ status = h2_switch_init(p, s);
+ }
+ if (status == APR_SUCCESS) {
+ status = h2_task_init(p, s);
+ }
+
+ return status;
+}
+
+static char *http2_var_lookup(apr_pool_t *, server_rec *,
+ conn_rec *, request_rec *, char *name);
+static int http2_is_h2(conn_rec *);
+
+static apr_status_t http2_req_engine_push(const char *ngn_type,
+ request_rec *r,
+ http2_req_engine_init *einit)
+{
+ return h2_mplx_req_engine_push(ngn_type, r, einit);
+}
+
+static apr_status_t http2_req_engine_pull(h2_req_engine *ngn,
+ apr_read_type_e block,
+ int capacity,
+ request_rec **pr)
+{
+ return h2_mplx_req_engine_pull(ngn, block, capacity, pr);
+}
+
+static void http2_req_engine_done(h2_req_engine *ngn, conn_rec *r_conn,
+ apr_status_t status)
+{
+ h2_mplx_req_engine_done(ngn, r_conn, status);
+}
+
+static void http2_get_num_workers(server_rec *s, int *minw, int *maxw)
+{
+ h2_get_num_workers(s, minw, maxw);
+}
+
+/* Runs once per created child process. Perform any process-related
+ * initialization here.
+ */
+static void h2_child_init(apr_pool_t *pool, server_rec *s)
+{
+ /* Set up our connection processing */
+ apr_status_t status = h2_conn_child_init(pool, s);
+ if (status != APR_SUCCESS) {
+ ap_log_error(APLOG_MARK, APLOG_ERR, status, s,
+ APLOGNO(02949) "initializing connection handling");
+ }
+
+}
+
+/* Install this module into the apache2 infrastructure.
+ */
+static void h2_hooks(apr_pool_t *pool)
+{
+ static const char *const mod_ssl[] = { "mod_ssl.c", NULL};
+
+ APR_REGISTER_OPTIONAL_FN(http2_is_h2);
+ APR_REGISTER_OPTIONAL_FN(http2_var_lookup);
+ APR_REGISTER_OPTIONAL_FN(http2_req_engine_push);
+ APR_REGISTER_OPTIONAL_FN(http2_req_engine_pull);
+ APR_REGISTER_OPTIONAL_FN(http2_req_engine_done);
+ APR_REGISTER_OPTIONAL_FN(http2_get_num_workers);
+
+ ap_log_perror(APLOG_MARK, APLOG_TRACE1, 0, pool, "installing hooks");
+
+ /* Run once after configuration is set, but before mpm children initialize.
+ */
+ ap_hook_post_config(h2_post_config, mod_ssl, NULL, APR_HOOK_MIDDLE);
+
+ /* Run once after a child process has been created.
+ */
+ ap_hook_child_init(h2_child_init, NULL, NULL, APR_HOOK_MIDDLE);
+
+ h2_h2_register_hooks();
+ h2_switch_register_hooks();
+ h2_task_register_hooks();
+
+ h2_alt_svc_register_hooks();
+
+    /* Set up the subprocess environment for certain variables
+ */
+ ap_hook_fixups(h2_h2_fixups, NULL,NULL, APR_HOOK_MIDDLE);
+
+ /* test http2 connection status handler */
+ ap_hook_handler(h2_filter_h2_status_handler, NULL, NULL, APR_HOOK_MIDDLE);
+}
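+
+/* Usage sketch (illustrative only): a third-party module can pick up the
+ * optional functions registered above, assuming it includes mod_http2.h
+ * for the declarations. The retrieval wiring and handler name are
+ * hypothetical.
+ *
+ *     static APR_OPTIONAL_FN_TYPE(http2_is_h2) *is_h2_fn;
+ *
+ *     static void my_retrieve_optional_fns(void)
+ *     {
+ *         is_h2_fn = APR_RETRIEVE_OPTIONAL_FN(http2_is_h2);
+ *     }
+ *
+ *     static int my_handler(request_rec *r)
+ *     {
+ *         if (is_h2_fn && is_h2_fn(r->connection)) {
+ *             ap_log_rerror(APLOG_MARK, APLOG_TRACE1, 0, r,
+ *                           "request arrived on an HTTP/2 connection");
+ *         }
+ *         return DECLINED;
+ *     }
+ */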
+
+static const char *val_HTTP2(apr_pool_t *p, server_rec *s,
+ conn_rec *c, request_rec *r, h2_ctx *ctx)
+{
+ return ctx? "on" : "off";
+}
+
+static const char *val_H2_PUSH(apr_pool_t *p, server_rec *s,
+ conn_rec *c, request_rec *r, h2_ctx *ctx)
+{
+ if (ctx) {
+ if (r) {
+ h2_task *task = h2_ctx_get_task(ctx);
+ if (task) {
+ h2_stream *stream = h2_mplx_stream_get(task->mplx, task->stream_id);
+ if (stream && stream->push_policy != H2_PUSH_NONE) {
+ return "on";
+ }
+ }
+ }
+ else if (c && h2_session_push_enabled(ctx->session)) {
+ return "on";
+ }
+ }
+ else if (s) {
+ const h2_config *cfg = h2_config_sget(s);
+ if (cfg && h2_config_geti(cfg, H2_CONF_PUSH)) {
+ return "on";
+ }
+ }
+ return "off";
+}
+
+static const char *val_H2_PUSHED(apr_pool_t *p, server_rec *s,
+ conn_rec *c, request_rec *r, h2_ctx *ctx)
+{
+ if (ctx) {
+ h2_task *task = h2_ctx_get_task(ctx);
+ if (task && !H2_STREAM_CLIENT_INITIATED(task->stream_id)) {
+ return "PUSHED";
+ }
+ }
+ return "";
+}
+
+static const char *val_H2_PUSHED_ON(apr_pool_t *p, server_rec *s,
+ conn_rec *c, request_rec *r, h2_ctx *ctx)
+{
+ if (ctx) {
+ h2_task *task = h2_ctx_get_task(ctx);
+ if (task && !H2_STREAM_CLIENT_INITIATED(task->stream_id)) {
+ h2_stream *stream = h2_mplx_stream_get(task->mplx, task->stream_id);
+ if (stream) {
+ return apr_itoa(p, stream->initiated_on);
+ }
+ }
+ }
+ return "";
+}
+
+static const char *val_H2_STREAM_TAG(apr_pool_t *p, server_rec *s,
+ conn_rec *c, request_rec *r, h2_ctx *ctx)
+{
+ if (ctx) {
+ h2_task *task = h2_ctx_get_task(ctx);
+ if (task) {
+ return task->id;
+ }
+ }
+ return "";
+}
+
+static const char *val_H2_STREAM_ID(apr_pool_t *p, server_rec *s,
+ conn_rec *c, request_rec *r, h2_ctx *ctx)
+{
+ const char *cp = val_H2_STREAM_TAG(p, s, c, r, ctx);
+ if (cp && (cp = ap_strchr_c(cp, '-'))) {
+ return ++cp;
+ }
+ return NULL;
+}
+
+typedef const char *h2_var_lookup(apr_pool_t *p, server_rec *s,
+ conn_rec *c, request_rec *r, h2_ctx *ctx);
+typedef struct h2_var_def {
+ const char *name;
+ h2_var_lookup *lookup;
+ unsigned int subprocess : 1; /* should be set in r->subprocess_env */
+} h2_var_def;
+
+static h2_var_def H2_VARS[] = {
+ { "HTTP2", val_HTTP2, 1 },
+ { "H2PUSH", val_H2_PUSH, 1 },
+ { "H2_PUSH", val_H2_PUSH, 1 },
+ { "H2_PUSHED", val_H2_PUSHED, 1 },
+ { "H2_PUSHED_ON", val_H2_PUSHED_ON, 1 },
+ { "H2_STREAM_ID", val_H2_STREAM_ID, 1 },
+ { "H2_STREAM_TAG", val_H2_STREAM_TAG, 1 },
+};
+
+#ifndef H2_ALEN
+#define H2_ALEN(a) (sizeof(a)/sizeof((a)[0]))
+#endif
+
+
+static int http2_is_h2(conn_rec *c)
+{
+ return h2_ctx_get(c->master? c->master : c, 0) != NULL;
+}
+
+static char *http2_var_lookup(apr_pool_t *p, server_rec *s,
+ conn_rec *c, request_rec *r, char *name)
+{
+ int i;
+    /* If the # of vars grows, we need to put the definitions in a hash */
+ for (i = 0; i < H2_ALEN(H2_VARS); ++i) {
+ h2_var_def *vdef = &H2_VARS[i];
+ if (!strcmp(vdef->name, name)) {
+ h2_ctx *ctx = (r? h2_ctx_rget(r) :
+ h2_ctx_get(c->master? c->master : c, 0));
+ return (char *)vdef->lookup(p, s, c, r, ctx);
+ }
+ }
+ return (char*)"";
+}
+
+static int h2_h2_fixups(request_rec *r)
+{
+ if (r->connection->master) {
+ h2_ctx *ctx = h2_ctx_rget(r);
+ int i;
+
+ for (i = 0; ctx && i < H2_ALEN(H2_VARS); ++i) {
+ h2_var_def *vdef = &H2_VARS[i];
+ if (vdef->subprocess) {
+ apr_table_setn(r->subprocess_env, vdef->name,
+ vdef->lookup(r->pool, r->server, r->connection,
+ r, ctx));
+ }
+ }
+ }
+ return DECLINED;
+}
diff --git a/modules/http2/mod_http2.dep b/modules/http2/mod_http2.dep
new file mode 100644
index 0000000..52f2286
--- /dev/null
+++ b/modules/http2/mod_http2.dep
@@ -0,0 +1,1433 @@
+# Microsoft Developer Studio Generated Dependency File, included by mod_http2.mak
+
+./h2_alt_svc.c : \
+ "..\..\include\ap_config.h"\
+ "..\..\include\ap_config_layout.h"\
+ "..\..\include\ap_expr.h"\
+ "..\..\include\ap_hooks.h"\
+ "..\..\include\ap_mmn.h"\
+ "..\..\include\ap_regex.h"\
+ "..\..\include\ap_release.h"\
+ "..\..\include\apache_noprobes.h"\
+ "..\..\include\http_config.h"\
+ "..\..\include\http_connection.h"\
+ "..\..\include\http_core.h"\
+ "..\..\include\http_log.h"\
+ "..\..\include\http_protocol.h"\
+ "..\..\include\httpd.h"\
+ "..\..\include\os.h"\
+ "..\..\include\util_cfgtree.h"\
+ "..\..\include\util_filter.h"\
+ "..\..\srclib\apr-util\include\apr_buckets.h"\
+ "..\..\srclib\apr-util\include\apr_hooks.h"\
+ "..\..\srclib\apr-util\include\apr_optional.h"\
+ "..\..\srclib\apr-util\include\apr_optional_hooks.h"\
+ "..\..\srclib\apr-util\include\apr_uri.h"\
+ "..\..\srclib\apr-util\include\apu.h"\
+ "..\..\srclib\apr\include\apr.h"\
+ "..\..\srclib\apr\include\apr_allocator.h"\
+ "..\..\srclib\apr\include\apr_dso.h"\
+ "..\..\srclib\apr\include\apr_errno.h"\
+ "..\..\srclib\apr\include\apr_file_info.h"\
+ "..\..\srclib\apr\include\apr_file_io.h"\
+ "..\..\srclib\apr\include\apr_general.h"\
+ "..\..\srclib\apr\include\apr_global_mutex.h"\
+ "..\..\srclib\apr\include\apr_hash.h"\
+ "..\..\srclib\apr\include\apr_inherit.h"\
+ "..\..\srclib\apr\include\apr_mmap.h"\
+ "..\..\srclib\apr\include\apr_network_io.h"\
+ "..\..\srclib\apr\include\apr_poll.h"\
+ "..\..\srclib\apr\include\apr_pools.h"\
+ "..\..\srclib\apr\include\apr_portable.h"\
+ "..\..\srclib\apr\include\apr_proc_mutex.h"\
+ "..\..\srclib\apr\include\apr_ring.h"\
+ "..\..\srclib\apr\include\apr_shm.h"\
+ "..\..\srclib\apr\include\apr_strings.h"\
+ "..\..\srclib\apr\include\apr_tables.h"\
+ "..\..\srclib\apr\include\apr_thread_mutex.h"\
+ "..\..\srclib\apr\include\apr_thread_proc.h"\
+ "..\..\srclib\apr\include\apr_time.h"\
+ "..\..\srclib\apr\include\apr_user.h"\
+ "..\..\srclib\apr\include\apr_want.h"\
+ ".\h2_alt_svc.h"\
+ ".\h2_config.h"\
+ ".\h2_ctx.h"\
+ ".\h2_h2.h"\
+ ".\h2_private.h"\
+ ".\h2_util.h"\
+
+
+./h2_bucket_eos.c : \
+ "..\..\include\ap_config.h"\
+ "..\..\include\ap_config_layout.h"\
+ "..\..\include\ap_expr.h"\
+ "..\..\include\ap_hooks.h"\
+ "..\..\include\ap_mmn.h"\
+ "..\..\include\ap_regex.h"\
+ "..\..\include\ap_release.h"\
+ "..\..\include\apache_noprobes.h"\
+ "..\..\include\http_config.h"\
+ "..\..\include\http_connection.h"\
+ "..\..\include\http_core.h"\
+ "..\..\include\http_log.h"\
+ "..\..\include\httpd.h"\
+ "..\..\include\os.h"\
+ "..\..\include\util_cfgtree.h"\
+ "..\..\include\util_filter.h"\
+ "..\..\srclib\apr-util\include\apr_buckets.h"\
+ "..\..\srclib\apr-util\include\apr_hooks.h"\
+ "..\..\srclib\apr-util\include\apr_optional.h"\
+ "..\..\srclib\apr-util\include\apr_optional_hooks.h"\
+ "..\..\srclib\apr-util\include\apr_queue.h"\
+ "..\..\srclib\apr-util\include\apr_uri.h"\
+ "..\..\srclib\apr-util\include\apu.h"\
+ "..\..\srclib\apr\include\apr.h"\
+ "..\..\srclib\apr\include\apr_allocator.h"\
+ "..\..\srclib\apr\include\apr_errno.h"\
+ "..\..\srclib\apr\include\apr_file_info.h"\
+ "..\..\srclib\apr\include\apr_file_io.h"\
+ "..\..\srclib\apr\include\apr_general.h"\
+ "..\..\srclib\apr\include\apr_hash.h"\
+ "..\..\srclib\apr\include\apr_inherit.h"\
+ "..\..\srclib\apr\include\apr_mmap.h"\
+ "..\..\srclib\apr\include\apr_network_io.h"\
+ "..\..\srclib\apr\include\apr_poll.h"\
+ "..\..\srclib\apr\include\apr_pools.h"\
+ "..\..\srclib\apr\include\apr_ring.h"\
+ "..\..\srclib\apr\include\apr_tables.h"\
+ "..\..\srclib\apr\include\apr_thread_mutex.h"\
+ "..\..\srclib\apr\include\apr_thread_proc.h"\
+ "..\..\srclib\apr\include\apr_time.h"\
+ "..\..\srclib\apr\include\apr_user.h"\
+ "..\..\srclib\apr\include\apr_want.h"\
+ ".\h2.h"\
+ ".\h2_bucket_eos.h"\
+ ".\h2_mplx.h"\
+ ".\h2_private.h"\
+ ".\h2_stream.h"\
+
+
+./h2_config.c : \
+ "..\..\include\ap_config.h"\
+ "..\..\include\ap_config_layout.h"\
+ "..\..\include\ap_expr.h"\
+ "..\..\include\ap_hooks.h"\
+ "..\..\include\ap_mmn.h"\
+ "..\..\include\ap_mpm.h"\
+ "..\..\include\ap_regex.h"\
+ "..\..\include\ap_release.h"\
+ "..\..\include\apache_noprobes.h"\
+ "..\..\include\http_config.h"\
+ "..\..\include\http_core.h"\
+ "..\..\include\http_log.h"\
+ "..\..\include\http_vhost.h"\
+ "..\..\include\httpd.h"\
+ "..\..\include\os.h"\
+ "..\..\include\scoreboard.h"\
+ "..\..\include\util_cfgtree.h"\
+ "..\..\include\util_filter.h"\
+ "..\..\srclib\apr-util\include\apr_buckets.h"\
+ "..\..\srclib\apr-util\include\apr_hooks.h"\
+ "..\..\srclib\apr-util\include\apr_optional.h"\
+ "..\..\srclib\apr-util\include\apr_optional_hooks.h"\
+ "..\..\srclib\apr-util\include\apr_uri.h"\
+ "..\..\srclib\apr-util\include\apu.h"\
+ "..\..\srclib\apr\include\apr.h"\
+ "..\..\srclib\apr\include\apr_allocator.h"\
+ "..\..\srclib\apr\include\apr_dso.h"\
+ "..\..\srclib\apr\include\apr_errno.h"\
+ "..\..\srclib\apr\include\apr_file_info.h"\
+ "..\..\srclib\apr\include\apr_file_io.h"\
+ "..\..\srclib\apr\include\apr_general.h"\
+ "..\..\srclib\apr\include\apr_global_mutex.h"\
+ "..\..\srclib\apr\include\apr_hash.h"\
+ "..\..\srclib\apr\include\apr_inherit.h"\
+ "..\..\srclib\apr\include\apr_lib.h"\
+ "..\..\srclib\apr\include\apr_mmap.h"\
+ "..\..\srclib\apr\include\apr_network_io.h"\
+ "..\..\srclib\apr\include\apr_poll.h"\
+ "..\..\srclib\apr\include\apr_pools.h"\
+ "..\..\srclib\apr\include\apr_portable.h"\
+ "..\..\srclib\apr\include\apr_proc_mutex.h"\
+ "..\..\srclib\apr\include\apr_ring.h"\
+ "..\..\srclib\apr\include\apr_shm.h"\
+ "..\..\srclib\apr\include\apr_strings.h"\
+ "..\..\srclib\apr\include\apr_tables.h"\
+ "..\..\srclib\apr\include\apr_thread_mutex.h"\
+ "..\..\srclib\apr\include\apr_thread_proc.h"\
+ "..\..\srclib\apr\include\apr_time.h"\
+ "..\..\srclib\apr\include\apr_user.h"\
+ "..\..\srclib\apr\include\apr_want.h"\
+ ".\h2.h"\
+ ".\h2_alt_svc.h"\
+ ".\h2_config.h"\
+ ".\h2_conn.h"\
+ ".\h2_ctx.h"\
+ ".\h2_h2.h"\
+ ".\h2_private.h"\
+
+
+./h2_conn.c : \
+ "..\..\include\ap_config.h"\
+ "..\..\include\ap_config_layout.h"\
+ "..\..\include\ap_expr.h"\
+ "..\..\include\ap_hooks.h"\
+ "..\..\include\ap_mmn.h"\
+ "..\..\include\ap_mpm.h"\
+ "..\..\include\ap_regex.h"\
+ "..\..\include\ap_release.h"\
+ "..\..\include\apache_noprobes.h"\
+ "..\..\include\http_config.h"\
+ "..\..\include\http_connection.h"\
+ "..\..\include\http_core.h"\
+ "..\..\include\http_log.h"\
+ "..\..\include\http_protocol.h"\
+ "..\..\include\http_request.h"\
+ "..\..\include\httpd.h"\
+ "..\..\include\os.h"\
+ "..\..\include\scoreboard.h"\
+ "..\..\include\util_cfgtree.h"\
+ "..\..\include\util_filter.h"\
+ "..\..\srclib\apr-util\include\apr_buckets.h"\
+ "..\..\srclib\apr-util\include\apr_hooks.h"\
+ "..\..\srclib\apr-util\include\apr_optional.h"\
+ "..\..\srclib\apr-util\include\apr_optional_hooks.h"\
+ "..\..\srclib\apr-util\include\apr_queue.h"\
+ "..\..\srclib\apr-util\include\apr_uri.h"\
+ "..\..\srclib\apr-util\include\apu.h"\
+ "..\..\srclib\apr\include\apr.h"\
+ "..\..\srclib\apr\include\apr_allocator.h"\
+ "..\..\srclib\apr\include\apr_dso.h"\
+ "..\..\srclib\apr\include\apr_errno.h"\
+ "..\..\srclib\apr\include\apr_file_info.h"\
+ "..\..\srclib\apr\include\apr_file_io.h"\
+ "..\..\srclib\apr\include\apr_general.h"\
+ "..\..\srclib\apr\include\apr_global_mutex.h"\
+ "..\..\srclib\apr\include\apr_hash.h"\
+ "..\..\srclib\apr\include\apr_inherit.h"\
+ "..\..\srclib\apr\include\apr_mmap.h"\
+ "..\..\srclib\apr\include\apr_network_io.h"\
+ "..\..\srclib\apr\include\apr_poll.h"\
+ "..\..\srclib\apr\include\apr_pools.h"\
+ "..\..\srclib\apr\include\apr_portable.h"\
+ "..\..\srclib\apr\include\apr_proc_mutex.h"\
+ "..\..\srclib\apr\include\apr_ring.h"\
+ "..\..\srclib\apr\include\apr_shm.h"\
+ "..\..\srclib\apr\include\apr_tables.h"\
+ "..\..\srclib\apr\include\apr_thread_mutex.h"\
+ "..\..\srclib\apr\include\apr_thread_proc.h"\
+ "..\..\srclib\apr\include\apr_time.h"\
+ "..\..\srclib\apr\include\apr_user.h"\
+ "..\..\srclib\apr\include\apr_want.h"\
+ ".\h2.h"\
+ ".\h2_config.h"\
+ ".\h2_conn.h"\
+ ".\h2_conn_io.h"\
+ ".\h2_ctx.h"\
+ ".\h2_filter.h"\
+ ".\h2_h2.h"\
+ ".\h2_mplx.h"\
+ ".\h2_private.h"\
+ ".\h2_session.h"\
+ ".\h2_stream.h"\
+ ".\h2_task.h"\
+ ".\h2_version.h"\
+ ".\h2_workers.h"\
+
+
+./h2_conn_io.c : \
+ "..\..\include\ap_config.h"\
+ "..\..\include\ap_config_layout.h"\
+ "..\..\include\ap_expr.h"\
+ "..\..\include\ap_hooks.h"\
+ "..\..\include\ap_mmn.h"\
+ "..\..\include\ap_mpm.h"\
+ "..\..\include\ap_regex.h"\
+ "..\..\include\ap_release.h"\
+ "..\..\include\apache_noprobes.h"\
+ "..\..\include\http_config.h"\
+ "..\..\include\http_connection.h"\
+ "..\..\include\http_core.h"\
+ "..\..\include\http_log.h"\
+ "..\..\include\http_request.h"\
+ "..\..\include\httpd.h"\
+ "..\..\include\os.h"\
+ "..\..\include\scoreboard.h"\
+ "..\..\include\util_cfgtree.h"\
+ "..\..\include\util_filter.h"\
+ "..\..\srclib\apr-util\include\apr_buckets.h"\
+ "..\..\srclib\apr-util\include\apr_hooks.h"\
+ "..\..\srclib\apr-util\include\apr_optional.h"\
+ "..\..\srclib\apr-util\include\apr_optional_hooks.h"\
+ "..\..\srclib\apr-util\include\apr_uri.h"\
+ "..\..\srclib\apr-util\include\apu.h"\
+ "..\..\srclib\apr\include\apr.h"\
+ "..\..\srclib\apr\include\apr_allocator.h"\
+ "..\..\srclib\apr\include\apr_dso.h"\
+ "..\..\srclib\apr\include\apr_errno.h"\
+ "..\..\srclib\apr\include\apr_file_info.h"\
+ "..\..\srclib\apr\include\apr_file_io.h"\
+ "..\..\srclib\apr\include\apr_general.h"\
+ "..\..\srclib\apr\include\apr_global_mutex.h"\
+ "..\..\srclib\apr\include\apr_hash.h"\
+ "..\..\srclib\apr\include\apr_inherit.h"\
+ "..\..\srclib\apr\include\apr_mmap.h"\
+ "..\..\srclib\apr\include\apr_network_io.h"\
+ "..\..\srclib\apr\include\apr_poll.h"\
+ "..\..\srclib\apr\include\apr_pools.h"\
+ "..\..\srclib\apr\include\apr_portable.h"\
+ "..\..\srclib\apr\include\apr_proc_mutex.h"\
+ "..\..\srclib\apr\include\apr_ring.h"\
+ "..\..\srclib\apr\include\apr_shm.h"\
+ "..\..\srclib\apr\include\apr_strings.h"\
+ "..\..\srclib\apr\include\apr_tables.h"\
+ "..\..\srclib\apr\include\apr_thread_mutex.h"\
+ "..\..\srclib\apr\include\apr_thread_proc.h"\
+ "..\..\srclib\apr\include\apr_time.h"\
+ "..\..\srclib\apr\include\apr_user.h"\
+ "..\..\srclib\apr\include\apr_want.h"\
+ ".\h2.h"\
+ ".\h2_bucket_eos.h"\
+ ".\h2_config.h"\
+ ".\h2_conn_io.h"\
+ ".\h2_h2.h"\
+ ".\h2_private.h"\
+ ".\h2_session.h"\
+ ".\h2_util.h"\
+
+
+./h2_ctx.c : \
+ "..\..\include\ap_config.h"\
+ "..\..\include\ap_config_layout.h"\
+ "..\..\include\ap_expr.h"\
+ "..\..\include\ap_hooks.h"\
+ "..\..\include\ap_mmn.h"\
+ "..\..\include\ap_regex.h"\
+ "..\..\include\ap_release.h"\
+ "..\..\include\apache_noprobes.h"\
+ "..\..\include\http_config.h"\
+ "..\..\include\http_core.h"\
+ "..\..\include\httpd.h"\
+ "..\..\include\os.h"\
+ "..\..\include\util_cfgtree.h"\
+ "..\..\include\util_filter.h"\
+ "..\..\srclib\apr-util\include\apr_buckets.h"\
+ "..\..\srclib\apr-util\include\apr_hooks.h"\
+ "..\..\srclib\apr-util\include\apr_optional.h"\
+ "..\..\srclib\apr-util\include\apr_optional_hooks.h"\
+ "..\..\srclib\apr-util\include\apr_uri.h"\
+ "..\..\srclib\apr-util\include\apu.h"\
+ "..\..\srclib\apr\include\apr.h"\
+ "..\..\srclib\apr\include\apr_allocator.h"\
+ "..\..\srclib\apr\include\apr_errno.h"\
+ "..\..\srclib\apr\include\apr_file_info.h"\
+ "..\..\srclib\apr\include\apr_file_io.h"\
+ "..\..\srclib\apr\include\apr_general.h"\
+ "..\..\srclib\apr\include\apr_hash.h"\
+ "..\..\srclib\apr\include\apr_inherit.h"\
+ "..\..\srclib\apr\include\apr_mmap.h"\
+ "..\..\srclib\apr\include\apr_network_io.h"\
+ "..\..\srclib\apr\include\apr_poll.h"\
+ "..\..\srclib\apr\include\apr_pools.h"\
+ "..\..\srclib\apr\include\apr_ring.h"\
+ "..\..\srclib\apr\include\apr_tables.h"\
+ "..\..\srclib\apr\include\apr_thread_mutex.h"\
+ "..\..\srclib\apr\include\apr_thread_proc.h"\
+ "..\..\srclib\apr\include\apr_time.h"\
+ "..\..\srclib\apr\include\apr_user.h"\
+ "..\..\srclib\apr\include\apr_want.h"\
+ ".\h2.h"\
+ ".\h2_conn_io.h"\
+ ".\h2_ctx.h"\
+ ".\h2_private.h"\
+ ".\h2_session.h"\
+ ".\h2_task.h"\
+
+
+./h2_filter.c : \
+ "..\..\include\ap_config.h"\
+ "..\..\include\ap_config_layout.h"\
+ "..\..\include\ap_expr.h"\
+ "..\..\include\ap_hooks.h"\
+ "..\..\include\ap_mmn.h"\
+ "..\..\include\ap_regex.h"\
+ "..\..\include\ap_release.h"\
+ "..\..\include\apache_noprobes.h"\
+ "..\..\include\http_config.h"\
+ "..\..\include\http_connection.h"\
+ "..\..\include\http_core.h"\
+ "..\..\include\http_log.h"\
+ "..\..\include\httpd.h"\
+ "..\..\include\os.h"\
+ "..\..\include\scoreboard.h"\
+ "..\..\include\util_cfgtree.h"\
+ "..\..\include\util_filter.h"\
+ "..\..\srclib\apr-util\include\apr_buckets.h"\
+ "..\..\srclib\apr-util\include\apr_hooks.h"\
+ "..\..\srclib\apr-util\include\apr_optional.h"\
+ "..\..\srclib\apr-util\include\apr_optional_hooks.h"\
+ "..\..\srclib\apr-util\include\apr_queue.h"\
+ "..\..\srclib\apr-util\include\apr_uri.h"\
+ "..\..\srclib\apr-util\include\apu.h"\
+ "..\..\srclib\apr\include\apr.h"\
+ "..\..\srclib\apr\include\apr_allocator.h"\
+ "..\..\srclib\apr\include\apr_dso.h"\
+ "..\..\srclib\apr\include\apr_errno.h"\
+ "..\..\srclib\apr\include\apr_file_info.h"\
+ "..\..\srclib\apr\include\apr_file_io.h"\
+ "..\..\srclib\apr\include\apr_general.h"\
+ "..\..\srclib\apr\include\apr_global_mutex.h"\
+ "..\..\srclib\apr\include\apr_hash.h"\
+ "..\..\srclib\apr\include\apr_inherit.h"\
+ "..\..\srclib\apr\include\apr_mmap.h"\
+ "..\..\srclib\apr\include\apr_network_io.h"\
+ "..\..\srclib\apr\include\apr_poll.h"\
+ "..\..\srclib\apr\include\apr_pools.h"\
+ "..\..\srclib\apr\include\apr_portable.h"\
+ "..\..\srclib\apr\include\apr_proc_mutex.h"\
+ "..\..\srclib\apr\include\apr_ring.h"\
+ "..\..\srclib\apr\include\apr_shm.h"\
+ "..\..\srclib\apr\include\apr_tables.h"\
+ "..\..\srclib\apr\include\apr_thread_mutex.h"\
+ "..\..\srclib\apr\include\apr_thread_proc.h"\
+ "..\..\srclib\apr\include\apr_time.h"\
+ "..\..\srclib\apr\include\apr_user.h"\
+ "..\..\srclib\apr\include\apr_want.h"\
+ ".\h2.h"\
+ ".\h2_conn_io.h"\
+ ".\h2_ctx.h"\
+ ".\h2_filter.h"\
+ ".\h2_mplx.h"\
+ ".\h2_private.h"\
+ ".\h2_push.h"\
+ ".\h2_request.h"\
+ ".\h2_session.h"\
+ ".\h2_stream.h"\
+ ".\h2_task.h"\
+ ".\h2_util.h"\
+ ".\h2_version.h"\
+
+
+./h2_from_h1.c : \
+ "..\..\include\ap_config.h"\
+ "..\..\include\ap_config_layout.h"\
+ "..\..\include\ap_expr.h"\
+ "..\..\include\ap_hooks.h"\
+ "..\..\include\ap_mmn.h"\
+ "..\..\include\ap_regex.h"\
+ "..\..\include\ap_release.h"\
+ "..\..\include\apache_noprobes.h"\
+ "..\..\include\http_config.h"\
+ "..\..\include\http_connection.h"\
+ "..\..\include\http_core.h"\
+ "..\..\include\http_log.h"\
+ "..\..\include\http_protocol.h"\
+ "..\..\include\http_request.h"\
+ "..\..\include\httpd.h"\
+ "..\..\include\os.h"\
+ "..\..\include\util_cfgtree.h"\
+ "..\..\include\util_filter.h"\
+ "..\..\include\util_time.h"\
+ "..\..\srclib\apr-util\include\apr_buckets.h"\
+ "..\..\srclib\apr-util\include\apr_hooks.h"\
+ "..\..\srclib\apr-util\include\apr_optional.h"\
+ "..\..\srclib\apr-util\include\apr_optional_hooks.h"\
+ "..\..\srclib\apr-util\include\apr_uri.h"\
+ "..\..\srclib\apr-util\include\apu.h"\
+ "..\..\srclib\apr\include\apr.h"\
+ "..\..\srclib\apr\include\apr_allocator.h"\
+ "..\..\srclib\apr\include\apr_dso.h"\
+ "..\..\srclib\apr\include\apr_errno.h"\
+ "..\..\srclib\apr\include\apr_file_info.h"\
+ "..\..\srclib\apr\include\apr_file_io.h"\
+ "..\..\srclib\apr\include\apr_general.h"\
+ "..\..\srclib\apr\include\apr_global_mutex.h"\
+ "..\..\srclib\apr\include\apr_hash.h"\
+ "..\..\srclib\apr\include\apr_inherit.h"\
+ "..\..\srclib\apr\include\apr_lib.h"\
+ "..\..\srclib\apr\include\apr_mmap.h"\
+ "..\..\srclib\apr\include\apr_network_io.h"\
+ "..\..\srclib\apr\include\apr_poll.h"\
+ "..\..\srclib\apr\include\apr_pools.h"\
+ "..\..\srclib\apr\include\apr_portable.h"\
+ "..\..\srclib\apr\include\apr_proc_mutex.h"\
+ "..\..\srclib\apr\include\apr_ring.h"\
+ "..\..\srclib\apr\include\apr_shm.h"\
+ "..\..\srclib\apr\include\apr_strings.h"\
+ "..\..\srclib\apr\include\apr_tables.h"\
+ "..\..\srclib\apr\include\apr_thread_mutex.h"\
+ "..\..\srclib\apr\include\apr_thread_proc.h"\
+ "..\..\srclib\apr\include\apr_time.h"\
+ "..\..\srclib\apr\include\apr_user.h"\
+ "..\..\srclib\apr\include\apr_want.h"\
+ ".\h2.h"\
+ ".\h2_from_h1.h"\
+ ".\h2_private.h"\
+ ".\h2_task.h"\
+ ".\h2_util.h"\
+
+
+./h2_h2.c : \
+ "..\..\include\ap_config.h"\
+ "..\..\include\ap_config_layout.h"\
+ "..\..\include\ap_expr.h"\
+ "..\..\include\ap_hooks.h"\
+ "..\..\include\ap_mmn.h"\
+ "..\..\include\ap_regex.h"\
+ "..\..\include\ap_release.h"\
+ "..\..\include\apache_noprobes.h"\
+ "..\..\include\http_config.h"\
+ "..\..\include\http_connection.h"\
+ "..\..\include\http_core.h"\
+ "..\..\include\http_log.h"\
+ "..\..\include\http_protocol.h"\
+ "..\..\include\http_request.h"\
+ "..\..\include\httpd.h"\
+ "..\..\include\os.h"\
+ "..\..\include\util_cfgtree.h"\
+ "..\..\include\util_filter.h"\
+ "..\..\srclib\apr-util\include\apr_buckets.h"\
+ "..\..\srclib\apr-util\include\apr_hooks.h"\
+ "..\..\srclib\apr-util\include\apr_optional.h"\
+ "..\..\srclib\apr-util\include\apr_optional_hooks.h"\
+ "..\..\srclib\apr-util\include\apr_uri.h"\
+ "..\..\srclib\apr-util\include\apu.h"\
+ "..\..\srclib\apr\include\apr.h"\
+ "..\..\srclib\apr\include\apr_allocator.h"\
+ "..\..\srclib\apr\include\apr_dso.h"\
+ "..\..\srclib\apr\include\apr_errno.h"\
+ "..\..\srclib\apr\include\apr_file_info.h"\
+ "..\..\srclib\apr\include\apr_file_io.h"\
+ "..\..\srclib\apr\include\apr_general.h"\
+ "..\..\srclib\apr\include\apr_global_mutex.h"\
+ "..\..\srclib\apr\include\apr_hash.h"\
+ "..\..\srclib\apr\include\apr_inherit.h"\
+ "..\..\srclib\apr\include\apr_mmap.h"\
+ "..\..\srclib\apr\include\apr_network_io.h"\
+ "..\..\srclib\apr\include\apr_poll.h"\
+ "..\..\srclib\apr\include\apr_pools.h"\
+ "..\..\srclib\apr\include\apr_portable.h"\
+ "..\..\srclib\apr\include\apr_proc_mutex.h"\
+ "..\..\srclib\apr\include\apr_ring.h"\
+ "..\..\srclib\apr\include\apr_shm.h"\
+ "..\..\srclib\apr\include\apr_strings.h"\
+ "..\..\srclib\apr\include\apr_tables.h"\
+ "..\..\srclib\apr\include\apr_thread_mutex.h"\
+ "..\..\srclib\apr\include\apr_thread_proc.h"\
+ "..\..\srclib\apr\include\apr_time.h"\
+ "..\..\srclib\apr\include\apr_user.h"\
+ "..\..\srclib\apr\include\apr_want.h"\
+ "..\ssl\mod_ssl.h"\
+ ".\h2.h"\
+ ".\h2_config.h"\
+ ".\h2_conn.h"\
+ ".\h2_conn_io.h"\
+ ".\h2_ctx.h"\
+ ".\h2_h2.h"\
+ ".\h2_private.h"\
+ ".\h2_request.h"\
+ ".\h2_session.h"\
+ ".\h2_stream.h"\
+ ".\h2_task.h"\
+ ".\h2_util.h"\
+ ".\mod_http2.h"\
+
+
+./h2_int_queue.c : \
+ "..\..\srclib\apr\include\apr.h"\
+ "..\..\srclib\apr\include\apr_allocator.h"\
+ "..\..\srclib\apr\include\apr_errno.h"\
+ "..\..\srclib\apr\include\apr_general.h"\
+ "..\..\srclib\apr\include\apr_pools.h"\
+ "..\..\srclib\apr\include\apr_thread_mutex.h"\
+ "..\..\srclib\apr\include\apr_want.h"\
+
+
+./h2_io.c : \
+ "..\..\include\ap_config.h"\
+ "..\..\include\ap_config_layout.h"\
+ "..\..\include\ap_expr.h"\
+ "..\..\include\ap_hooks.h"\
+ "..\..\include\ap_mmn.h"\
+ "..\..\include\ap_regex.h"\
+ "..\..\include\ap_release.h"\
+ "..\..\include\apache_noprobes.h"\
+ "..\..\include\http_config.h"\
+ "..\..\include\http_connection.h"\
+ "..\..\include\http_core.h"\
+ "..\..\include\http_log.h"\
+ "..\..\include\http_request.h"\
+ "..\..\include\httpd.h"\
+ "..\..\include\os.h"\
+ "..\..\include\util_cfgtree.h"\
+ "..\..\include\util_filter.h"\
+ "..\..\srclib\apr-util\include\apr_buckets.h"\
+ "..\..\srclib\apr-util\include\apr_hooks.h"\
+ "..\..\srclib\apr-util\include\apr_optional.h"\
+ "..\..\srclib\apr-util\include\apr_optional_hooks.h"\
+ "..\..\srclib\apr-util\include\apr_queue.h"\
+ "..\..\srclib\apr-util\include\apr_uri.h"\
+ "..\..\srclib\apr-util\include\apu.h"\
+ "..\..\srclib\apr\include\apr.h"\
+ "..\..\srclib\apr\include\apr_allocator.h"\
+ "..\..\srclib\apr\include\apr_errno.h"\
+ "..\..\srclib\apr\include\apr_file_info.h"\
+ "..\..\srclib\apr\include\apr_file_io.h"\
+ "..\..\srclib\apr\include\apr_general.h"\
+ "..\..\srclib\apr\include\apr_hash.h"\
+ "..\..\srclib\apr\include\apr_inherit.h"\
+ "..\..\srclib\apr\include\apr_mmap.h"\
+ "..\..\srclib\apr\include\apr_network_io.h"\
+ "..\..\srclib\apr\include\apr_poll.h"\
+ "..\..\srclib\apr\include\apr_pools.h"\
+ "..\..\srclib\apr\include\apr_ring.h"\
+ "..\..\srclib\apr\include\apr_tables.h"\
+ "..\..\srclib\apr\include\apr_thread_cond.h"\
+ "..\..\srclib\apr\include\apr_thread_mutex.h"\
+ "..\..\srclib\apr\include\apr_thread_proc.h"\
+ "..\..\srclib\apr\include\apr_time.h"\
+ "..\..\srclib\apr\include\apr_user.h"\
+ "..\..\srclib\apr\include\apr_want.h"\
+ ".\h2.h"\
+ ".\h2_h2.h"\
+ ".\h2_mplx.h"\
+ ".\h2_private.h"\
+ ".\h2_request.h"\
+ ".\h2_task.h"\
+ ".\h2_util.h"\
+
+
+./h2_io_set.c : \
+ "..\..\include\ap_config.h"\
+ "..\..\include\ap_config_layout.h"\
+ "..\..\include\ap_expr.h"\
+ "..\..\include\ap_hooks.h"\
+ "..\..\include\ap_mmn.h"\
+ "..\..\include\ap_regex.h"\
+ "..\..\include\ap_release.h"\
+ "..\..\include\apache_noprobes.h"\
+ "..\..\include\http_config.h"\
+ "..\..\include\http_connection.h"\
+ "..\..\include\http_core.h"\
+ "..\..\include\http_log.h"\
+ "..\..\include\httpd.h"\
+ "..\..\include\os.h"\
+ "..\..\include\util_cfgtree.h"\
+ "..\..\include\util_filter.h"\
+ "..\..\srclib\apr-util\include\apr_buckets.h"\
+ "..\..\srclib\apr-util\include\apr_hooks.h"\
+ "..\..\srclib\apr-util\include\apr_optional.h"\
+ "..\..\srclib\apr-util\include\apr_optional_hooks.h"\
+ "..\..\srclib\apr-util\include\apr_uri.h"\
+ "..\..\srclib\apr-util\include\apu.h"\
+ "..\..\srclib\apr\include\apr.h"\
+ "..\..\srclib\apr\include\apr_allocator.h"\
+ "..\..\srclib\apr\include\apr_errno.h"\
+ "..\..\srclib\apr\include\apr_file_info.h"\
+ "..\..\srclib\apr\include\apr_file_io.h"\
+ "..\..\srclib\apr\include\apr_general.h"\
+ "..\..\srclib\apr\include\apr_hash.h"\
+ "..\..\srclib\apr\include\apr_inherit.h"\
+ "..\..\srclib\apr\include\apr_mmap.h"\
+ "..\..\srclib\apr\include\apr_network_io.h"\
+ "..\..\srclib\apr\include\apr_poll.h"\
+ "..\..\srclib\apr\include\apr_pools.h"\
+ "..\..\srclib\apr\include\apr_ring.h"\
+ "..\..\srclib\apr\include\apr_strings.h"\
+ "..\..\srclib\apr\include\apr_tables.h"\
+ "..\..\srclib\apr\include\apr_thread_mutex.h"\
+ "..\..\srclib\apr\include\apr_thread_proc.h"\
+ "..\..\srclib\apr\include\apr_time.h"\
+ "..\..\srclib\apr\include\apr_user.h"\
+ "..\..\srclib\apr\include\apr_want.h"\
+ ".\h2_io_set.h"\
+ ".\h2_private.h"\
+
+
+./h2_mplx.c : \
+ "..\..\include\ap_config.h"\
+ "..\..\include\ap_config_layout.h"\
+ "..\..\include\ap_expr.h"\
+ "..\..\include\ap_hooks.h"\
+ "..\..\include\ap_mmn.h"\
+ "..\..\include\ap_regex.h"\
+ "..\..\include\ap_release.h"\
+ "..\..\include\apache_noprobes.h"\
+ "..\..\include\http_config.h"\
+ "..\..\include\http_core.h"\
+ "..\..\include\http_log.h"\
+ "..\..\include\httpd.h"\
+ "..\..\include\os.h"\
+ "..\..\include\util_cfgtree.h"\
+ "..\..\include\util_filter.h"\
+ "..\..\srclib\apr-util\include\apr_buckets.h"\
+ "..\..\srclib\apr-util\include\apr_hooks.h"\
+ "..\..\srclib\apr-util\include\apr_optional.h"\
+ "..\..\srclib\apr-util\include\apr_optional_hooks.h"\
+ "..\..\srclib\apr-util\include\apr_queue.h"\
+ "..\..\srclib\apr-util\include\apr_uri.h"\
+ "..\..\srclib\apr-util\include\apu.h"\
+ "..\..\srclib\apr\include\apr.h"\
+ "..\..\srclib\apr\include\apr_allocator.h"\
+ "..\..\srclib\apr\include\apr_errno.h"\
+ "..\..\srclib\apr\include\apr_file_info.h"\
+ "..\..\srclib\apr\include\apr_file_io.h"\
+ "..\..\srclib\apr\include\apr_general.h"\
+ "..\..\srclib\apr\include\apr_hash.h"\
+ "..\..\srclib\apr\include\apr_inherit.h"\
+ "..\..\srclib\apr\include\apr_mmap.h"\
+ "..\..\srclib\apr\include\apr_network_io.h"\
+ "..\..\srclib\apr\include\apr_poll.h"\
+ "..\..\srclib\apr\include\apr_pools.h"\
+ "..\..\srclib\apr\include\apr_ring.h"\
+ "..\..\srclib\apr\include\apr_strings.h"\
+ "..\..\srclib\apr\include\apr_tables.h"\
+ "..\..\srclib\apr\include\apr_thread_cond.h"\
+ "..\..\srclib\apr\include\apr_thread_mutex.h"\
+ "..\..\srclib\apr\include\apr_thread_proc.h"\
+ "..\..\srclib\apr\include\apr_time.h"\
+ "..\..\srclib\apr\include\apr_user.h"\
+ "..\..\srclib\apr\include\apr_want.h"\
+ ".\h2.h"\
+ ".\h2_config.h"\
+ ".\h2_conn.h"\
+ ".\h2_ctx.h"\
+ ".\h2_h2.h"\
+ ".\h2_mplx.h"\
+ ".\h2_ngn_shed.h"\
+ ".\h2_private.h"\
+ ".\h2_request.h"\
+ ".\h2_stream.h"\
+ ".\h2_task.h"\
+ ".\h2_util.h"\
+ ".\h2_workers.h"\
+ ".\mod_http2.h"\
+
+
+./h2_ngn_shed.c : \
+ "..\..\include\ap_config.h"\
+ "..\..\include\ap_config_layout.h"\
+ "..\..\include\ap_expr.h"\
+ "..\..\include\ap_hooks.h"\
+ "..\..\include\ap_mmn.h"\
+ "..\..\include\ap_regex.h"\
+ "..\..\include\ap_release.h"\
+ "..\..\include\apache_noprobes.h"\
+ "..\..\include\http_config.h"\
+ "..\..\include\http_core.h"\
+ "..\..\include\http_log.h"\
+ "..\..\include\httpd.h"\
+ "..\..\include\os.h"\
+ "..\..\include\util_cfgtree.h"\
+ "..\..\include\util_filter.h"\
+ "..\..\srclib\apr-util\include\apr_buckets.h"\
+ "..\..\srclib\apr-util\include\apr_hooks.h"\
+ "..\..\srclib\apr-util\include\apr_optional.h"\
+ "..\..\srclib\apr-util\include\apr_optional_hooks.h"\
+ "..\..\srclib\apr-util\include\apr_queue.h"\
+ "..\..\srclib\apr-util\include\apr_uri.h"\
+ "..\..\srclib\apr-util\include\apu.h"\
+ "..\..\srclib\apr\include\apr.h"\
+ "..\..\srclib\apr\include\apr_allocator.h"\
+ "..\..\srclib\apr\include\apr_errno.h"\
+ "..\..\srclib\apr\include\apr_file_info.h"\
+ "..\..\srclib\apr\include\apr_file_io.h"\
+ "..\..\srclib\apr\include\apr_general.h"\
+ "..\..\srclib\apr\include\apr_hash.h"\
+ "..\..\srclib\apr\include\apr_inherit.h"\
+ "..\..\srclib\apr\include\apr_mmap.h"\
+ "..\..\srclib\apr\include\apr_network_io.h"\
+ "..\..\srclib\apr\include\apr_poll.h"\
+ "..\..\srclib\apr\include\apr_pools.h"\
+ "..\..\srclib\apr\include\apr_ring.h"\
+ "..\..\srclib\apr\include\apr_strings.h"\
+ "..\..\srclib\apr\include\apr_tables.h"\
+ "..\..\srclib\apr\include\apr_thread_cond.h"\
+ "..\..\srclib\apr\include\apr_thread_mutex.h"\
+ "..\..\srclib\apr\include\apr_thread_proc.h"\
+ "..\..\srclib\apr\include\apr_time.h"\
+ "..\..\srclib\apr\include\apr_user.h"\
+ "..\..\srclib\apr\include\apr_want.h"\
+ ".\h2.h"\
+ ".\h2_config.h"\
+ ".\h2_conn.h"\
+ ".\h2_ctx.h"\
+ ".\h2_h2.h"\
+ ".\h2_mplx.h"\
+ ".\h2_ngn_shed.h"\
+ ".\h2_private.h"\
+ ".\h2_request.h"\
+ ".\h2_task.h"\
+ ".\h2_util.h"\
+ ".\mod_http2.h"\
+
+
+./h2_push.c : \
+ "..\..\include\ap_config.h"\
+ "..\..\include\ap_config_layout.h"\
+ "..\..\include\ap_expr.h"\
+ "..\..\include\ap_hooks.h"\
+ "..\..\include\ap_mmn.h"\
+ "..\..\include\ap_regex.h"\
+ "..\..\include\ap_release.h"\
+ "..\..\include\apache_noprobes.h"\
+ "..\..\include\http_config.h"\
+ "..\..\include\http_core.h"\
+ "..\..\include\http_log.h"\
+ "..\..\include\httpd.h"\
+ "..\..\include\os.h"\
+ "..\..\include\util_cfgtree.h"\
+ "..\..\include\util_filter.h"\
+ "..\..\srclib\apr-util\include\apr_buckets.h"\
+ "..\..\srclib\apr-util\include\apr_hooks.h"\
+ "..\..\srclib\apr-util\include\apr_optional.h"\
+ "..\..\srclib\apr-util\include\apr_optional_hooks.h"\
+ "..\..\srclib\apr-util\include\apr_uri.h"\
+ "..\..\srclib\apr-util\include\apu.h"\
+ "..\..\srclib\apr\include\apr.h"\
+ "..\..\srclib\apr\include\apr_allocator.h"\
+ "..\..\srclib\apr\include\apr_errno.h"\
+ "..\..\srclib\apr\include\apr_file_info.h"\
+ "..\..\srclib\apr\include\apr_file_io.h"\
+ "..\..\srclib\apr\include\apr_general.h"\
+ "..\..\srclib\apr\include\apr_hash.h"\
+ "..\..\srclib\apr\include\apr_inherit.h"\
+ "..\..\srclib\apr\include\apr_lib.h"\
+ "..\..\srclib\apr\include\apr_mmap.h"\
+ "..\..\srclib\apr\include\apr_network_io.h"\
+ "..\..\srclib\apr\include\apr_poll.h"\
+ "..\..\srclib\apr\include\apr_pools.h"\
+ "..\..\srclib\apr\include\apr_ring.h"\
+ "..\..\srclib\apr\include\apr_strings.h"\
+ "..\..\srclib\apr\include\apr_tables.h"\
+ "..\..\srclib\apr\include\apr_thread_mutex.h"\
+ "..\..\srclib\apr\include\apr_thread_proc.h"\
+ "..\..\srclib\apr\include\apr_time.h"\
+ "..\..\srclib\apr\include\apr_user.h"\
+ "..\..\srclib\apr\include\apr_want.h"\
+ ".\h2.h"\
+ ".\h2_conn_io.h"\
+ ".\h2_h2.h"\
+ ".\h2_private.h"\
+ ".\h2_push.h"\
+ ".\h2_request.h"\
+ ".\h2_session.h"\
+ ".\h2_stream.h"\
+ ".\h2_util.h"\
+
+
+./h2_request.c : \
+ "..\..\include\ap_config.h"\
+ "..\..\include\ap_config_layout.h"\
+ "..\..\include\ap_expr.h"\
+ "..\..\include\ap_hooks.h"\
+ "..\..\include\ap_mmn.h"\
+ "..\..\include\ap_mpm.h"\
+ "..\..\include\ap_regex.h"\
+ "..\..\include\ap_release.h"\
+ "..\..\include\apache_noprobes.h"\
+ "..\..\include\http_config.h"\
+ "..\..\include\http_connection.h"\
+ "..\..\include\http_core.h"\
+ "..\..\include\http_log.h"\
+ "..\..\include\http_protocol.h"\
+ "..\..\include\http_request.h"\
+ "..\..\include\http_vhost.h"\
+ "..\..\include\httpd.h"\
+ "..\..\include\mod_core.h"\
+ "..\..\include\os.h"\
+ "..\..\include\scoreboard.h"\
+ "..\..\include\util_cfgtree.h"\
+ "..\..\include\util_filter.h"\
+ "..\..\srclib\apr-util\include\apr_buckets.h"\
+ "..\..\srclib\apr-util\include\apr_hooks.h"\
+ "..\..\srclib\apr-util\include\apr_optional.h"\
+ "..\..\srclib\apr-util\include\apr_optional_hooks.h"\
+ "..\..\srclib\apr-util\include\apr_uri.h"\
+ "..\..\srclib\apr-util\include\apu.h"\
+ "..\..\srclib\apr\include\apr.h"\
+ "..\..\srclib\apr\include\apr_allocator.h"\
+ "..\..\srclib\apr\include\apr_dso.h"\
+ "..\..\srclib\apr\include\apr_errno.h"\
+ "..\..\srclib\apr\include\apr_file_info.h"\
+ "..\..\srclib\apr\include\apr_file_io.h"\
+ "..\..\srclib\apr\include\apr_general.h"\
+ "..\..\srclib\apr\include\apr_global_mutex.h"\
+ "..\..\srclib\apr\include\apr_hash.h"\
+ "..\..\srclib\apr\include\apr_inherit.h"\
+ "..\..\srclib\apr\include\apr_mmap.h"\
+ "..\..\srclib\apr\include\apr_network_io.h"\
+ "..\..\srclib\apr\include\apr_poll.h"\
+ "..\..\srclib\apr\include\apr_pools.h"\
+ "..\..\srclib\apr\include\apr_portable.h"\
+ "..\..\srclib\apr\include\apr_proc_mutex.h"\
+ "..\..\srclib\apr\include\apr_ring.h"\
+ "..\..\srclib\apr\include\apr_shm.h"\
+ "..\..\srclib\apr\include\apr_strings.h"\
+ "..\..\srclib\apr\include\apr_tables.h"\
+ "..\..\srclib\apr\include\apr_thread_mutex.h"\
+ "..\..\srclib\apr\include\apr_thread_proc.h"\
+ "..\..\srclib\apr\include\apr_time.h"\
+ "..\..\srclib\apr\include\apr_user.h"\
+ "..\..\srclib\apr\include\apr_want.h"\
+ ".\h2.h"\
+ ".\h2_private.h"\
+ ".\h2_push.h"\
+ ".\h2_request.h"\
+ ".\h2_util.h"\
+
+
+./h2_session.c : \
+ "..\..\include\ap_config.h"\
+ "..\..\include\ap_config_layout.h"\
+ "..\..\include\ap_expr.h"\
+ "..\..\include\ap_hooks.h"\
+ "..\..\include\ap_mmn.h"\
+ "..\..\include\ap_mpm.h"\
+ "..\..\include\ap_regex.h"\
+ "..\..\include\ap_release.h"\
+ "..\..\include\apache_noprobes.h"\
+ "..\..\include\http_config.h"\
+ "..\..\include\http_core.h"\
+ "..\..\include\http_log.h"\
+ "..\..\include\httpd.h"\
+ "..\..\include\os.h"\
+ "..\..\include\scoreboard.h"\
+ "..\..\include\util_cfgtree.h"\
+ "..\..\include\util_filter.h"\
+ "..\..\srclib\apr-util\include\apr_base64.h"\
+ "..\..\srclib\apr-util\include\apr_buckets.h"\
+ "..\..\srclib\apr-util\include\apr_hooks.h"\
+ "..\..\srclib\apr-util\include\apr_optional.h"\
+ "..\..\srclib\apr-util\include\apr_optional_hooks.h"\
+ "..\..\srclib\apr-util\include\apr_queue.h"\
+ "..\..\srclib\apr-util\include\apr_uri.h"\
+ "..\..\srclib\apr-util\include\apu.h"\
+ "..\..\srclib\apr\include\apr.h"\
+ "..\..\srclib\apr\include\apr_allocator.h"\
+ "..\..\srclib\apr\include\apr_dso.h"\
+ "..\..\srclib\apr\include\apr_errno.h"\
+ "..\..\srclib\apr\include\apr_file_info.h"\
+ "..\..\srclib\apr\include\apr_file_io.h"\
+ "..\..\srclib\apr\include\apr_general.h"\
+ "..\..\srclib\apr\include\apr_global_mutex.h"\
+ "..\..\srclib\apr\include\apr_hash.h"\
+ "..\..\srclib\apr\include\apr_inherit.h"\
+ "..\..\srclib\apr\include\apr_mmap.h"\
+ "..\..\srclib\apr\include\apr_network_io.h"\
+ "..\..\srclib\apr\include\apr_poll.h"\
+ "..\..\srclib\apr\include\apr_pools.h"\
+ "..\..\srclib\apr\include\apr_portable.h"\
+ "..\..\srclib\apr\include\apr_proc_mutex.h"\
+ "..\..\srclib\apr\include\apr_ring.h"\
+ "..\..\srclib\apr\include\apr_shm.h"\
+ "..\..\srclib\apr\include\apr_strings.h"\
+ "..\..\srclib\apr\include\apr_tables.h"\
+ "..\..\srclib\apr\include\apr_thread_cond.h"\
+ "..\..\srclib\apr\include\apr_thread_mutex.h"\
+ "..\..\srclib\apr\include\apr_thread_proc.h"\
+ "..\..\srclib\apr\include\apr_time.h"\
+ "..\..\srclib\apr\include\apr_user.h"\
+ "..\..\srclib\apr\include\apr_want.h"\
+ ".\h2.h"\
+ ".\h2_bucket_eos.h"\
+ ".\h2_config.h"\
+ ".\h2_conn_io.h"\
+ ".\h2_ctx.h"\
+ ".\h2_filter.h"\
+ ".\h2_from_h1.h"\
+ ".\h2_h2.h"\
+ ".\h2_mplx.h"\
+ ".\h2_private.h"\
+ ".\h2_push.h"\
+ ".\h2_request.h"\
+ ".\h2_session.h"\
+ ".\h2_stream.h"\
+ ".\h2_task.h"\
+ ".\h2_util.h"\
+ ".\h2_version.h"\
+ ".\h2_workers.h"\
+
+
+./h2_stream.c : \
+ "..\..\include\ap_config.h"\
+ "..\..\include\ap_config_layout.h"\
+ "..\..\include\ap_expr.h"\
+ "..\..\include\ap_hooks.h"\
+ "..\..\include\ap_mmn.h"\
+ "..\..\include\ap_regex.h"\
+ "..\..\include\ap_release.h"\
+ "..\..\include\apache_noprobes.h"\
+ "..\..\include\http_config.h"\
+ "..\..\include\http_connection.h"\
+ "..\..\include\http_core.h"\
+ "..\..\include\http_log.h"\
+ "..\..\include\httpd.h"\
+ "..\..\include\os.h"\
+ "..\..\include\util_cfgtree.h"\
+ "..\..\include\util_filter.h"\
+ "..\..\srclib\apr-util\include\apr_buckets.h"\
+ "..\..\srclib\apr-util\include\apr_hooks.h"\
+ "..\..\srclib\apr-util\include\apr_optional.h"\
+ "..\..\srclib\apr-util\include\apr_optional_hooks.h"\
+ "..\..\srclib\apr-util\include\apr_queue.h"\
+ "..\..\srclib\apr-util\include\apr_uri.h"\
+ "..\..\srclib\apr-util\include\apu.h"\
+ "..\..\srclib\apr\include\apr.h"\
+ "..\..\srclib\apr\include\apr_allocator.h"\
+ "..\..\srclib\apr\include\apr_errno.h"\
+ "..\..\srclib\apr\include\apr_file_info.h"\
+ "..\..\srclib\apr\include\apr_file_io.h"\
+ "..\..\srclib\apr\include\apr_general.h"\
+ "..\..\srclib\apr\include\apr_hash.h"\
+ "..\..\srclib\apr\include\apr_inherit.h"\
+ "..\..\srclib\apr\include\apr_mmap.h"\
+ "..\..\srclib\apr\include\apr_network_io.h"\
+ "..\..\srclib\apr\include\apr_poll.h"\
+ "..\..\srclib\apr\include\apr_pools.h"\
+ "..\..\srclib\apr\include\apr_ring.h"\
+ "..\..\srclib\apr\include\apr_tables.h"\
+ "..\..\srclib\apr\include\apr_thread_mutex.h"\
+ "..\..\srclib\apr\include\apr_thread_proc.h"\
+ "..\..\srclib\apr\include\apr_time.h"\
+ "..\..\srclib\apr\include\apr_user.h"\
+ "..\..\srclib\apr\include\apr_want.h"\
+ ".\h2.h"\
+ ".\h2_config.h"\
+ ".\h2_conn.h"\
+ ".\h2_conn_io.h"\
+ ".\h2_ctx.h"\
+ ".\h2_filter.h"\
+ ".\h2_h2.h"\
+ ".\h2_mplx.h"\
+ ".\h2_private.h"\
+ ".\h2_push.h"\
+ ".\h2_request.h"\
+ ".\h2_session.h"\
+ ".\h2_stream.h"\
+ ".\h2_task.h"\
+ ".\h2_util.h"\
+
+
+./h2_switch.c : \
+ "..\..\include\ap_config.h"\
+ "..\..\include\ap_config_layout.h"\
+ "..\..\include\ap_expr.h"\
+ "..\..\include\ap_hooks.h"\
+ "..\..\include\ap_mmn.h"\
+ "..\..\include\ap_regex.h"\
+ "..\..\include\ap_release.h"\
+ "..\..\include\apache_noprobes.h"\
+ "..\..\include\http_config.h"\
+ "..\..\include\http_connection.h"\
+ "..\..\include\http_core.h"\
+ "..\..\include\http_log.h"\
+ "..\..\include\http_protocol.h"\
+ "..\..\include\httpd.h"\
+ "..\..\include\os.h"\
+ "..\..\include\util_cfgtree.h"\
+ "..\..\include\util_filter.h"\
+ "..\..\srclib\apr-util\include\apr_buckets.h"\
+ "..\..\srclib\apr-util\include\apr_hooks.h"\
+ "..\..\srclib\apr-util\include\apr_optional.h"\
+ "..\..\srclib\apr-util\include\apr_optional_hooks.h"\
+ "..\..\srclib\apr-util\include\apr_uri.h"\
+ "..\..\srclib\apr-util\include\apu.h"\
+ "..\..\srclib\apr\include\apr.h"\
+ "..\..\srclib\apr\include\apr_allocator.h"\
+ "..\..\srclib\apr\include\apr_dso.h"\
+ "..\..\srclib\apr\include\apr_errno.h"\
+ "..\..\srclib\apr\include\apr_file_info.h"\
+ "..\..\srclib\apr\include\apr_file_io.h"\
+ "..\..\srclib\apr\include\apr_general.h"\
+ "..\..\srclib\apr\include\apr_global_mutex.h"\
+ "..\..\srclib\apr\include\apr_hash.h"\
+ "..\..\srclib\apr\include\apr_inherit.h"\
+ "..\..\srclib\apr\include\apr_mmap.h"\
+ "..\..\srclib\apr\include\apr_network_io.h"\
+ "..\..\srclib\apr\include\apr_poll.h"\
+ "..\..\srclib\apr\include\apr_pools.h"\
+ "..\..\srclib\apr\include\apr_portable.h"\
+ "..\..\srclib\apr\include\apr_proc_mutex.h"\
+ "..\..\srclib\apr\include\apr_ring.h"\
+ "..\..\srclib\apr\include\apr_shm.h"\
+ "..\..\srclib\apr\include\apr_strings.h"\
+ "..\..\srclib\apr\include\apr_tables.h"\
+ "..\..\srclib\apr\include\apr_thread_mutex.h"\
+ "..\..\srclib\apr\include\apr_thread_proc.h"\
+ "..\..\srclib\apr\include\apr_time.h"\
+ "..\..\srclib\apr\include\apr_user.h"\
+ "..\..\srclib\apr\include\apr_want.h"\
+ ".\h2_config.h"\
+ ".\h2_conn.h"\
+ ".\h2_ctx.h"\
+ ".\h2_h2.h"\
+ ".\h2_private.h"\
+ ".\h2_switch.h"\
+
+
+./h2_task.c : \
+ "..\..\include\ap_config.h"\
+ "..\..\include\ap_config_layout.h"\
+ "..\..\include\ap_expr.h"\
+ "..\..\include\ap_hooks.h"\
+ "..\..\include\ap_mmn.h"\
+ "..\..\include\ap_mpm.h"\
+ "..\..\include\ap_regex.h"\
+ "..\..\include\ap_release.h"\
+ "..\..\include\apache_noprobes.h"\
+ "..\..\include\http_config.h"\
+ "..\..\include\http_connection.h"\
+ "..\..\include\http_core.h"\
+ "..\..\include\http_log.h"\
+ "..\..\include\http_protocol.h"\
+ "..\..\include\http_request.h"\
+ "..\..\include\http_vhost.h"\
+ "..\..\include\httpd.h"\
+ "..\..\include\mod_core.h"\
+ "..\..\include\os.h"\
+ "..\..\include\scoreboard.h"\
+ "..\..\include\util_cfgtree.h"\
+ "..\..\include\util_filter.h"\
+ "..\..\srclib\apr-util\include\apr_buckets.h"\
+ "..\..\srclib\apr-util\include\apr_hooks.h"\
+ "..\..\srclib\apr-util\include\apr_optional.h"\
+ "..\..\srclib\apr-util\include\apr_optional_hooks.h"\
+ "..\..\srclib\apr-util\include\apr_queue.h"\
+ "..\..\srclib\apr-util\include\apr_uri.h"\
+ "..\..\srclib\apr-util\include\apu.h"\
+ "..\..\srclib\apr\include\apr.h"\
+ "..\..\srclib\apr\include\apr_allocator.h"\
+ "..\..\srclib\apr\include\apr_atomic.h"\
+ "..\..\srclib\apr\include\apr_dso.h"\
+ "..\..\srclib\apr\include\apr_errno.h"\
+ "..\..\srclib\apr\include\apr_file_info.h"\
+ "..\..\srclib\apr\include\apr_file_io.h"\
+ "..\..\srclib\apr\include\apr_general.h"\
+ "..\..\srclib\apr\include\apr_global_mutex.h"\
+ "..\..\srclib\apr\include\apr_hash.h"\
+ "..\..\srclib\apr\include\apr_inherit.h"\
+ "..\..\srclib\apr\include\apr_mmap.h"\
+ "..\..\srclib\apr\include\apr_network_io.h"\
+ "..\..\srclib\apr\include\apr_poll.h"\
+ "..\..\srclib\apr\include\apr_pools.h"\
+ "..\..\srclib\apr\include\apr_portable.h"\
+ "..\..\srclib\apr\include\apr_proc_mutex.h"\
+ "..\..\srclib\apr\include\apr_ring.h"\
+ "..\..\srclib\apr\include\apr_shm.h"\
+ "..\..\srclib\apr\include\apr_strings.h"\
+ "..\..\srclib\apr\include\apr_tables.h"\
+ "..\..\srclib\apr\include\apr_thread_cond.h"\
+ "..\..\srclib\apr\include\apr_thread_mutex.h"\
+ "..\..\srclib\apr\include\apr_thread_proc.h"\
+ "..\..\srclib\apr\include\apr_time.h"\
+ "..\..\srclib\apr\include\apr_user.h"\
+ "..\..\srclib\apr\include\apr_want.h"\
+ ".\h2.h"\
+ ".\h2_config.h"\
+ ".\h2_conn.h"\
+ ".\h2_conn_io.h"\
+ ".\h2_ctx.h"\
+ ".\h2_from_h1.h"\
+ ".\h2_h2.h"\
+ ".\h2_mplx.h"\
+ ".\h2_private.h"\
+ ".\h2_request.h"\
+ ".\h2_session.h"\
+ ".\h2_stream.h"\
+ ".\h2_task.h"\
+
+
+./h2_task_input.c : \
+ "..\..\include\ap_config.h"\
+ "..\..\include\ap_config_layout.h"\
+ "..\..\include\ap_expr.h"\
+ "..\..\include\ap_hooks.h"\
+ "..\..\include\ap_mmn.h"\
+ "..\..\include\ap_regex.h"\
+ "..\..\include\ap_release.h"\
+ "..\..\include\apache_noprobes.h"\
+ "..\..\include\http_config.h"\
+ "..\..\include\http_connection.h"\
+ "..\..\include\http_core.h"\
+ "..\..\include\http_log.h"\
+ "..\..\include\httpd.h"\
+ "..\..\include\os.h"\
+ "..\..\include\util_cfgtree.h"\
+ "..\..\include\util_filter.h"\
+ "..\..\srclib\apr-util\include\apr_buckets.h"\
+ "..\..\srclib\apr-util\include\apr_hooks.h"\
+ "..\..\srclib\apr-util\include\apr_optional.h"\
+ "..\..\srclib\apr-util\include\apr_optional_hooks.h"\
+ "..\..\srclib\apr-util\include\apr_queue.h"\
+ "..\..\srclib\apr-util\include\apr_uri.h"\
+ "..\..\srclib\apr-util\include\apu.h"\
+ "..\..\srclib\apr\include\apr.h"\
+ "..\..\srclib\apr\include\apr_allocator.h"\
+ "..\..\srclib\apr\include\apr_errno.h"\
+ "..\..\srclib\apr\include\apr_file_info.h"\
+ "..\..\srclib\apr\include\apr_file_io.h"\
+ "..\..\srclib\apr\include\apr_general.h"\
+ "..\..\srclib\apr\include\apr_hash.h"\
+ "..\..\srclib\apr\include\apr_inherit.h"\
+ "..\..\srclib\apr\include\apr_mmap.h"\
+ "..\..\srclib\apr\include\apr_network_io.h"\
+ "..\..\srclib\apr\include\apr_poll.h"\
+ "..\..\srclib\apr\include\apr_pools.h"\
+ "..\..\srclib\apr\include\apr_ring.h"\
+ "..\..\srclib\apr\include\apr_tables.h"\
+ "..\..\srclib\apr\include\apr_thread_mutex.h"\
+ "..\..\srclib\apr\include\apr_thread_proc.h"\
+ "..\..\srclib\apr\include\apr_time.h"\
+ "..\..\srclib\apr\include\apr_user.h"\
+ "..\..\srclib\apr\include\apr_want.h"\
+ ".\h2.h"\
+ ".\h2_conn.h"\
+ ".\h2_conn_io.h"\
+ ".\h2_mplx.h"\
+ ".\h2_private.h"\
+ ".\h2_request.h"\
+ ".\h2_session.h"\
+ ".\h2_stream.h"\
+ ".\h2_task.h"\
+ ".\h2_util.h"\
+
+
+./h2_task_output.c : \
+ "..\..\include\ap_config.h"\
+ "..\..\include\ap_config_layout.h"\
+ "..\..\include\ap_expr.h"\
+ "..\..\include\ap_hooks.h"\
+ "..\..\include\ap_mmn.h"\
+ "..\..\include\ap_regex.h"\
+ "..\..\include\ap_release.h"\
+ "..\..\include\apache_noprobes.h"\
+ "..\..\include\http_config.h"\
+ "..\..\include\http_connection.h"\
+ "..\..\include\http_core.h"\
+ "..\..\include\http_log.h"\
+ "..\..\include\http_request.h"\
+ "..\..\include\httpd.h"\
+ "..\..\include\os.h"\
+ "..\..\include\util_cfgtree.h"\
+ "..\..\include\util_filter.h"\
+ "..\..\srclib\apr-util\include\apr_buckets.h"\
+ "..\..\srclib\apr-util\include\apr_hooks.h"\
+ "..\..\srclib\apr-util\include\apr_optional.h"\
+ "..\..\srclib\apr-util\include\apr_optional_hooks.h"\
+ "..\..\srclib\apr-util\include\apr_queue.h"\
+ "..\..\srclib\apr-util\include\apr_uri.h"\
+ "..\..\srclib\apr-util\include\apu.h"\
+ "..\..\srclib\apr\include\apr.h"\
+ "..\..\srclib\apr\include\apr_allocator.h"\
+ "..\..\srclib\apr\include\apr_errno.h"\
+ "..\..\srclib\apr\include\apr_file_info.h"\
+ "..\..\srclib\apr\include\apr_file_io.h"\
+ "..\..\srclib\apr\include\apr_general.h"\
+ "..\..\srclib\apr\include\apr_hash.h"\
+ "..\..\srclib\apr\include\apr_inherit.h"\
+ "..\..\srclib\apr\include\apr_mmap.h"\
+ "..\..\srclib\apr\include\apr_network_io.h"\
+ "..\..\srclib\apr\include\apr_poll.h"\
+ "..\..\srclib\apr\include\apr_pools.h"\
+ "..\..\srclib\apr\include\apr_ring.h"\
+ "..\..\srclib\apr\include\apr_tables.h"\
+ "..\..\srclib\apr\include\apr_thread_cond.h"\
+ "..\..\srclib\apr\include\apr_thread_mutex.h"\
+ "..\..\srclib\apr\include\apr_thread_proc.h"\
+ "..\..\srclib\apr\include\apr_time.h"\
+ "..\..\srclib\apr\include\apr_user.h"\
+ "..\..\srclib\apr\include\apr_want.h"\
+ ".\h2.h"\
+ ".\h2_conn.h"\
+ ".\h2_conn_io.h"\
+ ".\h2_from_h1.h"\
+ ".\h2_mplx.h"\
+ ".\h2_private.h"\
+ ".\h2_request.h"\
+ ".\h2_session.h"\
+ ".\h2_stream.h"\
+ ".\h2_task.h"\
+ ".\h2_util.h"\
+
+
+./h2_util.c : \
+ "..\..\include\ap_config.h"\
+ "..\..\include\ap_config_layout.h"\
+ "..\..\include\ap_expr.h"\
+ "..\..\include\ap_hooks.h"\
+ "..\..\include\ap_mmn.h"\
+ "..\..\include\ap_regex.h"\
+ "..\..\include\ap_release.h"\
+ "..\..\include\apache_noprobes.h"\
+ "..\..\include\http_config.h"\
+ "..\..\include\http_core.h"\
+ "..\..\include\http_log.h"\
+ "..\..\include\http_request.h"\
+ "..\..\include\httpd.h"\
+ "..\..\include\os.h"\
+ "..\..\include\util_cfgtree.h"\
+ "..\..\include\util_filter.h"\
+ "..\..\srclib\apr-util\include\apr_buckets.h"\
+ "..\..\srclib\apr-util\include\apr_hooks.h"\
+ "..\..\srclib\apr-util\include\apr_optional.h"\
+ "..\..\srclib\apr-util\include\apr_optional_hooks.h"\
+ "..\..\srclib\apr-util\include\apr_uri.h"\
+ "..\..\srclib\apr-util\include\apu.h"\
+ "..\..\srclib\apr\include\apr.h"\
+ "..\..\srclib\apr\include\apr_allocator.h"\
+ "..\..\srclib\apr\include\apr_errno.h"\
+ "..\..\srclib\apr\include\apr_file_info.h"\
+ "..\..\srclib\apr\include\apr_file_io.h"\
+ "..\..\srclib\apr\include\apr_general.h"\
+ "..\..\srclib\apr\include\apr_hash.h"\
+ "..\..\srclib\apr\include\apr_inherit.h"\
+ "..\..\srclib\apr\include\apr_mmap.h"\
+ "..\..\srclib\apr\include\apr_network_io.h"\
+ "..\..\srclib\apr\include\apr_poll.h"\
+ "..\..\srclib\apr\include\apr_pools.h"\
+ "..\..\srclib\apr\include\apr_ring.h"\
+ "..\..\srclib\apr\include\apr_strings.h"\
+ "..\..\srclib\apr\include\apr_tables.h"\
+ "..\..\srclib\apr\include\apr_thread_mutex.h"\
+ "..\..\srclib\apr\include\apr_thread_proc.h"\
+ "..\..\srclib\apr\include\apr_time.h"\
+ "..\..\srclib\apr\include\apr_user.h"\
+ "..\..\srclib\apr\include\apr_want.h"\
+ ".\h2.h"\
+ ".\h2_private.h"\
+ ".\h2_request.h"\
+ ".\h2_util.h"\
+
+
+./h2_workers.c : \
+ "..\..\include\ap_config.h"\
+ "..\..\include\ap_config_layout.h"\
+ "..\..\include\ap_expr.h"\
+ "..\..\include\ap_hooks.h"\
+ "..\..\include\ap_mmn.h"\
+ "..\..\include\ap_mpm.h"\
+ "..\..\include\ap_regex.h"\
+ "..\..\include\ap_release.h"\
+ "..\..\include\apache_noprobes.h"\
+ "..\..\include\http_config.h"\
+ "..\..\include\http_core.h"\
+ "..\..\include\http_log.h"\
+ "..\..\include\httpd.h"\
+ "..\..\include\mpm_common.h"\
+ "..\..\include\os.h"\
+ "..\..\include\scoreboard.h"\
+ "..\..\include\util_cfgtree.h"\
+ "..\..\include\util_filter.h"\
+ "..\..\srclib\apr-util\include\apr_buckets.h"\
+ "..\..\srclib\apr-util\include\apr_hooks.h"\
+ "..\..\srclib\apr-util\include\apr_optional.h"\
+ "..\..\srclib\apr-util\include\apr_optional_hooks.h"\
+ "..\..\srclib\apr-util\include\apr_queue.h"\
+ "..\..\srclib\apr-util\include\apr_uri.h"\
+ "..\..\srclib\apr-util\include\apu.h"\
+ "..\..\srclib\apr\include\apr.h"\
+ "..\..\srclib\apr\include\apr_allocator.h"\
+ "..\..\srclib\apr\include\apr_atomic.h"\
+ "..\..\srclib\apr\include\apr_dso.h"\
+ "..\..\srclib\apr\include\apr_errno.h"\
+ "..\..\srclib\apr\include\apr_file_info.h"\
+ "..\..\srclib\apr\include\apr_file_io.h"\
+ "..\..\srclib\apr\include\apr_general.h"\
+ "..\..\srclib\apr\include\apr_global_mutex.h"\
+ "..\..\srclib\apr\include\apr_hash.h"\
+ "..\..\srclib\apr\include\apr_inherit.h"\
+ "..\..\srclib\apr\include\apr_mmap.h"\
+ "..\..\srclib\apr\include\apr_network_io.h"\
+ "..\..\srclib\apr\include\apr_poll.h"\
+ "..\..\srclib\apr\include\apr_pools.h"\
+ "..\..\srclib\apr\include\apr_portable.h"\
+ "..\..\srclib\apr\include\apr_proc_mutex.h"\
+ "..\..\srclib\apr\include\apr_ring.h"\
+ "..\..\srclib\apr\include\apr_shm.h"\
+ "..\..\srclib\apr\include\apr_tables.h"\
+ "..\..\srclib\apr\include\apr_thread_cond.h"\
+ "..\..\srclib\apr\include\apr_thread_mutex.h"\
+ "..\..\srclib\apr\include\apr_thread_proc.h"\
+ "..\..\srclib\apr\include\apr_time.h"\
+ "..\..\srclib\apr\include\apr_user.h"\
+ "..\..\srclib\apr\include\apr_want.h"\
+ ".\h2.h"\
+ ".\h2_mplx.h"\
+ ".\h2_private.h"\
+ ".\h2_task.h"\
+ ".\h2_workers.h"\
+
+
+..\..\build\win32\httpd.rc : \
+ "..\..\include\ap_release.h"\
+
+
+./mod_http2.c : \
+ "..\..\include\ap_config.h"\
+ "..\..\include\ap_config_layout.h"\
+ "..\..\include\ap_expr.h"\
+ "..\..\include\ap_hooks.h"\
+ "..\..\include\ap_mmn.h"\
+ "..\..\include\ap_regex.h"\
+ "..\..\include\ap_release.h"\
+ "..\..\include\apache_noprobes.h"\
+ "..\..\include\http_config.h"\
+ "..\..\include\http_core.h"\
+ "..\..\include\http_log.h"\
+ "..\..\include\http_protocol.h"\
+ "..\..\include\http_request.h"\
+ "..\..\include\httpd.h"\
+ "..\..\include\os.h"\
+ "..\..\include\util_cfgtree.h"\
+ "..\..\include\util_filter.h"\
+ "..\..\srclib\apr-util\include\apr_buckets.h"\
+ "..\..\srclib\apr-util\include\apr_hooks.h"\
+ "..\..\srclib\apr-util\include\apr_optional.h"\
+ "..\..\srclib\apr-util\include\apr_optional_hooks.h"\
+ "..\..\srclib\apr-util\include\apr_queue.h"\
+ "..\..\srclib\apr-util\include\apr_uri.h"\
+ "..\..\srclib\apr-util\include\apu.h"\
+ "..\..\srclib\apr\include\apr.h"\
+ "..\..\srclib\apr\include\apr_allocator.h"\
+ "..\..\srclib\apr\include\apr_dso.h"\
+ "..\..\srclib\apr\include\apr_errno.h"\
+ "..\..\srclib\apr\include\apr_file_info.h"\
+ "..\..\srclib\apr\include\apr_file_io.h"\
+ "..\..\srclib\apr\include\apr_general.h"\
+ "..\..\srclib\apr\include\apr_global_mutex.h"\
+ "..\..\srclib\apr\include\apr_hash.h"\
+ "..\..\srclib\apr\include\apr_inherit.h"\
+ "..\..\srclib\apr\include\apr_mmap.h"\
+ "..\..\srclib\apr\include\apr_network_io.h"\
+ "..\..\srclib\apr\include\apr_poll.h"\
+ "..\..\srclib\apr\include\apr_pools.h"\
+ "..\..\srclib\apr\include\apr_portable.h"\
+ "..\..\srclib\apr\include\apr_proc_mutex.h"\
+ "..\..\srclib\apr\include\apr_ring.h"\
+ "..\..\srclib\apr\include\apr_shm.h"\
+ "..\..\srclib\apr\include\apr_strings.h"\
+ "..\..\srclib\apr\include\apr_tables.h"\
+ "..\..\srclib\apr\include\apr_thread_mutex.h"\
+ "..\..\srclib\apr\include\apr_thread_proc.h"\
+ "..\..\srclib\apr\include\apr_time.h"\
+ "..\..\srclib\apr\include\apr_user.h"\
+ "..\..\srclib\apr\include\apr_want.h"\
+ ".\h2.h"\
+ ".\h2_alt_svc.h"\
+ ".\h2_config.h"\
+ ".\h2_conn.h"\
+ ".\h2_conn_io.h"\
+ ".\h2_ctx.h"\
+ ".\h2_filter.h"\
+ ".\h2_h2.h"\
+ ".\h2_mplx.h"\
+ ".\h2_push.h"\
+ ".\h2_request.h"\
+ ".\h2_session.h"\
+ ".\h2_stream.h"\
+ ".\h2_switch.h"\
+ ".\h2_task.h"\
+ ".\h2_version.h"\
+ ".\mod_http2.h"\
+
diff --git a/modules/http2/mod_http2.dsp b/modules/http2/mod_http2.dsp
new file mode 100644
index 0000000..d1c4322
--- /dev/null
+++ b/modules/http2/mod_http2.dsp
@@ -0,0 +1,195 @@
+# Microsoft Developer Studio Project File - Name="mod_http2" - Package Owner=<4>
+# Microsoft Developer Studio Generated Build File, Format Version 6.00
+# ** DO NOT EDIT **
+
+# TARGTYPE "Win32 (x86) Dynamic-Link Library" 0x0102
+
+CFG=mod_http2 - Win32 Release
+!MESSAGE This is not a valid makefile. To build this project using NMAKE,
+!MESSAGE use the Export Makefile command and run
+!MESSAGE
+!MESSAGE NMAKE /f "mod_http2.mak".
+!MESSAGE
+!MESSAGE You can specify a configuration when running NMAKE
+!MESSAGE by defining the macro CFG on the command line. For example:
+!MESSAGE
+!MESSAGE NMAKE /f "mod_http2.mak" CFG="mod_http2 - Win32 Release"
+!MESSAGE
+!MESSAGE Possible choices for configuration are:
+!MESSAGE
+!MESSAGE "mod_http2 - Win32 Release" (based on "Win32 (x86) Dynamic-Link Library")
+!MESSAGE "mod_http2 - Win32 Debug" (based on "Win32 (x86) Dynamic-Link Library")
+!MESSAGE
+
+# Begin Project
+# PROP AllowPerConfigDependencies 0
+# PROP Scc_ProjName ""
+# PROP Scc_LocalPath ""
+CPP=cl.exe
+MTL=midl.exe
+RSC=rc.exe
+
+!IF "$(CFG)" == "mod_http2 - Win32 Release"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "Release"
+# PROP BASE Intermediate_Dir "Release"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release"
+# PROP Intermediate_Dir "Release"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MD /W3 /O2 /D "WIN32" /D "NDEBUG" /D "_WINDOWS" /D "ssize_t=long" /FD /c
+# ADD CPP /nologo /MD /W3 /O2 /Oy- /Zi /I "../ssl" /I "../../include" /I "../../srclib/apr/include" /I "../../srclib/apr-util/include" /I "../../srclib/nghttp2/lib/includes" /D "NDEBUG" /D "WIN32" /D "_WINDOWS" /D "ssize_t=long" /Fd"Release\mod_http2_src" /FD /c
+# ADD BASE MTL /nologo /D "NDEBUG" /win32
+# ADD MTL /nologo /D "NDEBUG" /mktyplib203 /win32
+# ADD BASE RSC /l 0x409 /d "NDEBUG"
+# ADD RSC /l 0x409 /fo"Release/mod_http2.res" /i "../../include" /i "../../srclib/apr/include" /d "NDEBUG" /d BIN_NAME="mod_http2.so" /d LONG_NAME="http2_module for Apache"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib nghttp2.lib /nologo /subsystem:windows /dll /libpath:"..\..\srclib\nghttp2\lib\MSVC_obj" /out:".\Release\mod_http2.so" /base:@..\..\os\win32\BaseAddr.ref,mod_http2.so
+# ADD LINK32 kernel32.lib nghttp2.lib /nologo /subsystem:windows /dll /libpath:"..\..\srclib\nghttp2\lib\MSVC_obj" /incremental:no /debug /out:".\Release\mod_http2.so" /base:@..\..\os\win32\BaseAddr.ref,mod_http2.so /opt:ref
+# Begin Special Build Tool
+TargetPath=.\Release\mod_http2.so
+SOURCE="$(InputPath)"
+PostBuild_Desc=Embed .manifest
+PostBuild_Cmds=if exist $(TargetPath).manifest mt.exe -manifest $(TargetPath).manifest -outputresource:$(TargetPath);2
+# End Special Build Tool
+
+!ELSEIF "$(CFG)" == "mod_http2 - Win32 Debug"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "Debug"
+# PROP BASE Intermediate_Dir "Debug"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug"
+# PROP Intermediate_Dir "Debug"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MDd /W3 /EHsc /Zi /Od /D "WIN32" /D "_DEBUG" /D "_WINDOWS" /D "ssize_t=long" /FD /c
+# ADD CPP /nologo /MDd /W3 /EHsc /Zi /Od /I "../ssl" /I "../../include" /I "../../srclib/apr/include" /I "../../srclib/apr-util/include" /I "../../srclib/nghttp2/lib/includes" /D "_DEBUG" /D "WIN32" /D "_WINDOWS" /D "ssize_t=long" /Fd"Debug\mod_http2_src" /FD /c
+# ADD BASE MTL /nologo /D "_DEBUG" /win32
+# ADD MTL /nologo /D "_DEBUG" /mktyplib203 /win32
+# ADD BASE RSC /l 0x409 /d "_DEBUG"
+# ADD RSC /l 0x409 /fo"Debug/mod_http2.res" /i "../../include" /i "../../srclib/apr/include" /d "_DEBUG" /d BIN_NAME="mod_http2.so" /d LONG_NAME="http2_module for Apache"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib nghttp2d.lib /nologo /subsystem:windows /dll /libpath:"..\..\srclib\nghttp2\lib\MSVC_obj" /incremental:no /debug /out:".\Debug\mod_http2.so" /base:@..\..\os\win32\BaseAddr.ref,mod_http2.so
+# ADD LINK32 kernel32.lib nghttp2d.lib /nologo /subsystem:windows /dll /libpath:"..\..\srclib\nghttp2\lib\MSVC_obj" /incremental:no /debug /out:".\Debug\mod_http2.so" /base:@..\..\os\win32\BaseAddr.ref,mod_http2.so
+# Begin Special Build Tool
+TargetPath=.\Debug\mod_http2.so
+SOURCE="$(InputPath)"
+PostBuild_Desc=Embed .manifest
+PostBuild_Cmds=if exist $(TargetPath).manifest mt.exe -manifest $(TargetPath).manifest -outputresource:$(TargetPath);2
+# End Special Build Tool
+
+!ENDIF
+
+# Begin Target
+
+# Name "mod_http2 - Win32 Release"
+# Name "mod_http2 - Win32 Debug"
+# Begin Source File
+
+SOURCE=./h2_alt_svc.c
+# End Source File
+# Begin Source File
+
+SOURCE=./h2_bucket_beam.c
+# End Source File
+# Begin Source File
+
+SOURCE=./h2_bucket_eos.c
+# End Source File
+# Begin Source File
+
+SOURCE=./h2_config.c
+# End Source File
+# Begin Source File
+
+SOURCE=./h2_conn.c
+# End Source File
+# Begin Source File
+
+SOURCE=./h2_conn_io.c
+# End Source File
+# Begin Source File
+
+SOURCE=./h2_ctx.c
+# End Source File
+# Begin Source File
+
+SOURCE=./h2_filter.c
+# End Source File
+# Begin Source File
+
+SOURCE=./h2_from_h1.c
+# End Source File
+# Begin Source File
+
+SOURCE=./h2_h2.c
+# End Source File
+# Begin Source File
+
+SOURCE=./h2_mplx.c
+# End Source File
+# Begin Source File
+
+SOURCE=./h2_ngn_shed.c
+# End Source File
+# Begin Source File
+
+SOURCE=./h2_push.c
+# End Source File
+# Begin Source File
+
+SOURCE=./h2_request.c
+# End Source File
+# Begin Source File
+
+SOURCE=./h2_headers.c
+# End Source File
+# Begin Source File
+
+SOURCE=./h2_session.c
+# End Source File
+# Begin Source File
+
+SOURCE=./h2_stream.c
+# End Source File
+# Begin Source File
+
+SOURCE=./h2_switch.c
+# End Source File
+# Begin Source File
+
+SOURCE=./h2_task.c
+# End Source File
+# Begin Source File
+
+SOURCE=./h2_util.c
+# End Source File
+# Begin Source File
+
+SOURCE=./h2_workers.c
+# End Source File
+# Begin Source File
+
+SOURCE=./mod_http2.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\..\build\win32\httpd.rc
+# End Source File
+# End Target
+# End Project
diff --git a/modules/http2/mod_http2.h b/modules/http2/mod_http2.h
new file mode 100644
index 0000000..7a1b49a
--- /dev/null
+++ b/modules/http2/mod_http2.h
@@ -0,0 +1,101 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __MOD_HTTP2_H__
+#define __MOD_HTTP2_H__
+
+/** The http2_var_lookup() optional function retrieves HTTP2 environment
+ * variables. */
+APR_DECLARE_OPTIONAL_FN(char *,
+ http2_var_lookup, (apr_pool_t *, server_rec *,
+ conn_rec *, request_rec *, char *));
+
+/** An optional function which returns non-zero if the given connection
+ * or its master connection is using HTTP/2. */
+APR_DECLARE_OPTIONAL_FN(int,
+ http2_is_h2, (conn_rec *));
+
+
+/*******************************************************************************
+ * HTTP/2 request engines
+ ******************************************************************************/
+
+struct apr_thread_cond_t;
+
+typedef struct h2_req_engine h2_req_engine;
+
+typedef void http2_output_consumed(void *ctx, conn_rec *c, apr_off_t consumed);
+
+/**
+ * Initialize a h2_req_engine. The structure will be passed in but
+ * only the name and master are set. The function should initialize
+ * all fields.
+ * @param engine the allocated, partially filled structure
+ * @param r the first request to process, or NULL
+ */
+typedef apr_status_t http2_req_engine_init(h2_req_engine *engine,
+ const char *id,
+ const char *type,
+ apr_pool_t *pool,
+ apr_size_t req_buffer_size,
+ request_rec *r,
+ http2_output_consumed **pconsumed,
+ void **pbaton);
+
+/**
+ * Push a request to an engine with the specified name for further processing.
+ * If no such engine is available and einit is not NULL, einit is called
+ * with a new engine record, and the caller is responsible for running the
+ * new engine instance.
+ * @param engine_type the type of the engine to add the request to
+ * @param r the request to push to an engine for processing
+ * @param einit an optional initialization callback for a new engine
+ * of the requested type, should no instance be available.
+ * By passing a non-NULL callback, the caller is willing
+ * to init and run a new engine itself.
+ * @return APR_SUCCESS iff the slave request was successfully added to an engine
+ */
+APR_DECLARE_OPTIONAL_FN(apr_status_t,
+ http2_req_engine_push, (const char *engine_type,
+ request_rec *r,
+ http2_req_engine_init *einit));
+
+/**
+ * Get a new request for processing in this engine.
+ * @param engine the engine asking for a new request to process
+ * @param block whether the call should block waiting for a request to arrive
+ * @param capacity how many parallel requests are acceptable
+ * @param pr the request that needs processing or NULL
+ * @return APR_SUCCESS if a new request was assigned
+ * APR_EAGAIN if no new request is available
+ * APR_EOF if the engine may shut down, as no more requests will be scheduled
+ * APR_ECONNABORTED if the engine needs to shut down immediately
+ */
+APR_DECLARE_OPTIONAL_FN(apr_status_t,
+ http2_req_engine_pull, (h2_req_engine *engine,
+ apr_read_type_e block,
+ int capacity,
+ request_rec **pr));
+APR_DECLARE_OPTIONAL_FN(void,
+ http2_req_engine_done, (h2_req_engine *engine,
+ conn_rec *rconn,
+ apr_status_t status));
+
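+/* Illustrative sketch (assumed local variable names; see mod_proxy_http2 in
+ * this change for the real loop): an engine host drains further requests and
+ * reports their completion roughly like this:
+ *
+ *     request_rec *r = NULL;
+ *     status = req_engine_pull(engine, APR_NONBLOCK_READ, capacity, &r);
+ *     if (status == APR_SUCCESS && r) {
+ *         ... process r against the backend ...
+ *         req_engine_done(engine, r->connection, APR_SUCCESS);
+ *     }
+ *
+ * APR_EAGAIN means nothing is waiting right now; APR_EOF means the engine
+ * may shut down.
+ */
+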
+APR_DECLARE_OPTIONAL_FN(void,
+ http2_get_num_workers, (server_rec *s,
+ int *minw, int *max));
+
+#endif
diff --git a/modules/http2/mod_http2.mak b/modules/http2/mod_http2.mak
new file mode 100644
index 0000000..10ae887
--- /dev/null
+++ b/modules/http2/mod_http2.mak
@@ -0,0 +1,542 @@
+# Microsoft Developer Studio Generated NMAKE File, Based on mod_http2.dsp
+!IF "$(CFG)" == ""
+CFG=mod_http2 - Win32 Release
+!MESSAGE No configuration specified. Defaulting to mod_http2 - Win32 Release.
+!ENDIF
+
+!IF "$(CFG)" != "mod_http2 - Win32 Release" && "$(CFG)" != "mod_http2 - Win32 Debug"
+!MESSAGE Invalid configuration "$(CFG)" specified.
+!MESSAGE You can specify a configuration when running NMAKE
+!MESSAGE by defining the macro CFG on the command line. For example:
+!MESSAGE
+!MESSAGE NMAKE /f "mod_http2.mak" CFG="mod_http2 - Win32 Release"
+!MESSAGE
+!MESSAGE Possible choices for configuration are:
+!MESSAGE
+!MESSAGE "mod_http2 - Win32 Release" (based on "Win32 (x86) Dynamic-Link Library")
+!MESSAGE "mod_http2 - Win32 Debug" (based on "Win32 (x86) Dynamic-Link Library")
+!MESSAGE
+!ERROR An invalid configuration is specified.
+!ENDIF
+
+!IF "$(OS)" == "Windows_NT"
+NULL=
+!ELSE
+NULL=nul
+!ENDIF
+
+!IF "$(CFG)" == "mod_http2 - Win32 Release"
+
+OUTDIR=.\Release
+INTDIR=.\Release
+DS_POSTBUILD_DEP=$(INTDIR)\postbld.dep
+# Begin Custom Macros
+OutDir=.\Release
+# End Custom Macros
+
+!IF "$(RECURSE)" == "0"
+
+ALL : "$(OUTDIR)\mod_http2.so" "$(DS_POSTBUILD_DEP)"
+
+!ELSE
+
+ALL : "libhttpd - Win32 Release" "libaprutil - Win32 Release" "libapr - Win32 Release" "$(OUTDIR)\mod_http2.so" "$(DS_POSTBUILD_DEP)"
+
+!ENDIF
+
+!IF "$(RECURSE)" == "1"
+CLEAN :"libapr - Win32 ReleaseCLEAN" "libaprutil - Win32 ReleaseCLEAN" "libhttpd - Win32 ReleaseCLEAN"
+!ELSE
+CLEAN :
+!ENDIF
+ -@erase "$(INTDIR)\h2_alt_svc.obj"
+ -@erase "$(INTDIR)\h2_bucket_beam.obj"
+ -@erase "$(INTDIR)\h2_bucket_eos.obj"
+ -@erase "$(INTDIR)\h2_config.obj"
+ -@erase "$(INTDIR)\h2_conn.obj"
+ -@erase "$(INTDIR)\h2_conn_io.obj"
+ -@erase "$(INTDIR)\h2_ctx.obj"
+ -@erase "$(INTDIR)\h2_filter.obj"
+ -@erase "$(INTDIR)\h2_from_h1.obj"
+ -@erase "$(INTDIR)\h2_h2.obj"
+ -@erase "$(INTDIR)\h2_headers.obj"
+ -@erase "$(INTDIR)\h2_mplx.obj"
+ -@erase "$(INTDIR)\h2_ngn_shed.obj"
+ -@erase "$(INTDIR)\h2_push.obj"
+ -@erase "$(INTDIR)\h2_request.obj"
+ -@erase "$(INTDIR)\h2_session.obj"
+ -@erase "$(INTDIR)\h2_stream.obj"
+ -@erase "$(INTDIR)\h2_switch.obj"
+ -@erase "$(INTDIR)\h2_task.obj"
+ -@erase "$(INTDIR)\h2_util.obj"
+ -@erase "$(INTDIR)\h2_workers.obj"
+ -@erase "$(INTDIR)\mod_http2.obj"
+ -@erase "$(INTDIR)\mod_http2.res"
+ -@erase "$(INTDIR)\mod_http2_src.idb"
+ -@erase "$(INTDIR)\mod_http2_src.pdb"
+ -@erase "$(OUTDIR)\mod_http2.exp"
+ -@erase "$(OUTDIR)\mod_http2.lib"
+ -@erase "$(OUTDIR)\mod_http2.pdb"
+ -@erase "$(OUTDIR)\mod_http2.so"
+
+"$(OUTDIR)" :
+ if not exist "$(OUTDIR)/$(NULL)" mkdir "$(OUTDIR)"
+
+CPP=cl.exe
+CPP_PROJ=/nologo /MD /W3 /Zi /O2 /Oy- /I "../ssl" /I "../../include" /I "../../srclib/apr/include" /I "../../srclib/apr-util/include" /I "../../srclib/nghttp2/lib/includes" /D "NDEBUG" /D "WIN32" /D "_WINDOWS" /D ssize_t=long /Fo"$(INTDIR)\\" /Fd"$(INTDIR)\mod_http2_src" /FD /c
+
+.c{$(INTDIR)}.obj::
+ $(CPP) @<<
+ $(CPP_PROJ) $<
+<<
+
+.cpp{$(INTDIR)}.obj::
+ $(CPP) @<<
+ $(CPP_PROJ) $<
+<<
+
+.cxx{$(INTDIR)}.obj::
+ $(CPP) @<<
+ $(CPP_PROJ) $<
+<<
+
+.c{$(INTDIR)}.sbr::
+ $(CPP) @<<
+ $(CPP_PROJ) $<
+<<
+
+.cpp{$(INTDIR)}.sbr::
+ $(CPP) @<<
+ $(CPP_PROJ) $<
+<<
+
+.cxx{$(INTDIR)}.sbr::
+ $(CPP) @<<
+ $(CPP_PROJ) $<
+<<
+
+MTL=midl.exe
+MTL_PROJ=/nologo /D "NDEBUG" /mktyplib203 /win32
+RSC=rc.exe
+RSC_PROJ=/l 0x409 /fo"$(INTDIR)\mod_http2.res" /i "../../include" /i "../../srclib/apr/include" /d "NDEBUG" /d BIN_NAME="mod_http2.so" /d LONG_NAME="http2_module for Apache"
+BSC32=bscmake.exe
+BSC32_FLAGS=/nologo /o"$(OUTDIR)\mod_http2.bsc"
+BSC32_SBRS= \
+
+LINK32=link.exe
+LINK32_FLAGS=kernel32.lib nghttp2.lib /nologo /subsystem:windows /dll /incremental:no /pdb:"$(OUTDIR)\mod_http2.pdb" /debug /out:"$(OUTDIR)\mod_http2.so" /implib:"$(OUTDIR)\mod_http2.lib" /libpath:"..\..\srclib\nghttp2\lib\MSVC_obj" /base:@..\..\os\win32\BaseAddr.ref,mod_http2.so /opt:ref
+LINK32_OBJS= \
+ "$(INTDIR)\h2_alt_svc.obj" \
+ "$(INTDIR)\h2_bucket_beam.obj" \
+ "$(INTDIR)\h2_bucket_eos.obj" \
+ "$(INTDIR)\h2_config.obj" \
+ "$(INTDIR)\h2_conn.obj" \
+ "$(INTDIR)\h2_conn_io.obj" \
+ "$(INTDIR)\h2_ctx.obj" \
+ "$(INTDIR)\h2_filter.obj" \
+ "$(INTDIR)\h2_from_h1.obj" \
+ "$(INTDIR)\h2_h2.obj" \
+ "$(INTDIR)\h2_headers.obj" \
+ "$(INTDIR)\h2_mplx.obj" \
+ "$(INTDIR)\h2_ngn_shed.obj" \
+ "$(INTDIR)\h2_push.obj" \
+ "$(INTDIR)\h2_request.obj" \
+ "$(INTDIR)\h2_session.obj" \
+ "$(INTDIR)\h2_stream.obj" \
+ "$(INTDIR)\h2_switch.obj" \
+ "$(INTDIR)\h2_task.obj" \
+ "$(INTDIR)\h2_util.obj" \
+ "$(INTDIR)\h2_workers.obj" \
+ "$(INTDIR)\mod_http2.obj" \
+ "$(INTDIR)\mod_http2.res" \
+ "..\..\srclib\apr\Release\libapr-1.lib" \
+ "..\..\srclib\apr-util\Release\libaprutil-1.lib" \
+ "..\..\Release\libhttpd.lib"
+
+"$(OUTDIR)\mod_http2.so" : "$(OUTDIR)" $(DEF_FILE) $(LINK32_OBJS)
+ $(LINK32) @<<
+ $(LINK32_FLAGS) $(LINK32_OBJS)
+<<
+
+TargetPath=.\Release\mod_http2.so
+SOURCE="$(InputPath)"
+PostBuild_Desc=Embed .manifest
+DS_POSTBUILD_DEP=$(INTDIR)\postbld.dep
+
+# Begin Custom Macros
+OutDir=.\Release
+# End Custom Macros
+
+"$(DS_POSTBUILD_DEP)" : "$(OUTDIR)\mod_http2.so"
+ if exist .\Release\mod_http2.so.manifest mt.exe -manifest .\Release\mod_http2.so.manifest -outputresource:.\Release\mod_http2.so;2
+ echo Helper for Post-build step > "$(DS_POSTBUILD_DEP)"
+
+!ELSEIF "$(CFG)" == "mod_http2 - Win32 Debug"
+
+OUTDIR=.\Debug
+INTDIR=.\Debug
+DS_POSTBUILD_DEP=$(INTDIR)\postbld.dep
+# Begin Custom Macros
+OutDir=.\Debug
+# End Custom Macros
+
+!IF "$(RECURSE)" == "0"
+
+ALL : "$(OUTDIR)\mod_http2.so" "$(DS_POSTBUILD_DEP)"
+
+!ELSE
+
+ALL : "libhttpd - Win32 Debug" "libaprutil - Win32 Debug" "libapr - Win32 Debug" "$(OUTDIR)\mod_http2.so" "$(DS_POSTBUILD_DEP)"
+
+!ENDIF
+
+!IF "$(RECURSE)" == "1"
+CLEAN :"libapr - Win32 DebugCLEAN" "libaprutil - Win32 DebugCLEAN" "libhttpd - Win32 DebugCLEAN"
+!ELSE
+CLEAN :
+!ENDIF
+ -@erase "$(INTDIR)\h2_alt_svc.obj"
+ -@erase "$(INTDIR)\h2_bucket_beam.obj"
+ -@erase "$(INTDIR)\h2_bucket_eos.obj"
+ -@erase "$(INTDIR)\h2_config.obj"
+ -@erase "$(INTDIR)\h2_conn.obj"
+ -@erase "$(INTDIR)\h2_conn_io.obj"
+ -@erase "$(INTDIR)\h2_ctx.obj"
+ -@erase "$(INTDIR)\h2_filter.obj"
+ -@erase "$(INTDIR)\h2_from_h1.obj"
+ -@erase "$(INTDIR)\h2_h2.obj"
+ -@erase "$(INTDIR)\h2_headers.obj"
+ -@erase "$(INTDIR)\h2_mplx.obj"
+ -@erase "$(INTDIR)\h2_ngn_shed.obj"
+ -@erase "$(INTDIR)\h2_push.obj"
+ -@erase "$(INTDIR)\h2_request.obj"
+ -@erase "$(INTDIR)\h2_session.obj"
+ -@erase "$(INTDIR)\h2_stream.obj"
+ -@erase "$(INTDIR)\h2_switch.obj"
+ -@erase "$(INTDIR)\h2_task.obj"
+ -@erase "$(INTDIR)\h2_util.obj"
+ -@erase "$(INTDIR)\h2_workers.obj"
+ -@erase "$(INTDIR)\mod_http2.obj"
+ -@erase "$(INTDIR)\mod_http2.res"
+ -@erase "$(INTDIR)\mod_http2_src.idb"
+ -@erase "$(INTDIR)\mod_http2_src.pdb"
+ -@erase "$(OUTDIR)\mod_http2.exp"
+ -@erase "$(OUTDIR)\mod_http2.lib"
+ -@erase "$(OUTDIR)\mod_http2.pdb"
+ -@erase "$(OUTDIR)\mod_http2.so"
+
+"$(OUTDIR)" :
+ if not exist "$(OUTDIR)/$(NULL)" mkdir "$(OUTDIR)"
+
+CPP=cl.exe
+CPP_PROJ=/nologo /MDd /W3 /Zi /Od /I "../ssl" /I "../../include" /I "../../srclib/apr/include" /I "../../srclib/apr-util/include" /I "../../srclib/nghttp2/lib/includes" /D "_DEBUG" /D "WIN32" /D "_WINDOWS" /D ssize_t=long /Fo"$(INTDIR)\\" /Fd"$(INTDIR)\mod_http2_src" /FD /EHsc /c
+
+.c{$(INTDIR)}.obj::
+ $(CPP) @<<
+ $(CPP_PROJ) $<
+<<
+
+.cpp{$(INTDIR)}.obj::
+ $(CPP) @<<
+ $(CPP_PROJ) $<
+<<
+
+.cxx{$(INTDIR)}.obj::
+ $(CPP) @<<
+ $(CPP_PROJ) $<
+<<
+
+.c{$(INTDIR)}.sbr::
+ $(CPP) @<<
+ $(CPP_PROJ) $<
+<<
+
+.cpp{$(INTDIR)}.sbr::
+ $(CPP) @<<
+ $(CPP_PROJ) $<
+<<
+
+.cxx{$(INTDIR)}.sbr::
+ $(CPP) @<<
+ $(CPP_PROJ) $<
+<<
+
+MTL=midl.exe
+MTL_PROJ=/nologo /D "_DEBUG" /mktyplib203 /win32
+RSC=rc.exe
+RSC_PROJ=/l 0x409 /fo"$(INTDIR)\mod_http2.res" /i "../../include" /i "../../srclib/apr/include" /d "_DEBUG" /d BIN_NAME="mod_http2.so" /d LONG_NAME="http2_module for Apache"
+BSC32=bscmake.exe
+BSC32_FLAGS=/nologo /o"$(OUTDIR)\mod_http2.bsc"
+BSC32_SBRS= \
+
+LINK32=link.exe
+LINK32_FLAGS=kernel32.lib nghttp2d.lib /nologo /subsystem:windows /dll /incremental:no /pdb:"$(OUTDIR)\mod_http2.pdb" /debug /out:"$(OUTDIR)\mod_http2.so" /implib:"$(OUTDIR)\mod_http2.lib" /libpath:"..\..\srclib\nghttp2\lib\MSVC_obj" /base:@..\..\os\win32\BaseAddr.ref,mod_http2.so
+LINK32_OBJS= \
+ "$(INTDIR)\h2_alt_svc.obj" \
+ "$(INTDIR)\h2_bucket_beam.obj" \
+ "$(INTDIR)\h2_bucket_eos.obj" \
+ "$(INTDIR)\h2_config.obj" \
+ "$(INTDIR)\h2_conn.obj" \
+ "$(INTDIR)\h2_conn_io.obj" \
+ "$(INTDIR)\h2_ctx.obj" \
+ "$(INTDIR)\h2_filter.obj" \
+ "$(INTDIR)\h2_from_h1.obj" \
+ "$(INTDIR)\h2_h2.obj" \
+ "$(INTDIR)\h2_headers.obj" \
+ "$(INTDIR)\h2_mplx.obj" \
+ "$(INTDIR)\h2_ngn_shed.obj" \
+ "$(INTDIR)\h2_push.obj" \
+ "$(INTDIR)\h2_request.obj" \
+ "$(INTDIR)\h2_session.obj" \
+ "$(INTDIR)\h2_stream.obj" \
+ "$(INTDIR)\h2_switch.obj" \
+ "$(INTDIR)\h2_task.obj" \
+ "$(INTDIR)\h2_util.obj" \
+ "$(INTDIR)\h2_workers.obj" \
+ "$(INTDIR)\mod_http2.obj" \
+ "$(INTDIR)\mod_http2.res" \
+ "..\..\srclib\apr\Debug\libapr-1.lib" \
+ "..\..\srclib\apr-util\Debug\libaprutil-1.lib" \
+ "..\..\Debug\libhttpd.lib"
+
+"$(OUTDIR)\mod_http2.so" : "$(OUTDIR)" $(DEF_FILE) $(LINK32_OBJS)
+ $(LINK32) @<<
+ $(LINK32_FLAGS) $(LINK32_OBJS)
+<<
+
+TargetPath=.\Debug\mod_http2.so
+SOURCE="$(InputPath)"
+PostBuild_Desc=Embed .manifest
+DS_POSTBUILD_DEP=$(INTDIR)\postbld.dep
+
+# Begin Custom Macros
+OutDir=.\Debug
+# End Custom Macros
+
+"$(DS_POSTBUILD_DEP)" : "$(OUTDIR)\mod_http2.so"
+ if exist .\Debug\mod_http2.so.manifest mt.exe -manifest .\Debug\mod_http2.so.manifest -outputresource:.\Debug\mod_http2.so;2
+ echo Helper for Post-build step > "$(DS_POSTBUILD_DEP)"
+
+!ENDIF
+
+
+!IF "$(NO_EXTERNAL_DEPS)" != "1"
+!IF EXISTS("mod_http2.dep")
+!INCLUDE "mod_http2.dep"
+!ELSE
+!MESSAGE Warning: cannot find "mod_http2.dep"
+!ENDIF
+!ENDIF
+
+
+!IF "$(CFG)" == "mod_http2 - Win32 Release" || "$(CFG)" == "mod_http2 - Win32 Debug"
+
+!IF "$(CFG)" == "mod_http2 - Win32 Release"
+
+"libapr - Win32 Release" :
+ cd ".\..\..\srclib\apr"
+ $(MAKE) /$(MAKEFLAGS) /F ".\libapr.mak" CFG="libapr - Win32 Release"
+ cd "..\..\modules\http2"
+
+"libapr - Win32 ReleaseCLEAN" :
+ cd ".\..\..\srclib\apr"
+ $(MAKE) /$(MAKEFLAGS) /F ".\libapr.mak" CFG="libapr - Win32 Release" RECURSE=1 CLEAN
+ cd "..\..\modules\http2"
+
+!ELSEIF "$(CFG)" == "mod_http2 - Win32 Debug"
+
+"libapr - Win32 Debug" :
+ cd ".\..\..\srclib\apr"
+ $(MAKE) /$(MAKEFLAGS) /F ".\libapr.mak" CFG="libapr - Win32 Debug"
+ cd "..\..\modules\http2"
+
+"libapr - Win32 DebugCLEAN" :
+ cd ".\..\..\srclib\apr"
+ $(MAKE) /$(MAKEFLAGS) /F ".\libapr.mak" CFG="libapr - Win32 Debug" RECURSE=1 CLEAN
+ cd "..\..\modules\http2"
+
+!ENDIF
+
+!IF "$(CFG)" == "mod_http2 - Win32 Release"
+
+"libaprutil - Win32 Release" :
+ cd ".\..\..\srclib\apr-util"
+ $(MAKE) /$(MAKEFLAGS) /F ".\libaprutil.mak" CFG="libaprutil - Win32 Release"
+ cd "..\..\modules\http2"
+
+"libaprutil - Win32 ReleaseCLEAN" :
+ cd ".\..\..\srclib\apr-util"
+ $(MAKE) /$(MAKEFLAGS) /F ".\libaprutil.mak" CFG="libaprutil - Win32 Release" RECURSE=1 CLEAN
+ cd "..\..\modules\http2"
+
+!ELSEIF "$(CFG)" == "mod_http2 - Win32 Debug"
+
+"libaprutil - Win32 Debug" :
+ cd ".\..\..\srclib\apr-util"
+ $(MAKE) /$(MAKEFLAGS) /F ".\libaprutil.mak" CFG="libaprutil - Win32 Debug"
+ cd "..\..\modules\http2"
+
+"libaprutil - Win32 DebugCLEAN" :
+ cd ".\..\..\srclib\apr-util"
+ $(MAKE) /$(MAKEFLAGS) /F ".\libaprutil.mak" CFG="libaprutil - Win32 Debug" RECURSE=1 CLEAN
+ cd "..\..\modules\http2"
+
+!ENDIF
+
+!IF "$(CFG)" == "mod_http2 - Win32 Release"
+
+"libhttpd - Win32 Release" :
+ cd ".\..\.."
+ $(MAKE) /$(MAKEFLAGS) /F ".\libhttpd.mak" CFG="libhttpd - Win32 Release"
+ cd ".\modules\http2"
+
+"libhttpd - Win32 ReleaseCLEAN" :
+ cd ".\..\.."
+ $(MAKE) /$(MAKEFLAGS) /F ".\libhttpd.mak" CFG="libhttpd - Win32 Release" RECURSE=1 CLEAN
+ cd ".\modules\http2"
+
+!ELSEIF "$(CFG)" == "mod_http2 - Win32 Debug"
+
+"libhttpd - Win32 Debug" :
+ cd ".\..\.."
+ $(MAKE) /$(MAKEFLAGS) /F ".\libhttpd.mak" CFG="libhttpd - Win32 Debug"
+ cd ".\modules\http2"
+
+"libhttpd - Win32 DebugCLEAN" :
+ cd ".\..\.."
+ $(MAKE) /$(MAKEFLAGS) /F ".\libhttpd.mak" CFG="libhttpd - Win32 Debug" RECURSE=1 CLEAN
+ cd ".\modules\http2"
+
+!ENDIF
+
+SOURCE=./h2_alt_svc.c
+
+"$(INTDIR)\h2_alt_svc.obj" : $(SOURCE) "$(INTDIR)"
+
+
+SOURCE=./h2_bucket_beam.c
+
+"$(INTDIR)/h2_bucket_beam.obj" : $(SOURCE) "$(INTDIR)"
+
+
+SOURCE=./h2_bucket_eos.c
+
+"$(INTDIR)\h2_bucket_eos.obj" : $(SOURCE) "$(INTDIR)"
+
+
+SOURCE=./h2_config.c
+
+"$(INTDIR)\h2_config.obj" : $(SOURCE) "$(INTDIR)"
+
+
+SOURCE=./h2_conn.c
+
+"$(INTDIR)\h2_conn.obj" : $(SOURCE) "$(INTDIR)"
+
+
+SOURCE=./h2_conn_io.c
+
+"$(INTDIR)\h2_conn_io.obj" : $(SOURCE) "$(INTDIR)"
+
+
+SOURCE=./h2_ctx.c
+
+"$(INTDIR)\h2_ctx.obj" : $(SOURCE) "$(INTDIR)"
+
+
+SOURCE=./h2_filter.c
+
+"$(INTDIR)\h2_filter.obj" : $(SOURCE) "$(INTDIR)"
+
+
+SOURCE=./h2_from_h1.c
+
+"$(INTDIR)\h2_from_h1.obj" : $(SOURCE) "$(INTDIR)"
+
+
+SOURCE=./h2_h2.c
+
+"$(INTDIR)\h2_h2.obj" : $(SOURCE) "$(INTDIR)"
+
+
+SOURCE=./h2_headers.c
+
+"$(INTDIR)\h2_headers.obj" : $(SOURCE) "$(INTDIR)"
+
+
+SOURCE=./h2_mplx.c
+
+"$(INTDIR)\h2_mplx.obj" : $(SOURCE) "$(INTDIR)"
+
+
+SOURCE=./h2_ngn_shed.c
+
+"$(INTDIR)\h2_ngn_shed.obj" : $(SOURCE) "$(INTDIR)"
+
+
+SOURCE=./h2_push.c
+
+"$(INTDIR)\h2_push.obj" : $(SOURCE) "$(INTDIR)"
+
+
+SOURCE=./h2_request.c
+
+"$(INTDIR)\h2_request.obj" : $(SOURCE) "$(INTDIR)"
+
+
+SOURCE=./h2_session.c
+
+"$(INTDIR)\h2_session.obj" : $(SOURCE) "$(INTDIR)"
+
+
+SOURCE=./h2_stream.c
+
+"$(INTDIR)\h2_stream.obj" : $(SOURCE) "$(INTDIR)"
+
+
+SOURCE=./h2_switch.c
+
+"$(INTDIR)\h2_switch.obj" : $(SOURCE) "$(INTDIR)"
+
+
+SOURCE=./h2_task.c
+
+"$(INTDIR)\h2_task.obj" : $(SOURCE) "$(INTDIR)"
+
+
+SOURCE=./h2_util.c
+
+"$(INTDIR)\h2_util.obj" : $(SOURCE) "$(INTDIR)"
+
+
+SOURCE=./h2_workers.c
+
+"$(INTDIR)\h2_workers.obj" : $(SOURCE) "$(INTDIR)"
+
+
+SOURCE=..\..\build\win32\httpd.rc
+
+!IF "$(CFG)" == "mod_http2 - Win32 Release"
+
+
+"$(INTDIR)\mod_http2.res" : $(SOURCE) "$(INTDIR)"
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_http2.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "NDEBUG" /d BIN_NAME="mod_http2.so" /d LONG_NAME="http2_module for Apache" $(SOURCE)
+
+
+!ELSEIF "$(CFG)" == "mod_http2 - Win32 Debug"
+
+
+"$(INTDIR)\mod_http2.res" : $(SOURCE) "$(INTDIR)"
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_http2.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "_DEBUG" /d BIN_NAME="mod_http2.so" /d LONG_NAME="http2_module for Apache" $(SOURCE)
+
+
+!ENDIF
+
+SOURCE=./mod_http2.c
+
+"$(INTDIR)\mod_http2.obj" : $(SOURCE) "$(INTDIR)"
+
+
+
+!ENDIF
+
diff --git a/modules/http2/mod_proxy_http2.c b/modules/http2/mod_proxy_http2.c
new file mode 100644
index 0000000..a7e0dcd
--- /dev/null
+++ b/modules/http2/mod_proxy_http2.c
@@ -0,0 +1,674 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <nghttp2/nghttp2.h>
+
+#include <httpd.h>
+#include <mod_proxy.h>
+#include "mod_http2.h"
+
+
+#include "mod_proxy_http2.h"
+#include "h2_request.h"
+#include "h2_proxy_util.h"
+#include "h2_version.h"
+#include "h2_proxy_session.h"
+
+#define H2MIN(x,y) ((x) < (y) ? (x) : (y))
+
+static void register_hook(apr_pool_t *p);
+
+AP_DECLARE_MODULE(proxy_http2) = {
+ STANDARD20_MODULE_STUFF,
+ NULL, /* create per-directory config structure */
+ NULL, /* merge per-directory config structures */
+ NULL, /* create per-server config structure */
+ NULL, /* merge per-server config structures */
+ NULL, /* command apr_table_t */
+ register_hook, /* register hooks */
+#if defined(AP_MODULE_FLAG_NONE)
+ AP_MODULE_FLAG_ALWAYS_MERGE
+#endif
+};
+
+/* Optional functions from mod_http2 */
+static int (*is_h2)(conn_rec *c);
+static apr_status_t (*req_engine_push)(const char *name, request_rec *r,
+ http2_req_engine_init *einit);
+static apr_status_t (*req_engine_pull)(h2_req_engine *engine,
+ apr_read_type_e block,
+ int capacity,
+ request_rec **pr);
+static void (*req_engine_done)(h2_req_engine *engine, conn_rec *r_conn,
+ apr_status_t status);
+
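+/* Per-connection proxy context: keeps the backend proxy_conn_rec, the
+ * request engine hosted here (if any), and the fifo of requests still to
+ * be sent to the backend. It is stored in the connection's module config
+ * for the duration of the handler. */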
+typedef struct h2_proxy_ctx {
+ conn_rec *owner;
+ apr_pool_t *pool;
+ request_rec *rbase;
+ server_rec *server;
+ const char *proxy_func;
+ char server_portstr[32];
+ proxy_conn_rec *p_conn;
+ proxy_worker *worker;
+ proxy_server_conf *conf;
+
+ h2_req_engine *engine;
+ const char *engine_id;
+ const char *engine_type;
+ apr_pool_t *engine_pool;
+ apr_size_t req_buffer_size;
+ h2_proxy_fifo *requests;
+ int capacity;
+
+ unsigned standalone : 1;
+ unsigned is_ssl : 1;
+ unsigned flushall : 1;
+
+ apr_status_t r_status; /* status of our first request work */
+ h2_proxy_session *session; /* current http2 session against backend */
+} h2_proxy_ctx;
+
+static int h2_proxy_post_config(apr_pool_t *p, apr_pool_t *plog,
+ apr_pool_t *ptemp, server_rec *s)
+{
+ void *data = NULL;
+ const char *init_key = "mod_proxy_http2_init_counter";
+ nghttp2_info *ngh2;
+ apr_status_t status = APR_SUCCESS;
+ (void)plog;(void)ptemp;
+
+ apr_pool_userdata_get(&data, init_key, s->process->pool);
+ if ( data == NULL ) {
+ apr_pool_userdata_set((const void *)1, init_key,
+ apr_pool_cleanup_null, s->process->pool);
+ return APR_SUCCESS;
+ }
+
+ ngh2 = nghttp2_version(0);
+ ap_log_error( APLOG_MARK, APLOG_INFO, 0, s, APLOGNO(03349)
+ "mod_proxy_http2 (v%s, nghttp2 %s), initializing...",
+ MOD_HTTP2_VERSION, ngh2? ngh2->version_str : "unknown");
+
+ is_h2 = APR_RETRIEVE_OPTIONAL_FN(http2_is_h2);
+ req_engine_push = APR_RETRIEVE_OPTIONAL_FN(http2_req_engine_push);
+ req_engine_pull = APR_RETRIEVE_OPTIONAL_FN(http2_req_engine_pull);
+ req_engine_done = APR_RETRIEVE_OPTIONAL_FN(http2_req_engine_done);
+
+ /* we need all of them */
+ if (!req_engine_push || !req_engine_pull || !req_engine_done) {
+ req_engine_push = NULL;
+ req_engine_pull = NULL;
+ req_engine_done = NULL;
+ }
+
+ return status;
+}
+
+/**
+ * Canonicalize the URL into the request, if it is meant for us.
+ * Slightly modified copy of the canon handler from mod_proxy_http.
+ */
+static int proxy_http2_canon(request_rec *r, char *url)
+{
+ char *host, *path, sport[7];
+ char *search = NULL;
+ const char *err;
+ const char *scheme;
+ const char *http_scheme;
+ apr_port_t port, def_port;
+
+ /* ap_port_of_scheme() */
+ if (ap_cstr_casecmpn(url, "h2c:", 4) == 0) {
+ url += 4;
+ scheme = "h2c";
+ http_scheme = "http";
+ }
+ else if (ap_cstr_casecmpn(url, "h2:", 3) == 0) {
+ url += 3;
+ scheme = "h2";
+ http_scheme = "https";
+ }
+ else {
+ return DECLINED;
+ }
+ port = def_port = ap_proxy_port_of_scheme(http_scheme);
+
+ ap_log_rerror(APLOG_MARK, APLOG_TRACE1, 0, r,
+ "HTTP2: canonicalising URL %s", url);
+
+    /* do a syntactic check.
+ * We break the URL into host, port, path, search
+ */
+ err = ap_proxy_canon_netloc(r->pool, &url, NULL, NULL, &host, &port);
+ if (err) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(03350)
+ "error parsing URL %s: %s", url, err);
+ return HTTP_BAD_REQUEST;
+ }
+
+ /*
+ * now parse path/search args, according to rfc1738:
+ * process the path.
+ *
+ * In a reverse proxy, our URL has been processed, so canonicalise
+     * unless proxy-nocanon is set to say it's raw.
+     * In a forward proxy, we have the raw URL and MUST NOT MANGLE it.
+ */
+ switch (r->proxyreq) {
+ default: /* wtf are we doing here? */
+ case PROXYREQ_REVERSE:
+ if (apr_table_get(r->notes, "proxy-nocanon")) {
+ path = url; /* this is the raw path */
+ }
+ else {
+ path = ap_proxy_canonenc(r->pool, url, (int)strlen(url),
+ enc_path, 0, r->proxyreq);
+ search = r->args;
+ }
+ break;
+ case PROXYREQ_PROXY:
+ path = url;
+ break;
+ }
+
+ if (path == NULL) {
+ return HTTP_BAD_REQUEST;
+ }
+
+ if (port != def_port) {
+ apr_snprintf(sport, sizeof(sport), ":%d", port);
+ }
+ else {
+ sport[0] = '\0';
+ }
+
+ if (ap_strchr_c(host, ':')) { /* if literal IPv6 address */
+ host = apr_pstrcat(r->pool, "[", host, "]", NULL);
+ }
+ r->filename = apr_pstrcat(r->pool, "proxy:", scheme, "://", host, sport,
+ "/", path, (search) ? "?" : "", (search) ? search : "", NULL);
+ return OK;
+}
+
+static void out_consumed(void *baton, conn_rec *c, apr_off_t bytes)
+{
+ h2_proxy_ctx *ctx = baton;
+
+ if (ctx->session) {
+ h2_proxy_session_update_window(ctx->session, c, bytes);
+ }
+}
+
+static apr_status_t proxy_engine_init(h2_req_engine *engine,
+ const char *id,
+ const char *type,
+ apr_pool_t *pool,
+ apr_size_t req_buffer_size,
+ request_rec *r,
+ http2_output_consumed **pconsumed,
+ void **pctx)
+{
+ h2_proxy_ctx *ctx = ap_get_module_config(r->connection->conn_config,
+ &proxy_http2_module);
+ if (!ctx) {
+ ap_log_rerror(APLOG_MARK, APLOG_WARNING, 0, r, APLOGNO(03368)
+ "h2_proxy_session, engine init, no ctx found");
+ return APR_ENOTIMPL;
+ }
+
+ ctx->pool = pool;
+ ctx->engine = engine;
+ ctx->engine_id = id;
+ ctx->engine_type = type;
+ ctx->engine_pool = pool;
+ ctx->req_buffer_size = req_buffer_size;
+ ctx->capacity = H2MIN(100, h2_proxy_fifo_capacity(ctx->requests));
+
+ *pconsumed = out_consumed;
+ *pctx = ctx;
+ return APR_SUCCESS;
+}
+
+static apr_status_t add_request(h2_proxy_session *session, request_rec *r)
+{
+ h2_proxy_ctx *ctx = session->user_data;
+ const char *url;
+ apr_status_t status;
+
+ url = apr_table_get(r->notes, H2_PROXY_REQ_URL_NOTE);
+ apr_table_setn(r->notes, "proxy-source-port", apr_psprintf(r->pool, "%hu",
+ ctx->p_conn->connection->local_addr->port));
+ status = h2_proxy_session_submit(session, url, r, ctx->standalone);
+ if (status != APR_SUCCESS) {
+ ap_log_cerror(APLOG_MARK, APLOG_ERR, status, r->connection, APLOGNO(03351)
+ "pass request body failed to %pI (%s) from %s (%s)",
+ ctx->p_conn->addr, ctx->p_conn->hostname ?
+ ctx->p_conn->hostname: "", session->c->client_ip,
+ session->c->remote_host ? session->c->remote_host: "");
+ }
+ return status;
+}
+
+static void request_done(h2_proxy_ctx *ctx, request_rec *r,
+ apr_status_t status, int touched)
+{
+ const char *task_id = apr_table_get(r->connection->notes, H2_TASK_ID_NOTE);
+
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, r->connection,
+ "h2_proxy_session(%s): request done %s, touched=%d",
+ ctx->engine_id, task_id, touched);
+ if (status != APR_SUCCESS) {
+ if (!touched) {
+ /* untouched request, need rescheduling */
+ status = h2_proxy_fifo_push(ctx->requests, r);
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, r->connection,
+ APLOGNO(03369)
+ "h2_proxy_session(%s): rescheduled request %s",
+ ctx->engine_id, task_id);
+ return;
+ }
+ else {
+ const char *uri;
+ uri = apr_uri_unparse(r->pool, &r->parsed_uri, 0);
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, r->connection,
+ APLOGNO(03471) "h2_proxy_session(%s): request %s -> %s "
+ "not complete, cannot repeat",
+ ctx->engine_id, task_id, uri);
+ }
+ }
+
+ if (r == ctx->rbase) {
+ ctx->r_status = ((status == APR_SUCCESS)? APR_SUCCESS
+ : HTTP_SERVICE_UNAVAILABLE);
+ }
+
+ if (req_engine_done && ctx->engine) {
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, r->connection,
+ APLOGNO(03370)
+ "h2_proxy_session(%s): finished request %s",
+ ctx->engine_id, task_id);
+ req_engine_done(ctx->engine, r->connection, status);
+ }
+}
+
+static void session_req_done(h2_proxy_session *session, request_rec *r,
+ apr_status_t status, int touched)
+{
+ request_done(session->user_data, r, status, touched);
+}
+
+static apr_status_t next_request(h2_proxy_ctx *ctx, int before_leave)
+{
+ if (h2_proxy_fifo_count(ctx->requests) > 0) {
+ return APR_SUCCESS;
+ }
+ else if (req_engine_pull && ctx->engine) {
+ apr_status_t status;
+ request_rec *r = NULL;
+
+ status = req_engine_pull(ctx->engine, before_leave?
+ APR_BLOCK_READ: APR_NONBLOCK_READ,
+ ctx->capacity, &r);
+ if (status == APR_SUCCESS && r) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE3, status, ctx->owner,
+ "h2_proxy_engine(%s): pulled request (%s) %s",
+ ctx->engine_id,
+ before_leave? "before leave" : "regular",
+ r->the_request);
+ h2_proxy_fifo_push(ctx->requests, r);
+ }
+ return APR_STATUS_IS_EAGAIN(status)? APR_SUCCESS : status;
+ }
+ return APR_EOF;
+}
+
+static apr_status_t proxy_engine_run(h2_proxy_ctx *ctx)
+{
+ apr_status_t status = OK;
+ int h2_front;
+ request_rec *r;
+
+ /* Step Four: Send the Request in a new HTTP/2 stream and
+     * loop until we get the response or encounter errors.
+ */
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, ctx->owner,
+ "eng(%s): setup session", ctx->engine_id);
+ h2_front = is_h2? is_h2(ctx->owner) : 0;
+ ctx->session = h2_proxy_session_setup(ctx->engine_id, ctx->p_conn, ctx->conf,
+ h2_front, 30,
+ h2_proxy_log2((int)ctx->req_buffer_size),
+ session_req_done);
+ if (!ctx->session) {
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, ctx->owner,
+ APLOGNO(03372) "session unavailable");
+ return HTTP_SERVICE_UNAVAILABLE;
+ }
+
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, ctx->owner, APLOGNO(03373)
+ "eng(%s): run session %s", ctx->engine_id, ctx->session->id);
+ ctx->session->user_data = ctx;
+
+ while (!ctx->owner->aborted) {
+ if (APR_SUCCESS == h2_proxy_fifo_try_pull(ctx->requests, (void**)&r)) {
+ add_request(ctx->session, r);
+ }
+
+ status = h2_proxy_session_process(ctx->session);
+
+ if (status == APR_SUCCESS) {
+ apr_status_t s2;
+ /* ongoing processing, call again */
+ if (ctx->session->remote_max_concurrent > 0
+ && ctx->session->remote_max_concurrent != ctx->capacity) {
+ ctx->capacity = H2MIN((int)ctx->session->remote_max_concurrent,
+ h2_proxy_fifo_capacity(ctx->requests));
+ }
+ s2 = next_request(ctx, 0);
+ if (s2 == APR_ECONNABORTED) {
+ /* master connection gone */
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, s2, ctx->owner,
+ APLOGNO(03374) "eng(%s): pull request",
+ ctx->engine_id);
+ /* give notice that we're leaving and cancel all ongoing
+ * streams. */
+ next_request(ctx, 1);
+ h2_proxy_session_cancel_all(ctx->session);
+ h2_proxy_session_process(ctx->session);
+ status = ctx->r_status = APR_SUCCESS;
+ break;
+ }
+ if ((h2_proxy_fifo_count(ctx->requests) == 0)
+ && h2_proxy_ihash_empty(ctx->session->streams)) {
+ break;
+ }
+ }
+ else {
+ /* end of processing, maybe error */
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, ctx->owner,
+ APLOGNO(03375) "eng(%s): end of session %s",
+ ctx->engine_id, ctx->session->id);
+ /*
+ * Any open stream of that session needs to
+ * a) be reopened on the new session iff safe to do so
+ * b) reported as done (failed) otherwise
+ */
+ h2_proxy_session_cleanup(ctx->session, session_req_done);
+ break;
+ }
+ }
+
+ ctx->session->user_data = NULL;
+ ctx->session = NULL;
+
+ return status;
+}
+
+static apr_status_t push_request_somewhere(h2_proxy_ctx *ctx, request_rec *r)
+{
+ conn_rec *c = ctx->owner;
+ const char *engine_type, *hostname;
+
+ hostname = (ctx->p_conn->ssl_hostname?
+ ctx->p_conn->ssl_hostname : ctx->p_conn->hostname);
+ engine_type = apr_psprintf(ctx->pool, "proxy_http2 %s%s", hostname,
+ ctx->server_portstr);
+
+ if (c->master && req_engine_push && r && is_h2 && is_h2(c)) {
+        /* If we have req_engine capabilities, push the handling of this
+         * request (e.g. slave connection) to a proxy_http2 engine which
+         * uses the same backend. We may be called back to create such an
+         * engine ourselves. */
+ if (req_engine_push(engine_type, r, proxy_engine_init) == APR_SUCCESS) {
+ if (ctx->engine == NULL) {
+ /* request has been assigned to an engine in another thread */
+ return SUSPENDED;
+ }
+ }
+ }
+
+ if (!ctx->engine) {
+        /* No engine was available or has been initialized; handle this
+         * request ourselves. */
+ ctx->engine_id = apr_psprintf(ctx->pool, "eng-proxy-%ld", c->id);
+ ctx->engine_type = engine_type;
+ ctx->engine_pool = ctx->pool;
+ ctx->req_buffer_size = (32*1024);
+ ctx->standalone = 1;
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c,
+ "h2_proxy_http2(%ld): setup standalone engine for type %s",
+ c->id, engine_type);
+ }
+ else {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c,
+ "H2: hosting engine %s", ctx->engine_id);
+ }
+
+ return h2_proxy_fifo_push(ctx->requests, r);
+}
+
+static int proxy_http2_handler(request_rec *r,
+ proxy_worker *worker,
+ proxy_server_conf *conf,
+ char *url,
+ const char *proxyname,
+ apr_port_t proxyport)
+{
+ const char *proxy_func;
+ char *locurl = url, *u;
+ apr_size_t slen;
+ int is_ssl = 0;
+ apr_status_t status;
+ h2_proxy_ctx *ctx;
+ apr_uri_t uri;
+ int reconnects = 0;
+
+ /* find the scheme */
+ if ((url[0] != 'h' && url[0] != 'H') || url[1] != '2') {
+ return DECLINED;
+ }
+ u = strchr(url, ':');
+ if (u == NULL || u[1] != '/' || u[2] != '/' || u[3] == '\0') {
+ return DECLINED;
+ }
+ slen = (u - url);
+ switch(slen) {
+ case 2:
+ proxy_func = "H2";
+ is_ssl = 1;
+ break;
+ case 3:
+ if (url[2] != 'c' && url[2] != 'C') {
+ return DECLINED;
+ }
+ proxy_func = "H2C";
+ break;
+ default:
+ return DECLINED;
+ }
+
+ ctx = apr_pcalloc(r->pool, sizeof(*ctx));
+ ctx->owner = r->connection;
+ ctx->pool = r->pool;
+ ctx->rbase = r;
+ ctx->server = r->server;
+ ctx->proxy_func = proxy_func;
+ ctx->is_ssl = is_ssl;
+ ctx->worker = worker;
+ ctx->conf = conf;
+ ctx->flushall = apr_table_get(r->subprocess_env, "proxy-flushall")? 1 : 0;
+ ctx->r_status = HTTP_SERVICE_UNAVAILABLE;
+
+ h2_proxy_fifo_set_create(&ctx->requests, ctx->pool, 100);
+
+ ap_set_module_config(ctx->owner->conn_config, &proxy_http2_module, ctx);
+
+ /* scheme says, this is for us. */
+ apr_table_setn(ctx->rbase->notes, H2_PROXY_REQ_URL_NOTE, url);
+ ap_log_rerror(APLOG_MARK, APLOG_TRACE1, 0, ctx->rbase,
+ "H2: serving URL %s", url);
+
+run_connect:
+ /* Get a proxy_conn_rec from the worker, might be a new one, might
+ * be one still open from another request, or it might fail if the
+ * worker is stopped or in error. */
+ if ((status = ap_proxy_acquire_connection(ctx->proxy_func, &ctx->p_conn,
+ ctx->worker, ctx->server)) != OK) {
+ goto cleanup;
+ }
+
+ ctx->p_conn->is_ssl = ctx->is_ssl;
+ if (ctx->is_ssl && ctx->p_conn->connection) {
+        /* If there is pending metadata on the connection (e.g. a TLS alert),
+         * let mod_ssl detect it, and create a new connection below.
+ */
+ apr_bucket_brigade *tmp_bb;
+ tmp_bb = apr_brigade_create(ctx->rbase->pool,
+ ctx->rbase->connection->bucket_alloc);
+ status = ap_get_brigade(ctx->p_conn->connection->input_filters, tmp_bb,
+ AP_MODE_SPECULATIVE, APR_NONBLOCK_READ, 1);
+ if (status != APR_SUCCESS && !APR_STATUS_IS_EAGAIN(status)) {
+ ctx->p_conn->close = 1;
+ }
+ apr_brigade_cleanup(tmp_bb);
+ }
+
+ /* Step One: Determine the URL to connect to (might be a proxy),
+ * initialize the backend accordingly and determine the server
+ * port string we can expect in responses. */
+ if ((status = ap_proxy_determine_connection(ctx->pool, ctx->rbase, conf, worker,
+ ctx->p_conn, &uri, &locurl,
+ proxyname, proxyport,
+ ctx->server_portstr,
+ sizeof(ctx->server_portstr))) != OK) {
+ goto cleanup;
+ }
+
+ /* If we are not already hosting an engine, try to push the request
+ * to an already existing engine or host a new engine here. */
+ if (r && !ctx->engine) {
+ ctx->r_status = push_request_somewhere(ctx, r);
+ r = NULL;
+ if (ctx->r_status == SUSPENDED) {
+ /* request was pushed to another thread, leave processing here */
+ goto cleanup;
+ }
+ }
+
+ /* Step Two: Make the Connection (or check that an already existing
+ * socket is still usable). On success, we have a socket connected to
+ * backend->hostname. */
+ if (ap_proxy_connect_backend(ctx->proxy_func, ctx->p_conn, ctx->worker,
+ ctx->server)) {
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, ctx->owner, APLOGNO(03352)
+ "H2: failed to make connection to backend: %s",
+ ctx->p_conn->hostname);
+ goto reconnect;
+ }
+
+ /* Step Three: Create conn_rec for the socket we have open now. */
+ if (!ctx->p_conn->connection) {
+ status = ap_proxy_connection_create_ex(ctx->proxy_func,
+ ctx->p_conn, ctx->rbase);
+ if (status != OK) {
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, ctx->owner, APLOGNO(03353)
+ "setup new connection: is_ssl=%d %s %s %s",
+ ctx->p_conn->is_ssl, ctx->p_conn->ssl_hostname,
+ locurl, ctx->p_conn->hostname);
+ goto reconnect;
+ }
+
+ if (!ctx->p_conn->data) {
+            /* New connection: set a note on the connection saying which CN
+             * is requested and which protocol we want */
+ if (ctx->p_conn->ssl_hostname) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, ctx->owner,
+ "set SNI to %s for (%s)",
+ ctx->p_conn->ssl_hostname,
+ ctx->p_conn->hostname);
+ apr_table_setn(ctx->p_conn->connection->notes,
+ "proxy-request-hostname", ctx->p_conn->ssl_hostname);
+ }
+ if (ctx->is_ssl) {
+ apr_table_setn(ctx->p_conn->connection->notes,
+ "proxy-request-alpn-protos", "h2");
+ }
+ }
+ }
+
+run_session:
+ status = proxy_engine_run(ctx);
+ if (status == APR_SUCCESS) {
+ /* session and connection still ok */
+ if (next_request(ctx, 1) == APR_SUCCESS) {
+ /* more requests, run again */
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, ctx->owner, APLOGNO(03376)
+ "run_session, again");
+ goto run_session;
+ }
+ /* done */
+ ctx->engine = NULL;
+ }
+
+reconnect:
+ if (next_request(ctx, 1) == APR_SUCCESS) {
+ /* Still more to do, tear down old conn and start over */
+ if (ctx->p_conn) {
+ ctx->p_conn->close = 1;
+ /*only in trunk so far */
+ /*proxy_run_detach_backend(r, ctx->p_conn);*/
+ ap_proxy_release_connection(ctx->proxy_func, ctx->p_conn, ctx->server);
+ ctx->p_conn = NULL;
+ }
+ ++reconnects;
+ if (reconnects < 5 && !ctx->owner->aborted) {
+ goto run_connect;
+ }
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, ctx->owner, APLOGNO(10023)
+ "giving up after %d reconnects, %d requests todo",
+ reconnects, h2_proxy_fifo_count(ctx->requests));
+ }
+
+cleanup:
+ if (ctx->p_conn) {
+ if (status != APR_SUCCESS) {
+ /* close socket when errors happened or session shut down (EOF) */
+ ctx->p_conn->close = 1;
+ }
+ /*only in trunk so far */
+ /*proxy_run_detach_backend(ctx->rbase, ctx->p_conn);*/
+ ap_proxy_release_connection(ctx->proxy_func, ctx->p_conn, ctx->server);
+ ctx->p_conn = NULL;
+ }
+
+    /* Any requests still in the fifo now need to be failed */
+ while (APR_SUCCESS == h2_proxy_fifo_try_pull(ctx->requests, (void**)&r)) {
+ request_done(ctx, r, HTTP_SERVICE_UNAVAILABLE, 1);
+ }
+
+ ap_set_module_config(ctx->owner->conn_config, &proxy_http2_module, NULL);
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, ctx->owner,
+ APLOGNO(03377) "leaving handler");
+ return ctx->r_status;
+}
+
+static void register_hook(apr_pool_t *p)
+{
+ ap_hook_post_config(h2_proxy_post_config, NULL, NULL, APR_HOOK_MIDDLE);
+
+ proxy_hook_scheme_handler(proxy_http2_handler, NULL, NULL, APR_HOOK_FIRST);
+ proxy_hook_canon_handler(proxy_http2_canon, NULL, NULL, APR_HOOK_FIRST);
+}
+
diff --git a/modules/http2/mod_proxy_http2.dep b/modules/http2/mod_proxy_http2.dep
new file mode 100644
index 0000000..641fca6
--- /dev/null
+++ b/modules/http2/mod_proxy_http2.dep
@@ -0,0 +1,208 @@
+# Microsoft Developer Studio Generated Dependency File, included by mod_proxy_http2.mak
+
+./h2_proxy_session.c : \
+ "..\..\include\ap_config.h"\
+ "..\..\include\ap_config_layout.h"\
+ "..\..\include\ap_expr.h"\
+ "..\..\include\ap_hooks.h"\
+ "..\..\include\ap_mmn.h"\
+ "..\..\include\ap_mpm.h"\
+ "..\..\include\ap_provider.h"\
+ "..\..\include\ap_regex.h"\
+ "..\..\include\ap_release.h"\
+ "..\..\include\ap_slotmem.h"\
+ "..\..\include\apache_noprobes.h"\
+ "..\..\include\http_config.h"\
+ "..\..\include\http_connection.h"\
+ "..\..\include\http_core.h"\
+ "..\..\include\http_log.h"\
+ "..\..\include\http_main.h"\
+ "..\..\include\http_protocol.h"\
+ "..\..\include\http_request.h"\
+ "..\..\include\http_vhost.h"\
+ "..\..\include\httpd.h"\
+ "..\..\include\mod_proxy.h"\
+ "..\..\include\mpm_common.h"\
+ "..\..\include\os.h"\
+ "..\..\include\scoreboard.h"\
+ "..\..\include\util_cfgtree.h"\
+ "..\..\include\util_charset.h"\
+ "..\..\include\util_ebcdic.h"\
+ "..\..\include\util_filter.h"\
+ "..\..\include\util_mutex.h"\
+ "..\..\srclib\apr-util\include\apr_buckets.h"\
+ "..\..\srclib\apr-util\include\apr_date.h"\
+ "..\..\srclib\apr-util\include\apr_hooks.h"\
+ "..\..\srclib\apr-util\include\apr_md5.h"\
+ "..\..\srclib\apr-util\include\apr_optional.h"\
+ "..\..\srclib\apr-util\include\apr_optional_hooks.h"\
+ "..\..\srclib\apr-util\include\apr_reslist.h"\
+ "..\..\srclib\apr-util\include\apr_strmatch.h"\
+ "..\..\srclib\apr-util\include\apr_uri.h"\
+ "..\..\srclib\apr-util\include\apr_uuid.h"\
+ "..\..\srclib\apr-util\include\apr_xlate.h"\
+ "..\..\srclib\apr-util\include\apu.h"\
+ "..\..\srclib\apr\include\apr.h"\
+ "..\..\srclib\apr\include\apr_allocator.h"\
+ "..\..\srclib\apr\include\apr_dso.h"\
+ "..\..\srclib\apr\include\apr_errno.h"\
+ "..\..\srclib\apr\include\apr_file_info.h"\
+ "..\..\srclib\apr\include\apr_file_io.h"\
+ "..\..\srclib\apr\include\apr_fnmatch.h"\
+ "..\..\srclib\apr\include\apr_general.h"\
+ "..\..\srclib\apr\include\apr_global_mutex.h"\
+ "..\..\srclib\apr\include\apr_hash.h"\
+ "..\..\srclib\apr\include\apr_inherit.h"\
+ "..\..\srclib\apr\include\apr_lib.h"\
+ "..\..\srclib\apr\include\apr_mmap.h"\
+ "..\..\srclib\apr\include\apr_network_io.h"\
+ "..\..\srclib\apr\include\apr_poll.h"\
+ "..\..\srclib\apr\include\apr_pools.h"\
+ "..\..\srclib\apr\include\apr_portable.h"\
+ "..\..\srclib\apr\include\apr_proc_mutex.h"\
+ "..\..\srclib\apr\include\apr_ring.h"\
+ "..\..\srclib\apr\include\apr_shm.h"\
+ "..\..\srclib\apr\include\apr_strings.h"\
+ "..\..\srclib\apr\include\apr_tables.h"\
+ "..\..\srclib\apr\include\apr_thread_mutex.h"\
+ "..\..\srclib\apr\include\apr_thread_proc.h"\
+ "..\..\srclib\apr\include\apr_time.h"\
+ "..\..\srclib\apr\include\apr_user.h"\
+ "..\..\srclib\apr\include\apr_want.h"\
+ "..\..\srclib\nghttp2\lib\includes\nghttp2\nghttp2.h"\
+ "..\..\srclib\nghttp2\lib\includes\nghttp2\nghttp2ver.h"\
+ ".\h2.h"\
+ ".\h2_proxy_session.h"\
+ ".\h2_proxy_util.h"\
+ ".\mod_http2.h"\
+
+
+./h2_proxy_util.c : \
+ "..\..\include\ap_config.h"\
+ "..\..\include\ap_config_layout.h"\
+ "..\..\include\ap_expr.h"\
+ "..\..\include\ap_hooks.h"\
+ "..\..\include\ap_mmn.h"\
+ "..\..\include\ap_regex.h"\
+ "..\..\include\ap_release.h"\
+ "..\..\include\apache_noprobes.h"\
+ "..\..\include\http_config.h"\
+ "..\..\include\http_core.h"\
+ "..\..\include\http_log.h"\
+ "..\..\include\http_request.h"\
+ "..\..\include\httpd.h"\
+ "..\..\include\os.h"\
+ "..\..\include\util_cfgtree.h"\
+ "..\..\include\util_filter.h"\
+ "..\..\srclib\apr-util\include\apr_buckets.h"\
+ "..\..\srclib\apr-util\include\apr_hooks.h"\
+ "..\..\srclib\apr-util\include\apr_optional.h"\
+ "..\..\srclib\apr-util\include\apr_optional_hooks.h"\
+ "..\..\srclib\apr-util\include\apr_uri.h"\
+ "..\..\srclib\apr-util\include\apu.h"\
+ "..\..\srclib\apr\include\apr.h"\
+ "..\..\srclib\apr\include\apr_allocator.h"\
+ "..\..\srclib\apr\include\apr_errno.h"\
+ "..\..\srclib\apr\include\apr_file_info.h"\
+ "..\..\srclib\apr\include\apr_file_io.h"\
+ "..\..\srclib\apr\include\apr_general.h"\
+ "..\..\srclib\apr\include\apr_hash.h"\
+ "..\..\srclib\apr\include\apr_inherit.h"\
+ "..\..\srclib\apr\include\apr_mmap.h"\
+ "..\..\srclib\apr\include\apr_network_io.h"\
+ "..\..\srclib\apr\include\apr_poll.h"\
+ "..\..\srclib\apr\include\apr_pools.h"\
+ "..\..\srclib\apr\include\apr_ring.h"\
+ "..\..\srclib\apr\include\apr_strings.h"\
+ "..\..\srclib\apr\include\apr_tables.h"\
+ "..\..\srclib\apr\include\apr_thread_mutex.h"\
+ "..\..\srclib\apr\include\apr_thread_proc.h"\
+ "..\..\srclib\apr\include\apr_time.h"\
+ "..\..\srclib\apr\include\apr_user.h"\
+ "..\..\srclib\apr\include\apr_want.h"\
+ "..\..\srclib\nghttp2\lib\includes\nghttp2\nghttp2.h"\
+ "..\..\srclib\nghttp2\lib\includes\nghttp2\nghttp2ver.h"\
+ ".\h2.h"\
+ ".\h2_proxy_util.h"\
+
+
+..\..\build\win32\httpd.rc : \
+ "..\..\include\ap_release.h"\
+
+
+./mod_proxy_http2.c : \
+ "..\..\include\ap_config.h"\
+ "..\..\include\ap_config_layout.h"\
+ "..\..\include\ap_expr.h"\
+ "..\..\include\ap_hooks.h"\
+ "..\..\include\ap_mmn.h"\
+ "..\..\include\ap_provider.h"\
+ "..\..\include\ap_regex.h"\
+ "..\..\include\ap_release.h"\
+ "..\..\include\ap_slotmem.h"\
+ "..\..\include\apache_noprobes.h"\
+ "..\..\include\http_config.h"\
+ "..\..\include\http_connection.h"\
+ "..\..\include\http_core.h"\
+ "..\..\include\http_log.h"\
+ "..\..\include\http_main.h"\
+ "..\..\include\http_protocol.h"\
+ "..\..\include\http_request.h"\
+ "..\..\include\http_vhost.h"\
+ "..\..\include\httpd.h"\
+ "..\..\include\mod_proxy.h"\
+ "..\..\include\os.h"\
+ "..\..\include\util_cfgtree.h"\
+ "..\..\include\util_charset.h"\
+ "..\..\include\util_ebcdic.h"\
+ "..\..\include\util_filter.h"\
+ "..\..\include\util_mutex.h"\
+ "..\..\srclib\apr-util\include\apr_buckets.h"\
+ "..\..\srclib\apr-util\include\apr_date.h"\
+ "..\..\srclib\apr-util\include\apr_hooks.h"\
+ "..\..\srclib\apr-util\include\apr_md5.h"\
+ "..\..\srclib\apr-util\include\apr_optional.h"\
+ "..\..\srclib\apr-util\include\apr_optional_hooks.h"\
+ "..\..\srclib\apr-util\include\apr_reslist.h"\
+ "..\..\srclib\apr-util\include\apr_strmatch.h"\
+ "..\..\srclib\apr-util\include\apr_uri.h"\
+ "..\..\srclib\apr-util\include\apr_uuid.h"\
+ "..\..\srclib\apr-util\include\apr_xlate.h"\
+ "..\..\srclib\apr-util\include\apu.h"\
+ "..\..\srclib\apr\include\apr.h"\
+ "..\..\srclib\apr\include\apr_allocator.h"\
+ "..\..\srclib\apr\include\apr_dso.h"\
+ "..\..\srclib\apr\include\apr_errno.h"\
+ "..\..\srclib\apr\include\apr_file_info.h"\
+ "..\..\srclib\apr\include\apr_file_io.h"\
+ "..\..\srclib\apr\include\apr_fnmatch.h"\
+ "..\..\srclib\apr\include\apr_general.h"\
+ "..\..\srclib\apr\include\apr_global_mutex.h"\
+ "..\..\srclib\apr\include\apr_hash.h"\
+ "..\..\srclib\apr\include\apr_inherit.h"\
+ "..\..\srclib\apr\include\apr_lib.h"\
+ "..\..\srclib\apr\include\apr_mmap.h"\
+ "..\..\srclib\apr\include\apr_network_io.h"\
+ "..\..\srclib\apr\include\apr_poll.h"\
+ "..\..\srclib\apr\include\apr_pools.h"\
+ "..\..\srclib\apr\include\apr_portable.h"\
+ "..\..\srclib\apr\include\apr_proc_mutex.h"\
+ "..\..\srclib\apr\include\apr_ring.h"\
+ "..\..\srclib\apr\include\apr_shm.h"\
+ "..\..\srclib\apr\include\apr_strings.h"\
+ "..\..\srclib\apr\include\apr_tables.h"\
+ "..\..\srclib\apr\include\apr_thread_mutex.h"\
+ "..\..\srclib\apr\include\apr_thread_proc.h"\
+ "..\..\srclib\apr\include\apr_time.h"\
+ "..\..\srclib\apr\include\apr_user.h"\
+ "..\..\srclib\apr\include\apr_want.h"\
+ "..\..\srclib\nghttp2\lib\includes\nghttp2\nghttp2.h"\
+ "..\..\srclib\nghttp2\lib\includes\nghttp2\nghttp2ver.h"\
+ ".\h2.h"\
+ ".\h2_proxy_session.h"\
+ ".\h2_request.h"\
+ ".\h2_proxy_util.h"\
+ ".\h2_version.h"\
+ ".\mod_http2.h"\
+ ".\mod_proxy_http2.h"\
+
diff --git a/modules/http2/mod_proxy_http2.dsp b/modules/http2/mod_proxy_http2.dsp
new file mode 100644
index 0000000..5d6305f
--- /dev/null
+++ b/modules/http2/mod_proxy_http2.dsp
@@ -0,0 +1,119 @@
+# Microsoft Developer Studio Project File - Name="mod_proxy_http2" - Package Owner=<4>
+# Microsoft Developer Studio Generated Build File, Format Version 6.00
+# ** DO NOT EDIT **
+
+# TARGTYPE "Win32 (x86) Dynamic-Link Library" 0x0102
+
+CFG=mod_proxy_http2 - Win32 Release
+!MESSAGE This is not a valid makefile. To build this project using NMAKE,
+!MESSAGE use the Export Makefile command and run
+!MESSAGE
+!MESSAGE NMAKE /f "mod_proxy_http2.mak".
+!MESSAGE
+!MESSAGE You can specify a configuration when running NMAKE
+!MESSAGE by defining the macro CFG on the command line. For example:
+!MESSAGE
+!MESSAGE NMAKE /f "mod_proxy_http2.mak" CFG="mod_proxy_http2 - Win32 Release"
+!MESSAGE
+!MESSAGE Possible choices for configuration are:
+!MESSAGE
+!MESSAGE "mod_proxy_http2 - Win32 Release" (based on "Win32 (x86) Dynamic-Link Library")
+!MESSAGE "mod_proxy_http2 - Win32 Debug" (based on "Win32 (x86) Dynamic-Link Library")
+!MESSAGE
+
+# Begin Project
+# PROP AllowPerConfigDependencies 0
+# PROP Scc_ProjName ""
+# PROP Scc_LocalPath ""
+CPP=cl.exe
+MTL=midl.exe
+RSC=rc.exe
+
+!IF "$(CFG)" == "mod_proxy_http2 - Win32 Release"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "Release"
+# PROP BASE Intermediate_Dir "Release"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release"
+# PROP Intermediate_Dir "Release"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MD /W3 /O2 /D "WIN32" /D "NDEBUG" /D "_WINDOWS" /D "ssize_t=long" /FD /c
+# ADD CPP /nologo /MD /W3 /O2 /Oy- /Zi /I "../ssl" /I "../../include" /I "../../srclib/apr/include" /I "../../srclib/apr-util/include" /I "../../srclib/nghttp2/lib/includes" /D "NDEBUG" /D "WIN32" /D "_WINDOWS" /D "ssize_t=long" /Fd"Release\mod_proxy_http2_src" /FD /c
+# ADD BASE MTL /nologo /D "NDEBUG" /win32
+# ADD MTL /nologo /D "NDEBUG" /mktyplib203 /win32
+# ADD BASE RSC /l 0x409 /d "NDEBUG"
+# ADD RSC /l 0x409 /fo"Release/mod_proxy_http2.res" /i "../../include" /i "../../srclib/apr/include" /d "NDEBUG" /d BIN_NAME="mod_proxy_http2.so" /d LONG_NAME="http2_module for Apache"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib nghttp2.lib /nologo /subsystem:windows /dll /libpath:"..\..\srclib\nghttp2\lib\MSVC_obj" /out:".\Release\mod_proxy_http2.so" /base:@..\..\os\win32\BaseAddr.ref,mod_proxy_http2.so
+# ADD LINK32 kernel32.lib nghttp2.lib /nologo /subsystem:windows /dll /libpath:"..\..\srclib\nghttp2\lib\MSVC_obj" /incremental:no /debug /out:".\Release\mod_proxy_http2.so" /base:@..\..\os\win32\BaseAddr.ref,mod_proxy_http2.so /opt:ref
+# Begin Special Build Tool
+TargetPath=.\Release\mod_proxy_http2.so
+SOURCE="$(InputPath)"
+PostBuild_Desc=Embed .manifest
+PostBuild_Cmds=if exist $(TargetPath).manifest mt.exe -manifest $(TargetPath).manifest -outputresource:$(TargetPath);2
+# End Special Build Tool
+
+!ELSEIF "$(CFG)" == "mod_proxy_http2 - Win32 Debug"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "Debug"
+# PROP BASE Intermediate_Dir "Debug"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug"
+# PROP Intermediate_Dir "Debug"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MDd /W3 /EHsc /Zi /Od /D "WIN32" /D "_DEBUG" /D "_WINDOWS" /D "ssize_t=long" /FD /c
+# ADD CPP /nologo /MDd /W3 /EHsc /Zi /Od /I "../ssl" /I "../../include" /I "../../srclib/apr/include" /I "../../srclib/apr-util/include" /I "../../srclib/nghttp2/lib/includes" /D "_DEBUG" /D "WIN32" /D "_WINDOWS" /D "ssize_t=long" /Fd"Debug\mod_proxy_http2_src" /FD /c
+# ADD BASE MTL /nologo /D "_DEBUG" /win32
+# ADD MTL /nologo /D "_DEBUG" /mktyplib203 /win32
+# ADD BASE RSC /l 0x409 /d "_DEBUG"
+# ADD RSC /l 0x409 /fo"Debug/mod_proxy_http2.res" /i "../../include" /i "../../srclib/apr/include" /d "_DEBUG" /d BIN_NAME="mod_proxy_http2.so" /d LONG_NAME="http2_module for Apache"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib nghttp2d.lib /nologo /subsystem:windows /dll /libpath:"..\..\srclib\nghttp2\lib\MSVC_obj" /incremental:no /debug /out:".\Debug\mod_proxy_http2.so" /base:@..\..\os\win32\BaseAddr.ref,mod_proxy_http2.so
+# ADD LINK32 kernel32.lib nghttp2d.lib /nologo /subsystem:windows /dll /libpath:"..\..\srclib\nghttp2\lib\MSVC_obj" /incremental:no /debug /out:".\Debug\mod_proxy_http2.so" /base:@..\..\os\win32\BaseAddr.ref,mod_proxy_http2.so
+# Begin Special Build Tool
+TargetPath=.\Debug\mod_proxy_http2.so
+SOURCE="$(InputPath)"
+PostBuild_Desc=Embed .manifest
+PostBuild_Cmds=if exist $(TargetPath).manifest mt.exe -manifest $(TargetPath).manifest -outputresource:$(TargetPath);2
+# End Special Build Tool
+
+!ENDIF
+
+# Begin Target
+
+# Name "mod_proxy_http2 - Win32 Release"
+# Name "mod_proxy_http2 - Win32 Debug"
+# Begin Source File
+
+SOURCE=./h2_proxy_session.c
+# End Source File
+# Begin Source File
+
+SOURCE=./h2_proxy_util.c
+# End Source File
+# Begin Source File
+
+SOURCE=./mod_proxy_http2.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\..\build\win32\httpd.rc
+# End Source File
+# End Target
+# End Project
diff --git a/modules/http2/mod_proxy_http2.h b/modules/http2/mod_proxy_http2.h
new file mode 100644
index 0000000..0048ed9
--- /dev/null
+++ b/modules/http2/mod_proxy_http2.h
@@ -0,0 +1,21 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __MOD_PROXY_HTTP2_H__
+#define __MOD_PROXY_HTTP2_H__
+
+
+#endif
diff --git a/modules/http2/mod_proxy_http2.mak b/modules/http2/mod_proxy_http2.mak
new file mode 100644
index 0000000..e8e0624
--- /dev/null
+++ b/modules/http2/mod_proxy_http2.mak
@@ -0,0 +1,427 @@
+# Microsoft Developer Studio Generated NMAKE File, Based on mod_proxy_http2.dsp
+!IF "$(CFG)" == ""
+CFG=mod_proxy_http2 - Win32 Release
+!MESSAGE No configuration specified. Defaulting to mod_proxy_http2 - Win32 Release.
+!ENDIF
+
+!IF "$(CFG)" != "mod_proxy_http2 - Win32 Release" && "$(CFG)" != "mod_proxy_http2 - Win32 Debug"
+!MESSAGE Invalid configuration "$(CFG)" specified.
+!MESSAGE You can specify a configuration when running NMAKE
+!MESSAGE by defining the macro CFG on the command line. For example:
+!MESSAGE
+!MESSAGE NMAKE /f "mod_proxy_http2.mak" CFG="mod_proxy_http2 - Win32 Release"
+!MESSAGE
+!MESSAGE Possible choices for configuration are:
+!MESSAGE
+!MESSAGE "mod_proxy_http2 - Win32 Release" (based on "Win32 (x86) Dynamic-Link Library")
+!MESSAGE "mod_proxy_http2 - Win32 Debug" (based on "Win32 (x86) Dynamic-Link Library")
+!MESSAGE
+!ERROR An invalid configuration is specified.
+!ENDIF
+
+!IF "$(OS)" == "Windows_NT"
+NULL=
+!ELSE
+NULL=nul
+!ENDIF
+
+!IF "$(CFG)" == "mod_proxy_http2 - Win32 Release"
+
+OUTDIR=.\Release
+INTDIR=.\Release
+DS_POSTBUILD_DEP=$(INTDIR)\postbld.dep
+# Begin Custom Macros
+OutDir=.\Release
+# End Custom Macros
+
+!IF "$(RECURSE)" == "0"
+
+ALL : "$(OUTDIR)\mod_proxy_http2.so" "$(DS_POSTBUILD_DEP)"
+
+!ELSE
+
+ALL : "mod_proxy - Win32 Release" "mod_http2 - Win32 Release" "libhttpd - Win32 Release" "libaprutil - Win32 Release" "libapr - Win32 Release" "$(OUTDIR)\mod_proxy_http2.so" "$(DS_POSTBUILD_DEP)"
+
+!ENDIF
+
+!IF "$(RECURSE)" == "1"
+CLEAN :"libapr - Win32 ReleaseCLEAN" "libaprutil - Win32 ReleaseCLEAN" "libhttpd - Win32 ReleaseCLEAN" "mod_http2 - Win32 ReleaseCLEAN" "mod_proxy - Win32 ReleaseCLEAN"
+!ELSE
+CLEAN :
+!ENDIF
+ -@erase "$(INTDIR)\h2_proxy_session.obj"
+ -@erase "$(INTDIR)\h2_proxy_util.obj"
+ -@erase "$(INTDIR)\mod_proxy_http2.obj"
+ -@erase "$(INTDIR)\mod_proxy_http2.res"
+ -@erase "$(INTDIR)\mod_proxy_http2_src.idb"
+ -@erase "$(INTDIR)\mod_proxy_http2_src.pdb"
+ -@erase "$(OUTDIR)\mod_proxy_http2.exp"
+ -@erase "$(OUTDIR)\mod_proxy_http2.lib"
+ -@erase "$(OUTDIR)\mod_proxy_http2.pdb"
+ -@erase "$(OUTDIR)\mod_proxy_http2.so"
+
+"$(OUTDIR)" :
+ if not exist "$(OUTDIR)/$(NULL)" mkdir "$(OUTDIR)"
+
+CPP=cl.exe
+CPP_PROJ=/nologo /MD /W3 /Zi /O2 /Oy- /I "../ssl" /I "../../include" /I "../../srclib/apr/include" /I "../../srclib/apr-util/include" /I "../../srclib/nghttp2/lib/includes" /D "NDEBUG" /D "WIN32" /D "_WINDOWS" /D ssize_t=long /Fo"$(INTDIR)\\" /Fd"$(INTDIR)\mod_proxy_http2_src" /FD /c
+
+.c{$(INTDIR)}.obj::
+ $(CPP) @<<
+ $(CPP_PROJ) $<
+<<
+
+.cpp{$(INTDIR)}.obj::
+ $(CPP) @<<
+ $(CPP_PROJ) $<
+<<
+
+.cxx{$(INTDIR)}.obj::
+ $(CPP) @<<
+ $(CPP_PROJ) $<
+<<
+
+.c{$(INTDIR)}.sbr::
+ $(CPP) @<<
+ $(CPP_PROJ) $<
+<<
+
+.cpp{$(INTDIR)}.sbr::
+ $(CPP) @<<
+ $(CPP_PROJ) $<
+<<
+
+.cxx{$(INTDIR)}.sbr::
+ $(CPP) @<<
+ $(CPP_PROJ) $<
+<<
+
+MTL=midl.exe
+MTL_PROJ=/nologo /D "NDEBUG" /mktyplib203 /win32
+RSC=rc.exe
+RSC_PROJ=/l 0x409 /fo"$(INTDIR)\mod_proxy_http2.res" /i "../../include" /i "../../srclib/apr/include" /d "NDEBUG" /d BIN_NAME="mod_proxy_http2.so" /d LONG_NAME="http2_module for Apache"
+BSC32=bscmake.exe
+BSC32_FLAGS=/nologo /o"$(OUTDIR)\mod_proxy_http2.bsc"
+BSC32_SBRS= \
+
+LINK32=link.exe
+LINK32_FLAGS=kernel32.lib nghttp2.lib /nologo /subsystem:windows /dll /incremental:no /pdb:"$(OUTDIR)\mod_proxy_http2.pdb" /debug /out:"$(OUTDIR)\mod_proxy_http2.so" /implib:"$(OUTDIR)\mod_proxy_http2.lib" /libpath:"..\..\srclib\nghttp2\lib\MSVC_obj" /base:@..\..\os\win32\BaseAddr.ref,mod_proxy_http2.so /opt:ref
+LINK32_OBJS= \
+ "$(INTDIR)\h2_proxy_session.obj" \
+ "$(INTDIR)\h2_proxy_util.obj" \
+ "$(INTDIR)\mod_proxy_http2.obj" \
+ "$(INTDIR)\mod_proxy_http2.res" \
+ "..\..\srclib\apr\Release\libapr-1.lib" \
+ "..\..\srclib\apr-util\Release\libaprutil-1.lib" \
+ "..\..\Release\libhttpd.lib" \
+ "$(OUTDIR)\mod_http2.lib" \
+ "..\proxy\Release\mod_proxy.lib"
+
+"$(OUTDIR)\mod_proxy_http2.so" : "$(OUTDIR)" $(DEF_FILE) $(LINK32_OBJS)
+ $(LINK32) @<<
+ $(LINK32_FLAGS) $(LINK32_OBJS)
+<<
+
+TargetPath=.\Release\mod_proxy_http2.so
+SOURCE="$(InputPath)"
+PostBuild_Desc=Embed .manifest
+DS_POSTBUILD_DEP=$(INTDIR)\postbld.dep
+
+# Begin Custom Macros
+OutDir=.\Release
+# End Custom Macros
+
+"$(DS_POSTBUILD_DEP)" : "$(OUTDIR)\mod_proxy_http2.so"
+ if exist .\Release\mod_proxy_http2.so.manifest mt.exe -manifest .\Release\mod_proxy_http2.so.manifest -outputresource:.\Release\mod_proxy_http2.so;2
+ echo Helper for Post-build step > "$(DS_POSTBUILD_DEP)"
+
+!ELSEIF "$(CFG)" == "mod_proxy_http2 - Win32 Debug"
+
+OUTDIR=.\Debug
+INTDIR=.\Debug
+DS_POSTBUILD_DEP=$(INTDIR)\postbld.dep
+# Begin Custom Macros
+OutDir=.\Debug
+# End Custom Macros
+
+!IF "$(RECURSE)" == "0"
+
+ALL : "$(OUTDIR)\mod_proxy_http2.so" "$(DS_POSTBUILD_DEP)"
+
+!ELSE
+
+ALL : "mod_proxy - Win32 Debug" "mod_http2 - Win32 Debug" "libhttpd - Win32 Debug" "libaprutil - Win32 Debug" "libapr - Win32 Debug" "$(OUTDIR)\mod_proxy_http2.so" "$(DS_POSTBUILD_DEP)"
+
+!ENDIF
+
+!IF "$(RECURSE)" == "1"
+CLEAN :"libapr - Win32 DebugCLEAN" "libaprutil - Win32 DebugCLEAN" "libhttpd - Win32 DebugCLEAN" "mod_http2 - Win32 DebugCLEAN" "mod_proxy - Win32 DebugCLEAN"
+!ELSE
+CLEAN :
+!ENDIF
+ -@erase "$(INTDIR)\h2_proxy_session.obj"
+ -@erase "$(INTDIR)\h2_proxy_util.obj"
+ -@erase "$(INTDIR)\mod_proxy_http2.obj"
+ -@erase "$(INTDIR)\mod_proxy_http2.res"
+ -@erase "$(INTDIR)\mod_proxy_http2_src.idb"
+ -@erase "$(INTDIR)\mod_proxy_http2_src.pdb"
+ -@erase "$(OUTDIR)\mod_proxy_http2.exp"
+ -@erase "$(OUTDIR)\mod_proxy_http2.lib"
+ -@erase "$(OUTDIR)\mod_proxy_http2.pdb"
+ -@erase "$(OUTDIR)\mod_proxy_http2.so"
+
+"$(OUTDIR)" :
+ if not exist "$(OUTDIR)/$(NULL)" mkdir "$(OUTDIR)"
+
+CPP=cl.exe
+CPP_PROJ=/nologo /MDd /W3 /Zi /Od /I "../ssl" /I "../../include" /I "../../srclib/apr/include" /I "../../srclib/apr-util/include" /I "../../srclib/nghttp2/lib/includes" /D "_DEBUG" /D "WIN32" /D "_WINDOWS" /D ssize_t=long /Fo"$(INTDIR)\\" /Fd"$(INTDIR)\mod_proxy_http2_src" /FD /EHsc /c
+
+.c{$(INTDIR)}.obj::
+ $(CPP) @<<
+ $(CPP_PROJ) $<
+<<
+
+.cpp{$(INTDIR)}.obj::
+ $(CPP) @<<
+ $(CPP_PROJ) $<
+<<
+
+.cxx{$(INTDIR)}.obj::
+ $(CPP) @<<
+ $(CPP_PROJ) $<
+<<
+
+.c{$(INTDIR)}.sbr::
+ $(CPP) @<<
+ $(CPP_PROJ) $<
+<<
+
+.cpp{$(INTDIR)}.sbr::
+ $(CPP) @<<
+ $(CPP_PROJ) $<
+<<
+
+.cxx{$(INTDIR)}.sbr::
+ $(CPP) @<<
+ $(CPP_PROJ) $<
+<<
+
+MTL=midl.exe
+MTL_PROJ=/nologo /D "_DEBUG" /mktyplib203 /win32
+RSC=rc.exe
+RSC_PROJ=/l 0x409 /fo"$(INTDIR)\mod_proxy_http2.res" /i "../../include" /i "../../srclib/apr/include" /d "_DEBUG" /d BIN_NAME="mod_proxy_http2.so" /d LONG_NAME="http2_module for Apache"
+BSC32=bscmake.exe
+BSC32_FLAGS=/nologo /o"$(OUTDIR)\mod_proxy_http2.bsc"
+BSC32_SBRS= \
+
+LINK32=link.exe
+LINK32_FLAGS=kernel32.lib nghttp2d.lib /nologo /subsystem:windows /dll /incremental:no /pdb:"$(OUTDIR)\mod_proxy_http2.pdb" /debug /out:"$(OUTDIR)\mod_proxy_http2.so" /implib:"$(OUTDIR)\mod_proxy_http2.lib" /libpath:"..\..\srclib\nghttp2\lib\MSVC_obj" /base:@..\..\os\win32\BaseAddr.ref,mod_proxy_http2.so
+LINK32_OBJS= \
+ "$(INTDIR)\h2_proxy_session.obj" \
+ "$(INTDIR)\h2_proxy_util.obj" \
+ "$(INTDIR)\mod_proxy_http2.obj" \
+ "$(INTDIR)\mod_proxy_http2.res" \
+ "..\..\srclib\apr\Debug\libapr-1.lib" \
+ "..\..\srclib\apr-util\Debug\libaprutil-1.lib" \
+ "..\..\Debug\libhttpd.lib" \
+ "$(OUTDIR)\mod_http2.lib" \
+ "..\proxy\Debug\mod_proxy.lib"
+
+"$(OUTDIR)\mod_proxy_http2.so" : "$(OUTDIR)" $(DEF_FILE) $(LINK32_OBJS)
+ $(LINK32) @<<
+ $(LINK32_FLAGS) $(LINK32_OBJS)
+<<
+
+TargetPath=.\Debug\mod_proxy_http2.so
+SOURCE="$(InputPath)"
+PostBuild_Desc=Embed .manifest
+DS_POSTBUILD_DEP=$(INTDIR)\postbld.dep
+
+# Begin Custom Macros
+OutDir=.\Debug
+# End Custom Macros
+
+"$(DS_POSTBUILD_DEP)" : "$(OUTDIR)\mod_proxy_http2.so"
+ if exist .\Debug\mod_proxy_http2.so.manifest mt.exe -manifest .\Debug\mod_proxy_http2.so.manifest -outputresource:.\Debug\mod_proxy_http2.so;2
+ echo Helper for Post-build step > "$(DS_POSTBUILD_DEP)"
+
+!ENDIF
+
+
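+# Include the generated dependency file unless external dependency tracking is disabled.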
+!IF "$(NO_EXTERNAL_DEPS)" != "1"
+!IF EXISTS("mod_proxy_http2.dep")
+!INCLUDE "mod_proxy_http2.dep"
+!ELSE
+!MESSAGE Warning: cannot find "mod_proxy_http2.dep"
+!ENDIF
+!ENDIF
+
+
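+# Recurse into the sub-projects this module depends on, for the selected configuration.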
+!IF "$(CFG)" == "mod_proxy_http2 - Win32 Release" || "$(CFG)" == "mod_proxy_http2 - Win32 Debug"
+
+!IF "$(CFG)" == "mod_proxy_http2 - Win32 Release"
+
+"libapr - Win32 Release" :
+ cd ".\..\..\srclib\apr"
+ $(MAKE) /$(MAKEFLAGS) /F ".\libapr.mak" CFG="libapr - Win32 Release"
+ cd "..\..\modules\http2"
+
+"libapr - Win32 ReleaseCLEAN" :
+ cd ".\..\..\srclib\apr"
+ $(MAKE) /$(MAKEFLAGS) /F ".\libapr.mak" CFG="libapr - Win32 Release" RECURSE=1 CLEAN
+ cd "..\..\modules\http2"
+
+!ELSEIF "$(CFG)" == "mod_proxy_http2 - Win32 Debug"
+
+"libapr - Win32 Debug" :
+ cd ".\..\..\srclib\apr"
+ $(MAKE) /$(MAKEFLAGS) /F ".\libapr.mak" CFG="libapr - Win32 Debug"
+ cd "..\..\modules\http2"
+
+"libapr - Win32 DebugCLEAN" :
+ cd ".\..\..\srclib\apr"
+ $(MAKE) /$(MAKEFLAGS) /F ".\libapr.mak" CFG="libapr - Win32 Debug" RECURSE=1 CLEAN
+ cd "..\..\modules\http2"
+
+!ENDIF
+
+!IF "$(CFG)" == "mod_proxy_http2 - Win32 Release"
+
+"libaprutil - Win32 Release" :
+ cd ".\..\..\srclib\apr-util"
+ $(MAKE) /$(MAKEFLAGS) /F ".\libaprutil.mak" CFG="libaprutil - Win32 Release"
+ cd "..\..\modules\http2"
+
+"libaprutil - Win32 ReleaseCLEAN" :
+ cd ".\..\..\srclib\apr-util"
+ $(MAKE) /$(MAKEFLAGS) /F ".\libaprutil.mak" CFG="libaprutil - Win32 Release" RECURSE=1 CLEAN
+ cd "..\..\modules\http2"
+
+!ELSEIF "$(CFG)" == "mod_proxy_http2 - Win32 Debug"
+
+"libaprutil - Win32 Debug" :
+ cd ".\..\..\srclib\apr-util"
+ $(MAKE) /$(MAKEFLAGS) /F ".\libaprutil.mak" CFG="libaprutil - Win32 Debug"
+ cd "..\..\modules\http2"
+
+"libaprutil - Win32 DebugCLEAN" :
+ cd ".\..\..\srclib\apr-util"
+ $(MAKE) /$(MAKEFLAGS) /F ".\libaprutil.mak" CFG="libaprutil - Win32 Debug" RECURSE=1 CLEAN
+ cd "..\..\modules\http2"
+
+!ENDIF
+
+!IF "$(CFG)" == "mod_proxy_http2 - Win32 Release"
+
+"libhttpd - Win32 Release" :
+ cd ".\..\.."
+ $(MAKE) /$(MAKEFLAGS) /F ".\libhttpd.mak" CFG="libhttpd - Win32 Release"
+ cd ".\modules\http2"
+
+"libhttpd - Win32 ReleaseCLEAN" :
+ cd ".\..\.."
+ $(MAKE) /$(MAKEFLAGS) /F ".\libhttpd.mak" CFG="libhttpd - Win32 Release" RECURSE=1 CLEAN
+ cd ".\modules\http2"
+
+!ELSEIF "$(CFG)" == "mod_proxy_http2 - Win32 Debug"
+
+"libhttpd - Win32 Debug" :
+ cd ".\..\.."
+ $(MAKE) /$(MAKEFLAGS) /F ".\libhttpd.mak" CFG="libhttpd - Win32 Debug"
+ cd ".\modules\http2"
+
+"libhttpd - Win32 DebugCLEAN" :
+ cd ".\..\.."
+ $(MAKE) /$(MAKEFLAGS) /F ".\libhttpd.mak" CFG="libhttpd - Win32 Debug" RECURSE=1 CLEAN
+ cd ".\modules\http2"
+
+!ENDIF
+
+!IF "$(CFG)" == "mod_proxy_http2 - Win32 Release"
+
+"mod_http2 - Win32 Release" :
+ cd "."
+ $(MAKE) /$(MAKEFLAGS) /F ".\mod_http2.mak" CFG="mod_http2 - Win32 Release"
+ cd "."
+
+"mod_http2 - Win32 ReleaseCLEAN" :
+ cd "."
+ $(MAKE) /$(MAKEFLAGS) /F ".\mod_http2.mak" CFG="mod_http2 - Win32 Release" RECURSE=1 CLEAN
+ cd "."
+
+!ELSEIF "$(CFG)" == "mod_proxy_http2 - Win32 Debug"
+
+"mod_http2 - Win32 Debug" :
+ cd "."
+ $(MAKE) /$(MAKEFLAGS) /F ".\mod_http2.mak" CFG="mod_http2 - Win32 Debug"
+ cd "."
+
+"mod_http2 - Win32 DebugCLEAN" :
+ cd "."
+ $(MAKE) /$(MAKEFLAGS) /F ".\mod_http2.mak" CFG="mod_http2 - Win32 Debug" RECURSE=1 CLEAN
+ cd "."
+
+!ENDIF
+
+!IF "$(CFG)" == "mod_proxy_http2 - Win32 Release"
+
+"mod_proxy - Win32 Release" :
+ cd ".\..\proxy"
+ $(MAKE) /$(MAKEFLAGS) /F ".\mod_proxy.mak" CFG="mod_proxy - Win32 Release"
+ cd "..\http2"
+
+"mod_proxy - Win32 ReleaseCLEAN" :
+ cd ".\..\proxy"
+ $(MAKE) /$(MAKEFLAGS) /F ".\mod_proxy.mak" CFG="mod_proxy - Win32 Release" RECURSE=1 CLEAN
+ cd "..\http2"
+
+!ELSEIF "$(CFG)" == "mod_proxy_http2 - Win32 Debug"
+
+"mod_proxy - Win32 Debug" :
+ cd ".\..\proxy"
+ $(MAKE) /$(MAKEFLAGS) /F ".\mod_proxy.mak" CFG="mod_proxy - Win32 Debug"
+ cd "..\http2"
+
+"mod_proxy - Win32 DebugCLEAN" :
+ cd ".\..\proxy"
+ $(MAKE) /$(MAKEFLAGS) /F ".\mod_proxy.mak" CFG="mod_proxy - Win32 Debug" RECURSE=1 CLEAN
+ cd "..\http2"
+
+!ENDIF
+
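+# Per-source targets; the objects are produced by the inference rules defined above.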
+SOURCE=./h2_proxy_session.c
+
+"$(INTDIR)\h2_proxy_session.obj" : $(SOURCE) "$(INTDIR)"
+
+
+SOURCE=./h2_proxy_util.c
+
+"$(INTDIR)\h2_proxy_util.obj" : $(SOURCE) "$(INTDIR)"
+
+
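+# Version resource: compiled from the shared httpd.rc with module-specific BIN_NAME/LONG_NAME.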
+SOURCE=..\..\build\win32\httpd.rc
+
+!IF "$(CFG)" == "mod_proxy_http2 - Win32 Release"
+
+
+"$(INTDIR)\mod_proxy_http2.res" : $(SOURCE) "$(INTDIR)"
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_proxy_http2.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build/win32" /d "NDEBUG" /d BIN_NAME="mod_proxy_http2.so" /d LONG_NAME="proxy_http2_module for Apache" $(SOURCE)
+
+
+!ELSEIF "$(CFG)" == "mod_proxy_http2 - Win32 Debug"
+
+
+"$(INTDIR)\mod_proxy_http2.res" : $(SOURCE) "$(INTDIR)"
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_proxy_http2.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build/win32" /d "_DEBUG" /d BIN_NAME="mod_proxy_http2.so" /d LONG_NAME="proxy_http2_module for Apache" $(SOURCE)
+
+
+!ENDIF
+
+SOURCE=./mod_proxy_http2.c
+
+"$(INTDIR)\mod_proxy_http2.obj" : $(SOURCE) "$(INTDIR)"
+
+
+
+!ENDIF
+