From 6beeb1b708550be0d4a53b272283e17e5e35fe17 Mon Sep 17 00:00:00 2001
From: Daniel Baumann
Date: Sun, 7 Apr 2024 17:01:30 +0200
Subject: Adding upstream version 2.4.57.
Signed-off-by: Daniel Baumann
---
modules/http/.indent.pro | 54 ++
modules/http/Makefile.in | 3 +
modules/http/byterange_filter.c | 610 ++++++++++++
modules/http/chunk_filter.c | 197 ++++
modules/http/config.m4 | 20 +
modules/http/http_core.c | 326 +++++++
modules/http/http_etag.c | 413 +++++++++
modules/http/http_filters.c | 1941 +++++++++++++++++++++++++++++++++++++++
modules/http/http_protocol.c | 1671 +++++++++++++++++++++++++++++++++
modules/http/http_request.c | 861 +++++++++++++++++
modules/http/mod_mime.c | 1037 +++++++++++++++++++++
modules/http/mod_mime.dep | 55 ++
modules/http/mod_mime.dsp | 111 +++
modules/http/mod_mime.exp | 1 +
modules/http/mod_mime.mak | 353 +++++++
15 files changed, 7653 insertions(+)
create mode 100644 modules/http/.indent.pro
create mode 100644 modules/http/Makefile.in
create mode 100644 modules/http/byterange_filter.c
create mode 100644 modules/http/chunk_filter.c
create mode 100644 modules/http/config.m4
create mode 100644 modules/http/http_core.c
create mode 100644 modules/http/http_etag.c
create mode 100644 modules/http/http_filters.c
create mode 100644 modules/http/http_protocol.c
create mode 100644 modules/http/http_request.c
create mode 100644 modules/http/mod_mime.c
create mode 100644 modules/http/mod_mime.dep
create mode 100644 modules/http/mod_mime.dsp
create mode 100644 modules/http/mod_mime.exp
create mode 100644 modules/http/mod_mime.mak
diff --git a/modules/http/.indent.pro b/modules/http/.indent.pro
new file mode 100644
index 0000000..a9fbe9f
--- /dev/null
+++ b/modules/http/.indent.pro
@@ -0,0 +1,54 @@
+-i4 -npsl -di0 -br -nce -d0 -cli0 -npcs -nfc1
+-TBUFF
+-TFILE
+-TTRANS
+-TUINT4
+-T_trans
+-Tallow_options_t
+-Tapache_sfio
+-Tarray_header
+-Tbool_int
+-Tbuf_area
+-Tbuff_struct
+-Tbuffy
+-Tcmd_how
+-Tcmd_parms
+-Tcommand_rec
+-Tcommand_struct
+-Tconn_rec
+-Tcore_dir_config
+-Tcore_server_config
+-Tdir_maker_func
+-Tevent
+-Tglobals_s
+-Thandler_func
+-Thandler_rec
+-Tjoblist_s
+-Tlisten_rec
+-Tmerger_func
+-Tmode_t
+-Tmodule
+-Tmodule_struct
+-Tmutex
+-Tn_long
+-Tother_child_rec
+-Toverrides_t
+-Tparent_score
+-Tpid_t
+-Tpiped_log
+-Tpool
+-Trequest_rec
+-Trequire_line
+-Trlim_t
+-Tscoreboard
+-Tsemaphore
+-Tserver_addr_rec
+-Tserver_rec
+-Tserver_rec_chain
+-Tshort_score
+-Ttable
+-Ttable_entry
+-Tthread
+-Tu_wide_int
+-Tvtime_t
+-Twide_int
diff --git a/modules/http/Makefile.in b/modules/http/Makefile.in
new file mode 100644
index 0000000..167b343
--- /dev/null
+++ b/modules/http/Makefile.in
@@ -0,0 +1,3 @@
+
+include $(top_srcdir)/build/special.mk
+
diff --git a/modules/http/byterange_filter.c b/modules/http/byterange_filter.c
new file mode 100644
index 0000000..5ebe853
--- /dev/null
+++ b/modules/http/byterange_filter.c
@@ -0,0 +1,610 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * byterange_filter.c --- HTTP byterange filter and friends.
+ */
+
+#include "apr.h"
+
+#include "apr_strings.h"
+#include "apr_buckets.h"
+#include "apr_lib.h"
+#include "apr_signal.h"
+
+#define APR_WANT_STDIO /* for sscanf */
+#define APR_WANT_STRFUNC
+#define APR_WANT_MEMFUNC
+#include "apr_want.h"
+
+#include "util_filter.h"
+#include "ap_config.h"
+#include "httpd.h"
+#include "http_config.h"
+#include "http_core.h"
+#include "http_protocol.h"
+#include "http_main.h"
+#include "http_request.h"
+#include "http_vhost.h"
+#include "http_log.h" /* For errors detected in basic auth common
+ * support code... */
+#include "apr_date.h" /* For apr_date_parse_http and APR_DATE_BAD */
+#include "util_charset.h"
+#include "util_ebcdic.h"
+#include "util_time.h"
+
+#include "mod_core.h"
+
+#if APR_HAVE_STDARG_H
+#include <stdarg.h>
+#endif
+#if APR_HAVE_UNISTD_H
+#include <unistd.h>
+#endif
+
+#ifndef AP_DEFAULT_MAX_RANGES
+#define AP_DEFAULT_MAX_RANGES 200
+#endif
+#ifndef AP_DEFAULT_MAX_OVERLAPS
+#define AP_DEFAULT_MAX_OVERLAPS 20
+#endif
+#ifndef AP_DEFAULT_MAX_REVERSALS
+#define AP_DEFAULT_MAX_REVERSALS 20
+#endif
+
+#define MAX_PREALLOC_RANGES 100
+
+APLOG_USE_MODULE(http);
+
+typedef struct indexes_t {
+ apr_off_t start;
+ apr_off_t end;
+} indexes_t;
+
+/*
+ * Returns: number of ranges (merged), or -1 if all requested ranges are unsatisfiable
+ */
+static int ap_set_byterange(request_rec *r, apr_off_t clength,
+ apr_array_header_t **indexes,
+ int *overlaps, int *reversals)
+{
+ const char *range;
+ const char *ct;
+ char *cur;
+ apr_array_header_t *merged;
+ int num_ranges = 0, unsatisfiable = 0;
+ apr_off_t ostart = 0, oend = 0, sum_lengths = 0;
+ int in_merge = 0;
+ indexes_t *idx;
+ int ranges = 1;
+ int i;
+ const char *it;
+
+ *overlaps = 0;
+ *reversals = 0;
+
+ if (r->assbackwards) {
+ return 0;
+ }
+
+ /*
+ * Check for Range request-header (HTTP/1.1) or Request-Range for
+ * backwards-compatibility with second-draft Luotonen/Franks
+ * byte-ranges (e.g. Netscape Navigator 2-3).
+ *
+ * We support this form, with Request-Range, and (farther down) we
+ * send multipart/x-byteranges instead of multipart/byteranges for
+ * Request-Range based requests to work around a bug in Netscape
+ * Navigator 2-3 and MSIE 3.
+ */
+
+ if (!(range = apr_table_get(r->headers_in, "Range"))) {
+ range = apr_table_get(r->headers_in, "Request-Range");
+ }
+
+ if (!range || strncasecmp(range, "bytes=", 6) || r->status != HTTP_OK) {
+ return 0;
+ }
+
+ /* is content already a single range? */
+ if (apr_table_get(r->headers_out, "Content-Range")) {
+ return 0;
+ }
+
+ /* is content already a multiple range? */
+ if ((ct = apr_table_get(r->headers_out, "Content-Type"))
+ && (!strncasecmp(ct, "multipart/byteranges", 20)
+ || !strncasecmp(ct, "multipart/x-byteranges", 22))) {
+ return 0;
+ }
+
+ /*
+ * Check the If-Range header for Etag or Date.
+ */
+ if (AP_CONDITION_NOMATCH == ap_condition_if_range(r, r->headers_out)) {
+ return 0;
+ }
+
+ range += 6;
+ it = range;
+ while (*it) {
+ if (*it++ == ',') {
+ ranges++;
+ }
+ }
+ it = range;
+ if (ranges > MAX_PREALLOC_RANGES) {
+ ranges = MAX_PREALLOC_RANGES;
+ }
+ *indexes = apr_array_make(r->pool, ranges, sizeof(indexes_t));
+ while ((cur = ap_getword(r->pool, &range, ','))) {
+ char *dash;
+ apr_off_t number, start, end;
+
+ if (!*cur)
+ break;
+
+ /*
+ * Per RFC 2616 14.35.1: If there is at least one syntactically invalid
+ * byte-range-spec, we must ignore the whole header.
+ */
+
+ if (!(dash = strchr(cur, '-'))) {
+ return 0;
+ }
+
+ if (dash == cur) {
+ /* In the form "-5" */
+ if (!ap_parse_strict_length(&number, dash+1)) {
+ return 0;
+ }
+ if (number < 1) {
+ return 0;
+ }
+ start = clength - number;
+ end = clength - 1;
+ }
+ else {
+ *dash++ = '\0';
+ if (!ap_parse_strict_length(&number, cur)) {
+ return 0;
+ }
+ start = number;
+ if (*dash) {
+ if (!ap_parse_strict_length(&number, dash)) {
+ return 0;
+ }
+ end = number;
+ if (start > end) {
+ return 0;
+ }
+ }
+ else { /* "5-" */
+ end = clength - 1;
+ /*
+ * special case: 0-
+ * ignore all other ranges provided
+ * return as a single range: 0-
+ */
+ if (start == 0) {
+ num_ranges = 0;
+ sum_lengths = 0;
+ in_merge = 1;
+ oend = end;
+ ostart = start;
+ apr_array_clear(*indexes);
+ break;
+ }
+ }
+ }
+
+ if (start < 0) {
+ start = 0;
+ }
+ if (start >= clength) {
+ unsatisfiable = 1;
+ continue;
+ }
+ if (end >= clength) {
+ end = clength - 1;
+ }
+
+ if (!in_merge) {
+ /* new set */
+ ostart = start;
+ oend = end;
+ in_merge = 1;
+ continue;
+ }
+ in_merge = 0;
+
+ if (start >= ostart && end <= oend) {
+ in_merge = 1;
+ }
+
+ if (start < ostart && end >= ostart-1) {
+ ostart = start;
+ ++*reversals;
+ in_merge = 1;
+ }
+ if (end >= oend && start <= oend+1 ) {
+ oend = end;
+ in_merge = 1;
+ }
+
+ if (in_merge) {
+ ++*overlaps;
+ continue;
+ } else {
+ idx = (indexes_t *)apr_array_push(*indexes);
+ idx->start = ostart;
+ idx->end = oend;
+ sum_lengths += oend - ostart + 1;
+ /* new set again */
+ in_merge = 1;
+ ostart = start;
+ oend = end;
+ num_ranges++;
+ }
+ }
+
+ if (in_merge) {
+ idx = (indexes_t *)apr_array_push(*indexes);
+ idx->start = ostart;
+ idx->end = oend;
+ sum_lengths += oend - ostart + 1;
+ num_ranges++;
+ }
+ else if (num_ranges == 0 && unsatisfiable) {
+ /* If all ranges are unsatisfiable, we should return 416 */
+ return -1;
+ }
+ if (sum_lengths > clength) {
+ ap_log_rerror(APLOG_MARK, APLOG_TRACE1, 0, r,
+ "Sum of ranges larger than file, ignoring.");
+ return 0;
+ }
+
+ /*
+ * create the merged table now, now that we know we need it
+ */
+ merged = apr_array_make(r->pool, num_ranges, sizeof(char *));
+ idx = (indexes_t *)(*indexes)->elts;
+ for (i = 0; i < (*indexes)->nelts; i++, idx++) {
+ char **new = (char **)apr_array_push(merged);
+ *new = apr_psprintf(r->pool, "%" APR_OFF_T_FMT "-%" APR_OFF_T_FMT,
+ idx->start, idx->end);
+ }
+
+ r->status = HTTP_PARTIAL_CONTENT;
+ r->range = apr_array_pstrcat(r->pool, merged, ',');
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(01583)
+ "Range: %s | %s (%d : %d : %"APR_OFF_T_FMT")",
+ it, r->range, *overlaps, *reversals, clength);
+
+ return num_ranges;
+}
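
[Editorial aside, not part of the patch: the arithmetic above maps the three
byte-range-spec forms ("M-N", "M-", "-N") onto absolute offsets within a body
of clen bytes. A minimal standalone sketch, with hypothetical names, is:]

    #include <stdio.h>

    /* Hypothetical helper for illustration only: suffix != 0 means the "-N"
     * form, last < 0 means the open-ended "M-" form, otherwise "M-N". */
    static void map_range(long clen, long first, long last, int suffix)
    {
        long start, end;

        if (suffix) {               /* "-N": the final N bytes */
            start = clen - last;
            end = clen - 1;
        }
        else if (last < 0) {        /* "M-": from offset M to the end */
            start = first;
            end = clen - 1;
        }
        else {                      /* "M-N": inclusive range, clipped to the body */
            start = first;
            end = (last < clen) ? last : clen - 1;
        }
        printf("bytes %ld-%ld/%ld\n", start, end, clen);
    }

    int main(void)
    {
        map_range(1000, 0, 99, 0);    /* Range: bytes=0-99  -> bytes 0-99/1000    */
        map_range(1000, 0, 500, 1);   /* Range: bytes=-500  -> bytes 500-999/1000 */
        map_range(1000, 900, -1, 0);  /* Range: bytes=900-  -> bytes 900-999/1000 */
        return 0;
    }
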
+
+/*
+ * Here we try to be compatible with clients that want multipart/x-byteranges
+ * instead of multipart/byteranges (also see above), as per HTTP/1.1. We
+ * look for the Request-Range header (e.g. Netscape 2 and 3) as an indication
+ * that the browser supports an older protocol. We also check User-Agent
+ * for Microsoft Internet Explorer 3, which needs this as well.
+ */
+static int use_range_x(request_rec *r)
+{
+ const char *ua;
+ return (apr_table_get(r->headers_in, "Request-Range")
+ || ((ua = apr_table_get(r->headers_in, "User-Agent"))
+ && ap_strstr_c(ua, "MSIE 3")));
+}
+
+#define BYTERANGE_FMT "%" APR_OFF_T_FMT "-%" APR_OFF_T_FMT "/%" APR_OFF_T_FMT
+
+static apr_status_t copy_brigade_range(apr_bucket_brigade *bb,
+ apr_bucket_brigade *bbout,
+ apr_off_t start,
+ apr_off_t end)
+{
+ apr_bucket *first = NULL, *last = NULL, *out_first = NULL, *e;
+ apr_uint64_t pos = 0, off_first = 0, off_last = 0;
+ apr_status_t rv;
+ apr_uint64_t start64, end64;
+ apr_off_t pofft = 0;
+
+ /*
+ * Once we know that start and end are >= 0 convert everything to apr_uint64_t.
+ * See the comments in apr_brigade_partition for why.
+ * In short, apr_off_t (for values >= 0) and apr_size_t fit into apr_uint64_t.
+ */
+ start64 = (apr_uint64_t)start;
+ end64 = (apr_uint64_t)end;
+
+ if (start < 0 || end < 0 || start64 > end64)
+ return APR_EINVAL;
+
+ for (e = APR_BRIGADE_FIRST(bb);
+ e != APR_BRIGADE_SENTINEL(bb);
+ e = APR_BUCKET_NEXT(e))
+ {
+ apr_uint64_t elen64;
+ /* we know that no bucket has undefined length (-1) */
+ AP_DEBUG_ASSERT(e->length != (apr_size_t)(-1));
+ elen64 = (apr_uint64_t)e->length;
+ if (!first && (elen64 + pos > start64)) {
+ first = e;
+ off_first = pos;
+ }
+ if (elen64 + pos > end64) {
+ last = e;
+ off_last = pos;
+ break;
+ }
+ pos += elen64;
+ }
+ if (!first || !last)
+ return APR_EINVAL;
+
+ e = first;
+ while (1)
+ {
+ apr_bucket *copy;
+ AP_DEBUG_ASSERT(e != APR_BRIGADE_SENTINEL(bb));
+ rv = apr_bucket_copy(e, &copy);
+ if (rv != APR_SUCCESS) {
+ apr_brigade_cleanup(bbout);
+ return rv;
+ }
+
+ APR_BRIGADE_INSERT_TAIL(bbout, copy);
+ if (e == first) {
+ if (off_first != start64) {
+ rv = apr_bucket_split(copy, (apr_size_t)(start64 - off_first));
+ if (rv != APR_SUCCESS) {
+ apr_brigade_cleanup(bbout);
+ return rv;
+ }
+ out_first = APR_BUCKET_NEXT(copy);
+ apr_bucket_delete(copy);
+ }
+ else {
+ out_first = copy;
+ }
+ }
+ if (e == last) {
+ if (e == first) {
+ off_last += start64 - off_first;
+ copy = out_first;
+ }
+ if (end64 - off_last != (apr_uint64_t)e->length) {
+ rv = apr_bucket_split(copy, (apr_size_t)(end64 + 1 - off_last));
+ if (rv != APR_SUCCESS) {
+ apr_brigade_cleanup(bbout);
+ return rv;
+ }
+ copy = APR_BUCKET_NEXT(copy);
+ if (copy != APR_BRIGADE_SENTINEL(bbout)) {
+ apr_bucket_delete(copy);
+ }
+ }
+ break;
+ }
+ e = APR_BUCKET_NEXT(e);
+ }
+
+ AP_DEBUG_ASSERT(APR_SUCCESS == apr_brigade_length(bbout, 1, &pofft));
+ pos = (apr_uint64_t)pofft;
+ AP_DEBUG_ASSERT(pos == end64 - start64 + 1);
+ return APR_SUCCESS;
+}
+
+static apr_status_t send_416(ap_filter_t *f, apr_bucket_brigade *tmpbb)
+{
+ apr_bucket *e;
+ conn_rec *c = f->r->connection;
+ ap_remove_output_filter(f);
+ f->r->status = HTTP_OK;
+ e = ap_bucket_error_create(HTTP_RANGE_NOT_SATISFIABLE, NULL,
+ f->r->pool, c->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(tmpbb, e);
+ e = apr_bucket_eos_create(c->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(tmpbb, e);
+ return ap_pass_brigade(f->next, tmpbb);
+}
+
+AP_CORE_DECLARE_NONSTD(apr_status_t) ap_byterange_filter(ap_filter_t *f,
+ apr_bucket_brigade *bb)
+{
+ request_rec *r = f->r;
+ conn_rec *c = r->connection;
+ apr_bucket *e;
+ apr_bucket_brigade *bsend;
+ apr_bucket_brigade *tmpbb;
+ apr_off_t range_start;
+ apr_off_t range_end;
+ apr_off_t clength = 0;
+ apr_status_t rv;
+ int found = 0;
+ int num_ranges;
+ char *bound_head = NULL;
+ apr_array_header_t *indexes;
+ indexes_t *idx;
+ int i;
+ int original_status;
+ int max_ranges, max_overlaps, max_reversals;
+ int overlaps = 0, reversals = 0;
+ core_dir_config *core_conf = ap_get_core_module_config(r->per_dir_config);
+
+ max_ranges = ( (core_conf->max_ranges >= 0 || core_conf->max_ranges == AP_MAXRANGES_UNLIMITED)
+ ? core_conf->max_ranges
+ : AP_DEFAULT_MAX_RANGES );
+ max_overlaps = ( (core_conf->max_overlaps >= 0 || core_conf->max_overlaps == AP_MAXRANGES_UNLIMITED)
+ ? core_conf->max_overlaps
+ : AP_DEFAULT_MAX_OVERLAPS );
+ max_reversals = ( (core_conf->max_reversals >= 0 || core_conf->max_reversals == AP_MAXRANGES_UNLIMITED)
+ ? core_conf->max_reversals
+ : AP_DEFAULT_MAX_REVERSALS );
+ /*
+ * Iterate through the brigade until reaching EOS or a bucket with
+ * unknown length.
+ */
+ for (e = APR_BRIGADE_FIRST(bb);
+ (e != APR_BRIGADE_SENTINEL(bb) && !APR_BUCKET_IS_EOS(e)
+ && e->length != (apr_size_t)-1);
+ e = APR_BUCKET_NEXT(e)) {
+ clength += e->length;
+ }
+
+ /*
+ * Don't attempt to do byte range work if this brigade doesn't
+ * contain an EOS, or if any of the buckets has an unknown length;
+ * this avoids the cases where it is expensive to perform
+ * byteranging (i.e. may require arbitrary amounts of memory).
+ */
+ if (!APR_BUCKET_IS_EOS(e) || clength <= 0) {
+ ap_remove_output_filter(f);
+ return ap_pass_brigade(f->next, bb);
+ }
+
+ original_status = r->status;
+ num_ranges = ap_set_byterange(r, clength, &indexes, &overlaps, &reversals);
+
+ /* No Ranges or we hit a limit? We have nothing to do, get out of the way. */
+ if (num_ranges == 0 ||
+ (max_ranges >= 0 && num_ranges > max_ranges) ||
+ (max_overlaps >= 0 && overlaps > max_overlaps) ||
+ (max_reversals >= 0 && reversals > max_reversals)) {
+ r->status = original_status;
+ ap_remove_output_filter(f);
+ return ap_pass_brigade(f->next, bb);
+ }
+
+ /* this brigade holds what we will be sending */
+ bsend = apr_brigade_create(r->pool, c->bucket_alloc);
+
+ if (num_ranges < 0)
+ return send_416(f, bsend);
+
+ if (num_ranges > 1) {
+ /* Is ap_make_content_type required here? */
+ const char *orig_ct = ap_make_content_type(r, r->content_type);
+
+ ap_set_content_type(r, apr_pstrcat(r->pool, "multipart",
+ use_range_x(r) ? "/x-" : "/",
+ "byteranges; boundary=",
+ ap_multipart_boundary, NULL));
+
+ if (orig_ct) {
+ bound_head = apr_pstrcat(r->pool,
+ CRLF "--", ap_multipart_boundary,
+ CRLF "Content-type: ",
+ orig_ct,
+ CRLF "Content-range: bytes ",
+ NULL);
+ }
+ else {
+ /* if we have no type for the content, do our best */
+ bound_head = apr_pstrcat(r->pool,
+ CRLF "--", ap_multipart_boundary,
+ CRLF "Content-range: bytes ",
+ NULL);
+ }
+ ap_xlate_proto_to_ascii(bound_head, strlen(bound_head));
+ }
+
+ tmpbb = apr_brigade_create(r->pool, c->bucket_alloc);
+
+ idx = (indexes_t *)indexes->elts;
+ for (i = 0; i < indexes->nelts; i++, idx++) {
+ range_start = idx->start;
+ range_end = idx->end;
+
+ rv = copy_brigade_range(bb, tmpbb, range_start, range_end);
+ if (rv != APR_SUCCESS ) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01584)
+ "copy_brigade_range() failed [%" APR_OFF_T_FMT
+ "-%" APR_OFF_T_FMT ",%" APR_OFF_T_FMT "]",
+ range_start, range_end, clength);
+ continue;
+ }
+ found = 1;
+
+ /*
+ * For single range requests, we must produce a Content-Range header.
+ * Otherwise, we need to produce the multipart boundaries.
+ */
+ if (num_ranges == 1) {
+ apr_table_setn(r->headers_out, "Content-Range",
+ apr_psprintf(r->pool, "bytes " BYTERANGE_FMT,
+ range_start, range_end, clength));
+ }
+ else {
+ char *ts;
+
+ e = apr_bucket_pool_create(bound_head, strlen(bound_head),
+ r->pool, c->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(bsend, e);
+
+ ts = apr_psprintf(r->pool, BYTERANGE_FMT CRLF CRLF,
+ range_start, range_end, clength);
+ ap_xlate_proto_to_ascii(ts, strlen(ts));
+ e = apr_bucket_pool_create(ts, strlen(ts), r->pool,
+ c->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(bsend, e);
+ }
+
+ APR_BRIGADE_CONCAT(bsend, tmpbb);
+ if (i && !(i & 0x1F)) {
+ /*
+ * Every now and then, pass what we have down the filter chain.
+ * In this case, the content-length filter cannot calculate and
+ * set the content length and we must remove any Content-Length
+ * header already present.
+ */
+ apr_table_unset(r->headers_out, "Content-Length");
+ if ((rv = ap_pass_brigade(f->next, bsend)) != APR_SUCCESS)
+ return rv;
+ apr_brigade_cleanup(bsend);
+ }
+ }
+
+ if (found == 0) {
+ /* bsend is assumed to be empty if we get here. */
+ return send_416(f, bsend);
+ }
+
+ if (num_ranges > 1) {
+ char *end;
+
+ /* add the final boundary */
+ end = apr_pstrcat(r->pool, CRLF "--", ap_multipart_boundary, "--" CRLF,
+ NULL);
+ ap_xlate_proto_to_ascii(end, strlen(end));
+ e = apr_bucket_pool_create(end, strlen(end), r->pool, c->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(bsend, e);
+ }
+
+ e = apr_bucket_eos_create(c->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(bsend, e);
+
+ /* we're done with the original content - all of our data is in bsend. */
+ apr_brigade_cleanup(bb);
+ apr_brigade_destroy(tmpbb);
+
+ /* send our multipart output */
+ return ap_pass_brigade(f->next, bsend);
+}
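
[Editorial aside, not part of the upstream file: for multi-range responses the
filter above frames each part between boundary lines, as required for
multipart/byteranges. A rough standalone sketch of the resulting wire format,
using an invented boundary and a 26-byte "a..z" body, is:]

    #include <stdio.h>

    int main(void)
    {
        const char *b = "3d6b6a416f9b5";                 /* hypothetical boundary */

        printf("\r\n--%s\r\n"
               "Content-type: text/plain\r\n"
               "Content-range: bytes 0-4/26\r\n\r\n"
               "abcde", b);
        printf("\r\n--%s\r\n"
               "Content-type: text/plain\r\n"
               "Content-range: bytes 21-25/26\r\n\r\n"
               "vwxyz", b);
        printf("\r\n--%s--\r\n", b);                      /* final boundary */
        return 0;
    }
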
diff --git a/modules/http/chunk_filter.c b/modules/http/chunk_filter.c
new file mode 100644
index 0000000..cb1501a
--- /dev/null
+++ b/modules/http/chunk_filter.c
@@ -0,0 +1,197 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * chunk_filter.c --- HTTP/1.1 chunked transfer encoding filter.
+ */
+
+#include "apr_strings.h"
+#include "apr_thread_proc.h" /* for RLIMIT stuff */
+
+#define APR_WANT_STRFUNC
+#include "apr_want.h"
+
+#include "httpd.h"
+#include "http_config.h"
+#include "http_connection.h"
+#include "http_core.h"
+#include "http_protocol.h" /* For index_of_response(). Grump. */
+#include "http_request.h"
+
+#include "util_filter.h"
+#include "util_ebcdic.h"
+#include "ap_mpm.h"
+#include "scoreboard.h"
+
+#include "mod_core.h"
+
+/*
+ * A pointer to this is stored in the filter context to record that a bad
+ * gateway error bucket has been seen; the address merely serves as a unique
+ * sentinel value.
+ */
+static char bad_gateway_seen;
+
+apr_status_t ap_http_chunk_filter(ap_filter_t *f, apr_bucket_brigade *b)
+{
+#define ASCII_CRLF "\015\012"
+#define ASCII_ZERO "\060"
+ conn_rec *c = f->r->connection;
+ apr_bucket_brigade *more, *tmp;
+ apr_bucket *e;
+ apr_status_t rv;
+
+ for (more = tmp = NULL; b; b = more, more = NULL) {
+ apr_off_t bytes = 0;
+ apr_bucket *eos = NULL;
+ apr_bucket *flush = NULL;
+ /* XXX: chunk_hdr must remain at this scope since it is used in a
+ * transient bucket.
+ */
+ char chunk_hdr[20]; /* enough space for the snprintf below */
+
+
+ for (e = APR_BRIGADE_FIRST(b);
+ e != APR_BRIGADE_SENTINEL(b);
+ e = APR_BUCKET_NEXT(e))
+ {
+ if (APR_BUCKET_IS_EOS(e)) {
+ /* there shouldn't be anything after the eos */
+ ap_remove_output_filter(f);
+ eos = e;
+ break;
+ }
+ if (AP_BUCKET_IS_ERROR(e)
+ && (((ap_bucket_error *)(e->data))->status
+ == HTTP_BAD_GATEWAY)) {
+ /*
+ * We had a broken backend. Memorize this in the filter
+ * context.
+ */
+ f->ctx = &bad_gateway_seen;
+ continue;
+ }
+ if (APR_BUCKET_IS_FLUSH(e)) {
+ flush = e;
+ if (e != APR_BRIGADE_LAST(b)) {
+ more = apr_brigade_split_ex(b, APR_BUCKET_NEXT(e), tmp);
+ }
+ break;
+ }
+ else if (e->length == (apr_size_t)-1) {
+ /* unknown amount of data (e.g. a pipe) */
+ const char *data;
+ apr_size_t len;
+
+ rv = apr_bucket_read(e, &data, &len, APR_BLOCK_READ);
+ if (rv != APR_SUCCESS) {
+ return rv;
+ }
+ if (len > 0) {
+ /*
+ * There may be a new next bucket representing the
+ * rest of the data stream on which a read() may
+ * block so we pass down what we have so far.
+ */
+ bytes += len;
+ more = apr_brigade_split_ex(b, APR_BUCKET_NEXT(e), tmp);
+ break;
+ }
+ else {
+ /* If there was nothing in this bucket then we can
+ * safely move on to the next one without pausing
+ * to pass down what we have counted up so far.
+ */
+ continue;
+ }
+ }
+ else {
+ bytes += e->length;
+ }
+ }
+
+ /*
+ * XXX: if there aren't very many bytes at this point it may
+ * be a good idea to set them aside and return for more,
+ * unless we haven't finished counting this brigade yet.
+ */
+ /* if there are content bytes, then wrap them in a chunk */
+ if (bytes > 0) {
+ apr_size_t hdr_len;
+ /*
+ * Insert the chunk header, specifying the number of bytes in
+ * the chunk.
+ */
+ hdr_len = apr_snprintf(chunk_hdr, sizeof(chunk_hdr),
+ "%" APR_UINT64_T_HEX_FMT CRLF, (apr_uint64_t)bytes);
+ ap_xlate_proto_to_ascii(chunk_hdr, hdr_len);
+ e = apr_bucket_transient_create(chunk_hdr, hdr_len,
+ c->bucket_alloc);
+ APR_BRIGADE_INSERT_HEAD(b, e);
+
+ /*
+ * Insert the end-of-chunk CRLF before an EOS or
+ * FLUSH bucket, or appended to the brigade
+ */
+ e = apr_bucket_immortal_create(ASCII_CRLF, 2, c->bucket_alloc);
+ if (eos != NULL) {
+ APR_BUCKET_INSERT_BEFORE(eos, e);
+ }
+ else if (flush != NULL) {
+ APR_BUCKET_INSERT_BEFORE(flush, e);
+ }
+ else {
+ APR_BRIGADE_INSERT_TAIL(b, e);
+ }
+ }
+
+ /* RFC 2616, Section 3.6.1
+ *
+ * If there is an EOS bucket, then prefix it with:
+ * 1) the last-chunk marker ("0" CRLF)
+ * 2) the trailer
+ * 3) the end-of-chunked body CRLF
+ *
+ * We only do this if we have not seen an error bucket with
+ * status HTTP_BAD_GATEWAY. We have memorized an
+ * error bucket that we had seen in the filter context.
+ * The error bucket with status HTTP_BAD_GATEWAY indicates that the
+ * connection to the backend (mod_proxy) broke in the middle of the
+ * response. In order to signal the client that something went wrong
+ * we do not create the last-chunk marker and set c->keepalive to
+ * AP_CONN_CLOSE in the core output filter.
+ *
+ * XXX: it would be nice to combine this with the end-of-chunk
+ * marker above, but this is a bit more straightforward for
+ * now.
+ */
+ if (eos && !f->ctx) {
+ /* XXX: (2) trailers ... does not yet exist */
+ e = apr_bucket_immortal_create(ASCII_ZERO ASCII_CRLF
+ /* <trailers> */
+ ASCII_CRLF, 5, c->bucket_alloc);
+ APR_BUCKET_INSERT_BEFORE(eos, e);
+ }
+
+ /* pass the brigade to the next filter. */
+ rv = ap_pass_brigade(f->next, b);
+ apr_brigade_cleanup(b);
+ if (rv != APR_SUCCESS || eos != NULL) {
+ return rv;
+ }
+ tmp = b;
+ }
+ return APR_SUCCESS;
+}
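
[Editorial aside (plain C, no APR; not part of the patch): the chunked framing
the filter above produces is a hex chunk-size line, the chunk data, a CRLF,
and finally the last-chunk marker "0" CRLF CRLF. A standalone sketch:]

    #include <stdio.h>
    #include <string.h>

    static void emit_chunk(const char *data)
    {
        size_t len = strlen(data);

        if (len > 0) {
            printf("%zx\r\n", len);   /* chunk-size in lowercase hex, like the filter */
            fwrite(data, 1, len, stdout);
            printf("\r\n");           /* end-of-chunk CRLF */
        }
    }

    int main(void)
    {
        emit_chunk("Hello, ");
        emit_chunk("world");
        printf("0\r\n\r\n");          /* last-chunk marker + end of chunked body */
        return 0;
    }
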
diff --git a/modules/http/config.m4 b/modules/http/config.m4
new file mode 100644
index 0000000..6496007
--- /dev/null
+++ b/modules/http/config.m4
@@ -0,0 +1,20 @@
+dnl modules enabled in this directory by default
+
+APACHE_MODPATH_INIT(http)
+
+http_objects="http_core.lo http_protocol.lo http_request.lo http_filters.lo chunk_filter.lo byterange_filter.lo http_etag.lo"
+
+dnl mod_http should only be built as a static module for now.
+dnl this will hopefully be "fixed" at some point in the future by
+dnl refactoring mod_http and moving some things to the core and
+dnl vice versa so that the core does not depend upon mod_http.
+if test "$enable_http" = "yes"; then
+ enable_http="static"
+elif test "$enable_http" = "shared"; then
+ AC_MSG_ERROR([mod_http can not be built as a shared DSO])
+fi
+
+APACHE_MODULE(http,[HTTP protocol handling. The http module is a basic one that enables the server to function as an HTTP server. It is only useful to disable it if you want to use another protocol module instead. Don't disable this module unless you are really sure what you are doing. Note: This module will always be linked statically.], $http_objects, , static)
+APACHE_MODULE(mime, mapping of file-extension to MIME. Disabling this module is normally not recommended., , , yes)
+
+APACHE_MODPATH_FINISH
diff --git a/modules/http/http_core.c b/modules/http/http_core.c
new file mode 100644
index 0000000..c6cb473
--- /dev/null
+++ b/modules/http/http_core.c
@@ -0,0 +1,326 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "apr_strings.h"
+#include "apr_thread_proc.h" /* for RLIMIT stuff */
+
+#define APR_WANT_STRFUNC
+#include "apr_want.h"
+
+#include "httpd.h"
+#include "http_config.h"
+#include "http_connection.h"
+#include "http_core.h"
+#include "http_protocol.h" /* For index_of_response(). Grump. */
+#include "http_request.h"
+
+#include "util_filter.h"
+#include "util_ebcdic.h"
+#include "ap_mpm.h"
+#include "scoreboard.h"
+
+#include "mod_core.h"
+
+/* Handles for core filters */
+AP_DECLARE_DATA ap_filter_rec_t *ap_http_input_filter_handle;
+AP_DECLARE_DATA ap_filter_rec_t *ap_http_header_filter_handle;
+AP_DECLARE_DATA ap_filter_rec_t *ap_chunk_filter_handle;
+AP_DECLARE_DATA ap_filter_rec_t *ap_http_outerror_filter_handle;
+AP_DECLARE_DATA ap_filter_rec_t *ap_byterange_filter_handle;
+
+AP_DECLARE_DATA const char *ap_multipart_boundary;
+
+/* If we are using an MPM that supports async connections,
+ * use a different processing function
+ */
+static int async_mpm = 0;
+
+static const char *set_keep_alive_timeout(cmd_parms *cmd, void *dummy,
+ const char *arg)
+{
+ apr_interval_time_t timeout;
+ const char *err = ap_check_cmd_context(cmd, NOT_IN_DIR_CONTEXT);
+ if (err != NULL) {
+ return err;
+ }
+
+ /* Stolen from mod_proxy.c */
+ if (ap_timeout_parameter_parse(arg, &timeout, "s") != APR_SUCCESS)
+ return "KeepAliveTimeout has wrong format";
+ cmd->server->keep_alive_timeout = timeout;
+
+ /* We don't want to take into account whether or not KeepAliveTimeout is
+ * set for the main server, because if no http_module directive is used
+ * for a vhost, it will inherit the http_srv_cfg from the main server.
+ * However, keep_alive_timeout_set helps determine whether the vhost should
+ * use its own configured timeout or the one from the vhost declared first
+ * on the same IP:port (i.e. c->base_server, and the legacy behaviour).
+ */
+ if (cmd->server->is_virtual) {
+ cmd->server->keep_alive_timeout_set = 1;
+ }
+ return NULL;
+}
+
+static const char *set_keep_alive(cmd_parms *cmd, void *dummy,
+ int arg)
+{
+ const char *err = ap_check_cmd_context(cmd, NOT_IN_DIR_CONTEXT);
+ if (err != NULL) {
+ return err;
+ }
+
+ cmd->server->keep_alive = arg;
+ return NULL;
+}
+
+static const char *set_keep_alive_max(cmd_parms *cmd, void *dummy,
+ const char *arg)
+{
+ const char *err = ap_check_cmd_context(cmd, NOT_IN_DIR_CONTEXT);
+ if (err != NULL) {
+ return err;
+ }
+
+ cmd->server->keep_alive_max = atoi(arg);
+ return NULL;
+}
+
+static const command_rec http_cmds[] = {
+ AP_INIT_TAKE1("KeepAliveTimeout", set_keep_alive_timeout, NULL, RSRC_CONF,
+ "Keep-Alive timeout duration (sec)"),
+ AP_INIT_TAKE1("MaxKeepAliveRequests", set_keep_alive_max, NULL, RSRC_CONF,
+ "Maximum number of Keep-Alive requests per connection, "
+ "or 0 for infinite"),
+ AP_INIT_FLAG("KeepAlive", set_keep_alive, NULL, RSRC_CONF,
+ "Whether persistent connections should be On or Off"),
+ { NULL }
+};
+
+static const char *http_scheme(const request_rec *r)
+{
+ /*
+ * The http module shouldn't return anything other than
+ * "http" (the default) or "https".
+ */
+ if (r->server->server_scheme &&
+ (strcmp(r->server->server_scheme, "https") == 0))
+ return "https";
+
+ return "http";
+}
+
+static apr_port_t http_port(const request_rec *r)
+{
+ if (r->server->server_scheme &&
+ (strcmp(r->server->server_scheme, "https") == 0))
+ return DEFAULT_HTTPS_PORT;
+
+ return DEFAULT_HTTP_PORT;
+}
+
+static int ap_process_http_async_connection(conn_rec *c)
+{
+ request_rec *r = NULL;
+ conn_state_t *cs = c->cs;
+
+ AP_DEBUG_ASSERT(cs != NULL);
+ AP_DEBUG_ASSERT(cs->state == CONN_STATE_READ_REQUEST_LINE);
+
+ if (cs->state == CONN_STATE_READ_REQUEST_LINE) {
+ ap_update_child_status_from_conn(c->sbh, SERVER_BUSY_READ, c);
+ if (ap_extended_status) {
+ ap_set_conn_count(c->sbh, r, c->keepalives);
+ }
+ if ((r = ap_read_request(c))) {
+ if (r->status == HTTP_OK) {
+ cs->state = CONN_STATE_HANDLER;
+ if (ap_extended_status) {
+ ap_set_conn_count(c->sbh, r, c->keepalives + 1);
+ }
+ ap_update_child_status(c->sbh, SERVER_BUSY_WRITE, r);
+ ap_process_async_request(r);
+ /* After the call to ap_process_request, the
+ * request pool may have been deleted. We set
+ * r=NULL here to ensure that any dereference
+ * of r that might be added later in this function
+ * will result in a segfault immediately instead
+ * of nondeterministic failures later.
+ */
+ r = NULL;
+ }
+
+ if (cs->state != CONN_STATE_WRITE_COMPLETION &&
+ cs->state != CONN_STATE_SUSPENDED) {
+ /* Something went wrong; close the connection */
+ cs->state = CONN_STATE_LINGER;
+ }
+ }
+ else { /* ap_read_request failed - client may have closed */
+ cs->state = CONN_STATE_LINGER;
+ }
+ }
+
+ return OK;
+}
+
+static int ap_process_http_sync_connection(conn_rec *c)
+{
+ request_rec *r;
+ conn_state_t *cs = c->cs;
+ apr_socket_t *csd = NULL;
+ int mpm_state = 0;
+
+ /*
+ * Read and process each request found on our connection
+ * until no requests are left or we decide to close.
+ */
+
+ ap_update_child_status_from_conn(c->sbh, SERVER_BUSY_READ, c);
+ while ((r = ap_read_request(c)) != NULL) {
+ apr_interval_time_t keep_alive_timeout = r->server->keep_alive_timeout;
+
+ /* To preserve legacy behaviour, use the keepalive timeout from the
+ * base server (first on this IP:port) when none is explicitly
+ * configured on this server.
+ */
+ if (!r->server->keep_alive_timeout_set) {
+ keep_alive_timeout = c->base_server->keep_alive_timeout;
+ }
+
+ if (r->status == HTTP_OK) {
+ if (cs)
+ cs->state = CONN_STATE_HANDLER;
+ ap_update_child_status(c->sbh, SERVER_BUSY_WRITE, r);
+ ap_process_request(r);
+ /* After the call to ap_process_request, the
+ * request pool will have been deleted. We set
+ * r=NULL here to ensure that any dereference
+ * of r that might be added later in this function
+ * will result in a segfault immediately instead
+ * of nondeterministic failures later.
+ */
+ r = NULL;
+ }
+
+ if (c->keepalive != AP_CONN_KEEPALIVE || c->aborted)
+ break;
+
+ ap_update_child_status(c->sbh, SERVER_BUSY_KEEPALIVE, NULL);
+
+ if (ap_mpm_query(AP_MPMQ_MPM_STATE, &mpm_state)) {
+ break;
+ }
+
+ if (mpm_state == AP_MPMQ_STOPPING) {
+ break;
+ }
+
+ if (!csd) {
+ csd = ap_get_conn_socket(c);
+ }
+ apr_socket_opt_set(csd, APR_INCOMPLETE_READ, 1);
+ apr_socket_timeout_set(csd, keep_alive_timeout);
+ /* Go straight to select() to wait for the next request */
+ }
+
+ return OK;
+}
+
+static int ap_process_http_connection(conn_rec *c)
+{
+ if (async_mpm && !c->clogging_input_filters) {
+ return ap_process_http_async_connection(c);
+ }
+ else {
+ return ap_process_http_sync_connection(c);
+ }
+}
+
+static int http_create_request(request_rec *r)
+{
+ if (!r->main && !r->prev) {
+ ap_add_output_filter_handle(ap_byterange_filter_handle,
+ NULL, r, r->connection);
+ ap_add_output_filter_handle(ap_content_length_filter_handle,
+ NULL, r, r->connection);
+ ap_add_output_filter_handle(ap_http_header_filter_handle,
+ NULL, r, r->connection);
+ ap_add_output_filter_handle(ap_http_outerror_filter_handle,
+ NULL, r, r->connection);
+ }
+
+ return OK;
+}
+
+static int http_send_options(request_rec *r)
+{
+ if ((r->method_number == M_OPTIONS) && r->uri && (r->uri[0] == '*') &&
+ (r->uri[1] == '\0')) {
+ return DONE; /* Send HTTP pong, without Allow header */
+ }
+ return DECLINED;
+}
+
+static int http_post_config(apr_pool_t *p, apr_pool_t *plog, apr_pool_t *ptemp, server_rec *s)
+{
+ apr_uint64_t val;
+ if (ap_mpm_query(AP_MPMQ_IS_ASYNC, &async_mpm) != APR_SUCCESS) {
+ async_mpm = 0;
+ }
+ ap_random_insecure_bytes(&val, sizeof(val));
+ ap_multipart_boundary = apr_psprintf(p, "%0" APR_UINT64_T_HEX_FMT, val);
+
+ return OK;
+}
+
+static void register_hooks(apr_pool_t *p)
+{
+ ap_hook_post_config(http_post_config, NULL, NULL, APR_HOOK_MIDDLE);
+ ap_hook_process_connection(ap_process_http_connection, NULL, NULL,
+ APR_HOOK_REALLY_LAST);
+ ap_hook_map_to_storage(ap_send_http_trace,NULL,NULL,APR_HOOK_MIDDLE);
+ ap_hook_map_to_storage(http_send_options,NULL,NULL,APR_HOOK_MIDDLE);
+ ap_hook_http_scheme(http_scheme,NULL,NULL,APR_HOOK_REALLY_LAST);
+ ap_hook_default_port(http_port,NULL,NULL,APR_HOOK_REALLY_LAST);
+ ap_hook_create_request(http_create_request, NULL, NULL, APR_HOOK_REALLY_LAST);
+ ap_http_input_filter_handle =
+ ap_register_input_filter("HTTP_IN", ap_http_filter,
+ NULL, AP_FTYPE_PROTOCOL);
+ ap_http_header_filter_handle =
+ ap_register_output_filter("HTTP_HEADER", ap_http_header_filter,
+ NULL, AP_FTYPE_PROTOCOL);
+ ap_chunk_filter_handle =
+ ap_register_output_filter("CHUNK", ap_http_chunk_filter,
+ NULL, AP_FTYPE_TRANSCODE);
+ ap_http_outerror_filter_handle =
+ ap_register_output_filter("HTTP_OUTERROR", ap_http_outerror_filter,
+ NULL, AP_FTYPE_PROTOCOL);
+ ap_byterange_filter_handle =
+ ap_register_output_filter("BYTERANGE", ap_byterange_filter,
+ NULL, AP_FTYPE_PROTOCOL);
+ ap_method_registry_init(p);
+}
+
+AP_DECLARE_MODULE(http) = {
+ STANDARD20_MODULE_STUFF,
+ NULL, /* create per-directory config structure */
+ NULL, /* merge per-directory config structures */
+ NULL, /* create per-server config structure */
+ NULL, /* merge per-server config structures */
+ http_cmds, /* command apr_table_t */
+ register_hooks /* register hooks */
+};
diff --git a/modules/http/http_etag.c b/modules/http/http_etag.c
new file mode 100644
index 0000000..af74549
--- /dev/null
+++ b/modules/http/http_etag.c
@@ -0,0 +1,413 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "apr_strings.h"
+#include "apr_thread_proc.h" /* for RLIMIT stuff */
+#include "apr_sha1.h"
+#include "apr_base64.h"
+#include "apr_buckets.h"
+
+#define APR_WANT_STRFUNC
+#include "apr_want.h"
+
+#include "httpd.h"
+#include "http_config.h"
+#include "http_connection.h"
+#include "http_core.h"
+#include "http_log.h"
+#include "http_protocol.h" /* For index_of_response(). Grump. */
+#include "http_request.h"
+
+#if APR_HAS_MMAP
+#include "apr_mmap.h"
+#endif /* APR_HAS_MMAP */
+
+#define SHA1_DIGEST_BASE64_LEN 4*(APR_SHA1_DIGESTSIZE/3)
+
+/* Generate the human-readable hex representation of an apr_uint64_t
+ * (basically a faster version of 'sprintf("%llx")')
+ */
+#define HEX_DIGITS "0123456789abcdef"
+static char *etag_uint64_to_hex(char *next, apr_uint64_t u)
+{
+ int printing = 0;
+ int shift = sizeof(apr_uint64_t) * 8 - 4;
+ do {
+ unsigned short next_digit = (unsigned short)
+ ((u >> shift) & (apr_uint64_t)0xf);
+ if (next_digit) {
+ *next++ = HEX_DIGITS[next_digit];
+ printing = 1;
+ }
+ else if (printing) {
+ *next++ = HEX_DIGITS[next_digit];
+ }
+ shift -= 4;
+ } while (shift);
+ *next++ = HEX_DIGITS[u & (apr_uint64_t)0xf];
+ return next;
+}
+
+#define ETAG_WEAK "W/"
+#define CHARS_PER_UINT64 (sizeof(apr_uint64_t) * 2)
+
+static void etag_start(char *etag, const char *weak, char **next)
+{
+ if (weak) {
+ while (*weak) {
+ *etag++ = *weak++;
+ }
+ }
+ *etag++ = '"';
+
+ *next = etag;
+}
+
+static void etag_end(char *next, const char *vlv, apr_size_t vlv_len)
+{
+ if (vlv) {
+ *next++ = ';';
+ apr_cpystrn(next, vlv, vlv_len);
+ }
+ else {
+ *next++ = '"';
+ *next = '\0';
+ }
+}
+
+/*
+ * Construct a strong ETag by creating a SHA1 hash across the file content.
+ */
+static char *make_digest_etag(request_rec *r, etag_rec *er, char *vlv,
+ apr_size_t vlv_len, char *weak, apr_size_t weak_len)
+{
+ apr_sha1_ctx_t context;
+ unsigned char digest[APR_SHA1_DIGESTSIZE];
+ apr_file_t *fd = NULL;
+ core_dir_config *cfg;
+ char *etag, *next;
+ apr_bucket_brigade *bb;
+ apr_bucket *e;
+
+ apr_size_t nbytes;
+ apr_off_t offset = 0, zero = 0, len = 0;
+ apr_status_t status;
+
+ cfg = (core_dir_config *)ap_get_core_module_config(r->per_dir_config);
+
+ if (er->fd) {
+ fd = er->fd;
+ }
+ else if (er->pathname) {
+ if ((status = apr_file_open(&fd, er->pathname, APR_READ | APR_BINARY,
+ 0, r->pool)) != APR_SUCCESS) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, status, r, APLOGNO(10251)
+ "Make etag: could not open %s", er->pathname);
+ return "";
+ }
+ }
+ if (!fd) {
+ return "";
+ }
+
+ if ((status = apr_file_seek(fd, APR_CUR, &offset)) != APR_SUCCESS) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, status, r, APLOGNO(10252)
+ "Make etag: could not seek");
+ if (er->pathname) {
+ apr_file_close(fd);
+ }
+ return "";
+ }
+
+ if ((status = apr_file_seek(fd, APR_END, &len)) != APR_SUCCESS) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, status, r, APLOGNO(10258)
+ "Make etag: could not seek");
+ if (er->pathname) {
+ apr_file_close(fd);
+ }
+ return "";
+ }
+
+ if ((status = apr_file_seek(fd, APR_SET, &zero)) != APR_SUCCESS) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, status, r, APLOGNO(10253)
+ "Make etag: could not seek");
+ if (er->pathname) {
+ apr_file_close(fd);
+ }
+ return "";
+ }
+
+ bb = apr_brigade_create(r->pool, r->connection->bucket_alloc);
+
+ e = apr_brigade_insert_file(bb, fd, 0, len, r->pool);
+
+#if APR_HAS_MMAP
+ if (cfg->enable_mmap == ENABLE_MMAP_OFF) {
+ (void)apr_bucket_file_enable_mmap(e, 0);
+ }
+#endif
+
+ apr_sha1_init(&context);
+ while (!APR_BRIGADE_EMPTY(bb))
+ {
+ const char *str;
+
+ e = APR_BRIGADE_FIRST(bb);
+
+ if ((status = apr_bucket_read(e, &str, &nbytes, APR_BLOCK_READ)) != APR_SUCCESS) {
+ apr_brigade_destroy(bb);
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, status, r, APLOGNO(10254)
+ "Make etag: could not read");
+ if (er->pathname) {
+ apr_file_close(fd);
+ }
+ return "";
+ }
+
+ apr_sha1_update(&context, str, nbytes);
+ apr_bucket_delete(e);
+ }
+
+ if ((status = apr_file_seek(fd, APR_SET, &offset)) != APR_SUCCESS) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, status, r, APLOGNO(10255)
+ "Make etag: could not seek");
+ if (er->pathname) {
+ apr_file_close(fd);
+ }
+ return "";
+ }
+ apr_sha1_final(digest, &context);
+
+ etag = apr_palloc(r->pool, weak_len + sizeof("\"\"") +
+ SHA1_DIGEST_BASE64_LEN + vlv_len + 4);
+
+ etag_start(etag, weak, &next);
+ next += apr_base64_encode_binary(next, digest, APR_SHA1_DIGESTSIZE) - 1;
+ etag_end(next, vlv, vlv_len);
+
+ if (er->pathname) {
+ apr_file_close(fd);
+ }
+
+ return etag;
+}
+
+/*
+ * Construct an entity tag (ETag) from resource information. If it's a real
+ * file, build in some of the file characteristics. If the modification time
+ * is newer than (request-time minus 1 second), mark the ETag as weak - it
+ * could be modified again in as short an interval.
+ */
+AP_DECLARE(char *) ap_make_etag_ex(request_rec *r, etag_rec *er)
+{
+ char *weak = NULL;
+ apr_size_t weak_len = 0, vlv_len = 0;
+ char *etag, *next, *vlv;
+ core_dir_config *cfg;
+ etag_components_t etag_bits;
+ etag_components_t bits_added;
+
+ cfg = (core_dir_config *)ap_get_core_module_config(r->per_dir_config);
+ etag_bits = (cfg->etag_bits & (~ cfg->etag_remove)) | cfg->etag_add;
+
+ if (er->force_weak) {
+ weak = ETAG_WEAK;
+ weak_len = sizeof(ETAG_WEAK);
+ }
+
+ if (r->vlist_validator) {
+
+ /* If we have a variant list validator (vlv) due to the
+ * response being negotiated, then we create a structured
+ * entity tag which merges the variant etag with the variant
+ * list validator (vlv). This merging makes revalidation
+ * somewhat safer, ensures that caches which can deal with
+ * Vary will (eventually) be updated if the set of variants is
+ * changed, and is also a protocol requirement for transparent
+ * content negotiation.
+ */
+
+ /* if the variant list validator is weak, we make the whole
+ * structured etag weak. If we did not, then clients could
+ * have problems merging range responses if we have different
+ * variants with the same non-globally-unique strong etag.
+ */
+
+ vlv = r->vlist_validator;
+ if (vlv[0] == 'W') {
+ vlv += 3;
+ weak = ETAG_WEAK;
+ weak_len = sizeof(ETAG_WEAK);
+ }
+ else {
+ vlv++;
+ }
+ vlv_len = strlen(vlv);
+
+ }
+ else {
+ vlv = NULL;
+ vlv_len = 0;
+ }
+
+ /*
+ * Did a module flag the need for a strong etag, or did the
+ * configuration tell us to generate a digest?
+ */
+ if (er->finfo->filetype == APR_REG &&
+ (AP_REQUEST_IS_STRONG_ETAG(r) || (etag_bits & ETAG_DIGEST))) {
+
+ return make_digest_etag(r, er, vlv, vlv_len, weak, weak_len);
+ }
+
+ /*
+ * If it's a file (or we wouldn't be here) and no ETags
+ * should be set for files, return an empty string and
+ * note it for the header-sender to ignore.
+ */
+ if (etag_bits & ETAG_NONE) {
+ return "";
+ }
+
+ if (etag_bits == ETAG_UNSET) {
+ etag_bits = ETAG_BACKWARD;
+ }
+ /*
+ * Make an ETag header out of various pieces of information. We use
+ * the last-modified date and, if we have a real file, the
+ * length and inode number - note that this doesn't have to match
+ * the content-length (i.e. includes), it just has to be unique
+ * for the file.
+ *
+ * If the request was made within a second of the last-modified date,
+ * we send a weak tag instead of a strong one, since it could
+ * be modified again later in the second, and the validation
+ * would be incorrect.
+ */
+ if ((er->request_time - er->finfo->mtime < (1 * APR_USEC_PER_SEC))) {
+ weak = ETAG_WEAK;
+ weak_len = sizeof(ETAG_WEAK);
+ }
+
+ if (er->finfo->filetype != APR_NOFILE) {
+ /*
+ * ETag gets set to [W/]"inode-size-mtime", modulo any
+ * FileETag keywords.
+ */
+ etag = apr_palloc(r->pool, weak_len + sizeof("\"--\"") +
+ 3 * CHARS_PER_UINT64 + vlv_len + 2);
+
+ etag_start(etag, weak, &next);
+
+ bits_added = 0;
+ if (etag_bits & ETAG_INODE) {
+ next = etag_uint64_to_hex(next, er->finfo->inode);
+ bits_added |= ETAG_INODE;
+ }
+ if (etag_bits & ETAG_SIZE) {
+ if (bits_added != 0) {
+ *next++ = '-';
+ }
+ next = etag_uint64_to_hex(next, er->finfo->size);
+ bits_added |= ETAG_SIZE;
+ }
+ if (etag_bits & ETAG_MTIME) {
+ if (bits_added != 0) {
+ *next++ = '-';
+ }
+ next = etag_uint64_to_hex(next, er->finfo->mtime);
+ }
+
+ etag_end(next, vlv, vlv_len);
+
+ }
+ else {
+ /*
+ * Not a file document, so just use the mtime: [W/]"mtime"
+ */
+ etag = apr_palloc(r->pool, weak_len + sizeof("\"\"") +
+ CHARS_PER_UINT64 + vlv_len + 2);
+
+ etag_start(etag, weak, &next);
+ next = etag_uint64_to_hex(next, er->finfo->mtime);
+ etag_end(next, vlv, vlv_len);
+
+ }
+
+ return etag;
+}
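
[Editorial aside, not part of the upstream file: with the default FileETag
settings the code above yields a tag of the form "inode-size-mtime", each
component in lowercase hex. A tiny sketch with invented values:]

    #include <stdio.h>

    int main(void)
    {
        unsigned long long inode = 0x1a2b3cULL;          /* made-up inode number  */
        unsigned long long size  = 0x2000ULL;            /* made-up file size     */
        unsigned long long mtime = 0x5f3a9d12c4e80ULL;   /* made-up mtime (usecs) */

        /* The three components joined with '-' and wrapped in quotes. */
        printf("ETag: \"%llx-%llx-%llx\"\n", inode, size, mtime);
        return 0;
    }
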
+
+AP_DECLARE(char *) ap_make_etag(request_rec *r, int force_weak)
+{
+ etag_rec er;
+
+ er.vlist_validator = NULL;
+ er.request_time = r->request_time;
+ er.finfo = &r->finfo;
+ er.pathname = r->filename;
+ er.fd = NULL;
+ er.force_weak = force_weak;
+
+ return ap_make_etag_ex(r, &er);
+}
+
+AP_DECLARE(void) ap_set_etag(request_rec *r)
+{
+ char *etag;
+
+ etag_rec er;
+
+ er.vlist_validator = r->vlist_validator;
+ er.request_time = r->request_time;
+ er.finfo = &r->finfo;
+ er.pathname = r->filename;
+ er.fd = NULL;
+ er.force_weak = 0;
+
+ etag = ap_make_etag_ex(r, &er);
+
+ if (etag && etag[0]) {
+ apr_table_setn(r->headers_out, "ETag", etag);
+ }
+ else {
+ apr_table_setn(r->notes, "no-etag", "omit");
+ }
+
+}
+
+AP_DECLARE(void) ap_set_etag_fd(request_rec *r, apr_file_t *fd)
+{
+ char *etag;
+
+ etag_rec er;
+
+ er.vlist_validator = r->vlist_validator;
+ er.request_time = r->request_time;
+ er.finfo = &r->finfo;
+ er.pathname = NULL;
+ er.fd = fd;
+ er.force_weak = 0;
+
+ etag = ap_make_etag_ex(r, &er);
+
+ if (etag && etag[0]) {
+ apr_table_setn(r->headers_out, "ETag", etag);
+ }
+ else {
+ apr_table_setn(r->notes, "no-etag", "omit");
+ }
+
+}
diff --git a/modules/http/http_filters.c b/modules/http/http_filters.c
new file mode 100644
index 0000000..1a8df34
--- /dev/null
+++ b/modules/http/http_filters.c
@@ -0,0 +1,1941 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * http_filters.c --- HTTP routines which either are filters or deal with filters.
+ */
+
+#include "apr.h"
+#include "apr_strings.h"
+#include "apr_buckets.h"
+#include "apr_lib.h"
+#include "apr_signal.h"
+
+#define APR_WANT_STDIO /* for sscanf */
+#define APR_WANT_STRFUNC
+#define APR_WANT_MEMFUNC
+#include "apr_want.h"
+
+#include "util_filter.h"
+#include "ap_config.h"
+#include "httpd.h"
+#include "http_config.h"
+#include "http_core.h"
+#include "http_protocol.h"
+#include "http_main.h"
+#include "http_request.h"
+#include "http_vhost.h"
+#include "http_connection.h"
+#include "http_log.h" /* For errors detected in basic auth common
+ * support code... */
+#include "apr_date.h" /* For apr_date_parse_http and APR_DATE_BAD */
+#include "util_charset.h"
+#include "util_ebcdic.h"
+#include "util_time.h"
+
+#include "mod_core.h"
+
+#if APR_HAVE_STDARG_H
+#include <stdarg.h>
+#endif
+#if APR_HAVE_UNISTD_H
+#include <unistd.h>
+#endif
+
+APLOG_USE_MODULE(http);
+
+typedef struct http_filter_ctx
+{
+ apr_off_t remaining;
+ apr_off_t limit;
+ apr_off_t limit_used;
+ apr_int32_t chunk_used;
+ apr_int32_t chunk_bws;
+ apr_int32_t chunkbits;
+ enum
+ {
+ BODY_NONE, /* streamed data */
+ BODY_LENGTH, /* data constrained by content length */
+ BODY_CHUNK, /* chunk expected */
+ BODY_CHUNK_PART, /* chunk digits */
+ BODY_CHUNK_EXT, /* chunk extension */
+ BODY_CHUNK_CR, /* got space(s) after digits, expect [CR]LF or ext */
+ BODY_CHUNK_LF, /* got CR after digits or ext, expect LF */
+ BODY_CHUNK_DATA, /* data constrained by chunked encoding */
+ BODY_CHUNK_END, /* chunked data terminating CRLF */
+ BODY_CHUNK_END_LF, /* got CR after data, expect LF */
+ BODY_CHUNK_TRAILER /* trailers */
+ } state;
+ unsigned int eos_sent :1,
+ seen_data:1;
+ apr_bucket_brigade *bb;
+} http_ctx_t;
+
+/* bail out if some error in the HTTP input filter happens */
+static apr_status_t bail_out_on_error(http_ctx_t *ctx,
+ ap_filter_t *f,
+ int http_error)
+{
+ apr_bucket *e;
+ apr_bucket_brigade *bb = ctx->bb;
+
+ apr_brigade_cleanup(bb);
+
+ if (f->r->proxyreq == PROXYREQ_RESPONSE) {
+ switch (http_error) {
+ case HTTP_REQUEST_ENTITY_TOO_LARGE:
+ return APR_ENOSPC;
+
+ case HTTP_REQUEST_TIME_OUT:
+ return APR_INCOMPLETE;
+
+ case HTTP_NOT_IMPLEMENTED:
+ return APR_ENOTIMPL;
+
+ default:
+ return APR_EGENERAL;
+ }
+ }
+
+ e = ap_bucket_error_create(http_error,
+ NULL, f->r->pool,
+ f->c->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(bb, e);
+ e = apr_bucket_eos_create(f->c->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(bb, e);
+ ctx->eos_sent = 1;
+ /* If chunked encoding / content-length are corrupt, we may treat parts
+ * of this request's body as the next one's headers.
+ * To be safe, disable keep-alive.
+ */
+ f->r->connection->keepalive = AP_CONN_CLOSE;
+ return ap_pass_brigade(f->r->output_filters, bb);
+}
+
+/**
+ * Parse a chunk line with optional extension, detect overflow.
+ * There are several error cases:
+ * 1) If the chunk line is misformatted, APR_EINVAL is returned.
+ * 2) If the conversion would require too many bits, APR_ENOSPC is returned.
+ * 3) If the conversion used the correct number of bits, but an overflow
+ * caused only the sign bit to flip, then APR_ENOSPC is returned.
+ * A negative chunk length always indicates an overflow error.
+ */
+static apr_status_t parse_chunk_size(http_ctx_t *ctx, const char *buffer,
+ apr_size_t len, int linelimit, int strict)
+{
+ apr_size_t i = 0;
+
+ while (i < len) {
+ char c = buffer[i];
+
+ ap_xlate_proto_from_ascii(&c, 1);
+
+ /* handle CRLF after the chunk */
+ if (ctx->state == BODY_CHUNK_END
+ || ctx->state == BODY_CHUNK_END_LF) {
+ if (c == LF) {
+ if (strict && (ctx->state != BODY_CHUNK_END_LF)) {
+ /*
+ * CR missing before LF.
+ */
+ return APR_EINVAL;
+ }
+ ctx->state = BODY_CHUNK;
+ }
+ else if (c == CR && ctx->state == BODY_CHUNK_END) {
+ ctx->state = BODY_CHUNK_END_LF;
+ }
+ else {
+ /*
+ * CRLF expected.
+ */
+ return APR_EINVAL;
+ }
+ i++;
+ continue;
+ }
+
+ /* handle start of the chunk */
+ if (ctx->state == BODY_CHUNK) {
+ if (!apr_isxdigit(c)) {
+ /*
+ * Detect invalid character at beginning. This also works for
+ * empty chunk size lines.
+ */
+ return APR_EINVAL;
+ }
+ else {
+ ctx->state = BODY_CHUNK_PART;
+ }
+ ctx->remaining = 0;
+ ctx->chunkbits = sizeof(apr_off_t) * 8;
+ ctx->chunk_used = 0;
+ ctx->chunk_bws = 0;
+ }
+
+ if (c == LF) {
+ if (strict && (ctx->state != BODY_CHUNK_LF)) {
+ /*
+ * CR missing before LF.
+ */
+ return APR_EINVAL;
+ }
+ if (ctx->remaining) {
+ ctx->state = BODY_CHUNK_DATA;
+ }
+ else {
+ ctx->state = BODY_CHUNK_TRAILER;
+ }
+ }
+ else if (ctx->state == BODY_CHUNK_LF) {
+ /*
+ * LF expected.
+ */
+ return APR_EINVAL;
+ }
+ else if (c == CR) {
+ ctx->state = BODY_CHUNK_LF;
+ }
+ else if (c == ';') {
+ ctx->state = BODY_CHUNK_EXT;
+ }
+ else if (ctx->state == BODY_CHUNK_EXT) {
+ /*
+ * Control chars (excluding tabs) are invalid.
+ * TODO: more precisely limit input
+ */
+ if (c != '\t' && apr_iscntrl(c)) {
+ return APR_EINVAL;
+ }
+ }
+ else if (c == ' ' || c == '\t') {
+ /* Be lenient up to 10 implied *LWS, a legacy of RFC 2616,
+ * and noted as errata to RFC7230;
+ * https://www.rfc-editor.org/errata_search.php?rfc=7230&eid=4667
+ */
+ ctx->state = BODY_CHUNK_CR;
+ if (++ctx->chunk_bws > 10) {
+ return APR_EINVAL;
+ }
+ }
+ else if (ctx->state == BODY_CHUNK_CR) {
+ /*
+ * ';', CR or LF expected.
+ */
+ return APR_EINVAL;
+ }
+ else if (ctx->state == BODY_CHUNK_PART) {
+ int xvalue;
+
+ /* ignore leading zeros */
+ if (!ctx->remaining && c == '0') {
+ i++;
+ continue;
+ }
+
+ ctx->chunkbits -= 4;
+ if (ctx->chunkbits < 0) {
+ /* overflow */
+ return APR_ENOSPC;
+ }
+
+ if (c >= '0' && c <= '9') {
+ xvalue = c - '0';
+ }
+ else if (c >= 'A' && c <= 'F') {
+ xvalue = c - 'A' + 0xa;
+ }
+ else if (c >= 'a' && c <= 'f') {
+ xvalue = c - 'a' + 0xa;
+ }
+ else {
+ /* bogus character */
+ return APR_EINVAL;
+ }
+
+ ctx->remaining = (ctx->remaining << 4) | xvalue;
+ if (ctx->remaining < 0) {
+ /* overflow */
+ return APR_ENOSPC;
+ }
+ }
+ else {
+ /* Should not happen */
+ return APR_EGENERAL;
+ }
+
+ i++;
+ }
+
+ /* sanity check */
+ ctx->chunk_used += len;
+ if (ctx->chunk_used < 0 || ctx->chunk_used > linelimit) {
+ return APR_ENOSPC;
+ }
+
+ return APR_SUCCESS;
+}
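
[Editorial aside (standalone sketch, not the Apache API): the overflow
protection above budgets four bits per hex digit and treats a flipped sign
bit as overflow (APR_ENOSPC in the filter). A hypothetical equivalent in
plain C, relying as the filter does on two's-complement representation:]

    #include <stdio.h>
    #include <stdint.h>
    #include <ctype.h>

    static int parse_hex_size(const char *s, int64_t *out)
    {
        int bits = 64;              /* 4 bits of budget per hex digit */
        int64_t v = 0;

        for (; *s; s++) {
            int d;

            if (!isxdigit((unsigned char)*s))
                return -1;          /* bogus character   -> APR_EINVAL */
            if (v == 0 && *s == '0')
                continue;           /* ignore leading zeros, like the filter */

            bits -= 4;
            if (bits < 0)
                return -2;          /* too many digits   -> APR_ENOSPC */

            d = isdigit((unsigned char)*s) ? *s - '0'
                                           : tolower((unsigned char)*s) - 'a' + 10;
            v = (int64_t)(((uint64_t)v << 4) | (uint64_t)d);
            if (v < 0)
                return -2;          /* sign bit flipped  -> APR_ENOSPC */
        }
        *out = v;
        return 0;
    }

    int main(void)
    {
        int64_t n = 0;

        printf("%d %lld\n", parse_hex_size("1a2b", &n), (long long)n);  /* 0 6699 */
        printf("%d\n", parse_hex_size("ffffffffffffffff", &n));         /* -2 (overflow) */
        return 0;
    }
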
+
+static apr_status_t read_chunked_trailers(http_ctx_t *ctx, ap_filter_t *f,
+ apr_bucket_brigade *b, int merge)
+{
+ int rv;
+ apr_bucket *e;
+ request_rec *r = f->r;
+ apr_table_t *saved_headers_in = r->headers_in;
+ int saved_status = r->status;
+
+ r->status = HTTP_OK;
+ r->headers_in = r->trailers_in;
+ apr_table_clear(r->headers_in);
+ ap_get_mime_headers(r);
+
+ if(r->status == HTTP_OK) {
+ r->status = saved_status;
+ e = apr_bucket_eos_create(f->c->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(b, e);
+ ctx->eos_sent = 1;
+ rv = APR_SUCCESS;
+ }
+ else {
+ const char *error_notes = apr_table_get(r->notes,
+ "error-notes");
+ ap_log_rerror(APLOG_MARK, APLOG_INFO, 0, r, APLOGNO(02656)
+ "Error while reading HTTP trailer: %i%s%s",
+ r->status, error_notes ? ": " : "",
+ error_notes ? error_notes : "");
+ rv = APR_EINVAL;
+ }
+
+ if(!merge) {
+ r->headers_in = saved_headers_in;
+ }
+ else {
+ r->headers_in = apr_table_overlay(r->pool, saved_headers_in,
+ r->trailers_in);
+ }
+
+ return rv;
+}
+
+/* This is the HTTP_INPUT filter for HTTP requests and responses from
+ * proxied servers (mod_proxy). It handles chunked and content-length
+ * bodies. This can only be inserted/used after the headers
+ * are successfully parsed.
+ */
+apr_status_t ap_http_filter(ap_filter_t *f, apr_bucket_brigade *b,
+ ap_input_mode_t mode, apr_read_type_e block,
+ apr_off_t readbytes)
+{
+ core_server_config *conf =
+ (core_server_config *) ap_get_module_config(f->r->server->module_config,
+ &core_module);
+ int strict = (conf->http_conformance != AP_HTTP_CONFORMANCE_UNSAFE);
+ apr_bucket *e;
+ http_ctx_t *ctx = f->ctx;
+ apr_status_t rv;
+ int http_error = HTTP_REQUEST_ENTITY_TOO_LARGE;
+ int again;
+
+ /* just get out of the way of things we don't want. */
+ if (mode != AP_MODE_READBYTES && mode != AP_MODE_GETLINE) {
+ return ap_get_brigade(f->next, b, mode, block, readbytes);
+ }
+
+ if (!ctx) {
+ const char *tenc, *lenp;
+ f->ctx = ctx = apr_pcalloc(f->r->pool, sizeof(*ctx));
+ ctx->state = BODY_NONE;
+ ctx->bb = apr_brigade_create(f->r->pool, f->c->bucket_alloc);
+
+ /* LimitRequestBody does not apply to proxied responses.
+ * Consider implementing this check in its own filter.
+ * Would adding a directive to limit the size of proxied
+ * responses be useful?
+ */
+ if (!f->r->proxyreq) {
+ ctx->limit = ap_get_limit_req_body(f->r);
+ }
+ else {
+ ctx->limit = 0;
+ }
+
+ tenc = apr_table_get(f->r->headers_in, "Transfer-Encoding");
+ lenp = apr_table_get(f->r->headers_in, "Content-Length");
+
+ if (tenc) {
+ if (ap_is_chunked(f->r->pool, tenc)) {
+ ctx->state = BODY_CHUNK;
+ }
+ else if (f->r->proxyreq == PROXYREQ_RESPONSE) {
+ /* http://tools.ietf.org/html/draft-ietf-httpbis-p1-messaging-23
+ * Section 3.3.3.3: "If a Transfer-Encoding header field is
+ * present in a response and the chunked transfer coding is not
+ * the final encoding, the message body length is determined by
+ * reading the connection until it is closed by the server."
+ */
+ ap_log_rerror(APLOG_MARK, APLOG_INFO, 0, f->r, APLOGNO(02555)
+ "Unknown Transfer-Encoding: %s; "
+ "using read-until-close", tenc);
+ tenc = NULL;
+ }
+ else {
+                /* Something that isn't an HTTP request, unless some future
+                 * edition defines new transfer encodings, is unsupported.
+                 */
+ ap_log_rerror(APLOG_MARK, APLOG_INFO, 0, f->r, APLOGNO(01585)
+ "Unknown Transfer-Encoding: %s", tenc);
+ return bail_out_on_error(ctx, f, HTTP_BAD_REQUEST);
+ }
+ lenp = NULL;
+ }
+ if (lenp) {
+ ctx->state = BODY_LENGTH;
+
+ /* Protects against over/underflow, non-digit chars in the
+ * string, leading plus/minus signs, trailing characters and
+ * a negative number.
+ */
+ if (!ap_parse_strict_length(&ctx->remaining, lenp)) {
+ ctx->remaining = 0;
+ ap_log_rerror(APLOG_MARK, APLOG_INFO, 0, f->r, APLOGNO(01587)
+ "Invalid Content-Length");
+
+ return bail_out_on_error(ctx, f, HTTP_BAD_REQUEST);
+ }
+
+ /* If we have a limit in effect and we know the C-L ahead of
+ * time, stop it here if it is invalid.
+ */
+ if (ctx->limit && ctx->limit < ctx->remaining) {
+ ap_log_rerror(APLOG_MARK, APLOG_INFO, 0, f->r, APLOGNO(01588)
+ "Requested content-length of %" APR_OFF_T_FMT
+ " is larger than the configured limit"
+ " of %" APR_OFF_T_FMT, ctx->remaining, ctx->limit);
+ return bail_out_on_error(ctx, f, HTTP_REQUEST_ENTITY_TOO_LARGE);
+ }
+ }
+
+ /* If we don't have a request entity indicated by the headers, EOS.
+ * (BODY_NONE is a valid intermediate state due to trailers,
+ * but it isn't a valid starting state.)
+ *
+ * RFC 2616 Section 4.4 note 5 states that connection-close
+ * is invalid for a request entity - request bodies must be
+ * denoted by C-L or T-E: chunked.
+ *
+ * Note that since the proxy uses this filter to handle the
+ * proxied *response*, proxy responses MUST be exempt.
+ */
+ if (ctx->state == BODY_NONE && f->r->proxyreq != PROXYREQ_RESPONSE) {
+ e = apr_bucket_eos_create(f->c->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(b, e);
+ ctx->eos_sent = 1;
+ return APR_SUCCESS;
+ }
+ }
+
+ /* Since we're about to read data, send 100-Continue if needed.
+ * Only valid on chunked and C-L bodies where the C-L is > 0.
+ *
+ * If the read is to be nonblocking though, the caller may not want to
+ * handle this just now (e.g. mod_proxy_http), and is prepared to read
+ * nothing if the client really waits for 100 continue, so we don't
+ * send it now and wait for later blocking read.
+ *
+ * In any case, even if r->expecting remains set at the end of the
+ * request handling, ap_set_keepalive() will finally do the right
+ * thing (i.e. "Connection: close" the connection).
+ */
+ if (block == APR_BLOCK_READ
+ && (ctx->state == BODY_CHUNK
+ || (ctx->state == BODY_LENGTH && ctx->remaining > 0))
+ && f->r->expecting_100 && f->r->proto_num >= HTTP_VERSION(1,1)
+ && !(ctx->eos_sent || f->r->eos_sent || f->r->bytes_sent)) {
+ if (!ap_is_HTTP_SUCCESS(f->r->status)) {
+ ctx->state = BODY_NONE;
+ ctx->eos_sent = 1; /* send EOS below */
+ }
+ else if (!ctx->seen_data) {
+ int saved_status = f->r->status;
+ const char *saved_status_line = f->r->status_line;
+ f->r->status = HTTP_CONTINUE;
+ f->r->status_line = NULL;
+ ap_send_interim_response(f->r, 0);
+ AP_DEBUG_ASSERT(!f->r->expecting_100);
+ f->r->status_line = saved_status_line;
+ f->r->status = saved_status;
+ }
+ else {
+ /* https://tools.ietf.org/html/rfc7231#section-5.1.1
+ * A server MAY omit sending a 100 (Continue) response if it
+ * has already received some or all of the message body for
+ * the corresponding request [...]
+ */
+ f->r->expecting_100 = 0;
+ }
+ }
+
+ /* sanity check in case we're read twice */
+ if (ctx->eos_sent) {
+ e = apr_bucket_eos_create(f->c->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(b, e);
+ return APR_SUCCESS;
+ }
+
+ do {
+ apr_brigade_cleanup(b);
+ again = 0; /* until further notice */
+
+ /* read and handle the brigade */
+ switch (ctx->state) {
+ case BODY_CHUNK:
+ case BODY_CHUNK_PART:
+ case BODY_CHUNK_EXT:
+ case BODY_CHUNK_CR:
+ case BODY_CHUNK_LF:
+ case BODY_CHUNK_END:
+ case BODY_CHUNK_END_LF: {
+
+ rv = ap_get_brigade(f->next, b, AP_MODE_GETLINE, block, 0);
+
+ /* for timeout */
+ if (block == APR_NONBLOCK_READ
+ && ((rv == APR_SUCCESS && APR_BRIGADE_EMPTY(b))
+ || (APR_STATUS_IS_EAGAIN(rv)))) {
+ return APR_EAGAIN;
+ }
+
+ if (rv == APR_EOF) {
+ return APR_INCOMPLETE;
+ }
+
+ if (rv != APR_SUCCESS) {
+ return rv;
+ }
+
+ e = APR_BRIGADE_FIRST(b);
+ while (e != APR_BRIGADE_SENTINEL(b)) {
+ const char *buffer;
+ apr_size_t len;
+
+ if (!APR_BUCKET_IS_METADATA(e)) {
+ int parsing = 0;
+
+ rv = apr_bucket_read(e, &buffer, &len, APR_BLOCK_READ);
+ if (rv == APR_SUCCESS) {
+ parsing = 1;
+ if (len > 0) {
+ ctx->seen_data = 1;
+ }
+ rv = parse_chunk_size(ctx, buffer, len,
+ f->r->server->limit_req_fieldsize, strict);
+ }
+ if (rv != APR_SUCCESS) {
+ ap_log_rerror(APLOG_MARK, APLOG_INFO, rv, f->r, APLOGNO(01590)
+ "Error reading/parsing chunk %s ",
+ (APR_ENOSPC == rv) ? "(overflow)" : "");
+ if (parsing) {
+ if (rv != APR_ENOSPC) {
+ http_error = HTTP_BAD_REQUEST;
+ }
+ return bail_out_on_error(ctx, f, http_error);
+ }
+ return rv;
+ }
+ }
+
+ apr_bucket_delete(e);
+ e = APR_BRIGADE_FIRST(b);
+ }
+ again = 1; /* come around again */
+
+ if (ctx->state == BODY_CHUNK_TRAILER) {
+ /* Treat UNSET as DISABLE - trailers aren't merged by default */
+ return read_chunked_trailers(ctx, f, b,
+ conf->merge_trailers == AP_MERGE_TRAILERS_ENABLE);
+ }
+
+ break;
+ }
+ case BODY_NONE:
+ case BODY_LENGTH:
+ case BODY_CHUNK_DATA: {
+
+ /* Ensure that the caller can not go over our boundary point. */
+ if (ctx->state != BODY_NONE && ctx->remaining < readbytes) {
+ readbytes = ctx->remaining;
+ }
+ if (readbytes > 0) {
+ apr_off_t totalread;
+
+ rv = ap_get_brigade(f->next, b, mode, block, readbytes);
+
+ /* for timeout */
+ if (block == APR_NONBLOCK_READ
+ && ((rv == APR_SUCCESS && APR_BRIGADE_EMPTY(b))
+ || (APR_STATUS_IS_EAGAIN(rv)))) {
+ return APR_EAGAIN;
+ }
+
+ if (rv == APR_EOF && ctx->state != BODY_NONE
+ && ctx->remaining > 0) {
+ return APR_INCOMPLETE;
+ }
+
+ if (rv != APR_SUCCESS) {
+ return rv;
+ }
+
+ /* How many bytes did we just read? */
+ apr_brigade_length(b, 0, &totalread);
+ if (totalread > 0) {
+ ctx->seen_data = 1;
+ }
+
+ /* If this happens, we have a bucket of unknown length. Die because
+ * it means our assumptions have changed. */
+ AP_DEBUG_ASSERT(totalread >= 0);
+
+ if (ctx->state != BODY_NONE) {
+ ctx->remaining -= totalread;
+ if (ctx->remaining > 0) {
+ e = APR_BRIGADE_LAST(b);
+ if (APR_BUCKET_IS_EOS(e)) {
+ apr_bucket_delete(e);
+ return APR_INCOMPLETE;
+ }
+ }
+ else if (ctx->state == BODY_CHUNK_DATA) {
+ /* next chunk please */
+ ctx->state = BODY_CHUNK_END;
+ ctx->chunk_used = 0;
+ }
+ }
+
+ /* We have a limit in effect. */
+ if (ctx->limit) {
+ /* FIXME: Note that we might get slightly confused on
+ * chunked inputs as we'd need to compensate for the chunk
+ * lengths which may not really count. This seems to be up
+ * for interpretation.
+ */
+ ctx->limit_used += totalread;
+ if (ctx->limit < ctx->limit_used) {
+ ap_log_rerror(APLOG_MARK, APLOG_INFO, 0, f->r,
+ APLOGNO(01591) "Read content length of "
+ "%" APR_OFF_T_FMT " is larger than the "
+ "configured limit of %" APR_OFF_T_FMT,
+ ctx->limit_used, ctx->limit);
+ return bail_out_on_error(ctx, f,
+ HTTP_REQUEST_ENTITY_TOO_LARGE);
+ }
+ }
+ }
+
+ /* If we have no more bytes remaining on a C-L request,
+ * save the caller a round trip to discover EOS.
+ */
+ if (ctx->state == BODY_LENGTH && ctx->remaining == 0) {
+ e = apr_bucket_eos_create(f->c->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(b, e);
+ ctx->eos_sent = 1;
+ }
+
+ break;
+ }
+ case BODY_CHUNK_TRAILER: {
+
+ rv = ap_get_brigade(f->next, b, mode, block, readbytes);
+
+ /* for timeout */
+ if (block == APR_NONBLOCK_READ
+ && ((rv == APR_SUCCESS && APR_BRIGADE_EMPTY(b))
+ || (APR_STATUS_IS_EAGAIN(rv)))) {
+ return APR_EAGAIN;
+ }
+
+ if (rv != APR_SUCCESS) {
+ return rv;
+ }
+
+ break;
+ }
+ default: {
+ /* Should not happen */
+ ap_log_rerror(APLOG_MARK, APLOG_INFO, 0, f->r, APLOGNO(02901)
+ "Unexpected body state (%i)", (int)ctx->state);
+ return APR_EGENERAL;
+ }
+ }
+
+ } while (again);
+
+ return APR_SUCCESS;
+}
+
+struct check_header_ctx {
+ request_rec *r;
+ int strict;
+};
+
+/* check a single header, to be used with apr_table_do() */
+static int check_header(struct check_header_ctx *ctx,
+ const char *name, const char **val)
+{
+ const char *pos, *end;
+ char *dst = NULL;
+
+ if (name[0] == '\0') {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, ctx->r, APLOGNO(02428)
+ "Empty response header name, aborting request");
+ return 0;
+ }
+
+ if (ctx->strict) {
+ end = ap_scan_http_token(name);
+ }
+ else {
+ end = ap_scan_vchar_obstext(name);
+ }
+ if (*end) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, ctx->r, APLOGNO(02429)
+ "Response header name '%s' contains invalid "
+ "characters, aborting request",
+ name);
+ return 0;
+ }
+
+ for (pos = *val; *pos; pos = end) {
+ end = ap_scan_http_field_content(pos);
+ if (*end) {
+ if (end[0] != CR || end[1] != LF || (end[2] != ' ' &&
+ end[2] != '\t')) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, ctx->r, APLOGNO(02430)
+ "Response header '%s' value of '%s' contains "
+ "invalid characters, aborting request",
+ name, pos);
+ return 0;
+ }
+ if (!dst) {
+ *val = dst = apr_palloc(ctx->r->pool, strlen(*val) + 1);
+ }
+ }
+ if (dst) {
+ memcpy(dst, pos, end - pos);
+ dst += end - pos;
+ if (*end) {
+ /* skip folding and replace with a single space */
+ end += 3 + strspn(end + 3, "\t ");
+ *dst++ = ' ';
+ }
+ }
+ }
+ if (dst) {
+ *dst = '\0';
+ }
+ return 1;
+}
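+
+/* A worked example of the unfolding above (header name is illustrative): an
+ * obs-folded value arriving as
+ *
+ *     X-Example: alpha\r\n
+ *      beta
+ *
+ * i.e. the string "alpha\r\n beta", is copied into a fresh pool buffer as
+ * "alpha beta" and *val is pointed at the copy; anything other than a
+ * CRLF-plus-whitespace fold in the value aborts the request instead.
+ */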
+
+static int check_headers_table(apr_table_t *t, struct check_header_ctx *ctx)
+{
+ const apr_array_header_t *headers = apr_table_elts(t);
+ apr_table_entry_t *header;
+ int i;
+
+ for (i = 0; i < headers->nelts; ++i) {
+ header = &APR_ARRAY_IDX(headers, i, apr_table_entry_t);
+ if (!header->key) {
+ continue;
+ }
+ if (!check_header(ctx, header->key, (const char **)&header->val)) {
+ return 0;
+ }
+ }
+ return 1;
+}
+
+/**
+ * Check headers for HTTP conformance
+ * @return 1 if ok, 0 if bad
+ */
+static APR_INLINE int check_headers(request_rec *r)
+{
+ struct check_header_ctx ctx;
+ core_server_config *conf =
+ ap_get_core_module_config(r->server->module_config);
+
+ ctx.r = r;
+ ctx.strict = (conf->http_conformance != AP_HTTP_CONFORMANCE_UNSAFE);
+ return check_headers_table(r->headers_out, &ctx) &&
+ check_headers_table(r->err_headers_out, &ctx);
+}
+
+static int check_headers_recursion(request_rec *r)
+{
+ void *check = NULL;
+ apr_pool_userdata_get(&check, "check_headers_recursion", r->pool);
+ if (check) {
+ return 1;
+ }
+ apr_pool_userdata_setn("true", "check_headers_recursion", NULL, r->pool);
+ return 0;
+}
+
+typedef struct header_struct {
+ apr_pool_t *pool;
+ apr_bucket_brigade *bb;
+} header_struct;
+
+/* Send a single HTTP header field to the client. Note that this function
+ * is used in calls to apr_table_do(), so don't change its interface.
+ * It returns true unless there was a write error of some kind.
+ */
+static int form_header_field(header_struct *h,
+ const char *fieldname, const char *fieldval)
+{
+#if APR_CHARSET_EBCDIC
+ char *headfield;
+ apr_size_t len;
+
+ headfield = apr_pstrcat(h->pool, fieldname, ": ", fieldval, CRLF, NULL);
+ len = strlen(headfield);
+
+ ap_xlate_proto_to_ascii(headfield, len);
+ apr_brigade_write(h->bb, NULL, NULL, headfield, len);
+#else
+ struct iovec vec[4];
+ struct iovec *v = vec;
+ v->iov_base = (void *)fieldname;
+ v->iov_len = strlen(fieldname);
+ v++;
+ v->iov_base = ": ";
+ v->iov_len = sizeof(": ") - 1;
+ v++;
+ v->iov_base = (void *)fieldval;
+ v->iov_len = strlen(fieldval);
+ v++;
+ v->iov_base = CRLF;
+ v->iov_len = sizeof(CRLF) - 1;
+ apr_brigade_writev(h->bb, NULL, NULL, vec, 4);
+#endif /* !APR_CHARSET_EBCDIC */
+ return 1;
+}
+
+/* This routine is called by apr_table_do and merges all instances of
+ * the passed field values into a single array that will be further
+ * processed by some later routine. Originally intended to help split
+ * and recombine multiple Vary fields, though it is generic to any field
+ * consisting of comma/space-separated tokens.
+ */
+static int uniq_field_values(void *d, const char *key, const char *val)
+{
+ apr_array_header_t *values;
+ char *start;
+ char *e;
+ char **strpp;
+ int i;
+
+ values = (apr_array_header_t *)d;
+
+ e = apr_pstrdup(values->pool, val);
+
+ do {
+ /* Find a non-empty fieldname */
+
+ while (*e == ',' || apr_isspace(*e)) {
+ ++e;
+ }
+ if (*e == '\0') {
+ break;
+ }
+ start = e;
+ while (*e != '\0' && *e != ',' && !apr_isspace(*e)) {
+ ++e;
+ }
+ if (*e != '\0') {
+ *e++ = '\0';
+ }
+
+ /* Now add it to values if it isn't already represented.
+         * Could be replaced by an ap_array_strcasecmp() if we had one.
+ */
+ for (i = 0, strpp = (char **) values->elts; i < values->nelts;
+ ++i, ++strpp) {
+ if (*strpp && ap_cstr_casecmp(*strpp, start) == 0) {
+ break;
+ }
+ }
+ if (i == values->nelts) { /* if not found */
+ *(char **)apr_array_push(values) = start;
+ }
+ } while (*e != '\0');
+
+ return 1;
+}
+
+/*
+ * Since some clients choke violently on multiple Vary fields, or
+ * Vary fields with duplicate tokens, combine any multiples and remove
+ * any duplicates.
+ */
+static void fixup_vary(request_rec *r)
+{
+ apr_array_header_t *varies;
+
+ varies = apr_array_make(r->pool, 5, sizeof(char *));
+
+ /* Extract all Vary fields from the headers_out, separate each into
+ * its comma-separated fieldname values, and then add them to varies
+ * if not already present in the array.
+ */
+ apr_table_do(uniq_field_values, varies, r->headers_out, "Vary", NULL);
+
+ /* If we found any, replace old Vary fields with unique-ified value */
+
+ if (varies->nelts > 0) {
+ apr_table_setn(r->headers_out, "Vary",
+ apr_array_pstrcat(r->pool, varies, ','));
+ }
+}
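+
+/* A worked example of the merge above: if a response carries the two fields
+ *
+ *     Vary: Accept-Encoding
+ *     Vary: accept-encoding, User-Agent
+ *
+ * uniq_field_values() collects the tokens case-insensitively (keeping the
+ * spelling seen first), and the table is left with the single field
+ *
+ *     Vary: Accept-Encoding,User-Agent
+ */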
+
+/* Send a request's HTTP response headers to the client.
+ */
+static apr_status_t send_all_header_fields(header_struct *h,
+ const request_rec *r)
+{
+ const apr_array_header_t *elts;
+ const apr_table_entry_t *t_elt;
+ const apr_table_entry_t *t_end;
+ struct iovec *vec;
+ struct iovec *vec_next;
+
+ elts = apr_table_elts(r->headers_out);
+ if (elts->nelts == 0) {
+ return APR_SUCCESS;
+ }
+ t_elt = (const apr_table_entry_t *)(elts->elts);
+ t_end = t_elt + elts->nelts;
+ vec = (struct iovec *)apr_palloc(h->pool, 4 * elts->nelts *
+ sizeof(struct iovec));
+ vec_next = vec;
+
+ /* For each field, generate
+ * name ": " value CRLF
+ */
+ do {
+ vec_next->iov_base = (void*)(t_elt->key);
+ vec_next->iov_len = strlen(t_elt->key);
+ vec_next++;
+ vec_next->iov_base = ": ";
+ vec_next->iov_len = sizeof(": ") - 1;
+ vec_next++;
+ vec_next->iov_base = (void*)(t_elt->val);
+ vec_next->iov_len = strlen(t_elt->val);
+ vec_next++;
+ vec_next->iov_base = CRLF;
+ vec_next->iov_len = sizeof(CRLF) - 1;
+ vec_next++;
+ t_elt++;
+ } while (t_elt < t_end);
+
+ if (APLOGrtrace4(r)) {
+ t_elt = (const apr_table_entry_t *)(elts->elts);
+ do {
+ ap_log_rerror(APLOG_MARK, APLOG_TRACE4, 0, r, " %s: %s",
+ ap_escape_logitem(r->pool, t_elt->key),
+ ap_escape_logitem(r->pool, t_elt->val));
+ t_elt++;
+ } while (t_elt < t_end);
+ }
+
+#if APR_CHARSET_EBCDIC
+ {
+ apr_size_t len;
+ char *tmp = apr_pstrcatv(r->pool, vec, vec_next - vec, &len);
+ ap_xlate_proto_to_ascii(tmp, len);
+ return apr_brigade_write(h->bb, NULL, NULL, tmp, len);
+ }
+#else
+ return apr_brigade_writev(h->bb, NULL, NULL, vec, vec_next - vec);
+#endif
+}
+
+/* Confirm that the status line is well-formed and matches r->status.
+ * If they don't match, a filter may have negated the status line set by a
+ * handler.
+ * Zap r->status_line if bad.
+ */
+static apr_status_t validate_status_line(request_rec *r)
+{
+ char *end;
+
+ if (r->status_line) {
+ int len = strlen(r->status_line);
+ if (len < 3
+ || apr_strtoi64(r->status_line, &end, 10) != r->status
+ || (end - 3) != r->status_line
+ || (len >= 4 && ! apr_isspace(r->status_line[3]))) {
+ r->status_line = NULL;
+ return APR_EGENERAL;
+ }
+ /* Since we passed the above check, we know that length three
+ * is equivalent to only a 3 digit numeric http status.
+ * RFC2616 mandates a trailing space, let's add it.
+ */
+ if (len == 3) {
+ r->status_line = apr_pstrcat(r->pool, r->status_line, " ", NULL);
+ return APR_EGENERAL;
+ }
+ return APR_SUCCESS;
+ }
+ return APR_EGENERAL;
+}
+
+/*
+ * Determine the protocol to use for the response. Potentially downgrade
+ * to HTTP/1.0 in some situations and/or turn off keepalives.
+ *
+ * also prepare r->status_line.
+ */
+static void basic_http_header_check(request_rec *r,
+ const char **protocol)
+{
+ apr_status_t rv;
+
+ if (r->assbackwards) {
+ /* no such thing as a response protocol */
+ return;
+ }
+
+ rv = validate_status_line(r);
+
+ if (!r->status_line) {
+ r->status_line = ap_get_status_line(r->status);
+ } else if (rv != APR_SUCCESS) {
+ /* Status line is OK but our own reason phrase
+ * would be preferred if defined
+ */
+ const char *tmp = ap_get_status_line(r->status);
+ if (!strncmp(tmp, r->status_line, 3)) {
+ r->status_line = tmp;
+ }
+ }
+
+ /* Note that we must downgrade before checking for force responses. */
+ if (r->proto_num > HTTP_VERSION(1,0)
+ && apr_table_get(r->subprocess_env, "downgrade-1.0")) {
+ r->proto_num = HTTP_VERSION(1,0);
+ }
+
+ /* kludge around broken browsers when indicated by force-response-1.0
+ */
+ if (r->proto_num == HTTP_VERSION(1,0)
+ && apr_table_get(r->subprocess_env, "force-response-1.0")) {
+ *protocol = "HTTP/1.0";
+ r->connection->keepalive = AP_CONN_CLOSE;
+ }
+ else {
+ *protocol = AP_SERVER_PROTOCOL;
+ }
+
+}
+
+/* fill "bb" with a barebones/initial HTTP response header */
+static void basic_http_header(request_rec *r, apr_bucket_brigade *bb,
+ const char *protocol)
+{
+ char *date = NULL;
+ const char *proxy_date = NULL;
+ const char *server = NULL;
+ const char *us = ap_get_server_banner();
+ header_struct h;
+ struct iovec vec[4];
+
+ if (r->assbackwards) {
+ /* there are no headers to send */
+ return;
+ }
+
+ /* Output the HTTP/1.x Status-Line and the Date and Server fields */
+
+ vec[0].iov_base = (void *)protocol;
+ vec[0].iov_len = strlen(protocol);
+ vec[1].iov_base = (void *)" ";
+ vec[1].iov_len = sizeof(" ") - 1;
+ vec[2].iov_base = (void *)(r->status_line);
+ vec[2].iov_len = strlen(r->status_line);
+ vec[3].iov_base = (void *)CRLF;
+ vec[3].iov_len = sizeof(CRLF) - 1;
+#if APR_CHARSET_EBCDIC
+ {
+ char *tmp;
+ apr_size_t len;
+ tmp = apr_pstrcatv(r->pool, vec, 4, &len);
+ ap_xlate_proto_to_ascii(tmp, len);
+ apr_brigade_write(bb, NULL, NULL, tmp, len);
+ }
+#else
+ apr_brigade_writev(bb, NULL, NULL, vec, 4);
+#endif
+
+ h.pool = r->pool;
+ h.bb = bb;
+
+ /*
+ * keep the set-by-proxy server and date headers, otherwise
+ * generate a new server header / date header
+ */
+ if (r->proxyreq != PROXYREQ_NONE) {
+ proxy_date = apr_table_get(r->headers_out, "Date");
+ if (!proxy_date) {
+ /*
+ * proxy_date needs to be const. So use date for the creation of
+ * our own Date header and pass it over to proxy_date later to
+ * avoid a compiler warning.
+ */
+ date = apr_palloc(r->pool, APR_RFC822_DATE_LEN);
+ ap_recent_rfc822_date(date, r->request_time);
+ }
+ server = apr_table_get(r->headers_out, "Server");
+ }
+ else {
+ date = apr_palloc(r->pool, APR_RFC822_DATE_LEN);
+ ap_recent_rfc822_date(date, r->request_time);
+ }
+
+ form_header_field(&h, "Date", proxy_date ? proxy_date : date );
+
+ if (!server && *us)
+ server = us;
+ if (server)
+ form_header_field(&h, "Server", server);
+
+ if (APLOGrtrace3(r)) {
+ ap_log_rerror(APLOG_MARK, APLOG_TRACE3, 0, r,
+ "Response sent with status %d%s",
+ r->status,
+ APLOGrtrace4(r) ? ", headers:" : "");
+
+ /*
+ * Date and Server are less interesting, use TRACE5 for them while
+ * using TRACE4 for the other headers.
+ */
+ ap_log_rerror(APLOG_MARK, APLOG_TRACE5, 0, r, " Date: %s",
+ proxy_date ? proxy_date : date );
+ if (server)
+ ap_log_rerror(APLOG_MARK, APLOG_TRACE5, 0, r, " Server: %s",
+ server);
+ }
+
+
+ /* unset so we don't send them again */
+ apr_table_unset(r->headers_out, "Date"); /* Avoid bogosity */
+ if (server) {
+ apr_table_unset(r->headers_out, "Server");
+ }
+}
+
+AP_DECLARE(void) ap_basic_http_header(request_rec *r, apr_bucket_brigade *bb)
+{
+ const char *protocol = NULL;
+
+ basic_http_header_check(r, &protocol);
+ basic_http_header(r, bb, protocol);
+}
+
+static void terminate_header(apr_bucket_brigade *bb)
+{
+ char crlf[] = CRLF;
+ apr_size_t buflen;
+
+ buflen = strlen(crlf);
+ ap_xlate_proto_to_ascii(crlf, buflen);
+ apr_brigade_write(bb, NULL, NULL, crlf, buflen);
+}
+
+AP_DECLARE_NONSTD(int) ap_send_http_trace(request_rec *r)
+{
+ core_server_config *conf;
+ int rv;
+ apr_bucket_brigade *bb;
+ header_struct h;
+ apr_bucket *b;
+ int body;
+ char *bodyread = NULL, *bodyoff;
+ apr_size_t bodylen = 0;
+ apr_size_t bodybuf;
+ long res = -1; /* init to avoid gcc -Wall warning */
+
+ if (r->method_number != M_TRACE) {
+ return DECLINED;
+ }
+
+ /* Get the original request */
+ while (r->prev) {
+ r = r->prev;
+ }
+ conf = ap_get_core_module_config(r->server->module_config);
+
+ if (conf->trace_enable == AP_TRACE_DISABLE) {
+ apr_table_setn(r->notes, "error-notes",
+ "TRACE denied by server configuration");
+ return HTTP_METHOD_NOT_ALLOWED;
+ }
+
+ if (conf->trace_enable == AP_TRACE_EXTENDED)
+ /* XXX: should be = REQUEST_CHUNKED_PASS */
+ body = REQUEST_CHUNKED_DECHUNK;
+ else
+ body = REQUEST_NO_BODY;
+
+ if ((rv = ap_setup_client_block(r, body))) {
+ if (rv == HTTP_REQUEST_ENTITY_TOO_LARGE)
+ apr_table_setn(r->notes, "error-notes",
+ "TRACE with a request body is not allowed");
+ return rv;
+ }
+
+ if (ap_should_client_block(r)) {
+
+ if (r->remaining > 0) {
+ if (r->remaining > 65536) {
+ apr_table_setn(r->notes, "error-notes",
+ "Extended TRACE request bodies cannot exceed 64k\n");
+ return HTTP_REQUEST_ENTITY_TOO_LARGE;
+ }
+ /* always 32 extra bytes to catch chunk header exceptions */
+ bodybuf = (apr_size_t)r->remaining + 32;
+ }
+ else {
+ /* Add an extra 8192 for chunk headers */
+ bodybuf = 73730;
+ }
+
+ bodyoff = bodyread = apr_palloc(r->pool, bodybuf);
+
+ /* only while we have enough for a chunked header */
+ while ((!bodylen || bodybuf >= 32) &&
+ (res = ap_get_client_block(r, bodyoff, bodybuf)) > 0) {
+ bodylen += res;
+ bodybuf -= res;
+ bodyoff += res;
+ }
+ if (res > 0 && bodybuf < 32) {
+ /* discard_rest_of_request_body into our buffer */
+ while (ap_get_client_block(r, bodyread, bodylen) > 0)
+ ;
+ apr_table_setn(r->notes, "error-notes",
+ "Extended TRACE request bodies cannot exceed 64k\n");
+ return HTTP_REQUEST_ENTITY_TOO_LARGE;
+ }
+
+ if (res < 0) {
+ return HTTP_BAD_REQUEST;
+ }
+ }
+
+ ap_set_content_type(r, "message/http");
+
+ /* Now we recreate the request, and echo it back */
+
+ bb = apr_brigade_create(r->pool, r->connection->bucket_alloc);
+#if APR_CHARSET_EBCDIC
+ {
+ char *tmp;
+ apr_size_t len;
+ len = strlen(r->the_request);
+ tmp = apr_pmemdup(r->pool, r->the_request, len);
+ ap_xlate_proto_to_ascii(tmp, len);
+ apr_brigade_putstrs(bb, NULL, NULL, tmp, CRLF_ASCII, NULL);
+ }
+#else
+ apr_brigade_putstrs(bb, NULL, NULL, r->the_request, CRLF, NULL);
+#endif
+ h.pool = r->pool;
+ h.bb = bb;
+ apr_table_do((int (*) (void *, const char *, const char *))
+ form_header_field, (void *) &h, r->headers_in, NULL);
+ apr_brigade_puts(bb, NULL, NULL, CRLF_ASCII);
+
+ /* If configured to accept a body, echo the body */
+ if (bodylen) {
+ b = apr_bucket_pool_create(bodyread, bodylen,
+ r->pool, bb->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(bb, b);
+ }
+
+ ap_pass_brigade(r->output_filters, bb);
+
+ return DONE;
+}
+
+typedef struct header_filter_ctx {
+ int headers_sent;
+} header_filter_ctx;
+
+AP_CORE_DECLARE_NONSTD(apr_status_t) ap_http_header_filter(ap_filter_t *f,
+ apr_bucket_brigade *b)
+{
+ request_rec *r = f->r;
+ conn_rec *c = r->connection;
+ const char *clheader;
+ int header_only = (r->header_only || AP_STATUS_IS_HEADER_ONLY(r->status));
+ const char *protocol = NULL;
+ apr_bucket *e;
+ apr_bucket_brigade *b2;
+ header_struct h;
+ header_filter_ctx *ctx = f->ctx;
+ const char *ctype;
+ ap_bucket_error *eb = NULL;
+ apr_status_t rv = APR_SUCCESS;
+ int recursive_error = 0;
+
+ AP_DEBUG_ASSERT(!r->main);
+
+ if (!ctx) {
+ ctx = f->ctx = apr_pcalloc(r->pool, sizeof(header_filter_ctx));
+ }
+ else if (ctx->headers_sent) {
+ /* Eat body if response must not have one. */
+ if (header_only) {
+ /* Still next filters may be waiting for EOS, so pass it (alone)
+ * when encountered and be done with this filter.
+ */
+ e = APR_BRIGADE_LAST(b);
+ if (e != APR_BRIGADE_SENTINEL(b) && APR_BUCKET_IS_EOS(e)) {
+ APR_BUCKET_REMOVE(e);
+ apr_brigade_cleanup(b);
+ APR_BRIGADE_INSERT_HEAD(b, e);
+ ap_remove_output_filter(f);
+ rv = ap_pass_brigade(f->next, b);
+ }
+ apr_brigade_cleanup(b);
+ return rv;
+ }
+ }
+
+ for (e = APR_BRIGADE_FIRST(b);
+ e != APR_BRIGADE_SENTINEL(b);
+ e = APR_BUCKET_NEXT(e))
+ {
+ if (AP_BUCKET_IS_ERROR(e) && !eb) {
+ eb = e->data;
+ continue;
+ }
+ /*
+ * If we see an EOC bucket it is a signal that we should get out
+ * of the way doing nothing.
+ */
+ if (AP_BUCKET_IS_EOC(e)) {
+ ap_remove_output_filter(f);
+ return ap_pass_brigade(f->next, b);
+ }
+ }
+
+ if (!ctx->headers_sent && !check_headers(r)) {
+ /* We may come back here from ap_die() below,
+ * so clear anything from this response.
+ */
+ apr_table_clear(r->headers_out);
+ apr_table_clear(r->err_headers_out);
+ apr_brigade_cleanup(b);
+
+ /* Don't recall ap_die() if we come back here (from its own internal
+ * redirect or error response), otherwise we can end up in infinite
+ * recursion; better fall through with 500, minimal headers and an
+ * empty body (EOS only).
+ */
+ if (!check_headers_recursion(r)) {
+ ap_die(HTTP_INTERNAL_SERVER_ERROR, r);
+ return AP_FILTER_ERROR;
+ }
+ r->status = HTTP_INTERNAL_SERVER_ERROR;
+ e = ap_bucket_eoc_create(c->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(b, e);
+ e = apr_bucket_eos_create(c->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(b, e);
+ r->content_type = r->content_encoding = NULL;
+ r->content_languages = NULL;
+ ap_set_content_length(r, 0);
+ recursive_error = 1;
+ }
+ else if (eb) {
+ int status;
+ status = eb->status;
+ apr_brigade_cleanup(b);
+ ap_die(status, r);
+ return AP_FILTER_ERROR;
+ }
+
+ if (r->assbackwards) {
+ r->sent_bodyct = 1;
+ ap_remove_output_filter(f);
+ rv = ap_pass_brigade(f->next, b);
+ goto out;
+ }
+
+ /*
+ * Now that we are ready to send a response, we need to combine the two
+ * header field tables into a single table. If we don't do this, our
+ * later attempts to set or unset a given fieldname might be bypassed.
+ */
+ if (!apr_is_empty_table(r->err_headers_out)) {
+ r->headers_out = apr_table_overlay(r->pool, r->err_headers_out,
+ r->headers_out);
+ }
+
+ /*
+ * Remove the 'Vary' header field if the client can't handle it.
+ * Since this will have nasty effects on HTTP/1.1 caches, force
+ * the response into HTTP/1.0 mode.
+ *
+ * Note: the force-response-1.0 should come before the call to
+ * basic_http_header_check()
+ */
+ if (apr_table_get(r->subprocess_env, "force-no-vary") != NULL) {
+ apr_table_unset(r->headers_out, "Vary");
+ r->proto_num = HTTP_VERSION(1,0);
+ apr_table_setn(r->subprocess_env, "force-response-1.0", "1");
+ }
+ else {
+ fixup_vary(r);
+ }
+
+ /*
+ * Now remove any ETag response header field if earlier processing
+ * says so (such as a 'FileETag None' directive).
+ */
+ if (apr_table_get(r->notes, "no-etag") != NULL) {
+ apr_table_unset(r->headers_out, "ETag");
+ }
+
+ /* determine the protocol and whether we should use keepalives. */
+ basic_http_header_check(r, &protocol);
+ ap_set_keepalive(r);
+
+ if (AP_STATUS_IS_HEADER_ONLY(r->status)) {
+ apr_table_unset(r->headers_out, "Transfer-Encoding");
+ apr_table_unset(r->headers_out, "Content-Length");
+ r->content_type = r->content_encoding = NULL;
+ r->content_languages = NULL;
+ r->clength = r->chunked = 0;
+ }
+ else if (r->chunked) {
+ apr_table_mergen(r->headers_out, "Transfer-Encoding", "chunked");
+ apr_table_unset(r->headers_out, "Content-Length");
+ }
+
+ ctype = ap_make_content_type(r, r->content_type);
+ if (ctype) {
+ apr_table_setn(r->headers_out, "Content-Type", ctype);
+ }
+
+ if (r->content_encoding) {
+ apr_table_setn(r->headers_out, "Content-Encoding",
+ r->content_encoding);
+ }
+
+ if (!apr_is_empty_array(r->content_languages)) {
+ int i;
+ char *token;
+ char **languages = (char **)(r->content_languages->elts);
+ const char *field = apr_table_get(r->headers_out, "Content-Language");
+
+ while (field && (token = ap_get_list_item(r->pool, &field)) != NULL) {
+ for (i = 0; i < r->content_languages->nelts; ++i) {
+ if (!ap_cstr_casecmp(token, languages[i]))
+ break;
+ }
+ if (i == r->content_languages->nelts) {
+ *((char **) apr_array_push(r->content_languages)) = token;
+ }
+ }
+
+ field = apr_array_pstrcat(r->pool, r->content_languages, ',');
+ apr_table_setn(r->headers_out, "Content-Language", field);
+ }
+
+ /*
+ * Control cachability for non-cacheable responses if not already set by
+ * some other part of the server configuration.
+ */
+ if (r->no_cache && !apr_table_get(r->headers_out, "Expires")) {
+ char *date = apr_palloc(r->pool, APR_RFC822_DATE_LEN);
+ ap_recent_rfc822_date(date, r->request_time);
+ apr_table_addn(r->headers_out, "Expires", date);
+ }
+
+    /* This is a hack, but I can't find any way around it. The idea is that
+ * we don't want to send out 0 Content-Lengths if it is a head request.
+ * This happens when modules try to outsmart the server, and return
+ * if they see a HEAD request. Apache 1.3 handlers were supposed to
+ * just return in that situation, and the core handled the HEAD. In
+ * 2.0, if a handler returns, then the core sends an EOS bucket down
+ * the filter stack, and the content-length filter computes a C-L of
+ * zero and that gets put in the headers, and we end up sending a
+ * zero C-L to the client. We can't just remove the C-L filter,
+ * because well behaved 2.0 handlers will send their data down the stack,
+ * and we will compute a real C-L for the head request. RBB
+ */
+ if (r->header_only
+ && (clheader = apr_table_get(r->headers_out, "Content-Length"))
+ && !strcmp(clheader, "0")) {
+ apr_table_unset(r->headers_out, "Content-Length");
+ }
+
+ b2 = apr_brigade_create(r->pool, c->bucket_alloc);
+ basic_http_header(r, b2, protocol);
+
+ h.pool = r->pool;
+ h.bb = b2;
+
+ send_all_header_fields(&h, r);
+
+ terminate_header(b2);
+
+ if (header_only) {
+ e = APR_BRIGADE_LAST(b);
+ if (e != APR_BRIGADE_SENTINEL(b) && APR_BUCKET_IS_EOS(e)) {
+ APR_BUCKET_REMOVE(e);
+ APR_BRIGADE_INSERT_TAIL(b2, e);
+ ap_remove_output_filter(f);
+ }
+ apr_brigade_cleanup(b);
+ }
+
+ rv = ap_pass_brigade(f->next, b2);
+ apr_brigade_cleanup(b2);
+ ctx->headers_sent = 1;
+
+ if (rv != APR_SUCCESS || header_only) {
+ goto out;
+ }
+
+ r->sent_bodyct = 1; /* Whatever follows is real body stuff... */
+
+ if (r->chunked) {
+ /* We can't add this filter until we have already sent the headers.
+ * If we add it before this point, then the headers will be chunked
+ * as well, and that is just wrong.
+ */
+ ap_add_output_filter("CHUNK", NULL, r, r->connection);
+ }
+
+ /* Don't remove this filter until after we have added the CHUNK filter.
+ * Otherwise, f->next won't be the CHUNK filter and thus the first
+ * brigade won't be chunked properly.
+ */
+ ap_remove_output_filter(f);
+ rv = ap_pass_brigade(f->next, b);
+out:
+ if (recursive_error) {
+ return AP_FILTER_ERROR;
+ }
+ return rv;
+}
+
+/*
+ * Map specific APR codes returned by the filter stack to HTTP error
+ * codes, or the default status code provided. Use it as follows:
+ *
+ * return ap_map_http_request_error(rv, HTTP_BAD_REQUEST);
+ *
+ * If the filter has already handled the error, AP_FILTER_ERROR will
+ * be returned, which is cleanly passed through.
+ *
+ * These mappings imply that the filter stack is reading from the
+ * downstream client, the proxy will map these codes differently.
+ */
+AP_DECLARE(int) ap_map_http_request_error(apr_status_t rv, int status)
+{
+ switch (rv) {
+ case AP_FILTER_ERROR:
+ return AP_FILTER_ERROR;
+
+ case APR_ENOSPC:
+ return HTTP_REQUEST_ENTITY_TOO_LARGE;
+
+ case APR_ENOTIMPL:
+ return HTTP_NOT_IMPLEMENTED;
+
+ case APR_TIMEUP:
+ case APR_ETIMEDOUT:
+ return HTTP_REQUEST_TIME_OUT;
+
+ default:
+ return status;
+ }
+}
+
+/* In HTTP/1.1, any method can have a body. However, most GET handlers
+ * wouldn't know what to do with a request body if they received one.
+ * This helper routine tests for and reads any message body in the request,
+ * simply discarding whatever it receives. We need to do this because
+ * failing to read the request body would cause it to be interpreted
+ * as the next request on a persistent connection.
+ *
+ * Since we return an error status if the request is malformed, this
+ * routine should be called at the beginning of a no-body handler, e.g.,
+ *
+ * if ((retval = ap_discard_request_body(r)) != OK) {
+ * return retval;
+ * }
+ */
+AP_DECLARE(int) ap_discard_request_body(request_rec *r)
+{
+ int rc = OK;
+ conn_rec *c = r->connection;
+ apr_bucket_brigade *bb;
+
+ /* Sometimes we'll get in a state where the input handling has
+ * detected an error where we want to drop the connection, so if
+ * that's the case, don't read the data as that is what we're trying
+ * to avoid.
+ *
+ * This function is also a no-op on a subrequest.
+ */
+ if (r->main || c->keepalive == AP_CONN_CLOSE) {
+ return OK;
+ }
+ if (ap_status_drops_connection(r->status)) {
+ c->keepalive = AP_CONN_CLOSE;
+ return OK;
+ }
+
+ bb = apr_brigade_create(r->pool, r->connection->bucket_alloc);
+ for (;;) {
+ apr_status_t rv;
+
+ rv = ap_get_brigade(r->input_filters, bb, AP_MODE_READBYTES,
+ APR_BLOCK_READ, HUGE_STRING_LEN);
+ if (rv != APR_SUCCESS) {
+ rc = ap_map_http_request_error(rv, HTTP_BAD_REQUEST);
+ goto cleanup;
+ }
+
+ while (!APR_BRIGADE_EMPTY(bb)) {
+ apr_bucket *b = APR_BRIGADE_FIRST(bb);
+
+ if (APR_BUCKET_IS_EOS(b)) {
+ goto cleanup;
+ }
+
+ /* There is no need to read empty or metadata buckets or
+ * buckets of known length, but we MUST read buckets of
+ * unknown length in order to exhaust them.
+ */
+ if (b->length == (apr_size_t)-1) {
+ apr_size_t len;
+ const char *data;
+
+ rv = apr_bucket_read(b, &data, &len, APR_BLOCK_READ);
+ if (rv != APR_SUCCESS) {
+ rc = HTTP_BAD_REQUEST;
+ goto cleanup;
+ }
+ }
+
+ apr_bucket_delete(b);
+ }
+ }
+
+cleanup:
+ apr_brigade_cleanup(bb);
+ if (rc != OK) {
+ c->keepalive = AP_CONN_CLOSE;
+ }
+ return rc;
+}
+
+/* Here we deal with getting the request message body from the client.
+ * Whether or not the request contains a body is signaled by the presence
+ * of a non-zero Content-Length or by a Transfer-Encoding: chunked.
+ *
+ * Note that this is more complicated than it was in Apache 1.1 and prior
+ * versions, because chunked support means that the module does less.
+ *
+ * The proper procedure is this:
+ *
+ * 1. Call ap_setup_client_block() near the beginning of the request
+ * handler. This will set up all the necessary properties, and will
+ * return either OK, or an error code. If the latter, the module should
+ * return that error code. The second parameter selects the policy to
+ * apply if the request message indicates a body, and how a chunked
+ * transfer-coding should be interpreted. Choose one of
+ *
+ * REQUEST_NO_BODY Send 413 error if message has any body
+ * REQUEST_CHUNKED_ERROR Send 411 error if body without Content-Length
+ * REQUEST_CHUNKED_DECHUNK If chunked, remove the chunks for me.
+ * REQUEST_CHUNKED_PASS If chunked, pass the chunk headers with body.
+ *
+ * In order to use the last two options, the caller MUST provide a buffer
+ * large enough to hold a chunk-size line, including any extensions.
+ *
+ * 2. When you are ready to read a body (if any), call ap_should_client_block().
+ * This will tell the module whether or not to read input. If it is 0,
+ * the module should assume that there is no message body to read.
+ *
+ * 3. Finally, call ap_get_client_block in a loop. Pass it a buffer and its size.
+ * It will put data into the buffer (not necessarily a full buffer), and
+ * return the length of the input block. When it is done reading, it will
+ * return 0 if EOF, or -1 if there was an error.
+ * If an error occurs on input, we force an end to keepalive.
+ *
+ * This step also sends a 100 Continue response to HTTP/1.1 clients if appropriate.
+ */
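+/* Putting the three steps together, a handler that only needs the decoded
+ * body might look roughly like this (a sketch; `process` is a placeholder
+ * and most error handling is omitted):
+ *
+ *     char buf[HUGE_STRING_LEN];
+ *     long n;
+ *     int retval;
+ *
+ *     if ((retval = ap_setup_client_block(r, REQUEST_CHUNKED_DECHUNK)) != OK) {
+ *         return retval;
+ *     }
+ *     if (ap_should_client_block(r)) {
+ *         while ((n = ap_get_client_block(r, buf, sizeof(buf))) > 0) {
+ *             process(buf, n);
+ *         }
+ *         if (n < 0) {
+ *             return HTTP_BAD_REQUEST;
+ *         }
+ *     }
+ */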
+
+AP_DECLARE(int) ap_setup_client_block(request_rec *r, int read_policy)
+{
+ const char *tenc = apr_table_get(r->headers_in, "Transfer-Encoding");
+ const char *lenp = apr_table_get(r->headers_in, "Content-Length");
+ apr_off_t limit_req_body = ap_get_limit_req_body(r);
+
+ r->read_body = read_policy;
+ r->read_chunked = 0;
+ r->remaining = 0;
+
+ if (tenc) {
+ if (ap_cstr_casecmp(tenc, "chunked")) {
+ ap_log_rerror(APLOG_MARK, APLOG_INFO, 0, r, APLOGNO(01592)
+ "Unknown Transfer-Encoding %s", tenc);
+ return HTTP_NOT_IMPLEMENTED;
+ }
+ if (r->read_body == REQUEST_CHUNKED_ERROR) {
+ ap_log_rerror(APLOG_MARK, APLOG_INFO, 0, r, APLOGNO(01593)
+ "chunked Transfer-Encoding forbidden: %s", r->uri);
+ return (lenp) ? HTTP_BAD_REQUEST : HTTP_LENGTH_REQUIRED;
+ }
+
+ r->read_chunked = 1;
+ }
+ else if (lenp) {
+ if (!ap_parse_strict_length(&r->remaining, lenp)) {
+ r->remaining = 0;
+ ap_log_rerror(APLOG_MARK, APLOG_INFO, 0, r, APLOGNO(01594)
+ "Invalid Content-Length '%s'", lenp);
+ return HTTP_BAD_REQUEST;
+ }
+ }
+
+ if ((r->read_body == REQUEST_NO_BODY)
+ && (r->read_chunked || (r->remaining > 0))) {
+ ap_log_rerror(APLOG_MARK, APLOG_INFO, 0, r, APLOGNO(01595)
+ "%s with body is not allowed for %s", r->method, r->uri);
+ return HTTP_REQUEST_ENTITY_TOO_LARGE;
+ }
+
+ if (limit_req_body > 0 && (r->remaining > limit_req_body)) {
+ /* will be logged when the body is discarded */
+ return HTTP_REQUEST_ENTITY_TOO_LARGE;
+ }
+
+#ifdef AP_DEBUG
+ {
+ /* Make sure ap_getline() didn't leave any droppings. */
+ core_request_config *req_cfg =
+ (core_request_config *)ap_get_core_module_config(r->request_config);
+ AP_DEBUG_ASSERT(APR_BRIGADE_EMPTY(req_cfg->bb));
+ }
+#endif
+
+ return OK;
+}
+
+AP_DECLARE(int) ap_should_client_block(request_rec *r)
+{
+ /* First check if we have already read the request body */
+
+ if (r->read_length || (!r->read_chunked && (r->remaining <= 0))) {
+ return 0;
+ }
+
+ return 1;
+}
+
+/* get_client_block is called in a loop to get the request message body.
+ * This is quite simple if the client includes a content-length
+ * (the normal case), but gets messy if the body is chunked. Note that
+ * r->remaining is used to maintain state across calls and that
+ * r->read_length is the total number of bytes given to the caller
+ * across all invocations. It is messy because we have to be careful not
+ * to read past the data provided by the client, since these reads block.
+ * Returns 0 on End-of-body, -1 on error or premature chunk end.
+ *
+ */
+AP_DECLARE(long) ap_get_client_block(request_rec *r, char *buffer,
+ apr_size_t bufsiz)
+{
+ apr_status_t rv;
+ apr_bucket_brigade *bb;
+
+ if (r->remaining < 0 || (!r->read_chunked && r->remaining == 0)) {
+ return 0;
+ }
+
+ bb = apr_brigade_create(r->pool, r->connection->bucket_alloc);
+ if (bb == NULL) {
+ r->connection->keepalive = AP_CONN_CLOSE;
+ return -1;
+ }
+
+ rv = ap_get_brigade(r->input_filters, bb, AP_MODE_READBYTES,
+ APR_BLOCK_READ, bufsiz);
+
+ /* We lose the failure code here. This is why ap_get_client_block should
+ * not be used.
+ */
+ if (rv == AP_FILTER_ERROR) {
+ /* AP_FILTER_ERROR means a filter has responded already,
+ * we are DONE.
+ */
+ apr_brigade_destroy(bb);
+ return -1;
+ }
+ if (rv != APR_SUCCESS) {
+ /* if we actually fail here, we want to just return and
+ * stop trying to read data from the client.
+ */
+ r->connection->keepalive = AP_CONN_CLOSE;
+ apr_brigade_destroy(bb);
+ return -1;
+ }
+
+ /* If this fails, it means that a filter is written incorrectly and that
+ * it needs to learn how to properly handle APR_BLOCK_READ requests by
+ * returning data when requested.
+ */
+ AP_DEBUG_ASSERT(!APR_BRIGADE_EMPTY(bb));
+
+ /* Check to see if EOS in the brigade.
+ *
+ * If so, we have to leave a nugget for the *next* ap_get_client_block
+ * call to return 0.
+ */
+ if (APR_BUCKET_IS_EOS(APR_BRIGADE_LAST(bb))) {
+ if (r->read_chunked) {
+ r->remaining = -1;
+ }
+ else {
+ r->remaining = 0;
+ }
+ }
+
+ rv = apr_brigade_flatten(bb, buffer, &bufsiz);
+ if (rv != APR_SUCCESS) {
+ apr_brigade_destroy(bb);
+ return -1;
+ }
+
+ /* XXX yank me? */
+ r->read_length += bufsiz;
+
+ apr_brigade_destroy(bb);
+ return bufsiz;
+}
+
+/* Context struct for ap_http_outerror_filter */
+typedef struct {
+ int seen_eoc;
+ int first_error;
+} outerror_filter_ctx_t;
+
+/* Filter to handle any error buckets on output */
+apr_status_t ap_http_outerror_filter(ap_filter_t *f,
+ apr_bucket_brigade *b)
+{
+ request_rec *r = f->r;
+ outerror_filter_ctx_t *ctx = (outerror_filter_ctx_t *)(f->ctx);
+ apr_bucket *e;
+
+ /* Create context if none is present */
+ if (!ctx) {
+ ctx = apr_pcalloc(r->pool, sizeof(outerror_filter_ctx_t));
+ f->ctx = ctx;
+ }
+ for (e = APR_BRIGADE_FIRST(b);
+ e != APR_BRIGADE_SENTINEL(b);
+ e = APR_BUCKET_NEXT(e))
+ {
+ if (AP_BUCKET_IS_ERROR(e)) {
+ /*
+ * Start of error handling state tree. Just one condition
+ * right now :)
+ */
+ if (((ap_bucket_error *)(e->data))->status == HTTP_BAD_GATEWAY) {
+ /* stream aborted and we have not ended it yet */
+ r->connection->keepalive = AP_CONN_CLOSE;
+ }
+ /*
+ * Memorize the status code of the first error bucket for possible
+ * later use.
+ */
+ if (!ctx->first_error) {
+ ctx->first_error = ((ap_bucket_error *)(e->data))->status;
+ }
+ continue;
+ }
+ /* Detect EOC buckets and memorize this in the context. */
+ if (AP_BUCKET_IS_EOC(e)) {
+ r->connection->keepalive = AP_CONN_CLOSE;
+ ctx->seen_eoc = 1;
+ }
+ }
+ /*
+ * Remove all data buckets that are in a brigade after an EOC bucket
+ * was seen, as an EOC bucket tells us that no (further) resource
+ * and protocol data should go out to the client. OTOH meta buckets
+ * are still welcome as they might trigger needed actions down in
+ * the chain (e.g. in network filters like SSL).
+     * Remark 1: We need to dump ALL data buckets in the brigade
+     *          since a filter in between might have inserted data
+ * buckets BEFORE the EOC bucket sent by the original
+ * sender and we do NOT want this data to be sent.
+ * Remark 2: Dumping all data buckets here does not necessarily mean
+     *          that no further data is sent to the client as:
+ * 1. Network filters like SSL can still be triggered via
+ * meta buckets to talk with the client e.g. for a
+ * clean shutdown.
+ * 2. There could be still data that was buffered before
+ * down in the chain that gets flushed by a FLUSH or an
+ * EOS bucket.
+ */
+ if (ctx->seen_eoc) {
+ /*
+ * Set the request status to the status of the first error bucket.
+ * This should ensure that we log an appropriate status code in
+ * the access log.
+ * We need to set r->status on each call after we noticed an EOC as
+ * data bucket generators like ap_die might have changed the status
+ * code. But we know better in this case and insist on the status
+ * code that we have seen in the error bucket.
+ */
+ if (ctx->first_error) {
+ r->status = ctx->first_error;
+ }
+ for (e = APR_BRIGADE_FIRST(b);
+ e != APR_BRIGADE_SENTINEL(b);
+ e = APR_BUCKET_NEXT(e))
+ {
+ if (!APR_BUCKET_IS_METADATA(e)) {
+ APR_BUCKET_REMOVE(e);
+ }
+ }
+ }
+
+ return ap_pass_brigade(f->next, b);
+}
diff --git a/modules/http/http_protocol.c b/modules/http/http_protocol.c
new file mode 100644
index 0000000..d031f24
--- /dev/null
+++ b/modules/http/http_protocol.c
@@ -0,0 +1,1671 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * http_protocol.c --- routines which directly communicate with the client.
+ *
+ * Code originally by Rob McCool; much redone by Robert S. Thau
+ * and the Apache Software Foundation.
+ */
+
+#include "apr.h"
+#include "apr_strings.h"
+#include "apr_buckets.h"
+#include "apr_lib.h"
+#include "apr_signal.h"
+
+#define APR_WANT_STDIO /* for sscanf */
+#define APR_WANT_STRFUNC
+#define APR_WANT_MEMFUNC
+#include "apr_want.h"
+
+#include "util_filter.h"
+#include "ap_config.h"
+#include "httpd.h"
+#include "http_config.h"
+#include "http_core.h"
+#include "http_protocol.h"
+#include "http_main.h"
+#include "http_request.h"
+#include "http_vhost.h"
+#include "http_log.h" /* For errors detected in basic auth common
+ * support code... */
+#include "apr_date.h" /* For apr_date_parse_http and APR_DATE_BAD */
+#include "util_charset.h"
+#include "util_ebcdic.h"
+#include "util_time.h"
+#include "ap_mpm.h"
+
+#include "mod_core.h"
+
+#if APR_HAVE_STDARG_H
+#include <stdarg.h>
+#endif
+#if APR_HAVE_UNISTD_H
+#include <unistd.h>
+#endif
+
+APLOG_USE_MODULE(http);
+
+/* New Apache routine to map status codes into array indices
+ * e.g. 100 -> 0, 101 -> 1, 200 -> 2 ...
+ * The number of status lines must equal the value of
+ * RESPONSE_CODES (httpd.h) and must be listed in order.
+ * No gaps are allowed between X00 and the largest Xnn
+ * for any X (see ap_index_of_response).
+ * When adding a new code here, add a define to httpd.h
+ * as well.
+ */
+
+static const char * const status_lines[RESPONSE_CODES] =
+{
+ "100 Continue",
+ "101 Switching Protocols",
+ "102 Processing",
+#define LEVEL_200 3
+ "200 OK",
+ "201 Created",
+ "202 Accepted",
+ "203 Non-Authoritative Information",
+ "204 No Content",
+ "205 Reset Content",
+ "206 Partial Content",
+ "207 Multi-Status",
+ "208 Already Reported",
+ NULL, /* 209 */
+ NULL, /* 210 */
+ NULL, /* 211 */
+ NULL, /* 212 */
+ NULL, /* 213 */
+ NULL, /* 214 */
+ NULL, /* 215 */
+ NULL, /* 216 */
+ NULL, /* 217 */
+ NULL, /* 218 */
+ NULL, /* 219 */
+ NULL, /* 220 */
+ NULL, /* 221 */
+ NULL, /* 222 */
+ NULL, /* 223 */
+ NULL, /* 224 */
+ NULL, /* 225 */
+ "226 IM Used",
+#define LEVEL_300 30
+ "300 Multiple Choices",
+ "301 Moved Permanently",
+ "302 Found",
+ "303 See Other",
+ "304 Not Modified",
+ "305 Use Proxy",
+ NULL, /* 306 */
+ "307 Temporary Redirect",
+ "308 Permanent Redirect",
+#define LEVEL_400 39
+ "400 Bad Request",
+ "401 Unauthorized",
+ "402 Payment Required",
+ "403 Forbidden",
+ "404 Not Found",
+ "405 Method Not Allowed",
+ "406 Not Acceptable",
+ "407 Proxy Authentication Required",
+ "408 Request Timeout",
+ "409 Conflict",
+ "410 Gone",
+ "411 Length Required",
+ "412 Precondition Failed",
+ "413 Request Entity Too Large",
+ "414 Request-URI Too Long",
+ "415 Unsupported Media Type",
+ "416 Requested Range Not Satisfiable",
+ "417 Expectation Failed",
+ NULL, /* 418 */
+ NULL, /* 419 */
+ NULL, /* 420 */
+ "421 Misdirected Request",
+ "422 Unprocessable Entity",
+ "423 Locked",
+ "424 Failed Dependency",
+ NULL, /* 425 */
+ "426 Upgrade Required",
+ NULL, /* 427 */
+ "428 Precondition Required",
+ "429 Too Many Requests",
+ NULL, /* 430 */
+ "431 Request Header Fields Too Large",
+ NULL, /* 432 */
+ NULL, /* 433 */
+ NULL, /* 434 */
+ NULL, /* 435 */
+ NULL, /* 436 */
+ NULL, /* 437 */
+ NULL, /* 438 */
+ NULL, /* 439 */
+ NULL, /* 440 */
+ NULL, /* 441 */
+ NULL, /* 442 */
+ NULL, /* 443 */
+ NULL, /* 444 */
+ NULL, /* 445 */
+ NULL, /* 446 */
+ NULL, /* 447 */
+ NULL, /* 448 */
+ NULL, /* 449 */
+ NULL, /* 450 */
+ "451 Unavailable For Legal Reasons",
+#define LEVEL_500 91
+ "500 Internal Server Error",
+ "501 Not Implemented",
+ "502 Bad Gateway",
+ "503 Service Unavailable",
+ "504 Gateway Timeout",
+ "505 HTTP Version Not Supported",
+ "506 Variant Also Negotiates",
+ "507 Insufficient Storage",
+ "508 Loop Detected",
+ NULL, /* 509 */
+ "510 Not Extended",
+ "511 Network Authentication Required"
+};
+
+APR_HOOK_STRUCT(
+ APR_HOOK_LINK(insert_error_filter)
+)
+
+AP_IMPLEMENT_HOOK_VOID(insert_error_filter, (request_rec *r), (r))
+
+/* The index of the first bit field that is used to index into a limit
+ * bitmask. M_INVALID + 1 to METHOD_NUMBER_LAST.
+ */
+#define METHOD_NUMBER_FIRST (M_INVALID + 1)
+
+/* The max method number. Method numbers are used to shift bitmasks,
+ * so this cannot exceed 63, and all bits high is equal to -1, which is a
+ * special flag, so the last bit used has index 62.
+ */
+#define METHOD_NUMBER_LAST 62
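+
+/* These numbers index bits in a 64-bit method mask; a sketch of the usual
+ * test (with `limited_methods` standing in for whatever apr_int64_t mask a
+ * caller keeps):
+ *
+ *     if (limited_methods & (AP_METHOD_BIT << r->method_number)) ...
+ */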
+
+static int is_mpm_running(void)
+{
+ int mpm_state = 0;
+
+ if (ap_mpm_query(AP_MPMQ_MPM_STATE, &mpm_state)) {
+ return 0;
+ }
+
+ if (mpm_state == AP_MPMQ_STOPPING) {
+ return 0;
+ }
+
+ return 1;
+}
+
+
+AP_DECLARE(int) ap_set_keepalive(request_rec *r)
+{
+ int ka_sent = 0;
+ int left = r->server->keep_alive_max - r->connection->keepalives;
+ int wimpy = ap_find_token(r->pool,
+ apr_table_get(r->headers_out, "Connection"),
+ "close");
+ const char *conn = apr_table_get(r->headers_in, "Connection");
+
+ /* The following convoluted conditional determines whether or not
+ * the current connection should remain persistent after this response
+ * (a.k.a. HTTP Keep-Alive) and whether or not the output message
+ * body should use the HTTP/1.1 chunked transfer-coding. In English,
+ *
+ * IF we have not marked this connection as errored;
+ * and the client isn't expecting 100-continue (PR47087 - more
+ * input here could be the client continuing when we're
+ * closing the request).
+ * and the response body has a defined length due to the status code
+ * being 304 or 204, the request method being HEAD, already
+ * having defined Content-Length or Transfer-Encoding: chunked, or
+ * the request version being HTTP/1.1 and thus capable of being set
+ * as chunked [we know the (r->chunked = 1) side-effect is ugly];
+ * and the server configuration enables keep-alive;
+ * and the server configuration has a reasonable inter-request timeout;
+ * and there is no maximum # requests or the max hasn't been reached;
+ * and the response status does not require a close;
+ * and the response generator has not already indicated close;
+ * and the client did not request non-persistence (Connection: close);
+ * and we haven't been configured to ignore the buggy twit
+ * or they're a buggy twit coming through a HTTP/1.1 proxy
+ * and the client is requesting an HTTP/1.0-style keep-alive
+ * or the client claims to be HTTP/1.1 compliant (perhaps a proxy);
+ * and this MPM process is not already exiting
+ * THEN we can be persistent, which requires more headers be output.
+ *
+ * Note that the condition evaluation order is extremely important.
+ */
+ if ((r->connection->keepalive != AP_CONN_CLOSE)
+ && !r->expecting_100
+ && (r->header_only
+ || AP_STATUS_IS_HEADER_ONLY(r->status)
+ || apr_table_get(r->headers_out, "Content-Length")
+ || ap_is_chunked(r->pool,
+ apr_table_get(r->headers_out,
+ "Transfer-Encoding"))
+ || ((r->proto_num >= HTTP_VERSION(1,1))
+ && (r->chunked = 1))) /* THIS CODE IS CORRECT, see above. */
+ && r->server->keep_alive
+ && (r->server->keep_alive_timeout > 0)
+ && ((r->server->keep_alive_max == 0)
+ || (left > 0))
+ && !ap_status_drops_connection(r->status)
+ && !wimpy
+ && !ap_find_token(r->pool, conn, "close")
+ && (!apr_table_get(r->subprocess_env, "nokeepalive")
+ || apr_table_get(r->headers_in, "Via"))
+ && ((ka_sent = ap_find_token(r->pool, conn, "keep-alive"))
+ || (r->proto_num >= HTTP_VERSION(1,1)))
+ && is_mpm_running()) {
+
+ r->connection->keepalive = AP_CONN_KEEPALIVE;
+ r->connection->keepalives++;
+
+ /* If they sent a Keep-Alive token, send one back */
+ if (ka_sent) {
+ if (r->server->keep_alive_max) {
+ apr_table_setn(r->headers_out, "Keep-Alive",
+ apr_psprintf(r->pool, "timeout=%d, max=%d",
+ (int)apr_time_sec(r->server->keep_alive_timeout),
+ left));
+ }
+ else {
+ apr_table_setn(r->headers_out, "Keep-Alive",
+ apr_psprintf(r->pool, "timeout=%d",
+ (int)apr_time_sec(r->server->keep_alive_timeout)));
+ }
+ apr_table_mergen(r->headers_out, "Connection", "Keep-Alive");
+ }
+
+ return 1;
+ }
+
+ /* Otherwise, we need to indicate that we will be closing this
+ * connection immediately after the current response.
+ *
+ * We only really need to send "close" to HTTP/1.1 clients, but we
+ * always send it anyway, because a broken proxy may identify itself
+ * as HTTP/1.0, but pass our request along with our HTTP/1.1 tag
+ * to a HTTP/1.1 client. Better safe than sorry.
+ */
+ if (!wimpy) {
+ apr_table_mergen(r->headers_out, "Connection", "close");
+ }
+
+ /*
+ * If we had previously been a keepalive connection and this
+ * is the last one, then bump up the number of keepalives
+ * we've had
+ */
+ if ((r->connection->keepalive != AP_CONN_CLOSE)
+ && r->server->keep_alive_max
+ && !left) {
+ r->connection->keepalives++;
+ }
+ r->connection->keepalive = AP_CONN_CLOSE;
+
+ return 0;
+}
+
+AP_DECLARE(ap_condition_e) ap_condition_if_match(request_rec *r,
+ apr_table_t *headers)
+{
+ const char *if_match, *etag;
+
+ /* A server MUST use the strong comparison function (see section 13.3.3)
+ * to compare the entity tags in If-Match.
+ */
+ if ((if_match = apr_table_get(r->headers_in, "If-Match")) != NULL) {
+ if (if_match[0] == '*'
+ || ((etag = apr_table_get(headers, "ETag")) != NULL
+ && ap_find_etag_strong(r->pool, if_match, etag))) {
+ return AP_CONDITION_STRONG;
+ }
+ else {
+ return AP_CONDITION_NOMATCH;
+ }
+ }
+
+ return AP_CONDITION_NONE;
+}
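+
+/* As an illustration of the strong comparison used here: with the response
+ * carrying ETag: "abc", a request with If-Match: "abc" yields
+ * AP_CONDITION_STRONG, while If-Match: W/"abc" yields AP_CONDITION_NOMATCH,
+ * since a weak validator can never satisfy the strong comparison function.
+ */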
+
+AP_DECLARE(ap_condition_e) ap_condition_if_unmodified_since(request_rec *r,
+ apr_table_t *headers)
+{
+ const char *if_unmodified;
+
+ if_unmodified = apr_table_get(r->headers_in, "If-Unmodified-Since");
+ if (if_unmodified) {
+ apr_int64_t mtime, reqtime;
+
+ apr_time_t ius = apr_time_sec(apr_date_parse_http(if_unmodified));
+
+ /* All of our comparisons must be in seconds, because that's the
+ * highest time resolution the HTTP specification allows.
+ */
+ mtime = apr_time_sec(apr_date_parse_http(
+ apr_table_get(headers, "Last-Modified")));
+ if (mtime == APR_DATE_BAD) {
+ mtime = apr_time_sec(r->mtime ? r->mtime : apr_time_now());
+ }
+
+ reqtime = apr_time_sec(apr_date_parse_http(
+ apr_table_get(headers, "Date")));
+ if (!reqtime) {
+ reqtime = apr_time_sec(r->request_time);
+ }
+
+ if ((ius != APR_DATE_BAD) && (mtime > ius)) {
+ if (reqtime < mtime + 60) {
+ if (apr_table_get(r->headers_in, "Range")) {
+ /* weak matches not allowed with Range requests */
+ return AP_CONDITION_NOMATCH;
+ }
+ else {
+ return AP_CONDITION_WEAK;
+ }
+ }
+ else {
+ return AP_CONDITION_STRONG;
+ }
+ }
+ else {
+ return AP_CONDITION_NOMATCH;
+ }
+ }
+
+ return AP_CONDITION_NONE;
+}
+
+AP_DECLARE(ap_condition_e) ap_condition_if_none_match(request_rec *r,
+ apr_table_t *headers)
+{
+ const char *if_nonematch, *etag;
+
+ if_nonematch = apr_table_get(r->headers_in, "If-None-Match");
+ if (if_nonematch != NULL) {
+
+ if (if_nonematch[0] == '*') {
+ return AP_CONDITION_STRONG;
+ }
+
+        /* See section 13.3.3 for rules on how to determine if two entity tags
+ * match. The weak comparison function can only be used with GET or HEAD
+ * requests.
+ */
+ if (r->method_number == M_GET) {
+ if ((etag = apr_table_get(headers, "ETag")) != NULL) {
+ if (apr_table_get(r->headers_in, "Range")) {
+ if (ap_find_etag_strong(r->pool, if_nonematch, etag)) {
+ return AP_CONDITION_STRONG;
+ }
+ }
+ else {
+ if (ap_find_etag_weak(r->pool, if_nonematch, etag)) {
+ return AP_CONDITION_WEAK;
+ }
+ }
+ }
+ }
+
+ else if ((etag = apr_table_get(headers, "ETag")) != NULL
+ && ap_find_etag_strong(r->pool, if_nonematch, etag)) {
+ return AP_CONDITION_STRONG;
+ }
+ return AP_CONDITION_NOMATCH;
+ }
+
+ return AP_CONDITION_NONE;
+}
+
+AP_DECLARE(ap_condition_e) ap_condition_if_modified_since(request_rec *r,
+ apr_table_t *headers)
+{
+ const char *if_modified_since;
+
+ if ((if_modified_since = apr_table_get(r->headers_in, "If-Modified-Since"))
+ != NULL) {
+ apr_int64_t mtime;
+ apr_int64_t ims, reqtime;
+
+ /* All of our comparisons must be in seconds, because that's the
+ * highest time resolution the HTTP specification allows.
+ */
+
+ mtime = apr_time_sec(apr_date_parse_http(
+ apr_table_get(headers, "Last-Modified")));
+ if (mtime == APR_DATE_BAD) {
+ mtime = apr_time_sec(r->mtime ? r->mtime : apr_time_now());
+ }
+
+ reqtime = apr_time_sec(apr_date_parse_http(
+ apr_table_get(headers, "Date")));
+ if (!reqtime) {
+ reqtime = apr_time_sec(r->request_time);
+ }
+
+ ims = apr_time_sec(apr_date_parse_http(if_modified_since));
+
+ if (ims >= mtime && ims <= reqtime) {
+ if (reqtime < mtime + 60) {
+ if (apr_table_get(r->headers_in, "Range")) {
+ /* weak matches not allowed with Range requests */
+ return AP_CONDITION_NOMATCH;
+ }
+ else {
+ return AP_CONDITION_WEAK;
+ }
+ }
+ else {
+ return AP_CONDITION_STRONG;
+ }
+ }
+ else {
+ return AP_CONDITION_NOMATCH;
+ }
+ }
+
+ return AP_CONDITION_NONE;
+}
+
+AP_DECLARE(ap_condition_e) ap_condition_if_range(request_rec *r,
+ apr_table_t *headers)
+{
+ const char *if_range, *etag;
+
+ if ((if_range = apr_table_get(r->headers_in, "If-Range"))
+ && apr_table_get(r->headers_in, "Range")) {
+ if (if_range[0] == '"') {
+
+ if ((etag = apr_table_get(headers, "ETag"))
+ && !strcmp(if_range, etag)) {
+ return AP_CONDITION_STRONG;
+ }
+ else {
+ return AP_CONDITION_NOMATCH;
+ }
+
+ }
+ else {
+ apr_int64_t mtime;
+ apr_int64_t rtime, reqtime;
+
+ /* All of our comparisons must be in seconds, because that's the
+ * highest time resolution the HTTP specification allows.
+ */
+
+ mtime = apr_time_sec(apr_date_parse_http(
+ apr_table_get(headers, "Last-Modified")));
+ if (mtime == APR_DATE_BAD) {
+ mtime = apr_time_sec(r->mtime ? r->mtime : apr_time_now());
+ }
+
+ reqtime = apr_time_sec(apr_date_parse_http(
+ apr_table_get(headers, "Date")));
+ if (!reqtime) {
+ reqtime = apr_time_sec(r->request_time);
+ }
+
+ rtime = apr_time_sec(apr_date_parse_http(if_range));
+
+ if (rtime == mtime) {
+ if (reqtime < mtime + 60) {
+ /* weak matches not allowed with Range requests */
+ return AP_CONDITION_NOMATCH;
+ }
+ else {
+ return AP_CONDITION_STRONG;
+ }
+ }
+ else {
+ return AP_CONDITION_NOMATCH;
+ }
+ }
+ }
+
+ return AP_CONDITION_NONE;
+}
+
+AP_DECLARE(int) ap_meets_conditions(request_rec *r)
+{
+ int not_modified = -1; /* unset by default */
+ ap_condition_e cond;
+
+ /* Check for conditional requests --- note that we only want to do
+ * this if we are successful so far and we are not processing a
+ * subrequest or an ErrorDocument.
+ *
+ * The order of the checks is important, since ETag checks are supposed
+ * to be more accurate than checks relative to the modification time.
+ * However, not all documents are guaranteed to *have* ETags, and some
+ * might have Last-Modified values w/o ETags, so this gets a little
+ * complicated.
+ */
+
+ if (!ap_is_HTTP_SUCCESS(r->status) || r->no_local_copy) {
+ return OK;
+ }
+
+ /* If an If-Match request-header field was given
+ * AND the field value is not "*" (meaning match anything)
+ * AND if our strong ETag does not match any entity tag in that field,
+ * respond with a status of 412 (Precondition Failed).
+ */
+ cond = ap_condition_if_match(r, r->headers_out);
+ if (AP_CONDITION_NOMATCH == cond) {
+ return HTTP_PRECONDITION_FAILED;
+ }
+
+ /* Else if a valid If-Unmodified-Since request-header field was given
+ * AND the requested resource has been modified since the time
+ * specified in this field, then the server MUST
+ * respond with a status of 412 (Precondition Failed).
+ */
+ cond = ap_condition_if_unmodified_since(r, r->headers_out);
+ if (AP_CONDITION_NOMATCH == cond) {
+ not_modified = 0;
+ }
+ else if (cond >= AP_CONDITION_WEAK) {
+ return HTTP_PRECONDITION_FAILED;
+ }
+
+ /* If an If-None-Match request-header field was given
+ * AND the field value is "*" (meaning match anything)
+ * OR our ETag matches any of the entity tags in that field, fail.
+ *
+ * If the request method was GET or HEAD, failure means the server
+ * SHOULD respond with a 304 (Not Modified) response.
+ * For all other request methods, failure means the server MUST
+ * respond with a status of 412 (Precondition Failed).
+ *
+ * GET or HEAD allow weak etag comparison, all other methods require
+ * strong comparison. We can only use weak if it's not a range request.
+ */
+ cond = ap_condition_if_none_match(r, r->headers_out);
+ if (AP_CONDITION_NOMATCH == cond) {
+ not_modified = 0;
+ }
+ else if (cond >= AP_CONDITION_WEAK) {
+ if (r->method_number == M_GET) {
+ if (not_modified) {
+ not_modified = 1;
+ }
+ }
+ else {
+ return HTTP_PRECONDITION_FAILED;
+ }
+ }
+
+ /* If a valid If-Modified-Since request-header field was given
+ * AND it is a GET or HEAD request
+ * AND the requested resource has not been modified since the time
+ * specified in this field, then the server MUST
+ * respond with a status of 304 (Not Modified).
+ * A date later than the server's current request time is invalid.
+ */
+ cond = ap_condition_if_modified_since(r, r->headers_out);
+ if (AP_CONDITION_NOMATCH == cond) {
+ not_modified = 0;
+ }
+ else if (cond >= AP_CONDITION_WEAK) {
+ if (r->method_number == M_GET) {
+ if (not_modified) {
+ not_modified = 1;
+ }
+ }
+ }
+
+    /* If both an If-Range and a Range header are present, we must return
+ * 200 OK. The byterange filter will convert it to a range response.
+ */
+ cond = ap_condition_if_range(r, r->headers_out);
+ if (cond > AP_CONDITION_NONE) {
+ return OK;
+ }
+
+ if (not_modified == 1) {
+ return HTTP_NOT_MODIFIED;
+ }
+
+ return OK;
+}
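+
+/* Usage sketch (the handler name is hypothetical): a content handler
+ * normally sets its validators first and then lets ap_meets_conditions()
+ * decide whether a body needs to be generated at all.
+ *
+ *     static int example_handler(request_rec *r)
+ *     {
+ *         int rc;
+ *
+ *         ap_set_last_modified(r);
+ *         ap_set_etag(r);
+ *         rc = ap_meets_conditions(r);
+ *         if (rc != OK)
+ *             return rc;    (HTTP_NOT_MODIFIED or HTTP_PRECONDITION_FAILED)
+ *         ...generate and send the representation...
+ *         return OK;
+ *     }
+ */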
+
+/**
+ * Singleton registry of additional methods. This maps new method names
+ * such as "MYGET" to methnums, which are int offsets into bitmasks.
+ *
+ * This follows the same technique as standard M_GET, M_POST, etc. These
+ * are dynamically assigned when modules are loaded and
+ * directives are processed.
+ */
+static apr_hash_t *methods_registry = NULL;
+static int cur_method_number = METHOD_NUMBER_FIRST;
+
+/* internal function to register one method/number pair */
+static void register_one_method(apr_pool_t *p, const char *methname,
+ int methnum)
+{
+ int *pnum = apr_palloc(p, sizeof(*pnum));
+
+ *pnum = methnum;
+ apr_hash_set(methods_registry, methname, APR_HASH_KEY_STRING, pnum);
+}
+
+/* This internal function is used to clear the method registry
+ * and reset the cur_method_number counter.
+ */
+static apr_status_t ap_method_registry_destroy(void *notused)
+{
+ methods_registry = NULL;
+ cur_method_number = METHOD_NUMBER_FIRST;
+ return APR_SUCCESS;
+}
+
+AP_DECLARE(void) ap_method_registry_init(apr_pool_t *p)
+{
+ methods_registry = apr_hash_make(p);
+ apr_pool_cleanup_register(p, NULL,
+ ap_method_registry_destroy,
+ apr_pool_cleanup_null);
+
+ /* put all the standard methods into the registry hash to ease the
+ * mapping operations between name and number
+ * HEAD is a special-instance of the GET method and shares the same ID
+ */
+ register_one_method(p, "GET", M_GET);
+ register_one_method(p, "HEAD", M_GET);
+ register_one_method(p, "PUT", M_PUT);
+ register_one_method(p, "POST", M_POST);
+ register_one_method(p, "DELETE", M_DELETE);
+ register_one_method(p, "CONNECT", M_CONNECT);
+ register_one_method(p, "OPTIONS", M_OPTIONS);
+ register_one_method(p, "TRACE", M_TRACE);
+ register_one_method(p, "PATCH", M_PATCH);
+ register_one_method(p, "PROPFIND", M_PROPFIND);
+ register_one_method(p, "PROPPATCH", M_PROPPATCH);
+ register_one_method(p, "MKCOL", M_MKCOL);
+ register_one_method(p, "COPY", M_COPY);
+ register_one_method(p, "MOVE", M_MOVE);
+ register_one_method(p, "LOCK", M_LOCK);
+ register_one_method(p, "UNLOCK", M_UNLOCK);
+ register_one_method(p, "VERSION-CONTROL", M_VERSION_CONTROL);
+ register_one_method(p, "CHECKOUT", M_CHECKOUT);
+ register_one_method(p, "UNCHECKOUT", M_UNCHECKOUT);
+ register_one_method(p, "CHECKIN", M_CHECKIN);
+ register_one_method(p, "UPDATE", M_UPDATE);
+ register_one_method(p, "LABEL", M_LABEL);
+ register_one_method(p, "REPORT", M_REPORT);
+ register_one_method(p, "MKWORKSPACE", M_MKWORKSPACE);
+ register_one_method(p, "MKACTIVITY", M_MKACTIVITY);
+ register_one_method(p, "BASELINE-CONTROL", M_BASELINE_CONTROL);
+ register_one_method(p, "MERGE", M_MERGE);
+}
+
+AP_DECLARE(int) ap_method_register(apr_pool_t *p, const char *methname)
+{
+ int *methnum;
+
+ if (methods_registry == NULL) {
+ ap_method_registry_init(p);
+ }
+
+ if (methname == NULL) {
+ return M_INVALID;
+ }
+
+    /* Check if the method was previously registered. If it was,
+ * return the associated method number.
+ */
+ methnum = (int *)apr_hash_get(methods_registry, methname,
+ APR_HASH_KEY_STRING);
+ if (methnum != NULL)
+ return *methnum;
+
+ if (cur_method_number > METHOD_NUMBER_LAST) {
+ /* The method registry has run out of dynamically
+ * assignable method numbers. Log this and return M_INVALID.
+ */
+ ap_log_perror(APLOG_MARK, APLOG_ERR, 0, p, APLOGNO(01610)
+ "Maximum new request methods %d reached while "
+ "registering method %s.",
+ METHOD_NUMBER_LAST, methname);
+ return M_INVALID;
+ }
+
+ register_one_method(p, methname, cur_method_number);
+ return cur_method_number++;
+}
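+
+/* Usage sketch (module and method names are hypothetical): a module that
+ * implements an extension method registers it once, e.g. from a pre_config
+ * hook, and keeps the returned number for later comparison with
+ * r->method_number.
+ *
+ *     static int example_methnum;
+ *
+ *     static int example_pre_config(apr_pool_t *pconf, apr_pool_t *plog,
+ *                                   apr_pool_t *ptemp)
+ *     {
+ *         example_methnum = ap_method_register(pconf, "EXAMPLE");
+ *         return OK;
+ *     }
+ */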
+
+#define UNKNOWN_METHOD (-1)
+
+static int lookup_builtin_method(const char *method, apr_size_t len)
+{
+ /* Note: the following code was generated by the "shilka" tool from
+ the "cocom" parsing/compilation toolkit. It is an optimized lookup
+ based on analysis of the input keywords. Postprocessing was done
+ on the shilka output, but the basic structure and analysis is
+ from there. Should new HTTP methods be added, then manual insertion
+ into this code is fine, or simply re-running the shilka tool on
+ the appropriate input. */
+
+ /* Note: it is also quite reasonable to just use our method_registry,
+ but I'm assuming (probably incorrectly) we want more speed here
+ (based on the optimizations the previous code was doing). */
+
+ switch (len)
+ {
+ case 3:
+ switch (method[0])
+ {
+ case 'P':
+ return (method[1] == 'U'
+ && method[2] == 'T'
+ ? M_PUT : UNKNOWN_METHOD);
+ case 'G':
+ return (method[1] == 'E'
+ && method[2] == 'T'
+ ? M_GET : UNKNOWN_METHOD);
+ default:
+ return UNKNOWN_METHOD;
+ }
+
+ case 4:
+ switch (method[0])
+ {
+ case 'H':
+ return (method[1] == 'E'
+ && method[2] == 'A'
+ && method[3] == 'D'
+ ? M_GET : UNKNOWN_METHOD);
+ case 'P':
+ return (method[1] == 'O'
+ && method[2] == 'S'
+ && method[3] == 'T'
+ ? M_POST : UNKNOWN_METHOD);
+ case 'M':
+ return (method[1] == 'O'
+ && method[2] == 'V'
+ && method[3] == 'E'
+ ? M_MOVE : UNKNOWN_METHOD);
+ case 'L':
+ return (method[1] == 'O'
+ && method[2] == 'C'
+ && method[3] == 'K'
+ ? M_LOCK : UNKNOWN_METHOD);
+ case 'C':
+ return (method[1] == 'O'
+ && method[2] == 'P'
+ && method[3] == 'Y'
+ ? M_COPY : UNKNOWN_METHOD);
+ default:
+ return UNKNOWN_METHOD;
+ }
+
+ case 5:
+ switch (method[2])
+ {
+ case 'T':
+ return (memcmp(method, "PATCH", 5) == 0
+ ? M_PATCH : UNKNOWN_METHOD);
+ case 'R':
+ return (memcmp(method, "MERGE", 5) == 0
+ ? M_MERGE : UNKNOWN_METHOD);
+ case 'C':
+ return (memcmp(method, "MKCOL", 5) == 0
+ ? M_MKCOL : UNKNOWN_METHOD);
+ case 'B':
+ return (memcmp(method, "LABEL", 5) == 0
+ ? M_LABEL : UNKNOWN_METHOD);
+ case 'A':
+ return (memcmp(method, "TRACE", 5) == 0
+ ? M_TRACE : UNKNOWN_METHOD);
+ default:
+ return UNKNOWN_METHOD;
+ }
+
+ case 6:
+ switch (method[0])
+ {
+ case 'U':
+ switch (method[5])
+ {
+ case 'K':
+ return (memcmp(method, "UNLOCK", 6) == 0
+ ? M_UNLOCK : UNKNOWN_METHOD);
+ case 'E':
+ return (memcmp(method, "UPDATE", 6) == 0
+ ? M_UPDATE : UNKNOWN_METHOD);
+ default:
+ return UNKNOWN_METHOD;
+ }
+ case 'R':
+ return (memcmp(method, "REPORT", 6) == 0
+ ? M_REPORT : UNKNOWN_METHOD);
+ case 'D':
+ return (memcmp(method, "DELETE", 6) == 0
+ ? M_DELETE : UNKNOWN_METHOD);
+ default:
+ return UNKNOWN_METHOD;
+ }
+
+ case 7:
+ switch (method[1])
+ {
+ case 'P':
+ return (memcmp(method, "OPTIONS", 7) == 0
+ ? M_OPTIONS : UNKNOWN_METHOD);
+ case 'O':
+ return (memcmp(method, "CONNECT", 7) == 0
+ ? M_CONNECT : UNKNOWN_METHOD);
+ case 'H':
+ return (memcmp(method, "CHECKIN", 7) == 0
+ ? M_CHECKIN : UNKNOWN_METHOD);
+ default:
+ return UNKNOWN_METHOD;
+ }
+
+ case 8:
+ switch (method[0])
+ {
+ case 'P':
+ return (memcmp(method, "PROPFIND", 8) == 0
+ ? M_PROPFIND : UNKNOWN_METHOD);
+ case 'C':
+ return (memcmp(method, "CHECKOUT", 8) == 0
+ ? M_CHECKOUT : UNKNOWN_METHOD);
+ default:
+ return UNKNOWN_METHOD;
+ }
+
+ case 9:
+ return (memcmp(method, "PROPPATCH", 9) == 0
+ ? M_PROPPATCH : UNKNOWN_METHOD);
+
+ case 10:
+ switch (method[0])
+ {
+ case 'U':
+ return (memcmp(method, "UNCHECKOUT", 10) == 0
+ ? M_UNCHECKOUT : UNKNOWN_METHOD);
+ case 'M':
+ return (memcmp(method, "MKACTIVITY", 10) == 0
+ ? M_MKACTIVITY : UNKNOWN_METHOD);
+ default:
+ return UNKNOWN_METHOD;
+ }
+
+ case 11:
+ return (memcmp(method, "MKWORKSPACE", 11) == 0
+ ? M_MKWORKSPACE : UNKNOWN_METHOD);
+
+ case 15:
+ return (memcmp(method, "VERSION-CONTROL", 15) == 0
+ ? M_VERSION_CONTROL : UNKNOWN_METHOD);
+
+ case 16:
+ return (memcmp(method, "BASELINE-CONTROL", 16) == 0
+ ? M_BASELINE_CONTROL : UNKNOWN_METHOD);
+
+ default:
+ return UNKNOWN_METHOD;
+ }
+
+ /* NOTREACHED */
+}
+
+/* Get the method number associated with the given string, assumed to
+ * contain an HTTP method. Returns M_INVALID if not recognized.
+ *
+ * This is the first step toward placing method names in a configurable
+ * list. Hopefully it (and other routines) can eventually be moved to
+ * something like a mod_http_methods.c, complete with config stuff.
+ */
+AP_DECLARE(int) ap_method_number_of(const char *method)
+{
+ int len = strlen(method);
+ int which = lookup_builtin_method(method, len);
+
+ if (which != UNKNOWN_METHOD)
+ return which;
+
+ /* check if the method has been dynamically registered */
+ if (methods_registry != NULL) {
+ int *methnum = apr_hash_get(methods_registry, method, len);
+
+ if (methnum != NULL) {
+ return *methnum;
+ }
+ }
+
+ return M_INVALID;
+}
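+
+/* For example, given the lookup and registry above:
+ *
+ *     ap_method_number_of("GET")      == M_GET
+ *     ap_method_number_of("HEAD")     == M_GET        (HEAD shares GET's number)
+ *     ap_method_number_of("PROPFIND") == M_PROPFIND
+ *     ap_method_number_of("FOO")      == M_INVALID    (unless a module
+ *                                                      registered "FOO")
+ */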
+
+/*
+ * Turn a known method number into a name.
+ */
+AP_DECLARE(const char *) ap_method_name_of(apr_pool_t *p, int methnum)
+{
+ apr_hash_index_t *hi = apr_hash_first(p, methods_registry);
+
+ /* scan through the hash table, looking for a value that matches
+ the provided method number. */
+ for (; hi; hi = apr_hash_next(hi)) {
+ const void *key;
+ void *val;
+
+ apr_hash_this(hi, &key, NULL, &val);
+ if (*(int *)val == methnum)
+ return key;
+ }
+
+ /* it wasn't found in the hash */
+ return NULL;
+}
+
+/* The index is found by its offset from the x00 code of each level.
+ * Although this is fast, it will need to be replaced if some nutcase
+ * decides to define a high-numbered code before the lower numbers.
+ * If that sad event occurs, replace the code below with a linear search
+ * from status_lines[shortcut[i]] to status_lines[shortcut[i+1]-1];
+ * or use NULL to fill the gaps.
+ */
+static int index_of_response(int status)
+{
+ static int shortcut[6] = {0, LEVEL_200, LEVEL_300, LEVEL_400, LEVEL_500,
+ RESPONSE_CODES};
+ int i, pos;
+
+ if (status < 100) { /* Below 100 is illegal for HTTP status */
+ return -1;
+ }
+ if (status > 999) { /* Above 999 is also illegal for HTTP status */
+ return -1;
+ }
+
+ for (i = 0; i < 5; i++) {
+ status -= 100;
+ if (status < 100) {
+ pos = (status + shortcut[i]);
+ if (pos < shortcut[i + 1] && status_lines[pos] != NULL) {
+ return pos;
+ }
+ else {
+ break;
+ }
+ }
+ }
+ return -2; /* Status unknown (falls in gap) or above 600 */
+}
+
+AP_DECLARE(int) ap_index_of_response(int status)
+{
+ int index = index_of_response(status);
+ return (index < 0) ? LEVEL_500 : index;
+}
+
+AP_DECLARE(const char *) ap_get_status_line_ex(apr_pool_t *p, int status)
+{
+ int index = index_of_response(status);
+ if (index >= 0) {
+ return status_lines[index];
+ }
+ else if (index == -2) {
+ return apr_psprintf(p, "%i Status %i", status, status);
+ }
+ return status_lines[LEVEL_500];
+}
+
+AP_DECLARE(const char *) ap_get_status_line(int status)
+{
+ return status_lines[ap_index_of_response(status)];
+}
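+
+/* For example, with the standard status_lines table:
+ *
+ *     ap_get_status_line(404)       -> "404 Not Found"
+ *     ap_get_status_line_ex(p, 299) -> "299 Status 299"  (gap in the table,
+ *                                      a line is synthesized)
+ *     ap_get_status_line(299)       -> "500 Internal Server Error"  (fallback)
+ */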
+
+/* Build the Allow field-value from the request handler method mask.
+ */
+static char *make_allow(request_rec *r)
+{
+ apr_int64_t mask;
+ apr_array_header_t *allow = apr_array_make(r->pool, 10, sizeof(char *));
+ apr_hash_index_t *hi = apr_hash_first(r->pool, methods_registry);
+ /* For TRACE below */
+ core_server_config *conf =
+ ap_get_core_module_config(r->server->module_config);
+
+ mask = r->allowed_methods->method_mask;
+
+ for (; hi; hi = apr_hash_next(hi)) {
+ const void *key;
+ void *val;
+
+ apr_hash_this(hi, &key, NULL, &val);
+ if ((mask & (AP_METHOD_BIT << *(int *)val)) != 0) {
+ APR_ARRAY_PUSH(allow, const char *) = key;
+ }
+ }
+
+ /* TRACE is tested on a per-server basis */
+ if (conf->trace_enable != AP_TRACE_DISABLE)
+ *(const char **)apr_array_push(allow) = "TRACE";
+
+ /* ### this is rather annoying. we should enforce registration of
+ ### these methods */
+ if ((mask & (AP_METHOD_BIT << M_INVALID))
+ && (r->allowed_methods->method_list != NULL)
+ && (r->allowed_methods->method_list->nelts != 0)) {
+ apr_array_cat(allow, r->allowed_methods->method_list);
+ }
+
+ return apr_array_pstrcat(r->pool, allow, ',');
+}
+
+AP_DECLARE(int) ap_send_http_options(request_rec *r)
+{
+ if (r->assbackwards) {
+ return DECLINED;
+ }
+
+ apr_table_setn(r->headers_out, "Allow", make_allow(r));
+
+ /* the request finalization will send an EOS, which will flush all
+ * the headers out (including the Allow header)
+ */
+
+ return OK;
+}
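+
+/* For example, an OPTIONS response built here typically carries something
+ * like the following (the exact set and order depend on r->allowed_methods,
+ * the registry hash order and TraceEnable):
+ *
+ *     Allow: GET,HEAD,POST,OPTIONS,TRACE
+ */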
+
+AP_DECLARE(void) ap_set_content_type(request_rec *r, const char *ct)
+{
+ if (!ct) {
+ r->content_type = NULL;
+ }
+ else if (!r->content_type || strcmp(r->content_type, ct)) {
+ r->content_type = ct;
+ }
+}
+
+AP_DECLARE(void) ap_set_accept_ranges(request_rec *r)
+{
+ core_dir_config *d = ap_get_core_module_config(r->per_dir_config);
+ apr_table_setn(r->headers_out, "Accept-Ranges",
+ (d->max_ranges == AP_MAXRANGES_NORANGES) ? "none"
+ : "bytes");
+}
+static const char *add_optional_notes(request_rec *r,
+ const char *prefix,
+ const char *key,
+ const char *suffix)
+{
+ const char *notes, *result;
+
+ if ((notes = apr_table_get(r->notes, key)) == NULL) {
+ result = apr_pstrcat(r->pool, prefix, suffix, NULL);
+ }
+ else {
+ result = apr_pstrcat(r->pool, prefix, notes, suffix, NULL);
+ }
+
+ return result;
+}
+
+/* construct and return the default error message for a given
+ * HTTP defined error code
+ */
+static const char *get_canned_error_string(int status,
+ request_rec *r,
+ const char *location)
+{
+ apr_pool_t *p = r->pool;
+ const char *error_notes, *h1, *s1;
+
+ switch (status) {
+ case HTTP_MOVED_PERMANENTLY:
+ case HTTP_MOVED_TEMPORARILY:
+ case HTTP_TEMPORARY_REDIRECT:
+ case HTTP_PERMANENT_REDIRECT:
+        return(apr_pstrcat(p,
+                           "<p>The document has moved <a href=\"",
+                           ap_escape_html(r->pool, location),
+                           "\">here</a>.</p>\n",
+                           NULL));
+    case HTTP_SEE_OTHER:
+        return(apr_pstrcat(p,
+                           "<p>The answer to your request is located "
+                           "<a href=\"",
+                           ap_escape_html(r->pool, location),
+                           "\">here</a>.</p>\n",
+                           NULL));
+    case HTTP_USE_PROXY:
+        return("<p>This resource is only accessible "
+               "through the proxy\n"
+               "<br />\nYou will need to configure "
+               "your client to use that proxy.</p>\n");
+ case HTTP_PROXY_AUTHENTICATION_REQUIRED:
+ case HTTP_UNAUTHORIZED:
+ return("This server could not verify that you\n"
+ "are authorized to access the document\n"
+ "requested. Either you supplied the wrong\n"
+ "credentials (e.g., bad password), or your\n"
+ "browser doesn't understand how to supply\n"
+ "the credentials required.
\n");
+ case HTTP_BAD_REQUEST:
+ return(add_optional_notes(r,
+ "Your browser sent a request that "
+ "this server could not understand.
\n",
+ "error-notes",
+ "
\n"));
+ case HTTP_FORBIDDEN:
+ return(add_optional_notes(r, "You don't have permission to access this resource.", "error-notes", "
\n"));
+ case HTTP_NOT_FOUND:
+ return("The requested URL was not found on this server.
\n");
+ case HTTP_METHOD_NOT_ALLOWED:
+ return(apr_pstrcat(p,
+ "The requested method ",
+ ap_escape_html(r->pool, r->method),
+ " is not allowed for this URL.
\n",
+ NULL));
+ case HTTP_NOT_ACCEPTABLE:
+        return(add_optional_notes(r,
+            "<p>An appropriate representation of the requested resource "
+            "could not be found on this server.</p>\n",
+            "variant-list", ""));
+ case HTTP_MULTIPLE_CHOICES:
+ return(add_optional_notes(r, "", "variant-list", ""));
+ case HTTP_LENGTH_REQUIRED:
+        s1 = apr_pstrcat(p,
+                         "<p>A request of the requested method ",
+                         ap_escape_html(r->pool, r->method),
+                         " requires a valid Content-length.<br />\n",
+                         NULL);
+        return(add_optional_notes(r, s1, "error-notes", "</p>\n"));
+    case HTTP_PRECONDITION_FAILED:
+        return("<p>The precondition on the request "
+               "for this URL evaluated to false.</p>\n");
+    case HTTP_NOT_IMPLEMENTED:
+        s1 = apr_pstrcat(p,
+                         "<p>",
+                         ap_escape_html(r->pool, r->method),
+                         " not supported for current URL.<br />\n",
+                         NULL);
+        return(add_optional_notes(r, s1, "error-notes", "</p>\n"));
+    case HTTP_BAD_GATEWAY:
+        s1 = "<p>The proxy server received an invalid" CRLF
+            "response from an upstream server.<br />" CRLF;
+        return(add_optional_notes(r, s1, "error-notes", "</p>\n"));
+    case HTTP_VARIANT_ALSO_VARIES:
+        return("<p>A variant for the requested "
+               "resource\n<pre>\n"
+               "\n</pre>\nis itself a negotiable resource. "
+               "This indicates a configuration error.</p>\n");
+    case HTTP_REQUEST_TIME_OUT:
+        return("<p>Server timeout waiting for the HTTP request from the client.</p>\n");
+    case HTTP_GONE:
+        return("<p>The requested resource is no longer available on this server"
+               " and there is no forwarding address.\n"
+               "Please remove all references to this resource.</p>\n");
+ case HTTP_REQUEST_ENTITY_TOO_LARGE:
+ return(apr_pstrcat(p,
+ "The requested resource does not allow request data with ",
+ ap_escape_html(r->pool, r->method),
+ " requests, or the amount of data provided in\n"
+ "the request exceeds the capacity limit.\n",
+ NULL));
+ case HTTP_REQUEST_URI_TOO_LARGE:
+ s1 = "The requested URL's length exceeds the capacity\n"
+ "limit for this server.
\n";
+ return(add_optional_notes(r, s1, "error-notes", "
\n"));
+ case HTTP_UNSUPPORTED_MEDIA_TYPE:
+ return("The supplied request data is not in a format\n"
+ "acceptable for processing by this resource.
\n");
+ case HTTP_RANGE_NOT_SATISFIABLE:
+ return("None of the range-specifier values in the Range\n"
+ "request-header field overlap the current extent\n"
+ "of the selected resource.
\n");
+ case HTTP_EXPECTATION_FAILED:
+ s1 = apr_table_get(r->headers_in, "Expect");
+ if (s1)
+ s1 = apr_pstrcat(p,
+ "The expectation given in the Expect request-header\n"
+ "field could not be met by this server.\n"
+ "The client sent
\n Expect: ",
+ ap_escape_html(r->pool, s1), "\n
\n",
+ NULL);
+ else
+ s1 = "No expectation was seen, the Expect request-header \n"
+ "field was not presented by the client.\n";
+ return add_optional_notes(r, s1, "error-notes", "
"
+ "Only the 100-continue expectation is supported.
\n");
+ case HTTP_UNPROCESSABLE_ENTITY:
+ return("The server understands the media type of the\n"
+ "request entity, but was unable to process the\n"
+ "contained instructions.
\n");
+ case HTTP_LOCKED:
+ return("The requested resource is currently locked.\n"
+ "The lock must be released or proper identification\n"
+ "given before the method can be applied.
\n");
+ case HTTP_FAILED_DEPENDENCY:
+ return("The method could not be performed on the resource\n"
+ "because the requested action depended on another\n"
+ "action and that other action failed.
\n");
+ case HTTP_UPGRADE_REQUIRED:
+ return("The requested resource can only be retrieved\n"
+ "using SSL. The server is willing to upgrade the current\n"
+ "connection to SSL, but your client doesn't support it.\n"
+ "Either upgrade your client, or try requesting the page\n"
+ "using https://\n");
+ case HTTP_PRECONDITION_REQUIRED:
+ return("
The request is required to be conditional.
\n");
+ case HTTP_TOO_MANY_REQUESTS:
+ return("The user has sent too many requests\n"
+ "in a given amount of time.
\n");
+ case HTTP_REQUEST_HEADER_FIELDS_TOO_LARGE:
+ return("The server refused this request because\n"
+ "the request header fields are too large.
\n");
+ case HTTP_INSUFFICIENT_STORAGE:
+ return("The method could not be performed on the resource\n"
+ "because the server is unable to store the\n"
+ "representation needed to successfully complete the\n"
+ "request. There is insufficient free space left in\n"
+ "your storage allocation.
\n");
+ case HTTP_SERVICE_UNAVAILABLE:
+ return("The server is temporarily unable to service your\n"
+ "request due to maintenance downtime or capacity\n"
+ "problems. Please try again later.
\n");
+ case HTTP_GATEWAY_TIME_OUT:
+ return("The gateway did not receive a timely response\n"
+ "from the upstream server or application.
\n");
+ case HTTP_LOOP_DETECTED:
+ return("The server terminated an operation because\n"
+ "it encountered an infinite loop.
\n");
+ case HTTP_NOT_EXTENDED:
+ return("A mandatory extension policy in the request is not\n"
+ "accepted by the server for this resource.
\n");
+ case HTTP_NETWORK_AUTHENTICATION_REQUIRED:
+ return("The client needs to authenticate to gain\n"
+ "network access.
\n");
+ case HTTP_MISDIRECTED_REQUEST:
+ return("The client needs a new connection for this\n"
+ "request as the requested host name does not match\n"
+ "the Server Name Indication (SNI) in use for this\n"
+ "connection.
\n");
+ case HTTP_UNAVAILABLE_FOR_LEGAL_REASONS:
+ return(add_optional_notes(r,
+ "Access to this URL has been denied for legal reasons.
\n",
+ "error-notes", "
\n"));
+ default: /* HTTP_INTERNAL_SERVER_ERROR */
+ /*
+ * This comparison to expose error-notes could be modified to
+ * use a configuration directive and export based on that
+ * directive. For now "*" is used to designate an error-notes
+ * that is totally safe for any user to see (ie lacks paths,
+ * database passwords, etc.)
+ */
+ if (((error_notes = apr_table_get(r->notes,
+ "error-notes")) != NULL)
+ && (h1 = apr_table_get(r->notes, "verbose-error-to")) != NULL
+ && (strcmp(h1, "*") == 0)) {
+            return(apr_pstrcat(p, error_notes, "<p />\n", NULL));
+ }
+ else {
+            return(apr_pstrcat(p,
+                               "<p>The server encountered an internal "
+                               "error or\n"
+                               "misconfiguration and was unable to complete\n"
+                               "your request.</p>\n"
+                               "<p>Please contact the server "
+                               "administrator at \n ",
+                               ap_escape_html(r->pool,
+                                              r->server->server_admin),
+                               " to inform them of the time this "
+                               "error occurred,\n"
+                               " and the actions you performed just before "
+                               "this error.</p>\n"
+                               "<p>More information about this error "
+                               "may be available\n"
+                               "in the server error log.</p>\n",
+                               NULL));
+ }
+ /*
+ * It would be nice to give the user the information they need to
+ * fix the problem directly since many users don't have access to
+ * the error_log (think University sites) even though they can easily
+ * get this error by misconfiguring an htaccess file. However, the
+         * error notes tend to include the real file pathname in this case,
+ * which some people consider to be a breach of privacy. Until we
+ * can figure out a way to remove the pathname, leave this commented.
+ *
+ * if ((error_notes = apr_table_get(r->notes,
+ * "error-notes")) != NULL) {
+         *     return(apr_pstrcat(p, error_notes, "<p />\n", NULL);
+ * }
+ * else {
+ * return "";
+ * }
+ */
+ }
+}
+
+/* We should have named this send_canned_response, since it is used for any
+ * response that can be generated by the server from the request record.
+ * This includes all 204 (no content), 3xx (redirect), 4xx (client error),
+ * and 5xx (server error) messages that have not been redirected to another
+ * handler via the ErrorDocument feature.
+ */
+AP_DECLARE(void) ap_send_error_response(request_rec *r, int recursive_error)
+{
+ int status = r->status;
+ int idx = ap_index_of_response(status);
+ char *custom_response;
+ const char *location = apr_table_get(r->headers_out, "Location");
+
+ /* At this point, we are starting the response over, so we have to reset
+ * this value.
+ */
+ r->eos_sent = 0;
+
+ /* and we need to get rid of any RESOURCE filters that might be lurking
+ * around, thinking they are in the middle of the original request
+ */
+
+ r->output_filters = r->proto_output_filters;
+
+ ap_run_insert_error_filter(r);
+
+ /* We need to special-case the handling of 204 and 304 responses,
+ * since they have specific HTTP requirements and do not include a
+ * message body. Note that being assbackwards here is not an option.
+ */
+ if (AP_STATUS_IS_HEADER_ONLY(status)) {
+ ap_finalize_request_protocol(r);
+ return;
+ }
+
+ /*
+ * It's possible that the Location field might be in r->err_headers_out
+ * instead of r->headers_out; use the latter if possible, else the
+ * former.
+ */
+ if (location == NULL) {
+ location = apr_table_get(r->err_headers_out, "Location");
+ }
+
+ if (!r->assbackwards) {
+ apr_table_t *tmp = r->headers_out;
+
+ /* For all HTTP/1.x responses for which we generate the message,
+ * we need to avoid inheriting the "normal status" header fields
+ * that may have been set by the request handler before the
+ * error or redirect, except for Location on external redirects.
+ */
+ r->headers_out = r->err_headers_out;
+ r->err_headers_out = tmp;
+ apr_table_clear(r->err_headers_out);
+
+ if (ap_is_HTTP_REDIRECT(status) || (status == HTTP_CREATED)) {
+ if ((location != NULL) && *location) {
+ apr_table_setn(r->headers_out, "Location", location);
+ }
+ else {
+ location = ""; /* avoids coredump when printing, below */
+ }
+ }
+
+ r->content_languages = NULL;
+ r->content_encoding = NULL;
+ r->clength = 0;
+
+ if (apr_table_get(r->subprocess_env,
+ "suppress-error-charset") != NULL) {
+ core_request_config *request_conf =
+ ap_get_core_module_config(r->request_config);
+ request_conf->suppress_charset = 1; /* avoid adding default
+ * charset later
+ */
+ ap_set_content_type(r, "text/html");
+ }
+ else {
+ ap_set_content_type(r, "text/html; charset=iso-8859-1");
+ }
+
+ if ((status == HTTP_METHOD_NOT_ALLOWED)
+ || (status == HTTP_NOT_IMPLEMENTED)) {
+ apr_table_setn(r->headers_out, "Allow", make_allow(r));
+ }
+
+ if (r->header_only) {
+ ap_finalize_request_protocol(r);
+ return;
+ }
+ }
+
+ if ((custom_response = ap_response_code_string(r, idx))) {
+ /*
+ * We have a custom response output. This should only be
+ * a text-string to write back. But if the ErrorDocument
+ * was a local redirect and the requested resource failed
+ * for any reason, the custom_response will still hold the
+ * redirect URL. We don't really want to output this URL
+ * as a text message, so first check the custom response
+ * string to ensure that it is a text-string (using the
+ * same test used in ap_die(), i.e. does it start with a ").
+ *
+ * If it's not a text string, we've got a recursive error or
+ * an external redirect. If it's a recursive error, ap_die passes
+ * us the second error code so we can write both, and has already
+ * backed up to the original error. If it's an external redirect,
+ * it hasn't happened yet; we may never know if it fails.
+ */
+ if (custom_response[0] == '\"') {
+ ap_rputs(custom_response + 1, r);
+ ap_finalize_request_protocol(r);
+ return;
+ }
+ }
+ {
+ const char *title = status_lines[idx];
+ const char *h1;
+
+ /* Accept a status_line set by a module, but only if it begins
+ * with the correct 3 digit status code
+ */
+ if (r->status_line) {
+ char *end;
+ int len = strlen(r->status_line);
+ if (len >= 3
+ && apr_strtoi64(r->status_line, &end, 10) == r->status
+ && (end - 3) == r->status_line
+ && (len < 4 || apr_isspace(r->status_line[3]))
+ && (len < 5 || apr_isalnum(r->status_line[4]))) {
+ /* Since we passed the above check, we know that length three
+ * is equivalent to only a 3 digit numeric http status.
+ * RFC2616 mandates a trailing space, let's add it.
+ * If we have an empty reason phrase, we also add "Unknown Reason".
+ */
+ if (len == 3) {
+ r->status_line = apr_pstrcat(r->pool, r->status_line, " Unknown Reason", NULL);
+ } else if (len == 4) {
+ r->status_line = apr_pstrcat(r->pool, r->status_line, "Unknown Reason", NULL);
+ }
+ title = r->status_line;
+ }
+ }
+
+ /* folks decided they didn't want the error code in the H1 text */
+ h1 = &title[4];
+
+ /* can't count on a charset filter being in place here,
+ * so do ebcdic->ascii translation explicitly (if needed)
+ */
+
+        ap_rvputs_proto_in_ascii(r,
+                  DOCTYPE_HTML_2_0
+                  "<html><head>\n<title>", title,
+                  "</title>\n</head><body>\n<h1>", h1, "</h1>\n",
+                  NULL);
+
+ ap_rvputs_proto_in_ascii(r,
+ get_canned_error_string(status, r, location),
+ NULL);
+
+ if (recursive_error) {
+ ap_rvputs_proto_in_ascii(r, "Additionally, a ",
+ status_lines[ap_index_of_response(recursive_error)],
+ "\nerror was encountered while trying to use an "
+ "ErrorDocument to handle the request.
\n", NULL);
+ }
+ ap_rvputs_proto_in_ascii(r, ap_psignature("
\n", r), NULL);
+ ap_rvputs_proto_in_ascii(r, "\n", NULL);
+ }
+ ap_finalize_request_protocol(r);
+}
+
+/*
+ * Create a new method list with the specified number of preallocated
+ * extension slots.
+ */
+AP_DECLARE(ap_method_list_t *) ap_make_method_list(apr_pool_t *p, int nelts)
+{
+ ap_method_list_t *ml;
+
+ ml = (ap_method_list_t *) apr_palloc(p, sizeof(ap_method_list_t));
+ ml->method_mask = 0;
+ ml->method_list = apr_array_make(p, nelts, sizeof(char *));
+ return ml;
+}
+
+/*
+ * Make a copy of a method list (primarily for subrequests that may
+ * subsequently change it; don't want them changing the parent's, too!).
+ */
+AP_DECLARE(void) ap_copy_method_list(ap_method_list_t *dest,
+ ap_method_list_t *src)
+{
+ int i;
+ char **imethods;
+ char **omethods;
+
+ dest->method_mask = src->method_mask;
+ imethods = (char **) src->method_list->elts;
+ for (i = 0; i < src->method_list->nelts; ++i) {
+ omethods = (char **) apr_array_push(dest->method_list);
+ *omethods = apr_pstrdup(dest->method_list->pool, imethods[i]);
+ }
+}
+
+/*
+ * Return true if the specified HTTP method is in the provided
+ * method list.
+ */
+AP_DECLARE(int) ap_method_in_list(ap_method_list_t *l, const char *method)
+{
+ int methnum;
+
+ /*
+ * If it's one of our known methods, use the shortcut and check the
+ * bitmask.
+ */
+ methnum = ap_method_number_of(method);
+ if (methnum != M_INVALID) {
+ return !!(l->method_mask & (AP_METHOD_BIT << methnum));
+ }
+ /*
+ * Otherwise, see if the method name is in the array of string names.
+ */
+ if ((l->method_list == NULL) || (l->method_list->nelts == 0)) {
+ return 0;
+ }
+
+ return ap_array_str_contains(l->method_list, method);
+}
+
+/*
+ * Add the specified method to a method list (if it isn't already there).
+ */
+AP_DECLARE(void) ap_method_list_add(ap_method_list_t *l, const char *method)
+{
+ int methnum;
+ const char **xmethod;
+
+ /*
+ * If it's one of our known methods, use the shortcut and use the
+ * bitmask.
+ */
+ methnum = ap_method_number_of(method);
+ if (methnum != M_INVALID) {
+ l->method_mask |= (AP_METHOD_BIT << methnum);
+ return;
+ }
+ /*
+ * Otherwise, see if the method name is in the array of string names.
+ */
+ if (ap_array_str_contains(l->method_list, method)) {
+ return;
+ }
+
+ xmethod = (const char **) apr_array_push(l->method_list);
+ *xmethod = method;
+}
+
+/*
+ * Remove the specified method from a method list.
+ */
+AP_DECLARE(void) ap_method_list_remove(ap_method_list_t *l,
+ const char *method)
+{
+ int methnum;
+ char **methods;
+
+ /*
+     * If it's a known method, either builtin or registered
+ * by a module, use the bitmask.
+ */
+ methnum = ap_method_number_of(method);
+ if (methnum != M_INVALID) {
+ l->method_mask &= ~(AP_METHOD_BIT << methnum);
+ return;
+ }
+ /*
+ * Otherwise, see if the method name is in the array of string names.
+ */
+ if (l->method_list->nelts != 0) {
+ int i, j, k;
+ methods = (char **)l->method_list->elts;
+ for (i = 0; i < l->method_list->nelts; ) {
+ if (strcmp(method, methods[i]) == 0) {
+ for (j = i, k = i + 1; k < l->method_list->nelts; ++j, ++k) {
+ methods[j] = methods[k];
+ }
+ --l->method_list->nelts;
+ }
+ else {
+ ++i;
+ }
+ }
+ }
+}
+
+/*
+ * Reset a method list to be completely empty.
+ */
+AP_DECLARE(void) ap_clear_method_list(ap_method_list_t *l)
+{
+ l->method_mask = 0;
+ l->method_list->nelts = 0;
+}
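+
+/* Usage sketch ("MYMETHOD" is a hypothetical, unregistered method name):
+ * known methods are tracked in the bitmask, unknown ones by name.
+ *
+ *     ap_clear_method_list(r->allowed_methods);
+ *     ap_method_list_add(r->allowed_methods, "GET");       sets the M_GET bit
+ *     ap_method_list_add(r->allowed_methods, "MYMETHOD");  stored as a string
+ *     if (ap_method_in_list(r->allowed_methods, "MYMETHOD")) {
+ *         ...
+ *     }
+ *     ap_method_list_remove(r->allowed_methods, "GET");
+ */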
+
diff --git a/modules/http/http_request.c b/modules/http/http_request.c
new file mode 100644
index 0000000..d59cfe2
--- /dev/null
+++ b/modules/http/http_request.c
@@ -0,0 +1,861 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * http_request.c: functions to get and process requests
+ *
+ * Rob McCool 3/21/93
+ *
+ * Thoroughly revamped by rst for Apache. NB this file reads
+ * best from the bottom up.
+ *
+ */
+
+#include "apr_strings.h"
+#include "apr_file_io.h"
+#include "apr_fnmatch.h"
+
+#define APR_WANT_STRFUNC
+#include "apr_want.h"
+
+#include "ap_config.h"
+#include "httpd.h"
+#include "http_config.h"
+#include "http_request.h"
+#include "http_core.h"
+#include "http_protocol.h"
+#include "http_log.h"
+#include "http_main.h"
+#include "util_filter.h"
+#include "util_charset.h"
+#include "scoreboard.h"
+
+#include "mod_core.h"
+
+#if APR_HAVE_STDARG_H
+#include <stdarg.h>
+#endif
+
+APLOG_USE_MODULE(http);
+
+/*****************************************************************
+ *
+ * Mainline request processing...
+ */
+
+/* XXX A cleaner and faster way to do this might be to pass the request_rec
+ * down the filter chain as a parameter. It would need to change for
+ * subrequest vs. main request filters; perhaps the subrequest filter could
+ * make the switch.
+ */
+static void update_r_in_filters(ap_filter_t *f,
+ request_rec *from,
+ request_rec *to)
+{
+ while (f) {
+ if (f->r == from) {
+ f->r = to;
+ }
+ f = f->next;
+ }
+}
+
+static void ap_die_r(int type, request_rec *r, int recursive_error)
+{
+ char *custom_response;
+ request_rec *r_1st_err = r;
+
+ if (type == OK || type == DONE) {
+ ap_finalize_request_protocol(r);
+ return;
+ }
+
+ if (!ap_is_HTTP_VALID_RESPONSE(type)) {
+ ap_filter_t *next;
+
+ /*
+ * Check if we still have the ap_http_header_filter in place. If
+ * this is the case we should not ignore the error here because
+ * it means that we have not sent any response at all and never
+         * will. This is bad. Send an internal server error instead.
+ */
+ next = r->output_filters;
+ while (next && (next->frec != ap_http_header_filter_handle)) {
+ next = next->next;
+ }
+
+ /*
+ * If next != NULL then we left the while above because of
+ * next->frec == ap_http_header_filter
+ */
+ if (next) {
+ if (type != AP_FILTER_ERROR) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(01579)
+ "Invalid response status %i", type);
+ }
+ else {
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(02831)
+ "Response from AP_FILTER_ERROR");
+ }
+ type = HTTP_INTERNAL_SERVER_ERROR;
+ }
+ else {
+ return;
+ }
+ }
+
+ /*
+     * The following takes care of Apache redirects to custom response URLs.
+ * Note that if we are already dealing with the response to some other
+ * error condition, we just report on the original error, and give up on
+ * any attempt to handle the other thing "intelligently"...
+ */
+ if (recursive_error != HTTP_OK) {
+ while (r_1st_err->prev && (r_1st_err->prev->status != HTTP_OK))
+ r_1st_err = r_1st_err->prev; /* Get back to original error */
+
+ if (r_1st_err != r) {
+ /* The recursive error was caused by an ErrorDocument specifying
+ * an internal redirect to a bad URI. ap_internal_redirect has
+ * changed the filter chains to point to the ErrorDocument's
+ * request_rec. Back out those changes so we can safely use the
+ * original failing request_rec to send the canned error message.
+ *
+ * ap_send_error_response gets rid of existing resource filters
+ * on the output side, so we can skip those.
+ */
+ update_r_in_filters(r_1st_err->proto_output_filters, r, r_1st_err);
+ update_r_in_filters(r_1st_err->input_filters, r, r_1st_err);
+ }
+
+ custom_response = NULL; /* Do NOT retry the custom thing! */
+ }
+ else {
+ int error_index = ap_index_of_response(type);
+ custom_response = ap_response_code_string(r, error_index);
+ recursive_error = 0;
+ }
+
+ r->status = type;
+
+ /*
+ * This test is done here so that none of the auth modules needs to know
+ * about proxy authentication. They treat it like normal auth, and then
+ * we tweak the status.
+ */
+ if (HTTP_UNAUTHORIZED == r->status && PROXYREQ_PROXY == r->proxyreq) {
+ r->status = HTTP_PROXY_AUTHENTICATION_REQUIRED;
+ }
+
+ /* If we don't want to keep the connection, make sure we mark that the
+ * connection is not eligible for keepalive. If we want to keep the
+ * connection, be sure that the request body (if any) has been read.
+ */
+ if (ap_status_drops_connection(r->status)) {
+ r->connection->keepalive = AP_CONN_CLOSE;
+ }
+
+ /*
+ * Two types of custom redirects --- plain text, and URLs. Plain text has
+ * a leading '"', so the URL code, here, is triggered on its absence
+ */
+
+ if (custom_response && custom_response[0] != '"') {
+
+ if (ap_is_url(custom_response)) {
+ /*
+             * The URL isn't local, so let's drop through the rest of this
+ * apache code, and continue with the usual REDIRECT handler.
+ * But note that the client will ultimately see the wrong
+ * status...
+ */
+ r->status = HTTP_MOVED_TEMPORARILY;
+ apr_table_setn(r->headers_out, "Location", custom_response);
+ }
+ else if (custom_response[0] == '/') {
+ const char *error_notes, *original_method;
+ int original_method_number;
+ r->no_local_copy = 1; /* Do NOT send HTTP_NOT_MODIFIED for
+ * error documents! */
+ /*
+ * This redirect needs to be a GET no matter what the original
+ * method was.
+ */
+ apr_table_setn(r->subprocess_env, "REQUEST_METHOD", r->method);
+
+ /*
+ * Provide a special method for modules to communicate
+ * more informative (than the plain canned) messages to us.
+ * Propagate them to ErrorDocuments via the ERROR_NOTES variable:
+ */
+ if ((error_notes = apr_table_get(r->notes,
+ "error-notes")) != NULL) {
+ apr_table_setn(r->subprocess_env, "ERROR_NOTES", error_notes);
+ }
+ original_method = r->method;
+ original_method_number = r->method_number;
+ r->method = "GET";
+ r->method_number = M_GET;
+ ap_internal_redirect(custom_response, r);
+            /* preserve ability to see %<m in the access log */
+            r->method = original_method;
+ r->method_number = original_method_number;
+ return;
+ }
+ else {
+ /*
+ * Dumb user has given us a bad url to redirect to --- fake up
+ * dying with a recursive server error...
+ */
+ recursive_error = HTTP_INTERNAL_SERVER_ERROR;
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(01580)
+ "Invalid error redirection directive: %s",
+ custom_response);
+ }
+ }
+ ap_send_error_response(r_1st_err, recursive_error);
+}
+
+AP_DECLARE(void) ap_die(int type, request_rec *r)
+{
+ ap_die_r(type, r, r->status);
+}
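+
+/* For reference, ap_response_code_string() (the ErrorDocument setting) can
+ * hand back three kinds of value to the code above (values illustrative):
+ *
+ *     "\"Nothing to see here."        leading '"'  -> served as plain text
+ *     "/errors/not_found.html"        local URL    -> internal redirect
+ *     "http://example.com/notfound"   absolute URL -> 302 with Location
+ */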
+
+AP_DECLARE(apr_status_t) ap_check_pipeline(conn_rec *c, apr_bucket_brigade *bb,
+ unsigned int max_blank_lines)
+{
+ apr_status_t rv = APR_EOF;
+ ap_input_mode_t mode = AP_MODE_SPECULATIVE;
+ unsigned int num_blank_lines = 0;
+ apr_size_t cr = 0;
+ char buf[2];
+
+ while (c->keepalive != AP_CONN_CLOSE && !c->aborted) {
+ apr_size_t len = cr + 1;
+
+ apr_brigade_cleanup(bb);
+ rv = ap_get_brigade(c->input_filters, bb, mode,
+ APR_NONBLOCK_READ, len);
+ if (rv != APR_SUCCESS || APR_BRIGADE_EMPTY(bb)) {
+ if (mode == AP_MODE_READBYTES) {
+ /* Unexpected error, stop with this connection */
+ ap_log_cerror(APLOG_MARK, APLOG_ERR, rv, c, APLOGNO(02967)
+ "Can't consume pipelined empty lines");
+ c->keepalive = AP_CONN_CLOSE;
+ rv = APR_EGENERAL;
+ }
+ else if (rv != APR_SUCCESS && !APR_STATUS_IS_EAGAIN(rv)) {
+ /* Pipe is dead */
+ c->keepalive = AP_CONN_CLOSE;
+ }
+ else {
+ /* Pipe is up and empty */
+ rv = APR_EAGAIN;
+ }
+ break;
+ }
+ if (!max_blank_lines) {
+ apr_off_t n = 0;
+ /* Single read asked, (non-meta-)data available? */
+ rv = apr_brigade_length(bb, 0, &n);
+ if (rv == APR_SUCCESS && n <= 0) {
+ rv = APR_EAGAIN;
+ }
+ break;
+ }
+
+ /* Lookup and consume blank lines */
+ rv = apr_brigade_flatten(bb, buf, &len);
+ if (rv != APR_SUCCESS || len != cr + 1) {
+ int log_level;
+ if (mode == AP_MODE_READBYTES) {
+ /* Unexpected error, stop with this connection */
+ c->keepalive = AP_CONN_CLOSE;
+ log_level = APLOG_ERR;
+ rv = APR_EGENERAL;
+ }
+ else {
+ /* Let outside (non-speculative/blocking) read determine
+ * where this possible failure comes from (metadata,
+ * morphed EOF socket, ...). Debug only here.
+ */
+ log_level = APLOG_DEBUG;
+ rv = APR_SUCCESS;
+ }
+ ap_log_cerror(APLOG_MARK, log_level, rv, c, APLOGNO(02968)
+ "Can't check pipelined data");
+ break;
+ }
+
+ if (mode == AP_MODE_READBYTES) {
+ /* [CR]LF consumed, try next */
+ mode = AP_MODE_SPECULATIVE;
+ cr = 0;
+ }
+ else if (cr) {
+ AP_DEBUG_ASSERT(len == 2 && buf[0] == APR_ASCII_CR);
+ if (buf[1] == APR_ASCII_LF) {
+ /* consume this CRLF */
+ mode = AP_MODE_READBYTES;
+ num_blank_lines++;
+ }
+ else {
+ /* CR(?!LF) is data */
+ break;
+ }
+ }
+ else {
+ if (buf[0] == APR_ASCII_LF) {
+ /* consume this LF */
+ mode = AP_MODE_READBYTES;
+ num_blank_lines++;
+ }
+ else if (buf[0] == APR_ASCII_CR) {
+ cr = 1;
+ }
+ else {
+ /* Not [CR]LF, some data */
+ break;
+ }
+ }
+ if (num_blank_lines > max_blank_lines) {
+ /* Enough blank lines with this connection,
+ * stop and don't recycle it.
+ */
+ c->keepalive = AP_CONN_CLOSE;
+ rv = APR_NOTFOUND;
+ break;
+ }
+ }
+
+ return rv;
+}
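+
+/* For example, the request processing code below calls this as
+ * ap_check_pipeline(c, bb, DEFAULT_LIMIT_BLANK_LINES) to skip stray blank
+ * lines between pipelined requests, while a caller passing 0 only probes
+ * (without consuming) whether any pipelined data is already buffered.
+ */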
+
+#define RETRIEVE_BRIGADE_FROM_POOL(bb, key, pool, allocator) do { \
+ apr_pool_userdata_get((void **)&bb, key, pool); \
+ if (bb == NULL) { \
+ bb = apr_brigade_create(pool, allocator); \
+ apr_pool_userdata_setn((const void *)bb, key, NULL, pool); \
+ } \
+ else { \
+ apr_brigade_cleanup(bb); \
+ } \
+} while(0)
+
+AP_DECLARE(void) ap_process_request_after_handler(request_rec *r)
+{
+ apr_bucket_brigade *bb;
+ apr_bucket *b;
+ conn_rec *c = r->connection;
+ apr_status_t rv;
+
+ /* Send an EOR bucket through the output filter chain. When
+ * this bucket is destroyed, the request will be logged and
+ * its pool will be freed
+ */
+ RETRIEVE_BRIGADE_FROM_POOL(bb, "ap_process_request_after_handler_brigade",
+ c->pool, c->bucket_alloc);
+ b = ap_bucket_eor_create(c->bucket_alloc, r);
+ APR_BRIGADE_INSERT_HEAD(bb, b);
+
+ ap_pass_brigade(c->output_filters, bb);
+
+ /* The EOR bucket has either been handled by an output filter (eg.
+ * deleted or moved to a buffered_bb => no more in bb), or an error
+     * occurred before that (eg. c->aborted => still in bb) and we ought
+ * to destroy it now. So cleanup any remaining bucket along with
+ * the orphan request (if any).
+ */
+ apr_brigade_cleanup(bb);
+
+ /* From here onward, it is no longer safe to reference r
+ * or r->pool, because r->pool may have been destroyed
+ * already by the EOR bucket's cleanup function.
+ */
+
+    /* Check the pipeline, consuming blank lines; they must not be interpreted as
+ * the next pipelined request, otherwise we would block on the next read
+ * without flushing data, and hence possibly delay pending response(s)
+ * until the next/real request comes in or the keepalive timeout expires.
+ */
+ rv = ap_check_pipeline(c, bb, DEFAULT_LIMIT_BLANK_LINES);
+ c->data_in_input_filters = (rv == APR_SUCCESS);
+ apr_brigade_cleanup(bb);
+
+ if (c->cs)
+ c->cs->state = (c->aborted) ? CONN_STATE_LINGER
+ : CONN_STATE_WRITE_COMPLETION;
+ AP_PROCESS_REQUEST_RETURN((uintptr_t)r, r->uri, r->status);
+ if (ap_extended_status) {
+ ap_time_process_request(c->sbh, STOP_PREQUEST);
+ }
+}
+
+void ap_process_async_request(request_rec *r)
+{
+ conn_rec *c = r->connection;
+ int access_status;
+
+ /* Give quick handlers a shot at serving the request on the fast
+ * path, bypassing all of the other Apache hooks.
+ *
+ * This hook was added to enable serving files out of a URI keyed
+ * content cache ( e.g., Mike Abbott's Quick Shortcut Cache,
+ * described here: http://oss.sgi.com/projects/apache/mod_qsc.html )
+ *
+ * It may have other uses as well, such as routing requests directly to
+ * content handlers that have the ability to grok HTTP and do their
+ * own access checking, etc (e.g. servlet engines).
+ *
+ * Use this hook with extreme care and only if you know what you are
+ * doing.
+ */
+ AP_PROCESS_REQUEST_ENTRY((uintptr_t)r, r->uri);
+ if (ap_extended_status) {
+ ap_time_process_request(r->connection->sbh, START_PREQUEST);
+ }
+
+ if (APLOGrtrace4(r)) {
+ int i;
+ const apr_array_header_t *t_h = apr_table_elts(r->headers_in);
+ const apr_table_entry_t *t_elt = (apr_table_entry_t *)t_h->elts;
+ ap_log_rerror(APLOG_MARK, APLOG_TRACE4, 0, r,
+ "Headers received from client:");
+ for (i = 0; i < t_h->nelts; i++, t_elt++) {
+ ap_log_rerror(APLOG_MARK, APLOG_TRACE4, 0, r, " %s: %s",
+ ap_escape_logitem(r->pool, t_elt->key),
+ ap_escape_logitem(r->pool, t_elt->val));
+ }
+ }
+
+#if APR_HAS_THREADS
+ apr_thread_mutex_create(&r->invoke_mtx, APR_THREAD_MUTEX_DEFAULT, r->pool);
+ apr_thread_mutex_lock(r->invoke_mtx);
+#endif
+ access_status = ap_run_quick_handler(r, 0); /* Not a look-up request */
+ if (access_status == DECLINED) {
+ access_status = ap_process_request_internal(r);
+ if (access_status == OK) {
+ access_status = ap_invoke_handler(r);
+ }
+ }
+
+ if (access_status == SUSPENDED) {
+ /* TODO: Should move these steps into a generic function, so modules
+ * working on a suspended request can also call _ENTRY again.
+ */
+ AP_PROCESS_REQUEST_RETURN((uintptr_t)r, r->uri, access_status);
+ if (ap_extended_status) {
+ ap_time_process_request(c->sbh, STOP_PREQUEST);
+ }
+ if (c->cs)
+ c->cs->state = CONN_STATE_SUSPENDED;
+#if APR_HAS_THREADS
+ apr_thread_mutex_unlock(r->invoke_mtx);
+#endif
+ return;
+ }
+#if APR_HAS_THREADS
+ apr_thread_mutex_unlock(r->invoke_mtx);
+#endif
+
+ ap_die_r(access_status, r, HTTP_OK);
+
+ ap_process_request_after_handler(r);
+}
+
+AP_DECLARE(void) ap_process_request(request_rec *r)
+{
+ apr_bucket_brigade *bb;
+ apr_bucket *b;
+ conn_rec *c = r->connection;
+ apr_status_t rv;
+
+ ap_process_async_request(r);
+
+ if (!c->data_in_input_filters) {
+ RETRIEVE_BRIGADE_FROM_POOL(bb, "ap_process_request_brigade",
+ c->pool, c->bucket_alloc);
+ b = apr_bucket_flush_create(c->bucket_alloc);
+ APR_BRIGADE_INSERT_HEAD(bb, b);
+ rv = ap_pass_brigade(c->output_filters, bb);
+ if (APR_STATUS_IS_TIMEUP(rv)) {
+ /*
+ * Notice a timeout as an error message. This might be
+ * valuable for detecting clients with broken network
+ * connections or possible DoS attacks.
+ */
+ ap_log_cerror(APLOG_MARK, APLOG_INFO, rv, c, APLOGNO(01581)
+ "flushing data to the client");
+ }
+ apr_brigade_cleanup(bb);
+ }
+ if (ap_extended_status) {
+ ap_time_process_request(c->sbh, STOP_PREQUEST);
+ }
+}
+
+static apr_table_t *rename_original_env(apr_pool_t *p, apr_table_t *t)
+{
+ const apr_array_header_t *env_arr = apr_table_elts(t);
+ const apr_table_entry_t *elts = (const apr_table_entry_t *) env_arr->elts;
+ apr_table_t *new = apr_table_make(p, env_arr->nalloc);
+ int i;
+
+ for (i = 0; i < env_arr->nelts; ++i) {
+ if (!elts[i].key)
+ continue;
+ apr_table_setn(new, apr_pstrcat(p, "REDIRECT_", elts[i].key, NULL),
+ elts[i].val);
+ }
+
+ return new;
+}
+
+static request_rec *internal_internal_redirect(const char *new_uri,
+ request_rec *r) {
+ int access_status;
+ request_rec *new;
+ const char *vary_header;
+
+ if (ap_is_recursion_limit_exceeded(r)) {
+ ap_die(HTTP_INTERNAL_SERVER_ERROR, r);
+ return NULL;
+ }
+
+ new = (request_rec *) apr_pcalloc(r->pool, sizeof(request_rec));
+
+ new->connection = r->connection;
+ new->server = r->server;
+ new->pool = r->pool;
+
+ /*
+ * A whole lot of this really ought to be shared with http_protocol.c...
+ * another missing cleanup. It's particularly inappropriate to be
+ * setting header_only, etc., here.
+ */
+
+ new->method = r->method;
+ new->method_number = r->method_number;
+ new->allowed_methods = ap_make_method_list(new->pool, 2);
+ ap_parse_uri(new, new_uri);
+ new->parsed_uri.port_str = r->parsed_uri.port_str;
+ new->parsed_uri.port = r->parsed_uri.port;
+
+ new->request_config = ap_create_request_config(r->pool);
+
+ new->per_dir_config = r->server->lookup_defaults;
+
+ new->prev = r;
+ r->next = new;
+
+ new->useragent_addr = r->useragent_addr;
+ new->useragent_ip = r->useragent_ip;
+
+ /* Must have prev and next pointers set before calling create_request
+ * hook.
+ */
+ ap_run_create_request(new);
+
+ /* Inherit the rest of the protocol info... */
+
+ new->the_request = r->the_request;
+
+ new->allowed = r->allowed;
+
+ new->status = r->status;
+ new->assbackwards = r->assbackwards;
+ new->header_only = r->header_only;
+ new->protocol = r->protocol;
+ new->proto_num = r->proto_num;
+ new->hostname = r->hostname;
+ new->request_time = r->request_time;
+ new->main = r->main;
+
+ new->headers_in = r->headers_in;
+ new->trailers_in = r->trailers_in;
+ new->headers_out = apr_table_make(r->pool, 12);
+ if (ap_is_HTTP_REDIRECT(new->status)) {
+ const char *location = apr_table_get(r->headers_out, "Location");
+ if (location)
+ apr_table_setn(new->headers_out, "Location", location);
+ }
+
+ /* A module (like mod_rewrite) can force an internal redirect
+ * to carry over the Vary header (if present).
+ */
+ if (apr_table_get(r->notes, "redirect-keeps-vary")) {
+ if((vary_header = apr_table_get(r->headers_out, "Vary"))) {
+ apr_table_setn(new->headers_out, "Vary", vary_header);
+ }
+ }
+
+ new->err_headers_out = r->err_headers_out;
+ new->trailers_out = apr_table_make(r->pool, 5);
+ new->subprocess_env = rename_original_env(r->pool, r->subprocess_env);
+ new->notes = apr_table_make(r->pool, 5);
+
+ new->htaccess = r->htaccess;
+ new->no_cache = r->no_cache;
+ new->expecting_100 = r->expecting_100;
+ new->no_local_copy = r->no_local_copy;
+ new->read_length = r->read_length; /* We can only read it once */
+ new->vlist_validator = r->vlist_validator;
+
+ new->proto_output_filters = r->proto_output_filters;
+ new->proto_input_filters = r->proto_input_filters;
+
+ new->input_filters = new->proto_input_filters;
+
+ if (new->main) {
+ ap_filter_t *f, *nextf;
+
+ /* If this is a subrequest, the filter chain may contain a
+ * mixture of filters specific to the old request (r), and
+ * some inherited from r->main. Here, inherit that filter
+ * chain, and remove all those which are specific to the old
+ * request; ensuring the subreq filter is left in place. */
+ new->output_filters = r->output_filters;
+
+ f = new->output_filters;
+ do {
+ nextf = f->next;
+
+ if (f->r == r && f->frec != ap_subreq_core_filter_handle) {
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(01582)
+ "dropping filter '%s' in internal redirect from %s to %s",
+ f->frec->name, r->unparsed_uri, new_uri);
+
+ /* To remove the filter, first set f->r to the *new*
+ * request_rec, so that ->output_filters on 'new' is
+ * changed (if necessary) when removing the filter. */
+ f->r = new;
+ ap_remove_output_filter(f);
+ }
+
+ f = nextf;
+
+ /* Stop at the protocol filters. If a protocol filter has
+ * been newly installed for this resource, better leave it
+ * in place, though it's probably a misconfiguration or
+ * filter bug to get into this state. */
+ } while (f && f != new->proto_output_filters);
+ }
+ else {
+ /* If this is not a subrequest, clear out all
+ * resource-specific filters. */
+ new->output_filters = new->proto_output_filters;
+ }
+
+ update_r_in_filters(new->input_filters, r, new);
+ update_r_in_filters(new->output_filters, r, new);
+
+ apr_table_setn(new->subprocess_env, "REDIRECT_STATUS",
+ apr_itoa(r->pool, r->status));
+
+ /* Begin by presuming any module can make its own path_info assumptions,
+ * until some module interjects and changes the value.
+ */
+ new->used_path_info = AP_REQ_DEFAULT_PATH_INFO;
+
+#if APR_HAS_THREADS
+ new->invoke_mtx = r->invoke_mtx;
+#endif
+
+ /*
+ * XXX: hmm. This is because mod_setenvif and mod_unique_id really need
+ * to do their thing on internal redirects as well. Perhaps this is a
+ * misnamed function.
+ */
+ if ((access_status = ap_post_read_request(new))) {
+ ap_die(access_status, new);
+ return NULL;
+ }
+
+ return new;
+}
+
+/* XXX: Is this function so bogus and fragile that we should deep-6 it? */
+AP_DECLARE(void) ap_internal_fast_redirect(request_rec *rr, request_rec *r)
+{
+ /* We need to tell POOL_DEBUG that we're guaranteeing that rr->pool
+ * will exist as long as r->pool. Otherwise we run into troubles because
+ * some values in this request will be allocated in r->pool, and others in
+ * rr->pool.
+ */
+ apr_pool_join(r->pool, rr->pool);
+ r->proxyreq = rr->proxyreq;
+ r->no_cache = (r->no_cache && rr->no_cache);
+ r->no_local_copy = (r->no_local_copy && rr->no_local_copy);
+ r->mtime = rr->mtime;
+ r->uri = rr->uri;
+ r->filename = rr->filename;
+ r->canonical_filename = rr->canonical_filename;
+ r->path_info = rr->path_info;
+ r->args = rr->args;
+ r->finfo = rr->finfo;
+ r->handler = rr->handler;
+ ap_set_content_type(r, rr->content_type);
+ r->content_encoding = rr->content_encoding;
+ r->content_languages = rr->content_languages;
+ r->per_dir_config = rr->per_dir_config;
+ /* copy output headers from subrequest, but leave negotiation headers */
+ r->notes = apr_table_overlay(r->pool, rr->notes, r->notes);
+ r->headers_out = apr_table_overlay(r->pool, rr->headers_out,
+ r->headers_out);
+ r->err_headers_out = apr_table_overlay(r->pool, rr->err_headers_out,
+ r->err_headers_out);
+ r->trailers_out = apr_table_overlay(r->pool, rr->trailers_out,
+ r->trailers_out);
+ r->subprocess_env = apr_table_overlay(r->pool, rr->subprocess_env,
+ r->subprocess_env);
+
+ r->output_filters = rr->output_filters;
+ r->input_filters = rr->input_filters;
+
+    /* If any filters pointed at the now-defunct rr, we must point them
+     * at our "new" instance of r.  In particular, some of rr's structures
+     * will now be bogus (say rr->headers_out).  If a filter tried to modify
+     * its f->r structure while it still points at rr, the real request_rec
+     * would not get updated.  Fix that here.
+     */
+ update_r_in_filters(r->input_filters, rr, r);
+ update_r_in_filters(r->output_filters, rr, r);
+
+ if (r->main) {
+ ap_filter_t *next = r->output_filters;
+ while (next && (next != r->proto_output_filters)) {
+ if (next->frec == ap_subreq_core_filter_handle) {
+ break;
+ }
+ next = next->next;
+ }
+ if (!next || next == r->proto_output_filters) {
+ ap_add_output_filter_handle(ap_subreq_core_filter_handle,
+ NULL, r, r->connection);
+ }
+ }
+ else {
+        /*
+         * We need to check whether the SUBREQ_CORE filter is now in our
+         * filter chain.  If it is, we must remove it, since we are not a
+         * subrequest.  Keep in mind that the SUBREQ_CORE filter is not
+         * necessarily the first filter in the chain, so we have to walk the
+         * chain; but we only need to walk up to the proto_output_filters,
+         * as the SUBREQ_CORE filter sits below the protocol filters.
+         */
+ ap_filter_t *next;
+
+ next = r->output_filters;
+ while (next && (next->frec != ap_subreq_core_filter_handle)
+ && (next != r->proto_output_filters)) {
+ next = next->next;
+ }
+ if (next && (next->frec == ap_subreq_core_filter_handle)) {
+ ap_remove_output_filter(next);
+ }
+ }
+}
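
A minimal caller sketch for ap_internal_fast_redirect(), following the usual directory-index pattern: run a sub-request for another resource and, if it looks serviceable, promote it in place of r. The function name and the "/index.html" URI below are hypothetical, not part of this patch.

static int promote_index_sketch(request_rec *r)
{
    /* Look up the candidate resource as a sub-request. */
    request_rec *rr = ap_sub_req_lookup_uri("/index.html", r, NULL);

    if (rr->status == HTTP_OK && rr->filename) {
        /* Promote rr in place of r; rr->pool is joined to r->pool inside
         * the call, so rr must not be destroyed afterwards. */
        ap_internal_fast_redirect(rr, r);
        return OK;
    }
    ap_destroy_sub_req(rr);
    return DECLINED;
}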
+
+AP_DECLARE(void) ap_internal_redirect(const char *new_uri, request_rec *r)
+{
+ int access_status;
+ request_rec *new = internal_internal_redirect(new_uri, r);
+
+ AP_INTERNAL_REDIRECT(r->uri, new_uri);
+
+    /* ap_die was already called if an error occurred */
+ if (!new) {
+ return;
+ }
+
+ access_status = ap_run_quick_handler(new, 0); /* Not a look-up request */
+ if (access_status == DECLINED) {
+ access_status = ap_process_request_internal(new);
+ if (access_status == OK) {
+ access_status = ap_invoke_handler(new);
+ }
+ }
+ ap_die(access_status, new);
+}
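
A rough usage sketch for ap_internal_redirect(): a content handler that wants a different URI served simply calls it and returns OK, since the redirect runs the new request to completion (including the ap_die() call) itself. The handler name and URIs are invented for illustration.

static int legacy_alias_sketch(request_rec *r)
{
    if (strcmp(r->uri, "/old-report") == 0) {
        /* Re-run the full request processing for the new URI. */
        ap_internal_redirect("/new-report", r);
        return OK;  /* nothing left to do for the original request */
    }
    return DECLINED;
}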
+
+/* This function is designed for things like actions or CGI scripts, when
+ * using AddHandler, and you want to preserve the content type across
+ * an internal redirect.
+ */
+AP_DECLARE(void) ap_internal_redirect_handler(const char *new_uri, request_rec *r)
+{
+ int access_status;
+ request_rec *new = internal_internal_redirect(new_uri, r);
+
+    /* ap_die was already called if an error occurred */
+ if (!new) {
+ return;
+ }
+
+ if (r->handler)
+ ap_set_content_type(new, r->content_type);
+ access_status = ap_process_request_internal(new);
+ if (access_status == OK) {
+ access_status = ap_invoke_handler(new);
+ }
+ ap_die(access_status, new);
+}
+
+AP_DECLARE(void) ap_allow_methods(request_rec *r, int reset, ...)
+{
+ const char *method;
+ va_list methods;
+
+ /*
+ * Get rid of any current settings if requested; not just the
+ * well-known methods but any extensions as well.
+ */
+ if (reset) {
+ ap_clear_method_list(r->allowed_methods);
+ }
+
+ va_start(methods, reset);
+ while ((method = va_arg(methods, const char *)) != NULL) {
+ ap_method_list_add(r->allowed_methods, method);
+ }
+ va_end(methods);
+}
+
+AP_DECLARE(void) ap_allow_standard_methods(request_rec *r, int reset, ...)
+{
+ int method;
+ va_list methods;
+ apr_int64_t mask;
+
+ /*
+ * Get rid of any current settings if requested; not just the
+ * well-known methods but any extensions as well.
+ */
+ if (reset) {
+ ap_clear_method_list(r->allowed_methods);
+ }
+
+ mask = 0;
+ va_start(methods, reset);
+ while ((method = va_arg(methods, int)) != -1) {
+ mask |= (AP_METHOD_BIT << method);
+ }
+ va_end(methods);
+
+ r->allowed_methods->method_mask |= mask;
+}
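
A short sketch of how the two helpers above combine before returning HTTP_METHOD_NOT_ALLOWED. REPLACE_ALLOW/MERGE_ALLOW, the M_* method numbers and the list terminators (-1 and NULL) match the declarations and the loops above; the handler name and the "PURGE" extension method are purely illustrative.

static int restricted_handler_sketch(request_rec *r)
{
    if (r->method_number != M_GET && r->method_number != M_POST) {
        /* Reset the allowed-method list, add well-known methods by
         * number, then merge in an extension method by name. */
        ap_allow_standard_methods(r, REPLACE_ALLOW, M_GET, M_POST, -1);
        ap_allow_methods(r, MERGE_ALLOW, "PURGE", NULL);
        return HTTP_METHOD_NOT_ALLOWED;
    }
    return DECLINED;
}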
diff --git a/modules/http/mod_mime.c b/modules/http/mod_mime.c
new file mode 100644
index 0000000..700f824
--- /dev/null
+++ b/modules/http/mod_mime.c
@@ -0,0 +1,1037 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * http_mime.c: Sends/gets MIME headers for requests
+ *
+ * Rob McCool
+ *
+ */
+
+#include "apr.h"
+#include "apr_strings.h"
+#include "apr_lib.h"
+#include "apr_hash.h"
+
+#define APR_WANT_STRFUNC
+#include "apr_want.h"
+
+#include "ap_config.h"
+#include "httpd.h"
+#include "http_config.h"
+#include "http_log.h"
+#include "http_request.h"
+#include "http_protocol.h"
+
+/* XXXX - fix me / EBCDIC
+ *        there was a kludge here which would use its
+ *        own version of apr_isascii(), indicating that
+ * on some platforms that might be needed.
+ *
+ * #define OS_ASC(c) (c) -- for mere mortals
+ * or
+ * #define OS_ASC(c) (ebcdic2ascii[c]) -- for dino's
+ *
+ * #define apr_isascii(c) ((OS_ASC(c) & 0x80) == 0)
+ */
+
+/* XXXXX - fix me - See note with NOT_PROXY
+ */
+
+typedef struct attrib_info {
+ char *name;
+ int offset;
+} attrib_info;
+
+/* Information to which an extension can be mapped
+ */
+typedef struct extension_info {
+    char *forced_type;                /* Additional AddType stuff */
+ char *encoding_type; /* Added with AddEncoding... */
+ char *language_type; /* Added with AddLanguage... */
+ char *handler; /* Added with AddHandler... */
+ char *charset_type; /* Added with AddCharset... */
+ char *input_filters; /* Added with AddInputFilter... */
+ char *output_filters; /* Added with AddOutputFilter... */
+} extension_info;
+
+#define MULTIMATCH_UNSET 0
+#define MULTIMATCH_ANY 1
+#define MULTIMATCH_NEGOTIATED 2
+#define MULTIMATCH_HANDLERS 4
+#define MULTIMATCH_FILTERS 8
+
+typedef struct {
+ apr_hash_t *extension_mappings; /* Map from extension name to
+ * extension_info structure */
+
+ apr_array_header_t *remove_mappings; /* A simple list, walked once */
+
+ char *default_language; /* Language if no AddLanguage ext found */
+
+ int multimatch; /* Extensions to include in multiview matching
+ * for filenames, e.g. Filters and Handlers
+ */
+ int use_path_info; /* If set to 0, only use filename.
+ * If set to 1, append PATH_INFO to filename for
+ * lookups.
+ * If set to 2, this value is unset and is
+ * effectively 0.
+ */
+} mime_dir_config;
+
+typedef struct param_s {
+ char *attr;
+ char *val;
+ struct param_s *next;
+} param;
+
+typedef struct {
+ const char *type;
+ apr_size_t type_len;
+ const char *subtype;
+ apr_size_t subtype_len;
+ param *param;
+} content_type;
+
+static char tspecial[] = {
+ '(', ')', '<', '>', '@', ',', ';', ':',
+ '\\', '"', '/', '[', ']', '?', '=',
+ '\0'
+};
+
+module AP_MODULE_DECLARE_DATA mime_module;
+
+static void *create_mime_dir_config(apr_pool_t *p, char *dummy)
+{
+ mime_dir_config *new = apr_palloc(p, sizeof(mime_dir_config));
+
+ new->extension_mappings = NULL;
+ new->remove_mappings = NULL;
+
+ new->default_language = NULL;
+
+ new->multimatch = MULTIMATCH_UNSET;
+
+ new->use_path_info = 2;
+
+ return new;
+}
+/*
+ * Overlay one hash table of extension_mappings onto another
+ */
+static void *overlay_extension_mappings(apr_pool_t *p,
+ const void *key,
+ apr_ssize_t klen,
+ const void *overlay_val,
+ const void *base_val,
+ const void *data)
+{
+ const extension_info *overlay_info = (const extension_info *)overlay_val;
+ const extension_info *base_info = (const extension_info *)base_val;
+ extension_info *new_info = apr_pmemdup(p, base_info, sizeof(extension_info));
+
+ if (overlay_info->forced_type) {
+ new_info->forced_type = overlay_info->forced_type;
+ }
+ if (overlay_info->encoding_type) {
+ new_info->encoding_type = overlay_info->encoding_type;
+ }
+ if (overlay_info->language_type) {
+ new_info->language_type = overlay_info->language_type;
+ }
+ if (overlay_info->handler) {
+ new_info->handler = overlay_info->handler;
+ }
+ if (overlay_info->charset_type) {
+ new_info->charset_type = overlay_info->charset_type;
+ }
+ if (overlay_info->input_filters) {
+ new_info->input_filters = overlay_info->input_filters;
+ }
+ if (overlay_info->output_filters) {
+ new_info->output_filters = overlay_info->output_filters;
+ }
+
+ return new_info;
+}
+
+/* Each entry's offset gives the position within an extension_info of the
+ * pointer to reset.
+ */
+static void remove_items(apr_pool_t *p, apr_array_header_t *remove,
+ apr_hash_t *mappings)
+{
+ attrib_info *suffix = (attrib_info *) remove->elts;
+ int i;
+ for (i = 0; i < remove->nelts; i++) {
+ extension_info *exinfo = apr_hash_get(mappings,
+ suffix[i].name,
+ APR_HASH_KEY_STRING);
+ if (exinfo && *(const char**)((char *)exinfo + suffix[i].offset)) {
+ extension_info *copyinfo = exinfo;
+ exinfo = apr_pmemdup(p, copyinfo, sizeof(*exinfo));
+ apr_hash_set(mappings, suffix[i].name,
+ APR_HASH_KEY_STRING, exinfo);
+
+ *(const char**)((char *)exinfo + suffix[i].offset) = NULL;
+ }
+ }
+}
+
+static void *merge_mime_dir_configs(apr_pool_t *p, void *basev, void *addv)
+{
+ mime_dir_config *base = (mime_dir_config *)basev;
+ mime_dir_config *add = (mime_dir_config *)addv;
+ mime_dir_config *new = apr_palloc(p, sizeof(mime_dir_config));
+
+ if (base->extension_mappings && add->extension_mappings) {
+ new->extension_mappings = apr_hash_merge(p, add->extension_mappings,
+ base->extension_mappings,
+ overlay_extension_mappings,
+ NULL);
+ }
+ else {
+ if (base->extension_mappings == NULL) {
+ new->extension_mappings = add->extension_mappings;
+ }
+ else {
+ new->extension_mappings = base->extension_mappings;
+ }
+        /* We may not be merging the tables, but if we might change an
+         * exinfo member, we are about to trounce the shared entry anyway,
+         * so we must have a copy for safety.
+         */
+ if (new->extension_mappings && add->remove_mappings) {
+ new->extension_mappings =
+ apr_hash_copy(p, new->extension_mappings);
+ }
+ }
+
+ if (new->extension_mappings) {
+ if (add->remove_mappings)
+ remove_items(p, add->remove_mappings, new->extension_mappings);
+ }
+ new->remove_mappings = NULL;
+
+ new->default_language = add->default_language ?
+ add->default_language : base->default_language;
+
+ new->multimatch = (add->multimatch != MULTIMATCH_UNSET) ?
+ add->multimatch : base->multimatch;
+
+ if ((add->use_path_info & 2) == 0) {
+ new->use_path_info = add->use_path_info;
+ }
+ else {
+ new->use_path_info = base->use_path_info;
+ }
+
+ return new;
+}
+
+static const char *add_extension_info(cmd_parms *cmd, void *m_,
+ const char *value_, const char* ext)
+{
+ mime_dir_config *m=m_;
+ extension_info *exinfo;
+ int offset = (int) (long) cmd->info;
+ char *key = apr_pstrdup(cmd->temp_pool, ext);
+ char *value = apr_pstrdup(cmd->pool, value_);
+ ap_str_tolower(value);
+ ap_str_tolower(key);
+
+ if (*key == '.') {
+ ++key;
+ }
+ if (!m->extension_mappings) {
+ m->extension_mappings = apr_hash_make(cmd->pool);
+ exinfo = NULL;
+ }
+ else {
+ exinfo = (extension_info*)apr_hash_get(m->extension_mappings, key,
+ APR_HASH_KEY_STRING);
+ }
+ if (!exinfo) {
+ exinfo = apr_pcalloc(cmd->pool, sizeof(extension_info));
+ key = apr_pstrdup(cmd->pool, key);
+ apr_hash_set(m->extension_mappings, key, APR_HASH_KEY_STRING, exinfo);
+ }
+ *(const char**)((char *)exinfo + offset) = value;
+ return NULL;
+}
+
+/*
+ * As RemoveType should also override the info from TypesConfig, we add an
+ * empty string as type instead of actually removing the type.
+ */
+static const char *remove_extension_type(cmd_parms *cmd, void *m_,
+ const char *ext)
+{
+ return add_extension_info(cmd, m_, "", ext);
+}
+
+/*
+ * Note handler names are un-added with each per_dir_config merge.
+ * This keeps the association from being inherited, but not
+ * from being re-added at a subordinate level.
+ */
+static const char *remove_extension_info(cmd_parms *cmd, void *m_,
+ const char *ext)
+{
+ mime_dir_config *m = (mime_dir_config *) m_;
+ attrib_info *suffix;
+ if (*ext == '.') {
+ ++ext;
+ }
+ if (!m->remove_mappings) {
+ m->remove_mappings = apr_array_make(cmd->pool, 4, sizeof(*suffix));
+ }
+ suffix = (attrib_info *)apr_array_push(m->remove_mappings);
+ suffix->name = apr_pstrdup(cmd->pool, ext);
+ ap_str_tolower(suffix->name);
+ suffix->offset = (int) (long) cmd->info;
+ return NULL;
+}
+
+/* The sole bit of server configuration that the MIME module has is
+ * the name of its config file, so...
+ */
+
+static const char *set_types_config(cmd_parms *cmd, void *dummy,
+ const char *arg)
+{
+ ap_set_module_config(cmd->server->module_config, &mime_module,
+ (void *)arg);
+ return NULL;
+}
+
+static const char *multiviews_match(cmd_parms *cmd, void *m_,
+ const char *include)
+{
+ mime_dir_config *m = (mime_dir_config *) m_;
+ const char *errmsg;
+
+ errmsg = ap_check_cmd_context(cmd, NOT_IN_LOCATION);
+ if (errmsg != NULL) {
+ return errmsg;
+ }
+
+ if (strcasecmp(include, "Any") == 0) {
+ if (m->multimatch && (m->multimatch & ~MULTIMATCH_ANY)) {
+ return "Any is incompatible with NegotiatedOnly, "
+ "Filters and Handlers";
+ }
+ m->multimatch |= MULTIMATCH_ANY;
+ }
+ else if (strcasecmp(include, "NegotiatedOnly") == 0) {
+ if (m->multimatch && (m->multimatch & ~MULTIMATCH_NEGOTIATED)) {
+ return "NegotiatedOnly is incompatible with Any, "
+ "Filters and Handlers";
+ }
+ m->multimatch |= MULTIMATCH_NEGOTIATED;
+ }
+ else if (strcasecmp(include, "Filters") == 0) {
+ if (m->multimatch && (m->multimatch & (MULTIMATCH_NEGOTIATED
+ | MULTIMATCH_ANY))) {
+ return "Filters is incompatible with Any and NegotiatedOnly";
+ }
+ m->multimatch |= MULTIMATCH_FILTERS;
+ }
+ else if (strcasecmp(include, "Handlers") == 0) {
+ if (m->multimatch && (m->multimatch & (MULTIMATCH_NEGOTIATED
+ | MULTIMATCH_ANY))) {
+ return "Handlers is incompatible with Any and NegotiatedOnly";
+ }
+ m->multimatch |= MULTIMATCH_HANDLERS;
+ }
+ else {
+ return apr_psprintf(cmd->pool, "Unrecognized option '%s'", include);
+ }
+
+ return NULL;
+}
+
+static const command_rec mime_cmds[] =
+{
+ AP_INIT_ITERATE2("AddCharset", add_extension_info,
+ (void *)APR_OFFSETOF(extension_info, charset_type), OR_FILEINFO,
+ "a charset (e.g., iso-2022-jp), followed by one or more "
+ "file extensions"),
+ AP_INIT_ITERATE2("AddEncoding", add_extension_info,
+ (void *)APR_OFFSETOF(extension_info, encoding_type), OR_FILEINFO,
+ "an encoding (e.g., gzip), followed by one or more file extensions"),
+ AP_INIT_ITERATE2("AddHandler", add_extension_info,
+ (void *)APR_OFFSETOF(extension_info, handler), OR_FILEINFO,
+ "a handler name followed by one or more file extensions"),
+ AP_INIT_ITERATE2("AddInputFilter", add_extension_info,
+ (void *)APR_OFFSETOF(extension_info, input_filters), OR_FILEINFO,
+ "input filter name (or ; delimited names) followed by one or "
+ "more file extensions"),
+ AP_INIT_ITERATE2("AddLanguage", add_extension_info,
+ (void *)APR_OFFSETOF(extension_info, language_type), OR_FILEINFO,
+ "a language (e.g., fr), followed by one or more file extensions"),
+ AP_INIT_ITERATE2("AddOutputFilter", add_extension_info,
+ (void *)APR_OFFSETOF(extension_info, output_filters), OR_FILEINFO,
+ "output filter name (or ; delimited names) followed by one or "
+ "more file extensions"),
+ AP_INIT_ITERATE2("AddType", add_extension_info,
+ (void *)APR_OFFSETOF(extension_info, forced_type), OR_FILEINFO,
+ "a mime type followed by one or more file extensions"),
+ AP_INIT_TAKE1("DefaultLanguage", ap_set_string_slot,
+ (void*)APR_OFFSETOF(mime_dir_config, default_language), OR_FILEINFO,
+ "language to use for documents with no other language file extension"),
+ AP_INIT_ITERATE("MultiviewsMatch", multiviews_match, NULL, OR_FILEINFO,
+ "NegotiatedOnly (default), Handlers and/or Filters, or Any"),
+ AP_INIT_ITERATE("RemoveCharset", remove_extension_info,
+ (void *)APR_OFFSETOF(extension_info, charset_type), OR_FILEINFO,
+ "one or more file extensions"),
+ AP_INIT_ITERATE("RemoveEncoding", remove_extension_info,
+ (void *)APR_OFFSETOF(extension_info, encoding_type), OR_FILEINFO,
+ "one or more file extensions"),
+ AP_INIT_ITERATE("RemoveHandler", remove_extension_info,
+ (void *)APR_OFFSETOF(extension_info, handler), OR_FILEINFO,
+ "one or more file extensions"),
+ AP_INIT_ITERATE("RemoveInputFilter", remove_extension_info,
+ (void *)APR_OFFSETOF(extension_info, input_filters), OR_FILEINFO,
+ "one or more file extensions"),
+ AP_INIT_ITERATE("RemoveLanguage", remove_extension_info,
+ (void *)APR_OFFSETOF(extension_info, language_type), OR_FILEINFO,
+ "one or more file extensions"),
+ AP_INIT_ITERATE("RemoveOutputFilter", remove_extension_info,
+ (void *)APR_OFFSETOF(extension_info, output_filters), OR_FILEINFO,
+ "one or more file extensions"),
+ AP_INIT_ITERATE("RemoveType", remove_extension_type,
+ (void *)APR_OFFSETOF(extension_info, forced_type), OR_FILEINFO,
+ "one or more file extensions"),
+ AP_INIT_TAKE1("TypesConfig", set_types_config, NULL, RSRC_CONF,
+ "the MIME types config file"),
+ AP_INIT_FLAG("ModMimeUsePathInfo", ap_set_flag_slot,
+ (void *)APR_OFFSETOF(mime_dir_config, use_path_info), ACCESS_CONF,
+ "Set to 'yes' to allow mod_mime to use path info for type checking"),
+ {NULL}
+};
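
The command table above shares a single setter across many directives by smuggling the target member's offset through cmd->info via APR_OFFSETOF(). A stripped-down sketch of that pattern, with all names invented and not part of mod_mime:

typedef struct {
    const char *alpha;
    const char *beta;
} demo_conf;

static const char *demo_set(cmd_parms *cmd, void *conf_, const char *val)
{
    /* cmd->info carries the offset of the member this directive targets. */
    char *conf = conf_;
    *(const char **)(conf + (int)(long)cmd->info) =
        apr_pstrdup(cmd->pool, val);
    return NULL;
}

static const command_rec demo_cmds[] = {
    AP_INIT_TAKE1("DemoAlpha", demo_set,
                  (void *)APR_OFFSETOF(demo_conf, alpha), OR_FILEINFO,
                  "value stored in demo_conf.alpha"),
    AP_INIT_TAKE1("DemoBeta", demo_set,
                  (void *)APR_OFFSETOF(demo_conf, beta), OR_FILEINFO,
                  "value stored in demo_conf.beta"),
    {NULL}
};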
+
+static apr_hash_t *mime_type_extensions;
+
+static int mime_post_config(apr_pool_t *p, apr_pool_t *plog, apr_pool_t *ptemp, server_rec *s)
+{
+ ap_configfile_t *f;
+ char l[MAX_STRING_LEN];
+ const char *types_confname = ap_get_module_config(s->module_config,
+ &mime_module);
+ apr_status_t status;
+
+ if (!types_confname) {
+ types_confname = AP_TYPES_CONFIG_FILE;
+ }
+
+ types_confname = ap_server_root_relative(p, types_confname);
+ if (!types_confname) {
+ ap_log_error(APLOG_MARK, APLOG_ERR, APR_EBADPATH, s, APLOGNO(01596)
+ "Invalid mime types config path %s",
+ (const char *)ap_get_module_config(s->module_config,
+ &mime_module));
+ return HTTP_INTERNAL_SERVER_ERROR;
+ }
+ if ((status = ap_pcfg_openfile(&f, ptemp, types_confname))
+ != APR_SUCCESS) {
+ ap_log_error(APLOG_MARK, APLOG_ERR, status, s, APLOGNO(01597)
+ "could not open mime types config file %s.",
+ types_confname);
+ return HTTP_INTERNAL_SERVER_ERROR;
+ }
+
+ mime_type_extensions = apr_hash_make(p);
+
+ while (!(ap_cfg_getline(l, MAX_STRING_LEN, f))) {
+ const char *ll = l, *ct;
+
+ if (l[0] == '#') {
+ continue;
+ }
+ ct = ap_getword_conf(p, &ll);
+
+ while (ll[0]) {
+ char *ext = ap_getword_conf(p, &ll);
+ ap_str_tolower(ext);
+ apr_hash_set(mime_type_extensions, ext, APR_HASH_KEY_STRING, ct);
+ }
+ }
+ ap_cfg_closefile(f);
+ return OK;
+}
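
The loop above reads mime.types-style lines such as "text/html  html htm" and stores one hash entry per lower-cased extension, so a later lookup is a plain hash get. A tiny sketch with an invented helper name:

/* Return the media type registered for a lower-case extension, or NULL. */
static const char *lookup_builtin_type(const char *ext)
{
    return apr_hash_get(mime_type_extensions, ext, APR_HASH_KEY_STRING);
}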
+
+static const char *zap_sp(const char *s)
+{
+ if (s == NULL) {
+ return (NULL);
+ }
+ if (*s == '\0') {
+ return (s);
+ }
+
+ /* skip prefixed white space */
+ for (; *s == ' ' || *s == '\t' || *s == '\n'; s++)
+ ;
+
+ return (s);
+}
+
+static char *zap_sp_and_dup(apr_pool_t *p, const char *start,
+ const char *end, apr_size_t *len)
+{
+ while ((start < end) && apr_isspace(*start)) {
+ start++;
+ }
+ while ((end > start) && apr_isspace(*(end - 1))) {
+ end--;
+ }
+ if (len) {
+ *len = end - start;
+ }
+ return apr_pstrmemdup(p, start, end - start);
+}
+
+static int is_token(char c)
+{
+ int res;
+
+ res = (apr_isascii(c) && apr_isgraph(c)
+ && (strchr(tspecial, c) == NULL)) ? 1 : -1;
+ return res;
+}
+
+static int is_qtext(char c)
+{
+ int res;
+
+ res = (apr_isascii(c) && (c != '"') && (c != '\\') && (c != '\n'))
+ ? 1 : -1;
+ return res;
+}
+
+static int is_quoted_pair(const char *s)
+{
+ int res = -1;
+ int c;
+
+ if (*s == '\\') {
+ c = (int) *(s + 1);
+ if (c && apr_isascii(c)) {
+ res = 1;
+ }
+ }
+ return (res);
+}
+
+static content_type *analyze_ct(request_rec *r, const char *s)
+{
+ const char *cp, *mp;
+ char *attribute, *value;
+ int quoted = 0;
+ server_rec * ss = r->server;
+ apr_pool_t * p = r->pool;
+
+ content_type *ctp;
+ param *pp, *npp;
+
+ /* initialize ctp */
+ ctp = (content_type *)apr_palloc(p, sizeof(content_type));
+ ctp->type = NULL;
+ ctp->subtype = NULL;
+ ctp->param = NULL;
+
+ mp = s;
+
+ /* getting a type */
+ cp = mp;
+ while (apr_isspace(*cp)) {
+ cp++;
+ }
+ if (!*cp) {
+ ap_log_error(APLOG_MARK, APLOG_WARNING, 0, ss, APLOGNO(01598)
+ "mod_mime: analyze_ct: cannot get media type from '%s'",
+ (const char *) mp);
+ return (NULL);
+ }
+ ctp->type = cp;
+ do {
+ cp++;
+ } while (*cp && (*cp != '/') && !apr_isspace(*cp) && (*cp != ';'));
+ if (!*cp || (*cp == ';')) {
+ ap_log_error(APLOG_MARK, APLOG_WARNING, 0, ss, APLOGNO(01599)
+ "Cannot get media type from '%s'",
+ (const char *) mp);
+ return (NULL);
+ }
+ while (apr_isspace(*cp)) {
+ cp++;
+ }
+ if (*cp != '/') {
+ ap_log_error(APLOG_MARK, APLOG_WARNING, 0, ss, APLOGNO(01600)
+ "mod_mime: analyze_ct: cannot get media type from '%s'",
+ (const char *) mp);
+ return (NULL);
+ }
+ ctp->type_len = cp - ctp->type;
+
+ cp++; /* skip the '/' */
+
+ /* getting a subtype */
+ while (apr_isspace(*cp)) {
+ cp++;
+ }
+ if (!*cp) {
+ ap_log_error(APLOG_MARK, APLOG_WARNING, 0, ss, APLOGNO(01601)
+ "Cannot get media subtype.");
+ return (NULL);
+ }
+ ctp->subtype = cp;
+ do {
+ cp++;
+ } while (*cp && !apr_isspace(*cp) && (*cp != ';'));
+ ctp->subtype_len = cp - ctp->subtype;
+ while (apr_isspace(*cp)) {
+ cp++;
+ }
+
+ if (*cp == '\0') {
+ return (ctp);
+ }
+
+ /* getting parameters */
+ cp++; /* skip the ';' */
+ cp = zap_sp(cp);
+ if (cp == NULL || *cp == '\0') {
+ ap_log_error(APLOG_MARK, APLOG_WARNING, 0, ss, APLOGNO(01602)
+ "Cannot get media parameter.");
+ return (NULL);
+ }
+ mp = cp;
+ attribute = NULL;
+ value = NULL;
+
+ while (cp != NULL && *cp != '\0') {
+ if (attribute == NULL) {
+ if (is_token(*cp) > 0) {
+ cp++;
+ continue;
+ }
+ else if (*cp == ' ' || *cp == '\t' || *cp == '\n') {
+ cp++;
+ continue;
+ }
+ else if (*cp == '=') {
+ attribute = zap_sp_and_dup(p, mp, cp, NULL);
+ if (attribute == NULL || *attribute == '\0') {
+ ap_log_error(APLOG_MARK, APLOG_WARNING, 0, ss, APLOGNO(01603)
+ "Cannot get media parameter.");
+ return (NULL);
+ }
+ cp++;
+ cp = zap_sp(cp);
+ if (cp == NULL || *cp == '\0') {
+ ap_log_error(APLOG_MARK, APLOG_WARNING, 0, ss, APLOGNO(01604)
+ "Cannot get media parameter.");
+ return (NULL);
+ }
+ mp = cp;
+ continue;
+ }
+ else {
+ ap_log_error(APLOG_MARK, APLOG_WARNING, 0, ss, APLOGNO(01605)
+ "Cannot get media parameter.");
+ return (NULL);
+ }
+ }
+ else {
+ if (mp == cp) {
+ if (*cp == '"') {
+ quoted = 1;
+ cp++;
+ }
+ else {
+ quoted = 0;
+ }
+ }
+ if (quoted > 0) {
+ while (quoted && *cp != '\0') {
+ if (is_qtext(*cp) > 0) {
+ cp++;
+ }
+ else if (is_quoted_pair(cp) > 0) {
+ cp += 2;
+ }
+ else if (*cp == '"') {
+ cp++;
+ while (*cp == ' ' || *cp == '\t' || *cp == '\n') {
+ cp++;
+ }
+ if (*cp != ';' && *cp != '\0') {
+ ap_log_error(APLOG_MARK, APLOG_WARNING, 0, ss, APLOGNO(01606)
+ "Cannot get media parameter.");
+ return(NULL);
+ }
+ quoted = 0;
+ }
+ else {
+ ap_log_error(APLOG_MARK, APLOG_WARNING, 0, ss, APLOGNO(01607)
+ "Cannot get media parameter.");
+ return (NULL);
+ }
+ }
+ }
+ else {
+ while (1) {
+ if (is_token(*cp) > 0) {
+ cp++;
+ }
+ else if (*cp == '\0' || *cp == ';') {
+ break;
+ }
+ else {
+ ap_log_error(APLOG_MARK, APLOG_WARNING, 0, ss, APLOGNO(01608)
+ "Cannot get media parameter.");
+ return (NULL);
+ }
+ }
+ }
+ value = zap_sp_and_dup(p, mp, cp, NULL);
+ if (value == NULL || *value == '\0') {
+ ap_log_error(APLOG_MARK, APLOG_WARNING, 0, ss, APLOGNO(01609)
+ "Cannot get media parameter.");
+ return (NULL);
+ }
+
+ pp = apr_palloc(p, sizeof(param));
+ pp->attr = attribute;
+ pp->val = value;
+ pp->next = NULL;
+
+ if (ctp->param == NULL) {
+ ctp->param = pp;
+ }
+ else {
+ npp = ctp->param;
+ while (npp->next) {
+ npp = npp->next;
+ }
+ npp->next = pp;
+ }
+ quoted = 0;
+ attribute = NULL;
+ value = NULL;
+ if (*cp == '\0') {
+ break;
+ }
+ cp++;
+ mp = cp;
+ }
+ }
+ return (ctp);
+}
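
For reference, a hedged sketch of what analyze_ct() yields for a typical header value; the wrapper function and its log line are illustrative only, not part of mod_mime.

static void log_ct_sketch(request_rec *r)
{
    content_type *ctp = analyze_ct(r, "text/html; charset=ISO-8859-1");

    if (ctp) {
        /* type/type_len -> "text"/4, subtype/subtype_len -> "html"/4,
         * param list    -> attr "charset", val "ISO-8859-1" */
        ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r,
                      "parsed %.*s/%.*s; %s=%s",
                      (int)ctp->type_len, ctp->type,
                      (int)ctp->subtype_len, ctp->subtype,
                      ctp->param ? ctp->param->attr : "-",
                      ctp->param ? ctp->param->val : "-");
    }
}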
+
+/*
+ * find_ct is the hook routine for determining content-type and other
+ * MIME-related metadata. It assumes that r->filename has already been
+ * set and stat has been called for r->finfo. It also assumes that the
+ * non-path base file name is not the empty string unless it is a dir.
+ */
+static int find_ct(request_rec *r)
+{
+ mime_dir_config *conf;
+ apr_array_header_t *exception_list;
+ char *ext;
+ const char *fn, *fntmp, *type, *charset = NULL, *resource_name, *qm;
+ int found_metadata = 0;
+
+ if (r->finfo.filetype == APR_DIR) {
+ ap_set_content_type(r, DIR_MAGIC_TYPE);
+ return OK;
+ }
+
+ if (!r->filename) {
+ return DECLINED;
+ }
+
+ conf = (mime_dir_config *)ap_get_module_config(r->per_dir_config,
+ &mime_module);
+ exception_list = apr_array_make(r->pool, 2, sizeof(char *));
+
+    /* If use_path_info is explicitly set to on ((value & 1) == 1), append. */
+ if (conf->use_path_info & 1) {
+ resource_name = apr_pstrcat(r->pool, r->filename, r->path_info, NULL);
+ }
+    /*
+     * In the reverse proxy case r->filename might contain a query string if
+     * the nocanon option was used with ProxyPass.
+     * If so, cut off the query string, since its last parameter might end
+     * in an extension we care about; we only want to match against path
+     * components, not against query parameters.
+     */
+ else if ((r->proxyreq == PROXYREQ_REVERSE)
+ && (apr_table_get(r->notes, "proxy-nocanon"))
+ && ((qm = ap_strchr_c(r->filename, '?')) != NULL)) {
+ resource_name = apr_pstrmemdup(r->pool, r->filename, qm - r->filename);
+ }
+ else {
+ resource_name = r->filename;
+ }
+
+ /* Always drop the path leading up to the file name.
+ */
+ if ((fn = ap_strrchr_c(resource_name, '/')) == NULL) {
+ fn = resource_name;
+ }
+ else {
+ ++fn;
+ }
+
+
+ /* The exception list keeps track of those filename components that
+ * are not associated with extensions indicating metadata.
+ * The base name is always the first exception (i.e., "txt.html" has
+ * a basename of "txt" even though it might look like an extension).
+ * Leading dots are considered to be part of the base name (a file named
+ * ".png" is likely not a png file but just a hidden file called png).
+ */
+ fntmp = fn;
+ while (*fntmp == '.')
+ fntmp++;
+ fntmp = ap_strchr_c(fntmp, '.');
+ if (fntmp) {
+ ext = apr_pstrmemdup(r->pool, fn, fntmp - fn);
+ fn = fntmp + 1;
+ }
+ else {
+ ext = apr_pstrdup(r->pool, fn);
+ fn += strlen(fn);
+ }
+
+ *((const char **)apr_array_push(exception_list)) = ext;
+
+ /* Parse filename extensions which can be in any order
+ */
+ while (*fn && (ext = ap_getword(r->pool, &fn, '.'))) {
+ const extension_info *exinfo = NULL;
+ int found;
+ char *extcase;
+
+ if (*ext == '\0') { /* ignore empty extensions "bad..html" */
+ continue;
+ }
+
+ found = 0;
+
+ /* Save the ext in extcase before converting it to lower case.
+ */
+ extcase = apr_pstrdup(r->pool, ext);
+ ap_str_tolower(ext);
+
+ if (conf->extension_mappings != NULL) {
+ exinfo = (extension_info*)apr_hash_get(conf->extension_mappings,
+ ext, APR_HASH_KEY_STRING);
+ }
+
+ if (exinfo == NULL || !exinfo->forced_type) {
+ if ((type = apr_hash_get(mime_type_extensions, ext,
+ APR_HASH_KEY_STRING)) != NULL) {
+ ap_set_content_type(r, (char*) type);
+ found = 1;
+ }
+ }
+
+ if (exinfo != NULL) {
+
+            /* empty string is treated as a special case for RemoveType */
+ if (exinfo->forced_type && *exinfo->forced_type) {
+ ap_set_content_type(r, exinfo->forced_type);
+ found = 1;
+ }
+
+ if (exinfo->charset_type) {
+ charset = exinfo->charset_type;
+ found = 1;
+ }
+ if (exinfo->language_type) {
+ if (!r->content_languages) {
+ r->content_languages = apr_array_make(r->pool, 2,
+ sizeof(char *));
+ }
+ *((const char **)apr_array_push(r->content_languages))
+ = exinfo->language_type;
+ found = 1;
+ }
+ if (exinfo->encoding_type) {
+ if (!r->content_encoding) {
+ r->content_encoding = exinfo->encoding_type;
+ }
+ else {
+ /* XXX should eliminate duplicate entities
+ *
+ * ah no. Order is important and double encoding is neither
+ * forbidden nor impossible. -- nd
+ */
+ r->content_encoding = apr_pstrcat(r->pool,
+ r->content_encoding,
+ ", ",
+ exinfo->encoding_type,
+ NULL);
+ }
+ found = 1;
+ }
+ /* The following extensions are not 'Found'. That is, they don't
+ * make any contribution to metadata negotiation, so they must have
+ * been explicitly requested by name.
+ */
+ if (exinfo->handler && r->proxyreq == PROXYREQ_NONE) {
+ r->handler = exinfo->handler;
+ if (conf->multimatch & MULTIMATCH_HANDLERS) {
+ found = 1;
+ }
+ }
+            /* XXX Two significant problems: 1) we don't check whether we are
+             * setting redundant filters; 2) we insert these in the types
+             * config hook, which may be too early (dunno).
+             */
+ if (exinfo->input_filters) {
+ const char *filter, *filters = exinfo->input_filters;
+ while (*filters
+ && (filter = ap_getword(r->pool, &filters, ';'))) {
+ ap_add_input_filter(filter, NULL, r, r->connection);
+ }
+ if (conf->multimatch & MULTIMATCH_FILTERS) {
+ found = 1;
+ }
+ }
+ if (exinfo->output_filters) {
+ const char *filter, *filters = exinfo->output_filters;
+ while (*filters
+ && (filter = ap_getword(r->pool, &filters, ';'))) {
+ ap_add_output_filter(filter, NULL, r, r->connection);
+ }
+ if (conf->multimatch & MULTIMATCH_FILTERS) {
+ found = 1;
+ }
+ }
+ }
+
+ if (found || (conf->multimatch & MULTIMATCH_ANY)) {
+ found_metadata = 1;
+ }
+ else {
+ *((const char **) apr_array_push(exception_list)) = extcase;
+ }
+ }
+
+ /*
+ * Need to set a notes entry on r for unrecognized elements.
+ * Somebody better claim them! If we did absolutely nothing,
+ * skip the notes to alert mod_negotiation we are clueless.
+ */
+ if (found_metadata) {
+ apr_table_setn(r->notes, "ap-mime-exceptions-list",
+ (void *)exception_list);
+ }
+
+ if (r->content_type) {
+ content_type *ctp;
+ int override = 0;
+
+ if ((ctp = analyze_ct(r, r->content_type))) {
+ param *pp = ctp->param;
+ char *base_content_type = apr_palloc(r->pool, ctp->type_len +
+ ctp->subtype_len +
+ sizeof("/"));
+ char *tmp = base_content_type;
+ memcpy(tmp, ctp->type, ctp->type_len);
+ tmp += ctp->type_len;
+ *tmp++ = '/';
+ memcpy(tmp, ctp->subtype, ctp->subtype_len);
+ tmp += ctp->subtype_len;
+ *tmp = 0;
+ ap_set_content_type(r, base_content_type);
+ while (pp != NULL) {
+ if (charset && !strcmp(pp->attr, "charset")) {
+ if (!override) {
+ ap_set_content_type(r,
+ apr_pstrcat(r->pool,
+ r->content_type,
+ "; charset=",
+ charset,
+ NULL));
+ override = 1;
+ }
+ }
+ else {
+ ap_set_content_type(r,
+ apr_pstrcat(r->pool,
+ r->content_type,
+ "; ", pp->attr,
+ "=", pp->val,
+ NULL));
+ }
+ pp = pp->next;
+ }
+ if (charset && !override) {
+ ap_set_content_type(r, apr_pstrcat(r->pool, r->content_type,
+ "; charset=", charset,
+ NULL));
+ }
+ }
+ }
+
+ /* Set default language, if none was specified by the extensions
+ * and we have a DefaultLanguage setting in force
+ */
+
+ if (!r->content_languages && conf->default_language) {
+ const char **new;
+
+ r->content_languages = apr_array_make(r->pool, 2, sizeof(char *));
+ new = (const char **)apr_array_push(r->content_languages);
+ *new = conf->default_language;
+ }
+
+ if (!r->content_type) {
+ return DECLINED;
+ }
+
+ return OK;
+}
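
The "ap-mime-exceptions-list" note set above smuggles an apr_array_header_t pointer through the notes table, which normally stores strings. A consumer (for example a negotiation module) would retrieve it by casting back; sketch only, helper name invented.

static const apr_array_header_t *get_mime_exceptions(request_rec *r)
{
    /* NULL means mod_mime contributed no metadata for this request. */
    return (const apr_array_header_t *)
        apr_table_get(r->notes, "ap-mime-exceptions-list");
}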
+
+static void register_hooks(apr_pool_t *p)
+{
+ ap_hook_post_config(mime_post_config,NULL,NULL,APR_HOOK_MIDDLE);
+ ap_hook_type_checker(find_ct,NULL,NULL,APR_HOOK_MIDDLE);
+ /*
+ * this hook seems redundant ... is there any reason a type checker isn't
+ * allowed to do this already? I'd think that fixups in general would be
+ * the last opportunity to get the filters right.
+ * ap_hook_insert_filter(mime_insert_filters,NULL,NULL,APR_HOOK_MIDDLE);
+ */
+}
+
+AP_DECLARE_MODULE(mime) = {
+ STANDARD20_MODULE_STUFF,
+ create_mime_dir_config, /* create per-directory config structure */
+ merge_mime_dir_configs, /* merge per-directory config structures */
+ NULL, /* create per-server config structure */
+ NULL, /* merge per-server config structures */
+ mime_cmds, /* command apr_table_t */
+ register_hooks /* register hooks */
+};
diff --git a/modules/http/mod_mime.dep b/modules/http/mod_mime.dep
new file mode 100644
index 0000000..7a195a1
--- /dev/null
+++ b/modules/http/mod_mime.dep
@@ -0,0 +1,55 @@
+# Microsoft Developer Studio Generated Dependency File, included by mod_mime.mak
+
+..\..\build\win32\httpd.rc : \
+ "..\..\include\ap_release.h"\
+
+
+.\mod_mime.c : \
+ "..\..\include\ap_config.h"\
+ "..\..\include\ap_config_layout.h"\
+ "..\..\include\ap_hooks.h"\
+ "..\..\include\ap_mmn.h"\
+ "..\..\include\ap_regex.h"\
+ "..\..\include\ap_release.h"\
+ "..\..\include\apache_noprobes.h"\
+ "..\..\include\http_config.h"\
+ "..\..\include\http_log.h"\
+ "..\..\include\http_protocol.h"\
+ "..\..\include\http_request.h"\
+ "..\..\include\httpd.h"\
+ "..\..\include\os.h"\
+ "..\..\include\util_cfgtree.h"\
+ "..\..\include\util_filter.h"\
+ "..\..\srclib\apr-util\include\apr_buckets.h"\
+ "..\..\srclib\apr-util\include\apr_hooks.h"\
+ "..\..\srclib\apr-util\include\apr_optional.h"\
+ "..\..\srclib\apr-util\include\apr_optional_hooks.h"\
+ "..\..\srclib\apr-util\include\apr_uri.h"\
+ "..\..\srclib\apr-util\include\apu.h"\
+ "..\..\srclib\apr\include\apr.h"\
+ "..\..\srclib\apr\include\apr_allocator.h"\
+ "..\..\srclib\apr\include\apr_dso.h"\
+ "..\..\srclib\apr\include\apr_errno.h"\
+ "..\..\srclib\apr\include\apr_file_info.h"\
+ "..\..\srclib\apr\include\apr_file_io.h"\
+ "..\..\srclib\apr\include\apr_general.h"\
+ "..\..\srclib\apr\include\apr_global_mutex.h"\
+ "..\..\srclib\apr\include\apr_hash.h"\
+ "..\..\srclib\apr\include\apr_inherit.h"\
+ "..\..\srclib\apr\include\apr_lib.h"\
+ "..\..\srclib\apr\include\apr_mmap.h"\
+ "..\..\srclib\apr\include\apr_network_io.h"\
+ "..\..\srclib\apr\include\apr_poll.h"\
+ "..\..\srclib\apr\include\apr_pools.h"\
+ "..\..\srclib\apr\include\apr_portable.h"\
+ "..\..\srclib\apr\include\apr_proc_mutex.h"\
+ "..\..\srclib\apr\include\apr_ring.h"\
+ "..\..\srclib\apr\include\apr_shm.h"\
+ "..\..\srclib\apr\include\apr_strings.h"\
+ "..\..\srclib\apr\include\apr_tables.h"\
+ "..\..\srclib\apr\include\apr_thread_mutex.h"\
+ "..\..\srclib\apr\include\apr_thread_proc.h"\
+ "..\..\srclib\apr\include\apr_time.h"\
+ "..\..\srclib\apr\include\apr_user.h"\
+ "..\..\srclib\apr\include\apr_want.h"\
+
diff --git a/modules/http/mod_mime.dsp b/modules/http/mod_mime.dsp
new file mode 100644
index 0000000..71ba2ad
--- /dev/null
+++ b/modules/http/mod_mime.dsp
@@ -0,0 +1,111 @@
+# Microsoft Developer Studio Project File - Name="mod_mime" - Package Owner=<4>
+# Microsoft Developer Studio Generated Build File, Format Version 6.00
+# ** DO NOT EDIT **
+
+# TARGTYPE "Win32 (x86) Dynamic-Link Library" 0x0102
+
+CFG=mod_mime - Win32 Release
+!MESSAGE This is not a valid makefile. To build this project using NMAKE,
+!MESSAGE use the Export Makefile command and run
+!MESSAGE
+!MESSAGE NMAKE /f "mod_mime.mak".
+!MESSAGE
+!MESSAGE You can specify a configuration when running NMAKE
+!MESSAGE by defining the macro CFG on the command line. For example:
+!MESSAGE
+!MESSAGE NMAKE /f "mod_mime.mak" CFG="mod_mime - Win32 Release"
+!MESSAGE
+!MESSAGE Possible choices for configuration are:
+!MESSAGE
+!MESSAGE "mod_mime - Win32 Release" (based on "Win32 (x86) Dynamic-Link Library")
+!MESSAGE "mod_mime - Win32 Debug" (based on "Win32 (x86) Dynamic-Link Library")
+!MESSAGE
+
+# Begin Project
+# PROP AllowPerConfigDependencies 0
+# PROP Scc_ProjName ""
+# PROP Scc_LocalPath ""
+CPP=cl.exe
+MTL=midl.exe
+RSC=rc.exe
+
+!IF "$(CFG)" == "mod_mime - Win32 Release"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "Release"
+# PROP BASE Intermediate_Dir "Release"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release"
+# PROP Intermediate_Dir "Release"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MD /W3 /O2 /D "WIN32" /D "NDEBUG" /D "_WINDOWS" /FD /c
+# ADD CPP /nologo /MD /W3 /O2 /Oy- /Zi /I "../../include" /I "../../srclib/apr/include" /I "../../srclib/apr-util/include" /D "NDEBUG" /D "WIN32" /D "_WINDOWS" /Fd"Release\mod_mime_src" /FD /c
+# ADD BASE MTL /nologo /D "NDEBUG" /win32
+# ADD MTL /nologo /D "NDEBUG" /mktyplib203 /win32
+# ADD BASE RSC /l 0x409 /d "NDEBUG"
+# ADD RSC /l 0x409 /fo"Release/mod_mime.res" /i "../../include" /i "../../srclib/apr/include" /d "NDEBUG" /d BIN_NAME="mod_mime.so" /d LONG_NAME="mime_module for Apache"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib /nologo /subsystem:windows /dll /out:".\Release\mod_mime.so" /base:@..\..\os\win32\BaseAddr.ref,mod_mime.so
+# ADD LINK32 kernel32.lib /nologo /subsystem:windows /dll /incremental:no /debug /out:".\Release\mod_mime.so" /base:@..\..\os\win32\BaseAddr.ref,mod_mime.so /opt:ref
+# Begin Special Build Tool
+TargetPath=.\Release\mod_mime.so
+SOURCE="$(InputPath)"
+PostBuild_Desc=Embed .manifest
+PostBuild_Cmds=if exist $(TargetPath).manifest mt.exe -manifest $(TargetPath).manifest -outputresource:$(TargetPath);2
+# End Special Build Tool
+
+!ELSEIF "$(CFG)" == "mod_mime - Win32 Debug"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "Debug"
+# PROP BASE Intermediate_Dir "Debug"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug"
+# PROP Intermediate_Dir "Debug"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MDd /W3 /EHsc /Zi /Od /D "WIN32" /D "_DEBUG" /D "_WINDOWS" /FD /c
+# ADD CPP /nologo /MDd /W3 /EHsc /Zi /Od /I "../../include" /I "../../srclib/apr/include" /I "../../srclib/apr-util/include" /D "_DEBUG" /D "WIN32" /D "_WINDOWS" /Fd"Debug\mod_mime_src" /FD /c
+# ADD BASE MTL /nologo /D "_DEBUG" /win32
+# ADD MTL /nologo /D "_DEBUG" /mktyplib203 /win32
+# ADD BASE RSC /l 0x409 /d "_DEBUG"
+# ADD RSC /l 0x409 /fo"Debug/mod_mime.res" /i "../../include" /i "../../srclib/apr/include" /d "_DEBUG" /d BIN_NAME="mod_mime.so" /d LONG_NAME="mime_module for Apache"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib /nologo /subsystem:windows /dll /incremental:no /debug /out:".\Debug\mod_mime.so" /base:@..\..\os\win32\BaseAddr.ref,mod_mime.so
+# ADD LINK32 kernel32.lib /nologo /subsystem:windows /dll /incremental:no /debug /out:".\Debug\mod_mime.so" /base:@..\..\os\win32\BaseAddr.ref,mod_mime.so
+# Begin Special Build Tool
+TargetPath=.\Debug\mod_mime.so
+SOURCE="$(InputPath)"
+PostBuild_Desc=Embed .manifest
+PostBuild_Cmds=if exist $(TargetPath).manifest mt.exe -manifest $(TargetPath).manifest -outputresource:$(TargetPath);2
+# End Special Build Tool
+
+!ENDIF
+
+# Begin Target
+
+# Name "mod_mime - Win32 Release"
+# Name "mod_mime - Win32 Debug"
+# Begin Source File
+
+SOURCE=.\mod_mime.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\..\build\win32\httpd.rc
+# End Source File
+# End Target
+# End Project
diff --git a/modules/http/mod_mime.exp b/modules/http/mod_mime.exp
new file mode 100644
index 0000000..f2e38db
--- /dev/null
+++ b/modules/http/mod_mime.exp
@@ -0,0 +1 @@
+mime_module
diff --git a/modules/http/mod_mime.mak b/modules/http/mod_mime.mak
new file mode 100644
index 0000000..14d106f
--- /dev/null
+++ b/modules/http/mod_mime.mak
@@ -0,0 +1,353 @@
+# Microsoft Developer Studio Generated NMAKE File, Based on mod_mime.dsp
+!IF "$(CFG)" == ""
+CFG=mod_mime - Win32 Release
+!MESSAGE No configuration specified. Defaulting to mod_mime - Win32 Release.
+!ENDIF
+
+!IF "$(CFG)" != "mod_mime - Win32 Release" && "$(CFG)" != "mod_mime - Win32 Debug"
+!MESSAGE Invalid configuration "$(CFG)" specified.
+!MESSAGE You can specify a configuration when running NMAKE
+!MESSAGE by defining the macro CFG on the command line. For example:
+!MESSAGE
+!MESSAGE NMAKE /f "mod_mime.mak" CFG="mod_mime - Win32 Release"
+!MESSAGE
+!MESSAGE Possible choices for configuration are:
+!MESSAGE
+!MESSAGE "mod_mime - Win32 Release" (based on "Win32 (x86) Dynamic-Link Library")
+!MESSAGE "mod_mime - Win32 Debug" (based on "Win32 (x86) Dynamic-Link Library")
+!MESSAGE
+!ERROR An invalid configuration is specified.
+!ENDIF
+
+!IF "$(OS)" == "Windows_NT"
+NULL=
+!ELSE
+NULL=nul
+!ENDIF
+
+!IF "$(CFG)" == "mod_mime - Win32 Release"
+
+OUTDIR=.\Release
+INTDIR=.\Release
+DS_POSTBUILD_DEP=$(INTDIR)\postbld.dep
+# Begin Custom Macros
+OutDir=.\Release
+# End Custom Macros
+
+!IF "$(RECURSE)" == "0"
+
+ALL : "$(OUTDIR)\mod_mime.so" "$(DS_POSTBUILD_DEP)"
+
+!ELSE
+
+ALL : "libhttpd - Win32 Release" "libaprutil - Win32 Release" "libapr - Win32 Release" "$(OUTDIR)\mod_mime.so" "$(DS_POSTBUILD_DEP)"
+
+!ENDIF
+
+!IF "$(RECURSE)" == "1"
+CLEAN :"libapr - Win32 ReleaseCLEAN" "libaprutil - Win32 ReleaseCLEAN" "libhttpd - Win32 ReleaseCLEAN"
+!ELSE
+CLEAN :
+!ENDIF
+ -@erase "$(INTDIR)\mod_mime.obj"
+ -@erase "$(INTDIR)\mod_mime.res"
+ -@erase "$(INTDIR)\mod_mime_src.idb"
+ -@erase "$(INTDIR)\mod_mime_src.pdb"
+ -@erase "$(OUTDIR)\mod_mime.exp"
+ -@erase "$(OUTDIR)\mod_mime.lib"
+ -@erase "$(OUTDIR)\mod_mime.pdb"
+ -@erase "$(OUTDIR)\mod_mime.so"
+
+"$(OUTDIR)" :
+ if not exist "$(OUTDIR)/$(NULL)" mkdir "$(OUTDIR)"
+
+CPP=cl.exe
+CPP_PROJ=/nologo /MD /W3 /Zi /O2 /Oy- /I "../../include" /I "../../srclib/apr/include" /I "../../srclib/apr-util/include" /D "NDEBUG" /D "WIN32" /D "_WINDOWS" /Fo"$(INTDIR)\\" /Fd"$(INTDIR)\mod_mime_src" /FD /c
+
+.c{$(INTDIR)}.obj::
+ $(CPP) @<<
+ $(CPP_PROJ) $<
+<<
+
+.cpp{$(INTDIR)}.obj::
+ $(CPP) @<<
+ $(CPP_PROJ) $<
+<<
+
+.cxx{$(INTDIR)}.obj::
+ $(CPP) @<<
+ $(CPP_PROJ) $<
+<<
+
+.c{$(INTDIR)}.sbr::
+ $(CPP) @<<
+ $(CPP_PROJ) $<
+<<
+
+.cpp{$(INTDIR)}.sbr::
+ $(CPP) @<<
+ $(CPP_PROJ) $<
+<<
+
+.cxx{$(INTDIR)}.sbr::
+ $(CPP) @<<
+ $(CPP_PROJ) $<
+<<
+
+MTL=midl.exe
+MTL_PROJ=/nologo /D "NDEBUG" /mktyplib203 /win32
+RSC=rc.exe
+RSC_PROJ=/l 0x409 /fo"$(INTDIR)\mod_mime.res" /i "../../include" /i "../../srclib/apr/include" /d "NDEBUG" /d BIN_NAME="mod_mime.so" /d LONG_NAME="mime_module for Apache"
+BSC32=bscmake.exe
+BSC32_FLAGS=/nologo /o"$(OUTDIR)\mod_mime.bsc"
+BSC32_SBRS= \
+
+LINK32=link.exe
+LINK32_FLAGS=kernel32.lib /nologo /subsystem:windows /dll /incremental:no /pdb:"$(OUTDIR)\mod_mime.pdb" /debug /out:"$(OUTDIR)\mod_mime.so" /implib:"$(OUTDIR)\mod_mime.lib" /base:@..\..\os\win32\BaseAddr.ref,mod_mime.so /opt:ref
+LINK32_OBJS= \
+ "$(INTDIR)\mod_mime.obj" \
+ "$(INTDIR)\mod_mime.res" \
+ "..\..\srclib\apr\Release\libapr-1.lib" \
+ "..\..\srclib\apr-util\Release\libaprutil-1.lib" \
+ "..\..\Release\libhttpd.lib"
+
+"$(OUTDIR)\mod_mime.so" : "$(OUTDIR)" $(DEF_FILE) $(LINK32_OBJS)
+ $(LINK32) @<<
+ $(LINK32_FLAGS) $(LINK32_OBJS)
+<<
+
+TargetPath=.\Release\mod_mime.so
+SOURCE="$(InputPath)"
+PostBuild_Desc=Embed .manifest
+DS_POSTBUILD_DEP=$(INTDIR)\postbld.dep
+
+# Begin Custom Macros
+OutDir=.\Release
+# End Custom Macros
+
+"$(DS_POSTBUILD_DEP)" : "$(OUTDIR)\mod_mime.so"
+ if exist .\Release\mod_mime.so.manifest mt.exe -manifest .\Release\mod_mime.so.manifest -outputresource:.\Release\mod_mime.so;2
+ echo Helper for Post-build step > "$(DS_POSTBUILD_DEP)"
+
+!ELSEIF "$(CFG)" == "mod_mime - Win32 Debug"
+
+OUTDIR=.\Debug
+INTDIR=.\Debug
+DS_POSTBUILD_DEP=$(INTDIR)\postbld.dep
+# Begin Custom Macros
+OutDir=.\Debug
+# End Custom Macros
+
+!IF "$(RECURSE)" == "0"
+
+ALL : "$(OUTDIR)\mod_mime.so" "$(DS_POSTBUILD_DEP)"
+
+!ELSE
+
+ALL : "libhttpd - Win32 Debug" "libaprutil - Win32 Debug" "libapr - Win32 Debug" "$(OUTDIR)\mod_mime.so" "$(DS_POSTBUILD_DEP)"
+
+!ENDIF
+
+!IF "$(RECURSE)" == "1"
+CLEAN :"libapr - Win32 DebugCLEAN" "libaprutil - Win32 DebugCLEAN" "libhttpd - Win32 DebugCLEAN"
+!ELSE
+CLEAN :
+!ENDIF
+ -@erase "$(INTDIR)\mod_mime.obj"
+ -@erase "$(INTDIR)\mod_mime.res"
+ -@erase "$(INTDIR)\mod_mime_src.idb"
+ -@erase "$(INTDIR)\mod_mime_src.pdb"
+ -@erase "$(OUTDIR)\mod_mime.exp"
+ -@erase "$(OUTDIR)\mod_mime.lib"
+ -@erase "$(OUTDIR)\mod_mime.pdb"
+ -@erase "$(OUTDIR)\mod_mime.so"
+
+"$(OUTDIR)" :
+ if not exist "$(OUTDIR)/$(NULL)" mkdir "$(OUTDIR)"
+
+CPP=cl.exe
+CPP_PROJ=/nologo /MDd /W3 /Zi /Od /I "../../include" /I "../../srclib/apr/include" /I "../../srclib/apr-util/include" /D "_DEBUG" /D "WIN32" /D "_WINDOWS" /Fo"$(INTDIR)\\" /Fd"$(INTDIR)\mod_mime_src" /FD /EHsc /c
+
+.c{$(INTDIR)}.obj::
+ $(CPP) @<<
+ $(CPP_PROJ) $<
+<<
+
+.cpp{$(INTDIR)}.obj::
+ $(CPP) @<<
+ $(CPP_PROJ) $<
+<<
+
+.cxx{$(INTDIR)}.obj::
+ $(CPP) @<<
+ $(CPP_PROJ) $<
+<<
+
+.c{$(INTDIR)}.sbr::
+ $(CPP) @<<
+ $(CPP_PROJ) $<
+<<
+
+.cpp{$(INTDIR)}.sbr::
+ $(CPP) @<<
+ $(CPP_PROJ) $<
+<<
+
+.cxx{$(INTDIR)}.sbr::
+ $(CPP) @<<
+ $(CPP_PROJ) $<
+<<
+
+MTL=midl.exe
+MTL_PROJ=/nologo /D "_DEBUG" /mktyplib203 /win32
+RSC=rc.exe
+RSC_PROJ=/l 0x409 /fo"$(INTDIR)\mod_mime.res" /i "../../include" /i "../../srclib/apr/include" /d "_DEBUG" /d BIN_NAME="mod_mime.so" /d LONG_NAME="mime_module for Apache"
+BSC32=bscmake.exe
+BSC32_FLAGS=/nologo /o"$(OUTDIR)\mod_mime.bsc"
+BSC32_SBRS= \
+
+LINK32=link.exe
+LINK32_FLAGS=kernel32.lib /nologo /subsystem:windows /dll /incremental:no /pdb:"$(OUTDIR)\mod_mime.pdb" /debug /out:"$(OUTDIR)\mod_mime.so" /implib:"$(OUTDIR)\mod_mime.lib" /base:@..\..\os\win32\BaseAddr.ref,mod_mime.so
+LINK32_OBJS= \
+ "$(INTDIR)\mod_mime.obj" \
+ "$(INTDIR)\mod_mime.res" \
+ "..\..\srclib\apr\Debug\libapr-1.lib" \
+ "..\..\srclib\apr-util\Debug\libaprutil-1.lib" \
+ "..\..\Debug\libhttpd.lib"
+
+"$(OUTDIR)\mod_mime.so" : "$(OUTDIR)" $(DEF_FILE) $(LINK32_OBJS)
+ $(LINK32) @<<
+ $(LINK32_FLAGS) $(LINK32_OBJS)
+<<
+
+TargetPath=.\Debug\mod_mime.so
+SOURCE="$(InputPath)"
+PostBuild_Desc=Embed .manifest
+DS_POSTBUILD_DEP=$(INTDIR)\postbld.dep
+
+# Begin Custom Macros
+OutDir=.\Debug
+# End Custom Macros
+
+"$(DS_POSTBUILD_DEP)" : "$(OUTDIR)\mod_mime.so"
+ if exist .\Debug\mod_mime.so.manifest mt.exe -manifest .\Debug\mod_mime.so.manifest -outputresource:.\Debug\mod_mime.so;2
+ echo Helper for Post-build step > "$(DS_POSTBUILD_DEP)"
+
+!ENDIF
+
+
+!IF "$(NO_EXTERNAL_DEPS)" != "1"
+!IF EXISTS("mod_mime.dep")
+!INCLUDE "mod_mime.dep"
+!ELSE
+!MESSAGE Warning: cannot find "mod_mime.dep"
+!ENDIF
+!ENDIF
+
+
+!IF "$(CFG)" == "mod_mime - Win32 Release" || "$(CFG)" == "mod_mime - Win32 Debug"
+
+!IF "$(CFG)" == "mod_mime - Win32 Release"
+
+"libapr - Win32 Release" :
+ cd ".\..\..\srclib\apr"
+ $(MAKE) /$(MAKEFLAGS) /F ".\libapr.mak" CFG="libapr - Win32 Release"
+ cd "..\..\modules\http"
+
+"libapr - Win32 ReleaseCLEAN" :
+ cd ".\..\..\srclib\apr"
+ $(MAKE) /$(MAKEFLAGS) /F ".\libapr.mak" CFG="libapr - Win32 Release" RECURSE=1 CLEAN
+ cd "..\..\modules\http"
+
+!ELSEIF "$(CFG)" == "mod_mime - Win32 Debug"
+
+"libapr - Win32 Debug" :
+ cd ".\..\..\srclib\apr"
+ $(MAKE) /$(MAKEFLAGS) /F ".\libapr.mak" CFG="libapr - Win32 Debug"
+ cd "..\..\modules\http"
+
+"libapr - Win32 DebugCLEAN" :
+ cd ".\..\..\srclib\apr"
+ $(MAKE) /$(MAKEFLAGS) /F ".\libapr.mak" CFG="libapr - Win32 Debug" RECURSE=1 CLEAN
+ cd "..\..\modules\http"
+
+!ENDIF
+
+!IF "$(CFG)" == "mod_mime - Win32 Release"
+
+"libaprutil - Win32 Release" :
+ cd ".\..\..\srclib\apr-util"
+ $(MAKE) /$(MAKEFLAGS) /F ".\libaprutil.mak" CFG="libaprutil - Win32 Release"
+ cd "..\..\modules\http"
+
+"libaprutil - Win32 ReleaseCLEAN" :
+ cd ".\..\..\srclib\apr-util"
+ $(MAKE) /$(MAKEFLAGS) /F ".\libaprutil.mak" CFG="libaprutil - Win32 Release" RECURSE=1 CLEAN
+ cd "..\..\modules\http"
+
+!ELSEIF "$(CFG)" == "mod_mime - Win32 Debug"
+
+"libaprutil - Win32 Debug" :
+ cd ".\..\..\srclib\apr-util"
+ $(MAKE) /$(MAKEFLAGS) /F ".\libaprutil.mak" CFG="libaprutil - Win32 Debug"
+ cd "..\..\modules\http"
+
+"libaprutil - Win32 DebugCLEAN" :
+ cd ".\..\..\srclib\apr-util"
+ $(MAKE) /$(MAKEFLAGS) /F ".\libaprutil.mak" CFG="libaprutil - Win32 Debug" RECURSE=1 CLEAN
+ cd "..\..\modules\http"
+
+!ENDIF
+
+!IF "$(CFG)" == "mod_mime - Win32 Release"
+
+"libhttpd - Win32 Release" :
+ cd ".\..\.."
+ $(MAKE) /$(MAKEFLAGS) /F ".\libhttpd.mak" CFG="libhttpd - Win32 Release"
+ cd ".\modules\http"
+
+"libhttpd - Win32 ReleaseCLEAN" :
+ cd ".\..\.."
+ $(MAKE) /$(MAKEFLAGS) /F ".\libhttpd.mak" CFG="libhttpd - Win32 Release" RECURSE=1 CLEAN
+ cd ".\modules\http"
+
+!ELSEIF "$(CFG)" == "mod_mime - Win32 Debug"
+
+"libhttpd - Win32 Debug" :
+ cd ".\..\.."
+ $(MAKE) /$(MAKEFLAGS) /F ".\libhttpd.mak" CFG="libhttpd - Win32 Debug"
+ cd ".\modules\http"
+
+"libhttpd - Win32 DebugCLEAN" :
+ cd ".\..\.."
+ $(MAKE) /$(MAKEFLAGS) /F ".\libhttpd.mak" CFG="libhttpd - Win32 Debug" RECURSE=1 CLEAN
+ cd ".\modules\http"
+
+!ENDIF
+
+SOURCE=..\..\build\win32\httpd.rc
+
+!IF "$(CFG)" == "mod_mime - Win32 Release"
+
+
+"$(INTDIR)\mod_mime.res" : $(SOURCE) "$(INTDIR)"
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_mime.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "NDEBUG" /d BIN_NAME="mod_mime.so" /d LONG_NAME="mime_module for Apache" $(SOURCE)
+
+
+!ELSEIF "$(CFG)" == "mod_mime - Win32 Debug"
+
+
+"$(INTDIR)\mod_mime.res" : $(SOURCE) "$(INTDIR)"
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_mime.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "_DEBUG" /d BIN_NAME="mod_mime.so" /d LONG_NAME="mime_module for Apache" $(SOURCE)
+
+
+!ENDIF
+
+SOURCE=.\mod_mime.c
+
+"$(INTDIR)\mod_mime.obj" : $(SOURCE) "$(INTDIR)"
+
+
+
+!ENDIF
+
--
cgit v1.2.3