summaryrefslogtreecommitdiffstats
path: root/modules
diff options
context:
space:
mode:
authorDaniel Baumann <daniel.baumann@progress-linux.org>2024-05-04 10:03:19 +0000
committerDaniel Baumann <daniel.baumann@progress-linux.org>2024-05-04 10:03:19 +0000
commitac45e40b4d3cdc2497d5b6f5efe7d8a927beca21 (patch)
tree96057c4c489cab1e8c112d689befe2b2b9fa34e7 /modules
parentReleasing progress-linux version 2.4.57-2~progress6.99u1. (diff)
downloadapache2-ac45e40b4d3cdc2497d5b6f5efe7d8a927beca21.tar.xz
apache2-ac45e40b4d3cdc2497d5b6f5efe7d8a927beca21.zip
Merging upstream version 2.4.59.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to '')
-rw-r--r--modules/aaa/mod_auth_basic.c6
-rw-r--r--modules/aaa/mod_authnz_fcgi.c8
-rw-r--r--modules/cache/mod_socache_shmcb.c2
-rw-r--r--modules/core/mod_macro.c2
-rw-r--r--modules/dav/fs/repos.c14
-rw-r--r--modules/dav/main/mod_dav.c40
-rw-r--r--modules/dav/main/mod_dav.h1
-rw-r--r--modules/filters/mod_deflate.c259
-rw-r--r--modules/filters/mod_xml2enc.c26
-rw-r--r--modules/generators/mod_cgi.c63
-rw-r--r--modules/generators/mod_cgid.c15
-rw-r--r--modules/generators/mod_status.c48
-rw-r--r--modules/http/http_filters.c40
-rw-r--r--modules/http2/config2.m41
-rw-r--r--modules/http2/h2.h19
-rw-r--r--modules/http2/h2_bucket_beam.c60
-rw-r--r--modules/http2/h2_bucket_beam.h19
-rw-r--r--modules/http2/h2_c1_io.c28
-rw-r--r--modules/http2/h2_c1_io.h3
-rw-r--r--modules/http2/h2_c2.c192
-rw-r--r--modules/http2/h2_c2_filter.c50
-rw-r--r--modules/http2/h2_config.c140
-rw-r--r--modules/http2/h2_config.h4
-rw-r--r--modules/http2/h2_conn_ctx.h4
-rw-r--r--modules/http2/h2_headers.c14
-rw-r--r--modules/http2/h2_mplx.c113
-rw-r--r--modules/http2/h2_mplx.h18
-rw-r--r--modules/http2/h2_proxy_session.c98
-rw-r--r--modules/http2/h2_proxy_session.h5
-rw-r--r--modules/http2/h2_push.c3
-rw-r--r--modules/http2/h2_request.c148
-rw-r--r--modules/http2/h2_session.c75
-rw-r--r--modules/http2/h2_session.h3
-rw-r--r--modules/http2/h2_stream.c136
-rw-r--r--modules/http2/h2_stream.h18
-rw-r--r--modules/http2/h2_switch.c5
-rw-r--r--modules/http2/h2_util.c19
-rw-r--r--modules/http2/h2_version.h4
-rw-r--r--modules/http2/h2_ws.c362
-rw-r--r--modules/http2/h2_ws.h35
-rw-r--r--modules/http2/mod_http2.c2
-rw-r--r--modules/http2/mod_http2.dsp4
-rw-r--r--modules/http2/mod_http2.h23
-rw-r--r--modules/http2/mod_proxy_http2.c110
-rw-r--r--modules/ldap/util_ldap.c51
-rw-r--r--modules/ldap/util_ldap_cache.c14
-rw-r--r--modules/mappers/config9.m45
-rw-r--r--modules/mappers/mod_alias.c131
-rw-r--r--modules/mappers/mod_rewrite.c14
-rw-r--r--modules/mappers/mod_rewrite.mak4
-rw-r--r--modules/md/md.h18
-rw-r--r--modules/md/md_acme_authz.c47
-rw-r--r--modules/md/md_acme_order.c4
-rw-r--r--modules/md/md_crypt.c47
-rw-r--r--modules/md/md_util.c29
-rw-r--r--modules/md/md_util.h7
-rw-r--r--modules/md/md_version.h4
-rw-r--r--modules/md/mod_md.c56
-rw-r--r--modules/md/mod_md_config.c46
-rw-r--r--modules/md/mod_md_config.h6
-rw-r--r--modules/md/mod_md_status.c2
-rw-r--r--modules/proxy/ajp_header.c10
-rw-r--r--modules/proxy/balancers/mod_lbmethod_heartbeat.c2
-rw-r--r--modules/proxy/mod_proxy.c52
-rw-r--r--modules/proxy/mod_proxy.h33
-rw-r--r--modules/proxy/mod_proxy_ajp.c35
-rw-r--r--modules/proxy/mod_proxy_fcgi.c9
-rw-r--r--modules/proxy/mod_proxy_ftp.c99
-rw-r--r--modules/proxy/mod_proxy_hcheck.c136
-rw-r--r--modules/proxy/mod_proxy_http.c3
-rw-r--r--modules/proxy/mod_proxy_scgi.c8
-rw-r--r--modules/proxy/mod_proxy_uwsgi.c6
-rw-r--r--modules/proxy/proxy_util.c1098
-rw-r--r--modules/slotmem/mod_slotmem_shm.c4
-rw-r--r--modules/ssl/mod_ssl.c5
-rw-r--r--modules/ssl/mod_ssl_openssl.h9
-rw-r--r--modules/ssl/ssl_engine_config.c15
-rw-r--r--modules/ssl/ssl_engine_init.c281
-rw-r--r--modules/ssl/ssl_engine_io.c51
-rw-r--r--modules/ssl/ssl_engine_kernel.c10
-rw-r--r--modules/ssl/ssl_engine_pphrase.c7
-rw-r--r--modules/ssl/ssl_private.h67
-rw-r--r--modules/ssl/ssl_util.c2
-rw-r--r--modules/ssl/ssl_util_ocsp.c5
-rw-r--r--modules/ssl/ssl_util_ssl.c35
-rw-r--r--modules/ssl/ssl_util_stapling.c14
-rw-r--r--modules/tls/config2.m43
-rw-r--r--modules/tls/tls_core.c4
88 files changed, 3468 insertions, 1259 deletions
diff --git a/modules/aaa/mod_auth_basic.c b/modules/aaa/mod_auth_basic.c
index 4e1d47f..c8c9492 100644
--- a/modules/aaa/mod_auth_basic.c
+++ b/modules/aaa/mod_auth_basic.c
@@ -40,9 +40,9 @@ typedef struct {
ap_expr_info_t *fakeuser;
ap_expr_info_t *fakepass;
const char *use_digest_algorithm;
- int fake_set:1;
- int use_digest_algorithm_set:1;
- int authoritative_set:1;
+ unsigned int fake_set:1,
+ use_digest_algorithm_set:1,
+ authoritative_set:1;
} auth_basic_config_rec;
static void *create_auth_basic_dir_config(apr_pool_t *p, char *d)
diff --git a/modules/aaa/mod_authnz_fcgi.c b/modules/aaa/mod_authnz_fcgi.c
index 1aadcc2..69743f1 100644
--- a/modules/aaa/mod_authnz_fcgi.c
+++ b/modules/aaa/mod_authnz_fcgi.c
@@ -571,6 +571,14 @@ static apr_status_t handle_response(const fcgi_provider_conf *conf,
"parsing -> %d/%d",
fn, status, r->status);
+ /* FCGI has its own body framing mechanism which we don't
+ * match against any provided Content-Length, so let the
+ * core determine C-L vs T-E based on what's actually sent.
+ */
+ if (!apr_table_get(r->subprocess_env, AP_TRUST_CGILIKE_CL_ENVVAR))
+ apr_table_unset(r->headers_out, "Content-Length");
+ apr_table_unset(r->headers_out, "Transfer-Encoding");
+
if (rspbuf) { /* caller wants to see response body,
* if any
*/
diff --git a/modules/cache/mod_socache_shmcb.c b/modules/cache/mod_socache_shmcb.c
index 4727961..1785839 100644
--- a/modules/cache/mod_socache_shmcb.c
+++ b/modules/cache/mod_socache_shmcb.c
@@ -793,7 +793,6 @@ static int shmcb_subcache_store(server_rec *s, SHMCBHeader *header,
*/
if (header->subcache_data_size - subcache->data_used < total_len
|| subcache->idx_used == header->index_num) {
- unsigned int loop = 0;
idx = SHMCB_INDEX(subcache, subcache->idx_pos);
ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s, APLOGNO(00845)
@@ -820,7 +819,6 @@ static int shmcb_subcache_store(server_rec *s, SHMCBHeader *header,
header->stat_scrolled++;
/* Loop admin */
idx = idx2;
- loop++;
} while (header->subcache_data_size - subcache->data_used < total_len);
ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s, APLOGNO(00846)
diff --git a/modules/core/mod_macro.c b/modules/core/mod_macro.c
index 04af43b..cc42d0b 100644
--- a/modules/core/mod_macro.c
+++ b/modules/core/mod_macro.c
@@ -465,7 +465,7 @@ static const char *process_content(apr_pool_t * pool,
for (i = 0; i < contents->nelts; i++) {
const char *errmsg;
/* copy the line and substitute macro parameters */
- strncpy(line, ((char **) contents->elts)[i], MAX_STRING_LEN - 1);
+ apr_cpystrn(line, ((char **) contents->elts)[i], MAX_STRING_LEN);
errmsg = substitute_macro_args(line, MAX_STRING_LEN,
macro, replacements, used);
if (errmsg) {
diff --git a/modules/dav/fs/repos.c b/modules/dav/fs/repos.c
index d38868c..64bc894 100644
--- a/modules/dav/fs/repos.c
+++ b/modules/dav/fs/repos.c
@@ -35,6 +35,7 @@
#include "mod_dav.h"
#include "repos.h"
+APLOG_USE_MODULE(dav_fs);
/* to assist in debugging mod_dav's GET handling */
#define DEBUG_GET_HANDLER 0
@@ -1586,6 +1587,19 @@ static dav_error * dav_fs_walker(dav_fs_walker_context *fsctx, int depth)
status = apr_stat(&fsctx->info1.finfo, fsctx->path1.buf,
DAV_FINFO_MASK, pool);
if (status != APR_SUCCESS && status != APR_INCOMPLETE) {
+ dav_resource_private *ctx = params->root->info;
+
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, status, ctx->r,
+ APLOGNO(10472) "could not access file (%s) during directory walk",
+ fsctx->path1.buf);
+
+ /* If being tolerant, ignore failure due to losing a race
+ * with some other process deleting files out from under
+ * the directory walk. */
+ if ((params->walk_type & DAV_WALKTYPE_TOLERANT)
+ && APR_STATUS_IS_ENOENT(status)) {
+ continue;
+ }
/* woah! where'd it go? */
/* ### should have a better error here */
err = dav_new_error(pool, HTTP_NOT_FOUND, 0, status, NULL);
diff --git a/modules/dav/main/mod_dav.c b/modules/dav/main/mod_dav.c
index d16ab4a..dea3f18 100644
--- a/modules/dav/main/mod_dav.c
+++ b/modules/dav/main/mod_dav.c
@@ -81,6 +81,7 @@ typedef struct {
const char *provider_name;
const dav_provider *provider;
const char *dir;
+ const char *base;
int locktimeout;
int allow_depthinfinity;
int allow_lockdiscovery;
@@ -196,6 +197,7 @@ static void *dav_merge_dir_config(apr_pool_t *p, void *base, void *overrides)
newconf->locktimeout = DAV_INHERIT_VALUE(parent, child, locktimeout);
newconf->dir = DAV_INHERIT_VALUE(parent, child, dir);
+ newconf->base = DAV_INHERIT_VALUE(parent, child, base);
newconf->allow_depthinfinity = DAV_INHERIT_VALUE(parent, child,
allow_depthinfinity);
newconf->allow_lockdiscovery = DAV_INHERIT_VALUE(parent, child,
@@ -283,6 +285,18 @@ static const char *dav_cmd_dav(cmd_parms *cmd, void *config, const char *arg1)
}
/*
+ * Command handler for the DAVBasePath directive, which is TAKE1
+ */
+static const char *dav_cmd_davbasepath(cmd_parms *cmd, void *config, const char *arg1)
+{
+ dav_dir_conf *conf = config;
+
+ conf->base = arg1;
+
+ return NULL;
+}
+
+/*
* Command handler for the DAVDepthInfinity directive, which is FLAG.
*/
static const char *dav_cmd_davdepthinfinity(cmd_parms *cmd, void *config,
@@ -748,7 +762,7 @@ DAV_DECLARE(dav_error *) dav_get_resource(request_rec *r, int label_allowed,
int use_checked_in, dav_resource **res_p)
{
dav_dir_conf *conf;
- const char *label = NULL;
+ const char *label = NULL, *base;
dav_error *err;
/* if the request target can be overridden, get any target selector */
@@ -765,11 +779,27 @@ DAV_DECLARE(dav_error *) dav_get_resource(request_rec *r, int label_allowed,
ap_escape_html(r->pool, r->uri)));
}
+ /* Take the repos root from DAVBasePath if configured, else the
+ * path of the enclosing section. */
+ base = conf->base ? conf->base : conf->dir;
+
/* resolve the resource */
- err = (*conf->provider->repos->get_resource)(r, conf->dir,
+ err = (*conf->provider->repos->get_resource)(r, base,
label, use_checked_in,
res_p);
if (err != NULL) {
+ /* In the error path, give a hint that DavBasePath needs to be
+ * used if the location was configured via a regex match. */
+ if (!conf->base) {
+ core_dir_config *cdc = ap_get_core_module_config(r->per_dir_config);
+
+ if (cdc->r) {
+ ap_log_error(APLOG_MARK, APLOG_ERR, 0, NULL, APLOGNO(10484)
+ "failed to find repository for location configured "
+ "via regex match - missing DAVBasePath?");
+ }
+ }
+
err = dav_push_error(r->pool, err->status, 0,
"Could not fetch resource information.", err);
return err;
@@ -2187,7 +2217,7 @@ static int dav_method_propfind(request_rec *r)
return HTTP_BAD_REQUEST;
}
- ctx.w.walk_type = DAV_WALKTYPE_NORMAL | DAV_WALKTYPE_AUTH;
+ ctx.w.walk_type = DAV_WALKTYPE_NORMAL | DAV_WALKTYPE_AUTH | DAV_WALKTYPE_TOLERANT;
ctx.w.func = dav_propfind_walker;
ctx.w.walk_ctx = &ctx;
ctx.w.pool = r->pool;
@@ -5164,6 +5194,10 @@ static const command_rec dav_cmds[] =
AP_INIT_TAKE1("DAV", dav_cmd_dav, NULL, ACCESS_CONF,
"specify the DAV provider for a directory or location"),
+ /* per directory/location */
+ AP_INIT_TAKE1("DAVBasePath", dav_cmd_davbasepath, NULL, ACCESS_CONF,
+ "specify the DAV repository base URL"),
+
/* per directory/location, or per server */
AP_INIT_TAKE1("DAVMinTimeout", dav_cmd_davmintimeout, NULL,
ACCESS_CONF|RSRC_CONF,
diff --git a/modules/dav/main/mod_dav.h b/modules/dav/main/mod_dav.h
index eca34a2..c8c54f3 100644
--- a/modules/dav/main/mod_dav.h
+++ b/modules/dav/main/mod_dav.h
@@ -1823,6 +1823,7 @@ typedef struct
#define DAV_WALKTYPE_AUTH 0x0001 /* limit to authorized files */
#define DAV_WALKTYPE_NORMAL 0x0002 /* walk normal files */
#define DAV_WALKTYPE_LOCKNULL 0x0004 /* walk locknull resources */
+#define DAV_WALKTYPE_TOLERANT 0x0008 /* tolerate non-fatal errors */
/* callback function and a client context for the walk */
dav_error * (*func)(dav_walk_resource *wres, int calltype);
diff --git a/modules/filters/mod_deflate.c b/modules/filters/mod_deflate.c
index 2431fd7..5a541e7 100644
--- a/modules/filters/mod_deflate.c
+++ b/modules/filters/mod_deflate.c
@@ -57,15 +57,20 @@ module AP_MODULE_DECLARE_DATA deflate_module;
#define AP_INFLATE_RATIO_LIMIT 200
#define AP_INFLATE_RATIO_BURST 3
+#define AP_DEFLATE_ETAG_ADDSUFFIX 0
+#define AP_DEFLATE_ETAG_NOCHANGE 1
+#define AP_DEFLATE_ETAG_REMOVE 2
+
typedef struct deflate_filter_config_t
{
int windowSize;
int memlevel;
int compressionlevel;
- apr_size_t bufferSize;
+ int bufferSize;
const char *note_ratio_name;
const char *note_input_name;
const char *note_output_name;
+ int etag_opt;
} deflate_filter_config;
typedef struct deflate_dirconf_t {
@@ -249,7 +254,7 @@ static const char *deflate_set_buffer_size(cmd_parms *cmd, void *dummy,
return "DeflateBufferSize should be positive";
}
- c->bufferSize = (apr_size_t)n;
+ c->bufferSize = n;
return NULL;
}
@@ -295,6 +300,29 @@ static const char *deflate_set_memlevel(cmd_parms *cmd, void *dummy,
return NULL;
}
+static const char *deflate_set_etag(cmd_parms *cmd, void *dummy,
+ const char *arg)
+{
+ deflate_filter_config *c = ap_get_module_config(cmd->server->module_config,
+ &deflate_module);
+
+ if (!strcasecmp(arg, "NoChange")) {
+ c->etag_opt = AP_DEFLATE_ETAG_NOCHANGE;
+ }
+ else if (!strcasecmp(arg, "AddSuffix")) {
+ c->etag_opt = AP_DEFLATE_ETAG_ADDSUFFIX;
+ }
+ else if (!strcasecmp(arg, "Remove")) {
+ c->etag_opt = AP_DEFLATE_ETAG_REMOVE;
+ }
+ else {
+ return "DeflateAlterETAG accepts only 'NoChange', 'AddSuffix', and 'Remove'";
+ }
+
+ return NULL;
+}
+
+
static const char *deflate_set_compressionlevel(cmd_parms *cmd, void *dummy,
const char *arg)
{
@@ -388,35 +416,40 @@ typedef struct deflate_ctx_t
/* Do update ctx->crc, see comment in flush_libz_buffer */
#define UPDATE_CRC 1
+static void consume_buffer(deflate_ctx *ctx, deflate_filter_config *c,
+ int len, int crc, apr_bucket_brigade *bb)
+{
+ apr_bucket *b;
+
+ /*
+ * Do we need to update ctx->crc? Usually this is the case for
+ * inflate action where we need to do a crc on the output, whereas
+ * in the deflate case we need to do a crc on the input
+ */
+ if (crc) {
+ ctx->crc = crc32(ctx->crc, (const Bytef *)ctx->buffer, len);
+ }
+
+ b = apr_bucket_heap_create((char *)ctx->buffer, len, NULL,
+ bb->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(bb, b);
+
+ ctx->stream.next_out = ctx->buffer;
+ ctx->stream.avail_out = c->bufferSize;
+}
+
static int flush_libz_buffer(deflate_ctx *ctx, deflate_filter_config *c,
- struct apr_bucket_alloc_t *bucket_alloc,
int (*libz_func)(z_streamp, int), int flush,
int crc)
{
int zRC = Z_OK;
int done = 0;
- unsigned int deflate_len;
- apr_bucket *b;
+ int deflate_len;
for (;;) {
deflate_len = c->bufferSize - ctx->stream.avail_out;
-
- if (deflate_len != 0) {
- /*
- * Do we need to update ctx->crc? Usually this is the case for
- * inflate action where we need to do a crc on the output, whereas
- * in the deflate case we need to do a crc on the input
- */
- if (crc) {
- ctx->crc = crc32(ctx->crc, (const Bytef *)ctx->buffer,
- deflate_len);
- }
- b = apr_bucket_heap_create((char *)ctx->buffer,
- deflate_len, NULL,
- bucket_alloc);
- APR_BRIGADE_INSERT_TAIL(ctx->bb, b);
- ctx->stream.next_out = ctx->buffer;
- ctx->stream.avail_out = c->bufferSize;
+ if (deflate_len > 0) {
+ consume_buffer(ctx, c, deflate_len, crc, ctx->bb);
}
if (done)
@@ -464,11 +497,16 @@ static apr_status_t deflate_ctx_cleanup(void *data)
* value inside the double-quotes if an ETag has already been set
* and its value already contains double-quotes. PR 39727
*/
-static void deflate_check_etag(request_rec *r, const char *transform)
+static void deflate_check_etag(request_rec *r, const char *transform, int etag_opt)
{
const char *etag = apr_table_get(r->headers_out, "ETag");
apr_size_t etaglen;
+ if (etag_opt == AP_DEFLATE_ETAG_REMOVE) {
+ apr_table_unset(r->headers_out, "ETag");
+ return;
+ }
+
if ((etag && ((etaglen = strlen(etag)) > 2))) {
if (etag[etaglen - 1] == '"') {
apr_size_t transformlen = strlen(transform);
@@ -527,6 +565,7 @@ static apr_status_t deflate_out_filter(ap_filter_t *f,
request_rec *r = f->r;
deflate_ctx *ctx = f->ctx;
int zRC;
+ apr_status_t rv;
apr_size_t len = 0, blen;
const char *data;
deflate_filter_config *c;
@@ -809,7 +848,9 @@ static apr_status_t deflate_out_filter(ap_filter_t *f,
}
apr_table_unset(r->headers_out, "Content-Length");
apr_table_unset(r->headers_out, "Content-MD5");
- deflate_check_etag(r, "gzip");
+ if (c->etag_opt != AP_DEFLATE_ETAG_NOCHANGE) {
+ deflate_check_etag(r, "gzip", c->etag_opt);
+ }
/* For a 304 response, only change the headers */
if (r->status == HTTP_NOT_MODIFIED) {
@@ -856,8 +897,7 @@ static apr_status_t deflate_out_filter(ap_filter_t *f,
ctx->stream.avail_in = 0; /* should be zero already anyway */
/* flush the remaining data from the zlib buffers */
- flush_libz_buffer(ctx, c, f->c->bucket_alloc, deflate, Z_FINISH,
- NO_UPDATE_CRC);
+ flush_libz_buffer(ctx, c, deflate, Z_FINISH, NO_UPDATE_CRC);
buf = apr_palloc(r->pool, VALIDATION_SIZE);
putLong((unsigned char *)&buf[0], ctx->crc);
@@ -900,6 +940,10 @@ static apr_status_t deflate_out_filter(ap_filter_t *f,
}
deflateEnd(&ctx->stream);
+
+ /* We've ended the libz stream, so remove ourselves. */
+ ap_remove_output_filter(f);
+
/* No need for cleanup any longer */
apr_pool_cleanup_kill(r->pool, ctx, deflate_ctx_cleanup);
@@ -910,15 +954,15 @@ static apr_status_t deflate_out_filter(ap_filter_t *f,
/* Okay, we've seen the EOS.
* Time to pass it along down the chain.
*/
- return ap_pass_brigade(f->next, ctx->bb);
+ rv = ap_pass_brigade(f->next, ctx->bb);
+ apr_brigade_cleanup(ctx->bb);
+ return rv;
}
if (APR_BUCKET_IS_FLUSH(e)) {
- apr_status_t rv;
-
/* flush the remaining data from the zlib buffers */
- zRC = flush_libz_buffer(ctx, c, f->c->bucket_alloc, deflate,
- Z_SYNC_FLUSH, NO_UPDATE_CRC);
+ zRC = flush_libz_buffer(ctx, c, deflate, Z_SYNC_FLUSH,
+ NO_UPDATE_CRC);
if (zRC != Z_OK) {
ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(01385)
"Zlib error %d flushing zlib output buffer (%s)",
@@ -930,6 +974,7 @@ static apr_status_t deflate_out_filter(ap_filter_t *f,
APR_BUCKET_REMOVE(e);
APR_BRIGADE_INSERT_TAIL(ctx->bb, e);
rv = ap_pass_brigade(f->next, ctx->bb);
+ apr_brigade_cleanup(ctx->bb);
if (rv != APR_SUCCESS) {
return rv;
}
@@ -947,7 +992,12 @@ static apr_status_t deflate_out_filter(ap_filter_t *f,
}
/* read */
- apr_bucket_read(e, &data, &len, APR_BLOCK_READ);
+ rv = apr_bucket_read(e, &data, &len, APR_BLOCK_READ);
+ if (rv) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(10298)
+ "failed reading from %s bucket", e->type->name);
+ return rv;
+ }
if (!len) {
apr_bucket_delete(e);
continue;
@@ -964,21 +1014,15 @@ static apr_status_t deflate_out_filter(ap_filter_t *f,
ctx->stream.next_in = (unsigned char *)data; /* We just lost const-ness,
* but we'll just have to
* trust zlib */
- ctx->stream.avail_in = len;
+ ctx->stream.avail_in = (int)len;
while (ctx->stream.avail_in != 0) {
if (ctx->stream.avail_out == 0) {
- apr_status_t rv;
+ consume_buffer(ctx, c, c->bufferSize, NO_UPDATE_CRC, ctx->bb);
- ctx->stream.next_out = ctx->buffer;
- len = c->bufferSize - ctx->stream.avail_out;
-
- b = apr_bucket_heap_create((char *)ctx->buffer, len,
- NULL, f->c->bucket_alloc);
- APR_BRIGADE_INSERT_TAIL(ctx->bb, b);
- ctx->stream.avail_out = c->bufferSize;
/* Send what we have right now to the next filter. */
rv = ap_pass_brigade(f->next, ctx->bb);
+ apr_brigade_cleanup(ctx->bb);
if (rv != APR_SUCCESS) {
return rv;
}
@@ -1275,44 +1319,40 @@ static apr_status_t deflate_in_filter(ap_filter_t *f,
if (APR_BUCKET_IS_FLUSH(bkt)) {
apr_bucket *tmp_b;
- ctx->inflate_total += ctx->stream.avail_out;
- zRC = inflate(&(ctx->stream), Z_SYNC_FLUSH);
- ctx->inflate_total -= ctx->stream.avail_out;
- if (zRC != Z_OK) {
- inflateEnd(&ctx->stream);
- ap_log_rerror(APLOG_MARK, APLOG_WARNING, 0, r, APLOGNO(01391)
- "Zlib error %d inflating data (%s)", zRC,
- ctx->stream.msg);
- return APR_EGENERAL;
- }
+ if (!ctx->done) {
+ ctx->inflate_total += ctx->stream.avail_out;
+ zRC = inflate(&(ctx->stream), Z_SYNC_FLUSH);
+ ctx->inflate_total -= ctx->stream.avail_out;
+ if (zRC != Z_OK) {
+ inflateEnd(&ctx->stream);
+ ap_log_rerror(APLOG_MARK, APLOG_WARNING, 0, r, APLOGNO(01391)
+ "Zlib error %d inflating data (%s)", zRC,
+ ctx->stream.msg);
+ return APR_EGENERAL;
+ }
- if (inflate_limit && ctx->inflate_total > inflate_limit) {
- inflateEnd(&ctx->stream);
- ap_log_rerror(APLOG_MARK, APLOG_WARNING, 0, r, APLOGNO(02647)
- "Inflated content length of %" APR_OFF_T_FMT
- " is larger than the configured limit"
- " of %" APR_OFF_T_FMT,
- ctx->inflate_total, inflate_limit);
- return APR_ENOSPC;
- }
-
- if (!check_ratio(r, ctx, dc)) {
- inflateEnd(&ctx->stream);
- ap_log_rerror(APLOG_MARK, APLOG_WARNING, 0, r, APLOGNO(02805)
- "Inflated content ratio is larger than the "
- "configured limit %i by %i time(s)",
- dc->ratio_limit, dc->ratio_burst);
- return APR_EINVAL;
- }
+ if (inflate_limit && ctx->inflate_total > inflate_limit) {
+ inflateEnd(&ctx->stream);
+ ap_log_rerror(APLOG_MARK, APLOG_WARNING, 0, r, APLOGNO(02647)
+ "Inflated content length of %" APR_OFF_T_FMT
+ " is larger than the configured limit"
+ " of %" APR_OFF_T_FMT,
+ ctx->inflate_total, inflate_limit);
+ return APR_ENOSPC;
+ }
- len = c->bufferSize - ctx->stream.avail_out;
- ctx->crc = crc32(ctx->crc, (const Bytef *)ctx->buffer, len);
- tmp_b = apr_bucket_heap_create((char *)ctx->buffer, len,
- NULL, f->c->bucket_alloc);
- APR_BRIGADE_INSERT_TAIL(ctx->proc_bb, tmp_b);
+ if (!check_ratio(r, ctx, dc)) {
+ inflateEnd(&ctx->stream);
+ ap_log_rerror(APLOG_MARK, APLOG_WARNING, 0, r, APLOGNO(02805)
+ "Inflated content ratio is larger than the "
+ "configured limit %i by %i time(s)",
+ dc->ratio_limit, dc->ratio_burst);
+ return APR_EINVAL;
+ }
- ctx->stream.next_out = ctx->buffer;
- ctx->stream.avail_out = c->bufferSize;
+ consume_buffer(ctx, c, c->bufferSize - ctx->stream.avail_out,
+ UPDATE_CRC, ctx->proc_bb);
+ }
/* Flush everything so far in the returning brigade, but continue
* reading should EOS/more follow (don't lose them).
@@ -1358,16 +1398,8 @@ static apr_status_t deflate_in_filter(ap_filter_t *f,
if (!ctx->validation_buffer) {
while (ctx->stream.avail_in != 0) {
if (ctx->stream.avail_out == 0) {
- apr_bucket *tmp_heap;
-
- ctx->stream.next_out = ctx->buffer;
- len = c->bufferSize - ctx->stream.avail_out;
-
- ctx->crc = crc32(ctx->crc, (const Bytef *)ctx->buffer, len);
- tmp_heap = apr_bucket_heap_create((char *)ctx->buffer, len,
- NULL, f->c->bucket_alloc);
- APR_BRIGADE_INSERT_TAIL(ctx->proc_bb, tmp_heap);
- ctx->stream.avail_out = c->bufferSize;
+ consume_buffer(ctx, c, c->bufferSize, UPDATE_CRC,
+ ctx->proc_bb);
}
ctx->inflate_total += ctx->stream.avail_out;
@@ -1410,7 +1442,6 @@ static apr_status_t deflate_in_filter(ap_filter_t *f,
}
if (ctx->validation_buffer) {
- apr_bucket *tmp_heap;
apr_size_t avail, valid;
unsigned char *buf = ctx->validation_buffer;
@@ -1439,13 +1470,8 @@ static apr_status_t deflate_in_filter(ap_filter_t *f,
(apr_uint64_t)ctx->stream.total_in,
(apr_uint64_t)ctx->stream.total_out, r->uri);
- len = c->bufferSize - ctx->stream.avail_out;
-
- ctx->crc = crc32(ctx->crc, (const Bytef *)ctx->buffer, len);
- tmp_heap = apr_bucket_heap_create((char *)ctx->buffer, len,
- NULL, f->c->bucket_alloc);
- APR_BRIGADE_INSERT_TAIL(ctx->proc_bb, tmp_heap);
- ctx->stream.avail_out = c->bufferSize;
+ consume_buffer(ctx, c, c->bufferSize - ctx->stream.avail_out,
+ UPDATE_CRC, ctx->proc_bb);
{
unsigned long compCRC, compLen;
@@ -1491,16 +1517,8 @@ static apr_status_t deflate_in_filter(ap_filter_t *f,
if (block == APR_BLOCK_READ &&
APR_BRIGADE_EMPTY(ctx->proc_bb) &&
ctx->stream.avail_out < c->bufferSize) {
- apr_bucket *tmp_heap;
- apr_size_t len;
- ctx->stream.next_out = ctx->buffer;
- len = c->bufferSize - ctx->stream.avail_out;
-
- ctx->crc = crc32(ctx->crc, (const Bytef *)ctx->buffer, len);
- tmp_heap = apr_bucket_heap_create((char *)ctx->buffer, len,
- NULL, f->c->bucket_alloc);
- APR_BRIGADE_INSERT_TAIL(ctx->proc_bb, tmp_heap);
- ctx->stream.avail_out = c->bufferSize;
+ consume_buffer(ctx, c, c->bufferSize - ctx->stream.avail_out,
+ UPDATE_CRC, ctx->proc_bb);
}
if (!APR_BRIGADE_EMPTY(ctx->proc_bb)) {
@@ -1566,7 +1584,9 @@ static apr_status_t inflate_out_filter(ap_filter_t *f,
*/
apr_table_unset(r->headers_out, "Content-Length");
apr_table_unset(r->headers_out, "Content-MD5");
- deflate_check_etag(r, "gunzip");
+ if (c->etag_opt != AP_DEFLATE_ETAG_NOCHANGE) {
+ deflate_check_etag(r, "gunzip", c->etag_opt);
+ }
/* For a 304 response, only change the headers */
if (r->status == HTTP_NOT_MODIFIED) {
@@ -1614,7 +1634,6 @@ static apr_status_t inflate_out_filter(ap_filter_t *f,
while (!APR_BRIGADE_EMPTY(bb))
{
const char *data;
- apr_bucket *b;
apr_size_t len;
e = APR_BRIGADE_FIRST(bb);
@@ -1636,8 +1655,7 @@ static apr_status_t inflate_out_filter(ap_filter_t *f,
* fails, whereas in the deflate case you can empty a filled output
* buffer and call it again until no more output can be created.
*/
- flush_libz_buffer(ctx, c, f->c->bucket_alloc, inflate, Z_SYNC_FLUSH,
- UPDATE_CRC);
+ flush_libz_buffer(ctx, c, inflate, Z_SYNC_FLUSH, UPDATE_CRC);
ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(01398)
"Zlib: Inflated %" APR_UINT64_T_FMT
" to %" APR_UINT64_T_FMT " : URL %s",
@@ -1679,15 +1697,14 @@ static apr_status_t inflate_out_filter(ap_filter_t *f,
* Okay, we've seen the EOS.
* Time to pass it along down the chain.
*/
- return ap_pass_brigade(f->next, ctx->bb);
+ rv = ap_pass_brigade(f->next, ctx->bb);
+ apr_brigade_cleanup(ctx->bb);
+ return rv;
}
if (APR_BUCKET_IS_FLUSH(e)) {
- apr_status_t rv;
-
/* flush the remaining data from the zlib buffers */
- zRC = flush_libz_buffer(ctx, c, f->c->bucket_alloc, inflate,
- Z_SYNC_FLUSH, UPDATE_CRC);
+ zRC = flush_libz_buffer(ctx, c, inflate, Z_SYNC_FLUSH, UPDATE_CRC);
if (zRC == Z_STREAM_END) {
if (ctx->validation_buffer == NULL) {
ctx->validation_buffer = apr_pcalloc(f->r->pool,
@@ -1705,6 +1722,7 @@ static apr_status_t inflate_out_filter(ap_filter_t *f,
APR_BUCKET_REMOVE(e);
APR_BRIGADE_INSERT_TAIL(ctx->bb, e);
rv = ap_pass_brigade(f->next, ctx->bb);
+ apr_brigade_cleanup(ctx->bb);
if (rv != APR_SUCCESS) {
return rv;
}
@@ -1821,16 +1839,11 @@ static apr_status_t inflate_out_filter(ap_filter_t *f,
while (ctx->stream.avail_in != 0) {
if (ctx->stream.avail_out == 0) {
- ctx->stream.next_out = ctx->buffer;
- len = c->bufferSize - ctx->stream.avail_out;
-
- ctx->crc = crc32(ctx->crc, (const Bytef *)ctx->buffer, len);
- b = apr_bucket_heap_create((char *)ctx->buffer, len,
- NULL, f->c->bucket_alloc);
- APR_BRIGADE_INSERT_TAIL(ctx->bb, b);
- ctx->stream.avail_out = c->bufferSize;
+ consume_buffer(ctx, c, c->bufferSize, UPDATE_CRC, ctx->bb);
+
/* Send what we have right now to the next filter. */
rv = ap_pass_brigade(f->next, ctx->bb);
+ apr_brigade_cleanup(ctx->bb);
if (rv != APR_SUCCESS) {
return rv;
}
@@ -1845,6 +1858,7 @@ static apr_status_t inflate_out_filter(ap_filter_t *f,
return APR_EGENERAL;
}
+ /* Don't check length limits on inflate_out */
if (!check_ratio(r, ctx, dc)) {
ap_log_rerror(APLOG_MARK, APLOG_WARNING, 0, r, APLOGNO(02650)
"Inflated content ratio is larger than the "
@@ -1922,6 +1936,9 @@ static const command_rec deflate_filter_cmds[] = {
AP_INIT_TAKE1("DeflateInflateRatioBurst", deflate_set_inflate_ratio_burst, NULL, OR_ALL,
"Set the maximum number of following inflate ratios above limit "
"(default: " APR_STRINGIFY(AP_INFLATE_RATIO_BURST) ")"),
+ AP_INIT_TAKE1("DeflateAlterEtag", deflate_set_etag, NULL, RSRC_CONF,
+ "Set how mod_deflate should modify ETAG response headers: 'AddSuffix' (default), 'NoChange' (2.2.x behavior), 'Remove'"),
+
{NULL}
};
diff --git a/modules/filters/mod_xml2enc.c b/modules/filters/mod_xml2enc.c
index 76046b4..eb05c18 100644
--- a/modules/filters/mod_xml2enc.c
+++ b/modules/filters/mod_xml2enc.c
@@ -206,11 +206,11 @@ static void sniff_encoding(request_rec* r, xml2ctx* ctx)
}
}
}
-
+
/* to sniff, first we look for BOM */
if (ctx->xml2enc == XML_CHAR_ENCODING_NONE) {
- ctx->xml2enc = xmlDetectCharEncoding((const xmlChar*)ctx->buf,
- ctx->bytes);
+ ctx->xml2enc = xmlDetectCharEncoding((const unsigned char*)ctx->buf,
+ ctx->bytes);
if (HAVE_ENCODING(ctx->xml2enc)) {
ap_log_rerror(APLOG_MARK, APLOG_INFO, 0, r, APLOGNO(01432)
"Got charset from XML rules.") ;
@@ -323,7 +323,7 @@ static apr_status_t xml2enc_ffunc(ap_filter_t* f, apr_bucket_brigade* bb)
apr_bucket* bstart;
apr_size_t insz = 0;
int pending_meta = 0;
- char *ctype;
+ char *mtype;
char *p;
if (!ctx || !f->r->content_type) {
@@ -332,13 +332,17 @@ static apr_status_t xml2enc_ffunc(ap_filter_t* f, apr_bucket_brigade* bb)
return ap_pass_brigade(f->next, bb) ;
}
- ctype = apr_pstrdup(f->r->pool, f->r->content_type);
- for (p = ctype; *p; ++p)
- if (isupper(*p))
- *p = tolower(*p);
-
- /* only act if starts-with "text/" or contains "xml" */
- if (strncmp(ctype, "text/", 5) && !strstr(ctype, "xml")) {
+ /* Extract the media type, ignoring parameters in content-type. */
+ mtype = apr_pstrdup(f->r->pool, f->r->content_type);
+ if ((p = ap_strchr(mtype, ';')) != NULL) *p = '\0';
+ ap_str_tolower(mtype);
+
+ /* Accept text/ types, plus any XML media type per RFC 7303. */
+ if (!(strncmp(mtype, "text/", 5) == 0
+ || strcmp(mtype, "application/xml") == 0
+ || (strlen(mtype) > 7 /* minimum 'a/b+xml' length */
+ && (p = strstr(mtype, "+xml")) != NULL
+ && strlen(p) == 4 /* ensures +xml is a suffix */))) {
ap_remove_output_filter(f);
return ap_pass_brigade(f->next, bb) ;
}
diff --git a/modules/generators/mod_cgi.c b/modules/generators/mod_cgi.c
index 7e4b126..1f77786 100644
--- a/modules/generators/mod_cgi.c
+++ b/modules/generators/mod_cgi.c
@@ -92,6 +92,10 @@ typedef struct {
apr_size_t bufbytes;
} cgi_server_conf;
+typedef struct {
+ apr_interval_time_t timeout;
+} cgi_dirconf;
+
static void *create_cgi_config(apr_pool_t *p, server_rec *s)
{
cgi_server_conf *c =
@@ -112,6 +116,12 @@ static void *merge_cgi_config(apr_pool_t *p, void *basev, void *overridesv)
return overrides->logname ? overrides : base;
}
+static void *create_cgi_dirconf(apr_pool_t *p, char *dummy)
+{
+ cgi_dirconf *c = (cgi_dirconf *) apr_pcalloc(p, sizeof(cgi_dirconf));
+ return c;
+}
+
static const char *set_scriptlog(cmd_parms *cmd, void *dummy, const char *arg)
{
server_rec *s = cmd->server;
@@ -150,6 +160,17 @@ static const char *set_scriptlog_buffer(cmd_parms *cmd, void *dummy,
return NULL;
}
+static const char *set_script_timeout(cmd_parms *cmd, void *dummy, const char *arg)
+{
+ cgi_dirconf *dc = dummy;
+
+ if (ap_timeout_parameter_parse(arg, &dc->timeout, "s") != APR_SUCCESS) {
+ return "CGIScriptTimeout has wrong format";
+ }
+
+ return NULL;
+}
+
static const command_rec cgi_cmds[] =
{
AP_INIT_TAKE1("ScriptLog", set_scriptlog, NULL, RSRC_CONF,
@@ -158,6 +179,9 @@ AP_INIT_TAKE1("ScriptLogLength", set_scriptlog_length, NULL, RSRC_CONF,
"the maximum length (in bytes) of the script debug log"),
AP_INIT_TAKE1("ScriptLogBuffer", set_scriptlog_buffer, NULL, RSRC_CONF,
"the maximum size (in bytes) to record of a POST request"),
+AP_INIT_TAKE1("CGIScriptTimeout", set_script_timeout, NULL, RSRC_CONF | ACCESS_CONF,
+ "The amount of time to wait between successful reads from "
+ "the CGI script, in seconds."),
{NULL}
};
@@ -466,23 +490,26 @@ static apr_status_t run_cgi_child(apr_file_t **script_out,
apr_filepath_name_get(r->filename));
}
else {
+ cgi_dirconf *dc = ap_get_module_config(r->per_dir_config, &cgi_module);
+ apr_interval_time_t timeout = dc->timeout > 0 ? dc->timeout : r->server->timeout;
+
apr_pool_note_subprocess(p, procnew, APR_KILL_AFTER_TIMEOUT);
*script_in = procnew->out;
if (!*script_in)
return APR_EBADF;
- apr_file_pipe_timeout_set(*script_in, r->server->timeout);
+ apr_file_pipe_timeout_set(*script_in, timeout);
if (e_info->prog_type == RUN_AS_CGI) {
*script_out = procnew->in;
if (!*script_out)
return APR_EBADF;
- apr_file_pipe_timeout_set(*script_out, r->server->timeout);
+ apr_file_pipe_timeout_set(*script_out, timeout);
*script_err = procnew->err;
if (!*script_err)
return APR_EBADF;
- apr_file_pipe_timeout_set(*script_err, r->server->timeout);
+ apr_file_pipe_timeout_set(*script_err, timeout);
}
}
}
@@ -675,11 +702,14 @@ static apr_status_t cgi_bucket_read(apr_bucket *b, const char **str,
apr_size_t *len, apr_read_type_e block)
{
struct cgi_bucket_data *data = b->data;
- apr_interval_time_t timeout;
+ apr_interval_time_t timeout = 0;
apr_status_t rv;
int gotdata = 0;
+ cgi_dirconf *dc = ap_get_module_config(data->r->per_dir_config, &cgi_module);
- timeout = block == APR_NONBLOCK_READ ? 0 : data->r->server->timeout;
+ if (block != APR_NONBLOCK_READ) {
+ timeout = dc->timeout > 0 ? dc->timeout : data->r->server->timeout;
+ }
do {
const apr_pollfd_t *results;
@@ -757,6 +787,8 @@ static int cgi_handler(request_rec *r)
apr_status_t rv;
cgi_exec_info_t e_info;
conn_rec *c;
+ cgi_dirconf *dc = ap_get_module_config(r->per_dir_config, &cgi_module);
+ apr_interval_time_t timeout = dc->timeout > 0 ? dc->timeout : r->server->timeout;
if (strcmp(r->handler, CGI_MAGIC_TYPE) && strcmp(r->handler, "cgi-script")) {
return DECLINED;
@@ -935,9 +967,18 @@ static int cgi_handler(request_rec *r)
char sbuf[MAX_STRING_LEN];
int ret;
- if ((ret = ap_scan_script_header_err_brigade_ex(r, bb, sbuf,
- APLOG_MODULE_INDEX)))
- {
+ ret = ap_scan_script_header_err_brigade_ex(r, bb, sbuf,
+ APLOG_MODULE_INDEX);
+
+ /* xCGI has its own body framing mechanism which we don't
+ * match against any provided Content-Length, so let the
+ * core determine C-L vs T-E based on what's actually sent.
+ */
+ if (!apr_table_get(r->subprocess_env, AP_TRUST_CGILIKE_CL_ENVVAR))
+ apr_table_unset(r->headers_out, "Content-Length");
+ apr_table_unset(r->headers_out, "Transfer-Encoding");
+
+ if (ret != OK) {
ret = log_script(r, conf, ret, dbuf, sbuf, bb, script_err);
/*
@@ -976,7 +1017,7 @@ static int cgi_handler(request_rec *r)
* stderr output, as normal. */
discard_script_output(bb);
apr_brigade_destroy(bb);
- apr_file_pipe_timeout_set(script_err, r->server->timeout);
+ apr_file_pipe_timeout_set(script_err, timeout);
log_script_err(r, script_err);
}
@@ -1027,7 +1068,7 @@ static int cgi_handler(request_rec *r)
* connection drops or we stopped sending output for some other
* reason */
if (rv == APR_SUCCESS && !r->connection->aborted) {
- apr_file_pipe_timeout_set(script_err, r->server->timeout);
+ apr_file_pipe_timeout_set(script_err, timeout);
log_script_err(r, script_err);
}
@@ -1268,7 +1309,7 @@ static void register_hooks(apr_pool_t *p)
AP_DECLARE_MODULE(cgi) =
{
STANDARD20_MODULE_STUFF,
- NULL, /* dir config creater */
+ create_cgi_dirconf, /* dir config creater */
NULL, /* dir merger --- default is to override */
create_cgi_config, /* server config */
merge_cgi_config, /* merge server config */
diff --git a/modules/generators/mod_cgid.c b/modules/generators/mod_cgid.c
index 2258a68..4bab59f 100644
--- a/modules/generators/mod_cgid.c
+++ b/modules/generators/mod_cgid.c
@@ -1616,9 +1616,18 @@ static int cgid_handler(request_rec *r)
b = apr_bucket_eos_create(c->bucket_alloc);
APR_BRIGADE_INSERT_TAIL(bb, b);
- if ((ret = ap_scan_script_header_err_brigade_ex(r, bb, sbuf,
- APLOG_MODULE_INDEX)))
- {
+ ret = ap_scan_script_header_err_brigade_ex(r, bb, sbuf,
+ APLOG_MODULE_INDEX);
+
+ /* xCGI has its own body framing mechanism which we don't
+ * match against any provided Content-Length, so let the
+ * core determine C-L vs T-E based on what's actually sent.
+ */
+ if (!apr_table_get(r->subprocess_env, AP_TRUST_CGILIKE_CL_ENVVAR))
+ apr_table_unset(r->headers_out, "Content-Length");
+ apr_table_unset(r->headers_out, "Transfer-Encoding");
+
+ if (ret != OK) {
ret = log_script(r, conf, ret, dbuf, sbuf, bb, NULL);
/*
diff --git a/modules/generators/mod_status.c b/modules/generators/mod_status.c
index 5917953..5bada07 100644
--- a/modules/generators/mod_status.c
+++ b/modules/generators/mod_status.c
@@ -186,7 +186,8 @@ static int status_handler(request_rec *r)
apr_uint32_t up_time;
ap_loadavg_t t;
int j, i, res, written;
- int ready;
+ int idle;
+ int graceful;
int busy;
unsigned long count;
unsigned long lres, my_lres, conn_lres;
@@ -203,6 +204,7 @@ static int status_handler(request_rec *r)
char *stat_buffer;
pid_t *pid_buffer, worker_pid;
int *thread_idle_buffer = NULL;
+ int *thread_graceful_buffer = NULL;
int *thread_busy_buffer = NULL;
clock_t tu, ts, tcu, tcs;
clock_t gu, gs, gcu, gcs;
@@ -231,7 +233,8 @@ static int status_handler(request_rec *r)
#endif
#endif
- ready = 0;
+ idle = 0;
+ graceful = 0;
busy = 0;
count = 0;
bcount = 0;
@@ -250,6 +253,7 @@ static int status_handler(request_rec *r)
stat_buffer = apr_palloc(r->pool, server_limit * thread_limit * sizeof(char));
if (is_async) {
thread_idle_buffer = apr_palloc(r->pool, server_limit * sizeof(int));
+ thread_graceful_buffer = apr_palloc(r->pool, server_limit * sizeof(int));
thread_busy_buffer = apr_palloc(r->pool, server_limit * sizeof(int));
}
@@ -318,6 +322,7 @@ static int status_handler(request_rec *r)
ps_record = ap_get_scoreboard_process(i);
if (is_async) {
thread_idle_buffer[i] = 0;
+ thread_graceful_buffer[i] = 0;
thread_busy_buffer[i] = 0;
}
for (j = 0; j < thread_limit; ++j) {
@@ -336,18 +341,20 @@ static int status_handler(request_rec *r)
&& ps_record->pid) {
if (res == SERVER_READY) {
if (ps_record->generation == mpm_generation)
- ready++;
+ idle++;
if (is_async)
thread_idle_buffer[i]++;
}
else if (res != SERVER_DEAD &&
res != SERVER_STARTING &&
res != SERVER_IDLE_KILL) {
- busy++;
- if (is_async) {
- if (res == SERVER_GRACEFUL)
- thread_idle_buffer[i]++;
- else
+ if (res == SERVER_GRACEFUL) {
+ graceful++;
+ if (is_async)
+ thread_graceful_buffer[i]++;
+ } else {
+ busy++;
+ if (is_async)
thread_busy_buffer[i]++;
}
}
@@ -548,10 +555,10 @@ static int status_handler(request_rec *r)
} /* ap_extended_status */
if (!short_report)
- ap_rprintf(r, "<dt>%d requests currently being processed, "
- "%d idle workers</dt>\n", busy, ready);
+ ap_rprintf(r, "<dt>%d requests currently being processed, %d workers gracefully restarting, "
+ "%d idle workers</dt>\n", busy, graceful, idle);
else
- ap_rprintf(r, "BusyWorkers: %d\nIdleWorkers: %d\n", busy, ready);
+ ap_rprintf(r, "BusyWorkers: %d\nGracefulWorkers: %d\nIdleWorkers: %d\n", busy, graceful, idle);
if (!short_report)
ap_rputs("</dl>", r);
@@ -559,11 +566,6 @@ static int status_handler(request_rec *r)
if (is_async) {
int write_completion = 0, lingering_close = 0, keep_alive = 0,
connections = 0, stopping = 0, procs = 0;
- /*
- * These differ from 'busy' and 'ready' in how gracefully finishing
- * threads are counted. XXX: How to make this clear in the html?
- */
- int busy_workers = 0, idle_workers = 0;
if (!short_report)
ap_rputs("\n\n<table rules=\"all\" cellpadding=\"1%\">\n"
"<tr><th rowspan=\"2\">Slot</th>"
@@ -573,7 +575,7 @@ static int status_handler(request_rec *r)
"<th colspan=\"2\">Threads</th>"
"<th colspan=\"3\">Async connections</th></tr>\n"
"<tr><th>total</th><th>accepting</th>"
- "<th>busy</th><th>idle</th>"
+ "<th>busy</th><th>graceful</th><th>idle</th>"
"<th>writing</th><th>keep-alive</th><th>closing</th></tr>\n", r);
for (i = 0; i < server_limit; ++i) {
ps_record = ap_get_scoreboard_process(i);
@@ -582,8 +584,6 @@ static int status_handler(request_rec *r)
write_completion += ps_record->write_completion;
keep_alive += ps_record->keep_alive;
lingering_close += ps_record->lingering_close;
- busy_workers += thread_busy_buffer[i];
- idle_workers += thread_idle_buffer[i];
procs++;
if (ps_record->quiescing) {
stopping++;
@@ -599,7 +599,7 @@ static int status_handler(request_rec *r)
ap_rprintf(r, "<tr><td>%u</td><td>%" APR_PID_T_FMT "</td>"
"<td>%s%s</td>"
"<td>%u</td><td>%s</td>"
- "<td>%u</td><td>%u</td>"
+ "<td>%u</td><td>%u</td><td>%u</td>"
"<td>%u</td><td>%u</td><td>%u</td>"
"</tr>\n",
i, ps_record->pid,
@@ -607,6 +607,7 @@ static int status_handler(request_rec *r)
ps_record->connections,
ps_record->not_accepting ? "no" : "yes",
thread_busy_buffer[i],
+ thread_graceful_buffer[i],
thread_idle_buffer[i],
ps_record->write_completion,
ps_record->keep_alive,
@@ -618,25 +619,22 @@ static int status_handler(request_rec *r)
ap_rprintf(r, "<tr><td>Sum</td>"
"<td>%d</td><td>%d</td>"
"<td>%d</td><td>&nbsp;</td>"
- "<td>%d</td><td>%d</td>"
+ "<td>%d</td><td>%d</td><td>%d</td>"
"<td>%d</td><td>%d</td><td>%d</td>"
"</tr>\n</table>\n",
procs, stopping,
connections,
- busy_workers, idle_workers,
+ busy, graceful, idle,
write_completion, keep_alive, lingering_close);
}
else {
ap_rprintf(r, "Processes: %d\n"
"Stopping: %d\n"
- "BusyWorkers: %d\n"
- "IdleWorkers: %d\n"
"ConnsTotal: %d\n"
"ConnsAsyncWriting: %d\n"
"ConnsAsyncKeepAlive: %d\n"
"ConnsAsyncClosing: %d\n",
procs, stopping,
- busy_workers, idle_workers,
connections,
write_completion, keep_alive, lingering_close);
}
diff --git a/modules/http/http_filters.c b/modules/http/http_filters.c
index 1a8df34..f20aee7 100644
--- a/modules/http/http_filters.c
+++ b/modules/http/http_filters.c
@@ -778,6 +778,18 @@ static APR_INLINE int check_headers(request_rec *r)
struct check_header_ctx ctx;
core_server_config *conf =
ap_get_core_module_config(r->server->module_config);
+ const char *val;
+
+ if ((val = apr_table_get(r->headers_out, "Transfer-Encoding"))) {
+ if (apr_table_get(r->headers_out, "Content-Length")) {
+ apr_table_unset(r->headers_out, "Content-Length");
+ r->connection->keepalive = AP_CONN_CLOSE;
+ }
+ if (!ap_is_chunked(r->pool, val)) {
+ r->connection->keepalive = AP_CONN_CLOSE;
+ return 0;
+ }
+ }
ctx.r = r;
ctx.strict = (conf->http_conformance != AP_HTTP_CONFORMANCE_UNSAFE);
@@ -1353,6 +1365,9 @@ AP_CORE_DECLARE_NONSTD(apr_status_t) ap_http_header_filter(ap_filter_t *f,
*/
apr_table_clear(r->headers_out);
apr_table_clear(r->err_headers_out);
+ r->content_type = r->content_encoding = NULL;
+ r->content_languages = NULL;
+ r->clength = r->chunked = 0;
apr_brigade_cleanup(b);
/* Don't recall ap_die() if we come back here (from its own internal
@@ -1369,8 +1384,6 @@ AP_CORE_DECLARE_NONSTD(apr_status_t) ap_http_header_filter(ap_filter_t *f,
APR_BRIGADE_INSERT_TAIL(b, e);
e = apr_bucket_eos_create(c->bucket_alloc);
APR_BRIGADE_INSERT_TAIL(b, e);
- r->content_type = r->content_encoding = NULL;
- r->content_languages = NULL;
ap_set_content_length(r, 0);
recursive_error = 1;
}
@@ -1397,6 +1410,7 @@ AP_CORE_DECLARE_NONSTD(apr_status_t) ap_http_header_filter(ap_filter_t *f,
if (!apr_is_empty_table(r->err_headers_out)) {
r->headers_out = apr_table_overlay(r->pool, r->err_headers_out,
r->headers_out);
+ apr_table_clear(r->err_headers_out);
}
/*
@@ -1416,6 +1430,17 @@ AP_CORE_DECLARE_NONSTD(apr_status_t) ap_http_header_filter(ap_filter_t *f,
fixup_vary(r);
}
+
+ /*
+ * Control cachability for non-cacheable responses if not already set by
+ * some other part of the server configuration.
+ */
+ if (r->no_cache && !apr_table_get(r->headers_out, "Expires")) {
+ char *date = apr_palloc(r->pool, APR_RFC822_DATE_LEN);
+ ap_recent_rfc822_date(date, r->request_time);
+ apr_table_addn(r->headers_out, "Expires", date);
+ }
+
/*
* Now remove any ETag response header field if earlier processing
* says so (such as a 'FileETag None' directive).
@@ -1428,6 +1453,7 @@ AP_CORE_DECLARE_NONSTD(apr_status_t) ap_http_header_filter(ap_filter_t *f,
basic_http_header_check(r, &protocol);
ap_set_keepalive(r);
+ /* 204/304 responses don't have content related headers */
if (AP_STATUS_IS_HEADER_ONLY(r->status)) {
apr_table_unset(r->headers_out, "Transfer-Encoding");
apr_table_unset(r->headers_out, "Content-Length");
@@ -1470,16 +1496,6 @@ AP_CORE_DECLARE_NONSTD(apr_status_t) ap_http_header_filter(ap_filter_t *f,
apr_table_setn(r->headers_out, "Content-Language", field);
}
- /*
- * Control cachability for non-cacheable responses if not already set by
- * some other part of the server configuration.
- */
- if (r->no_cache && !apr_table_get(r->headers_out, "Expires")) {
- char *date = apr_palloc(r->pool, APR_RFC822_DATE_LEN);
- ap_recent_rfc822_date(date, r->request_time);
- apr_table_addn(r->headers_out, "Expires", date);
- }
-
/* This is a hack, but I can't find anyway around it. The idea is that
* we don't want to send out 0 Content-Lengths if it is a head request.
* This happens when modules try to outsmart the server, and return
diff --git a/modules/http2/config2.m4 b/modules/http2/config2.m4
index f89f5ba..c4579c4 100644
--- a/modules/http2/config2.m4
+++ b/modules/http2/config2.m4
@@ -37,6 +37,7 @@ h2_stream.lo dnl
h2_switch.lo dnl
h2_util.lo dnl
h2_workers.lo dnl
+h2_ws.lo dnl
"
dnl
diff --git a/modules/http2/h2.h b/modules/http2/h2.h
index 250e726..f496a6d 100644
--- a/modules/http2/h2.h
+++ b/modules/http2/h2.h
@@ -20,6 +20,8 @@
#include <apr_version.h>
#include <ap_mmn.h>
+#include <nghttp2/nghttp2ver.h>
+
struct h2_session;
struct h2_stream;
@@ -33,6 +35,20 @@ struct h2_stream;
#define H2_USE_PIPES (APR_FILES_AS_SOCKETS && APR_VERSION_AT_LEAST(1,6,0))
#endif
+#if AP_MODULE_MAGIC_AT_LEAST(20120211, 129)
+#define H2_USE_POLLFD_FROM_CONN 1
+#else
+#define H2_USE_POLLFD_FROM_CONN 0
+#endif
+
+/* WebSockets support requires apr 1.7.0 for apr_encode.h, plus the
+ * WebSockets features of nghttp2 1.34.0 and later. */
+#if H2_USE_PIPES && defined(NGHTTP2_VERSION_NUM) && NGHTTP2_VERSION_NUM >= 0x012200 && APR_VERSION_AT_LEAST(1,7,0)
+#define H2_USE_WEBSOCKETS 1
+#else
+#define H2_USE_WEBSOCKETS 0
+#endif
+
/**
* The magic PRIamble of RFC 7540 that is always sent when starting
* a h2 communication.
@@ -62,6 +78,8 @@ extern const char *H2_MAGIC_TOKEN;
#define H2_HEADER_AUTH_LEN 10
#define H2_HEADER_PATH ":path"
#define H2_HEADER_PATH_LEN 5
+#define H2_HEADER_PROTO ":protocol"
+#define H2_HEADER_PROTO_LEN 9
#define H2_CRLF "\r\n"
/* Size of the frame header itself in HTTP/2 */
@@ -153,6 +171,7 @@ struct h2_request {
const char *scheme;
const char *authority;
const char *path;
+ const char *protocol;
apr_table_t *headers;
apr_time_t request_time;
diff --git a/modules/http2/h2_bucket_beam.c b/modules/http2/h2_bucket_beam.c
index cbf7f34..6978254 100644
--- a/modules/http2/h2_bucket_beam.c
+++ b/modules/http2/h2_bucket_beam.c
@@ -24,6 +24,7 @@
#include <httpd.h>
#include <http_protocol.h>
+#include <http_request.h>
#include <http_log.h>
#include "h2_private.h"
@@ -156,6 +157,23 @@ static void purge_consumed_buckets(h2_bucket_beam *beam)
* from sender thread only */
while (!H2_BLIST_EMPTY(&beam->buckets_consumed)) {
b = H2_BLIST_FIRST(&beam->buckets_consumed);
+ if(AP_BUCKET_IS_EOR(b)) {
+ APR_BUCKET_REMOVE(b);
+ H2_BLIST_INSERT_TAIL(&beam->buckets_eor, b);
+ }
+ else {
+ apr_bucket_delete(b);
+ }
+ }
+}
+
+static void purge_eor_buckets(h2_bucket_beam *beam)
+{
+ apr_bucket *b;
+ /* delete all sender buckets in purge brigade, needs to be called
+ * from sender thread only */
+ while (!H2_BLIST_EMPTY(&beam->buckets_eor)) {
+ b = H2_BLIST_FIRST(&beam->buckets_eor);
apr_bucket_delete(b);
}
}
@@ -250,12 +268,13 @@ static void beam_shutdown(h2_bucket_beam *beam, apr_shutdown_how_e how)
if (how == APR_SHUTDOWN_READWRITE) {
beam->cons_io_cb = NULL;
beam->recv_cb = NULL;
+ beam->eagain_cb = NULL;
}
/* shutdown sender (or both)? */
if (how != APR_SHUTDOWN_READ) {
- h2_blist_cleanup(&beam->buckets_to_send);
purge_consumed_buckets(beam);
+ h2_blist_cleanup(&beam->buckets_to_send);
}
}
@@ -263,6 +282,7 @@ static apr_status_t beam_cleanup(void *data)
{
h2_bucket_beam *beam = data;
beam_shutdown(beam, APR_SHUTDOWN_READWRITE);
+ purge_eor_buckets(beam);
beam->pool = NULL; /* the pool is clearing now */
return APR_SUCCESS;
}
@@ -295,6 +315,7 @@ apr_status_t h2_beam_create(h2_bucket_beam **pbeam, conn_rec *from,
H2_BLIST_INIT(&beam->buckets_to_send);
H2_BLIST_INIT(&beam->buckets_consumed);
+ H2_BLIST_INIT(&beam->buckets_eor);
beam->tx_mem_limits = 1;
beam->max_buf_size = max_buf_size;
beam->timeout = timeout;
@@ -565,6 +586,9 @@ cleanup:
rv = APR_ECONNABORTED;
}
H2_BEAM_LOG(beam, from, APLOG_TRACE2, rv, "end send", sender_bb);
+ if(rv != APR_SUCCESS && !APR_STATUS_IS_EAGAIN(rv) && sender_bb != NULL) {
+ apr_brigade_cleanup(sender_bb);
+ }
apr_thread_mutex_unlock(beam->lock);
return rv;
}
@@ -724,6 +748,9 @@ transfer:
leave:
H2_BEAM_LOG(beam, to, APLOG_TRACE2, rv, "end receive", bb);
+ if (rv == APR_EAGAIN && beam->eagain_cb) {
+ beam->eagain_cb(beam->eagain_ctx, beam);
+ }
apr_thread_mutex_unlock(beam->lock);
return rv;
}
@@ -746,6 +773,15 @@ void h2_beam_on_received(h2_bucket_beam *beam,
apr_thread_mutex_unlock(beam->lock);
}
+void h2_beam_on_eagain(h2_bucket_beam *beam,
+ h2_beam_ev_callback *eagain_cb, void *ctx)
+{
+ apr_thread_mutex_lock(beam->lock);
+ beam->eagain_cb = eagain_cb;
+ beam->eagain_ctx = ctx;
+ apr_thread_mutex_unlock(beam->lock);
+}
+
void h2_beam_on_send(h2_bucket_beam *beam,
h2_beam_ev_callback *send_cb, void *ctx)
{
@@ -823,3 +859,25 @@ int h2_beam_report_consumption(h2_bucket_beam *beam)
apr_thread_mutex_unlock(beam->lock);
return rv;
}
+
+int h2_beam_is_complete(h2_bucket_beam *beam)
+{
+ int rv = 0;
+
+ apr_thread_mutex_lock(beam->lock);
+ if (beam->closed)
+ rv = 1;
+ else {
+ apr_bucket *b;
+ for (b = H2_BLIST_FIRST(&beam->buckets_to_send);
+ b != H2_BLIST_SENTINEL(&beam->buckets_to_send);
+ b = APR_BUCKET_NEXT(b)) {
+ if (APR_BUCKET_IS_EOS(b)) {
+ rv = 1;
+ break;
+ }
+ }
+ }
+ apr_thread_mutex_unlock(beam->lock);
+ return rv;
+}
diff --git a/modules/http2/h2_bucket_beam.h b/modules/http2/h2_bucket_beam.h
index 2a9d5f0..c58ce98 100644
--- a/modules/http2/h2_bucket_beam.h
+++ b/modules/http2/h2_bucket_beam.h
@@ -48,6 +48,7 @@ struct h2_bucket_beam {
apr_pool_t *pool;
h2_blist buckets_to_send;
h2_blist buckets_consumed;
+ h2_blist buckets_eor;
apr_size_t max_buf_size;
apr_interval_time_t timeout;
@@ -66,6 +67,8 @@ struct h2_bucket_beam {
void *recv_ctx;
h2_beam_ev_callback *send_cb; /* event: buckets were added in h2_beam_send() */
void *send_ctx;
+ h2_beam_ev_callback *eagain_cb; /* event: a receive results in ARP_EAGAIN */
+ void *eagain_ctx;
apr_off_t recv_bytes; /* amount of bytes transferred in h2_beam_receive() */
apr_off_t recv_bytes_reported; /* amount of bytes reported as received via callback */
@@ -205,6 +208,16 @@ void h2_beam_on_received(h2_bucket_beam *beam,
h2_beam_ev_callback *recv_cb, void *ctx);
/**
+ * Register a callback to be invoked on the receiver side whenever
+ * APR_EAGAIN is being returned in h2_beam_receive().
+ * @param beam the beam to set the callback on
+ * @param egain_cb the callback or NULL, called before APR_EAGAIN is returned
+ * @param ctx the context to use in callback invocation
+ */
+void h2_beam_on_eagain(h2_bucket_beam *beam,
+ h2_beam_ev_callback *eagain_cb, void *ctx);
+
+/**
* Register a call back from the sender side to be invoked when send
* has added buckets to the beam.
* Unregister by passing a NULL on_send_cb.
@@ -245,4 +258,10 @@ apr_off_t h2_beam_get_buffered(h2_bucket_beam *beam);
*/
apr_off_t h2_beam_get_mem_used(h2_bucket_beam *beam);
+/**
+ * @return != 0 iff beam has been closed or has an EOS bucket buffered
+ * waiting to be received.
+ */
+int h2_beam_is_complete(h2_bucket_beam *beam);
+
#endif /* h2_bucket_beam_h */
diff --git a/modules/http2/h2_c1_io.c b/modules/http2/h2_c1_io.c
index ade8836..5ed4ee8 100644
--- a/modules/http2/h2_c1_io.c
+++ b/modules/http2/h2_c1_io.c
@@ -260,9 +260,22 @@ static apr_status_t read_to_scratch(h2_c1_io *io, apr_bucket *b)
static apr_status_t pass_output(h2_c1_io *io, int flush)
{
conn_rec *c = io->session->c1;
- apr_off_t bblen;
+ apr_off_t bblen = 0;
apr_status_t rv;
-
+
+ if (io->is_passing) {
+ /* recursive call, may be triggered by an H2EOS bucket
+ * being destroyed and triggering sending more data? */
+ AP_DEBUG_ASSERT(0);
+ ap_log_cerror(APLOG_MARK, APLOG_ERR, 0, c, APLOGNO(10456)
+ "h2_c1_io(%ld): recursive call of h2_c1_io_pass. "
+ "Denied to prevent output corruption. This "
+ "points to a bug in the HTTP/2 implementation.",
+ c->id);
+ return APR_EGENERAL;
+ }
+ io->is_passing = 1;
+
append_scratch(io);
if (flush) {
if (!APR_BUCKET_IS_FLUSH(APR_BRIGADE_LAST(io->output))) {
@@ -271,17 +284,16 @@ static apr_status_t pass_output(h2_c1_io *io, int flush)
}
}
if (APR_BRIGADE_EMPTY(io->output)) {
- return APR_SUCCESS;
+ rv = APR_SUCCESS;
+ goto cleanup;
}
-
+
io->unflushed = !APR_BUCKET_IS_FLUSH(APR_BRIGADE_LAST(io->output));
apr_brigade_length(io->output, 0, &bblen);
C1_IO_BB_LOG(c, 0, APLOG_TRACE2, "out", io->output);
-
+
rv = ap_pass_brigade(c->output_filters, io->output);
if (APR_SUCCESS != rv) goto cleanup;
-
- io->buffered_len = 0;
io->bytes_written += (apr_size_t)bblen;
if (io->write_size < WRITE_SIZE_MAX
@@ -309,6 +321,8 @@ cleanup:
c->id, (long)bblen);
}
apr_brigade_cleanup(io->output);
+ io->buffered_len = 0;
+ io->is_passing = 0;
return rv;
}
diff --git a/modules/http2/h2_c1_io.h b/modules/http2/h2_c1_io.h
index d891ffb..c4dac38 100644
--- a/modules/http2/h2_c1_io.h
+++ b/modules/http2/h2_c1_io.h
@@ -44,7 +44,8 @@ typedef struct {
apr_off_t buffered_len;
apr_off_t flush_threshold;
unsigned int is_flushed : 1;
-
+ unsigned int is_passing : 1;
+
char *scratch;
apr_size_t ssize;
apr_size_t slen;
diff --git a/modules/http2/h2_c2.c b/modules/http2/h2_c2.c
index 44a08d0..a955200 100644
--- a/modules/http2/h2_c2.c
+++ b/modules/http2/h2_c2.c
@@ -48,8 +48,10 @@
#include "h2_headers.h"
#include "h2_session.h"
#include "h2_stream.h"
+#include "h2_ws.h"
#include "h2_c2.h"
#include "h2_util.h"
+#include "mod_http2.h"
static module *mpm_module;
@@ -133,10 +135,22 @@ apr_status_t h2_c2_child_init(apr_pool_t *pool, server_rec *s)
APR_PROTO_TCP, pool);
}
+static void h2_c2_log_io(conn_rec *c2, apr_off_t bytes_sent)
+{
+ if (bytes_sent && h2_c_logio_add_bytes_out) {
+ h2_c_logio_add_bytes_out(c2, bytes_sent);
+ }
+}
+
void h2_c2_destroy(conn_rec *c2)
{
+ h2_conn_ctx_t *conn_ctx = h2_conn_ctx_get(c2);
+
ap_log_cerror(APLOG_MARK, APLOG_TRACE3, 0, c2,
"h2_c2(%s): destroy", c2->log_id);
+ if(!c2->aborted && conn_ctx && conn_ctx->bytes_sent) {
+ h2_c2_log_io(c2, conn_ctx->bytes_sent);
+ }
apr_pool_destroy(c2->pool);
}
@@ -146,6 +160,10 @@ void h2_c2_abort(conn_rec *c2, conn_rec *from)
AP_DEBUG_ASSERT(conn_ctx);
AP_DEBUG_ASSERT(conn_ctx->stream_id);
+ if(!c2->aborted && conn_ctx->bytes_sent) {
+ h2_c2_log_io(c2, conn_ctx->bytes_sent);
+ }
+
if (conn_ctx->beam_in) {
h2_beam_abort(conn_ctx->beam_in, from);
}
@@ -157,6 +175,7 @@ void h2_c2_abort(conn_rec *c2, conn_rec *from)
typedef struct {
apr_bucket_brigade *bb; /* c2: data in holding area */
+ unsigned did_upgrade_eos:1; /* for Upgrade, we added an extra EOS */
} h2_c2_fctx_in_t;
static apr_status_t h2_c2_filter_in(ap_filter_t* f,
@@ -200,7 +219,17 @@ static apr_status_t h2_c2_filter_in(ap_filter_t* f,
APR_BRIGADE_INSERT_TAIL(fctx->bb, b);
}
}
-
+
+ /* If this is a HTTP Upgrade, it means the request we process
+ * has not Content, although the stream is not necessarily closed.
+ * On first read, we insert an EOS to signal processing that it
+ * has the complete body. */
+ if (conn_ctx->is_upgrade && !fctx->did_upgrade_eos) {
+ b = apr_bucket_eos_create(f->c->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(fctx->bb, b);
+ fctx->did_upgrade_eos = 1;
+ }
+
while (APR_BRIGADE_EMPTY(fctx->bb)) {
/* Get more input data for our request. */
if (APLOGctrace2(f->c)) {
@@ -326,42 +355,13 @@ receive:
static apr_status_t beam_out(conn_rec *c2, h2_conn_ctx_t *conn_ctx, apr_bucket_brigade* bb)
{
- apr_off_t written, header_len = 0;
+ apr_off_t written = 0;
apr_status_t rv;
- if (h2_c_logio_add_bytes_out) {
- /* mod_logio wants to report the number of bytes written in a
- * response, including header and footer fields. Since h2 converts
- * those during c1 processing into the HPACKed h2 HEADER frames,
- * we need to give mod_logio something here and count just the
- * raw lengths of all headers in the buckets. */
- apr_bucket *b;
- for (b = APR_BRIGADE_FIRST(bb);
- b != APR_BRIGADE_SENTINEL(bb);
- b = APR_BUCKET_NEXT(b)) {
-#if AP_HAS_RESPONSE_BUCKETS
- if (AP_BUCKET_IS_RESPONSE(b)) {
- header_len += (apr_off_t)response_length_estimate(b->data);
- }
- if (AP_BUCKET_IS_HEADERS(b)) {
- header_len += (apr_off_t)headers_length_estimate(b->data);
- }
-#else
- if (H2_BUCKET_IS_HEADERS(b)) {
- header_len += (apr_off_t)h2_bucket_headers_headers_length(b);
- }
-#endif /* AP_HAS_RESPONSE_BUCKETS */
- }
- }
-
rv = h2_beam_send(conn_ctx->beam_out, c2, bb, APR_BLOCK_READ, &written);
-
if (APR_STATUS_IS_EAGAIN(rv)) {
rv = APR_SUCCESS;
}
- if (written && h2_c_logio_add_bytes_out) {
- h2_c_logio_add_bytes_out(c2, written + header_len);
- }
return rv;
}
@@ -403,30 +403,56 @@ static apr_status_t h2_c2_filter_out(ap_filter_t* f, apr_bucket_brigade* bb)
return rv;
}
-static void check_push(request_rec *r, const char *tag)
+static int addn_headers(void *udata, const char *name, const char *value)
{
- apr_array_header_t *push_list = h2_config_push_list(r);
+ apr_table_t *dest = udata;
+ apr_table_addn(dest, name, value);
+ return 1;
+}
- if (!r->expecting_100 && push_list && push_list->nelts > 0) {
- int i, old_status;
- const char *old_line;
+static void check_early_hints(request_rec *r, const char *tag)
+{
+ apr_array_header_t *push_list = h2_config_push_list(r);
+ apr_table_t *early_headers = h2_config_early_headers(r);
+
+ if (!r->expecting_100 &&
+ ((push_list && push_list->nelts > 0) ||
+ (early_headers && !apr_is_empty_table(early_headers)))) {
+ int have_hints = 0, i;
+
+ if (push_list && push_list->nelts > 0) {
+ ap_log_rerror(APLOG_MARK, APLOG_TRACE1, 0, r,
+ "%s, early announcing %d resources for push",
+ tag, push_list->nelts);
+ for (i = 0; i < push_list->nelts; ++i) {
+ h2_push_res *push = &APR_ARRAY_IDX(push_list, i, h2_push_res);
+ apr_table_add(r->headers_out, "Link",
+ apr_psprintf(r->pool, "<%s>; rel=preload%s",
+ push->uri_ref, push->critical? "; critical" : ""));
+ }
+ have_hints = 1;
+ }
+ if (early_headers && !apr_is_empty_table(early_headers)) {
+ apr_table_do(addn_headers, r->headers_out, early_headers, NULL);
+ have_hints = 1;
+ }
- ap_log_rerror(APLOG_MARK, APLOG_TRACE1, 0, r,
- "%s, early announcing %d resources for push",
- tag, push_list->nelts);
- for (i = 0; i < push_list->nelts; ++i) {
- h2_push_res *push = &APR_ARRAY_IDX(push_list, i, h2_push_res);
- apr_table_add(r->headers_out, "Link",
- apr_psprintf(r->pool, "<%s>; rel=preload%s",
- push->uri_ref, push->critical? "; critical" : ""));
+ if (have_hints) {
+ int old_status;
+ const char *old_line;
+
+ if (h2_config_rgeti(r, H2_CONF_PUSH) == 0 &&
+ h2_config_sgeti(r->server, H2_CONF_PUSH) != 0) {
+ apr_table_setn(r->connection->notes, H2_PUSH_MODE_NOTE, "0");
+ }
+ old_status = r->status;
+ old_line = r->status_line;
+ r->status = 103;
+ r->status_line = "103 Early Hints";
+ ap_send_interim_response(r, 1);
+ r->status = old_status;
+ r->status_line = old_line;
}
- old_status = r->status;
- old_line = r->status_line;
- r->status = 103;
- r->status_line = "103 Early Hints";
- ap_send_interim_response(r, 1);
- r->status = old_status;
- r->status_line = old_line;
}
}
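
For reference, the status save/restore around ap_send_interim_response() above is the general way to emit a 103 from httpd code. A minimal sketch with a hypothetical handler and Link value:

    static void sketch_send_early_hints(request_rec *r)
    {
        int old_status;
        const char *old_line;

        if (r->expecting_100) {
            return; /* do not mix 103 with an expected 100-continue */
        }
        apr_table_add(r->headers_out, "Link", "</style.css>; rel=preload");

        old_status = r->status;
        old_line = r->status_line;
        r->status = 103;
        r->status_line = "103 Early Hints";
        ap_send_interim_response(r, 1); /* 1 == include r->headers_out */
        r->status = old_status;
        r->status_line = old_line;
    }
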
@@ -439,11 +465,42 @@ static int c2_hook_fixups(request_rec *r)
return DECLINED;
}
- check_push(r, "late_fixup");
+ check_early_hints(r, "late_fixup");
return DECLINED;
}
+static apr_status_t http2_get_pollfd_from_conn(conn_rec *c,
+ struct apr_pollfd_t *pfd,
+ apr_interval_time_t *ptimeout)
+{
+#if H2_USE_PIPES
+ if (c->master) {
+ h2_conn_ctx_t *ctx = h2_conn_ctx_get(c);
+ if (ctx) {
+ if (ctx->beam_in && ctx->pipe_in[H2_PIPE_OUT]) {
+ pfd->desc_type = APR_POLL_FILE;
+ pfd->desc.f = ctx->pipe_in[H2_PIPE_OUT];
+ if (ptimeout)
+ *ptimeout = h2_beam_timeout_get(ctx->beam_in);
+ }
+ else {
+ /* no input */
+ pfd->desc_type = APR_NO_DESC;
+ if (ptimeout)
+ *ptimeout = -1;
+ }
+ return APR_SUCCESS;
+ }
+ }
+#else
+ (void)c;
+ (void)pfd;
+ (void)ptimeout;
+#endif /* H2_USE_PIPES */
+ return APR_ENOTIMPL;
+}
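
A hedged sketch of how another module could consume the optional function registered in h2_c2_register_hooks() below, assuming mod_http2.h declares it via APR_DECLARE_OPTIONAL_FN (as the APR_REGISTER_OPTIONAL_FN call implies); error handling kept minimal:

    #include <string.h>
    #include "httpd.h"
    #include "apr_poll.h"
    #include "apr_optional.h"
    #include "mod_http2.h"

    static apr_status_t sketch_wait_for_c2_input(conn_rec *c, apr_pool_t *p)
    {
        APR_OPTIONAL_FN_TYPE(http2_get_pollfd_from_conn) *get_pollfd;
        apr_pollfd_t pfd;
        apr_interval_time_t timeout = -1;
        apr_pollset_t *pollset;
        const apr_pollfd_t *results;
        apr_int32_t nresults;
        apr_status_t rv;

        get_pollfd = APR_RETRIEVE_OPTIONAL_FN(http2_get_pollfd_from_conn);
        if (!get_pollfd) return APR_ENOTIMPL;

        memset(&pfd, 0, sizeof(pfd));
        rv = get_pollfd(c, &pfd, &timeout);
        if (rv != APR_SUCCESS || pfd.desc_type == APR_NO_DESC) return rv;

        rv = apr_pollset_create(&pollset, 1, p, 0);
        if (rv != APR_SUCCESS) return rv;
        pfd.p = p;
        pfd.reqevents = APR_POLLIN;
        apr_pollset_add(pollset, &pfd);
        /* blocks until c2 input is readable or the beam timeout expires */
        return apr_pollset_poll(pollset, timeout, &nresults, &results);
    }
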
+
#if AP_HAS_RESPONSE_BUCKETS
static void c2_pre_read_request(request_rec *r, conn_rec *c2)
@@ -545,8 +602,14 @@ void h2_c2_register_hooks(void)
/* We need to manipulate the standard HTTP/1.1 protocol filters and
* install our own. This needs to be done very early. */
ap_hook_pre_read_request(c2_pre_read_request, NULL, NULL, APR_HOOK_MIDDLE);
- ap_hook_post_read_request(c2_post_read_request, NULL, NULL, APR_HOOK_REALLY_FIRST);
+ ap_hook_post_read_request(c2_post_read_request, NULL, NULL,
+ APR_HOOK_REALLY_FIRST);
ap_hook_fixups(c2_hook_fixups, NULL, NULL, APR_HOOK_LAST);
+#if H2_USE_POLLFD_FROM_CONN
+ ap_hook_get_pollfd_from_conn(http2_get_pollfd_from_conn, NULL, NULL,
+ APR_HOOK_MIDDLE);
+#endif
+ APR_REGISTER_OPTIONAL_FN(http2_get_pollfd_from_conn);
c2_net_in_filter_handle =
ap_register_input_filter("H2_C2_NET_IN", h2_c2_filter_in,
@@ -655,11 +718,21 @@ static apr_status_t c2_process(h2_conn_ctx_t *conn_ctx, conn_rec *c)
{
const h2_request *req = conn_ctx->request;
conn_state_t *cs = c->cs;
- request_rec *r;
+ request_rec *r = NULL;
const char *tenc;
apr_time_t timeout;
+ apr_status_t rv = APR_SUCCESS;
+
+ if (req->protocol && !strcmp("websocket", req->protocol)) {
+ req = h2_ws_rewrite_request(req, c, conn_ctx->beam_in == NULL);
+ if (!req) {
+ rv = APR_EGENERAL;
+ goto cleanup;
+ }
+ }
+
+ r = h2_create_request_rec(req, c, conn_ctx->beam_in == NULL);
- r = h2_create_request_rec(conn_ctx->request, c, conn_ctx->beam_in == NULL);
if (!r) {
ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c,
"h2_c2(%s-%d): create request_rec failed, r=NULL",
@@ -722,7 +795,7 @@ static apr_status_t c2_process(h2_conn_ctx_t *conn_ctx, conn_rec *c)
cs->state = CONN_STATE_WRITE_COMPLETION;
cleanup:
- return APR_SUCCESS;
+ return rv;
}
conn_rec *h2_c2_create(conn_rec *c1, apr_pool_t *parent,
@@ -793,7 +866,7 @@ static int h2_c2_hook_post_read_request(request_rec *r)
{
h2_conn_ctx_t *conn_ctx = h2_conn_ctx_get(r->connection);
- if (conn_ctx && conn_ctx->stream_id) {
+ if (conn_ctx && conn_ctx->stream_id && ap_is_initial_req(r)) {
ap_log_rerror(APLOG_MARK, APLOG_TRACE3, 0, r,
"h2_c2(%s-%d): adding request filters",
@@ -845,6 +918,11 @@ void h2_c2_register_hooks(void)
* install our own. This needs to be done very early. */
ap_hook_post_read_request(h2_c2_hook_post_read_request, NULL, NULL, APR_HOOK_REALLY_FIRST);
ap_hook_fixups(c2_hook_fixups, NULL, NULL, APR_HOOK_LAST);
+#if H2_USE_POLLFD_FROM_CONN
+ ap_hook_get_pollfd_from_conn(http2_get_pollfd_from_conn, NULL, NULL,
+ APR_HOOK_MIDDLE);
+#endif
+ APR_REGISTER_OPTIONAL_FN(http2_get_pollfd_from_conn);
ap_register_input_filter("H2_C2_NET_IN", h2_c2_filter_in,
NULL, AP_FTYPE_NETWORK);
diff --git a/modules/http2/h2_c2_filter.c b/modules/http2/h2_c2_filter.c
index 37254fc..554f88b 100644
--- a/modules/http2/h2_c2_filter.c
+++ b/modules/http2/h2_c2_filter.c
@@ -39,6 +39,7 @@
#include "h2_c2.h"
#include "h2_mplx.h"
#include "h2_request.h"
+#include "h2_ws.h"
#include "h2_util.h"
@@ -108,20 +109,39 @@ apr_status_t h2_c2_filter_request_in(ap_filter_t *f,
/* This filter is a one-time wonder */
ap_remove_input_filter(f);
- if (f->c->master && (conn_ctx = h2_conn_ctx_get(f->c)) && conn_ctx->stream_id) {
- if (conn_ctx->request->http_status != H2_HTTP_STATUS_UNSET) {
+ if (f->c->master && (conn_ctx = h2_conn_ctx_get(f->c)) &&
+ conn_ctx->stream_id) {
+ const h2_request *req = conn_ctx->request;
+
+ if (req->http_status == H2_HTTP_STATUS_UNSET &&
+ req->protocol && !strcmp("websocket", req->protocol)) {
+ req = h2_ws_rewrite_request(req, f->c, conn_ctx->beam_in == NULL);
+ if (!req)
+ return APR_EGENERAL;
+ }
+
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, f->c,
+ "h2_c2_filter_request_in(%s): adding request bucket",
+ conn_ctx->id);
+ b = h2_request_create_bucket(req, f->r);
+ APR_BRIGADE_INSERT_TAIL(bb, b);
+
+ if (req->http_status != H2_HTTP_STATUS_UNSET) {
/* error was encountered preparing this request */
- b = ap_bucket_error_create(conn_ctx->request->http_status, NULL, f->r->pool,
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, f->c,
+ "h2_c2_filter_request_in(%s): adding error bucket %d",
+ conn_ctx->id, req->http_status);
+ b = ap_bucket_error_create(req->http_status, NULL, f->r->pool,
f->c->bucket_alloc);
APR_BRIGADE_INSERT_TAIL(bb, b);
return APR_SUCCESS;
}
- b = h2_request_create_bucket(conn_ctx->request, f->r);
- APR_BRIGADE_INSERT_TAIL(bb, b);
+
if (!conn_ctx->beam_in) {
b = apr_bucket_eos_create(f->c->bucket_alloc);
APR_BRIGADE_INSERT_TAIL(bb, b);
}
+
return APR_SUCCESS;
}
@@ -511,10 +531,10 @@ static apr_status_t pass_response(h2_conn_ctx_t *conn_ctx, ap_filter_t *f,
{
apr_bucket *b;
apr_status_t status;
-
h2_headers *response = h2_headers_create(parser->http_status,
make_table(parser),
- NULL, 0, parser->pool);
+ parser->c->notes,
+ 0, parser->pool);
apr_brigade_cleanup(parser->tmp);
b = h2_bucket_headers_create(parser->c->bucket_alloc, response);
APR_BRIGADE_INSERT_TAIL(parser->tmp, b);
@@ -636,9 +656,11 @@ apr_status_t h2_c2_filter_catch_h1_out(ap_filter_t* f, apr_bucket_brigade* bb)
int result = ap_map_http_request_error(conn_ctx->last_err,
HTTP_INTERNAL_SERVER_ERROR);
request_rec *r = h2_create_request_rec(conn_ctx->request, f->c, 1);
- ap_die((result >= 400)? result : HTTP_INTERNAL_SERVER_ERROR, r);
- b = ap_bucket_eor_create(f->c->bucket_alloc, r);
- APR_BRIGADE_INSERT_TAIL(bb, b);
+ if (r) {
+ ap_die((result >= 400)? result : HTTP_INTERNAL_SERVER_ERROR, r);
+ b = ap_bucket_eor_create(f->c->bucket_alloc, r);
+ APR_BRIGADE_INSERT_TAIL(bb, b);
+ }
}
}
/* There are cases where we need to parse a serialized http/1.1 response.
@@ -892,10 +914,10 @@ static apr_status_t read_and_chunk(ap_filter_t *f, h2_conn_ctx_t *conn_ctx,
}
apr_status_t h2_c2_filter_request_in(ap_filter_t* f,
- apr_bucket_brigade* bb,
- ap_input_mode_t mode,
- apr_read_type_e block,
- apr_off_t readbytes)
+ apr_bucket_brigade* bb,
+ ap_input_mode_t mode,
+ apr_read_type_e block,
+ apr_off_t readbytes)
{
h2_conn_ctx_t *conn_ctx = h2_conn_ctx_get(f->c);
h2_chunk_filter_t *fctx = f->ctx;
diff --git a/modules/http2/h2_config.c b/modules/http2/h2_config.c
index eea4be2..22653d4 100644
--- a/modules/http2/h2_config.c
+++ b/modules/http2/h2_config.c
@@ -70,11 +70,15 @@ typedef struct h2_config {
int push_diary_size; /* # of entries in push diary */
int copy_files; /* if files shall be copied vs setaside on output */
apr_array_header_t *push_list; /* list of h2_push_res configurations */
+ apr_table_t *early_headers; /* HTTP headers for a 103 response */
int early_hints; /* support status code 103 */
int padding_bits;
int padding_always;
int output_buffered;
apr_interval_time_t stream_timeout;/* beam timeout */
+ int max_data_frame_len; /* max # bytes in a single h2 DATA frame */
+ int proxy_requests; /* act as forward proxy */
+ int h2_websockets; /* if mod_h2 negotiating WebSockets */
} h2_config;
typedef struct h2_dir_config {
@@ -82,6 +86,7 @@ typedef struct h2_dir_config {
int h2_upgrade; /* Allow HTTP/1 upgrade to h2/h2c */
int h2_push; /* if HTTP/2 server push is enabled */
apr_array_header_t *push_list; /* list of h2_push_res configurations */
+ apr_table_t *early_headers; /* HTTP headers for a 103 response */
int early_hints; /* support status code 103 */
apr_interval_time_t stream_timeout;/* beam timeout */
} h2_dir_config;
@@ -105,11 +110,15 @@ static h2_config defconf = {
256, /* push diary size */
0, /* copy files across threads */
NULL, /* push list */
+ NULL, /* early headers */
0, /* early hints, http status 103 */
0, /* padding bits */
1, /* padding always */
1, /* stream output buffered */
-1, /* beam timeout */
+ 0, /* max DATA frame len, 0 == no extra limit */
+ 0, /* forward proxy */
+ 0, /* WebSockets negotiation */
};
static h2_dir_config defdconf = {
@@ -117,6 +126,7 @@ static h2_dir_config defdconf = {
-1, /* HTTP/1 Upgrade support */
-1, /* HTTP/2 server push enabled */
NULL, /* push list */
+ NULL, /* early headers */
-1, /* early hints, http status 103 */
-1, /* beam timeout */
};
@@ -148,11 +158,15 @@ void *h2_config_create_svr(apr_pool_t *pool, server_rec *s)
conf->push_diary_size = DEF_VAL;
conf->copy_files = DEF_VAL;
conf->push_list = NULL;
+ conf->early_headers = NULL;
conf->early_hints = DEF_VAL;
conf->padding_bits = DEF_VAL;
conf->padding_always = DEF_VAL;
conf->output_buffered = DEF_VAL;
conf->stream_timeout = DEF_VAL;
+ conf->max_data_frame_len = DEF_VAL;
+ conf->proxy_requests = DEF_VAL;
+ conf->h2_websockets = DEF_VAL;
return conf;
}
@@ -191,10 +205,19 @@ static void *h2_config_merge(apr_pool_t *pool, void *basev, void *addv)
else {
n->push_list = add->push_list? add->push_list : base->push_list;
}
+ if (add->early_headers && base->early_headers) {
+ n->early_headers = apr_table_overlay(pool, add->early_headers, base->early_headers);
+ }
+ else {
+ n->early_headers = add->early_headers? add->early_headers : base->early_headers;
+ }
n->early_hints = H2_CONFIG_GET(add, base, early_hints);
n->padding_bits = H2_CONFIG_GET(add, base, padding_bits);
n->padding_always = H2_CONFIG_GET(add, base, padding_always);
n->stream_timeout = H2_CONFIG_GET(add, base, stream_timeout);
+ n->max_data_frame_len = H2_CONFIG_GET(add, base, max_data_frame_len);
+ n->proxy_requests = H2_CONFIG_GET(add, base, proxy_requests);
+ n->h2_websockets = H2_CONFIG_GET(add, base, h2_websockets);
return n;
}
@@ -232,6 +255,12 @@ void *h2_config_merge_dir(apr_pool_t *pool, void *basev, void *addv)
else {
n->push_list = add->push_list? add->push_list : base->push_list;
}
+ if (add->early_headers && base->early_headers) {
+ n->early_headers = apr_table_overlay(pool, add->early_headers, base->early_headers);
+ }
+ else {
+ n->early_headers = add->early_headers? add->early_headers : base->early_headers;
+ }
n->early_hints = H2_CONFIG_GET(add, base, early_hints);
n->stream_timeout = H2_CONFIG_GET(add, base, stream_timeout);
return n;
@@ -278,6 +307,12 @@ static apr_int64_t h2_srv_config_geti64(const h2_config *conf, h2_config_var_t v
return H2_CONFIG_GET(conf, &defconf, output_buffered);
case H2_CONF_STREAM_TIMEOUT:
return H2_CONFIG_GET(conf, &defconf, stream_timeout);
+ case H2_CONF_MAX_DATA_FRAME_LEN:
+ return H2_CONFIG_GET(conf, &defconf, max_data_frame_len);
+ case H2_CONF_PROXY_REQUESTS:
+ return H2_CONFIG_GET(conf, &defconf, proxy_requests);
+ case H2_CONF_WEBSOCKETS:
+ return H2_CONFIG_GET(conf, &defconf, h2_websockets);
default:
return DEF_VAL;
}
@@ -337,6 +372,15 @@ static void h2_srv_config_seti(h2_config *conf, h2_config_var_t var, int val)
case H2_CONF_OUTPUT_BUFFER:
H2_CONFIG_SET(conf, output_buffered, val);
break;
+ case H2_CONF_MAX_DATA_FRAME_LEN:
+ H2_CONFIG_SET(conf, max_data_frame_len, val);
+ break;
+ case H2_CONF_PROXY_REQUESTS:
+ H2_CONFIG_SET(conf, proxy_requests, val);
+ break;
+ case H2_CONF_WEBSOCKETS:
+ H2_CONFIG_SET(conf, h2_websockets, val);
+ break;
default:
break;
}
@@ -502,6 +546,18 @@ apr_array_header_t *h2_config_push_list(request_rec *r)
return sconf? sconf->push_list : NULL;
}
+apr_table_t *h2_config_early_headers(request_rec *r)
+{
+ const h2_config *sconf;
+ const h2_dir_config *conf = h2_config_rget(r);
+
+ if (conf && conf->early_headers) {
+ return conf->early_headers;
+ }
+ sconf = h2_config_sget(r->server);
+ return sconf? sconf->early_headers : NULL;
+}
+
const struct h2_priority *h2_cconfig_get_priority(conn_rec *c, const char *content_type)
{
const h2_config *conf = h2_config_get(c);
@@ -583,6 +639,17 @@ static const char *h2_conf_set_stream_max_mem_size(cmd_parms *cmd,
return NULL;
}
+static const char *h2_conf_set_max_data_frame_len(cmd_parms *cmd,
+ void *dirconf, const char *value)
+{
+ int val = (int)apr_atoi64(value);
+ if (val < 0) {
+ return "value must be 0 or larger";
+ }
+ CONFIG_CMD_SET(cmd, dirconf, H2_CONF_MAX_DATA_FRAME_LEN, val);
+ return NULL;
+}
+
static const char *h2_conf_set_session_extra_files(cmd_parms *cmd,
void *dirconf, const char *value)
{
@@ -632,6 +699,26 @@ static const char *h2_conf_set_push(cmd_parms *cmd, void *dirconf, const char *v
return "value must be On or Off";
}
+static const char *h2_conf_set_websockets(cmd_parms *cmd,
+ void *dirconf, const char *value)
+{
+ if (!strcasecmp(value, "On")) {
+#if H2_USE_WEBSOCKETS
+ CONFIG_CMD_SET(cmd, dirconf, H2_CONF_WEBSOCKETS, 1);
+ return NULL;
+#elif !H2_USE_PIPES
+ return "HTTP/2 WebSockets are not supported on this platform";
+#else
+ return "HTTP/2 WebSockets are not supported in this server version";
+#endif
+ }
+ else if (!strcasecmp(value, "Off")) {
+ CONFIG_CMD_SET(cmd, dirconf, H2_CONF_WEBSOCKETS, 0);
+ return NULL;
+ }
+ return "value must be On or Off";
+}
+
static const char *h2_conf_add_push_priority(cmd_parms *cmd, void *_cfg,
const char *ctype, const char *sdependency,
const char *sweight)
@@ -812,6 +899,37 @@ static const char *h2_conf_add_push_res(cmd_parms *cmd, void *dirconf,
return NULL;
}
+static const char *h2_conf_add_early_hint(cmd_parms *cmd, void *dirconf,
+ const char *name, const char *value)
+{
+ apr_table_t *hds, **phds;
+
+ if(!name || !*name)
+ return "Early Hint header name must not be empty";
+ if(!value)
+ return "Early Hint header value must not be empty";
+ while (apr_isspace(*value))
+ ++value;
+ if(!*value)
+ return "Early Hint header value must not be empty/only space";
+ if (*ap_scan_http_field_content(value))
+ return "Early Hint header value contains invalid characters";
+
+ if (cmd->path) {
+ phds = &((h2_dir_config*)dirconf)->early_headers;
+ }
+ else {
+ phds = &(h2_config_sget(cmd->server))->early_headers;
+ }
+ hds = *phds;
+ if (!hds) {
+ *phds = hds = apr_table_make(cmd->pool, 10);
+ }
+ apr_table_add(hds, name, value);
+
+ return NULL;
+}
+
static const char *h2_conf_set_early_hints(cmd_parms *cmd,
void *dirconf, const char *value)
{
@@ -872,6 +990,20 @@ static const char *h2_conf_set_stream_timeout(cmd_parms *cmd,
return NULL;
}
+static const char *h2_conf_set_proxy_requests(cmd_parms *cmd,
+ void *dirconf, const char *value)
+{
+ if (!strcasecmp(value, "On")) {
+ CONFIG_CMD_SET(cmd, dirconf, H2_CONF_PROXY_REQUESTS, 1);
+ return NULL;
+ }
+ else if (!strcasecmp(value, "Off")) {
+ CONFIG_CMD_SET(cmd, dirconf, H2_CONF_PROXY_REQUESTS, 0);
+ return NULL;
+ }
+ return "value must be On or Off";
+}
+
void h2_get_workers_config(server_rec *s, int *pminw, int *pmaxw,
apr_time_t *pidle_limit)
{
@@ -937,6 +1069,14 @@ const command_rec h2_cmds[] = {
RSRC_CONF, "set stream output buffer on/off"),
AP_INIT_TAKE1("H2StreamTimeout", h2_conf_set_stream_timeout, NULL,
RSRC_CONF, "set stream timeout"),
+ AP_INIT_TAKE1("H2MaxDataFrameLen", h2_conf_set_max_data_frame_len, NULL,
+ RSRC_CONF, "maximum number of bytes in a single HTTP/2 DATA frame"),
+ AP_INIT_TAKE2("H2EarlyHint", h2_conf_add_early_hint, NULL,
+ OR_FILEINFO|OR_AUTHCFG, "add a 'Link:' header for a 103 Early Hints response."),
+ AP_INIT_TAKE1("H2ProxyRequests", h2_conf_set_proxy_requests, NULL,
+ OR_FILEINFO, "Enables forward proxy requests via HTTP/2"),
+ AP_INIT_TAKE1("H2WebSockets", h2_conf_set_websockets, NULL,
+ RSRC_CONF, "off to disable WebSockets over HTTP/2"),
AP_END_CMD
};
diff --git a/modules/http2/h2_config.h b/modules/http2/h2_config.h
index 6d2e65f..15242db 100644
--- a/modules/http2/h2_config.h
+++ b/modules/http2/h2_config.h
@@ -43,6 +43,9 @@ typedef enum {
H2_CONF_PADDING_ALWAYS,
H2_CONF_OUTPUT_BUFFER,
H2_CONF_STREAM_TIMEOUT,
+ H2_CONF_MAX_DATA_FRAME_LEN,
+ H2_CONF_PROXY_REQUESTS,
+ H2_CONF_WEBSOCKETS,
} h2_config_var_t;
struct apr_hash_t;
@@ -86,6 +89,7 @@ int h2_config_rgeti(request_rec *r, h2_config_var_t var);
apr_int64_t h2_config_rgeti64(request_rec *r, h2_config_var_t var);
apr_array_header_t *h2_config_push_list(request_rec *r);
+apr_table_t *h2_config_early_headers(request_rec *r);
void h2_get_workers_config(server_rec *s, int *pminw, int *pmaxw,
diff --git a/modules/http2/h2_conn_ctx.h b/modules/http2/h2_conn_ctx.h
index 35987bc..3b44856 100644
--- a/modules/http2/h2_conn_ctx.h
+++ b/modules/http2/h2_conn_ctx.h
@@ -53,7 +53,8 @@ struct h2_conn_ctx_t {
const struct h2_request *request; /* c2: the request to process */
struct h2_bucket_beam *beam_out; /* c2: data out, created from req_pool */
struct h2_bucket_beam *beam_in; /* c2: data in or NULL, borrowed from request stream */
- unsigned int input_chunked; /* c2: if input needs HTTP/1.1 chunking applied */
+ unsigned input_chunked:1; /* c2: if input needs HTTP/1.1 chunking applied */
+ unsigned is_upgrade:1; /* c2: if request is an HTTP Upgrade */
apr_file_t *pipe_in[2]; /* c2: input produced notification pipe */
apr_pollfd_t pfd; /* c1: poll socket input, c2: NUL */
@@ -61,6 +62,7 @@ struct h2_conn_ctx_t {
int has_final_response; /* final HTTP response passed on out */
apr_status_t last_err; /* APR_SUCCES or last error encountered in filters */
+ apr_off_t bytes_sent; /* c2: bytes actually sent via c1 */
/* atomic */ apr_uint32_t started; /* c2: processing was started */
apr_time_t started_at; /* c2: when processing started */
/* atomic */ apr_uint32_t done; /* c2: processing has finished */
diff --git a/modules/http2/h2_headers.c b/modules/http2/h2_headers.c
index cbc7b01..d9b3fd0 100644
--- a/modules/http2/h2_headers.c
+++ b/modules/http2/h2_headers.c
@@ -90,9 +90,18 @@ h2_headers *h2_bucket_headers_get(apr_bucket *b)
return NULL;
}
+static void bucket_destroy(void *data)
+{
+ h2_bucket_headers *h = data;
+
+ if (apr_bucket_shared_destroy(h)) {
+ apr_bucket_free(h);
+ }
+}
+
const apr_bucket_type_t h2_bucket_type_headers = {
"H2HEADERS", 5, APR_BUCKET_METADATA,
- apr_bucket_destroy_noop,
+ bucket_destroy,
bucket_read,
apr_bucket_setaside_noop,
apr_bucket_split_notimpl,
@@ -144,6 +153,9 @@ h2_headers *h2_headers_rcreate(request_rec *r, int status,
const apr_table_t *header, apr_pool_t *pool)
{
h2_headers *headers = h2_headers_create(status, header, r->notes, 0, pool);
+ ap_log_rerror(APLOG_MARK, APLOG_TRACE1, headers->status, r,
+ "h2_headers_rcreate(%ld): status=%d",
+ (long)r->connection->id, status);
if (headers->status == HTTP_FORBIDDEN) {
request_rec *r_prev;
for (r_prev = r; r_prev != NULL; r_prev = r_prev->prev) {
diff --git a/modules/http2/h2_mplx.c b/modules/http2/h2_mplx.c
index 99c47ea..2aeea42 100644
--- a/modules/http2/h2_mplx.c
+++ b/modules/http2/h2_mplx.c
@@ -146,6 +146,7 @@ static void m_stream_cleanup(h2_mplx *m, h2_stream *stream)
if (c2_ctx->beam_in) {
h2_beam_on_send(c2_ctx->beam_in, NULL, NULL);
h2_beam_on_received(c2_ctx->beam_in, NULL, NULL);
+ h2_beam_on_eagain(c2_ctx->beam_in, NULL, NULL);
h2_beam_on_consumed(c2_ctx->beam_in, NULL, NULL);
}
}
@@ -333,7 +334,6 @@ h2_mplx *h2_mplx_c1_create(int child_num, apr_uint32_t id, h2_stream *stream0,
apr_pollset_add(m->pollset, &conn_ctx->pfd);
}
- m->scratch_r = apr_pcalloc(m->pool, sizeof(*m->scratch_r));
m->max_spare_transits = 3;
m->c2_transits = apr_array_make(m->pool, (int)m->max_spare_transits,
sizeof(h2_c2_transit*));
@@ -394,6 +394,31 @@ apr_status_t h2_mplx_c1_streams_do(h2_mplx *m, h2_mplx_stream_cb *cb, void *ctx)
return APR_SUCCESS;
}
+typedef struct {
+ int stream_count;
+ int stream_want_send;
+} stream_iter_aws_t;
+
+static int m_stream_want_send_data(void *ctx, void *stream)
+{
+ stream_iter_aws_t *x = ctx;
+ ++x->stream_count;
+ if (h2_stream_wants_send_data(stream))
+ ++x->stream_want_send;
+ return 1;
+}
+
+int h2_mplx_c1_all_streams_want_send_data(h2_mplx *m)
+{
+ stream_iter_aws_t x;
+ x.stream_count = 0;
+ x.stream_want_send = 0;
+ H2_MPLX_ENTER(m);
+ h2_ihash_iter(m->streams, m_stream_want_send_data, &x);
+ H2_MPLX_LEAVE(m);
+ return x.stream_count && (x.stream_count == x.stream_want_send);
+}
+
static int m_report_stream_iter(void *ctx, void *val) {
h2_mplx *m = ctx;
h2_stream *stream = val;
@@ -441,6 +466,8 @@ static int m_stream_cancel_iter(void *ctx, void *val) {
return 0;
}
+static void c1_purge_streams(h2_mplx *m);
+
void h2_mplx_c1_destroy(h2_mplx *m)
{
apr_status_t status;
@@ -509,7 +536,9 @@ void h2_mplx_c1_destroy(h2_mplx *m)
h2_ihash_count(m->shold));
h2_ihash_iter(m->shold, m_unexpected_stream_iter, m);
}
-
+
+ c1_purge_streams(m);
+
m->c1->aborted = old_aborted;
H2_MPLX_LEAVE(m);
@@ -542,16 +571,6 @@ const h2_stream *h2_mplx_c2_stream_get(h2_mplx *m, int stream_id)
}
-static void c1_update_scoreboard(h2_mplx *m, h2_stream *stream)
-{
- if (stream->c2) {
- m->scratch_r->connection = stream->c2;
- m->scratch_r->bytes_sent = stream->out_frame_octets;
- ap_increment_counts(m->c1->sbh, m->scratch_r);
- m->scratch_r->connection = NULL;
- }
-}
-
static void c1_purge_streams(h2_mplx *m)
{
h2_stream *stream;
@@ -561,8 +580,6 @@ static void c1_purge_streams(h2_mplx *m)
stream = APR_ARRAY_IDX(m->spurge, i, h2_stream*);
ap_assert(stream->state == H2_SS_CLEANUP);
- c1_update_scoreboard(m, stream);
-
if (stream->input) {
h2_beam_destroy(stream->input, m->c1);
stream->input = NULL;
@@ -585,6 +602,15 @@ static void c1_purge_streams(h2_mplx *m)
apr_array_clear(m->spurge);
}
+void h2_mplx_c1_going_keepalive(h2_mplx *m)
+{
+ H2_MPLX_ENTER_ALWAYS(m);
+ if (m->spurge->nelts) {
+ c1_purge_streams(m);
+ }
+ H2_MPLX_LEAVE(m);
+}
+
apr_status_t h2_mplx_c1_poll(h2_mplx *m, apr_interval_time_t timeout,
stream_ev_callback *on_stream_input,
stream_ev_callback *on_stream_output,
@@ -653,8 +679,12 @@ static apr_status_t c1_process_stream(h2_mplx *m,
if (APLOGctrace1(m->c1)) {
const h2_request *r = stream->request;
ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c1,
- H2_STRM_MSG(stream, "process %s %s://%s%s"),
- r->method, r->scheme, r->authority, r->path);
+ H2_STRM_MSG(stream, "process %s%s%s %s%s%s%s"),
+ r->protocol? r->protocol : "",
+ r->protocol? " " : "",
+ r->method, r->scheme? r->scheme : "",
+ r->scheme? "://" : "",
+ r->authority, r->path? r->path: "");
}
stream->scheduled = 1;
@@ -765,6 +795,19 @@ static void c2_beam_input_read_notify(void *ctx, h2_bucket_beam *beam)
}
}
+static void c2_beam_input_read_eagain(void *ctx, h2_bucket_beam *beam)
+{
+ conn_rec *c = ctx;
+ h2_conn_ctx_t *conn_ctx = h2_conn_ctx_get(c);
+ /* Installed in the input bucket beams when we use pipes.
+ * Drain the pipe just before the beam returns APR_EAGAIN, so that
+ * polling on the pipe can rest cleanly while the beam is empty. */
+ if (conn_ctx && conn_ctx->pipe_in[H2_PIPE_OUT]) {
+ h2_util_drain_pipe(conn_ctx->pipe_in[H2_PIPE_OUT]);
+ }
+}
+
static void c2_beam_output_write_notify(void *ctx, h2_bucket_beam *beam)
{
conn_rec *c = ctx;
@@ -809,6 +852,9 @@ static apr_status_t c2_setup_io(h2_mplx *m, conn_rec *c2, h2_stream *stream, h2_
c2->pool, c2->pool);
if (APR_SUCCESS != rv) goto cleanup;
#endif
+ h2_beam_on_eagain(stream->input, c2_beam_input_read_eagain, c2);
+ if (!h2_beam_empty(stream->input))
+ c2_beam_input_write_notify(c2, stream->input);
}
cleanup:
@@ -915,6 +961,15 @@ static void s_c2_done(h2_mplx *m, conn_rec *c2, h2_conn_ctx_t *conn_ctx)
"h2_c2(%s-%d): processing finished without final response",
conn_ctx->id, conn_ctx->stream_id);
c2->aborted = 1;
+ if (conn_ctx->beam_out)
+ h2_beam_abort(conn_ctx->beam_out, c2);
+ }
+ else if (!conn_ctx->beam_out || !h2_beam_is_complete(conn_ctx->beam_out)) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, conn_ctx->last_err, c2,
+ "h2_c2(%s-%d): processing finished with incomplete output",
+ conn_ctx->id, conn_ctx->stream_id);
+ c2->aborted = 1;
+ h2_beam_abort(conn_ctx->beam_out, c2);
}
else if (!c2->aborted) {
s_mplx_be_happy(m, c2, conn_ctx);
@@ -1064,14 +1119,32 @@ static int reset_is_acceptable(h2_stream *stream)
return 1; /* otherwise, be forgiving */
}
-apr_status_t h2_mplx_c1_client_rst(h2_mplx *m, int stream_id)
+apr_status_t h2_mplx_c1_client_rst(h2_mplx *m, int stream_id, h2_stream *stream)
{
- h2_stream *stream;
apr_status_t status = APR_SUCCESS;
+ int registered;
H2_MPLX_ENTER_ALWAYS(m);
- stream = h2_ihash_get(m->streams, stream_id);
- if (stream && !reset_is_acceptable(stream)) {
+ registered = (h2_ihash_get(m->streams, stream_id) != NULL);
+ if (!stream) {
+ /* a RST might arrive so late that we have already forgotten
+ * about the stream. Seems ok. */
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, m->c1,
+ H2_MPLX_MSG(m, "RST on unknown stream %d"), stream_id);
+ AP_DEBUG_ASSERT(!registered);
+ }
+ else if (!registered) {
+ /* a RST on a stream that mplx has not been told about, but
+ * which the session knows. Very early and annoying. */
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, m->c1,
+ H2_STRM_MSG(stream, "very early RST, drop"));
+ h2_stream_set_monitor(stream, NULL);
+ h2_stream_rst(stream, H2_ERR_STREAM_CLOSED);
+ h2_stream_dispatch(stream, H2_SEV_EOS_SENT);
+ m_stream_cleanup(m, stream);
+ m_be_annoyed(m);
+ }
+ else if (!reset_is_acceptable(stream)) {
m_be_annoyed(m);
}
H2_MPLX_LEAVE(m);
diff --git a/modules/http2/h2_mplx.h b/modules/http2/h2_mplx.h
index 1f79aa8..860f916 100644
--- a/modules/http2/h2_mplx.h
+++ b/modules/http2/h2_mplx.h
@@ -99,8 +99,6 @@ struct h2_mplx {
struct h2_workers *workers; /* h2 workers process wide instance */
- request_rec *scratch_r; /* pseudo request_rec for scoreboard reporting */
-
apr_uint32_t max_spare_transits; /* max number of transit pools idling */
apr_array_header_t *c2_transits; /* base pools for running c2 connections */
};
@@ -194,11 +192,17 @@ typedef int h2_mplx_stream_cb(struct h2_stream *s, void *userdata);
apr_status_t h2_mplx_c1_streams_do(h2_mplx *m, h2_mplx_stream_cb *cb, void *ctx);
/**
+ * Return != 0 iff all open streams want to send data
+ */
+int h2_mplx_c1_all_streams_want_send_data(h2_mplx *m);
+
+/**
* A stream has been RST_STREAM by the client. Abort
* any processing going on and remove from processing
* queue.
*/
-apr_status_t h2_mplx_c1_client_rst(h2_mplx *m, int stream_id);
+apr_status_t h2_mplx_c1_client_rst(h2_mplx *m, int stream_id,
+ struct h2_stream *stream);
/**
* Get readonly access to a stream for a secondary connection.
@@ -212,6 +216,14 @@ const struct h2_stream *h2_mplx_c2_stream_get(h2_mplx *m, int stream_id);
*/
apr_status_t h2_mplx_worker_pop_c2(h2_mplx *m, conn_rec **out_c2);
+
+/**
+ * Session processing is entering KEEPALIVE, i.e. giving control
+ * to the MPM for monitoring incoming socket events only.
+ * Last chance for maintenance work before losing control.
+ */
+void h2_mplx_c1_going_keepalive(h2_mplx *m);
+
#define H2_MPLX_MSG(m, msg) \
"h2_mplx(%d-%lu): "msg, m->child_num, (unsigned long)m->id
diff --git a/modules/http2/h2_proxy_session.c b/modules/http2/h2_proxy_session.c
index c3f2ff3..db22301 100644
--- a/modules/http2/h2_proxy_session.c
+++ b/modules/http2/h2_proxy_session.c
@@ -37,6 +37,7 @@ typedef struct h2_proxy_stream {
const char *url;
request_rec *r;
+ conn_rec *cfront;
h2_proxy_request *req;
const char *real_server_uri;
const char *p_server_uri;
@@ -401,7 +402,7 @@ static apr_status_t h2_proxy_stream_add_header_out(h2_proxy_stream *stream,
char *s = apr_pstrndup(stream->r->pool, v, vlen);
apr_table_setn(stream->r->notes, "proxy-status", s);
- ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, stream->session->c,
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, stream->cfront,
"h2_proxy_stream(%s-%d): got status %s",
stream->session->id, stream->id, s);
stream->r->status = (int)apr_atoi64(s);
@@ -413,7 +414,7 @@ static apr_status_t h2_proxy_stream_add_header_out(h2_proxy_stream *stream,
return APR_SUCCESS;
}
- ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, stream->session->c,
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, stream->cfront,
"h2_proxy_stream(%s-%d): on_header %s: %s",
stream->session->id, stream->id, n, v);
if (!h2_proxy_res_ignore_header(n, nlen)) {
@@ -425,7 +426,7 @@ static apr_status_t h2_proxy_stream_add_header_out(h2_proxy_stream *stream,
h2_proxy_util_camel_case_header(hname, nlen);
hvalue = apr_pstrndup(stream->pool, v, vlen);
- ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, stream->session->c,
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, stream->cfront,
"h2_proxy_stream(%s-%d): got header %s: %s",
stream->session->id, stream->id, hname, hvalue);
process_proxy_header(headers, stream, hname, hvalue);
@@ -532,22 +533,21 @@ static int stream_response_data(nghttp2_session *ngh2, uint8_t flags,
h2_proxy_stream_end_headers_out(stream);
}
stream->data_received += len;
-
- b = apr_bucket_transient_create((const char*)data, len,
- stream->r->connection->bucket_alloc);
+ b = apr_bucket_transient_create((const char*)data, len,
+ stream->cfront->bucket_alloc);
APR_BRIGADE_INSERT_TAIL(stream->output, b);
/* always flush after a DATA frame, as we have no other indication
* of buffer use */
- b = apr_bucket_flush_create(stream->r->connection->bucket_alloc);
+ b = apr_bucket_flush_create(stream->cfront->bucket_alloc);
APR_BRIGADE_INSERT_TAIL(stream->output, b);
-
+
status = ap_pass_brigade(stream->r->output_filters, stream->output);
ap_log_rerror(APLOG_MARK, APLOG_DEBUG, status, stream->r, APLOGNO(03359)
"h2_proxy_session(%s): stream=%d, response DATA %ld, %ld"
" total", session->id, stream_id, (long)len,
(long)stream->data_received);
if (status != APR_SUCCESS) {
- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, session->c, APLOGNO(03344)
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, status, stream->r, APLOGNO(03344)
"h2_proxy_session(%s): passing output on stream %d",
session->id, stream->id);
nghttp2_submit_rst_stream(ngh2, NGHTTP2_FLAG_NONE,
@@ -818,7 +818,7 @@ static apr_status_t open_stream(h2_proxy_session *session, const char *url,
{
h2_proxy_stream *stream;
apr_uri_t puri;
- const char *authority, *scheme, *path;
+ const char *authority, *scheme, *path, *orig_host;
apr_status_t status;
proxy_dir_conf *dconf;
@@ -827,12 +827,13 @@ static apr_status_t open_stream(h2_proxy_session *session, const char *url,
stream->pool = r->pool;
stream->url = url;
stream->r = r;
+ stream->cfront = r->connection;
stream->standalone = standalone;
stream->session = session;
stream->state = H2_STREAM_ST_IDLE;
- stream->input = apr_brigade_create(stream->pool, session->c->bucket_alloc);
- stream->output = apr_brigade_create(stream->pool, session->c->bucket_alloc);
+ stream->input = apr_brigade_create(stream->pool, stream->cfront->bucket_alloc);
+ stream->output = apr_brigade_create(stream->pool, stream->cfront->bucket_alloc);
stream->req = h2_proxy_req_create(1, stream->pool);
@@ -841,13 +842,14 @@ static apr_status_t open_stream(h2_proxy_session *session, const char *url,
return status;
scheme = (strcmp(puri.scheme, "h2")? "http" : "https");
-
+ orig_host = apr_table_get(r->headers_in, "Host");
+ if (orig_host == NULL) {
+ orig_host = r->hostname;
+ }
+
dconf = ap_get_module_config(r->per_dir_config, &proxy_module);
if (dconf->preserve_host) {
- authority = apr_table_get(r->headers_in, "Host");
- if (authority == NULL) {
- authority = r->hostname;
- }
+ authority = orig_host;
}
else {
authority = puri.hostname;
@@ -856,7 +858,7 @@ static apr_status_t open_stream(h2_proxy_session *session, const char *url,
/* port info missing and port is not default for scheme: append */
authority = apr_psprintf(stream->pool, "%s:%d", authority, puri.port);
}
- ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, session->c,
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, stream->cfront,
"authority=%s from uri.hostname=%s and uri.port=%d",
authority, puri.hostname, puri.port);
}
@@ -877,8 +879,6 @@ static apr_status_t open_stream(h2_proxy_session *session, const char *url,
if (dconf->add_forwarded_headers) {
if (PROXYREQ_REVERSE == r->proxyreq) {
- const char *buf;
-
/* Add X-Forwarded-For: so that the upstream has a chance to
* determine, where the original request came from.
*/
@@ -888,8 +888,9 @@ static apr_status_t open_stream(h2_proxy_session *session, const char *url,
/* Add X-Forwarded-Host: so that upstream knows what the
* original request hostname was.
*/
- if ((buf = apr_table_get(r->headers_in, "Host"))) {
- apr_table_mergen(stream->req->headers, "X-Forwarded-Host", buf);
+ if (orig_host) {
+ apr_table_mergen(stream->req->headers, "X-Forwarded-Host",
+ orig_host);
}
/* Add X-Forwarded-Server: so that upstream knows what the
@@ -943,7 +944,7 @@ static apr_status_t submit_stream(h2_proxy_session *session, h2_proxy_stream *st
rv = nghttp2_submit_request(session->ngh2, NULL,
hd->nv, hd->nvlen, pp, stream);
- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, APLOGNO(03363)
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, stream->cfront, APLOGNO(03363)
"h2_proxy_session(%s): submit %s%s -> %d",
session->id, stream->req->authority, stream->req->path,
rv);
@@ -1088,7 +1089,7 @@ apr_status_t h2_proxy_session_submit(h2_proxy_session *session,
static void stream_resume(h2_proxy_stream *stream)
{
h2_proxy_session *session = stream->session;
- ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, session->c,
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, stream->cfront,
"h2_proxy_stream(%s-%d): resuming",
session->id, stream->id);
stream->suspended = 0;
@@ -1129,7 +1130,7 @@ static apr_status_t check_suspended(h2_proxy_session *session)
return APR_SUCCESS;
}
else if (status != APR_SUCCESS && !APR_STATUS_IS_EAGAIN(status)) {
- ap_log_cerror(APLOG_MARK, APLOG_WARNING, status, session->c,
+ ap_log_cerror(APLOG_MARK, APLOG_WARNING, status, stream->cfront,
APLOGNO(03382) "h2_proxy_stream(%s-%d): check input",
session->id, stream_id);
stream_resume(stream);
@@ -1366,30 +1367,39 @@ static void ev_stream_done(h2_proxy_session *session, int stream_id,
/* if the stream's connection is aborted, do not send anything
* more on it. */
apr_status_t status = (stream->error_code == 0)? APR_SUCCESS : APR_EINVAL;
- int touched = (stream->data_sent ||
+ int touched = (stream->data_sent || stream->data_received ||
stream_id <= session->last_stream_id);
- if (!session->c->aborted) {
- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, APLOGNO(03364)
+ if (!stream->cfront->aborted) {
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, stream->cfront, APLOGNO(03364)
"h2_proxy_sesssion(%s): stream(%d) closed "
"(touched=%d, error=%d)",
session->id, stream_id, touched, stream->error_code);
if (status != APR_SUCCESS) {
- b = ap_bucket_error_create(HTTP_SERVICE_UNAVAILABLE, NULL, stream->r->pool,
- stream->r->connection->bucket_alloc);
+ /* stream failed. If we have received (and forwarded) response
+ * data already, we need to append an error bucket to inform
+ * consumers.
+ * Otherwise, we have an early fail on the connection and may
+ * retry this request on a new one. In that case, keep the
+ * output virgin so that a new attempt can be made. */
+ if (stream->data_received) {
+ int http_status = ap_map_http_request_error(status, HTTP_BAD_REQUEST);
+ b = ap_bucket_error_create(http_status, NULL, stream->r->pool,
+ stream->cfront->bucket_alloc);
APR_BRIGADE_INSERT_TAIL(stream->output, b);
- b = apr_bucket_eos_create(stream->r->connection->bucket_alloc);
+ b = apr_bucket_eos_create(stream->cfront->bucket_alloc);
APR_BRIGADE_INSERT_TAIL(stream->output, b);
ap_pass_brigade(stream->r->output_filters, stream->output);
+ }
}
else if (!stream->data_received) {
/* if the response had no body, this is the time to flush
* an empty brigade which will also write the response headers */
h2_proxy_stream_end_headers_out(stream);
stream->data_received = 1;
- b = apr_bucket_flush_create(stream->r->connection->bucket_alloc);
+ b = apr_bucket_flush_create(stream->cfront->bucket_alloc);
APR_BRIGADE_INSERT_TAIL(stream->output, b);
- b = apr_bucket_eos_create(stream->r->connection->bucket_alloc);
+ b = apr_bucket_eos_create(stream->cfront->bucket_alloc);
APR_BRIGADE_INSERT_TAIL(stream->output, b);
ap_pass_brigade(stream->r->output_filters, stream->output);
}
@@ -1399,7 +1409,7 @@ static void ev_stream_done(h2_proxy_session *session, int stream_id,
h2_proxy_ihash_remove(session->streams, stream_id);
h2_proxy_iq_remove(session->suspended, stream_id);
if (session->done) {
- session->done(session, stream->r, status, touched);
+ session->done(session, stream->r, status, touched, stream->error_code);
}
}
@@ -1669,9 +1679,19 @@ static int done_iter(void *udata, void *val)
{
cleanup_iter_ctx *ctx = udata;
h2_proxy_stream *stream = val;
- int touched = (stream->data_sent ||
+ int touched = (stream->data_sent || stream->data_received ||
stream->id <= ctx->session->last_stream_id);
- ctx->done(ctx->session, stream->r, APR_ECONNABORTED, touched);
+ if (touched && stream->output) {
+ apr_bucket *b = ap_bucket_error_create(HTTP_BAD_GATEWAY, NULL,
+ stream->r->pool,
+ stream->cfront->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(stream->output, b);
+ b = apr_bucket_eos_create(stream->cfront->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(stream->output, b);
+ ap_pass_brigade(stream->r->output_filters, stream->output);
+ }
+ ctx->done(ctx->session, stream->r, APR_ECONNABORTED, touched,
+ stream->error_code);
return 1;
}
@@ -1690,6 +1710,12 @@ void h2_proxy_session_cleanup(h2_proxy_session *session,
}
}
+int h2_proxy_session_is_reusable(h2_proxy_session *session)
+{
+ return (session->state != H2_PROXYS_ST_DONE) &&
+ h2_proxy_ihash_empty(session->streams);
+}
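
A hedged sketch of how a caller on the mod_proxy_http2 side might use this predicate when handing the backend connection back; the proxy_conn_rec handling is illustrative, not taken from this patch:

    static void sketch_release_backend(proxy_conn_rec *p_conn,
                                       h2_proxy_session *session)
    {
        /* With 'close' set, ap_proxy_release_connection() will not return
         * the backend socket to the reuse pool. */
        if (!h2_proxy_session_is_reusable(session)) {
            p_conn->close = 1;
        }
    }
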
+
static int ping_arrived_iter(void *udata, void *val)
{
h2_proxy_stream *stream = val;
diff --git a/modules/http2/h2_proxy_session.h b/modules/http2/h2_proxy_session.h
index f40e5ee..3bc16d7 100644
--- a/modules/http2/h2_proxy_session.h
+++ b/modules/http2/h2_proxy_session.h
@@ -68,7 +68,8 @@ typedef enum {
typedef struct h2_proxy_session h2_proxy_session;
typedef void h2_proxy_request_done(h2_proxy_session *s, request_rec *r,
- apr_status_t status, int touched);
+ apr_status_t status, int touched,
+ int error_code);
struct h2_proxy_session {
const char *id;
@@ -130,4 +131,6 @@ void h2_proxy_session_cleanup(h2_proxy_session *s, h2_proxy_request_done *done);
#define H2_PROXY_REQ_URL_NOTE "h2-proxy-req-url"
+int h2_proxy_session_is_reusable(h2_proxy_session *s);
+
#endif /* h2_proxy_session_h */
diff --git a/modules/http2/h2_push.c b/modules/http2/h2_push.c
index 462c470..e6a10c5 100644
--- a/modules/http2/h2_push.c
+++ b/modules/http2/h2_push.c
@@ -426,7 +426,7 @@ static void inspect_link(link_ctx *ctx, const char *s, size_t slen)
static int head_iter(void *ctx, const char *key, const char *value)
{
- if (!apr_strnatcasecmp("link", key)) {
+ if (!ap_cstr_casecmp("link", key)) {
inspect_link(ctx, value, strlen(value));
}
return 1;
@@ -502,6 +502,7 @@ static void calc_sha256_hash(h2_push_diary *diary, apr_uint64_t *phash, h2_push
sha256_update(md, push->req->authority);
sha256_update(md, push->req->path);
EVP_DigestFinal(md, hash, &len);
+ EVP_MD_CTX_destroy(md);
val = 0;
for (i = 0; i != len; ++i)
diff --git a/modules/http2/h2_request.c b/modules/http2/h2_request.c
index 20e94cd..2713947 100644
--- a/modules/http2/h2_request.c
+++ b/modules/http2/h2_request.c
@@ -38,6 +38,7 @@
#include "h2_private.h"
#include "h2_config.h"
+#include "h2_conn_ctx.h"
#include "h2_push.h"
#include "h2_request.h"
#include "h2_util.h"
@@ -119,6 +120,7 @@ apr_status_t h2_request_rcreate(h2_request **preq, apr_pool_t *pool,
req->path = path;
req->headers = apr_table_make(pool, 10);
req->http_status = H2_HTTP_STATUS_UNSET;
+ req->request_time = apr_time_now();
x.pool = pool;
x.headers = req->headers;
@@ -166,6 +168,10 @@ apr_status_t h2_request_add_header(h2_request *req, apr_pool_t *pool,
&& !strncmp(H2_HEADER_AUTH, name, nlen)) {
req->authority = apr_pstrndup(pool, value, vlen);
}
+ else if (H2_HEADER_PROTO_LEN == nlen
+ && !strncmp(H2_HEADER_PROTO, name, nlen)) {
+ req->protocol = apr_pstrndup(pool, value, vlen);
+ }
else {
char buffer[32];
memset(buffer, 0, 32);
@@ -214,6 +220,7 @@ h2_request *h2_request_clone(apr_pool_t *p, const h2_request *src)
dst->scheme = apr_pstrdup(p, src->scheme);
dst->authority = apr_pstrdup(p, src->authority);
dst->path = apr_pstrdup(p, src->path);
+ dst->protocol = apr_pstrdup(p, src->protocol);
dst->headers = apr_table_clone(p, src->headers);
return dst;
}
@@ -282,13 +289,20 @@ apr_bucket *h2_request_create_bucket(const h2_request *req, request_rec *r)
apr_table_t *headers = apr_table_clone(r->pool, req->headers);
const char *uri = req->path;
+ AP_DEBUG_ASSERT(req->method);
AP_DEBUG_ASSERT(req->authority);
- if (req->scheme && (ap_cstr_casecmp(req->scheme,
- ap_ssl_conn_is_ssl(c->master? c->master : c)? "https" : "http")
- || !ap_cstr_casecmp("CONNECT", req->method))) {
- /* Client sent a non-matching ':scheme' pseudo header or CONNECT.
- * In this case, we use an absolute URI.
- */
+ if (!ap_cstr_casecmp("CONNECT", req->method)) {
+ uri = req->authority;
+ }
+ else if (h2_config_cgeti(c, H2_CONF_PROXY_REQUESTS)) {
+ /* Forward proxying: always absolute uris */
+ uri = apr_psprintf(r->pool, "%s://%s%s",
+ req->scheme, req->authority,
+ req->path ? req->path : "");
+ }
+ else if (req->scheme && ap_cstr_casecmp(req->scheme, "http")
+ && ap_cstr_casecmp(req->scheme, "https")) {
+ /* Client sent a non-http ':scheme', use an absolute URI */
uri = apr_psprintf(r->pool, "%s://%s%s",
req->scheme, req->authority, req->path ? req->path : "");
}
@@ -299,12 +313,13 @@ apr_bucket *h2_request_create_bucket(const h2_request *req, request_rec *r)
#endif
static void assign_headers(request_rec *r, const h2_request *req,
- int no_body)
+ int no_body, int is_connect)
{
const char *cl;
r->headers_in = apr_table_clone(r->pool, req->headers);
- if (req->authority) {
+
+ if (req->authority && !is_connect) {
/* for internal handling, we have to simulate that :authority
* came in as Host:, RFC 9113 ch. says that mismatches between
* :authority and Host: SHOULD be rejected as malformed. However,
@@ -323,36 +338,40 @@ static void assign_headers(request_rec *r, const h2_request *req,
"set 'Host: %s' from :authority", req->authority);
}
- cl = apr_table_get(req->headers, "Content-Length");
- if (no_body) {
- if (!cl && apr_table_get(req->headers, "Content-Type")) {
- /* If we have a content-type, but already seen eos, no more
- * data will come. Signal a zero content length explicitly.
- */
- apr_table_setn(req->headers, "Content-Length", "0");
+ /* Unless we open a byte stream via CONNECT, apply content-length guards. */
+ if (!is_connect) {
+ cl = apr_table_get(req->headers, "Content-Length");
+ if (no_body) {
+ if (!cl && apr_table_get(req->headers, "Content-Type")) {
+ /* If we have a content-type, but already seen eos, no more
+ * data will come. Signal a zero content length explicitly.
+ */
+ apr_table_setn(req->headers, "Content-Length", "0");
+ }
}
- }
#if !AP_HAS_RESPONSE_BUCKETS
- else if (!cl) {
- /* there may be a body and we have internal HTTP/1.1 processing.
- * If the Content-Length is unspecified, we MUST simulate
- * chunked Transfer-Encoding.
- *
- * HTTP/2 does not need a Content-Length for framing. Ideally
- * all clients set the EOS flag on the header frame if they
- * do not intent to send a body. However, forwarding proxies
- * might just no know at the time and send an empty DATA
- * frame with EOS much later.
- */
- apr_table_mergen(r->headers_in, "Transfer-Encoding", "chunked");
- }
+ else if (!cl) {
+ /* there may be a body and we have internal HTTP/1.1 processing.
+ * If the Content-Length is unspecified, we MUST simulate
+ * chunked Transfer-Encoding.
+ *
+ * HTTP/2 does not need a Content-Length for framing. Ideally
+ * all clients set the EOS flag on the header frame if they
+ * do not intend to send a body. However, forwarding proxies
+ * might just not know at the time and send an empty DATA
+ * frame with EOS much later.
+ */
+ apr_table_mergen(r->headers_in, "Transfer-Encoding", "chunked");
+ }
#endif /* else AP_HAS_RESPONSE_BUCKETS */
+ }
}
request_rec *h2_create_request_rec(const h2_request *req, conn_rec *c,
int no_body)
{
int access_status = HTTP_OK;
+ int is_connect = !ap_cstr_casecmp("CONNECT", req->method);
#if AP_MODULE_MAGIC_AT_LEAST(20120211, 106)
request_rec *r = ap_create_request(c);
@@ -361,18 +380,63 @@ request_rec *h2_create_request_rec(const h2_request *req, conn_rec *c,
#endif
#if AP_MODULE_MAGIC_AT_LEAST(20120211, 107)
- assign_headers(r, req, no_body);
+ assign_headers(r, req, no_body, is_connect);
ap_run_pre_read_request(r, c);
/* Time to populate r with the data we have. */
r->request_time = req->request_time;
AP_DEBUG_ASSERT(req->authority);
- if (req->scheme && (ap_cstr_casecmp(req->scheme,
- ap_ssl_conn_is_ssl(c->master? c->master : c)? "https" : "http")
- || !ap_cstr_casecmp("CONNECT", req->method))) {
- /* Client sent a non-matching ':scheme' pseudo header. Forward this
- * via an absolute URI in the request line.
- */
+ if (req->http_status != H2_HTTP_STATUS_UNSET) {
+ access_status = req->http_status;
+ goto die;
+ }
+ else if (is_connect) {
+ /* CONNECT MUST NOT have scheme or path */
+ r->the_request = apr_psprintf(r->pool, "%s %s HTTP/2.0",
+ req->method, req->authority);
+ if (req->scheme) {
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, c, APLOGNO(10458)
+ "':scheme: %s' header present in CONNECT request",
+ req->scheme);
+ access_status = HTTP_BAD_REQUEST;
+ goto die;
+ }
+ else if (req->path) {
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, c, APLOGNO(10459)
+ "':path: %s' header present in CONNECT request",
+ req->path);
+ access_status = HTTP_BAD_REQUEST;
+ goto die;
+ }
+ }
+ else if (req->protocol) {
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, c, APLOGNO(10470)
+ "':protocol: %s' header present in %s request",
+ req->protocol, req->method);
+ access_status = HTTP_BAD_REQUEST;
+ goto die;
+ }
+ else if (h2_config_cgeti(c, H2_CONF_PROXY_REQUESTS)) {
+ if (!req->scheme) {
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, c, APLOGNO(10468)
+ "H2ProxyRequests on, but request misses :scheme");
+ access_status = HTTP_BAD_REQUEST;
+ goto die;
+ }
+ if (!req->authority) {
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, c, APLOGNO(10469)
+ "H2ProxyRequests on, but request misses :authority");
+ access_status = HTTP_BAD_REQUEST;
+ goto die;
+ }
+ r->the_request = apr_psprintf(r->pool, "%s %s://%s%s HTTP/2.0",
+ req->method, req->scheme, req->authority,
+ req->path ? req->path : "");
+ }
+ else if (req->scheme && ap_cstr_casecmp(req->scheme, "http")
+ && ap_cstr_casecmp(req->scheme, "https")) {
+ /* Client sent a ':scheme' pseudo header for something else
+ * than what we have on this connection. Make an absolute URI. */
r->the_request = apr_psprintf(r->pool, "%s %s://%s%s HTTP/2.0",
req->method, req->scheme, req->authority,
req->path ? req->path : "");
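
For illustration (host names hypothetical): a CONNECT request yields a request line such as "CONNECT example.net:443 HTTP/2.0"; with H2ProxyRequests on, or for a non-http(s) :scheme, an absolute form such as "GET http://example.net/index.html HTTP/2.0" is synthesized; all other requests fall through to the usual origin form, e.g. "GET /index.html HTTP/2.0".
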
@@ -413,7 +477,7 @@ request_rec *h2_create_request_rec(const h2_request *req, conn_rec *c,
{
const char *s;
- assign_headers(r, req, no_body);
+ assign_headers(r, req, no_body, is_connect);
ap_run_pre_read_request(r, c);
/* Time to populate r with the data we have. */
@@ -489,6 +553,16 @@ request_rec *h2_create_request_rec(const h2_request *req, conn_rec *c,
return r;
die:
+ if (!r->method) {
+ /* if we fail early, `r` is not properly initialized for error
+ * processing, which accesses fields during message generation.
+ * Make a best effort. */
+ if (!r->the_request) {
+ r->the_request = apr_psprintf(r->pool, "%s %s HTTP/2.0",
+ req->method, req->path);
+ }
+ ap_parse_request_line(r);
+ }
ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, c,
"ap_die(%d) for %s", access_status, r->the_request);
ap_die(access_status, r);
diff --git a/modules/http2/h2_session.c b/modules/http2/h2_session.c
index 7ba49cf..5724fda 100644
--- a/modules/http2/h2_session.c
+++ b/modules/http2/h2_session.c
@@ -319,9 +319,13 @@ static int on_header_cb(nghttp2_session *ngh2, const nghttp2_frame *frame,
status = h2_stream_add_header(stream, (const char *)name, namelen,
(const char *)value, valuelen);
- if (status != APR_SUCCESS
- && (!stream->rtmp
- || stream->rtmp->http_status == H2_HTTP_STATUS_UNSET)) {
+ if (status != APR_SUCCESS &&
+ (!stream->rtmp ||
+ stream->rtmp->http_status == H2_HTTP_STATUS_UNSET ||
+ /* We accept a certain number of failures in order to reply
+ * with an informative HTTP error response like 413. But if the
+ * client gets it too wrong, we fail the request with a RESET of the stream. */
+ stream->request_headers_failed > 100)) {
return NGHTTP2_ERR_TEMPORAL_CALLBACK_FAILURE;
}
return 0;
@@ -402,6 +406,10 @@ static int on_frame_recv_cb(nghttp2_session *ng2s,
H2_SSSN_STRM_MSG(session, frame->hd.stream_id,
"RST_STREAM by client, error=%d"),
(int)frame->rst_stream.error_code);
+ if (stream) {
+ rv = h2_stream_recv_frame(stream, NGHTTP2_RST_STREAM, frame->hd.flags,
+ frame->hd.length + H2_FRAME_HDR_LEN);
+ }
if (stream && stream->initiated_on) {
/* A stream reset on a request we sent it. Normal, when the
* client does not want it. */
@@ -410,7 +418,8 @@ static int on_frame_recv_cb(nghttp2_session *ng2s,
else {
/* A stream reset on a request it sent us. Could happen in a browser
* when the user navigates away or cancels loading - maybe. */
- h2_mplx_c1_client_rst(session->mplx, frame->hd.stream_id);
+ h2_mplx_c1_client_rst(session->mplx, frame->hd.stream_id,
+ stream);
}
++session->streams_reset;
break;
@@ -621,9 +630,8 @@ static int on_invalid_header_cb(nghttp2_session *ngh2,
ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c1, APLOGNO(03456)
H2_SSSN_STRM_MSG(session, frame->hd.stream_id,
- "invalid header '%s: %s'"),
- apr_pstrndup(session->pool, (const char *)name, namelen),
- apr_pstrndup(session->pool, (const char *)value, valuelen));
+ "invalid header '%.*s: %.*s'"),
+ (int)namelen, name, (int)valuelen, value);
stream = get_stream(session, frame->hd.stream_id);
if (stream) {
h2_stream_rst(stream, NGHTTP2_PROTOCOL_ERROR);
@@ -813,6 +821,17 @@ static apr_status_t session_cleanup(h2_session *session, const char *trigger)
"goodbye, clients will be confused, should not happen"));
}
+ if (!h2_iq_empty(session->ready_to_process)) {
+ int sid;
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, c,
+ H2_SSSN_LOG(APLOGNO(10485), session,
+ "cleanup, resetting %d streams in ready-to-process"),
+ h2_iq_count(session->ready_to_process));
+ while ((sid = h2_iq_shift(session->ready_to_process)) > 0) {
+ h2_mplx_c1_client_rst(session->mplx, sid, get_stream(session, sid));
+ }
+ }
+
transit(session, trigger, H2_SESSION_ST_CLEANUP);
h2_mplx_c1_destroy(session->mplx);
session->mplx = NULL;
@@ -902,7 +921,8 @@ apr_status_t h2_session_create(h2_session **psession, conn_rec *c, request_rec *
session->max_stream_count = h2_config_sgeti(s, H2_CONF_MAX_STREAMS);
session->max_stream_mem = h2_config_sgeti(s, H2_CONF_STREAM_MAX_MEM);
-
+ session->max_data_frame_len = h2_config_sgeti(s, H2_CONF_MAX_DATA_FRAME_LEN);
+
session->out_c1_blocked = h2_iq_create(session->pool, (int)session->max_stream_count);
session->ready_to_process = h2_iq_create(session->pool, (int)session->max_stream_count);
@@ -983,13 +1003,15 @@ apr_status_t h2_session_create(h2_session **psession, conn_rec *c, request_rec *
H2_SSSN_LOG(APLOGNO(03200), session,
"created, max_streams=%d, stream_mem=%d, "
"workers_limit=%d, workers_max=%d, "
- "push_diary(type=%d,N=%d)"),
+ "push_diary(type=%d,N=%d), "
+ "max_data_frame_len=%d"),
(int)session->max_stream_count,
(int)session->max_stream_mem,
session->mplx->processing_limit,
session->mplx->processing_max,
session->push_diary->dtype,
- (int)session->push_diary->N);
+ (int)session->push_diary->N,
+ (int)session->max_data_frame_len);
}
apr_pool_pre_cleanup_register(pool, c, session_pool_cleanup);
@@ -1000,7 +1022,7 @@ apr_status_t h2_session_create(h2_session **psession, conn_rec *c, request_rec *
static apr_status_t h2_session_start(h2_session *session, int *rv)
{
apr_status_t status = APR_SUCCESS;
- nghttp2_settings_entry settings[3];
+ nghttp2_settings_entry settings[4];
size_t slen;
int win_size;
@@ -1067,7 +1089,14 @@ static apr_status_t h2_session_start(h2_session *session, int *rv)
settings[slen].value = win_size;
++slen;
}
-
+#if H2_USE_WEBSOCKETS
+ if (h2_config_sgeti(session->s, H2_CONF_WEBSOCKETS)) {
+ settings[slen].settings_id = NGHTTP2_SETTINGS_ENABLE_CONNECT_PROTOCOL;
+ settings[slen].value = 1;
+ ++slen;
+ }
+#endif
+
ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, session->c1,
H2_SSSN_LOG(APLOGNO(03201), session,
"start, INITIAL_WINDOW_SIZE=%ld, MAX_CONCURRENT_STREAMS=%d"),
@@ -1278,8 +1307,11 @@ static apr_status_t h2_session_send(h2_session *session)
goto cleanup;
}
}
- if (h2_c1_io_needs_flush(&session->io)) {
+ if (h2_c1_io_needs_flush(&session->io) ||
+ ngrv == NGHTTP2_ERR_WOULDBLOCK) {
rv = h2_c1_io_assure_flushed(&session->io);
+ if (rv != APR_SUCCESS)
+ goto cleanup;
pending = 0;
}
}
@@ -1636,10 +1668,6 @@ static void on_stream_state_enter(void *ctx, h2_stream *stream)
h2_mplx_c1_stream_cleanup(session->mplx, stream, &session->open_streams);
++session->streams_done;
update_child_status(session, SERVER_BUSY_WRITE, "done", stream);
- if (session->open_streams == 0) {
- h2_session_dispatch_event(session, H2_SESSION_EV_NO_MORE_STREAMS,
- 0, "stream done");
- }
break;
default:
break;
@@ -1915,7 +1943,15 @@ apr_status_t h2_session_process(h2_session *session, int async)
status = h2_mplx_c1_poll(session->mplx, session->s->timeout,
on_stream_input, on_stream_output, session);
if (APR_STATUS_IS_TIMEUP(status)) {
- if (session->open_streams == 0) {
+ /* If we time out with no streams open, no new request has
+ * arrived from the client.
+ * If we time out while nghttp2 has nothing to write, but all
+ * open streams have something to send, we are blocked on HTTP/2
+ * flow control because the client did not send WINDOW_UPDATEs
+ * to us. */
+ if (session->open_streams == 0 ||
+ (!h2_session_want_send(session) &&
+ h2_mplx_c1_all_streams_want_send_data(session->mplx))) {
h2_session_dispatch_event(session, H2_SESSION_EV_CONN_TIMEOUT, status, NULL);
break;
}
@@ -1944,7 +1980,8 @@ leaving:
ap_log_cerror( APLOG_MARK, APLOG_TRACE3, status, c,
H2_SSSN_MSG(session, "process returns"));
}
-
+ h2_mplx_c1_going_keepalive(session->mplx);
+
if (session->state == H2_SESSION_ST_DONE) {
if (session->local.error) {
char buffer[128];
diff --git a/modules/http2/h2_session.h b/modules/http2/h2_session.h
index fbddfdd..3328509 100644
--- a/modules/http2/h2_session.h
+++ b/modules/http2/h2_session.h
@@ -103,7 +103,8 @@ typedef struct h2_session {
apr_size_t max_stream_count; /* max number of open streams */
apr_size_t max_stream_mem; /* max buffer memory for a single stream */
-
+ apr_size_t max_data_frame_len; /* max number of bytes in a single DATA frame */
+
apr_size_t idle_frames; /* number of rcvd frames that kept session in idle state */
apr_interval_time_t idle_delay; /* Time we delay processing rcvd frames in idle state */
diff --git a/modules/http2/h2_stream.c b/modules/http2/h2_stream.c
index cf6f798..ee87555 100644
--- a/modules/http2/h2_stream.c
+++ b/modules/http2/h2_stream.c
@@ -125,7 +125,7 @@ static int trans_on_event[][H2_SS_MAX] = {
{ S_XXX, S_ERR, S_ERR, S_CL_L, S_CLS, S_XXX, S_XXX, S_XXX, },/* EV_CLOSED_L*/
{ S_ERR, S_ERR, S_ERR, S_CL_R, S_ERR, S_CLS, S_NOP, S_NOP, },/* EV_CLOSED_R*/
{ S_CLS, S_CLS, S_CLS, S_CLS, S_CLS, S_CLS, S_NOP, S_NOP, },/* EV_CANCELLED*/
-{ S_NOP, S_XXX, S_XXX, S_XXX, S_XXX, S_CLS, S_CLN, S_XXX, },/* EV_EOS_SENT*/
+{ S_NOP, S_XXX, S_XXX, S_XXX, S_XXX, S_CLS, S_CLN, S_NOP, },/* EV_EOS_SENT*/
{ S_NOP, S_XXX, S_CLS, S_XXX, S_XXX, S_CLS, S_XXX, S_XXX, },/* EV_IN_ERROR*/
};
@@ -166,6 +166,7 @@ static int on_frame_recv(h2_stream_state_t state, int frame_type)
static int on_event(h2_stream* stream, h2_stream_event_t ev)
{
+ H2_STRM_ASSERT_MAGIC(stream, H2_STRM_MAGIC_OK);
if (stream->monitor && stream->monitor->on_event) {
stream->monitor->on_event(stream->monitor->ctx, stream, ev);
}
@@ -392,6 +393,7 @@ void h2_stream_dispatch(h2_stream *stream, h2_stream_event_t ev)
{
int new_state;
+ H2_STRM_ASSERT_MAGIC(stream, H2_STRM_MAGIC_OK);
ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, stream->session->c1,
H2_STRM_MSG(stream, "dispatch event %d"), ev);
new_state = on_event(stream, ev);
@@ -425,6 +427,7 @@ apr_status_t h2_stream_send_frame(h2_stream *stream, int ftype, int flags, size_
apr_status_t status = APR_SUCCESS;
int new_state, eos = 0;
+ H2_STRM_ASSERT_MAGIC(stream, H2_STRM_MAGIC_OK);
new_state = on_frame_send(stream->state, ftype);
if (new_state < 0) {
ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, stream->session->c1,
@@ -435,6 +438,12 @@ apr_status_t h2_stream_send_frame(h2_stream *stream, int ftype, int flags, size_
++stream->out_frames;
stream->out_frame_octets += frame_len;
+ if(stream->c2) {
+ h2_conn_ctx_t *conn_ctx = h2_conn_ctx_get(stream->c2);
+ if(conn_ctx)
+ conn_ctx->bytes_sent = stream->out_frame_octets;
+ }
+
switch (ftype) {
case NGHTTP2_DATA:
eos = (flags & NGHTTP2_FLAG_END_STREAM);
@@ -468,6 +477,7 @@ apr_status_t h2_stream_recv_frame(h2_stream *stream, int ftype, int flags, size_
apr_status_t status = APR_SUCCESS;
int new_state, eos = 0;
+ H2_STRM_ASSERT_MAGIC(stream, H2_STRM_MAGIC_OK);
new_state = on_frame_recv(stream->state, ftype);
if (new_state < 0) {
ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, stream->session->c1,
@@ -522,6 +532,7 @@ apr_status_t h2_stream_recv_DATA(h2_stream *stream, uint8_t flags,
h2_session *session = stream->session;
apr_status_t status = APR_SUCCESS;
+ H2_STRM_ASSERT_MAGIC(stream, H2_STRM_MAGIC_OK);
stream->in_data_frames++;
if (len > 0) {
if (APLOGctrace3(session->c1)) {
@@ -542,11 +553,38 @@ apr_status_t h2_stream_recv_DATA(h2_stream *stream, uint8_t flags,
return status;
}
+#ifdef AP_DEBUG
+static apr_status_t stream_pool_destroy(void *data)
+{
+ h2_stream *stream = data;
+ switch (stream->magic) {
+ case H2_STRM_MAGIC_OK:
+ ap_log_cerror(APLOG_MARK, APLOG_ERR, 0, stream->session->c1,
+ H2_STRM_MSG(stream, "was not destroyed explicitly"));
+ AP_DEBUG_ASSERT(0);
+ break;
+ case H2_STRM_MAGIC_SDEL:
+ /* stream has been explicitly destroyed, as it should */
+ H2_STRM_ASSIGN_MAGIC(stream, H2_STRM_MAGIC_PDEL);
+ break;
+ case H2_STRM_MAGIC_PDEL:
+ ap_log_cerror(APLOG_MARK, APLOG_ERR, 0, stream->session->c1,
+ H2_STRM_MSG(stream, "already pool destroyed"));
+ AP_DEBUG_ASSERT(0);
+ break;
+ default:
+ AP_DEBUG_ASSERT(0);
+ }
+ return APR_SUCCESS;
+}
+#endif
+
h2_stream *h2_stream_create(int id, apr_pool_t *pool, h2_session *session,
h2_stream_monitor *monitor, int initiated_on)
{
h2_stream *stream = apr_pcalloc(pool, sizeof(h2_stream));
-
+
+ H2_STRM_ASSIGN_MAGIC(stream, H2_STRM_MAGIC_OK);
stream->id = id;
stream->initiated_on = initiated_on;
stream->created = apr_time_now();
@@ -554,6 +592,12 @@ h2_stream *h2_stream_create(int id, apr_pool_t *pool, h2_session *session,
stream->pool = pool;
stream->session = session;
stream->monitor = monitor;
+#ifdef AP_DEBUG
+ if (id) { /* stream 0 has special lifetime */
+ apr_pool_cleanup_register(pool, stream, stream_pool_destroy,
+ apr_pool_cleanup_null);
+ }
+#endif
#ifdef H2_NG2_LOCAL_WIN_SIZE
if (id) {
@@ -575,6 +619,7 @@ void h2_stream_cleanup(h2_stream *stream)
* end of the in/out notifications get closed.
*/
ap_assert(stream);
+ H2_STRM_ASSERT_MAGIC(stream, H2_STRM_MAGIC_OK);
if (stream->out_buffer) {
apr_brigade_cleanup(stream->out_buffer);
}
@@ -583,13 +628,16 @@ void h2_stream_cleanup(h2_stream *stream)
void h2_stream_destroy(h2_stream *stream)
{
ap_assert(stream);
+ H2_STRM_ASSERT_MAGIC(stream, H2_STRM_MAGIC_OK);
ap_log_cerror(APLOG_MARK, APLOG_TRACE3, 0, stream->session->c1,
H2_STRM_MSG(stream, "destroy"));
+ H2_STRM_ASSIGN_MAGIC(stream, H2_STRM_MAGIC_SDEL);
apr_pool_destroy(stream->pool);
}
void h2_stream_rst(h2_stream *stream, int error_code)
{
+ H2_STRM_ASSERT_MAGIC(stream, H2_STRM_MAGIC_OK);
stream->rst_error = error_code;
if (stream->c2) {
h2_c2_abort(stream->c2, stream->session->c1);
@@ -605,6 +653,7 @@ apr_status_t h2_stream_set_request_rec(h2_stream *stream,
h2_request *req;
apr_status_t status;
+ H2_STRM_ASSERT_MAGIC(stream, H2_STRM_MAGIC_OK);
ap_assert(stream->request == NULL);
ap_assert(stream->rtmp == NULL);
if (stream->rst_error) {
@@ -626,6 +675,7 @@ apr_status_t h2_stream_set_request_rec(h2_stream *stream,
void h2_stream_set_request(h2_stream *stream, const h2_request *r)
{
+ H2_STRM_ASSERT_MAGIC(stream, H2_STRM_MAGIC_OK);
ap_assert(stream->request == NULL);
ap_assert(stream->rtmp == NULL);
stream->rtmp = h2_request_clone(stream->pool, r);
@@ -685,6 +735,7 @@ apr_status_t h2_stream_add_header(h2_stream *stream,
int error = 0, was_added = 0;
apr_status_t status = APR_SUCCESS;
+ H2_STRM_ASSERT_MAGIC(stream, H2_STRM_MAGIC_OK);
if (stream->response) {
return APR_EINVAL;
}
@@ -716,6 +767,9 @@ apr_status_t h2_stream_add_header(h2_stream *stream,
status = h2_request_add_header(stream->rtmp, stream->pool,
name, nlen, value, vlen,
session->s->limit_req_fieldsize, &was_added);
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, status, session->c1,
+ H2_STRM_MSG(stream, "add_header: '%.*s: %.*s"),
+ (int)nlen, name, (int)vlen, value);
if (was_added) ++stream->request_headers_added;
}
else if (H2_SS_OPEN == stream->state) {
@@ -759,6 +813,7 @@ apr_status_t h2_stream_add_header(h2_stream *stream,
cleanup:
if (error) {
+ ++stream->request_headers_failed;
set_error_response(stream, error);
return APR_EINVAL;
}
@@ -791,6 +846,7 @@ apr_status_t h2_stream_end_headers(h2_stream *stream, int eos, size_t raw_bytes)
int is_http_or_https;
h2_request *req = stream->rtmp;
+ H2_STRM_ASSERT_MAGIC(stream, H2_STRM_MAGIC_OK);
status = h2_request_end_headers(req, stream->pool, raw_bytes);
if (APR_SUCCESS != status || req->http_status != H2_HTTP_STATUS_UNSET) {
goto cleanup;
@@ -845,7 +901,26 @@ apr_status_t h2_stream_end_headers(h2_stream *stream, int eos, size_t raw_bytes)
* of CONNECT requests (see [RFC7230], Section 5.3)).
*/
if (!ap_cstr_casecmp(req->method, "CONNECT")) {
- if (req->scheme || req->path) {
+ if (req->protocol) {
+ if (!strcmp("websocket", req->protocol)) {
+ if (!req->scheme || !req->path) {
+ ap_log_cerror(APLOG_MARK, APLOG_INFO, 0, stream->session->c1,
+ H2_STRM_LOG(APLOGNO(10457), stream, "Request to websocket CONNECT "
+ "without :scheme or :path, sending 400 answer"));
+ set_error_response(stream, HTTP_BAD_REQUEST);
+ goto cleanup;
+ }
+ }
+ else {
+ /* we do not know this protocol */
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, stream->session->c1, APLOGNO(10460)
+ "':protocol: %s' header present in %s request",
+ req->protocol, req->method);
+ set_error_response(stream, HTTP_NOT_IMPLEMENTED);
+ goto cleanup;
+ }
+ }
+ else if (req->scheme || req->path) {
ap_log_cerror(APLOG_MARK, APLOG_INFO, 0, stream->session->c1,
H2_STRM_LOG(APLOGNO(10384), stream, "Request to CONNECT "
"with :scheme or :path specified, sending 400 answer"));
@@ -1039,6 +1114,7 @@ apr_status_t h2_stream_read_to(h2_stream *stream, apr_bucket_brigade *bb,
{
apr_status_t rv = APR_SUCCESS;
+ H2_STRM_ASSERT_MAGIC(stream, H2_STRM_MAGIC_OK);
if (stream->rst_error) {
return APR_ECONNRESET;
}
@@ -1133,6 +1209,7 @@ apr_status_t h2_stream_submit_pushes(h2_stream *stream, h2_headers *response)
apr_array_header_t *pushes;
int i;
+ H2_STRM_ASSERT_MAGIC(stream, H2_STRM_MAGIC_OK);
pushes = h2_push_collect_update(stream, stream->request, response);
if (pushes && !apr_is_empty_array(pushes)) {
ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, stream->session->c1,
@@ -1152,6 +1229,7 @@ apr_status_t h2_stream_submit_pushes(h2_stream *stream, h2_headers *response)
apr_table_t *h2_stream_get_trailers(h2_stream *stream)
{
+ H2_STRM_ASSERT_MAGIC(stream, H2_STRM_MAGIC_OK);
return NULL;
}
@@ -1163,6 +1241,7 @@ const h2_priority *h2_stream_get_priority(h2_stream *stream,
h2_headers *response)
#endif
{
+ H2_STRM_ASSERT_MAGIC(stream, H2_STRM_MAGIC_OK);
if (response && stream->initiated_on) {
const char *ctype = apr_table_get(response->headers, "content-type");
if (ctype) {
@@ -1176,6 +1255,7 @@ const h2_priority *h2_stream_get_priority(h2_stream *stream,
int h2_stream_is_ready(h2_stream *stream)
{
/* Have we sent a response or do we have the response in our buffer? */
+ H2_STRM_ASSERT_MAGIC(stream, H2_STRM_MAGIC_OK);
if (stream->response) {
return 1;
}
@@ -1185,13 +1265,23 @@ int h2_stream_is_ready(h2_stream *stream)
return 0;
}
+int h2_stream_wants_send_data(h2_stream *stream)
+{
+ H2_STRM_ASSERT_MAGIC(stream, H2_STRM_MAGIC_OK);
+ return h2_stream_is_ready(stream) &&
+ ((stream->out_buffer && !APR_BRIGADE_EMPTY(stream->out_buffer)) ||
+ (stream->output && !h2_beam_empty(stream->output)));
+}
+
int h2_stream_is_at(const h2_stream *stream, h2_stream_state_t state)
{
+ H2_STRM_ASSERT_MAGIC(stream, H2_STRM_MAGIC_OK);
return stream->state == state;
}
int h2_stream_is_at_or_past(const h2_stream *stream, h2_stream_state_t state)
{
+ H2_STRM_ASSERT_MAGIC(stream, H2_STRM_MAGIC_OK);
switch (state) {
case H2_SS_IDLE:
return 1; /* by definition */
@@ -1214,6 +1304,7 @@ apr_status_t h2_stream_in_consumed(h2_stream *stream, apr_off_t amount)
{
h2_session *session = stream->session;
+ H2_STRM_ASSERT_MAGIC(stream, H2_STRM_MAGIC_OK);
if (amount > 0) {
apr_off_t consumed = amount;
@@ -1339,6 +1430,7 @@ static ssize_t stream_data_cb(nghttp2_session *ng2s,
H2_SSSN_STRM_MSG(session, stream_id, "data_cb, stream not found"));
return NGHTTP2_ERR_CALLBACK_FAILURE;
}
+ H2_STRM_ASSERT_MAGIC(stream, H2_STRM_MAGIC_OK);
if (!stream->output || !stream->response || !stream->out_buffer) {
return NGHTTP2_ERR_DEFERRED;
}
@@ -1346,10 +1438,17 @@ static ssize_t stream_data_cb(nghttp2_session *ng2s,
return NGHTTP2_ERR_DEFERRED;
}
if (h2_c1_io_needs_flush(&session->io)) {
- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c1,
- H2_SSSN_STRM_MSG(session, stream_id, "suspending on c1 out needs flush"));
- h2_stream_dispatch(stream, H2_SEV_OUT_C1_BLOCK);
- return NGHTTP2_ERR_DEFERRED;
+ rv = h2_c1_io_pass(&session->io);
+ if (APR_STATUS_IS_EAGAIN(rv)) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c1,
+ H2_SSSN_STRM_MSG(session, stream_id, "suspending on c1 out needs flush"));
+ h2_stream_dispatch(stream, H2_SEV_OUT_C1_BLOCK);
+ return NGHTTP2_ERR_DEFERRED;
+ }
+ else if (rv) {
+ h2_session_dispatch_event(session, H2_SESSION_EV_CONN_ERROR, rv, NULL);
+ return NGHTTP2_ERR_CALLBACK_FAILURE;
+ }
}
/* determine how much we'd like to send. We cannot send more than
@@ -1361,6 +1460,11 @@ static ssize_t stream_data_cb(nghttp2_session *ng2s,
length = chunk_len;
}
}
+ /* We allow a configurable max DATA frame length. */
+ if (stream->session->max_data_frame_len > 0
+ && length > stream->session->max_data_frame_len) {
+ length = stream->session->max_data_frame_len;
+ }
/* How much data do we have in our buffers that we can write?
* if not enough, receive more. */
@@ -1393,8 +1497,8 @@ static ssize_t stream_data_cb(nghttp2_session *ng2s,
* it is all fine. */
ap_log_cerror(APLOG_MARK, APLOG_TRACE1, rv, c1,
H2_SSSN_STRM_MSG(session, stream_id, "rst stream"));
- h2_stream_rst(stream, H2_ERR_INTERNAL_ERROR);
- return NGHTTP2_ERR_CALLBACK_FAILURE;
+ h2_stream_rst(stream, H2_ERR_STREAM_CLOSED);
+ return NGHTTP2_ERR_DEFERRED;
}
ap_log_cerror(APLOG_MARK, APLOG_TRACE1, rv, c1,
H2_SSSN_STRM_MSG(session, stream_id,
@@ -1403,10 +1507,17 @@ static ssize_t stream_data_cb(nghttp2_session *ng2s,
eos = 1;
rv = APR_SUCCESS;
}
+ else if (APR_ECONNRESET == rv || APR_ECONNABORTED == rv) {
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, rv, c1,
+ H2_STRM_LOG(APLOGNO(10471), stream, "data_cb, reading data"));
+ h2_stream_rst(stream, H2_ERR_STREAM_CLOSED);
+ return NGHTTP2_ERR_DEFERRED;
+ }
else {
ap_log_cerror(APLOG_MARK, APLOG_ERR, rv, c1,
H2_STRM_LOG(APLOGNO(02938), stream, "data_cb, reading data"));
- return NGHTTP2_ERR_CALLBACK_FAILURE;
+ h2_stream_rst(stream, H2_ERR_INTERNAL_ERROR);
+ return NGHTTP2_ERR_DEFERRED;
}
}
@@ -1465,6 +1576,7 @@ static apr_status_t stream_do_response(h2_stream *stream)
#endif
nghttp2_data_provider provider, *pprovider = NULL;
+ H2_STRM_ASSERT_MAGIC(stream, H2_STRM_MAGIC_OK);
ap_assert(!stream->response);
ap_assert(stream->out_buffer);
@@ -1562,6 +1674,8 @@ static apr_status_t stream_do_response(h2_stream *stream)
* denies it, submit resources to push */
const char *s = apr_table_get(resp->notes, H2_PUSH_MODE_NOTE);
if (!s || strcmp(s, "0")) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c1,
+ H2_STRM_MSG(stream, "submit pushes, note=%s"), s);
h2_stream_submit_pushes(stream, resp);
}
}
@@ -1653,6 +1767,7 @@ void h2_stream_on_output_change(h2_stream *stream)
/* stream->pout_recv_write signalled a change. Check what has happened, read
* from it and act on seeing a response/data. */
+ H2_STRM_ASSERT_MAGIC(stream, H2_STRM_MAGIC_OK);
if (!stream->output) {
/* c2 has not assigned the output beam to the stream (yet). */
ap_log_cerror(APLOG_MARK, APLOG_WARNING, 0, c1,
@@ -1701,6 +1816,7 @@ void h2_stream_on_output_change(h2_stream *stream)
void h2_stream_on_input_change(h2_stream *stream)
{
+ H2_STRM_ASSERT_MAGIC(stream, H2_STRM_MAGIC_OK);
ap_assert(stream->input);
h2_beam_report_consumption(stream->input);
if (h2_stream_is_at(stream, H2_SS_CLOSED_L)
diff --git a/modules/http2/h2_stream.h b/modules/http2/h2_stream.h
index 695d56a..405978a 100644
--- a/modules/http2/h2_stream.h
+++ b/modules/http2/h2_stream.h
@@ -63,7 +63,22 @@ typedef struct h2_stream_monitor {
trigger a state change */
} h2_stream_monitor;
+#ifdef AP_DEBUG
+#define H2_STRM_MAGIC_OK 0x5354524d
+#define H2_STRM_MAGIC_SDEL 0x5344454c
+#define H2_STRM_MAGIC_PDEL 0x5044454c
+
+#define H2_STRM_ASSIGN_MAGIC(s,m) ((s)->magic = m)
+#define H2_STRM_ASSERT_MAGIC(s,m) ap_assert((s)->magic == m)
+#else
+#define H2_STRM_ASSIGN_MAGIC(s,m) ((void)0)
+#define H2_STRM_ASSERT_MAGIC(s,m) ((void)0)
+#endif
+
struct h2_stream {
+#ifdef AP_DEBUG
+ uint32_t magic;
+#endif
int id; /* http2 stream identifier */
int initiated_on; /* initiating stream id (PUSH) or 0 */
apr_pool_t *pool; /* the memory pool for this stream */
@@ -76,6 +91,7 @@ struct h2_stream {
struct h2_request *rtmp; /* request being assembled */
apr_table_t *trailers_in; /* optional, incoming trailers */
int request_headers_added; /* number of request headers added */
+ int request_headers_failed; /* number of request headers failed to add */
#if AP_HAS_RESPONSE_BUCKETS
ap_bucket_response *response; /* the final, non-interim response or NULL */
@@ -317,6 +333,8 @@ const char *h2_stream_state_str(const h2_stream *stream);
*/
int h2_stream_is_ready(h2_stream *stream);
+int h2_stream_wants_send_data(h2_stream *stream);
+
#define H2_STRM_MSG(s, msg) \
"h2_stream(%d-%lu-%d,%s): "msg, s->session->child_num, \
(unsigned long)s->session->id, s->id, h2_stream_state_str(s)
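
The new H2_STRM_MAGIC_* guards above implement a debug-build lifecycle check: a magic word is stamped at creation, rewritten on explicit destroy, and verified by a pool cleanup so that a stream destroyed only by pool teardown (or destroyed twice) trips an assert. A reduced, generic sketch of the same pattern, using hypothetical names (my_obj, MY_MAGIC_*) rather than the module's types:

#include "httpd.h"
#include "apr_pools.h"

#define MY_MAGIC_LIVE  0x4c495645  /* "LIVE": object is valid */
#define MY_MAGIC_FREED 0x46524545  /* "FREE": explicitly destroyed */

typedef struct {
    apr_uint32_t magic;
    apr_pool_t *pool;
} my_obj;

static apr_status_t my_obj_pool_cleanup(void *data)
{
    my_obj *o = data;
    /* If the magic is still LIVE here, nobody called my_obj_destroy(). */
    AP_DEBUG_ASSERT(o->magic == MY_MAGIC_FREED);
    return APR_SUCCESS;
}

static my_obj *my_obj_create(apr_pool_t *pool)
{
    my_obj *o = apr_pcalloc(pool, sizeof(*o));
    o->magic = MY_MAGIC_LIVE;
    o->pool = pool;
    apr_pool_cleanup_register(pool, o, my_obj_pool_cleanup,
                              apr_pool_cleanup_null);
    return o;
}

static void my_obj_destroy(my_obj *o)
{
    AP_DEBUG_ASSERT(o->magic == MY_MAGIC_LIVE);  /* catch double destroy */
    o->magic = MY_MAGIC_FREED;
    apr_pool_destroy(o->pool);                   /* cleanup sees FREED, ok */
}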
diff --git a/modules/http2/h2_switch.c b/modules/http2/h2_switch.c
index a30f27c..3799701 100644
--- a/modules/http2/h2_switch.c
+++ b/modules/http2/h2_switch.c
@@ -104,9 +104,10 @@ static int h2_protocol_propose(conn_rec *c, request_rec *r,
/* We also allow switching only for requests that have no body.
*/
p = apr_table_get(r->headers_in, "Content-Length");
- if (p && strcmp(p, "0")) {
+ if ((p && strcmp(p, "0"))
+ || (!p && apr_table_get(r->headers_in, "Transfer-Encoding"))) {
ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(03087)
- "upgrade with content-length: %s, declined", p);
+ "upgrade with body declined");
return DECLINED;
}
}
diff --git a/modules/http2/h2_util.c b/modules/http2/h2_util.c
index 728cee9..8e53ceb 100644
--- a/modules/http2/h2_util.c
+++ b/modules/http2/h2_util.c
@@ -1281,8 +1281,8 @@ apr_size_t h2_util_bucket_print(char *buffer, apr_size_t bmax,
else if (bmax > off) {
off += apr_snprintf(buffer+off, bmax-off, "%s[%ld]",
b->type->name,
- (long)(b->length == ((apr_size_t)-1)?
- -1 : b->length));
+ (b->length == ((apr_size_t)-1)?
+ -1 : (long)b->length));
}
return off;
}
@@ -1650,7 +1650,7 @@ static int contains_name(const literal *lits, size_t llen, nghttp2_nv *nv)
for (i = 0; i < llen; ++i) {
lit = &lits[i];
if (lit->len == nv->namelen
- && !apr_strnatcasecmp(lit->name, (const char *)nv->name)) {
+ && !ap_cstr_casecmp(lit->name, (const char *)nv->name)) {
return 1;
}
}
@@ -1706,7 +1706,7 @@ static apr_status_t req_add_header(apr_table_t *headers, apr_pool_t *pool,
return APR_SUCCESS;
}
else if (nv->namelen == sizeof("cookie")-1
- && !apr_strnatcasecmp("cookie", (const char *)nv->name)) {
+ && !ap_cstr_casecmp("cookie", (const char *)nv->name)) {
existing = apr_table_get(headers, "cookie");
if (existing) {
/* Cookie headers come separately in HTTP/2, but need
@@ -1725,7 +1725,7 @@ static apr_status_t req_add_header(apr_table_t *headers, apr_pool_t *pool,
}
}
else if (nv->namelen == sizeof("host")-1
- && !apr_strnatcasecmp("host", (const char *)nv->name)) {
+ && !ap_cstr_casecmp("host", (const char *)nv->name)) {
if (apr_table_get(headers, "Host")) {
return APR_SUCCESS; /* ignore duplicate */
}
@@ -1883,6 +1883,13 @@ void h2_util_drain_pipe(apr_file_t *pipe)
{
char rb[512];
apr_size_t nr = sizeof(rb);
+ apr_interval_time_t timeout;
+ apr_status_t trv;
+
+ /* Make the pipe non-blocking if we can */
+ trv = apr_file_pipe_timeout_get(pipe, &timeout);
+ if (trv == APR_SUCCESS)
+ apr_file_pipe_timeout_set(pipe, 0);
while (apr_file_read(pipe, rb, &nr) == APR_SUCCESS) {
/* Although we write just one byte to the other end of the pipe
@@ -1893,6 +1900,8 @@ void h2_util_drain_pipe(apr_file_t *pipe)
if (nr != sizeof(rb))
break;
}
+ if (trv == APR_SUCCESS)
+ apr_file_pipe_timeout_set(pipe, timeout);
}
apr_status_t h2_util_wait_on_pipe(apr_file_t *pipe)
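
The drain-pipe change above saves the pipe's current timeout, switches it to 0 (non-blocking) for the read loop, and restores it afterwards, so draining can no longer hang when the writer has stopped. A minimal standalone sketch of that save/zero/restore pattern, assuming only APR file I/O (the function name is illustrative):

#include "apr_file_io.h"

/* Sketch: drain whatever is buffered in a pipe without blocking. */
static void drain_pipe_nonblocking(apr_file_t *pipe)
{
    char buf[512];
    apr_interval_time_t saved;
    apr_status_t rv;

    rv = apr_file_pipe_timeout_get(pipe, &saved);
    if (rv == APR_SUCCESS)
        apr_file_pipe_timeout_set(pipe, 0);       /* non-blocking reads */

    for (;;) {
        apr_size_t nread = sizeof(buf);
        if (apr_file_read(pipe, buf, &nread) != APR_SUCCESS
            || nread < sizeof(buf))
            break;                                /* drained or would block */
    }

    if (rv == APR_SUCCESS)
        apr_file_pipe_timeout_set(pipe, saved);   /* restore previous mode */
}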
diff --git a/modules/http2/h2_version.h b/modules/http2/h2_version.h
index c961089..7e7da21 100644
--- a/modules/http2/h2_version.h
+++ b/modules/http2/h2_version.h
@@ -27,7 +27,7 @@
* @macro
* Version number of the http2 module as c string
*/
-#define MOD_HTTP2_VERSION "2.0.11"
+#define MOD_HTTP2_VERSION "2.0.22"
/**
* @macro
@@ -35,7 +35,7 @@
* release. This is a 24 bit number with 8 bits for major number, 8 bits
* for minor and 8 bits for patch. Version 1.2.3 becomes 0x010203.
*/
-#define MOD_HTTP2_VERSION_NUM 0x02000b
+#define MOD_HTTP2_VERSION_NUM 0x020016
#endif /* mod_h2_h2_version_h */
diff --git a/modules/http2/h2_ws.c b/modules/http2/h2_ws.c
new file mode 100644
index 0000000..396e6e1
--- /dev/null
+++ b/modules/http2/h2_ws.c
@@ -0,0 +1,362 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <assert.h>
+
+#include "apr.h"
+#include "apr_strings.h"
+#include "apr_lib.h"
+#include "apr_sha1.h"
+#include "apr_strmatch.h"
+
+#include <ap_mmn.h>
+
+#include <httpd.h>
+#include <http_core.h>
+#include <http_connection.h>
+#include <http_protocol.h>
+#include <http_request.h>
+#include <http_log.h>
+#include <http_ssl.h>
+#include <http_vhost.h>
+#include <util_filter.h>
+#include <ap_mpm.h>
+
+#include "h2_private.h"
+#include "h2_config.h"
+#include "h2_conn_ctx.h"
+#include "h2_headers.h"
+#include "h2_request.h"
+#include "h2_ws.h"
+
+#if H2_USE_WEBSOCKETS
+
+#include "apr_encode.h" /* H2_USE_WEBSOCKETS is conditional on APR 1.6+ */
+
+static ap_filter_rec_t *c2_ws_out_filter_handle;
+
+struct ws_filter_ctx {
+ const char *ws_accept_base64;
+ int has_final_response;
+ int override_body;
+};
+
+/**
+ * Generate the "Sec-WebSocket-Accept" header field for the given key
+ * (base64 encoded) as defined in RFC 6455 ch. 4.2.2 step 5.3
+ */
+static const char *gen_ws_accept(conn_rec *c, const char *key_base64)
+{
+ apr_byte_t dgst[APR_SHA1_DIGESTSIZE];
+ const char ws_guid[] = "258EAFA5-E914-47DA-95CA-C5AB0DC85B11";
+ apr_sha1_ctx_t sha1_ctx;
+
+ apr_sha1_init(&sha1_ctx);
+ apr_sha1_update(&sha1_ctx, key_base64, (unsigned int)strlen(key_base64));
+ apr_sha1_update(&sha1_ctx, ws_guid, (unsigned int)strlen(ws_guid));
+ apr_sha1_final(dgst, &sha1_ctx);
+
+ return apr_pencode_base64_binary(c->pool, dgst, sizeof(dgst),
+ APR_ENCODE_NONE, NULL);
+}
+
+const h2_request *h2_ws_rewrite_request(const h2_request *req,
+ conn_rec *c2, int no_body)
+{
+ h2_conn_ctx_t *conn_ctx = h2_conn_ctx_get(c2);
+ h2_request *wsreq;
+ unsigned char key_raw[16];
+ const char *key_base64, *accept_base64;
+ struct ws_filter_ctx *ws_ctx;
+ apr_status_t rv;
+
+ if (!conn_ctx || !req->protocol || strcmp("websocket", req->protocol))
+ return req;
+
+ if (ap_cstr_casecmp("CONNECT", req->method)) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c2,
+ "h2_c2(%s-%d): websocket request with method %s",
+ conn_ctx->id, conn_ctx->stream_id, req->method);
+ return req;
+ }
+ if (!req->scheme) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c2,
+ "h2_c2(%s-%d): websocket CONNECT without :scheme",
+ conn_ctx->id, conn_ctx->stream_id);
+ return req;
+ }
+ if (!req->path) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c2,
+ "h2_c2(%s-%d): websocket CONNECT without :path",
+ conn_ctx->id, conn_ctx->stream_id);
+ return req;
+ }
+
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c2,
+ "h2_c2(%s-%d): websocket CONNECT for %s",
+ conn_ctx->id, conn_ctx->stream_id, req->path);
+ /* Transform the HTTP/2 extended CONNECT to an internal GET using
+ * the HTTP/1.1 version of websocket connection setup. */
+ wsreq = h2_request_clone(c2->pool, req);
+ wsreq->method = "GET";
+ wsreq->protocol = NULL;
+ apr_table_set(wsreq->headers, "Upgrade", "websocket");
+ apr_table_add(wsreq->headers, "Connection", "Upgrade");
+ /* add Sec-WebSocket-Key header */
+ rv = apr_generate_random_bytes(key_raw, sizeof(key_raw));
+ if (rv != APR_SUCCESS) {
+ ap_log_error(APLOG_MARK, APLOG_CRIT, rv, NULL, APLOGNO(10461)
+ "error generating secret");
+ return NULL;
+ }
+ key_base64 = apr_pencode_base64_binary(c2->pool, key_raw, sizeof(key_raw),
+ APR_ENCODE_NONE, NULL);
+ apr_table_set(wsreq->headers, "Sec-WebSocket-Key", key_base64);
+ /* This is now the request to process internally */
+
+ /* When this request gets processed and delivers a 101 response,
+ * we expect it to carry a "Sec-WebSocket-Accept" header with
+ * exactly the following value, as per RFC 6455. */
+ accept_base64 = gen_ws_accept(c2, key_base64);
+ /* Add an output filter that intercepts generated responses:
+ * - if a valid WebSocket negotiation happens, transform the
+ * 101 response to a 200
+ * - if a 2xx response arrives that does not pass the Accept test,
+ * return a 502 indicating that the URI does not seem to support the
+ * websocket protocol (RFC 8441 does not define this, but it seems
+ * the best choice)
+ * - if a 3xx, 4xx or 5xx response happens, forward this unchanged.
+ */
+ ws_ctx = apr_pcalloc(c2->pool, sizeof(*ws_ctx));
+ ws_ctx->ws_accept_base64 = accept_base64;
+ /* insert our filter just before the C2 core filter */
+ ap_remove_output_filter_byhandle(c2->output_filters, "H2_C2_NET_OUT");
+ ap_add_output_filter("H2_C2_WS_OUT", ws_ctx, NULL, c2);
+ ap_add_output_filter("H2_C2_NET_OUT", NULL, NULL, c2);
+ /* Mark the connection as being an Upgrade, with some special handling
+ * since the request needs an EOS, without the stream being closed */
+ conn_ctx->is_upgrade = 1;
+
+ return wsreq;
+}
+
+static apr_bucket *make_valid_resp(conn_rec *c2, int status,
+ apr_table_t *headers, apr_table_t *notes)
+{
+ apr_table_t *nheaders, *nnotes;
+
+ ap_assert(headers);
+ nheaders = apr_table_clone(c2->pool, headers);
+ apr_table_unset(nheaders, "Connection");
+ apr_table_unset(nheaders, "Upgrade");
+ apr_table_unset(nheaders, "Sec-WebSocket-Accept");
+ nnotes = notes? apr_table_clone(c2->pool, notes) :
+ apr_table_make(c2->pool, 10);
+#if AP_HAS_RESPONSE_BUCKETS
+ return ap_bucket_response_create(status, NULL, nheaders, nnotes,
+ c2->pool, c2->bucket_alloc);
+#else
+ return h2_bucket_headers_create(c2->bucket_alloc,
+ h2_headers_create(status, nheaders,
+ nnotes, 0, c2->pool));
+#endif
+}
+
+static apr_bucket *make_invalid_resp(conn_rec *c2, int status,
+ apr_table_t *notes)
+{
+ apr_table_t *nheaders, *nnotes;
+
+ nheaders = apr_table_make(c2->pool, 10);
+ apr_table_setn(nheaders, "Content-Length", "0");
+ nnotes = notes? apr_table_clone(c2->pool, notes) :
+ apr_table_make(c2->pool, 10);
+#if AP_HAS_RESPONSE_BUCKETS
+ return ap_bucket_response_create(status, NULL, nheaders, nnotes,
+ c2->pool, c2->bucket_alloc);
+#else
+ return h2_bucket_headers_create(c2->bucket_alloc,
+ h2_headers_create(status, nheaders,
+ nnotes, 0, c2->pool));
+#endif
+}
+
+static void ws_handle_resp(conn_rec *c2, h2_conn_ctx_t *conn_ctx,
+ struct ws_filter_ctx *ws_ctx, apr_bucket *b)
+{
+#if AP_HAS_RESPONSE_BUCKETS
+ ap_bucket_response *resp = b->data;
+#else /* AP_HAS_RESPONSE_BUCKETS */
+ h2_headers *resp = h2_bucket_headers_get(b);
+#endif /* !AP_HAS_RESPONSE_BUCKETS */
+ apr_bucket *b_override = NULL;
+ int is_final = 0;
+ int override_body = 0;
+
+ if (ws_ctx->has_final_response) {
+ /* already did, nop */
+ return;
+ }
+
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, c2,
+ "h2_c2(%s-%d): H2_C2_WS_OUT inspecting response %d",
+ conn_ctx->id, conn_ctx->stream_id, resp->status);
+ if (resp->status == HTTP_SWITCHING_PROTOCOLS) {
+ /* The resource agreed to switch protocols. But this is only valid
+ * if it sends back the correct Sec-WebSocket-Accept header value */
+ const char *hd = apr_table_get(resp->headers, "Sec-WebSocket-Accept");
+ if (hd && !strcmp(ws_ctx->ws_accept_base64, hd)) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c2,
+ "h2_c2(%s-%d): websocket CONNECT, valid 101 Upgrade"
+ ", converting to 200 response",
+ conn_ctx->id, conn_ctx->stream_id);
+ b_override = make_valid_resp(c2, HTTP_OK, resp->headers, resp->notes);
+ is_final = 1;
+ }
+ else {
+ if (!hd) {
+ /* This points to someone being confused */
+ ap_log_cerror(APLOG_MARK, APLOG_WARNING, 0, c2, APLOGNO(10462)
+ "h2_c2(%s-%d): websocket CONNECT, got 101 response "
+ "without Sec-WebSocket-Accept header",
+ conn_ctx->id, conn_ctx->stream_id);
+ }
+ else {
+ /* This points to a bug, either in our WebSockets negotiation
+ * or in the request processing's implementation of WebSockets */
+ ap_log_cerror(APLOG_MARK, APLOG_ERR, 0, c2, APLOGNO(10463)
+ "h2_c2(%s-%d): websocket CONNECT, 101 response "
+ "with 'Sec-WebSocket-Accept: %s' but expected %s",
+ conn_ctx->id, conn_ctx->stream_id, hd,
+ ws_ctx->ws_accept_base64);
+ }
+ b_override = make_invalid_resp(c2, HTTP_BAD_GATEWAY, resp->notes);
+ override_body = is_final = 1;
+ }
+ }
+ else if (resp->status < 200) {
+ /* other intermediate response, pass through */
+ }
+ else if (resp->status < 300) {
+ /* Failure, we might be talking to a plain http resource */
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c2,
+ "h2_c2(%s-%d): websocket CONNECT, invalid response %d",
+ conn_ctx->id, conn_ctx->stream_id, resp->status);
+ b_override = make_invalid_resp(c2, HTTP_BAD_GATEWAY, resp->notes);
+ override_body = is_final = 1;
+ }
+ else {
+ /* error response, pass through. */
+ ws_ctx->has_final_response = 1;
+ }
+
+ if (b_override) {
+ APR_BUCKET_INSERT_BEFORE(b, b_override);
+ apr_bucket_delete(b);
+ b = b_override;
+ }
+ if (override_body) {
+ APR_BUCKET_INSERT_AFTER(b, apr_bucket_eos_create(c2->bucket_alloc));
+ ws_ctx->override_body = 1;
+ }
+ if (is_final) {
+ ws_ctx->has_final_response = 1;
+ conn_ctx->has_final_response = 1;
+ }
+}
+
+static apr_status_t h2_c2_ws_filter_out(ap_filter_t* f, apr_bucket_brigade* bb)
+{
+ struct ws_filter_ctx *ws_ctx = f->ctx;
+ h2_conn_ctx_t *conn_ctx = h2_conn_ctx_get(f->c);
+ apr_bucket *b, *bnext;
+
+ ap_assert(conn_ctx);
+ if (ws_ctx->override_body) {
+ /* We have overridden the original response and also its body.
+ * If this filter is called again, we signal a hard abort to
+ * allow processing to terminate at the earliest. */
+ f->c->aborted = 1;
+ return APR_ECONNABORTED;
+ }
+
+ /* Inspect the brigade, looking for RESPONSE/HEADER buckets.
+ * Remember, this filter is only active for client websocket CONNECT
+ * requests that we translated to an internal GET with websocket
+ * headers.
+ * We inspect the response to see if the internal resource actually
+ * agrees to talk websocket or is "just" a normal HTTP resource that
+ * ignored the websocket request headers. */
+ for (b = APR_BRIGADE_FIRST(bb);
+ b != APR_BRIGADE_SENTINEL(bb);
+ b = bnext)
+ {
+ bnext = APR_BUCKET_NEXT(b);
+ if (APR_BUCKET_IS_METADATA(b)) {
+#if AP_HAS_RESPONSE_BUCKETS
+ if (AP_BUCKET_IS_RESPONSE(b)) {
+#else
+ if (H2_BUCKET_IS_HEADERS(b)) {
+#endif /* !AP_HAS_RESPONSE_BUCKETS */
+ ws_handle_resp(f->c, conn_ctx, ws_ctx, b);
+ continue;
+ }
+ }
+ else if (ws_ctx->override_body) {
+ apr_bucket_delete(b);
+ }
+ }
+ return ap_pass_brigade(f->next, bb);
+}
+
+static int ws_post_read(request_rec *r)
+{
+
+ if (r->connection->master) {
+ h2_conn_ctx_t *conn_ctx = h2_conn_ctx_get(r->connection);
+ if (conn_ctx && conn_ctx->is_upgrade &&
+ !h2_config_sgeti(r->server, H2_CONF_WEBSOCKETS)) {
+ return HTTP_NOT_IMPLEMENTED;
+ }
+ }
+ return DECLINED;
+}
+
+void h2_ws_register_hooks(void)
+{
+ ap_hook_post_read_request(ws_post_read, NULL, NULL, APR_HOOK_MIDDLE);
+ c2_ws_out_filter_handle =
+ ap_register_output_filter("H2_C2_WS_OUT", h2_c2_ws_filter_out,
+ NULL, AP_FTYPE_NETWORK);
+}
+
+#else /* H2_USE_WEBSOCKETS */
+
+const h2_request *h2_ws_rewrite_request(const h2_request *req,
+ conn_rec *c2, int no_body)
+{
+ (void)c2;
+ (void)no_body;
+ /* no rewriting */
+ return req;
+}
+
+void h2_ws_register_hooks(void)
+{
+ /* NOP */
+}
+
+#endif /* H2_USE_WEBSOCKETS (else part) */
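
gen_ws_accept() above is the server side of the RFC 6455 handshake: the accept token is the base64 encoding of SHA-1(client-key-base64 + fixed GUID). The following small, self-contained program reproduces that computation with the same APR routines the module uses (apr_sha1, apr_encode) and checks it against the sample vector from RFC 6455 section 1.3 ("dGhlIHNhbXBsZSBub25jZQ==" yields "s3pPLMBiTxaQ9kYGzzhZRbK+xOo="); main() and the helper name are of course not part of the module.

#include <stdio.h>
#include <string.h>
#include "apr_general.h"
#include "apr_pools.h"
#include "apr_sha1.h"
#include "apr_encode.h"

/* RFC 6455, section 4.2.2: accept = base64(SHA1(key_base64 + GUID)) */
static const char *ws_accept(apr_pool_t *p, const char *key_base64)
{
    const char ws_guid[] = "258EAFA5-E914-47DA-95CA-C5AB0DC85B11";
    unsigned char dgst[APR_SHA1_DIGESTSIZE];
    apr_sha1_ctx_t ctx;

    apr_sha1_init(&ctx);
    apr_sha1_update(&ctx, key_base64, (unsigned int)strlen(key_base64));
    apr_sha1_update(&ctx, ws_guid, (unsigned int)strlen(ws_guid));
    apr_sha1_final(dgst, &ctx);
    return apr_pencode_base64_binary(p, dgst, sizeof(dgst),
                                     APR_ENCODE_NONE, NULL);
}

int main(void)
{
    apr_pool_t *p;

    apr_initialize();
    apr_pool_create(&p, NULL);
    /* expected output: s3pPLMBiTxaQ9kYGzzhZRbK+xOo= */
    printf("%s\n", ws_accept(p, "dGhlIHNhbXBsZSBub25jZQ=="));
    apr_pool_destroy(p);
    apr_terminate();
    return 0;
}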
diff --git a/modules/http2/h2_ws.h b/modules/http2/h2_ws.h
new file mode 100644
index 0000000..a94d300
--- /dev/null
+++ b/modules/http2/h2_ws.h
@@ -0,0 +1,35 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __mod_h2__h2_ws__
+#define __mod_h2__h2_ws__
+
+#include "h2.h"
+
+/**
+ * Rewrite a websocket request.
+ *
+ * @param req the h2 request to rewrite
+ * @param c2 the connection to process the request on
+ * @param no_body != 0 iff the request is known to have no body
+ * @return the websocket request for internal submit
+ */
+const h2_request *h2_ws_rewrite_request(const h2_request *req,
+ conn_rec *c2, int no_body);
+
+void h2_ws_register_hooks(void);
+
+#endif /* defined(__mod_h2__h2_ws__) */
diff --git a/modules/http2/mod_http2.c b/modules/http2/mod_http2.c
index 8a1ee3f..1bd34b2 100644
--- a/modules/http2/mod_http2.c
+++ b/modules/http2/mod_http2.c
@@ -42,6 +42,7 @@
#include "h2_switch.h"
#include "h2_version.h"
#include "h2_bucket_beam.h"
+#include "h2_ws.h"
static void h2_hooks(apr_pool_t *pool);
@@ -199,6 +200,7 @@ static void h2_hooks(apr_pool_t *pool)
h2_c1_register_hooks();
h2_switch_register_hooks();
h2_c2_register_hooks();
+ h2_ws_register_hooks();
/* Setup subprocess env for certain variables
*/
diff --git a/modules/http2/mod_http2.dsp b/modules/http2/mod_http2.dsp
index d9ff222..9775534 100644
--- a/modules/http2/mod_http2.dsp
+++ b/modules/http2/mod_http2.dsp
@@ -173,6 +173,10 @@ SOURCE=./h2_workers.c
# End Source File
# Begin Source File
+SOURCE=./h2_ws.c
+# End Source File
+# Begin Source File
+
SOURCE=./mod_http2.c
# End Source File
# Begin Source File
diff --git a/modules/http2/mod_http2.h b/modules/http2/mod_http2.h
index f68edcd..9cb04a6 100644
--- a/modules/http2/mod_http2.h
+++ b/modules/http2/mod_http2.h
@@ -32,6 +32,29 @@ APR_DECLARE_OPTIONAL_FN(void,
http2_get_num_workers, (server_rec *s,
int *minw, int *max));
+#define AP_HTTP2_HAS_GET_POLLFD
+
+/**
+ * Get an apr_pollfd_t populated for an h2 connection where
+ * (c->master != NULL) is true and pipes are supported.
+ * To be used in Apache modules implementing WebSockets in Apache httpd
+ * versions that do not support the corresponding `ap_get_pollfd_from_conn()`
+ * function.
+ * When available, use `ap_get_pollfd_from_conn()` instead of this function.
+ *
+ * How it works: pass in an `apr_pollfd_t` which gets populated for
+ * monitoring the input of connection `c`. If `c` is not an HTTP/2
+ * stream connection, the function will return `APR_ENOTIMPL`.
+ * `ptimeout` is optional and, if passed, will get the timeout in effect.
+ *
+ * On platforms without support for pipes (e.g. Windows), this function
+ * will return `APR_ENOTIMPL`.
+ */
+APR_DECLARE_OPTIONAL_FN(apr_status_t,
+ http2_get_pollfd_from_conn,
+ (conn_rec *c, struct apr_pollfd_t *pfd,
+ apr_interval_time_t *ptimeout));
+
/*******************************************************************************
* START HTTP/2 request engines (DEPRECATED)
******************************************************************************/
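
The new optional function gives other modules a way to obtain a pollable descriptor for an HTTP/2 stream connection on httpd versions that predate ap_get_pollfd_from_conn(). A hedged sketch of how a consumer module might look it up and use it; the my_* names are hypothetical and the hook registration is omitted for brevity.

#include "httpd.h"
#include "http_config.h"
#include "apr_optional.h"
#include "apr_poll.h"
#include "mod_http2.h"   /* declares the optional function */

static APR_OPTIONAL_FN_TYPE(http2_get_pollfd_from_conn) *get_pollfd_fn;

/* Retrieve the optional function once mod_http2 has registered it. */
static int my_post_config(apr_pool_t *pconf, apr_pool_t *plog,
                          apr_pool_t *ptemp, server_rec *s)
{
    get_pollfd_fn = APR_RETRIEVE_OPTIONAL_FN(http2_get_pollfd_from_conn);
    return OK;
}

/* Fetch a pollfd for an HTTP/2 stream connection, if possible. */
static apr_status_t my_get_stream_pollfd(conn_rec *c, apr_pollfd_t *pfd,
                                         apr_interval_time_t *ptimeout)
{
    if (!get_pollfd_fn || !c->master)
        return APR_ENOTIMPL;   /* mod_http2 absent, or not a stream conn */
    return get_pollfd_fn(c, pfd, ptimeout);
}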
diff --git a/modules/http2/mod_proxy_http2.c b/modules/http2/mod_proxy_http2.c
index 5abccab..ebf8f61 100644
--- a/modules/http2/mod_proxy_http2.c
+++ b/modules/http2/mod_proxy_http2.c
@@ -50,8 +50,7 @@ static int (*is_h2)(conn_rec *c);
typedef struct h2_proxy_ctx {
const char *id;
- conn_rec *master;
- conn_rec *owner;
+ conn_rec *cfront;
apr_pool_t *pool;
server_rec *server;
const char *proxy_func;
@@ -66,10 +65,10 @@ typedef struct h2_proxy_ctx {
unsigned is_ssl : 1;
request_rec *r; /* the request processed in this ctx */
- apr_status_t r_status; /* status of request work */
+ int r_status; /* status of request work */
int r_done; /* request was processed, not necessarily successfully */
int r_may_retry; /* request may be retried */
- h2_proxy_session *session; /* current http2 session against backend */
+ int has_reusable_session; /* http2 session is live and clean */
} h2_proxy_ctx;
static int h2_proxy_post_config(apr_pool_t *p, apr_pool_t *plog,
@@ -159,11 +158,16 @@ static int proxy_http2_canon(request_rec *r, char *url)
search = r->args;
}
else {
+#ifdef PROXY_CANONENC_NOENCODEDSLASHENCODING
core_dir_config *d = ap_get_core_module_config(r->per_dir_config);
int flags = d->allow_encoded_slashes && !d->decode_encoded_slashes ? PROXY_CANONENC_NOENCODEDSLASHENCODING : 0;
path = ap_proxy_canonenc_ex(r->pool, url, (int)strlen(url),
enc_path, flags, r->proxyreq);
+#else
+ path = ap_proxy_canonenc(r->pool, url, (int)strlen(url),
+ enc_path, 0, r->proxyreq);
+#endif
if (!path) {
return HTTP_BAD_REQUEST;
}
@@ -227,79 +231,81 @@ static apr_status_t add_request(h2_proxy_session *session, request_rec *r)
}
static void request_done(h2_proxy_ctx *ctx, request_rec *r,
- apr_status_t status, int touched)
+ apr_status_t status, int touched, int error_code)
{
if (r == ctx->r) {
ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, r->connection,
- "h2_proxy_session(%s): request done, touched=%d",
- ctx->id, touched);
+ "h2_proxy_session(%s): request done, touched=%d, error=%d",
+ ctx->id, touched, error_code);
ctx->r_done = 1;
if (touched) ctx->r_may_retry = 0;
- ctx->r_status = ((status == APR_SUCCESS)? APR_SUCCESS
- : HTTP_SERVICE_UNAVAILABLE);
+ ctx->r_status = error_code? HTTP_BAD_GATEWAY :
+ ((status == APR_SUCCESS)? OK :
+ ap_map_http_request_error(status, HTTP_SERVICE_UNAVAILABLE));
}
}
static void session_req_done(h2_proxy_session *session, request_rec *r,
- apr_status_t status, int touched)
+ apr_status_t status, int touched, int error_code)
{
- request_done(session->user_data, r, status, touched);
+ request_done(session->user_data, r, status, touched, error_code);
}
static apr_status_t ctx_run(h2_proxy_ctx *ctx) {
apr_status_t status = OK;
+ h2_proxy_session *session;
int h2_front;
/* Step Four: Send the Request in a new HTTP/2 stream and
* loop until we got the response or encounter errors.
*/
- h2_front = is_h2? is_h2(ctx->owner) : 0;
- ctx->session = h2_proxy_session_setup(ctx->id, ctx->p_conn, ctx->conf,
- h2_front, 30,
- h2_proxy_log2((int)ctx->req_buffer_size),
- session_req_done);
- if (!ctx->session) {
- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, ctx->owner,
+ ctx->has_reusable_session = 0; /* don't know yet */
+ h2_front = is_h2? is_h2(ctx->cfront) : 0;
+ session = h2_proxy_session_setup(ctx->id, ctx->p_conn, ctx->conf,
+ h2_front, 30,
+ h2_proxy_log2((int)ctx->req_buffer_size),
+ session_req_done);
+ if (!session) {
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, ctx->cfront,
APLOGNO(03372) "session unavailable");
return HTTP_SERVICE_UNAVAILABLE;
}
- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, ctx->owner, APLOGNO(03373)
- "eng(%s): run session %s", ctx->id, ctx->session->id);
- ctx->session->user_data = ctx;
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, ctx->cfront, APLOGNO(03373)
+ "eng(%s): run session %s", ctx->id, session->id);
+ session->user_data = ctx;
ctx->r_done = 0;
- add_request(ctx->session, ctx->r);
+ add_request(session, ctx->r);
- while (!ctx->owner->aborted && !ctx->r_done) {
+ while (!ctx->cfront->aborted && !ctx->r_done) {
- status = h2_proxy_session_process(ctx->session);
+ status = h2_proxy_session_process(session);
if (status != APR_SUCCESS) {
/* Encountered an error during session processing */
- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, ctx->owner,
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, ctx->cfront,
APLOGNO(03375) "eng(%s): end of session %s",
- ctx->id, ctx->session->id);
+ ctx->id, session->id);
/* Any open stream of that session needs to
* a) be reopened on the new session iff safe to do so
* b) reported as done (failed) otherwise
*/
- h2_proxy_session_cleanup(ctx->session, session_req_done);
+ h2_proxy_session_cleanup(session, session_req_done);
goto out;
}
}
out:
- if (ctx->owner->aborted) {
+ if (ctx->cfront->aborted) {
/* master connection gone */
- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, ctx->owner,
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, ctx->cfront,
APLOGNO(03374) "eng(%s): master connection gone", ctx->id);
/* cancel all ongoing requests */
- h2_proxy_session_cancel_all(ctx->session);
- h2_proxy_session_process(ctx->session);
+ h2_proxy_session_cancel_all(session);
+ h2_proxy_session_process(session);
}
-
- ctx->session->user_data = NULL;
- ctx->session = NULL;
+ ctx->has_reusable_session = h2_proxy_session_is_reusable(session);
+ session->user_data = NULL;
return status;
}
@@ -344,9 +350,8 @@ static int proxy_http2_handler(request_rec *r,
}
ctx = apr_pcalloc(r->pool, sizeof(*ctx));
- ctx->master = r->connection->master? r->connection->master : r->connection;
- ctx->id = apr_psprintf(r->pool, "%ld", (long)ctx->master->id);
- ctx->owner = r->connection;
+ ctx->id = apr_psprintf(r->pool, "%ld", (long)r->connection->id);
+ ctx->cfront = r->connection;
ctx->pool = r->pool;
ctx->server = r->server;
ctx->proxy_func = proxy_func;
@@ -359,7 +364,7 @@ static int proxy_http2_handler(request_rec *r,
ctx->r_done = 0;
ctx->r_may_retry = 1;
- ap_set_module_config(ctx->owner->conn_config, &proxy_http2_module, ctx);
+ ap_set_module_config(ctx->cfront->conn_config, &proxy_http2_module, ctx);
/* scheme says, this is for us. */
apr_table_setn(ctx->r->notes, H2_PROXY_REQ_URL_NOTE, url);
@@ -367,7 +372,7 @@ static int proxy_http2_handler(request_rec *r,
"H2: serving URL %s", url);
run_connect:
- if (ctx->owner->aborted) goto cleanup;
+ if (ctx->cfront->aborted) goto cleanup;
/* Get a proxy_conn_rec from the worker, might be a new one, might
* be one still open from another request, or it might fail if the
@@ -395,7 +400,7 @@ run_connect:
* backend->hostname. */
if (ap_proxy_connect_backend(ctx->proxy_func, ctx->p_conn, ctx->worker,
ctx->server)) {
- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, ctx->owner, APLOGNO(03352)
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, ctx->cfront, APLOGNO(03352)
"H2: failed to make connection to backend: %s",
ctx->p_conn->hostname);
goto cleanup;
@@ -404,11 +409,11 @@ run_connect:
/* Step Three: Create conn_rec for the socket we have open now. */
status = ap_proxy_connection_create_ex(ctx->proxy_func, ctx->p_conn, ctx->r);
if (status != OK) {
- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, ctx->owner, APLOGNO(03353)
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, ctx->cfront, APLOGNO(03353)
"setup new connection: is_ssl=%d %s %s %s",
ctx->p_conn->is_ssl, ctx->p_conn->ssl_hostname,
locurl, ctx->p_conn->hostname);
- ctx->r_status = status;
+ ctx->r_status = ap_map_http_request_error(status, HTTP_SERVICE_UNAVAILABLE);
goto cleanup;
}
@@ -419,10 +424,10 @@ run_connect:
"proxy-request-alpn-protos", "h2");
}
- if (ctx->owner->aborted) goto cleanup;
+ if (ctx->cfront->aborted) goto cleanup;
status = ctx_run(ctx);
- if (ctx->r_status != APR_SUCCESS && ctx->r_may_retry && !ctx->owner->aborted) {
+ if (ctx->r_status != OK && ctx->r_may_retry && !ctx->cfront->aborted) {
/* Not successfully processed, but may retry, tear down old conn and start over */
if (ctx->p_conn) {
ctx->p_conn->close = 1;
@@ -436,15 +441,16 @@ run_connect:
if (reconnects < 2) {
goto run_connect;
}
- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, ctx->owner, APLOGNO(10023)
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, ctx->cfront, APLOGNO(10023)
"giving up after %d reconnects, request-done=%d",
reconnects, ctx->r_done);
}
cleanup:
if (ctx->p_conn) {
- if (status != APR_SUCCESS) {
- /* close socket when errors happened or session shut down (EOF) */
+ if (status != APR_SUCCESS || !ctx->has_reusable_session) {
+ /* close socket when errors happened or session is not "clean",
+ * meaning in a working condition with no open streams */
ctx->p_conn->close = 1;
}
#if AP_MODULE_MAGIC_AT_LEAST(20140207, 2)
@@ -454,9 +460,15 @@ cleanup:
ctx->p_conn = NULL;
}
- ap_set_module_config(ctx->owner->conn_config, &proxy_http2_module, NULL);
- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, ctx->owner,
+ ap_set_module_config(ctx->cfront->conn_config, &proxy_http2_module, NULL);
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, ctx->cfront,
APLOGNO(03377) "leaving handler");
+ if (ctx->r_status != OK) {
+ ap_die(ctx->r_status, r);
+ }
+ else if (status != APR_SUCCESS) {
+ ap_die(ap_map_http_request_error(status, HTTP_SERVICE_UNAVAILABLE), r);
+ }
return ctx->r_status;
}
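
The handler changes above stop collapsing every failure into 503: request_done() now maps the APR status through ap_map_http_request_error() (or forces 502 when the backend signalled a stream error), and the handler calls ap_die() so the client receives a proper error response. A compact sketch of that mapping step, with my_finish_request() as a hypothetical stand-in for the cleanup path:

#include "httpd.h"
#include "http_protocol.h"
#include "http_request.h"

/* Sketch: translate the backend result into the response the client sees.
 * error_code != 0 stands for an HTTP/2 stream error from the backend. */
static int my_finish_request(request_rec *r, apr_status_t rv, int error_code)
{
    int status;

    if (error_code)
        status = HTTP_BAD_GATEWAY;          /* backend reset the stream */
    else if (rv == APR_SUCCESS)
        status = OK;
    else                                    /* e.g. timeouts map to 408 */
        status = ap_map_http_request_error(rv, HTTP_SERVICE_UNAVAILABLE);

    if (status != OK)
        ap_die(status, r);                  /* send the error response */
    return status;
}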
diff --git a/modules/ldap/util_ldap.c b/modules/ldap/util_ldap.c
index 14b774a..aa0bad1 100644
--- a/modules/ldap/util_ldap.c
+++ b/modules/ldap/util_ldap.c
@@ -75,6 +75,38 @@ module AP_MODULE_DECLARE_DATA ldap_module;
static const char *ldap_cache_mutex_type = "ldap-cache";
static apr_status_t uldap_connection_unbind(void *param);
+/* For OpenLDAP with the 3-arg version of ldap_set_rebind_proc(), use
+ * a simpler rebind callback than the implementation in APR-util.
+ * Testing for API version >= 3001 appears safe although OpenLDAP
+ * 2.1.x (API version = 2004) also has the 3-arg API. */
+#if APR_HAS_OPENLDAP_LDAPSDK && defined(LDAP_API_VERSION) && LDAP_API_VERSION >= 3001
+
+#define uldap_rebind_init(p) APR_SUCCESS /* noop */
+
+static int uldap_rebind_proc(LDAP *ld, const char *url, ber_tag_t request,
+ ber_int_t msgid, void *params)
+{
+ util_ldap_connection_t *ldc = params;
+
+ return ldap_bind_s(ld, ldc->binddn, ldc->bindpw, LDAP_AUTH_SIMPLE);
+}
+
+static apr_status_t uldap_rebind_add(util_ldap_connection_t *ldc)
+{
+ ldap_set_rebind_proc(ldc->ldap, uldap_rebind_proc, ldc);
+ return APR_SUCCESS;
+}
+
+#else /* !APR_HAS_OPENLDAP_LDAPSDK */
+
+#define USE_APR_LDAP_REBIND
+#include <apr_ldap_rebind.h>
+
+#define uldap_rebind_init(p) apr_ldap_rebind_init(p)
+#define uldap_rebind_add(ldc) apr_ldap_rebind_add((ldc)->rebind_pool, \
+ (ldc)->ldap, (ldc)->binddn, \
+ (ldc)->bindpw)
+#endif
static APR_INLINE apr_status_t ldap_cache_lock(util_ldap_state_t *st, request_rec *r) {
apr_status_t rv = APR_SUCCESS;
@@ -195,6 +227,13 @@ static apr_status_t uldap_connection_unbind(void *param)
util_ldap_connection_t *ldc = param;
if (ldc) {
+#ifdef USE_APR_LDAP_REBIND
+ /* forget the rebind info for this conn */
+ if (ldc->ChaseReferrals == AP_LDAP_CHASEREFERRALS_ON) {
+ apr_pool_clear(ldc->rebind_pool);
+ }
+#endif
+
if (ldc->ldap) {
if (ldc->r) {
ap_log_rerror(APLOG_MARK, APLOG_TRACE5, 0, ldc->r, "LDC %pp unbind", ldc);
@@ -203,12 +242,6 @@ static apr_status_t uldap_connection_unbind(void *param)
ldc->ldap = NULL;
}
ldc->bound = 0;
-
- /* forget the rebind info for this conn */
- if (ldc->ChaseReferrals == AP_LDAP_CHASEREFERRALS_ON) {
- apr_ldap_rebind_remove(ldc->ldap);
- apr_pool_clear(ldc->rebind_pool);
- }
}
return APR_SUCCESS;
@@ -344,7 +377,7 @@ static int uldap_connection_init(request_rec *r,
if (ldc->ChaseReferrals == AP_LDAP_CHASEREFERRALS_ON) {
/* Now that we have an ldap struct, add it to the referral list for rebinds. */
- rc = apr_ldap_rebind_add(ldc->rebind_pool, ldc->ldap, ldc->binddn, ldc->bindpw);
+ rc = uldap_rebind_add(ldc);
if (rc != APR_SUCCESS) {
ap_log_error(APLOG_MARK, APLOG_ERR, rc, r->server, APLOGNO(01277)
"LDAP: Unable to add rebind cross reference entry. Out of memory?");
@@ -870,6 +903,7 @@ static util_ldap_connection_t *
/* whether or not to keep this connection in the pool when it's returned */
l->keep = (st->connection_pool_ttl == 0) ? 0 : 1;
+#ifdef USE_APR_LDAP_REBIND
if (l->ChaseReferrals == AP_LDAP_CHASEREFERRALS_ON) {
if (apr_pool_create(&(l->rebind_pool), l->pool) != APR_SUCCESS) {
ap_log_rerror(APLOG_MARK, APLOG_CRIT, 0, r, APLOGNO(01286)
@@ -881,6 +915,7 @@ static util_ldap_connection_t *
}
apr_pool_tag(l->rebind_pool, "util_ldap_rebind");
}
+#endif
if (p) {
p->next = l;
@@ -3070,7 +3105,7 @@ static int util_ldap_post_config(apr_pool_t *p, apr_pool_t *plog,
}
/* Initialize the rebind callback's cross reference list. */
- apr_ldap_rebind_init (p);
+ (void) uldap_rebind_init(p);
#ifdef AP_LDAP_OPT_DEBUG
if (st->debug_level > 0) {
diff --git a/modules/ldap/util_ldap_cache.c b/modules/ldap/util_ldap_cache.c
index 774a76e..27dc733 100644
--- a/modules/ldap/util_ldap_cache.c
+++ b/modules/ldap/util_ldap_cache.c
@@ -230,8 +230,8 @@ void util_ldap_search_node_display(request_rec *r, util_ald_cache_t *cache, void
"<td nowrap>%s</td>"
"<td nowrap>%s</td>"
"</tr>",
- node->username,
- node->dn,
+ ap_escape_html(r->pool, node->username),
+ ap_escape_html(r->pool, node->dn),
date_str);
}
@@ -331,9 +331,9 @@ void util_ldap_compare_node_display(request_rec *r, util_ald_cache_t *cache, voi
"<td nowrap>%s</td>"
"<td nowrap>%s</td>"
"</tr>",
- node->dn,
- node->attrib,
- node->value,
+ ap_escape_html(r->pool, node->dn),
+ ap_escape_html(r->pool, node->attrib),
+ ap_escape_html(r->pool, node->value),
date_str,
cmp_result,
sub_groups_val,
@@ -391,8 +391,8 @@ void util_ldap_dn_compare_node_display(request_rec *r, util_ald_cache_t *cache,
"<td nowrap>%s</td>"
"<td nowrap>%s</td>"
"</tr>",
- node->reqdn,
- node->dn);
+ ap_escape_html(r->pool, node->reqdn),
+ ap_escape_html(r->pool, node->dn));
}
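
The util_ldap_cache.c hunks above harden the cache status page by HTML-escaping values that originate from LDAP (usernames, DNs, attributes) before they are printed. A minimal sketch of that escaping step; the row layout and function name are illustrative only.

#include "httpd.h"
#include "http_protocol.h"

/* Sketch: escape potentially attacker-influenced strings before they are
 * embedded in HTML output. */
static void print_cache_row(request_rec *r, const char *username,
                            const char *dn)
{
    ap_rprintf(r, "<tr><td>%s</td><td>%s</td></tr>\n",
               ap_escape_html(r->pool, username),
               ap_escape_html(r->pool, dn));
}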
diff --git a/modules/mappers/config9.m4 b/modules/mappers/config9.m4
index 55a97ab..7120b72 100644
--- a/modules/mappers/config9.m4
+++ b/modules/mappers/config9.m4
@@ -14,6 +14,11 @@ APACHE_MODULE(userdir, mapping of requests to user-specific directories, , , mos
APACHE_MODULE(alias, mapping of requests to different filesystem parts, , , yes)
APACHE_MODULE(rewrite, rule based URL manipulation, , , most)
+if test "x$enable_rewrite" != "xno"; then
+ # mod_rewrite needs test_char.h
+ APR_ADDTO(INCLUDES, [-I\$(top_builddir)/server])
+fi
+
APR_ADDTO(INCLUDES, [-I\$(top_srcdir)/$modpath_current])
APACHE_MODPATH_FINISH
diff --git a/modules/mappers/mod_alias.c b/modules/mappers/mod_alias.c
index 5ff937b..35eca74 100644
--- a/modules/mappers/mod_alias.c
+++ b/modules/mappers/mod_alias.c
@@ -37,6 +37,12 @@
#include "ap_expr.h"
+#define ALIAS_FLAG_DEFAULT -1
+#define ALIAS_FLAG_OFF 0
+#define ALIAS_FLAG_ON 1
+
+#define ALIAS_PRESERVE_PATH_DEFAULT 0
+
typedef struct {
const char *real;
const char *fake;
@@ -55,9 +61,12 @@ typedef struct {
unsigned int redirect_set:1;
apr_array_header_t *redirects;
const ap_expr_info_t *alias;
+ const char *alias_fake;
char *handler;
const ap_expr_info_t *redirect;
int redirect_status; /* 301, 302, 303, 410, etc */
+ int allow_relative; /* skip ap_construct_url() */
+ int alias_preserve_path; /* map full path */
} alias_dir_conf;
module AP_MODULE_DECLARE_DATA alias_module;
@@ -80,6 +89,8 @@ static void *create_alias_dir_config(apr_pool_t *p, char *d)
alias_dir_conf *a =
(alias_dir_conf *) apr_pcalloc(p, sizeof(alias_dir_conf));
a->redirects = apr_array_make(p, 2, sizeof(alias_entry));
+ a->allow_relative = ALIAS_FLAG_DEFAULT;
+ a->alias_preserve_path = ALIAS_FLAG_DEFAULT;
return a;
}
@@ -105,12 +116,19 @@ static void *merge_alias_dir_config(apr_pool_t *p, void *basev, void *overridesv
a->redirects = apr_array_append(p, overrides->redirects, base->redirects);
a->alias = (overrides->alias_set == 0) ? base->alias : overrides->alias;
+ a->alias_fake = (overrides->alias_set == 0) ? base->alias_fake : overrides->alias_fake;
a->handler = (overrides->alias_set == 0) ? base->handler : overrides->handler;
a->alias_set = overrides->alias_set || base->alias_set;
a->redirect = (overrides->redirect_set == 0) ? base->redirect : overrides->redirect;
a->redirect_status = (overrides->redirect_set == 0) ? base->redirect_status : overrides->redirect_status;
a->redirect_set = overrides->redirect_set || base->redirect_set;
+ a->allow_relative = (overrides->allow_relative != ALIAS_FLAG_DEFAULT)
+ ? overrides->allow_relative
+ : base->allow_relative;
+ a->alias_preserve_path = (overrides->alias_preserve_path != ALIAS_FLAG_DEFAULT)
+ ? overrides->alias_preserve_path
+ : base->alias_preserve_path;
return a;
}
@@ -210,6 +228,7 @@ static const char *add_alias(cmd_parms *cmd, void *dummy, const char *fake,
NULL);
}
+ dirconf->alias_fake = cmd->path;
dirconf->handler = cmd->info;
dirconf->alias_set = 1;
@@ -428,6 +447,17 @@ static char *try_alias(request_rec *r)
return PREGSUB_ERROR;
}
+ if (dirconf->alias_fake && dirconf->alias_preserve_path == ALIAS_FLAG_ON) {
+ int l;
+
+ l = alias_matches(r->uri, dirconf->alias_fake);
+
+ if (l > 0) {
+ ap_set_context_info(r, dirconf->alias_fake, found);
+ found = apr_pstrcat(r->pool, found, r->uri + l, NULL);
+ }
+ }
+
if (dirconf->handler) { /* Set handler, and leave a note for mod_cgi */
r->handler = dirconf->handler;
apr_table_setn(r->notes, "alias-forced-type", r->handler);
@@ -591,31 +621,33 @@ static int translate_alias_redir(request_rec *r)
if (ret == PREGSUB_ERROR)
return HTTP_INTERNAL_SERVER_ERROR;
if (ap_is_HTTP_REDIRECT(status)) {
- if (ret[0] == '/') {
- char *orig_target = ret;
-
- ret = ap_construct_url(r->pool, ret, r);
- ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(00673)
- "incomplete redirection target of '%s' for "
- "URI '%s' modified to '%s'",
- orig_target, r->uri, ret);
- }
- if (!ap_is_url(ret)) {
- status = HTTP_INTERNAL_SERVER_ERROR;
- ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(00674)
- "cannot redirect '%s' to '%s'; "
- "target is not a valid absoluteURI or abs_path",
- r->uri, ret);
- }
- else {
- /* append requested query only, if the config didn't
- * supply its own.
- */
- if (r->args && !ap_strchr(ret, '?')) {
- ret = apr_pstrcat(r->pool, ret, "?", r->args, NULL);
+ alias_dir_conf *dirconf = (alias_dir_conf *)
+ ap_get_module_config(r->per_dir_config, &alias_module);
+ if (dirconf->allow_relative != ALIAS_FLAG_ON || ret[0] != '/') {
+ if (ret[0] == '/') {
+ char *orig_target = ret;
+
+ ret = ap_construct_url(r->pool, ret, r);
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(00673)
+ "incomplete redirection target of '%s' for "
+ "URI '%s' modified to '%s'",
+ orig_target, r->uri, ret);
}
- apr_table_setn(r->headers_out, "Location", ret);
+ if (!ap_is_url(ret)) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(00674)
+ "cannot redirect '%s' to '%s'; "
+ "target is not a valid absoluteURI or abs_path",
+ r->uri, ret);
+ return HTTP_INTERNAL_SERVER_ERROR;
+ }
+ }
+ /* append requested query only, if the config didn't
+ * supply its own.
+ */
+ if (r->args && !ap_strchr(ret, '?')) {
+ ret = apr_pstrcat(r->pool, ret, "?", r->args, NULL);
}
+ apr_table_setn(r->headers_out, "Location", ret);
}
return status;
}
@@ -646,31 +678,31 @@ static int fixup_redir(request_rec *r)
if (ret == PREGSUB_ERROR)
return HTTP_INTERNAL_SERVER_ERROR;
if (ap_is_HTTP_REDIRECT(status)) {
- if (ret[0] == '/') {
- char *orig_target = ret;
-
- ret = ap_construct_url(r->pool, ret, r);
- ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(00675)
- "incomplete redirection target of '%s' for "
- "URI '%s' modified to '%s'",
- orig_target, r->uri, ret);
- }
- if (!ap_is_url(ret)) {
- status = HTTP_INTERNAL_SERVER_ERROR;
- ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(00676)
- "cannot redirect '%s' to '%s'; "
- "target is not a valid absoluteURI or abs_path",
- r->uri, ret);
- }
- else {
- /* append requested query only, if the config didn't
- * supply its own.
- */
- if (r->args && !ap_strchr(ret, '?')) {
- ret = apr_pstrcat(r->pool, ret, "?", r->args, NULL);
+ if (dirconf->allow_relative != ALIAS_FLAG_ON || ret[0] != '/') {
+ if (ret[0] == '/') {
+ char *orig_target = ret;
+
+ ret = ap_construct_url(r->pool, ret, r);
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(00675)
+ "incomplete redirection target of '%s' for "
+ "URI '%s' modified to '%s'",
+ orig_target, r->uri, ret);
}
- apr_table_setn(r->headers_out, "Location", ret);
+ if (!ap_is_url(ret)) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(00676)
+ "cannot redirect '%s' to '%s'; "
+ "target is not a valid absoluteURI or abs_path",
+ r->uri, ret);
+ return HTTP_INTERNAL_SERVER_ERROR;
+ }
+ }
+ /* append requested query only, if the config didn't
+ * supply its own.
+ */
+ if (r->args && !ap_strchr(ret, '?')) {
+ ret = apr_pstrcat(r->pool, ret, "?", r->args, NULL);
}
+ apr_table_setn(r->headers_out, "Location", ret);
}
return status;
}
@@ -702,6 +734,13 @@ static const command_rec alias_cmds[] =
AP_INIT_TAKE2("RedirectPermanent", add_redirect2,
(void *) HTTP_MOVED_PERMANENTLY, OR_FILEINFO,
"a document to be redirected, then the destination URL"),
+ AP_INIT_FLAG("RedirectRelative", ap_set_flag_slot,
+ (void*)APR_OFFSETOF(alias_dir_conf, allow_relative), OR_FILEINFO,
+ "Set to ON to allow relative redirect targets to be issued as-is"),
+ AP_INIT_FLAG("AliasPreservePath", ap_set_flag_slot,
+ (void*)APR_OFFSETOF(alias_dir_conf, alias_preserve_path), OR_FILEINFO,
+ "Set to ON to map the full path after the fakename to the realname."),
+
{NULL}
};
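
The new RedirectRelative and AliasPreservePath flags use a tri-state per-directory pattern: the create function seeds the slot with -1 ("unset"), the merge function lets only an explicit On/Off override the parent, and AP_INIT_FLAG with ap_set_flag_slot does the parsing. A stripped-down sketch of that pattern with hypothetical names (my_dir_conf, MyFlag):

#include "httpd.h"
#include "http_config.h"
#include "apr_general.h"

#define FLAG_DEFAULT -1

typedef struct {
    int my_flag;   /* FLAG_DEFAULT, 0 (Off) or 1 (On) */
} my_dir_conf;

static void *create_dir_conf(apr_pool_t *p, char *dir)
{
    my_dir_conf *c = apr_pcalloc(p, sizeof(*c));
    c->my_flag = FLAG_DEFAULT;          /* not configured yet */
    return c;
}

static void *merge_dir_conf(apr_pool_t *p, void *basev, void *addv)
{
    my_dir_conf *base = basev, *add = addv;
    my_dir_conf *c = apr_pcalloc(p, sizeof(*c));
    /* only an explicit On/Off in the more specific section wins */
    c->my_flag = (add->my_flag != FLAG_DEFAULT) ? add->my_flag
                                                : base->my_flag;
    return c;
}

static const command_rec my_cmds[] = {
    AP_INIT_FLAG("MyFlag", ap_set_flag_slot,
                 (void *)APR_OFFSETOF(my_dir_conf, my_flag), OR_FILEINFO,
                 "On/Off example of a tri-state per-dir flag"),
    {NULL}
};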
diff --git a/modules/mappers/mod_rewrite.c b/modules/mappers/mod_rewrite.c
index f93f23f..bbcc11b 100644
--- a/modules/mappers/mod_rewrite.c
+++ b/modules/mappers/mod_rewrite.c
@@ -3865,8 +3865,16 @@ static const char *cmd_rewriterule(cmd_parms *cmd, void *in_dconf,
if (*(a2_end-1) == '?') {
/* a literal ? at the end of the unsubstituted rewrite rule */
- newrule->flags |= RULEFLAG_QSNONE;
- *(a2_end-1) = '\0'; /* trailing ? has done its job */
+ if (newrule->flags & RULEFLAG_QSAPPEND) {
+ /* with QSA, splitout_queryargs will safely handle it if RULEFLAG_QSLAST is set */
+ newrule->flags |= RULEFLAG_QSLAST;
+ }
+ else {
+ /* avoid getting a query string via inadvertent capture */
+ newrule->flags |= RULEFLAG_QSNONE;
+ /* trailing ? has done its job, but splitout_queryargs will not chop it off */
+ *(a2_end-1) = '\0';
+ }
}
else if (newrule->flags & RULEFLAG_QSDISCARD) {
if (NULL == ap_strchr(newrule->output, '?')) {
@@ -4758,8 +4766,8 @@ static int hook_uri2file(request_rec *r)
}
if (rulestatus) {
- unsigned skip_absolute = is_absolute_uri(r->filename, NULL);
apr_size_t flen = r->filename ? strlen(r->filename) : 0;
+ unsigned skip_absolute = flen ? is_absolute_uri(r->filename, NULL) : 0;
int to_proxyreq = (flen > 6 && strncmp(r->filename, "proxy:", 6) == 0);
int will_escape = skip_absolute && (rulestatus != ACTION_NOESCAPE);
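
A minimal, illustrative RewriteRule shape affected by the change above (pattern
and substitution are placeholders). A substitution ending in a literal '?' no
longer forces RULEFLAG_QSNONE when query-string append is requested; with [QSA]
the trailing '?' is now left for splitout_queryargs() via RULEFLAG_QSLAST:

    # Illustrative only.
    RewriteRule "^/old$" "/new?" [QSA,R=permanent]
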
diff --git a/modules/mappers/mod_rewrite.mak b/modules/mappers/mod_rewrite.mak
index 3b08cab..860dd8b 100644
--- a/modules/mappers/mod_rewrite.mak
+++ b/modules/mappers/mod_rewrite.mak
@@ -62,7 +62,7 @@ CLEAN :
if not exist "$(OUTDIR)/$(NULL)" mkdir "$(OUTDIR)"
CPP=cl.exe
-CPP_PROJ=/nologo /MD /W3 /Zi /O2 /Oy- /I "../../include" /I "../database" /I "../ssl" /I "../../srclib/apr/include" /I "../../srclib/apr-util/include" /D "NDEBUG" /D "WIN32" /D "_WINDOWS" /Fo"$(INTDIR)\\" /Fd"$(INTDIR)\mod_rewrite_src" /FD /c
+CPP_PROJ=/nologo /MD /W3 /Zi /O2 /Oy- /I "../../include" /I "../database" /I "../ssl" /I "../../srclib/apr/include" /I "../../srclib/apr-util/include" /I "../../server" /D "NDEBUG" /D "WIN32" /D "_WINDOWS" /Fo"$(INTDIR)\\" /Fd"$(INTDIR)\mod_rewrite_src" /FD /c
.c{$(INTDIR)}.obj::
$(CPP) @<<
@@ -166,7 +166,7 @@ CLEAN :
if not exist "$(OUTDIR)/$(NULL)" mkdir "$(OUTDIR)"
CPP=cl.exe
-CPP_PROJ=/nologo /MDd /W3 /Zi /Od /I "../../include" /I "../database" /I "../ssl" /I "../../srclib/apr/include" /I "../../srclib/apr-util/include" /D "_DEBUG" /D "WIN32" /D "_WINDOWS" /Fo"$(INTDIR)\\" /Fd"$(INTDIR)\mod_rewrite_src" /FD /EHsc /c
+CPP_PROJ=/nologo /MDd /W3 /Zi /Od /I "../../include" /I "../database" /I "../ssl" /I "../../srclib/apr/include" /I "../../srclib/apr-util/include" /I "../../server" /D "_DEBUG" /D "WIN32" /D "_WINDOWS" /Fo"$(INTDIR)\\" /Fd"$(INTDIR)\mod_rewrite_src" /FD /EHsc /c
.c{$(INTDIR)}.obj::
$(CPP) @<<
diff --git a/modules/md/md.h b/modules/md/md.h
index 1d75d10..035ccba 100644
--- a/modules/md/md.h
+++ b/modules/md/md.h
@@ -78,12 +78,7 @@ struct md_t {
struct apr_array_header_t *domains; /* all DNS names this MD includes */
struct apr_array_header_t *contacts; /* list of contact uris, e.g. mailto:xxx */
- int transitive; /* != 0 iff VirtualHost names/aliases are auto-added */
- md_require_t require_https; /* Iff https: is required for this MD */
-
- int renew_mode; /* mode of obtaining credentials */
struct md_pkeys_spec_t *pks; /* specification for generating private keys */
- int must_staple; /* certificates should set the OCSP Must Staple extension */
md_timeslice_t *renew_window; /* time before expiration that starts renewal */
md_timeslice_t *warn_window; /* time before expiration that warnings are sent out */
@@ -98,19 +93,23 @@ struct md_t {
const char *ca_eab_kid; /* optional KEYID for external account binding */
const char *ca_eab_hmac; /* optional HMAC for external account binding */
- md_state_t state; /* state of this MD */
const char *state_descr; /* description of state or NULL */
struct apr_array_header_t *acme_tls_1_domains; /* domains supporting "acme-tls/1" protocol */
- int stapling; /* if OCSP stapling is enabled */
const char *dns01_cmd; /* DNS challenge command, override global command */
- int watched; /* if certificate is supervised (renew or expiration warning) */
const struct md_srv_conf_t *sc; /* server config where it was defined or NULL */
const char *defn_name; /* config file this MD was defined */
unsigned defn_line_number; /* line number of definition */
-
const char *configured_name; /* name this MD was configured with, if different */
+
+ int renew_mode; /* mode of obtaining credentials */
+ md_require_t require_https; /* Iff https: is required for this MD */
+ md_state_t state; /* state of this MD */
+ int transitive; /* != 0 iff VirtualHost names/aliases are auto-added */
+ int must_staple; /* certificates should set the OCSP Must Staple extension */
+ int stapling; /* if OCSP stapling is enabled */
+ int watched; /* if certificate is supervised (renew or expiration warning) */
};
#define MD_KEY_ACCOUNT "account"
@@ -128,6 +127,7 @@ struct md_t {
#define MD_KEY_CHALLENGE "challenge"
#define MD_KEY_CHALLENGES "challenges"
#define MD_KEY_CMD_DNS01 "cmd-dns-01"
+#define MD_KEY_DNS01_VERSION "cmd-dns-01-version"
#define MD_KEY_COMPLETE "complete"
#define MD_KEY_CONTACT "contact"
#define MD_KEY_CONTACTS "contacts"
diff --git a/modules/md/md_acme_authz.c b/modules/md/md_acme_authz.c
index a55804e..f4579b3 100644
--- a/modules/md/md_acme_authz.c
+++ b/modules/md/md_acme_authz.c
@@ -244,7 +244,8 @@ static apr_status_t cha_http_01_setup(md_acme_authz_cha_t *cha, md_acme_authz_t
md_acme_t *acme, md_store_t *store,
md_pkeys_spec_t *key_specs,
apr_array_header_t *acme_tls_1_domains, const md_t *md,
- apr_table_t *env, md_result_t *result, apr_pool_t *p)
+ apr_table_t *env, md_result_t *result,
+ const char **psetup_token, apr_pool_t *p)
{
const char *data;
apr_status_t rv;
@@ -289,6 +290,8 @@ static apr_status_t cha_http_01_setup(md_acme_authz_cha_t *cha, md_acme_authz_t
rv = md_acme_POST(acme, cha->uri, on_init_authz_resp, authz_http_set, NULL, NULL, &ctx);
}
out:
+ *psetup_token = (APR_SUCCESS == rv)?
+ apr_psprintf(p, "%s:%s", MD_AUTHZ_TYPE_HTTP01, authz->domain) : NULL;
return rv;
}
@@ -302,7 +305,8 @@ static apr_status_t cha_tls_alpn_01_setup(md_acme_authz_cha_t *cha, md_acme_auth
md_acme_t *acme, md_store_t *store,
md_pkeys_spec_t *key_specs,
apr_array_header_t *acme_tls_1_domains, const md_t *md,
- apr_table_t *env, md_result_t *result, apr_pool_t *p)
+ apr_table_t *env, md_result_t *result,
+ const char **psetup_token, apr_pool_t *p)
{
const char *acme_id, *token;
apr_status_t rv;
@@ -407,6 +411,8 @@ static apr_status_t cha_tls_alpn_01_setup(md_acme_authz_cha_t *cha, md_acme_auth
rv = md_acme_POST(acme, cha->uri, on_init_authz_resp, authz_http_set, NULL, NULL, &ctx);
}
out:
+ *psetup_token = (APR_SUCCESS == rv)?
+ apr_psprintf(p, "%s:%s", MD_AUTHZ_TYPE_TLSALPN01, authz->domain) : NULL;
return rv;
}
@@ -414,7 +420,8 @@ static apr_status_t cha_dns_01_setup(md_acme_authz_cha_t *cha, md_acme_authz_t *
md_acme_t *acme, md_store_t *store,
md_pkeys_spec_t *key_specs,
apr_array_header_t *acme_tls_1_domains, const md_t *md,
- apr_table_t *env, md_result_t *result, apr_pool_t *p)
+ apr_table_t *env, md_result_t *result,
+ const char **psetup_token, apr_pool_t *p)
{
const char *token;
const char * const *argv;
@@ -456,7 +463,7 @@ static apr_status_t cha_dns_01_setup(md_acme_authz_cha_t *cha, md_acme_authz_t *
"%s: dns-01 setup command: %s", authz->domain, cmdline);
apr_tokenize_to_argv(cmdline, (char***)&argv, p);
- if (APR_SUCCESS != (rv = md_util_exec(p, argv[0], argv, NULL, &exit_code))) {
+ if (APR_SUCCESS != (rv = md_util_exec(p, argv[0], argv, &exit_code))) {
md_log_perror(MD_LOG_MARK, MD_LOG_WARNING, rv, p,
"%s: dns-01 setup command failed to execute for %s", md->name, authz->domain);
goto out;
@@ -486,6 +493,8 @@ static apr_status_t cha_dns_01_setup(md_acme_authz_cha_t *cha, md_acme_authz_t *
rv = md_acme_POST(acme, cha->uri, on_init_authz_resp, authz_http_set, NULL, NULL, &ctx);
out:
+ *psetup_token = (APR_SUCCESS == rv)?
+ apr_psprintf(p, "%s:%s %s", MD_AUTHZ_TYPE_DNS01, authz->domain, token) : NULL;
return rv;
}
@@ -493,7 +502,8 @@ static apr_status_t cha_dns_01_teardown(md_store_t *store, const char *domain, c
apr_table_t *env, apr_pool_t *p)
{
const char * const *argv;
- const char *cmdline, *dns01_cmd;
+ const char *cmdline, *dns01_cmd, *dns01v;
+ char *tmp, *s;
apr_status_t rv;
int exit_code;
@@ -508,10 +518,20 @@ static apr_status_t cha_dns_01_teardown(md_store_t *store, const char *domain, c
md->name, domain);
goto out;
}
-
+ dns01v = apr_table_get(env, MD_KEY_DNS01_VERSION);
+ if (!dns01v || strcmp(dns01v, "2")) {
+ /* use older version of teardown args with only domain, remove token */
+ tmp = apr_pstrdup(p, domain);
+ s = strchr(tmp, ' ');
+ if (s) {
+ *s = '\0';
+ domain = tmp;
+ }
+ }
+
cmdline = apr_psprintf(p, "%s teardown %s", dns01_cmd, domain);
apr_tokenize_to_argv(cmdline, (char***)&argv, p);
- if (APR_SUCCESS != (rv = md_util_exec(p, argv[0], argv, NULL, &exit_code)) || exit_code) {
+ if (APR_SUCCESS != (rv = md_util_exec(p, argv[0], argv, &exit_code)) || exit_code) {
md_log_perror(MD_LOG_MARK, MD_LOG_WARNING, rv, p,
"%s: dns-01 teardown command failed (exit code=%d) for %s",
md->name, exit_code, domain);
@@ -532,7 +552,8 @@ typedef apr_status_t cha_setup(md_acme_authz_cha_t *cha, md_acme_authz_t *authz,
md_acme_t *acme, md_store_t *store,
md_pkeys_spec_t *key_specs,
apr_array_header_t *acme_tls_1_domains, const md_t *md,
- apr_table_t *env, md_result_t *result, apr_pool_t *p);
+ apr_table_t *env, md_result_t *result,
+ const char **psetup_token, apr_pool_t *p);
typedef apr_status_t cha_teardown(md_store_t *store, const char *domain, const md_t *md,
apr_table_t *env, apr_pool_t *p);
@@ -590,8 +611,7 @@ apr_status_t md_acme_authz_respond(md_acme_authz_t *authz, md_acme_t *acme, md_s
apr_status_t rv;
int i, j;
cha_find_ctx fctx;
- const char *challenge_setup;
-
+
assert(acme);
assert(authz);
assert(authz->resource);
@@ -613,7 +633,7 @@ apr_status_t md_acme_authz_respond(md_acme_authz_t *authz, md_acme_t *acme, md_s
"type, this domain supports %s",
authz->domain, apr_array_pstrcat(p, challenges, ' '));
rv = APR_ENOTIMPL;
- challenge_setup = NULL;
+ *psetup_token = NULL;
for (i = 0; i < challenges->nelts; ++i) {
fctx.type = APR_ARRAY_IDX(challenges, i, const char *);
fctx.accepted = NULL;
@@ -629,12 +649,12 @@ apr_status_t md_acme_authz_respond(md_acme_authz_t *authz, md_acme_t *acme, md_s
md_result_activity_printf(result, "Setting up challenge '%s' for domain %s",
fctx.accepted->type, authz->domain);
rv = CHA_TYPES[j].setup(fctx.accepted, authz, acme, store, key_specs,
- acme_tls_1_domains, md, env, result, p);
+ acme_tls_1_domains, md, env, result,
+ psetup_token, p);
if (APR_SUCCESS == rv) {
md_log_perror(MD_LOG_MARK, MD_LOG_DEBUG, rv, p,
"%s: set up challenge '%s' for %s",
authz->domain, fctx.accepted->type, md->name);
- challenge_setup = CHA_TYPES[j].name;
goto out;
}
md_result_printf(result, rv, "error setting up challenge '%s' for %s, "
@@ -647,7 +667,6 @@ apr_status_t md_acme_authz_respond(md_acme_authz_t *authz, md_acme_t *acme, md_s
}
out:
- *psetup_token = (APR_SUCCESS == rv)? apr_psprintf(p, "%s:%s", challenge_setup, authz->domain) : NULL;
if (!fctx.accepted || APR_ENOTIMPL == rv) {
rv = APR_EINVAL;
fctx.offered = apr_array_make(p, 5, sizeof(const char*));
diff --git a/modules/md/md_acme_order.c b/modules/md/md_acme_order.c
index 9e25e84..061093a 100644
--- a/modules/md/md_acme_order.c
+++ b/modules/md/md_acme_order.c
@@ -537,8 +537,8 @@ static apr_status_t check_challenges(void *baton, int attempt)
}
}
else {
- md_result_printf(ctx->result, rv, "authorization retrieval failed for domain %s",
- authz->domain);
+ md_result_printf(ctx->result, rv, "authorization retrieval failed for %s on <%s>",
+ ctx->name, url);
}
}
leave:
diff --git a/modules/md/md_crypt.c b/modules/md/md_crypt.c
index f2b0cd5..4b2af89 100644
--- a/modules/md/md_crypt.c
+++ b/modules/md/md_crypt.c
@@ -32,6 +32,9 @@
#include <openssl/rand.h>
#include <openssl/rsa.h>
#include <openssl/x509v3.h>
+#if OPENSSL_VERSION_NUMBER >= 0x30000000L
+#include <openssl/core_names.h>
+#endif
#include "md.h"
#include "md_crypt.h"
@@ -988,26 +991,42 @@ static const char *bn64(const BIGNUM *b, apr_pool_t *p)
const char *md_pkey_get_rsa_e64(md_pkey_t *pkey, apr_pool_t *p)
{
- const BIGNUM *e;
- RSA *rsa = EVP_PKEY_get1_RSA(pkey->pkey);
-
- if (!rsa) {
- return NULL;
+#if OPENSSL_VERSION_NUMBER < 0x30000000L
+ const RSA *rsa = EVP_PKEY_get0_RSA(pkey->pkey);
+ if (rsa) {
+ const BIGNUM *e;
+ RSA_get0_key(rsa, NULL, &e, NULL);
+ return bn64(e, p);
}
- RSA_get0_key(rsa, NULL, &e, NULL);
- return bn64(e, p);
+#else
+ BIGNUM *e = NULL;
+ if (EVP_PKEY_get_bn_param(pkey->pkey, OSSL_PKEY_PARAM_RSA_E, &e)) {
+ const char *e64 = bn64(e, p);
+ BN_free(e);
+ return e64;
+ }
+#endif
+ return NULL;
}
const char *md_pkey_get_rsa_n64(md_pkey_t *pkey, apr_pool_t *p)
{
- const BIGNUM *n;
- RSA *rsa = EVP_PKEY_get1_RSA(pkey->pkey);
-
- if (!rsa) {
- return NULL;
+#if OPENSSL_VERSION_NUMBER < 0x30000000L
+ const RSA *rsa = EVP_PKEY_get0_RSA(pkey->pkey);
+ if (rsa) {
+ const BIGNUM *n;
+ RSA_get0_key(rsa, &n, NULL, NULL);
+ return bn64(n, p);
}
- RSA_get0_key(rsa, &n, NULL, NULL);
- return bn64(n, p);
+#else
+ BIGNUM *n = NULL;
+ if (EVP_PKEY_get_bn_param(pkey->pkey, OSSL_PKEY_PARAM_RSA_N, &n)) {
+ const char *n64 = bn64(n, p);
+ BN_free(n);
+ return n64;
+ }
+#endif
+ return NULL;
}
apr_status_t md_crypt_sign64(const char **psign64, md_pkey_t *pkey, apr_pool_t *p,
diff --git a/modules/md/md_util.c b/modules/md/md_util.c
index 884c0bb..95ecc27 100644
--- a/modules/md/md_util.c
+++ b/modules/md/md_util.c
@@ -916,6 +916,19 @@ int md_dns_domains_match(const apr_array_header_t *domains, const char *name)
return 0;
}
+int md_is_wild_match(const apr_array_header_t *domains, const char *name)
+{
+ const char *domain;
+ int i;
+
+ for (i = 0; i < domains->nelts; ++i) {
+ domain = APR_ARRAY_IDX(domains, i, const char*);
+ if (md_dns_matches(domain, name))
+ return (domain[0] == '*' && domain[1] == '.');
+ }
+ return 0;
+}
+
const char *md_util_schemify(apr_pool_t *p, const char *s, const char *def_scheme)
{
const char *cp = s;
@@ -1068,32 +1081,24 @@ apr_status_t md_util_try(md_util_try_fn *fn, void *baton, int ignore_errs,
/* execute process ********************************************************************************/
-apr_status_t md_util_exec(apr_pool_t *p, const char *cmd, const char * const *argv,
- apr_array_header_t *env, int *exit_code)
+apr_status_t md_util_exec(apr_pool_t *p, const char *cmd,
+ const char * const *argv, int *exit_code)
{
apr_status_t rv;
apr_procattr_t *procattr;
apr_proc_t *proc;
apr_exit_why_e ewhy;
- const char * const *envp = NULL;
char buffer[1024];
*exit_code = 0;
if (!(proc = apr_pcalloc(p, sizeof(*proc)))) {
return APR_ENOMEM;
}
- if (env && env->nelts > 0) {
- apr_array_header_t *nenv;
-
- nenv = apr_array_copy(p, env);
- APR_ARRAY_PUSH(nenv, const char *) = NULL;
- envp = (const char * const *)nenv->elts;
- }
if ( APR_SUCCESS == (rv = apr_procattr_create(&procattr, p))
&& APR_SUCCESS == (rv = apr_procattr_io_set(procattr, APR_NO_FILE,
APR_NO_PIPE, APR_FULL_BLOCK))
- && APR_SUCCESS == (rv = apr_procattr_cmdtype_set(procattr, APR_PROGRAM))
- && APR_SUCCESS == (rv = apr_proc_create(proc, cmd, argv, envp, procattr, p))) {
+ && APR_SUCCESS == (rv = apr_procattr_cmdtype_set(procattr, APR_PROGRAM_ENV))
+ && APR_SUCCESS == (rv = apr_proc_create(proc, cmd, argv, NULL, procattr, p))) {
/* read stderr and log on INFO for possible fault analysis. */
while(APR_SUCCESS == (rv = apr_file_gets(buffer, sizeof(buffer)-1, proc->err))) {
diff --git a/modules/md/md_util.h b/modules/md/md_util.h
index e430655..d974788 100644
--- a/modules/md/md_util.h
+++ b/modules/md/md_util.h
@@ -133,7 +133,7 @@ int md_array_str_add_missing(struct apr_array_header_t *dest,
/* process execution */
apr_status_t md_util_exec(apr_pool_t *p, const char *cmd, const char * const *argv,
- struct apr_array_header_t *env, int *exit_code);
+ int *exit_code);
/**************************************************************************************************/
/* dns name check */
@@ -174,6 +174,11 @@ struct apr_array_header_t *md_dns_make_minimal(apr_pool_t *p,
*/
int md_dns_domains_match(const apr_array_header_t *domains, const char *name);
+/**
+ * @return != 0 iff `name` is matched by a wildcard pattern in `domains`
+ */
+int md_is_wild_match(const apr_array_header_t *domains, const char *name);
+
/**************************************************************************************************/
/* file system related */
diff --git a/modules/md/md_version.h b/modules/md/md_version.h
index a8f3ef2..86a1821 100644
--- a/modules/md/md_version.h
+++ b/modules/md/md_version.h
@@ -27,7 +27,7 @@
* @macro
* Version number of the md module as c string
*/
-#define MOD_MD_VERSION "2.4.21"
+#define MOD_MD_VERSION "2.4.25"
/**
* @macro
@@ -35,7 +35,7 @@
* release. This is a 24 bit number with 8 bits for major number, 8 bits
* for minor and 8 bits for patch. Version 1.2.3 becomes 0x010203.
*/
-#define MOD_MD_VERSION_NUM 0x020415
+#define MOD_MD_VERSION_NUM 0x020419
#define MD_ACME_DEF_URL "https://acme-v02.api.letsencrypt.org/directory"
#define MD_TAILSCALE_DEF_URL "file://localhost/var/run/tailscale/tailscaled.sock"
diff --git a/modules/md/mod_md.c b/modules/md/mod_md.c
index 32dea4f..6d3f5b7 100644
--- a/modules/md/mod_md.c
+++ b/modules/md/mod_md.c
@@ -183,7 +183,7 @@ static apr_status_t notify(md_job_t *job, const char *reason,
if (mc->notify_cmd) {
cmdline = apr_psprintf(p, "%s %s", mc->notify_cmd, job->mdomain);
apr_tokenize_to_argv(cmdline, (char***)&argv, p);
- rv = md_util_exec(p, argv[0], argv, NULL, &exit_code);
+ rv = md_util_exec(p, argv[0], argv, &exit_code);
if (APR_SUCCESS == rv && exit_code) rv = APR_EGENERAL;
if (APR_SUCCESS != rv) {
@@ -202,7 +202,7 @@ static apr_status_t notify(md_job_t *job, const char *reason,
if (mc->message_cmd) {
cmdline = apr_psprintf(p, "%s %s %s", mc->message_cmd, reason, job->mdomain);
apr_tokenize_to_argv(cmdline, (char***)&argv, p);
- rv = md_util_exec(p, argv[0], argv, NULL, &exit_code);
+ rv = md_util_exec(p, argv[0], argv, &exit_code);
if (APR_SUCCESS == rv && exit_code) rv = APR_EGENERAL;
if (APR_SUCCESS != rv) {
@@ -377,12 +377,12 @@ static apr_status_t check_coverage(md_t *md, const char *domain, server_rec *s,
return APR_SUCCESS;
}
else {
- ap_log_error(APLOG_MARK, APLOG_ERR, 0, s, APLOGNO(10040)
+ ap_log_error(APLOG_MARK, APLOG_WARNING, 0, s, APLOGNO(10040)
"Virtual Host %s:%d matches Managed Domain '%s', but the "
"name/alias %s itself is not managed. A requested MD certificate "
"will not match ServerName.",
s->server_hostname, s->port, md->name, domain);
- return APR_EINVAL;
+ return APR_SUCCESS;
}
}
@@ -586,18 +586,30 @@ static apr_status_t link_md_to_servers(md_mod_conf_t *mc, md_t *md, server_rec *
for (i = 0; i < md->domains->nelts; ++i) {
domain = APR_ARRAY_IDX(md->domains, i, const char*);
- if (ap_matches_request_vhost(&r, domain, s->port)
- || (md_dns_is_wildcard(p, domain) && md_dns_matches(domain, s->server_hostname))) {
+ if ((mc->match_mode == MD_MATCH_ALL &&
+ ap_matches_request_vhost(&r, domain, s->port))
+ || (((mc->match_mode == MD_MATCH_SERVERNAMES) || md_dns_is_wildcard(p, domain)) &&
+ md_dns_matches(domain, s->server_hostname))) {
/* Create a unique md_srv_conf_t record for this server, if there is none yet */
sc = md_config_get_unique(s, p);
if (!sc->assigned) sc->assigned = apr_array_make(p, 2, sizeof(md_t*));
-
+ if (sc->assigned->nelts == 1 && mc->match_mode == MD_MATCH_SERVERNAMES) {
+ /* there is already an MD assigned for this server. But in
+ * this match mode, wildcard matches are pre-empted by non-wildcards */
+ int existing_wild = md_is_wild_match(
+ APR_ARRAY_IDX(sc->assigned, 0, const md_t*)->domains,
+ s->server_hostname);
+ if (!existing_wild && md_dns_is_wildcard(p, domain))
+ continue; /* do not add */
+ if (existing_wild && !md_dns_is_wildcard(p, domain))
+ sc->assigned->nelts = 0; /* overwrite existing */
+ }
APR_ARRAY_PUSH(sc->assigned, md_t*) = md;
ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, base_server, APLOGNO(10041)
- "Server %s:%d matches md %s (config %s) for domain %s, "
- "has now %d MDs",
+ "Server %s:%d matches md %s (config %s, match-mode=%d) "
+ "for domain %s, has now %d MDs",
s->server_hostname, s->port, md->name, sc->name,
- domain, (int)sc->assigned->nelts);
+ mc->match_mode, domain, (int)sc->assigned->nelts);
if (md->contacts && md->contacts->nelts > 0) {
/* set explicitly */
@@ -670,17 +682,19 @@ static apr_status_t merge_mds_with_conf(md_mod_conf_t *mc, apr_pool_t *p,
md = APR_ARRAY_IDX(mc->mds, i, md_t*);
merge_srv_config(md, base_conf, p);
- /* Check that we have no overlap with the MDs already completed */
- for (j = 0; j < i; ++j) {
- omd = APR_ARRAY_IDX(mc->mds, j, md_t*);
- if ((domain = md_common_name(md, omd)) != NULL) {
- ap_log_error(APLOG_MARK, APLOG_ERR, 0, base_server, APLOGNO(10038)
- "two Managed Domains have an overlap in domain '%s'"
- ", first definition in %s(line %d), second in %s(line %d)",
- domain, md->defn_name, md->defn_line_number,
- omd->defn_name, omd->defn_line_number);
- return APR_EINVAL;
- }
+ if (mc->match_mode == MD_MATCH_ALL) {
+ /* Check that we have no overlap with the MDs already completed */
+ for (j = 0; j < i; ++j) {
+ omd = APR_ARRAY_IDX(mc->mds, j, md_t*);
+ if ((domain = md_common_name(md, omd)) != NULL) {
+ ap_log_error(APLOG_MARK, APLOG_ERR, 0, base_server, APLOGNO(10038)
+ "two Managed Domains have an overlap in domain '%s'"
+ ", first definition in %s(line %d), second in %s(line %d)",
+ domain, md->defn_name, md->defn_line_number,
+ omd->defn_name, omd->defn_line_number);
+ return APR_EINVAL;
+ }
+ }
}
if (md->cert_files && md->cert_files->nelts) {
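
A hedged configuration sketch of the matching behaviour introduced above
(domain names are placeholders). With MDMatchNames servernames, Managed Domains
are matched against ServerName only, overlapping definitions are no longer
rejected at startup, and a non-wildcard MD pre-empts a wildcard one for the
same host:

    # Illustrative only.
    MDMatchNames servernames
    MDomain www.example.com
    MDomain *.example.com

    <VirtualHost *:443>
        ServerName www.example.com
        # both MDs match this name; the non-wildcard "www.example.com" wins
    </VirtualHost>
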
diff --git a/modules/md/mod_md_config.c b/modules/md/mod_md_config.c
index e117b16..31d06b4 100644
--- a/modules/md/mod_md_config.c
+++ b/modules/md/mod_md_config.c
@@ -88,6 +88,7 @@ static md_mod_conf_t defmc = {
13, /* retry_failover after 14 errors, with 5s delay ~ half a day */
0, /* store locks, disabled by default */
apr_time_from_sec(5), /* max time to wait to obtain a store lock */
+ MD_MATCH_ALL, /* match vhost servername and aliases */
};
static md_timeslice_t def_renew_window = {
@@ -684,6 +685,27 @@ static const char *md_config_set_store_locks(cmd_parms *cmd, void *dc, const cha
return NULL;
}
+static const char *md_config_set_match_mode(cmd_parms *cmd, void *dc, const char *s)
+{
+ md_srv_conf_t *config = md_config_get(cmd->server);
+ const char *err = md_conf_check_location(cmd, MD_LOC_NOT_MD);
+
+ (void)dc;
+ if (err) {
+ return err;
+ }
+ else if (!apr_strnatcasecmp("all", s)) {
+ config->mc->match_mode = MD_MATCH_ALL;
+ }
+ else if (!apr_strnatcasecmp("servernames", s)) {
+ config->mc->match_mode = MD_MATCH_SERVERNAMES;
+ }
+ else {
+ return "invalid argument, must be a 'all' or 'servernames'";
+ }
+ return NULL;
+}
+
static const char *md_config_set_require_https(cmd_parms *cmd, void *dc, const char *value)
{
md_srv_conf_t *config = md_config_get(cmd->server);
@@ -985,6 +1007,24 @@ static const char *md_config_set_dns01_cmd(cmd_parms *cmd, void *mconfig, const
return NULL;
}
+static const char *md_config_set_dns01_version(cmd_parms *cmd, void *mconfig, const char *value)
+{
+ md_srv_conf_t *sc = md_config_get(cmd->server);
+ const char *err;
+
+ (void)mconfig;
+ if ((err = md_conf_check_location(cmd, MD_LOC_NOT_MD))) {
+ return err;
+ }
+ if (!strcmp("1", value) || !strcmp("2", value)) {
+ apr_table_set(sc->mc->env, MD_KEY_DNS01_VERSION, value);
+ }
+ else {
+ return "Only versions `1` and `2` are supported";
+ }
+ return NULL;
+}
+
static const char *md_config_add_cert_file(cmd_parms *cmd, void *mconfig, const char *arg)
{
md_srv_conf_t *sc = md_config_get(cmd->server);
@@ -1226,7 +1266,9 @@ const command_rec md_cmds[] = {
"Allow managing of base server outside virtual hosts."),
AP_INIT_RAW_ARGS("MDChallengeDns01", md_config_set_dns01_cmd, NULL, RSRC_CONF,
"Set the command for setup/teardown of dns-01 challenges"),
- AP_INIT_TAKE1("MDCertificateFile", md_config_add_cert_file, NULL, RSRC_CONF,
+ AP_INIT_TAKE1("MDChallengeDns01Version", md_config_set_dns01_version, NULL, RSRC_CONF,
+ "Set the type of arguments to call `MDChallengeDns01` with"),
+ AP_INIT_TAKE1("MDCertificateFile", md_config_add_cert_file, NULL, RSRC_CONF,
"set the static certificate (chain) file to use for this domain."),
AP_INIT_TAKE1("MDCertificateKeyFile", md_config_add_key_file, NULL, RSRC_CONF,
"set the static private key file to use for this domain."),
@@ -1260,6 +1302,8 @@ const command_rec md_cmds[] = {
"The number of errors before a failover to another CA is triggered."),
AP_INIT_TAKE1("MDStoreLocks", md_config_set_store_locks, NULL, RSRC_CONF,
"Configure locking of store for updates."),
+ AP_INIT_TAKE1("MDMatchNames", md_config_set_match_mode, NULL, RSRC_CONF,
+ "Determines how DNS names are matched to vhosts."),
AP_INIT_TAKE1(NULL, NULL, NULL, RSRC_CONF, NULL)
};
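
A hedged sketch for the MDChallengeDns01Version directive registered above (the
script path is a placeholder). Per cha_dns_01_teardown() in md_acme_authz.c,
version "2" passes the challenge token to the teardown call as well, while
version "1" (and the default) passes only the domain:

    # Illustrative only; the script path is a placeholder.
    MDChallengeDns01        /usr/local/bin/dns01.sh
    MDChallengeDns01Version 2
    # v2 teardown invocation:  dns01.sh teardown <domain> <token>
    # v1/default invocation:   dns01.sh teardown <domain>
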
diff --git a/modules/md/mod_md_config.h b/modules/md/mod_md_config.h
index de42169..7e87440 100644
--- a/modules/md/mod_md_config.h
+++ b/modules/md/mod_md_config.h
@@ -41,6 +41,11 @@ typedef enum {
MD_CONFIG_STAPLE_OTHERS,
} md_config_var_t;
+typedef enum {
+ MD_MATCH_ALL,
+ MD_MATCH_SERVERNAMES,
+} md_match_mode_t;
+
typedef struct md_mod_conf_t md_mod_conf_t;
struct md_mod_conf_t {
apr_array_header_t *mds; /* all md_t* defined in the config, shared */
@@ -74,6 +79,7 @@ struct md_mod_conf_t {
int retry_failover; /* number of errors to trigger CA failover */
int use_store_locks; /* use locks when updating store */
apr_time_t lock_wait_timeout; /* fail after this time when unable to obtain lock */
+ md_match_mode_t match_mode; /* how dns names are matched to vhosts */
};
typedef struct md_srv_conf_t {
diff --git a/modules/md/mod_md_status.c b/modules/md/mod_md_status.c
index 2286051..6b29256 100644
--- a/modules/md/mod_md_status.c
+++ b/modules/md/mod_md_status.c
@@ -325,7 +325,7 @@ static void si_val_valid_time(status_ctx *ctx, md_json_t *mdj, const status_info
apr_pstrcat(ctx->p, info->label, "From", NULL));
}
if (until) {
- print_date(ctx, from,
+ print_date(ctx, until,
apr_pstrcat(ctx->p, info->label, "Until", NULL));
}
}
diff --git a/modules/proxy/ajp_header.c b/modules/proxy/ajp_header.c
index a09a2e4..0266a7d 100644
--- a/modules/proxy/ajp_header.c
+++ b/modules/proxy/ajp_header.c
@@ -17,6 +17,8 @@
#include "ajp_header.h"
#include "ajp.h"
+#include "util_script.h"
+
APLOG_USE_MODULE(proxy_ajp);
static const char *response_trans_headers[] = {
@@ -669,6 +671,14 @@ static apr_status_t ajp_unmarshal_response(ajp_msg_t *msg,
}
}
+ /* AJP has its own body framing mechanism which we don't
+ * match against any provided Content-Length, so let the
+ * core determine C-L vs T-E based on what's actually sent.
+ */
+ if (!apr_table_get(r->subprocess_env, AP_TRUST_CGILIKE_CL_ENVVAR))
+ apr_table_unset(r->headers_out, "Content-Length");
+ apr_table_unset(r->headers_out, "Transfer-Encoding");
+
return APR_SUCCESS;
}
diff --git a/modules/proxy/balancers/mod_lbmethod_heartbeat.c b/modules/proxy/balancers/mod_lbmethod_heartbeat.c
index 5f4873a..0534e5b 100644
--- a/modules/proxy/balancers/mod_lbmethod_heartbeat.c
+++ b/modules/proxy/balancers/mod_lbmethod_heartbeat.c
@@ -115,7 +115,6 @@ static apr_status_t readfile_heartbeats(const char *path, apr_hash_t *servers,
{
char *t;
- int lineno = 0;
apr_bucket_alloc_t *ba = apr_bucket_alloc_create(pool);
apr_bucket_brigade *bb = apr_brigade_create(pool, ba);
apr_bucket_brigade *tmpbb = apr_brigade_create(pool, ba);
@@ -137,7 +136,6 @@ static apr_status_t readfile_heartbeats(const char *path, apr_hash_t *servers,
rv = apr_brigade_split_line(tmpbb, bb,
APR_BLOCK_READ, sizeof(buf));
- lineno++;
if (rv) {
return rv;
diff --git a/modules/proxy/mod_proxy.c b/modules/proxy/mod_proxy.c
index 537c3c2..c9cef7c 100644
--- a/modules/proxy/mod_proxy.c
+++ b/modules/proxy/mod_proxy.c
@@ -224,6 +224,24 @@ static const char *set_worker_param(apr_pool_t *p,
return "EnableReuse must be On|Off";
worker->s->disablereuse_set = 1;
}
+ else if (!strcasecmp(key, "addressttl")) {
+ /* Address TTL in seconds
+ */
+ apr_interval_time_t ttl;
+ if (strcmp(val, "-1") == 0) {
+ worker->s->address_ttl = -1;
+ }
+ else if (ap_timeout_parameter_parse(val, &ttl, "s") == APR_SUCCESS
+ && (ttl <= apr_time_from_sec(APR_INT32_MAX))
+ && (ttl % apr_time_from_sec(1)) == 0) {
+ worker->s->address_ttl = apr_time_sec(ttl);
+ }
+ else {
+ return "AddressTTL must be -1 or a number of seconds not "
+ "exceeding " APR_STRINGIFY(APR_INT32_MAX);
+ }
+ worker->s->address_ttl_set = 1;
+ }
else if (!strcasecmp(key, "route")) {
/* Worker route.
*/
@@ -1460,11 +1478,20 @@ static int proxy_handler(request_rec *r)
/* handle the scheme */
ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(01142)
"Trying to run scheme_handler against proxy");
+
+ if (ents[i].creds) {
+ apr_table_set(r->notes, "proxy-basic-creds", ents[i].creds);
+ ap_log_rerror(APLOG_MARK, APLOG_TRACE1, 0, r,
+ "Using proxy auth creds %s", ents[i].creds);
+ }
+
access_status = proxy_run_scheme_handler(r, worker,
conf, url,
ents[i].hostname,
ents[i].port);
+ if (ents[i].creds) apr_table_unset(r->notes, "proxy-basic-creds");
+
/* Did the scheme handler process the request? */
if (access_status != DECLINED) {
const char *cl_a;
@@ -1902,8 +1929,8 @@ static void *merge_proxy_dir_config(apr_pool_t *p, void *basev, void *addv)
return new;
}
-static const char *
- add_proxy(cmd_parms *cmd, void *dummy, const char *f1, const char *r1, int regex)
+static const char *add_proxy(cmd_parms *cmd, void *dummy, const char *f1,
+ const char *r1, const char *creds, int regex)
{
server_rec *s = cmd->server;
proxy_server_conf *conf =
@@ -1961,19 +1988,24 @@ static const char *
new->port = port;
new->regexp = reg;
new->use_regex = regex;
+ if (creds) {
+ new->creds = apr_pstrcat(cmd->pool, "Basic ",
+ ap_pbase64encode(cmd->pool, (char *)creds),
+ NULL);
+ }
return NULL;
}
-static const char *
- add_proxy_noregex(cmd_parms *cmd, void *dummy, const char *f1, const char *r1)
+static const char *add_proxy_noregex(cmd_parms *cmd, void *dummy, const char *f1,
+ const char *r1, const char *creds)
{
- return add_proxy(cmd, dummy, f1, r1, 0);
+ return add_proxy(cmd, dummy, f1, r1, creds, 0);
}
-static const char *
- add_proxy_regex(cmd_parms *cmd, void *dummy, const char *f1, const char *r1)
+static const char *add_proxy_regex(cmd_parms *cmd, void *dummy, const char *f1,
+ const char *r1, const char *creds)
{
- return add_proxy(cmd, dummy, f1, r1, 1);
+ return add_proxy(cmd, dummy, f1, r1, creds, 1);
}
PROXY_DECLARE(const char *) ap_proxy_de_socketfy(apr_pool_t *p, const char *url)
@@ -3012,9 +3044,9 @@ static const command_rec proxy_cmds[] =
"location, in regular expression syntax"),
AP_INIT_FLAG("ProxyRequests", set_proxy_req, NULL, RSRC_CONF,
"on if the true proxy requests should be accepted"),
- AP_INIT_TAKE2("ProxyRemote", add_proxy_noregex, NULL, RSRC_CONF,
+ AP_INIT_TAKE23("ProxyRemote", add_proxy_noregex, NULL, RSRC_CONF,
"a scheme, partial URL or '*' and a proxy server"),
- AP_INIT_TAKE2("ProxyRemoteMatch", add_proxy_regex, NULL, RSRC_CONF,
+ AP_INIT_TAKE23("ProxyRemoteMatch", add_proxy_regex, NULL, RSRC_CONF,
"a regex pattern and a proxy server"),
AP_INIT_FLAG("ProxyPassInterpolateEnv", ap_set_flag_slot_char,
(void*)APR_OFFSETOF(proxy_dir_conf, interpolate_env),
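
Two hedged sketches for the mod_proxy additions above (hosts, port and
credentials are placeholders). ProxyRemote/ProxyRemoteMatch now accept an
optional third argument with credentials for the remote proxy (used to build a
Basic authentication value, so presumably in user:password form), and workers
gain an addressttl parameter limiting how long a resolved backend address is
reused:

    # Illustrative only.
    ProxyRemote "http" "http://forward-proxy.internal:3128" "user:password"
    ProxyPass   "/app/" "http://backend.internal:8080/app/" addressttl=60
    # addressttl accepts -1 or a number of seconds, per set_worker_param().
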
diff --git a/modules/proxy/mod_proxy.h b/modules/proxy/mod_proxy.h
index c51145e..51a55f8 100644
--- a/modules/proxy/mod_proxy.h
+++ b/modules/proxy/mod_proxy.h
@@ -121,6 +121,7 @@ struct proxy_remote {
const char *protocol; /* the scheme used to talk to this proxy */
const char *hostname; /* the hostname of this proxy */
ap_regex_t *regexp; /* compiled regex (if any) for the remote */
+ const char *creds; /* auth credentials (if any) for the proxy */
int use_regex; /* simple boolean. True if we have a regex pattern */
apr_port_t port; /* the port for this proxy */
};
@@ -263,6 +264,8 @@ typedef struct {
apr_array_header_t* cookie_domains;
} proxy_req_conf;
+struct proxy_address; /* opaque TTL'ed and refcount'ed address */
+
typedef struct {
conn_rec *connection;
request_rec *r; /* Request record of the backend request
@@ -288,6 +291,9 @@ typedef struct {
* and its scpool/bucket_alloc (NULL before),
* must be left cleaned when used (locally).
*/
+ apr_pool_t *uds_pool; /* Subpool for reusing UDS paths */
+ apr_pool_t *fwd_pool; /* Subpool for reusing ProxyRemote infos */
+ struct proxy_address *address; /* Current remote address */
} proxy_conn_rec;
typedef struct {
@@ -484,6 +490,9 @@ typedef struct {
unsigned int response_field_size_set:1;
char secret[PROXY_WORKER_MAX_SECRET_SIZE]; /* authentication secret (e.g. AJP13) */
char name_ex[PROXY_WORKER_EXT_NAME_SIZE]; /* Extended name (>96 chars for 2.4.x) */
+ unsigned int address_ttl_set:1;
+ apr_int32_t address_ttl; /* backend address' TTL (seconds) */
+ apr_uint32_t address_expiry; /* backend address' next expiry time */
} proxy_worker_shared;
#define ALIGNED_PROXY_WORKER_SHARED_SIZE (APR_ALIGN_DEFAULT(sizeof(proxy_worker_shared)))
@@ -500,6 +509,7 @@ struct proxy_worker {
#endif
void *context; /* general purpose storage */
ap_conf_vector_t *section_config; /* <Proxy>-section wherein defined */
+ struct proxy_address *volatile address; /* current worker address (if reusable) */
};
/* default to health check every 30 seconds */
@@ -1024,6 +1034,29 @@ PROXY_DECLARE(int) ap_proxy_post_request(proxy_worker *worker,
request_rec *r,
proxy_server_conf *conf);
+/* Bitmask for ap_proxy_determine_address() */
+#define PROXY_DETERMINE_ADDRESS_CHECK (1u << 0)
+/**
+ * Resolve an address, reusing the one of the worker if any.
+ * @param proxy_function calling proxy scheme (http, ajp, ...)
+ * @param conn proxy connection the address is used for
+ * @param hostname host to resolve (should be the worker's if reusable)
+ * @param hostport port to resolve (should be the worker's if reusable)
+ * @param flags bitmask of PROXY_DETERMINE_ADDRESS_*
+ * @param r current request (if any)
+ * @param s current server (or NULL if r != NULL and ap_proxyerror()
+ * should be called on error)
+ * @return APR_SUCCESS or an error, or APR_EEXIST if the address is still
+ * the same and PROXY_DETERMINE_ADDRESS_CHECK is set in flags
+ */
+PROXY_DECLARE(apr_status_t) ap_proxy_determine_address(const char *proxy_function,
+ proxy_conn_rec *conn,
+ const char *hostname,
+ apr_port_t hostport,
+ unsigned int flags,
+ request_rec *r,
+ server_rec *s);
+
/**
* Determine backend hostname and port
* @param p memory pool used for processing
diff --git a/modules/proxy/mod_proxy_ajp.c b/modules/proxy/mod_proxy_ajp.c
index 65773ce..32ec912 100644
--- a/modules/proxy/mod_proxy_ajp.c
+++ b/modules/proxy/mod_proxy_ajp.c
@@ -236,10 +236,8 @@ static int ap_proxy_ajp_request(apr_pool_t *p, request_rec *r,
if (status != APR_SUCCESS) {
conn->close = 1;
ap_log_rerror(APLOG_MARK, APLOG_ERR, status, r, APLOGNO(00868)
- "request failed to %pI (%s:%d)",
- conn->worker->cp->addr,
- conn->worker->s->hostname_ex,
- (int)conn->worker->s->port);
+ "request failed to %pI (%s:%hu)",
+ conn->addr, conn->hostname, conn->port);
if (status == AJP_EOVERFLOW)
return HTTP_BAD_REQUEST;
else if (status == AJP_EBAD_METHOD) {
@@ -336,10 +334,8 @@ static int ap_proxy_ajp_request(apr_pool_t *p, request_rec *r,
conn->close = 1;
apr_brigade_destroy(input_brigade);
ap_log_rerror(APLOG_MARK, APLOG_ERR, status, r, APLOGNO(00876)
- "send failed to %pI (%s:%d)",
- conn->worker->cp->addr,
- conn->worker->s->hostname_ex,
- (int)conn->worker->s->port);
+ "send failed to %pI (%s:%hu)",
+ conn->addr, conn->hostname, conn->port);
/*
* It is fatal when we failed to send a (part) of the request
* body.
@@ -378,10 +374,8 @@ static int ap_proxy_ajp_request(apr_pool_t *p, request_rec *r,
conn->close = 1;
apr_brigade_destroy(input_brigade);
ap_log_rerror(APLOG_MARK, APLOG_ERR, status, r, APLOGNO(00878)
- "read response failed from %pI (%s:%d)",
- conn->worker->cp->addr,
- conn->worker->s->hostname_ex,
- (int)conn->worker->s->port);
+ "read response failed from %pI (%s:%hu)",
+ conn->addr, conn->hostname, conn->port);
/* If we had a successful cping/cpong and then a timeout
* we assume it is a request that cause a back-end timeout,
@@ -677,10 +671,8 @@ static int ap_proxy_ajp_request(apr_pool_t *p, request_rec *r,
}
else {
ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(00892)
- "got response from %pI (%s:%d)",
- conn->worker->cp->addr,
- conn->worker->s->hostname_ex,
- (int)conn->worker->s->port);
+ "got response from %pI (%s:%hu)",
+ conn->addr, conn->hostname, conn->port);
if (ap_proxy_should_override(conf, r->status)) {
/* clear r->status for override error, otherwise ErrorDocument
@@ -702,10 +694,8 @@ static int ap_proxy_ajp_request(apr_pool_t *p, request_rec *r,
if (backend_failed) {
ap_log_rerror(APLOG_MARK, APLOG_ERR, status, r, APLOGNO(00893)
- "dialog to %pI (%s:%d) failed",
- conn->worker->cp->addr,
- conn->worker->s->hostname_ex,
- (int)conn->worker->s->port);
+ "dialog to %pI (%s:%hu) failed",
+ conn->addr, conn->hostname, conn->port);
/*
* If we already send data, signal a broken backend connection
* upwards in the chain.
@@ -850,9 +840,8 @@ static int proxy_ajp_handler(request_rec *r, proxy_worker *worker,
if (status != APR_SUCCESS) {
backend->close = 1;
ap_log_rerror(APLOG_MARK, APLOG_ERR, status, r, APLOGNO(00897)
- "cping/cpong failed to %pI (%s:%d)",
- worker->cp->addr, worker->s->hostname_ex,
- (int)worker->s->port);
+ "cping/cpong failed to %pI (%s:%hu)",
+ backend->addr, backend->hostname, backend->port);
status = HTTP_SERVICE_UNAVAILABLE;
retry++;
continue;
diff --git a/modules/proxy/mod_proxy_fcgi.c b/modules/proxy/mod_proxy_fcgi.c
index 831bd15..d420df6 100644
--- a/modules/proxy/mod_proxy_fcgi.c
+++ b/modules/proxy/mod_proxy_fcgi.c
@@ -779,6 +779,15 @@ recv_again:
status = ap_scan_script_header_err_brigade_ex(r, ob,
NULL, APLOG_MODULE_INDEX);
+
+ /* FCGI has its own body framing mechanism which we don't
+ * match against any provided Content-Length, so let the
+ * core determine C-L vs T-E based on what's actually sent.
+ */
+ if (!apr_table_get(r->subprocess_env, AP_TRUST_CGILIKE_CL_ENVVAR))
+ apr_table_unset(r->headers_out, "Content-Length");
+ apr_table_unset(r->headers_out, "Transfer-Encoding");
+
/* suck in all the rest */
if (status != OK) {
apr_bucket *tmp_b;
diff --git a/modules/proxy/mod_proxy_ftp.c b/modules/proxy/mod_proxy_ftp.c
index a3fb10a..e0032e5 100644
--- a/modules/proxy/mod_proxy_ftp.c
+++ b/modules/proxy/mod_proxy_ftp.c
@@ -975,13 +975,8 @@ static int proxy_ftp_handler(request_rec *r, proxy_worker *worker,
conn_rec *c = r->connection;
proxy_conn_rec *backend;
apr_socket_t *sock, *local_sock, *data_sock = NULL;
- apr_sockaddr_t *connect_addr = NULL;
- apr_status_t rv;
conn_rec *origin, *data = NULL;
apr_status_t err = APR_SUCCESS;
-#if APR_HAS_THREADS
- apr_status_t uerr = APR_SUCCESS;
-#endif
apr_bucket_brigade *bb;
char *buf, *connectname;
apr_port_t connectport;
@@ -1005,8 +1000,8 @@ static int proxy_ftp_handler(request_rec *r, proxy_worker *worker,
/* stuff for PASV mode */
int connect = 0, use_port = 0;
char dates[APR_RFC822_DATE_LEN];
+ apr_status_t rv;
int status;
- apr_pool_t *address_pool;
/* is this for us? */
if (proxyhost) {
@@ -1120,53 +1115,8 @@ static int proxy_ftp_handler(request_rec *r, proxy_worker *worker,
ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(01036)
"connecting %s to %s:%d", url, connectname, connectport);
- if (worker->s->is_address_reusable) {
- if (!worker->cp->addr) {
-#if APR_HAS_THREADS
- if ((err = PROXY_THREAD_LOCK(worker->balancer)) != APR_SUCCESS) {
- ap_log_rerror(APLOG_MARK, APLOG_ERR, err, r, APLOGNO(01037) "lock");
- return HTTP_INTERNAL_SERVER_ERROR;
- }
-#endif
- }
- connect_addr = AP_VOLATILIZE_T(apr_sockaddr_t *, worker->cp->addr);
- address_pool = worker->cp->dns_pool;
- }
- else
- address_pool = r->pool;
-
- /* do a DNS lookup for the destination host */
- if (!connect_addr)
- err = apr_sockaddr_info_get(&(connect_addr),
- connectname, APR_UNSPEC,
- connectport, 0,
- address_pool);
- if (worker->s->is_address_reusable && !worker->cp->addr) {
- worker->cp->addr = connect_addr;
-#if APR_HAS_THREADS
- if ((uerr = PROXY_THREAD_UNLOCK(worker->balancer)) != APR_SUCCESS) {
- ap_log_rerror(APLOG_MARK, APLOG_ERR, uerr, r, APLOGNO(01038) "unlock");
- }
-#endif
- }
- /*
- * get all the possible IP addresses for the destname and loop through
- * them until we get a successful connection
- */
- if (APR_SUCCESS != err) {
- return ap_proxyerror(r, HTTP_BAD_GATEWAY, apr_pstrcat(p,
- "DNS lookup failure for: ",
- connectname, NULL));
- }
-
- /* check if ProxyBlock directive on this host */
- if (OK != ap_proxy_checkproxyblock2(r, conf, connectname, connect_addr)) {
- return ap_proxyerror(r, HTTP_FORBIDDEN,
- "Connect to remote machine blocked");
- }
-
/* create space for state information */
- backend = (proxy_conn_rec *) ap_get_module_config(c->conn_config, &proxy_ftp_module);
+ backend = ap_get_module_config(c->conn_config, &proxy_ftp_module);
if (!backend) {
status = ap_proxy_acquire_connection("FTP", &backend, worker, r->server);
if (status != OK) {
@@ -1176,11 +1126,26 @@ static int proxy_ftp_handler(request_rec *r, proxy_worker *worker,
}
return status;
}
- /* TODO: see if ftp could use determine_connection */
- backend->addr = connect_addr;
ap_set_module_config(c->conn_config, &proxy_ftp_module, backend);
}
+ /*
+ * get all the possible IP addresses for the destname and loop through
+ * them until we get a successful connection
+ */
+ err = ap_proxy_determine_address("FTP", backend, connectname, connectport,
+ 0, r, r->server);
+ if (APR_SUCCESS != err) {
+ return ftp_proxyerror(r, backend, HTTP_BAD_GATEWAY,
+ "Error resolving backend address");
+ }
+
+ /* check if ProxyBlock directive on this host */
+ if (OK != ap_proxy_checkproxyblock2(r, conf, connectname, backend->addr)) {
+ return ftp_proxyerror(r, backend, HTTP_FORBIDDEN,
+ "Connect to remote machine blocked");
+ }
+
/*
* II: Make the Connection -----------------------
@@ -1188,11 +1153,7 @@ static int proxy_ftp_handler(request_rec *r, proxy_worker *worker,
* We have determined who to connect to. Now make the connection.
*/
-
if (ap_proxy_connect_backend("FTP", backend, worker, r->server)) {
- ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(01039)
- "an error occurred creating a new connection to %pI (%s)",
- connect_addr, connectname);
proxy_ftp_cleanup(r, backend);
return HTTP_SERVICE_UNAVAILABLE;
}
@@ -1536,7 +1497,8 @@ static int proxy_ftp_handler(request_rec *r, proxy_worker *worker,
"PASV contacting host %d.%d.%d.%d:%d",
h3, h2, h1, h0, pasvport);
- if ((rv = apr_socket_create(&data_sock, connect_addr->family, SOCK_STREAM, 0, r->pool)) != APR_SUCCESS) {
+ if ((rv = apr_socket_create(&data_sock, backend->addr->family,
+ SOCK_STREAM, 0, r->pool)) != APR_SUCCESS) {
ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01045)
"error creating PASV socket");
proxy_ftp_cleanup(r, backend);
@@ -1558,7 +1520,14 @@ static int proxy_ftp_handler(request_rec *r, proxy_worker *worker,
}
/* make the connection */
- apr_sockaddr_info_get(&pasv_addr, apr_psprintf(p, "%d.%d.%d.%d", h3, h2, h1, h0), connect_addr->family, pasvport, 0, p);
+ err = apr_sockaddr_info_get(&pasv_addr, apr_psprintf(p, "%d.%d.%d.%d",
+ h3, h2, h1, h0),
+ backend->addr->family, pasvport, 0, p);
+ if (APR_SUCCESS != err) {
+ return ftp_proxyerror(r, backend, HTTP_BAD_GATEWAY,
+ apr_pstrcat(p, "DNS lookup failure for: ",
+ connectname, NULL));
+ }
rv = apr_socket_connect(data_sock, pasv_addr);
if (rv != APR_SUCCESS) {
ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01048)
@@ -1581,7 +1550,8 @@ static int proxy_ftp_handler(request_rec *r, proxy_worker *worker,
apr_port_t local_port;
unsigned int h0, h1, h2, h3, p0, p1;
- if ((rv = apr_socket_create(&local_sock, connect_addr->family, SOCK_STREAM, 0, r->pool)) != APR_SUCCESS) {
+ if ((rv = apr_socket_create(&local_sock, backend->addr->family,
+ SOCK_STREAM, 0, r->pool)) != APR_SUCCESS) {
ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01049)
"error creating local socket");
proxy_ftp_cleanup(r, backend);
@@ -1601,7 +1571,12 @@ static int proxy_ftp_handler(request_rec *r, proxy_worker *worker,
#endif /* _OSD_POSIX */
}
- apr_sockaddr_info_get(&local_addr, local_ip, APR_UNSPEC, local_port, 0, r->pool);
+ err = apr_sockaddr_info_get(&local_addr, local_ip, APR_UNSPEC, local_port, 0, r->pool);
+ if (APR_SUCCESS != err) {
+ return ftp_proxyerror(r, backend, HTTP_BAD_GATEWAY,
+ apr_pstrcat(p, "DNS lookup failure for: ",
+ connectname, NULL));
+ }
if ((rv = apr_socket_bind(local_sock, local_addr)) != APR_SUCCESS) {
ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01051)
diff --git a/modules/proxy/mod_proxy_hcheck.c b/modules/proxy/mod_proxy_hcheck.c
index d618b4d..70f1de8 100644
--- a/modules/proxy/mod_proxy_hcheck.c
+++ b/modules/proxy/mod_proxy_hcheck.c
@@ -551,52 +551,29 @@ static proxy_worker *hc_get_hcworker(sctx_t *ctx, proxy_worker *worker,
return hc;
}
-static int hc_determine_connection(sctx_t *ctx, proxy_worker *worker,
- apr_sockaddr_t **addr, apr_pool_t *p)
+static int hc_determine_connection(const char *proxy_function,
+ proxy_conn_rec *backend,
+ server_rec *s)
{
- apr_status_t rv = APR_SUCCESS;
+ proxy_worker *worker = backend->worker;
+ apr_status_t rv;
+
/*
* normally, this is done in ap_proxy_determine_connection().
* TODO: Look at using ap_proxy_determine_connection() with a
* fake request_rec
*/
- if (worker->cp->addr) {
- *addr = worker->cp->addr;
- }
- else {
- rv = apr_sockaddr_info_get(addr, worker->s->hostname_ex,
- APR_UNSPEC, worker->s->port, 0, p);
- if (rv != APR_SUCCESS) {
- ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, ctx->s, APLOGNO(03249)
- "DNS lookup failure for: %s:%d",
- worker->s->hostname_ex, (int)worker->s->port);
- }
+ rv = ap_proxy_determine_address(proxy_function, backend,
+ worker->s->hostname_ex, worker->s->port,
+ 0, NULL, s);
+ if (rv != APR_SUCCESS) {
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, rv, s, APLOGNO(03249)
+ "DNS lookup failure for: %s:%hu",
+ worker->s->hostname_ex, worker->s->port);
+ return !OK;
}
- return (rv == APR_SUCCESS ? OK : !OK);
-}
-static apr_status_t hc_init_worker(sctx_t *ctx, proxy_worker *worker)
-{
- apr_status_t rv = APR_SUCCESS;
- /*
- * Since this is the watchdog, workers never actually handle a
- * request here, and so the local data isn't initialized (of
- * course, the shared memory is). So we need to bootstrap
- * worker->cp. Note, we only need do this once.
- */
- if (!worker->cp) {
- rv = ap_proxy_initialize_worker(worker, ctx->s, ctx->p);
- if (rv != APR_SUCCESS) {
- ap_log_error(APLOG_MARK, APLOG_EMERG, rv, ctx->s, APLOGNO(03250) "Cannot init worker");
- return rv;
- }
- if (worker->s->is_address_reusable && !worker->s->disablereuse &&
- hc_determine_connection(ctx, worker, &worker->cp->addr,
- worker->cp->pool) != OK) {
- rv = APR_EGENERAL;
- }
- }
- return rv;
+ return OK;
}
static apr_status_t backend_cleanup(const char *proxy_function, proxy_conn_rec *backend,
@@ -618,24 +595,64 @@ static apr_status_t backend_cleanup(const char *proxy_function, proxy_conn_rec *
}
static int hc_get_backend(const char *proxy_function, proxy_conn_rec **backend,
- proxy_worker *hc, sctx_t *ctx, apr_pool_t *ptemp)
+ proxy_worker *hc, sctx_t *ctx)
{
int status;
+
status = ap_proxy_acquire_connection(proxy_function, backend, hc, ctx->s);
- if (status == OK) {
- (*backend)->addr = hc->cp->addr;
- (*backend)->hostname = hc->s->hostname_ex;
- if (strcmp(hc->s->scheme, "https") == 0 || strcmp(hc->s->scheme, "wss") == 0 ) {
- if (!ap_ssl_has_outgoing_handlers()) {
- ap_log_error(APLOG_MARK, APLOG_WARNING, 0, ctx->s, APLOGNO(03252)
- "mod_ssl not configured?");
- return !OK;
- }
- (*backend)->is_ssl = 1;
+ if (status != OK) {
+ return status;
+ }
+
+ if (strcmp(hc->s->scheme, "https") == 0 || strcmp(hc->s->scheme, "wss") == 0 ) {
+ if (!ap_ssl_has_outgoing_handlers()) {
+ ap_log_error(APLOG_MARK, APLOG_WARNING, 0, ctx->s, APLOGNO(03252)
+ "mod_ssl not configured?");
+ return !OK;
}
+ (*backend)->is_ssl = 1;
+ }
+
+ return hc_determine_connection(proxy_function, *backend, ctx->s);
+}
+
+static apr_status_t hc_init_baton(baton_t *baton)
+{
+ sctx_t *ctx = baton->ctx;
+ proxy_worker *worker = baton->worker, *hc;
+ apr_status_t rv = APR_SUCCESS;
+ int once = 0;
+
+ /*
+ * Since this is the watchdog, workers never actually handle a
+ * request here, and so the local data isn't initialized (of
+ * course, the shared memory is). So we need to bootstrap
+ * worker->cp. Note, we only need do this once.
+ */
+ if (!worker->cp) {
+ rv = ap_proxy_initialize_worker(worker, ctx->s, ctx->p);
+ if (rv != APR_SUCCESS) {
+ ap_log_error(APLOG_MARK, APLOG_EMERG, rv, ctx->s, APLOGNO(03250) "Cannot init worker");
+ return rv;
+ }
+ once = 1;
+ }
+ baton->hc = hc = hc_get_hcworker(ctx, worker, baton->ptemp);
+
+ /* Try to resolve the worker address once if it's reusable */
+ if (once && worker->s->is_address_reusable) {
+ proxy_conn_rec *backend = NULL;
+ if (hc_get_backend("HCHECK", &backend, hc, ctx)) {
+ rv = APR_EGENERAL;
+ }
+ if (backend) {
+ backend->close = 1;
+ ap_proxy_release_connection("HCHECK", backend, ctx->s);
+ }
}
- return hc_determine_connection(ctx, hc, &(*backend)->addr, ptemp);
+
+ return rv;
}
static apr_status_t hc_check_cping(baton_t *baton, apr_thread_t *thread)
@@ -653,7 +670,7 @@ static apr_status_t hc_check_cping(baton_t *baton, apr_thread_t *thread)
}
ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, baton->ctx->s, "HCCPING starting");
- if ((status = hc_get_backend("HCCPING", &backend, hc, ctx, baton->ptemp)) != OK) {
+ if ((status = hc_get_backend("HCCPING", &backend, hc, ctx)) != OK) {
return backend_cleanup("HCCPING", backend, ctx->s, status);
}
if ((status = ap_proxy_connect_backend("HCCPING", backend, hc, ctx->s)) != OK) {
@@ -688,7 +705,7 @@ static apr_status_t hc_check_tcp(baton_t *baton)
proxy_worker *hc = baton->hc;
proxy_conn_rec *backend = NULL;
- status = hc_get_backend("HCTCP", &backend, hc, ctx, baton->ptemp);
+ status = hc_get_backend("HCTCP", &backend, hc, ctx);
if (status == OK) {
status = ap_proxy_connect_backend("HCTCP", backend, hc, ctx->s);
/* does an unconditional ap_proxy_is_socket_connected() */
@@ -839,7 +856,7 @@ static apr_status_t hc_check_http(baton_t *baton, apr_thread_t *thread)
return APR_ENOTIMPL;
}
- if ((status = hc_get_backend("HCOH", &backend, hc, ctx, ptemp)) != OK) {
+ if ((status = hc_get_backend("HCOH", &backend, hc, ctx)) != OK) {
return backend_cleanup("HCOH", backend, ctx->s, status);
}
if ((status = ap_proxy_connect_backend("HCOH", backend, hc, ctx->s)) != OK) {
@@ -1033,12 +1050,6 @@ static apr_status_t hc_watchdog_callback(int state, void *data,
"Checking %s worker: %s [%d] (%pp)", balancer->s->name,
worker->s->name_ex, worker->s->method, worker);
- if ((rv = hc_init_worker(ctx, worker)) != APR_SUCCESS) {
- worker->s->updated = now;
- return rv;
- }
- worker->s->updated = 0;
-
/* This pool has the lifetime of the check */
apr_pool_create(&ptemp, ctx->p);
apr_pool_tag(ptemp, "hc_request");
@@ -1047,7 +1058,12 @@ static apr_status_t hc_watchdog_callback(int state, void *data,
baton->balancer = balancer;
baton->worker = worker;
baton->ptemp = ptemp;
- baton->hc = hc_get_hcworker(ctx, worker, ptemp);
+ if ((rv = hc_init_baton(baton))) {
+ worker->s->updated = now;
+ apr_pool_destroy(ptemp);
+ return rv;
+ }
+ worker->s->updated = 0;
#if HC_USE_THREADS
if (hctp) {
apr_thread_pool_push(hctp, hc_check, (void *)baton,
diff --git a/modules/proxy/mod_proxy_http.c b/modules/proxy/mod_proxy_http.c
index 1842c49..bd57b4d 100644
--- a/modules/proxy/mod_proxy_http.c
+++ b/modules/proxy/mod_proxy_http.c
@@ -2078,8 +2078,7 @@ static int proxy_http_handler(request_rec *r, proxy_worker *worker,
if (req->do_100_continue && status == HTTP_SERVICE_UNAVAILABLE) {
ap_log_rerror(APLOG_MARK, APLOG_INFO, status, r, APLOGNO(01115)
"HTTP: 100-Continue failed to %pI (%s:%d)",
- worker->cp->addr, worker->s->hostname_ex,
- (int)worker->s->port);
+ backend->addr, backend->hostname, backend->port);
backend->close = 1;
retry++;
continue;
diff --git a/modules/proxy/mod_proxy_scgi.c b/modules/proxy/mod_proxy_scgi.c
index 5444a5c..d63c833 100644
--- a/modules/proxy/mod_proxy_scgi.c
+++ b/modules/proxy/mod_proxy_scgi.c
@@ -390,6 +390,14 @@ static int pass_response(request_rec *r, proxy_conn_rec *conn)
return status;
}
+ /* SCGI has its own body framing mechanism which we don't
+ * match against any provided Content-Length, so let the
+ * core determine C-L vs T-E based on what's actually sent.
+ */
+ if (!apr_table_get(r->subprocess_env, AP_TRUST_CGILIKE_CL_ENVVAR))
+ apr_table_unset(r->headers_out, "Content-Length");
+ apr_table_unset(r->headers_out, "Transfer-Encoding");
+
conf = ap_get_module_config(r->per_dir_config, &proxy_scgi_module);
if (conf->sendfile && conf->sendfile != scgi_sendfile_off) {
short err = 1;
diff --git a/modules/proxy/mod_proxy_uwsgi.c b/modules/proxy/mod_proxy_uwsgi.c
index fd76c95..4e57196 100644
--- a/modules/proxy/mod_proxy_uwsgi.c
+++ b/modules/proxy/mod_proxy_uwsgi.c
@@ -404,6 +404,12 @@ static int uwsgi_response(request_rec *r, proxy_conn_rec * backend,
return HTTP_BAD_GATEWAY;
}
+ /* T-E wins over C-L */
+ if (apr_table_get(r->headers_out, "Transfer-Encoding")) {
+ apr_table_unset(r->headers_out, "Content-Length");
+ backend->close = 1;
+ }
+
if ((buf = apr_table_get(r->headers_out, "Content-Type"))) {
ap_set_content_type(r, apr_pstrdup(r->pool, buf));
}
diff --git a/modules/proxy/proxy_util.c b/modules/proxy/proxy_util.c
index caafde0..a54a4fa 100644
--- a/modules/proxy/proxy_util.c
+++ b/modules/proxy/proxy_util.c
@@ -21,10 +21,14 @@
#include "apr_version.h"
#include "apr_strings.h"
#include "apr_hash.h"
+#include "apr_atomic.h"
+#include "http_core.h"
#include "proxy_util.h"
#include "ajp.h"
#include "scgi.h"
+#include "mpm_common.h" /* for ap_max_mem_free */
+
#include "mod_http2.h" /* for http2_get_num_workers() */
#if APR_HAVE_UNISTD_H
@@ -43,7 +47,7 @@ APLOG_USE_MODULE(proxy);
/*
* Opaque structure containing target server info when
* using a forward proxy.
- * Up to now only used in combination with HTTP CONNECT.
+ * Up to now only used in combination with HTTP CONNECT to ProxyRemote.
*/
typedef struct {
int use_http_connect; /* Use SSL Tunneling via HTTP CONNECT */
@@ -52,6 +56,17 @@ typedef struct {
const char *proxy_auth; /* Proxy authorization */
} forward_info;
+/*
+ * Opaque structure containing a refcounted and TTL'ed address.
+ */
+typedef struct proxy_address {
+ apr_sockaddr_t *addr; /* Remote address info */
+ const char *hostname; /* Remote host name */
+ apr_port_t hostport; /* Remote host port */
+ apr_uint32_t refcount; /* Number of conns and/or worker using it */
+ apr_uint32_t expiry; /* Expiry timestamp (seconds relative to proxy_start_time) */
+} proxy_address;
+
/* Global balancer counter */
int PROXY_DECLARE_DATA proxy_lb_workers = 0;
static int lb_workers_limit = 0;
@@ -60,6 +75,8 @@ const apr_strmatch_pattern PROXY_DECLARE_DATA *ap_proxy_strmatch_domain;
extern apr_global_mutex_t *proxy_mutex;
+static const apr_time_t *proxy_start_time; /* epoch for expiring addresses */
+
static int proxy_match_ipaddr(struct dirconn_entry *This, request_rec *r);
static int proxy_match_domainname(struct dirconn_entry *This, request_rec *r);
static int proxy_match_hostname(struct dirconn_entry *This, request_rec *r);
@@ -392,8 +409,12 @@ PROXY_DECLARE(char *)
return NULL;
}
-PROXY_DECLARE(int) ap_proxyerror(request_rec *r, int statuscode, const char *message)
+static int proxyerror_core(request_rec *r, int statuscode, const char *message,
+ apr_status_t rv)
{
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(00898)
+ "%s returned by %s", message, r->uri);
+
apr_table_setn(r->notes, "error-notes",
apr_pstrcat(r->pool,
"The proxy server could not handle the request<p>"
@@ -405,11 +426,14 @@ PROXY_DECLARE(int) ap_proxyerror(request_rec *r, int statuscode, const char *mes
apr_table_setn(r->notes, "verbose-error-to", "*");
r->status_line = apr_psprintf(r->pool, "%3.3u Proxy Error", statuscode);
- ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(00898) "%s returned by %s", message,
- r->uri);
return statuscode;
}
+PROXY_DECLARE(int) ap_proxyerror(request_rec *r, int statuscode, const char *message)
+{
+ return proxyerror_core(r, statuscode, message, 0);
+}
+
static const char *
proxy_get_host_of_request(request_rec *r)
{
@@ -1488,43 +1512,94 @@ static void socket_cleanup(proxy_conn_rec *conn)
apr_pool_clear(conn->scpool);
}
+static void address_cleanup(proxy_conn_rec *conn)
+{
+ conn->address = NULL;
+ conn->addr = NULL;
+ conn->hostname = NULL;
+ conn->port = 0;
+ conn->uds_path = NULL;
+ if (conn->uds_pool) {
+ apr_pool_clear(conn->uds_pool);
+ }
+ if (conn->sock) {
+ socket_cleanup(conn);
+ }
+}
+
static apr_status_t conn_pool_cleanup(void *theworker)
{
((proxy_worker *)theworker)->cp = NULL;
return APR_SUCCESS;
}
-static void init_conn_pool(apr_pool_t *p, proxy_worker *worker)
+static apr_pool_t *make_conn_subpool(apr_pool_t *p, const char *tag,
+ server_rec *s)
+{
+ apr_pool_t *sp = NULL;
+ apr_allocator_t *alloc;
+ apr_thread_mutex_t *mutex;
+ apr_status_t rv;
+
+ rv = apr_allocator_create(&alloc);
+ if (rv == APR_SUCCESS) {
+ rv = apr_thread_mutex_create(&mutex, APR_THREAD_MUTEX_DEFAULT, p);
+ if (rv == APR_SUCCESS) {
+ apr_allocator_mutex_set(alloc, mutex);
+ apr_allocator_max_free_set(alloc, ap_max_mem_free);
+ rv = apr_pool_create_ex(&sp, p, NULL, alloc);
+ }
+ else {
+ apr_allocator_destroy(alloc);
+ }
+ }
+ if (rv != APR_SUCCESS) {
+ ap_log_error(APLOG_MARK, APLOG_CRIT, rv, s, APLOGNO(10474)
+ "failed to create %s pool", tag);
+ ap_abort_on_oom();
+ return NULL; /* not reached */
+ }
+ apr_allocator_owner_set(alloc, sp);
+ apr_pool_tag(sp, tag);
+
+ return sp;
+}
+
+static void init_conn_pool(apr_pool_t *p, proxy_worker *worker, server_rec *s)
{
- apr_pool_t *pool;
- apr_pool_t *dns_pool;
proxy_conn_pool *cp;
/*
- * Create a connection pool's subpool.
- * This pool is used for connection recycling.
- * Once the worker is added it is never removed but
- * it can be disabled.
- */
- apr_pool_create(&pool, p);
- apr_pool_tag(pool, "proxy_worker_cp");
- /*
- * Create a subpool of the connection pool for worker
- * scoped DNS resolutions. This is needed to avoid race
- * conditions in using the connection pool by multiple
- * threads during ramp up.
- */
- apr_pool_create(&dns_pool, pool);
- apr_pool_tag(dns_pool, "proxy_worker_dns");
- /*
* Alloc from the same pool as worker.
* proxy_conn_pool is permanently attached to the worker.
*/
cp = (proxy_conn_pool *)apr_pcalloc(p, sizeof(proxy_conn_pool));
- cp->pool = pool;
- cp->dns_pool = dns_pool;
worker->cp = cp;
+ /*
+ * We need a first pool (cp->pool) to maintain the connections attached to
+ * the worker and a second one (cp->dns_pool) to maintain the DNS addresses
+ * in use (TTL'ed, refcounted). New connections are created as/on a subpool
+ * of cp->pool and new addresses as/on a subpool of cp->dns_pool, such that
+ * both leaks (the subpools can be destroyed once the connections and/or
+ * addresses are done with) and race conditions (the creation/destruction of
+ * subpools is protected by the parent pool's mutex) can be avoided.
+ *
+ * cp->dns_pool is created before cp->pool because when a connection on the
+ * latter is destroyed it might destroy an address on the former, so when
+ * the base pools are destroyed (e.g. child exit) we thus make sure that
+ * cp->dns_pool and its subpools are still alive when cp->pool gets killed.
+ *
+ * Both cp->dns_pool and cp->pool have their own allocator/mutex too since
+ * acquiring connections and addresses don't need to contend.
+ */
+ cp->dns_pool = make_conn_subpool(p, "proxy_worker_dns", s);
+ cp->pool = make_conn_subpool(p, "proxy_worker_cp", s);
+
+ /* When p is being cleaned up the child is exiting; signal that to e.g. avoid
+ * destroying the subpools explicitly in connection_destructor() when
+ * they have been destroyed already by the reslist cleanup.
+ */
apr_pool_pre_cleanup_register(p, worker, conn_pool_cleanup);
}
@@ -1532,41 +1607,67 @@ PROXY_DECLARE(int) ap_proxy_connection_reusable(proxy_conn_rec *conn)
{
proxy_worker *worker = conn->worker;
- return ! (conn->close || !worker->s->is_address_reusable || worker->s->disablereuse);
+ return !(conn->close
+ || conn->forward
+ || worker->s->disablereuse
+ || !worker->s->is_address_reusable);
+}
+
+static proxy_conn_rec *connection_make(apr_pool_t *p, proxy_worker *worker)
+{
+ proxy_conn_rec *conn;
+
+ conn = apr_pcalloc(p, sizeof(proxy_conn_rec));
+ conn->pool = p;
+ conn->worker = worker;
+
+ /*
+ * Create another subpool that manages the data for the
+ * socket and the connection member of the proxy_conn_rec struct as we
+ * destroy this data more frequently than other data in the proxy_conn_rec
+ * struct like hostname and addr (at least in the case where we have
+ * keepalive connections that timed out).
+ *
+ * XXX: this is really needed only when worker->s->is_address_reusable,
+ * otherwise conn->scpool = conn->pool would be fine. For now we
+ * can't change it since it's (kind of) part of the API.
+ */
+ apr_pool_create(&conn->scpool, p);
+ apr_pool_tag(conn->scpool, "proxy_conn_scpool");
+
+ return conn;
}
-static apr_status_t connection_cleanup(void *theconn)
+static void connection_cleanup(void *theconn)
{
proxy_conn_rec *conn = (proxy_conn_rec *)theconn;
proxy_worker *worker = conn->worker;
- if (conn->r) {
- apr_pool_destroy(conn->r->pool);
- conn->r = NULL;
- }
-
/* Sanity check: Did we already return the pooled connection? */
if (conn->inreslist) {
ap_log_perror(APLOG_MARK, APLOG_ERR, 0, conn->pool, APLOGNO(00923)
"Pooled connection 0x%pp for worker %s has been"
" already returned to the connection pool.", conn,
ap_proxy_worker_name(conn->pool, worker));
- return APR_SUCCESS;
+ return;
+ }
+
+ if (conn->r) {
+ apr_pool_destroy(conn->r->pool);
+ conn->r = NULL;
}
- /* determine if the connection need to be closed */
- if (!worker->s->is_address_reusable || worker->s->disablereuse) {
+ /* determine if the connection should be cleared, closed or reused */
+ if (!worker->s->is_address_reusable) {
apr_pool_t *p = conn->pool;
apr_pool_clear(p);
- conn = apr_pcalloc(p, sizeof(proxy_conn_rec));
- conn->pool = p;
- conn->worker = worker;
- apr_pool_create(&(conn->scpool), p);
- apr_pool_tag(conn->scpool, "proxy_conn_scpool");
+ conn = connection_make(p, worker);
}
else if (conn->close
- || (conn->connection
- && conn->connection->keepalive == AP_CONN_CLOSE)) {
+ || conn->forward
+ || (conn->connection
+ && conn->connection->keepalive == AP_CONN_CLOSE)
+ || worker->s->disablereuse) {
socket_cleanup(conn);
conn->close = 0;
}
@@ -1582,13 +1683,9 @@ static apr_status_t connection_cleanup(void *theconn)
conn->inreslist = 1;
apr_reslist_release(worker->cp->res, (void *)conn);
}
- else
- {
+ else {
worker->cp->conn = conn;
}
-
- /* Always return the SUCCESS */
- return APR_SUCCESS;
}
/* DEPRECATED */
@@ -1629,35 +1726,21 @@ PROXY_DECLARE(apr_status_t) ap_proxy_ssl_connection_cleanup(proxy_conn_rec *conn
static apr_status_t connection_constructor(void **resource, void *params,
apr_pool_t *pool)
{
- apr_pool_t *ctx;
- apr_pool_t *scpool;
+ apr_pool_t *p;
proxy_conn_rec *conn;
proxy_worker *worker = (proxy_worker *)params;
/*
- * Create the subpool for each connection
+ * Create a subpool for each connection
* This keeps the memory consumption constant
- * when disconnecting from backend.
+ * when it's recycled or destroyed.
*/
- apr_pool_create(&ctx, pool);
- apr_pool_tag(ctx, "proxy_conn_pool");
- /*
- * Create another subpool that manages the data for the
- * socket and the connection member of the proxy_conn_rec struct as we
- * destroy this data more frequently than other data in the proxy_conn_rec
- * struct like hostname and addr (at least in the case where we have
- * keepalive connections that timed out).
- */
- apr_pool_create(&scpool, ctx);
- apr_pool_tag(scpool, "proxy_conn_scpool");
- conn = apr_pcalloc(ctx, sizeof(proxy_conn_rec));
-
- conn->pool = ctx;
- conn->scpool = scpool;
- conn->worker = worker;
+ apr_pool_create(&p, pool);
+ apr_pool_tag(p, "proxy_conn_pool");
+ conn = connection_make(p, worker);
conn->inreslist = 1;
- *resource = conn;
+ *resource = conn;
return APR_SUCCESS;
}
@@ -1881,6 +1964,7 @@ PROXY_DECLARE(char *) ap_proxy_define_worker_ex(apr_pool_t *p,
proxy_worker_shared *wshared;
const char *ptr = NULL, *sockpath = NULL, *pdollars = NULL;
apr_port_t port_of_scheme;
+ int address_not_reusable = 0;
apr_uri_t uri;
/*
@@ -1909,12 +1993,21 @@ PROXY_DECLARE(char *) ap_proxy_define_worker_ex(apr_pool_t *p,
* to fail (e.g. "ProxyPassMatch ^/(a|b)(/.*)? http://host:port$2").
* So we trim all the $n from the :port and prepend them in uri.path
* afterward for apr_uri_unparse() to restore the original URL below.
+ * If a dollar substitution is found in the hostname[:port] part of
+ * the URL, reusing address and connections in the same worker is not
+ * possible (the current implementation of active connections cache
+ * handles/assumes a single origin server:port per worker only), so
+ * we set address_not_reusable here during parsing to take that into
+ * account in the worker settings below.
*/
#define IS_REF(x) (x[0] == '$' && apr_isdigit(x[1]))
const char *pos = ap_strstr_c(ptr, "://");
if (pos) {
pos += 3;
while (*pos && *pos != ':' && *pos != '/') {
+ if (*pos == '$') {
+ address_not_reusable = 1;
+ }
pos++;
}
if (*pos == ':') {
@@ -1934,6 +2027,7 @@ PROXY_DECLARE(char *) ap_proxy_define_worker_ex(apr_pool_t *p,
vec[1].iov_base = (void *)path;
vec[1].iov_len = strlen(path);
ptr = apr_pstrcatv(p, vec, 2, NULL);
+ address_not_reusable = 1;
}
}
}
@@ -2029,7 +2123,9 @@ PROXY_DECLARE(char *) ap_proxy_define_worker_ex(apr_pool_t *p,
wshared->port = (uri.port) ? uri.port : port_of_scheme;
wshared->flush_packets = flush_off;
wshared->flush_wait = PROXY_FLUSH_WAIT;
- wshared->is_address_reusable = 1;
+ wshared->address_ttl = (address_not_reusable) ? 0 : -1;
+ wshared->is_address_reusable = (address_not_reusable == 0);
+ wshared->disablereuse = (address_not_reusable != 0);
wshared->lbfactor = 100;
wshared->passes = 1;
wshared->fails = 1;
@@ -2038,7 +2134,30 @@ PROXY_DECLARE(char *) ap_proxy_define_worker_ex(apr_pool_t *p,
wshared->hash.def = ap_proxy_hashfunc(wshared->name_ex, PROXY_HASHFUNC_DEFAULT);
wshared->hash.fnv = ap_proxy_hashfunc(wshared->name_ex, PROXY_HASHFUNC_FNV);
wshared->was_malloced = (mask & AP_PROXY_WORKER_IS_MALLOCED) != 0;
- wshared->is_name_matchable = 0;
+ if (mask & AP_PROXY_WORKER_IS_MATCH) {
+ wshared->is_name_matchable = 1;
+
+ /* Before AP_PROXY_WORKER_IS_MATCH (< 2.4.47), a regex worker with
+ * dollar substitution was never matched against any actual URL, thus
+ * the requests fell through the generic worker. Now if a ProxyPassMatch
+ * matches, a worker (and its parameters) is always used to determine
+ * the properties of the connection with the origin server. So for
+ * instance the same "timeout=" will be enforced for all the requests
+ * matched by the same ProxyPassMatch worker, which is an improvement
+ * compared to the global/vhost [Proxy]Timeout applied by the generic
+ * worker. Likewise, address and connection reuse is the default for
+ * a ProxyPassMatch worker with no dollar substitution, just like a
+ * "normal" worker. However to avoid DNS and connection reuse compat
+ * issues, connection reuse is disabled by default if there is any
+ * substitution in the uri-path (an explicit enablereuse=on can still
+ * opt-in), and reuse is even disabled definitively for substitutions
+ * happening in the hostname[:port] (is_address_reusable was unset
+ * above, so it will prevent enablereuse=on from applying anyway).
+ */
+ if (ap_strchr_c(wshared->name, '$')) {
+ wshared->disablereuse = 1;
+ }
+ }
if (sockpath) {
if (PROXY_STRNCPY(wshared->uds_path, sockpath) != APR_SUCCESS) {
return apr_psprintf(p, "worker uds path (%s) too long", sockpath);
@@ -2058,20 +2177,6 @@ PROXY_DECLARE(char *) ap_proxy_define_worker_ex(apr_pool_t *p,
(*worker)->balancer = balancer;
(*worker)->s = wshared;
- if (mask & AP_PROXY_WORKER_IS_MATCH) {
- (*worker)->s->is_name_matchable = 1;
- if (ap_strchr_c((*worker)->s->name_ex, '$')) {
- /* Before AP_PROXY_WORKER_IS_MATCH (< 2.4.47), a regex worker
- * with dollar substitution was never matched against the actual
- * URL thus the request fell through the generic worker. To avoid
- * dns and connection reuse compat issues, let's disable connection
- * reuse by default, it can still be overwritten by an explicit
- * enablereuse=on.
- */
- (*worker)->s->disablereuse = 1;
- }
- }
-
return NULL;
}
@@ -2157,12 +2262,23 @@ PROXY_DECLARE(apr_status_t) ap_proxy_initialize_worker(proxy_worker *worker, ser
if (!worker->s->retry_set) {
worker->s->retry = apr_time_from_sec(PROXY_WORKER_DEFAULT_RETRY);
}
- /* By default address is reusable unless DisableReuse is set */
- if (worker->s->disablereuse) {
+ /* Consistently set address and connection reusability: when reuse
+ * is disabled by configuration, or when the address is already known
+ * not to be reusable for this worker (in either case, DisableReuse is
+ * ignored or forced accordingly).
+ */
+ if (!worker->s->address_ttl || (!worker->s->address_ttl_set
+ && worker->s->disablereuse)) {
worker->s->is_address_reusable = 0;
}
- else {
- worker->s->is_address_reusable = 1;
+ if (!worker->s->is_address_reusable && !worker->s->disablereuse) {
+ /* Explicit enablereuse=on can't work in this case, warn user. */
+ if (worker->s->disablereuse_set) {
+ ap_log_error(APLOG_MARK, APLOG_WARNING, 0, s, APLOGNO(10400)
+ "enablereuse/disablereuse ignored for worker %s",
+ ap_proxy_worker_name(p, worker));
+ }
+ worker->s->disablereuse = 1;
}
/*
@@ -2226,7 +2342,7 @@ PROXY_DECLARE(apr_status_t) ap_proxy_initialize_worker(proxy_worker *worker, ser
}
#endif
if (worker->cp == NULL)
- init_conn_pool(p, worker);
+ init_conn_pool(p, worker, s);
if (worker->cp == NULL) {
ap_log_error(APLOG_MARK, APLOG_ERR, 0, s, APLOGNO(00929)
"can not create connection pool");
@@ -2590,6 +2706,354 @@ PROXY_DECLARE(int) ap_proxy_release_connection(const char *proxy_function,
return OK;
}
+static APR_INLINE void proxy_address_inc(proxy_address *address)
+{
+ apr_uint32_t old = apr_atomic_inc32(&address->refcount);
+ ap_assert(old > 0 && old < APR_UINT32_MAX);
+}
+
+static APR_INLINE void proxy_address_dec(proxy_address *address)
+{
+ /* Use _add32(, -1) since _dec32()'s returned value does not help */
+ apr_uint32_t old = apr_atomic_add32(&address->refcount, -1);
+ ap_assert(old > 0);
+ if (old == 1) {
+ apr_pool_destroy(address->addr->pool);
+ }
+}
+
+static apr_status_t proxy_address_cleanup(void *address)
+{
+ proxy_address_dec(address);
+ return APR_SUCCESS;
+}
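
The helpers above give resolved addresses a small refcounted lifetime. As a minimal sketch (not part of the patch; it only reuses names introduced in this file), this is how a connection is expected to pin an address so the address pool outlives the worker's own reference:

    /* Sketch only: pin a resolved address to a connection. The extra ref is
     * dropped by the registered pool cleanup, and proxy_address_dec()
     * destroys address->addr->pool once the last user is gone. */
    static void example_pin_address(proxy_conn_rec *conn, proxy_address *address)
    {
        proxy_address_inc(address);                    /* ref for this conn */
        apr_pool_cleanup_register(conn->pool, address,
                                  proxy_address_cleanup,
                                  apr_pool_cleanup_null);
        conn->address = address;
        conn->hostname = address->hostname;
        conn->port = address->hostport;
        conn->addr = address->addr;
    }
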
+
+static APR_INLINE proxy_address *worker_address_get(proxy_worker *worker)
+{
+ /* No _readptr() so let's _casptr(, NULL, NULL) instead */
+ return apr_atomic_casptr((void *)&worker->address, NULL, NULL);
+}
+
+/* XXX: Call when PROXY_THREAD_LOCK()ed only! */
+static APR_INLINE void worker_address_set(proxy_worker *worker,
+ proxy_address *to)
+{
+ proxy_address *old = apr_atomic_xchgptr((void *)&worker->address, to);
+ if (old && old != to) {
+ proxy_address_dec(old);
+ }
+}
+
+static apr_status_t worker_address_resolve(proxy_worker *worker,
+ apr_sockaddr_t **paddr,
+ const char *hostname,
+ apr_port_t hostport,
+ const char *proxy_function,
+ request_rec *r, server_rec *s)
+{
+ apr_status_t rv;
+ apr_pool_t *pool = NULL;
+
+ apr_pool_create(&pool, worker->cp->dns_pool);
+ rv = apr_sockaddr_info_get(paddr, hostname, APR_UNSPEC,
+ hostport, 0, pool);
+ if (rv != APR_SUCCESS) {
+ if (r && !s) {
+ proxyerror_core(r, HTTP_INTERNAL_SERVER_ERROR,
+ apr_pstrcat(pool,
+ "DNS lookup failure for: ",
+ hostname, NULL),
+ rv);
+ }
+ else if (r) {
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, rv, r, APLOGNO(10477)
+ "%s: resolving worker %s address",
+ proxy_function, hostname);
+ }
+ else {
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, rv, s, APLOGNO(10478)
+ "%s: resolving worker %s address",
+ proxy_function, hostname);
+ }
+ apr_pool_destroy(pool);
+ return rv;
+ }
+
+ if (r ? APLOGrdebug(r) : APLOGdebug(s)) {
+ char *addrs = NULL;
+ apr_sockaddr_t *addr = *paddr;
+ for (; addr; addr = addr->next) {
+ addrs = apr_psprintf(pool, "%s%s%pI",
+ addrs ? ", " : "",
+ addrs ? addrs : "",
+ addr);
+ }
+ if (r) {
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(10479)
+ "%s: %s resolved to %s",
+ proxy_function, hostname, addrs);
+ }
+ else {
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s, APLOGNO(10480)
+ "%s: %s resolved to %s",
+ proxy_function, hostname, addrs);
+ }
+ }
+
+ return APR_SUCCESS;
+}
+
+static int proxy_addrs_equal(const apr_sockaddr_t *addr1,
+ const apr_sockaddr_t *addr2)
+{
+ const apr_sockaddr_t *base2 = addr2, *pos2;
+ while (addr1 && addr2) {
+ for (pos2 = base2; pos2; pos2 = pos2->next) {
+ if (apr_sockaddr_equal(pos2, addr1)) {
+ break;
+ }
+ }
+ if (!pos2) {
+ return 0;
+ }
+ addr1 = addr1->next;
+ addr2 = addr2->next;
+ }
+ if (addr1 || addr2) {
+ return 0;
+ }
+ return 1;
+}
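
A hedged usage sketch of the comparison above (the hostname, port and pool parameters are hypothetical locals): re-resolve and treat the backend as changed only when the resolved sockaddr sets differ.

    static int example_dns_changed(apr_pool_t *p, const char *hostname,
                                   apr_port_t port, const apr_sockaddr_t *cached)
    {
        apr_sockaddr_t *fresh = NULL;
        if (apr_sockaddr_info_get(&fresh, hostname, APR_UNSPEC,
                                  port, 0, p) != APR_SUCCESS) {
            return 0; /* lookup failed: keep using the cached addresses */
        }
        return !proxy_addrs_equal(cached, fresh);
    }
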
+
+PROXY_DECLARE(apr_status_t) ap_proxy_determine_address(const char *proxy_function,
+ proxy_conn_rec *conn,
+ const char *hostname,
+ apr_port_t hostport,
+ unsigned int flags,
+ request_rec *r,
+ server_rec *s)
+{
+ proxy_worker *worker = conn->worker;
+ apr_status_t rv;
+
+ /*
+ * A worker can have a single constant backend address.
+ * The single DNS lookup is used once per worker.
+ * If dynamic change is needed then set the addr to NULL
+ * inside dynamic config to force the lookup.
+ * The worker's addressTTL parameter may also be configured
+ * to perform the DNS lookups only when the TTL expires,
+ * or each time if that TTL is zero.
+ */
+ if (!worker->s->is_address_reusable) {
+ conn->hostname = apr_pstrdup(conn->pool, hostname);
+ conn->port = hostport;
+
+ rv = apr_sockaddr_info_get(&conn->addr, hostname, APR_UNSPEC,
+ hostport, 0, conn->pool);
+ if (rv != APR_SUCCESS) {
+ if (r && !s) {
+ proxyerror_core(r, HTTP_INTERNAL_SERVER_ERROR,
+ apr_pstrcat(r->pool, "DNS lookup failure for: ",
+ hostname, NULL), rv);
+ }
+ else if (r) {
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, rv, r, APLOGNO(10475)
+ "%s: resolving backend %s address",
+ proxy_function, hostname);
+ }
+ else {
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, rv, s, APLOGNO(10476)
+ "%s: resolving backend %s address",
+ proxy_function, hostname);
+ }
+ return rv;
+ }
+ }
+ else {
+ apr_sockaddr_t *addr = NULL;
+ proxy_address *address = NULL;
+ apr_int32_t ttl = worker->s->address_ttl;
+ apr_uint32_t now = 0;
+
+ if (flags & PROXY_DETERMINE_ADDRESS_CHECK) {
+ /* The caller wants to check if the address changed; return
+ * APR_EEXIST if not, otherwise fall through to update the
+ * worker's address for everyone to switch.
+ */
+ if (!conn->addr) {
+ /* Need something to compare with */
+ return APR_EINVAL;
+ }
+ rv = worker_address_resolve(worker, &addr,
+ hostname, hostport,
+ proxy_function, r, s);
+ if (rv != APR_SUCCESS) {
+ return rv;
+ }
+ if (proxy_addrs_equal(conn->addr, addr)) {
+ apr_pool_destroy(addr->pool);
+ return APR_EEXIST;
+ }
+ }
+
+ AP_DEBUG_ASSERT(ttl != 0);
+ if (ttl > 0) {
+ /* TODO: use a monotonic clock here */
+ now = apr_time_sec(apr_time_now() - *proxy_start_time);
+ }
+
+ /* Addresses are refcounted, destroyed when their refcount reaches 0.
+ *
+ * One ref is taken by worker->address as the worker's current/latest
+ * address, it's dropped when that address expires/changes (see below).
+ * The other refs are taken by the connections when using/switching to
+ * the current worker address (also below), they are dropped when the
+ * conns are destroyed (by the reslist though it should never happen
+ * if hmax is greater than the number of threads) OR for an expired
+ * conn->address when it's replaced by the new worker->address below.
+ *
+ * Dereferencing worker->address requires holding the worker mutex or
+ * some concurrent connection processing might change/destroy it at any
+ * time. So only conn->address is safe to dereference anywhere (unless
+ * NULL..) since it has at least the lifetime of the connection.
+ */
+ if (!addr) {
+ address = worker_address_get(worker);
+ }
+ if (!address
+ || conn->address != address
+ || apr_atomic_read32(&address->expiry) <= now) {
+ PROXY_THREAD_LOCK(worker);
+
+ /* Re-check while locked, might be a new address already */
+ if (!addr) {
+ address = worker_address_get(worker);
+ }
+ if (!address || apr_atomic_read32(&address->expiry) <= now) {
+ if (!addr) {
+ rv = worker_address_resolve(worker, &addr,
+ hostname, hostport,
+ proxy_function, r, s);
+ if (rv != APR_SUCCESS) {
+ PROXY_THREAD_UNLOCK(worker);
+ return rv;
+ }
+
+ /* Recompute "now" should the DNS be slow
+ * TODO: use a monotonic clock here
+ */
+ now = apr_time_sec(apr_time_now() - *proxy_start_time);
+ }
+
+ address = apr_pcalloc(addr->pool, sizeof(*address));
+ address->hostname = apr_pstrdup(addr->pool, hostname);
+ address->hostport = hostport;
+ address->addr = addr;
+
+ if (ttl > 0) {
+ /* We keep each worker's expiry date shared across all the
+ * children so that they update their address at the same
+ * time, regardless of whether a specific child forced an
+ * address to expire at some point (for connect() issues).
+ */
+ address->expiry = apr_atomic_read32(&worker->s->address_expiry);
+ if (address->expiry <= now) {
+ apr_uint32_t new_expiry = address->expiry + ttl;
+ while (new_expiry <= now) {
+ new_expiry += ttl;
+ }
+ new_expiry = apr_atomic_cas32(&worker->s->address_expiry,
+ new_expiry, address->expiry);
+ /* race lost? well the expiry should grow anyway.. */
+ AP_DEBUG_ASSERT(new_expiry > now);
+ address->expiry = new_expiry;
+ }
+ }
+ else {
+ /* Never expires */
+ address->expiry = APR_UINT32_MAX;
+ }
+
+ /* One ref is for worker->address in any case */
+ if (worker->address || worker->cp->addr) {
+ apr_atomic_set32(&address->refcount, 1);
+ }
+ else {
+ /* Set worker->cp->addr once for compat with third-party
+ * modules. This addr never changed before, and must not change
+ * underneath those users now just because a TTL is configured.
+ * So we take one more ref for worker->cp->addr to remain
+ * allocated forever (though it might not be up to date..).
+ * Modules should use conn->addr instead of worker->cp->addr
+ * to get the actual address used by each conn, determined
+ * at connect() time.
+ */
+ apr_atomic_set32(&address->refcount, 2);
+ worker->cp->addr = address->addr;
+ }
+
+ /* Publish the changes. The old worker address (if any) is no
+ * longer used by this worker, it will be destroyed now if the
+ * worker is the last user (refcount == 1) or by the last conn
+ * using it (refcount > 1).
+ */
+ worker_address_set(worker, address);
+ }
+
+ /* Take the ref for conn->address (before dropping the mutex, so that
+ * this address has no chance of being destroyed before it's used!)
+ */
+ proxy_address_inc(address);
+
+ PROXY_THREAD_UNLOCK(worker);
+
+ /* Kill any socket using the old address */
+ if (conn->sock) {
+ if (r ? APLOGrdebug(r) : APLOGdebug(s)) {
+ /* XXX: this requires the old conn->addr[ess] to still
+ * be alive since it's not copied by apr_socket_connect()
+ * in ap_proxy_connect_backend().
+ */
+ apr_sockaddr_t *local_addr = NULL;
+ apr_sockaddr_t *remote_addr = NULL;
+ apr_socket_addr_get(&local_addr, APR_LOCAL, conn->sock);
+ apr_socket_addr_get(&remote_addr, APR_REMOTE, conn->sock);
+ if (r) {
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(10481)
+ "%s: closing connection to %s (%pI<>%pI) on "
+ "address change", proxy_function, hostname,
+ local_addr, remote_addr);
+ }
+ else {
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s, APLOGNO(10482)
+ "%s: closing connection to %s (%pI<>%pI) on "
+ "address change", proxy_function, hostname,
+ local_addr, remote_addr);
+ }
+ }
+ socket_cleanup(conn);
+ }
+
+ /* Kill the old address (if any) and use the new one */
+ if (conn->address) {
+ apr_pool_cleanup_run(conn->pool, conn->address,
+ proxy_address_cleanup);
+ }
+ apr_pool_cleanup_register(conn->pool, address,
+ proxy_address_cleanup,
+ apr_pool_cleanup_null);
+ address_cleanup(conn);
+ conn->address = address;
+ conn->hostname = address->hostname;
+ conn->port = address->hostport;
+ conn->addr = address->addr;
+ }
+ }
+
+ return APR_SUCCESS;
+}
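
For third-party scheme handlers, a minimal calling sequence for the new ap_proxy_determine_address() could look like the following; this is illustrative only, the handler name is a placeholder and error handling is reduced to status codes:

    static int example_scheme_connect(request_rec *r, proxy_worker *worker,
                                      proxy_conn_rec *backend,
                                      apr_uri_t *uri, server_rec *s)
    {
        /* flags=0: honor the worker's address TTL and cached resolution */
        if (ap_proxy_determine_address("example", backend,
                                       uri->hostname, uri->port,
                                       0, r, NULL) != APR_SUCCESS) {
            return HTTP_INTERNAL_SERVER_ERROR;
        }
        /* backend->addr/hostname/port are set now; connect as usual */
        if (ap_proxy_connect_backend("example", backend, worker, s)) {
            return HTTP_SERVICE_UNAVAILABLE;
        }
        return OK;
    }
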
+
PROXY_DECLARE(int)
ap_proxy_determine_connection(apr_pool_t *p, request_rec *r,
proxy_server_conf *conf,
@@ -2603,10 +3067,6 @@ ap_proxy_determine_connection(apr_pool_t *p, request_rec *r,
int server_portstr_size)
{
int server_port;
- apr_status_t err = APR_SUCCESS;
-#if APR_HAS_THREADS
- apr_status_t uerr = APR_SUCCESS;
-#endif
const char *uds_path;
/*
@@ -2626,6 +3086,12 @@ ap_proxy_determine_connection(apr_pool_t *p, request_rec *r,
ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(00944)
"connecting %s to %s:%d", *url, uri->hostname, uri->port);
+ /* Close a possible existing socket if we are told to do so */
+ if (conn->close) {
+ socket_cleanup(conn);
+ conn->close = 0;
+ }
+
/*
* allocate these out of the specified connection pool
* The scheme handler decides if this is permanent or
@@ -2652,143 +3118,122 @@ ap_proxy_determine_connection(apr_pool_t *p, request_rec *r,
* to check host and port on the conn and be careful about
* spilling the cached addr from the worker.
*/
- uds_path = (*worker->s->uds_path ? worker->s->uds_path : apr_table_get(r->notes, "uds_path"));
+ uds_path = (*worker->s->uds_path
+ ? worker->s->uds_path
+ : apr_table_get(r->notes, "uds_path"));
if (uds_path) {
- if (conn->uds_path == NULL) {
- /* use (*conn)->pool instead of worker->cp->pool to match lifetime */
- conn->uds_path = apr_pstrdup(conn->pool, uds_path);
- }
- if (conn->uds_path) {
- ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(02545)
- "%s: has determined UDS as %s",
- uri->scheme, conn->uds_path);
- }
- else {
- /* should never happen */
- ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(02546)
- "%s: cannot determine UDS (%s)",
- uri->scheme, uds_path);
-
- }
- /*
- * In UDS cases, some structs are NULL. Protect from de-refs
- * and provide info for logging at the same time.
- */
- if (!conn->addr) {
- apr_sockaddr_t *sa;
- apr_sockaddr_info_get(&sa, NULL, APR_UNSPEC, 0, 0, conn->pool);
- conn->addr = sa;
- }
- conn->hostname = "httpd-UDS";
- conn->port = 0;
- }
- else {
- int will_reuse = worker->s->is_address_reusable && !worker->s->disablereuse;
- if (!conn->hostname || !will_reuse) {
- if (proxyname) {
- conn->hostname = apr_pstrdup(conn->pool, proxyname);
- conn->port = proxyport;
- /*
- * If we have a forward proxy and the protocol is HTTPS,
- * then we need to prepend a HTTP CONNECT request before
- * sending our actual HTTPS requests.
- * Save our real backend data for using it later during HTTP CONNECT.
- */
- if (conn->is_ssl) {
- const char *proxy_auth;
-
- forward_info *forward = apr_pcalloc(conn->pool, sizeof(forward_info));
- conn->forward = forward;
- forward->use_http_connect = 1;
- forward->target_host = apr_pstrdup(conn->pool, uri->hostname);
- forward->target_port = uri->port;
- /* Do we want to pass Proxy-Authorization along?
- * If we haven't used it, then YES
- * If we have used it then MAYBE: RFC2616 says we MAY propagate it.
- * So let's make it configurable by env.
- * The logic here is the same used in mod_proxy_http.
- */
- proxy_auth = apr_table_get(r->headers_in, "Proxy-Authorization");
- if (proxy_auth != NULL &&
- proxy_auth[0] != '\0' &&
- r->user == NULL && /* we haven't yet authenticated */
- apr_table_get(r->subprocess_env, "Proxy-Chain-Auth")) {
- forward->proxy_auth = apr_pstrdup(conn->pool, proxy_auth);
- }
+ if (!conn->uds_path || strcmp(conn->uds_path, uds_path) != 0) {
+ apr_pool_t *pool = conn->pool;
+ if (conn->uds_path) {
+ address_cleanup(conn);
+ if (!conn->uds_pool) {
+ apr_pool_create(&conn->uds_pool, worker->cp->dns_pool);
}
+ pool = conn->uds_pool;
}
- else {
- conn->hostname = apr_pstrdup(conn->pool, uri->hostname);
- conn->port = uri->port;
+ /*
+ * In UDS cases, some structs are NULL. Protect from de-refs
+ * and provide info for logging at the same time.
+ */
+#if APR_HAVE_SOCKADDR_UN
+ apr_sockaddr_info_get(&conn->addr, uds_path, APR_UNIX, 0, 0, pool);
+ if (conn->addr && conn->addr->hostname) {
+ conn->uds_path = conn->addr->hostname;
}
- if (!will_reuse) {
- /*
- * Only do a lookup if we should not reuse the backend address.
- * Otherwise we will look it up once for the worker.
- */
- err = apr_sockaddr_info_get(&(conn->addr),
- conn->hostname, APR_UNSPEC,
- conn->port, 0,
- conn->pool);
+ else {
+ conn->uds_path = apr_pstrdup(pool, uds_path);
}
- socket_cleanup(conn);
- conn->close = 0;
+#else
+ apr_sockaddr_info_get(&conn->addr, NULL, APR_UNSPEC, 0, 0, pool);
+ conn->uds_path = apr_pstrdup(pool, uds_path);
+#endif
+ conn->hostname = apr_pstrdup(pool, uri->hostname);
+ conn->port = uri->port;
}
- if (will_reuse) {
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(02545)
+ "%s: has determined UDS as %s (for %s:%hu)",
+ uri->scheme, conn->uds_path, conn->hostname, conn->port);
+ }
+ else {
+ const char *hostname = uri->hostname;
+ apr_port_t hostport = uri->port;
+
+ /* Not a remote CONNECT until further notice */
+ conn->forward = NULL;
+
+ if (proxyname) {
+ hostname = proxyname;
+ hostport = proxyport;
+
/*
- * Looking up the backend address for the worker only makes sense if
- * we can reuse the address.
+ * If we have a remote proxy and the protocol is HTTPS,
+ * then we need to prepend a HTTP CONNECT request before
+ * sending our actual HTTPS requests.
*/
- if (!worker->cp->addr) {
-#if APR_HAS_THREADS
- if ((err = PROXY_THREAD_LOCK(worker)) != APR_SUCCESS) {
- ap_log_rerror(APLOG_MARK, APLOG_ERR, err, r, APLOGNO(00945) "lock");
- return HTTP_INTERNAL_SERVER_ERROR;
- }
-#endif
-
- /*
- * Recheck addr after we got the lock. This may have changed
- * while waiting for the lock.
+ if (conn->is_ssl) {
+ forward_info *forward;
+ const char *proxy_auth;
+
+ /* Do we want to pass Proxy-Authorization along?
+ * If we haven't used it, then YES
+ * If we have used it then MAYBE: RFC2616 says we MAY propagate it.
+ * So let's make it configurable by env.
+ * The logic here is the same used in mod_proxy_http.
*/
- if (!AP_VOLATILIZE_T(apr_sockaddr_t *, worker->cp->addr)) {
+ proxy_auth = apr_table_get(r->notes, "proxy-basic-creds");
+ if (proxy_auth == NULL
+ && (r->user == NULL /* we haven't yet authenticated */
+ || apr_table_get(r->subprocess_env, "Proxy-Chain-Auth"))) {
+ proxy_auth = apr_table_get(r->headers_in, "Proxy-Authorization");
+ }
+ if (proxy_auth != NULL && proxy_auth[0] == '\0') {
+ proxy_auth = NULL;
+ }
- apr_sockaddr_t *addr;
+ /* Reset forward info if they changed */
+ if (!(forward = conn->forward)
+ || forward->target_port != uri->port
+ || ap_cstr_casecmp(forward->target_host, uri->hostname) != 0
+ || (forward->proxy_auth != NULL) != (proxy_auth != NULL)
+ || (forward->proxy_auth != NULL && proxy_auth != NULL &&
+ strcmp(forward->proxy_auth, proxy_auth) != 0)) {
+ apr_pool_t *fwd_pool = conn->pool;
+ if (worker->s->is_address_reusable) {
+ if (conn->fwd_pool) {
+ apr_pool_clear(conn->fwd_pool);
+ }
+ else {
+ apr_pool_create(&conn->fwd_pool, conn->pool);
+ }
+ fwd_pool = conn->fwd_pool;
+ }
+ forward = apr_pcalloc(fwd_pool, sizeof(forward_info));
+ conn->forward = forward;
/*
- * Worker can have the single constant backend address.
- * The single DNS lookup is used once per worker.
- * If dynamic change is needed then set the addr to NULL
- * inside dynamic config to force the lookup.
+ * Save our real backend data for using it later during HTTP CONNECT.
*/
- err = apr_sockaddr_info_get(&addr,
- conn->hostname, APR_UNSPEC,
- conn->port, 0,
- worker->cp->dns_pool);
- worker->cp->addr = addr;
- }
- conn->addr = worker->cp->addr;
-#if APR_HAS_THREADS
- if ((uerr = PROXY_THREAD_UNLOCK(worker)) != APR_SUCCESS) {
- ap_log_rerror(APLOG_MARK, APLOG_ERR, uerr, r, APLOGNO(00946) "unlock");
+ forward->use_http_connect = 1;
+ forward->target_host = apr_pstrdup(fwd_pool, uri->hostname);
+ forward->target_port = uri->port;
+ if (proxy_auth) {
+ forward->proxy_auth = apr_pstrdup(fwd_pool, proxy_auth);
+ }
}
-#endif
- }
- else {
- conn->addr = worker->cp->addr;
}
}
- }
- /* Close a possible existing socket if we are told to do so */
- if (conn->close) {
- socket_cleanup(conn);
- conn->close = 0;
- }
- if (err != APR_SUCCESS) {
- return ap_proxyerror(r, HTTP_BAD_GATEWAY,
- apr_pstrcat(p, "DNS lookup failure for: ",
- conn->hostname, NULL));
+ if (conn->hostname
+ && (conn->port != hostport
+ || ap_cstr_casecmp(conn->hostname, hostname) != 0)) {
+ address_cleanup(conn);
+ }
+
+ /* Resolve the connection address with the determined hostname/port */
+ if (ap_proxy_determine_address(uri->scheme, conn, hostname, hostport,
+ 0, r, NULL)) {
+ return HTTP_INTERNAL_SERVER_ERROR;
+ }
}
/* Get the server port for the Via headers */
@@ -2847,7 +3292,8 @@ ap_proxy_determine_connection(apr_pool_t *p, request_rec *r,
}
}
ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(00947)
- "connected %s to %s:%d", *url, conn->hostname, conn->port);
+ "connecting %s to %pI (%s:%hu)", *url,
+ conn->addr, conn->hostname, conn->port);
return OK;
}
@@ -2948,7 +3394,8 @@ static apr_status_t send_http_connect(proxy_conn_rec *backend,
nbytes = apr_snprintf(buffer, sizeof(buffer),
"CONNECT %s:%d HTTP/1.0" CRLF,
forward->target_host, forward->target_port);
- /* Add proxy authorization from the initial request if necessary */
+ /* Add proxy authorization from the configuration, or initial
+ * request if necessary */
if (forward->proxy_auth != NULL) {
nbytes += apr_snprintf(buffer + nbytes, sizeof(buffer) - nbytes,
"Proxy-Authorization: %s" CRLF,
@@ -3171,11 +3618,14 @@ PROXY_DECLARE(int) ap_proxy_connect_backend(const char *proxy_function,
{
apr_status_t rv;
int loglevel;
- apr_sockaddr_t *backend_addr = conn->addr;
+ forward_info *forward = conn->forward;
+ apr_sockaddr_t *backend_addr;
/* the local address to use for the outgoing connection */
apr_sockaddr_t *local_addr;
apr_socket_t *newsock;
void *sconf = s->module_config;
+ int address_reusable = worker->s->is_address_reusable;
+ int did_dns_lookup = 0;
proxy_server_conf *conf =
(proxy_server_conf *) ap_get_module_config(sconf, &proxy_module);
@@ -3184,6 +3634,16 @@ PROXY_DECLARE(int) ap_proxy_connect_backend(const char *proxy_function,
return DECLINED;
}
+ /* We'll set conn->addr to the address actually connect()ed, so if the
+ * network connection is not reused (per ap_proxy_check_connection()
+ * above) we need to reset conn->addr to the first resolved address
+ * and try to connect it first.
+ */
+ if (conn->address && rv != APR_SUCCESS) {
+ conn->addr = conn->address->addr;
+ }
+ backend_addr = conn->addr;
+
while (rv != APR_SUCCESS && (backend_addr || conn->uds_path)) {
#if APR_HAVE_SYS_UN_H
if (conn->uds_path)
@@ -3193,11 +3653,11 @@ PROXY_DECLARE(int) ap_proxy_connect_backend(const char *proxy_function,
if (rv != APR_SUCCESS) {
loglevel = APLOG_ERR;
ap_log_error(APLOG_MARK, loglevel, rv, s, APLOGNO(02453)
- "%s: error creating Unix domain socket for "
- "target %s:%d",
+ "%s: error creating Unix domain socket "
+ "%s (%s:%hu)",
proxy_function,
- worker->s->hostname_ex,
- (int)worker->s->port);
+ conn->uds_path,
+ conn->hostname, conn->port);
break;
}
conn->connection = NULL;
@@ -3207,21 +3667,18 @@ PROXY_DECLARE(int) ap_proxy_connect_backend(const char *proxy_function,
apr_socket_close(newsock);
ap_log_error(APLOG_MARK, APLOG_ERR, rv, s, APLOGNO(02454)
"%s: attempt to connect to Unix domain socket "
- "%s (%s:%d) failed",
- proxy_function,
- conn->uds_path,
- worker->s->hostname_ex,
- (int)worker->s->port);
+ "%s (%s:%hu) failed",
+ proxy_function, conn->uds_path,
+ conn->hostname, conn->port);
break;
}
ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s, APLOGNO(02823)
"%s: connection established with Unix domain socket "
- "%s (%s:%d)",
+ "%s (%s:%hu)",
proxy_function,
conn->uds_path,
- worker->s->hostname_ex,
- (int)worker->s->port);
+ conn->hostname, conn->port);
}
else
#endif
@@ -3231,12 +3688,11 @@ PROXY_DECLARE(int) ap_proxy_connect_backend(const char *proxy_function,
conn->scpool)) != APR_SUCCESS) {
loglevel = backend_addr->next ? APLOG_DEBUG : APLOG_ERR;
ap_log_error(APLOG_MARK, loglevel, rv, s, APLOGNO(00952)
- "%s: error creating fam %d socket for "
- "target %s:%d",
+ "%s: error creating fam %d socket to %pI for "
+ "(%s:%hu)",
proxy_function,
- backend_addr->family,
- worker->s->hostname_ex,
- (int)worker->s->port);
+ backend_addr->family, backend_addr,
+ conn->hostname, conn->port);
/*
* this could be an IPv6 address from the DNS but the
* local machine won't give us an IPv6 socket; hopefully the
@@ -3285,9 +3741,9 @@ PROXY_DECLARE(int) ap_proxy_connect_backend(const char *proxy_function,
}
}
ap_log_error(APLOG_MARK, APLOG_TRACE2, 0, s,
- "%s: fam %d socket created to connect to %s:%d",
- proxy_function, backend_addr->family,
- worker->s->hostname_ex, (int)worker->s->port);
+ "%s: fam %d socket created for %pI (%s:%hu)",
+ proxy_function, backend_addr->family, backend_addr,
+ conn->hostname, conn->port);
if (conf->source_address_set) {
local_addr = apr_pmemdup(conn->scpool, conf->source_address,
@@ -3309,21 +3765,45 @@ PROXY_DECLARE(int) ap_proxy_connect_backend(const char *proxy_function,
apr_socket_close(newsock);
loglevel = backend_addr->next ? APLOG_DEBUG : APLOG_ERR;
ap_log_error(APLOG_MARK, loglevel, rv, s, APLOGNO(00957)
- "%s: attempt to connect to %pI (%s:%d) failed",
- proxy_function,
- backend_addr,
- worker->s->hostname_ex,
- (int)worker->s->port);
+ "%s: attempt to connect to %pI (%s:%hu) failed",
+ proxy_function, backend_addr,
+ conn->hostname, conn->port);
backend_addr = backend_addr->next;
+ /*
+ * If we run out of resolved IPs when connecting, and we cache
+ * the resolution in the worker, the resolution might have
+ * changed in the meantime. Hence try a DNS lookup to see if
+ * this helps.
+ */
+ if (!backend_addr && address_reusable && !did_dns_lookup) {
+ /* Issue a new DNS lookup to check if the address changed,
+ * in which case (SUCCESS) restart the loop with the new
+ * one(s), otherwise leave (nothing we can do about it).
+ */
+ if (ap_proxy_determine_address(proxy_function, conn,
+ conn->hostname, conn->port,
+ PROXY_DETERMINE_ADDRESS_CHECK,
+ NULL, s) == APR_SUCCESS) {
+ backend_addr = conn->addr;
+ }
+
+ /*
+ * In case of an error backend_addr will be NULL which
+ * is enough to leave the loop. If successful we'll retry
+ * the new addresses only once.
+ */
+ did_dns_lookup = 1;
+ }
continue;
}
ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s, APLOGNO(02824)
- "%s: connection established with %pI (%s:%d)",
- proxy_function,
- backend_addr,
- worker->s->hostname_ex,
- (int)worker->s->port);
+ "%s: connection established with %pI (%s:%hu)",
+ proxy_function, backend_addr,
+ conn->hostname, conn->port);
+
+ /* Set the actual sockaddr we are connected to */
+ conn->addr = backend_addr;
}
/* Set a timeout on the socket */
@@ -3339,13 +3819,12 @@ PROXY_DECLARE(int) ap_proxy_connect_backend(const char *proxy_function,
conn->sock = newsock;
- if (!conn->uds_path && conn->forward) {
- forward_info *forward = (forward_info *)conn->forward;
+ if (forward && forward->use_http_connect) {
/*
* For HTTP CONNECT we need to prepend CONNECT request before
* sending our actual HTTPS requests.
*/
- if (forward->use_http_connect) {
+ {
rv = send_http_connect(conn, s);
/* If an error occurred, loop round and try again */
if (rv != APR_SUCCESS) {
@@ -3353,12 +3832,11 @@ PROXY_DECLARE(int) ap_proxy_connect_backend(const char *proxy_function,
apr_socket_close(newsock);
loglevel = backend_addr->next ? APLOG_DEBUG : APLOG_ERR;
ap_log_error(APLOG_MARK, loglevel, rv, s, APLOGNO(00958)
- "%s: attempt to connect to %s:%d "
- "via http CONNECT through %pI (%s:%d) failed",
+ "%s: attempt to connect to %s:%hu "
+ "via http CONNECT through %pI (%s:%hu) failed",
proxy_function,
forward->target_host, forward->target_port,
- backend_addr, worker->s->hostname_ex,
- (int)worker->s->port);
+ backend_addr, conn->hostname, conn->port);
backend_addr = backend_addr->next;
continue;
}
@@ -3378,8 +3856,8 @@ PROXY_DECLARE(int) ap_proxy_connect_backend(const char *proxy_function,
worker->s->error_time = apr_time_now();
worker->s->status |= PROXY_WORKER_IN_ERROR;
ap_log_error(APLOG_MARK, APLOG_ERR, 0, s, APLOGNO(00959)
- "ap_proxy_connect_backend disabling worker for (%s:%d) for %"
- APR_TIME_T_FMT "s",
+ "ap_proxy_connect_backend disabling worker for (%s:%hu) "
+ "for %" APR_TIME_T_FMT "s",
worker->s->hostname_ex, (int)worker->s->port,
apr_time_sec(worker->s->retry));
}
@@ -3473,7 +3951,7 @@ static int proxy_connection_create(const char *proxy_function,
* the peer reset the connection already; ap_run_create_connection()
* closed the socket
*/
- ap_log_error(APLOG_MARK, APLOG_DEBUG, 0,
+ ap_log_error(APLOG_MARK, APLOG_ERR, 0,
s, APLOGNO(00960) "%s: an error occurred creating a "
"new connection to %pI (%s)", proxy_function,
backend_addr, conn->hostname);
@@ -3908,7 +4386,7 @@ PROXY_DECLARE(int) ap_proxy_create_hdrbrgd(apr_pool_t *p,
apr_bucket *e;
int force10 = 0, do_100_continue = 0;
conn_rec *origin = p_conn->connection;
- const char *host, *val;
+ const char *host, *creds, *val;
proxy_dir_conf *dconf = ap_get_module_config(r->per_dir_config, &proxy_module);
/*
@@ -4128,6 +4606,11 @@ PROXY_DECLARE(int) ap_proxy_create_hdrbrgd(apr_pool_t *p,
apr_table_unset(r->headers_in, "If-None-Match");
}
+ creds = apr_table_get(r->notes, "proxy-basic-creds");
+ if (creds) {
+ apr_table_mergen(r->headers_in, "Proxy-Authorization", creds);
+ }
+
/* run hook to fixup the request we are about to send */
proxy_run_fixups(r);
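
The "proxy-basic-creds" note consumed above is merged verbatim into Proxy-Authorization. Purely as an illustration (the fixup, its configuration source and its placement are hypothetical; ap_pbase64encode() is the stock httpd helper), a module could pre-compute it like this:

    static int example_set_forward_proxy_creds(request_rec *r)
    {
        const char *userpass = "user:secret";  /* would come from configuration */
        char *enc = ap_pbase64encode(r->pool, (char *)userpass);
        apr_table_setn(r->notes, "proxy-basic-creds",
                       apr_pstrcat(r->pool, "Basic ", enc, NULL));
        return DECLINED;  /* keep running other fixups */
    }
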
@@ -4726,7 +5209,7 @@ PROXY_DECLARE(apr_status_t) ap_proxy_tunnel_create(proxy_tunnel_rec **ptunnel,
{
apr_status_t rv;
conn_rec *c_i = r->connection;
- apr_interval_time_t timeout = -1;
+ apr_interval_time_t client_timeout = -1, origin_timeout = -1;
proxy_tunnel_rec *tunnel;
*ptunnel = NULL;
@@ -4753,9 +5236,16 @@ PROXY_DECLARE(apr_status_t) ap_proxy_tunnel_create(proxy_tunnel_rec **ptunnel,
tunnel->client->bb = apr_brigade_create(c_i->pool, c_i->bucket_alloc);
tunnel->client->pfd = &APR_ARRAY_PUSH(tunnel->pfds, apr_pollfd_t);
tunnel->client->pfd->p = r->pool;
- tunnel->client->pfd->desc_type = APR_POLL_SOCKET;
- tunnel->client->pfd->desc.s = ap_get_conn_socket(c_i);
+ tunnel->client->pfd->desc_type = APR_NO_DESC;
+ rv = ap_get_pollfd_from_conn(tunnel->client->c,
+ tunnel->client->pfd, &client_timeout);
+ if (rv != APR_SUCCESS) {
+ return rv;
+ }
tunnel->client->pfd->client_data = tunnel->client;
+ if (tunnel->client->pfd->desc_type == APR_POLL_SOCKET) {
+ apr_socket_opt_set(tunnel->client->pfd->desc.s, APR_SO_NONBLOCK, 1);
+ }
tunnel->origin->c = c_o;
tunnel->origin->name = "origin";
@@ -4765,18 +5255,13 @@ PROXY_DECLARE(apr_status_t) ap_proxy_tunnel_create(proxy_tunnel_rec **ptunnel,
tunnel->origin->pfd->desc_type = APR_POLL_SOCKET;
tunnel->origin->pfd->desc.s = ap_get_conn_socket(c_o);
tunnel->origin->pfd->client_data = tunnel->origin;
-
- /* Defaults to the biggest timeout of both connections */
- apr_socket_timeout_get(tunnel->client->pfd->desc.s, &timeout);
- apr_socket_timeout_get(tunnel->origin->pfd->desc.s, &tunnel->timeout);
- if (timeout >= 0 && (tunnel->timeout < 0 || tunnel->timeout < timeout)) {
- tunnel->timeout = timeout;
- }
-
- /* We should be nonblocking from now on the sockets */
- apr_socket_opt_set(tunnel->client->pfd->desc.s, APR_SO_NONBLOCK, 1);
+ apr_socket_timeout_get(tunnel->origin->pfd->desc.s, &origin_timeout);
apr_socket_opt_set(tunnel->origin->pfd->desc.s, APR_SO_NONBLOCK, 1);
+ /* Defaults to the largest timeout of both connections */
+ tunnel->timeout = (client_timeout >= 0 && client_timeout > origin_timeout ?
+ client_timeout : origin_timeout);
+
/* No coalescing filters */
ap_remove_output_filter_byhandle(c_i->output_filters,
"SSL/TLS Coalescing Filter");
@@ -4799,14 +5284,43 @@ PROXY_DECLARE(apr_status_t) ap_proxy_tunnel_create(proxy_tunnel_rec **ptunnel,
tunnel->nohalfclose = 1;
}
- /* Start with POLLOUT and let ap_proxy_tunnel_run() schedule both
- * directions when there are no output data pending (anymore).
- */
- tunnel->client->pfd->reqevents = APR_POLLOUT | APR_POLLERR;
- tunnel->origin->pfd->reqevents = APR_POLLOUT | APR_POLLERR;
- if ((rv = apr_pollset_add(tunnel->pollset, tunnel->client->pfd))
- || (rv = apr_pollset_add(tunnel->pollset, tunnel->origin->pfd))) {
- return rv;
+ if (tunnel->client->pfd->desc_type == APR_POLL_SOCKET) {
+ /* Both ends are sockets, the poll strategy is:
+ * - poll both sides POLLOUT
+ * - when one side is writable, remove the POLLOUT
+ * and add POLLIN to the other side.
+ * - tunnel arriving data, remove POLLIN from the source
+ * again and add POLLOUT to the receiving side
+ * - on EOF on read, remove the POLLIN from that side
+ * Repeat until both sides are down */
+ tunnel->client->pfd->reqevents = APR_POLLOUT | APR_POLLERR;
+ tunnel->origin->pfd->reqevents = APR_POLLOUT | APR_POLLERR;
+ if ((rv = apr_pollset_add(tunnel->pollset, tunnel->origin->pfd)) ||
+ (rv = apr_pollset_add(tunnel->pollset, tunnel->client->pfd))) {
+ return rv;
+ }
+ }
+ else if (tunnel->client->pfd->desc_type == APR_POLL_FILE) {
+ /* Input is a PIPE fd, the poll strategy is:
+ * - always POLLIN on origin
+ * - use the socket strategy described above for the client side only,
+ * otherwise the same
+ */
+ tunnel->client->pfd->reqevents = 0;
+ tunnel->origin->pfd->reqevents = APR_POLLIN | APR_POLLHUP |
+ APR_POLLOUT | APR_POLLERR;
+ if ((rv = apr_pollset_add(tunnel->pollset, tunnel->origin->pfd))) {
+ return rv;
+ }
+ }
+ else {
+ /* input is already closed, unusual, but we know nothing about
+ * the tunneled protocol. */
+ tunnel->client->down_in = 1;
+ tunnel->origin->pfd->reqevents = APR_POLLIN | APR_POLLHUP;
+ if ((rv = apr_pollset_add(tunnel->pollset, tunnel->origin->pfd))) {
+ return rv;
+ }
}
*ptunnel = tunnel;
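
ap_get_pollfd_from_conn() (available in 2.4.58+; the exact header placement is assumed here) lets the tunnel poll client connections that are not backed by a plain socket, e.g. HTTP/2 secondary connections. A reduced sketch of the pattern used above:

    static apr_status_t example_fill_pollfd(conn_rec *c, apr_pool_t *p,
                                            apr_pollfd_t *pfd)
    {
        apr_interval_time_t timeout = -1;
        apr_status_t rv;

        pfd->p = p;
        pfd->reqevents = 0;
        pfd->rtnevents = 0;
        pfd->client_data = NULL;
        pfd->desc_type = APR_NO_DESC;

        rv = ap_get_pollfd_from_conn(c, pfd, &timeout);
        if (rv != APR_SUCCESS) {
            return rv;
        }
        if (pfd->desc_type == APR_POLL_SOCKET) {
            /* a raw socket: make it non-blocking for event-driven I/O */
            apr_socket_opt_set(pfd->desc.s, APR_SO_NONBLOCK, 1);
        }
        /* APR_POLL_FILE (a pipe) or APR_NO_DESC take the special
         * branches shown in ap_proxy_tunnel_create() above. */
        return APR_SUCCESS;
    }
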
@@ -4918,7 +5432,23 @@ static int proxy_tunnel_forward(proxy_tunnel_rec *tunnel,
}
del_pollset(tunnel->pollset, in->pfd, APR_POLLIN);
- add_pollset(tunnel->pollset, out->pfd, APR_POLLOUT);
+ if (out->pfd->desc_type == APR_POLL_SOCKET) {
+ /* if the output is a SOCKET, we can stop polling the input
+ * until the output signals POLLOUT again. */
+ add_pollset(tunnel->pollset, out->pfd, APR_POLLOUT);
+ }
+ else {
+ /* We can't use POLLOUT in this direction for the only
+ * APR_POLL_FILE case we have so far (mod_h2's "signal" pipe);
+ * we assume that the client's output filter chain will block/flush
+ * if necessary (i.e. no pending data), hence that the origin
+ * is EOF when reaching here. This direction is over. */
+ ap_assert(in->down_in && APR_STATUS_IS_EOF(rv));
+ ap_log_rerror(APLOG_MARK, APLOG_TRACE3, 0, tunnel->r,
+ "proxy: %s: %s write shutdown",
+ tunnel->scheme, out->name);
+ out->down_out = 1;
+ }
}
return OK;
@@ -5107,4 +5637,14 @@ void proxy_util_register_hooks(apr_pool_t *p)
APR_REGISTER_OPTIONAL_FN(ap_proxy_retry_worker);
APR_REGISTER_OPTIONAL_FN(ap_proxy_clear_connection);
APR_REGISTER_OPTIONAL_FN(proxy_balancer_get_best_worker);
+
+ {
+ apr_time_t *start_time = ap_retained_data_get("proxy_start_time");
+ if (start_time == NULL) {
+ start_time = ap_retained_data_create("proxy_start_time",
+ sizeof(*start_time));
+ *start_time = apr_time_now();
+ }
+ proxy_start_time = start_time;
+ }
}
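
The retained "proxy_start_time" above provides an epoch that survives graceful restarts. A one-liner, for illustration only, shows how it maps onto the 32-bit relative seconds stored in proxy_address.expiry:

    static apr_uint32_t example_seconds_since_proxy_start(void)
    {
        /* proxy_start_time is the static pointer initialized above */
        return (apr_uint32_t)apr_time_sec(apr_time_now() - *proxy_start_time);
    }
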
diff --git a/modules/slotmem/mod_slotmem_shm.c b/modules/slotmem/mod_slotmem_shm.c
index f4eaa84..4d14faf 100644
--- a/modules/slotmem/mod_slotmem_shm.c
+++ b/modules/slotmem/mod_slotmem_shm.c
@@ -92,7 +92,7 @@ static int slotmem_filenames(apr_pool_t *pool,
const char *fname = NULL, *pname = NULL;
if (slotname && *slotname && strcasecmp(slotname, "none") != 0) {
- if (slotname[0] != '/') {
+ if (!ap_os_is_path_absolute(pool, slotname)) {
/* Each generation needs its own file name. */
int generation = 0;
ap_mpm_query(AP_MPMQ_GENERATION, &generation);
@@ -109,7 +109,7 @@ static int slotmem_filenames(apr_pool_t *pool,
if (persistname) {
/* Persisted file names are immutable... */
- if (slotname[0] != '/') {
+ if (!ap_os_is_path_absolute(pool, slotname)) {
pname = apr_pstrcat(pool, DEFAULT_SLOTMEM_PREFIX,
slotname, DEFAULT_SLOTMEM_SUFFIX,
DEFAULT_SLOTMEM_PERSIST_SUFFIX,
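
The switch to ap_os_is_path_absolute() matters on non-Unix platforms where absolute paths do not start with '/'. A minimal sketch of the portable check (the helper name and fallback below are illustrative, not from the patch):

    static const char *example_slotmem_path(apr_pool_t *p, const char *slotname)
    {
        if (!ap_os_is_path_absolute(p, slotname)) {
            /* e.g. "C:\..." or "\\host\share\..." is already absolute on
             * Windows even though slotname[0] != '/' */
            return ap_runtime_dir_relative(p, slotname);
        }
        return slotname;
    }
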
diff --git a/modules/ssl/mod_ssl.c b/modules/ssl/mod_ssl.c
index 5b8c4d5..fb66d18 100644
--- a/modules/ssl/mod_ssl.c
+++ b/modules/ssl/mod_ssl.c
@@ -25,8 +25,7 @@
*/
#include "ssl_private.h"
-#include "mod_ssl.h"
-#include "mod_ssl_openssl.h"
+
#include "util_md5.h"
#include "util_mutex.h"
#include "ap_provider.h"
@@ -75,11 +74,9 @@ static const command_rec ssl_config_cmds[] = {
SSL_CMD_SRV(SessionCache, TAKE1,
"SSL Session Cache storage "
"('none', 'nonenotnull', 'dbm:/path/to/file')")
-#if defined(HAVE_OPENSSL_ENGINE_H) && defined(HAVE_ENGINE_INIT)
SSL_CMD_SRV(CryptoDevice, TAKE1,
"SSL external Crypto Device usage "
"('builtin', '...')")
-#endif
SSL_CMD_SRV(RandomSeed, TAKE23,
"SSL Pseudo Random Number Generator (PRNG) seeding source "
"('startup|connect builtin|file:/path|exec:/path [bytes]')")
diff --git a/modules/ssl/mod_ssl_openssl.h b/modules/ssl/mod_ssl_openssl.h
index d4f684f..e251bd9 100644
--- a/modules/ssl/mod_ssl_openssl.h
+++ b/modules/ssl/mod_ssl_openssl.h
@@ -30,14 +30,17 @@
/* OpenSSL headers */
-#ifndef SSL_PRIVATE_H
#include <openssl/opensslv.h>
-#if (OPENSSL_VERSION_NUMBER >= 0x10001000)
+#if OPENSSL_VERSION_NUMBER >= 0x30000000
+#include <openssl/macros.h> /* for OPENSSL_API_LEVEL */
+#endif
+#if OPENSSL_VERSION_NUMBER >= 0x10001000
/* must be defined before including ssl.h */
#define OPENSSL_NO_SSL_INTERN
#endif
#include <openssl/ssl.h>
-#endif
+#include <openssl/evp.h>
+#include <openssl/x509.h>
/**
* init_server hook -- allow SSL_CTX-specific initialization to be performed by
diff --git a/modules/ssl/ssl_engine_config.c b/modules/ssl/ssl_engine_config.c
index de18b8f..9af6f70 100644
--- a/modules/ssl/ssl_engine_config.c
+++ b/modules/ssl/ssl_engine_config.c
@@ -27,6 +27,7 @@
damned if you don't.''
-- Unknown */
#include "ssl_private.h"
+
#include "util_mutex.h"
#include "ap_provider.h"
@@ -592,14 +593,15 @@ const char *ssl_cmd_SSLPassPhraseDialog(cmd_parms *cmd,
return NULL;
}
-#if defined(HAVE_OPENSSL_ENGINE_H) && defined(HAVE_ENGINE_INIT)
const char *ssl_cmd_SSLCryptoDevice(cmd_parms *cmd,
void *dcfg,
const char *arg)
{
SSLModConfigRec *mc = myModConfig(cmd->server);
const char *err;
+#if MODSSL_HAVE_ENGINE_API
ENGINE *e;
+#endif
if ((err = ap_check_cmd_context(cmd, GLOBAL_ONLY))) {
return err;
@@ -608,13 +610,16 @@ const char *ssl_cmd_SSLCryptoDevice(cmd_parms *cmd,
if (strcEQ(arg, "builtin")) {
mc->szCryptoDevice = NULL;
}
+#if MODSSL_HAVE_ENGINE_API
else if ((e = ENGINE_by_id(arg))) {
mc->szCryptoDevice = arg;
ENGINE_free(e);
}
+#endif
else {
err = "SSLCryptoDevice: Invalid argument; must be one of: "
"'builtin' (none)";
+#if MODSSL_HAVE_ENGINE_API
e = ENGINE_get_first();
while (e) {
err = apr_pstrcat(cmd->pool, err, ", '", ENGINE_get_id(e),
@@ -623,12 +628,12 @@ const char *ssl_cmd_SSLCryptoDevice(cmd_parms *cmd,
* on the 'old' e, per the docs in engine.h. */
e = ENGINE_get_next(e);
}
+#endif
return err;
}
return NULL;
}
-#endif
const char *ssl_cmd_SSLRandomSeed(cmd_parms *cmd,
void *dcfg,
@@ -856,10 +861,12 @@ const char *ssl_cmd_SSLCompression(cmd_parms *cmd, void *dcfg, int flag)
}
}
sc->compression = flag ? TRUE : FALSE;
- return NULL;
#else
- return "Setting Compression mode unsupported; not implemented by the SSL library";
+ if (flag) {
+ return "Setting Compression mode unsupported; not implemented by the SSL library";
+ }
#endif
+ return NULL;
}
const char *ssl_cmd_SSLHonorCipherOrder(cmd_parms *cmd, void *dcfg, int flag)
diff --git a/modules/ssl/ssl_engine_init.c b/modules/ssl/ssl_engine_init.c
index 825621d..c2ec048 100644
--- a/modules/ssl/ssl_engine_init.c
+++ b/modules/ssl/ssl_engine_init.c
@@ -27,8 +27,7 @@
see Recursive.''
-- Unknown */
#include "ssl_private.h"
-#include "mod_ssl.h"
-#include "mod_ssl_openssl.h"
+
#include "mpm_common.h"
#include "mod_md.h"
@@ -218,6 +217,16 @@ static apr_status_t modssl_fips_cleanup(void *data)
}
#endif
+static APR_INLINE unsigned long modssl_runtime_lib_version(void)
+{
+#if MODSSL_USE_OPENSSL_PRE_1_1_API
+ return SSLeay();
+#else
+ return OpenSSL_version_num();
+#endif
+}
+
+
/*
* Per-module initialization
*/
@@ -225,18 +234,22 @@ apr_status_t ssl_init_Module(apr_pool_t *p, apr_pool_t *plog,
apr_pool_t *ptemp,
server_rec *base_server)
{
+ unsigned long runtime_lib_version = modssl_runtime_lib_version();
SSLModConfigRec *mc = myModConfig(base_server);
SSLSrvConfigRec *sc;
server_rec *s;
apr_status_t rv;
apr_array_header_t *pphrases;
- if (SSLeay() < MODSSL_LIBRARY_VERSION) {
+ AP_DEBUG_ASSERT(mc);
+
+ if (runtime_lib_version < MODSSL_LIBRARY_VERSION) {
ap_log_error(APLOG_MARK, APLOG_WARNING, 0, base_server, APLOGNO(01882)
"Init: this version of mod_ssl was compiled against "
- "a newer library (%s, version currently loaded is %s)"
+ "a newer library (%s (%s), version currently loaded is 0x%lX)"
" - may result in undefined or erroneous behavior",
- MODSSL_LIBRARY_TEXT, MODSSL_LIBRARY_DYNTEXT);
+ MODSSL_LIBRARY_TEXT, MODSSL_LIBRARY_DYNTEXT,
+ runtime_lib_version);
}
/* We initialize mc->pid per-process in the child init,
@@ -313,11 +326,9 @@ apr_status_t ssl_init_Module(apr_pool_t *p, apr_pool_t *plog,
/*
* SSL external crypto device ("engine") support
*/
-#if defined(HAVE_OPENSSL_ENGINE_H) && defined(HAVE_ENGINE_INIT)
if ((rv = ssl_init_Engine(base_server, p)) != APR_SUCCESS) {
return rv;
}
-#endif
ap_log_error(APLOG_MARK, APLOG_INFO, 0, base_server, APLOGNO(01883)
"Init: Initialized %s library", MODSSL_LIBRARY_NAME);
@@ -473,9 +484,9 @@ apr_status_t ssl_init_Module(apr_pool_t *p, apr_pool_t *plog,
* Support for external a Crypto Device ("engine"), usually
* a hardware accelerator card for crypto operations.
*/
-#if defined(HAVE_OPENSSL_ENGINE_H) && defined(HAVE_ENGINE_INIT)
apr_status_t ssl_init_Engine(server_rec *s, apr_pool_t *p)
{
+#if MODSSL_HAVE_ENGINE_API
SSLModConfigRec *mc = myModConfig(s);
ENGINE *e;
@@ -507,10 +518,9 @@ apr_status_t ssl_init_Engine(server_rec *s, apr_pool_t *p)
ENGINE_free(e);
}
-
+#endif
return APR_SUCCESS;
}
-#endif
#ifdef HAVE_TLSEXT
static apr_status_t ssl_init_ctx_tls_extensions(server_rec *s,
@@ -833,6 +843,14 @@ static apr_status_t ssl_init_ctx_protocol(server_rec *s,
SSL_CTX_set_keylog_callback(ctx, modssl_callback_keylog);
}
#endif
+
+#ifdef SSL_OP_IGNORE_UNEXPECTED_EOF
+ /* For server-side SSL_CTX, enable ignoring unexpected EOF */
+ /* (OpenSSL 1.1.1 behavioural compatibility). */
+ if (!mctx->pkp) {
+ SSL_CTX_set_options(ctx, SSL_OP_IGNORE_UNEXPECTED_EOF);
+ }
+#endif
return APR_SUCCESS;
}
@@ -1302,15 +1320,6 @@ static int ssl_no_passwd_prompt_cb(char *buf, int size, int rwflag,
return 0;
}
-static APR_INLINE int modssl_DH_bits(DH *dh)
-{
-#if OPENSSL_VERSION_NUMBER < 0x30000000L
- return DH_bits(dh);
-#else
- return BN_num_bits(DH_get0_p(dh));
-#endif
-}
-
/* SSL_CTX_use_PrivateKey_file() can fail either because the private
* key was encrypted, or due to a mismatch between an already-loaded
* cert and the key - a common misconfiguration - from calling
@@ -1336,15 +1345,10 @@ static apr_status_t ssl_init_server_certs(server_rec *s,
SSLModConfigRec *mc = myModConfig(s);
const char *vhost_id = mctx->sc->vhost_id, *key_id, *certfile, *keyfile;
int i;
- X509 *cert;
- DH *dh;
+ EVP_PKEY *pkey;
#ifdef HAVE_ECC
- EC_GROUP *ecparams = NULL;
- int nid;
- EC_KEY *eckey = NULL;
-#endif
-#ifndef HAVE_SSL_CONF_CMD
- SSL *ssl;
+ EC_GROUP *ecgroup = NULL;
+ int curve_nid = 0;
#endif
/* no OpenSSL default prompts for any of the SSL_CTX_use_* calls, please */
@@ -1355,7 +1359,7 @@ static apr_status_t ssl_init_server_certs(server_rec *s,
(certfile = APR_ARRAY_IDX(mctx->pks->cert_files, i,
const char *));
i++) {
- EVP_PKEY *pkey;
+ X509 *cert = NULL;
const char *engine_certfile = NULL;
key_id = apr_psprintf(ptemp, "%s:%d", vhost_id, i);
@@ -1398,8 +1402,6 @@ static apr_status_t ssl_init_server_certs(server_rec *s,
if (modssl_is_engine_id(keyfile)) {
apr_status_t rv;
- cert = NULL;
-
if ((rv = modssl_load_engine_keypair(s, ptemp, vhost_id,
engine_certfile, keyfile,
&cert, &pkey))) {
@@ -1470,22 +1472,21 @@ static apr_status_t ssl_init_server_certs(server_rec *s,
* assume that if SSL_CONF is available, it's OpenSSL 1.0.2 or later,
* and SSL_CTX_get0_certificate is implemented.)
*/
- if (!(cert = SSL_CTX_get0_certificate(mctx->ssl_ctx))) {
+ cert = SSL_CTX_get0_certificate(mctx->ssl_ctx);
#else
- ssl = SSL_new(mctx->ssl_ctx);
- if (ssl) {
- /* Workaround bug in SSL_get_certificate in OpenSSL 0.9.8y */
- SSL_set_connect_state(ssl);
- cert = SSL_get_certificate(ssl);
+ {
+ SSL *ssl = SSL_new(mctx->ssl_ctx);
+ if (ssl) {
+ /* Workaround bug in SSL_get_certificate in OpenSSL 0.9.8y */
+ SSL_set_connect_state(ssl);
+ cert = SSL_get_certificate(ssl);
+ SSL_free(ssl);
+ }
}
- if (!ssl || !cert) {
#endif
+ if (!cert) {
ap_log_error(APLOG_MARK, APLOG_ERR, 0, s, APLOGNO(02566)
"Unable to retrieve certificate %s", key_id);
-#ifndef HAVE_SSL_CONF_CMD
- if (ssl)
- SSL_free(ssl);
-#endif
return APR_EGENERAL;
}
@@ -1507,10 +1508,6 @@ static apr_status_t ssl_init_server_certs(server_rec *s,
}
#endif
-#ifndef HAVE_SSL_CONF_CMD
- SSL_free(ssl);
-#endif
-
ap_log_error(APLOG_MARK, APLOG_INFO, 0, s, APLOGNO(02568)
"Certificate and private key %s configured from %s and %s",
key_id, certfile, keyfile);
@@ -1520,15 +1517,33 @@ static apr_status_t ssl_init_server_certs(server_rec *s,
* Try to read DH parameters from the (first) SSLCertificateFile
*/
certfile = APR_ARRAY_IDX(mctx->pks->cert_files, 0, const char *);
- if (certfile && !modssl_is_engine_id(certfile)
- && (dh = ssl_dh_GetParamFromFile(certfile))) {
- /* ### This should be replaced with SSL_CTX_set0_tmp_dh_pkey()
- * for OpenSSL 3.0+. */
- SSL_CTX_set_tmp_dh(mctx->ssl_ctx, dh);
- ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s, APLOGNO(02540)
- "Custom DH parameters (%d bits) for %s loaded from %s",
- modssl_DH_bits(dh), vhost_id, certfile);
- DH_free(dh);
+ if (certfile && !modssl_is_engine_id(certfile)) {
+ int done = 0, num_bits = 0;
+#if OPENSSL_VERSION_NUMBER < 0x30000000L
+ DH *dh = modssl_dh_from_file(certfile);
+ if (dh) {
+ num_bits = DH_bits(dh);
+ SSL_CTX_set_tmp_dh(mctx->ssl_ctx, dh);
+ DH_free(dh);
+ done = 1;
+ }
+#else
+ pkey = modssl_dh_pkey_from_file(certfile);
+ if (pkey) {
+ num_bits = EVP_PKEY_get_bits(pkey);
+ if (!SSL_CTX_set0_tmp_dh_pkey(mctx->ssl_ctx, pkey)) {
+ EVP_PKEY_free(pkey);
+ }
+ else {
+ done = 1;
+ }
+ }
+#endif
+ if (done) {
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s, APLOGNO(02540)
+ "Custom DH parameters (%d bits) for %s loaded from %s",
+ num_bits, vhost_id, certfile);
+ }
}
#if !MODSSL_USE_OPENSSL_PRE_1_1_API
else {
@@ -1543,13 +1558,27 @@ static apr_status_t ssl_init_server_certs(server_rec *s,
* Similarly, try to read the ECDH curve name from SSLCertificateFile...
*/
if (certfile && !modssl_is_engine_id(certfile)
- && (ecparams = ssl_ec_GetParamFromFile(certfile))
- && (nid = EC_GROUP_get_curve_name(ecparams))
- && (eckey = EC_KEY_new_by_curve_name(nid))) {
- SSL_CTX_set_tmp_ecdh(mctx->ssl_ctx, eckey);
- ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s, APLOGNO(02541)
- "ECDH curve %s for %s specified in %s",
- OBJ_nid2sn(nid), vhost_id, certfile);
+ && (ecgroup = modssl_ec_group_from_file(certfile))
+ && (curve_nid = EC_GROUP_get_curve_name(ecgroup))) {
+#if OPENSSL_VERSION_NUMBER < 0x30000000L
+ EC_KEY *eckey = EC_KEY_new_by_curve_name(curve_nid);
+ if (eckey) {
+ SSL_CTX_set_tmp_ecdh(mctx->ssl_ctx, eckey);
+ EC_KEY_free(eckey);
+ }
+ else {
+ curve_nid = 0;
+ }
+#else
+ if (!SSL_CTX_set1_curves(mctx->ssl_ctx, &curve_nid, 1)) {
+ curve_nid = 0;
+ }
+#endif
+ if (curve_nid) {
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s, APLOGNO(02541)
+ "ECDH curve %s for %s specified in %s",
+ OBJ_nid2sn(curve_nid), vhost_id, certfile);
+ }
}
/*
* ...otherwise, enable auto curve selection (OpenSSL 1.0.2)
@@ -1557,18 +1586,20 @@ static apr_status_t ssl_init_server_certs(server_rec *s,
* ECDH is always enabled in 1.1.0 unless excluded from SSLCipherList
*/
#if MODSSL_USE_OPENSSL_PRE_1_1_API
- else {
+ if (!curve_nid) {
#if defined(SSL_CTX_set_ecdh_auto)
SSL_CTX_set_ecdh_auto(mctx->ssl_ctx, 1);
#else
- eckey = EC_KEY_new_by_curve_name(NID_X9_62_prime256v1);
- SSL_CTX_set_tmp_ecdh(mctx->ssl_ctx, eckey);
+ EC_KEY *eckey = EC_KEY_new_by_curve_name(NID_X9_62_prime256v1);
+ if (eckey) {
+ SSL_CTX_set_tmp_ecdh(mctx->ssl_ctx, eckey);
+ EC_KEY_free(eckey);
+ }
#endif
}
#endif
/* OpenSSL assures us that _free() is NULL-safe */
- EC_KEY_free(eckey);
- EC_GROUP_free(ecparams);
+ EC_GROUP_free(ecgroup);
#endif
return APR_SUCCESS;
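
Similarly, a hedged sketch of the ECDH-curve handling in the hunks above, kept separate from mod_ssl's logging; install_ecdh_curve is an illustrative name:

    /* Sketch: apply an ECDH curve read from a certificate/parameter file.
     * Returns the curve NID on success, 0 otherwise. */
    static int install_ecdh_curve(SSL_CTX *ctx, const char *path)
    {
        int nid = 0;
        EC_GROUP *group = modssl_ec_group_from_file(path);
        if (group) {
            nid = EC_GROUP_get_curve_name(group);
    #if OPENSSL_VERSION_NUMBER < 0x30000000L
            if (nid) {
                EC_KEY *eckey = EC_KEY_new_by_curve_name(nid);
                if (eckey) {
                    SSL_CTX_set_tmp_ecdh(ctx, eckey);
                    EC_KEY_free(eckey);
                }
                else {
                    nid = 0;
                }
            }
    #else
            if (nid && !SSL_CTX_set1_curves(ctx, &nid, 1))
                nid = 0;
    #endif
            EC_GROUP_free(group);
        }
        return nid;
    }
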
@@ -1680,8 +1711,12 @@ static apr_status_t ssl_init_proxy_certs(server_rec *s,
STACK_OF(X509) *chain;
X509_STORE_CTX *sctx;
X509_STORE *store = SSL_CTX_get_cert_store(mctx->ssl_ctx);
+ int addl_chain = 0; /* non-zero if additional chain certs were
+ * added to store */
-#if OPENSSL_VERSION_NUMBER >= 0x1010100fL
+ ap_assert(store != NULL); /* safe to assume always non-NULL? */
+
+#if OPENSSL_VERSION_NUMBER >= 0x1010100fL && !defined(LIBRESSL_VERSION_NUMBER)
/* For OpenSSL >=1.1.1, turn on client cert support which is
* otherwise turned off by default (by design).
* https://github.com/openssl/openssl/issues/6933 */
@@ -1705,20 +1740,28 @@ static apr_status_t ssl_init_proxy_certs(server_rec *s,
ssl_init_ca_cert_path(s, ptemp, pkp->cert_path, NULL, sk);
}
- if ((ncerts = sk_X509_INFO_num(sk)) <= 0) {
- sk_X509_INFO_free(sk);
- ap_log_error(APLOG_MARK, APLOG_WARNING, 0, s, APLOGNO(02206)
- "no client certs found for SSL proxy");
- return APR_SUCCESS;
- }
-
/* Check that all client certs have got certificates and private
- * keys. */
- for (n = 0; n < ncerts; n++) {
+ * keys. Note the number of certs in the stack may decrease
+ * during the loop. */
+ for (n = 0; n < sk_X509_INFO_num(sk); n++) {
X509_INFO *inf = sk_X509_INFO_value(sk, n);
+ int has_privkey = inf->x_pkey && inf->x_pkey->dec_pkey;
- if (!inf->x509 || !inf->x_pkey || !inf->x_pkey->dec_pkey ||
- inf->enc_data) {
+ /* For a lone certificate in the file, trust it as a
+ * CA/intermediate certificate. */
+ if (inf->x509 && !has_privkey && !inf->enc_data) {
+ ssl_log_xerror(SSLLOG_MARK, APLOG_DEBUG, 0, ptemp, s, inf->x509,
+ APLOGNO(10261) "Trusting non-leaf certificate");
+ X509_STORE_add_cert(store, inf->x509); /* increments inf->x509 */
+ /* Delete from the stack and iterate again. */
+ X509_INFO_free(inf);
+ sk_X509_INFO_delete(sk, n);
+ n--;
+ addl_chain = 1;
+ continue;
+ }
+
+ if (!has_privkey || inf->enc_data) {
sk_X509_INFO_free(sk);
ap_log_error(APLOG_MARK, APLOG_STARTUP, 0, s, APLOGNO(02252)
"incomplete client cert configured for SSL proxy "
@@ -1735,13 +1778,21 @@ static apr_status_t ssl_init_proxy_certs(server_rec *s,
}
}
+ if ((ncerts = sk_X509_INFO_num(sk)) <= 0) {
+ sk_X509_INFO_free(sk);
+ ap_log_error(APLOG_MARK, APLOG_WARNING, 0, s, APLOGNO(02206)
+ "no client certs found for SSL proxy");
+ return APR_SUCCESS;
+ }
+
ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s, APLOGNO(02207)
"loaded %d client certs for SSL proxy",
ncerts);
pkp->certs = sk;
-
- if (!pkp->ca_cert_file || !store) {
+ /* If any chain certs are configured, build the ->ca_certs chains
+ * corresponding to the loaded keypairs. */
+ if (!pkp->ca_cert_file && !addl_chain) {
return APR_SUCCESS;
}
@@ -1757,6 +1808,7 @@ static apr_status_t ssl_init_proxy_certs(server_rec *s,
ap_log_error(APLOG_MARK, APLOG_EMERG, 0, s, APLOGNO(02208)
"SSL proxy client cert initialization failed");
ssl_log_ssl_error(SSLLOG_MARK, APLOG_EMERG, s);
+ sk_X509_INFO_free(sk);
return ssl_die(s);
}
@@ -1766,7 +1818,11 @@ static apr_status_t ssl_init_proxy_certs(server_rec *s,
int i;
X509_INFO *inf = sk_X509_INFO_value(pkp->certs, n);
- X509_STORE_CTX_init(sctx, store, inf->x509, NULL);
+ if (!X509_STORE_CTX_init(sctx, store, inf->x509, NULL)) {
+ sk_X509_INFO_free(sk);
+ X509_STORE_CTX_free(sctx);
+ return ssl_die(s);
+ }
/* Attempt to verify the client cert */
if (X509_verify_cert(sctx) != 1) {
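
The X509_STORE_CTX_init error path added above follows the usual "release on every exit" pattern; a minimal sketch of that pattern for a single certificate (verify_proxy_cert is a hypothetical helper, not the module's code):

    /* Sketch: verify one proxy client certificate against a store,
     * freeing the verification context on every exit path. */
    static int verify_proxy_cert(X509_STORE *store, X509 *cert)
    {
        int ok = 0;
        X509_STORE_CTX *sctx = X509_STORE_CTX_new();
        if (!sctx)
            return 0;
        if (X509_STORE_CTX_init(sctx, store, cert, NULL)) {
            ok = (X509_verify_cert(sctx) == 1);
            X509_STORE_CTX_cleanup(sctx);
        }
        X509_STORE_CTX_free(sctx);
        return ok;
    }
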
@@ -2186,52 +2242,6 @@ int ssl_proxy_section_post_config(apr_pool_t *p, apr_pool_t *plog,
return OK;
}
-static int ssl_init_FindCAList_X509NameCmp(const X509_NAME * const *a,
- const X509_NAME * const *b)
-{
- return(X509_NAME_cmp(*a, *b));
-}
-
-static void ssl_init_PushCAList(STACK_OF(X509_NAME) *ca_list,
- server_rec *s, apr_pool_t *ptemp,
- const char *file)
-{
- int n;
- STACK_OF(X509_NAME) *sk;
-
- sk = (STACK_OF(X509_NAME) *)
- SSL_load_client_CA_file(file);
-
- if (!sk) {
- return;
- }
-
- for (n = 0; n < sk_X509_NAME_num(sk); n++) {
- X509_NAME *name = sk_X509_NAME_value(sk, n);
-
- ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s, APLOGNO(02209)
- "CA certificate: %s",
- modssl_X509_NAME_to_string(ptemp, name, 0));
-
- /*
- * note that SSL_load_client_CA_file() checks for duplicates,
- * but since we call it multiple times when reading a directory
- * we must also check for duplicates ourselves.
- */
-
- if (sk_X509_NAME_find(ca_list, name) < 0) {
- /* this will be freed when ca_list is */
- sk_X509_NAME_push(ca_list, name);
- }
- else {
- /* need to free this ourselves, else it will leak */
- X509_NAME_free(name);
- }
- }
-
- sk_X509_NAME_free(sk);
-}
-
static apr_status_t ssl_init_ca_cert_path(server_rec *s,
apr_pool_t *ptemp,
const char *path,
@@ -2254,7 +2264,7 @@ static apr_status_t ssl_init_ca_cert_path(server_rec *s,
}
file = apr_pstrcat(ptemp, path, "/", direntry.name, NULL);
if (ca_list) {
- ssl_init_PushCAList(ca_list, s, ptemp, file);
+ SSL_add_file_cert_subjects_to_stack(ca_list, file);
}
if (xi_list) {
load_x509_info(ptemp, xi_list, file);
@@ -2271,19 +2281,13 @@ STACK_OF(X509_NAME) *ssl_init_FindCAList(server_rec *s,
const char *ca_file,
const char *ca_path)
{
- STACK_OF(X509_NAME) *ca_list;
-
- /*
- * Start with a empty stack/list where new
- * entries get added in sorted order.
- */
- ca_list = sk_X509_NAME_new(ssl_init_FindCAList_X509NameCmp);
+ STACK_OF(X509_NAME) *ca_list = sk_X509_NAME_new_null();
/*
* Process CA certificate bundle file
*/
if (ca_file) {
- ssl_init_PushCAList(ca_list, s, ptemp, ca_file);
+ SSL_add_file_cert_subjects_to_stack(ca_list, ca_file);
/*
* If ca_list is still empty after trying to load ca_file
* then the file failed to load, and users should hear about that.
@@ -2307,11 +2311,6 @@ STACK_OF(X509_NAME) *ssl_init_FindCAList(server_rec *s,
return NULL;
}
- /*
- * Cleanup
- */
- (void) sk_X509_NAME_set_cmp_func(ca_list, NULL);
-
return ca_list;
}
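
With ssl_init_PushCAList gone, duplicate handling is left to SSL_add_file_cert_subjects_to_stack(), as the removal of the manual find/push loop above implies. A minimal sketch of the resulting flow, with load_ca_names as a hypothetical stand-in for ssl_init_FindCAList's core:

    /* Sketch: build a client-CA name list from a bundle file using the
     * library helper the simplified code now relies on. */
    static STACK_OF(X509_NAME) *load_ca_names(const char *ca_file)
    {
        STACK_OF(X509_NAME) *ca_list = sk_X509_NAME_new_null();
        if (!ca_list)
            return NULL;
        if (ca_file)
            SSL_add_file_cert_subjects_to_stack(ca_list, ca_file);
        if (sk_X509_NAME_num(ca_list) == 0) {
            sk_X509_NAME_free(ca_list);   /* nothing usable was loaded */
            return NULL;
        }
        return ca_list;
    }
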
diff --git a/modules/ssl/ssl_engine_io.c b/modules/ssl/ssl_engine_io.c
index f14fc9b..b91f784 100644
--- a/modules/ssl/ssl_engine_io.c
+++ b/modules/ssl/ssl_engine_io.c
@@ -28,8 +28,7 @@
core keeps dumping.''
-- Unknown */
#include "ssl_private.h"
-#include "mod_ssl.h"
-#include "mod_ssl_openssl.h"
+
#include "apr_date.h"
APR_IMPLEMENT_OPTIONAL_HOOK_RUN_ALL(ssl, SSL, int, proxy_post_handshake,
@@ -2283,14 +2282,7 @@ void ssl_io_filter_init(conn_rec *c, request_rec *r, SSL *ssl)
ssl_io_filter_cleanup, apr_pool_cleanup_null);
if (APLOG_CS_IS_LEVEL(c, mySrvFromConn(c), APLOG_TRACE4)) {
- BIO *rbio = SSL_get_rbio(ssl),
- *wbio = SSL_get_wbio(ssl);
- BIO_set_callback(rbio, ssl_io_data_cb);
- BIO_set_callback_arg(rbio, (void *)ssl);
- if (wbio && wbio != rbio) {
- BIO_set_callback(wbio, ssl_io_data_cb);
- BIO_set_callback_arg(wbio, (void *)ssl);
- }
+ modssl_set_io_callbacks(ssl);
}
return;
@@ -2374,13 +2366,22 @@ static void ssl_io_data_dump(conn_rec *c, server_rec *s,
"+-------------------------------------------------------------------------+");
}
-long ssl_io_data_cb(BIO *bio, int cmd,
- const char *argp,
- int argi, long argl, long rc)
+#if OPENSSL_VERSION_NUMBER >= 0x30000000L
+static long modssl_io_cb(BIO *bio, int cmd, const char *argp,
+ size_t len, int argi, long argl, int rc,
+ size_t *processed)
+#else
+static long modssl_io_cb(BIO *bio, int cmd, const char *argp,
+ int argi, long argl, long rc)
+#endif
{
SSL *ssl;
conn_rec *c;
server_rec *s;
+#if OPENSSL_VERSION_NUMBER >= 0x30000000L
+ (void)len;
+ (void)processed;
+#endif
if ((ssl = (SSL *)BIO_get_callback_arg(bio)) == NULL)
return rc;
@@ -2402,7 +2403,7 @@ long ssl_io_data_cb(BIO *bio, int cmd,
"%s: %s %ld/%d bytes %s BIO#%pp [mem: %pp] %s",
MODSSL_LIBRARY_NAME,
(cmd == (BIO_CB_WRITE|BIO_CB_RETURN) ? "write" : "read"),
- rc, argi, (cmd == (BIO_CB_WRITE|BIO_CB_RETURN) ? "to" : "from"),
+ (long)rc, argi, (cmd == (BIO_CB_WRITE|BIO_CB_RETURN) ? "to" : "from"),
bio, argp, dump);
if (*dump != '\0' && argp != NULL)
ssl_io_data_dump(c, s, argp, rc);
@@ -2417,3 +2418,25 @@ long ssl_io_data_cb(BIO *bio, int cmd,
}
return rc;
}
+
+static APR_INLINE void set_bio_callback(BIO *bio, void *arg)
+{
+#if OPENSSL_VERSION_NUMBER >= 0x30000000L
+ BIO_set_callback_ex(bio, modssl_io_cb);
+#else
+ BIO_set_callback(bio, modssl_io_cb);
+#endif
+ BIO_set_callback_arg(bio, arg);
+}
+
+void modssl_set_io_callbacks(SSL *ssl)
+{
+ BIO *rbio = SSL_get_rbio(ssl),
+ *wbio = SSL_get_wbio(ssl);
+ if (rbio) {
+ set_bio_callback(rbio, ssl);
+ }
+ if (wbio && wbio != rbio) {
+ set_bio_callback(wbio, ssl);
+ }
+}
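
The two callback signatures consolidated by modssl_io_cb() differ between OpenSSL 3.0 (BIO_set_callback_ex) and earlier releases (BIO_set_callback). A minimal, self-contained sketch of that shape, independent of mod_ssl's tracing logic; trace_cb and attach_trace are illustrative names:

    #include <openssl/bio.h>

    #if OPENSSL_VERSION_NUMBER >= 0x30000000L
    static long trace_cb(BIO *bio, int cmd, const char *argp, size_t len,
                         int argi, long argl, int ret, size_t *processed)
    {
        /* e.g. cmd == (BIO_CB_WRITE | BIO_CB_RETURN) after a completed write;
         * parameters are unused in this sketch */
        return ret;
    }
    #else
    static long trace_cb(BIO *bio, int cmd, const char *argp,
                         int argi, long argl, long ret)
    {
        return ret;
    }
    #endif

    static void attach_trace(BIO *bio, void *arg)
    {
    #if OPENSSL_VERSION_NUMBER >= 0x30000000L
        BIO_set_callback_ex(bio, trace_cb);
    #else
        BIO_set_callback(bio, trace_cb);
    #endif
        BIO_set_callback_arg(bio, arg);
    }
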
diff --git a/modules/ssl/ssl_engine_kernel.c b/modules/ssl/ssl_engine_kernel.c
index 591f6ae..fe0496f 100644
--- a/modules/ssl/ssl_engine_kernel.c
+++ b/modules/ssl/ssl_engine_kernel.c
@@ -2581,6 +2581,7 @@ static int ssl_find_vhost(void *servername, conn_rec *c, server_rec *s)
sc->server->pks->service_unavailable : 0;
ap_update_child_status_from_server(c->sbh, SERVER_BUSY_READ, c, s);
+
/*
* There is one special filter callback, which is set
* very early depending on the base_server's log level.
@@ -2589,14 +2590,7 @@ static int ssl_find_vhost(void *servername, conn_rec *c, server_rec *s)
* we need to set that callback here.
*/
if (APLOGtrace4(s)) {
- BIO *rbio = SSL_get_rbio(ssl),
- *wbio = SSL_get_wbio(ssl);
- BIO_set_callback(rbio, ssl_io_data_cb);
- BIO_set_callback_arg(rbio, (void *)ssl);
- if (wbio && wbio != rbio) {
- BIO_set_callback(wbio, ssl_io_data_cb);
- BIO_set_callback_arg(wbio, (void *)ssl);
- }
+ modssl_set_io_callbacks(ssl);
}
return 1;
diff --git a/modules/ssl/ssl_engine_pphrase.c b/modules/ssl/ssl_engine_pphrase.c
index d1859f7..699019f 100644
--- a/modules/ssl/ssl_engine_pphrase.c
+++ b/modules/ssl/ssl_engine_pphrase.c
@@ -30,6 +30,8 @@
-- Clifford Stoll */
#include "ssl_private.h"
+#include <openssl/ui.h>
+
typedef struct {
server_rec *s;
apr_pool_t *p;
@@ -606,8 +608,7 @@ int ssl_pphrase_Handle_CB(char *buf, int bufsize, int verify, void *srv)
return (len);
}
-
-#if defined(HAVE_OPENSSL_ENGINE_H) && defined(HAVE_ENGINE_INIT)
+#if MODSSL_HAVE_ENGINE_API
/* OpenSSL UI implementation for passphrase entry; largely duplicated
* from ssl_pphrase_Handle_CB but adjusted for UI API. TODO: Might be
@@ -831,7 +832,7 @@ apr_status_t modssl_load_engine_keypair(server_rec *s, apr_pool_t *p,
const char *certid, const char *keyid,
X509 **pubkey, EVP_PKEY **privkey)
{
-#if defined(HAVE_OPENSSL_ENGINE_H) && defined(HAVE_ENGINE_INIT)
+#if MODSSL_HAVE_ENGINE_API
const char *c, *scheme;
ENGINE *e;
UI_METHOD *ui_method = get_passphrase_ui(p);
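
Behind the MODSSL_HAVE_ENGINE_API guard, an ENGINE-backed key load typically follows the by_id/init/load/finish sequence. A rough sketch only, with abbreviated error handling; the "pkcs11" engine id and load_engine_key name are examples, not the module's implementation:

    #if MODSSL_HAVE_ENGINE_API
    /* Sketch: load a private key through an OpenSSL ENGINE; ui may be NULL
     * to fall back to the default UI method. */
    static EVP_PKEY *load_engine_key(const char *keyid, UI_METHOD *ui)
    {
        EVP_PKEY *pkey = NULL;
        ENGINE *e = ENGINE_by_id("pkcs11");
        if (e) {
            if (ENGINE_init(e)) {
                pkey = ENGINE_load_private_key(e, keyid, ui, NULL);
                ENGINE_finish(e);
            }
            ENGINE_free(e);
        }
        return pkey;
    }
    #endif
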
diff --git a/modules/ssl/ssl_private.h b/modules/ssl/ssl_private.h
index cd8df07..859e932 100644
--- a/modules/ssl/ssl_private.h
+++ b/modules/ssl/ssl_private.h
@@ -83,16 +83,13 @@
#include "ap_expr.h"
-/* OpenSSL headers */
-#include <openssl/opensslv.h>
-#if (OPENSSL_VERSION_NUMBER >= 0x10001000)
-/* must be defined before including ssl.h */
-#define OPENSSL_NO_SSL_INTERN
-#endif
-#if OPENSSL_VERSION_NUMBER >= 0x30000000
-#include <openssl/core_names.h>
+/* keep first for compat API */
+#ifndef OPENSSL_API_COMPAT
+#define OPENSSL_API_COMPAT 0x10101000 /* for ENGINE_ API */
#endif
-#include <openssl/ssl.h>
+#include "mod_ssl_openssl.h"
+
+/* OpenSSL headers */
#include <openssl/err.h>
#include <openssl/x509.h>
#include <openssl/pem.h>
@@ -102,12 +99,23 @@
#include <openssl/x509v3.h>
#include <openssl/x509_vfy.h>
#include <openssl/ocsp.h>
+#include <openssl/dh.h>
+#if OPENSSL_VERSION_NUMBER >= 0x30000000
+#include <openssl/core_names.h>
+#endif
/* Avoid tripping over an engine build installed globally and detected
* when the user points at an explicit non-engine flavor of OpenSSL
*/
-#if defined(HAVE_OPENSSL_ENGINE_H) && defined(HAVE_ENGINE_INIT)
+#if defined(HAVE_OPENSSL_ENGINE_H) && defined(HAVE_ENGINE_INIT) \
+ && (OPENSSL_VERSION_NUMBER < 0x30000000 \
+ || (defined(OPENSSL_API_LEVEL) && OPENSSL_API_LEVEL < 30000)) \
+ && !defined(OPENSSL_NO_ENGINE)
#include <openssl/engine.h>
+#define MODSSL_HAVE_ENGINE_API 1
+#endif
+#ifndef MODSSL_HAVE_ENGINE_API
+#define MODSSL_HAVE_ENGINE_API 0
#endif
#if (OPENSSL_VERSION_NUMBER < 0x0090801f)
@@ -142,10 +150,18 @@
* include most changes from OpenSSL >= 1.1 (new functions, macros,
* deprecations, ...), so we have to work around this...
*/
-#define MODSSL_USE_OPENSSL_PRE_1_1_API (LIBRESSL_VERSION_NUMBER < 0x2070000f)
+#if LIBRESSL_VERSION_NUMBER < 0x2070000f
+#define MODSSL_USE_OPENSSL_PRE_1_1_API 1
+#else
+#define MODSSL_USE_OPENSSL_PRE_1_1_API 0
+#endif
#else /* defined(LIBRESSL_VERSION_NUMBER) */
-#define MODSSL_USE_OPENSSL_PRE_1_1_API (OPENSSL_VERSION_NUMBER < 0x10100000L)
+#if OPENSSL_VERSION_NUMBER < 0x10100000L
+#define MODSSL_USE_OPENSSL_PRE_1_1_API 1
+#else
+#define MODSSL_USE_OPENSSL_PRE_1_1_API 0
#endif
+#endif /* defined(LIBRESSL_VERSION_NUMBER) */
#if defined(OPENSSL_FIPS) || OPENSSL_VERSION_NUMBER >= 0x30000000L
#define HAVE_FIPS
@@ -211,7 +227,10 @@
#endif
/* Secure Remote Password */
-#if !defined(OPENSSL_NO_SRP) && defined(SSL_CTRL_SET_TLS_EXT_SRP_USERNAME_CB)
+#if !defined(OPENSSL_NO_SRP) \
+ && (OPENSSL_VERSION_NUMBER < 0x30000000L \
+ || (defined(OPENSSL_API_LEVEL) && OPENSSL_API_LEVEL < 30000)) \
+ && defined(SSL_CTRL_SET_TLS_EXT_SRP_USERNAME_CB)
#define HAVE_SRP
#include <openssl/srp.h>
#endif
@@ -254,6 +273,14 @@ void free_bio_methods(void);
#endif
#endif
+/* those may be deprecated */
+#ifndef X509_get_notBefore
+#define X509_get_notBefore X509_getm_notBefore
+#endif
+#ifndef X509_get_notAfter
+#define X509_get_notAfter X509_getm_notAfter
+#endif
+
#if OPENSSL_VERSION_NUMBER >= 0x10101000L && !defined(LIBRESSL_VERSION_NUMBER)
#define HAVE_OPENSSL_KEYLOG
#endif
@@ -625,9 +652,7 @@ typedef struct {
* index), for example the string "vhost.example.com:443:0". */
apr_hash_t *tPrivateKey;
-#if defined(HAVE_OPENSSL_ENGINE_H) && defined(HAVE_ENGINE_INIT)
- const char *szCryptoDevice;
-#endif
+ const char *szCryptoDevice; /* ENGINE device (if available) */
#ifdef HAVE_OCSP_STAPLING
const ap_socache_provider_t *stapling_cache;
@@ -1019,7 +1044,7 @@ void modssl_callback_keylog(const SSL *ssl, const char *line);
/** I/O */
void ssl_io_filter_init(conn_rec *, request_rec *r, SSL *);
void ssl_io_filter_register(apr_pool_t *);
-long ssl_io_data_cb(BIO *, int, const char *, int, long, long);
+void modssl_set_io_callbacks(SSL *ssl);
/* ssl_io_buffer_fill fills the setaside buffering of the HTTP request
* to allow an SSL renegotiation to take place. */
@@ -1057,9 +1082,13 @@ apr_status_t modssl_load_engine_keypair(server_rec *s, apr_pool_t *p,
X509 **pubkey, EVP_PKEY **privkey);
/** Diffie-Hellman Parameter Support */
-DH *ssl_dh_GetParamFromFile(const char *);
+#if OPENSSL_VERSION_NUMBER < 0x30000000L
+DH *modssl_dh_from_file(const char *);
+#else
+EVP_PKEY *modssl_dh_pkey_from_file(const char *);
+#endif
#ifdef HAVE_ECC
-EC_GROUP *ssl_ec_GetParamFromFile(const char *);
+EC_GROUP *modssl_ec_group_from_file(const char *);
#endif
/* Store the EVP_PKEY key (serialized into DER) in the hash table with
diff --git a/modules/ssl/ssl_util.c b/modules/ssl/ssl_util.c
index c889295..87ddfa7 100644
--- a/modules/ssl/ssl_util.c
+++ b/modules/ssl/ssl_util.c
@@ -476,7 +476,7 @@ void ssl_util_thread_id_setup(apr_pool_t *p)
int modssl_is_engine_id(const char *name)
{
-#if defined(HAVE_OPENSSL_ENGINE_H) && defined(HAVE_ENGINE_INIT)
+#if MODSSL_HAVE_ENGINE_API
/* ### Can handle any other special ENGINE key names here? */
return strncmp(name, "pkcs11:", 7) == 0;
#else
diff --git a/modules/ssl/ssl_util_ocsp.c b/modules/ssl/ssl_util_ocsp.c
index b9c8a0b..a202a72 100644
--- a/modules/ssl/ssl_util_ocsp.c
+++ b/modules/ssl/ssl_util_ocsp.c
@@ -370,8 +370,11 @@ static STACK_OF(X509) *modssl_read_ocsp_certificates(const char *file)
while ((x509 = PEM_read_bio_X509(bio, NULL, NULL, NULL)) != NULL) {
if (!other_certs) {
other_certs = sk_X509_new_null();
- if (!other_certs)
+ if (!other_certs) {
+ X509_free(x509);
+ BIO_free(bio);
return NULL;
+ }
}
if (!sk_X509_push(other_certs, x509)) {
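
The OCSP fix above closes a leak when the stack cannot be allocated; the pattern it aims for is that every failure path releases both the partially built stack and the certificate in hand. A minimal sketch of that loop under those assumptions (read_pem_certs is a hypothetical helper):

    /* Sketch: read all PEM certificates from a BIO into a new stack,
     * cleaning up fully on any failure. */
    static STACK_OF(X509) *read_pem_certs(BIO *bio)
    {
        STACK_OF(X509) *certs = sk_X509_new_null();
        X509 *x509;

        if (!certs)
            return NULL;
        while ((x509 = PEM_read_bio_X509(bio, NULL, NULL, NULL)) != NULL) {
            if (!sk_X509_push(certs, x509)) {
                X509_free(x509);
                sk_X509_pop_free(certs, X509_free);
                return NULL;
            }
        }
        return certs;
    }
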
diff --git a/modules/ssl/ssl_util_ssl.c b/modules/ssl/ssl_util_ssl.c
index 38079a9..44930b7 100644
--- a/modules/ssl/ssl_util_ssl.c
+++ b/modules/ssl/ssl_util_ssl.c
@@ -464,29 +464,52 @@ BOOL modssl_X509_match_name(apr_pool_t *p, X509 *x509, const char *name,
** _________________________________________________________________
*/
-DH *ssl_dh_GetParamFromFile(const char *file)
+#if OPENSSL_VERSION_NUMBER < 0x30000000L
+DH *modssl_dh_from_file(const char *file)
{
- DH *dh = NULL;
+ DH *dh;
BIO *bio;
if ((bio = BIO_new_file(file, "r")) == NULL)
return NULL;
dh = PEM_read_bio_DHparams(bio, NULL, NULL, NULL);
BIO_free(bio);
- return (dh);
+
+ return dh;
+}
+#else
+EVP_PKEY *modssl_dh_pkey_from_file(const char *file)
+{
+ EVP_PKEY *pkey;
+ BIO *bio;
+
+ if ((bio = BIO_new_file(file, "r")) == NULL)
+ return NULL;
+ pkey = PEM_read_bio_Parameters(bio, NULL);
+ BIO_free(bio);
+
+ return pkey;
}
+#endif
#ifdef HAVE_ECC
-EC_GROUP *ssl_ec_GetParamFromFile(const char *file)
+EC_GROUP *modssl_ec_group_from_file(const char *file)
{
- EC_GROUP *group = NULL;
+ EC_GROUP *group;
BIO *bio;
if ((bio = BIO_new_file(file, "r")) == NULL)
return NULL;
+#if OPENSSL_VERSION_NUMBER < 0x30000000L
group = PEM_read_bio_ECPKParameters(bio, NULL, NULL, NULL);
+#else
+ group = PEM_ASN1_read_bio((void *)d2i_ECPKParameters,
+ PEM_STRING_ECPARAMETERS, bio,
+ NULL, NULL, NULL);
+#endif
BIO_free(bio);
- return (group);
+
+ return group;
}
#endif
diff --git a/modules/ssl/ssl_util_stapling.c b/modules/ssl/ssl_util_stapling.c
index ab77e4a..563de55 100644
--- a/modules/ssl/ssl_util_stapling.c
+++ b/modules/ssl/ssl_util_stapling.c
@@ -29,9 +29,9 @@
-- Alexei Sayle */
#include "ssl_private.h"
+
#include "ap_mpm.h"
#include "apr_thread_mutex.h"
-#include "mod_ssl_openssl.h"
APR_IMPLEMENT_OPTIONAL_HOOK_RUN_ALL(ssl, SSL, int, init_stapling_status,
(server_rec *s, apr_pool_t *p,
@@ -117,8 +117,10 @@ static X509 *stapling_get_issuer(modssl_ctx_t *mctx, X509 *x)
}
inctx = X509_STORE_CTX_new();
- if (!X509_STORE_CTX_init(inctx, st, NULL, NULL))
+ if (!X509_STORE_CTX_init(inctx, st, NULL, NULL)) {
+ X509_STORE_CTX_free(inctx);
return 0;
+ }
if (X509_STORE_CTX_get1_issuer(&issuer, inctx, x) <= 0)
issuer = NULL;
X509_STORE_CTX_cleanup(inctx);
@@ -445,7 +447,7 @@ static int stapling_check_response(server_rec *s, modssl_ctx_t *mctx,
rv = SSL_TLSEXT_ERR_NOACK;
}
- if (status != V_OCSP_CERTSTATUS_GOOD) {
+ if (status != V_OCSP_CERTSTATUS_GOOD && pok) {
char snum[MAX_STRING_LEN] = { '\0' };
BIO *bio = BIO_new(BIO_s_mem());
@@ -466,12 +468,6 @@ static int stapling_check_response(server_rec *s, modssl_ctx_t *mctx,
(reason != OCSP_REVOKED_STATUS_NOSTATUS) ?
OCSP_crl_reason_str(reason) : "n/a",
snum[0] ? snum : "[n/a]");
-
- if (mctx->stapling_return_errors == FALSE) {
- if (pok)
- *pok = FALSE;
- rv = SSL_TLSEXT_ERR_NOACK;
- }
}
}
diff --git a/modules/tls/config2.m4 b/modules/tls/config2.m4
index af97178..8a32490 100644
--- a/modules/tls/config2.m4
+++ b/modules/tls/config2.m4
@@ -109,9 +109,10 @@ AC_DEFUN([APACHE_CHECK_RUSTLS],[
fi
fi
- AC_MSG_CHECKING([for rustls version >= 0.8.2])
+ AC_MSG_CHECKING([for rustls version >= 0.9.2])
AC_TRY_COMPILE([#include <rustls.h>],[
rustls_version();
+rustls_acceptor_new();
],
[AC_MSG_RESULT(OK)
ac_cv_rustls=yes],
diff --git a/modules/tls/tls_core.c b/modules/tls/tls_core.c
index 38c8873..2547939 100644
--- a/modules/tls/tls_core.c
+++ b/modules/tls/tls_core.c
@@ -507,8 +507,8 @@ static const rustls_certified_key *extract_client_hello_values(
ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, c, "extract client hello values");
if (!cc) goto cleanup;
cc->client_hello_seen = 1;
- if (hello->sni_name.len > 0) {
- cc->sni_hostname = apr_pstrndup(c->pool, hello->sni_name.data, hello->sni_name.len);
+ if (hello->server_name.len > 0) {
+ cc->sni_hostname = apr_pstrndup(c->pool, hello->server_name.data, hello->server_name.len);
ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c, "sni detected: %s", cc->sni_hostname);
}
else {