Diffstat (limited to 'modules/proxy')
-rw-r--r--   modules/proxy/ajp.h                                  4
-rw-r--r--   modules/proxy/ajp_header.c                          48
-rw-r--r--   modules/proxy/balancers/mod_lbmethod_heartbeat.c     5
-rw-r--r--   modules/proxy/mod_proxy.c                          704
-rw-r--r--   modules/proxy/mod_proxy.h                          296
-rw-r--r--   modules/proxy/mod_proxy_ajp.c                      135
-rw-r--r--   modules/proxy/mod_proxy_balancer.c                 456
-rw-r--r--   modules/proxy/mod_proxy_connect.c                  168
-rw-r--r--   modules/proxy/mod_proxy_express.c                   29
-rw-r--r--   modules/proxy/mod_proxy_fcgi.c                     196
-rw-r--r--   modules/proxy/mod_proxy_fdpass.c                     4
-rw-r--r--   modules/proxy/mod_proxy_ftp.c                      171
-rw-r--r--   modules/proxy/mod_proxy_hcheck.c                   348
-rw-r--r--   modules/proxy/mod_proxy_http.c                    1812
-rw-r--r--   modules/proxy/mod_proxy_scgi.c                      20
-rw-r--r--   modules/proxy/mod_proxy_uwsgi.c                    148
-rw-r--r--   modules/proxy/mod_proxy_wstunnel.c                 266
-rw-r--r--   modules/proxy/proxy_util.c                        2701
-rw-r--r--   modules/proxy/proxy_util.h                           6
19 files changed, 5257 insertions, 2260 deletions
diff --git a/modules/proxy/ajp.h b/modules/proxy/ajp.h
index c119a7e..a950ee9 100644
--- a/modules/proxy/ajp.h
+++ b/modules/proxy/ajp.h
@@ -414,11 +414,13 @@ apr_status_t ajp_ilink_receive(apr_socket_t *sock, ajp_msg_t *msg);
* @param r current request
* @param buffsize max size of the AJP packet.
* @param uri requested uri
+ * @param secret authentication secret
* @return APR_SUCCESS or error
*/
apr_status_t ajp_send_header(apr_socket_t *sock, request_rec *r,
apr_size_t buffsize,
- apr_uri_t *uri);
+ apr_uri_t *uri,
+ const char *secret);
/**
* Read the ajp message and return the type of the message.
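For reference, the new `secret` argument carries a per-worker AJP authentication secret (the counterpart of mod_jk's worker `secret` attribute). A minimal caller sketch, not part of this diff, assuming a `proxy_conn_rec *conn`, a parsed `apr_uri_t *uri` and a `maxsize` buffer size are in scope as in mod_proxy_ajp.c:

    apr_status_t status;
    const char *secret = NULL;

    /* Forward the worker's configured secret, if any; an empty string
     * means "unset", in which case SC_A_SECRET is simply not sent. */
    if (*conn->worker->s->secret) {
        secret = conn->worker->s->secret;
    }
    status = ajp_send_header(conn->sock, r, maxsize, uri, secret);
    if (status != APR_SUCCESS) {
        ap_log_rerror(APLOG_MARK, APLOG_ERR, status, r,
                      "ajp_send_header failed");
    }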
diff --git a/modules/proxy/ajp_header.c b/modules/proxy/ajp_header.c
index 67353a7..0266a7d 100644
--- a/modules/proxy/ajp_header.c
+++ b/modules/proxy/ajp_header.c
@@ -17,6 +17,8 @@
#include "ajp_header.h"
#include "ajp.h"
+#include "util_script.h"
+
APLOG_USE_MODULE(proxy_ajp);
static const char *response_trans_headers[] = {
@@ -59,6 +61,7 @@ static int sc_for_req_header(const char *header_name)
if (len < 4 || len > 15)
return UNKNOWN_METHOD;
+ memset(header, 0, sizeof header);
while (*p)
header[i++] = apr_toupper(*p++);
header[i] = '\0';
@@ -213,7 +216,8 @@ AJPV13_REQUEST/AJPV14_REQUEST=
static apr_status_t ajp_marshal_into_msgb(ajp_msg_t *msg,
request_rec *r,
- apr_uri_t *uri)
+ apr_uri_t *uri,
+ const char *secret)
{
int method;
apr_uint32_t i, num_headers = 0;
@@ -293,17 +297,15 @@ static apr_status_t ajp_marshal_into_msgb(ajp_msg_t *msg,
i, elts[i].key, elts[i].val);
}
-/* XXXX need to figure out how to do this
- if (s->secret) {
+ if (secret) {
if (ajp_msg_append_uint8(msg, SC_A_SECRET) ||
- ajp_msg_append_string(msg, s->secret)) {
+ ajp_msg_append_string(msg, secret)) {
ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(03228)
- "Error ajp_marshal_into_msgb - "
+ "ajp_marshal_into_msgb: "
"Error appending secret");
return APR_EGENERAL;
}
}
- */
if (r->user) {
if (ajp_msg_append_uint8(msg, SC_A_REMOTE_USER) ||
@@ -584,8 +586,15 @@ static apr_status_t ajp_unmarshal_response(ajp_msg_t *msg,
r->headers_out = save_table;
}
else {
- r->headers_out = NULL;
+ /*
+ * Reset headers, but not to NULL because things below the chain expect
+ * this to be non NULL e.g. the ap_content_length_filter.
+ */
+ r->headers_out = apr_table_make(r->pool, 1);
num_headers = 0;
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(10405)
+ "ajp_unmarshal_response: Bad number of headers");
+ return rc;
}
ap_log_rerror(APLOG_MARK, APLOG_TRACE4, 0, r,
@@ -633,15 +642,15 @@ static apr_status_t ajp_unmarshal_response(ajp_msg_t *msg,
}
/* Set-Cookie need additional processing */
- if (!strcasecmp(stringname, "Set-Cookie")) {
+ if (!ap_cstr_casecmp(stringname, "Set-Cookie")) {
value = ap_proxy_cookie_reverse_map(r, dconf, value);
}
/* Location, Content-Location, URI and Destination need additional
* processing */
- else if (!strcasecmp(stringname, "Location")
- || !strcasecmp(stringname, "Content-Location")
- || !strcasecmp(stringname, "URI")
- || !strcasecmp(stringname, "Destination"))
+ else if (!ap_cstr_casecmp(stringname, "Location")
+ || !ap_cstr_casecmp(stringname, "Content-Location")
+ || !ap_cstr_casecmp(stringname, "URI")
+ || !ap_cstr_casecmp(stringname, "Destination"))
{
value = ap_proxy_location_reverse_map(r, dconf, value);
}
@@ -654,7 +663,7 @@ static apr_status_t ajp_unmarshal_response(ajp_msg_t *msg,
apr_table_add(r->headers_out, stringname, value);
/* Content-type needs an additional handling */
- if (strcasecmp(stringname, "Content-Type") == 0) {
+ if (ap_cstr_casecmp(stringname, "Content-Type") == 0) {
/* add corresponding filter */
ap_set_content_type(r, apr_pstrdup(r->pool, value));
ap_log_rerror(APLOG_MARK, APLOG_TRACE5, 0, r,
@@ -662,6 +671,14 @@ static apr_status_t ajp_unmarshal_response(ajp_msg_t *msg,
}
}
+ /* AJP has its own body framing mechanism which we don't
+ * match against any provided Content-Length, so let the
+ * core determine C-L vs T-E based on what's actually sent.
+ */
+ if (!apr_table_get(r->subprocess_env, AP_TRUST_CGILIKE_CL_ENVVAR))
+ apr_table_unset(r->headers_out, "Content-Length");
+ apr_table_unset(r->headers_out, "Transfer-Encoding");
+
return APR_SUCCESS;
}
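The strcasecmp()/strncasecmp() replacements in this file (and in mod_proxy.c below) switch header and scheme comparisons to ap_cstr_casecmp()/ap_cstr_casecmpn(), which compare ASCII case-insensitively regardless of the process locale. A standalone sketch of the pattern, assuming ap_cstr_casecmp() is available via httpd.h:

    #include "httpd.h"

    /* 1 if the response header needs URL reverse-mapping, else 0.
     * ap_cstr_casecmp() ignores LC_CTYPE, so locale-specific casing
     * rules (e.g. Turkish dotless i) cannot change the result. */
    static int needs_reverse_map(const char *name)
    {
        return !ap_cstr_casecmp(name, "Location")
            || !ap_cstr_casecmp(name, "Content-Location")
            || !ap_cstr_casecmp(name, "URI")
            || !ap_cstr_casecmp(name, "Destination");
    }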
@@ -671,7 +688,8 @@ static apr_status_t ajp_unmarshal_response(ajp_msg_t *msg,
apr_status_t ajp_send_header(apr_socket_t *sock,
request_rec *r,
apr_size_t buffsize,
- apr_uri_t *uri)
+ apr_uri_t *uri,
+ const char *secret)
{
ajp_msg_t *msg;
apr_status_t rc;
@@ -683,7 +701,7 @@ apr_status_t ajp_send_header(apr_socket_t *sock,
return rc;
}
- rc = ajp_marshal_into_msgb(msg, r, uri);
+ rc = ajp_marshal_into_msgb(msg, r, uri, secret);
if (rc != APR_SUCCESS) {
ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(00988)
"ajp_send_header: ajp_marshal_into_msgb failed");
diff --git a/modules/proxy/balancers/mod_lbmethod_heartbeat.c b/modules/proxy/balancers/mod_lbmethod_heartbeat.c
index 7aeaf71..0534e5b 100644
--- a/modules/proxy/balancers/mod_lbmethod_heartbeat.c
+++ b/modules/proxy/balancers/mod_lbmethod_heartbeat.c
@@ -115,7 +115,6 @@ static apr_status_t readfile_heartbeats(const char *path, apr_hash_t *servers,
{
char *t;
- int lineno = 0;
apr_bucket_alloc_t *ba = apr_bucket_alloc_create(pool);
apr_bucket_brigade *bb = apr_brigade_create(pool, ba);
apr_bucket_brigade *tmpbb = apr_brigade_create(pool, ba);
@@ -137,7 +136,6 @@ static apr_status_t readfile_heartbeats(const char *path, apr_hash_t *servers,
rv = apr_brigade_split_line(tmpbb, bb,
APR_BLOCK_READ, sizeof(buf));
- lineno++;
if (rv) {
return rv;
@@ -281,6 +279,7 @@ static proxy_worker *find_best_hb(proxy_balancer *balancer,
}
apr_pool_create(&tpool, r->pool);
+ apr_pool_tag(tpool, "lb_heartbeat_tpool");
servers = apr_hash_make(tpool);
@@ -302,7 +301,7 @@ static proxy_worker *find_best_hb(proxy_balancer *balancer,
if (!server) {
ap_log_rerror(APLOG_MARK, APLOG_DEBUG, rv, r, APLOGNO(01214)
- "lb_heartbeat: No server for worker %s", (*worker)->s->name);
+ "lb_heartbeat: No server for worker %s", (*worker)->s->name_ex);
continue;
}
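The added apr_pool_tag() calls (here and in mod_proxy.c) only label subpools so they can be identified in pool-debug output; runtime behaviour is unchanged. The pattern as a self-contained sketch:

    #include "apr_pools.h"

    /* Create a short-lived subpool, tag it for pool debugging,
     * use it for scratch allocations, then destroy it. */
    static void with_tagged_subpool(apr_pool_t *parent)
    {
        apr_pool_t *tpool;
        apr_pool_create(&tpool, parent);
        apr_pool_tag(tpool, "lb_heartbeat_tpool");
        /* ... allocations from tpool ... */
        apr_pool_destroy(tpool);
    }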
diff --git a/modules/proxy/mod_proxy.c b/modules/proxy/mod_proxy.c
index 69a35ce..c9cef7c 100644
--- a/modules/proxy/mod_proxy.c
+++ b/modules/proxy/mod_proxy.c
@@ -17,6 +17,7 @@
#include "mod_proxy.h"
#include "mod_core.h"
#include "apr_optional.h"
+#include "apr_strings.h"
#include "scoreboard.h"
#include "mod_status.h"
#include "proxy_util.h"
@@ -29,10 +30,6 @@ APR_DECLARE_OPTIONAL_FN(int, ssl_engine_disable, (conn_rec *));
APR_DECLARE_OPTIONAL_FN(int, ssl_engine_set, (conn_rec *,
ap_conf_vector_t *,
int proxy, int enable));
-APR_DECLARE_OPTIONAL_FN(int, ssl_is_https, (conn_rec *));
-APR_DECLARE_OPTIONAL_FN(char *, ssl_var_lookup,
- (apr_pool_t *, server_rec *,
- conn_rec *, request_rec *, char *));
#endif
#ifndef MAX
@@ -55,6 +52,9 @@ proxy_hcmethods_t PROXY_DECLARE_DATA proxy_hcmethods[] = {
{GET, "GET", 1},
{CPING, "CPING", 0},
{PROVIDER, "PROVIDER", 0},
+ {OPTIONS11, "OPTIONS11", 1},
+ {HEAD11, "HEAD11", 1},
+ {GET11, "GET11", 1},
{EOT, NULL, 1}
};
@@ -149,7 +149,7 @@ static const char *set_worker_param(apr_pool_t *p,
return "Max must be a positive number";
worker->s->hmax = ival;
}
- /* XXX: More inteligent naming needed */
+ /* XXX: More intelligent naming needed */
else if (!strcasecmp(key, "smax")) {
/* Maximum number of connections to remote that
* will not be destroyed
@@ -224,6 +224,24 @@ static const char *set_worker_param(apr_pool_t *p,
return "EnableReuse must be On|Off";
worker->s->disablereuse_set = 1;
}
+ else if (!strcasecmp(key, "addressttl")) {
+ /* Address TTL in seconds
+ */
+ apr_interval_time_t ttl;
+ if (strcmp(val, "-1") == 0) {
+ worker->s->address_ttl = -1;
+ }
+ else if (ap_timeout_parameter_parse(val, &ttl, "s") == APR_SUCCESS
+ && (ttl <= apr_time_from_sec(APR_INT32_MAX))
+ && (ttl % apr_time_from_sec(1)) == 0) {
+ worker->s->address_ttl = apr_time_sec(ttl);
+ }
+ else {
+ return "AddressTTL must be -1 or a number of seconds not "
+ "exceeding " APR_STRINGIFY(APR_INT32_MAX);
+ }
+ worker->s->address_ttl_set = 1;
+ }
else if (!strcasecmp(key, "route")) {
/* Worker route.
*/
@@ -308,13 +326,14 @@ static const char *set_worker_param(apr_pool_t *p,
worker->s->conn_timeout_set = 1;
}
else if (!strcasecmp(key, "flusher")) {
- if (strlen(val) >= sizeof(worker->s->flusher))
- apr_psprintf(p, "flusher name length must be < %d characters",
- (int)sizeof(worker->s->flusher));
- PROXY_STRNCPY(worker->s->flusher, val);
+ if (PROXY_STRNCPY(worker->s->flusher, val) != APR_SUCCESS) {
+ return apr_psprintf(p, "flusher name length must be < %d characters",
+ (int)sizeof(worker->s->flusher));
+ }
}
else if (!strcasecmp(key, "upgrade")) {
- if (PROXY_STRNCPY(worker->s->upgrade, val) != APR_SUCCESS) {
+ if (PROXY_STRNCPY(worker->s->upgrade,
+ strcasecmp(val, "ANY") ? val : "*") != APR_SUCCESS) {
return apr_psprintf(p, "upgrade protocol length must be < %d characters",
(int)sizeof(worker->s->upgrade));
}
@@ -327,6 +346,12 @@ static const char *set_worker_param(apr_pool_t *p,
worker->s->response_field_size = (s ? s : HUGE_STRING_LEN);
worker->s->response_field_size_set = 1;
}
+ else if (!strcasecmp(key, "secret")) {
+ if (PROXY_STRNCPY(worker->s->secret, val) != APR_SUCCESS) {
+ return apr_psprintf(p, "Secret length must be < %d characters",
+ (int)sizeof(worker->s->secret));
+ }
+ }
else {
if (set_worker_hc_param_f) {
return set_worker_hc_param_f(p, s, worker, key, val, NULL);
@@ -557,6 +582,201 @@ static int alias_match(const char *uri, const char *alias_fakename)
return urip - uri;
}
+/*
+ * Inspired by mod_jk's jk_servlet_normalize().
+ */
+static int alias_match_servlet(apr_pool_t *p,
+ const char **urip,
+ const char *alias)
+{
+ char *map;
+ const char *uri = *urip;
+ apr_array_header_t *stack;
+ int map_pos, uri_pos, alias_pos, first_pos;
+ int alias_depth = 0, depth;
+
+ /* Both uri and alias should start with '/' */
+ if (uri[0] != '/' || alias[0] != '/') {
+ return 0;
+ }
+
+ stack = apr_array_make(p, 5, sizeof(int));
+ map = apr_palloc(p, strlen(uri) + 1);
+ map[0] = '/';
+ map[1] = '\0';
+
+ map_pos = uri_pos = alias_pos = first_pos = 1;
+ while (uri[uri_pos] != '\0') {
+ /* Remove path parameters ;foo=bar/ from any path segment */
+ if (uri[uri_pos] == ';') {
+ do {
+ uri_pos++;
+ } while (uri[uri_pos] != '/' && uri[uri_pos] != '\0');
+ continue;
+ }
+
+ if (map[map_pos - 1] == '/') {
+ /* Collapse ///// sequences to / */
+ if (uri[uri_pos] == '/') {
+ do {
+ uri_pos++;
+ } while (uri[uri_pos] == '/');
+ continue;
+ }
+
+ if (uri[uri_pos] == '.') {
+ /* Remove /./ segments */
+ if (uri[uri_pos + 1] == '/'
+ || uri[uri_pos + 1] == ';'
+ || uri[uri_pos + 1] == '\0') {
+ uri_pos++;
+ if (uri[uri_pos] == '/') {
+ uri_pos++;
+ }
+ continue;
+ }
+
+ /* Remove /xx/../ segments */
+ if (uri[uri_pos + 1] == '.'
+ && (uri[uri_pos + 2] == '/'
+ || uri[uri_pos + 2] == ';'
+ || uri[uri_pos + 2] == '\0')) {
+ /* Wind map segment back the previous one */
+ if (map_pos == 1) {
+ /* Above root */
+ return 0;
+ }
+ do {
+ map_pos--;
+ } while (map[map_pos - 1] != '/');
+ map[map_pos] = '\0';
+
+ /* Wind alias segment back, unless in deeper segment */
+ if (alias_depth == stack->nelts) {
+ if (alias[alias_pos] == '\0') {
+ alias_pos--;
+ }
+ while (alias_pos > 0 && alias[alias_pos] == '/') {
+ alias_pos--;
+ }
+ while (alias_pos > 0 && alias[alias_pos - 1] != '/') {
+ alias_pos--;
+ }
+ AP_DEBUG_ASSERT(alias_pos > 0);
+ alias_depth--;
+ }
+ apr_array_pop(stack);
+
+ /* Move uri forward to the next segment */
+ uri_pos += 2;
+ if (uri[uri_pos] == '/') {
+ uri_pos++;
+ }
+ first_pos = 0;
+ continue;
+ }
+ }
+ if (first_pos) {
+ while (uri[first_pos] == '/') {
+ first_pos++;
+ }
+ }
+
+ /* New segment */
+ APR_ARRAY_PUSH(stack, int) = first_pos ? first_pos : uri_pos;
+ if (alias[alias_pos] != '\0') {
+ if (alias[alias_pos - 1] != '/') {
+ /* Remain in pair with uri segments */
+ do {
+ alias_pos++;
+ } while (alias[alias_pos - 1] != '/' && alias[alias_pos]);
+ }
+ while (alias[alias_pos] == '/') {
+ alias_pos++;
+ }
+ if (alias[alias_pos] != '\0') {
+ alias_depth++;
+ }
+ }
+ }
+
+ if (alias[alias_pos] != '\0') {
+ int *match = &APR_ARRAY_IDX(stack, alias_depth - 1, int);
+ if (*match) {
+ if (alias[alias_pos] != uri[uri_pos]) {
+ /* Current segment does not match */
+ *match = 0;
+ }
+ else if (alias[alias_pos + 1] == '\0'
+ && alias[alias_pos] != '/') {
+ if (uri[uri_pos + 1] == ';') {
+ /* We'll preserve the parameters of the last
+ * segment if it does not end with '/', so mark
+ * the match as negative for below handling.
+ */
+ *match = -(uri_pos + 1);
+ }
+ else if (uri[uri_pos + 1] != '/'
+ && uri[uri_pos + 1] != '\0') {
+ /* Last segment does not match all the way */
+ *match = 0;
+ }
+ }
+ }
+ /* Don't go past the segment if the uri isn't there yet */
+ if (alias[alias_pos] != '/' || uri[uri_pos] == '/') {
+ alias_pos++;
+ }
+ }
+
+ if (uri[uri_pos] == '/') {
+ first_pos = uri_pos + 1;
+ }
+ map[map_pos++] = uri[uri_pos++];
+ map[map_pos] = '\0';
+ }
+
+ /* Can't reach the end of uri before the end of the alias,
+ * for example if uri is "/" and alias is "/examples"
+ */
+ if (alias[alias_pos] != '\0') {
+ return 0;
+ }
+
+ /* Check whether each alias segment matched */
+ for (depth = 0; depth < alias_depth; ++depth) {
+ if (!APR_ARRAY_IDX(stack, depth, int)) {
+ return 0;
+ }
+ }
+
+ /* If alias_depth == stack->nelts we have a full match, i.e.
+ * uri == alias so we can return uri_pos as is (the end of uri)
+ */
+ if (alias_depth < stack->nelts) {
+ /* Return the segment following the alias */
+ uri_pos = APR_ARRAY_IDX(stack, alias_depth, int);
+ if (alias_depth) {
+ /* But if the last segment of the alias does not end with '/'
+ * and the corresponding segment of the uri has parameters,
+ * we want to forward those parameters (see above for the
+ * negative pos trick/mark).
+ */
+ int pos = APR_ARRAY_IDX(stack, alias_depth - 1, int);
+ if (pos < 0) {
+ uri_pos = -pos;
+ }
+ }
+ }
+ /* If the alias lacks a trailing slash, take it from the uri (if any) */
+ if (alias[alias_pos - 1] != '/' && uri[uri_pos - 1] == '/') {
+ uri_pos--;
+ }
+
+ *urip = map;
+ return uri_pos;
+}
+
/* Detect if an absoluteURI should be proxied or not. Note that we
* have to do this during this phase because later phases are
* "short-circuiting"... i.e. translate_names will end when the first
@@ -578,11 +798,12 @@ static int proxy_detect(request_rec *r)
if (conf->req && r->parsed_uri.scheme) {
/* but it might be something vhosted */
- if (!(r->parsed_uri.hostname
- && !strcasecmp(r->parsed_uri.scheme, ap_http_scheme(r))
- && ap_matches_request_vhost(r, r->parsed_uri.hostname,
- (apr_port_t)(r->parsed_uri.port_str ? r->parsed_uri.port
- : ap_default_port(r))))) {
+ if (!r->parsed_uri.hostname
+ || ap_cstr_casecmp(r->parsed_uri.scheme, ap_http_scheme(r)) != 0
+ || !ap_matches_request_vhost(r, r->parsed_uri.hostname,
+ (apr_port_t)(r->parsed_uri.port_str
+ ? r->parsed_uri.port
+ : ap_default_port(r)))) {
r->proxyreq = PROXYREQ_PROXY;
r->uri = r->unparsed_uri;
r->filename = apr_pstrcat(r->pool, "proxy:", r->uri, NULL);
@@ -667,6 +888,7 @@ PROXY_DECLARE(int) ap_proxy_trans_match(request_rec *r, struct proxy_alias *ent,
int mismatch = 0;
unsigned int nocanon = ent->flags & PROXYPASS_NOCANON;
const char *use_uri = nocanon ? r->unparsed_uri : r->uri;
+ const char *servlet_uri = NULL;
if (dconf && (dconf->interpolate_env == 1) && (ent->flags & PROXYPASS_INTERPOLATE)) {
fake = proxy_interpolate(r, ent->fake);
@@ -727,7 +949,14 @@ PROXY_DECLARE(int) ap_proxy_trans_match(request_rec *r, struct proxy_alias *ent,
}
}
else {
- len = alias_match(r->uri, fake);
+ if ((ent->flags & PROXYPASS_MAP_SERVLET) == PROXYPASS_MAP_SERVLET) {
+ servlet_uri = r->uri;
+ len = alias_match_servlet(r->pool, &servlet_uri, fake);
+ nocanon = 0; /* ignored since servlet's normalization applies */
+ }
+ else {
+ len = alias_match(r->uri, fake);
+ }
if (len != 0) {
if ((real[0] == '!') && (real[1] == '\0')) {
@@ -736,7 +965,7 @@ PROXY_DECLARE(int) ap_proxy_trans_match(request_rec *r, struct proxy_alias *ent,
"'%s'; declining", r->uri);
return DECLINED;
}
- if (nocanon && len != alias_match(r->unparsed_uri, ent->fake)) {
+ if (nocanon && len != alias_match(r->unparsed_uri, fake)) {
mismatch = 1;
use_uri = r->uri;
}
@@ -752,6 +981,17 @@ PROXY_DECLARE(int) ap_proxy_trans_match(request_rec *r, struct proxy_alias *ent,
}
if (found) {
+ unsigned int encoded = ent->flags & PROXYPASS_MAP_ENCODED;
+
+ /* A proxy module is assigned this URL, check whether it's interested
+ * in the request itself (e.g. proxy_wstunnel cares about Upgrade
+ * requests only, and could hand over to proxy_http otherwise).
+ */
+ int rc = proxy_run_check_trans(r, found + 6);
+ if (rc != OK && rc != DECLINED) {
+ return HTTP_CONTINUE;
+ }
+
r->filename = found;
r->handler = "proxy-server";
r->proxyreq = PROXYREQ_REVERSE;
@@ -762,29 +1002,67 @@ PROXY_DECLARE(int) ap_proxy_trans_match(request_rec *r, struct proxy_alias *ent,
if (ent->flags & PROXYPASS_NOQUERY) {
apr_table_setn(r->notes, "proxy-noquery", "1");
}
+ if (encoded) {
+ apr_table_setn(r->notes, "proxy-noencode", "1");
+ }
- ap_log_rerror(APLOG_MARK, APLOG_TRACE1, 0, r, APLOGNO(03464)
- "URI path '%s' matches proxy handler '%s'", r->uri,
- found);
-
- return OK;
+ if (servlet_uri) {
+ ap_log_rerror(APLOG_MARK, APLOG_TRACE1, 0, r, APLOGNO(10248)
+ "Servlet path '%s' (%s) matches proxy handler '%s'",
+ r->uri, servlet_uri, found);
+ /* Apply servlet normalization to r->uri so that <Location> or any
+ * directory context match does not have to handle path parameters.
+ * We change r->uri in-place so that r->parsed_uri.path is updated
+ * too. Since normalized servlet_uri is necessarily shorter than
+ * the original r->uri, strcpy() is fine.
+ */
+ AP_DEBUG_ASSERT(strlen(r->uri) >= strlen(servlet_uri));
+ strcpy(r->uri, servlet_uri);
+ }
+ else {
+ ap_log_rerror(APLOG_MARK, APLOG_TRACE1, 0, r, APLOGNO(03464)
+ "URI path '%s' matches proxy handler '%s'", r->uri,
+ found);
+ }
+ return (encoded) ? DONE : OK;
}
- return DONE;
+ return HTTP_CONTINUE;
}
-static int proxy_trans(request_rec *r)
+static int proxy_trans(request_rec *r, int pre_trans)
{
- int i;
+ int i, enc;
struct proxy_alias *ent;
proxy_dir_conf *dconf;
proxy_server_conf *conf;
if (r->proxyreq) {
/* someone has already set up the proxy, it was possibly ourselves
- * in proxy_detect
+ * in proxy_detect (DONE will prevent further decoding of r->uri,
+ * only if proxyreq is set before pre_trans already).
*/
- return OK;
+ return pre_trans ? DONE : OK;
+ }
+
+ /* In early pre_trans hook, r->uri was not manipulated yet so we are
+ * compliant with RFC1945 at this point. Otherwise, it probably isn't
+ * an issue because this is a hybrid proxy/origin server.
+ */
+
+ dconf = ap_get_module_config(r->per_dir_config, &proxy_module);
+ conf = (proxy_server_conf *) ap_get_module_config(r->server->module_config,
+ &proxy_module);
+
+ /* Always and only do PROXY_MAP_ENCODED mapping in pre_trans, when
+ * r->uri is still encoded, or we might consider for instance that
+ * a decoded sub-delim is now a delimiter (e.g. "%3B" => ';' for
+ * path parameters), which it's not.
+ */
+ if ((pre_trans && !conf->map_encoded_one)
+ || (!pre_trans && conf->map_encoded_all)) {
+ /* Fast path, nothing at this stage */
+ return DECLINED;
}
if ((r->unparsed_uri[0] == '*' && r->unparsed_uri[1] == '\0')
@@ -796,37 +1074,42 @@ static int proxy_trans(request_rec *r)
return DECLINED;
}
- /* XXX: since r->uri has been manipulated already we're not really
- * compliant with RFC1945 at this point. But this probably isn't
- * an issue because this is a hybrid proxy/origin server.
- */
-
- dconf = ap_get_module_config(r->per_dir_config, &proxy_module);
-
/* short way - this location is reverse proxied? */
if (dconf->alias) {
- int rv = ap_proxy_trans_match(r, dconf->alias, dconf);
- if (DONE != rv) {
- return rv;
+ enc = (dconf->alias->flags & PROXYPASS_MAP_ENCODED) != 0;
+ if (!(pre_trans ^ enc)) {
+ int rv = ap_proxy_trans_match(r, dconf->alias, dconf);
+ if (rv != HTTP_CONTINUE) {
+ return rv;
+ }
}
}
- conf = (proxy_server_conf *) ap_get_module_config(r->server->module_config,
- &proxy_module);
-
/* long way - walk the list of aliases, find a match */
- if (conf->aliases->nelts) {
- ent = (struct proxy_alias *) conf->aliases->elts;
- for (i = 0; i < conf->aliases->nelts; i++) {
- int rv = ap_proxy_trans_match(r, &ent[i], dconf);
- if (DONE != rv) {
+ for (i = 0; i < conf->aliases->nelts; i++) {
+ ent = &((struct proxy_alias *)conf->aliases->elts)[i];
+ enc = (ent->flags & PROXYPASS_MAP_ENCODED) != 0;
+ if (!(pre_trans ^ enc)) {
+ int rv = ap_proxy_trans_match(r, ent, dconf);
+ if (rv != HTTP_CONTINUE) {
return rv;
}
}
}
+
return DECLINED;
}
+static int proxy_pre_translate_name(request_rec *r)
+{
+ return proxy_trans(r, 1);
+}
+
+static int proxy_translate_name(request_rec *r)
+{
+ return proxy_trans(r, 0);
+}
+
static int proxy_walk(request_rec *r)
{
proxy_server_conf *sconf = ap_get_module_config(r->server->module_config,
@@ -857,6 +1140,7 @@ static int proxy_walk(request_rec *r)
if (entry_proxy->refs && entry_proxy->refs->nelts) {
if (!rxpool) {
apr_pool_create(&rxpool, r->pool);
+ apr_pool_tag(rxpool, "proxy_rxpool");
}
nmatch = entry_proxy->refs->nelts;
pmatch = apr_palloc(rxpool, nmatch*sizeof(ap_regmatch_t));
@@ -979,7 +1263,7 @@ static int proxy_needsdomain(request_rec *r, const char *url, const char *domain
/* If host does contain a dot already, or it is "localhost", decline */
if (strchr(r->parsed_uri.hostname, '.') != NULL /* has domain, or IPv4 literal */
|| strchr(r->parsed_uri.hostname, ':') != NULL /* IPv6 literal */
- || strcasecmp(r->parsed_uri.hostname, "localhost") == 0)
+ || ap_cstr_casecmp(r->parsed_uri.hostname, "localhost") == 0)
return DECLINED; /* host name has a dot already */
ref = apr_table_get(r->headers_in, "Referer");
@@ -1049,9 +1333,10 @@ static int proxy_handler(request_rec *r)
char *end;
maxfwd = apr_strtoi64(str, &end, 10);
if (maxfwd < 0 || maxfwd == APR_INT64_MAX || *end) {
- return ap_proxyerror(r, HTTP_BAD_REQUEST,
- apr_psprintf(r->pool,
- "Max-Forwards value '%s' could not be parsed", str));
+ ap_log_rerror(APLOG_MARK, APLOG_WARNING, 0, r, APLOGNO(10188)
+ "Max-Forwards value '%s' could not be parsed", str);
+ return ap_proxyerror(r, HTTP_BAD_REQUEST,
+ "Max-Forwards request header could not be parsed");
}
else if (maxfwd == 0) {
switch (r->method_number) {
@@ -1185,23 +1470,31 @@ static int proxy_handler(request_rec *r)
if (strcmp(ents[i].scheme, "*") == 0 ||
(ents[i].use_regex &&
ap_regexec(ents[i].regexp, url, 0, NULL, 0) == 0) ||
- (p2 == NULL && strcasecmp(scheme, ents[i].scheme) == 0) ||
+ (p2 == NULL && ap_cstr_casecmp(scheme, ents[i].scheme) == 0) ||
(p2 != NULL &&
- strncasecmp(url, ents[i].scheme,
+ ap_cstr_casecmpn(url, ents[i].scheme,
strlen(ents[i].scheme)) == 0)) {
/* handle the scheme */
ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(01142)
"Trying to run scheme_handler against proxy");
+
+ if (ents[i].creds) {
+ apr_table_set(r->notes, "proxy-basic-creds", ents[i].creds);
+ ap_log_rerror(APLOG_MARK, APLOG_TRACE1, 0, r,
+ "Using proxy auth creds %s", ents[i].creds);
+ }
+
access_status = proxy_run_scheme_handler(r, worker,
conf, url,
ents[i].hostname,
ents[i].port);
+ if (ents[i].creds) apr_table_unset(r->notes, "proxy-basic-creds");
+
/* Did the scheme handler process the request? */
if (access_status != DECLINED) {
const char *cl_a;
- char *end;
apr_off_t cl;
/*
@@ -1211,18 +1504,17 @@ static int proxy_handler(request_rec *r)
if (access_status != HTTP_BAD_GATEWAY) {
goto cleanup;
}
+
cl_a = apr_table_get(r->headers_in, "Content-Length");
- if (cl_a) {
- apr_strtoff(&cl, cl_a, &end, 10);
+ if (cl_a && (!ap_parse_strict_length(&cl, cl_a)
+ || cl > 0)) {
/*
* The request body is of length > 0. We cannot
* retry with a direct connection since we already
* sent (parts of) the request body to the proxy
* and do not have any longer.
*/
- if (cl > 0) {
- goto cleanup;
- }
+ goto cleanup;
}
/*
* Transfer-Encoding was set as input header, so we had
@@ -1347,6 +1639,8 @@ static void * create_proxy_config(apr_pool_t *p, server_rec *s)
ps->forward = NULL;
ps->reverse = NULL;
ps->domain = NULL;
+ ps->map_encoded_one = 0;
+ ps->map_encoded_all = 1;
ps->id = apr_psprintf(p, "p%x", 1); /* simply for storage size */
ps->viaopt = via_off; /* initially backward compatible with 1.3.1 */
ps->viaopt_set = 0; /* 0 means default */
@@ -1373,6 +1667,7 @@ static void * create_proxy_config(apr_pool_t *p, server_rec *s)
ps->source_address = NULL;
ps->source_address_set = 0;
apr_pool_create_ex(&ps->pool, p, NULL, NULL);
+ apr_pool_tag(ps->pool, "proxy_server_conf");
return ps;
}
@@ -1503,6 +1798,9 @@ static void * merge_proxy_config(apr_pool_t *p, void *basev, void *overridesv)
ps->forward = overrides->forward ? overrides->forward : base->forward;
ps->reverse = overrides->reverse ? overrides->reverse : base->reverse;
+ ps->map_encoded_one = overrides->map_encoded_one || base->map_encoded_one;
+ ps->map_encoded_all = overrides->map_encoded_all && base->map_encoded_all;
+
ps->domain = (overrides->domain == NULL) ? base->domain : overrides->domain;
ps->id = (overrides->id == NULL) ? base->id : overrides->id;
ps->viaopt = (overrides->viaopt_set == 0) ? base->viaopt : overrides->viaopt;
@@ -1560,6 +1858,7 @@ static void *create_proxy_dir_config(apr_pool_t *p, char *dummy)
new->raliases = apr_array_make(p, 10, sizeof(struct proxy_alias));
new->cookie_paths = apr_array_make(p, 10, sizeof(struct proxy_alias));
new->cookie_domains = apr_array_make(p, 10, sizeof(struct proxy_alias));
+ new->error_override_codes = apr_array_make(p, 10, sizeof(int));
new->preserve_host_set = 0;
new->preserve_host = 0;
new->interpolate_env = -1; /* unset */
@@ -1567,10 +1866,17 @@ static void *create_proxy_dir_config(apr_pool_t *p, char *dummy)
new->error_override_set = 0;
new->add_forwarded_headers = 1;
new->add_forwarded_headers_set = 0;
+ new->forward_100_continue = 1;
+ new->forward_100_continue_set = 0;
return (void *) new;
}
+static int int_order(const void *i1, const void *i2)
+{
+ return *(const int *)i1 - *(const int *)i2;
+}
+
static void *merge_proxy_dir_config(apr_pool_t *p, void *basev, void *addv)
{
proxy_dir_conf *new = (proxy_dir_conf *) apr_pcalloc(p, sizeof(proxy_dir_conf));
@@ -1588,6 +1894,17 @@ static void *merge_proxy_dir_config(apr_pool_t *p, void *basev, void *addv)
= apr_array_append(p, base->cookie_paths, add->cookie_paths);
new->cookie_domains
= apr_array_append(p, base->cookie_domains, add->cookie_domains);
+ new->error_override_codes
+ = apr_array_append(p, base->error_override_codes, add->error_override_codes);
+ /* Keep the array sorted for binary search (since "base" and "add" are
+ * already sorted, it's only needed if both are merged).
+ */
+ if (base->error_override_codes->nelts
+ && add->error_override_codes->nelts) {
+ qsort(new->error_override_codes->elts,
+ new->error_override_codes->nelts,
+ sizeof(int), int_order);
+ }
new->interpolate_env = (add->interpolate_env == -1) ? base->interpolate_env
: add->interpolate_env;
new->preserve_host = (add->preserve_host_set == 0) ? base->preserve_host
@@ -1603,12 +1920,17 @@ static void *merge_proxy_dir_config(apr_pool_t *p, void *basev, void *addv)
: add->add_forwarded_headers;
new->add_forwarded_headers_set = add->add_forwarded_headers_set
|| base->add_forwarded_headers_set;
-
+ new->forward_100_continue =
+ (add->forward_100_continue_set == 0) ? base->forward_100_continue
+ : add->forward_100_continue;
+ new->forward_100_continue_set = add->forward_100_continue_set
+ || base->forward_100_continue_set;
+
return new;
}
-static const char *
- add_proxy(cmd_parms *cmd, void *dummy, const char *f1, const char *r1, int regex)
+static const char *add_proxy(cmd_parms *cmd, void *dummy, const char *f1,
+ const char *r1, const char *creds, int regex)
{
server_rec *s = cmd->server;
proxy_server_conf *conf =
@@ -1666,19 +1988,24 @@ static const char *
new->port = port;
new->regexp = reg;
new->use_regex = regex;
+ if (creds) {
+ new->creds = apr_pstrcat(cmd->pool, "Basic ",
+ ap_pbase64encode(cmd->pool, (char *)creds),
+ NULL);
+ }
return NULL;
}
-static const char *
- add_proxy_noregex(cmd_parms *cmd, void *dummy, const char *f1, const char *r1)
+static const char *add_proxy_noregex(cmd_parms *cmd, void *dummy, const char *f1,
+ const char *r1, const char *creds)
{
- return add_proxy(cmd, dummy, f1, r1, 0);
+ return add_proxy(cmd, dummy, f1, r1, creds, 0);
}
-static const char *
- add_proxy_regex(cmd_parms *cmd, void *dummy, const char *f1, const char *r1)
+static const char *add_proxy_regex(cmd_parms *cmd, void *dummy, const char *f1,
+ const char *r1, const char *creds)
{
- return add_proxy(cmd, dummy, f1, r1, 1);
+ return add_proxy(cmd, dummy, f1, r1, creds, 1);
}
PROXY_DECLARE(const char *) ap_proxy_de_socketfy(apr_pool_t *p, const char *url)
@@ -1688,8 +2015,8 @@ PROXY_DECLARE(const char *) ap_proxy_de_socketfy(apr_pool_t *p, const char *url)
* We could be passed a URL during the config stage that contains
* the UDS path... ignore it
*/
- if (!strncasecmp(url, "unix:", 5) &&
- ((ptr = ap_strchr_c(url, '|')) != NULL)) {
+ if (!ap_cstr_casecmpn(url, "unix:", 5) &&
+ ((ptr = ap_strchr_c(url + 5, '|')) != NULL)) {
/* move past the 'unix:...|' UDS path info */
const char *ret, *c;
@@ -1721,12 +2048,14 @@ static const char *
struct proxy_alias *new;
char *f = cmd->path;
char *r = NULL;
+ const char *real;
char *word;
apr_table_t *params = apr_table_make(cmd->pool, 5);
const apr_array_header_t *arr;
const apr_table_entry_t *elts;
int i;
- int use_regex = is_regex;
+ unsigned int worker_type = (is_regex) ? AP_PROXY_WORKER_IS_MATCH
+ : AP_PROXY_WORKER_IS_PREFIX;
unsigned int flags = 0;
const char *err;
@@ -1742,7 +2071,7 @@ static const char *
if (is_regex) {
return "ProxyPassMatch invalid syntax ('~' usage).";
}
- use_regex = 1;
+ worker_type = AP_PROXY_WORKER_IS_MATCH;
continue;
}
f = word;
@@ -1777,15 +2106,39 @@ static const char *
"in the form 'key=value'.";
}
}
- else
+ else {
*val++ = '\0';
- apr_table_setn(params, word, val);
+ }
+ if (!strcasecmp(word, "mapping")) {
+ if (!strcasecmp(val, "encoded")) {
+ flags |= PROXYPASS_MAP_ENCODED;
+ }
+ else if (!strcasecmp(val, "servlet")) {
+ flags |= PROXYPASS_MAP_SERVLET;
+ }
+ else {
+ return "unknown mapping";
+ }
+ }
+ else {
+ apr_table_setn(params, word, val);
+ }
}
- };
+ }
+ if (flags & PROXYPASS_MAP_ENCODED) {
+ conf->map_encoded_one = 1;
+ }
+ else {
+ conf->map_encoded_all = 0;
+ }
if (r == NULL) {
return "ProxyPass|ProxyPassMatch needs a path when not defined in a location";
}
+ if (!(real = ap_proxy_de_socketfy(cmd->temp_pool, r))) {
+ return "ProxyPass|ProxyPassMatch uses an invalid \"unix:\" URL";
+ }
+
/* if per directory, save away the single alias */
if (cmd->path) {
@@ -1793,7 +2146,7 @@ static const char *
dconf->alias_set = 1;
new = dconf->alias;
if (apr_fnmatch_test(f)) {
- use_regex = 1;
+ worker_type = AP_PROXY_WORKER_IS_MATCH;
}
}
/* if per server, add to the alias array */
@@ -1802,9 +2155,9 @@ static const char *
}
new->fake = apr_pstrdup(cmd->pool, f);
- new->real = apr_pstrdup(cmd->pool, ap_proxy_de_socketfy(cmd->pool, r));
+ new->real = apr_pstrdup(cmd->pool, real);
new->flags = flags;
- if (use_regex) {
+ if (worker_type & AP_PROXY_WORKER_IS_MATCH) {
new->regex = ap_pregcomp(cmd->pool, f, AP_REG_EXTENDED);
if (new->regex == NULL)
return "Regular expression could not be compiled.";
@@ -1828,7 +2181,7 @@ static const char *
* cannot be parsed anyway with apr_uri_parse later on in
* ap_proxy_define_balancer / ap_proxy_update_balancer
*/
- if (use_regex) {
+ if (worker_type & AP_PROXY_WORKER_IS_MATCH) {
fake_copy = NULL;
}
else {
@@ -1851,15 +2204,20 @@ static const char *
new->balancer = balancer;
}
else {
- proxy_worker *worker = ap_proxy_get_worker(cmd->temp_pool, NULL, conf, ap_proxy_de_socketfy(cmd->pool, r));
int reuse = 0;
+ proxy_worker *worker = ap_proxy_get_worker_ex(cmd->temp_pool, NULL,
+ conf, new->real,
+ worker_type);
if (!worker) {
- const char *err = ap_proxy_define_worker(cmd->pool, &worker, NULL, conf, r, 0);
+ const char *err;
+ err = ap_proxy_define_worker_ex(cmd->pool, &worker, NULL,
+ conf, r, worker_type);
if (err)
return apr_pstrcat(cmd->temp_pool, "ProxyPass ", err, NULL);
PROXY_COPY_CONF_PARAMS(worker, conf);
- } else {
+ }
+ else {
reuse = 1;
ap_log_error(APLOG_MARK, APLOG_INFO, 0, cmd->server, APLOGNO(01145)
"Sharing worker '%s' instead of creating new worker '%s'",
@@ -2078,14 +2436,50 @@ static const char *
}
static const char *
- set_proxy_error_override(cmd_parms *parms, void *dconf, int flag)
+ set_proxy_error_override(cmd_parms *parms, void *dconf, const char *arg)
{
proxy_dir_conf *conf = dconf;
- conf->error_override = flag;
- conf->error_override_set = 1;
+ if (strcasecmp(arg, "Off") == 0) {
+ conf->error_override = 0;
+ conf->error_override_set = 1;
+ }
+ else if (strcasecmp(arg, "On") == 0) {
+ conf->error_override = 1;
+ conf->error_override_set = 1;
+ }
+ else if (conf->error_override_set == 1) {
+ int *newcode;
+ int argcode, i;
+ if (!apr_isdigit(arg[0]))
+ return "ProxyErrorOverride: status codes to intercept must be numeric";
+ if (!conf->error_override)
+ return "ProxyErrorOverride: status codes must follow a value of 'on'";
+
+ argcode = strtol(arg, NULL, 10);
+ if (!ap_is_HTTP_ERROR(argcode))
+ return "ProxyErrorOverride: status codes to intercept must be valid HTTP Status Codes >=400 && <600";
+
+ newcode = apr_array_push(conf->error_override_codes);
+ *newcode = argcode;
+
+ /* Keep the array sorted for binary search. */
+ for (i = conf->error_override_codes->nelts - 1; i > 0; --i) {
+ int *oldcode = &((int *)conf->error_override_codes->elts)[i - 1];
+ if (*oldcode <= argcode) {
+ break;
+ }
+ *newcode = *oldcode;
+ *oldcode = argcode;
+ newcode = oldcode;
+ }
+ }
+ else
+ return "ProxyErrorOverride first parameter must be one of: off | on";
+
return NULL;
}
+
static const char *
add_proxy_http_headers(cmd_parms *parms, void *dconf, int flag)
{
@@ -2103,6 +2497,14 @@ static const char *
conf->preserve_host_set = 1;
return NULL;
}
+static const char *
+ forward_100_continue(cmd_parms *parms, void *dconf, int flag)
+{
+ proxy_dir_conf *conf = dconf;
+ conf->forward_100_continue = flag;
+ conf->forward_100_continue_set = 1;
+ return NULL;
+}
static const char *
set_recv_buffer_size(cmd_parms *parms, void *dummy, const char *arg)
@@ -2279,6 +2681,7 @@ static const char *add_member(cmd_parms *cmd, void *dummy, const char *arg)
proxy_worker *worker;
char *path = cmd->path;
char *name = NULL;
+ const char *real;
char *word;
apr_table_t *params = apr_table_make(cmd->pool, 5);
const apr_array_header_t *arr;
@@ -2319,6 +2722,9 @@ static const char *add_member(cmd_parms *cmd, void *dummy, const char *arg)
return "BalancerMember must define balancer name when outside <Proxy > section";
if (!name)
return "BalancerMember must define remote proxy server";
+ if (!(real = ap_proxy_de_socketfy(cmd->temp_pool, name))) {
+ return "BalancerMember uses an invalid \"unix:\" URL";
+ }
ap_str_tolower(path); /* lowercase scheme://hostname */
@@ -2331,7 +2737,7 @@ static const char *add_member(cmd_parms *cmd, void *dummy, const char *arg)
}
/* Try to find existing worker */
- worker = ap_proxy_get_worker(cmd->temp_pool, balancer, conf, ap_proxy_de_socketfy(cmd->temp_pool, name));
+ worker = ap_proxy_get_worker(cmd->temp_pool, balancer, conf, real);
if (!worker) {
ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, cmd->server, APLOGNO(01147)
"Defining worker '%s' for balancer '%s'",
@@ -2361,7 +2767,7 @@ static const char *add_member(cmd_parms *cmd, void *dummy, const char *arg)
elts[i].key, elts[i].val, ap_proxy_worker_name(cmd->pool, worker));
} else {
err = set_worker_param(cmd->pool, cmd->server, worker, elts[i].key,
- elts[i].val);
+ elts[i].val);
if (err)
return apr_pstrcat(cmd->temp_pool, "BalancerMember ", err, NULL);
}
@@ -2380,6 +2786,7 @@ static const char *
char *word, *val;
proxy_balancer *balancer = NULL;
proxy_worker *worker = NULL;
+ unsigned int worker_type = 0;
int in_proxy_section = 0;
/* XXX: Should this be NOT_IN_DIRECTORY|NOT_IN_FILES? */
const char *err = ap_check_cmd_context(cmd, NOT_IN_HTACCESS);
@@ -2396,6 +2803,13 @@ static const char *
name = ap_getword_conf(cmd->temp_pool, &pargs);
if ((word = ap_strchr(name, '>')))
*word = '\0';
+ if (strncasecmp(cmd->directive->parent->directive + 6,
+ "Match", 5) == 0) {
+ worker_type = AP_PROXY_WORKER_IS_MATCH;
+ }
+ else {
+ worker_type = AP_PROXY_WORKER_IS_PREFIX;
+ }
in_proxy_section = 1;
}
else {
@@ -2420,11 +2834,18 @@ static const char *
}
}
else {
- worker = ap_proxy_get_worker(cmd->temp_pool, NULL, conf, ap_proxy_de_socketfy(cmd->temp_pool, name));
+ const char *real;
+
+ if (!(real = ap_proxy_de_socketfy(cmd->temp_pool, name))) {
+ return "ProxySet uses an invalid \"unix:\" URL";
+ }
+
+ worker = ap_proxy_get_worker_ex(cmd->temp_pool, NULL, conf,
+ real, worker_type);
if (!worker) {
if (in_proxy_section) {
- err = ap_proxy_define_worker(cmd->pool, &worker, NULL,
- conf, name, 0);
+ err = ap_proxy_define_worker_ex(cmd->pool, &worker, NULL,
+ conf, name, worker_type);
if (err)
return apr_pstrcat(cmd->temp_pool, "ProxySet ",
err, NULL);
@@ -2478,7 +2899,7 @@ static const char *proxysection(cmd_parms *cmd, void *mconfig, const char *arg)
char *word, *val;
proxy_balancer *balancer = NULL;
proxy_worker *worker = NULL;
-
+ unsigned int worker_type = AP_PROXY_WORKER_IS_PREFIX;
const char *err = ap_check_cmd_context(cmd, NOT_IN_DIR_CONTEXT);
proxy_server_conf *sconf =
(proxy_server_conf *) ap_get_module_config(cmd->server->module_config, &proxy_module);
@@ -2516,6 +2937,7 @@ static const char *proxysection(cmd_parms *cmd, void *mconfig, const char *arg)
if (!r) {
return "Regex could not be compiled";
}
+ worker_type = AP_PROXY_WORKER_IS_MATCH;
}
/* initialize our config and fetch it */
@@ -2562,11 +2984,17 @@ static const char *proxysection(cmd_parms *cmd, void *mconfig, const char *arg)
}
}
else {
- worker = ap_proxy_get_worker(cmd->temp_pool, NULL, sconf,
- ap_proxy_de_socketfy(cmd->temp_pool, (char*)conf->p));
+ const char *real;
+
+ if (!(real = ap_proxy_de_socketfy(cmd->temp_pool, conf->p))) {
+ return "<Proxy/ProxyMatch > uses an invalid \"unix:\" URL";
+ }
+
+ worker = ap_proxy_get_worker_ex(cmd->temp_pool, NULL, sconf,
+ real, worker_type);
if (!worker) {
- err = ap_proxy_define_worker(cmd->pool, &worker, NULL,
- sconf, conf->p, 0);
+ err = ap_proxy_define_worker_ex(cmd->pool, &worker, NULL, sconf,
+ conf->p, worker_type);
if (err)
return apr_pstrcat(cmd->temp_pool, thiscmd->name,
" ", err, NULL);
@@ -2616,9 +3044,9 @@ static const command_rec proxy_cmds[] =
"location, in regular expression syntax"),
AP_INIT_FLAG("ProxyRequests", set_proxy_req, NULL, RSRC_CONF,
"on if the true proxy requests should be accepted"),
- AP_INIT_TAKE2("ProxyRemote", add_proxy_noregex, NULL, RSRC_CONF,
+ AP_INIT_TAKE23("ProxyRemote", add_proxy_noregex, NULL, RSRC_CONF,
"a scheme, partial URL or '*' and a proxy server"),
- AP_INIT_TAKE2("ProxyRemoteMatch", add_proxy_regex, NULL, RSRC_CONF,
+ AP_INIT_TAKE23("ProxyRemoteMatch", add_proxy_regex, NULL, RSRC_CONF,
"a regex pattern and a proxy server"),
AP_INIT_FLAG("ProxyPassInterpolateEnv", ap_set_flag_slot_char,
(void*)APR_OFFSETOF(proxy_dir_conf, interpolate_env),
@@ -2647,7 +3075,7 @@ static const command_rec proxy_cmds[] =
"The default intranet domain name (in absence of a domain in the URL)"),
AP_INIT_TAKE1("ProxyVia", set_via_opt, NULL, RSRC_CONF,
"Configure Via: proxy header header to one of: on | off | block | full"),
- AP_INIT_FLAG("ProxyErrorOverride", set_proxy_error_override, NULL, RSRC_CONF|ACCESS_CONF,
+ AP_INIT_ITERATE("ProxyErrorOverride", set_proxy_error_override, NULL, RSRC_CONF|ACCESS_CONF,
"use our error handling pages instead of the servers' we are proxying"),
AP_INIT_FLAG("ProxyPreserveHost", set_preserve_host, NULL, RSRC_CONF|ACCESS_CONF,
"on if we should preserve host header while proxying"),
@@ -2676,14 +3104,15 @@ static const command_rec proxy_cmds[] =
"Configure local source IP used for request forward"),
AP_INIT_FLAG("ProxyAddHeaders", add_proxy_http_headers, NULL, RSRC_CONF|ACCESS_CONF,
"on if X-Forwarded-* headers should be added or completed"),
+ AP_INIT_FLAG("Proxy100Continue", forward_100_continue, NULL, RSRC_CONF|ACCESS_CONF,
+ "on if 100-Continue should be forwarded to the origin server, off if the "
+ "proxy should handle it by itself"),
{NULL}
};
static APR_OPTIONAL_FN_TYPE(ssl_proxy_enable) *proxy_ssl_enable = NULL;
static APR_OPTIONAL_FN_TYPE(ssl_engine_disable) *proxy_ssl_disable = NULL;
static APR_OPTIONAL_FN_TYPE(ssl_engine_set) *proxy_ssl_engine = NULL;
-static APR_OPTIONAL_FN_TYPE(ssl_is_https) *proxy_is_https = NULL;
-static APR_OPTIONAL_FN_TYPE(ssl_var_lookup) *proxy_ssl_val = NULL;
PROXY_DECLARE(int) ap_proxy_ssl_enable(conn_rec *c)
{
@@ -2691,20 +3120,15 @@ PROXY_DECLARE(int) ap_proxy_ssl_enable(conn_rec *c)
* if c == NULL just check if the optional function was imported
* else run the optional function so ssl filters are inserted
*/
- if (proxy_ssl_enable) {
- return c ? proxy_ssl_enable(c) : 1;
+ if (c == NULL) {
+ return ap_ssl_has_outgoing_handlers();
}
-
- return 0;
+ return ap_ssl_bind_outgoing(c, NULL, 1) == OK;
}
PROXY_DECLARE(int) ap_proxy_ssl_disable(conn_rec *c)
{
- if (proxy_ssl_disable) {
- return proxy_ssl_disable(c);
- }
-
- return 0;
+ return ap_ssl_bind_outgoing(c, NULL, 0) == OK;
}
PROXY_DECLARE(int) ap_proxy_ssl_engine(conn_rec *c,
@@ -2715,41 +3139,22 @@ PROXY_DECLARE(int) ap_proxy_ssl_engine(conn_rec *c,
* if c == NULL just check if the optional function was imported
* else run the optional function so ssl filters are inserted
*/
- if (proxy_ssl_engine) {
- return c ? proxy_ssl_engine(c, per_dir_config, 1, enable) : 1;
+ if (c == NULL) {
+ return ap_ssl_has_outgoing_handlers();
}
-
- if (!per_dir_config) {
- if (enable) {
- return ap_proxy_ssl_enable(c);
- }
- else {
- return ap_proxy_ssl_disable(c);
- }
- }
-
- return 0;
+ return ap_ssl_bind_outgoing(c, per_dir_config, enable) == OK;
}
PROXY_DECLARE(int) ap_proxy_conn_is_https(conn_rec *c)
{
- if (proxy_is_https) {
- return proxy_is_https(c);
- }
- else
- return 0;
+ return ap_ssl_conn_is_ssl(c);
}
PROXY_DECLARE(const char *) ap_proxy_ssl_val(apr_pool_t *p, server_rec *s,
conn_rec *c, request_rec *r,
const char *var)
{
- if (proxy_ssl_val) {
- /* XXX Perhaps the casting useless */
- return (const char *)proxy_ssl_val(p, s, c, r, (char *)var);
- }
- else
- return NULL;
+ return ap_ssl_var_lookup(p, s, c, r, var);
}
static int proxy_post_config(apr_pool_t *pconf, apr_pool_t *plog,
@@ -2767,8 +3172,6 @@ static int proxy_post_config(apr_pool_t *pconf, apr_pool_t *plog,
proxy_ssl_enable = APR_RETRIEVE_OPTIONAL_FN(ssl_proxy_enable);
proxy_ssl_disable = APR_RETRIEVE_OPTIONAL_FN(ssl_engine_disable);
proxy_ssl_engine = APR_RETRIEVE_OPTIONAL_FN(ssl_engine_set);
- proxy_is_https = APR_RETRIEVE_OPTIONAL_FN(ssl_is_https);
- proxy_ssl_val = APR_RETRIEVE_OPTIONAL_FN(ssl_var_lookup);
ap_proxy_strmatch_path = apr_strmatch_precompile(pconf, "path=", 0);
ap_proxy_strmatch_domain = apr_strmatch_precompile(pconf, "domain=", 0);
@@ -2867,7 +3270,7 @@ static int proxy_status_hook(request_rec *r, int flags)
}
else {
ap_rprintf(r, "ProxyBalancer[%d]Worker[%d]Name: %s\n",
- i, n, (*worker)->s->name);
+ i, n, (*worker)->s->name_ex);
ap_rprintf(r, "ProxyBalancer[%d]Worker[%d]Status: %s\n",
i, n, ap_proxy_parse_wstatus(r->pool, *worker));
ap_rprintf(r, "ProxyBalancer[%d]Worker[%d]Elected: %"
@@ -2939,45 +3342,49 @@ static void child_init(apr_pool_t *p, server_rec *s)
*/
worker = (proxy_worker *)conf->workers->elts;
for (i = 0; i < conf->workers->nelts; i++, worker++) {
- ap_proxy_initialize_worker(worker, s, conf->pool);
+ ap_proxy_initialize_worker(worker, s, p);
}
/* Create and initialize forward worker if defined */
if (conf->req_set && conf->req) {
proxy_worker *forward;
- ap_proxy_define_worker(p, &forward, NULL, NULL, "http://www.apache.org", 0);
+ ap_proxy_define_worker(conf->pool, &forward, NULL, NULL,
+ "http://www.apache.org", 0);
conf->forward = forward;
PROXY_STRNCPY(conf->forward->s->name, "proxy:forward");
+ PROXY_STRNCPY(conf->forward->s->name_ex, "proxy:forward");
PROXY_STRNCPY(conf->forward->s->hostname, "*"); /* for compatibility */
PROXY_STRNCPY(conf->forward->s->hostname_ex, "*");
PROXY_STRNCPY(conf->forward->s->scheme, "*");
conf->forward->hash.def = conf->forward->s->hash.def =
- ap_proxy_hashfunc(conf->forward->s->name, PROXY_HASHFUNC_DEFAULT);
+ ap_proxy_hashfunc(conf->forward->s->name_ex, PROXY_HASHFUNC_DEFAULT);
conf->forward->hash.fnv = conf->forward->s->hash.fnv =
- ap_proxy_hashfunc(conf->forward->s->name, PROXY_HASHFUNC_FNV);
+ ap_proxy_hashfunc(conf->forward->s->name_ex, PROXY_HASHFUNC_FNV);
/* Do not disable worker in case of errors */
conf->forward->s->status |= PROXY_WORKER_IGNORE_ERRORS;
/* Mark as the "generic" worker */
conf->forward->s->status |= PROXY_WORKER_GENERIC;
- ap_proxy_initialize_worker(conf->forward, s, conf->pool);
+ ap_proxy_initialize_worker(conf->forward, s, p);
/* Disable address cache for generic forward worker */
conf->forward->s->is_address_reusable = 0;
}
if (!reverse) {
- ap_proxy_define_worker(p, &reverse, NULL, NULL, "http://www.apache.org", 0);
+ ap_proxy_define_worker(conf->pool, &reverse, NULL, NULL,
+ "http://www.apache.org", 0);
PROXY_STRNCPY(reverse->s->name, "proxy:reverse");
+ PROXY_STRNCPY(reverse->s->name_ex, "proxy:reverse");
PROXY_STRNCPY(reverse->s->hostname, "*"); /* for compatibility */
PROXY_STRNCPY(reverse->s->hostname_ex, "*");
PROXY_STRNCPY(reverse->s->scheme, "*");
reverse->hash.def = reverse->s->hash.def =
- ap_proxy_hashfunc(reverse->s->name, PROXY_HASHFUNC_DEFAULT);
+ ap_proxy_hashfunc(reverse->s->name_ex, PROXY_HASHFUNC_DEFAULT);
reverse->hash.fnv = reverse->s->hash.fnv =
- ap_proxy_hashfunc(reverse->s->name, PROXY_HASHFUNC_FNV);
+ ap_proxy_hashfunc(reverse->s->name_ex, PROXY_HASHFUNC_FNV);
/* Do not disable worker in case of errors */
reverse->s->status |= PROXY_WORKER_IGNORE_ERRORS;
/* Mark as the "generic" worker */
reverse->s->status |= PROXY_WORKER_GENERIC;
conf->reverse = reverse;
- ap_proxy_initialize_worker(conf->reverse, s, conf->pool);
+ ap_proxy_initialize_worker(conf->reverse, s, p);
/* Disable address cache for generic reverse worker */
reverse->s->is_address_reusable = 0;
}
@@ -3003,7 +3410,7 @@ static int proxy_pre_config(apr_pool_t *pconf, apr_pool_t *plog,
APR_OPTIONAL_HOOK(ap, status_hook, proxy_status_hook, NULL, NULL,
APR_HOOK_MIDDLE);
- /* Reset workers count on gracefull restart */
+ /* Reset workers count on graceful restart */
proxy_lb_workers = 0;
set_worker_hc_param_f = APR_RETRIEVE_OPTIONAL_FN(set_worker_hc_param);
return OK;
@@ -3023,7 +3430,10 @@ static void register_hooks(apr_pool_t *p)
/* handler */
ap_hook_handler(proxy_handler, NULL, NULL, APR_HOOK_FIRST);
/* filename-to-URI translation */
- ap_hook_translate_name(proxy_trans, aszSucc, NULL, APR_HOOK_FIRST);
+ ap_hook_pre_translate_name(proxy_pre_translate_name, NULL, NULL,
+ APR_HOOK_MIDDLE);
+ ap_hook_translate_name(proxy_translate_name, aszSucc, NULL,
+ APR_HOOK_FIRST);
/* walk <Proxy > entries and suppress default TRACE behavior */
ap_hook_map_to_storage(proxy_map_location, NULL,NULL, APR_HOOK_FIRST);
/* fixups */
@@ -3058,6 +3468,7 @@ APR_HOOK_STRUCT(
APR_HOOK_LINK(pre_request)
APR_HOOK_LINK(post_request)
APR_HOOK_LINK(request_status)
+ APR_HOOK_LINK(check_trans)
)
APR_IMPLEMENT_EXTERNAL_HOOK_RUN_FIRST(proxy, PROXY, int, scheme_handler,
@@ -3066,6 +3477,9 @@ APR_IMPLEMENT_EXTERNAL_HOOK_RUN_FIRST(proxy, PROXY, int, scheme_handler,
char *url, const char *proxyhost,
apr_port_t proxyport),(r,worker,conf,
url,proxyhost,proxyport),DECLINED)
+APR_IMPLEMENT_EXTERNAL_HOOK_RUN_FIRST(proxy, PROXY, int, check_trans,
+ (request_rec *r, const char *url),
+ (r, url), DECLINED)
APR_IMPLEMENT_EXTERNAL_HOOK_RUN_FIRST(proxy, PROXY, int, canon_handler,
(request_rec *r, char *url),(r,
url),DECLINED)
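The ProxyErrorOverride changes above keep error_override_codes sorted, both on insertion and when two configs are merged, so that status lookups can use a binary search with the same int_order() comparator. A hedged lookup sketch; the helper name is hypothetical and the "empty list overrides every error" fallback is an assumption, not taken from this diff:

    #include <stdlib.h>      /* bsearch() */
    #include "apr_tables.h"  /* apr_array_header_t */

    /* Hypothetical helper: should 'status' be overridden? */
    static int override_code_matches(const apr_array_header_t *codes, int status)
    {
        if (codes->nelts == 0) {
            return 1; /* assumed fallback: no explicit list given */
        }
        return bsearch(&status, codes->elts, codes->nelts,
                       sizeof(int), int_order) != NULL;
    }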
diff --git a/modules/proxy/mod_proxy.h b/modules/proxy/mod_proxy.h
index aabd09f..51a55f8 100644
--- a/modules/proxy/mod_proxy.h
+++ b/modules/proxy/mod_proxy.h
@@ -58,6 +58,7 @@
#include "http_main.h"
#include "http_log.h"
#include "http_connection.h"
+#include "http_ssl.h"
#include "util_filter.h"
#include "util_ebcdic.h"
#include "ap_provider.h"
@@ -75,8 +76,12 @@ enum enctype {
enc_path, enc_search, enc_user, enc_fpath, enc_parm
};
+/* Flags for ap_proxy_canonenc_ex */
+#define PROXY_CANONENC_FORCEDEC 0x01
+#define PROXY_CANONENC_NOENCODEDSLASHENCODING 0x02
+
typedef enum {
- NONE, TCP, OPTIONS, HEAD, GET, CPING, PROVIDER, EOT
+ NONE, TCP, OPTIONS, HEAD, GET, CPING, PROVIDER, OPTIONS11, HEAD11, GET11, EOT
} hcmethod_t;
typedef struct {
@@ -116,6 +121,7 @@ struct proxy_remote {
const char *protocol; /* the scheme used to talk to this proxy */
const char *hostname; /* the hostname of this proxy */
ap_regex_t *regexp; /* compiled regex (if any) for the remote */
+ const char *creds; /* auth credentials (if any) for the proxy */
int use_regex; /* simple boolean. True if we have a regex pattern */
apr_port_t port; /* the port for this proxy */
};
@@ -123,6 +129,8 @@ struct proxy_remote {
#define PROXYPASS_NOCANON 0x01
#define PROXYPASS_INTERPOLATE 0x02
#define PROXYPASS_NOQUERY 0x04
+#define PROXYPASS_MAP_ENCODED 0x08
+#define PROXYPASS_MAP_SERVLET 0x18 /* + MAP_ENCODED */
struct proxy_alias {
const char *real;
const char *fake;
@@ -199,9 +207,10 @@ typedef struct {
unsigned int inherit_set:1;
unsigned int ppinherit:1;
unsigned int ppinherit_set:1;
+ unsigned int map_encoded_one:1;
+ unsigned int map_encoded_all:1;
} proxy_server_conf;
-
typedef struct {
const char *p; /* The path */
ap_regex_t *r; /* Is this a regex? */
@@ -240,6 +249,10 @@ typedef struct {
/** Named back references */
apr_array_header_t *refs;
+ unsigned int forward_100_continue:1;
+ unsigned int forward_100_continue_set:1;
+
+ apr_array_header_t *error_override_codes;
} proxy_dir_conf;
/* if we interpolate env vars per-request, we'll need a per-request
@@ -251,6 +264,8 @@ typedef struct {
apr_array_header_t* cookie_domains;
} proxy_req_conf;
+struct proxy_address; /* opaque TTL'ed and refcount'ed address */
+
typedef struct {
conn_rec *connection;
request_rec *r; /* Request record of the backend request
@@ -276,6 +291,9 @@ typedef struct {
* and its scpool/bucket_alloc (NULL before),
* must be left cleaned when used (locally).
*/
+ apr_pool_t *uds_pool; /* Subpool for reusing UDS paths */
+ apr_pool_t *fwd_pool; /* Subpool for reusing ProxyRemote infos */
+ struct proxy_address *address; /* Current remote address */
} proxy_conn_rec;
typedef struct {
@@ -285,12 +303,15 @@ typedef struct {
/* Connection pool */
struct proxy_conn_pool {
- apr_pool_t *pool; /* The pool used in constructor and destructor calls */
- apr_sockaddr_t *addr; /* Preparsed remote address info */
- apr_reslist_t *res; /* Connection resource list */
- proxy_conn_rec *conn; /* Single connection for prefork mpm */
+ apr_pool_t *pool; /* The pool used in constructor and destructor calls */
+ apr_sockaddr_t *addr; /* Preparsed remote address info */
+ apr_reslist_t *res; /* Connection resource list */
+ proxy_conn_rec *conn; /* Single connection for prefork mpm */
+ apr_pool_t *dns_pool; /* The pool used for worker scoped DNS resolutions */
};
+#define AP_VOLATILIZE_T(T, x) (*(T volatile *)&(x))
+
/* worker status bits */
/*
* NOTE: Keep up-to-date w/ proxy_wstat_tbl[]
@@ -343,6 +364,8 @@ PROXY_WORKER_HC_FAIL )
#define PROXY_WORKER_IS_HCFAILED(f) ( (f)->s->status & PROXY_WORKER_HC_FAIL )
+#define PROXY_WORKER_IS_ERROR(f) ( (f)->s->status & PROXY_WORKER_IN_ERROR )
+
#define PROXY_WORKER_IS(f, b) ( (f)->s->status & (b) )
/* default worker retry timeout in seconds */
@@ -357,8 +380,10 @@ PROXY_WORKER_HC_FAIL )
#define PROXY_WORKER_MAX_HOSTNAME_SIZE 64
#define PROXY_BALANCER_MAX_HOSTNAME_SIZE PROXY_WORKER_MAX_HOSTNAME_SIZE
#define PROXY_BALANCER_MAX_STICKY_SIZE 64
+#define PROXY_WORKER_MAX_SECRET_SIZE 64
#define PROXY_RFC1035_HOSTNAME_SIZE 256
+#define PROXY_WORKER_EXT_NAME_SIZE 384
/* RFC-1035 mentions limits of 255 for host-names and 253 for domain-names,
* dotted together(?) this would fit the below size (+ trailing NUL).
@@ -379,6 +404,15 @@ do { \
(w)->s->io_buffer_size_set = (c)->io_buffer_size_set; \
} while (0)
+#define PROXY_SHOULD_PING_100_CONTINUE(w, r) \
+ ((w)->s->ping_timeout_set \
+ && (PROXYREQ_REVERSE == (r)->proxyreq) \
+ && ap_request_has_body((r)))
+
+#define PROXY_DO_100_CONTINUE(w, r) \
+ (PROXY_SHOULD_PING_100_CONTINUE(w, r) \
+ && !apr_table_get((r)->subprocess_env, "force-proxy-request-1.0"))
+
/* use 2 hashes */
typedef struct {
unsigned int def;
@@ -441,6 +475,7 @@ typedef struct {
unsigned int keepalive_set:1;
unsigned int disablereuse_set:1;
unsigned int was_malloced:1;
+ unsigned int is_name_matchable:1;
char hcuri[PROXY_WORKER_MAX_ROUTE_SIZE]; /* health check uri */
char hcexpr[PROXY_WORKER_MAX_SCHEME_SIZE]; /* name of condition expr for health check */
int passes; /* number of successes for check to pass */
@@ -453,6 +488,11 @@ typedef struct {
char hostname_ex[PROXY_RFC1035_HOSTNAME_SIZE]; /* RFC1035 compliant version of the remote backend address */
apr_size_t response_field_size; /* Size of proxy response buffer in bytes. */
unsigned int response_field_size_set:1;
+ char secret[PROXY_WORKER_MAX_SECRET_SIZE]; /* authentication secret (e.g. AJP13) */
+ char name_ex[PROXY_WORKER_EXT_NAME_SIZE]; /* Extended name (>96 chars for 2.4.x) */
+ unsigned int address_ttl_set:1;
+ apr_int32_t address_ttl; /* backend address' TTL (seconds) */
+ apr_uint32_t address_expiry; /* backend address' next expiry time */
} proxy_worker_shared;
#define ALIGNED_PROXY_WORKER_SHARED_SIZE (APR_ALIGN_DEFAULT(sizeof(proxy_worker_shared)))
@@ -464,9 +504,12 @@ struct proxy_worker {
proxy_conn_pool *cp; /* Connection pool to use */
proxy_worker_shared *s; /* Shared data */
proxy_balancer *balancer; /* which balancer am I in? */
+#if APR_HAS_THREADS
apr_thread_mutex_t *tmutex; /* Thread lock for updating address cache */
+#endif
void *context; /* general purpose storage */
ap_conf_vector_t *section_config; /* <Proxy>-section wherein defined */
+ struct proxy_address *volatile address; /* current worker address (if reusable) */
};
/* default to health check every 30 seconds */
@@ -523,7 +566,9 @@ struct proxy_balancer {
apr_time_t wupdated; /* timestamp of last change to workers list */
proxy_balancer_method *lbmethod;
apr_global_mutex_t *gmutex; /* global lock for updating list of workers */
+#if APR_HAS_THREADS
apr_thread_mutex_t *tmutex; /* Thread lock for updating shm */
+#endif
proxy_server_conf *sconf;
void *context; /* general purpose storage */
proxy_balancer_shared *s; /* Shared data */
@@ -602,6 +647,8 @@ APR_DECLARE_EXTERNAL_HOOK(proxy, PROXY, int, scheme_handler,
(request_rec *r, proxy_worker *worker,
proxy_server_conf *conf, char *url,
const char *proxyhost, apr_port_t proxyport))
+APR_DECLARE_EXTERNAL_HOOK(proxy, PROXY, int, check_trans,
+ (request_rec *r, const char *url))
APR_DECLARE_EXTERNAL_HOOK(proxy, PROXY, int, canon_handler,
(request_rec *r, char *url))
@@ -643,6 +690,8 @@ PROXY_DECLARE(apr_status_t) ap_proxy_strncpy(char *dst, const char *src,
apr_size_t dlen);
PROXY_DECLARE(int) ap_proxy_hex2c(const char *x);
PROXY_DECLARE(void) ap_proxy_c2hex(int ch, char *x);
+PROXY_DECLARE(char *)ap_proxy_canonenc_ex(apr_pool_t *p, const char *x, int len, enum enctype t,
+ int flags, int proxyreq);
PROXY_DECLARE(char *)ap_proxy_canonenc(apr_pool_t *p, const char *x, int len, enum enctype t,
int forcedec, int proxyreq);
PROXY_DECLARE(char *)ap_proxy_canon_netloc(apr_pool_t *p, char **const urlp, char **userp,
@@ -656,7 +705,7 @@ PROXY_DECLARE(int) ap_proxy_checkproxyblock(request_rec *r, proxy_server_conf *c
* @param conf server configuration
* @param hostname hostname from request URI
* @param addr resolved address of hostname, or NULL if not known
- * @return OK on success, or else an errro
+ * @return OK on success, or else an error
*/
PROXY_DECLARE(int) ap_proxy_checkproxyblock2(request_rec *r, proxy_server_conf *conf,
const char *hostname, apr_sockaddr_t *addr);
@@ -708,7 +757,42 @@ PROXY_DECLARE(char *) ap_proxy_worker_name(apr_pool_t *p,
proxy_worker *worker);
/**
- * Get the worker from proxy configuration
+ * Return whether a worker's upgrade configuration matches the Upgrade header
+ * @param p memory pool used for displaying worker name
+ * @param worker the worker
+ * @param upgrade the Upgrade header to match
+ * @param dflt default protocol (NULL for none)
+ * @return 1 (true) or 0 (false)
+ */
+PROXY_DECLARE(int) ap_proxy_worker_can_upgrade(apr_pool_t *p,
+ const proxy_worker *worker,
+ const char *upgrade,
+ const char *dflt);
+
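
A minimal sketch of how a caller might use this check before tunneling an
upgraded protocol; the "websocket" default and the variable names are
assumptions, not taken from the patch:

    const char *upgrade = apr_table_get(r->headers_in, "Upgrade");
    if (upgrade && ap_proxy_worker_can_upgrade(r->pool, worker,
                                               upgrade, "websocket")) {
        /* the worker's upgrade setting (or the default) allows this protocol */
    }
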
+/* Bitmask for ap_proxy_{define,get}_worker_ex(). */
+#define AP_PROXY_WORKER_IS_PREFIX (1u << 0)
+#define AP_PROXY_WORKER_IS_MATCH (1u << 1)
+#define AP_PROXY_WORKER_IS_MALLOCED (1u << 2)
+#define AP_PROXY_WORKER_NO_UDS (1u << 3)
+
+/**
+ * Get the worker from the proxy configuration, looking for PREFIXED and/or
+ * MATCHED workers according to the given mask
+ * @param p memory pool used for finding worker
+ * @param balancer the balancer that the worker belongs to
+ * @param conf current proxy server configuration
+ * @param url url to find the worker from
+ * @param mask bitmask of AP_PROXY_WORKER_IS_*
+ * @return proxy_worker or NULL if not found
+ */
+PROXY_DECLARE(proxy_worker *) ap_proxy_get_worker_ex(apr_pool_t *p,
+ proxy_balancer *balancer,
+ proxy_server_conf *conf,
+ const char *url,
+ unsigned int mask);
+
+/**
+ * Get the worker from proxy configuration, both types
* @param p memory pool used for finding worker
* @param balancer the balancer that the worker belongs to
* @param conf current proxy server configuration
@@ -719,7 +803,26 @@ PROXY_DECLARE(proxy_worker *) ap_proxy_get_worker(apr_pool_t *p,
proxy_balancer *balancer,
proxy_server_conf *conf,
const char *url);
+
/**
+ * Define and Allocate space for the worker in the proxy configuration, of
+ * either PREFIXED or MATCHED type according to the given mask
+ * @param p memory pool to allocate worker from
+ * @param worker the new worker
+ * @param balancer the balancer that the worker belongs to
+ * @param conf current proxy server configuration
+ * @param url url containing worker name
+ * @param mask bitmask of AP_PROXY_WORKER_IS_*
+ * @return error message or NULL if successful (*worker is new worker)
+ */
+PROXY_DECLARE(char *) ap_proxy_define_worker_ex(apr_pool_t *p,
+ proxy_worker **worker,
+ proxy_balancer *balancer,
+ proxy_server_conf *conf,
+ const char *url,
+ unsigned int mask);
+
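
As an illustrative sketch only (variable names and error handling assumed),
a caller could look up a worker of either type and define one when absent:

    proxy_worker *worker = ap_proxy_get_worker_ex(r->pool, NULL, conf, url,
                                                  AP_PROXY_WORKER_IS_PREFIX |
                                                  AP_PROXY_WORKER_IS_MATCH);
    if (!worker) {
        char *err = ap_proxy_define_worker_ex(conf->pool, &worker, NULL, conf,
                                              url, AP_PROXY_WORKER_IS_PREFIX);
        if (err) {
            /* err is a configuration error message, e.g. for a malformed URL */
        }
    }
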
+ /**
* Define and Allocate space for the worker to proxy configuration
* @param p memory pool to allocate worker from
* @param worker the new worker
@@ -737,6 +840,25 @@ PROXY_DECLARE(char *) ap_proxy_define_worker(apr_pool_t *p,
int do_malloc);
/**
+ * Define and Allocate space for the ap_strcmp_match()able worker to proxy
+ * configuration.
+ * @param p memory pool to allocate worker from
+ * @param worker the new worker
+ * @param balancer the balancer that the worker belongs to
+ * @param conf current proxy server configuration
+ * @param url url containing worker name (produces match pattern)
+ * @param do_malloc true if shared struct should be malloced
+ * @return error message or NULL if successful (*worker is new worker)
+ * @deprecated Replaced by ap_proxy_define_worker_ex()
+ */
+PROXY_DECLARE(char *) ap_proxy_define_match_worker(apr_pool_t *p,
+ proxy_worker **worker,
+ proxy_balancer *balancer,
+ proxy_server_conf *conf,
+ const char *url,
+ int do_malloc);
+
+/**
* Share a defined proxy worker via shm
* @param worker worker to be shared
* @param shm location of shared info
@@ -912,6 +1034,29 @@ PROXY_DECLARE(int) ap_proxy_post_request(proxy_worker *worker,
request_rec *r,
proxy_server_conf *conf);
+/* Bitmask for ap_proxy_determine_address() */
+#define PROXY_DETERMINE_ADDRESS_CHECK (1u << 0)
+/**
+ * Resolve an address, reusing the one of the worker if any.
+ * @param proxy_function calling proxy scheme (http, ajp, ...)
+ * @param conn proxy connection the address is used for
+ * @param hostname host to resolve (should be the worker's if reusable)
+ * @param hostport port to resolve (should be the worker's if reusable)
+ * @param flags bitmask of PROXY_DETERMINE_ADDRESS_*
+ * @param r current request (if any)
+ * @param s current server (or NULL if r != NULL and ap_proxyerror()
+ * should be called on error)
+ * @return APR_SUCCESS or an error, APR_EEXIST if the address is still
+ * the same and PROXY_DETERMINE_ADDRESS_CHECK was requested
+ */
+PROXY_DECLARE(apr_status_t) ap_proxy_determine_address(const char *proxy_function,
+ proxy_conn_rec *conn,
+ const char *hostname,
+ apr_port_t hostport,
+ unsigned int flags,
+ request_rec *r,
+ server_rec *s);
+
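
A hedged sketch of resolving, or reusing, the worker's address just before
connecting; "conn" is assumed to be the proxy_conn_rec being set up:

    apr_status_t rv = ap_proxy_determine_address("http", conn,
                                                 conn->hostname, conn->port,
                                                 PROXY_DETERMINE_ADDRESS_CHECK,
                                                 r, NULL);
    if (rv == APR_EEXIST) {
        /* address unchanged since the last resolution, connection reusable */
    }
    else if (rv != APR_SUCCESS) {
        /* resolution failed; ap_proxyerror() was already called (r != NULL) */
    }
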
/**
* Determine backend hostname and port
* @param p memory pool used for processing
@@ -1112,13 +1257,27 @@ PROXY_DECLARE(apr_status_t) ap_proxy_sync_balancer(proxy_balancer *b,
server_rec *s,
proxy_server_conf *conf);
+/**
+ * Configure and create workers (and balancer) in mod_proxy_balancer.
+ * @param r request
+ * @param params table with the parameters like b=mycluster etc.
+ * @return 404 when the worker/balancer doesn't exist,
+ * 400 if something is invalid,
+ * 200 for success.
+ */
+APR_DECLARE_OPTIONAL_FN(apr_status_t, balancer_manage,
+ (request_rec *, apr_table_t *params));
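
For illustration, another module would retrieve and call this optional
function roughly as follows; the balancer and worker values are made up:

    APR_OPTIONAL_FN_TYPE(balancer_manage) *manage =
            APR_RETRIEVE_OPTIONAL_FN(balancer_manage);
    if (manage) {
        apr_table_t *params = apr_table_make(r->pool, 4);
        apr_table_set(params, "b", "mycluster");
        apr_table_set(params, "b_nwrkr", "http://127.0.0.1:8080");
        manage(r, params); /* 200, 400 or 404 as documented above */
    }
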
/**
* Find the matched alias for this request and setup for proxy handler
* @param r request
* @param ent proxy_alias record
* @param dconf per-dir config or NULL
- * @return DECLINED, DONE or OK if matched
+ * @return OK if the alias matched,
+ * DONE if the alias matched and r->uri was normalized so
+ * no further transformation should happen on it,
+ * DECLINED if proxying is disabled for this alias,
+ * HTTP_CONTINUE if the alias did not match
*/
PROXY_DECLARE(int) ap_proxy_trans_match(request_rec *r,
struct proxy_alias *ent,
@@ -1151,6 +1310,55 @@ PROXY_DECLARE(int) ap_proxy_create_hdrbrgd(apr_pool_t *p,
char **old_te_val);
/**
+ * Prefetch the client request body (in memory), up to a limit.
+ * Read what's in the client pipe. If nonblocking is set and read is EAGAIN,
+ * pass a FLUSH bucket to the backend and read again in blocking mode.
+ * @param r client request
+ * @param backend backend connection
+ * @param input_brigade input brigade to use/fill
+ * @param block blocking or non-blocking mode
+ * @param bytes_read number of bytes read
+ * @param max_read maximum number of bytes to read
+ * @return OK or HTTP_* error code
+ * @note max_read is rounded up to APR_BUCKET_BUFF_SIZE
+ */
+PROXY_DECLARE(int) ap_proxy_prefetch_input(request_rec *r,
+ proxy_conn_rec *backend,
+ apr_bucket_brigade *input_brigade,
+ apr_read_type_e block,
+ apr_off_t *bytes_read,
+ apr_off_t max_read);
+
+/**
+ * Spool the client request body to memory, or disk above given limit.
+ * @param r client request
+ * @param backend backend connection
+ * @param input_brigade input brigade to use/fill
+ * @param bytes_spooled number of bytes spooled
+ * @param max_mem_spool maximum number of in-memory bytes
+ * @return OK or HTTP_* error code
+ */
+PROXY_DECLARE(int) ap_proxy_spool_input(request_rec *r,
+ proxy_conn_rec *backend,
+ apr_bucket_brigade *input_brigade,
+ apr_off_t *bytes_spooled,
+ apr_off_t max_mem_spool);
+
+/**
+ * Read what's in the client pipe. If the read would block (EAGAIN),
+ * pass a FLUSH bucket to the backend and read again in blocking mode.
+ * @param r client request
+ * @param backend backend connection
+ * @param input_brigade brigade to use/fill
+ * @param max_read maximum number of bytes to read
+ * @return OK or HTTP_* error code
+ */
+PROXY_DECLARE(int) ap_proxy_read_input(request_rec *r,
+ proxy_conn_rec *backend,
+ apr_bucket_brigade *input_brigade,
+ apr_off_t max_read);
+
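
A sketch, under assumed limits (16K prefetch, 64K in-memory spool) and an
assumed "backend" proxy_conn_rec, of how a scheme handler might combine
these helpers:

    apr_bucket_brigade *input_brigade =
            apr_brigade_create(r->pool, r->connection->bucket_alloc);
    apr_off_t bytes_read = 0, bytes_spooled = 0;
    int status;

    /* read ahead whatever the client has already sent */
    status = ap_proxy_prefetch_input(r, backend, input_brigade,
                                     APR_NONBLOCK_READ, &bytes_read,
                                     16 * 1024);
    if (status == OK && r->remaining > 0) {
        /* Content-Length says more is coming: spool it (to disk above 64K) */
        status = ap_proxy_spool_input(r, backend, input_brigade,
                                      &bytes_spooled, 64 * 1024);
    }
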
+/**
* @param bucket_alloc bucket allocator
* @param r request
* @param p_conn proxy connection
@@ -1164,6 +1372,41 @@ PROXY_DECLARE(int) ap_proxy_pass_brigade(apr_bucket_alloc_t *bucket_alloc,
conn_rec *origin, apr_bucket_brigade *bb,
int flush);
+struct proxy_tunnel_conn; /* opaque */
+typedef struct {
+ request_rec *r;
+ const char *scheme;
+ apr_pollset_t *pollset;
+ apr_array_header_t *pfds;
+ apr_interval_time_t timeout;
+ struct proxy_tunnel_conn *client,
+ *origin;
+ apr_size_t read_buf_size;
+ int replied; /* TODO 2.5+: one bit to merge in below bitmask */
+ unsigned int nohalfclose :1;
+} proxy_tunnel_rec;
+
+/**
+ * Create a tunnel, to be activated by ap_proxy_tunnel_run().
+ * @param tunnel tunnel created
+ * @param r client request
+ * @param c_o connection to origin
+ * @param scheme caller proxy scheme (connect, ws(s), http(s), ...)
+ * @return APR_SUCCESS or error status
+ */
+PROXY_DECLARE(apr_status_t) ap_proxy_tunnel_create(proxy_tunnel_rec **tunnel,
+ request_rec *r, conn_rec *c_o,
+ const char *scheme);
+
+/**
+ * Forward anything from either side of the tunnel to the other,
+ * until one end aborts or a polling timeout/error occurs.
+ * @param tunnel tunnel to run
+ * @return OK when both directions have completed, HTTP_GATEWAY_TIME_OUT
+ * on timeout, or another HTTP_* error otherwise.
+ */
+PROXY_DECLARE(int) ap_proxy_tunnel_run(proxy_tunnel_rec *tunnel);
+
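
A minimal sketch, with assumed names, of switching an established backend
connection "backconn" into tunnel mode with this pair of calls:

    proxy_tunnel_rec *tunnel;
    apr_status_t rv;

    rv = ap_proxy_tunnel_create(&tunnel, r, backconn, "example");
    if (rv != APR_SUCCESS) {
        return HTTP_INTERNAL_SERVER_ERROR;
    }
    /* pump data both ways until one side closes or the poll times out */
    return ap_proxy_tunnel_run(tunnel);
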
/**
* Clear the headers referenced by the Connection header from the given
* table, and remove the Connection header.
@@ -1175,6 +1418,15 @@ APR_DECLARE_OPTIONAL_FN(int, ap_proxy_clear_connection,
(request_rec *r, apr_table_t *headers));
/**
+ * Do an AJP CPING and wait for the CPONG on the socket
+ *
+ */
+APR_DECLARE_OPTIONAL_FN(apr_status_t, ajp_handle_cping_cpong,
+ (apr_socket_t *sock, request_rec *r,
+ apr_interval_time_t timeout));
+
+
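
A hedged example (the 10 second timeout is an assumption) of how another
module, e.g. a health checker, could use this optional function on an
already connected AJP socket "sock":

    APR_OPTIONAL_FN_TYPE(ajp_handle_cping_cpong) *cping =
            APR_RETRIEVE_OPTIONAL_FN(ajp_handle_cping_cpong);
    if (cping) {
        apr_status_t rv = cping(sock, r, apr_time_from_sec(10));
        if (rv != APR_SUCCESS) {
            /* backend did not answer the CPING within the timeout */
        }
    }
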
+/**
* @param socket socket to test
* @return TRUE if socket is connected/active
*/
@@ -1194,6 +1446,15 @@ PROXY_DECLARE(int) ap_proxy_is_socket_connected(apr_socket_t *socket);
int ap_proxy_lb_workers(void);
/**
+ * Returns 1 if a response with the given status should be overridden.
+ *
+ * @param conf proxy directory configuration
+ * @param code http status code
+ * @return 1 if code is considered an error-code, 0 otherwise
+ */
+PROXY_DECLARE(int) ap_proxy_should_override(proxy_dir_conf *conf, int code);
+
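
This centralizes the scattered error_override checks; call sites reduce to
a sketch like the following (the surrounding cleanup is omitted):

    if (ap_proxy_should_override(conf, r->status)) {
        /* discard the backend body and let ErrorDocument handle r->status */
    }
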
+/**
* Return the port number of a known scheme (eg: http -> 80).
* @param scheme scheme to test
* @return port number or 0 if unknown
@@ -1238,6 +1499,15 @@ PROXY_DECLARE(apr_status_t) ap_proxy_buckets_lifetime_transform(request_rec *r,
apr_bucket_brigade *from,
apr_bucket_brigade *to);
+/*
+ * The flags for ap_proxy_transfer_between_connections(), where for legacy and
+ * compatibility reasons FLUSH_EACH and FLUSH_AFTER are boolean values.
+ */
+#define AP_PROXY_TRANSFER_FLUSH_EACH (0x00)
+#define AP_PROXY_TRANSFER_FLUSH_AFTER (0x01)
+#define AP_PROXY_TRANSFER_YIELD_PENDING (0x02)
+#define AP_PROXY_TRANSFER_YIELD_MAX_READS (0x04)
+
/*
* Sends all data that can be read non blocking from the input filter chain of
* c_i and send it down the output filter chain of c_o. For reading it uses
@@ -1255,10 +1525,12 @@ PROXY_DECLARE(apr_status_t) ap_proxy_buckets_lifetime_transform(request_rec *r,
* @param name string for logging from where data was pulled
* @param sent if not NULL will be set to 1 if data was sent through c_o
* @param bsize maximum amount of data pulled in one iteration from c_i
- * @param after if set flush data on c_o only once after the loop
+ * @param flags AP_PROXY_TRANSFER_* bitmask
* @return apr_status_t of the operation. Could be any error returned from
* either the input filter chain of c_i or the output filter chain
- * of c_o. APR_EPIPE if the outgoing connection was aborted.
+ * of c_o, APR_EPIPE if the outgoing connection was aborted, or
+ * APR_INCOMPLETE if AP_PROXY_TRANSFER_YIELD_PENDING was set and
+ * the output stack gets full before the input stack is exhausted.
*/
PROXY_DECLARE(apr_status_t) ap_proxy_transfer_between_connections(
request_rec *r,
@@ -1269,7 +1541,7 @@ PROXY_DECLARE(apr_status_t) ap_proxy_transfer_between_connections(
const char *name,
int *sent,
apr_off_t bsize,
- int after);
+ int flags);
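
With the new bitmask, the former "flush after the loop" call style used by
the CONNECT handler elsewhere in this patch would look roughly like this
(variable names borrowed from that handler):

    rv = ap_proxy_transfer_between_connections(r, backconn, c,
                                               bb_back, bb_front,
                                               "sock", NULL, CONN_BLKSZ,
                                               AP_PROXY_TRANSFER_FLUSH_AFTER);
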
extern module PROXY_DECLARE_DATA proxy_module;
diff --git a/modules/proxy/mod_proxy_ajp.c b/modules/proxy/mod_proxy_ajp.c
index 73716af..32ec912 100644
--- a/modules/proxy/mod_proxy_ajp.c
+++ b/modules/proxy/mod_proxy_ajp.c
@@ -35,7 +35,7 @@ static int proxy_ajp_canon(request_rec *r, char *url)
apr_port_t port, def_port;
/* ap_port_of_scheme() */
- if (strncasecmp(url, "ajp:", 4) == 0) {
+ if (ap_cstr_casecmpn(url, "ajp:", 4) == 0) {
url += 4;
}
else {
@@ -65,13 +65,37 @@ static int proxy_ajp_canon(request_rec *r, char *url)
if (apr_table_get(r->notes, "proxy-nocanon")) {
path = url; /* this is the raw path */
}
+ else if (apr_table_get(r->notes, "proxy-noencode")) {
+ path = url; /* this is the encoded path already */
+ search = r->args;
+ }
else {
- path = ap_proxy_canonenc(r->pool, url, strlen(url), enc_path, 0,
- r->proxyreq);
+ core_dir_config *d = ap_get_core_module_config(r->per_dir_config);
+ int flags = d->allow_encoded_slashes && !d->decode_encoded_slashes ? PROXY_CANONENC_NOENCODEDSLASHENCODING : 0;
+
+ path = ap_proxy_canonenc_ex(r->pool, url, strlen(url), enc_path, flags,
+ r->proxyreq);
+ if (!path) {
+ return HTTP_BAD_REQUEST;
+ }
search = r->args;
}
- if (path == NULL)
- return HTTP_BAD_REQUEST;
+ /*
+ * If we have a raw control character or a ' ' in nocanon path or
+ * r->args, correct encoding was missed.
+ */
+ if (path == url && *ap_scan_vchar_obstext(path)) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(10418)
+ "To be forwarded path contains control "
+ "characters or spaces");
+ return HTTP_FORBIDDEN;
+ }
+ if (search && *ap_scan_vchar_obstext(search)) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(10406)
+ "To be forwarded query string contains control "
+ "characters or spaces");
+ return HTTP_FORBIDDEN;
+ }
if (port != def_port)
apr_snprintf(sport, sizeof(sport), ":%d", port);
@@ -126,11 +150,8 @@ static apr_off_t get_content_length(request_rec * r)
if (r->main == NULL) {
const char *clp = apr_table_get(r->headers_in, "Content-Length");
- if (clp) {
- char *errp;
- if (apr_strtoff(&len, clp, &errp, 10) || *errp || len < 0) {
- len = 0; /* parse error */
- }
+ if (clp && !ap_parse_strict_length(&len, clp)) {
+ len = -1; /* parse error */
}
}
@@ -193,6 +214,7 @@ static int ap_proxy_ajp_request(apr_pool_t *p, request_rec *r,
apr_off_t content_length = 0;
int original_status = r->status;
const char *original_status_line = r->status_line;
+ const char *secret = NULL;
if (psf->io_buffer_size_set)
maxsize = psf->io_buffer_size;
@@ -202,18 +224,20 @@ static int ap_proxy_ajp_request(apr_pool_t *p, request_rec *r,
maxsize = AJP_MSG_BUFFER_SZ;
maxsize = APR_ALIGN(maxsize, 1024);
+ if (*conn->worker->s->secret)
+ secret = conn->worker->s->secret;
+
/*
* Send the AJP request to the remote server
*/
/* send request headers */
- status = ajp_send_header(conn->sock, r, maxsize, uri);
+ status = ajp_send_header(conn->sock, r, maxsize, uri, secret);
if (status != APR_SUCCESS) {
conn->close = 1;
ap_log_rerror(APLOG_MARK, APLOG_ERR, status, r, APLOGNO(00868)
- "request failed to %pI (%s)",
- conn->worker->cp->addr,
- conn->worker->s->hostname_ex);
+ "request failed to %pI (%s:%hu)",
+ conn->addr, conn->hostname, conn->port);
if (status == AJP_EOVERFLOW)
return HTTP_BAD_REQUEST;
else if (status == AJP_EBAD_METHOD) {
@@ -242,19 +266,34 @@ static int ap_proxy_ajp_request(apr_pool_t *p, request_rec *r,
return HTTP_INTERNAL_SERVER_ERROR;
}
- /* read the first bloc of data */
+ /* read the first block of data */
input_brigade = apr_brigade_create(p, r->connection->bucket_alloc);
tenc = apr_table_get(r->headers_in, "Transfer-Encoding");
- if (tenc && (strcasecmp(tenc, "chunked") == 0)) {
- /* The AJP protocol does not want body data yet */
- ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(00870) "request is chunked");
+ if (tenc) {
+ if (ap_cstr_casecmp(tenc, "chunked") == 0) {
+ /* The AJP protocol does not want body data yet */
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(00870)
+ "request is chunked");
+ }
+ else {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(10396)
+ "%s Transfer-Encoding is not supported",
+ tenc);
+ /* We had a failure: Close connection to backend */
+ conn->close = 1;
+ return HTTP_INTERNAL_SERVER_ERROR;
+ }
} else {
/* Get client provided Content-Length header */
content_length = get_content_length(r);
- status = ap_get_brigade(r->input_filters, input_brigade,
- AP_MODE_READBYTES, APR_BLOCK_READ,
- maxsize - AJP_HEADER_SZ);
-
+ if (content_length < 0) {
+ status = APR_EINVAL;
+ }
+ else {
+ status = ap_get_brigade(r->input_filters, input_brigade,
+ AP_MODE_READBYTES, APR_BLOCK_READ,
+ maxsize - AJP_HEADER_SZ);
+ }
if (status != APR_SUCCESS) {
/* We had a failure: Close connection to backend */
conn->close = 1;
@@ -295,9 +334,8 @@ static int ap_proxy_ajp_request(apr_pool_t *p, request_rec *r,
conn->close = 1;
apr_brigade_destroy(input_brigade);
ap_log_rerror(APLOG_MARK, APLOG_ERR, status, r, APLOGNO(00876)
- "send failed to %pI (%s)",
- conn->worker->cp->addr,
- conn->worker->s->hostname_ex);
+ "send failed to %pI (%s:%hu)",
+ conn->addr, conn->hostname, conn->port);
/*
* It is fatal when we failed to send a (part) of the request
* body.
@@ -336,15 +374,15 @@ static int ap_proxy_ajp_request(apr_pool_t *p, request_rec *r,
conn->close = 1;
apr_brigade_destroy(input_brigade);
ap_log_rerror(APLOG_MARK, APLOG_ERR, status, r, APLOGNO(00878)
- "read response failed from %pI (%s)",
- conn->worker->cp->addr,
- conn->worker->s->hostname_ex);
+ "read response failed from %pI (%s:%hu)",
+ conn->addr, conn->hostname, conn->port);
/* If we had a successful cping/cpong and then a timeout
* we assume it is a request that cause a back-end timeout,
* but doesn't affect the whole worker.
*/
- if (APR_STATUS_IS_TIMEUP(status) && conn->worker->s->ping_timeout_set) {
+ if (APR_STATUS_IS_TIMEUP(status) &&
+ conn->worker->s->ping_timeout_set) {
return HTTP_GATEWAY_TIME_OUT;
}
@@ -470,7 +508,7 @@ static int ap_proxy_ajp_request(apr_pool_t *p, request_rec *r,
/* If we are overriding the errors, we can't put the content
* of the page into the brigade.
*/
- if (!conf->error_override || !ap_is_HTTP_ERROR(r->status)) {
+ if (!ap_proxy_should_override(conf, r->status)) {
/* AJP13_SEND_BODY_CHUNK with zero length
* is explicit flush message
*/
@@ -493,8 +531,7 @@ static int ap_proxy_ajp_request(apr_pool_t *p, request_rec *r,
* error status so that an underlying error (eg HTTP_NOT_FOUND)
* doesn't become an HTTP_OK.
*/
- if (conf->error_override && !ap_is_HTTP_ERROR(r->status)
- && ap_is_HTTP_ERROR(original_status)) {
+ if (ap_proxy_should_override(conf, original_status)) {
r->status = original_status;
r->status_line = original_status_line;
}
@@ -543,7 +580,7 @@ static int ap_proxy_ajp_request(apr_pool_t *p, request_rec *r,
if (status != APR_SUCCESS) {
backend_failed = 1;
}
- if (!conf->error_override || !ap_is_HTTP_ERROR(r->status)) {
+ if (!ap_proxy_should_override(conf, r->status)) {
e = apr_bucket_eos_create(r->connection->bucket_alloc);
APR_BRIGADE_INSERT_TAIL(output_brigade, e);
if (ap_pass_brigade(r->output_filters,
@@ -634,11 +671,10 @@ static int ap_proxy_ajp_request(apr_pool_t *p, request_rec *r,
}
else {
ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(00892)
- "got response from %pI (%s)",
- conn->worker->cp->addr,
- conn->worker->s->hostname_ex);
+ "got response from %pI (%s:%hu)",
+ conn->addr, conn->hostname, conn->port);
- if (conf->error_override && ap_is_HTTP_ERROR(r->status)) {
+ if (ap_proxy_should_override(conf, r->status)) {
/* clear r->status for override error, otherwise ErrorDocument
* thinks that this is a recursive error, and doesn't find the
* custom error page
@@ -658,9 +694,8 @@ static int ap_proxy_ajp_request(apr_pool_t *p, request_rec *r,
if (backend_failed) {
ap_log_rerror(APLOG_MARK, APLOG_ERR, status, r, APLOGNO(00893)
- "dialog to %pI (%s) failed",
- conn->worker->cp->addr,
- conn->worker->s->hostname_ex);
+ "dialog to %pI (%s:%hu) failed",
+ conn->addr, conn->hostname, conn->port);
/*
* If we already send data, signal a broken backend connection
* upwards in the chain.
@@ -676,7 +711,18 @@ static int ap_proxy_ajp_request(apr_pool_t *p, request_rec *r,
*/
rv = HTTP_SERVICE_UNAVAILABLE;
} else {
- rv = HTTP_INTERNAL_SERVER_ERROR;
+ /* If we had a successful cping/cpong and then a timeout
+ * we assume it is a request that caused a back-end timeout,
+ * but doesn't affect the whole worker.
+ */
+ if (APR_STATUS_IS_TIMEUP(status) &&
+ conn->worker->s->ping_timeout_set) {
+ apr_table_setn(r->notes, "proxy_timedout", "1");
+ rv = HTTP_GATEWAY_TIME_OUT;
+ }
+ else {
+ rv = HTTP_INTERNAL_SERVER_ERROR;
+ }
}
}
else if (client_failed) {
@@ -735,7 +781,7 @@ static int proxy_ajp_handler(request_rec *r, proxy_worker *worker,
apr_pool_t *p = r->pool;
apr_uri_t *uri;
- if (strncasecmp(url, "ajp:", 4) != 0) {
+ if (ap_cstr_casecmpn(url, "ajp:", 4) != 0) {
ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(00894) "declining URL %s", url);
return DECLINED;
}
@@ -794,8 +840,8 @@ static int proxy_ajp_handler(request_rec *r, proxy_worker *worker,
if (status != APR_SUCCESS) {
backend->close = 1;
ap_log_rerror(APLOG_MARK, APLOG_ERR, status, r, APLOGNO(00897)
- "cping/cpong failed to %pI (%s)",
- worker->cp->addr, worker->s->hostname_ex);
+ "cping/cpong failed to %pI (%s:%hu)",
+ backend->addr, backend->hostname, backend->port);
status = HTTP_SERVICE_UNAVAILABLE;
retry++;
continue;
@@ -816,6 +862,7 @@ static void ap_proxy_http_register_hook(apr_pool_t *p)
{
proxy_hook_scheme_handler(proxy_ajp_handler, NULL, NULL, APR_HOOK_FIRST);
proxy_hook_canon_handler(proxy_ajp_canon, NULL, NULL, APR_HOOK_FIRST);
+ APR_REGISTER_OPTIONAL_FN(ajp_handle_cping_cpong);
}
AP_DECLARE_MODULE(proxy_ajp) = {
diff --git a/modules/proxy/mod_proxy_balancer.c b/modules/proxy/mod_proxy_balancer.c
index c59f5e9..b8b452d 100644
--- a/modules/proxy/mod_proxy_balancer.c
+++ b/modules/proxy/mod_proxy_balancer.c
@@ -75,7 +75,7 @@ static int proxy_balancer_canon(request_rec *r, char *url)
apr_port_t port = 0;
/* TODO: offset of BALANCER_PREFIX ?? */
- if (strncasecmp(url, "balancer:", 9) == 0) {
+ if (ap_cstr_casecmpn(url, "balancer:", 9) == 0) {
url += 9;
}
else {
@@ -102,13 +102,37 @@ static int proxy_balancer_canon(request_rec *r, char *url)
if (apr_table_get(r->notes, "proxy-nocanon")) {
path = url; /* this is the raw path */
}
+ else if (apr_table_get(r->notes, "proxy-noencode")) {
+ path = url; /* this is the encoded path already */
+ search = r->args;
+ }
else {
- path = ap_proxy_canonenc(r->pool, url, strlen(url), enc_path, 0,
- r->proxyreq);
+ core_dir_config *d = ap_get_core_module_config(r->per_dir_config);
+ int flags = d->allow_encoded_slashes && !d->decode_encoded_slashes ? PROXY_CANONENC_NOENCODEDSLASHENCODING : 0;
+
+ path = ap_proxy_canonenc_ex(r->pool, url, strlen(url), enc_path, flags,
+ r->proxyreq);
+ if (!path) {
+ return HTTP_BAD_REQUEST;
+ }
search = r->args;
}
- if (path == NULL)
- return HTTP_BAD_REQUEST;
+ /*
+ * If we have a raw control character or a ' ' in nocanon path or
+ * r->args, correct encoding was missed.
+ */
+ if (path == url && *ap_scan_vchar_obstext(path)) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(10416)
+ "To be forwarded path contains control "
+ "characters or spaces");
+ return HTTP_FORBIDDEN;
+ }
+ if (search && *ap_scan_vchar_obstext(search)) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(10407)
+ "To be forwarded query string contains control "
+ "characters or spaces");
+ return HTTP_FORBIDDEN;
+ }
r->filename = apr_pstrcat(r->pool, "proxy:" BALANCER_PREFIX, host,
"/", path, (search) ? "?" : "", (search) ? search : "", NULL);
@@ -346,23 +370,27 @@ static proxy_worker *find_best_worker(proxy_balancer *balancer,
proxy_worker *candidate = NULL;
apr_status_t rv;
+#if APR_HAS_THREADS
if ((rv = PROXY_THREAD_LOCK(balancer)) != APR_SUCCESS) {
ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01163)
"%s: Lock failed for find_best_worker()",
balancer->s->name);
return NULL;
}
+#endif
candidate = (*balancer->lbmethod->finder)(balancer, r);
if (candidate)
candidate->s->elected++;
+#if APR_HAS_THREADS
if ((rv = PROXY_THREAD_UNLOCK(balancer)) != APR_SUCCESS) {
ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01164)
"%s: Unlock failed for find_best_worker()",
balancer->s->name);
}
+#endif
if (candidate == NULL) {
/* All the workers are in error state or disabled.
@@ -376,7 +404,7 @@ static proxy_worker *find_best_worker(proxy_balancer *balancer,
/* XXX: This can perhaps be build using some
* smarter mechanism, like tread_cond.
* But since the statuses can came from
- * different childs, use the provided algo.
+ * different children, use the provided algo.
*/
apr_interval_time_t timeout = balancer->s->timeout;
apr_interval_time_t step, tval = 0;
@@ -417,7 +445,7 @@ static int rewrite_url(request_rec *r, proxy_worker *worker,
NULL));
}
- *url = apr_pstrcat(r->pool, worker->s->name, path, NULL);
+ *url = apr_pstrcat(r->pool, worker->s->name_ex, path, NULL);
return OK;
}
@@ -451,8 +479,9 @@ static void force_recovery(proxy_balancer *balancer, server_rec *s)
++(*worker)->s->retries;
(*worker)->s->status &= ~PROXY_WORKER_IN_ERROR;
ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s, APLOGNO(01165)
- "%s: Forcing recovery for worker (%s)",
- balancer->s->name, (*worker)->s->hostname_ex);
+ "%s: Forcing recovery for worker (%s:%d)",
+ balancer->s->name, (*worker)->s->hostname_ex,
+ (int)(*worker)->s->port);
}
}
}
@@ -492,11 +521,13 @@ static int proxy_balancer_pre_request(proxy_worker **worker,
/* Step 2: Lock the LoadBalancer
* XXX: perhaps we need the process lock here
*/
+#if APR_HAS_THREADS
if ((rv = PROXY_THREAD_LOCK(*balancer)) != APR_SUCCESS) {
ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01166)
"%s: Lock failed for pre_request", (*balancer)->s->name);
return DECLINED;
}
+#endif
/* Step 3: force recovery */
force_recovery(*balancer, r->server);
@@ -557,20 +588,24 @@ static int proxy_balancer_pre_request(proxy_worker **worker,
ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(01167)
"%s: All workers are in error state for route (%s)",
(*balancer)->s->name, route);
+#if APR_HAS_THREADS
if ((rv = PROXY_THREAD_UNLOCK(*balancer)) != APR_SUCCESS) {
ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01168)
"%s: Unlock failed for pre_request",
(*balancer)->s->name);
}
+#endif
return HTTP_SERVICE_UNAVAILABLE;
}
}
+#if APR_HAS_THREADS
if ((rv = PROXY_THREAD_UNLOCK(*balancer)) != APR_SUCCESS) {
ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01169)
"%s: Unlock failed for pre_request",
(*balancer)->s->name);
}
+#endif
if (!*worker) {
runtime = find_best_worker(*balancer, r);
if (!runtime) {
@@ -608,7 +643,7 @@ static int proxy_balancer_pre_request(proxy_worker **worker,
apr_table_setn(r->subprocess_env,
"BALANCER_NAME", (*balancer)->s->name);
apr_table_setn(r->subprocess_env,
- "BALANCER_WORKER_NAME", (*worker)->s->name);
+ "BALANCER_WORKER_NAME", (*worker)->s->name_ex);
apr_table_setn(r->subprocess_env,
"BALANCER_WORKER_ROUTE", (*worker)->s->route);
@@ -631,7 +666,7 @@ static int proxy_balancer_pre_request(proxy_worker **worker,
}
ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(01172)
"%s: worker (%s) rewritten to %s",
- (*balancer)->s->name, (*worker)->s->name, *url);
+ (*balancer)->s->name, (*worker)->s->name_ex, *url);
return access_status;
}
@@ -644,12 +679,14 @@ static int proxy_balancer_post_request(proxy_worker *worker,
apr_status_t rv;
+#if APR_HAS_THREADS
if ((rv = PROXY_THREAD_LOCK(balancer)) != APR_SUCCESS) {
ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01173)
"%s: Lock failed for post_request",
balancer->s->name);
return HTTP_INTERNAL_SERVER_ERROR;
}
+#endif
if (!apr_is_empty_array(balancer->errstatuses)
&& !(worker->s->status & PROXY_WORKER_IGNORE_ERRORS)) {
@@ -681,11 +718,12 @@ static int proxy_balancer_post_request(proxy_worker *worker,
worker->s->error_time = apr_time_now();
}
-
+#if APR_HAS_THREADS
if ((rv = PROXY_THREAD_UNLOCK(balancer)) != APR_SUCCESS) {
ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01175)
"%s: Unlock failed for post_request", balancer->s->name);
}
+#endif
ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(01176)
"proxy_balancer_post_request for (%s)", balancer->s->name);
@@ -784,7 +822,7 @@ static apr_status_t lock_remove(void *data)
/*
* First try to compute an unique ID for each vhost with minimal criteria,
* that is the first Host/IP:port and ServerName. For most cases this should
- * be enough and avoids changing the ID unnecessarily accross restart (or
+ * be enough and avoids changing the ID unnecessarily across restart (or
* stop/start w.r.t. persisted files) for things that this module does not
* care about.
*
@@ -945,7 +983,6 @@ static int balancer_post_config(apr_pool_t *pconf, apr_pool_t *plog,
PROXY_STRNCPY(balancer->s->sname, sname); /* We know this will succeed */
balancer->max_workers = balancer->workers->nelts + balancer->growth;
-
/* Create global mutex */
rv = ap_global_mutex_create(&(balancer->gmutex), NULL, balancer_mutex_type,
balancer->s->sname, s, pconf, 0);
@@ -955,7 +992,6 @@ static int balancer_post_config(apr_pool_t *pconf, apr_pool_t *plog,
balancer->s->sname);
return HTTP_INTERNAL_SERVER_ERROR;
}
-
apr_pool_cleanup_register(pconf, (void *)s, lock_remove,
apr_pool_cleanup_null);
@@ -1078,6 +1114,8 @@ static void push2table(const char *input, apr_table_t *params,
}
ap_unescape_url(key);
ap_unescape_url(val);
+ /* hcuri, worker name and balancer name (at least) are escaped when the form is built, so unescape twice */
+ ap_unescape_url(val);
if (allowed == NULL) { /* allow all */
apr_table_set(params, key, val);
}
@@ -1095,105 +1133,32 @@ static void push2table(const char *input, apr_table_t *params,
}
}
-/* Manages the loadfactors and member status
- * The balancer, worker and nonce are obtained from
- * the request args (?b=...&w=...&nonce=....).
- * All other params are pulled from any POST
- * data that exists.
- * TODO:
- * /.../<whatever>/balancer/worker/nonce
- */
-static int balancer_handler(request_rec *r)
+/* Returns non-zero if the Referer: header value passed matches the
+ * host of the request. */
+static int safe_referer(request_rec *r, const char *ref)
{
- void *sconf;
- proxy_server_conf *conf;
- proxy_balancer *balancer, *bsel = NULL;
- proxy_worker *worker, *wsel = NULL;
- proxy_worker **workers = NULL;
- apr_table_t *params;
- int i, n;
- int ok2change = 1;
- const char *name;
- const char *action;
- apr_status_t rv;
-
- /* is this for us? */
- if (strcmp(r->handler, "balancer-manager")) {
- return DECLINED;
- }
-
- r->allowed = 0
- | (AP_METHOD_BIT << M_GET)
- | (AP_METHOD_BIT << M_POST);
- if ((r->method_number != M_GET) && (r->method_number != M_POST)) {
- return DECLINED;
- }
-
- sconf = r->server->module_config;
- conf = (proxy_server_conf *) ap_get_module_config(sconf, &proxy_module);
- params = apr_table_make(r->pool, 10);
-
- balancer = (proxy_balancer *)conf->balancers->elts;
- for (i = 0; i < conf->balancers->nelts; i++, balancer++) {
- if ((rv = PROXY_THREAD_LOCK(balancer)) != APR_SUCCESS) {
- ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01189)
- "%s: Lock failed for balancer_handler",
- balancer->s->name);
- }
- ap_proxy_sync_balancer(balancer, r->server, conf);
- if ((rv = PROXY_THREAD_UNLOCK(balancer)) != APR_SUCCESS) {
- ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01190)
- "%s: Unlock failed for balancer_handler",
- balancer->s->name);
- }
- }
+ apr_uri_t uri;
- if (r->args && (r->method_number == M_GET)) {
- const char *allowed[] = { "w", "b", "nonce", "xml", NULL };
- ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(01191) "parsing r->args");
-
- push2table(r->args, params, allowed, r->pool);
- }
- if (r->method_number == M_POST) {
- apr_bucket_brigade *ib;
- apr_size_t len = 1024;
- char *buf = apr_pcalloc(r->pool, len+1);
-
- ib = apr_brigade_create(r->connection->pool, r->connection->bucket_alloc);
- rv = ap_get_brigade(r->input_filters, ib, AP_MODE_READBYTES,
- APR_BLOCK_READ, len);
- if (rv != APR_SUCCESS) {
- return ap_map_http_request_error(rv, HTTP_BAD_REQUEST);
- }
- apr_brigade_flatten(ib, buf, &len);
- buf[len] = '\0';
- push2table(buf, params, NULL, r->pool);
- }
- if ((name = apr_table_get(params, "b")))
- bsel = ap_proxy_get_balancer(r->pool, conf,
- apr_pstrcat(r->pool, BALANCER_PREFIX, name, NULL), 0);
-
- if ((name = apr_table_get(params, "w"))) {
- wsel = ap_proxy_get_worker(r->pool, bsel, conf, name);
- }
+ if (apr_uri_parse(r->pool, ref, &uri) || !uri.hostname)
+ return 0;
+ return strcasecmp(uri.hostname, ap_get_server_name(r)) == 0;
+}
- /* Check that the supplied nonce matches this server's nonce;
- * otherwise ignore all parameters, to prevent a CSRF attack. */
- if (!bsel ||
- (*bsel->s->nonce &&
- (
- (name = apr_table_get(params, "nonce")) == NULL ||
- strcmp(bsel->s->nonce, name) != 0
- )
- )
- ) {
- apr_table_clear(params);
- ok2change = 0;
- }
+/*
+ * Process the parameters and add or update the worker of the
+ * balancer. Must only be called once the nonce has been validated to
+ * match, to prevent CSRF attacks.
+ */
+static int balancer_process_balancer_worker(request_rec *r, proxy_server_conf *conf,
+ proxy_balancer *bsel,
+ proxy_worker *wsel,
+ apr_table_t *params)
+{
+ apr_status_t rv;
/* First set the params */
- if (wsel && ok2change) {
+ if (wsel) {
const char *val;
int was_usable = PROXY_WORKER_IS_USABLE(wsel);
@@ -1275,7 +1240,7 @@ static int balancer_handler(request_rec *r)
if ((val = apr_table_get(params, "w_hm"))) {
proxy_hcmethods_t *method = proxy_hcmethods;
for (; method->name; method++) {
- if (!strcasecmp(method->name, val) && method->implemented)
+ if (!ap_cstr_casecmp(method->name, val) && method->implemented)
wsel->s->method = method->method;
}
}
@@ -1292,7 +1257,7 @@ static int balancer_handler(request_rec *r)
*wsel->s->hcexpr = '\0';
}
/* If the health check method doesn't support an expr, then null it */
- if (wsel->s->method == NONE || wsel->s->method == TCP) {
+ if (wsel->s->method == NONE || wsel->s->method == TCP || wsel->s->method == CPING) {
*wsel->s->hcexpr = '\0';
}
/* if enabling, we need to reset all lb params */
@@ -1302,7 +1267,7 @@ static int balancer_handler(request_rec *r)
}
- if (bsel && ok2change) {
+ if (bsel) {
const char *val;
int ival;
ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(01193)
@@ -1359,11 +1324,13 @@ static int balancer_handler(request_rec *r)
proxy_worker *nworker;
nworker = ap_proxy_get_worker(r->pool, bsel, conf, val);
if (!nworker && storage->num_free_slots(bsel->wslot)) {
+#if APR_HAS_THREADS
if ((rv = PROXY_GLOBAL_LOCK(bsel)) != APR_SUCCESS) {
ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01194)
"%s: Lock failed for adding worker",
bsel->s->name);
}
+#endif
ret = ap_proxy_define_worker(conf->pool, &nworker, bsel, conf, val, 0);
if (!ret) {
unsigned int index;
@@ -1372,63 +1339,137 @@ static int balancer_handler(request_rec *r)
if ((rv = storage->grab(bsel->wslot, &index)) != APR_SUCCESS) {
ap_log_rerror(APLOG_MARK, APLOG_EMERG, rv, r, APLOGNO(01195)
"worker slotmem_grab failed");
+#if APR_HAS_THREADS
if ((rv = PROXY_GLOBAL_UNLOCK(bsel)) != APR_SUCCESS) {
ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01196)
"%s: Unlock failed for adding worker",
bsel->s->name);
}
+#endif
return HTTP_BAD_REQUEST;
}
if ((rv = storage->dptr(bsel->wslot, index, (void *)&shm)) != APR_SUCCESS) {
ap_log_rerror(APLOG_MARK, APLOG_EMERG, rv, r, APLOGNO(01197)
"worker slotmem_dptr failed");
+#if APR_HAS_THREADS
if ((rv = PROXY_GLOBAL_UNLOCK(bsel)) != APR_SUCCESS) {
ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01198)
"%s: Unlock failed for adding worker",
bsel->s->name);
}
+#endif
return HTTP_BAD_REQUEST;
}
if ((rv = ap_proxy_share_worker(nworker, shm, index)) != APR_SUCCESS) {
ap_log_rerror(APLOG_MARK, APLOG_EMERG, rv, r, APLOGNO(01199)
"Cannot share worker");
+#if APR_HAS_THREADS
if ((rv = PROXY_GLOBAL_UNLOCK(bsel)) != APR_SUCCESS) {
ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01200)
"%s: Unlock failed for adding worker",
bsel->s->name);
}
+#endif
return HTTP_BAD_REQUEST;
}
if ((rv = ap_proxy_initialize_worker(nworker, r->server, conf->pool)) != APR_SUCCESS) {
ap_log_rerror(APLOG_MARK, APLOG_EMERG, rv, r, APLOGNO(01201)
"Cannot init worker");
+#if APR_HAS_THREADS
if ((rv = PROXY_GLOBAL_UNLOCK(bsel)) != APR_SUCCESS) {
ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01202)
"%s: Unlock failed for adding worker",
bsel->s->name);
}
+#endif
return HTTP_BAD_REQUEST;
}
/* sync all timestamps */
bsel->wupdated = bsel->s->wupdated = nworker->s->updated = apr_time_now();
/* by default, all new workers are disabled */
ap_proxy_set_wstatus(PROXY_WORKER_DISABLED_FLAG, 1, nworker);
+ } else {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(10163)
+ "%s: failed to add worker %s",
+ bsel->s->name, val);
+#if APR_HAS_THREADS
+ PROXY_GLOBAL_UNLOCK(bsel);
+#endif
+ return HTTP_BAD_REQUEST;
}
+#if APR_HAS_THREADS
if ((rv = PROXY_GLOBAL_UNLOCK(bsel)) != APR_SUCCESS) {
ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01203)
"%s: Unlock failed for adding worker",
bsel->s->name);
}
+#endif
+ } else {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(10164)
+ "%s: failed to add worker %s",
+ bsel->s->name, val);
+ return HTTP_BAD_REQUEST;
}
}
}
+ return APR_SUCCESS;
+}
+
+/*
+ * Process a request for balancer or worker management from another module
+ */
+static apr_status_t balancer_manage(request_rec *r, apr_table_t *params)
+{
+ void *sconf;
+ proxy_server_conf *conf;
+ proxy_balancer *bsel = NULL;
+ proxy_worker *wsel = NULL;
+ const char *name;
+ sconf = r->server->module_config;
+ conf = (proxy_server_conf *) ap_get_module_config(sconf, &proxy_module);
+
+ /* Process the parameters */
+ if ((name = apr_table_get(params, "b"))) {
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, "balancer_manage "
+ "balancer: %s", name);
+ bsel = ap_proxy_get_balancer(r->pool, conf,
+ apr_pstrcat(r->pool, BALANCER_PREFIX, name, NULL), 0);
+ }
+
+ if ((name = apr_table_get(params, "w"))) {
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, "balancer_manage "
+ "worker: %s", name);
+ wsel = ap_proxy_get_worker(r->pool, bsel, conf, name);
+ }
+ if (bsel) {
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, "balancer_manage "
+ "balancer: %s", bsel->s->name);
+ return(balancer_process_balancer_worker(r, conf, bsel, wsel, params));
+ }
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, "balancer_manage failed: "
+ "No balancer!");
+ return HTTP_BAD_REQUEST;
+}
+/*
+ * Builds the page and links to configure via HTML or XML.
+ */
+static void balancer_display_page(request_rec *r, proxy_server_conf *conf,
+ proxy_balancer *bsel,
+ proxy_worker *wsel,
+ int usexml)
+{
+ const char *action;
+ proxy_balancer *balancer;
+ proxy_worker *worker;
+ proxy_worker **workers;
+ int i, n;
action = ap_construct_url(r->pool, r->uri, r);
ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(01204) "genning page");
- if (apr_table_get(params, "xml")) {
+ if (usexml) {
char date[APR_RFC822_DATE_LEN];
ap_set_content_type(r, "text/xml");
ap_rputs("<?xml version='1.0' encoding='UTF-8' ?>\n", r);
@@ -1440,7 +1481,7 @@ static int balancer_handler(request_rec *r)
/* Start proxy_balancer */
ap_rvputs(r, " <httpd:name>", balancer->s->name, "</httpd:name>\n", NULL);
if (*balancer->s->sticky) {
- ap_rvputs(r, " <httpd:stickysession>", balancer->s->sticky,
+ ap_rvputs(r, " <httpd:stickysession>", ap_escape_html(r->pool, balancer->s->sticky),
"</httpd:stickysession>\n", NULL);
ap_rprintf(r,
" <httpd:nofailover>%s</httpd:nofailover>\n",
@@ -1551,7 +1592,7 @@ static int balancer_handler(request_rec *r)
ap_rprintf(r, " <httpd:lbset>%d</httpd:lbset>\n",
worker->s->lbset);
/* End proxy_worker_stat */
- if (!strcasecmp(worker->s->scheme, "ajp")) {
+ if (!ap_cstr_casecmp(worker->s->scheme, "ajp")) {
ap_rputs(" <httpd:flushpackets>", r);
switch (worker->s->flush_packets) {
case flush_off:
@@ -1650,10 +1691,10 @@ static int balancer_handler(request_rec *r)
for (i = 0; i < conf->balancers->nelts; i++) {
ap_rputs("<hr />\n<h3>LoadBalancer Status for ", r);
- ap_rvputs(r, "<a href='", ap_escape_uri(r->pool, r->uri), "?b=",
+ ap_rvputs(r, "<a href=\"", ap_escape_uri(r->pool, r->uri), "?b=",
balancer->s->name + sizeof(BALANCER_PREFIX) - 1,
"&amp;nonce=", balancer->s->nonce,
- "'>", NULL);
+ "\">", NULL);
ap_rvputs(r, balancer->s->name, "</a> [",balancer->s->sname, "]</h3>\n", NULL);
ap_rputs("\n\n<table><tr>"
"<th>MaxMembers</th><th>StickySession</th><th>DisableFailover</th><th>Timeout</th><th>FailoverAttempts</th><th>Method</th>"
@@ -1664,11 +1705,11 @@ static int balancer_handler(request_rec *r)
balancer->max_workers - (int)storage->num_free_slots(balancer->wslot));
if (*balancer->s->sticky) {
if (strcmp(balancer->s->sticky, balancer->s->sticky_path)) {
- ap_rvputs(r, "<td>", balancer->s->sticky, " | ",
- balancer->s->sticky_path, NULL);
+ ap_rvputs(r, "<td>", ap_escape_html(r->pool, balancer->s->sticky), " | ",
+ ap_escape_html(r->pool, balancer->s->sticky_path), NULL);
}
else {
- ap_rvputs(r, "<td>", balancer->s->sticky, NULL);
+ ap_rvputs(r, "<td>", ap_escape_html(r->pool, balancer->s->sticky), NULL);
}
}
else {
@@ -1688,7 +1729,7 @@ static int balancer_handler(request_rec *r)
ap_rvputs(r, balancer->s->vpath, "</td>\n", NULL);
ap_rprintf(r, "<td>%s</td>\n",
!balancer->s->inactive ? "Yes" : "No");
- ap_rputs("</table>\n<br />", r);
+ ap_rputs("</tr>\n</table>\n<br />", r);
ap_rputs("\n\n<table><tr>"
"<th>Worker URL</th>"
"<th>Route</th><th>RouteRedir</th>"
@@ -1703,12 +1744,12 @@ static int balancer_handler(request_rec *r)
for (n = 0; n < balancer->workers->nelts; n++) {
char fbuf[50];
worker = *workers;
- ap_rvputs(r, "<tr>\n<td><a href='",
+ ap_rvputs(r, "<tr>\n<td><a href=\"",
ap_escape_uri(r->pool, r->uri), "?b=",
balancer->s->name + sizeof(BALANCER_PREFIX) - 1, "&amp;w=",
- ap_escape_uri(r->pool, worker->s->name),
+ ap_escape_uri(r->pool, worker->s->name_ex),
"&amp;nonce=", balancer->s->nonce,
- "'>", NULL);
+ "\">", NULL);
ap_rvputs(r, (*worker->s->uds_path ? "<i>" : ""), ap_proxy_worker_name(r->pool, worker),
(*worker->s->uds_path ? "</i>" : ""), "</a></td>", NULL);
ap_rvputs(r, "<td>", ap_escape_html(r->pool, worker->s->route),
@@ -1730,7 +1771,7 @@ static int balancer_handler(request_rec *r)
ap_rprintf(r, "<td>%" APR_TIME_T_FMT "ms</td>", apr_time_as_msec(worker->s->interval));
ap_rprintf(r, "<td>%d (%d)</td>", worker->s->passes,worker->s->pcount);
ap_rprintf(r, "<td>%d (%d)</td>", worker->s->fails, worker->s->fcount);
- ap_rprintf(r, "<td>%s</td>", worker->s->hcuri);
+ ap_rprintf(r, "<td>%s</td>", ap_escape_html(r->pool, worker->s->hcuri));
ap_rprintf(r, "<td>%s", worker->s->hcexpr);
}
ap_rputs("</td></tr>\n", r);
@@ -1747,20 +1788,20 @@ static int balancer_handler(request_rec *r)
if (wsel && bsel) {
ap_rputs("<h3>Edit worker settings for ", r);
ap_rvputs(r, (*wsel->s->uds_path?"<i>":""), ap_proxy_worker_name(r->pool, wsel), (*wsel->s->uds_path?"</i>":""), "</h3>\n", NULL);
- ap_rputs("<form method='POST' enctype='application/x-www-form-urlencoded' action='", r);
- ap_rvputs(r, ap_escape_uri(r->pool, action), "'>\n", NULL);
+ ap_rputs("<form method='POST' enctype='application/x-www-form-urlencoded' action=\"", r);
+ ap_rvputs(r, ap_escape_uri(r->pool, action), "\">\n", NULL);
ap_rputs("<table><tr><td>Load factor:</td><td><input name='w_lf' id='w_lf' type=text ", r);
ap_rprintf(r, "value='%.2f'></td></tr>\n", (float)(wsel->s->lbfactor)/100.0);
ap_rputs("<tr><td>LB Set:</td><td><input name='w_ls' id='w_ls' type=text ", r);
ap_rprintf(r, "value='%d'></td></tr>\n", wsel->s->lbset);
ap_rputs("<tr><td>Route:</td><td><input name='w_wr' id='w_wr' type=text ", r);
- ap_rvputs(r, "value='", ap_escape_html(r->pool, wsel->s->route),
+ ap_rvputs(r, "value=\"", ap_escape_html(r->pool, wsel->s->route),
NULL);
- ap_rputs("'></td></tr>\n", r);
+ ap_rputs("\"></td></tr>\n", r);
ap_rputs("<tr><td>Route Redirect:</td><td><input name='w_rr' id='w_rr' type=text ", r);
- ap_rvputs(r, "value='", ap_escape_html(r->pool, wsel->s->redirect),
+ ap_rvputs(r, "value=\"", ap_escape_html(r->pool, wsel->s->redirect),
NULL);
- ap_rputs("'></td></tr>\n", r);
+ ap_rputs("\"></td></tr>\n", r);
ap_rputs("<tr><td>Status:</td>", r);
ap_rputs("<td><table><tr>"
"<th>Ignore Errors</th>"
@@ -1798,22 +1839,22 @@ static int balancer_handler(request_rec *r)
ap_rputs("<tr><td>Expr</td><td><select name='w_he'>\n", r);
hc_select_exprs_f(r, wsel->s->hcexpr);
ap_rputs("</select>\n</td></tr>\n", r);
- ap_rprintf(r, "<tr><td>Interval (ms)</td><td><input name='w_hi' id='w_hi' type='text'"
+ ap_rprintf(r, "<tr><td>Interval (ms)</td><td><input name='w_hi' id='w_hi' type='text' "
"value='%" APR_TIME_T_FMT "'></td></tr>\n", apr_time_as_msec(wsel->s->interval));
- ap_rprintf(r, "<tr><td>Passes trigger</td><td><input name='w_hp' id='w_hp' type='text'"
+ ap_rprintf(r, "<tr><td>Passes trigger</td><td><input name='w_hp' id='w_hp' type='text' "
"value='%d'></td></tr>\n", wsel->s->passes);
- ap_rprintf(r, "<tr><td>Fails trigger)</td><td><input name='w_hf' id='w_hf' type='text'"
+ ap_rprintf(r, "<tr><td>Fails trigger)</td><td><input name='w_hf' id='w_hf' type='text' "
"value='%d'></td></tr>\n", wsel->s->fails);
- ap_rprintf(r, "<tr><td>HC uri</td><td><input name='w_hu' id='w_hu' type='text'"
- "value='%s'</td></tr>\n", ap_escape_html(r->pool, wsel->s->hcuri));
+ ap_rprintf(r, "<tr><td>HC uri</td><td><input name='w_hu' id='w_hu' type='text' "
+ "value=\"%s\"></td></tr>\n", ap_escape_html(r->pool, wsel->s->hcuri));
ap_rputs("</table>\n</td></tr>\n", r);
}
ap_rputs("<tr><td colspan='2'><input type=submit value='Submit'></td></tr>\n", r);
ap_rvputs(r, "</table>\n<input type=hidden name='w' id='w' ", NULL);
- ap_rvputs(r, "value='", ap_escape_uri(r->pool, wsel->s->name), "'>\n", NULL);
+ ap_rvputs(r, "value=\"", ap_escape_uri(r->pool, wsel->s->name_ex), "\">\n", NULL);
ap_rvputs(r, "<input type=hidden name='b' id='b' ", NULL);
- ap_rvputs(r, "value='", bsel->s->name + sizeof(BALANCER_PREFIX) - 1,
- "'>\n", NULL);
+ ap_rvputs(r, "value=\"", ap_escape_html(r->pool, bsel->s->name + sizeof(BALANCER_PREFIX) - 1),
+ "\">\n", NULL);
ap_rvputs(r, "<input type=hidden name='nonce' id='nonce' value='",
bsel->s->nonce, "'>\n", NULL);
ap_rputs("</form>\n", r);
@@ -1823,9 +1864,9 @@ static int balancer_handler(request_rec *r)
const ap_list_provider_names_t *pname;
int i;
ap_rputs("<h3>Edit balancer settings for ", r);
- ap_rvputs(r, bsel->s->name, "</h3>\n", NULL);
- ap_rputs("<form method='POST' enctype='application/x-www-form-urlencoded' action='", r);
- ap_rvputs(r, ap_escape_uri(r->pool, action), "'>\n", NULL);
+ ap_rvputs(r, ap_escape_html(r->pool, bsel->s->name), "</h3>\n", NULL);
+ ap_rputs("<form method='POST' enctype='application/x-www-form-urlencoded' action=\"", r);
+ ap_rvputs(r, ap_escape_uri(r->pool, action), "\">\n", NULL);
ap_rputs("<table>\n", r);
provs = ap_list_provider_names(r->pool, PROXY_LBMETHOD, "0");
if (provs) {
@@ -1846,15 +1887,16 @@ static int balancer_handler(request_rec *r)
ap_rprintf(r, "value='%d'></td></tr>\n", bsel->s->max_attempts);
ap_rputs("<tr><td>Disable Failover:</td>", r);
create_radio("b_sforce", bsel->s->sticky_force, r);
+ ap_rputs("</tr>\n", r);
ap_rputs("<tr><td>Sticky Session:</td><td><input name='b_ss' id='b_ss' size=64 type=text ", r);
if (strcmp(bsel->s->sticky, bsel->s->sticky_path)) {
- ap_rvputs(r, "value ='", bsel->s->sticky, " | ",
- bsel->s->sticky_path, NULL);
+ ap_rvputs(r, "value =\"", ap_escape_html(r->pool, bsel->s->sticky), " | ",
+ ap_escape_html(r->pool, bsel->s->sticky_path), NULL);
}
else {
- ap_rvputs(r, "value ='", bsel->s->sticky, NULL);
+ ap_rvputs(r, "value =\"", ap_escape_html(r->pool, bsel->s->sticky), NULL);
}
- ap_rputs("'>&nbsp;&nbsp;&nbsp;&nbsp;(Use '-' to delete)</td></tr>\n", r);
+ ap_rputs("\">&nbsp;&nbsp;&nbsp;&nbsp;(Use '-' to delete)</td></tr>\n", r);
if (storage->num_free_slots(bsel->wslot) != 0) {
ap_rputs("<tr><td>Add New Worker:</td><td><input name='b_nwrkr' id='b_nwrkr' size=32 type=text>"
"&nbsp;&nbsp;&nbsp;&nbsp;Are you sure? <input name='b_wyes' id='b_wyes' type=checkbox value='1'>"
@@ -1862,8 +1904,8 @@ static int balancer_handler(request_rec *r)
}
ap_rputs("<tr><td colspan=2><input type=submit value='Submit'></td></tr>\n", r);
ap_rvputs(r, "</table>\n<input type=hidden name='b' id='b' ", NULL);
- ap_rvputs(r, "value='", bsel->s->name + sizeof(BALANCER_PREFIX) - 1,
- "'>\n", NULL);
+ ap_rvputs(r, "value=\"", ap_escape_html(r->pool, bsel->s->name + sizeof(BALANCER_PREFIX) - 1),
+ "\">\n", NULL);
ap_rvputs(r, "<input type=hidden name='nonce' id='nonce' value='",
bsel->s->nonce, "'>\n", NULL);
ap_rputs("</form>\n", r);
@@ -1873,6 +1915,123 @@ static int balancer_handler(request_rec *r)
ap_rputs("</body></html>\n", r);
ap_rflush(r);
}
+}
+
+/* Manages the loadfactors and member status
+ * The balancer, worker and nonce are obtained from
+ * the request args (?b=...&w=...&nonce=....).
+ * All other params are pulled from any POST
+ * data that exists.
+ * TODO:
+ * /.../<whatever>/balancer/worker/nonce
+ */
+static int balancer_handler(request_rec *r)
+{
+ void *sconf;
+ proxy_server_conf *conf;
+ proxy_balancer *balancer, *bsel = NULL;
+ proxy_worker *wsel = NULL;
+ apr_table_t *params;
+ int i;
+ const char *name, *ref;
+ apr_status_t rv;
+
+ /* is this for us? */
+ if (strcmp(r->handler, "balancer-manager")) {
+ return DECLINED;
+ }
+
+ r->allowed = 0
+ | (AP_METHOD_BIT << M_GET)
+ | (AP_METHOD_BIT << M_POST);
+ if ((r->method_number != M_GET) && (r->method_number != M_POST)) {
+ return DECLINED;
+ }
+
+ sconf = r->server->module_config;
+ conf = (proxy_server_conf *) ap_get_module_config(sconf, &proxy_module);
+ params = apr_table_make(r->pool, 10);
+
+ balancer = (proxy_balancer *)conf->balancers->elts;
+ for (i = 0; i < conf->balancers->nelts; i++, balancer++) {
+#if APR_HAS_THREADS
+ if ((rv = PROXY_THREAD_LOCK(balancer)) != APR_SUCCESS) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01189)
+ "%s: Lock failed for balancer_handler",
+ balancer->s->name);
+ }
+#endif
+ ap_proxy_sync_balancer(balancer, r->server, conf);
+#if APR_HAS_THREADS
+ if ((rv = PROXY_THREAD_UNLOCK(balancer)) != APR_SUCCESS) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01190)
+ "%s: Unlock failed for balancer_handler",
+ balancer->s->name);
+ }
+#endif
+ }
+
+ if (r->args && (r->method_number == M_GET)) {
+ const char *allowed[] = { "w", "b", "nonce", "xml", NULL };
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(01191) "parsing r->args");
+
+ push2table(r->args, params, allowed, r->pool);
+ }
+ if (r->method_number == M_POST) {
+ apr_bucket_brigade *ib;
+ apr_size_t len = 1024;
+ char *buf = apr_pcalloc(r->pool, len+1);
+
+ ib = apr_brigade_create(r->connection->pool, r->connection->bucket_alloc);
+ rv = ap_get_brigade(r->input_filters, ib, AP_MODE_READBYTES,
+ APR_BLOCK_READ, len);
+ if (rv != APR_SUCCESS) {
+ return ap_map_http_request_error(rv, HTTP_BAD_REQUEST);
+ }
+ apr_brigade_flatten(ib, buf, &len);
+ buf[len] = '\0';
+ push2table(buf, params, NULL, r->pool);
+ }
+
+ /* Ignore parameters if this looks like XSRF */
+ ref = apr_table_get(r->headers_in, "Referer");
+ if (apr_table_elts(params)->nelts
+ && (!ref || !safe_referer(r, ref))) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(10187)
+ "ignoring params in balancer-manager cross-site access %s: %s", ref, ap_get_server_name(r));
+ apr_table_clear(params);
+ }
+
+ /* Process the parameters */
+ if ((name = apr_table_get(params, "b")))
+ bsel = ap_proxy_get_balancer(r->pool, conf,
+ apr_pstrcat(r->pool, BALANCER_PREFIX, name, NULL), 0);
+
+ if ((name = apr_table_get(params, "w"))) {
+ wsel = ap_proxy_get_worker(r->pool, bsel, conf, name);
+ }
+
+
+ /* Check that the supplied nonce matches this server's nonce;
+ * otherwise ignore all parameters, to prevent a CSRF
+ * attack. */
+ if (bsel
+ && (*bsel->s->nonce
+ && ((name = apr_table_get(params, "nonce")) != NULL
+ && strcmp(bsel->s->nonce, name) == 0))) {
+ /* Process the parameters and add the worker to the balancer */
+ rv = balancer_process_balancer_worker(r, conf, bsel, wsel, params);
+ if (rv != APR_SUCCESS) {
+ return HTTP_BAD_REQUEST;
+ }
+ }
+
+ /* display the HTML or XML page */
+ if (apr_table_get(params, "xml")) {
+ balancer_display_page(r, conf, bsel, wsel, 1);
+ } else {
+ balancer_display_page(r, conf, bsel, wsel, 0);
+ }
return DONE;
}
@@ -1905,7 +2064,7 @@ static void balancer_child_init(apr_pool_t *p, server_rec *s)
balancer->s->name);
exit(1); /* Ugly, but what else? */
}
- init_balancer_members(conf->pool, s, balancer);
+ init_balancer_members(p, s, balancer);
}
s = s->next;
}
@@ -1921,6 +2080,7 @@ static void ap_proxy_balancer_register_hook(apr_pool_t *p)
static const char *const aszPred[] = { "mpm_winnt.c", "mod_slotmem_shm.c", NULL};
static const char *const aszPred2[] = { "mod_proxy.c", NULL};
/* manager handler */
+ APR_REGISTER_OPTIONAL_FN(balancer_manage);
ap_hook_post_config(balancer_post_config, aszPred2, NULL, APR_HOOK_MIDDLE);
ap_hook_pre_config(balancer_pre_config, NULL, NULL, APR_HOOK_MIDDLE);
ap_hook_handler(balancer_handler, NULL, NULL, APR_HOOK_FIRST);
diff --git a/modules/proxy/mod_proxy_connect.c b/modules/proxy/mod_proxy_connect.c
index 7be6a6a..5a68135 100644
--- a/modules/proxy/mod_proxy_connect.c
+++ b/modules/proxy/mod_proxy_connect.c
@@ -39,7 +39,7 @@ module AP_MODULE_DECLARE_DATA proxy_connect_module;
* that may be okay, since the data is supposed to
* be transparent. In fact, this doesn't log at all
* yet. 8^)
- * FIXME: doesn't check any headers initally sent from the
+ * FIXME: doesn't check any headers initially sent from the
* client.
* FIXME: should allow authentication, but hopefully the
* generic proxy authentication is good enough.
@@ -156,25 +156,21 @@ static int proxy_connect_handler(request_rec *r, proxy_worker *worker,
apr_socket_t *sock;
conn_rec *c = r->connection;
conn_rec *backconn;
- int done = 0;
- apr_bucket_brigade *bb_front;
- apr_bucket_brigade *bb_back;
apr_status_t rv;
apr_size_t nbytes;
char buffer[HUGE_STRING_LEN];
- apr_socket_t *client_socket = ap_get_conn_socket(c);
+
+ apr_bucket_brigade *bb;
+ proxy_tunnel_rec *tunnel;
int failed, rc;
- apr_pollset_t *pollset;
- apr_pollfd_t pollfd;
- const apr_pollfd_t *signalled;
- apr_int32_t pollcnt, pi;
- apr_int16_t pollevent;
- apr_sockaddr_t *nexthop;
apr_uri_t uri;
const char *connectname;
apr_port_t connectport = 0;
+ apr_sockaddr_t *nexthop;
+
+ apr_interval_time_t current_timeout;
/* is this for us? */
if (r->method_number != M_CONNECT) {
@@ -261,28 +257,6 @@ static int proxy_connect_handler(request_rec *r, proxy_worker *worker,
}
}
- /* setup polling for connection */
- ap_log_rerror(APLOG_MARK, APLOG_TRACE2, 0, r, "setting up poll()");
-
- if ((rv = apr_pollset_create(&pollset, 2, r->pool, 0)) != APR_SUCCESS) {
- apr_socket_close(sock);
- ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01020)
- "error apr_pollset_create()");
- return HTTP_INTERNAL_SERVER_ERROR;
- }
-
- /* Add client side to the poll */
- pollfd.p = r->pool;
- pollfd.desc_type = APR_POLL_SOCKET;
- pollfd.reqevents = APR_POLLIN | APR_POLLHUP;
- pollfd.desc.s = client_socket;
- pollfd.client_data = NULL;
- apr_pollset_add(pollset, &pollfd);
-
- /* Add the server side to the poll */
- pollfd.desc.s = sock;
- apr_pollset_add(pollset, &pollfd);
-
/*
* Step Three: Send the Request
*
@@ -300,13 +274,22 @@ static int proxy_connect_handler(request_rec *r, proxy_worker *worker,
return HTTP_INTERNAL_SERVER_ERROR;
}
ap_proxy_ssl_engine(backconn, r->per_dir_config, 0);
+
+ /*
+ * save the timeout of the socket because core_pre_connection
+ * will set it to base_server->timeout
+ * (core TimeOut directive).
+ */
+ apr_socket_timeout_get(sock, &current_timeout);
rc = ap_run_pre_connection(backconn, sock);
if (rc != OK && rc != DONE) {
backconn->aborted = 1;
ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(01022)
"pre_connection setup failed (%d)", rc);
+ apr_socket_close(sock);
return HTTP_INTERNAL_SERVER_ERROR;
}
+ apr_socket_timeout_set(sock, current_timeout);
ap_log_rerror(APLOG_MARK, APLOG_TRACE3, 0, r,
"connection complete to %pI (%s)",
@@ -314,9 +297,7 @@ static int proxy_connect_handler(request_rec *r, proxy_worker *worker,
apr_table_setn(r->notes, "proxy-source-port", apr_psprintf(r->pool, "%hu",
backconn->local_addr->port));
-
- bb_front = apr_brigade_create(p, c->bucket_alloc);
- bb_back = apr_brigade_create(p, backconn->bucket_alloc);
+ bb = apr_brigade_create(p, c->bucket_alloc);
/* If we are connecting through a remote proxy, we need to pass
* the CONNECT request on to it.
@@ -326,24 +307,24 @@ static int proxy_connect_handler(request_rec *r, proxy_worker *worker,
*/
ap_log_rerror(APLOG_MARK, APLOG_TRACE2, 0, r,
"sending the CONNECT request to the remote proxy");
- ap_fprintf(backconn->output_filters, bb_back,
+ ap_fprintf(backconn->output_filters, bb,
"CONNECT %s HTTP/1.0" CRLF, r->uri);
- ap_fprintf(backconn->output_filters, bb_back,
+ ap_fprintf(backconn->output_filters, bb,
"Proxy-agent: %s" CRLF CRLF, ap_get_server_banner());
- ap_fflush(backconn->output_filters, bb_back);
+ ap_fflush(backconn->output_filters, bb);
}
else {
ap_log_rerror(APLOG_MARK, APLOG_TRACE1, 0, r, "Returning 200 OK");
nbytes = apr_snprintf(buffer, sizeof(buffer),
"HTTP/1.0 200 Connection Established" CRLF);
ap_xlate_proto_to_ascii(buffer, nbytes);
- ap_fwrite(c->output_filters, bb_front, buffer, nbytes);
+ ap_fwrite(c->output_filters, bb, buffer, nbytes);
nbytes = apr_snprintf(buffer, sizeof(buffer),
"Proxy-agent: %s" CRLF CRLF,
ap_get_server_banner());
ap_xlate_proto_to_ascii(buffer, nbytes);
- ap_fwrite(c->output_filters, bb_front, buffer, nbytes);
- ap_fflush(c->output_filters, bb_front);
+ ap_fwrite(c->output_filters, bb, buffer, nbytes);
+ ap_fflush(c->output_filters, bb);
#if 0
/* This is safer code, but it doesn't work yet. I'm leaving it
* here so that I can fix it later.
@@ -354,8 +335,7 @@ static int proxy_connect_handler(request_rec *r, proxy_worker *worker,
ap_rflush(r);
#endif
}
-
- ap_log_rerror(APLOG_MARK, APLOG_TRACE2, 0, r, "setting up poll()");
+ apr_brigade_cleanup(bb);
/*
* Step Four: Handle Data Transfer
@@ -363,88 +343,28 @@ static int proxy_connect_handler(request_rec *r, proxy_worker *worker,
* Handle two way transfer of data over the socket (this is a tunnel).
*/
- /* we are now acting as a tunnel - the input/output filter stacks should
- * not contain any non-connection filters.
- */
- r->output_filters = c->output_filters;
- r->proto_output_filters = c->output_filters;
- r->input_filters = c->input_filters;
- r->proto_input_filters = c->input_filters;
-/* r->sent_bodyct = 1;*/
-
- do { /* Loop until done (one side closes the connection, or an error) */
- rv = apr_pollset_poll(pollset, -1, &pollcnt, &signalled);
- if (rv != APR_SUCCESS) {
- if (APR_STATUS_IS_EINTR(rv)) {
- continue;
- }
- apr_socket_close(sock);
- ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01023) "error apr_poll()");
- return HTTP_INTERNAL_SERVER_ERROR;
- }
-#ifdef DEBUGGING
- ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(01024)
- "woke from poll(), i=%d", pollcnt);
-#endif
-
- for (pi = 0; pi < pollcnt; pi++) {
- const apr_pollfd_t *cur = &signalled[pi];
+ /* r->sent_bodyct = 1; */
- if (cur->desc.s == sock) {
- pollevent = cur->rtnevents;
- if (pollevent & (APR_POLLIN | APR_POLLHUP)) {
-#ifdef DEBUGGING
- ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(01025)
- "sock was readable");
-#endif
- done |= ap_proxy_transfer_between_connections(r, backconn,
- c, bb_back,
- bb_front,
- "sock", NULL,
- CONN_BLKSZ, 1)
- != APR_SUCCESS;
- }
- else if (pollevent & APR_POLLERR) {
- ap_log_rerror(APLOG_MARK, APLOG_NOTICE, 0, r, APLOGNO(01026)
- "err on backconn");
- backconn->aborted = 1;
- done = 1;
- }
- }
- else if (cur->desc.s == client_socket) {
- pollevent = cur->rtnevents;
- if (pollevent & (APR_POLLIN | APR_POLLHUP)) {
-#ifdef DEBUGGING
- ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(01027)
- "client was readable");
-#endif
- done |= ap_proxy_transfer_between_connections(r, c,
- backconn,
- bb_front,
- bb_back,
- "client",
- NULL,
- CONN_BLKSZ, 1)
- != APR_SUCCESS;
- }
- else if (pollevent & APR_POLLERR) {
- ap_log_rerror(APLOG_MARK, APLOG_NOTICE, 0, r, APLOGNO(02827)
- "err on client");
- c->aborted = 1;
- done = 1;
- }
- }
- else {
- ap_log_rerror(APLOG_MARK, APLOG_INFO, 0, r, APLOGNO(01028)
- "unknown socket in pollset");
- done = 1;
- }
+ rv = ap_proxy_tunnel_create(&tunnel, r, backconn, "CONNECT");
+ if (rv != APR_SUCCESS) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(10208)
+ "can't create tunnel for %pI (%s)",
+ nexthop, connectname);
+ return HTTP_INTERNAL_SERVER_ERROR;
+ }
+ rc = ap_proxy_tunnel_run(tunnel);
+ if (ap_is_HTTP_ERROR(rc)) {
+ if (rc == HTTP_GATEWAY_TIME_OUT) {
+ /* ap_proxy_tunnel_run() didn't log this */
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(10224)
+ "tunnel timed out");
}
- } while (!done);
-
- ap_log_rerror(APLOG_MARK, APLOG_TRACE2, 0, r,
- "finished with poll() - cleaning up");
+ /* Don't send an error page if we sent data already */
+ if (proxyport && !tunnel->replied) {
+ return rc;
+ }
+ }
/*
* Step Five: Clean Up
@@ -457,8 +377,6 @@ static int proxy_connect_handler(request_rec *r, proxy_worker *worker,
else
ap_lingering_close(backconn);
- c->keepalive = AP_CONN_CLOSE;
-
return OK;
}
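
The hunk above replaces mod_proxy_connect's hand-rolled poll() loop with the shared proxy tunnel API from proxy_util. A minimal sketch of that pattern (illustration only; it assumes the ap_proxy_tunnel_create()/ap_proxy_tunnel_run() interface used by the patch, and the helper name is hypothetical):

/* Sketch: pump data both ways between the client connection of r and an
 * already established backend connection until one side closes or errors. */
static int tunnel_sketch(request_rec *r, conn_rec *backconn)
{
    proxy_tunnel_rec *tunnel;
    apr_status_t rv;
    int rc;

    rv = ap_proxy_tunnel_create(&tunnel, r, backconn, "CONNECT");
    if (rv != APR_SUCCESS) {
        return HTTP_INTERNAL_SERVER_ERROR;
    }
    rc = ap_proxy_tunnel_run(tunnel);  /* returns an HTTP status code */
    return ap_is_HTTP_ERROR(rc) ? rc : OK;
}
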
diff --git a/modules/proxy/mod_proxy_express.c b/modules/proxy/mod_proxy_express.c
index 0f5d604..5d458c4 100644
--- a/modules/proxy/mod_proxy_express.c
+++ b/modules/proxy/mod_proxy_express.c
@@ -19,6 +19,11 @@
module AP_MODULE_DECLARE_DATA proxy_express_module;
+#include "apr_version.h"
+#if !APR_VERSION_AT_LEAST(2,0,0)
+#include "apu_version.h"
+#endif
+
static int proxy_available = 0;
typedef struct {
@@ -115,6 +120,10 @@ static int xlate_name(request_rec *r)
struct proxy_alias *ralias;
proxy_dir_conf *dconf;
express_server_conf *sconf;
+#if APU_MAJOR_VERSION > 1 || (APU_MAJOR_VERSION == 1 && APU_MINOR_VERSION >= 7)
+ const apr_dbm_driver_t *driver;
+ const apu_err_t *err;
+#endif
sconf = ap_get_module_config(r->server->module_config, &proxy_express_module);
dconf = ap_get_module_config(r->per_dir_config, &proxy_module);
@@ -132,11 +141,31 @@ static int xlate_name(request_rec *r)
ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(01002)
"proxy_express: Opening DBM file: %s (%s)",
sconf->dbmfile, sconf->dbmtype);
+
+#if APU_MAJOR_VERSION > 1 || (APU_MAJOR_VERSION == 1 && APU_MINOR_VERSION >= 7)
+ rv = apr_dbm_get_driver(&driver, sconf->dbmtype, &err, r->pool);
+ if (rv != APR_SUCCESS) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r,
+ APLOGNO(10275) "The dbm library '%s' could not be loaded: %s (%s: %d)",
+ sconf->dbmtype, err->msg, err->reason, err->rc);
+ return DECLINED;
+ }
+
+ rv = apr_dbm_open2(&db, driver, sconf->dbmfile, APR_DBM_READONLY,
+ APR_OS_DEFAULT, r->pool);
+ if (rv != APR_SUCCESS) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r,
+ APLOGNO(10276) "The '%s' file '%s' could not be loaded",
+ sconf->dbmtype, sconf->dbmfile);
+ return DECLINED;
+ }
+#else
rv = apr_dbm_open_ex(&db, sconf->dbmtype, sconf->dbmfile, APR_DBM_READONLY,
APR_OS_DEFAULT, r->pool);
if (rv != APR_SUCCESS) {
return DECLINED;
}
+#endif
name = ap_get_server_name(r);
ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(01003)
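
The hunk above switches mod_proxy_express to the apr_dbm_get_driver()/apr_dbm_open2() API when APR-util 1.7+ is available, so DBM driver load failures can be reported. A condensed sketch of the same version-gated pattern (illustration only; the helper name is hypothetical, and it assumes apr_dbm.h/apu_version.h are included):

/* Open a read-only DBM, preferring the newer API that reports driver
 * errors, and falling back to apr_dbm_open_ex() on older APR-util. */
static apr_status_t open_express_db(apr_dbm_t **db, const char *type,
                                    const char *file, apr_pool_t *p)
{
#if APU_MAJOR_VERSION > 1 || (APU_MAJOR_VERSION == 1 && APU_MINOR_VERSION >= 7)
    const apr_dbm_driver_t *driver;
    const apu_err_t *err;
    apr_status_t rv = apr_dbm_get_driver(&driver, type, &err, p);
    if (rv != APR_SUCCESS) {
        return rv;  /* err->msg/err->reason describe the failure */
    }
    return apr_dbm_open2(db, driver, file, APR_DBM_READONLY, APR_OS_DEFAULT, p);
#else
    return apr_dbm_open_ex(db, type, file, APR_DBM_READONLY, APR_OS_DEFAULT, p);
#endif
}
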
diff --git a/modules/proxy/mod_proxy_fcgi.c b/modules/proxy/mod_proxy_fcgi.c
index 2e97408..d420df6 100644
--- a/modules/proxy/mod_proxy_fcgi.c
+++ b/modules/proxy/mod_proxy_fcgi.c
@@ -92,15 +92,30 @@ static int proxy_fcgi_canon(request_rec *r, char *url)
host = apr_pstrcat(r->pool, "[", host, "]", NULL);
}
- if (apr_table_get(r->notes, "proxy-nocanon")) {
- path = url; /* this is the raw path */
+ if (apr_table_get(r->notes, "proxy-nocanon")
+ || apr_table_get(r->notes, "proxy-noencode")) {
+ path = url; /* this is the raw/encoded path */
}
else {
- path = ap_proxy_canonenc(r->pool, url, strlen(url), enc_path, 0,
- r->proxyreq);
+ core_dir_config *d = ap_get_core_module_config(r->per_dir_config);
+ int flags = d->allow_encoded_slashes && !d->decode_encoded_slashes ? PROXY_CANONENC_NOENCODEDSLASHENCODING : 0;
+
+ path = ap_proxy_canonenc_ex(r->pool, url, strlen(url), enc_path, flags,
+ r->proxyreq);
+ if (!path) {
+ return HTTP_BAD_REQUEST;
+ }
+ }
+ /*
+     * If there is a raw control character or a ' ' in a nocanon path,
+     * the required encoding was missed.
+ */
+ if (path == url && *ap_scan_vchar_obstext(path)) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(10414)
+ "To be forwarded path contains control "
+ "characters or spaces");
+ return HTTP_FORBIDDEN;
}
- if (path == NULL)
- return HTTP_BAD_REQUEST;
r->filename = apr_pstrcat(r->pool, "proxy:fcgi://", host, sport, "/",
path, NULL);
@@ -164,7 +179,7 @@ static int proxy_fcgi_canon(request_rec *r, char *url)
ProxyFCGISetEnvIf "reqenv('PATH_INFO') =~ m#/foo(\d+)\.php$#" PATH_INFO "/foo.php"
ProxyFCGISetEnvIf "reqenv('PATH_TRANSLATED') =~ m#(/.*foo)(\d+)(.*)#" PATH_TRANSLATED "$1$3"
*/
-static void fix_cgivars(request_rec *r, fcgi_dirconf_t *dconf)
+static apr_status_t fix_cgivars(request_rec *r, fcgi_dirconf_t *dconf)
{
sei_entry *entries;
const char *err, *src;
@@ -175,10 +190,21 @@ static void fix_cgivars(request_rec *r, fcgi_dirconf_t *dconf)
for (i = 0; i < dconf->env_fixups->nelts; i++) {
sei_entry *entry = &entries[i];
+ rc = ap_expr_exec_re(r, entry->cond, AP_MAX_REG_MATCH, regm, &src, &err);
+ if (rc < 0) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(10241)
+ "fix_cgivars: Condition eval returned %d: %s",
+ rc, err);
+ return APR_EGENERAL;
+ }
+ else if (rc == 0) {
+ continue; /* evaluated false */
+ }
+
if (entry->envname[0] == '!') {
apr_table_unset(r->subprocess_env, entry->envname+1);
}
- else if (0 < (rc = ap_expr_exec_re(r, entry->cond, AP_MAX_REG_MATCH, regm, &src, &err))) {
+ else {
const char *val = ap_expr_str_exec_re(r, entry->subst, AP_MAX_REG_MATCH, regm, &src, &err);
if (err) {
ap_log_rerror(APLOG_MARK, APLOG_INFO, 0, r, APLOGNO(03514)
@@ -195,10 +221,8 @@ static void fix_cgivars(request_rec *r, fcgi_dirconf_t *dconf)
}
apr_table_setn(r->subprocess_env, entry->envname, val);
}
- else {
- ap_log_rerror(APLOG_MARK, APLOG_TRACE8, 0, r, "fix_cgivars: Condition returned %d", rc);
- }
}
+ return APR_SUCCESS;
}
/* Wrapper for apr_socket_sendv that handles updating the worker stats. */
@@ -367,7 +391,9 @@ static apr_status_t send_environment(proxy_conn_rec *conn, request_rec *r,
/* XXX are there any FastCGI specific env vars we need to send? */
/* Give admins final option to fine-tune env vars */
- fix_cgivars(r, dconf);
+ if (APR_SUCCESS != (rv = fix_cgivars(r, dconf))) {
+ return rv;
+ }
/* XXX mod_cgi/mod_cgid use ap_create_environment here, which fills in
* the TZ value specially. We could use that, but it would mean
@@ -521,7 +547,8 @@ static int handle_headers(request_rec *r, int *state,
static apr_status_t dispatch(proxy_conn_rec *conn, proxy_dir_conf *conf,
request_rec *r, apr_pool_t *setaside_pool,
apr_uint16_t request_id, const char **err,
- int *bad_request, int *has_responded)
+ int *bad_request, int *has_responded,
+ apr_bucket_brigade *input_brigade)
{
apr_bucket_brigade *ib, *ob;
int seen_end_of_headers = 0, done = 0, ignore_body = 0;
@@ -583,9 +610,26 @@ static apr_status_t dispatch(proxy_conn_rec *conn, proxy_dir_conf *conf,
int last_stdin = 0;
char *iobuf_cursor;
- rv = ap_get_brigade(r->input_filters, ib,
- AP_MODE_READBYTES, APR_BLOCK_READ,
- iobuf_size);
+ if (APR_BRIGADE_EMPTY(input_brigade)) {
+ rv = ap_get_brigade(r->input_filters, ib,
+ AP_MODE_READBYTES, APR_BLOCK_READ,
+ iobuf_size);
+ }
+ else {
+ apr_bucket *e;
+ APR_BRIGADE_CONCAT(ib, input_brigade);
+ rv = apr_brigade_partition(ib, iobuf_size, &e);
+ if (rv == APR_SUCCESS) {
+ while (e != APR_BRIGADE_SENTINEL(ib)
+ && APR_BUCKET_IS_METADATA(e)) {
+ e = APR_BUCKET_NEXT(e);
+ }
+ apr_brigade_split_ex(ib, e, input_brigade);
+ }
+ else if (rv == APR_INCOMPLETE) {
+ rv = APR_SUCCESS;
+ }
+ }
if (rv != APR_SUCCESS) {
*err = "reading input brigade";
*bad_request = 1;
@@ -735,6 +779,15 @@ recv_again:
status = ap_scan_script_header_err_brigade_ex(r, ob,
NULL, APLOG_MODULE_INDEX);
+
+ /* FCGI has its own body framing mechanism which we don't
+ * match against any provided Content-Length, so let the
+ * core determine C-L vs T-E based on what's actually sent.
+ */
+ if (!apr_table_get(r->subprocess_env, AP_TRUST_CGILIKE_CL_ENVVAR))
+ apr_table_unset(r->headers_out, "Content-Length");
+ apr_table_unset(r->headers_out, "Transfer-Encoding");
+
/* suck in all the rest */
if (status != OK) {
apr_bucket *tmp_b;
@@ -771,8 +824,7 @@ recv_again:
}
}
- if (conf->error_override
- && ap_is_HTTP_ERROR(r->status) && ap_is_initial_req(r)) {
+ if (ap_proxy_should_override(conf, r->status) && ap_is_initial_req(r)) {
/*
* set script_error_status to discard
* everything after the headers
@@ -924,7 +976,8 @@ static int fcgi_do_request(apr_pool_t *p, request_rec *r,
conn_rec *origin,
proxy_dir_conf *conf,
apr_uri_t *uri,
- char *url, char *server_portstr)
+ char *url, char *server_portstr,
+ apr_bucket_brigade *input_brigade)
{
/* Request IDs are arbitrary numbers that we assign to a
* single request. This would allow multiplex/pipelining of
@@ -948,6 +1001,7 @@ static int fcgi_do_request(apr_pool_t *p, request_rec *r,
}
apr_pool_create(&temp_pool, r->pool);
+ apr_pool_tag(temp_pool, "proxy_fcgi_do_request");
/* Step 2: Send Environment via FCGI_PARAMS */
rv = send_environment(conn, r, temp_pool, request_id);
@@ -960,7 +1014,8 @@ static int fcgi_do_request(apr_pool_t *p, request_rec *r,
/* Step 3: Read records from the back end server and handle them. */
rv = dispatch(conn, conf, r, temp_pool, request_id,
- &err, &bad_request, &has_responded);
+ &err, &bad_request, &has_responded,
+ input_brigade);
if (rv != APR_SUCCESS) {
/* If the client aborted the connection during retrieval or (partially)
* sending the response, don't return a HTTP_SERVICE_UNAVAILABLE, since
@@ -996,6 +1051,8 @@ static int fcgi_do_request(apr_pool_t *p, request_rec *r,
#define FCGI_SCHEME "FCGI"
+#define MAX_MEM_SPOOL 16384
+
/*
* This handles fcgi:(dest) URLs
*/
@@ -1008,6 +1065,8 @@ static int proxy_fcgi_handler(request_rec *r, proxy_worker *worker,
char server_portstr[32];
conn_rec *origin = NULL;
proxy_conn_rec *backend = NULL;
+ apr_bucket_brigade *input_brigade;
+ apr_off_t input_bytes = 0;
apr_uri_t *uri;
proxy_dir_conf *dconf = ap_get_module_config(r->per_dir_config,
@@ -1050,6 +1109,101 @@ static int proxy_fcgi_handler(request_rec *r, proxy_worker *worker,
goto cleanup;
}
+ /* We possibly reuse input data prefetched in previous call(s), e.g. for a
+ * balancer fallback scenario.
+ */
+ apr_pool_userdata_get((void **)&input_brigade, "proxy-fcgi-input", p);
+ if (input_brigade == NULL) {
+ const char *old_te = apr_table_get(r->headers_in, "Transfer-Encoding");
+ const char *old_cl = NULL;
+ if (old_te) {
+ apr_table_unset(r->headers_in, "Content-Length");
+ }
+ else {
+ old_cl = apr_table_get(r->headers_in, "Content-Length");
+ }
+
+ input_brigade = apr_brigade_create(p, r->connection->bucket_alloc);
+ apr_pool_userdata_setn(input_brigade, "proxy-fcgi-input", NULL, p);
+
+    /* Prefetch (non-blocking) the request body to increase the chance of
+     * getting the whole (or enough of the) body and deciding between
+     * Content-Length, chunked, or spooled. By doing this before connecting
+     * to or reusing the backend, we minimize the delay between the moment
+     * the connection is considered alive and the first bytes being sent
+     * (should the client's link be slow or some input filter retain the
+     * data). This is a best effort to prevent the backend from closing
+     * (from under us) what it thinks is an idle connection, i.e. to reduce
+     * as much as possible the unavoidable race between our local
+     * is_socket_connected() check and the remote keepalive timeout.
+ */
+ status = ap_proxy_prefetch_input(r, backend, input_brigade,
+ APR_NONBLOCK_READ, &input_bytes,
+ MAX_MEM_SPOOL);
+ if (status != OK) {
+ goto cleanup;
+ }
+
+ /*
+ * The request body is streamed by default, using either C-L or
+ * chunked T-E, like this:
+ *
+ * The whole body (including no body) was received on prefetch, i.e.
+ * the input brigade ends with EOS => C-L = input_bytes.
+ *
+     * C-L is known and reliable, i.e. only protocol filters are in the
+     * input chain so none should change the body => use the C-L from the
+     * client.
+     *
+     * The administrator has not set "proxy-sendcl" (which prevents T-E)
+     * => use T-E and chunks.
+ *
+ * Otherwise we need to determine and set a content-length, so spool
+ * the entire request body to memory/temporary file (MAX_MEM_SPOOL),
+ * such that we finally know its length => C-L = input_bytes.
+ */
+ if (!APR_BRIGADE_EMPTY(input_brigade)
+ && APR_BUCKET_IS_EOS(APR_BRIGADE_LAST(input_brigade))) {
+ /* The whole thing fit, so our decision is trivial, use the input
+ * bytes for the Content-Length. If we expected no body, and read
+ * no body, do not set the Content-Length.
+ */
+ if (old_cl || old_te || input_bytes) {
+ apr_table_setn(r->headers_in, "Content-Length",
+ apr_off_t_toa(p, input_bytes));
+ if (old_te) {
+ apr_table_unset(r->headers_in, "Transfer-Encoding");
+ }
+ }
+ }
+ else if (old_cl && r->input_filters == r->proto_input_filters) {
+ /* Streaming is possible by preserving the existing C-L */
+ }
+ else if (!apr_table_get(r->subprocess_env, "proxy-sendcl")) {
+ /* Streaming is possible using T-E: chunked */
+ }
+ else {
+ /* No streaming, C-L is the only option so spool to memory/file */
+ apr_bucket_brigade *tmp_bb;
+ apr_off_t remaining_bytes = 0;
+
+ AP_DEBUG_ASSERT(MAX_MEM_SPOOL >= input_bytes);
+ tmp_bb = apr_brigade_create(p, r->connection->bucket_alloc);
+ status = ap_proxy_spool_input(r, backend, tmp_bb, &remaining_bytes,
+ MAX_MEM_SPOOL - input_bytes);
+ if (status != OK) {
+ goto cleanup;
+ }
+
+ APR_BRIGADE_CONCAT(input_brigade, tmp_bb);
+ input_bytes += remaining_bytes;
+
+ apr_table_setn(r->headers_in, "Content-Length",
+ apr_off_t_toa(p, input_bytes));
+ if (old_te) {
+ apr_table_unset(r->headers_in, "Transfer-Encoding");
+ }
+ }
+ }
+
/* This scheme handler does not reuse connections by default, to
* avoid tying up a fastcgi that isn't expecting to work on
* parallel requests. But if the user went out of their way to
@@ -1074,7 +1228,7 @@ static int proxy_fcgi_handler(request_rec *r, proxy_worker *worker,
/* Step Three: Process the Request */
status = fcgi_do_request(p, r, backend, origin, dconf, uri, url,
- server_portstr);
+ server_portstr, input_brigade);
cleanup:
ap_proxy_release_connection(FCGI_SCHEME, backend, r->server);
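
The prefetch hunk above decides how the request body will be framed towards the FastCGI backend. A condensed sketch of that decision (illustration only; the helper name is hypothetical and the header/EOS bookkeeping is elided), following the comment block in the hunk:

/* Prefetch non-blocking, then pick C-L, chunked T-E, or full spooling. */
static int choose_body_framing(request_rec *r, proxy_conn_rec *backend,
                               apr_bucket_brigade *input_brigade,
                               const char *old_cl)
{
    apr_off_t input_bytes = 0;
    int status = ap_proxy_prefetch_input(r, backend, input_brigade,
                                         APR_NONBLOCK_READ, &input_bytes,
                                         MAX_MEM_SPOOL);
    if (status != OK) {
        return status;
    }

    if (!APR_BRIGADE_EMPTY(input_brigade)
        && APR_BUCKET_IS_EOS(APR_BRIGADE_LAST(input_brigade))) {
        /* whole body prefetched: set C-L = input_bytes, drop any T-E */
    }
    else if (old_cl && r->input_filters == r->proto_input_filters) {
        /* stream, preserving the client's Content-Length */
    }
    else if (!apr_table_get(r->subprocess_env, "proxy-sendcl")) {
        /* stream with Transfer-Encoding: chunked */
    }
    else {
        /* spool the rest (ap_proxy_spool_input) and only then set C-L */
    }
    return OK;
}
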
diff --git a/modules/proxy/mod_proxy_fdpass.c b/modules/proxy/mod_proxy_fdpass.c
index 195b0fd..8f9893d 100644
--- a/modules/proxy/mod_proxy_fdpass.c
+++ b/modules/proxy/mod_proxy_fdpass.c
@@ -32,7 +32,7 @@ static int proxy_fdpass_canon(request_rec *r, char *url)
{
const char *path;
- if (strncasecmp(url, "fd://", 5) == 0) {
+ if (ap_cstr_casecmpn(url, "fd://", 5) == 0) {
url += 5;
}
else {
@@ -129,7 +129,7 @@ static int proxy_fdpass_handler(request_rec *r, proxy_worker *worker,
apr_socket_t *sock;
apr_socket_t *clientsock;
- if (strncasecmp(url, "fd://", 5) == 0) {
+ if (ap_cstr_casecmpn(url, "fd://", 5) == 0) {
url += 5;
}
else {
diff --git a/modules/proxy/mod_proxy_ftp.c b/modules/proxy/mod_proxy_ftp.c
index 4a10987..e0032e5 100644
--- a/modules/proxy/mod_proxy_ftp.c
+++ b/modules/proxy/mod_proxy_ftp.c
@@ -23,11 +23,6 @@
#endif
#include "apr_version.h"
-#if (APR_MAJOR_VERSION < 1)
-#undef apr_socket_create
-#define apr_socket_create apr_socket_create_ex
-#endif
-
#define AUTODETECT_PWD
/* Automatic timestamping (Last-Modified header) based on MDTM is used if:
* 1) the FTP server supports the MDTM command and
@@ -218,7 +213,7 @@ static int ftp_check_string(const char *x)
* (EBCDIC) machines either.
*/
static apr_status_t ftp_string_read(conn_rec *c, apr_bucket_brigade *bb,
- char *buff, apr_size_t bufflen, int *eos)
+ char *buff, apr_size_t bufflen, int *eos, apr_size_t *outlen)
{
apr_bucket *e;
apr_status_t rv;
@@ -230,6 +225,7 @@ static apr_status_t ftp_string_read(conn_rec *c, apr_bucket_brigade *bb,
/* start with an empty string */
buff[0] = 0;
*eos = 0;
+ *outlen = 0;
/* loop through each brigade */
while (!found) {
@@ -273,6 +269,7 @@ static apr_status_t ftp_string_read(conn_rec *c, apr_bucket_brigade *bb,
if (len > 0) {
memcpy(pos, response, len);
pos += len;
+ *outlen += len;
}
}
apr_bucket_delete(e);
@@ -292,9 +289,11 @@ static int proxy_ftp_canon(request_rec *r, char *url)
apr_pool_t *p = r->pool;
const char *err;
apr_port_t port, def_port;
+ core_dir_config *d = ap_get_core_module_config(r->per_dir_config);
+ int flags = d->allow_encoded_slashes && !d->decode_encoded_slashes ? PROXY_CANONENC_NOENCODEDSLASHENCODING : 0;
/* */
- if (strncasecmp(url, "ftp:", 4) == 0) {
+ if (ap_cstr_casecmpn(url, "ftp:", 4) == 0) {
url += 4;
}
else {
@@ -330,7 +329,8 @@ static int proxy_ftp_canon(request_rec *r, char *url)
else
parms = "";
- path = ap_proxy_canonenc(p, url, strlen(url), enc_path, 0, r->proxyreq);
+ path = ap_proxy_canonenc_ex(p, url, strlen(url), enc_path, flags,
+ r->proxyreq);
if (path == NULL)
return HTTP_BAD_REQUEST;
if (!ftp_check_string(path))
@@ -385,28 +385,36 @@ static int ftp_getrc_msg(conn_rec *ftp_ctrl, apr_bucket_brigade *bb, char *msgbu
char buff[5];
char *mb = msgbuf, *me = &msgbuf[msglen];
apr_status_t rv;
+ apr_size_t nread;
+
int eos;
- if (APR_SUCCESS != (rv = ftp_string_read(ftp_ctrl, bb, response, sizeof(response), &eos))) {
+ if (APR_SUCCESS != (rv = ftp_string_read(ftp_ctrl, bb, response, sizeof(response), &eos, &nread))) {
return -1;
}
/*
ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, NULL, APLOGNO(03233)
"<%s", response);
*/
+ if (nread < 4) {
+ ap_log_error(APLOG_MARK, APLOG_INFO, 0, NULL, APLOGNO(10229) "Malformed FTP response '%s'", response);
+ *mb = '\0';
+ return -1;
+ }
+
if (!apr_isdigit(response[0]) || !apr_isdigit(response[1]) ||
- !apr_isdigit(response[2]) || (response[3] != ' ' && response[3] != '-'))
+ !apr_isdigit(response[2]) || (response[3] != ' ' && response[3] != '-'))
status = 0;
else
status = 100 * response[0] + 10 * response[1] + response[2] - 111 * '0';
mb = apr_cpystrn(mb, response + 4, me - mb);
- if (response[3] == '-') {
+ if (response[3] == '-') { /* multi-line reply "123-foo\nbar\n123 baz" */
memcpy(buff, response, 3);
buff[3] = ' ';
do {
- if (APR_SUCCESS != (rv = ftp_string_read(ftp_ctrl, bb, response, sizeof(response), &eos))) {
+ if (APR_SUCCESS != (rv = ftp_string_read(ftp_ctrl, bb, response, sizeof(response), &eos, &nread))) {
return -1;
}
mb = apr_cpystrn(mb, response + (' ' == response[0] ? 1 : 4), me - mb);
@@ -494,7 +502,7 @@ static apr_status_t proxy_send_dir_filter(ap_filter_t *f,
path = apr_uri_unparse(p, &f->r->parsed_uri, APR_URI_UNP_OMITSITEPART | APR_URI_UNP_OMITQUERY);
/* If path began with /%2f, change the basedir */
- if (strncasecmp(path, "/%2f", 4) == 0) {
+ if (ap_cstr_casecmpn(path, "/%2f", 4) == 0) {
basedir = "/%2f";
}
@@ -813,17 +821,19 @@ proxy_ftp_command(const char *cmd, request_rec *r, conn_rec *ftp_ctrl,
APR_BRIGADE_INSERT_TAIL(bb, apr_bucket_flush_create(c->bucket_alloc));
ap_pass_brigade(ftp_ctrl->output_filters, bb);
- /* strip off the CRLF for logging */
- apr_cpystrn(message, cmd, sizeof(message));
- if ((crlf = strchr(message, '\r')) != NULL ||
- (crlf = strchr(message, '\n')) != NULL)
- *crlf = '\0';
- if (strncmp(message,"PASS ", 5) == 0)
- strcpy(&message[5], "****");
- ap_log_rerror(APLOG_MARK, APLOG_TRACE2, 0, r, ">%s", message);
+ if (APLOGrtrace2(r)) {
+ /* strip off the CRLF for logging */
+ apr_cpystrn(message, cmd, sizeof(message));
+ if ((crlf = strchr(message, '\r')) != NULL ||
+ (crlf = strchr(message, '\n')) != NULL)
+ *crlf = '\0';
+ if (strncmp(message,"PASS ", 5) == 0)
+ strcpy(&message[5], "****");
+ ap_log_rerror(APLOG_MARK, APLOG_TRACE2, 0, r, ">%s", message);
+ }
}
- rc = ftp_getrc_msg(ftp_ctrl, bb, message, sizeof message);
+ rc = ftp_getrc_msg(ftp_ctrl, bb, message, sizeof(message));
if (rc == -1 || rc == 421)
strcpy(message,"<unable to read result>");
if ((crlf = strchr(message, '\r')) != NULL ||
@@ -909,7 +919,7 @@ static char *ftp_get_PWD(request_rec *r, conn_rec *ftp_ctrl, apr_bucket_brigade
* with username and password (which was presumably queried from the user)
* supplied in the Authorization: header.
* Note that we "invent" a realm name which consists of the
- * ftp://user@host part of the reqest (sans password -if supplied but invalid-)
+ * ftp://user@host part of the request (sans password -if supplied but invalid-)
*/
static int ftp_unauthorized(request_rec *r, int log_it)
{
@@ -965,12 +975,9 @@ static int proxy_ftp_handler(request_rec *r, proxy_worker *worker,
conn_rec *c = r->connection;
proxy_conn_rec *backend;
apr_socket_t *sock, *local_sock, *data_sock = NULL;
- apr_sockaddr_t *connect_addr = NULL;
- apr_status_t rv;
conn_rec *origin, *data = NULL;
apr_status_t err = APR_SUCCESS;
- apr_status_t uerr = APR_SUCCESS;
- apr_bucket_brigade *bb = apr_brigade_create(p, c->bucket_alloc);
+ apr_bucket_brigade *bb;
char *buf, *connectname;
apr_port_t connectport;
char *ftpmessage = NULL;
@@ -993,8 +1000,8 @@ static int proxy_ftp_handler(request_rec *r, proxy_worker *worker,
/* stuff for PASV mode */
int connect = 0, use_port = 0;
char dates[APR_RFC822_DATE_LEN];
+ apr_status_t rv;
int status;
- apr_pool_t *address_pool;
/* is this for us? */
if (proxyhost) {
@@ -1003,7 +1010,7 @@ static int proxy_ftp_handler(request_rec *r, proxy_worker *worker,
proxyhost);
return DECLINED; /* proxy connections are via HTTP */
}
- if (strncasecmp(url, "ftp:", 4)) {
+ if (ap_cstr_casecmpn(url, "ftp:", 4)) {
ap_log_rerror(APLOG_MARK, APLOG_TRACE3, 0, r,
"declining URL %s - not ftp:", url);
return DECLINED; /* only interested in FTP */
@@ -1024,8 +1031,9 @@ static int proxy_ftp_handler(request_rec *r, proxy_worker *worker,
/* We break the URL into host, port, path-search */
if (r->parsed_uri.hostname == NULL) {
if (APR_SUCCESS != apr_uri_parse(p, url, &uri)) {
- return ap_proxyerror(r, HTTP_BAD_REQUEST,
- apr_psprintf(p, "URI cannot be parsed: %s", url));
+ ap_log_rerror(APLOG_MARK, APLOG_WARNING, 0, r, APLOGNO(10189)
+ "URI cannot be parsed: %s", url);
+ return ap_proxyerror(r, HTTP_BAD_REQUEST, "URI cannot be parsed");
}
connectname = uri.hostname;
connectport = uri.port;
@@ -1074,7 +1082,7 @@ static int proxy_ftp_handler(request_rec *r, proxy_worker *worker,
* still smaller that the URL is logged regularly.
*/
if ((password = apr_table_get(r->headers_in, "Authorization")) != NULL
- && strcasecmp(ap_getword(r->pool, &password, ' '), "Basic") == 0
+ && ap_cstr_casecmp(ap_getword(r->pool, &password, ' '), "Basic") == 0
&& (password = ap_pbase64decode(r->pool, password))[0] != ':') {
/* Check the decoded string for special characters. */
if (!ftp_check_string(password)) {
@@ -1107,61 +1115,35 @@ static int proxy_ftp_handler(request_rec *r, proxy_worker *worker,
ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(01036)
"connecting %s to %s:%d", url, connectname, connectport);
- if (worker->s->is_address_reusable) {
- if (!worker->cp->addr) {
- if ((err = PROXY_THREAD_LOCK(worker->balancer)) != APR_SUCCESS) {
- ap_log_rerror(APLOG_MARK, APLOG_ERR, err, r, APLOGNO(01037) "lock");
- return HTTP_INTERNAL_SERVER_ERROR;
+ /* create space for state information */
+ backend = ap_get_module_config(c->conn_config, &proxy_ftp_module);
+ if (!backend) {
+ status = ap_proxy_acquire_connection("FTP", &backend, worker, r->server);
+ if (status != OK) {
+ if (backend) {
+ backend->close = 1;
+ ap_proxy_release_connection("FTP", backend, r->server);
}
+ return status;
}
- connect_addr = worker->cp->addr;
- address_pool = worker->cp->pool;
+ ap_set_module_config(c->conn_config, &proxy_ftp_module, backend);
}
- else
- address_pool = r->pool;
- /* do a DNS lookup for the destination host */
- if (!connect_addr)
- err = apr_sockaddr_info_get(&(connect_addr),
- connectname, APR_UNSPEC,
- connectport, 0,
- address_pool);
- if (worker->s->is_address_reusable && !worker->cp->addr) {
- worker->cp->addr = connect_addr;
- if ((uerr = PROXY_THREAD_UNLOCK(worker->balancer)) != APR_SUCCESS) {
- ap_log_rerror(APLOG_MARK, APLOG_ERR, uerr, r, APLOGNO(01038) "unlock");
- }
- }
/*
* get all the possible IP addresses for the destname and loop through
* them until we get a successful connection
*/
+ err = ap_proxy_determine_address("FTP", backend, connectname, connectport,
+ 0, r, r->server);
if (APR_SUCCESS != err) {
- return ap_proxyerror(r, HTTP_BAD_GATEWAY, apr_pstrcat(p,
- "DNS lookup failure for: ",
- connectname, NULL));
+ return ftp_proxyerror(r, backend, HTTP_BAD_GATEWAY,
+ "Error resolving backend address");
}
/* check if ProxyBlock directive on this host */
- if (OK != ap_proxy_checkproxyblock2(r, conf, connectname, connect_addr)) {
- return ap_proxyerror(r, HTTP_FORBIDDEN,
- "Connect to remote machine blocked");
- }
-
- /* create space for state information */
- backend = (proxy_conn_rec *) ap_get_module_config(c->conn_config, &proxy_ftp_module);
- if (!backend) {
- status = ap_proxy_acquire_connection("FTP", &backend, worker, r->server);
- if (status != OK) {
- if (backend) {
- backend->close = 1;
- ap_proxy_release_connection("FTP", backend, r->server);
- }
- return status;
- }
- /* TODO: see if ftp could use determine_connection */
- backend->addr = connect_addr;
- ap_set_module_config(c->conn_config, &proxy_ftp_module, backend);
+ if (OK != ap_proxy_checkproxyblock2(r, conf, connectname, backend->addr)) {
+ return ftp_proxyerror(r, backend, HTTP_FORBIDDEN,
+ "Connect to remote machine blocked");
}
@@ -1171,21 +1153,15 @@ static int proxy_ftp_handler(request_rec *r, proxy_worker *worker,
* We have determined who to connect to. Now make the connection.
*/
-
if (ap_proxy_connect_backend("FTP", backend, worker, r->server)) {
- ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(01039)
- "an error occurred creating a new connection to %pI (%s)",
- connect_addr, connectname);
proxy_ftp_cleanup(r, backend);
return HTTP_SERVICE_UNAVAILABLE;
}
- if (!backend->connection) {
- status = ap_proxy_connection_create_ex("FTP", backend, r);
- if (status != OK) {
- proxy_ftp_cleanup(r, backend);
- return status;
- }
+ status = ap_proxy_connection_create_ex("FTP", backend, r);
+ if (status != OK) {
+ proxy_ftp_cleanup(r, backend);
+ return status;
}
/* Use old naming */
@@ -1203,6 +1179,7 @@ static int proxy_ftp_handler(request_rec *r, proxy_worker *worker,
* correct directory...
*/
+ bb = apr_brigade_create(p, c->bucket_alloc);
/* possible results: */
/* 120 Service ready in nnn minutes. */
@@ -1306,7 +1283,7 @@ static int proxy_ftp_handler(request_rec *r, proxy_worker *worker,
/* Special handling for leading "%2f": this enforces a "cwd /"
* out of the $HOME directory which was the starting point after login
*/
- if (strncasecmp(path, "%2f", 3) == 0) {
+ if (ap_cstr_casecmpn(path, "%2f", 3) == 0) {
path += 3;
while (*path == '/') /* skip leading '/' (after root %2f) */
++path;
@@ -1520,7 +1497,8 @@ static int proxy_ftp_handler(request_rec *r, proxy_worker *worker,
"PASV contacting host %d.%d.%d.%d:%d",
h3, h2, h1, h0, pasvport);
- if ((rv = apr_socket_create(&data_sock, connect_addr->family, SOCK_STREAM, 0, r->pool)) != APR_SUCCESS) {
+ if ((rv = apr_socket_create(&data_sock, backend->addr->family,
+ SOCK_STREAM, 0, r->pool)) != APR_SUCCESS) {
ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01045)
"error creating PASV socket");
proxy_ftp_cleanup(r, backend);
@@ -1542,7 +1520,14 @@ static int proxy_ftp_handler(request_rec *r, proxy_worker *worker,
}
/* make the connection */
- apr_sockaddr_info_get(&pasv_addr, apr_psprintf(p, "%d.%d.%d.%d", h3, h2, h1, h0), connect_addr->family, pasvport, 0, p);
+ err = apr_sockaddr_info_get(&pasv_addr, apr_psprintf(p, "%d.%d.%d.%d",
+ h3, h2, h1, h0),
+ backend->addr->family, pasvport, 0, p);
+ if (APR_SUCCESS != err) {
+ return ftp_proxyerror(r, backend, HTTP_BAD_GATEWAY,
+ apr_pstrcat(p, "DNS lookup failure for: ",
+ connectname, NULL));
+ }
rv = apr_socket_connect(data_sock, pasv_addr);
if (rv != APR_SUCCESS) {
ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01048)
@@ -1565,7 +1550,8 @@ static int proxy_ftp_handler(request_rec *r, proxy_worker *worker,
apr_port_t local_port;
unsigned int h0, h1, h2, h3, p0, p1;
- if ((rv = apr_socket_create(&local_sock, connect_addr->family, SOCK_STREAM, 0, r->pool)) != APR_SUCCESS) {
+ if ((rv = apr_socket_create(&local_sock, backend->addr->family,
+ SOCK_STREAM, 0, r->pool)) != APR_SUCCESS) {
ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01049)
"error creating local socket");
proxy_ftp_cleanup(r, backend);
@@ -1585,7 +1571,12 @@ static int proxy_ftp_handler(request_rec *r, proxy_worker *worker,
#endif /* _OSD_POSIX */
}
- apr_sockaddr_info_get(&local_addr, local_ip, APR_UNSPEC, local_port, 0, r->pool);
+ err = apr_sockaddr_info_get(&local_addr, local_ip, APR_UNSPEC, local_port, 0, r->pool);
+ if (APR_SUCCESS != err) {
+ return ftp_proxyerror(r, backend, HTTP_BAD_GATEWAY,
+ apr_pstrcat(p, "DNS lookup failure for: ",
+ connectname, NULL));
+ }
if ((rv = apr_socket_bind(local_sock, local_addr)) != APR_SUCCESS) {
ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01051)
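
ftp_getrc_msg() above now rejects replies shorter than four bytes before converting the three leading ASCII digits into an integer reply code. A sketch of that conversion (illustration only; the helper name is hypothetical): for "230 Login successful" it yields 100*'2' + 10*'3' + '0' - 111*'0' = 230.

/* Parse a 3-digit FTP reply code, requiring "NNN " or "NNN-" up front. */
static int parse_ftp_code(const char *response, apr_size_t nread)
{
    if (nread < 4
        || !apr_isdigit(response[0]) || !apr_isdigit(response[1])
        || !apr_isdigit(response[2])
        || (response[3] != ' ' && response[3] != '-')) {
        return 0;  /* malformed reply */
    }
    return 100 * response[0] + 10 * response[1] + response[2] - 111 * '0';
}
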
diff --git a/modules/proxy/mod_proxy_hcheck.c b/modules/proxy/mod_proxy_hcheck.c
index 2783a58..70f1de8 100644
--- a/modules/proxy/mod_proxy_hcheck.c
+++ b/modules/proxy/mod_proxy_hcheck.c
@@ -20,6 +20,7 @@
#if APR_HAS_THREADS
#include "apr_thread_pool.h"
#endif
+#include "http_ssl.h"
module AP_MODULE_DECLARE_DATA proxy_hcheck_module;
@@ -33,7 +34,6 @@ module AP_MODULE_DECLARE_DATA proxy_hcheck_module;
#endif
#else
#define HC_USE_THREADS 0
-typedef void apr_thread_pool_t;
#endif
typedef struct {
@@ -65,6 +65,7 @@ typedef struct {
const char *method; /* Method string for the HTTP/AJP request */
const char *req; /* pre-formatted HTTP/AJP request */
proxy_worker *w; /* Pointer to the actual worker */
+ const char *protocol; /* HTTP 1.0 or 1.1? */
} wctx_t;
typedef struct {
@@ -73,9 +74,11 @@ typedef struct {
proxy_balancer *balancer;
proxy_worker *worker;
proxy_worker *hc;
- apr_time_t now;
+ apr_time_t *now;
} baton_t;
+static APR_OPTIONAL_FN_TYPE(ajp_handle_cping_cpong) *ajp_handle_cping_cpong = NULL;
+
static void *hc_create_config(apr_pool_t *p, server_rec *s)
{
sctx_t *ctx = apr_pcalloc(p, sizeof(sctx_t));
@@ -89,7 +92,10 @@ static void *hc_create_config(apr_pool_t *p, server_rec *s)
}
static ap_watchdog_t *watchdog;
-static int tpsize = HC_THREADPOOL_SIZE;
+#if HC_USE_THREADS
+static apr_thread_pool_t *hctp;
+static int tpsize;
+#endif
/*
* This serves double duty by not only validating (and creating)
@@ -110,6 +116,10 @@ static const char *set_worker_hc_param(apr_pool_t *p,
if (!worker && !v) {
return "Bad call to set_worker_hc_param()";
}
+ if (!ctx) {
+ ctx = hc_create_config(p, s);
+ ap_set_module_config(s->module_config, &proxy_hcheck_module, ctx);
+ }
temp = (hc_template_t *)v;
if (!strcasecmp(key, "hctemplate")) {
hc_template_t *template;
@@ -333,7 +343,8 @@ static const char *set_hc_tpsize (cmd_parms *cmd, void *dummy, const char *arg)
*/
static request_rec *create_request_rec(apr_pool_t *p, server_rec *s,
proxy_balancer *balancer,
- const char *method)
+ const char *method,
+ const char *protocol)
{
request_rec *r;
@@ -391,10 +402,12 @@ static request_rec *create_request_rec(apr_pool_t *p, server_rec *s,
else {
r->header_only = 0;
}
-
r->protocol = "HTTP/1.0";
r->proto_num = HTTP_VERSION(1, 0);
-
+    /* protocol is "HTTP/1.0" or "HTTP/1.1"; byte 7 is the minor version digit */
+    if (protocol && (protocol[7] == '1')) {
+ r->protocol = "HTTP/1.1";
+ r->proto_num = HTTP_VERSION(1, 1);
+ }
r->hostname = NULL;
return r;
@@ -418,31 +431,43 @@ static void create_hcheck_req(wctx_t *wctx, proxy_worker *hc,
{
char *req = NULL;
const char *method = NULL;
+ const char *protocol = NULL;
+
+ /* TODO: Fold into switch/case below? This seems more obvious */
+ if ( (hc->s->method == OPTIONS11) || (hc->s->method == HEAD11) || (hc->s->method == GET11) ) {
+ protocol = "HTTP/1.1";
+ } else {
+ protocol = "HTTP/1.0";
+ }
switch (hc->s->method) {
case OPTIONS:
+ case OPTIONS11:
method = "OPTIONS";
req = apr_psprintf(p,
- "OPTIONS * HTTP/1.0\r\n"
+ "OPTIONS * %s\r\n"
"Host: %s:%d\r\n"
- "\r\n",
+ "\r\n", protocol,
hc->s->hostname_ex, (int)hc->s->port);
break;
case HEAD:
+ case HEAD11:
method = "HEAD";
/* fallthru */
case GET:
+ case GET11:
if (!method) { /* did we fall thru? If not, we are GET */
method = "GET";
}
req = apr_psprintf(p,
- "%s %s%s%s HTTP/1.0\r\n"
+ "%s %s%s%s %s\r\n"
"Host: %s:%d\r\n"
"\r\n",
method,
(wctx->path ? wctx->path : ""),
(wctx->path && *hc->s->hcuri ? "/" : "" ),
(*hc->s->hcuri ? hc->s->hcuri : ""),
+ protocol,
hc->s->hostname_ex, (int)hc->s->port);
break;
@@ -451,6 +476,7 @@ static void create_hcheck_req(wctx_t *wctx, proxy_worker *hc,
}
wctx->req = req;
wctx->method = method;
+ wctx->protocol = protocol;
}
static proxy_worker *hc_get_hcworker(sctx_t *ctx, proxy_worker *worker,
@@ -463,7 +489,7 @@ static proxy_worker *hc_get_hcworker(sctx_t *ctx, proxy_worker *worker,
if (!hc) {
apr_uri_t uri;
apr_status_t rv;
- const char *url = worker->s->name;
+ const char *url = worker->s->name_ex;
wctx_t *wctx = apr_pcalloc(ctx->p, sizeof(wctx_t));
port = (worker->s->port ? worker->s->port
@@ -473,16 +499,25 @@ static proxy_worker *hc_get_hcworker(sctx_t *ctx, proxy_worker *worker,
worker, worker->s->scheme, worker->s->hostname_ex,
(int)port);
- ap_proxy_define_worker(ctx->p, &hc, NULL, NULL, worker->s->name, 0);
+ ap_proxy_define_worker(ctx->p, &hc, NULL, NULL, worker->s->name_ex, 0);
apr_snprintf(hc->s->name, sizeof hc->s->name, "%pp", worker);
+ apr_snprintf(hc->s->name_ex, sizeof hc->s->name_ex, "%pp", worker);
PROXY_STRNCPY(hc->s->hostname, worker->s->hostname); /* for compatibility */
PROXY_STRNCPY(hc->s->hostname_ex, worker->s->hostname_ex);
PROXY_STRNCPY(hc->s->scheme, worker->s->scheme);
PROXY_STRNCPY(hc->s->hcuri, worker->s->hcuri);
PROXY_STRNCPY(hc->s->hcexpr, worker->s->hcexpr);
- hc->hash.def = hc->s->hash.def = ap_proxy_hashfunc(hc->s->name, PROXY_HASHFUNC_DEFAULT);
- hc->hash.fnv = hc->s->hash.fnv = ap_proxy_hashfunc(hc->s->name, PROXY_HASHFUNC_FNV);
+ hc->hash.def = hc->s->hash.def = ap_proxy_hashfunc(hc->s->name_ex,
+ PROXY_HASHFUNC_DEFAULT);
+ hc->hash.fnv = hc->s->hash.fnv = ap_proxy_hashfunc(hc->s->name_ex,
+ PROXY_HASHFUNC_FNV);
hc->s->port = port;
+ hc->s->conn_timeout_set = worker->s->conn_timeout_set;
+ hc->s->conn_timeout = worker->s->conn_timeout;
+ hc->s->ping_timeout_set = worker->s->ping_timeout_set;
+ hc->s->ping_timeout = worker->s->ping_timeout;
+ hc->s->timeout_set = worker->s->timeout_set;
+ hc->s->timeout = worker->s->timeout;
/* Do not disable worker in case of errors */
hc->s->status |= PROXY_WORKER_IGNORE_ERRORS;
/* Mark as the "generic" worker */
@@ -516,33 +551,78 @@ static proxy_worker *hc_get_hcworker(sctx_t *ctx, proxy_worker *worker,
return hc;
}
-static int hc_determine_connection(sctx_t *ctx, proxy_worker *worker,
- apr_sockaddr_t **addr, apr_pool_t *p)
+static int hc_determine_connection(const char *proxy_function,
+ proxy_conn_rec *backend,
+ server_rec *s)
{
- apr_status_t rv = APR_SUCCESS;
+ proxy_worker *worker = backend->worker;
+ apr_status_t rv;
+
/*
* normally, this is done in ap_proxy_determine_connection().
* TODO: Look at using ap_proxy_determine_connection() with a
* fake request_rec
*/
- if (worker->cp->addr) {
- *addr = worker->cp->addr;
+ rv = ap_proxy_determine_address(proxy_function, backend,
+ worker->s->hostname_ex, worker->s->port,
+ 0, NULL, s);
+ if (rv != APR_SUCCESS) {
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, rv, s, APLOGNO(03249)
+ "DNS lookup failure for: %s:%hu",
+ worker->s->hostname_ex, worker->s->port);
+ return !OK;
}
- else {
- rv = apr_sockaddr_info_get(addr, worker->s->hostname_ex,
- APR_UNSPEC, worker->s->port, 0, p);
- if (rv != APR_SUCCESS) {
- ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, ctx->s, APLOGNO(03249)
- "DNS lookup failure for: %s:%d",
- worker->s->hostname_ex, (int)worker->s->port);
+
+ return OK;
+}
+
+static apr_status_t backend_cleanup(const char *proxy_function, proxy_conn_rec *backend,
+ server_rec *s, int status)
+{
+ if (backend) {
+ backend->close = 1;
+ ap_proxy_release_connection(proxy_function, backend, s);
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s, APLOGNO(03251)
+ "Health check %s Status (%d) for %s.",
+ ap_proxy_show_hcmethod(backend->worker->s->method),
+ status,
+ backend->worker->s->name_ex);
+ }
+ if (status != OK) {
+ return APR_EGENERAL;
+ }
+ return APR_SUCCESS;
+}
+
+static int hc_get_backend(const char *proxy_function, proxy_conn_rec **backend,
+ proxy_worker *hc, sctx_t *ctx)
+{
+ int status;
+
+ status = ap_proxy_acquire_connection(proxy_function, backend, hc, ctx->s);
+ if (status != OK) {
+ return status;
+ }
+
+ if (strcmp(hc->s->scheme, "https") == 0 || strcmp(hc->s->scheme, "wss") == 0 ) {
+ if (!ap_ssl_has_outgoing_handlers()) {
+ ap_log_error(APLOG_MARK, APLOG_WARNING, 0, ctx->s, APLOGNO(03252)
+ "mod_ssl not configured?");
+ return !OK;
}
+ (*backend)->is_ssl = 1;
}
- return (rv == APR_SUCCESS ? OK : !OK);
+
+ return hc_determine_connection(proxy_function, *backend, ctx->s);
}
-static apr_status_t hc_init_worker(sctx_t *ctx, proxy_worker *worker)
+static apr_status_t hc_init_baton(baton_t *baton)
{
+ sctx_t *ctx = baton->ctx;
+ proxy_worker *worker = baton->worker, *hc;
apr_status_t rv = APR_SUCCESS;
+ int once = 0;
+
/*
* Since this is the watchdog, workers never actually handle a
* request here, and so the local data isn't initialized (of
@@ -555,52 +635,67 @@ static apr_status_t hc_init_worker(sctx_t *ctx, proxy_worker *worker)
ap_log_error(APLOG_MARK, APLOG_EMERG, rv, ctx->s, APLOGNO(03250) "Cannot init worker");
return rv;
}
- if (worker->s->is_address_reusable && !worker->s->disablereuse &&
- hc_determine_connection(ctx, worker, &worker->cp->addr,
- worker->cp->pool) != OK) {
+ once = 1;
+ }
+
+ baton->hc = hc = hc_get_hcworker(ctx, worker, baton->ptemp);
+
+ /* Try to resolve the worker address once if it's reusable */
+ if (once && worker->s->is_address_reusable) {
+ proxy_conn_rec *backend = NULL;
+ if (hc_get_backend("HCHECK", &backend, hc, ctx)) {
rv = APR_EGENERAL;
}
+ if (backend) {
+ backend->close = 1;
+ ap_proxy_release_connection("HCHECK", backend, ctx->s);
+ }
}
- return rv;
-}
-static apr_status_t backend_cleanup(const char *proxy_function, proxy_conn_rec *backend,
- server_rec *s, int status)
-{
- if (backend) {
- backend->close = 1;
- ap_proxy_release_connection(proxy_function, backend, s);
- ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s, APLOGNO(03251)
- "Health check %s Status (%d) for %s.",
- ap_proxy_show_hcmethod(backend->worker->s->method),
- status,
- backend->worker->s->name);
- }
- if (status != OK) {
- return APR_EGENERAL;
- }
- return APR_SUCCESS;
+ return rv;
}
-static int hc_get_backend(const char *proxy_function, proxy_conn_rec **backend,
- proxy_worker *hc, sctx_t *ctx, apr_pool_t *ptemp)
+static apr_status_t hc_check_cping(baton_t *baton, apr_thread_t *thread)
{
int status;
- status = ap_proxy_acquire_connection(proxy_function, backend, hc, ctx->s);
- if (status == OK) {
- (*backend)->addr = hc->cp->addr;
- (*backend)->hostname = hc->s->hostname_ex;
- if (strcmp(hc->s->scheme, "https") == 0) {
- if (!ap_proxy_ssl_enable(NULL)) {
- ap_log_error(APLOG_MARK, APLOG_WARNING, 0, ctx->s, APLOGNO(03252)
- "mod_ssl not configured?");
- return !OK;
- }
- (*backend)->is_ssl = 1;
- }
+ sctx_t *ctx = baton->ctx;
+ proxy_worker *hc = baton->hc;
+ proxy_conn_rec *backend = NULL;
+ apr_pool_t *ptemp = baton->ptemp;
+ request_rec *r;
+ apr_interval_time_t timeout;
+ if (!ajp_handle_cping_cpong) {
+ return APR_ENOTIMPL;
+ }
+
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, baton->ctx->s, "HCCPING starting");
+ if ((status = hc_get_backend("HCCPING", &backend, hc, ctx)) != OK) {
+ return backend_cleanup("HCCPING", backend, ctx->s, status);
+ }
+ if ((status = ap_proxy_connect_backend("HCCPING", backend, hc, ctx->s)) != OK) {
+ return backend_cleanup("HCCPING", backend, ctx->s, status);
}
- return hc_determine_connection(ctx, hc, &(*backend)->addr, ptemp);
+ r = create_request_rec(ptemp, ctx->s, baton->balancer, "CPING", NULL);
+ if ((status = ap_proxy_connection_create_ex("HCCPING", backend, r)) != OK) {
+ return backend_cleanup("HCCPING", backend, ctx->s, status);
+ }
+ set_request_connection(r, backend->connection);
+ backend->connection->current_thread = thread;
+
+ if (hc->s->ping_timeout_set) {
+ timeout = hc->s->ping_timeout;
+ } else if ( hc->s->conn_timeout_set) {
+ timeout = hc->s->conn_timeout;
+ } else if ( hc->s->timeout_set) {
+ timeout = hc->s->timeout;
+ } else {
+ /* default to socket timeout */
+ apr_socket_timeout_get(backend->sock, &timeout);
+ }
+ status = ajp_handle_cping_cpong(backend->sock, r, timeout);
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, baton->ctx->s, "HCCPING done %d", status);
+ return backend_cleanup("HCCPING", backend, ctx->s, status);
}
static apr_status_t hc_check_tcp(baton_t *baton)
@@ -610,7 +705,7 @@ static apr_status_t hc_check_tcp(baton_t *baton)
proxy_worker *hc = baton->hc;
proxy_conn_rec *backend = NULL;
- status = hc_get_backend("HCTCP", &backend, hc, ctx, baton->ptemp);
+ status = hc_get_backend("HCTCP", &backend, hc, ctx);
if (status == OK) {
status = ap_proxy_connect_backend("HCTCP", backend, hc, ctx->s);
/* does an unconditional ap_proxy_is_socket_connected() */
@@ -636,6 +731,7 @@ static int hc_read_headers(request_rec *r)
{
char buffer[HUGE_STRING_LEN];
int len;
+ const char *ct;
len = ap_getline(buffer, sizeof(buffer), r, 1);
if (len <= 0) {
@@ -670,6 +766,7 @@ static int hc_read_headers(request_rec *r)
} else {
return !OK;
}
+
/* OK, 1st line is OK... scarf in the headers */
while ((len = ap_getline(buffer, sizeof(buffer), r, 1)) > 0) {
char *value, *end;
@@ -686,6 +783,11 @@ static int hc_read_headers(request_rec *r)
*end = '\0';
apr_table_add(r->headers_out, buffer, value);
}
+
+ /* Set the Content-Type for the request if set */
+ if ((ct = apr_table_get(r->headers_out, "Content-Type")) != NULL)
+ ap_set_content_type(r, ct);
+
return OK;
}
@@ -736,7 +838,7 @@ static int hc_read_body(request_rec *r, apr_bucket_brigade *bb)
* then apply those to the resulting response, otherwise
* any status code 2xx or 3xx is considered "passing"
*/
-static apr_status_t hc_check_http(baton_t *baton)
+static apr_status_t hc_check_http(baton_t *baton, apr_thread_t *thread)
{
int status;
proxy_conn_rec *backend = NULL;
@@ -754,20 +856,19 @@ static apr_status_t hc_check_http(baton_t *baton)
return APR_ENOTIMPL;
}
- if ((status = hc_get_backend("HCOH", &backend, hc, ctx, ptemp)) != OK) {
+ if ((status = hc_get_backend("HCOH", &backend, hc, ctx)) != OK) {
return backend_cleanup("HCOH", backend, ctx->s, status);
}
if ((status = ap_proxy_connect_backend("HCOH", backend, hc, ctx->s)) != OK) {
return backend_cleanup("HCOH", backend, ctx->s, status);
}
- r = create_request_rec(ptemp, ctx->s, baton->balancer, wctx->method);
- if (!backend->connection) {
- if ((status = ap_proxy_connection_create_ex("HCOH", backend, r)) != OK) {
- return backend_cleanup("HCOH", backend, ctx->s, status);
- }
+ r = create_request_rec(ptemp, ctx->s, baton->balancer, wctx->method, wctx->protocol);
+ if ((status = ap_proxy_connection_create_ex("HCOH", backend, r)) != OK) {
+ return backend_cleanup("HCOH", backend, ctx->s, status);
}
set_request_connection(r, backend->connection);
+ backend->connection->current_thread = thread;
bb = apr_brigade_create(r->pool, r->connection->bucket_alloc);
@@ -796,22 +897,22 @@ static apr_status_t hc_check_http(baton_t *baton)
if (ok > 0) {
ap_log_error(APLOG_MARK, APLOG_TRACE2, 0, ctx->s,
"Condition %s for %s (%s): passed", worker->s->hcexpr,
- hc->s->name, worker->s->name);
+ hc->s->name_ex, worker->s->name_ex);
} else if (ok < 0 || err) {
ap_log_error(APLOG_MARK, APLOG_INFO, 0, ctx->s, APLOGNO(03301)
"Error on checking condition %s for %s (%s): %s", worker->s->hcexpr,
- hc->s->name, worker->s->name, err);
+ hc->s->name_ex, worker->s->name_ex, err);
status = !OK;
} else {
ap_log_error(APLOG_MARK, APLOG_TRACE2, 0, ctx->s,
"Condition %s for %s (%s) : failed", worker->s->hcexpr,
- hc->s->name, worker->s->name);
+ hc->s->name_ex, worker->s->name_ex);
status = !OK;
}
} else if (r->status < 200 || r->status > 399) {
ap_log_error(APLOG_MARK, APLOG_TRACE2, 0, ctx->s,
"Response status %i for %s (%s): failed", r->status,
- hc->s->name, worker->s->name);
+ hc->s->name_ex, worker->s->name_ex);
status = !OK;
}
return backend_cleanup("HCOH", backend, ctx->s, status);
@@ -823,29 +924,31 @@ static void * APR_THREAD_FUNC hc_check(apr_thread_t *thread, void *b)
server_rec *s = baton->ctx->s;
proxy_worker *worker = baton->worker;
proxy_worker *hc = baton->hc;
- apr_time_t now = baton->now;
+ apr_time_t now;
apr_status_t rv;
ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s, APLOGNO(03256)
"%sHealth checking %s", (thread ? "Threaded " : ""),
- worker->s->name);
+ worker->s->name_ex);
- worker->s->updated = now;
if (hc->s->method == TCP) {
rv = hc_check_tcp(baton);
}
+ else if (hc->s->method == CPING) {
+ rv = hc_check_cping(baton, thread);
+ }
else {
- rv = hc_check_http(baton);
+ rv = hc_check_http(baton, thread);
}
+
+ now = apr_time_now();
if (rv == APR_ENOTIMPL) {
ap_log_error(APLOG_MARK, APLOG_ERR, 0, s, APLOGNO(03257)
"Somehow tried to use unimplemented hcheck method: %d",
(int)hc->s->method);
- apr_pool_destroy(baton->ptemp);
- return NULL;
}
/* what state are we in ? */
- if (PROXY_WORKER_IS_HCFAILED(worker)) {
+ else if (PROXY_WORKER_IS_HCFAILED(worker) || PROXY_WORKER_IS_ERROR(worker)) {
if (rv == APR_SUCCESS) {
worker->s->pcount += 1;
if (worker->s->pcount >= worker->s->passes) {
@@ -854,11 +957,12 @@ static void * APR_THREAD_FUNC hc_check(apr_thread_t *thread, void *b)
worker->s->pcount = 0;
ap_log_error(APLOG_MARK, APLOG_INFO, 0, s, APLOGNO(03302)
"%sHealth check ENABLING %s", (thread ? "Threaded " : ""),
- worker->s->name);
+ worker->s->name_ex);
}
}
- } else {
+ }
+ else {
if (rv != APR_SUCCESS) {
worker->s->error_time = now;
worker->s->fcount += 1;
@@ -867,11 +971,16 @@ static void * APR_THREAD_FUNC hc_check(apr_thread_t *thread, void *b)
worker->s->fcount = 0;
ap_log_error(APLOG_MARK, APLOG_INFO, 0, s, APLOGNO(03303)
"%sHealth check DISABLING %s", (thread ? "Threaded " : ""),
- worker->s->name);
+ worker->s->name_ex);
}
}
}
+ if (baton->now) {
+ *baton->now = now;
+ }
apr_pool_destroy(baton->ptemp);
+ worker->s->updated = now;
+
return NULL;
}
@@ -879,12 +988,10 @@ static apr_status_t hc_watchdog_callback(int state, void *data,
apr_pool_t *pool)
{
apr_status_t rv = APR_SUCCESS;
- apr_time_t now = apr_time_now();
proxy_balancer *balancer;
sctx_t *ctx = (sctx_t *)data;
server_rec *s = ctx->s;
proxy_server_conf *conf;
- static apr_thread_pool_t *hctp = NULL;
switch (state) {
case AP_WATCHDOG_STATE_STARTING:
@@ -911,15 +1018,11 @@ static apr_status_t hc_watchdog_callback(int state, void *data,
"Skipping apr_thread_pool_create()");
hctp = NULL;
}
-
#endif
break;
case AP_WATCHDOG_STATE_RUNNING:
/* loop thru all workers */
- ap_log_error(APLOG_MARK, APLOG_TRACE5, 0, s,
- "Run of %s watchdog.",
- HCHECK_WATHCHDOG_NAME);
if (s) {
int i;
conf = (proxy_server_conf *) ap_get_module_config(s->module_config, &proxy_module);
@@ -927,45 +1030,52 @@ static apr_status_t hc_watchdog_callback(int state, void *data,
ctx->s = s;
for (i = 0; i < conf->balancers->nelts; i++, balancer++) {
int n;
+ apr_time_t now;
proxy_worker **workers;
proxy_worker *worker;
/* Have any new balancers or workers been added dynamically? */
ap_proxy_sync_balancer(balancer, s, conf);
workers = (proxy_worker **)balancer->workers->elts;
+ now = apr_time_now();
for (n = 0; n < balancer->workers->nelts; n++) {
worker = *workers;
if (!PROXY_WORKER_IS(worker, PROXY_WORKER_STOPPED) &&
- (worker->s->method != NONE) &&
- (now > worker->s->updated + worker->s->interval)) {
+ (worker->s->method != NONE) &&
+ (worker->s->updated != 0) &&
+ (now > worker->s->updated + worker->s->interval)) {
baton_t *baton;
apr_pool_t *ptemp;
+
ap_log_error(APLOG_MARK, APLOG_TRACE3, 0, s,
"Checking %s worker: %s [%d] (%pp)", balancer->s->name,
- worker->s->name, worker->s->method, worker);
+ worker->s->name_ex, worker->s->method, worker);
- if ((rv = hc_init_worker(ctx, worker)) != APR_SUCCESS) {
- return rv;
- }
- /* This pool must last the lifetime of the (possible) thread */
+ /* This pool has the lifetime of the check */
apr_pool_create(&ptemp, ctx->p);
apr_pool_tag(ptemp, "hc_request");
- baton = apr_palloc(ptemp, sizeof(baton_t));
+ baton = apr_pcalloc(ptemp, sizeof(baton_t));
baton->ctx = ctx;
- baton->now = now;
baton->balancer = balancer;
baton->worker = worker;
baton->ptemp = ptemp;
- baton->hc = hc_get_hcworker(ctx, worker, ptemp);
-
- if (!hctp) {
- hc_check(NULL, baton);
+ if ((rv = hc_init_baton(baton))) {
+ worker->s->updated = now;
+ apr_pool_destroy(ptemp);
+ return rv;
}
+ worker->s->updated = 0;
#if HC_USE_THREADS
- else {
- rv = apr_thread_pool_push(hctp, hc_check, (void *)baton,
- APR_THREAD_TASK_PRIORITY_NORMAL, NULL);
+ if (hctp) {
+ apr_thread_pool_push(hctp, hc_check, (void *)baton,
+ APR_THREAD_TASK_PRIORITY_NORMAL,
+ NULL);
}
+ else
#endif
+ {
+ baton->now = &now;
+ hc_check(NULL, baton);
+ }
}
workers++;
}
@@ -984,9 +1094,9 @@ static apr_status_t hc_watchdog_callback(int state, void *data,
ap_log_error(APLOG_MARK, APLOG_INFO, rv, s, APLOGNO(03315)
"apr_thread_pool_destroy() failed");
}
+ hctp = NULL;
}
#endif
- hctp = NULL;
break;
}
return rv;
@@ -994,7 +1104,22 @@ static apr_status_t hc_watchdog_callback(int state, void *data,
static int hc_pre_config(apr_pool_t *pconf, apr_pool_t *plog,
apr_pool_t *ptemp)
{
+#if HC_USE_THREADS
+ hctp = NULL;
tpsize = HC_THREADPOOL_SIZE;
+#endif
+
+ ajp_handle_cping_cpong = APR_RETRIEVE_OPTIONAL_FN(ajp_handle_cping_cpong);
+ if (ajp_handle_cping_cpong) {
+ proxy_hcmethods_t *method = proxy_hcmethods;
+ for (; method->name; method++) {
+ if (method->method == CPING) {
+ method->implemented = 1;
+ break;
+ }
+ }
+ }
+
return OK;
}
static int hc_post_config(apr_pool_t *p, apr_pool_t *plog,
@@ -1050,6 +1175,7 @@ static int hc_post_config(apr_pool_t *p, apr_pool_t *plog,
"watchdog callback registered (%s for %s)", HCHECK_WATHCHDOG_NAME, s->server_hostname);
s = s->next;
}
+
return OK;
}
@@ -1060,6 +1186,8 @@ static void hc_show_exprs(request_rec *r)
int i;
sctx_t *ctx = (sctx_t *) ap_get_module_config(r->server->module_config,
&proxy_hcheck_module);
+ if (!ctx)
+ return;
if (apr_is_empty_table(ctx->conditions))
return;
@@ -1089,6 +1217,8 @@ static void hc_select_exprs(request_rec *r, const char *expr)
int i;
sctx_t *ctx = (sctx_t *) ap_get_module_config(r->server->module_config,
&proxy_hcheck_module);
+ if (!ctx)
+ return;
if (apr_is_empty_table(ctx->conditions))
return;
@@ -1112,6 +1242,8 @@ static int hc_valid_expr(request_rec *r, const char *expr)
int i;
sctx_t *ctx = (sctx_t *) ap_get_module_config(r->server->module_config,
&proxy_hcheck_module);
+ if (!ctx)
+ return 0;
if (apr_is_empty_table(ctx->conditions))
return 0;
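
The CPING support added above relies on APR's optional-function mechanism: mod_proxy_ajp exports ajp_handle_cping_cpong, and mod_proxy_hcheck looks it up at pre-config time, enabling the CPING health-check method only when that provider is loaded. A minimal sketch of the lookup (illustration only; the hook name is hypothetical):

static APR_OPTIONAL_FN_TYPE(ajp_handle_cping_cpong) *cping_fn;

static int sketch_pre_config(apr_pool_t *pconf, apr_pool_t *plog,
                             apr_pool_t *ptemp)
{
    /* NULL if mod_proxy_ajp is not loaded; CPING then reports APR_ENOTIMPL */
    cping_fn = APR_RETRIEVE_OPTIONAL_FN(ajp_handle_cping_cpong);
    return OK;
}
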
diff --git a/modules/proxy/mod_proxy_http.c b/modules/proxy/mod_proxy_http.c
index 56af9a8..bd57b4d 100644
--- a/modules/proxy/mod_proxy_http.c
+++ b/modules/proxy/mod_proxy_http.c
@@ -31,36 +31,71 @@ static apr_status_t ap_proxy_http_cleanup(const char *scheme,
static apr_status_t ap_proxygetline(apr_bucket_brigade *bb, char *s, int n,
request_rec *r, int flags, int *read);
+static const char *get_url_scheme(const char **url, int *is_ssl)
+{
+ const char *u = *url;
+
+ switch (u[0]) {
+ case 'h':
+ case 'H':
+ if (strncasecmp(u + 1, "ttp", 3) == 0) {
+ if (u[4] == ':') {
+ *is_ssl = 0;
+ *url = u + 5;
+ return "http";
+ }
+ if (apr_tolower(u[4]) == 's' && u[5] == ':') {
+ *is_ssl = 1;
+ *url = u + 6;
+ return "https";
+ }
+ }
+ break;
+
+ case 'w':
+ case 'W':
+ if (apr_tolower(u[1]) == 's') {
+ if (u[2] == ':') {
+ *is_ssl = 0;
+ *url = u + 3;
+ return "ws";
+ }
+ if (apr_tolower(u[2]) == 's' && u[3] == ':') {
+ *is_ssl = 1;
+ *url = u + 4;
+ return "wss";
+ }
+ }
+ break;
+ }
+
+ *is_ssl = 0;
+ return NULL;
+}
+
/*
* Canonicalise http-like URLs.
* scheme is the scheme for the URL
* url is the URL starting with the first '/'
- * def_port is the default port for this scheme.
*/
static int proxy_http_canon(request_rec *r, char *url)
{
+ const char *base_url = url;
char *host, *path, sport[7];
char *search = NULL;
const char *err;
const char *scheme;
apr_port_t port, def_port;
+ int is_ssl = 0;
- /* ap_port_of_scheme() */
- if (strncasecmp(url, "http:", 5) == 0) {
- url += 5;
- scheme = "http";
- }
- else if (strncasecmp(url, "https:", 6) == 0) {
- url += 6;
- scheme = "https";
- }
- else {
+ scheme = get_url_scheme((const char **)&url, &is_ssl);
+ if (!scheme) {
return DECLINED;
}
- port = def_port = ap_proxy_port_of_scheme(scheme);
+ port = def_port = (is_ssl) ? DEFAULT_HTTPS_PORT : DEFAULT_HTTP_PORT;
ap_log_rerror(APLOG_MARK, APLOG_TRACE1, 0, r,
- "HTTP: canonicalising URL %s", url);
+ "HTTP: canonicalising URL %s", base_url);
     /* do syntactic check.
* We break the URL into host, port, path, search
@@ -68,7 +103,7 @@ static int proxy_http_canon(request_rec *r, char *url)
err = ap_proxy_canon_netloc(r->pool, &url, NULL, NULL, &host, &port);
if (err) {
ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(01083)
- "error parsing URL %s: %s", url, err);
+ "error parsing URL %s: %s", base_url, err);
return HTTP_BAD_REQUEST;
}
@@ -86,9 +121,19 @@ static int proxy_http_canon(request_rec *r, char *url)
if (apr_table_get(r->notes, "proxy-nocanon")) {
path = url; /* this is the raw path */
}
+ else if (apr_table_get(r->notes, "proxy-noencode")) {
+ path = url; /* this is the encoded path already */
+ search = r->args;
+ }
else {
- path = ap_proxy_canonenc(r->pool, url, strlen(url),
- enc_path, 0, r->proxyreq);
+ core_dir_config *d = ap_get_core_module_config(r->per_dir_config);
+ int flags = d->allow_encoded_slashes && !d->decode_encoded_slashes ? PROXY_CANONENC_NOENCODEDSLASHENCODING : 0;
+
+ path = ap_proxy_canonenc_ex(r->pool, url, strlen(url), enc_path,
+ flags, r->proxyreq);
+ if (!path) {
+ return HTTP_BAD_REQUEST;
+ }
search = r->args;
}
break;
@@ -96,9 +141,22 @@ static int proxy_http_canon(request_rec *r, char *url)
path = url;
break;
}
-
- if (path == NULL)
- return HTTP_BAD_REQUEST;
+ /*
+ * If we have a raw control character or a ' ' in nocanon path or
+ * r->args, correct encoding was missed.
+ */
+ if (path == url && *ap_scan_vchar_obstext(path)) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(10415)
+ "To be forwarded path contains control "
+ "characters or spaces");
+ return HTTP_FORBIDDEN;
+ }
+ if (search && *ap_scan_vchar_obstext(search)) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(10408)
+ "To be forwarded query string contains control "
+ "characters or spaces");
+ return HTTP_FORBIDDEN;
+ }
if (port != def_port)
apr_snprintf(sport, sizeof(sport), ":%d", port);
@@ -108,8 +166,9 @@ static int proxy_http_canon(request_rec *r, char *url)
if (ap_strchr_c(host, ':')) { /* if literal IPv6 address */
host = apr_pstrcat(r->pool, "[", host, "]", NULL);
}
+
r->filename = apr_pstrcat(r->pool, "proxy:", scheme, "://", host, sport,
- "/", path, (search) ? "?" : "", (search) ? search : "", NULL);
+ "/", path, (search) ? "?" : "", search, NULL);
return OK;
}
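
The two new checks above reject a to-be-forwarded path or query string that still carries raw control characters or spaces (classic request-splitting vectors) once canonicalisation has been skipped. A rough stand-in for the ap_scan_vchar_obstext() contract assumed here, as a sketch: the scanner returns a pointer to the first byte that is not VCHAR or obs-text, so a clean string scans all the way to its terminating NUL.

    #include <stdio.h>

    /* sketch: accept VCHAR (0x21-0x7E) and obs-text (0x80-0xFF), stop elsewhere */
    static const char *scan_vchar_obstext_sketch(const char *s)
    {
        while (*s) {
            unsigned char c = (unsigned char)*s;
            if (c < 0x21 || c == 0x7f)      /* SP, CTLs, DEL */
                break;
            ++s;
        }
        return s;
    }

    int main(void)
    {
        const char *ok   = "/index.html%0d%0aX:";  /* already percent-encoded */
        const char *evil = "/index.html\r\nX:";    /* raw CR/LF slipped through */
        printf("%s\n", *scan_vchar_obstext_sketch(ok)   ? "403 Forbidden" : "forward");
        printf("%s\n", *scan_vchar_obstext_sketch(evil) ? "403 Forbidden" : "forward");
        return 0;
    }
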
@@ -216,486 +275,230 @@ static void add_cl(apr_pool_t *p,
APR_BRIGADE_INSERT_TAIL(header_brigade, e);
}
-#define ASCII_CRLF "\015\012"
-#define ASCII_ZERO "\060"
-
-static void terminate_headers(apr_bucket_alloc_t *bucket_alloc,
- apr_bucket_brigade *header_brigade)
-{
- apr_bucket *e;
-
- /* add empty line at the end of the headers */
- e = apr_bucket_immortal_create(ASCII_CRLF, 2, bucket_alloc);
- APR_BRIGADE_INSERT_TAIL(header_brigade, e);
-}
-
+#ifndef CRLF_ASCII
+#define CRLF_ASCII "\015\012"
+#endif
+#ifndef ZERO_ASCII
+#define ZERO_ASCII "\060"
+#endif
#define MAX_MEM_SPOOL 16384
-static int stream_reqbody_chunked(apr_pool_t *p,
- request_rec *r,
- proxy_conn_rec *p_conn,
- conn_rec *origin,
- apr_bucket_brigade *header_brigade,
- apr_bucket_brigade *input_brigade)
-{
- int seen_eos = 0, rv = OK;
- apr_size_t hdr_len;
- apr_off_t bytes;
- apr_status_t status;
- apr_bucket_alloc_t *bucket_alloc = r->connection->bucket_alloc;
- apr_bucket_brigade *bb;
- apr_bucket *e;
-
- add_te_chunked(p, bucket_alloc, header_brigade);
- terminate_headers(bucket_alloc, header_brigade);
+typedef enum {
+ RB_INIT = 0,
+ RB_STREAM_CL,
+ RB_STREAM_CHUNKED,
+ RB_SPOOL_CL
+} rb_methods;
- while (!APR_BUCKET_IS_EOS(APR_BRIGADE_FIRST(input_brigade)))
- {
- char chunk_hdr[20]; /* must be here due to transient bucket. */
+typedef struct {
+ apr_pool_t *p;
+ request_rec *r;
+ const char *proto;
+ proxy_worker *worker;
+ proxy_server_conf *sconf;
- /* If this brigade contains EOS, either stop or remove it. */
- if (APR_BUCKET_IS_EOS(APR_BRIGADE_LAST(input_brigade))) {
- seen_eos = 1;
-
- /* We can't pass this EOS to the output_filters. */
- e = APR_BRIGADE_LAST(input_brigade);
- apr_bucket_delete(e);
- }
+ char server_portstr[32];
+ proxy_conn_rec *backend;
+ conn_rec *origin;
- apr_brigade_length(input_brigade, 1, &bytes);
+ apr_bucket_alloc_t *bucket_alloc;
+ apr_bucket_brigade *header_brigade;
+ apr_bucket_brigade *input_brigade;
+ char *old_cl_val, *old_te_val;
+ apr_off_t cl_val;
- hdr_len = apr_snprintf(chunk_hdr, sizeof(chunk_hdr),
- "%" APR_UINT64_T_HEX_FMT CRLF,
- (apr_uint64_t)bytes);
+ rb_methods rb_method;
- ap_xlate_proto_to_ascii(chunk_hdr, hdr_len);
- e = apr_bucket_transient_create(chunk_hdr, hdr_len,
- bucket_alloc);
- APR_BRIGADE_INSERT_HEAD(input_brigade, e);
+ const char *upgrade;
- /*
- * Append the end-of-chunk CRLF
- */
- e = apr_bucket_immortal_create(ASCII_CRLF, 2, bucket_alloc);
- APR_BRIGADE_INSERT_TAIL(input_brigade, e);
+ unsigned int do_100_continue :1,
+ prefetch_nonblocking :1,
+ force10 :1;
+} proxy_http_req_t;
- if (header_brigade) {
- /* we never sent the header brigade, so go ahead and
- * take care of that now
- */
- bb = header_brigade;
+static int stream_reqbody(proxy_http_req_t *req)
+{
+ request_rec *r = req->r;
+ int seen_eos = 0, rv = OK;
+ apr_size_t hdr_len;
+ char chunk_hdr[20]; /* must be here due to transient bucket. */
+ conn_rec *origin = req->origin;
+ proxy_conn_rec *p_conn = req->backend;
+ apr_bucket_alloc_t *bucket_alloc = req->bucket_alloc;
+ apr_bucket_brigade *header_brigade = req->header_brigade;
+ apr_bucket_brigade *input_brigade = req->input_brigade;
+ rb_methods rb_method = req->rb_method;
+ apr_off_t bytes, bytes_streamed = 0;
+ apr_bucket *e;
- /*
- * Save input_brigade in bb brigade. (At least) in the SSL case
- * input_brigade contains transient buckets whose data would get
- * overwritten during the next call of ap_get_brigade in the loop.
- * ap_save_brigade ensures these buckets to be set aside.
- * Calling ap_save_brigade with NULL as filter is OK, because
- * bb brigade already has been created and does not need to get
- * created by ap_save_brigade.
- */
- status = ap_save_brigade(NULL, &bb, &input_brigade, p);
- if (status != APR_SUCCESS) {
- return HTTP_INTERNAL_SERVER_ERROR;
+ do {
+ if (APR_BRIGADE_EMPTY(input_brigade)
+ && APR_BRIGADE_EMPTY(header_brigade)) {
+ rv = ap_proxy_read_input(r, p_conn, input_brigade,
+ HUGE_STRING_LEN);
+ if (rv != OK) {
+ return rv;
}
-
- header_brigade = NULL;
- }
- else {
- bb = input_brigade;
}
- /* The request is flushed below this loop with chunk EOS header */
- rv = ap_proxy_pass_brigade(bucket_alloc, r, p_conn, origin, bb, 0);
- if (rv != OK) {
- return rv;
- }
-
- if (seen_eos) {
- break;
- }
-
- status = ap_get_brigade(r->input_filters, input_brigade,
- AP_MODE_READBYTES, APR_BLOCK_READ,
- HUGE_STRING_LEN);
-
- if (status != APR_SUCCESS) {
- conn_rec *c = r->connection;
- ap_log_rerror(APLOG_MARK, APLOG_ERR, status, r, APLOGNO(02608)
- "read request body failed to %pI (%s)"
- " from %s (%s)", p_conn->addr,
- p_conn->hostname ? p_conn->hostname: "",
- c->client_ip, c->remote_host ? c->remote_host: "");
- return ap_map_http_request_error(status, HTTP_BAD_REQUEST);
- }
- }
-
- if (header_brigade) {
- /* we never sent the header brigade because there was no request body;
- * send it now
- */
- bb = header_brigade;
- }
- else {
if (!APR_BRIGADE_EMPTY(input_brigade)) {
- /* input brigade still has an EOS which we can't pass to the output_filters. */
- e = APR_BRIGADE_LAST(input_brigade);
- AP_DEBUG_ASSERT(APR_BUCKET_IS_EOS(e));
- apr_bucket_delete(e);
- }
- bb = input_brigade;
- }
-
- e = apr_bucket_immortal_create(ASCII_ZERO ASCII_CRLF
- /* <trailers> */
- ASCII_CRLF,
- 5, bucket_alloc);
- APR_BRIGADE_INSERT_TAIL(bb, e);
-
- if (apr_table_get(r->subprocess_env, "proxy-sendextracrlf")) {
- e = apr_bucket_immortal_create(ASCII_CRLF, 2, bucket_alloc);
- APR_BRIGADE_INSERT_TAIL(bb, e);
- }
-
- /* Now we have headers-only, or the chunk EOS mark; flush it */
- rv = ap_proxy_pass_brigade(bucket_alloc, r, p_conn, origin, bb, 1);
- return rv;
-}
-
-static int stream_reqbody_cl(apr_pool_t *p,
- request_rec *r,
- proxy_conn_rec *p_conn,
- conn_rec *origin,
- apr_bucket_brigade *header_brigade,
- apr_bucket_brigade *input_brigade,
- char *old_cl_val)
-{
- int seen_eos = 0, rv = 0;
- apr_status_t status = APR_SUCCESS;
- apr_bucket_alloc_t *bucket_alloc = r->connection->bucket_alloc;
- apr_bucket_brigade *bb;
- apr_bucket *e;
- apr_off_t cl_val = 0;
- apr_off_t bytes;
- apr_off_t bytes_streamed = 0;
-
- if (old_cl_val) {
- char *endstr;
-
- add_cl(p, bucket_alloc, header_brigade, old_cl_val);
- status = apr_strtoff(&cl_val, old_cl_val, &endstr, 10);
-
- if (status || *endstr || endstr == old_cl_val || cl_val < 0) {
- ap_log_rerror(APLOG_MARK, APLOG_ERR, status, r, APLOGNO(01085)
- "could not parse request Content-Length (%s)",
- old_cl_val);
- return HTTP_BAD_REQUEST;
- }
- }
- terminate_headers(bucket_alloc, header_brigade);
+ /* If this brigade contains EOS, remove it and be done. */
+ if (APR_BUCKET_IS_EOS(APR_BRIGADE_LAST(input_brigade))) {
+ seen_eos = 1;
- while (!APR_BUCKET_IS_EOS(APR_BRIGADE_FIRST(input_brigade)))
- {
- apr_brigade_length(input_brigade, 1, &bytes);
- bytes_streamed += bytes;
+ /* We can't pass this EOS to the output_filters. */
+ e = APR_BRIGADE_LAST(input_brigade);
+ apr_bucket_delete(e);
+ }
- /* If this brigade contains EOS, either stop or remove it. */
- if (APR_BUCKET_IS_EOS(APR_BRIGADE_LAST(input_brigade))) {
- seen_eos = 1;
+ apr_brigade_length(input_brigade, 1, &bytes);
+ bytes_streamed += bytes;
- /* We can't pass this EOS to the output_filters. */
- e = APR_BRIGADE_LAST(input_brigade);
- apr_bucket_delete(e);
+ if (rb_method == RB_STREAM_CHUNKED) {
+ if (bytes) {
+ /*
+ * Prepend the size of the chunk
+ */
+ hdr_len = apr_snprintf(chunk_hdr, sizeof(chunk_hdr),
+ "%" APR_UINT64_T_HEX_FMT CRLF,
+ (apr_uint64_t)bytes);
+ ap_xlate_proto_to_ascii(chunk_hdr, hdr_len);
+ e = apr_bucket_transient_create(chunk_hdr, hdr_len,
+ bucket_alloc);
+ APR_BRIGADE_INSERT_HEAD(input_brigade, e);
+
+ /*
+ * Append the end-of-chunk CRLF
+ */
+ e = apr_bucket_immortal_create(CRLF_ASCII, 2, bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(input_brigade, e);
+ }
+ if (seen_eos) {
+ /*
+ * Append the tailing 0-size chunk
+ */
+ e = apr_bucket_immortal_create(ZERO_ASCII CRLF_ASCII
+ /* <trailers> */
+ CRLF_ASCII,
+ 5, bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(input_brigade, e);
+ }
+ }
+ else if (rb_method == RB_STREAM_CL
+ && (bytes_streamed > req->cl_val
+ || (seen_eos && bytes_streamed < req->cl_val))) {
+ /* C-L != bytes streamed?!?
+ *
+ * Prevent HTTP Request/Response Splitting.
+ *
+ * We can't stream more (or less) bytes at the back end since
+ * they could be interpreted in separate requests (more bytes
+ * now would start a new request, less bytes would make the
+ * first bytes of the next request be part of the current one).
+ *
+ * It can't happen from the client connection here thanks to
+ * ap_http_filter(), but some module's filter may be playing
+ * bad games, hence the HTTP_INTERNAL_SERVER_ERROR.
+ */
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(01086)
+ "read %s bytes of request body than expected "
+ "(got %" APR_OFF_T_FMT ", expected "
+ "%" APR_OFF_T_FMT ")",
+ bytes_streamed > req->cl_val ? "more" : "less",
+ bytes_streamed, req->cl_val);
+ return HTTP_INTERNAL_SERVER_ERROR;
+ }
- if (apr_table_get(r->subprocess_env, "proxy-sendextracrlf")) {
- e = apr_bucket_immortal_create(ASCII_CRLF, 2, bucket_alloc);
+ if (seen_eos && apr_table_get(r->subprocess_env,
+ "proxy-sendextracrlf")) {
+ e = apr_bucket_immortal_create(CRLF_ASCII, 2, bucket_alloc);
APR_BRIGADE_INSERT_TAIL(input_brigade, e);
}
}
- /* C-L < bytes streamed?!?
- * We will error out after the body is completely
- * consumed, but we can't stream more bytes at the
- * back end since they would in part be interpreted
- * as another request! If nothing is sent, then
- * just send nothing.
- *
- * Prevents HTTP Response Splitting.
+ /* If we never sent the header brigade, go ahead and take care of
+ * that now by prepending it (once only since header_brigade will be
+ * empty afterward).
*/
- if (bytes_streamed > cl_val) {
- ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(01086)
- "read more bytes of request body than expected "
- "(got %" APR_OFF_T_FMT ", expected %" APR_OFF_T_FMT ")",
- bytes_streamed, cl_val);
- return HTTP_INTERNAL_SERVER_ERROR;
- }
-
- if (header_brigade) {
- /* we never sent the header brigade, so go ahead and
- * take care of that now
- */
- bb = header_brigade;
-
- /*
- * Save input_brigade in bb brigade. (At least) in the SSL case
- * input_brigade contains transient buckets whose data would get
- * overwritten during the next call of ap_get_brigade in the loop.
- * ap_save_brigade ensures these buckets to be set aside.
- * Calling ap_save_brigade with NULL as filter is OK, because
- * bb brigade already has been created and does not need to get
- * created by ap_save_brigade.
- */
- status = ap_save_brigade(NULL, &bb, &input_brigade, p);
- if (status != APR_SUCCESS) {
- return HTTP_INTERNAL_SERVER_ERROR;
- }
-
- header_brigade = NULL;
- }
- else {
- bb = input_brigade;
- }
+ APR_BRIGADE_PREPEND(input_brigade, header_brigade);
- /* Once we hit EOS, we are ready to flush. */
- rv = ap_proxy_pass_brigade(bucket_alloc, r, p_conn, origin, bb, seen_eos);
+ /* Flush here on EOS because we won't ap_proxy_read_input() again. */
+ rv = ap_proxy_pass_brigade(bucket_alloc, r, p_conn, origin,
+ input_brigade, seen_eos);
if (rv != OK) {
- return rv ;
- }
-
- if (seen_eos) {
- break;
- }
-
- status = ap_get_brigade(r->input_filters, input_brigade,
- AP_MODE_READBYTES, APR_BLOCK_READ,
- HUGE_STRING_LEN);
-
- if (status != APR_SUCCESS) {
- conn_rec *c = r->connection;
- ap_log_rerror(APLOG_MARK, APLOG_ERR, status, r, APLOGNO(02609)
- "read request body failed to %pI (%s)"
- " from %s (%s)", p_conn->addr,
- p_conn->hostname ? p_conn->hostname: "",
- c->client_ip, c->remote_host ? c->remote_host: "");
- return ap_map_http_request_error(status, HTTP_BAD_REQUEST);
+ return rv;
}
- }
-
- if (bytes_streamed != cl_val) {
- ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(01087)
- "client %s given Content-Length did not match"
- " number of body bytes read", r->connection->client_ip);
- return HTTP_BAD_REQUEST;
- }
-
- if (header_brigade) {
- /* we never sent the header brigade since there was no request
- * body; send it now with the flush flag
- */
- bb = header_brigade;
- return(ap_proxy_pass_brigade(bucket_alloc, r, p_conn, origin, bb, 1));
- }
+ } while (!seen_eos);
return OK;
}
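
When rb_method is RB_STREAM_CHUNKED, each brigade read above is framed as one HTTP/1.1 chunk (hex size, CRLF, data, CRLF), and a zero-size chunk closes the body at EOS. The same wire format, shown with plain stdio instead of buckets and brigades, purely as a sketch:

    #include <stdio.h>

    static void send_chunk(FILE *out, const char *data, size_t len)
    {
        if (len)    /* a 0-size chunk would terminate the body */
            fprintf(out, "%zx\r\n%.*s\r\n", len, (int)len, data);
    }

    static void send_last_chunk(FILE *out)
    {
        fputs("0\r\n" /* <trailers would go here> */ "\r\n", out);
    }

    int main(void)
    {
        send_chunk(stdout, "hello ", 6);
        send_chunk(stdout, "world", 5);
        send_last_chunk(stdout);
        return 0;
    }
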
-static int spool_reqbody_cl(apr_pool_t *p,
- request_rec *r,
- proxy_conn_rec *p_conn,
- conn_rec *origin,
- apr_bucket_brigade *header_brigade,
- apr_bucket_brigade *input_brigade,
- int force_cl)
+static void terminate_headers(proxy_http_req_t *req)
{
- int seen_eos = 0;
- apr_status_t status;
- apr_bucket_alloc_t *bucket_alloc = r->connection->bucket_alloc;
- apr_bucket_brigade *body_brigade;
+ apr_bucket_alloc_t *bucket_alloc = req->bucket_alloc;
apr_bucket *e;
- apr_off_t bytes, bytes_spooled = 0, fsize = 0;
- apr_file_t *tmpfile = NULL;
- apr_off_t limit;
-
- body_brigade = apr_brigade_create(p, bucket_alloc);
-
- limit = ap_get_limit_req_body(r);
-
- while (!APR_BUCKET_IS_EOS(APR_BRIGADE_FIRST(input_brigade)))
- {
- /* If this brigade contains EOS, either stop or remove it. */
- if (APR_BUCKET_IS_EOS(APR_BRIGADE_LAST(input_brigade))) {
- seen_eos = 1;
+ char *buf;
- /* We can't pass this EOS to the output_filters. */
- e = APR_BRIGADE_LAST(input_brigade);
- apr_bucket_delete(e);
+ /*
+     * Handle the Connection: header if we do an HTTP/1.1 request:
+     * if we plan to close the backend connection, send Connection: close,
+     * otherwise send Connection: Keep-Alive.
+ */
+ if (!req->force10) {
+ if (req->upgrade) {
+ buf = apr_pstrdup(req->p, "Connection: Upgrade" CRLF);
+ ap_xlate_proto_to_ascii(buf, strlen(buf));
+ e = apr_bucket_pool_create(buf, strlen(buf), req->p, bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(req->header_brigade, e);
+
+ /* Tell the backend that it can upgrade the connection. */
+ buf = apr_pstrcat(req->p, "Upgrade: ", req->upgrade, CRLF, NULL);
}
-
- apr_brigade_length(input_brigade, 1, &bytes);
-
- if (bytes_spooled + bytes > MAX_MEM_SPOOL) {
- /*
- * LimitRequestBody does not affect Proxy requests (Should it?).
- * Let it take effect if we decide to store the body in a
- * temporary file on disk.
- */
- if (limit && (bytes_spooled + bytes > limit)) {
- ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(01088)
- "Request body is larger than the configured "
- "limit of %" APR_OFF_T_FMT, limit);
- return HTTP_REQUEST_ENTITY_TOO_LARGE;
- }
- /* can't spool any more in memory; write latest brigade to disk */
- if (tmpfile == NULL) {
- const char *temp_dir;
- char *template;
-
- status = apr_temp_dir_get(&temp_dir, p);
- if (status != APR_SUCCESS) {
- ap_log_rerror(APLOG_MARK, APLOG_ERR, status, r, APLOGNO(01089)
- "search for temporary directory failed");
- return HTTP_INTERNAL_SERVER_ERROR;
- }
- apr_filepath_merge(&template, temp_dir,
- "modproxy.tmp.XXXXXX",
- APR_FILEPATH_NATIVE, p);
- status = apr_file_mktemp(&tmpfile, template, 0, p);
- if (status != APR_SUCCESS) {
- ap_log_rerror(APLOG_MARK, APLOG_ERR, status, r, APLOGNO(01090)
- "creation of temporary file in directory "
- "%s failed", temp_dir);
- return HTTP_INTERNAL_SERVER_ERROR;
- }
- }
- for (e = APR_BRIGADE_FIRST(input_brigade);
- e != APR_BRIGADE_SENTINEL(input_brigade);
- e = APR_BUCKET_NEXT(e)) {
- const char *data;
- apr_size_t bytes_read, bytes_written;
-
- apr_bucket_read(e, &data, &bytes_read, APR_BLOCK_READ);
- status = apr_file_write_full(tmpfile, data, bytes_read, &bytes_written);
- if (status != APR_SUCCESS) {
- const char *tmpfile_name;
-
- if (apr_file_name_get(&tmpfile_name, tmpfile) != APR_SUCCESS) {
- tmpfile_name = "(unknown)";
- }
- ap_log_rerror(APLOG_MARK, APLOG_ERR, status, r, APLOGNO(01091)
- "write to temporary file %s failed",
- tmpfile_name);
- return HTTP_INTERNAL_SERVER_ERROR;
- }
- AP_DEBUG_ASSERT(bytes_read == bytes_written);
- fsize += bytes_written;
- }
- apr_brigade_cleanup(input_brigade);
+ else if (ap_proxy_connection_reusable(req->backend)) {
+ buf = apr_pstrdup(req->p, "Connection: Keep-Alive" CRLF);
}
else {
-
- /*
- * Save input_brigade in body_brigade. (At least) in the SSL case
- * input_brigade contains transient buckets whose data would get
- * overwritten during the next call of ap_get_brigade in the loop.
- * ap_save_brigade ensures these buckets to be set aside.
- * Calling ap_save_brigade with NULL as filter is OK, because
- * body_brigade already has been created and does not need to get
- * created by ap_save_brigade.
- */
- status = ap_save_brigade(NULL, &body_brigade, &input_brigade, p);
- if (status != APR_SUCCESS) {
- return HTTP_INTERNAL_SERVER_ERROR;
- }
-
- }
-
- bytes_spooled += bytes;
-
- if (seen_eos) {
- break;
- }
-
- status = ap_get_brigade(r->input_filters, input_brigade,
- AP_MODE_READBYTES, APR_BLOCK_READ,
- HUGE_STRING_LEN);
-
- if (status != APR_SUCCESS) {
- conn_rec *c = r->connection;
- ap_log_rerror(APLOG_MARK, APLOG_ERR, status, r, APLOGNO(02610)
- "read request body failed to %pI (%s)"
- " from %s (%s)", p_conn->addr,
- p_conn->hostname ? p_conn->hostname: "",
- c->client_ip, c->remote_host ? c->remote_host: "");
- return ap_map_http_request_error(status, HTTP_BAD_REQUEST);
+ buf = apr_pstrdup(req->p, "Connection: close" CRLF);
}
+ ap_xlate_proto_to_ascii(buf, strlen(buf));
+ e = apr_bucket_pool_create(buf, strlen(buf), req->p, bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(req->header_brigade, e);
}
- if (bytes_spooled || force_cl) {
- add_cl(p, bucket_alloc, header_brigade, apr_off_t_toa(p, bytes_spooled));
- }
- terminate_headers(bucket_alloc, header_brigade);
- APR_BRIGADE_CONCAT(header_brigade, body_brigade);
- if (tmpfile) {
- apr_brigade_insert_file(header_brigade, tmpfile, 0, fsize, p);
- }
- if (apr_table_get(r->subprocess_env, "proxy-sendextracrlf")) {
- e = apr_bucket_immortal_create(ASCII_CRLF, 2, bucket_alloc);
- APR_BRIGADE_INSERT_TAIL(header_brigade, e);
- }
- /* This is all a single brigade, pass with flush flagged */
- return(ap_proxy_pass_brigade(bucket_alloc, r, p_conn, origin, header_brigade, 1));
+ /* add empty line at the end of the headers */
+ e = apr_bucket_immortal_create(CRLF_ASCII, 2, bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(req->header_brigade, e);
}
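
terminate_headers() now also owns the Connection header decision that used to sit at the skip_body label: Upgrade wins when an upgrade is being forwarded, Keep-Alive when the backend connection is reusable, close otherwise, and forced HTTP/1.0 requests get none. Reduced to a single hypothetical helper (sketch only; the real code appends translated pool buckets to header_brigade):

    #include <stdio.h>

    static const char *connection_header(int force10, const char *upgrade,
                                         int reusable)
    {
        if (force10)
            return "(none)";                  /* HTTP/1.0 request: no header */
        if (upgrade)
            return "Connection: Upgrade";     /* plus "Upgrade: <protocol>"  */
        if (reusable)
            return "Connection: Keep-Alive";
        return "Connection: close";
    }

    int main(void)
    {
        printf("%s\n", connection_header(0, "websocket", 1)); /* Upgrade */
        printf("%s\n", connection_header(0, NULL, 0));        /* close   */
        return 0;
    }
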
-static
-int ap_proxy_http_request(apr_pool_t *p, request_rec *r,
- proxy_conn_rec *p_conn, proxy_worker *worker,
- proxy_server_conf *conf,
- apr_uri_t *uri,
- char *url, char *server_portstr)
+static int ap_proxy_http_prefetch(proxy_http_req_t *req,
+ apr_uri_t *uri, char *url)
{
+ apr_pool_t *p = req->p;
+ request_rec *r = req->r;
conn_rec *c = r->connection;
- apr_bucket_alloc_t *bucket_alloc = c->bucket_alloc;
- apr_bucket_brigade *header_brigade;
- apr_bucket_brigade *input_brigade;
- apr_bucket_brigade *temp_brigade;
+ proxy_conn_rec *p_conn = req->backend;
+ apr_bucket_alloc_t *bucket_alloc = req->bucket_alloc;
+ apr_bucket_brigade *header_brigade = req->header_brigade;
+ apr_bucket_brigade *input_brigade = req->input_brigade;
apr_bucket *e;
- char *buf;
- apr_status_t status;
- enum rb_methods {RB_INIT, RB_STREAM_CL, RB_STREAM_CHUNKED, RB_SPOOL_CL};
- enum rb_methods rb_method = RB_INIT;
- char *old_cl_val = NULL;
- char *old_te_val = NULL;
apr_off_t bytes_read = 0;
apr_off_t bytes;
- int force10, rv;
- conn_rec *origin = p_conn->connection;
-
- if (apr_table_get(r->subprocess_env, "force-proxy-request-1.0")) {
- if (r->expecting_100) {
- return HTTP_EXPECTATION_FAILED;
- }
- force10 = 1;
- } else {
- force10 = 0;
- }
+ int rv;
- header_brigade = apr_brigade_create(p, bucket_alloc);
rv = ap_proxy_create_hdrbrgd(p, header_brigade, r, p_conn,
- worker, conf, uri, url, server_portstr,
- &old_cl_val, &old_te_val);
+ req->worker, req->sconf,
+ uri, url, req->server_portstr,
+ &req->old_cl_val, &req->old_te_val);
if (rv != OK) {
return rv;
}
- /* We have headers, let's figure out our request body... */
- input_brigade = apr_brigade_create(p, bucket_alloc);
-
/* sub-requests never use keepalives, and mustn't pass request bodies.
* Because the new logic looks at input_brigade, we will self-terminate
* input_brigade and jump past all of the request body logic...
* Reading anything with ap_get_brigade is likely to consume the
- * main request's body or read beyond EOS - which would be unplesant.
+ * main request's body or read beyond EOS - which would be unpleasant.
*
* An exception: when a kept_body is present, then subrequest CAN use
* pass request bodies, and we DONT skip the body.
@@ -703,9 +506,9 @@ int ap_proxy_http_request(apr_pool_t *p, request_rec *r,
if (!r->kept_body && r->main) {
/* XXX: Why DON'T sub-requests use keepalives? */
p_conn->close = 1;
- old_cl_val = NULL;
- old_te_val = NULL;
- rb_method = RB_STREAM_CL;
+ req->old_te_val = NULL;
+ req->old_cl_val = NULL;
+ req->rb_method = RB_STREAM_CL;
e = apr_bucket_eos_create(input_brigade->bucket_alloc);
APR_BRIGADE_INSERT_TAIL(input_brigade, e);
goto skip_body;
@@ -719,73 +522,29 @@ int ap_proxy_http_request(apr_pool_t *p, request_rec *r,
* encoding has been done by the extensions' handler, and
* do not modify add_te_chunked's logic
*/
- if (old_te_val && strcasecmp(old_te_val, "chunked") != 0) {
+ if (req->old_te_val && ap_cstr_casecmp(req->old_te_val, "chunked") != 0) {
ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(01093)
- "%s Transfer-Encoding is not supported", old_te_val);
+ "%s Transfer-Encoding is not supported",
+ req->old_te_val);
return HTTP_INTERNAL_SERVER_ERROR;
}
- if (old_cl_val && old_te_val) {
+ if (req->old_cl_val && req->old_te_val) {
ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(01094)
"client %s (%s) requested Transfer-Encoding "
"chunked body with Content-Length (C-L ignored)",
c->client_ip, c->remote_host ? c->remote_host: "");
- old_cl_val = NULL;
- origin->keepalive = AP_CONN_CLOSE;
+ req->old_cl_val = NULL;
p_conn->close = 1;
}
- /* Prefetch MAX_MEM_SPOOL bytes
- *
- * This helps us avoid any election of C-L v.s. T-E
- * request bodies, since we are willing to keep in
- * memory this much data, in any case. This gives
- * us an instant C-L election if the body is of some
- * reasonable size.
- */
- temp_brigade = apr_brigade_create(p, bucket_alloc);
- do {
- status = ap_get_brigade(r->input_filters, temp_brigade,
- AP_MODE_READBYTES, APR_BLOCK_READ,
- MAX_MEM_SPOOL - bytes_read);
- if (status != APR_SUCCESS) {
- ap_log_rerror(APLOG_MARK, APLOG_ERR, status, r, APLOGNO(01095)
- "prefetch request body failed to %pI (%s)"
- " from %s (%s)",
- p_conn->addr, p_conn->hostname ? p_conn->hostname: "",
- c->client_ip, c->remote_host ? c->remote_host: "");
- return ap_map_http_request_error(status, HTTP_BAD_REQUEST);
- }
-
- apr_brigade_length(temp_brigade, 1, &bytes);
- bytes_read += bytes;
-
- /*
- * Save temp_brigade in input_brigade. (At least) in the SSL case
- * temp_brigade contains transient buckets whose data would get
- * overwritten during the next call of ap_get_brigade in the loop.
- * ap_save_brigade ensures these buckets to be set aside.
- * Calling ap_save_brigade with NULL as filter is OK, because
- * input_brigade already has been created and does not need to get
- * created by ap_save_brigade.
- */
- status = ap_save_brigade(NULL, &input_brigade, &temp_brigade, p);
- if (status != APR_SUCCESS) {
- ap_log_rerror(APLOG_MARK, APLOG_ERR, status, r, APLOGNO(01096)
- "processing prefetched request body failed"
- " to %pI (%s) from %s (%s)",
- p_conn->addr, p_conn->hostname ? p_conn->hostname: "",
- c->client_ip, c->remote_host ? c->remote_host: "");
- return HTTP_INTERNAL_SERVER_ERROR;
- }
-
- /* Ensure we don't hit a wall where we have a buffer too small
- * for ap_get_brigade's filters to fetch us another bucket,
- * surrender once we hit 80 bytes less than MAX_MEM_SPOOL
- * (an arbitrary value.)
- */
- } while ((bytes_read < MAX_MEM_SPOOL - 80)
- && !APR_BUCKET_IS_EOS(APR_BRIGADE_LAST(input_brigade)));
+ rv = ap_proxy_prefetch_input(r, req->backend, input_brigade,
+ req->prefetch_nonblocking ? APR_NONBLOCK_READ
+ : APR_BLOCK_READ,
+ &bytes_read, MAX_MEM_SPOOL);
+ if (rv != OK) {
+ return rv;
+ }
/* Use chunked request body encoding or send a content-length body?
*
@@ -812,7 +571,7 @@ int ap_proxy_http_request(apr_pool_t *p, request_rec *r,
* To reduce server resource use, setenv proxy-sendchunked
*
* Then address specific servers with conditional setenv
- * options to restore the default behavior where desireable.
+ * options to restore the default behavior where desirable.
*
* We have to compute content length by reading the entire request
* body; if request body is not small, we'll spool the remaining
@@ -822,7 +581,8 @@ int ap_proxy_http_request(apr_pool_t *p, request_rec *r,
* is absent, and the filters are unchanged (the body won't
* be resized by another content filter).
*/
- if (APR_BUCKET_IS_EOS(APR_BRIGADE_LAST(input_brigade))) {
+ if (!APR_BRIGADE_EMPTY(input_brigade)
+ && APR_BUCKET_IS_EOS(APR_BRIGADE_LAST(input_brigade))) {
/* The whole thing fit, so our decision is trivial, use
* the filtered bytes read from the client for the request
* body Content-Length.
@@ -830,34 +590,41 @@ int ap_proxy_http_request(apr_pool_t *p, request_rec *r,
* If we expected no body, and read no body, do not set
* the Content-Length.
*/
- if (old_cl_val || old_te_val || bytes_read) {
- old_cl_val = apr_off_t_toa(r->pool, bytes_read);
+ if (req->old_cl_val || req->old_te_val || bytes_read) {
+ req->old_cl_val = apr_off_t_toa(r->pool, bytes_read);
+ req->cl_val = bytes_read;
}
- rb_method = RB_STREAM_CL;
+ req->rb_method = RB_STREAM_CL;
}
- else if (old_te_val) {
- if (force10
+ else if (req->old_te_val) {
+ if (req->force10
|| (apr_table_get(r->subprocess_env, "proxy-sendcl")
&& !apr_table_get(r->subprocess_env, "proxy-sendchunks")
&& !apr_table_get(r->subprocess_env, "proxy-sendchunked"))) {
- rb_method = RB_SPOOL_CL;
+ req->rb_method = RB_SPOOL_CL;
}
else {
- rb_method = RB_STREAM_CHUNKED;
+ req->rb_method = RB_STREAM_CHUNKED;
}
}
- else if (old_cl_val) {
+ else if (req->old_cl_val) {
if (r->input_filters == r->proto_input_filters) {
- rb_method = RB_STREAM_CL;
+ if (!ap_parse_strict_length(&req->cl_val, req->old_cl_val)) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(01085)
+ "could not parse request Content-Length (%s)",
+ req->old_cl_val);
+ return HTTP_BAD_REQUEST;
+ }
+ req->rb_method = RB_STREAM_CL;
}
- else if (!force10
+ else if (!req->force10
&& (apr_table_get(r->subprocess_env, "proxy-sendchunks")
|| apr_table_get(r->subprocess_env, "proxy-sendchunked"))
&& !apr_table_get(r->subprocess_env, "proxy-sendcl")) {
- rb_method = RB_STREAM_CHUNKED;
+ req->rb_method = RB_STREAM_CHUNKED;
}
else {
- rb_method = RB_SPOOL_CL;
+ req->rb_method = RB_SPOOL_CL;
}
}
else {
@@ -865,44 +632,60 @@ int ap_proxy_http_request(apr_pool_t *p, request_rec *r,
* requests, and has the behavior that it will not add any C-L
* when the old_cl_val is NULL.
*/
- rb_method = RB_SPOOL_CL;
+ req->rb_method = RB_SPOOL_CL;
}
-/* Yes I hate gotos. This is the subrequest shortcut */
-skip_body:
- /*
- * Handle Connection: header if we do HTTP/1.1 request:
- * If we plan to close the backend connection sent Connection: close
- * otherwise sent Connection: Keep-Alive.
- */
- if (!force10) {
- if (!ap_proxy_connection_reusable(p_conn)) {
- buf = apr_pstrdup(p, "Connection: close" CRLF);
- }
- else {
- buf = apr_pstrdup(p, "Connection: Keep-Alive" CRLF);
- }
- ap_xlate_proto_to_ascii(buf, strlen(buf));
- e = apr_bucket_pool_create(buf, strlen(buf), p, c->bucket_alloc);
- APR_BRIGADE_INSERT_TAIL(header_brigade, e);
- }
-
- /* send the request body, if any. */
- switch(rb_method) {
+ switch (req->rb_method) {
case RB_STREAM_CHUNKED:
- rv = stream_reqbody_chunked(p, r, p_conn, origin, header_brigade,
- input_brigade);
+ add_te_chunked(req->p, bucket_alloc, header_brigade);
break;
+
case RB_STREAM_CL:
- rv = stream_reqbody_cl(p, r, p_conn, origin, header_brigade,
- input_brigade, old_cl_val);
+ if (req->old_cl_val) {
+ add_cl(req->p, bucket_alloc, header_brigade, req->old_cl_val);
+ }
break;
+
+ default: /* => RB_SPOOL_CL */
+ /* If we have to spool the body, do it now, before connecting or
+ * reusing the backend connection.
+ */
+ rv = ap_proxy_spool_input(r, p_conn, input_brigade,
+ &bytes, MAX_MEM_SPOOL);
+ if (rv != OK) {
+ return rv;
+ }
+ if (bytes || req->old_te_val || req->old_cl_val) {
+ add_cl(p, bucket_alloc, header_brigade, apr_off_t_toa(p, bytes));
+ }
+ }
+
+/* Yes I hate gotos. This is the subrequest shortcut */
+skip_body:
+ terminate_headers(req);
+
+ return OK;
+}
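
ap_proxy_http_prefetch() ends by electing how the body will be forwarded: stream with the exact Content-Length when the whole body was prefetched, otherwise chunked or spooled depending on the client's Transfer-Encoding / Content-Length and the proxy-sendcl / proxy-sendchunked environment overrides. The decision tree, condensed into one hypothetical helper (sketch; the env lookups and the input-filter comparison are reduced to booleans):

    #include <stdio.h>

    typedef enum { STREAM_CL, STREAM_CHUNKED, SPOOL_CL } rb_sketch;

    static rb_sketch elect_rb_method(int whole_body_prefetched,
                                     int client_sent_te, int client_sent_cl,
                                     int filters_unchanged, int force10,
                                     int env_sendcl, int env_sendchunked)
    {
        if (whole_body_prefetched)
            return STREAM_CL;                    /* C-L = prefetched byte count */

        if (client_sent_te)                      /* chunked from the client     */
            return (force10 || (env_sendcl && !env_sendchunked))
                   ? SPOOL_CL : STREAM_CHUNKED;

        if (client_sent_cl) {
            if (filters_unchanged)
                return STREAM_CL;                /* C-L still trustworthy       */
            if (!force10 && env_sendchunked && !env_sendcl)
                return STREAM_CHUNKED;
            return SPOOL_CL;                     /* recompute C-L by spooling   */
        }

        return SPOOL_CL;                         /* no body indication at all   */
    }

    int main(void)
    {
        /* e.g. chunked client body, no overrides, filters unchanged */
        printf("%d\n", elect_rb_method(0, 1, 0, 1, 0, 0, 0)); /* 1 = STREAM_CHUNKED */
        return 0;
    }
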
+
+static int ap_proxy_http_request(proxy_http_req_t *req)
+{
+ int rv;
+ request_rec *r = req->r;
+
+ /* send the request header/body, if any. */
+ switch (req->rb_method) {
case RB_SPOOL_CL:
- rv = spool_reqbody_cl(p, r, p_conn, origin, header_brigade,
- input_brigade, (old_cl_val != NULL)
- || (old_te_val != NULL)
- || (bytes_read > 0));
+ case RB_STREAM_CL:
+ case RB_STREAM_CHUNKED:
+ if (req->do_100_continue) {
+ rv = ap_proxy_pass_brigade(req->bucket_alloc, r, req->backend,
+ req->origin, req->header_brigade, 1);
+ }
+ else {
+ rv = stream_reqbody(req);
+ }
break;
+
default:
/* shouldn't be possible */
rv = HTTP_INTERNAL_SERVER_ERROR;
@@ -910,10 +693,12 @@ skip_body:
}
if (rv != OK) {
+ conn_rec *c = r->connection;
/* apr_status_t value has been logged in lower level method */
ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(01097)
"pass request body failed to %pI (%s) from %s (%s)",
- p_conn->addr, p_conn->hostname ? p_conn->hostname: "",
+ req->backend->addr,
+ req->backend->hostname ? req->backend->hostname: "",
c->client_ip, c->remote_host ? c->remote_host: "");
return rv;
}
@@ -950,6 +735,7 @@ static request_rec *make_fake_req(conn_rec *c, request_rec *r)
request_rec *rp;
apr_pool_create(&pool, c->pool);
+ apr_pool_tag(pool, "proxy_http_rp");
rp = apr_pcalloc(pool, sizeof(*r));
@@ -1001,14 +787,14 @@ static void process_proxy_header(request_rec *r, proxy_dir_conf *c,
};
int i;
for (i = 0; date_hdrs[i]; ++i) {
- if (!strcasecmp(date_hdrs[i], key)) {
+ if (!ap_cstr_casecmp(date_hdrs[i], key)) {
apr_table_add(r->headers_out, key,
date_canon(r->pool, value));
return;
}
}
for (i = 0; transform_hdrs[i].name; ++i) {
- if (!strcasecmp(transform_hdrs[i].name, key)) {
+ if (!ap_cstr_casecmp(transform_hdrs[i].name, key)) {
apr_table_add(r->headers_out, key,
(*transform_hdrs[i].func)(r, c, value));
return;
@@ -1025,7 +811,7 @@ static void process_proxy_header(request_rec *r, proxy_dir_conf *c,
* any sense at all, since we depend on buffer still containing
* what was read by ap_getline() upon return.
*/
-static void ap_proxy_read_headers(request_rec *r, request_rec *rr,
+static apr_status_t ap_proxy_read_headers(request_rec *r, request_rec *rr,
char *buffer, int size,
conn_rec *c, int *pread_len)
{
@@ -1057,19 +843,26 @@ static void ap_proxy_read_headers(request_rec *r, request_rec *rr,
rc = ap_proxygetline(tmp_bb, buffer, size, rr,
AP_GETLINE_FOLD | AP_GETLINE_NOSPC_EOL, &len);
- if (len <= 0)
- break;
- if (APR_STATUS_IS_ENOSPC(rc)) {
- /* The header could not fit in the provided buffer, warn.
- * XXX: falls through with the truncated header, 5xx instead?
- */
- int trunc = (len > 128 ? 128 : len) / 2;
- ap_log_rerror(APLOG_MARK, APLOG_WARNING, rc, r, APLOGNO(10124)
- "header size is over the limit allowed by "
- "ResponseFieldSize (%d bytes). "
- "Bad response header: '%.*s[...]%s'",
- size, trunc, buffer, buffer + len - trunc);
+ if (rc != APR_SUCCESS) {
+ if (APR_STATUS_IS_ENOSPC(rc)) {
+ int trunc = (len > 128 ? 128 : len) / 2;
+ ap_log_rerror(APLOG_MARK, APLOG_WARNING, rc, r, APLOGNO(10124)
+ "header size is over the limit allowed by "
+ "ResponseFieldSize (%d bytes). "
+ "Bad response header: '%.*s[...]%s'",
+ size, trunc, buffer, buffer + len - trunc);
+ }
+ else {
+ ap_log_rerror(APLOG_MARK, APLOG_WARNING, rc, r, APLOGNO(10404)
+ "Error reading headers from backend");
+ }
+ r->headers_out = NULL;
+ return rc;
+ }
+
+ if (len <= 0) {
+ break;
}
else {
ap_log_rerror(APLOG_MARK, APLOG_TRACE4, 0, r, "%s", buffer);
@@ -1092,7 +885,7 @@ static void ap_proxy_read_headers(request_rec *r, request_rec *rr,
if (psc->badopt == bad_error) {
/* Nope, it wasn't even an extra HTTP header. Give up. */
r->headers_out = NULL;
- return;
+ return APR_EINVAL;
}
else if (psc->badopt == bad_body) {
/* if we've already started loading headers_out, then
@@ -1106,13 +899,13 @@ static void ap_proxy_read_headers(request_rec *r, request_rec *rr,
"in headers returned by %s (%s)",
r->uri, r->method);
*pread_len = len;
- return;
+ return APR_SUCCESS;
}
else {
ap_log_rerror(APLOG_MARK, APLOG_WARNING, 0, r, APLOGNO(01099)
"No HTTP headers returned by %s (%s)",
r->uri, r->method);
- return;
+ return APR_SUCCESS;
}
}
}
@@ -1142,6 +935,7 @@ static void ap_proxy_read_headers(request_rec *r, request_rec *rr,
process_proxy_header(r, dconf, buffer, value);
saw_headers = 1;
}
+ return APR_SUCCESS;
}
@@ -1188,13 +982,48 @@ static int add_trailers(void *data, const char *key, const char *val)
return 1;
}
+static int send_continue_body(proxy_http_req_t *req)
+{
+ int status;
+
+ /* Send the request body (fully). */
+ switch(req->rb_method) {
+ case RB_SPOOL_CL:
+ case RB_STREAM_CL:
+ case RB_STREAM_CHUNKED:
+ status = stream_reqbody(req);
+ break;
+ default:
+ /* Shouldn't happen */
+ status = HTTP_INTERNAL_SERVER_ERROR;
+ break;
+ }
+ if (status != OK) {
+ conn_rec *c = req->r->connection;
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, req->r,
+ APLOGNO(10154) "pass request body failed "
+ "to %pI (%s) from %s (%s) with status %i",
+ req->backend->addr,
+ req->backend->hostname ? req->backend->hostname : "",
+ c->client_ip, c->remote_host ? c->remote_host : "",
+ status);
+ req->backend->close = 1;
+ }
+ return status;
+}
+
static
-apr_status_t ap_proxy_http_process_response(apr_pool_t * p, request_rec *r,
- proxy_conn_rec **backend_ptr,
- proxy_worker *worker,
- proxy_server_conf *conf,
- char *server_portstr) {
+int ap_proxy_http_process_response(proxy_http_req_t *req)
+{
+ apr_pool_t *p = req->p;
+ request_rec *r = req->r;
conn_rec *c = r->connection;
+ proxy_worker *worker = req->worker;
+ proxy_conn_rec *backend = req->backend;
+ conn_rec *origin = req->origin;
+ int do_100_continue = req->do_100_continue;
+ int status;
+
char *buffer;
char fixed_buffer[HUGE_STRING_LEN];
const char *buf;
@@ -1217,19 +1046,11 @@ apr_status_t ap_proxy_http_process_response(apr_pool_t * p, request_rec *r,
int proxy_status = OK;
const char *original_status_line = r->status_line;
const char *proxy_status_line = NULL;
- proxy_conn_rec *backend = *backend_ptr;
- conn_rec *origin = backend->connection;
apr_interval_time_t old_timeout = 0;
proxy_dir_conf *dconf;
- int do_100_continue;
dconf = ap_get_module_config(r->per_dir_config, &proxy_module);
- do_100_continue = (worker->s->ping_timeout_set
- && ap_request_has_body(r)
- && (PROXYREQ_REVERSE == r->proxyreq)
- && !(apr_table_get(r->subprocess_env, "force-proxy-request-1.0")));
-
bb = apr_brigade_create(p, c->bucket_alloc);
pass_bb = apr_brigade_create(p, c->bucket_alloc);
@@ -1248,7 +1069,7 @@ apr_status_t ap_proxy_http_process_response(apr_pool_t * p, request_rec *r,
}
/* Setup for 100-Continue timeout if appropriate */
- if (do_100_continue) {
+ if (do_100_continue && worker->s->ping_timeout_set) {
apr_socket_timeout_get(backend->sock, &old_timeout);
if (worker->s->ping_timeout != old_timeout) {
apr_status_t rc;
@@ -1273,6 +1094,9 @@ apr_status_t ap_proxy_http_process_response(apr_pool_t * p, request_rec *r,
origin->local_addr->port));
do {
apr_status_t rc;
+ const char *upgrade = NULL;
+ int major = 0, minor = 0;
+ int toclose = 0;
apr_brigade_cleanup(bb);
@@ -1291,7 +1115,8 @@ apr_status_t ap_proxy_http_process_response(apr_pool_t * p, request_rec *r,
apr_table_setn(r->notes, "proxy_timedout", "1");
ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(01103) "read timeout");
if (do_100_continue) {
- return ap_proxyerror(r, HTTP_SERVICE_UNAVAILABLE, "Timeout on 100-Continue");
+ return ap_proxyerror(r, HTTP_SERVICE_UNAVAILABLE,
+ "Timeout on 100-Continue");
}
}
/*
@@ -1340,15 +1165,30 @@ apr_status_t ap_proxy_http_process_response(apr_pool_t * p, request_rec *r,
ap_pass_brigade(r->output_filters, bb);
/* Mark the backend connection for closing */
backend->close = 1;
- /* Need to return OK to avoid sending an error message */
- return OK;
+ if (origin->keepalives) {
+ /* We already had a request on this backend connection and
+ * might just have run into a keepalive race. Hence we
+             * think positively and assume that the backend is fine and
+             * we do not need to signal an error on the backend side.
+ */
+ return OK;
+ }
+ /*
+ * This happened on our first request on this connection to the
+ * backend. This indicates something fishy with the backend.
+ * Return HTTP_INTERNAL_SERVER_ERROR to signal an unrecoverable
+ * server error. We do not worry about r->status code and a
+ * possible error response here as the ap_http_outerror_filter
+ * will fix all of this for us.
+ */
+ return HTTP_INTERNAL_SERVER_ERROR;
}
- else if (!c->keepalives) {
- ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(01105)
- "NOT Closing connection to client"
- " although reading from backend server %s:%d"
- " failed.",
- backend->hostname, backend->port);
+ if (!c->keepalives) {
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(01105)
+ "NOT Closing connection to client"
+ " although reading from backend server %s:%d"
+ " failed.",
+ backend->hostname, backend->port);
}
return ap_proxyerror(r, HTTP_BAD_GATEWAY,
"Error reading from remote server");
@@ -1360,9 +1200,6 @@ apr_status_t ap_proxy_http_process_response(apr_pool_t * p, request_rec *r,
* This is buggy if we ever see an HTTP/1.10
*/
if (apr_date_checkmask(buffer, "HTTP/#.# ###*")) {
- int major, minor;
- int toclose;
-
major = buffer[5] - '0';
minor = buffer[7] - '0';
@@ -1371,8 +1208,8 @@ apr_status_t ap_proxy_http_process_response(apr_pool_t * p, request_rec *r,
*/
if ((major != 1) || (len >= response_field_size - 1)) {
return ap_proxyerror(r, HTTP_BAD_GATEWAY,
- apr_pstrcat(p, "Corrupt status line returned by remote "
- "server: ", buffer, NULL));
+ apr_pstrcat(p, "Corrupt status line returned "
+ "by remote server: ", buffer, NULL));
}
backasswards = 0;
@@ -1404,7 +1241,7 @@ apr_status_t ap_proxy_http_process_response(apr_pool_t * p, request_rec *r,
/* read the headers. */
/* N.B. for HTTP/1.0 clients, we have to fold line-wrapped headers*/
- /* Also, take care with headers with multiple occurences. */
+ /* Also, take care with headers with multiple occurrences. */
/* First, tuck away all already existing cookies */
save_table = apr_table_make(r->pool, 2);
@@ -1412,10 +1249,10 @@ apr_status_t ap_proxy_http_process_response(apr_pool_t * p, request_rec *r,
"Set-Cookie", NULL);
/* shove the headers direct into r->headers_out */
- ap_proxy_read_headers(r, backend->r, buffer, response_field_size, origin,
- &pread_len);
+ rc = ap_proxy_read_headers(r, backend->r, buffer, response_field_size,
+ origin, &pread_len);
- if (r->headers_out == NULL) {
+ if (rc != APR_SUCCESS || r->headers_out == NULL) {
ap_log_rerror(APLOG_MARK, APLOG_WARNING, 0, r, APLOGNO(01106)
"bad HTTP/%d.%d header returned by %s (%s)",
major, minor, r->uri, r->method);
@@ -1443,9 +1280,14 @@ apr_status_t ap_proxy_http_process_response(apr_pool_t * p, request_rec *r,
save_table);
}
+ /*
+ * Save a possible Transfer-Encoding header as we need it later for
+ * ap_http_filter to know where to end.
+ */
+ te = apr_table_get(r->headers_out, "Transfer-Encoding");
+
/* can't have both Content-Length and Transfer-Encoding */
- if (apr_table_get(r->headers_out, "Transfer-Encoding")
- && apr_table_get(r->headers_out, "Content-Length")) {
+ if (te && apr_table_get(r->headers_out, "Content-Length")) {
/*
* 2616 section 4.4, point 3: "if both Transfer-Encoding
* and Content-Length are received, the latter MUST be
@@ -1463,16 +1305,29 @@ apr_status_t ap_proxy_http_process_response(apr_pool_t * p, request_rec *r,
backend->close = 1;
}
- /*
- * Save a possible Transfer-Encoding header as we need it later for
- * ap_http_filter to know where to end.
- */
- te = apr_table_get(r->headers_out, "Transfer-Encoding");
+ upgrade = apr_table_get(r->headers_out, "Upgrade");
+ if (proxy_status == HTTP_SWITCHING_PROTOCOLS) {
+ if (!upgrade || !req->upgrade || (strcasecmp(req->upgrade,
+ upgrade) != 0)) {
+ return ap_proxyerror(r, HTTP_BAD_GATEWAY,
+ apr_pstrcat(p, "Unexpected Upgrade: ",
+ upgrade ? upgrade : "n/a",
+ " (expecting ",
+ req->upgrade ? req->upgrade
+ : "n/a", ")",
+ NULL));
+ }
+ backend->close = 1;
+ }
/* strip connection listed hop-by-hop headers from response */
toclose = ap_proxy_clear_connection_fn(r, r->headers_out);
if (toclose) {
backend->close = 1;
+ if (toclose < 0) {
+ return ap_proxyerror(r, HTTP_BAD_GATEWAY,
+ "Malformed connection header");
+ }
}
if ((buf = apr_table_get(r->headers_out, "Content-Type"))) {
@@ -1491,7 +1346,8 @@ apr_status_t ap_proxy_http_process_response(apr_pool_t * p, request_rec *r,
r->headers_out = ap_proxy_clean_warnings(p, r->headers_out);
/* handle Via header in response */
- if (conf->viaopt != via_off && conf->viaopt != via_block) {
+ if (req->sconf->viaopt != via_off
+ && req->sconf->viaopt != via_block) {
const char *server_name = ap_get_server_name(r);
/* If USE_CANONICAL_NAME_OFF was configured for the proxy virtual host,
* then the server name returned by ap_get_server_name() is the
@@ -1502,18 +1358,18 @@ apr_status_t ap_proxy_http_process_response(apr_pool_t * p, request_rec *r,
server_name = r->server->server_hostname;
/* create a "Via:" response header entry and merge it */
apr_table_addn(r->headers_out, "Via",
- (conf->viaopt == via_full)
+ (req->sconf->viaopt == via_full)
? apr_psprintf(p, "%d.%d %s%s (%s)",
HTTP_VERSION_MAJOR(r->proto_num),
HTTP_VERSION_MINOR(r->proto_num),
server_name,
- server_portstr,
+ req->server_portstr,
AP_SERVER_BASEVERSION)
: apr_psprintf(p, "%d.%d %s%s",
HTTP_VERSION_MAJOR(r->proto_num),
HTTP_VERSION_MINOR(r->proto_num),
server_name,
- server_portstr)
+ req->server_portstr)
);
}
@@ -1522,27 +1378,25 @@ apr_status_t ap_proxy_http_process_response(apr_pool_t * p, request_rec *r,
backend->close = 1;
origin->keepalive = AP_CONN_CLOSE;
}
+ else {
+ /*
+ * Keep track of the number of keepalives we processed on this
+ * connection.
+ */
+ origin->keepalives++;
+ }
+
} else {
/* an http/0.9 response */
backasswards = 1;
- r->status = 200;
+ r->status = proxy_status = 200;
r->status_line = "200 OK";
backend->close = 1;
}
if (ap_is_HTTP_INFO(proxy_status)) {
- interim_response++;
- /* Reset to old timeout iff we've adjusted it */
- if (do_100_continue
- && (r->status == HTTP_CONTINUE)
- && (worker->s->ping_timeout != old_timeout)) {
- apr_socket_timeout_set(backend->sock, old_timeout);
- }
- }
- else {
- interim_response = 0;
- }
- if (interim_response) {
+ const char *policy = NULL;
+
/* RFC2616 tells us to forward this.
*
* OTOH, an interim response here may mean the backend
@@ -1555,15 +1409,32 @@ apr_status_t ap_proxy_http_process_response(apr_pool_t * p, request_rec *r,
*
* So let's make it configurable.
*
- * We need to set "r->expecting_100 = 1" otherwise origin
- * server behaviour will apply.
+ * We need to force "r->expecting_100 = 1" for RFC behaviour
+ * otherwise ap_send_interim_response() does nothing when
+ * the client did not ask for 100-continue.
+ *
+         * 101 Switching Protocols has its own configuration which
+         * shouldn't be interfered with by "proxy-interim-response".
*/
- const char *policy = apr_table_get(r->subprocess_env,
- "proxy-interim-response");
+ if (proxy_status != HTTP_SWITCHING_PROTOCOLS) {
+ policy = apr_table_get(r->subprocess_env,
+ "proxy-interim-response");
+ }
ap_log_rerror(APLOG_MARK, APLOG_TRACE2, 0, r,
- "HTTP: received interim %d response", r->status);
+ "HTTP: received interim %d response (policy: %s)",
+ r->status, policy ? policy : "n/a");
if (!policy
- || (!strcasecmp(policy, "RFC") && ((r->expecting_100 = 1)))) {
+ || (!strcasecmp(policy, "RFC")
+ && (proxy_status != HTTP_CONTINUE
+ || (r->expecting_100 = 1)))) {
+ switch (proxy_status) {
+ case HTTP_SWITCHING_PROTOCOLS:
+ AP_DEBUG_ASSERT(upgrade != NULL);
+ apr_table_setn(r->headers_out, "Connection", "Upgrade");
+ apr_table_setn(r->headers_out, "Upgrade",
+ apr_pstrdup(p, upgrade));
+ break;
+ }
ap_send_interim_response(r, 1);
}
/* FIXME: refine this to be able to specify per-response-status
@@ -1573,57 +1444,144 @@ apr_status_t ap_proxy_http_process_response(apr_pool_t * p, request_rec *r,
ap_log_rerror(APLOG_MARK, APLOG_WARNING, 0, r, APLOGNO(01108)
"undefined proxy interim response policy");
}
+ interim_response++;
}
- /* Moved the fixups of Date headers and those affected by
- * ProxyPassReverse/etc from here to ap_proxy_read_headers
+ else {
+ interim_response = 0;
+ }
+
+ /* If we still do 100-continue (end-to-end or ping), either the
+ * current response is the expected "100 Continue" and we are done
+ * with this mode, or this is another interim response and we'll wait
+ * for the next one, or this is a final response and hence the backend
+ * did not honor our expectation.
*/
+ if (do_100_continue && (!interim_response
+ || proxy_status == HTTP_CONTINUE)) {
+ /* RFC 7231 - Section 5.1.1 - Expect - Requirement for servers
+ * A server that responds with a final status code before
+ * reading the entire message body SHOULD indicate in that
+ * response whether it intends to close the connection or
+ * continue reading and discarding the request message.
+ *
+ * So, if this response is not an interim 100 Continue, we can
+ * avoid sending the request body if the backend responded with
+ * "Connection: close" or HTTP < 1.1, and either let the core
+ * discard it or the caller try another balancer member with the
+ * same body (given status 503, though not implemented yet).
+ */
+ int do_send_body = (proxy_status == HTTP_CONTINUE
+ || (!toclose && major > 0 && minor > 0));
- if ((proxy_status == 401) && (dconf->error_override)) {
- const char *buf;
- const char *wa = "WWW-Authenticate";
- if ((buf = apr_table_get(r->headers_out, wa))) {
- apr_table_set(r->err_headers_out, wa, buf);
- } else {
- ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(01109)
- "origin server sent 401 without "
- "WWW-Authenticate header");
+ /* Reset to old timeout iff we've adjusted it. */
+ if (worker->s->ping_timeout_set) {
+ apr_socket_timeout_set(backend->sock, old_timeout);
}
- }
- r->sent_bodyct = 1;
- /*
- * Is it an HTTP/0.9 response or did we maybe preread the 1st line of
- * the response? If so, load the extra data. These are 2 mutually
- * exclusive possibilities, that just happen to require very
- * similar behavior.
- */
- if (backasswards || pread_len) {
- apr_ssize_t cntr = (apr_ssize_t)pread_len;
- if (backasswards) {
- /*@@@FIXME:
- * At this point in response processing of a 0.9 response,
- * we don't know yet whether data is binary or not.
- * mod_charset_lite will get control later on, so it cannot
- * decide on the conversion of this buffer full of data.
- * However, chances are that we are not really talking to an
- * HTTP/0.9 server, but to some different protocol, therefore
- * the best guess IMHO is to always treat the buffer as "text/x":
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(10153)
+ "HTTP: %s100 continue sent by %pI (%s): "
+ "%ssending body (response: HTTP/%i.%i %s)",
+ proxy_status != HTTP_CONTINUE ? "no " : "",
+ backend->addr,
+ backend->hostname ? backend->hostname : "",
+ do_send_body ? "" : "not ",
+ major, minor, proxy_status_line);
+
+ if (do_send_body) {
+ status = send_continue_body(req);
+ if (status != OK) {
+ return status;
+ }
+ }
+ else {
+ /* If we don't read the client connection any further, since
+             * there is pending data, it should be "Connection: close"d to
+             * prevent reuse. We don't set c->keepalive = AP_CONN_CLOSE
+             * here though, because error_override or a potential retry on
+             * another backend could finally read that data and finalize
+             * the request processing, making keep-alive possible. So what
+             * we do is leave r->expecting_100 alone; ap_set_keepalive()
+ * will do the right thing according to the final response and
+ * any later update of r->expecting_100.
*/
- ap_xlate_proto_to_ascii(buffer, len);
- cntr = (apr_ssize_t)len;
}
- e = apr_bucket_heap_create(buffer, cntr, NULL, c->bucket_alloc);
- APR_BRIGADE_INSERT_TAIL(bb, e);
+
+ /* Once only! */
+ do_100_continue = 0;
+ }
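
With end-to-end or ping 100-continue, the first backend response decides whether the (still unsent) body is worth forwarding: a 100 Continue always is, while any other status qualifies only if the backend is HTTP/1.1 or later and did not ask to close the connection. The do_send_body computation above as a standalone predicate, for illustration only:

    #include <stdio.h>

    static int should_send_body(int proxy_status, int backend_wants_close,
                                int major, int minor)
    {
        return proxy_status == 100                            /* 100 Continue */
            || (!backend_wants_close && major > 0 && minor > 0); /* HTTP/1.1+ */
    }

    int main(void)
    {
        printf("%d\n", should_send_body(100, 0, 1, 1));  /* 1: go ahead      */
        printf("%d\n", should_send_body(503, 1, 1, 1));  /* 0: skip the body */
        return 0;
    }
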
+
+ if (proxy_status == HTTP_SWITCHING_PROTOCOLS) {
+ apr_status_t rv;
+ proxy_tunnel_rec *tunnel;
+ apr_interval_time_t client_timeout = -1,
+ backend_timeout = -1;
+
+ /* If we didn't send the full body yet, do it now */
+ if (do_100_continue) {
+ r->expecting_100 = 0;
+ status = send_continue_body(req);
+ if (status != OK) {
+ return status;
+ }
+ }
+
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(10239)
+ "HTTP: tunneling protocol %s", upgrade);
+
+ rv = ap_proxy_tunnel_create(&tunnel, r, origin, upgrade);
+ if (rv != APR_SUCCESS) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(10240)
+ "can't create tunnel for %s", upgrade);
+ return HTTP_INTERNAL_SERVER_ERROR;
+ }
+
+ /* Set timeout to the highest configured for client or backend */
+ apr_socket_timeout_get(backend->sock, &backend_timeout);
+ apr_socket_timeout_get(ap_get_conn_socket(c), &client_timeout);
+ if (backend_timeout >= 0 && backend_timeout > client_timeout) {
+ tunnel->timeout = backend_timeout;
+ }
+ else {
+ tunnel->timeout = client_timeout;
+ }
+
+ /* Let proxy tunnel forward everything */
+ status = ap_proxy_tunnel_run(tunnel);
+
+ /* We are done with both connections */
+ return DONE;
+ }
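
For a 101 Switching Protocols response the proxy turns the pair of connections into a tunnel, and the tunnel timeout is simply the larger of the client and backend socket timeouts (a negative value meaning no timeout configured on that side). The selection in isolation, as a sketch where long long stands in for apr_interval_time_t:

    #include <stdio.h>

    typedef long long interval_t;        /* stand-in for apr_interval_time_t */

    static interval_t tunnel_timeout(interval_t backend, interval_t client)
    {
        return (backend >= 0 && backend > client) ? backend : client;
    }

    int main(void)
    {
        printf("%lld\n", tunnel_timeout(60000000LL, 30000000LL)); /* backend: 60s */
        printf("%lld\n", tunnel_timeout(-1LL, 30000000LL));       /* client's     */
        return 0;
    }
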
+
+ if (interim_response) {
+ /* Already forwarded above, read next response */
+ continue;
}
+
+ /* Moved the fixups of Date headers and those affected by
+ * ProxyPassReverse/etc from here to ap_proxy_read_headers
+ */
+
/* PR 41646: get HEAD right with ProxyErrorOverride */
- if (ap_is_HTTP_ERROR(r->status) && dconf->error_override) {
+ if (ap_proxy_should_override(dconf, proxy_status)) {
+ if (proxy_status == HTTP_UNAUTHORIZED) {
+ const char *buf;
+ const char *wa = "WWW-Authenticate";
+ if ((buf = apr_table_get(r->headers_out, wa))) {
+ apr_table_set(r->err_headers_out, wa, buf);
+ } else {
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(01109)
+ "origin server sent 401 without "
+ "WWW-Authenticate header");
+ }
+ }
+
/* clear r->status for override error, otherwise ErrorDocument
* thinks that this is a recursive error, and doesn't find the
* custom error page
*/
r->status = HTTP_OK;
/* Discard body, if one is expected */
- if (!r->header_only && !AP_STATUS_IS_HEADER_ONLY(proxy_status)) {
+ if (!r->header_only && !AP_STATUS_IS_HEADER_ONLY(proxy_status)) {
const char *tmp;
/* Add minimal headers needed to allow http_in filter
* detecting end of body without waiting for a timeout. */
@@ -1646,11 +1604,49 @@ apr_status_t ap_proxy_http_process_response(apr_pool_t * p, request_rec *r,
return proxy_status;
}
+ /* Forward back Upgrade header if it matches the configured one(s), it
+ * may be an HTTP_UPGRADE_REQUIRED response or some other status where
+ * Upgrade makes sense to negotiate the protocol by other means.
+ */
+ if (upgrade && ap_proxy_worker_can_upgrade(p, worker, upgrade,
+ (*req->proto == 'w')
+ ? "WebSocket" : NULL)) {
+ apr_table_setn(r->headers_out, "Connection", "Upgrade");
+ apr_table_setn(r->headers_out, "Upgrade", apr_pstrdup(p, upgrade));
+ }
+
+ r->sent_bodyct = 1;
+ /*
+ * Is it an HTTP/0.9 response or did we maybe preread the 1st line of
+ * the response? If so, load the extra data. These are 2 mutually
+ * exclusive possibilities, that just happen to require very
+ * similar behavior.
+ */
+ if (backasswards || pread_len) {
+ apr_ssize_t cntr = (apr_ssize_t)pread_len;
+ if (backasswards) {
+ /*@@@FIXME:
+ * At this point in response processing of a 0.9 response,
+ * we don't know yet whether data is binary or not.
+ * mod_charset_lite will get control later on, so it cannot
+ * decide on the conversion of this buffer full of data.
+ * However, chances are that we are not really talking to an
+ * HTTP/0.9 server, but to some different protocol, therefore
+ * the best guess IMHO is to always treat the buffer as "text/x":
+ */
+ ap_xlate_proto_to_ascii(buffer, len);
+ cntr = (apr_ssize_t)len;
+ }
+ e = apr_bucket_heap_create(buffer, cntr, NULL, c->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(bb, e);
+ }
+
/* send body - but only if a body is expected */
if ((!r->header_only) && /* not HEAD request */
- !interim_response && /* not any 1xx response */
(proxy_status != HTTP_NO_CONTENT) && /* not 204 */
(proxy_status != HTTP_NOT_MODIFIED)) { /* not 304 */
+ apr_read_type_e mode;
+ int finish;
/* We need to copy the output headers and treat them as input
* headers as well. BUT, we need to do this before we remove
@@ -1671,152 +1667,148 @@ apr_status_t ap_proxy_http_process_response(apr_pool_t * p, request_rec *r,
ap_log_rerror(APLOG_MARK, APLOG_TRACE3, 0, r, "start body send");
- /*
- * if we are overriding the errors, we can't put the content
- * of the page into the brigade
+ /* read the body, pass it to the output filters */
+
+ /* Handle the case where the error document is itself reverse
+ * proxied and was successful. We must maintain any previous
+ * error status so that an underlying error (eg HTTP_NOT_FOUND)
+ * doesn't become an HTTP_OK.
*/
- if (!dconf->error_override || !ap_is_HTTP_ERROR(proxy_status)) {
- /* read the body, pass it to the output filters */
- apr_read_type_e mode = APR_NONBLOCK_READ;
- int finish = FALSE;
-
- /* Handle the case where the error document is itself reverse
- * proxied and was successful. We must maintain any previous
- * error status so that an underlying error (eg HTTP_NOT_FOUND)
- * doesn't become an HTTP_OK.
- */
- if (dconf->error_override && !ap_is_HTTP_ERROR(proxy_status)
- && ap_is_HTTP_ERROR(original_status)) {
- r->status = original_status;
- r->status_line = original_status_line;
- }
+ if (ap_proxy_should_override(dconf, original_status)) {
+ r->status = original_status;
+ r->status_line = original_status_line;
+ }
- do {
- apr_off_t readbytes;
- apr_status_t rv;
-
- rv = ap_get_brigade(backend->r->input_filters, bb,
- AP_MODE_READBYTES, mode,
- conf->io_buffer_size);
-
- /* ap_get_brigade will return success with an empty brigade
- * for a non-blocking read which would block: */
- if (mode == APR_NONBLOCK_READ
- && (APR_STATUS_IS_EAGAIN(rv)
- || (rv == APR_SUCCESS && APR_BRIGADE_EMPTY(bb)))) {
- /* flush to the client and switch to blocking mode */
- e = apr_bucket_flush_create(c->bucket_alloc);
- APR_BRIGADE_INSERT_TAIL(bb, e);
- if (ap_pass_brigade(r->output_filters, bb)
- || c->aborted) {
- backend->close = 1;
- break;
- }
- apr_brigade_cleanup(bb);
- mode = APR_BLOCK_READ;
- continue;
- }
- else if (rv == APR_EOF) {
- backend->close = 1;
- break;
- }
- else if (rv != APR_SUCCESS) {
- /* In this case, we are in real trouble because
- * our backend bailed on us. Pass along a 502 error
- * error bucket
- */
- ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01110)
- "error reading response");
- ap_proxy_backend_broke(r, bb);
- ap_pass_brigade(r->output_filters, bb);
- backend_broke = 1;
+ mode = APR_NONBLOCK_READ;
+ finish = FALSE;
+ do {
+ apr_off_t readbytes;
+ apr_status_t rv;
+
+ rv = ap_get_brigade(backend->r->input_filters, bb,
+ AP_MODE_READBYTES, mode,
+ req->sconf->io_buffer_size);
+
+ /* ap_get_brigade will return success with an empty brigade
+ * for a non-blocking read which would block: */
+ if (mode == APR_NONBLOCK_READ
+ && (APR_STATUS_IS_EAGAIN(rv)
+ || (rv == APR_SUCCESS && APR_BRIGADE_EMPTY(bb)))) {
+ /* flush to the client and switch to blocking mode */
+ e = apr_bucket_flush_create(c->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(bb, e);
+ if (ap_pass_brigade(r->output_filters, bb)
+ || c->aborted) {
backend->close = 1;
break;
}
- /* next time try a non-blocking read */
- mode = APR_NONBLOCK_READ;
+ apr_brigade_cleanup(bb);
+ mode = APR_BLOCK_READ;
+ continue;
+ }
+ else if (rv == APR_EOF) {
+ backend->close = 1;
+ break;
+ }
+ else if (rv != APR_SUCCESS) {
+ /* In this case, we are in real trouble because
+                 * our backend bailed on us. Pass along a 502 error
+                 * bucket.
+ */
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01110)
+ "error reading response");
+ apr_brigade_cleanup(bb);
+ ap_proxy_backend_broke(r, bb);
+ ap_pass_brigade(r->output_filters, bb);
+ backend_broke = 1;
+ backend->close = 1;
+ break;
+ }
+ /* next time try a non-blocking read */
+ mode = APR_NONBLOCK_READ;
- if (!apr_is_empty_table(backend->r->trailers_in)) {
- apr_table_do(add_trailers, r->trailers_out,
- backend->r->trailers_in, NULL);
- apr_table_clear(backend->r->trailers_in);
- }
+ if (!apr_is_empty_table(backend->r->trailers_in)) {
+ apr_table_do(add_trailers, r->trailers_out,
+ backend->r->trailers_in, NULL);
+ apr_table_clear(backend->r->trailers_in);
+ }
- apr_brigade_length(bb, 0, &readbytes);
- backend->worker->s->read += readbytes;
+ apr_brigade_length(bb, 0, &readbytes);
+ backend->worker->s->read += readbytes;
#if DEBUGGING
- {
- ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(01111)
- "readbytes: %#x", readbytes);
- }
+ {
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(01111)
+ "readbytes: %#x", readbytes);
+ }
#endif
- /* sanity check */
- if (APR_BRIGADE_EMPTY(bb)) {
- break;
- }
+ /* sanity check */
+ if (APR_BRIGADE_EMPTY(bb)) {
+ break;
+ }
- /* Switch the allocator lifetime of the buckets */
- ap_proxy_buckets_lifetime_transform(r, bb, pass_bb);
-
- /* found the last brigade? */
- if (APR_BUCKET_IS_EOS(APR_BRIGADE_LAST(pass_bb))) {
-
- /* signal that we must leave */
- finish = TRUE;
-
- /* the brigade may contain transient buckets that contain
- * data that lives only as long as the backend connection.
- * Force a setaside so these transient buckets become heap
- * buckets that live as long as the request.
- */
- for (e = APR_BRIGADE_FIRST(pass_bb); e
- != APR_BRIGADE_SENTINEL(pass_bb); e
- = APR_BUCKET_NEXT(e)) {
- apr_bucket_setaside(e, r->pool);
- }
-
- /* finally it is safe to clean up the brigade from the
- * connection pool, as we have forced a setaside on all
- * buckets.
- */
- apr_brigade_cleanup(bb);
-
- /* make sure we release the backend connection as soon
- * as we know we are done, so that the backend isn't
- * left waiting for a slow client to eventually
- * acknowledge the data.
- */
- ap_proxy_release_connection(backend->worker->s->scheme,
- backend, r->server);
- /* Ensure that the backend is not reused */
- *backend_ptr = NULL;
+ /* Switch the allocator lifetime of the buckets */
+ ap_proxy_buckets_lifetime_transform(r, bb, pass_bb);
- }
+ /* found the last brigade? */
+ if (APR_BUCKET_IS_EOS(APR_BRIGADE_LAST(pass_bb))) {
- /* try send what we read */
- if (ap_pass_brigade(r->output_filters, pass_bb) != APR_SUCCESS
- || c->aborted) {
- /* Ack! Phbtt! Die! User aborted! */
- /* Only close backend if we haven't got all from the
- * backend. Furthermore if *backend_ptr is NULL it is no
- * longer safe to fiddle around with backend as it might
- * be already in use by another thread.
- */
- if (*backend_ptr) {
- backend->close = 1; /* this causes socket close below */
- }
- finish = TRUE;
+ /* signal that we must leave */
+ finish = TRUE;
+
+ /* the brigade may contain transient buckets that contain
+ * data that lives only as long as the backend connection.
+ * Force a setaside so these transient buckets become heap
+ * buckets that live as long as the request.
+ */
+ for (e = APR_BRIGADE_FIRST(pass_bb); e
+ != APR_BRIGADE_SENTINEL(pass_bb); e
+ = APR_BUCKET_NEXT(e)) {
+ apr_bucket_setaside(e, r->pool);
}
- /* make sure we always clean up after ourselves */
- apr_brigade_cleanup(pass_bb);
+ /* finally it is safe to clean up the brigade from the
+ * connection pool, as we have forced a setaside on all
+ * buckets.
+ */
apr_brigade_cleanup(bb);
- } while (!finish);
- }
+ /* make sure we release the backend connection as soon
+ * as we know we are done, so that the backend isn't
+ * left waiting for a slow client to eventually
+ * acknowledge the data.
+ */
+ ap_proxy_release_connection(backend->worker->s->scheme,
+ backend, r->server);
+ /* Ensure that the backend is not reused */
+ req->backend = NULL;
+
+ }
+
+ /* try send what we read */
+ if (ap_pass_brigade(r->output_filters, pass_bb) != APR_SUCCESS
+ || c->aborted) {
+ /* Ack! Phbtt! Die! User aborted! */
+ /* Only close backend if we haven't got all from the
+ * backend. Furthermore if req->backend is NULL it is no
+ * longer safe to fiddle around with backend as it might
+ * be already in use by another thread.
+ */
+ if (req->backend) {
+ /* this causes socket close below */
+ req->backend->close = 1;
+ }
+ finish = TRUE;
+ }
+
+ /* make sure we always clean up after ourselves */
+ apr_brigade_cleanup(pass_bb);
+ apr_brigade_cleanup(bb);
+
+ } while (!finish);
+
ap_log_rerror(APLOG_MARK, APLOG_TRACE2, 0, r, "end body send");
}
- else if (!interim_response) {
+ else {
ap_log_rerror(APLOG_MARK, APLOG_TRACE2, 0, r, "header only");
/* make sure we release the backend connection as soon
@@ -1826,7 +1818,8 @@ apr_status_t ap_proxy_http_process_response(apr_pool_t * p, request_rec *r,
*/
ap_proxy_release_connection(backend->worker->s->scheme,
backend, r->server);
- *backend_ptr = NULL;
+ /* Ensure that the backend is not reused */
+ req->backend = NULL;
/* Pass EOS bucket down the filter chain. */
e = apr_bucket_eos_create(c->bucket_alloc);
@@ -1880,62 +1873,108 @@ static int proxy_http_handler(request_rec *r, proxy_worker *worker,
apr_port_t proxyport)
{
int status;
- char server_portstr[32];
- char *scheme;
- const char *proxy_function;
- const char *u;
+ const char *scheme;
+ const char *u = url;
+ proxy_http_req_t *req = NULL;
proxy_conn_rec *backend = NULL;
+ apr_bucket_brigade *input_brigade = NULL;
int is_ssl = 0;
conn_rec *c = r->connection;
+ proxy_dir_conf *dconf;
int retry = 0;
+ char *locurl = url;
+ int toclose = 0;
/*
* Use a shorter-lived pool to reduce memory usage
* and avoid a memory leak
*/
apr_pool_t *p = r->pool;
- apr_uri_t *uri = apr_palloc(p, sizeof(*uri));
+ apr_uri_t *uri;
- /* find the scheme */
- u = strchr(url, ':');
- if (u == NULL || u[1] != '/' || u[2] != '/' || u[3] == '\0')
- return DECLINED;
- if ((u - url) > 14)
- return HTTP_BAD_REQUEST;
- scheme = apr_pstrmemdup(p, url, u - url);
- /* scheme is lowercase */
- ap_str_tolower(scheme);
- /* is it for us? */
- if (strcmp(scheme, "https") == 0) {
- if (!ap_proxy_ssl_enable(NULL)) {
- ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(01112)
- "HTTPS: declining URL %s (mod_ssl not configured?)",
- url);
- return DECLINED;
- }
- is_ssl = 1;
- proxy_function = "HTTPS";
+ scheme = get_url_scheme(&u, &is_ssl);
+ if (!scheme && proxyname && strncasecmp(url, "ftp:", 4) == 0) {
+ u = url + 4;
+ scheme = "ftp";
+ is_ssl = 0;
}
- else if (!(strcmp(scheme, "http") == 0 || (strcmp(scheme, "ftp") == 0 && proxyname))) {
- ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(01113) "HTTP: declining URL %s",
- url);
- return DECLINED; /* only interested in HTTP, or FTP via proxy */
+ if (!scheme || u[0] != '/' || u[1] != '/' || u[2] == '\0') {
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(01113)
+ "HTTP: declining URL %s", url);
+ return DECLINED; /* only interested in HTTP, WS or FTP via proxy */
}
- else {
- if (*scheme == 'h')
- proxy_function = "HTTP";
- else
- proxy_function = "FTP";
+ if (is_ssl && !ap_ssl_has_outgoing_handlers()) {
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(01112)
+ "HTTP: declining URL %s (mod_ssl not configured?)", url);
+ return DECLINED;
}
ap_log_rerror(APLOG_MARK, APLOG_TRACE1, 0, r, "HTTP: serving URL %s", url);
-
/* create space for state information */
- if ((status = ap_proxy_acquire_connection(proxy_function, &backend,
- worker, r->server)) != OK)
- goto cleanup;
+ if ((status = ap_proxy_acquire_connection(scheme, &backend,
+ worker, r->server)) != OK) {
+ return status;
+ }
backend->is_ssl = is_ssl;
+ req = apr_pcalloc(p, sizeof(*req));
+ req->p = p;
+ req->r = r;
+ req->sconf = conf;
+ req->worker = worker;
+ req->backend = backend;
+ req->proto = scheme;
+ req->bucket_alloc = c->bucket_alloc;
+ req->rb_method = RB_INIT;
+
+ dconf = ap_get_module_config(r->per_dir_config, &proxy_module);
+
+ if (apr_table_get(r->subprocess_env, "force-proxy-request-1.0")) {
+ req->force10 = 1;
+ }
+ else if (*worker->s->upgrade || *req->proto == 'w') {
+ /* Forward Upgrade header if it matches the configured one(s),
+ * the default being "WebSocket" for ws[s] schemes.
+ */
+ const char *upgrade = apr_table_get(r->headers_in, "Upgrade");
+ if (upgrade && ap_proxy_worker_can_upgrade(p, worker, upgrade,
+ (*req->proto == 'w')
+ ? "WebSocket" : NULL)) {
+ req->upgrade = upgrade;
+ }
+ }
+
+ /* We possibly reuse input data prefetched in previous call(s), e.g. for a
+ * balancer fallback scenario, and in this case the 100 continue settings
+ * should be consistent between balancer members. If not, we need to ignore
+ * Proxy100Continue on=>off once we tried to prefetch already, otherwise
+     * the HTTP_IN filter won't send 100 Continue for us anymore, and we and
+     * the client might deadlock, each waiting for the other. Note that off=>on
+     * is not an issue because in this case r->expecting_100 is false (the 100
+     * Continue is out already), but we make sure that prefetch will be
+     * nonblocking to avoid spending more time there.
+ */
+ apr_pool_userdata_get((void **)&input_brigade, "proxy-req-input", p);
+
+ /* Should we handle end-to-end or ping 100-continue? */
+ if (!req->force10
+ && ((r->expecting_100 && (dconf->forward_100_continue || input_brigade))
+ || PROXY_SHOULD_PING_100_CONTINUE(worker, r))) {
+ /* Tell ap_proxy_create_hdrbrgd() to preserve/add the Expect header */
+ apr_table_setn(r->notes, "proxy-100-continue", "1");
+ req->do_100_continue = 1;
+ }
+
+ /* Should we block while prefetching the body or try nonblocking and flush
+ * data to the backend ASAP?
+ */
+ if (input_brigade
+ || req->do_100_continue
+ || apr_table_get(r->subprocess_env,
+ "proxy-prefetch-nonblocking")) {
+ req->prefetch_nonblocking = 1;
+ }
+
/*
* In the case that we are handling a reverse proxy connection and this
* is not a request that is coming over an already kept alive connection
@@ -1949,20 +1988,68 @@ static int proxy_http_handler(request_rec *r, proxy_worker *worker,
backend->close = 1;
}
+ /* Step One: Determine Who To Connect To */
+ uri = apr_palloc(p, sizeof(*uri));
+ if ((status = ap_proxy_determine_connection(p, r, conf, worker, backend,
+ uri, &locurl, proxyname,
+ proxyport, req->server_portstr,
+ sizeof(req->server_portstr))))
+ goto cleanup;
+
+ /* The header is always (re-)built since it depends on worker settings,
+ * but the body can be fetched only once (even partially), so it's saved
+ * in between proxy_http_handler() calls should we come back here.
+ */
+ req->header_brigade = apr_brigade_create(p, req->bucket_alloc);
+ if (input_brigade == NULL) {
+ input_brigade = apr_brigade_create(p, req->bucket_alloc);
+ apr_pool_userdata_setn(input_brigade, "proxy-req-input", NULL, p);
+ }
+ req->input_brigade = input_brigade;
+
+    /* Prefetch (nonblocking) the request body to increase the chance of getting
+     * the whole (or enough of the) body and determining Content-Length vs
+     * chunked or spooled. By doing this before connecting to or reusing the
+     * backend, we want to minimize the delay between the connection being
+     * considered alive and the first bytes being sent (should the client's link
+     * be slow or some input filter retain the data). This is a best effort to
+     * prevent the backend from closing (from under us) what it thinks is an
+     * idle connection, and hence to minimize the unavoidable local
+     * is_socket_connected() vs remote keepalive race condition.
+     */
+ if ((status = ap_proxy_http_prefetch(req, uri, locurl)) != OK)
+ goto cleanup;
+
+ /* We need to reset backend->close now, since ap_proxy_http_prefetch() set
+ * it to disable the reuse of the connection *after* this request (no keep-
+     * alive), not to close any reusable connection before this request. However,
+     * remember what is expected later via a local flag and do the right thing
+     * when ap_proxy_connect_backend() below provides the connection to close.
+ */
+ toclose = backend->close;
+ backend->close = 0;
+
while (retry < 2) {
- char *locurl = url;
+ if (retry) {
+ char *newurl = url;
- /* Step One: Determine Who To Connect To */
- if ((status = ap_proxy_determine_connection(p, r, conf, worker, backend,
- uri, &locurl, proxyname,
- proxyport, server_portstr,
- sizeof(server_portstr))) != OK)
- break;
+ /* Step One (again): (Re)Determine Who To Connect To */
+ if ((status = ap_proxy_determine_connection(p, r, conf, worker,
+ backend, uri, &newurl, proxyname, proxyport,
+ req->server_portstr, sizeof(req->server_portstr))))
+ break;
+
+ /* The code assumes locurl is not changed during the loop, or
+ * ap_proxy_http_prefetch() would have to be called every time,
+ * and header_brigade be changed accordingly...
+ */
+ AP_DEBUG_ASSERT(strcmp(newurl, locurl) == 0);
+ }
/* Step Two: Make the Connection */
- if (ap_proxy_check_connection(proxy_function, backend, r->server, 1,
+ if (ap_proxy_check_connection(scheme, backend, r->server, 1,
PROXY_CHECK_CONN_EMPTY)
- && ap_proxy_connect_backend(proxy_function, backend, worker,
+ && ap_proxy_connect_backend(scheme, backend, worker,
r->server)) {
ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(01114)
"HTTP: failed to make connection to backend: %s",
@@ -1972,54 +2059,45 @@ static int proxy_http_handler(request_rec *r, proxy_worker *worker,
}
/* Step Three: Create conn_rec */
- if (!backend->connection) {
- if ((status = ap_proxy_connection_create_ex(proxy_function,
- backend, r)) != OK)
- break;
- /*
- * On SSL connections set a note on the connection what CN is
- * requested, such that mod_ssl can check if it is requested to do
- * so.
- */
- if (backend->ssl_hostname) {
- apr_table_setn(backend->connection->notes,
- "proxy-request-hostname",
- backend->ssl_hostname);
- }
+ if ((status = ap_proxy_connection_create_ex(scheme, backend, r)) != OK)
+ break;
+ req->origin = backend->connection;
+
+ /* Don't recycle the connection if prefetch (above) told not to do so */
+ if (toclose) {
+ backend->close = 1;
+ req->origin->keepalive = AP_CONN_CLOSE;
}
/* Step Four: Send the Request
* On the off-chance that we forced a 100-Continue as a
* kinda HTTP ping test, allow for retries
*/
- if ((status = ap_proxy_http_request(p, r, backend, worker,
- conf, uri, locurl, server_portstr)) != OK) {
- if ((status == HTTP_SERVICE_UNAVAILABLE) && worker->s->ping_timeout_set) {
- backend->close = 1;
+ status = ap_proxy_http_request(req);
+ if (status != OK) {
+ if (req->do_100_continue && status == HTTP_SERVICE_UNAVAILABLE) {
ap_log_rerror(APLOG_MARK, APLOG_INFO, status, r, APLOGNO(01115)
- "HTTP: 100-Continue failed to %pI (%s)",
- worker->cp->addr, worker->s->hostname_ex);
+ "HTTP: 100-Continue failed to %pI (%s:%d)",
+ backend->addr, backend->hostname, backend->port);
+ backend->close = 1;
retry++;
continue;
- } else {
- break;
}
-
+ break;
}
/* Step Five: Receive the Response... Fall thru to cleanup */
- status = ap_proxy_http_process_response(p, r, &backend, worker,
- conf, server_portstr);
+ status = ap_proxy_http_process_response(req);
break;
}
/* Step Six: Clean Up */
cleanup:
- if (backend) {
+ if (req->backend) {
if (status != OK)
- backend->close = 1;
- ap_proxy_http_cleanup(proxy_function, r, backend);
+ req->backend->close = 1;
+ ap_proxy_http_cleanup(scheme, r, req->backend);
}
return status;
}
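
For illustration only, a minimal standalone sketch of the 100-continue decision taken in the handler above: whether to forward the client's Expect end-to-end or to use 100-continue as a backend ping. The struct and helper below are simplified stand-ins for the mod_proxy state, not the module's actual types.

#include <stdio.h>

/* Simplified stand-ins for the request/worker/config state used above. */
struct ping_state {
    int force10;              /* force-proxy-request-1.0 was set            */
    int expecting_100;        /* client sent "Expect: 100-continue"         */
    int forward_100_continue; /* Proxy100Continue is on for this dir        */
    int have_prefetched_body; /* input was already prefetched (retry case)  */
    int worker_ping_set;      /* worker configured to ping via 100-continue */
};

/* Returns 1 when the proxy should handle/emit 100-continue itself. */
static int should_do_100_continue(const struct ping_state *s)
{
    if (s->force10)
        return 0;                 /* HTTP/1.0 requests cannot use Expect    */
    if (s->expecting_100 && (s->forward_100_continue || s->have_prefetched_body))
        return 1;                 /* forward the client's Expect end-to-end */
    return s->worker_ping_set;    /* or use 100-continue as a backend ping  */
}

int main(void)
{
    struct ping_state s = { 0, 1, 1, 0, 0 };
    printf("do_100_continue = %d\n", should_do_100_continue(&s));
    return 0;
}
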
diff --git a/modules/proxy/mod_proxy_scgi.c b/modules/proxy/mod_proxy_scgi.c
index 11f75de..d63c833 100644
--- a/modules/proxy/mod_proxy_scgi.c
+++ b/modules/proxy/mod_proxy_scgi.c
@@ -179,8 +179,10 @@ static int scgi_canon(request_rec *r, char *url)
char *host, sport[sizeof(":65535")];
const char *err, *path;
apr_port_t port, def_port;
+ core_dir_config *d = ap_get_core_module_config(r->per_dir_config);
+ int flags = d->allow_encoded_slashes && !d->decode_encoded_slashes ? PROXY_CANONENC_NOENCODEDSLASHENCODING : 0;
- if (strncasecmp(url, SCHEME "://", sizeof(SCHEME) + 2)) {
+ if (ap_cstr_casecmpn(url, SCHEME "://", sizeof(SCHEME) + 2)) {
return DECLINED;
}
url += sizeof(SCHEME); /* Keep slashes */
@@ -205,8 +207,8 @@ static int scgi_canon(request_rec *r, char *url)
host = apr_pstrcat(r->pool, "[", host, "]", NULL);
}
- path = ap_proxy_canonenc(r->pool, url, strlen(url), enc_path, 0,
- r->proxyreq);
+ path = ap_proxy_canonenc_ex(r->pool, url, strlen(url), enc_path, flags,
+ r->proxyreq);
if (!path) {
return HTTP_BAD_REQUEST;
}
@@ -388,6 +390,14 @@ static int pass_response(request_rec *r, proxy_conn_rec *conn)
return status;
}
+ /* SCGI has its own body framing mechanism which we don't
+ * match against any provided Content-Length, so let the
+ * core determine C-L vs T-E based on what's actually sent.
+ */
+ if (!apr_table_get(r->subprocess_env, AP_TRUST_CGILIKE_CL_ENVVAR))
+ apr_table_unset(r->headers_out, "Content-Length");
+ apr_table_unset(r->headers_out, "Transfer-Encoding");
+
conf = ap_get_module_config(r->per_dir_config, &proxy_scgi_module);
if (conf->sendfile && conf->sendfile != scgi_sendfile_off) {
short err = 1;
@@ -434,7 +444,7 @@ static int pass_response(request_rec *r, proxy_conn_rec *conn)
if (location && *location == '/') {
scgi_request_config *req_conf = apr_palloc(r->pool,
sizeof(*req_conf));
- if (strcasecmp(location_header, "Location")) {
+ if (ap_cstr_casecmp(location_header, "Location")) {
if (err) {
apr_table_unset(r->err_headers_out, location_header);
}
@@ -533,7 +543,7 @@ static int scgi_handler(request_rec *r, proxy_worker *worker,
apr_uri_t *uri;
char dummy;
- if (strncasecmp(url, SCHEME "://", sizeof(SCHEME) + 2)) {
+ if (ap_cstr_casecmpn(url, SCHEME "://", sizeof(SCHEME) + 2)) {
ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(00865)
"declining URL %s", url);
return DECLINED;
diff --git a/modules/proxy/mod_proxy_uwsgi.c b/modules/proxy/mod_proxy_uwsgi.c
index c5d4f8e..4e57196 100644
--- a/modules/proxy/mod_proxy_uwsgi.c
+++ b/modules/proxy/mod_proxy_uwsgi.c
@@ -84,10 +84,29 @@ static int uwsgi_canon(request_rec *r, char *url)
host = apr_pstrcat(r->pool, "[", host, "]", NULL);
}
- path = ap_proxy_canonenc(r->pool, url, strlen(url), enc_path, 0,
- r->proxyreq);
- if (!path) {
- return HTTP_BAD_REQUEST;
+ if (apr_table_get(r->notes, "proxy-nocanon")
+ || apr_table_get(r->notes, "proxy-noencode")) {
+ path = url; /* this is the raw/encoded path */
+ }
+ else {
+ core_dir_config *d = ap_get_core_module_config(r->per_dir_config);
+ int flags = d->allow_encoded_slashes && !d->decode_encoded_slashes ? PROXY_CANONENC_NOENCODEDSLASHENCODING : 0;
+
+ path = ap_proxy_canonenc_ex(r->pool, url, strlen(url), enc_path, flags,
+ r->proxyreq);
+ if (!path) {
+ return HTTP_BAD_REQUEST;
+ }
+ }
+ /*
+ * If we have a raw control character or a ' ' in nocanon path,
+ * correct encoding was missed.
+ */
+ if (path == url && *ap_scan_vchar_obstext(path)) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(10417)
+ "To be forwarded path contains control "
+ "characters or spaces");
+ return HTTP_FORBIDDEN;
}
r->filename =
@@ -136,7 +155,7 @@ static int uwsgi_send_headers(request_rec *r, proxy_conn_rec * conn)
int j;
apr_size_t headerlen = 4;
- apr_uint16_t pktsize, keylen, vallen;
+ apr_size_t pktsize, keylen, vallen;
const char *script_name;
const char *path_info;
const char *auth;
@@ -175,7 +194,16 @@ static int uwsgi_send_headers(request_rec *r, proxy_conn_rec * conn)
env = (apr_table_entry_t *) env_table->elts;
for (j = 0; j < env_table->nelts; ++j) {
- headerlen += 2 + strlen(env[j].key) + 2 + strlen(env[j].val);
+ headerlen += 2 + strlen(env[j].key) + 2 + (env[j].val ? strlen(env[j].val) : 0);
+ }
+
+ pktsize = headerlen - 4;
+ if (pktsize > APR_UINT16_MAX) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(10259)
+ "can't send headers to %s:%u: packet size too "
+ "large (%" APR_SIZE_T_FMT ")",
+ conn->hostname, conn->port, pktsize);
+ return HTTP_INTERNAL_SERVER_ERROR;
}
ptr = buf = apr_palloc(r->pool, headerlen);
@@ -189,15 +217,15 @@ static int uwsgi_send_headers(request_rec *r, proxy_conn_rec * conn)
memcpy(ptr, env[j].key, keylen);
ptr += keylen;
- vallen = strlen(env[j].val);
+ vallen = env[j].val ? strlen(env[j].val) : 0;
*ptr++ = (apr_byte_t) (vallen & 0xff);
*ptr++ = (apr_byte_t) ((vallen >> 8) & 0xff);
- memcpy(ptr, env[j].val, vallen);
+ if (env[j].val) {
+ memcpy(ptr, env[j].val, vallen);
+ }
ptr += vallen;
}
- pktsize = headerlen - 4;
-
buf[0] = 0;
buf[1] = (apr_byte_t) (pktsize & 0xff);
buf[2] = (apr_byte_t) ((pktsize >> 8) & 0xff);
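
As a side note, the 4-byte uwsgi header written above consists of a modifier byte, a 16-bit little-endian datasize, and a second modifier byte. A simplified standalone sketch of that encoding, with the same 16-bit overflow guard, might look like this (hypothetical helper, not the module's actual buffer handling):

#include <stdint.h>
#include <stddef.h>

/* Write the 4-byte uwsgi header in front of an already-built variable block.
 * Returns 0 on success, -1 if the block cannot be described in 16 bits.
 */
static int uwsgi_write_header(unsigned char *buf, size_t varslen)
{
    if (varslen > UINT16_MAX)
        return -1;                                /* would not fit in datasize */
    buf[0] = 0;                                   /* modifier1                 */
    buf[1] = (unsigned char)(varslen & 0xff);     /* datasize, little endian   */
    buf[2] = (unsigned char)((varslen >> 8) & 0xff);
    buf[3] = 0;                                   /* modifier2                 */
    return 0;
}
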
@@ -238,6 +266,7 @@ static request_rec *make_fake_req(conn_rec *c, request_rec *r)
request_rec *rp;
apr_pool_create(&pool, c->pool);
+ apr_pool_tag(pool, "proxy_uwsgi_rp");
rp = apr_pcalloc(pool, sizeof(*r));
@@ -297,18 +326,16 @@ static int uwsgi_response(request_rec *r, proxy_conn_rec * backend,
pass_bb = apr_brigade_create(r->pool, c->bucket_alloc);
len = ap_getline(buffer, sizeof(buffer), rp, 1);
-
if (len <= 0) {
- /* oops */
+ /* invalid or empty */
return HTTP_INTERNAL_SERVER_ERROR;
}
-
backend->worker->s->read += len;
-
- if (len >= sizeof(buffer) - 1) {
- /* oops */
+ if ((apr_size_t)len >= sizeof(buffer)) {
+ /* too long */
return HTTP_INTERNAL_SERVER_ERROR;
}
+
/* Position of http status code */
if (apr_date_checkmask(buffer, "HTTP/#.# ###*")) {
status_start = 9;
@@ -317,8 +344,8 @@ static int uwsgi_response(request_rec *r, proxy_conn_rec * backend,
status_start = 7;
}
else {
- /* oops */
- return HTTP_INTERNAL_SERVER_ERROR;
+ /* not HTTP */
+ return HTTP_BAD_GATEWAY;
}
status_end = status_start + 3;
@@ -338,21 +365,50 @@ static int uwsgi_response(request_rec *r, proxy_conn_rec * backend,
}
r->status_line = apr_pstrdup(r->pool, &buffer[status_start]);
- /* start parsing headers */
+ /* parse headers */
while ((len = ap_getline(buffer, sizeof(buffer), rp, 1)) > 0) {
+ if ((apr_size_t)len >= sizeof(buffer)) {
+ /* too long */
+ len = -1;
+ break;
+ }
value = strchr(buffer, ':');
- /* invalid header skip */
- if (!value)
- continue;
- *value = '\0';
- ++value;
+ if (!value) {
+ /* invalid header */
+ len = -1;
+ break;
+ }
+ *value++ = '\0';
+ if (*ap_scan_http_token(buffer)) {
+ /* invalid name */
+ len = -1;
+ break;
+ }
while (apr_isspace(*value))
++value;
for (end = &value[strlen(value) - 1];
end > value && apr_isspace(*end); --end)
*end = '\0';
+ if (*ap_scan_http_field_content(value)) {
+ /* invalid value */
+ len = -1;
+ break;
+ }
apr_table_add(r->headers_out, buffer, value);
}
+ if (len < 0) {
+        /* Reset headers, but not to NULL, because filters further down the
+         * chain (e.g. ap_content_length_filter) expect this to be non-NULL.
+         */
+ r->headers_out = apr_table_make(r->pool, 1);
+ return HTTP_BAD_GATEWAY;
+ }
+
+ /* T-E wins over C-L */
+ if (apr_table_get(r->headers_out, "Transfer-Encoding")) {
+ apr_table_unset(r->headers_out, "Content-Length");
+ backend->close = 1;
+ }
if ((buf = apr_table_get(r->headers_out, "Content-Type"))) {
ap_set_content_type(r, apr_pstrdup(r->pool, buf));
@@ -362,9 +418,9 @@ static int uwsgi_response(request_rec *r, proxy_conn_rec * backend,
#if AP_MODULE_MAGIC_AT_LEAST(20101106,0)
dconf =
ap_get_module_config(r->per_dir_config, &proxy_module);
- if (dconf->error_override && ap_is_HTTP_ERROR(r->status)) {
+ if (ap_proxy_should_override(dconf, r->status)) {
#else
- if (conf->error_override && ap_is_HTTP_ERROR(r->status)) {
+ if (ap_proxy_should_override(conf, r->status)) {
#endif
int status = r->status;
r->status = HTTP_OK;
@@ -446,11 +502,8 @@ static int uwsgi_handler(request_rec *r, proxy_worker * worker,
const char *proxyname, apr_port_t proxyport)
{
int status;
- int delta = 0;
- int decode_status;
proxy_conn_rec *backend = NULL;
apr_pool_t *p = r->pool;
- size_t w_len;
char server_portstr[32];
char *u_path_info;
apr_uri_t *uri;
@@ -462,24 +515,23 @@ static int uwsgi_handler(request_rec *r, proxy_worker * worker,
uri = apr_palloc(r->pool, sizeof(*uri));
- /* ADD PATH_INFO */
-#if AP_MODULE_MAGIC_AT_LEAST(20111130,0)
- w_len = strlen(worker->s->name);
-#else
- w_len = strlen(worker->name);
-#endif
- u_path_info = r->filename + 6 + w_len;
- if (u_path_info[0] != '/') {
- delta = 1;
+ /* ADD PATH_INFO (unescaped) */
+ u_path_info = ap_strchr(url + sizeof(UWSGI_SCHEME) + 2, '/');
+ if (!u_path_info) {
+ u_path_info = apr_pstrdup(r->pool, "/");
}
- decode_status = ap_unescape_url(url + w_len - delta);
- if (decode_status) {
+ else if (ap_unescape_url(u_path_info) != OK) {
ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(10100)
- "unable to decode uri: %s", url + w_len - delta);
+ "unable to decode uwsgi uri: %s", url);
return HTTP_INTERNAL_SERVER_ERROR;
}
- apr_table_add(r->subprocess_env, "PATH_INFO", url + w_len - delta);
-
+ else {
+ /* Remove duplicate slashes at the beginning of PATH_INFO */
+ while (u_path_info[1] == '/') {
+ u_path_info++;
+ }
+ }
+ apr_table_add(r->subprocess_env, "PATH_INFO", u_path_info);
/* Create space for state information */
status = ap_proxy_acquire_connection(UWSGI_SCHEME, &backend, worker,
@@ -509,12 +561,10 @@ static int uwsgi_handler(request_rec *r, proxy_worker * worker,
}
/* Step Three: Create conn_rec */
- if (!backend->connection) {
- if ((status = ap_proxy_connection_create(UWSGI_SCHEME, backend,
- r->connection,
- r->server)) != OK)
- goto cleanup;
- }
+ if ((status = ap_proxy_connection_create(UWSGI_SCHEME, backend,
+ r->connection,
+ r->server)) != OK)
+ goto cleanup;
/* Step Four: Process the Request */
if (((status = ap_setup_client_block(r, REQUEST_CHUNKED_ERROR)) != OK)
diff --git a/modules/proxy/mod_proxy_wstunnel.c b/modules/proxy/mod_proxy_wstunnel.c
index 9dda010..30ba1b4 100644
--- a/modules/proxy/mod_proxy_wstunnel.c
+++ b/modules/proxy/mod_proxy_wstunnel.c
@@ -15,9 +15,43 @@
*/
#include "mod_proxy.h"
+#include "http_config.h"
module AP_MODULE_DECLARE_DATA proxy_wstunnel_module;
+typedef struct {
+ unsigned int fallback_to_proxy_http :1,
+ fallback_to_proxy_http_set :1;
+} proxyws_dir_conf;
+
+static int can_fallback_to_proxy_http;
+
+static int proxy_wstunnel_check_trans(request_rec *r, const char *url)
+{
+ proxyws_dir_conf *dconf = ap_get_module_config(r->per_dir_config,
+ &proxy_wstunnel_module);
+
+ if (can_fallback_to_proxy_http && dconf->fallback_to_proxy_http) {
+ ap_log_rerror(APLOG_MARK, APLOG_TRACE5, 0, r, "check_trans fallback");
+ return DECLINED;
+ }
+
+ if (ap_cstr_casecmpn(url, "ws:", 3) != 0
+ && ap_cstr_casecmpn(url, "wss:", 4) != 0) {
+ return DECLINED;
+ }
+
+ if (!apr_table_get(r->headers_in, "Upgrade")) {
+ /* No Upgrade, let mod_proxy_http handle it (for instance).
+         * Note: anything but OK/DECLINED will do (i.e. bypass wstunnel without
+         * aborting the request); HTTP_UPGRADE_REQUIRED here mainly documents
+         * the intent.
+ */
+ return HTTP_UPGRADE_REQUIRED;
+ }
+
+ return OK;
+}
+
/*
* Canonicalise http-like URLs.
* scheme is the scheme for the URL
@@ -26,19 +60,26 @@ module AP_MODULE_DECLARE_DATA proxy_wstunnel_module;
*/
static int proxy_wstunnel_canon(request_rec *r, char *url)
{
+ proxyws_dir_conf *dconf = ap_get_module_config(r->per_dir_config,
+ &proxy_wstunnel_module);
char *host, *path, sport[7];
char *search = NULL;
const char *err;
char *scheme;
apr_port_t port, def_port;
+ if (can_fallback_to_proxy_http && dconf->fallback_to_proxy_http) {
+ ap_log_rerror(APLOG_MARK, APLOG_TRACE5, 0, r, "canon fallback");
+ return DECLINED;
+ }
+
/* ap_port_of_scheme() */
- if (strncasecmp(url, "ws:", 3) == 0) {
+ if (ap_cstr_casecmpn(url, "ws:", 3) == 0) {
url += 3;
scheme = "ws:";
def_port = apr_uri_port_of_scheme("http");
}
- else if (strncasecmp(url, "wss:", 4) == 0) {
+ else if (ap_cstr_casecmpn(url, "wss:", 4) == 0) {
url += 4;
scheme = "wss:";
def_port = apr_uri_port_of_scheme("https");
@@ -69,15 +110,42 @@ static int proxy_wstunnel_canon(request_rec *r, char *url)
if (apr_table_get(r->notes, "proxy-nocanon")) {
path = url; /* this is the raw path */
}
+ else if (apr_table_get(r->notes, "proxy-noencode")) {
+ path = url; /* this is the encoded path already */
+ search = r->args;
+ }
else {
- path = ap_proxy_canonenc(r->pool, url, strlen(url), enc_path, 0,
- r->proxyreq);
+ core_dir_config *d = ap_get_core_module_config(r->per_dir_config);
+ int flags = d->allow_encoded_slashes && !d->decode_encoded_slashes ? PROXY_CANONENC_NOENCODEDSLASHENCODING : 0;
+
+ path = ap_proxy_canonenc_ex(r->pool, url, strlen(url), enc_path, flags,
+ r->proxyreq);
+ if (!path) {
+ return HTTP_BAD_REQUEST;
+ }
search = r->args;
}
- if (path == NULL)
- return HTTP_BAD_REQUEST;
+ /*
+ * If we have a raw control character or a ' ' in nocanon path or
+ * r->args, correct encoding was missed.
+ */
+ if (path == url && *ap_scan_vchar_obstext(path)) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(10419)
+ "To be forwarded path contains control "
+ "characters or spaces");
+ return HTTP_FORBIDDEN;
+ }
+ if (search && *ap_scan_vchar_obstext(search)) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(10409)
+ "To be forwarded query string contains control "
+ "characters or spaces");
+ return HTTP_FORBIDDEN;
+ }
- apr_snprintf(sport, sizeof(sport), ":%d", port);
+ if (port != def_port)
+ apr_snprintf(sport, sizeof(sport), ":%d", port);
+ else
+ sport[0] = '\0';
if (ap_strchr_c(host, ':')) {
/* if literal IPv6 address */
@@ -280,112 +348,166 @@ static int proxy_wstunnel_handler(request_rec *r, proxy_worker *worker,
char *url, const char *proxyname,
apr_port_t proxyport)
{
+ proxyws_dir_conf *dconf = ap_get_module_config(r->per_dir_config,
+ &proxy_wstunnel_module);
int status;
char server_portstr[32];
proxy_conn_rec *backend = NULL;
+ const char *upgrade;
char *scheme;
- int retry;
apr_pool_t *p = r->pool;
+ char *locurl = url;
apr_uri_t *uri;
int is_ssl = 0;
- const char *upgrade_method = *worker->s->upgrade ? worker->s->upgrade : "WebSocket";
- if (strncasecmp(url, "wss:", 4) == 0) {
+ if (can_fallback_to_proxy_http && dconf->fallback_to_proxy_http) {
+ ap_log_rerror(APLOG_MARK, APLOG_TRACE5, 0, r, "handler fallback");
+ return DECLINED;
+ }
+
+ if (ap_cstr_casecmpn(url, "wss:", 4) == 0) {
scheme = "WSS";
is_ssl = 1;
}
- else if (strncasecmp(url, "ws:", 3) == 0) {
+ else if (ap_cstr_casecmpn(url, "ws:", 3) == 0) {
scheme = "WS";
}
else {
- ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(02450) "declining URL %s", url);
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(02450)
+ "declining URL %s", url);
return DECLINED;
}
-
- if (ap_cstr_casecmp(upgrade_method, "NONE") != 0) {
- const char *upgrade;
- upgrade = apr_table_get(r->headers_in, "Upgrade");
- if (!upgrade || (ap_cstr_casecmp(upgrade, upgrade_method) != 0 &&
- ap_cstr_casecmp(upgrade_method, "ANY") !=0)) {
- ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(02900)
- "declining URL %s (not %s, Upgrade: header is %s)",
- url, upgrade_method, upgrade ? upgrade : "missing");
- return DECLINED;
- }
+ ap_log_rerror(APLOG_MARK, APLOG_TRACE1, 0, r, "serving URL %s", url);
+
+ upgrade = apr_table_get(r->headers_in, "Upgrade");
+ if (!upgrade || !ap_proxy_worker_can_upgrade(p, worker, upgrade,
+ "WebSocket")) {
+ const char *worker_upgrade = *worker->s->upgrade ? worker->s->upgrade
+ : "WebSocket";
+ ap_log_rerror(APLOG_MARK, APLOG_INFO, 0, r, APLOGNO(02900)
+ "require upgrade for URL %s "
+ "(Upgrade header is %s, expecting %s)",
+ url, upgrade ? upgrade : "missing", worker_upgrade);
+ apr_table_setn(r->err_headers_out, "Connection", "Upgrade");
+ apr_table_setn(r->err_headers_out, "Upgrade", worker_upgrade);
+ return HTTP_UPGRADE_REQUIRED;
}
uri = apr_palloc(p, sizeof(*uri));
ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(02451) "serving URL %s", url);
/* create space for state information */
- status = ap_proxy_acquire_connection(scheme, &backend, worker,
- r->server);
+ status = ap_proxy_acquire_connection(scheme, &backend, worker, r->server);
if (status != OK) {
- if (backend) {
- backend->close = 1;
- ap_proxy_release_connection(scheme, backend, r->server);
- }
- return status;
+ goto cleanup;
}
backend->is_ssl = is_ssl;
backend->close = 0;
- retry = 0;
- while (retry < 2) {
- char *locurl = url;
- /* Step One: Determine Who To Connect To */
- status = ap_proxy_determine_connection(p, r, conf, worker, backend,
- uri, &locurl, proxyname, proxyport,
- server_portstr,
- sizeof(server_portstr));
-
- if (status != OK)
- break;
-
- /* Step Two: Make the Connection */
- if (ap_proxy_connect_backend(scheme, backend, worker, r->server)) {
- ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(02452)
- "failed to make connection to backend: %s",
- backend->hostname);
- status = HTTP_SERVICE_UNAVAILABLE;
- break;
- }
-
- /* Step Three: Create conn_rec */
- if (!backend->connection) {
- status = ap_proxy_connection_create_ex(scheme, backend, r);
- if (status != OK) {
- break;
- }
- }
-
- backend->close = 1; /* must be after ap_proxy_determine_connection */
+ /* Step One: Determine Who To Connect To */
+ status = ap_proxy_determine_connection(p, r, conf, worker, backend,
+ uri, &locurl, proxyname, proxyport,
+ server_portstr,
+ sizeof(server_portstr));
+ if (status != OK) {
+ goto cleanup;
+ }
+ /* Step Two: Make the Connection */
+ if (ap_proxy_connect_backend(scheme, backend, worker, r->server)) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(02452)
+ "failed to make connection to backend: %s",
+ backend->hostname);
+ status = HTTP_SERVICE_UNAVAILABLE;
+ goto cleanup;
+ }
- /* Step Three: Process the Request */
- status = proxy_wstunnel_request(p, r, backend, worker, conf, uri, locurl,
- server_portstr);
- break;
+ /* Step Three: Create conn_rec */
+ status = ap_proxy_connection_create_ex(scheme, backend, r);
+ if (status != OK) {
+ goto cleanup;
}
+ /* Step Four: Process the Request */
+ status = proxy_wstunnel_request(p, r, backend, worker, conf, uri, locurl,
+ server_portstr);
+
+cleanup:
/* Do not close the socket */
- ap_proxy_release_connection(scheme, backend, r->server);
+ if (backend) {
+ backend->close = 1;
+ ap_proxy_release_connection(scheme, backend, r->server);
+ }
return status;
}
-static void ap_proxy_http_register_hook(apr_pool_t *p)
+static void *create_proxyws_dir_config(apr_pool_t *p, char *dummy)
+{
+ proxyws_dir_conf *new =
+ (proxyws_dir_conf *) apr_pcalloc(p, sizeof(proxyws_dir_conf));
+
+ new->fallback_to_proxy_http = 1;
+
+ return (void *) new;
+}
+
+static void *merge_proxyws_dir_config(apr_pool_t *p, void *vbase, void *vadd)
+{
+ proxyws_dir_conf *new = apr_pcalloc(p, sizeof(proxyws_dir_conf)),
+ *add = vadd, *base = vbase;
+
+ new->fallback_to_proxy_http = (add->fallback_to_proxy_http_set)
+ ? add->fallback_to_proxy_http
+ : base->fallback_to_proxy_http;
+ new->fallback_to_proxy_http_set = (add->fallback_to_proxy_http_set
+ || base->fallback_to_proxy_http_set);
+
+ return new;
+}
+
+static const char * proxyws_fallback_to_proxy_http(cmd_parms *cmd, void *conf, int arg)
+{
+ proxyws_dir_conf *dconf = conf;
+ dconf->fallback_to_proxy_http = !!arg;
+ dconf->fallback_to_proxy_http_set = 1;
+ return NULL;
+}
+
+static int proxy_wstunnel_post_config(apr_pool_t *pconf, apr_pool_t *plog,
+ apr_pool_t *ptemp, server_rec *s)
+{
+ can_fallback_to_proxy_http =
+ (ap_find_linked_module("mod_proxy_http.c") != NULL);
+
+ return OK;
+}
+
+static const command_rec ws_proxy_cmds[] =
+{
+ AP_INIT_FLAG("ProxyWebsocketFallbackToProxyHttp",
+ proxyws_fallback_to_proxy_http, NULL, RSRC_CONF|ACCESS_CONF,
+ "whether to let mod_proxy_http handle the upgrade and tunneling, "
+ "On by default"),
+
+ {NULL}
+};
+
+static void ws_proxy_hooks(apr_pool_t *p)
{
- proxy_hook_scheme_handler(proxy_wstunnel_handler, NULL, NULL, APR_HOOK_FIRST);
- proxy_hook_canon_handler(proxy_wstunnel_canon, NULL, NULL, APR_HOOK_FIRST);
+ static const char * const aszSucc[] = { "mod_proxy_http.c", NULL};
+ ap_hook_post_config(proxy_wstunnel_post_config, NULL, NULL, APR_HOOK_MIDDLE);
+ proxy_hook_scheme_handler(proxy_wstunnel_handler, NULL, aszSucc, APR_HOOK_FIRST);
+ proxy_hook_check_trans(proxy_wstunnel_check_trans, NULL, aszSucc, APR_HOOK_MIDDLE);
+ proxy_hook_canon_handler(proxy_wstunnel_canon, NULL, aszSucc, APR_HOOK_FIRST);
}
AP_DECLARE_MODULE(proxy_wstunnel) = {
STANDARD20_MODULE_STUFF,
- NULL, /* create per-directory config structure */
- NULL, /* merge per-directory config structures */
+ create_proxyws_dir_config, /* create per-directory config structure */
+ merge_proxyws_dir_config, /* merge per-directory config structures */
NULL, /* create per-server config structure */
NULL, /* merge per-server config structures */
- NULL, /* command apr_table_t */
- ap_proxy_http_register_hook /* register hooks */
+ ws_proxy_cmds, /* command apr_table_t */
+ ws_proxy_hooks /* register hooks */
};
diff --git a/modules/proxy/proxy_util.c b/modules/proxy/proxy_util.c
index cbf8826..a54a4fa 100644
--- a/modules/proxy/proxy_util.c
+++ b/modules/proxy/proxy_util.c
@@ -19,22 +19,22 @@
#include "ap_mpm.h"
#include "scoreboard.h"
#include "apr_version.h"
+#include "apr_strings.h"
#include "apr_hash.h"
+#include "apr_atomic.h"
+#include "http_core.h"
#include "proxy_util.h"
#include "ajp.h"
#include "scgi.h"
+#include "mpm_common.h" /* for ap_max_mem_free */
+
#include "mod_http2.h" /* for http2_get_num_workers() */
#if APR_HAVE_UNISTD_H
#include <unistd.h> /* for getpid() */
#endif
-#if (APR_MAJOR_VERSION < 1)
-#undef apr_socket_create
-#define apr_socket_create apr_socket_create_ex
-#endif
-
#if APR_HAVE_SYS_UN_H
#include <sys/un.h>
#endif
@@ -47,7 +47,7 @@ APLOG_USE_MODULE(proxy);
/*
* Opaque structure containing target server info when
* using a forward proxy.
- * Up to now only used in combination with HTTP CONNECT.
+ * Up to now only used in combination with HTTP CONNECT to ProxyRemote
*/
typedef struct {
int use_http_connect; /* Use SSL Tunneling via HTTP CONNECT */
@@ -56,6 +56,17 @@ typedef struct {
const char *proxy_auth; /* Proxy authorization */
} forward_info;
+/*
+ * Opaque structure containing a refcounted and TTL'ed address.
+ */
+typedef struct proxy_address {
+ apr_sockaddr_t *addr; /* Remote address info */
+ const char *hostname; /* Remote host name */
+ apr_port_t hostport; /* Remote host port */
+ apr_uint32_t refcount; /* Number of conns and/or worker using it */
+    apr_uint32_t expiry;     /* Expiry timestamp (seconds relative to proxy_start_time) */
+} proxy_address;
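
A rough sketch of how such a refcounted, TTL'ed address entry could be acquired and released, using C11 atomics as a stand-in for the apr_atomic calls the module relies on; the struct and helper names are illustrative only:

#include <stdatomic.h>
#include <stddef.h>

struct cached_address {
    atomic_uint refcount;   /* number of conns/workers using it              */
    unsigned int expiry;    /* seconds relative to some fixed start time     */
    /* the resolved sockaddr, hostname and port would live here */
};

/* Take a reference if the entry is still fresh; return NULL when expired
 * so the caller re-resolves and swaps in a new entry.
 */
static struct cached_address *address_acquire(struct cached_address *a,
                                               unsigned int now)
{
    if (a == NULL || now >= a->expiry)
        return NULL;
    atomic_fetch_add(&a->refcount, 1);
    return a;
}

/* Drop a reference; the last user is responsible for freeing the entry. */
static int address_release(struct cached_address *a)
{
    return atomic_fetch_sub(&a->refcount, 1) == 1; /* 1 => we were the last */
}
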
+
/* Global balancer counter */
int PROXY_DECLARE_DATA proxy_lb_workers = 0;
static int lb_workers_limit = 0;
@@ -64,6 +75,8 @@ const apr_strmatch_pattern PROXY_DECLARE_DATA *ap_proxy_strmatch_domain;
extern apr_global_mutex_t *proxy_mutex;
+static const apr_time_t *proxy_start_time; /* epoch for expiring addresses */
+
static int proxy_match_ipaddr(struct dirconn_entry *This, request_rec *r);
static int proxy_match_domainname(struct dirconn_entry *This, request_rec *r);
static int proxy_match_hostname(struct dirconn_entry *This, request_rec *r);
@@ -204,14 +217,16 @@ PROXY_DECLARE(void) ap_proxy_c2hex(int ch, char *x)
* and encodes those which must be encoded, and does not touch
* those which must not be touched.
*/
-PROXY_DECLARE(char *)ap_proxy_canonenc(apr_pool_t *p, const char *x, int len,
- enum enctype t, int forcedec,
- int proxyreq)
+PROXY_DECLARE(char *)ap_proxy_canonenc_ex(apr_pool_t *p, const char *x, int len,
+ enum enctype t, int flags,
+ int proxyreq)
{
int i, j, ch;
char *y;
char *allowed; /* characters which should not be encoded */
char *reserved; /* characters which much not be en/de-coded */
+ int forcedec = flags & PROXY_CANONENC_FORCEDEC;
+ int noencslashesenc = flags & PROXY_CANONENC_NOENCODEDSLASHENCODING;
/*
* N.B. in addition to :@&=, this allows ';' in an http path
@@ -260,17 +275,29 @@ PROXY_DECLARE(char *)ap_proxy_canonenc(apr_pool_t *p, const char *x, int len,
* decode it if not already done. do not decode reverse proxied URLs
* unless specifically forced
*/
- if ((forcedec || (proxyreq && proxyreq != PROXYREQ_REVERSE)) && ch == '%') {
+ if ((forcedec || noencslashesenc
+ || (proxyreq && proxyreq != PROXYREQ_REVERSE)) && ch == '%') {
if (!apr_isxdigit(x[i + 1]) || !apr_isxdigit(x[i + 2])) {
return NULL;
}
ch = ap_proxy_hex2c(&x[i + 1]);
- i += 2;
if (ch != 0 && strchr(reserved, ch)) { /* keep it encoded */
- ap_proxy_c2hex(ch, &y[j]);
- j += 2;
+ y[j++] = x[i++];
+ y[j++] = x[i++];
+ y[j] = x[i];
continue;
}
+ if (noencslashesenc && !forcedec && (proxyreq == PROXYREQ_REVERSE)) {
+ /*
+ * In the reverse proxy case when we only want to keep encoded
+ * slashes untouched revert back to '%' which will cause
+ * '%' to be encoded in the following.
+ */
+ ch = '%';
+ }
+ else {
+ i += 2;
+ }
}
/* recode it, if necessary */
if (!apr_isalnum(ch) && !strchr(allowed, ch)) {
@@ -286,6 +313,22 @@ PROXY_DECLARE(char *)ap_proxy_canonenc(apr_pool_t *p, const char *x, int len,
}
/*
+ * Convert a URL-encoded string to canonical form.
+ * It decodes characters which need not be encoded,
+ * and encodes those which must be encoded, and does not touch
+ * those which must not be touched.
+ */
+PROXY_DECLARE(char *)ap_proxy_canonenc(apr_pool_t *p, const char *x, int len,
+ enum enctype t, int forcedec,
+ int proxyreq)
+{
+ int flags;
+
+ flags = forcedec ? PROXY_CANONENC_FORCEDEC : 0;
+ return ap_proxy_canonenc_ex(p, x, len, t, flags, proxyreq);
+}
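
To illustrate what the PROXY_CANONENC_NOENCODEDSLASHENCODING flag is meant to achieve for reverse proxies with AllowEncodedSlashes NoDecode, here is a simplified standalone routine (not the real ap_proxy_canonenc_ex, which also handles allowed/reserved sets, forced decoding and forward-proxy cases) that copies already-encoded slashes through untouched while re-encoding any other stray '%':

#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Keep "%2F"/"%2f" sequences exactly as received, re-encode any other '%'
 * so it cannot be mis-interpreted downstream. Illustration only.
 */
static char *keep_encoded_slashes(const char *in)
{
    size_t len = strlen(in), i, j = 0;
    char *out = malloc(3 * len + 1);   /* worst case: every byte re-encoded */

    if (!out)
        return NULL;
    for (i = 0; i < len; ++i) {
        if (in[i] == '%' && isxdigit((unsigned char)in[i + 1])
                         && isxdigit((unsigned char)in[i + 2])
                         && in[i + 1] == '2'
                         && tolower((unsigned char)in[i + 2]) == 'f') {
            out[j++] = in[i++];        /* copy "%2F" through untouched */
            out[j++] = in[i++];
            out[j++] = in[i];
        }
        else if (in[i] == '%') {
            out[j++] = '%';            /* re-encode the '%' itself      */
            out[j++] = '2';
            out[j++] = '5';
        }
        else {
            out[j++] = in[i];
        }
    }
    out[j] = '\0';
    return out;
}

int main(void)
{
    char *s = keep_encoded_slashes("/app/a%2Fb/%e9");
    puts(s ? s : "oom");               /* prints /app/a%2Fb/%25e9 */
    free(s);
    return 0;
}
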
+
+/*
* Parses network-location.
* urlp on input the URL; on output the path, after the leading /
* user NULL if no user/password permitted
@@ -366,14 +409,15 @@ PROXY_DECLARE(char *)
return NULL;
}
-PROXY_DECLARE(int) ap_proxyerror(request_rec *r, int statuscode, const char *message)
+static int proxyerror_core(request_rec *r, int statuscode, const char *message,
+ apr_status_t rv)
{
- const char *uri = ap_escape_html(r->pool, r->uri);
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(00898)
+ "%s returned by %s", message, r->uri);
+
apr_table_setn(r->notes, "error-notes",
apr_pstrcat(r->pool,
- "The proxy server could not handle the request <em><a href=\"",
- uri, "\">", ap_escape_html(r->pool, r->method), "&nbsp;", uri,
- "</a></em>.<p>\n"
+ "The proxy server could not handle the request<p>"
"Reason: <strong>", ap_escape_html(r->pool, message),
"</strong></p>",
NULL));
@@ -382,11 +426,14 @@ PROXY_DECLARE(int) ap_proxyerror(request_rec *r, int statuscode, const char *mes
apr_table_setn(r->notes, "verbose-error-to", "*");
r->status_line = apr_psprintf(r->pool, "%3.3u Proxy Error", statuscode);
- ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(00898) "%s returned by %s", message,
- r->uri);
return statuscode;
}
+PROXY_DECLARE(int) ap_proxyerror(request_rec *r, int statuscode, const char *message)
+{
+ return proxyerror_core(r, statuscode, message, 0);
+}
+
static const char *
proxy_get_host_of_request(request_rec *r)
{
@@ -890,20 +937,20 @@ PROXY_DECLARE(const char *) ap_proxy_location_reverse_map(request_rec *r,
* translate url http://example.com/foo/bar/that to /bash/that
*/
for (n = 0; n < balancer->workers->nelts; n++) {
- l2 = strlen((*worker)->s->name);
+ l2 = strlen((*worker)->s->name_ex);
if (urlpart) {
/* urlpart (l3) assuredly starts with its own '/' */
- if ((*worker)->s->name[l2 - 1] == '/')
+ if ((*worker)->s->name_ex[l2 - 1] == '/')
--l2;
if (l1 >= l2 + l3
- && strncasecmp((*worker)->s->name, url, l2) == 0
+ && strncasecmp((*worker)->s->name_ex, url, l2) == 0
&& strncmp(urlpart, url + l2, l3) == 0) {
u = apr_pstrcat(r->pool, ent[i].fake, &url[l2 + l3],
NULL);
return ap_is_url(u) ? u : ap_construct_url(r->pool, u, r);
}
}
- else if (l1 >= l2 && strncasecmp((*worker)->s->name, url, l2) == 0) {
+ else if (l1 >= l2 && strncasecmp((*worker)->s->name_ex, url, l2) == 0) {
/* edge case where fake is just "/"... avoid double slash */
if ((ent[i].fake[0] == '/') && (ent[i].fake[1] == 0) && (url[l2] == '/')) {
u = apr_pstrdup(r->pool, &url[l2]);
@@ -1080,7 +1127,7 @@ PROXY_DECLARE(int) ap_proxy_valid_balancer_name(char *name, int i)
{
if (!i)
i = sizeof(BALANCER_PREFIX)-1;
- return (!strncasecmp(name, BALANCER_PREFIX, i));
+ return (!ap_cstr_casecmpn(name, BALANCER_PREFIX, i));
}
@@ -1172,11 +1219,13 @@ PROXY_DECLARE(char *) ap_proxy_define_balancer(apr_pool_t *p,
* exist, that's OK at this time. We check when we share and sync
*/
lbmethod = ap_lookup_provider(PROXY_LBMETHOD, "byrequests", "0");
-
+ (*balancer)->lbmethod = lbmethod;
+
(*balancer)->workers = apr_array_make(p, 5, sizeof(proxy_worker *));
+#if APR_HAS_THREADS
(*balancer)->gmutex = NULL;
(*balancer)->tmutex = NULL;
- (*balancer)->lbmethod = lbmethod;
+#endif
if (do_malloc)
bshared = ap_malloc(sizeof(proxy_balancer_shared));
@@ -1188,8 +1237,11 @@ PROXY_DECLARE(char *) ap_proxy_define_balancer(apr_pool_t *p,
bshared->was_malloced = (do_malloc != 0);
PROXY_STRNCPY(bshared->lbpname, "byrequests");
if (PROXY_STRNCPY(bshared->name, uri) != APR_SUCCESS) {
+ if (do_malloc) free(bshared);
return apr_psprintf(p, "balancer name (%s) too long", uri);
}
+ (*balancer)->lbmethod_set = 1;
+
/*
* We do the below for verification. The real sname will be
* done post_config
@@ -1198,6 +1250,7 @@ PROXY_DECLARE(char *) ap_proxy_define_balancer(apr_pool_t *p,
&sname);
sname = apr_pstrcat(p, conf->id, "_", sname, NULL);
if (PROXY_STRNCPY(bshared->sname, sname) != APR_SUCCESS) {
+ if (do_malloc) free(bshared);
return apr_psprintf(p, "balancer safe-name (%s) too long", sname);
}
bshared->hash.def = ap_proxy_hashfunc(bshared->name, PROXY_HASHFUNC_DEFAULT);
@@ -1244,6 +1297,7 @@ PROXY_DECLARE(apr_status_t) ap_proxy_share_balancer(proxy_balancer *balancer,
lbmethod = ap_lookup_provider(PROXY_LBMETHOD, balancer->s->lbpname, "0");
if (lbmethod) {
balancer->lbmethod = lbmethod;
+ balancer->lbmethod_set = 1;
} else {
ap_log_error(APLOG_MARK, APLOG_CRIT, 0, ap_server_conf, APLOGNO(02432)
"Cannot find LB Method: %s", balancer->s->lbpname);
@@ -1252,10 +1306,11 @@ PROXY_DECLARE(apr_status_t) ap_proxy_share_balancer(proxy_balancer *balancer,
if (*balancer->s->nonce == PROXY_UNSET_NONCE) {
char nonce[APR_UUID_FORMATTED_LENGTH + 1];
apr_uuid_t uuid;
- /* Retrieve a UUID and store the nonce for the lifetime of
- * the process.
- */
- apr_uuid_get(&uuid);
+
+ /* Generate a pseudo-UUID from the PRNG to use as a nonce for
+ * the lifetime of the process. uuid.data is a char array so
+ * this is an adequate substitute for apr_uuid_get(). */
+ ap_random_insecure_bytes(uuid.data, sizeof uuid.data);
apr_uuid_format(nonce, &uuid);
rv = PROXY_STRNCPY(balancer->s->nonce, nonce);
}
@@ -1264,7 +1319,9 @@ PROXY_DECLARE(apr_status_t) ap_proxy_share_balancer(proxy_balancer *balancer,
PROXY_DECLARE(apr_status_t) ap_proxy_initialize_balancer(proxy_balancer *balancer, server_rec *s, apr_pool_t *p)
{
+#if APR_HAS_THREADS
apr_status_t rv = APR_SUCCESS;
+#endif
ap_slotmem_provider_t *storage = balancer->storage;
apr_size_t size;
unsigned int num;
@@ -1304,6 +1361,7 @@ PROXY_DECLARE(apr_status_t) ap_proxy_initialize_balancer(proxy_balancer *balance
if (balancer->lbmethod && balancer->lbmethod->reset)
balancer->lbmethod->reset(balancer, s);
+#if APR_HAS_THREADS
if (balancer->tmutex == NULL) {
rv = apr_thread_mutex_create(&(balancer->tmutex), APR_THREAD_MUTEX_DEFAULT, p);
if (rv != APR_SUCCESS) {
@@ -1312,6 +1370,7 @@ PROXY_DECLARE(apr_status_t) ap_proxy_initialize_balancer(proxy_balancer *balance
return rv;
}
}
+#endif
return APR_SUCCESS;
}
@@ -1335,6 +1394,7 @@ static proxy_worker *proxy_balancer_get_best_worker(proxy_balancer *balancer,
balancer->lbmethod->name, balancer->s->name);
apr_pool_create(&tpool, r->pool);
+ apr_pool_tag(tpool, "proxy_lb_best");
spares = apr_array_make(tpool, 1, sizeof(proxy_worker*));
standbys = apr_array_make(tpool, 1, sizeof(proxy_worker*));
@@ -1424,7 +1484,8 @@ static proxy_worker *proxy_balancer_get_best_worker(proxy_balancer *balancer,
if (best_worker) {
ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server, APLOGNO(10123)
"proxy: %s selected worker \"%s\" : busy %" APR_SIZE_T_FMT " : lbstatus %d",
- balancer->lbmethod->name, best_worker->s->name, best_worker->s->busy, best_worker->s->lbstatus);
+ balancer->lbmethod->name, best_worker->s->name_ex,
+ best_worker->s->busy, best_worker->s->lbstatus);
}
return best_worker;
@@ -1451,61 +1512,136 @@ static void socket_cleanup(proxy_conn_rec *conn)
apr_pool_clear(conn->scpool);
}
-static apr_status_t conn_pool_cleanup(void *theworker)
+static void address_cleanup(proxy_conn_rec *conn)
{
- proxy_worker *worker = (proxy_worker *)theworker;
- if (worker->cp->res) {
- worker->cp->pool = NULL;
+ conn->address = NULL;
+ conn->addr = NULL;
+ conn->hostname = NULL;
+ conn->port = 0;
+ conn->uds_path = NULL;
+ if (conn->uds_pool) {
+ apr_pool_clear(conn->uds_pool);
+ }
+ if (conn->sock) {
+ socket_cleanup(conn);
}
+}
+
+static apr_status_t conn_pool_cleanup(void *theworker)
+{
+ ((proxy_worker *)theworker)->cp = NULL;
return APR_SUCCESS;
}
-static void init_conn_pool(apr_pool_t *p, proxy_worker *worker)
+static apr_pool_t *make_conn_subpool(apr_pool_t *p, const char *tag,
+ server_rec *s)
+{
+ apr_pool_t *sp = NULL;
+ apr_allocator_t *alloc;
+ apr_thread_mutex_t *mutex;
+ apr_status_t rv;
+
+ rv = apr_allocator_create(&alloc);
+ if (rv == APR_SUCCESS) {
+ rv = apr_thread_mutex_create(&mutex, APR_THREAD_MUTEX_DEFAULT, p);
+ if (rv == APR_SUCCESS) {
+ apr_allocator_mutex_set(alloc, mutex);
+ apr_allocator_max_free_set(alloc, ap_max_mem_free);
+ rv = apr_pool_create_ex(&sp, p, NULL, alloc);
+ }
+ else {
+ apr_allocator_destroy(alloc);
+ }
+ }
+ if (rv != APR_SUCCESS) {
+ ap_log_error(APLOG_MARK, APLOG_CRIT, rv, s, APLOGNO(10474)
+ "failed to create %s pool", tag);
+ ap_abort_on_oom();
+ return NULL; /* not reached */
+ }
+ apr_allocator_owner_set(alloc, sp);
+ apr_pool_tag(sp, tag);
+
+ return sp;
+}
+
+static void init_conn_pool(apr_pool_t *p, proxy_worker *worker, server_rec *s)
{
- apr_pool_t *pool;
proxy_conn_pool *cp;
/*
- * Create a connection pool's subpool.
- * This pool is used for connection recycling.
- * Once the worker is added it is never removed but
- * it can be disabled.
- */
- apr_pool_create(&pool, p);
- apr_pool_tag(pool, "proxy_worker_cp");
- /*
* Alloc from the same pool as worker.
* proxy_conn_pool is permanently attached to the worker.
*/
cp = (proxy_conn_pool *)apr_pcalloc(p, sizeof(proxy_conn_pool));
- cp->pool = pool;
worker->cp = cp;
+
+ /*
+ * We need a first pool (cp->pool) to maintain the connections attached to
+ * the worker and a second one (cp->dns_pool) to maintain the DNS addresses
+ * in use (TTL'ed, refcounted). New connections are created as/on a subpool
+ * of cp->pool and new addresses as/on a subpool of cp->dns_pool, such that
+ * both leaks (the subpools can be destroyed when the connections and/or
+ * addresses are over) and race conditions (the creation/destruction of
+ * subpools is protected by the parent pool's mutex) can be avoided.
+ *
+ * cp->dns_pool is created before cp->pool because when a connection on the
+ * latter is destroyed it might destroy an address on the former, so when
+     * the base pools are destroyed (e.g. on child exit) we thus make sure that
+ * cp->dns_pool and its subpools are still alive when cp->pool gets killed.
+ *
+ * Both cp->dns_pool and cp->pool have their own allocator/mutex too since
+ * acquiring connections and addresses don't need to contend.
+ */
+ cp->dns_pool = make_conn_subpool(p, "proxy_worker_dns", s);
+ cp->pool = make_conn_subpool(p, "proxy_worker_cp", s);
+
+    /* When p is cleaning up the child is exiting; signal that to e.g. avoid
+     * destroying the subpools explicitly in connection_destructor() when
+     * they have already been destroyed by the reslist cleanup.
+ */
+ apr_pool_pre_cleanup_register(p, worker, conn_pool_cleanup);
}
PROXY_DECLARE(int) ap_proxy_connection_reusable(proxy_conn_rec *conn)
{
proxy_worker *worker = conn->worker;
- return ! (conn->close || !worker->s->is_address_reusable || worker->s->disablereuse);
+ return !(conn->close
+ || conn->forward
+ || worker->s->disablereuse
+ || !worker->s->is_address_reusable);
}
-static apr_status_t connection_cleanup(void *theconn)
+static proxy_conn_rec *connection_make(apr_pool_t *p, proxy_worker *worker)
{
- proxy_conn_rec *conn = (proxy_conn_rec *)theconn;
- proxy_worker *worker = conn->worker;
+ proxy_conn_rec *conn;
+
+ conn = apr_pcalloc(p, sizeof(proxy_conn_rec));
+ conn->pool = p;
+ conn->worker = worker;
/*
- * If the connection pool is NULL the worker
- * cleanup has been run. Just return.
+ * Create another subpool that manages the data for the
+ * socket and the connection member of the proxy_conn_rec struct as we
+ * destroy this data more frequently than other data in the proxy_conn_rec
+ * struct like hostname and addr (at least in the case where we have
+ * keepalive connections that timed out).
+ *
+ * XXX: this is really needed only when worker->s->is_address_reusable,
+ * otherwise conn->scpool = conn->pool would be fine. For now we
+ * can't change it since it's (kind of) part of the API.
*/
- if (!worker->cp->pool) {
- return APR_SUCCESS;
- }
+ apr_pool_create(&conn->scpool, p);
+ apr_pool_tag(conn->scpool, "proxy_conn_scpool");
- if (conn->r) {
- apr_pool_destroy(conn->r->pool);
- conn->r = NULL;
- }
+ return conn;
+}
+
+static void connection_cleanup(void *theconn)
+{
+ proxy_conn_rec *conn = (proxy_conn_rec *)theconn;
+ proxy_worker *worker = conn->worker;
/* Sanity check: Did we already return the pooled connection? */
if (conn->inreslist) {
@@ -1513,37 +1649,43 @@ static apr_status_t connection_cleanup(void *theconn)
"Pooled connection 0x%pp for worker %s has been"
" already returned to the connection pool.", conn,
ap_proxy_worker_name(conn->pool, worker));
- return APR_SUCCESS;
+ return;
+ }
+
+ if (conn->r) {
+ apr_pool_destroy(conn->r->pool);
+ conn->r = NULL;
}
- /* determine if the connection need to be closed */
- if (!worker->s->is_address_reusable || worker->s->disablereuse) {
+ /* determine if the connection should be cleared, closed or reused */
+ if (!worker->s->is_address_reusable) {
apr_pool_t *p = conn->pool;
apr_pool_clear(p);
- conn = apr_pcalloc(p, sizeof(proxy_conn_rec));
- conn->pool = p;
- conn->worker = worker;
- apr_pool_create(&(conn->scpool), p);
- apr_pool_tag(conn->scpool, "proxy_conn_scpool");
+ conn = connection_make(p, worker);
}
else if (conn->close
- || (conn->connection
- && conn->connection->keepalive == AP_CONN_CLOSE)) {
+ || conn->forward
+ || (conn->connection
+ && conn->connection->keepalive == AP_CONN_CLOSE)
+ || worker->s->disablereuse) {
socket_cleanup(conn);
conn->close = 0;
}
+ else if (conn->is_ssl) {
+ /* Unbind/reset the SSL connection dir config (sslconn->dc) from
+ * r->per_dir_config, r will likely get destroyed before this proxy
+ * conn is reused.
+ */
+ ap_proxy_ssl_engine(conn->connection, worker->section_config, 1);
+ }
if (worker->s->hmax && worker->cp->res) {
conn->inreslist = 1;
apr_reslist_release(worker->cp->res, (void *)conn);
}
- else
- {
+ else {
worker->cp->conn = conn;
}
-
- /* Always return the SUCCESS */
- return APR_SUCCESS;
}
/* DEPRECATED */
@@ -1584,35 +1726,21 @@ PROXY_DECLARE(apr_status_t) ap_proxy_ssl_connection_cleanup(proxy_conn_rec *conn
static apr_status_t connection_constructor(void **resource, void *params,
apr_pool_t *pool)
{
- apr_pool_t *ctx;
- apr_pool_t *scpool;
+ apr_pool_t *p;
proxy_conn_rec *conn;
proxy_worker *worker = (proxy_worker *)params;
/*
- * Create the subpool for each connection
+ * Create a subpool for each connection
* This keeps the memory consumption constant
- * when disconnecting from backend.
- */
- apr_pool_create(&ctx, pool);
- apr_pool_tag(ctx, "proxy_conn_pool");
- /*
- * Create another subpool that manages the data for the
- * socket and the connection member of the proxy_conn_rec struct as we
- * destroy this data more frequently than other data in the proxy_conn_rec
- * struct like hostname and addr (at least in the case where we have
- * keepalive connections that timed out).
+ * when it's recycled or destroyed.
*/
- apr_pool_create(&scpool, ctx);
- apr_pool_tag(scpool, "proxy_conn_scpool");
- conn = apr_pcalloc(ctx, sizeof(proxy_conn_rec));
-
- conn->pool = ctx;
- conn->scpool = scpool;
- conn->worker = worker;
+ apr_pool_create(&p, pool);
+ apr_pool_tag(p, "proxy_conn_pool");
+ conn = connection_make(p, worker);
conn->inreslist = 1;
- *resource = conn;
+ *resource = conn;
return APR_SUCCESS;
}
@@ -1623,7 +1751,7 @@ static apr_status_t connection_destructor(void *resource, void *params,
proxy_worker *worker = params;
/* Destroy the pool only if not called from reslist_destroy */
- if (worker->cp->pool) {
+ if (worker->cp) {
proxy_conn_rec *conn = resource;
apr_pool_destroy(conn->pool);
}
@@ -1640,15 +1768,73 @@ PROXY_DECLARE(char *) ap_proxy_worker_name(apr_pool_t *p,
{
if (!(*worker->s->uds_path) || !p) {
/* just in case */
- return worker->s->name;
+ return worker->s->name_ex;
}
- return apr_pstrcat(p, "unix:", worker->s->uds_path, "|", worker->s->name, NULL);
+ return apr_pstrcat(p, "unix:", worker->s->uds_path, "|", worker->s->name_ex, NULL);
}
-PROXY_DECLARE(proxy_worker *) ap_proxy_get_worker(apr_pool_t *p,
- proxy_balancer *balancer,
- proxy_server_conf *conf,
- const char *url)
+PROXY_DECLARE(int) ap_proxy_worker_can_upgrade(apr_pool_t *p,
+ const proxy_worker *worker,
+ const char *upgrade,
+ const char *dflt)
+{
+ /* Find in worker->s->upgrade list (if any) */
+ const char *worker_upgrade = worker->s->upgrade;
+ if (*worker_upgrade) {
+ return (strcmp(worker_upgrade, "*") == 0
+ || ap_cstr_casecmp(worker_upgrade, upgrade) == 0
+ || ap_find_token(p, worker_upgrade, upgrade));
+ }
+
+ /* Compare to the provided default (if any) */
+ return (dflt && ap_cstr_casecmp(dflt, upgrade) == 0);
+}
+
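As a quick illustration of the lookup above (a sketch only; r and worker are assumed to be in scope and the protocol names are made up), the worker-level upgrade= list wins when present, the comparison is case-insensitive, "*" matches anything, and the caller-supplied default only applies when the worker has no upgrade= parameter:

    /* Worker configured with upgrade=websocket */
    if (ap_proxy_worker_can_upgrade(r->pool, worker, "WebSocket", NULL)) {
        /* non-zero: case-insensitive match against the worker's list */
    }

    /* Worker with no upgrade= parameter: fall back to the scheme default */
    if (ap_proxy_worker_can_upgrade(r->pool, worker, "websocket", "WebSocket")) {
        /* non-zero: matched the "WebSocket" default passed by the caller */
    }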
+/*
+ * Taken from ap_strcmp_match() :
+ * Match = 0, NoMatch = 1, Abort = -1, Inval = -2
+ * Based loosely on sections of wildmat.c by Rich Salz
+ * Hmmm... shouldn't this really go component by component?
+ *
+ * Adds handling of the "\<any>" => "<any>" unescaping.
+ */
+static int ap_proxy_strcmp_ematch(const char *str, const char *expected)
+{
+ apr_size_t x, y;
+
+ for (x = 0, y = 0; expected[y]; ++y, ++x) {
+ if (expected[y] == '$' && apr_isdigit(expected[y + 1])) {
+ do {
+ y += 2;
+ } while (expected[y] == '$' && apr_isdigit(expected[y + 1]));
+ if (!expected[y])
+ return 0;
+ while (str[x]) {
+ int ret;
+ if ((ret = ap_proxy_strcmp_ematch(&str[x++], &expected[y])) != 1)
+ return ret;
+ }
+ return -1;
+ }
+ else if (!str[x]) {
+ return -1;
+ }
+ else if (expected[y] == '\\' && !expected[++y]) {
+ /* NUL is an invalid char! */
+ return -2;
+ }
+ if (str[x] != expected[y])
+ return 1;
+ }
+ /* We got all the way through the worker path without a difference */
+ return 0;
+}
+
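Since ap_proxy_strcmp_ematch() is file-static, the calls below are only a sketch of its semantics as seen from within proxy_util.c, with made-up worker names and URLs; 0 means match, 1 means no match, and a "$<digit>" in the expected (worker) name stands for whatever the corresponding ProxyPassMatch group captured:

    ap_proxy_strcmp_ematch("http://backend/app/x", "http://backend/$1");    /* 0 */
    ap_proxy_strcmp_ematch("http://backend/",      "http://backend/$1");    /* 0: empty remainder still matches */
    ap_proxy_strcmp_ematch("http://other/app",     "http://backend/$1");    /* 1: literal prefix differs */
    ap_proxy_strcmp_ematch("http://backend/a$b",   "http://backend/a\\$b"); /* 0: "\\$" unescapes to a literal '$' */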
+PROXY_DECLARE(proxy_worker *) ap_proxy_get_worker_ex(apr_pool_t *p,
+ proxy_balancer *balancer,
+ proxy_server_conf *conf,
+ const char *url,
+ unsigned int mask)
{
proxy_worker *worker;
proxy_worker *max_worker = NULL;
@@ -1664,7 +1850,12 @@ PROXY_DECLARE(proxy_worker *) ap_proxy_get_worker(apr_pool_t *p,
return NULL;
}
- url = ap_proxy_de_socketfy(p, url);
+ if (!(mask & AP_PROXY_WORKER_NO_UDS)) {
+ url = ap_proxy_de_socketfy(p, url);
+ if (!url) {
+ return NULL;
+ }
+ }
c = ap_strchr_c(url, ':');
if (c == NULL || c[1] != '/' || c[2] != '/' || c[3] == '\0') {
@@ -1674,6 +1865,11 @@ PROXY_DECLARE(proxy_worker *) ap_proxy_get_worker(apr_pool_t *p,
url_length = strlen(url);
url_copy = apr_pstrmemdup(p, url, url_length);
+ /* Default to lookup for both _PREFIX and _MATCH workers */
+ if (!(mask & (AP_PROXY_WORKER_IS_PREFIX | AP_PROXY_WORKER_IS_MATCH))) {
+ mask |= AP_PROXY_WORKER_IS_PREFIX | AP_PROXY_WORKER_IS_MATCH;
+ }
+
/*
* We need to find the start of the path and
* therefore we know the length of the scheme://hostname/
@@ -1704,22 +1900,35 @@ PROXY_DECLARE(proxy_worker *) ap_proxy_get_worker(apr_pool_t *p,
proxy_worker **workers = (proxy_worker **)balancer->workers->elts;
for (i = 0; i < balancer->workers->nelts; i++, workers++) {
worker = *workers;
- if ( ((worker_name_length = strlen(worker->s->name)) <= url_length)
+ if ( ((worker_name_length = strlen(worker->s->name_ex)) <= url_length)
&& (worker_name_length >= min_match)
&& (worker_name_length > max_match)
- && (strncmp(url_copy, worker->s->name, worker_name_length) == 0) ) {
+ && (worker->s->is_name_matchable
+ || ((mask & AP_PROXY_WORKER_IS_PREFIX)
+ && strncmp(url_copy, worker->s->name_ex,
+ worker_name_length) == 0))
+ && (!worker->s->is_name_matchable
+ || ((mask & AP_PROXY_WORKER_IS_MATCH)
+ && ap_proxy_strcmp_ematch(url_copy,
+ worker->s->name_ex) == 0)) ) {
max_worker = worker;
max_match = worker_name_length;
}
-
}
} else {
worker = (proxy_worker *)conf->workers->elts;
for (i = 0; i < conf->workers->nelts; i++, worker++) {
- if ( ((worker_name_length = strlen(worker->s->name)) <= url_length)
+ if ( ((worker_name_length = strlen(worker->s->name_ex)) <= url_length)
&& (worker_name_length >= min_match)
&& (worker_name_length > max_match)
- && (strncmp(url_copy, worker->s->name, worker_name_length) == 0) ) {
+ && (worker->s->is_name_matchable
+ || ((mask & AP_PROXY_WORKER_IS_PREFIX)
+ && strncmp(url_copy, worker->s->name_ex,
+ worker_name_length) == 0))
+ && (!worker->s->is_name_matchable
+ || ((mask & AP_PROXY_WORKER_IS_MATCH)
+ && ap_proxy_strcmp_ematch(url_copy,
+ worker->s->name_ex) == 0)) ) {
max_worker = worker;
max_match = worker_name_length;
}
@@ -1729,6 +1938,14 @@ PROXY_DECLARE(proxy_worker *) ap_proxy_get_worker(apr_pool_t *p,
return max_worker;
}
+PROXY_DECLARE(proxy_worker *) ap_proxy_get_worker(apr_pool_t *p,
+ proxy_balancer *balancer,
+ proxy_server_conf *conf,
+ const char *url)
+{
+ return ap_proxy_get_worker_ex(p, balancer, conf, url, 0);
+}
+
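A short sketch of how the mask is meant to be used (the URL is hypothetical): a mask of 0 keeps the historical behaviour, i.e. both literal (prefix) and ProxyPassMatch workers are candidates and any "unix:...|" prefix is stripped first, while AP_PROXY_WORKER_NO_UDS skips the de-socketfying step, as the forward-proxy path in ap_proxy_pre_request() now does:

    /* Historical lookup: prefix and match workers, UDS prefix stripped */
    worker = ap_proxy_get_worker(r->pool, NULL, conf, url);

    /* Equivalent explicit form */
    worker = ap_proxy_get_worker_ex(r->pool, NULL, conf, url,
                                    AP_PROXY_WORKER_IS_PREFIX |
                                    AP_PROXY_WORKER_IS_MATCH);

    /* Forward-proxy lookup: take the URL as given, no UDS rewriting */
    worker = ap_proxy_get_worker_ex(r->pool, NULL, conf, url,
                                    AP_PROXY_WORKER_NO_UDS);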
/*
* To create a worker from scratch first we define the
* specifics of the worker; this is all local data.
@@ -1736,46 +1953,98 @@ PROXY_DECLARE(proxy_worker *) ap_proxy_get_worker(apr_pool_t *p,
* shared. This allows for dynamic addition during
* config and runtime.
*/
-PROXY_DECLARE(char *) ap_proxy_define_worker(apr_pool_t *p,
+PROXY_DECLARE(char *) ap_proxy_define_worker_ex(apr_pool_t *p,
proxy_worker **worker,
proxy_balancer *balancer,
proxy_server_conf *conf,
const char *url,
- int do_malloc)
+ unsigned int mask)
{
- int rv;
- apr_uri_t uri, urisock;
+ apr_status_t rv;
proxy_worker_shared *wshared;
- char *ptr, *sockpath = NULL;
+ const char *ptr = NULL, *sockpath = NULL, *pdollars = NULL;
+ apr_port_t port_of_scheme;
+ int address_not_reusable = 0;
+ apr_uri_t uri;
/*
* Look to see if we are using UDS:
* require format: unix:/path/foo/bar.sock|http://ignored/path2/
* This results in talking http to the socket at /path/foo/bar.sock
*/
- ptr = ap_strchr((char *)url, '|');
- if (ptr) {
- *ptr = '\0';
- rv = apr_uri_parse(p, url, &urisock);
- if (rv == APR_SUCCESS && !strcasecmp(urisock.scheme, "unix")) {
- sockpath = ap_runtime_dir_relative(p, urisock.path);;
- url = ptr+1; /* so we get the scheme for the uds */
+ if (!ap_cstr_casecmpn(url, "unix:", 5)
+ && (ptr = ap_strchr_c(url + 5, '|'))) {
+ rv = apr_uri_parse(p, apr_pstrmemdup(p, url, ptr - url), &uri);
+ if (rv == APR_SUCCESS) {
+            sockpath = ap_runtime_dir_relative(p, uri.path);
+ ptr++; /* so we get the scheme for the uds */
}
else {
- *ptr = '|';
+ ptr = url;
}
}
- rv = apr_uri_parse(p, url, &uri);
+ else {
+ ptr = url;
+ }
+
+ if (mask & AP_PROXY_WORKER_IS_MATCH) {
+ /* apr_uri_parse() will accept the '$' sign anywhere in the URL but
+ * in the :port part, and we don't want scheme://host:port$1$2/path
+ * to fail (e.g. "ProxyPassMatch ^/(a|b)(/.*)? http://host:port$2").
+ * So we trim all the $n from the :port and prepend them in uri.path
+ * afterward for apr_uri_unparse() to restore the original URL below.
+ * If a dollar substitution is found in the hostname[:port] part of
+ * the URL, reusing address and connections in the same worker is not
+ * possible (the current implementation of active connections cache
+ * handles/assumes a single origin server:port per worker only), so
+ * we set address_not_reusable here during parsing to take that into
+ * account in the worker settings below.
+ */
+#define IS_REF(x) (x[0] == '$' && apr_isdigit(x[1]))
+ const char *pos = ap_strstr_c(ptr, "://");
+ if (pos) {
+ pos += 3;
+ while (*pos && *pos != ':' && *pos != '/') {
+ if (*pos == '$') {
+ address_not_reusable = 1;
+ }
+ pos++;
+ }
+ if (*pos == ':') {
+ pos++;
+ while (*pos && !IS_REF(pos) && *pos != '/') {
+ pos++;
+ }
+ if (IS_REF(pos)) {
+ struct iovec vec[2];
+ const char *path = pos + 2;
+ while (*path && *path != '/') {
+ path++;
+ }
+ pdollars = apr_pstrmemdup(p, pos, path - pos);
+ vec[0].iov_base = (void *)ptr;
+ vec[0].iov_len = pos - ptr;
+ vec[1].iov_base = (void *)path;
+ vec[1].iov_len = strlen(path);
+ ptr = apr_pstrcatv(p, vec, 2, NULL);
+ address_not_reusable = 1;
+ }
+ }
+ }
+#undef IS_REF
+ }
+ /* Normalize the url (worker name) */
+ rv = apr_uri_parse(p, ptr, &uri);
if (rv != APR_SUCCESS) {
return apr_pstrcat(p, "Unable to parse URL: ", url, NULL);
}
if (!uri.scheme) {
return apr_pstrcat(p, "URL must be absolute!: ", url, NULL);
}
- /* allow for unix:/path|http: */
if (!uri.hostname) {
if (sockpath) {
+ /* allow for unix:/path|http: */
uri.hostname = "localhost";
}
else {
@@ -1786,6 +2055,16 @@ PROXY_DECLARE(char *) ap_proxy_define_worker(apr_pool_t *p,
ap_str_tolower(uri.hostname);
}
ap_str_tolower(uri.scheme);
+ port_of_scheme = ap_proxy_port_of_scheme(uri.scheme);
+ if (uri.port && uri.port == port_of_scheme) {
+ uri.port = 0;
+ }
+ if (pdollars) {
+ /* Restore/prepend pdollars into the path. */
+ uri.path = apr_pstrcat(p, pdollars, uri.path, NULL);
+ }
+ ptr = apr_uri_unparse(p, &uri, APR_URI_UNP_REVEALPASSWORD);
+
/*
* Workers can be associated w/ balancers or on their
* own; ie: the generic reverse-proxy or a worker
@@ -1809,26 +2088,25 @@ PROXY_DECLARE(char *) ap_proxy_define_worker(apr_pool_t *p,
/* we need to allocate space here */
*worker = apr_palloc(p, sizeof(proxy_worker));
}
-
memset(*worker, 0, sizeof(proxy_worker));
+
/* right here we just want to tuck away the worker info.
* if called during config, we don't have shm setup yet,
* so just note the info for later. */
- if (do_malloc)
+ if (mask & AP_PROXY_WORKER_IS_MALLOCED)
wshared = ap_malloc(sizeof(proxy_worker_shared)); /* will be freed ap_proxy_share_worker */
else
wshared = apr_palloc(p, sizeof(proxy_worker_shared));
-
memset(wshared, 0, sizeof(proxy_worker_shared));
- wshared->port = (uri.port ? uri.port : ap_proxy_port_of_scheme(uri.scheme));
- if (uri.port && uri.port == ap_proxy_port_of_scheme(uri.scheme)) {
- uri.port = 0;
+ if (PROXY_STRNCPY(wshared->name_ex, ptr) != APR_SUCCESS) {
+ ap_log_error(APLOG_MARK, APLOG_ERR, 0, ap_server_conf, APLOGNO(10366)
+ "Alert! worker name (%s) too long; truncated to: %s", ptr, wshared->name_ex);
}
- ptr = apr_uri_unparse(p, &uri, APR_URI_UNP_REVEALPASSWORD);
if (PROXY_STRNCPY(wshared->name, ptr) != APR_SUCCESS) {
- ap_log_error(APLOG_MARK, APLOG_ERR, 0, ap_server_conf, APLOGNO(02808)
- "Alert! worker name (%s) too long; truncated to: %s", ptr, wshared->name);
+ ap_log_error(APLOG_MARK, APLOG_INFO, 0, ap_server_conf, APLOGNO(010118)
+ "worker name (%s) too long; truncated for legacy modules that do not use "
+ "proxy_worker_shared->name_ex: %s", ptr, wshared->name);
}
if (PROXY_STRNCPY(wshared->scheme, uri.scheme) != APR_SUCCESS) {
ap_log_error(APLOG_MARK, APLOG_ERR, 0, ap_server_conf, APLOGNO(010117)
@@ -1842,17 +2120,44 @@ PROXY_DECLARE(char *) ap_proxy_define_worker(apr_pool_t *p,
"worker hostname (%s) too long; truncated for legacy modules that do not use "
"proxy_worker_shared->hostname_ex: %s", uri.hostname, wshared->hostname);
}
+ wshared->port = (uri.port) ? uri.port : port_of_scheme;
wshared->flush_packets = flush_off;
wshared->flush_wait = PROXY_FLUSH_WAIT;
- wshared->is_address_reusable = 1;
+ wshared->address_ttl = (address_not_reusable) ? 0 : -1;
+ wshared->is_address_reusable = (address_not_reusable == 0);
+ wshared->disablereuse = (address_not_reusable != 0);
wshared->lbfactor = 100;
wshared->passes = 1;
wshared->fails = 1;
wshared->interval = apr_time_from_sec(HCHECK_WATHCHDOG_DEFAULT_INTERVAL);
wshared->smax = -1;
- wshared->hash.def = ap_proxy_hashfunc(wshared->name, PROXY_HASHFUNC_DEFAULT);
- wshared->hash.fnv = ap_proxy_hashfunc(wshared->name, PROXY_HASHFUNC_FNV);
- wshared->was_malloced = (do_malloc != 0);
+ wshared->hash.def = ap_proxy_hashfunc(wshared->name_ex, PROXY_HASHFUNC_DEFAULT);
+ wshared->hash.fnv = ap_proxy_hashfunc(wshared->name_ex, PROXY_HASHFUNC_FNV);
+ wshared->was_malloced = (mask & AP_PROXY_WORKER_IS_MALLOCED) != 0;
+ if (mask & AP_PROXY_WORKER_IS_MATCH) {
+ wshared->is_name_matchable = 1;
+
+ /* Before AP_PROXY_WORKER_IS_MATCH (< 2.4.47), a regex worker with
+ * dollar substitution was never matched against any actual URL, thus
+     * the requests fell through the generic worker. Now if a ProxyPassMatch
+ * matches, a worker (and its parameters) is always used to determine
+ * the properties of the connection with the origin server. So for
+ * instance the same "timeout=" will be enforced for all the requests
+     * matched by the same ProxyPassMatch worker, which is an improvement
+ * compared to the global/vhost [Proxy]Timeout applied by the generic
+ * worker. Likewise, address and connection reuse is the default for
+     * a ProxyPassMatch worker with no dollar substitution, just like a
+ * "normal" worker. However to avoid DNS and connection reuse compat
+ * issues, connection reuse is disabled by default if there is any
+ * substitution in the uri-path (an explicit enablereuse=on can still
+ * opt-in), and reuse is even disabled definitively for substitutions
+ * happening in the hostname[:port] (is_address_reusable was unset
+     * above so it will prevent enablereuse=on from applying anyway).
+ */
+ if (ap_strchr_c(wshared->name, '$')) {
+ wshared->disablereuse = 1;
+ }
+ }
if (sockpath) {
if (PROXY_STRNCPY(wshared->uds_path, sockpath) != APR_SUCCESS) {
return apr_psprintf(p, "worker uds path (%s) too long", sockpath);
@@ -1875,6 +2180,33 @@ PROXY_DECLARE(char *) ap_proxy_define_worker(apr_pool_t *p,
return NULL;
}
+PROXY_DECLARE(char *) ap_proxy_define_worker(apr_pool_t *p,
+ proxy_worker **worker,
+ proxy_balancer *balancer,
+ proxy_server_conf *conf,
+ const char *url,
+ int do_malloc)
+{
+ return ap_proxy_define_worker_ex(p, worker, balancer, conf, url,
+ AP_PROXY_WORKER_IS_PREFIX |
+ (do_malloc ? AP_PROXY_WORKER_IS_MALLOCED
+ : 0));
+}
+
+/* DEPRECATED */
+PROXY_DECLARE(char *) ap_proxy_define_match_worker(apr_pool_t *p,
+ proxy_worker **worker,
+ proxy_balancer *balancer,
+ proxy_server_conf *conf,
+ const char *url,
+ int do_malloc)
+{
+ return ap_proxy_define_worker_ex(p, worker, balancer, conf, url,
+ AP_PROXY_WORKER_IS_MATCH |
+ (do_malloc ? AP_PROXY_WORKER_IS_MALLOCED
+ : 0));
+}
+
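For reference, a sketch (with a made-up URL, assuming a cmd_parms-style pool) of how the legacy entry points now map onto the mask-based one: a ProxyPass target becomes a prefix worker, a ProxyPassMatch target a match worker, and do_malloc simply turns into AP_PROXY_WORKER_IS_MALLOCED:

    char *err;

    /* What ap_proxy_define_worker(..., url, 1) amounts to */
    err = ap_proxy_define_worker_ex(cmd->pool, &worker, NULL, conf,
                                    "http://backend.example:8080/app",
                                    AP_PROXY_WORKER_IS_PREFIX |
                                    AP_PROXY_WORKER_IS_MALLOCED);

    /* A ProxyPassMatch-style worker; with a '$' in its path, connection
     * reuse is disabled by default per the comment above (enablereuse=on
     * can still opt back in).
     */
    err = ap_proxy_define_worker_ex(cmd->pool, &worker, NULL, conf,
                                    "http://backend.example:8080/$1",
                                    AP_PROXY_WORKER_IS_MATCH);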
/*
* Create an already defined worker and free up memory
*/
@@ -1899,6 +2231,7 @@ PROXY_DECLARE(apr_status_t) ap_proxy_share_worker(proxy_worker *worker, proxy_wo
if (APLOGdebug(ap_server_conf)) {
apr_pool_t *pool;
apr_pool_create(&pool, ap_server_conf->process->pool);
+ apr_pool_tag(pool, "proxy_worker_name");
ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, ap_server_conf, APLOGNO(02338)
"%s shm[%d] (0x%pp) for worker: %s", action, i, (void *)shm,
ap_proxy_worker_name(pool, worker));
@@ -1929,12 +2262,23 @@ PROXY_DECLARE(apr_status_t) ap_proxy_initialize_worker(proxy_worker *worker, ser
if (!worker->s->retry_set) {
worker->s->retry = apr_time_from_sec(PROXY_WORKER_DEFAULT_RETRY);
}
- /* By default address is reusable unless DisableReuse is set */
- if (worker->s->disablereuse) {
+    /* Consistently set address and connection reusability: when reuse
+ * is disabled by configuration, or when the address is known already
+ * to not be reusable for this worker (in any case, thus ignore/force
+ * DisableReuse).
+ */
+ if (!worker->s->address_ttl || (!worker->s->address_ttl_set
+ && worker->s->disablereuse)) {
worker->s->is_address_reusable = 0;
}
- else {
- worker->s->is_address_reusable = 1;
+ if (!worker->s->is_address_reusable && !worker->s->disablereuse) {
+ /* Explicit enablereuse=on can't work in this case, warn user. */
+ if (worker->s->disablereuse_set) {
+ ap_log_error(APLOG_MARK, APLOG_WARNING, 0, s, APLOGNO(10400)
+ "enablereuse/disablereuse ignored for worker %s",
+ ap_proxy_worker_name(p, worker));
+ }
+ worker->s->disablereuse = 1;
}
/*
@@ -1979,67 +2323,71 @@ PROXY_DECLARE(apr_status_t) ap_proxy_initialize_worker(proxy_worker *worker, ser
ap_proxy_worker_name(p, worker));
}
else {
- ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s, APLOGNO(00927)
- "initializing worker %s local",
- ap_proxy_worker_name(p, worker));
apr_global_mutex_lock(proxy_mutex);
- /* Now init local worker data */
- if (worker->tmutex == NULL) {
- rv = apr_thread_mutex_create(&(worker->tmutex), APR_THREAD_MUTEX_DEFAULT, p);
- if (rv != APR_SUCCESS) {
- ap_log_error(APLOG_MARK, APLOG_ERR, 0, s, APLOGNO(00928)
- "can not create worker thread mutex");
+ /* Check again after we got the lock if we are still uninitialized */
+ if (!(AP_VOLATILIZE_T(unsigned int, worker->local_status) & PROXY_WORKER_INITIALIZED)) {
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s, APLOGNO(00927)
+ "initializing worker %s local",
+ ap_proxy_worker_name(p, worker));
+ /* Now init local worker data */
+#if APR_HAS_THREADS
+ if (worker->tmutex == NULL) {
+ rv = apr_thread_mutex_create(&(worker->tmutex), APR_THREAD_MUTEX_DEFAULT, p);
+ if (rv != APR_SUCCESS) {
+ ap_log_error(APLOG_MARK, APLOG_ERR, rv, s, APLOGNO(00928)
+ "can not create worker thread mutex");
+ apr_global_mutex_unlock(proxy_mutex);
+ return rv;
+ }
+ }
+#endif
+ if (worker->cp == NULL)
+ init_conn_pool(p, worker, s);
+ if (worker->cp == NULL) {
+ ap_log_error(APLOG_MARK, APLOG_ERR, 0, s, APLOGNO(00929)
+ "can not create connection pool");
apr_global_mutex_unlock(proxy_mutex);
- return rv;
+ return APR_EGENERAL;
}
- }
- if (worker->cp == NULL)
- init_conn_pool(p, worker);
- if (worker->cp == NULL) {
- ap_log_error(APLOG_MARK, APLOG_ERR, 0, s, APLOGNO(00929)
- "can not create connection pool");
- apr_global_mutex_unlock(proxy_mutex);
- return APR_EGENERAL;
- }
- if (worker->s->hmax) {
- rv = apr_reslist_create(&(worker->cp->res),
- worker->s->min, worker->s->smax,
- worker->s->hmax, worker->s->ttl,
- connection_constructor, connection_destructor,
- worker, worker->cp->pool);
-
- apr_pool_cleanup_register(worker->cp->pool, (void *)worker,
- conn_pool_cleanup,
- apr_pool_cleanup_null);
-
- ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s, APLOGNO(00930)
- "initialized pool in child %" APR_PID_T_FMT " for (%s) min=%d max=%d smax=%d",
- getpid(), worker->s->hostname_ex, worker->s->min,
- worker->s->hmax, worker->s->smax);
+ if (worker->s->hmax) {
+ rv = apr_reslist_create(&(worker->cp->res),
+ worker->s->min, worker->s->smax,
+ worker->s->hmax, worker->s->ttl,
+ connection_constructor, connection_destructor,
+ worker, worker->cp->pool);
+
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s, APLOGNO(00930)
+ "initialized pool in child %" APR_PID_T_FMT " for (%s:%d) min=%d max=%d smax=%d",
+ getpid(), worker->s->hostname_ex, (int)worker->s->port,
+ worker->s->min, worker->s->hmax, worker->s->smax);
+
+ /* Set the acquire timeout */
+ if (rv == APR_SUCCESS && worker->s->acquire_set) {
+ apr_reslist_timeout_set(worker->cp->res, worker->s->acquire);
+ }
- /* Set the acquire timeout */
- if (rv == APR_SUCCESS && worker->s->acquire_set) {
- apr_reslist_timeout_set(worker->cp->res, worker->s->acquire);
}
+ else {
+ void *conn;
- }
- else {
- void *conn;
-
- rv = connection_constructor(&conn, worker, worker->cp->pool);
- worker->cp->conn = conn;
+ rv = connection_constructor(&conn, worker, worker->cp->pool);
+ worker->cp->conn = conn;
- ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s, APLOGNO(00931)
- "initialized single connection worker in child %" APR_PID_T_FMT " for (%s)",
- getpid(), worker->s->hostname_ex);
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, rv, s, APLOGNO(00931)
+ "initialized single connection worker in child %" APR_PID_T_FMT " for (%s:%d)",
+ getpid(), worker->s->hostname_ex,
+ (int)worker->s->port);
+ }
+ if (rv == APR_SUCCESS) {
+ worker->local_status |= (PROXY_WORKER_INITIALIZED);
+ }
}
apr_global_mutex_unlock(proxy_mutex);
}
if (rv == APR_SUCCESS) {
worker->s->status |= (PROXY_WORKER_INITIALIZED);
- worker->local_status |= (PROXY_WORKER_INITIALIZED);
}
return rv;
}
@@ -2050,8 +2398,9 @@ static int ap_proxy_retry_worker(const char *proxy_function, proxy_worker *worke
if (worker->s->status & PROXY_WORKER_IN_ERROR) {
if (PROXY_WORKER_IS(worker, PROXY_WORKER_STOPPED)) {
ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s, APLOGNO(3305)
- "%s: Won't retry worker (%s): stopped",
- proxy_function, worker->s->hostname_ex);
+ "%s: Won't retry worker (%s:%d): stopped",
+ proxy_function, worker->s->hostname_ex,
+ (int)worker->s->port);
return DECLINED;
}
if ((worker->s->status & PROXY_WORKER_IGNORE_ERRORS)
@@ -2059,14 +2408,16 @@ static int ap_proxy_retry_worker(const char *proxy_function, proxy_worker *worke
++worker->s->retries;
worker->s->status &= ~PROXY_WORKER_IN_ERROR;
ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s, APLOGNO(00932)
- "%s: worker for (%s) has been marked for retry",
- proxy_function, worker->s->hostname_ex);
+ "%s: worker for (%s:%d) has been marked for retry",
+ proxy_function, worker->s->hostname_ex,
+ (int)worker->s->port);
return OK;
}
else {
ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s, APLOGNO(00933)
- "%s: too soon to retry worker for (%s)",
- proxy_function, worker->s->hostname_ex);
+ "%s: too soon to retry worker for (%s:%d)",
+ proxy_function, worker->s->hostname_ex,
+ (int)worker->s->port);
return DECLINED;
}
}
@@ -2080,33 +2431,43 @@ static int ap_proxy_retry_worker(const char *proxy_function, proxy_worker *worke
* were passed a UDS url (eg: from mod_proxy) and adjust uds_path
* as required.
*/
-static void fix_uds_filename(request_rec *r, char **url)
+static int fix_uds_filename(request_rec *r, char **url)
{
- char *ptr, *ptr2;
- if (!r || !r->filename) return;
+ char *uds_url = r->filename + 6, *origin_url;
if (!strncmp(r->filename, "proxy:", 6) &&
- (ptr2 = ap_strcasestr(r->filename, "unix:")) &&
- (ptr = ap_strchr(ptr2, '|'))) {
+ !ap_cstr_casecmpn(uds_url, "unix:", 5) &&
+ (origin_url = ap_strchr(uds_url + 5, '|'))) {
+ char *uds_path = NULL;
+ apr_size_t url_len;
apr_uri_t urisock;
apr_status_t rv;
- *ptr = '\0';
- rv = apr_uri_parse(r->pool, ptr2, &urisock);
- if (rv == APR_SUCCESS) {
- char *rurl = ptr+1;
- char *sockpath = ap_runtime_dir_relative(r->pool, urisock.path);
- apr_table_setn(r->notes, "uds_path", sockpath);
- *url = apr_pstrdup(r->pool, rurl); /* so we get the scheme for the uds */
- /* r->filename starts w/ "proxy:", so add after that */
- memmove(r->filename+6, rurl, strlen(rurl)+1);
- ap_log_rerror(APLOG_MARK, APLOG_TRACE2, 0, r,
- "*: rewrite of url due to UDS(%s): %s (%s)",
- sockpath, *url, r->filename);
+
+ *origin_url = '\0';
+ rv = apr_uri_parse(r->pool, uds_url, &urisock);
+ *origin_url++ = '|';
+
+ if (rv == APR_SUCCESS && urisock.path && (!urisock.hostname
+ || !urisock.hostname[0])) {
+ uds_path = ap_runtime_dir_relative(r->pool, urisock.path);
}
- else {
- *ptr = '|';
+ if (!uds_path) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(10292)
+ "Invalid proxy UDS filename (%s)", r->filename);
+ return 0;
}
+ apr_table_setn(r->notes, "uds_path", uds_path);
+
+ /* Remove the UDS path from *url and r->filename */
+ url_len = strlen(origin_url);
+ *url = apr_pstrmemdup(r->pool, origin_url, url_len);
+ memcpy(uds_url, *url, url_len + 1);
+
+ ap_log_rerror(APLOG_MARK, APLOG_TRACE2, 0, r,
+ "*: rewrite of url due to UDS(%s): %s (%s)",
+ uds_path, *url, r->filename);
}
+ return 1;
}
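To make the rewrite above concrete, here is a sketch of what fix_uds_filename() does for a hypothetical unix: target (the function is static, so this only illustrates its effect from within proxy_util.c): the socket path moves into the "uds_path" request note while *url and r->filename keep only the real scheme:

    char *url = apr_pstrdup(r->pool, "unix:/run/app.sock|http://localhost/api/x");
    r->filename = apr_pstrcat(r->pool, "proxy:", url, NULL);

    if (fix_uds_filename(r, &url)) {
        /* url                                  == "http://localhost/api/x"
         * r->filename                          == "proxy:http://localhost/api/x"
         * apr_table_get(r->notes, "uds_path")  == "/run/app.sock"
         *                                         (via ap_runtime_dir_relative)
         */
    }
    /* A malformed unix: URL (e.g. one carrying a hostname) logs
     * APLOGNO(10292) and returns 0, which the callers turn into a 500. */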
PROXY_DECLARE(int) ap_proxy_pre_request(proxy_worker **worker,
@@ -2118,20 +2479,22 @@ PROXY_DECLARE(int) ap_proxy_pre_request(proxy_worker **worker,
access_status = proxy_run_pre_request(worker, balancer, r, conf, url);
if (access_status == DECLINED && *balancer == NULL) {
- *worker = ap_proxy_get_worker(r->pool, NULL, conf, *url);
+ const int forward = (r->proxyreq == PROXYREQ_PROXY);
+ *worker = ap_proxy_get_worker_ex(r->pool, NULL, conf, *url,
+ forward ? AP_PROXY_WORKER_NO_UDS : 0);
if (*worker) {
ap_log_rerror(APLOG_MARK, APLOG_TRACE2, 0, r,
"%s: found worker %s for %s",
- (*worker)->s->scheme, (*worker)->s->name, *url);
- *balancer = NULL;
- fix_uds_filename(r, url);
+ (*worker)->s->scheme, (*worker)->s->name_ex, *url);
+ if (!forward && !fix_uds_filename(r, url)) {
+ return HTTP_INTERNAL_SERVER_ERROR;
+ }
access_status = OK;
}
- else if (r->proxyreq == PROXYREQ_PROXY) {
+ else if (forward) {
if (conf->forward) {
ap_log_rerror(APLOG_MARK, APLOG_TRACE2, 0, r,
"*: found forward proxy worker for %s", *url);
- *balancer = NULL;
*worker = conf->forward;
access_status = OK;
/*
@@ -2145,8 +2508,8 @@ PROXY_DECLARE(int) ap_proxy_pre_request(proxy_worker **worker,
else if (r->proxyreq == PROXYREQ_REVERSE) {
if (conf->reverse) {
ap_log_rerror(APLOG_MARK, APLOG_TRACE2, 0, r,
- "*: using default reverse proxy worker for %s (no keepalive)", *url);
- *balancer = NULL;
+ "*: using default reverse proxy worker for %s "
+ "(no keepalive)", *url);
*worker = conf->reverse;
access_status = OK;
/*
@@ -2155,7 +2518,9 @@ PROXY_DECLARE(int) ap_proxy_pre_request(proxy_worker **worker,
* regarding the Connection header in the request.
*/
apr_table_setn(r->subprocess_env, "proxy-nokeepalive", "1");
- fix_uds_filename(r, url);
+ if (!fix_uds_filename(r, url)) {
+ return HTTP_INTERNAL_SERVER_ERROR;
+ }
}
}
}
@@ -2287,8 +2652,9 @@ PROXY_DECLARE(int) ap_proxy_acquire_connection(const char *proxy_function,
if (!PROXY_WORKER_IS_USABLE(worker)) {
ap_log_error(APLOG_MARK, APLOG_ERR, 0, s, APLOGNO(00940)
- "%s: disabled connection for (%s)",
- proxy_function, worker->s->hostname_ex);
+ "%s: disabled connection for (%s:%d)",
+ proxy_function, worker->s->hostname_ex,
+ (int)worker->s->port);
return HTTP_SERVICE_UNAVAILABLE;
}
}
@@ -2299,24 +2665,26 @@ PROXY_DECLARE(int) ap_proxy_acquire_connection(const char *proxy_function,
else {
/* create the new connection if the previous was destroyed */
if (!worker->cp->conn) {
- connection_constructor((void **)conn, worker, worker->cp->pool);
+ rv = connection_constructor((void **)conn, worker, worker->cp->pool);
}
else {
*conn = worker->cp->conn;
worker->cp->conn = NULL;
+ rv = APR_SUCCESS;
}
- rv = APR_SUCCESS;
}
if (rv != APR_SUCCESS) {
ap_log_error(APLOG_MARK, APLOG_ERR, rv, s, APLOGNO(00941)
- "%s: failed to acquire connection for (%s)",
- proxy_function, worker->s->hostname_ex);
+ "%s: failed to acquire connection for (%s:%d)",
+ proxy_function, worker->s->hostname_ex,
+ (int)worker->s->port);
return HTTP_SERVICE_UNAVAILABLE;
}
ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s, APLOGNO(00942)
- "%s: has acquired connection for (%s)",
- proxy_function, worker->s->hostname_ex);
+ "%s: has acquired connection for (%s:%d)",
+ proxy_function, worker->s->hostname_ex,
+ (int)worker->s->port);
(*conn)->worker = worker;
(*conn)->close = 0;
@@ -2330,13 +2698,362 @@ PROXY_DECLARE(int) ap_proxy_release_connection(const char *proxy_function,
server_rec *s)
{
ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s, APLOGNO(00943)
- "%s: has released connection for (%s)",
- proxy_function, conn->worker->s->hostname_ex);
+ "%s: has released connection for (%s:%d)",
+ proxy_function, conn->worker->s->hostname_ex,
+ (int)conn->worker->s->port);
connection_cleanup(conn);
return OK;
}
+static APR_INLINE void proxy_address_inc(proxy_address *address)
+{
+ apr_uint32_t old = apr_atomic_inc32(&address->refcount);
+ ap_assert(old > 0 && old < APR_UINT32_MAX);
+}
+
+static APR_INLINE void proxy_address_dec(proxy_address *address)
+{
+ /* Use _add32(, -1) since _dec32()'s returned value does not help */
+ apr_uint32_t old = apr_atomic_add32(&address->refcount, -1);
+ ap_assert(old > 0);
+ if (old == 1) {
+ apr_pool_destroy(address->addr->pool);
+ }
+}
+
+static apr_status_t proxy_address_cleanup(void *address)
+{
+ proxy_address_dec(address);
+ return APR_SUCCESS;
+}
+
+static APR_INLINE proxy_address *worker_address_get(proxy_worker *worker)
+{
+ /* No _readptr() so let's _casptr(, NULL, NULL) instead */
+ return apr_atomic_casptr((void *)&worker->address, NULL, NULL);
+}
+
+/* XXX: Call when PROXY_THREAD_LOCK()ed only! */
+static APR_INLINE void worker_address_set(proxy_worker *worker,
+ proxy_address *to)
+{
+ proxy_address *old = apr_atomic_xchgptr((void *)&worker->address, to);
+ if (old && old != to) {
+ proxy_address_dec(old);
+ }
+}
+
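The ownership rules these helpers encode are subtle, so here is a self-contained sketch of the pattern in plain APR (names are made up, not part of the patch): the worker keeps one reference to its current address, each connection that adopts it takes another, and whoever drops the last reference destroys the pool the address was resolved into:

    #include "apr_atomic.h"
    #include "apr_pools.h"

    struct refcounted_addr {
        apr_pool_t *pool;               /* the address data lives in this pool */
        volatile apr_uint32_t refcount;
    };

    static void addr_ref(struct refcounted_addr *a)
    {
        apr_atomic_inc32(&a->refcount);
    }

    static void addr_unref(struct refcounted_addr *a)
    {
        /* add32(-1) wraps to a decrement and returns the old value;
         * 1 means we were the last user, so release the storage. */
        if (apr_atomic_add32(&a->refcount, -1) == 1) {
            apr_pool_destroy(a->pool);
        }
    }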
+static apr_status_t worker_address_resolve(proxy_worker *worker,
+ apr_sockaddr_t **paddr,
+ const char *hostname,
+ apr_port_t hostport,
+ const char *proxy_function,
+ request_rec *r, server_rec *s)
+{
+ apr_status_t rv;
+ apr_pool_t *pool = NULL;
+
+ apr_pool_create(&pool, worker->cp->dns_pool);
+ rv = apr_sockaddr_info_get(paddr, hostname, APR_UNSPEC,
+ hostport, 0, pool);
+ if (rv != APR_SUCCESS) {
+ if (r && !s) {
+ proxyerror_core(r, HTTP_INTERNAL_SERVER_ERROR,
+ apr_pstrcat(pool,
+ "DNS lookup failure for: ",
+ hostname, NULL),
+ rv);
+ }
+ else if (r) {
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, rv, r, APLOGNO(10477)
+ "%s: resolving worker %s address",
+ proxy_function, hostname);
+ }
+ else {
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, rv, s, APLOGNO(10478)
+ "%s: resolving worker %s address",
+ proxy_function, hostname);
+ }
+ apr_pool_destroy(pool);
+ return rv;
+ }
+
+ if (r ? APLOGrdebug(r) : APLOGdebug(s)) {
+ char *addrs = NULL;
+ apr_sockaddr_t *addr = *paddr;
+ for (; addr; addr = addr->next) {
+ addrs = apr_psprintf(pool, "%s%s%pI",
+ addrs ? ", " : "",
+ addrs ? addrs : "",
+ addr);
+ }
+ if (r) {
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(10479)
+ "%s: %s resolved to %s",
+ proxy_function, hostname, addrs);
+ }
+ else {
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s, APLOGNO(10480)
+ "%s: %s resolved to %s",
+ proxy_function, hostname, addrs);
+ }
+ }
+
+ return APR_SUCCESS;
+}
+
+static int proxy_addrs_equal(const apr_sockaddr_t *addr1,
+ const apr_sockaddr_t *addr2)
+{
+ const apr_sockaddr_t *base2 = addr2, *pos2;
+ while (addr1 && addr2) {
+ for (pos2 = base2; pos2; pos2 = pos2->next) {
+ if (apr_sockaddr_equal(pos2, addr1)) {
+ break;
+ }
+ }
+ if (!pos2) {
+ return 0;
+ }
+ addr1 = addr1->next;
+ addr2 = addr2->next;
+ }
+ if (addr1 || addr2) {
+ return 0;
+ }
+ return 1;
+}
+
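A small sketch of what the comparison treats as equal (hostnames and results are illustrative; proxy_addrs_equal() itself is static, so this only shows the intended behaviour): the element order may differ between the two lists, but their lengths must match and every entry of the first list must appear in the second:

    apr_sockaddr_t *a, *b;
    apr_sockaddr_info_get(&a, "backend.example", APR_UNSPEC, 80, 0, p);
    apr_sockaddr_info_get(&b, "backend.example", APR_UNSPEC, 80, 0, p);
    /* proxy_addrs_equal(a, b) -> 1: same records, possibly reordered.
     * If a later lookup gains, loses or changes a record it returns 0,
     * and the PROXY_DETERMINE_ADDRESS_CHECK caller switches the worker
     * over to the freshly resolved list. */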
+PROXY_DECLARE(apr_status_t) ap_proxy_determine_address(const char *proxy_function,
+ proxy_conn_rec *conn,
+ const char *hostname,
+ apr_port_t hostport,
+ unsigned int flags,
+ request_rec *r,
+ server_rec *s)
+{
+ proxy_worker *worker = conn->worker;
+ apr_status_t rv;
+
+ /*
+     * Worker can have the single constant backend address.
+ * The single DNS lookup is used once per worker.
+ * If dynamic change is needed then set the addr to NULL
+ * inside dynamic config to force the lookup.
+ * The worker's addressTTL parameter may also be configured
+ * to perform the DNS lookups only when the TTL expires,
+ * or each time if that TTL is zero.
+ */
+ if (!worker->s->is_address_reusable) {
+ conn->hostname = apr_pstrdup(conn->pool, hostname);
+ conn->port = hostport;
+
+ rv = apr_sockaddr_info_get(&conn->addr, hostname, APR_UNSPEC,
+ hostport, 0, conn->pool);
+ if (rv != APR_SUCCESS) {
+ if (r && !s) {
+ proxyerror_core(r, HTTP_INTERNAL_SERVER_ERROR,
+ apr_pstrcat(r->pool, "DNS lookup failure for: ",
+ hostname, NULL), rv);
+ }
+ else if (r) {
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, rv, r, APLOGNO(10475)
+ "%s: resolving backend %s address",
+ proxy_function, hostname);
+ }
+ else {
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, rv, s, APLOGNO(10476)
+ "%s: resolving backend %s address",
+ proxy_function, hostname);
+ }
+ return rv;
+ }
+ }
+ else {
+ apr_sockaddr_t *addr = NULL;
+ proxy_address *address = NULL;
+ apr_int32_t ttl = worker->s->address_ttl;
+ apr_uint32_t now = 0;
+
+ if (flags & PROXY_DETERMINE_ADDRESS_CHECK) {
+ /* The caller wants to check if the address changed, return
+ * APR_EEXIST if not, otherwise fall through to update the
+             * worker's address for everyone to switch.
+ */
+ if (!conn->addr) {
+ /* Need something to compare with */
+ return APR_EINVAL;
+ }
+ rv = worker_address_resolve(worker, &addr,
+ hostname, hostport,
+ proxy_function, r, s);
+ if (rv != APR_SUCCESS) {
+ return rv;
+ }
+ if (proxy_addrs_equal(conn->addr, addr)) {
+ apr_pool_destroy(addr->pool);
+ return APR_EEXIST;
+ }
+ }
+
+ AP_DEBUG_ASSERT(ttl != 0);
+ if (ttl > 0) {
+ /* TODO: use a monotonic clock here */
+ now = apr_time_sec(apr_time_now() - *proxy_start_time);
+ }
+
+ /* Addresses are refcounted, destroyed when their refcount reaches 0.
+ *
+ * One ref is taken by worker->address as the worker's current/latest
+ * address, it's dropped when that address expires/changes (see below).
+ * The other refs are taken by the connections when using/switching to
+ * the current worker address (also below), they are dropped when the
+ * conns are destroyed (by the reslist though it should never happen
+ * if hmax is greater than the number of threads) OR for an expired
+ * conn->address when it's replaced by the new worker->address below.
+ *
+ * Dereferencing worker->address requires holding the worker mutex or
+ * some concurrent connection processing might change/destroy it at any
+ * time. So only conn->address is safe to dereference anywhere (unless
+ * NULL..) since it has at least the lifetime of the connection.
+ */
+ if (!addr) {
+ address = worker_address_get(worker);
+ }
+ if (!address
+ || conn->address != address
+ || apr_atomic_read32(&address->expiry) <= now) {
+ PROXY_THREAD_LOCK(worker);
+
+ /* Re-check while locked, might be a new address already */
+ if (!addr) {
+ address = worker_address_get(worker);
+ }
+ if (!address || apr_atomic_read32(&address->expiry) <= now) {
+ if (!addr) {
+ rv = worker_address_resolve(worker, &addr,
+ hostname, hostport,
+ proxy_function, r, s);
+ if (rv != APR_SUCCESS) {
+ PROXY_THREAD_UNLOCK(worker);
+ return rv;
+ }
+
+ /* Recompute "now" should the DNS be slow
+ * TODO: use a monotonic clock here
+ */
+ now = apr_time_sec(apr_time_now() - *proxy_start_time);
+ }
+
+ address = apr_pcalloc(addr->pool, sizeof(*address));
+ address->hostname = apr_pstrdup(addr->pool, hostname);
+ address->hostport = hostport;
+ address->addr = addr;
+
+ if (ttl > 0) {
+                    /* We keep each worker's expiry date shared across all the
+ * children so that they update their address at the same
+ * time, regardless of whether a specific child forced an
+ * address to expire at some point (for connect() issues).
+ */
+ address->expiry = apr_atomic_read32(&worker->s->address_expiry);
+ if (address->expiry <= now) {
+ apr_uint32_t new_expiry = address->expiry + ttl;
+ while (new_expiry <= now) {
+ new_expiry += ttl;
+ }
+ new_expiry = apr_atomic_cas32(&worker->s->address_expiry,
+ new_expiry, address->expiry);
+ /* race lost? well the expiry should grow anyway.. */
+ AP_DEBUG_ASSERT(new_expiry > now);
+ address->expiry = new_expiry;
+ }
+ }
+ else {
+ /* Never expires */
+ address->expiry = APR_UINT32_MAX;
+ }
+
+ /* One ref is for worker->address in any case */
+ if (worker->address || worker->cp->addr) {
+ apr_atomic_set32(&address->refcount, 1);
+ }
+ else {
+ /* Set worker->cp->addr once for compat with third-party
+ * modules. This addr never changed before and can't change
+ * underneath users now because of some TTL configuration.
+ * So we take one more ref for worker->cp->addr to remain
+ * allocated forever (though it might not be up to date..).
+                     * Modules should use conn->addr instead of worker->cp->addr
+ * to get the actual address used by each conn, determined
+ * at connect() time.
+ */
+ apr_atomic_set32(&address->refcount, 2);
+ worker->cp->addr = address->addr;
+ }
+
+ /* Publish the changes. The old worker address (if any) is no
+ * longer used by this worker, it will be destroyed now if the
+ * worker is the last user (refcount == 1) or by the last conn
+ * using it (refcount > 1).
+ */
+ worker_address_set(worker, address);
+ }
+
+ /* Take the ref for conn->address (before dropping the mutex so to
+ * let no chance for this address be killed before it's used!)
+ */
+ proxy_address_inc(address);
+
+ PROXY_THREAD_UNLOCK(worker);
+
+ /* Kill any socket using the old address */
+ if (conn->sock) {
+ if (r ? APLOGrdebug(r) : APLOGdebug(s)) {
+ /* XXX: this requires the old conn->addr[ess] to still
+ * be alive since it's not copied by apr_socket_connect()
+ * in ap_proxy_connect_backend().
+ */
+ apr_sockaddr_t *local_addr = NULL;
+ apr_sockaddr_t *remote_addr = NULL;
+ apr_socket_addr_get(&local_addr, APR_LOCAL, conn->sock);
+ apr_socket_addr_get(&remote_addr, APR_REMOTE, conn->sock);
+ if (r) {
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(10481)
+ "%s: closing connection to %s (%pI<>%pI) on "
+ "address change", proxy_function, hostname,
+ local_addr, remote_addr);
+ }
+ else {
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s, APLOGNO(10482)
+ "%s: closing connection to %s (%pI<>%pI) on "
+ "address change", proxy_function, hostname,
+ local_addr, remote_addr);
+ }
+ }
+ socket_cleanup(conn);
+ }
+
+ /* Kill the old address (if any) and use the new one */
+ if (conn->address) {
+ apr_pool_cleanup_run(conn->pool, conn->address,
+ proxy_address_cleanup);
+ }
+ apr_pool_cleanup_register(conn->pool, address,
+ proxy_address_cleanup,
+ apr_pool_cleanup_null);
+ address_cleanup(conn);
+ conn->address = address;
+ conn->hostname = address->hostname;
+ conn->port = address->hostport;
+ conn->addr = address->addr;
+ }
+ }
+
+ return APR_SUCCESS;
+}
+
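The shared-expiry arithmetic deserves a worked example (all numbers are made up). Say ttl=60 and the proxy started at T0: a child resolving at now=130s finds the shared expiry still at 120s, so it rounds it up in ttl-sized steps past "now" and publishes 180s; other children resolving before 180s keep reusing that address and everyone re-resolves together once it passes:

    apr_uint32_t ttl = 60, now = 130;
    apr_uint32_t expiry = 120;                 /* worker->s->address_expiry */

    if (expiry <= now) {
        apr_uint32_t new_expiry = expiry + ttl;   /* 180 */
        while (new_expiry <= now) {
            new_expiry += ttl;                 /* extra steps if we were idle */
        }
        /* the real code publishes this via apr_atomic_cas32() so that
         * concurrent children agree on the same boundary */
        expiry = new_expiry;                   /* 180 */
    }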
PROXY_DECLARE(int)
ap_proxy_determine_connection(apr_pool_t *p, request_rec *r,
proxy_server_conf *conf,
@@ -2350,8 +3067,6 @@ ap_proxy_determine_connection(apr_pool_t *p, request_rec *r,
int server_portstr_size)
{
int server_port;
- apr_status_t err = APR_SUCCESS;
- apr_status_t uerr = APR_SUCCESS;
const char *uds_path;
/*
@@ -2371,6 +3086,12 @@ ap_proxy_determine_connection(apr_pool_t *p, request_rec *r,
ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(00944)
"connecting %s to %s:%d", *url, uri->hostname, uri->port);
+ /* Close a possible existing socket if we are told to do so */
+ if (conn->close) {
+ socket_cleanup(conn);
+ conn->close = 0;
+ }
+
/*
* allocate these out of the specified connection pool
* The scheme handler decides if this is permanent or
@@ -2397,129 +3118,122 @@ ap_proxy_determine_connection(apr_pool_t *p, request_rec *r,
* to check host and port on the conn and be careful about
* spilling the cached addr from the worker.
*/
- uds_path = (*worker->s->uds_path ? worker->s->uds_path : apr_table_get(r->notes, "uds_path"));
+ uds_path = (*worker->s->uds_path
+ ? worker->s->uds_path
+ : apr_table_get(r->notes, "uds_path"));
if (uds_path) {
- if (conn->uds_path == NULL) {
- /* use (*conn)->pool instead of worker->cp->pool to match lifetime */
- conn->uds_path = apr_pstrdup(conn->pool, uds_path);
- }
- if (conn->uds_path) {
- ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(02545)
- "%s: has determined UDS as %s",
- uri->scheme, conn->uds_path);
- }
- else {
- /* should never happen */
- ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(02546)
- "%s: cannot determine UDS (%s)",
- uri->scheme, uds_path);
-
- }
- /*
- * In UDS cases, some structs are NULL. Protect from de-refs
- * and provide info for logging at the same time.
- */
- if (!conn->addr) {
- apr_sockaddr_t *sa;
- apr_sockaddr_info_get(&sa, NULL, APR_UNSPEC, 0, 0, conn->pool);
- conn->addr = sa;
+ if (!conn->uds_path || strcmp(conn->uds_path, uds_path) != 0) {
+ apr_pool_t *pool = conn->pool;
+ if (conn->uds_path) {
+ address_cleanup(conn);
+ if (!conn->uds_pool) {
+ apr_pool_create(&conn->uds_pool, worker->cp->dns_pool);
+ }
+ pool = conn->uds_pool;
+ }
+ /*
+ * In UDS cases, some structs are NULL. Protect from de-refs
+ * and provide info for logging at the same time.
+ */
+#if APR_HAVE_SOCKADDR_UN
+ apr_sockaddr_info_get(&conn->addr, uds_path, APR_UNIX, 0, 0, pool);
+ if (conn->addr && conn->addr->hostname) {
+ conn->uds_path = conn->addr->hostname;
+ }
+ else {
+ conn->uds_path = apr_pstrdup(pool, uds_path);
+ }
+#else
+ apr_sockaddr_info_get(&conn->addr, NULL, APR_UNSPEC, 0, 0, pool);
+ conn->uds_path = apr_pstrdup(pool, uds_path);
+#endif
+ conn->hostname = apr_pstrdup(pool, uri->hostname);
+ conn->port = uri->port;
}
- conn->hostname = "httpd-UDS";
- conn->port = 0;
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(02545)
+ "%s: has determined UDS as %s (for %s:%hu)",
+ uri->scheme, conn->uds_path, conn->hostname, conn->port);
}
else {
- int will_reuse = worker->s->is_address_reusable && !worker->s->disablereuse;
- if (!conn->hostname || !will_reuse) {
- if (proxyname) {
- conn->hostname = apr_pstrdup(conn->pool, proxyname);
- conn->port = proxyport;
- /*
- * If we have a forward proxy and the protocol is HTTPS,
- * then we need to prepend a HTTP CONNECT request before
- * sending our actual HTTPS requests.
- * Save our real backend data for using it later during HTTP CONNECT.
+ const char *hostname = uri->hostname;
+ apr_port_t hostport = uri->port;
+
+ /* Not a remote CONNECT until further notice */
+ conn->forward = NULL;
+
+ if (proxyname) {
+ hostname = proxyname;
+ hostport = proxyport;
+
+ /*
+ * If we have a remote proxy and the protocol is HTTPS,
+ * then we need to prepend a HTTP CONNECT request before
+ * sending our actual HTTPS requests.
+ */
+ if (conn->is_ssl) {
+ forward_info *forward;
+ const char *proxy_auth;
+
+ /* Do we want to pass Proxy-Authorization along?
+ * If we haven't used it, then YES
+ * If we have used it then MAYBE: RFC2616 says we MAY propagate it.
+ * So let's make it configurable by env.
+ * The logic here is the same used in mod_proxy_http.
*/
- if (conn->is_ssl) {
- const char *proxy_auth;
+ proxy_auth = apr_table_get(r->notes, "proxy-basic-creds");
+ if (proxy_auth == NULL
+ && (r->user == NULL /* we haven't yet authenticated */
+ || apr_table_get(r->subprocess_env, "Proxy-Chain-Auth"))) {
+ proxy_auth = apr_table_get(r->headers_in, "Proxy-Authorization");
+ }
+ if (proxy_auth != NULL && proxy_auth[0] == '\0') {
+ proxy_auth = NULL;
+ }
- forward_info *forward = apr_pcalloc(conn->pool, sizeof(forward_info));
+ /* Reset forward info if they changed */
+ if (!(forward = conn->forward)
+ || forward->target_port != uri->port
+ || ap_cstr_casecmp(forward->target_host, uri->hostname) != 0
+ || (forward->proxy_auth != NULL) != (proxy_auth != NULL)
+ || (forward->proxy_auth != NULL && proxy_auth != NULL &&
+ strcmp(forward->proxy_auth, proxy_auth) != 0)) {
+ apr_pool_t *fwd_pool = conn->pool;
+ if (worker->s->is_address_reusable) {
+ if (conn->fwd_pool) {
+ apr_pool_clear(conn->fwd_pool);
+ }
+ else {
+ apr_pool_create(&conn->fwd_pool, conn->pool);
+ }
+ fwd_pool = conn->fwd_pool;
+ }
+ forward = apr_pcalloc(fwd_pool, sizeof(forward_info));
conn->forward = forward;
+
+ /*
+ * Save our real backend data for using it later during HTTP CONNECT.
+ */
forward->use_http_connect = 1;
- forward->target_host = apr_pstrdup(conn->pool, uri->hostname);
+ forward->target_host = apr_pstrdup(fwd_pool, uri->hostname);
forward->target_port = uri->port;
- /* Do we want to pass Proxy-Authorization along?
- * If we haven't used it, then YES
- * If we have used it then MAYBE: RFC2616 says we MAY propagate it.
- * So let's make it configurable by env.
- * The logic here is the same used in mod_proxy_http.
- */
- proxy_auth = apr_table_get(r->headers_in, "Proxy-Authorization");
- if (proxy_auth != NULL &&
- proxy_auth[0] != '\0' &&
- r->user == NULL && /* we haven't yet authenticated */
- apr_table_get(r->subprocess_env, "Proxy-Chain-Auth")) {
- forward->proxy_auth = apr_pstrdup(conn->pool, proxy_auth);
+ if (proxy_auth) {
+ forward->proxy_auth = apr_pstrdup(fwd_pool, proxy_auth);
}
}
}
- else {
- conn->hostname = apr_pstrdup(conn->pool, uri->hostname);
- conn->port = uri->port;
- }
- if (!will_reuse) {
- /*
- * Only do a lookup if we should not reuse the backend address.
- * Otherwise we will look it up once for the worker.
- */
- err = apr_sockaddr_info_get(&(conn->addr),
- conn->hostname, APR_UNSPEC,
- conn->port, 0,
- conn->pool);
- }
- socket_cleanup(conn);
- conn->close = 0;
}
- if (will_reuse) {
- /*
- * Looking up the backend address for the worker only makes sense if
- * we can reuse the address.
- */
- if (!worker->cp->addr) {
- if ((err = PROXY_THREAD_LOCK(worker)) != APR_SUCCESS) {
- ap_log_rerror(APLOG_MARK, APLOG_ERR, err, r, APLOGNO(00945) "lock");
- return HTTP_INTERNAL_SERVER_ERROR;
- }
- /*
- * Worker can have the single constant backend address.
- * The single DNS lookup is used once per worker.
- * If dynamic change is needed then set the addr to NULL
- * inside dynamic config to force the lookup.
- */
- err = apr_sockaddr_info_get(&(worker->cp->addr),
- conn->hostname, APR_UNSPEC,
- conn->port, 0,
- worker->cp->pool);
- conn->addr = worker->cp->addr;
- if ((uerr = PROXY_THREAD_UNLOCK(worker)) != APR_SUCCESS) {
- ap_log_rerror(APLOG_MARK, APLOG_ERR, uerr, r, APLOGNO(00946) "unlock");
- }
- }
- else {
- conn->addr = worker->cp->addr;
- }
+ if (conn->hostname
+ && (conn->port != hostport
+ || ap_cstr_casecmp(conn->hostname, hostname) != 0)) {
+ address_cleanup(conn);
}
- }
- /* Close a possible existing socket if we are told to do so */
- if (conn->close) {
- socket_cleanup(conn);
- conn->close = 0;
- }
- if (err != APR_SUCCESS) {
- return ap_proxyerror(r, HTTP_BAD_GATEWAY,
- apr_pstrcat(p, "DNS lookup failure for: ",
- conn->hostname, NULL));
+ /* Resolve the connection address with the determined hostname/port */
+ if (ap_proxy_determine_address(uri->scheme, conn, hostname, hostport,
+ 0, r, NULL)) {
+ return HTTP_INTERNAL_SERVER_ERROR;
+ }
}
/* Get the server port for the Via headers */
@@ -2578,7 +3292,8 @@ ap_proxy_determine_connection(apr_pool_t *p, request_rec *r,
}
}
ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(00947)
- "connected %s to %s:%d", *url, conn->hostname, conn->port);
+ "connecting %s to %pI (%s:%hu)", *url,
+ conn->addr, conn->hostname, conn->port);
return OK;
}
@@ -2679,7 +3394,8 @@ static apr_status_t send_http_connect(proxy_conn_rec *backend,
nbytes = apr_snprintf(buffer, sizeof(buffer),
"CONNECT %s:%d HTTP/1.0" CRLF,
forward->target_host, forward->target_port);
- /* Add proxy authorization from the initial request if necessary */
+ /* Add proxy authorization from the configuration, or initial
+ * request if necessary */
if (forward->proxy_auth != NULL) {
nbytes += apr_snprintf(buffer + nbytes, sizeof(buffer) - nbytes,
"Proxy-Authorization: %s" CRLF,
@@ -2835,7 +3551,7 @@ PROXY_DECLARE(apr_status_t) ap_proxy_check_connection(const char *scheme,
/* Filter chain is OK and empty, yet we can't determine from
* ap_check_pipeline (actually ap_core_input_filter) whether
* an empty non-blocking read is EAGAIN or EOF on the socket
- * side (it's always SUCCESS), so check it explicitely here.
+ * side (it's always SUCCESS), so check it explicitly here.
*/
if (ap_proxy_is_socket_connected(conn->sock)) {
rv = APR_SUCCESS;
@@ -2882,7 +3598,7 @@ PROXY_DECLARE(apr_status_t) ap_proxy_check_connection(const char *scheme,
"%s: backend socket is disconnected.", scheme);
}
else {
- ap_log_error(APLOG_MARK, APLOG_WARNING, 0, server, APLOGNO(03408)
+ ap_log_error(APLOG_MARK, APLOG_INFO, 0, server, APLOGNO(03408)
"%s: reusable backend connection is not empty: "
"forcibly closed", scheme);
}
@@ -2902,11 +3618,14 @@ PROXY_DECLARE(int) ap_proxy_connect_backend(const char *proxy_function,
{
apr_status_t rv;
int loglevel;
- apr_sockaddr_t *backend_addr = conn->addr;
+ forward_info *forward = conn->forward;
+ apr_sockaddr_t *backend_addr;
/* the local address to use for the outgoing connection */
apr_sockaddr_t *local_addr;
apr_socket_t *newsock;
void *sconf = s->module_config;
+ int address_reusable = worker->s->is_address_reusable;
+ int did_dns_lookup = 0;
proxy_server_conf *conf =
(proxy_server_conf *) ap_get_module_config(sconf, &proxy_module);
@@ -2915,6 +3634,16 @@ PROXY_DECLARE(int) ap_proxy_connect_backend(const char *proxy_function,
return DECLINED;
}
+ /* We'll set conn->addr to the address actually connect()ed, so if the
+ * network connection is not reused (per ap_proxy_check_connection()
+ * above) we need to reset conn->addr to the first resolved address
+ * and try to connect it first.
+ */
+ if (conn->address && rv != APR_SUCCESS) {
+ conn->addr = conn->address->addr;
+ }
+ backend_addr = conn->addr;
+
while (rv != APR_SUCCESS && (backend_addr || conn->uds_path)) {
#if APR_HAVE_SYS_UN_H
if (conn->uds_path)
@@ -2924,10 +3653,11 @@ PROXY_DECLARE(int) ap_proxy_connect_backend(const char *proxy_function,
if (rv != APR_SUCCESS) {
loglevel = APLOG_ERR;
ap_log_error(APLOG_MARK, loglevel, rv, s, APLOGNO(02453)
- "%s: error creating Unix domain socket for "
- "target %s",
+ "%s: error creating Unix domain socket "
+ "%s (%s:%hu)",
proxy_function,
- worker->s->hostname_ex);
+ conn->uds_path,
+ conn->hostname, conn->port);
break;
}
conn->connection = NULL;
@@ -2937,19 +3667,18 @@ PROXY_DECLARE(int) ap_proxy_connect_backend(const char *proxy_function,
apr_socket_close(newsock);
ap_log_error(APLOG_MARK, APLOG_ERR, rv, s, APLOGNO(02454)
"%s: attempt to connect to Unix domain socket "
- "%s (%s) failed",
- proxy_function,
- conn->uds_path,
- worker->s->hostname_ex);
+ "%s (%s:%hu) failed",
+ proxy_function, conn->uds_path,
+ conn->hostname, conn->port);
break;
}
ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s, APLOGNO(02823)
"%s: connection established with Unix domain socket "
- "%s (%s)",
+ "%s (%s:%hu)",
proxy_function,
conn->uds_path,
- worker->s->hostname_ex);
+ conn->hostname, conn->port);
}
else
#endif
@@ -2959,11 +3688,11 @@ PROXY_DECLARE(int) ap_proxy_connect_backend(const char *proxy_function,
conn->scpool)) != APR_SUCCESS) {
loglevel = backend_addr->next ? APLOG_DEBUG : APLOG_ERR;
ap_log_error(APLOG_MARK, loglevel, rv, s, APLOGNO(00952)
- "%s: error creating fam %d socket for "
- "target %s",
+ "%s: error creating fam %d socket to %pI for "
+ "(%s:%hu)",
proxy_function,
- backend_addr->family,
- worker->s->hostname_ex);
+ backend_addr->family, backend_addr,
+ conn->hostname, conn->port);
/*
* this could be an IPv6 address from the DNS but the
* local machine won't give us an IPv6 socket; hopefully the
@@ -3012,8 +3741,9 @@ PROXY_DECLARE(int) ap_proxy_connect_backend(const char *proxy_function,
}
}
ap_log_error(APLOG_MARK, APLOG_TRACE2, 0, s,
- "%s: fam %d socket created to connect to %s",
- proxy_function, backend_addr->family, worker->s->hostname_ex);
+ "%s: fam %d socket created for %pI (%s:%hu)",
+ proxy_function, backend_addr->family, backend_addr,
+ conn->hostname, conn->port);
if (conf->source_address_set) {
local_addr = apr_pmemdup(conn->scpool, conf->source_address,
@@ -3035,19 +3765,45 @@ PROXY_DECLARE(int) ap_proxy_connect_backend(const char *proxy_function,
apr_socket_close(newsock);
loglevel = backend_addr->next ? APLOG_DEBUG : APLOG_ERR;
ap_log_error(APLOG_MARK, loglevel, rv, s, APLOGNO(00957)
- "%s: attempt to connect to %pI (%s) failed",
- proxy_function,
- backend_addr,
- worker->s->hostname_ex);
+ "%s: attempt to connect to %pI (%s:%hu) failed",
+ proxy_function, backend_addr,
+ conn->hostname, conn->port);
backend_addr = backend_addr->next;
+ /*
+             * If we run out of resolved IPs when connecting and if
+ * we cache the resolution in the worker the resolution
+ * might have changed. Hence try a DNS lookup to see if this
+ * helps.
+ */
+ if (!backend_addr && address_reusable && !did_dns_lookup) {
+ /* Issue a new DNS lookup to check if the address changed,
+ * in which case (SUCCESS) restart the loop with the new
+ * one(s), otherwise leave (nothing we can do about it).
+ */
+ if (ap_proxy_determine_address(proxy_function, conn,
+ conn->hostname, conn->port,
+ PROXY_DETERMINE_ADDRESS_CHECK,
+ NULL, s) == APR_SUCCESS) {
+ backend_addr = conn->addr;
+ }
+
+ /*
+ * In case of an error backend_addr will be NULL which
+ * is enough to leave the loop. If successful we'll retry
+ * the new addresses only once.
+ */
+ did_dns_lookup = 1;
+ }
continue;
}
ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s, APLOGNO(02824)
- "%s: connection established with %pI (%s)",
- proxy_function,
- backend_addr,
- worker->s->hostname_ex);
+ "%s: connection established with %pI (%s:%hu)",
+ proxy_function, backend_addr,
+ conn->hostname, conn->port);
+
+ /* Set the actual sockaddr we are connected to */
+ conn->addr = backend_addr;
}
/* Set a timeout on the socket */
@@ -3063,13 +3819,12 @@ PROXY_DECLARE(int) ap_proxy_connect_backend(const char *proxy_function,
conn->sock = newsock;
- if (!conn->uds_path && conn->forward) {
- forward_info *forward = (forward_info *)conn->forward;
+ if (forward && forward->use_http_connect) {
/*
* For HTTP CONNECT we need to prepend CONNECT request before
* sending our actual HTTPS requests.
*/
- if (forward->use_http_connect) {
+ {
rv = send_http_connect(conn, s);
/* If an error occurred, loop round and try again */
if (rv != APR_SUCCESS) {
@@ -3077,11 +3832,11 @@ PROXY_DECLARE(int) ap_proxy_connect_backend(const char *proxy_function,
apr_socket_close(newsock);
loglevel = backend_addr->next ? APLOG_DEBUG : APLOG_ERR;
ap_log_error(APLOG_MARK, loglevel, rv, s, APLOGNO(00958)
- "%s: attempt to connect to %s:%d "
- "via http CONNECT through %pI (%s) failed",
+ "%s: attempt to connect to %s:%hu "
+ "via http CONNECT through %pI (%s:%hu) failed",
proxy_function,
forward->target_host, forward->target_port,
- backend_addr, worker->s->hostname_ex);
+ backend_addr, conn->hostname, conn->port);
backend_addr = backend_addr->next;
continue;
}
@@ -3101,9 +3856,10 @@ PROXY_DECLARE(int) ap_proxy_connect_backend(const char *proxy_function,
worker->s->error_time = apr_time_now();
worker->s->status |= PROXY_WORKER_IN_ERROR;
ap_log_error(APLOG_MARK, APLOG_ERR, 0, s, APLOGNO(00959)
- "ap_proxy_connect_backend disabling worker for (%s) for %"
- APR_TIME_T_FMT "s",
- worker->s->hostname_ex, apr_time_sec(worker->s->retry));
+                     "ap_proxy_connect_backend disabling worker for (%s:%d) "
+ "for %" APR_TIME_T_FMT "s",
+ worker->s->hostname_ex, (int)worker->s->port,
+ apr_time_sec(worker->s->retry));
}
}
else {
@@ -3172,6 +3928,12 @@ static int proxy_connection_create(const char *proxy_function,
apr_bucket_alloc_t *bucket_alloc;
if (conn->connection) {
+ if (conn->is_ssl) {
+ /* on reuse, reinit the SSL connection dir config with the current
+ * r->per_dir_config, the previous one was reset on release.
+ */
+ ap_proxy_ssl_engine(conn->connection, per_dir_config, 1);
+ }
return OK;
}
@@ -3189,7 +3951,7 @@ static int proxy_connection_create(const char *proxy_function,
* the peer reset the connection already; ap_run_create_connection()
* closed the socket
*/
- ap_log_error(APLOG_MARK, APLOG_DEBUG, 0,
+ ap_log_error(APLOG_MARK, APLOG_ERR, 0,
s, APLOGNO(00960) "%s: an error occurred creating a "
"new connection to %pI (%s)", proxy_function,
backend_addr, conn->hostname);
@@ -3207,6 +3969,16 @@ static int proxy_connection_create(const char *proxy_function,
backend_addr, conn->hostname);
return HTTP_INTERNAL_SERVER_ERROR;
}
+ if (conn->ssl_hostname) {
+ /* Set a note on the connection about what CN is requested,
+ * such that mod_ssl can check if it is requested to do so.
+ */
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, conn->connection,
+ "%s: set SNI to %s for (%s)", proxy_function,
+ conn->ssl_hostname, conn->hostname);
+ apr_table_setn(conn->connection->notes, "proxy-request-hostname",
+ conn->ssl_hostname);
+ }
}
else {
/* TODO: See if this will break FTP */
@@ -3269,6 +4041,45 @@ int ap_proxy_lb_workers(void)
return lb_workers_limit;
}
+static APR_INLINE int error_code_overridden(const int *elts, int nelts,
+ int code)
+{
+ int min = 0;
+ int max = nelts - 1;
+ AP_DEBUG_ASSERT(max >= 0);
+
+ while (min < max) {
+ int mid = (min + max) / 2;
+ int val = elts[mid];
+
+ if (val < code) {
+ min = mid + 1;
+ }
+ else if (val > code) {
+ max = mid - 1;
+ }
+ else {
+ return 1;
+ }
+ }
+
+ return elts[min] == code;
+}
+
+PROXY_DECLARE(int) ap_proxy_should_override(proxy_dir_conf *conf, int code)
+{
+ if (!conf->error_override)
+ return 0;
+
+ if (apr_is_empty_array(conf->error_override_codes))
+ return ap_is_HTTP_ERROR(code);
+
+ /* Since error_override_codes is sorted, apply binary search. */
+ return error_code_overridden((int *)conf->error_override_codes->elts,
+ conf->error_override_codes->nelts,
+ code);
+}
+
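A short sketch of the two modes (directive values are examples; dconf is assumed to be the request's proxy_dir_conf): a bare "ProxyErrorOverride On" makes the check equivalent to ap_is_HTTP_ERROR(), while listing status codes restricts the override to that sorted set, which the binary search above consults:

    /* ProxyErrorOverride On              -> any 4xx/5xx backend status  */
    /* ProxyErrorOverride On 403 500 503  -> only those three codes      */
    if (ap_proxy_should_override(dconf, r->status)) {
        /* hand the body over to the local ErrorDocument machinery */
    }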
PROXY_DECLARE(void) ap_proxy_backend_broke(request_rec *r,
apr_bucket_brigade *brigade)
{
@@ -3416,16 +4227,15 @@ PROXY_DECLARE(apr_status_t) ap_proxy_sync_balancer(proxy_balancer *b, server_rec
}
if (!found) {
proxy_worker **runtime;
+ /* XXX: a thread mutex is maybe enough here */
apr_global_mutex_lock(proxy_mutex);
runtime = apr_array_push(b->workers);
- *runtime = apr_palloc(conf->pool, sizeof(proxy_worker));
+ *runtime = apr_pcalloc(conf->pool, sizeof(proxy_worker));
apr_global_mutex_unlock(proxy_mutex);
(*runtime)->hash = shm->hash;
- (*runtime)->context = NULL;
- (*runtime)->cp = NULL;
(*runtime)->balancer = b;
(*runtime)->s = shm;
- (*runtime)->tmutex = NULL;
+
rv = ap_proxy_initialize_worker(*runtime, s, conf->pool);
if (rv != APR_SUCCESS) {
ap_log_error(APLOG_MARK, APLOG_EMERG, rv, s, APLOGNO(00966) "Cannot init worker");
@@ -3433,7 +4243,7 @@ PROXY_DECLARE(apr_status_t) ap_proxy_sync_balancer(proxy_balancer *b, server_rec
}
ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s, APLOGNO(02403)
"grabbing shm[%d] (0x%pp) for worker: %s", i, (void *)shm,
- (*runtime)->s->name);
+ (*runtime)->s->name_ex);
}
}
if (b->s->need_reset) {
@@ -3565,97 +4375,125 @@ PROXY_DECLARE(int) ap_proxy_create_hdrbrgd(apr_pool_t *p,
char **old_cl_val,
char **old_te_val)
{
+ int rc = OK;
conn_rec *c = r->connection;
int counter;
char *buf;
+ apr_table_t *saved_headers_in = r->headers_in;
+ const char *saved_host = apr_table_get(saved_headers_in, "Host");
const apr_array_header_t *headers_in_array;
const apr_table_entry_t *headers_in;
- apr_table_t *saved_headers_in;
apr_bucket *e;
- int do_100_continue;
+ int force10 = 0, do_100_continue = 0;
conn_rec *origin = p_conn->connection;
+ const char *host, *creds, *val;
proxy_dir_conf *dconf = ap_get_module_config(r->per_dir_config, &proxy_module);
/*
+ * HTTP "Ping" test? Easiest is 100-Continue. However:
* To be compliant, we only use 100-Continue for requests with bodies.
* We also make sure we won't be talking HTTP/1.0 as well.
*/
- do_100_continue = (worker->s->ping_timeout_set
- && ap_request_has_body(r)
- && (PROXYREQ_REVERSE == r->proxyreq)
- && !(apr_table_get(r->subprocess_env, "force-proxy-request-1.0")));
-
if (apr_table_get(r->subprocess_env, "force-proxy-request-1.0")) {
- /*
- * According to RFC 2616 8.2.3 we are not allowed to forward an
- * Expect: 100-continue to an HTTP/1.0 server. Instead we MUST return
- * a HTTP_EXPECTATION_FAILED
- */
- if (r->expecting_100) {
- return HTTP_EXPECTATION_FAILED;
+ force10 = 1;
+ }
+ else if (apr_table_get(r->notes, "proxy-100-continue")
+ || PROXY_SHOULD_PING_100_CONTINUE(worker, r)) {
+ do_100_continue = 1;
+ }
+ if (force10 || apr_table_get(r->subprocess_env, "proxy-nokeepalive")) {
+ if (origin) {
+ origin->keepalive = AP_CONN_CLOSE;
}
- buf = apr_pstrcat(p, r->method, " ", url, " HTTP/1.0" CRLF, NULL);
p_conn->close = 1;
- } else {
- buf = apr_pstrcat(p, r->method, " ", url, " HTTP/1.1" CRLF, NULL);
}
- if (apr_table_get(r->subprocess_env, "proxy-nokeepalive")) {
- origin->keepalive = AP_CONN_CLOSE;
- p_conn->close = 1;
+
+ if (force10) {
+ buf = apr_pstrcat(p, r->method, " ", url, " HTTP/1.0" CRLF, NULL);
+ }
+ else {
+ buf = apr_pstrcat(p, r->method, " ", url, " HTTP/1.1" CRLF, NULL);
}
ap_xlate_proto_to_ascii(buf, strlen(buf));
e = apr_bucket_pool_create(buf, strlen(buf), p, c->bucket_alloc);
APR_BRIGADE_INSERT_TAIL(header_brigade, e);
+
+ /*
+ * Make a copy of r->headers_in for the request we make to the backend,
+ * modify the copy in place according to our configuration and connection
+ * handling, use it to fill in the forwarded headers' brigade, and finally
+ * restore the saved/original ones in r->headers_in.
+ *
+ * Note: We need to take r->pool for apr_table_copy as the key / value
+ * pairs in r->headers_in have been created out of r->pool and
+ * p might be (and actually is) a longer living pool.
+ * This would trigger the bad pool ancestry abort in apr_table_copy if
+ * apr is compiled with APR_POOL_DEBUG.
+ *
+ * icing: if p indeed lives longer than r->pool, we should allocate
+ * all new header values from r->pool as well and avoid leakage.
+ */
+ r->headers_in = apr_table_copy(r->pool, saved_headers_in);
+
+ /* Return the original Transfer-Encoding and/or Content-Length values
+ * then drop the headers; they must be set by the proxy handler based
+ * on the actual body being forwarded.
+ */
+ if ((*old_te_val = (char *)apr_table_get(r->headers_in,
+ "Transfer-Encoding"))) {
+ apr_table_unset(r->headers_in, "Transfer-Encoding");
+ }
+ if ((*old_cl_val = (char *)apr_table_get(r->headers_in,
+ "Content-Length"))) {
+ apr_table_unset(r->headers_in, "Content-Length");
+ }
+
+ /* Clear out hop-by-hop request headers not to forward */
+ if (ap_proxy_clear_connection(r, r->headers_in) < 0) {
+ rc = HTTP_BAD_REQUEST;
+ goto cleanup;
+ }
+
+ /* RFC2616 13.5.1 says we should strip these */
+ apr_table_unset(r->headers_in, "Keep-Alive");
+ apr_table_unset(r->headers_in, "Upgrade");
+ apr_table_unset(r->headers_in, "Trailer");
+ apr_table_unset(r->headers_in, "TE");
+
+ /* Compute Host header */
if (dconf->preserve_host == 0) {
if (ap_strchr_c(uri->hostname, ':')) { /* if literal IPv6 address */
if (uri->port_str && uri->port != DEFAULT_HTTP_PORT) {
- buf = apr_pstrcat(p, "Host: [", uri->hostname, "]:",
- uri->port_str, CRLF, NULL);
+ host = apr_pstrcat(r->pool, "[", uri->hostname, "]:",
+ uri->port_str, NULL);
} else {
- buf = apr_pstrcat(p, "Host: [", uri->hostname, "]", CRLF, NULL);
+ host = apr_pstrcat(r->pool, "[", uri->hostname, "]", NULL);
}
} else {
if (uri->port_str && uri->port != DEFAULT_HTTP_PORT) {
- buf = apr_pstrcat(p, "Host: ", uri->hostname, ":",
- uri->port_str, CRLF, NULL);
+ host = apr_pstrcat(r->pool, uri->hostname, ":",
+ uri->port_str, NULL);
} else {
- buf = apr_pstrcat(p, "Host: ", uri->hostname, CRLF, NULL);
+ host = uri->hostname;
}
}
+ apr_table_setn(r->headers_in, "Host", host);
}
else {
- /* don't want to use r->hostname, as the incoming header might have a
- * port attached
+ /* don't want to use r->hostname as the incoming header might have a
+ * port attached; use the original header instead.
*/
- const char* hostname = apr_table_get(r->headers_in,"Host");
- if (!hostname) {
- hostname = r->server->server_hostname;
+ host = saved_host;
+ if (!host) {
+ host = r->server->server_hostname;
ap_log_rerror(APLOG_MARK, APLOG_WARNING, 0, r, APLOGNO(01092)
"no HTTP 0.9 request (with no host line) "
"on incoming request and preserve host set "
"forcing hostname to be %s for uri %s",
- hostname, r->uri);
+ host, r->uri);
+ apr_table_setn(r->headers_in, "Host", host);
}
- buf = apr_pstrcat(p, "Host: ", hostname, CRLF, NULL);
}
- ap_xlate_proto_to_ascii(buf, strlen(buf));
- e = apr_bucket_pool_create(buf, strlen(buf), p, c->bucket_alloc);
- APR_BRIGADE_INSERT_TAIL(header_brigade, e);
-
- /*
- * Save the original headers in here and restore them when leaving, since
- * we will apply proxy purpose only modifications (eg. clearing hop-by-hop
- * headers, add Via or X-Forwarded-* or Expect...), whereas the originals
- * will be needed later to prepare the correct response and logging.
- *
- * Note: We need to take r->pool for apr_table_copy as the key / value
- * pairs in r->headers_in have been created out of r->pool and
- * p might be (and actually is) a longer living pool.
- * This would trigger the bad pool ancestry abort in apr_table_copy if
- * apr is compiled with APR_POOL_DEBUG.
- */
- saved_headers_in = r->headers_in;
- r->headers_in = apr_table_copy(r->pool, saved_headers_in);
/* handle Via */
if (conf->viaopt == via_block) {
@@ -3690,23 +4528,19 @@ PROXY_DECLARE(int) ap_proxy_create_hdrbrgd(apr_pool_t *p,
* to backend
*/
if (do_100_continue) {
- const char *val;
-
- if (!r->expecting_100) {
- /* Don't forward any "100 Continue" response if the client is
- * not expecting it.
- */
- apr_table_setn(r->subprocess_env, "proxy-interim-response",
- "Suppress");
- }
-
/* Add the Expect header if not already there. */
- if (((val = apr_table_get(r->headers_in, "Expect")) == NULL)
- || (strcasecmp(val, "100-Continue") != 0 /* fast path */
- && !ap_find_token(r->pool, val, "100-Continue"))) {
+ if (!(val = apr_table_get(r->headers_in, "Expect"))
+ || (ap_cstr_casecmp(val, "100-Continue") != 0 /* fast path */
+ && !ap_find_token(r->pool, val, "100-Continue"))) {
apr_table_mergen(r->headers_in, "Expect", "100-Continue");
}
}
+ else {
+ /* XXX: we should strip the 100-continue token only from the
+ * Expect header, but are there others actually used anywhere?
+ */
+ apr_table_unset(r->headers_in, "Expect");
+ }
/* X-Forwarded-*: handling
*
@@ -3730,8 +4564,6 @@ PROXY_DECLARE(int) ap_proxy_create_hdrbrgd(apr_pool_t *p,
*/
if (dconf->add_forwarded_headers) {
if (PROXYREQ_REVERSE == r->proxyreq) {
- const char *buf;
-
/* Add X-Forwarded-For: so that the upstream has a chance to
* determine, where the original request came from.
*/
@@ -3741,8 +4573,9 @@ PROXY_DECLARE(int) ap_proxy_create_hdrbrgd(apr_pool_t *p,
/* Add X-Forwarded-Host: so that upstream knows what the
* original request hostname was.
*/
- if ((buf = apr_table_get(r->headers_in, "Host"))) {
- apr_table_mergen(r->headers_in, "X-Forwarded-Host", buf);
+ if (saved_host) {
+ apr_table_mergen(r->headers_in, "X-Forwarded-Host",
+ saved_host);
}
/* Add X-Forwarded-Server: so that upstream knows what the
@@ -3754,79 +4587,315 @@ PROXY_DECLARE(int) ap_proxy_create_hdrbrgd(apr_pool_t *p,
}
}
+ /* Do we want to strip Proxy-Authorization ?
+ * If we haven't used it, then NO
+ * If we have used it then MAYBE: RFC2616 says we MAY propagate it.
+ * So let's make it configurable by env.
+ */
+ if (r->user != NULL /* we've authenticated */
+ && !apr_table_get(r->subprocess_env, "Proxy-Chain-Auth")) {
+ apr_table_unset(r->headers_in, "Proxy-Authorization");
+ }
+
+ /* for sub-requests, ignore freshness/expiry headers */
+ if (r->main) {
+ apr_table_unset(r->headers_in, "If-Match");
+ apr_table_unset(r->headers_in, "If-Modified-Since");
+ apr_table_unset(r->headers_in, "If-Range");
+ apr_table_unset(r->headers_in, "If-Unmodified-Since");
+ apr_table_unset(r->headers_in, "If-None-Match");
+ }
+
+ creds = apr_table_get(r->notes, "proxy-basic-creds");
+ if (creds) {
+ apr_table_mergen(r->headers_in, "Proxy-Authorization", creds);
+ }
+
+ /* run hook to fixup the request we are about to send */
proxy_run_fixups(r);
- if (ap_proxy_clear_connection(r, r->headers_in) < 0) {
- return HTTP_BAD_REQUEST;
+
+ /* We used to send `Host: ` always first, so let's keep it that
+ * way. No telling which legacy backend is relying on this.
+ * If proxy_run_fixups() changed the value, use it (though removal
+ * is ignored).
+ */
+ val = apr_table_get(r->headers_in, "Host");
+ if (val) {
+ apr_table_unset(r->headers_in, "Host");
+ host = val;
}
+ buf = apr_pstrcat(p, "Host: ", host, CRLF, NULL);
+ ap_xlate_proto_to_ascii(buf, strlen(buf));
+ e = apr_bucket_pool_create(buf, strlen(buf), p, c->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(header_brigade, e);
- /* send request headers */
+ /* Append the (remaining) headers to the brigade */
headers_in_array = apr_table_elts(r->headers_in);
headers_in = (const apr_table_entry_t *) headers_in_array->elts;
for (counter = 0; counter < headers_in_array->nelts; counter++) {
if (headers_in[counter].key == NULL
- || headers_in[counter].val == NULL
+ || headers_in[counter].val == NULL) {
+ continue;
+ }
- /* Already sent */
- || !strcasecmp(headers_in[counter].key, "Host")
+ buf = apr_pstrcat(p, headers_in[counter].key, ": ",
+ headers_in[counter].val, CRLF,
+ NULL);
+ ap_xlate_proto_to_ascii(buf, strlen(buf));
+ e = apr_bucket_pool_create(buf, strlen(buf), p, c->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(header_brigade, e);
+ }
- /* Clear out hop-by-hop request headers not to send
- * RFC2616 13.5.1 says we should strip these headers
- */
- || !strcasecmp(headers_in[counter].key, "Keep-Alive")
- || !strcasecmp(headers_in[counter].key, "TE")
- || !strcasecmp(headers_in[counter].key, "Trailer")
- || !strcasecmp(headers_in[counter].key, "Upgrade")
+cleanup:
+ r->headers_in = saved_headers_in;
+ return rc;
+}
- ) {
- continue;
+PROXY_DECLARE(int) ap_proxy_prefetch_input(request_rec *r,
+ proxy_conn_rec *backend,
+ apr_bucket_brigade *input_brigade,
+ apr_read_type_e block,
+ apr_off_t *bytes_read,
+ apr_off_t max_read)
+{
+ apr_pool_t *p = r->pool;
+ conn_rec *c = r->connection;
+ apr_bucket_brigade *temp_brigade;
+ apr_status_t status;
+ apr_off_t bytes;
+
+ *bytes_read = 0;
+ if (max_read < APR_BUCKET_BUFF_SIZE) {
+ max_read = APR_BUCKET_BUFF_SIZE;
+ }
+
+ /* Prefetch max_read bytes
+ *
+ * This helps us avoid any election of C-L vs. T-E
+ * request bodies, since we are willing to keep in
+ * memory this much data, in any case. This gives
+ * us an instant C-L election if the body is of some
+ * reasonable size.
+ */
+ temp_brigade = apr_brigade_create(p, input_brigade->bucket_alloc);
+
+ /* Account for saved input, if any. */
+ apr_brigade_length(input_brigade, 0, bytes_read);
+
+ /* Ensure we don't hit a wall where we have a buffer too small for
+ * ap_get_brigade's filters to fetch us another bucket; surrender
+ * once we hit 80 bytes (an arbitrary value) less than max_read.
+ */
+ while (*bytes_read < max_read - 80
+ && (APR_BRIGADE_EMPTY(input_brigade)
+ || !APR_BUCKET_IS_EOS(APR_BRIGADE_LAST(input_brigade)))) {
+ status = ap_get_brigade(r->input_filters, temp_brigade,
+ AP_MODE_READBYTES, block,
+ max_read - *bytes_read);
+ /* ap_get_brigade may return success with an empty brigade
+ * for a non-blocking read which would block
+ */
+ if (block == APR_NONBLOCK_READ
+ && ((status == APR_SUCCESS && APR_BRIGADE_EMPTY(temp_brigade))
+ || APR_STATUS_IS_EAGAIN(status))) {
+ break;
}
- /* Do we want to strip Proxy-Authorization ?
- * If we haven't used it, then NO
- * If we have used it then MAYBE: RFC2616 says we MAY propagate it.
- * So let's make it configurable by env.
+ if (status != APR_SUCCESS) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, status, r, APLOGNO(01095)
+ "prefetch request body failed to %pI (%s)"
+ " from %s (%s)", backend->addr,
+ backend->hostname ? backend->hostname : "",
+ c->client_ip, c->remote_host ? c->remote_host : "");
+ return ap_map_http_request_error(status, HTTP_BAD_REQUEST);
+ }
+
+ apr_brigade_length(temp_brigade, 1, &bytes);
+ *bytes_read += bytes;
+
+ /*
+ * Save temp_brigade in input_brigade. (At least) in the SSL case
+ * temp_brigade contains transient buckets whose data would get
+ * overwritten during the next call of ap_get_brigade in the loop.
+ * ap_save_brigade ensures these buckets are set aside.
+ * Calling ap_save_brigade with NULL as filter is OK, because
+ * input_brigade already has been created and does not need to get
+ * created by ap_save_brigade.
*/
- if (!strcasecmp(headers_in[counter].key,"Proxy-Authorization")) {
- if (r->user != NULL) { /* we've authenticated */
- if (!apr_table_get(r->subprocess_env, "Proxy-Chain-Auth")) {
- continue;
- }
+ status = ap_save_brigade(NULL, &input_brigade, &temp_brigade, p);
+ if (status != APR_SUCCESS) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, status, r, APLOGNO(01096)
+ "processing prefetched request body failed"
+ " to %pI (%s) from %s (%s)", backend->addr,
+ backend->hostname ? backend->hostname : "",
+ c->client_ip, c->remote_host ? c->remote_host : "");
+ return HTTP_INTERNAL_SERVER_ERROR;
+ }
+ }
+
+ return OK;
+}
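
For illustration only: a hedged sketch of how a scheme handler might use the prefetch helper to decide how to forward the request body. The 16K window, the function name and the surrounding context are illustrative assumptions, not taken from this patch.

    /* Sketch only: prefetch up to 16K of body to get an instant C-L
     * election for small bodies.
     */
    static int example_prefetch(request_rec *r, proxy_conn_rec *backend,
                                apr_bucket_brigade *input_brigade)
    {
        apr_off_t bytes_read = 0;
        int rv = ap_proxy_prefetch_input(r, backend, input_brigade,
                                         APR_BLOCK_READ, &bytes_read,
                                         16 * 1024);
        if (rv != OK) {
            return rv;
        }

        if (!APR_BRIGADE_EMPTY(input_brigade)
            && APR_BUCKET_IS_EOS(APR_BRIGADE_LAST(input_brigade))) {
            /* The whole body fits in the prefetch window: an exact
             * Content-Length can be sent to the backend. */
        }
        else {
            /* Body is larger than the window: stream or spool it. */
        }
        return OK;
    }
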
+
+PROXY_DECLARE(int) ap_proxy_read_input(request_rec *r,
+ proxy_conn_rec *backend,
+ apr_bucket_brigade *bb,
+ apr_off_t max_read)
+{
+ apr_bucket_alloc_t *bucket_alloc = bb->bucket_alloc;
+ apr_read_type_e block = (backend->connection) ? APR_NONBLOCK_READ
+ : APR_BLOCK_READ;
+ apr_status_t status;
+ int rv;
+
+ for (;;) {
+ apr_brigade_cleanup(bb);
+ status = ap_get_brigade(r->input_filters, bb, AP_MODE_READBYTES,
+ block, max_read);
+ if (block == APR_BLOCK_READ
+ || (!(status == APR_SUCCESS && APR_BRIGADE_EMPTY(bb))
+ && !APR_STATUS_IS_EAGAIN(status))) {
+ break;
+ }
+
+ /* Flush and retry (blocking) */
+ apr_brigade_cleanup(bb);
+ rv = ap_proxy_pass_brigade(bucket_alloc, r, backend,
+ backend->connection, bb, 1);
+ if (rv != OK) {
+ return rv;
+ }
+ block = APR_BLOCK_READ;
+ }
+
+ if (status != APR_SUCCESS) {
+ conn_rec *c = r->connection;
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, status, r, APLOGNO(02608)
+ "read request body failed to %pI (%s)"
+ " from %s (%s)", backend->addr,
+ backend->hostname ? backend->hostname : "",
+ c->client_ip, c->remote_host ? c->remote_host : "");
+ return ap_map_http_request_error(status, HTTP_BAD_REQUEST);
+ }
+
+ return OK;
+}
+
+PROXY_DECLARE(int) ap_proxy_spool_input(request_rec *r,
+ proxy_conn_rec *backend,
+ apr_bucket_brigade *input_brigade,
+ apr_off_t *bytes_spooled,
+ apr_off_t max_mem_spool)
+{
+ apr_pool_t *p = r->pool;
+ int seen_eos = 0, rv = OK;
+ apr_status_t status = APR_SUCCESS;
+ apr_bucket_alloc_t *bucket_alloc = input_brigade->bucket_alloc;
+ apr_bucket_brigade *body_brigade;
+ apr_bucket *e;
+ apr_off_t bytes, fsize = 0;
+ apr_file_t *tmpfile = NULL;
+
+ *bytes_spooled = 0;
+ body_brigade = apr_brigade_create(p, bucket_alloc);
+
+ do {
+ if (APR_BRIGADE_EMPTY(input_brigade)) {
+ rv = ap_proxy_read_input(r, backend, input_brigade,
+ HUGE_STRING_LEN);
+ if (rv != OK) {
+ return rv;
}
}
- /* Skip Transfer-Encoding and Content-Length for now.
- */
- if (!strcasecmp(headers_in[counter].key, "Transfer-Encoding")) {
- *old_te_val = headers_in[counter].val;
- continue;
+ /* If this brigade contains EOS, either stop or remove it. */
+ if (APR_BUCKET_IS_EOS(APR_BRIGADE_LAST(input_brigade))) {
+ seen_eos = 1;
}
- if (!strcasecmp(headers_in[counter].key, "Content-Length")) {
- *old_cl_val = headers_in[counter].val;
- continue;
+
+ apr_brigade_length(input_brigade, 1, &bytes);
+
+ if (*bytes_spooled + bytes > max_mem_spool) {
+ /* can't spool any more in memory; write latest brigade to disk */
+ if (tmpfile == NULL) {
+ const char *temp_dir;
+ char *template;
+
+ status = apr_temp_dir_get(&temp_dir, p);
+ if (status != APR_SUCCESS) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, status, r, APLOGNO(01089)
+ "search for temporary directory failed");
+ return HTTP_INTERNAL_SERVER_ERROR;
+ }
+ apr_filepath_merge(&template, temp_dir,
+ "modproxy.tmp.XXXXXX",
+ APR_FILEPATH_NATIVE, p);
+ status = apr_file_mktemp(&tmpfile, template, 0, p);
+ if (status != APR_SUCCESS) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, status, r, APLOGNO(01090)
+ "creation of temporary file in directory "
+ "%s failed", temp_dir);
+ return HTTP_INTERNAL_SERVER_ERROR;
+ }
+ }
+ for (e = APR_BRIGADE_FIRST(input_brigade);
+ e != APR_BRIGADE_SENTINEL(input_brigade);
+ e = APR_BUCKET_NEXT(e)) {
+ const char *data;
+ apr_size_t bytes_read, bytes_written;
+
+ apr_bucket_read(e, &data, &bytes_read, APR_BLOCK_READ);
+ status = apr_file_write_full(tmpfile, data, bytes_read, &bytes_written);
+ if (status != APR_SUCCESS) {
+ const char *tmpfile_name;
+
+ if (apr_file_name_get(&tmpfile_name, tmpfile) != APR_SUCCESS) {
+ tmpfile_name = "(unknown)";
+ }
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, status, r, APLOGNO(01091)
+ "write to temporary file %s failed",
+ tmpfile_name);
+ return HTTP_INTERNAL_SERVER_ERROR;
+ }
+ AP_DEBUG_ASSERT(bytes_read == bytes_written);
+ fsize += bytes_written;
+ }
+ apr_brigade_cleanup(input_brigade);
}
+ else {
- /* for sub-requests, ignore freshness/expiry headers */
- if (r->main) {
- if ( !strcasecmp(headers_in[counter].key, "If-Match")
- || !strcasecmp(headers_in[counter].key, "If-Modified-Since")
- || !strcasecmp(headers_in[counter].key, "If-Range")
- || !strcasecmp(headers_in[counter].key, "If-Unmodified-Since")
- || !strcasecmp(headers_in[counter].key, "If-None-Match")) {
- continue;
+ /*
+ * Save input_brigade in body_brigade. (At least) in the SSL case
+ * input_brigade contains transient buckets whose data would get
+ * overwritten during the next call of ap_get_brigade in the loop.
+ * ap_save_brigade ensures these buckets are set aside.
+ * Calling ap_save_brigade with NULL as filter is OK, because
+ * body_brigade already has been created and does not need to get
+ * created by ap_save_brigade.
+ */
+ status = ap_save_brigade(NULL, &body_brigade, &input_brigade, p);
+ if (status != APR_SUCCESS) {
+ return HTTP_INTERNAL_SERVER_ERROR;
}
+
}
- buf = apr_pstrcat(p, headers_in[counter].key, ": ",
- headers_in[counter].val, CRLF,
- NULL);
- ap_xlate_proto_to_ascii(buf, strlen(buf));
- e = apr_bucket_pool_create(buf, strlen(buf), p, c->bucket_alloc);
- APR_BRIGADE_INSERT_TAIL(header_brigade, e);
- }
+ *bytes_spooled += bytes;
+ } while (!seen_eos);
- /* Restore the original headers in (see comment above),
- * we won't modify them anymore.
- */
- r->headers_in = saved_headers_in;
+ APR_BRIGADE_CONCAT(input_brigade, body_brigade);
+ if (tmpfile) {
+ apr_brigade_insert_file(input_brigade, tmpfile, 0, fsize, p);
+ }
+ if (apr_table_get(r->subprocess_env, "proxy-sendextracrlf")) {
+ e = apr_bucket_immortal_create(CRLF_ASCII, 2, bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(input_brigade, e);
+ }
+ if (tmpfile) {
+ /* We dropped metadata buckets when spooling to tmpfile,
+ * terminate with EOS to allow for flushing in one go.
+ */
+ e = apr_bucket_eos_create(bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(input_brigade, e);
+ }
return OK;
}
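
For illustration only: a sketch of buffering a request body before forwarding, e.g. when the backend cannot accept a chunked upload. The 256K in-memory limit and the variable names are assumptions for the example.

    /* Sketch only: spool the whole body (memory and/or temp file), then
     * generate an exact Content-Length from the spooled size.
     */
    static int example_spool_body(request_rec *r, proxy_conn_rec *backend,
                                  apr_bucket_brigade *input_brigade)
    {
        apr_off_t bytes_spooled = 0;
        int rv = ap_proxy_spool_input(r, backend, input_brigade,
                                      &bytes_spooled, 256 * 1024);
        if (rv != OK) {
            return rv;
        }

        /* input_brigade now holds the complete body, terminated as the
         * helper leaves it, so the length is known up front.
         */
        apr_table_setn(r->headers_in, "Content-Length",
                       apr_off_t_toa(r->pool, bytes_spooled));
        return OK;
    }
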
@@ -3899,7 +4968,7 @@ PROXY_DECLARE(apr_port_t) ap_proxy_port_of_scheme(const char *scheme)
} else {
proxy_schemes_t *pscheme;
for (pscheme = pschemes; pscheme->name != NULL; ++pscheme) {
- if (strcasecmp(scheme, pscheme->name) == 0) {
+ if (ap_cstr_casecmp(scheme, pscheme->name) == 0) {
return pscheme->default_port;
}
}
@@ -3908,6 +4977,23 @@ PROXY_DECLARE(apr_port_t) ap_proxy_port_of_scheme(const char *scheme)
return 0;
}
+static APR_INLINE int ap_filter_should_yield(ap_filter_t *f)
+{
+ return f->c->data_in_output_filters;
+}
+
+static APR_INLINE int ap_filter_output_pending(conn_rec *c)
+{
+ ap_filter_t *f = c->output_filters;
+ while (f->next) {
+ f = f->next;
+ }
+ if (f->frec->filter_func.out_func(f, NULL)) {
+ return AP_FILTER_ERROR;
+ }
+ return c->data_in_output_filters ? OK : DECLINED;
+}
+
PROXY_DECLARE(apr_status_t) ap_proxy_buckets_lifetime_transform(request_rec *r,
apr_bucket_brigade *from,
apr_bucket_brigade *to)
@@ -3946,6 +5032,16 @@ PROXY_DECLARE(apr_status_t) ap_proxy_buckets_lifetime_transform(request_rec *r,
return rv;
}
+/* An arbitrary large value to address pathological case where we keep
+ * reading from one side only, without scheduling the other direction for
+ * too long. This can happen with large MTU and small read buffers, like
+ * micro-benchmarking huge files bidirectional transfer with client, proxy
+ * and backend on localhost for instance. Though we could just ignore the
+ * case and let the sender stop by itself at some point when/if it needs to
+ * receive data, or the receiver stop when/if it needs to send...
+ */
+#define PROXY_TRANSFER_MAX_READS 10000
+
PROXY_DECLARE(apr_status_t) ap_proxy_transfer_between_connections(
request_rec *r,
conn_rec *c_i,
@@ -3955,81 +5051,576 @@ PROXY_DECLARE(apr_status_t) ap_proxy_transfer_between_connections(
const char *name,
int *sent,
apr_off_t bsize,
- int after)
+ int flags)
{
apr_status_t rv;
+ int flush_each = 0;
+ unsigned int num_reads = 0;
#ifdef DEBUGGING
apr_off_t len;
#endif
- do {
+ /*
+ * Compat: FLUSH_EACH is the default (and zero) for legacy reasons, so it
+ * only applies when neither FLUSH_AFTER nor YIELD_PENDING is set; for the
+ * latter, flushing would defeat the purpose of checking for pending data
+ * (i.e. determining whether the output chain/stack is full so we can stop).
+ */
+ if (!(flags & (AP_PROXY_TRANSFER_FLUSH_AFTER |
+ AP_PROXY_TRANSFER_YIELD_PENDING))) {
+ flush_each = 1;
+ }
+
+ for (;;) {
apr_brigade_cleanup(bb_i);
rv = ap_get_brigade(c_i->input_filters, bb_i, AP_MODE_READBYTES,
APR_NONBLOCK_READ, bsize);
- if (rv == APR_SUCCESS) {
- if (c_o->aborted) {
- return APR_EPIPE;
- }
- if (APR_BRIGADE_EMPTY(bb_i)) {
- break;
+ if (rv != APR_SUCCESS) {
+ if (!APR_STATUS_IS_EAGAIN(rv) && !APR_STATUS_IS_EOF(rv)) {
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, rv, r, APLOGNO(03308)
+ "ap_proxy_transfer_between_connections: "
+ "error on %s - ap_get_brigade",
+ name);
+ if (rv == APR_INCOMPLETE) {
+ /* Don't return APR_INCOMPLETE, it'd mean "should yield"
+ * for the caller, while it means "incomplete body" here
+ * from ap_http_filter(), which is an error.
+ */
+ rv = APR_EGENERAL;
+ }
}
+ break;
+ }
+
+ if (c_o->aborted) {
+ apr_brigade_cleanup(bb_i);
+ flags &= ~AP_PROXY_TRANSFER_FLUSH_AFTER;
+ rv = APR_EPIPE;
+ break;
+ }
+ if (APR_BRIGADE_EMPTY(bb_i)) {
+ break;
+ }
#ifdef DEBUGGING
- len = -1;
- apr_brigade_length(bb_i, 0, &len);
- ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(03306)
- "ap_proxy_transfer_between_connections: "
- "read %" APR_OFF_T_FMT
- " bytes from %s", len, name);
+ len = -1;
+ apr_brigade_length(bb_i, 0, &len);
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(03306)
+ "ap_proxy_transfer_between_connections: "
+ "read %" APR_OFF_T_FMT
+ " bytes from %s", len, name);
#endif
- if (sent) {
- *sent = 1;
- }
- ap_proxy_buckets_lifetime_transform(r, bb_i, bb_o);
- if (!after) {
- apr_bucket *b;
+ if (sent) {
+ *sent = 1;
+ }
+ ap_proxy_buckets_lifetime_transform(r, bb_i, bb_o);
+ if (flush_each) {
+ apr_bucket *b;
+ /*
+ * Do not use ap_fflush here since this would cause the flush
+ * bucket to be sent in a separate brigade afterwards which
+ * causes some filters to set aside the buckets from the first
+ * brigade and process them when FLUSH arrives in the second
+ * brigade. As set asides of our transformed buckets involve
+ * memory copying we try to avoid this. If we have the flush
+ * bucket in the first brigade they directly process the
+ * buckets without setting them aside.
+ */
+ b = apr_bucket_flush_create(bb_o->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(bb_o, b);
+ }
+ rv = ap_pass_brigade(c_o->output_filters, bb_o);
+ apr_brigade_cleanup(bb_o);
+ if (rv != APR_SUCCESS) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(03307)
+ "ap_proxy_transfer_between_connections: "
+ "error on %s - ap_pass_brigade",
+ name);
+ flags &= ~AP_PROXY_TRANSFER_FLUSH_AFTER;
+ break;
+ }
- /*
- * Do not use ap_fflush here since this would cause the flush
- * bucket to be sent in a separate brigade afterwards which
- * causes some filters to set aside the buckets from the first
- * brigade and process them when the flush arrives in the second
- * brigade. As set asides of our transformed buckets involve
- * memory copying we try to avoid this. If we have the flush
- * bucket in the first brigade they directly process the
- * buckets without setting them aside.
- */
- b = apr_bucket_flush_create(bb_o->bucket_alloc);
- APR_BRIGADE_INSERT_TAIL(bb_o, b);
- }
- rv = ap_pass_brigade(c_o->output_filters, bb_o);
- if (rv != APR_SUCCESS) {
- ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(03307)
+ /* Yield if the output filters stack is full? This is to avoid
+ * blocking and give the caller a chance to POLLOUT async.
+ */
+ if ((flags & AP_PROXY_TRANSFER_YIELD_PENDING)
+ && ap_filter_should_yield(c_o->output_filters)) {
+ int rc = ap_filter_output_pending(c_o);
+ if (rc == OK) {
+ ap_log_rerror(APLOG_MARK, APLOG_TRACE2, 0, r,
"ap_proxy_transfer_between_connections: "
- "error on %s - ap_pass_brigade",
- name);
+ "yield (output pending)");
+ rv = APR_INCOMPLETE;
+ break;
}
- } else if (!APR_STATUS_IS_EAGAIN(rv) && !APR_STATUS_IS_EOF(rv)) {
- ap_log_rerror(APLOG_MARK, APLOG_DEBUG, rv, r, APLOGNO(03308)
+ if (rc != DECLINED) {
+ rv = AP_FILTER_ERROR;
+ break;
+ }
+ }
+
+ /* Yield if we keep hold of the thread for too long? This gives
+ * the caller a chance to schedule the other direction too.
+ */
+ if ((flags & AP_PROXY_TRANSFER_YIELD_MAX_READS)
+ && ++num_reads > PROXY_TRANSFER_MAX_READS) {
+ ap_log_rerror(APLOG_MARK, APLOG_TRACE2, 0, r,
"ap_proxy_transfer_between_connections: "
- "error on %s - ap_get_brigade",
- name);
+ "yield (max reads)");
+ rv = APR_SUCCESS;
+ break;
}
- } while (rv == APR_SUCCESS);
+ }
- if (after) {
+ if (flags & AP_PROXY_TRANSFER_FLUSH_AFTER) {
ap_fflush(c_o->output_filters, bb_o);
+ apr_brigade_cleanup(bb_o);
}
+ apr_brigade_cleanup(bb_i);
ap_log_rerror(APLOG_MARK, APLOG_TRACE2, rv, r,
- "ap_proxy_transfer_between_connections complete");
+ "ap_proxy_transfer_between_connections complete (%s %pI)",
+ (c_i == r->connection) ? "to" : "from",
+ (c_i == r->connection) ? c_o->client_addr
+ : c_i->client_addr);
if (APR_STATUS_IS_EAGAIN(rv)) {
rv = APR_SUCCESS;
}
-
return rv;
}
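
For illustration only: a sketch of a one-shot drain from the origin connection to the client using the FLUSH_AFTER flag, so data is flushed once at the end rather than after each read. The function name and brigade setup are assumptions; AP_IOBUFSIZE is the stock httpd buffer size.

    /* Sketch only: relay whatever is readable on the origin to the client
     * in one pass, flushing once when done.
     */
    static apr_status_t example_drain(request_rec *r, conn_rec *client,
                                      conn_rec *origin)
    {
        apr_bucket_brigade *bb_i = apr_brigade_create(r->pool,
                                                      origin->bucket_alloc);
        apr_bucket_brigade *bb_o = apr_brigade_create(r->pool,
                                                      client->bucket_alloc);

        return ap_proxy_transfer_between_connections(r, origin, client,
                                                     bb_i, bb_o, "origin",
                                                     NULL, AP_IOBUFSIZE,
                                                     AP_PROXY_TRANSFER_FLUSH_AFTER);
    }
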
+struct proxy_tunnel_conn {
+ /* the other side of the tunnel */
+ struct proxy_tunnel_conn *other;
+
+ conn_rec *c;
+ const char *name;
+
+ apr_pollfd_t *pfd;
+ apr_bucket_brigade *bb;
+
+ unsigned int down_in:1,
+ down_out:1;
+};
+
+PROXY_DECLARE(apr_status_t) ap_proxy_tunnel_create(proxy_tunnel_rec **ptunnel,
+ request_rec *r, conn_rec *c_o,
+ const char *scheme)
+{
+ apr_status_t rv;
+ conn_rec *c_i = r->connection;
+ apr_interval_time_t client_timeout = -1, origin_timeout = -1;
+ proxy_tunnel_rec *tunnel;
+
+ *ptunnel = NULL;
+
+ tunnel = apr_pcalloc(r->pool, sizeof(*tunnel));
+
+ rv = apr_pollset_create(&tunnel->pollset, 2, r->pool, APR_POLLSET_NOCOPY);
+ if (rv != APR_SUCCESS) {
+ return rv;
+ }
+
+ tunnel->r = r;
+ tunnel->scheme = apr_pstrdup(r->pool, scheme);
+ tunnel->client = apr_pcalloc(r->pool, sizeof(struct proxy_tunnel_conn));
+ tunnel->origin = apr_pcalloc(r->pool, sizeof(struct proxy_tunnel_conn));
+ tunnel->pfds = apr_array_make(r->pool, 2, sizeof(apr_pollfd_t));
+ tunnel->read_buf_size = ap_get_read_buf_size(r);
+ tunnel->client->other = tunnel->origin;
+ tunnel->origin->other = tunnel->client;
+ tunnel->timeout = -1;
+
+ tunnel->client->c = c_i;
+ tunnel->client->name = "client";
+ tunnel->client->bb = apr_brigade_create(c_i->pool, c_i->bucket_alloc);
+ tunnel->client->pfd = &APR_ARRAY_PUSH(tunnel->pfds, apr_pollfd_t);
+ tunnel->client->pfd->p = r->pool;
+ tunnel->client->pfd->desc_type = APR_NO_DESC;
+ rv = ap_get_pollfd_from_conn(tunnel->client->c,
+ tunnel->client->pfd, &client_timeout);
+ if (rv != APR_SUCCESS) {
+ return rv;
+ }
+ tunnel->client->pfd->client_data = tunnel->client;
+ if (tunnel->client->pfd->desc_type == APR_POLL_SOCKET) {
+ apr_socket_opt_set(tunnel->client->pfd->desc.s, APR_SO_NONBLOCK, 1);
+ }
+
+ tunnel->origin->c = c_o;
+ tunnel->origin->name = "origin";
+ tunnel->origin->bb = apr_brigade_create(c_o->pool, c_o->bucket_alloc);
+ tunnel->origin->pfd = &APR_ARRAY_PUSH(tunnel->pfds, apr_pollfd_t);
+ tunnel->origin->pfd->p = r->pool;
+ tunnel->origin->pfd->desc_type = APR_POLL_SOCKET;
+ tunnel->origin->pfd->desc.s = ap_get_conn_socket(c_o);
+ tunnel->origin->pfd->client_data = tunnel->origin;
+ apr_socket_timeout_get(tunnel->origin->pfd->desc.s, &origin_timeout);
+ apr_socket_opt_set(tunnel->origin->pfd->desc.s, APR_SO_NONBLOCK, 1);
+
+ /* Defaults to the largest timeout of both connections */
+ tunnel->timeout = (client_timeout >= 0 && client_timeout > origin_timeout ?
+ client_timeout : origin_timeout);
+
+ /* No coalescing filters */
+ ap_remove_output_filter_byhandle(c_i->output_filters,
+ "SSL/TLS Coalescing Filter");
+ ap_remove_output_filter_byhandle(c_o->output_filters,
+ "SSL/TLS Coalescing Filter");
+
+ /* A bidirectional non-HTTP stream will confuse mod_reqtimeout */
+ ap_remove_input_filter_byhandle(c_i->input_filters, "reqtimeout");
+
+ /* The input/output filter stacks should contain connection filters only */
+ r->input_filters = r->proto_input_filters = c_i->input_filters;
+ r->output_filters = r->proto_output_filters = c_i->output_filters;
+
+ /* Won't be reused after tunneling */
+ c_i->keepalive = AP_CONN_CLOSE;
+ c_o->keepalive = AP_CONN_CLOSE;
+
+ /* Disable half-close forwarding for this request? */
+ if (apr_table_get(r->subprocess_env, "proxy-nohalfclose")) {
+ tunnel->nohalfclose = 1;
+ }
+
+ if (tunnel->client->pfd->desc_type == APR_POLL_SOCKET) {
+ /* Both ends are sockets, the poll strategy is:
+ * - poll both sides POLLOUT
+ * - when one side is writable, remove the POLLOUT
+ * and add POLLIN to the other side.
+ * - tunnel arriving data, remove POLLIN from the source
+ * again and add POLLOUT to the receiving side
+ * - on EOF on read, remove the POLLIN from that side
+ * Repeat until both sides are down */
+ tunnel->client->pfd->reqevents = APR_POLLOUT | APR_POLLERR;
+ tunnel->origin->pfd->reqevents = APR_POLLOUT | APR_POLLERR;
+ if ((rv = apr_pollset_add(tunnel->pollset, tunnel->origin->pfd)) ||
+ (rv = apr_pollset_add(tunnel->pollset, tunnel->client->pfd))) {
+ return rv;
+ }
+ }
+ else if (tunnel->client->pfd->desc_type == APR_POLL_FILE) {
+ /* Input is a PIPE fd, the poll strategy is:
+ * - always POLLIN on origin
+ * - use socket strategy described above for client only
+ * otherwise the same
+ */
+ tunnel->client->pfd->reqevents = 0;
+ tunnel->origin->pfd->reqevents = APR_POLLIN | APR_POLLHUP |
+ APR_POLLOUT | APR_POLLERR;
+ if ((rv = apr_pollset_add(tunnel->pollset, tunnel->origin->pfd))) {
+ return rv;
+ }
+ }
+ else {
+ /* input is already closed, unusual, but we know nothing about
+ * the tunneled protocol. */
+ tunnel->client->down_in = 1;
+ tunnel->origin->pfd->reqevents = APR_POLLIN | APR_POLLHUP;
+ if ((rv = apr_pollset_add(tunnel->pollset, tunnel->origin->pfd))) {
+ return rv;
+ }
+ }
+
+ *ptunnel = tunnel;
+ return APR_SUCCESS;
+}
+
+static void add_pollset(apr_pollset_t *pollset, apr_pollfd_t *pfd,
+ apr_int16_t events)
+{
+ apr_status_t rv;
+
+ AP_DEBUG_ASSERT((pfd->reqevents & events) == 0);
+
+ if (pfd->reqevents) {
+ rv = apr_pollset_remove(pollset, pfd);
+ if (rv != APR_SUCCESS) {
+ AP_DEBUG_ASSERT(0);
+ }
+ }
+
+ if (events & APR_POLLIN) {
+ events |= APR_POLLHUP;
+ }
+ pfd->reqevents |= events | APR_POLLERR;
+ rv = apr_pollset_add(pollset, pfd);
+ if (rv != APR_SUCCESS) {
+ AP_DEBUG_ASSERT(0);
+ }
+}
+
+static void del_pollset(apr_pollset_t *pollset, apr_pollfd_t *pfd,
+ apr_int16_t events)
+{
+ apr_status_t rv;
+
+ AP_DEBUG_ASSERT((pfd->reqevents & events) != 0);
+
+ rv = apr_pollset_remove(pollset, pfd);
+ if (rv != APR_SUCCESS) {
+ AP_DEBUG_ASSERT(0);
+ return;
+ }
+
+ if (events & APR_POLLIN) {
+ events |= APR_POLLHUP;
+ }
+ if (pfd->reqevents & ~(events | APR_POLLERR)) {
+ pfd->reqevents &= ~events;
+ rv = apr_pollset_add(pollset, pfd);
+ if (rv != APR_SUCCESS) {
+ AP_DEBUG_ASSERT(0);
+ return;
+ }
+ }
+ else {
+ pfd->reqevents = 0;
+ }
+}
+
+static int proxy_tunnel_forward(proxy_tunnel_rec *tunnel,
+ struct proxy_tunnel_conn *in)
+{
+ struct proxy_tunnel_conn *out = in->other;
+ apr_status_t rv;
+ int sent = 0;
+
+ ap_log_rerror(APLOG_MARK, APLOG_TRACE8, 0, tunnel->r,
+ "proxy: %s: %s input ready",
+ tunnel->scheme, in->name);
+
+ rv = ap_proxy_transfer_between_connections(tunnel->r,
+ in->c, out->c,
+ in->bb, out->bb,
+ in->name, &sent,
+ tunnel->read_buf_size,
+ AP_PROXY_TRANSFER_YIELD_PENDING |
+ AP_PROXY_TRANSFER_YIELD_MAX_READS);
+ if (sent && out == tunnel->client) {
+ tunnel->replied = 1;
+ }
+ if (rv != APR_SUCCESS) {
+ if (APR_STATUS_IS_INCOMPLETE(rv)) {
+ /* Pause POLLIN while waiting for POLLOUT on the other
+ * side, hence avoid filling the output filters even
+ * more to avoid blocking there.
+ */
+ ap_log_rerror(APLOG_MARK, APLOG_TRACE5, 0, tunnel->r,
+ "proxy: %s: %s wait writable",
+ tunnel->scheme, out->name);
+ }
+ else if (APR_STATUS_IS_EOF(rv)) {
+ /* Stop POLLIN and wait for POLLOUT (flush) on the
+ * other side to shut it down.
+ */
+ ap_log_rerror(APLOG_MARK, APLOG_TRACE3, 0, tunnel->r,
+ "proxy: %s: %s read shutdown",
+ tunnel->scheme, in->name);
+ if (tunnel->nohalfclose) {
+ /* No half-close forwarding, we are done both ways as
+ * soon as one side shuts down.
+ */
+ return DONE;
+ }
+ in->down_in = 1;
+ }
+ else {
+ /* Real failure, bail out */
+ return HTTP_INTERNAL_SERVER_ERROR;
+ }
+
+ del_pollset(tunnel->pollset, in->pfd, APR_POLLIN);
+ if (out->pfd->desc_type == APR_POLL_SOCKET) {
+ /* if the output is a SOCKET, we can stop polling the input
+ * until the output signals POLLOUT again. */
+ add_pollset(tunnel->pollset, out->pfd, APR_POLLOUT);
+ }
+ else {
+ /* We can't use POLLOUT in this direction for the only
+ * APR_POLL_FILE case we have so far (mod_h2's "signal" pipe),
+ * we assume that the client's output filter chain will block/flush
+ * if necessary (i.e. no pending data), hence that the origin
+ * is EOF when reaching here. This direction is over. */
+ ap_assert(in->down_in && APR_STATUS_IS_EOF(rv));
+ ap_log_rerror(APLOG_MARK, APLOG_TRACE3, 0, tunnel->r,
+ "proxy: %s: %s write shutdown",
+ tunnel->scheme, out->name);
+ out->down_out = 1;
+ }
+ }
+
+ return OK;
+}
+
+PROXY_DECLARE(int) ap_proxy_tunnel_run(proxy_tunnel_rec *tunnel)
+{
+ int status = OK, rc;
+ request_rec *r = tunnel->r;
+ apr_pollset_t *pollset = tunnel->pollset;
+ struct proxy_tunnel_conn *client = tunnel->client,
+ *origin = tunnel->origin;
+ apr_interval_time_t timeout = tunnel->timeout >= 0 ? tunnel->timeout : -1;
+ const char *scheme = tunnel->scheme;
+ apr_status_t rv;
+
+ ap_log_rerror(APLOG_MARK, APLOG_TRACE1, 0, r, APLOGNO(10212)
+ "proxy: %s: tunnel running (timeout %lf)",
+ scheme, timeout >= 0 ? (double)timeout / APR_USEC_PER_SEC
+ : (double)-1.0);
+
+ /* Loop until both directions of the connection are closed,
+ * or a failure occurs.
+ */
+ do {
+ const apr_pollfd_t *results;
+ apr_int32_t nresults, i;
+
+ ap_log_rerror(APLOG_MARK, APLOG_TRACE8, 0, r,
+ "proxy: %s: polling (client=%hx, origin=%hx)",
+ scheme, client->pfd->reqevents, origin->pfd->reqevents);
+ do {
+ rv = apr_pollset_poll(pollset, timeout, &nresults, &results);
+ } while (APR_STATUS_IS_EINTR(rv));
+
+ if (rv != APR_SUCCESS) {
+ if (APR_STATUS_IS_TIMEUP(rv)) {
+ ap_log_rerror(APLOG_MARK, APLOG_TRACE2, 0, r, APLOGNO(10213)
+ "proxy: %s: polling timed out "
+ "(client=%hx, origin=%hx)",
+ scheme, client->pfd->reqevents,
+ origin->pfd->reqevents);
+ status = HTTP_GATEWAY_TIME_OUT;
+ }
+ else {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(10214)
+ "proxy: %s: polling failed", scheme);
+ status = HTTP_INTERNAL_SERVER_ERROR;
+ }
+ goto done;
+ }
+
+ ap_log_rerror(APLOG_MARK, APLOG_TRACE8, 0, r, APLOGNO(10215)
+ "proxy: %s: woken up, %i result(s)", scheme, nresults);
+
+ for (i = 0; i < nresults; i++) {
+ const apr_pollfd_t *pfd = &results[i];
+ struct proxy_tunnel_conn *tc = pfd->client_data;
+
+ ap_log_rerror(APLOG_MARK, APLOG_TRACE8, 0, r,
+ "proxy: %s: #%i: %s: %hx/%hx", scheme, i,
+ tc->name, pfd->rtnevents, tc->pfd->reqevents);
+
+ /* sanity check */
+ if (pfd->desc.s != client->pfd->desc.s
+ && pfd->desc.s != origin->pfd->desc.s) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(10222)
+ "proxy: %s: unknown socket in pollset", scheme);
+ status = HTTP_INTERNAL_SERVER_ERROR;
+ goto done;
+ }
+
+ if (!(pfd->rtnevents & (APR_POLLIN | APR_POLLOUT |
+ APR_POLLHUP | APR_POLLERR))) {
+ /* this catches POLLNVAL etc.. */
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(10220)
+ "proxy: %s: polling events error (%x)",
+ scheme, pfd->rtnevents);
+ status = HTTP_INTERNAL_SERVER_ERROR;
+ goto done;
+ }
+
+ /* We want to write if we asked for POLLOUT and got:
+ * - POLLOUT: the socket is ready for write;
+ * - !POLLIN: the socket is in an error state (POLLERR), so we let
+ * the user know by failing the write and logging it, OR the socket
+ * is already shut down for read (POLLHUP), so we have to shut it
+ * down for write too.
+ */
+ if ((tc->pfd->reqevents & APR_POLLOUT)
+ && ((pfd->rtnevents & APR_POLLOUT)
+ || !(tc->pfd->reqevents & APR_POLLIN)
+ || !(pfd->rtnevents & (APR_POLLIN | APR_POLLHUP)))) {
+ struct proxy_tunnel_conn *out = tc, *in = tc->other;
+
+ ap_log_rerror(APLOG_MARK, APLOG_TRACE8, 0, r,
+ "proxy: %s: %s output ready",
+ scheme, out->name);
+
+ rc = ap_filter_output_pending(out->c);
+ if (rc == OK) {
+ /* Keep polling out (only) */
+ continue;
+ }
+ if (rc != DECLINED) {
+ /* Real failure, bail out */
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(10221)
+ "proxy: %s: %s flushing failed (%i)",
+ scheme, out->name, rc);
+ status = rc;
+ goto done;
+ }
+
+ /* No more pending data. If the other side is not readable
+ * anymore it's time to shutdown for write (this direction
+ * is over). Otherwise back to normal business.
+ */
+ del_pollset(pollset, out->pfd, APR_POLLOUT);
+ if (in->down_in) {
+ ap_log_rerror(APLOG_MARK, APLOG_TRACE3, 0, r,
+ "proxy: %s: %s write shutdown",
+ scheme, out->name);
+ apr_socket_shutdown(out->pfd->desc.s, 1);
+ out->down_out = 1;
+ }
+ else {
+ ap_log_rerror(APLOG_MARK, APLOG_TRACE5, 0, r,
+ "proxy: %s: %s resume writable",
+ scheme, out->name);
+ add_pollset(pollset, in->pfd, APR_POLLIN);
+
+ /* Flush any pending input data now, we don't know when
+ * the next POLLIN will trigger and retaining data might
+ * deadlock the underlying protocol. We don't check for
+ * pending data first with ap_filter_input_pending() since
+ * the read from proxy_tunnel_forward() is nonblocking
+ * anyway and returning OK if there's no data.
+ */
+ rc = proxy_tunnel_forward(tunnel, in);
+ if (rc != OK) {
+ status = rc;
+ goto done;
+ }
+ }
+ }
+
+ /* We want to read if we asked for POLLIN|HUP and got:
+ * - POLLIN|HUP: the socket is ready for read or EOF (POLLHUP);
+ * - !POLLOUT: the socket is in an error state (POLLERR), so we let
+ * the user know by failing the read and logging it.
+ */
+ if ((tc->pfd->reqevents & APR_POLLIN)
+ && ((pfd->rtnevents & (APR_POLLIN | APR_POLLHUP))
+ || !(pfd->rtnevents & APR_POLLOUT))) {
+ rc = proxy_tunnel_forward(tunnel, tc);
+ if (rc != OK) {
+ status = rc;
+ goto done;
+ }
+ }
+ }
+ } while (!client->down_out || !origin->down_out);
+
+done:
+ ap_log_rerror(APLOG_MARK, APLOG_TRACE1, 0, r, APLOGNO(10223)
+ "proxy: %s: tunneling returns (%i)", scheme, status);
+ if (status == DONE) {
+ status = OK;
+ }
+ return status;
+}
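
For illustration only: a sketch of how a protocol handler (as the CONNECT or Upgrade handlers do) might switch to raw tunneling once the backend connection is established, using ap_proxy_tunnel_create() and ap_proxy_tunnel_run() above. The scheme string, timeout override and error mapping are assumptions.

    /* Sketch only: create a tunnel between the client connection and the
     * established backend connection, then forward until both directions
     * are closed.
     */
    static int example_run_tunnel(request_rec *r, proxy_conn_rec *backend)
    {
        proxy_tunnel_rec *tunnel;
        apr_status_t rv;

        rv = ap_proxy_tunnel_create(&tunnel, r, backend->connection,
                                    "example");
        if (rv != APR_SUCCESS) {
            return HTTP_INTERNAL_SERVER_ERROR;
        }

        /* Optionally override the poll timeout inherited from the two
         * connections before entering the forwarding loop.
         */
        tunnel->timeout = apr_time_from_sec(300);

        return ap_proxy_tunnel_run(tunnel);
    }
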
+
PROXY_DECLARE (const char *) ap_proxy_show_hcmethod(hcmethod_t method)
{
proxy_hcmethods_t *m = proxy_hcmethods;
@@ -4046,4 +5637,14 @@ void proxy_util_register_hooks(apr_pool_t *p)
APR_REGISTER_OPTIONAL_FN(ap_proxy_retry_worker);
APR_REGISTER_OPTIONAL_FN(ap_proxy_clear_connection);
APR_REGISTER_OPTIONAL_FN(proxy_balancer_get_best_worker);
+
+ {
+ apr_time_t *start_time = ap_retained_data_get("proxy_start_time");
+ if (start_time == NULL) {
+ start_time = ap_retained_data_create("proxy_start_time",
+ sizeof(*start_time));
+ *start_time = apr_time_now();
+ }
+ proxy_start_time = start_time;
+ }
}
diff --git a/modules/proxy/proxy_util.h b/modules/proxy/proxy_util.h
index 202be8d..bc131da 100644
--- a/modules/proxy/proxy_util.h
+++ b/modules/proxy/proxy_util.h
@@ -31,9 +31,9 @@ PROXY_DECLARE(int) ap_proxy_is_domainname(struct dirconn_entry *This, apr_pool_t
PROXY_DECLARE(int) ap_proxy_is_hostname(struct dirconn_entry *This, apr_pool_t *p);
PROXY_DECLARE(int) ap_proxy_is_word(struct dirconn_entry *This, apr_pool_t *p);
-PROXY_DECLARE_DATA extern int proxy_lb_workers;
-PROXY_DECLARE_DATA extern const apr_strmatch_pattern *ap_proxy_strmatch_path;
-PROXY_DECLARE_DATA extern const apr_strmatch_pattern *ap_proxy_strmatch_domain;
+extern PROXY_DECLARE_DATA int proxy_lb_workers;
+extern PROXY_DECLARE_DATA const apr_strmatch_pattern *ap_proxy_strmatch_path;
+extern PROXY_DECLARE_DATA const apr_strmatch_pattern *ap_proxy_strmatch_domain;
/**
* Register optional functions declared within proxy_util.c.