Diffstat (limited to 'modules/proxy/mod_proxy_http.c')
-rw-r--r-- | modules/proxy/mod_proxy_http.c | 1812
1 file changed, 945 insertions(+), 867 deletions(-)
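
For orientation, a minimal standalone sketch (not part of the patch) of the URL-scheme classification that the new get_url_scheme() helper in the diff below performs: recognize http/https/ws/wss prefixes, report whether the scheme implies TLS, and advance the URL pointer past the "scheme:" part. The helper name scheme_of() and the sample URLs are illustrative only, and plain POSIX strncasecmp() stands in for the APR routines used in the real code.

    #include <stdio.h>
    #include <strings.h>   /* strncasecmp (POSIX) */

    /* Hypothetical stand-in for the patch's get_url_scheme(): classify the
     * scheme prefix, set *is_ssl, and advance *url past "scheme:".
     * Returns NULL (and leaves *is_ssl at 0) for unrecognized schemes. */
    static const char *scheme_of(const char **url, int *is_ssl)
    {
        const char *u = *url;

        *is_ssl = 0;
        if (strncasecmp(u, "http:", 5) == 0)  { *url = u + 5; return "http"; }
        if (strncasecmp(u, "https:", 6) == 0) { *is_ssl = 1; *url = u + 6; return "https"; }
        if (strncasecmp(u, "ws:", 3) == 0)    { *url = u + 3; return "ws"; }
        if (strncasecmp(u, "wss:", 4) == 0)   { *is_ssl = 1; *url = u + 4; return "wss"; }
        return NULL;
    }

    int main(void)
    {
        const char *samples[] = { "http://backend/", "HTTPS://backend/",
                                  "ws://backend/chat", "wss://backend/chat",
                                  "ftp://backend/" };
        size_t i;

        for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
            const char *rest = samples[i];
            int is_ssl;
            const char *scheme = scheme_of(&rest, &is_ssl);

            /* Unrecognized schemes are reported as "(none)", mirroring the
             * DECLINED path taken by proxy_http_canon() in the patch. */
            printf("%-22s -> scheme=%-6s ssl=%d rest=%s\n", samples[i],
                   scheme ? scheme : "(none)", is_ssl, scheme ? rest : "-");
        }
        return 0;
    }
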
diff --git a/modules/proxy/mod_proxy_http.c b/modules/proxy/mod_proxy_http.c index 56af9a8..bd57b4d 100644 --- a/modules/proxy/mod_proxy_http.c +++ b/modules/proxy/mod_proxy_http.c @@ -31,36 +31,71 @@ static apr_status_t ap_proxy_http_cleanup(const char *scheme, static apr_status_t ap_proxygetline(apr_bucket_brigade *bb, char *s, int n, request_rec *r, int flags, int *read); +static const char *get_url_scheme(const char **url, int *is_ssl) +{ + const char *u = *url; + + switch (u[0]) { + case 'h': + case 'H': + if (strncasecmp(u + 1, "ttp", 3) == 0) { + if (u[4] == ':') { + *is_ssl = 0; + *url = u + 5; + return "http"; + } + if (apr_tolower(u[4]) == 's' && u[5] == ':') { + *is_ssl = 1; + *url = u + 6; + return "https"; + } + } + break; + + case 'w': + case 'W': + if (apr_tolower(u[1]) == 's') { + if (u[2] == ':') { + *is_ssl = 0; + *url = u + 3; + return "ws"; + } + if (apr_tolower(u[2]) == 's' && u[3] == ':') { + *is_ssl = 1; + *url = u + 4; + return "wss"; + } + } + break; + } + + *is_ssl = 0; + return NULL; +} + /* * Canonicalise http-like URLs. * scheme is the scheme for the URL * url is the URL starting with the first '/' - * def_port is the default port for this scheme. */ static int proxy_http_canon(request_rec *r, char *url) { + const char *base_url = url; char *host, *path, sport[7]; char *search = NULL; const char *err; const char *scheme; apr_port_t port, def_port; + int is_ssl = 0; - /* ap_port_of_scheme() */ - if (strncasecmp(url, "http:", 5) == 0) { - url += 5; - scheme = "http"; - } - else if (strncasecmp(url, "https:", 6) == 0) { - url += 6; - scheme = "https"; - } - else { + scheme = get_url_scheme((const char **)&url, &is_ssl); + if (!scheme) { return DECLINED; } - port = def_port = ap_proxy_port_of_scheme(scheme); + port = def_port = (is_ssl) ? DEFAULT_HTTPS_PORT : DEFAULT_HTTP_PORT; ap_log_rerror(APLOG_MARK, APLOG_TRACE1, 0, r, - "HTTP: canonicalising URL %s", url); + "HTTP: canonicalising URL %s", base_url); /* do syntatic check. * We break the URL into host, port, path, search @@ -68,7 +103,7 @@ static int proxy_http_canon(request_rec *r, char *url) err = ap_proxy_canon_netloc(r->pool, &url, NULL, NULL, &host, &port); if (err) { ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(01083) - "error parsing URL %s: %s", url, err); + "error parsing URL %s: %s", base_url, err); return HTTP_BAD_REQUEST; } @@ -86,9 +121,19 @@ static int proxy_http_canon(request_rec *r, char *url) if (apr_table_get(r->notes, "proxy-nocanon")) { path = url; /* this is the raw path */ } + else if (apr_table_get(r->notes, "proxy-noencode")) { + path = url; /* this is the encoded path already */ + search = r->args; + } else { - path = ap_proxy_canonenc(r->pool, url, strlen(url), - enc_path, 0, r->proxyreq); + core_dir_config *d = ap_get_core_module_config(r->per_dir_config); + int flags = d->allow_encoded_slashes && !d->decode_encoded_slashes ? PROXY_CANONENC_NOENCODEDSLASHENCODING : 0; + + path = ap_proxy_canonenc_ex(r->pool, url, strlen(url), enc_path, + flags, r->proxyreq); + if (!path) { + return HTTP_BAD_REQUEST; + } search = r->args; } break; @@ -96,9 +141,22 @@ static int proxy_http_canon(request_rec *r, char *url) path = url; break; } - - if (path == NULL) - return HTTP_BAD_REQUEST; + /* + * If we have a raw control character or a ' ' in nocanon path or + * r->args, correct encoding was missed. 
+ */ + if (path == url && *ap_scan_vchar_obstext(path)) { + ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(10415) + "To be forwarded path contains control " + "characters or spaces"); + return HTTP_FORBIDDEN; + } + if (search && *ap_scan_vchar_obstext(search)) { + ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(10408) + "To be forwarded query string contains control " + "characters or spaces"); + return HTTP_FORBIDDEN; + } if (port != def_port) apr_snprintf(sport, sizeof(sport), ":%d", port); @@ -108,8 +166,9 @@ static int proxy_http_canon(request_rec *r, char *url) if (ap_strchr_c(host, ':')) { /* if literal IPv6 address */ host = apr_pstrcat(r->pool, "[", host, "]", NULL); } + r->filename = apr_pstrcat(r->pool, "proxy:", scheme, "://", host, sport, - "/", path, (search) ? "?" : "", (search) ? search : "", NULL); + "/", path, (search) ? "?" : "", search, NULL); return OK; } @@ -216,486 +275,230 @@ static void add_cl(apr_pool_t *p, APR_BRIGADE_INSERT_TAIL(header_brigade, e); } -#define ASCII_CRLF "\015\012" -#define ASCII_ZERO "\060" - -static void terminate_headers(apr_bucket_alloc_t *bucket_alloc, - apr_bucket_brigade *header_brigade) -{ - apr_bucket *e; - - /* add empty line at the end of the headers */ - e = apr_bucket_immortal_create(ASCII_CRLF, 2, bucket_alloc); - APR_BRIGADE_INSERT_TAIL(header_brigade, e); -} - +#ifndef CRLF_ASCII +#define CRLF_ASCII "\015\012" +#endif +#ifndef ZERO_ASCII +#define ZERO_ASCII "\060" +#endif #define MAX_MEM_SPOOL 16384 -static int stream_reqbody_chunked(apr_pool_t *p, - request_rec *r, - proxy_conn_rec *p_conn, - conn_rec *origin, - apr_bucket_brigade *header_brigade, - apr_bucket_brigade *input_brigade) -{ - int seen_eos = 0, rv = OK; - apr_size_t hdr_len; - apr_off_t bytes; - apr_status_t status; - apr_bucket_alloc_t *bucket_alloc = r->connection->bucket_alloc; - apr_bucket_brigade *bb; - apr_bucket *e; - - add_te_chunked(p, bucket_alloc, header_brigade); - terminate_headers(bucket_alloc, header_brigade); +typedef enum { + RB_INIT = 0, + RB_STREAM_CL, + RB_STREAM_CHUNKED, + RB_SPOOL_CL +} rb_methods; - while (!APR_BUCKET_IS_EOS(APR_BRIGADE_FIRST(input_brigade))) - { - char chunk_hdr[20]; /* must be here due to transient bucket. */ +typedef struct { + apr_pool_t *p; + request_rec *r; + const char *proto; + proxy_worker *worker; + proxy_server_conf *sconf; - /* If this brigade contains EOS, either stop or remove it. */ - if (APR_BUCKET_IS_EOS(APR_BRIGADE_LAST(input_brigade))) { - seen_eos = 1; - - /* We can't pass this EOS to the output_filters. 
*/ - e = APR_BRIGADE_LAST(input_brigade); - apr_bucket_delete(e); - } + char server_portstr[32]; + proxy_conn_rec *backend; + conn_rec *origin; - apr_brigade_length(input_brigade, 1, &bytes); + apr_bucket_alloc_t *bucket_alloc; + apr_bucket_brigade *header_brigade; + apr_bucket_brigade *input_brigade; + char *old_cl_val, *old_te_val; + apr_off_t cl_val; - hdr_len = apr_snprintf(chunk_hdr, sizeof(chunk_hdr), - "%" APR_UINT64_T_HEX_FMT CRLF, - (apr_uint64_t)bytes); + rb_methods rb_method; - ap_xlate_proto_to_ascii(chunk_hdr, hdr_len); - e = apr_bucket_transient_create(chunk_hdr, hdr_len, - bucket_alloc); - APR_BRIGADE_INSERT_HEAD(input_brigade, e); + const char *upgrade; - /* - * Append the end-of-chunk CRLF - */ - e = apr_bucket_immortal_create(ASCII_CRLF, 2, bucket_alloc); - APR_BRIGADE_INSERT_TAIL(input_brigade, e); + unsigned int do_100_continue :1, + prefetch_nonblocking :1, + force10 :1; +} proxy_http_req_t; - if (header_brigade) { - /* we never sent the header brigade, so go ahead and - * take care of that now - */ - bb = header_brigade; +static int stream_reqbody(proxy_http_req_t *req) +{ + request_rec *r = req->r; + int seen_eos = 0, rv = OK; + apr_size_t hdr_len; + char chunk_hdr[20]; /* must be here due to transient bucket. */ + conn_rec *origin = req->origin; + proxy_conn_rec *p_conn = req->backend; + apr_bucket_alloc_t *bucket_alloc = req->bucket_alloc; + apr_bucket_brigade *header_brigade = req->header_brigade; + apr_bucket_brigade *input_brigade = req->input_brigade; + rb_methods rb_method = req->rb_method; + apr_off_t bytes, bytes_streamed = 0; + apr_bucket *e; - /* - * Save input_brigade in bb brigade. (At least) in the SSL case - * input_brigade contains transient buckets whose data would get - * overwritten during the next call of ap_get_brigade in the loop. - * ap_save_brigade ensures these buckets to be set aside. - * Calling ap_save_brigade with NULL as filter is OK, because - * bb brigade already has been created and does not need to get - * created by ap_save_brigade. - */ - status = ap_save_brigade(NULL, &bb, &input_brigade, p); - if (status != APR_SUCCESS) { - return HTTP_INTERNAL_SERVER_ERROR; + do { + if (APR_BRIGADE_EMPTY(input_brigade) + && APR_BRIGADE_EMPTY(header_brigade)) { + rv = ap_proxy_read_input(r, p_conn, input_brigade, + HUGE_STRING_LEN); + if (rv != OK) { + return rv; } - - header_brigade = NULL; - } - else { - bb = input_brigade; } - /* The request is flushed below this loop with chunk EOS header */ - rv = ap_proxy_pass_brigade(bucket_alloc, r, p_conn, origin, bb, 0); - if (rv != OK) { - return rv; - } - - if (seen_eos) { - break; - } - - status = ap_get_brigade(r->input_filters, input_brigade, - AP_MODE_READBYTES, APR_BLOCK_READ, - HUGE_STRING_LEN); - - if (status != APR_SUCCESS) { - conn_rec *c = r->connection; - ap_log_rerror(APLOG_MARK, APLOG_ERR, status, r, APLOGNO(02608) - "read request body failed to %pI (%s)" - " from %s (%s)", p_conn->addr, - p_conn->hostname ? p_conn->hostname: "", - c->client_ip, c->remote_host ? c->remote_host: ""); - return ap_map_http_request_error(status, HTTP_BAD_REQUEST); - } - } - - if (header_brigade) { - /* we never sent the header brigade because there was no request body; - * send it now - */ - bb = header_brigade; - } - else { if (!APR_BRIGADE_EMPTY(input_brigade)) { - /* input brigade still has an EOS which we can't pass to the output_filters. 
*/ - e = APR_BRIGADE_LAST(input_brigade); - AP_DEBUG_ASSERT(APR_BUCKET_IS_EOS(e)); - apr_bucket_delete(e); - } - bb = input_brigade; - } - - e = apr_bucket_immortal_create(ASCII_ZERO ASCII_CRLF - /* <trailers> */ - ASCII_CRLF, - 5, bucket_alloc); - APR_BRIGADE_INSERT_TAIL(bb, e); - - if (apr_table_get(r->subprocess_env, "proxy-sendextracrlf")) { - e = apr_bucket_immortal_create(ASCII_CRLF, 2, bucket_alloc); - APR_BRIGADE_INSERT_TAIL(bb, e); - } - - /* Now we have headers-only, or the chunk EOS mark; flush it */ - rv = ap_proxy_pass_brigade(bucket_alloc, r, p_conn, origin, bb, 1); - return rv; -} - -static int stream_reqbody_cl(apr_pool_t *p, - request_rec *r, - proxy_conn_rec *p_conn, - conn_rec *origin, - apr_bucket_brigade *header_brigade, - apr_bucket_brigade *input_brigade, - char *old_cl_val) -{ - int seen_eos = 0, rv = 0; - apr_status_t status = APR_SUCCESS; - apr_bucket_alloc_t *bucket_alloc = r->connection->bucket_alloc; - apr_bucket_brigade *bb; - apr_bucket *e; - apr_off_t cl_val = 0; - apr_off_t bytes; - apr_off_t bytes_streamed = 0; - - if (old_cl_val) { - char *endstr; - - add_cl(p, bucket_alloc, header_brigade, old_cl_val); - status = apr_strtoff(&cl_val, old_cl_val, &endstr, 10); - - if (status || *endstr || endstr == old_cl_val || cl_val < 0) { - ap_log_rerror(APLOG_MARK, APLOG_ERR, status, r, APLOGNO(01085) - "could not parse request Content-Length (%s)", - old_cl_val); - return HTTP_BAD_REQUEST; - } - } - terminate_headers(bucket_alloc, header_brigade); + /* If this brigade contains EOS, remove it and be done. */ + if (APR_BUCKET_IS_EOS(APR_BRIGADE_LAST(input_brigade))) { + seen_eos = 1; - while (!APR_BUCKET_IS_EOS(APR_BRIGADE_FIRST(input_brigade))) - { - apr_brigade_length(input_brigade, 1, &bytes); - bytes_streamed += bytes; + /* We can't pass this EOS to the output_filters. */ + e = APR_BRIGADE_LAST(input_brigade); + apr_bucket_delete(e); + } - /* If this brigade contains EOS, either stop or remove it. */ - if (APR_BUCKET_IS_EOS(APR_BRIGADE_LAST(input_brigade))) { - seen_eos = 1; + apr_brigade_length(input_brigade, 1, &bytes); + bytes_streamed += bytes; - /* We can't pass this EOS to the output_filters. */ - e = APR_BRIGADE_LAST(input_brigade); - apr_bucket_delete(e); + if (rb_method == RB_STREAM_CHUNKED) { + if (bytes) { + /* + * Prepend the size of the chunk + */ + hdr_len = apr_snprintf(chunk_hdr, sizeof(chunk_hdr), + "%" APR_UINT64_T_HEX_FMT CRLF, + (apr_uint64_t)bytes); + ap_xlate_proto_to_ascii(chunk_hdr, hdr_len); + e = apr_bucket_transient_create(chunk_hdr, hdr_len, + bucket_alloc); + APR_BRIGADE_INSERT_HEAD(input_brigade, e); + + /* + * Append the end-of-chunk CRLF + */ + e = apr_bucket_immortal_create(CRLF_ASCII, 2, bucket_alloc); + APR_BRIGADE_INSERT_TAIL(input_brigade, e); + } + if (seen_eos) { + /* + * Append the tailing 0-size chunk + */ + e = apr_bucket_immortal_create(ZERO_ASCII CRLF_ASCII + /* <trailers> */ + CRLF_ASCII, + 5, bucket_alloc); + APR_BRIGADE_INSERT_TAIL(input_brigade, e); + } + } + else if (rb_method == RB_STREAM_CL + && (bytes_streamed > req->cl_val + || (seen_eos && bytes_streamed < req->cl_val))) { + /* C-L != bytes streamed?!? + * + * Prevent HTTP Request/Response Splitting. + * + * We can't stream more (or less) bytes at the back end since + * they could be interpreted in separate requests (more bytes + * now would start a new request, less bytes would make the + * first bytes of the next request be part of the current one). 
+ * + * It can't happen from the client connection here thanks to + * ap_http_filter(), but some module's filter may be playing + * bad games, hence the HTTP_INTERNAL_SERVER_ERROR. + */ + ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(01086) + "read %s bytes of request body than expected " + "(got %" APR_OFF_T_FMT ", expected " + "%" APR_OFF_T_FMT ")", + bytes_streamed > req->cl_val ? "more" : "less", + bytes_streamed, req->cl_val); + return HTTP_INTERNAL_SERVER_ERROR; + } - if (apr_table_get(r->subprocess_env, "proxy-sendextracrlf")) { - e = apr_bucket_immortal_create(ASCII_CRLF, 2, bucket_alloc); + if (seen_eos && apr_table_get(r->subprocess_env, + "proxy-sendextracrlf")) { + e = apr_bucket_immortal_create(CRLF_ASCII, 2, bucket_alloc); APR_BRIGADE_INSERT_TAIL(input_brigade, e); } } - /* C-L < bytes streamed?!? - * We will error out after the body is completely - * consumed, but we can't stream more bytes at the - * back end since they would in part be interpreted - * as another request! If nothing is sent, then - * just send nothing. - * - * Prevents HTTP Response Splitting. + /* If we never sent the header brigade, go ahead and take care of + * that now by prepending it (once only since header_brigade will be + * empty afterward). */ - if (bytes_streamed > cl_val) { - ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(01086) - "read more bytes of request body than expected " - "(got %" APR_OFF_T_FMT ", expected %" APR_OFF_T_FMT ")", - bytes_streamed, cl_val); - return HTTP_INTERNAL_SERVER_ERROR; - } - - if (header_brigade) { - /* we never sent the header brigade, so go ahead and - * take care of that now - */ - bb = header_brigade; - - /* - * Save input_brigade in bb brigade. (At least) in the SSL case - * input_brigade contains transient buckets whose data would get - * overwritten during the next call of ap_get_brigade in the loop. - * ap_save_brigade ensures these buckets to be set aside. - * Calling ap_save_brigade with NULL as filter is OK, because - * bb brigade already has been created and does not need to get - * created by ap_save_brigade. - */ - status = ap_save_brigade(NULL, &bb, &input_brigade, p); - if (status != APR_SUCCESS) { - return HTTP_INTERNAL_SERVER_ERROR; - } - - header_brigade = NULL; - } - else { - bb = input_brigade; - } + APR_BRIGADE_PREPEND(input_brigade, header_brigade); - /* Once we hit EOS, we are ready to flush. */ - rv = ap_proxy_pass_brigade(bucket_alloc, r, p_conn, origin, bb, seen_eos); + /* Flush here on EOS because we won't ap_proxy_read_input() again. */ + rv = ap_proxy_pass_brigade(bucket_alloc, r, p_conn, origin, + input_brigade, seen_eos); if (rv != OK) { - return rv ; - } - - if (seen_eos) { - break; - } - - status = ap_get_brigade(r->input_filters, input_brigade, - AP_MODE_READBYTES, APR_BLOCK_READ, - HUGE_STRING_LEN); - - if (status != APR_SUCCESS) { - conn_rec *c = r->connection; - ap_log_rerror(APLOG_MARK, APLOG_ERR, status, r, APLOGNO(02609) - "read request body failed to %pI (%s)" - " from %s (%s)", p_conn->addr, - p_conn->hostname ? p_conn->hostname: "", - c->client_ip, c->remote_host ? 
c->remote_host: ""); - return ap_map_http_request_error(status, HTTP_BAD_REQUEST); + return rv; } - } - - if (bytes_streamed != cl_val) { - ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(01087) - "client %s given Content-Length did not match" - " number of body bytes read", r->connection->client_ip); - return HTTP_BAD_REQUEST; - } - - if (header_brigade) { - /* we never sent the header brigade since there was no request - * body; send it now with the flush flag - */ - bb = header_brigade; - return(ap_proxy_pass_brigade(bucket_alloc, r, p_conn, origin, bb, 1)); - } + } while (!seen_eos); return OK; } -static int spool_reqbody_cl(apr_pool_t *p, - request_rec *r, - proxy_conn_rec *p_conn, - conn_rec *origin, - apr_bucket_brigade *header_brigade, - apr_bucket_brigade *input_brigade, - int force_cl) +static void terminate_headers(proxy_http_req_t *req) { - int seen_eos = 0; - apr_status_t status; - apr_bucket_alloc_t *bucket_alloc = r->connection->bucket_alloc; - apr_bucket_brigade *body_brigade; + apr_bucket_alloc_t *bucket_alloc = req->bucket_alloc; apr_bucket *e; - apr_off_t bytes, bytes_spooled = 0, fsize = 0; - apr_file_t *tmpfile = NULL; - apr_off_t limit; - - body_brigade = apr_brigade_create(p, bucket_alloc); - - limit = ap_get_limit_req_body(r); - - while (!APR_BUCKET_IS_EOS(APR_BRIGADE_FIRST(input_brigade))) - { - /* If this brigade contains EOS, either stop or remove it. */ - if (APR_BUCKET_IS_EOS(APR_BRIGADE_LAST(input_brigade))) { - seen_eos = 1; + char *buf; - /* We can't pass this EOS to the output_filters. */ - e = APR_BRIGADE_LAST(input_brigade); - apr_bucket_delete(e); + /* + * Handle Connection: header if we do HTTP/1.1 request: + * If we plan to close the backend connection sent Connection: close + * otherwise sent Connection: Keep-Alive. + */ + if (!req->force10) { + if (req->upgrade) { + buf = apr_pstrdup(req->p, "Connection: Upgrade" CRLF); + ap_xlate_proto_to_ascii(buf, strlen(buf)); + e = apr_bucket_pool_create(buf, strlen(buf), req->p, bucket_alloc); + APR_BRIGADE_INSERT_TAIL(req->header_brigade, e); + + /* Tell the backend that it can upgrade the connection. */ + buf = apr_pstrcat(req->p, "Upgrade: ", req->upgrade, CRLF, NULL); } - - apr_brigade_length(input_brigade, 1, &bytes); - - if (bytes_spooled + bytes > MAX_MEM_SPOOL) { - /* - * LimitRequestBody does not affect Proxy requests (Should it?). - * Let it take effect if we decide to store the body in a - * temporary file on disk. 
- */ - if (limit && (bytes_spooled + bytes > limit)) { - ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(01088) - "Request body is larger than the configured " - "limit of %" APR_OFF_T_FMT, limit); - return HTTP_REQUEST_ENTITY_TOO_LARGE; - } - /* can't spool any more in memory; write latest brigade to disk */ - if (tmpfile == NULL) { - const char *temp_dir; - char *template; - - status = apr_temp_dir_get(&temp_dir, p); - if (status != APR_SUCCESS) { - ap_log_rerror(APLOG_MARK, APLOG_ERR, status, r, APLOGNO(01089) - "search for temporary directory failed"); - return HTTP_INTERNAL_SERVER_ERROR; - } - apr_filepath_merge(&template, temp_dir, - "modproxy.tmp.XXXXXX", - APR_FILEPATH_NATIVE, p); - status = apr_file_mktemp(&tmpfile, template, 0, p); - if (status != APR_SUCCESS) { - ap_log_rerror(APLOG_MARK, APLOG_ERR, status, r, APLOGNO(01090) - "creation of temporary file in directory " - "%s failed", temp_dir); - return HTTP_INTERNAL_SERVER_ERROR; - } - } - for (e = APR_BRIGADE_FIRST(input_brigade); - e != APR_BRIGADE_SENTINEL(input_brigade); - e = APR_BUCKET_NEXT(e)) { - const char *data; - apr_size_t bytes_read, bytes_written; - - apr_bucket_read(e, &data, &bytes_read, APR_BLOCK_READ); - status = apr_file_write_full(tmpfile, data, bytes_read, &bytes_written); - if (status != APR_SUCCESS) { - const char *tmpfile_name; - - if (apr_file_name_get(&tmpfile_name, tmpfile) != APR_SUCCESS) { - tmpfile_name = "(unknown)"; - } - ap_log_rerror(APLOG_MARK, APLOG_ERR, status, r, APLOGNO(01091) - "write to temporary file %s failed", - tmpfile_name); - return HTTP_INTERNAL_SERVER_ERROR; - } - AP_DEBUG_ASSERT(bytes_read == bytes_written); - fsize += bytes_written; - } - apr_brigade_cleanup(input_brigade); + else if (ap_proxy_connection_reusable(req->backend)) { + buf = apr_pstrdup(req->p, "Connection: Keep-Alive" CRLF); } else { - - /* - * Save input_brigade in body_brigade. (At least) in the SSL case - * input_brigade contains transient buckets whose data would get - * overwritten during the next call of ap_get_brigade in the loop. - * ap_save_brigade ensures these buckets to be set aside. - * Calling ap_save_brigade with NULL as filter is OK, because - * body_brigade already has been created and does not need to get - * created by ap_save_brigade. - */ - status = ap_save_brigade(NULL, &body_brigade, &input_brigade, p); - if (status != APR_SUCCESS) { - return HTTP_INTERNAL_SERVER_ERROR; - } - - } - - bytes_spooled += bytes; - - if (seen_eos) { - break; - } - - status = ap_get_brigade(r->input_filters, input_brigade, - AP_MODE_READBYTES, APR_BLOCK_READ, - HUGE_STRING_LEN); - - if (status != APR_SUCCESS) { - conn_rec *c = r->connection; - ap_log_rerror(APLOG_MARK, APLOG_ERR, status, r, APLOGNO(02610) - "read request body failed to %pI (%s)" - " from %s (%s)", p_conn->addr, - p_conn->hostname ? p_conn->hostname: "", - c->client_ip, c->remote_host ? 
c->remote_host: ""); - return ap_map_http_request_error(status, HTTP_BAD_REQUEST); + buf = apr_pstrdup(req->p, "Connection: close" CRLF); } + ap_xlate_proto_to_ascii(buf, strlen(buf)); + e = apr_bucket_pool_create(buf, strlen(buf), req->p, bucket_alloc); + APR_BRIGADE_INSERT_TAIL(req->header_brigade, e); } - if (bytes_spooled || force_cl) { - add_cl(p, bucket_alloc, header_brigade, apr_off_t_toa(p, bytes_spooled)); - } - terminate_headers(bucket_alloc, header_brigade); - APR_BRIGADE_CONCAT(header_brigade, body_brigade); - if (tmpfile) { - apr_brigade_insert_file(header_brigade, tmpfile, 0, fsize, p); - } - if (apr_table_get(r->subprocess_env, "proxy-sendextracrlf")) { - e = apr_bucket_immortal_create(ASCII_CRLF, 2, bucket_alloc); - APR_BRIGADE_INSERT_TAIL(header_brigade, e); - } - /* This is all a single brigade, pass with flush flagged */ - return(ap_proxy_pass_brigade(bucket_alloc, r, p_conn, origin, header_brigade, 1)); + /* add empty line at the end of the headers */ + e = apr_bucket_immortal_create(CRLF_ASCII, 2, bucket_alloc); + APR_BRIGADE_INSERT_TAIL(req->header_brigade, e); } -static -int ap_proxy_http_request(apr_pool_t *p, request_rec *r, - proxy_conn_rec *p_conn, proxy_worker *worker, - proxy_server_conf *conf, - apr_uri_t *uri, - char *url, char *server_portstr) +static int ap_proxy_http_prefetch(proxy_http_req_t *req, + apr_uri_t *uri, char *url) { + apr_pool_t *p = req->p; + request_rec *r = req->r; conn_rec *c = r->connection; - apr_bucket_alloc_t *bucket_alloc = c->bucket_alloc; - apr_bucket_brigade *header_brigade; - apr_bucket_brigade *input_brigade; - apr_bucket_brigade *temp_brigade; + proxy_conn_rec *p_conn = req->backend; + apr_bucket_alloc_t *bucket_alloc = req->bucket_alloc; + apr_bucket_brigade *header_brigade = req->header_brigade; + apr_bucket_brigade *input_brigade = req->input_brigade; apr_bucket *e; - char *buf; - apr_status_t status; - enum rb_methods {RB_INIT, RB_STREAM_CL, RB_STREAM_CHUNKED, RB_SPOOL_CL}; - enum rb_methods rb_method = RB_INIT; - char *old_cl_val = NULL; - char *old_te_val = NULL; apr_off_t bytes_read = 0; apr_off_t bytes; - int force10, rv; - conn_rec *origin = p_conn->connection; - - if (apr_table_get(r->subprocess_env, "force-proxy-request-1.0")) { - if (r->expecting_100) { - return HTTP_EXPECTATION_FAILED; - } - force10 = 1; - } else { - force10 = 0; - } + int rv; - header_brigade = apr_brigade_create(p, bucket_alloc); rv = ap_proxy_create_hdrbrgd(p, header_brigade, r, p_conn, - worker, conf, uri, url, server_portstr, - &old_cl_val, &old_te_val); + req->worker, req->sconf, + uri, url, req->server_portstr, + &req->old_cl_val, &req->old_te_val); if (rv != OK) { return rv; } - /* We have headers, let's figure out our request body... */ - input_brigade = apr_brigade_create(p, bucket_alloc); - /* sub-requests never use keepalives, and mustn't pass request bodies. * Because the new logic looks at input_brigade, we will self-terminate * input_brigade and jump past all of the request body logic... * Reading anything with ap_get_brigade is likely to consume the - * main request's body or read beyond EOS - which would be unplesant. + * main request's body or read beyond EOS - which would be unpleasant. * * An exception: when a kept_body is present, then subrequest CAN use * pass request bodies, and we DONT skip the body. @@ -703,9 +506,9 @@ int ap_proxy_http_request(apr_pool_t *p, request_rec *r, if (!r->kept_body && r->main) { /* XXX: Why DON'T sub-requests use keepalives? 
*/ p_conn->close = 1; - old_cl_val = NULL; - old_te_val = NULL; - rb_method = RB_STREAM_CL; + req->old_te_val = NULL; + req->old_cl_val = NULL; + req->rb_method = RB_STREAM_CL; e = apr_bucket_eos_create(input_brigade->bucket_alloc); APR_BRIGADE_INSERT_TAIL(input_brigade, e); goto skip_body; @@ -719,73 +522,29 @@ int ap_proxy_http_request(apr_pool_t *p, request_rec *r, * encoding has been done by the extensions' handler, and * do not modify add_te_chunked's logic */ - if (old_te_val && strcasecmp(old_te_val, "chunked") != 0) { + if (req->old_te_val && ap_cstr_casecmp(req->old_te_val, "chunked") != 0) { ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(01093) - "%s Transfer-Encoding is not supported", old_te_val); + "%s Transfer-Encoding is not supported", + req->old_te_val); return HTTP_INTERNAL_SERVER_ERROR; } - if (old_cl_val && old_te_val) { + if (req->old_cl_val && req->old_te_val) { ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(01094) "client %s (%s) requested Transfer-Encoding " "chunked body with Content-Length (C-L ignored)", c->client_ip, c->remote_host ? c->remote_host: ""); - old_cl_val = NULL; - origin->keepalive = AP_CONN_CLOSE; + req->old_cl_val = NULL; p_conn->close = 1; } - /* Prefetch MAX_MEM_SPOOL bytes - * - * This helps us avoid any election of C-L v.s. T-E - * request bodies, since we are willing to keep in - * memory this much data, in any case. This gives - * us an instant C-L election if the body is of some - * reasonable size. - */ - temp_brigade = apr_brigade_create(p, bucket_alloc); - do { - status = ap_get_brigade(r->input_filters, temp_brigade, - AP_MODE_READBYTES, APR_BLOCK_READ, - MAX_MEM_SPOOL - bytes_read); - if (status != APR_SUCCESS) { - ap_log_rerror(APLOG_MARK, APLOG_ERR, status, r, APLOGNO(01095) - "prefetch request body failed to %pI (%s)" - " from %s (%s)", - p_conn->addr, p_conn->hostname ? p_conn->hostname: "", - c->client_ip, c->remote_host ? c->remote_host: ""); - return ap_map_http_request_error(status, HTTP_BAD_REQUEST); - } - - apr_brigade_length(temp_brigade, 1, &bytes); - bytes_read += bytes; - - /* - * Save temp_brigade in input_brigade. (At least) in the SSL case - * temp_brigade contains transient buckets whose data would get - * overwritten during the next call of ap_get_brigade in the loop. - * ap_save_brigade ensures these buckets to be set aside. - * Calling ap_save_brigade with NULL as filter is OK, because - * input_brigade already has been created and does not need to get - * created by ap_save_brigade. - */ - status = ap_save_brigade(NULL, &input_brigade, &temp_brigade, p); - if (status != APR_SUCCESS) { - ap_log_rerror(APLOG_MARK, APLOG_ERR, status, r, APLOGNO(01096) - "processing prefetched request body failed" - " to %pI (%s) from %s (%s)", - p_conn->addr, p_conn->hostname ? p_conn->hostname: "", - c->client_ip, c->remote_host ? c->remote_host: ""); - return HTTP_INTERNAL_SERVER_ERROR; - } - - /* Ensure we don't hit a wall where we have a buffer too small - * for ap_get_brigade's filters to fetch us another bucket, - * surrender once we hit 80 bytes less than MAX_MEM_SPOOL - * (an arbitrary value.) - */ - } while ((bytes_read < MAX_MEM_SPOOL - 80) - && !APR_BUCKET_IS_EOS(APR_BRIGADE_LAST(input_brigade))); + rv = ap_proxy_prefetch_input(r, req->backend, input_brigade, + req->prefetch_nonblocking ? APR_NONBLOCK_READ + : APR_BLOCK_READ, + &bytes_read, MAX_MEM_SPOOL); + if (rv != OK) { + return rv; + } /* Use chunked request body encoding or send a content-length body? 
* @@ -812,7 +571,7 @@ int ap_proxy_http_request(apr_pool_t *p, request_rec *r, * To reduce server resource use, setenv proxy-sendchunked * * Then address specific servers with conditional setenv - * options to restore the default behavior where desireable. + * options to restore the default behavior where desirable. * * We have to compute content length by reading the entire request * body; if request body is not small, we'll spool the remaining @@ -822,7 +581,8 @@ int ap_proxy_http_request(apr_pool_t *p, request_rec *r, * is absent, and the filters are unchanged (the body won't * be resized by another content filter). */ - if (APR_BUCKET_IS_EOS(APR_BRIGADE_LAST(input_brigade))) { + if (!APR_BRIGADE_EMPTY(input_brigade) + && APR_BUCKET_IS_EOS(APR_BRIGADE_LAST(input_brigade))) { /* The whole thing fit, so our decision is trivial, use * the filtered bytes read from the client for the request * body Content-Length. @@ -830,34 +590,41 @@ int ap_proxy_http_request(apr_pool_t *p, request_rec *r, * If we expected no body, and read no body, do not set * the Content-Length. */ - if (old_cl_val || old_te_val || bytes_read) { - old_cl_val = apr_off_t_toa(r->pool, bytes_read); + if (req->old_cl_val || req->old_te_val || bytes_read) { + req->old_cl_val = apr_off_t_toa(r->pool, bytes_read); + req->cl_val = bytes_read; } - rb_method = RB_STREAM_CL; + req->rb_method = RB_STREAM_CL; } - else if (old_te_val) { - if (force10 + else if (req->old_te_val) { + if (req->force10 || (apr_table_get(r->subprocess_env, "proxy-sendcl") && !apr_table_get(r->subprocess_env, "proxy-sendchunks") && !apr_table_get(r->subprocess_env, "proxy-sendchunked"))) { - rb_method = RB_SPOOL_CL; + req->rb_method = RB_SPOOL_CL; } else { - rb_method = RB_STREAM_CHUNKED; + req->rb_method = RB_STREAM_CHUNKED; } } - else if (old_cl_val) { + else if (req->old_cl_val) { if (r->input_filters == r->proto_input_filters) { - rb_method = RB_STREAM_CL; + if (!ap_parse_strict_length(&req->cl_val, req->old_cl_val)) { + ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(01085) + "could not parse request Content-Length (%s)", + req->old_cl_val); + return HTTP_BAD_REQUEST; + } + req->rb_method = RB_STREAM_CL; } - else if (!force10 + else if (!req->force10 && (apr_table_get(r->subprocess_env, "proxy-sendchunks") || apr_table_get(r->subprocess_env, "proxy-sendchunked")) && !apr_table_get(r->subprocess_env, "proxy-sendcl")) { - rb_method = RB_STREAM_CHUNKED; + req->rb_method = RB_STREAM_CHUNKED; } else { - rb_method = RB_SPOOL_CL; + req->rb_method = RB_SPOOL_CL; } } else { @@ -865,44 +632,60 @@ int ap_proxy_http_request(apr_pool_t *p, request_rec *r, * requests, and has the behavior that it will not add any C-L * when the old_cl_val is NULL. */ - rb_method = RB_SPOOL_CL; + req->rb_method = RB_SPOOL_CL; } -/* Yes I hate gotos. This is the subrequest shortcut */ -skip_body: - /* - * Handle Connection: header if we do HTTP/1.1 request: - * If we plan to close the backend connection sent Connection: close - * otherwise sent Connection: Keep-Alive. - */ - if (!force10) { - if (!ap_proxy_connection_reusable(p_conn)) { - buf = apr_pstrdup(p, "Connection: close" CRLF); - } - else { - buf = apr_pstrdup(p, "Connection: Keep-Alive" CRLF); - } - ap_xlate_proto_to_ascii(buf, strlen(buf)); - e = apr_bucket_pool_create(buf, strlen(buf), p, c->bucket_alloc); - APR_BRIGADE_INSERT_TAIL(header_brigade, e); - } - - /* send the request body, if any. 
*/ - switch(rb_method) { + switch (req->rb_method) { case RB_STREAM_CHUNKED: - rv = stream_reqbody_chunked(p, r, p_conn, origin, header_brigade, - input_brigade); + add_te_chunked(req->p, bucket_alloc, header_brigade); break; + case RB_STREAM_CL: - rv = stream_reqbody_cl(p, r, p_conn, origin, header_brigade, - input_brigade, old_cl_val); + if (req->old_cl_val) { + add_cl(req->p, bucket_alloc, header_brigade, req->old_cl_val); + } break; + + default: /* => RB_SPOOL_CL */ + /* If we have to spool the body, do it now, before connecting or + * reusing the backend connection. + */ + rv = ap_proxy_spool_input(r, p_conn, input_brigade, + &bytes, MAX_MEM_SPOOL); + if (rv != OK) { + return rv; + } + if (bytes || req->old_te_val || req->old_cl_val) { + add_cl(p, bucket_alloc, header_brigade, apr_off_t_toa(p, bytes)); + } + } + +/* Yes I hate gotos. This is the subrequest shortcut */ +skip_body: + terminate_headers(req); + + return OK; +} + +static int ap_proxy_http_request(proxy_http_req_t *req) +{ + int rv; + request_rec *r = req->r; + + /* send the request header/body, if any. */ + switch (req->rb_method) { case RB_SPOOL_CL: - rv = spool_reqbody_cl(p, r, p_conn, origin, header_brigade, - input_brigade, (old_cl_val != NULL) - || (old_te_val != NULL) - || (bytes_read > 0)); + case RB_STREAM_CL: + case RB_STREAM_CHUNKED: + if (req->do_100_continue) { + rv = ap_proxy_pass_brigade(req->bucket_alloc, r, req->backend, + req->origin, req->header_brigade, 1); + } + else { + rv = stream_reqbody(req); + } break; + default: /* shouldn't be possible */ rv = HTTP_INTERNAL_SERVER_ERROR; @@ -910,10 +693,12 @@ skip_body: } if (rv != OK) { + conn_rec *c = r->connection; /* apr_status_t value has been logged in lower level method */ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(01097) "pass request body failed to %pI (%s) from %s (%s)", - p_conn->addr, p_conn->hostname ? p_conn->hostname: "", + req->backend->addr, + req->backend->hostname ? req->backend->hostname: "", c->client_ip, c->remote_host ? c->remote_host: ""); return rv; } @@ -950,6 +735,7 @@ static request_rec *make_fake_req(conn_rec *c, request_rec *r) request_rec *rp; apr_pool_create(&pool, c->pool); + apr_pool_tag(pool, "proxy_http_rp"); rp = apr_pcalloc(pool, sizeof(*r)); @@ -1001,14 +787,14 @@ static void process_proxy_header(request_rec *r, proxy_dir_conf *c, }; int i; for (i = 0; date_hdrs[i]; ++i) { - if (!strcasecmp(date_hdrs[i], key)) { + if (!ap_cstr_casecmp(date_hdrs[i], key)) { apr_table_add(r->headers_out, key, date_canon(r->pool, value)); return; } } for (i = 0; transform_hdrs[i].name; ++i) { - if (!strcasecmp(transform_hdrs[i].name, key)) { + if (!ap_cstr_casecmp(transform_hdrs[i].name, key)) { apr_table_add(r->headers_out, key, (*transform_hdrs[i].func)(r, c, value)); return; @@ -1025,7 +811,7 @@ static void process_proxy_header(request_rec *r, proxy_dir_conf *c, * any sense at all, since we depend on buffer still containing * what was read by ap_getline() upon return. */ -static void ap_proxy_read_headers(request_rec *r, request_rec *rr, +static apr_status_t ap_proxy_read_headers(request_rec *r, request_rec *rr, char *buffer, int size, conn_rec *c, int *pread_len) { @@ -1057,19 +843,26 @@ static void ap_proxy_read_headers(request_rec *r, request_rec *rr, rc = ap_proxygetline(tmp_bb, buffer, size, rr, AP_GETLINE_FOLD | AP_GETLINE_NOSPC_EOL, &len); - if (len <= 0) - break; - if (APR_STATUS_IS_ENOSPC(rc)) { - /* The header could not fit in the provided buffer, warn. - * XXX: falls through with the truncated header, 5xx instead? 
- */ - int trunc = (len > 128 ? 128 : len) / 2; - ap_log_rerror(APLOG_MARK, APLOG_WARNING, rc, r, APLOGNO(10124) - "header size is over the limit allowed by " - "ResponseFieldSize (%d bytes). " - "Bad response header: '%.*s[...]%s'", - size, trunc, buffer, buffer + len - trunc); + if (rc != APR_SUCCESS) { + if (APR_STATUS_IS_ENOSPC(rc)) { + int trunc = (len > 128 ? 128 : len) / 2; + ap_log_rerror(APLOG_MARK, APLOG_WARNING, rc, r, APLOGNO(10124) + "header size is over the limit allowed by " + "ResponseFieldSize (%d bytes). " + "Bad response header: '%.*s[...]%s'", + size, trunc, buffer, buffer + len - trunc); + } + else { + ap_log_rerror(APLOG_MARK, APLOG_WARNING, rc, r, APLOGNO(10404) + "Error reading headers from backend"); + } + r->headers_out = NULL; + return rc; + } + + if (len <= 0) { + break; } else { ap_log_rerror(APLOG_MARK, APLOG_TRACE4, 0, r, "%s", buffer); @@ -1092,7 +885,7 @@ static void ap_proxy_read_headers(request_rec *r, request_rec *rr, if (psc->badopt == bad_error) { /* Nope, it wasn't even an extra HTTP header. Give up. */ r->headers_out = NULL; - return; + return APR_EINVAL; } else if (psc->badopt == bad_body) { /* if we've already started loading headers_out, then @@ -1106,13 +899,13 @@ static void ap_proxy_read_headers(request_rec *r, request_rec *rr, "in headers returned by %s (%s)", r->uri, r->method); *pread_len = len; - return; + return APR_SUCCESS; } else { ap_log_rerror(APLOG_MARK, APLOG_WARNING, 0, r, APLOGNO(01099) "No HTTP headers returned by %s (%s)", r->uri, r->method); - return; + return APR_SUCCESS; } } } @@ -1142,6 +935,7 @@ static void ap_proxy_read_headers(request_rec *r, request_rec *rr, process_proxy_header(r, dconf, buffer, value); saw_headers = 1; } + return APR_SUCCESS; } @@ -1188,13 +982,48 @@ static int add_trailers(void *data, const char *key, const char *val) return 1; } +static int send_continue_body(proxy_http_req_t *req) +{ + int status; + + /* Send the request body (fully). */ + switch(req->rb_method) { + case RB_SPOOL_CL: + case RB_STREAM_CL: + case RB_STREAM_CHUNKED: + status = stream_reqbody(req); + break; + default: + /* Shouldn't happen */ + status = HTTP_INTERNAL_SERVER_ERROR; + break; + } + if (status != OK) { + conn_rec *c = req->r->connection; + ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, req->r, + APLOGNO(10154) "pass request body failed " + "to %pI (%s) from %s (%s) with status %i", + req->backend->addr, + req->backend->hostname ? req->backend->hostname : "", + c->client_ip, c->remote_host ? 
c->remote_host : "", + status); + req->backend->close = 1; + } + return status; +} + static -apr_status_t ap_proxy_http_process_response(apr_pool_t * p, request_rec *r, - proxy_conn_rec **backend_ptr, - proxy_worker *worker, - proxy_server_conf *conf, - char *server_portstr) { +int ap_proxy_http_process_response(proxy_http_req_t *req) +{ + apr_pool_t *p = req->p; + request_rec *r = req->r; conn_rec *c = r->connection; + proxy_worker *worker = req->worker; + proxy_conn_rec *backend = req->backend; + conn_rec *origin = req->origin; + int do_100_continue = req->do_100_continue; + int status; + char *buffer; char fixed_buffer[HUGE_STRING_LEN]; const char *buf; @@ -1217,19 +1046,11 @@ apr_status_t ap_proxy_http_process_response(apr_pool_t * p, request_rec *r, int proxy_status = OK; const char *original_status_line = r->status_line; const char *proxy_status_line = NULL; - proxy_conn_rec *backend = *backend_ptr; - conn_rec *origin = backend->connection; apr_interval_time_t old_timeout = 0; proxy_dir_conf *dconf; - int do_100_continue; dconf = ap_get_module_config(r->per_dir_config, &proxy_module); - do_100_continue = (worker->s->ping_timeout_set - && ap_request_has_body(r) - && (PROXYREQ_REVERSE == r->proxyreq) - && !(apr_table_get(r->subprocess_env, "force-proxy-request-1.0"))); - bb = apr_brigade_create(p, c->bucket_alloc); pass_bb = apr_brigade_create(p, c->bucket_alloc); @@ -1248,7 +1069,7 @@ apr_status_t ap_proxy_http_process_response(apr_pool_t * p, request_rec *r, } /* Setup for 100-Continue timeout if appropriate */ - if (do_100_continue) { + if (do_100_continue && worker->s->ping_timeout_set) { apr_socket_timeout_get(backend->sock, &old_timeout); if (worker->s->ping_timeout != old_timeout) { apr_status_t rc; @@ -1273,6 +1094,9 @@ apr_status_t ap_proxy_http_process_response(apr_pool_t * p, request_rec *r, origin->local_addr->port)); do { apr_status_t rc; + const char *upgrade = NULL; + int major = 0, minor = 0; + int toclose = 0; apr_brigade_cleanup(bb); @@ -1291,7 +1115,8 @@ apr_status_t ap_proxy_http_process_response(apr_pool_t * p, request_rec *r, apr_table_setn(r->notes, "proxy_timedout", "1"); ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(01103) "read timeout"); if (do_100_continue) { - return ap_proxyerror(r, HTTP_SERVICE_UNAVAILABLE, "Timeout on 100-Continue"); + return ap_proxyerror(r, HTTP_SERVICE_UNAVAILABLE, + "Timeout on 100-Continue"); } } /* @@ -1340,15 +1165,30 @@ apr_status_t ap_proxy_http_process_response(apr_pool_t * p, request_rec *r, ap_pass_brigade(r->output_filters, bb); /* Mark the backend connection for closing */ backend->close = 1; - /* Need to return OK to avoid sending an error message */ - return OK; + if (origin->keepalives) { + /* We already had a request on this backend connection and + * might just have run into a keepalive race. Hence we + * think positive and assume that the backend is fine and + * we do not need to signal an error on backend side. + */ + return OK; + } + /* + * This happened on our first request on this connection to the + * backend. This indicates something fishy with the backend. + * Return HTTP_INTERNAL_SERVER_ERROR to signal an unrecoverable + * server error. We do not worry about r->status code and a + * possible error response here as the ap_http_outerror_filter + * will fix all of this for us. 
+ */ + return HTTP_INTERNAL_SERVER_ERROR; } - else if (!c->keepalives) { - ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(01105) - "NOT Closing connection to client" - " although reading from backend server %s:%d" - " failed.", - backend->hostname, backend->port); + if (!c->keepalives) { + ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(01105) + "NOT Closing connection to client" + " although reading from backend server %s:%d" + " failed.", + backend->hostname, backend->port); } return ap_proxyerror(r, HTTP_BAD_GATEWAY, "Error reading from remote server"); @@ -1360,9 +1200,6 @@ apr_status_t ap_proxy_http_process_response(apr_pool_t * p, request_rec *r, * This is buggy if we ever see an HTTP/1.10 */ if (apr_date_checkmask(buffer, "HTTP/#.# ###*")) { - int major, minor; - int toclose; - major = buffer[5] - '0'; minor = buffer[7] - '0'; @@ -1371,8 +1208,8 @@ apr_status_t ap_proxy_http_process_response(apr_pool_t * p, request_rec *r, */ if ((major != 1) || (len >= response_field_size - 1)) { return ap_proxyerror(r, HTTP_BAD_GATEWAY, - apr_pstrcat(p, "Corrupt status line returned by remote " - "server: ", buffer, NULL)); + apr_pstrcat(p, "Corrupt status line returned " + "by remote server: ", buffer, NULL)); } backasswards = 0; @@ -1404,7 +1241,7 @@ apr_status_t ap_proxy_http_process_response(apr_pool_t * p, request_rec *r, /* read the headers. */ /* N.B. for HTTP/1.0 clients, we have to fold line-wrapped headers*/ - /* Also, take care with headers with multiple occurences. */ + /* Also, take care with headers with multiple occurrences. */ /* First, tuck away all already existing cookies */ save_table = apr_table_make(r->pool, 2); @@ -1412,10 +1249,10 @@ apr_status_t ap_proxy_http_process_response(apr_pool_t * p, request_rec *r, "Set-Cookie", NULL); /* shove the headers direct into r->headers_out */ - ap_proxy_read_headers(r, backend->r, buffer, response_field_size, origin, - &pread_len); + rc = ap_proxy_read_headers(r, backend->r, buffer, response_field_size, + origin, &pread_len); - if (r->headers_out == NULL) { + if (rc != APR_SUCCESS || r->headers_out == NULL) { ap_log_rerror(APLOG_MARK, APLOG_WARNING, 0, r, APLOGNO(01106) "bad HTTP/%d.%d header returned by %s (%s)", major, minor, r->uri, r->method); @@ -1443,9 +1280,14 @@ apr_status_t ap_proxy_http_process_response(apr_pool_t * p, request_rec *r, save_table); } + /* + * Save a possible Transfer-Encoding header as we need it later for + * ap_http_filter to know where to end. + */ + te = apr_table_get(r->headers_out, "Transfer-Encoding"); + /* can't have both Content-Length and Transfer-Encoding */ - if (apr_table_get(r->headers_out, "Transfer-Encoding") - && apr_table_get(r->headers_out, "Content-Length")) { + if (te && apr_table_get(r->headers_out, "Content-Length")) { /* * 2616 section 4.4, point 3: "if both Transfer-Encoding * and Content-Length are received, the latter MUST be @@ -1463,16 +1305,29 @@ apr_status_t ap_proxy_http_process_response(apr_pool_t * p, request_rec *r, backend->close = 1; } - /* - * Save a possible Transfer-Encoding header as we need it later for - * ap_http_filter to know where to end. - */ - te = apr_table_get(r->headers_out, "Transfer-Encoding"); + upgrade = apr_table_get(r->headers_out, "Upgrade"); + if (proxy_status == HTTP_SWITCHING_PROTOCOLS) { + if (!upgrade || !req->upgrade || (strcasecmp(req->upgrade, + upgrade) != 0)) { + return ap_proxyerror(r, HTTP_BAD_GATEWAY, + apr_pstrcat(p, "Unexpected Upgrade: ", + upgrade ? upgrade : "n/a", + " (expecting ", + req->upgrade ? 
req->upgrade + : "n/a", ")", + NULL)); + } + backend->close = 1; + } /* strip connection listed hop-by-hop headers from response */ toclose = ap_proxy_clear_connection_fn(r, r->headers_out); if (toclose) { backend->close = 1; + if (toclose < 0) { + return ap_proxyerror(r, HTTP_BAD_GATEWAY, + "Malformed connection header"); + } } if ((buf = apr_table_get(r->headers_out, "Content-Type"))) { @@ -1491,7 +1346,8 @@ apr_status_t ap_proxy_http_process_response(apr_pool_t * p, request_rec *r, r->headers_out = ap_proxy_clean_warnings(p, r->headers_out); /* handle Via header in response */ - if (conf->viaopt != via_off && conf->viaopt != via_block) { + if (req->sconf->viaopt != via_off + && req->sconf->viaopt != via_block) { const char *server_name = ap_get_server_name(r); /* If USE_CANONICAL_NAME_OFF was configured for the proxy virtual host, * then the server name returned by ap_get_server_name() is the @@ -1502,18 +1358,18 @@ apr_status_t ap_proxy_http_process_response(apr_pool_t * p, request_rec *r, server_name = r->server->server_hostname; /* create a "Via:" response header entry and merge it */ apr_table_addn(r->headers_out, "Via", - (conf->viaopt == via_full) + (req->sconf->viaopt == via_full) ? apr_psprintf(p, "%d.%d %s%s (%s)", HTTP_VERSION_MAJOR(r->proto_num), HTTP_VERSION_MINOR(r->proto_num), server_name, - server_portstr, + req->server_portstr, AP_SERVER_BASEVERSION) : apr_psprintf(p, "%d.%d %s%s", HTTP_VERSION_MAJOR(r->proto_num), HTTP_VERSION_MINOR(r->proto_num), server_name, - server_portstr) + req->server_portstr) ); } @@ -1522,27 +1378,25 @@ apr_status_t ap_proxy_http_process_response(apr_pool_t * p, request_rec *r, backend->close = 1; origin->keepalive = AP_CONN_CLOSE; } + else { + /* + * Keep track of the number of keepalives we processed on this + * connection. + */ + origin->keepalives++; + } + } else { /* an http/0.9 response */ backasswards = 1; - r->status = 200; + r->status = proxy_status = 200; r->status_line = "200 OK"; backend->close = 1; } if (ap_is_HTTP_INFO(proxy_status)) { - interim_response++; - /* Reset to old timeout iff we've adjusted it */ - if (do_100_continue - && (r->status == HTTP_CONTINUE) - && (worker->s->ping_timeout != old_timeout)) { - apr_socket_timeout_set(backend->sock, old_timeout); - } - } - else { - interim_response = 0; - } - if (interim_response) { + const char *policy = NULL; + /* RFC2616 tells us to forward this. * * OTOH, an interim response here may mean the backend @@ -1555,15 +1409,32 @@ apr_status_t ap_proxy_http_process_response(apr_pool_t * p, request_rec *r, * * So let's make it configurable. * - * We need to set "r->expecting_100 = 1" otherwise origin - * server behaviour will apply. + * We need to force "r->expecting_100 = 1" for RFC behaviour + * otherwise ap_send_interim_response() does nothing when + * the client did not ask for 100-continue. + * + * 101 Switching Protocol has its own configuration which + * shouldn't be interfered by "proxy-interim-response". */ - const char *policy = apr_table_get(r->subprocess_env, - "proxy-interim-response"); + if (proxy_status != HTTP_SWITCHING_PROTOCOLS) { + policy = apr_table_get(r->subprocess_env, + "proxy-interim-response"); + } ap_log_rerror(APLOG_MARK, APLOG_TRACE2, 0, r, - "HTTP: received interim %d response", r->status); + "HTTP: received interim %d response (policy: %s)", + r->status, policy ? 
policy : "n/a"); if (!policy - || (!strcasecmp(policy, "RFC") && ((r->expecting_100 = 1)))) { + || (!strcasecmp(policy, "RFC") + && (proxy_status != HTTP_CONTINUE + || (r->expecting_100 = 1)))) { + switch (proxy_status) { + case HTTP_SWITCHING_PROTOCOLS: + AP_DEBUG_ASSERT(upgrade != NULL); + apr_table_setn(r->headers_out, "Connection", "Upgrade"); + apr_table_setn(r->headers_out, "Upgrade", + apr_pstrdup(p, upgrade)); + break; + } ap_send_interim_response(r, 1); } /* FIXME: refine this to be able to specify per-response-status @@ -1573,57 +1444,144 @@ apr_status_t ap_proxy_http_process_response(apr_pool_t * p, request_rec *r, ap_log_rerror(APLOG_MARK, APLOG_WARNING, 0, r, APLOGNO(01108) "undefined proxy interim response policy"); } + interim_response++; } - /* Moved the fixups of Date headers and those affected by - * ProxyPassReverse/etc from here to ap_proxy_read_headers + else { + interim_response = 0; + } + + /* If we still do 100-continue (end-to-end or ping), either the + * current response is the expected "100 Continue" and we are done + * with this mode, or this is another interim response and we'll wait + * for the next one, or this is a final response and hence the backend + * did not honor our expectation. */ + if (do_100_continue && (!interim_response + || proxy_status == HTTP_CONTINUE)) { + /* RFC 7231 - Section 5.1.1 - Expect - Requirement for servers + * A server that responds with a final status code before + * reading the entire message body SHOULD indicate in that + * response whether it intends to close the connection or + * continue reading and discarding the request message. + * + * So, if this response is not an interim 100 Continue, we can + * avoid sending the request body if the backend responded with + * "Connection: close" or HTTP < 1.1, and either let the core + * discard it or the caller try another balancer member with the + * same body (given status 503, though not implemented yet). + */ + int do_send_body = (proxy_status == HTTP_CONTINUE + || (!toclose && major > 0 && minor > 0)); - if ((proxy_status == 401) && (dconf->error_override)) { - const char *buf; - const char *wa = "WWW-Authenticate"; - if ((buf = apr_table_get(r->headers_out, wa))) { - apr_table_set(r->err_headers_out, wa, buf); - } else { - ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(01109) - "origin server sent 401 without " - "WWW-Authenticate header"); + /* Reset to old timeout iff we've adjusted it. */ + if (worker->s->ping_timeout_set) { + apr_socket_timeout_set(backend->sock, old_timeout); } - } - r->sent_bodyct = 1; - /* - * Is it an HTTP/0.9 response or did we maybe preread the 1st line of - * the response? If so, load the extra data. These are 2 mutually - * exclusive possibilities, that just happen to require very - * similar behavior. - */ - if (backasswards || pread_len) { - apr_ssize_t cntr = (apr_ssize_t)pread_len; - if (backasswards) { - /*@@@FIXME: - * At this point in response processing of a 0.9 response, - * we don't know yet whether data is binary or not. - * mod_charset_lite will get control later on, so it cannot - * decide on the conversion of this buffer full of data. - * However, chances are that we are not really talking to an - * HTTP/0.9 server, but to some different protocol, therefore - * the best guess IMHO is to always treat the buffer as "text/x": + ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(10153) + "HTTP: %s100 continue sent by %pI (%s): " + "%ssending body (response: HTTP/%i.%i %s)", + proxy_status != HTTP_CONTINUE ? 
"no " : "", + backend->addr, + backend->hostname ? backend->hostname : "", + do_send_body ? "" : "not ", + major, minor, proxy_status_line); + + if (do_send_body) { + status = send_continue_body(req); + if (status != OK) { + return status; + } + } + else { + /* If we don't read the client connection any further, since + * there are pending data it should be "Connection: close"d to + * prevent reuse. We don't exactly c->keepalive = AP_CONN_CLOSE + * here though, because error_override or a potential retry on + * another backend could finally read that data and finalize + * the request processing, making keep-alive possible. So what + * we do is leaving r->expecting_100 alone, ap_set_keepalive() + * will do the right thing according to the final response and + * any later update of r->expecting_100. */ - ap_xlate_proto_to_ascii(buffer, len); - cntr = (apr_ssize_t)len; } - e = apr_bucket_heap_create(buffer, cntr, NULL, c->bucket_alloc); - APR_BRIGADE_INSERT_TAIL(bb, e); + + /* Once only! */ + do_100_continue = 0; + } + + if (proxy_status == HTTP_SWITCHING_PROTOCOLS) { + apr_status_t rv; + proxy_tunnel_rec *tunnel; + apr_interval_time_t client_timeout = -1, + backend_timeout = -1; + + /* If we didn't send the full body yet, do it now */ + if (do_100_continue) { + r->expecting_100 = 0; + status = send_continue_body(req); + if (status != OK) { + return status; + } + } + + ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(10239) + "HTTP: tunneling protocol %s", upgrade); + + rv = ap_proxy_tunnel_create(&tunnel, r, origin, upgrade); + if (rv != APR_SUCCESS) { + ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(10240) + "can't create tunnel for %s", upgrade); + return HTTP_INTERNAL_SERVER_ERROR; + } + + /* Set timeout to the highest configured for client or backend */ + apr_socket_timeout_get(backend->sock, &backend_timeout); + apr_socket_timeout_get(ap_get_conn_socket(c), &client_timeout); + if (backend_timeout >= 0 && backend_timeout > client_timeout) { + tunnel->timeout = backend_timeout; + } + else { + tunnel->timeout = client_timeout; + } + + /* Let proxy tunnel forward everything */ + status = ap_proxy_tunnel_run(tunnel); + + /* We are done with both connections */ + return DONE; + } + + if (interim_response) { + /* Already forwarded above, read next response */ + continue; } + + /* Moved the fixups of Date headers and those affected by + * ProxyPassReverse/etc from here to ap_proxy_read_headers + */ + /* PR 41646: get HEAD right with ProxyErrorOverride */ - if (ap_is_HTTP_ERROR(r->status) && dconf->error_override) { + if (ap_proxy_should_override(dconf, proxy_status)) { + if (proxy_status == HTTP_UNAUTHORIZED) { + const char *buf; + const char *wa = "WWW-Authenticate"; + if ((buf = apr_table_get(r->headers_out, wa))) { + apr_table_set(r->err_headers_out, wa, buf); + } else { + ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(01109) + "origin server sent 401 without " + "WWW-Authenticate header"); + } + } + /* clear r->status for override error, otherwise ErrorDocument * thinks that this is a recursive error, and doesn't find the * custom error page */ r->status = HTTP_OK; /* Discard body, if one is expected */ - if (!r->header_only && !AP_STATUS_IS_HEADER_ONLY(proxy_status)) { + if (!r->header_only && !AP_STATUS_IS_HEADER_ONLY(proxy_status)) { const char *tmp; /* Add minimal headers needed to allow http_in filter * detecting end of body without waiting for a timeout. 
*/ @@ -1646,11 +1604,49 @@ apr_status_t ap_proxy_http_process_response(apr_pool_t * p, request_rec *r, return proxy_status; } + /* Forward back Upgrade header if it matches the configured one(s), it + * may be an HTTP_UPGRADE_REQUIRED response or some other status where + * Upgrade makes sense to negotiate the protocol by other means. + */ + if (upgrade && ap_proxy_worker_can_upgrade(p, worker, upgrade, + (*req->proto == 'w') + ? "WebSocket" : NULL)) { + apr_table_setn(r->headers_out, "Connection", "Upgrade"); + apr_table_setn(r->headers_out, "Upgrade", apr_pstrdup(p, upgrade)); + } + + r->sent_bodyct = 1; + /* + * Is it an HTTP/0.9 response or did we maybe preread the 1st line of + * the response? If so, load the extra data. These are 2 mutually + * exclusive possibilities, that just happen to require very + * similar behavior. + */ + if (backasswards || pread_len) { + apr_ssize_t cntr = (apr_ssize_t)pread_len; + if (backasswards) { + /*@@@FIXME: + * At this point in response processing of a 0.9 response, + * we don't know yet whether data is binary or not. + * mod_charset_lite will get control later on, so it cannot + * decide on the conversion of this buffer full of data. + * However, chances are that we are not really talking to an + * HTTP/0.9 server, but to some different protocol, therefore + * the best guess IMHO is to always treat the buffer as "text/x": + */ + ap_xlate_proto_to_ascii(buffer, len); + cntr = (apr_ssize_t)len; + } + e = apr_bucket_heap_create(buffer, cntr, NULL, c->bucket_alloc); + APR_BRIGADE_INSERT_TAIL(bb, e); + } + /* send body - but only if a body is expected */ if ((!r->header_only) && /* not HEAD request */ - !interim_response && /* not any 1xx response */ (proxy_status != HTTP_NO_CONTENT) && /* not 204 */ (proxy_status != HTTP_NOT_MODIFIED)) { /* not 304 */ + apr_read_type_e mode; + int finish; /* We need to copy the output headers and treat them as input * headers as well. BUT, we need to do this before we remove @@ -1671,152 +1667,148 @@ apr_status_t ap_proxy_http_process_response(apr_pool_t * p, request_rec *r, ap_log_rerror(APLOG_MARK, APLOG_TRACE3, 0, r, "start body send"); - /* - * if we are overriding the errors, we can't put the content - * of the page into the brigade + /* read the body, pass it to the output filters */ + + /* Handle the case where the error document is itself reverse + * proxied and was successful. We must maintain any previous + * error status so that an underlying error (eg HTTP_NOT_FOUND) + * doesn't become an HTTP_OK. */ - if (!dconf->error_override || !ap_is_HTTP_ERROR(proxy_status)) { - /* read the body, pass it to the output filters */ - apr_read_type_e mode = APR_NONBLOCK_READ; - int finish = FALSE; - - /* Handle the case where the error document is itself reverse - * proxied and was successful. We must maintain any previous - * error status so that an underlying error (eg HTTP_NOT_FOUND) - * doesn't become an HTTP_OK. 
- */ - if (dconf->error_override && !ap_is_HTTP_ERROR(proxy_status) - && ap_is_HTTP_ERROR(original_status)) { - r->status = original_status; - r->status_line = original_status_line; - } + if (ap_proxy_should_override(dconf, original_status)) { + r->status = original_status; + r->status_line = original_status_line; + } - do { - apr_off_t readbytes; - apr_status_t rv; - - rv = ap_get_brigade(backend->r->input_filters, bb, - AP_MODE_READBYTES, mode, - conf->io_buffer_size); - - /* ap_get_brigade will return success with an empty brigade - * for a non-blocking read which would block: */ - if (mode == APR_NONBLOCK_READ - && (APR_STATUS_IS_EAGAIN(rv) - || (rv == APR_SUCCESS && APR_BRIGADE_EMPTY(bb)))) { - /* flush to the client and switch to blocking mode */ - e = apr_bucket_flush_create(c->bucket_alloc); - APR_BRIGADE_INSERT_TAIL(bb, e); - if (ap_pass_brigade(r->output_filters, bb) - || c->aborted) { - backend->close = 1; - break; - } - apr_brigade_cleanup(bb); - mode = APR_BLOCK_READ; - continue; - } - else if (rv == APR_EOF) { - backend->close = 1; - break; - } - else if (rv != APR_SUCCESS) { - /* In this case, we are in real trouble because - * our backend bailed on us. Pass along a 502 error - * error bucket - */ - ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01110) - "error reading response"); - ap_proxy_backend_broke(r, bb); - ap_pass_brigade(r->output_filters, bb); - backend_broke = 1; + mode = APR_NONBLOCK_READ; + finish = FALSE; + do { + apr_off_t readbytes; + apr_status_t rv; + + rv = ap_get_brigade(backend->r->input_filters, bb, + AP_MODE_READBYTES, mode, + req->sconf->io_buffer_size); + + /* ap_get_brigade will return success with an empty brigade + * for a non-blocking read which would block: */ + if (mode == APR_NONBLOCK_READ + && (APR_STATUS_IS_EAGAIN(rv) + || (rv == APR_SUCCESS && APR_BRIGADE_EMPTY(bb)))) { + /* flush to the client and switch to blocking mode */ + e = apr_bucket_flush_create(c->bucket_alloc); + APR_BRIGADE_INSERT_TAIL(bb, e); + if (ap_pass_brigade(r->output_filters, bb) + || c->aborted) { backend->close = 1; break; } - /* next time try a non-blocking read */ - mode = APR_NONBLOCK_READ; + apr_brigade_cleanup(bb); + mode = APR_BLOCK_READ; + continue; + } + else if (rv == APR_EOF) { + backend->close = 1; + break; + } + else if (rv != APR_SUCCESS) { + /* In this case, we are in real trouble because + * our backend bailed on us. 
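The loop above alternates read modes: it starts with a non-blocking read, and only when that read would block does it flush what it already holds to the client and retry in blocking mode, switching back to non-blocking after every successful read. A rough standalone model of that mode switching; read_chunk(), flush_to_client() and the CHUNK_* codes are made-up stand-ins for the ap_get_brigade()/ap_pass_brigade() calls used here:

    #include <stdio.h>

    enum { CHUNK_OK, CHUNK_WOULD_BLOCK, CHUNK_EOF };

    /* Made-up backend: would block on the 1st and 3rd non-blocking try,
     * then delivers data until the 7th call, which reports EOF. */
    static int read_chunk(int nonblocking, int *calls)
    {
        ++*calls;
        if (nonblocking && (*calls == 1 || *calls == 3))
            return CHUNK_WOULD_BLOCK;
        return (*calls > 6) ? CHUNK_EOF : CHUNK_OK;
    }

    static void flush_to_client(void) { puts("flush, then block"); }

    int main(void)
    {
        int calls = 0, nonblocking = 1;

        for (;;) {
            int rc = read_chunk(nonblocking, &calls);
            if (nonblocking && rc == CHUNK_WOULD_BLOCK) {
                flush_to_client();   /* don't sit on buffered data        */
                nonblocking = 0;     /* the next read is allowed to block */
                continue;
            }
            if (rc == CHUNK_EOF)
                break;               /* backend done: close/flag it       */
            puts("forward chunk");
            nonblocking = 1;         /* back to non-blocking next time    */
        }
        return 0;
    }

The point of the pattern is latency: data already read is pushed to the client before the proxy risks blocking on a slow backend.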
Pass along a 502 error + * error bucket + */ + ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01110) + "error reading response"); + apr_brigade_cleanup(bb); + ap_proxy_backend_broke(r, bb); + ap_pass_brigade(r->output_filters, bb); + backend_broke = 1; + backend->close = 1; + break; + } + /* next time try a non-blocking read */ + mode = APR_NONBLOCK_READ; - if (!apr_is_empty_table(backend->r->trailers_in)) { - apr_table_do(add_trailers, r->trailers_out, - backend->r->trailers_in, NULL); - apr_table_clear(backend->r->trailers_in); - } + if (!apr_is_empty_table(backend->r->trailers_in)) { + apr_table_do(add_trailers, r->trailers_out, + backend->r->trailers_in, NULL); + apr_table_clear(backend->r->trailers_in); + } - apr_brigade_length(bb, 0, &readbytes); - backend->worker->s->read += readbytes; + apr_brigade_length(bb, 0, &readbytes); + backend->worker->s->read += readbytes; #if DEBUGGING - { - ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(01111) - "readbytes: %#x", readbytes); - } + { + ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(01111) + "readbytes: %#x", readbytes); + } #endif - /* sanity check */ - if (APR_BRIGADE_EMPTY(bb)) { - break; - } + /* sanity check */ + if (APR_BRIGADE_EMPTY(bb)) { + break; + } - /* Switch the allocator lifetime of the buckets */ - ap_proxy_buckets_lifetime_transform(r, bb, pass_bb); - - /* found the last brigade? */ - if (APR_BUCKET_IS_EOS(APR_BRIGADE_LAST(pass_bb))) { - - /* signal that we must leave */ - finish = TRUE; - - /* the brigade may contain transient buckets that contain - * data that lives only as long as the backend connection. - * Force a setaside so these transient buckets become heap - * buckets that live as long as the request. - */ - for (e = APR_BRIGADE_FIRST(pass_bb); e - != APR_BRIGADE_SENTINEL(pass_bb); e - = APR_BUCKET_NEXT(e)) { - apr_bucket_setaside(e, r->pool); - } - - /* finally it is safe to clean up the brigade from the - * connection pool, as we have forced a setaside on all - * buckets. - */ - apr_brigade_cleanup(bb); - - /* make sure we release the backend connection as soon - * as we know we are done, so that the backend isn't - * left waiting for a slow client to eventually - * acknowledge the data. - */ - ap_proxy_release_connection(backend->worker->s->scheme, - backend, r->server); - /* Ensure that the backend is not reused */ - *backend_ptr = NULL; + /* Switch the allocator lifetime of the buckets */ + ap_proxy_buckets_lifetime_transform(r, bb, pass_bb); - } + /* found the last brigade? */ + if (APR_BUCKET_IS_EOS(APR_BRIGADE_LAST(pass_bb))) { - /* try send what we read */ - if (ap_pass_brigade(r->output_filters, pass_bb) != APR_SUCCESS - || c->aborted) { - /* Ack! Phbtt! Die! User aborted! */ - /* Only close backend if we haven't got all from the - * backend. Furthermore if *backend_ptr is NULL it is no - * longer safe to fiddle around with backend as it might - * be already in use by another thread. - */ - if (*backend_ptr) { - backend->close = 1; /* this causes socket close below */ - } - finish = TRUE; + /* signal that we must leave */ + finish = TRUE; + + /* the brigade may contain transient buckets that contain + * data that lives only as long as the backend connection. + * Force a setaside so these transient buckets become heap + * buckets that live as long as the request. 
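The setaside step described just above can be exercised on its own with apr-util's bucket API: a transient bucket merely borrows the caller's buffer, and apr_bucket_setaside() turns it into a bucket whose data lives as long as the given pool, which is exactly what is needed before the backend connection (and its allocator) is released. A small sketch assuming an installed APR/apr-util, error checking elided:

    #include <apr_general.h>
    #include <apr_buckets.h>

    int main(void)
    {
        apr_pool_t *pool;
        apr_bucket_alloc_t *ba;
        apr_bucket_brigade *bb;
        apr_bucket *b;
        char short_lived[] = "transient data";  /* only valid in this frame */

        apr_initialize();
        apr_pool_create(&pool, NULL);
        ba = apr_bucket_alloc_create(pool);
        bb = apr_brigade_create(pool, ba);

        APR_BRIGADE_INSERT_TAIL(bb,
            apr_bucket_transient_create(short_lived, sizeof(short_lived) - 1, ba));

        /* Same traversal as in the hunk above: force every bucket to copy
         * its data into pool-lifetime storage before the original buffer
         * (or backend connection) goes away. */
        for (b = APR_BRIGADE_FIRST(bb);
             b != APR_BRIGADE_SENTINEL(bb);
             b = APR_BUCKET_NEXT(b)) {
            apr_bucket_setaside(b, pool);
        }

        apr_brigade_destroy(bb);
        apr_terminate();
        return 0;
    }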
+ */ + for (e = APR_BRIGADE_FIRST(pass_bb); e + != APR_BRIGADE_SENTINEL(pass_bb); e + = APR_BUCKET_NEXT(e)) { + apr_bucket_setaside(e, r->pool); } - /* make sure we always clean up after ourselves */ - apr_brigade_cleanup(pass_bb); + /* finally it is safe to clean up the brigade from the + * connection pool, as we have forced a setaside on all + * buckets. + */ apr_brigade_cleanup(bb); - } while (!finish); - } + /* make sure we release the backend connection as soon + * as we know we are done, so that the backend isn't + * left waiting for a slow client to eventually + * acknowledge the data. + */ + ap_proxy_release_connection(backend->worker->s->scheme, + backend, r->server); + /* Ensure that the backend is not reused */ + req->backend = NULL; + + } + + /* try send what we read */ + if (ap_pass_brigade(r->output_filters, pass_bb) != APR_SUCCESS + || c->aborted) { + /* Ack! Phbtt! Die! User aborted! */ + /* Only close backend if we haven't got all from the + * backend. Furthermore if req->backend is NULL it is no + * longer safe to fiddle around with backend as it might + * be already in use by another thread. + */ + if (req->backend) { + /* this causes socket close below */ + req->backend->close = 1; + } + finish = TRUE; + } + + /* make sure we always clean up after ourselves */ + apr_brigade_cleanup(pass_bb); + apr_brigade_cleanup(bb); + + } while (!finish); + ap_log_rerror(APLOG_MARK, APLOG_TRACE2, 0, r, "end body send"); } - else if (!interim_response) { + else { ap_log_rerror(APLOG_MARK, APLOG_TRACE2, 0, r, "header only"); /* make sure we release the backend connection as soon @@ -1826,7 +1818,8 @@ apr_status_t ap_proxy_http_process_response(apr_pool_t * p, request_rec *r, */ ap_proxy_release_connection(backend->worker->s->scheme, backend, r->server); - *backend_ptr = NULL; + /* Ensure that the backend is not reused */ + req->backend = NULL; /* Pass EOS bucket down the filter chain. */ e = apr_bucket_eos_create(c->bucket_alloc); @@ -1880,62 +1873,108 @@ static int proxy_http_handler(request_rec *r, proxy_worker *worker, apr_port_t proxyport) { int status; - char server_portstr[32]; - char *scheme; - const char *proxy_function; - const char *u; + const char *scheme; + const char *u = url; + proxy_http_req_t *req = NULL; proxy_conn_rec *backend = NULL; + apr_bucket_brigade *input_brigade = NULL; int is_ssl = 0; conn_rec *c = r->connection; + proxy_dir_conf *dconf; int retry = 0; + char *locurl = url; + int toclose = 0; /* * Use a shorter-lived pool to reduce memory usage * and avoid a memory leak */ apr_pool_t *p = r->pool; - apr_uri_t *uri = apr_palloc(p, sizeof(*uri)); + apr_uri_t *uri; - /* find the scheme */ - u = strchr(url, ':'); - if (u == NULL || u[1] != '/' || u[2] != '/' || u[3] == '\0') - return DECLINED; - if ((u - url) > 14) - return HTTP_BAD_REQUEST; - scheme = apr_pstrmemdup(p, url, u - url); - /* scheme is lowercase */ - ap_str_tolower(scheme); - /* is it for us? 
*/ - if (strcmp(scheme, "https") == 0) { - if (!ap_proxy_ssl_enable(NULL)) { - ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(01112) - "HTTPS: declining URL %s (mod_ssl not configured?)", - url); - return DECLINED; - } - is_ssl = 1; - proxy_function = "HTTPS"; + scheme = get_url_scheme(&u, &is_ssl); + if (!scheme && proxyname && strncasecmp(url, "ftp:", 4) == 0) { + u = url + 4; + scheme = "ftp"; + is_ssl = 0; } - else if (!(strcmp(scheme, "http") == 0 || (strcmp(scheme, "ftp") == 0 && proxyname))) { - ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(01113) "HTTP: declining URL %s", - url); - return DECLINED; /* only interested in HTTP, or FTP via proxy */ + if (!scheme || u[0] != '/' || u[1] != '/' || u[2] == '\0') { + ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(01113) + "HTTP: declining URL %s", url); + return DECLINED; /* only interested in HTTP, WS or FTP via proxy */ } - else { - if (*scheme == 'h') - proxy_function = "HTTP"; - else - proxy_function = "FTP"; + if (is_ssl && !ap_ssl_has_outgoing_handlers()) { + ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(01112) + "HTTP: declining URL %s (mod_ssl not configured?)", url); + return DECLINED; } ap_log_rerror(APLOG_MARK, APLOG_TRACE1, 0, r, "HTTP: serving URL %s", url); - /* create space for state information */ - if ((status = ap_proxy_acquire_connection(proxy_function, &backend, - worker, r->server)) != OK) - goto cleanup; + if ((status = ap_proxy_acquire_connection(scheme, &backend, + worker, r->server)) != OK) { + return status; + } backend->is_ssl = is_ssl; + req = apr_pcalloc(p, sizeof(*req)); + req->p = p; + req->r = r; + req->sconf = conf; + req->worker = worker; + req->backend = backend; + req->proto = scheme; + req->bucket_alloc = c->bucket_alloc; + req->rb_method = RB_INIT; + + dconf = ap_get_module_config(r->per_dir_config, &proxy_module); + + if (apr_table_get(r->subprocess_env, "force-proxy-request-1.0")) { + req->force10 = 1; + } + else if (*worker->s->upgrade || *req->proto == 'w') { + /* Forward Upgrade header if it matches the configured one(s), + * the default being "WebSocket" for ws[s] schemes. + */ + const char *upgrade = apr_table_get(r->headers_in, "Upgrade"); + if (upgrade && ap_proxy_worker_can_upgrade(p, worker, upgrade, + (*req->proto == 'w') + ? "WebSocket" : NULL)) { + req->upgrade = upgrade; + } + } + + /* We possibly reuse input data prefetched in previous call(s), e.g. for a + * balancer fallback scenario, and in this case the 100 continue settings + * should be consistent between balancer members. If not, we need to ignore + * Proxy100Continue on=>off once we tried to prefetch already, otherwise + * the HTTP_IN filter won't send 100 Continue for us anymore, and we might + * deadlock with the client waiting for each other. Note that off=>on is + * not an issue because in this case r->expecting_100 is false (the 100 + * Continue is out already), but we make sure that prefetch will be + * nonblocking to avoid passing more time there. + */ + apr_pool_userdata_get((void **)&input_brigade, "proxy-req-input", p); + + /* Should we handle end-to-end or ping 100-continue? */ + if (!req->force10 + && ((r->expecting_100 && (dconf->forward_100_continue || input_brigade)) + || PROXY_SHOULD_PING_100_CONTINUE(worker, r))) { + /* Tell ap_proxy_create_hdrbrgd() to preserve/add the Expect header */ + apr_table_setn(r->notes, "proxy-100-continue", "1"); + req->do_100_continue = 1; + } + + /* Should we block while prefetching the body or try nonblocking and flush + * data to the backend ASAP? 
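Condensed, the two decisions just above reduce to a pair of boolean predicates. The sketch below restates them as plain functions; every parameter name is illustrative and none of them are real fields of proxy_http_req_t or the worker/dconf structures:

    #include <stdio.h>

    /* Preserve/emit "Expect: 100-continue" towards the backend, either
     * end-to-end or as a connection "ping"? */
    static int want_100_continue(int force10, int client_expects_100,
                                 int forward_100_continue_configured,
                                 int already_prefetched_input,
                                 int worker_pings_with_100)
    {
        return !force10
               && ((client_expects_100
                    && (forward_100_continue_configured
                        || already_prefetched_input))
                   || worker_pings_with_100);
    }

    /* Prefetch the request body without blocking (flush to the backend
     * as soon as possible)? */
    static int prefetch_nonblocking(int already_prefetched_input,
                                    int do_100_continue,
                                    int env_forces_nonblocking)
    {
        return already_prefetched_input
               || do_100_continue
               || env_forces_nonblocking;
    }

    int main(void)
    {
        /* e.g. client sent Expect: 100-continue and Proxy100Continue is on */
        printf("%d %d\n",
               want_100_continue(0, 1, 1, 0, 0),
               prefetch_nonblocking(0, 1, 0));   /* prints "1 1" */
        return 0;
    }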
+ */ + if (input_brigade + || req->do_100_continue + || apr_table_get(r->subprocess_env, + "proxy-prefetch-nonblocking")) { + req->prefetch_nonblocking = 1; + } + /* * In the case that we are handling a reverse proxy connection and this * is not a request that is coming over an already kept alive connection @@ -1949,20 +1988,68 @@ static int proxy_http_handler(request_rec *r, proxy_worker *worker, backend->close = 1; } + /* Step One: Determine Who To Connect To */ + uri = apr_palloc(p, sizeof(*uri)); + if ((status = ap_proxy_determine_connection(p, r, conf, worker, backend, + uri, &locurl, proxyname, + proxyport, req->server_portstr, + sizeof(req->server_portstr)))) + goto cleanup; + + /* The header is always (re-)built since it depends on worker settings, + * but the body can be fetched only once (even partially), so it's saved + * in between proxy_http_handler() calls should we come back here. + */ + req->header_brigade = apr_brigade_create(p, req->bucket_alloc); + if (input_brigade == NULL) { + input_brigade = apr_brigade_create(p, req->bucket_alloc); + apr_pool_userdata_setn(input_brigade, "proxy-req-input", NULL, p); + } + req->input_brigade = input_brigade; + + /* Prefetch (nonlocking) the request body so to increase the chance to get + * the whole (or enough) body and determine Content-Length vs chunked or + * spooled. By doing this before connecting or reusing the backend, we want + * to minimize the delay between this connection is considered alive and + * the first bytes sent (should the client's link be slow or some input + * filter retain the data). This is a best effort to prevent the backend + * from closing (from under us) what it thinks is an idle connection, hence + * to reduce to the minimum the unavoidable local is_socket_connected() vs + * remote keepalive race condition. + */ + if ((status = ap_proxy_http_prefetch(req, uri, locurl)) != OK) + goto cleanup; + + /* We need to reset backend->close now, since ap_proxy_http_prefetch() set + * it to disable the reuse of the connection *after* this request (no keep- + * alive), not to close any reusable connection before this request. However + * assure what is expected later by using a local flag and do the right thing + * when ap_proxy_connect_backend() below provides the connection to close. + */ + toclose = backend->close; + backend->close = 0; + while (retry < 2) { - char *locurl = url; + if (retry) { + char *newurl = url; - /* Step One: Determine Who To Connect To */ - if ((status = ap_proxy_determine_connection(p, r, conf, worker, backend, - uri, &locurl, proxyname, - proxyport, server_portstr, - sizeof(server_portstr))) != OK) - break; + /* Step One (again): (Re)Determine Who To Connect To */ + if ((status = ap_proxy_determine_connection(p, r, conf, worker, + backend, uri, &newurl, proxyname, proxyport, + req->server_portstr, sizeof(req->server_portstr)))) + break; + + /* The code assumes locurl is not changed during the loop, or + * ap_proxy_http_prefetch() would have to be called every time, + * and header_brigade be changed accordingly... 
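The close-flag handling described above amounts to: remember the "do not reuse afterwards" verdict from prefetch, clear it so the connection can still be used for this request, and re-apply it once the connection to be used is known. A compressed illustration with a hypothetical struct (not the real proxy_conn_rec):

    #include <stdio.h>

    struct conn { int close; };        /* hypothetical stand-in */

    int main(void)
    {
        struct conn backend = { 0 };

        backend.close = 1;             /* prefetch: no keep-alive afterwards */

        int toclose = backend.close;   /* remember the verdict               */
        backend.close = 0;             /* but don't prevent reuse right now  */

        /* ... check / (re)establish the backend connection here ... */

        if (toclose)
            backend.close = 1;         /* honoured once this exchange ends   */

        printf("close after request: %d\n", backend.close);
        return 0;
    }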
+ */ + AP_DEBUG_ASSERT(strcmp(newurl, locurl) == 0); + } /* Step Two: Make the Connection */ - if (ap_proxy_check_connection(proxy_function, backend, r->server, 1, + if (ap_proxy_check_connection(scheme, backend, r->server, 1, PROXY_CHECK_CONN_EMPTY) - && ap_proxy_connect_backend(proxy_function, backend, worker, + && ap_proxy_connect_backend(scheme, backend, worker, r->server)) { ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(01114) "HTTP: failed to make connection to backend: %s", @@ -1972,54 +2059,45 @@ static int proxy_http_handler(request_rec *r, proxy_worker *worker, } /* Step Three: Create conn_rec */ - if (!backend->connection) { - if ((status = ap_proxy_connection_create_ex(proxy_function, - backend, r)) != OK) - break; - /* - * On SSL connections set a note on the connection what CN is - * requested, such that mod_ssl can check if it is requested to do - * so. - */ - if (backend->ssl_hostname) { - apr_table_setn(backend->connection->notes, - "proxy-request-hostname", - backend->ssl_hostname); - } + if ((status = ap_proxy_connection_create_ex(scheme, backend, r)) != OK) + break; + req->origin = backend->connection; + + /* Don't recycle the connection if prefetch (above) told not to do so */ + if (toclose) { + backend->close = 1; + req->origin->keepalive = AP_CONN_CLOSE; } /* Step Four: Send the Request * On the off-chance that we forced a 100-Continue as a * kinda HTTP ping test, allow for retries */ - if ((status = ap_proxy_http_request(p, r, backend, worker, - conf, uri, locurl, server_portstr)) != OK) { - if ((status == HTTP_SERVICE_UNAVAILABLE) && worker->s->ping_timeout_set) { - backend->close = 1; + status = ap_proxy_http_request(req); + if (status != OK) { + if (req->do_100_continue && status == HTTP_SERVICE_UNAVAILABLE) { ap_log_rerror(APLOG_MARK, APLOG_INFO, status, r, APLOGNO(01115) - "HTTP: 100-Continue failed to %pI (%s)", - worker->cp->addr, worker->s->hostname_ex); + "HTTP: 100-Continue failed to %pI (%s:%d)", + backend->addr, backend->hostname, backend->port); + backend->close = 1; retry++; continue; - } else { - break; } - + break; } /* Step Five: Receive the Response... Fall thru to cleanup */ - status = ap_proxy_http_process_response(p, r, &backend, worker, - conf, server_portstr); + status = ap_proxy_http_process_response(req); break; } /* Step Six: Clean Up */ cleanup: - if (backend) { + if (req->backend) { if (status != OK) - backend->close = 1; - ap_proxy_http_cleanup(proxy_function, r, backend); + req->backend->close = 1; + ap_proxy_http_cleanup(scheme, r, req->backend); } return status; } |
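For reference, the retry policy in the handler's loop reduces to "retry at most once, and only when the 100-continue ping reported the connection as unusable (HTTP 503 from the request step with do_100_continue set)". A self-contained sketch of just that control flow; send_request() is a hypothetical stub standing in for ap_proxy_http_request():

    #include <stdio.h>

    enum { OK = 0, SERVICE_UNAVAILABLE = 503 };

    /* Stub: the first attempt hits a dead kept-alive connection,
     * the second (fresh) connection succeeds. */
    static int send_request(int attempt)
    {
        return (attempt == 0) ? SERVICE_UNAVAILABLE : OK;
    }

    int main(void)
    {
        int status = OK, retry = 0;

        while (retry < 2) {
            status = send_request(retry);
            if (status == SERVICE_UNAVAILABLE) {
                /* close the stale connection and try once more */
                retry++;
                continue;
            }
            break;
        }
        printf("final status: %d\n", status);
        return status == OK ? 0 : 1;
    }

Bounding the loop at two attempts keeps the behaviour predictable: a genuinely unavailable backend still fails fast instead of being retried indefinitely.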