From c54018b07a9085c0a3aedbc2bd01a85a3b3e20cf Mon Sep 17 00:00:00 2001 From: Daniel Baumann Date: Sat, 25 May 2024 06:41:27 +0200 Subject: Merging upstream version 2.4.59. Signed-off-by: Daniel Baumann --- modules/aaa/mod_auth_basic.c | 14 +- modules/aaa/mod_auth_digest.c | 91 +- modules/aaa/mod_auth_form.c | 13 +- modules/aaa/mod_authn_core.c | 61 +- modules/aaa/mod_authn_dbd.c | 6 +- modules/aaa/mod_authn_dbm.c | 55 +- modules/aaa/mod_authn_socache.c | 4 +- modules/aaa/mod_authnz_fcgi.c | 15 +- modules/aaa/mod_authnz_ldap.c | 45 +- modules/aaa/mod_authz_core.c | 22 +- modules/aaa/mod_authz_dbd.c | 4 +- modules/aaa/mod_authz_dbm.c | 32 +- modules/aaa/mod_authz_groupfile.c | 4 +- modules/arch/unix/config5.m4 | 9 + modules/arch/unix/mod_systemd.c | 119 + modules/arch/win32/mod_isapi.c | 9 +- modules/arch/win32/mod_isapi.h | 2 +- modules/cache/cache_storage.c | 5 +- modules/cache/cache_util.c | 2 +- modules/cache/config.m4 | 1 + modules/cache/mod_cache.c | 27 +- modules/cache/mod_cache_disk.c | 41 +- modules/cache/mod_cache_socache.c | 98 +- modules/cache/mod_file_cache.c | 2 +- modules/cache/mod_socache_dbm.c | 171 +- modules/cache/mod_socache_dc.c | 2 +- modules/cache/mod_socache_memcache.c | 13 - modules/cache/mod_socache_redis.c | 486 ++++ modules/cache/mod_socache_redis.dep | 5 + modules/cache/mod_socache_redis.dsp | 111 + modules/cache/mod_socache_redis.mak | 353 +++ modules/cache/mod_socache_shmcb.c | 27 +- modules/cluster/mod_heartmonitor.c | 22 +- modules/core/mod_macro.c | 2 +- modules/core/mod_so.c | 2 +- modules/core/mod_watchdog.c | 67 +- modules/database/mod_dbd.c | 3 + modules/dav/fs/dbm.c | 47 +- modules/dav/fs/lock.c | 2 +- modules/dav/fs/repos.c | 75 +- modules/dav/lock/locks.c | 28 +- modules/dav/main/mod_dav.c | 635 +++-- modules/dav/main/mod_dav.h | 132 +- modules/dav/main/props.c | 88 +- modules/dav/main/std_liveprop.c | 8 +- modules/dav/main/util.c | 87 +- modules/examples/mod_case_filter_in.c | 2 +- modules/examples/mod_example_hooks.c | 26 +- modules/filters/libsed.h | 12 +- modules/filters/mod_brotli.c | 15 +- modules/filters/mod_charset_lite.c | 4 +- modules/filters/mod_data.c | 4 +- modules/filters/mod_deflate.c | 349 +-- modules/filters/mod_ext_filter.c | 6 +- modules/filters/mod_include.c | 41 +- modules/filters/mod_proxy_html.c | 31 +- modules/filters/mod_reflector.c | 11 +- modules/filters/mod_reqtimeout.c | 237 +- modules/filters/mod_request.c | 28 +- modules/filters/mod_sed.c | 116 +- modules/filters/mod_substitute.c | 9 +- modules/filters/mod_xml2enc.c | 47 +- modules/filters/regexp.h | 4 +- modules/filters/sed1.c | 207 +- modules/generators/mod_autoindex.c | 10 +- modules/generators/mod_cgi.c | 75 +- modules/generators/mod_cgid.c | 35 +- modules/generators/mod_info.c | 37 +- modules/generators/mod_status.c | 48 +- modules/http/byterange_filter.c | 7 +- modules/http/http_core.c | 16 +- modules/http/http_etag.c | 349 ++- modules/http/http_filters.c | 290 ++- modules/http/http_protocol.c | 128 +- modules/http/http_request.c | 33 +- modules/http/mod_mime.c | 19 +- modules/http2/.gitignore | 35 - modules/http2/config2.m4 | 22 +- modules/http2/h2.h | 85 +- modules/http2/h2_alt_svc.c | 131 -- modules/http2/h2_alt_svc.h | 40 - modules/http2/h2_bucket_beam.c | 1470 +++++------- modules/http2/h2_bucket_beam.h | 375 +-- modules/http2/h2_bucket_eos.c | 17 +- modules/http2/h2_c1.c | 323 +++ modules/http2/h2_c1.h | 83 + modules/http2/h2_c1_io.c | 559 +++++ modules/http2/h2_c1_io.h | 101 + modules/http2/h2_c2.c | 942 ++++++++ modules/http2/h2_c2.h | 57 + 
modules/http2/h2_c2_filter.c | 1056 +++++++++ modules/http2/h2_c2_filter.h | 68 + modules/http2/h2_config.c | 859 +++++-- modules/http2/h2_config.h | 74 +- modules/http2/h2_conn.c | 370 --- modules/http2/h2_conn.h | 77 - modules/http2/h2_conn_ctx.c | 123 + modules/http2/h2_conn_ctx.h | 100 + modules/http2/h2_conn_io.c | 389 --- modules/http2/h2_conn_io.h | 77 - modules/http2/h2_ctx.c | 121 - modules/http2/h2_ctx.h | 78 - modules/http2/h2_filter.c | 568 ----- modules/http2/h2_filter.h | 73 - modules/http2/h2_from_h1.c | 875 ------- modules/http2/h2_from_h1.h | 50 - modules/http2/h2_h2.c | 765 ------ modules/http2/h2_h2.h | 79 - modules/http2/h2_headers.c | 99 +- modules/http2/h2_headers.h | 50 +- modules/http2/h2_mplx.c | 1834 +++++++-------- modules/http2/h2_mplx.h | 314 +-- modules/http2/h2_ngn_shed.c | 392 --- modules/http2/h2_ngn_shed.h | 79 - modules/http2/h2_protocol.c | 485 ++++ modules/http2/h2_protocol.h | 56 + modules/http2/h2_proxy_session.c | 423 ++-- modules/http2/h2_proxy_session.h | 18 +- modules/http2/h2_proxy_util.c | 40 +- modules/http2/h2_proxy_util.h | 7 +- modules/http2/h2_push.c | 339 +-- modules/http2/h2_push.h | 86 +- modules/http2/h2_request.c | 494 +++- modules/http2/h2_request.h | 19 +- modules/http2/h2_session.c | 1816 ++++++-------- modules/http2/h2_session.h | 118 +- modules/http2/h2_stream.c | 1520 +++++++++--- modules/http2/h2_stream.h | 129 +- modules/http2/h2_switch.c | 85 +- modules/http2/h2_task.c | 769 ------ modules/http2/h2_task.h | 127 - modules/http2/h2_util.c | 983 ++++---- modules/http2/h2_util.h | 161 +- modules/http2/h2_version.h | 4 +- modules/http2/h2_workers.c | 755 ++++-- modules/http2/h2_workers.h | 131 +- modules/http2/h2_ws.c | 362 +++ modules/http2/h2_ws.h | 35 + modules/http2/mod_http2.c | 164 +- modules/http2/mod_http2.dep | 2 - modules/http2/mod_http2.dsp | 34 +- modules/http2/mod_http2.h | 75 +- modules/http2/mod_http2.mak | 9 - modules/http2/mod_proxy_http2.c | 494 ++-- modules/ldap/util_ldap.c | 162 +- modules/ldap/util_ldap_cache.c | 14 +- modules/ldap/util_ldap_cache_mgr.c | 14 +- modules/loggers/mod_log_config.c | 37 +- modules/loggers/mod_log_debug.c | 8 + modules/loggers/mod_log_forensic.c | 8 +- modules/lua/config.m4 | 33 +- modules/lua/lua_apr.c | 8 +- modules/lua/lua_request.c | 225 +- modules/lua/mod_lua.c | 71 +- modules/lua/mod_lua.h | 12 +- modules/mappers/config9.m4 | 5 + modules/mappers/mod_alias.c | 186 +- modules/mappers/mod_imagemap.c | 2 +- modules/mappers/mod_negotiation.c | 10 +- modules/mappers/mod_rewrite.c | 288 ++- modules/mappers/mod_rewrite.mak | 4 +- modules/mappers/mod_speling.c | 37 +- modules/mappers/mod_vhost_alias.c | 2 +- modules/md/config2.m4 | 15 +- modules/md/md.h | 218 +- modules/md/md_acme.c | 724 ++++-- modules/md/md_acme.h | 216 +- modules/md/md_acme_acct.c | 785 +++--- modules/md/md_acme_acct.h | 103 +- modules/md/md_acme_authz.c | 801 ++++--- modules/md/md_acme_authz.h | 65 +- modules/md/md_acme_drive.c | 1316 ++++++----- modules/md/md_acme_drive.h | 55 + modules/md/md_acme_order.c | 562 +++++ modules/md/md_acme_order.h | 91 + modules/md/md_acmev2_drive.c | 181 ++ modules/md/md_acmev2_drive.h | 27 + modules/md/md_core.c | 316 +-- modules/md/md_crypt.c | 1334 +++++++++-- modules/md/md_crypt.h | 156 +- modules/md/md_curl.c | 482 +++- modules/md/md_event.c | 89 + modules/md/md_event.h | 46 + modules/md/md_http.c | 330 ++- modules/md/md_http.h | 210 +- modules/md/md_json.c | 376 ++- modules/md/md_json.h | 75 +- modules/md/md_jws.c | 116 +- modules/md/md_jws.h | 26 +- modules/md/md_log.h | 4 + 
modules/md/md_ocsp.c | 1063 +++++++++ modules/md/md_ocsp.h | 71 + modules/md/md_reg.c | 1283 ++++++---- modules/md/md_reg.h | 242 +- modules/md/md_result.c | 285 +++ modules/md/md_result.h | 87 + modules/md/md_status.c | 653 +++++ modules/md/md_status.h | 126 + modules/md/md_store.c | 134 +- modules/md/md_store.h | 310 ++- modules/md/md_store_fs.c | 426 +++- modules/md/md_store_fs.h | 2 +- modules/md/md_tailscale.c | 383 +++ modules/md/md_tailscale.h | 25 + modules/md/md_time.c | 325 +++ modules/md/md_time.h | 77 + modules/md/md_util.c | 388 ++- modules/md/md_util.h | 134 +- modules/md/md_version.h | 7 +- modules/md/mod_md.c | 2187 ++++++++--------- modules/md/mod_md.dsp | 63 +- modules/md/mod_md.h | 30 - modules/md/mod_md.mak | 102 +- modules/md/mod_md_config.c | 1048 ++++++--- modules/md/mod_md_config.h | 70 +- modules/md/mod_md_drive.c | 345 +++ modules/md/mod_md_drive.h | 35 + modules/md/mod_md_ocsp.c | 272 +++ modules/md/mod_md_ocsp.h | 33 + modules/md/mod_md_os.c | 33 +- modules/md/mod_md_status.c | 987 ++++++++ modules/md/mod_md_status.h | 27 + modules/metadata/mod_cern_meta.c | 4 +- modules/metadata/mod_headers.c | 30 +- modules/metadata/mod_mime_magic.c | 26 +- modules/metadata/mod_remoteip.c | 38 +- modules/metadata/mod_unique_id.c | 54 +- modules/metadata/mod_usertrack.c | 67 +- modules/proxy/ajp.h | 4 +- modules/proxy/ajp_header.c | 48 +- modules/proxy/balancers/mod_lbmethod_heartbeat.c | 5 +- modules/proxy/mod_proxy.c | 704 ++++-- modules/proxy/mod_proxy.h | 296 ++- modules/proxy/mod_proxy_ajp.c | 135 +- modules/proxy/mod_proxy_balancer.c | 456 ++-- modules/proxy/mod_proxy_connect.c | 168 +- modules/proxy/mod_proxy_express.c | 29 + modules/proxy/mod_proxy_fcgi.c | 196 +- modules/proxy/mod_proxy_fdpass.c | 4 +- modules/proxy/mod_proxy_ftp.c | 171 +- modules/proxy/mod_proxy_hcheck.c | 348 ++- modules/proxy/mod_proxy_http.c | 1812 +++++++------- modules/proxy/mod_proxy_scgi.c | 20 +- modules/proxy/mod_proxy_uwsgi.c | 148 +- modules/proxy/mod_proxy_wstunnel.c | 266 ++- modules/proxy/proxy_util.c | 2751 +++++++++++++++++----- modules/proxy/proxy_util.h | 6 +- modules/session/mod_session.c | 51 +- modules/session/mod_session.h | 3 + modules/session/mod_session_cookie.c | 6 +- modules/session/mod_session_crypto.c | 6 +- modules/session/mod_session_dbd.c | 8 +- modules/slotmem/mod_slotmem_shm.c | 6 +- modules/ssl/mod_ssl.c | 162 +- modules/ssl/mod_ssl.h | 29 + modules/ssl/mod_ssl_openssl.h | 49 +- modules/ssl/ssl_engine_config.c | 56 +- modules/ssl/ssl_engine_init.c | 723 +++--- modules/ssl/ssl_engine_io.c | 341 ++- modules/ssl/ssl_engine_kernel.c | 355 ++- modules/ssl/ssl_engine_log.c | 18 +- modules/ssl/ssl_engine_ocsp.c | 3 +- modules/ssl/ssl_engine_pphrase.c | 322 ++- modules/ssl/ssl_engine_vars.c | 52 +- modules/ssl/ssl_private.h | 171 +- modules/ssl/ssl_scache.c | 4 +- modules/ssl/ssl_util.c | 51 +- modules/ssl/ssl_util_ocsp.c | 6 +- modules/ssl/ssl_util_ssl.c | 180 +- modules/ssl/ssl_util_ssl.h | 31 +- modules/ssl/ssl_util_stapling.c | 190 +- modules/test/mod_dialup.c | 4 +- modules/test/mod_optional_hook_import.c | 4 +- modules/tls/Makefile.in | 20 + modules/tls/config2.m4 | 173 ++ modules/tls/mod_tls.c | 288 +++ modules/tls/mod_tls.h | 19 + modules/tls/tls_cache.c | 310 +++ modules/tls/tls_cache.h | 63 + modules/tls/tls_cert.c | 564 +++++ modules/tls/tls_cert.h | 211 ++ modules/tls/tls_conf.c | 780 ++++++ modules/tls/tls_conf.h | 185 ++ modules/tls/tls_core.c | 1433 +++++++++++ modules/tls/tls_core.h | 184 ++ modules/tls/tls_filter.c | 1017 ++++++++ modules/tls/tls_filter.h | 
90 + modules/tls/tls_ocsp.c | 120 + modules/tls/tls_ocsp.h | 47 + modules/tls/tls_proto.c | 603 +++++ modules/tls/tls_proto.h | 124 + modules/tls/tls_util.c | 367 +++ modules/tls/tls_util.h | 157 ++ modules/tls/tls_var.c | 397 ++++ modules/tls/tls_var.h | 39 + modules/tls/tls_version.h | 39 + 296 files changed, 45744 insertions(+), 20994 deletions(-) create mode 100644 modules/arch/unix/mod_systemd.c create mode 100644 modules/cache/mod_socache_redis.c create mode 100644 modules/cache/mod_socache_redis.dep create mode 100644 modules/cache/mod_socache_redis.dsp create mode 100644 modules/cache/mod_socache_redis.mak delete mode 100644 modules/http2/.gitignore delete mode 100644 modules/http2/h2_alt_svc.c delete mode 100644 modules/http2/h2_alt_svc.h create mode 100644 modules/http2/h2_c1.c create mode 100644 modules/http2/h2_c1.h create mode 100644 modules/http2/h2_c1_io.c create mode 100644 modules/http2/h2_c1_io.h create mode 100644 modules/http2/h2_c2.c create mode 100644 modules/http2/h2_c2.h create mode 100644 modules/http2/h2_c2_filter.c create mode 100644 modules/http2/h2_c2_filter.h delete mode 100644 modules/http2/h2_conn.c delete mode 100644 modules/http2/h2_conn.h create mode 100644 modules/http2/h2_conn_ctx.c create mode 100644 modules/http2/h2_conn_ctx.h delete mode 100644 modules/http2/h2_conn_io.c delete mode 100644 modules/http2/h2_conn_io.h delete mode 100644 modules/http2/h2_ctx.c delete mode 100644 modules/http2/h2_ctx.h delete mode 100644 modules/http2/h2_filter.c delete mode 100644 modules/http2/h2_filter.h delete mode 100644 modules/http2/h2_from_h1.c delete mode 100644 modules/http2/h2_from_h1.h delete mode 100644 modules/http2/h2_h2.c delete mode 100644 modules/http2/h2_h2.h delete mode 100644 modules/http2/h2_ngn_shed.c delete mode 100644 modules/http2/h2_ngn_shed.h create mode 100644 modules/http2/h2_protocol.c create mode 100644 modules/http2/h2_protocol.h delete mode 100644 modules/http2/h2_task.c delete mode 100644 modules/http2/h2_task.h create mode 100644 modules/http2/h2_ws.c create mode 100644 modules/http2/h2_ws.h create mode 100644 modules/md/md_acme_drive.h create mode 100644 modules/md/md_acme_order.c create mode 100644 modules/md/md_acme_order.h create mode 100644 modules/md/md_acmev2_drive.c create mode 100644 modules/md/md_acmev2_drive.h create mode 100644 modules/md/md_event.c create mode 100644 modules/md/md_event.h create mode 100644 modules/md/md_ocsp.c create mode 100644 modules/md/md_ocsp.h create mode 100644 modules/md/md_result.c create mode 100644 modules/md/md_result.h create mode 100644 modules/md/md_status.c create mode 100644 modules/md/md_status.h create mode 100644 modules/md/md_tailscale.c create mode 100644 modules/md/md_tailscale.h create mode 100644 modules/md/md_time.c create mode 100644 modules/md/md_time.h create mode 100644 modules/md/mod_md_drive.c create mode 100644 modules/md/mod_md_drive.h create mode 100644 modules/md/mod_md_ocsp.c create mode 100644 modules/md/mod_md_ocsp.h create mode 100644 modules/md/mod_md_status.c create mode 100644 modules/md/mod_md_status.h create mode 100644 modules/tls/Makefile.in create mode 100644 modules/tls/config2.m4 create mode 100644 modules/tls/mod_tls.c create mode 100644 modules/tls/mod_tls.h create mode 100644 modules/tls/tls_cache.c create mode 100644 modules/tls/tls_cache.h create mode 100644 modules/tls/tls_cert.c create mode 100644 modules/tls/tls_cert.h create mode 100644 modules/tls/tls_conf.c create mode 100644 modules/tls/tls_conf.h create mode 100644 modules/tls/tls_core.c 
create mode 100644 modules/tls/tls_core.h create mode 100644 modules/tls/tls_filter.c create mode 100644 modules/tls/tls_filter.h create mode 100644 modules/tls/tls_ocsp.c create mode 100644 modules/tls/tls_ocsp.h create mode 100644 modules/tls/tls_proto.c create mode 100644 modules/tls/tls_proto.h create mode 100644 modules/tls/tls_util.c create mode 100644 modules/tls/tls_util.h create mode 100644 modules/tls/tls_var.c create mode 100644 modules/tls/tls_var.h create mode 100644 modules/tls/tls_version.h (limited to 'modules') diff --git a/modules/aaa/mod_auth_basic.c b/modules/aaa/mod_auth_basic.c index e8163d0..c8c9492 100644 --- a/modules/aaa/mod_auth_basic.c +++ b/modules/aaa/mod_auth_basic.c @@ -40,9 +40,9 @@ typedef struct { ap_expr_info_t *fakeuser; ap_expr_info_t *fakepass; const char *use_digest_algorithm; - int fake_set:1; - int use_digest_algorithm_set:1; - int authoritative_set:1; + unsigned int fake_set:1, + use_digest_algorithm_set:1, + authoritative_set:1; } auth_basic_config_rec; static void *create_auth_basic_dir_config(apr_pool_t *p, char *d) @@ -238,7 +238,7 @@ static void note_basic_auth_failure(request_rec *r) static int hook_note_basic_auth_failure(request_rec *r, const char *auth_type) { - if (strcasecmp(auth_type, "Basic")) + if (ap_cstr_casecmp(auth_type, "Basic")) return DECLINED; note_basic_auth_failure(r); @@ -261,7 +261,7 @@ static int get_basic_auth(request_rec *r, const char **user, return HTTP_UNAUTHORIZED; } - if (strcasecmp(ap_getword(r->pool, &auth_line, ' '), "Basic")) { + if (ap_cstr_casecmp(ap_getword(r->pool, &auth_line, ' '), "Basic")) { /* Client tried to authenticate using wrong auth scheme */ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(01614) "client used wrong authentication scheme: %s", r->uri); @@ -301,7 +301,7 @@ static int authenticate_basic_user(request_rec *r) /* Are we configured to be Basic auth? */ current_auth = ap_auth_type(r); - if (!current_auth || strcasecmp(current_auth, "Basic")) { + if (!current_auth || ap_cstr_casecmp(current_auth, "Basic")) { return DECLINED; } @@ -320,7 +320,7 @@ static int authenticate_basic_user(request_rec *r) } if (conf->use_digest_algorithm - && !strcasecmp(conf->use_digest_algorithm, "MD5")) { + && !ap_cstr_casecmp(conf->use_digest_algorithm, "MD5")) { realm = ap_auth_name(r); digest = ap_md5(r->pool, (unsigned char *)apr_pstrcat(r->pool, sent_user, ":", diff --git a/modules/aaa/mod_auth_digest.c b/modules/aaa/mod_auth_digest.c index a67f069..791cec2 100644 --- a/modules/aaa/mod_auth_digest.c +++ b/modules/aaa/mod_auth_digest.c @@ -92,7 +92,6 @@ typedef struct digest_config_struct { int check_nc; const char *algorithm; char *uri_list; - const char *ha1; } digest_config_rec; @@ -153,6 +152,7 @@ typedef struct digest_header_struct { apr_time_t nonce_time; enum hdr_sts auth_hdr_sts; int needed_auth; + const char *ha1; client_entry *client; } digest_header_rec; @@ -262,6 +262,12 @@ static int initialize_tables(server_rec *s, apr_pool_t *ctx) /* Create the shared memory segment */ + client_shm = NULL; + client_rmm = NULL; + client_lock = NULL; + opaque_lock = NULL; + client_list = NULL; + /* * Create a unique filename using our pid. This information is * stashed in the global variable so the children inherit it. 
@@ -408,8 +414,6 @@ static int initialize_module(apr_pool_t *p, apr_pool_t *plog, if (initialize_tables(s, p) != OK) { return !OK; } - /* Call cleanup_tables on exit or restart */ - apr_pool_cleanup_register(p, NULL, cleanup_tables, apr_pool_cleanup_null); #endif /* APR_HAS_SHARED_MEMORY */ return OK; } @@ -553,16 +557,16 @@ static const char *set_qop(cmd_parms *cmd, void *config, const char *op) { digest_config_rec *conf = (digest_config_rec *) config; - if (!strcasecmp(op, "none")) { + if (!ap_cstr_casecmp(op, "none")) { apr_array_clear(conf->qop_list); *(const char **)apr_array_push(conf->qop_list) = "none"; return NULL; } - if (!strcasecmp(op, "auth-int")) { + if (!ap_cstr_casecmp(op, "auth-int")) { return "AuthDigestQop auth-int is not implemented"; } - else if (strcasecmp(op, "auth")) { + else if (ap_cstr_casecmp(op, "auth")) { return apr_pstrcat(cmd->pool, "Unrecognized qop: ", op, NULL); } @@ -610,11 +614,11 @@ static const char *set_nc_check(cmd_parms *cmd, void *config, int flag) static const char *set_algorithm(cmd_parms *cmd, void *config, const char *alg) { - if (!strcasecmp(alg, "MD5-sess")) { + if (!ap_cstr_casecmp(alg, "MD5-sess")) { return "AuthDigestAlgorithm: ERROR: algorithm `MD5-sess' " "is not implemented"; } - else if (strcasecmp(alg, "MD5")) { + else if (ap_cstr_casecmp(alg, "MD5")) { return apr_pstrcat(cmd->pool, "Invalid algorithm in AuthDigestAlgorithm: ", alg, NULL); } @@ -927,7 +931,7 @@ static int get_digest_rec(request_rec *r, digest_header_rec *resp) } resp->scheme = ap_getword_white(r->pool, &auth_line); - if (strcasecmp(resp->scheme, "Digest")) { + if (ap_cstr_casecmp(resp->scheme, "Digest")) { resp->auth_hdr_sts = NOT_DIGEST; return !OK; } @@ -991,25 +995,25 @@ static int get_digest_rec(request_rec *r, digest_header_rec *resp) auth_line++; } - if (!strcasecmp(key, "username")) + if (!ap_cstr_casecmp(key, "username")) resp->username = apr_pstrdup(r->pool, value); - else if (!strcasecmp(key, "realm")) + else if (!ap_cstr_casecmp(key, "realm")) resp->realm = apr_pstrdup(r->pool, value); - else if (!strcasecmp(key, "nonce")) + else if (!ap_cstr_casecmp(key, "nonce")) resp->nonce = apr_pstrdup(r->pool, value); - else if (!strcasecmp(key, "uri")) + else if (!ap_cstr_casecmp(key, "uri")) resp->uri = apr_pstrdup(r->pool, value); - else if (!strcasecmp(key, "response")) + else if (!ap_cstr_casecmp(key, "response")) resp->digest = apr_pstrdup(r->pool, value); - else if (!strcasecmp(key, "algorithm")) + else if (!ap_cstr_casecmp(key, "algorithm")) resp->algorithm = apr_pstrdup(r->pool, value); - else if (!strcasecmp(key, "cnonce")) + else if (!ap_cstr_casecmp(key, "cnonce")) resp->cnonce = apr_pstrdup(r->pool, value); - else if (!strcasecmp(key, "opaque")) + else if (!ap_cstr_casecmp(key, "opaque")) resp->opaque = apr_pstrdup(r->pool, value); - else if (!strcasecmp(key, "qop")) + else if (!ap_cstr_casecmp(key, "qop")) resp->message_qop = apr_pstrdup(r->pool, value); - else if (!strcasecmp(key, "nc")) + else if (!ap_cstr_casecmp(key, "nc")) resp->nonce_count = apr_pstrdup(r->pool, value); } @@ -1182,7 +1186,7 @@ static void note_digest_auth_failure(request_rec *r, if (apr_is_empty_array(conf->qop_list)) { qop = ", qop=\"auth\""; } - else if (!strcasecmp(*(const char **)(conf->qop_list->elts), "none")) { + else if (!ap_cstr_casecmp(*(const char **)(conf->qop_list->elts), "none")) { qop = ""; } else { @@ -1271,7 +1275,7 @@ static int hook_note_digest_auth_failure(request_rec *r, const char *auth_type) digest_header_rec *resp; digest_config_rec *conf; - if 
(strcasecmp(auth_type, "Digest")) + if (ap_cstr_casecmp(auth_type, "Digest")) return DECLINED; /* get the client response and mark */ @@ -1304,7 +1308,7 @@ static int hook_note_digest_auth_failure(request_rec *r, const char *auth_type) */ static authn_status get_hash(request_rec *r, const char *user, - digest_config_rec *conf) + digest_config_rec *conf, const char **rethash) { authn_status auth_result; char *password; @@ -1356,7 +1360,7 @@ static authn_status get_hash(request_rec *r, const char *user, } while (current_provider); if (auth_result == AUTH_USER_FOUND) { - conf->ha1 = password; + *rethash = password; } return auth_result; @@ -1381,7 +1385,7 @@ static int check_nc(const request_rec *r, const digest_header_rec *resp, } if (!apr_is_empty_array(conf->qop_list) && - !strcasecmp(*(const char **)(conf->qop_list->elts), "none")) { + !ap_cstr_casecmp(*(const char **)(conf->qop_list->elts), "none")) { /* qop is none, client must not send a nonce count */ if (snc != NULL) { ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(01772) @@ -1422,9 +1426,14 @@ static int check_nonce(request_rec *r, digest_header_rec *resp, time_rec nonce_time; char tmp, hash[NONCE_HASH_LEN+1]; - if (strlen(resp->nonce) != NONCE_LEN) { + /* Since the time part of the nonce is a base64 encoding of an + * apr_time_t (8 bytes), it should end with a '=', fail early otherwise. + */ + if (strlen(resp->nonce) != NONCE_LEN + || resp->nonce[NONCE_TIME_LEN - 1] != '=') { ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(01775) - "invalid nonce %s received - length is not %d", + "invalid nonce '%s' received - length is not %d " + "or time encoding is incorrect", resp->nonce, NONCE_LEN); note_digest_auth_failure(r, conf, resp, 1); return HTTP_UNAUTHORIZED; @@ -1483,25 +1492,24 @@ static int check_nonce(request_rec *r, digest_header_rec *resp, /* RFC-2069 */ static const char *old_digest(const request_rec *r, - const digest_header_rec *resp, const char *ha1) + const digest_header_rec *resp) { const char *ha2; ha2 = ap_md5(r->pool, (unsigned char *)apr_pstrcat(r->pool, resp->method, ":", resp->uri, NULL)); return ap_md5(r->pool, - (unsigned char *)apr_pstrcat(r->pool, ha1, ":", resp->nonce, - ":", ha2, NULL)); + (unsigned char *)apr_pstrcat(r->pool, resp->ha1, ":", + resp->nonce, ":", ha2, NULL)); } /* RFC-2617 */ static const char *new_digest(const request_rec *r, - digest_header_rec *resp, - const digest_config_rec *conf) + digest_header_rec *resp) { const char *ha1, *ha2, *a2; - ha1 = conf->ha1; + ha1 = resp->ha1; a2 = apr_pstrcat(r->pool, resp->method, ":", resp->uri, NULL); ha2 = ap_md5(r->pool, (const unsigned char *)a2); @@ -1514,7 +1522,6 @@ static const char *new_digest(const request_rec *r, NULL)); } - static void copy_uri_components(apr_uri_t *dst, apr_uri_t *src, request_rec *r) { if (src->scheme && src->scheme[0] != '\0') { @@ -1583,7 +1590,7 @@ static int authenticate_digest_user(request_rec *r) /* do we require Digest auth for this URI? 
*/ - if (!(t = ap_auth_type(r)) || strcasecmp(t, "Digest")) { + if (!(t = ap_auth_type(r)) || ap_cstr_casecmp(t, "Digest")) { return DECLINED; } @@ -1751,7 +1758,7 @@ static int authenticate_digest_user(request_rec *r) } if (resp->algorithm != NULL - && strcasecmp(resp->algorithm, "MD5")) { + && ap_cstr_casecmp(resp->algorithm, "MD5")) { ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(01789) "unknown algorithm `%s' received: %s", resp->algorithm, r->uri); @@ -1759,7 +1766,7 @@ static int authenticate_digest_user(request_rec *r) return HTTP_UNAUTHORIZED; } - return_code = get_hash(r, r->user, conf); + return_code = get_hash(r, r->user, conf, &resp->ha1); if (return_code == AUTH_USER_NOT_FOUND) { ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(01790) @@ -1789,7 +1796,7 @@ static int authenticate_digest_user(request_rec *r) if (resp->message_qop == NULL) { /* old (rfc-2069) style digest */ - if (strcmp(resp->digest, old_digest(r, resp, conf->ha1))) { + if (strcmp(resp->digest, old_digest(r, resp))) { ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(01792) "user %s: password mismatch: %s", r->user, r->uri); @@ -1802,7 +1809,7 @@ static int authenticate_digest_user(request_rec *r) int match = 0, idx; const char **tmp = (const char **)(conf->qop_list->elts); for (idx = 0; idx < conf->qop_list->nelts; idx++) { - if (!strcasecmp(*tmp, resp->message_qop)) { + if (!ap_cstr_casecmp(*tmp, resp->message_qop)) { match = 1; break; } @@ -1811,7 +1818,7 @@ static int authenticate_digest_user(request_rec *r) if (!match && !(apr_is_empty_array(conf->qop_list) - && !strcasecmp(resp->message_qop, "auth"))) { + && !ap_cstr_casecmp(resp->message_qop, "auth"))) { ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(01793) "invalid qop `%s' received: %s", resp->message_qop, r->uri); @@ -1819,7 +1826,7 @@ static int authenticate_digest_user(request_rec *r) return HTTP_UNAUTHORIZED; } - exp_digest = new_digest(r, resp, conf); + exp_digest = new_digest(r, resp); if (!exp_digest) { /* we failed to allocate a client struct */ return HTTP_INTERNAL_SERVER_ERROR; @@ -1893,7 +1900,7 @@ static int add_auth_info(request_rec *r) /* do rfc-2069 digest */ if (!apr_is_empty_array(conf->qop_list) && - !strcasecmp(*(const char **)(conf->qop_list->elts), "none") + !ap_cstr_casecmp(*(const char **)(conf->qop_list->elts), "none") && resp->message_qop == NULL) { /* use only RFC-2069 format */ ai = nextnonce; @@ -1903,7 +1910,7 @@ static int add_auth_info(request_rec *r) /* calculate rspauth attribute */ - ha1 = conf->ha1; + ha1 = resp->ha1; a2 = apr_pstrcat(r->pool, ":", resp->uri, NULL); ha2 = ap_md5(r->pool, (const unsigned char *)a2); diff --git a/modules/aaa/mod_auth_form.c b/modules/aaa/mod_auth_form.c index bea7d51..d443092 100644 --- a/modules/aaa/mod_auth_form.c +++ b/modules/aaa/mod_auth_form.c @@ -40,11 +40,10 @@ #define FORM_REDIRECT_HANDLER "form-redirect-handler" #define MOD_AUTH_FORM_HASH "site" -static int (*ap_session_load_fn) (request_rec * r, session_rec ** z) = NULL; -static apr_status_t (*ap_session_get_fn)(request_rec * r, session_rec * z, - const char *key, const char **value) = NULL; -static apr_status_t (*ap_session_set_fn)(request_rec * r, session_rec * z, - const char *key, const char *value) = NULL; +static APR_OPTIONAL_FN_TYPE(ap_session_load) *ap_session_load_fn = NULL; +static APR_OPTIONAL_FN_TYPE(ap_session_get) *ap_session_get_fn = NULL; +static APR_OPTIONAL_FN_TYPE(ap_session_set) *ap_session_set_fn = NULL; + static void (*ap_request_insert_filter_fn) (request_rec * r) = NULL; static void 
(*ap_request_remove_filter_fn) (request_rec * r) = NULL; @@ -420,7 +419,7 @@ static void note_cookie_auth_failure(request_rec * r) static int hook_note_cookie_auth_failure(request_rec * r, const char *auth_type) { - if (strcasecmp(auth_type, "form")) + if (ap_cstr_casecmp(auth_type, "form")) return DECLINED; note_cookie_auth_failure(r); @@ -892,7 +891,7 @@ static int authenticate_form_authn(request_rec * r) /* Are we configured to be Form auth? */ current_auth = ap_auth_type(r); - if (!current_auth || strcasecmp(current_auth, "form")) { + if (!current_auth || ap_cstr_casecmp(current_auth, "form")) { return DECLINED; } diff --git a/modules/aaa/mod_authn_core.c b/modules/aaa/mod_authn_core.c index 7af1265..f3a494c 100644 --- a/modules/aaa/mod_authn_core.c +++ b/modules/aaa/mod_authn_core.c @@ -34,6 +34,7 @@ #include "http_log.h" #include "http_request.h" #include "http_protocol.h" +#include "ap_expr.h" #include "ap_provider.h" #include "mod_auth.h" @@ -52,9 +53,9 @@ */ typedef struct { - const char *ap_auth_type; + ap_expr_info_t *ap_auth_type; int auth_type_set; - const char *ap_auth_name; + ap_expr_info_t *ap_auth_name; } authn_core_dir_conf; typedef struct provider_alias_rec { @@ -298,8 +299,16 @@ static const char *set_authname(cmd_parms *cmd, void *mconfig, const char *word1) { authn_core_dir_conf *aconfig = (authn_core_dir_conf *)mconfig; + const char *expr_err = NULL; + + aconfig->ap_auth_name = ap_expr_parse_cmd(cmd, word1, AP_EXPR_FLAG_STRING_RESULT, + &expr_err, NULL); + if (expr_err) { + return apr_pstrcat(cmd->temp_pool, + "Cannot parse expression '", word1, "' in AuthName: ", + expr_err, NULL); + } - aconfig->ap_auth_name = ap_escape_quotes(cmd->pool, word1); return NULL; } @@ -307,9 +316,17 @@ static const char *set_authtype(cmd_parms *cmd, void *mconfig, const char *word1) { authn_core_dir_conf *aconfig = (authn_core_dir_conf *)mconfig; + const char *expr_err = NULL; + + aconfig->ap_auth_type = ap_expr_parse_cmd(cmd, word1, AP_EXPR_FLAG_STRING_RESULT, + &expr_err, NULL); + if (expr_err) { + return apr_pstrcat(cmd->temp_pool, + "Cannot parse expression '", word1, "' in AuthType: ", + expr_err, NULL); + } aconfig->auth_type_set = 1; - aconfig->ap_auth_type = strcasecmp(word1, "None") ? word1 : NULL; return NULL; } @@ -318,20 +335,44 @@ static const char *authn_ap_auth_type(request_rec *r) { authn_core_dir_conf *conf; - conf = (authn_core_dir_conf *)ap_get_module_config(r->per_dir_config, - &authn_core_module); + conf = (authn_core_dir_conf *) ap_get_module_config(r->per_dir_config, + &authn_core_module); + + if (conf->ap_auth_type) { + const char *err = NULL, *type; + type = ap_expr_str_exec(r, conf->ap_auth_type, &err); + if (err) { + ap_log_rerror( + APLOG_MARK, APLOG_ERR, APR_SUCCESS, r, APLOGNO(02834) "AuthType expression could not be evaluated: %s", err); + return NULL; + } + + return strcasecmp(type, "None") ? 
type : NULL; + } - return conf->ap_auth_type; + return NULL; } static const char *authn_ap_auth_name(request_rec *r) { authn_core_dir_conf *conf; + const char *err = NULL, *name; + + conf = (authn_core_dir_conf *) ap_get_module_config(r->per_dir_config, + &authn_core_module); + + if (conf->ap_auth_name) { + name = ap_expr_str_exec(r, conf->ap_auth_name, &err); + if (err) { + ap_log_rerror( + APLOG_MARK, APLOG_ERR, APR_SUCCESS, r, APLOGNO(02835) "AuthName expression could not be evaluated: %s", err); + return NULL; + } - conf = (authn_core_dir_conf *)ap_get_module_config(r->per_dir_config, - &authn_core_module); + return ap_escape_quotes(r->pool, name); + } - return apr_pstrdup(r->pool, conf->ap_auth_name); + return NULL; } static const command_rec authn_cmds[] = diff --git a/modules/aaa/mod_authn_dbd.c b/modules/aaa/mod_authn_dbd.c index 57090d2..08e5993 100644 --- a/modules/aaa/mod_authn_dbd.c +++ b/modules/aaa/mod_authn_dbd.c @@ -143,7 +143,6 @@ static authn_status authn_dbd_password(request_rec *r, const char *user, return AUTH_GENERAL_ERROR; } if (dbd_password == NULL) { -#if APU_MAJOR_VERSION > 1 || (APU_MAJOR_VERSION == 1 && APU_MINOR_VERSION >= 3) /* add the rest of the columns to the environment */ int i = 1; const char *name; @@ -168,7 +167,7 @@ static authn_status authn_dbd_password(request_rec *r, const char *user, apr_dbd_get_entry(dbd->driver, row, i)); i++; } -#endif + dbd_password = apr_pstrdup(r->pool, apr_dbd_get_entry(dbd->driver, row, 0)); } @@ -239,7 +238,6 @@ static authn_status authn_dbd_realm(request_rec *r, const char *user, return AUTH_GENERAL_ERROR; } if (dbd_hash == NULL) { -#if APU_MAJOR_VERSION > 1 || (APU_MAJOR_VERSION == 1 && APU_MINOR_VERSION >= 3) /* add the rest of the columns to the environment */ int i = 1; const char *name; @@ -264,7 +262,7 @@ static authn_status authn_dbd_realm(request_rec *r, const char *user, apr_dbd_get_entry(dbd->driver, row, i)); i++; } -#endif + dbd_hash = apr_pstrdup(r->pool, apr_dbd_get_entry(dbd->driver, row, 0)); } diff --git a/modules/aaa/mod_authn_dbm.c b/modules/aaa/mod_authn_dbm.c index f4fb736..9f47350 100644 --- a/modules/aaa/mod_authn_dbm.c +++ b/modules/aaa/mod_authn_dbm.c @@ -39,6 +39,11 @@ #include "mod_auth.h" +#include "apr_version.h" +#if !APR_VERSION_AT_LEAST(2,0,0) +#include "apu_version.h" +#endif + static APR_OPTIONAL_FN_TYPE(ap_authn_cache_store) *authn_cache_store = NULL; #define AUTHN_CACHE_STORE(r,user,realm,data) \ if (authn_cache_store != NULL) \ @@ -72,18 +77,39 @@ static const command_rec authn_dbm_cmds[] = module AP_MODULE_DECLARE_DATA authn_dbm_module; -static apr_status_t fetch_dbm_value(const char *dbmtype, const char *dbmfile, - const char *user, char **value, - apr_pool_t *pool) +static apr_status_t fetch_dbm_value(request_rec *r, const char *dbmtype, + const char *dbmfile, + const char *user, char **value) { +#if APU_MAJOR_VERSION > 1 || (APU_MAJOR_VERSION == 1 && APU_MINOR_VERSION >= 7) + const apr_dbm_driver_t *driver; + const apu_err_t *err; +#endif apr_dbm_t *f; apr_datum_t key, val; apr_status_t rv; +#if APU_MAJOR_VERSION > 1 || (APU_MAJOR_VERSION == 1 && APU_MINOR_VERSION >= 7) + rv = apr_dbm_get_driver(&driver, dbmtype, &err, r->pool); + + if (rv != APR_SUCCESS) { + ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(10284) + "could not load '%s' dbm library: %s", + err->reason, err->msg); + return rv; + } + + rv = apr_dbm_open2(&f, driver, dbmfile, APR_DBM_READONLY, + APR_OS_DEFAULT, r->pool); +#else rv = apr_dbm_open_ex(&f, dbmtype, dbmfile, APR_DBM_READONLY, - APR_OS_DEFAULT, pool); + 
APR_OS_DEFAULT, r->pool); +#endif if (rv != APR_SUCCESS) { + ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(10285) + "could not open dbm (type %s) file: %s", + dbmtype, dbmfile); return rv; } @@ -97,12 +123,16 @@ static apr_status_t fetch_dbm_value(const char *dbmtype, const char *dbmfile, *value = NULL; if (apr_dbm_fetch(f, key, &val) == APR_SUCCESS && val.dptr) { - *value = apr_pstrmemdup(pool, val.dptr, val.dsize); + *value = apr_pstrmemdup(r->pool, val.dptr, val.dsize); } apr_dbm_close(f); - return rv; + /* NOT FOUND is not an error case; this is indicated by a NULL result. + * Treat all NULL lookup/error results as success for the simple case + * of auth credential lookup, these are DECLINED in both cases. + */ + return APR_SUCCESS; } static authn_status check_dbm_pw(request_rec *r, const char *user, @@ -114,13 +144,9 @@ static authn_status check_dbm_pw(request_rec *r, const char *user, char *dbm_password; char *colon_pw; - rv = fetch_dbm_value(conf->dbmtype, conf->pwfile, user, &dbm_password, - r->pool); + rv = fetch_dbm_value(r, conf->dbmtype, conf->pwfile, user, &dbm_password); if (rv != APR_SUCCESS) { - ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01754) - "could not open dbm (type %s) auth file: %s", - conf->dbmtype, conf->pwfile); return AUTH_GENERAL_ERROR; } @@ -152,14 +178,11 @@ static authn_status get_dbm_realm_hash(request_rec *r, const char *user, char *dbm_hash; char *colon_hash; - rv = fetch_dbm_value(conf->dbmtype, conf->pwfile, + rv = fetch_dbm_value(r, conf->dbmtype, conf->pwfile, apr_pstrcat(r->pool, user, ":", realm, NULL), - &dbm_hash, r->pool); + &dbm_hash); if (rv != APR_SUCCESS) { - ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01755) - "Could not open dbm (type %s) hash file: %s", - conf->dbmtype, conf->pwfile); return AUTH_GENERAL_ERROR; } diff --git a/modules/aaa/mod_authn_socache.c b/modules/aaa/mod_authn_socache.c index 550bc66..0e4454a 100644 --- a/modules/aaa/mod_authn_socache.c +++ b/modules/aaa/mod_authn_socache.c @@ -299,7 +299,7 @@ static void ap_authn_cache_store(request_rec *r, const char *module, const char *key; apr_time_t expiry; - /* first check whether we're cacheing for this module */ + /* first check whether we're caching for this module */ dcfg = ap_get_module_config(r->per_dir_config, &authn_socache_module); if (!configured || !dcfg->providers) { return; @@ -350,7 +350,7 @@ static void ap_authn_cache_store(request_rec *r, const char *module, } } -#define MAX_VAL_LEN 100 +#define MAX_VAL_LEN 256 static authn_status check_password(request_rec *r, const char *user, const char *password) { diff --git a/modules/aaa/mod_authnz_fcgi.c b/modules/aaa/mod_authnz_fcgi.c index d99f391..69743f1 100644 --- a/modules/aaa/mod_authnz_fcgi.c +++ b/modules/aaa/mod_authnz_fcgi.c @@ -571,6 +571,14 @@ static apr_status_t handle_response(const fcgi_provider_conf *conf, "parsing -> %d/%d", fn, status, r->status); + /* FCGI has its own body framing mechanism which we don't + * match against any provided Content-Length, so let the + * core determine C-L vs T-E based on what's actually sent. 
+ */ + if (!apr_table_get(r->subprocess_env, AP_TRUST_CGILIKE_CL_ENVVAR)) + apr_table_unset(r->headers_out, "Content-Length"); + apr_table_unset(r->headers_out, "Transfer-Encoding"); + if (rspbuf) { /* caller wants to see response body, * if any */ @@ -681,7 +689,7 @@ static int mod_fcgid_modify_auth_header(void *vars, /* When the application gives a 200 response, the server ignores response headers whose names aren't prefixed with Variable- prefix, and ignores any response content */ - if (strncasecmp(key, "Variable-", 9) == 0) + if (ap_cstr_casecmpn(key, "Variable-", 9) == 0) apr_table_setn(vars, key, val); return 1; } @@ -714,6 +722,7 @@ static void req_rsp(request_rec *r, const fcgi_provider_conf *conf, } apr_pool_create(&temp_pool, r->pool); + apr_pool_tag(temp_pool, "mod_authnz_fcgi (req_rsp)"); setupenv(r, password, apache_role); @@ -809,7 +818,7 @@ static int fcgi_check_authn(request_rec *r) prov = dconf && dconf->name ? dconf->name : NULL; - if (!prov || !strcasecmp(prov, "None")) { + if (!prov || !ap_cstr_casecmp(prov, "None")) { return DECLINED; } @@ -824,7 +833,7 @@ static int fcgi_check_authn(request_rec *r) dconf->user_expr ? "yes" : "no", auth_type); - if (auth_type && !strcasecmp(auth_type, "Basic")) { + if (auth_type && !ap_cstr_casecmp(auth_type, "Basic")) { if ((res = ap_get_basic_auth_pw(r, &password))) { ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(02517) "%s: couldn't retrieve basic auth " diff --git a/modules/aaa/mod_authnz_ldap.c b/modules/aaa/mod_authnz_ldap.c index 4634fe9..a7b4939 100644 --- a/modules/aaa/mod_authnz_ldap.c +++ b/modules/aaa/mod_authnz_ldap.c @@ -500,6 +500,32 @@ static authn_status authn_ldap_check_password(request_rec *r, const char *user, return AUTH_GENERAL_ERROR; } + /* Get the password that the client sent */ + if (password == NULL) { + ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(01692) + "auth_ldap authenticate: no password specified"); + return AUTH_GENERAL_ERROR; + } + + if (user == NULL) { + ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(01693) + "auth_ldap authenticate: no user specified"); + return AUTH_GENERAL_ERROR; + } + + /* + * A bind to the server with an empty password always succeeds, so + * we check to ensure that the password is not empty. This implies + * that users who actually do have empty passwords will never be + * able to authenticate with this module. I don't see this as a big + * problem. + */ + if (!(*password)) { + ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(10263) + "auth_ldap authenticate: empty password specified"); + return AUTH_DENIED; + } + /* There is a good AuthLDAPURL, right? 
*/ if (sec->host) { const char *binddn = sec->binddn; @@ -522,21 +548,6 @@ static authn_status authn_ldap_check_password(request_rec *r, const char *user, ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(01691) "auth_ldap authenticate: using URL %s", sec->url); - /* Get the password that the client sent */ - if (password == NULL) { - ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(01692) - "auth_ldap authenticate: no password specified"); - util_ldap_connection_close(ldc); - return AUTH_GENERAL_ERROR; - } - - if (user == NULL) { - ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(01693) - "auth_ldap authenticate: no user specified"); - util_ldap_connection_close(ldc); - return AUTH_GENERAL_ERROR; - } - /* build the username filter */ authn_ldap_build_filter(filtbuf, r, user, NULL, sec); @@ -1673,6 +1684,10 @@ static const char *set_bind_password(cmd_parms *cmd, void *_cfg, const char *arg sec->bindpw = (char *)arg; } + if (!(*sec->bindpw)) { + return "Empty passwords are invalid for AuthLDAPBindPassword"; + } + return NULL; } diff --git a/modules/aaa/mod_authz_core.c b/modules/aaa/mod_authz_core.c index 9585114..40e5fe1 100644 --- a/modules/aaa/mod_authz_core.c +++ b/modules/aaa/mod_authz_core.c @@ -193,12 +193,11 @@ static authz_status authz_alias_check_authorization(request_rec *r, const void *parsed_require_args) { const char *provider_name; - authz_status ret = AUTHZ_DENIED; /* Look up the provider alias in the alias list. - * Get the dir_config and call ap_Merge_per_dir_configs() + * Get the dir_config and call ap_merge_per_dir_configs() * Call the real provider->check_authorization() function - * return the result of the above function call + * Return the result of the above function call */ provider_name = apr_table_get(r->notes, AUTHZ_PROVIDER_NAME_NOTE); @@ -217,6 +216,7 @@ static authz_status authz_alias_check_authorization(request_rec *r, configurations and call the real provider */ if (prvdraliasrec) { ap_conf_vector_t *orig_dir_config = r->per_dir_config; + authz_status ret; r->per_dir_config = ap_merge_per_dir_configs(r->pool, orig_dir_config, @@ -227,18 +227,16 @@ static authz_status authz_alias_check_authorization(request_rec *r, prvdraliasrec->provider_parsed_args); r->per_dir_config = orig_dir_config; + + return ret; } - else { - ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(02305) - "no alias provider found for '%s' (BUG?)", - provider_name); - } - } - else { - ap_assert(provider_name != NULL); } - return ret; + ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(02305) + "no alias provider found for '%s' (BUG?)", + provider_name ? 
provider_name : "n/a"); + + return AUTHZ_DENIED; } static const authz_provider authz_alias_provider = diff --git a/modules/aaa/mod_authz_dbd.c b/modules/aaa/mod_authz_dbd.c index e1bb623..5d169e1 100644 --- a/modules/aaa/mod_authz_dbd.c +++ b/modules/aaa/mod_authz_dbd.c @@ -212,7 +212,7 @@ static int authz_dbd_login(request_rec *r, authz_dbd_cfg *cfg, static int authz_dbd_group_query(request_rec *r, authz_dbd_cfg *cfg, apr_array_header_t *groups) { - /* SELECT group FROM authz WHERE user = %s */ + /* SELECT user_group FROM authz WHERE user = %s */ int rv; const char *message; ap_dbd_t *dbd; @@ -254,7 +254,7 @@ static int authz_dbd_group_query(request_rec *r, authz_dbd_cfg *cfg, else { message = apr_dbd_error(dbd->driver, dbd->handle, rv); ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(01651) - "authz_dbd in get_row; group query for user=%s [%s]", + "authz_dbd in get_row; user_group query for user=%s [%s]", r->user, message?message:noerror); return HTTP_INTERNAL_SERVER_ERROR; } diff --git a/modules/aaa/mod_authz_dbm.c b/modules/aaa/mod_authz_dbm.c index 843d9a8..f11de68 100644 --- a/modules/aaa/mod_authz_dbm.c +++ b/modules/aaa/mod_authz_dbm.c @@ -20,6 +20,11 @@ #include "apr_dbm.h" #include "apr_md5.h" +#include "apr_version.h" +#if !APR_VERSION_AT_LEAST(2,0,0) +#include "apu_version.h" +#endif + #include "httpd.h" #include "http_config.h" #include "ap_provider.h" @@ -96,14 +101,35 @@ static apr_status_t get_dbm_grp(request_rec *r, char *key1, char *key2, const char *dbmgrpfile, const char *dbtype, const char ** out) { +#if APU_MAJOR_VERSION > 1 || (APU_MAJOR_VERSION == 1 && APU_MINOR_VERSION >= 7) + const apr_dbm_driver_t *driver; + const apu_err_t *err; +#endif char *grp_colon, *val; apr_status_t retval; apr_dbm_t *f; +#if APU_MAJOR_VERSION > 1 || (APU_MAJOR_VERSION == 1 && APU_MINOR_VERSION >= 7) + retval = apr_dbm_get_driver(&driver, dbtype, &err, r->pool); + + if (retval != APR_SUCCESS) { + ap_log_rerror(APLOG_MARK, APLOG_ERR, retval, r, APLOGNO(10286) + "could not load '%s' dbm library: %s", + err->reason, err->msg); + return retval; + } + + retval = apr_dbm_open2(&f, driver, dbmgrpfile, APR_DBM_READONLY, + APR_OS_DEFAULT, r->pool); +#else retval = apr_dbm_open_ex(&f, dbtype, dbmgrpfile, APR_DBM_READONLY, APR_OS_DEFAULT, r->pool); +#endif if (retval != APR_SUCCESS) { + ap_log_rerror(APLOG_MARK, APLOG_ERR, retval, r, APLOGNO(01799) + "could not open dbm (type %s) group access " + "file: %s", dbtype, dbmgrpfile); return retval; } @@ -166,9 +192,6 @@ static authz_status dbmgroup_check_authorization(request_rec *r, user, conf->grpfile, conf->dbmtype, &groups); if (status != APR_SUCCESS) { - ap_log_rerror(APLOG_MARK, APLOG_ERR, status, r, APLOGNO(01799) - "could not open dbm (type %s) group access " - "file: %s", conf->dbmtype, conf->grpfile); return AUTHZ_GENERAL_ERROR; } @@ -241,9 +264,6 @@ static authz_status dbmfilegroup_check_authorization(request_rec *r, user, conf->grpfile, conf->dbmtype, &groups); if (status != APR_SUCCESS) { - ap_log_rerror(APLOG_MARK, APLOG_ERR, status, r, APLOGNO(01803) - "could not open dbm (type %s) group access " - "file: %s", conf->dbmtype, conf->grpfile); return AUTHZ_DENIED; } diff --git a/modules/aaa/mod_authz_groupfile.c b/modules/aaa/mod_authz_groupfile.c index 76957f7..c2431e0 100644 --- a/modules/aaa/mod_authz_groupfile.c +++ b/modules/aaa/mod_authz_groupfile.c @@ -98,6 +98,8 @@ static apr_status_t groups_for_user(apr_pool_t *p, char *user, char *grpfile, } apr_pool_create(&sp, p); + apr_pool_tag(sp, "authz_groupfile (groups_for_user)"); + 
ap_varbuf_init(p, &vb, VARBUF_INIT_LEN); while (!(ap_varbuf_cfg_getline(&vb, f, VARBUF_MAX_LEN))) { @@ -172,7 +174,7 @@ static authz_status group_check_authorization(request_rec *r, if (apr_is_empty_table(grpstatus)) { /* no groups available, so exit immediately */ - ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(01666) + ap_log_rerror(APLOG_MARK, APLOG_INFO, 0, r, APLOGNO(01666) "Authorization of user %s to access %s failed, reason: " "user doesn't appear in group file (%s).", r->user, r->uri, conf->groupfile); diff --git a/modules/arch/unix/config5.m4 b/modules/arch/unix/config5.m4 index 77027a8..3d099f8 100644 --- a/modules/arch/unix/config5.m4 +++ b/modules/arch/unix/config5.m4 @@ -18,6 +18,15 @@ APACHE_MODULE(privileges, Per-virtualhost Unix UserIDs and enhanced security for fi ]) +APACHE_MODULE(systemd, Systemd support, , , no, [ + if test "${ac_cv_header_systemd_sd_daemon_h}" = "no" || test -z "${SYSTEMD_LIBS}"; then + AC_MSG_WARN([Your system does not support systemd.]) + enable_systemd="no" + else + APR_ADDTO(MOD_SYSTEMD_LDADD, [$SYSTEMD_LIBS]) + fi +]) + APR_ADDTO(INCLUDES, [-I\$(top_srcdir)/$modpath_current]) APACHE_MODPATH_FINISH diff --git a/modules/arch/unix/mod_systemd.c b/modules/arch/unix/mod_systemd.c new file mode 100644 index 0000000..c3e7082 --- /dev/null +++ b/modules/arch/unix/mod_systemd.c @@ -0,0 +1,119 @@ +/* Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +#include +#include +#include "ap_mpm.h" +#include +#include +#include +#include +#include +#include +#include "unixd.h" +#include "scoreboard.h" +#include "mpm_common.h" + +#include "systemd/sd-daemon.h" + +#if APR_HAVE_UNISTD_H +#include +#endif + +static int systemd_pre_config(apr_pool_t *pconf, apr_pool_t *plog, + apr_pool_t *ptemp) +{ + sd_notify(0, + "RELOADING=1\n" + "STATUS=Reading configuration...\n"); + ap_extended_status = 1; + return OK; +} + +/* Report the service is ready in post_config, which could be during + * startup or after a reload. The server could still hit a fatal + * startup error after this point during ap_run_mpm(), so this is + * perhaps too early, but by post_config listen() has been called on + * the TCP ports so new connections will not be rejected. There will + * always be a possible async failure event simultaneous to the + * service reporting "ready", so this should be good enough. 
*/ +static int systemd_post_config(apr_pool_t *p, apr_pool_t *plog, + apr_pool_t *ptemp, server_rec *main_server) +{ + sd_notify(0, "READY=1\n" + "STATUS=Configuration loaded.\n"); + return OK; +} + +static int systemd_pre_mpm(apr_pool_t *p, ap_scoreboard_e sb_type) +{ + sd_notifyf(0, "READY=1\n" + "STATUS=Processing requests...\n" + "MAINPID=%" APR_PID_T_FMT, getpid()); + + return OK; +} + +static int systemd_monitor(apr_pool_t *p, server_rec *s) +{ + ap_sload_t sload; + apr_interval_time_t up_time; + char bps[5]; + + if (!ap_extended_status) { + /* Nothing useful to report with ExtendedStatus disabled. */ + return DECLINED; + } + + ap_get_sload(&sload); + /* up_time in seconds */ + up_time = (apr_uint32_t) apr_time_sec(apr_time_now() - + ap_scoreboard_image->global->restart_time); + + apr_strfsize((unsigned long)((float) (sload.bytes_served) + / (float) up_time), bps); + + sd_notifyf(0, "READY=1\n" + "STATUS=Total requests: %lu; Idle/Busy workers %d/%d;" + "Requests/sec: %.3g; Bytes served/sec: %sB/sec\n", + sload.access_count, sload.idle, sload.busy, + ((float) sload.access_count) / (float) up_time, bps); + + return DECLINED; +} + +static void systemd_register_hooks(apr_pool_t *p) +{ + /* Enable ap_extended_status. */ + ap_hook_pre_config(systemd_pre_config, NULL, NULL, APR_HOOK_LAST); + /* Signal service is ready. */ + ap_hook_post_config(systemd_post_config, NULL, NULL, APR_HOOK_REALLY_LAST); + /* We know the PID in this hook ... */ + ap_hook_pre_mpm(systemd_pre_mpm, NULL, NULL, APR_HOOK_LAST); + /* Used to update httpd's status line using sd_notifyf */ + ap_hook_monitor(systemd_monitor, NULL, NULL, APR_HOOK_MIDDLE); +} + +AP_DECLARE_MODULE(systemd) = { + STANDARD20_MODULE_STUFF, + NULL, + NULL, + NULL, + NULL, + NULL, + systemd_register_hooks, +}; diff --git a/modules/arch/win32/mod_isapi.c b/modules/arch/win32/mod_isapi.c index 2e51d51..a9816e5 100644 --- a/modules/arch/win32/mod_isapi.c +++ b/modules/arch/win32/mod_isapi.c @@ -178,7 +178,7 @@ static const command_rec isapi_cmds[] = { " on or off (default: off)"), AP_INIT_FLAG("ISAPIAppendLogToQuery", ap_set_flag_slot, (void *)APR_OFFSETOF(isapi_dir_conf, log_to_query), - OR_FILEINFO, "Append Log requests are concatinated to the query args" + OR_FILEINFO, "Append Log requests are concatenated to the query args" " on or off (default: on)"), AP_INIT_FLAG("ISAPIFakeAsync", ap_set_flag_slot, (void *)APR_OFFSETOF(isapi_dir_conf, fake_async), @@ -257,7 +257,7 @@ static apr_status_t isapi_load(apr_pool_t *p, server_rec *s, isapi_loaded *isa) isa->isapi_version = apr_pcalloc(p, sizeof(HSE_VERSION_INFO)); - /* TODO: These aught to become overrideable, so that we + /* TODO: These aught to become overridable, so that we * assure a given isapi can be fooled into behaving well. 
* * The tricky bit, they aren't really a per-dir sort of @@ -976,11 +976,11 @@ static int APR_THREAD_FUNC regfnServerSupportFunction(isapi_cid *cid, return 0; } - len = (apr_uint32_t)strlen(r->filename); + len = (apr_uint32_t)strlen(subreq->filename); if ((subreq->finfo.filetype == APR_DIR) && (!subreq->path_info) - && (file[len - 1] != '/')) + && (subreq->filename[len - 1] != '/')) file = apr_pstrcat(cid->r->pool, subreq->filename, "/", NULL); else file = apr_pstrcat(cid->r->pool, subreq->filename, @@ -1692,6 +1692,7 @@ static int isapi_pre_config(apr_pool_t *pconf, apr_pool_t *plog, apr_pool_t *pte "could not create the isapi cache pool"); return APR_EGENERAL; } + apr_pool_tag(loaded.pool, "mod_isapi_load"); loaded.hash = apr_hash_make(loaded.pool); if (!loaded.hash) { diff --git a/modules/arch/win32/mod_isapi.h b/modules/arch/win32/mod_isapi.h index 6afa27b..5284386 100644 --- a/modules/arch/win32/mod_isapi.h +++ b/modules/arch/win32/mod_isapi.h @@ -183,7 +183,7 @@ typedef struct HSE_URL_MAPEX_INFO { #define HSE_REQ_SEND_RESPONSE_HEADER 3 #define HSE_REQ_DONE_WITH_SESSION 4 -/* MS Extented methods to ISAPI ServerSupportFunction() HSE_code */ +/* MS Extended methods to ISAPI ServerSupportFunction() HSE_code */ #define HSE_REQ_MAP_URL_TO_PATH 1001 /* Emulated */ #define HSE_REQ_GET_SSPI_INFO 1002 /* Not Supported */ #define HSE_APPEND_LOG_PARAMETER 1003 /* Supported */ diff --git a/modules/cache/cache_storage.c b/modules/cache/cache_storage.c index 41f638c..dfda34b 100644 --- a/modules/cache/cache_storage.c +++ b/modules/cache/cache_storage.c @@ -270,8 +270,7 @@ int cache_select(cache_request_rec *cache, request_rec *r) * language negotiated document in a different language by mistake. * * This code makes the assumption that the storage manager will - * cache the req_hdrs if the response contains a Vary - * header. + * cache the req_hdrs if the response contains a Vary header. * * RFC2616 13.6 and 14.44 describe the Vary mechanism. */ @@ -549,7 +548,7 @@ static apr_status_t cache_canonicalise_key(request_rec *r, apr_pool_t* p, } else { if (conf->base_uri && conf->base_uri->port_str) { - port_str = conf->base_uri->port_str; + port_str = apr_pstrcat(p, ":", conf->base_uri->port_str, NULL); } else if (conf->base_uri && conf->base_uri->hostname) { port_str = ""; diff --git a/modules/cache/cache_util.c b/modules/cache/cache_util.c index aa04913..fc36431 100644 --- a/modules/cache/cache_util.c +++ b/modules/cache/cache_util.c @@ -30,7 +30,7 @@ extern module AP_MODULE_DECLARE_DATA cache_module; /* Determine if "url" matches the hostname, scheme and port and path * in "filter". All but the path comparisons are case-insensitive. */ -static int uri_meets_conditions(const apr_uri_t *filter, const int pathlen, +static int uri_meets_conditions(const apr_uri_t *filter, const apr_size_t pathlen, const apr_uri_t *url, const char *path) { /* Scheme, hostname port and local part. 
The filter URI and the diff --git a/modules/cache/config.m4 b/modules/cache/config.m4 index 8115094..4fa414b 100644 --- a/modules/cache/config.m4 +++ b/modules/cache/config.m4 @@ -133,6 +133,7 @@ fi APACHE_MODULE(socache_shmcb, shmcb small object cache provider, , , most) APACHE_MODULE(socache_dbm, dbm small object cache provider, , , most) APACHE_MODULE(socache_memcache, memcache small object cache provider, , , most) +APACHE_MODULE(socache_redis, redis small object cache provider, , , most) APACHE_MODULE(socache_dc, distcache small object cache provider, , , no, [ APACHE_CHECK_DISTCACHE ]) diff --git a/modules/cache/mod_cache.c b/modules/cache/mod_cache.c index 56a09f5..3b4469e 100644 --- a/modules/cache/mod_cache.c +++ b/modules/cache/mod_cache.c @@ -986,7 +986,7 @@ static apr_status_t cache_save_filter(ap_filter_t *f, apr_bucket_brigade *in) /* 304 does not contain Content-Type and mod_mime regenerates the * Content-Type based on the r->filename. This would lead to original - * Content-Type to be lost (overwriten by whatever mod_mime generates). + * Content-Type to be lost (overwritten by whatever mod_mime generates). * We preserves the original Content-Type here. */ ap_set_content_type(r, apr_table_get( cache->stale_handle->resp_hdrs, "Content-Type")); @@ -1229,6 +1229,16 @@ static apr_status_t cache_save_filter(ap_filter_t *f, apr_bucket_brigade *in) return APR_SUCCESS; } + /* Set the content length if known. + */ + cl = apr_table_get(r->err_headers_out, "Content-Length"); + if (cl == NULL) { + cl = apr_table_get(r->headers_out, "Content-Length"); + } + if (cl && !ap_parse_strict_length(&size, cl)) { + reason = "invalid content length"; + } + if (reason) { ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(00768) "cache: %s not cached for request %s. Reason: %s", @@ -1251,19 +1261,6 @@ static apr_status_t cache_save_filter(ap_filter_t *f, apr_bucket_brigade *in) /* Make it so that we don't execute this path again. */ cache->in_checked = 1; - /* Set the content length if known. - */ - cl = apr_table_get(r->err_headers_out, "Content-Length"); - if (cl == NULL) { - cl = apr_table_get(r->headers_out, "Content-Length"); - } - if (cl) { - char *errp; - if (apr_strtoff(&size, cl, &errp, 10) || *errp || size < 0) { - cl = NULL; /* parse error, see next 'if' block */ - } - } - if (!cl) { /* if we don't get the content-length, see if we have all the * buckets and use their length to calculate the size @@ -2533,7 +2530,7 @@ static const command_rec cache_cmds[] = { /* XXX * Consider a new config directive that enables loading specific cache - * implememtations (like mod_cache_mem, mod_cache_file, etc.). + * implementations (like mod_cache_mem, mod_cache_file, etc.). * Rather than using a LoadModule directive, admin would use something * like CacheModule mem_cache_module | file_cache_module, etc, * which would cause the approprpriate cache module to be loaded. diff --git a/modules/cache/mod_cache_disk.c b/modules/cache/mod_cache_disk.c index 52d5dba..8d17a19 100644 --- a/modules/cache/mod_cache_disk.c +++ b/modules/cache/mod_cache_disk.c @@ -284,11 +284,11 @@ static const char* regen_key(apr_pool_t *p, apr_table_t *headers, * HTTP URI's (3.2.3) [host and scheme are insensitive] * HTTP method (5.1.1) * HTTP-date values (3.3.1) - * 3.7 Media Types [exerpt] + * 3.7 Media Types [excerpt] * The type, subtype, and parameter attribute names are case- * insensitive. Parameter values might or might not be case-sensitive, * depending on the semantics of the parameter name. 
- * 4.20 Except [exerpt] + * 4.20 Except [excerpt] * Comparison of expectation values is case-insensitive for unquoted * tokens (including the 100-continue token), and is case-sensitive for * quoted-string expectation-extensions. @@ -713,7 +713,7 @@ static apr_status_t read_array(request_rec *r, apr_array_header_t* arr, apr_file_t *file) { char w[MAX_STRING_LEN]; - int p; + apr_size_t p; apr_status_t rv; while (1) { @@ -778,7 +778,7 @@ static apr_status_t read_table(cache_handle_t *handle, request_rec *r, { char w[MAX_STRING_LEN]; char *l; - int p; + apr_size_t p; apr_status_t rv; while (1) { @@ -994,10 +994,11 @@ static apr_status_t write_headers(cache_handle_t *h, request_rec *r) } rv = mkdir_structure(conf, dobj->hdrs.file, r->pool); - - rv = apr_file_mktemp(&dobj->vary.tempfd, dobj->vary.tempfile, - APR_CREATE | APR_WRITE | APR_BINARY | APR_EXCL, - dobj->vary.pool); + if (rv == APR_SUCCESS) { + rv = apr_file_mktemp(&dobj->vary.tempfd, dobj->vary.tempfile, + APR_CREATE | APR_WRITE | APR_BINARY | APR_EXCL, + dobj->vary.pool); + } if (rv != APR_SUCCESS) { ap_log_rerror(APLOG_MARK, APLOG_WARNING, rv, r, APLOGNO(00721) @@ -1275,9 +1276,9 @@ static apr_status_t store_body(cache_handle_t *h, request_rec *r, * sanity checks. */ if (seen_eos) { - const char *cl_header = apr_table_get(r->headers_out, "Content-Length"); - if (!dobj->disk_info.header_only) { + const char *cl_header; + apr_off_t cl; if (dobj->data.tempfd) { rv = apr_file_close(dobj->data.tempfd); @@ -1296,6 +1297,7 @@ static apr_status_t store_body(cache_handle_t *h, request_rec *r, apr_pool_destroy(dobj->data.pool); return APR_EGENERAL; } + if (dobj->file_size < dconf->minfs) { ap_log_rerror( APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(00734) "URL %s failed the size check " @@ -1304,17 +1306,16 @@ static apr_status_t store_body(cache_handle_t *h, request_rec *r, apr_pool_destroy(dobj->data.pool); return APR_EGENERAL; } - if (cl_header) { - apr_int64_t cl = apr_atoi64(cl_header); - if ((errno == 0) && (dobj->file_size != cl)) { - ap_log_rerror( - APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(00735) "URL %s didn't receive complete response, not caching", h->cache_obj->key); - /* Remove the intermediate cache file and return non-APR_SUCCESS */ - apr_pool_destroy(dobj->data.pool); - return APR_EGENERAL; - } - } + cl_header = apr_table_get(r->headers_out, "Content-Length"); + if (cl_header && (!ap_parse_strict_length(&cl, cl_header) + || cl != dobj->file_size)) { + ap_log_rerror( + APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(00735) "URL %s didn't receive complete response, not caching", h->cache_obj->key); + /* Remove the intermediate cache file and return non-APR_SUCCESS */ + apr_pool_destroy(dobj->data.pool); + return APR_EGENERAL; + } } /* All checks were fine, we're good to go when the commit comes */ diff --git a/modules/cache/mod_cache_socache.c b/modules/cache/mod_cache_socache.c index 0d76760..5f9e1d6 100644 --- a/modules/cache/mod_cache_socache.c +++ b/modules/cache/mod_cache_socache.c @@ -18,6 +18,12 @@ #include "apr_file_io.h" #include "apr_strings.h" #include "apr_buckets.h" + +#include "apr_version.h" +#if !APR_VERSION_AT_LEAST(2,0,0) +#include "apu_version.h" +#endif + #include "httpd.h" #include "http_config.h" #include "http_log.h" @@ -217,8 +223,8 @@ static apr_status_t read_table(cache_handle_t *handle, request_rec *r, while (apr_isspace(buffer[colon]) && (colon < *slider)) { colon++; } - apr_table_addn(table, apr_pstrndup(r->pool, (const char *) buffer - + key, len - key), apr_pstrndup(r->pool, + apr_table_addn(table, 
apr_pstrmemdup(r->pool, (const char *) buffer + + key, len - key), apr_pstrmemdup(r->pool, (const char *) buffer + colon, *slider - colon)); (*slider)++; if (buffer[*slider] == '\n') { @@ -298,11 +304,11 @@ static const char* regen_key(apr_pool_t *p, apr_table_t *headers, * HTTP URI's (3.2.3) [host and scheme are insensitive] * HTTP method (5.1.1) * HTTP-date values (3.3.1) - * 3.7 Media Types [exerpt] + * 3.7 Media Types [excerpt] * The type, subtype, and parameter attribute names are case- * insensitive. Parameter values might or might not be case-sensitive, * depending on the semantics of the parameter name. - * 4.20 Except [exerpt] + * 4.20 Except [excerpt] * Comparison of expectation values is case-insensitive for unquoted * tokens (including the 100-continue token), and is case-sensitive for * quoted-string expectation-extensions. @@ -429,6 +435,14 @@ static int create_entity(cache_handle_t *h, request_rec *r, const char *key, return OK; } +static apr_status_t sobj_body_pre_cleanup(void *baton) +{ + cache_socache_object_t *sobj = baton; + apr_brigade_cleanup(sobj->body); + sobj->body = NULL; + return APR_SUCCESS; +} + static int open_entity(cache_handle_t *h, request_rec *r, const char *key) { cache_socache_dir_conf *dconf = @@ -463,6 +477,7 @@ static int open_entity(cache_handle_t *h, request_rec *r, const char *key) * about for the lifetime of the response. */ apr_pool_create(&sobj->pool, r->pool); + apr_pool_tag(sobj->pool, "mod_cache_socache (open_entity)"); sobj->buffer = apr_palloc(sobj->pool, dconf->max); sobj->buffer_len = dconf->max; @@ -648,36 +663,25 @@ static int open_entity(cache_handle_t *h, request_rec *r, const char *key) } /* Retrieve the body if we have one */ - sobj->body = apr_brigade_create(r->pool, r->connection->bucket_alloc); len = buffer_len - slider; - - /* - * Optimisation: if the body is small, we want to make a - * copy of the body and free the temporary pool, as we - * don't want large blocks of unused memory hanging around - * to the end of the response. In contrast, if the body is - * large, we would rather leave the body where it is in the - * temporary pool, and save ourselves the copy. - */ - if (len * 2 > dconf->max) { + if (len > 0) { apr_bucket *e; - - /* large - use the brigade as is, we're done */ - e = apr_bucket_immortal_create((const char *) sobj->buffer + slider, - len, r->connection->bucket_alloc); - + /* Create the body brigade later concatenated to the output filters' + * brigade by recall_body(). Since sobj->buffer (the data) point to + * sobj->pool (a subpool of r->pool), be safe by using a pool bucket + * which can morph to heap if sobj->pool is destroyed while the bucket + * is still alive. But if sobj->pool gets destroyed while the bucket is + * still in sobj->body (i.e. recall_body() was never called), we don't + * need to morph to something just about to be freed, so a pre_cleanup + * will take care of cleaning up sobj->body before this happens (and is + * a noop otherwise). + */ + sobj->body = apr_brigade_create(sobj->pool, r->connection->bucket_alloc); + apr_pool_pre_cleanup_register(sobj->pool, sobj, sobj_body_pre_cleanup); + e = apr_bucket_pool_create((const char *) sobj->buffer + slider, len, + sobj->pool, r->connection->bucket_alloc); APR_BRIGADE_INSERT_TAIL(sobj->body, e); } - else { - - /* small - make a copy of the data... 
*/ - apr_brigade_write(sobj->body, NULL, NULL, (const char *) sobj->buffer - + slider, len); - - /* ...and get rid of the large memory buffer */ - apr_pool_destroy(sobj->pool); - sobj->pool = NULL; - } /* make the configuration stick */ h->cache_obj = obj; @@ -766,13 +770,9 @@ static apr_status_t recall_body(cache_handle_t *h, apr_pool_t *p, apr_bucket_brigade *bb) { cache_socache_object_t *sobj = (cache_socache_object_t*) h->cache_obj->vobj; - apr_bucket *e; - e = APR_BRIGADE_FIRST(sobj->body); - - if (e != APR_BRIGADE_SENTINEL(sobj->body)) { - APR_BUCKET_REMOVE(e); - APR_BRIGADE_INSERT_TAIL(bb, e); + if (sobj->body) { + APR_BRIGADE_CONCAT(bb, sobj->body); } return APR_SUCCESS; @@ -807,6 +807,7 @@ static apr_status_t store_headers(cache_handle_t *h, request_rec *r, : obj->info.expire + dconf->mintime; apr_pool_create(&sobj->pool, r->pool); + apr_pool_tag(sobj->pool, "mod_cache_socache (store_headers)"); sobj->buffer = apr_palloc(sobj->pool, dconf->max); sobj->buffer_len = dconf->max; @@ -1051,7 +1052,8 @@ static apr_status_t store_body(cache_handle_t *h, request_rec *r, /* Was this the final bucket? If yes, perform sanity checks. */ if (seen_eos) { - const char *cl_header = apr_table_get(r->headers_out, "Content-Length"); + const char *cl_header; + apr_off_t cl; if (r->connection->aborted || r->no_cache) { ap_log_rerror(APLOG_MARK, APLOG_INFO, 0, r, APLOGNO(02380) @@ -1062,18 +1064,16 @@ static apr_status_t store_body(cache_handle_t *h, request_rec *r, sobj->pool = NULL; return APR_EGENERAL; } - if (cl_header) { - apr_off_t cl; - char *cl_endp; - if (apr_strtoff(&cl, cl_header, &cl_endp, 10) != APR_SUCCESS - || *cl_endp != '\0' || cl != sobj->body_length) { - ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(02381) - "URL %s didn't receive complete response, not caching", - h->cache_obj->key); - apr_pool_destroy(sobj->pool); - sobj->pool = NULL; - return APR_EGENERAL; - } + + cl_header = apr_table_get(r->headers_out, "Content-Length"); + if (cl_header && (!ap_parse_strict_length(&cl, cl_header) + || cl != sobj->body_length)) { + ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(02381) + "URL %s didn't receive complete response, not caching", + h->cache_obj->key); + apr_pool_destroy(sobj->pool); + sobj->pool = NULL; + return APR_EGENERAL; } /* All checks were fine, we're good to go when the commit comes */ diff --git a/modules/cache/mod_file_cache.c b/modules/cache/mod_file_cache.c index 4199361..ce1db2d 100644 --- a/modules/cache/mod_file_cache.c +++ b/modules/cache/mod_file_cache.c @@ -380,7 +380,7 @@ static int file_cache_handler(request_rec *r) return rc; } -static command_rec file_cache_cmds[] = +static const command_rec file_cache_cmds[] = { AP_INIT_ITERATE("cachefile", cachefilehandle, NULL, RSRC_CONF, "A space separated list of files to add to the file handle cache at config time"), diff --git a/modules/cache/mod_socache_dbm.c b/modules/cache/mod_socache_dbm.c index 579d2ff..df97573 100644 --- a/modules/cache/mod_socache_dbm.c +++ b/modules/cache/mod_socache_dbm.c @@ -92,6 +92,7 @@ static const char *socache_dbm_create(ap_socache_instance_t **context, } apr_pool_create(&ctx->pool, p); + apr_pool_tag(ctx->pool, "socache_dbm_instance"); return NULL; } @@ -120,6 +121,10 @@ static apr_status_t socache_dbm_init(ap_socache_instance_t *ctx, const struct ap_socache_hints *hints, server_rec *s, apr_pool_t *p) { +#if APU_MAJOR_VERSION > 1 || (APU_MAJOR_VERSION == 1 && APU_MINOR_VERSION >= 7) + const apr_dbm_driver_t *driver; + const apu_err_t *err; +#endif apr_dbm_t *dbm; apr_status_t 
rv; @@ -141,6 +146,22 @@ static apr_status_t socache_dbm_init(ap_socache_instance_t *ctx, /* open it once to create it and to make sure it _can_ be created */ apr_pool_clear(ctx->pool); +#if APU_MAJOR_VERSION > 1 || (APU_MAJOR_VERSION == 1 && APU_MINOR_VERSION >= 7) + if ((rv = apr_dbm_get_driver(&driver, NULL, &err, + ctx->pool) != APR_SUCCESS) { + ap_log_error(APLOG_MARK, APLOG_ERR, rv, s, APLOGNO(10277) + "Cannot load socache DBM library '%s': %s", + err->reason, err->msg); + return rv; + } + if ((rv = apr_dbm_open2(&dbm, driver, ctx->data_file, + APR_DBM_RWCREATE, DBM_FILE_MODE, ctx->pool)) != APR_SUCCESS) { + ap_log_error(APLOG_MARK, APLOG_ERR, rv, s, APLOGNO(00804) + "Cannot create socache DBM file `%s'", + ctx->data_file); + return DECLINED; + } +#else if ((rv = apr_dbm_open(&dbm, ctx->data_file, APR_DBM_RWCREATE, DBM_FILE_MODE, ctx->pool)) != APR_SUCCESS) { ap_log_error(APLOG_MARK, APLOG_ERR, rv, s, APLOGNO(00804) @@ -148,6 +169,7 @@ static apr_status_t socache_dbm_init(ap_socache_instance_t *ctx, ctx->data_file); return rv; } +#endif apr_dbm_close(dbm); ctx->expiry_interval = (hints && hints->expiry_interval @@ -192,6 +214,10 @@ static apr_status_t socache_dbm_store(ap_socache_instance_t *ctx, unsigned char *ucaData, unsigned int nData, apr_pool_t *pool) { +#if APU_MAJOR_VERSION > 1 || (APU_MAJOR_VERSION == 1 && APU_MINOR_VERSION >= 7) + const apr_dbm_driver_t *driver; + const apu_err_t *err; +#endif apr_dbm_t *dbm; apr_datum_t dbmkey; apr_datum_t dbmval; @@ -227,6 +253,25 @@ static apr_status_t socache_dbm_store(ap_socache_instance_t *ctx, /* and store it to the DBM file */ apr_pool_clear(ctx->pool); +#if APU_MAJOR_VERSION > 1 || (APU_MAJOR_VERSION == 1 && APU_MINOR_VERSION >= 7) + if ((rv = apr_dbm_get_driver(&driver, NULL, &err, + ctx->pool) != APR_SUCCESS) { + ap_log_error(APLOG_MARK, APLOG_ERR, rv, s, APLOGNO(10278) + "Cannot load socache DBM library '%s' (store): %s", + err->reason, err->msg); + free(dbmval.dptr); + return rv; + } + if ((rv = apr_dbm_open2(&dbm, driver, ctx->data_file, + APR_DBM_RWCREATE, DBM_FILE_MODE, ctx->pool)) != APR_SUCCESS) { + ap_log_error(APLOG_MARK, APLOG_ERR, rv, s, APLOGNO(00807) + "Cannot open socache DBM file `%s' for writing " + "(store)", + ctx->data_file); + free(dbmval.dptr); + return rv; + } +#else if ((rv = apr_dbm_open(&dbm, ctx->data_file, APR_DBM_RWCREATE, DBM_FILE_MODE, ctx->pool)) != APR_SUCCESS) { ap_log_error(APLOG_MARK, APLOG_ERR, rv, s, APLOGNO(00807) @@ -236,6 +281,7 @@ static apr_status_t socache_dbm_store(ap_socache_instance_t *ctx, free(dbmval.dptr); return rv; } +#endif if ((rv = apr_dbm_store(dbm, dbmkey, dbmval)) != APR_SUCCESS) { ap_log_error(APLOG_MARK, APLOG_ERR, rv, s, APLOGNO(00808) "Cannot store socache object to DBM file `%s'", @@ -260,6 +306,10 @@ static apr_status_t socache_dbm_retrieve(ap_socache_instance_t *ctx, server_rec unsigned char *dest, unsigned int *destlen, apr_pool_t *p) { +#if APU_MAJOR_VERSION > 1 || (APU_MAJOR_VERSION == 1 && APU_MINOR_VERSION >= 7) + const apr_dbm_driver_t *driver; + const apu_err_t *err; +#endif apr_dbm_t *dbm; apr_datum_t dbmkey; apr_datum_t dbmval; @@ -280,6 +330,23 @@ static apr_status_t socache_dbm_retrieve(ap_socache_instance_t *ctx, server_rec * do the apr_dbm_close? This would make the code a bit cleaner. 
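The mod_socache_dbm changes in this file gate the newer DBM driver API behind the apr-util version, keeping apr_dbm_open() as the fallback for older releases. A condensed sketch of that guard, assuming the same APR_DBM_RWCREATE mode and a caller-supplied file path (not the module's exact code):

    #include "apr_dbm.h"
    #include "apu_version.h"

    static apr_status_t open_socache_dbm(apr_dbm_t **dbm, const char *data_file,
                                         apr_pool_t *pool)
    {
    #if APU_MAJOR_VERSION > 1 || (APU_MAJOR_VERSION == 1 && APU_MINOR_VERSION >= 7)
        /* apr-util >= 1.7: look up the default driver once, then open with it;
         * the apu_err_t gives a descriptive reason when the driver is missing. */
        const apr_dbm_driver_t *driver;
        const apu_err_t *err;
        apr_status_t rv = apr_dbm_get_driver(&driver, NULL, &err, pool);
        if (rv != APR_SUCCESS) {
            return rv;
        }
        return apr_dbm_open2(dbm, driver, data_file, APR_DBM_RWCREATE,
                             APR_FPROT_UREAD | APR_FPROT_UWRITE, pool);
    #else
        /* Older apr-util: the classic one-shot open. */
        return apr_dbm_open(dbm, data_file, APR_DBM_RWCREATE,
                            APR_FPROT_UREAD | APR_FPROT_UWRITE, pool);
    #endif
    }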
*/ apr_pool_clear(ctx->pool); +#if APU_MAJOR_VERSION > 1 || (APU_MAJOR_VERSION == 1 && APU_MINOR_VERSION >= 7) + if ((rv = apr_dbm_get_driver(&driver, NULL, &err, + ctx->pool) != APR_SUCCESS) { + ap_log_error(APLOG_MARK, APLOG_ERR, rv, s, APLOGNO(10279) + "Cannot load socache DBM library '%s' (fetch): %s", + err->reason, err->msg); + return rc; + } + if ((rv = apr_dbm_open2(&dbm, driver, ctx->data_file, + APR_DBM_RWCREATE, DBM_FILE_MODE, ctx->pool)) != APR_SUCCESS) { + ap_log_error(APLOG_MARK, APLOG_ERR, rc, s, APLOGNO(00809) + "Cannot open socache DBM file `%s' for reading " + "(fetch)", + ctx->data_file); + return rc; + } +#else if ((rc = apr_dbm_open(&dbm, ctx->data_file, APR_DBM_RWCREATE, DBM_FILE_MODE, ctx->pool)) != APR_SUCCESS) { ap_log_error(APLOG_MARK, APLOG_ERR, rc, s, APLOGNO(00809) @@ -288,6 +355,7 @@ static apr_status_t socache_dbm_retrieve(ap_socache_instance_t *ctx, server_rec ctx->data_file); return rc; } +#endif rc = apr_dbm_fetch(dbm, dbmkey, &dbmval); if (rc != APR_SUCCESS) { apr_dbm_close(dbm); @@ -325,6 +393,10 @@ static apr_status_t socache_dbm_remove(ap_socache_instance_t *ctx, server_rec *s, const unsigned char *id, unsigned int idlen, apr_pool_t *p) { +#if APU_MAJOR_VERSION > 1 || (APU_MAJOR_VERSION == 1 && APU_MINOR_VERSION >= 7) + const apr_dbm_driver_t *driver; + const apu_err_t *err; +#endif apr_dbm_t *dbm; apr_datum_t dbmkey; apr_status_t rv; @@ -336,6 +408,23 @@ static apr_status_t socache_dbm_remove(ap_socache_instance_t *ctx, /* and delete it from the DBM file */ apr_pool_clear(ctx->pool); +#if APU_MAJOR_VERSION > 1 || (APU_MAJOR_VERSION == 1 && APU_MINOR_VERSION >= 7) + if ((rv = apr_dbm_get_driver(&driver, NULL, &err, + ctx->pool) != APR_SUCCESS) { + ap_log_error(APLOG_MARK, APLOG_ERR, rv, s, APLOGNO(10280) + "Cannot load socache DBM library '%s' (delete): %s", + err->reason, err->msg); + return rv; + } + if ((rv = apr_dbm_open2(&dbm, driver, ctx->data_file, + APR_DBM_RWCREATE, DBM_FILE_MODE, ctx->pool)) != APR_SUCCESS) { + ap_log_error(APLOG_MARK, APLOG_ERR, rv, s, APLOGNO(00810) + "Cannot open socache DBM file `%s' for writing " + "(delete)", + ctx->data_file); + return rv; + } +#else if ((rv = apr_dbm_open(&dbm, ctx->data_file, APR_DBM_RWCREATE, DBM_FILE_MODE, ctx->pool)) != APR_SUCCESS) { ap_log_error(APLOG_MARK, APLOG_ERR, rv, s, APLOGNO(00810) @@ -344,6 +433,7 @@ static apr_status_t socache_dbm_remove(ap_socache_instance_t *ctx, ctx->data_file); return rv; } +#endif apr_dbm_delete(dbm, dbmkey); apr_dbm_close(dbm); @@ -352,6 +442,10 @@ static apr_status_t socache_dbm_remove(ap_socache_instance_t *ctx, static void socache_dbm_expire(ap_socache_instance_t *ctx, server_rec *s) { +#if APU_MAJOR_VERSION > 1 || (APU_MAJOR_VERSION == 1 && APU_MINOR_VERSION >= 7) + const apr_dbm_driver_t *driver; + const apu_err_t *err; +#endif apr_dbm_t *dbm; apr_datum_t dbmkey; apr_datum_t dbmval; @@ -377,6 +471,16 @@ static void socache_dbm_expire(ap_socache_instance_t *ctx, server_rec *s) ctx->last_expiry = now; +#if APU_MAJOR_VERSION > 1 || (APU_MAJOR_VERSION == 1 && APU_MINOR_VERSION >= 7) + if ((rv = apr_dbm_get_driver(&driver, NULL, &err, + ctx->pool) != APR_SUCCESS) { + ap_log_error(APLOG_MARK, APLOG_ERR, rv, s, APLOGNO(10281) + "Cannot load socache DBM library '%s' (expire): %s", + err->reason, err->msg); + return rv; + } +#endif + /* * Here we have to be very carefully: Not all DBM libraries are * smart enough to allow one to iterate over the elements and at the @@ -400,6 +504,16 @@ static void socache_dbm_expire(ap_socache_instance_t *ctx, server_rec *s) /* pass 
1: scan DBM database */ keyidx = 0; +#if APU_MAJOR_VERSION > 1 || (APU_MAJOR_VERSION == 1 && APU_MINOR_VERSION >= 7) + if ((rv = apr_dbm_open2(&dbm, driver, ctx->data_file, APR_DBM_RWCREATE, + DBM_FILE_MODE, ctx->pool)) != APR_SUCCESS) { + ap_log_error(APLOG_MARK, APLOG_ERR, rv, s, APLOGNO(00811) + "Cannot open socache DBM file `%s' for " + "scanning", + ctx->data_file); + break; + } +#else if ((rv = apr_dbm_open(&dbm, ctx->data_file, APR_DBM_RWCREATE, DBM_FILE_MODE, ctx->pool)) != APR_SUCCESS) { ap_log_error(APLOG_MARK, APLOG_ERR, rv, s, APLOGNO(00811) @@ -408,6 +522,7 @@ static void socache_dbm_expire(ap_socache_instance_t *ctx, server_rec *s) ctx->data_file); break; } +#endif apr_dbm_firstkey(dbm, &dbmkey); while (dbmkey.dptr != NULL) { elts++; @@ -433,6 +548,16 @@ static void socache_dbm_expire(ap_socache_instance_t *ctx, server_rec *s) apr_dbm_close(dbm); /* pass 2: delete expired elements */ +#if APU_MAJOR_VERSION > 1 || (APU_MAJOR_VERSION == 1 && APU_MINOR_VERSION >= 7) + if (apr_dbm_open2(&dbm, driver, ctx->data_file, APR_DBM_RWCREATE, + DBM_FILE_MODE, ctx->pool) != APR_SUCCESS) { + ap_log_error(APLOG_MARK, APLOG_ERR, rv, s, APLOGNO(00812) + "Cannot re-open socache DBM file `%s' for " + "expiring", + ctx->data_file); + break; + } +#else if (apr_dbm_open(&dbm, ctx->data_file, APR_DBM_RWCREATE, DBM_FILE_MODE, ctx->pool) != APR_SUCCESS) { ap_log_error(APLOG_MARK, APLOG_ERR, rv, s, APLOGNO(00812) @@ -441,6 +566,7 @@ static void socache_dbm_expire(ap_socache_instance_t *ctx, server_rec *s) ctx->data_file); break; } +#endif for (i = 0; i < keyidx; i++) { apr_dbm_delete(dbm, keylist[i]); deleted++; @@ -460,6 +586,10 @@ static void socache_dbm_expire(ap_socache_instance_t *ctx, server_rec *s) static void socache_dbm_status(ap_socache_instance_t *ctx, request_rec *r, int flags) { +#if APU_MAJOR_VERSION > 1 || (APU_MAJOR_VERSION == 1 && APU_MINOR_VERSION >= 7) + const apr_dbm_driver_t *driver; + const apu_err_t *err; +#endif apr_dbm_t *dbm; apr_datum_t dbmkey; apr_datum_t dbmval; @@ -472,14 +602,32 @@ static void socache_dbm_status(ap_socache_instance_t *ctx, request_rec *r, size = 0; apr_pool_clear(ctx->pool); +#if APU_MAJOR_VERSION > 1 || (APU_MAJOR_VERSION == 1 && APU_MINOR_VERSION >= 7) + if ((rv = apr_dbm_get_driver(&driver, NULL, &err, + ctx->pool) != APR_SUCCESS) { + ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(10282) + "Cannot load socache DBM library '%s' (status retrieval): %s", + err->reason, err->msg); + return; + } + if ((rv = apr_dbm_open2(&dbm, driver, ctx->data_file, APR_DBM_RWCREATE, + DBM_FILE_MODE, ctx->pool)) != APR_SUCCESS) { + ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(00814) + "Cannot open socache DBM file `%s' for status " + "retrieval", + ctx->data_file); + return; + } +#else if ((rv = apr_dbm_open(&dbm, ctx->data_file, APR_DBM_RWCREATE, DBM_FILE_MODE, ctx->pool)) != APR_SUCCESS) { ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(00814) "Cannot open socache DBM file `%s' for status " - "retrival", + "retrieval", ctx->data_file); return; } +#endif /* * XXX - Check the return value of apr_dbm_firstkey, apr_dbm_fetch - TBD */ @@ -515,6 +663,10 @@ static apr_status_t socache_dbm_iterate(ap_socache_instance_t *ctx, ap_socache_iterator_t *iterator, apr_pool_t *pool) { +#if APU_MAJOR_VERSION > 1 || (APU_MAJOR_VERSION == 1 && APU_MINOR_VERSION >= 7) + const apr_dbm_driver_t *driver; + const apu_err_t *err; +#endif apr_dbm_t *dbm; apr_datum_t dbmkey; apr_datum_t dbmval; @@ -527,6 +679,22 @@ static apr_status_t socache_dbm_iterate(ap_socache_instance_t *ctx, 
* make sure the expired records are omitted */ now = apr_time_now(); +#if APU_MAJOR_VERSION > 1 || (APU_MAJOR_VERSION == 1 && APU_MINOR_VERSION >= 7) + if ((rv = apr_dbm_get_driver(&driver, NULL, &err, + ctx->pool) != APR_SUCCESS) { + ap_log_error(APLOG_MARK, APLOG_ERR, rv, s, APLOGNO(10283) + "Cannot load socache DBM library '%s' (iterating): %s", + err->reason, err->msg); + return rv; + } + if ((rv = apr_dbm_open2(&dbm, driver, ctx->data_file, APR_DBM_RWCREATE, + DBM_FILE_MODE, ctx->pool)) != APR_SUCCESS) { + ap_log_error(APLOG_MARK, APLOG_ERR, rv, s, APLOGNO(00815) + "Cannot open socache DBM file `%s' for " + "iterating", ctx->data_file); + return rv; + } +#else if ((rv = apr_dbm_open(&dbm, ctx->data_file, APR_DBM_RWCREATE, DBM_FILE_MODE, ctx->pool)) != APR_SUCCESS) { ap_log_error(APLOG_MARK, APLOG_ERR, rv, s, APLOGNO(00815) @@ -534,6 +702,7 @@ static apr_status_t socache_dbm_iterate(ap_socache_instance_t *ctx, "iterating", ctx->data_file); return rv; } +#endif rv = apr_dbm_firstkey(dbm, &dbmkey); while (rv == APR_SUCCESS && dbmkey.dptr != NULL) { expired = FALSE; diff --git a/modules/cache/mod_socache_dc.c b/modules/cache/mod_socache_dc.c index c1d4ab8..1fc52a7 100644 --- a/modules/cache/mod_socache_dc.c +++ b/modules/cache/mod_socache_dc.c @@ -69,7 +69,7 @@ static apr_status_t socache_dc_init(ap_socache_instance_t *ctx, /* This mode of operation will open a temporary connection to the 'target' * for each cache operation - this makes it safe against fork() * automatically. This mode is preferred when running a local proxy (over - * unix domain sockets) because overhead is negligable and it reduces the + * unix domain sockets) because overhead is negligible and it reduces the * performance/stability danger of file-descriptor bloatage. */ #define SESSION_CTX_FLAGS 0 #endif diff --git a/modules/cache/mod_socache_memcache.c b/modules/cache/mod_socache_memcache.c index e943b9b..f122ba4 100644 --- a/modules/cache/mod_socache_memcache.c +++ b/modules/cache/mod_socache_memcache.c @@ -20,15 +20,6 @@ #include "http_protocol.h" #include "apr.h" -#include "apu_version.h" - -/* apr_memcache support requires >= 1.3 */ -#if APU_MAJOR_VERSION > 1 || \ - (APU_MAJOR_VERSION == 1 && APU_MINOR_VERSION > 2) -#define HAVE_APU_MEMCACHE 1 -#endif - -#ifdef HAVE_APU_MEMCACHE #include "ap_socache.h" #include "ap_mpm.h" @@ -371,8 +362,6 @@ static const ap_socache_provider_t socache_mc = { socache_mc_iterate }; -#endif /* HAVE_APU_MEMCACHE */ - static void *create_server_config(apr_pool_t *p, server_rec *s) { socache_mc_svr_cfg *sconf = apr_pcalloc(p, sizeof(socache_mc_svr_cfg)); @@ -407,11 +396,9 @@ static const char *socache_mc_set_ttl(cmd_parms *cmd, void *dummy, static void register_hooks(apr_pool_t *p) { -#ifdef HAVE_APU_MEMCACHE ap_register_provider(p, AP_SOCACHE_PROVIDER_GROUP, "memcache", AP_SOCACHE_PROVIDER_VERSION, &socache_mc); -#endif } static const command_rec socache_memcache_cmds[] = { diff --git a/modules/cache/mod_socache_redis.c b/modules/cache/mod_socache_redis.c new file mode 100644 index 0000000..450f406 --- /dev/null +++ b/modules/cache/mod_socache_redis.c @@ -0,0 +1,486 @@ +/* Licensed to the Apache Software Foundation (ASF) under one or more +* contributor license agreements. See the NOTICE file distributed with +* this work for additional information regarding copyright ownership. +* The ASF licenses this file to You under the Apache License, Version 2.0 +* (the "License"); you may not use this file except in compliance with +* the License. 
You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. +*/ + +#include "httpd.h" +#include "http_config.h" +#include "http_protocol.h" + +#include "apr.h" +#include "apu_version.h" + +#include "ap_socache.h" +#include "ap_mpm.h" +#include "http_log.h" +#include "apr_strings.h" +#include "mod_status.h" + +typedef struct { + apr_uint32_t ttl; + apr_uint32_t rwto; +} socache_rd_svr_cfg; + +/* apr_redis support requires >= 1.6 */ +#if APU_MAJOR_VERSION > 1 || \ + (APU_MAJOR_VERSION == 1 && APU_MINOR_VERSION > 5) +#define HAVE_APU_REDIS 1 +#endif + +/* The underlying apr_redis system is thread safe.. */ +#define RD_KEY_LEN 254 + +#ifndef RD_DEFAULT_SERVER_PORT +#define RD_DEFAULT_SERVER_PORT 6379 +#endif + + +#ifndef RD_DEFAULT_SERVER_MIN +#define RD_DEFAULT_SERVER_MIN 0 +#endif + +#ifndef RD_DEFAULT_SERVER_SMAX +#define RD_DEFAULT_SERVER_SMAX 1 +#endif + +#ifndef RD_DEFAULT_SERVER_TTL +#define RD_DEFAULT_SERVER_TTL apr_time_from_sec(15) +#endif + +#ifndef RD_DEFAULT_SERVER_RWTO +#define RD_DEFAULT_SERVER_RWTO apr_time_from_sec(5) +#endif + +module AP_MODULE_DECLARE_DATA socache_redis_module; + +#ifdef HAVE_APU_REDIS +#include "apr_redis.h" +struct ap_socache_instance_t { + const char *servers; + apr_redis_t *rc; + const char *tag; + apr_size_t taglen; /* strlen(tag) + 1 */ +}; + +static const char *socache_rd_create(ap_socache_instance_t **context, + const char *arg, + apr_pool_t *tmp, apr_pool_t *p) +{ + ap_socache_instance_t *ctx; + + *context = ctx = apr_pcalloc(p, sizeof *ctx); + + if (!arg || !*arg) { + return "List of server names required to create redis socache."; + } + + ctx->servers = apr_pstrdup(p, arg); + + return NULL; +} + +static apr_status_t socache_rd_init(ap_socache_instance_t *ctx, + const char *namespace, + const struct ap_socache_hints *hints, + server_rec *s, apr_pool_t *p) +{ + apr_status_t rv; + int thread_limit = 0; + apr_uint16_t nservers = 0; + char *cache_config; + char *split; + char *tok; + + socache_rd_svr_cfg *sconf = ap_get_module_config(s->module_config, + &socache_redis_module); + + ap_mpm_query(AP_MPMQ_HARD_LIMIT_THREADS, &thread_limit); + + /* Find all the servers in the first run to get a total count */ + cache_config = apr_pstrdup(p, ctx->servers); + split = apr_strtok(cache_config, ",", &tok); + while (split) { + nservers++; + split = apr_strtok(NULL,",", &tok); + } + + rv = apr_redis_create(p, nservers, 0, &ctx->rc); + if (rv != APR_SUCCESS) { + ap_log_error(APLOG_MARK, APLOG_CRIT, rv, s, APLOGNO(03473) + "Failed to create Redis Object of '%d' size.", + nservers); + return rv; + } + + /* Now add each server to the redis */ + cache_config = apr_pstrdup(p, ctx->servers); + split = apr_strtok(cache_config, ",", &tok); + while (split) { + apr_redis_server_t *st; + char *host_str; + char *scope_id; + apr_port_t port; + + rv = apr_parse_addr_port(&host_str, &scope_id, &port, split, p); + if (rv != APR_SUCCESS) { + ap_log_error(APLOG_MARK, APLOG_CRIT, rv, s, APLOGNO(03474) + "Failed to Parse redis Server: '%s'", split); + return rv; + } + + if (host_str == NULL) { + ap_log_error(APLOG_MARK, APLOG_CRIT, rv, s, APLOGNO(03475) + "Failed to Parse Server, " + "no hostname specified: '%s'", split); + return 
APR_EINVAL; + } + + if (port == 0) { + port = RD_DEFAULT_SERVER_PORT; + } + + rv = apr_redis_server_create(p, + host_str, port, + RD_DEFAULT_SERVER_MIN, + RD_DEFAULT_SERVER_SMAX, + thread_limit, + sconf->ttl, + sconf->rwto, + &st); + if (rv != APR_SUCCESS) { + ap_log_error(APLOG_MARK, APLOG_CRIT, rv, s, APLOGNO(03476) + "Failed to Create redis Server: %s:%d", + host_str, port); + return rv; + } + + rv = apr_redis_add_server(ctx->rc, st); + if (rv != APR_SUCCESS) { + ap_log_error(APLOG_MARK, APLOG_CRIT, rv, s, APLOGNO(03477) + "Failed to Add redis Server: %s:%d", + host_str, port); + return rv; + } + + split = apr_strtok(NULL,",", &tok); + } + + ctx->tag = apr_pstrcat(p, namespace, ":", NULL); + ctx->taglen = strlen(ctx->tag) + 1; + + /* socache API constraint: */ + AP_DEBUG_ASSERT(ctx->taglen <= 16); + + return APR_SUCCESS; +} + +static void socache_rd_destroy(ap_socache_instance_t *context, server_rec *s) +{ + /* noop. */ +} + +/* Converts (binary) id into a key prefixed by the predetermined + * namespace tag; writes output to key buffer. Returns non-zero if + * the id won't fit in the key buffer. */ +static int socache_rd_id2key(ap_socache_instance_t *ctx, + const unsigned char *id, unsigned int idlen, + char *key, apr_size_t keylen) +{ + char *cp; + + if (idlen * 2 + ctx->taglen >= keylen) + return 1; + + cp = apr_cpystrn(key, ctx->tag, ctx->taglen); + ap_bin2hex(id, idlen, cp); + + return 0; +} + +static apr_status_t socache_rd_store(ap_socache_instance_t *ctx, server_rec *s, + const unsigned char *id, unsigned int idlen, + apr_time_t expiry, + unsigned char *ucaData, unsigned int nData, + apr_pool_t *p) +{ + char buf[RD_KEY_LEN]; + apr_status_t rv; + apr_uint32_t timeout; + + if (socache_rd_id2key(ctx, id, idlen, buf, sizeof(buf))) { + return APR_EINVAL; + } + timeout = apr_time_sec(expiry - apr_time_now()); + if (timeout <= 0) { + return APR_EINVAL; + } + + rv = apr_redis_setex(ctx->rc, buf, (char*)ucaData, nData, timeout, 0); + + if (rv != APR_SUCCESS) { + ap_log_error(APLOG_MARK, APLOG_CRIT, rv, s, APLOGNO(03478) + "scache_rd: error setting key '%s' " + "with %d bytes of data", buf, nData); + return rv; + } + + return APR_SUCCESS; +} + +static apr_status_t socache_rd_retrieve(ap_socache_instance_t *ctx, server_rec *s, + const unsigned char *id, unsigned int idlen, + unsigned char *dest, unsigned int *destlen, + apr_pool_t *p) +{ + apr_size_t data_len; + char buf[RD_KEY_LEN], *data; + apr_status_t rv; + + if (socache_rd_id2key(ctx, id, idlen, buf, sizeof buf)) { + return APR_EINVAL; + } + + /* ### this could do with a subpool, but _getp looks like it will + * eat memory like it's going out of fashion anyway. 
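socache_rd_id2key() above builds the Redis key by copying the namespace tag (already terminated with ':') and appending a hex dump of the binary id, refusing ids that would overflow the fixed key buffer. The same arithmetic, spelled out with comments as an illustrative sketch only (hypothetical function name):

    #include "httpd.h"       /* ap_bin2hex() */
    #include "apr_strings.h" /* apr_cpystrn() */

    /* taglen is strlen(tag) + 1, i.e. it counts the terminating NUL. */
    static int example_id2key(const char *tag, apr_size_t taglen,
                              const unsigned char *id, unsigned int idlen,
                              char *key, apr_size_t keylen)
    {
        char *cp;

        /* Conservatively require room for the tag, two hex characters per id
         * byte, and the trailing NUL. */
        if (idlen * 2 + taglen >= keylen)
            return 1; /* would not fit */

        cp = apr_cpystrn(key, tag, taglen); /* returns a pointer to the NUL */
        ap_bin2hex(id, idlen, cp);          /* hex string replaces that NUL */
        return 0;
    }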
*/ + + rv = apr_redis_getp(ctx->rc, p, buf, &data, &data_len, NULL); + if (rv) { + if (rv != APR_NOTFOUND) { + ap_log_error(APLOG_MARK, APLOG_ERR, rv, s, APLOGNO(03479) + "scache_rd: 'retrieve' FAIL"); + } + return rv; + } + else if (data_len > *destlen) { + ap_log_error(APLOG_MARK, APLOG_ERR, rv, s, APLOGNO(03480) + "scache_rd: 'retrieve' OVERFLOW"); + return APR_ENOMEM; + } + + memcpy(dest, data, data_len); + *destlen = data_len; + + return APR_SUCCESS; +} + +static apr_status_t socache_rd_remove(ap_socache_instance_t *ctx, server_rec *s, + const unsigned char *id, + unsigned int idlen, apr_pool_t *p) +{ + char buf[RD_KEY_LEN]; + apr_status_t rv; + + if (socache_rd_id2key(ctx, id, idlen, buf, sizeof buf)) { + return APR_EINVAL; + } + + rv = apr_redis_delete(ctx->rc, buf, 0); + + if (rv != APR_SUCCESS) { + ap_log_error(APLOG_MARK, APLOG_DEBUG, rv, s, APLOGNO(03481) + "scache_rd: error deleting key '%s' ", + buf); + } + + return rv; +} + +static void socache_rd_status(ap_socache_instance_t *ctx, request_rec *r, int flags) +{ + apr_redis_t *rc = ctx->rc; + int i; + + for (i = 0; i < rc->ntotal; i++) { + apr_redis_server_t *rs; + apr_redis_stats_t *stats; + char *role; + apr_status_t rv; + char *br = (!(flags & AP_STATUS_SHORT) ? "
" : ""); + + rs = rc->live_servers[i]; + + ap_rprintf(r, "Redis server: %s:%d [%s]%s\n", rs->host, (int)rs->port, + (rs->status == APR_RC_SERVER_LIVE) ? "Up" : "Down", + br); + rv = apr_redis_stats(rs, r->pool, &stats); + if (rv != APR_SUCCESS) + continue; + if (!(flags & AP_STATUS_SHORT)) { + ap_rprintf(r, "General:: Version: %u.%u.%u [%u bits], PID: %u, Uptime: %u hrs
<br />\n", + stats->major, stats->minor, stats->patch, stats->arch_bits, + stats->process_id, stats->uptime_in_seconds/3600); + ap_rprintf(r, "Clients:: Connected: %d, Blocked: %d
<br />\n", + stats->connected_clients, stats->blocked_clients); + ap_rprintf(r, "Memory:: Total: %" APR_UINT64_T_FMT ", Max: %" APR_UINT64_T_FMT ", Used: %" APR_UINT64_T_FMT "
<br />\n", + stats->total_system_memory, stats->maxmemory, stats->used_memory); + ap_rprintf(r, "CPU:: System: %u, User: %u
<br />\n", + stats->used_cpu_sys, stats->used_cpu_user ); + ap_rprintf(r, "Connections:: Recd: %" APR_UINT64_T_FMT ", Processed: %" APR_UINT64_T_FMT ", Rejected: %" APR_UINT64_T_FMT "
<br />\n", + stats->total_connections_received, stats->total_commands_processed, + stats->rejected_connections); + ap_rprintf(r, "Cache:: Hits: %" APR_UINT64_T_FMT ", Misses: %" APR_UINT64_T_FMT "
<br />\n", + stats->keyspace_hits, stats->keyspace_misses); + ap_rprintf(r, "Net:: Input bytes: %" APR_UINT64_T_FMT ", Output bytes: %" APR_UINT64_T_FMT "
<br />\n", + stats->total_net_input_bytes, stats->total_net_output_bytes); + if (stats->role == APR_RS_SERVER_MASTER) + role = "master"; + else if (stats->role == APR_RS_SERVER_SLAVE) + role = "slave"; + else + role = "unknown"; + ap_rprintf(r, "Misc:: Role: %s, Connected Slaves: %u, Is Cluster?: %s \n", + role, stats->connected_clients, + (stats->cluster_enabled ? "yes" : "no")); + ap_rputs("
<hr><br />
\n", r); + } + else { + ap_rprintf(r, "Version: %u.%u.%u [%u bits], PID: %u, Uptime: %u hrs %s\n", + stats->major, stats->minor, stats->patch, stats->arch_bits, + stats->process_id, stats->uptime_in_seconds/3600, br); + ap_rprintf(r, "Clients:: Connected: %d, Blocked: %d %s\n", + stats->connected_clients, stats->blocked_clients, br); + ap_rprintf(r, "Memory:: Total: %" APR_UINT64_T_FMT ", Max: %" APR_UINT64_T_FMT ", Used: %" APR_UINT64_T_FMT " %s\n", + stats->total_system_memory, stats->maxmemory, stats->used_memory, + br); + ap_rprintf(r, "CPU:: System: %u, User: %u %s\n", + stats->used_cpu_sys, stats->used_cpu_user , br); + ap_rprintf(r, "Connections:: Recd: %" APR_UINT64_T_FMT ", Processed: %" APR_UINT64_T_FMT ", Rejected: %" APR_UINT64_T_FMT " %s\n", + stats->total_connections_received, stats->total_commands_processed, + stats->rejected_connections, br); + ap_rprintf(r, "Cache:: Hits: %" APR_UINT64_T_FMT ", Misses: %" APR_UINT64_T_FMT " %s\n", + stats->keyspace_hits, stats->keyspace_misses, br); + ap_rprintf(r, "Net:: Input bytes: %" APR_UINT64_T_FMT ", Output bytes: %" APR_UINT64_T_FMT " %s\n", + stats->total_net_input_bytes, stats->total_net_output_bytes, br); + if (stats->role == APR_RS_SERVER_MASTER) + role = "master"; + else if (stats->role == APR_RS_SERVER_SLAVE) + role = "slave"; + else + role = "unknown"; + ap_rprintf(r, "Misc:: Role: %s, Connected Slaves: %u, Is Cluster?: %s %s\n", + role, stats->connected_clients, + (stats->cluster_enabled ? "yes" : "no"), br); + } + } + +} + +static apr_status_t socache_rd_iterate(ap_socache_instance_t *instance, + server_rec *s, void *userctx, + ap_socache_iterator_t *iterator, + apr_pool_t *pool) +{ + return APR_ENOTIMPL; +} + +static const ap_socache_provider_t socache_mc = { + "redis", + 0, + socache_rd_create, + socache_rd_init, + socache_rd_destroy, + socache_rd_store, + socache_rd_retrieve, + socache_rd_remove, + socache_rd_status, + socache_rd_iterate, +}; + +#endif /* HAVE_APU_REDIS */ + +static void* create_server_config(apr_pool_t* p, server_rec* s) +{ + socache_rd_svr_cfg *sconf = apr_palloc(p, sizeof(socache_rd_svr_cfg)); + + sconf->ttl = RD_DEFAULT_SERVER_TTL; + sconf->rwto = RD_DEFAULT_SERVER_RWTO; + + return sconf; +} + +static const char *socache_rd_set_ttl(cmd_parms *cmd, void *dummy, + const char *arg) +{ + apr_interval_time_t ttl; + socache_rd_svr_cfg *sconf = ap_get_module_config(cmd->server->module_config, + &socache_redis_module); + + if (ap_timeout_parameter_parse(arg, &ttl, "s") != APR_SUCCESS) { + return apr_pstrcat(cmd->pool, cmd->cmd->name, + " has wrong format", NULL); + } + + if ((ttl < apr_time_from_sec(0)) || (ttl > apr_time_from_sec(3600))) { + return apr_pstrcat(cmd->pool, cmd->cmd->name, + " can only be 0 or up to one hour.", NULL); + } + + /* apr_redis_server_create needs a ttl in usec. */ + sconf->ttl = ttl; + + return NULL; +} + +static const char *socache_rd_set_rwto(cmd_parms *cmd, void *dummy, + const char *arg) +{ + apr_interval_time_t rwto; + socache_rd_svr_cfg *sconf = ap_get_module_config(cmd->server->module_config, + &socache_redis_module); + + if (ap_timeout_parameter_parse(arg, &rwto, "s") != APR_SUCCESS) { + return apr_pstrcat(cmd->pool, cmd->cmd->name, + " has wrong format", NULL); + } + + if ((rwto < apr_time_from_sec(0)) || (rwto > apr_time_from_sec(3600))) { + return apr_pstrcat(cmd->pool, cmd->cmd->name, + " can only be 0 or up to one hour.", NULL); + } + + /* apr_redis_server_create needs a ttl in usec. 
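Together with the ap_register_provider() call in register_hooks() just below, the provider table above is what makes this cache reachable by name: consumers such as SSLSessionCache or AuthnCacheSOCache never link against the module, they look the provider up at configuration time. A hedged sketch of that consumer side, with hypothetical argument values:

    #include "httpd.h"
    #include "ap_provider.h"  /* ap_lookup_provider() */
    #include "ap_socache.h"

    /* Hypothetical consumer: find the "redis" provider and create an instance
     * for a comma-separated server list; init() would follow in post_config. */
    static const char *setup_redis_socache(apr_pool_t *ptemp, apr_pool_t *pconf,
                                           const ap_socache_provider_t **provider,
                                           ap_socache_instance_t **instance)
    {
        *provider = ap_lookup_provider(AP_SOCACHE_PROVIDER_GROUP, "redis",
                                       AP_SOCACHE_PROVIDER_VERSION);
        if (*provider == NULL) {
            return "mod_socache_redis is not loaded";
        }
        return (*provider)->create(instance, "localhost:6379", ptemp, pconf);
    }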
*/ + sconf->rwto = rwto; + + return NULL; +} + +static void register_hooks(apr_pool_t *p) +{ +#ifdef HAVE_APU_REDIS + + ap_register_provider(p, AP_SOCACHE_PROVIDER_GROUP, "redis", + AP_SOCACHE_PROVIDER_VERSION, + &socache_mc); +#endif +} + +static const command_rec socache_redis_cmds[] = +{ + AP_INIT_TAKE1("RedisConnPoolTTL", socache_rd_set_ttl, NULL, RSRC_CONF, + "TTL used for the connection pool with the Redis server(s)"), + AP_INIT_TAKE1("RedisTimeout", socache_rd_set_rwto, NULL, RSRC_CONF, + "R/W timeout used for the connection with the Redis server(s)"), + {NULL} +}; + +AP_DECLARE_MODULE(socache_redis) = { + STANDARD20_MODULE_STUFF, + NULL, /* create per-dir config structures */ + NULL, /* merge per-dir config structures */ + create_server_config, /* create per-server config structures */ + NULL, /* merge per-server config structures */ + socache_redis_cmds, /* table of config file commands */ + register_hooks /* register hooks */ +}; + diff --git a/modules/cache/mod_socache_redis.dep b/modules/cache/mod_socache_redis.dep new file mode 100644 index 0000000..a450a54 --- /dev/null +++ b/modules/cache/mod_socache_redis.dep @@ -0,0 +1,5 @@ +# Microsoft Developer Studio Generated Dependency File, included by mod_socache_shmcb.mak + +..\..\build\win32\httpd.rc : \ + "..\..\include\ap_release.h"\ + diff --git a/modules/cache/mod_socache_redis.dsp b/modules/cache/mod_socache_redis.dsp new file mode 100644 index 0000000..18f5962 --- /dev/null +++ b/modules/cache/mod_socache_redis.dsp @@ -0,0 +1,111 @@ +# Microsoft Developer Studio Project File - Name="mod_socache_redis" - Package Owner=<4> +# Microsoft Developer Studio Generated Build File, Format Version 6.00 +# ** DO NOT EDIT ** + +# TARGTYPE "Win32 (x86) Dynamic-Link Library" 0x0102 + +CFG=mod_socache_redis - Win32 Debug +!MESSAGE This is not a valid makefile. To build this project using NMAKE, +!MESSAGE use the Export Makefile command and run +!MESSAGE +!MESSAGE NMAKE /f "mod_socache_redis.mak". +!MESSAGE +!MESSAGE You can specify a configuration when running NMAKE +!MESSAGE by defining the macro CFG on the command line. 
For example: +!MESSAGE +!MESSAGE NMAKE /f "mod_socache_redis.mak" CFG="mod_socache_redis - Win32 Debug" +!MESSAGE +!MESSAGE Possible choices for configuration are: +!MESSAGE +!MESSAGE "mod_socache_redis - Win32 Release" (based on "Win32 (x86) Dynamic-Link Library") +!MESSAGE "mod_socache_redis - Win32 Debug" (based on "Win32 (x86) Dynamic-Link Library") +!MESSAGE + +# Begin Project +# PROP AllowPerConfigDependencies 0 +# PROP Scc_ProjName "" +# PROP Scc_LocalPath "" +CPP=cl.exe +MTL=midl.exe +RSC=rc.exe + +!IF "$(CFG)" == "mod_socache_redis - Win32 Release" + +# PROP BASE Use_MFC 0 +# PROP BASE Use_Debug_Libraries 0 +# PROP BASE Output_Dir "Release" +# PROP BASE Intermediate_Dir "Release" +# PROP BASE Target_Dir "" +# PROP Use_MFC 0 +# PROP Use_Debug_Libraries 0 +# PROP Output_Dir "Release" +# PROP Intermediate_Dir "Release" +# PROP Ignore_Export_Lib 0 +# PROP Target_Dir "" +# ADD BASE CPP /nologo /MD /W3 /O2 /D "WIN32" /D "NDEBUG" /D "_WINDOWS" /FD /c +# ADD CPP /nologo /MD /W3 /O2 /Oy- /Zi /I "../generators" /I "../../include" /I "../../srclib/apr/include" /I "../../srclib/apr-util/include" /D "NDEBUG" /D "WIN32" /D "_WINDOWS" /D "AAA_DECLARE_EXPORT" /Fd"Release\mod_socache_redis_src" /FD /c +# ADD BASE MTL /nologo /D "NDEBUG" /mktyplib203 /o /win32 "NUL" +# ADD MTL /nologo /D "NDEBUG" /mktyplib203 /o /win32 "NUL" +# ADD BASE RSC /l 0x409 /d "NDEBUG" +# ADD RSC /l 0x409 /fo"Release/mod_socache_redis.res" /i "../../include" /i "../../srclib/apr/include" /d "NDEBUG" /d BIN_NAME="mod_socache_redis.so" /d LONG_NAME="socache_redis module for Apache" +BSC32=bscmake.exe +# ADD BASE BSC32 /nologo +# ADD BSC32 /nologo +LINK32=link.exe +# ADD BASE LINK32 kernel32.lib /nologo /subsystem:windows /dll /out:".\Release\mod_socache_redis.so" /base:@..\..\os\win32\BaseAddr.ref,mod_socache_redis.so +# ADD LINK32 kernel32.lib /nologo /subsystem:windows /dll /incremental:no /debug /out:".\Release\mod_socache_redis.so" /base:@..\..\os\win32\BaseAddr.ref,mod_socache_redis.so /opt:ref +# Begin Special Build Tool +TargetPath=.\Release\mod_socache_redis.so +SOURCE="$(InputPath)" +PostBuild_Desc=Embed .manifest +PostBuild_Cmds=if exist $(TargetPath).manifest mt.exe -manifest $(TargetPath).manifest -outputresource:$(TargetPath);2 +# End Special Build Tool + +!ELSEIF "$(CFG)" == "mod_socache_redis - Win32 Debug" + +# PROP BASE Use_MFC 0 +# PROP BASE Use_Debug_Libraries 1 +# PROP BASE Output_Dir "Debug" +# PROP BASE Intermediate_Dir "Debug" +# PROP BASE Target_Dir "" +# PROP Use_MFC 0 +# PROP Use_Debug_Libraries 1 +# PROP Output_Dir "Debug" +# PROP Intermediate_Dir "Debug" +# PROP Ignore_Export_Lib 0 +# PROP Target_Dir "" +# ADD BASE CPP /nologo /MDd /W3 /EHsc /Zi /Od /D "WIN32" /D "_DEBUG" /D "_WINDOWS" /FD /c +# ADD CPP /nologo /MDd /W3 /EHsc /Zi /Od /I "../generators" /I "../../include" /I "../../srclib/apr/include" /I "../../srclib/apr-util/include" /D "_DEBUG" /D "WIN32" /D "_WINDOWS" /D "AAA_DECLARE_EXPORT" /Fd"Debug\mod_socache_redis_src" /FD /c +# ADD BASE MTL /nologo /D "_DEBUG" /mktyplib203 /o /win32 "NUL" +# ADD MTL /nologo /D "_DEBUG" /mktyplib203 /o /win32 "NUL" +# ADD BASE RSC /l 0x409 /d "_DEBUG" +# ADD RSC /l 0x409 /fo"Debug/mod_socache_redis.res" /i "../../include" /i "../../srclib/apr/include" /d "_DEBUG" /d BIN_NAME="mod_socache_redis.so" /d LONG_NAME="socache_redis module for Apache" +BSC32=bscmake.exe +# ADD BASE BSC32 /nologo +# ADD BSC32 /nologo +LINK32=link.exe +# ADD BASE LINK32 kernel32.lib /nologo /subsystem:windows /dll /incremental:no /debug /out:".\Debug\mod_socache_redis.so" 
/base:@..\..\os\win32\BaseAddr.ref,mod_socache_redis.so +# ADD LINK32 kernel32.lib /nologo /subsystem:windows /dll /incremental:no /debug /out:".\Debug\mod_socache_redis.so" /base:@..\..\os\win32\BaseAddr.ref,mod_socache_redis.so +# Begin Special Build Tool +TargetPath=.\Debug\mod_socache_redis.so +SOURCE="$(InputPath)" +PostBuild_Desc=Embed .manifest +PostBuild_Cmds=if exist $(TargetPath).manifest mt.exe -manifest $(TargetPath).manifest -outputresource:$(TargetPath);2 +# End Special Build Tool + +!ENDIF + +# Begin Target + +# Name "mod_socache_redis - Win32 Release" +# Name "mod_socache_redis - Win32 Debug" +# Begin Source File + +SOURCE=.\mod_socache_redis.c +# End Source File +# Begin Source File + +SOURCE=..\..\build\win32\httpd.rc +# End Source File +# End Target +# End Project diff --git a/modules/cache/mod_socache_redis.mak b/modules/cache/mod_socache_redis.mak new file mode 100644 index 0000000..e4aab37 --- /dev/null +++ b/modules/cache/mod_socache_redis.mak @@ -0,0 +1,353 @@ +# Microsoft Developer Studio Generated NMAKE File, Based on mod_socache_redis.dsp +!IF "$(CFG)" == "" +CFG=mod_socache_redis - Win32 Debug +!MESSAGE No configuration specified. Defaulting to mod_socache_redis - Win32 Debug. +!ENDIF + +!IF "$(CFG)" != "mod_socache_redis - Win32 Release" && "$(CFG)" != "mod_socache_redis - Win32 Debug" +!MESSAGE Invalid configuration "$(CFG)" specified. +!MESSAGE You can specify a configuration when running NMAKE +!MESSAGE by defining the macro CFG on the command line. For example: +!MESSAGE +!MESSAGE NMAKE /f "mod_socache_redis.mak" CFG="mod_socache_redis - Win32 Debug" +!MESSAGE +!MESSAGE Possible choices for configuration are: +!MESSAGE +!MESSAGE "mod_socache_redis - Win32 Release" (based on "Win32 (x86) Dynamic-Link Library") +!MESSAGE "mod_socache_redis - Win32 Debug" (based on "Win32 (x86) Dynamic-Link Library") +!MESSAGE +!ERROR An invalid configuration is specified. 
+!ENDIF + +!IF "$(OS)" == "Windows_NT" +NULL= +!ELSE +NULL=nul +!ENDIF + +!IF "$(CFG)" == "mod_socache_redis - Win32 Release" + +OUTDIR=.\Release +INTDIR=.\Release +DS_POSTBUILD_DEP=$(INTDIR)\postbld.dep +# Begin Custom Macros +OutDir=.\Release +# End Custom Macros + +!IF "$(RECURSE)" == "0" + +ALL : "$(OUTDIR)\mod_socache_redis.so" "$(DS_POSTBUILD_DEP)" + +!ELSE + +ALL : "libhttpd - Win32 Release" "libaprutil - Win32 Release" "libapr - Win32 Release" "$(OUTDIR)\mod_socache_redis.so" "$(DS_POSTBUILD_DEP)" + +!ENDIF + +!IF "$(RECURSE)" == "1" +CLEAN :"libapr - Win32 ReleaseCLEAN" "libaprutil - Win32 ReleaseCLEAN" "libhttpd - Win32 ReleaseCLEAN" +!ELSE +CLEAN : +!ENDIF + -@erase "$(INTDIR)\mod_socache_redis.obj" + -@erase "$(INTDIR)\mod_socache_redis.res" + -@erase "$(INTDIR)\mod_socache_redis_src.idb" + -@erase "$(INTDIR)\mod_socache_redis_src.pdb" + -@erase "$(OUTDIR)\mod_socache_redis.exp" + -@erase "$(OUTDIR)\mod_socache_redis.lib" + -@erase "$(OUTDIR)\mod_socache_redis.pdb" + -@erase "$(OUTDIR)\mod_socache_redis.so" + +"$(OUTDIR)" : + if not exist "$(OUTDIR)/$(NULL)" mkdir "$(OUTDIR)" + +CPP=cl.exe +CPP_PROJ=/nologo /MD /W3 /Zi /O2 /Oy- /I "../../srclib/apr-util/include" /I "../../srclib/apr/include" /I "../../include" /I "../generators" /D "NDEBUG" /D "WIN32" /D "_WINDOWS" /Fo"$(INTDIR)\\" /Fd"$(INTDIR)\mod_socache_redis_src" /FD /c + +.c{$(INTDIR)}.obj:: + $(CPP) @<< + $(CPP_PROJ) $< +<< + +.cpp{$(INTDIR)}.obj:: + $(CPP) @<< + $(CPP_PROJ) $< +<< + +.cxx{$(INTDIR)}.obj:: + $(CPP) @<< + $(CPP_PROJ) $< +<< + +.c{$(INTDIR)}.sbr:: + $(CPP) @<< + $(CPP_PROJ) $< +<< + +.cpp{$(INTDIR)}.sbr:: + $(CPP) @<< + $(CPP_PROJ) $< +<< + +.cxx{$(INTDIR)}.sbr:: + $(CPP) @<< + $(CPP_PROJ) $< +<< + +MTL=midl.exe +MTL_PROJ=/nologo /D "NDEBUG" /mktyplib203 /win32 +RSC=rc.exe +RSC_PROJ=/l 0x409 /fo"$(INTDIR)\mod_socache_redis.res" /i "../../include" /i "../../srclib/apr/include" /d "NDEBUG" /d BIN_NAME="mod_socache_redis.so" /d LONG_NAME="socache_redis_module for Apache" +BSC32=bscmake.exe +BSC32_FLAGS=/nologo /o"$(OUTDIR)\mod_socache_redis.bsc" +BSC32_SBRS= \ + +LINK32=link.exe +LINK32_FLAGS=kernel32.lib /nologo /subsystem:windows /dll /incremental:no /pdb:"$(OUTDIR)\mod_socache_redis.pdb" /debug /out:"$(OUTDIR)\mod_socache_redis.so" /implib:"$(OUTDIR)\mod_socache_redis.lib" /base:@..\..\os\win32\BaseAddr.ref,mod_socache_redis.so /opt:ref +LINK32_OBJS= \ + "$(INTDIR)\mod_socache_redis.obj" \ + "$(INTDIR)\mod_socache_redis.res" \ + "..\..\srclib\apr\Release\libapr-1.lib" \ + "..\..\srclib\apr-util\Release\libaprutil-1.lib" \ + "..\..\Release\libhttpd.lib" + +"$(OUTDIR)\mod_socache_redis.so" : "$(OUTDIR)" $(DEF_FILE) $(LINK32_OBJS) + $(LINK32) @<< + $(LINK32_FLAGS) $(LINK32_OBJS) +<< + +TargetPath=.\Release\mod_socache_redis.so +SOURCE="$(InputPath)" +PostBuild_Desc=Embed .manifest +DS_POSTBUILD_DEP=$(INTDIR)\postbld.dep + +# Begin Custom Macros +OutDir=.\Release +# End Custom Macros + +"$(DS_POSTBUILD_DEP)" : "$(OUTDIR)\mod_socache_redis.so" + if exist .\Release\mod_socache_redis.so.manifest mt.exe -manifest .\Release\mod_socache_redis.so.manifest -outputresource:.\Release\mod_socache_redis.so;2 + echo Helper for Post-build step > "$(DS_POSTBUILD_DEP)" + +!ELSEIF "$(CFG)" == "mod_socache_redis - Win32 Debug" + +OUTDIR=.\Debug +INTDIR=.\Debug +DS_POSTBUILD_DEP=$(INTDIR)\postbld.dep +# Begin Custom Macros +OutDir=.\Debug +# End Custom Macros + +!IF "$(RECURSE)" == "0" + +ALL : "$(OUTDIR)\mod_socache_redis.so" "$(DS_POSTBUILD_DEP)" + +!ELSE + +ALL : "libhttpd - Win32 Debug" "libaprutil - Win32 Debug" "libapr - 
Win32 Debug" "$(OUTDIR)\mod_socache_redis.so" "$(DS_POSTBUILD_DEP)" + +!ENDIF + +!IF "$(RECURSE)" == "1" +CLEAN :"libapr - Win32 DebugCLEAN" "libaprutil - Win32 DebugCLEAN" "libhttpd - Win32 DebugCLEAN" +!ELSE +CLEAN : +!ENDIF + -@erase "$(INTDIR)\mod_socache_redis.obj" + -@erase "$(INTDIR)\mod_socache_redis.res" + -@erase "$(INTDIR)\mod_socache_redis_src.idb" + -@erase "$(INTDIR)\mod_socache_redis_src.pdb" + -@erase "$(OUTDIR)\mod_socache_redis.exp" + -@erase "$(OUTDIR)\mod_socache_redis.lib" + -@erase "$(OUTDIR)\mod_socache_redis.pdb" + -@erase "$(OUTDIR)\mod_socache_redis.so" + +"$(OUTDIR)" : + if not exist "$(OUTDIR)/$(NULL)" mkdir "$(OUTDIR)" + +CPP=cl.exe +CPP_PROJ=/nologo /MDd /W3 /Zi /Od /I "../../srclib/apr-util/include" /I "../../srclib/apr/include" /I "../../include" /I "../generators" /D "_DEBUG" /D "WIN32" /D "_WINDOWS" /Fo"$(INTDIR)\\" /Fd"$(INTDIR)\mod_socache_redis_src" /FD /EHsc /c + +.c{$(INTDIR)}.obj:: + $(CPP) @<< + $(CPP_PROJ) $< +<< + +.cpp{$(INTDIR)}.obj:: + $(CPP) @<< + $(CPP_PROJ) $< +<< + +.cxx{$(INTDIR)}.obj:: + $(CPP) @<< + $(CPP_PROJ) $< +<< + +.c{$(INTDIR)}.sbr:: + $(CPP) @<< + $(CPP_PROJ) $< +<< + +.cpp{$(INTDIR)}.sbr:: + $(CPP) @<< + $(CPP_PROJ) $< +<< + +.cxx{$(INTDIR)}.sbr:: + $(CPP) @<< + $(CPP_PROJ) $< +<< + +MTL=midl.exe +MTL_PROJ=/nologo /D "_DEBUG" /mktyplib203 /win32 +RSC=rc.exe +RSC_PROJ=/l 0x409 /fo"$(INTDIR)\mod_socache_redis.res" /i "../../include" /i "../../srclib/apr/include" /d "_DEBUG" /d BIN_NAME="mod_socache_redis.so" /d LONG_NAME="socache_redis_module for Apache" +BSC32=bscmake.exe +BSC32_FLAGS=/nologo /o"$(OUTDIR)\mod_socache_redis.bsc" +BSC32_SBRS= \ + +LINK32=link.exe +LINK32_FLAGS=kernel32.lib /nologo /subsystem:windows /dll /incremental:no /pdb:"$(OUTDIR)\mod_socache_redis.pdb" /debug /out:"$(OUTDIR)\mod_socache_redis.so" /implib:"$(OUTDIR)\mod_socache_redis.lib" /base:@..\..\os\win32\BaseAddr.ref,mod_socache_redis.so +LINK32_OBJS= \ + "$(INTDIR)\mod_socache_redis.obj" \ + "$(INTDIR)\mod_socache_redis.res" \ + "..\..\srclib\apr\Debug\libapr-1.lib" \ + "..\..\srclib\apr-util\Debug\libaprutil-1.lib" \ + "..\..\Debug\libhttpd.lib" + +"$(OUTDIR)\mod_socache_redis.so" : "$(OUTDIR)" $(DEF_FILE) $(LINK32_OBJS) + $(LINK32) @<< + $(LINK32_FLAGS) $(LINK32_OBJS) +<< + +TargetPath=.\Debug\mod_socache_redis.so +SOURCE="$(InputPath)" +PostBuild_Desc=Embed .manifest +DS_POSTBUILD_DEP=$(INTDIR)\postbld.dep + +# Begin Custom Macros +OutDir=.\Debug +# End Custom Macros + +"$(DS_POSTBUILD_DEP)" : "$(OUTDIR)\mod_socache_redis.so" + if exist .\Debug\mod_socache_redis.so.manifest mt.exe -manifest .\Debug\mod_socache_redis.so.manifest -outputresource:.\Debug\mod_socache_redis.so;2 + echo Helper for Post-build step > "$(DS_POSTBUILD_DEP)" + +!ENDIF + + +!IF "$(NO_EXTERNAL_DEPS)" != "1" +!IF EXISTS("mod_socache_redis.dep") +!INCLUDE "mod_socache_redis.dep" +!ELSE +!MESSAGE Warning: cannot find "mod_socache_redis.dep" +!ENDIF +!ENDIF + + +!IF "$(CFG)" == "mod_socache_redis - Win32 Release" || "$(CFG)" == "mod_socache_redis - Win32 Debug" + +!IF "$(CFG)" == "mod_socache_redis - Win32 Release" + +"libapr - Win32 Release" : + cd ".\..\..\srclib\apr" + $(MAKE) /$(MAKEFLAGS) /F ".\libapr.mak" CFG="libapr - Win32 Release" + cd "..\..\modules\cache" + +"libapr - Win32 ReleaseCLEAN" : + cd ".\..\..\srclib\apr" + $(MAKE) /$(MAKEFLAGS) /F ".\libapr.mak" CFG="libapr - Win32 Release" RECURSE=1 CLEAN + cd "..\..\modules\cache" + +!ELSEIF "$(CFG)" == "mod_socache_redis - Win32 Debug" + +"libapr - Win32 Debug" : + cd ".\..\..\srclib\apr" + $(MAKE) /$(MAKEFLAGS) /F 
".\libapr.mak" CFG="libapr - Win32 Debug" + cd "..\..\modules\cache" + +"libapr - Win32 DebugCLEAN" : + cd ".\..\..\srclib\apr" + $(MAKE) /$(MAKEFLAGS) /F ".\libapr.mak" CFG="libapr - Win32 Debug" RECURSE=1 CLEAN + cd "..\..\modules\cache" + +!ENDIF + +!IF "$(CFG)" == "mod_socache_redis - Win32 Release" + +"libaprutil - Win32 Release" : + cd ".\..\..\srclib\apr-util" + $(MAKE) /$(MAKEFLAGS) /F ".\libaprutil.mak" CFG="libaprutil - Win32 Release" + cd "..\..\modules\cache" + +"libaprutil - Win32 ReleaseCLEAN" : + cd ".\..\..\srclib\apr-util" + $(MAKE) /$(MAKEFLAGS) /F ".\libaprutil.mak" CFG="libaprutil - Win32 Release" RECURSE=1 CLEAN + cd "..\..\modules\cache" + +!ELSEIF "$(CFG)" == "mod_socache_redis - Win32 Debug" + +"libaprutil - Win32 Debug" : + cd ".\..\..\srclib\apr-util" + $(MAKE) /$(MAKEFLAGS) /F ".\libaprutil.mak" CFG="libaprutil - Win32 Debug" + cd "..\..\modules\cache" + +"libaprutil - Win32 DebugCLEAN" : + cd ".\..\..\srclib\apr-util" + $(MAKE) /$(MAKEFLAGS) /F ".\libaprutil.mak" CFG="libaprutil - Win32 Debug" RECURSE=1 CLEAN + cd "..\..\modules\cache" + +!ENDIF + +!IF "$(CFG)" == "mod_socache_redis - Win32 Release" + +"libhttpd - Win32 Release" : + cd ".\..\.." + $(MAKE) /$(MAKEFLAGS) /F ".\libhttpd.mak" CFG="libhttpd - Win32 Release" + cd ".\modules\cache" + +"libhttpd - Win32 ReleaseCLEAN" : + cd ".\..\.." + $(MAKE) /$(MAKEFLAGS) /F ".\libhttpd.mak" CFG="libhttpd - Win32 Release" RECURSE=1 CLEAN + cd ".\modules\cache" + +!ELSEIF "$(CFG)" == "mod_socache_redis - Win32 Debug" + +"libhttpd - Win32 Debug" : + cd ".\..\.." + $(MAKE) /$(MAKEFLAGS) /F ".\libhttpd.mak" CFG="libhttpd - Win32 Debug" + cd ".\modules\cache" + +"libhttpd - Win32 DebugCLEAN" : + cd ".\..\.." + $(MAKE) /$(MAKEFLAGS) /F ".\libhttpd.mak" CFG="libhttpd - Win32 Debug" RECURSE=1 CLEAN + cd ".\modules\cache" + +!ENDIF + +SOURCE=..\..\build\win32\httpd.rc + +!IF "$(CFG)" == "mod_socache_redis - Win32 Release" + + +"$(INTDIR)\mod_socache_redis.res" : $(SOURCE) "$(INTDIR)" + $(RSC) /l 0x409 /fo"$(INTDIR)\mod_socache_redis.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "NDEBUG" /d BIN_NAME="mod_socache_redis.so" /d LONG_NAME="socache_redis_module for Apache" $(SOURCE) + + +!ELSEIF "$(CFG)" == "mod_socache_redis - Win32 Debug" + + +"$(INTDIR)\mod_socache_redis.res" : $(SOURCE) "$(INTDIR)" + $(RSC) /l 0x409 /fo"$(INTDIR)\mod_socache_redis.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "_DEBUG" /d BIN_NAME="mod_socache_redis.so" /d LONG_NAME="socache_redis_module for Apache" $(SOURCE) + + +!ENDIF + +SOURCE=.\mod_socache_redis.c + +"$(INTDIR)\mod_socache_redis.obj" : $(SOURCE) "$(INTDIR)" + + + +!ENDIF + diff --git a/modules/cache/mod_socache_shmcb.c b/modules/cache/mod_socache_shmcb.c index 2750f25..1785839 100644 --- a/modules/cache/mod_socache_shmcb.c +++ b/modules/cache/mod_socache_shmcb.c @@ -105,6 +105,7 @@ typedef struct { } SHMCBIndex; struct ap_socache_instance_t { + apr_pool_t *pool; const char *data_file; apr_size_t shm_size; apr_shm_t *shm; @@ -295,6 +296,7 @@ static const char *socache_shmcb_create(ap_socache_instance_t **context, /* Allocate the context. 
*/ *context = ctx = apr_pcalloc(p, sizeof *ctx); + ctx->pool = p; ctx->shm_size = 1024*512; /* 512KB */ @@ -340,6 +342,16 @@ static const char *socache_shmcb_create(ap_socache_instance_t **context, return NULL; } +static apr_status_t socache_shmcb_cleanup(void *arg) +{ + ap_socache_instance_t *ctx = arg; + if (ctx->shm) { + apr_shm_destroy(ctx->shm); + ctx->shm = NULL; + } + return APR_SUCCESS; +} + static apr_status_t socache_shmcb_init(ap_socache_instance_t *ctx, const char *namespace, const struct ap_socache_hints *hints, @@ -368,8 +380,9 @@ static apr_status_t socache_shmcb_init(ap_socache_instance_t *ctx, * above will return NULL for invalid paths. */ if (ctx->data_file == NULL) { ap_log_error(APLOG_MARK, APLOG_ERR, 0, s, APLOGNO(00818) - "Could not use default path '%s' for shmcb socache", - ctx->data_file); + "Could not use anonymous shm for '%s' cache", + namespace); + ctx->shm = NULL; return APR_EINVAL; } @@ -384,8 +397,11 @@ static apr_status_t socache_shmcb_init(ap_socache_instance_t *ctx, ap_log_error(APLOG_MARK, APLOG_ERR, rv, s, APLOGNO(00819) "Could not allocate shared memory segment for shmcb " "socache"); + ctx->shm = NULL; return rv; } + apr_pool_cleanup_register(ctx->pool, ctx, socache_shmcb_cleanup, + apr_pool_cleanup_null); shm_segment = apr_shm_baseaddr_get(ctx->shm); shm_segsize = apr_shm_size_get(ctx->shm); @@ -473,9 +489,8 @@ static apr_status_t socache_shmcb_init(ap_socache_instance_t *ctx, static void socache_shmcb_destroy(ap_socache_instance_t *ctx, server_rec *s) { - if (ctx && ctx->shm) { - apr_shm_destroy(ctx->shm); - ctx->shm = NULL; + if (ctx) { + apr_pool_cleanup_run(ctx->pool, ctx, socache_shmcb_cleanup); } } @@ -778,7 +793,6 @@ static int shmcb_subcache_store(server_rec *s, SHMCBHeader *header, */ if (header->subcache_data_size - subcache->data_used < total_len || subcache->idx_used == header->index_num) { - unsigned int loop = 0; idx = SHMCB_INDEX(subcache, subcache->idx_pos); ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s, APLOGNO(00845) @@ -805,7 +819,6 @@ static int shmcb_subcache_store(server_rec *s, SHMCBHeader *header, header->stat_scrolled++; /* Loop admin */ idx = idx2; - loop++; } while (header->subcache_data_size - subcache->data_used < total_len); ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s, APLOGNO(00846) diff --git a/modules/cluster/mod_heartmonitor.c b/modules/cluster/mod_heartmonitor.c index 965fef5..53b6504 100644 --- a/modules/cluster/mod_heartmonitor.c +++ b/modules/cluster/mod_heartmonitor.c @@ -39,7 +39,7 @@ static const ap_slotmem_provider_t *storage = NULL; static ap_slotmem_instance_t *slotmem = NULL; -static int maxworkers = 0; +static int maxworkers = 10; module AP_MODULE_DECLARE_DATA heartmonitor_module; @@ -171,7 +171,7 @@ static apr_status_t hm_update(void* mem, void *data, apr_pool_t *p) hm_slot_server_t *old = (hm_slot_server_t *) mem; hm_slot_server_ctx_t *s = (hm_slot_server_ctx_t *) data; hm_server_t *new = s->s; - if (strncmp(old->ip, new->ip, MAXIPSIZE)==0) { + if (strcmp(old->ip, new->ip)==0) { s->found = 1; old->busy = new->busy; old->ready = new->ready; @@ -185,7 +185,7 @@ static apr_status_t hm_readid(void* mem, void *data, apr_pool_t *p) hm_slot_server_t *old = (hm_slot_server_t *) mem; hm_slot_server_ctx_t *s = (hm_slot_server_ctx_t *) data; hm_server_t *new = s->s; - if (strncmp(old->ip, new->ip, MAXIPSIZE)==0) { + if (strcmp(old->ip, new->ip)==0) { s->found = 1; s->item_id = old->id; } @@ -202,7 +202,8 @@ static apr_status_t hm_slotmem_update_stat(hm_server_t *s, apr_pool_t *pool) if (!ctx.found) { unsigned int i; 
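The heartmonitor changes here, and the mod_macro change a little further on, swap memcpy()/strncpy() into fixed-size buffers for apr_cpystrn(), which always NUL-terminates and never reads past the end of the source, so the strcmp() comparisons on old->ip become safe. A small illustrative sketch (buffer size and names are hypothetical):

    #include <string.h>
    #include "apr_strings.h" /* apr_cpystrn() */

    #define IPBUFSIZE 64 /* stand-in for the module's MAXIPSIZE */

    struct slot { char ip[IPBUFSIZE]; };

    static void record_ip(struct slot *s, const char *ip)
    {
        /* Zero the slot first (it lives in shared memory), then copy at most
         * IPBUFSIZE - 1 bytes and terminate. strncpy() would leave the buffer
         * unterminated for long input, and memcpy(IPBUFSIZE) could read past
         * the end of a shorter source string. */
        memset(s, 0, sizeof(*s));
        apr_cpystrn(s->ip, ip, sizeof(s->ip));
    }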
hm_slot_server_t hmserver; - memcpy(hmserver.ip, s->ip, MAXIPSIZE); + memset(&hmserver, 0, sizeof(hmserver)); + apr_cpystrn(hmserver.ip, s->ip, sizeof(hmserver.ip)); hmserver.busy = s->busy; hmserver.ready = s->ready; hmserver.seen = s->seen; @@ -528,7 +529,7 @@ static hm_server_t *hm_get_server(hm_ctx_t *ctx, const char *ip, const int port) /* Process a message received from a backend node */ static void hm_processmsg(hm_ctx_t *ctx, apr_pool_t *p, - apr_sockaddr_t *from, char *buf, int len) + apr_sockaddr_t *from, char *buf, apr_size_t len) { apr_table_t *tbl; @@ -624,9 +625,7 @@ static apr_status_t hm_watchdog_callback(int state, void *data, /* store in the slotmem or in the file depending on configuration */ hm_update_stats(ctx, pool); cur = now = apr_time_sec(apr_time_now()); - /* TODO: Insted HN_UPDATE_SEC use - * the ctx->interval - */ + while ((now - cur) < apr_time_sec(ctx->interval)) { int n; apr_status_t rc; @@ -635,6 +634,7 @@ static apr_status_t hm_watchdog_callback(int state, void *data, apr_interval_time_t timeout; apr_pool_create(&p, pool); + apr_pool_tag(p, "hm_running"); pfd.desc_type = APR_POLL_SOCKET; pfd.desc.s = ctx->sock; @@ -809,6 +809,7 @@ static void *hm_create_config(apr_pool_t *p, server_rec *s) ctx->interval = apr_time_from_sec(HM_UPDATE_SEC); ctx->s = s; apr_pool_create(&ctx->p, p); + apr_pool_tag(ctx->p, "hm_ctx"); ctx->servers = apr_hash_make(ctx->p); return ctx; @@ -891,8 +892,9 @@ static const char *cmd_hm_maxworkers(cmd_parms *cmd, } maxworkers = atoi(data); - if (maxworkers <= 10) - return "HeartbeatMaxServers: Should be bigger than 10"; + if (maxworkers != 0 && maxworkers < 10) + return "HeartbeatMaxServers: Should be 0 for file storage, " + "or greater or equal than 10 for slotmem"; return NULL; } diff --git a/modules/core/mod_macro.c b/modules/core/mod_macro.c index 04af43b..cc42d0b 100644 --- a/modules/core/mod_macro.c +++ b/modules/core/mod_macro.c @@ -465,7 +465,7 @@ static const char *process_content(apr_pool_t * pool, for (i = 0; i < contents->nelts; i++) { const char *errmsg; /* copy the line and substitute macro parameters */ - strncpy(line, ((char **) contents->elts)[i], MAX_STRING_LEN - 1); + apr_cpystrn(line, ((char **) contents->elts)[i], MAX_STRING_LEN); errmsg = substitute_macro_args(line, MAX_STRING_LEN, macro, replacements, used); if (errmsg) { diff --git a/modules/core/mod_so.c b/modules/core/mod_so.c index 6eafbe9..f5d18c1 100644 --- a/modules/core/mod_so.c +++ b/modules/core/mod_so.c @@ -159,7 +159,7 @@ static const char *dso_load(cmd_parms *cmd, apr_dso_handle_t **modhandlep, cmd->cmd->name, filename); } *used_filename = fullname; - if (apr_dso_load(modhandlep, fullname, cmd->pool) == APR_SUCCESS) { + if (fullname && apr_dso_load(modhandlep, fullname, cmd->pool) == APR_SUCCESS) { return NULL; } if (retry) { diff --git a/modules/core/mod_watchdog.c b/modules/core/mod_watchdog.c index 61f4675..99ec7cf 100644 --- a/modules/core/mod_watchdog.c +++ b/modules/core/mod_watchdog.c @@ -24,6 +24,8 @@ #include "http_core.h" #include "util_mutex.h" +#include "apr_atomic.h" + #define AP_WATCHDOG_PGROUP "watchdog" #define AP_WATCHDOG_PVERSION "parent" #define AP_WATCHDOG_CVERSION "child" @@ -43,7 +45,7 @@ struct watchdog_list_t struct ap_watchdog_t { - apr_thread_mutex_t *startup; + apr_uint32_t thread_started; /* set to 1 once thread running */ apr_proc_mutex_t *mutex; const char *name; watchdog_list_t *callbacks; @@ -74,6 +76,10 @@ static apr_status_t wd_worker_cleanup(void *data) apr_status_t rv; ap_watchdog_t *w = (ap_watchdog_t *)data; + /* 
Do nothing if the thread wasn't started. */ + if (apr_atomic_read32(&w->thread_started) != 1) + return APR_SUCCESS; + if (w->is_running) { watchdog_list_t *wl = w->callbacks; while (wl) { @@ -106,11 +112,13 @@ static void* APR_THREAD_FUNC wd_worker(apr_thread_t *thread, void *data) int probed = 0; int inited = 0; int mpmq_s = 0; + apr_pool_t *temp_pool = NULL; w->pool = apr_thread_pool_get(thread); w->is_running = 1; - apr_thread_mutex_unlock(w->startup); + apr_atomic_set32(&w->thread_started, 1); /* thread started */ + if (w->mutex) { while (w->is_running) { if (ap_mpm_query(AP_MPMQ_MPM_STATE, &mpmq_s) != APR_SUCCESS) { @@ -151,6 +159,10 @@ static void* APR_THREAD_FUNC wd_worker(apr_thread_t *thread, void *data) apr_sleep(AP_WD_TM_SLICE); } } + + apr_pool_create(&temp_pool, w->pool); + apr_pool_tag(temp_pool, "wd_running"); + if (w->is_running) { watchdog_list_t *wl = w->callbacks; ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, wd_server_conf->s, @@ -158,15 +170,13 @@ static void* APR_THREAD_FUNC wd_worker(apr_thread_t *thread, void *data) w->singleton ? "Singleton " : "", w->name); apr_time_clock_hires(w->pool); if (wl) { - apr_pool_t *ctx = NULL; - apr_pool_create(&ctx, w->pool); while (wl && w->is_running) { /* Execute watchdog callback */ wl->status = (*wl->callback_fn)(AP_WATCHDOG_STATE_STARTING, - (void *)wl->data, ctx); + (void *)wl->data, temp_pool); wl = wl->next; } - apr_pool_destroy(ctx); + apr_pool_clear(temp_pool); } else { ap_run_watchdog_init(wd_server_conf->s, w->name, w->pool); @@ -176,7 +186,6 @@ static void* APR_THREAD_FUNC wd_worker(apr_thread_t *thread, void *data) /* Main execution loop */ while (w->is_running) { - apr_pool_t *ctx = NULL; apr_time_t curr; watchdog_list_t *wl = w->callbacks; @@ -195,12 +204,10 @@ static void* APR_THREAD_FUNC wd_worker(apr_thread_t *thread, void *data) if (wl->status == APR_SUCCESS) { wl->step += (apr_time_now() - curr); if (wl->step >= wl->interval) { - if (!ctx) - apr_pool_create(&ctx, w->pool); wl->step = 0; /* Execute watchdog callback */ wl->status = (*wl->callback_fn)(AP_WATCHDOG_STATE_RUNNING, - (void *)wl->data, ctx); + (void *)wl->data, temp_pool); if (ap_mpm_query(AP_MPMQ_MPM_STATE, &mpmq_s) != APR_SUCCESS) { w->is_running = 0; } @@ -217,19 +224,17 @@ static void* APR_THREAD_FUNC wd_worker(apr_thread_t *thread, void *data) */ w->step += (apr_time_now() - curr); if (w->step >= wd_interval) { - if (!ctx) - apr_pool_create(&ctx, w->pool); w->step = 0; /* Run watchdog step hook */ - ap_run_watchdog_step(wd_server_conf->s, w->name, ctx); + ap_run_watchdog_step(wd_server_conf->s, w->name, temp_pool); } } - if (ctx) - apr_pool_destroy(ctx); - if (!w->is_running) { - break; - } + + apr_pool_clear(temp_pool); } + + apr_pool_destroy(temp_pool); + if (inited) { /* Run the watchdog exit hooks. * If this was singleton watchdog the init hook @@ -264,10 +269,7 @@ static apr_status_t wd_startup(ap_watchdog_t *w, apr_pool_t *p) { apr_status_t rc; - /* Create thread startup mutex */ - rc = apr_thread_mutex_create(&w->startup, APR_THREAD_MUTEX_UNNESTED, p); - if (rc != APR_SUCCESS) - return rc; + apr_atomic_set32(&w->thread_started, 0); if (w->singleton) { /* Initialize singleton mutex in child */ @@ -277,22 +279,12 @@ static apr_status_t wd_startup(ap_watchdog_t *w, apr_pool_t *p) return rc; } - /* This mutex fixes problems with a fast start/fast end, where the pool - * cleanup was being invoked before the thread completely spawned. 
- */ - apr_thread_mutex_lock(w->startup); - apr_pool_pre_cleanup_register(p, w, wd_worker_cleanup); - /* Start the newly created watchdog */ - rc = apr_thread_create(&w->thread, NULL, wd_worker, w, p); - if (rc) { - apr_pool_cleanup_kill(p, w, wd_worker_cleanup); + rc = ap_thread_create(&w->thread, NULL, wd_worker, w, p); + if (rc == APR_SUCCESS) { + apr_pool_pre_cleanup_register(p, w, wd_worker_cleanup); } - apr_thread_mutex_lock(w->startup); - apr_thread_mutex_unlock(w->startup); - apr_thread_mutex_destroy(w->startup); - return rc; } @@ -447,6 +439,7 @@ static int wd_post_config_hook(apr_pool_t *pconf, apr_pool_t *plog, if (!(wd_server_conf = apr_pcalloc(ppconf, sizeof(wd_server_conf_t)))) return APR_ENOMEM; apr_pool_create(&wd_server_conf->pool, ppconf); + apr_pool_tag(wd_server_conf->pool, "wd_server_conf"); apr_pool_userdata_set(wd_server_conf, pk, apr_pool_cleanup_null, ppconf); } ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s, APLOGNO(010033) @@ -473,7 +466,7 @@ static int wd_post_config_hook(apr_pool_t *pconf, apr_pool_t *plog, int status = ap_run_watchdog_need(s, w->name, 1, w->singleton); if (status == OK) { - /* One of the modules returned OK to this watchog. + /* One of the modules returned OK to this watchdog. * Mark it as active */ w->active = 1; @@ -519,7 +512,7 @@ static int wd_post_config_hook(apr_pool_t *pconf, apr_pool_t *plog, int status = ap_run_watchdog_need(s, w->name, 0, w->singleton); if (status == OK) { - /* One of the modules returned OK to this watchog. + /* One of the modules returned OK to this watchdog. * Mark it as active */ w->active = 1; diff --git a/modules/database/mod_dbd.c b/modules/database/mod_dbd.c index 7212665..aa6b764 100644 --- a/modules/database/mod_dbd.c +++ b/modules/database/mod_dbd.c @@ -525,6 +525,7 @@ static apr_status_t dbd_construct(void **data_ptr, "Failed to create memory pool"); return rv; } + apr_pool_tag(rec_pool, "dbd_rec_pool"); rec = apr_pcalloc(rec_pool, sizeof(ap_dbd_t)); @@ -589,6 +590,7 @@ static apr_status_t dbd_construct(void **data_ptr, apr_pool_destroy(rec->pool); return rv; } + apr_pool_tag(prepared_pool, "dbd_prepared_pool"); rv = dbd_prepared_init(prepared_pool, cfg, rec); if (rv != APR_SUCCESS) { @@ -673,6 +675,7 @@ static apr_status_t dbd_setup_init(apr_pool_t *pool, server_rec *s) "Failed to create reslist cleanup memory pool"); return rv2; } + apr_pool_tag(group->pool, "dbd_group"); #if APR_HAS_THREADS rv2 = dbd_setup(s, group); diff --git a/modules/dav/fs/dbm.c b/modules/dav/fs/dbm.c index 821168e..347d75d 100644 --- a/modules/dav/fs/dbm.c +++ b/modules/dav/fs/dbm.c @@ -37,6 +37,11 @@ #define APR_WANT_BYTEFUNC #include "apr_want.h" /* for ntohs and htons */ +#include "apr_version.h" +#if !APR_VERSION_AT_LEAST(2,0,0) +#include "apu_version.h" +#endif + #include "mod_dav.h" #include "repos.h" #include "http_log.h" @@ -127,11 +132,30 @@ void dav_fs_ensure_state_dir(apr_pool_t * p, const char *dirname) dav_error * dav_dbm_open_direct(apr_pool_t *p, const char *pathname, int ro, dav_db **pdb) { - apr_status_t status; +#if APU_MAJOR_VERSION > 1 || (APU_MAJOR_VERSION == 1 && APU_MINOR_VERSION >= 7) + const apr_dbm_driver_t *driver; + const apu_err_t *err; +#endif apr_dbm_t *file = NULL; + apr_status_t status; *pdb = NULL; +#if APU_MAJOR_VERSION > 1 || (APU_MAJOR_VERSION == 1 && APU_MINOR_VERSION >= 7) + if ((status = apr_dbm_get_driver(&driver, NULL, &err, p)) != APR_SUCCESS) { + ap_log_error(APLOG_MARK, APLOG_ERR, status, ap_server_conf, APLOGNO(10289) + "mod_dav_fs: The DBM library '%s' could not be loaded: %s", + 
err->reason, err->msg); + return dav_new_error(p, HTTP_INTERNAL_SERVER_ERROR, 1, status, + "Could not load library for property database."); + } + if ((status = apr_dbm_open2(&file, driver, pathname, + ro ? APR_DBM_READONLY : APR_DBM_RWCREATE, + APR_OS_DEFAULT, p)) + != APR_SUCCESS && !ro) { + return dav_fs_dbm_error(NULL, p, status); + } +#else if ((status = apr_dbm_open(&file, pathname, ro ? APR_DBM_READONLY : APR_DBM_RWCREATE, APR_OS_DEFAULT, p)) @@ -143,6 +167,7 @@ dav_error * dav_dbm_open_direct(apr_pool_t *p, const char *pathname, int ro, and we need to write */ return dav_fs_dbm_error(NULL, p, status); } +#endif /* may be NULL if we tried to open a non-existent db as read-only */ if (file != NULL) { @@ -355,29 +380,33 @@ static void dav_append_prop(apr_pool_t *pool, /* the property is an empty value */ if (*name == ':') { /* "no namespace" case */ - s = apr_psprintf(pool, "<%s/>" DEBUG_CR, name+1); + s = apr_pstrcat(pool, "<", name+1, "/>" DEBUG_CR, NULL); } else { - s = apr_psprintf(pool, "" DEBUG_CR, name); + s = apr_pstrcat(pool, "" DEBUG_CR, NULL); } } else if (*lang != '\0') { if (*name == ':') { /* "no namespace" case */ - s = apr_psprintf(pool, "<%s xml:lang=\"%s\">%s" DEBUG_CR, - name+1, lang, value, name+1); + s = apr_pstrcat(pool, "<", name+1, " xml:lang=\"", + lang, "\">", value, "" DEBUG_CR, + NULL); } else { - s = apr_psprintf(pool, "%s" DEBUG_CR, - name, lang, value, name); + s = apr_pstrcat(pool, "", value, "" DEBUG_CR, + NULL); } } else if (*name == ':') { /* "no namespace" case */ - s = apr_psprintf(pool, "<%s>%s" DEBUG_CR, name+1, value, name+1); + s = apr_pstrcat(pool, "<", name+1, ">", value, "" + DEBUG_CR, NULL); } else { - s = apr_psprintf(pool, "%s" DEBUG_CR, name, value, name); + s = apr_pstrcat(pool, "", value, "" + DEBUG_CR, NULL); } apr_text_append(pool, phdr, s); diff --git a/modules/dav/fs/lock.c b/modules/dav/fs/lock.c index c058e2e..ef18c4a 100644 --- a/modules/dav/fs/lock.c +++ b/modules/dav/fs/lock.c @@ -953,7 +953,7 @@ static dav_error * dav_fs_add_locknull_state( /* ** dav_fs_remove_locknull_state: Given a request, check to see if r->filename -** is/was a lock-null resource. If so, return it to an existant state, i.e. +** is/was a lock-null resource. If so, return it to an existent state, i.e. ** remove it from the list in the appropriate .DAV/locknull file. 
*/ static dav_error * dav_fs_remove_locknull_state( diff --git a/modules/dav/fs/repos.c b/modules/dav/fs/repos.c index 6a5ff76..64bc894 100644 --- a/modules/dav/fs/repos.c +++ b/modules/dav/fs/repos.c @@ -35,6 +35,7 @@ #include "mod_dav.h" #include "repos.h" +APLOG_USE_MODULE(dav_fs); /* to assist in debugging mod_dav's GET handling */ #define DEBUG_GET_HANDLER 0 @@ -948,15 +949,29 @@ static dav_error * dav_fs_open_stream(const dav_resource *resource, else if (APR_STATUS_IS_EEXIST(rv)) { rv = apr_file_open(&ds->f, ds->pathname, flags, APR_OS_DEFAULT, ds->p); + if (rv != APR_SUCCESS) { + return dav_new_error(p, MAP_IO2HTTP(rv), 0, rv, + apr_psprintf(p, "Could not open an existing " + "resource for writing: %s.", + ds->pathname)); + } } } else { rv = apr_file_open(&ds->f, ds->pathname, flags, APR_OS_DEFAULT, ds->p); + if (rv != APR_SUCCESS) { + return dav_new_error(p, MAP_IO2HTTP(rv), 0, rv, + apr_psprintf(p, "Could not open an existing " + "resource for reading: %s.", + ds->pathname)); + } } if (rv != APR_SUCCESS) { return dav_new_error(p, MAP_IO2HTTP(rv), 0, rv, - "An error occurred while opening a resource."); + apr_psprintf(p, "An error occurred while opening " + "a resource for writing: %s.", + ds->pathname)); } /* (APR registers cleanups for the fd with the pool) */ @@ -1572,6 +1587,19 @@ static dav_error * dav_fs_walker(dav_fs_walker_context *fsctx, int depth) status = apr_stat(&fsctx->info1.finfo, fsctx->path1.buf, DAV_FINFO_MASK, pool); if (status != APR_SUCCESS && status != APR_INCOMPLETE) { + dav_resource_private *ctx = params->root->info; + + ap_log_rerror(APLOG_MARK, APLOG_ERR, status, ctx->r, + APLOGNO(10472) "could not access file (%s) during directory walk", + fsctx->path1.buf); + + /* If being tolerant, ignore failure due to losing a race + * with some other process deleting files out from under + * the directory walk. */ + if ((params->walk_type & DAV_WALKTYPE_TOLERANT) + && APR_STATUS_IS_ENOENT(status)) { + continue; + } /* woah! where'd it go? */ /* ### should have a better error here */ err = dav_new_error(pool, HTTP_NOT_FOUND, 0, status, NULL); @@ -1663,7 +1691,7 @@ static dav_error * dav_fs_walker(dav_fs_walker_context *fsctx, int depth) /* put a slash back on the end of the directory */ fsctx->path1.buf[fsctx->path1.cur_len - 1] = '/'; - /* these are all non-existant (files) */ + /* these are all non-existent (files) */ fsctx->res1.exists = 0; fsctx->res1.collection = 0; memset(&fsctx->info1.finfo, 0, sizeof(fsctx->info1.finfo)); @@ -1853,27 +1881,26 @@ static dav_error * dav_fs_walk(const dav_walk_params *params, int depth, return dav_fs_internal_walk(params, depth, 0, NULL, response); } -/* dav_fs_etag: Stolen from ap_make_etag. Creates a strong etag - * for file path. - * ### do we need to return weak tags sometimes? +/* dav_fs_etag: Creates an etag for the file path. 
*/ static const char *dav_fs_getetag(const dav_resource *resource) { - dav_resource_private *ctx = resource->info; - /* XXX: This should really honor the FileETag setting */ + etag_rec er; - if (!resource->exists) - return apr_pstrdup(ctx->pool, ""); + dav_resource_private *ctx = resource->info; - if (ctx->finfo.filetype != APR_NOFILE) { - return apr_psprintf(ctx->pool, "\"%" APR_UINT64_T_HEX_FMT "-%" - APR_UINT64_T_HEX_FMT "\"", - (apr_uint64_t) ctx->finfo.size, - (apr_uint64_t) ctx->finfo.mtime); + if (!resource->exists || !ctx->r) { + return ""; } - return apr_psprintf(ctx->pool, "\"%" APR_UINT64_T_HEX_FMT "\"", - (apr_uint64_t) ctx->finfo.mtime); + er.vlist_validator = NULL; + er.request_time = ctx->r->request_time; + er.finfo = &ctx->finfo; + er.pathname = ctx->pathname; + er.fd = NULL; + er.force_weak = 0; + + return ap_make_etag_ex(ctx->r, &er); } static const dav_hooks_repository dav_hooks_repository_fs = @@ -1913,7 +1940,7 @@ static dav_prop_insert dav_fs_insert_prop(const dav_resource *resource, const char *s; apr_pool_t *p = resource->info->pool; const dav_liveprop_spec *info; - int global_ns; + long global_ns; /* an HTTP-date can be 29 chars plus a null term */ /* a 64-bit size can be 20 chars plus a null term */ @@ -1994,18 +2021,20 @@ static dav_prop_insert dav_fs_insert_prop(const dav_resource *resource, /* DBG3("FS: inserting lp%d:%s (local %d)", ns, scan->name, scan->ns); */ if (what == DAV_PROP_INSERT_VALUE) { - s = apr_psprintf(p, "%s" DEBUG_CR, + s = apr_psprintf(p, "%s" DEBUG_CR, global_ns, info->name, value, global_ns, info->name); } else if (what == DAV_PROP_INSERT_NAME) { - s = apr_psprintf(p, "" DEBUG_CR, global_ns, info->name); + s = apr_psprintf(p, "" DEBUG_CR, global_ns, info->name); } else { /* assert: what == DAV_PROP_INSERT_SUPPORTED */ - s = apr_psprintf(p, - "" DEBUG_CR, - info->name, dav_fs_namespace_uris[info->ns]); + s = apr_pstrcat(p, + "name, + "\" D:namespace=\"", + dav_fs_namespace_uris[info->ns], + "\"/>" DEBUG_CR, NULL); } apr_text_append(p, phdr, s); diff --git a/modules/dav/lock/locks.c b/modules/dav/lock/locks.c index 17b9ee6..0f072ec 100644 --- a/modules/dav/lock/locks.c +++ b/modules/dav/lock/locks.c @@ -26,8 +26,14 @@ #define APR_WANT_MEMFUNC #include "apr_want.h" +#include "apr_version.h" +#if !APR_VERSION_AT_LEAST(2,0,0) +#include "apu_version.h" +#endif + #include "httpd.h" #include "http_log.h" +#include "http_main.h" /* for ap_server_conf */ #include "mod_dav.h" @@ -311,16 +317,36 @@ static int dav_generic_compare_locktoken(const dav_locktoken *lt1, */ static dav_error * dav_generic_really_open_lockdb(dav_lockdb *lockdb) { +#if APU_MAJOR_VERSION > 1 || (APU_MAJOR_VERSION == 1 && APU_MINOR_VERSION >= 7) + const apr_dbm_driver_t *driver; + const apu_err_t *er; +#endif dav_error *err; - apr_status_t status; + apr_status_t status = APR_SUCCESS; if (lockdb->info->opened) { return NULL; } +#if APU_MAJOR_VERSION > 1 || (APU_MAJOR_VERSION == 1 && APU_MINOR_VERSION >= 7) + status = apr_dbm_get_driver(&driver, NULL, &er, lockdb->info->pool); + + if (status) { + ap_log_error(APLOG_MARK, APLOG_ERR, status, ap_server_conf, APLOGNO(10288) + "mod_dav_lock: The DBM library '%s' could not be loaded: %s", + er->reason, er->msg); + return dav_new_error(lockdb->info->pool, HTTP_INTERNAL_SERVER_ERROR, 1, + status, "Could not load library for property database."); + } + + status = apr_dbm_open2(&lockdb->info->db, driver, lockdb->info->lockdb_path, + lockdb->ro ? 
APR_DBM_READONLY : APR_DBM_RWCREATE, + APR_OS_DEFAULT, lockdb->info->pool); +#else status = apr_dbm_open(&lockdb->info->db, lockdb->info->lockdb_path, lockdb->ro ? APR_DBM_READONLY : APR_DBM_RWCREATE, APR_OS_DEFAULT, lockdb->info->pool); +#endif if (status) { err = dav_generic_dbm_new_error(lockdb->info->db, lockdb->info->pool, diff --git a/modules/dav/main/mod_dav.c b/modules/dav/main/mod_dav.c index 84012f2..dea3f18 100644 --- a/modules/dav/main/mod_dav.c +++ b/modules/dav/main/mod_dav.c @@ -81,8 +81,10 @@ typedef struct { const char *provider_name; const dav_provider *provider; const char *dir; + const char *base; int locktimeout; int allow_depthinfinity; + int allow_lockdiscovery; } dav_dir_conf; @@ -195,8 +197,11 @@ static void *dav_merge_dir_config(apr_pool_t *p, void *base, void *overrides) newconf->locktimeout = DAV_INHERIT_VALUE(parent, child, locktimeout); newconf->dir = DAV_INHERIT_VALUE(parent, child, dir); + newconf->base = DAV_INHERIT_VALUE(parent, child, base); newconf->allow_depthinfinity = DAV_INHERIT_VALUE(parent, child, allow_depthinfinity); + newconf->allow_lockdiscovery = DAV_INHERIT_VALUE(parent, child, + allow_lockdiscovery); return newconf; } @@ -207,7 +212,7 @@ DAV_DECLARE(const char *) dav_get_provider_name(request_rec *r) return conf ? conf->provider_name : NULL; } -static const dav_provider *dav_get_provider(request_rec *r) +DAV_DECLARE(const dav_provider *) dav_get_provider(request_rec *r) { dav_dir_conf *conf; @@ -279,6 +284,18 @@ static const char *dav_cmd_dav(cmd_parms *cmd, void *config, const char *arg1) return NULL; } +/* + * Command handler for the DAVBasePath directive, which is TAKE1 + */ +static const char *dav_cmd_davbasepath(cmd_parms *cmd, void *config, const char *arg1) +{ + dav_dir_conf *conf = config; + + conf->base = arg1; + + return NULL; +} + /* * Command handler for the DAVDepthInfinity directive, which is FLAG. */ @@ -294,6 +311,21 @@ static const char *dav_cmd_davdepthinfinity(cmd_parms *cmd, void *config, return NULL; } +/* + * Command handler for the DAVLockDiscovery directive, which is FLAG. + */ +static const char *dav_cmd_davlockdiscovery(cmd_parms *cmd, void *config, + int arg) +{ + dav_dir_conf *conf = (dav_dir_conf *)config; + + if (arg) + conf->allow_lockdiscovery = DAV_ENABLED_ON; + else + conf->allow_lockdiscovery = DAV_ENABLED_OFF; + return NULL; +} + /* * Command handler for DAVMinTimeout directive, which is TAKE1 */ @@ -436,7 +468,7 @@ static const char *dav_xml_escape_uri(apr_pool_t *p, const char *uri) } -/* Write a complete RESPONSE object out as a xml +/* Write a complete RESPONSE object out as a xml element. Data is sent into brigade BB, which is auto-flushed into the output filter stack for request R. Use POOL for any temporary allocations. @@ -557,6 +589,7 @@ DAV_DECLARE(void) dav_send_multistatus(request_rec *r, int status, dav_begin_multistatus(bb, r, status, namespaces); apr_pool_create(&subpool, r->pool); + apr_pool_tag(subpool, "mod_dav-multistatus"); for (; first != NULL; first = first->next) { apr_pool_clear(subpool); @@ -662,8 +695,8 @@ static int dav_created(request_rec *r, const char *locn, const char *what, /* Apache doesn't allow us to set a variable body for HTTP_CREATED, so * we must manufacture the entire response. 
*/ - body = apr_psprintf(r->pool, "%s %s has been created.", - what, ap_escape_html(r->pool, locn)); + body = apr_pstrcat(r->pool, what, " ", ap_escape_html(r->pool, locn), + " has been created.", NULL); return dav_error_response(r, HTTP_CREATED, body); } @@ -676,7 +709,7 @@ DAV_DECLARE(int) dav_get_depth(request_rec *r, int def_depth) return def_depth; } - if (strcasecmp(depth, "infinity") == 0) { + if (ap_cstr_casecmp(depth, "infinity") == 0) { return DAV_INFINITY; } else if (strcmp(depth, "0") == 0) { @@ -725,11 +758,11 @@ static int dav_get_overwrite(request_rec *r) * the resource identified by the DAV:checked-in property of the resource * identified by the Request-URI. */ -static dav_error *dav_get_resource(request_rec *r, int label_allowed, +DAV_DECLARE(dav_error *) dav_get_resource(request_rec *r, int label_allowed, int use_checked_in, dav_resource **res_p) { dav_dir_conf *conf; - const char *label = NULL; + const char *label = NULL, *base; dav_error *err; /* if the request target can be overridden, get any target selector */ @@ -746,11 +779,27 @@ static dav_error *dav_get_resource(request_rec *r, int label_allowed, ap_escape_html(r->pool, r->uri))); } + /* Take the repos root from DAVBasePath if configured, else the + * path of the enclosing section. */ + base = conf->base ? conf->base : conf->dir; + /* resolve the resource */ - err = (*conf->provider->repos->get_resource)(r, conf->dir, + err = (*conf->provider->repos->get_resource)(r, base, label, use_checked_in, res_p); if (err != NULL) { + /* In the error path, give a hint that DavBasePath needs to be + * used if the location was configured via a regex match. */ + if (!conf->base) { + core_dir_config *cdc = ap_get_core_module_config(r->per_dir_config); + + if (cdc->r) { + ap_log_error(APLOG_MARK, APLOG_ERR, 0, NULL, APLOGNO(10484) + "failed to find repository for location configured " + "via regex match - missing DAVBasePath?"); + } + } + err = dav_push_error(r->pool, err->status, 0, "Could not fetch resource information.", err); return err; @@ -774,7 +823,9 @@ static dav_error *dav_get_resource(request_rec *r, int label_allowed, return NULL; } -static dav_error * dav_open_lockdb(request_rec *r, int ro, dav_lockdb **lockdb) +DAV_DECLARE(dav_error *) dav_open_lockdb(request_rec *r, + int ro, + dav_lockdb **lockdb) { const dav_hooks_locks *hooks = DAV_GET_HOOKS_LOCKS(r); @@ -787,6 +838,11 @@ static dav_error * dav_open_lockdb(request_rec *r, int ro, dav_lockdb **lockdb) return (*hooks->open_lockdb)(r, ro, 0, lockdb); } +DAV_DECLARE(void) dav_close_lockdb(dav_lockdb *lockdb) +{ + (lockdb->hooks->close_lockdb)(lockdb); +} + /** * @return 1 if valid content-range, * 0 if no content-range, @@ -799,16 +855,15 @@ static int dav_parse_range(request_rec *r, char *range; char *dash; char *slash; - char *errp; range_c = apr_table_get(r->headers_in, "content-range"); if (range_c == NULL) return 0; range = apr_pstrdup(r->pool, range_c); - if (strncasecmp(range, "bytes ", 6) != 0 - || (dash = ap_strchr(range, '-')) == NULL - || (slash = ap_strchr(range, '/')) == NULL) { + if (ap_cstr_casecmpn(range, "bytes ", 6) != 0 + || (dash = ap_strchr(range + 6, '-')) == NULL + || (slash = ap_strchr(range + 6, '/')) == NULL) { /* malformed header */ return -1; } @@ -816,20 +871,19 @@ static int dav_parse_range(request_rec *r, *dash++ = *slash++ = '\0'; /* detect invalid ranges */ - if (apr_strtoff(range_start, range + 6, &errp, 10) - || *errp || *range_start < 0) { + if (!ap_parse_strict_length(range_start, range + 6)) { return -1; } - if 
(apr_strtoff(range_end, dash, &errp, 10) - || *errp || *range_end < 0 || *range_end < *range_start) { + if (!ap_parse_strict_length(range_end, dash) + || *range_end < *range_start) { return -1; } if (*slash != '*') { apr_off_t dummy; - if (apr_strtoff(&dummy, slash, &errp, 10) - || *errp || dummy <= *range_end) { + if (!ap_parse_strict_length(&dummy, slash) + || dummy <= *range_end) { return -1; } } @@ -854,6 +908,12 @@ static int dav_method_get(request_rec *r) if (err != NULL) return dav_handle_err(r, err, NULL); + /* check for any method preconditions */ + if (dav_run_method_precondition(r, resource, NULL, NULL, &err) != DECLINED + && err) { + return dav_handle_err(r, err, NULL); + } + if (!resource->exists) { /* Apache will supply a default error for this. */ return HTTP_NOT_FOUND; @@ -901,6 +961,12 @@ static int dav_method_post(request_rec *r) if (err != NULL) return dav_handle_err(r, err, NULL); + /* check for any method preconditions */ + if (dav_run_method_precondition(r, resource, NULL, NULL, &err) != DECLINED + && err) { + return dav_handle_err(r, err, NULL); + } + /* Note: depth == 0. Implies no need for a multistatus response. */ if ((err = dav_validate_request(r, resource, 0, NULL, NULL, DAV_VALIDATE_RESOURCE, NULL)) != NULL) { @@ -934,6 +1000,12 @@ static int dav_method_put(request_rec *r) if (err != NULL) return dav_handle_err(r, err, NULL); + /* check for any method preconditions */ + if (dav_run_method_precondition(r, resource, NULL, NULL, &err) != DECLINED + && err) { + return dav_handle_err(r, err, NULL); + } + /* If not a file or collection resource, PUT not allowed */ if (resource->type != DAV_RESOURCE_TYPE_REGULAR && resource->type != DAV_RESOURCE_TYPE_WORKING) { @@ -996,12 +1068,17 @@ static int dav_method_put(request_rec *r) /* Create the new file in the repository */ if ((err = (*resource->hooks->open_stream)(resource, mode, &stream)) != NULL) { - /* ### assuming FORBIDDEN is probably not quite right... */ - err = dav_push_error(r->pool, HTTP_FORBIDDEN, 0, - apr_psprintf(r->pool, - "Unable to PUT new contents for %s.", - ap_escape_html(r->pool, r->uri)), - err); + int status = err->status ? err->status : HTTP_FORBIDDEN; + if (status > 299) { + err = dav_push_error(r->pool, status, 0, + apr_psprintf(r->pool, + "Unable to PUT new contents for %s.", + ap_escape_html(r->pool, r->uri)), + err); + } + else { + err = NULL; + } } if (err == NULL && has_range) { @@ -1204,6 +1281,13 @@ static int dav_method_delete(request_rec *r) &resource); if (err != NULL) return dav_handle_err(r, err, NULL); + + /* check for any method preconditions */ + if (dav_run_method_precondition(r, resource, NULL, NULL, &err) != DECLINED + && err) { + return dav_handle_err(r, err, NULL); + } + if (!resource->exists) { /* Apache will supply a default error for this. 
*/ return HTTP_NOT_FOUND; @@ -1319,10 +1403,10 @@ static dav_error *dav_gen_supported_methods(request_rec *r, if (elts[i].key == NULL) continue; - s = apr_psprintf(r->pool, - "" - DEBUG_CR, - elts[i].key); + s = apr_pstrcat(r->pool, + "" DEBUG_CR, NULL); apr_text_append(r->pool, body, s); } } @@ -1348,10 +1432,9 @@ static dav_error *dav_gen_supported_methods(request_rec *r, /* see if method is supported */ if (apr_table_get(methods, name) != NULL) { - s = apr_psprintf(r->pool, - "" - DEBUG_CR, - name); + s = apr_pstrcat(r->pool, + "" DEBUG_CR, NULL); apr_text_append(r->pool, body, s); } } @@ -1375,8 +1458,7 @@ static dav_error *dav_gen_supported_live_props(request_rec *r, dav_error *err; /* open lock database, to report on supported lock properties */ - /* ### should open read-only */ - if ((err = dav_open_lockdb(r, 0, &lockdb)) != NULL) { + if ((err = dav_open_lockdb(r, 1, &lockdb)) != NULL) { return dav_push_error(r->pool, err->status, 0, "The lock database could not be opened, " "preventing the reporting of supported lock " @@ -1385,7 +1467,7 @@ static dav_error *dav_gen_supported_live_props(request_rec *r, } /* open the property database (readonly) for the resource */ - if ((err = dav_open_propdb(r, lockdb, resource, 1, NULL, + if ((err = dav_open_propdb(r, lockdb, resource, DAV_PROPDB_RO, NULL, &propdb)) != NULL) { if (lockdb != NULL) (*lockdb->hooks->close_lockdb)(lockdb); @@ -1451,89 +1533,95 @@ static dav_error *dav_gen_supported_live_props(request_rec *r, return err; } + /* generate DAV:supported-report-set OPTIONS response */ static dav_error *dav_gen_supported_reports(request_rec *r, const dav_resource *resource, const apr_xml_elem *elem, - const dav_hooks_vsn *vsn_hooks, apr_text_header *body) { apr_xml_elem *child; apr_xml_attr *attr; - dav_error *err; + dav_error *err = NULL; char *s; + apr_array_header_t *reports; + const dav_report_elem *rp; apr_text_append(r->pool, body, "" DEBUG_CR); - if (vsn_hooks != NULL) { - const dav_report_elem *reports; - const dav_report_elem *rp; + reports = apr_array_make(r->pool, 5, sizeof(const char *)); + dav_run_gather_reports(r, resource, reports, &err); + if (err != NULL) { + return dav_push_error(r->pool, err->status, 0, + "DAV:supported-report-set could not be " + "determined due to a problem fetching the " + "available reports for this resource.", + err); + } - if ((err = (*vsn_hooks->avail_reports)(resource, &reports)) != NULL) { - return dav_push_error(r->pool, err->status, 0, - "DAV:supported-report-set could not be " - "determined due to a problem fetching the " - "available reports for this resource.", - err); + if (elem->first_child == NULL) { + int i; + + /* show all supported reports */ + rp = (const dav_report_elem *)reports->elts; + for (i = 0; i < reports->nelts; i++, rp++) { + /* Note: we presume reports->namespace is + * properly XML/URL quoted */ + s = apr_pstrcat(r->pool, + "name, + "\" D:namespace=\"", + rp->nmspace, + "\"/>" DEBUG_CR, NULL); + apr_text_append(r->pool, body, s); } + } + else { + /* check for support of specific report */ + for (child = elem->first_child; child != NULL; child = child->next) { + if (child->ns == APR_XML_NS_DAV_ID + && strcmp(child->name, "supported-report") == 0) { + const char *name = NULL; + const char *nmspace = NULL; + int i; - if (reports != NULL) { - if (elem->first_child == NULL) { - /* show all supported reports */ - for (rp = reports; rp->nmspace != NULL; ++rp) { - /* Note: we presume reports->namespace is - * properly XML/URL quoted */ - s = apr_psprintf(r->pool, - "" DEBUG_CR, - 
rp->name, rp->nmspace); - apr_text_append(r->pool, body, s); + /* go through attributes to find name and namespace */ + for (attr = child->attr; attr != NULL; attr = attr->next) { + if (attr->ns == APR_XML_NS_DAV_ID) { + if (strcmp(attr->name, "name") == 0) + name = attr->value; + else if (strcmp(attr->name, "namespace") == 0) + nmspace = attr->value; + } } - } - else { - /* check for support of specific report */ - for (child = elem->first_child; child != NULL; child = child->next) { - if (child->ns == APR_XML_NS_DAV_ID - && strcmp(child->name, "supported-report") == 0) { - const char *name = NULL; - const char *nmspace = NULL; - - /* go through attributes to find name and namespace */ - for (attr = child->attr; attr != NULL; attr = attr->next) { - if (attr->ns == APR_XML_NS_DAV_ID) { - if (strcmp(attr->name, "name") == 0) - name = attr->value; - else if (strcmp(attr->name, "namespace") == 0) - nmspace = attr->value; - } - } - - if (name == NULL) { - return dav_new_error(r->pool, HTTP_BAD_REQUEST, 0, 0, - "A DAV:supported-report element " - "does not have a \"name\" attribute"); - } - - /* default namespace to DAV: */ - if (nmspace == NULL) - nmspace = "DAV:"; - - for (rp = reports; rp->nmspace != NULL; ++rp) { - if (strcmp(name, rp->name) == 0 - && strcmp(nmspace, rp->nmspace) == 0) { - /* Note: we presume reports->nmspace is - * properly XML/URL quoted - */ - s = apr_psprintf(r->pool, - "" - DEBUG_CR, - rp->name, rp->nmspace); - apr_text_append(r->pool, body, s); - break; - } - } + + if (name == NULL) { + return dav_new_error(r->pool, HTTP_BAD_REQUEST, 0, 0, + "A DAV:supported-report element " + "does not have a \"name\" attribute"); + } + + /* default namespace to DAV: */ + if (nmspace == NULL) { + nmspace = "DAV:"; + } + + rp = (const dav_report_elem *)reports->elts; + for (i = 0; i < reports->nelts; i++, rp++) { + if (strcmp(name, rp->name) == 0 + && strcmp(nmspace, rp->nmspace) == 0) { + /* Note: we presume reports->nmspace is + * properly XML/URL quoted + */ + s = apr_pstrcat(r->pool, + "name, + "\" D:namespace=\"", + rp->nmspace, + "\"/>" DEBUG_CR, NULL); + apr_text_append(r->pool, body, s); + break; } } } @@ -1640,6 +1728,12 @@ static int dav_method_options(request_rec *r) } /* note: doc == NULL if no request body */ + /* check for any method preconditions */ + if (dav_run_method_precondition(r, resource, NULL, doc, &err) != DECLINED + && err) { + return dav_handle_err(r, err, NULL); + } + if (doc && !dav_validate_root(doc, "options")) { ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(00584) "The \"options\" element was not found."); @@ -1903,7 +1997,7 @@ static int dav_method_options(request_rec *r) core_option = 1; } else if (strcmp(elem->name, "supported-report-set") == 0) { - err = dav_gen_supported_reports(r, resource, elem, vsn_hooks, &body); + err = dav_gen_supported_reports(r, resource, elem, &body); core_option = 1; } } @@ -1968,10 +2062,23 @@ static void dav_cache_badprops(dav_walker_ctx *ctx) static dav_error * dav_propfind_walker(dav_walk_resource *wres, int calltype) { dav_walker_ctx *ctx = wres->walk_ctx; + dav_dir_conf *conf; + int flags = DAV_PROPDB_RO; dav_error *err; dav_propdb *propdb; dav_get_props_result propstats = { 0 }; + /* check for any method preconditions */ + if (dav_run_method_precondition(ctx->r, NULL, wres->resource, ctx->doc, &err) != DECLINED + && err) { + apr_pool_clear(ctx->scratchpool); + return NULL; + } + + conf = ap_get_module_config(ctx->r->per_dir_config, &dav_module); + if (conf && conf->allow_lockdiscovery == DAV_ENABLED_OFF) + flags 
|= DAV_PROPDB_DISABLE_LOCKDISCOVERY; + /* ** Note: ctx->doc can only be NULL for DAV_PROPFIND_IS_ALLPROP. Since ** dav_get_allprops() does not need to do namespace translation, @@ -1980,8 +2087,9 @@ static dav_error * dav_propfind_walker(dav_walk_resource *wres, int calltype) ** Note: we cast to lose the "const". The propdb won't try to change ** the resource, however, since we are opening readonly. */ - err = dav_open_propdb(ctx->r, ctx->w.lockdb, wres->resource, 1, - ctx->doc ? ctx->doc->namespaces : NULL, &propdb); + err = dav_popen_propdb(ctx->scratchpool, + ctx->r, ctx->w.lockdb, wres->resource, flags, + ctx->doc ? ctx->doc->namespaces : NULL, &propdb); if (err != NULL) { /* ### do something with err! */ @@ -2012,10 +2120,10 @@ static dav_error * dav_propfind_walker(dav_walk_resource *wres, int calltype) : DAV_PROP_INSERT_NAME; propstats = dav_get_allprops(propdb, what); } - dav_close_propdb(propdb); - dav_stream_response(wres, 0, &propstats, ctx->scratchpool); + dav_close_propdb(propdb); + /* at this point, ctx->scratchpool has been used to stream a single response. this function fully controls the pool, and thus has the right to clear it for the next iteration of this @@ -2042,6 +2150,17 @@ static int dav_method_propfind(request_rec *r) if (err != NULL) return dav_handle_err(r, err, NULL); + if ((result = ap_xml_parse_input(r, &doc)) != OK) { + return result; + } + /* note: doc == NULL if no request body */ + + /* check for any method preconditions */ + if (dav_run_method_precondition(r, resource, NULL, doc, &err) != DECLINED + && err) { + return dav_handle_err(r, err, NULL); + } + if (dav_get_resource_state(r, resource) == DAV_RESOURCE_NULL) { /* Apache will supply a default error for this. */ return HTTP_NOT_FOUND; @@ -2069,11 +2188,6 @@ static int dav_method_propfind(request_rec *r) } } - if ((result = ap_xml_parse_input(r, &doc)) != OK) { - return result; - } - /* note: doc == NULL if no request body */ - if (doc && !dav_validate_root(doc, "propfind")) { /* This supplies additional information for the default message. */ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(00585) @@ -2103,7 +2217,7 @@ static int dav_method_propfind(request_rec *r) return HTTP_BAD_REQUEST; } - ctx.w.walk_type = DAV_WALKTYPE_NORMAL | DAV_WALKTYPE_AUTH; + ctx.w.walk_type = DAV_WALKTYPE_NORMAL | DAV_WALKTYPE_AUTH | DAV_WALKTYPE_TOLERANT; ctx.w.func = dav_propfind_walker; ctx.w.walk_ctx = &ctx; ctx.w.pool = r->pool; @@ -2113,9 +2227,9 @@ static int dav_method_propfind(request_rec *r) ctx.r = r; ctx.bb = apr_brigade_create(r->pool, r->connection->bucket_alloc); apr_pool_create(&ctx.scratchpool, r->pool); + apr_pool_tag(ctx.scratchpool, "mod_dav-scratch"); - /* ### should open read-only */ - if ((err = dav_open_lockdb(r, 0, &ctx.w.lockdb)) != NULL) { + if ((err = dav_open_lockdb(r, 1, &ctx.w.lockdb)) != NULL) { err = dav_push_error(r->pool, err->status, 0, "The lock database could not be opened, " "preventing access to the various lock " @@ -2318,16 +2432,23 @@ static int dav_method_proppatch(request_rec *r) &resource); if (err != NULL) return dav_handle_err(r, err, NULL); - if (!resource->exists) { - /* Apache will supply a default error for this. 
*/ - return HTTP_NOT_FOUND; - } if ((result = ap_xml_parse_input(r, &doc)) != OK) { return result; } /* note: doc == NULL if no request body */ + /* check for any method preconditions */ + if (dav_run_method_precondition(r, resource, NULL, doc, &err) != DECLINED + && err) { + return dav_handle_err(r, err, NULL); + } + + if (!resource->exists) { + /* Apache will supply a default error for this. */ + return HTTP_NOT_FOUND; + } + if (doc == NULL || !dav_validate_root(doc, "propertyupdate")) { /* This supplies additional information for the default message. */ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(00587) @@ -2352,7 +2473,8 @@ static int dav_method_proppatch(request_rec *r) return dav_handle_err(r, err, NULL); } - if ((err = dav_open_propdb(r, NULL, resource, 0, doc->namespaces, + if ((err = dav_open_propdb(r, NULL, resource, + DAV_PROPDB_NONE, doc->namespaces, &propdb)) != NULL) { /* undo any auto-checkout */ dav_auto_checkin(r, resource, 1 /*undo*/, 0 /*unlock*/, &av_info); @@ -2470,7 +2592,7 @@ static int process_mkcol_body(request_rec *r) r->remaining = 0; if (tenc) { - if (strcasecmp(tenc, "chunked")) { + if (ap_cstr_casecmp(tenc, "chunked")) { /* Use this instead of Apache's default error string */ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(00589) "Unknown Transfer-Encoding %s", tenc); @@ -2480,20 +2602,13 @@ static int process_mkcol_body(request_rec *r) r->read_chunked = 1; } else if (lenp) { - const char *pos = lenp; - - while (apr_isdigit(*pos) || apr_isspace(*pos)) { - ++pos; - } - - if (*pos != '\0') { + if (!ap_parse_strict_length(&r->remaining, lenp)) { + r->remaining = 0; /* This supplies additional information for the default message. */ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(00590) "Invalid Content-Length %s", lenp); return HTTP_BAD_REQUEST; } - - r->remaining = apr_atoi64(lenp); } if (r->read_chunked || r->remaining > 0) { @@ -2534,6 +2649,12 @@ static int dav_method_mkcol(request_rec *r) if (err != NULL) return dav_handle_err(r, err, NULL); + /* check for any method preconditions */ + if (dav_run_method_precondition(r, resource, NULL, NULL, &err) != DECLINED + && err) { + return dav_handle_err(r, err, NULL); + } + if (resource->exists) { /* oops. something was already there! */ @@ -2654,6 +2775,12 @@ static int dav_method_copymove(request_rec *r, int is_move) if (err != NULL) return dav_handle_err(r, err, NULL); + /* check for any method preconditions */ + if (dav_run_method_precondition(r, resource, NULL, NULL, &err) != DECLINED + && err) { + return dav_handle_err(r, err, NULL); + } + if (!resource->exists) { /* Apache will supply a default error for this. */ return HTTP_NOT_FOUND; @@ -2676,7 +2803,7 @@ static int dav_method_copymove(request_rec *r, int is_move) const char *nscp_path = apr_table_get(r->headers_in, "New-uri"); if (nscp_host != NULL && nscp_path != NULL) - dest = apr_psprintf(r->pool, "http://%s%s", nscp_host, nscp_path); + dest = apr_pstrcat(r->pool, "http://", nscp_host, nscp_path, NULL); } if (dest == NULL) { /* This supplies additional information for the default message. */ @@ -2720,6 +2847,12 @@ static int dav_method_copymove(request_rec *r, int is_move) if (err != NULL) return dav_handle_err(r, err, NULL); + /* check for any method preconditions */ + if (dav_run_method_precondition(r, resource, resnew, NULL, &err) != DECLINED + && err) { + return dav_handle_err(r, err, NULL); + } + /* are the two resources handled by the same repository? 
*/ if (resource->hooks != resnew->hooks) { /* ### this message exposes some backend config, but screw it... */ @@ -3071,6 +3204,12 @@ static int dav_method_lock(request_rec *r) if (err != NULL) return dav_handle_err(r, err, NULL); + /* check for any method preconditions */ + if (dav_run_method_precondition(r, resource, NULL, doc, &err) != DECLINED + && err) { + return dav_handle_err(r, err, NULL); + } + /* Check if parent collection exists */ if ((err = resource->hooks->get_parent_resource(resource, &parent)) != NULL) { /* ### add a higher-level description? */ @@ -3270,6 +3409,12 @@ static int dav_method_unlock(request_rec *r) if (err != NULL) return dav_handle_err(r, err, NULL); + /* check for any method preconditions */ + if (dav_run_method_precondition(r, resource, NULL, NULL, &err) != DECLINED + && err) { + return dav_handle_err(r, err, NULL); + } + resource_state = dav_get_resource_state(r, resource); /* @@ -3294,8 +3439,8 @@ static int dav_method_unlock(request_rec *r) /* ### RFC 2518 s. 8.11: If this resource is locked by locktoken, * _all_ resources locked by locktoken are released. It does not say - * resource has to be the root of an infinte lock. Thus, an UNLOCK - * on any part of an infinte lock will remove the lock on all resources. + * resource has to be the root of an infinite lock. Thus, an UNLOCK + * on any part of an infinite lock will remove the lock on all resources. * * For us, if r->filename represents an indirect lock (part of an infinity lock), * we must actually perform an UNLOCK on the direct lock for this resource. @@ -3329,15 +3474,21 @@ static int dav_method_vsn_control(request_rec *r) if (err != NULL) return dav_handle_err(r, err, NULL); - /* remember the pre-creation resource state */ - resource_state = dav_get_resource_state(r, resource); - /* parse the request body (may be a version-control element) */ if ((result = ap_xml_parse_input(r, &doc)) != OK) { return result; } /* note: doc == NULL if no request body */ + /* check for any method preconditions */ + if (dav_run_method_precondition(r, resource, NULL, doc, &err) != DECLINED + && err) { + return dav_handle_err(r, err, NULL); + } + + /* remember the pre-creation resource state */ + resource_state = dav_get_resource_state(r, resource); + if (doc != NULL) { const apr_xml_elem *child; apr_size_t tsize; @@ -3582,6 +3733,12 @@ static int dav_method_checkout(request_rec *r) if (err != NULL) return dav_handle_err(r, err, NULL); + /* check for any method preconditions */ + if (dav_run_method_precondition(r, resource, NULL, doc, &err) != DECLINED + && err) { + return dav_handle_err(r, err, NULL); + } + if (!resource->exists) { /* Apache will supply a default error for this. */ return HTTP_NOT_FOUND; @@ -3658,6 +3815,12 @@ static int dav_method_uncheckout(request_rec *r) if (err != NULL) return dav_handle_err(r, err, NULL); + /* check for any method preconditions */ + if (dav_run_method_precondition(r, resource, NULL, NULL, &err) != DECLINED + && err) { + return dav_handle_err(r, err, NULL); + } + if (!resource->exists) { /* Apache will supply a default error for this. */ return HTTP_NOT_FOUND; @@ -3735,6 +3898,12 @@ static int dav_method_checkin(request_rec *r) if (err != NULL) return dav_handle_err(r, err, NULL); + /* check for any method preconditions */ + if (dav_run_method_precondition(r, resource, NULL, doc, &err) != DECLINED + && err) { + return dav_handle_err(r, err, NULL); + } + if (!resource->exists) { /* Apache will supply a default error for this. 
*/ return HTTP_NOT_FOUND; @@ -3856,6 +4025,12 @@ static int dav_method_update(request_rec *r) if (err != NULL) return dav_handle_err(r, err, NULL); + /* check for any method preconditions */ + if (dav_run_method_precondition(r, resource, NULL, doc, &err) != DECLINED + && err) { + return dav_handle_err(r, err, NULL); + } + if (!resource->exists) { /* Apache will supply a default error for this. */ return HTTP_NOT_FOUND; @@ -3930,6 +4105,9 @@ typedef struct dav_label_walker_ctx /* input: */ dav_walk_params w; + /* original request */ + request_rec *r; + /* label being manipulated */ const char *label; @@ -3949,13 +4127,19 @@ static dav_error * dav_label_walker(dav_walk_resource *wres, int calltype) dav_label_walker_ctx *ctx = wres->walk_ctx; dav_error *err = NULL; + /* check for any method preconditions */ + if (dav_run_method_precondition(ctx->r, NULL, wres->resource, NULL, &err) != DECLINED + && err) { + /* precondition failed, dropping through */ + } + /* Check the state of the resource: must be a version or * non-checkedout version selector */ /* ### need a general mechanism for reporting precondition violations * ### (should be returning XML document for 403/409 responses) */ - if (wres->resource->type != DAV_RESOURCE_TYPE_VERSION && + else if (wres->resource->type != DAV_RESOURCE_TYPE_VERSION && (wres->resource->type != DAV_RESOURCE_TYPE_REGULAR || !wres->resource->versioned)) { err = dav_new_error(ctx->w.pool, HTTP_CONFLICT, 0, 0, @@ -4001,11 +4185,23 @@ static int dav_method_label(request_rec *r) if (vsn_hooks == NULL || vsn_hooks->add_label == NULL) return DECLINED; + /* parse the request body */ + if ((result = ap_xml_parse_input(r, &doc)) != OK) { + return result; + } + /* Ask repository module to resolve the resource */ err = dav_get_resource(r, 1 /* label_allowed */, 0 /* use_checked_in */, &resource); if (err != NULL) return dav_handle_err(r, err, NULL); + + /* check for any method preconditions */ + if (dav_run_method_precondition(r, resource, NULL, doc, &err) != DECLINED + && err) { + return dav_handle_err(r, err, NULL); + } + if (!resource->exists) { /* Apache will supply a default error for this. */ return HTTP_NOT_FOUND; @@ -4017,11 +4213,6 @@ static int dav_method_label(request_rec *r) return HTTP_BAD_REQUEST; } - /* parse the request body */ - if ((result = ap_xml_parse_input(r, &doc)) != OK) { - return result; - } - if (doc == NULL || !dav_validate_root(doc, "label")) { /* This supplies additional information for the default message. 
*/ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(00610) @@ -4070,6 +4261,7 @@ static int dav_method_label(request_rec *r) ctx.w.walk_ctx = &ctx; ctx.w.pool = r->pool; ctx.w.root = resource; + ctx.r = r; ctx.vsn_hooks = vsn_hooks; err = (*resource->hooks->walk)(&ctx.w, depth, &multi_status); @@ -4109,21 +4301,60 @@ static int dav_method_label(request_rec *r) return DONE; } +static int dav_core_deliver_report(request_rec *r, + const dav_resource *resource, + const apr_xml_doc *doc, + ap_filter_t *output, dav_error **err) +{ + const dav_hooks_vsn *vsn_hooks = DAV_GET_HOOKS_VSN(r); + + if (vsn_hooks) { + *err = (*vsn_hooks->deliver_report)(r, resource, doc, + r->output_filters); + return OK; + } + + return DECLINED; +} + +static void dav_core_gather_reports( + request_rec *r, + const dav_resource *resource, + apr_array_header_t *reports, + dav_error **err) +{ + const dav_hooks_vsn *vsn_hooks = DAV_GET_HOOKS_VSN(r); + + if (vsn_hooks) { + const dav_report_elem *rp; + + (*err) = (*vsn_hooks->avail_reports)(resource, &rp); + while (rp && rp->name) { + + dav_report_elem *report = apr_array_push(reports); + + report->nmspace = rp->nmspace; + report->name = rp->name; + + rp++; + } + } + +} + static int dav_method_report(request_rec *r) { dav_resource *resource; const dav_hooks_vsn *vsn_hooks = DAV_GET_HOOKS_VSN(r); - int result; - int label_allowed; apr_xml_doc *doc; - dav_error *err; + dav_error *err = NULL; - /* If no versioning provider, decline the request */ - if (vsn_hooks == NULL) - return DECLINED; + int result; + int label_allowed; - if ((result = ap_xml_parse_input(r, &doc)) != OK) + if ((result = ap_xml_parse_input(r, &doc)) != OK) { return result; + } if (doc == NULL) { /* This supplies additional information for the default msg. */ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(00614) @@ -4135,11 +4366,18 @@ static int dav_method_report(request_rec *r) * First determine whether a Target-Selector header is allowed * for this report. */ - label_allowed = (*vsn_hooks->report_label_header_allowed)(doc); + label_allowed = vsn_hooks ? (*vsn_hooks->report_label_header_allowed)(doc) : 0; err = dav_get_resource(r, label_allowed, 0 /* use_checked_in */, &resource); - if (err != NULL) + if (err != NULL) { + return dav_handle_err(r, err, NULL); + } + + /* check for any method preconditions */ + if (dav_run_method_precondition(r, resource, NULL, doc, &err) != DECLINED + && err) { return dav_handle_err(r, err, NULL); + } if (!resource->exists) { /* Apache will supply a default error for this. */ @@ -4149,13 +4387,17 @@ static int dav_method_report(request_rec *r) /* set up defaults for the report response */ r->status = HTTP_OK; ap_set_content_type(r, DAV_XML_CONTENT_TYPE); + err = NULL; /* run report hook */ - if ((err = (*vsn_hooks->deliver_report)(r, resource, doc, - r->output_filters)) != NULL) { - if (! r->sent_bodyct) + result = dav_run_deliver_report(r, resource, doc, + r->output_filters, &err); + if (err != NULL) { + + if (! r->sent_bodyct) { /* No data has been sent to client yet; throw normal error. 
*/ return dav_handle_err(r, err, NULL); + } /* If an error occurred during the report delivery, there's basically nothing we can do but abort the connection and @@ -4168,6 +4410,16 @@ static int dav_method_report(request_rec *r) " a REPORT response.", err); dav_log_err(r, err, APLOG_ERR); r->connection->aborted = 1; + + return DONE; + } + switch (result) { + case OK: + return DONE; + case DECLINED: + /* No one handled the report */ + return HTTP_NOT_IMPLEMENTED; + default: return DONE; } @@ -4199,6 +4451,12 @@ static int dav_method_make_workspace(request_rec *r) return result; } + /* check for any method preconditions */ + if (dav_run_method_precondition(r, resource, NULL, doc, &err) != DECLINED + && err) { + return dav_handle_err(r, err, NULL); + } + if (doc == NULL || !dav_validate_root(doc, "mkworkspace")) { ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(00615) @@ -4258,6 +4516,12 @@ static int dav_method_make_activity(request_rec *r) if (err != NULL) return dav_handle_err(r, err, NULL); + /* check for any method preconditions */ + if (dav_run_method_precondition(r, resource, NULL, NULL, &err) != DECLINED + && err) { + return dav_handle_err(r, err, NULL); + } + /* MKACTIVITY does not have a defined request body. */ if ((result = ap_discard_request_body(r)) != OK) { return result; @@ -4383,6 +4647,12 @@ static int dav_method_merge(request_rec *r) if (err != NULL) return dav_handle_err(r, err, NULL); + /* check for any method preconditions */ + if (dav_run_method_precondition(r, source_resource, NULL, doc, &err) != DECLINED + && err) { + return dav_handle_err(r, err, NULL); + } + no_auto_merge = dav_find_child(doc->root, "no-auto-merge") != NULL; no_checkout = dav_find_child(doc->root, "no-checkout") != NULL; @@ -4400,6 +4670,13 @@ static int dav_method_merge(request_rec *r) &resource); if (err != NULL) return dav_handle_err(r, err, NULL); + + /* check for any method preconditions */ + if (dav_run_method_precondition(r, source_resource, resource, doc, &err) != DECLINED + && err) { + return dav_handle_err(r, err, NULL); + } + if (!resource->exists) { /* Apache will supply a default error for this. */ return HTTP_NOT_FOUND; @@ -4467,6 +4744,12 @@ static int dav_method_bind(request_rec *r) if (err != NULL) return dav_handle_err(r, err, NULL); + /* check for any method preconditions */ + if (dav_run_method_precondition(r, resource, NULL, NULL, &err) != DECLINED + && err) { + return dav_handle_err(r, err, NULL); + } + if (!resource->exists) { /* Apache will supply a default error for this. */ return HTTP_NOT_FOUND; @@ -4517,6 +4800,12 @@ static int dav_method_bind(request_rec *r) if (err != NULL) return dav_handle_err(r, err, NULL); + /* check for any method preconditions */ + if (dav_run_method_precondition(r, resource, binding, NULL, &err) != DECLINED + && err) { + return dav_handle_err(r, err, NULL); + } + /* are the two resources handled by the same repository? */ if (resource->hooks != binding->hooks) { /* ### this message exposes some backend config, but screw it... 
*/ @@ -4886,6 +5175,11 @@ static void register_hooks(apr_pool_t *p) dav_hook_insert_all_liveprops(dav_core_insert_all_liveprops, NULL, NULL, APR_HOOK_MIDDLE); + dav_hook_deliver_report(dav_core_deliver_report, + NULL, NULL, APR_HOOK_LAST); + dav_hook_gather_reports(dav_core_gather_reports, + NULL, NULL, APR_HOOK_LAST); + dav_core_register_uris(p); } @@ -4900,6 +5194,10 @@ static const command_rec dav_cmds[] = AP_INIT_TAKE1("DAV", dav_cmd_dav, NULL, ACCESS_CONF, "specify the DAV provider for a directory or location"), + /* per directory/location */ + AP_INIT_TAKE1("DAVBasePath", dav_cmd_davbasepath, NULL, ACCESS_CONF, + "specify the DAV repository base URL"), + /* per directory/location, or per server */ AP_INIT_TAKE1("DAVMinTimeout", dav_cmd_davmintimeout, NULL, ACCESS_CONF|RSRC_CONF, @@ -4910,6 +5208,11 @@ static const command_rec dav_cmds[] = ACCESS_CONF|RSRC_CONF, "allow Depth infinity PROPFIND requests"), + /* per directory/location, or per server */ + AP_INIT_FLAG("DAVLockDiscovery", dav_cmd_davlockdiscovery, NULL, + ACCESS_CONF|RSRC_CONF, + "allow lock discovery by PROPFIND requests"), + { NULL } }; @@ -4928,6 +5231,9 @@ APR_HOOK_STRUCT( APR_HOOK_LINK(gather_propsets) APR_HOOK_LINK(find_liveprop) APR_HOOK_LINK(insert_all_liveprops) + APR_HOOK_LINK(deliver_report) + APR_HOOK_LINK(gather_reports) + APR_HOOK_LINK(method_precondition) ) APR_IMPLEMENT_EXTERNAL_HOOK_VOID(dav, DAV, gather_propsets, @@ -4944,3 +5250,22 @@ APR_IMPLEMENT_EXTERNAL_HOOK_VOID(dav, DAV, insert_all_liveprops, (request_rec *r, const dav_resource *resource, dav_prop_insert what, apr_text_header *phdr), (r, resource, what, phdr)) + +APR_IMPLEMENT_EXTERNAL_HOOK_RUN_FIRST(dav, DAV, int, deliver_report, + (request_rec *r, + const dav_resource *resource, + const apr_xml_doc *doc, + ap_filter_t *output, dav_error **err), + (r, resource, doc, output, err), DECLINED) + +APR_IMPLEMENT_EXTERNAL_HOOK_VOID(dav, DAV, gather_reports, + (request_rec *r, const dav_resource *resource, + apr_array_header_t *reports, dav_error **err), + (r, resource, reports, err)) + +APR_IMPLEMENT_EXTERNAL_HOOK_RUN_FIRST(dav, DAV, int, method_precondition, + (request_rec *r, + dav_resource *src, const dav_resource *dest, + const apr_xml_doc *doc, + dav_error **err), + (r, src, dest, doc, err), DECLINED) diff --git a/modules/dav/main/mod_dav.h b/modules/dav/main/mod_dav.h index 80ad117..c8c54f3 100644 --- a/modules/dav/main/mod_dav.h +++ b/modules/dav/main/mod_dav.h @@ -50,7 +50,7 @@ extern "C" { #define DAV_READ_BLOCKSIZE 2048 /* used for reading input blocks */ -#define DAV_RESPONSE_BODY_1 "\n\n" +#define DAV_RESPONSE_BODY_1 "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.01//EN\" \"http://www.w3.org/TR/html4/strict.dtd\">\n<html>\n<head>\n<title>" #define DAV_RESPONSE_BODY_2 "\n\n
"
 #define DAV_RESPONSE_BODY_3 "</h1>\n<p>"
 #define DAV_RESPONSE_BODY_4 "</p>
\n" @@ -427,6 +427,9 @@ typedef struct dav_resource { */ typedef struct dav_locktoken dav_locktoken; +DAV_DECLARE(dav_error *) dav_get_resource(request_rec *r, int label_allowed, + int use_checked_in, dav_resource **res_p); + /* -------------------------------------------------------------------- ** @@ -574,8 +577,22 @@ DAV_DECLARE(int) dav_get_depth(request_rec *r, int def_depth); DAV_DECLARE(int) dav_validate_root(const apr_xml_doc *doc, const char *tagname); +DAV_DECLARE(int) dav_validate_root_ns(const apr_xml_doc *doc, + int ns, const char *tagname); DAV_DECLARE(apr_xml_elem *) dav_find_child(const apr_xml_elem *elem, const char *tagname); +DAV_DECLARE(apr_xml_elem *) dav_find_child_ns(const apr_xml_elem *elem, + int ns, const char *tagname); +DAV_DECLARE(apr_xml_elem *) dav_find_next_ns(const apr_xml_elem *elem, + int ns, const char *tagname); + +/* find and return the attribute with a name in the given namespace */ +DAV_DECLARE(apr_xml_attr *) dav_find_attr_ns(const apr_xml_elem *elem, + int ns, const char *attrname); + +/* find and return the attribute with a given DAV: tagname */ +DAV_DECLARE(apr_xml_attr *) dav_find_attr(const apr_xml_elem *elem, + const char *attrname); /* gather up all the CDATA into a single string */ DAV_DECLARE(const char *) dav_xml_get_cdata(const apr_xml_elem *elem, apr_pool_t *pool, @@ -644,10 +661,10 @@ DAV_DECLARE(void) dav_xmlns_generate(dav_xmlns_info *xi, ** mod_dav 1.0). There are too many dependencies between a dav_resource ** (defined by ) and the other functionality. ** -** Live properties are not part of the dav_provider structure because they -** are handled through the APR_HOOK interface (to allow for multiple liveprop -** providers). The core always provides some properties, and then a given -** provider will add more properties. +** Live properties and report extensions are not part of the dav_provider +** structure because they are handled through the APR_HOOK interface (to +** allow for multiple providers). The core always provides some +** properties, and then a given provider will add more properties. ** ** Some providers may need to associate a context with the dav_provider ** structure -- the ctx field is available for storing this context. Just @@ -711,6 +728,68 @@ APR_DECLARE_EXTERNAL_HOOK(dav, DAV, void, insert_all_liveprops, (request_rec *r, const dav_resource *resource, dav_prop_insert what, apr_text_header *phdr)) +/* +** deliver_report: given a parsed report request, process the request +** an deliver the resulting report. +** +** The hook implementer should decide whether it should handle the given +** report, and if so, write the response to the output filter. If the +** report is not relevant, return DECLINED. +*/ +APR_DECLARE_EXTERNAL_HOOK(dav, DAV, int, deliver_report, + (request_rec *r, + const dav_resource *resource, + const apr_xml_doc *doc, + ap_filter_t *output, dav_error **err)) + +/* +** gather_reports: get all reports. +** +** The hook implementor should push one or more dav_report_elem structures +** containing report names into the specified array. These names are returned +** in the DAV:supported-reports-set property to let clients know +** what reports are supported by the installation. +** +*/ +APR_DECLARE_EXTERNAL_HOOK(dav, DAV, void, gather_reports, + (request_rec *r, const dav_resource *resource, + apr_array_header_t *reports, dav_error **err)) + +/* + ** method_precondition: check method preconditions. 
+ ** + ** If a WebDAV extension needs to set any preconditions on a method, this + ** hook is where to do it. If the precondition fails, return an error + ** response with the tagname set to the value of the failed precondition. + ** + ** If the method requires an XML body, this will be read and provided as + ** the doc value. If not, doc is NULL. An extension that needs to verify + ** the non-XML body of a request should register an input filter to do so + ** within this hook. + ** + ** Methods like PUT will supply a single src resource, and the dst will + ** be NULL. + ** + ** Methods like COPY or MOVE will trigger this hook twice. The first + ** invocation will supply just the source resource. The second invocation + ** will supply a source and destination. This allows preconditions on the + ** source resource to be verified before making an attempt to get the + ** destination resource. + ** + ** Methods like PROPFIND and LABEL will trigger this hook initially for + ** the src resource, and then subsequently for each resource that has + ** been walked during processing, with the walked resource passed in dst, + ** and NULL passed in src. + ** + ** As a rule, the src resource originates from a request that has passed + ** through httpd's authn/authz hooks, while the dst resource has not. + */ +APR_DECLARE_EXTERNAL_HOOK(dav, DAV, int, method_precondition, + (request_rec *r, + dav_resource *src, const dav_resource *dst, + const apr_xml_doc *doc, dav_error **err)) + + DAV_DECLARE(const dav_hooks_locks *) dav_get_lock_hooks(request_rec *r); DAV_DECLARE(const dav_hooks_propdb *) dav_get_propdb_hooks(request_rec *r); DAV_DECLARE(const dav_hooks_vsn *) dav_get_vsn_hooks(request_rec *r); @@ -721,6 +800,7 @@ DAV_DECLARE(void) dav_register_provider(apr_pool_t *p, const char *name, const dav_provider *hooks); DAV_DECLARE(const dav_provider *) dav_lookup_provider(const char *name); DAV_DECLARE(const char *) dav_get_provider_name(request_rec *r); +DAV_DECLARE(const dav_provider *) dav_get_provider(request_rec *r); /* ### deprecated */ @@ -827,6 +907,14 @@ struct dav_hooks_liveprop ** property, and does not want it handled as a dead property, it should ** return DAV_PROP_INSERT_NOTSUPP. ** + ** Some DAV extensions, like CalDAV, specify both document elements + ** and property elements that need to be taken into account when + ** generating a property. The document element and property element + ** are made available in the dav_liveprop_elem structure under the + ** resource, accessible as follows: + ** + ** dav_get_liveprop_element(resource); + ** ** Returns one of DAV_PROP_INSERT_* based on what happened. ** ** ### we may need more context... ie. the lock database @@ -974,6 +1062,18 @@ DAV_DECLARE(long) dav_get_liveprop_ns_count(void); DAV_DECLARE(void) dav_add_all_liveprop_xmlns(apr_pool_t *p, apr_text_header *phdr); +typedef struct { + const apr_xml_doc *doc; + const apr_xml_elem *elem; +} dav_liveprop_elem; + +/* + ** When calling insert_prop(), the associated request element and + ** document is accessible using the following call. + */ +DAV_DECLARE(dav_liveprop_elem *) dav_get_liveprop_element(const dav_resource + *resource); + /* ** The following three functions are part of mod_dav's internal handling ** for the core WebDAV properties. They are not part of mod_dav's API. 
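[Editorial note, not part of the patch: the new method_precondition hook above is registered the same way as the other mod_dav hooks, via the generated dav_hook_method_precondition() registration function (compare dav_hook_deliver_report() earlier in this diff). A minimal, hypothetical sketch of an extension implementing it follows; the module name, the precondition name, and the PUT-only check are invented for illustration, and dav_new_error_tag() is assumed to have its usual 2.4-era signature.]

/* Hypothetical sketch of a mod_dav extension using the new
 * method_precondition hook. "example-precondition-failed" and the
 * PUT-must-exist rule are made up for illustration only. */
#include "httpd.h"
#include "mod_dav.h"

static int example_precondition(request_rec *r,
                                dav_resource *src,
                                const dav_resource *dst,
                                const apr_xml_doc *doc,
                                dav_error **err)
{
    /* Only the first invocation carries the source resource; walked
     * or destination resources arrive with src == NULL. */
    if (src == NULL)
        return DECLINED;

    /* Invented precondition: refuse a PUT to a resource that does not
     * already exist. The tagname becomes the DAV: error element that
     * is reported back to the client. */
    if (r->method_number == M_PUT && !src->exists) {
        *err = dav_new_error_tag(r->pool, HTTP_PRECONDITION_FAILED, 0, 0,
                                 "Example precondition failed",
                                 NULL, "example-precondition-failed");
        /* Any non-DECLINED return with *err set makes mod_dav send
         * the error (see the dav_run_method_precondition() call sites
         * added earlier in this patch). */
        return OK;
    }

    return DECLINED; /* let other implementers and the default path run */
}

static void example_register_hooks(apr_pool_t *p)
{
    dav_hook_method_precondition(example_precondition,
                                 NULL, NULL, APR_HOOK_MIDDLE);
}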
@@ -1314,8 +1414,12 @@ DAV_DECLARE(const char *)dav_lock_get_activelock(request_rec *r, dav_buffer *pbuf); /* LockDB-related public lock functions */ +DAV_DECLARE(dav_error *) dav_open_lockdb(request_rec *r, + int ro, + dav_lockdb **lockdb); +DAV_DECLARE(void) dav_close_lockdb(dav_lockdb *lockdb); DAV_DECLARE(dav_error *) dav_lock_parse_lockinfo(request_rec *r, - const dav_resource *resrouce, + const dav_resource *resource, dav_lockdb *lockdb, const apr_xml_doc *doc, dav_lock **lock_request); @@ -1581,15 +1685,28 @@ struct dav_hooks_locks typedef struct dav_propdb dav_propdb; +#define DAV_PROPDB_NONE 0 +#define DAV_PROPDB_RO 1 +#define DAV_PROPDB_DISABLE_LOCKDISCOVERY 2 DAV_DECLARE(dav_error *) dav_open_propdb( request_rec *r, dav_lockdb *lockdb, const dav_resource *resource, - int ro, + int flags, apr_array_header_t *ns_xlate, dav_propdb **propdb); +DAV_DECLARE(dav_error *) dav_popen_propdb( + apr_pool_t *p, + request_rec *r, + dav_lockdb *lockdb, + const dav_resource *resource, + int flags, + apr_array_header_t *ns_xlate, + dav_propdb **propdb); + + DAV_DECLARE(void) dav_close_propdb(dav_propdb *db); DAV_DECLARE(dav_get_props_result) dav_get_props( @@ -1706,6 +1823,7 @@ typedef struct #define DAV_WALKTYPE_AUTH 0x0001 /* limit to authorized files */ #define DAV_WALKTYPE_NORMAL 0x0002 /* walk normal files */ #define DAV_WALKTYPE_LOCKNULL 0x0004 /* walk locknull resources */ +#define DAV_WALKTYPE_TOLERANT 0x0008 /* tolerate non-fatal errors */ /* callback function and a client context for the walk */ dav_error * (*func)(dav_walk_resource *wres, int calltype); diff --git a/modules/dav/main/props.c b/modules/dav/main/props.c index f64878e..c320f8a 100644 --- a/modules/dav/main/props.c +++ b/modules/dav/main/props.c @@ -167,6 +167,8 @@ #define DAV_EMPTY_VALUE "\0" /* TWO null terms */ +#define DAV_PROP_ELEMENT "mod_dav-element" + struct dav_propdb { apr_pool_t *p; /* the pool we should use */ request_rec *r; /* the request record */ @@ -183,6 +185,8 @@ struct dav_propdb { dav_buffer wb_lock; /* work buffer for lockdiscovery property */ + int flags; /* ro, disable lock discovery */ + /* if we ever run a GET subreq, it will be stored here */ request_rec *subreq; @@ -323,7 +327,7 @@ static void dav_do_prop_subreq(dav_propdb *propdb) { /* need to escape the uri that's in the resource struct because during * the property walker it's not encoded. 
*/ - const char *e_uri = ap_escape_uri(propdb->resource->pool, + const char *e_uri = ap_escape_uri(propdb->p, propdb->resource->uri); /* perform a "GET" on the resource's URI (note that the resource @@ -349,6 +353,11 @@ static dav_error * dav_insert_coreprop(dav_propdb *propdb, switch (propid) { case DAV_PROPID_CORE_lockdiscovery: + if (propdb->flags & DAV_PROPDB_DISABLE_LOCKDISCOVERY) { + value = ""; + break; + } + if (propdb->lockdb != NULL) { dav_lock *locks; @@ -421,18 +430,18 @@ static dav_error * dav_insert_coreprop(dav_propdb *propdb, /* use D: prefix to refer to the DAV: namespace URI, * and let the namespace attribute default to "DAV:" */ - s = apr_psprintf(propdb->p, - "" DEBUG_CR, - name); + s = apr_pstrcat(propdb->p, + "" DEBUG_CR, NULL); } else if (what == DAV_PROP_INSERT_VALUE && *value != '\0') { /* use D: prefix to refer to the DAV: namespace URI */ - s = apr_psprintf(propdb->p, "%s" DEBUG_CR, - name, value, name); + s = apr_pstrcat(propdb->p, "", value, "" DEBUG_CR, NULL); } else { /* use D: prefix to refer to the DAV: namespace URI */ - s = apr_psprintf(propdb->p, "" DEBUG_CR, name); + s = apr_pstrcat(propdb->p, "" DEBUG_CR, NULL); } apr_text_append(propdb->p, phdr, s); @@ -473,11 +482,11 @@ static void dav_output_prop_name(apr_pool_t *pool, const char *s; if (*name->ns == '\0') - s = apr_psprintf(pool, "<%s/>" DEBUG_CR, name->name); + s = apr_pstrcat(pool, "<", name->name, "/>" DEBUG_CR, NULL); else { const char *prefix = dav_xmlns_add_uri(xi, name->ns); - s = apr_psprintf(pool, "<%s:%s/>" DEBUG_CR, prefix, name->name); + s = apr_pstrcat(pool, "<", prefix, ":", name->name, "/>" DEBUG_CR, NULL); } apr_text_append(pool, phdr, s); @@ -520,11 +529,25 @@ static dav_error *dav_really_open_db(dav_propdb *propdb, int ro) DAV_DECLARE(dav_error *)dav_open_propdb(request_rec *r, dav_lockdb *lockdb, const dav_resource *resource, - int ro, + int flags, apr_array_header_t * ns_xlate, dav_propdb **p_propdb) { - dav_propdb *propdb = apr_pcalloc(r->pool, sizeof(*propdb)); + return dav_popen_propdb(r->pool, r, lockdb, resource, + flags, ns_xlate, p_propdb); +} + +DAV_DECLARE(dav_error *)dav_popen_propdb(apr_pool_t *p, + request_rec *r, dav_lockdb *lockdb, + const dav_resource *resource, + int flags, + apr_array_header_t * ns_xlate, + dav_propdb **p_propdb) +{ + dav_propdb *propdb = NULL; + + propdb = apr_pcalloc(p, sizeof(*propdb)); + propdb->p = p; *p_propdb = NULL; @@ -537,7 +560,6 @@ DAV_DECLARE(dav_error *)dav_open_propdb(request_rec *r, dav_lockdb *lockdb, #endif propdb->r = r; - apr_pool_create(&propdb->p, r->pool); propdb->resource = resource; propdb->ns_xlate = ns_xlate; @@ -545,6 +567,8 @@ DAV_DECLARE(dav_error *)dav_open_propdb(request_rec *r, dav_lockdb *lockdb, propdb->lockdb = lockdb; + propdb->flags = flags; + /* always defer actual open, to avoid expense of accessing db * when only live properties are involved */ @@ -562,10 +586,10 @@ DAV_DECLARE(void) dav_close_propdb(dav_propdb *propdb) (*propdb->db_hooks->close)(propdb->db); } - /* Currently, mod_dav's pool usage doesn't allow clearing this pool. 
*/ -#if 0 - apr_pool_destroy(propdb->p); -#endif + if (propdb->subreq) { + ap_destroy_sub_req(propdb->subreq); + propdb->subreq = NULL; + } } DAV_DECLARE(dav_get_props_result) dav_get_allprops(dav_propdb *propdb, @@ -706,10 +730,24 @@ DAV_DECLARE(dav_get_props_result) dav_get_props(dav_propdb *propdb, apr_text_header hdr_ns = { 0 }; int have_good = 0; dav_get_props_result result = { 0 }; + dav_liveprop_elem *element; char *marks_liveprop; dav_xmlns_info *xi; int xi_filled = 0; + /* we lose both the document and the element when calling (insert_prop), + * make these available in the pool. + */ + element = dav_get_liveprop_element(propdb->resource); + if (!element) { + element = apr_pcalloc(propdb->resource->pool, sizeof(dav_liveprop_elem)); + apr_pool_userdata_setn(element, DAV_PROP_ELEMENT, NULL, propdb->resource->pool); + } + else { + memset(element, 0, sizeof(dav_liveprop_elem)); + } + element->doc = doc; + /* ### NOTE: we should pass in TWO buffers -- one for keys, one for the marks */ @@ -733,13 +771,16 @@ DAV_DECLARE(dav_get_props_result) dav_get_props(dav_propdb *propdb, dav_prop_insert inserted; dav_prop_name name; + element->elem = elem; + /* ** First try live property providers; if they don't handle ** the property, then try looking it up in the propdb. */ if (elem->priv == NULL) { - elem->priv = apr_pcalloc(propdb->p, sizeof(*priv)); + /* elem->priv outlives propdb->p. Hence use the request pool */ + elem->priv = apr_pcalloc(propdb->r->pool, sizeof(*priv)); } priv = elem->priv; @@ -909,6 +950,15 @@ DAV_DECLARE(void) dav_get_liveprop_supported(dav_propdb *propdb, } } +DAV_DECLARE(dav_liveprop_elem *) dav_get_liveprop_element(const dav_resource *resource) +{ + dav_liveprop_elem *element; + + apr_pool_userdata_get((void **)&element, DAV_PROP_ELEMENT, resource->pool); + + return element; +} + DAV_DECLARE_NONSTD(void) dav_prop_validate(dav_prop_ctx *ctx) { dav_propdb *propdb = ctx->propdb; @@ -1047,6 +1097,10 @@ DAV_DECLARE_NONSTD(void) dav_prop_exec(dav_prop_ctx *ctx) /* ** Delete the property. Ignore errors -- the property is there, or ** we are deleting it for a second time. + ** + ** http://tools.ietf.org/html/rfc4918#section-14.23 says + ** "Specifying the removal of a property that does not exist is + ** not an error" */ /* ### but what about other errors? */ (void) (*propdb->db_hooks->remove)(propdb->db, &name); diff --git a/modules/dav/main/std_liveprop.c b/modules/dav/main/std_liveprop.c index e760c65..cb46b65 100644 --- a/modules/dav/main/std_liveprop.c +++ b/modules/dav/main/std_liveprop.c @@ -154,10 +154,10 @@ static dav_prop_insert dav_core_insert_prop(const dav_resource *resource, /* assert: info != NULL && info->name != NULL */ if (what == DAV_PROP_INSERT_SUPPORTED) { - s = apr_psprintf(p, - "" DEBUG_CR, - info->name, dav_core_namespace_uris[info->ns]); + s = apr_pstrcat(p, + "name, + "\" D:namespace=\"", dav_core_namespace_uris[info->ns], + "\"/>" DEBUG_CR, NULL); } else if (what == DAV_PROP_INSERT_VALUE && *value != '\0') { s = apr_psprintf(p, "%s" DEBUG_CR, diff --git a/modules/dav/main/util.c b/modules/dav/main/util.c index 9f24604..3f7822f 100644 --- a/modules/dav/main/util.c +++ b/modules/dav/main/util.c @@ -101,6 +101,9 @@ DAV_DECLARE(dav_error*) dav_join_error(dav_error *dest, dav_error *src) return dest; } +/* ### Unclear if this was designed to be used with an uninitialized + * dav_buffer struct, but is used on by dav_lock_get_activelock(). + * Hence check for pbuf->buf. 
*/ DAV_DECLARE(void) dav_check_bufsize(apr_pool_t * p, dav_buffer *pbuf, apr_size_t extra_needed) { @@ -110,7 +113,8 @@ DAV_DECLARE(void) dav_check_bufsize(apr_pool_t * p, dav_buffer *pbuf, pbuf->alloc_len += extra_needed + DAV_BUFFER_PAD; newbuf = apr_palloc(p, pbuf->alloc_len); - memcpy(newbuf, pbuf->buf, pbuf->cur_len); + if (pbuf->buf) + memcpy(newbuf, pbuf->buf, pbuf->cur_len); pbuf->buf = newbuf; } } @@ -240,7 +244,7 @@ DAV_DECLARE(dav_lookup_result) dav_lookup_uri(const char *uri, request. the port must match our port. */ port = r->connection->local_addr->port; - if (strcasecmp(comp.scheme, scheme) != 0 + if (ap_cstr_casecmp(comp.scheme, scheme) != 0 #ifdef APACHE_PORT_HANDLING_IS_BUSTED || comp.port != port #endif @@ -312,26 +316,71 @@ DAV_DECLARE(dav_lookup_result) dav_lookup_uri(const char *uri, */ /* validate that the root element uses a given DAV: tagname (TRUE==valid) */ -DAV_DECLARE(int) dav_validate_root(const apr_xml_doc *doc, - const char *tagname) +DAV_DECLARE(int) dav_validate_root_ns(const apr_xml_doc *doc, + int ns, const char *tagname) { return doc->root && - doc->root->ns == APR_XML_NS_DAV_ID && + doc->root->ns == ns && strcmp(doc->root->name, tagname) == 0; } -/* find and return the (unique) child with a given DAV: tagname */ -DAV_DECLARE(apr_xml_elem *) dav_find_child(const apr_xml_elem *elem, - const char *tagname) +/* validate that the root element uses a given DAV: tagname (TRUE==valid) */ +DAV_DECLARE(int) dav_validate_root(const apr_xml_doc *doc, + const char *tagname) +{ + return dav_validate_root_ns(doc, APR_XML_NS_DAV_ID, tagname); +} + +/* find and return the next child with a tagname in the given namespace */ +DAV_DECLARE(apr_xml_elem *) dav_find_next_ns(const apr_xml_elem *elem, + int ns, const char *tagname) +{ + apr_xml_elem *child = elem->next; + + for (; child; child = child->next) + if (child->ns == ns && !strcmp(child->name, tagname)) + return child; + return NULL; +} + +/* find and return the (unique) child with a tagname in the given namespace */ +DAV_DECLARE(apr_xml_elem *) dav_find_child_ns(const apr_xml_elem *elem, + int ns, const char *tagname) { apr_xml_elem *child = elem->first_child; for (; child; child = child->next) - if (child->ns == APR_XML_NS_DAV_ID && !strcmp(child->name, tagname)) + if (child->ns == ns && !strcmp(child->name, tagname)) return child; return NULL; } +/* find and return the (unique) child with a given DAV: tagname */ +DAV_DECLARE(apr_xml_elem *) dav_find_child(const apr_xml_elem *elem, + const char *tagname) +{ + return dav_find_child_ns(elem, APR_XML_NS_DAV_ID, tagname); +} + +/* find and return the attribute with a name in the given namespace */ +DAV_DECLARE(apr_xml_attr *) dav_find_attr_ns(const apr_xml_elem *elem, + int ns, const char *attrname) +{ + apr_xml_attr *attr = elem->attr; + + for (; attr; attr = attr->next) + if (attr->ns == ns && !strcmp(attr->name, attrname)) + return attr; + return NULL; +} + +/* find and return the attribute with a given DAV: tagname */ +DAV_DECLARE(apr_xml_attr *) dav_find_attr(const apr_xml_elem *elem, + const char *attrname) +{ + return dav_find_attr_ns(elem, APR_XML_NS_DAV_ID, attrname); +} + /* gather up all the CDATA into a single string */ DAV_DECLARE(const char *) dav_xml_get_cdata(const apr_xml_elem *elem, apr_pool_t *pool, int strip_white) @@ -470,8 +519,8 @@ DAV_DECLARE(void) dav_xmlns_generate(dav_xmlns_info *xi, apr_hash_this(hi, &prefix, NULL, &uri); - s = apr_psprintf(xi->pool, " xmlns:%s=\"%s\"", - (const char *)prefix, (const char *)uri); + s = apr_pstrcat(xi->pool, 
" xmlns:", (const char *)prefix, "=\"", + (const char *)uri, "\"", NULL); apr_text_append(xi->pool, phdr, s); } } @@ -660,7 +709,13 @@ static dav_error * dav_process_if_header(request_rec *r, dav_if_header **p_ih) /* note that parsed_uri.path is allocated; we can trash it */ /* clean up the URI a bit */ - ap_getparents(parsed_uri.path); + if (!ap_normalize_path(parsed_uri.path, + AP_NORMALIZE_NOT_ABOVE_ROOT | + AP_NORMALIZE_DECODE_UNRESERVED)) { + return dav_new_error(r->pool, HTTP_BAD_REQUEST, + DAV_ERR_IF_TAGGED, rv, + "Invalid URI path tagged If-header."); + } /* the resources we will compare to have unencoded paths */ if (ap_unescape_url(parsed_uri.path) != OK) { @@ -746,8 +801,14 @@ static dav_error * dav_process_if_header(request_rec *r, dav_if_header **p_ih) "for the same state."); } condition = DAV_IF_COND_NOT; + list += 2; + } + else { + return dav_new_error(r->pool, HTTP_BAD_REQUEST, + DAV_ERR_IF_UNK_CHAR, 0, + "Invalid \"If:\" header: " + "Unexpected character in List"); } - list += 2; break; case ' ': diff --git a/modules/examples/mod_case_filter_in.c b/modules/examples/mod_case_filter_in.c index 5116e3b..c70a9eb 100644 --- a/modules/examples/mod_case_filter_in.c +++ b/modules/examples/mod_case_filter_in.c @@ -114,7 +114,7 @@ static apr_status_t CaseFilterInFilter(ap_filter_t *f, buf[n] = apr_toupper(data[n]); } - pbktOut = apr_bucket_heap_create(buf, len, 0, c->bucket_alloc); + pbktOut = apr_bucket_heap_create(buf, len, free, c->bucket_alloc); APR_BRIGADE_INSERT_TAIL(pbbOut, pbktOut); apr_bucket_delete(pbktIn); } diff --git a/modules/examples/mod_example_hooks.c b/modules/examples/mod_example_hooks.c index d818dc1..f7ef5a5 100644 --- a/modules/examples/mod_example_hooks.c +++ b/modules/examples/mod_example_hooks.c @@ -742,7 +742,7 @@ static int x_pre_config(apr_pool_t *pconf, apr_pool_t *plog, /* * Log the call and exit. */ - trace_startup(ptemp, NULL, NULL, "x_pre_config()"); + trace_startup(pconf, NULL, NULL, "x_pre_config()"); return OK; } @@ -763,7 +763,7 @@ static int x_check_config(apr_pool_t *pconf, apr_pool_t *plog, /* * Log the call and exit. */ - trace_startup(ptemp, s, NULL, "x_check_config()"); + trace_startup(pconf, s, NULL, "x_check_config()"); return OK; } @@ -800,7 +800,7 @@ static int x_open_logs(apr_pool_t *pconf, apr_pool_t *plog, /* * Log the call and exit. */ - trace_startup(ptemp, s, NULL, "x_open_logs()"); + trace_startup(pconf, s, NULL, "x_open_logs()"); return OK; } @@ -820,7 +820,7 @@ static int x_post_config(apr_pool_t *pconf, apr_pool_t *plog, /* * Log the call and exit. */ - trace_startup(ptemp, s, NULL, "x_post_config()"); + trace_startup(pconf, s, NULL, "x_post_config()"); return OK; } @@ -1173,6 +1173,22 @@ static int x_post_read_request(request_rec *r) return DECLINED; } +/* + * This routine gives our module an opportunity to translate the URI into an + * actual filename, before URL decoding happens. + * + * This is a RUN_FIRST hook. + */ +static int x_pre_translate_name(request_rec *r) +{ + /* + * We don't actually *do* anything here, except note the fact that we were + * called. + */ + trace_request(r, "x_pre_translate_name()"); + return DECLINED; +} + /* * This routine gives our module an opportunity to translate the URI into an * actual filename. 
If we don't do anything special, the server's default @@ -1448,6 +1464,7 @@ static int x_monitor(apr_pool_t *p, server_rec *s) */ static void x_register_hooks(apr_pool_t *p) { + trace = NULL; ap_hook_pre_config(x_pre_config, NULL, NULL, APR_HOOK_MIDDLE); ap_hook_check_config(x_check_config, NULL, NULL, APR_HOOK_MIDDLE); ap_hook_test_config(x_test_config, NULL, NULL, APR_HOOK_MIDDLE); @@ -1466,6 +1483,7 @@ static void x_register_hooks(apr_pool_t *p) ap_hook_log_transaction(x_log_transaction, NULL, NULL, APR_HOOK_MIDDLE); ap_hook_http_scheme(x_http_scheme, NULL, NULL, APR_HOOK_MIDDLE); ap_hook_default_port(x_default_port, NULL, NULL, APR_HOOK_MIDDLE); + ap_hook_pre_translate_name(x_pre_translate_name, NULL, NULL, APR_HOOK_MIDDLE); ap_hook_translate_name(x_translate_name, NULL, NULL, APR_HOOK_MIDDLE); ap_hook_map_to_storage(x_map_to_storage, NULL,NULL, APR_HOOK_MIDDLE); ap_hook_header_parser(x_header_parser, NULL, NULL, APR_HOOK_MIDDLE); diff --git a/modules/filters/libsed.h b/modules/filters/libsed.h index 76cbc0c..0256b1e 100644 --- a/modules/filters/libsed.h +++ b/modules/filters/libsed.h @@ -60,7 +60,7 @@ struct sed_label_s { }; typedef apr_status_t (sed_err_fn_t)(void *data, const char *error); -typedef apr_status_t (sed_write_fn_t)(void *ctx, char *buf, int sz); +typedef apr_status_t (sed_write_fn_t)(void *ctx, char *buf, apr_size_t sz); typedef struct sed_commands_s sed_commands_t; #define NWFILES 11 /* 10 plus one for standard output */ @@ -69,7 +69,7 @@ struct sed_commands_s { sed_err_fn_t *errfn; void *data; - unsigned lsize; + apr_size_t lsize; char *linebuf; char *lbend; const char *saveq; @@ -116,15 +116,15 @@ struct sed_eval_s { apr_int64_t lnum; void *fout; - unsigned lsize; + apr_size_t lsize; char *linebuf; char *lspend; - unsigned hsize; + apr_size_t hsize; char *holdbuf; char *hspend; - unsigned gsize; + apr_size_t gsize; char *genbuf; char *lcomend; @@ -160,7 +160,7 @@ apr_status_t sed_init_eval(sed_eval_t *eval, sed_commands_t *commands, sed_err_fn_t *errfn, void *data, sed_write_fn_t *writefn, apr_pool_t *p); apr_status_t sed_reset_eval(sed_eval_t *eval, sed_commands_t *commands, sed_err_fn_t *errfn, void *data); -apr_status_t sed_eval_buffer(sed_eval_t *eval, const char *buf, int bufsz, void *fout); +apr_status_t sed_eval_buffer(sed_eval_t *eval, const char *buf, apr_size_t bufsz, void *fout); apr_status_t sed_eval_file(sed_eval_t *eval, apr_file_t *fin, void *fout); apr_status_t sed_finalize_eval(sed_eval_t *eval, void *f); void sed_destroy_eval(sed_eval_t *eval); diff --git a/modules/filters/mod_brotli.c b/modules/filters/mod_brotli.c index 56717e7..0f7d770 100644 --- a/modules/filters/mod_brotli.c +++ b/modules/filters/mod_brotli.c @@ -344,6 +344,7 @@ static apr_status_t compress_filter(ap_filter_t *f, apr_bucket_brigade *bb) const char *encoding; const char *token; const char *accepts; + const char *q = NULL; /* Only work on main request, not subrequests, that are not * a 204 response with no content, and are not tagged with the @@ -411,7 +412,19 @@ static apr_status_t compress_filter(ap_filter_t *f, apr_bucket_brigade *bb) token = (*accepts) ? ap_get_token(r->pool, &accepts, 0) : NULL; } - if (!token || token[0] == '\0') { + /* Find the qvalue, if provided */ + if (*accepts) { + while (*accepts == ';') { + ++accepts; + } + q = ap_get_token(r->pool, &accepts, 1); + ap_log_rerror(APLOG_MARK, APLOG_TRACE1, 0, r, + "token: '%s' - q: '%s'", token ? 
token : "NULL", q); + } + + /* No acceptable token found or q=0 */ + if (!token || token[0] == '\0' || + (q && strlen(q) >= 3 && strncmp("q=0.000", q, strlen(q)) == 0)) { ap_remove_output_filter(f); return ap_pass_brigade(f->next, bb); } diff --git a/modules/filters/mod_charset_lite.c b/modules/filters/mod_charset_lite.c index ed76f61..e3d1ce9 100644 --- a/modules/filters/mod_charset_lite.c +++ b/modules/filters/mod_charset_lite.c @@ -790,7 +790,7 @@ static apr_status_t xlate_out_filter(ap_filter_t *f, apr_bucket_brigade *bb) if (!ctx->noop && ctx->xlate == NULL) { const char *mime_type = f->r->content_type; - if (mime_type && (strncasecmp(mime_type, "text/", 5) == 0 || + if (mime_type && (ap_cstr_casecmpn(mime_type, "text/", 5) == 0 || #if APR_CHARSET_EBCDIC /* On an EBCDIC machine, be willing to translate mod_autoindex- * generated output. Otherwise, it doesn't look too cool. @@ -806,7 +806,7 @@ static apr_status_t xlate_out_filter(ap_filter_t *f, apr_bucket_brigade *bb) */ strcmp(mime_type, DIR_MAGIC_TYPE) == 0 || #endif - strncasecmp(mime_type, "message/", 8) == 0 || + ap_cstr_casecmpn(mime_type, "message/", 8) == 0 || dc->force_xlate == FX_FORCE)) { rv = apr_xlate_open(&ctx->xlate, diff --git a/modules/filters/mod_data.c b/modules/filters/mod_data.c index d083d32..ddadd1b 100644 --- a/modules/filters/mod_data.c +++ b/modules/filters/mod_data.c @@ -107,8 +107,8 @@ static apr_status_t data_out_filter(ap_filter_t *f, apr_bucket_brigade *bb) if (content_length) { apr_off_t len, clen; apr_brigade_length(ctx->bb, 1, &len); - clen = apr_atoi64(content_length); - if (clen >= 0 && clen < APR_INT32_MAX) { + if (ap_parse_strict_length(&clen, content_length) + && clen < APR_INT32_MAX) { ap_set_content_length(r, len + apr_base64_encode_len((int)clen) - 1); } diff --git a/modules/filters/mod_deflate.c b/modules/filters/mod_deflate.c index d60b2de..5a541e7 100644 --- a/modules/filters/mod_deflate.c +++ b/modules/filters/mod_deflate.c @@ -43,10 +43,11 @@ #include "apr_general.h" #include "util_filter.h" #include "apr_buckets.h" +#include "http_protocol.h" #include "http_request.h" +#include "http_ssl.h" #define APR_WANT_STRFUNC #include "apr_want.h" -#include "mod_ssl.h" #include "zlib.h" @@ -56,15 +57,20 @@ module AP_MODULE_DECLARE_DATA deflate_module; #define AP_INFLATE_RATIO_LIMIT 200 #define AP_INFLATE_RATIO_BURST 3 +#define AP_DEFLATE_ETAG_ADDSUFFIX 0 +#define AP_DEFLATE_ETAG_NOCHANGE 1 +#define AP_DEFLATE_ETAG_REMOVE 2 + typedef struct deflate_filter_config_t { int windowSize; int memlevel; int compressionlevel; - apr_size_t bufferSize; + int bufferSize; const char *note_ratio_name; const char *note_input_name; const char *note_output_name; + int etag_opt; } deflate_filter_config; typedef struct deflate_dirconf_t { @@ -94,8 +100,6 @@ static const char deflate_magic[2] = { '\037', '\213' }; #define DEFAULT_MEMLEVEL 9 #define DEFAULT_BUFFERSIZE 8096 -static APR_OPTIONAL_FN_TYPE(ssl_var_lookup) *mod_deflate_ssl_var = NULL; - /* Check whether a request is gzipped, so we can un-gzip it. * If a request has multiple encodings, we need the gzip * to be the outermost non-identity encoding. 
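[Editorial note, not part of the patch: the qvalue handling added to mod_brotli above, and applied identically to mod_deflate further down, takes the parameter returned by ap_get_token() after ';' and rejects the token when strlen(q) >= 3 and strncmp("q=0.000", q, strlen(q)) == 0, i.e. when q is any prefix of "q=0.000" of at least three characters. A small self-contained sketch of how that comparison behaves:]

/* Standalone illustration (not from the patch) of the zero-qvalue
 * test: "q=0", "q=0.0", "q=0.00" and "q=0.000" are rejected, while
 * any non-zero qvalue such as "q=0.5" is accepted. */
#include <stdio.h>
#include <string.h>

static int is_zero_qvalue(const char *q)
{
    return q && strlen(q) >= 3 && strncmp("q=0.000", q, strlen(q)) == 0;
}

int main(void)
{
    const char *samples[] = { "q=0", "q=0.0", "q=0.000", "q=0.5", "q=1.0" };
    size_t i;

    for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
        printf("%-8s -> %s\n", samples[i],
               is_zero_qvalue(samples[i]) ? "rejected" : "accepted");
    return 0;
}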
@@ -118,8 +122,8 @@ static int check_gzip(request_rec *r, apr_table_t *hdrs1, apr_table_t *hdrs2) if (encoding && *encoding) { /* check the usual/simple case first */ - if (!strcasecmp(encoding, "gzip") - || !strcasecmp(encoding, "x-gzip")) { + if (!ap_cstr_casecmp(encoding, "gzip") + || !ap_cstr_casecmp(encoding, "x-gzip")) { found = 1; if (hdrs) { apr_table_unset(hdrs, "Content-Encoding"); @@ -137,8 +141,8 @@ static int check_gzip(request_rec *r, apr_table_t *hdrs1, apr_table_t *hdrs2) for(;;) { char *token = ap_strrchr(new_encoding, ','); if (!token) { /* gzip:identity or other:identity */ - if (!strcasecmp(new_encoding, "gzip") - || !strcasecmp(new_encoding, "x-gzip")) { + if (!ap_cstr_casecmp(new_encoding, "gzip") + || !ap_cstr_casecmp(new_encoding, "x-gzip")) { found = 1; if (hdrs) { apr_table_unset(hdrs, "Content-Encoding"); @@ -150,8 +154,8 @@ static int check_gzip(request_rec *r, apr_table_t *hdrs1, apr_table_t *hdrs2) break; /* seen all tokens */ } for (ptr=token+1; apr_isspace(*ptr); ++ptr); - if (!strcasecmp(ptr, "gzip") - || !strcasecmp(ptr, "x-gzip")) { + if (!ap_cstr_casecmp(ptr, "gzip") + || !ap_cstr_casecmp(ptr, "x-gzip")) { *token = '\0'; if (hdrs) { apr_table_setn(hdrs, "Content-Encoding", new_encoding); @@ -161,7 +165,7 @@ static int check_gzip(request_rec *r, apr_table_t *hdrs1, apr_table_t *hdrs2) } found = 1; } - else if (!ptr[0] || !strcasecmp(ptr, "identity")) { + else if (!ptr[0] || !ap_cstr_casecmp(ptr, "identity")) { *token = '\0'; continue; /* strip the token and find the next one */ } @@ -250,7 +254,7 @@ static const char *deflate_set_buffer_size(cmd_parms *cmd, void *dummy, return "DeflateBufferSize should be positive"; } - c->bufferSize = (apr_size_t)n; + c->bufferSize = n; return NULL; } @@ -296,6 +300,29 @@ static const char *deflate_set_memlevel(cmd_parms *cmd, void *dummy, return NULL; } +static const char *deflate_set_etag(cmd_parms *cmd, void *dummy, + const char *arg) +{ + deflate_filter_config *c = ap_get_module_config(cmd->server->module_config, + &deflate_module); + + if (!strcasecmp(arg, "NoChange")) { + c->etag_opt = AP_DEFLATE_ETAG_NOCHANGE; + } + else if (!strcasecmp(arg, "AddSuffix")) { + c->etag_opt = AP_DEFLATE_ETAG_ADDSUFFIX; + } + else if (!strcasecmp(arg, "Remove")) { + c->etag_opt = AP_DEFLATE_ETAG_REMOVE; + } + else { + return "DeflateAlterETAG accepts only 'NoChange', 'AddSuffix', and 'Remove'"; + } + + return NULL; +} + + static const char *deflate_set_compressionlevel(cmd_parms *cmd, void *dummy, const char *arg) { @@ -389,35 +416,40 @@ typedef struct deflate_ctx_t /* Do update ctx->crc, see comment in flush_libz_buffer */ #define UPDATE_CRC 1 +static void consume_buffer(deflate_ctx *ctx, deflate_filter_config *c, + int len, int crc, apr_bucket_brigade *bb) +{ + apr_bucket *b; + + /* + * Do we need to update ctx->crc? 
Usually this is the case for + * inflate action where we need to do a crc on the output, whereas + * in the deflate case we need to do a crc on the input + */ + if (crc) { + ctx->crc = crc32(ctx->crc, (const Bytef *)ctx->buffer, len); + } + + b = apr_bucket_heap_create((char *)ctx->buffer, len, NULL, + bb->bucket_alloc); + APR_BRIGADE_INSERT_TAIL(bb, b); + + ctx->stream.next_out = ctx->buffer; + ctx->stream.avail_out = c->bufferSize; +} + static int flush_libz_buffer(deflate_ctx *ctx, deflate_filter_config *c, - struct apr_bucket_alloc_t *bucket_alloc, int (*libz_func)(z_streamp, int), int flush, int crc) { int zRC = Z_OK; int done = 0; - unsigned int deflate_len; - apr_bucket *b; + int deflate_len; for (;;) { deflate_len = c->bufferSize - ctx->stream.avail_out; - - if (deflate_len != 0) { - /* - * Do we need to update ctx->crc? Usually this is the case for - * inflate action where we need to do a crc on the output, whereas - * in the deflate case we need to do a crc on the input - */ - if (crc) { - ctx->crc = crc32(ctx->crc, (const Bytef *)ctx->buffer, - deflate_len); - } - b = apr_bucket_heap_create((char *)ctx->buffer, - deflate_len, NULL, - bucket_alloc); - APR_BRIGADE_INSERT_TAIL(ctx->bb, b); - ctx->stream.next_out = ctx->buffer; - ctx->stream.avail_out = c->bufferSize; + if (deflate_len > 0) { + consume_buffer(ctx, c, deflate_len, crc, ctx->bb); } if (done) @@ -465,11 +497,16 @@ static apr_status_t deflate_ctx_cleanup(void *data) * value inside the double-quotes if an ETag has already been set * and its value already contains double-quotes. PR 39727 */ -static void deflate_check_etag(request_rec *r, const char *transform) +static void deflate_check_etag(request_rec *r, const char *transform, int etag_opt) { const char *etag = apr_table_get(r->headers_out, "ETag"); apr_size_t etaglen; + if (etag_opt == AP_DEFLATE_ETAG_REMOVE) { + apr_table_unset(r->headers_out, "ETag"); + return; + } + if ((etag && ((etaglen = strlen(etag)) > 2))) { if (etag[etaglen - 1] == '"') { apr_size_t transformlen = strlen(transform); @@ -514,10 +551,8 @@ static int check_ratio(request_rec *r, deflate_ctx *ctx, static int have_ssl_compression(request_rec *r) { const char *comp; - if (mod_deflate_ssl_var == NULL) - return 0; - comp = mod_deflate_ssl_var(r->pool, r->server, r->connection, r, - "SSL_COMPRESS_METHOD"); + comp = ap_ssl_var_lookup(r->pool, r->server, r->connection, r, + "SSL_COMPRESS_METHOD"); if (comp == NULL || *comp == '\0' || strcmp(comp, "NULL") == 0) return 0; return 1; @@ -530,6 +565,7 @@ static apr_status_t deflate_out_filter(ap_filter_t *f, request_rec *r = f->r; deflate_ctx *ctx = f->ctx; int zRC; + apr_status_t rv; apr_size_t len = 0, blen; const char *data; deflate_filter_config *c; @@ -586,9 +622,14 @@ static apr_status_t deflate_out_filter(ap_filter_t *f, continue; } - rc = apr_bucket_read(e, &data, &blen, APR_BLOCK_READ); - if (rc != APR_SUCCESS) - return rc; + if (e->length == (apr_size_t)-1) { + rc = apr_bucket_read(e, &data, &blen, APR_BLOCK_READ); + if (rc != APR_SUCCESS) + return rc; + } + else { + blen = e->length; + } len += blen; /* 50 is for Content-Encoding and Vary headers and ETag suffix */ if (len > sizeof(gzip_header) + VALIDATION_SIZE + 50) @@ -699,6 +740,8 @@ static apr_status_t deflate_out_filter(ap_filter_t *f, */ if (!apr_table_get(r->subprocess_env, "force-gzip")) { const char *accepts; + const char *q = NULL; + /* if they don't have the line, then they can't play */ accepts = apr_table_get(r->headers_in, "Accept-Encoding"); if (accepts == NULL) { @@ -707,7 +750,7 
@@ static apr_status_t deflate_out_filter(ap_filter_t *f, } token = ap_get_token(r->pool, &accepts, 0); - while (token && token[0] && strcasecmp(token, "gzip")) { + while (token && token[0] && ap_cstr_casecmp(token, "gzip")) { /* skip parameters, XXX: ;q=foo evaluation? */ while (*accepts == ';') { ++accepts; @@ -721,10 +764,21 @@ static apr_status_t deflate_out_filter(ap_filter_t *f, token = (*accepts) ? ap_get_token(r->pool, &accepts, 0) : NULL; } - /* No acceptable token found. */ - if (token == NULL || token[0] == '\0') { + /* Find the qvalue, if provided */ + if (*accepts) { + while (*accepts == ';') { + ++accepts; + } + q = ap_get_token(r->pool, &accepts, 1); + ap_log_rerror(APLOG_MARK, APLOG_TRACE1, 0, r, + "token: '%s' - q: '%s'", token ? token : "NULL", q); + } + + /* No acceptable token found or q=0 */ + if (!token || token[0] == '\0' || + (q && strlen(q) >= 3 && strncmp("q=0.000", q, strlen(q)) == 0)) { ap_log_rerror(APLOG_MARK, APLOG_TRACE1, 0, r, - "Not compressing (no Accept-Encoding: gzip)"); + "Not compressing (no Accept-Encoding: gzip or q=0)"); ap_remove_output_filter(f); return ap_pass_brigade(f->next, bb); } @@ -781,7 +835,7 @@ static apr_status_t deflate_out_filter(ap_filter_t *f, */ /* If the entire Content-Encoding is "identity", we can replace it. */ - if (!encoding || !strcasecmp(encoding, "identity")) { + if (!encoding || !ap_cstr_casecmp(encoding, "identity")) { apr_table_setn(r->headers_out, "Content-Encoding", "gzip"); } else { @@ -794,7 +848,9 @@ static apr_status_t deflate_out_filter(ap_filter_t *f, } apr_table_unset(r->headers_out, "Content-Length"); apr_table_unset(r->headers_out, "Content-MD5"); - deflate_check_etag(r, "gzip"); + if (c->etag_opt != AP_DEFLATE_ETAG_NOCHANGE) { + deflate_check_etag(r, "gzip", c->etag_opt); + } /* For a 304 response, only change the headers */ if (r->status == HTTP_NOT_MODIFIED) { @@ -841,8 +897,7 @@ static apr_status_t deflate_out_filter(ap_filter_t *f, ctx->stream.avail_in = 0; /* should be zero already anyway */ /* flush the remaining data from the zlib buffers */ - flush_libz_buffer(ctx, c, f->c->bucket_alloc, deflate, Z_FINISH, - NO_UPDATE_CRC); + flush_libz_buffer(ctx, c, deflate, Z_FINISH, NO_UPDATE_CRC); buf = apr_palloc(r->pool, VALIDATION_SIZE); putLong((unsigned char *)&buf[0], ctx->crc); @@ -852,8 +907,10 @@ static apr_status_t deflate_out_filter(ap_filter_t *f, f->c->bucket_alloc); APR_BRIGADE_INSERT_TAIL(ctx->bb, b); ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(01384) - "Zlib: Compressed %ld to %ld : URL %s", - ctx->stream.total_in, ctx->stream.total_out, r->uri); + "Zlib: Compressed %" APR_UINT64_T_FMT + " to %" APR_UINT64_T_FMT " : URL %s", + (apr_uint64_t)ctx->stream.total_in, + (apr_uint64_t)ctx->stream.total_out, r->uri); /* leave notes for logging */ if (c->note_input_name) { @@ -866,7 +923,7 @@ static apr_status_t deflate_out_filter(ap_filter_t *f, if (c->note_output_name) { apr_table_setn(r->notes, c->note_output_name, - (ctx->stream.total_in > 0) + (ctx->stream.total_out > 0) ? apr_off_t_toa(r->pool, ctx->stream.total_out) : "-"); @@ -883,6 +940,10 @@ static apr_status_t deflate_out_filter(ap_filter_t *f, } deflateEnd(&ctx->stream); + + /* We've ended the libz stream, so remove ourselves. */ + ap_remove_output_filter(f); + /* No need for cleanup any longer */ apr_pool_cleanup_kill(r->pool, ctx, deflate_ctx_cleanup); @@ -893,15 +954,15 @@ static apr_status_t deflate_out_filter(ap_filter_t *f, /* Okay, we've seen the EOS. * Time to pass it along down the chain. 
*/ - return ap_pass_brigade(f->next, ctx->bb); + rv = ap_pass_brigade(f->next, ctx->bb); + apr_brigade_cleanup(ctx->bb); + return rv; } if (APR_BUCKET_IS_FLUSH(e)) { - apr_status_t rv; - /* flush the remaining data from the zlib buffers */ - zRC = flush_libz_buffer(ctx, c, f->c->bucket_alloc, deflate, - Z_SYNC_FLUSH, NO_UPDATE_CRC); + zRC = flush_libz_buffer(ctx, c, deflate, Z_SYNC_FLUSH, + NO_UPDATE_CRC); if (zRC != Z_OK) { ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(01385) "Zlib error %d flushing zlib output buffer (%s)", @@ -913,6 +974,7 @@ static apr_status_t deflate_out_filter(ap_filter_t *f, APR_BUCKET_REMOVE(e); APR_BRIGADE_INSERT_TAIL(ctx->bb, e); rv = ap_pass_brigade(f->next, ctx->bb); + apr_brigade_cleanup(ctx->bb); if (rv != APR_SUCCESS) { return rv; } @@ -930,7 +992,12 @@ static apr_status_t deflate_out_filter(ap_filter_t *f, } /* read */ - apr_bucket_read(e, &data, &len, APR_BLOCK_READ); + rv = apr_bucket_read(e, &data, &len, APR_BLOCK_READ); + if (rv) { + ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(10298) + "failed reading from %s bucket", e->type->name); + return rv; + } if (!len) { apr_bucket_delete(e); continue; @@ -947,21 +1014,15 @@ static apr_status_t deflate_out_filter(ap_filter_t *f, ctx->stream.next_in = (unsigned char *)data; /* We just lost const-ness, * but we'll just have to * trust zlib */ - ctx->stream.avail_in = len; + ctx->stream.avail_in = (int)len; while (ctx->stream.avail_in != 0) { if (ctx->stream.avail_out == 0) { - apr_status_t rv; - - ctx->stream.next_out = ctx->buffer; - len = c->bufferSize - ctx->stream.avail_out; + consume_buffer(ctx, c, c->bufferSize, NO_UPDATE_CRC, ctx->bb); - b = apr_bucket_heap_create((char *)ctx->buffer, len, - NULL, f->c->bucket_alloc); - APR_BRIGADE_INSERT_TAIL(ctx->bb, b); - ctx->stream.avail_out = c->bufferSize; /* Send what we have right now to the next filter. 
*/ rv = ap_pass_brigade(f->next, ctx->bb); + apr_brigade_cleanup(ctx->bb); if (rv != APR_SUCCESS) { return rv; } @@ -1258,44 +1319,40 @@ static apr_status_t deflate_in_filter(ap_filter_t *f, if (APR_BUCKET_IS_FLUSH(bkt)) { apr_bucket *tmp_b; - ctx->inflate_total += ctx->stream.avail_out; - zRC = inflate(&(ctx->stream), Z_SYNC_FLUSH); - ctx->inflate_total -= ctx->stream.avail_out; - if (zRC != Z_OK) { - inflateEnd(&ctx->stream); - ap_log_rerror(APLOG_MARK, APLOG_WARNING, 0, r, APLOGNO(01391) - "Zlib error %d inflating data (%s)", zRC, - ctx->stream.msg); - return APR_EGENERAL; - } + if (!ctx->done) { + ctx->inflate_total += ctx->stream.avail_out; + zRC = inflate(&(ctx->stream), Z_SYNC_FLUSH); + ctx->inflate_total -= ctx->stream.avail_out; + if (zRC != Z_OK) { + inflateEnd(&ctx->stream); + ap_log_rerror(APLOG_MARK, APLOG_WARNING, 0, r, APLOGNO(01391) + "Zlib error %d inflating data (%s)", zRC, + ctx->stream.msg); + return APR_EGENERAL; + } - if (inflate_limit && ctx->inflate_total > inflate_limit) { - inflateEnd(&ctx->stream); - ap_log_rerror(APLOG_MARK, APLOG_WARNING, 0, r, APLOGNO(02647) - "Inflated content length of %" APR_OFF_T_FMT - " is larger than the configured limit" - " of %" APR_OFF_T_FMT, - ctx->inflate_total, inflate_limit); - return APR_ENOSPC; - } - - if (!check_ratio(r, ctx, dc)) { - inflateEnd(&ctx->stream); - ap_log_rerror(APLOG_MARK, APLOG_WARNING, 0, r, APLOGNO(02805) - "Inflated content ratio is larger than the " - "configured limit %i by %i time(s)", - dc->ratio_limit, dc->ratio_burst); - return APR_EINVAL; - } + if (inflate_limit && ctx->inflate_total > inflate_limit) { + inflateEnd(&ctx->stream); + ap_log_rerror(APLOG_MARK, APLOG_WARNING, 0, r, APLOGNO(02647) + "Inflated content length of %" APR_OFF_T_FMT + " is larger than the configured limit" + " of %" APR_OFF_T_FMT, + ctx->inflate_total, inflate_limit); + return APR_ENOSPC; + } - len = c->bufferSize - ctx->stream.avail_out; - ctx->crc = crc32(ctx->crc, (const Bytef *)ctx->buffer, len); - tmp_b = apr_bucket_heap_create((char *)ctx->buffer, len, - NULL, f->c->bucket_alloc); - APR_BRIGADE_INSERT_TAIL(ctx->proc_bb, tmp_b); + if (!check_ratio(r, ctx, dc)) { + inflateEnd(&ctx->stream); + ap_log_rerror(APLOG_MARK, APLOG_WARNING, 0, r, APLOGNO(02805) + "Inflated content ratio is larger than the " + "configured limit %i by %i time(s)", + dc->ratio_limit, dc->ratio_burst); + return APR_EINVAL; + } - ctx->stream.next_out = ctx->buffer; - ctx->stream.avail_out = c->bufferSize; + consume_buffer(ctx, c, c->bufferSize - ctx->stream.avail_out, + UPDATE_CRC, ctx->proc_bb); + } /* Flush everything so far in the returning brigade, but continue * reading should EOS/more follow (don't lose them). 
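[Editorial note, not part of the patch: a recurring change in this mod_deflate rework is that every ap_pass_brigade(f->next, ctx->bb) is now followed by apr_brigade_cleanup(ctx->bb), because ctx->bb is a long-lived brigade reused across filter invocations. A minimal, hypothetical illustration of that pass-then-cleanup idiom; the helper name is invented.]

/* Hypothetical helper showing the pass-then-cleanup pattern applied
 * throughout this diff: the filter owns a reusable brigade, so it is
 * emptied after every pass to avoid carrying stale buckets into the
 * next invocation. */
#include "apr_buckets.h"
#include "util_filter.h"

static apr_status_t pass_and_reset(ap_filter_t *f, apr_bucket_brigade *bb)
{
    apr_status_t rv = ap_pass_brigade(f->next, bb);

    /* Empty our reusable brigade regardless of rv; buckets already
     * handed downstream must not be re-sent on the next call. */
    apr_brigade_cleanup(bb);
    return rv;
}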
@@ -1338,21 +1395,11 @@ static apr_status_t deflate_in_filter(ap_filter_t *f, ctx->stream.next_in = (unsigned char *)data; ctx->stream.avail_in = (int)len; - zRC = Z_OK; - if (!ctx->validation_buffer) { while (ctx->stream.avail_in != 0) { if (ctx->stream.avail_out == 0) { - apr_bucket *tmp_heap; - - ctx->stream.next_out = ctx->buffer; - len = c->bufferSize - ctx->stream.avail_out; - - ctx->crc = crc32(ctx->crc, (const Bytef *)ctx->buffer, len); - tmp_heap = apr_bucket_heap_create((char *)ctx->buffer, len, - NULL, f->c->bucket_alloc); - APR_BRIGADE_INSERT_TAIL(ctx->proc_bb, tmp_heap); - ctx->stream.avail_out = c->bufferSize; + consume_buffer(ctx, c, c->bufferSize, UPDATE_CRC, + ctx->proc_bb); } ctx->inflate_total += ctx->stream.avail_out; @@ -1395,7 +1442,6 @@ static apr_status_t deflate_in_filter(ap_filter_t *f, } if (ctx->validation_buffer) { - apr_bucket *tmp_heap; apr_size_t avail, valid; unsigned char *buf = ctx->validation_buffer; @@ -1419,17 +1465,13 @@ static apr_status_t deflate_in_filter(ap_filter_t *f, ctx->validation_buffer_length += valid; ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(01393) - "Zlib: Inflated %ld to %ld : URL %s", - ctx->stream.total_in, ctx->stream.total_out, - r->uri); + "Zlib: Inflated %" APR_UINT64_T_FMT + " to %" APR_UINT64_T_FMT " : URL %s", + (apr_uint64_t)ctx->stream.total_in, + (apr_uint64_t)ctx->stream.total_out, r->uri); - len = c->bufferSize - ctx->stream.avail_out; - - ctx->crc = crc32(ctx->crc, (const Bytef *)ctx->buffer, len); - tmp_heap = apr_bucket_heap_create((char *)ctx->buffer, len, - NULL, f->c->bucket_alloc); - APR_BRIGADE_INSERT_TAIL(ctx->proc_bb, tmp_heap); - ctx->stream.avail_out = c->bufferSize; + consume_buffer(ctx, c, c->bufferSize - ctx->stream.avail_out, + UPDATE_CRC, ctx->proc_bb); { unsigned long compCRC, compLen; @@ -1445,9 +1487,10 @@ static apr_status_t deflate_in_filter(ap_filter_t *f, if ((ctx->stream.total_out & 0xFFFFFFFF) != compLen) { inflateEnd(&ctx->stream); ap_log_rerror(APLOG_MARK, APLOG_WARNING, 0, r, APLOGNO(01395) - "Zlib: Length %ld of inflated data does " - "not match expected value %ld", - ctx->stream.total_out, compLen); + "Zlib: Length %" APR_UINT64_T_FMT + " of inflated data does not match" + " expected value %ld", + (apr_uint64_t)ctx->stream.total_out, compLen); return APR_EGENERAL; } } @@ -1474,16 +1517,8 @@ static apr_status_t deflate_in_filter(ap_filter_t *f, if (block == APR_BLOCK_READ && APR_BRIGADE_EMPTY(ctx->proc_bb) && ctx->stream.avail_out < c->bufferSize) { - apr_bucket *tmp_heap; - apr_size_t len; - ctx->stream.next_out = ctx->buffer; - len = c->bufferSize - ctx->stream.avail_out; - - ctx->crc = crc32(ctx->crc, (const Bytef *)ctx->buffer, len); - tmp_heap = apr_bucket_heap_create((char *)ctx->buffer, len, - NULL, f->c->bucket_alloc); - APR_BRIGADE_INSERT_TAIL(ctx->proc_bb, tmp_heap); - ctx->stream.avail_out = c->bufferSize; + consume_buffer(ctx, c, c->bufferSize - ctx->stream.avail_out, + UPDATE_CRC, ctx->proc_bb); } if (!APR_BRIGADE_EMPTY(ctx->proc_bb)) { @@ -1549,7 +1584,9 @@ static apr_status_t inflate_out_filter(ap_filter_t *f, */ apr_table_unset(r->headers_out, "Content-Length"); apr_table_unset(r->headers_out, "Content-MD5"); - deflate_check_etag(r, "gunzip"); + if (c->etag_opt != AP_DEFLATE_ETAG_NOCHANGE) { + deflate_check_etag(r, "gunzip", c->etag_opt); + } /* For a 304 response, only change the headers */ if (r->status == HTTP_NOT_MODIFIED) { @@ -1597,7 +1634,6 @@ static apr_status_t inflate_out_filter(ap_filter_t *f, while (!APR_BRIGADE_EMPTY(bb)) { const char *data; - apr_bucket 
*b; apr_size_t len; e = APR_BRIGADE_FIRST(bb); @@ -1619,11 +1655,12 @@ static apr_status_t inflate_out_filter(ap_filter_t *f, * fails, whereas in the deflate case you can empty a filled output * buffer and call it again until no more output can be created. */ - flush_libz_buffer(ctx, c, f->c->bucket_alloc, inflate, Z_SYNC_FLUSH, - UPDATE_CRC); + flush_libz_buffer(ctx, c, inflate, Z_SYNC_FLUSH, UPDATE_CRC); ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(01398) - "Zlib: Inflated %ld to %ld : URL %s", - ctx->stream.total_in, ctx->stream.total_out, r->uri); + "Zlib: Inflated %" APR_UINT64_T_FMT + " to %" APR_UINT64_T_FMT " : URL %s", + (apr_uint64_t)ctx->stream.total_in, + (apr_uint64_t)ctx->stream.total_out, r->uri); if (ctx->validation_buffer_length == VALIDATION_SIZE) { unsigned long compCRC, compLen; @@ -1660,15 +1697,14 @@ static apr_status_t inflate_out_filter(ap_filter_t *f, * Okay, we've seen the EOS. * Time to pass it along down the chain. */ - return ap_pass_brigade(f->next, ctx->bb); + rv = ap_pass_brigade(f->next, ctx->bb); + apr_brigade_cleanup(ctx->bb); + return rv; } if (APR_BUCKET_IS_FLUSH(e)) { - apr_status_t rv; - /* flush the remaining data from the zlib buffers */ - zRC = flush_libz_buffer(ctx, c, f->c->bucket_alloc, inflate, - Z_SYNC_FLUSH, UPDATE_CRC); + zRC = flush_libz_buffer(ctx, c, inflate, Z_SYNC_FLUSH, UPDATE_CRC); if (zRC == Z_STREAM_END) { if (ctx->validation_buffer == NULL) { ctx->validation_buffer = apr_pcalloc(f->r->pool, @@ -1686,6 +1722,7 @@ static apr_status_t inflate_out_filter(ap_filter_t *f, APR_BUCKET_REMOVE(e); APR_BRIGADE_INSERT_TAIL(ctx->bb, e); rv = ap_pass_brigade(f->next, ctx->bb); + apr_brigade_cleanup(ctx->bb); if (rv != APR_SUCCESS) { return rv; } @@ -1802,16 +1839,11 @@ static apr_status_t inflate_out_filter(ap_filter_t *f, while (ctx->stream.avail_in != 0) { if (ctx->stream.avail_out == 0) { - ctx->stream.next_out = ctx->buffer; - len = c->bufferSize - ctx->stream.avail_out; - - ctx->crc = crc32(ctx->crc, (const Bytef *)ctx->buffer, len); - b = apr_bucket_heap_create((char *)ctx->buffer, len, - NULL, f->c->bucket_alloc); - APR_BRIGADE_INSERT_TAIL(ctx->bb, b); - ctx->stream.avail_out = c->bufferSize; + consume_buffer(ctx, c, c->bufferSize, UPDATE_CRC, ctx->bb); + /* Send what we have right now to the next filter. 
*/ rv = ap_pass_brigade(f->next, ctx->bb); + apr_brigade_cleanup(ctx->bb); if (rv != APR_SUCCESS) { return rv; } @@ -1826,6 +1858,7 @@ static apr_status_t inflate_out_filter(ap_filter_t *f, return APR_EGENERAL; } + /* Don't check length limits on inflate_out */ if (!check_ratio(r, ctx, dc)) { ap_log_rerror(APLOG_MARK, APLOG_WARNING, 0, r, APLOGNO(02650) "Inflated content ratio is larger than the " @@ -1868,7 +1901,6 @@ static apr_status_t inflate_out_filter(ap_filter_t *f, static int mod_deflate_post_config(apr_pool_t *pconf, apr_pool_t *plog, apr_pool_t *ptemp, server_rec *s) { - mod_deflate_ssl_var = APR_RETRIEVE_OPTIONAL_FN(ssl_var_lookup); return OK; } @@ -1904,6 +1936,9 @@ static const command_rec deflate_filter_cmds[] = { AP_INIT_TAKE1("DeflateInflateRatioBurst", deflate_set_inflate_ratio_burst, NULL, OR_ALL, "Set the maximum number of following inflate ratios above limit " "(default: " APR_STRINGIFY(AP_INFLATE_RATIO_BURST) ")"), + AP_INIT_TAKE1("DeflateAlterEtag", deflate_set_etag, NULL, RSRC_CONF, + "Set how mod_deflate should modify ETAG response headers: 'AddSuffix' (default), 'NoChange' (2.2.x behavior), 'Remove'"), + {NULL} }; diff --git a/modules/filters/mod_ext_filter.c b/modules/filters/mod_ext_filter.c index 7aac19d..7afd8dd 100644 --- a/modules/filters/mod_ext_filter.c +++ b/modules/filters/mod_ext_filter.c @@ -655,8 +655,7 @@ static apr_status_t drain_available_output(ap_filter_t *f, if (rv && !APR_STATUS_IS_EAGAIN(rv)) lvl = APLOG_DEBUG; ap_log_rerror(APLOG_MARK, lvl, rv, r, APLOGNO(01460) - "apr_file_read(child output), len %" APR_SIZE_T_FMT, - !rv ? len : -1); + "apr_file_read(child output), len %" APR_SIZE_T_FMT, len); if (rv != APR_SUCCESS) { return rv; } @@ -810,8 +809,7 @@ static int ef_unified_filter(ap_filter_t *f, apr_bucket_brigade *bb) if (rv && !APR_STATUS_IS_EOF(rv) && !APR_STATUS_IS_EAGAIN(rv)) lvl = APLOG_ERR; ap_log_rerror(APLOG_MARK, lvl, rv, r, APLOGNO(01466) - "apr_file_read(child output), len %" APR_SIZE_T_FMT, - !rv ? 
len : -1); + "apr_file_read(child output), len %" APR_SIZE_T_FMT, len); if (APR_STATUS_IS_EAGAIN(rv)) { if (eos) { /* should not occur, because we have an APR timeout in place */ diff --git a/modules/filters/mod_include.c b/modules/filters/mod_include.c index a46a944..584d8fb 100644 --- a/modules/filters/mod_include.c +++ b/modules/filters/mod_include.c @@ -1967,25 +1967,25 @@ static apr_status_t handle_echo(include_ctx_t *ctx, ap_filter_t *f, token = apr_strtok(d, ", \t", &last); while (token) { - if (!strcasecmp(token, "none")) { + if (!ap_cstr_casecmp(token, "none")) { /* do nothing */ } - else if (!strcasecmp(token, "url")) { + else if (!ap_cstr_casecmp(token, "url")) { char *buf = apr_pstrdup(ctx->pool, echo_text); ap_unescape_url(buf); echo_text = buf; } - else if (!strcasecmp(token, "urlencoded")) { + else if (!ap_cstr_casecmp(token, "urlencoded")) { char *buf = apr_pstrdup(ctx->pool, echo_text); ap_unescape_urlencoded(buf); echo_text = buf; } - else if (!strcasecmp(token, "entity")) { + else if (!ap_cstr_casecmp(token, "entity")) { char *buf = apr_pstrdup(ctx->pool, echo_text); decodehtml(buf); echo_text = buf; } - else if (!strcasecmp(token, "base64")) { + else if (!ap_cstr_casecmp(token, "base64")) { echo_text = ap_pbase64decode(ctx->dpool, echo_text); } else { @@ -2003,19 +2003,19 @@ static apr_status_t handle_echo(include_ctx_t *ctx, ap_filter_t *f, token = apr_strtok(e, ", \t", &last); while (token) { - if (!strcasecmp(token, "none")) { + if (!ap_cstr_casecmp(token, "none")) { /* do nothing */ } - else if (!strcasecmp(token, "url")) { + else if (!ap_cstr_casecmp(token, "url")) { echo_text = ap_escape_uri(ctx->dpool, echo_text); } - else if (!strcasecmp(token, "urlencoded")) { + else if (!ap_cstr_casecmp(token, "urlencoded")) { echo_text = ap_escape_urlencoded(ctx->dpool, echo_text); } - else if (!strcasecmp(token, "entity")) { + else if (!ap_cstr_casecmp(token, "entity")) { echo_text = ap_escape_html2(ctx->dpool, echo_text, 0); } - else if (!strcasecmp(token, "base64")) { + else if (!ap_cstr_casecmp(token, "base64")) { char *buf; buf = ap_pbase64encode(ctx->dpool, (char *)echo_text); echo_text = buf; @@ -2605,25 +2605,25 @@ static apr_status_t handle_set(include_ctx_t *ctx, ap_filter_t *f, token = apr_strtok(d, ", \t", &last); while (token) { - if (!strcasecmp(token, "none")) { + if (!ap_cstr_casecmp(token, "none")) { /* do nothing */ } - else if (!strcasecmp(token, "url")) { + else if (!ap_cstr_casecmp(token, "url")) { char *buf = apr_pstrdup(ctx->pool, parsed_string); ap_unescape_url(buf); parsed_string = buf; } - else if (!strcasecmp(token, "urlencoded")) { + else if (!ap_cstr_casecmp(token, "urlencoded")) { char *buf = apr_pstrdup(ctx->pool, parsed_string); ap_unescape_urlencoded(buf); parsed_string = buf; } - else if (!strcasecmp(token, "entity")) { + else if (!ap_cstr_casecmp(token, "entity")) { char *buf = apr_pstrdup(ctx->pool, parsed_string); decodehtml(buf); parsed_string = buf; } - else if (!strcasecmp(token, "base64")) { + else if (!ap_cstr_casecmp(token, "base64")) { parsed_string = ap_pbase64decode(ctx->dpool, parsed_string); } else { @@ -2641,19 +2641,19 @@ static apr_status_t handle_set(include_ctx_t *ctx, ap_filter_t *f, token = apr_strtok(e, ", \t", &last); while (token) { - if (!strcasecmp(token, "none")) { + if (!ap_cstr_casecmp(token, "none")) { /* do nothing */ } - else if (!strcasecmp(token, "url")) { + else if (!ap_cstr_casecmp(token, "url")) { parsed_string = ap_escape_uri(ctx->dpool, parsed_string); } - else if (!strcasecmp(token, "urlencoded")) { + 
else if (!ap_cstr_casecmp(token, "urlencoded")) { parsed_string = ap_escape_urlencoded(ctx->dpool, parsed_string); } - else if (!strcasecmp(token, "entity")) { + else if (!ap_cstr_casecmp(token, "entity")) { parsed_string = ap_escape_html2(ctx->dpool, parsed_string, 0); } - else if (!strcasecmp(token, "base64")) { + else if (!ap_cstr_casecmp(token, "base64")) { char *buf; buf = ap_pbase64encode(ctx->dpool, (char *)parsed_string); parsed_string = buf; @@ -3855,6 +3855,7 @@ static apr_status_t includes_filter(ap_filter_t *f, apr_bucket_brigade *b) ctx->intern = intern = apr_palloc(r->pool, sizeof(*ctx->intern)); ctx->pool = r->pool; apr_pool_create(&ctx->dpool, ctx->pool); + apr_pool_tag(ctx->dpool, "includes_dpool"); /* runtime data */ intern->tmp_bb = apr_brigade_create(ctx->pool, f->c->bucket_alloc); diff --git a/modules/filters/mod_proxy_html.c b/modules/filters/mod_proxy_html.c index ea6bf03..7783da1 100644 --- a/modules/filters/mod_proxy_html.c +++ b/modules/filters/mod_proxy_html.c @@ -29,9 +29,28 @@ #define VERBOSEB(x) if (verbose) {x} #endif +/* libxml2 includes unicode/[...].h files which uses C++ comments */ +#if defined(__clang__) +#pragma clang diagnostic push +#pragma clang diagnostic warning "-Wcomment" +#elif defined(__GNUC__) +#if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6) +#pragma GCC diagnostic push +#pragma GCC diagnostic warning "-Wcomment" +#endif +#endif + /* libxml2 */ #include +#if defined(__clang__) +#pragma clang diagnostic pop +#elif defined(__GNUC__) +#if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6) +#pragma GCC diagnostic pop +#endif +#endif + #include "http_protocol.h" #include "http_config.h" #include "http_log.h" @@ -88,7 +107,7 @@ typedef struct { const char *doctype; const char *etag; unsigned int flags; - size_t bufsz; + int bufsz; apr_hash_t *links; apr_array_header_t *events; const char *charset_out; @@ -660,7 +679,7 @@ static meta *metafix(request_rec *r, const char *buf, apr_size_t len) while (!apr_isalpha(*++p)); for (q = p; apr_isalnum(*q) || (*q == '-'); ++q); header = apr_pstrmemdup(r->pool, p, q-p); - if (strncasecmp(header, "Content-", 8)) { + if (ap_cstr_casecmpn(header, "Content-", 8)) { /* find content=... string */ p = apr_strmatch(seek_content, buf+offs+pmatch[0].rm_so, pmatch[0].rm_eo - pmatch[0].rm_so); @@ -677,7 +696,7 @@ static meta *metafix(request_rec *r, const char *buf, apr_size_t len) if ((*p == '\'') || (*p == '"')) { delim = *p++; for (q = p; *q && *q != delim; ++q); - /* No terminating delimiter found? Skip the boggus directive */ + /* No terminating delimiter found? 
Skip the bogus directive */ if (*q != delim) break; } else { @@ -688,7 +707,7 @@ static meta *metafix(request_rec *r, const char *buf, apr_size_t len) } } } - else if (!strncasecmp(header, "Content-Type", 12)) { + else if (!ap_cstr_casecmpn(header, "Content-Type", 12)) { ret = apr_palloc(r->pool, sizeof(meta)); ret->start = offs+pmatch[0].rm_so; ret->end = offs+pmatch[0].rm_eo; @@ -817,8 +836,8 @@ static saxctxt *check_filter_init (ap_filter_t *f) else if (!f->r->content_type) { errmsg = "No content-type; bailing out of proxy-html filter"; } - else if (strncasecmp(f->r->content_type, "text/html", 9) && - strncasecmp(f->r->content_type, + else if (ap_cstr_casecmpn(f->r->content_type, "text/html", 9) && + ap_cstr_casecmpn(f->r->content_type, "application/xhtml+xml", 21)) { errmsg = "Non-HTML content; not inserting proxy-html filter"; } diff --git a/modules/filters/mod_reflector.c b/modules/filters/mod_reflector.c index 961092d..5979cb8 100644 --- a/modules/filters/mod_reflector.c +++ b/modules/filters/mod_reflector.c @@ -91,11 +91,16 @@ static int reflector_handler(request_rec * r) /* reflect the content length, if present */ if ((content_length = apr_table_get(r->headers_in, "Content-Length"))) { - apr_off_t offset; + apr_off_t clen; - apr_strtoff(&offset, content_length, NULL, 10); - ap_set_content_length(r, offset); + if (!ap_parse_strict_length(&clen, content_length)) { + ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(10243) + "reflector_handler: invalid content-length '%s'", + content_length); + return HTTP_BAD_REQUEST; + } + ap_set_content_length(r, clen); } /* reflect the content type, if present */ diff --git a/modules/filters/mod_reqtimeout.c b/modules/filters/mod_reqtimeout.c index 538e9b1..0ebd78a 100644 --- a/modules/filters/mod_reqtimeout.c +++ b/modules/filters/mod_reqtimeout.c @@ -29,23 +29,29 @@ module AP_MODULE_DECLARE_DATA reqtimeout_module; #define UNSET -1 -#define MRT_DEFAULT_HEADER_TIMEOUT 20 -#define MRT_DEFAULT_HEADER_MAX_TIMEOUT 40 -#define MRT_DEFAULT_HEADER_MIN_RATE 500 -#define MRT_DEFAULT_BODY_TIMEOUT 20 -#define MRT_DEFAULT_BODY_MAX_TIMEOUT 0 -#define MRT_DEFAULT_BODY_MIN_RATE 500 +#define MRT_DEFAULT_handshake_TIMEOUT 0 /* disabled */ +#define MRT_DEFAULT_handshake_MAX_TIMEOUT 0 +#define MRT_DEFAULT_handshake_MIN_RATE 0 +#define MRT_DEFAULT_header_TIMEOUT 20 +#define MRT_DEFAULT_header_MAX_TIMEOUT 40 +#define MRT_DEFAULT_header_MIN_RATE 500 +#define MRT_DEFAULT_body_TIMEOUT 20 +#define MRT_DEFAULT_body_MAX_TIMEOUT 0 +#define MRT_DEFAULT_body_MIN_RATE 500 typedef struct { - int header_timeout; /* timeout for reading the req hdrs in secs */ - int header_max_timeout; /* max timeout for req hdrs in secs */ - int header_min_rate; /* min rate for reading req hdrs in bytes/s */ - apr_time_t header_rate_factor; - int body_timeout; /* timeout for reading the req body in secs */ - int body_max_timeout; /* max timeout for req body in secs */ - int body_min_rate; /* min rate for reading req body in bytes/s */ - apr_time_t body_rate_factor; + int timeout; /* timeout in secs */ + int max_timeout; /* max timeout in secs */ + int min_rate; /* min rate in bytes/s */ + apr_time_t rate_factor; /* scale factor (#usecs per min_rate) */ +} reqtimeout_stage_t; + +typedef struct +{ + reqtimeout_stage_t handshake; /* Handshaking (TLS) */ + reqtimeout_stage_t header; /* Reading the HTTP header */ + reqtimeout_stage_t body; /* Reading the HTTP body */ } reqtimeout_srv_cfg; /* this struct is used both as conn_config and as filter context */ @@ -53,17 +59,15 @@ typedef struct { 
apr_time_t timeout_at; apr_time_t max_timeout_at; - int min_rate; - int new_timeout; - int new_max_timeout; + reqtimeout_stage_t cur_stage; int in_keep_alive; char *type; apr_socket_t *socket; - apr_time_t rate_factor; apr_bucket_brigade *tmpbb; } reqtimeout_con_cfg; static const char *const reqtimeout_filter_name = "reqtimeout"; +static int default_handshake_rate_factor; static int default_header_rate_factor; static int default_body_rate_factor; @@ -75,7 +79,7 @@ static void extend_timeout(reqtimeout_con_cfg *ccfg, apr_bucket_brigade *bb) if (apr_brigade_length(bb, 0, &len) != APR_SUCCESS || len <= 0) return; - new_timeout_at = ccfg->timeout_at + len * ccfg->rate_factor; + new_timeout_at = ccfg->timeout_at + len * ccfg->cur_stage.rate_factor; if (ccfg->max_timeout_at > 0 && new_timeout_at > ccfg->max_timeout_at) { ccfg->timeout_at = ccfg->max_timeout_at; } @@ -190,14 +194,14 @@ static apr_status_t reqtimeout_filter(ap_filter_t *f, apr_brigade_cleanup(bb); } - if (ccfg->new_timeout > 0) { + if (ccfg->cur_stage.timeout > 0) { /* set new timeout */ now = apr_time_now(); - ccfg->timeout_at = now + apr_time_from_sec(ccfg->new_timeout); - ccfg->new_timeout = 0; - if (ccfg->new_max_timeout > 0) { - ccfg->max_timeout_at = now + apr_time_from_sec(ccfg->new_max_timeout); - ccfg->new_max_timeout = 0; + ccfg->timeout_at = now + apr_time_from_sec(ccfg->cur_stage.timeout); + ccfg->cur_stage.timeout = 0; + if (ccfg->cur_stage.max_timeout > 0) { + ccfg->max_timeout_at = now + apr_time_from_sec(ccfg->cur_stage.max_timeout); + ccfg->cur_stage.max_timeout = 0; } } else if (ccfg->timeout_at == 0) { @@ -216,7 +220,7 @@ static apr_status_t reqtimeout_filter(ap_filter_t *f, if (block == APR_NONBLOCK_READ || mode == AP_MODE_INIT || mode == AP_MODE_EATCRLF) { rv = ap_get_brigade(f->next, bb, mode, block, readbytes); - if (ccfg->min_rate > 0 && rv == APR_SUCCESS) { + if (ccfg->cur_stage.rate_factor && rv == APR_SUCCESS) { extend_timeout(ccfg, bb); } return rv; @@ -250,7 +254,7 @@ static apr_status_t reqtimeout_filter(ap_filter_t *f, } if (!APR_BRIGADE_EMPTY(bb)) { - if (ccfg->min_rate > 0) { + if (ccfg->cur_stage.rate_factor) { extend_timeout(ccfg, bb); } @@ -309,9 +313,9 @@ static apr_status_t reqtimeout_filter(ap_filter_t *f, rv = ap_get_brigade(f->next, bb, mode, block, readbytes); /* Don't extend the timeout in speculative mode, wait for * the real (relevant) bytes to be asked later, within the - * currently alloted time. + * currently allotted time. 
*/ - if (ccfg->min_rate > 0 && rv == APR_SUCCESS + if (ccfg->cur_stage.rate_factor && rv == APR_SUCCESS && mode != AP_MODE_SPECULATIVE) { extend_timeout(ccfg, bb); } @@ -350,6 +354,19 @@ static apr_status_t reqtimeout_eor(ap_filter_t *f, apr_bucket_brigade *bb) return ap_pass_brigade(f->next, bb); } +#define INIT_STAGE(cfg, ccfg, stage) do { \ + if (cfg->stage.timeout != UNSET) { \ + ccfg->cur_stage.timeout = cfg->stage.timeout; \ + ccfg->cur_stage.max_timeout = cfg->stage.max_timeout; \ + ccfg->cur_stage.rate_factor = cfg->stage.rate_factor; \ + } \ + else { \ + ccfg->cur_stage.timeout = MRT_DEFAULT_##stage##_TIMEOUT; \ + ccfg->cur_stage.max_timeout = MRT_DEFAULT_##stage##_MAX_TIMEOUT; \ + ccfg->cur_stage.rate_factor = default_##stage##_rate_factor; \ + } \ +} while (0) + static int reqtimeout_init(conn_rec *c) { reqtimeout_con_cfg *ccfg; @@ -358,7 +375,11 @@ static int reqtimeout_init(conn_rec *c) cfg = ap_get_module_config(c->base_server->module_config, &reqtimeout_module); AP_DEBUG_ASSERT(cfg != NULL); - if (cfg->header_timeout == 0 && cfg->body_timeout == 0) { + + /* For compatibility, handshake timeout is disabled when UNSET (< 0) */ + if (cfg->handshake.timeout <= 0 + && cfg->header.timeout == 0 + && cfg->body.timeout == 0) { /* disabled for this vhost */ return DECLINED; } @@ -369,6 +390,11 @@ static int reqtimeout_init(conn_rec *c) ap_set_module_config(c->conn_config, &reqtimeout_module, ccfg); ap_add_output_filter(reqtimeout_filter_name, ccfg, NULL, c); ap_add_input_filter(reqtimeout_filter_name, ccfg, NULL, c); + + ccfg->type = "handshake"; + if (cfg->handshake.timeout > 0) { + INIT_STAGE(cfg, ccfg, handshake); + } } /* we are not handling the connection, we just do initialization */ @@ -393,22 +419,11 @@ static void reqtimeout_before_header(request_rec *r, conn_rec *c) /* (Re)set the state for this new request, but ccfg->socket and * ccfg->tmpbb which have the lifetime of the connection. 
*/ + ccfg->type = "header"; ccfg->timeout_at = 0; ccfg->max_timeout_at = 0; ccfg->in_keep_alive = (c->keepalives > 0); - ccfg->type = "header"; - if (cfg->header_timeout != UNSET) { - ccfg->new_timeout = cfg->header_timeout; - ccfg->new_max_timeout = cfg->header_max_timeout; - ccfg->min_rate = cfg->header_min_rate; - ccfg->rate_factor = cfg->header_rate_factor; - } - else { - ccfg->new_timeout = MRT_DEFAULT_HEADER_TIMEOUT; - ccfg->new_max_timeout = MRT_DEFAULT_HEADER_MAX_TIMEOUT; - ccfg->min_rate = MRT_DEFAULT_HEADER_MIN_RATE; - ccfg->rate_factor = default_header_rate_factor; - } + INIT_STAGE(cfg, ccfg, header); } static int reqtimeout_before_body(request_rec *r) @@ -421,64 +436,61 @@ static int reqtimeout_before_body(request_rec *r) /* not configured for this connection */ return OK; } - cfg = ap_get_module_config(r->connection->base_server->module_config, - &reqtimeout_module); + cfg = ap_get_module_config(r->server->module_config, + &reqtimeout_module); AP_DEBUG_ASSERT(cfg != NULL); + ccfg->type = "body"; ccfg->timeout_at = 0; ccfg->max_timeout_at = 0; - ccfg->type = "body"; if (r->method_number == M_CONNECT) { /* disabled for a CONNECT request */ - ccfg->new_timeout = 0; - } - else if (cfg->body_timeout != UNSET) { - ccfg->new_timeout = cfg->body_timeout; - ccfg->new_max_timeout = cfg->body_max_timeout; - ccfg->min_rate = cfg->body_min_rate; - ccfg->rate_factor = cfg->body_rate_factor; + ccfg->cur_stage.timeout = 0; } else { - ccfg->new_timeout = MRT_DEFAULT_BODY_TIMEOUT; - ccfg->new_max_timeout = MRT_DEFAULT_BODY_MAX_TIMEOUT; - ccfg->min_rate = MRT_DEFAULT_BODY_MIN_RATE; - ccfg->rate_factor = default_body_rate_factor; + INIT_STAGE(cfg, ccfg, body); } return OK; } +#define UNSET_STAGE(cfg, stage) do { \ + cfg->stage.timeout = UNSET; \ + cfg->stage.max_timeout = UNSET; \ + cfg->stage.min_rate = UNSET; \ +} while (0) + static void *reqtimeout_create_srv_config(apr_pool_t *p, server_rec *s) { reqtimeout_srv_cfg *cfg = apr_pcalloc(p, sizeof(reqtimeout_srv_cfg)); - cfg->header_timeout = UNSET; - cfg->header_max_timeout = UNSET; - cfg->header_min_rate = UNSET; - cfg->body_timeout = UNSET; - cfg->body_max_timeout = UNSET; - cfg->body_min_rate = UNSET; + UNSET_STAGE(cfg, handshake); + UNSET_STAGE(cfg, header); + UNSET_STAGE(cfg, body); return cfg; } -#define MERGE_INT(cfg, b, a, val) cfg->val = (a->val == UNSET) ? b->val : a->val; +#define MERGE_INT(cfg, base, add, val) \ + cfg->val = (add->val == UNSET) ? base->val : add->val +#define MERGE_STAGE(cfg, base, add, stage) do { \ + MERGE_INT(cfg, base, add, stage.timeout); \ + MERGE_INT(cfg, base, add, stage.max_timeout); \ + MERGE_INT(cfg, base, add, stage.min_rate); \ + cfg->stage.rate_factor = (cfg->stage.min_rate == UNSET) \ + ? base->stage.rate_factor \ + : add->stage.rate_factor; \ +} while (0) + static void *reqtimeout_merge_srv_config(apr_pool_t *p, void *base_, void *add_) { reqtimeout_srv_cfg *base = base_; reqtimeout_srv_cfg *add = add_; reqtimeout_srv_cfg *cfg = apr_pcalloc(p, sizeof(reqtimeout_srv_cfg)); - MERGE_INT(cfg, base, add, header_timeout); - MERGE_INT(cfg, base, add, header_max_timeout); - MERGE_INT(cfg, base, add, header_min_rate); - MERGE_INT(cfg, base, add, body_timeout); - MERGE_INT(cfg, base, add, body_max_timeout); - MERGE_INT(cfg, base, add, body_min_rate); - - cfg->header_rate_factor = (cfg->header_min_rate == UNSET) ? - base->header_rate_factor : add->header_rate_factor; - cfg->body_rate_factor = (cfg->body_min_rate == UNSET) ? 
- base->body_rate_factor : add->body_rate_factor; + MERGE_STAGE(cfg, base, add, handshake); + MERGE_STAGE(cfg, base, add, header); + MERGE_STAGE(cfg, base, add, body); + return cfg; } @@ -506,66 +518,59 @@ static const char *set_reqtimeout_param(reqtimeout_srv_cfg *conf, { const char *ret = NULL; char *rate_str = NULL, *initial_str, *max_str = NULL; - int rate = 0, initial = 0, max = 0; - enum { PARAM_HEADER, PARAM_BODY } type; + reqtimeout_stage_t *stage; - if (!strcasecmp(key, "header")) { - type = PARAM_HEADER; + if (!strcasecmp(key, "handshake")) { + stage = &conf->handshake; + } + else if (!strcasecmp(key, "header")) { + stage = &conf->header; } else if (!strcasecmp(key, "body")) { - type = PARAM_BODY; + stage = &conf->body; } else { return "Unknown RequestReadTimeout parameter"; } + memset(stage, 0, sizeof(*stage)); + if ((rate_str = ap_strcasestr(val, ",minrate="))) { initial_str = apr_pstrndup(p, val, rate_str - val); rate_str += strlen(",minrate="); - ret = parse_int(p, rate_str, &rate); + ret = parse_int(p, rate_str, &stage->min_rate); if (ret) return ret; - if (rate == 0) + if (stage->min_rate == 0) return "Minimum data rate must be larger than 0"; if ((max_str = strchr(initial_str, '-'))) { *max_str++ = '\0'; - ret = parse_int(p, max_str, &max); + ret = parse_int(p, max_str, &stage->max_timeout); if (ret) return ret; } - ret = parse_int(p, initial_str, &initial); + ret = parse_int(p, initial_str, &stage->timeout); } else { if (ap_strchr_c(val, '-')) return "Must set MinRate option if using timeout range"; - ret = parse_int(p, val, &initial); + ret = parse_int(p, val, &stage->timeout); } - if (ret) return ret; - if (max && initial >= max) { + if (stage->max_timeout && stage->timeout >= stage->max_timeout) { return "Maximum timeout must be larger than initial timeout"; } - if (type == PARAM_HEADER) { - conf->header_timeout = initial; - conf->header_max_timeout = max; - conf->header_min_rate = rate; - if (rate) - conf->header_rate_factor = apr_time_from_sec(1) / rate; - } - else { - conf->body_timeout = initial; - conf->body_max_timeout = max; - conf->body_min_rate = rate; - if (rate) - conf->body_rate_factor = apr_time_from_sec(1) / rate; + if (stage->min_rate) { + stage->rate_factor = apr_time_from_sec(1) / stage->min_rate; } - return ret; + + return NULL; } static const char *set_reqtimeouts(cmd_parms *cmd, void *mconfig, @@ -603,8 +608,7 @@ static void reqtimeout_hooks(apr_pool_t *pool) { /* * mod_ssl is AP_FTYPE_CONNECTION + 5 and mod_reqtimeout needs to - * be called before mod_ssl. Otherwise repeated reads during the ssl - * handshake can prevent the timeout from triggering. + * be called before mod_ssl for the handshake stage to catch SSL traffic. */ ap_register_input_filter(reqtimeout_filter_name, reqtimeout_filter, NULL, AP_FTYPE_CONNECTION + 8); @@ -621,28 +625,37 @@ static void reqtimeout_hooks(apr_pool_t *pool) * mod_reqtimeout needs to be called before ap_process_http_request (which * is run at APR_HOOK_REALLY_LAST) but after all other protocol modules. * This ensures that it only influences normal http connections and not - * e.g. mod_ftp. Also, if mod_reqtimeout used the pre_connection hook, it - * would be inserted on mod_proxy's backend connections. + * e.g. mod_ftp. We still process it first though, for the handshake stage + * to work with/before mod_ssl, but since it's disabled by default it won't + * influence non-HTTP modules unless configured explicitly. 
Also, if + * mod_reqtimeout used the pre_connection hook, it would be inserted on + * mod_proxy's backend connections, and we don't want this. */ - ap_hook_process_connection(reqtimeout_init, NULL, NULL, APR_HOOK_LAST); + ap_hook_process_connection(reqtimeout_init, NULL, NULL, APR_HOOK_FIRST); ap_hook_pre_read_request(reqtimeout_before_header, NULL, NULL, APR_HOOK_MIDDLE); ap_hook_post_read_request(reqtimeout_before_body, NULL, NULL, APR_HOOK_MIDDLE); -#if MRT_DEFAULT_HEADER_MIN_RATE > 0 - default_header_rate_factor = apr_time_from_sec(1) / MRT_DEFAULT_HEADER_MIN_RATE; +#if MRT_DEFAULT_handshake_MIN_RATE + default_handshake_rate_factor = apr_time_from_sec(1) / + MRT_DEFAULT_handshake_MIN_RATE; +#endif +#if MRT_DEFAULT_header_MIN_RATE + default_header_rate_factor = apr_time_from_sec(1) / + MRT_DEFAULT_header_MIN_RATE; #endif -#if MRT_DEFAULT_BODY_MIN_RATE > 0 - default_body_rate_factor = apr_time_from_sec(1) / MRT_DEFAULT_BODY_MIN_RATE; +#if MRT_DEFAULT_body_MIN_RATE + default_body_rate_factor = apr_time_from_sec(1) / + MRT_DEFAULT_body_MIN_RATE; #endif } static const command_rec reqtimeout_cmds[] = { AP_INIT_RAW_ARGS("RequestReadTimeout", set_reqtimeouts, NULL, RSRC_CONF, - "Set various timeout parameters for reading request " - "headers and body"), + "Set various timeout parameters for TLS handshake and/or " + "reading request headers and body"), {NULL} }; diff --git a/modules/filters/mod_request.c b/modules/filters/mod_request.c index 21db7de..1768edc 100644 --- a/modules/filters/mod_request.c +++ b/modules/filters/mod_request.c @@ -73,10 +73,8 @@ static apr_status_t keep_body_filter(ap_filter_t *f, apr_bucket_brigade *b, apr_bucket *bucket; apr_off_t len = 0; - if (!ctx) { const char *lenp; - char *endstr = NULL; request_dir_conf *dconf = ap_get_module_config(f->r->per_dir_config, &request_module); @@ -93,13 +91,12 @@ static apr_status_t keep_body_filter(ap_filter_t *f, apr_bucket_brigade *b, if (lenp) { /* Protects against over/underflow, non-digit chars in the - * string (excluding leading space) (the endstr checks) - * and a negative number. */ - if (apr_strtoff(&ctx->remaining, lenp, &endstr, 10) - || endstr == lenp || *endstr || ctx->remaining < 0) { - + * string, leading plus/minus signs, trailing characters and + * a negative number. + */ + if (!ap_parse_strict_length(&ctx->remaining, lenp)) { ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, f->r, APLOGNO(01411) - "Invalid Content-Length"); + "Invalid Content-Length '%s'", lenp); ap_remove_input_filter(f); return bail_out_on_error(b, f, HTTP_REQUEST_ENTITY_TOO_LARGE); @@ -121,7 +118,6 @@ static apr_status_t keep_body_filter(ap_filter_t *f, apr_bucket_brigade *b, f->r->kept_body = apr_brigade_create(f->r->pool, f->r->connection->bucket_alloc); ctx->remaining = dconf->keep_body; - } /* get the brigade from upstream, and read it in to get its length */ @@ -264,8 +260,8 @@ static apr_status_t kept_body_filter(ap_filter_t *f, apr_bucket_brigade *b, ctx->remaining -= readbytes; ctx->offset += readbytes; - return APR_SUCCESS; + return APR_SUCCESS; } /** @@ -311,18 +307,18 @@ static void ap_request_insert_filter(request_rec * r) NULL, r, r->connection); } } - } -/** - * Remove the kept_body and keep body filters from this specific request. +/* + * Remove the kept_body and keep_body filters from this specific request. 
*/ -static void ap_request_remove_filter(request_rec * r) +static void ap_request_remove_filter(request_rec *r) { - ap_filter_t * f = r->input_filters; + ap_filter_t *f = r->input_filters; + while (f) { if (f->frec->filter_func.in_func == kept_body_filter || - f->frec->filter_func.in_func == keep_body_filter) { + f->frec->filter_func.in_func == keep_body_filter) { ap_remove_input_filter(f); } f = f->next; diff --git a/modules/filters/mod_sed.c b/modules/filters/mod_sed.c index 346c210..12cb04a 100644 --- a/modules/filters/mod_sed.c +++ b/modules/filters/mod_sed.c @@ -51,7 +51,7 @@ typedef struct sed_filter_ctxt apr_bucket_brigade *bbinp; char *outbuf; char *curoutbuf; - int bufsize; + apr_size_t bufsize; apr_pool_t *tpool; int numbuckets; } sed_filter_ctxt; @@ -59,7 +59,7 @@ typedef struct sed_filter_ctxt module AP_MODULE_DECLARE_DATA sed_module; /* This function will be call back from libsed functions if there is any error - * happend during execution of sed scripts + * happened during execution of sed scripts */ static apr_status_t log_sed_errf(void *data, const char *error) { @@ -100,7 +100,7 @@ static void alloc_outbuf(sed_filter_ctxt* ctx) /* append_bucket * Allocate a new bucket from buf and sz and append to ctx->bb */ -static apr_status_t append_bucket(sed_filter_ctxt* ctx, char* buf, int sz) +static apr_status_t append_bucket(sed_filter_ctxt* ctx, char* buf, apr_size_t sz) { apr_status_t status = APR_SUCCESS; apr_bucket *b; @@ -133,7 +133,7 @@ static apr_status_t append_bucket(sed_filter_ctxt* ctx, char* buf, int sz) */ static apr_status_t flush_output_buffer(sed_filter_ctxt *ctx) { - int size = ctx->curoutbuf - ctx->outbuf; + apr_size_t size = ctx->curoutbuf - ctx->outbuf; char *out; apr_status_t status = APR_SUCCESS; if ((ctx->outbuf == NULL) || (size <=0)) @@ -147,12 +147,12 @@ static apr_status_t flush_output_buffer(sed_filter_ctxt *ctx) /* This is a call back function. When libsed wants to generate the output, * this function will be invoked. */ -static apr_status_t sed_write_output(void *dummy, char *buf, int sz) +static apr_status_t sed_write_output(void *dummy, char *buf, apr_size_t sz) { /* dummy is basically filter context. 
Context is passed during invocation * of sed_eval_buffer */ - int remainbytes = 0; + apr_size_t remainbytes = 0; apr_status_t status = APR_SUCCESS; sed_filter_ctxt *ctx = (sed_filter_ctxt *) dummy; if (ctx->outbuf == NULL) { @@ -168,21 +168,29 @@ static apr_status_t sed_write_output(void *dummy, char *buf, int sz) } /* buffer is now full */ status = append_bucket(ctx, ctx->outbuf, ctx->bufsize); - /* old buffer is now used so allocate new buffer */ - alloc_outbuf(ctx); - /* if size is bigger than the allocated buffer directly add to output - * brigade */ - if ((status == APR_SUCCESS) && (sz >= ctx->bufsize)) { - char* newbuf = apr_pmemdup(ctx->tpool, buf, sz); - status = append_bucket(ctx, newbuf, sz); - /* pool might get clear after append_bucket */ - if (ctx->outbuf == NULL) { + if (status == APR_SUCCESS) { + /* if size is bigger than the allocated buffer directly add to output + * brigade */ + if (sz >= ctx->bufsize) { + char* newbuf = apr_pmemdup(ctx->tpool, buf, sz); + status = append_bucket(ctx, newbuf, sz); + if (status == APR_SUCCESS) { + /* old buffer is now used so allocate new buffer */ + alloc_outbuf(ctx); + } + else { + clear_ctxpool(ctx); + } + } + else { + /* old buffer is now used so allocate new buffer */ alloc_outbuf(ctx); + memcpy(ctx->curoutbuf, buf, sz); + ctx->curoutbuf += sz; } } else { - memcpy(ctx->curoutbuf, buf, sz); - ctx->curoutbuf += sz; + clear_ctxpool(ctx); } } else { @@ -254,6 +262,7 @@ static apr_status_t init_context(ap_filter_t *f, sed_expr_config *sed_cfg, int u ctx->bufsize = MODSED_OUTBUF_SIZE; if (usetpool) { apr_pool_create(&(ctx->tpool), r->pool); + apr_pool_tag(ctx->tpool, "sed_tpool"); } else { ctx->tpool = r->pool; @@ -268,7 +277,7 @@ static apr_status_t sed_response_filter(ap_filter_t *f, apr_bucket_brigade *bb) { apr_bucket *b; - apr_status_t status; + apr_status_t status = APR_SUCCESS; sed_config *cfg = ap_get_module_config(f->r->per_dir_config, &sed_module); sed_filter_ctxt *ctx = f->ctx; @@ -293,9 +302,9 @@ static apr_status_t sed_response_filter(ap_filter_t *f, return status; ctx = f->ctx; apr_table_unset(f->r->headers_out, "Content-Length"); - } - ctx->bb = apr_brigade_create(f->r->pool, f->c->bucket_alloc); + ctx->bb = apr_brigade_create(f->r->pool, f->c->bucket_alloc); + } /* Here is the main logic. Iterate through all the buckets, read the * content of the bucket, call sed_eval_buffer on the data. @@ -317,63 +326,52 @@ static apr_status_t sed_response_filter(ap_filter_t *f, * in sed's internal buffer which can't be flushed until new line * character is arrived. 
*/ - for (b = APR_BRIGADE_FIRST(bb); b != APR_BRIGADE_SENTINEL(bb);) { - const char *buf = NULL; - apr_size_t bytes = 0; + while (!APR_BRIGADE_EMPTY(bb)) { + b = APR_BRIGADE_FIRST(bb); if (APR_BUCKET_IS_EOS(b)) { - apr_bucket *b1 = APR_BUCKET_NEXT(b); /* Now clean up the internal sed buffer */ sed_finalize_eval(&ctx->eval, ctx); status = flush_output_buffer(ctx); if (status != APR_SUCCESS) { - clear_ctxpool(ctx); - return status; + break; } + /* Move the eos bucket to ctx->bb brigade */ APR_BUCKET_REMOVE(b); - /* Insert the eos bucket to ctx->bb brigade */ APR_BRIGADE_INSERT_TAIL(ctx->bb, b); - b = b1; } else if (APR_BUCKET_IS_FLUSH(b)) { - apr_bucket *b1 = APR_BUCKET_NEXT(b); - APR_BUCKET_REMOVE(b); status = flush_output_buffer(ctx); if (status != APR_SUCCESS) { - clear_ctxpool(ctx); - return status; + break; } + /* Move the flush bucket to ctx->bb brigade */ + APR_BUCKET_REMOVE(b); APR_BRIGADE_INSERT_TAIL(ctx->bb, b); - b = b1; } - else if (APR_BUCKET_IS_METADATA(b)) { - b = APR_BUCKET_NEXT(b); - } - else if (apr_bucket_read(b, &buf, &bytes, APR_BLOCK_READ) - == APR_SUCCESS) { - apr_bucket *b1 = APR_BUCKET_NEXT(b); - status = sed_eval_buffer(&ctx->eval, buf, bytes, ctx); - if (status != APR_SUCCESS) { - clear_ctxpool(ctx); - return status; + else { + if (!APR_BUCKET_IS_METADATA(b)) { + const char *buf = NULL; + apr_size_t bytes = 0; + + status = apr_bucket_read(b, &buf, &bytes, APR_BLOCK_READ); + if (status == APR_SUCCESS) { + status = sed_eval_buffer(&ctx->eval, buf, bytes, ctx); + } + if (status != APR_SUCCESS) { + ap_log_rerror(APLOG_MARK, APLOG_ERR, status, f->r, APLOGNO(10394) "error evaluating sed on output"); + break; + } } - APR_BUCKET_REMOVE(b); apr_bucket_delete(b); - b = b1; - } - else { - apr_bucket *b1 = APR_BUCKET_NEXT(b); - APR_BUCKET_REMOVE(b); - b = b1; } } - apr_brigade_cleanup(bb); - status = flush_output_buffer(ctx); - if (status != APR_SUCCESS) { - clear_ctxpool(ctx); - return status; + if (status == APR_SUCCESS) { + status = flush_output_buffer(ctx); } if (!APR_BRIGADE_EMPTY(ctx->bb)) { - status = ap_pass_brigade(f->next, ctx->bb); + if (status == APR_SUCCESS) { + status = ap_pass_brigade(f->next, ctx->bb); + } apr_brigade_cleanup(ctx->bb); } clear_ctxpool(ctx); @@ -424,7 +422,7 @@ static apr_status_t sed_request_filter(ap_filter_t *f, * the buckets in bbinp and read the data from buckets and invoke * sed_eval_buffer on the data. libsed will generate its output using * sed_write_output which will add data in ctx->bb. Do it until it have - * atleast one bucket in ctx->bb. At the end of data eos bucket + * at least one bucket in ctx->bb. At the end of data eos bucket * should be there. 
* * Once eos bucket is seen, then invoke sed_finalize_eval to clear the @@ -466,8 +464,10 @@ static apr_status_t sed_request_filter(ap_filter_t *f, if (apr_bucket_read(b, &buf, &bytes, APR_BLOCK_READ) == APR_SUCCESS) { status = sed_eval_buffer(&ctx->eval, buf, bytes, ctx); - if (status != APR_SUCCESS) + if (status != APR_SUCCESS) { + ap_log_rerror(APLOG_MARK, APLOG_ERR, status, f->r, APLOGNO(10395) "error evaluating sed on input"); return status; + } flush_output_buffer(ctx); } } diff --git a/modules/filters/mod_substitute.c b/modules/filters/mod_substitute.c index b7d5296..d454bf3 100644 --- a/modules/filters/mod_substitute.c +++ b/modules/filters/mod_substitute.c @@ -306,7 +306,7 @@ static apr_status_t do_pattmatch(ap_filter_t *f, apr_bucket *inb, } else { apr_size_t repl_len; - /* acount for string before the match */ + /* account for string before the match */ if (space_left <= regm[0].rm_so) return APR_ENOMEM; space_left -= regm[0].rm_so; @@ -402,6 +402,7 @@ static apr_status_t substitute_filter(ap_filter_t *f, apr_bucket_brigade *bb) ctx->passbb = apr_brigade_create(f->r->pool, f->c->bucket_alloc); /* Create our temporary pool only once */ apr_pool_create(&(ctx->tpool), f->r->pool); + apr_pool_tag(ctx->tpool, "substitute_tpool"); apr_table_unset(f->r->headers_out, "Content-Length"); } @@ -667,8 +668,10 @@ static const char *set_pattern(cmd_parms *cmd, void *cfg, const char *line) /* first see if we can compile the regex */ if (!is_pattern) { - r = ap_pregcomp(cmd->pool, from, AP_REG_EXTENDED | - (ignore_case ? AP_REG_ICASE : 0)); + int flags = AP_REG_NO_DEFAULT + | (ap_regcomp_get_default_cflags() & AP_REG_DOLLAR_ENDONLY) + | (ignore_case ? AP_REG_ICASE : 0); + r = ap_pregcomp(cmd->pool, from, flags); if (!r) return "Substitute could not compile regex"; } diff --git a/modules/filters/mod_xml2enc.c b/modules/filters/mod_xml2enc.c index 05a4e9a..eb05c18 100644 --- a/modules/filters/mod_xml2enc.c +++ b/modules/filters/mod_xml2enc.c @@ -23,9 +23,28 @@ #include +/* libxml2 includes unicode/[...].h files which uses C++ comments */ +#if defined(__clang__) +#pragma clang diagnostic push +#pragma clang diagnostic warning "-Wcomment" +#elif defined(__GNUC__) +#if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6) +#pragma GCC diagnostic push +#pragma GCC diagnostic warning "-Wcomment" +#endif +#endif + /* libxml2 */ #include +#if defined(__clang__) +#pragma clang diagnostic pop +#elif defined(__GNUC__) +#if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6) +#pragma GCC diagnostic pop +#endif +#endif + #include "http_protocol.h" #include "http_config.h" #include "http_log.h" @@ -51,7 +70,7 @@ module AP_MODULE_DECLARE_DATA xml2enc_module; (((enc)!=XML_CHAR_ENCODING_NONE)&&((enc)!=XML_CHAR_ENCODING_ERROR)) /* - * XXX: Check all those ap_assert()s ans replace those that should not happen + * XXX: Check all those ap_assert()s and replace those that should not happen * XXX: with AP_DEBUG_ASSERT and those that may happen with proper error * XXX: handling. 
*/ @@ -187,11 +206,11 @@ static void sniff_encoding(request_rec* r, xml2ctx* ctx) } } } - + /* to sniff, first we look for BOM */ if (ctx->xml2enc == XML_CHAR_ENCODING_NONE) { - ctx->xml2enc = xmlDetectCharEncoding((const xmlChar*)ctx->buf, - ctx->bytes); + ctx->xml2enc = xmlDetectCharEncoding((const unsigned char*)ctx->buf, + ctx->bytes); if (HAVE_ENCODING(ctx->xml2enc)) { ap_log_rerror(APLOG_MARK, APLOG_INFO, 0, r, APLOGNO(01432) "Got charset from XML rules.") ; @@ -304,7 +323,7 @@ static apr_status_t xml2enc_ffunc(ap_filter_t* f, apr_bucket_brigade* bb) apr_bucket* bstart; apr_size_t insz = 0; int pending_meta = 0; - char *ctype; + char *mtype; char *p; if (!ctx || !f->r->content_type) { @@ -313,13 +332,17 @@ static apr_status_t xml2enc_ffunc(ap_filter_t* f, apr_bucket_brigade* bb) return ap_pass_brigade(f->next, bb) ; } - ctype = apr_pstrdup(f->r->pool, f->r->content_type); - for (p = ctype; *p; ++p) - if (isupper(*p)) - *p = tolower(*p); - - /* only act if starts-with "text/" or contains "xml" */ - if (strncmp(ctype, "text/", 5) && !strstr(ctype, "xml")) { + /* Extract the media type, ignoring parameters in content-type. */ + mtype = apr_pstrdup(f->r->pool, f->r->content_type); + if ((p = ap_strchr(mtype, ';')) != NULL) *p = '\0'; + ap_str_tolower(mtype); + + /* Accept text/ types, plus any XML media type per RFC 7303. */ + if (!(strncmp(mtype, "text/", 5) == 0 + || strcmp(mtype, "application/xml") == 0 + || (strlen(mtype) > 7 /* minimum 'a/b+xml' length */ + && (p = strstr(mtype, "+xml")) != NULL + && strlen(p) == 4 /* ensures +xml is a suffix */))) { ap_remove_output_filter(f); return ap_pass_brigade(f->next, bb) ; } diff --git a/modules/filters/regexp.h b/modules/filters/regexp.h index 6af8912..dc4993a 100644 --- a/modules/filters/regexp.h +++ b/modules/filters/regexp.h @@ -93,8 +93,8 @@ extern void command_errf(sed_commands_t *commands, const char *fmt, ...) #define SEDERR_COMES "cannot open %s" #define SEDERR_CCMES "cannot create %s" #define SEDERR_TMLNMES "too many line numbers" -#define SEDERR_TMAMES "too many appends after line %lld" -#define SEDERR_TMRMES "too many reads after line %lld" +#define SEDERR_TMAMES "too many appends after line %" APR_INT64_T_FMT +#define SEDERR_TMRMES "too many reads after line %" APR_INT64_T_FMT #define SEDERR_DOORNG "``\\digit'' out of range: %s" #define SEDERR_EDMOSUB "ending delimiter missing on substitution: %s" #define SEDERR_EDMOSTR "ending delimiter missing on string: %s" diff --git a/modules/filters/sed1.c b/modules/filters/sed1.c index be03506..047f49b 100644 --- a/modules/filters/sed1.c +++ b/modules/filters/sed1.c @@ -71,7 +71,7 @@ static apr_status_t dosub(sed_eval_t *eval, char *rhsbuf, int n, static char *place(sed_eval_t *eval, char *asp, char *al1, char *al2); static apr_status_t command(sed_eval_t *eval, sed_reptr_t *ipc, step_vars_storage *step_vars); -static apr_status_t wline(sed_eval_t *eval, char *buf, int sz); +static apr_status_t wline(sed_eval_t *eval, char *buf, apr_size_t sz); static apr_status_t arout(sed_eval_t *eval); static void eval_errf(sed_eval_t *eval, const char *fmt, ...) @@ -87,18 +87,20 @@ static void eval_errf(sed_eval_t *eval, const char *fmt, ...) 
} #define INIT_BUF_SIZE 1024 +#define MAX_BUF_SIZE 1024*8192 /* * grow_buffer */ -static void grow_buffer(apr_pool_t *pool, char **buffer, - char **spend, unsigned int *cursize, - unsigned int newsize) +static apr_status_t grow_buffer(apr_pool_t *pool, char **buffer, + char **spend, apr_size_t *cursize, + apr_size_t newsize) { char* newbuffer = NULL; - int spendsize = 0; - if (*cursize >= newsize) - return; + apr_size_t spendsize = 0; + if (*cursize >= newsize) { + return APR_SUCCESS; + } /* Avoid number of times realloc is called. It could cause huge memory * requirement if line size is huge e.g 2 MB */ if (newsize < *cursize * 2) { @@ -107,6 +109,9 @@ static void grow_buffer(apr_pool_t *pool, char **buffer, /* Align it to 4 KB boundary */ newsize = (newsize + ((1 << 12) - 1)) & ~((1 << 12) - 1); + if (newsize > MAX_BUF_SIZE) { + return APR_ENOMEM; + } newbuffer = apr_pcalloc(pool, newsize); if (*spend && *buffer && (*cursize > 0)) { spendsize = *spend - *buffer; @@ -119,123 +124,168 @@ static void grow_buffer(apr_pool_t *pool, char **buffer, if (spend != buffer) { *spend = *buffer + spendsize; } + return APR_SUCCESS; } /* * grow_line_buffer */ -static void grow_line_buffer(sed_eval_t *eval, int newsize) +static apr_status_t grow_line_buffer(sed_eval_t *eval, apr_size_t newsize) { - grow_buffer(eval->pool, &eval->linebuf, &eval->lspend, + return grow_buffer(eval->pool, &eval->linebuf, &eval->lspend, &eval->lsize, newsize); } /* * grow_hold_buffer */ -static void grow_hold_buffer(sed_eval_t *eval, int newsize) +static apr_status_t grow_hold_buffer(sed_eval_t *eval, apr_size_t newsize) { - grow_buffer(eval->pool, &eval->holdbuf, &eval->hspend, + return grow_buffer(eval->pool, &eval->holdbuf, &eval->hspend, &eval->hsize, newsize); } /* * grow_gen_buffer */ -static void grow_gen_buffer(sed_eval_t *eval, int newsize, +static apr_status_t grow_gen_buffer(sed_eval_t *eval, apr_size_t newsize, char **gspend) { + apr_status_t rc = 0; if (gspend == NULL) { gspend = &eval->genbuf; } - grow_buffer(eval->pool, &eval->genbuf, gspend, - &eval->gsize, newsize); - eval->lcomend = &eval->genbuf[71]; + rc = grow_buffer(eval->pool, &eval->genbuf, gspend, + &eval->gsize, newsize); + if (rc == APR_SUCCESS) { + eval->lcomend = &eval->genbuf[71]; + } + return rc; } /* * appendmem_to_linebuf */ -static void appendmem_to_linebuf(sed_eval_t *eval, const char* sz, int len) +static apr_status_t appendmem_to_linebuf(sed_eval_t *eval, const char* sz, apr_size_t len) { - unsigned int reqsize = (eval->lspend - eval->linebuf) + len; + apr_status_t rc = 0; + apr_size_t reqsize = (eval->lspend - eval->linebuf) + len; if (eval->lsize < reqsize) { - grow_line_buffer(eval, reqsize); + rc = grow_line_buffer(eval, reqsize); + if (rc != APR_SUCCESS) { + return rc; + } } memcpy(eval->lspend, sz, len); eval->lspend += len; + return APR_SUCCESS; } /* * append_to_linebuf */ -static void append_to_linebuf(sed_eval_t *eval, const char* sz) +static apr_status_t append_to_linebuf(sed_eval_t *eval, const char* sz, + step_vars_storage *step_vars) { - int len = strlen(sz); + apr_size_t len = strlen(sz); + char *old_linebuf = eval->linebuf; + apr_status_t rc = 0; /* Copy string including null character */ - appendmem_to_linebuf(eval, sz, len + 1); + rc = appendmem_to_linebuf(eval, sz, len + 1); + if (rc != APR_SUCCESS) { + return rc; + } --eval->lspend; /* lspend will now point to NULL character */ + /* Sync step_vars after a possible linebuf expansion */ + if (step_vars && old_linebuf != eval->linebuf) { + if (step_vars->loc1) { + 
step_vars->loc1 = step_vars->loc1 - old_linebuf + eval->linebuf; + } + if (step_vars->loc2) { + step_vars->loc2 = step_vars->loc2 - old_linebuf + eval->linebuf; + } + if (step_vars->locs) { + step_vars->locs = step_vars->locs - old_linebuf + eval->linebuf; + } + } + return APR_SUCCESS; } /* * copy_to_linebuf */ -static void copy_to_linebuf(sed_eval_t *eval, const char* sz) +static apr_status_t copy_to_linebuf(sed_eval_t *eval, const char* sz, + step_vars_storage *step_vars) { eval->lspend = eval->linebuf; - append_to_linebuf(eval, sz); + return append_to_linebuf(eval, sz, step_vars); } /* * append_to_holdbuf */ -static void append_to_holdbuf(sed_eval_t *eval, const char* sz) +static apr_status_t append_to_holdbuf(sed_eval_t *eval, const char* sz) { - int len = strlen(sz); - unsigned int reqsize = (eval->hspend - eval->holdbuf) + len + 1; + apr_size_t len = strlen(sz); + apr_size_t reqsize = (eval->hspend - eval->holdbuf) + len + 1; + apr_status_t rc = 0; if (eval->hsize <= reqsize) { - grow_hold_buffer(eval, reqsize); + rc = grow_hold_buffer(eval, reqsize); + if (rc != APR_SUCCESS) { + return rc; + } } memcpy(eval->hspend, sz, len + 1); /* hspend will now point to NULL character */ eval->hspend += len; + return APR_SUCCESS; } /* * copy_to_holdbuf */ -static void copy_to_holdbuf(sed_eval_t *eval, const char* sz) +static apr_status_t copy_to_holdbuf(sed_eval_t *eval, const char* sz) { eval->hspend = eval->holdbuf; - append_to_holdbuf(eval, sz); + return append_to_holdbuf(eval, sz); } /* * append_to_genbuf */ -static void append_to_genbuf(sed_eval_t *eval, const char* sz, char **gspend) +static apr_status_t append_to_genbuf(sed_eval_t *eval, const char* sz, char **gspend) { - int len = strlen(sz); - unsigned int reqsize = (*gspend - eval->genbuf) + len + 1; + apr_size_t len = strlen(sz); + apr_size_t reqsize = (*gspend - eval->genbuf) + len + 1; + apr_status_t rc = 0; if (eval->gsize < reqsize) { - grow_gen_buffer(eval, reqsize, gspend); + rc = grow_gen_buffer(eval, reqsize, gspend); + if (rc != APR_SUCCESS) { + return rc; + } } memcpy(*gspend, sz, len + 1); /* *gspend will now point to NULL character */ *gspend += len; + return APR_SUCCESS; } /* * copy_to_genbuf */ -static void copy_to_genbuf(sed_eval_t *eval, const char* sz) +static apr_status_t copy_to_genbuf(sed_eval_t *eval, const char* sz) { - int len = strlen(sz); - unsigned int reqsize = len + 1; + apr_size_t len = strlen(sz); + apr_size_t reqsize = len + 1; + apr_status_t rc = APR_SUCCESS;; if (eval->gsize < reqsize) { - grow_gen_buffer(eval, reqsize, NULL); + rc = grow_gen_buffer(eval, reqsize, NULL); + if (rc != APR_SUCCESS) { + return rc; + } } memcpy(eval->genbuf, sz, len + 1); + return rc; } /* @@ -353,7 +403,7 @@ apr_status_t sed_eval_file(sed_eval_t *eval, apr_file_t *fin, void *fout) /* * sed_eval_buffer */ -apr_status_t sed_eval_buffer(sed_eval_t *eval, const char *buf, int bufsz, void *fout) +apr_status_t sed_eval_buffer(sed_eval_t *eval, const char *buf, apr_size_t bufsz, void *fout) { apr_status_t rv; @@ -382,8 +432,9 @@ apr_status_t sed_eval_buffer(sed_eval_t *eval, const char *buf, int bufsz, void } while (bufsz) { + apr_status_t rc = 0; char *n; - int llen; + apr_size_t llen; n = memchr(buf, '\n', bufsz); if (n == NULL) @@ -396,7 +447,10 @@ apr_status_t sed_eval_buffer(sed_eval_t *eval, const char *buf, int bufsz, void break; } - appendmem_to_linebuf(eval, buf, llen + 1); + rc = appendmem_to_linebuf(eval, buf, llen + 1); + if (rc != APR_SUCCESS) { + return rc; + } --eval->lspend; /* replace new line character with NULL 
*/ *eval->lspend = '\0'; @@ -411,7 +465,10 @@ apr_status_t sed_eval_buffer(sed_eval_t *eval, const char *buf, int bufsz, void /* Save the leftovers for later */ if (bufsz) { - appendmem_to_linebuf(eval, buf, bufsz); + apr_status_t rc = appendmem_to_linebuf(eval, buf, bufsz); + if (rc != APR_SUCCESS) { + return rc; + } } return APR_SUCCESS; @@ -433,6 +490,7 @@ apr_status_t sed_finalize_eval(sed_eval_t *eval, void *fout) /* Process leftovers */ if (eval->lspend > eval->linebuf) { apr_status_t rv; + apr_status_t rc = 0; if (eval->lreadyflag) { eval->lreadyflag = 0; @@ -442,7 +500,10 @@ apr_status_t sed_finalize_eval(sed_eval_t *eval, void *fout) * buffer is not a newline. */ /* Assure space for NULL */ - append_to_linebuf(eval, ""); + rc = append_to_linebuf(eval, "", NULL); + if (rc != APR_SUCCESS) { + return rc; + } } *eval->lspend = '\0'; @@ -640,11 +701,15 @@ static apr_status_t dosub(sed_eval_t *eval, char *rhsbuf, int n, sp = eval->genbuf; rp = rhsbuf; sp = place(eval, sp, lp, step_vars->loc1); + if (sp == NULL) { + return APR_EGENERAL; + } while ((c = *rp++) != 0) { if (c == '&') { sp = place(eval, sp, step_vars->loc1, step_vars->loc2); - if (sp == NULL) + if (sp == NULL) { return APR_EGENERAL; + } } else if (c == '\\') { c = *rp++; @@ -660,13 +725,19 @@ static apr_status_t dosub(sed_eval_t *eval, char *rhsbuf, int n, *sp++ = c; if (sp >= eval->genbuf + eval->gsize) { /* expand genbuf and set the sp appropriately */ - grow_gen_buffer(eval, eval->gsize + 1024, &sp); + rv = grow_gen_buffer(eval, eval->gsize + 1024, &sp); + if (rv != APR_SUCCESS) { + return rv; + } } } lp = step_vars->loc2; step_vars->loc2 = sp - eval->genbuf + eval->linebuf; - append_to_genbuf(eval, lp, &sp); - copy_to_linebuf(eval, eval->genbuf); + rv = append_to_genbuf(eval, lp, &sp); + if (rv != APR_SUCCESS) { + return rv; + } + rv = copy_to_linebuf(eval, eval->genbuf, step_vars); return rv; } @@ -676,11 +747,14 @@ static apr_status_t dosub(sed_eval_t *eval, char *rhsbuf, int n, static char *place(sed_eval_t *eval, char *asp, char *al1, char *al2) { char *sp = asp; - int n = al2 - al1; - unsigned int reqsize = (sp - eval->genbuf) + n + 1; + apr_size_t n = al2 - al1; + apr_size_t reqsize = (sp - eval->genbuf) + n + 1; if (eval->gsize < reqsize) { - grow_gen_buffer(eval, reqsize, &sp); + apr_status_t rc = grow_gen_buffer(eval, reqsize, &sp); + if (rc != APR_SUCCESS) { + return NULL; + } } memcpy(sp, al1, n); return sp + n; @@ -735,7 +809,8 @@ static apr_status_t command(sed_eval_t *eval, sed_reptr_t *ipc, } p1++; - copy_to_linebuf(eval, p1); + rv = copy_to_linebuf(eval, p1, step_vars); + if (rv != APR_SUCCESS) return rv; eval->jflag++; break; @@ -745,21 +820,27 @@ static apr_status_t command(sed_eval_t *eval, sed_reptr_t *ipc, break; case GCOM: - copy_to_linebuf(eval, eval->holdbuf); + rv = copy_to_linebuf(eval, eval->holdbuf, step_vars); + if (rv != APR_SUCCESS) return rv; break; case CGCOM: - append_to_linebuf(eval, "\n"); - append_to_linebuf(eval, eval->holdbuf); + rv = append_to_linebuf(eval, "\n", step_vars); + if (rv != APR_SUCCESS) return rv; + rv = append_to_linebuf(eval, eval->holdbuf, step_vars); + if (rv != APR_SUCCESS) return rv; break; case HCOM: - copy_to_holdbuf(eval, eval->linebuf); + rv = copy_to_holdbuf(eval, eval->linebuf); + if (rv != APR_SUCCESS) return rv; break; case CHCOM: - append_to_holdbuf(eval, "\n"); - append_to_holdbuf(eval, eval->linebuf); + rv = append_to_holdbuf(eval, "\n"); + if (rv != APR_SUCCESS) return rv; + rv = append_to_holdbuf(eval, eval->linebuf); + if (rv != APR_SUCCESS) return 
rv; break; case ICOM: @@ -881,7 +962,8 @@ static apr_status_t command(sed_eval_t *eval, sed_reptr_t *ipc, if (rv != APR_SUCCESS) return rv; } - append_to_linebuf(eval, "\n"); + rv = append_to_linebuf(eval, "\n", step_vars); + if (rv != APR_SUCCESS) return rv; eval->pending = ipc->next; break; @@ -955,9 +1037,12 @@ static apr_status_t command(sed_eval_t *eval, sed_reptr_t *ipc, break; case XCOM: - copy_to_genbuf(eval, eval->linebuf); - copy_to_linebuf(eval, eval->holdbuf); - copy_to_holdbuf(eval, eval->genbuf); + rv = copy_to_genbuf(eval, eval->linebuf); + if (rv != APR_SUCCESS) return rv; + rv = copy_to_linebuf(eval, eval->holdbuf, step_vars); + if (rv != APR_SUCCESS) return rv; + rv = copy_to_holdbuf(eval, eval->genbuf); + if (rv != APR_SUCCESS) return rv; break; case YCOM: @@ -1013,7 +1098,7 @@ static apr_status_t arout(sed_eval_t *eval) /* * wline */ -static apr_status_t wline(sed_eval_t *eval, char *buf, int sz) +static apr_status_t wline(sed_eval_t *eval, char *buf, apr_size_t sz) { apr_status_t rv = APR_SUCCESS; rv = eval->writefn(eval->fout, buf, sz); diff --git a/modules/generators/mod_autoindex.c b/modules/generators/mod_autoindex.c index 9094e30..cb44603 100644 --- a/modules/generators/mod_autoindex.c +++ b/modules/generators/mod_autoindex.c @@ -1070,7 +1070,7 @@ static void emit_head(request_rec *r, char *header_fname, int suppress_amble, emit_H1 = 1; } } - else if (!strncasecmp("text/", rr->content_type, 5)) { + else if (!ap_cstr_casecmpn("text/", rr->content_type, 5)) { /* * If we can open the file, prefix it with the preamble * regardless; since we'll be sending a
<pre> block around
@@ -1165,7 +1165,7 @@ static void emit_tail(request_rec *r, char *readme_fname, int suppress_amble)
                     suppress_post = suppress_amble;
                 }
             }
-            else if (!strncasecmp("text/", rr->content_type, 5)) {
+            else if (!ap_cstr_casecmpn("text/", rr->content_type, 5)) {
                 /*
                  * If we can open the file, suppress the signature.
                  */
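The autoindex hunks above are part of a tree-wide swap of strcasecmp()/strncasecmp() for ap_cstr_casecmp()/ap_cstr_casecmpn(), which fold ASCII case only and therefore behave the same regardless of the process locale. A minimal sketch of the new calls, assuming a caller that merely wants to detect text/* responses (the helper name is illustrative):

    #include "httpd.h"   /* ap_cstr_casecmp(), ap_cstr_casecmpn() */

    /* Locale-independent check for a text/* media type.  Unlike
     * strncasecmp(), ap_cstr_casecmpn() only folds ASCII, so the result
     * does not change under locales such as tr_TR. */
    static int is_text_type(const char *content_type)
    {
        return content_type
               && ap_cstr_casecmpn(content_type, "text/", 5) == 0;
    }
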
@@ -1266,8 +1266,9 @@ static struct ent *make_parent_entry(apr_int32_t autoindex_opts,
     if (!(p->name = ap_make_full_path(r->pool, r->uri, "../"))) {
         return (NULL);
     }
-    ap_getparents(p->name);
-    if (!*p->name) {
+    if (!ap_normalize_path(p->name, AP_NORMALIZE_ALLOW_RELATIVE |
+                                    AP_NORMALIZE_NOT_ABOVE_ROOT)
+            || p->name[0] == '\0') {
         return (NULL);
     }
 
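make_parent_entry() now relies on ap_normalize_path() instead of ap_getparents(), and rejects the entry when normalization fails or leaves an empty path. A hedged sketch of the same call pattern outside the module (the helper name is made up):

    #include "httpd.h"   /* ap_normalize_path(), AP_NORMALIZE_* flags */

    /* Collapse "." and ".." segments in place while allowing a relative
     * path, and refuse anything that would climb above the root.
     * Returns 1 when the normalized path is usable, 0 otherwise. */
    static int normalize_listing_path(char *path)
    {
        return ap_normalize_path(path, AP_NORMALIZE_ALLOW_RELATIVE |
                                       AP_NORMALIZE_NOT_ABOVE_ROOT)
               && path[0] != '\0';
    }
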
@@ -1517,6 +1518,7 @@ static void output_directories(struct ent **ar, int n,
     char *breakrow = "";
 
     apr_pool_create(&scratch, r->pool);
+    apr_pool_tag(scratch, "autoindex_scratch");
 
     name_width = d->name_width;
     desc_width = d->desc_width;
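The apr_pool_tag() calls added throughout this patch (autoindex_scratch here, plus includes_dpool, sed_tpool, substitute_tpool, cgid_ptrans and cgid_pcgi elsewhere) only label subpools so they can be told apart in APR's pool debugging output; behaviour is otherwise unchanged. A small sketch, with an arbitrary parent pool and a made-up tag name:

    #include "apr_pools.h"

    /* Create a scratch subpool and give it a human-readable tag.  The
     * tag string is not copied, so a literal (or memory that outlives
     * the pool) must be used. */
    static apr_pool_t *make_scratch_pool(apr_pool_t *parent)
    {
        apr_pool_t *scratch = NULL;

        apr_pool_create(&scratch, parent);
        apr_pool_tag(scratch, "example_scratch");  /* illustrative name */
        return scratch;
    }
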
diff --git a/modules/generators/mod_cgi.c b/modules/generators/mod_cgi.c
index 8c4a2c6..1f77786 100644
--- a/modules/generators/mod_cgi.c
+++ b/modules/generators/mod_cgi.c
@@ -92,6 +92,10 @@ typedef struct {
     apr_size_t  bufbytes;
 } cgi_server_conf;
 
+typedef struct {
+    apr_interval_time_t timeout;
+} cgi_dirconf;
+
 static void *create_cgi_config(apr_pool_t *p, server_rec *s)
 {
     cgi_server_conf *c =
@@ -112,6 +116,12 @@ static void *merge_cgi_config(apr_pool_t *p, void *basev, void *overridesv)
     return overrides->logname ? overrides : base;
 }
 
+static void *create_cgi_dirconf(apr_pool_t *p, char *dummy)
+{
+    cgi_dirconf *c = (cgi_dirconf *) apr_pcalloc(p, sizeof(cgi_dirconf));
+    return c;
+}
+
 static const char *set_scriptlog(cmd_parms *cmd, void *dummy, const char *arg)
 {
     server_rec *s = cmd->server;
@@ -150,6 +160,17 @@ static const char *set_scriptlog_buffer(cmd_parms *cmd, void *dummy,
     return NULL;
 }
 
+static const char *set_script_timeout(cmd_parms *cmd, void *dummy, const char *arg)
+{
+    cgi_dirconf *dc = dummy;
+
+    if (ap_timeout_parameter_parse(arg, &dc->timeout, "s") != APR_SUCCESS) {
+        return "CGIScriptTimeout has wrong format";
+    }
+
+    return NULL;
+}
+
 static const command_rec cgi_cmds[] =
 {
 AP_INIT_TAKE1("ScriptLog", set_scriptlog, NULL, RSRC_CONF,
@@ -158,6 +179,9 @@ AP_INIT_TAKE1("ScriptLogLength", set_scriptlog_length, NULL, RSRC_CONF,
      "the maximum length (in bytes) of the script debug log"),
 AP_INIT_TAKE1("ScriptLogBuffer", set_scriptlog_buffer, NULL, RSRC_CONF,
      "the maximum size (in bytes) to record of a POST request"),
+AP_INIT_TAKE1("CGIScriptTimeout", set_script_timeout, NULL, RSRC_CONF | ACCESS_CONF,
+     "The amount of time to wait between successful reads from "
+     "the CGI script, in seconds."),
     {NULL}
 };
 
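The new CGIScriptTimeout directive (valid in server config and <Directory> context per the RSRC_CONF | ACCESS_CONF flags above) is parsed with ap_timeout_parameter_parse(), which converts a number with an optional unit suffix into an apr_interval_time_t, treating a bare value such as "60" as seconds because of the "s" default unit; the hunks below fall back to r->server->timeout whenever the per-directory value is unset. A sketch of just the parsing step, with a hypothetical helper and error string:

    #include "httpd.h"   /* ap_timeout_parameter_parse() */

    /* Turn a directive argument such as "60" (seconds) or "500ms" into
     * microseconds.  A non-NULL return value is the error message shown
     * to the administrator, mirroring httpd's config-handler convention. */
    static const char *parse_cgi_timeout(const char *arg,
                                         apr_interval_time_t *timeout)
    {
        if (ap_timeout_parameter_parse(arg, timeout, "s") != APR_SUCCESS) {
            return "timeout has wrong format";
        }
        return NULL;
    }
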
@@ -466,23 +490,26 @@ static apr_status_t run_cgi_child(apr_file_t **script_out,
                           apr_filepath_name_get(r->filename));
         }
         else {
+            cgi_dirconf *dc = ap_get_module_config(r->per_dir_config, &cgi_module);
+            apr_interval_time_t timeout = dc->timeout > 0 ? dc->timeout : r->server->timeout;
+
             apr_pool_note_subprocess(p, procnew, APR_KILL_AFTER_TIMEOUT);
 
             *script_in = procnew->out;
             if (!*script_in)
                 return APR_EBADF;
-            apr_file_pipe_timeout_set(*script_in, r->server->timeout);
+            apr_file_pipe_timeout_set(*script_in, timeout);
 
             if (e_info->prog_type == RUN_AS_CGI) {
                 *script_out = procnew->in;
                 if (!*script_out)
                     return APR_EBADF;
-                apr_file_pipe_timeout_set(*script_out, r->server->timeout);
+                apr_file_pipe_timeout_set(*script_out, timeout);
 
                 *script_err = procnew->err;
                 if (!*script_err)
                     return APR_EBADF;
-                apr_file_pipe_timeout_set(*script_err, r->server->timeout);
+                apr_file_pipe_timeout_set(*script_err, timeout);
             }
         }
     }
@@ -541,19 +568,15 @@ static void discard_script_output(apr_bucket_brigade *bb)
     apr_bucket *e;
     const char *buf;
     apr_size_t len;
-    apr_status_t rv;
 
     for (e = APR_BRIGADE_FIRST(bb);
-         e != APR_BRIGADE_SENTINEL(bb);
-         e = APR_BUCKET_NEXT(e))
+         e != APR_BRIGADE_SENTINEL(bb) && !APR_BUCKET_IS_EOS(e);
+         e = APR_BRIGADE_FIRST(bb))
     {
-        if (APR_BUCKET_IS_EOS(e)) {
-            break;
-        }
-        rv = apr_bucket_read(e, &buf, &len, APR_BLOCK_READ);
-        if (rv != APR_SUCCESS) {
+        if (apr_bucket_read(e, &buf, &len, APR_BLOCK_READ)) {
             break;
         }
+        apr_bucket_delete(e);
     }
 }
 
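discard_script_output() now always takes the first bucket of the brigade, reads it, and deletes it before looking at the next one, so consumed data is released immediately and the loop stops at EOS or on the first read error. The same drain pattern in isolation (function name is illustrative):

    #include "apr_buckets.h"

    /* Read and discard everything in bb up to, but not including, EOS.
     * Deleting the first bucket on every iteration keeps memory bounded
     * even when the script produces a large body. */
    static void drain_brigade(apr_bucket_brigade *bb)
    {
        apr_bucket *e;
        const char *buf;
        apr_size_t len;

        for (e = APR_BRIGADE_FIRST(bb);
             e != APR_BRIGADE_SENTINEL(bb) && !APR_BUCKET_IS_EOS(e);
             e = APR_BRIGADE_FIRST(bb))
        {
            if (apr_bucket_read(e, &buf, &len, APR_BLOCK_READ) != APR_SUCCESS) {
                break;
            }
            apr_bucket_delete(e);
        }
    }
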
@@ -679,11 +702,14 @@ static apr_status_t cgi_bucket_read(apr_bucket *b, const char **str,
                                     apr_size_t *len, apr_read_type_e block)
 {
     struct cgi_bucket_data *data = b->data;
-    apr_interval_time_t timeout;
+    apr_interval_time_t timeout = 0;
     apr_status_t rv;
     int gotdata = 0;
+    cgi_dirconf *dc = ap_get_module_config(data->r->per_dir_config, &cgi_module);
 
-    timeout = block == APR_NONBLOCK_READ ? 0 : data->r->server->timeout;
+    if (block != APR_NONBLOCK_READ) {
+        timeout = dc->timeout > 0 ? dc->timeout : data->r->server->timeout;
+    }
 
     do {
         const apr_pollfd_t *results;
@@ -761,6 +787,8 @@ static int cgi_handler(request_rec *r)
     apr_status_t rv;
     cgi_exec_info_t e_info;
     conn_rec *c;
+    cgi_dirconf *dc = ap_get_module_config(r->per_dir_config, &cgi_module);
+    apr_interval_time_t timeout = dc->timeout > 0 ? dc->timeout : r->server->timeout;
 
     if (strcmp(r->handler, CGI_MAGIC_TYPE) && strcmp(r->handler, "cgi-script")) {
         return DECLINED;
@@ -939,9 +967,18 @@ static int cgi_handler(request_rec *r)
         char sbuf[MAX_STRING_LEN];
         int ret;
 
-        if ((ret = ap_scan_script_header_err_brigade_ex(r, bb, sbuf,
-                                                        APLOG_MODULE_INDEX)))
-        {
+        ret = ap_scan_script_header_err_brigade_ex(r, bb, sbuf,
+                                                   APLOG_MODULE_INDEX);
+
+        /* xCGI has its own body framing mechanism which we don't
+         * match against any provided Content-Length, so let the
+         * core determine C-L vs T-E based on what's actually sent.
+         */
+        if (!apr_table_get(r->subprocess_env, AP_TRUST_CGILIKE_CL_ENVVAR))
+            apr_table_unset(r->headers_out, "Content-Length");
+        apr_table_unset(r->headers_out, "Transfer-Encoding");
+
+        if (ret != OK) {
             ret = log_script(r, conf, ret, dbuf, sbuf, bb, script_err);
 
             /*
@@ -980,7 +1017,7 @@ static int cgi_handler(request_rec *r)
              * stderr output, as normal. */
             discard_script_output(bb);
             apr_brigade_destroy(bb);
-            apr_file_pipe_timeout_set(script_err, r->server->timeout);
+            apr_file_pipe_timeout_set(script_err, timeout);
             log_script_err(r, script_err);
         }
 
@@ -1031,7 +1068,7 @@ static int cgi_handler(request_rec *r)
      * connection drops or we stopped sending output for some other
      * reason */
     if (rv == APR_SUCCESS && !r->connection->aborted) {
-        apr_file_pipe_timeout_set(script_err, r->server->timeout);
+        apr_file_pipe_timeout_set(script_err, timeout);
         log_script_err(r, script_err);
     }
 
@@ -1272,7 +1309,7 @@ static void register_hooks(apr_pool_t *p)
 AP_DECLARE_MODULE(cgi) =
 {
     STANDARD20_MODULE_STUFF,
-    NULL,                        /* dir config creater */
+    create_cgi_dirconf,          /* dir config creator */
     NULL,                        /* dir merger --- default is to override */
     create_cgi_config,           /* server config */
     merge_cgi_config,            /* merge server config */
diff --git a/modules/generators/mod_cgid.c b/modules/generators/mod_cgid.c
index b827ed6..4bab59f 100644
--- a/modules/generators/mod_cgid.c
+++ b/modules/generators/mod_cgid.c
@@ -608,6 +608,7 @@ static int cgid_server(void *data)
     apr_status_t rv;
 
     apr_pool_create(&ptrans, pcgi);
+    apr_pool_tag(ptrans, "cgid_ptrans");
 
     apr_signal(SIGCHLD, SIG_IGN);
     apr_signal(SIGHUP, daemon_signal_handler);
@@ -626,6 +627,9 @@ static int cgid_server(void *data)
         return errno;
     }
 
+    apr_pool_cleanup_register(pcgi, (void *)((long)sd),
+                              close_unix_socket, close_unix_socket);
+
     omask = umask(0077); /* so that only Apache can use socket */
     rc = bind(sd, (struct sockaddr *)server_addr, server_addr_len);
     umask(omask); /* can't fail, so can't clobber errno */
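In cgid_server() the close_unix_socket cleanup is now registered on pcgi right after socket() succeeds rather than after bind()/listen(), so the descriptor is also released on the early-error returns in between. A minimal sketch of registering that kind of cleanup, assuming a made-up close_fd() helper:

    #include "apr_pools.h"
    #include <unistd.h>

    /* Pool cleanup: close a descriptor passed through the void* data
     * argument, as the cgid code does with its Unix socket. */
    static apr_status_t close_fd(void *data)
    {
        close((int)(long)data);
        return APR_SUCCESS;
    }

    /* Register the cleanup as soon as the fd exists so every later error
     * path still closes it when the pool is destroyed. */
    static void watch_fd(apr_pool_t *p, int fd)
    {
        apr_pool_cleanup_register(p, (void *)(long)fd, close_fd, close_fd);
    }
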
@@ -660,9 +664,6 @@ static int cgid_server(void *data)
         }
     }
 
-    apr_pool_cleanup_register(pcgi, (void *)((long)sd),
-                              close_unix_socket, close_unix_socket);
-
     /* if running as root, switch to configured user/group */
     if ((rc = ap_run_drop_privileges(pcgi, ap_server_conf)) != 0) {
         return rc;
@@ -879,6 +880,7 @@ static int cgid_start(apr_pool_t *p, server_rec *main_server,
     else if (daemon_pid == 0) {
         if (pcgi == NULL) {
             apr_pool_create(&pcgi, p);
+            apr_pool_tag(pcgi, "cgid_pcgi");
         }
         exit(cgid_server(main_server) > 0 ? DAEMON_STARTUP_ERROR : -1);
     }
@@ -1275,19 +1277,15 @@ static void discard_script_output(apr_bucket_brigade *bb)
     apr_bucket *e;
     const char *buf;
     apr_size_t len;
-    apr_status_t rv;
 
     for (e = APR_BRIGADE_FIRST(bb);
-         e != APR_BRIGADE_SENTINEL(bb);
-         e = APR_BUCKET_NEXT(e))
+         e != APR_BRIGADE_SENTINEL(bb) && !APR_BUCKET_IS_EOS(e);
+         e = APR_BRIGADE_FIRST(bb))
     {
-        if (APR_BUCKET_IS_EOS(e)) {
-            break;
-        }
-        rv = apr_bucket_read(e, &buf, &len, APR_BLOCK_READ);
-        if (rv != APR_SUCCESS) {
+        if (apr_bucket_read(e, &buf, &len, APR_BLOCK_READ)) {
             break;
         }
+        apr_bucket_delete(e);
     }
 }
 
@@ -1618,9 +1616,18 @@ static int cgid_handler(request_rec *r)
         b = apr_bucket_eos_create(c->bucket_alloc);
         APR_BRIGADE_INSERT_TAIL(bb, b);
 
-        if ((ret = ap_scan_script_header_err_brigade_ex(r, bb, sbuf,
-                                                        APLOG_MODULE_INDEX)))
-        {
+        ret = ap_scan_script_header_err_brigade_ex(r, bb, sbuf,
+                                                   APLOG_MODULE_INDEX);
+
+        /* xCGI has its own body framing mechanism which we don't
+         * match against any provided Content-Length, so let the
+         * core determine C-L vs T-E based on what's actually sent.
+         */
+        if (!apr_table_get(r->subprocess_env, AP_TRUST_CGILIKE_CL_ENVVAR))
+            apr_table_unset(r->headers_out, "Content-Length");
+        apr_table_unset(r->headers_out, "Transfer-Encoding");
+
+        if (ret != OK) {
             ret = log_script(r, conf, ret, dbuf, sbuf, bb, NULL);
 
             /*
diff --git a/modules/generators/mod_info.c b/modules/generators/mod_info.c
index e5e63de..1662242 100644
--- a/modules/generators/mod_info.c
+++ b/modules/generators/mod_info.c
@@ -80,9 +80,9 @@ typedef struct
 module AP_MODULE_DECLARE_DATA info_module;
 
 /* current file name when doing -DDUMP_CONFIG */
-const char *dump_config_fn_info;
+static const char *dump_config_fn_info;
 /* file handle when doing -DDUMP_CONFIG */
-apr_file_t *out = NULL;
+static apr_file_t *out = NULL;
 
 static void *create_info_config(apr_pool_t * p, server_rec * s)
 {
@@ -230,7 +230,7 @@ static int mod_info_has_cmd(const command_rec * cmds, ap_directive_t * dir)
     if (cmds == NULL)
         return 1;
     for (cmd = cmds; cmd->name; ++cmd) {
-        if (strcasecmp(cmd->name, dir->directive) == 0)
+        if (ap_cstr_casecmp(cmd->name, dir->directive) == 0)
             return 1;
     }
     return 0;
@@ -297,7 +297,7 @@ typedef struct
     hook_get_t get;
 } hook_lookup_t;
 
-static hook_lookup_t startup_hooks[] = {
+static const hook_lookup_t startup_hooks[] = {
     {"Pre-Config", ap_hook_get_pre_config},
     {"Check Configuration", ap_hook_get_check_config},
     {"Test Configuration", ap_hook_get_test_config},
@@ -311,7 +311,7 @@ static hook_lookup_t startup_hooks[] = {
     {NULL},
 };
 
-static hook_lookup_t request_hooks[] = {
+static const hook_lookup_t request_hooks[] = {
     {"Pre-Connection", ap_hook_get_pre_connection},
     {"Create Connection", ap_hook_get_create_connection},
     {"Process Connection", ap_hook_get_process_connection},
@@ -322,6 +322,7 @@ static hook_lookup_t request_hooks[] = {
     {"HTTP Scheme", ap_hook_get_http_scheme},
     {"Default Port", ap_hook_get_default_port},
     {"Quick Handler", ap_hook_get_quick_handler},
+    {"Pre-Translate Name", ap_hook_get_pre_translate_name},
     {"Translate Name", ap_hook_get_translate_name},
     {"Map to Storage", ap_hook_get_map_to_storage},
     {"Check Access", ap_hook_get_access_checker_ex},
@@ -339,7 +340,7 @@ static hook_lookup_t request_hooks[] = {
     {NULL},
 };
 
-static hook_lookup_t other_hooks[] = {
+static const hook_lookup_t other_hooks[] = {
     {"Monitor", ap_hook_get_monitor},
     {"Child Status", ap_hook_get_child_status},
     {"End Generation", ap_hook_get_end_generation},
@@ -378,7 +379,7 @@ static int module_find_hook(module * modp, hook_get_t hook_get)
 
 static void module_participate(request_rec * r,
                                module * modp,
-                               hook_lookup_t * lookup, int *comma)
+                               const hook_lookup_t *lookup, int *comma)
 {
     if (module_find_hook(modp, lookup->get)) {
         if (*comma) {
@@ -453,6 +454,12 @@ static int show_server_settings(request_rec * r)
                "
Compiled with APU Version: " "%s
\n", APU_VERSION_STRING); #endif + ap_rprintf(r, + "
Server loaded PCRE Version: " + "%s
\n", ap_pcre_version_string(AP_REG_PCRE_LOADED)); + ap_rprintf(r, + "
Compiled with PCRE Version: " + "%s
\n", ap_pcre_version_string(AP_REG_PCRE_COMPILED)); ap_rprintf(r, "
Module Magic Number: " "%d:%d
\n", MODULE_MAGIC_NUMBER_MAJOR, @@ -577,7 +584,7 @@ static int show_server_settings(request_rec * r) #ifdef BUFFERED_LOGS ap_rputs(" -D BUFFERED_LOGS\n", r); #ifdef PIPE_BUF - ap_rputs(" -D PIPE_BUF=%ld\n", (long) PIPE_BUF, r); + ap_rprintf(r, " -D PIPE_BUF=%ld\n", (long) PIPE_BUF); #endif #endif @@ -785,7 +792,7 @@ static int display_info(request_rec * r) " Server Information\n" "\n", r); ap_rputs("

" "Apache Server Information

\n", r); - if (!r->args || strcasecmp(r->args, "list")) { + if (!r->args || ap_cstr_casecmp(r->args, "list")) { if (!r->args) { ap_rputs("
Subpages:
", r); ap_rputs("Configuration Files, " @@ -819,19 +826,19 @@ static int display_info(request_rec * r) ap_rputs("

", r); } - if (!r->args || !strcasecmp(r->args, "server")) { + if (!r->args || !ap_cstr_casecmp(r->args, "server")) { show_server_settings(r); } - if (!r->args || !strcasecmp(r->args, "hooks")) { + if (!r->args || !ap_cstr_casecmp(r->args, "hooks")) { show_active_hooks(r); } - if (!r->args || !strcasecmp(r->args, "providers")) { + if (!r->args || !ap_cstr_casecmp(r->args, "providers")) { show_providers(r); } - if (r->args && 0 == strcasecmp(r->args, "config")) { + if (r->args && 0 == ap_cstr_casecmp(r->args, "config")) { ap_rputs("
Configuration:\n", r); mod_info_module_cmds(r, NULL, ap_conftree, 0, 0); ap_rputs("

", r); @@ -842,7 +849,7 @@ static int display_info(request_rec * r) modules = get_sorted_modules(r->pool); for (i = 0; i < modules->nelts; i++) { modp = APR_ARRAY_IDX(modules, i, module *); - if (!r->args || !strcasecmp(modp->name, r->args)) { + if (!r->args || !ap_cstr_casecmp(modp->name, r->args)) { ap_rprintf(r, "
Module Name: " "%s
\n", @@ -940,7 +947,7 @@ static int display_info(request_rec * r) } } } - if (!modp && r->args && strcasecmp(r->args, "server")) { + if (!modp && r->args && ap_cstr_casecmp(r->args, "server")) { ap_rputs("

No such module

\n", r); } } diff --git a/modules/generators/mod_status.c b/modules/generators/mod_status.c index 5917953..5bada07 100644 --- a/modules/generators/mod_status.c +++ b/modules/generators/mod_status.c @@ -186,7 +186,8 @@ static int status_handler(request_rec *r) apr_uint32_t up_time; ap_loadavg_t t; int j, i, res, written; - int ready; + int idle; + int graceful; int busy; unsigned long count; unsigned long lres, my_lres, conn_lres; @@ -203,6 +204,7 @@ static int status_handler(request_rec *r) char *stat_buffer; pid_t *pid_buffer, worker_pid; int *thread_idle_buffer = NULL; + int *thread_graceful_buffer = NULL; int *thread_busy_buffer = NULL; clock_t tu, ts, tcu, tcs; clock_t gu, gs, gcu, gcs; @@ -231,7 +233,8 @@ static int status_handler(request_rec *r) #endif #endif - ready = 0; + idle = 0; + graceful = 0; busy = 0; count = 0; bcount = 0; @@ -250,6 +253,7 @@ static int status_handler(request_rec *r) stat_buffer = apr_palloc(r->pool, server_limit * thread_limit * sizeof(char)); if (is_async) { thread_idle_buffer = apr_palloc(r->pool, server_limit * sizeof(int)); + thread_graceful_buffer = apr_palloc(r->pool, server_limit * sizeof(int)); thread_busy_buffer = apr_palloc(r->pool, server_limit * sizeof(int)); } @@ -318,6 +322,7 @@ static int status_handler(request_rec *r) ps_record = ap_get_scoreboard_process(i); if (is_async) { thread_idle_buffer[i] = 0; + thread_graceful_buffer[i] = 0; thread_busy_buffer[i] = 0; } for (j = 0; j < thread_limit; ++j) { @@ -336,18 +341,20 @@ static int status_handler(request_rec *r) && ps_record->pid) { if (res == SERVER_READY) { if (ps_record->generation == mpm_generation) - ready++; + idle++; if (is_async) thread_idle_buffer[i]++; } else if (res != SERVER_DEAD && res != SERVER_STARTING && res != SERVER_IDLE_KILL) { - busy++; - if (is_async) { - if (res == SERVER_GRACEFUL) - thread_idle_buffer[i]++; - else + if (res == SERVER_GRACEFUL) { + graceful++; + if (is_async) + thread_graceful_buffer[i]++; + } else { + busy++; + if (is_async) thread_busy_buffer[i]++; } } @@ -548,10 +555,10 @@ static int status_handler(request_rec *r) } /* ap_extended_status */ if (!short_report) - ap_rprintf(r, "
%d requests currently being processed, "
-                          "%d idle workers\n", busy, ready);
+        ap_rprintf(r, "%d requests currently being processed, %d workers gracefully restarting, "
+                      "%d idle workers\n", busy, graceful, idle);
     else
-        ap_rprintf(r, "BusyWorkers: %d\nIdleWorkers: %d\n", busy, ready);
+        ap_rprintf(r, "BusyWorkers: %d\nGracefulWorkers: %d\nIdleWorkers: %d\n", busy, graceful, idle);
 
     if (!short_report)
         ap_rputs("
", r); @@ -559,11 +566,6 @@ static int status_handler(request_rec *r) if (is_async) { int write_completion = 0, lingering_close = 0, keep_alive = 0, connections = 0, stopping = 0, procs = 0; - /* - * These differ from 'busy' and 'ready' in how gracefully finishing - * threads are counted. XXX: How to make this clear in the html? - */ - int busy_workers = 0, idle_workers = 0; if (!short_report) ap_rputs("\n\n\n" "" @@ -573,7 +575,7 @@ static int status_handler(request_rec *r) "" "\n" "" - "" + "" "\n", r); for (i = 0; i < server_limit; ++i) { ps_record = ap_get_scoreboard_process(i); @@ -582,8 +584,6 @@ static int status_handler(request_rec *r) write_completion += ps_record->write_completion; keep_alive += ps_record->keep_alive; lingering_close += ps_record->lingering_close; - busy_workers += thread_busy_buffer[i]; - idle_workers += thread_idle_buffer[i]; procs++; if (ps_record->quiescing) { stopping++; @@ -599,7 +599,7 @@ static int status_handler(request_rec *r) ap_rprintf(r, "" "" "" - "" + "" "" "\n", i, ps_record->pid, @@ -607,6 +607,7 @@ static int status_handler(request_rec *r) ps_record->connections, ps_record->not_accepting ? "no" : "yes", thread_busy_buffer[i], + thread_graceful_buffer[i], thread_idle_buffer[i], ps_record->write_completion, ps_record->keep_alive, @@ -618,25 +619,22 @@ static int status_handler(request_rec *r) ap_rprintf(r, "" "" "" - "" + "" "" "\n
SlotThreadsAsync connections
totalacceptingbusyidlebusygracefulidlewritingkeep-aliveclosing
%u%" APR_PID_T_FMT "%s%s%u%s%u%u%u%u%u%u%u%u
Sum%d%d%d %d%d%d%d%d%d%d%d
\n", procs, stopping, connections, - busy_workers, idle_workers, + busy, graceful, idle, write_completion, keep_alive, lingering_close); } else { ap_rprintf(r, "Processes: %d\n" "Stopping: %d\n" - "BusyWorkers: %d\n" - "IdleWorkers: %d\n" "ConnsTotal: %d\n" "ConnsAsyncWriting: %d\n" "ConnsAsyncKeepAlive: %d\n" "ConnsAsyncClosing: %d\n", procs, stopping, - busy_workers, idle_workers, connections, write_completion, keep_alive, lingering_close); } diff --git a/modules/http/byterange_filter.c b/modules/http/byterange_filter.c index de585c5..5ebe853 100644 --- a/modules/http/byterange_filter.c +++ b/modules/http/byterange_filter.c @@ -152,7 +152,6 @@ static int ap_set_byterange(request_rec *r, apr_off_t clength, *indexes = apr_array_make(r->pool, ranges, sizeof(indexes_t)); while ((cur = ap_getword(r->pool, &range, ','))) { char *dash; - char *errp; apr_off_t number, start, end; if (!*cur) @@ -169,7 +168,7 @@ static int ap_set_byterange(request_rec *r, apr_off_t clength, if (dash == cur) { /* In the form "-5" */ - if (apr_strtoff(&number, dash+1, &errp, 10) || *errp) { + if (!ap_parse_strict_length(&number, dash+1)) { return 0; } if (number < 1) { @@ -180,12 +179,12 @@ static int ap_set_byterange(request_rec *r, apr_off_t clength, } else { *dash++ = '\0'; - if (apr_strtoff(&number, cur, &errp, 10) || *errp) { + if (!ap_parse_strict_length(&number, cur)) { return 0; } start = number; if (*dash) { - if (apr_strtoff(&number, dash, &errp, 10) || *errp) { + if (!ap_parse_strict_length(&number, dash)) { return 0; } end = number; diff --git a/modules/http/http_core.c b/modules/http/http_core.c index 35869b4..c6cb473 100644 --- a/modules/http/http_core.c +++ b/modules/http/http_core.c @@ -140,16 +140,17 @@ static int ap_process_http_async_connection(conn_rec *c) AP_DEBUG_ASSERT(cs != NULL); AP_DEBUG_ASSERT(cs->state == CONN_STATE_READ_REQUEST_LINE); - while (cs->state == CONN_STATE_READ_REQUEST_LINE) { + if (cs->state == CONN_STATE_READ_REQUEST_LINE) { ap_update_child_status_from_conn(c->sbh, SERVER_BUSY_READ, c); - + if (ap_extended_status) { + ap_set_conn_count(c->sbh, r, c->keepalives); + } if ((r = ap_read_request(c))) { - - c->keepalive = AP_CONN_UNKNOWN; - /* process the request if it was read without error */ - if (r->status == HTTP_OK) { cs->state = CONN_STATE_HANDLER; + if (ap_extended_status) { + ap_set_conn_count(c->sbh, r, c->keepalives + 1); + } ap_update_child_status(c->sbh, SERVER_BUSY_WRITE, r); ap_process_async_request(r); /* After the call to ap_process_request, the @@ -200,9 +201,6 @@ static int ap_process_http_sync_connection(conn_rec *c) keep_alive_timeout = c->base_server->keep_alive_timeout; } - c->keepalive = AP_CONN_UNKNOWN; - /* process the request if it was read without error */ - if (r->status == HTTP_OK) { if (cs) cs->state = CONN_STATE_HANDLER; diff --git a/modules/http/http_etag.c b/modules/http/http_etag.c index 7f3c6d9..af74549 100644 --- a/modules/http/http_etag.c +++ b/modules/http/http_etag.c @@ -16,6 +16,9 @@ #include "apr_strings.h" #include "apr_thread_proc.h" /* for RLIMIT stuff */ +#include "apr_sha1.h" +#include "apr_base64.h" +#include "apr_buckets.h" #define APR_WANT_STRFUNC #include "apr_want.h" @@ -24,9 +27,16 @@ #include "http_config.h" #include "http_connection.h" #include "http_core.h" +#include "http_log.h" #include "http_protocol.h" /* For index_of_response(). Grump. 
*/ #include "http_request.h" +#if APR_HAS_MMAP +#include "apr_mmap.h" +#endif /* APR_HAS_MMAP */ + +#define SHA1_DIGEST_BASE64_LEN 4*(APR_SHA1_DIGESTSIZE/3) + /* Generate the human-readable hex representation of an apr_uint64_t * (basically a faster version of 'sprintf("%llx")') */ @@ -53,19 +63,159 @@ static char *etag_uint64_to_hex(char *next, apr_uint64_t u) #define ETAG_WEAK "W/" #define CHARS_PER_UINT64 (sizeof(apr_uint64_t) * 2) + +static void etag_start(char *etag, const char *weak, char **next) +{ + if (weak) { + while (*weak) { + *etag++ = *weak++; + } + } + *etag++ = '"'; + + *next = etag; +} + +static void etag_end(char *next, const char *vlv, apr_size_t vlv_len) +{ + if (vlv) { + *next++ = ';'; + apr_cpystrn(next, vlv, vlv_len); + } + else { + *next++ = '"'; + *next = '\0'; + } +} + +/* + * Construct a strong ETag by creating a SHA1 hash across the file content. + */ +static char *make_digest_etag(request_rec *r, etag_rec *er, char *vlv, + apr_size_t vlv_len, char *weak, apr_size_t weak_len) +{ + apr_sha1_ctx_t context; + unsigned char digest[APR_SHA1_DIGESTSIZE]; + apr_file_t *fd = NULL; + core_dir_config *cfg; + char *etag, *next; + apr_bucket_brigade *bb; + apr_bucket *e; + + apr_size_t nbytes; + apr_off_t offset = 0, zero = 0, len = 0; + apr_status_t status; + + cfg = (core_dir_config *)ap_get_core_module_config(r->per_dir_config); + + if (er->fd) { + fd = er->fd; + } + else if (er->pathname) { + if ((status = apr_file_open(&fd, er->pathname, APR_READ | APR_BINARY, + 0, r->pool)) != APR_SUCCESS) { + ap_log_rerror(APLOG_MARK, APLOG_ERR, status, r, APLOGNO(10251) + "Make etag: could not open %s", er->pathname); + return ""; + } + } + if (!fd) { + return ""; + } + + if ((status = apr_file_seek(fd, APR_CUR, &offset)) != APR_SUCCESS) { + ap_log_rerror(APLOG_MARK, APLOG_ERR, status, r, APLOGNO(10252) + "Make etag: could not seek"); + if (er->pathname) { + apr_file_close(fd); + } + return ""; + } + + if ((status = apr_file_seek(fd, APR_END, &len)) != APR_SUCCESS) { + ap_log_rerror(APLOG_MARK, APLOG_ERR, status, r, APLOGNO(10258) + "Make etag: could not seek"); + if (er->pathname) { + apr_file_close(fd); + } + return ""; + } + + if ((status = apr_file_seek(fd, APR_SET, &zero)) != APR_SUCCESS) { + ap_log_rerror(APLOG_MARK, APLOG_ERR, status, r, APLOGNO(10253) + "Make etag: could not seek"); + if (er->pathname) { + apr_file_close(fd); + } + return ""; + } + + bb = apr_brigade_create(r->pool, r->connection->bucket_alloc); + + e = apr_brigade_insert_file(bb, fd, 0, len, r->pool); + +#if APR_HAS_MMAP + if (cfg->enable_mmap == ENABLE_MMAP_OFF) { + (void)apr_bucket_file_enable_mmap(e, 0); + } +#endif + + apr_sha1_init(&context); + while (!APR_BRIGADE_EMPTY(bb)) + { + const char *str; + + e = APR_BRIGADE_FIRST(bb); + + if ((status = apr_bucket_read(e, &str, &nbytes, APR_BLOCK_READ)) != APR_SUCCESS) { + apr_brigade_destroy(bb); + ap_log_rerror(APLOG_MARK, APLOG_ERR, status, r, APLOGNO(10254) + "Make etag: could not read"); + if (er->pathname) { + apr_file_close(fd); + } + return ""; + } + + apr_sha1_update(&context, str, nbytes); + apr_bucket_delete(e); + } + + if ((status = apr_file_seek(fd, APR_SET, &offset)) != APR_SUCCESS) { + ap_log_rerror(APLOG_MARK, APLOG_ERR, status, r, APLOGNO(10255) + "Make etag: could not seek"); + if (er->pathname) { + apr_file_close(fd); + } + return ""; + } + apr_sha1_final(digest, &context); + + etag = apr_palloc(r->pool, weak_len + sizeof("\"\"") + + SHA1_DIGEST_BASE64_LEN + vlv_len + 4); + + etag_start(etag, weak, &next); + next += 
apr_base64_encode_binary(next, digest, APR_SHA1_DIGESTSIZE) - 1; + etag_end(next, vlv, vlv_len); + + if (er->pathname) { + apr_file_close(fd); + } + + return etag; +} + /* * Construct an entity tag (ETag) from resource information. If it's a real * file, build in some of the file characteristics. If the modification time * is newer than (request-time minus 1 second), mark the ETag as weak - it - * could be modified again in as short an interval. We rationalize the - * modification time we're given to keep it from being in the future. + * could be modified again in as short an interval. */ -AP_DECLARE(char *) ap_make_etag(request_rec *r, int force_weak) +AP_DECLARE(char *) ap_make_etag_ex(request_rec *r, etag_rec *er) { - char *weak; - apr_size_t weak_len; - char *etag; - char *next; + char *weak = NULL; + apr_size_t weak_len = 0, vlv_len = 0; + char *etag, *next, *vlv; core_dir_config *cfg; etag_components_t etag_bits; etag_components_t bits_added; @@ -73,13 +223,62 @@ AP_DECLARE(char *) ap_make_etag(request_rec *r, int force_weak) cfg = (core_dir_config *)ap_get_core_module_config(r->per_dir_config); etag_bits = (cfg->etag_bits & (~ cfg->etag_remove)) | cfg->etag_add; + if (er->force_weak) { + weak = ETAG_WEAK; + weak_len = sizeof(ETAG_WEAK); + } + + if (r->vlist_validator) { + + /* If we have a variant list validator (vlv) due to the + * response being negotiated, then we create a structured + * entity tag which merges the variant etag with the variant + * list validator (vlv). This merging makes revalidation + * somewhat safer, ensures that caches which can deal with + * Vary will (eventually) be updated if the set of variants is + * changed, and is also a protocol requirement for transparent + * content negotiation. + */ + + /* if the variant list validator is weak, we make the whole + * structured etag weak. If we would not, then clients could + * have problems merging range responses if we have different + * variants with the same non-globally-unique strong etag. + */ + + vlv = r->vlist_validator; + if (vlv[0] == 'W') { + vlv += 3; + weak = ETAG_WEAK; + weak_len = sizeof(ETAG_WEAK); + } + else { + vlv++; + } + vlv_len = strlen(vlv); + + } + else { + vlv = NULL; + vlv_len = 0; + } + + /* + * Did a module flag the need for a strong etag, or did the + * configuration tell us to generate a digest? + */ + if (er->finfo->filetype == APR_REG && + (AP_REQUEST_IS_STRONG_ETAG(r) || (etag_bits & ETAG_DIGEST))) { + + return make_digest_etag(r, er, vlv, vlv_len, weak, weak_len); + } + /* * If it's a file (or we wouldn't be here) and no ETags * should be set for files, return an empty string and * note it for the header-sender to ignore. */ if (etag_bits & ETAG_NONE) { - apr_table_setn(r->notes, "no-etag", "omit"); return ""; } @@ -98,123 +297,117 @@ AP_DECLARE(char *) ap_make_etag(request_rec *r, int force_weak) * be modified again later in the second, and the validation * would be incorrect. */ - if ((r->request_time - r->mtime > (1 * APR_USEC_PER_SEC)) && - !force_weak) { - weak = NULL; - weak_len = 0; - } - else { + if ((er->request_time - er->finfo->mtime < (1 * APR_USEC_PER_SEC))) { weak = ETAG_WEAK; weak_len = sizeof(ETAG_WEAK); } - if (r->finfo.filetype != APR_NOFILE) { + if (er->finfo->filetype != APR_NOFILE) { /* * ETag gets set to [W/]"inode-size-mtime", modulo any * FileETag keywords. 
*/ etag = apr_palloc(r->pool, weak_len + sizeof("\"--\"") + - 3 * CHARS_PER_UINT64 + 1); - next = etag; - if (weak) { - while (*weak) { - *next++ = *weak++; - } - } - *next++ = '"'; + 3 * CHARS_PER_UINT64 + vlv_len + 2); + + etag_start(etag, weak, &next); + bits_added = 0; if (etag_bits & ETAG_INODE) { - next = etag_uint64_to_hex(next, r->finfo.inode); + next = etag_uint64_to_hex(next, er->finfo->inode); bits_added |= ETAG_INODE; } if (etag_bits & ETAG_SIZE) { if (bits_added != 0) { *next++ = '-'; } - next = etag_uint64_to_hex(next, r->finfo.size); + next = etag_uint64_to_hex(next, er->finfo->size); bits_added |= ETAG_SIZE; } if (etag_bits & ETAG_MTIME) { if (bits_added != 0) { *next++ = '-'; } - next = etag_uint64_to_hex(next, r->mtime); + next = etag_uint64_to_hex(next, er->finfo->mtime); } - *next++ = '"'; - *next = '\0'; + + etag_end(next, vlv, vlv_len); + } else { /* * Not a file document, so just use the mtime: [W/]"mtime" */ etag = apr_palloc(r->pool, weak_len + sizeof("\"\"") + - CHARS_PER_UINT64 + 1); - next = etag; - if (weak) { - while (*weak) { - *next++ = *weak++; - } - } - *next++ = '"'; - next = etag_uint64_to_hex(next, r->mtime); - *next++ = '"'; - *next = '\0'; + CHARS_PER_UINT64 + vlv_len + 2); + + etag_start(etag, weak, &next); + next = etag_uint64_to_hex(next, er->finfo->mtime); + etag_end(next, vlv, vlv_len); + } return etag; } +AP_DECLARE(char *) ap_make_etag(request_rec *r, int force_weak) +{ + etag_rec er; + + er.vlist_validator = NULL; + er.request_time = r->request_time; + er.finfo = &r->finfo; + er.pathname = r->filename; + er.fd = NULL; + er.force_weak = force_weak; + + return ap_make_etag_ex(r, &er); +} + AP_DECLARE(void) ap_set_etag(request_rec *r) { char *etag; - char *variant_etag, *vlv; - int vlv_weak; - if (!r->vlist_validator) { - etag = ap_make_etag(r, 0); + etag_rec er; - /* If we get a blank etag back, don't set the header. */ - if (!etag[0]) { - return; - } + er.vlist_validator = r->vlist_validator; + er.request_time = r->request_time; + er.finfo = &r->finfo; + er.pathname = r->filename; + er.fd = NULL; + er.force_weak = 0; + + etag = ap_make_etag_ex(r, &er); + + if (etag && etag[0]) { + apr_table_setn(r->headers_out, "ETag", etag); } else { - /* If we have a variant list validator (vlv) due to the - * response being negotiated, then we create a structured - * entity tag which merges the variant etag with the variant - * list validator (vlv). This merging makes revalidation - * somewhat safer, ensures that caches which can deal with - * Vary will (eventually) be updated if the set of variants is - * changed, and is also a protocol requirement for transparent - * content negotiation. - */ + apr_table_setn(r->notes, "no-etag", "omit"); + } - /* if the variant list validator is weak, we make the whole - * structured etag weak. If we would not, then clients could - * have problems merging range responses if we have different - * variants with the same non-globally-unique strong etag. - */ +} - vlv = r->vlist_validator; - vlv_weak = (vlv[0] == 'W'); +AP_DECLARE(void) ap_set_etag_fd(request_rec *r, apr_file_t *fd) +{ + char *etag; - variant_etag = ap_make_etag(r, vlv_weak); + etag_rec er; - /* If we get a blank etag back, don't append vlv and stop now. 
*/ - if (!variant_etag[0]) { - return; - } + er.vlist_validator = r->vlist_validator; + er.request_time = r->request_time; + er.finfo = &r->finfo; + er.pathname = NULL; + er.fd = fd; + er.force_weak = 0; - /* merge variant_etag and vlv into a structured etag */ - variant_etag[strlen(variant_etag) - 1] = '\0'; - if (vlv_weak) { - vlv += 3; - } - else { - vlv++; - } - etag = apr_pstrcat(r->pool, variant_etag, ";", vlv, NULL); + etag = ap_make_etag_ex(r, &er); + + if (etag && etag[0]) { + apr_table_setn(r->headers_out, "ETag", etag); + } + else { + apr_table_setn(r->notes, "no-etag", "omit"); } - apr_table_setn(r->headers_out, "ETag", etag); } diff --git a/modules/http/http_filters.c b/modules/http/http_filters.c index 9828cdf..f20aee7 100644 --- a/modules/http/http_filters.c +++ b/modules/http/http_filters.c @@ -79,7 +79,8 @@ typedef struct http_filter_ctx BODY_CHUNK_END_LF, /* got CR after data, expect LF */ BODY_CHUNK_TRAILER /* trailers */ } state; - unsigned int eos_sent :1; + unsigned int eos_sent :1, + seen_data:1; apr_bucket_brigade *bb; } http_ctx_t; @@ -348,7 +349,6 @@ apr_status_t ap_http_filter(ap_filter_t *f, apr_bucket_brigade *b, http_ctx_t *ctx = f->ctx; apr_status_t rv; int http_error = HTTP_REQUEST_ENTITY_TOO_LARGE; - apr_bucket_brigade *bb; int again; /* just get out of the way of things we don't want. */ @@ -361,7 +361,6 @@ apr_status_t ap_http_filter(ap_filter_t *f, apr_bucket_brigade *b, f->ctx = ctx = apr_pcalloc(f->r->pool, sizeof(*ctx)); ctx->state = BODY_NONE; ctx->bb = apr_brigade_create(f->r->pool, f->c->bucket_alloc); - bb = ctx->bb; /* LimitRequestBody does not apply to proxied responses. * Consider implementing this check in its own filter. @@ -379,8 +378,7 @@ apr_status_t ap_http_filter(ap_filter_t *f, apr_bucket_brigade *b, lenp = apr_table_get(f->r->headers_in, "Content-Length"); if (tenc) { - if (strcasecmp(tenc, "chunked") == 0 /* fast path */ - || ap_find_last_token(f->r->pool, tenc, "chunked")) { + if (ap_is_chunked(f->r->pool, tenc)) { ctx->state = BODY_CHUNK; } else if (f->r->proxyreq == PROXYREQ_RESPONSE) { @@ -406,16 +404,13 @@ apr_status_t ap_http_filter(ap_filter_t *f, apr_bucket_brigade *b, lenp = NULL; } if (lenp) { - char *endstr; - ctx->state = BODY_LENGTH; /* Protects against over/underflow, non-digit chars in the - * string (excluding leading space) (the endstr checks) - * and a negative number. */ - if (apr_strtoff(&ctx->remaining, lenp, &endstr, 10) - || endstr == lenp || *endstr || ctx->remaining < 0) { - + * string, leading plus/minus signs, trailing characters and + * a negative number. + */ + if (!ap_parse_strict_length(&ctx->remaining, lenp)) { ctx->remaining = 0; ap_log_rerror(APLOG_MARK, APLOG_INFO, 0, f->r, APLOGNO(01587) "Invalid Content-Length"); @@ -452,42 +447,46 @@ apr_status_t ap_http_filter(ap_filter_t *f, apr_bucket_brigade *b, ctx->eos_sent = 1; return APR_SUCCESS; } + } - /* Since we're about to read data, send 100-Continue if needed. - * Only valid on chunked and C-L bodies where the C-L is > 0. */ - if ((ctx->state == BODY_CHUNK || - (ctx->state == BODY_LENGTH && ctx->remaining > 0)) && - f->r->expecting_100 && f->r->proto_num >= HTTP_VERSION(1,1) && - !(f->r->eos_sent || f->r->bytes_sent)) { - if (!ap_is_HTTP_SUCCESS(f->r->status)) { - ctx->state = BODY_NONE; - ctx->eos_sent = 1; - } - else { - char *tmp; - int len; - - /* if we send an interim response, we're no longer - * in a state of expecting one. 
- */ - f->r->expecting_100 = 0; - tmp = apr_pstrcat(f->r->pool, AP_SERVER_PROTOCOL " ", - ap_get_status_line(HTTP_CONTINUE), CRLF CRLF, - NULL); - len = strlen(tmp); - ap_xlate_proto_to_ascii(tmp, len); - apr_brigade_cleanup(bb); - e = apr_bucket_pool_create(tmp, len, f->r->pool, - f->c->bucket_alloc); - APR_BRIGADE_INSERT_HEAD(bb, e); - e = apr_bucket_flush_create(f->c->bucket_alloc); - APR_BRIGADE_INSERT_TAIL(bb, e); - - rv = ap_pass_brigade(f->c->output_filters, bb); - if (rv != APR_SUCCESS) { - return AP_FILTER_ERROR; - } - } + /* Since we're about to read data, send 100-Continue if needed. + * Only valid on chunked and C-L bodies where the C-L is > 0. + * + * If the read is to be nonblocking though, the caller may not want to + * handle this just now (e.g. mod_proxy_http), and is prepared to read + * nothing if the client really waits for 100 continue, so we don't + * send it now and wait for later blocking read. + * + * In any case, even if r->expecting remains set at the end of the + * request handling, ap_set_keepalive() will finally do the right + * thing (i.e. "Connection: close" the connection). + */ + if (block == APR_BLOCK_READ + && (ctx->state == BODY_CHUNK + || (ctx->state == BODY_LENGTH && ctx->remaining > 0)) + && f->r->expecting_100 && f->r->proto_num >= HTTP_VERSION(1,1) + && !(ctx->eos_sent || f->r->eos_sent || f->r->bytes_sent)) { + if (!ap_is_HTTP_SUCCESS(f->r->status)) { + ctx->state = BODY_NONE; + ctx->eos_sent = 1; /* send EOS below */ + } + else if (!ctx->seen_data) { + int saved_status = f->r->status; + const char *saved_status_line = f->r->status_line; + f->r->status = HTTP_CONTINUE; + f->r->status_line = NULL; + ap_send_interim_response(f->r, 0); + AP_DEBUG_ASSERT(!f->r->expecting_100); + f->r->status_line = saved_status_line; + f->r->status = saved_status; + } + else { + /* https://tools.ietf.org/html/rfc7231#section-5.1.1 + * A server MAY omit sending a 100 (Continue) response if it + * has already received some or all of the message body for + * the corresponding request [...] + */ + f->r->expecting_100 = 0; } } @@ -538,9 +537,11 @@ apr_status_t ap_http_filter(ap_filter_t *f, apr_bucket_brigade *b, int parsing = 0; rv = apr_bucket_read(e, &buffer, &len, APR_BLOCK_READ); - if (rv == APR_SUCCESS) { parsing = 1; + if (len > 0) { + ctx->seen_data = 1; + } rv = parse_chunk_size(ctx, buffer, len, f->r->server->limit_req_fieldsize, strict); } @@ -602,6 +603,9 @@ apr_status_t ap_http_filter(ap_filter_t *f, apr_bucket_brigade *b, /* How many bytes did we just read? */ apr_brigade_length(b, 0, &totalread); + if (totalread > 0) { + ctx->seen_data = 1; + } /* If this happens, we have a bucket of unknown length. Die because * it means our assumptions have changed. 
*/ @@ -774,6 +778,18 @@ static APR_INLINE int check_headers(request_rec *r) struct check_header_ctx ctx; core_server_config *conf = ap_get_core_module_config(r->server->module_config); + const char *val; + + if ((val = apr_table_get(r->headers_out, "Transfer-Encoding"))) { + if (apr_table_get(r->headers_out, "Content-Length")) { + apr_table_unset(r->headers_out, "Content-Length"); + r->connection->keepalive = AP_CONN_CLOSE; + } + if (!ap_is_chunked(r->pool, val)) { + r->connection->keepalive = AP_CONN_CLOSE; + return 0; + } + } ctx.r = r; ctx.strict = (conf->http_conformance != AP_HTTP_CONFORMANCE_UNSAFE); @@ -872,7 +888,7 @@ static int uniq_field_values(void *d, const char *key, const char *val) */ for (i = 0, strpp = (char **) values->elts; i < values->nelts; ++i, ++strpp) { - if (*strpp && strcasecmp(*strpp, start) == 0) { + if (*strpp && ap_cstr_casecmp(*strpp, start) == 0) { break; } } @@ -1290,6 +1306,7 @@ AP_CORE_DECLARE_NONSTD(apr_status_t) ap_http_header_filter(ap_filter_t *f, request_rec *r = f->r; conn_rec *c = r->connection; const char *clheader; + int header_only = (r->header_only || AP_STATUS_IS_HEADER_ONLY(r->status)); const char *protocol = NULL; apr_bucket *e; apr_bucket_brigade *b2; @@ -1307,7 +1324,7 @@ AP_CORE_DECLARE_NONSTD(apr_status_t) ap_http_header_filter(ap_filter_t *f, } else if (ctx->headers_sent) { /* Eat body if response must not have one. */ - if (r->header_only || AP_STATUS_IS_HEADER_ONLY(r->status)) { + if (header_only) { /* Still next filters may be waiting for EOS, so pass it (alone) * when encountered and be done with this filter. */ @@ -1348,6 +1365,9 @@ AP_CORE_DECLARE_NONSTD(apr_status_t) ap_http_header_filter(ap_filter_t *f, */ apr_table_clear(r->headers_out); apr_table_clear(r->err_headers_out); + r->content_type = r->content_encoding = NULL; + r->content_languages = NULL; + r->clength = r->chunked = 0; apr_brigade_cleanup(b); /* Don't recall ap_die() if we come back here (from its own internal @@ -1364,8 +1384,6 @@ AP_CORE_DECLARE_NONSTD(apr_status_t) ap_http_header_filter(ap_filter_t *f, APR_BRIGADE_INSERT_TAIL(b, e); e = apr_bucket_eos_create(c->bucket_alloc); APR_BRIGADE_INSERT_TAIL(b, e); - r->content_type = r->content_encoding = NULL; - r->content_languages = NULL; ap_set_content_length(r, 0); recursive_error = 1; } @@ -1392,6 +1410,7 @@ AP_CORE_DECLARE_NONSTD(apr_status_t) ap_http_header_filter(ap_filter_t *f, if (!apr_is_empty_table(r->err_headers_out)) { r->headers_out = apr_table_overlay(r->pool, r->err_headers_out, r->headers_out); + apr_table_clear(r->err_headers_out); } /* @@ -1411,6 +1430,17 @@ AP_CORE_DECLARE_NONSTD(apr_status_t) ap_http_header_filter(ap_filter_t *f, fixup_vary(r); } + + /* + * Control cachability for non-cacheable responses if not already set by + * some other part of the server configuration. + */ + if (r->no_cache && !apr_table_get(r->headers_out, "Expires")) { + char *date = apr_palloc(r->pool, APR_RFC822_DATE_LEN); + ap_recent_rfc822_date(date, r->request_time); + apr_table_addn(r->headers_out, "Expires", date); + } + /* * Now remove any ETag response header field if earlier processing * says so (such as a 'FileETag None' directive). 
@@ -1423,6 +1453,7 @@ AP_CORE_DECLARE_NONSTD(apr_status_t) ap_http_header_filter(ap_filter_t *f, basic_http_header_check(r, &protocol); ap_set_keepalive(r); + /* 204/304 responses don't have content related headers */ if (AP_STATUS_IS_HEADER_ONLY(r->status)) { apr_table_unset(r->headers_out, "Transfer-Encoding"); apr_table_unset(r->headers_out, "Content-Length"); @@ -1453,7 +1484,7 @@ AP_CORE_DECLARE_NONSTD(apr_status_t) ap_http_header_filter(ap_filter_t *f, while (field && (token = ap_get_list_item(r->pool, &field)) != NULL) { for (i = 0; i < r->content_languages->nelts; ++i) { - if (!strcasecmp(token, languages[i])) + if (!ap_cstr_casecmp(token, languages[i])) break; } if (i == r->content_languages->nelts) { @@ -1465,16 +1496,6 @@ AP_CORE_DECLARE_NONSTD(apr_status_t) ap_http_header_filter(ap_filter_t *f, apr_table_setn(r->headers_out, "Content-Language", field); } - /* - * Control cachability for non-cacheable responses if not already set by - * some other part of the server configuration. - */ - if (r->no_cache && !apr_table_get(r->headers_out, "Expires")) { - char *date = apr_palloc(r->pool, APR_RFC822_DATE_LEN); - ap_recent_rfc822_date(date, r->request_time); - apr_table_addn(r->headers_out, "Expires", date); - } - /* This is a hack, but I can't find anyway around it. The idea is that * we don't want to send out 0 Content-Lengths if it is a head request. * This happens when modules try to outsmart the server, and return @@ -1499,37 +1520,25 @@ AP_CORE_DECLARE_NONSTD(apr_status_t) ap_http_header_filter(ap_filter_t *f, h.pool = r->pool; h.bb = b2; - if (r->status == HTTP_NOT_MODIFIED) { - apr_table_do((int (*)(void *, const char *, const char *)) form_header_field, - (void *) &h, r->headers_out, - "Connection", - "Keep-Alive", - "ETag", - "Content-Location", - "Expires", - "Cache-Control", - "Vary", - "Warning", - "WWW-Authenticate", - "Proxy-Authenticate", - "Set-Cookie", - "Set-Cookie2", - NULL); - } - else { - send_all_header_fields(&h, r); - } + send_all_header_fields(&h, r); terminate_header(b2); - rv = ap_pass_brigade(f->next, b2); - if (rv != APR_SUCCESS) { - goto out; + if (header_only) { + e = APR_BRIGADE_LAST(b); + if (e != APR_BRIGADE_SENTINEL(b) && APR_BUCKET_IS_EOS(e)) { + APR_BUCKET_REMOVE(e); + APR_BRIGADE_INSERT_TAIL(b2, e); + ap_remove_output_filter(f); + } + apr_brigade_cleanup(b); } + + rv = ap_pass_brigade(f->next, b2); + apr_brigade_cleanup(b2); ctx->headers_sent = 1; - if (r->header_only || AP_STATUS_IS_HEADER_ONLY(r->status)) { - apr_brigade_cleanup(b); + if (rv != APR_SUCCESS || header_only) { goto out; } @@ -1605,9 +1614,9 @@ AP_DECLARE(int) ap_map_http_request_error(apr_status_t rv, int status) */ AP_DECLARE(int) ap_discard_request_body(request_rec *r) { + int rc = OK; + conn_rec *c = r->connection; apr_bucket_brigade *bb; - int seen_eos; - apr_status_t rv; /* Sometimes we'll get in a state where the input handling has * detected an error where we want to drop the connection, so if @@ -1616,54 +1625,57 @@ AP_DECLARE(int) ap_discard_request_body(request_rec *r) * * This function is also a no-op on a subrequest. 
*/ - if (r->main || r->connection->keepalive == AP_CONN_CLOSE || - ap_status_drops_connection(r->status)) { + if (r->main || c->keepalive == AP_CONN_CLOSE) { + return OK; + } + if (ap_status_drops_connection(r->status)) { + c->keepalive = AP_CONN_CLOSE; return OK; } bb = apr_brigade_create(r->pool, r->connection->bucket_alloc); - seen_eos = 0; - do { - apr_bucket *bucket; + for (;;) { + apr_status_t rv; rv = ap_get_brigade(r->input_filters, bb, AP_MODE_READBYTES, APR_BLOCK_READ, HUGE_STRING_LEN); - if (rv != APR_SUCCESS) { - apr_brigade_destroy(bb); - return ap_map_http_request_error(rv, HTTP_BAD_REQUEST); + rc = ap_map_http_request_error(rv, HTTP_BAD_REQUEST); + goto cleanup; } - for (bucket = APR_BRIGADE_FIRST(bb); - bucket != APR_BRIGADE_SENTINEL(bb); - bucket = APR_BUCKET_NEXT(bucket)) - { - const char *data; - apr_size_t len; - - if (APR_BUCKET_IS_EOS(bucket)) { - seen_eos = 1; - break; - } + while (!APR_BRIGADE_EMPTY(bb)) { + apr_bucket *b = APR_BRIGADE_FIRST(bb); - /* These are metadata buckets. */ - if (bucket->length == 0) { - continue; + if (APR_BUCKET_IS_EOS(b)) { + goto cleanup; } - /* We MUST read because in case we have an unknown-length - * bucket or one that morphs, we want to exhaust it. + /* There is no need to read empty or metadata buckets or + * buckets of known length, but we MUST read buckets of + * unknown length in order to exhaust them. */ - rv = apr_bucket_read(bucket, &data, &len, APR_BLOCK_READ); - if (rv != APR_SUCCESS) { - apr_brigade_destroy(bb); - return HTTP_BAD_REQUEST; + if (b->length == (apr_size_t)-1) { + apr_size_t len; + const char *data; + + rv = apr_bucket_read(b, &data, &len, APR_BLOCK_READ); + if (rv != APR_SUCCESS) { + rc = HTTP_BAD_REQUEST; + goto cleanup; + } } + + apr_bucket_delete(b); } - apr_brigade_cleanup(bb); - } while (!seen_eos); + } - return OK; +cleanup: + apr_brigade_cleanup(bb); + if (rc != OK) { + c->keepalive = AP_CONN_CLOSE; + } + return rc; } /* Here we deal with getting the request message body from the client. @@ -1707,13 +1719,14 @@ AP_DECLARE(int) ap_setup_client_block(request_rec *r, int read_policy) { const char *tenc = apr_table_get(r->headers_in, "Transfer-Encoding"); const char *lenp = apr_table_get(r->headers_in, "Content-Length"); + apr_off_t limit_req_body = ap_get_limit_req_body(r); r->read_body = read_policy; r->read_chunked = 0; r->remaining = 0; if (tenc) { - if (strcasecmp(tenc, "chunked")) { + if (ap_cstr_casecmp(tenc, "chunked")) { ap_log_rerror(APLOG_MARK, APLOG_INFO, 0, r, APLOGNO(01592) "Unknown Transfer-Encoding %s", tenc); return HTTP_NOT_IMPLEMENTED; @@ -1727,13 +1740,10 @@ AP_DECLARE(int) ap_setup_client_block(request_rec *r, int read_policy) r->read_chunked = 1; } else if (lenp) { - char *endstr; - - if (apr_strtoff(&r->remaining, lenp, &endstr, 10) - || *endstr || r->remaining < 0) { + if (!ap_parse_strict_length(&r->remaining, lenp)) { r->remaining = 0; ap_log_rerror(APLOG_MARK, APLOG_INFO, 0, r, APLOGNO(01594) - "Invalid Content-Length"); + "Invalid Content-Length '%s'", lenp); return HTTP_BAD_REQUEST; } } @@ -1745,6 +1755,11 @@ AP_DECLARE(int) ap_setup_client_block(request_rec *r, int read_policy) return HTTP_REQUEST_ENTITY_TOO_LARGE; } + if (limit_req_body > 0 && (r->remaining > limit_req_body)) { + /* will be logged when the body is discarded */ + return HTTP_REQUEST_ENTITY_TOO_LARGE; + } + #ifdef AP_DEBUG { /* Make sure ap_getline() didn't leave any droppings. 
*/ @@ -1852,6 +1867,7 @@ AP_DECLARE(long) ap_get_client_block(request_rec *r, char *buffer, /* Context struct for ap_http_outerror_filter */ typedef struct { int seen_eoc; + int first_error; } outerror_filter_ctx_t; /* Filter to handle any error buckets on output */ @@ -1880,10 +1896,18 @@ apr_status_t ap_http_outerror_filter(ap_filter_t *f, /* stream aborted and we have not ended it yet */ r->connection->keepalive = AP_CONN_CLOSE; } + /* + * Memorize the status code of the first error bucket for possible + * later use. + */ + if (!ctx->first_error) { + ctx->first_error = ((ap_bucket_error *)(e->data))->status; + } continue; } /* Detect EOC buckets and memorize this in the context. */ if (AP_BUCKET_IS_EOC(e)) { + r->connection->keepalive = AP_CONN_CLOSE; ctx->seen_eoc = 1; } } @@ -1907,6 +1931,18 @@ apr_status_t ap_http_outerror_filter(ap_filter_t *f, * EOS bucket. */ if (ctx->seen_eoc) { + /* + * Set the request status to the status of the first error bucket. + * This should ensure that we log an appropriate status code in + * the access log. + * We need to set r->status on each call after we noticed an EOC as + * data bucket generators like ap_die might have changed the status + * code. But we know better in this case and insist on the status + * code that we have seen in the error bucket. + */ + if (ctx->first_error) { + r->status = ctx->first_error; + } for (e = APR_BRIGADE_FIRST(b); e != APR_BRIGADE_SENTINEL(b); e = APR_BUCKET_NEXT(e)) diff --git a/modules/http/http_protocol.c b/modules/http/http_protocol.c index e419eb6..d031f24 100644 --- a/modules/http/http_protocol.c +++ b/modules/http/http_protocol.c @@ -60,7 +60,7 @@ APLOG_USE_MODULE(http); -/* New Apache routine to map status codes into array indicies +/* New Apache routine to map status codes into array indices * e.g. 100 -> 0, 101 -> 1, 200 -> 2 ... * The number of status lines must equal the value of * RESPONSE_CODES (httpd.h) and must be listed in order. @@ -257,10 +257,9 @@ AP_DECLARE(int) ap_set_keepalive(request_rec *r) && (r->header_only || AP_STATUS_IS_HEADER_ONLY(r->status) || apr_table_get(r->headers_out, "Content-Length") - || ap_find_last_token(r->pool, + || ap_is_chunked(r->pool, apr_table_get(r->headers_out, - "Transfer-Encoding"), - "chunked") + "Transfer-Encoding")) || ((r->proto_num >= HTTP_VERSION(1,1)) && (r->chunked = 1))) /* THIS CODE IS CORRECT, see above. */ && r->server->keep_alive @@ -987,14 +986,17 @@ AP_DECLARE(const char *) ap_method_name_of(apr_pool_t *p, int methnum) * from status_lines[shortcut[i]] to status_lines[shortcut[i+1]-1]; * or use NULL to fill the gaps. 
*/ -AP_DECLARE(int) ap_index_of_response(int status) +static int index_of_response(int status) { - static int shortcut[6] = {0, LEVEL_200, LEVEL_300, LEVEL_400, - LEVEL_500, RESPONSE_CODES}; + static int shortcut[6] = {0, LEVEL_200, LEVEL_300, LEVEL_400, LEVEL_500, + RESPONSE_CODES}; int i, pos; - if (status < 100) { /* Below 100 is illegal for HTTP status */ - return LEVEL_500; + if (status < 100) { /* Below 100 is illegal for HTTP status */ + return -1; + } + if (status > 999) { /* Above 999 is also illegal for HTTP status */ + return -1; } for (i = 0; i < 5; i++) { @@ -1005,11 +1007,29 @@ AP_DECLARE(int) ap_index_of_response(int status) return pos; } else { - return LEVEL_500; /* status unknown (falls in gap) */ + break; } } } - return LEVEL_500; /* 600 or above is also illegal */ + return -2; /* Status unknown (falls in gap) or above 600 */ +} + +AP_DECLARE(int) ap_index_of_response(int status) +{ + int index = index_of_response(status); + return (index < 0) ? LEVEL_500 : index; +} + +AP_DECLARE(const char *) ap_get_status_line_ex(apr_pool_t *p, int status) +{ + int index = index_of_response(status); + if (index >= 0) { + return status_lines[index]; + } + else if (index == -2) { + return apr_psprintf(p, "%i Status %i", status, status); + } + return status_lines[LEVEL_500]; } AP_DECLARE(const char *) ap_get_status_line(int status) @@ -1132,13 +1152,10 @@ static const char *get_canned_error_string(int status, "\">here.

\n", NULL)); case HTTP_USE_PROXY: - return(apr_pstrcat(p, - "

This resource is only accessible " - "through the proxy\n", - ap_escape_html(r->pool, location), - "
\nYou will need to configure " - "your client to use that proxy.

\n", - NULL)); + return("

This resource is only accessible " + "through the proxy\n" + "
\nYou will need to configure " + "your client to use that proxy.

\n"); case HTTP_PROXY_AUTHENTICATION_REQUIRED: case HTTP_UNAUTHORIZED: return("

This server could not verify that you\n" @@ -1154,34 +1171,20 @@ static const char *get_canned_error_string(int status, "error-notes", "

\n")); case HTTP_FORBIDDEN: - s1 = apr_pstrcat(p, - "

You don't have permission to access ", - ap_escape_html(r->pool, r->uri), - "\non this server.
\n", - NULL); - return(add_optional_notes(r, s1, "error-notes", "

\n")); + return(add_optional_notes(r, "

You don't have permission to access this resource.", "error-notes", "

\n")); case HTTP_NOT_FOUND: - return(apr_pstrcat(p, - "

The requested URL ", - ap_escape_html(r->pool, r->uri), - " was not found on this server.

\n", - NULL)); + return("

The requested URL was not found on this server.

\n"); case HTTP_METHOD_NOT_ALLOWED: return(apr_pstrcat(p, "

The requested method ", ap_escape_html(r->pool, r->method), - " is not allowed for the URL ", - ap_escape_html(r->pool, r->uri), - ".

\n", + " is not allowed for this URL.

\n", NULL)); case HTTP_NOT_ACCEPTABLE: - s1 = apr_pstrcat(p, - "

An appropriate representation of the " - "requested resource ", - ap_escape_html(r->pool, r->uri), - " could not be found on this server.

\n", - NULL); - return(add_optional_notes(r, s1, "variant-list", "")); + return(add_optional_notes(r, + "

An appropriate representation of the requested resource " + "could not be found on this server.

\n", + "variant-list", "")); case HTTP_MULTIPLE_CHOICES: return(add_optional_notes(r, "", "variant-list", "")); case HTTP_LENGTH_REQUIRED: @@ -1192,18 +1195,13 @@ static const char *get_canned_error_string(int status, NULL); return(add_optional_notes(r, s1, "error-notes", "

\n")); case HTTP_PRECONDITION_FAILED: - return(apr_pstrcat(p, - "

The precondition on the request " - "for the URL ", - ap_escape_html(r->pool, r->uri), - " evaluated to false.

\n", - NULL)); + return("

The precondition on the request " + "for this URL evaluated to false.

\n"); case HTTP_NOT_IMPLEMENTED: s1 = apr_pstrcat(p, "

", - ap_escape_html(r->pool, r->method), " to ", - ap_escape_html(r->pool, r->uri), - " not supported.
\n", + ap_escape_html(r->pool, r->method), + " not supported for current URL.
\n", NULL); return(add_optional_notes(r, s1, "error-notes", "

\n")); case HTTP_BAD_GATEWAY: @@ -1211,29 +1209,19 @@ static const char *get_canned_error_string(int status, "response from an upstream server.
" CRLF; return(add_optional_notes(r, s1, "error-notes", "

\n")); case HTTP_VARIANT_ALSO_VARIES: - return(apr_pstrcat(p, - "

A variant for the requested " - "resource\n

\n",
-                           ap_escape_html(r->pool, r->uri),
-                           "\n
\nis itself a negotiable resource. " - "This indicates a configuration error.

\n", - NULL)); + return("

A variant for the requested " + "resource\n

\n"
+               "\n
\nis itself a negotiable resource. " + "This indicates a configuration error.

\n"); case HTTP_REQUEST_TIME_OUT: return("

Server timeout waiting for the HTTP request from the client.

\n"); case HTTP_GONE: - return(apr_pstrcat(p, - "

The requested resource
", - ap_escape_html(r->pool, r->uri), - "
\nis no longer available on this server " - "and there is no forwarding address.\n" - "Please remove all references to this " - "resource.

\n", - NULL)); + return("

The requested resource is no longer available on this server" + " and there is no forwarding address.\n" + "Please remove all references to this resource.

\n"); case HTTP_REQUEST_ENTITY_TOO_LARGE: return(apr_pstrcat(p, - "The requested resource
", - ap_escape_html(r->pool, r->uri), "
\n", - "does not allow request data with ", + "The requested resource does not allow request data with ", ap_escape_html(r->pool, r->method), " requests, or the amount of data provided in\n" "the request exceeds the capacity limit.\n", @@ -1317,11 +1305,9 @@ static const char *get_canned_error_string(int status, "the Server Name Indication (SNI) in use for this\n" "connection.

\n"); case HTTP_UNAVAILABLE_FOR_LEGAL_REASONS: - s1 = apr_pstrcat(p, - "

Access to ", ap_escape_html(r->pool, r->uri), - "\nhas been denied for legal reasons.
\n", - NULL); - return(add_optional_notes(r, s1, "error-notes", "

\n")); + return(add_optional_notes(r, + "

Access to this URL has been denied for legal reasons.
\n", + "error-notes", "

\n")); default: /* HTTP_INTERNAL_SERVER_ERROR */ /* * This comparison to expose error-notes could be modified to diff --git a/modules/http/http_request.c b/modules/http/http_request.c index 9e7c4db..d59cfe2 100644 --- a/modules/http/http_request.c +++ b/modules/http/http_request.c @@ -249,7 +249,7 @@ AP_DECLARE(apr_status_t) ap_check_pipeline(conn_rec *c, apr_bucket_brigade *bb, apr_brigade_cleanup(bb); rv = ap_get_brigade(c->input_filters, bb, mode, APR_NONBLOCK_READ, len); - if (rv != APR_SUCCESS || APR_BRIGADE_EMPTY(bb) || !max_blank_lines) { + if (rv != APR_SUCCESS || APR_BRIGADE_EMPTY(bb)) { if (mode == AP_MODE_READBYTES) { /* Unexpected error, stop with this connection */ ap_log_cerror(APLOG_MARK, APLOG_ERR, rv, c, APLOGNO(02967) @@ -257,23 +257,22 @@ AP_DECLARE(apr_status_t) ap_check_pipeline(conn_rec *c, apr_bucket_brigade *bb, c->keepalive = AP_CONN_CLOSE; rv = APR_EGENERAL; } - else if (rv != APR_SUCCESS || APR_BRIGADE_EMPTY(bb)) { - if (rv != APR_SUCCESS && !APR_STATUS_IS_EAGAIN(rv)) { - /* Pipe is dead */ - c->keepalive = AP_CONN_CLOSE; - } - else { - /* Pipe is up and empty */ - rv = APR_EAGAIN; - } + else if (rv != APR_SUCCESS && !APR_STATUS_IS_EAGAIN(rv)) { + /* Pipe is dead */ + c->keepalive = AP_CONN_CLOSE; } else { - apr_off_t n = 0; - /* Single read asked, (non-meta-)data available? */ - rv = apr_brigade_length(bb, 0, &n); - if (rv == APR_SUCCESS && n <= 0) { - rv = APR_EAGAIN; - } + /* Pipe is up and empty */ + rv = APR_EAGAIN; + } + break; + } + if (!max_blank_lines) { + apr_off_t n = 0; + /* Single read asked, (non-meta-)data available? */ + rv = apr_brigade_length(bb, 0, &n); + if (rv == APR_SUCCESS && n <= 0) { + rv = APR_EAGAIN; } break; } @@ -681,7 +680,7 @@ static request_rec *internal_internal_redirect(const char *new_uri, * to do their thing on internal redirects as well. Perhaps this is a * misnamed function. */ - if ((access_status = ap_run_post_read_request(new))) { + if ((access_status = ap_post_read_request(new))) { ap_die(access_status, new); return NULL; } diff --git a/modules/http/mod_mime.c b/modules/http/mod_mime.c index 28c53be..700f824 100644 --- a/modules/http/mod_mime.c +++ b/modules/http/mod_mime.c @@ -755,7 +755,7 @@ static int find_ct(request_rec *r) mime_dir_config *conf; apr_array_header_t *exception_list; char *ext; - const char *fn, *fntmp, *type, *charset = NULL, *resource_name; + const char *fn, *fntmp, *type, *charset = NULL, *resource_name, *qm; int found_metadata = 0; if (r->finfo.filetype == APR_DIR) { @@ -775,6 +775,19 @@ static int find_ct(request_rec *r) if (conf->use_path_info & 1) { resource_name = apr_pstrcat(r->pool, r->filename, r->path_info, NULL); } + /* + * In the reverse proxy case r->filename might contain a query string if + * the nocanon option was used with ProxyPass. + * If this is the case cut off the query string as the last parameter in + * this query string might end up on an extension we take care about, but + * we only want to match against path components not against query + * parameters. 
+ */ + else if ((r->proxyreq == PROXYREQ_REVERSE) + && (apr_table_get(r->notes, "proxy-nocanon")) + && ((qm = ap_strchr_c(r->filename, '?')) != NULL)) { + resource_name = apr_pstrmemdup(r->pool, r->filename, qm - r->filename); + } else { resource_name = r->filename; } @@ -989,9 +1002,7 @@ static int find_ct(request_rec *r) if (!r->content_languages && conf->default_language) { const char **new; - if (!r->content_languages) { - r->content_languages = apr_array_make(r->pool, 2, sizeof(char *)); - } + r->content_languages = apr_array_make(r->pool, 2, sizeof(char *)); new = (const char **)apr_array_push(r->content_languages); *new = conf->default_language; } diff --git a/modules/http2/.gitignore b/modules/http2/.gitignore deleted file mode 100644 index ca49620..0000000 --- a/modules/http2/.gitignore +++ /dev/null @@ -1,35 +0,0 @@ -*.xcuserstate -sandbox/httpd/packages/httpd-2.4.x.tar.gz -sandbox/test/conf/sites/mod-h2.greenbytes.de.conf -*.o -*.slo -*.lo -*.la -*.pcap -.libs -.configured -.deps -compile -aclocal.m4 -autom4te.cache -autoscan.log -config.guess -config.log -config.status -config.sub -config.h -config.h.in -config.h.in~ -configure -configure.scan -depcomp -install-sh -libtool -ltmain.sh -missing -stamp-h1 -Makefile.in -Makefile -mod_h2-*.tar.gz -mod_h2/h2_version.h -m4 diff --git a/modules/http2/config2.m4 b/modules/http2/config2.m4 index e8cefe3..c4579c4 100644 --- a/modules/http2/config2.m4 +++ b/modules/http2/config2.m4 @@ -19,27 +19,25 @@ APACHE_MODPATH_INIT(http2) dnl # list of module object files http2_objs="dnl mod_http2.lo dnl -h2_alt_svc.lo dnl h2_bucket_beam.lo dnl h2_bucket_eos.lo dnl +h2_c1.lo dnl +h2_c1_io.lo dnl +h2_c2.lo dnl +h2_c2_filter.lo dnl h2_config.lo dnl -h2_conn.lo dnl -h2_conn_io.lo dnl -h2_ctx.lo dnl -h2_filter.lo dnl -h2_from_h1.lo dnl -h2_h2.lo dnl +h2_conn_ctx.lo dnl h2_headers.lo dnl h2_mplx.lo dnl -h2_ngn_shed.lo dnl +h2_protocol.lo dnl h2_push.lo dnl h2_request.lo dnl h2_session.lo dnl h2_stream.lo dnl h2_switch.lo dnl -h2_task.lo dnl h2_util.lo dnl h2_workers.lo dnl +h2_ws.lo dnl " dnl @@ -164,6 +162,12 @@ dnl # nghttp2 >= 1.14.0: invalid header callback dnl # nghttp2 >= 1.15.0: get/set stream window sizes AC_CHECK_FUNCS([nghttp2_session_get_stream_local_window_size], [APR_ADDTO(MOD_CPPFLAGS, ["-DH2_NG2_LOCAL_WIN_SIZE"])], []) +dnl # nghttp2 >= 1.15.0: don't keep info on closed streams + AC_CHECK_FUNCS([nghttp2_option_set_no_closed_streams], + [APR_ADDTO(MOD_CPPFLAGS, ["-DH2_NG2_NO_CLOSED_STREAMS"])], []) +dnl # nghttp2 >= 1.50.0: rfc9113 leading/trailing whitespec strictness + AC_CHECK_FUNCS([nghttp2_option_set_no_rfc9113_leading_and_trailing_ws_validation], + [APR_ADDTO(MOD_CPPFLAGS, ["-DH2_NG2_RFC9113_STRICTNESS"])], []) else AC_MSG_WARN([nghttp2 version is too old]) fi diff --git a/modules/http2/h2.h b/modules/http2/h2.h index 38b4019..f496a6d 100644 --- a/modules/http2/h2.h +++ b/modules/http2/h2.h @@ -17,6 +17,38 @@ #ifndef __mod_h2__h2__ #define __mod_h2__h2__ +#include +#include + +#include + +struct h2_session; +struct h2_stream; + +/* + * When apr pollsets can poll file descriptors (e.g. pipes), + * we use it for polling stream input/output. + */ +#ifdef H2_NO_PIPES +#define H2_USE_PIPES 0 +#else +#define H2_USE_PIPES (APR_FILES_AS_SOCKETS && APR_VERSION_AT_LEAST(1,6,0)) +#endif + +#if AP_MODULE_MAGIC_AT_LEAST(20120211, 129) +#define H2_USE_POLLFD_FROM_CONN 1 +#else +#define H2_USE_POLLFD_FROM_CONN 0 +#endif + +/* WebSockets support requires apr 1.7.0 for apr_encode.h, plus the + * WebSockets features of nghttp2 1.34.0 and later. 
*/ +#if H2_USE_PIPES && defined(NGHTTP2_VERSION_NUM) && NGHTTP2_VERSION_NUM >= 0x012200 && APR_VERSION_AT_LEAST(1,7,0) +#define H2_USE_WEBSOCKETS 1 +#else +#define H2_USE_WEBSOCKETS 0 +#endif + /** * The magic PRIamble of RFC 7540 that is always sent when starting * a h2 communication. @@ -46,14 +78,16 @@ extern const char *H2_MAGIC_TOKEN; #define H2_HEADER_AUTH_LEN 10 #define H2_HEADER_PATH ":path" #define H2_HEADER_PATH_LEN 5 +#define H2_HEADER_PROTO ":protocol" +#define H2_HEADER_PROTO_LEN 9 #define H2_CRLF "\r\n" -/* Max data size to write so it fits inside a TLS record */ -#define H2_DATA_CHUNK_SIZE ((16*1024) - 100 - 9) - /* Size of the frame header itself in HTTP/2 */ #define H2_FRAME_HDR_LEN 9 +/* Max data size to write so it fits inside a TLS record */ +#define H2_DATA_CHUNK_SIZE ((16*1024) - 100 - H2_FRAME_HDR_LEN) + /* Maximum number of padding bytes in a frame, rfc7540 */ #define H2_MAX_PADLEN 256 /* Initial default window size, RFC 7540 ch. 6.5.2 */ @@ -89,7 +123,7 @@ typedef enum { H2_SESSION_ST_DONE, /* finished, connection close */ H2_SESSION_ST_IDLE, /* nothing to write, expecting data inc */ H2_SESSION_ST_BUSY, /* read/write without stop */ - H2_SESSION_ST_WAIT, /* waiting for tasks reporting back */ + H2_SESSION_ST_WAIT, /* waiting for c1 incoming + c2s output */ H2_SESSION_ST_CLEANUP, /* pool is being cleaned up */ } h2_session_state; @@ -99,6 +133,7 @@ typedef struct h2_session_props { int emitted_count; /* the number of local streams sent */ int emitted_max; /* the highest local stream id sent */ int error; /* the last session error encountered */ + const char *error_msg; /* the short message given on the error */ unsigned int accepting : 1; /* if the session is accepting new streams */ unsigned int shutdown : 1; /* if the final GOAWAY has been sent */ } h2_session_props; @@ -120,7 +155,9 @@ typedef enum { H2_SEV_CLOSED_R, H2_SEV_CANCELLED, H2_SEV_EOS_SENT, + H2_SEV_IN_ERROR, H2_SEV_IN_DATA_PENDING, + H2_SEV_OUT_C1_BLOCK, } h2_stream_event_t; @@ -129,38 +166,46 @@ typedef enum { * become a request_rec to be handled by soemone. */ typedef struct h2_request h2_request; - struct h2_request { const char *method; /* pseudo header values, see ch. 8.1.2.3 */ const char *scheme; const char *authority; const char *path; + const char *protocol; apr_table_t *headers; apr_time_t request_time; - unsigned int chunked : 1; /* iff requst body needs to be forwarded as chunked */ - unsigned int serialize : 1; /* iff this request is written in HTTP/1.1 serialization */ apr_off_t raw_bytes; /* RAW network bytes that generated this request - if known. */ + int http_status; /* Store a possible HTTP status code that gets + * defined before creating the dummy HTTP/1.1 + * request e.g. due to an error already + * detected. + */ }; -typedef struct h2_headers h2_headers; - -struct h2_headers { - int status; - apr_table_t *headers; - apr_table_t *notes; - apr_off_t raw_bytes; /* RAW network bytes that generated this request - if known. */ -}; +/* + * A possible HTTP status code is not defined yet. See the http_status field + * in struct h2_request above for further explanation. 
+ */ +#define H2_HTTP_STATUS_UNSET (0) typedef apr_status_t h2_io_data_cb(void *ctx, const char *data, apr_off_t len); -typedef int h2_stream_pri_cmp(int stream_id1, int stream_id2, void *ctx); - -/* Note key to attach connection task id to conn_rec/request_rec instances */ +typedef int h2_stream_pri_cmp_fn(int stream_id1, int stream_id2, void *session); +typedef struct h2_stream *h2_stream_get_fn(struct h2_session *session, int stream_id); -#define H2_TASK_ID_NOTE "http2-task-id" -#define H2_FILTER_DEBUG_NOTE "http2-debug" +/* Note key to attach stream id to conn_rec/request_rec instances */ #define H2_HDR_CONFORMANCE "http2-hdr-conformance" #define H2_HDR_CONFORMANCE_UNSAFE "unsafe" +#define H2_PUSH_MODE_NOTE "http2-push-mode" + + +#if AP_MODULE_MAGIC_AT_LEAST(20211221, 6) +#define AP_HAS_RESPONSE_BUCKETS 1 + +#else /* AP_MODULE_MAGIC_AT_LEAST(20211221, 6) */ +#define AP_HAS_RESPONSE_BUCKETS 0 + +#endif /* else AP_MODULE_MAGIC_AT_LEAST(20211221, 6) */ #endif /* defined(__mod_h2__h2__) */ diff --git a/modules/http2/h2_alt_svc.c b/modules/http2/h2_alt_svc.c deleted file mode 100644 index 295a16d..0000000 --- a/modules/http2/h2_alt_svc.c +++ /dev/null @@ -1,131 +0,0 @@ -/* Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include -#include -#include -#include -#include -#include - -#include "h2_private.h" -#include "h2_alt_svc.h" -#include "h2_ctx.h" -#include "h2_config.h" -#include "h2_h2.h" -#include "h2_util.h" - -static int h2_alt_svc_handler(request_rec *r); - -void h2_alt_svc_register_hooks(void) -{ - ap_hook_post_read_request(h2_alt_svc_handler, NULL, NULL, APR_HOOK_MIDDLE); -} - -/** - * Parse an Alt-Svc specifier as described in "HTTP Alternative Services" - * (https://tools.ietf.org/html/draft-ietf-httpbis-alt-svc-04) - * with the following changes: - * - do not percent encode token values - * - do not use quotation marks - */ -h2_alt_svc *h2_alt_svc_parse(const char *s, apr_pool_t *pool) -{ - const char *sep = ap_strchr_c(s, '='); - if (sep) { - const char *alpn = apr_pstrmemdup(pool, s, sep - s); - const char *host = NULL; - int port = 0; - s = sep + 1; - sep = ap_strchr_c(s, ':'); /* mandatory : */ - if (sep) { - if (sep != s) { /* optional host */ - host = apr_pstrmemdup(pool, s, sep - s); - } - s = sep + 1; - if (*s) { /* must be a port number */ - port = (int)apr_atoi64(s); - if (port > 0 && port < (0x1 << 16)) { - h2_alt_svc *as = apr_pcalloc(pool, sizeof(*as)); - as->alpn = alpn; - as->host = host; - as->port = port; - return as; - } - } - } - } - return NULL; -} - -#define h2_alt_svc_IDX(list, i) ((h2_alt_svc**)(list)->elts)[i] - -static int h2_alt_svc_handler(request_rec *r) -{ - const h2_config *cfg; - int i; - - if (r->connection->keepalives > 0) { - /* Only announce Alt-Svc on the first response */ - return DECLINED; - } - - if (h2_ctx_rget(r)) { - return DECLINED; - } - - cfg = h2_config_sget(r->server); - if (r->hostname && cfg && cfg->alt_svcs && cfg->alt_svcs->nelts > 0) { - const char *alt_svc_used = apr_table_get(r->headers_in, "Alt-Svc-Used"); - if (!alt_svc_used) { - /* We have alt-svcs defined and client is not already using - * one, announce the services that were configured and match. - * The security of this connection determines if we allow - * other host names or ports only. - */ - const char *alt_svc = ""; - const char *svc_ma = ""; - int secure = h2_h2_is_tls(r->connection); - int ma = h2_config_geti(cfg, H2_CONF_ALT_SVC_MAX_AGE); - if (ma >= 0) { - svc_ma = apr_psprintf(r->pool, "; ma=%d", ma); - } - ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(03043) - "h2_alt_svc: announce %s for %s:%d", - (secure? "secure" : "insecure"), - r->hostname, (int)r->server->port); - for (i = 0; i < cfg->alt_svcs->nelts; ++i) { - h2_alt_svc *as = h2_alt_svc_IDX(cfg->alt_svcs, i); - const char *ahost = as->host; - if (ahost && !apr_strnatcasecmp(ahost, r->hostname)) { - ahost = NULL; - } - if (secure || !ahost) { - alt_svc = apr_psprintf(r->pool, "%s%s%s=\"%s:%d\"%s", - alt_svc, - (*alt_svc? ", " : ""), as->alpn, - ahost? ahost : "", as->port, - svc_ma); - } - } - if (*alt_svc) { - apr_table_setn(r->headers_out, "Alt-Svc", alt_svc); - } - } - } - - return DECLINED; -} diff --git a/modules/http2/h2_alt_svc.h b/modules/http2/h2_alt_svc.h deleted file mode 100644 index 479e4d1..0000000 --- a/modules/http2/h2_alt_svc.h +++ /dev/null @@ -1,40 +0,0 @@ -/* Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef __mod_h2__h2_alt_svc__ -#define __mod_h2__h2_alt_svc__ - -typedef struct h2_alt_svc h2_alt_svc; - -struct h2_alt_svc { - const char *alpn; - const char *host; - int port; -}; - -void h2_alt_svc_register_hooks(void); - -/** - * Parse an Alt-Svc specifier as described in "HTTP Alternative Services" - * (https://tools.ietf.org/html/draft-ietf-httpbis-alt-svc-04) - * with the following changes: - * - do not percent encode token values - * - do not use quotation marks - */ -h2_alt_svc *h2_alt_svc_parse(const char *s, apr_pool_t *pool); - - -#endif /* defined(__mod_h2__h2_alt_svc__) */ diff --git a/modules/http2/h2_bucket_beam.c b/modules/http2/h2_bucket_beam.c index f79cbe3..6978254 100644 --- a/modules/http2/h2_bucket_beam.c +++ b/modules/http2/h2_bucket_beam.c @@ -24,261 +24,123 @@ #include #include +#include #include #include "h2_private.h" +#include "h2_conn_ctx.h" +#include "h2_headers.h" #include "h2_util.h" #include "h2_bucket_beam.h" -static void h2_beam_emitted(h2_bucket_beam *beam, h2_beam_proxy *proxy); -#define H2_BPROXY_NEXT(e) APR_RING_NEXT((e), link) -#define H2_BPROXY_PREV(e) APR_RING_PREV((e), link) -#define H2_BPROXY_REMOVE(e) APR_RING_REMOVE((e), link) - -#define H2_BPROXY_LIST_INIT(b) APR_RING_INIT(&(b)->list, h2_beam_proxy, link); -#define H2_BPROXY_LIST_SENTINEL(b) APR_RING_SENTINEL(&(b)->list, h2_beam_proxy, link) -#define H2_BPROXY_LIST_EMPTY(b) APR_RING_EMPTY(&(b)->list, h2_beam_proxy, link) -#define H2_BPROXY_LIST_FIRST(b) APR_RING_FIRST(&(b)->list) -#define H2_BPROXY_LIST_LAST(b) APR_RING_LAST(&(b)->list) -#define H2_PROXY_BLIST_INSERT_HEAD(b, e) do { \ - h2_beam_proxy *ap__b = (e); \ - APR_RING_INSERT_HEAD(&(b)->list, ap__b, h2_beam_proxy, link); \ +#define H2_BLIST_INIT(b) APR_RING_INIT(&(b)->list, apr_bucket, link); +#define H2_BLIST_SENTINEL(b) APR_RING_SENTINEL(&(b)->list, apr_bucket, link) +#define H2_BLIST_EMPTY(b) APR_RING_EMPTY(&(b)->list, apr_bucket, link) +#define H2_BLIST_FIRST(b) APR_RING_FIRST(&(b)->list) +#define H2_BLIST_LAST(b) APR_RING_LAST(&(b)->list) +#define H2_BLIST_INSERT_HEAD(b, e) do { \ + apr_bucket *ap__b = (e); \ + APR_RING_INSERT_HEAD(&(b)->list, ap__b, apr_bucket, link); \ } while (0) -#define H2_BPROXY_LIST_INSERT_TAIL(b, e) do { \ - h2_beam_proxy *ap__b = (e); \ - APR_RING_INSERT_TAIL(&(b)->list, ap__b, h2_beam_proxy, link); \ +#define H2_BLIST_INSERT_TAIL(b, e) do { \ + apr_bucket *ap__b = (e); \ + APR_RING_INSERT_TAIL(&(b)->list, ap__b, apr_bucket, link); \ } while (0) -#define H2_BPROXY_LIST_CONCAT(a, b) do { \ - APR_RING_CONCAT(&(a)->list, &(b)->list, h2_beam_proxy, link); \ +#define H2_BLIST_CONCAT(a, b) do { \ + APR_RING_CONCAT(&(a)->list, &(b)->list, apr_bucket, link); \ } while (0) -#define H2_BPROXY_LIST_PREPEND(a, b) do { \ - APR_RING_PREPEND(&(a)->list, &(b)->list, h2_beam_proxy, link); \ +#define H2_BLIST_PREPEND(a, b) do { \ + APR_RING_PREPEND(&(a)->list, &(b)->list, apr_bucket, link); \ } while (0) -/******************************************************************************* - * beam bucket with reference to beam and bucket it represents - 
******************************************************************************/ - -const apr_bucket_type_t h2_bucket_type_beam; - -#define H2_BUCKET_IS_BEAM(e) (e->type == &h2_bucket_type_beam) - -struct h2_beam_proxy { - apr_bucket_refcount refcount; - APR_RING_ENTRY(h2_beam_proxy) link; - h2_bucket_beam *beam; - apr_bucket *bsender; - apr_size_t n; -}; - -static const char Dummy = '\0'; - -static apr_status_t beam_bucket_read(apr_bucket *b, const char **str, - apr_size_t *len, apr_read_type_e block) -{ - h2_beam_proxy *d = b->data; - if (d->bsender) { - const char *data; - apr_status_t status = apr_bucket_read(d->bsender, &data, len, block); - if (status == APR_SUCCESS) { - *str = data + b->start; - *len = b->length; - } - return status; - } - *str = &Dummy; - *len = 0; - return APR_ECONNRESET; -} - -static void beam_bucket_destroy(void *data) -{ - h2_beam_proxy *d = data; - - if (apr_bucket_shared_destroy(d)) { - /* When the beam gets destroyed before this bucket, it will - * NULLify its reference here. This is not protected by a mutex, - * so it will not help with race conditions. - * But it lets us shut down memory pool with circulare beam - * references. */ - if (d->beam) { - h2_beam_emitted(d->beam, d); - } - apr_bucket_free(d); - } -} - -static apr_bucket * h2_beam_bucket_make(apr_bucket *b, - h2_bucket_beam *beam, - apr_bucket *bsender, apr_size_t n) -{ - h2_beam_proxy *d; - - d = apr_bucket_alloc(sizeof(*d), b->list); - H2_BPROXY_LIST_INSERT_TAIL(&beam->proxies, d); - d->beam = beam; - d->bsender = bsender; - d->n = n; - - b = apr_bucket_shared_make(b, d, 0, bsender? bsender->length : 0); - b->type = &h2_bucket_type_beam; - - return b; -} - -static apr_bucket *h2_beam_bucket_create(h2_bucket_beam *beam, - apr_bucket *bsender, - apr_bucket_alloc_t *list, - apr_size_t n) -{ - apr_bucket *b = apr_bucket_alloc(sizeof(*b), list); - - APR_BUCKET_INIT(b); - b->free = apr_bucket_free; - b->list = list; - return h2_beam_bucket_make(b, beam, bsender, n); -} - -const apr_bucket_type_t h2_bucket_type_beam = { - "BEAM", 5, APR_BUCKET_DATA, - beam_bucket_destroy, - beam_bucket_read, - apr_bucket_setaside_noop, - apr_bucket_shared_split, - apr_bucket_shared_copy -}; - -/******************************************************************************* - * h2_blist, a brigade without allocations - ******************************************************************************/ - -static apr_array_header_t *beamers; +static int buffer_is_empty(h2_bucket_beam *beam); +static apr_off_t get_buffered_data_len(h2_bucket_beam *beam); -static apr_status_t cleanup_beamers(void *dummy) +static int h2_blist_count(h2_blist *blist) { - (void)dummy; - beamers = NULL; - return APR_SUCCESS; -} - -void h2_register_bucket_beamer(h2_bucket_beamer *beamer) -{ - if (!beamers) { - apr_pool_cleanup_register(apr_hook_global_pool, NULL, - cleanup_beamers, apr_pool_cleanup_null); - beamers = apr_array_make(apr_hook_global_pool, 10, - sizeof(h2_bucket_beamer*)); - } - APR_ARRAY_PUSH(beamers, h2_bucket_beamer*) = beamer; -} - -static apr_bucket *h2_beam_bucket(h2_bucket_beam *beam, - apr_bucket_brigade *dest, - const apr_bucket *src) -{ - apr_bucket *b = NULL; - int i; - if (beamers) { - for (i = 0; i < beamers->nelts && b == NULL; ++i) { - h2_bucket_beamer *beamer; - - beamer = APR_ARRAY_IDX(beamers, i, h2_bucket_beamer*); - b = beamer(beam, dest, src); - } - } - return b; -} - - -/******************************************************************************* - * bucket beam that can transport buckets across threads - 
******************************************************************************/ - -static void mutex_leave(void *ctx, apr_thread_mutex_t *lock) -{ - apr_thread_mutex_unlock(lock); -} + apr_bucket *b; + int count = 0; -static apr_status_t mutex_enter(void *ctx, h2_beam_lock *pbl) -{ - h2_bucket_beam *beam = ctx; - pbl->mutex = beam->lock; - pbl->leave = mutex_leave; - return apr_thread_mutex_lock(pbl->mutex); -} + for (b = H2_BLIST_FIRST(blist); b != H2_BLIST_SENTINEL(blist); + b = APR_BUCKET_NEXT(b)) { + ++count; + } + return count; +} + +#define H2_BEAM_LOG(beam, c, level, rv, msg, bb) \ + do { \ + if (APLOG_C_IS_LEVEL((c),(level))) { \ + char buffer[4 * 1024]; \ + apr_size_t len, bmax = sizeof(buffer)/sizeof(buffer[0]); \ + len = bb? h2_util_bb_print(buffer, bmax, "", "", bb) : 0; \ + ap_log_cerror(APLOG_MARK, (level), rv, (c), \ + "BEAM[%s,%s%sdata=%ld,buckets(send/consumed)=%d/%d]: %s %s", \ + (beam)->name, \ + (beam)->aborted? "aborted," : "", \ + buffer_is_empty(beam)? "empty," : "", \ + (long)get_buffered_data_len(beam), \ + h2_blist_count(&(beam)->buckets_to_send), \ + h2_blist_count(&(beam)->buckets_consumed), \ + (msg), len? buffer : ""); \ + } \ + } while (0) -static apr_status_t enter_yellow(h2_bucket_beam *beam, h2_beam_lock *pbl) -{ - return mutex_enter(beam, pbl); -} -static void leave_yellow(h2_bucket_beam *beam, h2_beam_lock *pbl) +static int bucket_is_mmap(apr_bucket *b) { - if (pbl->leave) { - pbl->leave(pbl->leave_ctx, pbl->mutex); - } +#if APR_HAS_MMAP + return APR_BUCKET_IS_MMAP(b); +#else + /* if it is not defined as enabled, it should always be no */ + return 0; +#endif } static apr_off_t bucket_mem_used(apr_bucket *b) { - if (APR_BUCKET_IS_FILE(b)) { + if (APR_BUCKET_IS_FILE(b) || bucket_is_mmap(b)) { return 0; } else { /* should all have determinate length */ - return b->length; + return (apr_off_t)b->length; } } -static int report_consumption(h2_bucket_beam *beam, h2_beam_lock *pbl) +static int report_consumption(h2_bucket_beam *beam, int locked) { int rv = 0; - apr_off_t len = beam->received_bytes - beam->cons_bytes_reported; + apr_off_t len = beam->recv_bytes - beam->recv_bytes_reported; h2_beam_io_callback *cb = beam->cons_io_cb; if (len > 0) { if (cb) { void *ctx = beam->cons_ctx; - if (pbl) leave_yellow(beam, pbl); + if (locked) apr_thread_mutex_unlock(beam->lock); cb(ctx, beam, len); - if (pbl) enter_yellow(beam, pbl); + if (locked) apr_thread_mutex_lock(beam->lock); rv = 1; } - beam->cons_bytes_reported += len; + beam->recv_bytes_reported += len; } return rv; } -static void report_prod_io(h2_bucket_beam *beam, int force, h2_beam_lock *pbl) -{ - apr_off_t len = beam->sent_bytes - beam->prod_bytes_reported; - if (force || len > 0) { - h2_beam_io_callback *cb = beam->prod_io_cb; - if (cb) { - void *ctx = beam->prod_ctx; - - leave_yellow(beam, pbl); - cb(ctx, beam, len); - enter_yellow(beam, pbl); - } - beam->prod_bytes_reported += len; - } -} - static apr_size_t calc_buffered(h2_bucket_beam *beam) { apr_size_t len = 0; apr_bucket *b; - for (b = H2_BLIST_FIRST(&beam->send_list); - b != H2_BLIST_SENTINEL(&beam->send_list); + for (b = H2_BLIST_FIRST(&beam->buckets_to_send); + b != H2_BLIST_SENTINEL(&beam->buckets_to_send); b = APR_BUCKET_NEXT(b)) { if (b->length == ((apr_size_t)-1)) { /* do not count */ } - else if (APR_BUCKET_IS_FILE(b)) { + else if (APR_BUCKET_IS_FILE(b) || bucket_is_mmap(b)) { /* if unread, has no real mem footprint. 
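A sketch of the consumer-side callback that report_consumption() above drives; the callback and helper names are assumptions, the signature is the h2_beam_io_callback type from h2_bucket_beam.h:

    static void on_input_consumed(void *ctx, h2_bucket_beam *beam, apr_off_t bytes)
    {
        /* 'bytes' is the delta consumed by the receiver since the last
         * report, e.g. used to announce flow control window updates */
        (void)beam;
        announce_window_update(ctx, bytes);   /* hypothetical helper */
    }

    /* registration, e.g. when the stream input beam is set up: */
    h2_beam_on_consumed(beam, on_input_consumed, stream_ctx);
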
*/ } else { @@ -288,13 +150,30 @@ static apr_size_t calc_buffered(h2_bucket_beam *beam) return len; } -static void r_purge_sent(h2_bucket_beam *beam) +static void purge_consumed_buckets(h2_bucket_beam *beam) +{ + apr_bucket *b; + /* delete all sender buckets in purge brigade, needs to be called + * from sender thread only */ + while (!H2_BLIST_EMPTY(&beam->buckets_consumed)) { + b = H2_BLIST_FIRST(&beam->buckets_consumed); + if(AP_BUCKET_IS_EOR(b)) { + APR_BUCKET_REMOVE(b); + H2_BLIST_INSERT_TAIL(&beam->buckets_eor, b); + } + else { + apr_bucket_delete(b); + } + } +} + +static void purge_eor_buckets(h2_bucket_beam *beam) { apr_bucket *b; /* delete all sender buckets in purge brigade, needs to be called * from sender thread only */ - while (!H2_BLIST_EMPTY(&beam->purge_list)) { - b = H2_BLIST_FIRST(&beam->purge_list); + while (!H2_BLIST_EMPTY(&beam->buckets_eor)) { + b = H2_BLIST_FIRST(&beam->buckets_eor); apr_bucket_delete(b); } } @@ -302,7 +181,7 @@ static void r_purge_sent(h2_bucket_beam *beam) static apr_size_t calc_space_left(h2_bucket_beam *beam) { if (beam->max_buf_size > 0) { - apr_off_t len = calc_buffered(beam); + apr_size_t len = calc_buffered(beam); return (beam->max_buf_size > len? (beam->max_buf_size - len) : 0); } return APR_SIZE_MAX; @@ -310,31 +189,10 @@ static apr_size_t calc_space_left(h2_bucket_beam *beam) static int buffer_is_empty(h2_bucket_beam *beam) { - return ((!beam->recv_buffer || APR_BRIGADE_EMPTY(beam->recv_buffer)) - && H2_BLIST_EMPTY(&beam->send_list)); + return H2_BLIST_EMPTY(&beam->buckets_to_send); } -static apr_status_t wait_empty(h2_bucket_beam *beam, apr_read_type_e block, - apr_thread_mutex_t *lock) -{ - apr_status_t rv = APR_SUCCESS; - - while (!buffer_is_empty(beam) && APR_SUCCESS == rv) { - if (APR_BLOCK_READ != block || !lock) { - rv = APR_EAGAIN; - } - else if (beam->timeout > 0) { - rv = apr_thread_cond_timedwait(beam->change, lock, beam->timeout); - } - else { - rv = apr_thread_cond_wait(beam->change, lock); - } - } - return rv; -} - -static apr_status_t wait_not_empty(h2_bucket_beam *beam, apr_read_type_e block, - apr_thread_mutex_t *lock) +static apr_status_t wait_not_empty(h2_bucket_beam *beam, conn_rec *c, apr_read_type_e block) { apr_status_t rv = APR_SUCCESS; @@ -345,21 +203,24 @@ static apr_status_t wait_not_empty(h2_bucket_beam *beam, apr_read_type_e block, else if (beam->closed) { rv = APR_EOF; } - else if (APR_BLOCK_READ != block || !lock) { + else if (APR_BLOCK_READ != block) { rv = APR_EAGAIN; } else if (beam->timeout > 0) { - rv = apr_thread_cond_timedwait(beam->change, lock, beam->timeout); + H2_BEAM_LOG(beam, c, APLOG_TRACE2, rv, "wait_not_empty, timeout", NULL); + rv = apr_thread_cond_timedwait(beam->change, beam->lock, beam->timeout); } else { - rv = apr_thread_cond_wait(beam->change, lock); + H2_BEAM_LOG(beam, c, APLOG_TRACE2, rv, "wait_not_empty, forever", NULL); + rv = apr_thread_cond_wait(beam->change, beam->lock); } } return rv; } -static apr_status_t wait_not_full(h2_bucket_beam *beam, apr_read_type_e block, - apr_size_t *pspace_left, h2_beam_lock *bl) +static apr_status_t wait_not_full(h2_bucket_beam *beam, conn_rec *c, + apr_read_type_e block, + apr_size_t *pspace_left) { apr_status_t rv = APR_SUCCESS; apr_size_t left; @@ -368,15 +229,17 @@ static apr_status_t wait_not_full(h2_bucket_beam *beam, apr_read_type_e block, if (beam->aborted) { rv = APR_ECONNABORTED; } - else if (block != APR_BLOCK_READ || !bl->mutex) { + else if (block != APR_BLOCK_READ) { rv = APR_EAGAIN; } else { if (beam->timeout > 0) { - rv = 
apr_thread_cond_timedwait(beam->change, bl->mutex, beam->timeout); + H2_BEAM_LOG(beam, c, APLOG_TRACE2, rv, "wait_not_full, timeout", NULL); + rv = apr_thread_cond_timedwait(beam->change, beam->lock, beam->timeout); } else { - rv = apr_thread_cond_wait(beam->change, bl->mutex); + H2_BEAM_LOG(beam, c, APLOG_TRACE2, rv, "wait_not_full, forever", NULL); + rv = apr_thread_cond_wait(beam->change, beam->lock); } } } @@ -384,73 +247,6 @@ static apr_status_t wait_not_full(h2_bucket_beam *beam, apr_read_type_e block, return rv; } -static void h2_beam_emitted(h2_bucket_beam *beam, h2_beam_proxy *proxy) -{ - h2_beam_lock bl; - apr_bucket *b, *next; - - if (enter_yellow(beam, &bl) == APR_SUCCESS) { - /* even when beam buckets are split, only the one where - * refcount drops to 0 will call us */ - H2_BPROXY_REMOVE(proxy); - /* invoked from receiver thread, the last beam bucket for the send - * bucket is about to be destroyed. - * remove it from the hold, where it should be now */ - if (proxy->bsender) { - for (b = H2_BLIST_FIRST(&beam->hold_list); - b != H2_BLIST_SENTINEL(&beam->hold_list); - b = APR_BUCKET_NEXT(b)) { - if (b == proxy->bsender) { - break; - } - } - if (b != H2_BLIST_SENTINEL(&beam->hold_list)) { - /* bucket is in hold as it should be, mark this one - * and all before it for purging. We might have placed meta - * buckets without a receiver proxy into the hold before it - * and schedule them for purging now */ - for (b = H2_BLIST_FIRST(&beam->hold_list); - b != H2_BLIST_SENTINEL(&beam->hold_list); - b = next) { - next = APR_BUCKET_NEXT(b); - if (b == proxy->bsender) { - APR_BUCKET_REMOVE(b); - H2_BLIST_INSERT_TAIL(&beam->purge_list, b); - break; - } - else if (APR_BUCKET_IS_METADATA(b)) { - APR_BUCKET_REMOVE(b); - H2_BLIST_INSERT_TAIL(&beam->purge_list, b); - } - else { - /* another data bucket before this one in hold. this - * is normal since DATA buckets need not be destroyed - * in order */ - } - } - - proxy->bsender = NULL; - } - else { - /* it should be there unless we screwed up */ - ap_log_perror(APLOG_MARK, APLOG_WARNING, 0, beam->send_pool, - APLOGNO(03384) "h2_beam(%d-%s): emitted bucket not " - "in hold, n=%d", beam->id, beam->tag, - (int)proxy->n); - ap_assert(!proxy->bsender); - } - } - /* notify anyone waiting on space to become available */ - if (!bl.mutex) { - r_purge_sent(beam); - } - else { - apr_thread_cond_broadcast(beam->change); - } - leave_yellow(beam, &bl); - } -} - static void h2_blist_cleanup(h2_blist *bl) { apr_bucket *e; @@ -461,335 +257,203 @@ static void h2_blist_cleanup(h2_blist *bl) } } -static apr_status_t beam_close(h2_bucket_beam *beam) +static void beam_shutdown(h2_bucket_beam *beam, apr_shutdown_how_e how) { - if (!beam->closed) { - beam->closed = 1; - apr_thread_cond_broadcast(beam->change); + if (!beam->pool) { + /* pool being cleared already */ + return; } - return APR_SUCCESS; -} - -int h2_beam_is_closed(h2_bucket_beam *beam) -{ - return beam->closed; -} -static int pool_register(h2_bucket_beam *beam, apr_pool_t *pool, - apr_status_t (*cleanup)(void *)) -{ - if (pool && pool != beam->pool) { - apr_pool_pre_cleanup_register(pool, beam, cleanup); - return 1; + /* shutdown both receiver and sender? 
*/ + if (how == APR_SHUTDOWN_READWRITE) { + beam->cons_io_cb = NULL; + beam->recv_cb = NULL; + beam->eagain_cb = NULL; } - return 0; -} -static int pool_kill(h2_bucket_beam *beam, apr_pool_t *pool, - apr_status_t (*cleanup)(void *)) { - if (pool && pool != beam->pool) { - apr_pool_cleanup_kill(pool, beam, cleanup); - return 1; + /* shutdown sender (or both)? */ + if (how != APR_SHUTDOWN_READ) { + purge_consumed_buckets(beam); + h2_blist_cleanup(&beam->buckets_to_send); } - return 0; } -static apr_status_t beam_recv_cleanup(void *data) +static apr_status_t beam_cleanup(void *data) { h2_bucket_beam *beam = data; - /* receiver pool has gone away, clear references */ - beam->recv_buffer = NULL; - beam->recv_pool = NULL; + beam_shutdown(beam, APR_SHUTDOWN_READWRITE); + purge_eor_buckets(beam); + beam->pool = NULL; /* the pool is clearing now */ return APR_SUCCESS; } -static apr_status_t beam_send_cleanup(void *data) +apr_status_t h2_beam_destroy(h2_bucket_beam *beam, conn_rec *c) { - h2_bucket_beam *beam = data; - /* sender is going away, clear up all references to its memory */ - r_purge_sent(beam); - h2_blist_cleanup(&beam->send_list); - report_consumption(beam, NULL); - while (!H2_BPROXY_LIST_EMPTY(&beam->proxies)) { - h2_beam_proxy *proxy = H2_BPROXY_LIST_FIRST(&beam->proxies); - H2_BPROXY_REMOVE(proxy); - proxy->beam = NULL; - proxy->bsender = NULL; + if (beam->pool) { + H2_BEAM_LOG(beam, c, APLOG_TRACE2, 0, "destroy", NULL); + apr_pool_cleanup_run(beam->pool, beam, beam_cleanup); } - h2_blist_cleanup(&beam->purge_list); - h2_blist_cleanup(&beam->hold_list); - beam->send_pool = NULL; + H2_BEAM_LOG(beam, c, APLOG_TRACE2, 0, "destroyed", NULL); return APR_SUCCESS; } -static void beam_set_send_pool(h2_bucket_beam *beam, apr_pool_t *pool) -{ - if (beam->send_pool != pool) { - if (beam->send_pool && beam->send_pool != beam->pool) { - pool_kill(beam, beam->send_pool, beam_send_cleanup); - beam_send_cleanup(beam); - } - beam->send_pool = pool; - pool_register(beam, beam->send_pool, beam_send_cleanup); - } -} - -static void recv_buffer_cleanup(h2_bucket_beam *beam, h2_beam_lock *bl) -{ - if (beam->recv_buffer && !APR_BRIGADE_EMPTY(beam->recv_buffer)) { - apr_bucket_brigade *bb = beam->recv_buffer; - apr_off_t bblen = 0; - - beam->recv_buffer = NULL; - apr_brigade_length(bb, 0, &bblen); - beam->received_bytes += bblen; - - /* need to do this unlocked since bucket destroy might - * call this beam again. */ - if (bl) leave_yellow(beam, bl); - apr_brigade_destroy(bb); - if (bl) enter_yellow(beam, bl); - - apr_thread_cond_broadcast(beam->change); - if (beam->cons_ev_cb) { - beam->cons_ev_cb(beam->cons_ctx, beam); - } - } -} - -static apr_status_t beam_cleanup(h2_bucket_beam *beam, int from_pool) -{ - apr_status_t status = APR_SUCCESS; - int safe_send = (beam->owner == H2_BEAM_OWNER_SEND); - int safe_recv = (beam->owner == H2_BEAM_OWNER_RECV); - - /* - * Owner of the beam is going away, depending on which side it owns, - * cleanup strategies will differ. - * - * In general, receiver holds references to memory from sender. - * Clean up receiver first, if safe, then cleanup sender, if safe. - */ - - /* When called from pool destroy, io callbacks are disabled */ - if (from_pool) { - beam->cons_io_cb = NULL; - } - - /* When modify send is not safe, this means we still have multi-thread - * protection and the owner is receiving the buckets. If the sending - * side has not gone away, this means we could have dangling buckets - * in our lists that never get destroyed. This should not happen. 
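A lifecycle sketch to go with the cleanup above, assuming the beam lives in a stream pool; the variable names are illustrative and the calls follow the create/destroy signatures in this file:

    h2_bucket_beam *beam;

    if (h2_beam_create(&beam, c1, stream_pool, stream_id, "input",
                       buf_size, timeout) == APR_SUCCESS) {
        /* ... send/receive ... */
        h2_beam_destroy(beam, c1);   /* runs beam_cleanup() right away */
    }
    /* clearing/destroying stream_pool triggers the same cleanup, since
     * h2_beam_create() registers beam_cleanup() as a pool pre-cleanup */
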
*/ - ap_assert(safe_send || !beam->send_pool); - if (!H2_BLIST_EMPTY(&beam->send_list)) { - ap_assert(beam->send_pool); - } - - if (safe_recv) { - if (beam->recv_pool) { - pool_kill(beam, beam->recv_pool, beam_recv_cleanup); - beam->recv_pool = NULL; - } - recv_buffer_cleanup(beam, NULL); - } - else { - beam->recv_buffer = NULL; - beam->recv_pool = NULL; - } - - if (safe_send && beam->send_pool) { - pool_kill(beam, beam->send_pool, beam_send_cleanup); - status = beam_send_cleanup(beam); - } - - if (safe_recv) { - ap_assert(H2_BPROXY_LIST_EMPTY(&beam->proxies)); - ap_assert(H2_BLIST_EMPTY(&beam->send_list)); - ap_assert(H2_BLIST_EMPTY(&beam->hold_list)); - ap_assert(H2_BLIST_EMPTY(&beam->purge_list)); - } - return status; -} - -static apr_status_t beam_pool_cleanup(void *data) -{ - return beam_cleanup(data, 1); -} - -apr_status_t h2_beam_destroy(h2_bucket_beam *beam) -{ - apr_pool_cleanup_kill(beam->pool, beam, beam_pool_cleanup); - return beam_cleanup(beam, 0); -} - -apr_status_t h2_beam_create(h2_bucket_beam **pbeam, apr_pool_t *pool, - int id, const char *tag, - h2_beam_owner_t owner, +apr_status_t h2_beam_create(h2_bucket_beam **pbeam, conn_rec *from, + apr_pool_t *pool, int id, const char *tag, apr_size_t max_buf_size, apr_interval_time_t timeout) { h2_bucket_beam *beam; - apr_status_t rv = APR_SUCCESS; + h2_conn_ctx_t *conn_ctx = h2_conn_ctx_get(from); + apr_status_t rv; beam = apr_pcalloc(pool, sizeof(*beam)); - if (!beam) { - return APR_ENOMEM; - } - - beam->id = id; - beam->tag = tag; beam->pool = pool; - beam->owner = owner; - H2_BLIST_INIT(&beam->send_list); - H2_BLIST_INIT(&beam->hold_list); - H2_BLIST_INIT(&beam->purge_list); - H2_BPROXY_LIST_INIT(&beam->proxies); + beam->from = from; + beam->id = id; + beam->name = apr_psprintf(pool, "%s-%d-%s", + conn_ctx->id, id, tag); + + H2_BLIST_INIT(&beam->buckets_to_send); + H2_BLIST_INIT(&beam->buckets_consumed); + H2_BLIST_INIT(&beam->buckets_eor); beam->tx_mem_limits = 1; beam->max_buf_size = max_buf_size; beam->timeout = timeout; rv = apr_thread_mutex_create(&beam->lock, APR_THREAD_MUTEX_DEFAULT, pool); - if (APR_SUCCESS == rv) { - rv = apr_thread_cond_create(&beam->change, pool); - if (APR_SUCCESS == rv) { - apr_pool_pre_cleanup_register(pool, beam, beam_pool_cleanup); - *pbeam = beam; - } - } + if (APR_SUCCESS != rv) goto cleanup; + rv = apr_thread_cond_create(&beam->change, pool); + if (APR_SUCCESS != rv) goto cleanup; + apr_pool_pre_cleanup_register(pool, beam, beam_cleanup); + +cleanup: + H2_BEAM_LOG(beam, from, APLOG_TRACE2, rv, "created", NULL); + *pbeam = (APR_SUCCESS == rv)? 
beam : NULL; return rv; } void h2_beam_buffer_size_set(h2_bucket_beam *beam, apr_size_t buffer_size) { - h2_beam_lock bl; - - if (enter_yellow(beam, &bl) == APR_SUCCESS) { - beam->max_buf_size = buffer_size; - leave_yellow(beam, &bl); - } + apr_thread_mutex_lock(beam->lock); + beam->max_buf_size = buffer_size; + apr_thread_mutex_unlock(beam->lock); } -apr_size_t h2_beam_buffer_size_get(h2_bucket_beam *beam) +void h2_beam_set_copy_files(h2_bucket_beam * beam, int enabled) { - h2_beam_lock bl; - apr_size_t buffer_size = 0; - - if (beam && enter_yellow(beam, &bl) == APR_SUCCESS) { - buffer_size = beam->max_buf_size; - leave_yellow(beam, &bl); - } - return buffer_size; + apr_thread_mutex_lock(beam->lock); + beam->copy_files = enabled; + apr_thread_mutex_unlock(beam->lock); } -void h2_beam_timeout_set(h2_bucket_beam *beam, apr_interval_time_t timeout) +apr_size_t h2_beam_buffer_size_get(h2_bucket_beam *beam) { - h2_beam_lock bl; + apr_size_t buffer_size = 0; - if (enter_yellow(beam, &bl) == APR_SUCCESS) { - beam->timeout = timeout; - leave_yellow(beam, &bl); - } + apr_thread_mutex_lock(beam->lock); + buffer_size = beam->max_buf_size; + apr_thread_mutex_unlock(beam->lock); + return buffer_size; } apr_interval_time_t h2_beam_timeout_get(h2_bucket_beam *beam) { - h2_beam_lock bl; - apr_interval_time_t timeout = 0; - - if (enter_yellow(beam, &bl) == APR_SUCCESS) { - timeout = beam->timeout; - leave_yellow(beam, &bl); - } + apr_interval_time_t timeout; + + apr_thread_mutex_lock(beam->lock); + timeout = beam->timeout; + apr_thread_mutex_unlock(beam->lock); return timeout; } -void h2_beam_abort(h2_bucket_beam *beam) +void h2_beam_timeout_set(h2_bucket_beam *beam, apr_interval_time_t timeout) { - h2_beam_lock bl; - - if (beam && enter_yellow(beam, &bl) == APR_SUCCESS) { - beam->aborted = 1; - r_purge_sent(beam); - h2_blist_cleanup(&beam->send_list); - report_consumption(beam, &bl); - apr_thread_cond_broadcast(beam->change); - leave_yellow(beam, &bl); - } + apr_thread_mutex_lock(beam->lock); + beam->timeout = timeout; + apr_thread_mutex_unlock(beam->lock); } -apr_status_t h2_beam_close(h2_bucket_beam *beam) +void h2_beam_abort(h2_bucket_beam *beam, conn_rec *c) { - h2_beam_lock bl; - - if (beam && enter_yellow(beam, &bl) == APR_SUCCESS) { - r_purge_sent(beam); - beam_close(beam); - report_consumption(beam, &bl); - leave_yellow(beam, &bl); - } - return beam->aborted? 
APR_ECONNABORTED : APR_SUCCESS; -} + apr_thread_mutex_lock(beam->lock); + beam->aborted = 1; + if (c == beam->from) { + /* sender aborts */ + if (beam->send_cb) { + beam->send_cb(beam->send_ctx, beam); + } + if (beam->was_empty_cb && buffer_is_empty(beam)) { + beam->was_empty_cb(beam->was_empty_ctx, beam); + } + /* no more consumption reporting to sender */ + report_consumption(beam, 1); + beam->cons_ctx = NULL; -apr_status_t h2_beam_leave(h2_bucket_beam *beam) -{ - h2_beam_lock bl; - - if (beam && enter_yellow(beam, &bl) == APR_SUCCESS) { - recv_buffer_cleanup(beam, &bl); - beam->aborted = 1; - beam_close(beam); - leave_yellow(beam, &bl); + beam_shutdown(beam, APR_SHUTDOWN_WRITE); } - return APR_SUCCESS; -} - -apr_status_t h2_beam_wait_empty(h2_bucket_beam *beam, apr_read_type_e block) -{ - apr_status_t status; - h2_beam_lock bl; - - if ((status = enter_yellow(beam, &bl)) == APR_SUCCESS) { - status = wait_empty(beam, block, bl.mutex); - leave_yellow(beam, &bl); + else { + /* receiver aborts */ + beam_shutdown(beam, APR_SHUTDOWN_READ); } - return status; + apr_thread_cond_broadcast(beam->change); + apr_thread_mutex_unlock(beam->lock); } -static void move_to_hold(h2_bucket_beam *beam, - apr_bucket_brigade *sender_bb) +void h2_beam_close(h2_bucket_beam *beam, conn_rec *c) { - apr_bucket *b; - while (sender_bb && !APR_BRIGADE_EMPTY(sender_bb)) { - b = APR_BRIGADE_FIRST(sender_bb); - APR_BUCKET_REMOVE(b); - H2_BLIST_INSERT_TAIL(&beam->send_list, b); + apr_thread_mutex_lock(beam->lock); + if (!beam->closed) { + /* should only be called from sender */ + ap_assert(c == beam->from); + beam->closed = 1; + if (beam->send_cb) { + beam->send_cb(beam->send_ctx, beam); + } + if (beam->was_empty_cb && buffer_is_empty(beam)) { + beam->was_empty_cb(beam->was_empty_ctx, beam); + } + apr_thread_cond_broadcast(beam->change); } + apr_thread_mutex_unlock(beam->lock); } -static apr_status_t append_bucket(h2_bucket_beam *beam, - apr_bucket *b, +static apr_status_t append_bucket(h2_bucket_beam *beam, + apr_bucket_brigade *bb, apr_read_type_e block, apr_size_t *pspace_left, - h2_beam_lock *pbl) + apr_off_t *pwritten) { + apr_bucket *b; const char *data; apr_size_t len; - apr_status_t status; - int can_beam = 0, check_len; + apr_status_t rv = APR_SUCCESS; + int can_beam = 0; + (void)block; if (beam->aborted) { - return APR_ECONNABORTED; + rv = APR_ECONNABORTED; + goto cleanup; } - + + ap_assert(beam->pool); + + b = APR_BRIGADE_FIRST(bb); if (APR_BUCKET_IS_METADATA(b)) { - if (APR_BUCKET_IS_EOS(b)) { - beam->closed = 1; - } APR_BUCKET_REMOVE(b); - H2_BLIST_INSERT_TAIL(&beam->send_list, b); - return APR_SUCCESS; + apr_bucket_setaside(b, beam->pool); + H2_BLIST_INSERT_TAIL(&beam->buckets_to_send, b); + goto cleanup; + } + /* non meta bucket */ + + /* in case of indeterminate length, we need to read the bucket, + * so that it transforms itself into something stable. */ + if (b->length == ((apr_size_t)-1)) { + rv = apr_bucket_read(b, &data, &len, APR_BLOCK_READ); + if (rv != APR_SUCCESS) goto cleanup; } - else if (APR_BUCKET_IS_FILE(b)) { + + if (APR_BUCKET_IS_FILE(b)) { /* For file buckets the problem is their internal readpool that * is used on the first read to allocate buffer/mmap. * Since setting aside a file bucket will de-register the @@ -806,478 +470,414 @@ static apr_status_t append_bucket(h2_bucket_beam *beam, * of open file handles and rather use a less efficient beam * transport. 
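When the file handle pass-through described above is not wanted, e.g. to avoid exhausting descriptors, the sender side can force the copy path; the surrounding condition is an assumption:

    if (file_handles_are_scarce) {
        /* FILE/MMAP buckets are then read and copied as heap data on send
         * instead of being set aside into the beam pool */
        h2_beam_set_copy_files(beam, 1);
    }
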
*/ apr_bucket_file *bf = b->data; - apr_file_t *fd = bf->fd; - can_beam = (bf->refcount.refcount == 1); - if (can_beam && beam->can_beam_fn) { - can_beam = beam->can_beam_fn(beam->can_beam_ctx, beam, fd); - } - check_len = !can_beam; + can_beam = !beam->copy_files && (bf->refcount.refcount == 1); } - else { - if (b->length == ((apr_size_t)-1)) { - const char *data; - status = apr_bucket_read(b, &data, &len, APR_BLOCK_READ); - if (status != APR_SUCCESS) { - return status; - } - } - check_len = 1; + else if (bucket_is_mmap(b)) { + can_beam = !beam->copy_files; } - - if (check_len) { - if (b->length > *pspace_left) { - apr_bucket_split(b, *pspace_left); - } - *pspace_left -= b->length; + + if (b->length == 0) { + apr_bucket_delete(b); + rv = APR_SUCCESS; + goto cleanup; } - /* The fundamental problem is that reading a sender bucket from - * a receiver thread is a total NO GO, because the bucket might use - * its pool/bucket_alloc from a foreign thread and that will - * corrupt. */ - status = APR_ENOTIMPL; - if (APR_BUCKET_IS_TRANSIENT(b)) { - /* this takes care of transient buckets and converts them - * into heap ones. Other bucket types might or might not be - * affected by this. */ - status = apr_bucket_setaside(b, beam->send_pool); + if (!*pspace_left) { + rv = APR_EAGAIN; + goto cleanup; } - else if (APR_BUCKET_IS_HEAP(b)) { - /* For heap buckets read from a receiver thread is fine. The + + /* bucket is accepted and added to beam->buckets_to_send */ + if (APR_BUCKET_IS_HEAP(b)) { + /* For heap buckets, a read from a receiver thread is fine. The * data will be there and live until the bucket itself is * destroyed. */ - status = APR_SUCCESS; + rv = apr_bucket_setaside(b, beam->pool); + if (rv != APR_SUCCESS) goto cleanup; } - else if (APR_BUCKET_IS_POOL(b)) { - /* pool buckets are bastards that register at pool cleanup - * to morph themselves into heap buckets. That may happen anytime, - * even after the bucket data pointer has been read. So at - * any time inside the receiver thread, the pool bucket memory - * may disappear. yikes. */ - status = apr_bucket_read(b, &data, &len, APR_BLOCK_READ); - if (status == APR_SUCCESS) { - apr_bucket_heap_make(b, data, len, NULL); - } + else if (can_beam && (APR_BUCKET_IS_FILE(b) || bucket_is_mmap(b))) { + rv = apr_bucket_setaside(b, beam->pool); + if (rv != APR_SUCCESS) goto cleanup; } - else if (APR_BUCKET_IS_FILE(b) && can_beam) { - status = apr_bucket_setaside(b, beam->send_pool); + else { + /* we know of no special shortcut to transfer the bucket to + * another pool without copying. So we make it a heap bucket. */ + apr_bucket *b2; + + rv = apr_bucket_read(b, &data, &len, APR_BLOCK_READ); + if (rv != APR_SUCCESS) goto cleanup; + /* this allocates and copies data */ + b2 = apr_bucket_heap_create(data, len, NULL, bb->bucket_alloc); + apr_bucket_delete(b); + b = b2; + APR_BRIGADE_INSERT_HEAD(bb, b); } - if (status == APR_ENOTIMPL) { - /* we have no knowledge about the internals of this bucket, - * but hope that after read, its data stays immutable for the - * lifetime of the bucket. (see pool bucket handling above for - * a counter example). - * We do the read while in the sender thread, so that the bucket may - * use pools/allocators safely. 
*/ - status = apr_bucket_read(b, &data, &len, APR_BLOCK_READ); - if (status == APR_SUCCESS) { - status = apr_bucket_setaside(b, beam->send_pool); - } + APR_BUCKET_REMOVE(b); + H2_BLIST_INSERT_TAIL(&beam->buckets_to_send, b); + *pwritten += (apr_off_t)b->length; + if (b->length > *pspace_left) { + *pspace_left = 0; } - - if (status != APR_SUCCESS && status != APR_ENOTIMPL) { - return status; + else { + *pspace_left -= b->length; } - - APR_BUCKET_REMOVE(b); - H2_BLIST_INSERT_TAIL(&beam->send_list, b); - beam->sent_bytes += b->length; - - return APR_SUCCESS; -} -void h2_beam_send_from(h2_bucket_beam *beam, apr_pool_t *p) -{ - h2_beam_lock bl; - /* Called from the sender thread to add buckets to the beam */ - if (enter_yellow(beam, &bl) == APR_SUCCESS) { - r_purge_sent(beam); - beam_set_send_pool(beam, p); - leave_yellow(beam, &bl); - } +cleanup: + return rv; } -apr_status_t h2_beam_send(h2_bucket_beam *beam, +apr_status_t h2_beam_send(h2_bucket_beam *beam, conn_rec *from, apr_bucket_brigade *sender_bb, - apr_read_type_e block) + apr_read_type_e block, + apr_off_t *pwritten) { - apr_bucket *b; apr_status_t rv = APR_SUCCESS; apr_size_t space_left = 0; - h2_beam_lock bl; + int was_empty; + + ap_assert(beam->pool); /* Called from the sender thread to add buckets to the beam */ - if (enter_yellow(beam, &bl) == APR_SUCCESS) { - ap_assert(beam->send_pool); - r_purge_sent(beam); - + apr_thread_mutex_lock(beam->lock); + ap_assert(beam->from == from); + ap_assert(sender_bb); + H2_BEAM_LOG(beam, from, APLOG_TRACE2, rv, "start send", sender_bb); + purge_consumed_buckets(beam); + *pwritten = 0; + was_empty = buffer_is_empty(beam); + + space_left = calc_space_left(beam); + while (!APR_BRIGADE_EMPTY(sender_bb) && APR_SUCCESS == rv) { + rv = append_bucket(beam, sender_bb, block, &space_left, pwritten); if (beam->aborted) { - move_to_hold(beam, sender_bb); - rv = APR_ECONNABORTED; - } - else if (sender_bb) { - int force_report = !APR_BRIGADE_EMPTY(sender_bb); - - space_left = calc_space_left(beam); - while (!APR_BRIGADE_EMPTY(sender_bb) && APR_SUCCESS == rv) { - if (space_left <= 0) { - report_prod_io(beam, force_report, &bl); - r_purge_sent(beam); - rv = wait_not_full(beam, block, &space_left, &bl); - if (APR_SUCCESS != rv) { - break; - } - } - b = APR_BRIGADE_FIRST(sender_bb); - rv = append_bucket(beam, b, block, &space_left, &bl); + goto cleanup; + } + else if (APR_EAGAIN == rv) { + /* bucket was not added, as beam buffer has no space left. + * Trigger event callbacks, so receiver can know there is something + * to receive before we do a conditional wait. 
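A sketch of the receiver-side hook that makes use of these events; the callback and helper names are assumptions, the registration call and the h2_beam_ev_callback signature come from h2_bucket_beam.h:

    static void on_beam_was_empty(void *ctx, h2_bucket_beam *beam)
    {
        /* the beam just turned non-empty during a send: schedule the
         * receiving (c1) side so it picks the buckets up */
        (void)beam;
        wake_receiver(ctx);   /* hypothetical helper */
    }

    h2_beam_on_was_empty(beam, on_beam_was_empty, receiver_ctx);
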
*/ + purge_consumed_buckets(beam); + if (beam->send_cb) { + beam->send_cb(beam->send_ctx, beam); } - - report_prod_io(beam, force_report, &bl); - apr_thread_cond_broadcast(beam->change); + if (was_empty && beam->was_empty_cb) { + beam->was_empty_cb(beam->was_empty_ctx, beam); + } + rv = wait_not_full(beam, from, block, &space_left); + if (APR_SUCCESS != rv) { + break; + } + was_empty = buffer_is_empty(beam); } - report_consumption(beam, &bl); - leave_yellow(beam, &bl); } + +cleanup: + if (beam->send_cb && !buffer_is_empty(beam)) { + beam->send_cb(beam->send_ctx, beam); + } + if (was_empty && beam->was_empty_cb && !buffer_is_empty(beam)) { + beam->was_empty_cb(beam->was_empty_ctx, beam); + } + apr_thread_cond_broadcast(beam->change); + + report_consumption(beam, 1); + if (beam->aborted) { + rv = APR_ECONNABORTED; + } + H2_BEAM_LOG(beam, from, APLOG_TRACE2, rv, "end send", sender_bb); + if(rv != APR_SUCCESS && !APR_STATUS_IS_EAGAIN(rv) && sender_bb != NULL) { + apr_brigade_cleanup(sender_bb); + } + apr_thread_mutex_unlock(beam->lock); return rv; } -apr_status_t h2_beam_receive(h2_bucket_beam *beam, +apr_status_t h2_beam_receive(h2_bucket_beam *beam, + conn_rec *to, apr_bucket_brigade *bb, apr_read_type_e block, apr_off_t readbytes) { - h2_beam_lock bl; apr_bucket *bsender, *brecv, *ng; int transferred = 0; - apr_status_t status = APR_SUCCESS; + apr_status_t rv = APR_SUCCESS; apr_off_t remain; - int transferred_buckets = 0; - - /* Called from the receiver thread to take buckets from the beam */ - if (enter_yellow(beam, &bl) == APR_SUCCESS) { - if (readbytes <= 0) { - readbytes = APR_SIZE_MAX; - } - remain = readbytes; - + int consumed_buckets = 0; + + apr_thread_mutex_lock(beam->lock); + H2_BEAM_LOG(beam, to, APLOG_TRACE2, 0, "start receive", bb); + if (readbytes <= 0) { + readbytes = (apr_off_t)APR_SIZE_MAX; + } + remain = readbytes; + transfer: - if (beam->aborted) { - recv_buffer_cleanup(beam, &bl); - status = APR_ECONNABORTED; - goto leave; - } + if (beam->aborted) { + beam_shutdown(beam, APR_SHUTDOWN_READ); + rv = APR_ECONNABORTED; + goto leave; + } - /* transfer enough buckets from our receiver brigade, if we have one */ - while (remain >= 0 - && beam->recv_buffer - && !APR_BRIGADE_EMPTY(beam->recv_buffer)) { - - brecv = APR_BRIGADE_FIRST(beam->recv_buffer); - if (brecv->length > 0 && remain <= 0) { - break; - } - APR_BUCKET_REMOVE(brecv); - APR_BRIGADE_INSERT_TAIL(bb, brecv); - remain -= brecv->length; - ++transferred; + ap_assert(beam->pool); + + /* transfer from our sender brigade, transforming sender buckets to + * receiver ones until we have enough */ + while (remain >= 0 && !H2_BLIST_EMPTY(&beam->buckets_to_send)) { + + brecv = NULL; + bsender = H2_BLIST_FIRST(&beam->buckets_to_send); + if (bsender->length > 0 && remain <= 0) { + break; } - /* transfer from our sender brigade, transforming sender buckets to - * receiver ones until we have enough */ - while (remain >= 0 && !H2_BLIST_EMPTY(&beam->send_list)) { - - brecv = NULL; - bsender = H2_BLIST_FIRST(&beam->send_list); - if (bsender->length > 0 && remain <= 0) { - break; + if (APR_BUCKET_IS_METADATA(bsender)) { + /* we need a real copy into the receivers bucket_alloc */ + if (APR_BUCKET_IS_EOS(bsender)) { + /* this closes the beam */ + beam->closed = 1; + brecv = apr_bucket_eos_create(bb->bucket_alloc); } - - if (APR_BUCKET_IS_METADATA(bsender)) { - if (APR_BUCKET_IS_EOS(bsender)) { - brecv = apr_bucket_eos_create(bb->bucket_alloc); - beam->close_sent = 1; - } - else if (APR_BUCKET_IS_FLUSH(bsender)) { - brecv = 
apr_bucket_flush_create(bb->bucket_alloc); - } - else if (AP_BUCKET_IS_ERROR(bsender)) { - ap_bucket_error *eb = (ap_bucket_error *)bsender; - brecv = ap_bucket_error_create(eb->status, eb->data, - bb->p, bb->bucket_alloc); - } + else if (APR_BUCKET_IS_FLUSH(bsender)) { + brecv = apr_bucket_flush_create(bb->bucket_alloc); } - else if (bsender->length == 0) { - APR_BUCKET_REMOVE(bsender); - H2_BLIST_INSERT_TAIL(&beam->hold_list, bsender); - continue; +#if AP_HAS_RESPONSE_BUCKETS + else if (AP_BUCKET_IS_RESPONSE(bsender)) { + brecv = ap_bucket_response_clone(bsender, bb->p, bb->bucket_alloc); } - else if (APR_BUCKET_IS_FILE(bsender)) { - /* This is set aside into the target brigade pool so that - * any read operation messes with that pool and not - * the sender one. */ - apr_bucket_file *f = (apr_bucket_file *)bsender->data; - apr_file_t *fd = f->fd; - int setaside = (f->readpool != bb->p); - - if (setaside) { - status = apr_file_setaside(&fd, fd, bb->p); - if (status != APR_SUCCESS) { - goto leave; - } - ++beam->files_beamed; - } - ng = apr_brigade_insert_file(bb, fd, bsender->start, bsender->length, - bb->p); -#if APR_HAS_MMAP - /* disable mmap handling as this leads to segfaults when - * the underlying file is changed while memory pointer has - * been handed out. See also PR 59348 */ - apr_bucket_file_enable_mmap(ng, 0); -#endif - APR_BUCKET_REMOVE(bsender); - H2_BLIST_INSERT_TAIL(&beam->hold_list, bsender); - - remain -= bsender->length; - ++transferred; - ++transferred_buckets; - continue; + else if (AP_BUCKET_IS_REQUEST(bsender)) { + brecv = ap_bucket_request_clone(bsender, bb->p, bb->bucket_alloc); } - else { - /* create a "receiver" standin bucket. we took care about the - * underlying sender bucket and its data when we placed it into - * the sender brigade. - * the beam bucket will notify us on destruction that bsender is - * no longer needed. */ - brecv = h2_beam_bucket_create(beam, bsender, bb->bucket_alloc, - beam->buckets_sent++); + else if (AP_BUCKET_IS_HEADERS(bsender)) { + brecv = ap_bucket_headers_clone(bsender, bb->p, bb->bucket_alloc); } - - /* Place the sender bucket into our hold, to be destroyed when no - * receiver bucket references it any more. */ - APR_BUCKET_REMOVE(bsender); - H2_BLIST_INSERT_TAIL(&beam->hold_list, bsender); - - beam->received_bytes += bsender->length; - ++transferred_buckets; - - if (brecv) { - APR_BRIGADE_INSERT_TAIL(bb, brecv); - remain -= brecv->length; - ++transferred; +#else + else if (H2_BUCKET_IS_HEADERS(bsender)) { + brecv = h2_bucket_headers_clone(bsender, bb->p, bb->bucket_alloc); } - else { - /* let outside hook determine how bucket is beamed */ - leave_yellow(beam, &bl); - brecv = h2_beam_bucket(beam, bb, bsender); - enter_yellow(beam, &bl); - - while (brecv && brecv != APR_BRIGADE_SENTINEL(bb)) { - ++transferred; - remain -= brecv->length; - brecv = APR_BUCKET_NEXT(brecv); - } +#endif /* AP_HAS_RESPONSE_BUCKETS */ + else if (AP_BUCKET_IS_ERROR(bsender)) { + ap_bucket_error *eb = bsender->data; + brecv = ap_bucket_error_create(eb->status, eb->data, + bb->p, bb->bucket_alloc); } } - - if (remain < 0) { - /* too much, put some back into out recv_buffer */ - remain = readbytes; - for (brecv = APR_BRIGADE_FIRST(bb); - brecv != APR_BRIGADE_SENTINEL(bb); - brecv = APR_BUCKET_NEXT(brecv)) { - remain -= (beam->tx_mem_limits? 
bucket_mem_used(brecv) - : brecv->length); - if (remain < 0) { - apr_bucket_split(brecv, brecv->length+remain); - beam->recv_buffer = apr_brigade_split_ex(bb, - APR_BUCKET_NEXT(brecv), - beam->recv_buffer); - break; - } - } + else if (bsender->length == 0) { + /* nop */ } - - if (beam->closed && buffer_is_empty(beam)) { - /* beam is closed and we have nothing more to receive */ - if (!beam->close_sent) { - apr_bucket *b = apr_bucket_eos_create(bb->bucket_alloc); - APR_BRIGADE_INSERT_TAIL(bb, b); - beam->close_sent = 1; - ++transferred; - status = APR_SUCCESS; - } +#if APR_HAS_MMAP + else if (APR_BUCKET_IS_MMAP(bsender)) { + apr_bucket_mmap *bmmap = bsender->data; + apr_mmap_t *mmap; + rv = apr_mmap_dup(&mmap, bmmap->mmap, bb->p); + if (rv != APR_SUCCESS) goto leave; + brecv = apr_bucket_mmap_create(mmap, bsender->start, bsender->length, bb->bucket_alloc); } - - if (transferred_buckets > 0) { - if (beam->cons_ev_cb) { - beam->cons_ev_cb(beam->cons_ctx, beam); +#endif + else if (APR_BUCKET_IS_FILE(bsender)) { + /* This is setaside into the target brigade pool so that + * any read operation messes with that pool and not + * the sender one. */ + apr_bucket_file *f = (apr_bucket_file *)bsender->data; + apr_file_t *fd = f->fd; + int setaside = (f->readpool != bb->p); + + if (setaside) { + rv = apr_file_setaside(&fd, fd, bb->p); + if (rv != APR_SUCCESS) goto leave; } - } - - if (transferred) { - apr_thread_cond_broadcast(beam->change); - status = APR_SUCCESS; + ng = apr_brigade_insert_file(bb, fd, bsender->start, (apr_off_t)bsender->length, + bb->p); +#if APR_HAS_MMAP + /* disable mmap handling as this leads to segfaults when + * the underlying file is changed while memory pointer has + * been handed out. See also PR 59348 */ + apr_bucket_file_enable_mmap(ng, 0); +#endif + remain -= bsender->length; + ++transferred; } else { - status = wait_not_empty(beam, block, bl.mutex); - if (status != APR_SUCCESS) { - goto leave; - } - goto transfer; + const char *data; + apr_size_t dlen; + /* we did that when the bucket was added, so this should + * give us the same data as before without changing the bucket + * or anything (pool) connected to it. 
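For the other side of this transfer, a sketch of a receiver calling in; the brigade and connection variables are assumptions, the call matches the h2_beam_receive() signature above:

    apr_bucket_brigade *bb = apr_brigade_create(c1->pool, c1->bucket_alloc);
    apr_status_t rv = h2_beam_receive(beam, c1, bb, APR_NONBLOCK_READ, 16 * 1024);

    if (APR_STATUS_IS_EAGAIN(rv)) {
        /* nothing buffered yet and the sender has not closed; retry later,
         * typically driven by the registered recv/eagain callbacks */
    }
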
*/ + rv = apr_bucket_read(bsender, &data, &dlen, APR_BLOCK_READ); + if (rv != APR_SUCCESS) goto leave; + rv = apr_brigade_write(bb, NULL, NULL, data, dlen); + if (rv != APR_SUCCESS) goto leave; + + remain -= dlen; + ++transferred; + } + + if (brecv) { + /* we have a proxy that we can give the receiver */ + APR_BRIGADE_INSERT_TAIL(bb, brecv); + remain -= brecv->length; + ++transferred; + } + APR_BUCKET_REMOVE(bsender); + H2_BLIST_INSERT_TAIL(&beam->buckets_consumed, bsender); + beam->recv_bytes += bsender->length; + ++consumed_buckets; + } + + if (beam->recv_cb && consumed_buckets > 0) { + beam->recv_cb(beam->recv_ctx, beam); + } + + if (transferred) { + apr_thread_cond_broadcast(beam->change); + rv = APR_SUCCESS; + } + else if (beam->aborted) { + rv = APR_ECONNABORTED; + } + else if (beam->closed) { + rv = APR_EOF; + } + else { + rv = wait_not_empty(beam, to, block); + if (rv != APR_SUCCESS) { + goto leave; } -leave: - leave_yellow(beam, &bl); + goto transfer; + } + +leave: + H2_BEAM_LOG(beam, to, APLOG_TRACE2, rv, "end receive", bb); + if (rv == APR_EAGAIN && beam->eagain_cb) { + beam->eagain_cb(beam->eagain_ctx, beam); } - return status; + apr_thread_mutex_unlock(beam->lock); + return rv; } void h2_beam_on_consumed(h2_bucket_beam *beam, - h2_beam_ev_callback *ev_cb, h2_beam_io_callback *io_cb, void *ctx) { - h2_beam_lock bl; - if (enter_yellow(beam, &bl) == APR_SUCCESS) { - beam->cons_ev_cb = ev_cb; - beam->cons_io_cb = io_cb; - beam->cons_ctx = ctx; - leave_yellow(beam, &bl); - } + apr_thread_mutex_lock(beam->lock); + beam->cons_io_cb = io_cb; + beam->cons_ctx = ctx; + apr_thread_mutex_unlock(beam->lock); } -void h2_beam_on_produced(h2_bucket_beam *beam, - h2_beam_io_callback *io_cb, void *ctx) +void h2_beam_on_received(h2_bucket_beam *beam, + h2_beam_ev_callback *recv_cb, void *ctx) { - h2_beam_lock bl; - if (enter_yellow(beam, &bl) == APR_SUCCESS) { - beam->prod_io_cb = io_cb; - beam->prod_ctx = ctx; - leave_yellow(beam, &bl); - } + apr_thread_mutex_lock(beam->lock); + beam->recv_cb = recv_cb; + beam->recv_ctx = ctx; + apr_thread_mutex_unlock(beam->lock); } -void h2_beam_on_file_beam(h2_bucket_beam *beam, - h2_beam_can_beam_callback *cb, void *ctx) +void h2_beam_on_eagain(h2_bucket_beam *beam, + h2_beam_ev_callback *eagain_cb, void *ctx) { - h2_beam_lock bl; - - if (enter_yellow(beam, &bl) == APR_SUCCESS) { - beam->can_beam_fn = cb; - beam->can_beam_ctx = ctx; - leave_yellow(beam, &bl); - } + apr_thread_mutex_lock(beam->lock); + beam->eagain_cb = eagain_cb; + beam->eagain_ctx = ctx; + apr_thread_mutex_unlock(beam->lock); } +void h2_beam_on_send(h2_bucket_beam *beam, + h2_beam_ev_callback *send_cb, void *ctx) +{ + apr_thread_mutex_lock(beam->lock); + beam->send_cb = send_cb; + beam->send_ctx = ctx; + apr_thread_mutex_unlock(beam->lock); +} -apr_off_t h2_beam_get_buffered(h2_bucket_beam *beam) +void h2_beam_on_was_empty(h2_bucket_beam *beam, + h2_beam_ev_callback *was_empty_cb, void *ctx) { - apr_bucket *b; - apr_off_t l = 0; - h2_beam_lock bl; - - if (beam && enter_yellow(beam, &bl) == APR_SUCCESS) { - for (b = H2_BLIST_FIRST(&beam->send_list); - b != H2_BLIST_SENTINEL(&beam->send_list); - b = APR_BUCKET_NEXT(b)) { - /* should all have determinate length */ - l += b->length; - } - leave_yellow(beam, &bl); - } - return l; + apr_thread_mutex_lock(beam->lock); + beam->was_empty_cb = was_empty_cb; + beam->was_empty_ctx = ctx; + apr_thread_mutex_unlock(beam->lock); } -apr_off_t h2_beam_get_mem_used(h2_bucket_beam *beam) + +static apr_off_t get_buffered_data_len(h2_bucket_beam *beam) { 
apr_bucket *b; apr_off_t l = 0; - h2_beam_lock bl; - - if (beam && enter_yellow(beam, &bl) == APR_SUCCESS) { - for (b = H2_BLIST_FIRST(&beam->send_list); - b != H2_BLIST_SENTINEL(&beam->send_list); - b = APR_BUCKET_NEXT(b)) { - l += bucket_mem_used(b); - } - leave_yellow(beam, &bl); + + for (b = H2_BLIST_FIRST(&beam->buckets_to_send); + b != H2_BLIST_SENTINEL(&beam->buckets_to_send); + b = APR_BUCKET_NEXT(b)) { + /* should all have determinate length */ + l += b->length; } return l; } -int h2_beam_empty(h2_bucket_beam *beam) +apr_off_t h2_beam_get_buffered(h2_bucket_beam *beam) { - int empty = 1; - h2_beam_lock bl; - - if (beam && enter_yellow(beam, &bl) == APR_SUCCESS) { - empty = (H2_BLIST_EMPTY(&beam->send_list) - && (!beam->recv_buffer || APR_BRIGADE_EMPTY(beam->recv_buffer))); - leave_yellow(beam, &bl); - } - return empty; -} + apr_off_t l = 0; -int h2_beam_holds_proxies(h2_bucket_beam *beam) -{ - int has_proxies = 1; - h2_beam_lock bl; - - if (beam && enter_yellow(beam, &bl) == APR_SUCCESS) { - has_proxies = !H2_BPROXY_LIST_EMPTY(&beam->proxies); - leave_yellow(beam, &bl); - } - return has_proxies; + apr_thread_mutex_lock(beam->lock); + l = get_buffered_data_len(beam); + apr_thread_mutex_unlock(beam->lock); + return l; } -int h2_beam_was_received(h2_bucket_beam *beam) +apr_off_t h2_beam_get_mem_used(h2_bucket_beam *beam) { - int happend = 0; - h2_beam_lock bl; - - if (beam && enter_yellow(beam, &bl) == APR_SUCCESS) { - happend = (beam->received_bytes > 0); - leave_yellow(beam, &bl); - } - return happend; -} + apr_bucket *b; + apr_off_t l = 0; -apr_size_t h2_beam_get_files_beamed(h2_bucket_beam *beam) -{ - apr_size_t n = 0; - h2_beam_lock bl; - - if (beam && enter_yellow(beam, &bl) == APR_SUCCESS) { - n = beam->files_beamed; - leave_yellow(beam, &bl); + apr_thread_mutex_lock(beam->lock); + for (b = H2_BLIST_FIRST(&beam->buckets_to_send); + b != H2_BLIST_SENTINEL(&beam->buckets_to_send); + b = APR_BUCKET_NEXT(b)) { + l += bucket_mem_used(b); } - return n; + apr_thread_mutex_unlock(beam->lock); + return l; } -int h2_beam_no_files(void *ctx, h2_bucket_beam *beam, apr_file_t *file) +int h2_beam_empty(h2_bucket_beam *beam) { - return 0; + int empty = 1; + + apr_thread_mutex_lock(beam->lock); + empty = buffer_is_empty(beam); + apr_thread_mutex_unlock(beam->lock); + return empty; } int h2_beam_report_consumption(h2_bucket_beam *beam) { - h2_beam_lock bl; int rv = 0; - if (enter_yellow(beam, &bl) == APR_SUCCESS) { - rv = report_consumption(beam, &bl); - leave_yellow(beam, &bl); - } + + apr_thread_mutex_lock(beam->lock); + rv = report_consumption(beam, 1); + apr_thread_mutex_unlock(beam->lock); return rv; } -void h2_beam_log(h2_bucket_beam *beam, conn_rec *c, int level, const char *msg) +int h2_beam_is_complete(h2_bucket_beam *beam) { - if (beam && APLOG_C_IS_LEVEL(c,level)) { - ap_log_cerror(APLOG_MARK, level, 0, c, - "beam(%ld-%d,%s,closed=%d,aborted=%d,empty=%d,buf=%ld): %s", - (c->master? 
c->master->id : c->id), beam->id, beam->tag, - beam->closed, beam->aborted, h2_beam_empty(beam), - (long)h2_beam_get_buffered(beam), msg); + int rv = 0; + + apr_thread_mutex_lock(beam->lock); + if (beam->closed) + rv = 1; + else { + apr_bucket *b; + for (b = H2_BLIST_FIRST(&beam->buckets_to_send); + b != H2_BLIST_SENTINEL(&beam->buckets_to_send); + b = APR_BUCKET_NEXT(b)) { + if (APR_BUCKET_IS_EOS(b)) { + rv = 1; + break; + } + } } + apr_thread_mutex_unlock(beam->lock); + return rv; } - - diff --git a/modules/http2/h2_bucket_beam.h b/modules/http2/h2_bucket_beam.h index f260762..c58ce98 100644 --- a/modules/http2/h2_bucket_beam.h +++ b/modules/http2/h2_bucket_beam.h @@ -17,191 +17,63 @@ #ifndef h2_bucket_beam_h #define h2_bucket_beam_h +#include "h2_conn_ctx.h" + struct apr_thread_mutex_t; struct apr_thread_cond_t; -/******************************************************************************* - * apr_bucket list without bells and whistles - ******************************************************************************/ - -/** - * h2_blist can hold a list of buckets just like apr_bucket_brigade, but - * does not to any allocations or related features. - */ -typedef struct { - APR_RING_HEAD(h2_bucket_list, apr_bucket) list; -} h2_blist; - -#define H2_BLIST_INIT(b) APR_RING_INIT(&(b)->list, apr_bucket, link); -#define H2_BLIST_SENTINEL(b) APR_RING_SENTINEL(&(b)->list, apr_bucket, link) -#define H2_BLIST_EMPTY(b) APR_RING_EMPTY(&(b)->list, apr_bucket, link) -#define H2_BLIST_FIRST(b) APR_RING_FIRST(&(b)->list) -#define H2_BLIST_LAST(b) APR_RING_LAST(&(b)->list) -#define H2_BLIST_INSERT_HEAD(b, e) do { \ - apr_bucket *ap__b = (e); \ - APR_RING_INSERT_HEAD(&(b)->list, ap__b, apr_bucket, link); \ - } while (0) -#define H2_BLIST_INSERT_TAIL(b, e) do { \ - apr_bucket *ap__b = (e); \ - APR_RING_INSERT_TAIL(&(b)->list, ap__b, apr_bucket, link); \ - } while (0) -#define H2_BLIST_CONCAT(a, b) do { \ - APR_RING_CONCAT(&(a)->list, &(b)->list, apr_bucket, link); \ - } while (0) -#define H2_BLIST_PREPEND(a, b) do { \ - APR_RING_PREPEND(&(a)->list, &(b)->list, apr_bucket, link); \ - } while (0) - -/******************************************************************************* - * h2_bucket_beam - ******************************************************************************/ - /** * A h2_bucket_beam solves the task of transferring buckets, esp. their data, - * across threads with zero buffer copies. - * - * When a thread, let's call it the sender thread, wants to send buckets to - * another, the green thread, it creates a h2_bucket_beam and adds buckets - * via the h2_beam_send(). It gives the beam to the green thread which then - * can receive buckets into its own brigade via h2_beam_receive(). - * - * Sending and receiving can happen concurrently. - * - * The beam can limit the amount of data it accepts via the buffer_size. This - * can also be adjusted during its lifetime. Sends and receives can be done blocking. - * A timeout can be set for such blocks. - * - * Care needs to be taken when terminating the beam. The beam registers at - * the pool it was created with and will cleanup after itself. However, if - * received buckets do still exist, already freed memory might be accessed. - * The beam does a assertion on this condition. 
- * - * The proper way of shutting down a beam is to first make sure there are no - * more green buckets out there, then cleanup the beam to purge eventually - * still existing sender buckets and then, possibly, terminate the beam itself - * (or the pool it was created with). - * - * The following restrictions apply to bucket transport: - * - only EOS and FLUSH meta buckets are copied through. All other meta buckets - * are kept in the beams hold. - * - all kind of data buckets are transported through: - * - transient buckets are converted to heap ones on send - * - heap and pool buckets require no extra handling - * - buckets with indeterminate length are read on send - * - file buckets will transfer the file itself into a new bucket, if allowed - * - all other buckets are read on send to make sure data is present - * - * This assures that when the sender thread sends its sender buckets, the data - * is made accessible while still on the sender side. The sender bucket then enters - * the beams hold storage. - * When the green thread calls receive, sender buckets in the hold are wrapped - * into special beam buckets. Beam buckets on read present the data directly - * from the internal sender one, but otherwise live on the green side. When a - * beam bucket gets destroyed, it notifies its beam that the corresponding - * sender bucket from the hold may be destroyed. - * Since the destruction of green buckets happens in the green thread, any - * corresponding sender bucket can not immediately be destroyed, as that would - * result in race conditions. - * Instead, the beam transfers such sender buckets from the hold to the purge - * storage. Next time there is a call from the sender side, the buckets in - * purge will be deleted. - * - * There are callbacks that can be registesender with a beam: - * - a "consumed" callback that gets called on the sender side with the - * amount of data that has been received by the green side. The amount - * is a delta from the last callback invocation. The sender side can trigger - * these callbacks by calling h2_beam_send() with a NULL brigade. - * - a "can_beam_file" callback that can prohibit the transfer of file handles - * through the beam. This will cause file buckets to be read on send and - * its data buffer will then be transports just like a heap bucket would. - * When no callback is registered, no restrictions apply and all files are - * passed through. - * File handles transfersender to the green side will stay there until the - * receiving brigade's pool is destroyed/cleared. If the pool lives very - * long or if many different files are beamed, the process might run out - * of available file handles. - * - * The name "beam" of course is inspired by good old transporter - * technology where humans are kept inside the transporter's memory - * buffers until the transmission is complete. Star gates use a similar trick. + * across threads with as little copying as possible. 
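h2_beam_create(), declared further down in this header, now binds a beam to the connection the sender operates on. A minimal creation sketch under assumed values; the id, tag, 64 KB buffer limit and the use of the server timeout are illustrative, not taken from the patch:

    static h2_bucket_beam *example_beam_create(conn_rec *c1)
    {
        h2_bucket_beam *beam = NULL;
        /* 64 KB buffer limit and the server timeout are illustrative values */
        if (APR_SUCCESS != h2_beam_create(&beam, c1, c1->pool, 1, "example",
                                          64 * 1024, c1->base_server->timeout)) {
            return NULL;
        }
        return beam;
    }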
*/ -typedef void h2_beam_mutex_leave(void *ctx, struct apr_thread_mutex_t *lock); - -typedef struct { - apr_thread_mutex_t *mutex; - h2_beam_mutex_leave *leave; - void *leave_ctx; -} h2_beam_lock; - typedef struct h2_bucket_beam h2_bucket_beam; -typedef apr_status_t h2_beam_mutex_enter(void *ctx, h2_beam_lock *pbl); - typedef void h2_beam_io_callback(void *ctx, h2_bucket_beam *beam, apr_off_t bytes); typedef void h2_beam_ev_callback(void *ctx, h2_bucket_beam *beam); -typedef struct h2_beam_proxy h2_beam_proxy; -typedef struct { - APR_RING_HEAD(h2_beam_proxy_list, h2_beam_proxy) list; -} h2_bproxy_list; - -typedef int h2_beam_can_beam_callback(void *ctx, h2_bucket_beam *beam, - apr_file_t *file); - -typedef enum { - H2_BEAM_OWNER_SEND, - H2_BEAM_OWNER_RECV -} h2_beam_owner_t; - /** - * Will deny all transfer of apr_file_t across the beam and force - * a data copy instead. + * h2_blist can hold a list of buckets just like apr_bucket_brigade, but + * does not to any allocations or related features. */ -int h2_beam_no_files(void *ctx, h2_bucket_beam *beam, apr_file_t *file); +typedef struct { + APR_RING_HEAD(h2_bucket_list, apr_bucket) list; +} h2_blist; struct h2_bucket_beam { int id; - const char *tag; + const char *name; + conn_rec *from; apr_pool_t *pool; - h2_beam_owner_t owner; - h2_blist send_list; - h2_blist hold_list; - h2_blist purge_list; - apr_bucket_brigade *recv_buffer; - h2_bproxy_list proxies; - apr_pool_t *send_pool; - apr_pool_t *recv_pool; - + h2_blist buckets_to_send; + h2_blist buckets_consumed; + h2_blist buckets_eor; + apr_size_t max_buf_size; apr_interval_time_t timeout; - apr_off_t sent_bytes; /* amount of bytes send */ - apr_off_t received_bytes; /* amount of bytes received */ - - apr_size_t buckets_sent; /* # of beam buckets sent */ - apr_size_t files_beamed; /* how many file handles have been set aside */ - - unsigned int aborted : 1; - unsigned int closed : 1; - unsigned int close_sent : 1; - unsigned int tx_mem_limits : 1; /* only memory size counts on transfers */ + int aborted; + int closed; + int tx_mem_limits; /* only memory size counts on transfers */ + int copy_files; struct apr_thread_mutex_t *lock; struct apr_thread_cond_t *change; - apr_off_t cons_bytes_reported; /* amount of bytes reported as consumed */ - h2_beam_ev_callback *cons_ev_cb; - h2_beam_io_callback *cons_io_cb; + h2_beam_ev_callback *was_empty_cb; /* event: beam changed to non-empty in h2_beam_send() */ + void *was_empty_ctx; + h2_beam_ev_callback *recv_cb; /* event: buckets were transfered in h2_beam_receive() */ + void *recv_ctx; + h2_beam_ev_callback *send_cb; /* event: buckets were added in h2_beam_send() */ + void *send_ctx; + h2_beam_ev_callback *eagain_cb; /* event: a receive results in ARP_EAGAIN */ + void *eagain_ctx; + + apr_off_t recv_bytes; /* amount of bytes transferred in h2_beam_receive() */ + apr_off_t recv_bytes_reported; /* amount of bytes reported as received via callback */ + h2_beam_io_callback *cons_io_cb; /* report: recv_bytes deltas for sender */ void *cons_ctx; - - apr_off_t prod_bytes_reported; /* amount of bytes reported as produced */ - h2_beam_io_callback *prod_io_cb; - void *prod_ctx; - - h2_beam_can_beam_callback *can_beam_fn; - void *can_beam_ctx; }; /** @@ -212,56 +84,66 @@ struct h2_bucket_beam { * that is only used inside that same mutex. 
* * @param pbeam will hold the created beam on return + * @param c_from connection from which buchets are sent * @param pool pool owning the beam, beam will cleanup when pool released * @param id identifier of the beam * @param tag tag identifying beam for logging - * @param owner if the beam is owned by the sender or receiver, e.g. if - * the pool owner is using this beam for sending or receiving * @param buffer_size maximum memory footprint of buckets buffered in beam, or * 0 for no limitation * @param timeout timeout for blocking operations */ apr_status_t h2_beam_create(h2_bucket_beam **pbeam, + conn_rec *from, apr_pool_t *pool, int id, const char *tag, - h2_beam_owner_t owner, apr_size_t buffer_size, apr_interval_time_t timeout); /** * Destroys the beam immediately without cleanup. */ -apr_status_t h2_beam_destroy(h2_bucket_beam *beam); +apr_status_t h2_beam_destroy(h2_bucket_beam *beam, conn_rec *c); /** - * Send buckets from the given brigade through the beam. Will hold buckets - * internally as long as they have not been processed by the receiving side. - * All accepted buckets are removed from the given brigade. Will return with - * APR_EAGAIN on non-blocking sends when not all buckets could be accepted. - * - * Call from the sender side only. + * Switch copying of file buckets on/off. */ -apr_status_t h2_beam_send(h2_bucket_beam *beam, - apr_bucket_brigade *bb, - apr_read_type_e block); +void h2_beam_set_copy_files(h2_bucket_beam * beam, int enabled); /** - * Register the pool from which future buckets are send. This defines - * the lifetime of the buckets, e.g. the pool should not be cleared/destroyed - * until the data is no longer needed (or has been received). + * Send buckets from the given brigade through the beam. + * This can block of the amount of bucket data is above the buffer limit. + * @param beam the beam to add buckets to + * @param from the connection the sender operates on, must be the same as + * used to create the beam + * @param bb the brigade to take buckets from + * @param block if the sending should block when the buffer is full + * @param pwritten on return, contains the number of data bytes sent + * @return APR_SUCCESS when buckets were added to the beam. This can be + * a partial transfer and other buckets may still remain in bb + * APR_EAGAIN on non-blocking send when the buffer is full + * APR_TIMEUP on blocking semd that time out + * APR_ECONNABORTED when beam has been aborted */ -void h2_beam_send_from(h2_bucket_beam *beam, apr_pool_t *p); +apr_status_t h2_beam_send(h2_bucket_beam *beam, conn_rec *from, + apr_bucket_brigade *bb, + apr_read_type_e block, + apr_off_t *pwritten); /** - * Receive buckets from the beam into the given brigade. Will return APR_EOF - * when reading past an EOS bucket. Reads can be blocking until data is - * available or the beam has been closed. Non-blocking calls return APR_EAGAIN - * if no data is available. - * - * Call from the receiver side only. + * Receive buckets from the beam into the given brigade. The caller is + * operating on connection `to`. 
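A minimal usage sketch for the send/receive pair documented here, assuming a sender thread working on `c_from` (the connection the beam was created with) and a receiver working on `to`; both helper names are made up for illustration. Since a successful send may be a partial transfer, the sender loops until its brigade is drained:

    static apr_status_t send_all(h2_bucket_beam *beam, conn_rec *c_from,
                                 apr_bucket_brigade *bb)
    {
        apr_off_t written = 0;
        apr_status_t rv = APR_SUCCESS;

        while (APR_SUCCESS == rv && !APR_BRIGADE_EMPTY(bb)) {
            /* blocks while the beam buffer is above its size limit */
            rv = h2_beam_send(beam, c_from, bb, APR_BLOCK_READ, &written);
        }
        return rv; /* e.g. APR_ECONNABORTED once the beam was aborted */
    }

    static apr_status_t recv_some(h2_bucket_beam *beam, conn_rec *to,
                                  apr_bucket_brigade *bb, apr_off_t max)
    {
        /* non-blocking read; APR_EAGAIN simply means nothing is buffered yet */
        return h2_beam_receive(beam, to, bb, APR_NONBLOCK_READ, max);
    }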
+ * @param beam the beam to receive buckets from + * @param to the connection the receiver is working with + * @param bb the bucket brigade to append to + * @param block if the read should block when buckets are unavailable + * @param readbytes the amount of data the receiver wants + * @return APR_SUCCESS when buckets were appended + * APR_EAGAIN on non-blocking read when no buckets are available + * APR_TIMEUP on blocking reads that time out + * APR_ECONNABORTED when beam has been aborted */ -apr_status_t h2_beam_receive(h2_bucket_beam *beam, - apr_bucket_brigade *green_buckets, +apr_status_t h2_beam_receive(h2_bucket_beam *beam, conn_rec *to, + apr_bucket_brigade *bb, apr_read_type_e block, apr_off_t readbytes); @@ -271,53 +153,27 @@ apr_status_t h2_beam_receive(h2_bucket_beam *beam, int h2_beam_empty(h2_bucket_beam *beam); /** - * Determine if beam has handed out proxy buckets that are not destroyed. - */ -int h2_beam_holds_proxies(h2_bucket_beam *beam); - -/** - * Abort the beam. Will cleanup any buffered buckets and answer all send - * and receives with APR_ECONNABORTED. - * - * Call from the sender side only. - */ -void h2_beam_abort(h2_bucket_beam *beam); - -/** - * Close the beam. Sending an EOS bucket serves the same purpose. - * - * Call from the sender side only. - */ -apr_status_t h2_beam_close(h2_bucket_beam *beam); - -/** - * Receives leaves the beam, e.g. will no longer read. This will - * interrupt any sender blocked writing and fail future send. - * - * Call from the receiver side only. + * Abort the beam, either from receiving or sending side. + * + * @param beam the beam to abort + * @param c the connection the caller is working with */ -apr_status_t h2_beam_leave(h2_bucket_beam *beam); - -int h2_beam_is_closed(h2_bucket_beam *beam); +void h2_beam_abort(h2_bucket_beam *beam, conn_rec *c); /** - * Return APR_SUCCESS when all buckets in transit have been handled. - * When called with APR_BLOCK_READ and a mutex set, will wait until the green - * side has consumed all data. Otherwise APR_EAGAIN is returned. - * With clear_buffers set, any queued data is discarded. - * If a timeout is set on the beam, waiting might also time out and - * return APR_ETIMEUP. + * Close the beam. Make certain an EOS is sent. * - * Call from the sender side only. + * @param beam the beam to abort + * @param c the connection the caller is working with */ -apr_status_t h2_beam_wait_empty(h2_bucket_beam *beam, apr_read_type_e block); +void h2_beam_close(h2_bucket_beam *beam, conn_rec *c); -/** - * Set/get the timeout for blocking read/write operations. Only works - * if a mutex has been set for the beam. +/** + * Set/get the timeout for blocking sebd/receive operations. */ void h2_beam_timeout_set(h2_bucket_beam *beam, apr_interval_time_t timeout); + apr_interval_time_t h2_beam_timeout_get(h2_bucket_beam *beam); /** @@ -332,7 +188,6 @@ apr_size_t h2_beam_buffer_size_get(h2_bucket_beam *beam); * amount of bytes that have been consumed by the receiver, since the * last callback invocation or reset. * @param beam the beam to set the callback on - * @param ev_cb the callback or NULL, called when bytes are consumed * @param io_cb the callback or NULL, called on sender with bytes consumed * @param ctx the context to use in callback invocation * @@ -340,43 +195,58 @@ apr_size_t h2_beam_buffer_size_get(h2_bucket_beam *beam); * from any side. 
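The abort/close pair declared above replaces the older leave/close/wait_empty trio; a small hedged sketch of a typical teardown, with the hypothetical helper finish_or_abort() standing in for whatever a caller would actually do on its success and error paths:

    static void finish_or_abort(h2_bucket_beam *beam, conn_rec *c, int error)
    {
        if (error) {
            /* blocked peers will see APR_ECONNABORTED */
            h2_beam_abort(beam, c);
        }
        else {
            /* makes certain an EOS is sent */
            h2_beam_close(beam, c);
        }
    }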
*/ void h2_beam_on_consumed(h2_bucket_beam *beam, - h2_beam_ev_callback *ev_cb, h2_beam_io_callback *io_cb, void *ctx); /** - * Call any registered consumed handler, if any changes have happened - * since the last invocation. - * @return !=0 iff a handler has been called - * - * Needs to be invoked from the sending side. + * Register a callback to be invoked on the receiver side whenever + * buckets have been transfered in a h2_beam_receive() call. + * @param beam the beam to set the callback on + * @param recv_cb the callback or NULL, called when buckets are received + * @param ctx the context to use in callback invocation */ -int h2_beam_report_consumption(h2_bucket_beam *beam); +void h2_beam_on_received(h2_bucket_beam *beam, + h2_beam_ev_callback *recv_cb, void *ctx); /** - * Register a callback to be invoked on the receiver side with the - * amount of bytes that have been produces by the sender, since the - * last callback invocation or reset. + * Register a callback to be invoked on the receiver side whenever + * APR_EAGAIN is being returned in h2_beam_receive(). * @param beam the beam to set the callback on - * @param io_cb the callback or NULL, called on receiver with bytes produced + * @param egain_cb the callback or NULL, called before APR_EAGAIN is returned * @param ctx the context to use in callback invocation - * - * Call from the receiver side, callbacks invoked on either side. */ -void h2_beam_on_produced(h2_bucket_beam *beam, - h2_beam_io_callback *io_cb, void *ctx); +void h2_beam_on_eagain(h2_bucket_beam *beam, + h2_beam_ev_callback *eagain_cb, void *ctx); /** - * Register a callback that may prevent a file from being beam as - * file handle, forcing the file content to be copied. Then no callback - * is set (NULL), file handles are transferred directly. + * Register a call back from the sender side to be invoked when send + * has added buckets to the beam. + * Unregister by passing a NULL on_send_cb. * @param beam the beam to set the callback on - * @param io_cb the callback or NULL, called on receiver with bytes produced + * @param on_send_cb the callback to invoke after buckets were added * @param ctx the context to use in callback invocation - * - * Call from the receiver side, callbacks invoked on either side. */ -void h2_beam_on_file_beam(h2_bucket_beam *beam, - h2_beam_can_beam_callback *cb, void *ctx); +void h2_beam_on_send(h2_bucket_beam *beam, + h2_beam_ev_callback *on_send_cb, void *ctx); + +/** + * Register a call back from the sender side to be invoked when send + * has added to a previously empty beam. + * Unregister by passing a NULL was_empty_cb. + * @param beam the beam to set the callback on + * @param was_empty_cb the callback to invoke on blocked send + * @param ctx the context to use in callback invocation + */ +void h2_beam_on_was_empty(h2_bucket_beam *beam, + h2_beam_ev_callback *was_empty_cb, void *ctx); + +/** + * Call any registered consumed handler, if any changes have happened + * since the last invocation. + * @return !=0 iff a handler has been called + * + * Needs to be invoked from the sending side. + */ +int h2_beam_report_consumption(h2_bucket_beam *beam); /** * Get the amount of bytes currently buffered in the beam (unread). @@ -389,18 +259,9 @@ apr_off_t h2_beam_get_buffered(h2_bucket_beam *beam); apr_off_t h2_beam_get_mem_used(h2_bucket_beam *beam); /** - * Return != 0 iff (some) data from the beam has been received. + * @return != 0 iff beam has been closed or has an EOS bucket buffered + * waiting to be received. 
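The callback registrations above are all optional. A small sketch of the consumption reporting path, with hypothetical names (my_consumed, setup_consumed_reporting) and under the assumption that the sending side triggers pending reports itself via h2_beam_report_consumption(), as its comment describes:

    static void my_consumed(void *ctx, h2_bucket_beam *beam, apr_off_t bytes)
    {
        (void)ctx; (void)beam;
        /* `bytes` is the delta consumed by the receiver since the last
         * report, e.g. room by which a flow-control window could re-open */
        (void)bytes;
    }

    static void setup_consumed_reporting(h2_bucket_beam *beam, void *my_ctx)
    {
        h2_beam_on_consumed(beam, my_consumed, my_ctx);
        /* the sending side later triggers pending reports explicitly */
        (void)h2_beam_report_consumption(beam);
    }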
*/ -int h2_beam_was_received(h2_bucket_beam *beam); - -apr_size_t h2_beam_get_files_beamed(h2_bucket_beam *beam); - -typedef apr_bucket *h2_bucket_beamer(h2_bucket_beam *beam, - apr_bucket_brigade *dest, - const apr_bucket *src); - -void h2_register_bucket_beamer(h2_bucket_beamer *beamer); - -void h2_beam_log(h2_bucket_beam *beam, conn_rec *c, int level, const char *msg); +int h2_beam_is_complete(h2_bucket_beam *beam); #endif /* h2_bucket_beam_h */ diff --git a/modules/http2/h2_bucket_eos.c b/modules/http2/h2_bucket_eos.c index c89d499..fa46a30 100644 --- a/modules/http2/h2_bucket_eos.c +++ b/modules/http2/h2_bucket_eos.c @@ -13,22 +13,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - -/* Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ #include #include @@ -37,6 +21,7 @@ #include #include #include +#include #include "h2_private.h" #include "h2.h" diff --git a/modules/http2/h2_c1.c b/modules/http2/h2_c1.c new file mode 100644 index 0000000..afb26fc --- /dev/null +++ b/modules/http2/h2_c1.c @@ -0,0 +1,323 @@ +/* Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include +#include + +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "h2_private.h" +#include "h2.h" +#include "h2_bucket_beam.h" +#include "h2_config.h" +#include "h2_conn_ctx.h" +#include "h2_mplx.h" +#include "h2_session.h" +#include "h2_stream.h" +#include "h2_protocol.h" +#include "h2_workers.h" +#include "h2_c1.h" +#include "h2_version.h" +#include "h2_util.h" + +static struct h2_workers *workers; + +static int async_mpm; + +APR_OPTIONAL_FN_TYPE(ap_logio_add_bytes_in) *h2_c_logio_add_bytes_in; +APR_OPTIONAL_FN_TYPE(ap_logio_add_bytes_out) *h2_c_logio_add_bytes_out; + +apr_status_t h2_c1_child_init(apr_pool_t *pool, server_rec *s) +{ + apr_status_t status = APR_SUCCESS; + int minw, maxw; + apr_time_t idle_limit; + + status = ap_mpm_query(AP_MPMQ_IS_ASYNC, &async_mpm); + if (status != APR_SUCCESS) { + /* some MPMs do not implemnent this */ + async_mpm = 0; + status = APR_SUCCESS; + } + + h2_config_init(pool); + + h2_get_workers_config(s, &minw, &maxw, &idle_limit); + workers = h2_workers_create(s, pool, maxw, minw, idle_limit); + + h2_c_logio_add_bytes_in = APR_RETRIEVE_OPTIONAL_FN(ap_logio_add_bytes_in); + h2_c_logio_add_bytes_out = APR_RETRIEVE_OPTIONAL_FN(ap_logio_add_bytes_out); + + return h2_mplx_c1_child_init(pool, s); +} + +void h2_c1_child_stopping(apr_pool_t *pool, int graceful) +{ + if (workers) { + h2_workers_shutdown(workers, graceful); + } +} + + +apr_status_t h2_c1_setup(conn_rec *c, request_rec *r, server_rec *s) +{ + h2_session *session; + h2_conn_ctx_t *ctx; + apr_status_t rv; + + if (!workers) { + ap_log_cerror(APLOG_MARK, APLOG_ERR, 0, c, APLOGNO(02911) + "workers not initialized"); + rv = APR_EGENERAL; + goto cleanup; + } + + rv = h2_session_create(&session, c, r, s, workers); + if (APR_SUCCESS != rv) goto cleanup; + + ctx = h2_conn_ctx_get(c); + ap_assert(ctx); + h2_conn_ctx_assign_session(ctx, session); + /* remove the input filter of mod_reqtimeout, now that the connection + * is established and we have switched to h2. reqtimeout has supervised + * possibly configured handshake timeouts and needs to get out of the way + * now since the rest of its state handling assumes http/1.x to take place. */ + ap_remove_input_filter_byhandle(c->input_filters, "reqtimeout"); + +cleanup: + return rv; +} + +apr_status_t h2_c1_run(conn_rec *c) +{ + apr_status_t status; + int mpm_state = 0; + h2_conn_ctx_t *conn_ctx = h2_conn_ctx_get(c); + + ap_assert(conn_ctx); + ap_assert(conn_ctx->session); + do { + if (c->cs) { + c->cs->sense = CONN_SENSE_DEFAULT; + c->cs->state = CONN_STATE_HANDLER; + } + + status = h2_session_process(conn_ctx->session, async_mpm); + + if (APR_STATUS_IS_EOF(status)) { + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, c, + H2_SSSN_LOG(APLOGNO(03045), conn_ctx->session, + "process, closing conn")); + c->keepalive = AP_CONN_CLOSE; + } + else { + c->keepalive = AP_CONN_KEEPALIVE; + } + + if (ap_mpm_query(AP_MPMQ_MPM_STATE, &mpm_state)) { + break; + } + } while (!async_mpm + && c->keepalive == AP_CONN_KEEPALIVE + && mpm_state != AP_MPMQ_STOPPING); + + if (c->cs) { + switch (conn_ctx->session->state) { + case H2_SESSION_ST_INIT: + case H2_SESSION_ST_IDLE: + case H2_SESSION_ST_BUSY: + case H2_SESSION_ST_WAIT: + c->cs->state = CONN_STATE_WRITE_COMPLETION; + if (c->cs && !conn_ctx->session->remote.emitted_count) { + /* let the MPM know that we are not done and want + * the Timeout behaviour instead of a KeepAliveTimeout + * See PR 63534. 
+ */ + c->cs->sense = CONN_SENSE_WANT_READ; + } + break; + case H2_SESSION_ST_CLEANUP: + case H2_SESSION_ST_DONE: + default: + c->cs->state = CONN_STATE_LINGER; + break; + } + } + + return APR_SUCCESS; +} + +apr_status_t h2_c1_pre_close(struct h2_conn_ctx_t *ctx, conn_rec *c) +{ + h2_conn_ctx_t *conn_ctx = h2_conn_ctx_get(c); + + if (conn_ctx && conn_ctx->session) { + apr_status_t status = h2_session_pre_close(conn_ctx->session, async_mpm); + return (status == APR_SUCCESS)? DONE : status; + } + return DONE; +} + +int h2_c1_allows_direct(conn_rec *c) +{ + if (!c->master) { + int is_tls = ap_ssl_conn_is_ssl(c); + const char *needed_protocol = is_tls? "h2" : "h2c"; + int h2_direct = h2_config_cgeti(c, H2_CONF_DIRECT); + + if (h2_direct < 0) { + h2_direct = is_tls? 0 : 1; + } + return (h2_direct && ap_is_allowed_protocol(c, NULL, NULL, needed_protocol)); + } + return 0; +} + +int h2_c1_can_upgrade(request_rec *r) +{ + if (!r->connection->master) { + int h2_upgrade = h2_config_rgeti(r, H2_CONF_UPGRADE); + return h2_upgrade > 0 || (h2_upgrade < 0 && !ap_ssl_conn_is_ssl(r->connection)); + } + return 0; +} + +static int h2_c1_hook_process_connection(conn_rec* c) +{ + apr_status_t status; + h2_conn_ctx_t *ctx; + + if (c->master) goto declined; + ctx = h2_conn_ctx_get(c); + + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c, "h2_h2, process_conn"); + if (!ctx && c->keepalives == 0) { + const char *proto = ap_get_protocol(c); + + if (APLOGctrace1(c)) { + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c, "h2_h2, process_conn, " + "new connection using protocol '%s', direct=%d, " + "tls acceptable=%d", proto, h2_c1_allows_direct(c), + h2_protocol_is_acceptable_c1(c, NULL, 1)); + } + + if (!strcmp(AP_PROTOCOL_HTTP1, proto) + && h2_c1_allows_direct(c) + && h2_protocol_is_acceptable_c1(c, NULL, 1)) { + /* Fresh connection still is on http/1.1 and H2Direct is enabled. + * Otherwise connection is in a fully acceptable state. + * -> peek at the first 24 incoming bytes + */ + apr_bucket_brigade *temp; + char *peek = NULL; + apr_size_t peeklen; + + temp = apr_brigade_create(c->pool, c->bucket_alloc); + status = ap_get_brigade(c->input_filters, temp, + AP_MODE_SPECULATIVE, APR_BLOCK_READ, 24); + + if (status != APR_SUCCESS) { + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, c, APLOGNO(03054) + "h2_h2, error reading 24 bytes speculative"); + apr_brigade_destroy(temp); + return DECLINED; + } + + apr_brigade_pflatten(temp, &peek, &peeklen, c->pool); + if ((peeklen >= 24) && !memcmp(H2_MAGIC_TOKEN, peek, 24)) { + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c, + "h2_h2, direct mode detected"); + ctx = h2_conn_ctx_create_for_c1(c, c->base_server, + ap_ssl_conn_is_ssl(c)? "h2" : "h2c"); + } + else if (APLOGctrace2(c)) { + ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, c, + "h2_h2, not detected in %d bytes(base64): %s", + (int)peeklen, h2_util_base64url_encode(peek, peeklen, c->pool)); + } + apr_brigade_destroy(temp); + } + } + + if (!ctx) goto declined; + + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c, "process_conn"); + if (!ctx->session) { + status = h2_c1_setup(c, NULL, ctx->server? ctx->server : c->base_server); + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, c, "conn_setup"); + if (status != APR_SUCCESS) { + h2_conn_ctx_detach(c); + return !OK; + } + } + h2_c1_run(c); + return OK; + +declined: + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c, "h2_h2, declined"); + return DECLINED; +} + +static int h2_c1_hook_pre_close(conn_rec *c) +{ + h2_conn_ctx_t *ctx; + + /* secondary connection? 
*/ + if (c->master) { + return DECLINED; + } + + ctx = h2_conn_ctx_get(c); + if (ctx) { + /* If the session has been closed correctly already, we will not + * find a h2_conn_ctx_there. The presence indicates that the session + * is still ongoing. */ + return h2_c1_pre_close(ctx, c); + } + return DECLINED; +} + +static const char* const mod_ssl[] = { "mod_ssl.c", NULL}; +static const char* const mod_reqtimeout[] = { "mod_ssl.c", "mod_reqtimeout.c", NULL}; + +void h2_c1_register_hooks(void) +{ + /* Our main processing needs to run quite late. Definitely after mod_ssl, + * as we need its connection filters, but also before reqtimeout as its + * method of timeouts is specific to HTTP/1.1 (as of now). + * The core HTTP/1 processing run as REALLY_LAST, so we will have + * a chance to take over before it. + */ + ap_hook_process_connection(h2_c1_hook_process_connection, + mod_reqtimeout, NULL, APR_HOOK_LAST); + + /* One last chance to properly say goodbye if we have not done so + * already. */ + ap_hook_pre_close_connection(h2_c1_hook_pre_close, NULL, mod_ssl, APR_HOOK_LAST); +} + diff --git a/modules/http2/h2_c1.h b/modules/http2/h2_c1.h new file mode 100644 index 0000000..41527f6 --- /dev/null +++ b/modules/http2/h2_c1.h @@ -0,0 +1,83 @@ +/* Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef __mod_h2__h2_c1__ +#define __mod_h2__h2_c1__ + +#include + +struct h2_conn_ctx_t; + +extern APR_OPTIONAL_FN_TYPE(ap_logio_add_bytes_in) *h2_c_logio_add_bytes_in; +extern APR_OPTIONAL_FN_TYPE(ap_logio_add_bytes_out) *h2_c_logio_add_bytes_out; + +/* Initialize this child process for h2 primary connection work, + * to be called once during child init before multi processing + * starts. + */ +apr_status_t h2_c1_child_init(apr_pool_t *pool, server_rec *s); + +/** + * Setup the primary connection and our context for HTTP/2 processing + * + * @param c the connection HTTP/2 is starting on + * @param r the upgrade request that still awaits an answer, optional + * @param s the server selected for this connection (can be != c->base_server) + */ +apr_status_t h2_c1_setup(conn_rec *c, request_rec *r, server_rec *s); + +/** + * Run the HTTP/2 primary connection in synchronous fashion. + * Return when the HTTP/2 session is done + * and the connection will close or a fatal error occurred. + * + * @param c the http2 connection to run + * @return APR_SUCCESS when session is done. + */ +apr_status_t h2_c1_run(conn_rec *c); + +/** + * The primary connection is about to close. If we have not send a GOAWAY + * yet, this is the last chance. + */ +apr_status_t h2_c1_pre_close(struct h2_conn_ctx_t *ctx, conn_rec *c); + +/** + * Check if the connection allows a direct detection of HTTPP/2, + * as configurable by the H2Direct directive. 
+ * @param c the connection to check on + * @return != 0 if direct detection is enabled + */ +int h2_c1_allows_direct(conn_rec *c); + +/** + * Check if the "Upgrade" HTTP/1.1 mode of protocol switching is enabled + * for the given request. + * @param r the request to check + * @return != 0 iff Upgrade switching is enabled + */ +int h2_c1_can_upgrade(request_rec *r); + +/* Register hooks for h2 handling on primary connections. + */ +void h2_c1_register_hooks(void); + +/** + * Child is about to be stopped, release unused resources + */ +void h2_c1_child_stopping(apr_pool_t *pool, int graceful); + +#endif /* defined(__mod_h2__h2_c1__) */ diff --git a/modules/http2/h2_c1_io.c b/modules/http2/h2_c1_io.c new file mode 100644 index 0000000..5ed4ee8 --- /dev/null +++ b/modules/http2/h2_c1_io.c @@ -0,0 +1,559 @@ +/* Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +#include "h2_private.h" +#include "h2_bucket_eos.h" +#include "h2_config.h" +#include "h2_c1.h" +#include "h2_c1_io.h" +#include "h2_protocol.h" +#include "h2_session.h" +#include "h2_util.h" + +#define TLS_DATA_MAX (16*1024) + +/* Calculated like this: assuming MTU 1500 bytes + * 1500 - 40 (IP) - 20 (TCP) - 40 (TCP options) + * - TLS overhead (60-100) + * ~= 1300 bytes */ +#define WRITE_SIZE_INITIAL 1300 + +/* The maximum we'd like to write in one chunk is + * the max size of a TLS record. When pushing + * many frames down the h2 connection, this might + * align differently because of headers and other + * frames or simply as not sufficient data is + * in a response body. + * However keeping frames at or below this limit + * should make optimizations at the layer that writes + * to TLS easier. 
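The two write sizes defined in this file implement a simple warm-up: small TLS records while the connection is "cold", full-size records once enough bytes have gone out (the actual switch happens in pass_output() further down). A reduced sketch of that decision, assuming bytes_written and warmup_size are tracked as in the h2_c1_io struct; next_write_size() is a hypothetical helper, not patch code:

    static apr_size_t next_write_size(apr_int64_t bytes_written,
                                      apr_int64_t warmup_size)
    {
        /* small records during warm-up, maximum record size afterwards */
        return (bytes_written >= warmup_size)? WRITE_SIZE_MAX
                                             : WRITE_SIZE_INITIAL;
    }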
+ */ +#define WRITE_SIZE_MAX (TLS_DATA_MAX) + +#define BUF_REMAIN ((apr_size_t)(bmax-off)) + +static void h2_c1_io_bb_log(conn_rec *c, int stream_id, int level, + const char *tag, apr_bucket_brigade *bb) +{ + char buffer[16 * 1024]; + const char *line = "(null)"; + int bmax = sizeof(buffer)/sizeof(buffer[0]); + int off = 0; + apr_bucket *b; + + (void)stream_id; + if (bb) { + memset(buffer, 0, bmax--); + for (b = APR_BRIGADE_FIRST(bb); + bmax && (b != APR_BRIGADE_SENTINEL(bb)); + b = APR_BUCKET_NEXT(b)) { + + if (APR_BUCKET_IS_METADATA(b)) { + if (APR_BUCKET_IS_EOS(b)) { + off += apr_snprintf(buffer+off, BUF_REMAIN, "eos "); + } + else if (APR_BUCKET_IS_FLUSH(b)) { + off += apr_snprintf(buffer+off, BUF_REMAIN, "flush "); + } + else if (AP_BUCKET_IS_EOR(b)) { + off += apr_snprintf(buffer+off, BUF_REMAIN, "eor "); + } + else if (H2_BUCKET_IS_H2EOS(b)) { + off += apr_snprintf(buffer+off, BUF_REMAIN, "h2eos "); + } + else { + off += apr_snprintf(buffer+off, BUF_REMAIN, "meta(unknown) "); + } + } + else { + const char *btype = "data"; + if (APR_BUCKET_IS_FILE(b)) { + btype = "file"; + } + else if (APR_BUCKET_IS_PIPE(b)) { + btype = "pipe"; + } + else if (APR_BUCKET_IS_SOCKET(b)) { + btype = "socket"; + } + else if (APR_BUCKET_IS_HEAP(b)) { + btype = "heap"; + } + else if (APR_BUCKET_IS_TRANSIENT(b)) { + btype = "transient"; + } + else if (APR_BUCKET_IS_IMMORTAL(b)) { + btype = "immortal"; + } +#if APR_HAS_MMAP + else if (APR_BUCKET_IS_MMAP(b)) { + btype = "mmap"; + } +#endif + else if (APR_BUCKET_IS_POOL(b)) { + btype = "pool"; + } + + off += apr_snprintf(buffer+off, BUF_REMAIN, "%s[%ld] ", + btype, + (long)(b->length == ((apr_size_t)-1)? -1UL : b->length)); + } + } + line = *buffer? buffer : "(empty)"; + } + /* Intentional no APLOGNO */ + ap_log_cerror(APLOG_MARK, level, 0, c, "h2_session(%ld)-%s: %s", + c->id, tag, line); + +} +#define C1_IO_BB_LOG(c, stream_id, level, tag, bb) \ + if (APLOG_C_IS_LEVEL(c, level)) { \ + h2_c1_io_bb_log((c), (stream_id), (level), (tag), (bb)); \ + } + + +apr_status_t h2_c1_io_init(h2_c1_io *io, h2_session *session) +{ + conn_rec *c = session->c1; + + io->session = session; + io->output = apr_brigade_create(c->pool, c->bucket_alloc); + io->is_tls = ap_ssl_conn_is_ssl(session->c1); + io->buffer_output = io->is_tls; + io->flush_threshold = 4 * (apr_size_t)h2_config_sgeti64(session->s, H2_CONF_STREAM_MAX_MEM); + + if (io->buffer_output) { + /* This is what we start with, + * see https://issues.apache.org/jira/browse/TS-2503 + */ + io->warmup_size = h2_config_sgeti64(session->s, H2_CONF_TLS_WARMUP_SIZE); + io->cooldown_usecs = (h2_config_sgeti(session->s, H2_CONF_TLS_COOLDOWN_SECS) + * APR_USEC_PER_SEC); + io->cooldown_usecs = 0; + io->write_size = (io->cooldown_usecs > 0? 
+ WRITE_SIZE_INITIAL : WRITE_SIZE_MAX); + } + else { + io->warmup_size = 0; + io->cooldown_usecs = 0; + io->write_size = 0; + } + + if (APLOGctrace1(c)) { + ap_log_cerror(APLOG_MARK, APLOG_TRACE4, 0, c, + "h2_c1_io(%ld): init, buffering=%d, warmup_size=%ld, " + "cd_secs=%f", c->id, io->buffer_output, + (long)io->warmup_size, + ((double)io->cooldown_usecs/APR_USEC_PER_SEC)); + } + + return APR_SUCCESS; +} + +static void append_scratch(h2_c1_io *io) +{ + if (io->scratch && io->slen > 0) { + apr_bucket *b = apr_bucket_heap_create(io->scratch, io->slen, + apr_bucket_free, + io->session->c1->bucket_alloc); + APR_BRIGADE_INSERT_TAIL(io->output, b); + io->buffered_len += io->slen; + io->scratch = NULL; + io->slen = io->ssize = 0; + } +} + +static apr_size_t assure_scratch_space(h2_c1_io *io) { + apr_size_t remain = io->ssize - io->slen; + if (io->scratch && remain == 0) { + append_scratch(io); + } + if (!io->scratch) { + /* we control the size and it is larger than what buckets usually + * allocate. */ + io->scratch = apr_bucket_alloc(io->write_size, io->session->c1->bucket_alloc); + io->ssize = io->write_size; + io->slen = 0; + remain = io->ssize; + } + return remain; +} + +static apr_status_t read_to_scratch(h2_c1_io *io, apr_bucket *b) +{ + apr_status_t status; + const char *data; + apr_size_t len; + + if (!b->length) { + return APR_SUCCESS; + } + + ap_assert(b->length <= (io->ssize - io->slen)); + if (APR_BUCKET_IS_FILE(b)) { + apr_bucket_file *f = (apr_bucket_file *)b->data; + apr_file_t *fd = f->fd; + apr_off_t offset = b->start; + + len = b->length; + /* file buckets will read 8000 byte chunks and split + * themselves. However, we do know *exactly* how many + * bytes we need where. So we read the file directly to + * where we need it. + */ + status = apr_file_seek(fd, APR_SET, &offset); + if (status != APR_SUCCESS) { + return status; + } + status = apr_file_read(fd, io->scratch + io->slen, &len); + if (status != APR_SUCCESS && status != APR_EOF) { + return status; + } + io->slen += len; + } + else if (APR_BUCKET_IS_MMAP(b)) { + ap_log_cerror(APLOG_MARK, APLOG_INFO, 0, io->session->c1, + "h2_c1_io(%ld): seeing mmap bucket of size %ld, scratch remain=%ld", + io->session->c1->id, (long)b->length, (long)(io->ssize - io->slen)); + status = apr_bucket_read(b, &data, &len, APR_BLOCK_READ); + if (status == APR_SUCCESS) { + memcpy(io->scratch+io->slen, data, len); + io->slen += len; + } + } + else { + status = apr_bucket_read(b, &data, &len, APR_BLOCK_READ); + if (status == APR_SUCCESS) { + memcpy(io->scratch+io->slen, data, len); + io->slen += len; + } + } + return status; +} + +static apr_status_t pass_output(h2_c1_io *io, int flush) +{ + conn_rec *c = io->session->c1; + apr_off_t bblen = 0; + apr_status_t rv; + + if (io->is_passing) { + /* recursive call, may be triggered by an H2EOS bucket + * being destroyed and triggering sending more data? */ + AP_DEBUG_ASSERT(0); + ap_log_cerror(APLOG_MARK, APLOG_ERR, 0, c, APLOGNO(10456) + "h2_c1_io(%ld): recursive call of h2_c1_io_pass. " + "Denied to prevent output corruption. 
This " + "points to a bug in the HTTP/2 implementation.", + c->id); + return APR_EGENERAL; + } + io->is_passing = 1; + + append_scratch(io); + if (flush) { + if (!APR_BUCKET_IS_FLUSH(APR_BRIGADE_LAST(io->output))) { + apr_bucket *b = apr_bucket_flush_create(c->bucket_alloc); + APR_BRIGADE_INSERT_TAIL(io->output, b); + } + } + if (APR_BRIGADE_EMPTY(io->output)) { + rv = APR_SUCCESS; + goto cleanup; + } + + io->unflushed = !APR_BUCKET_IS_FLUSH(APR_BRIGADE_LAST(io->output)); + apr_brigade_length(io->output, 0, &bblen); + C1_IO_BB_LOG(c, 0, APLOG_TRACE2, "out", io->output); + + rv = ap_pass_brigade(c->output_filters, io->output); + if (APR_SUCCESS != rv) goto cleanup; + io->bytes_written += (apr_size_t)bblen; + + if (io->write_size < WRITE_SIZE_MAX + && io->bytes_written >= io->warmup_size) { + /* connection is hot, use max size */ + io->write_size = WRITE_SIZE_MAX; + } + else if (io->cooldown_usecs > 0 + && io->write_size > WRITE_SIZE_INITIAL) { + apr_time_t now = apr_time_now(); + if ((now - io->last_write) >= io->cooldown_usecs) { + /* long time not written, reset write size */ + io->write_size = WRITE_SIZE_INITIAL; + io->bytes_written = 0; + } + else { + io->last_write = now; + } + } + +cleanup: + if (APR_SUCCESS != rv) { + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, rv, c, APLOGNO(03044) + "h2_c1_io(%ld): pass_out brigade %ld bytes", + c->id, (long)bblen); + } + apr_brigade_cleanup(io->output); + io->buffered_len = 0; + io->is_passing = 0; + return rv; +} + +int h2_c1_io_needs_flush(h2_c1_io *io) +{ + return io->buffered_len >= io->flush_threshold; +} + +int h2_c1_io_pending(h2_c1_io *io) +{ + return !APR_BRIGADE_EMPTY(io->output) || (io->scratch && io->slen > 0); +} + +apr_status_t h2_c1_io_pass(h2_c1_io *io) +{ + apr_status_t rv = APR_SUCCESS; + + if (h2_c1_io_pending(io)) { + rv = pass_output(io, 0); + } + return rv; +} + +apr_status_t h2_c1_io_assure_flushed(h2_c1_io *io) +{ + apr_status_t rv = APR_SUCCESS; + + if (h2_c1_io_pending(io) || io->unflushed) { + rv = pass_output(io, 1); + if (APR_SUCCESS != rv) goto cleanup; + } +cleanup: + return rv; +} + +apr_status_t h2_c1_io_add_data(h2_c1_io *io, const char *data, size_t length) +{ + apr_status_t status = APR_SUCCESS; + apr_size_t remain; + + ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, io->session->c1, + "h2_c1_io(%ld): adding %ld data bytes", + io->session->c1->id, (long)length); + if (io->buffer_output) { + while (length > 0) { + remain = assure_scratch_space(io); + if (remain >= length) { + memcpy(io->scratch + io->slen, data, length); + io->slen += length; + length = 0; + } + else { + memcpy(io->scratch + io->slen, data, remain); + io->slen += remain; + data += remain; + length -= remain; + } + } + } + else { + status = apr_brigade_write(io->output, NULL, NULL, data, length); + io->buffered_len += length; + } + return status; +} + +apr_status_t h2_c1_io_append(h2_c1_io *io, apr_bucket_brigade *bb) +{ + apr_bucket *b; + apr_status_t rv = APR_SUCCESS; + + while (!APR_BRIGADE_EMPTY(bb)) { + b = APR_BRIGADE_FIRST(bb); + if (APR_BUCKET_IS_METADATA(b) || APR_BUCKET_IS_MMAP(b)) { + /* need to finish any open scratch bucket, as meta data + * needs to be forward "in order". 
*/ + append_scratch(io); + APR_BUCKET_REMOVE(b); + APR_BRIGADE_INSERT_TAIL(io->output, b); + } + else if (io->buffer_output) { + apr_size_t remain = assure_scratch_space(io); + if (b->length > remain) { + apr_bucket_split(b, remain); + if (io->slen == 0) { + /* complete write_size bucket, append unchanged */ + APR_BUCKET_REMOVE(b); + APR_BRIGADE_INSERT_TAIL(io->output, b); + io->buffered_len += b->length; + continue; + } + } + else { + /* bucket fits in remain, copy to scratch */ + rv = read_to_scratch(io, b); + apr_bucket_delete(b); + if (APR_SUCCESS != rv) goto cleanup; + continue; + } + } + else { + /* no buffering, forward buckets setaside on flush */ + apr_bucket_setaside(b, io->session->c1->pool); + APR_BUCKET_REMOVE(b); + APR_BRIGADE_INSERT_TAIL(io->output, b); + io->buffered_len += b->length; + } + } +cleanup: + return rv; +} + +static apr_status_t c1_in_feed_bucket(h2_session *session, + apr_bucket *b, apr_ssize_t *inout_len) +{ + apr_status_t rv = APR_SUCCESS; + apr_size_t len; + const char *data; + ssize_t n; + + rv = apr_bucket_read(b, &data, &len, APR_BLOCK_READ); + while (APR_SUCCESS == rv && len > 0) { + n = nghttp2_session_mem_recv(session->ngh2, (const uint8_t *)data, len); + + ap_log_cerror(APLOG_MARK, APLOG_TRACE4, 0, session->c1, + H2_SSSN_MSG(session, "fed %ld bytes to nghttp2, %ld read"), + (long)len, (long)n); + if (n < 0) { + if (nghttp2_is_fatal((int)n)) { + h2_session_event(session, H2_SESSION_EV_PROTO_ERROR, + (int)n, nghttp2_strerror((int)n)); + rv = APR_EGENERAL; + } + } + else { + *inout_len += n; + if ((apr_ssize_t)len <= n) { + break; + } + len -= (apr_size_t)n; + data += n; + } + } + + return rv; +} + +static apr_status_t c1_in_feed_brigade(h2_session *session, + apr_bucket_brigade *bb, + apr_ssize_t *inout_len) +{ + apr_status_t rv = APR_SUCCESS; + apr_bucket* b; + + *inout_len = 0; + while (!APR_BRIGADE_EMPTY(bb)) { + b = APR_BRIGADE_FIRST(bb); + if (!APR_BUCKET_IS_METADATA(b)) { + rv = c1_in_feed_bucket(session, b, inout_len); + if (APR_SUCCESS != rv) goto cleanup; + } + apr_bucket_delete(b); + } +cleanup: + apr_brigade_cleanup(bb); + return rv; +} + +static apr_status_t read_and_feed(h2_session *session) +{ + apr_ssize_t bytes_fed, bytes_requested; + apr_status_t rv; + + bytes_requested = H2MAX(APR_BUCKET_BUFF_SIZE, session->max_stream_mem * 4); + rv = ap_get_brigade(session->c1->input_filters, + session->bbtmp, AP_MODE_READBYTES, + APR_NONBLOCK_READ, bytes_requested); + + if (APR_SUCCESS == rv) { + if (!APR_BRIGADE_EMPTY(session->bbtmp)) { + h2_util_bb_log(session->c1, session->id, APLOG_TRACE2, "c1 in", + session->bbtmp); + rv = c1_in_feed_brigade(session, session->bbtmp, &bytes_fed); + session->io.bytes_read += bytes_fed; + } + else { + rv = APR_EAGAIN; + } + } + return rv; +} + +apr_status_t h2_c1_read(h2_session *session) +{ + apr_status_t rv; + + /* H2_IN filter handles all incoming data against the session. + * We just pull at the filter chain to make it happen */ + ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, session->c1, + H2_SSSN_MSG(session, "session_read start")); + rv = read_and_feed(session); + + if (APR_SUCCESS == rv) { + h2_session_dispatch_event(session, H2_SESSION_EV_INPUT_PENDING, 0, NULL); + } + else if (APR_STATUS_IS_EAGAIN(rv)) { + /* Signal that we have exhausted the input momentarily. 
+ * This might switch to polling the socket */ + h2_session_dispatch_event(session, H2_SESSION_EV_INPUT_EXHAUSTED, 0, NULL); + } + else if (APR_SUCCESS != rv) { + if (APR_STATUS_IS_ETIMEDOUT(rv) + || APR_STATUS_IS_ECONNABORTED(rv) + || APR_STATUS_IS_ECONNRESET(rv) + || APR_STATUS_IS_EOF(rv) + || APR_STATUS_IS_EBADF(rv)) { + /* common status for a client that has left */ + ap_log_cerror(APLOG_MARK, APLOG_TRACE2, rv, session->c1, + H2_SSSN_MSG(session, "input gone")); + } + else { + /* uncommon status, log on INFO so that we see this */ + ap_log_cerror( APLOG_MARK, APLOG_DEBUG, rv, session->c1, + H2_SSSN_LOG(APLOGNO(02950), session, + "error reading, terminating")); + } + h2_session_dispatch_event(session, H2_SESSION_EV_CONN_ERROR, 0, NULL); + } + + apr_brigade_cleanup(session->bbtmp); + ap_log_cerror(APLOG_MARK, APLOG_TRACE2, rv, session->c1, + H2_SSSN_MSG(session, "session_read done")); + return rv; +} diff --git a/modules/http2/h2_c1_io.h b/modules/http2/h2_c1_io.h new file mode 100644 index 0000000..c4dac38 --- /dev/null +++ b/modules/http2/h2_c1_io.h @@ -0,0 +1,101 @@ +/* Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef __mod_h2__h2_c1_io__ +#define __mod_h2__h2_c1_io__ + +struct h2_config; +struct h2_session; + +/* h2_io is the basic handler of a httpd connection. It keeps two brigades, + * one for input, one for output and works with the installed connection + * filters. + * The read is done via a callback function, so that input can be processed + * directly without copying. + */ +typedef struct { + struct h2_session *session; + apr_bucket_brigade *output; + + int is_tls; + int unflushed; + apr_time_t cooldown_usecs; + apr_int64_t warmup_size; + + apr_size_t write_size; + apr_time_t last_write; + apr_int64_t bytes_read; + apr_int64_t bytes_written; + + int buffer_output; + apr_off_t buffered_len; + apr_off_t flush_threshold; + unsigned int is_flushed : 1; + unsigned int is_passing : 1; + + char *scratch; + apr_size_t ssize; + apr_size_t slen; +} h2_c1_io; + +apr_status_t h2_c1_io_init(h2_c1_io *io, struct h2_session *session); + +/** + * Append data to the buffered output. + * @param buf the data to append + * @param length the length of the data to append + */ +apr_status_t h2_c1_io_add_data(h2_c1_io *io, + const char *buf, + size_t length); + +apr_status_t h2_c1_io_add(h2_c1_io *io, apr_bucket *b); + +apr_status_t h2_c1_io_append(h2_c1_io *io, apr_bucket_brigade *bb); + +/** + * Pass any buffered data on to the connection output filters. + * @param io the connection io + */ +apr_status_t h2_c1_io_pass(h2_c1_io *io); + +/** + * if there is any data pendiong or was any data send + * since the last FLUSH, send out a FLUSH now. 
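Taken together, the declarations in this header suggest the usual caller pattern for the buffered c1 output: append data, flush once the buffered amount crosses the configured threshold, and force a flush before waiting. A hedged sketch with a hypothetical helper out_frame(), not part of the patch:

    static apr_status_t out_frame(h2_c1_io *io, const char *data, size_t len)
    {
        apr_status_t rv = h2_c1_io_add_data(io, data, len);
        if (APR_SUCCESS == rv && h2_c1_io_needs_flush(io)) {
            /* enough buffered, hand it to the connection output filters */
            rv = h2_c1_io_pass(io);
        }
        return rv;
    }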
+ */ +apr_status_t h2_c1_io_assure_flushed(h2_c1_io *io); + +/** + * Check if the buffered amount of data needs flushing. + */ +int h2_c1_io_needs_flush(h2_c1_io *io); + +/** + * Check if we have output pending. + */ +int h2_c1_io_pending(h2_c1_io *io); + +struct h2_session; + +/** + * Read c1 input and pass it on to nghttp2. + * @param session the session + * @param when_pending != 0 if only pending input (sitting in filters) + * needs to be read + */ +apr_status_t h2_c1_read(struct h2_session *session); + +#endif /* defined(__mod_h2__h2_c1_io__) */ diff --git a/modules/http2/h2_c2.c b/modules/http2/h2_c2.c new file mode 100644 index 0000000..a955200 --- /dev/null +++ b/modules/http2/h2_c2.c @@ -0,0 +1,942 @@ +/* Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include +#include + +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "h2_private.h" +#include "h2.h" +#include "h2_bucket_beam.h" +#include "h2_c1.h" +#include "h2_config.h" +#include "h2_conn_ctx.h" +#include "h2_c2_filter.h" +#include "h2_protocol.h" +#include "h2_mplx.h" +#include "h2_request.h" +#include "h2_headers.h" +#include "h2_session.h" +#include "h2_stream.h" +#include "h2_ws.h" +#include "h2_c2.h" +#include "h2_util.h" +#include "mod_http2.h" + + +static module *mpm_module; +static int mpm_supported = 1; +static apr_socket_t *dummy_socket; + +#if AP_HAS_RESPONSE_BUCKETS + +static ap_filter_rec_t *c2_net_in_filter_handle; +static ap_filter_rec_t *c2_net_out_filter_handle; +static ap_filter_rec_t *c2_request_in_filter_handle; +static ap_filter_rec_t *c2_notes_out_filter_handle; + +#endif /* AP_HAS_RESPONSE_BUCKETS */ + +static void check_modules(int force) +{ + static int checked = 0; + int i; + + if (force || !checked) { + for (i = 0; ap_loaded_modules[i]; ++i) { + module *m = ap_loaded_modules[i]; + + if (!strcmp("event.c", m->name)) { + mpm_module = m; + break; + } + else if (!strcmp("motorz.c", m->name)) { + mpm_module = m; + break; + } + else if (!strcmp("mpm_netware.c", m->name)) { + mpm_module = m; + break; + } + else if (!strcmp("prefork.c", m->name)) { + mpm_module = m; + /* While http2 can work really well on prefork, it collides + * today's use case for prefork: running single-thread app engines + * like php. If we restrict h2_workers to 1 per process, php will + * work fine, but browser will be limited to 1 active request at a + * time. 
*/ + mpm_supported = 0; + break; + } + else if (!strcmp("simple_api.c", m->name)) { + mpm_module = m; + mpm_supported = 0; + break; + } + else if (!strcmp("mpm_winnt.c", m->name)) { + mpm_module = m; + break; + } + else if (!strcmp("worker.c", m->name)) { + mpm_module = m; + break; + } + } + checked = 1; + } +} + +const char *h2_conn_mpm_name(void) +{ + check_modules(0); + return mpm_module? mpm_module->name : "unknown"; +} + +int h2_mpm_supported(void) +{ + check_modules(0); + return mpm_supported; +} + +apr_status_t h2_c2_child_init(apr_pool_t *pool, server_rec *s) +{ + check_modules(1); + return apr_socket_create(&dummy_socket, APR_INET, SOCK_STREAM, + APR_PROTO_TCP, pool); +} + +static void h2_c2_log_io(conn_rec *c2, apr_off_t bytes_sent) +{ + if (bytes_sent && h2_c_logio_add_bytes_out) { + h2_c_logio_add_bytes_out(c2, bytes_sent); + } +} + +void h2_c2_destroy(conn_rec *c2) +{ + h2_conn_ctx_t *conn_ctx = h2_conn_ctx_get(c2); + + ap_log_cerror(APLOG_MARK, APLOG_TRACE3, 0, c2, + "h2_c2(%s): destroy", c2->log_id); + if(!c2->aborted && conn_ctx && conn_ctx->bytes_sent) { + h2_c2_log_io(c2, conn_ctx->bytes_sent); + } + apr_pool_destroy(c2->pool); +} + +void h2_c2_abort(conn_rec *c2, conn_rec *from) +{ + h2_conn_ctx_t *conn_ctx = h2_conn_ctx_get(c2); + + AP_DEBUG_ASSERT(conn_ctx); + AP_DEBUG_ASSERT(conn_ctx->stream_id); + if(!c2->aborted && conn_ctx->bytes_sent) { + h2_c2_log_io(c2, conn_ctx->bytes_sent); + } + + if (conn_ctx->beam_in) { + h2_beam_abort(conn_ctx->beam_in, from); + } + if (conn_ctx->beam_out) { + h2_beam_abort(conn_ctx->beam_out, from); + } + c2->aborted = 1; +} + +typedef struct { + apr_bucket_brigade *bb; /* c2: data in holding area */ + unsigned did_upgrade_eos:1; /* for Upgrade, we added an extra EOS */ +} h2_c2_fctx_in_t; + +static apr_status_t h2_c2_filter_in(ap_filter_t* f, + apr_bucket_brigade* bb, + ap_input_mode_t mode, + apr_read_type_e block, + apr_off_t readbytes) +{ + h2_conn_ctx_t *conn_ctx; + h2_c2_fctx_in_t *fctx = f->ctx; + apr_status_t status = APR_SUCCESS; + apr_bucket *b; + apr_off_t bblen; + apr_size_t rmax = (readbytes < APR_INT32_MAX)? + (apr_size_t)readbytes : APR_INT32_MAX; + + conn_ctx = h2_conn_ctx_get(f->c); + AP_DEBUG_ASSERT(conn_ctx); + + if (mode == AP_MODE_INIT) { + return ap_get_brigade(f->c->input_filters, bb, mode, block, readbytes); + } + + if (f->c->aborted) { + return APR_ECONNABORTED; + } + + if (APLOGctrace3(f->c)) { + ap_log_cerror(APLOG_MARK, APLOG_TRACE3, 0, f->c, + "h2_c2_in(%s-%d): read, mode=%d, block=%d, readbytes=%ld", + conn_ctx->id, conn_ctx->stream_id, mode, block, + (long)readbytes); + } + + if (!fctx) { + fctx = apr_pcalloc(f->c->pool, sizeof(*fctx)); + f->ctx = fctx; + fctx->bb = apr_brigade_create(f->c->pool, f->c->bucket_alloc); + if (!conn_ctx->beam_in) { + b = apr_bucket_eos_create(f->c->bucket_alloc); + APR_BRIGADE_INSERT_TAIL(fctx->bb, b); + } + } + + /* If this is a HTTP Upgrade, it means the request we process + * has not Content, although the stream is not necessarily closed. + * On first read, we insert an EOS to signal processing that it + * has the complete body. */ + if (conn_ctx->is_upgrade && !fctx->did_upgrade_eos) { + b = apr_bucket_eos_create(f->c->bucket_alloc); + APR_BRIGADE_INSERT_TAIL(fctx->bb, b); + fctx->did_upgrade_eos = 1; + } + + while (APR_BRIGADE_EMPTY(fctx->bb)) { + /* Get more input data for our request. 
*/ + if (APLOGctrace2(f->c)) { + ap_log_cerror(APLOG_MARK, APLOG_TRACE2, status, f->c, + "h2_c2_in(%s-%d): get more data from mplx, block=%d, " + "readbytes=%ld", + conn_ctx->id, conn_ctx->stream_id, block, (long)readbytes); + } + if (conn_ctx->beam_in) { + if (conn_ctx->pipe_in[H2_PIPE_OUT]) { +receive: + status = h2_beam_receive(conn_ctx->beam_in, f->c, fctx->bb, APR_NONBLOCK_READ, + conn_ctx->mplx->stream_max_mem); + if (APR_STATUS_IS_EAGAIN(status) && APR_BLOCK_READ == block) { + status = h2_util_wait_on_pipe(conn_ctx->pipe_in[H2_PIPE_OUT]); + if (APR_SUCCESS == status) { + goto receive; + } + } + } + else { + status = h2_beam_receive(conn_ctx->beam_in, f->c, fctx->bb, block, + conn_ctx->mplx->stream_max_mem); + } + } + else { + status = APR_EOF; + } + + if (APLOGctrace3(f->c)) { + ap_log_cerror(APLOG_MARK, APLOG_TRACE3, status, f->c, + "h2_c2_in(%s-%d): read returned", + conn_ctx->id, conn_ctx->stream_id); + } + if (APR_STATUS_IS_EAGAIN(status) + && (mode == AP_MODE_GETLINE || block == APR_BLOCK_READ)) { + /* chunked input handling does not seem to like it if we + * return with APR_EAGAIN from a GETLINE read... + * upload 100k test on test-ser.example.org hangs */ + status = APR_SUCCESS; + } + else if (APR_STATUS_IS_EOF(status)) { + break; + } + else if (status != APR_SUCCESS) { + conn_ctx->last_err = status; + return status; + } + + if (APLOGctrace3(f->c)) { + h2_util_bb_log(f->c, conn_ctx->stream_id, APLOG_TRACE3, + "c2 input recv raw", fctx->bb); + } + if (h2_c_logio_add_bytes_in) { + apr_brigade_length(bb, 0, &bblen); + h2_c_logio_add_bytes_in(f->c, bblen); + } + } + + /* Nothing there, no more data to get. Return. */ + if (status == APR_EOF && APR_BRIGADE_EMPTY(fctx->bb)) { + return status; + } + + if (APLOGctrace3(f->c)) { + h2_util_bb_log(f->c, conn_ctx->stream_id, APLOG_TRACE3, + "c2 input.bb", fctx->bb); + } + + if (APR_BRIGADE_EMPTY(fctx->bb)) { + if (APLOGctrace3(f->c)) { + ap_log_cerror(APLOG_MARK, APLOG_TRACE3, 0, f->c, + "h2_c2_in(%s-%d): no data", + conn_ctx->id, conn_ctx->stream_id); + } + return (block == APR_NONBLOCK_READ)? APR_EAGAIN : APR_EOF; + } + + if (mode == AP_MODE_EXHAUSTIVE) { + /* return all we have */ + APR_BRIGADE_CONCAT(bb, fctx->bb); + } + else if (mode == AP_MODE_READBYTES) { + status = h2_brigade_concat_length(bb, fctx->bb, rmax); + } + else if (mode == AP_MODE_SPECULATIVE) { + status = h2_brigade_copy_length(bb, fctx->bb, rmax); + } + else if (mode == AP_MODE_GETLINE) { + /* we are reading a single LF line, e.g. the HTTP headers. + * this has the nasty side effect to split the bucket, even + * though it ends with CRLF and creates a 0 length bucket */ + status = apr_brigade_split_line(bb, fctx->bb, block, + HUGE_STRING_LEN); + if (APLOGctrace3(f->c)) { + char buffer[1024]; + apr_size_t len = sizeof(buffer)-1; + apr_brigade_flatten(bb, buffer, &len); + buffer[len] = 0; + ap_log_cerror(APLOG_MARK, APLOG_TRACE3, status, f->c, + "h2_c2_in(%s-%d): getline: %s", + conn_ctx->id, conn_ctx->stream_id, buffer); + } + } + else { + /* Hmm, well. There is mode AP_MODE_EATCRLF, but we chose not + * to support it. Seems to work. 
*/ + ap_log_cerror(APLOG_MARK, APLOG_ERR, APR_ENOTIMPL, f->c, + APLOGNO(03472) + "h2_c2_in(%s-%d), unsupported READ mode %d", + conn_ctx->id, conn_ctx->stream_id, mode); + status = APR_ENOTIMPL; + } + + if (APLOGctrace3(f->c)) { + apr_brigade_length(bb, 0, &bblen); + ap_log_cerror(APLOG_MARK, APLOG_TRACE3, status, f->c, + "h2_c2_in(%s-%d): %ld data bytes", + conn_ctx->id, conn_ctx->stream_id, (long)bblen); + } + return status; +} + +static apr_status_t beam_out(conn_rec *c2, h2_conn_ctx_t *conn_ctx, apr_bucket_brigade* bb) +{ + apr_off_t written = 0; + apr_status_t rv; + + rv = h2_beam_send(conn_ctx->beam_out, c2, bb, APR_BLOCK_READ, &written); + if (APR_STATUS_IS_EAGAIN(rv)) { + rv = APR_SUCCESS; + } + return rv; +} + +static apr_status_t h2_c2_filter_out(ap_filter_t* f, apr_bucket_brigade* bb) +{ + h2_conn_ctx_t *conn_ctx = h2_conn_ctx_get(f->c); + apr_status_t rv; + + ap_assert(conn_ctx); +#if AP_HAS_RESPONSE_BUCKETS + if (!conn_ctx->has_final_response) { + apr_bucket *e; + + for (e = APR_BRIGADE_FIRST(bb); + e != APR_BRIGADE_SENTINEL(bb); + e = APR_BUCKET_NEXT(e)) + { + if (AP_BUCKET_IS_RESPONSE(e)) { + ap_bucket_response *resp = e->data; + if (resp->status >= 200) { + conn_ctx->has_final_response = 1; + break; + } + } + if (APR_BUCKET_IS_EOS(e)) { + break; + } + } + } +#endif /* AP_HAS_RESPONSE_BUCKETS */ + rv = beam_out(f->c, conn_ctx, bb); + + ap_log_cerror(APLOG_MARK, APLOG_TRACE2, rv, f->c, + "h2_c2(%s-%d): output leave", + conn_ctx->id, conn_ctx->stream_id); + if (APR_SUCCESS != rv) { + h2_c2_abort(f->c, f->c); + } + return rv; +} + +static int addn_headers(void *udata, const char *name, const char *value) +{ + apr_table_t *dest = udata; + apr_table_addn(dest, name, value); + return 1; +} + +static void check_early_hints(request_rec *r, const char *tag) +{ + apr_array_header_t *push_list = h2_config_push_list(r); + apr_table_t *early_headers = h2_config_early_headers(r); + + if (!r->expecting_100 && + ((push_list && push_list->nelts > 0) || + (early_headers && !apr_is_empty_table(early_headers)))) { + int have_hints = 0, i; + + if (push_list && push_list->nelts > 0) { + ap_log_rerror(APLOG_MARK, APLOG_TRACE1, 0, r, + "%s, early announcing %d resources for push", + tag, push_list->nelts); + for (i = 0; i < push_list->nelts; ++i) { + h2_push_res *push = &APR_ARRAY_IDX(push_list, i, h2_push_res); + apr_table_add(r->headers_out, "Link", + apr_psprintf(r->pool, "<%s>; rel=preload%s", + push->uri_ref, push->critical? 
"; critical" : "")); + } + have_hints = 1; + } + if (early_headers && !apr_is_empty_table(early_headers)) { + apr_table_do(addn_headers, r->headers_out, early_headers, NULL); + have_hints = 1; + } + + if (have_hints) { + int old_status; + const char *old_line; + + if (h2_config_rgeti(r, H2_CONF_PUSH) == 0 && + h2_config_sgeti(r->server, H2_CONF_PUSH) != 0) { + apr_table_setn(r->connection->notes, H2_PUSH_MODE_NOTE, "0"); + } + old_status = r->status; + old_line = r->status_line; + r->status = 103; + r->status_line = "103 Early Hints"; + ap_send_interim_response(r, 1); + r->status = old_status; + r->status_line = old_line; + } + } +} + +static int c2_hook_fixups(request_rec *r) +{ + conn_rec *c2 = r->connection; + h2_conn_ctx_t *conn_ctx; + + if (!c2->master || !(conn_ctx = h2_conn_ctx_get(c2)) || !conn_ctx->stream_id) { + return DECLINED; + } + + check_early_hints(r, "late_fixup"); + + return DECLINED; +} + +static apr_status_t http2_get_pollfd_from_conn(conn_rec *c, + struct apr_pollfd_t *pfd, + apr_interval_time_t *ptimeout) +{ +#if H2_USE_PIPES + if (c->master) { + h2_conn_ctx_t *ctx = h2_conn_ctx_get(c); + if (ctx) { + if (ctx->beam_in && ctx->pipe_in[H2_PIPE_OUT]) { + pfd->desc_type = APR_POLL_FILE; + pfd->desc.f = ctx->pipe_in[H2_PIPE_OUT]; + if (ptimeout) + *ptimeout = h2_beam_timeout_get(ctx->beam_in); + } + else { + /* no input */ + pfd->desc_type = APR_NO_DESC; + if (ptimeout) + *ptimeout = -1; + } + return APR_SUCCESS; + } + } +#else + (void)c; + (void)pfd; + (void)ptimeout; +#endif /* H2_USE_PIPES */ + return APR_ENOTIMPL; +} + +#if AP_HAS_RESPONSE_BUCKETS + +static void c2_pre_read_request(request_rec *r, conn_rec *c2) +{ + h2_conn_ctx_t *conn_ctx; + + if (!c2->master || !(conn_ctx = h2_conn_ctx_get(c2)) || !conn_ctx->stream_id) { + return; + } + ap_log_rerror(APLOG_MARK, APLOG_TRACE3, 0, r, + "h2_c2(%s-%d): adding request filters", + conn_ctx->id, conn_ctx->stream_id); + ap_add_input_filter_handle(c2_request_in_filter_handle, NULL, r, r->connection); + ap_add_output_filter_handle(c2_notes_out_filter_handle, NULL, r, r->connection); +} + +static int c2_post_read_request(request_rec *r) +{ + h2_conn_ctx_t *conn_ctx; + conn_rec *c2 = r->connection; + apr_time_t timeout; + + if (!c2->master || !(conn_ctx = h2_conn_ctx_get(c2)) || !conn_ctx->stream_id) { + return DECLINED; + } + /* Now that the request_rec is fully initialized, set relevant params */ + conn_ctx->server = r->server; + timeout = h2_config_geti64(r, r->server, H2_CONF_STREAM_TIMEOUT); + if (timeout <= 0) { + timeout = r->server->timeout; + } + h2_conn_ctx_set_timeout(conn_ctx, timeout); + /* We only handle this one request on the connection and tell everyone + * that there is no need to keep it "clean" if something fails. Also, + * this prevents mod_reqtimeout from doing funny business with monitoring + * keepalive timeouts. + */ + r->connection->keepalive = AP_CONN_CLOSE; + + if (conn_ctx->beam_in && !apr_table_get(r->headers_in, "Content-Length")) { + r->body_indeterminate = 1; + } + + if (h2_config_sgeti(conn_ctx->server, H2_CONF_COPY_FILES)) { + ap_log_rerror(APLOG_MARK, APLOG_TRACE1, 0, r, + "h2_mplx(%s-%d): copy_files in output", + conn_ctx->id, conn_ctx->stream_id); + h2_beam_set_copy_files(conn_ctx->beam_out, 1); + } + + /* Add the raw bytes of the request (e.g. header frame lengths to + * the logio for this request. 
*/ + if (conn_ctx->request->raw_bytes && h2_c_logio_add_bytes_in) { + h2_c_logio_add_bytes_in(c2, conn_ctx->request->raw_bytes); + } + return OK; +} + +static int c2_hook_pre_connection(conn_rec *c2, void *csd) +{ + h2_conn_ctx_t *conn_ctx; + + if (!c2->master || !(conn_ctx = h2_conn_ctx_get(c2)) || !conn_ctx->stream_id) { + return DECLINED; + } + + ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, c2, + "h2_c2(%s-%d), adding filters", + conn_ctx->id, conn_ctx->stream_id); + ap_add_input_filter_handle(c2_net_in_filter_handle, NULL, NULL, c2); + ap_add_output_filter_handle(c2_net_out_filter_handle, NULL, NULL, c2); + if (c2->keepalives == 0) { + /* Simulate that we had already a request on this connection. Some + * hooks trigger special behaviour when keepalives is 0. + * (Not necessarily in pre_connection, but later. Set it here, so it + * is in place.) */ + c2->keepalives = 1; + /* We signal that this connection will be closed after the request. + * Which is true in that sense that we throw away all traffic data + * on this c2 connection after each requests. Although we might + * reuse internal structures like memory pools. + * The wanted effect of this is that httpd does not try to clean up + * any dangling data on this connection when a request is done. Which + * is unnecessary on a h2 stream. + */ + c2->keepalive = AP_CONN_CLOSE; + } + return OK; +} + +void h2_c2_register_hooks(void) +{ + /* When the connection processing actually starts, we might + * take over, if the connection is for a h2 stream. + */ + ap_hook_pre_connection(c2_hook_pre_connection, + NULL, NULL, APR_HOOK_MIDDLE); + + /* We need to manipulate the standard HTTP/1.1 protocol filters and + * install our own. This needs to be done very early. */ + ap_hook_pre_read_request(c2_pre_read_request, NULL, NULL, APR_HOOK_MIDDLE); + ap_hook_post_read_request(c2_post_read_request, NULL, NULL, + APR_HOOK_REALLY_FIRST); + ap_hook_fixups(c2_hook_fixups, NULL, NULL, APR_HOOK_LAST); +#if H2_USE_POLLFD_FROM_CONN + ap_hook_get_pollfd_from_conn(http2_get_pollfd_from_conn, NULL, NULL, + APR_HOOK_MIDDLE); +#endif + APR_REGISTER_OPTIONAL_FN(http2_get_pollfd_from_conn); + + c2_net_in_filter_handle = + ap_register_input_filter("H2_C2_NET_IN", h2_c2_filter_in, + NULL, AP_FTYPE_NETWORK); + c2_net_out_filter_handle = + ap_register_output_filter("H2_C2_NET_OUT", h2_c2_filter_out, + NULL, AP_FTYPE_NETWORK); + c2_request_in_filter_handle = + ap_register_input_filter("H2_C2_REQUEST_IN", h2_c2_filter_request_in, + NULL, AP_FTYPE_PROTOCOL); + c2_notes_out_filter_handle = + ap_register_output_filter("H2_C2_NOTES_OUT", h2_c2_filter_notes_out, + NULL, AP_FTYPE_PROTOCOL); +} + +#else /* AP_HAS_RESPONSE_BUCKETS */ + +static apr_status_t c2_run_pre_connection(conn_rec *c2, apr_socket_t *csd) +{ + if (c2->keepalives == 0) { + /* Simulate that we had already a request on this connection. Some + * hooks trigger special behaviour when keepalives is 0. + * (Not necessarily in pre_connection, but later. Set it here, so it + * is in place.) */ + c2->keepalives = 1; + /* We signal that this connection will be closed after the request. + * Which is true in that sense that we throw away all traffic data + * on this c2 connection after each requests. Although we might + * reuse internal structures like memory pools. + * The wanted effect of this is that httpd does not try to clean up + * any dangling data on this connection when a request is done. Which + * is unnecessary on a h2 stream. 
+ */ + c2->keepalive = AP_CONN_CLOSE; + return ap_run_pre_connection(c2, csd); + } + ap_assert(c2->output_filters); + return APR_SUCCESS; +} + +apr_status_t h2_c2_process(conn_rec *c2, apr_thread_t *thread, int worker_id) +{ + h2_conn_ctx_t *conn_ctx = h2_conn_ctx_get(c2); + + ap_assert(conn_ctx); + ap_assert(conn_ctx->mplx); + + /* See the discussion at + * + * Each conn_rec->id is supposed to be unique at a point in time. Since + * some modules (and maybe external code) uses this id as an identifier + * for the request_rec they handle, it needs to be unique for secondary + * connections also. + * + * The MPM module assigns the connection ids and mod_unique_id is using + * that one to generate identifier for requests. While the implementation + * works for HTTP/1.x, the parallel execution of several requests per + * connection will generate duplicate identifiers on load. + * + * The original implementation for secondary connection identifiers used + * to shift the master connection id up and assign the stream id to the + * lower bits. This was cramped on 32 bit systems, but on 64bit there was + * enough space. + * + * As issue 195 showed, mod_unique_id only uses the lower 32 bit of the + * connection id, even on 64bit systems. Therefore collisions in request ids. + * + * The way master connection ids are generated, there is some space "at the + * top" of the lower 32 bits on allmost all systems. If you have a setup + * with 64k threads per child and 255 child processes, you live on the edge. + * + * The new implementation shifts 8 bits and XORs in the worker + * id. This will experience collisions with > 256 h2 workers and heavy + * load still. There seems to be no way to solve this in all possible + * configurations by mod_h2 alone. + */ + c2->id = (c2->master->id << 8)^worker_id; + + if (!conn_ctx->pre_conn_done) { + ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, c2, + "h2_c2(%s-%d), adding filters", + conn_ctx->id, conn_ctx->stream_id); + ap_add_input_filter("H2_C2_NET_IN", NULL, NULL, c2); + ap_add_output_filter("H2_C2_NET_CATCH_H1", NULL, NULL, c2); + ap_add_output_filter("H2_C2_NET_OUT", NULL, NULL, c2); + + c2_run_pre_connection(c2, ap_get_conn_socket(c2)); + conn_ctx->pre_conn_done = 1; + } + + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c2, + "h2_c2(%s-%d): process connection", + conn_ctx->id, conn_ctx->stream_id); + + c2->current_thread = thread; + ap_run_process_connection(c2); + + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c2, + "h2_c2(%s-%d): processing done", + conn_ctx->id, conn_ctx->stream_id); + + return APR_SUCCESS; +} + +static apr_status_t c2_process(h2_conn_ctx_t *conn_ctx, conn_rec *c) +{ + const h2_request *req = conn_ctx->request; + conn_state_t *cs = c->cs; + request_rec *r = NULL; + const char *tenc; + apr_time_t timeout; + apr_status_t rv = APR_SUCCESS; + + if (req->protocol && !strcmp("websocket", req->protocol)) { + req = h2_ws_rewrite_request(req, c, conn_ctx->beam_in == NULL); + if (!req) { + rv = APR_EGENERAL; + goto cleanup; + } + } + + r = h2_create_request_rec(req, c, conn_ctx->beam_in == NULL); + + if (!r) { + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c, + "h2_c2(%s-%d): create request_rec failed, r=NULL", + conn_ctx->id, conn_ctx->stream_id); + goto cleanup; + } + if (r->status != HTTP_OK) { + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c, + "h2_c2(%s-%d): create request_rec failed, r->status=%d", + conn_ctx->id, conn_ctx->stream_id, r->status); + goto cleanup; + } + + tenc = apr_table_get(r->headers_in, "Transfer-Encoding"); + conn_ctx->input_chunked = 
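/* Illustrative aside on the c2->id scheme described above; the numbers are
 * hypothetical and not taken from the patch. With
 *   c2->id = (c2->master->id << 8) ^ worker_id
 * ids stay unique as long as worker_id fits into 8 bits. With more than 256
 * workers the XOR can collide, e.g.
 *   master->id = 5, worker_id = 260:  (5 << 8) ^ 260 = 0x500 ^ 0x104 = 0x404
 *   master->id = 4, worker_id =   4:  (4 << 8) ^ 4   = 0x400 ^ 0x004 = 0x404
 * which is the residual collision risk the comment above acknowledges. */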
tenc && ap_is_chunked(r->pool, tenc); + + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c, + "h2_c2(%s-%d): created request_rec for %s", + conn_ctx->id, conn_ctx->stream_id, r->the_request); + conn_ctx->server = r->server; + timeout = h2_config_geti64(r, r->server, H2_CONF_STREAM_TIMEOUT); + if (timeout <= 0) { + timeout = r->server->timeout; + } + h2_conn_ctx_set_timeout(conn_ctx, timeout); + + if (h2_config_sgeti(conn_ctx->server, H2_CONF_COPY_FILES)) { + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c, + "h2_mplx(%s-%d): copy_files in output", + conn_ctx->id, conn_ctx->stream_id); + h2_beam_set_copy_files(conn_ctx->beam_out, 1); + } + + ap_update_child_status(c->sbh, SERVER_BUSY_WRITE, r); + if (cs) { + cs->state = CONN_STATE_HANDLER; + } + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c, + "h2_c2(%s-%d): start process_request", + conn_ctx->id, conn_ctx->stream_id); + + /* Add the raw bytes of the request (e.g. header frame lengths to + * the logio for this request. */ + if (req->raw_bytes && h2_c_logio_add_bytes_in) { + h2_c_logio_add_bytes_in(c, req->raw_bytes); + } + + ap_process_request(r); + /* After the call to ap_process_request, the + * request pool may have been deleted. */ + r = NULL; + if (conn_ctx->beam_out) { + h2_beam_close(conn_ctx->beam_out, c); + } + + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c, + "h2_c2(%s-%d): process_request done", + conn_ctx->id, conn_ctx->stream_id); + if (cs) + cs->state = CONN_STATE_WRITE_COMPLETION; + +cleanup: + return rv; +} + +conn_rec *h2_c2_create(conn_rec *c1, apr_pool_t *parent, + apr_bucket_alloc_t *buckt_alloc) +{ + apr_pool_t *pool; + conn_rec *c2; + void *cfg; + + ap_assert(c1); + ap_log_cerror(APLOG_MARK, APLOG_TRACE3, 0, c1, + "h2_c2: create for c1(%ld)", c1->id); + + /* We create a pool with its own allocator to be used for + * processing a request. This is the only way to have the processing + * independent of its parent pool in the sense that it can work in + * another thread. + */ + apr_pool_create(&pool, parent); + apr_pool_tag(pool, "h2_c2_conn"); + + c2 = (conn_rec *) apr_palloc(pool, sizeof(conn_rec)); + memcpy(c2, c1, sizeof(conn_rec)); + + c2->master = c1; + c2->pool = pool; + c2->conn_config = ap_create_conn_config(pool); + c2->notes = apr_table_make(pool, 5); + c2->input_filters = NULL; + c2->output_filters = NULL; + c2->keepalives = 0; +#if AP_MODULE_MAGIC_AT_LEAST(20180903, 1) + c2->filter_conn_ctx = NULL; +#endif + c2->bucket_alloc = apr_bucket_alloc_create(pool); +#if !AP_MODULE_MAGIC_AT_LEAST(20180720, 1) + c2->data_in_input_filters = 0; + c2->data_in_output_filters = 0; +#endif + /* prevent mpm_event from making wrong assumptions about this connection, + * like e.g. using its socket for an async read check. */ + c2->clogging_input_filters = 1; + c2->log = NULL; + c2->aborted = 0; + /* We cannot install the master connection socket on the secondary, as + * modules mess with timeouts/blocking of the socket, with + * unwanted side effects to the master connection processing. + * Fortunately, since we never use the secondary socket, we can just install + * a single, process-wide dummy and everyone is happy. + */ + ap_set_module_config(c2->conn_config, &core_module, dummy_socket); + /* TODO: these should be unique to this thread */ + c2->sbh = NULL; /*c1->sbh;*/ + /* TODO: not all mpm modules have learned about secondary connections yet. + * copy their config from master to secondary. 
+ */ + if (mpm_module) { + cfg = ap_get_module_config(c1->conn_config, mpm_module); + ap_set_module_config(c2->conn_config, mpm_module, cfg); + } + + ap_log_cerror(APLOG_MARK, APLOG_TRACE3, 0, c2, + "h2_c2(%s): created", c2->log_id); + return c2; +} + +static int h2_c2_hook_post_read_request(request_rec *r) +{ + h2_conn_ctx_t *conn_ctx = h2_conn_ctx_get(r->connection); + + if (conn_ctx && conn_ctx->stream_id && ap_is_initial_req(r)) { + + ap_log_rerror(APLOG_MARK, APLOG_TRACE3, 0, r, + "h2_c2(%s-%d): adding request filters", + conn_ctx->id, conn_ctx->stream_id); + + /* setup the correct filters to process the request for h2 */ + ap_add_input_filter("H2_C2_REQUEST_IN", NULL, r, r->connection); + + /* replace the core http filter that formats response headers + * in HTTP/1 with our own that collects status and headers */ + ap_remove_output_filter_byhandle(r->output_filters, "HTTP_HEADER"); + + ap_add_output_filter("H2_C2_RESPONSE_OUT", NULL, r, r->connection); + ap_add_output_filter("H2_C2_TRAILERS_OUT", NULL, r, r->connection); + } + return DECLINED; +} + +static int h2_c2_hook_process(conn_rec* c) +{ + h2_conn_ctx_t *ctx; + + if (!c->master) { + return DECLINED; + } + + ctx = h2_conn_ctx_get(c); + if (ctx->stream_id) { + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c, + "h2_h2, processing request directly"); + c2_process(ctx, c); + return DONE; + } + else { + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c, + "secondary_conn(%ld): no h2 stream assing?", c->id); + } + return DECLINED; +} + +void h2_c2_register_hooks(void) +{ + /* When the connection processing actually starts, we might + * take over, if the connection is for a h2 stream. + */ + ap_hook_process_connection(h2_c2_hook_process, + NULL, NULL, APR_HOOK_FIRST); + /* We need to manipulate the standard HTTP/1.1 protocol filters and + * install our own. This needs to be done very early. */ + ap_hook_post_read_request(h2_c2_hook_post_read_request, NULL, NULL, APR_HOOK_REALLY_FIRST); + ap_hook_fixups(c2_hook_fixups, NULL, NULL, APR_HOOK_LAST); +#if H2_USE_POLLFD_FROM_CONN + ap_hook_get_pollfd_from_conn(http2_get_pollfd_from_conn, NULL, NULL, + APR_HOOK_MIDDLE); +#endif + APR_REGISTER_OPTIONAL_FN(http2_get_pollfd_from_conn); + + ap_register_input_filter("H2_C2_NET_IN", h2_c2_filter_in, + NULL, AP_FTYPE_NETWORK); + ap_register_output_filter("H2_C2_NET_OUT", h2_c2_filter_out, + NULL, AP_FTYPE_NETWORK); + ap_register_output_filter("H2_C2_NET_CATCH_H1", h2_c2_filter_catch_h1_out, + NULL, AP_FTYPE_NETWORK); + + ap_register_input_filter("H2_C2_REQUEST_IN", h2_c2_filter_request_in, + NULL, AP_FTYPE_PROTOCOL); + ap_register_output_filter("H2_C2_RESPONSE_OUT", h2_c2_filter_response_out, + NULL, AP_FTYPE_PROTOCOL); + ap_register_output_filter("H2_C2_TRAILERS_OUT", h2_c2_filter_trailers_out, + NULL, AP_FTYPE_PROTOCOL); +} + +#endif /* else AP_HAS_RESPONSE_BUCKETS */ diff --git a/modules/http2/h2_c2.h b/modules/http2/h2_c2.h new file mode 100644 index 0000000..f278382 --- /dev/null +++ b/modules/http2/h2_c2.h @@ -0,0 +1,57 @@ +/* Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef __mod_h2__h2_c2__ +#define __mod_h2__h2_c2__ + +#include + +#include "h2.h" + +const char *h2_conn_mpm_name(void); +int h2_mpm_supported(void); + +/* Initialize this child process for h2 secondary connection work, + * to be called once during child init before multi processing + * starts. + */ +apr_status_t h2_c2_child_init(apr_pool_t *pool, server_rec *s); + +#if !AP_HAS_RESPONSE_BUCKETS + +conn_rec *h2_c2_create(conn_rec *c1, apr_pool_t *parent, + apr_bucket_alloc_t *buckt_alloc); + +/** + * Process a secondary connection for a HTTP/2 stream request. + */ +apr_status_t h2_c2_process(conn_rec *c, apr_thread_t *thread, int worker_id); + +#endif /* !AP_HAS_RESPONSE_BUCKETS */ + +void h2_c2_destroy(conn_rec *c2); + +/** + * Abort the I/O processing of a secondary connection. And + * in-/output beams will return errors and c2->aborted is set. + * @param c2 the secondary connection to abort + * @param from the connection this is invoked from + */ +void h2_c2_abort(conn_rec *c2, conn_rec *from); + +void h2_c2_register_hooks(void); + +#endif /* defined(__mod_h2__h2_c2__) */ diff --git a/modules/http2/h2_c2_filter.c b/modules/http2/h2_c2_filter.c new file mode 100644 index 0000000..554f88b --- /dev/null +++ b/modules/http2/h2_c2_filter.c @@ -0,0 +1,1056 @@ +/* Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include +#include + +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +#include "h2_private.h" +#include "h2.h" +#include "h2_config.h" +#include "h2_conn_ctx.h" +#include "h2_headers.h" +#include "h2_c1.h" +#include "h2_c2_filter.h" +#include "h2_c2.h" +#include "h2_mplx.h" +#include "h2_request.h" +#include "h2_ws.h" +#include "h2_util.h" + + +#if AP_HAS_RESPONSE_BUCKETS + +apr_status_t h2_c2_filter_notes_out(ap_filter_t *f, apr_bucket_brigade *bb) +{ + apr_bucket *b; + request_rec *r_prev; + ap_bucket_response *resp; + const char *err; + + if (!f->r) { + goto pass; + } + + for (b = APR_BRIGADE_FIRST(bb); + b != APR_BRIGADE_SENTINEL(bb); + b = APR_BUCKET_NEXT(b)) + { + if (AP_BUCKET_IS_RESPONSE(b)) { + resp = b->data; + if (resp->status >= 400 && f->r->prev) { + /* Error responses are commonly handled via internal + * redirects to error documents. That creates a new + * request_rec with 'prev' set to the original. + * Each of these has its onw 'notes'. 
+ * We'd like to copy interesting ones into the current 'r->notes' + * as we reset HTTP/2 stream with H2 specific error codes then. + */ + for (r_prev = f->r; r_prev != NULL; r_prev = r_prev->prev) { + if ((err = apr_table_get(r_prev->notes, "ssl-renegotiate-forbidden"))) { + if (r_prev != f->r) { + apr_table_setn(resp->notes, "ssl-renegotiate-forbidden", err); + } + break; + } + } + } + else if (h2_config_rgeti(f->r, H2_CONF_PUSH) == 0 + && h2_config_sgeti(f->r->server, H2_CONF_PUSH) != 0) { + /* location configuration turns off H2 PUSH handling */ + ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, f->c, + "h2_c2_filter_notes_out, turning PUSH off"); + apr_table_setn(resp->notes, H2_PUSH_MODE_NOTE, "0"); + } + } + } +pass: + return ap_pass_brigade(f->next, bb); +} + +apr_status_t h2_c2_filter_request_in(ap_filter_t *f, + apr_bucket_brigade *bb, + ap_input_mode_t mode, + apr_read_type_e block, + apr_off_t readbytes) +{ + h2_conn_ctx_t *conn_ctx; + apr_bucket *b; + + /* just get out of the way for things we don't want to handle. */ + if (mode != AP_MODE_READBYTES && mode != AP_MODE_GETLINE) { + return ap_get_brigade(f->next, bb, mode, block, readbytes); + } + + /* This filter is a one-time wonder */ + ap_remove_input_filter(f); + + if (f->c->master && (conn_ctx = h2_conn_ctx_get(f->c)) && + conn_ctx->stream_id) { + const h2_request *req = conn_ctx->request; + + if (req->http_status == H2_HTTP_STATUS_UNSET && + req->protocol && !strcmp("websocket", req->protocol)) { + req = h2_ws_rewrite_request(req, f->c, conn_ctx->beam_in == NULL); + if (!req) + return APR_EGENERAL; + } + + ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, f->c, + "h2_c2_filter_request_in(%s): adding request bucket", + conn_ctx->id); + b = h2_request_create_bucket(req, f->r); + APR_BRIGADE_INSERT_TAIL(bb, b); + + if (req->http_status != H2_HTTP_STATUS_UNSET) { + /* error was encountered preparing this request */ + ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, f->c, + "h2_c2_filter_request_in(%s): adding error bucket %d", + conn_ctx->id, req->http_status); + b = ap_bucket_error_create(req->http_status, NULL, f->r->pool, + f->c->bucket_alloc); + APR_BRIGADE_INSERT_TAIL(bb, b); + return APR_SUCCESS; + } + + if (!conn_ctx->beam_in) { + b = apr_bucket_eos_create(f->c->bucket_alloc); + APR_BRIGADE_INSERT_TAIL(bb, b); + } + + return APR_SUCCESS; + } + + return ap_get_brigade(f->next, bb, mode, block, readbytes); +} + +#else /* AP_HAS_RESPONSE_BUCKETS */ + +#define H2_FILTER_LOG(name, c, level, rv, msg, bb) \ + do { \ + if (APLOG_C_IS_LEVEL((c),(level))) { \ + char buffer[4 * 1024]; \ + apr_size_t len, bmax = sizeof(buffer)/sizeof(buffer[0]); \ + len = h2_util_bb_print(buffer, bmax, "", "", (bb)); \ + ap_log_cerror(APLOG_MARK, (level), rv, (c), \ + "FILTER[%s]: %s %s", \ + (name), (msg), len? buffer : ""); \ + } \ + } while (0) + + +/* This routine is called by apr_table_do and merges all instances of + * the passed field values into a single array that will be further + * processed by some later routine. Originally intended to help split + * and recombine multiple Vary fields, though it is generic to any field + * consisting of comma/space-separated tokens. 
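 *
 * A hedged illustration of the intended effect (hypothetical header values,
 * not taken from the patch): given
 *
 *   Vary: Accept-Encoding, User-Agent
 *   Vary: Accept-Encoding
 *
 * in r->headers_out, fix_vary() below collects the unique tokens and
 * replaces both fields with the single header
 *
 *   Vary: Accept-Encoding,User-Agent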
+ */ +static int uniq_field_values(void *d, const char *key, const char *val) +{ + apr_array_header_t *values; + char *start; + char *e; + char **strpp; + int i; + + (void)key; + values = (apr_array_header_t *)d; + + e = apr_pstrdup(values->pool, val); + + do { + /* Find a non-empty fieldname */ + + while (*e == ',' || apr_isspace(*e)) { + ++e; + } + if (*e == '\0') { + break; + } + start = e; + while (*e != '\0' && *e != ',' && !apr_isspace(*e)) { + ++e; + } + if (*e != '\0') { + *e++ = '\0'; + } + + /* Now add it to values if it isn't already represented. + * Could be replaced by a ap_array_strcasecmp() if we had one. + */ + for (i = 0, strpp = (char **) values->elts; i < values->nelts; + ++i, ++strpp) { + if (*strpp && apr_strnatcasecmp(*strpp, start) == 0) { + break; + } + } + if (i == values->nelts) { /* if not found */ + *(char **)apr_array_push(values) = start; + } + } while (*e != '\0'); + + return 1; +} + +/* + * Since some clients choke violently on multiple Vary fields, or + * Vary fields with duplicate tokens, combine any multiples and remove + * any duplicates. + */ +static void fix_vary(request_rec *r) +{ + apr_array_header_t *varies; + + varies = apr_array_make(r->pool, 5, sizeof(char *)); + + /* Extract all Vary fields from the headers_out, separate each into + * its comma-separated fieldname values, and then add them to varies + * if not already present in the array. + */ + apr_table_do(uniq_field_values, varies, r->headers_out, "Vary", NULL); + + /* If we found any, replace old Vary fields with unique-ified value */ + + if (varies->nelts > 0) { + apr_table_setn(r->headers_out, "Vary", + apr_array_pstrcat(r->pool, varies, ',')); + } +} + +static h2_headers *create_response(request_rec *r) +{ + const char *clheader; + const char *ctype; + + /* + * Now that we are ready to send a response, we need to combine the two + * header field tables into a single table. If we don't do this, our + * later attempts to set or unset a given fieldname might be bypassed. + */ + if (!apr_is_empty_table(r->err_headers_out)) { + r->headers_out = apr_table_overlay(r->pool, r->err_headers_out, + r->headers_out); + apr_table_clear(r->err_headers_out); + } + + /* + * Remove the 'Vary' header field if the client can't handle it. + * Since this will have nasty effects on HTTP/1.1 caches, force + * the response into HTTP/1.0 mode. + */ + if (apr_table_get(r->subprocess_env, "force-no-vary") != NULL) { + apr_table_unset(r->headers_out, "Vary"); + r->proto_num = HTTP_VERSION(1,0); + apr_table_setn(r->subprocess_env, "force-response-1.0", "1"); + } + else { + fix_vary(r); + } + + /* + * Now remove any ETag response header field if earlier processing + * says so (such as a 'FileETag None' directive). + */ + if (apr_table_get(r->notes, "no-etag") != NULL) { + apr_table_unset(r->headers_out, "ETag"); + } + + /* determine the protocol and whether we should use keepalives. 
*/ + ap_set_keepalive(r); + + if (AP_STATUS_IS_HEADER_ONLY(r->status)) { + apr_table_unset(r->headers_out, "Transfer-Encoding"); + apr_table_unset(r->headers_out, "Content-Length"); + r->content_type = r->content_encoding = NULL; + r->content_languages = NULL; + r->clength = r->chunked = 0; + } + else if (r->chunked) { + apr_table_mergen(r->headers_out, "Transfer-Encoding", "chunked"); + apr_table_unset(r->headers_out, "Content-Length"); + } + + ctype = ap_make_content_type(r, r->content_type); + if (ctype) { + apr_table_setn(r->headers_out, "Content-Type", ctype); + } + + if (r->content_encoding) { + apr_table_setn(r->headers_out, "Content-Encoding", + r->content_encoding); + } + + if (!apr_is_empty_array(r->content_languages)) { + int i; + char *token; + char **languages = (char **)(r->content_languages->elts); + const char *field = apr_table_get(r->headers_out, "Content-Language"); + + while (field && (token = ap_get_list_item(r->pool, &field)) != NULL) { + for (i = 0; i < r->content_languages->nelts; ++i) { + if (!apr_strnatcasecmp(token, languages[i])) + break; + } + if (i == r->content_languages->nelts) { + *((char **) apr_array_push(r->content_languages)) = token; + } + } + + field = apr_array_pstrcat(r->pool, r->content_languages, ','); + apr_table_setn(r->headers_out, "Content-Language", field); + } + + /* + * Control cachability for non-cachable responses if not already set by + * some other part of the server configuration. + */ + if (r->no_cache && !apr_table_get(r->headers_out, "Expires")) { + char *date = apr_palloc(r->pool, APR_RFC822_DATE_LEN); + ap_recent_rfc822_date(date, r->request_time); + apr_table_add(r->headers_out, "Expires", date); + } + + /* This is a hack, but I can't find anyway around it. The idea is that + * we don't want to send out 0 Content-Lengths if it is a head request. + * This happens when modules try to outsmart the server, and return + * if they see a HEAD request. Apache 1.3 handlers were supposed to + * just return in that situation, and the core handled the HEAD. In + * 2.0, if a handler returns, then the core sends an EOS bucket down + * the filter stack, and the content-length filter computes a C-L of + * zero and that gets put in the headers, and we end up sending a + * zero C-L to the client. We can't just remove the C-L filter, + * because well behaved 2.0 handlers will send their data down the stack, + * and we will compute a real C-L for the head request. 
RBB + */ + if (r->header_only + && (clheader = apr_table_get(r->headers_out, "Content-Length")) + && !strcmp(clheader, "0")) { + apr_table_unset(r->headers_out, "Content-Length"); + } + + /* + * keep the set-by-proxy server and date headers, otherwise + * generate a new server header / date header + */ + if (r->proxyreq == PROXYREQ_NONE + || !apr_table_get(r->headers_out, "Date")) { + char *date = apr_palloc(r->pool, APR_RFC822_DATE_LEN); + ap_recent_rfc822_date(date, r->request_time); + apr_table_setn(r->headers_out, "Date", date ); + } + if (r->proxyreq == PROXYREQ_NONE + || !apr_table_get(r->headers_out, "Server")) { + const char *us = ap_get_server_banner(); + if (us && *us) { + apr_table_setn(r->headers_out, "Server", us); + } + } + + return h2_headers_rcreate(r, r->status, r->headers_out, r->pool); +} + +typedef enum { + H2_RP_STATUS_LINE, + H2_RP_HEADER_LINE, + H2_RP_DONE +} h2_rp_state_t; + +typedef struct h2_response_parser h2_response_parser; +struct h2_response_parser { + const char *id; + h2_rp_state_t state; + conn_rec *c; + apr_pool_t *pool; + int http_status; + apr_array_header_t *hlines; + apr_bucket_brigade *tmp; + apr_bucket_brigade *saveto; +}; + +static apr_status_t parse_header(h2_response_parser *parser, char *line) { + const char *hline; + if (line[0] == ' ' || line[0] == '\t') { + char **plast; + /* continuation line from the header before this */ + while (line[0] == ' ' || line[0] == '\t') { + ++line; + } + + plast = apr_array_pop(parser->hlines); + if (plast == NULL) { + /* not well formed */ + return APR_EINVAL; + } + hline = apr_psprintf(parser->pool, "%s %s", *plast, line); + } + else { + /* new header line */ + hline = apr_pstrdup(parser->pool, line); + } + APR_ARRAY_PUSH(parser->hlines, const char*) = hline; + return APR_SUCCESS; +} + +static apr_status_t get_line(h2_response_parser *parser, apr_bucket_brigade *bb, + char *line, apr_size_t len) +{ + apr_status_t status; + + if (!parser->tmp) { + parser->tmp = apr_brigade_create(parser->pool, parser->c->bucket_alloc); + } + status = apr_brigade_split_line(parser->tmp, bb, APR_BLOCK_READ, + len); + if (status == APR_SUCCESS) { + --len; + status = apr_brigade_flatten(parser->tmp, line, &len); + if (status == APR_SUCCESS) { + /* we assume a non-0 containing line and remove trailing crlf. */ + line[len] = '\0'; + /* + * XXX: What to do if there is an LF but no CRLF? + * Should we error out? + */ + if (len >= 2 && !strcmp(H2_CRLF, line + len - 2)) { + len -= 2; + line[len] = '\0'; + apr_brigade_cleanup(parser->tmp); + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, parser->c, + "h2_c2(%s): read response line: %s", + parser->id, line); + } + else { + apr_off_t brigade_length; + + /* + * If the brigade parser->tmp becomes longer than our buffer + * for flattening we never have a chance to get a complete + * line. This can happen if we are called multiple times after + * previous calls did not find a H2_CRLF and we returned + * APR_EAGAIN. In this case parser->tmp (correctly) grows + * with each call to apr_brigade_split_line. + * + * XXX: Currently a stack based buffer of HUGE_STRING_LEN is + * used. This means we cannot cope with lines larger than + * HUGE_STRING_LEN which might be an issue. 
+ */ + status = apr_brigade_length(parser->tmp, 0, &brigade_length); + if ((status != APR_SUCCESS) || (brigade_length > (apr_off_t)len)) { + ap_log_cerror(APLOG_MARK, APLOG_ERR, 0, parser->c, APLOGNO(10257) + "h2_c2(%s): read response, line too long", + parser->id); + return APR_ENOSPC; + } + /* this does not look like a complete line yet */ + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, parser->c, + "h2_c2(%s): read response, incomplete line: %s", + parser->id, line); + if (!parser->saveto) { + parser->saveto = apr_brigade_create(parser->pool, + parser->c->bucket_alloc); + } + /* + * Be on the save side and save the parser->tmp brigade + * as it could contain transient buckets which could be + * invalid next time we are here. + * + * NULL for the filter parameter is ok since we + * provide our own brigade as second parameter + * and ap_save_brigade does not need to create one. + */ + ap_save_brigade(NULL, &(parser->saveto), &(parser->tmp), + parser->tmp->p); + APR_BRIGADE_CONCAT(parser->tmp, parser->saveto); + return APR_EAGAIN; + } + } + } + apr_brigade_cleanup(parser->tmp); + return status; +} + +static apr_table_t *make_table(h2_response_parser *parser) +{ + apr_array_header_t *hlines = parser->hlines; + if (hlines) { + apr_table_t *headers = apr_table_make(parser->pool, hlines->nelts); + int i; + + for (i = 0; i < hlines->nelts; ++i) { + char *hline = ((char **)hlines->elts)[i]; + char *sep = ap_strchr(hline, ':'); + if (!sep) { + ap_log_cerror(APLOG_MARK, APLOG_WARNING, APR_EINVAL, parser->c, + APLOGNO(02955) "h2_c2(%s): invalid header[%d] '%s'", + parser->id, i, (char*)hline); + /* not valid format, abort */ + return NULL; + } + (*sep++) = '\0'; + while (*sep == ' ' || *sep == '\t') { + ++sep; + } + + if (!h2_util_ignore_resp_header(hline)) { + apr_table_merge(headers, hline, sep); + } + } + return headers; + } + else { + return apr_table_make(parser->pool, 0); + } +} + +static apr_status_t pass_response(h2_conn_ctx_t *conn_ctx, ap_filter_t *f, + h2_response_parser *parser) +{ + apr_bucket *b; + apr_status_t status; + h2_headers *response = h2_headers_create(parser->http_status, + make_table(parser), + parser->c->notes, + 0, parser->pool); + apr_brigade_cleanup(parser->tmp); + b = h2_bucket_headers_create(parser->c->bucket_alloc, response); + APR_BRIGADE_INSERT_TAIL(parser->tmp, b); + b = apr_bucket_flush_create(parser->c->bucket_alloc); + APR_BRIGADE_INSERT_TAIL(parser->tmp, b); + status = ap_pass_brigade(f->next, parser->tmp); + apr_brigade_cleanup(parser->tmp); + + /* reset parser for possible next response */ + parser->state = H2_RP_STATUS_LINE; + apr_array_clear(parser->hlines); + + if (response->status >= 200) { + conn_ctx->has_final_response = 1; + } + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, parser->c, + APLOGNO(03197) "h2_c2(%s): passed response %d", + parser->id, response->status); + return status; +} + +static apr_status_t parse_status(h2_response_parser *parser, char *line) +{ + int sindex = (apr_date_checkmask(line, "HTTP/#.# ###*")? 9 : + (apr_date_checkmask(line, "HTTP/# ###*")? 7 : 0)); + if (sindex > 0) { + int k = sindex + 3; + char keepchar = line[k]; + line[k] = '\0'; + parser->http_status = atoi(&line[sindex]); + line[k] = keepchar; + parser->state = H2_RP_HEADER_LINE; + + return APR_SUCCESS; + } + /* Seems like there is garbage on the connection. May be a leftover + * from a previous proxy request. + * This should only happen if the H2_RESPONSE filter is not yet in + * place (post_read_request has not been reached and the handler wants + * to write something. 
Probably just the interim response we are + * waiting for. But if there is other data hanging around before + * that, this needs to fail. */ + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, parser->c, APLOGNO(03467) + "h2_c2(%s): unable to parse status line: %s", + parser->id, line); + return APR_EINVAL; +} + +static apr_status_t parse_response(h2_response_parser *parser, + h2_conn_ctx_t *conn_ctx, + ap_filter_t* f, apr_bucket_brigade *bb) +{ + char line[HUGE_STRING_LEN]; + apr_status_t status = APR_SUCCESS; + + while (!APR_BRIGADE_EMPTY(bb) && status == APR_SUCCESS) { + switch (parser->state) { + case H2_RP_STATUS_LINE: + case H2_RP_HEADER_LINE: + status = get_line(parser, bb, line, sizeof(line)); + if (status == APR_EAGAIN) { + /* need more data */ + return APR_SUCCESS; + } + else if (status != APR_SUCCESS) { + return status; + } + if (parser->state == H2_RP_STATUS_LINE) { + /* instead of parsing, just take it directly */ + status = parse_status(parser, line); + } + else if (line[0] == '\0') { + /* end of headers, pass response onward */ + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, parser->c, + "h2_c2(%s): end of response", parser->id); + return pass_response(conn_ctx, f, parser); + } + else { + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, parser->c, + "h2_c2(%s): response header %s", parser->id, line); + status = parse_header(parser, line); + } + break; + + default: + return status; + } + } + return status; +} + +apr_status_t h2_c2_filter_catch_h1_out(ap_filter_t* f, apr_bucket_brigade* bb) +{ + h2_conn_ctx_t *conn_ctx = h2_conn_ctx_get(f->c); + h2_response_parser *parser = f->ctx; + apr_status_t rv; + + ap_assert(conn_ctx); + H2_FILTER_LOG("c2_catch_h1_out", f->c, APLOG_TRACE2, 0, "check", bb); + + if (!f->c->aborted && !conn_ctx->has_final_response) { + if (!parser) { + parser = apr_pcalloc(f->c->pool, sizeof(*parser)); + parser->id = apr_psprintf(f->c->pool, "%s-%d", conn_ctx->id, conn_ctx->stream_id); + parser->pool = f->c->pool; + parser->c = f->c; + parser->state = H2_RP_STATUS_LINE; + parser->hlines = apr_array_make(parser->pool, 10, sizeof(char *)); + f->ctx = parser; + } + + if (!APR_BRIGADE_EMPTY(bb)) { + apr_bucket *b = APR_BRIGADE_FIRST(bb); + if (AP_BUCKET_IS_EOR(b)) { + /* TODO: Yikes, this happens when errors are encountered on input + * before anything from the repsonse has been processed. The + * ap_die_r() call will do nothing in certain conditions. + */ + int result = ap_map_http_request_error(conn_ctx->last_err, + HTTP_INTERNAL_SERVER_ERROR); + request_rec *r = h2_create_request_rec(conn_ctx->request, f->c, 1); + if (r) { + ap_die((result >= 400)? result : HTTP_INTERNAL_SERVER_ERROR, r); + b = ap_bucket_eor_create(f->c->bucket_alloc, r); + APR_BRIGADE_INSERT_TAIL(bb, b); + } + } + } + /* There are cases where we need to parse a serialized http/1.1 response. + * One example is a 100-continue answer via a mod_proxy setup. 
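 *
 * A hedged worked example (made-up bytes, not from the patch): if the
 * brigade holds the serialized interim response
 *
 *   "HTTP/1.1 100 Continue\r\n\r\n"
 *
 * then get_line() strips the CRLF, parse_status() matches the mask
 * "HTTP/#.# ###*" at offset 9 and sets http_status = 100, and the empty
 * header line that follows makes parse_response() call pass_response(),
 * which forwards the interim response downstream as an h2_headers bucket.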
*/ + while (bb && !f->c->aborted && !conn_ctx->has_final_response) { + rv = parse_response(parser, conn_ctx, f, bb); + ap_log_cerror(APLOG_MARK, APLOG_TRACE2, rv, f->c, + "h2_c2(%s): parsed response", parser->id); + if (APR_BRIGADE_EMPTY(bb) || APR_SUCCESS != rv) { + return rv; + } + } + } + + return ap_pass_brigade(f->next, bb); +} + +apr_status_t h2_c2_filter_response_out(ap_filter_t *f, apr_bucket_brigade *bb) +{ + h2_conn_ctx_t *conn_ctx = h2_conn_ctx_get(f->c); + request_rec *r = f->r; + apr_bucket *b, *bresp, *body_bucket = NULL, *next; + ap_bucket_error *eb = NULL; + h2_headers *response = NULL; + int headers_passing = 0; + + H2_FILTER_LOG("c2_response_out", f->c, APLOG_TRACE1, 0, "called with", bb); + + if (f->c->aborted || !conn_ctx || conn_ctx->has_final_response) { + return ap_pass_brigade(f->next, bb); + } + + if (!conn_ctx->has_final_response) { + /* check, if we need to send the response now. Until we actually + * see a DATA bucket or some EOS/EOR, we do not do so. */ + for (b = APR_BRIGADE_FIRST(bb); + b != APR_BRIGADE_SENTINEL(bb); + b = APR_BUCKET_NEXT(b)) + { + if (AP_BUCKET_IS_ERROR(b) && !eb) { + eb = b->data; + } + else if (AP_BUCKET_IS_EOC(b)) { + /* If we see an EOC bucket it is a signal that we should get out + * of the way doing nothing. + */ + ap_remove_output_filter(f); + ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, f->c, + "h2_c2(%s): eoc bucket passed", conn_ctx->id); + return ap_pass_brigade(f->next, bb); + } + else if (H2_BUCKET_IS_HEADERS(b)) { + headers_passing = 1; + } + else if (!APR_BUCKET_IS_FLUSH(b)) { + body_bucket = b; + break; + } + } + + if (eb) { + int st = eb->status; + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, f->c, APLOGNO(03047) + "h2_c2(%s): err bucket status=%d", + conn_ctx->id, st); + /* throw everything away and replace it with the error response + * generated by ap_die() */ + apr_brigade_cleanup(bb); + ap_die(st, r); + return AP_FILTER_ERROR; + } + + if (body_bucket || !headers_passing) { + /* time to insert the response bucket before the body or if + * no h2_headers is passed, e.g. the response is empty */ + response = create_response(r); + if (response == NULL) { + ap_log_cerror(APLOG_MARK, APLOG_NOTICE, 0, f->c, APLOGNO(03048) + "h2_c2(%s): unable to create response", conn_ctx->id); + return APR_ENOMEM; + } + + bresp = h2_bucket_headers_create(f->c->bucket_alloc, response); + if (body_bucket) { + APR_BUCKET_INSERT_BEFORE(body_bucket, bresp); + } + else { + APR_BRIGADE_INSERT_HEAD(bb, bresp); + } + conn_ctx->has_final_response = 1; + r->sent_bodyct = 1; + ap_remove_output_filter_byhandle(f->r->output_filters, "H2_C2_NET_CATCH_H1"); + } + } + + if (r->header_only || AP_STATUS_IS_HEADER_ONLY(r->status)) { + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, f->c, + "h2_c2(%s): headers only, cleanup output brigade", conn_ctx->id); + b = body_bucket? 
body_bucket : APR_BRIGADE_FIRST(bb); + while (b != APR_BRIGADE_SENTINEL(bb)) { + next = APR_BUCKET_NEXT(b); + if (APR_BUCKET_IS_EOS(b) || AP_BUCKET_IS_EOR(b)) { + break; + } + if (!H2_BUCKET_IS_HEADERS(b)) { + APR_BUCKET_REMOVE(b); + apr_bucket_destroy(b); + } + b = next; + } + } + if (conn_ctx->has_final_response) { + /* lets get out of the way, our task is done */ + ap_remove_output_filter(f); + } + return ap_pass_brigade(f->next, bb); +} + + +struct h2_chunk_filter_t { + const char *id; + int eos_chunk_added; + apr_bucket_brigade *bbchunk; + apr_off_t chunked_total; +}; +typedef struct h2_chunk_filter_t h2_chunk_filter_t; + + +static void make_chunk(conn_rec *c, h2_chunk_filter_t *fctx, apr_bucket_brigade *bb, + apr_bucket *first, apr_off_t chunk_len, + apr_bucket *tail) +{ + /* Surround the buckets [first, tail[ with new buckets carrying the + * HTTP/1.1 chunked encoding format. If tail is NULL, the chunk extends + * to the end of the brigade. */ + char buffer[128]; + apr_bucket *b; + apr_size_t len; + + len = (apr_size_t)apr_snprintf(buffer, H2_ALEN(buffer), + "%"APR_UINT64_T_HEX_FMT"\r\n", (apr_uint64_t)chunk_len); + b = apr_bucket_heap_create(buffer, len, NULL, bb->bucket_alloc); + APR_BUCKET_INSERT_BEFORE(first, b); + b = apr_bucket_immortal_create("\r\n", 2, bb->bucket_alloc); + if (tail) { + APR_BUCKET_INSERT_BEFORE(tail, b); + } + else { + APR_BRIGADE_INSERT_TAIL(bb, b); + } + fctx->chunked_total += chunk_len; + ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, c, + "h2_c2(%s): added chunk %ld, total %ld", + fctx->id, (long)chunk_len, (long)fctx->chunked_total); +} + +static int ser_header(void *ctx, const char *name, const char *value) +{ + apr_bucket_brigade *bb = ctx; + apr_brigade_printf(bb, NULL, NULL, "%s: %s\r\n", name, value); + return 1; +} + +static apr_status_t read_and_chunk(ap_filter_t *f, h2_conn_ctx_t *conn_ctx, + apr_read_type_e block) { + h2_chunk_filter_t *fctx = f->ctx; + request_rec *r = f->r; + apr_status_t status = APR_SUCCESS; + + if (!fctx->bbchunk) { + fctx->bbchunk = apr_brigade_create(r->pool, f->c->bucket_alloc); + } + + if (APR_BRIGADE_EMPTY(fctx->bbchunk)) { + apr_bucket *b, *next, *first_data = NULL; + apr_bucket_brigade *tmp; + apr_off_t bblen = 0; + + /* get more data from the lower layer filters. Always do this + * in larger pieces, since we handle the read modes ourself. 
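 *
 * A hedged sketch of the framing make_chunk() above produces (made-up
 * sizes, not from the patch): a 10-byte data bucket followed by EOS ends
 * up in fctx->bbchunk as
 *
 *   "a\r\n" <10 data bytes> "\r\n" "0\r\n\r\n"
 *
 * i.e. a hex length line before the data, a CRLF after it, and a final
 * zero-length chunk (trailers, if a HEADERS bucket is seen, are serialized
 * after the "0\r\n" instead), so the core HTTP_IN filter can de-chunk it as
 * if the client had sent Transfer-Encoding: chunked.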
*/ + status = ap_get_brigade(f->next, fctx->bbchunk, + AP_MODE_READBYTES, block, conn_ctx->mplx->stream_max_mem); + if (status != APR_SUCCESS) { + return status; + } + + for (b = APR_BRIGADE_FIRST(fctx->bbchunk); + b != APR_BRIGADE_SENTINEL(fctx->bbchunk); + b = next) { + next = APR_BUCKET_NEXT(b); + if (APR_BUCKET_IS_METADATA(b)) { + if (first_data) { + make_chunk(f->c, fctx, fctx->bbchunk, first_data, bblen, b); + first_data = NULL; + } + + if (H2_BUCKET_IS_HEADERS(b)) { + h2_headers *headers = h2_bucket_headers_get(b); + + ap_assert(headers); + ap_log_rerror(APLOG_MARK, APLOG_TRACE2, 0, r, + "h2_c2(%s-%d): receiving trailers", + conn_ctx->id, conn_ctx->stream_id); + tmp = apr_brigade_split_ex(fctx->bbchunk, b, NULL); + if (!apr_is_empty_table(headers->headers)) { + status = apr_brigade_puts(fctx->bbchunk, NULL, NULL, "0\r\n"); + apr_table_do(ser_header, fctx->bbchunk, headers->headers, NULL); + status = apr_brigade_puts(fctx->bbchunk, NULL, NULL, "\r\n"); + } + else { + status = apr_brigade_puts(fctx->bbchunk, NULL, NULL, "0\r\n\r\n"); + } + r->trailers_in = apr_table_clone(r->pool, headers->headers); + APR_BUCKET_REMOVE(b); + apr_bucket_destroy(b); + APR_BRIGADE_CONCAT(fctx->bbchunk, tmp); + apr_brigade_destroy(tmp); + fctx->eos_chunk_added = 1; + } + else if (APR_BUCKET_IS_EOS(b)) { + ap_log_rerror(APLOG_MARK, APLOG_TRACE2, 0, r, + "h2_c2(%s-%d): receiving eos", + conn_ctx->id, conn_ctx->stream_id); + if (!fctx->eos_chunk_added) { + tmp = apr_brigade_split_ex(fctx->bbchunk, b, NULL); + status = apr_brigade_puts(fctx->bbchunk, NULL, NULL, "0\r\n\r\n"); + APR_BRIGADE_CONCAT(fctx->bbchunk, tmp); + apr_brigade_destroy(tmp); + } + fctx->eos_chunk_added = 0; + } + } + else if (b->length == 0) { + APR_BUCKET_REMOVE(b); + apr_bucket_destroy(b); + } + else { + if (!first_data) { + first_data = b; + bblen = 0; + } + bblen += b->length; + } + } + + if (first_data) { + make_chunk(f->c, fctx, fctx->bbchunk, first_data, bblen, NULL); + } + } + return status; +} + +apr_status_t h2_c2_filter_request_in(ap_filter_t* f, + apr_bucket_brigade* bb, + ap_input_mode_t mode, + apr_read_type_e block, + apr_off_t readbytes) +{ + h2_conn_ctx_t *conn_ctx = h2_conn_ctx_get(f->c); + h2_chunk_filter_t *fctx = f->ctx; + request_rec *r = f->r; + apr_status_t status = APR_SUCCESS; + apr_bucket *b, *next; + core_server_config *conf = + (core_server_config *) ap_get_module_config(r->server->module_config, + &core_module); + ap_assert(conn_ctx); + + if (!fctx) { + fctx = apr_pcalloc(r->pool, sizeof(*fctx)); + fctx->id = apr_psprintf(r->pool, "%s-%d", conn_ctx->id, conn_ctx->stream_id); + f->ctx = fctx; + } + + ap_log_rerror(APLOG_MARK, APLOG_TRACE2, 0, f->r, + "h2_c2(%s-%d): request input, mode=%d, block=%d, " + "readbytes=%ld, exp=%d", + conn_ctx->id, conn_ctx->stream_id, mode, block, + (long)readbytes, r->expecting_100); + if (!conn_ctx->input_chunked) { + status = ap_get_brigade(f->next, bb, mode, block, readbytes); + /* pipe data through, just take care of trailers */ + for (b = APR_BRIGADE_FIRST(bb); + b != APR_BRIGADE_SENTINEL(bb); b = next) { + next = APR_BUCKET_NEXT(b); + if (H2_BUCKET_IS_HEADERS(b)) { + h2_headers *headers = h2_bucket_headers_get(b); + ap_assert(headers); + ap_log_rerror(APLOG_MARK, APLOG_TRACE2, 0, r, + "h2_c2(%s-%d): receiving trailers", + conn_ctx->id, conn_ctx->stream_id); + r->trailers_in = headers->headers; + if (conf && conf->merge_trailers == AP_MERGE_TRAILERS_ENABLE) { + r->headers_in = apr_table_overlay(r->pool, r->headers_in, + r->trailers_in); + } + APR_BUCKET_REMOVE(b); + 
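/* Illustrative note, with a hypothetical field name not taken from the
 * patch: a client trailer such as "x-checksum: abc" sent after the request
 * body is now available in r->trailers_in; if the core MergeTrailers
 * directive is enabled (AP_MERGE_TRAILERS_ENABLE above), it has also been
 * overlaid into r->headers_in for handlers that only look there. */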
apr_bucket_destroy(b); + ap_remove_input_filter(f); + + if (headers->raw_bytes && h2_c_logio_add_bytes_in) { + h2_c_logio_add_bytes_in(f->c, headers->raw_bytes); + } + break; + } + } + return status; + } + + /* Things are more complicated. The standard HTTP input filter, which + * does a lot what we do not want to duplicate, also cares about chunked + * transfer encoding and trailers. + * We need to simulate chunked encoding for it to be happy. + */ + if ((status = read_and_chunk(f, conn_ctx, block)) != APR_SUCCESS) { + return status; + } + + if (mode == AP_MODE_EXHAUSTIVE) { + /* return all we have */ + APR_BRIGADE_CONCAT(bb, fctx->bbchunk); + } + else if (mode == AP_MODE_READBYTES) { + status = h2_brigade_concat_length(bb, fctx->bbchunk, readbytes); + } + else if (mode == AP_MODE_SPECULATIVE) { + status = h2_brigade_copy_length(bb, fctx->bbchunk, readbytes); + } + else if (mode == AP_MODE_GETLINE) { + /* we are reading a single LF line, e.g. the HTTP headers. + * this has the nasty side effect to split the bucket, even + * though it ends with CRLF and creates a 0 length bucket */ + status = apr_brigade_split_line(bb, fctx->bbchunk, block, HUGE_STRING_LEN); + if (APLOGctrace1(f->c)) { + char buffer[1024]; + apr_size_t len = sizeof(buffer)-1; + apr_brigade_flatten(bb, buffer, &len); + buffer[len] = 0; + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, f->c, + "h2_c2(%s-%d): getline: %s", + conn_ctx->id, conn_ctx->stream_id, buffer); + } + } + else { + /* Hmm, well. There is mode AP_MODE_EATCRLF, but we chose not + * to support it. Seems to work. */ + ap_log_cerror(APLOG_MARK, APLOG_ERR, APR_ENOTIMPL, f->c, + APLOGNO(02942) + "h2_c2, unsupported READ mode %d", mode); + status = APR_ENOTIMPL; + } + + h2_util_bb_log(f->c, conn_ctx->stream_id, APLOG_TRACE2, "returning input", bb); + return status; +} + +apr_status_t h2_c2_filter_trailers_out(ap_filter_t *f, apr_bucket_brigade *bb) +{ + h2_conn_ctx_t *conn_ctx = h2_conn_ctx_get(f->c); + request_rec *r = f->r; + apr_bucket *b, *e; + + if (conn_ctx && r) { + /* Detect the EOS/EOR bucket and forward any trailers that may have + * been set to our h2_headers. + */ + for (b = APR_BRIGADE_FIRST(bb); + b != APR_BRIGADE_SENTINEL(bb); + b = APR_BUCKET_NEXT(b)) + { + if ((APR_BUCKET_IS_EOS(b) || AP_BUCKET_IS_EOR(b)) + && r->trailers_out && !apr_is_empty_table(r->trailers_out)) { + h2_headers *headers; + apr_table_t *trailers; + + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, f->c, APLOGNO(03049) + "h2_c2(%s-%d): sending trailers", + conn_ctx->id, conn_ctx->stream_id); + trailers = apr_table_clone(r->pool, r->trailers_out); + headers = h2_headers_rcreate(r, HTTP_OK, trailers, r->pool); + e = h2_bucket_headers_create(bb->bucket_alloc, headers); + APR_BUCKET_INSERT_BEFORE(b, e); + apr_table_clear(r->trailers_out); + ap_remove_output_filter(f); + break; + } + } + } + + return ap_pass_brigade(f->next, bb); +} + +#endif /* else #if AP_HAS_RESPONSE_BUCKETS */ diff --git a/modules/http2/h2_c2_filter.h b/modules/http2/h2_c2_filter.h new file mode 100644 index 0000000..c6f50dd --- /dev/null +++ b/modules/http2/h2_c2_filter.h @@ -0,0 +1,68 @@ +/* Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef __mod_h2__h2_c2_filter__ +#define __mod_h2__h2_c2_filter__ + +#include "h2.h" + +/** + * Input filter on secondary connections that insert the REQUEST bucket + * with the request to perform and then removes itself. + */ +apr_status_t h2_c2_filter_request_in(ap_filter_t *f, + apr_bucket_brigade *bb, + ap_input_mode_t mode, + apr_read_type_e block, + apr_off_t readbytes); + +#if AP_HAS_RESPONSE_BUCKETS + +/** + * Output filter that inspects the request_rec->notes of the request + * itself and possible internal redirects to detect conditions that + * merit specific HTTP/2 response codes, such as 421. + */ +apr_status_t h2_c2_filter_notes_out(ap_filter_t *f, apr_bucket_brigade *bb); + +#else /* AP_HAS_RESPONSE_BUCKETS */ + +/** + * h2_from_h1 parses a HTTP/1.1 response into + * - response status + * - a list of header values + * - a series of bytes that represent the response body alone, without + * any meta data, such as inserted by chunked transfer encoding. + * + * All data is allocated from the stream memory pool. + * + * Again, see comments in h2_request: ideally we would take the headers + * and status from the httpd structures instead of parsing them here, but + * we need to have all handlers and filters involved in request/response + * processing, so this seems to be the way for now. + */ +struct h2_headers; +struct h2_response_parser; + +apr_status_t h2_c2_filter_catch_h1_out(ap_filter_t* f, apr_bucket_brigade* bb); + +apr_status_t h2_c2_filter_response_out(ap_filter_t *f, apr_bucket_brigade *bb); + +apr_status_t h2_c2_filter_trailers_out(ap_filter_t *f, apr_bucket_brigade *bb); + +#endif /* else AP_HAS_RESPONSE_BUCKETS */ + +#endif /* defined(__mod_h2__h2_c2_filter__) */ diff --git a/modules/http2/h2_config.c b/modules/http2/h2_config.c index 8766355..22653d4 100644 --- a/modules/http2/h2_config.c +++ b/modules/http2/h2_config.c @@ -30,11 +30,10 @@ #include #include "h2.h" -#include "h2_alt_svc.h" -#include "h2_ctx.h" -#include "h2_conn.h" +#include "h2_conn_ctx.h" +#include "h2_c1.h" #include "h2_config.h" -#include "h2_h2.h" +#include "h2_protocol.h" #include "h2_private.h" #define DEF_VAL (-1) @@ -42,17 +41,65 @@ #define H2_CONFIG_GET(a, b, n) \ (((a)->n == DEF_VAL)? (b) : (a))->n +#define H2_CONFIG_SET(a, n, v) \ + ((a)->n = v) + +#define CONFIG_CMD_SET(cmd,dir,var,val) \ + h2_config_seti(((cmd)->path? (dir) : NULL), h2_config_sget((cmd)->server), var, val) + +#define CONFIG_CMD_SET64(cmd,dir,var,val) \ + h2_config_seti64(((cmd)->path? (dir) : NULL), h2_config_sget((cmd)->server), var, val) + +/* Apache httpd module configuration for h2. 
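 *
 * A hedged example of the DEF_VAL / H2_CONFIG_GET convention used when
 * merging these structs (hypothetical values, not taken from the patch):
 * with DEF_VAL == -1, a more specific config only overrides the base when
 * the value was set explicitly:
 *
 *   add->h2_push == DEF_VAL, base->h2_push == 1
 *     H2_CONFIG_GET(add, base, h2_push) -> 1   (falls back to base)
 *   add->h2_push == 0,       base->h2_push == 1
 *     H2_CONFIG_GET(add, base, h2_push) -> 0   (explicit override wins)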
*/ +typedef struct h2_config { + const char *name; + int h2_max_streams; /* max concurrent # streams (http2) */ + int h2_window_size; /* stream window size (http2) */ + int min_workers; /* min # of worker threads/child */ + int max_workers; /* max # of worker threads/child */ + apr_interval_time_t idle_limit; /* max duration for idle workers */ + int stream_max_mem_size; /* max # bytes held in memory/stream */ + int h2_direct; /* if mod_h2 is active directly */ + int modern_tls_only; /* Accept only modern TLS in HTTP/2 connections */ + int h2_upgrade; /* Allow HTTP/1 upgrade to h2/h2c */ + apr_int64_t tls_warmup_size; /* Amount of TLS data to send before going full write size */ + int tls_cooldown_secs; /* Seconds of idle time before going back to small TLS records */ + int h2_push; /* if HTTP/2 server push is enabled */ + struct apr_hash_t *priorities; /* map of content-type to h2_priority records */ + + int push_diary_size; /* # of entries in push diary */ + int copy_files; /* if files shall be copied vs setaside on output */ + apr_array_header_t *push_list; /* list of h2_push_res configurations */ + apr_table_t *early_headers; /* HTTP headers for a 103 response */ + int early_hints; /* support status code 103 */ + int padding_bits; + int padding_always; + int output_buffered; + apr_interval_time_t stream_timeout;/* beam timeout */ + int max_data_frame_len; /* max # bytes in a single h2 DATA frame */ + int proxy_requests; /* act as forward proxy */ + int h2_websockets; /* if mod_h2 negotiating WebSockets */ +} h2_config; + +typedef struct h2_dir_config { + const char *name; + int h2_upgrade; /* Allow HTTP/1 upgrade to h2/h2c */ + int h2_push; /* if HTTP/2 server push is enabled */ + apr_array_header_t *push_list; /* list of h2_push_res configurations */ + apr_table_t *early_headers; /* HTTP headers for a 103 response */ + int early_hints; /* support status code 103 */ + apr_interval_time_t stream_timeout;/* beam timeout */ +} h2_dir_config; + + static h2_config defconf = { "default", 100, /* max_streams */ H2_INITIAL_WINDOW_SIZE, /* window_size */ -1, /* min workers */ -1, /* max workers */ - 10 * 60, /* max workers idle secs */ + apr_time_from_sec(10 * 60), /* workers idle limit */ 32 * 1024, /* stream max mem size */ - NULL, /* no alt-svcs */ - -1, /* alt-svc max age */ - 0, /* serialize headers */ -1, /* h2 direct mode */ 1, /* modern TLS only */ -1, /* HTTP/1 Upgrade support */ @@ -63,7 +110,25 @@ static h2_config defconf = { 256, /* push diary size */ 0, /* copy files across threads */ NULL, /* push list */ + NULL, /* early headers */ 0, /* early hints, http status 103 */ + 0, /* padding bits */ + 1, /* padding always */ + 1, /* stream output buffered */ + -1, /* beam timeout */ + 0, /* max DATA frame len, 0 == no extra limit */ + 0, /* forward proxy */ + 0, /* WebSockets negotiation, enabled */ +}; + +static h2_dir_config defdconf = { + "default", + -1, /* HTTP/1 Upgrade support */ + -1, /* HTTP/2 server push enabled */ + NULL, /* push list */ + NULL, /* early headers */ + -1, /* early hints, http status 103 */ + -1, /* beam timeout */ }; void h2_config_init(apr_pool_t *pool) @@ -71,22 +136,18 @@ void h2_config_init(apr_pool_t *pool) (void)pool; } -static void *h2_config_create(apr_pool_t *pool, - const char *prefix, const char *x) +void *h2_config_create_svr(apr_pool_t *pool, server_rec *s) { h2_config *conf = (h2_config *)apr_pcalloc(pool, sizeof(h2_config)); - const char *s = x? 
x : "unknown"; - char *name = apr_pstrcat(pool, prefix, "[", s, "]", NULL); + char *name = apr_pstrcat(pool, "srv[", s->defn_name, "]", NULL); conf->name = name; conf->h2_max_streams = DEF_VAL; conf->h2_window_size = DEF_VAL; conf->min_workers = DEF_VAL; conf->max_workers = DEF_VAL; - conf->max_worker_idle_secs = DEF_VAL; + conf->idle_limit = DEF_VAL; conf->stream_max_mem_size = DEF_VAL; - conf->alt_svc_max_age = DEF_VAL; - conf->serialize_headers = DEF_VAL; conf->h2_direct = DEF_VAL; conf->modern_tls_only = DEF_VAL; conf->h2_upgrade = DEF_VAL; @@ -97,20 +158,18 @@ static void *h2_config_create(apr_pool_t *pool, conf->push_diary_size = DEF_VAL; conf->copy_files = DEF_VAL; conf->push_list = NULL; + conf->early_headers = NULL; conf->early_hints = DEF_VAL; + conf->padding_bits = DEF_VAL; + conf->padding_always = DEF_VAL; + conf->output_buffered = DEF_VAL; + conf->stream_timeout = DEF_VAL; + conf->max_data_frame_len = DEF_VAL; + conf->proxy_requests = DEF_VAL; + conf->h2_websockets = DEF_VAL; return conf; } -void *h2_config_create_svr(apr_pool_t *pool, server_rec *s) -{ - return h2_config_create(pool, "srv", s->defn_name); -} - -void *h2_config_create_dir(apr_pool_t *pool, char *x) -{ - return h2_config_create(pool, "dir", x); -} - static void *h2_config_merge(apr_pool_t *pool, void *basev, void *addv) { h2_config *base = (h2_config *)basev; @@ -123,11 +182,8 @@ static void *h2_config_merge(apr_pool_t *pool, void *basev, void *addv) n->h2_window_size = H2_CONFIG_GET(add, base, h2_window_size); n->min_workers = H2_CONFIG_GET(add, base, min_workers); n->max_workers = H2_CONFIG_GET(add, base, max_workers); - n->max_worker_idle_secs = H2_CONFIG_GET(add, base, max_worker_idle_secs); + n->idle_limit = H2_CONFIG_GET(add, base, idle_limit); n->stream_max_mem_size = H2_CONFIG_GET(add, base, stream_max_mem_size); - n->alt_svcs = add->alt_svcs? add->alt_svcs : base->alt_svcs; - n->alt_svc_max_age = H2_CONFIG_GET(add, base, alt_svc_max_age); - n->serialize_headers = H2_CONFIG_GET(add, base, serialize_headers); n->h2_direct = H2_CONFIG_GET(add, base, h2_direct); n->modern_tls_only = H2_CONFIG_GET(add, base, modern_tls_only); n->h2_upgrade = H2_CONFIG_GET(add, base, h2_upgrade); @@ -142,32 +198,75 @@ static void *h2_config_merge(apr_pool_t *pool, void *basev, void *addv) } n->push_diary_size = H2_CONFIG_GET(add, base, push_diary_size); n->copy_files = H2_CONFIG_GET(add, base, copy_files); + n->output_buffered = H2_CONFIG_GET(add, base, output_buffered); if (add->push_list && base->push_list) { n->push_list = apr_array_append(pool, base->push_list, add->push_list); } else { n->push_list = add->push_list? add->push_list : base->push_list; } + if (add->early_headers && base->early_headers) { + n->early_headers = apr_table_overlay(pool, add->early_headers, base->early_headers); + } + else { + n->early_headers = add->early_headers? 
add->early_headers : base->early_headers; + } n->early_hints = H2_CONFIG_GET(add, base, early_hints); + n->padding_bits = H2_CONFIG_GET(add, base, padding_bits); + n->padding_always = H2_CONFIG_GET(add, base, padding_always); + n->stream_timeout = H2_CONFIG_GET(add, base, stream_timeout); + n->max_data_frame_len = H2_CONFIG_GET(add, base, max_data_frame_len); + n->proxy_requests = H2_CONFIG_GET(add, base, proxy_requests); + n->h2_websockets = H2_CONFIG_GET(add, base, h2_websockets); return n; } -void *h2_config_merge_dir(apr_pool_t *pool, void *basev, void *addv) +void *h2_config_merge_svr(apr_pool_t *pool, void *basev, void *addv) { return h2_config_merge(pool, basev, addv); } -void *h2_config_merge_svr(apr_pool_t *pool, void *basev, void *addv) +void *h2_config_create_dir(apr_pool_t *pool, char *x) { - return h2_config_merge(pool, basev, addv); + h2_dir_config *conf = (h2_dir_config *)apr_pcalloc(pool, sizeof(h2_dir_config)); + const char *s = x? x : "unknown"; + char *name = apr_pstrcat(pool, "dir[", s, "]", NULL); + + conf->name = name; + conf->h2_upgrade = DEF_VAL; + conf->h2_push = DEF_VAL; + conf->early_hints = DEF_VAL; + conf->stream_timeout = DEF_VAL; + return conf; } -int h2_config_geti(const h2_config *conf, h2_config_var_t var) +void *h2_config_merge_dir(apr_pool_t *pool, void *basev, void *addv) { - return (int)h2_config_geti64(conf, var); + h2_dir_config *base = (h2_dir_config *)basev; + h2_dir_config *add = (h2_dir_config *)addv; + h2_dir_config *n = (h2_dir_config *)apr_pcalloc(pool, sizeof(h2_dir_config)); + + n->name = apr_pstrcat(pool, "merged[", add->name, ", ", base->name, "]", NULL); + n->h2_upgrade = H2_CONFIG_GET(add, base, h2_upgrade); + n->h2_push = H2_CONFIG_GET(add, base, h2_push); + if (add->push_list && base->push_list) { + n->push_list = apr_array_append(pool, base->push_list, add->push_list); + } + else { + n->push_list = add->push_list? add->push_list : base->push_list; + } + if (add->early_headers && base->early_headers) { + n->early_headers = apr_table_overlay(pool, add->early_headers, base->early_headers); + } + else { + n->early_headers = add->early_headers? 
add->early_headers : base->early_headers; + } + n->early_hints = H2_CONFIG_GET(add, base, early_hints); + n->stream_timeout = H2_CONFIG_GET(add, base, stream_timeout); + return n; } -apr_int64_t h2_config_geti64(const h2_config *conf, h2_config_var_t var) +static apr_int64_t h2_srv_config_geti64(const h2_config *conf, h2_config_var_t var) { switch(var) { case H2_CONF_MAX_STREAMS: @@ -178,14 +277,10 @@ apr_int64_t h2_config_geti64(const h2_config *conf, h2_config_var_t var) return H2_CONFIG_GET(conf, &defconf, min_workers); case H2_CONF_MAX_WORKERS: return H2_CONFIG_GET(conf, &defconf, max_workers); - case H2_CONF_MAX_WORKER_IDLE_SECS: - return H2_CONFIG_GET(conf, &defconf, max_worker_idle_secs); + case H2_CONF_MAX_WORKER_IDLE_LIMIT: + return H2_CONFIG_GET(conf, &defconf, idle_limit); case H2_CONF_STREAM_MAX_MEM: return H2_CONFIG_GET(conf, &defconf, stream_max_mem_size); - case H2_CONF_ALT_SVC_MAX_AGE: - return H2_CONFIG_GET(conf, &defconf, alt_svc_max_age); - case H2_CONF_SER_HEADERS: - return H2_CONFIG_GET(conf, &defconf, serialize_headers); case H2_CONF_MODERN_TLS_ONLY: return H2_CONFIG_GET(conf, &defconf, modern_tls_only); case H2_CONF_UPGRADE: @@ -204,12 +299,112 @@ apr_int64_t h2_config_geti64(const h2_config *conf, h2_config_var_t var) return H2_CONFIG_GET(conf, &defconf, copy_files); case H2_CONF_EARLY_HINTS: return H2_CONFIG_GET(conf, &defconf, early_hints); + case H2_CONF_PADDING_BITS: + return H2_CONFIG_GET(conf, &defconf, padding_bits); + case H2_CONF_PADDING_ALWAYS: + return H2_CONFIG_GET(conf, &defconf, padding_always); + case H2_CONF_OUTPUT_BUFFER: + return H2_CONFIG_GET(conf, &defconf, output_buffered); + case H2_CONF_STREAM_TIMEOUT: + return H2_CONFIG_GET(conf, &defconf, stream_timeout); + case H2_CONF_MAX_DATA_FRAME_LEN: + return H2_CONFIG_GET(conf, &defconf, max_data_frame_len); + case H2_CONF_PROXY_REQUESTS: + return H2_CONFIG_GET(conf, &defconf, proxy_requests); + case H2_CONF_WEBSOCKETS: + return H2_CONFIG_GET(conf, &defconf, h2_websockets); default: return DEF_VAL; } } -const h2_config *h2_config_sget(server_rec *s) +static void h2_srv_config_seti(h2_config *conf, h2_config_var_t var, int val) +{ + switch(var) { + case H2_CONF_MAX_STREAMS: + H2_CONFIG_SET(conf, h2_max_streams, val); + break; + case H2_CONF_WIN_SIZE: + H2_CONFIG_SET(conf, h2_window_size, val); + break; + case H2_CONF_MIN_WORKERS: + H2_CONFIG_SET(conf, min_workers, val); + break; + case H2_CONF_MAX_WORKERS: + H2_CONFIG_SET(conf, max_workers, val); + break; + case H2_CONF_STREAM_MAX_MEM: + H2_CONFIG_SET(conf, stream_max_mem_size, val); + break; + case H2_CONF_MODERN_TLS_ONLY: + H2_CONFIG_SET(conf, modern_tls_only, val); + break; + case H2_CONF_UPGRADE: + H2_CONFIG_SET(conf, h2_upgrade, val); + break; + case H2_CONF_DIRECT: + H2_CONFIG_SET(conf, h2_direct, val); + break; + case H2_CONF_TLS_WARMUP_SIZE: + H2_CONFIG_SET(conf, tls_warmup_size, val); + break; + case H2_CONF_TLS_COOLDOWN_SECS: + H2_CONFIG_SET(conf, tls_cooldown_secs, val); + break; + case H2_CONF_PUSH: + H2_CONFIG_SET(conf, h2_push, val); + break; + case H2_CONF_PUSH_DIARY_SIZE: + H2_CONFIG_SET(conf, push_diary_size, val); + break; + case H2_CONF_COPY_FILES: + H2_CONFIG_SET(conf, copy_files, val); + break; + case H2_CONF_EARLY_HINTS: + H2_CONFIG_SET(conf, early_hints, val); + break; + case H2_CONF_PADDING_BITS: + H2_CONFIG_SET(conf, padding_bits, val); + break; + case H2_CONF_PADDING_ALWAYS: + H2_CONFIG_SET(conf, padding_always, val); + break; + case H2_CONF_OUTPUT_BUFFER: + H2_CONFIG_SET(conf, output_buffered, val); + break; + case 
H2_CONF_MAX_DATA_FRAME_LEN: + H2_CONFIG_SET(conf, max_data_frame_len, val); + break; + case H2_CONF_PROXY_REQUESTS: + H2_CONFIG_SET(conf, proxy_requests, val); + break; + case H2_CONF_WEBSOCKETS: + H2_CONFIG_SET(conf, h2_websockets, val); + break; + default: + break; + } +} + +static void h2_srv_config_seti64(h2_config *conf, h2_config_var_t var, apr_int64_t val) +{ + switch(var) { + case H2_CONF_TLS_WARMUP_SIZE: + H2_CONFIG_SET(conf, tls_warmup_size, val); + break; + case H2_CONF_STREAM_TIMEOUT: + H2_CONFIG_SET(conf, stream_timeout, val); + break; + case H2_CONF_MAX_WORKER_IDLE_LIMIT: + H2_CONFIG_SET(conf, idle_limit, val); + break; + default: + h2_srv_config_seti(conf, var, (int)val); + break; + } +} + +static h2_config *h2_config_sget(server_rec *s) { h2_config *cfg = (h2_config *)ap_get_module_config(s->module_config, &http2_module); @@ -217,177 +412,310 @@ const h2_config *h2_config_sget(server_rec *s) return cfg; } -const struct h2_priority *h2_config_get_priority(const h2_config *conf, - const char *content_type) +static const h2_dir_config *h2_config_rget(request_rec *r) +{ + h2_dir_config *cfg = (h2_dir_config *)ap_get_module_config(r->per_dir_config, + &http2_module); + ap_assert(cfg); + return cfg; +} + +static apr_int64_t h2_dir_config_geti64(const h2_dir_config *conf, h2_config_var_t var) +{ + switch(var) { + case H2_CONF_UPGRADE: + return H2_CONFIG_GET(conf, &defdconf, h2_upgrade); + case H2_CONF_PUSH: + return H2_CONFIG_GET(conf, &defdconf, h2_push); + case H2_CONF_EARLY_HINTS: + return H2_CONFIG_GET(conf, &defdconf, early_hints); + case H2_CONF_STREAM_TIMEOUT: + return H2_CONFIG_GET(conf, &defdconf, stream_timeout); + + default: + return DEF_VAL; + } +} + +static void h2_config_seti(h2_dir_config *dconf, h2_config *conf, h2_config_var_t var, int val) +{ + int set_srv = !dconf; + if (dconf) { + switch(var) { + case H2_CONF_UPGRADE: + H2_CONFIG_SET(dconf, h2_upgrade, val); + break; + case H2_CONF_PUSH: + H2_CONFIG_SET(dconf, h2_push, val); + break; + case H2_CONF_EARLY_HINTS: + H2_CONFIG_SET(dconf, early_hints, val); + break; + default: + /* not handled in dir_conf */ + set_srv = 1; + break; + } + } + + if (set_srv) { + h2_srv_config_seti(conf, var, val); + } +} + +static void h2_config_seti64(h2_dir_config *dconf, h2_config *conf, h2_config_var_t var, apr_int64_t val) +{ + int set_srv = !dconf; + if (dconf) { + switch(var) { + case H2_CONF_STREAM_TIMEOUT: + H2_CONFIG_SET(dconf, stream_timeout, val); + break; + default: + /* not handled in dir_conf */ + set_srv = 1; + break; + } + } + + if (set_srv) { + h2_srv_config_seti64(conf, var, val); + } +} + +static const h2_config *h2_config_get(conn_rec *c) +{ + h2_conn_ctx_t *conn_ctx = h2_conn_ctx_get(c); + + if (conn_ctx && conn_ctx->server) { + return h2_config_sget(conn_ctx->server); + } + return h2_config_sget(c->base_server); +} + +int h2_config_cgeti(conn_rec *c, h2_config_var_t var) { + return (int)h2_srv_config_geti64(h2_config_get(c), var); +} + +apr_int64_t h2_config_cgeti64(conn_rec *c, h2_config_var_t var) +{ + return h2_srv_config_geti64(h2_config_get(c), var); +} + +int h2_config_sgeti(server_rec *s, h2_config_var_t var) +{ + return (int)h2_srv_config_geti64(h2_config_sget(s), var); +} + +apr_int64_t h2_config_sgeti64(server_rec *s, h2_config_var_t var) +{ + return h2_srv_config_geti64(h2_config_sget(s), var); +} + +int h2_config_geti(request_rec *r, server_rec *s, h2_config_var_t var) +{ + return (int)h2_config_geti64(r, s, var); +} + +apr_int64_t h2_config_geti64(request_rec *r, server_rec *s, h2_config_var_t var) 
+{ + apr_int64_t mode = r? (int)h2_dir_config_geti64(h2_config_rget(r), var) : DEF_VAL; + return (mode != DEF_VAL)? mode : h2_config_sgeti64(s, var); +} + +int h2_config_rgeti(request_rec *r, h2_config_var_t var) +{ + return h2_config_geti(r, r->server, var); +} + +apr_int64_t h2_config_rgeti64(request_rec *r, h2_config_var_t var) +{ + return h2_config_geti64(r, r->server, var); +} + +apr_array_header_t *h2_config_push_list(request_rec *r) +{ + const h2_config *sconf; + const h2_dir_config *conf = h2_config_rget(r); + + if (conf && conf->push_list) { + return conf->push_list; + } + sconf = h2_config_sget(r->server); + return sconf? sconf->push_list : NULL; +} + +apr_table_t *h2_config_early_headers(request_rec *r) +{ + const h2_config *sconf; + const h2_dir_config *conf = h2_config_rget(r); + + if (conf && conf->early_headers) { + return conf->early_headers; + } + sconf = h2_config_sget(r->server); + return sconf? sconf->early_headers : NULL; +} + +const struct h2_priority *h2_cconfig_get_priority(conn_rec *c, const char *content_type) +{ + const h2_config *conf = h2_config_get(c); if (content_type && conf->priorities) { - size_t len = strcspn(content_type, "; \t"); + apr_ssize_t len = (apr_ssize_t)strcspn(content_type, "; \t"); h2_priority *prio = apr_hash_get(conf->priorities, content_type, len); return prio? prio : apr_hash_get(conf->priorities, "*", 1); } return NULL; } -static const char *h2_conf_set_max_streams(cmd_parms *parms, - void *arg, const char *value) +static const char *h2_conf_set_max_streams(cmd_parms *cmd, + void *dirconf, const char *value) { - h2_config *cfg = (h2_config *)h2_config_sget(parms->server); - cfg->h2_max_streams = (int)apr_atoi64(value); - (void)arg; - if (cfg->h2_max_streams < 1) { + apr_int64_t ival = (int)apr_atoi64(value); + if (ival < 1) { return "value must be > 0"; } + CONFIG_CMD_SET64(cmd, dirconf, H2_CONF_MAX_STREAMS, ival); return NULL; } -static const char *h2_conf_set_window_size(cmd_parms *parms, - void *arg, const char *value) +static const char *h2_conf_set_window_size(cmd_parms *cmd, + void *dirconf, const char *value) { - h2_config *cfg = (h2_config *)h2_config_sget(parms->server); - cfg->h2_window_size = (int)apr_atoi64(value); - (void)arg; - if (cfg->h2_window_size < 1024) { + int val = (int)apr_atoi64(value); + if (val < 1024) { return "value must be >= 1024"; } + CONFIG_CMD_SET(cmd, dirconf, H2_CONF_WIN_SIZE, val); return NULL; } -static const char *h2_conf_set_min_workers(cmd_parms *parms, - void *arg, const char *value) +static const char *h2_conf_set_min_workers(cmd_parms *cmd, + void *dirconf, const char *value) { - h2_config *cfg = (h2_config *)h2_config_sget(parms->server); - cfg->min_workers = (int)apr_atoi64(value); - (void)arg; - if (cfg->min_workers < 1) { + int val = (int)apr_atoi64(value); + if (val < 1) { return "value must be > 0"; } + CONFIG_CMD_SET(cmd, dirconf, H2_CONF_MIN_WORKERS, val); return NULL; } -static const char *h2_conf_set_max_workers(cmd_parms *parms, - void *arg, const char *value) +static const char *h2_conf_set_max_workers(cmd_parms *cmd, + void *dirconf, const char *value) { - h2_config *cfg = (h2_config *)h2_config_sget(parms->server); - cfg->max_workers = (int)apr_atoi64(value); - (void)arg; - if (cfg->max_workers < 1) { + int val = (int)apr_atoi64(value); + if (val < 1) { return "value must be > 0"; } + CONFIG_CMD_SET(cmd, dirconf, H2_CONF_MAX_WORKERS, val); return NULL; } -static const char *h2_conf_set_max_worker_idle_secs(cmd_parms *parms, - void *arg, const char *value) +static const char 
*h2_conf_set_max_worker_idle_limit(cmd_parms *cmd, + void *dirconf, const char *value) { - h2_config *cfg = (h2_config *)h2_config_sget(parms->server); - cfg->max_worker_idle_secs = (int)apr_atoi64(value); - (void)arg; - if (cfg->max_worker_idle_secs < 1) { - return "value must be > 0"; + apr_interval_time_t timeout; + apr_status_t rv = ap_timeout_parameter_parse(value, &timeout, "s"); + if (rv != APR_SUCCESS) { + return "Invalid idle limit value"; } + if (timeout <= 0) { + timeout = DEF_VAL; + } + CONFIG_CMD_SET64(cmd, dirconf, H2_CONF_MAX_WORKER_IDLE_LIMIT, timeout); return NULL; } -static const char *h2_conf_set_stream_max_mem_size(cmd_parms *parms, - void *arg, const char *value) +static const char *h2_conf_set_stream_max_mem_size(cmd_parms *cmd, + void *dirconf, const char *value) { - h2_config *cfg = (h2_config *)h2_config_sget(parms->server); - - - cfg->stream_max_mem_size = (int)apr_atoi64(value); - (void)arg; - if (cfg->stream_max_mem_size < 1024) { + int val = (int)apr_atoi64(value); + if (val < 1024) { return "value must be >= 1024"; } + CONFIG_CMD_SET(cmd, dirconf, H2_CONF_STREAM_MAX_MEM, val); return NULL; } -static const char *h2_add_alt_svc(cmd_parms *parms, - void *arg, const char *value) +static const char *h2_conf_set_max_data_frame_len(cmd_parms *cmd, + void *dirconf, const char *value) { - if (value && *value) { - h2_config *cfg = (h2_config *)h2_config_sget(parms->server); - h2_alt_svc *as = h2_alt_svc_parse(value, parms->pool); - if (!as) { - return "unable to parse alt-svc specifier"; - } - if (!cfg->alt_svcs) { - cfg->alt_svcs = apr_array_make(parms->pool, 5, sizeof(h2_alt_svc*)); - } - APR_ARRAY_PUSH(cfg->alt_svcs, h2_alt_svc*) = as; + int val = (int)apr_atoi64(value); + if (val < 0) { + return "value must be 0 or larger"; } - (void)arg; - return NULL; -} - -static const char *h2_conf_set_alt_svc_max_age(cmd_parms *parms, - void *arg, const char *value) -{ - h2_config *cfg = (h2_config *)h2_config_sget(parms->server); - cfg->alt_svc_max_age = (int)apr_atoi64(value); - (void)arg; + CONFIG_CMD_SET(cmd, dirconf, H2_CONF_MAX_DATA_FRAME_LEN, val); return NULL; } -static const char *h2_conf_set_session_extra_files(cmd_parms *parms, - void *arg, const char *value) +static const char *h2_conf_set_session_extra_files(cmd_parms *cmd, + void *dirconf, const char *value) { /* deprecated, ignore */ - (void)arg; + (void)dirconf; (void)value; - ap_log_perror(APLOG_MARK, APLOG_WARNING, 0, parms->pool, /* NO LOGNO */ + ap_log_perror(APLOG_MARK, APLOG_WARNING, 0, cmd->pool, /* NO LOGNO */ "H2SessionExtraFiles is obsolete and will be ignored"); return NULL; } static const char *h2_conf_set_serialize_headers(cmd_parms *parms, - void *arg, const char *value) + void *dirconf, const char *value) { - h2_config *cfg = (h2_config *)h2_config_sget(parms->server); if (!strcasecmp(value, "On")) { - cfg->serialize_headers = 1; + ap_log_error(APLOG_MARK, APLOG_WARNING, 0, parms->server, APLOGNO(10307) + "%s: this feature has been disabled and the directive " + "to enable it is ignored.", parms->cmd->name); + } + return NULL; +} + +static const char *h2_conf_set_direct(cmd_parms *cmd, + void *dirconf, const char *value) +{ + if (!strcasecmp(value, "On")) { + CONFIG_CMD_SET(cmd, dirconf, H2_CONF_DIRECT, 1); return NULL; } else if (!strcasecmp(value, "Off")) { - cfg->serialize_headers = 0; + CONFIG_CMD_SET(cmd, dirconf, H2_CONF_DIRECT, 0); return NULL; } - - (void)arg; return "value must be On or Off"; } -static const char *h2_conf_set_direct(cmd_parms *parms, - void *arg, const char *value) 
+static const char *h2_conf_set_push(cmd_parms *cmd, void *dirconf, const char *value) { - h2_config *cfg = (h2_config *)h2_config_sget(parms->server); if (!strcasecmp(value, "On")) { - cfg->h2_direct = 1; + CONFIG_CMD_SET(cmd, dirconf, H2_CONF_PUSH, 1); return NULL; } else if (!strcasecmp(value, "Off")) { - cfg->h2_direct = 0; + CONFIG_CMD_SET(cmd, dirconf, H2_CONF_PUSH, 0); return NULL; } - - (void)arg; return "value must be On or Off"; } -static const char *h2_conf_set_push(cmd_parms *parms, - void *arg, const char *value) +static const char *h2_conf_set_websockets(cmd_parms *cmd, + void *dirconf, const char *value) { - h2_config *cfg = (h2_config *)h2_config_sget(parms->server); if (!strcasecmp(value, "On")) { - cfg->h2_push = 1; +#if H2_USE_WEBSOCKETS + CONFIG_CMD_SET(cmd, dirconf, H2_CONF_WEBSOCKETS, 1); return NULL; +#elif !H2_USE_PIPES + return "HTTP/2 WebSockets are not supported on this platform"; +#else + return "HTTP/2 WebSockets are not supported in this server version"; +#endif } else if (!strcasecmp(value, "Off")) { - cfg->h2_push = 0; + CONFIG_CMD_SET(cmd, dirconf, H2_CONF_WEBSOCKETS, 0); return NULL; } - - (void)arg; return "value must be On or Off"; } @@ -400,7 +728,8 @@ static const char *h2_conf_add_push_priority(cmd_parms *cmd, void *_cfg, h2_dependency dependency; h2_priority *priority; int weight; - + + (void)_cfg; if (!*ctype) { return "1st argument must be a mime-type, like 'text/css' or '*'"; } @@ -419,7 +748,7 @@ static const char *h2_conf_add_push_priority(cmd_parms *cmd, void *_cfg, else if (!strcasecmp("BEFORE", sdependency)) { dependency = H2_DEPENDANT_BEFORE; if (sweight) { - return "dependecy 'Before' does not allow a weight"; + return "dependency 'Before' does not allow a weight"; } } else if (!strcasecmp("INTERLEAVED", sdependency)) { @@ -443,104 +772,92 @@ static const char *h2_conf_add_push_priority(cmd_parms *cmd, void *_cfg, if (!cfg->priorities) { cfg->priorities = apr_hash_make(cmd->pool); } - apr_hash_set(cfg->priorities, ctype, strlen(ctype), priority); + apr_hash_set(cfg->priorities, ctype, (apr_ssize_t)strlen(ctype), priority); return NULL; } -static const char *h2_conf_set_modern_tls_only(cmd_parms *parms, - void *arg, const char *value) +static const char *h2_conf_set_modern_tls_only(cmd_parms *cmd, + void *dirconf, const char *value) { - h2_config *cfg = (h2_config *)h2_config_sget(parms->server); if (!strcasecmp(value, "On")) { - cfg->modern_tls_only = 1; + CONFIG_CMD_SET(cmd, dirconf, H2_CONF_MODERN_TLS_ONLY, 1); return NULL; } else if (!strcasecmp(value, "Off")) { - cfg->modern_tls_only = 0; + CONFIG_CMD_SET(cmd, dirconf, H2_CONF_MODERN_TLS_ONLY, 0); return NULL; } - - (void)arg; return "value must be On or Off"; } -static const char *h2_conf_set_upgrade(cmd_parms *parms, - void *arg, const char *value) +static const char *h2_conf_set_upgrade(cmd_parms *cmd, + void *dirconf, const char *value) { - h2_config *cfg = (h2_config *)h2_config_sget(parms->server); if (!strcasecmp(value, "On")) { - cfg->h2_upgrade = 1; + CONFIG_CMD_SET(cmd, dirconf, H2_CONF_UPGRADE, 1); return NULL; } else if (!strcasecmp(value, "Off")) { - cfg->h2_upgrade = 0; + CONFIG_CMD_SET(cmd, dirconf, H2_CONF_UPGRADE, 0); return NULL; } - - (void)arg; return "value must be On or Off"; } -static const char *h2_conf_set_tls_warmup_size(cmd_parms *parms, - void *arg, const char *value) +static const char *h2_conf_set_tls_warmup_size(cmd_parms *cmd, + void *dirconf, const char *value) { - h2_config *cfg = (h2_config *)h2_config_sget(parms->server); - cfg->tls_warmup_size = 
apr_atoi64(value); - (void)arg; + apr_int64_t val = apr_atoi64(value); + CONFIG_CMD_SET64(cmd, dirconf, H2_CONF_TLS_WARMUP_SIZE, val); return NULL; } -static const char *h2_conf_set_tls_cooldown_secs(cmd_parms *parms, - void *arg, const char *value) +static const char *h2_conf_set_tls_cooldown_secs(cmd_parms *cmd, + void *dirconf, const char *value) { - h2_config *cfg = (h2_config *)h2_config_sget(parms->server); - cfg->tls_cooldown_secs = (int)apr_atoi64(value); - (void)arg; + apr_int64_t val = (int)apr_atoi64(value); + CONFIG_CMD_SET64(cmd, dirconf, H2_CONF_TLS_COOLDOWN_SECS, val); return NULL; } -static const char *h2_conf_set_push_diary_size(cmd_parms *parms, - void *arg, const char *value) +static const char *h2_conf_set_push_diary_size(cmd_parms *cmd, + void *dirconf, const char *value) { - h2_config *cfg = (h2_config *)h2_config_sget(parms->server); - (void)arg; - cfg->push_diary_size = (int)apr_atoi64(value); - if (cfg->push_diary_size < 0) { + int val = (int)apr_atoi64(value); + if (val < 0) { return "value must be >= 0"; } - if (cfg->push_diary_size > 0 && (cfg->push_diary_size & (cfg->push_diary_size-1))) { + if (val > 0 && (val & (val-1))) { return "value must a power of 2"; } - if (cfg->push_diary_size > (1 << 15)) { + if (val > (1 << 15)) { return "value must <= 65536"; } + CONFIG_CMD_SET(cmd, dirconf, H2_CONF_PUSH_DIARY_SIZE, val); return NULL; } -static const char *h2_conf_set_copy_files(cmd_parms *parms, - void *arg, const char *value) +static const char *h2_conf_set_copy_files(cmd_parms *cmd, + void *dirconf, const char *value) { - h2_config *cfg = (h2_config *)arg; if (!strcasecmp(value, "On")) { - cfg->copy_files = 1; + CONFIG_CMD_SET(cmd, dirconf, H2_CONF_COPY_FILES, 1); return NULL; } else if (!strcasecmp(value, "Off")) { - cfg->copy_files = 0; + CONFIG_CMD_SET(cmd, dirconf, H2_CONF_COPY_FILES, 0); return NULL; } - - (void)arg; return "value must be On or Off"; } -static void add_push(apr_pool_t *pool, h2_config *conf, h2_push_res *push) +static void add_push(apr_array_header_t **plist, apr_pool_t *pool, h2_push_res *push) { h2_push_res *new; - if (!conf->push_list) { - conf->push_list = apr_array_make(pool, 10, sizeof(*push)); + if (!*plist) { + *plist = apr_array_make(pool, 10, sizeof(*push)); } - new = apr_array_push(conf->push_list); + new = apr_array_push(*plist); new->uri_ref = push->uri_ref; new->critical = push->critical; } @@ -549,8 +866,6 @@ static const char *h2_conf_add_push_res(cmd_parms *cmd, void *dirconf, const char *arg1, const char *arg2, const char *arg3) { - h2_config *dconf = (h2_config*)dirconf ; - h2_config *sconf = (h2_config*)h2_config_sget(cmd->server); h2_push_res push; const char *last = arg3; @@ -575,57 +890,136 @@ static const char *h2_conf_add_push_res(cmd_parms *cmd, void *dirconf, } } - /* server command? 
set both */ - if (cmd->path == NULL) { - add_push(cmd->pool, sconf, &push); - add_push(cmd->pool, dconf, &push); + if (cmd->path) { + add_push(&(((h2_dir_config*)dirconf)->push_list), cmd->pool, &push); } else { - add_push(cmd->pool, dconf, &push); + add_push(&(h2_config_sget(cmd->server)->push_list), cmd->pool, &push); } + return NULL; +} + +static const char *h2_conf_add_early_hint(cmd_parms *cmd, void *dirconf, + const char *name, const char *value) +{ + apr_table_t *hds, **phds; + + if(!name || !*name) + return "Early Hint header name must not be empty"; + if(!value) + return "Early Hint header value must not be empty"; + while (apr_isspace(*value)) + ++value; + if(!*value) + return "Early Hint header value must not be empty/only space"; + if (*ap_scan_http_field_content(value)) + return "Early Hint header value contains invalid characters"; + + if (cmd->path) { + phds = &((h2_dir_config*)dirconf)->early_headers; + } + else { + phds = &(h2_config_sget(cmd->server))->early_headers; + } + hds = *phds; + if (!hds) { + *phds = hds = apr_table_make(cmd->pool, 10); + } + apr_table_add(hds, name, value); return NULL; } -static const char *h2_conf_set_early_hints(cmd_parms *parms, - void *arg, const char *value) +static const char *h2_conf_set_early_hints(cmd_parms *cmd, + void *dirconf, const char *value) +{ + int val; + + if (!strcasecmp(value, "On")) val = 1; + else if (!strcasecmp(value, "Off")) val = 0; + else return "value must be On or Off"; + + CONFIG_CMD_SET(cmd, dirconf, H2_CONF_EARLY_HINTS, val); + if (cmd->path) { + ap_log_perror(APLOG_MARK, APLOG_WARNING, 0, cmd->pool, + "H2EarlyHints = %d on path %s", val, cmd->path); + } + return NULL; +} + +static const char *h2_conf_set_padding(cmd_parms *cmd, void *dirconf, const char *value) +{ + int val; + + val = (int)apr_atoi64(value); + if (val < 0) { + return "number of bits must be >= 0"; + } + if (val > 8) { + return "number of bits must be <= 8"; + } + CONFIG_CMD_SET(cmd, dirconf, H2_CONF_PADDING_BITS, val); + return NULL; +} + +static const char *h2_conf_set_output_buffer(cmd_parms *cmd, + void *dirconf, const char *value) { - h2_config *cfg = (h2_config *)h2_config_sget(parms->server); if (!strcasecmp(value, "On")) { - cfg->early_hints = 1; + CONFIG_CMD_SET(cmd, dirconf, H2_CONF_OUTPUT_BUFFER, 1); return NULL; } else if (!strcasecmp(value, "Off")) { - cfg->early_hints = 0; + CONFIG_CMD_SET(cmd, dirconf, H2_CONF_OUTPUT_BUFFER, 0); return NULL; } - - (void)arg; return "value must be On or Off"; } -void h2_get_num_workers(server_rec *s, int *minw, int *maxw) +static const char *h2_conf_set_stream_timeout(cmd_parms *cmd, + void *dirconf, const char *value) +{ + apr_status_t rv; + apr_interval_time_t timeout; + + rv = ap_timeout_parameter_parse(value, &timeout, "s"); + if (rv != APR_SUCCESS) { + return "Invalid timeout value"; + } + CONFIG_CMD_SET64(cmd, dirconf, H2_CONF_STREAM_TIMEOUT, timeout); + return NULL; +} + +static const char *h2_conf_set_proxy_requests(cmd_parms *cmd, + void *dirconf, const char *value) +{ + if (!strcasecmp(value, "On")) { + CONFIG_CMD_SET(cmd, dirconf, H2_CONF_PROXY_REQUESTS, 1); + return NULL; + } + else if (!strcasecmp(value, "Off")) { + CONFIG_CMD_SET(cmd, dirconf, H2_CONF_PROXY_REQUESTS, 0); + return NULL; + } + return "value must be On or Off"; +} + +void h2_get_workers_config(server_rec *s, int *pminw, int *pmaxw, + apr_time_t *pidle_limit) { int threads_per_child = 0; - const h2_config *config = h2_config_sget(s); - *minw = h2_config_geti(config, H2_CONF_MIN_WORKERS); - *maxw = h2_config_geti(config, 
H2_CONF_MAX_WORKERS); - ap_mpm_query(AP_MPMQ_MAX_THREADS, &threads_per_child); + *pminw = h2_config_sgeti(s, H2_CONF_MIN_WORKERS); + *pmaxw = h2_config_sgeti(s, H2_CONF_MAX_WORKERS); - if (*minw <= 0) { - *minw = threads_per_child; - } - if (*maxw <= 0) { - /* As a default, this seems to work quite well under mpm_event. - * For people enabling http2 under mpm_prefork, start 4 threads unless - * configured otherwise. People get unhappy if their http2 requests are - * blocking each other. */ - *maxw = 3 * (*minw) / 2; - if (*maxw < 4) { - *maxw = 4; - } + ap_mpm_query(AP_MPMQ_MAX_THREADS, &threads_per_child); + if (*pminw <= 0) { + *pminw = threads_per_child; + } + if (*pmaxw <= 0) { + *pmaxw = H2MAX(4, 3 * (*pminw) / 2); } + *pidle_limit = h2_config_sgeti64(s, H2_CONF_MAX_WORKER_IDLE_LIMIT); } #define AP_END_CMD AP_INIT_TAKE1(NULL, NULL, NULL, RSRC_CONF, NULL) @@ -639,20 +1033,16 @@ const command_rec h2_cmds[] = { RSRC_CONF, "minimum number of worker threads per child"), AP_INIT_TAKE1("H2MaxWorkers", h2_conf_set_max_workers, NULL, RSRC_CONF, "maximum number of worker threads per child"), - AP_INIT_TAKE1("H2MaxWorkerIdleSeconds", h2_conf_set_max_worker_idle_secs, NULL, + AP_INIT_TAKE1("H2MaxWorkerIdleSeconds", h2_conf_set_max_worker_idle_limit, NULL, RSRC_CONF, "maximum number of idle seconds before a worker shuts down"), AP_INIT_TAKE1("H2StreamMaxMemSize", h2_conf_set_stream_max_mem_size, NULL, RSRC_CONF, "maximum number of bytes buffered in memory for a stream"), - AP_INIT_TAKE1("H2AltSvc", h2_add_alt_svc, NULL, - RSRC_CONF, "adds an Alt-Svc for this server"), - AP_INIT_TAKE1("H2AltSvcMaxAge", h2_conf_set_alt_svc_max_age, NULL, - RSRC_CONF, "set the maximum age (in seconds) that client can rely on alt-svc information"), AP_INIT_TAKE1("H2SerializeHeaders", h2_conf_set_serialize_headers, NULL, - RSRC_CONF, "on to enable header serialization for compatibility"), + RSRC_CONF, "disabled, this directive has no longer an effect."), AP_INIT_TAKE1("H2ModernTLSOnly", h2_conf_set_modern_tls_only, NULL, RSRC_CONF, "off to not impose RFC 7540 restrictions on TLS"), AP_INIT_TAKE1("H2Upgrade", h2_conf_set_upgrade, NULL, - RSRC_CONF, "on to allow HTTP/1 Upgrades to h2/h2c"), + RSRC_CONF|OR_AUTHCFG, "on to allow HTTP/1 Upgrades to h2/h2c"), AP_INIT_TAKE1("H2Direct", h2_conf_set_direct, NULL, RSRC_CONF, "on to enable direct HTTP/2 mode"), AP_INIT_TAKE1("H2SessionExtraFiles", h2_conf_set_session_extra_files, NULL, @@ -662,7 +1052,7 @@ const command_rec h2_cmds[] = { AP_INIT_TAKE1("H2TLSCoolDownSecs", h2_conf_set_tls_cooldown_secs, NULL, RSRC_CONF, "seconds of idle time on TLS before shrinking writes"), AP_INIT_TAKE1("H2Push", h2_conf_set_push, NULL, - RSRC_CONF, "off to disable HTTP/2 server push"), + RSRC_CONF|OR_AUTHCFG, "off to disable HTTP/2 server push"), AP_INIT_TAKE23("H2PushPriority", h2_conf_add_push_priority, NULL, RSRC_CONF, "define priority of PUSHed resources per content type"), AP_INIT_TAKE1("H2PushDiarySize", h2_conf_set_push_diary_size, NULL, @@ -670,33 +1060,24 @@ const command_rec h2_cmds[] = { AP_INIT_TAKE1("H2CopyFiles", h2_conf_set_copy_files, NULL, OR_FILEINFO, "on to perform copy of file data"), AP_INIT_TAKE123("H2PushResource", h2_conf_add_push_res, NULL, - OR_FILEINFO, "add a resource to be pushed in this location/on this server."), + OR_FILEINFO|OR_AUTHCFG, "add a resource to be pushed in this location/on this server."), AP_INIT_TAKE1("H2EarlyHints", h2_conf_set_early_hints, NULL, RSRC_CONF, "on to enable interim status 103 responses"), + AP_INIT_TAKE1("H2Padding", 
h2_conf_set_padding, NULL, + RSRC_CONF, "set payload padding"), + AP_INIT_TAKE1("H2OutputBuffering", h2_conf_set_output_buffer, NULL, + RSRC_CONF, "set stream output buffer on/off"), + AP_INIT_TAKE1("H2StreamTimeout", h2_conf_set_stream_timeout, NULL, + RSRC_CONF, "set stream timeout"), + AP_INIT_TAKE1("H2MaxDataFrameLen", h2_conf_set_max_data_frame_len, NULL, + RSRC_CONF, "maximum number of bytes in a single HTTP/2 DATA frame"), + AP_INIT_TAKE2("H2EarlyHint", h2_conf_add_early_hint, NULL, + OR_FILEINFO|OR_AUTHCFG, "add a a 'Link:' header for a 103 Early Hints response."), + AP_INIT_TAKE1("H2ProxyRequests", h2_conf_set_proxy_requests, NULL, + OR_FILEINFO, "Enables forward proxy requests via HTTP/2"), + AP_INIT_TAKE1("H2WebSockets", h2_conf_set_websockets, NULL, + RSRC_CONF, "off to disable WebSockets over HTTP/2"), AP_END_CMD }; -const h2_config *h2_config_rget(request_rec *r) -{ - h2_config *cfg = (h2_config *)ap_get_module_config(r->per_dir_config, - &http2_module); - return cfg? cfg : h2_config_sget(r->server); -} - -const h2_config *h2_config_get(conn_rec *c) -{ - h2_ctx *ctx = h2_ctx_get(c, 0); - - if (ctx) { - if (ctx->config) { - return ctx->config; - } - else if (ctx->server) { - ctx->config = h2_config_sget(ctx->server); - return ctx->config; - } - } - - return h2_config_sget(c->base_server); -} diff --git a/modules/http2/h2_config.h b/modules/http2/h2_config.h index 17d75d6..15242db 100644 --- a/modules/http2/h2_config.h +++ b/modules/http2/h2_config.h @@ -28,11 +28,8 @@ typedef enum { H2_CONF_WIN_SIZE, H2_CONF_MIN_WORKERS, H2_CONF_MAX_WORKERS, - H2_CONF_MAX_WORKER_IDLE_SECS, + H2_CONF_MAX_WORKER_IDLE_LIMIT, H2_CONF_STREAM_MAX_MEM, - H2_CONF_ALT_SVCS, - H2_CONF_ALT_SVC_MAX_AGE, - H2_CONF_SER_HEADERS, H2_CONF_DIRECT, H2_CONF_MODERN_TLS_ONLY, H2_CONF_UPGRADE, @@ -42,6 +39,13 @@ typedef enum { H2_CONF_PUSH_DIARY_SIZE, H2_CONF_COPY_FILES, H2_CONF_EARLY_HINTS, + H2_CONF_PADDING_BITS, + H2_CONF_PADDING_ALWAYS, + H2_CONF_OUTPUT_BUFFER, + H2_CONF_STREAM_TIMEOUT, + H2_CONF_MAX_DATA_FRAME_LEN, + H2_CONF_PROXY_REQUESTS, + H2_CONF_WEBSOCKETS, } h2_config_var_t; struct apr_hash_t; @@ -53,33 +57,6 @@ typedef struct h2_push_res { int critical; } h2_push_res; -/* Apache httpd module configuration for h2. 
*/ -typedef struct h2_config { - const char *name; - int h2_max_streams; /* max concurrent # streams (http2) */ - int h2_window_size; /* stream window size (http2) */ - int min_workers; /* min # of worker threads/child */ - int max_workers; /* max # of worker threads/child */ - int max_worker_idle_secs; /* max # of idle seconds for worker */ - int stream_max_mem_size; /* max # bytes held in memory/stream */ - apr_array_header_t *alt_svcs; /* h2_alt_svc specs for this server */ - int alt_svc_max_age; /* seconds clients can rely on alt-svc info*/ - int serialize_headers; /* Use serialized HTTP/1.1 headers for - processing, better compatibility */ - int h2_direct; /* if mod_h2 is active directly */ - int modern_tls_only; /* Accept only modern TLS in HTTP/2 connections */ - int h2_upgrade; /* Allow HTTP/1 upgrade to h2/h2c */ - apr_int64_t tls_warmup_size; /* Amount of TLS data to send before going full write size */ - int tls_cooldown_secs; /* Seconds of idle time before going back to small TLS records */ - int h2_push; /* if HTTP/2 server push is enabled */ - struct apr_hash_t *priorities;/* map of content-type to h2_priority records */ - - int push_diary_size; /* # of entries in push diary */ - int copy_files; /* if files shall be copied vs setaside on output */ - apr_array_header_t *push_list;/* list of h2_push_res configurations */ - int early_hints; /* support status code 103 */ -} h2_config; - void *h2_config_create_dir(apr_pool_t *pool, char *x); void *h2_config_merge_dir(apr_pool_t *pool, void *basev, void *addv); @@ -88,19 +65,38 @@ void *h2_config_merge_svr(apr_pool_t *pool, void *basev, void *addv); extern const command_rec h2_cmds[]; -const h2_config *h2_config_get(conn_rec *c); -const h2_config *h2_config_sget(server_rec *s); -const h2_config *h2_config_rget(request_rec *r); +int h2_config_geti(request_rec *r, server_rec *s, h2_config_var_t var); +apr_int64_t h2_config_geti64(request_rec *r, server_rec *s, h2_config_var_t var); + +/** + * Get the configured value for variable at the given connection. + */ +int h2_config_cgeti(conn_rec *c, h2_config_var_t var); +apr_int64_t h2_config_cgeti64(conn_rec *c, h2_config_var_t var); + +/** + * Get the configured value for variable at the given server. + */ +int h2_config_sgeti(server_rec *s, h2_config_var_t var); +apr_int64_t h2_config_sgeti64(server_rec *s, h2_config_var_t var); + +/** + * Get the configured value for variable at the given request, + * if configured for the request location. + * Fallback to request server config otherwise. 
+ */ +int h2_config_rgeti(request_rec *r, h2_config_var_t var); +apr_int64_t h2_config_rgeti64(request_rec *r, h2_config_var_t var); -int h2_config_geti(const h2_config *conf, h2_config_var_t var); -apr_int64_t h2_config_geti64(const h2_config *conf, h2_config_var_t var); +apr_array_header_t *h2_config_push_list(request_rec *r); +apr_table_t *h2_config_early_headers(request_rec *r); -void h2_get_num_workers(server_rec *s, int *minw, int *maxw); +void h2_get_workers_config(server_rec *s, int *pminw, int *pmaxw, + apr_time_t *pidle_limit); void h2_config_init(apr_pool_t *pool); -const struct h2_priority *h2_config_get_priority(const h2_config *conf, - const char *content_type); +const struct h2_priority *h2_cconfig_get_priority(conn_rec *c, const char *content_type); #endif /* __mod_h2__h2_config_h__ */ diff --git a/modules/http2/h2_conn.c b/modules/http2/h2_conn.c deleted file mode 100644 index 88da2ba..0000000 --- a/modules/http2/h2_conn.c +++ /dev/null @@ -1,370 +0,0 @@ -/* Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include -#include - -#include - -#include -#include -#include -#include -#include -#include -#include - -#include - -#include "h2_private.h" -#include "h2.h" -#include "h2_config.h" -#include "h2_ctx.h" -#include "h2_filter.h" -#include "h2_mplx.h" -#include "h2_session.h" -#include "h2_stream.h" -#include "h2_h2.h" -#include "h2_task.h" -#include "h2_workers.h" -#include "h2_conn.h" -#include "h2_version.h" - -static struct h2_workers *workers; - -static h2_mpm_type_t mpm_type = H2_MPM_UNKNOWN; -static module *mpm_module; -static int async_mpm; -static int mpm_supported = 1; -static apr_socket_t *dummy_socket; - -static void check_modules(int force) -{ - static int checked = 0; - int i; - - if (force || !checked) { - for (i = 0; ap_loaded_modules[i]; ++i) { - module *m = ap_loaded_modules[i]; - - if (!strcmp("event.c", m->name)) { - mpm_type = H2_MPM_EVENT; - mpm_module = m; - break; - } - else if (!strcmp("motorz.c", m->name)) { - mpm_type = H2_MPM_MOTORZ; - mpm_module = m; - break; - } - else if (!strcmp("mpm_netware.c", m->name)) { - mpm_type = H2_MPM_NETWARE; - mpm_module = m; - break; - } - else if (!strcmp("prefork.c", m->name)) { - mpm_type = H2_MPM_PREFORK; - mpm_module = m; - /* While http2 can work really well on prefork, it collides - * today's use case for prefork: runnning single-thread app engines - * like php. If we restrict h2_workers to 1 per process, php will - * work fine, but browser will be limited to 1 active request at a - * time. 
*/ - mpm_supported = 0; - break; - } - else if (!strcmp("simple_api.c", m->name)) { - mpm_type = H2_MPM_SIMPLE; - mpm_module = m; - mpm_supported = 0; - break; - } - else if (!strcmp("mpm_winnt.c", m->name)) { - mpm_type = H2_MPM_WINNT; - mpm_module = m; - break; - } - else if (!strcmp("worker.c", m->name)) { - mpm_type = H2_MPM_WORKER; - mpm_module = m; - break; - } - } - checked = 1; - } -} - -apr_status_t h2_conn_child_init(apr_pool_t *pool, server_rec *s) -{ - const h2_config *config = h2_config_sget(s); - apr_status_t status = APR_SUCCESS; - int minw, maxw; - int max_threads_per_child = 0; - int idle_secs = 0; - - check_modules(1); - ap_mpm_query(AP_MPMQ_MAX_THREADS, &max_threads_per_child); - - status = ap_mpm_query(AP_MPMQ_IS_ASYNC, &async_mpm); - if (status != APR_SUCCESS) { - /* some MPMs do not implemnent this */ - async_mpm = 0; - status = APR_SUCCESS; - } - - h2_config_init(pool); - - h2_get_num_workers(s, &minw, &maxw); - - idle_secs = h2_config_geti(config, H2_CONF_MAX_WORKER_IDLE_SECS); - ap_log_error(APLOG_MARK, APLOG_TRACE3, 0, s, - "h2_workers: min=%d max=%d, mthrpchild=%d, idle_secs=%d", - minw, maxw, max_threads_per_child, idle_secs); - workers = h2_workers_create(s, pool, minw, maxw, idle_secs); - - ap_register_input_filter("H2_IN", h2_filter_core_input, - NULL, AP_FTYPE_CONNECTION); - - status = h2_mplx_child_init(pool, s); - - if (status == APR_SUCCESS) { - status = apr_socket_create(&dummy_socket, APR_INET, SOCK_STREAM, - APR_PROTO_TCP, pool); - } - - return status; -} - -h2_mpm_type_t h2_conn_mpm_type(void) -{ - check_modules(0); - return mpm_type; -} - -const char *h2_conn_mpm_name(void) -{ - check_modules(0); - return mpm_module? mpm_module->name : "unknown"; -} - -int h2_mpm_supported(void) -{ - check_modules(0); - return mpm_supported; -} - -static module *h2_conn_mpm_module(void) -{ - check_modules(0); - return mpm_module; -} - -apr_status_t h2_conn_setup(h2_ctx *ctx, conn_rec *c, request_rec *r) -{ - h2_session *session; - apr_status_t status; - - if (!workers) { - ap_log_cerror(APLOG_MARK, APLOG_ERR, 0, c, APLOGNO(02911) - "workers not initialized"); - return APR_EGENERAL; - } - - if (r) { - status = h2_session_rcreate(&session, r, ctx, workers); - } - else { - status = h2_session_create(&session, c, ctx, workers); - } - - if (status == APR_SUCCESS) { - h2_ctx_session_set(ctx, session); - } - return status; -} - -apr_status_t h2_conn_run(struct h2_ctx *ctx, conn_rec *c) -{ - apr_status_t status; - int mpm_state = 0; - h2_session *session = h2_ctx_session_get(ctx); - - ap_assert(session); - do { - if (c->cs) { - c->cs->sense = CONN_SENSE_DEFAULT; - c->cs->state = CONN_STATE_HANDLER; - } - - status = h2_session_process(session, async_mpm); - - if (APR_STATUS_IS_EOF(status)) { - ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, c, - H2_SSSN_LOG(APLOGNO(03045), session, - "process, closing conn")); - c->keepalive = AP_CONN_CLOSE; - } - else { - c->keepalive = AP_CONN_KEEPALIVE; - } - - if (ap_mpm_query(AP_MPMQ_MPM_STATE, &mpm_state)) { - break; - } - } while (!async_mpm - && c->keepalive == AP_CONN_KEEPALIVE - && mpm_state != AP_MPMQ_STOPPING); - - if (c->cs) { - switch (session->state) { - case H2_SESSION_ST_INIT: - case H2_SESSION_ST_IDLE: - case H2_SESSION_ST_BUSY: - case H2_SESSION_ST_WAIT: - c->cs->state = CONN_STATE_WRITE_COMPLETION; - break; - case H2_SESSION_ST_CLEANUP: - case H2_SESSION_ST_DONE: - default: - c->cs->state = CONN_STATE_LINGER; - break; - } - } - - return APR_SUCCESS; -} - -apr_status_t h2_conn_pre_close(struct h2_ctx *ctx, conn_rec *c) -{ - 
h2_session *session = h2_ctx_session_get(ctx); - if (session) { - apr_status_t status = h2_session_pre_close(session, async_mpm); - return (status == APR_SUCCESS)? DONE : status; - } - return DONE; -} - -conn_rec *h2_slave_create(conn_rec *master, int slave_id, apr_pool_t *parent) -{ - apr_allocator_t *allocator; - apr_status_t status; - apr_pool_t *pool; - conn_rec *c; - void *cfg; - module *mpm; - - ap_assert(master); - ap_log_cerror(APLOG_MARK, APLOG_TRACE3, 0, master, - "h2_stream(%ld-%d): create slave", master->id, slave_id); - - /* We create a pool with its own allocator to be used for - * processing a request. This is the only way to have the processing - * independant of its parent pool in the sense that it can work in - * another thread. Also, the new allocator needs its own mutex to - * synchronize sub-pools. - */ - apr_allocator_create(&allocator); - apr_allocator_max_free_set(allocator, ap_max_mem_free); - status = apr_pool_create_ex(&pool, parent, NULL, allocator); - if (status != APR_SUCCESS) { - ap_log_cerror(APLOG_MARK, APLOG_ERR, status, master, - APLOGNO(10004) "h2_session(%ld-%d): create slave pool", - master->id, slave_id); - return NULL; - } - apr_allocator_owner_set(allocator, pool); - apr_pool_tag(pool, "h2_slave_conn"); - - c = (conn_rec *) apr_palloc(pool, sizeof(conn_rec)); - if (c == NULL) { - ap_log_cerror(APLOG_MARK, APLOG_ERR, APR_ENOMEM, master, - APLOGNO(02913) "h2_session(%ld-%d): create slave", - master->id, slave_id); - apr_pool_destroy(pool); - return NULL; - } - - memcpy(c, master, sizeof(conn_rec)); - - c->master = master; - c->pool = pool; - c->conn_config = ap_create_conn_config(pool); - c->notes = apr_table_make(pool, 5); - c->input_filters = NULL; - c->output_filters = NULL; - c->bucket_alloc = apr_bucket_alloc_create(pool); - c->data_in_input_filters = 0; - c->data_in_output_filters = 0; - /* prevent mpm_event from making wrong assumptions about this connection, - * like e.g. using its socket for an async read check. */ - c->clogging_input_filters = 1; - c->log = NULL; - c->log_id = apr_psprintf(pool, "%ld-%d", - master->id, slave_id); - c->aborted = 0; - /* We cannot install the master connection socket on the slaves, as - * modules mess with timeouts/blocking of the socket, with - * unwanted side effects to the master connection processing. - * Fortunately, since we never use the slave socket, we can just install - * a single, process-wide dummy and everyone is happy. - */ - ap_set_module_config(c->conn_config, &core_module, dummy_socket); - /* TODO: these should be unique to this thread */ - c->sbh = master->sbh; - /* TODO: not all mpm modules have learned about slave connections yet. - * copy their config from master to slave. - */ - if ((mpm = h2_conn_mpm_module()) != NULL) { - cfg = ap_get_module_config(master->conn_config, mpm); - ap_set_module_config(c->conn_config, mpm, cfg); - } - - ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, c, - "h2_stream(%ld-%d): created slave", master->id, slave_id); - return c; -} - -void h2_slave_destroy(conn_rec *slave) -{ - ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, slave, - "h2_stream(%s): destroy slave", - apr_table_get(slave->notes, H2_TASK_ID_NOTE)); - slave->sbh = NULL; - apr_pool_destroy(slave->pool); -} - -apr_status_t h2_slave_run_pre_connection(conn_rec *slave, apr_socket_t *csd) -{ - if (slave->keepalives == 0) { - /* Simulate that we had already a request on this connection. Some - * hooks trigger special behaviour when keepalives is 0. - * (Not necessarily in pre_connection, but later. 
Set it here, so it - * is in place.) */ - slave->keepalives = 1; - /* We signal that this connection will be closed after the request. - * Which is true in that sense that we throw away all traffic data - * on this slave connection after each requests. Although we might - * reuse internal structures like memory pools. - * The wanted effect of this is that httpd does not try to clean up - * any dangling data on this connection when a request is done. Which - * is unneccessary on a h2 stream. - */ - slave->keepalive = AP_CONN_CLOSE; - return ap_run_pre_connection(slave, csd); - } - return APR_SUCCESS; -} - diff --git a/modules/http2/h2_conn.h b/modules/http2/h2_conn.h deleted file mode 100644 index e45ff31..0000000 --- a/modules/http2/h2_conn.h +++ /dev/null @@ -1,77 +0,0 @@ -/* Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef __mod_h2__h2_conn__ -#define __mod_h2__h2_conn__ - -struct h2_ctx; -struct h2_task; - -/** - * Setup the connection and our context for HTTP/2 processing - * - * @param ctx the http2 context to setup - * @param c the connection HTTP/2 is starting on - * @param r the upgrade request that still awaits an answer, optional - */ -apr_status_t h2_conn_setup(struct h2_ctx *ctx, conn_rec *c, request_rec *r); - -/** - * Run the HTTP/2 connection in synchronous fashion. - * Return when the HTTP/2 session is done - * and the connection will close or a fatal error occurred. - * - * @param ctx the http2 context to run - * @return APR_SUCCESS when session is done. - */ -apr_status_t h2_conn_run(struct h2_ctx *ctx, conn_rec *c); - -/** - * The connection is about to close. If we have not send a GOAWAY - * yet, this is the last chance. - */ -apr_status_t h2_conn_pre_close(struct h2_ctx *ctx, conn_rec *c); - -/* Initialize this child process for h2 connection work, - * to be called once during child init before multi processing - * starts. 
- */ -apr_status_t h2_conn_child_init(apr_pool_t *pool, server_rec *s); - - -typedef enum { - H2_MPM_UNKNOWN, - H2_MPM_WORKER, - H2_MPM_EVENT, - H2_MPM_PREFORK, - H2_MPM_MOTORZ, - H2_MPM_SIMPLE, - H2_MPM_NETWARE, - H2_MPM_WINNT, -} h2_mpm_type_t; - -/* Returns the type of MPM module detected */ -h2_mpm_type_t h2_conn_mpm_type(void); -const char *h2_conn_mpm_name(void); -int h2_mpm_supported(void); - -conn_rec *h2_slave_create(conn_rec *master, int slave_id, apr_pool_t *parent); -void h2_slave_destroy(conn_rec *slave); - -apr_status_t h2_slave_run_pre_connection(conn_rec *slave, apr_socket_t *csd); -void h2_slave_run_connection(conn_rec *slave); - -#endif /* defined(__mod_h2__h2_conn__) */ diff --git a/modules/http2/h2_conn_ctx.c b/modules/http2/h2_conn_ctx.c new file mode 100644 index 0000000..b8a0fb3 --- /dev/null +++ b/modules/http2/h2_conn_ctx.c @@ -0,0 +1,123 @@ +/* Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include +#include +#include + +#include +#include +#include +#include +#include + +#include "h2_private.h" +#include "h2_session.h" +#include "h2_bucket_beam.h" +#include "h2_c2.h" +#include "h2_mplx.h" +#include "h2_stream.h" +#include "h2_util.h" +#include "h2_conn_ctx.h" + + +void h2_conn_ctx_detach(conn_rec *c) +{ + ap_set_module_config(c->conn_config, &http2_module, NULL); +} + +static h2_conn_ctx_t *ctx_create(conn_rec *c, const char *id) +{ + h2_conn_ctx_t *conn_ctx = apr_pcalloc(c->pool, sizeof(*conn_ctx)); + conn_ctx->id = id; + conn_ctx->server = c->base_server; + apr_atomic_set32(&conn_ctx->started, 1); + conn_ctx->started_at = apr_time_now(); + + ap_set_module_config(c->conn_config, &http2_module, conn_ctx); + return conn_ctx; +} + +h2_conn_ctx_t *h2_conn_ctx_create_for_c1(conn_rec *c1, server_rec *s, const char *protocol) +{ + h2_conn_ctx_t *ctx; + + ctx = ctx_create(c1, apr_psprintf(c1->pool, "%ld", c1->id)); + ctx->server = s; + ctx->protocol = apr_pstrdup(c1->pool, protocol); + + ctx->pfd.desc_type = APR_POLL_SOCKET; + ctx->pfd.desc.s = ap_get_conn_socket(c1); + ctx->pfd.reqevents = APR_POLLIN | APR_POLLERR | APR_POLLHUP; + ctx->pfd.client_data = ctx; + apr_socket_opt_set(ctx->pfd.desc.s, APR_SO_NONBLOCK, 1); + + return ctx; +} + +void h2_conn_ctx_assign_session(h2_conn_ctx_t *ctx, struct h2_session *session) +{ + ctx->session = session; + ctx->id = apr_psprintf(session->pool, "%d-%lu", session->child_num, (unsigned long)session->id); +} + +apr_status_t h2_conn_ctx_init_for_c2(h2_conn_ctx_t **pctx, conn_rec *c2, + struct h2_mplx *mplx, struct h2_stream *stream, + struct h2_c2_transit *transit) +{ + h2_conn_ctx_t *conn_ctx; + apr_status_t rv = APR_SUCCESS; + + ap_assert(c2->master); + conn_ctx = h2_conn_ctx_get(c2); + if (!conn_ctx) { + h2_conn_ctx_t *c1_ctx; + + c1_ctx = h2_conn_ctx_get(c2->master); + 
ap_assert(c1_ctx); + ap_assert(c1_ctx->session); + + conn_ctx = ctx_create(c2, c1_ctx->id); + conn_ctx->server = c2->master->base_server; + } + + conn_ctx->mplx = mplx; + conn_ctx->transit = transit; + conn_ctx->stream_id = stream->id; + apr_pool_create(&conn_ctx->req_pool, c2->pool); + apr_pool_tag(conn_ctx->req_pool, "H2_C2_REQ"); + conn_ctx->request = stream->request; + apr_atomic_set32(&conn_ctx->started, 1); + conn_ctx->started_at = apr_time_now(); + conn_ctx->done = 0; + conn_ctx->done_at = 0; + + *pctx = conn_ctx; + return rv; +} + +void h2_conn_ctx_set_timeout(h2_conn_ctx_t *conn_ctx, apr_interval_time_t timeout) +{ + if (conn_ctx->beam_out) { + h2_beam_timeout_set(conn_ctx->beam_out, timeout); + } + if (conn_ctx->beam_in) { + h2_beam_timeout_set(conn_ctx->beam_in, timeout); + } + if (conn_ctx->pipe_in[H2_PIPE_OUT]) { + apr_file_pipe_timeout_set(conn_ctx->pipe_in[H2_PIPE_OUT], timeout); + } +} diff --git a/modules/http2/h2_conn_ctx.h b/modules/http2/h2_conn_ctx.h new file mode 100644 index 0000000..3b44856 --- /dev/null +++ b/modules/http2/h2_conn_ctx.h @@ -0,0 +1,100 @@ +/* Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef __mod_h2__h2_conn_ctx__ +#define __mod_h2__h2_conn_ctx__ + +#include "h2.h" + +struct h2_session; +struct h2_stream; +struct h2_mplx; +struct h2_bucket_beam; +struct h2_response_parser; +struct h2_c2_transit; + +#define H2_PIPE_OUT 0 +#define H2_PIPE_IN 1 + +/** + * The h2 module context associated with a connection. + * + * It keeps track of the different types of connections: + * - those from clients that use HTTP/2 protocol + * - those from clients that do not use HTTP/2 + * - those created by ourself to perform work on HTTP/2 streams + */ +struct h2_conn_ctx_t { + const char *id; /* c*: our identifier of this connection */ + server_rec *server; /* c*: httpd server selected. */ + const char *protocol; /* c1: the protocol negotiated */ + struct h2_session *session; /* c1: the h2 session established */ + struct h2_mplx *mplx; /* c2: the multiplexer */ + struct h2_c2_transit *transit; /* c2: transit pool and bucket_alloc */ + +#if !AP_HAS_RESPONSE_BUCKETS + int pre_conn_done; /* has pre_connection setup run? 
*/ +#endif + int stream_id; /* c1: 0, c2: stream id processed */ + apr_pool_t *req_pool; /* c2: a c2 child pool for a request */ + const struct h2_request *request; /* c2: the request to process */ + struct h2_bucket_beam *beam_out; /* c2: data out, created from req_pool */ + struct h2_bucket_beam *beam_in; /* c2: data in or NULL, borrowed from request stream */ + unsigned input_chunked:1; /* c2: if input needs HTTP/1.1 chunking applied */ + unsigned is_upgrade:1; /* c2: if request is an HTTP Upgrade */ + + apr_file_t *pipe_in[2]; /* c2: input produced notification pipe */ + apr_pollfd_t pfd; /* c1: poll socket input, c2: NULL */ + + int has_final_response; /* final HTTP response passed on out */ + apr_status_t last_err; /* APR_SUCCESS or last error encountered in filters */ + + apr_off_t bytes_sent; /* c2: bytes actually sent via c1 */ + /* atomic */ apr_uint32_t started; /* c2: processing was started */ + apr_time_t started_at; /* c2: when processing started */ + /* atomic */ apr_uint32_t done; /* c2: processing has finished */ + apr_time_t done_at; /* c2: when processing was done */ +}; +typedef struct h2_conn_ctx_t h2_conn_ctx_t; + +/** + * Get the h2 connection context. + * @param c the connection to look at + * @return h2 context of this connection + */ +#define h2_conn_ctx_get(c) \ + ((c)? (h2_conn_ctx_t*)ap_get_module_config((c)->conn_config, &http2_module) : NULL) + +/** + * Create the h2 connection context. + * @param c the connection to create it at + * @param s the server in use + * @param protocol the protocol selected + * @return created h2 context of this connection + */ +h2_conn_ctx_t *h2_conn_ctx_create_for_c1(conn_rec *c, server_rec *s, const char *protocol); + +void h2_conn_ctx_assign_session(h2_conn_ctx_t *ctx, struct h2_session *session); + +apr_status_t h2_conn_ctx_init_for_c2(h2_conn_ctx_t **pctx, conn_rec *c, + struct h2_mplx *mplx, struct h2_stream *stream, + struct h2_c2_transit *transit); + +void h2_conn_ctx_detach(conn_rec *c); + +void h2_conn_ctx_set_timeout(h2_conn_ctx_t *conn_ctx, apr_interval_time_t timeout); + +#endif /* defined(__mod_h2__h2_conn_ctx__) */ diff --git a/modules/http2/h2_conn_io.c b/modules/http2/h2_conn_io.c deleted file mode 100644 index eb6ec92..0000000 --- a/modules/http2/h2_conn_io.c +++ /dev/null @@ -1,389 +0,0 @@ -/* Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License.
- */ - -#include -#include -#include - -#include -#include -#include -#include -#include - -#include "h2_private.h" -#include "h2_bucket_eos.h" -#include "h2_config.h" -#include "h2_conn_io.h" -#include "h2_h2.h" -#include "h2_session.h" -#include "h2_util.h" - -#define TLS_DATA_MAX (16*1024) - -/* Calculated like this: assuming MTU 1500 bytes - * 1500 - 40 (IP) - 20 (TCP) - 40 (TCP options) - * - TLS overhead (60-100) - * ~= 1300 bytes */ -#define WRITE_SIZE_INITIAL 1300 - -/* Calculated like this: max TLS record size 16*1024 - * - 40 (IP) - 20 (TCP) - 40 (TCP options) - * - TLS overhead (60-100) - * which seems to create less TCP packets overall - */ -#define WRITE_SIZE_MAX (TLS_DATA_MAX - 100) - - -static void h2_conn_io_bb_log(conn_rec *c, int stream_id, int level, - const char *tag, apr_bucket_brigade *bb) -{ - char buffer[16 * 1024]; - const char *line = "(null)"; - apr_size_t bmax = sizeof(buffer)/sizeof(buffer[0]); - int off = 0; - apr_bucket *b; - - if (bb) { - memset(buffer, 0, bmax--); - for (b = APR_BRIGADE_FIRST(bb); - bmax && (b != APR_BRIGADE_SENTINEL(bb)); - b = APR_BUCKET_NEXT(b)) { - - if (APR_BUCKET_IS_METADATA(b)) { - if (APR_BUCKET_IS_EOS(b)) { - off += apr_snprintf(buffer+off, bmax-off, "eos "); - } - else if (APR_BUCKET_IS_FLUSH(b)) { - off += apr_snprintf(buffer+off, bmax-off, "flush "); - } - else if (AP_BUCKET_IS_EOR(b)) { - off += apr_snprintf(buffer+off, bmax-off, "eor "); - } - else if (H2_BUCKET_IS_H2EOS(b)) { - off += apr_snprintf(buffer+off, bmax-off, "h2eos "); - } - else { - off += apr_snprintf(buffer+off, bmax-off, "meta(unknown) "); - } - } - else { - const char *btype = "data"; - if (APR_BUCKET_IS_FILE(b)) { - btype = "file"; - } - else if (APR_BUCKET_IS_PIPE(b)) { - btype = "pipe"; - } - else if (APR_BUCKET_IS_SOCKET(b)) { - btype = "socket"; - } - else if (APR_BUCKET_IS_HEAP(b)) { - btype = "heap"; - } - else if (APR_BUCKET_IS_TRANSIENT(b)) { - btype = "transient"; - } - else if (APR_BUCKET_IS_IMMORTAL(b)) { - btype = "immortal"; - } -#if APR_HAS_MMAP - else if (APR_BUCKET_IS_MMAP(b)) { - btype = "mmap"; - } -#endif - else if (APR_BUCKET_IS_POOL(b)) { - btype = "pool"; - } - - off += apr_snprintf(buffer+off, bmax-off, "%s[%ld] ", - btype, - (long)(b->length == ((apr_size_t)-1)? - -1 : b->length)); - } - } - line = *buffer? buffer : "(empty)"; - } - /* Intentional no APLOGNO */ - ap_log_cerror(APLOG_MARK, level, 0, c, "h2_session(%ld)-%s: %s", - c->id, tag, line); - -} - -apr_status_t h2_conn_io_init(h2_conn_io *io, conn_rec *c, - const h2_config *cfg) -{ - io->c = c; - io->output = apr_brigade_create(c->pool, c->bucket_alloc); - io->is_tls = h2_h2_is_tls(c); - io->buffer_output = io->is_tls; - io->flush_threshold = (apr_size_t)h2_config_geti64(cfg, H2_CONF_STREAM_MAX_MEM); - - if (io->is_tls) { - /* This is what we start with, - * see https://issues.apache.org/jira/browse/TS-2503 - */ - io->warmup_size = h2_config_geti64(cfg, H2_CONF_TLS_WARMUP_SIZE); - io->cooldown_usecs = (h2_config_geti(cfg, H2_CONF_TLS_COOLDOWN_SECS) - * APR_USEC_PER_SEC); - io->write_size = (io->cooldown_usecs > 0? 
- WRITE_SIZE_INITIAL : WRITE_SIZE_MAX); - } - else { - io->warmup_size = 0; - io->cooldown_usecs = 0; - io->write_size = 0; - } - - if (APLOGctrace1(c)) { - ap_log_cerror(APLOG_MARK, APLOG_TRACE4, 0, io->c, - "h2_conn_io(%ld): init, buffering=%d, warmup_size=%ld, " - "cd_secs=%f", io->c->id, io->buffer_output, - (long)io->warmup_size, - ((float)io->cooldown_usecs/APR_USEC_PER_SEC)); - } - - return APR_SUCCESS; -} - -static void append_scratch(h2_conn_io *io) -{ - if (io->scratch && io->slen > 0) { - apr_bucket *b = apr_bucket_heap_create(io->scratch, io->slen, - apr_bucket_free, - io->c->bucket_alloc); - APR_BRIGADE_INSERT_TAIL(io->output, b); - io->scratch = NULL; - io->slen = io->ssize = 0; - } -} - -static apr_size_t assure_scratch_space(h2_conn_io *io) { - apr_size_t remain = io->ssize - io->slen; - if (io->scratch && remain == 0) { - append_scratch(io); - } - if (!io->scratch) { - /* we control the size and it is larger than what buckets usually - * allocate. */ - io->scratch = apr_bucket_alloc(io->write_size, io->c->bucket_alloc); - io->ssize = io->write_size; - io->slen = 0; - remain = io->ssize; - } - return remain; -} - -static apr_status_t read_to_scratch(h2_conn_io *io, apr_bucket *b) -{ - apr_status_t status; - const char *data; - apr_size_t len; - - if (!b->length) { - return APR_SUCCESS; - } - - ap_assert(b->length <= (io->ssize - io->slen)); - if (APR_BUCKET_IS_FILE(b)) { - apr_bucket_file *f = (apr_bucket_file *)b->data; - apr_file_t *fd = f->fd; - apr_off_t offset = b->start; - apr_size_t len = b->length; - - /* file buckets will either mmap (which we do not want) or - * read 8000 byte chunks and split themself. However, we do - * know *exactly* how many bytes we need where. - */ - status = apr_file_seek(fd, APR_SET, &offset); - if (status != APR_SUCCESS) { - return status; - } - status = apr_file_read(fd, io->scratch + io->slen, &len); - if (status != APR_SUCCESS && status != APR_EOF) { - return status; - } - io->slen += len; - } - else { - status = apr_bucket_read(b, &data, &len, APR_BLOCK_READ); - if (status == APR_SUCCESS) { - memcpy(io->scratch+io->slen, data, len); - io->slen += len; - } - } - return status; -} - -static void check_write_size(h2_conn_io *io) -{ - if (io->write_size > WRITE_SIZE_INITIAL - && (io->cooldown_usecs > 0) - && (apr_time_now() - io->last_write) >= io->cooldown_usecs) { - /* long time not written, reset write size */ - io->write_size = WRITE_SIZE_INITIAL; - io->bytes_written = 0; - } - else if (io->write_size < WRITE_SIZE_MAX - && io->bytes_written >= io->warmup_size) { - /* connection is hot, use max size */ - io->write_size = WRITE_SIZE_MAX; - } -} - -static apr_status_t pass_output(h2_conn_io *io, int flush) -{ - conn_rec *c = io->c; - apr_bucket_brigade *bb = io->output; - apr_bucket *b; - apr_off_t bblen; - apr_status_t status; - - append_scratch(io); - if (flush && !io->is_flushed) { - b = apr_bucket_flush_create(c->bucket_alloc); - APR_BRIGADE_INSERT_TAIL(bb, b); - } - - if (APR_BRIGADE_EMPTY(bb)) { - return APR_SUCCESS; - } - - ap_update_child_status(c->sbh, SERVER_BUSY_WRITE, NULL); - apr_brigade_length(bb, 0, &bblen); - h2_conn_io_bb_log(c, 0, APLOG_TRACE2, "out", bb); - - status = ap_pass_brigade(c->output_filters, bb); - if (status == APR_SUCCESS) { - io->bytes_written += (apr_size_t)bblen; - io->last_write = apr_time_now(); - if (flush) { - io->is_flushed = 1; - } - } - apr_brigade_cleanup(bb); - - if (status != APR_SUCCESS) { - ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, c, APLOGNO(03044) - "h2_conn_io(%ld): pass_out brigade 
%ld bytes", - c->id, (long)bblen); - } - return status; -} - -int h2_conn_io_needs_flush(h2_conn_io *io) -{ - if (!io->is_flushed) { - apr_off_t len = h2_brigade_mem_size(io->output); - if (len > io->flush_threshold) { - return 1; - } - /* if we do not exceed flush length due to memory limits, - * we want at least flush when we have that amount of data. */ - apr_brigade_length(io->output, 0, &len); - return len > (4 * io->flush_threshold); - } - return 0; -} - -apr_status_t h2_conn_io_flush(h2_conn_io *io) -{ - apr_status_t status; - status = pass_output(io, 1); - check_write_size(io); - return status; -} - -apr_status_t h2_conn_io_write(h2_conn_io *io, const char *data, size_t length) -{ - apr_status_t status = APR_SUCCESS; - apr_size_t remain; - - if (length > 0) { - io->is_flushed = 0; - } - - if (io->buffer_output) { - while (length > 0) { - remain = assure_scratch_space(io); - if (remain >= length) { - memcpy(io->scratch + io->slen, data, length); - io->slen += length; - length = 0; - } - else { - memcpy(io->scratch + io->slen, data, remain); - io->slen += remain; - data += remain; - length -= remain; - } - } - } - else { - status = apr_brigade_write(io->output, NULL, NULL, data, length); - } - return status; -} - -apr_status_t h2_conn_io_pass(h2_conn_io *io, apr_bucket_brigade *bb) -{ - apr_bucket *b; - apr_status_t status = APR_SUCCESS; - - if (!APR_BRIGADE_EMPTY(bb)) { - io->is_flushed = 0; - } - - while (!APR_BRIGADE_EMPTY(bb) && status == APR_SUCCESS) { - b = APR_BRIGADE_FIRST(bb); - - if (APR_BUCKET_IS_METADATA(b)) { - /* need to finish any open scratch bucket, as meta data - * needs to be forward "in order". */ - append_scratch(io); - APR_BUCKET_REMOVE(b); - APR_BRIGADE_INSERT_TAIL(io->output, b); - } - else if (io->buffer_output) { - apr_size_t remain = assure_scratch_space(io); - if (b->length > remain) { - apr_bucket_split(b, remain); - if (io->slen == 0) { - /* complete write_size bucket, append unchanged */ - APR_BUCKET_REMOVE(b); - APR_BRIGADE_INSERT_TAIL(io->output, b); - continue; - } - } - else { - /* bucket fits in remain, copy to scratch */ - status = read_to_scratch(io, b); - apr_bucket_delete(b); - continue; - } - } - else { - /* no buffering, forward buckets setaside on flush */ - if (APR_BUCKET_IS_TRANSIENT(b)) { - apr_bucket_setaside(b, io->c->pool); - } - APR_BUCKET_REMOVE(b); - APR_BRIGADE_INSERT_TAIL(io->output, b); - } - } - return status; -} - diff --git a/modules/http2/h2_conn_io.h b/modules/http2/h2_conn_io.h deleted file mode 100644 index 2c3be1c..0000000 --- a/modules/http2/h2_conn_io.h +++ /dev/null @@ -1,77 +0,0 @@ -/* Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef __mod_h2__h2_conn_io__ -#define __mod_h2__h2_conn_io__ - -struct h2_config; -struct h2_session; - -/* h2_io is the basic handler of a httpd connection. It keeps two brigades, - * one for input, one for output and works with the installed connection - * filters. - * The read is done via a callback function, so that input can be processed - * directly without copying. - */ -typedef struct { - conn_rec *c; - apr_bucket_brigade *output; - - int is_tls; - apr_time_t cooldown_usecs; - apr_int64_t warmup_size; - - apr_size_t write_size; - apr_time_t last_write; - apr_int64_t bytes_read; - apr_int64_t bytes_written; - - int buffer_output; - apr_size_t flush_threshold; - unsigned int is_flushed : 1; - - char *scratch; - apr_size_t ssize; - apr_size_t slen; -} h2_conn_io; - -apr_status_t h2_conn_io_init(h2_conn_io *io, conn_rec *c, - const struct h2_config *cfg); - -/** - * Append data to the buffered output. - * @param buf the data to append - * @param length the length of the data to append - */ -apr_status_t h2_conn_io_write(h2_conn_io *io, - const char *buf, - size_t length); - -apr_status_t h2_conn_io_pass(h2_conn_io *io, apr_bucket_brigade *bb); - -/** - * Pass any buffered data on to the connection output filters. - * @param io the connection io - * @param flush if a flush bucket should be appended to any output - */ -apr_status_t h2_conn_io_flush(h2_conn_io *io); - -/** - * Check if the buffered amount of data needs flushing. - */ -int h2_conn_io_needs_flush(h2_conn_io *io); - -#endif /* defined(__mod_h2__h2_conn_io__) */ diff --git a/modules/http2/h2_ctx.c b/modules/http2/h2_ctx.c deleted file mode 100644 index d5ccc24..0000000 --- a/modules/http2/h2_ctx.c +++ /dev/null @@ -1,121 +0,0 @@ -/* Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include - -#include -#include -#include - -#include "h2_private.h" -#include "h2_session.h" -#include "h2_task.h" -#include "h2_ctx.h" - -static h2_ctx *h2_ctx_create(const conn_rec *c) -{ - h2_ctx *ctx = apr_pcalloc(c->pool, sizeof(h2_ctx)); - ap_assert(ctx); - ap_set_module_config(c->conn_config, &http2_module, ctx); - h2_ctx_server_set(ctx, c->base_server); - return ctx; -} - -void h2_ctx_clear(const conn_rec *c) -{ - ap_assert(c); - ap_set_module_config(c->conn_config, &http2_module, NULL); -} - -h2_ctx *h2_ctx_create_for(const conn_rec *c, h2_task *task) -{ - h2_ctx *ctx = h2_ctx_create(c); - if (ctx) { - ctx->task = task; - } - return ctx; -} - -h2_ctx *h2_ctx_get(const conn_rec *c, int create) -{ - h2_ctx *ctx = (h2_ctx*)ap_get_module_config(c->conn_config, &http2_module); - if (ctx == NULL && create) { - ctx = h2_ctx_create(c); - } - return ctx; -} - -h2_ctx *h2_ctx_rget(const request_rec *r) -{ - return h2_ctx_get(r->connection, 0); -} - -const char *h2_ctx_protocol_get(const conn_rec *c) -{ - h2_ctx *ctx; - if (c->master) { - c = c->master; - } - ctx = (h2_ctx*)ap_get_module_config(c->conn_config, &http2_module); - return ctx? ctx->protocol : NULL; -} - -h2_ctx *h2_ctx_protocol_set(h2_ctx *ctx, const char *proto) -{ - ctx->protocol = proto; - return ctx; -} - -h2_session *h2_ctx_session_get(h2_ctx *ctx) -{ - return ctx? ctx->session : NULL; -} - -void h2_ctx_session_set(h2_ctx *ctx, struct h2_session *session) -{ - ctx->session = session; -} - -server_rec *h2_ctx_server_get(h2_ctx *ctx) -{ - return ctx? ctx->server : NULL; -} - -h2_ctx *h2_ctx_server_set(h2_ctx *ctx, server_rec *s) -{ - ctx->server = s; - return ctx; -} - -int h2_ctx_is_task(h2_ctx *ctx) -{ - return ctx && ctx->task; -} - -h2_task *h2_ctx_get_task(h2_ctx *ctx) -{ - return ctx? ctx->task : NULL; -} - -h2_task *h2_ctx_cget_task(conn_rec *c) -{ - return h2_ctx_get_task(h2_ctx_get(c, 0)); -} - -h2_task *h2_ctx_rget_task(request_rec *r) -{ - return h2_ctx_get_task(h2_ctx_rget(r)); -} diff --git a/modules/http2/h2_ctx.h b/modules/http2/h2_ctx.h deleted file mode 100644 index cb111c9..0000000 --- a/modules/http2/h2_ctx.h +++ /dev/null @@ -1,78 +0,0 @@ -/* Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef __mod_h2__h2_ctx__ -#define __mod_h2__h2_ctx__ - -struct h2_session; -struct h2_task; -struct h2_config; - -/** - * The h2 module context associated with a connection. 
- * - * It keeps track of the different types of connections: - * - those from clients that use HTTP/2 protocol - * - those from clients that do not use HTTP/2 - * - those created by ourself to perform work on HTTP/2 streams - */ -typedef struct h2_ctx { - const char *protocol; /* the protocol negotiated */ - struct h2_session *session; /* the session established */ - struct h2_task *task; /* the h2_task executing or NULL */ - const char *hostname; /* hostname negotiated via SNI, optional */ - server_rec *server; /* httpd server config selected. */ - const struct h2_config *config; /* effective config in this context */ -} h2_ctx; - -/** - * Get (or create) a h2 context record for this connection. - * @param c the connection to look at - * @param create != 0 iff missing context shall be created - * @return h2 context of this connection - */ -h2_ctx *h2_ctx_get(const conn_rec *c, int create); -void h2_ctx_clear(const conn_rec *c); - -h2_ctx *h2_ctx_rget(const request_rec *r); -h2_ctx *h2_ctx_create_for(const conn_rec *c, struct h2_task *task); - - -/* Set the h2 protocol established on this connection context or - * NULL when other protocols are in place. - */ -h2_ctx *h2_ctx_protocol_set(h2_ctx *ctx, const char *proto); - -/* Set the server_rec relevant for this context. - */ -h2_ctx *h2_ctx_server_set(h2_ctx *ctx, server_rec *s); -server_rec *h2_ctx_server_get(h2_ctx *ctx); - -struct h2_session *h2_ctx_session_get(h2_ctx *ctx); -void h2_ctx_session_set(h2_ctx *ctx, struct h2_session *session); - -/** - * Get the h2 protocol negotiated for this connection, or NULL. - */ -const char *h2_ctx_protocol_get(const conn_rec *c); - -int h2_ctx_is_task(h2_ctx *ctx); - -struct h2_task *h2_ctx_get_task(h2_ctx *ctx); -struct h2_task *h2_ctx_cget_task(conn_rec *c); -struct h2_task *h2_ctx_rget_task(request_rec *r); - -#endif /* defined(__mod_h2__h2_ctx__) */ diff --git a/modules/http2/h2_filter.c b/modules/http2/h2_filter.c deleted file mode 100644 index 8b254b1..0000000 --- a/modules/http2/h2_filter.c +++ /dev/null @@ -1,568 +0,0 @@ -/* Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include - -#include -#include -#include -#include -#include -#include -#include - -#include "h2_private.h" -#include "h2.h" -#include "h2_config.h" -#include "h2_conn_io.h" -#include "h2_ctx.h" -#include "h2_mplx.h" -#include "h2_push.h" -#include "h2_task.h" -#include "h2_stream.h" -#include "h2_request.h" -#include "h2_headers.h" -#include "h2_stream.h" -#include "h2_session.h" -#include "h2_util.h" -#include "h2_version.h" - -#include "h2_filter.h" - -#define UNSET -1 -#define H2MIN(x,y) ((x) < (y) ? 
(x) : (y)) - -static apr_status_t recv_RAW_DATA(conn_rec *c, h2_filter_cin *cin, - apr_bucket *b, apr_read_type_e block) -{ - h2_session *session = cin->session; - apr_status_t status = APR_SUCCESS; - apr_size_t len; - const char *data; - ssize_t n; - - status = apr_bucket_read(b, &data, &len, block); - - while (status == APR_SUCCESS && len > 0) { - n = nghttp2_session_mem_recv(session->ngh2, (const uint8_t *)data, len); - - ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, session->c, - H2_SSSN_MSG(session, "fed %ld bytes to nghttp2, %ld read"), - (long)len, (long)n); - if (n < 0) { - if (nghttp2_is_fatal((int)n)) { - h2_session_event(session, H2_SESSION_EV_PROTO_ERROR, - (int)n, nghttp2_strerror((int)n)); - status = APR_EGENERAL; - } - } - else { - session->io.bytes_read += n; - if (len <= n) { - break; - } - len -= n; - data += n; - } - } - - return status; -} - -static apr_status_t recv_RAW_brigade(conn_rec *c, h2_filter_cin *cin, - apr_bucket_brigade *bb, - apr_read_type_e block) -{ - apr_status_t status = APR_SUCCESS; - apr_bucket* b; - int consumed = 0; - - h2_util_bb_log(c, c->id, APLOG_TRACE2, "RAW_in", bb); - while (status == APR_SUCCESS && !APR_BRIGADE_EMPTY(bb)) { - b = APR_BRIGADE_FIRST(bb); - - if (APR_BUCKET_IS_METADATA(b)) { - /* nop */ - } - else { - status = recv_RAW_DATA(c, cin, b, block); - } - consumed = 1; - apr_bucket_delete(b); - } - - if (!consumed && status == APR_SUCCESS && block == APR_NONBLOCK_READ) { - return APR_EAGAIN; - } - return status; -} - -h2_filter_cin *h2_filter_cin_create(h2_session *session) -{ - h2_filter_cin *cin; - - cin = apr_pcalloc(session->pool, sizeof(*cin)); - if (!cin) { - return NULL; - } - cin->session = session; - return cin; -} - -void h2_filter_cin_timeout_set(h2_filter_cin *cin, apr_interval_time_t timeout) -{ - cin->timeout = timeout; -} - -apr_status_t h2_filter_core_input(ap_filter_t* f, - apr_bucket_brigade* brigade, - ap_input_mode_t mode, - apr_read_type_e block, - apr_off_t readbytes) -{ - h2_filter_cin *cin = f->ctx; - apr_status_t status = APR_SUCCESS; - apr_interval_time_t saved_timeout = UNSET; - const int trace1 = APLOGctrace1(f->c); - - if (trace1) { - ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, f->c, - "h2_session(%ld): read, %s, mode=%d, readbytes=%ld", - (long)f->c->id, (block == APR_BLOCK_READ)? - "BLOCK_READ" : "NONBLOCK_READ", mode, (long)readbytes); - } - - if (mode == AP_MODE_INIT || mode == AP_MODE_SPECULATIVE) { - return ap_get_brigade(f->next, brigade, mode, block, readbytes); - } - - if (mode != AP_MODE_READBYTES) { - return (block == APR_BLOCK_READ)? APR_SUCCESS : APR_EAGAIN; - } - - if (!cin->bb) { - cin->bb = apr_brigade_create(cin->session->pool, f->c->bucket_alloc); - } - - if (!cin->socket) { - cin->socket = ap_get_conn_socket(f->c); - } - - if (APR_BRIGADE_EMPTY(cin->bb)) { - /* We only do a blocking read when we have no streams to process. So, - * in httpd scoreboard lingo, we are in a KEEPALIVE connection state. 
- */ - if (block == APR_BLOCK_READ) { - if (cin->timeout > 0) { - apr_socket_timeout_get(cin->socket, &saved_timeout); - apr_socket_timeout_set(cin->socket, cin->timeout); - } - } - status = ap_get_brigade(f->next, cin->bb, AP_MODE_READBYTES, - block, readbytes); - if (saved_timeout != UNSET) { - apr_socket_timeout_set(cin->socket, saved_timeout); - } - } - - switch (status) { - case APR_SUCCESS: - status = recv_RAW_brigade(f->c, cin, cin->bb, block); - break; - case APR_EOF: - case APR_EAGAIN: - case APR_TIMEUP: - if (trace1) { - ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, f->c, - "h2_session(%ld): read", f->c->id); - } - break; - default: - ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, f->c, APLOGNO(03046) - "h2_session(%ld): error reading", f->c->id); - break; - } - return status; -} - -/******************************************************************************* - * http2 connection status handler + stream out source - ******************************************************************************/ - -typedef struct { - apr_bucket_refcount refcount; - h2_bucket_event_cb *cb; - void *ctx; -} h2_bucket_observer; - -static apr_status_t bucket_read(apr_bucket *b, const char **str, - apr_size_t *len, apr_read_type_e block) -{ - (void)b; - (void)block; - *str = NULL; - *len = 0; - return APR_SUCCESS; -} - -static void bucket_destroy(void *data) -{ - h2_bucket_observer *h = data; - if (apr_bucket_shared_destroy(h)) { - if (h->cb) { - h->cb(h->ctx, H2_BUCKET_EV_BEFORE_DESTROY, NULL); - } - apr_bucket_free(h); - } -} - -apr_bucket * h2_bucket_observer_make(apr_bucket *b, h2_bucket_event_cb *cb, - void *ctx) -{ - h2_bucket_observer *br; - - br = apr_bucket_alloc(sizeof(*br), b->list); - br->cb = cb; - br->ctx = ctx; - - b = apr_bucket_shared_make(b, br, 0, 0); - b->type = &h2_bucket_type_observer; - return b; -} - -apr_bucket * h2_bucket_observer_create(apr_bucket_alloc_t *list, - h2_bucket_event_cb *cb, void *ctx) -{ - apr_bucket *b = apr_bucket_alloc(sizeof(*b), list); - - APR_BUCKET_INIT(b); - b->free = apr_bucket_free; - b->list = list; - b = h2_bucket_observer_make(b, cb, ctx); - return b; -} - -apr_status_t h2_bucket_observer_fire(apr_bucket *b, h2_bucket_event event) -{ - if (H2_BUCKET_IS_OBSERVER(b)) { - h2_bucket_observer *l = (h2_bucket_observer *)b->data; - return l->cb(l->ctx, event, b); - } - return APR_EINVAL; -} - -const apr_bucket_type_t h2_bucket_type_observer = { - "H2OBS", 5, APR_BUCKET_METADATA, - bucket_destroy, - bucket_read, - apr_bucket_setaside_noop, - apr_bucket_split_notimpl, - apr_bucket_shared_copy -}; - -apr_bucket *h2_bucket_observer_beam(struct h2_bucket_beam *beam, - apr_bucket_brigade *dest, - const apr_bucket *src) -{ - if (H2_BUCKET_IS_OBSERVER(src)) { - h2_bucket_observer *l = (h2_bucket_observer *)src->data; - apr_bucket *b = h2_bucket_observer_create(dest->bucket_alloc, - l->cb, l->ctx); - APR_BRIGADE_INSERT_TAIL(dest, b); - l->cb = NULL; - l->ctx = NULL; - h2_bucket_observer_fire(b, H2_BUCKET_EV_BEFORE_MASTER_SEND); - return b; - } - return NULL; -} - -static apr_status_t bbout(apr_bucket_brigade *bb, const char *fmt, ...) - __attribute__((format(printf,2,3))); -static apr_status_t bbout(apr_bucket_brigade *bb, const char *fmt, ...) 
-{ - va_list args; - apr_status_t rv; - - va_start(args, fmt); - rv = apr_brigade_vprintf(bb, NULL, NULL, fmt, args); - va_end(args); - - return rv; -} - -static void add_settings(apr_bucket_brigade *bb, h2_session *s, int last) -{ - h2_mplx *m = s->mplx; - - bbout(bb, " \"settings\": {\n"); - bbout(bb, " \"SETTINGS_MAX_CONCURRENT_STREAMS\": %d,\n", m->max_streams); - bbout(bb, " \"SETTINGS_MAX_FRAME_SIZE\": %d,\n", 16*1024); - bbout(bb, " \"SETTINGS_INITIAL_WINDOW_SIZE\": %d,\n", - h2_config_geti(s->config, H2_CONF_WIN_SIZE)); - bbout(bb, " \"SETTINGS_ENABLE_PUSH\": %d\n", h2_session_push_enabled(s)); - bbout(bb, " }%s\n", last? "" : ","); -} - -static void add_peer_settings(apr_bucket_brigade *bb, h2_session *s, int last) -{ - bbout(bb, " \"peerSettings\": {\n"); - bbout(bb, " \"SETTINGS_MAX_CONCURRENT_STREAMS\": %d,\n", - nghttp2_session_get_remote_settings(s->ngh2, NGHTTP2_SETTINGS_MAX_CONCURRENT_STREAMS)); - bbout(bb, " \"SETTINGS_MAX_FRAME_SIZE\": %d,\n", - nghttp2_session_get_remote_settings(s->ngh2, NGHTTP2_SETTINGS_MAX_FRAME_SIZE)); - bbout(bb, " \"SETTINGS_INITIAL_WINDOW_SIZE\": %d,\n", - nghttp2_session_get_remote_settings(s->ngh2, NGHTTP2_SETTINGS_INITIAL_WINDOW_SIZE)); - bbout(bb, " \"SETTINGS_ENABLE_PUSH\": %d,\n", - nghttp2_session_get_remote_settings(s->ngh2, NGHTTP2_SETTINGS_ENABLE_PUSH)); - bbout(bb, " \"SETTINGS_HEADER_TABLE_SIZE\": %d,\n", - nghttp2_session_get_remote_settings(s->ngh2, NGHTTP2_SETTINGS_HEADER_TABLE_SIZE)); - bbout(bb, " \"SETTINGS_MAX_HEADER_LIST_SIZE\": %d\n", - nghttp2_session_get_remote_settings(s->ngh2, NGHTTP2_SETTINGS_MAX_HEADER_LIST_SIZE)); - bbout(bb, " }%s\n", last? "" : ","); -} - -typedef struct { - apr_bucket_brigade *bb; - h2_session *s; - int idx; -} stream_ctx_t; - -static int add_stream(h2_stream *stream, void *ctx) -{ - stream_ctx_t *x = ctx; - int32_t flowIn, flowOut; - - flowIn = nghttp2_session_get_stream_effective_local_window_size(x->s->ngh2, stream->id); - flowOut = nghttp2_session_get_stream_remote_window_size(x->s->ngh2, stream->id); - bbout(x->bb, "%s\n \"%d\": {\n", (x->idx? "," : ""), stream->id); - bbout(x->bb, " \"state\": \"%s\",\n", h2_stream_state_str(stream)); - bbout(x->bb, " \"created\": %f,\n", ((double)stream->created)/APR_USEC_PER_SEC); - bbout(x->bb, " \"flowIn\": %d,\n", flowIn); - bbout(x->bb, " \"flowOut\": %d,\n", flowOut); - bbout(x->bb, " \"dataIn\": %"APR_OFF_T_FMT",\n", stream->in_data_octets); - bbout(x->bb, " \"dataOut\": %"APR_OFF_T_FMT"\n", stream->out_data_octets); - bbout(x->bb, " }"); - - ++x->idx; - return 1; -} - -static void add_streams(apr_bucket_brigade *bb, h2_session *s, int last) -{ - stream_ctx_t x; - - x.bb = bb; - x.s = s; - x.idx = 0; - bbout(bb, " \"streams\": {"); - h2_mplx_stream_do(s->mplx, add_stream, &x); - bbout(bb, "\n }%s\n", last? "" : ","); -} - -static void add_push(apr_bucket_brigade *bb, h2_session *s, - h2_stream *stream, int last) -{ - h2_push_diary *diary; - apr_status_t status; - - bbout(bb, " \"push\": {\n"); - diary = s->push_diary; - if (diary) { - const char *data; - const char *base64_digest; - apr_size_t len; - - status = h2_push_diary_digest_get(diary, bb->p, 256, - stream->request->authority, - &data, &len); - if (status == APR_SUCCESS) { - base64_digest = h2_util_base64url_encode(data, len, bb->p); - bbout(bb, " \"cacheDigest\": \"%s\",\n", base64_digest); - } - } - bbout(bb, " \"promises\": %d,\n", s->pushes_promised); - bbout(bb, " \"submits\": %d,\n", s->pushes_submitted); - bbout(bb, " \"resets\": %d\n", s->pushes_reset); - bbout(bb, " }%s\n", last? 
"" : ","); -} - -static void add_in(apr_bucket_brigade *bb, h2_session *s, int last) -{ - bbout(bb, " \"in\": {\n"); - bbout(bb, " \"requests\": %d,\n", s->remote.emitted_count); - bbout(bb, " \"resets\": %d, \n", s->streams_reset); - bbout(bb, " \"frames\": %ld,\n", (long)s->frames_received); - bbout(bb, " \"octets\": %"APR_UINT64_T_FMT"\n", s->io.bytes_read); - bbout(bb, " }%s\n", last? "" : ","); -} - -static void add_out(apr_bucket_brigade *bb, h2_session *s, int last) -{ - bbout(bb, " \"out\": {\n"); - bbout(bb, " \"responses\": %d,\n", s->responses_submitted); - bbout(bb, " \"frames\": %ld,\n", (long)s->frames_sent); - bbout(bb, " \"octets\": %"APR_UINT64_T_FMT"\n", s->io.bytes_written); - bbout(bb, " }%s\n", last? "" : ","); -} - -static void add_stats(apr_bucket_brigade *bb, h2_session *s, - h2_stream *stream, int last) -{ - bbout(bb, " \"stats\": {\n"); - add_in(bb, s, 0); - add_out(bb, s, 0); - add_push(bb, s, stream, 1); - bbout(bb, " }%s\n", last? "" : ","); -} - -static apr_status_t h2_status_insert(h2_task *task, apr_bucket *b) -{ - conn_rec *c = task->c->master; - h2_ctx *h2ctx = h2_ctx_get(c, 0); - h2_session *session; - h2_stream *stream; - apr_bucket_brigade *bb; - apr_bucket *e; - int32_t connFlowIn, connFlowOut; - - - if (!h2ctx || (session = h2_ctx_session_get(h2ctx)) == NULL) { - return APR_SUCCESS; - } - - stream = h2_session_stream_get(session, task->stream_id); - if (!stream) { - /* stream already done */ - return APR_SUCCESS; - } - - bb = apr_brigade_create(stream->pool, c->bucket_alloc); - - connFlowIn = nghttp2_session_get_effective_local_window_size(session->ngh2); - connFlowOut = nghttp2_session_get_remote_window_size(session->ngh2); - - bbout(bb, "{\n"); - bbout(bb, " \"version\": \"draft-01\",\n"); - add_settings(bb, session, 0); - add_peer_settings(bb, session, 0); - bbout(bb, " \"connFlowIn\": %d,\n", connFlowIn); - bbout(bb, " \"connFlowOut\": %d,\n", connFlowOut); - bbout(bb, " \"sentGoAway\": %d,\n", session->local.shutdown); - - add_streams(bb, session, 0); - - add_stats(bb, session, stream, 1); - bbout(bb, "}\n"); - - while ((e = APR_BRIGADE_FIRST(bb)) != APR_BRIGADE_SENTINEL(bb)) { - APR_BUCKET_REMOVE(e); - APR_BUCKET_INSERT_AFTER(b, e); - b = e; - } - apr_brigade_destroy(bb); - - return APR_SUCCESS; -} - -static apr_status_t status_event(void *ctx, h2_bucket_event event, - apr_bucket *b) -{ - h2_task *task = ctx; - - ap_log_cerror(APLOG_MARK, APLOG_TRACE3, 0, task->c->master, - "status_event(%s): %d", task->id, event); - switch (event) { - case H2_BUCKET_EV_BEFORE_MASTER_SEND: - h2_status_insert(task, b); - break; - default: - break; - } - return APR_SUCCESS; -} - -int h2_filter_h2_status_handler(request_rec *r) -{ - h2_ctx *ctx = h2_ctx_rget(r); - conn_rec *c = r->connection; - h2_task *task; - apr_bucket_brigade *bb; - apr_bucket *b; - apr_status_t status; - - if (strcmp(r->handler, "http2-status")) { - return DECLINED; - } - if (r->method_number != M_GET && r->method_number != M_POST) { - return DECLINED; - } - - task = ctx? h2_ctx_get_task(ctx) : NULL; - if (task) { - - if ((status = ap_discard_request_body(r)) != OK) { - return status; - } - - /* We need to handle the actual output on the main thread, as - * we need to access h2_session information. 
*/ - r->status = 200; - r->clength = -1; - r->chunked = 1; - apr_table_unset(r->headers_out, "Content-Length"); - /* Discourage content-encodings */ - apr_table_unset(r->headers_out, "Content-Encoding"); - apr_table_setn(r->subprocess_env, "no-brotli", "1"); - apr_table_setn(r->subprocess_env, "no-gzip", "1"); - - ap_set_content_type(r, "application/json"); - apr_table_setn(r->notes, H2_FILTER_DEBUG_NOTE, "on"); - - bb = apr_brigade_create(r->pool, c->bucket_alloc); - b = h2_bucket_observer_create(c->bucket_alloc, status_event, task); - APR_BRIGADE_INSERT_TAIL(bb, b); - b = apr_bucket_eos_create(c->bucket_alloc); - APR_BRIGADE_INSERT_TAIL(bb, b); - - ap_log_rerror(APLOG_MARK, APLOG_TRACE2, 0, r, - "status_handler(%s): checking for incoming trailers", - task->id); - if (r->trailers_in && !apr_is_empty_table(r->trailers_in)) { - ap_log_rerror(APLOG_MARK, APLOG_TRACE2, 0, r, - "status_handler(%s): seeing incoming trailers", - task->id); - apr_table_setn(r->trailers_out, "h2-trailers-in", - apr_itoa(r->pool, 1)); - } - - status = ap_pass_brigade(r->output_filters, bb); - if (status == APR_SUCCESS - || r->status != HTTP_OK - || c->aborted) { - return OK; - } - else { - /* no way to know what type of error occurred */ - ap_log_rerror(APLOG_MARK, APLOG_TRACE1, status, r, - "status_handler(%s): ap_pass_brigade failed", - task->id); - return AP_FILTER_ERROR; - } - } - return DECLINED; -} - diff --git a/modules/http2/h2_filter.h b/modules/http2/h2_filter.h deleted file mode 100644 index 12810d8..0000000 --- a/modules/http2/h2_filter.h +++ /dev/null @@ -1,73 +0,0 @@ -/* Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef __mod_h2__h2_filter__ -#define __mod_h2__h2_filter__ - -struct h2_bucket_beam; -struct h2_headers; -struct h2_stream; -struct h2_session; - -typedef struct h2_filter_cin { - apr_pool_t *pool; - apr_socket_t *socket; - apr_interval_time_t timeout; - apr_bucket_brigade *bb; - struct h2_session *session; - apr_bucket *cur; -} h2_filter_cin; - -h2_filter_cin *h2_filter_cin_create(struct h2_session *session); - -void h2_filter_cin_timeout_set(h2_filter_cin *cin, apr_interval_time_t timeout); - -apr_status_t h2_filter_core_input(ap_filter_t* filter, - apr_bucket_brigade* brigade, - ap_input_mode_t mode, - apr_read_type_e block, - apr_off_t readbytes); - -/******* observer bucket ******************************************************/ - -typedef enum { - H2_BUCKET_EV_BEFORE_DESTROY, - H2_BUCKET_EV_BEFORE_MASTER_SEND -} h2_bucket_event; - -extern const apr_bucket_type_t h2_bucket_type_observer; - -typedef apr_status_t h2_bucket_event_cb(void *ctx, h2_bucket_event event, apr_bucket *b); - -#define H2_BUCKET_IS_OBSERVER(e) (e->type == &h2_bucket_type_observer) - -apr_bucket * h2_bucket_observer_make(apr_bucket *b, h2_bucket_event_cb *cb, - void *ctx); - -apr_bucket * h2_bucket_observer_create(apr_bucket_alloc_t *list, - h2_bucket_event_cb *cb, void *ctx); - -apr_status_t h2_bucket_observer_fire(apr_bucket *b, h2_bucket_event event); - -apr_bucket *h2_bucket_observer_beam(struct h2_bucket_beam *beam, - apr_bucket_brigade *dest, - const apr_bucket *src); - -/******* /.well-known/h2/state handler ****************************************/ - -int h2_filter_h2_status_handler(request_rec *r); - -#endif /* __mod_h2__h2_filter__ */ diff --git a/modules/http2/h2_from_h1.c b/modules/http2/h2_from_h1.c deleted file mode 100644 index d69c53c..0000000 --- a/modules/http2/h2_from_h1.c +++ /dev/null @@ -1,875 +0,0 @@ -/* Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include -#include - -#include -#include -#include - -#include -#include -#include -#include -#include -#include -#include - -#include "h2_private.h" -#include "h2_headers.h" -#include "h2_from_h1.h" -#include "h2_task.h" -#include "h2_util.h" - - -/* This routine is called by apr_table_do and merges all instances of - * the passed field values into a single array that will be further - * processed by some later routine. Originally intended to help split - * and recombine multiple Vary fields, though it is generic to any field - * consisting of comma/space-separated tokens. 
- */ -static int uniq_field_values(void *d, const char *key, const char *val) -{ - apr_array_header_t *values; - char *start; - char *e; - char **strpp; - int i; - - (void)key; - values = (apr_array_header_t *)d; - - e = apr_pstrdup(values->pool, val); - - do { - /* Find a non-empty fieldname */ - - while (*e == ',' || apr_isspace(*e)) { - ++e; - } - if (*e == '\0') { - break; - } - start = e; - while (*e != '\0' && *e != ',' && !apr_isspace(*e)) { - ++e; - } - if (*e != '\0') { - *e++ = '\0'; - } - - /* Now add it to values if it isn't already represented. - * Could be replaced by a ap_array_strcasecmp() if we had one. - */ - for (i = 0, strpp = (char **) values->elts; i < values->nelts; - ++i, ++strpp) { - if (*strpp && apr_strnatcasecmp(*strpp, start) == 0) { - break; - } - } - if (i == values->nelts) { /* if not found */ - *(char **)apr_array_push(values) = start; - } - } while (*e != '\0'); - - return 1; -} - -/* - * Since some clients choke violently on multiple Vary fields, or - * Vary fields with duplicate tokens, combine any multiples and remove - * any duplicates. - */ -static void fix_vary(request_rec *r) -{ - apr_array_header_t *varies; - - varies = apr_array_make(r->pool, 5, sizeof(char *)); - - /* Extract all Vary fields from the headers_out, separate each into - * its comma-separated fieldname values, and then add them to varies - * if not already present in the array. - */ - apr_table_do(uniq_field_values, varies, r->headers_out, "Vary", NULL); - - /* If we found any, replace old Vary fields with unique-ified value */ - - if (varies->nelts > 0) { - apr_table_setn(r->headers_out, "Vary", - apr_array_pstrcat(r->pool, varies, ',')); - } -} - -static void set_basic_http_header(apr_table_t *headers, request_rec *r, - apr_pool_t *pool) -{ - char *date = NULL; - const char *proxy_date = NULL; - const char *server = NULL; - const char *us = ap_get_server_banner(); - - /* - * keep the set-by-proxy server and date headers, otherwise - * generate a new server header / date header - */ - if (r && r->proxyreq != PROXYREQ_NONE) { - proxy_date = apr_table_get(r->headers_out, "Date"); - if (!proxy_date) { - /* - * proxy_date needs to be const. So use date for the creation of - * our own Date header and pass it over to proxy_date later to - * avoid a compiler warning. - */ - date = apr_palloc(pool, APR_RFC822_DATE_LEN); - ap_recent_rfc822_date(date, r->request_time); - } - server = apr_table_get(r->headers_out, "Server"); - } - else { - date = apr_palloc(pool, APR_RFC822_DATE_LEN); - ap_recent_rfc822_date(date, r? r->request_time : apr_time_now()); - } - - apr_table_setn(headers, "Date", proxy_date ? proxy_date : date ); - if (r) { - apr_table_unset(r->headers_out, "Date"); - } - - if (!server && *us) { - server = us; - } - if (server) { - apr_table_setn(headers, "Server", server); - if (r) { - apr_table_unset(r->headers_out, "Server"); - } - } -} - -static int copy_header(void *ctx, const char *name, const char *value) -{ - apr_table_t *headers = ctx; - - apr_table_add(headers, name, value); - return 1; -} - -static h2_headers *create_response(h2_task *task, request_rec *r) -{ - const char *clheader; - const char *ctype; - apr_table_t *headers; - /* - * Now that we are ready to send a response, we need to combine the two - * header field tables into a single table. If we don't do this, our - * later attempts to set or unset a given fieldname might be bypassed. 
- */ - if (!apr_is_empty_table(r->err_headers_out)) { - r->headers_out = apr_table_overlay(r->pool, r->err_headers_out, - r->headers_out); - apr_table_clear(r->err_headers_out); - } - - /* - * Remove the 'Vary' header field if the client can't handle it. - * Since this will have nasty effects on HTTP/1.1 caches, force - * the response into HTTP/1.0 mode. - */ - if (apr_table_get(r->subprocess_env, "force-no-vary") != NULL) { - apr_table_unset(r->headers_out, "Vary"); - r->proto_num = HTTP_VERSION(1,0); - apr_table_setn(r->subprocess_env, "force-response-1.0", "1"); - } - else { - fix_vary(r); - } - - /* - * Now remove any ETag response header field if earlier processing - * says so (such as a 'FileETag None' directive). - */ - if (apr_table_get(r->notes, "no-etag") != NULL) { - apr_table_unset(r->headers_out, "ETag"); - } - - /* determine the protocol and whether we should use keepalives. */ - ap_set_keepalive(r); - - if (AP_STATUS_IS_HEADER_ONLY(r->status)) { - apr_table_unset(r->headers_out, "Transfer-Encoding"); - apr_table_unset(r->headers_out, "Content-Length"); - r->content_type = r->content_encoding = NULL; - r->content_languages = NULL; - r->clength = r->chunked = 0; - } - else if (r->chunked) { - apr_table_mergen(r->headers_out, "Transfer-Encoding", "chunked"); - apr_table_unset(r->headers_out, "Content-Length"); - } - - ctype = ap_make_content_type(r, r->content_type); - if (ctype) { - apr_table_setn(r->headers_out, "Content-Type", ctype); - } - - if (r->content_encoding) { - apr_table_setn(r->headers_out, "Content-Encoding", - r->content_encoding); - } - - if (!apr_is_empty_array(r->content_languages)) { - unsigned int i; - char *token; - char **languages = (char **)(r->content_languages->elts); - const char *field = apr_table_get(r->headers_out, "Content-Language"); - - while (field && (token = ap_get_list_item(r->pool, &field)) != NULL) { - for (i = 0; i < r->content_languages->nelts; ++i) { - if (!apr_strnatcasecmp(token, languages[i])) - break; - } - if (i == r->content_languages->nelts) { - *((char **) apr_array_push(r->content_languages)) = token; - } - } - - field = apr_array_pstrcat(r->pool, r->content_languages, ','); - apr_table_setn(r->headers_out, "Content-Language", field); - } - - /* - * Control cachability for non-cachable responses if not already set by - * some other part of the server configuration. - */ - if (r->no_cache && !apr_table_get(r->headers_out, "Expires")) { - char *date = apr_palloc(r->pool, APR_RFC822_DATE_LEN); - ap_recent_rfc822_date(date, r->request_time); - apr_table_add(r->headers_out, "Expires", date); - } - - /* This is a hack, but I can't find anyway around it. The idea is that - * we don't want to send out 0 Content-Lengths if it is a head request. - * This happens when modules try to outsmart the server, and return - * if they see a HEAD request. Apache 1.3 handlers were supposed to - * just return in that situation, and the core handled the HEAD. In - * 2.0, if a handler returns, then the core sends an EOS bucket down - * the filter stack, and the content-length filter computes a C-L of - * zero and that gets put in the headers, and we end up sending a - * zero C-L to the client. We can't just remove the C-L filter, - * because well behaved 2.0 handlers will send their data down the stack, - * and we will compute a real C-L for the head request. 
RBB - */ - if (r->header_only - && (clheader = apr_table_get(r->headers_out, "Content-Length")) - && !strcmp(clheader, "0")) { - apr_table_unset(r->headers_out, "Content-Length"); - } - - headers = apr_table_make(r->pool, 10); - - set_basic_http_header(headers, r, r->pool); - if (r->status == HTTP_NOT_MODIFIED) { - apr_table_do(copy_header, headers, r->headers_out, - "ETag", - "Content-Location", - "Expires", - "Cache-Control", - "Vary", - "Warning", - "WWW-Authenticate", - "Proxy-Authenticate", - "Set-Cookie", - "Set-Cookie2", - NULL); - } - else { - apr_table_do(copy_header, headers, r->headers_out, NULL); - } - - return h2_headers_rcreate(r, r->status, headers, r->pool); -} - -typedef enum { - H2_RP_STATUS_LINE, - H2_RP_HEADER_LINE, - H2_RP_DONE -} h2_rp_state_t; - -typedef struct h2_response_parser { - h2_rp_state_t state; - h2_task *task; - int http_status; - apr_array_header_t *hlines; - apr_bucket_brigade *tmp; -} h2_response_parser; - -static apr_status_t parse_header(h2_response_parser *parser, char *line) { - const char *hline; - if (line[0] == ' ' || line[0] == '\t') { - char **plast; - /* continuation line from the header before this */ - while (line[0] == ' ' || line[0] == '\t') { - ++line; - } - - plast = apr_array_pop(parser->hlines); - if (plast == NULL) { - /* not well formed */ - return APR_EINVAL; - } - hline = apr_psprintf(parser->task->pool, "%s %s", *plast, line); - } - else { - /* new header line */ - hline = apr_pstrdup(parser->task->pool, line); - } - APR_ARRAY_PUSH(parser->hlines, const char*) = hline; - return APR_SUCCESS; -} - -static apr_status_t get_line(h2_response_parser *parser, apr_bucket_brigade *bb, - char *line, apr_size_t len) -{ - h2_task *task = parser->task; - apr_status_t status; - - if (!parser->tmp) { - parser->tmp = apr_brigade_create(task->pool, task->c->bucket_alloc); - } - status = apr_brigade_split_line(parser->tmp, bb, APR_BLOCK_READ, - HUGE_STRING_LEN); - if (status == APR_SUCCESS) { - --len; - status = apr_brigade_flatten(parser->tmp, line, &len); - if (status == APR_SUCCESS) { - /* we assume a non-0 containing line and remove trailing crlf. 
*/ - line[len] = '\0'; - if (len >= 2 && !strcmp(H2_CRLF, line + len - 2)) { - len -= 2; - line[len] = '\0'; - apr_brigade_cleanup(parser->tmp); - ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, task->c, - "h2_task(%s): read response line: %s", - task->id, line); - } - else { - /* this does not look like a complete line yet */ - ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, task->c, - "h2_task(%s): read response, incomplete line: %s", - task->id, line); - return APR_EAGAIN; - } - } - } - apr_brigade_cleanup(parser->tmp); - return status; -} - -static apr_table_t *make_table(h2_response_parser *parser) -{ - h2_task *task = parser->task; - apr_array_header_t *hlines = parser->hlines; - if (hlines) { - apr_table_t *headers = apr_table_make(task->pool, hlines->nelts); - int i; - - for (i = 0; i < hlines->nelts; ++i) { - char *hline = ((char **)hlines->elts)[i]; - char *sep = ap_strchr(hline, ':'); - if (!sep) { - ap_log_cerror(APLOG_MARK, APLOG_WARNING, APR_EINVAL, task->c, - APLOGNO(02955) "h2_task(%s): invalid header[%d] '%s'", - task->id, i, (char*)hline); - /* not valid format, abort */ - return NULL; - } - (*sep++) = '\0'; - while (*sep == ' ' || *sep == '\t') { - ++sep; - } - - if (!h2_util_ignore_header(hline)) { - apr_table_merge(headers, hline, sep); - } - } - return headers; - } - else { - return apr_table_make(task->pool, 0); - } -} - -static apr_status_t pass_response(h2_task *task, ap_filter_t *f, - h2_response_parser *parser) -{ - apr_bucket *b; - apr_status_t status; - - h2_headers *response = h2_headers_create(parser->http_status, - make_table(parser), - NULL, 0, task->pool); - apr_brigade_cleanup(parser->tmp); - b = h2_bucket_headers_create(task->c->bucket_alloc, response); - APR_BRIGADE_INSERT_TAIL(parser->tmp, b); - b = apr_bucket_flush_create(task->c->bucket_alloc); - APR_BRIGADE_INSERT_TAIL(parser->tmp, b); - status = ap_pass_brigade(f->next, parser->tmp); - apr_brigade_cleanup(parser->tmp); - - /* reset parser for possible next response */ - parser->state = H2_RP_STATUS_LINE; - apr_array_clear(parser->hlines); - - if (response->status >= 200) { - task->output.sent_response = 1; - } - ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, task->c, - APLOGNO(03197) "h2_task(%s): passed response %d", - task->id, response->status); - return status; -} - -static apr_status_t parse_status(h2_task *task, char *line) -{ - h2_response_parser *parser = task->output.rparser; - int sindex = (apr_date_checkmask(line, "HTTP/#.# ###*")? 9 : - (apr_date_checkmask(line, "HTTP/# ###*")? 7 : 0)); - if (sindex > 0) { - int k = sindex + 3; - char keepchar = line[k]; - line[k] = '\0'; - parser->http_status = atoi(&line[sindex]); - line[k] = keepchar; - parser->state = H2_RP_HEADER_LINE; - - return APR_SUCCESS; - } - /* Seems like there is garbage on the connection. May be a leftover - * from a previous proxy request. - * This should only happen if the H2_RESPONSE filter is not yet in - * place (post_read_request has not been reached and the handler wants - * to write something. Probably just the interim response we are - * waiting for. But if there is other data hanging around before - * that, this needs to fail. 
*/ - ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, task->c, APLOGNO(03467) - "h2_task(%s): unable to parse status line: %s", - task->id, line); - return APR_EINVAL; -} - -apr_status_t h2_from_h1_parse_response(h2_task *task, ap_filter_t *f, - apr_bucket_brigade *bb) -{ - h2_response_parser *parser = task->output.rparser; - char line[HUGE_STRING_LEN]; - apr_status_t status = APR_SUCCESS; - - if (!parser) { - parser = apr_pcalloc(task->pool, sizeof(*parser)); - parser->task = task; - parser->state = H2_RP_STATUS_LINE; - parser->hlines = apr_array_make(task->pool, 10, sizeof(char *)); - task->output.rparser = parser; - } - - while (!APR_BRIGADE_EMPTY(bb) && status == APR_SUCCESS) { - switch (parser->state) { - case H2_RP_STATUS_LINE: - case H2_RP_HEADER_LINE: - status = get_line(parser, bb, line, sizeof(line)); - if (status == APR_EAGAIN) { - /* need more data */ - return APR_SUCCESS; - } - else if (status != APR_SUCCESS) { - return status; - } - if (parser->state == H2_RP_STATUS_LINE) { - /* instead of parsing, just take it directly */ - status = parse_status(task, line); - } - else if (line[0] == '\0') { - /* end of headers, pass response onward */ - ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, f->c, - "h2_task(%s): end of response", task->id); - return pass_response(task, f, parser); - } - else { - ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, f->c, - "h2_task(%s): response header %s", task->id, line); - status = parse_header(parser, line); - } - break; - - default: - return status; - } - } - return status; -} - -apr_status_t h2_filter_headers_out(ap_filter_t *f, apr_bucket_brigade *bb) -{ - h2_task *task = f->ctx; - request_rec *r = f->r; - apr_bucket *b, *bresp, *body_bucket = NULL, *next; - ap_bucket_error *eb = NULL; - h2_headers *response = NULL; - int headers_passing = 0; - ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, f->c, - "h2_task(%s): output_filter called", task->id); - - if (!task->output.sent_response && !f->c->aborted) { - /* check, if we need to send the response now. Until we actually - * see a DATA bucket or some EOS/EOR, we do not do so. */ - for (b = APR_BRIGADE_FIRST(bb); - b != APR_BRIGADE_SENTINEL(bb); - b = APR_BUCKET_NEXT(b)) - { - if (AP_BUCKET_IS_ERROR(b) && !eb) { - eb = b->data; - } - else if (AP_BUCKET_IS_EOC(b)) { - /* If we see an EOC bucket it is a signal that we should get out - * of the way doing nothing. - */ - ap_remove_output_filter(f); - ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, f->c, - "h2_task(%s): eoc bucket passed", task->id); - return ap_pass_brigade(f->next, bb); - } - else if (H2_BUCKET_IS_HEADERS(b)) { - headers_passing = 1; - } - else if (!APR_BUCKET_IS_FLUSH(b)) { - body_bucket = b; - break; - } - } - - if (eb) { - int st = eb->status; - ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, f->c, APLOGNO(03047) - "h2_task(%s): err bucket status=%d", task->id, st); - /* throw everything away and replace it with the error response - * generated by ap_die() */ - apr_brigade_cleanup(bb); - ap_die(st, r); - return AP_FILTER_ERROR; - } - - if (body_bucket || !headers_passing) { - /* time to insert the response bucket before the body or if - * no h2_headers is passed, e.g. 
the response is empty */ - response = create_response(task, r); - if (response == NULL) { - ap_log_cerror(APLOG_MARK, APLOG_NOTICE, 0, f->c, APLOGNO(03048) - "h2_task(%s): unable to create response", task->id); - return APR_ENOMEM; - } - - bresp = h2_bucket_headers_create(f->c->bucket_alloc, response); - if (body_bucket) { - APR_BUCKET_INSERT_BEFORE(body_bucket, bresp); - } - else { - APR_BRIGADE_INSERT_HEAD(bb, bresp); - } - task->output.sent_response = 1; - r->sent_bodyct = 1; - } - } - - if (r->header_only) { - ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, f->c, - "h2_task(%s): header_only, cleanup output brigade", - task->id); - b = body_bucket? body_bucket : APR_BRIGADE_FIRST(bb); - while (b != APR_BRIGADE_SENTINEL(bb)) { - next = APR_BUCKET_NEXT(b); - if (APR_BUCKET_IS_EOS(b) || AP_BUCKET_IS_EOR(b)) { - break; - } - APR_BUCKET_REMOVE(b); - apr_bucket_destroy(b); - b = next; - } - } - else if (task->output.sent_response) { - /* lets get out of the way, our task is done */ - ap_remove_output_filter(f); - } - return ap_pass_brigade(f->next, bb); -} - -static void make_chunk(h2_task *task, apr_bucket_brigade *bb, - apr_bucket *first, apr_off_t chunk_len, - apr_bucket *tail) -{ - /* Surround the buckets [first, tail[ with new buckets carrying the - * HTTP/1.1 chunked encoding format. If tail is NULL, the chunk extends - * to the end of the brigade. */ - char buffer[128]; - apr_bucket *c; - int len; - - len = apr_snprintf(buffer, H2_ALEN(buffer), - "%"APR_UINT64_T_HEX_FMT"\r\n", (apr_uint64_t)chunk_len); - c = apr_bucket_heap_create(buffer, len, NULL, bb->bucket_alloc); - APR_BUCKET_INSERT_BEFORE(first, c); - c = apr_bucket_heap_create("\r\n", 2, NULL, bb->bucket_alloc); - if (tail) { - APR_BUCKET_INSERT_BEFORE(tail, c); - } - else { - APR_BRIGADE_INSERT_TAIL(bb, c); - } - task->input.chunked_total += chunk_len; - ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, task->c, - "h2_task(%s): added chunk %ld, total %ld", - task->id, (long)chunk_len, (long)task->input.chunked_total); -} - -static int ser_header(void *ctx, const char *name, const char *value) -{ - apr_bucket_brigade *bb = ctx; - apr_brigade_printf(bb, NULL, NULL, "%s: %s\r\n", name, value); - return 1; -} - -static apr_status_t read_and_chunk(ap_filter_t *f, h2_task *task, - apr_read_type_e block) { - request_rec *r = f->r; - apr_status_t status = APR_SUCCESS; - apr_bucket_brigade *bb = task->input.bbchunk; - - if (!bb) { - bb = apr_brigade_create(r->pool, f->c->bucket_alloc); - task->input.bbchunk = bb; - } - - if (APR_BRIGADE_EMPTY(bb)) { - apr_bucket *b, *next, *first_data = NULL; - apr_bucket_brigade *tmp; - apr_off_t bblen = 0; - - /* get more data from the lower layer filters. Always do this - * in larger pieces, since we handle the read modes ourself. 
*/ - status = ap_get_brigade(f->next, bb, - AP_MODE_READBYTES, block, 32*1024); - if (status == APR_EOF) { - if (!task->input.eos) { - status = apr_brigade_puts(bb, NULL, NULL, "0\r\n\r\n"); - task->input.eos = 1; - return APR_SUCCESS; - } - ap_remove_input_filter(f); - return status; - - } - else if (status != APR_SUCCESS) { - return status; - } - - for (b = APR_BRIGADE_FIRST(bb); - b != APR_BRIGADE_SENTINEL(bb) && !task->input.eos; - b = next) { - next = APR_BUCKET_NEXT(b); - if (APR_BUCKET_IS_METADATA(b)) { - if (first_data) { - make_chunk(task, bb, first_data, bblen, b); - first_data = NULL; - } - - if (H2_BUCKET_IS_HEADERS(b)) { - h2_headers *headers = h2_bucket_headers_get(b); - - ap_assert(headers); - ap_log_rerror(APLOG_MARK, APLOG_TRACE2, 0, r, - "h2_task(%s): receiving trailers", task->id); - tmp = apr_brigade_split_ex(bb, b, NULL); - if (!apr_is_empty_table(headers->headers)) { - status = apr_brigade_puts(bb, NULL, NULL, "0\r\n"); - apr_table_do(ser_header, bb, headers->headers, NULL); - status = apr_brigade_puts(bb, NULL, NULL, "\r\n"); - } - else { - status = apr_brigade_puts(bb, NULL, NULL, "0\r\n\r\n"); - } - r->trailers_in = apr_table_clone(r->pool, headers->headers); - APR_BUCKET_REMOVE(b); - apr_bucket_destroy(b); - APR_BRIGADE_CONCAT(bb, tmp); - apr_brigade_destroy(tmp); - task->input.eos = 1; - } - else if (APR_BUCKET_IS_EOS(b)) { - tmp = apr_brigade_split_ex(bb, b, NULL); - status = apr_brigade_puts(bb, NULL, NULL, "0\r\n\r\n"); - APR_BRIGADE_CONCAT(bb, tmp); - apr_brigade_destroy(tmp); - task->input.eos = 1; - } - } - else if (b->length == 0) { - APR_BUCKET_REMOVE(b); - apr_bucket_destroy(b); - } - else { - if (!first_data) { - first_data = b; - bblen = 0; - } - bblen += b->length; - } - } - - if (first_data) { - make_chunk(task, bb, first_data, bblen, NULL); - } - } - return status; -} - -apr_status_t h2_filter_request_in(ap_filter_t* f, - apr_bucket_brigade* bb, - ap_input_mode_t mode, - apr_read_type_e block, - apr_off_t readbytes) -{ - h2_task *task = f->ctx; - request_rec *r = f->r; - apr_status_t status = APR_SUCCESS; - apr_bucket *b, *next; - core_server_config *conf = - (core_server_config *) ap_get_module_config(r->server->module_config, - &core_module); - - ap_log_rerror(APLOG_MARK, APLOG_TRACE2, 0, f->r, - "h2_task(%s): request filter, exp=%d", task->id, r->expecting_100); - if (!task->request->chunked) { - status = ap_get_brigade(f->next, bb, mode, block, readbytes); - /* pipe data through, just take care of trailers */ - for (b = APR_BRIGADE_FIRST(bb); - b != APR_BRIGADE_SENTINEL(bb); b = next) { - next = APR_BUCKET_NEXT(b); - if (H2_BUCKET_IS_HEADERS(b)) { - h2_headers *headers = h2_bucket_headers_get(b); - ap_assert(headers); - ap_log_rerror(APLOG_MARK, APLOG_TRACE2, 0, r, - "h2_task(%s): receiving trailers", task->id); - r->trailers_in = headers->headers; - if (conf && conf->merge_trailers == AP_MERGE_TRAILERS_ENABLE) { - r->headers_in = apr_table_overlay(r->pool, r->headers_in, - r->trailers_in); - } - APR_BUCKET_REMOVE(b); - apr_bucket_destroy(b); - ap_remove_input_filter(f); - - if (headers->raw_bytes && h2_task_logio_add_bytes_in) { - h2_task_logio_add_bytes_in(task->c, headers->raw_bytes); - } - break; - } - } - return status; - } - - /* Things are more complicated. The standard HTTP input filter, which - * does a lot what we do not want to duplicate, also cares about chunked - * transfer encoding and trailers. - * We need to simulate chunked encoding for it to be happy. 
- */ - if ((status = read_and_chunk(f, task, block)) != APR_SUCCESS) { - return status; - } - - if (mode == AP_MODE_EXHAUSTIVE) { - /* return all we have */ - APR_BRIGADE_CONCAT(bb, task->input.bbchunk); - } - else if (mode == AP_MODE_READBYTES) { - status = h2_brigade_concat_length(bb, task->input.bbchunk, readbytes); - } - else if (mode == AP_MODE_SPECULATIVE) { - status = h2_brigade_copy_length(bb, task->input.bbchunk, readbytes); - } - else if (mode == AP_MODE_GETLINE) { - /* we are reading a single LF line, e.g. the HTTP headers. - * this has the nasty side effect to split the bucket, even - * though it ends with CRLF and creates a 0 length bucket */ - status = apr_brigade_split_line(bb, task->input.bbchunk, block, - HUGE_STRING_LEN); - if (APLOGctrace1(f->c)) { - char buffer[1024]; - apr_size_t len = sizeof(buffer)-1; - apr_brigade_flatten(bb, buffer, &len); - buffer[len] = 0; - ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, f->c, - "h2_task(%s): getline: %s", - task->id, buffer); - } - } - else { - /* Hmm, well. There is mode AP_MODE_EATCRLF, but we chose not - * to support it. Seems to work. */ - ap_log_cerror(APLOG_MARK, APLOG_ERR, APR_ENOTIMPL, f->c, - APLOGNO(02942) - "h2_task, unsupported READ mode %d", mode); - status = APR_ENOTIMPL; - } - - h2_util_bb_log(f->c, task->stream_id, APLOG_TRACE2, "forwarding input", bb); - return status; -} - -apr_status_t h2_filter_trailers_out(ap_filter_t *f, apr_bucket_brigade *bb) -{ - h2_task *task = f->ctx; - request_rec *r = f->r; - apr_bucket *b, *e; - - if (task && r) { - /* Detect the EOS/EOR bucket and forward any trailers that may have - * been set to our h2_headers. - */ - for (b = APR_BRIGADE_FIRST(bb); - b != APR_BRIGADE_SENTINEL(bb); - b = APR_BUCKET_NEXT(b)) - { - if ((APR_BUCKET_IS_EOS(b) || AP_BUCKET_IS_EOR(b)) - && r->trailers_out && !apr_is_empty_table(r->trailers_out)) { - h2_headers *headers; - apr_table_t *trailers; - - ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, f->c, APLOGNO(03049) - "h2_task(%s): sending trailers", task->id); - trailers = apr_table_clone(r->pool, r->trailers_out); - headers = h2_headers_rcreate(r, HTTP_OK, trailers, r->pool); - e = h2_bucket_headers_create(bb->bucket_alloc, headers); - APR_BUCKET_INSERT_BEFORE(b, e); - apr_table_clear(r->trailers_out); - ap_remove_output_filter(f); - break; - } - } - } - - return ap_pass_brigade(f->next, bb); -} - diff --git a/modules/http2/h2_from_h1.h b/modules/http2/h2_from_h1.h deleted file mode 100644 index 68a24fd..0000000 --- a/modules/http2/h2_from_h1.h +++ /dev/null @@ -1,50 +0,0 @@ -/* Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef __mod_h2__h2_from_h1__ -#define __mod_h2__h2_from_h1__ - -/** - * h2_from_h1 parses a HTTP/1.1 response into - * - response status - * - a list of header values - * - a series of bytes that represent the response body alone, without - * any meta data, such as inserted by chunked transfer encoding. - * - * All data is allocated from the stream memory pool. - * - * Again, see comments in h2_request: ideally we would take the headers - * and status from the httpd structures instead of parsing them here, but - * we need to have all handlers and filters involved in request/response - * processing, so this seems to be the way for now. - */ -struct h2_headers; -struct h2_task; - -apr_status_t h2_from_h1_parse_response(struct h2_task *task, ap_filter_t *f, - apr_bucket_brigade *bb); - -apr_status_t h2_filter_headers_out(ap_filter_t *f, apr_bucket_brigade *bb); - -apr_status_t h2_filter_request_in(ap_filter_t* f, - apr_bucket_brigade* brigade, - ap_input_mode_t mode, - apr_read_type_e block, - apr_off_t readbytes); - -apr_status_t h2_filter_trailers_out(ap_filter_t *f, apr_bucket_brigade *bb); - -#endif /* defined(__mod_h2__h2_from_h1__) */ diff --git a/modules/http2/h2_h2.c b/modules/http2/h2_h2.c deleted file mode 100644 index 5580cef..0000000 --- a/modules/http2/h2_h2.c +++ /dev/null @@ -1,765 +0,0 @@ -/* Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include - -#include -#include -#include - -#include -#include -#include -#include -#include -#include -#include - -#include "mod_ssl.h" - -#include "mod_http2.h" -#include "h2_private.h" - -#include "h2_bucket_beam.h" -#include "h2_stream.h" -#include "h2_task.h" -#include "h2_config.h" -#include "h2_ctx.h" -#include "h2_conn.h" -#include "h2_filter.h" -#include "h2_request.h" -#include "h2_headers.h" -#include "h2_session.h" -#include "h2_util.h" -#include "h2_h2.h" -#include "mod_http2.h" - -const char *h2_tls_protos[] = { - "h2", NULL -}; - -const char *h2_clear_protos[] = { - "h2c", NULL -}; - -const char *H2_MAGIC_TOKEN = "PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n"; - -/******************************************************************************* - * The optional mod_ssl functions we need. 
- */ -static APR_OPTIONAL_FN_TYPE(ssl_is_https) *opt_ssl_is_https; -static APR_OPTIONAL_FN_TYPE(ssl_var_lookup) *opt_ssl_var_lookup; - - -/******************************************************************************* - * HTTP/2 error stuff - */ -static const char *h2_err_descr[] = { - "no error", /* 0x0 */ - "protocol error", - "internal error", - "flow control error", - "settings timeout", - "stream closed", /* 0x5 */ - "frame size error", - "refused stream", - "cancel", - "compression error", - "connect error", /* 0xa */ - "enhance your calm", - "inadequate security", - "http/1.1 required", -}; - -const char *h2_h2_err_description(unsigned int h2_error) -{ - if (h2_error < (sizeof(h2_err_descr)/sizeof(h2_err_descr[0]))) { - return h2_err_descr[h2_error]; - } - return "unknown http/2 error code"; -} - -/******************************************************************************* - * Check connection security requirements of RFC 7540 - */ - -/* - * Black Listed Ciphers from RFC 7549 Appendix A - * - */ -static const char *RFC7540_names[] = { - /* ciphers with NULL encrpytion */ - "NULL-MD5", /* TLS_NULL_WITH_NULL_NULL */ - /* same */ /* TLS_RSA_WITH_NULL_MD5 */ - "NULL-SHA", /* TLS_RSA_WITH_NULL_SHA */ - "NULL-SHA256", /* TLS_RSA_WITH_NULL_SHA256 */ - "PSK-NULL-SHA", /* TLS_PSK_WITH_NULL_SHA */ - "DHE-PSK-NULL-SHA", /* TLS_DHE_PSK_WITH_NULL_SHA */ - "RSA-PSK-NULL-SHA", /* TLS_RSA_PSK_WITH_NULL_SHA */ - "PSK-NULL-SHA256", /* TLS_PSK_WITH_NULL_SHA256 */ - "PSK-NULL-SHA384", /* TLS_PSK_WITH_NULL_SHA384 */ - "DHE-PSK-NULL-SHA256", /* TLS_DHE_PSK_WITH_NULL_SHA256 */ - "DHE-PSK-NULL-SHA384", /* TLS_DHE_PSK_WITH_NULL_SHA384 */ - "RSA-PSK-NULL-SHA256", /* TLS_RSA_PSK_WITH_NULL_SHA256 */ - "RSA-PSK-NULL-SHA384", /* TLS_RSA_PSK_WITH_NULL_SHA384 */ - "ECDH-ECDSA-NULL-SHA", /* TLS_ECDH_ECDSA_WITH_NULL_SHA */ - "ECDHE-ECDSA-NULL-SHA", /* TLS_ECDHE_ECDSA_WITH_NULL_SHA */ - "ECDH-RSA-NULL-SHA", /* TLS_ECDH_RSA_WITH_NULL_SHA */ - "ECDHE-RSA-NULL-SHA", /* TLS_ECDHE_RSA_WITH_NULL_SHA */ - "AECDH-NULL-SHA", /* TLS_ECDH_anon_WITH_NULL_SHA */ - "ECDHE-PSK-NULL-SHA", /* TLS_ECDHE_PSK_WITH_NULL_SHA */ - "ECDHE-PSK-NULL-SHA256", /* TLS_ECDHE_PSK_WITH_NULL_SHA256 */ - "ECDHE-PSK-NULL-SHA384", /* TLS_ECDHE_PSK_WITH_NULL_SHA384 */ - - /* DES/3DES ciphers */ - "PSK-3DES-EDE-CBC-SHA", /* TLS_PSK_WITH_3DES_EDE_CBC_SHA */ - "DHE-PSK-3DES-EDE-CBC-SHA", /* TLS_DHE_PSK_WITH_3DES_EDE_CBC_SHA */ - "RSA-PSK-3DES-EDE-CBC-SHA", /* TLS_RSA_PSK_WITH_3DES_EDE_CBC_SHA */ - "ECDH-ECDSA-DES-CBC3-SHA", /* TLS_ECDH_ECDSA_WITH_3DES_EDE_CBC_SHA */ - "ECDHE-ECDSA-DES-CBC3-SHA", /* TLS_ECDHE_ECDSA_WITH_3DES_EDE_CBC_SHA */ - "ECDH-RSA-DES-CBC3-SHA", /* TLS_ECDH_RSA_WITH_3DES_EDE_CBC_SHA */ - "ECDHE-RSA-DES-CBC3-SHA", /* TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA */ - "AECDH-DES-CBC3-SHA", /* TLS_ECDH_anon_WITH_3DES_EDE_CBC_SHA */ - "SRP-3DES-EDE-CBC-SHA", /* TLS_SRP_SHA_WITH_3DES_EDE_CBC_SHA */ - "SRP-RSA-3DES-EDE-CBC-SHA", /* TLS_SRP_SHA_RSA_WITH_3DES_EDE_CBC_SHA */ - "SRP-DSS-3DES-EDE-CBC-SHA", /* TLS_SRP_SHA_DSS_WITH_3DES_EDE_CBC_SHA */ - "ECDHE-PSK-3DES-EDE-CBC-SHA", /* TLS_ECDHE_PSK_WITH_3DES_EDE_CBC_SHA */ - "DES-CBC-SHA", /* TLS_RSA_WITH_DES_CBC_SHA */ - "DES-CBC3-SHA", /* TLS_RSA_WITH_3DES_EDE_CBC_SHA */ - "DHE-DSS-DES-CBC3-SHA", /* TLS_DHE_DSS_WITH_3DES_EDE_CBC_SHA */ - "DHE-RSA-DES-CBC-SHA", /* TLS_DHE_RSA_WITH_DES_CBC_SHA */ - "DHE-RSA-DES-CBC3-SHA", /* TLS_DHE_RSA_WITH_3DES_EDE_CBC_SHA */ - "ADH-DES-CBC-SHA", /* TLS_DH_anon_WITH_DES_CBC_SHA */ - "ADH-DES-CBC3-SHA", /* TLS_DH_anon_WITH_3DES_EDE_CBC_SHA */ - 
"EXP-DH-DSS-DES-CBC-SHA", /* TLS_DH_DSS_EXPORT_WITH_DES40_CBC_SHA */ - "DH-DSS-DES-CBC-SHA", /* TLS_DH_DSS_WITH_DES_CBC_SHA */ - "DH-DSS-DES-CBC3-SHA", /* TLS_DH_DSS_WITH_3DES_EDE_CBC_SHA */ - "EXP-DH-RSA-DES-CBC-SHA", /* TLS_DH_RSA_EXPORT_WITH_DES40_CBC_SHA */ - "DH-RSA-DES-CBC-SHA", /* TLS_DH_RSA_WITH_DES_CBC_SHA */ - "DH-RSA-DES-CBC3-SHA", /* TLS_DH_RSA_WITH_3DES_EDE_CBC_SHA */ - - /* blacklisted EXPORT ciphers */ - "EXP-RC4-MD5", /* TLS_RSA_EXPORT_WITH_RC4_40_MD5 */ - "EXP-RC2-CBC-MD5", /* TLS_RSA_EXPORT_WITH_RC2_CBC_40_MD5 */ - "EXP-DES-CBC-SHA", /* TLS_RSA_EXPORT_WITH_DES40_CBC_SHA */ - "EXP-DHE-DSS-DES-CBC-SHA", /* TLS_DHE_DSS_EXPORT_WITH_DES40_CBC_SHA */ - "EXP-DHE-RSA-DES-CBC-SHA", /* TLS_DHE_RSA_EXPORT_WITH_DES40_CBC_SHA */ - "EXP-ADH-DES-CBC-SHA", /* TLS_DH_anon_EXPORT_WITH_DES40_CBC_SHA */ - "EXP-ADH-RC4-MD5", /* TLS_DH_anon_EXPORT_WITH_RC4_40_MD5 */ - - /* blacklisted RC4 encryption */ - "RC4-MD5", /* TLS_RSA_WITH_RC4_128_MD5 */ - "RC4-SHA", /* TLS_RSA_WITH_RC4_128_SHA */ - "ADH-RC4-MD5", /* TLS_DH_anon_WITH_RC4_128_MD5 */ - "KRB5-RC4-SHA", /* TLS_KRB5_WITH_RC4_128_SHA */ - "KRB5-RC4-MD5", /* TLS_KRB5_WITH_RC4_128_MD5 */ - "EXP-KRB5-RC4-SHA", /* TLS_KRB5_EXPORT_WITH_RC4_40_SHA */ - "EXP-KRB5-RC4-MD5", /* TLS_KRB5_EXPORT_WITH_RC4_40_MD5 */ - "PSK-RC4-SHA", /* TLS_PSK_WITH_RC4_128_SHA */ - "DHE-PSK-RC4-SHA", /* TLS_DHE_PSK_WITH_RC4_128_SHA */ - "RSA-PSK-RC4-SHA", /* TLS_RSA_PSK_WITH_RC4_128_SHA */ - "ECDH-ECDSA-RC4-SHA", /* TLS_ECDH_ECDSA_WITH_RC4_128_SHA */ - "ECDHE-ECDSA-RC4-SHA", /* TLS_ECDHE_ECDSA_WITH_RC4_128_SHA */ - "ECDH-RSA-RC4-SHA", /* TLS_ECDH_RSA_WITH_RC4_128_SHA */ - "ECDHE-RSA-RC4-SHA", /* TLS_ECDHE_RSA_WITH_RC4_128_SHA */ - "AECDH-RC4-SHA", /* TLS_ECDH_anon_WITH_RC4_128_SHA */ - "ECDHE-PSK-RC4-SHA", /* TLS_ECDHE_PSK_WITH_RC4_128_SHA */ - - /* blacklisted AES128 encrpytion ciphers */ - "AES128-SHA256", /* TLS_RSA_WITH_AES_128_CBC_SHA */ - "DH-DSS-AES128-SHA", /* TLS_DH_DSS_WITH_AES_128_CBC_SHA */ - "DH-RSA-AES128-SHA", /* TLS_DH_RSA_WITH_AES_128_CBC_SHA */ - "DHE-DSS-AES128-SHA", /* TLS_DHE_DSS_WITH_AES_128_CBC_SHA */ - "DHE-RSA-AES128-SHA", /* TLS_DHE_RSA_WITH_AES_128_CBC_SHA */ - "ADH-AES128-SHA", /* TLS_DH_anon_WITH_AES_128_CBC_SHA */ - "AES128-SHA256", /* TLS_RSA_WITH_AES_128_CBC_SHA256 */ - "DH-DSS-AES128-SHA256", /* TLS_DH_DSS_WITH_AES_128_CBC_SHA256 */ - "DH-RSA-AES128-SHA256", /* TLS_DH_RSA_WITH_AES_128_CBC_SHA256 */ - "DHE-DSS-AES128-SHA256", /* TLS_DHE_DSS_WITH_AES_128_CBC_SHA256 */ - "DHE-RSA-AES128-SHA256", /* TLS_DHE_RSA_WITH_AES_128_CBC_SHA256 */ - "ECDH-ECDSA-AES128-SHA", /* TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA */ - "ECDHE-ECDSA-AES128-SHA", /* TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA */ - "ECDH-RSA-AES128-SHA", /* TLS_ECDH_RSA_WITH_AES_128_CBC_SHA */ - "ECDHE-RSA-AES128-SHA", /* TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA */ - "AECDH-AES128-SHA", /* TLS_ECDH_anon_WITH_AES_128_CBC_SHA */ - "ECDHE-ECDSA-AES128-SHA256", /* TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256 */ - "ECDH-ECDSA-AES128-SHA256", /* TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA256 */ - "ECDHE-RSA-AES128-SHA256", /* TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256 */ - "ECDH-RSA-AES128-SHA256", /* TLS_ECDH_RSA_WITH_AES_128_CBC_SHA256 */ - "ADH-AES128-SHA256", /* TLS_DH_anon_WITH_AES_128_CBC_SHA256 */ - "PSK-AES128-CBC-SHA", /* TLS_PSK_WITH_AES_128_CBC_SHA */ - "DHE-PSK-AES128-CBC-SHA", /* TLS_DHE_PSK_WITH_AES_128_CBC_SHA */ - "RSA-PSK-AES128-CBC-SHA", /* TLS_RSA_PSK_WITH_AES_128_CBC_SHA */ - "PSK-AES128-CBC-SHA256", /* TLS_PSK_WITH_AES_128_CBC_SHA256 */ - "DHE-PSK-AES128-CBC-SHA256", /* TLS_DHE_PSK_WITH_AES_128_CBC_SHA256 
*/ - "RSA-PSK-AES128-CBC-SHA256", /* TLS_RSA_PSK_WITH_AES_128_CBC_SHA256 */ - "ECDHE-PSK-AES128-CBC-SHA", /* TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA */ - "ECDHE-PSK-AES128-CBC-SHA256", /* TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA256 */ - "AES128-CCM", /* TLS_RSA_WITH_AES_128_CCM */ - "AES128-CCM8", /* TLS_RSA_WITH_AES_128_CCM_8 */ - "PSK-AES128-CCM", /* TLS_PSK_WITH_AES_128_CCM */ - "PSK-AES128-CCM8", /* TLS_PSK_WITH_AES_128_CCM_8 */ - "AES128-GCM-SHA256", /* TLS_RSA_WITH_AES_128_GCM_SHA256 */ - "DH-RSA-AES128-GCM-SHA256", /* TLS_DH_RSA_WITH_AES_128_GCM_SHA256 */ - "DH-DSS-AES128-GCM-SHA256", /* TLS_DH_DSS_WITH_AES_128_GCM_SHA256 */ - "ADH-AES128-GCM-SHA256", /* TLS_DH_anon_WITH_AES_128_GCM_SHA256 */ - "PSK-AES128-GCM-SHA256", /* TLS_PSK_WITH_AES_128_GCM_SHA256 */ - "RSA-PSK-AES128-GCM-SHA256", /* TLS_RSA_PSK_WITH_AES_128_GCM_SHA256 */ - "ECDH-ECDSA-AES128-GCM-SHA256", /* TLS_ECDH_ECDSA_WITH_AES_128_GCM_SHA256 */ - "ECDH-RSA-AES128-GCM-SHA256", /* TLS_ECDH_RSA_WITH_AES_128_GCM_SHA256 */ - "SRP-AES-128-CBC-SHA", /* TLS_SRP_SHA_WITH_AES_128_CBC_SHA */ - "SRP-RSA-AES-128-CBC-SHA", /* TLS_SRP_SHA_RSA_WITH_AES_128_CBC_SHA */ - "SRP-DSS-AES-128-CBC-SHA", /* TLS_SRP_SHA_DSS_WITH_AES_128_CBC_SHA */ - - /* blacklisted AES256 encrpytion ciphers */ - "AES256-SHA", /* TLS_RSA_WITH_AES_256_CBC_SHA */ - "DH-DSS-AES256-SHA", /* TLS_DH_DSS_WITH_AES_256_CBC_SHA */ - "DH-RSA-AES256-SHA", /* TLS_DH_RSA_WITH_AES_256_CBC_SHA */ - "DHE-DSS-AES256-SHA", /* TLS_DHE_DSS_WITH_AES_256_CBC_SHA */ - "DHE-RSA-AES256-SHA", /* TLS_DHE_RSA_WITH_AES_256_CBC_SHA */ - "ADH-AES256-SHA", /* TLS_DH_anon_WITH_AES_256_CBC_SHA */ - "AES256-SHA256", /* TLS_RSA_WITH_AES_256_CBC_SHA256 */ - "DH-DSS-AES256-SHA256", /* TLS_DH_DSS_WITH_AES_256_CBC_SHA256 */ - "DH-RSA-AES256-SHA256", /* TLS_DH_RSA_WITH_AES_256_CBC_SHA256 */ - "DHE-DSS-AES256-SHA256", /* TLS_DHE_DSS_WITH_AES_256_CBC_SHA256 */ - "DHE-RSA-AES256-SHA256", /* TLS_DHE_RSA_WITH_AES_256_CBC_SHA256 */ - "ADH-AES256-SHA256", /* TLS_DH_anon_WITH_AES_256_CBC_SHA256 */ - "ECDH-ECDSA-AES256-SHA", /* TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA */ - "ECDHE-ECDSA-AES256-SHA", /* TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA */ - "ECDH-RSA-AES256-SHA", /* TLS_ECDH_RSA_WITH_AES_256_CBC_SHA */ - "ECDHE-RSA-AES256-SHA", /* TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA */ - "AECDH-AES256-SHA", /* TLS_ECDH_anon_WITH_AES_256_CBC_SHA */ - "ECDHE-ECDSA-AES256-SHA384", /* TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384 */ - "ECDH-ECDSA-AES256-SHA384", /* TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA384 */ - "ECDHE-RSA-AES256-SHA384", /* TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384 */ - "ECDH-RSA-AES256-SHA384", /* TLS_ECDH_RSA_WITH_AES_256_CBC_SHA384 */ - "PSK-AES256-CBC-SHA", /* TLS_PSK_WITH_AES_256_CBC_SHA */ - "DHE-PSK-AES256-CBC-SHA", /* TLS_DHE_PSK_WITH_AES_256_CBC_SHA */ - "RSA-PSK-AES256-CBC-SHA", /* TLS_RSA_PSK_WITH_AES_256_CBC_SHA */ - "PSK-AES256-CBC-SHA384", /* TLS_PSK_WITH_AES_256_CBC_SHA384 */ - "DHE-PSK-AES256-CBC-SHA384", /* TLS_DHE_PSK_WITH_AES_256_CBC_SHA384 */ - "RSA-PSK-AES256-CBC-SHA384", /* TLS_RSA_PSK_WITH_AES_256_CBC_SHA384 */ - "ECDHE-PSK-AES256-CBC-SHA", /* TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA */ - "ECDHE-PSK-AES256-CBC-SHA384", /* TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA384 */ - "SRP-AES-256-CBC-SHA", /* TLS_SRP_SHA_WITH_AES_256_CBC_SHA */ - "SRP-RSA-AES-256-CBC-SHA", /* TLS_SRP_SHA_RSA_WITH_AES_256_CBC_SHA */ - "SRP-DSS-AES-256-CBC-SHA", /* TLS_SRP_SHA_DSS_WITH_AES_256_CBC_SHA */ - "AES256-CCM", /* TLS_RSA_WITH_AES_256_CCM */ - "AES256-CCM8", /* TLS_RSA_WITH_AES_256_CCM_8 */ - "PSK-AES256-CCM", /* TLS_PSK_WITH_AES_256_CCM */ - 
"PSK-AES256-CCM8", /* TLS_PSK_WITH_AES_256_CCM_8 */ - "AES256-GCM-SHA384", /* TLS_RSA_WITH_AES_256_GCM_SHA384 */ - "DH-RSA-AES256-GCM-SHA384", /* TLS_DH_RSA_WITH_AES_256_GCM_SHA384 */ - "DH-DSS-AES256-GCM-SHA384", /* TLS_DH_DSS_WITH_AES_256_GCM_SHA384 */ - "ADH-AES256-GCM-SHA384", /* TLS_DH_anon_WITH_AES_256_GCM_SHA384 */ - "PSK-AES256-GCM-SHA384", /* TLS_PSK_WITH_AES_256_GCM_SHA384 */ - "RSA-PSK-AES256-GCM-SHA384", /* TLS_RSA_PSK_WITH_AES_256_GCM_SHA384 */ - "ECDH-ECDSA-AES256-GCM-SHA384", /* TLS_ECDH_ECDSA_WITH_AES_256_GCM_SHA384 */ - "ECDH-RSA-AES256-GCM-SHA384", /* TLS_ECDH_RSA_WITH_AES_256_GCM_SHA384 */ - - /* blacklisted CAMELLIA128 encrpytion ciphers */ - "CAMELLIA128-SHA", /* TLS_RSA_WITH_CAMELLIA_128_CBC_SHA */ - "DH-DSS-CAMELLIA128-SHA", /* TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA */ - "DH-RSA-CAMELLIA128-SHA", /* TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA */ - "DHE-DSS-CAMELLIA128-SHA", /* TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA */ - "DHE-RSA-CAMELLIA128-SHA", /* TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA */ - "ADH-CAMELLIA128-SHA", /* TLS_DH_anon_WITH_CAMELLIA_128_CBC_SHA */ - "ECDHE-ECDSA-CAMELLIA128-SHA256", /* TLS_ECDHE_ECDSA_WITH_CAMELLIA_128_CBC_SHA256 */ - "ECDH-ECDSA-CAMELLIA128-SHA256", /* TLS_ECDH_ECDSA_WITH_CAMELLIA_128_CBC_SHA256 */ - "ECDHE-RSA-CAMELLIA128-SHA256", /* TLS_ECDHE_RSA_WITH_CAMELLIA_128_CBC_SHA256 */ - "ECDH-RSA-CAMELLIA128-SHA256", /* TLS_ECDH_RSA_WITH_CAMELLIA_128_CBC_SHA256 */ - "PSK-CAMELLIA128-SHA256", /* TLS_PSK_WITH_CAMELLIA_128_CBC_SHA256 */ - "DHE-PSK-CAMELLIA128-SHA256", /* TLS_DHE_PSK_WITH_CAMELLIA_128_CBC_SHA256 */ - "RSA-PSK-CAMELLIA128-SHA256", /* TLS_RSA_PSK_WITH_CAMELLIA_128_CBC_SHA256 */ - "ECDHE-PSK-CAMELLIA128-SHA256", /* TLS_ECDHE_PSK_WITH_CAMELLIA_128_CBC_SHA256 */ - "CAMELLIA128-GCM-SHA256", /* TLS_RSA_WITH_CAMELLIA_128_GCM_SHA256 */ - "DH-RSA-CAMELLIA128-GCM-SHA256", /* TLS_DH_RSA_WITH_CAMELLIA_128_GCM_SHA256 */ - "DH-DSS-CAMELLIA128-GCM-SHA256", /* TLS_DH_DSS_WITH_CAMELLIA_128_GCM_SHA256 */ - "ADH-CAMELLIA128-GCM-SHA256", /* TLS_DH_anon_WITH_CAMELLIA_128_GCM_SHA256 */ - "ECDH-ECDSA-CAMELLIA128-GCM-SHA256",/* TLS_ECDH_ECDSA_WITH_CAMELLIA_128_GCM_SHA256 */ - "ECDH-RSA-CAMELLIA128-GCM-SHA256", /* TLS_ECDH_RSA_WITH_CAMELLIA_128_GCM_SHA256 */ - "PSK-CAMELLIA128-GCM-SHA256", /* TLS_PSK_WITH_CAMELLIA_128_GCM_SHA256 */ - "RSA-PSK-CAMELLIA128-GCM-SHA256", /* TLS_RSA_PSK_WITH_CAMELLIA_128_GCM_SHA256 */ - "CAMELLIA128-SHA256", /* TLS_RSA_WITH_CAMELLIA_128_CBC_SHA256 */ - "DH-DSS-CAMELLIA128-SHA256", /* TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA256 */ - "DH-RSA-CAMELLIA128-SHA256", /* TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA256 */ - "DHE-DSS-CAMELLIA128-SHA256", /* TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA256 */ - "DHE-RSA-CAMELLIA128-SHA256", /* TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA256 */ - "ADH-CAMELLIA128-SHA256", /* TLS_DH_anon_WITH_CAMELLIA_128_CBC_SHA256 */ - - /* blacklisted CAMELLIA256 encrpytion ciphers */ - "CAMELLIA256-SHA", /* TLS_RSA_WITH_CAMELLIA_256_CBC_SHA */ - "DH-RSA-CAMELLIA256-SHA", /* TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA */ - "DH-DSS-CAMELLIA256-SHA", /* TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA */ - "DHE-DSS-CAMELLIA256-SHA", /* TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA */ - "DHE-RSA-CAMELLIA256-SHA", /* TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA */ - "ADH-CAMELLIA256-SHA", /* TLS_DH_anon_WITH_CAMELLIA_256_CBC_SHA */ - "ECDHE-ECDSA-CAMELLIA256-SHA384", /* TLS_ECDHE_ECDSA_WITH_CAMELLIA_256_CBC_SHA384 */ - "ECDH-ECDSA-CAMELLIA256-SHA384", /* TLS_ECDH_ECDSA_WITH_CAMELLIA_256_CBC_SHA384 */ - "ECDHE-RSA-CAMELLIA256-SHA384", /* TLS_ECDHE_RSA_WITH_CAMELLIA_256_CBC_SHA384 
*/ - "ECDH-RSA-CAMELLIA256-SHA384", /* TLS_ECDH_RSA_WITH_CAMELLIA_256_CBC_SHA384 */ - "PSK-CAMELLIA256-SHA384", /* TLS_PSK_WITH_CAMELLIA_256_CBC_SHA384 */ - "DHE-PSK-CAMELLIA256-SHA384", /* TLS_DHE_PSK_WITH_CAMELLIA_256_CBC_SHA384 */ - "RSA-PSK-CAMELLIA256-SHA384", /* TLS_RSA_PSK_WITH_CAMELLIA_256_CBC_SHA384 */ - "ECDHE-PSK-CAMELLIA256-SHA384", /* TLS_ECDHE_PSK_WITH_CAMELLIA_256_CBC_SHA384 */ - "CAMELLIA256-SHA256", /* TLS_RSA_WITH_CAMELLIA_256_CBC_SHA256 */ - "DH-DSS-CAMELLIA256-SHA256", /* TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA256 */ - "DH-RSA-CAMELLIA256-SHA256", /* TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA256 */ - "DHE-DSS-CAMELLIA256-SHA256", /* TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA256 */ - "DHE-RSA-CAMELLIA256-SHA256", /* TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA256 */ - "ADH-CAMELLIA256-SHA256", /* TLS_DH_anon_WITH_CAMELLIA_256_CBC_SHA256 */ - "CAMELLIA256-GCM-SHA384", /* TLS_RSA_WITH_CAMELLIA_256_GCM_SHA384 */ - "DH-RSA-CAMELLIA256-GCM-SHA384", /* TLS_DH_RSA_WITH_CAMELLIA_256_GCM_SHA384 */ - "DH-DSS-CAMELLIA256-GCM-SHA384", /* TLS_DH_DSS_WITH_CAMELLIA_256_GCM_SHA384 */ - "ADH-CAMELLIA256-GCM-SHA384", /* TLS_DH_anon_WITH_CAMELLIA_256_GCM_SHA384 */ - "ECDH-ECDSA-CAMELLIA256-GCM-SHA384",/* TLS_ECDH_ECDSA_WITH_CAMELLIA_256_GCM_SHA384 */ - "ECDH-RSA-CAMELLIA256-GCM-SHA384", /* TLS_ECDH_RSA_WITH_CAMELLIA_256_GCM_SHA384 */ - "PSK-CAMELLIA256-GCM-SHA384", /* TLS_PSK_WITH_CAMELLIA_256_GCM_SHA384 */ - "RSA-PSK-CAMELLIA256-GCM-SHA384", /* TLS_RSA_PSK_WITH_CAMELLIA_256_GCM_SHA384 */ - - /* The blacklisted ARIA encrpytion ciphers */ - "ARIA128-SHA256", /* TLS_RSA_WITH_ARIA_128_CBC_SHA256 */ - "ARIA256-SHA384", /* TLS_RSA_WITH_ARIA_256_CBC_SHA384 */ - "DH-DSS-ARIA128-SHA256", /* TLS_DH_DSS_WITH_ARIA_128_CBC_SHA256 */ - "DH-DSS-ARIA256-SHA384", /* TLS_DH_DSS_WITH_ARIA_256_CBC_SHA384 */ - "DH-RSA-ARIA128-SHA256", /* TLS_DH_RSA_WITH_ARIA_128_CBC_SHA256 */ - "DH-RSA-ARIA256-SHA384", /* TLS_DH_RSA_WITH_ARIA_256_CBC_SHA384 */ - "DHE-DSS-ARIA128-SHA256", /* TLS_DHE_DSS_WITH_ARIA_128_CBC_SHA256 */ - "DHE-DSS-ARIA256-SHA384", /* TLS_DHE_DSS_WITH_ARIA_256_CBC_SHA384 */ - "DHE-RSA-ARIA128-SHA256", /* TLS_DHE_RSA_WITH_ARIA_128_CBC_SHA256 */ - "DHE-RSA-ARIA256-SHA384", /* TLS_DHE_RSA_WITH_ARIA_256_CBC_SHA384 */ - "ADH-ARIA128-SHA256", /* TLS_DH_anon_WITH_ARIA_128_CBC_SHA256 */ - "ADH-ARIA256-SHA384", /* TLS_DH_anon_WITH_ARIA_256_CBC_SHA384 */ - "ECDHE-ECDSA-ARIA128-SHA256", /* TLS_ECDHE_ECDSA_WITH_ARIA_128_CBC_SHA256 */ - "ECDHE-ECDSA-ARIA256-SHA384", /* TLS_ECDHE_ECDSA_WITH_ARIA_256_CBC_SHA384 */ - "ECDH-ECDSA-ARIA128-SHA256", /* TLS_ECDH_ECDSA_WITH_ARIA_128_CBC_SHA256 */ - "ECDH-ECDSA-ARIA256-SHA384", /* TLS_ECDH_ECDSA_WITH_ARIA_256_CBC_SHA384 */ - "ECDHE-RSA-ARIA128-SHA256", /* TLS_ECDHE_RSA_WITH_ARIA_128_CBC_SHA256 */ - "ECDHE-RSA-ARIA256-SHA384", /* TLS_ECDHE_RSA_WITH_ARIA_256_CBC_SHA384 */ - "ECDH-RSA-ARIA128-SHA256", /* TLS_ECDH_RSA_WITH_ARIA_128_CBC_SHA256 */ - "ECDH-RSA-ARIA256-SHA384", /* TLS_ECDH_RSA_WITH_ARIA_256_CBC_SHA384 */ - "ARIA128-GCM-SHA256", /* TLS_RSA_WITH_ARIA_128_GCM_SHA256 */ - "ARIA256-GCM-SHA384", /* TLS_RSA_WITH_ARIA_256_GCM_SHA384 */ - "DH-DSS-ARIA128-GCM-SHA256", /* TLS_DH_DSS_WITH_ARIA_128_GCM_SHA256 */ - "DH-DSS-ARIA256-GCM-SHA384", /* TLS_DH_DSS_WITH_ARIA_256_GCM_SHA384 */ - "DH-RSA-ARIA128-GCM-SHA256", /* TLS_DH_RSA_WITH_ARIA_128_GCM_SHA256 */ - "DH-RSA-ARIA256-GCM-SHA384", /* TLS_DH_RSA_WITH_ARIA_256_GCM_SHA384 */ - "ADH-ARIA128-GCM-SHA256", /* TLS_DH_anon_WITH_ARIA_128_GCM_SHA256 */ - "ADH-ARIA256-GCM-SHA384", /* TLS_DH_anon_WITH_ARIA_256_GCM_SHA384 */ - 
"ECDH-ECDSA-ARIA128-GCM-SHA256", /* TLS_ECDH_ECDSA_WITH_ARIA_128_GCM_SHA256 */ - "ECDH-ECDSA-ARIA256-GCM-SHA384", /* TLS_ECDH_ECDSA_WITH_ARIA_256_GCM_SHA384 */ - "ECDH-RSA-ARIA128-GCM-SHA256", /* TLS_ECDH_RSA_WITH_ARIA_128_GCM_SHA256 */ - "ECDH-RSA-ARIA256-GCM-SHA384", /* TLS_ECDH_RSA_WITH_ARIA_256_GCM_SHA384 */ - "PSK-ARIA128-SHA256", /* TLS_PSK_WITH_ARIA_128_CBC_SHA256 */ - "PSK-ARIA256-SHA384", /* TLS_PSK_WITH_ARIA_256_CBC_SHA384 */ - "DHE-PSK-ARIA128-SHA256", /* TLS_DHE_PSK_WITH_ARIA_128_CBC_SHA256 */ - "DHE-PSK-ARIA256-SHA384", /* TLS_DHE_PSK_WITH_ARIA_256_CBC_SHA384 */ - "RSA-PSK-ARIA128-SHA256", /* TLS_RSA_PSK_WITH_ARIA_128_CBC_SHA256 */ - "RSA-PSK-ARIA256-SHA384", /* TLS_RSA_PSK_WITH_ARIA_256_CBC_SHA384 */ - "ARIA128-GCM-SHA256", /* TLS_PSK_WITH_ARIA_128_GCM_SHA256 */ - "ARIA256-GCM-SHA384", /* TLS_PSK_WITH_ARIA_256_GCM_SHA384 */ - "RSA-PSK-ARIA128-GCM-SHA256", /* TLS_RSA_PSK_WITH_ARIA_128_GCM_SHA256 */ - "RSA-PSK-ARIA256-GCM-SHA384", /* TLS_RSA_PSK_WITH_ARIA_256_GCM_SHA384 */ - "ECDHE-PSK-ARIA128-SHA256", /* TLS_ECDHE_PSK_WITH_ARIA_128_CBC_SHA256 */ - "ECDHE-PSK-ARIA256-SHA384", /* TLS_ECDHE_PSK_WITH_ARIA_256_CBC_SHA384 */ - - /* blacklisted SEED encryptions */ - "SEED-SHA", /*TLS_RSA_WITH_SEED_CBC_SHA */ - "DH-DSS-SEED-SHA", /* TLS_DH_DSS_WITH_SEED_CBC_SHA */ - "DH-RSA-SEED-SHA", /* TLS_DH_RSA_WITH_SEED_CBC_SHA */ - "DHE-DSS-SEED-SHA", /* TLS_DHE_DSS_WITH_SEED_CBC_SHA */ - "DHE-RSA-SEED-SHA", /* TLS_DHE_RSA_WITH_SEED_CBC_SHA */ - "ADH-SEED-SHA", /* TLS_DH_anon_WITH_SEED_CBC_SHA */ - - /* blacklisted KRB5 ciphers */ - "KRB5-DES-CBC-SHA", /* TLS_KRB5_WITH_DES_CBC_SHA */ - "KRB5-DES-CBC3-SHA", /* TLS_KRB5_WITH_3DES_EDE_CBC_SHA */ - "KRB5-IDEA-CBC-SHA", /* TLS_KRB5_WITH_IDEA_CBC_SHA */ - "KRB5-DES-CBC-MD5", /* TLS_KRB5_WITH_DES_CBC_MD5 */ - "KRB5-DES-CBC3-MD5", /* TLS_KRB5_WITH_3DES_EDE_CBC_MD5 */ - "KRB5-IDEA-CBC-MD5", /* TLS_KRB5_WITH_IDEA_CBC_MD5 */ - "EXP-KRB5-DES-CBC-SHA", /* TLS_KRB5_EXPORT_WITH_DES_CBC_40_SHA */ - "EXP-KRB5-DES-CBC-MD5", /* TLS_KRB5_EXPORT_WITH_DES_CBC_40_MD5 */ - "EXP-KRB5-RC2-CBC-SHA", /* TLS_KRB5_EXPORT_WITH_RC2_CBC_40_SHA */ - "EXP-KRB5-RC2-CBC-MD5", /* TLS_KRB5_EXPORT_WITH_RC2_CBC_40_MD5 */ - - /* blacklisted exoticas */ - "DHE-DSS-CBC-SHA", /* TLS_DHE_DSS_WITH_DES_CBC_SHA */ - "IDEA-CBC-SHA", /* TLS_RSA_WITH_IDEA_CBC_SHA */ - - /* not really sure if the following names are correct */ - "SSL3_CK_SCSV", /* TLS_EMPTY_RENEGOTIATION_INFO_SCSV */ - "SSL3_CK_FALLBACK_SCSV" -}; -static size_t RFC7540_names_LEN = sizeof(RFC7540_names)/sizeof(RFC7540_names[0]); - - -static apr_hash_t *BLCNames; - -static void cipher_init(apr_pool_t *pool) -{ - apr_hash_t *hash = apr_hash_make(pool); - const char *source; - unsigned int i; - - source = "rfc7540"; - for (i = 0; i < RFC7540_names_LEN; ++i) { - apr_hash_set(hash, RFC7540_names[i], APR_HASH_KEY_STRING, source); - } - - BLCNames = hash; -} - -static int cipher_is_blacklisted(const char *cipher, const char **psource) -{ - *psource = apr_hash_get(BLCNames, cipher, APR_HASH_KEY_STRING); - return !!*psource; -} - -/******************************************************************************* - * Hooks for processing incoming connections: - * - process_conn take over connection in case of h2 - */ -static int h2_h2_process_conn(conn_rec* c); -static int h2_h2_pre_close_conn(conn_rec* c); -static int h2_h2_post_read_req(request_rec *r); -static int h2_h2_late_fixups(request_rec *r); - -/******************************************************************************* - * Once per lifetime init, retrieve optional functions - */ 
-apr_status_t h2_h2_init(apr_pool_t *pool, server_rec *s) -{ - (void)pool; - ap_log_error(APLOG_MARK, APLOG_TRACE1, 0, s, "h2_h2, child_init"); - opt_ssl_is_https = APR_RETRIEVE_OPTIONAL_FN(ssl_is_https); - opt_ssl_var_lookup = APR_RETRIEVE_OPTIONAL_FN(ssl_var_lookup); - - if (!opt_ssl_is_https || !opt_ssl_var_lookup) { - ap_log_error(APLOG_MARK, APLOG_WARNING, 0, s, - APLOGNO(02951) "mod_ssl does not seem to be enabled"); - } - - cipher_init(pool); - - return APR_SUCCESS; -} - -int h2_h2_is_tls(conn_rec *c) -{ - return opt_ssl_is_https && opt_ssl_is_https(c); -} - -int h2_is_acceptable_connection(conn_rec *c, int require_all) -{ - int is_tls = h2_h2_is_tls(c); - const h2_config *cfg = h2_config_get(c); - - if (is_tls && h2_config_geti(cfg, H2_CONF_MODERN_TLS_ONLY) > 0) { - /* Check TLS connection for modern TLS parameters, as defined in - * RFC 7540 and https://wiki.mozilla.org/Security/Server_Side_TLS#Modern_compatibility - */ - apr_pool_t *pool = c->pool; - server_rec *s = c->base_server; - char *val; - - if (!opt_ssl_var_lookup) { - /* unable to check */ - return 0; - } - - /* Need Tlsv1.2 or higher, rfc 7540, ch. 9.2 - */ - val = opt_ssl_var_lookup(pool, s, c, NULL, (char*)"SSL_PROTOCOL"); - if (val && *val) { - if (strncmp("TLS", val, 3) - || !strcmp("TLSv1", val) - || !strcmp("TLSv1.1", val)) { - ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, c, APLOGNO(03050) - "h2_h2(%ld): tls protocol not suitable: %s", - (long)c->id, val); - return 0; - } - } - else if (require_all) { - ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, c, APLOGNO(03051) - "h2_h2(%ld): tls protocol is indetermined", (long)c->id); - return 0; - } - - /* Check TLS cipher blacklist - */ - val = opt_ssl_var_lookup(pool, s, c, NULL, (char*)"SSL_CIPHER"); - if (val && *val) { - const char *source; - if (cipher_is_blacklisted(val, &source)) { - ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, c, APLOGNO(03052) - "h2_h2(%ld): tls cipher %s blacklisted by %s", - (long)c->id, val, source); - return 0; - } - } - else if (require_all) { - ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, c, APLOGNO(03053) - "h2_h2(%ld): tls cipher is indetermined", (long)c->id); - return 0; - } - } - return 1; -} - -int h2_allows_h2_direct(conn_rec *c) -{ - const h2_config *cfg = h2_config_get(c); - int is_tls = h2_h2_is_tls(c); - const char *needed_protocol = is_tls? "h2" : "h2c"; - int h2_direct = h2_config_geti(cfg, H2_CONF_DIRECT); - - if (h2_direct < 0) { - h2_direct = is_tls? 0 : 1; - } - return (h2_direct - && ap_is_allowed_protocol(c, NULL, NULL, needed_protocol)); -} - -int h2_allows_h2_upgrade(conn_rec *c) -{ - const h2_config *cfg = h2_config_get(c); - int h2_upgrade = h2_config_geti(cfg, H2_CONF_UPGRADE); - - return h2_upgrade > 0 || (h2_upgrade < 0 && !h2_h2_is_tls(c)); -} - -/******************************************************************************* - * Register various hooks - */ -static const char* const mod_ssl[] = { "mod_ssl.c", NULL}; -static const char* const mod_reqtimeout[] = { "mod_reqtimeout.c", NULL}; - -void h2_h2_register_hooks(void) -{ - /* Our main processing needs to run quite late. Definitely after mod_ssl, - * as we need its connection filters, but also before reqtimeout as its - * method of timeouts is specific to HTTP/1.1 (as of now). - * The core HTTP/1 processing run as REALLY_LAST, so we will have - * a chance to take over before it. - */ - ap_hook_process_connection(h2_h2_process_conn, - mod_ssl, mod_reqtimeout, APR_HOOK_LAST); - - /* One last chance to properly say goodbye if we have not done so - * already. 
*/ - ap_hook_pre_close_connection(h2_h2_pre_close_conn, NULL, mod_ssl, APR_HOOK_LAST); - - /* With "H2SerializeHeaders On", we install the filter in this hook - * that parses the response. This needs to happen before any other post - * read function terminates the request with an error. Otherwise we will - * never see the response. - */ - ap_hook_post_read_request(h2_h2_post_read_req, NULL, NULL, APR_HOOK_REALLY_FIRST); - ap_hook_fixups(h2_h2_late_fixups, NULL, NULL, APR_HOOK_LAST); - - /* special bucket type transfer through a h2_bucket_beam */ - h2_register_bucket_beamer(h2_bucket_headers_beam); - h2_register_bucket_beamer(h2_bucket_observer_beam); -} - -int h2_h2_process_conn(conn_rec* c) -{ - apr_status_t status; - h2_ctx *ctx; - - if (c->master) { - return DECLINED; - } - - ctx = h2_ctx_get(c, 0); - ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c, "h2_h2, process_conn"); - if (h2_ctx_is_task(ctx)) { - /* our stream pseudo connection */ - ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, c, "h2_h2, task, declined"); - return DECLINED; - } - - if (!ctx && c->keepalives == 0) { - const char *proto = ap_get_protocol(c); - - if (APLOGctrace1(c)) { - ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c, "h2_h2, process_conn, " - "new connection using protocol '%s', direct=%d, " - "tls acceptable=%d", proto, h2_allows_h2_direct(c), - h2_is_acceptable_connection(c, 1)); - } - - if (!strcmp(AP_PROTOCOL_HTTP1, proto) - && h2_allows_h2_direct(c) - && h2_is_acceptable_connection(c, 1)) { - /* Fresh connection still is on http/1.1 and H2Direct is enabled. - * Otherwise connection is in a fully acceptable state. - * -> peek at the first 24 incoming bytes - */ - apr_bucket_brigade *temp; - char *s = NULL; - apr_size_t slen; - - temp = apr_brigade_create(c->pool, c->bucket_alloc); - status = ap_get_brigade(c->input_filters, temp, - AP_MODE_SPECULATIVE, APR_BLOCK_READ, 24); - - if (status != APR_SUCCESS) { - ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, c, APLOGNO(03054) - "h2_h2, error reading 24 bytes speculative"); - apr_brigade_destroy(temp); - return DECLINED; - } - - apr_brigade_pflatten(temp, &s, &slen, c->pool); - if ((slen >= 24) && !memcmp(H2_MAGIC_TOKEN, s, 24)) { - ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c, - "h2_h2, direct mode detected"); - if (!ctx) { - ctx = h2_ctx_get(c, 1); - } - h2_ctx_protocol_set(ctx, h2_h2_is_tls(c)? "h2" : "h2c"); - } - else if (APLOGctrace2(c)) { - ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, c, - "h2_h2, not detected in %d bytes(base64): %s", - (int)slen, h2_util_base64url_encode(s, slen, c->pool)); - } - - apr_brigade_destroy(temp); - } - } - - if (ctx) { - ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c, "process_conn"); - if (!h2_ctx_session_get(ctx)) { - status = h2_conn_setup(ctx, c, NULL); - ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, c, "conn_setup"); - if (status != APR_SUCCESS) { - h2_ctx_clear(c); - return !OK; - } - } - h2_conn_run(ctx, c); - return OK; - } - - ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c, "h2_h2, declined"); - return DECLINED; -} - -static int h2_h2_pre_close_conn(conn_rec *c) -{ - h2_ctx *ctx; - - /* slave connection? */ - if (c->master) { - return DECLINED; - } - - ctx = h2_ctx_get(c, 0); - if (ctx) { - /* If the session has been closed correctly already, we will not - * find a h2_ctx here. The presence indicates that the session - * is still ongoing. 
*/ - return h2_conn_pre_close(ctx, c); - } - return DECLINED; -} - -static void check_push(request_rec *r, const char *tag) -{ - const h2_config *conf = h2_config_rget(r); - if (!r->expecting_100 - && conf && conf->push_list && conf->push_list->nelts > 0) { - int i, old_status; - const char *old_line; - ap_log_rerror(APLOG_MARK, APLOG_TRACE1, 0, r, - "%s, early announcing %d resources for push", - tag, conf->push_list->nelts); - for (i = 0; i < conf->push_list->nelts; ++i) { - h2_push_res *push = &APR_ARRAY_IDX(conf->push_list, i, h2_push_res); - apr_table_add(r->headers_out, "Link", - apr_psprintf(r->pool, "<%s>; rel=preload%s", - push->uri_ref, push->critical? "; critical" : "")); - } - old_status = r->status; - old_line = r->status_line; - r->status = 103; - r->status_line = "103 Early Hints"; - ap_send_interim_response(r, 1); - r->status = old_status; - r->status_line = old_line; - } -} - -static int h2_h2_post_read_req(request_rec *r) -{ - /* slave connection? */ - if (r->connection->master) { - h2_ctx *ctx = h2_ctx_rget(r); - struct h2_task *task = h2_ctx_get_task(ctx); - /* This hook will get called twice on internal redirects. Take care - * that we manipulate filters only once. */ - if (task && !task->filters_set) { - ap_filter_t *f; - ap_log_rerror(APLOG_MARK, APLOG_TRACE3, 0, r, - "h2_task(%s): adding request filters", task->id); - - /* setup the correct filters to process the request for h2 */ - ap_add_input_filter("H2_REQUEST", task, r, r->connection); - - /* replace the core http filter that formats response headers - * in HTTP/1 with our own that collects status and headers */ - ap_remove_output_filter_byhandle(r->output_filters, "HTTP_HEADER"); - ap_add_output_filter("H2_RESPONSE", task, r, r->connection); - - for (f = r->input_filters; f; f = f->next) { - if (!strcmp("H2_SLAVE_IN", f->frec->name)) { - f->r = r; - break; - } - } - ap_add_output_filter("H2_TRAILERS_OUT", task, r, r->connection); - task->filters_set = 1; - } - } - return DECLINED; -} - -static int h2_h2_late_fixups(request_rec *r) -{ - /* slave connection? */ - if (r->connection->master) { - h2_ctx *ctx = h2_ctx_rget(r); - struct h2_task *task = h2_ctx_get_task(ctx); - if (task) { - /* check if we copy vs. setaside files in this location */ - task->output.copy_files = h2_config_geti(h2_config_rget(r), - H2_CONF_COPY_FILES); - if (task->output.copy_files) { - ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, task->c, - "h2_slave_out(%s): copy_files on", task->id); - h2_beam_on_file_beam(task->output.beam, h2_beam_no_files, NULL); - } - check_push(r, "late_fixup"); - } - } - return DECLINED; -} - diff --git a/modules/http2/h2_h2.h b/modules/http2/h2_h2.h deleted file mode 100644 index 367823d..0000000 --- a/modules/http2/h2_h2.h +++ /dev/null @@ -1,79 +0,0 @@ -/* Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef __mod_h2__h2_h2__ -#define __mod_h2__h2_h2__ - -/** - * List of ALPN protocol identifiers that we support in cleartext - * negotiations. NULL terminated. - */ -extern const char *h2_clear_protos[]; - -/** - * List of ALPN protocol identifiers that we support in TLS encrypted - * negotiations. NULL terminated. - */ -extern const char *h2_tls_protos[]; - -/** - * Provide a user readable description of the HTTP/2 error code- - * @param h2_error http/2 error code, as in rfc 7540, ch. 7 - * @return textual description of code or that it is unknown. - */ -const char *h2_h2_err_description(unsigned int h2_error); - -/* - * One time, post config initialization. - */ -apr_status_t h2_h2_init(apr_pool_t *pool, server_rec *s); - -/* Is the connection a TLS connection? - */ -int h2_h2_is_tls(conn_rec *c); - -/* Register apache hooks for h2 protocol - */ -void h2_h2_register_hooks(void); - -/** - * Check if the given connection fulfills the requirements as configured. - * @param c the connection - * @param require_all != 0 iff any missing connection properties make - * the test fail. For example, a cipher might not have been selected while - * the handshake is still ongoing. - * @return != 0 iff connection requirements are met - */ -int h2_is_acceptable_connection(conn_rec *c, int require_all); - -/** - * Check if the "direct" HTTP/2 mode of protocol handling is enabled - * for the given connection. - * @param c the connection to check - * @return != 0 iff direct mode is enabled - */ -int h2_allows_h2_direct(conn_rec *c); - -/** - * Check if the "Upgrade" HTTP/1.1 mode of protocol switching is enabled - * for the given connection. - * @param c the connection to check - * @return != 0 iff Upgrade switching is enabled - */ -int h2_allows_h2_upgrade(conn_rec *c); - - -#endif /* defined(__mod_h2__h2_h2__) */ diff --git a/modules/http2/h2_headers.c b/modules/http2/h2_headers.c index 8b7add6..d9b3fd0 100644 --- a/modules/http2/h2_headers.c +++ b/modules/http2/h2_headers.c @@ -27,11 +27,13 @@ #include #include "h2_private.h" -#include "h2_h2.h" +#include "h2_protocol.h" +#include "h2_config.h" #include "h2_util.h" #include "h2_request.h" #include "h2_headers.h" +#if !AP_HAS_RESPONSE_BUCKETS static int is_unsafe(server_rec *s) { @@ -63,6 +65,7 @@ apr_bucket * h2_bucket_headers_make(apr_bucket *b, h2_headers *r) b = apr_bucket_shared_make(b, br, 0, 0); b->type = &h2_bucket_type_headers; + b->length = 0; return b; } @@ -87,32 +90,35 @@ h2_headers *h2_bucket_headers_get(apr_bucket *b) return NULL; } +static void bucket_destroy(void *data) +{ + h2_bucket_headers *h = data; + + if (apr_bucket_shared_destroy(h)) { + apr_bucket_free(h); + } +} + const apr_bucket_type_t h2_bucket_type_headers = { "H2HEADERS", 5, APR_BUCKET_METADATA, - apr_bucket_destroy_noop, + bucket_destroy, bucket_read, apr_bucket_setaside_noop, apr_bucket_split_notimpl, apr_bucket_shared_copy }; -apr_bucket *h2_bucket_headers_beam(struct h2_bucket_beam *beam, - apr_bucket_brigade *dest, - const apr_bucket *src) +apr_bucket *h2_bucket_headers_clone(apr_bucket *b, apr_pool_t *pool, + apr_bucket_alloc_t *list) { - if (H2_BUCKET_IS_HEADERS(src)) { - h2_headers *r = ((h2_bucket_headers *)src->data)->headers; - apr_bucket *b = h2_bucket_headers_create(dest->bucket_alloc, r); - APR_BRIGADE_INSERT_TAIL(dest, b); - return b; - } - return NULL; + h2_headers *hdrs = ((h2_bucket_headers *)b->data)->headers; + return 
h2_bucket_headers_create(list, h2_headers_clone(pool, hdrs)); } -h2_headers *h2_headers_create(int status, apr_table_t *headers_in, - apr_table_t *notes, apr_off_t raw_bytes, - apr_pool_t *pool) +h2_headers *h2_headers_create(int status, const apr_table_t *headers_in, + const apr_table_t *notes, apr_off_t raw_bytes, + apr_pool_t *pool) { h2_headers *headers = apr_pcalloc(pool, sizeof(h2_headers)); headers->status = status; @@ -123,26 +129,55 @@ h2_headers *h2_headers_create(int status, apr_table_t *headers_in, return headers; } +static int add_header_lengths(void *ctx, const char *name, const char *value) +{ + apr_size_t *plen = ctx; + *plen += strlen(name) + strlen(value); + return 1; +} + +apr_size_t h2_headers_length(h2_headers *headers) +{ + apr_size_t len = 0; + apr_table_do(add_header_lengths, &len, headers->headers, NULL); + return len; +} + +apr_size_t h2_bucket_headers_headers_length(apr_bucket *b) +{ + h2_headers *h = h2_bucket_headers_get(b); + return h? h2_headers_length(h) : 0; +} + h2_headers *h2_headers_rcreate(request_rec *r, int status, - apr_table_t *header, apr_pool_t *pool) + const apr_table_t *header, apr_pool_t *pool) { h2_headers *headers = h2_headers_create(status, header, r->notes, 0, pool); + ap_log_rerror(APLOG_MARK, APLOG_TRACE1, headers->status, r, + "h2_headers_rcreate(%ld): status=%d", + (long)r->connection->id, status); if (headers->status == HTTP_FORBIDDEN) { - const char *cause = apr_table_get(r->notes, "ssl-renegotiate-forbidden"); - if (cause) { - /* This request triggered a TLS renegotiation that is now allowed - * in HTTP/2. Tell the client that it should use HTTP/1.1 for this. - */ - ap_log_rerror(APLOG_MARK, APLOG_DEBUG, headers->status, r, - APLOGNO(03061) - "h2_headers(%ld): renegotiate forbidden, cause: %s", - (long)r->connection->id, cause); - headers->status = H2_ERR_HTTP_1_1_REQUIRED; + request_rec *r_prev; + for (r_prev = r; r_prev != NULL; r_prev = r_prev->prev) { + const char *cause = apr_table_get(r_prev->notes, "ssl-renegotiate-forbidden"); + if (cause) { + /* This request triggered a TLS renegotiation that is not allowed + * in HTTP/2. Tell the client that it should use HTTP/1.1 for this. 
+ */ + ap_log_rerror(APLOG_MARK, APLOG_DEBUG, headers->status, r, + APLOGNO(10399) + "h2_headers(%ld): renegotiate forbidden, cause: %s", + (long)r->connection->id, cause); + headers->status = H2_ERR_HTTP_1_1_REQUIRED; + break; + } } } if (is_unsafe(r->server)) { - apr_table_setn(headers->notes, H2_HDR_CONFORMANCE, - H2_HDR_CONFORMANCE_UNSAFE); + apr_table_setn(headers->notes, H2_HDR_CONFORMANCE, H2_HDR_CONFORMANCE_UNSAFE); + } + if (h2_config_rgeti(r, H2_CONF_PUSH) == 0 && h2_config_sgeti(r->server, H2_CONF_PUSH) != 0) { + apr_table_setn(headers->notes, H2_PUSH_MODE_NOTE, "0"); } return headers; } @@ -152,6 +187,11 @@ h2_headers *h2_headers_copy(apr_pool_t *pool, h2_headers *h) return h2_headers_create(h->status, h->headers, h->notes, h->raw_bytes, pool); } +h2_headers *h2_headers_clone(apr_pool_t *pool, h2_headers *h) +{ + return h2_headers_create(h->status, h->headers, h->notes, h->raw_bytes, pool); +} + h2_headers *h2_headers_die(apr_status_t type, const h2_request *req, apr_pool_t *pool) { @@ -171,8 +211,9 @@ h2_headers *h2_headers_die(apr_status_t type, return headers; } -int h2_headers_are_response(h2_headers *headers) +int h2_headers_are_final_response(h2_headers *headers) { return headers->status >= 200; } +#endif /* !AP_HAS_RESPONSE_BUCKETS */ diff --git a/modules/http2/h2_headers.h b/modules/http2/h2_headers.h index 840e8c4..3d78dc3 100644 --- a/modules/http2/h2_headers.h +++ b/modules/http2/h2_headers.h @@ -19,8 +19,19 @@ #include "h2.h" +#if !AP_HAS_RESPONSE_BUCKETS + struct h2_bucket_beam; +typedef struct h2_headers h2_headers; +struct h2_headers { + int status; + apr_table_t *headers; + apr_table_t *notes; + apr_off_t raw_bytes; /* RAW network bytes that generated this request - if known. */ +}; + + extern const apr_bucket_type_t h2_bucket_type_headers; #define H2_BUCKET_IS_HEADERS(e) (e->type == &h2_bucket_type_headers) @@ -32,10 +43,6 @@ apr_bucket * h2_bucket_headers_create(apr_bucket_alloc_t *list, h2_headers *h2_bucket_headers_get(apr_bucket *b); -apr_bucket *h2_bucket_headers_beam(struct h2_bucket_beam *beam, - apr_bucket_brigade *dest, - const apr_bucket *src); - /** * Create the headers from the given status and headers * @param status the headers status @@ -44,8 +51,8 @@ apr_bucket *h2_bucket_headers_beam(struct h2_bucket_beam *beam, * @param raw_bytes the raw network bytes (if known) used to transmit these * @param pool the memory pool to use */ -h2_headers *h2_headers_create(int status, apr_table_t *header, - apr_table_t *notes, apr_off_t raw_bytes, +h2_headers *h2_headers_create(int status, const apr_table_t *header, + const apr_table_t *notes, apr_off_t raw_bytes, apr_pool_t *pool); /** @@ -56,24 +63,45 @@ h2_headers *h2_headers_create(int status, apr_table_t *header, * @param pool the memory pool to use */ h2_headers *h2_headers_rcreate(request_rec *r, int status, - apr_table_t *header, apr_pool_t *pool); + const apr_table_t *header, apr_pool_t *pool); /** - * Clone the headers into another pool. This will not copy any + * Copy the headers into another pool. This will not copy any * header strings. */ h2_headers *h2_headers_copy(apr_pool_t *pool, h2_headers *h); +/** + * Clone the headers into another pool. This will also clone any + * header strings. + */ +h2_headers *h2_headers_clone(apr_pool_t *pool, h2_headers *h); + /** * Create the headers for the given error. 
- * @param stream_id id of the stream to create the headers for * @param type the error code * @param req the original h2_request * @param pool the memory pool to use */ h2_headers *h2_headers_die(apr_status_t type, - const struct h2_request *req, apr_pool_t *pool); + const struct h2_request *req, apr_pool_t *pool); + +int h2_headers_are_final_response(h2_headers *headers); + +/** + * Give the number of bytes of all contained header strings. + */ +apr_size_t h2_headers_length(h2_headers *headers); + +/** + * For H2HEADER buckets, return the length of all contained header strings. + * For all other buckets, return 0. + */ +apr_size_t h2_bucket_headers_headers_length(apr_bucket *b); + +apr_bucket *h2_bucket_headers_clone(apr_bucket *b, apr_pool_t *pool, + apr_bucket_alloc_t *list); -int h2_headers_are_response(h2_headers *headers); +#endif /* !AP_HAS_RESPONSE_BUCKETS */ #endif /* defined(__mod_h2__h2_headers__) */ diff --git a/modules/http2/h2_mplx.c b/modules/http2/h2_mplx.c index 0fae117..2aeea42 100644 --- a/modules/http2/h2_mplx.c +++ b/modules/http2/h2_mplx.c @@ -26,7 +26,9 @@ #include #include +#include #include +#include #include @@ -36,15 +38,14 @@ #include "h2_private.h" #include "h2_bucket_beam.h" #include "h2_config.h" -#include "h2_conn.h" -#include "h2_ctx.h" -#include "h2_h2.h" +#include "h2_c1.h" +#include "h2_conn_ctx.h" +#include "h2_protocol.h" #include "h2_mplx.h" -#include "h2_ngn_shed.h" #include "h2_request.h" #include "h2_stream.h" #include "h2_session.h" -#include "h2_task.h" +#include "h2_c2.h" #include "h2_workers.h" #include "h2_util.h" @@ -54,16 +55,40 @@ typedef struct { h2_mplx *m; h2_stream *stream; apr_time_t now; + apr_size_t count; } stream_iter_ctx; -apr_status_t h2_mplx_child_init(apr_pool_t *pool, server_rec *s) +static conn_rec *c2_prod_next(void *baton, int *phas_more); +static void c2_prod_done(void *baton, conn_rec *c2); +static void workers_shutdown(void *baton, int graceful); + +static void s_mplx_be_happy(h2_mplx *m, conn_rec *c, h2_conn_ctx_t *conn_ctx); +static void m_be_annoyed(h2_mplx *m); + +static apr_status_t mplx_pollset_create(h2_mplx *m); +static apr_status_t mplx_pollset_poll(h2_mplx *m, apr_interval_time_t timeout, + stream_ev_callback *on_stream_input, + stream_ev_callback *on_stream_output, + void *on_ctx); + +static apr_pool_t *pchild; + +/* APR callback invoked if allocation fails. */ +static int abort_on_oom(int retcode) { + ap_abort_on_oom(); + return retcode; /* unreachable, hopefully. 
*/ +} + +apr_status_t h2_mplx_c1_child_init(apr_pool_t *pool, server_rec *s) +{ + pchild = pool; return APR_SUCCESS; } #define H2_MPLX_ENTER(m) \ - do { apr_status_t rv; if ((rv = apr_thread_mutex_lock(m->lock)) != APR_SUCCESS) {\ - return rv;\ + do { apr_status_t rv_lock; if ((rv_lock = apr_thread_mutex_lock(m->lock)) != APR_SUCCESS) {\ + return rv_lock;\ } } while(0) #define H2_MPLX_LEAVE(m) \ @@ -72,71 +97,150 @@ apr_status_t h2_mplx_child_init(apr_pool_t *pool, server_rec *s) #define H2_MPLX_ENTER_ALWAYS(m) \ apr_thread_mutex_lock(m->lock) -#define H2_MPLX_ENTER_MAYBE(m, lock) \ - if (lock) apr_thread_mutex_lock(m->lock) - -#define H2_MPLX_LEAVE_MAYBE(m, lock) \ - if (lock) apr_thread_mutex_unlock(m->lock) +#define H2_MPLX_ENTER_MAYBE(m, dolock) \ + if (dolock) apr_thread_mutex_lock(m->lock) -static void check_data_for(h2_mplx *m, h2_stream *stream, int lock); +#define H2_MPLX_LEAVE_MAYBE(m, dolock) \ + if (dolock) apr_thread_mutex_unlock(m->lock) -static void stream_output_consumed(void *ctx, - h2_bucket_beam *beam, apr_off_t length) +static void c1_input_consumed(void *ctx, h2_bucket_beam *beam, apr_off_t length) { - h2_stream *stream = ctx; - h2_task *task = stream->task; - - if (length > 0 && task && task->assigned) { - h2_req_engine_out_consumed(task->assigned, task->c, length); - } + h2_stream_in_consumed(ctx, length); } -static void stream_input_ev(void *ctx, h2_bucket_beam *beam) +static int stream_is_running(h2_stream *stream) { - h2_stream *stream = ctx; - h2_mplx *m = stream->session->mplx; - apr_atomic_set32(&m->event_pending, 1); + h2_conn_ctx_t *conn_ctx = h2_conn_ctx_get(stream->c2); + return conn_ctx && apr_atomic_read32(&conn_ctx->started) != 0 + && apr_atomic_read32(&conn_ctx->done) == 0; } -static void stream_input_consumed(void *ctx, h2_bucket_beam *beam, apr_off_t length) +int h2_mplx_c1_stream_is_running(h2_mplx *m, h2_stream *stream) { - h2_stream_in_consumed(ctx, length); + int rv; + + H2_MPLX_ENTER(m); + rv = stream_is_running(stream); + H2_MPLX_LEAVE(m); + return rv; } -static void stream_joined(h2_mplx *m, h2_stream *stream) +static void c1c2_stream_joined(h2_mplx *m, h2_stream *stream) { - ap_assert(!stream->task || stream->task->worker_done); + ap_assert(!stream_is_running(stream)); h2_ihash_remove(m->shold, stream->id); - h2_ihash_add(m->spurge, stream); + APR_ARRAY_PUSH(m->spurge, h2_stream *) = stream; } -static void stream_cleanup(h2_mplx *m, h2_stream *stream) +static void m_stream_cleanup(h2_mplx *m, h2_stream *stream) { - ap_assert(stream->state == H2_SS_CLEANUP); + h2_conn_ctx_t *c2_ctx = h2_conn_ctx_get(stream->c2); - if (stream->input) { - h2_beam_on_consumed(stream->input, NULL, NULL, NULL); - h2_beam_abort(stream->input); - } - if (stream->output) { - h2_beam_on_produced(stream->output, NULL, NULL); - h2_beam_leave(stream->output); + ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, m->c1, + H2_STRM_MSG(stream, "cleanup, unsubscribing from beam events")); + if (c2_ctx) { + if (c2_ctx->beam_out) { + h2_beam_on_was_empty(c2_ctx->beam_out, NULL, NULL); + } + if (c2_ctx->beam_in) { + h2_beam_on_send(c2_ctx->beam_in, NULL, NULL); + h2_beam_on_received(c2_ctx->beam_in, NULL, NULL); + h2_beam_on_eagain(c2_ctx->beam_in, NULL, NULL); + h2_beam_on_consumed(c2_ctx->beam_in, NULL, NULL); + } } - - h2_stream_cleanup(stream); + ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, m->c1, + H2_STRM_MSG(stream, "cleanup, removing from registries")); + ap_assert(stream->state == H2_SS_CLEANUP); + h2_stream_cleanup(stream); h2_ihash_remove(m->streams, stream->id); 
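/*
 * The stream_is_running() check above decides whether a stream's secondary
 * connection is still busy by reading two atomic flags on its connection
 * context: "started" (a worker picked it up) and "done" (the worker
 * finished). A minimal sketch of that flag pattern with plain APR atomics;
 * the struct and function names are illustrative, not part of the patch.
 */
#include <apr_atomic.h>

typedef struct {
    volatile apr_uint32_t started;
    volatile apr_uint32_t done;
} work_state;

static void worker_begin(work_state *ws) { apr_atomic_set32(&ws->started, 1); }
static void worker_end(work_state *ws)   { apr_atomic_set32(&ws->done, 1); }

static int work_is_running(work_state *ws)
{
    /* running = picked up by a worker, but not yet marked done */
    return apr_atomic_read32(&ws->started) != 0
        && apr_atomic_read32(&ws->done) == 0;
}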
h2_iq_remove(m->q, stream->id); - h2_ififo_remove(m->readyq, stream->id); - h2_ihash_add(m->shold, stream); - - if (!stream->task || stream->task->worker_done) { - stream_joined(m, stream); + + if (c2_ctx) { + if (!stream_is_running(stream)) { + ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, m->c1, + H2_STRM_MSG(stream, "cleanup, c2 is done, move to spurge")); + /* processing has finished */ + APR_ARRAY_PUSH(m->spurge, h2_stream *) = stream; + } + else { + ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, m->c1, + H2_STRM_MSG(stream, "cleanup, c2 is running, abort")); + /* c2 is still running */ + h2_c2_abort(stream->c2, m->c1); + ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, m->c1, + H2_STRM_MSG(stream, "cleanup, c2 is done, move to shold")); + h2_ihash_add(m->shold, stream); + } } - else if (stream->task) { - stream->task->c->aborted = 1; - apr_thread_cond_broadcast(m->task_thawed); + else { + /* never started */ + ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, m->c1, + H2_STRM_MSG(stream, "cleanup, never started, move to spurge")); + APR_ARRAY_PUSH(m->spurge, h2_stream *) = stream; + } +} + +static h2_c2_transit *c2_transit_create(h2_mplx *m) +{ + apr_allocator_t *allocator; + apr_pool_t *ptrans; + h2_c2_transit *transit; + apr_status_t rv; + + /* We create a pool with its own allocator to be used for + * processing a request. This is the only way to have the processing + * independent of its parent pool in the sense that it can work in + * another thread. + */ + + rv = apr_allocator_create(&allocator); + if (rv == APR_SUCCESS) { + apr_allocator_max_free_set(allocator, ap_max_mem_free); + rv = apr_pool_create_ex(&ptrans, m->pool, NULL, allocator); + } + if (rv != APR_SUCCESS) { + /* maybe the log goes through, maybe not. */ + ap_log_cerror(APLOG_MARK, APLOG_ERR, rv, m->c1, + APLOGNO(10004) "h2_mplx: create transit pool"); + ap_abort_on_oom(); + return NULL; /* should never be reached. */ + } + + apr_allocator_owner_set(allocator, ptrans); + apr_pool_abort_set(abort_on_oom, ptrans); + apr_pool_tag(ptrans, "h2_c2_transit"); + + transit = apr_pcalloc(ptrans, sizeof(*transit)); + transit->pool = ptrans; + transit->bucket_alloc = apr_bucket_alloc_create(ptrans); + return transit; +} + +static void c2_transit_destroy(h2_c2_transit *transit) +{ + apr_pool_destroy(transit->pool); +} + +static h2_c2_transit *c2_transit_get(h2_mplx *m) +{ + h2_c2_transit **ptransit = apr_array_pop(m->c2_transits); + if (ptransit) { + return *ptransit; + } + return c2_transit_create(m); +} + +static void c2_transit_recycle(h2_mplx *m, h2_c2_transit *transit) +{ + if (m->c2_transits->nelts >= APR_INT32_MAX || + (apr_uint32_t)m->c2_transits->nelts >= m->max_spare_transits) { + c2_transit_destroy(transit); + } + else { + APR_ARRAY_PUSH(m->c2_transits, h2_c2_transit*) = transit; } } @@ -151,208 +255,118 @@ static void stream_cleanup(h2_mplx *m, h2_stream *stream) * their HTTP/1 cousins, the separate allocator seems to work better * than protecting a shared h2_session one with an own lock. 
*/ -h2_mplx *h2_mplx_create(conn_rec *c, apr_pool_t *parent, - const h2_config *conf, - h2_workers *workers) +h2_mplx *h2_mplx_c1_create(int child_num, apr_uint32_t id, h2_stream *stream0, + server_rec *s, apr_pool_t *parent, + h2_workers *workers) { + h2_conn_ctx_t *conn_ctx; apr_status_t status = APR_SUCCESS; apr_allocator_t *allocator; - apr_thread_mutex_t *mutex; - h2_mplx *m; - h2_ctx *ctx = h2_ctx_get(c, 0); - ap_assert(conf); + apr_thread_mutex_t *mutex = NULL; + h2_mplx *m = NULL; m = apr_pcalloc(parent, sizeof(h2_mplx)); - if (m) { - m->id = c->id; - m->c = c; - m->s = (ctx? h2_ctx_server_get(ctx) : NULL); - if (!m->s) { - m->s = c->base_server; - } - - /* We create a pool with its own allocator to be used for - * processing slave connections. This is the only way to have the - * processing independant of its parent pool in the sense that it - * can work in another thread. Also, the new allocator needs its own - * mutex to synchronize sub-pools. - */ - status = apr_allocator_create(&allocator); - if (status != APR_SUCCESS) { - return NULL; - } - apr_allocator_max_free_set(allocator, ap_max_mem_free); - apr_pool_create_ex(&m->pool, parent, NULL, allocator); - if (!m->pool) { - apr_allocator_destroy(allocator); - return NULL; - } - apr_pool_tag(m->pool, "h2_mplx"); - apr_allocator_owner_set(allocator, m->pool); - status = apr_thread_mutex_create(&mutex, APR_THREAD_MUTEX_DEFAULT, - m->pool); - if (status != APR_SUCCESS) { - apr_pool_destroy(m->pool); - return NULL; - } - apr_allocator_mutex_set(allocator, mutex); - - status = apr_thread_mutex_create(&m->lock, APR_THREAD_MUTEX_DEFAULT, - m->pool); - if (status != APR_SUCCESS) { - apr_pool_destroy(m->pool); - return NULL; - } - - status = apr_thread_cond_create(&m->task_thawed, m->pool); - if (status != APR_SUCCESS) { - apr_pool_destroy(m->pool); - return NULL; - } - - m->max_streams = h2_config_geti(conf, H2_CONF_MAX_STREAMS); - m->stream_max_mem = h2_config_geti(conf, H2_CONF_STREAM_MAX_MEM); - - m->streams = h2_ihash_create(m->pool, offsetof(h2_stream,id)); - m->sredo = h2_ihash_create(m->pool, offsetof(h2_stream,id)); - m->shold = h2_ihash_create(m->pool, offsetof(h2_stream,id)); - m->spurge = h2_ihash_create(m->pool, offsetof(h2_stream,id)); - m->q = h2_iq_create(m->pool, m->max_streams); - - status = h2_ififo_set_create(&m->readyq, m->pool, m->max_streams); - if (status != APR_SUCCESS) { - apr_pool_destroy(m->pool); - return NULL; - } + m->stream0 = stream0; + m->c1 = stream0->c2; + m->s = s; + m->child_num = child_num; + m->id = id; + + /* We create a pool with its own allocator to be used for + * processing secondary connections. This is the only way to have the + * processing independent of its parent pool in the sense that it + * can work in another thread. Also, the new allocator needs its own + * mutex to synchronize sub-pools. 
+ */ + status = apr_allocator_create(&allocator); + if (status != APR_SUCCESS) { + allocator = NULL; + goto failure; + } + + apr_allocator_max_free_set(allocator, ap_max_mem_free); + apr_pool_create_ex(&m->pool, parent, NULL, allocator); + if (!m->pool) goto failure; + + apr_pool_tag(m->pool, "h2_mplx"); + apr_allocator_owner_set(allocator, m->pool); + + status = apr_thread_mutex_create(&mutex, APR_THREAD_MUTEX_DEFAULT, + m->pool); + if (APR_SUCCESS != status) goto failure; + apr_allocator_mutex_set(allocator, mutex); + + status = apr_thread_mutex_create(&m->lock, APR_THREAD_MUTEX_DEFAULT, + m->pool); + if (APR_SUCCESS != status) goto failure; + + m->max_streams = h2_config_sgeti(s, H2_CONF_MAX_STREAMS); + m->stream_max_mem = h2_config_sgeti(s, H2_CONF_STREAM_MAX_MEM); + + m->streams = h2_ihash_create(m->pool, offsetof(h2_stream,id)); + m->shold = h2_ihash_create(m->pool, offsetof(h2_stream,id)); + m->spurge = apr_array_make(m->pool, 10, sizeof(h2_stream*)); + m->q = h2_iq_create(m->pool, m->max_streams); + + m->workers = workers; + m->processing_max = H2MIN(h2_workers_get_max_workers(workers), m->max_streams); + m->processing_limit = 6; /* the original h1 max parallel connections */ + m->last_mood_change = apr_time_now(); + m->mood_update_interval = apr_time_from_msec(100); + + status = mplx_pollset_create(m); + if (APR_SUCCESS != status) { + ap_log_cerror(APLOG_MARK, APLOG_ERR, status, m->c1, APLOGNO(10308) + "nghttp2: could not create pollset"); + goto failure; + } + m->streams_ev_in = apr_array_make(m->pool, 10, sizeof(h2_stream*)); + m->streams_ev_out = apr_array_make(m->pool, 10, sizeof(h2_stream*)); + + m->streams_input_read = h2_iq_create(m->pool, 10); + m->streams_output_written = h2_iq_create(m->pool, 10); + status = apr_thread_mutex_create(&m->poll_lock, APR_THREAD_MUTEX_DEFAULT, + m->pool); + if (APR_SUCCESS != status) goto failure; + + conn_ctx = h2_conn_ctx_get(m->c1); + if (conn_ctx->pfd.reqevents) { + apr_pollset_add(m->pollset, &conn_ctx->pfd); + } + + m->max_spare_transits = 3; + m->c2_transits = apr_array_make(m->pool, (int)m->max_spare_transits, + sizeof(h2_c2_transit*)); + + m->producer = h2_workers_register(workers, m->pool, + apr_psprintf(m->pool, "h2-%u", + (unsigned int)m->id), + c2_prod_next, c2_prod_done, + workers_shutdown, m); + return m; - m->workers = workers; - m->max_active = workers->max_workers; - m->limit_active = 6; /* the original h1 max parallel connections */ - m->last_limit_change = m->last_idle_block = apr_time_now(); - m->limit_change_interval = apr_time_from_msec(100); - - m->spare_slaves = apr_array_make(m->pool, 10, sizeof(conn_rec*)); - - m->ngn_shed = h2_ngn_shed_create(m->pool, m->c, m->max_streams, - m->stream_max_mem); - h2_ngn_shed_set_ctx(m->ngn_shed , m); +failure: + if (m->pool) { + apr_pool_destroy(m->pool); } - return m; + else if (allocator) { + apr_allocator_destroy(allocator); + } + return NULL; } -int h2_mplx_shutdown(h2_mplx *m) +int h2_mplx_c1_shutdown(h2_mplx *m) { - int max_stream_started = 0; + int max_stream_id_started = 0; H2_MPLX_ENTER(m); - max_stream_started = m->max_stream_started; + max_stream_id_started = m->max_stream_id_started; /* Clear schedule queue, disabling existing streams from starting */ h2_iq_clear(m->q); H2_MPLX_LEAVE(m); - return max_stream_started; -} - -static int input_consumed_signal(h2_mplx *m, h2_stream *stream) -{ - if (stream->input) { - return h2_beam_report_consumption(stream->input); - } - return 0; -} - -static int report_consumption_iter(void *ctx, void *val) -{ - h2_stream *stream = val; - 
h2_mplx *m = ctx; - - input_consumed_signal(m, stream); - if (stream->state == H2_SS_CLOSED_L - && (!stream->task || stream->task->worker_done)) { - ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, m->c, - H2_STRM_LOG(APLOGNO(10026), stream, "remote close missing")); - nghttp2_submit_rst_stream(stream->session->ngh2, NGHTTP2_FLAG_NONE, - stream->id, NGHTTP2_NO_ERROR); - } - return 1; -} - -static int output_consumed_signal(h2_mplx *m, h2_task *task) -{ - if (task->output.beam) { - return h2_beam_report_consumption(task->output.beam); - } - return 0; -} - -static int stream_destroy_iter(void *ctx, void *val) -{ - h2_mplx *m = ctx; - h2_stream *stream = val; - - h2_ihash_remove(m->spurge, stream->id); - ap_assert(stream->state == H2_SS_CLEANUP); - - if (stream->input) { - /* Process outstanding events before destruction */ - input_consumed_signal(m, stream); - h2_beam_log(stream->input, m->c, APLOG_TRACE2, "stream_destroy"); - h2_beam_destroy(stream->input); - stream->input = NULL; - } - - if (stream->task) { - h2_task *task = stream->task; - conn_rec *slave; - int reuse_slave = 0; - - stream->task = NULL; - slave = task->c; - if (slave) { - /* On non-serialized requests, the IO logging has not accounted for any - * meta data send over the network: response headers and h2 frame headers. we - * counted this on the stream and need to add this now. - * This is supposed to happen before the EOR bucket triggers the - * logging of the transaction. *fingers crossed* */ - if (task->request && !task->request->serialize && h2_task_logio_add_bytes_out) { - apr_off_t unaccounted = stream->out_frame_octets - stream->out_data_octets; - if (unaccounted > 0) { - h2_task_logio_add_bytes_out(slave, unaccounted); - } - } - - if (m->s->keep_alive_max == 0 || slave->keepalives < m->s->keep_alive_max) { - reuse_slave = ((m->spare_slaves->nelts < (m->limit_active * 3 / 2)) - && !task->rst_error); - } - - if (reuse_slave && slave->keepalive == AP_CONN_KEEPALIVE) { - h2_beam_log(task->output.beam, m->c, APLOG_DEBUG, - APLOGNO(03385) "h2_task_destroy, reuse slave"); - h2_task_destroy(task); - APR_ARRAY_PUSH(m->spare_slaves, conn_rec*) = slave; - } - else { - h2_beam_log(task->output.beam, m->c, APLOG_TRACE1, - "h2_task_destroy, destroy slave"); - h2_slave_destroy(slave); - } - } - } - h2_stream_destroy(stream); - return 0; -} - -static void purge_streams(h2_mplx *m, int lock) -{ - if (!h2_ihash_empty(m->spurge)) { - H2_MPLX_ENTER_MAYBE(m, lock); - while (!h2_ihash_iter(m->spurge, stream_destroy_iter, m)) { - /* repeat until empty */ - } - H2_MPLX_LEAVE_MAYBE(m, lock); - } + return max_stream_id_started; } typedef struct { @@ -360,13 +374,13 @@ typedef struct { void *ctx; } stream_iter_ctx_t; -static int stream_iter_wrap(void *ctx, void *stream) +static int m_stream_iter_wrap(void *ctx, void *stream) { stream_iter_ctx_t *x = ctx; return x->cb(stream, x->ctx); } -apr_status_t h2_mplx_stream_do(h2_mplx *m, h2_mplx_stream_cb *cb, void *ctx) +apr_status_t h2_mplx_c1_streams_do(h2_mplx *m, h2_mplx_stream_cb *cb, void *ctx) { stream_iter_ctx_t x; @@ -374,276 +388,260 @@ apr_status_t h2_mplx_stream_do(h2_mplx *m, h2_mplx_stream_cb *cb, void *ctx) x.cb = cb; x.ctx = ctx; - h2_ihash_iter(m->streams, stream_iter_wrap, &x); + h2_ihash_iter(m->streams, m_stream_iter_wrap, &x); H2_MPLX_LEAVE(m); return APR_SUCCESS; } -static int report_stream_iter(void *ctx, void *val) { +typedef struct { + int stream_count; + int stream_want_send; +} stream_iter_aws_t; + +static int m_stream_want_send_data(void *ctx, void *stream) +{ + stream_iter_aws_t 
*x = ctx; + ++x->stream_count; + if (h2_stream_wants_send_data(stream)) + ++x->stream_want_send; + return 1; +} + +int h2_mplx_c1_all_streams_want_send_data(h2_mplx *m) +{ + stream_iter_aws_t x; + x.stream_count = 0; + x.stream_want_send = 0; + H2_MPLX_ENTER(m); + h2_ihash_iter(m->streams, m_stream_want_send_data, &x); + H2_MPLX_LEAVE(m); + return x.stream_count && (x.stream_count == x.stream_want_send); +} + +static int m_report_stream_iter(void *ctx, void *val) { h2_mplx *m = ctx; h2_stream *stream = val; - h2_task *task = stream->task; - if (APLOGctrace1(m->c)) { - ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c, - H2_STRM_MSG(stream, "started=%d, scheduled=%d, ready=%d, out_buffer=%ld"), - !!stream->task, stream->scheduled, h2_stream_is_ready(stream), - (long)h2_beam_get_buffered(stream->output)); - } - if (task) { - ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, m->c, /* NO APLOGNO */ + h2_conn_ctx_t *conn_ctx = h2_conn_ctx_get(stream->c2); + ap_log_cerror(APLOG_MARK, APLOG_WARNING, 0, m->c1, + H2_STRM_MSG(stream, "started=%d, scheduled=%d, ready=%d, out_buffer=%ld"), + !!stream->c2, stream->scheduled, h2_stream_is_ready(stream), + (long)(stream->output? h2_beam_get_buffered(stream->output) : -1)); + if (conn_ctx) { + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, m->c1, /* NO APLOGNO */ H2_STRM_MSG(stream, "->03198: %s %s %s" - "[started=%d/done=%d/frozen=%d]"), - task->request->method, task->request->authority, - task->request->path, task->worker_started, - task->worker_done, task->frozen); + "[started=%u/done=%u]"), + conn_ctx->request->method, conn_ctx->request->authority, + conn_ctx->request->path, + apr_atomic_read32(&conn_ctx->started), + apr_atomic_read32(&conn_ctx->done)); } else { - ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, m->c, /* NO APLOGNO */ - H2_STRM_MSG(stream, "->03198: no task")); + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, m->c1, /* NO APLOGNO */ + H2_STRM_MSG(stream, "->03198: not started")); } return 1; } -static int unexpected_stream_iter(void *ctx, void *val) { +static int m_unexpected_stream_iter(void *ctx, void *val) { h2_mplx *m = ctx; h2_stream *stream = val; - ap_log_cerror(APLOG_MARK, APLOG_WARNING, 0, m->c, /* NO APLOGNO */ + ap_log_cerror(APLOG_MARK, APLOG_WARNING, 0, m->c1, /* NO APLOGNO */ H2_STRM_MSG(stream, "unexpected, started=%d, scheduled=%d, ready=%d"), - !!stream->task, stream->scheduled, h2_stream_is_ready(stream)); + !!stream->c2, stream->scheduled, h2_stream_is_ready(stream)); return 1; } -static int stream_cancel_iter(void *ctx, void *val) { +static int m_stream_cancel_iter(void *ctx, void *val) { h2_mplx *m = ctx; h2_stream *stream = val; - /* disabled input consumed reporting */ - if (stream->input) { - h2_beam_on_consumed(stream->input, NULL, NULL, NULL); - } /* take over event monitoring */ h2_stream_set_monitor(stream, NULL); /* Reset, should transit to CLOSED state */ h2_stream_rst(stream, H2_ERR_NO_ERROR); /* All connection data has been sent, simulate cleanup */ h2_stream_dispatch(stream, H2_SEV_EOS_SENT); - stream_cleanup(m, stream); + m_stream_cleanup(m, stream); return 0; } -void h2_mplx_release_and_join(h2_mplx *m, apr_thread_cond_t *wait) +static void c1_purge_streams(h2_mplx *m); + +void h2_mplx_c1_destroy(h2_mplx *m) { apr_status_t status; - int i, wait_secs = 60; + unsigned int i, wait_secs = 60; + int old_aborted; + ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, m->c1, + H2_MPLX_MSG(m, "start release")); /* How to shut down a h2 connection: - * 0. 
abort and tell the workers that no more tasks will come from us */ - m->aborted = 1; - h2_workers_unregister(m->workers, m); - + * 0. abort and tell the workers that no more work will come from us */ + m->shutdown = m->aborted = 1; + H2_MPLX_ENTER_ALWAYS(m); + /* While really terminating any c2 connections, treat the master + * connection as aborted. It's not as if we could send any more data + * at this point. */ + old_aborted = m->c1->aborted; + m->c1->aborted = 1; + /* How to shut down a h2 connection: * 1. cancel all streams still active */ - while (!h2_ihash_iter(m->streams, stream_cancel_iter, m)) { + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c1, + H2_MPLX_MSG(m, "release, %u/%u/%d streams (total/hold/purge), %d streams"), + h2_ihash_count(m->streams), + h2_ihash_count(m->shold), + m->spurge->nelts, m->processing_count); + while (!h2_ihash_iter(m->streams, m_stream_cancel_iter, m)) { /* until empty */ } - /* 2. terminate ngn_shed, no more streams - * should be scheduled or in the active set */ - h2_ngn_shed_abort(m->ngn_shed); + /* 2. no more streams should be scheduled or in the active set */ ap_assert(h2_ihash_empty(m->streams)); ap_assert(h2_iq_empty(m->q)); /* 3. while workers are busy on this connection, meaning they - * are processing tasks from this connection, wait on them finishing + * are processing streams from this connection, wait on them finishing * in order to wake us and let us check again. - * Eventually, this has to succeed. */ - m->join_wait = wait; - for (i = 0; h2_ihash_count(m->shold) > 0; ++i) { - status = apr_thread_cond_timedwait(wait, m->lock, apr_time_from_sec(wait_secs)); + * Eventually, this has to succeed. */ + if (!m->join_wait) { + apr_thread_cond_create(&m->join_wait, m->pool); + } + + for (i = 0; h2_ihash_count(m->shold) > 0; ++i) { + status = apr_thread_cond_timedwait(m->join_wait, m->lock, apr_time_from_sec(wait_secs)); if (APR_STATUS_IS_TIMEUP(status)) { /* This can happen if we have very long running requests * that do not time out on IO. */ - ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, m->c, APLOGNO(03198) - "h2_mplx(%ld): waited %d sec for %d tasks", - m->id, i*wait_secs, (int)h2_ihash_count(m->shold)); - h2_ihash_iter(m->shold, report_stream_iter, m); + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, m->c1, APLOGNO(03198) + H2_MPLX_MSG(m, "waited %u sec for %u streams"), + i*wait_secs, h2_ihash_count(m->shold)); + h2_ihash_iter(m->shold, m_report_stream_iter, m); } } - ap_assert(m->tasks_active == 0); - m->join_wait = NULL; - - /* 4. close the h2_req_enginge shed */ - h2_ngn_shed_destroy(m->ngn_shed); - m->ngn_shed = NULL; - + + H2_MPLX_LEAVE(m); + h2_workers_join(m->workers, m->producer); + H2_MPLX_ENTER_ALWAYS(m); + /* 4. 
With all workers done, all streams should be in spurge */ + ap_assert(m->processing_count == 0); if (!h2_ihash_empty(m->shold)) { - ap_log_cerror(APLOG_MARK, APLOG_WARNING, 0, m->c, APLOGNO(03516) - "h2_mplx(%ld): unexpected %d streams in hold", - m->id, (int)h2_ihash_count(m->shold)); - h2_ihash_iter(m->shold, unexpected_stream_iter, m); + ap_log_cerror(APLOG_MARK, APLOG_WARNING, 0, m->c1, APLOGNO(03516) + H2_MPLX_MSG(m, "unexpected %u streams in hold"), + h2_ihash_count(m->shold)); + h2_ihash_iter(m->shold, m_unexpected_stream_iter, m); } - + + c1_purge_streams(m); + + m->c1->aborted = old_aborted; H2_MPLX_LEAVE(m); - ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c, - "h2_mplx(%ld): released", m->id); + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c1, + H2_MPLX_MSG(m, "released")); } -apr_status_t h2_mplx_stream_cleanup(h2_mplx *m, h2_stream *stream) +apr_status_t h2_mplx_c1_stream_cleanup(h2_mplx *m, h2_stream *stream, + unsigned int *pstream_count) { H2_MPLX_ENTER(m); - ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, m->c, + ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, m->c1, H2_STRM_MSG(stream, "cleanup")); - stream_cleanup(m, stream); - + m_stream_cleanup(m, stream); + *pstream_count = h2_ihash_count(m->streams); H2_MPLX_LEAVE(m); return APR_SUCCESS; } -h2_stream *h2_mplx_stream_get(h2_mplx *m, int id) +const h2_stream *h2_mplx_c2_stream_get(h2_mplx *m, int stream_id) { h2_stream *s = NULL; H2_MPLX_ENTER_ALWAYS(m); - - s = h2_ihash_get(m->streams, id); - + s = h2_ihash_get(m->streams, stream_id); H2_MPLX_LEAVE(m); + return s; } -static void output_produced(void *ctx, h2_bucket_beam *beam, apr_off_t bytes) -{ - h2_stream *stream = ctx; - h2_mplx *m = stream->session->mplx; - - check_data_for(m, stream, 1); -} -static apr_status_t out_open(h2_mplx *m, int stream_id, h2_bucket_beam *beam) +static void c1_purge_streams(h2_mplx *m) { - apr_status_t status = APR_SUCCESS; - h2_stream *stream = h2_ihash_get(m->streams, stream_id); - - if (!stream || !stream->task || m->aborted) { - return APR_ECONNABORTED; - } - - ap_assert(stream->output == NULL); - stream->output = beam; - - if (APLOGctrace2(m->c)) { - h2_beam_log(beam, m->c, APLOG_TRACE2, "out_open"); - } - else { - ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, m->c, - "h2_mplx(%s): out open", stream->task->id); - } - - h2_beam_on_consumed(stream->output, NULL, stream_output_consumed, stream); - h2_beam_on_produced(stream->output, output_produced, stream); - if (stream->task->output.copy_files) { - h2_beam_on_file_beam(stream->output, h2_beam_no_files, NULL); - } - - /* we might see some file buckets in the output, see - * if we have enough handles reserved. 
*/ - check_data_for(m, stream, 0); - return status; -} + h2_stream *stream; + int i; -apr_status_t h2_mplx_out_open(h2_mplx *m, int stream_id, h2_bucket_beam *beam) -{ - apr_status_t status; - - H2_MPLX_ENTER(m); + for (i = 0; i < m->spurge->nelts; ++i) { + stream = APR_ARRAY_IDX(m->spurge, i, h2_stream*); + ap_assert(stream->state == H2_SS_CLEANUP); - if (m->aborted) { - status = APR_ECONNABORTED; - } - else { - status = out_open(m, stream_id, beam); + if (stream->input) { + h2_beam_destroy(stream->input, m->c1); + stream->input = NULL; + } + if (stream->c2) { + conn_rec *c2 = stream->c2; + h2_conn_ctx_t *c2_ctx = h2_conn_ctx_get(c2); + h2_c2_transit *transit; + + stream->c2 = NULL; + ap_assert(c2_ctx); + transit = c2_ctx->transit; + h2_c2_destroy(c2); /* c2_ctx is gone as well */ + if (transit) { + c2_transit_recycle(m, transit); + } + } + h2_stream_destroy(stream); } - - H2_MPLX_LEAVE(m); - return status; + apr_array_clear(m->spurge); } -static apr_status_t out_close(h2_mplx *m, h2_task *task) +void h2_mplx_c1_going_keepalive(h2_mplx *m) { - apr_status_t status = APR_SUCCESS; - h2_stream *stream; - - if (!task) { - return APR_ECONNABORTED; - } - if (task->c) { - ++task->c->keepalives; - } - - stream = h2_ihash_get(m->streams, task->stream_id); - if (!stream) { - return APR_ECONNABORTED; + H2_MPLX_ENTER_ALWAYS(m); + if (m->spurge->nelts) { + c1_purge_streams(m); } - - ap_log_cerror(APLOG_MARK, APLOG_TRACE2, status, m->c, - "h2_mplx(%s): close", task->id); - status = h2_beam_close(task->output.beam); - h2_beam_log(task->output.beam, m->c, APLOG_TRACE2, "out_close"); - output_consumed_signal(m, task); - check_data_for(m, stream, 0); - return status; + H2_MPLX_LEAVE(m); } -apr_status_t h2_mplx_out_trywait(h2_mplx *m, apr_interval_time_t timeout, - apr_thread_cond_t *iowait) +apr_status_t h2_mplx_c1_poll(h2_mplx *m, apr_interval_time_t timeout, + stream_ev_callback *on_stream_input, + stream_ev_callback *on_stream_output, + void *on_ctx) { - apr_status_t status; - + apr_status_t rv; + H2_MPLX_ENTER(m); if (m->aborted) { - status = APR_ECONNABORTED; - } - else if (h2_mplx_has_master_events(m)) { - status = APR_SUCCESS; - } - else { - purge_streams(m, 0); - h2_ihash_iter(m->streams, report_consumption_iter, m); - m->added_output = iowait; - status = apr_thread_cond_timedwait(m->added_output, m->lock, timeout); - if (APLOGctrace2(m->c)) { - ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, m->c, - "h2_mplx(%ld): trywait on data for %f ms)", - m->id, timeout/1000.0); - } - m->added_output = NULL; + rv = APR_ECONNABORTED; + goto cleanup; + } + /* Purge (destroy) streams outside of pollset processing. + * Streams that are registered in the pollset, will be removed + * when they are destroyed, but the pollset works on copies + * of these registrations. So, if we destroy streams while + * processing pollset events, we might access freed memory. 
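The comment above gives the reason for deferring stream destruction: the pollset works on copies of its registrations, so freeing a stream while still walking poll results could dereference freed memory. A minimal sketch of this collect-now, destroy-later pattern with an APR array (the item type and the use of free() are illustrative stand-ins, not taken from the patch):

    #include <stdlib.h>
    #include <apr_tables.h>

    typedef struct item { int id; } item;

    /* purge list created once, e.g.: apr_array_make(pool, 10, sizeof(item*)) */

    /* While dispatching events, finished items are only parked on the list. */
    static void purge_list_add(apr_array_header_t *purge, item *it)
    {
        APR_ARRAY_PUSH(purge, item*) = it;
    }

    /* They are destroyed in one place, outside of event processing. */
    static void purge_list_run(apr_array_header_t *purge)
    {
        int i;
        for (i = 0; i < purge->nelts; ++i) {
            item *it = APR_ARRAY_IDX(purge, i, item*);
            free(it); /* illustrative: assumes malloc()ed items; the patch
                       * destroys pool-backed streams via h2_stream_destroy() */
        }
        apr_array_clear(purge);
    }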
+ */ + if (m->spurge->nelts) { + c1_purge_streams(m); } + rv = mplx_pollset_poll(m, timeout, on_stream_input, on_stream_output, on_ctx); +cleanup: H2_MPLX_LEAVE(m); - return status; -} - -static void check_data_for(h2_mplx *m, h2_stream *stream, int lock) -{ - if (h2_ififo_push(m->readyq, stream->id) == APR_SUCCESS) { - apr_atomic_set32(&m->event_pending, 1); - H2_MPLX_ENTER_MAYBE(m, lock); - if (m->added_output) { - apr_thread_cond_signal(m->added_output); - } - H2_MPLX_LEAVE_MAYBE(m, lock); - } + return rv; } -apr_status_t h2_mplx_reprioritize(h2_mplx *m, h2_stream_pri_cmp *cmp, void *ctx) +apr_status_t h2_mplx_c1_reprioritize(h2_mplx *m, h2_stream_pri_cmp_fn *cmp, + h2_session *session) { apr_status_t status; @@ -653,9 +651,9 @@ apr_status_t h2_mplx_reprioritize(h2_mplx *m, h2_stream_pri_cmp *cmp, void *ctx) status = APR_ECONNABORTED; } else { - h2_iq_sort(m->q, cmp, ctx); - ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c, - "h2_mplx(%ld): reprioritize tasks", m->id); + h2_iq_sort(m->q, cmp, session); + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c1, + H2_MPLX_MSG(m, "reprioritize streams")); status = APR_SUCCESS; } @@ -663,560 +661,435 @@ apr_status_t h2_mplx_reprioritize(h2_mplx *m, h2_stream_pri_cmp *cmp, void *ctx) return status; } -static void register_if_needed(h2_mplx *m) -{ - if (!m->aborted && !m->is_registered && !h2_iq_empty(m->q)) { - apr_status_t status = h2_workers_register(m->workers, m); - if (status == APR_SUCCESS) { - m->is_registered = 1; - } - else { - ap_log_cerror(APLOG_MARK, APLOG_ERR, status, m->c, APLOGNO(10021) - "h2_mplx(%ld): register at workers", m->id); - } - } -} - -apr_status_t h2_mplx_process(h2_mplx *m, struct h2_stream *stream, - h2_stream_pri_cmp *cmp, void *ctx) +static apr_status_t c1_process_stream(h2_mplx *m, + h2_stream *stream, + h2_stream_pri_cmp_fn *cmp, + h2_session *session) { - apr_status_t status; - - H2_MPLX_ENTER(m); + apr_status_t rv = APR_SUCCESS; if (m->aborted) { - status = APR_ECONNABORTED; + rv = APR_ECONNABORTED; + goto cleanup; + } + if (!stream->request) { + rv = APR_EINVAL; + goto cleanup; + } + if (APLOGctrace1(m->c1)) { + const h2_request *r = stream->request; + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c1, + H2_STRM_MSG(stream, "process %s%s%s %s%s%s%s"), + r->protocol? r->protocol : "", + r->protocol? " " : "", + r->method, r->scheme? r->scheme : "", + r->scheme? "://" : "", + r->authority, r->path? r->path: ""); + } + + stream->scheduled = 1; + h2_ihash_add(m->streams, stream); + if (h2_stream_is_ready(stream)) { + /* already have a response */ + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c1, + H2_STRM_MSG(stream, "process, ready already")); } else { - status = APR_SUCCESS; - h2_ihash_add(m->streams, stream); - if (h2_stream_is_ready(stream)) { - /* already have a response */ - check_data_for(m, stream, 0); - ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c, - H2_STRM_MSG(stream, "process, add to readyq")); - } - else { - h2_iq_add(m->q, stream->id, cmp, ctx); - register_if_needed(m); - ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c, - H2_STRM_MSG(stream, "process, added to q")); - } + /* last chance to set anything up before stream is processed + * by worker threads. 
*/ + rv = h2_stream_prepare_processing(stream); + if (APR_SUCCESS != rv) goto cleanup; + h2_iq_add(m->q, stream->id, cmp, session); + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c1, + H2_STRM_MSG(stream, "process, added to q")); } - H2_MPLX_LEAVE(m); - return status; +cleanup: + return rv; } -static h2_task *next_stream_task(h2_mplx *m) +void h2_mplx_c1_process(h2_mplx *m, + h2_iqueue *ready_to_process, + h2_stream_get_fn *get_stream, + h2_stream_pri_cmp_fn *stream_pri_cmp, + h2_session *session, + unsigned int *pstream_count) { - h2_stream *stream; + apr_status_t rv; int sid; - while (!m->aborted && (m->tasks_active < m->limit_active) - && (sid = h2_iq_shift(m->q)) > 0) { - - stream = h2_ihash_get(m->streams, sid); - if (stream) { - conn_rec *slave, **pslave; - pslave = (conn_rec **)apr_array_pop(m->spare_slaves); - if (pslave) { - slave = *pslave; - slave->aborted = 0; - } - else { - slave = h2_slave_create(m->c, stream->id, m->pool); - } - - if (!stream->task) { + H2_MPLX_ENTER_ALWAYS(m); - if (sid > m->max_stream_started) { - m->max_stream_started = sid; - } - if (stream->input) { - h2_beam_on_consumed(stream->input, stream_input_ev, - stream_input_consumed, stream); - } - - stream->task = h2_task_create(slave, stream->id, - stream->request, m, stream->input, - stream->session->s->timeout, - m->stream_max_mem); - if (!stream->task) { - ap_log_cerror(APLOG_MARK, APLOG_ERR, APR_ENOMEM, slave, - H2_STRM_LOG(APLOGNO(02941), stream, - "create task")); - return NULL; - } - + while ((sid = h2_iq_shift(ready_to_process)) > 0) { + h2_stream *stream = get_stream(session, sid); + if (stream) { + ap_assert(!stream->scheduled); + rv = c1_process_stream(session->mplx, stream, stream_pri_cmp, session); + if (APR_SUCCESS != rv) { + h2_stream_rst(stream, H2_ERR_INTERNAL_ERROR); } - - ++m->tasks_active; - return stream->task; - } - } - return NULL; -} - -apr_status_t h2_mplx_pop_task(h2_mplx *m, h2_task **ptask) -{ - apr_status_t rv = APR_EOF; - - *ptask = NULL; - ap_assert(m); - ap_assert(m->lock); - - if (APR_SUCCESS != (rv = apr_thread_mutex_lock(m->lock))) { - return rv; - } - - if (m->aborted) { - rv = APR_EOF; - } - else { - *ptask = next_stream_task(m); - rv = (*ptask != NULL && !h2_iq_empty(m->q))? APR_EAGAIN : APR_SUCCESS; - } - if (APR_EAGAIN != rv) { - m->is_registered = 0; /* h2_workers will discard this mplx */ - } - H2_MPLX_LEAVE(m); - return rv; -} - -static void task_done(h2_mplx *m, h2_task *task, h2_req_engine *ngn) -{ - h2_stream *stream; - - if (task->frozen) { - /* this task was handed over to an engine for processing - * and the original worker has finished. That means the - * engine may start processing now. */ - h2_task_thaw(task); - apr_thread_cond_broadcast(m->task_thawed); - return; - } - - ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c, - "h2_mplx(%ld): task(%s) done", m->id, task->id); - out_close(m, task); - - if (ngn) { - apr_off_t bytes = 0; - h2_beam_send(task->output.beam, NULL, APR_NONBLOCK_READ); - bytes += h2_beam_get_buffered(task->output.beam); - if (bytes > 0) { - /* we need to report consumed and current buffered output - * to the engine. The request will be streamed out or cancelled, - * no more data is coming from it and the engine should update - * its calculations before we destroy this information. 
*/ - h2_req_engine_out_consumed(ngn, task->c, bytes); - } - } - - if (task->engine) { - if (!m->aborted && !task->c->aborted - && !h2_req_engine_is_shutdown(task->engine)) { - ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, m->c, APLOGNO(10022) - "h2_mplx(%ld): task(%s) has not-shutdown " - "engine(%s)", m->id, task->id, - h2_req_engine_get_id(task->engine)); - } - h2_ngn_shed_done_ngn(m->ngn_shed, task->engine); - } - - task->worker_done = 1; - task->done_at = apr_time_now(); - ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, m->c, - "h2_mplx(%s): request done, %f ms elapsed", task->id, - (task->done_at - task->started_at) / 1000.0); - - if (task->started_at > m->last_idle_block) { - /* this task finished without causing an 'idle block', e.g. - * a block by flow control. - */ - if (task->done_at- m->last_limit_change >= m->limit_change_interval - && m->limit_active < m->max_active) { - /* Well behaving stream, allow it more workers */ - m->limit_active = H2MIN(m->limit_active * 2, - m->max_active); - m->last_limit_change = task->done_at; - ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c, - "h2_mplx(%ld): increase worker limit to %d", - m->id, m->limit_active); - } - } - - stream = h2_ihash_get(m->streams, task->stream_id); - if (stream) { - /* stream not done yet. */ - if (!m->aborted && h2_ihash_get(m->sredo, stream->id)) { - /* reset and schedule again */ - h2_task_redo(task); - h2_ihash_remove(m->sredo, stream->id); - h2_iq_add(m->q, stream->id, NULL, NULL); } else { - /* stream not cleaned up, stay around */ - ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, m->c, - H2_STRM_MSG(stream, "task_done, stream open")); - if (stream->input) { - h2_beam_leave(stream->input); - } - - /* more data will not arrive, resume the stream */ - check_data_for(m, stream, 0); + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c1, + H2_MPLX_MSG(m, "stream %d not found to process"), sid); } } - else if ((stream = h2_ihash_get(m->shold, task->stream_id)) != NULL) { - /* stream is done, was just waiting for this. */ - ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, m->c, - H2_STRM_MSG(stream, "task_done, in hold")); - if (stream->input) { - h2_beam_leave(stream->input); + if ((m->processing_count < m->processing_limit) && !h2_iq_empty(m->q)) { + H2_MPLX_LEAVE(m); + rv = h2_workers_activate(m->workers, m->producer); + H2_MPLX_ENTER_ALWAYS(m); + if (rv != APR_SUCCESS) { + ap_log_cerror(APLOG_MARK, APLOG_ERR, rv, m->c1, APLOGNO(10021) + H2_MPLX_MSG(m, "activate at workers")); } - stream_joined(m, stream); - } - else if ((stream = h2_ihash_get(m->spurge, task->stream_id)) != NULL) { - ap_log_cerror(APLOG_MARK, APLOG_WARNING, 0, m->c, - H2_STRM_LOG(APLOGNO(03517), stream, "already in spurge")); - ap_assert("stream should not be in spurge" == NULL); } - else { - ap_log_cerror(APLOG_MARK, APLOG_WARNING, 0, m->c, APLOGNO(03518) - "h2_mplx(%s): task_done, stream not found", - task->id); - ap_assert("stream should still be available" == NULL); - } -} + *pstream_count = h2_ihash_count(m->streams); -void h2_mplx_task_done(h2_mplx *m, h2_task *task, h2_task **ptask) -{ - H2_MPLX_ENTER_ALWAYS(m); +#if APR_POOL_DEBUG + do { + apr_size_t mem_g, mem_m, mem_s, mem_c1; - task_done(m, task, NULL); - --m->tasks_active; - - if (m->join_wait) { - apr_thread_cond_signal(m->join_wait); - } - if (ptask) { - /* caller wants another task */ - *ptask = next_stream_task(m); - } - register_if_needed(m); + mem_g = pchild? 
apr_pool_num_bytes(pchild, 1) : 0; + mem_m = apr_pool_num_bytes(m->pool, 1); + mem_s = apr_pool_num_bytes(session->pool, 1); + mem_c1 = apr_pool_num_bytes(m->c1->pool, 1); + ap_log_cerror(APLOG_MARK, APLOG_INFO, 0, m->c1, + H2_MPLX_MSG(m, "child mem=%ld, mplx mem=%ld, session mem=%ld, c1=%ld"), + (long)mem_g, (long)mem_m, (long)mem_s, (long)mem_c1); + + } while (0); +#endif H2_MPLX_LEAVE(m); } -/******************************************************************************* - * h2_mplx DoS protection - ******************************************************************************/ +static void c2_beam_input_write_notify(void *ctx, h2_bucket_beam *beam) +{ + conn_rec *c = ctx; + h2_conn_ctx_t *conn_ctx = h2_conn_ctx_get(c); -static int latest_repeatable_unsubmitted_iter(void *data, void *val) + (void)beam; + if (conn_ctx && conn_ctx->stream_id && conn_ctx->pipe_in[H2_PIPE_IN]) { + apr_file_putc(1, conn_ctx->pipe_in[H2_PIPE_IN]); + } +} + +static void add_stream_poll_event(h2_mplx *m, int stream_id, h2_iqueue *q) { - stream_iter_ctx *ctx = data; - h2_stream *stream = val; - - if (stream->task && !stream->task->worker_done - && h2_task_can_redo(stream->task) - && !h2_ihash_get(ctx->m->sredo, stream->id)) { - if (!h2_stream_is_ready(stream)) { - /* this task occupies a worker, the response has not been submitted - * yet, not been cancelled and it is a repeatable request - * -> it can be re-scheduled later */ - if (!ctx->stream - || (ctx->stream->task->started_at < stream->task->started_at)) { - /* we did not have one or this one was started later */ - ctx->stream = stream; - } - } + apr_thread_mutex_lock(m->poll_lock); + if (h2_iq_append(q, stream_id) && h2_iq_count(q) == 1) { + /* newly added first */ + apr_pollset_wakeup(m->pollset); } - return 1; + apr_thread_mutex_unlock(m->poll_lock); } -static h2_stream *get_latest_repeatable_unsubmitted_stream(h2_mplx *m) +static void c2_beam_input_read_notify(void *ctx, h2_bucket_beam *beam) { - stream_iter_ctx ctx; - ctx.m = m; - ctx.stream = NULL; - h2_ihash_iter(m->streams, latest_repeatable_unsubmitted_iter, &ctx); - return ctx.stream; + conn_rec *c = ctx; + h2_conn_ctx_t *conn_ctx = h2_conn_ctx_get(c); + + if (conn_ctx && conn_ctx->stream_id) { + add_stream_poll_event(conn_ctx->mplx, conn_ctx->stream_id, + conn_ctx->mplx->streams_input_read); + } } -static int timed_out_busy_iter(void *data, void *val) +static void c2_beam_input_read_eagain(void *ctx, h2_bucket_beam *beam) { - stream_iter_ctx *ctx = data; - h2_stream *stream = val; - if (stream->task && !stream->task->worker_done - && (ctx->now - stream->task->started_at) > stream->task->timeout) { - /* timed out stream occupying a worker, found */ - ctx->stream = stream; - return 0; + conn_rec *c = ctx; + h2_conn_ctx_t *conn_ctx = h2_conn_ctx_get(c); + /* installed in the input bucket beams when we use pipes. + * Drain the pipe just before the beam returns APR_EAGAIN. 
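The beam callbacks above use a pipe purely as a poll-able signal: a byte is written when beam input becomes available, and the read end is drained again once the beam reports APR_EAGAIN, so a pollset watching the pipe can sleep while the beam is empty. A standalone sketch of that signal/drain idiom (names are illustrative; for simplicity it uses a fully non-blocking pipe, whereas the patch creates its pipe via apr_file_pipe_create_pools() with APR_READ_BLOCK and drains through its h2_util_drain_pipe() helper):

    #include <apr_file_io.h>

    /* Create a non-blocking pipe whose read end can be added to a pollset. */
    static apr_status_t signal_pipe_create(apr_file_t **prd, apr_file_t **pwr,
                                           apr_pool_t *pool)
    {
        return apr_file_pipe_create_ex(prd, pwr, APR_FULL_NONBLOCK, pool);
    }

    /* Producer: raise the signal; the byte's value is irrelevant. */
    static void signal_pipe_raise(apr_file_t *wr)
    {
        (void)apr_file_putc(1, wr);
    }

    /* Consumer: lower the signal by draining the pipe, so a pollset watching
     * the read end goes back to sleep until the next raise. */
    static void signal_pipe_drain(apr_file_t *rd)
    {
        char buf[64];
        apr_size_t len;
        apr_status_t rv;

        do {
            len = sizeof(buf);
            rv = apr_file_read(rd, buf, &len);
        } while (rv == APR_SUCCESS && len > 0);
    }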
+ * A clean state for allowing polling on the pipe to rest + * when the beam is empty */ + if (conn_ctx && conn_ctx->pipe_in[H2_PIPE_OUT]) { + h2_util_drain_pipe(conn_ctx->pipe_in[H2_PIPE_OUT]); } - return 1; } -static h2_stream *get_timed_out_busy_stream(h2_mplx *m) +static void c2_beam_output_write_notify(void *ctx, h2_bucket_beam *beam) { - stream_iter_ctx ctx; - ctx.m = m; - ctx.stream = NULL; - ctx.now = apr_time_now(); - h2_ihash_iter(m->streams, timed_out_busy_iter, &ctx); - return ctx.stream; + conn_rec *c = ctx; + h2_conn_ctx_t *conn_ctx = h2_conn_ctx_get(c); + + if (conn_ctx && conn_ctx->stream_id) { + add_stream_poll_event(conn_ctx->mplx, conn_ctx->stream_id, + conn_ctx->mplx->streams_output_written); + } } -static apr_status_t unschedule_slow_tasks(h2_mplx *m) +static apr_status_t c2_setup_io(h2_mplx *m, conn_rec *c2, h2_stream *stream, h2_c2_transit *transit) { - h2_stream *stream; - int n; - - /* Try to get rid of streams that occupy workers. Look for safe requests - * that are repeatable. If none found, fail the connection. - */ - n = (m->tasks_active - m->limit_active - (int)h2_ihash_count(m->sredo)); - while (n > 0 && (stream = get_latest_repeatable_unsubmitted_stream(m))) { - h2_task_rst(stream->task, H2_ERR_CANCEL); - h2_ihash_add(m->sredo, stream); - --n; + h2_conn_ctx_t *conn_ctx; + apr_status_t rv = APR_SUCCESS; + const char *action = "init"; + + rv = h2_conn_ctx_init_for_c2(&conn_ctx, c2, m, stream, transit); + if (APR_SUCCESS != rv) goto cleanup; + + if (!conn_ctx->beam_out) { + action = "create output beam"; + rv = h2_beam_create(&conn_ctx->beam_out, c2, conn_ctx->req_pool, + stream->id, "output", 0, c2->base_server->timeout); + if (APR_SUCCESS != rv) goto cleanup; + + h2_beam_buffer_size_set(conn_ctx->beam_out, m->stream_max_mem); + h2_beam_on_was_empty(conn_ctx->beam_out, c2_beam_output_write_notify, c2); } - - if ((m->tasks_active - h2_ihash_count(m->sredo)) > m->limit_active) { - h2_stream *stream = get_timed_out_busy_stream(m); - if (stream) { - /* Too many busy workers, unable to cancel enough streams - * and with a busy, timed out stream, we tell the client - * to go away... */ - return APR_TIMEUP; - } + + memset(&conn_ctx->pipe_in, 0, sizeof(conn_ctx->pipe_in)); + if (stream->input) { + conn_ctx->beam_in = stream->input; + h2_beam_on_send(stream->input, c2_beam_input_write_notify, c2); + h2_beam_on_received(stream->input, c2_beam_input_read_notify, c2); + h2_beam_on_consumed(stream->input, c1_input_consumed, stream); +#if H2_USE_PIPES + action = "create input write pipe"; + rv = apr_file_pipe_create_pools(&conn_ctx->pipe_in[H2_PIPE_OUT], + &conn_ctx->pipe_in[H2_PIPE_IN], + APR_READ_BLOCK, + c2->pool, c2->pool); + if (APR_SUCCESS != rv) goto cleanup; +#endif + h2_beam_on_eagain(stream->input, c2_beam_input_read_eagain, c2); + if (!h2_beam_empty(stream->input)) + c2_beam_input_write_notify(c2, stream->input); + } + +cleanup: + stream->output = (APR_SUCCESS == rv)? 
conn_ctx->beam_out : NULL; + if (APR_SUCCESS != rv) { + ap_log_cerror(APLOG_MARK, APLOG_ERR, rv, c2, + H2_STRM_LOG(APLOGNO(10309), stream, + "error %s"), action); } - return APR_SUCCESS; + return rv; } -apr_status_t h2_mplx_idle(h2_mplx *m) +static conn_rec *s_next_c2(h2_mplx *m) { - apr_status_t status = APR_SUCCESS; - apr_time_t now; - apr_size_t scount; - - H2_MPLX_ENTER(m); + h2_stream *stream = NULL; + apr_status_t rv = APR_SUCCESS; + apr_uint32_t sid; + conn_rec *c2 = NULL; + h2_c2_transit *transit = NULL; - scount = h2_ihash_count(m->streams); - if (scount > 0) { - if (m->tasks_active) { - /* If we have streams in connection state 'IDLE', meaning - * all streams are ready to sent data out, but lack - * WINDOW_UPDATEs. - * - * This is ok, unless we have streams that still occupy - * h2 workers. As worker threads are a scarce resource, - * we need to take measures that we do not get DoSed. - * - * This is what we call an 'idle block'. Limit the amount - * of busy workers we allow for this connection until it - * well behaves. - */ - now = apr_time_now(); - m->last_idle_block = now; - if (m->limit_active > 2 - && now - m->last_limit_change >= m->limit_change_interval) { - if (m->limit_active > 16) { - m->limit_active = 16; - } - else if (m->limit_active > 8) { - m->limit_active = 8; - } - else if (m->limit_active > 4) { - m->limit_active = 4; - } - else if (m->limit_active > 2) { - m->limit_active = 2; - } - m->last_limit_change = now; - ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c, - "h2_mplx(%ld): decrease worker limit to %d", - m->id, m->limit_active); - } - - if (m->tasks_active > m->limit_active) { - status = unschedule_slow_tasks(m); - } - } - else if (!h2_iq_empty(m->q)) { - ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c, - "h2_mplx(%ld): idle, but %d streams to process", - m->id, (int)h2_iq_count(m->q)); - status = APR_EAGAIN; - } - else { - /* idle, have streams, but no tasks active. what are we waiting for? - * WINDOW_UPDATEs from client? */ - h2_stream *stream = NULL; - - ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c, - "h2_mplx(%ld): idle, no tasks ongoing, %d streams", - m->id, (int)h2_ihash_count(m->streams)); - h2_ihash_shift(m->streams, (void**)&stream, 1); - if (stream) { - h2_ihash_add(m->streams, stream); - if (stream->output && !stream->out_checked) { - /* FIXME: this looks like a race between the session thinking - * it is idle and the EOF on a stream not being sent. - * Signal to caller to leave IDLE state. - */ - ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, m->c, - H2_STRM_MSG(stream, "output closed=%d, mplx idle" - ", out has %ld bytes buffered"), - h2_beam_is_closed(stream->output), - (long)h2_beam_get_buffered(stream->output)); - h2_ihash_add(m->streams, stream); - check_data_for(m, stream, 0); - stream->out_checked = 1; - status = APR_EAGAIN; - } - } + while (!m->aborted && !stream && (m->processing_count < m->processing_limit) + && (sid = h2_iq_shift(m->q)) > 0) { + stream = h2_ihash_get(m->streams, sid); + } + + if (!stream) { + if (m->processing_count >= m->processing_limit && !h2_iq_empty(m->q)) { + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, m->c1, + H2_MPLX_MSG(m, "delaying request processing. 
" + "Current limit is %d and %d workers are in use."), + m->processing_limit, m->processing_count); } + goto cleanup; } - register_if_needed(m); - H2_MPLX_LEAVE(m); - return status; -} + if (sid > m->max_stream_id_started) { + m->max_stream_id_started = sid; + } -/******************************************************************************* - * HTTP/2 request engines - ******************************************************************************/ + transit = c2_transit_get(m); +#if AP_HAS_RESPONSE_BUCKETS + c2 = ap_create_secondary_connection(transit->pool, m->c1, transit->bucket_alloc); +#else + c2 = h2_c2_create(m->c1, transit->pool, transit->bucket_alloc); +#endif + if (!c2) goto cleanup; + ap_log_cerror(APLOG_MARK, APLOG_TRACE3, 0, m->c1, + H2_STRM_MSG(stream, "created new c2")); -typedef struct { - h2_mplx * m; - h2_req_engine *ngn; - int streams_updated; -} ngn_update_ctx; + rv = c2_setup_io(m, c2, stream, transit); + if (APR_SUCCESS != rv) goto cleanup; -static int ngn_update_window(void *ctx, void *val) -{ - ngn_update_ctx *uctx = ctx; - h2_stream *stream = val; - if (stream->task && stream->task->assigned == uctx->ngn - && output_consumed_signal(uctx->m, stream->task)) { - ++uctx->streams_updated; + stream->c2 = c2; + ++m->processing_count; + +cleanup: + if (APR_SUCCESS != rv && c2) { + h2_c2_destroy(c2); + c2 = NULL; } - return 1; + if (transit && !c2) { + c2_transit_recycle(m, transit); + } + return c2; } -static apr_status_t ngn_out_update_windows(h2_mplx *m, h2_req_engine *ngn) +static conn_rec *c2_prod_next(void *baton, int *phas_more) { - ngn_update_ctx ctx; - - ctx.m = m; - ctx.ngn = ngn; - ctx.streams_updated = 0; - h2_ihash_iter(m->streams, ngn_update_window, &ctx); - - return ctx.streams_updated? APR_SUCCESS : APR_EAGAIN; + h2_mplx *m = baton; + conn_rec *c = NULL; + + H2_MPLX_ENTER_ALWAYS(m); + if (!m->aborted) { + c = s_next_c2(m); + *phas_more = (c != NULL && !h2_iq_empty(m->q)); + } + H2_MPLX_LEAVE(m); + return c; } -apr_status_t h2_mplx_req_engine_push(const char *ngn_type, - request_rec *r, - http2_req_engine_init *einit) +static void s_c2_done(h2_mplx *m, conn_rec *c2, h2_conn_ctx_t *conn_ctx) { - apr_status_t status; - h2_mplx *m; - h2_task *task; h2_stream *stream; + + ap_assert(conn_ctx); + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c2, + "h2_mplx(%s-%d): c2 done", conn_ctx->id, conn_ctx->stream_id); + + AP_DEBUG_ASSERT(apr_atomic_read32(&conn_ctx->done) == 0); + apr_atomic_set32(&conn_ctx->done, 1); + conn_ctx->done_at = apr_time_now(); + ++c2->keepalives; + /* From here on, the final handling of c2 is done by c1 processing. + * Which means we can give it c1's scoreboard handle for updates. 
*/ + c2->sbh = m->c1->sbh; + + ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, c2, + "h2_mplx(%s-%d): request done, %f ms elapsed", + conn_ctx->id, conn_ctx->stream_id, + (conn_ctx->done_at - conn_ctx->started_at) / 1000.0); - task = h2_ctx_rget_task(r); - if (!task) { - return APR_ECONNABORTED; + if (!conn_ctx->has_final_response) { + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, conn_ctx->last_err, c2, + "h2_c2(%s-%d): processing finished without final response", + conn_ctx->id, conn_ctx->stream_id); + c2->aborted = 1; + if (conn_ctx->beam_out) + h2_beam_abort(conn_ctx->beam_out, c2); + } + else if (!conn_ctx->beam_out || !h2_beam_is_complete(conn_ctx->beam_out)) { + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, conn_ctx->last_err, c2, + "h2_c2(%s-%d): processing finished with incomplete output", + conn_ctx->id, conn_ctx->stream_id); + c2->aborted = 1; + h2_beam_abort(conn_ctx->beam_out, c2); + } + else if (!c2->aborted) { + s_mplx_be_happy(m, c2, conn_ctx); } - m = task->mplx; - H2_MPLX_ENTER(m); - - stream = h2_ihash_get(m->streams, task->stream_id); + stream = h2_ihash_get(m->streams, conn_ctx->stream_id); if (stream) { - status = h2_ngn_shed_push_request(m->ngn_shed, ngn_type, r, einit); + /* stream not done yet. trigger a potential polling on the output + * since nothing more will happening here. */ + ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, c2, + H2_STRM_MSG(stream, "c2_done, stream open")); + c2_beam_output_write_notify(c2, NULL); } - else { - status = APR_ECONNABORTED; + else if ((stream = h2_ihash_get(m->shold, conn_ctx->stream_id)) != NULL) { + /* stream is done, was just waiting for this. */ + ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, c2, + H2_STRM_MSG(stream, "c2_done, in hold")); + c1c2_stream_joined(m, stream); } + else { + int i; + + for (i = 0; i < m->spurge->nelts; ++i) { + if (stream == APR_ARRAY_IDX(m->spurge, i, h2_stream*)) { + ap_log_cerror(APLOG_MARK, APLOG_WARNING, 0, c2, + H2_STRM_LOG(APLOGNO(03517), stream, "already in spurge")); + ap_assert("stream should not be in spurge" == NULL); + return; + } + } - H2_MPLX_LEAVE(m); - return status; + ap_log_cerror(APLOG_MARK, APLOG_WARNING, 0, c2, APLOGNO(03518) + "h2_mplx(%s-%d): c2_done, stream not found", + conn_ctx->id, conn_ctx->stream_id); + ap_assert("stream should still be available" == NULL); + } } -apr_status_t h2_mplx_req_engine_pull(h2_req_engine *ngn, - apr_read_type_e block, - int capacity, - request_rec **pr) -{ - h2_ngn_shed *shed = h2_ngn_shed_get_shed(ngn); - h2_mplx *m = h2_ngn_shed_get_ctx(shed); - apr_status_t status; - int want_shutdown; - - H2_MPLX_ENTER(m); +static void c2_prod_done(void *baton, conn_rec *c2) +{ + h2_mplx *m = baton; + h2_conn_ctx_t *conn_ctx = h2_conn_ctx_get(c2); - want_shutdown = (block == APR_BLOCK_READ); + AP_DEBUG_ASSERT(conn_ctx); + H2_MPLX_ENTER_ALWAYS(m); - /* Take this opportunity to update output consummation - * for this engine */ - ngn_out_update_windows(m, ngn); - - if (want_shutdown && !h2_iq_empty(m->q)) { - /* For a blocking read, check first if requests are to be - * had and, if not, wait a short while before doing the - * blocking, and if unsuccessful, terminating read. 
- */ - status = h2_ngn_shed_pull_request(shed, ngn, capacity, 1, pr); - if (APR_STATUS_IS_EAGAIN(status)) { - ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c, - "h2_mplx(%ld): start block engine pull", m->id); - apr_thread_cond_timedwait(m->task_thawed, m->lock, - apr_time_from_msec(20)); - status = h2_ngn_shed_pull_request(shed, ngn, capacity, 1, pr); - } - } - else { - status = h2_ngn_shed_pull_request(shed, ngn, capacity, - want_shutdown, pr); - } + --m->processing_count; + s_c2_done(m, c2, conn_ctx); + if (m->join_wait) apr_thread_cond_signal(m->join_wait); H2_MPLX_LEAVE(m); - return status; } - -void h2_mplx_req_engine_done(h2_req_engine *ngn, conn_rec *r_conn, - apr_status_t status) + +static void workers_shutdown(void *baton, int graceful) { - h2_task *task = h2_ctx_cget_task(r_conn); - - if (task) { - h2_mplx *m = task->mplx; - h2_stream *stream; + h2_mplx *m = baton; + + apr_thread_mutex_lock(m->poll_lock); + /* time to wakeup and assess what to do */ + ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, m->c1, + H2_MPLX_MSG(m, "workers shutdown, waking pollset")); + m->shutdown = 1; + if (!graceful) { + m->aborted = 1; + } + apr_pollset_wakeup(m->pollset); + apr_thread_mutex_unlock(m->poll_lock); +} - H2_MPLX_ENTER_ALWAYS(m); +/******************************************************************************* + * h2_mplx DoS protection + ******************************************************************************/ - stream = h2_ihash_get(m->streams, task->stream_id); - - ngn_out_update_windows(m, ngn); - h2_ngn_shed_done_task(m->ngn_shed, ngn, task); - - if (status != APR_SUCCESS && stream - && h2_task_can_redo(task) - && !h2_ihash_get(m->sredo, stream->id)) { - h2_ihash_add(m->sredo, stream); - } +static void s_mplx_be_happy(h2_mplx *m, conn_rec *c, h2_conn_ctx_t *conn_ctx) +{ + apr_time_t now; - if (task->engine) { - /* cannot report that as done until engine returns */ - } - else { - task_done(m, task, ngn); + if (m->processing_limit < m->processing_max + && conn_ctx->started_at > m->last_mood_change) { + --m->irritations_since; + if (m->processing_limit < m->processing_max + && ((now = apr_time_now()) - m->last_mood_change >= m->mood_update_interval + || m->irritations_since < -m->processing_limit)) { + m->processing_limit = H2MIN(m->processing_limit * 2, m->processing_max); + m->last_mood_change = now; + m->irritations_since = 0; + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c, + H2_MPLX_MSG(m, "mood update, increasing worker limit to %d"), + m->processing_limit); } + } +} - H2_MPLX_LEAVE(m); +static void m_be_annoyed(h2_mplx *m) +{ + apr_time_t now; + + if (m->processing_limit > 2) { + ++m->irritations_since; + if (((now = apr_time_now()) - m->last_mood_change >= m->mood_update_interval) + || (m->irritations_since >= m->processing_limit)) { + + if (m->processing_limit > 16) { + m->processing_limit = 16; + } + else if (m->processing_limit > 8) { + m->processing_limit = 8; + } + else if (m->processing_limit > 4) { + m->processing_limit = 4; + } + else if (m->processing_limit > 2) { + m->processing_limit = 2; + } + m->last_mood_change = now; + m->irritations_since = 0; + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c1, + H2_MPLX_MSG(m, "mood update, decreasing worker limit to %d"), + m->processing_limit); + } } } @@ -1224,59 +1097,168 @@ void h2_mplx_req_engine_done(h2_req_engine *ngn, conn_rec *r_conn, * mplx master events dispatching ******************************************************************************/ -int h2_mplx_has_master_events(h2_mplx *m) +static int 
reset_is_acceptable(h2_stream *stream) { - return apr_atomic_read32(&m->event_pending) > 0; + /* client may terminate a stream via H2 RST_STREAM message at any time. + * This is annyoing when we have committed resources (e.g. worker threads) + * to it, so our mood (e.g. willingness to commit resources on this + * connection in the future) goes down. + * + * This is a DoS protection. We do not want to make it too easy for + * a client to eat up server resources. + * + * However: there are cases where a RST_STREAM is the only way to end + * a request. This includes websockets and server-side-event streams (SSEs). + * The responses to such requests continue forever otherwise. + * + */ + if (!stream_is_running(stream)) return 1; + if (!(stream->id & 0x01)) return 1; /* stream initiated by us. acceptable. */ + if (!stream->response) return 0; /* no response headers produced yet. bad. */ + if (!stream->out_data_frames) return 0; /* no response body data sent yet. bad. */ + return 1; /* otherwise, be forgiving */ } -apr_status_t h2_mplx_dispatch_master_events(h2_mplx *m, - stream_ev_callback *on_resume, - void *on_ctx) +apr_status_t h2_mplx_c1_client_rst(h2_mplx *m, int stream_id, h2_stream *stream) { - h2_stream *stream; - int n, id; - - ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, m->c, - "h2_mplx(%ld): dispatch events", m->id); - apr_atomic_set32(&m->event_pending, 0); + apr_status_t status = APR_SUCCESS; + int registered; - /* update input windows for streams */ - h2_ihash_iter(m->streams, report_consumption_iter, m); - purge_streams(m, 1); - - n = h2_ififo_count(m->readyq); - while (n > 0 - && (h2_ififo_try_pull(m->readyq, &id) == APR_SUCCESS)) { - --n; - stream = h2_ihash_get(m->streams, id); - if (stream) { - on_resume(on_ctx, stream); - } + H2_MPLX_ENTER_ALWAYS(m); + registered = (h2_ihash_get(m->streams, stream_id) != NULL); + if (!stream) { + /* a RST might arrive so late, we have already forgotten + * about it. Seems ok. */ + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, m->c1, + H2_MPLX_MSG(m, "RST on unknown stream %d"), stream_id); + AP_DEBUG_ASSERT(!registered); + } + else if (!registered) { + /* a RST on a stream that mplx has not been told about, but + * which the session knows. Very early and annoying. */ + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, m->c1, + H2_STRM_MSG(stream, "very early RST, drop")); + h2_stream_set_monitor(stream, NULL); + h2_stream_rst(stream, H2_ERR_STREAM_CLOSED); + h2_stream_dispatch(stream, H2_SEV_EOS_SENT); + m_stream_cleanup(m, stream); + m_be_annoyed(m); + } + else if (!reset_is_acceptable(stream)) { + m_be_annoyed(m); } - - return APR_SUCCESS; + H2_MPLX_LEAVE(m); + return status; } -apr_status_t h2_mplx_keep_active(h2_mplx *m, h2_stream *stream) +static apr_status_t mplx_pollset_create(h2_mplx *m) { - check_data_for(m, stream, 1); - return APR_SUCCESS; + /* stream0 output only */ + return apr_pollset_create(&m->pollset, 1, m->pool, + APR_POLLSET_WAKEABLE); } -int h2_mplx_awaits_data(h2_mplx *m) +static apr_status_t mplx_pollset_poll(h2_mplx *m, apr_interval_time_t timeout, + stream_ev_callback *on_stream_input, + stream_ev_callback *on_stream_output, + void *on_ctx) { - int waiting = 1; - - H2_MPLX_ENTER_ALWAYS(m); + apr_status_t rv; + const apr_pollfd_t *results, *pfd; + apr_int32_t nresults, i; + h2_conn_ctx_t *conn_ctx; + h2_stream *stream; - if (h2_ihash_empty(m->streams)) { - waiting = 0; - } - else if (!m->tasks_active && !h2_ififo_count(m->readyq) - && h2_iq_empty(m->q)) { - waiting = 0; - } + /* Make sure we are not called recursively. 
*/ + ap_assert(!m->polling); + m->polling = 1; + do { + ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, m->c1, + H2_MPLX_MSG(m, "enter polling timeout=%d"), + (int)apr_time_sec(timeout)); + + apr_array_clear(m->streams_ev_in); + apr_array_clear(m->streams_ev_out); + + do { + /* add streams we started processing in the meantime */ + apr_thread_mutex_lock(m->poll_lock); + if (!h2_iq_empty(m->streams_input_read) + || !h2_iq_empty(m->streams_output_written)) { + while ((i = h2_iq_shift(m->streams_input_read))) { + stream = h2_ihash_get(m->streams, i); + if (stream) { + APR_ARRAY_PUSH(m->streams_ev_in, h2_stream*) = stream; + } + } + while ((i = h2_iq_shift(m->streams_output_written))) { + stream = h2_ihash_get(m->streams, i); + if (stream) { + APR_ARRAY_PUSH(m->streams_ev_out, h2_stream*) = stream; + } + } + nresults = 0; + rv = APR_SUCCESS; + apr_thread_mutex_unlock(m->poll_lock); + break; + } + apr_thread_mutex_unlock(m->poll_lock); + + H2_MPLX_LEAVE(m); + rv = apr_pollset_poll(m->pollset, timeout >= 0? timeout : -1, &nresults, &results); + H2_MPLX_ENTER_ALWAYS(m); + if (APR_STATUS_IS_EINTR(rv) && m->shutdown) { + if (!m->aborted) { + rv = APR_SUCCESS; + } + goto cleanup; + } + } while (APR_STATUS_IS_EINTR(rv)); - H2_MPLX_LEAVE(m); - return waiting; + if (APR_SUCCESS != rv) { + if (APR_STATUS_IS_TIMEUP(rv)) { + ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, m->c1, + H2_MPLX_MSG(m, "polling timed out ")); + } + else { + ap_log_cerror(APLOG_MARK, APLOG_ERR, rv, m->c1, APLOGNO(10310) \ + H2_MPLX_MSG(m, "polling failed")); + } + goto cleanup; + } + + for (i = 0; i < nresults; i++) { + pfd = &results[i]; + conn_ctx = pfd->client_data; + + AP_DEBUG_ASSERT(conn_ctx); + if (conn_ctx->stream_id == 0) { + if (on_stream_input) { + APR_ARRAY_PUSH(m->streams_ev_in, h2_stream*) = m->stream0; + } + continue; + } + } + + if (on_stream_input && m->streams_ev_in->nelts) { + H2_MPLX_LEAVE(m); + for (i = 0; i < m->streams_ev_in->nelts; ++i) { + on_stream_input(on_ctx, APR_ARRAY_IDX(m->streams_ev_in, i, h2_stream*)); + } + H2_MPLX_ENTER_ALWAYS(m); + } + if (on_stream_output && m->streams_ev_out->nelts) { + H2_MPLX_LEAVE(m); + for (i = 0; i < m->streams_ev_out->nelts; ++i) { + on_stream_output(on_ctx, APR_ARRAY_IDX(m->streams_ev_out, i, h2_stream*)); + } + H2_MPLX_ENTER_ALWAYS(m); + } + break; + } while(1); + +cleanup: + m->polling = 0; + return rv; } + diff --git a/modules/http2/h2_mplx.h b/modules/http2/h2_mplx.h index 2890b98..860f916 100644 --- a/modules/http2/h2_mplx.h +++ b/modules/http2/h2_mplx.h @@ -18,21 +18,16 @@ #define __mod_h2__h2_mplx__ /** - * The stream multiplexer. It pushes buckets from the connection - * thread to the stream threads and vice versa. It's thread-safe - * to use. + * The stream multiplexer. It performs communication between the + * primary HTTP/2 connection (c1) to the secondary connections (c2) + * that process the requests, aka. HTTP/2 streams. * - * There is one h2_mplx instance for each h2_session, which sits on top - * of a particular httpd conn_rec. Input goes from the connection to - * the stream tasks. Output goes from the stream tasks to the connection, - * e.g. the client. + * There is one h2_mplx instance for each h2_session. * - * For each stream, there can be at most "H2StreamMaxMemSize" output bytes - * queued in the multiplexer. If a task thread tries to write more - * data, it is blocked until space becomes available. - * - * Writing input is never blocked. In order to use flow control on the input, - * the mplx can be polled for input data consumption. 
+ * Naming Convention: + * "h2_mplx_c1_" are methods only to be called by the primary connection + * "h2_mplx_c2_" are methods only to be called by a secondary connection + * "h2_mplx_worker_" are methods only to be called by a h2 worker thread */ struct apr_pool_t; @@ -41,108 +36,97 @@ struct apr_thread_cond_t; struct h2_bucket_beam; struct h2_config; struct h2_ihash_t; -struct h2_task; struct h2_stream; struct h2_request; struct apr_thread_cond_t; struct h2_workers; struct h2_iqueue; -struct h2_ngn_shed; -struct h2_req_engine; #include +#include "h2_workers.h" + +typedef struct h2_c2_transit h2_c2_transit; + +struct h2_c2_transit { + apr_pool_t *pool; + apr_bucket_alloc_t *bucket_alloc; +}; + typedef struct h2_mplx h2_mplx; struct h2_mplx { - long id; - conn_rec *c; + int child_num; /* child this runs in */ + apr_uint32_t id; /* id unique per child */ + conn_rec *c1; /* the main connection */ apr_pool_t *pool; + struct h2_stream *stream0; /* HTTP/2's stream 0 */ server_rec *s; /* server for master conn */ - unsigned int event_pending; - unsigned int aborted; - unsigned int is_registered; /* is registered at h2_workers */ + int shutdown; /* we are shutting down */ + int aborted; /* we need to get out of here asap */ + int polling; /* is waiting/processing pollset events */ + ap_conn_producer_t *producer; /* registered producer at h2_workers */ - struct h2_ihash_t *streams; /* all streams currently processing */ - struct h2_ihash_t *sredo; /* all streams that need to be re-started */ - struct h2_ihash_t *shold; /* all streams done with task ongoing */ - struct h2_ihash_t *spurge; /* all streams done, ready for destroy */ + struct h2_ihash_t *streams; /* all streams active */ + struct h2_ihash_t *shold; /* all streams done with c2 processing ongoing */ + apr_array_header_t *spurge; /* all streams done, ready for destroy */ struct h2_iqueue *q; /* all stream ids that need to be started */ - struct h2_ififo *readyq; /* all stream ids ready for output */ - - struct h2_ihash_t *redo_tasks; /* all tasks that need to be redone */ + + apr_size_t stream_max_mem; /* max memory to buffer for a stream */ + apr_uint32_t max_streams; /* max # of concurrent streams */ + apr_uint32_t max_stream_id_started; /* highest stream id that started processing */ + + apr_uint32_t processing_count; /* # of c2 working for this mplx */ + apr_uint32_t processing_limit; /* current limit on processing c2s, dynamic */ + apr_uint32_t processing_max; /* max, hard limit of processing c2s */ - int max_streams; /* max # of concurrent streams */ - int max_stream_started; /* highest stream id that started processing */ - int tasks_active; /* # of tasks being processed from this mplx */ - int limit_active; /* current limit on active tasks, dynamic */ - int max_active; /* max, hard limit # of active tasks in a process */ - apr_time_t last_idle_block; /* last time, this mplx entered IDLE while - * streams were ready */ - apr_time_t last_limit_change; /* last time, worker limit changed */ - apr_interval_time_t limit_change_interval; + apr_time_t last_mood_change; /* last time, processing limit changed */ + apr_interval_time_t mood_update_interval; /* how frequent we update at most */ + apr_uint32_t irritations_since; /* irritations (>0) or happy events (<0) since last mood change */ apr_thread_mutex_t *lock; - struct apr_thread_cond_t *added_output; - struct apr_thread_cond_t *task_thawed; struct apr_thread_cond_t *join_wait; - apr_size_t stream_max_mem; - - apr_pool_t *spare_io_pool; - apr_array_header_t *spare_slaves; /* spare 
slave connections */ - - struct h2_workers *workers; - - struct h2_ngn_shed *ngn_shed; -}; + apr_pollset_t *pollset; /* pollset for c1/c2 IO events */ + apr_array_header_t *streams_ev_in; + apr_array_header_t *streams_ev_out; + apr_thread_mutex_t *poll_lock; /* protect modifications of queues below */ + struct h2_iqueue *streams_input_read; /* streams whose input has been read from */ + struct h2_iqueue *streams_output_written; /* streams whose output has been written to */ + struct h2_workers *workers; /* h2 workers process wide instance */ -/******************************************************************************* - * Object lifecycle and information. - ******************************************************************************/ + apr_uint32_t max_spare_transits; /* max number of transit pools idling */ + apr_array_header_t *c2_transits; /* base pools for running c2 connections */ +}; -apr_status_t h2_mplx_child_init(apr_pool_t *pool, server_rec *s); +apr_status_t h2_mplx_c1_child_init(apr_pool_t *pool, server_rec *s); /** * Create the multiplexer for the given HTTP2 session. * Implicitly has reference count 1. */ -h2_mplx *h2_mplx_create(conn_rec *c, apr_pool_t *master, - const struct h2_config *conf, - struct h2_workers *workers); +h2_mplx *h2_mplx_c1_create(int child_id, apr_uint32_t id, + struct h2_stream *stream0, + server_rec *s, apr_pool_t *master, + struct h2_workers *workers); /** - * Decreases the reference counter of this mplx and waits for it - * to reached 0, destroy the mplx afterwards. - * This is to be called from the thread that created the mplx in - * the first place. - * @param m the mplx to be released and destroyed + * Destroy the mplx, shutting down all ongoing processing. + * @param m the mplx destroyed * @param wait condition var to wait on for ref counter == 0 */ -void h2_mplx_release_and_join(h2_mplx *m, struct apr_thread_cond_t *wait); - -apr_status_t h2_mplx_pop_task(h2_mplx *m, struct h2_task **ptask); - -void h2_mplx_task_done(h2_mplx *m, struct h2_task *task, struct h2_task **ptask); +void h2_mplx_c1_destroy(h2_mplx *m); /** * Shut down the multiplexer gracefully. Will no longer schedule new streams * but let the ongoing ones finish normally. * @return the highest stream id being/been processed */ -int h2_mplx_shutdown(h2_mplx *m); - -int h2_mplx_is_busy(h2_mplx *m); - -/******************************************************************************* - * IO lifetime of streams. - ******************************************************************************/ - -struct h2_stream *h2_mplx_stream_get(h2_mplx *m, int id); +int h2_mplx_c1_shutdown(h2_mplx *m); /** * Notifies mplx that a stream has been completely handled on the main @@ -150,33 +134,28 @@ struct h2_stream *h2_mplx_stream_get(h2_mplx *m, int id); * * @param m the mplx itself * @param stream the stream ready for cleanup + * @param pstream_count return the number of streams active */ -apr_status_t h2_mplx_stream_cleanup(h2_mplx *m, struct h2_stream *stream); - -/** - * Waits on output data from any stream in this session to become available. - * Returns APR_TIMEUP if no data arrived in the given time. 
- */ -apr_status_t h2_mplx_out_trywait(h2_mplx *m, apr_interval_time_t timeout, - struct apr_thread_cond_t *iowait); +apr_status_t h2_mplx_c1_stream_cleanup(h2_mplx *m, struct h2_stream *stream, + unsigned int *pstream_count); -apr_status_t h2_mplx_keep_active(h2_mplx *m, struct h2_stream *stream); - -/******************************************************************************* - * Stream processing. - ******************************************************************************/ +int h2_mplx_c1_stream_is_running(h2_mplx *m, struct h2_stream *stream); /** * Process a stream request. * * @param m the multiplexer - * @param stream the identifier of the stream - * @param r the request to be processed + * @param read_to_process + * @param input_pending * @param cmp the stream priority compare function - * @param ctx context data for the compare function + * @param pstream_count on return the number of streams active in mplx */ -apr_status_t h2_mplx_process(h2_mplx *m, struct h2_stream *stream, - h2_stream_pri_cmp *cmp, void *ctx); +void h2_mplx_c1_process(h2_mplx *m, + struct h2_iqueue *read_to_process, + h2_stream_get_fn *get_stream, + h2_stream_pri_cmp_fn *cmp, + struct h2_session *session, + unsigned int *pstream_count); /** * Stream priorities have changed, reschedule pending requests. @@ -185,146 +164,67 @@ apr_status_t h2_mplx_process(h2_mplx *m, struct h2_stream *stream, * @param cmp the stream priority compare function * @param ctx context data for the compare function */ -apr_status_t h2_mplx_reprioritize(h2_mplx *m, h2_stream_pri_cmp *cmp, void *ctx); - -typedef apr_status_t stream_ev_callback(void *ctx, struct h2_stream *stream); +apr_status_t h2_mplx_c1_reprioritize(h2_mplx *m, h2_stream_pri_cmp_fn *cmp, + struct h2_session *session); -/** - * Check if the multiplexer has events for the master connection pending. - * @return != 0 iff there are events pending - */ -int h2_mplx_has_master_events(h2_mplx *m); +typedef void stream_ev_callback(void *ctx, struct h2_stream *stream); /** - * Dispatch events for the master connection, such as - ± @param m the multiplexer - * @param on_resume new output data has arrived for a suspended stream - * @param ctx user supplied argument to invocation. + * Poll the primary connection for input and the active streams for output. + * Invoke the callback for any stream where an event happened. */ -apr_status_t h2_mplx_dispatch_master_events(h2_mplx *m, - stream_ev_callback *on_resume, - void *ctx); - -int h2_mplx_awaits_data(h2_mplx *m); +apr_status_t h2_mplx_c1_poll(h2_mplx *m, apr_interval_time_t timeout, + stream_ev_callback *on_stream_input, + stream_ev_callback *on_stream_output, + void *on_ctx); -typedef int h2_mplx_stream_cb(struct h2_stream *s, void *ctx); +void h2_mplx_c2_input_read(h2_mplx *m, conn_rec *c2); +void h2_mplx_c2_output_written(h2_mplx *m, conn_rec *c2); -apr_status_t h2_mplx_stream_do(h2_mplx *m, h2_mplx_stream_cb *cb, void *ctx); - -/******************************************************************************* - * Output handling of streams. - ******************************************************************************/ +typedef int h2_mplx_stream_cb(struct h2_stream *s, void *userdata); /** - * Opens the output for the given stream with the specified response. + * Iterate over all streams known to mplx from the primary connection. 
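As a usage illustration, not part of the patch, a minimal sketch of the iteration API described above; the convention that the callback returns non-zero to keep iterating is an assumption, and the stream->response check is borrowed from reset_is_acceptable() earlier in this patch:

    static int count_with_response(struct h2_stream *stream, void *userdata)
    {
        int *pcount = userdata;
        if (stream->response)   /* response headers have been produced */
            ++(*pcount);
        return 1;               /* assumed: non-zero continues the iteration */
    }

    static int count_streams_with_response(h2_mplx *m)
    {
        int count = 0;
        h2_mplx_c1_streams_do(m, count_with_response, &count);
        return count;
    }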
+ * @param m the mplx + * @param cb the callback to invoke on each stream + * @param ctx userdata passed to the callback */ -apr_status_t h2_mplx_out_open(h2_mplx *mplx, int stream_id, - struct h2_bucket_beam *beam); - -/******************************************************************************* - * h2_mplx list Manipulation. - ******************************************************************************/ +apr_status_t h2_mplx_c1_streams_do(h2_mplx *m, h2_mplx_stream_cb *cb, void *ctx); /** - * The magic pointer value that indicates the head of a h2_mplx list - * @param b The mplx list - * @return The magic pointer value + * Return != 0 iff all open streams want to send data */ -#define H2_MPLX_LIST_SENTINEL(b) APR_RING_SENTINEL((b), h2_mplx, link) +int h2_mplx_c1_all_streams_want_send_data(h2_mplx *m); /** - * Determine if the mplx list is empty - * @param b The list to check - * @return true or false + * A stream has been RST_STREAM by the client. Abort + * any processing going on and remove from processing + * queue. */ -#define H2_MPLX_LIST_EMPTY(b) APR_RING_EMPTY((b), h2_mplx, link) +apr_status_t h2_mplx_c1_client_rst(h2_mplx *m, int stream_id, + struct h2_stream *stream); /** - * Return the first mplx in a list - * @param b The list to query - * @return The first mplx in the list + * Get readonly access to a stream for a secondary connection. */ -#define H2_MPLX_LIST_FIRST(b) APR_RING_FIRST(b) +const struct h2_stream *h2_mplx_c2_stream_get(h2_mplx *m, int stream_id); /** - * Return the last mplx in a list - * @param b The list to query - * @return The last mplx int he list + * A h2 worker asks for a secondary connection to process. + * @param out_c2 non-NULL, a pointer where to reveive the next + * secondary connection to process. */ -#define H2_MPLX_LIST_LAST(b) APR_RING_LAST(b) +apr_status_t h2_mplx_worker_pop_c2(h2_mplx *m, conn_rec **out_c2); -/** - * Insert a single mplx at the front of a list - * @param b The list to add to - * @param e The mplx to insert - */ -#define H2_MPLX_LIST_INSERT_HEAD(b, e) do { \ -h2_mplx *ap__b = (e); \ -APR_RING_INSERT_HEAD((b), ap__b, h2_mplx, link); \ -} while (0) /** - * Insert a single mplx at the end of a list - * @param b The list to add to - * @param e The mplx to insert + * Session processing is entering KEEPALIVE, e.g. giving control + * to the MPM for monitoring incoming socket events only. + * Last chance for maintenance work before losing control. */ -#define H2_MPLX_LIST_INSERT_TAIL(b, e) do { \ -h2_mplx *ap__b = (e); \ -APR_RING_INSERT_TAIL((b), ap__b, h2_mplx, link); \ -} while (0) +void h2_mplx_c1_going_keepalive(h2_mplx *m); -/** - * Get the next mplx in the list - * @param e The current mplx - * @return The next mplx - */ -#define H2_MPLX_NEXT(e) APR_RING_NEXT((e), link) -/** - * Get the previous mplx in the list - * @param e The current mplx - * @return The previous mplx - */ -#define H2_MPLX_PREV(e) APR_RING_PREV((e), link) - -/** - * Remove a mplx from its list - * @param e The mplx to remove - */ -#define H2_MPLX_REMOVE(e) APR_RING_REMOVE((e), link) - -/******************************************************************************* - * h2_mplx DoS protection - ******************************************************************************/ - -/** - * Master connection has entered idle mode. 
- * @param m the mplx instance of the master connection - * @return != SUCCESS iff connection should be terminated - */ -apr_status_t h2_mplx_idle(h2_mplx *m); - -/******************************************************************************* - * h2_req_engine handling - ******************************************************************************/ - -typedef void h2_output_consumed(void *ctx, conn_rec *c, apr_off_t consumed); -typedef apr_status_t h2_mplx_req_engine_init(struct h2_req_engine *engine, - const char *id, - const char *type, - apr_pool_t *pool, - apr_size_t req_buffer_size, - request_rec *r, - h2_output_consumed **pconsumed, - void **pbaton); - -apr_status_t h2_mplx_req_engine_push(const char *ngn_type, - request_rec *r, - h2_mplx_req_engine_init *einit); -apr_status_t h2_mplx_req_engine_pull(struct h2_req_engine *ngn, - apr_read_type_e block, - int capacity, - request_rec **pr); -void h2_mplx_req_engine_done(struct h2_req_engine *ngn, conn_rec *r_conn, - apr_status_t status); +#define H2_MPLX_MSG(m, msg) \ + "h2_mplx(%d-%lu): "msg, m->child_num, (unsigned long)m->id #endif /* defined(__mod_h2__h2_mplx__) */ diff --git a/modules/http2/h2_ngn_shed.c b/modules/http2/h2_ngn_shed.c deleted file mode 100644 index fb85776..0000000 --- a/modules/http2/h2_ngn_shed.c +++ /dev/null @@ -1,392 +0,0 @@ -/* Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
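Before the removed h2_ngn_shed sources below, a short illustration of the H2_MPLX_MSG() logging helper defined above; the call is copied from h2_mplx_c1_client_rst() earlier in this patch, with its macro expansion written out for clarity:

    /* the logging call ... */
    ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, m->c1,
                  H2_MPLX_MSG(m, "RST on unknown stream %d"), stream_id);
    /* ... expands to the prefixed format string plus the mplx identifiers: */
    ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, m->c1,
                  "h2_mplx(%d-%lu): " "RST on unknown stream %d",
                  m->child_num, (unsigned long)m->id, stream_id);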
- */ - -#include -#include -#include - -#include -#include -#include -#include - -#include -#include -#include - -#include "mod_http2.h" - -#include "h2_private.h" -#include "h2.h" -#include "h2_config.h" -#include "h2_conn.h" -#include "h2_ctx.h" -#include "h2_h2.h" -#include "h2_mplx.h" -#include "h2_request.h" -#include "h2_task.h" -#include "h2_util.h" -#include "h2_ngn_shed.h" - - -typedef struct h2_ngn_entry h2_ngn_entry; -struct h2_ngn_entry { - APR_RING_ENTRY(h2_ngn_entry) link; - h2_task *task; - request_rec *r; -}; - -#define H2_NGN_ENTRY_NEXT(e) APR_RING_NEXT((e), link) -#define H2_NGN_ENTRY_PREV(e) APR_RING_PREV((e), link) -#define H2_NGN_ENTRY_REMOVE(e) APR_RING_REMOVE((e), link) - -#define H2_REQ_ENTRIES_SENTINEL(b) APR_RING_SENTINEL((b), h2_ngn_entry, link) -#define H2_REQ_ENTRIES_EMPTY(b) APR_RING_EMPTY((b), h2_ngn_entry, link) -#define H2_REQ_ENTRIES_FIRST(b) APR_RING_FIRST(b) -#define H2_REQ_ENTRIES_LAST(b) APR_RING_LAST(b) - -#define H2_REQ_ENTRIES_INSERT_HEAD(b, e) do { \ -h2_ngn_entry *ap__b = (e); \ -APR_RING_INSERT_HEAD((b), ap__b, h2_ngn_entry, link); \ -} while (0) - -#define H2_REQ_ENTRIES_INSERT_TAIL(b, e) do { \ -h2_ngn_entry *ap__b = (e); \ -APR_RING_INSERT_TAIL((b), ap__b, h2_ngn_entry, link); \ -} while (0) - -struct h2_req_engine { - const char *id; /* identifier */ - const char *type; /* name of the engine type */ - apr_pool_t *pool; /* pool for engine specific allocations */ - conn_rec *c; /* connection this engine is assigned to */ - h2_task *task; /* the task this engine is based on, running in */ - h2_ngn_shed *shed; - - unsigned int shutdown : 1; /* engine is being shut down */ - unsigned int done : 1; /* engine has finished */ - - APR_RING_HEAD(h2_req_entries, h2_ngn_entry) entries; - int capacity; /* maximum concurrent requests */ - int no_assigned; /* # of assigned requests */ - int no_live; /* # of live */ - int no_finished; /* # of finished */ - - h2_output_consumed *out_consumed; - void *out_consumed_ctx; -}; - -const char *h2_req_engine_get_id(h2_req_engine *engine) -{ - return engine->id; -} - -int h2_req_engine_is_shutdown(h2_req_engine *engine) -{ - return engine->shutdown; -} - -void h2_req_engine_out_consumed(h2_req_engine *engine, conn_rec *c, - apr_off_t bytes) -{ - if (engine->out_consumed) { - engine->out_consumed(engine->out_consumed_ctx, c, bytes); - } -} - -h2_ngn_shed *h2_ngn_shed_create(apr_pool_t *pool, conn_rec *c, - int default_capacity, - apr_size_t req_buffer_size) -{ - h2_ngn_shed *shed; - - shed = apr_pcalloc(pool, sizeof(*shed)); - shed->c = c; - shed->pool = pool; - shed->default_capacity = default_capacity; - shed->req_buffer_size = req_buffer_size; - shed->ngns = apr_hash_make(pool); - - return shed; -} - -void h2_ngn_shed_set_ctx(h2_ngn_shed *shed, void *user_ctx) -{ - shed->user_ctx = user_ctx; -} - -void *h2_ngn_shed_get_ctx(h2_ngn_shed *shed) -{ - return shed->user_ctx; -} - -h2_ngn_shed *h2_ngn_shed_get_shed(h2_req_engine *ngn) -{ - return ngn->shed; -} - -void h2_ngn_shed_abort(h2_ngn_shed *shed) -{ - ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, shed->c, APLOGNO(03394) - "h2_ngn_shed(%ld): abort", shed->c->id); - shed->aborted = 1; -} - -static void ngn_add_task(h2_req_engine *ngn, h2_task *task, request_rec *r) -{ - h2_ngn_entry *entry = apr_pcalloc(task->pool, sizeof(*entry)); - APR_RING_ELEM_INIT(entry, link); - entry->task = task; - entry->r = r; - H2_REQ_ENTRIES_INSERT_TAIL(&ngn->entries, entry); - ngn->no_assigned++; -} - - -apr_status_t h2_ngn_shed_push_request(h2_ngn_shed *shed, const char *ngn_type, - 
request_rec *r, - http2_req_engine_init *einit) -{ - h2_req_engine *ngn; - h2_task *task = h2_ctx_rget_task(r); - - ap_assert(task); - ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, shed->c, - "h2_ngn_shed(%ld): PUSHing request (task=%s)", shed->c->id, - task->id); - if (task->request->serialize) { - /* Max compatibility, deny processing of this */ - return APR_EOF; - } - - if (task->assigned) { - --task->assigned->no_assigned; - --task->assigned->no_live; - task->assigned = NULL; - } - - if (task->engine) { - ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, task->c, - "h2_ngn_shed(%ld): push task(%s) hosting engine %s " - "already with %d tasks", - shed->c->id, task->id, task->engine->id, - task->engine->no_assigned); - task->assigned = task->engine; - ngn_add_task(task->engine, task, r); - return APR_SUCCESS; - } - - ngn = apr_hash_get(shed->ngns, ngn_type, APR_HASH_KEY_STRING); - if (ngn && !ngn->shutdown) { - /* this task will be processed in another thread, - * freeze any I/O for the time being. */ - ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, task->c, - "h2_ngn_shed(%ld): pushing request %s to %s", - shed->c->id, task->id, ngn->id); - if (!h2_task_has_thawed(task)) { - h2_task_freeze(task); - } - ngn_add_task(ngn, task, r); - return APR_SUCCESS; - } - - /* no existing engine or being shut down, start a new one */ - if (einit) { - apr_status_t status; - apr_pool_t *pool = task->pool; - h2_req_engine *newngn; - - newngn = apr_pcalloc(pool, sizeof(*ngn)); - newngn->pool = pool; - newngn->id = apr_psprintf(pool, "ngn-%s", task->id); - newngn->type = apr_pstrdup(pool, ngn_type); - newngn->c = task->c; - newngn->shed = shed; - newngn->capacity = shed->default_capacity; - newngn->no_assigned = 1; - newngn->no_live = 1; - APR_RING_INIT(&newngn->entries, h2_ngn_entry, link); - - status = einit(newngn, newngn->id, newngn->type, newngn->pool, - shed->req_buffer_size, r, - &newngn->out_consumed, &newngn->out_consumed_ctx); - - ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, task->c, APLOGNO(03395) - "h2_ngn_shed(%ld): create engine %s (%s)", - shed->c->id, newngn->id, newngn->type); - if (status == APR_SUCCESS) { - newngn->task = task; - task->engine = newngn; - task->assigned = newngn; - apr_hash_set(shed->ngns, newngn->type, APR_HASH_KEY_STRING, newngn); - } - return status; - } - return APR_EOF; -} - -static h2_ngn_entry *pop_detached(h2_req_engine *ngn) -{ - h2_ngn_entry *entry; - for (entry = H2_REQ_ENTRIES_FIRST(&ngn->entries); - entry != H2_REQ_ENTRIES_SENTINEL(&ngn->entries); - entry = H2_NGN_ENTRY_NEXT(entry)) { - if (h2_task_has_thawed(entry->task) - || (entry->task->engine == ngn)) { - /* The task hosting this engine can always be pulled by it. - * For other task, they need to become detached, e.g. no longer - * assigned to another worker. 
*/ - H2_NGN_ENTRY_REMOVE(entry); - return entry; - } - } - return NULL; -} - -apr_status_t h2_ngn_shed_pull_request(h2_ngn_shed *shed, - h2_req_engine *ngn, - int capacity, - int want_shutdown, - request_rec **pr) -{ - h2_ngn_entry *entry; - - ap_assert(ngn); - *pr = NULL; - ap_log_cerror(APLOG_MARK, APLOG_TRACE3, 0, shed->c, APLOGNO(03396) - "h2_ngn_shed(%ld): pull task for engine %s, shutdown=%d", - shed->c->id, ngn->id, want_shutdown); - if (shed->aborted) { - ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, shed->c, APLOGNO(03397) - "h2_ngn_shed(%ld): abort while pulling requests %s", - shed->c->id, ngn->id); - ngn->shutdown = 1; - return APR_ECONNABORTED; - } - - ngn->capacity = capacity; - if (H2_REQ_ENTRIES_EMPTY(&ngn->entries)) { - if (want_shutdown) { - ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, shed->c, - "h2_ngn_shed(%ld): emtpy queue, shutdown engine %s", - shed->c->id, ngn->id); - ngn->shutdown = 1; - } - return ngn->shutdown? APR_EOF : APR_EAGAIN; - } - - if ((entry = pop_detached(ngn))) { - ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, entry->task->c, APLOGNO(03398) - "h2_ngn_shed(%ld): pulled request %s for engine %s", - shed->c->id, entry->task->id, ngn->id); - ngn->no_live++; - *pr = entry->r; - entry->task->assigned = ngn; - /* task will now run in ngn's own thread. Modules like lua - * seem to require the correct thread set in the conn_rec. - * See PR 59542. */ - if (entry->task->c && ngn->c) { - entry->task->c->current_thread = ngn->c->current_thread; - } - if (entry->task->engine == ngn) { - /* If an engine pushes its own base task, and then pulls - * it back to itself again, it needs to be thawed. - */ - h2_task_thaw(entry->task); - } - return APR_SUCCESS; - } - - if (1) { - h2_ngn_entry *entry = H2_REQ_ENTRIES_FIRST(&ngn->entries); - ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, shed->c, APLOGNO(03399) - "h2_ngn_shed(%ld): pull task, nothing, first task %s", - shed->c->id, entry->task->id); - } - return APR_EAGAIN; -} - -static apr_status_t ngn_done_task(h2_ngn_shed *shed, h2_req_engine *ngn, - h2_task *task, int waslive, int aborted) -{ - ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, shed->c, APLOGNO(03400) - "h2_ngn_shed(%ld): task %s %s by %s", - shed->c->id, task->id, aborted? 
"aborted":"done", ngn->id); - ngn->no_finished++; - if (waslive) ngn->no_live--; - ngn->no_assigned--; - task->assigned = NULL; - - return APR_SUCCESS; -} - -apr_status_t h2_ngn_shed_done_task(h2_ngn_shed *shed, - struct h2_req_engine *ngn, h2_task *task) -{ - return ngn_done_task(shed, ngn, task, 1, 0); -} - -void h2_ngn_shed_done_ngn(h2_ngn_shed *shed, struct h2_req_engine *ngn) -{ - if (ngn->done) { - return; - } - - if (!shed->aborted && !H2_REQ_ENTRIES_EMPTY(&ngn->entries)) { - h2_ngn_entry *entry; - ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, shed->c, - "h2_ngn_shed(%ld): exit engine %s (%s), " - "has still requests queued, shutdown=%d," - "assigned=%ld, live=%ld, finished=%ld", - shed->c->id, ngn->id, ngn->type, - ngn->shutdown, - (long)ngn->no_assigned, (long)ngn->no_live, - (long)ngn->no_finished); - for (entry = H2_REQ_ENTRIES_FIRST(&ngn->entries); - entry != H2_REQ_ENTRIES_SENTINEL(&ngn->entries); - entry = H2_NGN_ENTRY_NEXT(entry)) { - h2_task *task = entry->task; - ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, shed->c, - "h2_ngn_shed(%ld): engine %s has queued task %s, " - "frozen=%d, aborting", - shed->c->id, ngn->id, task->id, task->frozen); - ngn_done_task(shed, ngn, task, 0, 1); - task->engine = task->assigned = NULL; - } - } - if (!shed->aborted && (ngn->no_assigned > 1 || ngn->no_live > 1)) { - ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, shed->c, - "h2_ngn_shed(%ld): exit engine %s (%s), " - "assigned=%ld, live=%ld, finished=%ld", - shed->c->id, ngn->id, ngn->type, - (long)ngn->no_assigned, (long)ngn->no_live, - (long)ngn->no_finished); - } - else { - ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, shed->c, - "h2_ngn_shed(%ld): exit engine %s", - shed->c->id, ngn->id); - } - - apr_hash_set(shed->ngns, ngn->type, APR_HASH_KEY_STRING, NULL); - ngn->done = 1; -} - -void h2_ngn_shed_destroy(h2_ngn_shed *shed) -{ - ap_assert(apr_hash_count(shed->ngns) == 0); -} - diff --git a/modules/http2/h2_ngn_shed.h b/modules/http2/h2_ngn_shed.h deleted file mode 100644 index 7764c18..0000000 --- a/modules/http2/h2_ngn_shed.h +++ /dev/null @@ -1,79 +0,0 @@ -/* Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef h2_req_shed_h -#define h2_req_shed_h - -struct h2_req_engine; -struct h2_task; - -typedef struct h2_ngn_shed h2_ngn_shed; -struct h2_ngn_shed { - conn_rec *c; - apr_pool_t *pool; - apr_hash_t *ngns; - void *user_ctx; - - unsigned int aborted : 1; - - int default_capacity; - apr_size_t req_buffer_size; /* preferred buffer size for responses */ -}; - -const char *h2_req_engine_get_id(h2_req_engine *engine); -int h2_req_engine_is_shutdown(h2_req_engine *engine); - -void h2_req_engine_out_consumed(h2_req_engine *engine, conn_rec *c, - apr_off_t bytes); - -typedef apr_status_t h2_shed_ngn_init(h2_req_engine *engine, - const char *id, - const char *type, - apr_pool_t *pool, - apr_size_t req_buffer_size, - request_rec *r, - h2_output_consumed **pconsumed, - void **pbaton); - -h2_ngn_shed *h2_ngn_shed_create(apr_pool_t *pool, conn_rec *c, - int default_capactiy, - apr_size_t req_buffer_size); - -void h2_ngn_shed_destroy(h2_ngn_shed *shed); - -void h2_ngn_shed_set_ctx(h2_ngn_shed *shed, void *user_ctx); -void *h2_ngn_shed_get_ctx(h2_ngn_shed *shed); - -h2_ngn_shed *h2_ngn_shed_get_shed(struct h2_req_engine *ngn); - -void h2_ngn_shed_abort(h2_ngn_shed *shed); - -apr_status_t h2_ngn_shed_push_request(h2_ngn_shed *shed, const char *ngn_type, - request_rec *r, - h2_shed_ngn_init *init_cb); - -apr_status_t h2_ngn_shed_pull_request(h2_ngn_shed *shed, h2_req_engine *pub_ngn, - int capacity, - int want_shutdown, request_rec **pr); - -apr_status_t h2_ngn_shed_done_task(h2_ngn_shed *shed, - struct h2_req_engine *ngn, - struct h2_task *task); - -void h2_ngn_shed_done_ngn(h2_ngn_shed *shed, struct h2_req_engine *ngn); - - -#endif /* h2_req_shed_h */ diff --git a/modules/http2/h2_protocol.c b/modules/http2/h2_protocol.c new file mode 100644 index 0000000..874753e --- /dev/null +++ b/modules/http2/h2_protocol.c @@ -0,0 +1,485 @@ +/* Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include + +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mod_http2.h" +#include "h2_private.h" + +#include "h2_bucket_beam.h" +#include "h2_stream.h" +#include "h2_c2.h" +#include "h2_config.h" +#include "h2_conn_ctx.h" +#include "h2_c1.h" +#include "h2_request.h" +#include "h2_headers.h" +#include "h2_session.h" +#include "h2_util.h" +#include "h2_protocol.h" +#include "mod_http2.h" + +const char *h2_protocol_ids_tls[] = { + "h2", NULL +}; + +const char *h2_protocol_ids_clear[] = { + "h2c", NULL +}; + +const char *H2_MAGIC_TOKEN = "PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n"; + +/******************************************************************************* + * HTTP/2 error stuff + */ +static const char *h2_err_descr[] = { + "no error", /* 0x0 */ + "protocol error", + "internal error", + "flow control error", + "settings timeout", + "stream closed", /* 0x5 */ + "frame size error", + "refused stream", + "cancel", + "compression error", + "connect error", /* 0xa */ + "enhance your calm", + "inadequate security", + "http/1.1 required", +}; + +const char *h2_protocol_err_description(unsigned int h2_error) +{ + if (h2_error < (sizeof(h2_err_descr)/sizeof(h2_err_descr[0]))) { + return h2_err_descr[h2_error]; + } + return "unknown http/2 error code"; +} + +/******************************************************************************* + * Check connection security requirements of RFC 7540 + */ + +/* + * Black Listed Ciphers from RFC 7549 Appendix A + * + */ +static const char *RFC7540_names[] = { + /* ciphers with NULL encrpytion */ + "NULL-MD5", /* TLS_NULL_WITH_NULL_NULL */ + /* same */ /* TLS_RSA_WITH_NULL_MD5 */ + "NULL-SHA", /* TLS_RSA_WITH_NULL_SHA */ + "NULL-SHA256", /* TLS_RSA_WITH_NULL_SHA256 */ + "PSK-NULL-SHA", /* TLS_PSK_WITH_NULL_SHA */ + "DHE-PSK-NULL-SHA", /* TLS_DHE_PSK_WITH_NULL_SHA */ + "RSA-PSK-NULL-SHA", /* TLS_RSA_PSK_WITH_NULL_SHA */ + "PSK-NULL-SHA256", /* TLS_PSK_WITH_NULL_SHA256 */ + "PSK-NULL-SHA384", /* TLS_PSK_WITH_NULL_SHA384 */ + "DHE-PSK-NULL-SHA256", /* TLS_DHE_PSK_WITH_NULL_SHA256 */ + "DHE-PSK-NULL-SHA384", /* TLS_DHE_PSK_WITH_NULL_SHA384 */ + "RSA-PSK-NULL-SHA256", /* TLS_RSA_PSK_WITH_NULL_SHA256 */ + "RSA-PSK-NULL-SHA384", /* TLS_RSA_PSK_WITH_NULL_SHA384 */ + "ECDH-ECDSA-NULL-SHA", /* TLS_ECDH_ECDSA_WITH_NULL_SHA */ + "ECDHE-ECDSA-NULL-SHA", /* TLS_ECDHE_ECDSA_WITH_NULL_SHA */ + "ECDH-RSA-NULL-SHA", /* TLS_ECDH_RSA_WITH_NULL_SHA */ + "ECDHE-RSA-NULL-SHA", /* TLS_ECDHE_RSA_WITH_NULL_SHA */ + "AECDH-NULL-SHA", /* TLS_ECDH_anon_WITH_NULL_SHA */ + "ECDHE-PSK-NULL-SHA", /* TLS_ECDHE_PSK_WITH_NULL_SHA */ + "ECDHE-PSK-NULL-SHA256", /* TLS_ECDHE_PSK_WITH_NULL_SHA256 */ + "ECDHE-PSK-NULL-SHA384", /* TLS_ECDHE_PSK_WITH_NULL_SHA384 */ + + /* DES/3DES ciphers */ + "PSK-3DES-EDE-CBC-SHA", /* TLS_PSK_WITH_3DES_EDE_CBC_SHA */ + "DHE-PSK-3DES-EDE-CBC-SHA", /* TLS_DHE_PSK_WITH_3DES_EDE_CBC_SHA */ + "RSA-PSK-3DES-EDE-CBC-SHA", /* TLS_RSA_PSK_WITH_3DES_EDE_CBC_SHA */ + "ECDH-ECDSA-DES-CBC3-SHA", /* TLS_ECDH_ECDSA_WITH_3DES_EDE_CBC_SHA */ + "ECDHE-ECDSA-DES-CBC3-SHA", /* TLS_ECDHE_ECDSA_WITH_3DES_EDE_CBC_SHA */ + "ECDH-RSA-DES-CBC3-SHA", /* TLS_ECDH_RSA_WITH_3DES_EDE_CBC_SHA */ + "ECDHE-RSA-DES-CBC3-SHA", /* TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA */ + "AECDH-DES-CBC3-SHA", /* TLS_ECDH_anon_WITH_3DES_EDE_CBC_SHA */ + "SRP-3DES-EDE-CBC-SHA", /* TLS_SRP_SHA_WITH_3DES_EDE_CBC_SHA */ + "SRP-RSA-3DES-EDE-CBC-SHA", /* TLS_SRP_SHA_RSA_WITH_3DES_EDE_CBC_SHA */ + "SRP-DSS-3DES-EDE-CBC-SHA", /* 
TLS_SRP_SHA_DSS_WITH_3DES_EDE_CBC_SHA */ + "ECDHE-PSK-3DES-EDE-CBC-SHA", /* TLS_ECDHE_PSK_WITH_3DES_EDE_CBC_SHA */ + "DES-CBC-SHA", /* TLS_RSA_WITH_DES_CBC_SHA */ + "DES-CBC3-SHA", /* TLS_RSA_WITH_3DES_EDE_CBC_SHA */ + "DHE-DSS-DES-CBC3-SHA", /* TLS_DHE_DSS_WITH_3DES_EDE_CBC_SHA */ + "DHE-RSA-DES-CBC-SHA", /* TLS_DHE_RSA_WITH_DES_CBC_SHA */ + "DHE-RSA-DES-CBC3-SHA", /* TLS_DHE_RSA_WITH_3DES_EDE_CBC_SHA */ + "ADH-DES-CBC-SHA", /* TLS_DH_anon_WITH_DES_CBC_SHA */ + "ADH-DES-CBC3-SHA", /* TLS_DH_anon_WITH_3DES_EDE_CBC_SHA */ + "EXP-DH-DSS-DES-CBC-SHA", /* TLS_DH_DSS_EXPORT_WITH_DES40_CBC_SHA */ + "DH-DSS-DES-CBC-SHA", /* TLS_DH_DSS_WITH_DES_CBC_SHA */ + "DH-DSS-DES-CBC3-SHA", /* TLS_DH_DSS_WITH_3DES_EDE_CBC_SHA */ + "EXP-DH-RSA-DES-CBC-SHA", /* TLS_DH_RSA_EXPORT_WITH_DES40_CBC_SHA */ + "DH-RSA-DES-CBC-SHA", /* TLS_DH_RSA_WITH_DES_CBC_SHA */ + "DH-RSA-DES-CBC3-SHA", /* TLS_DH_RSA_WITH_3DES_EDE_CBC_SHA */ + + /* blacklisted EXPORT ciphers */ + "EXP-RC4-MD5", /* TLS_RSA_EXPORT_WITH_RC4_40_MD5 */ + "EXP-RC2-CBC-MD5", /* TLS_RSA_EXPORT_WITH_RC2_CBC_40_MD5 */ + "EXP-DES-CBC-SHA", /* TLS_RSA_EXPORT_WITH_DES40_CBC_SHA */ + "EXP-DHE-DSS-DES-CBC-SHA", /* TLS_DHE_DSS_EXPORT_WITH_DES40_CBC_SHA */ + "EXP-DHE-RSA-DES-CBC-SHA", /* TLS_DHE_RSA_EXPORT_WITH_DES40_CBC_SHA */ + "EXP-ADH-DES-CBC-SHA", /* TLS_DH_anon_EXPORT_WITH_DES40_CBC_SHA */ + "EXP-ADH-RC4-MD5", /* TLS_DH_anon_EXPORT_WITH_RC4_40_MD5 */ + + /* blacklisted RC4 encryption */ + "RC4-MD5", /* TLS_RSA_WITH_RC4_128_MD5 */ + "RC4-SHA", /* TLS_RSA_WITH_RC4_128_SHA */ + "ADH-RC4-MD5", /* TLS_DH_anon_WITH_RC4_128_MD5 */ + "KRB5-RC4-SHA", /* TLS_KRB5_WITH_RC4_128_SHA */ + "KRB5-RC4-MD5", /* TLS_KRB5_WITH_RC4_128_MD5 */ + "EXP-KRB5-RC4-SHA", /* TLS_KRB5_EXPORT_WITH_RC4_40_SHA */ + "EXP-KRB5-RC4-MD5", /* TLS_KRB5_EXPORT_WITH_RC4_40_MD5 */ + "PSK-RC4-SHA", /* TLS_PSK_WITH_RC4_128_SHA */ + "DHE-PSK-RC4-SHA", /* TLS_DHE_PSK_WITH_RC4_128_SHA */ + "RSA-PSK-RC4-SHA", /* TLS_RSA_PSK_WITH_RC4_128_SHA */ + "ECDH-ECDSA-RC4-SHA", /* TLS_ECDH_ECDSA_WITH_RC4_128_SHA */ + "ECDHE-ECDSA-RC4-SHA", /* TLS_ECDHE_ECDSA_WITH_RC4_128_SHA */ + "ECDH-RSA-RC4-SHA", /* TLS_ECDH_RSA_WITH_RC4_128_SHA */ + "ECDHE-RSA-RC4-SHA", /* TLS_ECDHE_RSA_WITH_RC4_128_SHA */ + "AECDH-RC4-SHA", /* TLS_ECDH_anon_WITH_RC4_128_SHA */ + "ECDHE-PSK-RC4-SHA", /* TLS_ECDHE_PSK_WITH_RC4_128_SHA */ + + /* blacklisted AES128 encrpytion ciphers */ + "AES128-SHA256", /* TLS_RSA_WITH_AES_128_CBC_SHA */ + "DH-DSS-AES128-SHA", /* TLS_DH_DSS_WITH_AES_128_CBC_SHA */ + "DH-RSA-AES128-SHA", /* TLS_DH_RSA_WITH_AES_128_CBC_SHA */ + "DHE-DSS-AES128-SHA", /* TLS_DHE_DSS_WITH_AES_128_CBC_SHA */ + "DHE-RSA-AES128-SHA", /* TLS_DHE_RSA_WITH_AES_128_CBC_SHA */ + "ADH-AES128-SHA", /* TLS_DH_anon_WITH_AES_128_CBC_SHA */ + "AES128-SHA256", /* TLS_RSA_WITH_AES_128_CBC_SHA256 */ + "DH-DSS-AES128-SHA256", /* TLS_DH_DSS_WITH_AES_128_CBC_SHA256 */ + "DH-RSA-AES128-SHA256", /* TLS_DH_RSA_WITH_AES_128_CBC_SHA256 */ + "DHE-DSS-AES128-SHA256", /* TLS_DHE_DSS_WITH_AES_128_CBC_SHA256 */ + "DHE-RSA-AES128-SHA256", /* TLS_DHE_RSA_WITH_AES_128_CBC_SHA256 */ + "ECDH-ECDSA-AES128-SHA", /* TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA */ + "ECDHE-ECDSA-AES128-SHA", /* TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA */ + "ECDH-RSA-AES128-SHA", /* TLS_ECDH_RSA_WITH_AES_128_CBC_SHA */ + "ECDHE-RSA-AES128-SHA", /* TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA */ + "AECDH-AES128-SHA", /* TLS_ECDH_anon_WITH_AES_128_CBC_SHA */ + "ECDHE-ECDSA-AES128-SHA256", /* TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256 */ + "ECDH-ECDSA-AES128-SHA256", /* TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA256 */ + 
"ECDHE-RSA-AES128-SHA256", /* TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256 */ + "ECDH-RSA-AES128-SHA256", /* TLS_ECDH_RSA_WITH_AES_128_CBC_SHA256 */ + "ADH-AES128-SHA256", /* TLS_DH_anon_WITH_AES_128_CBC_SHA256 */ + "PSK-AES128-CBC-SHA", /* TLS_PSK_WITH_AES_128_CBC_SHA */ + "DHE-PSK-AES128-CBC-SHA", /* TLS_DHE_PSK_WITH_AES_128_CBC_SHA */ + "RSA-PSK-AES128-CBC-SHA", /* TLS_RSA_PSK_WITH_AES_128_CBC_SHA */ + "PSK-AES128-CBC-SHA256", /* TLS_PSK_WITH_AES_128_CBC_SHA256 */ + "DHE-PSK-AES128-CBC-SHA256", /* TLS_DHE_PSK_WITH_AES_128_CBC_SHA256 */ + "RSA-PSK-AES128-CBC-SHA256", /* TLS_RSA_PSK_WITH_AES_128_CBC_SHA256 */ + "ECDHE-PSK-AES128-CBC-SHA", /* TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA */ + "ECDHE-PSK-AES128-CBC-SHA256", /* TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA256 */ + "AES128-CCM", /* TLS_RSA_WITH_AES_128_CCM */ + "AES128-CCM8", /* TLS_RSA_WITH_AES_128_CCM_8 */ + "PSK-AES128-CCM", /* TLS_PSK_WITH_AES_128_CCM */ + "PSK-AES128-CCM8", /* TLS_PSK_WITH_AES_128_CCM_8 */ + "AES128-GCM-SHA256", /* TLS_RSA_WITH_AES_128_GCM_SHA256 */ + "DH-RSA-AES128-GCM-SHA256", /* TLS_DH_RSA_WITH_AES_128_GCM_SHA256 */ + "DH-DSS-AES128-GCM-SHA256", /* TLS_DH_DSS_WITH_AES_128_GCM_SHA256 */ + "ADH-AES128-GCM-SHA256", /* TLS_DH_anon_WITH_AES_128_GCM_SHA256 */ + "PSK-AES128-GCM-SHA256", /* TLS_PSK_WITH_AES_128_GCM_SHA256 */ + "RSA-PSK-AES128-GCM-SHA256", /* TLS_RSA_PSK_WITH_AES_128_GCM_SHA256 */ + "ECDH-ECDSA-AES128-GCM-SHA256", /* TLS_ECDH_ECDSA_WITH_AES_128_GCM_SHA256 */ + "ECDH-RSA-AES128-GCM-SHA256", /* TLS_ECDH_RSA_WITH_AES_128_GCM_SHA256 */ + "SRP-AES-128-CBC-SHA", /* TLS_SRP_SHA_WITH_AES_128_CBC_SHA */ + "SRP-RSA-AES-128-CBC-SHA", /* TLS_SRP_SHA_RSA_WITH_AES_128_CBC_SHA */ + "SRP-DSS-AES-128-CBC-SHA", /* TLS_SRP_SHA_DSS_WITH_AES_128_CBC_SHA */ + + /* blacklisted AES256 encrpytion ciphers */ + "AES256-SHA", /* TLS_RSA_WITH_AES_256_CBC_SHA */ + "DH-DSS-AES256-SHA", /* TLS_DH_DSS_WITH_AES_256_CBC_SHA */ + "DH-RSA-AES256-SHA", /* TLS_DH_RSA_WITH_AES_256_CBC_SHA */ + "DHE-DSS-AES256-SHA", /* TLS_DHE_DSS_WITH_AES_256_CBC_SHA */ + "DHE-RSA-AES256-SHA", /* TLS_DHE_RSA_WITH_AES_256_CBC_SHA */ + "ADH-AES256-SHA", /* TLS_DH_anon_WITH_AES_256_CBC_SHA */ + "AES256-SHA256", /* TLS_RSA_WITH_AES_256_CBC_SHA256 */ + "DH-DSS-AES256-SHA256", /* TLS_DH_DSS_WITH_AES_256_CBC_SHA256 */ + "DH-RSA-AES256-SHA256", /* TLS_DH_RSA_WITH_AES_256_CBC_SHA256 */ + "DHE-DSS-AES256-SHA256", /* TLS_DHE_DSS_WITH_AES_256_CBC_SHA256 */ + "DHE-RSA-AES256-SHA256", /* TLS_DHE_RSA_WITH_AES_256_CBC_SHA256 */ + "ADH-AES256-SHA256", /* TLS_DH_anon_WITH_AES_256_CBC_SHA256 */ + "ECDH-ECDSA-AES256-SHA", /* TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA */ + "ECDHE-ECDSA-AES256-SHA", /* TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA */ + "ECDH-RSA-AES256-SHA", /* TLS_ECDH_RSA_WITH_AES_256_CBC_SHA */ + "ECDHE-RSA-AES256-SHA", /* TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA */ + "AECDH-AES256-SHA", /* TLS_ECDH_anon_WITH_AES_256_CBC_SHA */ + "ECDHE-ECDSA-AES256-SHA384", /* TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384 */ + "ECDH-ECDSA-AES256-SHA384", /* TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA384 */ + "ECDHE-RSA-AES256-SHA384", /* TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384 */ + "ECDH-RSA-AES256-SHA384", /* TLS_ECDH_RSA_WITH_AES_256_CBC_SHA384 */ + "PSK-AES256-CBC-SHA", /* TLS_PSK_WITH_AES_256_CBC_SHA */ + "DHE-PSK-AES256-CBC-SHA", /* TLS_DHE_PSK_WITH_AES_256_CBC_SHA */ + "RSA-PSK-AES256-CBC-SHA", /* TLS_RSA_PSK_WITH_AES_256_CBC_SHA */ + "PSK-AES256-CBC-SHA384", /* TLS_PSK_WITH_AES_256_CBC_SHA384 */ + "DHE-PSK-AES256-CBC-SHA384", /* TLS_DHE_PSK_WITH_AES_256_CBC_SHA384 */ + "RSA-PSK-AES256-CBC-SHA384", /* 
TLS_RSA_PSK_WITH_AES_256_CBC_SHA384 */ + "ECDHE-PSK-AES256-CBC-SHA", /* TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA */ + "ECDHE-PSK-AES256-CBC-SHA384", /* TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA384 */ + "SRP-AES-256-CBC-SHA", /* TLS_SRP_SHA_WITH_AES_256_CBC_SHA */ + "SRP-RSA-AES-256-CBC-SHA", /* TLS_SRP_SHA_RSA_WITH_AES_256_CBC_SHA */ + "SRP-DSS-AES-256-CBC-SHA", /* TLS_SRP_SHA_DSS_WITH_AES_256_CBC_SHA */ + "AES256-CCM", /* TLS_RSA_WITH_AES_256_CCM */ + "AES256-CCM8", /* TLS_RSA_WITH_AES_256_CCM_8 */ + "PSK-AES256-CCM", /* TLS_PSK_WITH_AES_256_CCM */ + "PSK-AES256-CCM8", /* TLS_PSK_WITH_AES_256_CCM_8 */ + "AES256-GCM-SHA384", /* TLS_RSA_WITH_AES_256_GCM_SHA384 */ + "DH-RSA-AES256-GCM-SHA384", /* TLS_DH_RSA_WITH_AES_256_GCM_SHA384 */ + "DH-DSS-AES256-GCM-SHA384", /* TLS_DH_DSS_WITH_AES_256_GCM_SHA384 */ + "ADH-AES256-GCM-SHA384", /* TLS_DH_anon_WITH_AES_256_GCM_SHA384 */ + "PSK-AES256-GCM-SHA384", /* TLS_PSK_WITH_AES_256_GCM_SHA384 */ + "RSA-PSK-AES256-GCM-SHA384", /* TLS_RSA_PSK_WITH_AES_256_GCM_SHA384 */ + "ECDH-ECDSA-AES256-GCM-SHA384", /* TLS_ECDH_ECDSA_WITH_AES_256_GCM_SHA384 */ + "ECDH-RSA-AES256-GCM-SHA384", /* TLS_ECDH_RSA_WITH_AES_256_GCM_SHA384 */ + + /* blacklisted CAMELLIA128 encrpytion ciphers */ + "CAMELLIA128-SHA", /* TLS_RSA_WITH_CAMELLIA_128_CBC_SHA */ + "DH-DSS-CAMELLIA128-SHA", /* TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA */ + "DH-RSA-CAMELLIA128-SHA", /* TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA */ + "DHE-DSS-CAMELLIA128-SHA", /* TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA */ + "DHE-RSA-CAMELLIA128-SHA", /* TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA */ + "ADH-CAMELLIA128-SHA", /* TLS_DH_anon_WITH_CAMELLIA_128_CBC_SHA */ + "ECDHE-ECDSA-CAMELLIA128-SHA256", /* TLS_ECDHE_ECDSA_WITH_CAMELLIA_128_CBC_SHA256 */ + "ECDH-ECDSA-CAMELLIA128-SHA256", /* TLS_ECDH_ECDSA_WITH_CAMELLIA_128_CBC_SHA256 */ + "ECDHE-RSA-CAMELLIA128-SHA256", /* TLS_ECDHE_RSA_WITH_CAMELLIA_128_CBC_SHA256 */ + "ECDH-RSA-CAMELLIA128-SHA256", /* TLS_ECDH_RSA_WITH_CAMELLIA_128_CBC_SHA256 */ + "PSK-CAMELLIA128-SHA256", /* TLS_PSK_WITH_CAMELLIA_128_CBC_SHA256 */ + "DHE-PSK-CAMELLIA128-SHA256", /* TLS_DHE_PSK_WITH_CAMELLIA_128_CBC_SHA256 */ + "RSA-PSK-CAMELLIA128-SHA256", /* TLS_RSA_PSK_WITH_CAMELLIA_128_CBC_SHA256 */ + "ECDHE-PSK-CAMELLIA128-SHA256", /* TLS_ECDHE_PSK_WITH_CAMELLIA_128_CBC_SHA256 */ + "CAMELLIA128-GCM-SHA256", /* TLS_RSA_WITH_CAMELLIA_128_GCM_SHA256 */ + "DH-RSA-CAMELLIA128-GCM-SHA256", /* TLS_DH_RSA_WITH_CAMELLIA_128_GCM_SHA256 */ + "DH-DSS-CAMELLIA128-GCM-SHA256", /* TLS_DH_DSS_WITH_CAMELLIA_128_GCM_SHA256 */ + "ADH-CAMELLIA128-GCM-SHA256", /* TLS_DH_anon_WITH_CAMELLIA_128_GCM_SHA256 */ + "ECDH-ECDSA-CAMELLIA128-GCM-SHA256",/* TLS_ECDH_ECDSA_WITH_CAMELLIA_128_GCM_SHA256 */ + "ECDH-RSA-CAMELLIA128-GCM-SHA256", /* TLS_ECDH_RSA_WITH_CAMELLIA_128_GCM_SHA256 */ + "PSK-CAMELLIA128-GCM-SHA256", /* TLS_PSK_WITH_CAMELLIA_128_GCM_SHA256 */ + "RSA-PSK-CAMELLIA128-GCM-SHA256", /* TLS_RSA_PSK_WITH_CAMELLIA_128_GCM_SHA256 */ + "CAMELLIA128-SHA256", /* TLS_RSA_WITH_CAMELLIA_128_CBC_SHA256 */ + "DH-DSS-CAMELLIA128-SHA256", /* TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA256 */ + "DH-RSA-CAMELLIA128-SHA256", /* TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA256 */ + "DHE-DSS-CAMELLIA128-SHA256", /* TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA256 */ + "DHE-RSA-CAMELLIA128-SHA256", /* TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA256 */ + "ADH-CAMELLIA128-SHA256", /* TLS_DH_anon_WITH_CAMELLIA_128_CBC_SHA256 */ + + /* blacklisted CAMELLIA256 encrpytion ciphers */ + "CAMELLIA256-SHA", /* TLS_RSA_WITH_CAMELLIA_256_CBC_SHA */ + "DH-RSA-CAMELLIA256-SHA", /* 
TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA */ + "DH-DSS-CAMELLIA256-SHA", /* TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA */ + "DHE-DSS-CAMELLIA256-SHA", /* TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA */ + "DHE-RSA-CAMELLIA256-SHA", /* TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA */ + "ADH-CAMELLIA256-SHA", /* TLS_DH_anon_WITH_CAMELLIA_256_CBC_SHA */ + "ECDHE-ECDSA-CAMELLIA256-SHA384", /* TLS_ECDHE_ECDSA_WITH_CAMELLIA_256_CBC_SHA384 */ + "ECDH-ECDSA-CAMELLIA256-SHA384", /* TLS_ECDH_ECDSA_WITH_CAMELLIA_256_CBC_SHA384 */ + "ECDHE-RSA-CAMELLIA256-SHA384", /* TLS_ECDHE_RSA_WITH_CAMELLIA_256_CBC_SHA384 */ + "ECDH-RSA-CAMELLIA256-SHA384", /* TLS_ECDH_RSA_WITH_CAMELLIA_256_CBC_SHA384 */ + "PSK-CAMELLIA256-SHA384", /* TLS_PSK_WITH_CAMELLIA_256_CBC_SHA384 */ + "DHE-PSK-CAMELLIA256-SHA384", /* TLS_DHE_PSK_WITH_CAMELLIA_256_CBC_SHA384 */ + "RSA-PSK-CAMELLIA256-SHA384", /* TLS_RSA_PSK_WITH_CAMELLIA_256_CBC_SHA384 */ + "ECDHE-PSK-CAMELLIA256-SHA384", /* TLS_ECDHE_PSK_WITH_CAMELLIA_256_CBC_SHA384 */ + "CAMELLIA256-SHA256", /* TLS_RSA_WITH_CAMELLIA_256_CBC_SHA256 */ + "DH-DSS-CAMELLIA256-SHA256", /* TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA256 */ + "DH-RSA-CAMELLIA256-SHA256", /* TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA256 */ + "DHE-DSS-CAMELLIA256-SHA256", /* TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA256 */ + "DHE-RSA-CAMELLIA256-SHA256", /* TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA256 */ + "ADH-CAMELLIA256-SHA256", /* TLS_DH_anon_WITH_CAMELLIA_256_CBC_SHA256 */ + "CAMELLIA256-GCM-SHA384", /* TLS_RSA_WITH_CAMELLIA_256_GCM_SHA384 */ + "DH-RSA-CAMELLIA256-GCM-SHA384", /* TLS_DH_RSA_WITH_CAMELLIA_256_GCM_SHA384 */ + "DH-DSS-CAMELLIA256-GCM-SHA384", /* TLS_DH_DSS_WITH_CAMELLIA_256_GCM_SHA384 */ + "ADH-CAMELLIA256-GCM-SHA384", /* TLS_DH_anon_WITH_CAMELLIA_256_GCM_SHA384 */ + "ECDH-ECDSA-CAMELLIA256-GCM-SHA384",/* TLS_ECDH_ECDSA_WITH_CAMELLIA_256_GCM_SHA384 */ + "ECDH-RSA-CAMELLIA256-GCM-SHA384", /* TLS_ECDH_RSA_WITH_CAMELLIA_256_GCM_SHA384 */ + "PSK-CAMELLIA256-GCM-SHA384", /* TLS_PSK_WITH_CAMELLIA_256_GCM_SHA384 */ + "RSA-PSK-CAMELLIA256-GCM-SHA384", /* TLS_RSA_PSK_WITH_CAMELLIA_256_GCM_SHA384 */ + + /* The blacklisted ARIA encrpytion ciphers */ + "ARIA128-SHA256", /* TLS_RSA_WITH_ARIA_128_CBC_SHA256 */ + "ARIA256-SHA384", /* TLS_RSA_WITH_ARIA_256_CBC_SHA384 */ + "DH-DSS-ARIA128-SHA256", /* TLS_DH_DSS_WITH_ARIA_128_CBC_SHA256 */ + "DH-DSS-ARIA256-SHA384", /* TLS_DH_DSS_WITH_ARIA_256_CBC_SHA384 */ + "DH-RSA-ARIA128-SHA256", /* TLS_DH_RSA_WITH_ARIA_128_CBC_SHA256 */ + "DH-RSA-ARIA256-SHA384", /* TLS_DH_RSA_WITH_ARIA_256_CBC_SHA384 */ + "DHE-DSS-ARIA128-SHA256", /* TLS_DHE_DSS_WITH_ARIA_128_CBC_SHA256 */ + "DHE-DSS-ARIA256-SHA384", /* TLS_DHE_DSS_WITH_ARIA_256_CBC_SHA384 */ + "DHE-RSA-ARIA128-SHA256", /* TLS_DHE_RSA_WITH_ARIA_128_CBC_SHA256 */ + "DHE-RSA-ARIA256-SHA384", /* TLS_DHE_RSA_WITH_ARIA_256_CBC_SHA384 */ + "ADH-ARIA128-SHA256", /* TLS_DH_anon_WITH_ARIA_128_CBC_SHA256 */ + "ADH-ARIA256-SHA384", /* TLS_DH_anon_WITH_ARIA_256_CBC_SHA384 */ + "ECDHE-ECDSA-ARIA128-SHA256", /* TLS_ECDHE_ECDSA_WITH_ARIA_128_CBC_SHA256 */ + "ECDHE-ECDSA-ARIA256-SHA384", /* TLS_ECDHE_ECDSA_WITH_ARIA_256_CBC_SHA384 */ + "ECDH-ECDSA-ARIA128-SHA256", /* TLS_ECDH_ECDSA_WITH_ARIA_128_CBC_SHA256 */ + "ECDH-ECDSA-ARIA256-SHA384", /* TLS_ECDH_ECDSA_WITH_ARIA_256_CBC_SHA384 */ + "ECDHE-RSA-ARIA128-SHA256", /* TLS_ECDHE_RSA_WITH_ARIA_128_CBC_SHA256 */ + "ECDHE-RSA-ARIA256-SHA384", /* TLS_ECDHE_RSA_WITH_ARIA_256_CBC_SHA384 */ + "ECDH-RSA-ARIA128-SHA256", /* TLS_ECDH_RSA_WITH_ARIA_128_CBC_SHA256 */ + "ECDH-RSA-ARIA256-SHA384", /* TLS_ECDH_RSA_WITH_ARIA_256_CBC_SHA384 */ + 
"ARIA128-GCM-SHA256", /* TLS_RSA_WITH_ARIA_128_GCM_SHA256 */ + "ARIA256-GCM-SHA384", /* TLS_RSA_WITH_ARIA_256_GCM_SHA384 */ + "DH-DSS-ARIA128-GCM-SHA256", /* TLS_DH_DSS_WITH_ARIA_128_GCM_SHA256 */ + "DH-DSS-ARIA256-GCM-SHA384", /* TLS_DH_DSS_WITH_ARIA_256_GCM_SHA384 */ + "DH-RSA-ARIA128-GCM-SHA256", /* TLS_DH_RSA_WITH_ARIA_128_GCM_SHA256 */ + "DH-RSA-ARIA256-GCM-SHA384", /* TLS_DH_RSA_WITH_ARIA_256_GCM_SHA384 */ + "ADH-ARIA128-GCM-SHA256", /* TLS_DH_anon_WITH_ARIA_128_GCM_SHA256 */ + "ADH-ARIA256-GCM-SHA384", /* TLS_DH_anon_WITH_ARIA_256_GCM_SHA384 */ + "ECDH-ECDSA-ARIA128-GCM-SHA256", /* TLS_ECDH_ECDSA_WITH_ARIA_128_GCM_SHA256 */ + "ECDH-ECDSA-ARIA256-GCM-SHA384", /* TLS_ECDH_ECDSA_WITH_ARIA_256_GCM_SHA384 */ + "ECDH-RSA-ARIA128-GCM-SHA256", /* TLS_ECDH_RSA_WITH_ARIA_128_GCM_SHA256 */ + "ECDH-RSA-ARIA256-GCM-SHA384", /* TLS_ECDH_RSA_WITH_ARIA_256_GCM_SHA384 */ + "PSK-ARIA128-SHA256", /* TLS_PSK_WITH_ARIA_128_CBC_SHA256 */ + "PSK-ARIA256-SHA384", /* TLS_PSK_WITH_ARIA_256_CBC_SHA384 */ + "DHE-PSK-ARIA128-SHA256", /* TLS_DHE_PSK_WITH_ARIA_128_CBC_SHA256 */ + "DHE-PSK-ARIA256-SHA384", /* TLS_DHE_PSK_WITH_ARIA_256_CBC_SHA384 */ + "RSA-PSK-ARIA128-SHA256", /* TLS_RSA_PSK_WITH_ARIA_128_CBC_SHA256 */ + "RSA-PSK-ARIA256-SHA384", /* TLS_RSA_PSK_WITH_ARIA_256_CBC_SHA384 */ + "ARIA128-GCM-SHA256", /* TLS_PSK_WITH_ARIA_128_GCM_SHA256 */ + "ARIA256-GCM-SHA384", /* TLS_PSK_WITH_ARIA_256_GCM_SHA384 */ + "RSA-PSK-ARIA128-GCM-SHA256", /* TLS_RSA_PSK_WITH_ARIA_128_GCM_SHA256 */ + "RSA-PSK-ARIA256-GCM-SHA384", /* TLS_RSA_PSK_WITH_ARIA_256_GCM_SHA384 */ + "ECDHE-PSK-ARIA128-SHA256", /* TLS_ECDHE_PSK_WITH_ARIA_128_CBC_SHA256 */ + "ECDHE-PSK-ARIA256-SHA384", /* TLS_ECDHE_PSK_WITH_ARIA_256_CBC_SHA384 */ + + /* blacklisted SEED encryptions */ + "SEED-SHA", /*TLS_RSA_WITH_SEED_CBC_SHA */ + "DH-DSS-SEED-SHA", /* TLS_DH_DSS_WITH_SEED_CBC_SHA */ + "DH-RSA-SEED-SHA", /* TLS_DH_RSA_WITH_SEED_CBC_SHA */ + "DHE-DSS-SEED-SHA", /* TLS_DHE_DSS_WITH_SEED_CBC_SHA */ + "DHE-RSA-SEED-SHA", /* TLS_DHE_RSA_WITH_SEED_CBC_SHA */ + "ADH-SEED-SHA", /* TLS_DH_anon_WITH_SEED_CBC_SHA */ + + /* blacklisted KRB5 ciphers */ + "KRB5-DES-CBC-SHA", /* TLS_KRB5_WITH_DES_CBC_SHA */ + "KRB5-DES-CBC3-SHA", /* TLS_KRB5_WITH_3DES_EDE_CBC_SHA */ + "KRB5-IDEA-CBC-SHA", /* TLS_KRB5_WITH_IDEA_CBC_SHA */ + "KRB5-DES-CBC-MD5", /* TLS_KRB5_WITH_DES_CBC_MD5 */ + "KRB5-DES-CBC3-MD5", /* TLS_KRB5_WITH_3DES_EDE_CBC_MD5 */ + "KRB5-IDEA-CBC-MD5", /* TLS_KRB5_WITH_IDEA_CBC_MD5 */ + "EXP-KRB5-DES-CBC-SHA", /* TLS_KRB5_EXPORT_WITH_DES_CBC_40_SHA */ + "EXP-KRB5-DES-CBC-MD5", /* TLS_KRB5_EXPORT_WITH_DES_CBC_40_MD5 */ + "EXP-KRB5-RC2-CBC-SHA", /* TLS_KRB5_EXPORT_WITH_RC2_CBC_40_SHA */ + "EXP-KRB5-RC2-CBC-MD5", /* TLS_KRB5_EXPORT_WITH_RC2_CBC_40_MD5 */ + + /* blacklisted exoticas */ + "DHE-DSS-CBC-SHA", /* TLS_DHE_DSS_WITH_DES_CBC_SHA */ + "IDEA-CBC-SHA", /* TLS_RSA_WITH_IDEA_CBC_SHA */ + + /* not really sure if the following names are correct */ + "SSL3_CK_SCSV", /* TLS_EMPTY_RENEGOTIATION_INFO_SCSV */ + "SSL3_CK_FALLBACK_SCSV" +}; +static size_t RFC7540_names_LEN = sizeof(RFC7540_names)/sizeof(RFC7540_names[0]); + + +static apr_hash_t *BLCNames; + +static void cipher_init(apr_pool_t *pool) +{ + apr_hash_t *hash = apr_hash_make(pool); + const char *source; + unsigned int i; + + source = "rfc7540"; + for (i = 0; i < RFC7540_names_LEN; ++i) { + apr_hash_set(hash, RFC7540_names[i], APR_HASH_KEY_STRING, source); + } + + BLCNames = hash; +} + +static int cipher_is_blacklisted(const char *cipher, const char **psource) +{ + *psource = apr_hash_get(BLCNames, cipher, 
APR_HASH_KEY_STRING); + return !!*psource; +} + +apr_status_t h2_protocol_init(apr_pool_t *pool, server_rec *s) +{ + (void)pool; + ap_log_error(APLOG_MARK, APLOG_TRACE1, 0, s, "h2_h2, child_init"); + cipher_init(pool); + + return APR_SUCCESS; +} + +int h2_protocol_is_acceptable_c1(conn_rec *c, request_rec *r, int require_all) +{ + int is_tls = ap_ssl_conn_is_ssl(c); + + if (is_tls && h2_config_cgeti(c, H2_CONF_MODERN_TLS_ONLY) > 0) { + /* Check TLS connection for modern TLS parameters, as defined in + * RFC 7540 and https://wiki.mozilla.org/Security/Server_Side_TLS#Modern_compatibility + */ + apr_pool_t *pool = c->pool; + server_rec *s = c->base_server; + const char *val; + + /* Need Tlsv1.2 or higher, rfc 7540, ch. 9.2 + */ + val = ap_ssl_var_lookup(pool, s, c, NULL, "SSL_PROTOCOL"); + if (val && *val) { + if (strncmp("TLS", val, 3) + || !strcmp("TLSv1", val) + || !strcmp("TLSv1.1", val)) { + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, c, APLOGNO(03050) + "h2_h2(%ld): tls protocol not suitable: %s", + (long)c->id, val); + return 0; + } + } + else if (require_all) { + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, c, APLOGNO(03051) + "h2_h2(%ld): tls protocol is indetermined", (long)c->id); + return 0; + } + + if (val && !strcmp("TLSv1.2", val)) { + /* Check TLS cipher blacklist, defined pre-TLSv1.3, so only + * checking for 1.2 */ + val = ap_ssl_var_lookup(pool, s, c, NULL, "SSL_CIPHER"); + if (val && *val) { + const char *source; + if (cipher_is_blacklisted(val, &source)) { + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, c, APLOGNO(03052) + "h2_h2(%ld): tls cipher %s blacklisted by %s", + (long)c->id, val, source); + return 0; + } + } + else if (require_all) { + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, c, APLOGNO(03053) + "h2_h2(%ld): tls cipher is indetermined", (long)c->id); + return 0; + } + } + } + return 1; +} + diff --git a/modules/http2/h2_protocol.h b/modules/http2/h2_protocol.h new file mode 100644 index 0000000..ed48e89 --- /dev/null +++ b/modules/http2/h2_protocol.h @@ -0,0 +1,56 @@ +/* Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef __mod_h2__h2_protocol__ +#define __mod_h2__h2_protocol__ + +/** + * List of protocol identifiers that we support in cleartext + * negotiations. NULL terminated. + */ +extern const char *h2_protocol_ids_clear[]; + +/** + * List of protocol identifiers that we support in TLS encrypted + * negotiations (ALPN). NULL terminated. + */ +extern const char *h2_protocol_ids_tls[]; + +/** + * Provide a user readable description of the HTTP/2 error code- + * @param h2_error http/2 error code, as in rfc 7540, ch. 7 + * @return textual description of code or that it is unknown. + */ +const char *h2_protocol_err_description(unsigned int h2_error); + +/* + * One time, post config initialization. 
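A brief usage sketch for h2_protocol_err_description() declared above, not part of the patch; the surrounding call site and the error_code variable are assumptions for illustration:

    /* e.g. when logging a RST_STREAM or GOAWAY received from the peer */
    ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, c,
                  "stream reset by peer, error=%u (%s)",
                  error_code, h2_protocol_err_description(error_code));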
+ */ +apr_status_t h2_protocol_init(apr_pool_t *pool, server_rec *s); + +/** + * Check if the given primary connection fulfills the protocol + * requirements for HTTP/2. + * @param c the connection + * @param require_all != 0 iff any missing connection properties make + * the test fail. For example, a cipher might not have been selected while + * the handshake is still ongoing. + * @return != 0 iff protocol requirements are met + */ +int h2_protocol_is_acceptable_c1(conn_rec *c, request_rec *r, int require_all); + + +#endif /* defined(__mod_h2__h2_protocol__) */ diff --git a/modules/http2/h2_proxy_session.c b/modules/http2/h2_proxy_session.c index 8389c7c..db22301 100644 --- a/modules/http2/h2_proxy_session.c +++ b/modules/http2/h2_proxy_session.c @@ -20,6 +20,7 @@ #include #include +#include #include #include "mod_http2.h" @@ -36,6 +37,7 @@ typedef struct h2_proxy_stream { const char *url; request_rec *r; + conn_rec *cfront; h2_proxy_request *req; const char *real_server_uri; const char *p_server_uri; @@ -45,6 +47,7 @@ typedef struct h2_proxy_stream { unsigned int suspended : 1; unsigned int waiting_on_100 : 1; unsigned int waiting_on_ping : 1; + unsigned int headers_ended : 1; uint32_t error_code; apr_bucket_brigade *input; @@ -61,7 +64,123 @@ static void dispatch_event(h2_proxy_session *session, h2_proxys_event_t ev, static void ping_arrived(h2_proxy_session *session); static apr_status_t check_suspended(h2_proxy_session *session); static void stream_resume(h2_proxy_stream *stream); +static apr_status_t submit_trailers(h2_proxy_stream *stream); + +/* + * The H2_PING connection sub-state: a state independant of the H2_SESSION state + * of the connection: + * - H2_PING_ST_NONE: no interference with request handling, ProxyTimeout in effect. + * When entered, all suspended streams are unsuspended again. + * - H2_PING_ST_AWAIT_ANY: new requests are suspended, a possibly configured "ping" + * timeout is in effect. Any frame received transits to H2_PING_ST_NONE. + * - H2_PING_ST_AWAIT_PING: same as above, but only a PING frame transits + * to H2_PING_ST_NONE. + * + * An AWAIT state is entered on a new connection or when re-using a connection and + * the last frame received has been some time ago. The latter sends a PING frame + * and insists on an answer, the former is satisfied by any frame received from the + * backend. + * + * This works for new connections as there is always at least one SETTINGS frame + * that the backend sends. When re-using connection, we send a PING and insist on + * receiving one back, as there might be frames in our connection buffers from + * some time ago. Since some servers have protections against PING flooding, we + * only ever have one PING unanswered. + * + * Requests are suspended while in a PING state, as we do not want to send data + * before we can be reasonably sure that the connection is working (at least on + * the h2 protocol level). This also means that the session can do blocking reads + * when expecting PING answers. 
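A hedged sketch of how the send path can honor the sub-state described above; the helper name and its call site are assumptions, only session->ping_state and H2_PING_ST_NONE come from this patch:

    /* new request data is only let out once the connection has left the
     * AWAIT states, i.e. once a frame (or the PING answer) has arrived */
    static int stream_may_send(h2_proxy_session *session)
    {
        return session->ping_state == H2_PING_ST_NONE;
    }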
+ */ +static void set_ping_timeout(h2_proxy_session *session) +{ + if (session->ping_timeout != -1 && session->save_timeout == -1) { + apr_socket_t *socket = NULL; + socket = ap_get_conn_socket(session->c); + if (socket) { + apr_socket_timeout_get(socket, &session->save_timeout); + apr_socket_timeout_set(socket, session->ping_timeout); + } + } +} + +static void unset_ping_timeout(h2_proxy_session *session) +{ + if (session->save_timeout != -1) { + apr_socket_t *socket = NULL; + + socket = ap_get_conn_socket(session->c); + if (socket) { + apr_socket_timeout_set(socket, session->save_timeout); + session->save_timeout = -1; + } + } +} + +static void enter_ping_state(h2_proxy_session *session, h2_ping_state_t state) +{ + if (session->ping_state == state) return; + switch (session->ping_state) { + case H2_PING_ST_NONE: + /* leaving NONE, enforce timeout, send frame maybe */ + if (H2_PING_ST_AWAIT_PING == state) { + unset_ping_timeout(session); + nghttp2_submit_ping(session->ngh2, 0, (const uint8_t *)"nevergonnagiveyouup"); + } + set_ping_timeout(session); + session->ping_state = state; + break; + default: + /* no switching between the != NONE states */ + if (H2_PING_ST_NONE == state) { + session->ping_state = state; + unset_ping_timeout(session); + ping_arrived(session); + } + break; + } +} + +static void ping_new_session(h2_proxy_session *session, proxy_conn_rec *p_conn) +{ + session->save_timeout = -1; + session->ping_timeout = (p_conn->worker->s->ping_timeout_set? + p_conn->worker->s->ping_timeout : -1); + session->ping_state = H2_PING_ST_NONE; + enter_ping_state(session, H2_PING_ST_AWAIT_ANY); +} + +static void ping_reuse_session(h2_proxy_session *session) +{ + if (H2_PING_ST_NONE == session->ping_state) { + apr_interval_time_t age = apr_time_now() - session->last_frame_received; + if (age > apr_time_from_sec(1)) { + enter_ping_state(session, H2_PING_ST_AWAIT_PING); + } + } +} + +static void ping_ev_frame_received(h2_proxy_session *session, const nghttp2_frame *frame) +{ + session->last_frame_received = apr_time_now(); + switch (session->ping_state) { + case H2_PING_ST_NONE: + /* nop */ + break; + case H2_PING_ST_AWAIT_ANY: + enter_ping_state(session, H2_PING_ST_NONE); + break; + case H2_PING_ST_AWAIT_PING: + if (NGHTTP2_PING == frame->hd.type) { + enter_ping_state(session, H2_PING_ST_NONE); + } + /* we may receive many other frames while we are waiting for the + * PING answer. They may come all from our connection buffers and + * say nothing about the current state of the backend. 
*/ + break; + } +} static apr_status_t proxy_session_pre_close(void *theconn) { @@ -152,7 +271,8 @@ static int on_frame_recv(nghttp2_session *ngh2, const nghttp2_frame *frame, session->id, buffer); } - session->last_frame_received = apr_time_now(); + ping_ev_frame_received(session, frame); + /* Action for frame types: */ switch (frame->hd.type) { case NGHTTP2_HEADERS: stream = nghttp2_session_get_stream_user_data(ngh2, frame->hd.stream_id); @@ -193,10 +313,6 @@ static int on_frame_recv(nghttp2_session *ngh2, const nghttp2_frame *frame, stream_resume(stream); break; case NGHTTP2_PING: - if (session->check_ping) { - session->check_ping = 0; - ping_arrived(session); - } break; case NGHTTP2_PUSH_PROMISE: break; @@ -241,7 +357,8 @@ static int add_header(void *table, const char *n, const char *v) return 1; } -static void process_proxy_header(h2_proxy_stream *stream, const char *n, const char *v) +static void process_proxy_header(apr_table_t *headers, h2_proxy_stream *stream, + const char *n, const char *v) { static const struct { const char *name; @@ -262,20 +379,18 @@ static void process_proxy_header(h2_proxy_stream *stream, const char *n, const c if (!dconf->preserve_host) { for (i = 0; transform_hdrs[i].name; ++i) { if (!ap_cstr_casecmp(transform_hdrs[i].name, n)) { - apr_table_add(r->headers_out, n, - (*transform_hdrs[i].func)(r, dconf, v)); + apr_table_add(headers, n, (*transform_hdrs[i].func)(r, dconf, v)); return; } } if (!ap_cstr_casecmp("Link", n)) { dconf = ap_get_module_config(r->per_dir_config, &proxy_module); - apr_table_add(r->headers_out, n, - h2_proxy_link_reverse_map(r, dconf, - stream->real_server_uri, stream->p_server_uri, v)); + apr_table_add(headers, n, h2_proxy_link_reverse_map(r, dconf, + stream->real_server_uri, stream->p_server_uri, v)); return; } } - apr_table_add(r->headers_out, n, v); + apr_table_add(headers, n, v); } static apr_status_t h2_proxy_stream_add_header_out(h2_proxy_stream *stream, @@ -287,7 +402,7 @@ static apr_status_t h2_proxy_stream_add_header_out(h2_proxy_stream *stream, char *s = apr_pstrndup(stream->r->pool, v, vlen); apr_table_setn(stream->r->notes, "proxy-status", s); - ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, stream->session->c, + ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, stream->cfront, "h2_proxy_stream(%s-%d): got status %s", stream->session->id, stream->id, s); stream->r->status = (int)apr_atoi64(s); @@ -299,17 +414,22 @@ static apr_status_t h2_proxy_stream_add_header_out(h2_proxy_stream *stream, return APR_SUCCESS; } + ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, stream->cfront, + "h2_proxy_stream(%s-%d): on_header %s: %s", + stream->session->id, stream->id, n, v); if (!h2_proxy_res_ignore_header(n, nlen)) { char *hname, *hvalue; + apr_table_t *headers = (stream->headers_ended? 
+ stream->r->trailers_out : stream->r->headers_out); hname = apr_pstrndup(stream->pool, n, nlen); h2_proxy_util_camel_case_header(hname, nlen); hvalue = apr_pstrndup(stream->pool, v, vlen); - ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, stream->session->c, + ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, stream->cfront, "h2_proxy_stream(%s-%d): got header %s: %s", stream->session->id, stream->id, hname, hvalue); - process_proxy_header(stream, hname, hvalue); + process_proxy_header(headers, stream, hname, hvalue); } return APR_SUCCESS; } @@ -328,6 +448,7 @@ static void h2_proxy_stream_end_headers_out(h2_proxy_stream *stream) h2_proxy_session *session = stream->session; request_rec *r = stream->r; apr_pool_t *p = r->pool; + const char *buf; /* Now, add in the cookies from the response to the ones already saved */ apr_table_do(add_header, stream->saves, r->headers_out, "Set-Cookie", NULL); @@ -337,6 +458,10 @@ static void h2_proxy_stream_end_headers_out(h2_proxy_stream *stream) apr_table_unset(r->headers_out, "Set-Cookie"); r->headers_out = apr_table_overlay(p, r->headers_out, stream->saves); } + + if ((buf = apr_table_get(r->headers_out, "Content-Type"))) { + ap_set_content_type(r, apr_pstrdup(p, buf)); + } /* handle Via header in response */ if (session->conf->viaopt != via_off @@ -374,6 +499,7 @@ static void h2_proxy_stream_end_headers_out(h2_proxy_stream *stream) server_name, portstr) ); } + if (r->status >= 200) stream->headers_ended = 1; if (APLOGrtrace2(stream->r)) { ap_log_rerror(APLOG_MARK, APLOG_TRACE2, 0, stream->r, @@ -407,34 +533,27 @@ static int stream_response_data(nghttp2_session *ngh2, uint8_t flags, h2_proxy_stream_end_headers_out(stream); } stream->data_received += len; - - b = apr_bucket_transient_create((const char*)data, len, - stream->r->connection->bucket_alloc); + b = apr_bucket_transient_create((const char*)data, len, + stream->cfront->bucket_alloc); APR_BRIGADE_INSERT_TAIL(stream->output, b); /* always flush after a DATA frame, as we have no other indication * of buffer use */ - b = apr_bucket_flush_create(stream->r->connection->bucket_alloc); + b = apr_bucket_flush_create(stream->cfront->bucket_alloc); APR_BRIGADE_INSERT_TAIL(stream->output, b); - + status = ap_pass_brigade(stream->r->output_filters, stream->output); ap_log_rerror(APLOG_MARK, APLOG_DEBUG, status, stream->r, APLOGNO(03359) "h2_proxy_session(%s): stream=%d, response DATA %ld, %ld" " total", session->id, stream_id, (long)len, (long)stream->data_received); if (status != APR_SUCCESS) { - ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, session->c, APLOGNO(03344) + ap_log_rerror(APLOG_MARK, APLOG_DEBUG, status, stream->r, APLOGNO(03344) "h2_proxy_session(%s): passing output on stream %d", session->id, stream->id); nghttp2_submit_rst_stream(ngh2, NGHTTP2_FLAG_NONE, stream_id, NGHTTP2_STREAM_CLOSED); return NGHTTP2_ERR_STREAM_CLOSING; } - if (stream->standalone) { - nghttp2_session_consume(ngh2, stream_id, len); - ap_log_rerror(APLOG_MARK, APLOG_TRACE2, 0, stream->r, - "h2_proxy_session(%s): stream %d, win_update %d bytes", - session->id, stream_id, (int)len); - } return 0; } @@ -493,12 +612,12 @@ static ssize_t stream_request_data(nghttp2_session *ngh2, int32_t stream_id, stream = nghttp2_session_get_stream_user_data(ngh2, stream_id); if (!stream) { ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, ap_server_conf, APLOGNO(03361) - "h2_proxy_stream(%s): data_read, stream %d not found", - stream->session->id, stream_id); + "h2_proxy_stream(NULL): data_read, stream %d not found", + stream_id); return 
NGHTTP2_ERR_CALLBACK_FAILURE; } - if (stream->session->check_ping) { + if (stream->session->ping_state != H2_PING_ST_NONE) { /* suspend until we hear from the other side */ stream->waiting_on_ping = 1; status = APR_EAGAIN; @@ -518,7 +637,7 @@ static ssize_t stream_request_data(nghttp2_session *ngh2, int32_t stream_id, } if (status == APR_SUCCESS) { - ssize_t readlen = 0; + size_t readlen = 0; while (status == APR_SUCCESS && (readlen < length) && !APR_BRIGADE_EMPTY(stream->input)) { @@ -537,7 +656,7 @@ static ssize_t stream_request_data(nghttp2_session *ngh2, int32_t stream_id, status = apr_bucket_read(b, &bdata, &blen, APR_BLOCK_READ); if (status == APR_SUCCESS && blen > 0) { - ssize_t copylen = H2MIN(length - readlen, blen); + size_t copylen = H2MIN(length - readlen, blen); memcpy(buf, bdata, copylen); buf += copylen; readlen += copylen; @@ -553,9 +672,14 @@ static ssize_t stream_request_data(nghttp2_session *ngh2, int32_t stream_id, stream->data_sent += readlen; ap_log_rerror(APLOG_MARK, APLOG_DEBUG, status, stream->r, APLOGNO(03468) "h2_proxy_stream(%d): request DATA %ld, %ld" - " total, flags=%d", - stream->id, (long)readlen, (long)stream->data_sent, + " total, flags=%d", stream->id, (long)readlen, (long)stream->data_sent, (int)*data_flags); + if ((*data_flags & NGHTTP2_DATA_FLAG_EOF) && !apr_is_empty_table(stream->r->trailers_in)) { + ap_log_rerror(APLOG_MARK, APLOG_DEBUG, status, stream->r, APLOGNO(10179) + "h2_proxy_stream(%d): submit trailers", stream->id); + *data_flags |= NGHTTP2_DATA_FLAG_NO_END_STREAM; + submit_trailers(stream); + } return readlen; } else if (APR_STATUS_IS_EAGAIN(status)) { @@ -575,7 +699,7 @@ static ssize_t stream_request_data(nghttp2_session *ngh2, int32_t stream_id, } #ifdef H2_NG2_INVALID_HEADER_CB -static int on_invalid_header_cb(nghttp2_session *ngh2, +static int on_invalid_header_cb(nghttp2_session *ngh2, const nghttp2_frame *frame, const uint8_t *name, size_t namelen, const uint8_t *value, size_t valuelen, @@ -638,26 +762,22 @@ h2_proxy_session *h2_proxy_session_setup(const char *id, proxy_conn_rec *p_conn, #ifdef H2_NG2_INVALID_HEADER_CB nghttp2_session_callbacks_set_on_invalid_header_callback(cbs, on_invalid_header_cb); #endif - nghttp2_option_new(&option); nghttp2_option_set_peer_max_concurrent_streams(option, 100); - nghttp2_option_set_no_auto_window_update(option, 1); + nghttp2_option_set_no_auto_window_update(option, 0); nghttp2_session_client_new2(&session->ngh2, cbs, session, option); nghttp2_option_del(option); nghttp2_session_callbacks_del(cbs); + ping_new_session(session, p_conn); ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, APLOGNO(03362) "setup session for %s", p_conn->hostname); } else { h2_proxy_session *session = p_conn->data; - apr_interval_time_t age = apr_time_now() - session->last_frame_received; - if (age > apr_time_from_sec(1)) { - session->check_ping = 1; - nghttp2_submit_ping(session->ngh2, 0, (const uint8_t *)"nevergonnagiveyouup"); - } + ping_reuse_session(session); } return p_conn->data; } @@ -698,7 +818,7 @@ static apr_status_t open_stream(h2_proxy_session *session, const char *url, { h2_proxy_stream *stream; apr_uri_t puri; - const char *authority, *scheme, *path; + const char *authority, *scheme, *path, *orig_host; apr_status_t status; proxy_dir_conf *dconf; @@ -707,24 +827,29 @@ static apr_status_t open_stream(h2_proxy_session *session, const char *url, stream->pool = r->pool; stream->url = url; stream->r = r; + stream->cfront = r->connection; stream->standalone = standalone; stream->session = session; 
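Note that the brigades created just below come from the front-end connection's allocator (stream->cfront->bucket_alloc): whatever lands in stream->output is later passed to stream->r->output_filters, so its buckets must belong to that connection, not to the backend session. A minimal sketch of the pattern, assuming a stream set up as here (the helper name is illustrative):

    static apr_status_t flush_to_front(h2_proxy_stream *stream)
    {
        /* buckets destined for the front-end filters use cfront's allocator */
        apr_bucket *b = apr_bucket_flush_create(stream->cfront->bucket_alloc);
        APR_BRIGADE_INSERT_TAIL(stream->output, b);
        return ap_pass_brigade(stream->r->output_filters, stream->output);
    }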
stream->state = H2_STREAM_ST_IDLE; - stream->input = apr_brigade_create(stream->pool, session->c->bucket_alloc); - stream->output = apr_brigade_create(stream->pool, session->c->bucket_alloc); + stream->input = apr_brigade_create(stream->pool, stream->cfront->bucket_alloc); + stream->output = apr_brigade_create(stream->pool, stream->cfront->bucket_alloc); - stream->req = h2_proxy_req_create(1, stream->pool, 0); + stream->req = h2_proxy_req_create(1, stream->pool); status = apr_uri_parse(stream->pool, url, &puri); if (status != APR_SUCCESS) return status; scheme = (strcmp(puri.scheme, "h2")? "http" : "https"); - + orig_host = apr_table_get(r->headers_in, "Host"); + if (orig_host == NULL) { + orig_host = r->hostname; + } + dconf = ap_get_module_config(r->per_dir_config, &proxy_module); if (dconf->preserve_host) { - authority = r->hostname; + authority = orig_host; } else { authority = puri.hostname; @@ -733,20 +858,27 @@ static apr_status_t open_stream(h2_proxy_session *session, const char *url, /* port info missing and port is not default for scheme: append */ authority = apr_psprintf(stream->pool, "%s:%d", authority, puri.port); } + ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, stream->cfront, + "authority=%s from uri.hostname=%s and uri.port=%d", + authority, puri.hostname, puri.port); } - + /* See #235, we use only :authority when available and remove Host: + * since differing values are not acceptable, see RFC 9113 ch. 8.3.1 */ + if (authority && strlen(authority)) { + apr_table_unset(r->headers_in, "Host"); + } + /* we need this for mapping relative uris in headers ("Link") back * to local uris */ stream->real_server_uri = apr_psprintf(stream->pool, "%s://%s", scheme, authority); stream->p_server_uri = apr_psprintf(stream->pool, "%s://%s", puri.scheme, authority); path = apr_uri_unparse(stream->pool, &puri, APR_URI_UNP_OMITSITEPART); + h2_proxy_req_make(stream->req, stream->pool, r->method, scheme, authority, path, r->headers_in); if (dconf->add_forwarded_headers) { if (PROXYREQ_REVERSE == r->proxyreq) { - const char *buf; - /* Add X-Forwarded-For: so that the upstream has a chance to * determine, where the original request came from. */ @@ -756,8 +888,9 @@ static apr_status_t open_stream(h2_proxy_session *session, const char *url, /* Add X-Forwarded-Host: so that upstream knows what the * original request hostname was. 
*/ - if ((buf = apr_table_get(r->headers_in, "Host"))) { - apr_table_mergen(stream->req->headers, "X-Forwarded-Host", buf); + if (orig_host) { + apr_table_mergen(stream->req->headers, "X-Forwarded-Host", + orig_host); } /* Add X-Forwarded-Server: so that upstream knows what the @@ -768,7 +901,7 @@ static apr_status_t open_stream(h2_proxy_session *session, const char *url, r->server->server_hostname); } } - + /* Tuck away all already existing cookies */ stream->saves = apr_table_make(r->pool, 2); apr_table_do(add_header, stream->saves, r->headers_out, "Set-Cookie", NULL); @@ -811,7 +944,7 @@ static apr_status_t submit_stream(h2_proxy_session *session, h2_proxy_stream *st rv = nghttp2_submit_request(session->ngh2, NULL, hd->nv, hd->nvlen, pp, stream); - ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, APLOGNO(03363) + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, stream->cfront, APLOGNO(03363) "h2_proxy_session(%s): submit %s%s -> %d", session->id, stream->req->authority, stream->req->path, rv); @@ -826,12 +959,22 @@ static apr_status_t submit_stream(h2_proxy_session *session, h2_proxy_stream *st return APR_EGENERAL; } +static apr_status_t submit_trailers(h2_proxy_stream *stream) +{ + h2_proxy_ngheader *hd; + int rv; + + hd = h2_proxy_util_nghd_make(stream->pool, stream->r->trailers_in); + rv = nghttp2_submit_trailer(stream->session->ngh2, stream->id, hd->nv, hd->nvlen); + return rv == 0? APR_SUCCESS: APR_EGENERAL; +} + static apr_status_t feed_brigade(h2_proxy_session *session, apr_bucket_brigade *bb) { apr_status_t status = APR_SUCCESS; apr_size_t readlen = 0; ssize_t n; - + while (status == APR_SUCCESS && !APR_BRIGADE_EMPTY(bb)) { apr_bucket* b = APR_BRIGADE_FIRST(bb); @@ -854,9 +997,10 @@ static apr_status_t feed_brigade(h2_proxy_session *session, apr_bucket_brigade * } } else { - readlen += n; - if (n < blen) { - apr_bucket_split(b, n); + size_t rlen = (size_t)n; + readlen += rlen; + if (rlen < blen) { + apr_bucket_split(b, rlen); } } } @@ -882,7 +1026,7 @@ static apr_status_t h2_proxy_session_read(h2_proxy_session *session, int block, apr_socket_t *socket = NULL; apr_time_t save_timeout = -1; - if (block) { + if (block && timeout > 0) { socket = ap_get_conn_socket(session->c); if (socket) { apr_socket_timeout_get(socket, &save_timeout); @@ -945,7 +1089,7 @@ apr_status_t h2_proxy_session_submit(h2_proxy_session *session, static void stream_resume(h2_proxy_stream *stream) { h2_proxy_session *session = stream->session; - ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, session->c, + ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, stream->cfront, "h2_proxy_stream(%s-%d): resuming", session->id, stream->id); stream->suspended = 0; @@ -954,6 +1098,14 @@ static void stream_resume(h2_proxy_stream *stream) dispatch_event(session, H2_PROXYS_EV_STREAM_RESUMED, 0, NULL); } +static int is_waiting_for_backend(h2_proxy_session *session) +{ + return ((session->ping_state != H2_PING_ST_NONE) + || ((session->suspended->nelts <= 0) + && !nghttp2_session_want_write(session->ngh2) + && nghttp2_session_want_read(session->ngh2))); +} + static apr_status_t check_suspended(h2_proxy_session *session) { h2_proxy_stream *stream; @@ -978,7 +1130,7 @@ static apr_status_t check_suspended(h2_proxy_session *session) return APR_SUCCESS; } else if (status != APR_SUCCESS && !APR_STATUS_IS_EAGAIN(status)) { - ap_log_cerror(APLOG_MARK, APLOG_WARNING, status, session->c, + ap_log_cerror(APLOG_MARK, APLOG_WARNING, status, stream->cfront, APLOGNO(03382) "h2_proxy_stream(%s-%d): check input", session->id, stream_id); 
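For orientation: is_waiting_for_backend() above is what later lets the WAIT state block on the backend socket under the regular ProxyTimeout instead of the escalating waits used otherwise. Roughly, as applied further down in the state machine (a timeout of 0 leaves the socket timeout untouched, per the block && timeout > 0 guard in h2_proxy_session_read()):

    if (is_waiting_for_backend(session)) {
        /* nothing to send, no suspended streams: block under ProxyTimeout */
        status = h2_proxy_session_read(session, 1, 0);
    }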
stream_resume(stream); @@ -1006,7 +1158,7 @@ static apr_status_t session_shutdown(h2_proxy_session *session, int reason, if (!err && reason) { err = nghttp2_strerror(reason); } - nghttp2_submit_goaway(session->ngh2, NGHTTP2_FLAG_NONE, 0, + nghttp2_submit_goaway(session->ngh2, NGHTTP2_FLAG_NONE, 0, reason, (uint8_t*)err, err? strlen(err):0); status = nghttp2_session_send(session->ngh2); dispatch_event(session, H2_PROXYS_EV_LOCAL_GOAWAY, reason, err); @@ -1208,39 +1360,56 @@ static void ev_stream_done(h2_proxy_session *session, int stream_id, const char *msg) { h2_proxy_stream *stream; - + apr_bucket *b; + stream = nghttp2_session_get_stream_user_data(session->ngh2, stream_id); if (stream) { - int touched = (stream->data_sent || - stream_id <= session->last_stream_id); + /* if the stream's connection is aborted, do not send anything + * more on it. */ apr_status_t status = (stream->error_code == 0)? APR_SUCCESS : APR_EINVAL; - ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, APLOGNO(03364) - "h2_proxy_sesssion(%s): stream(%d) closed " - "(touched=%d, error=%d)", - session->id, stream_id, touched, stream->error_code); - - if (status != APR_SUCCESS) { - stream->r->status = 500; - } - else if (!stream->data_received) { - apr_bucket *b; - /* if the response had no body, this is the time to flush - * an empty brigade which will also write the resonse - * headers */ - h2_proxy_stream_end_headers_out(stream); - stream->data_received = 1; - b = apr_bucket_flush_create(stream->r->connection->bucket_alloc); - APR_BRIGADE_INSERT_TAIL(stream->output, b); - b = apr_bucket_eos_create(stream->r->connection->bucket_alloc); - APR_BRIGADE_INSERT_TAIL(stream->output, b); - ap_pass_brigade(stream->r->output_filters, stream->output); + int touched = (stream->data_sent || stream->data_received || + stream_id <= session->last_stream_id); + if (!stream->cfront->aborted) { + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, stream->cfront, APLOGNO(03364) + "h2_proxy_sesssion(%s): stream(%d) closed " + "(touched=%d, error=%d)", + session->id, stream_id, touched, stream->error_code); + + if (status != APR_SUCCESS) { + /* stream failed. If we have received (and forwarded) response + * data already, we need to append an error buckt to inform + * consumers. + * Otherwise, we have an early fail on the connection and may + * retry this request on a new one. In that case, keep the + * output virgin so that a new attempt can be made. 
*/ + if (stream->data_received) { + int http_status = ap_map_http_request_error(status, HTTP_BAD_REQUEST); + b = ap_bucket_error_create(http_status, NULL, stream->r->pool, + stream->cfront->bucket_alloc); + APR_BRIGADE_INSERT_TAIL(stream->output, b); + b = apr_bucket_eos_create(stream->cfront->bucket_alloc); + APR_BRIGADE_INSERT_TAIL(stream->output, b); + ap_pass_brigade(stream->r->output_filters, stream->output); + } + } + else if (!stream->data_received) { + /* if the response had no body, this is the time to flush + * an empty brigade which will also write the response headers */ + h2_proxy_stream_end_headers_out(stream); + stream->data_received = 1; + b = apr_bucket_flush_create(stream->cfront->bucket_alloc); + APR_BRIGADE_INSERT_TAIL(stream->output, b); + b = apr_bucket_eos_create(stream->cfront->bucket_alloc); + APR_BRIGADE_INSERT_TAIL(stream->output, b); + ap_pass_brigade(stream->r->output_filters, stream->output); + } } - + stream->state = H2_STREAM_ST_CLOSED; h2_proxy_ihash_remove(session->streams, stream_id); h2_proxy_iq_remove(session->suspended, stream_id); if (session->done) { - session->done(session, stream->r, status, touched); + session->done(session, stream->r, status, touched, stream->error_code); } } @@ -1408,7 +1577,22 @@ run_loop: break; case H2_PROXYS_ST_WAIT: - if (check_suspended(session) == APR_EAGAIN) { + if (is_waiting_for_backend(session)) { + /* we can do a blocking read with the default timeout (as + * configured via ProxyTimeout in our socket. There is + * nothing we want to send or check until we get more data + * from the backend. */ + status = h2_proxy_session_read(session, 1, 0); + if (status == APR_SUCCESS) { + have_read = 1; + dispatch_event(session, H2_PROXYS_EV_DATA_READ, 0, NULL); + } + else { + dispatch_event(session, H2_PROXYS_EV_CONN_ERROR, status, NULL); + return status; + } + } + else if (check_suspended(session) == APR_EAGAIN) { /* no stream has become resumed. Do a blocking read with * ever increasing timeouts... 
*/ if (session->wait_timeout < 25) { @@ -1423,7 +1607,7 @@ run_loop: ap_log_cerror(APLOG_MARK, APLOG_TRACE3, status, session->c, APLOGNO(03365) "h2_proxy_session(%s): WAIT read, timeout=%fms", - session->id, (float)session->wait_timeout/1000.0); + session->id, session->wait_timeout/1000.0); if (status == APR_SUCCESS) { have_read = 1; dispatch_event(session, H2_PROXYS_EV_DATA_READ, 0, NULL); @@ -1495,9 +1679,19 @@ static int done_iter(void *udata, void *val) { cleanup_iter_ctx *ctx = udata; h2_proxy_stream *stream = val; - int touched = (stream->data_sent || + int touched = (stream->data_sent || stream->data_received || stream->id <= ctx->session->last_stream_id); - ctx->done(ctx->session, stream->r, APR_ECONNABORTED, touched); + if (touched && stream->output) { + apr_bucket *b = ap_bucket_error_create(HTTP_BAD_GATEWAY, NULL, + stream->r->pool, + stream->cfront->bucket_alloc); + APR_BRIGADE_INSERT_TAIL(stream->output, b); + b = apr_bucket_eos_create(stream->cfront->bucket_alloc); + APR_BRIGADE_INSERT_TAIL(stream->output, b); + ap_pass_brigade(stream->r->output_filters, stream->output); + } + ctx->done(ctx->session, stream->r, APR_ECONNABORTED, touched, + stream->error_code); return 1; } @@ -1516,6 +1710,12 @@ void h2_proxy_session_cleanup(h2_proxy_session *session, } } +int h2_proxy_session_is_reusable(h2_proxy_session *session) +{ + return (session->state != H2_PROXYS_ST_DONE) && + h2_proxy_ihash_empty(session->streams); +} + static int ping_arrived_iter(void *udata, void *val) { h2_proxy_stream *stream = val; @@ -1543,42 +1743,3 @@ typedef struct { int updated; } win_update_ctx; -static int win_update_iter(void *udata, void *val) -{ - win_update_ctx *ctx = udata; - h2_proxy_stream *stream = val; - - if (stream->r && stream->r->connection == ctx->c) { - ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, ctx->session->c, - "h2_proxy_session(%s-%d): win_update %ld bytes", - ctx->session->id, (int)stream->id, (long)ctx->bytes); - nghttp2_session_consume(ctx->session->ngh2, stream->id, ctx->bytes); - ctx->updated = 1; - return 0; - } - return 1; -} - - -void h2_proxy_session_update_window(h2_proxy_session *session, - conn_rec *c, apr_off_t bytes) -{ - if (!h2_proxy_ihash_empty(session->streams)) { - win_update_ctx ctx; - ctx.session = session; - ctx.c = c; - ctx.bytes = bytes; - ctx.updated = 0; - h2_proxy_ihash_iter(session->streams, win_update_iter, &ctx); - - if (!ctx.updated) { - /* could not find the stream any more, possibly closed, update - * the connection window at least */ - ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, session->c, - "h2_proxy_session(%s): win_update conn %ld bytes", - session->id, (long)bytes); - nghttp2_session_consume_connection(session->ngh2, (size_t)bytes); - } - } -} - diff --git a/modules/http2/h2_proxy_session.h b/modules/http2/h2_proxy_session.h index ecebb61..3bc16d7 100644 --- a/modules/http2/h2_proxy_session.h +++ b/modules/http2/h2_proxy_session.h @@ -60,10 +60,16 @@ typedef enum { H2_PROXYS_EV_PRE_CLOSE, /* connection will close after this */ } h2_proxys_event_t; +typedef enum { + H2_PING_ST_NONE, /* normal connection mode, ProxyTimeout rules */ + H2_PING_ST_AWAIT_ANY, /* waiting for any frame from backend */ + H2_PING_ST_AWAIT_PING, /* waiting for PING frame from backend */ +} h2_ping_state_t; typedef struct h2_proxy_session h2_proxy_session; typedef void h2_proxy_request_done(h2_proxy_session *s, request_rec *r, - apr_status_t status, int touched); + apr_status_t status, int touched, + int error_code); struct h2_proxy_session { const char *id; @@ -74,7 +80,6 @@ 
struct h2_proxy_session { nghttp2_session *ngh2; /* the nghttp2 session itself */ unsigned int aborted : 1; - unsigned int check_ping : 1; unsigned int h2_front : 1; /* if front-end connection is HTTP/2 */ h2_proxy_request_done *done; @@ -94,6 +99,10 @@ struct h2_proxy_session { apr_bucket_brigade *input; apr_bucket_brigade *output; + + h2_ping_state_t ping_state; + apr_time_t ping_timeout; + apr_time_t save_timeout; }; h2_proxy_session *h2_proxy_session_setup(const char *id, proxy_conn_rec *p_conn, @@ -120,9 +129,8 @@ void h2_proxy_session_cancel_all(h2_proxy_session *s); void h2_proxy_session_cleanup(h2_proxy_session *s, h2_proxy_request_done *done); -void h2_proxy_session_update_window(h2_proxy_session *s, - conn_rec *c, apr_off_t bytes); - #define H2_PROXY_REQ_URL_NOTE "h2-proxy-req-url" +int h2_proxy_session_is_reusable(h2_proxy_session *s); + #endif /* h2_proxy_session_h */ diff --git a/modules/http2/h2_proxy_util.c b/modules/http2/h2_proxy_util.c index bd45294..dc69ec0 100644 --- a/modules/http2/h2_proxy_util.c +++ b/modules/http2/h2_proxy_util.c @@ -452,6 +452,22 @@ h2_proxy_ngheader *h2_proxy_util_nghd_make_req(apr_pool_t *p, return ngh; } +h2_proxy_ngheader *h2_proxy_util_nghd_make(apr_pool_t *p, apr_table_t *headers) +{ + + h2_proxy_ngheader *ngh; + size_t n; + + n = 0; + apr_table_do(count_header, &n, headers, NULL); + + ngh = apr_pcalloc(p, sizeof(h2_proxy_ngheader)); + ngh->nv = apr_pcalloc(p, n * sizeof(nghttp2_nv)); + apr_table_do(add_table_header, ngh, headers, NULL); + + return ngh; +} + /******************************************************************************* * header HTTP/1 <-> HTTP/2 conversions ******************************************************************************/ @@ -480,7 +496,7 @@ static int ignore_header(const literal *lits, size_t llen, const char *name, size_t nlen) { const literal *lit; - int i; + size_t i; for (i = 0; i < llen; ++i) { lit = &lits[i]; @@ -567,8 +583,7 @@ static apr_status_t h2_headers_add_h1(apr_table_t *headers, apr_pool_t *pool, static h2_proxy_request *h2_proxy_req_createn(int id, apr_pool_t *pool, const char *method, const char *scheme, const char *authority, - const char *path, apr_table_t *header, - int serialize) + const char *path, apr_table_t *header) { h2_proxy_request *req = apr_pcalloc(pool, sizeof(h2_proxy_request)); @@ -578,14 +593,13 @@ static h2_proxy_request *h2_proxy_req_createn(int id, apr_pool_t *pool, const ch req->path = path; req->headers = header? 
header : apr_table_make(pool, 10); req->request_time = apr_time_now(); - req->serialize = serialize; - + return req; } -h2_proxy_request *h2_proxy_req_create(int id, apr_pool_t *pool, int serialize) +h2_proxy_request *h2_proxy_req_create(int id, apr_pool_t *pool) { - return h2_proxy_req_createn(id, pool, NULL, NULL, NULL, NULL, NULL, serialize); + return h2_proxy_req_createn(id, pool, NULL, NULL, NULL, NULL, NULL); } typedef struct { @@ -609,6 +623,7 @@ apr_status_t h2_proxy_req_make(h2_proxy_request *req, apr_pool_t *pool, apr_table_t *headers) { h1_ctx x; + const char *val; req->method = method; req->scheme = scheme; @@ -623,6 +638,11 @@ apr_status_t h2_proxy_req_make(h2_proxy_request *req, apr_pool_t *pool, x.pool = pool; x.headers = req->headers; apr_table_do(set_h1_header, &x, headers, NULL); + if ((val = apr_table_get(headers, "TE")) && ap_find_token(pool, val, "trailers")) { + /* client accepts trailers, forward this information */ + apr_table_addn(req->headers, "TE", "trailers"); + } + apr_table_setn(req->headers, "te", "trailers"); return APR_SUCCESS; } @@ -915,12 +935,12 @@ static size_t subst_str(link_ctx *ctx, int start, int end, const char *ns) nlen = (int)strlen(ns); delta = nlen - olen; plen = ctx->slen + delta + 1; - p = apr_pcalloc(ctx->pool, plen); + p = apr_palloc(ctx->pool, plen); memcpy(p, ctx->s, start); memcpy(p + start, ns, nlen); strcpy(p + start + nlen, ctx->s + end); ctx->s = p; - ctx->slen = (int)strlen(p); + ctx->slen = plen - 1; /* (int)strlen(p) */ if (ctx->i >= end) { ctx->i += delta; } @@ -931,7 +951,7 @@ static void map_link(link_ctx *ctx) { if (ctx->link_start < ctx->link_end) { char buffer[HUGE_STRING_LEN]; - int need_len, link_len, buffer_len, prepend_p_server; + size_t need_len, link_len, buffer_len, prepend_p_server; const char *mapped; buffer[0] = '\0'; diff --git a/modules/http2/h2_proxy_util.h b/modules/http2/h2_proxy_util.h index a88fb7e..202363d 100644 --- a/modules/http2/h2_proxy_util.h +++ b/modules/http2/h2_proxy_util.h @@ -168,6 +168,8 @@ typedef struct h2_proxy_ngheader { h2_proxy_ngheader *h2_proxy_util_nghd_make_req(apr_pool_t *p, const struct h2_proxy_request *req); +h2_proxy_ngheader *h2_proxy_util_nghd_make(apr_pool_t *p, apr_table_t *headers); + /******************************************************************************* * h2_proxy_request helpers ******************************************************************************/ @@ -183,11 +185,10 @@ struct h2_proxy_request { apr_time_t request_time; - unsigned int chunked : 1; /* iff requst body needs to be forwarded as chunked */ - unsigned int serialize : 1; /* iff this request is written in HTTP/1.1 serialization */ + int chunked; /* iff request body needs to be forwarded as chunked */ }; -h2_proxy_request *h2_proxy_req_create(int id, apr_pool_t *pool, int serialize); +h2_proxy_request *h2_proxy_req_create(int id, apr_pool_t *pool); apr_status_t h2_proxy_req_make(h2_proxy_request *req, apr_pool_t *pool, const char *method, const char *scheme, const char *authority, const char *path, diff --git a/modules/http2/h2_push.c b/modules/http2/h2_push.c index 9a3b19b..e6a10c5 100644 --- a/modules/http2/h2_push.c +++ b/modules/http2/h2_push.c @@ -23,19 +23,19 @@ #include #ifdef H2_OPENSSL -#include +#include #endif #include #include #include +#include #include "h2_private.h" -#include "h2_h2.h" +#include "h2_protocol.h" #include "h2_util.h" #include "h2_push.h" #include "h2_request.h" -#include "h2_headers.h" #include "h2_session.h" #include "h2_stream.h" @@ -59,7 +59,7 @@ static const char 
*policy_str(h2_push_policy policy) typedef struct { const h2_request *req; - int push_policy; + apr_uint32_t push_policy; apr_pool_t *pool; apr_array_header_t *pushes; const char *s; @@ -348,11 +348,10 @@ static int add_push(link_ctx *ctx) } headers = apr_table_make(ctx->pool, 5); apr_table_do(set_push_header, headers, ctx->req->headers, NULL); - req = h2_req_create(0, ctx->pool, method, ctx->req->scheme, - ctx->req->authority, path, headers, - ctx->req->serialize); + req = h2_request_create(0, ctx->pool, method, ctx->req->scheme, + ctx->req->authority, path, headers); /* atm, we do not push on pushes */ - h2_request_end_headers(req, ctx->pool, 1, 0); + h2_request_end_headers(req, ctx->pool, 0); push->req = req; if (has_param(ctx, "critical")) { h2_priority *prio = apr_pcalloc(ctx->pool, sizeof(*prio)); @@ -427,14 +426,23 @@ static void inspect_link(link_ctx *ctx, const char *s, size_t slen) static int head_iter(void *ctx, const char *key, const char *value) { - if (!apr_strnatcasecmp("link", key)) { + if (!ap_cstr_casecmp("link", key)) { inspect_link(ctx, value, strlen(value)); } return 1; } -apr_array_header_t *h2_push_collect(apr_pool_t *p, const h2_request *req, - int push_policy, const h2_headers *res) +#if AP_HAS_RESPONSE_BUCKETS +apr_array_header_t *h2_push_collect(apr_pool_t *p, + const struct h2_request *req, + apr_uint32_t push_policy, + const ap_bucket_response *res) +#else +apr_array_header_t *h2_push_collect(apr_pool_t *p, + const struct h2_request *req, + apr_uint32_t push_policy, + const struct h2_headers *res) +#endif { if (req && push_policy != H2_PUSH_NONE) { /* Collect push candidates from the request/response pair. @@ -464,33 +472,6 @@ apr_array_header_t *h2_push_collect(apr_pool_t *p, const h2_request *req, return NULL; } -/******************************************************************************* - * push diary - * - * - The push diary keeps track of resources already PUSHed via HTTP/2 on this - * connection. It records a hash value from the absolute URL of the resource - * pushed. - * - Lacking openssl, it uses 'apr_hashfunc_default' for the value - * - with openssl, it uses SHA256 to calculate the hash value - * - whatever the method to generate the hash, the diary keeps a maximum of 64 - * bits per hash, limiting the memory consumption to about - * H2PushDiarySize * 8 - * bytes. Entries are sorted by most recently used and oldest entries are - * forgotten first. - * - Clients can initialize/replace the push diary by sending a 'Cache-Digest' - * header. Currently, this is the base64url encoded value of the cache digest - * as specified in https://datatracker.ietf.org/doc/draft-kazuho-h2-cache-digest/ - * This draft can be expected to evolve and the definition of the header - * will be added there and refined. - * - The cache digest header is a Golomb Coded Set of hash values, but it may - * limit the amount of bits per hash value even further. For a good description - * of GCS, read here: - * http://giovanni.bajo.it/post/47119962313/golomb-coded-sets-smaller-than-bloom-filters - * - The means that the push diary might be initialized with hash values of much - * less than 64 bits, leading to more false positives, but smaller digest size. 
- ******************************************************************************/ - - #define GCSLOG_LEVEL APLOG_TRACE1 typedef struct h2_push_diary_entry { @@ -499,27 +480,32 @@ typedef struct h2_push_diary_entry { #ifdef H2_OPENSSL -static void sha256_update(SHA256_CTX *ctx, const char *s) +static void sha256_update(EVP_MD_CTX *ctx, const char *s) { - SHA256_Update(ctx, s, strlen(s)); + EVP_DigestUpdate(ctx, s, strlen(s)); } static void calc_sha256_hash(h2_push_diary *diary, apr_uint64_t *phash, h2_push *push) { - SHA256_CTX sha256; + EVP_MD_CTX *md; apr_uint64_t val; - unsigned char hash[SHA256_DIGEST_LENGTH]; - int i; - - SHA256_Init(&sha256); - sha256_update(&sha256, push->req->scheme); - sha256_update(&sha256, "://"); - sha256_update(&sha256, push->req->authority); - sha256_update(&sha256, push->req->path); - SHA256_Final(hash, &sha256); + unsigned char hash[EVP_MAX_MD_SIZE]; + unsigned len, i; + + md = EVP_MD_CTX_create(); + ap_assert(md != NULL); + + i = EVP_DigestInit_ex(md, EVP_sha256(), NULL); + ap_assert(i == 1); + sha256_update(md, push->req->scheme); + sha256_update(md, "://"); + sha256_update(md, push->req->authority); + sha256_update(md, push->req->path); + EVP_DigestFinal(md, hash, &len); + EVP_MD_CTX_destroy(md); val = 0; - for (i = 0; i != sizeof(val); ++i) + for (i = 0; i != len; ++i) val = val * 256 + hash[i]; *phash = val >> (64 - diary->mask_bits); } @@ -528,13 +514,14 @@ static void calc_sha256_hash(h2_push_diary *diary, apr_uint64_t *phash, h2_push static unsigned int val_apr_hash(const char *str) { - apr_ssize_t len = strlen(str); + apr_ssize_t len = (apr_ssize_t)strlen(str); return apr_hashfunc_default(str, &len); } static void calc_apr_hash(h2_push_diary *diary, apr_uint64_t *phash, h2_push *push) { apr_uint64_t val; + (void)diary; #if APR_UINT64_MAX > UINT_MAX val = ((apr_uint64_t)(val_apr_hash(push->req->scheme)) << 32); val ^= ((apr_uint64_t)(val_apr_hash(push->req->authority)) << 16); @@ -617,38 +604,48 @@ static int h2_push_diary_find(h2_push_diary *diary, apr_uint64_t hash) return -1; } -static h2_push_diary_entry *move_to_last(h2_push_diary *diary, apr_size_t idx) +static void move_to_last(h2_push_diary *diary, apr_size_t idx) { h2_push_diary_entry *entries = (h2_push_diary_entry*)diary->entries->elts; h2_push_diary_entry e; - apr_size_t lastidx = diary->entries->nelts-1; + apr_size_t lastidx; + /* Move an existing entry to the last place */ + if (diary->entries->nelts <= 0) + return; + /* move entry[idx] to the end */ + lastidx = diary->entries->nelts - 1; if (idx < lastidx) { e = entries[idx]; - memmove(entries+idx, entries+idx+1, sizeof(e) * (lastidx - idx)); + memmove(entries+idx, entries+idx+1, sizeof(h2_push_diary_entry) * (lastidx - idx)); entries[lastidx] = e; } - return &entries[lastidx]; } -static void h2_push_diary_append(h2_push_diary *diary, h2_push_diary_entry *e) +static void remove_first(h2_push_diary *diary) { - h2_push_diary_entry *ne; + h2_push_diary_entry *entries = (h2_push_diary_entry*)diary->entries->elts; + int lastidx; - if (diary->entries->nelts < diary->N) { - /* append a new diary entry at the end */ - APR_ARRAY_PUSH(diary->entries, h2_push_diary_entry) = *e; - ne = &APR_ARRAY_IDX(diary->entries, diary->entries->nelts-1, h2_push_diary_entry); + /* move remaining entries to index 0 */ + lastidx = diary->entries->nelts - 1; + if (lastidx > 0) { + --diary->entries->nelts; + memmove(entries, entries+1, sizeof(h2_push_diary_entry) * diary->entries->nelts); } - else { - /* replace content with new digest. 
keeps memory usage constant once diary is full */ - ne = move_to_last(diary, 0); - *ne = *e; +} + +static void h2_push_diary_append(h2_push_diary *diary, h2_push_diary_entry *e) +{ + while (diary->entries->nelts >= diary->N) { + remove_first(diary); } + /* append a new diary entry at the end */ + APR_ARRAY_PUSH(diary->entries, h2_push_diary_entry) = *e; /* Intentional no APLOGNO */ ap_log_perror(APLOG_MARK, GCSLOG_LEVEL, 0, diary->entries->pool, - "push_diary_append: %"APR_UINT64_T_HEX_FMT, ne->hash); + "push_diary_append: %"APR_UINT64_T_HEX_FMT, e->hash); } apr_array_header_t *h2_push_diary_update(h2_session *session, apr_array_header_t *pushes) @@ -668,13 +665,13 @@ apr_array_header_t *h2_push_diary_update(h2_session *session, apr_array_header_t idx = h2_push_diary_find(session->push_diary, e.hash); if (idx >= 0) { /* Intentional no APLOGNO */ - ap_log_cerror(APLOG_MARK, GCSLOG_LEVEL, 0, session->c, + ap_log_cerror(APLOG_MARK, GCSLOG_LEVEL, 0, session->c1, "push_diary_update: already there PUSH %s", push->req->path); - move_to_last(session->push_diary, idx); + move_to_last(session->push_diary, (apr_size_t)idx); } else { /* Intentional no APLOGNO */ - ap_log_cerror(APLOG_MARK, GCSLOG_LEVEL, 0, session->c, + ap_log_cerror(APLOG_MARK, GCSLOG_LEVEL, 0, session->c1, "push_diary_update: adding PUSH %s", push->req->path); if (!npushes) { npushes = apr_array_make(pushes->pool, 5, sizeof(h2_push_diary_entry*)); @@ -687,34 +684,22 @@ apr_array_header_t *h2_push_diary_update(h2_session *session, apr_array_header_t return npushes; } -apr_array_header_t *h2_push_collect_update(h2_stream *stream, - const struct h2_request *req, +#if AP_HAS_RESPONSE_BUCKETS +apr_array_header_t *h2_push_collect_update(struct h2_stream *stream, + const struct h2_request *req, + const ap_bucket_response *res) +#else +apr_array_header_t *h2_push_collect_update(struct h2_stream *stream, + const struct h2_request *req, const struct h2_headers *res) +#endif { - h2_session *session = stream->session; - const char *cache_digest = apr_table_get(req->headers, "Cache-Digest"); apr_array_header_t *pushes; - apr_status_t status; - if (cache_digest && session->push_diary) { - status = h2_push_diary_digest64_set(session->push_diary, req->authority, - cache_digest, stream->pool); - if (status != APR_SUCCESS) { - ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, session->c, - H2_SSSN_LOG(APLOGNO(03057), session, - "push diary set from Cache-Digest: %s"), cache_digest); - } - } pushes = h2_push_collect(stream->pool, req, stream->push_policy, res); return h2_push_diary_update(stream->session, pushes); } -static apr_int32_t h2_log2inv(unsigned char log2) -{ - return log2? (1 << log2) : 1; -} - - typedef struct { h2_push_diary *diary; unsigned char log2p; @@ -822,23 +807,18 @@ apr_status_t h2_push_diary_digest_get(h2_push_diary *diary, apr_pool_t *pool, int maxP, const char *authority, const char **pdata, apr_size_t *plen) { - int nelts, N, i; + int nelts, N; unsigned char log2n, log2pmax; gset_encoder encoder; apr_uint64_t *hashes; - apr_size_t hash_count; + apr_size_t hash_count, i; nelts = diary->entries->nelts; - - if (nelts > APR_UINT32_MAX) { - /* should not happen */ - return APR_ENOTIMPL; - } N = ceil_power_of_2(nelts); log2n = h2_log2(N); /* Now log2p is the max number of relevant bits, so that - * log2p + log2n == mask_bits. We can uise a lower log2p + * log2p + log2n == mask_bits. We can use a lower log2p * and have a shorter set encoding... 
*/ log2pmax = h2_log2(ceil_power_of_2(maxP)); @@ -895,166 +875,3 @@ apr_status_t h2_push_diary_digest_get(h2_push_diary *diary, apr_pool_t *pool, return APR_SUCCESS; } -typedef struct { - h2_push_diary *diary; - apr_pool_t *pool; - unsigned char log2p; - const unsigned char *data; - apr_size_t datalen; - apr_size_t offset; - unsigned int bit; - apr_uint64_t last_val; -} gset_decoder; - -static int gset_decode_next_bit(gset_decoder *decoder) -{ - if (++decoder->bit >= 8) { - if (++decoder->offset >= decoder->datalen) { - return -1; - } - decoder->bit = 0; - } - return (decoder->data[decoder->offset] & cbit_mask[decoder->bit])? 1 : 0; -} - -static apr_status_t gset_decode_next(gset_decoder *decoder, apr_uint64_t *phash) -{ - apr_uint64_t flex = 0, fixed = 0, delta; - int i; - - /* read 1 bits until we encounter 0, then read log2n(diary-P) bits. - * On a malformed bit-string, this will not fail, but produce results - * which are pbly too large. Luckily, the diary will modulo the hash. - */ - while (1) { - int bit = gset_decode_next_bit(decoder); - if (bit == -1) { - return APR_EINVAL; - } - if (!bit) { - break; - } - ++flex; - } - - for (i = 0; i < decoder->log2p; ++i) { - int bit = gset_decode_next_bit(decoder); - if (bit == -1) { - return APR_EINVAL; - } - fixed = (fixed << 1) | bit; - } - - delta = (flex << decoder->log2p) | fixed; - *phash = delta + decoder->last_val; - decoder->last_val = *phash; - - /* Intentional no APLOGNO */ - ap_log_perror(APLOG_MARK, GCSLOG_LEVEL, 0, decoder->pool, - "h2_push_diary_digest_dec: val=%"APR_UINT64_T_HEX_FMT", delta=%" - APR_UINT64_T_HEX_FMT", flex=%d, fixed=%"APR_UINT64_T_HEX_FMT, - *phash, delta, (int)flex, fixed); - - return APR_SUCCESS; -} - -/** - * Initialize the push diary by a cache digest as described in - * https://datatracker.ietf.org/doc/draft-kazuho-h2-cache-digest/ - * . - * @param diary the diary to set the digest into - * @param data the binary cache digest - * @param len the length of the cache digest - * @return APR_EINVAL if digest was not successfully parsed - */ -apr_status_t h2_push_diary_digest_set(h2_push_diary *diary, const char *authority, - const char *data, apr_size_t len) -{ - gset_decoder decoder; - unsigned char log2n, log2p; - int N, i; - apr_pool_t *pool = diary->entries->pool; - h2_push_diary_entry e; - apr_status_t status = APR_SUCCESS; - - if (len < 2) { - /* at least this should be there */ - return APR_EINVAL; - } - log2n = data[0]; - log2p = data[1]; - diary->mask_bits = log2n + log2p; - if (diary->mask_bits > 64) { - /* cannot handle */ - return APR_ENOTIMPL; - } - - /* whatever is in the digest, it replaces the diary entries */ - apr_array_clear(diary->entries); - if (!authority || !strcmp("*", authority)) { - diary->authority = NULL; - } - else if (!diary->authority || strcmp(diary->authority, authority)) { - diary->authority = apr_pstrdup(diary->entries->pool, authority); - } - - N = h2_log2inv(log2n + log2p); - - decoder.diary = diary; - decoder.pool = pool; - decoder.log2p = log2p; - decoder.data = (const unsigned char*)data; - decoder.datalen = len; - decoder.offset = 1; - decoder.bit = 8; - decoder.last_val = 0; - - diary->N = N; - /* Determine effective N we use for storage */ - if (!N) { - /* a totally empty cache digest. someone tells us that she has no - * entries in the cache at all. Use our own preferences for N+mask - */ - diary->N = diary->NMax; - return APR_SUCCESS; - } - else if (N > diary->NMax) { - /* Store not more than diary is configured to hold. We open us up - * to DOS attacks otherwise. 
*/ - diary->N = diary->NMax; - } - - /* Intentional no APLOGNO */ - ap_log_perror(APLOG_MARK, GCSLOG_LEVEL, 0, pool, - "h2_push_diary_digest_set: N=%d, log2n=%d, " - "diary->mask_bits=%d, dec.log2p=%d", - (int)diary->N, (int)log2n, diary->mask_bits, - (int)decoder.log2p); - - for (i = 0; i < diary->N; ++i) { - if (gset_decode_next(&decoder, &e.hash) != APR_SUCCESS) { - /* the data may have less than N values */ - break; - } - h2_push_diary_append(diary, &e); - } - - /* Intentional no APLOGNO */ - ap_log_perror(APLOG_MARK, GCSLOG_LEVEL, 0, pool, - "h2_push_diary_digest_set: diary now with %d entries, mask_bits=%d", - (int)diary->entries->nelts, diary->mask_bits); - return status; -} - -apr_status_t h2_push_diary_digest64_set(h2_push_diary *diary, const char *authority, - const char *data64url, apr_pool_t *pool) -{ - const char *data; - apr_size_t len = h2_util_base64url_decode(&data, data64url, pool); - /* Intentional no APLOGNO */ - ap_log_perror(APLOG_MARK, GCSLOG_LEVEL, 0, pool, - "h2_push_diary_digest64_set: digest=%s, dlen=%d", - data64url, (int)len); - return h2_push_diary_digest_set(diary, authority, data, len); -} - diff --git a/modules/http2/h2_push.h b/modules/http2/h2_push.h index bc24e68..947b73b 100644 --- a/modules/http2/h2_push.h +++ b/modules/http2/h2_push.h @@ -17,10 +17,12 @@ #ifndef __mod_h2__h2_push__ #define __mod_h2__h2_push__ +#include + #include "h2.h" +#include "h2_headers.h" struct h2_request; -struct h2_headers; struct h2_ngheader; struct h2_session; struct h2_stream; @@ -35,6 +37,44 @@ typedef enum { H2_PUSH_DIGEST_SHA256 } h2_push_digest_type; +/******************************************************************************* + * push diary + * + * - The push diary keeps track of resources already PUSHed via HTTP/2 on this + * connection. It records a hash value from the absolute URL of the resource + * pushed. + * - Lacking openssl, + * - with openssl, it uses SHA256 to calculate the hash value, otherwise it + * falls back to apr_hashfunc_default() + * - whatever the method to generate the hash, the diary keeps a maximum of 64 + * bits per hash, limiting the memory consumption to about + * H2PushDiarySize * 8 + * bytes. Entries are sorted by most recently used and oldest entries are + * forgotten first. + * - While useful by itself to avoid duplicated PUSHes on the same connection, + * the original idea was that clients provided a 'Cache-Digest' header with + * the values of *their own* cached resources. This was described in + * + * and some subsequent revisions that tweaked values but kept the overall idea. + * - The draft was abandoned by the IETF http-wg, as support from major clients, + * e.g. browsers, was lacking for various reasons. + * - For these reasons, mod_h2 abandoned its support for client supplied values + * but keeps the diary. It seems to provide value for applications using PUSH, + * is configurable in size and defaults to a very moderate amount of memory + * used. + * - The cache digest header is a Golomb Coded Set of hash values, but it may + * limit the amount of bits per hash value even further. For a good description + * of GCS, read here: + * + ******************************************************************************/ + + +/* + * The push diary is based on the abandoned draft + * + * that describes how to use golomb filters. 
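As a caller-side sketch (assumed usage, not part of the patch): a session that keeps a diary can ask for the GCS-encoded digest of everything pushed so far; maxP roughly bounds the per-hash precision (log2p) of the encoding, and authority may be "*" or NULL for all origins. The session and pool variables are assumptions here:

    const char *data;
    apr_size_t len;
    apr_status_t rv;

    rv = h2_push_diary_digest_get(session->push_diary, pool, 256, "*", &data, &len);
    if (rv == APR_SUCCESS) {
        /* data/len now hold the Golomb Coded Set of pushed-URL hashes */
    }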
+ */ + typedef struct h2_push_diary h2_push_diary; typedef void h2_push_digest_calc(h2_push_diary *diary, apr_uint64_t *phash, h2_push *push); @@ -59,14 +99,21 @@ struct h2_push_diary { * @param res the response from the server * @return array of h2_push addresses or NULL */ -apr_array_header_t *h2_push_collect(apr_pool_t *p, - const struct h2_request *req, - int push_policy, +#if AP_HAS_RESPONSE_BUCKETS +apr_array_header_t *h2_push_collect(apr_pool_t *p, + const struct h2_request *req, + apr_uint32_t push_policy, + const ap_bucket_response *res); +#else +apr_array_header_t *h2_push_collect(apr_pool_t *p, + const struct h2_request *req, + apr_uint32_t push_policy, const struct h2_headers *res); +#endif /** * Create a new push diary for the given maximum number of entries. - * + * * @param p the pool to use * @param N the max number of entries, rounded up to 2^x * @return the created diary, might be NULL of max_entries is 0 @@ -83,14 +130,21 @@ apr_array_header_t *h2_push_diary_update(struct h2_session *session, apr_array_h * Collect pushes for the given request/response pair, enter them into the * diary and return those pushes newly entered. */ -apr_array_header_t *h2_push_collect_update(struct h2_stream *stream, - const struct h2_request *req, +#if AP_HAS_RESPONSE_BUCKETS +apr_array_header_t *h2_push_collect_update(struct h2_stream *stream, + const struct h2_request *req, + const ap_bucket_response *res); +#else +apr_array_header_t *h2_push_collect_update(struct h2_stream *stream, + const struct h2_request *req, const struct h2_headers *res); +#endif + /** * Get a cache digest as described in * https://datatracker.ietf.org/doc/draft-kazuho-h2-cache-digest/ * from the contents of the push diary. - * + * * @param diary the diary to calculdate the digest from * @param p the pool to use * @param authority the authority to get the data for, use NULL/"*" for all @@ -101,20 +155,4 @@ apr_status_t h2_push_diary_digest_get(h2_push_diary *diary, apr_pool_t *p, int maxP, const char *authority, const char **pdata, apr_size_t *plen); -/** - * Initialize the push diary by a cache digest as described in - * https://datatracker.ietf.org/doc/draft-kazuho-h2-cache-digest/ - * . 
- * @param diary the diary to set the digest into - * @param authority the authority to set the data for - * @param data the binary cache digest - * @param len the length of the cache digest - * @return APR_EINVAL if digest was not successfully parsed - */ -apr_status_t h2_push_diary_digest_set(h2_push_diary *diary, const char *authority, - const char *data, apr_size_t len); - -apr_status_t h2_push_diary_digest64_set(h2_push_diary *diary, const char *authority, - const char *data64url, apr_pool_t *pool); - #endif /* defined(__mod_h2__h2_push__) */ diff --git a/modules/http2/h2_request.c b/modules/http2/h2_request.c index 8899c4f..2713947 100644 --- a/modules/http2/h2_request.c +++ b/modules/http2/h2_request.c @@ -16,7 +16,12 @@ #include -#include +#include "apr.h" +#include "apr_strings.h" +#include "apr_lib.h" +#include "apr_strmatch.h" + +#include #include #include @@ -24,6 +29,7 @@ #include #include #include +#include #include #include #include @@ -32,11 +38,28 @@ #include "h2_private.h" #include "h2_config.h" +#include "h2_conn_ctx.h" #include "h2_push.h" #include "h2_request.h" #include "h2_util.h" +h2_request *h2_request_create(int id, apr_pool_t *pool, const char *method, + const char *scheme, const char *authority, + const char *path, apr_table_t *header) +{ + h2_request *req = apr_pcalloc(pool, sizeof(h2_request)); + + req->method = method; + req->scheme = scheme; + req->authority = authority; + req->path = path; + req->headers = header? header : apr_table_make(pool, 10); + req->request_time = apr_time_now(); + + return req; +} + typedef struct { apr_table_t *headers; apr_pool_t *pool; @@ -46,9 +69,9 @@ typedef struct { static int set_h1_header(void *ctx, const char *key, const char *value) { h1_ctx *x = ctx; - x->status = h2_req_add_header(x->headers, x->pool, key, strlen(key), - value, strlen(value)); - return (x->status == APR_SUCCESS)? 1 : 0; + int was_added; + h2_req_add_header(x->headers, x->pool, key, strlen(key), value, strlen(value), 0, &was_added); + return 1; } apr_status_t h2_request_rcreate(h2_request **preq, apr_pool_t *pool, @@ -68,54 +91,67 @@ apr_status_t h2_request_rcreate(h2_request **preq, apr_pool_t *pool, return APR_EINVAL; } - if (!ap_strchr_c(authority, ':') && r->server && r->server->port) { - apr_port_t defport = apr_uri_port_of_scheme(scheme); - if (defport != r->server->port) { - /* port info missing and port is not default for scheme: append */ - authority = apr_psprintf(pool, "%s:%d", authority, - (int)r->server->port); + /* The authority we carry in h2_request is the 'authority' part of + * the URL for the request. r->hostname has stripped any port info that + * might have been present. Do we need to add it? + */ + if (!ap_strchr_c(authority, ':')) { + if (r->parsed_uri.port_str) { + /* Yes, it was there, add it again. */ + authority = apr_pstrcat(pool, authority, ":", r->parsed_uri.port_str, NULL); + } + else if (!r->parsed_uri.hostname && r->server && r->server->port) { + /* If there was no hostname in the parsed URL, the URL was relative. + * In that case, we restore port from our server->port, if it + * is known and not the default port for the scheme. 
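A few worked examples of that rule (hostnames and ports are illustrative):

    /* :authority outcomes for scheme "https" (default port 443):
     *   request URL carried "example.net:8443"   -> "example.net:8443"  (client's port kept)
     *   relative URL, server listens on 8080     -> "example.net:8080"  (non-default port appended)
     *   relative URL, server listens on 443      -> "example.net"       (default port omitted)
     */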
*/ + apr_port_t defport = apr_uri_port_of_scheme(scheme); + if (defport != r->server->port) { + /* port info missing and port is not default for scheme: append */ + authority = apr_psprintf(pool, "%s:%d", authority, + (int)r->server->port); + } } } - + req = apr_pcalloc(pool, sizeof(*req)); - req->method = apr_pstrdup(pool, r->method); - req->scheme = scheme; - req->authority = authority; - req->path = path; - req->headers = apr_table_make(pool, 10); - if (r->server) { - req->serialize = h2_config_geti(h2_config_sget(r->server), - H2_CONF_SER_HEADERS); - } + req->method = apr_pstrdup(pool, r->method); + req->scheme = scheme; + req->authority = authority; + req->path = path; + req->headers = apr_table_make(pool, 10); + req->http_status = H2_HTTP_STATUS_UNSET; + req->request_time = apr_time_now(); x.pool = pool; x.headers = req->headers; x.status = APR_SUCCESS; apr_table_do(set_h1_header, &x, r->headers_in, NULL); - + *preq = req; return x.status; } -apr_status_t h2_request_add_header(h2_request *req, apr_pool_t *pool, +apr_status_t h2_request_add_header(h2_request *req, apr_pool_t *pool, const char *name, size_t nlen, - const char *value, size_t vlen) + const char *value, size_t vlen, + size_t max_field_len, int *pwas_added) { apr_status_t status = APR_SUCCESS; - + + *pwas_added = 0; if (nlen <= 0) { return status; } - + if (name[0] == ':') { /* pseudo header, see ch. 8.1.2.3, always should come first */ if (!apr_is_empty_table(req->headers)) { ap_log_perror(APLOG_MARK, APLOG_ERR, 0, pool, - APLOGNO(02917) + APLOGNO(02917) "h2_request: pseudo header after request start"); return APR_EGENERAL; } - + if (H2_HEADER_METHOD_LEN == nlen && !strncmp(H2_HEADER_METHOD, name, nlen)) { req->method = apr_pstrndup(pool, value, vlen); @@ -132,32 +168,36 @@ apr_status_t h2_request_add_header(h2_request *req, apr_pool_t *pool, && !strncmp(H2_HEADER_AUTH, name, nlen)) { req->authority = apr_pstrndup(pool, value, vlen); } + else if (H2_HEADER_PROTO_LEN == nlen + && !strncmp(H2_HEADER_PROTO, name, nlen)) { + req->protocol = apr_pstrndup(pool, value, vlen); + } else { char buffer[32]; memset(buffer, 0, 32); strncpy(buffer, name, (nlen > 31)? 31 : nlen); ap_log_perror(APLOG_MARK, APLOG_WARNING, 0, pool, - APLOGNO(02954) + APLOGNO(02954) "h2_request: ignoring unknown pseudo header %s", buffer); } } else { - /* non-pseudo header, append to work bucket of stream */ - status = h2_req_add_header(req->headers, pool, name, nlen, value, vlen); + /* non-pseudo header, add to table */ + status = h2_req_add_header(req->headers, pool, name, nlen, value, vlen, + max_field_len, pwas_added); } - + return status; } -apr_status_t h2_request_end_headers(h2_request *req, apr_pool_t *pool, int eos, size_t raw_bytes) +apr_status_t h2_request_end_headers(h2_request *req, apr_pool_t *pool, + size_t raw_bytes) { - const char *s; - - /* rfc7540, ch. 8.1.2.3: - * - if we have :authority, it overrides any Host header - * - :authority MUST be ommited when converting h1->h2, so we - * might get a stream without, but then Host needs to be there */ + /* rfc7540, ch. 
8.1.2.3: without :authority, Host: must be there */ + if (req->authority && !strlen(req->authority)) { + req->authority = NULL; + } if (!req->authority) { const char *host = apr_table_get(req->headers, "Host"); if (!host) { @@ -168,30 +208,8 @@ apr_status_t h2_request_end_headers(h2_request *req, apr_pool_t *pool, int eos, else { apr_table_setn(req->headers, "Host", req->authority); } - - s = apr_table_get(req->headers, "Content-Length"); - if (!s) { - /* HTTP/2 does not need a Content-Length for framing, but our - * internal request processing is used to HTTP/1.1, so we - * need to either add a Content-Length or a Transfer-Encoding - * if any content can be expected. */ - if (!eos) { - /* We have not seen a content-length and have no eos, - * simulate a chunked encoding for our HTTP/1.1 infrastructure, - * in case we have "H2SerializeHeaders on" here - */ - req->chunked = 1; - apr_table_mergen(req->headers, "Transfer-Encoding", "chunked"); - } - else if (apr_table_get(req->headers, "Content-Type")) { - /* If we have a content-type, but already seen eos, no more - * data will come. Signal a zero content length explicitly. - */ - apr_table_setn(req->headers, "Content-Length", "0"); - } - } req->raw_bytes += raw_bytes; - + return APR_SUCCESS; } @@ -202,17 +220,16 @@ h2_request *h2_request_clone(apr_pool_t *p, const h2_request *src) dst->scheme = apr_pstrdup(p, src->scheme); dst->authority = apr_pstrdup(p, src->authority); dst->path = apr_pstrdup(p, src->path); + dst->protocol = apr_pstrdup(p, src->protocol); dst->headers = apr_table_clone(p, src->headers); return dst; } -request_rec *h2_request_create_rec(const h2_request *req, conn_rec *c) +#if !AP_MODULE_MAGIC_AT_LEAST(20120211, 106) +static request_rec *my_ap_create_request(conn_rec *c) { - int access_status = HTTP_OK; - const char *rpath; apr_pool_t *p; request_rec *r; - const char *s; apr_pool_create(&p, c->pool); apr_pool_tag(p, "request"); @@ -221,86 +238,294 @@ request_rec *h2_request_create_rec(const h2_request *req, conn_rec *c) r->pool = p; r->connection = c; r->server = c->base_server; - + r->user = NULL; r->ap_auth_type = NULL; - + r->allowed_methods = ap_make_method_list(p, 2); - - r->headers_in = apr_table_clone(r->pool, req->headers); + + r->headers_in = apr_table_make(r->pool, 5); r->trailers_in = apr_table_make(r->pool, 5); r->subprocess_env = apr_table_make(r->pool, 25); r->headers_out = apr_table_make(r->pool, 12); r->err_headers_out = apr_table_make(r->pool, 5); r->trailers_out = apr_table_make(r->pool, 5); r->notes = apr_table_make(r->pool, 5); - + r->request_config = ap_create_request_config(r->pool); /* Must be set before we run create request hook */ - + r->proto_output_filters = c->output_filters; r->output_filters = r->proto_output_filters; r->proto_input_filters = c->input_filters; r->input_filters = r->proto_input_filters; ap_run_create_request(r); r->per_dir_config = r->server->lookup_defaults; - + r->sent_bodyct = 0; /* bytect isn't for body */ - + r->read_length = 0; r->read_body = REQUEST_NO_BODY; - + r->status = HTTP_OK; /* Until further notice */ r->header_only = 0; r->the_request = NULL; - + /* Begin by presuming any module can make its own path_info assumptions, * until some module interjects and changes the value. 
*/ r->used_path_info = AP_REQ_DEFAULT_PATH_INFO; - + r->useragent_addr = c->client_addr; r->useragent_ip = c->client_ip; - + return r; +} +#endif + +#if AP_HAS_RESPONSE_BUCKETS +apr_bucket *h2_request_create_bucket(const h2_request *req, request_rec *r) +{ + conn_rec *c = r->connection; + apr_table_t *headers = apr_table_clone(r->pool, req->headers); + const char *uri = req->path; + + AP_DEBUG_ASSERT(req->method); + AP_DEBUG_ASSERT(req->authority); + if (!ap_cstr_casecmp("CONNECT", req->method)) { + uri = req->authority; + } + else if (h2_config_cgeti(c, H2_CONF_PROXY_REQUESTS)) { + /* Forward proxying: always absolute uris */ + uri = apr_psprintf(r->pool, "%s://%s%s", + req->scheme, req->authority, + req->path ? req->path : ""); + } + else if (req->scheme && ap_cstr_casecmp(req->scheme, "http") + && ap_cstr_casecmp(req->scheme, "https")) { + /* Client sent a non-http ':scheme', use an absolute URI */ + uri = apr_psprintf(r->pool, "%s://%s%s", + req->scheme, req->authority, req->path ? req->path : ""); + } + + return ap_bucket_request_create(req->method, uri, "HTTP/2.0", headers, + r->pool, c->bucket_alloc); +} +#endif + +static void assign_headers(request_rec *r, const h2_request *req, + int no_body, int is_connect) +{ + const char *cl; + + r->headers_in = apr_table_clone(r->pool, req->headers); + + if (req->authority && !is_connect) { + /* for internal handling, we have to simulate that :authority + * came in as Host:, RFC 9113 ch. says that mismatches between + * :authority and Host: SHOULD be rejected as malformed. However, + * we are more lenient and just replace any Host: if we have + * an :authority. + */ + const char *orig_host = apr_table_get(req->headers, "Host"); + if (orig_host && strcmp(req->authority, orig_host)) { + ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(10401) + "overwriting 'Host: %s' with :authority: %s'", + orig_host, req->authority); + apr_table_setn(r->subprocess_env, "H2_ORIGINAL_HOST", orig_host); + } + apr_table_setn(r->headers_in, "Host", req->authority); + ap_log_rerror(APLOG_MARK, APLOG_TRACE1, 0, r, + "set 'Host: %s' from :authority", req->authority); + } + + /* Unless we open a byte stream via CONNECT, apply content-length guards. */ + if (!is_connect) { + cl = apr_table_get(req->headers, "Content-Length"); + if (no_body) { + if (!cl && apr_table_get(req->headers, "Content-Type")) { + /* If we have a content-type, but already seen eos, no more + * data will come. Signal a zero content length explicitly. + */ + apr_table_setn(req->headers, "Content-Length", "0"); + } + } +#if !AP_HAS_RESPONSE_BUCKETS + else if (!cl) { + /* there may be a body and we have internal HTTP/1.1 processing. + * If the Content-Length is unspecified, we MUST simulate + * chunked Transfer-Encoding. + * + * HTTP/2 does not need a Content-Length for framing. Ideally + * all clients set the EOS flag on the header frame if they + * do not intent to send a body. However, forwarding proxies + * might just no know at the time and send an empty DATA + * frame with EOS much later. 
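The body-framing rule this comment describes, condensed into one sketch (illustrative; the helper name frame_body_for_h1 is invented, and the chunked branch only applies to builds without response buckets, as the #if below shows):

    #include "apr_tables.h"

    /* Sketch: decide how an HTTP/2 request body is represented for the
     * internal HTTP/1.1-style processing. */
    static void frame_body_for_h1(apr_table_t *headers, int no_body)
    {
        const char *cl = apr_table_get(headers, "Content-Length");

        if (no_body) {
            if (!cl && apr_table_get(headers, "Content-Type")) {
                /* end-of-stream already seen: make the empty body explicit */
                apr_table_setn(headers, "Content-Length", "0");
            }
        }
        else if (!cl) {
            /* a body may still arrive, but HTTP/2 framing carries no length:
             * simulate chunked transfer coding for the HTTP/1.1 machinery */
            apr_table_mergen(headers, "Transfer-Encoding", "chunked");
        }
    }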
+ */ + apr_table_mergen(r->headers_in, "Transfer-Encoding", "chunked"); + } +#endif /* else AP_HAS_RESPONSE_BUCKETS */ + } +} + +request_rec *h2_create_request_rec(const h2_request *req, conn_rec *c, + int no_body) +{ + int access_status = HTTP_OK; + int is_connect = !ap_cstr_casecmp("CONNECT", req->method); + +#if AP_MODULE_MAGIC_AT_LEAST(20120211, 106) + request_rec *r = ap_create_request(c); +#else + request_rec *r = my_ap_create_request(c); +#endif + +#if AP_MODULE_MAGIC_AT_LEAST(20120211, 107) + assign_headers(r, req, no_body, is_connect); ap_run_pre_read_request(r, c); - + /* Time to populate r with the data we have. */ r->request_time = req->request_time; - r->method = req->method; - /* Provide quick information about the request method as soon as known */ - r->method_number = ap_method_number_of(r->method); - if (r->method_number == M_GET && r->method[0] == 'H') { - r->header_only = 1; + AP_DEBUG_ASSERT(req->authority); + if (req->http_status != H2_HTTP_STATUS_UNSET) { + access_status = req->http_status; + goto die; + } + else if (is_connect) { + /* CONNECT MUST NOT have scheme or path */ + r->the_request = apr_psprintf(r->pool, "%s %s HTTP/2.0", + req->method, req->authority); + if (req->scheme) { + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, c, APLOGNO(10458) + "':scheme: %s' header present in CONNECT request", + req->scheme); + access_status = HTTP_BAD_REQUEST; + goto die; + } + else if (req->path) { + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, c, APLOGNO(10459) + "':path: %s' header present in CONNECT request", + req->path); + access_status = HTTP_BAD_REQUEST; + goto die; + } + } + else if (req->protocol) { + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, c, APLOGNO(10470) + "':protocol: %s' header present in %s request", + req->protocol, req->method); + access_status = HTTP_BAD_REQUEST; + goto die; + } + else if (h2_config_cgeti(c, H2_CONF_PROXY_REQUESTS)) { + if (!req->scheme) { + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, c, APLOGNO(10468) + "H2ProxyRequests on, but request misses :scheme"); + access_status = HTTP_BAD_REQUEST; + goto die; + } + if (!req->authority) { + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, c, APLOGNO(10469) + "H2ProxyRequests on, but request misses :authority"); + access_status = HTTP_BAD_REQUEST; + goto die; + } + r->the_request = apr_psprintf(r->pool, "%s %s://%s%s HTTP/2.0", + req->method, req->scheme, req->authority, + req->path ? req->path : ""); + } + else if (req->scheme && ap_cstr_casecmp(req->scheme, "http") + && ap_cstr_casecmp(req->scheme, "https")) { + /* Client sent a ':scheme' pseudo header for something else + * than what we have on this connection. Make an absolute URI. */ + r->the_request = apr_psprintf(r->pool, "%s %s://%s%s HTTP/2.0", + req->method, req->scheme, req->authority, + req->path ? req->path : ""); + } + else if (req->path) { + r->the_request = apr_psprintf(r->pool, "%s %s HTTP/2.0", + req->method, req->path); + } + else { + /* We should only come here on a request that is errored already. + * create a request line that passes parsing, we'll die anyway. + */ + AP_DEBUG_ASSERT(req->http_status != H2_HTTP_STATUS_UNSET); + r->the_request = apr_psprintf(r->pool, "%s / HTTP/2.0", req->method); } - rpath = (req->path ? req->path : ""); - ap_parse_uri(r, rpath); - r->protocol = (char*)"HTTP/2.0"; - r->proto_num = HTTP_VERSION(2, 0); - - r->the_request = apr_psprintf(r->pool, "%s %s %s", - r->method, rpath, r->protocol); - - /* update what we think the virtual host is based on the headers we've - * now read. may update status. 
- * Leave r->hostname empty, vhost will parse if form our Host: header, - * otherwise we get complains about port numbers. + /* Start with r->hostname = NULL, ap_check_request_header() will get it + * form Host: header, otherwise we get complains about port numbers. */ r->hostname = NULL; - ap_update_vhost_from_headers(r); - - /* we may have switched to another server */ - r->per_dir_config = r->server->lookup_defaults; - - s = apr_table_get(r->headers_in, "Expect"); - if (s && s[0]) { - if (ap_cstr_casecmp(s, "100-continue") == 0) { - r->expecting_100 = 1; + + /* Validate HTTP/1 request and select vhost. */ + if (!ap_parse_request_line(r) || !ap_check_request_header(r)) { + /* we may have switched to another server still */ + r->per_dir_config = r->server->lookup_defaults; + if (req->http_status != H2_HTTP_STATUS_UNSET) { + access_status = req->http_status; + /* Be safe and close the connection */ + c->keepalive = AP_CONN_CLOSE; } else { - r->status = HTTP_EXPECTATION_FAILED; - ap_send_error_response(r, 0); + access_status = r->status; + } + r->status = HTTP_OK; + goto die; + } +#else + { + const char *s; + + assign_headers(r, req, no_body, is_connect); + ap_run_pre_read_request(r, c); + + /* Time to populate r with the data we have. */ + r->request_time = req->request_time; + r->method = apr_pstrdup(r->pool, req->method); + /* Provide quick information about the request method as soon as known */ + r->method_number = ap_method_number_of(r->method); + if (r->method_number == M_GET && r->method[0] == 'H') { + r->header_only = 1; } + ap_parse_uri(r, req->path ? req->path : ""); + r->protocol = (char*)"HTTP/2.0"; + r->proto_num = HTTP_VERSION(2, 0); + r->the_request = apr_psprintf(r->pool, "%s %s HTTP/2.0", + r->method, req->path ? req->path : ""); + + /* Start with r->hostname = NULL, ap_check_request_header() will get it + * form Host: header, otherwise we get complains about port numbers. + */ + r->hostname = NULL; + ap_update_vhost_from_headers(r); + + /* we may have switched to another server */ + r->per_dir_config = r->server->lookup_defaults; + + s = apr_table_get(r->headers_in, "Expect"); + if (s && s[0]) { + if (ap_cstr_casecmp(s, "100-continue") == 0) { + r->expecting_100 = 1; + } + else { + r->status = HTTP_EXPECTATION_FAILED; + access_status = r->status; + goto die; + } + } + } +#endif + + /* we may have switched to another server */ + r->per_dir_config = r->server->lookup_defaults; + + if (req->http_status != H2_HTTP_STATUS_UNSET) { + access_status = req->http_status; + r->status = HTTP_OK; + /* Be safe and close the connection */ + c->keepalive = AP_CONN_CLOSE; + goto die; } /* @@ -311,29 +536,58 @@ request_rec *h2_request_create_rec(const h2_request *req, conn_rec *c) */ ap_add_input_filter_handle(ap_http_input_filter_handle, NULL, r, r->connection); - - if (access_status != HTTP_OK - || (access_status = ap_run_post_read_request(r))) { + + if ((access_status = ap_post_read_request(r))) { /* Request check post hooks failed. An example of this would be a * request for a vhost where h2 is disabled --> 421. 
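For orientation, the request lines synthesized by the branches above look roughly like this (host names and paths are made-up examples), and the common case reduces to a single apr_psprintf():

    /*   normal request:        "GET /index.html HTTP/2.0"
     *   CONNECT tunnel:        "CONNECT example.net:443 HTTP/2.0"
     *   forward-proxy request: "GET http://example.net/file HTTP/2.0"  */

    #include "apr_pools.h"
    #include "apr_strings.h"

    /* Illustrative helper (invented name): ap_parse_request_line() later splits
     * this back into method, URI and protocol, as it would for HTTP/1.1. */
    static const char *h2_style_request_line(apr_pool_t *p, const char *method,
                                             const char *path)
    {
        return apr_psprintf(p, "%s %s HTTP/2.0", method, path ? path : "/");
    }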
*/ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, c, APLOGNO(03367) "h2_request: access_status=%d, request_create failed", access_status); - ap_die(access_status, r); - ap_update_child_status(c->sbh, SERVER_BUSY_LOG, r); - ap_run_log_transaction(r); - r = NULL; - goto traceout; + goto die; } - AP_READ_REQUEST_SUCCESS((uintptr_t)r, (char *)r->method, - (char *)r->uri, (char *)r->server->defn_name, + AP_READ_REQUEST_SUCCESS((uintptr_t)r, (char *)r->method, + (char *)r->uri, (char *)r->server->defn_name, r->status); return r; -traceout: - AP_READ_REQUEST_FAILURE((uintptr_t)r); - return r; -} +die: + if (!r->method) { + /* if we fail early, `r` is not properly initialized for error + * processing which accesses fields in message generation. + * Make a best effort. */ + if (!r->the_request) { + r->the_request = apr_psprintf(r->pool, "%s %s HTTP/2.0", + req->method, req->path); + } + ap_parse_request_line(r); + } + ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, c, + "ap_die(%d) for %s", access_status, r->the_request); + ap_die(access_status, r); + /* ap_die() sent the response through the output filters, we must now + * end the request with an EOR bucket for stream/pipeline accounting. + */ + { + apr_bucket_brigade *eor_bb; +#if AP_MODULE_MAGIC_AT_LEAST(20180905, 1) + eor_bb = ap_acquire_brigade(c); + APR_BRIGADE_INSERT_TAIL(eor_bb, + ap_bucket_eor_create(c->bucket_alloc, r)); + ap_pass_brigade(c->output_filters, eor_bb); + ap_release_brigade(c, eor_bb); +#else + eor_bb = apr_brigade_create(c->pool, c->bucket_alloc); + APR_BRIGADE_INSERT_TAIL(eor_bb, + ap_bucket_eor_create(c->bucket_alloc, r)); + ap_pass_brigade(c->output_filters, eor_bb); + apr_brigade_destroy(eor_bb); +#endif + } + + r = NULL; + AP_READ_REQUEST_FAILURE((uintptr_t)r); + return NULL; +} diff --git a/modules/http2/h2_request.h b/modules/http2/h2_request.h index 48aee09..7e20b69 100644 --- a/modules/http2/h2_request.h +++ b/modules/http2/h2_request.h @@ -19,18 +19,24 @@ #include "h2.h" -apr_status_t h2_request_rcreate(h2_request **preq, apr_pool_t *pool, +h2_request *h2_request_create(int id, apr_pool_t *pool, const char *method, + const char *scheme, const char *authority, + const char *path, apr_table_t *header); + +apr_status_t h2_request_rcreate(h2_request **preq, apr_pool_t *pool, request_rec *r); apr_status_t h2_request_add_header(h2_request *req, apr_pool_t *pool, const char *name, size_t nlen, - const char *value, size_t vlen); + const char *value, size_t vlen, + size_t max_field_len, int *pwas_added); apr_status_t h2_request_add_trailer(h2_request *req, apr_pool_t *pool, const char *name, size_t nlen, const char *value, size_t vlen); -apr_status_t h2_request_end_headers(h2_request *req, apr_pool_t *pool, int eos, size_t raw_bytes); +apr_status_t h2_request_end_headers(h2_request *req, apr_pool_t *pool, + size_t raw_bytes); h2_request *h2_request_clone(apr_pool_t *p, const h2_request *src); @@ -40,9 +46,14 @@ h2_request *h2_request_clone(apr_pool_t *p, const h2_request *src); * * @param req the h2 request to process * @param conn the connection to process the request on + * @param no_body != 0 iff the request is known to have no body * @return the request_rec representing the request */ -request_rec *h2_request_create_rec(const h2_request *req, conn_rec *conn); +request_rec *h2_create_request_rec(const h2_request *req, conn_rec *conn, + int no_body); +#if AP_HAS_RESPONSE_BUCKETS +apr_bucket *h2_request_create_bucket(const h2_request *req, request_rec *r); +#endif #endif /* defined(__mod_h2__h2_request__) */ diff --git 
a/modules/http2/h2_session.c b/modules/http2/h2_session.c index ed96cf0..5724fda 100644 --- a/modules/http2/h2_session.c +++ b/modules/http2/h2_session.c @@ -17,6 +17,7 @@ #include #include #include +#include #include #include @@ -26,33 +27,35 @@ #include #include #include +#include #include #include +#if APR_HAVE_UNISTD_H +#include /* for getpid() */ +#endif + #include "h2_private.h" #include "h2.h" #include "h2_bucket_beam.h" #include "h2_bucket_eos.h" #include "h2_config.h" -#include "h2_ctx.h" -#include "h2_filter.h" -#include "h2_h2.h" +#include "h2_conn_ctx.h" +#include "h2_protocol.h" #include "h2_mplx.h" #include "h2_push.h" #include "h2_request.h" #include "h2_headers.h" #include "h2_stream.h" -#include "h2_task.h" +#include "h2_c2.h" #include "h2_session.h" #include "h2_util.h" #include "h2_version.h" #include "h2_workers.h" -static apr_status_t dispatch_master(h2_session *session); -static apr_status_t h2_session_read(h2_session *session, int block); -static void transit(h2_session *session, const char *action, +static void transit(h2_session *session, const char *action, h2_session_state nstate); static void on_stream_state_enter(void *ctx, h2_stream *stream); @@ -73,23 +76,20 @@ static int h2_session_status_from_apr_status(apr_status_t rv) return NGHTTP2_ERR_PROTO; } -h2_stream *h2_session_stream_get(h2_session *session, int stream_id) +static h2_stream *get_stream(h2_session *session, int stream_id) { return nghttp2_session_get_stream_user_data(session->ngh2, stream_id); } -static void dispatch_event(h2_session *session, h2_session_event_t ev, - int err, const char *msg); - -void h2_session_event(h2_session *session, h2_session_event_t ev, +void h2_session_event(h2_session *session, h2_session_event_t ev, int err, const char *msg) { - dispatch_event(session, ev, err, msg); + h2_session_dispatch_event(session, ev, err, msg); } static int rst_unprocessed_stream(h2_stream *stream, void *ctx) { - int unprocessed = (!h2_stream_was_closed(stream) + int unprocessed = (!h2_stream_is_at_or_past(stream, H2_SS_CLOSED) && (H2_STREAM_CLIENT_INITIATED(stream->id)? (!stream->session->local.accepting && stream->id > stream->session->local.accepted_max) @@ -106,7 +106,7 @@ static int rst_unprocessed_stream(h2_stream *stream, void *ctx) static void cleanup_unprocessed_streams(h2_session *session) { - h2_mplx_stream_do(session->mplx, rst_unprocessed_stream, session); + h2_mplx_c1_streams_do(session->mplx, rst_unprocessed_stream, session); } static h2_stream *h2_session_open_stream(h2_session *session, int stream_id, @@ -127,7 +127,7 @@ static h2_stream *h2_session_open_stream(h2_session *session, int stream_id, } /** - * Determine the importance of streams when scheduling tasks. + * Determine the priority order of streams. 
* - if both stream depend on the same one, compare weights * - if one stream is closer to the root, prioritize that one * - if both are on the same level, use the weight of their root @@ -187,20 +187,26 @@ static ssize_t send_cb(nghttp2_session *ngh2, int flags, void *userp) { h2_session *session = (h2_session *)userp; - apr_status_t status; + apr_status_t rv; (void)ngh2; (void)flags; - - status = h2_conn_io_write(&session->io, (const char *)data, length); - if (status == APR_SUCCESS) { + + if (h2_c1_io_needs_flush(&session->io)) { + return NGHTTP2_ERR_WOULDBLOCK; + } + + rv = h2_c1_io_add_data(&session->io, (const char *)data, length); + if (APR_SUCCESS == rv) { return length; } - if (APR_STATUS_IS_EAGAIN(status)) { + else if (APR_STATUS_IS_EAGAIN(rv)) { return NGHTTP2_ERR_WOULDBLOCK; } - ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, session->c, APLOGNO(03062) - "h2_session: send error"); - return h2_session_status_from_apr_status(status); + else { + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, rv, session->c1, + APLOGNO(03062) "h2_session: send error"); + return h2_session_status_from_apr_status(rv); + } } static int on_invalid_frame_recv_cb(nghttp2_session *ngh2, @@ -210,11 +216,11 @@ static int on_invalid_frame_recv_cb(nghttp2_session *ngh2, h2_session *session = (h2_session *)userp; (void)ngh2; - if (APLOGcdebug(session->c)) { + if (APLOGcdebug(session->c1)) { char buffer[256]; h2_util_frame_print(frame, buffer, sizeof(buffer)/sizeof(buffer[0])); - ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c1, H2_SSSN_LOG(APLOGNO(03063), session, "recv invalid FRAME[%s], frames=%ld/%ld (r/s)"), buffer, (long)session->frames_received, @@ -232,15 +238,17 @@ static int on_data_chunk_recv_cb(nghttp2_session *ngh2, uint8_t flags, h2_stream * stream; int rv = 0; - stream = h2_session_stream_get(session, stream_id); + stream = get_stream(session, stream_id); if (stream) { + ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, session->c1, + H2_SSSN_STRM_MSG(session, stream_id, "write %ld bytes of DATA"), + (long)len); status = h2_stream_recv_DATA(stream, flags, data, len); - dispatch_event(session, H2_SESSION_EV_STREAM_CHANGE, 0, "stream data rcvd"); } else { - ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, APLOGNO(03064) - "h2_stream(%ld-%d): on_data_chunk for unknown stream", - session->id, (int)stream_id); + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c1, APLOGNO(03064) + H2_SSSN_STRM_MSG(session, stream_id, + "on_data_chunk for unknown stream")); rv = NGHTTP2_ERR_CALLBACK_FAILURE; } @@ -258,13 +266,13 @@ static int on_stream_close_cb(nghttp2_session *ngh2, int32_t stream_id, h2_stream *stream; (void)ngh2; - stream = h2_session_stream_get(session, stream_id); + stream = get_stream(session, stream_id); if (stream) { if (error_code) { - ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c1, H2_STRM_LOG(APLOGNO(03065), stream, "closing with err=%d %s"), - (int)error_code, h2_h2_err_description(error_code)); + (int)error_code, h2_protocol_err_description(error_code)); h2_stream_rst(stream, error_code); } } @@ -275,16 +283,16 @@ static int on_begin_headers_cb(nghttp2_session *ngh2, const nghttp2_frame *frame, void *userp) { h2_session *session = (h2_session *)userp; - h2_stream *s; + h2_stream *s = NULL; /* We may see HEADERs at the start of a stream or after all DATA * streams to carry trailers. 
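get_stream() and the lookups in these callbacks rely on nghttp2's per-stream user data; a minimal sketch of that pairing (the attach side is assumed here, the real call sits in the stream-opening code):

    #include <nghttp2/nghttp2.h>

    static void attach_stream(nghttp2_session *ngh2, int32_t stream_id, void *stream)
    {
        /* store the h2_stream pointer once, when the stream is opened */
        (void)nghttp2_session_set_stream_user_data(ngh2, stream_id, stream);
    }

    static void *lookup_stream(nghttp2_session *ngh2, int32_t stream_id)
    {
        /* every callback can then map the id back; NULL for unknown streams */
        return nghttp2_session_get_stream_user_data(ngh2, stream_id);
    }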
*/ (void)ngh2; - s = h2_session_stream_get(session, frame->hd.stream_id); + s = get_stream(session, frame->hd.stream_id); if (s) { /* nop */ } - else { + else if (session->local.accepting) { s = h2_session_open_stream(userp, frame->hd.stream_id, 0); } return s? 0 : NGHTTP2_ERR_START_STREAM_NOT_ALLOWED; @@ -301,17 +309,23 @@ static int on_header_cb(nghttp2_session *ngh2, const nghttp2_frame *frame, apr_status_t status; (void)flags; - stream = h2_session_stream_get(session, frame->hd.stream_id); + stream = get_stream(session, frame->hd.stream_id); if (!stream) { - ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, APLOGNO(02920) - "h2_stream(%ld-%d): on_header unknown stream", - session->id, (int)frame->hd.stream_id); + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c1, APLOGNO(02920) + H2_SSSN_STRM_MSG(session, frame->hd.stream_id, + "on_header unknown stream")); return NGHTTP2_ERR_TEMPORAL_CALLBACK_FAILURE; } status = h2_stream_add_header(stream, (const char *)name, namelen, (const char *)value, valuelen); - if (status != APR_SUCCESS && !h2_stream_is_ready(stream)) { + if (status != APR_SUCCESS && + (!stream->rtmp || + stream->rtmp->http_status == H2_HTTP_STATUS_UNSET || + /* We accept a certain amount of failures in order to reply + * with an informative HTTP error response like 413. But if the + * client is too wrong, we fail the request a RESET of the stream */ + stream->request_headers_failed > 100)) { return NGHTTP2_ERR_TEMPORAL_CALLBACK_FAILURE; } return 0; @@ -330,15 +344,25 @@ static int on_frame_recv_cb(nghttp2_session *ng2s, h2_stream *stream; apr_status_t rv = APR_SUCCESS; - if (APLOGcdebug(session->c)) { + stream = frame->hd.stream_id? get_stream(session, frame->hd.stream_id) : NULL; + if (APLOGcdebug(session->c1)) { char buffer[256]; - + h2_util_frame_print(frame, buffer, sizeof(buffer)/sizeof(buffer[0])); - ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, - H2_SSSN_LOG(APLOGNO(03066), session, - "recv FRAME[%s], frames=%ld/%ld (r/s)"), - buffer, (long)session->frames_received, - (long)session->frames_sent); + if (stream) { + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c1, + H2_STRM_LOG(APLOGNO(10302), stream, + "recv FRAME[%s], frames=%ld/%ld (r/s)"), + buffer, (long)session->frames_received, + (long)session->frames_sent); + } + else { + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c1, + H2_SSSN_LOG(APLOGNO(03066), session, + "recv FRAME[%s], frames=%ld/%ld (r/s)"), + buffer, (long)session->frames_received, + (long)session->frames_sent); + } } ++session->frames_received; @@ -347,16 +371,14 @@ static int on_frame_recv_cb(nghttp2_session *ng2s, /* This can be HEADERS for a new stream, defining the request, * or HEADER may come after DATA at the end of a stream as in * trailers */ - stream = h2_session_stream_get(session, frame->hd.stream_id); if (stream) { rv = h2_stream_recv_frame(stream, NGHTTP2_HEADERS, frame->hd.flags, frame->hd.length + H2_FRAME_HDR_LEN); } break; case NGHTTP2_DATA: - stream = h2_session_stream_get(session, frame->hd.stream_id); if (stream) { - ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c1, H2_STRM_LOG(APLOGNO(02923), stream, "DATA, len=%ld, flags=%d"), (long)frame->hd.length, frame->hd.flags); @@ -366,35 +388,40 @@ static int on_frame_recv_cb(nghttp2_session *ng2s, break; case NGHTTP2_PRIORITY: session->reprioritize = 1; - ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, session->c, - "h2_stream(%ld-%d): PRIORITY frame " - " weight=%d, dependsOn=%d, exclusive=%d", - 
session->id, (int)frame->hd.stream_id, + ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, session->c1, + H2_SSSN_STRM_MSG(session, frame->hd.stream_id, "PRIORITY frame " + " weight=%d, dependsOn=%d, exclusive=%d"), frame->priority.pri_spec.weight, frame->priority.pri_spec.stream_id, frame->priority.pri_spec.exclusive); break; case NGHTTP2_WINDOW_UPDATE: - ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, session->c, - "h2_stream(%ld-%d): WINDOW_UPDATE incr=%d", - session->id, (int)frame->hd.stream_id, + ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, session->c1, + H2_SSSN_STRM_MSG(session, frame->hd.stream_id, + "WINDOW_UPDATE incr=%d"), frame->window_update.window_size_increment); - if (nghttp2_session_want_write(session->ngh2)) { - dispatch_event(session, H2_SESSION_EV_FRAME_RCVD, 0, "window update"); - } break; case NGHTTP2_RST_STREAM: - ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, APLOGNO(03067) - "h2_stream(%ld-%d): RST_STREAM by client, errror=%d", - session->id, (int)frame->hd.stream_id, + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c1, APLOGNO(03067) + H2_SSSN_STRM_MSG(session, frame->hd.stream_id, + "RST_STREAM by client, error=%d"), (int)frame->rst_stream.error_code); - stream = h2_session_stream_get(session, frame->hd.stream_id); + if (stream) { + rv = h2_stream_recv_frame(stream, NGHTTP2_RST_STREAM, frame->hd.flags, + frame->hd.length + H2_FRAME_HDR_LEN); + } if (stream && stream->initiated_on) { + /* A stream reset on a request we sent it. Normal, when the + * client does not want it. */ ++session->pushes_reset; } else { - ++session->streams_reset; + /* A stream reset on a request it sent us. Could happen in a browser + * when the user navigates away or cancels loading - maybe. */ + h2_mplx_c1_client_rst(session->mplx, frame->hd.stream_id, + stream); } + ++session->streams_reset; break; case NGHTTP2_GOAWAY: if (frame->goaway.error_code == 0 @@ -404,23 +431,21 @@ static int on_frame_recv_cb(nghttp2_session *ng2s, } else { session->remote.accepted_max = frame->goaway.last_stream_id; - dispatch_event(session, H2_SESSION_EV_REMOTE_GOAWAY, + h2_session_dispatch_event(session, H2_SESSION_EV_REMOTE_GOAWAY, frame->goaway.error_code, NULL); } break; case NGHTTP2_SETTINGS: - if (APLOGctrace2(session->c)) { - ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, session->c, - H2_SSSN_MSG(session, "SETTINGS, len=%ld"), (long)frame->hd.length); - } + ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, session->c1, + H2_SSSN_MSG(session, "SETTINGS, len=%ld"), (long)frame->hd.length); break; default: - if (APLOGctrace2(session->c)) { + if (APLOGctrace2(session->c1)) { char buffer[256]; h2_util_frame_print(frame, buffer, sizeof(buffer)/sizeof(buffer[0])); - ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, session->c, + ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, session->c1, H2_SSSN_MSG(session, "on_frame_rcv %s"), buffer); } break; @@ -436,7 +461,7 @@ static int on_frame_recv_cb(nghttp2_session *ng2s, * become in serving this connection. This is expressed in increasing "idle_delays". * Eventually, the connection will timeout and we'll close it. 
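The back-off this comment describes is driven by a capped counter; spelled out as a worked consequence of the code that follows:

    /*   idle_frames = H2MIN(idle_frames + 1, frames_received);
     *   if (idle_frames > 10) ...   -- the session starts backing off
     *
     * Because the counter is capped at frames_received, the "> 10" branch can
     * only trigger once at least eleven frames have arrived on the connection;
     * a freshly opened session is never throttled outright. */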
*/ session->idle_frames = H2MIN(session->idle_frames + 1, session->frames_received); - ap_log_cerror( APLOG_MARK, APLOG_TRACE2, 0, session->c, + ap_log_cerror( APLOG_MARK, APLOG_TRACE2, 0, session->c1, H2_SSSN_MSG(session, "session has %ld idle frames"), (long)session->idle_frames); if (session->idle_frames > 10) { @@ -461,16 +486,6 @@ static int on_frame_recv_cb(nghttp2_session *ng2s, return 0; } -static int h2_session_continue_data(h2_session *session) { - if (h2_mplx_has_master_events(session->mplx)) { - return 0; - } - if (h2_conn_io_needs_flush(&session->io)) { - return 0; - } - return 1; -} - static char immortal_zeros[H2_MAX_PADLEN]; static int on_send_data_cb(nghttp2_session *ngh2, @@ -491,48 +506,42 @@ static int on_send_data_cb(nghttp2_session *ngh2, (void)ngh2; (void)source; - if (!h2_session_continue_data(session)) { - return NGHTTP2_ERR_WOULDBLOCK; - } - - if (frame->data.padlen > H2_MAX_PADLEN) { - return NGHTTP2_ERR_PROTO; - } + ap_assert(frame->data.padlen <= (H2_MAX_PADLEN+1)); padlen = (unsigned char)frame->data.padlen; - stream = h2_session_stream_get(session, stream_id); + stream = get_stream(session, stream_id); if (!stream) { - ap_log_cerror(APLOG_MARK, APLOG_ERR, APR_NOTFOUND, session->c, + ap_log_cerror(APLOG_MARK, APLOG_ERR, APR_NOTFOUND, session->c1, APLOGNO(02924) - "h2_stream(%ld-%d): send_data, stream not found", - session->id, (int)stream_id); + H2_SSSN_STRM_MSG(session, stream_id, "send_data, stream not found")); return NGHTTP2_ERR_CALLBACK_FAILURE; } - ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, session->c, + ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, session->c1, H2_STRM_MSG(stream, "send_data_cb for %ld bytes"), (long)length); - status = h2_conn_io_write(&session->io, (const char *)framehd, 9); + status = h2_c1_io_add_data(&session->io, (const char *)framehd, H2_FRAME_HDR_LEN); if (padlen && status == APR_SUCCESS) { - status = h2_conn_io_write(&session->io, (const char *)&padlen, 1); + --padlen; + status = h2_c1_io_add_data(&session->io, (const char *)&padlen, 1); } if (status != APR_SUCCESS) { - ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, session->c, + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, session->c1, H2_STRM_MSG(stream, "writing frame header")); return NGHTTP2_ERR_CALLBACK_FAILURE; } status = h2_stream_read_to(stream, session->bbtmp, &len, &eos); if (status != APR_SUCCESS) { - ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, session->c, + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, session->c1, H2_STRM_MSG(stream, "send_data_cb, reading stream")); apr_brigade_cleanup(session->bbtmp); return NGHTTP2_ERR_CALLBACK_FAILURE; } - else if (len != length) { - ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, session->c, + else if (len != (apr_off_t)length) { + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, session->c1, H2_STRM_MSG(stream, "send_data_cb, wanted %ld bytes, " "got %ld from stream"), (long)length, (long)len); apr_brigade_cleanup(session->bbtmp); @@ -541,20 +550,23 @@ static int on_send_data_cb(nghttp2_session *ngh2, if (padlen) { b = apr_bucket_immortal_create(immortal_zeros, padlen, - session->c->bucket_alloc); + session->c1->bucket_alloc); APR_BRIGADE_INSERT_TAIL(session->bbtmp, b); } - status = h2_conn_io_pass(&session->io, session->bbtmp); + status = h2_c1_io_append(&session->io, session->bbtmp); apr_brigade_cleanup(session->bbtmp); if (status == APR_SUCCESS) { stream->out_data_frames++; stream->out_data_octets += length; + ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, session->c1, + H2_STRM_MSG(stream, "sent data length=%ld, 
total=%ld"), + (long)length, (long)stream->out_data_octets); return 0; } else { - ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, session->c, + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, session->c1, H2_STRM_LOG(APLOGNO(02925), stream, "failed send_data_cb")); return NGHTTP2_ERR_CALLBACK_FAILURE; } @@ -578,18 +590,27 @@ static int on_frame_send_cb(nghttp2_session *ngh2, break; } - if (APLOGcdebug(session->c)) { + stream = get_stream(session, stream_id); + if (APLOGcdebug(session->c1)) { char buffer[256]; h2_util_frame_print(frame, buffer, sizeof(buffer)/sizeof(buffer[0])); - ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, - H2_SSSN_LOG(APLOGNO(03068), session, - "sent FRAME[%s], frames=%ld/%ld (r/s)"), - buffer, (long)session->frames_received, - (long)session->frames_sent); + if (stream) { + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c1, + H2_STRM_LOG(APLOGNO(10303), stream, + "sent FRAME[%s], frames=%ld/%ld (r/s)"), + buffer, (long)session->frames_received, + (long)session->frames_sent); + } + else { + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c1, + H2_SSSN_LOG(APLOGNO(03068), session, + "sent FRAME[%s], frames=%ld/%ld (r/s)"), + buffer, (long)session->frames_received, + (long)session->frames_sent); + } } - stream = h2_session_stream_get(session, stream_id); if (stream) { h2_stream_send_frame(stream, frame->hd.type, frame->hd.flags, frame->hd.length + H2_FRAME_HDR_LEN); @@ -598,7 +619,7 @@ static int on_frame_send_cb(nghttp2_session *ngh2, } #ifdef H2_NG2_INVALID_HEADER_CB -static int on_invalid_header_cb(nghttp2_session *ngh2, +static int on_invalid_header_cb(nghttp2_session *ngh2, const nghttp2_frame *frame, const uint8_t *name, size_t namelen, const uint8_t *value, size_t valuelen, @@ -607,14 +628,11 @@ static int on_invalid_header_cb(nghttp2_session *ngh2, h2_session *session = user_data; h2_stream *stream; - if (APLOGcdebug(session->c)) { - ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, APLOGNO(03456) - "h2_stream(%ld-%d): invalid header '%s: %s'", - session->id, (int)frame->hd.stream_id, - apr_pstrndup(session->pool, (const char *)name, namelen), - apr_pstrndup(session->pool, (const char *)value, valuelen)); - } - stream = h2_session_stream_get(session, frame->hd.stream_id); + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c1, APLOGNO(03456) + H2_SSSN_STRM_MSG(session, frame->hd.stream_id, + "invalid header '%.*s: %.*s'"), + (int)namelen, name, (int)valuelen, value); + stream = get_stream(session, frame->hd.stream_id); if (stream) { h2_stream_rst(stream, NGHTTP2_PROTOCOL_ERROR); } @@ -622,6 +640,37 @@ static int on_invalid_header_cb(nghttp2_session *ngh2, } #endif +static ssize_t select_padding_cb(nghttp2_session *ngh2, + const nghttp2_frame *frame, + size_t max_payloadlen, void *user_data) +{ + h2_session *session = user_data; + size_t frame_len = frame->hd.length + H2_FRAME_HDR_LEN; /* the total length without padding */ + size_t padded_len = frame_len; + + /* Determine # of padding bytes to append to frame. Unless session->padding_always + * the number my be capped by the ui.write_size that currently applies. 
+ */ + if (session->padding_max) { + int n = ap_random_pick(0, session->padding_max); + padded_len = H2MIN(max_payloadlen + H2_FRAME_HDR_LEN, frame_len + n); + } + + if (padded_len != frame_len) { + if (!session->padding_always && session->io.write_size + && (padded_len > session->io.write_size) + && (frame_len <= session->io.write_size)) { + padded_len = session->io.write_size; + } + ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, session->c1, + "select padding from [%d, %d]: %d (frame length: 0x%04x, write size: %d)", + (int)frame_len, (int)max_payloadlen+H2_FRAME_HDR_LEN, + (int)(padded_len - frame_len), (int)padded_len, (int)session->io.write_size); + return padded_len - H2_FRAME_HDR_LEN; + } + return frame->hd.length; +} + #define NGH2_SET_CALLBACK(callbacks, name, fn)\ nghttp2_session_callbacks_set_##name##_callback(callbacks, fn) @@ -647,9 +696,37 @@ static apr_status_t init_callbacks(conn_rec *c, nghttp2_session_callbacks **pcb) #ifdef H2_NG2_INVALID_HEADER_CB NGH2_SET_CALLBACK(*pcb, on_invalid_header, on_invalid_header_cb); #endif + NGH2_SET_CALLBACK(*pcb, select_padding, select_padding_cb); return APR_SUCCESS; } +static void update_child_status(h2_session *session, int status, + const char *msg, const h2_stream *stream) +{ + /* Assume that we also change code/msg when something really happened and + * avoid updating the scoreboard in between */ + if (session->last_status_code != status + || session->last_status_msg != msg) { + char sbuffer[1024]; + sbuffer[0] = '\0'; + if (stream) { + apr_snprintf(sbuffer, sizeof(sbuffer), + ": stream %d, %s %s", + stream->id, + stream->request? stream->request->method : "", + stream->request? stream->request->path : ""); + } + apr_snprintf(session->status, sizeof(session->status), + "[%d/%d] %s%s", + (int)(session->remote.emitted_count + session->pushes_submitted), + (int)session->streams_done, + msg? msg : "-", sbuffer); + ap_update_child_status_from_server(session->c1->sbh, status, + session->c1, session->s); + ap_update_child_status_descr(session->c1->sbh, status, session->status); + } +} + static apr_status_t h2_session_shutdown_notice(h2_session *session) { apr_status_t status; @@ -663,9 +740,9 @@ static apr_status_t h2_session_shutdown_notice(h2_session *session) session->local.accepting = 0; status = nghttp2_session_send(session->ngh2); if (status == APR_SUCCESS) { - status = h2_conn_io_flush(&session->io); + status = h2_c1_io_assure_flushed(&session->io); } - ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c1, H2_SSSN_LOG(APLOGNO(03457), session, "sent shutdown notice")); return status; } @@ -679,10 +756,13 @@ static apr_status_t h2_session_shutdown(h2_session *session, int error, if (session->local.shutdown) { return APR_SUCCESS; } - if (!msg && error) { - msg = nghttp2_strerror(error); + + if (error && !msg) { + if (APR_STATUS_IS_EPIPE(error)) { + msg = "remote close"; + } } - + if (error || force_close) { /* not a graceful shutdown, we want to leave... * Do not start further streams that are waiting to be scheduled. @@ -691,8 +771,9 @@ static apr_status_t h2_session_shutdown(h2_session *session, int error, * Remove all streams greater than this number without submitting * a RST_STREAM frame, since that should be clear from the GOAWAY * we send. */ - session->local.accepted_max = h2_mplx_shutdown(session->mplx); + session->local.accepted_max = h2_mplx_c1_shutdown(session->mplx); session->local.error = error; + session->local.error_msg = msg; } else { /* graceful shutdown. 
we will continue processing all streams @@ -702,25 +783,25 @@ static apr_status_t h2_session_shutdown(h2_session *session, int error, session->local.accepting = 0; session->local.shutdown = 1; - if (!session->c->aborted) { + if (!session->c1->aborted) { nghttp2_submit_goaway(session->ngh2, NGHTTP2_FLAG_NONE, session->local.accepted_max, error, (uint8_t*)msg, msg? strlen(msg):0); status = nghttp2_session_send(session->ngh2); if (status == APR_SUCCESS) { - status = h2_conn_io_flush(&session->io); + status = h2_c1_io_assure_flushed(&session->io); } - ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c1, H2_SSSN_LOG(APLOGNO(03069), session, "sent GOAWAY, err=%d, msg=%s"), error, msg? msg : ""); } - dispatch_event(session, H2_SESSION_EV_LOCAL_GOAWAY, error, msg); + h2_session_dispatch_event(session, H2_SESSION_EV_LOCAL_GOAWAY, error, msg); return status; } static apr_status_t session_cleanup(h2_session *session, const char *trigger) { - conn_rec *c = session->c; + conn_rec *c = session->c1; ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c, H2_SSSN_MSG(session, "pool_cleanup")); @@ -734,40 +815,54 @@ static apr_status_t session_cleanup(h2_session *session, const char *trigger) * connection when sending the next request, this has the effect * that at least this one request will fail. */ - ap_log_cerror(APLOG_MARK, APLOG_WARNING, 0, c, + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, c, H2_SSSN_LOG(APLOGNO(03199), session, "connection disappeared without proper " "goodbye, clients will be confused, should not happen")); } + if (!h2_iq_empty(session->ready_to_process)) { + int sid; + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, c, + H2_SSSN_LOG(APLOGNO(10485), session, + "cleanup, resetting %d streams in ready-to-process"), + h2_iq_count(session->ready_to_process)); + while ((sid = h2_iq_shift(session->ready_to_process)) > 0) { + h2_mplx_c1_client_rst(session->mplx, sid, get_stream(session, sid)); + } + } + transit(session, trigger, H2_SESSION_ST_CLEANUP); - h2_mplx_release_and_join(session->mplx, session->iowait); + h2_mplx_c1_destroy(session->mplx); session->mplx = NULL; ap_assert(session->ngh2); nghttp2_session_del(session->ngh2); session->ngh2 = NULL; - h2_ctx_clear(c); - - + h2_conn_ctx_detach(c); + return APR_SUCCESS; } static apr_status_t session_pool_cleanup(void *data) { conn_rec *c = data; - h2_session *session; - h2_ctx *ctx = h2_ctx_get(c, 0); - - if (ctx && (session = h2_ctx_session_get(ctx))) { + h2_conn_ctx_t *conn_ctx = h2_conn_ctx_get(c); + h2_session *session = conn_ctx? conn_ctx->session : NULL; + + if (session) { + int mpm_state = 0; + int level; + + ap_mpm_query(AP_MPMQ_MPM_STATE, &mpm_state); + level = (AP_MPMQ_STOPPING == mpm_state)? APLOG_DEBUG : APLOG_WARNING; /* if the session is still there, now is the last chance * to perform cleanup. Normally, cleanup should have happened - * earlier in the connection pre_close. Main reason is that - * any ongoing requests on slave connections might still access - * data which has, at this time, already been freed. An example - * is mod_ssl that uses request hooks. */ - ap_log_cerror(APLOG_MARK, APLOG_WARNING, 0, c, - H2_SSSN_LOG(APLOGNO(10020), session, + * earlier in the connection pre_close. + * However, when the server is stopping, it may shutdown connections + * without running the pre_close hooks. Do not want about that. */ + ap_log_cerror(APLOG_MARK, level, 0, c, + H2_SSSN_LOG(APLOGNO(10020), session, "session cleanup triggered by pool cleanup. 
" "this should have happened earlier already.")); return session_cleanup(session, "pool cleanup"); @@ -775,101 +870,83 @@ static apr_status_t session_pool_cleanup(void *data) return APR_SUCCESS; } -static apr_status_t h2_session_create_int(h2_session **psession, - conn_rec *c, - request_rec *r, - h2_ctx *ctx, - h2_workers *workers) +static /* atomic */ apr_uint32_t next_id; + +apr_status_t h2_session_create(h2_session **psession, conn_rec *c, request_rec *r, + server_rec *s, h2_workers *workers) { nghttp2_session_callbacks *callbacks = NULL; nghttp2_option *options = NULL; - apr_allocator_t *allocator; - apr_thread_mutex_t *mutex; uint32_t n; + int thread_num; apr_pool_t *pool = NULL; h2_session *session; + h2_stream *stream0; apr_status_t status; int rv; *psession = NULL; - status = apr_allocator_create(&allocator); - if (status != APR_SUCCESS) { - return status; - } - apr_allocator_max_free_set(allocator, ap_max_mem_free); - apr_pool_create_ex(&pool, c->pool, NULL, allocator); - if (!pool) { - apr_allocator_destroy(allocator); - return APR_ENOMEM; - } + apr_pool_create(&pool, c->pool); apr_pool_tag(pool, "h2_session"); - apr_allocator_owner_set(allocator, pool); - status = apr_thread_mutex_create(&mutex, APR_THREAD_MUTEX_DEFAULT, pool); - if (status != APR_SUCCESS) { - apr_pool_destroy(pool); - return APR_ENOMEM; - } - apr_allocator_mutex_set(allocator, mutex); - session = apr_pcalloc(pool, sizeof(h2_session)); if (!session) { return APR_ENOMEM; } *psession = session; - session->id = c->id; - session->c = c; + /* c->id does not give a unique id for the lifetime of the session. + * mpms like event change c->id when re-activating a keepalive + * connection based on the child_num+thread_num of the worker + * processing it. + * We'd like to have an id that remains constant and unique bc + * h2 streams can live through keepalive periods. While double id + * will not lead to processing failures, it will confuse log analysis. 
+ */ +#if AP_MODULE_MAGIC_AT_LEAST(20211221, 8) + ap_sb_get_child_thread(c->sbh, &session->child_num, &thread_num); +#else + (void)thread_num; + session->child_num = (int)getpid(); +#endif + session->id = apr_atomic_inc32(&next_id); + session->c1 = c; session->r = r; - session->s = h2_ctx_server_get(ctx); + session->s = s; session->pool = pool; - session->config = h2_config_sget(session->s); session->workers = workers; session->state = H2_SESSION_ST_INIT; session->local.accepting = 1; session->remote.accepting = 1; - session->max_stream_count = h2_config_geti(session->config, - H2_CONF_MAX_STREAMS); - session->max_stream_mem = h2_config_geti(session->config, - H2_CONF_STREAM_MAX_MEM); - - status = apr_thread_cond_create(&session->iowait, session->pool); - if (status != APR_SUCCESS) { - apr_pool_destroy(pool); - return status; - } - - session->in_pending = h2_iq_create(session->pool, (int)session->max_stream_count); - if (session->in_pending == NULL) { - apr_pool_destroy(pool); - return APR_ENOMEM; - } + session->max_stream_count = h2_config_sgeti(s, H2_CONF_MAX_STREAMS); + session->max_stream_mem = h2_config_sgeti(s, H2_CONF_STREAM_MAX_MEM); + session->max_data_frame_len = h2_config_sgeti(s, H2_CONF_MAX_DATA_FRAME_LEN); + + session->out_c1_blocked = h2_iq_create(session->pool, (int)session->max_stream_count); + session->ready_to_process = h2_iq_create(session->pool, (int)session->max_stream_count); - session->in_process = h2_iq_create(session->pool, (int)session->max_stream_count); - if (session->in_process == NULL) { - apr_pool_destroy(pool); - return APR_ENOMEM; - } - session->monitor = apr_pcalloc(pool, sizeof(h2_stream_monitor)); - if (session->monitor == NULL) { - apr_pool_destroy(pool); - return APR_ENOMEM; - } session->monitor->ctx = session; session->monitor->on_state_enter = on_stream_state_enter; session->monitor->on_state_event = on_stream_state_event; session->monitor->on_event = on_stream_event; - - session->mplx = h2_mplx_create(c, session->pool, session->config, - workers); - - /* connection input filter that feeds the session */ - session->cin = h2_filter_cin_create(session); - ap_add_input_filter("H2_IN", session->cin, r, c); - - h2_conn_io_init(&session->io, c, session->config); + + stream0 = h2_stream_create(0, session->pool, session, NULL, 0); + stream0->c2 = session->c1; /* stream0's connection is the main connection */ + session->mplx = h2_mplx_c1_create(session->child_num, session->id, + stream0, s, session->pool, workers); + if (!session->mplx) { + apr_pool_destroy(pool); + return APR_ENOTIMPL; + } + + h2_c1_io_init(&session->io, session); + session->padding_max = h2_config_sgeti(s, H2_CONF_PADDING_BITS); + if (session->padding_max) { + session->padding_max = (0x01 << session->padding_max) - 1; + } + session->padding_always = h2_config_sgeti(s, H2_CONF_PADDING_ALWAYS); session->bbtmp = apr_brigade_create(session->pool, c->bucket_alloc); status = init_callbacks(c, &callbacks); @@ -888,12 +965,23 @@ static apr_status_t h2_session_create_int(h2_session **psession, apr_pool_destroy(pool); return status; } - nghttp2_option_set_peer_max_concurrent_streams( - options, (uint32_t)session->max_stream_count); + nghttp2_option_set_peer_max_concurrent_streams(options, (uint32_t)session->max_stream_count); /* We need to handle window updates ourself, otherwise we * get flooded by nghttp2. 
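Since the session handles WINDOW_UPDATEs itself (the option set just below disables nghttp2's automatic updates), flow-control credit has to be handed back explicitly once DATA has really been consumed; a minimal sketch (stream_id 0 addresses the connection window, as h2_session_start() further down does for the initial window):

    #include <nghttp2/nghttp2.h>

    static int return_credit(nghttp2_session *ngh2, int32_t stream_id, int32_t consumed)
    {
        /* the WINDOW_UPDATE frame is queued here and written out by the next
         * nghttp2_session_send() */
        return nghttp2_submit_window_update(ngh2, NGHTTP2_FLAG_NONE,
                                            stream_id, consumed);
    }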
*/ nghttp2_option_set_no_auto_window_update(options, 1); - +#ifdef H2_NG2_NO_CLOSED_STREAMS + /* We do not want nghttp2 to keep information about closed streams as + * that accumulates memory on long connections. This makes PRIORITY + * setting in relation to older streams non-working. */ + nghttp2_option_set_no_closed_streams(options, 1); +#endif +#ifdef H2_NG2_RFC9113_STRICTNESS + /* nghttp2 v1.50.0 introduces the strictness checks on leading/trailing + * whitespace of RFC 9113 for fields. But, by default, it RST streams + * carrying such. We do not want that. We want to strip the ws and + * handle them, just like the HTTP/1.1 parser does. */ + nghttp2_option_set_no_rfc9113_leading_and_trailing_ws_validation(options, 1); +#endif rv = nghttp2_session_server_new2(&session->ngh2, callbacks, session, options); nghttp2_session_callbacks_del(callbacks); @@ -907,7 +995,7 @@ static apr_status_t h2_session_create_int(h2_session **psession, return APR_ENOMEM; } - n = h2_config_geti(session->config, H2_CONF_PUSH_DIARY_SIZE); + n = h2_config_sgeti(s, H2_CONF_PUSH_DIARY_SIZE); session->push_diary = h2_push_diary_create(session->pool, n); if (APLOGcdebug(c)) { @@ -915,35 +1003,26 @@ static apr_status_t h2_session_create_int(h2_session **psession, H2_SSSN_LOG(APLOGNO(03200), session, "created, max_streams=%d, stream_mem=%d, " "workers_limit=%d, workers_max=%d, " - "push_diary(type=%d,N=%d)"), + "push_diary(type=%d,N=%d), " + "max_data_frame_len=%d"), (int)session->max_stream_count, (int)session->max_stream_mem, - session->mplx->limit_active, - session->mplx->max_active, + session->mplx->processing_limit, + session->mplx->processing_max, session->push_diary->dtype, - (int)session->push_diary->N); + (int)session->push_diary->N, + (int)session->max_data_frame_len); } - apr_pool_pre_cleanup_register(pool, c, session_pool_cleanup); + apr_pool_pre_cleanup_register(pool, c, session_pool_cleanup); + return APR_SUCCESS; } -apr_status_t h2_session_create(h2_session **psession, - conn_rec *c, h2_ctx *ctx, h2_workers *workers) -{ - return h2_session_create_int(psession, c, NULL, ctx, workers); -} - -apr_status_t h2_session_rcreate(h2_session **psession, - request_rec *r, h2_ctx *ctx, h2_workers *workers) -{ - return h2_session_create_int(psession, r->connection, r, ctx, workers); -} - static apr_status_t h2_session_start(h2_session *session, int *rv) { apr_status_t status = APR_SUCCESS; - nghttp2_settings_entry settings[3]; + nghttp2_settings_entry settings[4]; size_t slen; int win_size; @@ -1004,14 +1083,21 @@ static apr_status_t h2_session_start(h2_session *session, int *rv) settings[slen].settings_id = NGHTTP2_SETTINGS_MAX_CONCURRENT_STREAMS; settings[slen].value = (uint32_t)session->max_stream_count; ++slen; - win_size = h2_config_geti(session->config, H2_CONF_WIN_SIZE); + win_size = h2_config_sgeti(session->s, H2_CONF_WIN_SIZE); if (win_size != H2_INITIAL_WINDOW_SIZE) { settings[slen].settings_id = NGHTTP2_SETTINGS_INITIAL_WINDOW_SIZE; settings[slen].value = win_size; ++slen; } - - ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, session->c, +#if H2_USE_WEBSOCKETS + if (h2_config_sgeti(session->s, H2_CONF_WEBSOCKETS)) { + settings[slen].settings_id = NGHTTP2_SETTINGS_ENABLE_CONNECT_PROTOCOL; + settings[slen].value = 1; + ++slen; + } +#endif + + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, session->c1, H2_SSSN_LOG(APLOGNO(03201), session, "start, INITIAL_WINDOW_SIZE=%ld, MAX_CONCURRENT_STREAMS=%d"), (long)win_size, (int)session->max_stream_count); @@ -1019,7 +1105,7 @@ static apr_status_t 
h2_session_start(h2_session *session, int *rv) settings, slen); if (*rv != 0) { status = APR_EGENERAL; - ap_log_cerror(APLOG_MARK, APLOG_ERR, status, session->c, + ap_log_cerror(APLOG_MARK, APLOG_ERR, status, session->c1, H2_SSSN_LOG(APLOGNO(02935), session, "nghttp2_submit_settings: %s"), nghttp2_strerror(*rv)); } @@ -1037,7 +1123,7 @@ static apr_status_t h2_session_start(h2_session *session, int *rv) 0, NGHTTP2_MAX_WINDOW_SIZE - win_size); if (*rv != 0) { status = APR_EGENERAL; - ap_log_cerror(APLOG_MARK, APLOG_ERR, status, session->c, + ap_log_cerror(APLOG_MARK, APLOG_ERR, status, session->c1, H2_SSSN_LOG(APLOGNO(02970), session, "nghttp2_submit_window_update: %s"), nghttp2_strerror(*rv)); @@ -1047,87 +1133,6 @@ static apr_status_t h2_session_start(h2_session *session, int *rv) return status; } -static apr_status_t on_stream_headers(h2_session *session, h2_stream *stream, - h2_headers *headers, apr_off_t len, - int eos); - -static ssize_t stream_data_cb(nghttp2_session *ng2s, - int32_t stream_id, - uint8_t *buf, - size_t length, - uint32_t *data_flags, - nghttp2_data_source *source, - void *puser) -{ - h2_session *session = (h2_session *)puser; - apr_off_t nread = length; - int eos = 0; - apr_status_t status; - h2_stream *stream; - ap_assert(session); - - /* The session wants to send more DATA for the stream. We need - * to find out how much of the requested length we can send without - * blocking. - * Indicate EOS when we encounter it or DEFERRED if the stream - * should be suspended. Beware of trailers. - */ - - (void)ng2s; - (void)buf; - (void)source; - stream = h2_session_stream_get(session, stream_id); - if (!stream) { - ap_log_cerror(APLOG_MARK, APLOG_ERR, 0, session->c, - APLOGNO(02937) - "h2_stream(%ld-%d): data_cb, stream not found", - session->id, (int)stream_id); - return NGHTTP2_ERR_CALLBACK_FAILURE; - } - - status = h2_stream_out_prepare(stream, &nread, &eos, NULL); - if (nread) { - ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, session->c, - H2_STRM_MSG(stream, "prepared no_copy, len=%ld, eos=%d"), - (long)nread, eos); - *data_flags |= NGHTTP2_DATA_FLAG_NO_COPY; - } - - switch (status) { - case APR_SUCCESS: - break; - - case APR_EOF: - eos = 1; - break; - - case APR_ECONNRESET: - case APR_ECONNABORTED: - return NGHTTP2_ERR_CALLBACK_FAILURE; - - case APR_EAGAIN: - /* If there is no data available, our session will automatically - * suspend this stream and not ask for more data until we resume - * it. Remember at our h2_stream that we need to do this. 
- */ - nread = 0; - ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, - H2_STRM_LOG(APLOGNO(03071), stream, "suspending")); - return NGHTTP2_ERR_DEFERRED; - - default: - nread = 0; - ap_log_cerror(APLOG_MARK, APLOG_ERR, status, session->c, - H2_STRM_LOG(APLOGNO(02938), stream, "reading data")); - return NGHTTP2_ERR_CALLBACK_FAILURE; - } - - if (eos) { - *data_flags |= NGHTTP2_DATA_FLAG_EOF; - } - return (ssize_t)nread; -} - struct h2_stream *h2_session_push(h2_session *session, h2_stream *is, h2_push *push) { @@ -1142,21 +1147,21 @@ struct h2_stream *h2_session_push(h2_session *session, h2_stream *is, ngh->nv, ngh->nvlen, NULL); } if (status != APR_SUCCESS || nid <= 0) { - ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, session->c, + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, session->c1, H2_STRM_LOG(APLOGNO(03075), is, "submitting push promise fail: %s"), nghttp2_strerror(nid)); return NULL; } ++session->pushes_promised; - ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c1, H2_STRM_LOG(APLOGNO(03076), is, "SERVER_PUSH %d for %s %s on %d"), nid, push->req->method, push->req->path, is->id); stream = h2_session_open_stream(session, nid, is->id); if (!stream) { - ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, - H2_STRM_LOG(APLOGNO(03077), stream, + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c1, + H2_STRM_LOG(APLOGNO(03077), is, "failed to create stream obj %d"), nid); /* kill the push_promise */ nghttp2_submit_rst_stream(session->ngh2, NGHTTP2_FLAG_NONE, nid, @@ -1166,7 +1171,6 @@ struct h2_stream *h2_session_push(h2_session *session, h2_stream *is, h2_session_set_prio(session, stream, push->priority); h2_stream_set_request(stream, push->req); - ++session->unsent_promises; return stream; } @@ -1181,7 +1185,6 @@ apr_status_t h2_session_set_prio(h2_session *session, h2_stream *stream, const h2_priority *prio) { apr_status_t status = APR_SUCCESS; -#ifdef H2_NG2_CHANGE_PRIO nghttp2_stream *s_grandpa, *s_parent, *s; if (prio == NULL) { @@ -1190,7 +1193,7 @@ apr_status_t h2_session_set_prio(h2_session *session, h2_stream *stream, } s = nghttp2_session_find_stream(session->ngh2, stream->id); if (!s) { - ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, session->c, + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, session->c1, H2_STRM_MSG(stream, "lookup of nghttp2_stream failed")); return APR_EINVAL; } @@ -1239,10 +1242,10 @@ apr_status_t h2_session_set_prio(h2_session *session, h2_stream *stream, id_grandpa = nghttp2_stream_get_stream_id(s_grandpa); rv = nghttp2_session_change_stream_priority(session->ngh2, id_parent, &ps); if (rv < 0) { - ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, APLOGNO(03202) - "h2_stream(%ld-%d): PUSH BEFORE, weight=%d, " - "depends=%d, returned=%d", - session->id, id_parent, ps.weight, ps.stream_id, rv); + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c1, APLOGNO(03202) + H2_SSSN_STRM_MSG(session, id_parent, + "PUSH BEFORE, weight=%d, depends=%d, returned=%d"), + ps.weight, ps.stream_id, rv); return APR_EGENERAL; } nghttp2_priority_spec_init(&ps, id_grandpa, w, 0); @@ -1261,18 +1264,13 @@ apr_status_t h2_session_set_prio(h2_session *session, h2_stream *stream, rv = nghttp2_session_change_stream_priority(session->ngh2, stream->id, &ps); - ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, - ""H2_STRM_LOG(APLOGNO(03203), stream, + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c1, + H2_STRM_LOG(APLOGNO(03203), stream, "PUSH %s, weight=%d, depends=%d, returned=%d"), ptype, ps.weight, 
ps.stream_id, rv); status = (rv < 0)? APR_EGENERAL : APR_SUCCESS; } -#else - (void)session; - (void)stream; - (void)prio; - (void)valid_weight; -#endif + return status; } @@ -1280,338 +1278,89 @@ int h2_session_push_enabled(h2_session *session) { /* iff we can and they can and want */ return (session->remote.accepting /* remote GOAWAY received */ - && h2_config_geti(session->config, H2_CONF_PUSH) + && h2_config_sgeti(session->s, H2_CONF_PUSH) && nghttp2_session_get_remote_settings(session->ngh2, NGHTTP2_SETTINGS_ENABLE_PUSH)); } -static apr_status_t h2_session_send(h2_session *session) +static int h2_session_want_send(h2_session *session) { - apr_interval_time_t saved_timeout; - int rv; - apr_socket_t *socket; - - socket = ap_get_conn_socket(session->c); - if (socket) { - apr_socket_timeout_get(socket, &saved_timeout); - apr_socket_timeout_set(socket, session->s->timeout); - } - - rv = nghttp2_session_send(session->ngh2); - - if (socket) { - apr_socket_timeout_set(socket, saved_timeout); - } - session->have_written = 1; - if (rv != 0 && rv != NGHTTP2_ERR_WOULDBLOCK) { - if (nghttp2_is_fatal(rv)) { - dispatch_event(session, H2_SESSION_EV_PROTO_ERROR, rv, nghttp2_strerror(rv)); - return APR_EGENERAL; - } - } - - session->unsent_promises = 0; - session->unsent_submits = 0; - - return APR_SUCCESS; + return nghttp2_session_want_write(session->ngh2) + || h2_c1_io_pending(&session->io); } -/** - * headers for the stream are ready. - */ -static apr_status_t on_stream_headers(h2_session *session, h2_stream *stream, - h2_headers *headers, apr_off_t len, - int eos) +static apr_status_t h2_session_send(h2_session *session) { - apr_status_t status = APR_SUCCESS; - int rv = 0; + int ngrv, pending = 0; + apr_status_t rv = APR_SUCCESS; - ap_assert(session); - ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, session->c, - H2_STRM_MSG(stream, "on_headers")); - if (headers->status < 100) { - h2_stream_rst(stream, headers->status); - goto leave; - } - else if (stream->has_response) { - h2_ngheader *nh; - - status = h2_res_create_ngtrailer(&nh, stream->pool, headers); - - ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, session->c, - H2_STRM_LOG(APLOGNO(03072), stream, "submit %d trailers"), - (int)nh->nvlen); - if (status == APR_SUCCESS) { - rv = nghttp2_submit_trailer(session->ngh2, stream->id, - nh->nv, nh->nvlen); - } - else { - ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, session->c, - H2_STRM_LOG(APLOGNO(10024), stream, "invalid trailers")); - h2_stream_rst(stream, NGHTTP2_PROTOCOL_ERROR); - } - goto leave; - } - else { - nghttp2_data_provider provider, *pprovider = NULL; - h2_ngheader *ngh; - const char *note; - - ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, - H2_STRM_LOG(APLOGNO(03073), stream, "submit response %d, REMOTE_WINDOW_SIZE=%u"), - headers->status, - (unsigned int)nghttp2_session_get_stream_remote_window_size(session->ngh2, stream->id)); - - if (!eos || len > 0) { - memset(&provider, 0, sizeof(provider)); - provider.source.fd = stream->id; - provider.read_callback = stream_data_cb; - pprovider = &provider; - } - - /* If this stream is not a pushed one itself, - * and HTTP/2 server push is enabled here, - * and the response HTTP status is not sth >= 400, - * and the remote side has pushing enabled, - * -> find and perform any pushes on this stream - * *before* we submit the stream response itself. - * This helps clients avoid opening new streams on Link - * headers that get pushed right afterwards. 
- * - * *) the response code is relevant, as we do not want to - * make pushes on 401 or 403 codes and friends. - * And if we see a 304, we do not push either - * as the client, having this resource in its cache, might - * also have the pushed ones as well. - */ - if (!stream->initiated_on - && !stream->has_response - && stream->request && stream->request->method - && !strcmp("GET", stream->request->method) - && (headers->status < 400) - && (headers->status != 304) - && h2_session_push_enabled(session)) { - - h2_stream_submit_pushes(stream, headers); - } - - if (!stream->pref_priority) { - stream->pref_priority = h2_stream_get_priority(stream, headers); - } - h2_session_set_prio(session, stream, stream->pref_priority); - - note = apr_table_get(headers->notes, H2_FILTER_DEBUG_NOTE); - if (note && !strcmp("on", note)) { - int32_t connFlowIn, connFlowOut; - - connFlowIn = nghttp2_session_get_effective_local_window_size(session->ngh2); - connFlowOut = nghttp2_session_get_remote_window_size(session->ngh2); - headers = h2_headers_copy(stream->pool, headers); - apr_table_setn(headers->headers, "conn-flow-in", - apr_itoa(stream->pool, connFlowIn)); - apr_table_setn(headers->headers, "conn-flow-out", - apr_itoa(stream->pool, connFlowOut)); - } - - if (headers->status == 103 - && !h2_config_geti(session->config, H2_CONF_EARLY_HINTS)) { - /* suppress sending this to the client, it might have triggered - * pushes and served its purpose nevertheless */ - rv = 0; - goto leave; - } - - status = h2_res_create_ngheader(&ngh, stream->pool, headers); - if (status == APR_SUCCESS) { - rv = nghttp2_submit_response(session->ngh2, stream->id, - ngh->nv, ngh->nvlen, pprovider); - stream->has_response = h2_headers_are_response(headers); - session->have_written = 1; - - if (stream->initiated_on) { - ++session->pushes_submitted; - } - else { - ++session->responses_submitted; + while (nghttp2_session_want_write(session->ngh2)) { + ngrv = nghttp2_session_send(session->ngh2); + ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, session->c1, + "nghttp2_session_send: %d", (int)ngrv); + pending = 1; + if (ngrv != 0 && ngrv != NGHTTP2_ERR_WOULDBLOCK) { + if (nghttp2_is_fatal(ngrv)) { + h2_session_dispatch_event(session, H2_SESSION_EV_PROTO_ERROR, + ngrv, nghttp2_strerror(ngrv)); + rv = APR_EGENERAL; + goto cleanup; } } - else { - ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, session->c, - H2_STRM_LOG(APLOGNO(10025), stream, "invalid response")); - h2_stream_rst(stream, NGHTTP2_PROTOCOL_ERROR); + if (h2_c1_io_needs_flush(&session->io) || + ngrv == NGHTTP2_ERR_WOULDBLOCK) { + rv = h2_c1_io_assure_flushed(&session->io); + if (rv != APR_SUCCESS) + goto cleanup; + pending = 0; } } - -leave: - if (nghttp2_is_fatal(rv)) { - status = APR_EGENERAL; - dispatch_event(session, H2_SESSION_EV_PROTO_ERROR, rv, nghttp2_strerror(rv)); - ap_log_cerror(APLOG_MARK, APLOG_ERR, status, session->c, - APLOGNO(02940) "submit_response: %s", - nghttp2_strerror(rv)); + if (pending) { + rv = h2_c1_io_pass(&session->io); } - - ++session->unsent_submits; - - /* Unsent push promises are written immediately, as nghttp2 - * 1.5.0 realizes internal stream data structures only on - * send and we might need them for other submits. - * Also, to conserve memory, we send at least every 10 submits - * so that nghttp2 does not buffer all outbound items too - * long. 
- */ - if (status == APR_SUCCESS - && (session->unsent_promises || session->unsent_submits > 10)) { - status = h2_session_send(session); +cleanup: + if (rv != APR_SUCCESS) { + h2_session_dispatch_event(session, H2_SESSION_EV_CONN_ERROR, rv, NULL); } - return status; + return rv; } /** - * A stream was resumed as new response/output data arrived. + * A streams input state has changed. */ -static apr_status_t on_stream_resume(void *ctx, h2_stream *stream) +static void on_stream_input(void *ctx, h2_stream *stream) { h2_session *session = ctx; - apr_status_t status = APR_EAGAIN; - int rv; - apr_off_t len = 0; - int eos = 0; - h2_headers *headers; - - ap_assert(stream); - ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, session->c, - H2_STRM_MSG(stream, "on_resume")); - -send_headers: - headers = NULL; - status = h2_stream_out_prepare(stream, &len, &eos, &headers); - ap_log_cerror(APLOG_MARK, APLOG_TRACE2, status, session->c, - H2_STRM_MSG(stream, "prepared len=%ld, eos=%d"), - (long)len, eos); - if (headers) { - status = on_stream_headers(session, stream, headers, len, eos); - if (status != APR_SUCCESS || stream->rst_error) { - return status; - } - goto send_headers; - } - else if (status != APR_EAGAIN) { - /* we have DATA to send */ - if (!stream->has_response) { - /* but no response */ - ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, - H2_STRM_LOG(APLOGNO(03466), stream, - "no response, RST_STREAM")); - h2_stream_rst(stream, H2_ERR_PROTOCOL_ERROR); - return APR_SUCCESS; - } - rv = nghttp2_session_resume_data(session->ngh2, stream->id); - session->have_written = 1; - ap_log_cerror(APLOG_MARK, nghttp2_is_fatal(rv)? - APLOG_ERR : APLOG_DEBUG, 0, session->c, - H2_STRM_LOG(APLOGNO(02936), stream, "resumed")); - } - return status; -} -static void h2_session_in_flush(h2_session *session) -{ - int id; - - while ((id = h2_iq_shift(session->in_process)) > 0) { - h2_stream *stream = h2_session_stream_get(session, id); - if (stream) { - ap_assert(!stream->scheduled); - if (h2_stream_prep_processing(stream) == APR_SUCCESS) { - h2_mplx_process(session->mplx, stream, stream_pri_cmp, session); - } - else { - h2_stream_rst(stream, H2_ERR_INTERNAL_ERROR); - } - } + ap_assert(stream); + ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, session->c1, + H2_STRM_MSG(stream, "on_input change")); + update_child_status(session, SERVER_BUSY_READ, "read", stream); + if (stream->id == 0) { + /* input on primary connection available? read */ + h2_c1_read(session); } - - while ((id = h2_iq_shift(session->in_pending)) > 0) { - h2_stream *stream = h2_session_stream_get(session, id); - if (stream) { - h2_stream_flush_input(stream); - } + else { + h2_stream_on_input_change(stream); } } -static apr_status_t session_read(h2_session *session, apr_size_t readlen, int block) +/** + * A streams output state has changed. + */ +static void on_stream_output(void *ctx, h2_stream *stream) { - apr_status_t status, rstatus = APR_EAGAIN; - conn_rec *c = session->c; - apr_off_t read_start = session->io.bytes_read; - - while (1) { - /* H2_IN filter handles all incoming data against the session. - * We just pull at the filter chain to make it happen */ - status = ap_get_brigade(c->input_filters, - session->bbtmp, AP_MODE_READBYTES, - block? 
APR_BLOCK_READ : APR_NONBLOCK_READ, - H2MAX(APR_BUCKET_BUFF_SIZE, readlen)); - /* get rid of any possible data we do not expect to get */ - apr_brigade_cleanup(session->bbtmp); - - switch (status) { - case APR_SUCCESS: - /* successful read, reset our idle timers */ - rstatus = APR_SUCCESS; - if (block) { - /* successful blocked read, try unblocked to - * get more. */ - block = 0; - } - break; - case APR_EAGAIN: - return rstatus; - case APR_TIMEUP: - return status; - default: - if (session->io.bytes_read == read_start) { - /* first attempt failed */ - if (APR_STATUS_IS_ETIMEDOUT(status) - || APR_STATUS_IS_ECONNABORTED(status) - || APR_STATUS_IS_ECONNRESET(status) - || APR_STATUS_IS_EOF(status) - || APR_STATUS_IS_EBADF(status)) { - /* common status for a client that has left */ - ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, c, - H2_SSSN_MSG(session, "input gone")); - } - else { - /* uncommon status, log on INFO so that we see this */ - ap_log_cerror( APLOG_MARK, APLOG_DEBUG, status, c, - H2_SSSN_LOG(APLOGNO(02950), session, - "error reading, terminating")); - } - return status; - } - /* subsequent failure after success(es), return initial - * status. */ - return rstatus; - } - if ((session->io.bytes_read - read_start) > readlen) { - /* read enough in one go, give write a chance */ - ap_log_cerror(APLOG_MARK, APLOG_TRACE2, status, c, - H2_SSSN_MSG(session, "read enough, returning")); - break; - } + h2_session *session = ctx; + + ap_assert(stream); + ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, session->c1, + H2_STRM_MSG(stream, "on_output change")); + if (stream->id != 0) { + update_child_status(session, SERVER_BUSY_WRITE, "write", stream); + h2_stream_on_output_change(stream); } - return rstatus; } -static apr_status_t h2_session_read(h2_session *session, int block) -{ - apr_status_t status = session_read(session, session->max_stream_mem - * H2MAX(2, session->open_streams), - block); - h2_session_in_flush(session); - return status; -} static const char *StateNames[] = { "INIT", /* H2_SESSION_ST_INIT */ @@ -1630,40 +1379,14 @@ const char *h2_session_state_str(h2_session_state state) return StateNames[state]; } -static void update_child_status(h2_session *session, int status, const char *msg) -{ - /* Assume that we also change code/msg when something really happened and - * avoid updating the scoreboard in between */ - if (session->last_status_code != status - || session->last_status_msg != msg) { - apr_snprintf(session->status, sizeof(session->status), - "%s, streams: %d/%d/%d/%d/%d (open/recv/resp/push/rst)", - msg? 
msg : "-", - (int)session->open_streams, - (int)session->remote.emitted_count, - (int)session->responses_submitted, - (int)session->pushes_submitted, - (int)session->pushes_reset + session->streams_reset); - ap_update_child_status_descr(session->c->sbh, status, session->status); - } -} - static void transit(h2_session *session, const char *action, h2_session_state nstate) { - apr_time_t timeout; - int ostate, loglvl; - const char *s; - + int ostate; + if (session->state != nstate) { ostate = session->state; - session->state = nstate; - - loglvl = APLOG_DEBUG; - if ((ostate == H2_SESSION_ST_BUSY && nstate == H2_SESSION_ST_WAIT) - || (ostate == H2_SESSION_ST_WAIT && nstate == H2_SESSION_ST_BUSY)){ - loglvl = APLOG_TRACE1; - } - ap_log_cerror(APLOG_MARK, loglvl, 0, session->c, + + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c1, H2_SSSN_LOG(APLOGNO(03078), session, "transit [%s] -- %s --> [%s]"), h2_session_state_str(ostate), action, @@ -1678,35 +1401,21 @@ static void transit(h2_session *session, const char *action, h2_session_state ns * If we return to mpm right away, this connection has the * same chance of being cleaned up by the mpm as connections * that already served requests - not fair. */ - session->idle_sync_until = apr_time_now() + apr_time_from_sec(1); - s = "timeout"; - timeout = H2MAX(session->s->timeout, session->s->keep_alive_timeout); - update_child_status(session, SERVER_BUSY_READ, "idle"); - ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, session->c, - H2_SSSN_LOG("", session, "enter idle, timeout = %d sec"), - (int)apr_time_sec(H2MAX(session->s->timeout, session->s->keep_alive_timeout))); - } - else if (session->open_streams) { - s = "timeout"; - timeout = session->s->keep_alive_timeout; - update_child_status(session, SERVER_BUSY_KEEPALIVE, "idle"); + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, session->c1, + H2_SSSN_LOG("", session, "enter idle")); } else { /* normal keepalive setup */ - s = "keepalive"; - timeout = session->s->keep_alive_timeout; - update_child_status(session, SERVER_BUSY_KEEPALIVE, "idle"); + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, session->c1, + H2_SSSN_LOG("", session, "enter keepalive")); } - session->idle_until = apr_time_now() + timeout; - ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, session->c, - H2_SSSN_LOG("", session, "enter idle, %s = %d sec"), - s, (int)apr_time_sec(timeout)); + session->state = nstate; break; case H2_SESSION_ST_DONE: - update_child_status(session, SERVER_CLOSING, "done"); break; default: /* nop */ + session->state = nstate; break; } } @@ -1724,12 +1433,45 @@ static void h2_session_ev_init(h2_session *session, int arg, const char *msg) } } +static void h2_session_ev_input_pending(h2_session *session, int arg, const char *msg) +{ + switch (session->state) { + case H2_SESSION_ST_INIT: + case H2_SESSION_ST_IDLE: + case H2_SESSION_ST_WAIT: + transit(session, "input read", H2_SESSION_ST_BUSY); + break; + default: + break; + } +} + +static void h2_session_ev_input_exhausted(h2_session *session, int arg, const char *msg) +{ + switch (session->state) { + case H2_SESSION_ST_BUSY: + if (!h2_session_want_send(session)) { + if (session->open_streams == 0) { + transit(session, "input exhausted, no streams", H2_SESSION_ST_IDLE); + } + else { + transit(session, "input exhausted", H2_SESSION_ST_WAIT); + } + } + break; + case H2_SESSION_ST_WAIT: + if (session->open_streams == 0) { + transit(session, "input exhausted, no streams", H2_SESSION_ST_IDLE); + } + break; + default: + break; + } +} + static void h2_session_ev_local_goaway(h2_session 
*session, int arg, const char *msg) { cleanup_unprocessed_streams(session); - if (!session->remote.shutdown) { - update_child_status(session, SERVER_CLOSING, "local goaway"); - } transit(session, "local goaway", H2_SESSION_ST_DONE); } @@ -1740,7 +1482,6 @@ static void h2_session_ev_remote_goaway(h2_session *session, int arg, const char session->remote.accepting = 0; session->remote.shutdown = 1; cleanup_unprocessed_streams(session); - update_child_status(session, SERVER_CLOSING, "remote goaway"); transit(session, "remote goaway", H2_SESSION_ST_DONE); } } @@ -1755,7 +1496,7 @@ static void h2_session_ev_conn_error(h2_session *session, int arg, const char *m break; default: - ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c1, H2_SSSN_LOG(APLOGNO(03401), session, "conn error -> shutdown")); h2_session_shutdown(session, arg, msg, 0); @@ -1766,7 +1507,7 @@ static void h2_session_ev_conn_error(h2_session *session, int arg, const char *m static void h2_session_ev_proto_error(h2_session *session, int arg, const char *msg) { if (!session->local.shutdown) { - ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c1, H2_SSSN_LOG(APLOGNO(03402), session, "proto error -> shutdown")); h2_session_shutdown(session, arg, msg, 0); @@ -1781,115 +1522,91 @@ static void h2_session_ev_conn_timeout(h2_session *session, int arg, const char } } -static void h2_session_ev_no_io(h2_session *session, int arg, const char *msg) +static void h2_session_ev_ngh2_done(h2_session *session, int arg, const char *msg) { switch (session->state) { - case H2_SESSION_ST_BUSY: - /* Nothing to READ, nothing to WRITE on the master connection. - * Possible causes: - * - we wait for the client to send us sth - * - we wait for started tasks to produce output - * - we have finished all streams and the client has sent GO_AWAY - */ - ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, session->c, - H2_SSSN_MSG(session, "NO_IO event, %d streams open"), - session->open_streams); - h2_conn_io_flush(&session->io); - if (session->open_streams > 0) { - if (h2_mplx_awaits_data(session->mplx)) { - /* waiting for at least one stream to produce data */ - transit(session, "no io", H2_SESSION_ST_WAIT); - } - else { - /* we have streams open, and all are submitted and none - * is suspended. The only thing keeping us from WRITEing - * more must be the flow control. - * This means we only wait for WINDOW_UPDATE from the - * client and can block on READ. */ - transit(session, "no io (flow wait)", H2_SESSION_ST_IDLE); - /* Make sure we have flushed all previously written output - * so that the client will react. */ - if (h2_conn_io_flush(&session->io) != APR_SUCCESS) { - dispatch_event(session, H2_SESSION_EV_CONN_ERROR, 0, NULL); - return; - } - } - } - else if (session->local.accepting) { - /* When we have no streams, but accept new, switch to idle */ - transit(session, "no io (keepalive)", H2_SESSION_ST_IDLE); - } - else { - /* We are no longer accepting new streams and there are - * none left. Time to leave. 
*/ - h2_session_shutdown(session, arg, msg, 0); - transit(session, "no io", H2_SESSION_ST_DONE); - } - break; - default: + case H2_SESSION_ST_DONE: /* nop */ break; - } -} - -static void h2_session_ev_frame_rcvd(h2_session *session, int arg, const char *msg) -{ - switch (session->state) { - case H2_SESSION_ST_IDLE: - case H2_SESSION_ST_WAIT: - transit(session, "frame received", H2_SESSION_ST_BUSY); - break; default: - /* nop */ + transit(session, "nghttp2 done", H2_SESSION_ST_DONE); break; } } -static void h2_session_ev_stream_change(h2_session *session, int arg, const char *msg) +static void h2_session_ev_mpm_stopping(h2_session *session, int arg, const char *msg) { switch (session->state) { - case H2_SESSION_ST_IDLE: - case H2_SESSION_ST_WAIT: - transit(session, "stream change", H2_SESSION_ST_BUSY); + case H2_SESSION_ST_DONE: + /* nop */ break; default: - /* nop */ + h2_session_shutdown_notice(session); +#if !AP_MODULE_MAGIC_AT_LEAST(20120211, 110) + h2_workers_graceful_shutdown(session->workers); +#endif break; } } -static void h2_session_ev_ngh2_done(h2_session *session, int arg, const char *msg) +static void h2_session_ev_pre_close(h2_session *session, int arg, const char *msg) { - switch (session->state) { - case H2_SESSION_ST_DONE: - /* nop */ - break; - default: - transit(session, "nghttp2 done", H2_SESSION_ST_DONE); - break; - } + h2_session_shutdown(session, arg, msg, 1); } -static void h2_session_ev_mpm_stopping(h2_session *session, int arg, const char *msg) +static void h2_session_ev_no_more_streams(h2_session *session) { + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c1, + H2_SSSN_LOG(APLOGNO(10304), session, "no more streams")); switch (session->state) { - case H2_SESSION_ST_DONE: - /* nop */ + case H2_SESSION_ST_BUSY: + case H2_SESSION_ST_WAIT: + if (!h2_session_want_send(session)) { + if (session->local.accepting) { + /* We wait for new frames on c1 only. */ + transit(session, "all streams done", H2_SESSION_ST_IDLE); + } + else { + /* We are no longer accepting new streams. + * Time to leave. */ + h2_session_shutdown(session, 0, "done", 0); + transit(session, "c1 done after goaway", H2_SESSION_ST_DONE); + } + } + else { + transit(session, "no more streams", H2_SESSION_ST_WAIT); + } break; default: - h2_session_shutdown_notice(session); + /* nop */ break; } } -static void h2_session_ev_pre_close(h2_session *session, int arg, const char *msg) +static void ev_stream_created(h2_session *session, h2_stream *stream) { - h2_session_shutdown(session, arg, msg, 1); + /* nop */ } static void ev_stream_open(h2_session *session, h2_stream *stream) { - h2_iq_append(session->in_process, stream->id); + if (H2_STREAM_CLIENT_INITIATED(stream->id)) { + ++session->remote.emitted_count; + if (stream->id > session->remote.emitted_max) { + session->remote.emitted_max = stream->id; + session->local.accepted_max = stream->id; + } + } + else { + if (stream->id > session->local.emitted_max) { + ++session->local.emitted_count; + session->remote.emitted_max = stream->id; + } + } + /* Stream state OPEN means we have received all request headers + * and can start processing the stream. 
*/ + h2_iq_append(session->ready_to_process, stream->id); + update_child_status(session, SERVER_BUSY_READ, "schedule", stream); } static void ev_stream_closed(h2_session *session, h2_stream *stream) @@ -1900,75 +1617,72 @@ static void ev_stream_closed(h2_session *session, h2_stream *stream) && (stream->id > session->local.completed_max)) { session->local.completed_max = stream->id; } - switch (session->state) { - case H2_SESSION_ST_IDLE: - break; - default: - break; - } - /* The stream might have data in the buffers of the main connection. * We can only free the allocated resources once all had been written. * Send a special buckets on the connection that gets destroyed when * all preceding data has been handled. On its destruction, it is safe * to purge all resources of the stream. */ - b = h2_bucket_eos_create(session->c->bucket_alloc, stream); + ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, session->c1, + H2_STRM_MSG(stream, "adding h2_eos to c1 out")); + b = h2_bucket_eos_create(session->c1->bucket_alloc, stream); APR_BRIGADE_INSERT_TAIL(session->bbtmp, b); - h2_conn_io_pass(&session->io, session->bbtmp); + h2_c1_io_append(&session->io, session->bbtmp); apr_brigade_cleanup(session->bbtmp); } static void on_stream_state_enter(void *ctx, h2_stream *stream) { h2_session *session = ctx; - /* stream entered a new state */ - ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, session->c, + + ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, session->c1, H2_STRM_MSG(stream, "entered state")); switch (stream->state) { case H2_SS_IDLE: /* stream was created */ - ++session->open_streams; - if (H2_STREAM_CLIENT_INITIATED(stream->id)) { - ++session->remote.emitted_count; - if (stream->id > session->remote.emitted_max) { - session->remote.emitted_max = stream->id; - session->local.accepted_max = stream->id; - } - } - else { - if (stream->id > session->local.emitted_max) { - ++session->local.emitted_count; - session->remote.emitted_max = stream->id; - } - } + ev_stream_created(session, stream); break; case H2_SS_OPEN: /* stream has request headers */ - case H2_SS_RSVD_L: /* stream has request headers */ + case H2_SS_RSVD_L: ev_stream_open(session, stream); break; - case H2_SS_CLOSED_L: /* stream output was closed */ + case H2_SS_CLOSED_L: /* stream output was closed, but remote end is not */ + /* If the stream is still being processed, it could still be reading + * its input (theoretically, HTTP request handling does not normally). + * But when processing is done, we need to cancel the stream as no + * one is consuming the input any longer. + * This happens, for example, on a large POST when the response + * is ready early due to the POST being denied.
*/ + if (!h2_mplx_c1_stream_is_running(session->mplx, stream)) { + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c1, + H2_STRM_LOG(APLOGNO(10305), stream, "remote close missing")); + nghttp2_submit_rst_stream(session->ngh2, NGHTTP2_FLAG_NONE, + stream->id, H2_ERR_NO_ERROR); + } break; case H2_SS_CLOSED_R: /* stream input was closed */ break; case H2_SS_CLOSED: /* stream in+out were closed */ - --session->open_streams; ev_stream_closed(session, stream); break; case H2_SS_CLEANUP: - h2_mplx_stream_cleanup(session->mplx, stream); + nghttp2_session_set_stream_user_data(session->ngh2, stream->id, NULL); + h2_mplx_c1_stream_cleanup(session->mplx, stream, &session->open_streams); + ++session->streams_done; + update_child_status(session, SERVER_BUSY_WRITE, "done", stream); break; default: break; } - dispatch_event(session, H2_SESSION_EV_STREAM_CHANGE, 0, "stream state change"); } -static void on_stream_event(void *ctx, h2_stream *stream, - h2_stream_event_t ev) +static void on_stream_event(void *ctx, h2_stream *stream, h2_stream_event_t ev) { h2_session *session = ctx; switch (ev) { case H2_SEV_IN_DATA_PENDING: - h2_iq_append(session->in_pending, stream->id); + session->input_flushed = 1; + break; + case H2_SEV_OUT_C1_BLOCK: + h2_iq_append(session->out_c1_blocked, stream->id); break; default: /* NOP */ @@ -1993,13 +1707,19 @@ static void on_stream_state_event(void *ctx, h2_stream *stream, } } -static void dispatch_event(h2_session *session, h2_session_event_t ev, - int arg, const char *msg) +void h2_session_dispatch_event(h2_session *session, h2_session_event_t ev, + apr_status_t arg, const char *msg) { switch (ev) { case H2_SESSION_EV_INIT: h2_session_ev_init(session, arg, msg); break; + case H2_SESSION_EV_INPUT_PENDING: + h2_session_ev_input_pending(session, arg, msg); + break; + case H2_SESSION_EV_INPUT_EXHAUSTED: + h2_session_ev_input_exhausted(session, arg, msg); + break; case H2_SESSION_EV_LOCAL_GOAWAY: h2_session_ev_local_goaway(session, arg, msg); break; @@ -2015,12 +1735,6 @@ static void dispatch_event(h2_session *session, h2_session_event_t ev, case H2_SESSION_EV_CONN_TIMEOUT: h2_session_ev_conn_timeout(session, arg, msg); break; - case H2_SESSION_EV_NO_IO: - h2_session_ev_no_io(session, arg, msg); - break; - case H2_SESSION_EV_FRAME_RCVD: - h2_session_ev_frame_rcvd(session, arg, msg); - break; case H2_SESSION_EV_NGH2_DONE: h2_session_ev_ngh2_done(session, arg, msg); break; @@ -2030,311 +1744,265 @@ static void dispatch_event(h2_session *session, h2_session_event_t ev, case H2_SESSION_EV_PRE_CLOSE: h2_session_ev_pre_close(session, arg, msg); break; - case H2_SESSION_EV_STREAM_CHANGE: - h2_session_ev_stream_change(session, arg, msg); + case H2_SESSION_EV_NO_MORE_STREAMS: + h2_session_ev_no_more_streams(session); break; default: - ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, session->c, + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, session->c1, H2_SSSN_MSG(session, "unknown event %d"), ev); break; } } -/* trigger window updates, stream resumes and submits */ -static apr_status_t dispatch_master(h2_session *session) { - apr_status_t status; - - status = h2_mplx_dispatch_master_events(session->mplx, - on_stream_resume, session); - if (status == APR_EAGAIN) { - ap_log_cerror(APLOG_MARK, APLOG_TRACE3, status, session->c, - H2_SSSN_MSG(session, "no master event available")); - } - else if (status != APR_SUCCESS) { - ap_log_cerror(APLOG_MARK, APLOG_TRACE3, status, session->c, - H2_SSSN_MSG(session, "dispatch error")); - dispatch_event(session, H2_SESSION_EV_CONN_ERROR, - H2_ERR_INTERNAL_ERROR, 
"dispatch error"); +static void unblock_c1_out(h2_session *session) { + int sid; + + while ((sid = h2_iq_shift(session->out_c1_blocked)) > 0) { + nghttp2_session_resume_data(session->ngh2, sid); } - return status; } -static const int MAX_WAIT_MICROS = 200 * 1000; - apr_status_t h2_session_process(h2_session *session, int async) { apr_status_t status = APR_SUCCESS; - conn_rec *c = session->c; + conn_rec *c = session->c1; int rv, mpm_state, trace = APLOGctrace3(c); - apr_time_t now; - + if (trace) { ap_log_cerror( APLOG_MARK, APLOG_TRACE3, status, c, H2_SSSN_MSG(session, "process start, async=%d"), async); } - + + if (H2_SESSION_ST_INIT == session->state) { + if (!h2_protocol_is_acceptable_c1(c, session->r, 1)) { + const char *msg = nghttp2_strerror(NGHTTP2_INADEQUATE_SECURITY); + update_child_status(session, SERVER_BUSY_READ, msg, NULL); + h2_session_shutdown(session, APR_EINVAL, msg, 1); + } + else { + update_child_status(session, SERVER_BUSY_READ, "init", NULL); + status = h2_session_start(session, &rv); + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, c, + H2_SSSN_LOG(APLOGNO(03079), session, + "started on %s:%d"), + session->s->server_hostname, + c->local_addr->port); + if (status != APR_SUCCESS) { + h2_session_dispatch_event(session, + H2_SESSION_EV_CONN_ERROR, status, NULL); + } + else { + h2_session_dispatch_event(session, H2_SESSION_EV_INIT, 0, NULL); + } + } + } + while (session->state != H2_SESSION_ST_DONE) { - now = apr_time_now(); - session->have_read = session->have_written = 0; - if (session->local.accepting + /* PR65731: we may get a new connection to process while the + * MPM already is stopping. For example due to having reached + * MaxRequestsPerChild limit. + * Since this is supposed to handle things gracefully, we need to: + * a) fully initialize the session before GOAWAYing + * b) give the client the chance to submit at least one request + */ + if (session->state != H2_SESSION_ST_INIT /* no longer intializing */ + && session->local.accepted_max > 0 /* have gotten at least one stream */ + && session->local.accepting /* have not already locally shut down */ && !ap_mpm_query(AP_MPMQ_MPM_STATE, &mpm_state)) { if (mpm_state == AP_MPMQ_STOPPING) { - dispatch_event(session, H2_SESSION_EV_MPM_STOPPING, 0, NULL); + h2_session_dispatch_event(session, H2_SESSION_EV_MPM_STOPPING, 0, NULL); } } - + session->status[0] = '\0'; + if (h2_session_want_send(session)) { + h2_session_send(session); + } + else if (!nghttp2_session_want_read(session->ngh2)) { + h2_session_dispatch_event(session, H2_SESSION_EV_NGH2_DONE, 0, NULL); + } + + if (!h2_iq_empty(session->ready_to_process)) { + h2_mplx_c1_process(session->mplx, session->ready_to_process, + get_stream, stream_pri_cmp, session, + &session->open_streams); + transit(session, "scheduled stream", H2_SESSION_ST_BUSY); + } + + if (session->input_flushed) { + transit(session, "forwarded input", H2_SESSION_ST_BUSY); + session->input_flushed = 0; + } + + if (!h2_iq_empty(session->out_c1_blocked)) { + unblock_c1_out(session); + transit(session, "unblocked output", H2_SESSION_ST_BUSY); + } + + if (session->reprioritize) { + h2_mplx_c1_reprioritize(session->mplx, stream_pri_cmp, session); + session->reprioritize = 0; + } + + if (h2_session_want_send(session)) { + h2_session_send(session); + } + + status = h2_c1_io_assure_flushed(&session->io); + if (APR_SUCCESS != status) { + h2_session_dispatch_event(session, H2_SESSION_EV_CONN_ERROR, status, NULL); + } + switch (session->state) { - case H2_SESSION_ST_INIT: - 
ap_update_child_status_from_conn(c->sbh, SERVER_BUSY_READ, c); - if (!h2_is_acceptable_connection(c, 1)) { - update_child_status(session, SERVER_BUSY_READ, - "inadequate security"); - h2_session_shutdown(session, - NGHTTP2_INADEQUATE_SECURITY, NULL, 1); - } - else { - update_child_status(session, SERVER_BUSY_READ, "init"); - status = h2_session_start(session, &rv); - ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, c, - H2_SSSN_LOG(APLOGNO(03079), session, - "started on %s:%d"), - session->s->server_hostname, - c->local_addr->port); - if (status != APR_SUCCESS) { - dispatch_event(session, - H2_SESSION_EV_CONN_ERROR, 0, NULL); - } - dispatch_event(session, H2_SESSION_EV_INIT, 0, NULL); - } - break; - - case H2_SESSION_ST_IDLE: - if (session->idle_until && (apr_time_now() + session->idle_delay) > session->idle_until) { - ap_log_cerror( APLOG_MARK, APLOG_TRACE1, status, c, - H2_SSSN_MSG(session, "idle, timeout reached, closing")); - if (session->idle_delay) { - apr_table_setn(session->c->notes, "short-lingering-close", "1"); - } - dispatch_event(session, H2_SESSION_EV_CONN_TIMEOUT, 0, "timeout"); - goto out; - } - - if (session->idle_delay) { - /* we are less interested in spending time on this connection */ - ap_log_cerror( APLOG_MARK, APLOG_TRACE2, status, c, - H2_SSSN_MSG(session, "session is idle (%ld ms), idle wait %ld sec left"), - (long)apr_time_as_msec(session->idle_delay), - (long)apr_time_sec(session->idle_until - now)); - apr_sleep(session->idle_delay); - session->idle_delay = 0; - } + case H2_SESSION_ST_INIT: + ap_assert(0); + h2_c1_read(session); + break; - h2_conn_io_flush(&session->io); - if (async && !session->r && (now > session->idle_sync_until)) { - if (trace) { - ap_log_cerror(APLOG_MARK, APLOG_TRACE3, status, c, - H2_SSSN_MSG(session, - "nonblock read, %d streams open"), - session->open_streams); - } - status = h2_session_read(session, 0); - - if (status == APR_SUCCESS) { - session->have_read = 1; - } - else if (APR_STATUS_IS_EAGAIN(status) || APR_STATUS_IS_TIMEUP(status)) { - status = APR_EAGAIN; - goto out; - } - else { + case H2_SESSION_ST_IDLE: + ap_assert(session->open_streams == 0); + ap_assert(nghttp2_session_want_read(session->ngh2)); + if (!h2_session_want_send(session)) { + /* Give any new incoming request a short grace period to + * arrive while we are still hot and return to the mpm + * connection handling when nothing really happened. */ + h2_c1_read(session); + if (H2_SESSION_ST_IDLE == session->state) { + if (async) { ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, c, - H2_SSSN_LOG(APLOGNO(03403), session, - "no data, error")); - dispatch_event(session, - H2_SESSION_EV_CONN_ERROR, 0, "timeout"); - } - } - else { - /* make certain, we send everything before we idle */ - if (trace) { - ap_log_cerror(APLOG_MARK, APLOG_TRACE3, status, c, - H2_SSSN_MSG(session, - "sync, stutter 1-sec, %d streams open"), - session->open_streams); - } - /* We wait in smaller increments, using a 1 second timeout. - * That gives us the chance to check for MPMQ_STOPPING often. 
- */ - status = h2_mplx_idle(session->mplx); - if (status == APR_EAGAIN) { - break; - } - else if (status != APR_SUCCESS) { - dispatch_event(session, H2_SESSION_EV_CONN_ERROR, - H2_ERR_ENHANCE_YOUR_CALM, "less is more"); - } - h2_filter_cin_timeout_set(session->cin, apr_time_from_sec(1)); - status = h2_session_read(session, 1); - if (status == APR_SUCCESS) { - session->have_read = 1; - } - else if (status == APR_EAGAIN) { - /* nothing to read */ - } - else if (APR_STATUS_IS_TIMEUP(status)) { - /* continue reading handling */ - } - else if (APR_STATUS_IS_ECONNABORTED(status) - || APR_STATUS_IS_ECONNRESET(status) - || APR_STATUS_IS_EOF(status) - || APR_STATUS_IS_EBADF(status)) { - ap_log_cerror( APLOG_MARK, APLOG_TRACE3, status, c, - H2_SSSN_MSG(session, "input gone")); - dispatch_event(session, H2_SESSION_EV_CONN_ERROR, 0, NULL); + H2_SSSN_LOG(APLOGNO(10306), session, + "returning to mpm c1 monitoring")); + goto leaving; } else { - ap_log_cerror( APLOG_MARK, APLOG_TRACE3, status, c, - H2_SSSN_MSG(session, - "(1 sec timeout) read failed")); - dispatch_event(session, H2_SESSION_EV_CONN_ERROR, 0, "error"); - } - } - if (nghttp2_session_want_write(session->ngh2)) { - ap_update_child_status(session->c->sbh, SERVER_BUSY_WRITE, NULL); - status = h2_session_send(session); - if (status == APR_SUCCESS) { - status = h2_conn_io_flush(&session->io); - } - if (status != APR_SUCCESS) { - dispatch_event(session, H2_SESSION_EV_CONN_ERROR, - H2_ERR_INTERNAL_ERROR, "writing"); - break; + /* Not an async mpm, we must continue waiting + * for client data to arrive until the configured + * server Timeout/KeepAliveTimeout happens */ + apr_time_t timeout = (session->open_streams == 0)? + session->s->keep_alive_timeout : + session->s->timeout; + ap_log_cerror(APLOG_MARK, APLOG_TRACE2, status, c, + H2_SSSN_MSG(session, "polling timeout=%d"), + (int)apr_time_sec(timeout)); + status = h2_mplx_c1_poll(session->mplx, timeout, + on_stream_input, + on_stream_output, session); + if (APR_STATUS_IS_TIMEUP(status)) { + if (session->open_streams == 0) { + h2_session_dispatch_event(session, + H2_SESSION_EV_CONN_TIMEOUT, status, NULL); + break; + } + } + else if (APR_SUCCESS != status) { + h2_session_dispatch_event(session, + H2_SESSION_EV_CONN_ERROR, status, NULL); + break; + } } } + } + else { + transit(session, "c1 io pending", H2_SESSION_ST_BUSY); + } + break; + + case H2_SESSION_ST_BUSY: + /* IO happening in and out. Make sure we react to c2 events + * inbetween send and receive. 
*/ + status = h2_mplx_c1_poll(session->mplx, 0, + on_stream_input, on_stream_output, session); + if (APR_SUCCESS != status && !APR_STATUS_IS_TIMEUP(status)) { + h2_session_dispatch_event(session, H2_SESSION_EV_CONN_ERROR, status, NULL); break; - - case H2_SESSION_ST_BUSY: - if (nghttp2_session_want_read(session->ngh2)) { - ap_update_child_status(session->c->sbh, SERVER_BUSY_READ, NULL); - h2_filter_cin_timeout_set(session->cin, session->s->timeout); - status = h2_session_read(session, 0); - if (status == APR_SUCCESS) { - session->have_read = 1; - } - else if (status == APR_EAGAIN) { - /* nothing to read */ - } - else if (APR_STATUS_IS_TIMEUP(status)) { - dispatch_event(session, H2_SESSION_EV_CONN_TIMEOUT, 0, NULL); - break; - } - else { - dispatch_event(session, H2_SESSION_EV_CONN_ERROR, 0, NULL); - } - } + } + h2_c1_read(session); + break; - status = dispatch_master(session); - if (status != APR_SUCCESS && status != APR_EAGAIN) { + case H2_SESSION_ST_WAIT: + status = h2_c1_io_assure_flushed(&session->io); + if (APR_SUCCESS != status) { + h2_session_dispatch_event(session, H2_SESSION_EV_CONN_ERROR, status, NULL); + break; + } + if (session->open_streams == 0) { + h2_session_dispatch_event(session, H2_SESSION_EV_NO_MORE_STREAMS, + 0, "streams really done"); + if (session->state != H2_SESSION_ST_WAIT) { break; } - - if (nghttp2_session_want_write(session->ngh2)) { - ap_update_child_status(session->c->sbh, SERVER_BUSY_WRITE, NULL); - status = h2_session_send(session); - if (status == APR_SUCCESS) { - status = h2_conn_io_flush(&session->io); - } - if (status != APR_SUCCESS) { - dispatch_event(session, H2_SESSION_EV_CONN_ERROR, - H2_ERR_INTERNAL_ERROR, "writing"); - break; - } - } - - if (session->have_read || session->have_written) { - if (session->wait_us) { - session->wait_us = 0; - } - } - else if (!nghttp2_session_want_write(session->ngh2)) { - dispatch_event(session, H2_SESSION_EV_NO_IO, 0, NULL); + } + /* No IO happening and input is exhausted. Make sure we have + * flushed any possibly pending output and then wait with + * the c1 connection timeout for sth to happen in our c1/c2 sockets/pipes */ + ap_log_cerror(APLOG_MARK, APLOG_TRACE2, status, c, + H2_SSSN_MSG(session, "polling timeout=%d, open_streams=%d"), + (int)apr_time_sec(session->s->timeout), session->open_streams); + status = h2_mplx_c1_poll(session->mplx, session->s->timeout, + on_stream_input, on_stream_output, session); + if (APR_STATUS_IS_TIMEUP(status)) { + /* If we timeout without streams open, no new request from client + * arrived. + * If we timeout without nghttp2 wanting to write something, but + * all open streams have something to send, it means we are + * blocked on HTTP/2 flow control and the client did not send + * WINDOW_UPDATEs to us. 
*/ + if (session->open_streams == 0 || + (!h2_session_want_send(session) && + h2_mplx_c1_all_streams_want_send_data(session->mplx))) { + h2_session_dispatch_event(session, H2_SESSION_EV_CONN_TIMEOUT, status, NULL); + break; } + } + else if (APR_SUCCESS != status) { + h2_session_dispatch_event(session, H2_SESSION_EV_CONN_ERROR, status, NULL); break; - - case H2_SESSION_ST_WAIT: - if (session->wait_us <= 0) { - session->wait_us = 10; - if (h2_conn_io_flush(&session->io) != APR_SUCCESS) { - dispatch_event(session, H2_SESSION_EV_CONN_ERROR, 0, NULL); - break; - } - } - else { - /* repeating, increase timer for graceful backoff */ - session->wait_us = H2MIN(session->wait_us*2, MAX_WAIT_MICROS); - } + } + break; - if (trace) { - ap_log_cerror(APLOG_MARK, APLOG_TRACE3, 0, c, - "h2_session: wait for data, %ld micros", - (long)session->wait_us); - } - status = h2_mplx_out_trywait(session->mplx, session->wait_us, - session->iowait); - if (status == APR_SUCCESS) { - session->wait_us = 0; - dispatch_event(session, H2_SESSION_EV_STREAM_CHANGE, 0, NULL); - } - else if (APR_STATUS_IS_TIMEUP(status)) { - /* go back to checking all inputs again */ - transit(session, "wait cycle", session->local.shutdown? - H2_SESSION_ST_DONE : H2_SESSION_ST_BUSY); - } - else if (APR_STATUS_IS_ECONNRESET(status) - || APR_STATUS_IS_ECONNABORTED(status)) { - dispatch_event(session, H2_SESSION_EV_CONN_ERROR, 0, NULL); - } - else { - ap_log_cerror(APLOG_MARK, APLOG_WARNING, status, c, - H2_SSSN_LOG(APLOGNO(03404), session, - "waiting on conditional")); - h2_session_shutdown(session, H2_ERR_INTERNAL_ERROR, - "cond wait error", 0); - } - break; - - default: - ap_log_cerror(APLOG_MARK, APLOG_ERR, APR_EGENERAL, c, - H2_SSSN_LOG(APLOGNO(03080), session, - "unknown state")); - dispatch_event(session, H2_SESSION_EV_PROTO_ERROR, 0, NULL); - break; - } + case H2_SESSION_ST_DONE: + h2_c1_read(session); + break; - if (!nghttp2_session_want_read(session->ngh2) - && !nghttp2_session_want_write(session->ngh2)) { - dispatch_event(session, H2_SESSION_EV_NGH2_DONE, 0, NULL); - } - if (session->reprioritize) { - h2_mplx_reprioritize(session->mplx, stream_pri_cmp, session); - session->reprioritize = 0; + default: + ap_log_cerror(APLOG_MARK, APLOG_ERR, APR_EGENERAL, c, + H2_SSSN_LOG(APLOGNO(03080), session, + "unknown state")); + h2_session_dispatch_event(session, H2_SESSION_EV_PROTO_ERROR, APR_EGENERAL, NULL); + break; } } - -out: + +leaving: if (trace) { ap_log_cerror( APLOG_MARK, APLOG_TRACE3, status, c, H2_SSSN_MSG(session, "process returns")); } - - if ((session->state != H2_SESSION_ST_DONE) - && (APR_STATUS_IS_EOF(status) + h2_mplx_c1_going_keepalive(session->mplx); + + if (session->state == H2_SESSION_ST_DONE) { + if (session->local.error) { + char buffer[128]; + const char *msg; + if (session->local.error_msg) { + msg = session->local.error_msg; + } + else { + msg = apr_strerror(session->local.error, buffer, sizeof(buffer)); + } + update_child_status(session, SERVER_CLOSING, msg, NULL); + } + else { + update_child_status(session, SERVER_CLOSING, "done", NULL); + } + } + else if (APR_STATUS_IS_EOF(status) || APR_STATUS_IS_ECONNRESET(status) - || APR_STATUS_IS_ECONNABORTED(status))) { - dispatch_event(session, H2_SESSION_EV_CONN_ERROR, 0, NULL); + || APR_STATUS_IS_ECONNABORTED(status)) { + h2_session_dispatch_event(session, H2_SESSION_EV_CONN_ERROR, status, NULL); + update_child_status(session, SERVER_CLOSING, "error", NULL); } return (session->state == H2_SESSION_ST_DONE)? 
APR_EOF : APR_SUCCESS; @@ -2344,14 +2012,14 @@ apr_status_t h2_session_pre_close(h2_session *session, int async) { apr_status_t status; - ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, session->c, + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, session->c1, H2_SSSN_MSG(session, "pre_close")); - dispatch_event(session, H2_SESSION_EV_PRE_CLOSE, 0, + h2_session_dispatch_event(session, H2_SESSION_EV_PRE_CLOSE, 0, (session->state == H2_SESSION_ST_IDLE)? "timeout" : NULL); status = session_cleanup(session, "pre_close"); if (status == APR_SUCCESS) { /* no one should hold a reference to this session any longer and - * the h2_ctx was removed from the connection. + * the h2_conn_ctx_t was removed from the connection. * Take the pool (and thus all subpools etc. down now, instead of * during cleanup of main connection pool. */ apr_pool_destroy(session->pool); diff --git a/modules/http2/h2_session.h b/modules/http2/h2_session.h index df2a862..3328509 100644 --- a/modules/http2/h2_session.h +++ b/modules/http2/h2_session.h @@ -17,25 +17,15 @@ #ifndef __mod_h2__h2_session__ #define __mod_h2__h2_session__ -#include "h2_conn_io.h" +#include "h2_c1_io.h" /** * A HTTP/2 connection, a session with a specific client. * * h2_session sits on top of a httpd conn_rec* instance and takes complete * control of the connection data. It receives protocol frames from the - * client. For new HTTP/2 streams it creates h2_task(s) that are sent - * via callback to a dispatcher (see h2_conn.c). - * h2_session keeps h2_io's for each ongoing stream which buffer the - * payload for that stream. - * - * New incoming HEADER frames are converted into a h2_stream+h2_task instance - * that both represent a HTTP/2 stream, but may have separate lifetimes. This - * allows h2_task to be scheduled in other threads without semaphores - * all over the place. It allows task memory to be freed independent of - * session lifetime and sessions may close down while tasks are still running. - * - * + * client. For new HTTP/2 streams it creates secondary connections + * to execute the requests in h2 workers.
*/ #include "h2.h" @@ -44,7 +34,6 @@ struct apr_thread_mutext_t; struct apr_thread_cond_t; struct h2_ctx; struct h2_config; -struct h2_filter_cin; struct h2_ihash_t; struct h2_mplx; struct h2_priority; @@ -53,39 +42,39 @@ struct h2_push_diary; struct h2_session; struct h2_stream; struct h2_stream_monitor; -struct h2_task; struct h2_workers; struct nghttp2_session; typedef enum { H2_SESSION_EV_INIT, /* session was initialized */ + H2_SESSION_EV_INPUT_PENDING, /* c1 input may have data pending */ + H2_SESSION_EV_INPUT_EXHAUSTED, /* c1 input exhausted */ H2_SESSION_EV_LOCAL_GOAWAY, /* we send a GOAWAY */ H2_SESSION_EV_REMOTE_GOAWAY, /* remote send us a GOAWAY */ H2_SESSION_EV_CONN_ERROR, /* connection error */ H2_SESSION_EV_PROTO_ERROR, /* protocol error */ H2_SESSION_EV_CONN_TIMEOUT, /* connection timeout */ - H2_SESSION_EV_NO_IO, /* nothing has been read or written */ - H2_SESSION_EV_FRAME_RCVD, /* a frame has been received */ H2_SESSION_EV_NGH2_DONE, /* nghttp2 wants neither read nor write anything */ H2_SESSION_EV_MPM_STOPPING, /* the process is stopping */ H2_SESSION_EV_PRE_CLOSE, /* connection will close after this */ - H2_SESSION_EV_STREAM_CHANGE, /* a stream (state/input/output) changed */ + H2_SESSION_EV_NO_MORE_STREAMS, /* no more streams to process */ } h2_session_event_t; typedef struct h2_session { - long id; /* identifier of this session, unique - * inside a httpd process */ - conn_rec *c; /* the connection this session serves */ + int child_num; /* child number this session runs in */ + apr_uint32_t id; /* identifier of this session, unique per child */ + conn_rec *c1; /* the main connection this session serves */ request_rec *r; /* the request that started this in case * of 'h2c', NULL otherwise */ server_rec *s; /* server/vhost we're starting on */ - const struct h2_config *config; /* Relevant config for this session */ apr_pool_t *pool; /* pool to use in session */ struct h2_mplx *mplx; /* multiplexer for stream data */ - struct h2_workers *workers; /* for executing stream tasks */ - struct h2_filter_cin *cin; /* connection input filter context */ - h2_conn_io io; /* io on httpd conn filters */ + struct h2_workers *workers; /* for executing streams */ + struct h2_c1_io_in_ctx_t *cin; /* connection input filter context */ + h2_c1_io io; /* io on httpd conn filters */ + unsigned int padding_max; /* max number of padding bytes */ + int padding_always; /* padding has precedence over I/O optimizations */ struct nghttp2_session *ngh2; /* the nghttp2 session (internal use) */ h2_session_state state; /* state session is in */ @@ -95,43 +84,39 @@ typedef struct h2_session { unsigned int reprioritize : 1; /* scheduled streams priority changed */ unsigned int flush : 1; /* flushing output necessary */ - unsigned int have_read : 1; /* session has read client data */ - unsigned int have_written : 1; /* session did write data to client */ apr_interval_time_t wait_us; /* timeout during BUSY_WAIT state, micro secs */ struct h2_push_diary *push_diary; /* remember pushes, avoid duplicates */ struct h2_stream_monitor *monitor;/* monitor callbacks for streams */ - int open_streams; /* number of client streams open */ - int unsent_submits; /* number of submitted, but not yet written responses. 
*/ - int unsent_promises; /* number of submitted, but not yet written push promises */ - - int responses_submitted; /* number of http/2 responses submitted */ - int streams_reset; /* number of http/2 streams reset by client */ - int pushes_promised; /* number of http/2 push promises submitted */ - int pushes_submitted; /* number of http/2 pushed responses submitted */ - int pushes_reset; /* number of http/2 pushed reset by client */ + unsigned int open_streams; /* number of streams processing */ + + unsigned int streams_done; /* number of http/2 streams handled */ + unsigned int responses_submitted; /* number of http/2 responses submitted */ + unsigned int streams_reset; /* number of http/2 streams reset by client */ + unsigned int pushes_promised; /* number of http/2 push promises submitted */ + unsigned int pushes_submitted; /* number of http/2 pushed responses submitted */ + unsigned int pushes_reset; /* number of http/2 pushed reset by client */ apr_size_t frames_received; /* number of http/2 frames received */ apr_size_t frames_sent; /* number of http/2 frames sent */ apr_size_t max_stream_count; /* max number of open streams */ apr_size_t max_stream_mem; /* max buffer memory for a single stream */ - - apr_time_t idle_until; /* Time we shut down due to sheer boredom */ - apr_time_t idle_sync_until; /* Time we sync wait until keepalive handling kicks in */ + apr_size_t max_data_frame_len; /* max amount of bytes for a single DATA frame */ + apr_size_t idle_frames; /* number of rcvd frames that kept session in idle state */ apr_interval_time_t idle_delay; /* Time we delay processing rcvd frames in idle state */ apr_bucket_brigade *bbtmp; /* brigade for keeping temporary data */ - struct apr_thread_cond_t *iowait; /* our cond when trywaiting for data */ - + char status[64]; /* status message for scoreboard */ int last_status_code; /* the one already reported */ const char *last_status_msg; /* the one already reported */ - - struct h2_iqueue *in_pending; /* all streams with input pending */ - struct h2_iqueue *in_process; /* all streams ready for processing on slave */ + + int input_flushed; /* stream input was flushed */ + struct h2_iqueue *out_c1_blocked; /* all streams with output blocked on c1 buffer full */ + struct h2_iqueue *ready_to_process; /* all streams ready for processing */ } h2_session; @@ -142,29 +127,17 @@ const char *h2_session_state_str(h2_session_state state); * The session will apply the configured parameter. * @param psession pointer receiving the created session on success or NULL * @param c the connection to work on + * @param r optional request when protocol was upgraded * @param cfg the module config to apply * @param workers the worker pool to use * @return the created session */ apr_status_t h2_session_create(h2_session **psession, - conn_rec *c, struct h2_ctx *ctx, + conn_rec *c, request_rec *r, server_rec *, struct h2_workers *workers); -/** - * Create a new h2_session for the given request. - * The session will apply the configured parameter. 
- * @param psession pointer receiving the created session on success or NULL - * @param r the request that was upgraded - * @param cfg the module config to apply - * @param workers the worker pool to use - * @return the created session - */ -apr_status_t h2_session_rcreate(h2_session **psession, - request_rec *r, struct h2_ctx *ctx, - struct h2_workers *workers); - void h2_session_event(h2_session *session, h2_session_event_t ev, - int err, const char *msg); + int err, const char *msg); /** * Process the given HTTP/2 session until it is ended or a fatal @@ -187,22 +160,12 @@ apr_status_t h2_session_pre_close(h2_session *session, int async); */ void h2_session_abort(h2_session *session, apr_status_t reason); -/** - * Close and deallocate the given session. - */ -void h2_session_close(h2_session *session); - /** * Returns if client settings have push enabled. * @param != 0 iff push is enabled in client settings */ int h2_session_push_enabled(h2_session *session); -/** - * Look up the stream in this session with the given id. - */ -struct h2_stream *h2_session_stream_get(h2_session *session, int stream_id); - /** * Submit a push promise on the stream and schedule the new stream for * processing. @@ -219,10 +182,25 @@ apr_status_t h2_session_set_prio(h2_session *session, struct h2_stream *stream, const struct h2_priority *prio); +/** + * Dispatch an event happening during session processing. + * @param session the session + * @param ev the event that happened + * @param arg integer argument (event type dependent) + * @param msg descriptive message + */ +void h2_session_dispatch_event(h2_session *session, h2_session_event_t ev, + int arg, const char *msg); + + #define H2_SSSN_MSG(s, msg) \ - "h2_session(%ld,%s,%d): "msg, s->id, h2_session_state_str(s->state), \ + "h2_session(%d-%lu,%s,%d): "msg, s->child_num, (unsigned long)s->id, \ + h2_session_state_str(s->state), \ s->open_streams #define H2_SSSN_LOG(aplogno, s, msg) aplogno H2_SSSN_MSG(s, msg) +#define H2_SSSN_STRM_MSG(s, stream_id, msg) \ + "h2_stream(%d-%lu-%d): "msg, s->child_num, (unsigned long)s->id, stream_id + #endif /* defined(__mod_h2__h2_session__) */ diff --git a/modules/http2/h2_stream.c b/modules/http2/h2_stream.c index 24ebc56..ee87555 100644 --- a/modules/http2/h2_stream.c +++ b/modules/http2/h2_stream.c @@ -17,34 +17,39 @@ #include #include -#include +#include "apr.h" +#include "apr_strings.h" +#include "apr_lib.h" +#include "apr_strmatch.h" #include #include #include #include +#include +#include #include #include "h2_private.h" #include "h2.h" #include "h2_bucket_beam.h" -#include "h2_conn.h" +#include "h2_c1.h" #include "h2_config.h" -#include "h2_h2.h" +#include "h2_protocol.h" #include "h2_mplx.h" #include "h2_push.h" #include "h2_request.h" #include "h2_headers.h" #include "h2_session.h" #include "h2_stream.h" -#include "h2_task.h" -#include "h2_ctx.h" -#include "h2_task.h" +#include "h2_c2.h" +#include "h2_conn_ctx.h" +#include "h2_c2.h" +#include "h2_util.h" -static const char *h2_ss_str(h2_stream_state_t state) +static const char *h2_ss_str(const h2_stream_state_t state) { switch (state) { case H2_SS_IDLE: @@ -68,7 +73,7 @@ static const char *h2_ss_str(h2_stream_state_t state) } } -const char *h2_stream_state_str(h2_stream *stream) +const char *h2_stream_state_str(const h2_stream *stream) { return h2_ss_str(stream->state); } @@ -120,7 +125,8 @@ static int trans_on_event[][H2_SS_MAX] = { { S_XXX, S_ERR, S_ERR, S_CL_L, S_CLS, S_XXX, S_XXX, S_XXX, },/* EV_CLOSED_L*/ { S_ERR, S_ERR, S_ERR, S_CL_R, S_ERR, S_CLS, S_NOP,
S_NOP, },/* EV_CLOSED_R*/ { S_CLS, S_CLS, S_CLS, S_CLS, S_CLS, S_CLS, S_NOP, S_NOP, },/* EV_CANCELLED*/ -{ S_NOP, S_XXX, S_XXX, S_XXX, S_XXX, S_CLS, S_CLN, S_XXX, },/* EV_EOS_SENT*/ +{ S_NOP, S_XXX, S_XXX, S_XXX, S_XXX, S_CLS, S_CLN, S_NOP, },/* EV_EOS_SENT*/ +{ S_NOP, S_XXX, S_CLS, S_XXX, S_XXX, S_CLS, S_XXX, S_XXX, },/* EV_IN_ERROR*/ }; static int on_map(h2_stream_state_t state, int map[H2_SS_MAX]) @@ -142,7 +148,7 @@ static int on_frame(h2_stream_state_t state, int frame_type, { ap_assert(frame_type >= 0); ap_assert(state >= 0); - if (frame_type >= maxlen) { + if ((apr_size_t)frame_type >= maxlen) { return state; /* NOP, ignore unknown frame types */ } return on_map(state, frame_map[frame_type]); @@ -160,6 +166,7 @@ static int on_frame_recv(h2_stream_state_t state, int frame_type) static int on_event(h2_stream* stream, h2_stream_event_t ev) { + H2_STRM_ASSERT_MAGIC(stream, H2_STRM_MAGIC_OK); if (stream->monitor && stream->monitor->on_event) { stream->monitor->on_event(stream->monitor->ctx, stream, ev); } @@ -169,10 +176,18 @@ static int on_event(h2_stream* stream, h2_stream_event_t ev) return stream->state; } +static ssize_t stream_data_cb(nghttp2_session *ng2s, + int32_t stream_id, + uint8_t *buf, + size_t length, + uint32_t *data_flags, + nghttp2_data_source *source, + void *puser); + static void H2_STREAM_OUT_LOG(int lvl, h2_stream *s, const char *tag) { - if (APLOG_C_IS_LEVEL(s->session->c, lvl)) { - conn_rec *c = s->session->c; + if (APLOG_C_IS_LEVEL(s->session->c1, lvl)) { + conn_rec *c = s->session->c1; char buffer[4 * 1024]; apr_size_t len, bmax = sizeof(buffer)/sizeof(buffer[0]); @@ -182,76 +197,116 @@ static void H2_STREAM_OUT_LOG(int lvl, h2_stream *s, const char *tag) } } -static apr_status_t setup_input(h2_stream *stream) { - if (stream->input == NULL) { - int empty = (stream->input_eof - && (!stream->in_buffer - || APR_BRIGADE_EMPTY(stream->in_buffer))); - if (!empty) { - h2_beam_create(&stream->input, stream->pool, stream->id, - "input", H2_BEAM_OWNER_SEND, 0, - stream->session->s->timeout); - h2_beam_send_from(stream->input, stream->pool); - } +static void stream_setup_input(h2_stream *stream) +{ + if (stream->input != NULL) return; + ap_assert(!stream->input_closed); + ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, stream->session->c1, + H2_STRM_MSG(stream, "setup input beam")); + h2_beam_create(&stream->input, stream->session->c1, + stream->pool, stream->id, + "input", 0, stream->session->s->timeout); +} + +apr_status_t h2_stream_prepare_processing(h2_stream *stream) +{ + /* Right before processing starts, last chance to decide if + * there is need to an input beam. 
*/ + if (!stream->input_closed) { + stream_setup_input(stream); } return APR_SUCCESS; } -static apr_status_t close_input(h2_stream *stream) +static int input_buffer_is_empty(h2_stream *stream) +{ + return !stream->in_buffer || APR_BRIGADE_EMPTY(stream->in_buffer); +} + +static apr_status_t input_flush(h2_stream *stream) { - conn_rec *c = stream->session->c; apr_status_t status = APR_SUCCESS; + apr_off_t written; - stream->input_eof = 1; - if (stream->input && h2_beam_is_closed(stream->input)) { - return APR_SUCCESS; - } - - ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, stream->session->c, - H2_STRM_MSG(stream, "closing input")); - if (stream->rst_error) { - return APR_ECONNRESET; + if (input_buffer_is_empty(stream)) goto cleanup; + + ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, stream->session->c1, + H2_STRM_MSG(stream, "flush input")); + status = h2_beam_send(stream->input, stream->session->c1, + stream->in_buffer, APR_BLOCK_READ, &written); + stream->in_last_write = apr_time_now(); + if (APR_SUCCESS != status && h2_stream_is_at(stream, H2_SS_CLOSED_L)) { + ap_log_cerror(APLOG_MARK, APLOG_TRACE2, status, stream->session->c1, + H2_STRM_MSG(stream, "send input error")); + h2_stream_dispatch(stream, H2_SEV_IN_ERROR); } - - if (stream->trailers && !apr_is_empty_table(stream->trailers)) { - apr_bucket *b; - h2_headers *r; - - if (!stream->in_buffer) { - stream->in_buffer = apr_brigade_create(stream->pool, c->bucket_alloc); - } - - r = h2_headers_create(HTTP_OK, stream->trailers, NULL, - stream->in_trailer_octets, stream->pool); - stream->trailers = NULL; - b = h2_bucket_headers_create(c->bucket_alloc, r); - APR_BRIGADE_INSERT_TAIL(stream->in_buffer, b); - - b = apr_bucket_eos_create(c->bucket_alloc); - APR_BRIGADE_INSERT_TAIL(stream->in_buffer, b); - - ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, stream->session->c, - H2_STRM_MSG(stream, "added trailers")); - h2_stream_dispatch(stream, H2_SEV_IN_DATA_PENDING); +cleanup: + return status; +} + +static void input_append_bucket(h2_stream *stream, apr_bucket *b) +{ + if (!stream->in_buffer) { + stream_setup_input(stream); + stream->in_buffer = apr_brigade_create( + stream->pool, stream->session->c1->bucket_alloc); } - if (stream->input) { - h2_stream_flush_input(stream); - return h2_beam_close(stream->input); + APR_BRIGADE_INSERT_TAIL(stream->in_buffer, b); +} + +static void input_append_data(h2_stream *stream, const char *data, apr_size_t len) +{ + if (!stream->in_buffer) { + stream_setup_input(stream); + stream->in_buffer = apr_brigade_create( + stream->pool, stream->session->c1->bucket_alloc); } - return status; + apr_brigade_write(stream->in_buffer, NULL, NULL, data, len); } -static apr_status_t close_output(h2_stream *stream) + +static apr_status_t close_input(h2_stream *stream) { - if (!stream->output || h2_beam_is_closed(stream->output)) { - return APR_SUCCESS; + conn_rec *c = stream->session->c1; + apr_status_t rv = APR_SUCCESS; + apr_bucket *b; + + if (stream->input_closed) goto cleanup; + + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, stream->session->c1, + H2_STRM_MSG(stream, "closing input")); + if (!stream->rst_error + && stream->trailers_in + && !apr_is_empty_table(stream->trailers_in)) { + ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, stream->session->c1, + H2_STRM_MSG(stream, "adding trailers")); +#if AP_HAS_RESPONSE_BUCKETS + b = ap_bucket_headers_create(stream->trailers_in, + stream->pool, c->bucket_alloc); +#else + b = h2_bucket_headers_create(c->bucket_alloc, + h2_headers_create(HTTP_OK, stream->trailers_in, NULL, + 
stream->in_trailer_octets, stream->pool)); +#endif + input_append_bucket(stream, b); + stream->trailers_in = NULL; + } + + stream->input_closed = 1; + if (stream->input) { + b = apr_bucket_eos_create(c->bucket_alloc); + input_append_bucket(stream, b); + input_flush(stream); + h2_stream_dispatch(stream, H2_SEV_IN_DATA_PENDING); + ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, stream->session->c1, + H2_STRM_MSG(stream, "input flush + EOS")); } - ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, stream->session->c, - H2_STRM_MSG(stream, "closing output")); - return h2_beam_leave(stream->output); + +cleanup: + return rv; } -static void on_state_enter(h2_stream *stream) +static void on_state_enter(h2_stream *stream) { if (stream->monitor && stream->monitor->on_state_enter) { stream->monitor->on_state_enter(stream->monitor->ctx, stream); @@ -271,7 +326,7 @@ static void on_state_invalid(h2_stream *stream) stream->monitor->on_state_invalid(stream->monitor->ctx, stream); } /* stream got an event/frame invalid in its state */ - ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, stream->session->c, + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, stream->session->c1, H2_STRM_MSG(stream, "invalid state event")); switch (stream->state) { case H2_SS_OPEN: @@ -288,17 +343,17 @@ static void on_state_invalid(h2_stream *stream) static apr_status_t transit(h2_stream *stream, int new_state) { - if (new_state == stream->state) { + if ((h2_stream_state_t)new_state == stream->state) { return APR_SUCCESS; } else if (new_state < 0) { - ap_log_cerror(APLOG_MARK, APLOG_WARNING, 0, stream->session->c, + ap_log_cerror(APLOG_MARK, APLOG_WARNING, 0, stream->session->c1, H2_STRM_LOG(APLOGNO(03081), stream, "invalid transition")); on_state_invalid(stream); return APR_EINVAL; } - ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, stream->session->c, + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, stream->session->c1, H2_STRM_MSG(stream, "transit to [%s]"), h2_ss_str(new_state)); stream->state = new_state; switch (new_state) { @@ -312,14 +367,12 @@ static apr_status_t transit(h2_stream *stream, int new_state) case H2_SS_OPEN: break; case H2_SS_CLOSED_L: - close_output(stream); break; case H2_SS_CLOSED_R: close_input(stream); break; case H2_SS_CLOSED: close_input(stream); - close_output(stream); if (stream->out_buffer) { apr_brigade_cleanup(stream->out_buffer); } @@ -340,19 +393,20 @@ void h2_stream_dispatch(h2_stream *stream, h2_stream_event_t ev) { int new_state; - ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, stream->session->c, + H2_STRM_ASSERT_MAGIC(stream, H2_STRM_MAGIC_OK); + ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, stream->session->c1, H2_STRM_MSG(stream, "dispatch event %d"), ev); new_state = on_event(stream, ev); if (new_state < 0) { - ap_log_cerror(APLOG_MARK, APLOG_WARNING, 0, stream->session->c, + ap_log_cerror(APLOG_MARK, APLOG_WARNING, 0, stream->session->c1, H2_STRM_LOG(APLOGNO(10002), stream, "invalid event %d"), ev); on_state_invalid(stream); AP_DEBUG_ASSERT(new_state > S_XXX); return; } - else if (new_state == stream->state) { + else if ((h2_stream_state_t)new_state == stream->state) { /* nop */ - ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, stream->session->c, + ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, stream->session->c1, H2_STRM_MSG(stream, "non-state event %d"), ev); return; } @@ -365,9 +419,7 @@ void h2_stream_dispatch(h2_stream *stream, h2_stream_event_t ev) static void set_policy_for(h2_stream *stream, h2_request *r) { int enabled = h2_session_push_enabled(stream->session); - stream->push_policy = h2_push_policy_determine(r->headers, 
stream->pool, - enabled); - r->serialize = h2_config_geti(stream->session->config, H2_CONF_SER_HEADERS); + stream->push_policy = h2_push_policy_determine(r->headers, stream->pool, enabled); } apr_status_t h2_stream_send_frame(h2_stream *stream, int ftype, int flags, size_t frame_len) @@ -375,9 +427,10 @@ apr_status_t h2_stream_send_frame(h2_stream *stream, int ftype, int flags, size_ apr_status_t status = APR_SUCCESS; int new_state, eos = 0; + H2_STRM_ASSERT_MAGIC(stream, H2_STRM_MAGIC_OK); new_state = on_frame_send(stream->state, ftype); if (new_state < 0) { - ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, stream->session->c, + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, stream->session->c1, H2_STRM_MSG(stream, "invalid frame %d send"), ftype); AP_DEBUG_ASSERT(new_state > S_XXX); return transit(stream, new_state); @@ -385,6 +438,12 @@ apr_status_t h2_stream_send_frame(h2_stream *stream, int ftype, int flags, size_ ++stream->out_frames; stream->out_frame_octets += frame_len; + if(stream->c2) { + h2_conn_ctx_t *conn_ctx = h2_conn_ctx_get(stream->c2); + if(conn_ctx) + conn_ctx->bytes_sent = stream->out_frame_octets; + } + switch (ftype) { case NGHTTP2_DATA: eos = (flags & NGHTTP2_FLAG_END_STREAM); @@ -398,24 +457,18 @@ apr_status_t h2_stream_send_frame(h2_stream *stream, int ftype, int flags, size_ /* start pushed stream */ ap_assert(stream->request == NULL); ap_assert(stream->rtmp != NULL); - status = h2_request_end_headers(stream->rtmp, stream->pool, 1, 0); - if (status != APR_SUCCESS) { - return status; - } - set_policy_for(stream, stream->rtmp); - stream->request = stream->rtmp; - stream->rtmp = NULL; + status = h2_stream_end_headers(stream, 1, 0); + if (status != APR_SUCCESS) goto leave; break; default: break; } - ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, stream->session->c, - H2_STRM_MSG(stream, "send frame %d, eos=%d"), ftype, eos); status = transit(stream, new_state); if (status == APR_SUCCESS && eos) { status = transit(stream, on_event(stream, H2_SEV_CLOSED_L)); } +leave: return status; } @@ -424,9 +477,10 @@ apr_status_t h2_stream_recv_frame(h2_stream *stream, int ftype, int flags, size_ apr_status_t status = APR_SUCCESS; int new_state, eos = 0; + H2_STRM_ASSERT_MAGIC(stream, H2_STRM_MAGIC_OK); new_state = on_frame_recv(stream->state, ftype); if (new_state < 0) { - ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, stream->session->c, + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, stream->session->c1, H2_STRM_MSG(stream, "invalid frame %d recv"), ftype); AP_DEBUG_ASSERT(new_state > S_XXX); return transit(stream, new_state); @@ -439,7 +493,7 @@ apr_status_t h2_stream_recv_frame(h2_stream *stream, int ftype, int flags, size_ case NGHTTP2_HEADERS: eos = (flags & NGHTTP2_FLAG_END_STREAM); - if (stream->state == H2_SS_OPEN) { + if (h2_stream_is_at_or_past(stream, H2_SS_OPEN)) { /* trailer HEADER */ if (!eos) { h2_stream_rst(stream, H2_ERR_PROTOCOL_ERROR); @@ -451,18 +505,13 @@ apr_status_t h2_stream_recv_frame(h2_stream *stream, int ftype, int flags, size_ ap_assert(stream->request == NULL); if (stream->rtmp == NULL) { /* This can only happen, if the stream has received no header - * name/value pairs at all. The lastest nghttp2 version have become + * name/value pairs at all. The latest nghttp2 version have become * pretty good at detecting this early. 
In any case, we have * to abort the connection here, since this is clearly a protocol error */ return APR_EINVAL; } - status = h2_request_end_headers(stream->rtmp, stream->pool, eos, frame_len); - if (status != APR_SUCCESS) { - return status; - } - set_policy_for(stream, stream->rtmp); - stream->request = stream->rtmp; - stream->rtmp = NULL; + status = h2_stream_end_headers(stream, eos, frame_len); + if (status != APR_SUCCESS) goto leave; } break; @@ -473,22 +522,7 @@ apr_status_t h2_stream_recv_frame(h2_stream *stream, int ftype, int flags, size_ if (status == APR_SUCCESS && eos) { status = transit(stream, on_event(stream, H2_SEV_CLOSED_R)); } - return status; -} - -apr_status_t h2_stream_flush_input(h2_stream *stream) -{ - apr_status_t status = APR_SUCCESS; - - if (stream->in_buffer && !APR_BRIGADE_EMPTY(stream->in_buffer)) { - setup_input(stream); - status = h2_beam_send(stream->input, stream->in_buffer, APR_BLOCK_READ); - stream->in_last_write = apr_time_now(); - } - if (stream->input_eof - && stream->input && !h2_beam_is_closed(stream->input)) { - status = h2_beam_close(stream->input); - } +leave: return status; } @@ -498,41 +532,59 @@ apr_status_t h2_stream_recv_DATA(h2_stream *stream, uint8_t flags, h2_session *session = stream->session; apr_status_t status = APR_SUCCESS; + H2_STRM_ASSERT_MAGIC(stream, H2_STRM_MAGIC_OK); stream->in_data_frames++; if (len > 0) { - if (APLOGctrace3(session->c)) { + if (APLOGctrace3(session->c1)) { const char *load = apr_pstrndup(stream->pool, (const char *)data, len); - ap_log_cerror(APLOG_MARK, APLOG_TRACE3, 0, session->c, + ap_log_cerror(APLOG_MARK, APLOG_TRACE3, 0, session->c1, H2_STRM_MSG(stream, "recv DATA, len=%d: -->%s<--"), (int)len, load); } else { - ap_log_cerror(APLOG_MARK, APLOG_TRACE2, status, session->c, + ap_log_cerror(APLOG_MARK, APLOG_TRACE2, status, session->c1, H2_STRM_MSG(stream, "recv DATA, len=%d"), (int)len); } stream->in_data_octets += len; - if (!stream->in_buffer) { - stream->in_buffer = apr_brigade_create(stream->pool, - session->c->bucket_alloc); - } - apr_brigade_write(stream->in_buffer, NULL, NULL, (const char *)data, len); + input_append_data(stream, (const char*)data, len); + input_flush(stream); h2_stream_dispatch(stream, H2_SEV_IN_DATA_PENDING); } return status; } -static void prep_output(h2_stream *stream) { - conn_rec *c = stream->session->c; - if (!stream->out_buffer) { - stream->out_buffer = apr_brigade_create(stream->pool, c->bucket_alloc); +#ifdef AP_DEBUG +static apr_status_t stream_pool_destroy(void *data) +{ + h2_stream *stream = data; + switch (stream->magic) { + case H2_STRM_MAGIC_OK: + ap_log_cerror(APLOG_MARK, APLOG_ERR, 0, stream->session->c1, + H2_STRM_MSG(stream, "was not destroyed explicitly")); + AP_DEBUG_ASSERT(0); + break; + case H2_STRM_MAGIC_SDEL: + /* stream has been explicitly destroyed, as it should */ + H2_STRM_ASSIGN_MAGIC(stream, H2_STRM_MAGIC_PDEL); + break; + case H2_STRM_MAGIC_PDEL: + ap_log_cerror(APLOG_MARK, APLOG_ERR, 0, stream->session->c1, + H2_STRM_MSG(stream, "already pool destroyed")); + AP_DEBUG_ASSERT(0); + break; + default: + AP_DEBUG_ASSERT(0); } + return APR_SUCCESS; } +#endif h2_stream *h2_stream_create(int id, apr_pool_t *pool, h2_session *session, h2_stream_monitor *monitor, int initiated_on) { h2_stream *stream = apr_pcalloc(pool, sizeof(h2_stream)); - + + H2_STRM_ASSIGN_MAGIC(stream, H2_STRM_MAGIC_OK); stream->id = id; stream->initiated_on = initiated_on; stream->created = apr_time_now(); @@ -540,15 +592,21 @@ h2_stream *h2_stream_create(int id, apr_pool_t *pool, 
h2_session *session, stream->pool = pool; stream->session = session; stream->monitor = monitor; - stream->max_mem = session->max_stream_mem; - -#ifdef H2_NG2_LOCAL_WIN_SIZE - stream->in_window_size = - nghttp2_session_get_stream_local_window_size( - stream->session->ngh2, stream->id); +#ifdef AP_DEBUG + if (id) { /* stream 0 has special lifetime */ + apr_pool_cleanup_register(pool, stream, stream_pool_destroy, + apr_pool_cleanup_null); + } #endif - ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, +#ifdef H2_NG2_LOCAL_WIN_SIZE + if (id) { + stream->in_window_size = + nghttp2_session_get_stream_local_window_size( + stream->session->ngh2, stream->id); + } +#endif + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c1, H2_STRM_LOG(APLOGNO(03082), stream, "created")); on_state_enter(stream); return stream; @@ -556,59 +614,35 @@ h2_stream *h2_stream_create(int id, apr_pool_t *pool, h2_session *session, void h2_stream_cleanup(h2_stream *stream) { - apr_status_t status; - + /* Stream is done on c1. There might still be processing on a c2 + * going on. The input/output beams get aborted and the stream's + * end of the in/out notifications get closed. + */ ap_assert(stream); + H2_STRM_ASSERT_MAGIC(stream, H2_STRM_MAGIC_OK); if (stream->out_buffer) { - /* remove any left over output buckets that may still have - * references into request pools */ apr_brigade_cleanup(stream->out_buffer); } - if (stream->input) { - h2_beam_abort(stream->input); - status = h2_beam_wait_empty(stream->input, APR_NONBLOCK_READ); - if (status == APR_EAGAIN) { - ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, stream->session->c, - H2_STRM_MSG(stream, "wait on input drain")); - status = h2_beam_wait_empty(stream->input, APR_BLOCK_READ); - ap_log_cerror(APLOG_MARK, APLOG_TRACE2, status, stream->session->c, - H2_STRM_MSG(stream, "input drain returned")); - } - } } void h2_stream_destroy(h2_stream *stream) { ap_assert(stream); - ap_log_cerror(APLOG_MARK, APLOG_TRACE3, 0, stream->session->c, + H2_STRM_ASSERT_MAGIC(stream, H2_STRM_MAGIC_OK); + ap_log_cerror(APLOG_MARK, APLOG_TRACE3, 0, stream->session->c1, H2_STRM_MSG(stream, "destroy")); + H2_STRM_ASSIGN_MAGIC(stream, H2_STRM_MAGIC_SDEL); apr_pool_destroy(stream->pool); } -apr_status_t h2_stream_prep_processing(h2_stream *stream) -{ - if (stream->request) { - const h2_request *r = stream->request; - ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, stream->session->c, - H2_STRM_MSG(stream, "schedule %s %s://%s%s chunked=%d"), - r->method, r->scheme, r->authority, r->path, r->chunked); - setup_input(stream); - stream->scheduled = 1; - return APR_SUCCESS; - } - return APR_EINVAL; -} - void h2_stream_rst(h2_stream *stream, int error_code) { + H2_STRM_ASSERT_MAGIC(stream, H2_STRM_MAGIC_OK); stream->rst_error = error_code; - if (stream->input) { - h2_beam_abort(stream->input); - } - if (stream->output) { - h2_beam_leave(stream->output); + if (stream->c2) { + h2_c2_abort(stream->c2, stream->session->c1); } - ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, stream->session->c, + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, stream->session->c1, H2_STRM_MSG(stream, "reset, error=%d"), error_code); h2_stream_dispatch(stream, H2_SEV_CANCELLED); } @@ -619,6 +653,7 @@ apr_status_t h2_stream_set_request_rec(h2_stream *stream, h2_request *req; apr_status_t status; + H2_STRM_ASSERT_MAGIC(stream, H2_STRM_MAGIC_OK); ap_assert(stream->request == NULL); ap_assert(stream->rtmp == NULL); if (stream->rst_error) { @@ -640,6 +675,7 @@ apr_status_t h2_stream_set_request_rec(h2_stream *stream, void 
h2_stream_set_request(h2_stream *stream, const h2_request *r) { + H2_STRM_ASSERT_MAGIC(stream, H2_STRM_MAGIC_OK); ap_assert(stream->request == NULL); ap_assert(stream->rtmp == NULL); stream->rtmp = h2_request_clone(stream->pool, r); @@ -647,43 +683,46 @@ void h2_stream_set_request(h2_stream *stream, const h2_request *r) static void set_error_response(h2_stream *stream, int http_status) { - if (!h2_stream_is_ready(stream)) { - conn_rec *c = stream->session->c; - apr_bucket *b; - h2_headers *response; - - response = h2_headers_die(http_status, stream->request, stream->pool); - prep_output(stream); - b = apr_bucket_eos_create(c->bucket_alloc); - APR_BRIGADE_INSERT_HEAD(stream->out_buffer, b); - b = h2_bucket_headers_create(c->bucket_alloc, response); - APR_BRIGADE_INSERT_HEAD(stream->out_buffer, b); + if (!h2_stream_is_ready(stream) && stream->rtmp) { + stream->rtmp->http_status = http_status; } } static apr_status_t add_trailer(h2_stream *stream, const char *name, size_t nlen, - const char *value, size_t vlen) + const char *value, size_t vlen, + size_t max_field_len, int *pwas_added) { - conn_rec *c = stream->session->c; + conn_rec *c = stream->session->c1; char *hname, *hvalue; + const char *existing; + *pwas_added = 0; if (nlen == 0 || name[0] == ':') { ap_log_cerror(APLOG_MARK, APLOG_DEBUG, APR_EINVAL, c, H2_STRM_LOG(APLOGNO(03060), stream, "pseudo header in trailer")); return APR_EINVAL; } - if (h2_req_ignore_trailer(name, nlen)) { + if (h2_ignore_req_trailer(name, nlen)) { return APR_SUCCESS; } - if (!stream->trailers) { - stream->trailers = apr_table_make(stream->pool, 5); + if (!stream->trailers_in) { + stream->trailers_in = apr_table_make(stream->pool, 5); } hname = apr_pstrndup(stream->pool, name, nlen); - hvalue = apr_pstrndup(stream->pool, value, vlen); h2_util_camel_case_header(hname, nlen); - apr_table_mergen(stream->trailers, hname, hvalue); + existing = apr_table_get(stream->trailers_in, hname); + if (max_field_len + && ((existing? 
strlen(existing)+2 : 0) + vlen + nlen + 2 > max_field_len)) { + /* "key: (oldval, )?nval" is too long */ + return APR_EINVAL; + } + if (!existing) *pwas_added = 1; + hvalue = apr_pstrndup(stream->pool, value, vlen); + apr_table_mergen(stream->trailers_in, hname, hvalue); + ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, c, + H2_STRM_MSG(stream, "added trailer '%s: %s'"), hname, hvalue); return APR_SUCCESS; } @@ -693,274 +732,487 @@ apr_status_t h2_stream_add_header(h2_stream *stream, const char *value, size_t vlen) { h2_session *session = stream->session; - int error = 0; - apr_status_t status; + int error = 0, was_added = 0; + apr_status_t status = APR_SUCCESS; - if (stream->has_response) { + H2_STRM_ASSERT_MAGIC(stream, H2_STRM_MAGIC_OK); + if (stream->response) { return APR_EINVAL; } - ++stream->request_headers_added; + if (name[0] == ':') { - if ((vlen) > session->s->limit_req_line) { + if (vlen > APR_INT32_MAX || (int)vlen > session->s->limit_req_line) { /* pseudo header: approximation of request line size check */ - ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, session->c, - H2_STRM_MSG(stream, "pseudo %s too long"), name); + if (!h2_stream_is_ready(stream)) { + ap_log_cerror(APLOG_MARK, APLOG_INFO, 0, session->c1, + H2_STRM_LOG(APLOGNO(10178), stream, + "Request pseudo header exceeds " + "LimitRequestFieldSize: %s"), name); + } error = HTTP_REQUEST_URI_TOO_LARGE; + goto cleanup; } } - else if ((nlen + 2 + vlen) > session->s->limit_req_fieldsize) { + + if (session->s->limit_req_fields > 0 + && stream->request_headers_added > session->s->limit_req_fields) { + /* already over limit, count this attempt, but do not take it in */ + ++stream->request_headers_added; + } + else if (H2_SS_IDLE == stream->state) { + if (!stream->rtmp) { + stream->rtmp = h2_request_create(stream->id, stream->pool, + NULL, NULL, NULL, NULL, NULL); + } + status = h2_request_add_header(stream->rtmp, stream->pool, + name, nlen, value, vlen, + session->s->limit_req_fieldsize, &was_added); + ap_log_cerror(APLOG_MARK, APLOG_TRACE2, status, session->c1, + H2_STRM_MSG(stream, "add_header: '%.*s: %.*s"), + (int)nlen, name, (int)vlen, value); + if (was_added) ++stream->request_headers_added; + } + else if (H2_SS_OPEN == stream->state) { + status = add_trailer(stream, name, nlen, value, vlen, + session->s->limit_req_fieldsize, &was_added); + if (was_added) ++stream->request_headers_added; + } + else { + status = APR_EINVAL; + goto cleanup; + } + + if (APR_EINVAL == status) { /* header too long */ - ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, session->c, - H2_STRM_MSG(stream, "header %s too long"), name); + if (!h2_stream_is_ready(stream)) { + ap_log_cerror(APLOG_MARK, APLOG_INFO, 0, session->c1, + H2_STRM_LOG(APLOGNO(10180), stream,"Request header exceeds " + "LimitRequestFieldSize: %.*s"), + (int)H2MIN(nlen, 80), name); + } error = HTTP_REQUEST_HEADER_FIELDS_TOO_LARGE; + goto cleanup; } - if (stream->request_headers_added > session->s->limit_req_fields + 4) { - /* too many header lines, include 4 pseudo headers */ - if (stream->request_headers_added - > session->s->limit_req_fields + 4 + 100) { - /* yeah, right */ + if (session->s->limit_req_fields > 0 + && stream->request_headers_added > session->s->limit_req_fields) { + /* too many header lines */ + if (stream->request_headers_added > session->s->limit_req_fields + 100) { + /* yeah, right, this request is way over the limit, say goodbye */ h2_stream_rst(stream, H2_ERR_ENHANCE_YOUR_CALM); return APR_ECONNRESET; } - ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, session->c, - 
H2_STRM_MSG(stream, "too many header lines")); + if (!h2_stream_is_ready(stream)) { + ap_log_cerror(APLOG_MARK, APLOG_INFO, 0, session->c1, + H2_STRM_LOG(APLOGNO(10181), stream, "Number of request headers " + "exceeds LimitRequestFields")); + } error = HTTP_REQUEST_HEADER_FIELDS_TOO_LARGE; + goto cleanup; } +cleanup: if (error) { + ++stream->request_headers_failed; set_error_response(stream, error); return APR_EINVAL; } - else if (H2_SS_IDLE == stream->state) { - if (!stream->rtmp) { - stream->rtmp = h2_req_create(stream->id, stream->pool, - NULL, NULL, NULL, NULL, NULL, 0); - } - status = h2_request_add_header(stream->rtmp, stream->pool, - name, nlen, value, vlen); + else if (status != APR_SUCCESS) { + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, session->c1, + H2_STRM_MSG(stream, "header %s not accepted"), name); + h2_stream_dispatch(stream, H2_SEV_CANCELLED); } - else if (H2_SS_OPEN == stream->state) { - status = add_trailer(stream, name, nlen, value, vlen); + return status; +} + +typedef struct { + apr_size_t maxlen; + const char *failed_key; +} val_len_check_ctx; + +static int table_check_val_len(void *baton, const char *key, const char *value) +{ + val_len_check_ctx *ctx = baton; + + if (strlen(value) <= ctx->maxlen) return 1; + ctx->failed_key = key; + return 0; +} + +apr_status_t h2_stream_end_headers(h2_stream *stream, int eos, size_t raw_bytes) +{ + apr_status_t status; + val_len_check_ctx ctx; + int is_http_or_https; + h2_request *req = stream->rtmp; + + H2_STRM_ASSERT_MAGIC(stream, H2_STRM_MAGIC_OK); + status = h2_request_end_headers(req, stream->pool, raw_bytes); + if (APR_SUCCESS != status || req->http_status != H2_HTTP_STATUS_UNSET) { + goto cleanup; + } + + /* keep on returning APR_SUCCESS for error responses, so that we + * send it and do not RST the stream. + */ + set_policy_for(stream, req); + + ctx.maxlen = stream->session->s->limit_req_fieldsize; + ctx.failed_key = NULL; + apr_table_do(table_check_val_len, &ctx, req->headers, NULL); + if (ctx.failed_key) { + if (!h2_stream_is_ready(stream)) { + ap_log_cerror(APLOG_MARK, APLOG_INFO, 0, stream->session->c1, + H2_STRM_LOG(APLOGNO(10230), stream,"Request header exceeds " + "LimitRequestFieldSize: %.*s"), + (int)H2MIN(strlen(ctx.failed_key), 80), ctx.failed_key); + } + set_error_response(stream, HTTP_REQUEST_HEADER_FIELDS_TOO_LARGE); + goto cleanup; + } + + /* http(s) scheme. rfc7540, ch. 8.1.2.3: + * This [:path] pseudo-header field MUST NOT be empty for "http" or "https" + * URIs; "http" or "https" URIs that do not contain a path component + * MUST include a value of '/'. The exception to this rule is an + * OPTIONS request for an "http" or "https" URI that does not include + * a path component; these MUST include a ":path" pseudo-header field + * with a value of '*' + * + * All HTTP/2 requests MUST include exactly one valid value for the + * ":method", ":scheme", and ":path" pseudo-header fields, unless it is + * a CONNECT request. + */ + is_http_or_https = (!req->scheme + || !(ap_cstr_casecmpn(req->scheme, "http", 4) != 0 + || (req->scheme[4] != '\0' + && (apr_tolower(req->scheme[4]) != 's' + || req->scheme[5] != '\0')))); + + /* CONNECT. rfc7540, ch. 8.3: + * In HTTP/2, the CONNECT method is used to establish a tunnel over a + * single HTTP/2 stream to a remote host for similar purposes. The HTTP + * header field mapping works as defined in Section 8.1.2.3 ("Request + * Pseudo-Header Fields"), with a few differences. Specifically: + * o The ":method" pseudo-header field is set to "CONNECT". 
+ * o The ":scheme" and ":path" pseudo-header fields MUST be omitted. + * o The ":authority" pseudo-header field contains the host and port to + * connect to (equivalent to the authority-form of the request-target + * of CONNECT requests (see [RFC7230], Section 5.3)). + */ + if (!ap_cstr_casecmp(req->method, "CONNECT")) { + if (req->protocol) { + if (!strcmp("websocket", req->protocol)) { + if (!req->scheme || !req->path) { + ap_log_cerror(APLOG_MARK, APLOG_INFO, 0, stream->session->c1, + H2_STRM_LOG(APLOGNO(10457), stream, "Request to websocket CONNECT " + "without :scheme or :path, sending 400 answer")); + set_error_response(stream, HTTP_BAD_REQUEST); + goto cleanup; + } + } + else { + /* do not know that protocol */ + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, stream->session->c1, APLOGNO(10460) + "':protocol: %s' header present in %s request", + req->protocol, req->method); + set_error_response(stream, HTTP_NOT_IMPLEMENTED); + goto cleanup; + } + } + else if (req->scheme || req->path) { + ap_log_cerror(APLOG_MARK, APLOG_INFO, 0, stream->session->c1, + H2_STRM_LOG(APLOGNO(10384), stream, "Request to CONNECT " + "with :scheme or :path specified, sending 400 answer")); + set_error_response(stream, HTTP_BAD_REQUEST); + goto cleanup; + } } - else { - status = APR_EINVAL; + else if (is_http_or_https) { + if (!req->path) { + ap_log_cerror(APLOG_MARK, APLOG_INFO, 0, stream->session->c1, + H2_STRM_LOG(APLOGNO(10385), stream, "Request for http(s) " + "resource without :path, sending 400 answer")); + set_error_response(stream, HTTP_BAD_REQUEST); + goto cleanup; + } + if (!req->scheme) { + req->scheme = ap_ssl_conn_is_ssl(stream->session->c1)? "https" : "http"; + } } - - if (status != APR_SUCCESS) { - ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, session->c, - H2_STRM_MSG(stream, "header %s not accepted"), name); - h2_stream_dispatch(stream, H2_SEV_CANCELLED); + + if (req->scheme && (req->path && req->path[0] != '/')) { + /* We still have a scheme, which means we need to pass an absolute URI into + * our HTTP protocol handling and the missing '/' at the start will prevent + * us from doing so (as it then confuses path and authority). */ + ap_log_cerror(APLOG_MARK, APLOG_INFO, 0, stream->session->c1, + H2_STRM_LOG(APLOGNO(10379), stream, "Request :scheme '%s' and " + "path '%s' do not allow creating an absolute URL. 
Failing " + "request with 400."), req->scheme, req->path); + set_error_response(stream, HTTP_BAD_REQUEST); + goto cleanup; + } + +cleanup: + if (APR_SUCCESS == status) { + stream->request = req; + stream->rtmp = NULL; + + if (APLOGctrace4(stream->session->c1)) { + int i; + const apr_array_header_t *t_h = apr_table_elts(req->headers); + const apr_table_entry_t *t_elt = (apr_table_entry_t *)t_h->elts; + ap_log_cerror(APLOG_MARK, APLOG_TRACE4, 0, stream->session->c1, + H2_STRM_MSG(stream,"headers received from client:")); + for (i = 0; i < t_h->nelts; i++, t_elt++) { + ap_log_cerror(APLOG_MARK, APLOG_TRACE4, 0, stream->session->c1, + H2_STRM_MSG(stream, " %s: %s"), + ap_escape_logitem(stream->pool, t_elt->key), + ap_escape_logitem(stream->pool, t_elt->val)); + } + } } return status; } -static apr_bucket *get_first_headers_bucket(apr_bucket_brigade *bb) +static apr_bucket *get_first_response_bucket(apr_bucket_brigade *bb) { if (bb) { apr_bucket *b = APR_BRIGADE_FIRST(bb); while (b != APR_BRIGADE_SENTINEL(bb)) { +#if AP_HAS_RESPONSE_BUCKETS + if (AP_BUCKET_IS_RESPONSE(b)) { + return b; + } +#else if (H2_BUCKET_IS_HEADERS(b)) { return b; } +#endif b = APR_BUCKET_NEXT(b); } } return NULL; } -static apr_status_t add_buffered_data(h2_stream *stream, apr_off_t requested, - apr_off_t *plen, int *peos, int *is_all, - h2_headers **pheaders) +static void stream_do_error_bucket(h2_stream *stream, apr_bucket *b) { - apr_bucket *b, *e; - - *peos = 0; - *plen = 0; - *is_all = 0; - if (pheaders) { - *pheaders = NULL; + int err = ((ap_bucket_error *)(b->data))->status; + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, stream->session->c1, + H2_STRM_MSG(stream, "error bucket received, err=%d"), err); + if (err >= 500) { + err = NGHTTP2_INTERNAL_ERROR; } - - H2_STREAM_OUT_LOG(APLOG_TRACE2, stream, "add_buffered_data"); - b = APR_BRIGADE_FIRST(stream->out_buffer); - while (b != APR_BRIGADE_SENTINEL(stream->out_buffer)) { - e = APR_BUCKET_NEXT(b); - if (APR_BUCKET_IS_METADATA(b)) { - if (APR_BUCKET_IS_FLUSH(b)) { - APR_BUCKET_REMOVE(b); - apr_bucket_destroy(b); - } - else if (APR_BUCKET_IS_EOS(b)) { - *peos = 1; - return APR_SUCCESS; - } - else if (H2_BUCKET_IS_HEADERS(b)) { - if (*plen > 0) { - /* data before the response, can only return up to here */ - return APR_SUCCESS; - } - else if (pheaders) { - *pheaders = h2_bucket_headers_get(b); - APR_BUCKET_REMOVE(b); - apr_bucket_destroy(b); - ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, stream->session->c, - H2_STRM_MSG(stream, "prep, -> response %d"), - (*pheaders)->status); - return APR_SUCCESS; - } - else { - return APR_EAGAIN; - } - } - } - else if (b->length == 0) { - APR_BUCKET_REMOVE(b); - apr_bucket_destroy(b); - } - else { - ap_assert(b->length != (apr_size_t)-1); - *plen += b->length; - if (*plen >= requested) { - *plen = requested; - return APR_SUCCESS; - } - } - b = e; + else if (err >= 400) { + err = NGHTTP2_STREAM_CLOSED; } - *is_all = 1; - return APR_SUCCESS; + else { + err = NGHTTP2_PROTOCOL_ERROR; + } + h2_stream_rst(stream, err); } -apr_status_t h2_stream_out_prepare(h2_stream *stream, apr_off_t *plen, - int *peos, h2_headers **pheaders) +static apr_status_t buffer_output_receive(h2_stream *stream) { - apr_status_t status = APR_SUCCESS; - apr_off_t requested, missing, max_chunk = H2_DATA_CHUNK_SIZE; - conn_rec *c; - int complete; + apr_status_t rv = APR_EAGAIN; + apr_off_t buf_len; + conn_rec *c1 = stream->session->c1; + apr_bucket *b, *e; - ap_assert(stream); - + if (!stream->output) { + goto cleanup; + } if (stream->rst_error) { - *plen = 0; - *peos = 
1; - return APR_ECONNRESET; + rv = APR_ECONNRESET; + goto cleanup; } - - c = stream->session->c; - prep_output(stream); - /* determine how much we'd like to send. We cannot send more than - * is requested. But we can reduce the size in case the master - * connection operates in smaller chunks. (TSL warmup) */ - if (stream->session->io.write_size > 0) { - max_chunk = stream->session->io.write_size - 9; /* header bits */ + if (!stream->out_buffer) { + stream->out_buffer = apr_brigade_create(stream->pool, c1->bucket_alloc); + buf_len = 0; } - requested = (*plen > 0)? H2MIN(*plen, max_chunk) : max_chunk; - - /* count the buffered data until eos or a headers bucket */ - status = add_buffered_data(stream, requested, plen, peos, &complete, pheaders); - - if (status == APR_EAGAIN) { - /* TODO: ugly, someone needs to retrieve the response first */ - h2_mplx_keep_active(stream->session->mplx, stream); - ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c, - H2_STRM_MSG(stream, "prep, response eagain")); - return status; + else { + /* if the brigade contains a file bucket, its normal report length + * might be megabytes, but the memory used is tiny. For buffering, + * we are only interested in the memory footprint. */ + buf_len = h2_brigade_mem_size(stream->out_buffer); } - else if (status != APR_SUCCESS) { - return status; + + if (buf_len > APR_INT32_MAX + || (apr_size_t)buf_len >= stream->session->max_stream_mem) { + /* we have buffered enough. No need to read more. + * However, we have now output pending for which we may not + * receive another poll event. We need to make sure that this + * stream is not suspended so we keep on processing output. + */ + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, rv, c1, + H2_STRM_MSG(stream, "out_buffer, already has %ld length"), + (long)buf_len); + rv = APR_SUCCESS; + goto cleanup; } - - if (pheaders && *pheaders) { - return APR_SUCCESS; + + if (stream->output_eos) { + rv = APR_BRIGADE_EMPTY(stream->out_buffer)? APR_EOF : APR_SUCCESS; } - - /* If there we do not have enough buffered data to satisfy the requested - * length *and* we counted the _complete_ buffer (and did not stop in the middle - * because of meta data there), lets see if we can read more from the - * output beam */ - missing = H2MIN(requested, stream->max_mem) - *plen; - if (complete && !*peos && missing > 0) { - apr_status_t rv = APR_EOF; - - if (stream->output) { - H2_STREAM_OUT_LOG(APLOG_TRACE2, stream, "pre"); - rv = h2_beam_receive(stream->output, stream->out_buffer, - APR_NONBLOCK_READ, stream->max_mem - *plen); - H2_STREAM_OUT_LOG(APLOG_TRACE2, stream, "post"); - } - - if (rv == APR_SUCCESS) { - /* count the buffer again, now that we have read output */ - status = add_buffered_data(stream, requested, plen, peos, &complete, pheaders); - } - else if (APR_STATUS_IS_EOF(rv)) { - apr_bucket *eos = apr_bucket_eos_create(c->bucket_alloc); - APR_BRIGADE_INSERT_TAIL(stream->out_buffer, eos); - *peos = 1; - } - else if (APR_STATUS_IS_EAGAIN(rv)) { - /* we set this is the status of this call only if there - * is no buffered data, see check below */ - } - else { - /* real error reading. Give this back directly, even though - * we may have something buffered. 
*/ - status = rv; + else { + H2_STREAM_OUT_LOG(APLOG_TRACE2, stream, "pre"); + rv = h2_beam_receive(stream->output, stream->session->c1, stream->out_buffer, + APR_NONBLOCK_READ, stream->session->max_stream_mem - buf_len); + if (APR_SUCCESS != rv) { + if (APR_EAGAIN != rv) { + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, rv, c1, + H2_STRM_MSG(stream, "out_buffer, receive unsuccessful")); + } } } - - if (status == APR_SUCCESS) { - if (*peos || *plen) { - ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c, - H2_STRM_MSG(stream, "prepare, len=%ld eos=%d"), - (long)*plen, *peos); - } - else { - status = (stream->output && h2_beam_is_closed(stream->output))? APR_EOF : APR_EAGAIN; - ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c, - H2_STRM_MSG(stream, "prepare, no data")); + + /* get rid of buckets we have no need for */ + if (!APR_BRIGADE_EMPTY(stream->out_buffer)) { + b = APR_BRIGADE_FIRST(stream->out_buffer); + while (b != APR_BRIGADE_SENTINEL(stream->out_buffer)) { + e = APR_BUCKET_NEXT(b); + if (APR_BUCKET_IS_METADATA(b)) { + if (APR_BUCKET_IS_FLUSH(b)) { /* we flush any c1 data already */ + APR_BUCKET_REMOVE(b); + apr_bucket_destroy(b); + } + else if (APR_BUCKET_IS_EOS(b)) { + stream->output_eos = 1; + } + else if (AP_BUCKET_IS_ERROR(b)) { + stream_do_error_bucket(stream, b); + break; + } + } + else if (b->length == 0) { /* zero length data */ + APR_BUCKET_REMOVE(b); + apr_bucket_destroy(b); + } + b = e; } } - return status; + H2_STREAM_OUT_LOG(APLOG_TRACE2, stream, "out_buffer, after receive"); + +cleanup: + return rv; } -static int is_not_headers(apr_bucket *b) +static int bucket_pass_to_c1(apr_bucket *b) { - return !H2_BUCKET_IS_HEADERS(b); +#if AP_HAS_RESPONSE_BUCKETS + return !AP_BUCKET_IS_RESPONSE(b) + && !AP_BUCKET_IS_HEADERS(b) + && !APR_BUCKET_IS_EOS(b); +#else + return !H2_BUCKET_IS_HEADERS(b) && !APR_BUCKET_IS_EOS(b); +#endif } apr_status_t h2_stream_read_to(h2_stream *stream, apr_bucket_brigade *bb, apr_off_t *plen, int *peos) { - conn_rec *c = stream->session->c; - apr_status_t status = APR_SUCCESS; + apr_status_t rv = APR_SUCCESS; + H2_STRM_ASSERT_MAGIC(stream, H2_STRM_MAGIC_OK); if (stream->rst_error) { return APR_ECONNRESET; } - status = h2_append_brigade(bb, stream->out_buffer, plen, peos, is_not_headers); - if (status == APR_SUCCESS && !*peos && !*plen) { - status = APR_EAGAIN; + rv = h2_append_brigade(bb, stream->out_buffer, plen, peos, bucket_pass_to_c1); + if (APR_SUCCESS == rv && !*peos && !*plen) { + rv = APR_EAGAIN; } - ap_log_cerror(APLOG_MARK, APLOG_TRACE2, status, c, - H2_STRM_MSG(stream, "read_to, len=%ld eos=%d"), - (long)*plen, *peos); - return status; + return rv; } +static apr_status_t stream_do_trailers(h2_stream *stream) +{ + conn_rec *c1 = stream->session->c1; + int ngrv; + h2_ngheader *nh = NULL; + apr_bucket *b, *e; +#if AP_HAS_RESPONSE_BUCKETS + ap_bucket_headers *headers = NULL; +#else + h2_headers *headers = NULL; +#endif + apr_status_t rv; + + ap_assert(stream->response); + ap_assert(stream->out_buffer); + + b = APR_BRIGADE_FIRST(stream->out_buffer); + while (b != APR_BRIGADE_SENTINEL(stream->out_buffer)) { + e = APR_BUCKET_NEXT(b); + if (APR_BUCKET_IS_METADATA(b)) { +#if AP_HAS_RESPONSE_BUCKETS + if (AP_BUCKET_IS_HEADERS(b)) { + headers = b->data; +#else /* AP_HAS_RESPONSE_BUCKETS */ + if (H2_BUCKET_IS_HEADERS(b)) { + headers = h2_bucket_headers_get(b); +#endif /* else AP_HAS_RESPONSE_BUCKETS */ + APR_BUCKET_REMOVE(b); + apr_bucket_destroy(b); + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c1, + H2_STRM_MSG(stream, "process trailers")); + break; + } + else if 
(APR_BUCKET_IS_EOS(b)) { + break; + } + } + else { + break; + } + b = e; + } + if (!headers) { + rv = APR_EAGAIN; + goto cleanup; + } + + rv = h2_res_create_ngtrailer(&nh, stream->pool, headers); + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, rv, c1, + H2_STRM_LOG(APLOGNO(03072), stream, "submit %d trailers"), + (int)nh->nvlen); + if (APR_SUCCESS != rv) { + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, rv, c1, + H2_STRM_LOG(APLOGNO(10024), stream, "invalid trailers")); + h2_stream_rst(stream, NGHTTP2_PROTOCOL_ERROR); + goto cleanup; + } + + ngrv = nghttp2_submit_trailer(stream->session->ngh2, stream->id, nh->nv, nh->nvlen); + if (nghttp2_is_fatal(ngrv)) { + rv = APR_EGENERAL; + h2_session_dispatch_event(stream->session, + H2_SESSION_EV_PROTO_ERROR, ngrv, nghttp2_strerror(rv)); + ap_log_cerror(APLOG_MARK, APLOG_ERR, rv, c1, + APLOGNO(02940) "submit_response: %s", + nghttp2_strerror(rv)); + } + stream->sent_trailers = 1; + +cleanup: + return rv; +} + +#if AP_HAS_RESPONSE_BUCKETS +apr_status_t h2_stream_submit_pushes(h2_stream *stream, ap_bucket_response *response) +#else apr_status_t h2_stream_submit_pushes(h2_stream *stream, h2_headers *response) +#endif { apr_status_t status = APR_SUCCESS; apr_array_header_t *pushes; int i; + H2_STRM_ASSERT_MAGIC(stream, H2_STRM_MAGIC_OK); pushes = h2_push_collect_update(stream, stream->request, response); if (pushes && !apr_is_empty_array(pushes)) { - ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, stream->session->c, + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, stream->session->c1, H2_STRM_MSG(stream, "found %d push candidates"), pushes->nelts); for (i = 0; i < pushes->nelts; ++i) { @@ -977,17 +1229,24 @@ apr_status_t h2_stream_submit_pushes(h2_stream *stream, h2_headers *response) apr_table_t *h2_stream_get_trailers(h2_stream *stream) { + H2_STRM_ASSERT_MAGIC(stream, H2_STRM_MAGIC_OK); return NULL; } -const h2_priority *h2_stream_get_priority(h2_stream *stream, +#if AP_HAS_RESPONSE_BUCKETS +const h2_priority *h2_stream_get_priority(h2_stream *stream, + ap_bucket_response *response) +#else +const h2_priority *h2_stream_get_priority(h2_stream *stream, h2_headers *response) +#endif { + H2_STRM_ASSERT_MAGIC(stream, H2_STRM_MAGIC_OK); if (response && stream->initiated_on) { const char *ctype = apr_table_get(response->headers, "content-type"); if (ctype) { /* FIXME: Not good enough, config needs to come from request->server */ - return h2_config_get_priority(stream->session->config, ctype); + return h2_cconfig_get_priority(stream->session->c1, ctype); } } return NULL; @@ -995,21 +1254,47 @@ const h2_priority *h2_stream_get_priority(h2_stream *stream, int h2_stream_is_ready(h2_stream *stream) { - if (stream->has_response) { + /* Have we sent a response or do we have the response in our buffer? 
*/ + H2_STRM_ASSERT_MAGIC(stream, H2_STRM_MAGIC_OK); + if (stream->response) { return 1; } - else if (stream->out_buffer && get_first_headers_bucket(stream->out_buffer)) { + else if (stream->out_buffer && get_first_response_bucket(stream->out_buffer)) { return 1; } return 0; } -int h2_stream_was_closed(const h2_stream *stream) +int h2_stream_wants_send_data(h2_stream *stream) { - switch (stream->state) { + H2_STRM_ASSERT_MAGIC(stream, H2_STRM_MAGIC_OK); + return h2_stream_is_ready(stream) && + ((stream->out_buffer && !APR_BRIGADE_EMPTY(stream->out_buffer)) || + (stream->output && !h2_beam_empty(stream->output))); +} + +int h2_stream_is_at(const h2_stream *stream, h2_stream_state_t state) +{ + H2_STRM_ASSERT_MAGIC(stream, H2_STRM_MAGIC_OK); + return stream->state == state; +} + +int h2_stream_is_at_or_past(const h2_stream *stream, h2_stream_state_t state) +{ + H2_STRM_ASSERT_MAGIC(stream, H2_STRM_MAGIC_OK); + switch (state) { + case H2_SS_IDLE: + return 1; /* by definition */ + case H2_SS_RSVD_R: /*fall through*/ + case H2_SS_RSVD_L: /*fall through*/ + case H2_SS_OPEN: + return stream->state == state || stream->state >= H2_SS_OPEN; + case H2_SS_CLOSED_R: /*fall through*/ + case H2_SS_CLOSED_L: /*fall through*/ case H2_SS_CLOSED: + return stream->state == state || stream->state >= H2_SS_CLOSED; case H2_SS_CLEANUP: - return 1; + return stream->state == state; default: return 0; } @@ -1019,6 +1304,7 @@ apr_status_t h2_stream_in_consumed(h2_stream *stream, apr_off_t amount) { h2_session *session = stream->session; + H2_STRM_ASSERT_MAGIC(stream, H2_STRM_MAGIC_OK); if (amount > 0) { apr_off_t consumed = amount; @@ -1066,13 +1352,477 @@ apr_status_t h2_stream_in_consumed(h2_stream *stream, apr_off_t amount) nghttp2_session_set_local_window_size(session->ngh2, NGHTTP2_FLAG_NONE, stream->id, win); } - ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, session->c, - "h2_stream(%ld-%d): consumed %ld bytes, window now %d/%d", - session->id, stream->id, (long)amount, - cur_size, stream->in_window_size); + ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, session->c1, + H2_STRM_MSG(stream, "consumed %ld bytes, window now %d/%d"), + (long)amount, cur_size, stream->in_window_size); } -#endif +#endif /* #ifdef H2_NG2_LOCAL_WIN_SIZE */ } return APR_SUCCESS; } +static apr_off_t output_data_buffered(h2_stream *stream, int *peos, int *pheader_blocked) +{ + /* How much data do we have in our buffers that we can write? */ + apr_off_t buf_len = 0; + apr_bucket *b; + + *peos = *pheader_blocked = 0; + if (stream->out_buffer) { + b = APR_BRIGADE_FIRST(stream->out_buffer); + while (b != APR_BRIGADE_SENTINEL(stream->out_buffer)) { + if (APR_BUCKET_IS_METADATA(b)) { + if (APR_BUCKET_IS_EOS(b)) { + *peos = 1; + break; + } +#if AP_HAS_RESPONSE_BUCKETS + else if (AP_BUCKET_IS_RESPONSE(b)) { + break; + } + else if (AP_BUCKET_IS_HEADERS(b)) { + *pheader_blocked = 1; + break; + } +#else + else if (H2_BUCKET_IS_HEADERS(b)) { + *pheader_blocked = 1; + break; + } +#endif + } + else { + buf_len += b->length; + } + b = APR_BUCKET_NEXT(b); + } + } + return buf_len; +} + +static ssize_t stream_data_cb(nghttp2_session *ng2s, + int32_t stream_id, + uint8_t *buf, + size_t length, + uint32_t *data_flags, + nghttp2_data_source *source, + void *puser) +{ + h2_session *session = (h2_session *)puser; + conn_rec *c1 = session->c1; + apr_off_t buf_len; + int eos, header_blocked; + apr_status_t rv; + h2_stream *stream; + + /* nghttp2 wants to send more DATA for the stream. 
+ * we should have submitted the final response at this time + * after receiving output via stream_do_responses() */ + ap_assert(session); + (void)ng2s; + (void)buf; + (void)source; + stream = nghttp2_session_get_stream_user_data(session->ngh2, stream_id); + + if (!stream) { + ap_log_cerror(APLOG_MARK, APLOG_ERR, 0, c1, + APLOGNO(02937) + H2_SSSN_STRM_MSG(session, stream_id, "data_cb, stream not found")); + return NGHTTP2_ERR_CALLBACK_FAILURE; + } + H2_STRM_ASSERT_MAGIC(stream, H2_STRM_MAGIC_OK); + if (!stream->output || !stream->response || !stream->out_buffer) { + return NGHTTP2_ERR_DEFERRED; + } + if (stream->rst_error) { + return NGHTTP2_ERR_DEFERRED; + } + if (h2_c1_io_needs_flush(&session->io)) { + rv = h2_c1_io_pass(&session->io); + if (APR_STATUS_IS_EAGAIN(rv)) { + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c1, + H2_SSSN_STRM_MSG(session, stream_id, "suspending on c1 out needs flush")); + h2_stream_dispatch(stream, H2_SEV_OUT_C1_BLOCK); + return NGHTTP2_ERR_DEFERRED; + } + else if (rv) { + h2_session_dispatch_event(session, H2_SESSION_EV_CONN_ERROR, rv, NULL); + return NGHTTP2_ERR_CALLBACK_FAILURE; + } + } + + /* determine how much we'd like to send. We cannot send more than + * is requested. But we can reduce the size in case the master + * connection operates in smaller chunks. (TSL warmup) */ + if (stream->session->io.write_size > 0) { + apr_size_t chunk_len = stream->session->io.write_size - H2_FRAME_HDR_LEN; + if (length > chunk_len) { + length = chunk_len; + } + } + /* We allow configurable max DATA frame length. */ + if (stream->session->max_data_frame_len > 0 + && length > stream->session->max_data_frame_len) { + length = stream->session->max_data_frame_len; + } + + /* How much data do we have in our buffers that we can write? + * if not enough, receive more. */ + buf_len = output_data_buffered(stream, &eos, &header_blocked); + if (buf_len < (apr_off_t)length && !eos + && !header_blocked && !stream->rst_error) { + /* read more? */ + ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, c1, + H2_SSSN_STRM_MSG(session, stream_id, + "need more (read len=%ld, %ld in buffer)"), + (long)length, (long)buf_len); + rv = buffer_output_receive(stream); + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, rv, c1, + H2_SSSN_STRM_MSG(session, stream_id, + "buffer_output_received")); + if (APR_STATUS_IS_EAGAIN(rv)) { + /* currently, no more is available */ + } + else if (APR_SUCCESS == rv) { + /* got some, re-assess */ + buf_len = output_data_buffered(stream, &eos, &header_blocked); + } + else if (APR_EOF == rv) { + if (!stream->output_eos) { + /* Seeing APR_EOF without an EOS bucket received before indicates + * that stream output is incomplete. Commonly, we expect to see + * an ERROR bucket to have been generated. But faulty handlers + * may not have generated one. + * We need to RST the stream bc otherwise the client thinks + * it is all fine. 
*/ + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, rv, c1, + H2_SSSN_STRM_MSG(session, stream_id, "rst stream")); + h2_stream_rst(stream, H2_ERR_STREAM_CLOSED); + return NGHTTP2_ERR_DEFERRED; + } + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, rv, c1, + H2_SSSN_STRM_MSG(session, stream_id, + "eof on receive (read len=%ld, %ld in buffer)"), + (long)length, (long)buf_len); + eos = 1; + rv = APR_SUCCESS; + } + else if (APR_ECONNRESET == rv || APR_ECONNABORTED == rv) { + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, rv, c1, + H2_STRM_LOG(APLOGNO(10471), stream, "data_cb, reading data")); + h2_stream_rst(stream, H2_ERR_STREAM_CLOSED); + return NGHTTP2_ERR_DEFERRED; + } + else { + ap_log_cerror(APLOG_MARK, APLOG_ERR, rv, c1, + H2_STRM_LOG(APLOGNO(02938), stream, "data_cb, reading data")); + h2_stream_rst(stream, H2_ERR_INTERNAL_ERROR); + return NGHTTP2_ERR_DEFERRED; + } + } + + if (stream->rst_error) { + return NGHTTP2_ERR_DEFERRED; + } + + if (buf_len == 0 && header_blocked) { + rv = stream_do_trailers(stream); + if (APR_SUCCESS != rv && !APR_STATUS_IS_EAGAIN(rv)) { + ap_log_cerror(APLOG_MARK, APLOG_ERR, rv, c1, + H2_STRM_LOG(APLOGNO(10300), stream, + "data_cb, error processing trailers")); + return NGHTTP2_ERR_CALLBACK_FAILURE; + } + length = 0; + eos = 0; + } + else if (buf_len > (apr_off_t)length) { + eos = 0; /* Any EOS we have in the buffer does not apply yet */ + } + else { + length = (size_t)buf_len; + } + + if (length) { + ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, c1, + H2_STRM_MSG(stream, "data_cb, sending len=%ld, eos=%d"), + (long)length, eos); + *data_flags |= NGHTTP2_DATA_FLAG_NO_COPY; + } + else if (!eos && !stream->sent_trailers) { + /* We have not reached the end of DATA yet, DEFER sending */ + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, c1, + H2_STRM_LOG(APLOGNO(03071), stream, "data_cb, suspending")); + return NGHTTP2_ERR_DEFERRED; + } + + if (eos) { + *data_flags |= NGHTTP2_DATA_FLAG_EOF; + } + return length; +} + +static apr_status_t stream_do_response(h2_stream *stream) +{ + conn_rec *c1 = stream->session->c1; + apr_status_t rv = APR_EAGAIN; + int ngrv, is_empty = 0; + h2_ngheader *nh = NULL; + apr_bucket *b, *e; +#if AP_HAS_RESPONSE_BUCKETS + ap_bucket_response *resp = NULL; +#else + h2_headers *resp = NULL; +#endif + nghttp2_data_provider provider, *pprovider = NULL; + + H2_STRM_ASSERT_MAGIC(stream, H2_STRM_MAGIC_OK); + ap_assert(!stream->response); + ap_assert(stream->out_buffer); + + b = APR_BRIGADE_FIRST(stream->out_buffer); + while (b != APR_BRIGADE_SENTINEL(stream->out_buffer)) { + e = APR_BUCKET_NEXT(b); + if (APR_BUCKET_IS_METADATA(b)) { +#if AP_HAS_RESPONSE_BUCKETS + if (AP_BUCKET_IS_RESPONSE(b)) { + resp = b->data; +#else /* AP_HAS_RESPONSE_BUCKETS */ + if (H2_BUCKET_IS_HEADERS(b)) { + resp = h2_bucket_headers_get(b); +#endif /* else AP_HAS_RESPONSE_BUCKETS */ + APR_BUCKET_REMOVE(b); + apr_bucket_destroy(b); + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c1, + H2_STRM_MSG(stream, "process response %d"), + resp->status); + is_empty = (e != APR_BRIGADE_SENTINEL(stream->out_buffer) + && APR_BUCKET_IS_EOS(e)); + break; + } + else if (APR_BUCKET_IS_EOS(b)) { + h2_stream_rst(stream, H2_ERR_INTERNAL_ERROR); + rv = APR_EINVAL; + goto cleanup; + } + else if (AP_BUCKET_IS_ERROR(b)) { + stream_do_error_bucket(stream, b); + rv = APR_EINVAL; + goto cleanup; + } + } + else { + /* data buckets before response headers, an error */ + h2_stream_rst(stream, H2_ERR_INTERNAL_ERROR); + rv = APR_EINVAL; + goto cleanup; + } + b = e; + } + + if (!resp) { + rv = APR_EAGAIN; + goto cleanup; + } + + if 
(resp->status < 100) { + h2_stream_rst(stream, resp->status); + goto cleanup; + } + + if (resp->status == HTTP_FORBIDDEN && resp->notes) { + const char *cause = apr_table_get(resp->notes, "ssl-renegotiate-forbidden"); + if (cause) { + /* This request triggered a TLS renegotiation that is not allowed + * in HTTP/2. Tell the client that it should use HTTP/1.1 for this. + */ + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, resp->status, c1, + H2_STRM_LOG(APLOGNO(03061), stream, + "renegotiate forbidden, cause: %s"), cause); + h2_stream_rst(stream, H2_ERR_HTTP_1_1_REQUIRED); + goto cleanup; + } + } + + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, c1, + H2_STRM_LOG(APLOGNO(03073), stream, + "submit response %d"), resp->status); + + /* If this stream is not a pushed one itself, + * and HTTP/2 server push is enabled here, + * and the response HTTP status is not sth >= 400, + * and the remote side has pushing enabled, + * -> find and perform any pushes on this stream + * *before* we submit the stream response itself. + * This helps clients avoid opening new streams on Link + * resp that get pushed right afterwards. + * + * *) the response code is relevant, as we do not want to + * make pushes on 401 or 403 codes and friends. + * And if we see a 304, we do not push either + * as the client, having this resource in its cache, might + * also have the pushed ones as well. + */ + if (!stream->initiated_on + && !stream->response + && stream->request && stream->request->method + && !strcmp("GET", stream->request->method) + && (resp->status < 400) + && (resp->status != 304) + && h2_session_push_enabled(stream->session)) { + /* PUSH is possible and enabled on server, unless the request + * denies it, submit resources to push */ + const char *s = apr_table_get(resp->notes, H2_PUSH_MODE_NOTE); + if (!s || strcmp(s, "0")) { + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c1, + H2_STRM_MSG(stream, "submit pushes, note=%s"), s); + h2_stream_submit_pushes(stream, resp); + } + } + + if (!stream->pref_priority) { + stream->pref_priority = h2_stream_get_priority(stream, resp); + } + h2_session_set_prio(stream->session, stream, stream->pref_priority); + + if (resp->status == 103 + && !h2_config_sgeti(stream->session->s, H2_CONF_EARLY_HINTS)) { + /* suppress sending this to the client, it might have triggered + * pushes and served its purpose nevertheless */ + rv = APR_SUCCESS; + goto cleanup; + } + if (resp->status >= 200) { + stream->response = resp; + } + + if (!is_empty) { + memset(&provider, 0, sizeof(provider)); + provider.source.fd = stream->id; + provider.read_callback = stream_data_cb; + pprovider = &provider; + } + + rv = h2_res_create_ngheader(&nh, stream->pool, resp); + if (APR_SUCCESS != rv) { + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, rv, c1, + H2_STRM_LOG(APLOGNO(10025), stream, "invalid response")); + h2_stream_rst(stream, NGHTTP2_PROTOCOL_ERROR); + goto cleanup; + } + + ngrv = nghttp2_submit_response(stream->session->ngh2, stream->id, + nh->nv, nh->nvlen, pprovider); + if (nghttp2_is_fatal(ngrv)) { + rv = APR_EGENERAL; + h2_session_dispatch_event(stream->session, + H2_SESSION_EV_PROTO_ERROR, ngrv, nghttp2_strerror(rv)); + ap_log_cerror(APLOG_MARK, APLOG_ERR, rv, c1, + APLOGNO(10402) "submit_response: %s", + nghttp2_strerror(rv)); + goto cleanup; + } + + if (stream->initiated_on) { + ++stream->session->pushes_submitted; + } + else { + ++stream->session->responses_submitted; + } + +cleanup: + return rv; +} + +static void stream_do_responses(h2_stream *stream) +{ + h2_session *session = stream->session; + conn_rec *c1 
= session->c1; + apr_status_t rv; + + ap_assert(!stream->response); + ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, c1, + H2_STRM_MSG(stream, "do_response")); + rv = buffer_output_receive(stream); + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, rv, c1, + H2_SSSN_STRM_MSG(session, stream->id, + "buffer_output_received2")); + if (APR_SUCCESS != rv && APR_EAGAIN != rv) { + h2_stream_rst(stream, NGHTTP2_PROTOCOL_ERROR); + } + else { + /* process all headers sitting at the buffer head. */ + do { + rv = stream_do_response(stream); + } while (APR_SUCCESS == rv + && !stream->rst_error + && !stream->response); + } +} + +void h2_stream_on_output_change(h2_stream *stream) +{ + conn_rec *c1 = stream->session->c1; + apr_status_t rv = APR_EAGAIN; + + /* stream->pout_recv_write signalled a change. Check what has happened, read + * from it and act on seeing a response/data. */ + H2_STRM_ASSERT_MAGIC(stream, H2_STRM_MAGIC_OK); + if (!stream->output) { + /* c2 has not assigned the output beam to the stream (yet). */ + ap_log_cerror(APLOG_MARK, APLOG_WARNING, 0, c1, + H2_STRM_MSG(stream, "read_output, no output beam registered")); + } + else if (h2_stream_is_at_or_past(stream, H2_SS_CLOSED)) { + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, rv, c1, + H2_STRM_LOG(APLOGNO(10301), stream, "already closed")); + } + else if (h2_stream_is_at(stream, H2_SS_CLOSED_L)) { + /* We have delivered a response to a stream that was not closed + * by the client. This could be a POST with body that we negate + * and we need to RST_STREAM to end it. */ + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, c1, + H2_STRM_LOG(APLOGNO(10313), stream, "remote close missing")); + h2_stream_rst(stream, H2_ERR_NO_ERROR); + } + else { + /* stream is not closed, a change in output happened. There are + * two modes of operation here: + * 1) the final response has been submitted. nghttp2 is invoking + * stream_data_cb() to progress the stream. This handles DATA, + * trailers, EOS and ERRORs. + * When stream_data_cb() runs out of things to send, it returns + * NGHTTP2_ERR_DEFERRED and nghttp2 *suspends* further processing + * until we tell it to resume. + * 2) We have not seen the *final* response yet. The stream can not + * send any response DATA. The nghttp2 stream_data_cb() is not + * invoked. We need to receive output, expecting not DATA but + * RESPONSEs (intermediate may arrive) and submit those. On + * the final response, nghttp2 will start calling stream_data_cb().
+ */ + if (stream->response) { + nghttp2_session_resume_data(stream->session->ngh2, stream->id); + ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, c1, + H2_STRM_MSG(stream, "resumed")); + } + else { + stream_do_responses(stream); + if (!stream->rst_error) { + nghttp2_session_resume_data(stream->session->ngh2, stream->id); + } + } + } +} + +void h2_stream_on_input_change(h2_stream *stream) +{ + H2_STRM_ASSERT_MAGIC(stream, H2_STRM_MAGIC_OK); + ap_assert(stream->input); + h2_beam_report_consumption(stream->input); + if (h2_stream_is_at(stream, H2_SS_CLOSED_L) + && !h2_mplx_c1_stream_is_running(stream->session->mplx, stream)) { + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, stream->session->c1, + H2_STRM_LOG(APLOGNO(10026), stream, "remote close missing")); + h2_stream_rst(stream, H2_ERR_NO_ERROR); + } +} diff --git a/modules/http2/h2_stream.h b/modules/http2/h2_stream.h index 7ecc0ad..405978a 100644 --- a/modules/http2/h2_stream.h +++ b/modules/http2/h2_stream.h @@ -17,7 +17,10 @@ #ifndef __mod_h2__h2_stream__ #define __mod_h2__h2_stream__ +#include + #include "h2.h" +#include "h2_headers.h" /** * A HTTP/2 stream, e.g. a client request+response in HTTP/1.1 terms. @@ -26,8 +29,8 @@ * connection to the client. The h2_session writes to the h2_stream, * adding HEADERS and DATA and finally an EOS. When headers are done, * h2_stream is scheduled for handling, which is expected to produce - * a response h2_headers at least. - * + * h2_headers/RESPONSE buckets. + * * The h2_headers may be followed by more h2_headers (interim responses) and * by DATA frames read from the h2_stream until EOS is reached. Trailers * are send when a last h2_headers is received. This always closes the stream @@ -37,9 +40,7 @@ struct h2_mplx; struct h2_priority; struct h2_request; -struct h2_headers; struct h2_session; -struct h2_task; struct h2_bucket_beam; typedef struct h2_stream h2_stream; @@ -62,7 +63,22 @@ typedef struct h2_stream_monitor { trigger a state change */ } h2_stream_monitor; +#ifdef AP_DEBUG +#define H2_STRM_MAGIC_OK 0x5354524d +#define H2_STRM_MAGIC_SDEL 0x5344454c +#define H2_STRM_MAGIC_PDEL 0x5044454c + +#define H2_STRM_ASSIGN_MAGIC(s,m) ((s)->magic = m) +#define H2_STRM_ASSERT_MAGIC(s,m) ap_assert((s)->magic == m) +#else +#define H2_STRM_ASSIGN_MAGIC(s,m) ((void)0) +#define H2_STRM_ASSERT_MAGIC(s,m) ((void)0) +#endif + struct h2_stream { +#ifdef AP_DEBUG + uint32_t magic; +#endif int id; /* http2 stream identifier */ int initiated_on; /* initiating stream id (PUSH) or 0 */ apr_pool_t *pool; /* the memory pool for this stream */ @@ -73,9 +89,16 @@ struct h2_stream { const struct h2_request *request; /* the request made in this stream */ struct h2_request *rtmp; /* request being assembled */ - apr_table_t *trailers; /* optional incoming trailers */ + apr_table_t *trailers_in; /* optional, incoming trailers */ int request_headers_added; /* number of request headers added */ - + int request_headers_failed; /* number of request headers failed to add */ + +#if AP_HAS_RESPONSE_BUCKETS + ap_bucket_response *response; /* the final, non-interim response or NULL */ +#else + struct h2_headers *response; /* the final, non-interim response or NULL */ +#endif + struct h2_bucket_beam *input; apr_bucket_brigade *in_buffer; int in_window_size; @@ -83,17 +106,16 @@ struct h2_stream { struct h2_bucket_beam *output; apr_bucket_brigade *out_buffer; - apr_size_t max_mem; /* maximum amount of data buffered */ int rst_error; /* stream error for RST_STREAM */ unsigned int aborted : 1; /* was aborted */ unsigned int scheduled : 1; /* 
stream has been scheduled */ - unsigned int has_response : 1; /* response headers are known */ - unsigned int input_eof : 1; /* no more request data coming */ - unsigned int out_checked : 1; /* output eof was double checked */ + unsigned int input_closed : 1; /* no more request data/trailers coming */ unsigned int push_policy; /* which push policy to use for this request */ - - struct h2_task *task; /* assigned task to fullfill request */ + unsigned int sent_trailers : 1; /* trailers have been submitted */ + unsigned int output_eos : 1; /* output EOS in buffer/sent */ + + conn_rec *c2; /* connection processing stream */ const h2_priority *pref_priority; /* preferred priority for this stream */ apr_off_t out_frames; /* # of frames sent out */ @@ -132,13 +154,9 @@ h2_stream *h2_stream_create(int id, apr_pool_t *pool, void h2_stream_destroy(h2_stream *stream); /** - * Prepare the stream so that processing may start. - * - * This is the time to allocated resources not needed before. - * - * @param stream the stream to prep + * Perform any late initialization before stream starts processing. */ -apr_status_t h2_stream_prep_processing(h2_stream *stream); +apr_status_t h2_stream_prepare_processing(h2_stream *stream); /* * Set a new monitor for this stream, replacing any existing one. Can @@ -153,6 +171,22 @@ void h2_stream_set_monitor(h2_stream *stream, h2_stream_monitor *monitor); */ void h2_stream_dispatch(h2_stream *stream, h2_stream_event_t ev); +/** + * Determine if stream is at given state. + * @param stream the stream to check + * @param state the state to look for + * @return != 0 iff stream is at given state. + */ +int h2_stream_is_at(const h2_stream *stream, h2_stream_state_t state); + +/** + * Determine if stream is reached given state or is past this state. + * @param stream the stream to check + * @param state the state to look for + * @return != 0 iff stream is at or past given state. + */ +int h2_stream_is_at_or_past(const h2_stream *stream, h2_stream_state_t state); + /** * Cleanup references into requst processing. * @@ -198,6 +232,10 @@ apr_status_t h2_stream_set_request_rec(h2_stream *stream, apr_status_t h2_stream_add_header(h2_stream *stream, const char *name, size_t nlen, const char *value, size_t vlen); + +/* End the construction of request headers */ +apr_status_t h2_stream_end_headers(h2_stream *stream, int eos, size_t raw_bytes); + apr_status_t h2_stream_send_frame(h2_stream *stream, int frame_type, int flags, size_t frame_len); apr_status_t h2_stream_recv_frame(h2_stream *stream, int frame_type, int flags, size_t frame_len); @@ -213,8 +251,6 @@ apr_status_t h2_stream_recv_frame(h2_stream *stream, int frame_type, int flags, apr_status_t h2_stream_recv_DATA(h2_stream *stream, uint8_t flags, const uint8_t *data, size_t len); -apr_status_t h2_stream_flush_input(h2_stream *stream); - /** * Reset the stream. Stream write/reads will return errors afterwards. * @@ -224,31 +260,16 @@ apr_status_t h2_stream_flush_input(h2_stream *stream); void h2_stream_rst(h2_stream *stream, int error_code); /** - * Determine if stream was closed already. This is true for - * states H2_SS_CLOSED, H2_SS_CLEANUP. But not true - * for H2_SS_CLOSED_L and H2_SS_CLOSED_R. - * - * @param stream the stream to check on - * @return != 0 iff stream has been closed + * Stream input signals change. Take necessary actions. 
+ * @param stream the stream to read output for */ -int h2_stream_was_closed(const h2_stream *stream); +void h2_stream_on_input_change(h2_stream *stream); /** - * Do a speculative read on the stream output to determine the - * amount of data that can be read. - * - * @param stream the stream to speculatively read from - * @param plen (in-/out) number of bytes requested and on return amount of bytes that - * may be read without blocking - * @param peos (out) != 0 iff end of stream will be reached when reading plen - * bytes (out value). - * @param presponse (out) the response of one became available - * @return APR_SUCCESS if out information was computed successfully. - * APR_EAGAIN if not data is available and end of stream has not been - * reached yet. + * Stream output signals change. Take necessary actions. + * @param stream the stream to read output for */ -apr_status_t h2_stream_out_prepare(h2_stream *stream, apr_off_t *plen, - int *peos, h2_headers **presponse); +void h2_stream_on_output_change(h2_stream *stream); /** * Read a maximum number of bytes into the bucket brigade. @@ -277,23 +298,34 @@ apr_table_t *h2_stream_get_trailers(h2_stream *stream); /** * Submit any server push promises on this stream and schedule - * the tasks connection with these. + * the streams for these. * * @param stream the stream for which to submit */ -apr_status_t h2_stream_submit_pushes(h2_stream *stream, h2_headers *response); +#if AP_HAS_RESPONSE_BUCKETS +apr_status_t h2_stream_submit_pushes(h2_stream *stream, + ap_bucket_response *response); +#else +apr_status_t h2_stream_submit_pushes(h2_stream *stream, + struct h2_headers *response); +#endif /** * Get priority information set for this stream. */ -const struct h2_priority *h2_stream_get_priority(h2_stream *stream, - h2_headers *response); +#if AP_HAS_RESPONSE_BUCKETS +const struct h2_priority *h2_stream_get_priority(h2_stream *stream, + ap_bucket_response *response); +#else +const struct h2_priority *h2_stream_get_priority(h2_stream *stream, + struct h2_headers *response); +#endif /** * Return a textual representation of the stream state as in RFC 7540 * nomenclator, all caps, underscores. 
*/ -const char *h2_stream_state_str(h2_stream *stream); +const char *h2_stream_state_str(const h2_stream *stream); /** * Determine if stream is ready for submitting a response or a RST @@ -301,8 +333,11 @@ const char *h2_stream_state_str(h2_stream *stream); */ int h2_stream_is_ready(h2_stream *stream); +int h2_stream_wants_send_data(h2_stream *stream); + #define H2_STRM_MSG(s, msg) \ - "h2_stream(%ld-%d,%s): "msg, s->session->id, s->id, h2_stream_state_str(s) + "h2_stream(%d-%lu-%d,%s): "msg, s->session->child_num, \ + (unsigned long)s->session->id, s->id, h2_stream_state_str(s) #define H2_STRM_LOG(aplogno, s, msg) aplogno H2_STRM_MSG(s, msg) diff --git a/modules/http2/h2_switch.c b/modules/http2/h2_switch.c index 5e73568..3799701 100644 --- a/modules/http2/h2_switch.c +++ b/modules/http2/h2_switch.c @@ -25,14 +25,17 @@ #include #include #include +#include #include #include "h2_private.h" +#include "h2.h" #include "h2_config.h" -#include "h2_ctx.h" -#include "h2_conn.h" -#include "h2_h2.h" +#include "h2_conn_ctx.h" +#include "h2_c1.h" +#include "h2_c2.h" +#include "h2_protocol.h" #include "h2_switch.h" /******************************************************************************* @@ -52,10 +55,9 @@ static int h2_protocol_propose(conn_rec *c, request_rec *r, apr_array_header_t *proposals) { int proposed = 0; - int is_tls = h2_h2_is_tls(c); - const char **protos = is_tls? h2_tls_protos : h2_clear_protos; + int is_tls = ap_ssl_conn_is_ssl(c); + const char **protos = is_tls? h2_protocol_ids_tls : h2_protocol_ids_clear; - (void)s; if (!h2_mpm_supported()) { return DECLINED; } @@ -68,7 +70,7 @@ static int h2_protocol_propose(conn_rec *c, request_rec *r, return DECLINED; } - if (!h2_is_acceptable_connection(c, 0)) { + if (!h2_protocol_is_acceptable_c1(c, r, 0)) { ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, c, APLOGNO(03084) "protocol propose: connection requirements not met"); return DECLINED; @@ -81,7 +83,7 @@ static int h2_protocol_propose(conn_rec *c, request_rec *r, */ const char *p; - if (!h2_allows_h2_upgrade(c)) { + if (!h2_c1_can_upgrade(r)) { return DECLINED; } @@ -102,9 +104,10 @@ static int h2_protocol_propose(conn_rec *c, request_rec *r, /* We also allow switching only for requests that have no body. */ p = apr_table_get(r->headers_in, "Content-Length"); - if (p && strcmp(p, "0")) { + if ((p && strcmp(p, "0")) + || (!p && apr_table_get(r->headers_in, "Transfer-Encoding"))) { ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(03087) - "upgrade with content-length: %s, declined", p); + "upgrade with body declined"); return DECLINED; } } @@ -124,11 +127,35 @@ static int h2_protocol_propose(conn_rec *c, request_rec *r, return proposed? DECLINED : OK; } +#if AP_HAS_RESPONSE_BUCKETS +static void remove_output_filters_below(ap_filter_t *f, ap_filter_type ftype) +{ + ap_filter_t *fnext; + + while (f && f->frec->ftype < ftype) { + fnext = f->next; + ap_remove_output_filter(f); + f = fnext; + } +} + +static void remove_input_filters_below(ap_filter_t *f, ap_filter_type ftype) +{ + ap_filter_t *fnext; + + while (f && f->frec->ftype < ftype) { + fnext = f->next; + ap_remove_input_filter(f); + f = fnext; + } +} +#endif + static int h2_protocol_switch(conn_rec *c, request_rec *r, server_rec *s, const char *protocol) { int found = 0; - const char **protos = h2_h2_is_tls(c)? h2_tls_protos : h2_clear_protos; + const char **protos = ap_ssl_conn_is_ssl(c)? 
h2_protocol_ids_tls : h2_protocol_ids_clear; const char **p = protos; (void)s; @@ -145,35 +172,41 @@ static int h2_protocol_switch(conn_rec *c, request_rec *r, server_rec *s, } if (found) { - h2_ctx *ctx = h2_ctx_get(c, 1); - ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c, "switching protocol to '%s'", protocol); - h2_ctx_protocol_set(ctx, protocol); - h2_ctx_server_set(ctx, s); - + h2_conn_ctx_create_for_c1(c, s, protocol); + if (r != NULL) { apr_status_t status; +#if AP_HAS_RESPONSE_BUCKETS + /* Switching in the middle of a request means that + * we have to send out the response to this one in h2 + * format. So we need to take over the connection + * and remove all old filters with type up to the + * CONNEDCTION/NETWORK ones. + */ + remove_input_filters_below(r->input_filters, AP_FTYPE_CONNECTION); + remove_output_filters_below(r->output_filters, AP_FTYPE_CONNECTION); +#else /* Switching in the middle of a request means that * we have to send out the response to this one in h2 * format. So we need to take over the connection * right away. */ ap_remove_input_filter_byhandle(r->input_filters, "http_in"); - ap_remove_input_filter_byhandle(r->input_filters, "reqtimeout"); ap_remove_output_filter_byhandle(r->output_filters, "HTTP_HEADER"); - +#endif /* Ok, start an h2_conn on this one. */ - h2_ctx_server_set(ctx, r->server); - status = h2_conn_setup(ctx, r->connection, r); + status = h2_c1_setup(c, r, s); + if (status != APR_SUCCESS) { ap_log_rerror(APLOG_MARK, APLOG_DEBUG, status, r, APLOGNO(03088) "session setup"); - h2_ctx_clear(c); + h2_conn_ctx_detach(c); return !OK; } - h2_conn_run(ctx, c); + h2_c1_run(c); } return OK; } @@ -183,7 +216,13 @@ static int h2_protocol_switch(conn_rec *c, request_rec *r, server_rec *s, static const char *h2_protocol_get(const conn_rec *c) { - return h2_ctx_protocol_get(c); + h2_conn_ctx_t *ctx; + + if (c->master) { + c = c->master; + } + ctx = h2_conn_ctx_get(c); + return ctx? ctx->protocol : NULL; } void h2_switch_register_hooks(void) diff --git a/modules/http2/h2_task.c b/modules/http2/h2_task.c deleted file mode 100644 index 86fb026..0000000 --- a/modules/http2/h2_task.c +++ /dev/null @@ -1,769 +0,0 @@ -/* Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include -#include - -#include -#include - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "h2_private.h" -#include "h2.h" -#include "h2_bucket_beam.h" -#include "h2_conn.h" -#include "h2_config.h" -#include "h2_ctx.h" -#include "h2_from_h1.h" -#include "h2_h2.h" -#include "h2_mplx.h" -#include "h2_request.h" -#include "h2_headers.h" -#include "h2_session.h" -#include "h2_stream.h" -#include "h2_task.h" -#include "h2_util.h" - -static void H2_TASK_OUT_LOG(int lvl, h2_task *task, apr_bucket_brigade *bb, - const char *tag) -{ - if (APLOG_C_IS_LEVEL(task->c, lvl)) { - conn_rec *c = task->c; - char buffer[4 * 1024]; - const char *line = "(null)"; - apr_size_t len, bmax = sizeof(buffer)/sizeof(buffer[0]); - - len = h2_util_bb_print(buffer, bmax, tag, "", bb); - ap_log_cerror(APLOG_MARK, lvl, 0, c, "bb_dump(%s): %s", - task->id, len? buffer : line); - } -} - -/******************************************************************************* - * task input handling - ******************************************************************************/ - -static int input_ser_header(void *ctx, const char *name, const char *value) -{ - h2_task *task = ctx; - apr_brigade_printf(task->input.bb, NULL, NULL, "%s: %s\r\n", name, value); - return 1; -} - -/******************************************************************************* - * task output handling - ******************************************************************************/ - -static apr_status_t open_output(h2_task *task) -{ - ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, task->c, APLOGNO(03348) - "h2_task(%s): open output to %s %s %s", - task->id, task->request->method, - task->request->authority, - task->request->path); - task->output.opened = 1; - return h2_mplx_out_open(task->mplx, task->stream_id, task->output.beam); -} - -static apr_status_t send_out(h2_task *task, apr_bucket_brigade* bb, int block) -{ - apr_off_t written, left; - apr_status_t status; - - apr_brigade_length(bb, 0, &written); - H2_TASK_OUT_LOG(APLOG_TRACE2, task, bb, "h2_task send_out"); - h2_beam_log(task->output.beam, task->c, APLOG_TRACE2, "send_out(before)"); - /* engines send unblocking */ - status = h2_beam_send(task->output.beam, bb, - block? APR_BLOCK_READ : APR_NONBLOCK_READ); - h2_beam_log(task->output.beam, task->c, APLOG_TRACE2, "send_out(after)"); - - if (APR_STATUS_IS_EAGAIN(status)) { - apr_brigade_length(bb, 0, &left); - written -= left; - status = APR_SUCCESS; - } - if (status == APR_SUCCESS) { - if (h2_task_logio_add_bytes_out) { - h2_task_logio_add_bytes_out(task->c, written); - } - ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, task->c, - "h2_task(%s): send_out done", task->id); - } - else { - ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, task->c, - "h2_task(%s): send_out (%ld bytes)", - task->id, (long)written); - } - return status; -} - -/* Bring the data from the brigade (which represents the result of the - * request_rec out filter chain) into the h2_mplx for further sending - * on the master connection. 
- */ -static apr_status_t slave_out(h2_task *task, ap_filter_t* f, - apr_bucket_brigade* bb) -{ - apr_bucket *b; - apr_status_t rv = APR_SUCCESS; - int flush = 0, blocking; - - if (task->frozen) { - h2_util_bb_log(task->c, task->stream_id, APLOG_TRACE2, - "frozen task output write, ignored", bb); - while (!APR_BRIGADE_EMPTY(bb)) { - b = APR_BRIGADE_FIRST(bb); - if (AP_BUCKET_IS_EOR(b)) { - APR_BUCKET_REMOVE(b); - task->eor = b; - } - else { - apr_bucket_delete(b); - } - } - return APR_SUCCESS; - } - -send: - /* we send block once we opened the output, so someone is there - * reading it *and* the task is not assigned to a h2_req_engine */ - blocking = (!task->assigned && task->output.opened); - for (b = APR_BRIGADE_FIRST(bb); - b != APR_BRIGADE_SENTINEL(bb); - b = APR_BUCKET_NEXT(b)) { - if (APR_BUCKET_IS_FLUSH(b) || APR_BUCKET_IS_EOS(b) || AP_BUCKET_IS_EOR(b)) { - flush = 1; - break; - } - } - - if (task->output.bb && !APR_BRIGADE_EMPTY(task->output.bb)) { - /* still have data buffered from previous attempt. - * setaside and append new data and try to pass the complete data */ - if (!APR_BRIGADE_EMPTY(bb)) { - if (APR_SUCCESS != (rv = ap_save_brigade(f, &task->output.bb, &bb, task->pool))) { - goto out; - } - } - rv = send_out(task, task->output.bb, blocking); - } - else { - /* no data buffered previously, pass brigade directly */ - rv = send_out(task, bb, blocking); - - if (APR_SUCCESS == rv && !APR_BRIGADE_EMPTY(bb)) { - /* output refused to buffer it all, time to open? */ - if (!task->output.opened && APR_SUCCESS == (rv = open_output(task))) { - /* Make another attempt to send the data. With the output open, - * the call might be blocking and send all data, so we do not need - * to save the brigade */ - goto send; - } - else if (blocking && flush) { - /* Need to keep on doing this. */ - goto send; - } - - if (APR_SUCCESS == rv) { - /* could not write all, buffer the rest */ - ap_log_cerror(APLOG_MARK, APLOG_DEBUG, rv, task->c, APLOGNO(03405) - "h2_slave_out(%s): saving brigade", task->id); - ap_assert(NULL); - rv = ap_save_brigade(f, &task->output.bb, &bb, task->pool); - flush = 1; - } - } - } - - if (APR_SUCCESS == rv && !task->output.opened && flush) { - /* got a flush or could not write all, time to tell someone to read */ - rv = open_output(task); - } -out: - ap_log_cerror(APLOG_MARK, APLOG_TRACE2, rv, task->c, - "h2_slave_out(%s): slave_out leave", task->id); - return rv; -} - -static apr_status_t output_finish(h2_task *task) -{ - if (!task->output.opened) { - return open_output(task); - } - return APR_SUCCESS; -} - -/******************************************************************************* - * task slave connection filters - ******************************************************************************/ - -static apr_status_t h2_filter_slave_in(ap_filter_t* f, - apr_bucket_brigade* bb, - ap_input_mode_t mode, - apr_read_type_e block, - apr_off_t readbytes) -{ - h2_task *task; - apr_status_t status = APR_SUCCESS; - apr_bucket *b, *next; - apr_off_t bblen; - const int trace1 = APLOGctrace1(f->c); - apr_size_t rmax = ((readbytes <= APR_SIZE_MAX)? 
- (apr_size_t)readbytes : APR_SIZE_MAX); - - task = h2_ctx_cget_task(f->c); - ap_assert(task); - - if (trace1) { - ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, f->c, - "h2_slave_in(%s): read, mode=%d, block=%d, readbytes=%ld", - task->id, mode, block, (long)readbytes); - } - - if (mode == AP_MODE_INIT) { - return ap_get_brigade(f->c->input_filters, bb, mode, block, readbytes); - } - - if (f->c->aborted) { - return APR_ECONNABORTED; - } - - if (!task->input.bb) { - return APR_EOF; - } - - /* Cleanup brigades from those nasty 0 length non-meta buckets - * that apr_brigade_split_line() sometimes produces. */ - for (b = APR_BRIGADE_FIRST(task->input.bb); - b != APR_BRIGADE_SENTINEL(task->input.bb); b = next) { - next = APR_BUCKET_NEXT(b); - if (b->length == 0 && !APR_BUCKET_IS_METADATA(b)) { - apr_bucket_delete(b); - } - } - - while (APR_BRIGADE_EMPTY(task->input.bb)) { - /* Get more input data for our request. */ - if (trace1) { - ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, f->c, - "h2_slave_in(%s): get more data from mplx, block=%d, " - "readbytes=%ld", task->id, block, (long)readbytes); - } - if (task->input.beam) { - status = h2_beam_receive(task->input.beam, task->input.bb, block, - 128*1024); - } - else { - status = APR_EOF; - } - - if (trace1) { - ap_log_cerror(APLOG_MARK, APLOG_TRACE2, status, f->c, - "h2_slave_in(%s): read returned", task->id); - } - if (APR_STATUS_IS_EAGAIN(status) - && (mode == AP_MODE_GETLINE || block == APR_BLOCK_READ)) { - /* chunked input handling does not seem to like it if we - * return with APR_EAGAIN from a GETLINE read... - * upload 100k test on test-ser.example.org hangs */ - status = APR_SUCCESS; - } - else if (APR_STATUS_IS_EOF(status)) { - break; - } - else if (status != APR_SUCCESS) { - return status; - } - - if (trace1) { - h2_util_bb_log(f->c, task->stream_id, APLOG_TRACE2, - "input.beam recv raw", task->input.bb); - } - if (h2_task_logio_add_bytes_in) { - apr_brigade_length(bb, 0, &bblen); - h2_task_logio_add_bytes_in(f->c, bblen); - } - } - - /* Nothing there, no more data to get. Return APR_EAGAIN on - * speculative reads, this is ap_check_pipeline()'s trick to - * see if the connection needs closing. */ - if (status == APR_EOF && APR_BRIGADE_EMPTY(task->input.bb)) { - return (mode == AP_MODE_SPECULATIVE)? APR_EAGAIN : APR_EOF; - } - - if (trace1) { - h2_util_bb_log(f->c, task->stream_id, APLOG_TRACE2, - "task_input.bb", task->input.bb); - } - - if (APR_BRIGADE_EMPTY(task->input.bb)) { - if (trace1) { - ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, f->c, - "h2_slave_in(%s): no data", task->id); - } - return (block == APR_NONBLOCK_READ)? APR_EAGAIN : APR_EOF; - } - - if (mode == AP_MODE_EXHAUSTIVE) { - /* return all we have */ - APR_BRIGADE_CONCAT(bb, task->input.bb); - } - else if (mode == AP_MODE_READBYTES) { - status = h2_brigade_concat_length(bb, task->input.bb, rmax); - } - else if (mode == AP_MODE_SPECULATIVE) { - status = h2_brigade_copy_length(bb, task->input.bb, rmax); - } - else if (mode == AP_MODE_GETLINE) { - /* we are reading a single LF line, e.g. the HTTP headers. 
- * this has the nasty side effect to split the bucket, even - * though it ends with CRLF and creates a 0 length bucket */ - status = apr_brigade_split_line(bb, task->input.bb, block, - HUGE_STRING_LEN); - if (APLOGctrace1(f->c)) { - char buffer[1024]; - apr_size_t len = sizeof(buffer)-1; - apr_brigade_flatten(bb, buffer, &len); - buffer[len] = 0; - if (trace1) { - ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, f->c, - "h2_slave_in(%s): getline: %s", - task->id, buffer); - } - } - } - else { - /* Hmm, well. There is mode AP_MODE_EATCRLF, but we chose not - * to support it. Seems to work. */ - ap_log_cerror(APLOG_MARK, APLOG_ERR, APR_ENOTIMPL, f->c, - APLOGNO(03472) - "h2_slave_in(%s), unsupported READ mode %d", - task->id, mode); - status = APR_ENOTIMPL; - } - - if (trace1) { - apr_brigade_length(bb, 0, &bblen); - ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, f->c, - "h2_slave_in(%s): %ld data bytes", task->id, (long)bblen); - } - return status; -} - -static apr_status_t h2_filter_slave_output(ap_filter_t* filter, - apr_bucket_brigade* brigade) -{ - h2_task *task = h2_ctx_cget_task(filter->c); - apr_status_t status; - - ap_assert(task); - status = slave_out(task, filter, brigade); - if (status != APR_SUCCESS) { - h2_task_rst(task, H2_ERR_INTERNAL_ERROR); - } - return status; -} - -static apr_status_t h2_filter_parse_h1(ap_filter_t* f, apr_bucket_brigade* bb) -{ - h2_task *task = h2_ctx_cget_task(f->c); - apr_status_t status; - - ap_assert(task); - /* There are cases where we need to parse a serialized http/1.1 - * response. One example is a 100-continue answer in serialized mode - * or via a mod_proxy setup */ - while (bb && !task->output.sent_response) { - status = h2_from_h1_parse_response(task, f, bb); - ap_log_cerror(APLOG_MARK, APLOG_TRACE2, status, f->c, - "h2_task(%s): parsed response", task->id); - if (APR_BRIGADE_EMPTY(bb) || status != APR_SUCCESS) { - return status; - } - } - - return ap_pass_brigade(f->next, bb); -} - -/******************************************************************************* - * task things - ******************************************************************************/ - -int h2_task_can_redo(h2_task *task) { - if (task->input.beam && h2_beam_was_received(task->input.beam)) { - /* cannot repeat that. */ - return 0; - } - return (!strcmp("GET", task->request->method) - || !strcmp("HEAD", task->request->method) - || !strcmp("OPTIONS", task->request->method)); -} - -void h2_task_redo(h2_task *task) -{ - task->rst_error = 0; -} - -void h2_task_rst(h2_task *task, int error) -{ - task->rst_error = error; - if (task->input.beam) { - h2_beam_leave(task->input.beam); - } - if (!task->worker_done) { - h2_beam_abort(task->output.beam); - } - if (task->c) { - task->c->aborted = 1; - } -} - -/******************************************************************************* - * Register various hooks - */ -static const char *const mod_ssl[] = { "mod_ssl.c", NULL}; -static int h2_task_pre_conn(conn_rec* c, void *arg); -static int h2_task_process_conn(conn_rec* c); - -APR_OPTIONAL_FN_TYPE(ap_logio_add_bytes_in) *h2_task_logio_add_bytes_in; -APR_OPTIONAL_FN_TYPE(ap_logio_add_bytes_out) *h2_task_logio_add_bytes_out; - -void h2_task_register_hooks(void) -{ - /* This hook runs on new connections before mod_ssl has a say. - * Its purpose is to prevent mod_ssl from touching our pseudo-connections - * for streams. 
- */ - ap_hook_pre_connection(h2_task_pre_conn, - NULL, mod_ssl, APR_HOOK_FIRST); - /* When the connection processing actually starts, we might - * take over, if the connection is for a task. - */ - ap_hook_process_connection(h2_task_process_conn, - NULL, NULL, APR_HOOK_FIRST); - - ap_register_input_filter("H2_SLAVE_IN", h2_filter_slave_in, - NULL, AP_FTYPE_NETWORK); - ap_register_output_filter("H2_SLAVE_OUT", h2_filter_slave_output, - NULL, AP_FTYPE_NETWORK); - ap_register_output_filter("H2_PARSE_H1", h2_filter_parse_h1, - NULL, AP_FTYPE_NETWORK); - - ap_register_input_filter("H2_REQUEST", h2_filter_request_in, - NULL, AP_FTYPE_PROTOCOL); - ap_register_output_filter("H2_RESPONSE", h2_filter_headers_out, - NULL, AP_FTYPE_PROTOCOL); - ap_register_output_filter("H2_TRAILERS_OUT", h2_filter_trailers_out, - NULL, AP_FTYPE_PROTOCOL); -} - -/* post config init */ -apr_status_t h2_task_init(apr_pool_t *pool, server_rec *s) -{ - h2_task_logio_add_bytes_in = APR_RETRIEVE_OPTIONAL_FN(ap_logio_add_bytes_in); - h2_task_logio_add_bytes_out = APR_RETRIEVE_OPTIONAL_FN(ap_logio_add_bytes_out); - - return APR_SUCCESS; -} - -static int h2_task_pre_conn(conn_rec* c, void *arg) -{ - h2_ctx *ctx; - - if (!c->master) { - return OK; - } - - ctx = h2_ctx_get(c, 0); - (void)arg; - if (h2_ctx_is_task(ctx)) { - ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, c, - "h2_h2, pre_connection, found stream task"); - ap_add_input_filter("H2_SLAVE_IN", NULL, NULL, c); - ap_add_output_filter("H2_PARSE_H1", NULL, NULL, c); - ap_add_output_filter("H2_SLAVE_OUT", NULL, NULL, c); - } - return OK; -} - -h2_task *h2_task_create(conn_rec *slave, int stream_id, - const h2_request *req, h2_mplx *m, - h2_bucket_beam *input, - apr_interval_time_t timeout, - apr_size_t output_max_mem) -{ - apr_pool_t *pool; - h2_task *task; - - ap_assert(slave); - ap_assert(req); - - apr_pool_create(&pool, slave->pool); - task = apr_pcalloc(pool, sizeof(h2_task)); - if (task == NULL) { - return NULL; - } - task->id = "000"; - task->stream_id = stream_id; - task->c = slave; - task->mplx = m; - task->pool = pool; - task->request = req; - task->timeout = timeout; - task->input.beam = input; - task->output.max_buffer = output_max_mem; - - return task; -} - -void h2_task_destroy(h2_task *task) -{ - if (task->output.beam) { - h2_beam_log(task->output.beam, task->c, APLOG_TRACE2, "task_destroy"); - h2_beam_destroy(task->output.beam); - task->output.beam = NULL; - } - - if (task->eor) { - apr_bucket_destroy(task->eor); - } - if (task->pool) { - apr_pool_destroy(task->pool); - } -} - -apr_status_t h2_task_do(h2_task *task, apr_thread_t *thread, int worker_id) -{ - conn_rec *c; - - ap_assert(task); - c = task->c; - task->worker_started = 1; - task->started_at = apr_time_now(); - - if (c->master) { - /* Each conn_rec->id is supposed to be unique at a point in time. Since - * some modules (and maybe external code) uses this id as an identifier - * for the request_rec they handle, it needs to be unique for slave - * connections also. - * The connection id is generated by the MPM and most MPMs use the formula - * id := (child_num * max_threads) + thread_num - * which means that there is a maximum id of about - * idmax := max_child_count * max_threads - * If we assume 2024 child processes with 2048 threads max, we get - * idmax ~= 2024 * 2048 = 2 ** 22 - * On 32 bit systems, we have not much space left, but on 64 bit systems - * (and higher?) we can use the upper 32 bits without fear of collision. 
- * 32 bits is just what we need, since a connection can only handle so - * many streams. - */ - int slave_id, free_bits; - - task->id = apr_psprintf(task->pool, "%ld-%d", c->master->id, - task->stream_id); - if (sizeof(unsigned long) >= 8) { - free_bits = 32; - slave_id = task->stream_id; - } - else { - /* Assume we have a more limited number of threads/processes - * and h2 workers on a 32-bit system. Use the worker instead - * of the stream id. */ - free_bits = 8; - slave_id = worker_id; - } - task->c->id = (c->master->id << free_bits)^slave_id; - c->keepalive = AP_CONN_KEEPALIVE; - } - - h2_beam_create(&task->output.beam, c->pool, task->stream_id, "output", - H2_BEAM_OWNER_SEND, 0, task->timeout); - if (!task->output.beam) { - return APR_ENOMEM; - } - - h2_beam_buffer_size_set(task->output.beam, task->output.max_buffer); - h2_beam_send_from(task->output.beam, task->pool); - - h2_ctx_create_for(c, task); - apr_table_setn(c->notes, H2_TASK_ID_NOTE, task->id); - - h2_slave_run_pre_connection(c, ap_get_conn_socket(c)); - - task->input.bb = apr_brigade_create(task->pool, c->bucket_alloc); - if (task->request->serialize) { - ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c, - "h2_task(%s): serialize request %s %s", - task->id, task->request->method, task->request->path); - apr_brigade_printf(task->input.bb, NULL, - NULL, "%s %s HTTP/1.1\r\n", - task->request->method, task->request->path); - apr_table_do(input_ser_header, task, task->request->headers, NULL); - apr_brigade_puts(task->input.bb, NULL, NULL, "\r\n"); - } - - ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c, - "h2_task(%s): process connection", task->id); - - task->c->current_thread = thread; - ap_run_process_connection(c); - - if (task->frozen) { - ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c, - "h2_task(%s): process_conn returned frozen task", - task->id); - /* cleanup delayed */ - return APR_EAGAIN; - } - else { - ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c, - "h2_task(%s): processing done", task->id); - return output_finish(task); - } -} - -static apr_status_t h2_task_process_request(h2_task *task, conn_rec *c) -{ - const h2_request *req = task->request; - conn_state_t *cs = c->cs; - request_rec *r; - - ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c, - "h2_task(%s): create request_rec", task->id); - r = h2_request_create_rec(req, c); - if (r && (r->status == HTTP_OK)) { - /* set timeouts for virtual host of request */ - if (task->timeout != r->server->timeout) { - task->timeout = r->server->timeout; - h2_beam_timeout_set(task->output.beam, task->timeout); - if (task->input.beam) { - h2_beam_timeout_set(task->input.beam, task->timeout); - } - } - - ap_update_child_status(c->sbh, SERVER_BUSY_WRITE, r); - - if (cs) { - cs->state = CONN_STATE_HANDLER; - } - ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c, - "h2_task(%s): start process_request", task->id); - - /* Add the raw bytes of the request (e.g. header frame lengths to - * the logio for this request. */ - if (req->raw_bytes && h2_task_logio_add_bytes_in) { - h2_task_logio_add_bytes_in(c, req->raw_bytes); - } - - ap_process_request(r); - - if (task->frozen) { - ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c, - "h2_task(%s): process_request frozen", task->id); - } - else { - ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c, - "h2_task(%s): process_request done", task->id); - } - - /* After the call to ap_process_request, the - * request pool may have been deleted. 
We set - * r=NULL here to ensure that any dereference - * of r that might be added later in this function - * will result in a segfault immediately instead - * of nondeterministic failures later. - */ - if (cs) - cs->state = CONN_STATE_WRITE_COMPLETION; - r = NULL; - } - else if (!r) { - ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c, - "h2_task(%s): create request_rec failed, r=NULL", task->id); - } - else { - ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c, - "h2_task(%s): create request_rec failed, r->status=%d", - task->id, r->status); - } - - return APR_SUCCESS; -} - -static int h2_task_process_conn(conn_rec* c) -{ - h2_ctx *ctx; - - if (!c->master) { - return DECLINED; - } - - ctx = h2_ctx_get(c, 0); - if (h2_ctx_is_task(ctx)) { - if (!ctx->task->request->serialize) { - ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c, - "h2_h2, processing request directly"); - h2_task_process_request(ctx->task, c); - return DONE; - } - ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c, - "h2_task(%s), serialized handling", ctx->task->id); - } - else { - ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c, - "slave_conn(%ld): has no task", c->id); - } - return DECLINED; -} - -apr_status_t h2_task_freeze(h2_task *task) -{ - if (!task->frozen) { - task->frozen = 1; - ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, task->c, APLOGNO(03406) - "h2_task(%s), frozen", task->id); - } - return APR_SUCCESS; -} - -apr_status_t h2_task_thaw(h2_task *task) -{ - if (task->frozen) { - task->frozen = 0; - ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, task->c, APLOGNO(03407) - "h2_task(%s), thawed", task->id); - } - task->thawed = 1; - return APR_SUCCESS; -} - -int h2_task_has_thawed(h2_task *task) -{ - return task->thawed; -} diff --git a/modules/http2/h2_task.h b/modules/http2/h2_task.h deleted file mode 100644 index ab6a746..0000000 --- a/modules/http2/h2_task.h +++ /dev/null @@ -1,127 +0,0 @@ -/* Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef __mod_h2__h2_task__ -#define __mod_h2__h2_task__ - -#include - -/** - * A h2_task fakes a HTTP/1.1 request from the data in a HTTP/2 stream - * (HEADER+CONT.+DATA) the module recieves. - * - * In order to answer a HTTP/2 stream, we want all Apache httpd infrastructure - * to be involved as usual, as if this stream can as a separate HTTP/1.1 - * request. The basic trickery to do so was derived from google's mod_spdy - * source. Basically, we fake a new conn_rec object, even with its own - * socket and give it to ap_process_connection(). - * - * Since h2_task instances are executed in separate threads, we may have - * different lifetimes than our h2_stream or h2_session instances. Basically, - * we would like to be as standalone as possible. 
- * - * Finally, to keep certain connection level filters, such as ourselves and - * especially mod_ssl ones, from messing with our data, we need a filter - * of our own to disble those. - */ - -struct h2_bucket_beam; -struct h2_conn; -struct h2_mplx; -struct h2_task; -struct h2_req_engine; -struct h2_request; -struct h2_response_parser; -struct h2_stream; -struct h2_worker; - -typedef struct h2_task h2_task; - -struct h2_task { - const char *id; - int stream_id; - conn_rec *c; - apr_pool_t *pool; - - const struct h2_request *request; - apr_interval_time_t timeout; - int rst_error; /* h2 related stream abort error */ - - struct { - struct h2_bucket_beam *beam; - unsigned int eos : 1; - apr_bucket_brigade *bb; - apr_bucket_brigade *bbchunk; - apr_off_t chunked_total; - } input; - struct { - struct h2_bucket_beam *beam; - unsigned int opened : 1; - unsigned int sent_response : 1; - unsigned int copy_files : 1; - struct h2_response_parser *rparser; - apr_bucket_brigade *bb; - apr_size_t max_buffer; - } output; - - struct h2_mplx *mplx; - - unsigned int filters_set : 1; - unsigned int frozen : 1; - unsigned int thawed : 1; - unsigned int worker_started : 1; /* h2_worker started processing */ - unsigned int worker_done : 1; /* h2_worker finished */ - - apr_time_t started_at; /* when processing started */ - apr_time_t done_at; /* when processing was done */ - apr_bucket *eor; - - struct h2_req_engine *engine; /* engine hosted by this task */ - struct h2_req_engine *assigned; /* engine that task has been assigned to */ -}; - -h2_task *h2_task_create(conn_rec *slave, int stream_id, - const h2_request *req, struct h2_mplx *m, - struct h2_bucket_beam *input, - apr_interval_time_t timeout, - apr_size_t output_max_mem); - -void h2_task_destroy(h2_task *task); - -apr_status_t h2_task_do(h2_task *task, apr_thread_t *thread, int worker_id); - -void h2_task_redo(h2_task *task); -int h2_task_can_redo(h2_task *task); - -/** - * Reset the task with the given error code, resets all input/output. - */ -void h2_task_rst(h2_task *task, int error); - -void h2_task_register_hooks(void); -/* - * One time, post config intialization. 
- */ -apr_status_t h2_task_init(apr_pool_t *pool, server_rec *s); - -extern APR_OPTIONAL_FN_TYPE(ap_logio_add_bytes_in) *h2_task_logio_add_bytes_in; -extern APR_OPTIONAL_FN_TYPE(ap_logio_add_bytes_out) *h2_task_logio_add_bytes_out; - -apr_status_t h2_task_freeze(h2_task *task); -apr_status_t h2_task_thaw(h2_task *task); -int h2_task_has_thawed(h2_task *task); - -#endif /* defined(__mod_h2__h2_task__) */ diff --git a/modules/http2/h2_util.c b/modules/http2/h2_util.c index 9dacd8b..8e53ceb 100644 --- a/modules/http2/h2_util.c +++ b/modules/http2/h2_util.c @@ -22,11 +22,13 @@ #include #include #include +#include #include #include #include "h2.h" +#include "h2_headers.h" #include "h2_util.h" /* h2_log2(n) iff n is a power of 2 */ @@ -55,7 +57,7 @@ unsigned char h2_log2(int n) if (!(n & 0x80000000u)) { lz += 1; } - + return 31 - lz; } @@ -75,26 +77,6 @@ size_t h2_util_hex_dump(char *buffer, size_t maxlen, return strlen(buffer); } -size_t h2_util_header_print(char *buffer, size_t maxlen, - const char *name, size_t namelen, - const char *value, size_t valuelen) -{ - size_t offset = 0; - size_t i; - for (i = 0; i < namelen && offset < maxlen; ++i, ++offset) { - buffer[offset] = name[i]; - } - for (i = 0; i < 2 && offset < maxlen; ++i, ++offset) { - buffer[offset] = ": "[i]; - } - for (i = 0; i < valuelen && offset < maxlen; ++i, ++offset) { - buffer[offset] = value[i]; - } - buffer[offset] = '\0'; - return offset; -} - - void h2_util_camel_case_header(char *s, size_t len) { size_t start = 1; @@ -104,7 +86,7 @@ void h2_util_camel_case_header(char *s, size_t len) if (s[i] >= 'a' && s[i] <= 'z') { s[i] -= 'a' - 'A'; } - + start = 0; } else if (s[i] == '-') { @@ -113,16 +95,16 @@ void h2_util_camel_case_header(char *s, size_t len) } } -/* base64 url encoding ****************************************************************************/ +/* base64 url encoding */ #define N6 (unsigned int)-1 static const unsigned int BASE64URL_UINT6[] = { /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */ N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, /* 0 */ - N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, /* 1 */ + N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, /* 1 */ N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, 62, N6, N6, /* 2 */ - 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, N6, N6, N6, N6, N6, N6, /* 3 */ + 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, N6, N6, N6, N6, N6, N6, /* 3 */ N6, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, /* 4 */ 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, N6, N6, N6, N6, 63, /* 5 */ N6, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, /* 6 */ @@ -148,7 +130,7 @@ static const unsigned char BASE64URL_CHARS[] = { #define BASE64URL_CHAR(x) BASE64URL_CHARS[ (unsigned int)(x) & 0x3fu ] -apr_size_t h2_util_base64url_decode(const char **decoded, const char *encoded, +apr_size_t h2_util_base64url_decode(const char **decoded, const char *encoded, apr_pool_t *pool) { const unsigned char *e = (const unsigned char *)encoded; @@ -156,14 +138,14 @@ apr_size_t h2_util_base64url_decode(const char **decoded, const char *encoded, unsigned char *d; unsigned int n; long len, mlen, remain, i; - + while (*p && BASE64URL_UINT6[ *p ] != N6) { ++p; } len = (int)(p - e); mlen = (len/4)*4; *decoded = apr_pcalloc(pool, (apr_size_t)len + 1); - + i = 0; d = (unsigned char*)*decoded; for (; i < mlen; i += 4) { @@ -197,14 +179,14 @@ apr_size_t h2_util_base64url_decode(const char **decoded, const char *encoded, return (apr_size_t)(mlen/4*3 + remain); } -const char 
*h2_util_base64url_encode(const char *data, +const char *h2_util_base64url_encode(const char *data, apr_size_t dlen, apr_pool_t *pool) { int i, len = (int)dlen; apr_size_t slen = ((dlen+2)/3)*4 + 1; /* 0 terminated */ const unsigned char *udata = (const unsigned char*)data; unsigned char *enc, *p = apr_pcalloc(pool, slen); - + enc = p; for (i = 0; i < len-2; i+= 3) { *p++ = BASE64URL_CHAR( (udata[i] >> 2) ); @@ -212,7 +194,7 @@ const char *h2_util_base64url_encode(const char *data, *p++ = BASE64URL_CHAR( (udata[i+1] << 2) + (udata[i+2] >> 6) ); *p++ = BASE64URL_CHAR( (udata[i+2]) ); } - + if (i < len) { *p++ = BASE64URL_CHAR( (udata[i] >> 2) ); if (i == (len - 1)) { @@ -248,7 +230,7 @@ h2_ihash_t *h2_ihash_create(apr_pool_t *pool, size_t offset_of_int) return ih; } -size_t h2_ihash_count(h2_ihash_t *ih) +unsigned int h2_ihash_count(h2_ihash_t *ih) { return apr_hash_count(ih->hash); } @@ -268,7 +250,7 @@ typedef struct { void *ctx; } iter_ctx; -static int ihash_iter(void *ctx, const void *key, apr_ssize_t klen, +static int ihash_iter(void *ctx, const void *key, apr_ssize_t klen, const void *val) { iter_ctx *ictx = ctx; @@ -326,7 +308,7 @@ size_t h2_ihash_shift(h2_ihash_t *ih, void **buffer, size_t max) { collect_ctx ctx; size_t i; - + ctx.ih = ih; ctx.buffer = buffer; ctx.max = max; @@ -344,19 +326,17 @@ size_t h2_ihash_shift(h2_ihash_t *ih, void **buffer, size_t max) static void iq_grow(h2_iqueue *q, int nlen); static void iq_swap(h2_iqueue *q, int i, int j); -static int iq_bubble_up(h2_iqueue *q, int i, int top, +static int iq_bubble_up(h2_iqueue *q, int i, int top, h2_iq_cmp *cmp, void *ctx); -static int iq_bubble_down(h2_iqueue *q, int i, int bottom, +static int iq_bubble_down(h2_iqueue *q, int i, int bottom, h2_iq_cmp *cmp, void *ctx); h2_iqueue *h2_iq_create(apr_pool_t *pool, int capacity) { h2_iqueue *q = apr_pcalloc(pool, sizeof(h2_iqueue)); - if (q) { - q->pool = pool; - iq_grow(q, capacity); - q->nelts = 0; - } + q->pool = pool; + iq_grow(q, capacity); + q->nelts = 0; return q; } @@ -374,7 +354,7 @@ int h2_iq_count(h2_iqueue *q) int h2_iq_add(h2_iqueue *q, int sid, h2_iq_cmp *cmp, void *ctx) { int i; - + if (h2_iq_contains(q, sid)) { return 0; } @@ -384,7 +364,7 @@ int h2_iq_add(h2_iqueue *q, int sid, h2_iq_cmp *cmp, void *ctx) i = (q->head + q->nelts) % q->nalloc; q->elts[i] = sid; ++q->nelts; - + if (cmp) { /* bubble it to the front of the queue */ iq_bubble_up(q, i, q->head, cmp, ctx); @@ -405,7 +385,7 @@ int h2_iq_remove(h2_iqueue *q, int sid) break; } } - + if (i < q->nelts) { ++i; for (; i < q->nelts; ++i) { @@ -425,23 +405,23 @@ void h2_iq_clear(h2_iqueue *q) void h2_iq_sort(h2_iqueue *q, h2_iq_cmp *cmp, void *ctx) { /* Assume that changes in ordering are minimal. This needs, - * best case, q->nelts - 1 comparisions to check that nothing + * best case, q->nelts - 1 comparisons to check that nothing * changed. */ if (q->nelts > 0) { int i, ni, prev, last; - + /* Start at the end of the queue and create a tail of sorted * entries. Make that tail one element longer in each iteration. */ last = i = (q->head + q->nelts - 1) % q->nalloc; while (i != q->head) { prev = (q->nalloc + i - 1) % q->nalloc; - + ni = iq_bubble_up(q, i, prev, cmp, ctx); if (ni == prev) { /* i bubbled one up, bubble the new i down, which - * keeps all tasks below i sorted. */ + * keeps all ints below i sorted. 
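
For orientation, a minimal usage sketch of the int queue as a scheduler would drive it, built only from the h2_iq_* signatures visible in this file; the comparator cmp_by_prio, the table prio_by_sid and the demo() wrapper are made up for illustration and are not part of mod_http2:

#include <apr_pools.h>
#include "h2_util.h"

/* Illustrative comparator: smaller value = scheduled earlier. */
static int cmp_by_prio(int sid1, int sid2, void *ctx)
{
    const int *prio_by_sid = ctx;      /* hypothetical priority lookup */
    return prio_by_sid[sid1] - prio_by_sid[sid2];
}

static void demo(apr_pool_t *pool)
{
    int prio_by_sid[16] = { 0 };
    h2_iqueue *q = h2_iq_create(pool, 4);     /* grows on demand */
    int sid;

    prio_by_sid[1] = 3; prio_by_sid[3] = 1; prio_by_sid[5] = 2;
    h2_iq_add(q, 1, cmp_by_prio, prio_by_sid);
    h2_iq_add(q, 3, cmp_by_prio, prio_by_sid);
    h2_iq_add(q, 5, cmp_by_prio, prio_by_sid);

    prio_by_sid[1] = 0;                       /* priority changed later */
    h2_iq_sort(q, cmp_by_prio, prio_by_sid);  /* cheap if order barely moved */

    /* shifts 1, then 3, then 5; returns 0 once the queue is empty */
    while ((sid = h2_iq_shift(q)) != 0) {
        /* schedule stream `sid` ... */
    }
}
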
*/ iq_bubble_down(q, i, last, cmp, ctx); } i = prev; @@ -453,21 +433,21 @@ void h2_iq_sort(h2_iqueue *q, h2_iq_cmp *cmp, void *ctx) int h2_iq_shift(h2_iqueue *q) { int sid; - + if (q->nelts <= 0) { return 0; } - + sid = q->elts[q->head]; q->head = (q->head + 1) % q->nalloc; q->nelts--; - + return sid; } size_t h2_iq_mshift(h2_iqueue *q, int *pint, size_t max) { - int i; + size_t i; for (i = 0; i < max; ++i) { pint[i] = h2_iq_shift(q); if (pint[i] == 0) { @@ -483,7 +463,7 @@ static void iq_grow(h2_iqueue *q, int nlen) int *nq = apr_pcalloc(q->pool, sizeof(int) * nlen); if (q->nelts > 0) { int l = ((q->head + q->nelts) % q->nalloc) - q->head; - + memmove(nq, q->elts + q->head, sizeof(int) * l); if (l < q->nelts) { /* elts wrapped, append elts in [0, remain] to nq */ @@ -504,11 +484,11 @@ static void iq_swap(h2_iqueue *q, int i, int j) q->elts[j] = x; } -static int iq_bubble_up(h2_iqueue *q, int i, int top, - h2_iq_cmp *cmp, void *ctx) +static int iq_bubble_up(h2_iqueue *q, int i, int top, + h2_iq_cmp *cmp, void *ctx) { int prev; - while (((prev = (q->nalloc + i - 1) % q->nalloc), i != top) + while (((prev = (q->nalloc + i - 1) % q->nalloc), i != top) && (*cmp)(q->elts[i], q->elts[prev], ctx) < 0) { iq_swap(q, prev, i); i = prev; @@ -516,11 +496,11 @@ static int iq_bubble_up(h2_iqueue *q, int i, int top, return i; } -static int iq_bubble_down(h2_iqueue *q, int i, int bottom, +static int iq_bubble_down(h2_iqueue *q, int i, int bottom, h2_iq_cmp *cmp, void *ctx) { int next; - while (((next = (q->nalloc + i + 1) % q->nalloc), i != bottom) + while (((next = (q->nalloc + i + 1) % q->nalloc), i != bottom) && (*cmp)(q->elts[i], q->elts[next], ctx) > 0) { iq_swap(q, next, i); i = next; @@ -545,9 +525,10 @@ int h2_iq_contains(h2_iqueue *q, int sid) struct h2_fifo { void **elems; - int nelems; + int capacity; int set; - int head; + int in; + int out; int count; int aborted; apr_thread_mutex_t *lock; @@ -555,12 +536,7 @@ struct h2_fifo { apr_thread_cond_t *not_full; }; -static int nth_index(h2_fifo *fifo, int n) -{ - return (fifo->head + n) % fifo->nelems; -} - -static apr_status_t fifo_destroy(void *data) +static apr_status_t fifo_destroy(void *data) { h2_fifo *fifo = data; @@ -574,21 +550,21 @@ static apr_status_t fifo_destroy(void *data) static int index_of(h2_fifo *fifo, void *elem) { int i; - - for (i = 0; i < fifo->count; ++i) { - if (elem == fifo->elems[nth_index(fifo, i)]) { + + for (i = fifo->out; i != fifo->in; i = (i + 1) % fifo->capacity) { + if (elem == fifo->elems[i]) { return i; } } return -1; } -static apr_status_t create_int(h2_fifo **pfifo, apr_pool_t *pool, +static apr_status_t create_int(h2_fifo **pfifo, apr_pool_t *pool, int capacity, int as_set) { apr_status_t rv; h2_fifo *fifo; - + fifo = apr_pcalloc(pool, sizeof(*fifo)); if (fifo == NULL) { return APR_ENOMEM; @@ -614,9 +590,9 @@ static apr_status_t create_int(h2_fifo **pfifo, apr_pool_t *pool, if (fifo->elems == NULL) { return APR_ENOMEM; } - fifo->nelems = capacity; + fifo->capacity = capacity; fifo->set = as_set; - + *pfifo = fifo; apr_pool_cleanup_register(pool, fifo, fifo_destroy, apr_pool_cleanup_null); @@ -638,15 +614,6 @@ apr_status_t h2_fifo_term(h2_fifo *fifo) apr_status_t rv; if ((rv = apr_thread_mutex_lock(fifo->lock)) == APR_SUCCESS) { fifo->aborted = 1; - apr_thread_mutex_unlock(fifo->lock); - } - return rv; -} - -apr_status_t h2_fifo_interrupt(h2_fifo *fifo) -{ - apr_status_t rv; - if ((rv = apr_thread_mutex_lock(fifo->lock)) == APR_SUCCESS) { apr_thread_cond_broadcast(fifo->not_empty); 
apr_thread_cond_broadcast(fifo->not_full); apr_thread_mutex_unlock(fifo->lock); @@ -656,7 +623,12 @@ apr_status_t h2_fifo_interrupt(h2_fifo *fifo) int h2_fifo_count(h2_fifo *fifo) { - return fifo->count; + int n; + + apr_thread_mutex_lock(fifo->lock); + n = fifo->count; + apr_thread_mutex_unlock(fifo->lock); + return n; } static apr_status_t check_not_empty(h2_fifo *fifo, int block) @@ -683,9 +655,9 @@ static apr_status_t fifo_push_int(h2_fifo *fifo, void *elem, int block) /* set mode, elem already member */ return APR_EEXIST; } - else if (fifo->count == fifo->nelems) { + else if (fifo->count == fifo->capacity) { if (block) { - while (fifo->count == fifo->nelems) { + while (fifo->count == fifo->capacity) { if (fifo->aborted) { return APR_EOF; } @@ -696,12 +668,14 @@ static apr_status_t fifo_push_int(h2_fifo *fifo, void *elem, int block) return APR_EAGAIN; } } - - ap_assert(fifo->count < fifo->nelems); - fifo->elems[nth_index(fifo, fifo->count)] = elem; + + fifo->elems[fifo->in++] = elem; + if (fifo->in >= fifo->capacity) { + fifo->in -= fifo->capacity; + } ++fifo->count; if (fifo->count == 1) { - apr_thread_cond_broadcast(fifo->not_empty); + apr_thread_cond_signal(fifo->not_empty); } return APR_SUCCESS; } @@ -709,10 +683,6 @@ static apr_status_t fifo_push_int(h2_fifo *fifo, void *elem, int block) static apr_status_t fifo_push(h2_fifo *fifo, void *elem, int block) { apr_status_t rv; - - if (fifo->aborted) { - return APR_EOF; - } if ((rv = apr_thread_mutex_lock(fifo->lock)) == APR_SUCCESS) { rv = fifo_push_int(fifo, elem, block); @@ -734,18 +704,20 @@ apr_status_t h2_fifo_try_push(h2_fifo *fifo, void *elem) static apr_status_t pull_head(h2_fifo *fifo, void **pelem, int block) { apr_status_t rv; - + int was_full; + if ((rv = check_not_empty(fifo, block)) != APR_SUCCESS) { *pelem = NULL; return rv; } - *pelem = fifo->elems[fifo->head]; + *pelem = fifo->elems[fifo->out++]; + if (fifo->out >= fifo->capacity) { + fifo->out -= fifo->capacity; + } + was_full = (fifo->count == fifo->capacity); --fifo->count; - if (fifo->count > 0) { - fifo->head = nth_index(fifo, 1); - if (fifo->count+1 == fifo->nelems) { - apr_thread_cond_broadcast(fifo->not_full); - } + if (was_full) { + apr_thread_cond_broadcast(fifo->not_full); } return APR_SUCCESS; } @@ -753,11 +725,7 @@ static apr_status_t pull_head(h2_fifo *fifo, void **pelem, int block) static apr_status_t fifo_pull(h2_fifo *fifo, void **pelem, int block) { apr_status_t rv; - - if (fifo->aborted) { - return APR_EOF; - } - + if ((rv = apr_thread_mutex_lock(fifo->lock)) == APR_SUCCESS) { rv = pull_head(fifo, pelem, block); apr_thread_mutex_unlock(fifo->lock); @@ -779,11 +747,11 @@ static apr_status_t fifo_peek(h2_fifo *fifo, h2_fifo_peek_fn *fn, void *ctx, int { apr_status_t rv; void *elem; - + if (fifo->aborted) { return APR_EOF; } - + if (APR_SUCCESS == (rv = apr_thread_mutex_lock(fifo->lock))) { if (APR_SUCCESS == (rv = pull_head(fifo, &elem, block))) { switch (fn(elem, ctx)) { @@ -812,28 +780,58 @@ apr_status_t h2_fifo_try_peek(h2_fifo *fifo, h2_fifo_peek_fn *fn, void *ctx) apr_status_t h2_fifo_remove(h2_fifo *fifo, void *elem) { apr_status_t rv; - + if (fifo->aborted) { return APR_EOF; } if ((rv = apr_thread_mutex_lock(fifo->lock)) == APR_SUCCESS) { - int i, rc; - void *e; - - rc = 0; - for (i = 0; i < fifo->count; ++i) { - e = fifo->elems[nth_index(fifo, i)]; - if (e == elem) { - ++rc; - } - else if (rc) { - fifo->elems[nth_index(fifo, i-rc)] = e; + int i, last_count = fifo->count; + + for (i = fifo->out; i != fifo->in; i = (i + 1) % fifo->capacity) { + 
if (fifo->elems[i] == elem) { + --fifo->count; + if (fifo->count == 0) { + fifo->out = fifo->in = 0; + } + else if (i == fifo->out) { + /* first element */ + ++fifo->out; + if (fifo->out >= fifo->capacity) { + fifo->out -= fifo->capacity; + } + } + else if (((i + 1) % fifo->capacity) == fifo->in) { + /* last element */ + --fifo->in; + if (fifo->in < 0) { + fifo->in += fifo->capacity; + } + } + else if (i > fifo->out) { + /* between out and in/capacity, move elements below up */ + memmove(&fifo->elems[fifo->out+1], &fifo->elems[fifo->out], + (i - fifo->out) * sizeof(void*)); + ++fifo->out; + if (fifo->out >= fifo->capacity) { + fifo->out -= fifo->capacity; + } + } + else { + /* we wrapped around, move elements above down */ + AP_DEBUG_ASSERT((fifo->in - i - 1) > 0); + AP_DEBUG_ASSERT((fifo->in - i - 1) < fifo->capacity); + memmove(&fifo->elems[i], &fifo->elems[i + 1], + (fifo->in - i - 1) * sizeof(void*)); + --fifo->in; + if (fifo->in < 0) { + fifo->in += fifo->capacity; + } + } } } - if (rc) { - fifo->count -= rc; - if (fifo->count + rc == fifo->nelems) { + if (fifo->count != last_count) { + if (last_count == fifo->capacity) { apr_thread_cond_broadcast(fifo->not_full); } rv = APR_SUCCESS; @@ -841,7 +839,7 @@ apr_status_t h2_fifo_remove(h2_fifo *fifo, void *elem) else { rv = APR_EAGAIN; } - + apr_thread_mutex_unlock(fifo->lock); } return rv; @@ -853,7 +851,7 @@ apr_status_t h2_fifo_remove(h2_fifo *fifo, void *elem) struct h2_ififo { int *elems; - int nelems; + int capacity; int set; int head; int count; @@ -863,12 +861,12 @@ struct h2_ififo { apr_thread_cond_t *not_full; }; -static int inth_index(h2_ififo *fifo, int n) +static int inth_index(h2_ififo *fifo, int n) { - return (fifo->head + n) % fifo->nelems; + return (fifo->head + n) % fifo->capacity; } -static apr_status_t ififo_destroy(void *data) +static apr_status_t ififo_destroy(void *data) { h2_ififo *fifo = data; @@ -882,7 +880,7 @@ static apr_status_t ififo_destroy(void *data) static int iindex_of(h2_ififo *fifo, int id) { int i; - + for (i = 0; i < fifo->count; ++i) { if (id == fifo->elems[inth_index(fifo, i)]) { return i; @@ -891,12 +889,12 @@ static int iindex_of(h2_ififo *fifo, int id) return -1; } -static apr_status_t icreate_int(h2_ififo **pfifo, apr_pool_t *pool, +static apr_status_t icreate_int(h2_ififo **pfifo, apr_pool_t *pool, int capacity, int as_set) { apr_status_t rv; h2_ififo *fifo; - + fifo = apr_pcalloc(pool, sizeof(*fifo)); if (fifo == NULL) { return APR_ENOMEM; @@ -922,9 +920,9 @@ static apr_status_t icreate_int(h2_ififo **pfifo, apr_pool_t *pool, if (fifo->elems == NULL) { return APR_ENOMEM; } - fifo->nelems = capacity; + fifo->capacity = capacity; fifo->set = as_set; - + *pfifo = fifo; apr_pool_cleanup_register(pool, fifo, ififo_destroy, apr_pool_cleanup_null); @@ -946,15 +944,6 @@ apr_status_t h2_ififo_term(h2_ififo *fifo) apr_status_t rv; if ((rv = apr_thread_mutex_lock(fifo->lock)) == APR_SUCCESS) { fifo->aborted = 1; - apr_thread_mutex_unlock(fifo->lock); - } - return rv; -} - -apr_status_t h2_ififo_interrupt(h2_ififo *fifo) -{ - apr_status_t rv; - if ((rv = apr_thread_mutex_lock(fifo->lock)) == APR_SUCCESS) { apr_thread_cond_broadcast(fifo->not_empty); apr_thread_cond_broadcast(fifo->not_full); apr_thread_mutex_unlock(fifo->lock); @@ -991,9 +980,9 @@ static apr_status_t ififo_push_int(h2_ififo *fifo, int id, int block) /* set mode, elem already member */ return APR_EEXIST; } - else if (fifo->count == fifo->nelems) { + else if (fifo->count == fifo->capacity) { if (block) { - while (fifo->count == 
fifo->nelems) { + while (fifo->count == fifo->capacity) { if (fifo->aborted) { return APR_EOF; } @@ -1004,8 +993,8 @@ static apr_status_t ififo_push_int(h2_ififo *fifo, int id, int block) return APR_EAGAIN; } } - - ap_assert(fifo->count < fifo->nelems); + + ap_assert(fifo->count < fifo->capacity); fifo->elems[inth_index(fifo, fifo->count)] = id; ++fifo->count; if (fifo->count == 1) { @@ -1017,10 +1006,6 @@ static apr_status_t ififo_push_int(h2_ififo *fifo, int id, int block) static apr_status_t ififo_push(h2_ififo *fifo, int id, int block) { apr_status_t rv; - - if (fifo->aborted) { - return APR_EOF; - } if ((rv = apr_thread_mutex_lock(fifo->lock)) == APR_SUCCESS) { rv = ififo_push_int(fifo, id, block); @@ -1042,7 +1027,7 @@ apr_status_t h2_ififo_try_push(h2_ififo *fifo, int id) static apr_status_t ipull_head(h2_ififo *fifo, int *pi, int block) { apr_status_t rv; - + if ((rv = icheck_not_empty(fifo, block)) != APR_SUCCESS) { *pi = 0; return rv; @@ -1051,7 +1036,7 @@ static apr_status_t ipull_head(h2_ififo *fifo, int *pi, int block) --fifo->count; if (fifo->count > 0) { fifo->head = inth_index(fifo, 1); - if (fifo->count+1 == fifo->nelems) { + if (fifo->count+1 == fifo->capacity) { apr_thread_cond_broadcast(fifo->not_full); } } @@ -1061,11 +1046,7 @@ static apr_status_t ipull_head(h2_ififo *fifo, int *pi, int block) static apr_status_t ififo_pull(h2_ififo *fifo, int *pi, int block) { apr_status_t rv; - - if (fifo->aborted) { - return APR_EOF; - } - + if ((rv = apr_thread_mutex_lock(fifo->lock)) == APR_SUCCESS) { rv = ipull_head(fifo, pi, block); apr_thread_mutex_unlock(fifo->lock); @@ -1087,11 +1068,7 @@ static apr_status_t ififo_peek(h2_ififo *fifo, h2_ififo_peek_fn *fn, void *ctx, { apr_status_t rv; int id; - - if (fifo->aborted) { - return APR_EOF; - } - + if (APR_SUCCESS == (rv = apr_thread_mutex_lock(fifo->lock))) { if (APR_SUCCESS == (rv = ipull_head(fifo, &id, block))) { switch (fn(id, ctx)) { @@ -1117,39 +1094,40 @@ apr_status_t h2_ififo_try_peek(h2_ififo *fifo, h2_ififo_peek_fn *fn, void *ctx) return ififo_peek(fifo, fn, ctx, 0); } -apr_status_t h2_ififo_remove(h2_ififo *fifo, int id) +static apr_status_t ififo_remove(h2_ififo *fifo, int id) { - apr_status_t rv; - + int rc, i; + if (fifo->aborted) { return APR_EOF; } - if ((rv = apr_thread_mutex_lock(fifo->lock)) == APR_SUCCESS) { - int i, rc; - int e; - - rc = 0; - for (i = 0; i < fifo->count; ++i) { - e = fifo->elems[inth_index(fifo, i)]; - if (e == id) { - ++rc; - } - else if (rc) { - fifo->elems[inth_index(fifo, i-rc)] = e; - } - } - if (rc) { - fifo->count -= rc; - if (fifo->count + rc == fifo->nelems) { - apr_thread_cond_broadcast(fifo->not_full); - } - rv = APR_SUCCESS; + rc = 0; + for (i = 0; i < fifo->count; ++i) { + int e = fifo->elems[inth_index(fifo, i)]; + if (e == id) { + ++rc; } - else { - rv = APR_EAGAIN; + else if (rc) { + fifo->elems[inth_index(fifo, i-rc)] = e; } - + } + if (!rc) { + return APR_EAGAIN; + } + fifo->count -= rc; + if (fifo->count + rc == fifo->capacity) { + apr_thread_cond_broadcast(fifo->not_full); + } + return APR_SUCCESS; +} + +apr_status_t h2_ififo_remove(h2_ififo *fifo, int id) +{ + apr_status_t rv; + + if ((rv = apr_thread_mutex_lock(fifo->lock)) == APR_SUCCESS) { + rv = ififo_remove(fifo, id); apr_thread_mutex_unlock(fifo->lock); } return rv; @@ -1158,7 +1136,7 @@ apr_status_t h2_ififo_remove(h2_ififo *fifo, int id) /******************************************************************************* * h2_util for apt_table_t 
******************************************************************************/ - + typedef struct { apr_size_t bytes; apr_size_t pair_extra; @@ -1180,7 +1158,7 @@ static int count_bytes(void *x, const char *key, const char *value) apr_size_t h2_util_table_bytes(apr_table_t *t, apr_size_t pair_extra) { table_bytes_ctx ctx; - + ctx.bytes = 0; ctx.pair_extra = pair_extra; apr_table_do(count_bytes, &ctx, t, NULL); @@ -1192,287 +1170,108 @@ apr_size_t h2_util_table_bytes(apr_table_t *t, apr_size_t pair_extra) * h2_util for bucket brigades ******************************************************************************/ -static apr_status_t last_not_included(apr_bucket_brigade *bb, - apr_off_t maxlen, - int same_alloc, - apr_size_t *pfile_buckets_allowed, - apr_bucket **pend) +static void fit_bucket_into(apr_bucket *b, apr_off_t *plen) { - apr_bucket *b; - apr_status_t status = APR_SUCCESS; - int files_allowed = pfile_buckets_allowed? (int)*pfile_buckets_allowed : 0; - - if (maxlen >= 0) { - /* Find the bucket, up to which we reach maxlen/mem bytes */ - for (b = APR_BRIGADE_FIRST(bb); - (b != APR_BRIGADE_SENTINEL(bb)); - b = APR_BUCKET_NEXT(b)) { - - if (APR_BUCKET_IS_METADATA(b)) { - /* included */ - } - else { - if (b->length == ((apr_size_t)-1)) { - const char *ign; - apr_size_t ilen; - status = apr_bucket_read(b, &ign, &ilen, APR_BLOCK_READ); - if (status != APR_SUCCESS) { - return status; - } - } - - if (maxlen == 0 && b->length > 0) { - *pend = b; - return status; - } - - if (same_alloc && APR_BUCKET_IS_FILE(b)) { - /* we like it move it, always */ - } - else if (files_allowed > 0 && APR_BUCKET_IS_FILE(b)) { - /* this has no memory footprint really unless - * it is read, disregard it in length count, - * unless we do not move the file buckets */ - --files_allowed; - } - else if (maxlen < (apr_off_t)b->length) { - apr_bucket_split(b, (apr_size_t)maxlen); - maxlen = 0; - } - else { - maxlen -= b->length; - } - } - } + /* signed apr_off_t is at least as large as unsigned apr_size_t. + * Problems may arise when they are both the same size. Then + * the bucket length *may* be larger than a value we can hold + * in apr_off_t. Before casting b->length to apr_off_t we must + * check the limitations. + * After we resized the bucket, it is safe to cast and subtract.
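+ * For example (illustrative values only): with *plen == 100, a bucket of + * length 150 is split at 100 and *plen ends up 0; a bucket of length 60 + * is left intact and *plen drops to 40.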
+ */ + if ((sizeof(apr_off_t) == sizeof(apr_int64_t) + && b->length > APR_INT64_MAX) + || (sizeof(apr_off_t) == sizeof(apr_int32_t) + && b->length > APR_INT32_MAX) + || *plen < (apr_off_t)b->length) { + /* bucket is longer the *plen */ + apr_bucket_split(b, *plen); } - *pend = APR_BRIGADE_SENTINEL(bb); - return status; + *plen -= (apr_off_t)b->length; } -apr_status_t h2_brigade_concat_length(apr_bucket_brigade *dest, +apr_status_t h2_brigade_concat_length(apr_bucket_brigade *dest, apr_bucket_brigade *src, apr_off_t length) { apr_bucket *b; apr_off_t remain = length; apr_status_t status = APR_SUCCESS; - + while (!APR_BRIGADE_EMPTY(src)) { - b = APR_BRIGADE_FIRST(src); - + b = APR_BRIGADE_FIRST(src); + if (APR_BUCKET_IS_METADATA(b)) { APR_BUCKET_REMOVE(b); APR_BRIGADE_INSERT_TAIL(dest, b); } else { - if (remain == b->length) { - /* fall through */ - } - else if (remain <= 0) { + if (remain <= 0) { return status; } - else { - if (b->length == ((apr_size_t)-1)) { - const char *ign; - apr_size_t ilen; - status = apr_bucket_read(b, &ign, &ilen, APR_BLOCK_READ); - if (status != APR_SUCCESS) { - return status; - } - } - - if (remain < b->length) { - apr_bucket_split(b, remain); + if (b->length == ((apr_size_t)-1)) { + const char *ign; + apr_size_t ilen; + status = apr_bucket_read(b, &ign, &ilen, APR_BLOCK_READ); + if (status != APR_SUCCESS) { + return status; } } + fit_bucket_into(b, &remain); APR_BUCKET_REMOVE(b); APR_BRIGADE_INSERT_TAIL(dest, b); - remain -= b->length; } } return status; } -apr_status_t h2_brigade_copy_length(apr_bucket_brigade *dest, +apr_status_t h2_brigade_copy_length(apr_bucket_brigade *dest, apr_bucket_brigade *src, apr_off_t length) { apr_bucket *b, *next; apr_off_t remain = length; apr_status_t status = APR_SUCCESS; - - for (b = APR_BRIGADE_FIRST(src); + + for (b = APR_BRIGADE_FIRST(src); b != APR_BRIGADE_SENTINEL(src); b = next) { next = APR_BUCKET_NEXT(b); - + if (APR_BUCKET_IS_METADATA(b)) { /* fall through */ } else { - if (remain == b->length) { - /* fall through */ - } - else if (remain <= 0) { + if (remain <= 0) { return status; } - else { - if (b->length == ((apr_size_t)-1)) { - const char *ign; - apr_size_t ilen; - status = apr_bucket_read(b, &ign, &ilen, APR_BLOCK_READ); - if (status != APR_SUCCESS) { - return status; - } - } - - if (remain < b->length) { - apr_bucket_split(b, remain); + if (b->length == ((apr_size_t)-1)) { + const char *ign; + apr_size_t ilen; + status = apr_bucket_read(b, &ign, &ilen, APR_BLOCK_READ); + if (status != APR_SUCCESS) { + return status; } } + fit_bucket_into(b, &remain); } status = apr_bucket_copy(b, &b); if (status != APR_SUCCESS) { return status; } APR_BRIGADE_INSERT_TAIL(dest, b); - remain -= b->length; - } - return status; -} - -int h2_util_has_eos(apr_bucket_brigade *bb, apr_off_t len) -{ - apr_bucket *b, *end; - - apr_status_t status = last_not_included(bb, len, 0, 0, &end); - if (status != APR_SUCCESS) { - return status; - } - - for (b = APR_BRIGADE_FIRST(bb); - b != APR_BRIGADE_SENTINEL(bb) && b != end; - b = APR_BUCKET_NEXT(b)) - { - if (APR_BUCKET_IS_EOS(b)) { - return 1; - } - } - return 0; -} - -apr_status_t h2_util_bb_avail(apr_bucket_brigade *bb, - apr_off_t *plen, int *peos) -{ - apr_status_t status; - apr_off_t blen = 0; - - /* test read to determine available length */ - status = apr_brigade_length(bb, 1, &blen); - if (status != APR_SUCCESS) { - return status; - } - else if (blen == 0) { - /* brigade without data, does it have an EOS bucket somwhere? 
*/ - *plen = 0; - *peos = h2_util_has_eos(bb, -1); - } - else { - /* data in the brigade, limit the length returned. Check for EOS - * bucket only if we indicate data. This is required since plen == 0 - * means "the whole brigade" for h2_util_hash_eos() - */ - if (blen < *plen || *plen < 0) { - *plen = blen; - } - *peos = h2_util_has_eos(bb, *plen); - } - return APR_SUCCESS; -} - -apr_status_t h2_util_bb_readx(apr_bucket_brigade *bb, - h2_util_pass_cb *cb, void *ctx, - apr_off_t *plen, int *peos) -{ - apr_status_t status = APR_SUCCESS; - int consume = (cb != NULL); - apr_off_t written = 0; - apr_off_t avail = *plen; - apr_bucket *next, *b; - - /* Pass data in our brigade through the callback until the length - * is satisfied or we encounter an EOS. - */ - *peos = 0; - for (b = APR_BRIGADE_FIRST(bb); - (status == APR_SUCCESS) && (b != APR_BRIGADE_SENTINEL(bb)); - b = next) { - - if (APR_BUCKET_IS_METADATA(b)) { - if (APR_BUCKET_IS_EOS(b)) { - *peos = 1; - } - else { - /* ignore */ - } - } - else if (avail <= 0) { - break; - } - else { - const char *data = NULL; - apr_size_t data_len; - - if (b->length == ((apr_size_t)-1)) { - /* read to determine length */ - status = apr_bucket_read(b, &data, &data_len, APR_NONBLOCK_READ); - } - else { - data_len = b->length; - } - - if (data_len > avail) { - apr_bucket_split(b, avail); - data_len = (apr_size_t)avail; - } - - if (consume) { - if (!data) { - status = apr_bucket_read(b, &data, &data_len, - APR_NONBLOCK_READ); - } - if (status == APR_SUCCESS) { - status = cb(ctx, data, data_len); - } - } - else { - data_len = b->length; - } - avail -= data_len; - written += data_len; - } - - next = APR_BUCKET_NEXT(b); - if (consume) { - apr_bucket_delete(b); - } - } - - *plen = written; - if (status == APR_SUCCESS && !*peos && !*plen) { - return APR_EAGAIN; } return status; } -apr_size_t h2_util_bucket_print(char *buffer, apr_size_t bmax, +apr_size_t h2_util_bucket_print(char *buffer, apr_size_t bmax, apr_bucket *b, const char *sep) { apr_size_t off = 0; if (sep && *sep) { off += apr_snprintf(buffer+off, bmax-off, "%s", sep); } - + if (bmax <= off) { return off; } @@ -1480,30 +1279,30 @@ apr_size_t h2_util_bucket_print(char *buffer, apr_size_t bmax, off += apr_snprintf(buffer+off, bmax-off, "%s", b->type->name); } else if (bmax > off) { - off += apr_snprintf(buffer+off, bmax-off, "%s[%ld]", - b->type->name, - (long)(b->length == ((apr_size_t)-1)? - -1 : b->length)); + off += apr_snprintf(buffer+off, bmax-off, "%s[%ld]", + b->type->name, + (b->length == ((apr_size_t)-1)? 
+ -1 : (long)b->length)); } return off; } -apr_size_t h2_util_bb_print(char *buffer, apr_size_t bmax, - const char *tag, const char *sep, +apr_size_t h2_util_bb_print(char *buffer, apr_size_t bmax, + const char *tag, const char *sep, apr_bucket_brigade *bb) { apr_size_t off = 0; const char *sp = ""; apr_bucket *b; - + if (bmax > 1) { if (bb) { memset(buffer, 0, bmax--); off += apr_snprintf(buffer+off, bmax-off, "%s(", tag); - for (b = APR_BRIGADE_FIRST(bb); + for (b = APR_BRIGADE_FIRST(bb); (bmax > off) && (b != APR_BRIGADE_SENTINEL(bb)); b = APR_BUCKET_NEXT(b)) { - + off += h2_util_bucket_print(buffer+off, bmax-off, b, sp); sp = " "; } @@ -1519,20 +1318,21 @@ apr_size_t h2_util_bb_print(char *buffer, apr_size_t bmax, } apr_status_t h2_append_brigade(apr_bucket_brigade *to, - apr_bucket_brigade *from, + apr_bucket_brigade *from, apr_off_t *plen, int *peos, h2_bucket_gate *should_append) { apr_bucket *e; - apr_off_t len = 0, remain = *plen; + apr_off_t start, remain; apr_status_t rv; *peos = 0; - + start = remain = *plen; + while (!APR_BRIGADE_EMPTY(from)) { e = APR_BRIGADE_FIRST(from); - + if (!should_append(e)) { goto leave; } @@ -1543,8 +1343,11 @@ apr_status_t h2_append_brigade(apr_bucket_brigade *to, continue; } } - else { - if (remain > 0 && e->length == ((apr_size_t)-1)) { + else { + if (remain <= 0) { + goto leave; + } + if (e->length == ((apr_size_t)-1)) { const char *ign; apr_size_t ilen; rv = apr_bucket_read(e, &ign, &ilen, APR_BLOCK_READ); @@ -1552,22 +1355,13 @@ apr_status_t h2_append_brigade(apr_bucket_brigade *to, return rv; } } - - if (remain < e->length) { - if (remain <= 0) { - goto leave; - } - apr_bucket_split(e, (apr_size_t)remain); - } + fit_bucket_into(e, &remain); } - APR_BUCKET_REMOVE(e); APR_BRIGADE_INSERT_TAIL(to, e); - len += e->length; - remain -= e->length; } leave: - *plen = len; + *plen = start - remain; return APR_SUCCESS; } @@ -1595,20 +1389,10 @@ apr_off_t h2_brigade_mem_size(apr_bucket_brigade *bb) /******************************************************************************* * h2_ngheader ******************************************************************************/ - -int h2_util_ignore_header(const char *name) -{ - /* never forward, ch. 8.1.2.2 */ - return (H2_HD_MATCH_LIT_CS("connection", name) - || H2_HD_MATCH_LIT_CS("proxy-connection", name) - || H2_HD_MATCH_LIT_CS("upgrade", name) - || H2_HD_MATCH_LIT_CS("keep-alive", name) - || H2_HD_MATCH_LIT_CS("transfer-encoding", name)); -} static int count_header(void *ctx, const char *key, const char *value) { - if (!h2_util_ignore_header(key)) { + if (!h2_util_ignore_resp_header(key)) { (*((size_t*)ctx))++; } return 1; @@ -1629,6 +1413,17 @@ static const char *inv_field_value_chr(const char *token) return (p && *p)? 
p : NULL; } +static void strip_field_value_ws(nghttp2_nv *nv) +{ + while(nv->valuelen && (nv->value[0] == ' ' || nv->value[0] == '\t')) { + nv->value++; nv->valuelen--; + } + while(nv->valuelen && (nv->value[nv->valuelen-1] == ' ' + || nv->value[nv->valuelen-1] == '\t')) { + nv->valuelen--; + } +} + typedef struct ngh_ctx { apr_pool_t *p; int unsafe; @@ -1644,14 +1439,14 @@ static int add_header(ngh_ctx *ctx, const char *key, const char *value) if (!ctx->unsafe) { if ((p = inv_field_name_chr(key))) { ap_log_perror(APLOG_MARK, APLOG_TRACE1, APR_EINVAL, ctx->p, - "h2_request: head field '%s: %s' has invalid char %s", + "h2_request: head field '%s: %s' has invalid char %s", key, value, p); ctx->status = APR_EINVAL; return 0; } if ((p = inv_field_value_chr(value))) { ap_log_perror(APLOG_MARK, APLOG_TRACE1, APR_EINVAL, ctx->p, - "h2_request: head field '%s: %s' has invalid char %s", + "h2_request: head field '%s: %s' has invalid char %s", key, value, p); ctx->status = APR_EINVAL; return 0; @@ -1661,69 +1456,100 @@ static int add_header(ngh_ctx *ctx, const char *key, const char *value) nv->namelen = strlen(key); nv->value = (uint8_t*)value; nv->valuelen = strlen(value); - + strip_field_value_ws(nv); + return 1; } static int add_table_header(void *ctx, const char *key, const char *value) { - if (!h2_util_ignore_header(key)) { + if (!h2_util_ignore_resp_header(key)) { add_header(ctx, key, value); } return 1; } -static apr_status_t ngheader_create(h2_ngheader **ph, apr_pool_t *p, - int unsafe, size_t key_count, +static apr_status_t ngheader_create(h2_ngheader **ph, apr_pool_t *p, + int unsafe, size_t key_count, const char *keys[], const char *values[], apr_table_t *headers) { ngh_ctx ctx; size_t n, i; - + ctx.p = p; ctx.unsafe = unsafe; - + n = key_count; apr_table_do(count_header, &n, headers, NULL); - + *ph = ctx.ngh = apr_pcalloc(p, sizeof(h2_ngheader)); if (!ctx.ngh) { return APR_ENOMEM; } - - ctx.ngh->nv = apr_pcalloc(p, n * sizeof(nghttp2_nv)); + + ctx.ngh->nv = apr_pcalloc(p, n * sizeof(nghttp2_nv)); if (!ctx.ngh->nv) { return APR_ENOMEM; } - + ctx.status = APR_SUCCESS; for (i = 0; i < key_count; ++i) { if (!add_header(&ctx, keys[i], values[i])) { return ctx.status; } } - + apr_table_do(add_table_header, &ctx, headers, NULL); return ctx.status; } +#if AP_HAS_RESPONSE_BUCKETS + +static int is_unsafe(ap_bucket_response *h) +{ + const char *v = h->notes? apr_table_get(h->notes, H2_HDR_CONFORMANCE) : NULL; + return (v && !strcmp(v, H2_HDR_CONFORMANCE_UNSAFE)); +} + +apr_status_t h2_res_create_ngtrailer(h2_ngheader **ph, apr_pool_t *p, + ap_bucket_headers *headers) +{ + return ngheader_create(ph, p, 0, + 0, NULL, NULL, headers->headers); +} + +apr_status_t h2_res_create_ngheader(h2_ngheader **ph, apr_pool_t *p, + ap_bucket_response *response) +{ + const char *keys[] = { + ":status" + }; + const char *values[] = { + apr_psprintf(p, "%d", response->status) + }; + return ngheader_create(ph, p, is_unsafe(response), + H2_ALEN(keys), keys, values, response->headers); +} + +#else /* AP_HAS_RESPONSE_BUCKETS */ + static int is_unsafe(h2_headers *h) { - const char *v = apr_table_get(h->notes, H2_HDR_CONFORMANCE); + const char *v = h->notes? 
apr_table_get(h->notes, H2_HDR_CONFORMANCE) : NULL; return (v && !strcmp(v, H2_HDR_CONFORMANCE_UNSAFE)); } -apr_status_t h2_res_create_ngtrailer(h2_ngheader **ph, apr_pool_t *p, +apr_status_t h2_res_create_ngtrailer(h2_ngheader **ph, apr_pool_t *p, h2_headers *headers) { - return ngheader_create(ph, p, is_unsafe(headers), + return ngheader_create(ph, p, is_unsafe(headers), 0, NULL, NULL, headers->headers); } - + apr_status_t h2_res_create_ngheader(h2_ngheader **ph, apr_pool_t *p, - h2_headers *headers) + h2_headers *headers) { const char *keys[] = { ":status" @@ -1731,27 +1557,29 @@ apr_status_t h2_res_create_ngheader(h2_ngheader **ph, apr_pool_t *p, const char *values[] = { apr_psprintf(p, "%d", headers->status) }; - return ngheader_create(ph, p, is_unsafe(headers), + return ngheader_create(ph, p, is_unsafe(headers), H2_ALEN(keys), keys, values, headers->headers); } -apr_status_t h2_req_create_ngheader(h2_ngheader **ph, apr_pool_t *p, +#endif /* else AP_HAS_RESPONSE_BUCKETS */ + +apr_status_t h2_req_create_ngheader(h2_ngheader **ph, apr_pool_t *p, const struct h2_request *req) { - + const char *keys[] = { - ":scheme", - ":authority", - ":path", - ":method", + ":scheme", + ":authority", + ":path", + ":method", }; const char *values[] = { req->scheme, - req->authority, - req->path, - req->method, + req->authority, + req->path, + req->method, }; - + ap_assert(req->scheme); ap_assert(req->authority); ap_assert(req->path); @@ -1763,7 +1591,7 @@ apr_status_t h2_req_create_ngheader(h2_ngheader **ph, apr_pool_t *p, /******************************************************************************* * header HTTP/1 <-> HTTP/2 conversions ******************************************************************************/ - + typedef struct { const char *name; @@ -1791,9 +1619,15 @@ static literal IgnoredRequestTrailers[] = { /* Ignore, see rfc7230, ch. 
4.1.2 */ H2_DEF_LITERAL("max-forwards"), H2_DEF_LITERAL("cache-control"), H2_DEF_LITERAL("authorization"), - H2_DEF_LITERAL("content-length"), + H2_DEF_LITERAL("content-length"), H2_DEF_LITERAL("proxy-authorization"), -}; +}; +static literal IgnoredResponseHeaders[] = { + H2_DEF_LITERAL("upgrade"), + H2_DEF_LITERAL("connection"), + H2_DEF_LITERAL("keep-alive"), + H2_DEF_LITERAL("transfer-encoding"), +}; static literal IgnoredResponseTrailers[] = { H2_DEF_LITERAL("age"), H2_DEF_LITERAL("date"), @@ -1808,93 +1642,124 @@ static literal IgnoredResponseTrailers[] = { H2_DEF_LITERAL("proxy-authenticate"), }; -static int ignore_header(const literal *lits, size_t llen, - const char *name, size_t nlen) +static int contains_name(const literal *lits, size_t llen, nghttp2_nv *nv) { const literal *lit; size_t i; - + for (i = 0; i < llen; ++i) { lit = &lits[i]; - if (lit->len == nlen && !apr_strnatcasecmp(lit->name, name)) { + if (lit->len == nv->namelen + && !ap_cstr_casecmp(lit->name, (const char *)nv->name)) { return 1; } } return 0; } -int h2_req_ignore_header(const char *name, size_t len) +int h2_util_ignore_resp_header(const char *name) { - return ignore_header(H2_LIT_ARGS(IgnoredRequestHeaders), name, len); + nghttp2_nv nv; + + nv.name = (uint8_t*)name; + nv.namelen = strlen(name); + return contains_name(H2_LIT_ARGS(IgnoredResponseHeaders), &nv); } -int h2_req_ignore_trailer(const char *name, size_t len) + +static int h2_req_ignore_header(nghttp2_nv *nv) { - return (h2_req_ignore_header(name, len) - || ignore_header(H2_LIT_ARGS(IgnoredRequestTrailers), name, len)); + return contains_name(H2_LIT_ARGS(IgnoredRequestHeaders), nv); } -int h2_res_ignore_trailer(const char *name, size_t len) +int h2_ignore_req_trailer(const char *name, size_t len) { - return ignore_header(H2_LIT_ARGS(IgnoredResponseTrailers), name, len); + nghttp2_nv nv; + + nv.name = (uint8_t*)name; + nv.namelen = strlen(name); + return (h2_req_ignore_header(&nv) + || contains_name(H2_LIT_ARGS(IgnoredRequestTrailers), &nv)); } -apr_status_t h2_req_add_header(apr_table_t *headers, apr_pool_t *pool, - const char *name, size_t nlen, - const char *value, size_t vlen) +int h2_ignore_resp_trailer(const char *name, size_t len) +{ + nghttp2_nv nv; + + nv.name = (uint8_t*)name; + nv.namelen = strlen(name); + return (contains_name(H2_LIT_ARGS(IgnoredResponseHeaders), &nv) + || contains_name(H2_LIT_ARGS(IgnoredResponseTrailers), &nv)); +} + +static apr_status_t req_add_header(apr_table_t *headers, apr_pool_t *pool, + nghttp2_nv *nv, size_t max_field_len, + int *pwas_added) { char *hname, *hvalue; - - if (h2_req_ignore_header(name, nlen)) { + const char *existing; + + *pwas_added = 0; + strip_field_value_ws(nv); + + if (h2_req_ignore_header(nv)) { return APR_SUCCESS; } - else if (H2_HD_MATCH_LIT("cookie", name, nlen)) { - const char *existing = apr_table_get(headers, "cookie"); + else if (nv->namelen == sizeof("cookie")-1 + && !ap_cstr_casecmp("cookie", (const char *)nv->name)) { + existing = apr_table_get(headers, "cookie"); if (existing) { - char *nval; - /* Cookie header come separately in HTTP/2, but need * to be merged by "; " (instead of default ", ") */ - hvalue = apr_pstrndup(pool, value, vlen); - nval = apr_psprintf(pool, "%s; %s", existing, hvalue); - apr_table_setn(headers, "Cookie", nval); + if (max_field_len + && strlen(existing) + nv->valuelen + nv->namelen + 4 + > max_field_len) { + /* "key: oldval, nval" is too long */ + return APR_EINVAL; + } + hvalue = apr_pstrndup(pool, (const char*)nv->value, nv->valuelen); + 
apr_table_setn(headers, "Cookie", + apr_psprintf(pool, "%s; %s", existing, hvalue)); return APR_SUCCESS; } } - else if (H2_HD_MATCH_LIT("host", name, nlen)) { + else if (nv->namelen == sizeof("host")-1 + && !ap_cstr_casecmp("host", (const char *)nv->name)) { if (apr_table_get(headers, "Host")) { return APR_SUCCESS; /* ignore duplicate */ } } - - hname = apr_pstrndup(pool, name, nlen); - hvalue = apr_pstrndup(pool, value, vlen); - h2_util_camel_case_header(hname, nlen); + + hname = apr_pstrndup(pool, (const char*)nv->name, nv->namelen); + h2_util_camel_case_header(hname, nv->namelen); + existing = apr_table_get(headers, hname); + if (max_field_len) { + if ((existing? strlen(existing)+2 : 0) + nv->valuelen + nv->namelen + 2 + > max_field_len) { + /* "key: (oldval, )?nval" is too long */ + return APR_EINVAL; + } + } + if (!existing) *pwas_added = 1; + hvalue = apr_pstrndup(pool, (const char*)nv->value, nv->valuelen); apr_table_mergen(headers, hname, hvalue); - + return APR_SUCCESS; } -/******************************************************************************* - * h2 request handling - ******************************************************************************/ - -h2_request *h2_req_create(int id, apr_pool_t *pool, const char *method, - const char *scheme, const char *authority, - const char *path, apr_table_t *header, int serialize) +apr_status_t h2_req_add_header(apr_table_t *headers, apr_pool_t *pool, + const char *name, size_t nlen, + const char *value, size_t vlen, + size_t max_field_len, int *pwas_added) { - h2_request *req = apr_pcalloc(pool, sizeof(h2_request)); - - req->method = method; - req->scheme = scheme; - req->authority = authority; - req->path = path; - req->headers = header? header : apr_table_make(pool, 10); - req->request_time = apr_time_now(); - req->serialize = serialize; - - return req; + nghttp2_nv nv; + + nv.name = (uint8_t*)name; + nv.namelen = nlen; + nv.value = (uint8_t*)value; + nv.valuelen = vlen; + return req_add_header(headers, pool, &nv, max_field_len, pwas_added); } /******************************************************************************* @@ -1905,7 +1770,7 @@ int h2_util_frame_print(const nghttp2_frame *frame, char *buffer, size_t maxlen) { char scratch[128]; size_t s_len = sizeof(scratch)/sizeof(scratch[0]); - + switch (frame->hd.type) { case NGHTTP2_DATA: { return apr_snprintf(buffer, maxlen, @@ -1960,16 +1825,17 @@ int h2_util_frame_print(const nghttp2_frame *frame, char *buffer, size_t maxlen) case NGHTTP2_GOAWAY: { size_t len = (frame->goaway.opaque_data_len < s_len)? 
frame->goaway.opaque_data_len : s_len-1; - memcpy(scratch, frame->goaway.opaque_data, len); + if (len) + memcpy(scratch, frame->goaway.opaque_data, len); scratch[len] = '\0'; return apr_snprintf(buffer, maxlen, "GOAWAY[error=%d, reason='%s', " - "last_stream=%d]", frame->goaway.error_code, + "last_stream=%d]", frame->goaway.error_code, scratch, frame->goaway.last_stream_id); } case NGHTTP2_WINDOW_UPDATE: { return apr_snprintf(buffer, maxlen, "WINDOW_UPDATE[stream=%d, incr=%d]", - frame->hd.stream_id, + frame->hd.stream_id, frame->window_update.window_size_increment); } default: @@ -2013,3 +1879,60 @@ int h2_push_policy_determine(apr_table_t *headers, apr_pool_t *p, int push_enabl return policy; } +void h2_util_drain_pipe(apr_file_t *pipe) +{ + char rb[512]; + apr_size_t nr = sizeof(rb); + apr_interval_time_t timeout; + apr_status_t trv; + + /* Make the pipe non-blocking if we can */ + trv = apr_file_pipe_timeout_get(pipe, &timeout); + if (trv == APR_SUCCESS) + apr_file_pipe_timeout_set(pipe, 0); + + while (apr_file_read(pipe, rb, &nr) == APR_SUCCESS) { + /* Although we write just one byte to the other end of the pipe + * during wakeup, multiple threads could call the wakeup. + * So simply drain out from the input side of the pipe all + * the data. + */ + if (nr != sizeof(rb)) + break; + } + if (trv == APR_SUCCESS) + apr_file_pipe_timeout_set(pipe, timeout); +} + +apr_status_t h2_util_wait_on_pipe(apr_file_t *pipe) +{ + char rb[512]; + apr_size_t nr = sizeof(rb); + + return apr_file_read(pipe, rb, &nr); +} + +#if AP_HAS_RESPONSE_BUCKETS + +static int add_header_lengths(void *ctx, const char *name, const char *value) +{ + apr_size_t *plen = ctx; + *plen += strlen(name) + strlen(value); + return 1; +} + +apr_size_t headers_length_estimate(ap_bucket_headers *hdrs) +{ + apr_size_t len = 0; + apr_table_do(add_header_lengths, &len, hdrs->headers, NULL); + return len; +} + +apr_size_t response_length_estimate(ap_bucket_response *resp) +{ + apr_size_t len = 3 + 1 + 8 + (resp->reason? strlen(resp->reason) : 10); + apr_table_do(add_header_lengths, &len, resp->headers, NULL); + return len; +} + +#endif /* AP_HAS_RESPONSE_BUCKETS */ diff --git a/modules/http2/h2_util.h b/modules/http2/h2_util.h index 1eb262d..d2e6548 100644 --- a/modules/http2/h2_util.h +++ b/modules/http2/h2_util.h @@ -18,6 +18,10 @@ #define __mod_h2__h2_util__ #include +#include + +#include "h2.h" +#include "h2_headers.h" /******************************************************************************* * some debugging/format helpers @@ -28,10 +32,6 @@ struct nghttp2_frame; size_t h2_util_hex_dump(char *buffer, size_t maxlen, const char *data, size_t datalen); -size_t h2_util_header_print(char *buffer, size_t maxlen, - const char *name, size_t namelen, - const char *value, size_t valuelen); - void h2_util_camel_case_header(char *s, size_t len); int h2_util_frame_print(const nghttp2_frame *frame, char *buffer, size_t maxlen); @@ -49,7 +49,7 @@ typedef int h2_ihash_iter_t(void *ctx, void *val); */ h2_ihash_t *h2_ihash_create(apr_pool_t *pool, size_t offset_of_int); -size_t h2_ihash_count(h2_ihash_t *ih); +unsigned int h2_ihash_count(h2_ihash_t *ih); int h2_ihash_empty(h2_ihash_t *ih); void *h2_ihash_get(h2_ihash_t *ih, int id); @@ -96,13 +96,13 @@ typedef int h2_iq_cmp(int i1, int i2, void *ctx); /** * Allocate a new queue from the pool and initialize. 
- * @param id the identifier of the queue * @param pool the memory pool + * @param capacity the initial capacity of the queue */ h2_iqueue *h2_iq_create(apr_pool_t *pool, int capacity); /** - * Return != 0 iff there are no tasks in the queue. + * Return != 0 iff there are no ints in the queue. * @param q the queue to check */ int h2_iq_empty(h2_iqueue *q); @@ -134,11 +134,10 @@ int h2_iq_add(h2_iqueue *q, int sid, h2_iq_cmp *cmp, void *ctx); int h2_iq_append(h2_iqueue *q, int sid); /** - * Remove the stream id from the queue. Return != 0 iff task - * was found in queue. - * @param q the task queue + * Remove the int from the queue. Return != 0 iff it was found. + * @param q the queue * @param sid the stream id to remove - * @return != 0 iff task was found in queue + * @return != 0 iff int was found in queue */ int h2_iq_remove(h2_iqueue *q, int sid); @@ -148,7 +147,7 @@ int h2_iq_remove(h2_iqueue *q, int sid); void h2_iq_clear(h2_iqueue *q); /** - * Sort the stream idqueue again. Call if the task ordering + * Sort the stream idqueue again. Call if the int ordering * has changed. * * @param q the queue to sort @@ -169,7 +168,7 @@ int h2_iq_shift(h2_iqueue *q); /** * Get the first max ids from the queue. All these ids will be removed. * - * @param q the queue to get the first task from + * @param q the queue to get the first ids from * @param pint the int array to receive the values * @param max the maximum number of ids to shift * @return the actual number of ids shifted @@ -179,7 +178,7 @@ size_t h2_iq_mshift(h2_iqueue *q, int *pint, size_t max); /** * Determine if int is in the queue already * - * @parm q the queue + * @param q the queue * @param sid the integer id to check for * @return != 0 iff sid is already in the queue */ @@ -209,7 +208,6 @@ apr_status_t h2_fifo_create(h2_fifo **pfifo, apr_pool_t *pool, int capacity); apr_status_t h2_fifo_set_create(h2_fifo **pfifo, apr_pool_t *pool, int capacity); apr_status_t h2_fifo_term(h2_fifo *fifo); -apr_status_t h2_fifo_interrupt(h2_fifo *fifo); int h2_fifo_count(h2_fifo *fifo); @@ -229,7 +227,7 @@ apr_status_t h2_fifo_try_pull(h2_fifo *fifo, void **pelem); typedef enum { H2_FIFO_OP_PULL, /* pull the element from the queue, ie discard it */ - H2_FIFO_OP_REPUSH, /* pull and immediatley re-push it */ + H2_FIFO_OP_REPUSH, /* pull and immediately re-push it */ } h2_fifo_op_t; typedef h2_fifo_op_t h2_fifo_peek_fn(void *head, void *ctx); @@ -280,7 +278,6 @@ apr_status_t h2_ififo_create(h2_ififo **pfifo, apr_pool_t *pool, int capacity); apr_status_t h2_ififo_set_create(h2_ififo **pfifo, apr_pool_t *pool, int capacity); apr_status_t h2_ififo_term(h2_ififo *fifo); -apr_status_t h2_ififo_interrupt(h2_ififo *fifo); int h2_ififo_count(h2_ififo *fifo); @@ -345,9 +342,8 @@ apr_size_t h2_util_table_bytes(apr_table_t *t, apr_size_t pair_extra); /******************************************************************************* * HTTP/2 header helpers ******************************************************************************/ -int h2_req_ignore_header(const char *name, size_t len); -int h2_req_ignore_trailer(const char *name, size_t len); -int h2_res_ignore_trailer(const char *name, size_t len); +int h2_ignore_req_trailer(const char *name, size_t len); +int h2_ignore_resp_trailer(const char *name, size_t len); /** * Set the push policy for the given request. 
Takes request headers into @@ -378,52 +374,37 @@ const char *h2_util_base64url_encode(const char *data, * nghttp2 helpers ******************************************************************************/ -#define H2_HD_MATCH_LIT_CS(l, name) \ - ((strlen(name) == sizeof(l) - 1) && !apr_strnatcasecmp(l, name)) - -#define H2_CREATE_NV_LIT_CS(nv, NAME, VALUE) nv->name = (uint8_t *)NAME; \ - nv->namelen = sizeof(NAME) - 1; \ - nv->value = (uint8_t *)VALUE; \ - nv->valuelen = strlen(VALUE) - -#define H2_CREATE_NV_CS_LIT(nv, NAME, VALUE) nv->name = (uint8_t *)NAME; \ - nv->namelen = strlen(NAME); \ - nv->value = (uint8_t *)VALUE; \ - nv->valuelen = sizeof(VALUE) - 1 - -#define H2_CREATE_NV_CS_CS(nv, NAME, VALUE) nv->name = (uint8_t *)NAME; \ - nv->namelen = strlen(NAME); \ - nv->value = (uint8_t *)VALUE; \ - nv->valuelen = strlen(VALUE) - -int h2_util_ignore_header(const char *name); - -struct h2_headers; +int h2_util_ignore_resp_header(const char *name); typedef struct h2_ngheader { nghttp2_nv *nv; apr_size_t nvlen; } h2_ngheader; +#if AP_HAS_RESPONSE_BUCKETS +apr_status_t h2_res_create_ngtrailer(h2_ngheader **ph, apr_pool_t *p, + ap_bucket_headers *headers); +apr_status_t h2_res_create_ngheader(h2_ngheader **ph, apr_pool_t *p, + ap_bucket_response *response); +apr_status_t h2_req_create_ngheader(h2_ngheader **ph, apr_pool_t *p, + const struct h2_request *req); +#else apr_status_t h2_res_create_ngtrailer(h2_ngheader **ph, apr_pool_t *p, struct h2_headers *headers); apr_status_t h2_res_create_ngheader(h2_ngheader **ph, apr_pool_t *p, struct h2_headers *headers); apr_status_t h2_req_create_ngheader(h2_ngheader **ph, apr_pool_t *p, const struct h2_request *req); +#endif +/** + * Add a HTTP/2 header and return the table key if it really was added + * and not ignored. + */ apr_status_t h2_req_add_header(apr_table_t *headers, apr_pool_t *pool, const char *name, size_t nlen, - const char *value, size_t vlen); - -/******************************************************************************* - * h2_request helpers - ******************************************************************************/ - -struct h2_request *h2_req_create(int id, apr_pool_t *pool, const char *method, - const char *scheme, const char *authority, - const char *path, apr_table_t *header, - int serialize); + const char *value, size_t vlen, + size_t max_field_len, int *pwas_added); /******************************************************************************* * apr brigade helpers @@ -445,42 +426,9 @@ apr_status_t h2_brigade_copy_length(apr_bucket_brigade *dest, apr_bucket_brigade *src, apr_off_t length); -/** - * Return != 0 iff there is a FLUSH or EOS bucket in the brigade. - * @param bb the brigade to check on - * @return != 0 iff brigade holds FLUSH or EOS bucket (or both) - */ -int h2_util_has_eos(apr_bucket_brigade *bb, apr_off_t len); - -/** - * Check how many bytes of the desired amount are available and if the - * end of stream is reached by that amount. - * @param bb the brigade to check - * @param plen the desired length and, on return, the available length - * @param on return, if eos has been reached - */ -apr_status_t h2_util_bb_avail(apr_bucket_brigade *bb, - apr_off_t *plen, int *peos); - -typedef apr_status_t h2_util_pass_cb(void *ctx, +typedef apr_status_t h2_util_pass_cb(void *ctx, const char *data, apr_off_t len); -/** - * Read at most *plen bytes from the brigade and pass them into the - * given callback. If cb is NULL, just return the amount of data that - * could have been read. 
- * If an EOS was/would be encountered, set *peos != 0. - * @param bb the brigade to read from - * @param cb the callback to invoke for the read data - * @param ctx optional data passed to callback - * @param plen inout, as input gives the maximum number of bytes to read, - * on return specifies the actual/would be number of bytes - * @param peos != 0 iff an EOS bucket was/would be encountered. - */ -apr_status_t h2_util_bb_readx(apr_bucket_brigade *bb, - h2_util_pass_cb *cb, void *ctx, - apr_off_t *plen, int *peos); - /** * Print a bucket's meta data (type and length) to the buffer. * @return number of characters printed @@ -506,14 +454,16 @@ apr_size_t h2_util_bb_print(char *buffer, apr_size_t bmax, * @param bb the brigade to log */ #define h2_util_bb_log(c, sid, level, tag, bb) \ -do { \ - char buffer[4 * 1024]; \ - const char *line = "(null)"; \ - apr_size_t len, bmax = sizeof(buffer)/sizeof(buffer[0]); \ - len = h2_util_bb_print(buffer, bmax, (tag), "", (bb)); \ - ap_log_cerror(APLOG_MARK, level, 0, (c), "bb_dump(%ld): %s", \ - ((c)->master? (c)->master->id : (c)->id), (len? buffer : line)); \ -} while(0) +if (APLOG_C_IS_LEVEL(c, level)) { \ + do { \ + char buffer[4 * 1024]; \ + const char *line = "(null)"; \ + apr_size_t len, bmax = sizeof(buffer)/sizeof(buffer[0]); \ + len = h2_util_bb_print(buffer, bmax, (tag), "", (bb)); \ + ap_log_cerror(APLOG_MARK, level, 0, (c), "bb_dump(%ld): %s", \ + ((c)->master? (c)->master->id : (c)->id), (len? buffer : line)); \ + } while(0); \ +} typedef int h2_bucket_gate(apr_bucket *b); @@ -541,4 +491,29 @@ apr_status_t h2_append_brigade(apr_bucket_brigade *to, */ apr_off_t h2_brigade_mem_size(apr_bucket_brigade *bb); +/** + * Drain a pipe used for notification. + */ +void h2_util_drain_pipe(apr_file_t *pipe); + +/** + * Wait on data arriving on a pipe. + */ +apr_status_t h2_util_wait_on_pipe(apr_file_t *pipe); + + +#if AP_HAS_RESPONSE_BUCKETS +/** + * Give an estimate of the length of the header fields, + * without compression or other formatting decorations. + */ +apr_size_t headers_length_estimate(ap_bucket_headers *hdrs); + +/** + * Give an estimate of the length of the response meta data size, + * without compression or other formatting decorations. + */ +apr_size_t response_length_estimate(ap_bucket_response *resp); +#endif /* AP_HAS_RESPONSE_BUCKETS */ + #endif /* defined(__mod_h2__h2_util__) */ diff --git a/modules/http2/h2_version.h b/modules/http2/h2_version.h index 7079437..7e7da21 100644 --- a/modules/http2/h2_version.h +++ b/modules/http2/h2_version.h @@ -27,7 +27,7 @@ * @macro * Version number of the http2 module as c string */ -#define MOD_HTTP2_VERSION "1.11.4" +#define MOD_HTTP2_VERSION "2.0.22" /** * @macro @@ -35,7 +35,7 @@ * release. This is a 24 bit number with 8 bits for major number, 8 bits * for minor and 8 bits for patch. Version 1.2.3 becomes 0x010203. 
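+ * e.g. the 2.0.22 release below is encoded as 0x020016 (0x16 == 22).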
*/ -#define MOD_HTTP2_VERSION_NUM 0x010b04 +#define MOD_HTTP2_VERSION_NUM 0x020016 #endif /* mod_h2_h2_version_h */ diff --git a/modules/http2/h2_workers.c b/modules/http2/h2_workers.c index 699f533..e7e2039 100644 --- a/modules/http2/h2_workers.c +++ b/modules/http2/h2_workers.c @@ -15,285 +15,440 @@ */ #include -#include +#include #include #include #include #include +#include #include #include +#include #include "h2.h" #include "h2_private.h" #include "h2_mplx.h" -#include "h2_task.h" +#include "h2_c2.h" #include "h2_workers.h" #include "h2_util.h" +typedef enum { + PROD_IDLE, + PROD_ACTIVE, + PROD_JOINED, +} prod_state_t; + +struct ap_conn_producer_t { + APR_RING_ENTRY(ap_conn_producer_t) link; + const char *name; + void *baton; + ap_conn_producer_next *fn_next; + ap_conn_producer_done *fn_done; + ap_conn_producer_shutdown *fn_shutdown; + volatile prod_state_t state; + volatile int conns_active; +}; + + +typedef enum { + H2_SLOT_FREE, + H2_SLOT_RUN, + H2_SLOT_ZOMBIE, +} h2_slot_state_t; + typedef struct h2_slot h2_slot; struct h2_slot { - int id; - h2_slot *next; + APR_RING_ENTRY(h2_slot) link; + apr_uint32_t id; + apr_pool_t *pool; + h2_slot_state_t state; + volatile int should_shutdown; + volatile int is_idle; h2_workers *workers; - int aborted; - int sticks; - h2_task *task; + ap_conn_producer_t *prod; apr_thread_t *thread; - apr_thread_mutex_t *lock; - apr_thread_cond_t *not_idle; + struct apr_thread_cond_t *more_work; + int activations; }; -static h2_slot *pop_slot(h2_slot **phead) -{ - /* Atomically pop a slot from the list */ - for (;;) { - h2_slot *first = *phead; - if (first == NULL) { - return NULL; - } - if (apr_atomic_casptr((void*)phead, first->next, first) == first) { - first->next = NULL; - return first; - } - } -} +struct h2_workers { + server_rec *s; + apr_pool_t *pool; + + apr_uint32_t max_slots; + apr_uint32_t min_active; + volatile apr_time_t idle_limit; + volatile int aborted; + volatile int shutdown; + int dynamic; + + volatile apr_uint32_t active_slots; + volatile apr_uint32_t idle_slots; + + apr_threadattr_t *thread_attr; + h2_slot *slots; + + APR_RING_HEAD(h2_slots_free, h2_slot) free; + APR_RING_HEAD(h2_slots_idle, h2_slot) idle; + APR_RING_HEAD(h2_slots_busy, h2_slot) busy; + APR_RING_HEAD(h2_slots_zombie, h2_slot) zombie; + + APR_RING_HEAD(ap_conn_producer_active, ap_conn_producer_t) prod_active; + APR_RING_HEAD(ap_conn_producer_idle, ap_conn_producer_t) prod_idle; + + struct apr_thread_mutex_t *lock; + struct apr_thread_cond_t *prod_done; + struct apr_thread_cond_t *all_done; +}; -static void push_slot(h2_slot **phead, h2_slot *slot) -{ - /* Atomically push a slot to the list */ - ap_assert(!slot->next); - for (;;) { - h2_slot *next = slot->next = *phead; - if (apr_atomic_casptr((void*)phead, slot, next) == next) { - return; - } - } -} static void* APR_THREAD_FUNC slot_run(apr_thread_t *thread, void *wctx); -static apr_status_t activate_slot(h2_workers *workers, h2_slot *slot) +static apr_status_t activate_slot(h2_workers *workers) { - apr_status_t status; - - slot->workers = workers; - slot->aborted = 0; - slot->task = NULL; - - if (!slot->lock) { - status = apr_thread_mutex_create(&slot->lock, - APR_THREAD_MUTEX_DEFAULT, - workers->pool); - if (status != APR_SUCCESS) { - push_slot(&workers->free, slot); - return status; - } - } + h2_slot *slot; + apr_pool_t *pool; + apr_status_t rv; - if (!slot->not_idle) { - status = apr_thread_cond_create(&slot->not_idle, workers->pool); - if (status != APR_SUCCESS) { - push_slot(&workers->free, slot); - return status; - 
} + if (APR_RING_EMPTY(&workers->free, h2_slot, link)) { + return APR_EAGAIN; } - - ap_log_error(APLOG_MARK, APLOG_TRACE2, 0, workers->s, - "h2_workers: new thread for slot %d", slot->id); - /* thread will either immediately start work or add itself - * to the idle queue */ - apr_thread_create(&slot->thread, workers->thread_attr, slot_run, slot, - workers->pool); - if (!slot->thread) { - push_slot(&workers->free, slot); - return APR_ENOMEM; - } - - apr_atomic_inc32(&workers->worker_count); - return APR_SUCCESS; -} + slot = APR_RING_FIRST(&workers->free); + ap_assert(slot->state == H2_SLOT_FREE); + APR_RING_REMOVE(slot, link); -static apr_status_t add_worker(h2_workers *workers) -{ - h2_slot *slot = pop_slot(&workers->free); - if (slot) { - return activate_slot(workers, slot); - } - return APR_EAGAIN; -} + ap_log_error(APLOG_MARK, APLOG_TRACE3, 0, workers->s, + "h2_workers: activate slot %d", slot->id); -static void wake_idle_worker(h2_workers *workers) -{ - h2_slot *slot = pop_slot(&workers->idle); - if (slot) { - apr_thread_mutex_lock(slot->lock); - apr_thread_cond_signal(slot->not_idle); - apr_thread_mutex_unlock(slot->lock); - } - else if (workers->dynamic) { - add_worker(workers); + slot->state = H2_SLOT_RUN; + slot->should_shutdown = 0; + slot->is_idle = 0; + slot->pool = NULL; + ++workers->active_slots; + rv = apr_pool_create(&pool, workers->pool); + if (APR_SUCCESS != rv) goto cleanup; + apr_pool_tag(pool, "h2_worker_slot"); + slot->pool = pool; + + rv = ap_thread_create(&slot->thread, workers->thread_attr, + slot_run, slot, slot->pool); + +cleanup: + if (rv != APR_SUCCESS) { + AP_DEBUG_ASSERT(0); + slot->state = H2_SLOT_FREE; + if (slot->pool) { + apr_pool_destroy(slot->pool); + slot->pool = NULL; + } + APR_RING_INSERT_TAIL(&workers->free, slot, h2_slot, link); + --workers->active_slots; } + return rv; } -static void cleanup_zombies(h2_workers *workers) +static void join_zombies(h2_workers *workers) { h2_slot *slot; - while ((slot = pop_slot(&workers->zombies))) { - if (slot->thread) { - apr_status_t status; - apr_thread_join(&status, slot->thread); - slot->thread = NULL; + apr_status_t status; + + while (!APR_RING_EMPTY(&workers->zombie, h2_slot, link)) { + slot = APR_RING_FIRST(&workers->zombie); + APR_RING_REMOVE(slot, link); + ap_assert(slot->state == H2_SLOT_ZOMBIE); + ap_assert(slot->thread != NULL); + + apr_thread_mutex_unlock(workers->lock); + apr_thread_join(&status, slot->thread); + apr_thread_mutex_lock(workers->lock); + + slot->thread = NULL; + slot->state = H2_SLOT_FREE; + if (slot->pool) { + apr_pool_destroy(slot->pool); + slot->pool = NULL; } - apr_atomic_dec32(&workers->worker_count); - slot->next = NULL; - push_slot(&workers->free, slot); + APR_RING_INSERT_TAIL(&workers->free, slot, h2_slot, link); } } -static apr_status_t slot_pull_task(h2_slot *slot, h2_mplx *m) +static void wake_idle_worker(h2_workers *workers, ap_conn_producer_t *prod) { - apr_status_t rv; - - rv = h2_mplx_pop_task(m, &slot->task); - if (slot->task) { - /* Ok, we got something to give back to the worker for execution. - * If we still have idle workers, we let the worker be sticky, - * e.g. making it poll the task's h2_mplx instance for more work - * before asking back here. 
*/ - slot->sticks = slot->workers->max_workers; - return rv; + if (!APR_RING_EMPTY(&workers->idle, h2_slot, link)) { + h2_slot *slot; + for (slot = APR_RING_FIRST(&workers->idle); + slot != APR_RING_SENTINEL(&workers->idle, h2_slot, link); + slot = APR_RING_NEXT(slot, link)) { + if (slot->is_idle && !slot->should_shutdown) { + apr_thread_cond_signal(slot->more_work); + slot->is_idle = 0; + return; + } + } + } + if (workers->dynamic && !workers->shutdown + && (workers->active_slots < workers->max_slots)) { + activate_slot(workers); } - slot->sticks = 0; - return APR_EOF; -} - -static h2_fifo_op_t mplx_peek(void *head, void *ctx) -{ - h2_mplx *m = head; - h2_slot *slot = ctx; - - if (slot_pull_task(slot, m) == APR_EAGAIN) { - wake_idle_worker(slot->workers); - return H2_FIFO_OP_REPUSH; - } - return H2_FIFO_OP_PULL; } /** - * Get the next task for the given worker. Will block until a task arrives - * or the max_wait timer expires and more than min workers exist. + * Get the next connection to work on. */ -static apr_status_t get_next(h2_slot *slot) +static conn_rec *get_next(h2_slot *slot) { h2_workers *workers = slot->workers; - apr_status_t status; - - slot->task = NULL; - while (!slot->aborted) { - if (!slot->task) { - status = h2_fifo_try_peek(workers->mplxs, mplx_peek, slot); - if (status == APR_EOF) { - return status; - } + conn_rec *c = NULL; + ap_conn_producer_t *prod; + int has_more; + + slot->prod = NULL; + if (!APR_RING_EMPTY(&workers->prod_active, ap_conn_producer_t, link)) { + slot->prod = prod = APR_RING_FIRST(&workers->prod_active); + APR_RING_REMOVE(prod, link); + AP_DEBUG_ASSERT(PROD_ACTIVE == prod->state); + + c = prod->fn_next(prod->baton, &has_more); + if (c && has_more) { + APR_RING_INSERT_TAIL(&workers->prod_active, prod, ap_conn_producer_t, link); + wake_idle_worker(workers, slot->prod); } - - if (slot->task) { - return APR_SUCCESS; + else { + prod->state = PROD_IDLE; + APR_RING_INSERT_TAIL(&workers->prod_idle, prod, ap_conn_producer_t, link); + } + if (c) { + ++prod->conns_active; } - - cleanup_zombies(workers); - - apr_thread_mutex_lock(slot->lock); - push_slot(&workers->idle, slot); - apr_thread_cond_wait(slot->not_idle, slot->lock); - apr_thread_mutex_unlock(slot->lock); } - return APR_EOF; -} -static void slot_done(h2_slot *slot) -{ - push_slot(&(slot->workers->zombies), slot); + return c; } - static void* APR_THREAD_FUNC slot_run(apr_thread_t *thread, void *wctx) { h2_slot *slot = wctx; - - while (!slot->aborted) { - - /* Get a h2_task from the mplxs queue. */ - get_next(slot); - while (slot->task) { - - h2_task_do(slot->task, thread, slot->id); - - /* Report the task as done. If stickyness is left, offer the - * mplx the opportunity to give us back a new task right away. 
- */ - if (!slot->aborted && (--slot->sticks > 0)) { - h2_mplx_task_done(slot->task->mplx, slot->task, &slot->task); - } - else { - h2_mplx_task_done(slot->task->mplx, slot->task, NULL); - slot->task = NULL; + h2_workers *workers = slot->workers; + conn_rec *c; + apr_status_t rv; + + apr_thread_mutex_lock(workers->lock); + slot->state = H2_SLOT_RUN; + ++slot->activations; + APR_RING_ELEM_INIT(slot, link); + for(;;) { + if (APR_RING_NEXT(slot, link) != slot) { + /* slot is part of the idle ring from the last loop */ + APR_RING_REMOVE(slot, link); + --workers->idle_slots; + } + slot->is_idle = 0; + + if (!workers->aborted && !slot->should_shutdown) { + APR_RING_INSERT_TAIL(&workers->busy, slot, h2_slot, link); + do { + c = get_next(slot); + if (!c) { + break; + } + apr_thread_mutex_unlock(workers->lock); + /* See the discussion at + * + * Each conn_rec->id is supposed to be unique at a point in time. Since + * some modules (and maybe external code) uses this id as an identifier + * for the request_rec they handle, it needs to be unique for secondary + * connections also. + * + * The MPM module assigns the connection ids and mod_unique_id is using + * that one to generate identifier for requests. While the implementation + * works for HTTP/1.x, the parallel execution of several requests per + * connection will generate duplicate identifiers on load. + * + * The original implementation for secondary connection identifiers used + * to shift the master connection id up and assign the stream id to the + * lower bits. This was cramped on 32 bit systems, but on 64bit there was + * enough space. + * + * As issue 195 showed, mod_unique_id only uses the lower 32 bit of the + * connection id, even on 64bit systems. Therefore collisions in request ids. + * + * The way master connection ids are generated, there is some space "at the + * top" of the lower 32 bits on allmost all systems. If you have a setup + * with 64k threads per child and 255 child processes, you live on the edge. + * + * The new implementation shifts 8 bits and XORs in the worker + * id. This will experience collisions with > 256 h2 workers and heavy + * load still. There seems to be no way to solve this in all possible + * configurations by mod_h2 alone. 
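+ * + * A purely illustrative example: a master connection id of 0x1234 handled + * by worker slot 7 yields the secondary connection id + * (0x1234 << 8) ^ 7 = 0x123407.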
+ */ + if (c->master) { + c->id = (c->master->id << 8)^slot->id; + } + c->current_thread = thread; + AP_DEBUG_ASSERT(slot->prod); + +#if AP_HAS_RESPONSE_BUCKETS + ap_process_connection(c, ap_get_conn_socket(c)); +#else + h2_c2_process(c, thread, slot->id); +#endif + slot->prod->fn_done(slot->prod->baton, c); + + apr_thread_mutex_lock(workers->lock); + if (--slot->prod->conns_active <= 0) { + apr_thread_cond_broadcast(workers->prod_done); + } + if (slot->prod->state == PROD_IDLE) { + APR_RING_REMOVE(slot->prod, link); + slot->prod->state = PROD_ACTIVE; + APR_RING_INSERT_TAIL(&workers->prod_active, slot->prod, ap_conn_producer_t, link); + } + + } while (!workers->aborted && !slot->should_shutdown); + APR_RING_REMOVE(slot, link); /* no longer busy */ + } + + if (workers->aborted || slot->should_shutdown) { + break; + } + + join_zombies(workers); + + /* we are idle */ + APR_RING_INSERT_TAIL(&workers->idle, slot, h2_slot, link); + ++workers->idle_slots; + slot->is_idle = 1; + if (slot->id >= workers->min_active && workers->idle_limit > 0) { + rv = apr_thread_cond_timedwait(slot->more_work, workers->lock, + workers->idle_limit); + if (APR_TIMEUP == rv) { + APR_RING_REMOVE(slot, link); + --workers->idle_slots; + ap_log_error(APLOG_MARK, APLOG_TRACE1, 0, workers->s, + "h2_workers: idle timeout slot %d in state %d (%d activations)", + slot->id, slot->state, slot->activations); + break; } } + else { + apr_thread_cond_wait(slot->more_work, workers->lock); + } + } + + ap_log_error(APLOG_MARK, APLOG_TRACE3, 0, workers->s, + "h2_workers: terminate slot %d in state %d (%d activations)", + slot->id, slot->state, slot->activations); + slot->is_idle = 0; + slot->state = H2_SLOT_ZOMBIE; + slot->should_shutdown = 0; + APR_RING_INSERT_TAIL(&workers->zombie, slot, h2_slot, link); + --workers->active_slots; + if (workers->active_slots <= 0) { + apr_thread_cond_broadcast(workers->all_done); } + apr_thread_mutex_unlock(workers->lock); - slot_done(slot); + apr_thread_exit(thread, APR_SUCCESS); return NULL; } +static void wake_all_idles(h2_workers *workers) +{ + h2_slot *slot; + for (slot = APR_RING_FIRST(&workers->idle); + slot != APR_RING_SENTINEL(&workers->idle, h2_slot, link); + slot = APR_RING_NEXT(slot, link)) + { + apr_thread_cond_signal(slot->more_work); + } +} + static apr_status_t workers_pool_cleanup(void *data) { h2_workers *workers = data; - h2_slot *slot; - - if (!workers->aborted) { - workers->aborted = 1; - /* abort all idle slots */ - for (;;) { - slot = pop_slot(&workers->idle); - if (slot) { - apr_thread_mutex_lock(slot->lock); - slot->aborted = 1; - apr_thread_cond_signal(slot->not_idle); - apr_thread_mutex_unlock(slot->lock); - } - else { - break; - } - } + apr_time_t end, timeout = apr_time_from_sec(1); + apr_status_t rv; + int n = 0, wait_sec = 5; - h2_fifo_term(workers->mplxs); - h2_fifo_interrupt(workers->mplxs); + ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, workers->s, + "h2_workers: cleanup %d workers (%d idle)", + workers->active_slots, workers->idle_slots); + apr_thread_mutex_lock(workers->lock); + workers->shutdown = 1; + workers->aborted = 1; + wake_all_idles(workers); + apr_thread_mutex_unlock(workers->lock); + + /* wait for all the workers to become zombies and join them. + * this gets called after the mpm shuts down and all connections + * have either been handled (graceful) or we are forced exiting + * (ungrateful). Either way, we show limited patience. 
*/ + end = apr_time_now() + apr_time_from_sec(wait_sec); + while (apr_time_now() < end) { + apr_thread_mutex_lock(workers->lock); + if (!(n = workers->active_slots)) { + apr_thread_mutex_unlock(workers->lock); + break; + } + wake_all_idles(workers); + rv = apr_thread_cond_timedwait(workers->all_done, workers->lock, timeout); + apr_thread_mutex_unlock(workers->lock); - cleanup_zombies(workers); + if (APR_TIMEUP == rv) { + ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, workers->s, + APLOGNO(10290) "h2_workers: waiting for workers to close, " + "still seeing %d workers (%d idle) living", + workers->active_slots, workers->idle_slots); + } + } + if (n) { + ap_log_error(APLOG_MARK, APLOG_WARNING, 0, workers->s, + APLOGNO(10291) "h2_workers: cleanup, %d workers (%d idle) " + "did not exit after %d seconds.", + n, workers->idle_slots, wait_sec); } + ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, workers->s, + "h2_workers: cleanup all workers terminated"); + apr_thread_mutex_lock(workers->lock); + join_zombies(workers); + apr_thread_mutex_unlock(workers->lock); + ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, workers->s, + "h2_workers: cleanup zombie workers joined"); + return APR_SUCCESS; } -h2_workers *h2_workers_create(server_rec *s, apr_pool_t *server_pool, - int min_workers, int max_workers, - int idle_secs) +h2_workers *h2_workers_create(server_rec *s, apr_pool_t *pchild, + int max_slots, int min_active, + apr_time_t idle_limit) { - apr_status_t status; + apr_status_t rv; h2_workers *workers; apr_pool_t *pool; - int i, n; + apr_allocator_t *allocator; + int locked = 0; + apr_uint32_t i; ap_assert(s); - ap_assert(server_pool); + ap_assert(pchild); + ap_assert(idle_limit > 0); /* let's have our own pool that will be parent to all h2_worker * instances we create. This happens in various threads, but always * guarded by our lock. Without this pool, all subpool creations would * happen on the pool handed to us, which we do not guard. */ - apr_pool_create(&pool, server_pool); + rv = apr_allocator_create(&allocator); + if (rv != APR_SUCCESS) { + goto cleanup; + } + rv = apr_pool_create_ex(&pool, pchild, NULL, allocator); + if (rv != APR_SUCCESS) { + apr_allocator_destroy(allocator); + goto cleanup; + } + apr_allocator_owner_set(allocator, pool); apr_pool_tag(pool, "h2_workers"); workers = apr_pcalloc(pool, sizeof(h2_workers)); if (!workers) { @@ -302,31 +457,27 @@ h2_workers *h2_workers_create(server_rec *s, apr_pool_t *server_pool, workers->s = s; workers->pool = pool; - workers->min_workers = min_workers; - workers->max_workers = max_workers; - workers->max_idle_secs = (idle_secs > 0)? idle_secs : 10; - - /* FIXME: the fifo set we use here has limited capacity. Once the - * set is full, connections with new requests do a wait. Unfortunately, - * we have optimizations in place there that makes such waiting "unfair" - * in the sense that it may take connections a looong time to get scheduled. - * - * Need to rewrite this to use one of our double-linked lists and a mutex - * to have unlimited capacity and fair scheduling. - * - * For now, we just make enough room to have many connections inside one - * process. 
- */ - status = h2_fifo_set_create(&workers->mplxs, pool, 8 * 1024); - if (status != APR_SUCCESS) { - return NULL; - } - - status = apr_threadattr_create(&workers->thread_attr, workers->pool); - if (status != APR_SUCCESS) { - return NULL; - } - + workers->min_active = min_active; + workers->max_slots = max_slots; + workers->idle_limit = idle_limit; + workers->dynamic = (workers->min_active < workers->max_slots); + + ap_log_error(APLOG_MARK, APLOG_INFO, 0, s, + "h2_workers: created with min=%d max=%d idle_ms=%d", + workers->min_active, workers->max_slots, + (int)apr_time_as_msec(idle_limit)); + + APR_RING_INIT(&workers->idle, h2_slot, link); + APR_RING_INIT(&workers->busy, h2_slot, link); + APR_RING_INIT(&workers->free, h2_slot, link); + APR_RING_INIT(&workers->zombie, h2_slot, link); + + APR_RING_INIT(&workers->prod_active, ap_conn_producer_t, link); + APR_RING_INIT(&workers->prod_idle, ap_conn_producer_t, link); + + rv = apr_threadattr_create(&workers->thread_attr, workers->pool); + if (rv != APR_SUCCESS) goto cleanup; + if (ap_thread_stacksize != 0) { apr_threadattr_stacksize_set(workers->thread_attr, ap_thread_stacksize); @@ -335,49 +486,141 @@ h2_workers *h2_workers_create(server_rec *s, apr_pool_t *server_pool, (long)ap_thread_stacksize); } - status = apr_thread_mutex_create(&workers->lock, - APR_THREAD_MUTEX_DEFAULT, - workers->pool); - if (status == APR_SUCCESS) { - n = workers->nslots = workers->max_workers; - workers->slots = apr_pcalloc(workers->pool, n * sizeof(h2_slot)); - if (workers->slots == NULL) { - workers->nslots = 0; - status = APR_ENOMEM; - } - for (i = 0; i < n; ++i) { - workers->slots[i].id = i; - } + rv = apr_thread_mutex_create(&workers->lock, + APR_THREAD_MUTEX_DEFAULT, + workers->pool); + if (rv != APR_SUCCESS) goto cleanup; + rv = apr_thread_cond_create(&workers->all_done, workers->pool); + if (rv != APR_SUCCESS) goto cleanup; + rv = apr_thread_cond_create(&workers->prod_done, workers->pool); + if (rv != APR_SUCCESS) goto cleanup; + + apr_thread_mutex_lock(workers->lock); + locked = 1; + + /* create the slots and put them on the free list */ + workers->slots = apr_pcalloc(workers->pool, workers->max_slots * sizeof(h2_slot)); + + for (i = 0; i < workers->max_slots; ++i) { + workers->slots[i].id = i; + workers->slots[i].state = H2_SLOT_FREE; + workers->slots[i].workers = workers; + APR_RING_ELEM_INIT(&workers->slots[i], link); + APR_RING_INSERT_TAIL(&workers->free, &workers->slots[i], h2_slot, link); + rv = apr_thread_cond_create(&workers->slots[i].more_work, workers->pool); + if (rv != APR_SUCCESS) goto cleanup; } - if (status == APR_SUCCESS) { - /* we activate all for now, TODO: support min_workers again. - * do this in reverse for vanity reasons so slot 0 will most - * likely be at head of idle queue. 
*/ - n = workers->max_workers; - for (i = n-1; i >= 0; --i) { - status = activate_slot(workers, &workers->slots[i]); - } - /* the rest of the slots go on the free list */ - for(i = n; i < workers->nslots; ++i) { - push_slot(&workers->free, &workers->slots[i]); - } - workers->dynamic = (workers->worker_count < workers->max_workers); + + /* activate the min amount of workers */ + for (i = 0; i < workers->min_active; ++i) { + rv = activate_slot(workers); + if (rv != APR_SUCCESS) goto cleanup; } - if (status == APR_SUCCESS) { - apr_pool_pre_cleanup_register(pool, workers, workers_pool_cleanup); + +cleanup: + if (locked) { + apr_thread_mutex_unlock(workers->lock); + } + if (rv == APR_SUCCESS) { + /* Stop/join the workers threads when the MPM child exits (pchild is + * destroyed), and as a pre_cleanup of pchild thus before the threads + * pools (children of workers->pool) so that they are not destroyed + * before/under us. + */ + apr_pool_pre_cleanup_register(pchild, workers, workers_pool_cleanup); return workers; } + ap_log_error(APLOG_MARK, APLOG_DEBUG, rv, s, + "h2_workers: errors initializing"); return NULL; } -apr_status_t h2_workers_register(h2_workers *workers, struct h2_mplx *m) +apr_uint32_t h2_workers_get_max_workers(h2_workers *workers) { - apr_status_t status = h2_fifo_push(workers->mplxs, m); - wake_idle_worker(workers); - return status; + return workers->max_slots; } -apr_status_t h2_workers_unregister(h2_workers *workers, struct h2_mplx *m) +void h2_workers_shutdown(h2_workers *workers, int graceful) { - return h2_fifo_remove(workers->mplxs, m); + ap_conn_producer_t *prod; + + apr_thread_mutex_lock(workers->lock); + ap_log_error(APLOG_MARK, APLOG_TRACE1, 0, workers->s, + "h2_workers: shutdown graceful=%d", graceful); + workers->shutdown = 1; + workers->idle_limit = apr_time_from_sec(1); + wake_all_idles(workers); + for (prod = APR_RING_FIRST(&workers->prod_idle); + prod != APR_RING_SENTINEL(&workers->prod_idle, ap_conn_producer_t, link); + prod = APR_RING_NEXT(prod, link)) { + if (prod->fn_shutdown) { + prod->fn_shutdown(prod->baton, graceful); + } + } + apr_thread_mutex_unlock(workers->lock); +} + +ap_conn_producer_t *h2_workers_register(h2_workers *workers, + apr_pool_t *producer_pool, + const char *name, + ap_conn_producer_next *fn_next, + ap_conn_producer_done *fn_done, + ap_conn_producer_shutdown *fn_shutdown, + void *baton) +{ + ap_conn_producer_t *prod; + + prod = apr_pcalloc(producer_pool, sizeof(*prod)); + APR_RING_ELEM_INIT(prod, link); + prod->name = name; + prod->fn_next = fn_next; + prod->fn_done = fn_done; + prod->fn_shutdown = fn_shutdown; + prod->baton = baton; + + apr_thread_mutex_lock(workers->lock); + prod->state = PROD_IDLE; + APR_RING_INSERT_TAIL(&workers->prod_idle, prod, ap_conn_producer_t, link); + apr_thread_mutex_unlock(workers->lock); + + return prod; +} + +apr_status_t h2_workers_join(h2_workers *workers, ap_conn_producer_t *prod) +{ + apr_status_t rv = APR_SUCCESS; + + apr_thread_mutex_lock(workers->lock); + if (PROD_JOINED == prod->state) { + AP_DEBUG_ASSERT(APR_RING_NEXT(prod, link) == prod); /* should be in no ring */ + rv = APR_EINVAL; + } + else { + AP_DEBUG_ASSERT(PROD_ACTIVE == prod->state || PROD_IDLE == prod->state); + APR_RING_REMOVE(prod, link); + prod->state = PROD_JOINED; /* prevent further activations */ + while (prod->conns_active > 0) { + apr_thread_cond_wait(workers->prod_done, workers->lock); + } + APR_RING_ELEM_INIT(prod, link); /* make it link to itself */ + } + apr_thread_mutex_unlock(workers->lock); + return rv; +} + +apr_status_t 
h2_workers_activate(h2_workers *workers, ap_conn_producer_t *prod) +{ + apr_status_t rv = APR_SUCCESS; + apr_thread_mutex_lock(workers->lock); + if (PROD_IDLE == prod->state) { + APR_RING_REMOVE(prod, link); + prod->state = PROD_ACTIVE; + APR_RING_INSERT_TAIL(&workers->prod_active, prod, ap_conn_producer_t, link); + wake_idle_worker(workers, prod); + } + else if (PROD_JOINED == prod->state) { + rv = APR_EINVAL; + } + apr_thread_mutex_unlock(workers->lock); + return rv; } diff --git a/modules/http2/h2_workers.h b/modules/http2/h2_workers.h index 3561582..c219304 100644 --- a/modules/http2/h2_workers.h +++ b/modules/http2/h2_workers.h @@ -17,66 +17,113 @@ #ifndef __mod_h2__h2_workers__ #define __mod_h2__h2_workers__ -/* Thread pool specific to executing h2_tasks. Has a minimum and maximum - * number of workers it creates. Starts with minimum workers and adds - * some on load, reduces the number again when idle. - * +/* Thread pool specific to executing secondary connections. + * Has a minimum and maximum number of workers it creates. + * Starts with minimum workers and adds some on load, + * reduces the number again when idle. */ struct apr_thread_mutex_t; struct apr_thread_cond_t; struct h2_mplx; struct h2_request; -struct h2_task; struct h2_fifo; -struct h2_slot; - typedef struct h2_workers h2_workers; -struct h2_workers { - server_rec *s; - apr_pool_t *pool; - - int next_worker_id; - int min_workers; - int max_workers; - int max_idle_secs; - - int aborted; - int dynamic; - apr_threadattr_t *thread_attr; - int nslots; - struct h2_slot *slots; - - volatile apr_uint32_t worker_count; - - struct h2_slot *free; - struct h2_slot *idle; - struct h2_slot *zombies; - - struct h2_fifo *mplxs; - - struct apr_thread_mutex_t *lock; -}; +/** + * Create a worker set with a maximum number of 'slots', i.e. worker + * threads to run. Always keep `min_active` workers running. Shut down + * any additional workers after `idle_limit` of doing nothing. + * + * @param s the base server + * @param pool for allocations + * @param min_active minimum number of workers to run + * @param max_slots maximum number of worker slots + * @param idle_limit idle duration after which a non-minimal slot shuts down + */ +h2_workers *h2_workers_create(server_rec *s, apr_pool_t *pool, + int max_slots, int min_active, apr_time_t idle_limit); +/** + * Shut down processing. + */ +void h2_workers_shutdown(h2_workers *workers, int graceful); -/* Create a worker pool with the given minimum and maximum number of - * threads. +/** + * Get the maximum number of workers. */ -h2_workers *h2_workers_create(server_rec *s, apr_pool_t *pool, - int min_size, int max_size, int idle_secs); +apr_uint32_t h2_workers_get_max_workers(h2_workers *workers); + +/** + * ap_conn_producer_t is the source of connections (conn_rec*) to run. + * + * Active producers are queried by idle workers for connections. + * If they do not hand one back, they become inactive and are not + * queried further. `h2_workers_activate()` places them on the active + * list again. + * + * A producer finishing MUST call `h2_workers_join()` which removes + * it completely from workers processing and waits for all ongoing + * work for this producer to be done. + */ +typedef struct ap_conn_producer_t ap_conn_producer_t; + +/** + * Ask a producer for the next connection to process.
+ * @param baton value from producer registration + * @param pconn holds the connection to process on return + * @param pmore if the producer has more connections that may be retrieved + * @return the next connection to process, or NULL if no + * connection is available at the time. + */ +typedef conn_rec *ap_conn_producer_next(void *baton, int *pmore); + +/** + * Tell the producer that processing the connection is done. + * @param baton value from producer registration + * @param conn the connection that has been processed. + */ +typedef void ap_conn_producer_done(void *baton, conn_rec *conn); + +/** + * Tell the producer that the workers are shutting down. + * @param baton value from producer registration + * @param graceful != 0 iff shutdown is graceful + */ +typedef void ap_conn_producer_shutdown(void *baton, int graceful); + +/** + * Register a new producer with the given `baton` and callback functions. + * Will allocate internal structures from the given pool (but make no use + * of the pool after registration). + * Producers are inactive on registration. See `h2_workers_activate()`. + * @param producer_pool to allocate the producer from + * @param name descriptive name of the producer, need not be unique + * @param fn_next callback for retrieving connections to process + * @param fn_done callback for processed connections + * @param baton provided value passed on in callbacks + * @return the producer instance created + */ +ap_conn_producer_t *h2_workers_register(h2_workers *workers, + apr_pool_t *producer_pool, + const char *name, + ap_conn_producer_next *fn_next, + ap_conn_producer_done *fn_done, + ap_conn_producer_shutdown *fn_shutdown, + void *baton); /** - * Registers a h2_mplx for task scheduling. If this h2_mplx runs - * out of tasks, it will be automatically be unregistered. Should - * new tasks arrive, it needs to be registered again. + * Stop retrieving more connections from the producer and wait + * for all ongoing work from that producer to be done. */ -apr_status_t h2_workers_register(h2_workers *workers, struct h2_mplx *m); +apr_status_t h2_workers_join(h2_workers *workers, ap_conn_producer_t *producer); /** - * Remove a h2_mplx from the worker registry. + * Activate a producer. A worker will query the producer for a connection + * to process, once a worker is available. + * This may be called regardless of whether the producer is active or inactive. */ -apr_status_t h2_workers_unregister(h2_workers *workers, struct h2_mplx *m); +apr_status_t h2_workers_activate(h2_workers *workers, ap_conn_producer_t *producer); #endif /* defined(__mod_h2__h2_workers__) */ diff --git a/modules/http2/h2_ws.c b/modules/http2/h2_ws.c new file mode 100644 index 0000000..396e6e1 --- /dev/null +++ b/modules/http2/h2_ws.c @@ -0,0 +1,362 @@ +/* Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include + +#include "apr.h" +#include "apr_strings.h" +#include "apr_lib.h" +#include "apr_sha1.h" +#include "apr_strmatch.h" + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "h2_private.h" +#include "h2_config.h" +#include "h2_conn_ctx.h" +#include "h2_headers.h" +#include "h2_request.h" +#include "h2_ws.h" + +#if H2_USE_WEBSOCKETS + +#include "apr_encode.h" /* H2_USE_WEBSOCKETS is conditional on APR 1.6+ */ + +static ap_filter_rec_t *c2_ws_out_filter_handle; + +struct ws_filter_ctx { + const char *ws_accept_base64; + int has_final_response; + int override_body; +}; + +/** + * Generate the "Sec-WebSocket-Accept" header field for the given key + * (base64 encoded) as defined in RFC 6455 ch. 4.2.2 step 5.3 + */ +static const char *gen_ws_accept(conn_rec *c, const char *key_base64) +{ + apr_byte_t dgst[APR_SHA1_DIGESTSIZE]; + const char ws_guid[] = "258EAFA5-E914-47DA-95CA-C5AB0DC85B11"; + apr_sha1_ctx_t sha1_ctx; + + apr_sha1_init(&sha1_ctx); + apr_sha1_update(&sha1_ctx, key_base64, (unsigned int)strlen(key_base64)); + apr_sha1_update(&sha1_ctx, ws_guid, (unsigned int)strlen(ws_guid)); + apr_sha1_final(dgst, &sha1_ctx); + + return apr_pencode_base64_binary(c->pool, dgst, sizeof(dgst), + APR_ENCODE_NONE, NULL); +} + +const h2_request *h2_ws_rewrite_request(const h2_request *req, + conn_rec *c2, int no_body) +{ + h2_conn_ctx_t *conn_ctx = h2_conn_ctx_get(c2); + h2_request *wsreq; + unsigned char key_raw[16]; + const char *key_base64, *accept_base64; + struct ws_filter_ctx *ws_ctx; + apr_status_t rv; + + if (!conn_ctx || !req->protocol || strcmp("websocket", req->protocol)) + return req; + + if (ap_cstr_casecmp("CONNECT", req->method)) { + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c2, + "h2_c2(%s-%d): websocket request with method %s", + conn_ctx->id, conn_ctx->stream_id, req->method); + return req; + } + if (!req->scheme) { + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c2, + "h2_c2(%s-%d): websocket CONNECT without :scheme", + conn_ctx->id, conn_ctx->stream_id); + return req; + } + if (!req->path) { + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c2, + "h2_c2(%s-%d): websocket CONNECT without :path", + conn_ctx->id, conn_ctx->stream_id); + return req; + } + + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c2, + "h2_c2(%s-%d): websocket CONNECT for %s", + conn_ctx->id, conn_ctx->stream_id, req->path); + /* Transform the HTTP/2 extended CONNECT to an internal GET using + * the HTTP/1.1 version of websocket connection setup. */ + wsreq = h2_request_clone(c2->pool, req); + wsreq->method = "GET"; + wsreq->protocol = NULL; + apr_table_set(wsreq->headers, "Upgrade", "websocket"); + apr_table_add(wsreq->headers, "Connection", "Upgrade"); + /* add Sec-WebSocket-Key header */ + rv = apr_generate_random_bytes(key_raw, sizeof(key_raw)); + if (rv != APR_SUCCESS) { + ap_log_error(APLOG_MARK, APLOG_CRIT, rv, NULL, APLOGNO(10461) + "error generating secret"); + return NULL; + } + key_base64 = apr_pencode_base64_binary(c2->pool, key_raw, sizeof(key_raw), + APR_ENCODE_NONE, NULL); + apr_table_set(wsreq->headers, "Sec-WebSocket-Key", key_base64); + /* This is now the request to process internally */ + + /* When this request gets processed and delivers a 101 response, + * we expect it to carry a "Sec-WebSocket-Accept" header with + * exactly the following value, as per RFC 6455. 
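+ * (Illustration only, not part of the negotiation itself: RFC 6455's
+ * sample handshake uses the key "dGhlIHNhbXBsZSBub25jZQ==", for which
+ * this computation, base64(SHA1(key || "258EAFA5-E914-47DA-95CA-C5AB0DC85B11")),
+ * yields "s3pPLMBiTxaQ9kYGzzhZRbK+xOo=".)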
*/ + accept_base64 = gen_ws_accept(c2, key_base64); + /* Add an output filter that intercepts generated responses: + * - if a valid WebSocket negotiation happens, transform the + * 101 response to a 200 + * - if a 2xx response happens, that does not pass the Accept test, + * return a 502 indicating that the URI seems not support the websocket + * protocol (RFC 8441 does not define this, but it seems the best + * choice) + * - if a 3xx, 4xx or 5xx response happens, forward this unchanged. + */ + ws_ctx = apr_pcalloc(c2->pool, sizeof(*ws_ctx)); + ws_ctx->ws_accept_base64 = accept_base64; + /* insert our filter just before the C2 core filter */ + ap_remove_output_filter_byhandle(c2->output_filters, "H2_C2_NET_OUT"); + ap_add_output_filter("H2_C2_WS_OUT", ws_ctx, NULL, c2); + ap_add_output_filter("H2_C2_NET_OUT", NULL, NULL, c2); + /* Mark the connection as being an Upgrade, with some special handling + * since the request needs an EOS, without the stream being closed */ + conn_ctx->is_upgrade = 1; + + return wsreq; +} + +static apr_bucket *make_valid_resp(conn_rec *c2, int status, + apr_table_t *headers, apr_table_t *notes) +{ + apr_table_t *nheaders, *nnotes; + + ap_assert(headers); + nheaders = apr_table_clone(c2->pool, headers); + apr_table_unset(nheaders, "Connection"); + apr_table_unset(nheaders, "Upgrade"); + apr_table_unset(nheaders, "Sec-WebSocket-Accept"); + nnotes = notes? apr_table_clone(c2->pool, notes) : + apr_table_make(c2->pool, 10); +#if AP_HAS_RESPONSE_BUCKETS + return ap_bucket_response_create(status, NULL, nheaders, nnotes, + c2->pool, c2->bucket_alloc); +#else + return h2_bucket_headers_create(c2->bucket_alloc, + h2_headers_create(status, nheaders, + nnotes, 0, c2->pool)); +#endif +} + +static apr_bucket *make_invalid_resp(conn_rec *c2, int status, + apr_table_t *notes) +{ + apr_table_t *nheaders, *nnotes; + + nheaders = apr_table_make(c2->pool, 10); + apr_table_setn(nheaders, "Content-Length", "0"); + nnotes = notes? apr_table_clone(c2->pool, notes) : + apr_table_make(c2->pool, 10); +#if AP_HAS_RESPONSE_BUCKETS + return ap_bucket_response_create(status, NULL, nheaders, nnotes, + c2->pool, c2->bucket_alloc); +#else + return h2_bucket_headers_create(c2->bucket_alloc, + h2_headers_create(status, nheaders, + nnotes, 0, c2->pool)); +#endif +} + +static void ws_handle_resp(conn_rec *c2, h2_conn_ctx_t *conn_ctx, + struct ws_filter_ctx *ws_ctx, apr_bucket *b) +{ +#if AP_HAS_RESPONSE_BUCKETS + ap_bucket_response *resp = b->data; +#else /* AP_HAS_RESPONSE_BUCKETS */ + h2_headers *resp = h2_bucket_headers_get(b); +#endif /* !AP_HAS_RESPONSE_BUCKETS */ + apr_bucket *b_override = NULL; + int is_final = 0; + int override_body = 0; + + if (ws_ctx->has_final_response) { + /* already did, nop */ + return; + } + + ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, c2, + "h2_c2(%s-%d): H2_C2_WS_OUT inspecting response %d", + conn_ctx->id, conn_ctx->stream_id, resp->status); + if (resp->status == HTTP_SWITCHING_PROTOCOLS) { + /* The resource agreed to switch protocol. 
But this is only valid + * if it sends back the correct Sec-WebSocket-Accept header value */ + const char *hd = apr_table_get(resp->headers, "Sec-WebSocket-Accept"); + if (hd && !strcmp(ws_ctx->ws_accept_base64, hd)) { + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c2, + "h2_c2(%s-%d): websocket CONNECT, valid 101 Upgrade" + ", converting to 200 response", + conn_ctx->id, conn_ctx->stream_id); + b_override = make_valid_resp(c2, HTTP_OK, resp->headers, resp->notes); + is_final = 1; + } + else { + if (!hd) { + /* This points to someone being confused */ + ap_log_cerror(APLOG_MARK, APLOG_WARNING, 0, c2, APLOGNO(10462) + "h2_c2(%s-%d): websocket CONNECT, got 101 response " + "without Sec-WebSocket-Accept header", + conn_ctx->id, conn_ctx->stream_id); + } + else { + /* This points to a bug, either in our WebSockets negotiation + * or in the request processing's implementation of WebSockets */ + ap_log_cerror(APLOG_MARK, APLOG_ERR, 0, c2, APLOGNO(10463) + "h2_c2(%s-%d): websocket CONNECT, 101 response " + "with 'Sec-WebSocket-Accept: %s' but expected %s", + conn_ctx->id, conn_ctx->stream_id, hd, + ws_ctx->ws_accept_base64); + } + b_override = make_invalid_resp(c2, HTTP_BAD_GATEWAY, resp->notes); + override_body = is_final = 1; + } + } + else if (resp->status < 200) { + /* other intermediate response, pass through */ + } + else if (resp->status < 300) { + /* Failure, we might be talking to a plain http resource */ + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c2, + "h2_c2(%s-%d): websocket CONNECT, invalid response %d", + conn_ctx->id, conn_ctx->stream_id, resp->status); + b_override = make_invalid_resp(c2, HTTP_BAD_GATEWAY, resp->notes); + override_body = is_final = 1; + } + else { + /* error response, pass through. */ + ws_ctx->has_final_response = 1; + } + + if (b_override) { + APR_BUCKET_INSERT_BEFORE(b, b_override); + apr_bucket_delete(b); + b = b_override; + } + if (override_body) { + APR_BUCKET_INSERT_AFTER(b, apr_bucket_eos_create(c2->bucket_alloc)); + ws_ctx->override_body = 1; + } + if (is_final) { + ws_ctx->has_final_response = 1; + conn_ctx->has_final_response = 1; + } +} + +static apr_status_t h2_c2_ws_filter_out(ap_filter_t* f, apr_bucket_brigade* bb) +{ + struct ws_filter_ctx *ws_ctx = f->ctx; + h2_conn_ctx_t *conn_ctx = h2_conn_ctx_get(f->c); + apr_bucket *b, *bnext; + + ap_assert(conn_ctx); + if (ws_ctx->override_body) { + /* We have overridden the original response and also its body. + * If this filter is called again, we signal a hard abort to + * allow processing to terminate at the earliest. */ + f->c->aborted = 1; + return APR_ECONNABORTED; + } + + /* Inspect the brigade, looking for RESPONSE/HEADER buckets. + * Remember, this filter is only active for client websocket CONNECT + * requests that we translated to an internal GET with websocket + * headers. + * We inspect the response to see if the internal resource actually + * agrees to talk websocket or is "just" a normal HTTP resource that + * ignored the websocket request headers.
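+ * In short: a 101 carrying the expected Sec-WebSocket-Accept value is
+ * rewritten to a 200; a 101 without it, or any other 2xx, becomes a 502
+ * with an empty body; remaining 1xx and all 3xx/4xx/5xx responses pass
+ * through unchanged.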
*/ + for (b = APR_BRIGADE_FIRST(bb); + b != APR_BRIGADE_SENTINEL(bb); + b = bnext) + { + bnext = APR_BUCKET_NEXT(b); + if (APR_BUCKET_IS_METADATA(b)) { +#if AP_HAS_RESPONSE_BUCKETS + if (AP_BUCKET_IS_RESPONSE(b)) { +#else + if (H2_BUCKET_IS_HEADERS(b)) { +#endif /* !AP_HAS_RESPONSE_BUCKETS */ + ws_handle_resp(f->c, conn_ctx, ws_ctx, b); + continue; + } + } + else if (ws_ctx->override_body) { + apr_bucket_delete(b); + } + } + return ap_pass_brigade(f->next, bb); +} + +static int ws_post_read(request_rec *r) +{ + + if (r->connection->master) { + h2_conn_ctx_t *conn_ctx = h2_conn_ctx_get(r->connection); + if (conn_ctx && conn_ctx->is_upgrade && + !h2_config_sgeti(r->server, H2_CONF_WEBSOCKETS)) { + return HTTP_NOT_IMPLEMENTED; + } + } + return DECLINED; +} + +void h2_ws_register_hooks(void) +{ + ap_hook_post_read_request(ws_post_read, NULL, NULL, APR_HOOK_MIDDLE); + c2_ws_out_filter_handle = + ap_register_output_filter("H2_C2_WS_OUT", h2_c2_ws_filter_out, + NULL, AP_FTYPE_NETWORK); +} + +#else /* H2_USE_WEBSOCKETS */ + +const h2_request *h2_ws_rewrite_request(const h2_request *req, + conn_rec *c2, int no_body) +{ + (void)c2; + (void)no_body; + /* no rewriting */ + return req; +} + +void h2_ws_register_hooks(void) +{ + /* NOP */ +} + +#endif /* H2_USE_WEBSOCKETS (else part) */ diff --git a/modules/http2/h2_ws.h b/modules/http2/h2_ws.h new file mode 100644 index 0000000..a94d300 --- /dev/null +++ b/modules/http2/h2_ws.h @@ -0,0 +1,35 @@ +/* Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef __mod_h2__h2_ws__ +#define __mod_h2__h2_ws__ + +#include "h2.h" + +/** + * Rewrite a websocket request. 
+ * + * @param req the h2 request to rewrite + * @param c2 the connection to process the request on + * @param no_body != 0 iff the request is known to have no body + * @return the websocket request for internal submit + */ +const h2_request *h2_ws_rewrite_request(const h2_request *req, + conn_rec *c2, int no_body); + +void h2_ws_register_hooks(void); + +#endif /* defined(__mod_h2__h2_ws__) */ diff --git a/modules/http2/mod_http2.c b/modules/http2/mod_http2.c index 3d278e9..1bd34b2 100644 --- a/modules/http2/mod_http2.c +++ b/modules/http2/mod_http2.c @@ -24,24 +24,25 @@ #include #include #include +#include #include "mod_http2.h" #include #include "h2_stream.h" -#include "h2_alt_svc.h" -#include "h2_conn.h" -#include "h2_filter.h" -#include "h2_task.h" +#include "h2_c1.h" +#include "h2_c2.h" #include "h2_session.h" #include "h2_config.h" -#include "h2_ctx.h" -#include "h2_h2.h" +#include "h2_conn_ctx.h" +#include "h2_protocol.h" #include "h2_mplx.h" #include "h2_push.h" #include "h2_request.h" #include "h2_switch.h" #include "h2_version.h" +#include "h2_bucket_beam.h" +#include "h2_ws.h" static void h2_hooks(apr_pool_t *pool); @@ -125,27 +126,6 @@ static int h2_post_config(apr_pool_t *p, apr_pool_t *plog, myfeats.dyn_windows? "+DWINS" : "", ngh2? ngh2->version_str : "unknown"); - switch (h2_conn_mpm_type()) { - case H2_MPM_SIMPLE: - case H2_MPM_MOTORZ: - case H2_MPM_NETWARE: - case H2_MPM_WINNT: - /* not sure we need something extra for those. */ - break; - case H2_MPM_EVENT: - case H2_MPM_WORKER: - /* all fine, we know these ones */ - break; - case H2_MPM_PREFORK: - /* ok, we now know how to handle that one */ - break; - case H2_MPM_UNKNOWN: - /* ??? */ - ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s, APLOGNO(03091) - "post_config: mpm type unknown"); - break; - } - if (!h2_mpm_supported() && !mpm_warned) { mpm_warned = 1; ap_log_error(APLOG_MARK, APLOG_WARNING, 0, s, APLOGNO(10034) @@ -157,14 +137,11 @@ static int h2_post_config(apr_pool_t *p, apr_pool_t *plog, h2_conn_mpm_name()); } - status = h2_h2_init(p, s); + status = h2_protocol_init(p, s); if (status == APR_SUCCESS) { status = h2_switch_init(p, s); } - if (status == APR_SUCCESS) { - status = h2_task_init(p, s); - } - + return status; } @@ -172,44 +149,29 @@ static char *http2_var_lookup(apr_pool_t *, server_rec *, conn_rec *, request_rec *, char *name); static int http2_is_h2(conn_rec *); -static apr_status_t http2_req_engine_push(const char *ngn_type, - request_rec *r, - http2_req_engine_init *einit) -{ - return h2_mplx_req_engine_push(ngn_type, r, einit); -} - -static apr_status_t http2_req_engine_pull(h2_req_engine *ngn, - apr_read_type_e block, - int capacity, - request_rec **pr) -{ - return h2_mplx_req_engine_pull(ngn, block, capacity, pr); -} - -static void http2_req_engine_done(h2_req_engine *ngn, conn_rec *r_conn, - apr_status_t status) -{ - h2_mplx_req_engine_done(ngn, r_conn, status); -} - static void http2_get_num_workers(server_rec *s, int *minw, int *maxw) { - h2_get_num_workers(s, minw, maxw); + apr_time_t tdummy; + + h2_get_workers_config(s, minw, maxw, &tdummy); } /* Runs once per created child process. Perform any process * related initionalization here. 
*/ -static void h2_child_init(apr_pool_t *pool, server_rec *s) +static void h2_child_init(apr_pool_t *pchild, server_rec *s) { + apr_status_t rv; + /* Set up our connection processing */ - apr_status_t status = h2_conn_child_init(pool, s); - if (status != APR_SUCCESS) { - ap_log_error(APLOG_MARK, APLOG_ERR, status, s, + rv = h2_c1_child_init(pchild, s); + if (APR_SUCCESS == rv) { + rv = h2_c2_child_init(pchild, s); + } + if (APR_SUCCESS != rv) { + ap_log_error(APLOG_MARK, APLOG_ERR, rv, s, APLOGNO(02949) "initializing connection handling"); } - } /* Install this module into the apache2 infrastructure. @@ -220,9 +182,6 @@ static void h2_hooks(apr_pool_t *pool) APR_REGISTER_OPTIONAL_FN(http2_is_h2); APR_REGISTER_OPTIONAL_FN(http2_var_lookup); - APR_REGISTER_OPTIONAL_FN(http2_req_engine_push); - APR_REGISTER_OPTIONAL_FN(http2_req_engine_pull); - APR_REGISTER_OPTIONAL_FN(http2_req_engine_done); APR_REGISTER_OPTIONAL_FN(http2_get_num_workers); ap_log_perror(APLOG_MARK, APLOG_TRACE1, 0, pool, "installing hooks"); @@ -234,47 +193,45 @@ static void h2_hooks(apr_pool_t *pool) /* Run once after a child process has been created. */ ap_hook_child_init(h2_child_init, NULL, NULL, APR_HOOK_MIDDLE); +#if AP_MODULE_MAGIC_AT_LEAST(20120211, 110) + ap_hook_child_stopping(h2_c1_child_stopping, NULL, NULL, APR_HOOK_MIDDLE); +#endif - h2_h2_register_hooks(); + h2_c1_register_hooks(); h2_switch_register_hooks(); - h2_task_register_hooks(); + h2_c2_register_hooks(); + h2_ws_register_hooks(); - h2_alt_svc_register_hooks(); - - /* Setup subprocess env for certain variables + /* Setup subprocess env for certain variables */ ap_hook_fixups(h2_h2_fixups, NULL,NULL, APR_HOOK_MIDDLE); - - /* test http2 connection status handler */ - ap_hook_handler(h2_filter_h2_status_handler, NULL, NULL, APR_HOOK_MIDDLE); } static const char *val_HTTP2(apr_pool_t *p, server_rec *s, - conn_rec *c, request_rec *r, h2_ctx *ctx) + conn_rec *c, request_rec *r, h2_conn_ctx_t *ctx) { return ctx? 
"on" : "off"; } static const char *val_H2_PUSH(apr_pool_t *p, server_rec *s, - conn_rec *c, request_rec *r, h2_ctx *ctx) + conn_rec *c, request_rec *r, + h2_conn_ctx_t *conn_ctx) { - if (ctx) { + if (conn_ctx) { if (r) { - h2_task *task = h2_ctx_get_task(ctx); - if (task) { - h2_stream *stream = h2_mplx_stream_get(task->mplx, task->stream_id); + if (conn_ctx->stream_id) { + const h2_stream *stream = h2_mplx_c2_stream_get(conn_ctx->mplx, conn_ctx->stream_id); if (stream && stream->push_policy != H2_PUSH_NONE) { return "on"; } } } - else if (c && h2_session_push_enabled(ctx->session)) { + else if (c && h2_session_push_enabled(conn_ctx->session)) { return "on"; } } else if (s) { - const h2_config *cfg = h2_config_sget(s); - if (cfg && h2_config_geti(cfg, H2_CONF_PUSH)) { + if (h2_config_geti(r, s, H2_CONF_PUSH)) { return "on"; } } @@ -282,11 +239,11 @@ static const char *val_H2_PUSH(apr_pool_t *p, server_rec *s, } static const char *val_H2_PUSHED(apr_pool_t *p, server_rec *s, - conn_rec *c, request_rec *r, h2_ctx *ctx) + conn_rec *c, request_rec *r, + h2_conn_ctx_t *conn_ctx) { - if (ctx) { - h2_task *task = h2_ctx_get_task(ctx); - if (task && !H2_STREAM_CLIENT_INITIATED(task->stream_id)) { + if (conn_ctx) { + if (conn_ctx->stream_id && !H2_STREAM_CLIENT_INITIATED(conn_ctx->stream_id)) { return "PUSHED"; } } @@ -294,12 +251,12 @@ static const char *val_H2_PUSHED(apr_pool_t *p, server_rec *s, } static const char *val_H2_PUSHED_ON(apr_pool_t *p, server_rec *s, - conn_rec *c, request_rec *r, h2_ctx *ctx) + conn_rec *c, request_rec *r, + h2_conn_ctx_t *conn_ctx) { - if (ctx) { - h2_task *task = h2_ctx_get_task(ctx); - if (task && !H2_STREAM_CLIENT_INITIATED(task->stream_id)) { - h2_stream *stream = h2_mplx_stream_get(task->mplx, task->stream_id); + if (conn_ctx) { + if (conn_ctx->stream_id && !H2_STREAM_CLIENT_INITIATED(conn_ctx->stream_id)) { + const h2_stream *stream = h2_mplx_c2_stream_get(conn_ctx->mplx, conn_ctx->stream_id); if (stream) { return apr_itoa(p, stream->initiated_on); } @@ -309,29 +266,30 @@ static const char *val_H2_PUSHED_ON(apr_pool_t *p, server_rec *s, } static const char *val_H2_STREAM_TAG(apr_pool_t *p, server_rec *s, - conn_rec *c, request_rec *r, h2_ctx *ctx) + conn_rec *c, request_rec *r, h2_conn_ctx_t *ctx) { - if (ctx) { - h2_task *task = h2_ctx_get_task(ctx); - if (task) { - return task->id; + if (c) { + h2_conn_ctx_t *conn_ctx = h2_conn_ctx_get(c); + if (conn_ctx) { + return conn_ctx->stream_id == 0? conn_ctx->id + : apr_psprintf(p, "%s-%d", conn_ctx->id, conn_ctx->stream_id); } } return ""; } static const char *val_H2_STREAM_ID(apr_pool_t *p, server_rec *s, - conn_rec *c, request_rec *r, h2_ctx *ctx) + conn_rec *c, request_rec *r, h2_conn_ctx_t *ctx) { const char *cp = val_H2_STREAM_TAG(p, s, c, r, ctx); - if (cp && (cp = ap_strchr_c(cp, '-'))) { + if (cp && (cp = ap_strrchr_c(cp, '-'))) { return ++cp; } return NULL; } typedef const char *h2_var_lookup(apr_pool_t *p, server_rec *s, - conn_rec *c, request_rec *r, h2_ctx *ctx); + conn_rec *c, request_rec *r, h2_conn_ctx_t *ctx); typedef struct h2_var_def { const char *name; h2_var_lookup *lookup; @@ -355,19 +313,19 @@ static h2_var_def H2_VARS[] = { static int http2_is_h2(conn_rec *c) { - return h2_ctx_get(c->master? c->master : c, 0) != NULL; + return h2_conn_ctx_get(c->master? 
c->master : c) != NULL; } static char *http2_var_lookup(apr_pool_t *p, server_rec *s, conn_rec *c, request_rec *r, char *name) { - int i; + unsigned int i; /* If the # of vars grow, we need to put definitions in a hash */ for (i = 0; i < H2_ALEN(H2_VARS); ++i) { h2_var_def *vdef = &H2_VARS[i]; if (!strcmp(vdef->name, name)) { - h2_ctx *ctx = (r? h2_ctx_rget(r) : - h2_ctx_get(c->master? c->master : c, 0)); + h2_conn_ctx_t *ctx = (r? h2_conn_ctx_get(c) : + h2_conn_ctx_get(c->master? c->master : c)); return (char *)vdef->lookup(p, s, c, r, ctx); } } @@ -377,9 +335,9 @@ static char *http2_var_lookup(apr_pool_t *p, server_rec *s, static int h2_h2_fixups(request_rec *r) { if (r->connection->master) { - h2_ctx *ctx = h2_ctx_rget(r); - int i; - + h2_conn_ctx_t *ctx = h2_conn_ctx_get(r->connection); + unsigned int i; + for (i = 0; ctx && i < H2_ALEN(H2_VARS); ++i) { h2_var_def *vdef = &H2_VARS[i]; if (vdef->subprocess) { diff --git a/modules/http2/mod_http2.dep b/modules/http2/mod_http2.dep index 52f2286..25c0ede 100644 --- a/modules/http2/mod_http2.dep +++ b/modules/http2/mod_http2.dep @@ -694,7 +694,6 @@ ".\h2_ctx.h"\ ".\h2_h2.h"\ ".\h2_mplx.h"\ - ".\h2_ngn_shed.h"\ ".\h2_private.h"\ ".\h2_request.h"\ ".\h2_stream.h"\ @@ -754,7 +753,6 @@ ".\h2_ctx.h"\ ".\h2_h2.h"\ ".\h2_mplx.h"\ - ".\h2_ngn_shed.h"\ ".\h2_private.h"\ ".\h2_request.h"\ ".\h2_task.h"\ diff --git a/modules/http2/mod_http2.dsp b/modules/http2/mod_http2.dsp index d1c4322..9775534 100644 --- a/modules/http2/mod_http2.dsp +++ b/modules/http2/mod_http2.dsp @@ -101,10 +101,6 @@ PostBuild_Cmds=if exist $(TargetPath).manifest mt.exe -manifest $(TargetPath).ma # Name "mod_http2 - Win32 Debug" # Begin Source File -SOURCE=./h2_alt_svc.c -# End Source File -# Begin Source File - SOURCE=./h2_bucket_beam.c # End Source File # Begin Source File @@ -113,31 +109,31 @@ SOURCE=./h2_bucket_eos.c # End Source File # Begin Source File -SOURCE=./h2_config.c +SOURCE=./h2_c1.c # End Source File # Begin Source File -SOURCE=./h2_conn.c +SOURCE=./h2_c1_io.c # End Source File # Begin Source File -SOURCE=./h2_conn_io.c +SOURCE=./h2_c2.c # End Source File # Begin Source File -SOURCE=./h2_ctx.c +SOURCE=./h2_c2_filter.c # End Source File # Begin Source File -SOURCE=./h2_filter.c +SOURCE=./h2_config.c # End Source File # Begin Source File -SOURCE=./h2_from_h1.c +SOURCE=./h2_conn_ctx.c # End Source File # Begin Source File -SOURCE=./h2_h2.c +SOURCE=./h2_headers.c # End Source File # Begin Source File @@ -145,7 +141,7 @@ SOURCE=./h2_mplx.c # End Source File # Begin Source File -SOURCE=./h2_ngn_shed.c +SOURCE=./h2_protocol.c # End Source File # Begin Source File @@ -157,10 +153,6 @@ SOURCE=./h2_request.c # End Source File # Begin Source File -SOURCE=./h2_headers.c -# End Source File -# Begin Source File - SOURCE=./h2_session.c # End Source File # Begin Source File @@ -173,15 +165,19 @@ SOURCE=./h2_switch.c # End Source File # Begin Source File -SOURCE=./h2_task.c +SOURCE=./h2_util.c # End Source File # Begin Source File -SOURCE=./h2_util.c +SOURCE=./h2_workers.c # End Source File # Begin Source File -SOURCE=./h2_workers.c +SOURCE=./h2_ws.c +# End Source File +# Begin Source File + +SOURCE=./mod_http2.c # End Source File # Begin Source File diff --git a/modules/http2/mod_http2.h b/modules/http2/mod_http2.h index 7a1b49a..9cb04a6 100644 --- a/modules/http2/mod_http2.h +++ b/modules/http2/mod_http2.h @@ -28,24 +28,48 @@ APR_DECLARE_OPTIONAL_FN(char *, APR_DECLARE_OPTIONAL_FN(int, http2_is_h2, (conn_rec *)); +APR_DECLARE_OPTIONAL_FN(void, + http2_get_num_workers, 
(server_rec *s, + int *minw, int *max)); + +#define AP_HTTP2_HAS_GET_POLLFD + +/** + * Get a apr_pollfd_t populated for a h2 connection where + * (c->master != NULL) is true and pipes are supported. + * To be used in Apache modules implementing WebSockets in Apache httpd + * versions that do not support the corresponding `ap_get_pollfd_from_conn()` + * function. + * When available, use `ap_get_pollfd_from_conn()` instead of this function. + * + * How it works: pass in a `apr_pollfd_t` which gets populated for + * monitoring the input of connection `c`. If `c` is not a HTTP/2 + * stream connection, the function will return `APR_ENOTIMPL`. + * `ptimeout` is optional and, if passed, will get the timeout in effect + * + * On platforms without support for pipes (e.g. Windows), this function + * will return `APR_ENOTIMPL`. + */ +APR_DECLARE_OPTIONAL_FN(apr_status_t, + http2_get_pollfd_from_conn, + (conn_rec *c, struct apr_pollfd_t *pfd, + apr_interval_time_t *ptimeout)); /******************************************************************************* - * HTTP/2 request engines + * START HTTP/2 request engines (DEPRECATED) ******************************************************************************/ + +/* The following functions were introduced for the experimental mod_proxy_http2 + * support, but have been abandoned since. + * They are still declared here for backward compatibility, in case someone + * tries to build an old mod_proxy_http2 against it, but will disappear + * completely sometime in the future. + */ struct apr_thread_cond_t; - typedef struct h2_req_engine h2_req_engine; - typedef void http2_output_consumed(void *ctx, conn_rec *c, apr_off_t consumed); -/** - * Initialize a h2_req_engine. The structure will be passed in but - * only the name and master are set. The function should initialize - * all fields. - * @param engine the allocated, partially filled structure - * @param r the first request to process, or NULL - */ typedef apr_status_t http2_req_engine_init(h2_req_engine *engine, const char *id, const char *type, @@ -55,35 +79,11 @@ typedef apr_status_t http2_req_engine_init(h2_req_engine *engine, http2_output_consumed **pconsumed, void **pbaton); -/** - * Push a request to an engine with the specified name for further processing. - * If no such engine is available, einit is not NULL, einit is called - * with a new engine record and the caller is responsible for running the - * new engine instance. - * @param engine_type the type of the engine to add the request to - * @param r the request to push to an engine for processing - * @param einit an optional initialization callback for a new engine - * of the requested type, should no instance be available. - * By passing a non-NULL callback, the caller is willing - * to init and run a new engine itself. - * @return APR_SUCCESS iff slave was successfully added to an engine - */ APR_DECLARE_OPTIONAL_FN(apr_status_t, http2_req_engine_push, (const char *engine_type, request_rec *r, http2_req_engine_init *einit)); -/** - * Get a new request for processing in this engine. 
- * @param engine the engine which is done processing the slave - * @param block if call should block waiting for request to come - * @param capacity how many parallel requests are acceptable - * @param pr the request that needs processing or NULL - * @return APR_SUCCESS if new request was assigned - * APR_EAGAIN if no new request is available - * APR_EOF if engine may shut down, as no more request will be scheduled - * APR_ECONNABORTED if the engine needs to shut down immediately - */ APR_DECLARE_OPTIONAL_FN(apr_status_t, http2_req_engine_pull, (h2_req_engine *engine, apr_read_type_e block, @@ -94,8 +94,9 @@ APR_DECLARE_OPTIONAL_FN(void, conn_rec *rconn, apr_status_t status)); -APR_DECLARE_OPTIONAL_FN(void, - http2_get_num_workers, (server_rec *s, - int *minw, int *max)); + +/******************************************************************************* + * END HTTP/2 request engines (DEPRECATED) + ******************************************************************************/ #endif diff --git a/modules/http2/mod_http2.mak b/modules/http2/mod_http2.mak index 10ae887..26611c7 100644 --- a/modules/http2/mod_http2.mak +++ b/modules/http2/mod_http2.mak @@ -61,7 +61,6 @@ CLEAN : -@erase "$(INTDIR)\h2_h2.obj" -@erase "$(INTDIR)\h2_headers.obj" -@erase "$(INTDIR)\h2_mplx.obj" - -@erase "$(INTDIR)\h2_ngn_shed.obj" -@erase "$(INTDIR)\h2_push.obj" -@erase "$(INTDIR)\h2_request.obj" -@erase "$(INTDIR)\h2_session.obj" @@ -138,7 +137,6 @@ LINK32_OBJS= \ "$(INTDIR)\h2_h2.obj" \ "$(INTDIR)\h2_headers.obj" \ "$(INTDIR)\h2_mplx.obj" \ - "$(INTDIR)\h2_ngn_shed.obj" \ "$(INTDIR)\h2_push.obj" \ "$(INTDIR)\h2_request.obj" \ "$(INTDIR)\h2_session.obj" \ @@ -207,7 +205,6 @@ CLEAN : -@erase "$(INTDIR)\h2_h2.obj" -@erase "$(INTDIR)\h2_headers.obj" -@erase "$(INTDIR)\h2_mplx.obj" - -@erase "$(INTDIR)\h2_ngn_shed.obj" -@erase "$(INTDIR)\h2_push.obj" -@erase "$(INTDIR)\h2_request.obj" -@erase "$(INTDIR)\h2_session.obj" @@ -284,7 +281,6 @@ LINK32_OBJS= \ "$(INTDIR)\h2_h2.obj" \ "$(INTDIR)\h2_headers.obj" \ "$(INTDIR)\h2_mplx.obj" \ - "$(INTDIR)\h2_ngn_shed.obj" \ "$(INTDIR)\h2_push.obj" \ "$(INTDIR)\h2_request.obj" \ "$(INTDIR)\h2_session.obj" \ @@ -469,11 +465,6 @@ SOURCE=./h2_mplx.c "$(INTDIR)\h2_mplx.obj" : $(SOURCE) "$(INTDIR)" -SOURCE=./h2_ngn_shed.c - -"$(INTDIR)\h2_ngn_shed.obj" : $(SOURCE) "$(INTDIR)" - - SOURCE=./h2_push.c "$(INTDIR)\h2_push.obj" : $(SOURCE) "$(INTDIR)" diff --git a/modules/http2/mod_proxy_http2.c b/modules/http2/mod_proxy_http2.c index a7e0dcd..ebf8f61 100644 --- a/modules/http2/mod_proxy_http2.c +++ b/modules/http2/mod_proxy_http2.c @@ -16,13 +16,14 @@ #include +#include #include #include #include "mod_http2.h" #include "mod_proxy_http2.h" -#include "h2_request.h" +#include "h2.h" #include "h2_proxy_util.h" #include "h2_version.h" #include "h2_proxy_session.h" @@ -46,19 +47,11 @@ AP_DECLARE_MODULE(proxy_http2) = { /* Optional functions from mod_http2 */ static int (*is_h2)(conn_rec *c); -static apr_status_t (*req_engine_push)(const char *name, request_rec *r, - http2_req_engine_init *einit); -static apr_status_t (*req_engine_pull)(h2_req_engine *engine, - apr_read_type_e block, - int capacity, - request_rec **pr); -static void (*req_engine_done)(h2_req_engine *engine, conn_rec *r_conn, - apr_status_t status); - + typedef struct h2_proxy_ctx { - conn_rec *owner; + const char *id; + conn_rec *cfront; apr_pool_t *pool; - request_rec *rbase; server_rec *server; const char *proxy_func; char server_portstr[32]; @@ -66,20 +59,16 @@ typedef struct h2_proxy_ctx { proxy_worker *worker; 
proxy_server_conf *conf; - h2_req_engine *engine; - const char *engine_id; - const char *engine_type; - apr_pool_t *engine_pool; apr_size_t req_buffer_size; - h2_proxy_fifo *requests; int capacity; - unsigned standalone : 1; unsigned is_ssl : 1; - unsigned flushall : 1; - apr_status_t r_status; /* status of our first request work */ - h2_proxy_session *session; /* current http2 session against backend */ + request_rec *r; /* the request processed in this ctx */ + int r_status; /* status of request work */ + int r_done; /* request was processed, not necessarily successfully */ + int r_may_retry; /* request may be retried */ + int has_reusable_session; /* http2 session is live and clean */ } h2_proxy_ctx; static int h2_proxy_post_config(apr_pool_t *p, apr_pool_t *plog, @@ -104,16 +93,6 @@ static int h2_proxy_post_config(apr_pool_t *p, apr_pool_t *plog, MOD_HTTP2_VERSION, ngh2? ngh2->version_str : "unknown"); is_h2 = APR_RETRIEVE_OPTIONAL_FN(http2_is_h2); - req_engine_push = APR_RETRIEVE_OPTIONAL_FN(http2_req_engine_push); - req_engine_pull = APR_RETRIEVE_OPTIONAL_FN(http2_req_engine_pull); - req_engine_done = APR_RETRIEVE_OPTIONAL_FN(http2_req_engine_done); - - /* we need all of them */ - if (!req_engine_push || !req_engine_pull || !req_engine_done) { - req_engine_push = NULL; - req_engine_pull = NULL; - req_engine_done = NULL; - } return status; } @@ -174,9 +153,24 @@ static int proxy_http2_canon(request_rec *r, char *url) if (apr_table_get(r->notes, "proxy-nocanon")) { path = url; /* this is the raw path */ } + else if (apr_table_get(r->notes, "proxy-noencode")) { + path = url; /* this is the encoded path already */ + search = r->args; + } else { +#ifdef PROXY_CANONENC_NOENCODEDSLASHENCODING + core_dir_config *d = ap_get_core_module_config(r->per_dir_config); + int flags = d->allow_encoded_slashes && !d->decode_encoded_slashes ? PROXY_CANONENC_NOENCODEDSLASHENCODING : 0; + + path = ap_proxy_canonenc_ex(r->pool, url, (int)strlen(url), + enc_path, flags, r->proxyreq); +#else path = ap_proxy_canonenc(r->pool, url, (int)strlen(url), enc_path, 0, r->proxyreq); +#endif + if (!path) { + return HTTP_BAD_REQUEST; + } search = r->args; } break; @@ -184,9 +178,21 @@ static int proxy_http2_canon(request_rec *r, char *url) path = url; break; } - - if (path == NULL) { - return HTTP_BAD_REQUEST; + /* + * If we have a raw control character or a ' ' in nocanon path or + * r->args, correct encoding was missed. 
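+ * For example, a proxy-nocanon path such as "/a b", or one containing a
+ * raw CR/LF, is caught by ap_scan_vchar_obstext() below and the request
+ * rejected with 403 instead of being forwarded.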
+ */ + if (path == url && *ap_scan_vchar_obstext(path)) { + ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(10420) + "To be forwarded path contains control " + "characters or spaces"); + return HTTP_FORBIDDEN; + } + if (search && *ap_scan_vchar_obstext(search)) { + ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(10412) + "To be forwarded query string contains control " + "characters or spaces"); + return HTTP_FORBIDDEN; } if (port != def_port) { @@ -204,45 +210,6 @@ static int proxy_http2_canon(request_rec *r, char *url) return OK; } -static void out_consumed(void *baton, conn_rec *c, apr_off_t bytes) -{ - h2_proxy_ctx *ctx = baton; - - if (ctx->session) { - h2_proxy_session_update_window(ctx->session, c, bytes); - } -} - -static apr_status_t proxy_engine_init(h2_req_engine *engine, - const char *id, - const char *type, - apr_pool_t *pool, - apr_size_t req_buffer_size, - request_rec *r, - http2_output_consumed **pconsumed, - void **pctx) -{ - h2_proxy_ctx *ctx = ap_get_module_config(r->connection->conn_config, - &proxy_http2_module); - if (!ctx) { - ap_log_rerror(APLOG_MARK, APLOG_WARNING, 0, r, APLOGNO(03368) - "h2_proxy_session, engine init, no ctx found"); - return APR_ENOTIMPL; - } - - ctx->pool = pool; - ctx->engine = engine; - ctx->engine_id = id; - ctx->engine_type = type; - ctx->engine_pool = pool; - ctx->req_buffer_size = req_buffer_size; - ctx->capacity = H2MIN(100, h2_proxy_fifo_capacity(ctx->requests)); - - *pconsumed = out_consumed; - *pctx = ctx; - return APR_SUCCESS; -} - static apr_status_t add_request(h2_proxy_session *session, request_rec *r) { h2_proxy_ctx *ctx = session->user_data; @@ -252,7 +219,7 @@ static apr_status_t add_request(h2_proxy_session *session, request_rec *r) url = apr_table_get(r->notes, H2_PROXY_REQ_URL_NOTE); apr_table_setn(r->notes, "proxy-source-port", apr_psprintf(r->pool, "%hu", ctx->p_conn->connection->local_addr->port)); - status = h2_proxy_session_submit(session, url, r, ctx->standalone); + status = h2_proxy_session_submit(session, url, r, 1); if (status != APR_SUCCESS) { ap_log_cerror(APLOG_MARK, APLOG_ERR, status, r->connection, APLOGNO(03351) "pass request body failed to %pI (%s) from %s (%s)", @@ -264,201 +231,84 @@ static apr_status_t add_request(h2_proxy_session *session, request_rec *r) } static void request_done(h2_proxy_ctx *ctx, request_rec *r, - apr_status_t status, int touched) + apr_status_t status, int touched, int error_code) { - const char *task_id = apr_table_get(r->connection->notes, H2_TASK_ID_NOTE); - - ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, r->connection, - "h2_proxy_session(%s): request done %s, touched=%d", - ctx->engine_id, task_id, touched); - if (status != APR_SUCCESS) { - if (!touched) { - /* untouched request, need rescheduling */ - status = h2_proxy_fifo_push(ctx->requests, r); - ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, r->connection, - APLOGNO(03369) - "h2_proxy_session(%s): rescheduled request %s", - ctx->engine_id, task_id); - return; - } - else { - const char *uri; - uri = apr_uri_unparse(r->pool, &r->parsed_uri, 0); - ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, r->connection, - APLOGNO(03471) "h2_proxy_session(%s): request %s -> %s " - "not complete, cannot repeat", - ctx->engine_id, task_id, uri); - } - } - - if (r == ctx->rbase) { - ctx->r_status = ((status == APR_SUCCESS)? 
APR_SUCCESS - : HTTP_SERVICE_UNAVAILABLE); - } - - if (req_engine_done && ctx->engine) { - ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, r->connection, - APLOGNO(03370) - "h2_proxy_session(%s): finished request %s", - ctx->engine_id, task_id); - req_engine_done(ctx->engine, r->connection, status); + if (r == ctx->r) { + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, r->connection, + "h2_proxy_session(%s): request done, touched=%d, error=%d", + ctx->id, touched, error_code); + ctx->r_done = 1; + if (touched) ctx->r_may_retry = 0; + ctx->r_status = error_code? HTTP_BAD_GATEWAY : + ((status == APR_SUCCESS)? OK : + ap_map_http_request_error(status, HTTP_SERVICE_UNAVAILABLE)); } } static void session_req_done(h2_proxy_session *session, request_rec *r, - apr_status_t status, int touched) -{ - request_done(session->user_data, r, status, touched); -} - -static apr_status_t next_request(h2_proxy_ctx *ctx, int before_leave) + apr_status_t status, int touched, int error_code) { - if (h2_proxy_fifo_count(ctx->requests) > 0) { - return APR_SUCCESS; - } - else if (req_engine_pull && ctx->engine) { - apr_status_t status; - request_rec *r = NULL; - - status = req_engine_pull(ctx->engine, before_leave? - APR_BLOCK_READ: APR_NONBLOCK_READ, - ctx->capacity, &r); - if (status == APR_SUCCESS && r) { - ap_log_cerror(APLOG_MARK, APLOG_TRACE3, status, ctx->owner, - "h2_proxy_engine(%s): pulled request (%s) %s", - ctx->engine_id, - before_leave? "before leave" : "regular", - r->the_request); - h2_proxy_fifo_push(ctx->requests, r); - } - return APR_STATUS_IS_EAGAIN(status)? APR_SUCCESS : status; - } - return APR_EOF; + request_done(session->user_data, r, status, touched, error_code); } -static apr_status_t proxy_engine_run(h2_proxy_ctx *ctx) { +static apr_status_t ctx_run(h2_proxy_ctx *ctx) { apr_status_t status = OK; + h2_proxy_session *session; int h2_front; - request_rec *r; /* Step Four: Send the Request in a new HTTP/2 stream and * loop until we got the response or encounter errors. */ - ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, ctx->owner, - "eng(%s): setup session", ctx->engine_id); - h2_front = is_h2? is_h2(ctx->owner) : 0; - ctx->session = h2_proxy_session_setup(ctx->engine_id, ctx->p_conn, ctx->conf, - h2_front, 30, - h2_proxy_log2((int)ctx->req_buffer_size), - session_req_done); - if (!ctx->session) { - ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, ctx->owner, + ctx->has_reusable_session = 0; /* don't know yet */ + h2_front = is_h2? 
is_h2(ctx->cfront) : 0; + session = h2_proxy_session_setup(ctx->id, ctx->p_conn, ctx->conf, + h2_front, 30, + h2_proxy_log2((int)ctx->req_buffer_size), + session_req_done); + if (!session) { + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, ctx->cfront, APLOGNO(03372) "session unavailable"); return HTTP_SERVICE_UNAVAILABLE; } - ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, ctx->owner, APLOGNO(03373) - "eng(%s): run session %s", ctx->engine_id, ctx->session->id); - ctx->session->user_data = ctx; + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, ctx->cfront, APLOGNO(03373) + "eng(%s): run session %s", ctx->id, session->id); + session->user_data = ctx; - while (!ctx->owner->aborted) { - if (APR_SUCCESS == h2_proxy_fifo_try_pull(ctx->requests, (void**)&r)) { - add_request(ctx->session, r); - } - - status = h2_proxy_session_process(ctx->session); - - if (status == APR_SUCCESS) { - apr_status_t s2; - /* ongoing processing, call again */ - if (ctx->session->remote_max_concurrent > 0 - && ctx->session->remote_max_concurrent != ctx->capacity) { - ctx->capacity = H2MIN((int)ctx->session->remote_max_concurrent, - h2_proxy_fifo_capacity(ctx->requests)); - } - s2 = next_request(ctx, 0); - if (s2 == APR_ECONNABORTED) { - /* master connection gone */ - ap_log_cerror(APLOG_MARK, APLOG_DEBUG, s2, ctx->owner, - APLOGNO(03374) "eng(%s): pull request", - ctx->engine_id); - /* give notice that we're leaving and cancel all ongoing - * streams. */ - next_request(ctx, 1); - h2_proxy_session_cancel_all(ctx->session); - h2_proxy_session_process(ctx->session); - status = ctx->r_status = APR_SUCCESS; - break; - } - if ((h2_proxy_fifo_count(ctx->requests) == 0) - && h2_proxy_ihash_empty(ctx->session->streams)) { - break; - } - } - else { - /* end of processing, maybe error */ - ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, ctx->owner, + ctx->r_done = 0; + add_request(session, ctx->r); + + while (!ctx->cfront->aborted && !ctx->r_done) { + + status = h2_proxy_session_process(session); + if (status != APR_SUCCESS) { + /* Encountered an error during session processing */ + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, ctx->cfront, APLOGNO(03375) "eng(%s): end of session %s", - ctx->engine_id, ctx->session->id); - /* - * Any open stream of that session needs to + ctx->id, session->id); + /* Any open stream of that session needs to * a) be reopened on the new session iff safe to do so * b) reported as done (failed) otherwise */ - h2_proxy_session_cleanup(ctx->session, session_req_done); - break; + h2_proxy_session_cleanup(session, session_req_done); + goto out; } } - ctx->session->user_data = NULL; - ctx->session = NULL; - +out: + if (ctx->cfront->aborted) { + /* master connection gone */ + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, ctx->cfront, + APLOGNO(03374) "eng(%s): master connection gone", ctx->id); + /* cancel all ongoing requests */ + h2_proxy_session_cancel_all(session); + h2_proxy_session_process(session); + } + ctx->has_reusable_session = h2_proxy_session_is_reusable(session); + session->user_data = NULL; return status; } -static apr_status_t push_request_somewhere(h2_proxy_ctx *ctx, request_rec *r) -{ - conn_rec *c = ctx->owner; - const char *engine_type, *hostname; - - hostname = (ctx->p_conn->ssl_hostname? - ctx->p_conn->ssl_hostname : ctx->p_conn->hostname); - engine_type = apr_psprintf(ctx->pool, "proxy_http2 %s%s", hostname, - ctx->server_portstr); - - if (c->master && req_engine_push && r && is_h2 && is_h2(c)) { - /* If we are have req_engine capabilities, push the handling of this - * request (e.g. 
slave connection) to a proxy_http2 engine which - * uses the same backend. We may be called to create an engine - * ourself. */ - if (req_engine_push(engine_type, r, proxy_engine_init) == APR_SUCCESS) { - if (ctx->engine == NULL) { - /* request has been assigned to an engine in another thread */ - return SUSPENDED; - } - } - } - - if (!ctx->engine) { - /* No engine was available or has been initialized, handle this - * request just by ourself. */ - ctx->engine_id = apr_psprintf(ctx->pool, "eng-proxy-%ld", c->id); - ctx->engine_type = engine_type; - ctx->engine_pool = ctx->pool; - ctx->req_buffer_size = (32*1024); - ctx->standalone = 1; - ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c, - "h2_proxy_http2(%ld): setup standalone engine for type %s", - c->id, engine_type); - } - else { - ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c, - "H2: hosting engine %s", ctx->engine_id); - } - - return h2_proxy_fifo_push(ctx->requests, r); -} - static int proxy_http2_handler(request_rec *r, proxy_worker *worker, proxy_server_conf *conf, @@ -498,29 +348,32 @@ static int proxy_http2_handler(request_rec *r, default: return DECLINED; } - + ctx = apr_pcalloc(r->pool, sizeof(*ctx)); - ctx->owner = r->connection; - ctx->pool = r->pool; - ctx->rbase = r; - ctx->server = r->server; + ctx->id = apr_psprintf(r->pool, "%ld", (long)r->connection->id); + ctx->cfront = r->connection; + ctx->pool = r->pool; + ctx->server = r->server; ctx->proxy_func = proxy_func; - ctx->is_ssl = is_ssl; - ctx->worker = worker; - ctx->conf = conf; - ctx->flushall = apr_table_get(r->subprocess_env, "proxy-flushall")? 1 : 0; - ctx->r_status = HTTP_SERVICE_UNAVAILABLE; + ctx->is_ssl = is_ssl; + ctx->worker = worker; + ctx->conf = conf; + ctx->req_buffer_size = (32*1024); + ctx->r = r; + ctx->r_status = status = HTTP_SERVICE_UNAVAILABLE; + ctx->r_done = 0; + ctx->r_may_retry = 1; - h2_proxy_fifo_set_create(&ctx->requests, ctx->pool, 100); - - ap_set_module_config(ctx->owner->conn_config, &proxy_http2_module, ctx); + ap_set_module_config(ctx->cfront->conn_config, &proxy_http2_module, ctx); /* scheme says, this is for us. */ - apr_table_setn(ctx->rbase->notes, H2_PROXY_REQ_URL_NOTE, url); - ap_log_rerror(APLOG_MARK, APLOG_TRACE1, 0, ctx->rbase, + apr_table_setn(ctx->r->notes, H2_PROXY_REQ_URL_NOTE, url); + ap_log_rerror(APLOG_MARK, APLOG_TRACE1, 0, ctx->r, "H2: serving URL %s", url); run_connect: + if (ctx->cfront->aborted) goto cleanup; + /* Get a proxy_conn_rec from the worker, might be a new one, might * be one still open from another request, or it might fail if the * worker is stopped or in error. */ @@ -530,25 +383,11 @@ run_connect: } ctx->p_conn->is_ssl = ctx->is_ssl; - if (ctx->is_ssl && ctx->p_conn->connection) { - /* If there are some metadata on the connection (e.g. TLS alert), - * let mod_ssl detect them, and create a new connection below. - */ - apr_bucket_brigade *tmp_bb; - tmp_bb = apr_brigade_create(ctx->rbase->pool, - ctx->rbase->connection->bucket_alloc); - status = ap_get_brigade(ctx->p_conn->connection->input_filters, tmp_bb, - AP_MODE_SPECULATIVE, APR_NONBLOCK_READ, 1); - if (status != APR_SUCCESS && !APR_STATUS_IS_EAGAIN(status)) { - ctx->p_conn->close = 1; - } - apr_brigade_cleanup(tmp_bb); - } /* Step One: Determine the URL to connect to (might be a proxy), * initialize the backend accordingly and determine the server * port string we can expect in responses. 
*/ - if ((status = ap_proxy_determine_connection(ctx->pool, ctx->rbase, conf, worker, + if ((status = ap_proxy_determine_connection(ctx->pool, ctx->r, conf, worker, ctx->p_conn, &uri, &locurl, proxyname, proxyport, ctx->server_portstr, @@ -556,111 +395,80 @@ run_connect: goto cleanup; } - /* If we are not already hosting an engine, try to push the request - * to an already existing engine or host a new engine here. */ - if (r && !ctx->engine) { - ctx->r_status = push_request_somewhere(ctx, r); - r = NULL; - if (ctx->r_status == SUSPENDED) { - /* request was pushed to another thread, leave processing here */ - goto cleanup; - } - } - /* Step Two: Make the Connection (or check that an already existing * socket is still usable). On success, we have a socket connected to * backend->hostname. */ if (ap_proxy_connect_backend(ctx->proxy_func, ctx->p_conn, ctx->worker, ctx->server)) { - ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, ctx->owner, APLOGNO(03352) + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, ctx->cfront, APLOGNO(03352) "H2: failed to make connection to backend: %s", ctx->p_conn->hostname); - goto reconnect; + goto cleanup; } /* Step Three: Create conn_rec for the socket we have open now. */ - if (!ctx->p_conn->connection) { - status = ap_proxy_connection_create_ex(ctx->proxy_func, - ctx->p_conn, ctx->rbase); - if (status != OK) { - ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, ctx->owner, APLOGNO(03353) - "setup new connection: is_ssl=%d %s %s %s", - ctx->p_conn->is_ssl, ctx->p_conn->ssl_hostname, - locurl, ctx->p_conn->hostname); - goto reconnect; - } - - if (!ctx->p_conn->data) { - /* New conection: set a note on the connection what CN is - * requested and what protocol we want */ - if (ctx->p_conn->ssl_hostname) { - ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, ctx->owner, - "set SNI to %s for (%s)", - ctx->p_conn->ssl_hostname, - ctx->p_conn->hostname); - apr_table_setn(ctx->p_conn->connection->notes, - "proxy-request-hostname", ctx->p_conn->ssl_hostname); - } - if (ctx->is_ssl) { - apr_table_setn(ctx->p_conn->connection->notes, - "proxy-request-alpn-protos", "h2"); - } - } + status = ap_proxy_connection_create_ex(ctx->proxy_func, ctx->p_conn, ctx->r); + if (status != OK) { + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, ctx->cfront, APLOGNO(03353) + "setup new connection: is_ssl=%d %s %s %s", + ctx->p_conn->is_ssl, ctx->p_conn->ssl_hostname, + locurl, ctx->p_conn->hostname); + ctx->r_status = ap_map_http_request_error(status, HTTP_SERVICE_UNAVAILABLE); + goto cleanup; } - -run_session: - status = proxy_engine_run(ctx); - if (status == APR_SUCCESS) { - /* session and connection still ok */ - if (next_request(ctx, 1) == APR_SUCCESS) { - /* more requests, run again */ - ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, ctx->owner, APLOGNO(03376) - "run_session, again"); - goto run_session; - } - /* done */ - ctx->engine = NULL; + + if (!ctx->p_conn->data && ctx->is_ssl) { + /* New SSL connection: set a note on the connection about what + * protocol we need. 
*/ + apr_table_setn(ctx->p_conn->connection->notes, + "proxy-request-alpn-protos", "h2"); } -reconnect: - if (next_request(ctx, 1) == APR_SUCCESS) { - /* Still more to do, tear down old conn and start over */ + if (ctx->cfront->aborted) goto cleanup; + status = ctx_run(ctx); + + if (ctx->r_status != OK && ctx->r_may_retry && !ctx->cfront->aborted) { + /* Not successfully processed, but may retry, tear down old conn and start over */ if (ctx->p_conn) { ctx->p_conn->close = 1; - /*only in trunk so far */ - /*proxy_run_detach_backend(r, ctx->p_conn);*/ +#if AP_MODULE_MAGIC_AT_LEAST(20140207, 2) + proxy_run_detach_backend(r, ctx->p_conn); +#endif ap_proxy_release_connection(ctx->proxy_func, ctx->p_conn, ctx->server); ctx->p_conn = NULL; } ++reconnects; - if (reconnects < 5 && !ctx->owner->aborted) { + if (reconnects < 2) { goto run_connect; } - ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, ctx->owner, APLOGNO(10023) - "giving up after %d reconnects, %d requests todo", - reconnects, h2_proxy_fifo_count(ctx->requests)); + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, ctx->cfront, APLOGNO(10023) + "giving up after %d reconnects, request-done=%d", + reconnects, ctx->r_done); } cleanup: if (ctx->p_conn) { - if (status != APR_SUCCESS) { - /* close socket when errors happened or session shut down (EOF) */ + if (status != APR_SUCCESS || !ctx->has_reusable_session) { + /* close socket when errors happened or session is not "clean", + * meaning in a working condition with no open streams */ ctx->p_conn->close = 1; } - /*only in trunk so far */ - /*proxy_run_detach_backend(ctx->rbase, ctx->p_conn);*/ +#if AP_MODULE_MAGIC_AT_LEAST(20140207, 2) + proxy_run_detach_backend(ctx->r, ctx->p_conn); +#endif ap_proxy_release_connection(ctx->proxy_func, ctx->p_conn, ctx->server); ctx->p_conn = NULL; } - /* Any requests will still have need to fail */ - while (APR_SUCCESS == h2_proxy_fifo_try_pull(ctx->requests, (void**)&r)) { - request_done(ctx, r, HTTP_SERVICE_UNAVAILABLE, 1); - } - - ap_set_module_config(ctx->owner->conn_config, &proxy_http2_module, NULL); - ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, ctx->owner, + ap_set_module_config(ctx->cfront->conn_config, &proxy_http2_module, NULL); + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, ctx->cfront, APLOGNO(03377) "leaving handler"); + if (ctx->r_status != OK) { + ap_die(ctx->r_status, r); + } + else if (status != APR_SUCCESS) { + ap_die(ap_map_http_request_error(status, HTTP_SERVICE_UNAVAILABLE), r); + } return ctx->r_status; } diff --git a/modules/ldap/util_ldap.c b/modules/ldap/util_ldap.c index 08f986c..aa0bad1 100644 --- a/modules/ldap/util_ldap.c +++ b/modules/ldap/util_ldap.c @@ -75,15 +75,61 @@ module AP_MODULE_DECLARE_DATA ldap_module; static const char *ldap_cache_mutex_type = "ldap-cache"; static apr_status_t uldap_connection_unbind(void *param); -#define LDAP_CACHE_LOCK() do { \ - if (st->util_ldap_cache_lock) \ - apr_global_mutex_lock(st->util_ldap_cache_lock); \ -} while (0) +/* For OpenLDAP with the 3-arg version of ldap_set_rebind_proc(), use + * a simpler rebind callback than the implementation in APR-util. + * Testing for API version >= 3001 appears safe although OpenLDAP + * 2.1.x (API version = 2004) also has the 3-arg API. 
*/ +#if APR_HAS_OPENLDAP_LDAPSDK && defined(LDAP_API_VERSION) && LDAP_API_VERSION >= 3001 -#define LDAP_CACHE_UNLOCK() do { \ - if (st->util_ldap_cache_lock) \ - apr_global_mutex_unlock(st->util_ldap_cache_lock); \ -} while (0) +#define uldap_rebind_init(p) APR_SUCCESS /* noop */ + +static int uldap_rebind_proc(LDAP *ld, const char *url, ber_tag_t request, + ber_int_t msgid, void *params) +{ + util_ldap_connection_t *ldc = params; + + return ldap_bind_s(ld, ldc->binddn, ldc->bindpw, LDAP_AUTH_SIMPLE); +} + +static apr_status_t uldap_rebind_add(util_ldap_connection_t *ldc) +{ + ldap_set_rebind_proc(ldc->ldap, uldap_rebind_proc, ldc); + return APR_SUCCESS; +} + +#else /* !APR_HAS_OPENLDAP_LDAPSDK */ + +#define USE_APR_LDAP_REBIND +#include <apr_ldap_rebind.h> + +#define uldap_rebind_init(p) apr_ldap_rebind_init(p) +#define uldap_rebind_add(ldc) apr_ldap_rebind_add((ldc)->rebind_pool, \ + (ldc)->ldap, (ldc)->binddn, \ + (ldc)->bindpw) +#endif + +static APR_INLINE apr_status_t ldap_cache_lock(util_ldap_state_t *st, request_rec *r) { + apr_status_t rv = APR_SUCCESS; + if (st->util_ldap_cache_lock) { + apr_status_t rv = apr_global_mutex_lock(st->util_ldap_cache_lock); + if (rv != APR_SUCCESS) { + ap_log_rerror(APLOG_MARK, APLOG_CRIT, rv, r, APLOGNO(10134) "LDAP cache lock failed"); + ap_assert(0); + } + } + return rv; +} +static APR_INLINE apr_status_t ldap_cache_unlock(util_ldap_state_t *st, request_rec *r) { + apr_status_t rv = APR_SUCCESS; + if (st->util_ldap_cache_lock) { + apr_status_t rv = apr_global_mutex_unlock(st->util_ldap_cache_lock); + if (rv != APR_SUCCESS) { + ap_log_rerror(APLOG_MARK, APLOG_CRIT, rv, r, APLOGNO(10135) "LDAP cache lock failed"); + ap_assert(0); + } + } + return rv; +} static void util_ldap_strdup (char **str, const char *newstr) { @@ -181,6 +227,13 @@ static apr_status_t uldap_connection_unbind(void *param) util_ldap_connection_t *ldc = param; if (ldc) { +#ifdef USE_APR_LDAP_REBIND + /* forget the rebind info for this conn */ + if (ldc->ChaseReferrals == AP_LDAP_CHASEREFERRALS_ON) { + apr_pool_clear(ldc->rebind_pool); + } +#endif + if (ldc->ldap) { if (ldc->r) { ap_log_rerror(APLOG_MARK, APLOG_TRACE5, 0, ldc->r, "LDC %pp unbind", ldc); @@ -189,12 +242,6 @@ static apr_status_t uldap_connection_unbind(void *param) ldc->ldap = NULL; } ldc->bound = 0; - - /* forget the rebind info for this conn */ - if (ldc->ChaseReferrals == AP_LDAP_CHASEREFERRALS_ON) { - apr_ldap_rebind_remove(ldc->ldap); - apr_pool_clear(ldc->rebind_pool); - } } return APR_SUCCESS; @@ -250,7 +297,7 @@ static apr_status_t util_ldap_connection_remove (void *param) apr_thread_mutex_unlock(st->mutex); #endif - /* Destory the pool associated with this connection */ + /* Destroy the pool associated with this connection */ apr_pool_destroy(ldc->pool); @@ -330,7 +377,7 @@ static int uldap_connection_init(request_rec *r, if (ldc->ChaseReferrals == AP_LDAP_CHASEREFERRALS_ON) { /* Now that we have an ldap struct, add it to the referral list for rebinds. */ - rc = apr_ldap_rebind_add(ldc->rebind_pool, ldc->ldap, ldc->binddn, ldc->bindpw); + rc = uldap_rebind_add(ldc); if (rc != APR_SUCCESS) { ap_log_error(APLOG_MARK, APLOG_ERR, rc, r->server, APLOGNO(01277) "LDAP: Unable to add rebind cross reference entry. Out of memory?"); @@ -817,6 +864,7 @@ static util_ldap_connection_t * #endif return NULL; } + apr_pool_tag(newpool, "util_ldap_connection"); /* * Add the new connection entry to the linked list. 
Note that we @@ -855,6 +903,7 @@ static util_ldap_connection_t * /* whether or not to keep this connection in the pool when it's returned */ l->keep = (st->connection_pool_ttl == 0) ? 0 : 1; +#ifdef USE_APR_LDAP_REBIND if (l->ChaseReferrals == AP_LDAP_CHASEREFERRALS_ON) { if (apr_pool_create(&(l->rebind_pool), l->pool) != APR_SUCCESS) { ap_log_rerror(APLOG_MARK, APLOG_CRIT, 0, r, APLOGNO(01286) @@ -864,7 +913,9 @@ static util_ldap_connection_t * #endif return NULL; } + apr_pool_tag(l->rebind_pool, "util_ldap_rebind"); } +#endif if (p) { p->next = l; @@ -910,14 +961,14 @@ static int uldap_cache_comparedn(request_rec *r, util_ldap_connection_t *ldc, &ldap_module); /* get cache entry (or create one) */ - LDAP_CACHE_LOCK(); + ldap_cache_lock(st, r); curnode.url = url; curl = util_ald_cache_fetch(st->util_ldap_cache, &curnode); if (curl == NULL) { curl = util_ald_create_caches(st, url); } - LDAP_CACHE_UNLOCK(); + ldap_cache_unlock(st, r); /* a simple compare? */ if (!compare_dn_on_server) { @@ -934,7 +985,7 @@ static int uldap_cache_comparedn(request_rec *r, util_ldap_connection_t *ldc, if (curl) { /* no - it's a server side compare */ - LDAP_CACHE_LOCK(); + ldap_cache_lock(st, r); /* is it in the compare cache? */ newnode.reqdn = (char *)reqdn; @@ -942,13 +993,13 @@ static int uldap_cache_comparedn(request_rec *r, util_ldap_connection_t *ldc, if (node != NULL) { /* If it's in the cache, it's good */ /* unlock this read lock */ - LDAP_CACHE_UNLOCK(); + ldap_cache_unlock(st, r); ldc->reason = "DN Comparison TRUE (cached)"; return LDAP_COMPARE_TRUE; } /* unlock this read lock */ - LDAP_CACHE_UNLOCK(); + ldap_cache_unlock(st, r); } start_over: @@ -1010,7 +1061,7 @@ start_over: else { if (curl) { /* compare successful - add to the compare cache */ - LDAP_CACHE_LOCK(); + ldap_cache_lock(st, r); newnode.reqdn = (char *)reqdn; newnode.dn = (char *)dn; @@ -1021,7 +1072,7 @@ start_over: { util_ald_cache_insert(curl->dn_compare_cache, &newnode); } - LDAP_CACHE_UNLOCK(); + ldap_cache_unlock(st, r); } ldc->reason = "DN Comparison TRUE (checked on server)"; result = LDAP_COMPARE_TRUE; @@ -1056,17 +1107,17 @@ static int uldap_cache_compare(request_rec *r, util_ldap_connection_t *ldc, &ldap_module); /* get cache entry (or create one) */ - LDAP_CACHE_LOCK(); + ldap_cache_lock(st, r); curnode.url = url; curl = util_ald_cache_fetch(st->util_ldap_cache, &curnode); if (curl == NULL) { curl = util_ald_create_caches(st, url); } - LDAP_CACHE_UNLOCK(); + ldap_cache_unlock(st, r); if (curl) { /* make a comparison to the cache */ - LDAP_CACHE_LOCK(); + ldap_cache_lock(st, r); curtime = apr_time_now(); the_compare_node.dn = (char *)dn; @@ -1105,7 +1156,7 @@ static int uldap_cache_compare(request_rec *r, util_ldap_connection_t *ldc, /* record the result code to return with the reason... 
*/ result = compare_nodep->result; /* and unlock this read lock */ - LDAP_CACHE_UNLOCK(); + ldap_cache_unlock(st, r); ap_log_rerror(APLOG_MARK, APLOG_TRACE5, 0, r, "ldap_compare_s(%pp, %s, %s, %s) = %s (cached)", @@ -1114,7 +1165,7 @@ static int uldap_cache_compare(request_rec *r, util_ldap_connection_t *ldc, } } /* unlock this read lock */ - LDAP_CACHE_UNLOCK(); + ldap_cache_unlock(st, r); } start_over: @@ -1162,7 +1213,7 @@ start_over: (LDAP_NO_SUCH_ATTRIBUTE == result)) { if (curl) { /* compare completed; caching result */ - LDAP_CACHE_LOCK(); + ldap_cache_lock(st, r); the_compare_node.lastcompare = curtime; the_compare_node.result = result; the_compare_node.sgl_processed = 0; @@ -1191,7 +1242,7 @@ start_over: compare_nodep->lastcompare = curtime; compare_nodep->result = result; } - LDAP_CACHE_UNLOCK(); + ldap_cache_unlock(st, r); } if (LDAP_COMPARE_TRUE == result) { @@ -1456,14 +1507,14 @@ static int uldap_cache_check_subgroups(request_rec *r, * 2. Find previously created cache entry and check if there is already a * subgrouplist. */ - LDAP_CACHE_LOCK(); + ldap_cache_lock(st, r); curnode.url = url; curl = util_ald_cache_fetch(st->util_ldap_cache, &curnode); - LDAP_CACHE_UNLOCK(); + ldap_cache_unlock(st, r); if (curl && curl->compare_cache) { /* make a comparison to the cache */ - LDAP_CACHE_LOCK(); + ldap_cache_lock(st, r); the_compare_node.dn = (char *)dn; the_compare_node.attrib = (char *)"objectClass"; @@ -1505,7 +1556,7 @@ static int uldap_cache_check_subgroups(request_rec *r, } } } - LDAP_CACHE_UNLOCK(); + ldap_cache_unlock(st, r); } if (!tmp_local_sgl && !sgl_cached_empty) { @@ -1524,7 +1575,7 @@ static int uldap_cache_check_subgroups(request_rec *r, /* * Find the generic group cache entry and add the sgl we just retrieved. */ - LDAP_CACHE_LOCK(); + ldap_cache_lock(st, r); the_compare_node.dn = (char *)dn; the_compare_node.attrib = (char *)"objectClass"; @@ -1589,7 +1640,7 @@ static int uldap_cache_check_subgroups(request_rec *r, } } } - LDAP_CACHE_UNLOCK(); + ldap_cache_unlock(st, r); } } @@ -1667,17 +1718,17 @@ static int uldap_cache_checkuserid(request_rec *r, util_ldap_connection_t *ldc, &ldap_module); /* Get the cache node for this url */ - LDAP_CACHE_LOCK(); + ldap_cache_lock(st, r); curnode.url = url; curl = (util_url_node_t *)util_ald_cache_fetch(st->util_ldap_cache, &curnode); if (curl == NULL) { curl = util_ald_create_caches(st, url); } - LDAP_CACHE_UNLOCK(); + ldap_cache_unlock(st, r); if (curl) { - LDAP_CACHE_LOCK(); + ldap_cache_lock(st, r); the_search_node.username = filter; search_nodep = util_ald_cache_fetch(curl->search_cache, &the_search_node); @@ -1709,13 +1760,13 @@ static int uldap_cache_checkuserid(request_rec *r, util_ldap_connection_t *ldc, (*retvals)[i] = apr_pstrdup(r->pool, search_nodep->vals[i]); } } - LDAP_CACHE_UNLOCK(); + ldap_cache_unlock(st, r); ldc->reason = "Authentication successful (cached)"; return LDAP_SUCCESS; } } /* unlock this read lock */ - LDAP_CACHE_UNLOCK(); + ldap_cache_unlock(st, r); } /* @@ -1874,7 +1925,7 @@ start_over: * Add the new username to the search cache. 
*/ if (curl) { - LDAP_CACHE_LOCK(); + ldap_cache_lock(st, r); the_search_node.username = filter; the_search_node.dn = *binddn; the_search_node.bindpw = bindpw; @@ -1905,7 +1956,7 @@ start_over: /* Cache entry is valid, update lastbind */ search_nodep->lastbind = the_search_node.lastbind; } - LDAP_CACHE_UNLOCK(); + ldap_cache_unlock(st, r); } ldap_msgfree(res); @@ -1943,17 +1994,17 @@ static int uldap_cache_getuserdn(request_rec *r, util_ldap_connection_t *ldc, &ldap_module); /* Get the cache node for this url */ - LDAP_CACHE_LOCK(); + ldap_cache_lock(st, r); curnode.url = url; curl = (util_url_node_t *)util_ald_cache_fetch(st->util_ldap_cache, &curnode); if (curl == NULL) { curl = util_ald_create_caches(st, url); } - LDAP_CACHE_UNLOCK(); + ldap_cache_unlock(st, r); if (curl) { - LDAP_CACHE_LOCK(); + ldap_cache_lock(st, r); the_search_node.username = filter; search_nodep = util_ald_cache_fetch(curl->search_cache, &the_search_node); @@ -1979,13 +2030,13 @@ static int uldap_cache_getuserdn(request_rec *r, util_ldap_connection_t *ldc, (*retvals)[i] = apr_pstrdup(r->pool, search_nodep->vals[i]); } } - LDAP_CACHE_UNLOCK(); + ldap_cache_unlock(st, r); ldc->reason = "Search successful (cached)"; return LDAP_SUCCESS; } } /* unlock this read lock */ - LDAP_CACHE_UNLOCK(); + ldap_cache_unlock(st, r); } /* @@ -2083,7 +2134,7 @@ start_over: * Add the new username to the search cache. */ if (curl) { - LDAP_CACHE_LOCK(); + ldap_cache_lock(st, r); the_search_node.username = filter; the_search_node.dn = *binddn; the_search_node.bindpw = NULL; @@ -2112,7 +2163,7 @@ start_over: /* Cache entry is valid, update lastbind */ search_nodep->lastbind = the_search_node.lastbind; } - LDAP_CACHE_UNLOCK(); + ldap_cache_unlock(st, r); } ldap_msgfree(res); @@ -2736,12 +2787,14 @@ static const char *util_ldap_set_conn_ttl(cmd_parms *cmd, void *dummy, const char *val) { - apr_interval_time_t timeout; + apr_interval_time_t timeout = -1; util_ldap_state_t *st = (util_ldap_state_t *)ap_get_module_config(cmd->server->module_config, &ldap_module); - if (ap_timeout_parameter_parse(val, &timeout, "s") != APR_SUCCESS) { + /* Negative values mean AP_LDAP_CONNPOOL_INFINITE */ + if (val[0] != '-' && + ap_timeout_parameter_parse(val, &timeout, "s") != APR_SUCCESS) { return "LDAPConnectionPoolTTL has wrong format"; } @@ -2810,6 +2863,7 @@ static void *util_ldap_create_config(apr_pool_t *p, server_rec *s) * no shared memory managed by either. */ apr_pool_create(&st->pool, p); + apr_pool_tag(st->pool, "util_ldap_state"); #if APR_HAS_THREADS apr_thread_mutex_create(&st->mutex, APR_THREAD_MUTEX_DEFAULT, st->pool); #endif @@ -2874,7 +2928,7 @@ static void *util_ldap_merge_config(apr_pool_t *p, void *basev, able to handle the connection timeout per-connection but the Novell SDK cannot. Allowing the timeout to be set by each vhost is of little value so rather than - trying to make special expections for one LDAP SDK, GLOBAL_ONLY + trying to make special exceptions for one LDAP SDK, GLOBAL_ONLY is being enforced on this setting as well. */ st->connectionTimeout = base->connectionTimeout; st->opTimeout = base->opTimeout; @@ -3051,7 +3105,7 @@ static int util_ldap_post_config(apr_pool_t *p, apr_pool_t *plog, } /* Initialize the rebind callback's cross reference list. 
*/ - apr_ldap_rebind_init (p); + (void) uldap_rebind_init(p); #ifdef AP_LDAP_OPT_DEBUG if (st->debug_level > 0) { diff --git a/modules/ldap/util_ldap_cache.c b/modules/ldap/util_ldap_cache.c index 774a76e..27dc733 100644 --- a/modules/ldap/util_ldap_cache.c +++ b/modules/ldap/util_ldap_cache.c @@ -230,8 +230,8 @@ void util_ldap_search_node_display(request_rec *r, util_ald_cache_t *cache, void "%s" "%s" "", - node->username, - node->dn, + ap_escape_html(r->pool, node->username), + ap_escape_html(r->pool, node->dn), date_str); } @@ -331,9 +331,9 @@ void util_ldap_compare_node_display(request_rec *r, util_ald_cache_t *cache, voi "%s" "%s" "", - node->dn, - node->attrib, - node->value, + ap_escape_html(r->pool, node->dn), + ap_escape_html(r->pool, node->attrib), + ap_escape_html(r->pool, node->value), date_str, cmp_result, sub_groups_val, @@ -391,8 +391,8 @@ void util_ldap_dn_compare_node_display(request_rec *r, util_ald_cache_t *cache, "%s" "%s" "", - node->reqdn, - node->dn); + ap_escape_html(r->pool, node->reqdn), + ap_escape_html(r->pool, node->dn)); } diff --git a/modules/ldap/util_ldap_cache_mgr.c b/modules/ldap/util_ldap_cache_mgr.c index 9bef3f8..aa822bc 100644 --- a/modules/ldap/util_ldap_cache_mgr.c +++ b/modules/ldap/util_ldap_cache_mgr.c @@ -280,7 +280,7 @@ void util_ald_cache_purge(util_ald_cache_t *cache) */ util_url_node_t *util_ald_create_caches(util_ldap_state_t *st, const char *url) { - util_url_node_t curl, *newcurl = NULL; + util_url_node_t curl; util_ald_cache_t *search_cache; util_ald_cache_t *compare_cache; util_ald_cache_t *dn_compare_cache; @@ -313,7 +313,6 @@ util_url_node_t *util_ald_create_caches(util_ldap_state_t *st, const char *url) /* check that all the caches initialised successfully */ if (search_cache && compare_cache && dn_compare_cache) { - /* The contents of this structure will be duplicated in shared memory during the insert. So use stack memory rather than pool memory to avoid a memory leak. */ @@ -323,11 +322,16 @@ util_url_node_t *util_ald_create_caches(util_ldap_state_t *st, const char *url) curl.compare_cache = compare_cache; curl.dn_compare_cache = dn_compare_cache; - newcurl = util_ald_cache_insert(st->util_ldap_cache, &curl); - + return util_ald_cache_insert(st->util_ldap_cache, &curl); } + else { + /* util_ald_destroy_cache is a noop for a NULL argument. 
*/ + util_ald_destroy_cache(search_cache); + util_ald_destroy_cache(compare_cache); + util_ald_destroy_cache(dn_compare_cache); - return newcurl; + return NULL; + } } diff --git a/modules/loggers/mod_log_config.c b/modules/loggers/mod_log_config.c index 4270b3f..5d5b73a 100644 --- a/modules/loggers/mod_log_config.c +++ b/modules/loggers/mod_log_config.c @@ -309,9 +309,15 @@ static const char *constant_item(request_rec *dummy, char *stuff) static const char *log_remote_host(request_rec *r, char *a) { - return ap_escape_logitem(r->pool, ap_get_remote_host(r->connection, - r->per_dir_config, - REMOTE_NAME, NULL)); + const char *remote_host; + if (a && !strcmp(a, "c")) { + remote_host = ap_get_remote_host(r->connection, r->per_dir_config, + REMOTE_NAME, NULL); + } + else { + remote_host = ap_get_useragent_host(r, REMOTE_NAME, NULL); + } + return ap_escape_logitem(r->pool, remote_host); } static const char *log_remote_address(request_rec *r, char *a) @@ -467,7 +473,7 @@ static APR_INLINE char *find_multiple_headers(apr_pool_t *pool, result_list = rp = NULL; do { - if (!strcasecmp(t_elt->key, key)) { + if (!ap_cstr_casecmp(t_elt->key, key)) { if (!result_list) { result_list = rp = apr_palloc(pool, sizeof(*rp)); } @@ -511,10 +517,10 @@ static const char *log_header_out(request_rec *r, char *a) { const char *cp = NULL; - if (!strcasecmp(a, "Content-type") && r->content_type) { + if (!ap_cstr_casecmp(a, "Content-type") && r->content_type) { cp = ap_field_noparam(r->pool, r->content_type); } - else if (!strcasecmp(a, "Set-Cookie")) { + else if (!ap_cstr_casecmp(a, "Set-Cookie")) { cp = find_multiple_headers(r->pool, r->headers_out, a); } else { @@ -570,7 +576,7 @@ static const char *log_cookie(request_rec *r, char *a) --last; } - if (!strcasecmp(name, a)) { + if (!ap_cstr_casecmp(name, a)) { /* last1 points to the next char following the ';' delim, or the trailing NUL char of the string */ last = last1 - (*last1 ? 2 : 1); @@ -841,14 +847,8 @@ static const char *log_pid_tid(request_rec *r, char *a) int tid = 0; /* APR will format "0" anyway but an arg is needed */ #endif return apr_psprintf(r->pool, -#if APR_MAJOR_VERSION > 1 || (APR_MAJOR_VERSION == 1 && APR_MINOR_VERSION >= 2) /* APR can format a thread id in hex */ - *a == 'h' ? "%pt" : "%pT", -#else - /* APR is missing the feature, so always use decimal */ - "%pT", -#endif - &tid); + *a == 'h' ? 
"%pt" : "%pT", &tid); } /* bogus format */ return a; @@ -1099,7 +1099,8 @@ static const char *process_item(request_rec *r, request_rec *orig, static void flush_log(buffered_log *buf) { if (buf->outcnt && buf->handle != NULL) { - apr_file_write(buf->handle, buf->outbuf, &buf->outcnt); + /* XXX: error handling */ + apr_file_write_full(buf->handle, buf->outbuf, buf->outcnt, NULL); buf->outcnt = 0; } } @@ -1165,11 +1166,9 @@ static int config_log_transaction(request_rec *r, config_log_state *cls, for (i = 0; i < format->nelts; ++i) { strs[i] = process_item(r, orig, &items[i]); - } - - for (i = 0; i < format->nelts; ++i) { len += strl[i] = strlen(strs[i]); } + if (!log_writer) { ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(00645) "log writer isn't correctly setup"); @@ -1710,7 +1709,7 @@ static apr_status_t ap_buffered_log_writer(request_rec *r, s += strl[i]; } w = len; - rv = apr_file_write(buf->handle, str, &w); + rv = apr_file_write_full(buf->handle, str, w, NULL); } else { diff --git a/modules/loggers/mod_log_debug.c b/modules/loggers/mod_log_debug.c index 8a6c124..3f27a95 100644 --- a/modules/loggers/mod_log_debug.c +++ b/modules/loggers/mod_log_debug.c @@ -49,6 +49,7 @@ static const char * const hooks[] = { "check_authn", /* 9 */ "check_authz", /* 10 */ "insert_filter", /* 11 */ + "pre_translate_name", /* 12 */ NULL }; @@ -109,6 +110,12 @@ static int log_debug_handler(request_rec *r) return DECLINED; } +static int log_debug_pre_translate_name(request_rec *r) +{ + do_debug_log(r, hooks[12]); + return DECLINED; +} + static int log_debug_translate_name(request_rec *r) { do_debug_log(r, hooks[3]); @@ -263,6 +270,7 @@ static void register_hooks(apr_pool_t *p) ap_hook_log_transaction(log_debug_log_transaction, NULL, NULL, APR_HOOK_FIRST); ap_hook_quick_handler(log_debug_quick_handler, NULL, NULL, APR_HOOK_FIRST); ap_hook_handler(log_debug_handler, NULL, NULL, APR_HOOK_FIRST); + ap_hook_pre_translate_name(log_debug_pre_translate_name, NULL, NULL, APR_HOOK_FIRST); ap_hook_translate_name(log_debug_translate_name, NULL, NULL, APR_HOOK_FIRST); ap_hook_map_to_storage(log_debug_map_to_storage, NULL, NULL, APR_HOOK_FIRST); ap_hook_fixups(log_debug_fixups, NULL, NULL, APR_HOOK_FIRST); diff --git a/modules/loggers/mod_log_forensic.c b/modules/loggers/mod_log_forensic.c index bb808e8..4884f25 100644 --- a/modules/loggers/mod_log_forensic.c +++ b/modules/loggers/mod_log_forensic.c @@ -123,7 +123,7 @@ static char *log_escape(char *q, const char *e, const char *p) { for ( ; *p ; ++p) { ap_assert(q < e); - if (test_char_table[*(unsigned char *)p]&T_ESCAPE_FORENSIC) { + if (TEST_CHAR(*p, T_ESCAPE_FORENSIC)) { ap_assert(q+2 < e); *q++ = '%'; ap_bin2hex(p, 1, q); @@ -146,12 +146,12 @@ typedef struct hlog { apr_size_t count; } hlog; -static int count_string(const char *p) +static apr_size_t count_string(const char *p) { - int n; + apr_size_t n; for (n = 0 ; *p ; ++p, ++n) - if (test_char_table[*(unsigned char *)p]&T_ESCAPE_FORENSIC) + if (TEST_CHAR(*p, T_ESCAPE_FORENSIC)) n += 2; return n; } diff --git a/modules/lua/config.m4 b/modules/lua/config.m4 index 29fd563..40ae6f0 100644 --- a/modules/lua/config.m4 +++ b/modules/lua/config.m4 @@ -34,7 +34,7 @@ AC_DEFUN([CHECK_LUA_PATH], [dnl fi ]) -dnl Check for Lua 5.3/5.2/5.1 Libraries +dnl Check for Lua Libraries dnl CHECK_LUA(ACTION-IF-FOUND [, ACTION-IF-NOT-FOUND]) dnl Sets: dnl LUA_CFLAGS @@ -44,7 +44,7 @@ AC_DEFUN([CHECK_LUA], AC_ARG_WITH( lua, - [AC_HELP_STRING([--with-lua=PATH],[Path to the Lua 5.3/5.2/5.1 prefix])], + 
[AC_HELP_STRING([--with-lua=PATH],[Path to the Lua installation prefix])], lua_path="$withval", :) @@ -55,16 +55,25 @@ else test_paths="${lua_path}" fi -if test -n "$PKGCONFIG" -a -z "$lua_path" \ - && $PKGCONFIG --atleast-version=5.1 lua; then - LUA_LIBS="`$PKGCONFIG --libs lua`" - LUA_CFLAGS="`$PKGCONFIG --cflags lua`" - LUA_VERSION="`$PKGCONFIG --modversion lua`" - AC_MSG_NOTICE([using Lua $LUA_VERSION configuration from pkg-config]) -else +for pklua in lua lua5.4 lua5.3 lua5.2 lua5.1; do + if test -n "$PKGCONFIG" -a -z "$lua_path" \ + && $PKGCONFIG --atleast-version=5.1 $pklua; then + LUA_LIBS="`$PKGCONFIG --libs $pklua`" + LUA_CFLAGS="`$PKGCONFIG --cflags $pklua`" + LUA_VERSION="`$PKGCONFIG --modversion $pklua`" + AC_MSG_NOTICE([using Lua $LUA_VERSION configuration from pkg-config]) + break + fi +done + +if test -z "$LUA_VERSION"; then AC_CHECK_LIB(m, pow, lib_m="-lm") AC_CHECK_LIB(m, sqrt, lib_m="-lm") for x in $test_paths ; do + CHECK_LUA_PATH([${x}], [include/lua-5.4], [lib/lua-5.4], [lua-5.4]) + CHECK_LUA_PATH([${x}], [include/lua5.4], [lib], [lua5.4]) + CHECK_LUA_PATH([${x}], [include/lua54], [lib/lua54], [lua]) + CHECK_LUA_PATH([${x}], [include/lua-5.3], [lib/lua-5.3], [lua-5.3]) CHECK_LUA_PATH([${x}], [include/lua5.3], [lib], [lua5.3]) CHECK_LUA_PATH([${x}], [include/lua53], [lib/lua53], [lua]) @@ -85,13 +94,13 @@ AC_SUBST(LUA_LIBS) AC_SUBST(LUA_CFLAGS) if test -z "${LUA_LIBS}"; then - AC_MSG_WARN([*** Lua 5.3 5.2 or 5.1 library not found.]) + AC_MSG_WARN([*** Lua 5.4 5.3 5.2 or 5.1 library not found.]) ifelse([$2], , enable_lua="no" if test -z "${lua_path}"; then - AC_MSG_WARN([Lua 5.3 5.2 or 5.1 library is required]) + AC_MSG_WARN([Lua 5.4 5.3 5.2 or 5.1 library is required]) else - AC_MSG_ERROR([Lua 5.3 5.2 or 5.1 library is required]) + AC_MSG_ERROR([Lua 5.4 5.3 5.2 or 5.1 library is required]) fi, $2) else diff --git a/modules/lua/lua_apr.c b/modules/lua/lua_apr.c index 8e34cf3..9590fd6 100644 --- a/modules/lua/lua_apr.c +++ b/modules/lua/lua_apr.c @@ -39,7 +39,13 @@ static int lua_table_set(lua_State *L) { req_table_t *t = ap_lua_check_apr_table(L, 1); const char *key = luaL_checkstring(L, 2); - const char *val = luaL_checkstring(L, 3); + const char *val = luaL_optlstring(L, 3, NULL, NULL); + + if (!val) { + apr_table_unset(t->t, key); + return 0; + } + /* Unless it's the 'notes' table, check for newline chars */ /* t->r will be NULL in case of the connection notes, but since we aren't going to check anything called 'notes', we can safely diff --git a/modules/lua/lua_request.c b/modules/lua/lua_request.c index 77a88b4..bec8580 100644 --- a/modules/lua/lua_request.c +++ b/modules/lua/lua_request.c @@ -235,33 +235,36 @@ static int lua_read_body(request_rec *r, const char **rbuf, apr_off_t *size, { int rc = OK; + *rbuf = NULL; + *size = 0; + if ((rc = ap_setup_client_block(r, REQUEST_CHUNKED_ERROR))) { return (rc); } if (ap_should_client_block(r)) { /*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/ - char argsbuffer[HUGE_STRING_LEN]; - apr_off_t rsize, len_read, rpos = 0; + apr_off_t len_read = -1; + apr_off_t rpos = 0; apr_off_t length = r->remaining; /*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/ if (maxsize != 0 && length > maxsize) { return APR_EINCOMPLETE; /* Only room for incomplete data chunk :( */ } - *rbuf = (const char *) apr_pcalloc(r->pool, (apr_size_t) (length + 1)); - *size = length; - while ((len_read = ap_get_client_block(r, argsbuffer, sizeof(argsbuffer))) > 0) { - if ((rpos + len_read) > length) { - rsize = length - rpos; - } - else { - rsize = len_read; - } - - 
memcpy((char *) *rbuf + rpos, argsbuffer, (size_t) rsize); - rpos += rsize; + *rbuf = (const char *) apr_pcalloc(r->pool, (apr_size_t) (length) + 1); + while ((rpos < length) + && (len_read = ap_get_client_block(r, (char *) *rbuf + rpos, + length - rpos)) > 0) { + rpos += len_read; + } + if (len_read < 0) { + return APR_EINCOMPLETE; } + *size = rpos; + } + else { + rc = DONE; } return (rc); @@ -278,6 +281,8 @@ static apr_status_t lua_write_body(request_rec *r, apr_file_t *file, apr_off_t * { apr_status_t rc = OK; + *size = 0; + if ((rc = ap_setup_client_block(r, REQUEST_CHUNKED_ERROR))) return rc; if (ap_should_client_block(r)) { @@ -303,10 +308,47 @@ static apr_status_t lua_write_body(request_rec *r, apr_file_t *file, apr_off_t * rpos += rsize; } } + else { + rc = DONE; + } return rc; } +/* expose apr_table as (r/o) lua table */ +static int req_aprtable2luatable(lua_State *L, apr_table_t *t) +{ + lua_newtable(L); + lua_newtable(L); /* [table, table] */ + apr_table_do(req_aprtable2luatable_cb, L, t, NULL); + return 2; /* [table, table>] */ +} + +static int req_headers_in_table(lua_State *L) +{ + request_rec *r = ap_lua_check_request_rec(L, 1); + return req_aprtable2luatable(L, r->headers_in); +} +static int req_headers_out_table(lua_State *L) +{ + request_rec *r = ap_lua_check_request_rec(L, 1); + return req_aprtable2luatable(L, r->headers_out); +} +static int req_err_headers_out_table(lua_State *L) +{ + request_rec *r = ap_lua_check_request_rec(L, 1); + return req_aprtable2luatable(L, r->err_headers_out); +} +static int req_notes_table(lua_State *L) +{ + request_rec *r = ap_lua_check_request_rec(L, 1); + return req_aprtable2luatable(L, r->notes); +} +static int req_subprocess_env_table(lua_State *L) +{ + request_rec *r = ap_lua_check_request_rec(L, 1); + return req_aprtable2luatable(L, r->subprocess_env); +} /* r:parseargs() returning a lua table */ static int req_parseargs(lua_State *L) { @@ -376,6 +418,7 @@ static int req_parsebody(lua_State *L) if (end == NULL) break; key = (char *) apr_pcalloc(r->pool, 256); filename = (char *) apr_pcalloc(r->pool, 256); + if (end - crlf <= 8) break; vlen = end - crlf - 8; buffer = (char *) apr_pcalloc(r->pool, vlen+1); memcpy(buffer, crlf + 4, vlen); @@ -2185,23 +2228,20 @@ static int lua_websocket_greet(lua_State *L) return 0; } -static apr_status_t lua_websocket_readbytes(conn_rec* c, char* buffer, - apr_off_t len) +static apr_status_t lua_websocket_readbytes(conn_rec* c, + apr_bucket_brigade *brigade, + char* buffer, apr_off_t len) { - apr_bucket_brigade *brigade = apr_brigade_create(c->pool, c->bucket_alloc); + apr_size_t delivered; apr_status_t rv; + rv = ap_get_brigade(c->input_filters, brigade, AP_MODE_READBYTES, APR_BLOCK_READ, len); if (rv == APR_SUCCESS) { - if (!APR_BRIGADE_EMPTY(brigade)) { - apr_bucket* bucket = APR_BRIGADE_FIRST(brigade); - const char* data = NULL; - apr_size_t data_length = 0; - rv = apr_bucket_read(bucket, &data, &data_length, APR_BLOCK_READ); - if (rv == APR_SUCCESS) { - memcpy(buffer, data, len); - } - apr_bucket_delete(bucket); + delivered = len; + rv = apr_brigade_flatten(brigade, buffer, &delivered); + if ((rv == APR_SUCCESS) && (delivered < len)) { + rv = APR_INCOMPLETE; } } apr_brigade_cleanup(brigade); @@ -2231,35 +2271,28 @@ static int lua_websocket_peek(lua_State *L) static int lua_websocket_read(lua_State *L) { - apr_socket_t *sock; apr_status_t rv; int do_read = 1; int n = 0; - apr_size_t len = 1; apr_size_t plen = 0; unsigned short payload_short = 0; apr_uint64_t payload_long = 0; unsigned char 
*mask_bytes; char byte; - int plaintext; - - + apr_bucket_brigade *brigade; + conn_rec* c; + request_rec *r = ap_lua_check_request_rec(L, 1); - plaintext = ap_lua_ssl_is_https(r->connection) ? 0 : 1; + c = r->connection; - mask_bytes = apr_pcalloc(r->pool, 4); - sock = ap_get_conn_socket(r->connection); + + brigade = apr_brigade_create(r->pool, c->bucket_alloc); while (do_read) { do_read = 0; /* Get opcode and FIN bit */ - if (plaintext) { - rv = apr_socket_recv(sock, &byte, &len); - } - else { - rv = lua_websocket_readbytes(r->connection, &byte, 1); - } + rv = lua_websocket_readbytes(c, brigade, &byte, 1); if (rv == APR_SUCCESS) { unsigned char ubyte, fin, opcode, mask, payload; ubyte = (unsigned char)byte; @@ -2269,12 +2302,7 @@ static int lua_websocket_read(lua_State *L) opcode = ubyte & 0xf; /* Get the payload length and mask bit */ - if (plaintext) { - rv = apr_socket_recv(sock, &byte, &len); - } - else { - rv = lua_websocket_readbytes(r->connection, &byte, 1); - } + rv = lua_websocket_readbytes(c, brigade, &byte, 1); if (rv == APR_SUCCESS) { ubyte = (unsigned char)byte; /* Mask is the first bit */ @@ -2285,40 +2313,25 @@ static int lua_websocket_read(lua_State *L) /* Extended payload? */ if (payload == 126) { - len = 2; - if (plaintext) { - /* XXX: apr_socket_recv does not receive len bits, only up to len bits! */ - rv = apr_socket_recv(sock, (char*) &payload_short, &len); - } - else { - rv = lua_websocket_readbytes(r->connection, - (char*) &payload_short, 2); - } - payload_short = ntohs(payload_short); + rv = lua_websocket_readbytes(c, brigade, + (char*) &payload_short, 2); - if (rv == APR_SUCCESS) { - plen = payload_short; - } - else { + if (rv != APR_SUCCESS) { return 0; } + + plen = ntohs(payload_short); } /* Super duper extended payload? */ if (payload == 127) { - len = 8; - if (plaintext) { - rv = apr_socket_recv(sock, (char*) &payload_long, &len); - } - else { - rv = lua_websocket_readbytes(r->connection, - (char*) &payload_long, 8); - } - if (rv == APR_SUCCESS) { - plen = ap_ntoh64(&payload_long); - } - else { + rv = lua_websocket_readbytes(c, brigade, + (char*) &payload_long, 8); + + if (rv != APR_SUCCESS) { return 0; } + + plen = ap_ntoh64(&payload_long); } ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(03210) "Websocket: Reading %" APR_SIZE_T_FMT " (%s) bytes, masking is %s. %s", @@ -2327,46 +2340,27 @@ static int lua_websocket_read(lua_State *L) mask ? "on" : "off", fin ? 
"This is a final frame" : "more to follow"); if (mask) { - len = 4; - if (plaintext) { - rv = apr_socket_recv(sock, (char*) mask_bytes, &len); - } - else { - rv = lua_websocket_readbytes(r->connection, - (char*) mask_bytes, 4); - } + rv = lua_websocket_readbytes(c, brigade, + (char*) mask_bytes, 4); + if (rv != APR_SUCCESS) { return 0; } } if (plen < (HUGE_STRING_LEN*1024) && plen > 0) { apr_size_t remaining = plen; - apr_size_t received; - apr_off_t at = 0; char *buffer = apr_palloc(r->pool, plen+1); buffer[plen] = 0; - if (plaintext) { - while (remaining > 0) { - received = remaining; - rv = apr_socket_recv(sock, buffer+at, &received); - if (received > 0 ) { - remaining -= received; - at += received; - } - } - ap_log_rerror(APLOG_MARK, APLOG_TRACE1, 0, r, - "Websocket: Frame contained %" APR_OFF_T_FMT " bytes, pushed to Lua stack", - at); - } - else { - rv = lua_websocket_readbytes(r->connection, buffer, - remaining); - ap_log_rerror(APLOG_MARK, APLOG_TRACE1, 0, r, - "Websocket: SSL Frame contained %" APR_SIZE_T_FMT " bytes, "\ - "pushed to Lua stack", - remaining); + rv = lua_websocket_readbytes(c, brigade, buffer, remaining); + + if (rv != APR_SUCCESS) { + return 0; } + + ap_log_rerror(APLOG_MARK, APLOG_TRACE1, 0, r, + "Websocket: Frame contained %" APR_SIZE_T_FMT \ + " bytes, pushed to Lua stack", remaining); if (mask) { for (n = 0; n < plen; n++) { buffer[n] ^= mask_bytes[n%4]; @@ -2378,14 +2372,25 @@ static int lua_websocket_read(lua_State *L) return 2; } - /* Decide if we need to react to the opcode or not */ if (opcode == 0x09) { /* ping */ char frame[2]; - plen = 2; + apr_bucket *b; + frame[0] = 0x8A; frame[1] = 0; - apr_socket_send(sock, frame, &plen); /* Pong! */ + + /* Pong! */ + b = apr_bucket_transient_create(frame, 2, c->bucket_alloc); + APR_BRIGADE_INSERT_TAIL(brigade, b); + + rv = ap_pass_brigade(c->output_filters, brigade); + apr_brigade_cleanup(brigade); + + if (rv != APR_SUCCESS) { + return 0; + } + do_read = 1; } } @@ -2814,14 +2819,24 @@ void ap_lua_load_request_lmodule(lua_State *L, apr_pool_t *p) makefun(&req_proxyreq_field, APL_REQ_FUNTYPE_STRING, p)); apr_hash_set(dispatch, "headers_in", APR_HASH_KEY_STRING, makefun(&req_headers_in, APL_REQ_FUNTYPE_TABLE, p)); + apr_hash_set(dispatch, "headers_in_table", APR_HASH_KEY_STRING, + makefun(&req_headers_in_table, APL_REQ_FUNTYPE_LUACFUN, p)); apr_hash_set(dispatch, "headers_out", APR_HASH_KEY_STRING, makefun(&req_headers_out, APL_REQ_FUNTYPE_TABLE, p)); + apr_hash_set(dispatch, "headers_out_table", APR_HASH_KEY_STRING, + makefun(&req_headers_out_table, APL_REQ_FUNTYPE_LUACFUN, p)); apr_hash_set(dispatch, "err_headers_out", APR_HASH_KEY_STRING, makefun(&req_err_headers_out, APL_REQ_FUNTYPE_TABLE, p)); + apr_hash_set(dispatch, "err_headers_out_table", APR_HASH_KEY_STRING, + makefun(&req_err_headers_out_table, APL_REQ_FUNTYPE_LUACFUN, p)); apr_hash_set(dispatch, "notes", APR_HASH_KEY_STRING, makefun(&req_notes, APL_REQ_FUNTYPE_TABLE, p)); + apr_hash_set(dispatch, "notes_table", APR_HASH_KEY_STRING, + makefun(&req_notes_table, APL_REQ_FUNTYPE_LUACFUN, p)); apr_hash_set(dispatch, "subprocess_env", APR_HASH_KEY_STRING, makefun(&req_subprocess_env, APL_REQ_FUNTYPE_TABLE, p)); + apr_hash_set(dispatch, "subprocess_env_table", APR_HASH_KEY_STRING, + makefun(&req_subprocess_env_table, APL_REQ_FUNTYPE_LUACFUN, p)); apr_hash_set(dispatch, "flush", APR_HASH_KEY_STRING, makefun(&lua_ap_rflush, APL_REQ_FUNTYPE_LUACFUN, p)); apr_hash_set(dispatch, "port", APR_HASH_KEY_STRING, diff --git a/modules/lua/mod_lua.c b/modules/lua/mod_lua.c 
index 6d79199..303890e 100644 --- a/modules/lua/mod_lua.c +++ b/modules/lua/mod_lua.c @@ -24,7 +24,6 @@ #include "lua_apr.h" #include "lua_config.h" #include "apr_optional.h" -#include "mod_ssl.h" #include "mod_auth.h" #include "util_mutex.h" @@ -53,8 +52,6 @@ APR_IMPLEMENT_OPTIONAL_HOOK_RUN_ALL(ap_lua, AP_LUA, int, lua_open, APR_IMPLEMENT_OPTIONAL_HOOK_RUN_ALL(ap_lua, AP_LUA, int, lua_request, (lua_State *L, request_rec *r), (L, r), OK, DECLINED) -static APR_OPTIONAL_FN_TYPE(ssl_var_lookup) *lua_ssl_val = NULL; -static APR_OPTIONAL_FN_TYPE(ssl_is_https) *lua_ssl_is_https = NULL; module AP_MODULE_DECLARE_DATA lua_module; @@ -216,6 +213,7 @@ static ap_lua_vm_spec *create_vm_spec(apr_pool_t **lifecycle_pool, case AP_LUA_SCOPE_ONCE: case AP_LUA_SCOPE_UNSET: apr_pool_create(&pool, r->pool); + apr_pool_tag(pool, "mod_lua-vm"); break; case AP_LUA_SCOPE_REQUEST: pool = r->pool; @@ -341,7 +339,7 @@ static apr_status_t lua_setup_filter_ctx(ap_filter_t* f, request_rec* r, lua_fil { apr_pool_t *pool; ap_lua_vm_spec *spec; - int n, rc; + int n, rc, nres; lua_State *L; lua_filter_ctx *ctx; ap_lua_server_cfg *server_cfg = ap_get_module_config(r->server->module_config, @@ -409,7 +407,7 @@ static apr_status_t lua_setup_filter_ctx(ap_filter_t* f, request_rec* r, lua_fil /* If a Lua filter is interested in filtering a request, it must first do a yield, * otherwise we'll assume that it's not interested and pretend we didn't find it. */ - rc = lua_resume(L, 1); + rc = lua_resume(L, 1, &nres); if (rc == LUA_YIELD) { if (f->frec->providers == NULL) { /* Not wired by mod_filter */ @@ -431,7 +429,7 @@ static apr_status_t lua_setup_filter_ctx(ap_filter_t* f, request_rec* r, lua_fil static apr_status_t lua_output_filter_handle(ap_filter_t *f, apr_bucket_brigade *pbbIn) { request_rec *r = f->r; - int rc; + int rc, nres; lua_State *L; lua_filter_ctx* ctx; conn_rec *c = r->connection; @@ -491,7 +489,7 @@ static apr_status_t lua_output_filter_handle(ap_filter_t *f, apr_bucket_brigade lua_setglobal(L, "bucket"); /* If Lua yielded, it means we have something to pass on */ - if (lua_resume(L, 0) == LUA_YIELD) { + if (lua_resume(L, 0, &nres) == LUA_YIELD && nres == 1) { size_t olen; const char* output = lua_tolstring(L, 1, &olen); if (olen > 0) { @@ -523,7 +521,7 @@ static apr_status_t lua_output_filter_handle(ap_filter_t *f, apr_bucket_brigade apr_bucket *pbktEOS; lua_pushnil(L); lua_setglobal(L, "bucket"); - if (lua_resume(L, 0) == LUA_YIELD) { + if (lua_resume(L, 0, &nres) == LUA_YIELD && nres == 1) { apr_bucket *pbktOut; size_t olen; const char* output = lua_tolstring(L, 1, &olen); @@ -557,7 +555,7 @@ static apr_status_t lua_input_filter_handle(ap_filter_t *f, apr_off_t nBytes) { request_rec *r = f->r; - int rc, lastCall = 0; + int rc, lastCall = 0, nres; lua_State *L; lua_filter_ctx* ctx; conn_rec *c = r->connection; @@ -620,7 +618,7 @@ static apr_status_t lua_input_filter_handle(ap_filter_t *f, lua_setglobal(L, "bucket"); /* If Lua yielded, it means we have something to pass on */ - if (lua_resume(L, 0) == LUA_YIELD) { + if (lua_resume(L, 0, &nres) == LUA_YIELD && nres == 1) { size_t olen; const char* output = lua_tolstring(L, 1, &olen); pbktOut = apr_bucket_heap_create(output, olen, 0, c->bucket_alloc); @@ -642,7 +640,7 @@ static apr_status_t lua_input_filter_handle(ap_filter_t *f, apr_bucket *pbktEOS = apr_bucket_eos_create(c->bucket_alloc); lua_pushnil(L); lua_setglobal(L, "bucket"); - if (lua_resume(L, 0) == LUA_YIELD) { + if (lua_resume(L, 0, &nres) == LUA_YIELD && nres == 1) { apr_bucket *pbktOut; size_t olen; 
const char* output = lua_tolstring(L, 1, &olen); @@ -1204,6 +1202,11 @@ static int lua_check_user_id_harness_last(request_rec *r) } */ +static int lua_pre_trans_name_harness(request_rec *r) +{ + return lua_request_rec_hook_harness(r, "pre_translate_name", APR_HOOK_MIDDLE); +} + static int lua_translate_name_harness_first(request_rec *r) { return lua_request_rec_hook_harness(r, "translate_name", AP_LUA_HOOK_FIRST); @@ -1276,6 +1279,21 @@ static int lua_quick_harness(request_rec *r, int lookup) return lua_request_rec_hook_harness(r, "quick", APR_HOOK_MIDDLE); } +static const char *register_pre_trans_name_hook(cmd_parms *cmd, void *_cfg, + const char *file, + const char *function) +{ + return register_named_file_function_hook("pre_translate_name", cmd, _cfg, file, + function, APR_HOOK_MIDDLE); +} + +static const char *register_pre_trans_name_block(cmd_parms *cmd, void *_cfg, + const char *line) +{ + return register_named_block_function_hook("pre_translate_name", cmd, _cfg, + line); +} + static const char *register_translate_name_hook(cmd_parms *cmd, void *_cfg, const char *file, const char *function, @@ -1632,7 +1650,7 @@ static const char *register_lua_scope(cmd_parms *cmd, return apr_psprintf(cmd->pool, "Scope type of '%s' cannot be used because this " "server does not have threading support " - "(APR_HAS_THREADS)" + "(APR_HAS_THREADS)", scope); #endif cfg->vm_scope = AP_LUA_SCOPE_THREAD; @@ -1643,7 +1661,7 @@ static const char *register_lua_scope(cmd_parms *cmd, return apr_psprintf(cmd->pool, "Scope type of '%s' cannot be used because this " "server does not have threading support " - "(APR_HAS_THREADS)" + "(APR_HAS_THREADS)", scope); #endif cfg->vm_scope = AP_LUA_SCOPE_SERVER; @@ -1687,15 +1705,12 @@ static const char *register_lua_root(cmd_parms *cmd, void *_cfg, const char *ap_lua_ssl_val(apr_pool_t *p, server_rec *s, conn_rec *c, request_rec *r, const char *var) { - if (lua_ssl_val) { - return (const char *)lua_ssl_val(p, s, c, r, (char *)var); - } - return NULL; + return ap_ssl_var_lookup(p, s, c, r, var); } int ap_lua_ssl_is_https(conn_rec *c) { - return lua_ssl_is_https ? lua_ssl_is_https(c) : 0; + return ap_ssl_conn_is_ssl(c); } /*******************************/ @@ -1833,7 +1848,7 @@ static const char *register_authz_provider(cmd_parms *cmd, void *_cfg, } -command_rec lua_commands[] = { +static const command_rec lua_commands[] = { AP_INIT_TAKE1("LuaRoot", register_lua_root, NULL, OR_ALL, "Specify the base path for resolving relative paths for mod_lua directives"), @@ -1847,6 +1862,14 @@ command_rec lua_commands[] = { AP_INIT_TAKE3("LuaAuthzProvider", register_authz_provider, NULL, RSRC_CONF|EXEC_ON_READ, "Provide an authorization provider"), + AP_INIT_TAKE2("LuaHookPreTranslateName", register_pre_trans_name_hook, NULL, + OR_ALL, + "Provide a hook for the pre_translate name phase of request processing"), + + AP_INIT_RAW_ARGS(" 501 /* Load mode for lua_load() */ #define lua_load(a,b,c,d) lua_load(a,b,c,d,NULL) -#define lua_resume(a,b) lua_resume(a, NULL, b) + +#if LUA_VERSION_NUM > 503 +#define lua_resume(a,b,c) lua_resume(a, NULL, b, c) +#else +/* ### For version < 5.4, assume that exactly one stack item is on the + * stack, which is what the code did before but seems dubious. 
*/ +#define lua_resume(a,b,c) (*(c) = 1, lua_resume(a, NULL, b)) +#endif + #define luaL_setfuncs_compat(a,b) luaL_setfuncs(a,b,0) #else #define lua_rawlen(L,i) lua_objlen(L, (i)) #define luaL_setfuncs_compat(a,b) luaL_register(a,NULL,b) +#define lua_resume(a,b,c) (*(c) = 1, lua_resume(a, b)) #endif #if LUA_VERSION_NUM > 502 #define lua_dump(a,b,c) lua_dump(a,b,c,0) diff --git a/modules/mappers/config9.m4 b/modules/mappers/config9.m4 index 55a97ab..7120b72 100644 --- a/modules/mappers/config9.m4 +++ b/modules/mappers/config9.m4 @@ -14,6 +14,11 @@ APACHE_MODULE(userdir, mapping of requests to user-specific directories, , , mos APACHE_MODULE(alias, mapping of requests to different filesystem parts, , , yes) APACHE_MODULE(rewrite, rule based URL manipulation, , , most) +if test "x$enable_rewrite" != "xno"; then + # mod_rewrite needs test_char.h + APR_ADDTO(INCLUDES, [-I\$(top_builddir)/server]) +fi + APR_ADDTO(INCLUDES, [-I\$(top_srcdir)/$modpath_current]) APACHE_MODPATH_FINISH diff --git a/modules/mappers/mod_alias.c b/modules/mappers/mod_alias.c index 79d58d8..35eca74 100644 --- a/modules/mappers/mod_alias.c +++ b/modules/mappers/mod_alias.c @@ -37,6 +37,12 @@ #include "ap_expr.h" +#define ALIAS_FLAG_DEFAULT -1 +#define ALIAS_FLAG_OFF 0 +#define ALIAS_FLAG_ON 1 + +#define ALIAS_PRESERVE_PATH_DEFAULT 0 + typedef struct { const char *real; const char *fake; @@ -55,9 +61,12 @@ typedef struct { unsigned int redirect_set:1; apr_array_header_t *redirects; const ap_expr_info_t *alias; + const char *alias_fake; char *handler; const ap_expr_info_t *redirect; int redirect_status; /* 301, 302, 303, 410, etc */ + int allow_relative; /* skip ap_construct_url() */ + int alias_preserve_path; /* map full path */ } alias_dir_conf; module AP_MODULE_DECLARE_DATA alias_module; @@ -80,6 +89,8 @@ static void *create_alias_dir_config(apr_pool_t *p, char *d) alias_dir_conf *a = (alias_dir_conf *) apr_pcalloc(p, sizeof(alias_dir_conf)); a->redirects = apr_array_make(p, 2, sizeof(alias_entry)); + a->allow_relative = ALIAS_FLAG_DEFAULT; + a->alias_preserve_path = ALIAS_FLAG_DEFAULT; return a; } @@ -105,12 +116,19 @@ static void *merge_alias_dir_config(apr_pool_t *p, void *basev, void *overridesv a->redirects = apr_array_append(p, overrides->redirects, base->redirects); a->alias = (overrides->alias_set == 0) ? base->alias : overrides->alias; + a->alias_fake = (overrides->alias_set == 0) ? base->alias_fake : overrides->alias_fake; a->handler = (overrides->alias_set == 0) ? base->handler : overrides->handler; a->alias_set = overrides->alias_set || base->alias_set; a->redirect = (overrides->redirect_set == 0) ? base->redirect : overrides->redirect; a->redirect_status = (overrides->redirect_set == 0) ? base->redirect_status : overrides->redirect_status; a->redirect_set = overrides->redirect_set || base->redirect_set; + a->allow_relative = (overrides->allow_relative != ALIAS_FLAG_DEFAULT) + ? overrides->allow_relative + : base->allow_relative; + a->alias_preserve_path = (overrides->alias_preserve_path != ALIAS_FLAG_DEFAULT) + ? 
overrides->alias_preserve_path + : base->alias_preserve_path; return a; } @@ -210,6 +228,7 @@ static const char *add_alias(cmd_parms *cmd, void *dummy, const char *fake, NULL); } + dirconf->alias_fake = cmd->path; dirconf->handler = cmd->info; dirconf->alias_set = 1; @@ -373,33 +392,6 @@ static const char *add_redirect_regex(cmd_parms *cmd, void *dirconf, return add_redirect_internal(cmd, dirconf, arg1, arg2, arg3, 1); } -static const command_rec alias_cmds[] = -{ - AP_INIT_TAKE12("Alias", add_alias, NULL, RSRC_CONF | ACCESS_CONF, - "a fakename and a realname, or a realname in a Location"), - AP_INIT_TAKE12("ScriptAlias", add_alias, "cgi-script", RSRC_CONF | ACCESS_CONF, - "a fakename and a realname, or a realname in a Location"), - AP_INIT_TAKE123("Redirect", add_redirect, (void *) HTTP_MOVED_TEMPORARILY, - OR_FILEINFO, - "an optional status, then document to be redirected and " - "destination URL"), - AP_INIT_TAKE2("AliasMatch", add_alias_regex, NULL, RSRC_CONF, - "a regular expression and a filename"), - AP_INIT_TAKE2("ScriptAliasMatch", add_alias_regex, "cgi-script", RSRC_CONF, - "a regular expression and a filename"), - AP_INIT_TAKE23("RedirectMatch", add_redirect_regex, - (void *) HTTP_MOVED_TEMPORARILY, OR_FILEINFO, - "an optional status, then a regular expression and " - "destination URL"), - AP_INIT_TAKE2("RedirectTemp", add_redirect2, - (void *) HTTP_MOVED_TEMPORARILY, OR_FILEINFO, - "a document to be redirected, then the destination URL"), - AP_INIT_TAKE2("RedirectPermanent", add_redirect2, - (void *) HTTP_MOVED_PERMANENTLY, OR_FILEINFO, - "a document to be redirected, then the destination URL"), - {NULL} -}; - static int alias_matches(const char *uri, const char *alias_fakename) { const char *aliasp = alias_fakename, *urip = uri; @@ -455,6 +447,17 @@ static char *try_alias(request_rec *r) return PREGSUB_ERROR; } + if (dirconf->alias_fake && dirconf->alias_preserve_path == ALIAS_FLAG_ON) { + int l; + + l = alias_matches(r->uri, dirconf->alias_fake); + + if (l > 0) { + ap_set_context_info(r, dirconf->alias_fake, found); + found = apr_pstrcat(r->pool, found, r->uri + l, NULL); + } + } + if (dirconf->handler) { /* Set handler, and leave a note for mod_cgi */ r->handler = dirconf->handler; apr_table_setn(r->notes, "alias-forced-type", r->handler); @@ -618,31 +621,33 @@ static int translate_alias_redir(request_rec *r) if (ret == PREGSUB_ERROR) return HTTP_INTERNAL_SERVER_ERROR; if (ap_is_HTTP_REDIRECT(status)) { - if (ret[0] == '/') { - char *orig_target = ret; - - ret = ap_construct_url(r->pool, ret, r); - ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(00673) - "incomplete redirection target of '%s' for " - "URI '%s' modified to '%s'", - orig_target, r->uri, ret); - } - if (!ap_is_url(ret)) { - status = HTTP_INTERNAL_SERVER_ERROR; - ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(00674) - "cannot redirect '%s' to '%s'; " - "target is not a valid absoluteURI or abs_path", - r->uri, ret); - } - else { - /* append requested query only, if the config didn't - * supply its own. 
- */ - if (r->args && !ap_strchr(ret, '?')) { - ret = apr_pstrcat(r->pool, ret, "?", r->args, NULL); + alias_dir_conf *dirconf = (alias_dir_conf *) + ap_get_module_config(r->per_dir_config, &alias_module); + if (dirconf->allow_relative != ALIAS_FLAG_ON || ret[0] != '/') { + if (ret[0] == '/') { + char *orig_target = ret; + + ret = ap_construct_url(r->pool, ret, r); + ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(00673) + "incomplete redirection target of '%s' for " + "URI '%s' modified to '%s'", + orig_target, r->uri, ret); } - apr_table_setn(r->headers_out, "Location", ret); + if (!ap_is_url(ret)) { + ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(00674) + "cannot redirect '%s' to '%s'; " + "target is not a valid absoluteURI or abs_path", + r->uri, ret); + return HTTP_INTERNAL_SERVER_ERROR; + } + } + /* append requested query only, if the config didn't + * supply its own. + */ + if (r->args && !ap_strchr(ret, '?')) { + ret = apr_pstrcat(r->pool, ret, "?", r->args, NULL); } + apr_table_setn(r->headers_out, "Location", ret); } return status; } @@ -673,31 +678,31 @@ static int fixup_redir(request_rec *r) if (ret == PREGSUB_ERROR) return HTTP_INTERNAL_SERVER_ERROR; if (ap_is_HTTP_REDIRECT(status)) { - if (ret[0] == '/') { - char *orig_target = ret; - - ret = ap_construct_url(r->pool, ret, r); - ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(00675) - "incomplete redirection target of '%s' for " - "URI '%s' modified to '%s'", - orig_target, r->uri, ret); - } - if (!ap_is_url(ret)) { - status = HTTP_INTERNAL_SERVER_ERROR; - ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(00676) - "cannot redirect '%s' to '%s'; " - "target is not a valid absoluteURI or abs_path", - r->uri, ret); - } - else { - /* append requested query only, if the config didn't - * supply its own. - */ - if (r->args && !ap_strchr(ret, '?')) { - ret = apr_pstrcat(r->pool, ret, "?", r->args, NULL); + if (dirconf->allow_relative != ALIAS_FLAG_ON || ret[0] != '/') { + if (ret[0] == '/') { + char *orig_target = ret; + + ret = ap_construct_url(r->pool, ret, r); + ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(00675) + "incomplete redirection target of '%s' for " + "URI '%s' modified to '%s'", + orig_target, r->uri, ret); } - apr_table_setn(r->headers_out, "Location", ret); + if (!ap_is_url(ret)) { + ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(00676) + "cannot redirect '%s' to '%s'; " + "target is not a valid absoluteURI or abs_path", + r->uri, ret); + return HTTP_INTERNAL_SERVER_ERROR; + } + } + /* append requested query only, if the config didn't + * supply its own. 
+ */ + if (r->args && !ap_strchr(ret, '?')) { + ret = apr_pstrcat(r->pool, ret, "?", r->args, NULL); } + apr_table_setn(r->headers_out, "Location", ret); } return status; } @@ -705,6 +710,41 @@ static int fixup_redir(request_rec *r) return DECLINED; } +static const command_rec alias_cmds[] = +{ + AP_INIT_TAKE12("Alias", add_alias, NULL, RSRC_CONF | ACCESS_CONF, + "a fakename and a realname, or a realname in a Location"), + AP_INIT_TAKE12("ScriptAlias", add_alias, "cgi-script", RSRC_CONF | ACCESS_CONF, + "a fakename and a realname, or a realname in a Location"), + AP_INIT_TAKE123("Redirect", add_redirect, (void *) HTTP_MOVED_TEMPORARILY, + OR_FILEINFO, + "an optional status, then document to be redirected and " + "destination URL"), + AP_INIT_TAKE2("AliasMatch", add_alias_regex, NULL, RSRC_CONF, + "a regular expression and a filename"), + AP_INIT_TAKE2("ScriptAliasMatch", add_alias_regex, "cgi-script", RSRC_CONF, + "a regular expression and a filename"), + AP_INIT_TAKE23("RedirectMatch", add_redirect_regex, + (void *) HTTP_MOVED_TEMPORARILY, OR_FILEINFO, + "an optional status, then a regular expression and " + "destination URL"), + AP_INIT_TAKE2("RedirectTemp", add_redirect2, + (void *) HTTP_MOVED_TEMPORARILY, OR_FILEINFO, + "a document to be redirected, then the destination URL"), + AP_INIT_TAKE2("RedirectPermanent", add_redirect2, + (void *) HTTP_MOVED_PERMANENTLY, OR_FILEINFO, + "a document to be redirected, then the destination URL"), + AP_INIT_FLAG("RedirectRelative", ap_set_flag_slot, + (void*)APR_OFFSETOF(alias_dir_conf, allow_relative), OR_FILEINFO, + "Set to ON to allow relative redirect targets to be issued as-is"), + AP_INIT_FLAG("AliasPreservePath", ap_set_flag_slot, + (void*)APR_OFFSETOF(alias_dir_conf, alias_preserve_path), OR_FILEINFO, + "Set to ON to map the full path after the fakename to the realname."), + + {NULL} +}; + + static void register_hooks(apr_pool_t *p) { static const char * const aszSucc[]={ "mod_userdir.c", diff --git a/modules/mappers/mod_imagemap.c b/modules/mappers/mod_imagemap.c index 187a500..206c0b6 100644 --- a/modules/mappers/mod_imagemap.c +++ b/modules/mappers/mod_imagemap.c @@ -319,7 +319,7 @@ static void read_quoted(char **string, char **quoted_part) static const char *imap_url(request_rec *r, const char *base, const char *value) { /* translates a value into a URL. */ - int slen, clen; + apr_size_t slen, clen; char *string_pos = NULL; const char *string_pos_const = NULL; char *directory = NULL; diff --git a/modules/mappers/mod_negotiation.c b/modules/mappers/mod_negotiation.c index b6dfedc..c056b28 100644 --- a/modules/mappers/mod_negotiation.c +++ b/modules/mappers/mod_negotiation.c @@ -774,7 +774,7 @@ static enum header_state get_header_line(char *buffer, int len, apr_file_t *map) /* We need to shortcut the rest of this block following the Body: * tag - we will not look for continutation after this line. 
*/ - if (!strncasecmp(buffer, "Body:", 5)) + if (!ap_cstr_casecmpn(buffer, "Body:", 5)) return header_seen; while (apr_file_getc(&c, map) != APR_EOF) { @@ -988,19 +988,17 @@ static int read_type_map(apr_file_t **map, negotiation_state *neg, has_content = 1; } else if (!strncmp(buffer, "content-length:", 15)) { - char *errp; - apr_off_t number; + apr_off_t clen; body1 = ap_get_token(neg->pool, &body, 0); - if (apr_strtoff(&number, body1, &errp, 10) != APR_SUCCESS - || *errp || number < 0) { + if (!ap_parse_strict_length(&clen, body1)) { ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(00684) "Parse error in type map, Content-Length: " "'%s' in %s is invalid.", body1, r->filename); break; } - mime_info.bytes = number; + mime_info.bytes = clen; has_content = 1; } else if (!strncmp(buffer, "content-language:", 17)) { diff --git a/modules/mappers/mod_rewrite.c b/modules/mappers/mod_rewrite.c index 68a33b6..bbcc11b 100644 --- a/modules/mappers/mod_rewrite.c +++ b/modules/mappers/mod_rewrite.c @@ -55,6 +55,12 @@ #include "apr_global_mutex.h" #include "apr_dbm.h" #include "apr_dbd.h" + +#include "apr_version.h" +#if !APR_VERSION_AT_LEAST(2,0,0) +#include "apu_version.h" +#endif + #include "mod_dbd.h" #if APR_HAS_THREADS @@ -93,14 +99,15 @@ #include "http_core.h" #include "http_log.h" #include "http_protocol.h" +#include "http_ssl.h" #include "http_vhost.h" #include "util_mutex.h" -#include "mod_ssl.h" - #include "mod_rewrite.h" #include "ap_expr.h" +#include "test_char.h" + static ap_dbd_t *(*dbd_acquire)(request_rec*) = NULL; static void (*dbd_prepare)(server_rec*, const char*, const char*) = NULL; static const char* really_last_key = "rewrite_really_last"; @@ -168,6 +175,8 @@ static const char* really_last_key = "rewrite_really_last"; #define RULEFLAG_END (1<<17) #define RULEFLAG_ESCAPENOPLUS (1<<18) #define RULEFLAG_QSLAST (1<<19) +#define RULEFLAG_QSNONE (1<<20) /* programattic only */ +#define RULEFLAG_ESCAPECTLS (1<<21) /* return code of the rewrite rule * the result may be escaped - or not @@ -321,7 +330,8 @@ typedef struct { data_item *cookie; /* added cookies */ int skip; /* number of next rules to skip */ int maxrounds; /* limit on number of loops with N flag */ - char *escapes; /* specific backref escapes */ + const char *escapes; /* specific backref escapes */ + const char *noescapes; /* specific backref chars not to escape */ } rewriterule_entry; typedef struct { @@ -421,9 +431,9 @@ static apr_global_mutex_t *rewrite_mapr_lock_acquire = NULL; static const char *rewritemap_mutex_type = "rewrite-map"; /* Optional functions imported from mod_ssl when loaded: */ -static APR_OPTIONAL_FN_TYPE(ssl_var_lookup) *rewrite_ssl_lookup = NULL; -static APR_OPTIONAL_FN_TYPE(ssl_is_https) *rewrite_is_https = NULL; -static char *escape_backref(apr_pool_t *p, const char *path, const char *escapeme, int noplus); +static char *escape_backref(apr_pool_t *p, const char *path, + const char *escapeme, const char *noescapeme, + int flags); /* * +-------------------------------------------------------+ @@ -524,7 +534,7 @@ static unsigned is_absolute_uri(char *uri, int *supportsqs) switch (*uri++) { case 'a': case 'A': - if (!strncasecmp(uri, "jp://", 5)) { /* ajp:// */ + if (!ap_cstr_casecmpn(uri, "jp://", 5)) { /* ajp:// */ *sqs = 1; return 6; } @@ -532,7 +542,7 @@ static unsigned is_absolute_uri(char *uri, int *supportsqs) case 'b': case 'B': - if (!strncasecmp(uri, "alancer://", 10)) { /* balancer:// */ + if (!ap_cstr_casecmpn(uri, "alancer://", 10)) { /* balancer:// */ *sqs = 1; return 11; } @@ -540,10 
+550,10 @@ static unsigned is_absolute_uri(char *uri, int *supportsqs) case 'f': case 'F': - if (!strncasecmp(uri, "tp://", 5)) { /* ftp:// */ + if (!ap_cstr_casecmpn(uri, "tp://", 5)) { /* ftp:// */ return 6; } - if (!strncasecmp(uri, "cgi://", 6)) { /* fcgi:// */ + if (!ap_cstr_casecmpn(uri, "cgi://", 6)) { /* fcgi:// */ *sqs = 1; return 7; } @@ -551,26 +561,26 @@ static unsigned is_absolute_uri(char *uri, int *supportsqs) case 'g': case 'G': - if (!strncasecmp(uri, "opher://", 8)) { /* gopher:// */ + if (!ap_cstr_casecmpn(uri, "opher://", 8)) { /* gopher:// */ return 9; } break; case 'h': case 'H': - if (!strncasecmp(uri, "ttp://", 6)) { /* http:// */ + if (!ap_cstr_casecmpn(uri, "ttp://", 6)) { /* http:// */ *sqs = 1; return 7; } - else if (!strncasecmp(uri, "ttps://", 7)) { /* https:// */ + else if (!ap_cstr_casecmpn(uri, "ttps://", 7)) { /* https:// */ *sqs = 1; return 8; } - else if (!strncasecmp(uri, "2://", 4)) { /* h2:// */ + else if (!ap_cstr_casecmpn(uri, "2://", 4)) { /* h2:// */ *sqs = 1; return 5; } - else if (!strncasecmp(uri, "2c://", 5)) { /* h2c:// */ + else if (!ap_cstr_casecmpn(uri, "2c://", 5)) { /* h2c:// */ *sqs = 1; return 6; } @@ -578,14 +588,14 @@ static unsigned is_absolute_uri(char *uri, int *supportsqs) case 'l': case 'L': - if (!strncasecmp(uri, "dap://", 6)) { /* ldap:// */ + if (!ap_cstr_casecmpn(uri, "dap://", 6)) { /* ldap:// */ return 7; } break; case 'm': case 'M': - if (!strncasecmp(uri, "ailto:", 6)) { /* mailto: */ + if (!ap_cstr_casecmpn(uri, "ailto:", 6)) { /* mailto: */ *sqs = 1; return 7; } @@ -593,17 +603,17 @@ static unsigned is_absolute_uri(char *uri, int *supportsqs) case 'n': case 'N': - if (!strncasecmp(uri, "ews:", 4)) { /* news: */ + if (!ap_cstr_casecmpn(uri, "ews:", 4)) { /* news: */ return 5; } - else if (!strncasecmp(uri, "ntp://", 6)) { /* nntp:// */ + else if (!ap_cstr_casecmpn(uri, "ntp://", 6)) { /* nntp:// */ return 7; } break; case 's': case 'S': - if (!strncasecmp(uri, "cgi://", 6)) { /* scgi:// */ + if (!ap_cstr_casecmpn(uri, "cgi://", 6)) { /* scgi:// */ *sqs = 1; return 7; } @@ -611,15 +621,22 @@ static unsigned is_absolute_uri(char *uri, int *supportsqs) case 'w': case 'W': - if (!strncasecmp(uri, "s://", 4)) { /* ws:// */ + if (!ap_cstr_casecmpn(uri, "s://", 4)) { /* ws:// */ *sqs = 1; return 5; } - else if (!strncasecmp(uri, "ss://", 5)) { /* wss:// */ + else if (!ap_cstr_casecmpn(uri, "ss://", 5)) { /* wss:// */ *sqs = 1; return 6; } break; + + case 'u': + case 'U': + if (!ap_cstr_casecmpn(uri, "nix:", 4)) { /* unix: */ + *sqs = 1; + return (uri[4] == '/' && uri[5] == '/') ? 7 : 5; + } } return 0; @@ -643,14 +660,21 @@ static APR_INLINE unsigned char *c2x(unsigned what, unsigned char prefix, * Escapes a backreference in a similar way as php's urlencode does. * Based on ap_os_escape_path in server/util.c */ -static char *escape_backref(apr_pool_t *p, const char *path, const char *escapeme, int noplus) { - char *copy = apr_palloc(p, 3 * strlen(path) + 3); +static char *escape_backref(apr_pool_t *p, const char *path, + const char *escapeme, const char *noescapeme, + int flags) +{ + char *copy = apr_palloc(p, 3 * strlen(path) + 1); const unsigned char *s = (const unsigned char *)path; unsigned char *d = (unsigned char *)copy; - unsigned c; + int noplus = (flags & RULEFLAG_ESCAPENOPLUS) != 0; + int ctls = (flags & RULEFLAG_ESCAPECTLS) != 0; + unsigned char c; while ((c = *s)) { - if (!escapeme) { + if (((ctls ? 
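The new 'u'/'U' case above lets is_absolute_uri() recognize unix: targets, so rewrites aimed at mod_proxy Unix-domain-socket URLs are treated like other proxyable schemes. A hypothetical [P] rule using mod_proxy's unix:...|scheme:// form (socket path and backend URL are placeholders, and mod_proxy plus mod_proxy_fcgi are assumed to be loaded):

    RewriteEngine On
    # hand matching requests to a local FastCGI daemon over a unix domain socket
    RewriteRule "^/app/(.*)$" "unix:/var/run/app.sock|fcgi://localhost/app/$1" [P,L]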
!TEST_CHAR(c, T_VCHAR_OBSTEXT) : !escapeme) + || (escapeme && ap_strchr_c(escapeme, c))) + && (!noescapeme || !ap_strchr_c(noescapeme, c))) { if (apr_isalnum(c) || c == '_') { *d++ = c; } @@ -661,23 +685,8 @@ static char *escape_backref(apr_pool_t *p, const char *path, const char *escapem d = c2x(c, '%', d); } } - else { - const char *esc = escapeme; - while (*esc) { - if (c == *esc) { - if (c == ' ' && !noplus) { - *d++ = '+'; - } - else { - d = c2x(c, '%', d); - } - break; - } - ++esc; - } - if (!*esc) { - *d++ = c; - } + else { + *d++ = c; } ++s; } @@ -723,7 +732,7 @@ static char *escape_absolute_uri(apr_pool_t *p, char *uri, unsigned scheme) * [dn ["?" [attributes] ["?" [scope] * ["?" [filter] ["?" extensions]]]]]] */ - if (!strncasecmp(uri, "ldap", 4)) { + if (!ap_cstr_casecmpn(uri, "ldap", 4)) { char *token[5]; int c = 0; @@ -759,27 +768,35 @@ static char *escape_absolute_uri(apr_pool_t *p, char *uri, unsigned scheme) * split out a QUERY_STRING part from * the current URI string */ -static void splitout_queryargs(request_rec *r, int qsappend, int qsdiscard, - int qslast) +static void splitout_queryargs(request_rec *r, int flags) { char *q; - int split; + int split, skip; + int qsappend = flags & RULEFLAG_QSAPPEND; + int qsdiscard = flags & RULEFLAG_QSDISCARD; + int qslast = flags & RULEFLAG_QSLAST; + + if (flags & RULEFLAG_QSNONE) { + rewritelog((r, 2, NULL, "discarding query string, no parse from substitution")); + r->args = NULL; + return; + } /* don't touch, unless it's a scheme for which a query string makes sense. * See RFC 1738 and RFC 2368. */ - if (is_absolute_uri(r->filename, &split) + if ((skip = is_absolute_uri(r->filename, &split)) && !split) { r->args = NULL; /* forget the query that's still flying around */ return; } - if ( qsdiscard ) { + if (qsdiscard) { r->args = NULL; /* Discard query string */ rewritelog((r, 2, NULL, "discarding query string")); } - q = qslast ? ap_strrchr(r->filename, '?') : ap_strchr(r->filename, '?'); + q = qslast ? ap_strrchr(r->filename + skip, '?') : ap_strchr(r->filename + skip, '?'); if (q != NULL) { char *olduri; @@ -788,7 +805,7 @@ static void splitout_queryargs(request_rec *r, int qsappend, int qsdiscard, olduri = apr_pstrdup(r->pool, r->filename); *q++ = '\0'; if (qsappend) { - if (*q) { + if (*q) { r->args = apr_pstrcat(r->pool, q, "&" , r->args, NULL); } } @@ -796,9 +813,9 @@ static void splitout_queryargs(request_rec *r, int qsappend, int qsdiscard, r->args = apr_pstrdup(r->pool, q); } - if (r->args) { + if (r->args) { len = strlen(r->args); - + if (!len) { r->args = NULL; } @@ -810,8 +827,6 @@ static void splitout_queryargs(request_rec *r, int qsappend, int qsdiscard, rewritelog((r, 3, NULL, "split uri=%s -> uri=%s, args=%s", olduri, r->filename, r->args ? 
r->args : "")); } - - return; } /* @@ -825,7 +840,7 @@ static void reduce_uri(request_rec *r) cp = (char *)ap_http_scheme(r); l = strlen(cp); if ( strlen(r->filename) > l+3 - && strncasecmp(r->filename, cp, l) == 0 + && ap_cstr_casecmpn(r->filename, cp, l) == 0 && r->filename[l] == ':' && r->filename[l+1] == '/' && r->filename[l+2] == '/' ) { @@ -1029,6 +1044,7 @@ static void set_cache_value(const char *name, apr_time_t t, char *key, #endif return; } + apr_pool_tag(p, "rewrite_cachedmap"); map = apr_palloc(cachep->pool, sizeof(cachedmap)); map->pool = p; @@ -1106,6 +1122,7 @@ static int init_cache(apr_pool_t *p) cachep = NULL; /* turns off cache */ return 0; } + apr_pool_tag(cachep->pool, "rewrite_cachep"); cachep->maps = apr_hash_make(cachep->pool); #if APR_HAS_THREADS @@ -1353,12 +1370,31 @@ static char *lookup_map_txtfile(request_rec *r, const char *file, char *key) static char *lookup_map_dbmfile(request_rec *r, const char *file, const char *dbmtype, char *key) { +#if APU_MAJOR_VERSION > 1 || (APU_MAJOR_VERSION == 1 && APU_MINOR_VERSION >= 7) + const apr_dbm_driver_t *driver; + const apu_err_t *err; +#endif apr_dbm_t *dbmfp = NULL; apr_datum_t dbmkey; apr_datum_t dbmval; char *value; apr_status_t rv; +#if APU_MAJOR_VERSION > 1 || (APU_MAJOR_VERSION == 1 && APU_MINOR_VERSION >= 7) + if ((rv = apr_dbm_get_driver(&driver, dbmtype, &err, + r->pool)) != APR_SUCCESS) { + ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(10287) + "mod_rewrite: can't load DBM library '%s': %s", + err->reason, err->msg); + return NULL; + } + if ((rv = apr_dbm_open2(&dbmfp, driver, file, APR_DBM_READONLY, + APR_OS_DEFAULT, r->pool)) != APR_SUCCESS) { + ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(00656) + "mod_rewrite: can't open DBM RewriteMap %s", file); + return NULL; + } +#else if ((rv = apr_dbm_open_ex(&dbmfp, dbmtype, file, APR_DBM_READONLY, APR_OS_DEFAULT, r->pool)) != APR_SUCCESS) { @@ -1366,6 +1402,7 @@ static char *lookup_map_dbmfile(request_rec *r, const char *file, "mod_rewrite: can't open DBM RewriteMap %s", file); return NULL; } +#endif dbmkey.dptr = key; dbmkey.dsize = strlen(key); @@ -1864,8 +1901,8 @@ static char *lookup_variable(char *var, rewrite_ctx *ctx) result = getenv(var); } } - else if (var[4] && !strncasecmp(var, "SSL", 3) && rewrite_ssl_lookup) { - result = rewrite_ssl_lookup(r->pool, r->server, r->connection, r, + else if (var[4] && !strncasecmp(var, "SSL", 3)) { + result = ap_ssl_var_lookup(r->pool, r->server, r->connection, r, var + 4); } } @@ -1963,7 +2000,7 @@ static char *lookup_variable(char *var, rewrite_ctx *ctx) case 5: if (!strcmp(var, "HTTPS")) { - int flag = rewrite_is_https && rewrite_is_https(r->connection); + int flag = ap_ssl_conn_is_ssl(r->connection); return apr_pstrdup(r->pool, flag ? "on" : "off"); } break; @@ -2430,7 +2467,8 @@ static char *do_expand(char *input, rewrite_ctx *ctx, rewriterule_entry *entry) /* escape the backreference */ char *tmp2, *tmp; tmp = apr_pstrmemdup(pool, bri->source + bri->regmatch[n].rm_so, span); - tmp2 = escape_backref(pool, tmp, entry->escapes, entry->flags & RULEFLAG_ESCAPENOPLUS); + tmp2 = escape_backref(pool, tmp, entry->escapes, entry->noescapes, + entry->flags); rewritelog((ctx->r, 5, ctx->perdir, "escaping backreference '%s' to '%s'", tmp, tmp2)); @@ -2538,6 +2576,7 @@ static void add_cookie(request_rec *r, char *s) char *path; char *secure; char *httponly; + char *samesite; char *tok_cntx; char *cookie; @@ -2572,6 +2611,7 @@ static void add_cookie(request_rec *r, char *s) path = expires ? 
apr_strtok(NULL, sep, &tok_cntx) : NULL; secure = path ? apr_strtok(NULL, sep, &tok_cntx) : NULL; httponly = secure ? apr_strtok(NULL, sep, &tok_cntx) : NULL; + samesite = httponly ? apr_strtok(NULL, sep, &tok_cntx) : NULL; if (expires) { apr_time_exp_t tms; @@ -2599,18 +2639,23 @@ static void add_cookie(request_rec *r, char *s) : NULL, expires ? (exp_time ? exp_time : "") : NULL, - (secure && (!strcasecmp(secure, "true") + (secure && (!ap_cstr_casecmp(secure, "true") || !strcmp(secure, "1") - || !strcasecmp(secure, + || !ap_cstr_casecmp(secure, "secure"))) ? "; secure" : NULL, - (httponly && (!strcasecmp(httponly, "true") + (httponly && (!ap_cstr_casecmp(httponly, "true") || !strcmp(httponly, "1") - || !strcasecmp(httponly, + || !ap_cstr_casecmp(httponly, "HttpOnly"))) ? "; HttpOnly" : NULL, NULL); + if (samesite && strcmp(samesite, "0") && ap_cstr_casecmp(samesite,"false")) { + cookie = apr_pstrcat(rmain->pool, cookie, "; SameSite=", + samesite, NULL); + } + apr_table_addn(rmain->err_headers_out, "Set-Cookie", cookie); apr_pool_userdata_set("set", notename, NULL, rmain->pool); rewritelog((rmain, 5, NULL, "setting cookie '%s'", cookie)); @@ -2724,7 +2769,7 @@ static apr_status_t rewritelock_remove(void *data) * XXX: what an inclined parser. Seems we have to leave it so * for backwards compat. *sigh* */ -static int parseargline(char *str, char **a1, char **a2, char **a3) +static int parseargline(char *str, char **a1, char **a2, char **a2_end, char **a3) { char quote; @@ -2775,8 +2820,10 @@ static int parseargline(char *str, char **a1, char **a2, char **a3) if (!*str) { *a3 = NULL; /* 3rd argument is optional */ + *a2_end = str; return 0; } + *a2_end = str; *str++ = '\0'; while (apr_isspace(*str)) { @@ -3316,7 +3363,7 @@ static const char *cmd_rewritecond(cmd_parms *cmd, void *in_dconf, rewrite_server_conf *sconf; rewritecond_entry *newcond; ap_regex_t *regexp; - char *a1 = NULL, *a2 = NULL, *a3 = NULL; + char *a1 = NULL, *a2 = NULL, *a2_end, *a3 = NULL; const char *err; sconf = ap_get_module_config(cmd->server->module_config, &rewrite_module); @@ -3334,7 +3381,7 @@ static const char *cmd_rewritecond(cmd_parms *cmd, void *in_dconf, * of the argument line. So we can use a1 .. a3 without * copying them again. */ - if (parseargline(str, &a1, &a2, &a3)) { + if (parseargline(str, &a1, &a2, &a2_end, &a3)) { return apr_pstrcat(cmd->pool, "RewriteCond: bad argument line '", str, "'", NULL); } @@ -3493,13 +3540,24 @@ static const char *cmd_rewriterule_setflag(apr_pool_t *p, void *_cfg, case 'B': if (!*key || !strcasecmp(key, "ackrefescaping")) { cfg->flags |= RULEFLAG_ESCAPEBACKREF; - if (val && *val) { + if (val && *val) { cfg->escapes = val; } } + else if (!strcasecmp(key, "NE")) { + if (val && *val) { + cfg->noescapes = val; + } + else { + return "flag 'BNE' wants a list of characters (i.e. 
[BNE=...])"; + } + } else if (!strcasecmp(key, "NP") || !strcasecmp(key, "ackrefernoplus")) { cfg->flags |= RULEFLAG_ESCAPENOPLUS; } + else if (!strcasecmp(key, "CTLS")) { + cfg->flags |= RULEFLAG_ESCAPECTLS|RULEFLAG_ESCAPEBACKREF; + } else { ++error; } @@ -3742,7 +3800,7 @@ static const char *cmd_rewriterule(cmd_parms *cmd, void *in_dconf, rewrite_server_conf *sconf; rewriterule_entry *newrule; ap_regex_t *regexp; - char *a1 = NULL, *a2 = NULL, *a3 = NULL; + char *a1 = NULL, *a2 = NULL, *a2_end, *a3 = NULL; const char *err; sconf = ap_get_module_config(cmd->server->module_config, &rewrite_module); @@ -3756,12 +3814,11 @@ static const char *cmd_rewriterule(cmd_parms *cmd, void *in_dconf, } /* parse the argument line ourself */ - if (parseargline(str, &a1, &a2, &a3)) { + if (parseargline(str, &a1, &a2, &a2_end, &a3)) { return apr_pstrcat(cmd->pool, "RewriteRule: bad argument line '", str, "'", NULL); } - /* arg3: optional flags field */ newrule->forced_mimetype = NULL; newrule->forced_handler = NULL; newrule->forced_responsecode = HTTP_MOVED_TEMPORARILY; @@ -3770,6 +3827,9 @@ static const char *cmd_rewriterule(cmd_parms *cmd, void *in_dconf, newrule->cookie = NULL; newrule->skip = 0; newrule->maxrounds = REWRITE_MAX_ROUNDS; + newrule->escapes = newrule->noescapes = NULL; + + /* arg3: optional flags field */ if (a3 != NULL) { if ((err = cmd_parseflagfield(cmd->pool, newrule, a3, cmd_rewriterule_setflag)) != NULL) { @@ -3803,6 +3863,25 @@ static const char *cmd_rewriterule(cmd_parms *cmd, void *in_dconf, newrule->flags |= RULEFLAG_NOSUB; } + if (*(a2_end-1) == '?') { + /* a literal ? at the end of the unsubstituted rewrite rule */ + if (newrule->flags & RULEFLAG_QSAPPEND) { + /* with QSA, splitout_queryargs will safely handle it if RULEFLAG_QSLAST is set */ + newrule->flags |= RULEFLAG_QSLAST; + } + else { + /* avoid getting a query string via inadvertent capture */ + newrule->flags |= RULEFLAG_QSNONE; + /* trailing ? has done its job, but splitout_queryargs will not chop it off */ + *(a2_end-1) = '\0'; + } + } + else if (newrule->flags & RULEFLAG_QSDISCARD) { + if (NULL == ap_strchr(newrule->output, '?')) { + newrule->flags |= RULEFLAG_QSNONE; + } + } + /* now, if the server or per-dir config holds an * array of RewriteCond entries, we take it for us * and clear the array @@ -4090,7 +4169,7 @@ static int apply_rewrite_rule(rewriterule_entry *p, rewrite_ctx *ctx) } /* Additionally we strip the physical path from the url to match - * it independent from the underlaying filesystem. + * it independent from the underlying filesystem. */ if (!is_proxyreq && strlen(ctx->uri) >= dirlen && !strncmp(ctx->uri, ctx->perdir, dirlen)) { @@ -4208,9 +4287,7 @@ static int apply_rewrite_rule(rewriterule_entry *p, rewrite_ctx *ctx) r->path_info = NULL; } - splitout_queryargs(r, p->flags & RULEFLAG_QSAPPEND, - p->flags & RULEFLAG_QSDISCARD, - p->flags & RULEFLAG_QSLAST); + splitout_queryargs(r, p->flags); /* Add the previously stripped per-directory location prefix, unless * (1) it's an absolute URL path and @@ -4516,9 +4593,6 @@ static int post_config(apr_pool_t *p, } } - rewrite_ssl_lookup = APR_RETRIEVE_OPTIONAL_FN(ssl_var_lookup); - rewrite_is_https = APR_RETRIEVE_OPTIONAL_FN(ssl_is_https); - return OK; } @@ -4692,8 +4766,25 @@ static int hook_uri2file(request_rec *r) } if (rulestatus) { - unsigned skip; - apr_size_t flen; + apr_size_t flen = r->filename ? strlen(r->filename) : 0; + unsigned skip_absolute = flen ? 
is_absolute_uri(r->filename, NULL) : 0; + int to_proxyreq = (flen > 6 && strncmp(r->filename, "proxy:", 6) == 0); + int will_escape = skip_absolute && (rulestatus != ACTION_NOESCAPE); + + if (r->args + && !will_escape + && *(ap_scan_vchar_obstext(r->args))) { + /* + * We have a raw control character or a ' ' in r->args. + * Correct encoding was missed. + * Correct encoding was missed and we're not going to escape + * it before returning. + */ + ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(10410) + "Rewritten query string contains control " + "characters or spaces"); + return HTTP_FORBIDDEN; + } if (ACTION_STATUS == rulestatus) { int n = r->status; @@ -4702,8 +4793,7 @@ static int hook_uri2file(request_rec *r) return n; } - flen = r->filename ? strlen(r->filename) : 0; - if (flen > 6 && strncmp(r->filename, "proxy:", 6) == 0) { + if (to_proxyreq) { /* it should be go on as an internal proxy request */ /* check if the proxy module is enabled, so @@ -4745,7 +4835,7 @@ static int hook_uri2file(request_rec *r) r->filename)); return OK; } - else if ((skip = is_absolute_uri(r->filename, NULL)) > 0) { + else if (skip_absolute > 0) { int n; /* it was finally rewritten to a remote URL */ @@ -4753,7 +4843,7 @@ static int hook_uri2file(request_rec *r) if (rulestatus != ACTION_NOESCAPE) { rewritelog((r, 1, NULL, "escaping %s for redirect", r->filename)); - r->filename = escape_absolute_uri(r->pool, r->filename, skip); + r->filename = escape_absolute_uri(r->pool, r->filename, skip_absolute); } /* append the QUERY_STRING part */ @@ -4977,7 +5067,26 @@ static int hook_fixup(request_rec *r) */ rulestatus = apply_rewrite_list(r, dconf->rewriterules, dconf->directory); if (rulestatus) { - unsigned skip; + unsigned skip_absolute = is_absolute_uri(r->filename, NULL); + int to_proxyreq = 0; + int will_escape = 0; + + l = strlen(r->filename); + to_proxyreq = l > 6 && strncmp(r->filename, "proxy:", 6) == 0; + will_escape = skip_absolute && (rulestatus != ACTION_NOESCAPE); + + if (r->args + && !will_escape + && *(ap_scan_vchar_obstext(r->args))) { + /* + * We have a raw control character or a ' ' in r->args. + * Correct encoding was missed. 
+ */ + ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(10411) + "Rewritten query string contains control " + "characters or spaces"); + return HTTP_FORBIDDEN; + } if (ACTION_STATUS == rulestatus) { int n = r->status; @@ -4986,8 +5095,7 @@ static int hook_fixup(request_rec *r) return n; } - l = strlen(r->filename); - if (l > 6 && strncmp(r->filename, "proxy:", 6) == 0) { + if (to_proxyreq) { /* it should go on as an internal proxy request */ /* make sure the QUERY_STRING and @@ -5011,7 +5119,7 @@ static int hook_fixup(request_rec *r) "%s [OK]", r->filename)); return OK; } - else if ((skip = is_absolute_uri(r->filename, NULL)) > 0) { + else if (skip_absolute > 0) { /* it was finally rewritten to a remote URL */ /* because we are in a per-dir context @@ -5020,7 +5128,7 @@ static int hook_fixup(request_rec *r) */ if (dconf->baseurl != NULL) { /* skip 'scheme://' */ - cp = r->filename + skip; + cp = r->filename + skip_absolute; if ((cp = ap_strchr(cp, '/')) != NULL && *(++cp)) { rewritelog((r, 2, dconf->directory, @@ -5065,7 +5173,7 @@ static int hook_fixup(request_rec *r) if (rulestatus != ACTION_NOESCAPE) { rewritelog((r, 1, dconf->directory, "escaping %s for redirect", r->filename)); - r->filename = escape_absolute_uri(r->pool, r->filename, skip); + r->filename = escape_absolute_uri(r->pool, r->filename, skip_absolute); } /* append the QUERY_STRING part */ diff --git a/modules/mappers/mod_rewrite.mak b/modules/mappers/mod_rewrite.mak index 3b08cab..860dd8b 100644 --- a/modules/mappers/mod_rewrite.mak +++ b/modules/mappers/mod_rewrite.mak @@ -62,7 +62,7 @@ CLEAN : if not exist "$(OUTDIR)/$(NULL)" mkdir "$(OUTDIR)" CPP=cl.exe -CPP_PROJ=/nologo /MD /W3 /Zi /O2 /Oy- /I "../../include" /I "../database" /I "../ssl" /I "../../srclib/apr/include" /I "../../srclib/apr-util/include" /D "NDEBUG" /D "WIN32" /D "_WINDOWS" /Fo"$(INTDIR)\\" /Fd"$(INTDIR)\mod_rewrite_src" /FD /c +CPP_PROJ=/nologo /MD /W3 /Zi /O2 /Oy- /I "../../include" /I "../database" /I "../ssl" /I "../../srclib/apr/include" /I "../../srclib/apr-util/include" /I "../../server" /D "NDEBUG" /D "WIN32" /D "_WINDOWS" /Fo"$(INTDIR)\\" /Fd"$(INTDIR)\mod_rewrite_src" /FD /c .c{$(INTDIR)}.obj:: $(CPP) @<< @@ -166,7 +166,7 @@ CLEAN : if not exist "$(OUTDIR)/$(NULL)" mkdir "$(OUTDIR)" CPP=cl.exe -CPP_PROJ=/nologo /MDd /W3 /Zi /Od /I "../../include" /I "../database" /I "../ssl" /I "../../srclib/apr/include" /I "../../srclib/apr-util/include" /D "_DEBUG" /D "WIN32" /D "_WINDOWS" /Fo"$(INTDIR)\\" /Fd"$(INTDIR)\mod_rewrite_src" /FD /EHsc /c +CPP_PROJ=/nologo /MDd /W3 /Zi /Od /I "../../include" /I "../database" /I "../ssl" /I "../../srclib/apr/include" /I "../../srclib/apr-util/include" /I "../../server" /D "_DEBUG" /D "WIN32" /D "_WINDOWS" /Fo"$(INTDIR)\\" /Fd"$(INTDIR)\mod_rewrite_src" /FD /EHsc /c .c{$(INTDIR)}.obj:: $(CPP) @<< diff --git a/modules/mappers/mod_speling.c b/modules/mappers/mod_speling.c index 3e97423..2ed65eb 100644 --- a/modules/mappers/mod_speling.c +++ b/modules/mappers/mod_speling.c @@ -22,8 +22,6 @@ #define APR_WANT_STRFUNC #include "apr_want.h" -#define WANT_BASENAME_MATCH - #include "httpd.h" #include "http_core.h" #include "http_config.h" @@ -59,7 +57,8 @@ module AP_MODULE_DECLARE_DATA speling_module; typedef struct { int enabled; - int case_only; + int check_case_only; + int check_basename_match; } spconfig; /* @@ -76,7 +75,8 @@ static void *mkconfig(apr_pool_t *p) spconfig *cfg = apr_pcalloc(p, sizeof(spconfig)); cfg->enabled = 0; - cfg->case_only = 0; + cfg->check_case_only = 0; + cfg->check_basename_match = 1; return 
cfg; } @@ -107,8 +107,11 @@ static const command_rec speling_cmds[] = (void*)APR_OFFSETOF(spconfig, enabled), OR_OPTIONS, "whether or not to fix miscapitalized/misspelled requests"), AP_INIT_FLAG("CheckCaseOnly", ap_set_flag_slot, - (void*)APR_OFFSETOF(spconfig, case_only), OR_OPTIONS, + (void*)APR_OFFSETOF(spconfig, check_case_only), OR_OPTIONS, "whether or not to fix only miscapitalized requests"), + AP_INIT_FLAG("CheckBasenameMatch", ap_set_flag_slot, + (void*)APR_OFFSETOF(spconfig, check_basename_match), OR_OPTIONS, + "whether or not to fix files with the same base name"), { NULL } }; @@ -302,7 +305,7 @@ static int check_speling(request_rec *r) * simple typing errors are checked next (like, e.g., * missing/extra/transposed char) */ - else if ((cfg->case_only == 0) + else if ((cfg->check_case_only == 0) && ((q = spdist(bad, dirent.name)) != SP_VERYDIFFERENT)) { misspelled_file *sp_new; @@ -316,22 +319,14 @@ static int check_speling(request_rec *r) * requests. It is of questionable use to continue looking for * files with the same base name, but potentially of totally wrong * type (index.html <-> index.db). - * I would propose to not set the WANT_BASENAME_MATCH define. - * 08-Aug-1997 * - * However, Alexei replied giving some reasons to add it anyway: - * > Oh, by the way, I remembered why having the - * > extension-stripping-and-matching stuff is a good idea: - * > - * > If you're using MultiViews, and have a file named foobar.html, - * > which you refer to as "foobar", and someone tried to access - * > "Foobar", mod_speling won't find it, because it won't find - * > anything matching that spelling. With the extension-munging, - * > it would locate "foobar.html". Not perfect, but I ran into - * > that problem when I first wrote the module. + * If you're using MultiViews, and have a file named foobar.html, + * which you refer to as "foobar", and someone tried to access + * "Foobar", without CheckBasenameMatch, mod_speling won't find it, + * because it won't find anything matching that spelling. + * With the extension-munging, it would locate "foobar.html". */ - else { -#ifdef WANT_BASENAME_MATCH + else if (cfg->check_basename_match == 1) { /* * Okay... we didn't find anything. Now we take out the hard-core * power tools. There are several cases here. 
Someone might have @@ -356,7 +351,6 @@ static int check_speling(request_rec *r) sp_new->name = apr_pstrdup(r->pool, dirent.name); sp_new->quality = SP_VERYDIFFERENT; } -#endif } } apr_dir_close(dir); @@ -425,6 +419,7 @@ static int check_speling(request_rec *r) if (apr_pool_create(&sub_pool, p) != APR_SUCCESS) return DECLINED; + apr_pool_tag(sub_pool, "speling_sub"); t = apr_array_make(sub_pool, candidates->nelts * 8 + 8, sizeof(char *)); diff --git a/modules/mappers/mod_vhost_alias.c b/modules/mappers/mod_vhost_alias.c index 0b61694..b1e5bfb 100644 --- a/modules/mappers/mod_vhost_alias.c +++ b/modules/mappers/mod_vhost_alias.c @@ -152,7 +152,7 @@ static const char *vhost_alias_set(cmd_parms *cmd, void *dummy, const char *map) } if (!ap_os_is_path_absolute(cmd->pool, map)) { - if (strcasecmp(map, "none")) { + if (ap_cstr_casecmp(map, "none")) { return "format string must be an absolute path, or 'none'"; } *pmap = NULL; diff --git a/modules/md/config2.m4 b/modules/md/config2.m4 index a2c8303..11d4f32 100644 --- a/modules/md/config2.m4 +++ b/modules/md/config2.m4 @@ -99,7 +99,7 @@ AC_DEFUN([APACHE_CHECK_CURL],[ AC_CHECK_HEADERS([curl/curl.h]) - AC_MSG_CHECKING([for curl version >= 7.50]) + AC_MSG_CHECKING([for curl version >= 7.29]) AC_TRY_COMPILE([#include ],[ #if !defined(LIBCURL_VERSION_MAJOR) #error "Missing libcurl version" @@ -107,7 +107,7 @@ AC_DEFUN([APACHE_CHECK_CURL],[ #if LIBCURL_VERSION_MAJOR < 7 #error "Unsupported libcurl version " LIBCURL_VERSION #endif -#if LIBCURL_VERSION_MAJOR == 7 && LIBCURL_VERSION_MINOR < 50 +#if LIBCURL_VERSION_MAJOR == 7 && LIBCURL_VERSION_MINOR < 29 #error "Unsupported libcurl version " LIBCURL_VERSION #endif], [AC_MSG_RESULT(OK) @@ -248,20 +248,31 @@ md_acme.lo dnl md_acme_acct.lo dnl md_acme_authz.lo dnl md_acme_drive.lo dnl +md_acmev2_drive.lo dnl +md_acme_order.lo dnl md_core.lo dnl md_curl.lo dnl md_crypt.lo dnl +md_event.lo dnl md_http.lo dnl md_json.lo dnl md_jws.lo dnl md_log.lo dnl +md_ocsp.lo dnl +md_result.lo dnl md_reg.lo dnl +md_status.lo dnl md_store.lo dnl md_store_fs.lo dnl +md_tailscale.lo dnl +md_time.lo dnl md_util.lo dnl mod_md.lo dnl mod_md_config.lo dnl +mod_md_drive.lo dnl +mod_md_ocsp.lo dnl mod_md_os.lo dnl +mod_md_status.lo dnl " # Ensure that other modules can pick up mod_md.h diff --git a/modules/md/md.h b/modules/md/md.h index 60f8852..035ccba 100644 --- a/modules/md/md.h +++ b/modules/md/md.h @@ -17,19 +17,22 @@ #ifndef mod_md_md_h #define mod_md_md_h +#include + +#include "md_time.h" #include "md_version.h" struct apr_array_header_t; struct apr_hash_t; struct md_json_t; struct md_cert_t; +struct md_job_t; struct md_pkey_t; +struct md_result_t; struct md_store_t; struct md_srv_conf_t; struct md_pkey_spec_t; -#define MD_TLSSNI01_DNS_SUFFIX ".acme.invalid" - #define MD_PKEY_RSA_BITS_MIN 2048 #define MD_PKEY_RSA_BITS_DEF 2048 @@ -37,13 +40,22 @@ struct md_pkey_spec_t; #define MD_HSTS_HEADER "Strict-Transport-Security" #define MD_HSTS_MAX_AGE_DEFAULT 15768000 +#define PROTO_ACME_TLS_1 "acme-tls/1" + +#define MD_TIME_LIFE_NORM (apr_time_from_sec(100 * MD_SECS_PER_DAY)) +#define MD_TIME_RENEW_WINDOW_DEF (apr_time_from_sec(33 * MD_SECS_PER_DAY)) +#define MD_TIME_WARN_WINDOW_DEF (apr_time_from_sec(10 * MD_SECS_PER_DAY)) +#define MD_TIME_OCSP_KEEP_NORM (apr_time_from_sec(7 * MD_SECS_PER_DAY)) + +#define MD_OTHER "other" + typedef enum { - MD_S_UNKNOWN, /* MD has not been analysed yet */ - MD_S_INCOMPLETE, /* MD is missing necessary information, cannot go live */ - MD_S_COMPLETE, /* MD has all necessary information, can go live */ - 
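The mod_speling change above replaces the old compile-time WANT_BASENAME_MATCH switch with a run-time CheckBasenameMatch directive, defaulting to On per mkconfig(). A per-directory sketch of turning it off while keeping the other corrections (the path is a placeholder):

    <Directory "/var/www/html">
        CheckSpelling On
        CheckCaseOnly Off
        # do not offer files that merely share a base name with the request
        # (e.g. index.db for a request of index.html)
        CheckBasenameMatch Off
    </Directory>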
MD_S_EXPIRED, /* MD is complete, but credentials have expired */ - MD_S_ERROR, /* MD data is flawed, unable to be processed as is */ - MD_S_MISSING, /* MD is missing config information, cannot proceed */ + MD_S_UNKNOWN = 0, /* MD has not been analysed yet */ + MD_S_INCOMPLETE = 1, /* MD is missing necessary information, cannot go live */ + MD_S_COMPLETE = 2, /* MD has all necessary information, can go live */ + MD_S_EXPIRED_DEPRECATED = 3, /* deprecated */ + MD_S_ERROR = 4, /* MD data is flawed, unable to be processed as is */ + MD_S_MISSING_INFORMATION = 5, /* User has not agreed to ToS */ } md_state_t; typedef enum { @@ -54,30 +66,11 @@ typedef enum { } md_require_t; typedef enum { - MD_SV_TEXT, - MD_SV_JSON, - MD_SV_CERT, - MD_SV_PKEY, - MD_SV_CHAIN, -} md_store_vtype_t; - -typedef enum { - MD_SG_NONE, - MD_SG_ACCOUNTS, - MD_SG_CHALLENGES, - MD_SG_DOMAINS, - MD_SG_STAGING, - MD_SG_ARCHIVE, - MD_SG_TMP, - MD_SG_COUNT, -} md_store_group_t; - -typedef enum { - MD_DRIVE_DEFAULT = -1, /* default value */ - MD_DRIVE_MANUAL, /* manually triggered transmission of credentials */ - MD_DRIVE_AUTO, /* automatic process performed by httpd */ - MD_DRIVE_ALWAYS, /* always driven by httpd, even if not used in any vhost */ -} md_drive_mode_t; + MD_RENEW_DEFAULT = -1, /* default value */ + MD_RENEW_MANUAL, /* manually triggered renewal of certificate */ + MD_RENEW_AUTO, /* automatic process performed by httpd */ + MD_RENEW_ALWAYS, /* always renewed by httpd, even if not necessary */ +} md_renew_mode_t; typedef struct md_t md_t; struct md_t { @@ -85,90 +78,142 @@ struct md_t { struct apr_array_header_t *domains; /* all DNS names this MD includes */ struct apr_array_header_t *contacts; /* list of contact uris, e.g. mailto:xxx */ - int transitive; /* != 0 iff VirtualHost names/aliases are auto-added */ - md_require_t require_https; /* Iff https: is required for this MD */ - - int drive_mode; /* mode of obtaining credentials */ - struct md_pkey_spec_t *pkey_spec;/* specification for generating new private keys */ - int must_staple; /* certificates should set the OCSP Must Staple extension */ - apr_interval_time_t renew_norm; /* if > 0, normalized cert lifetime */ - apr_interval_time_t renew_window;/* time before expiration that starts renewal */ + struct md_pkeys_spec_t *pks; /* specification for generating private keys */ + md_timeslice_t *renew_window; /* time before expiration that starts renewal */ + md_timeslice_t *warn_window; /* time before expiration that warnings are sent out */ - const char *ca_url; /* url of CA certificate service */ const char *ca_proto; /* protocol used vs CA (e.g. ACME) */ + struct apr_array_header_t *ca_urls; /* urls of CAs */ + const char *ca_effective; /* url of CA used */ const char *ca_account; /* account used at CA */ - const char *ca_agreement; /* accepted agreement uri between CA and user */ + const char *ca_agreement; /* accepted agreement uri between CA and user */ struct apr_array_header_t *ca_challenges; /* challenge types configured for this MD */ + struct apr_array_header_t *cert_files; /* != NULL iff pubcerts explicitly configured */ + struct apr_array_header_t *pkey_files; /* != NULL iff privkeys explicitly configured */ + const char *ca_eab_kid; /* optional KEYID for external account binding */ + const char *ca_eab_hmac; /* optional HMAC for external account binding */ - md_state_t state; /* state of this MD */ - apr_time_t valid_from; /* When the credentials start to be valid. 0 if unknown */ - apr_time_t expires; /* When the credentials expire. 
0 if unknown */ - const char *cert_url; /* url where cert has been created, remember during drive */ + const char *state_descr; /* description of state of NULL */ + struct apr_array_header_t *acme_tls_1_domains; /* domains supporting "acme-tls/1" protocol */ + const char *dns01_cmd; /* DNS challenge command, override global command */ + const struct md_srv_conf_t *sc; /* server config where it was defined or NULL */ const char *defn_name; /* config file this MD was defined */ unsigned defn_line_number; /* line number of definition */ + const char *configured_name; /* name this MD was configured with, if different */ + + int renew_mode; /* mode of obtaining credentials */ + md_require_t require_https; /* Iff https: is required for this MD */ + md_state_t state; /* state of this MD */ + int transitive; /* != 0 iff VirtualHost names/aliases are auto-added */ + int must_staple; /* certificates should set the OCSP Must Staple extension */ + int stapling; /* if OCSP stapling is enabled */ + int watched; /* if certificate is supervised (renew or expiration warning) */ }; #define MD_KEY_ACCOUNT "account" +#define MD_KEY_ACME_TLS_1 "acme-tls/1" +#define MD_KEY_ACTIVATION_DELAY "activation-delay" +#define MD_KEY_ACTIVITY "activity" #define MD_KEY_AGREEMENT "agreement" +#define MD_KEY_AUTHORIZATIONS "authorizations" #define MD_KEY_BITS "bits" #define MD_KEY_CA "ca" #define MD_KEY_CA_URL "ca-url" #define MD_KEY_CERT "cert" +#define MD_KEY_CERT_FILES "cert-files" +#define MD_KEY_CERTIFICATE "certificate" +#define MD_KEY_CHALLENGE "challenge" #define MD_KEY_CHALLENGES "challenges" +#define MD_KEY_CMD_DNS01 "cmd-dns-01" +#define MD_KEY_DNS01_VERSION "cmd-dns-01-version" +#define MD_KEY_COMPLETE "complete" #define MD_KEY_CONTACT "contact" #define MD_KEY_CONTACTS "contacts" #define MD_KEY_CSR "csr" +#define MD_KEY_CURVE "curve" #define MD_KEY_DETAIL "detail" #define MD_KEY_DISABLED "disabled" #define MD_KEY_DIR "dir" #define MD_KEY_DOMAIN "domain" #define MD_KEY_DOMAINS "domains" -#define MD_KEY_DRIVE_MODE "drive-mode" +#define MD_KEY_EAB "eab" +#define MD_KEY_EAB_REQUIRED "externalAccountRequired" +#define MD_KEY_ENTRIES "entries" +#define MD_KEY_ERRORED "errored" +#define MD_KEY_ERROR "error" #define MD_KEY_ERRORS "errors" #define MD_KEY_EXPIRES "expires" +#define MD_KEY_FINALIZE "finalize" +#define MD_KEY_FINISHED "finished" +#define MD_KEY_FROM "from" +#define MD_KEY_GOOD "good" +#define MD_KEY_HMAC "hmac" #define MD_KEY_HTTP "http" #define MD_KEY_HTTPS "https" #define MD_KEY_ID "id" #define MD_KEY_IDENTIFIER "identifier" #define MD_KEY_KEY "key" +#define MD_KEY_KID "kid" #define MD_KEY_KEYAUTHZ "keyAuthorization" +#define MD_KEY_LAST "last" +#define MD_KEY_LAST_RUN "last-run" #define MD_KEY_LOCATION "location" +#define MD_KEY_LOG "log" +#define MD_KEY_MDS "managed-domains" +#define MD_KEY_MESSAGE "message" #define MD_KEY_MUST_STAPLE "must-staple" #define MD_KEY_NAME "name" +#define MD_KEY_NEXT_RUN "next-run" +#define MD_KEY_NOTIFIED "notified" +#define MD_KEY_NOTIFIED_RENEWED "notified-renewed" +#define MD_KEY_OCSP "ocsp" +#define MD_KEY_OCSPS "ocsps" +#define MD_KEY_ORDERS "orders" #define MD_KEY_PERMANENT "permanent" #define MD_KEY_PKEY "privkey" -#define MD_KEY_PROCESSED "processed" +#define MD_KEY_PKEY_FILES "pkey-files" +#define MD_KEY_PROBLEM "problem" #define MD_KEY_PROTO "proto" +#define MD_KEY_READY "ready" #define MD_KEY_REGISTRATION "registration" #define MD_KEY_RENEW "renew" +#define MD_KEY_RENEW_AT "renew-at" +#define MD_KEY_RENEW_MODE "renew-mode" +#define MD_KEY_RENEWAL "renewal" 
+#define MD_KEY_RENEWING "renewing" #define MD_KEY_RENEW_WINDOW "renew-window" #define MD_KEY_REQUIRE_HTTPS "require-https" #define MD_KEY_RESOURCE "resource" +#define MD_KEY_RESPONSE "response" +#define MD_KEY_REVOKED "revoked" +#define MD_KEY_SERIAL "serial" +#define MD_KEY_SHA256_FINGERPRINT "sha256-fingerprint" +#define MD_KEY_STAPLING "stapling" #define MD_KEY_STATE "state" +#define MD_KEY_STATE_DESCR "state-descr" #define MD_KEY_STATUS "status" #define MD_KEY_STORE "store" +#define MD_KEY_SUBPROBLEMS "subproblems" #define MD_KEY_TEMPORARY "temporary" +#define MD_KEY_TOS "termsOfService" #define MD_KEY_TOKEN "token" +#define MD_KEY_TOTAL "total" #define MD_KEY_TRANSITIVE "transitive" #define MD_KEY_TYPE "type" +#define MD_KEY_UNKNOWN "unknown" +#define MD_KEY_UNTIL "until" #define MD_KEY_URL "url" +#define MD_KEY_URLS "urls" #define MD_KEY_URI "uri" -#define MD_KEY_VALID_FROM "validFrom" +#define MD_KEY_VALID "valid" +#define MD_KEY_VALID_FROM "valid-from" #define MD_KEY_VALUE "value" #define MD_KEY_VERSION "version" - -#define MD_FN_MD "md.json" -#define MD_FN_JOB "job.json" -#define MD_FN_PRIVKEY "privkey.pem" -#define MD_FN_PUBCERT "pubcert.pem" -#define MD_FN_CERT "cert.pem" -#define MD_FN_CHAIN "chain.pem" -#define MD_FN_HTTPD_JSON "httpd.json" - -#define MD_FN_FALLBACK_PKEY "fallback-privkey.pem" -#define MD_FN_FALLBACK_CERT "fallback-cert.pem" +#define MD_KEY_WATCHED "watched" +#define MD_KEY_WHEN "when" +#define MD_KEY_WARN_WINDOW "warn-window" /* Check if a string member of a new MD (n) has * a value and if it differs from the old MD o @@ -222,12 +267,6 @@ md_t *md_get_by_domain(struct apr_array_header_t *mds, const char *domain); */ md_t *md_get_by_dns_overlap(struct apr_array_header_t *mds, const md_t *md); -/** - * Find the managed domain in the list that, for the given md, - * has the same name, or the most number of overlaps in domains - */ -md_t *md_find_closest_match(apr_array_header_t *mds, const md_t *md); - /** * Create and empty md record, structures initialized. */ @@ -248,43 +287,44 @@ md_t *md_clone(apr_pool_t *p, const md_t *src); */ md_t *md_copy(apr_pool_t *p, const md_t *src); -/** - * Create a merged md with the settings of add overlaying the ones from base. - */ -md_t *md_merge(apr_pool_t *p, const md_t *add, const md_t *base); - /** * Convert the managed domain into a JSON representation and vice versa. * * This reads and writes the following information: name, domains, ca_url, ca_proto and state. */ -struct md_json_t *md_to_json (const md_t *md, apr_pool_t *p); +struct md_json_t *md_to_json(const md_t *md, apr_pool_t *p); md_t *md_from_json(struct md_json_t *json, apr_pool_t *p); /** - * Determine if MD should renew its cert (if it has one) + * Same as md_to_json(), but with sensitive fields stripped. */ -int md_should_renew(const md_t *md); +struct md_json_t *md_to_public_json(const md_t *md, apr_pool_t *p); + +int md_is_covered_by_alt_names(const md_t *md, const struct apr_array_header_t* alt_names); + +/* how many certificates this domain has/will eventually have. 
*/ +int md_cert_count(const md_t *md); + +const char *md_get_ca_name_from_url(apr_pool_t *p, const char *url); +apr_status_t md_get_ca_url_from_name(const char **purl, apr_pool_t *p, const char *name); + +/**************************************************************************************************/ +/* notifications */ + +typedef apr_status_t md_job_notify_cb(struct md_job_t *job, const char *reason, + struct md_result_t *result, apr_pool_t *p, void *baton); /**************************************************************************************************/ /* domain credentials */ -typedef struct md_creds_t md_creds_t; -struct md_creds_t { - struct md_pkey_t *privkey; - struct apr_array_header_t *pubcert; /* complete md_cert* chain */ - struct md_cert_t *cert; - int expired; +typedef struct md_pubcert_t md_pubcert_t; +struct md_pubcert_t { + struct apr_array_header_t *certs; /* chain of const md_cert*, leaf cert first */ + struct apr_array_header_t *alt_names; /* alt-names of leaf cert */ + const char *cert_file; /* file path of chain */ + const char *key_file; /* file path of key for leaf cert */ }; -/* TODO: not sure this is a good idea, testing some readability and debuggabiltiy of - * cascaded apr_status_t checks. */ -#define MD_CHK_VARS const char *md_chk_ -#define MD_LAST_CHK md_chk_ -#define MD_CHK_STEP(c, status, s) (md_chk_ = s, (void)md_chk_, status == (rv = (c))) -#define MD_CHK(c, status) MD_CHK_STEP(c, status, #c) -#define MD_IS_ERR(c, err) (md_chk_ = #c, APR_STATUS_IS_##err((rv = (c)))) -#define MD_CHK_SUCCESS(c) MD_CHK(c, APR_SUCCESS) -#define MD_OK(c) MD_CHK_SUCCESS(c) +#define MD_OK(c) (APR_SUCCESS == (rv = c)) #endif /* mod_md_md_h */ diff --git a/modules/md/md_acme.c b/modules/md/md_acme.c index 3fbd365..4366bf6 100644 --- a/modules/md/md_acme.c +++ b/modules/md/md_acme.c @@ -30,6 +30,7 @@ #include "md_http.h" #include "md_log.h" #include "md_store.h" +#include "md_result.h" #include "md_util.h" #include "md_version.h" @@ -37,34 +38,36 @@ #include "md_acme_acct.h" -static const char *base_product; +static const char *base_product= "-"; typedef struct acme_problem_status_t acme_problem_status_t; struct acme_problem_status_t { - const char *type; - apr_status_t rv; + const char *type; /* the ACME error string */ + apr_status_t rv; /* what Apache status code we give it */ + int input_related; /* if error indicates wrong input value */ }; static acme_problem_status_t Problems[] = { - { "acme:error:badCSR", APR_EINVAL }, - { "acme:error:badNonce", APR_EAGAIN }, - { "acme:error:badSignatureAlgorithm", APR_EINVAL }, - { "acme:error:invalidContact", APR_BADARG }, - { "acme:error:unsupportedContact", APR_EGENERAL }, - { "acme:error:malformed", APR_EINVAL }, - { "acme:error:rateLimited", APR_BADARG }, - { "acme:error:rejectedIdentifier", APR_BADARG }, - { "acme:error:serverInternal", APR_EGENERAL }, - { "acme:error:unauthorized", APR_EACCES }, - { "acme:error:unsupportedIdentifier", APR_BADARG }, - { "acme:error:userActionRequired", APR_EAGAIN }, - { "acme:error:badRevocationReason", APR_EINVAL }, - { "acme:error:caa", APR_EGENERAL }, - { "acme:error:dns", APR_EGENERAL }, - { "acme:error:connection", APR_EGENERAL }, - { "acme:error:tls", APR_EGENERAL }, - { "acme:error:incorrectResponse", APR_EGENERAL }, + { "acme:error:badCSR", APR_EINVAL, 1 }, + { "acme:error:badNonce", APR_EAGAIN, 0 }, + { "acme:error:badSignatureAlgorithm", APR_EINVAL, 1 }, + { "acme:error:externalAccountRequired", APR_EINVAL, 1 }, + { "acme:error:invalidContact", APR_BADARG, 1 }, + { 
"acme:error:unsupportedContact", APR_EGENERAL, 1 }, + { "acme:error:malformed", APR_EINVAL, 1 }, + { "acme:error:rateLimited", APR_BADARG, 0 }, + { "acme:error:rejectedIdentifier", APR_BADARG, 1 }, + { "acme:error:serverInternal", APR_EGENERAL, 0 }, + { "acme:error:unauthorized", APR_EACCES, 0 }, + { "acme:error:unsupportedIdentifier", APR_BADARG, 1 }, + { "acme:error:userActionRequired", APR_EAGAIN, 0 }, + { "acme:error:badRevocationReason", APR_EINVAL, 1 }, + { "acme:error:caa", APR_EGENERAL, 0 }, + { "acme:error:dns", APR_EGENERAL, 0 }, + { "acme:error:connection", APR_EGENERAL, 0 }, + { "acme:error:tls", APR_EGENERAL, 0 }, + { "acme:error:incorrectResponse", APR_EGENERAL, 0 }, }; static apr_status_t problem_status_get(const char *type) { @@ -85,89 +88,23 @@ static apr_status_t problem_status_get(const char *type) { return APR_EGENERAL; } -apr_status_t md_acme_init(apr_pool_t *p, const char *base) -{ - base_product = base; - return md_crypt_init(p); -} +int md_acme_problem_is_input_related(const char *problem) { + size_t i; -apr_status_t md_acme_create(md_acme_t **pacme, apr_pool_t *p, const char *url, - const char *proxy_url) -{ - md_acme_t *acme; - const char *err = NULL; - apr_status_t rv; - apr_uri_t uri_parsed; - size_t len; - - if (!url) { - md_log_perror(MD_LOG_MARK, MD_LOG_ERR, APR_EINVAL, p, "create ACME without url"); - return APR_EINVAL; - } - - if (APR_SUCCESS != (rv = md_util_abs_uri_check(p, url, &err))) { - md_log_perror(MD_LOG_MARK, MD_LOG_ERR, rv, p, "invalid ACME uri (%s): %s", err, url); - return rv; + if (!problem) return 0; + if (strstr(problem, "urn:ietf:params:") == problem) { + problem += strlen("urn:ietf:params:"); } - - acme = apr_pcalloc(p, sizeof(*acme)); - acme->url = url; - acme->p = p; - acme->user_agent = apr_psprintf(p, "%s mod_md/%s", - base_product, MOD_MD_VERSION); - acme->proxy_url = proxy_url? apr_pstrdup(p, proxy_url) : NULL; - acme->max_retries = 3; - - if (APR_SUCCESS != (rv = apr_uri_parse(p, url, &uri_parsed))) { - md_log_perror(MD_LOG_MARK, MD_LOG_ERR, rv, p, "parsing ACME uri: %s", url); - return APR_EINVAL; + else if (strstr(problem, "urn:") == problem) { + problem += strlen("urn:"); } - - len = strlen(uri_parsed.hostname); - acme->sname = (len <= 16)? uri_parsed.hostname : apr_pstrdup(p, uri_parsed.hostname + len - 16); - - *pacme = (APR_SUCCESS == rv)? acme : NULL; - return rv; -} -apr_status_t md_acme_setup(md_acme_t *acme) -{ - apr_status_t rv; - md_json_t *json; - - assert(acme->url); - if (!acme->http && APR_SUCCESS != (rv = md_http_create(&acme->http, acme->p, - acme->user_agent, acme->proxy_url))) { - return rv; - } - md_http_set_response_limit(acme->http, 1024*1024); - - md_log_perror(MD_LOG_MARK, MD_LOG_DEBUG, 0, acme->p, "get directory from %s", acme->url); - - rv = md_acme_get_json(&json, acme, acme->url, acme->p); - if (APR_SUCCESS == rv) { - acme->new_authz = md_json_gets(json, "new-authz", NULL); - acme->new_cert = md_json_gets(json, "new-cert", NULL); - acme->new_reg = md_json_gets(json, "new-reg", NULL); - acme->revoke_cert = md_json_gets(json, "revoke-cert", NULL); - if (acme->new_authz && acme->new_cert && acme->new_reg && acme->revoke_cert) { - return APR_SUCCESS; + for(i = 0; i < (sizeof(Problems)/sizeof(Problems[0])); ++i) { + if (!apr_strnatcasecmp(problem, Problems[i].type)) { + return Problems[i].input_related; } - md_log_perror(MD_LOG_MARK, MD_LOG_WARNING, 0, acme->p, - "Unable to understand ACME server response. 
Wrong ACME protocol version?"); - rv = APR_EINVAL; - } - else { - md_log_perror(MD_LOG_MARK, MD_LOG_WARNING, 0, acme->p, "unsuccessful in contacting ACME " - "server at %s. If this problem persists, please check your network " - "connectivity from your Apache server to the ACME server. Also, older " - "servers might have trouble verifying the certificates of the ACME " - "server. You can check if you are able to contact it manually via the " - "curl command. Sometimes, the ACME server might be down for maintenance, " - "so failing to contact it is not an immediate problem. mod_md will " - "continue retrying this.", acme->url); } - return rv; + return 0; } /**************************************************************************************************/ @@ -183,26 +120,10 @@ static void req_update_nonce(md_acme_t *acme, apr_table_t *hdrs) } } -static apr_status_t http_update_nonce(const md_http_response_t *res) +static apr_status_t http_update_nonce(const md_http_response_t *res, void *data) { - if (res->headers) { - const char *nonce = apr_table_get(res->headers, "Replay-Nonce"); - if (nonce) { - md_acme_t *acme = res->req->baton; - acme->nonce = apr_pstrdup(acme->p, nonce); - } - } - return res->rv; -} - -static apr_status_t md_acme_new_nonce(md_acme_t *acme) -{ - apr_status_t rv; - long id; - - rv = md_http_HEAD(acme->http, acme->new_reg, NULL, http_update_nonce, acme, &id); - md_http_await(acme->http, id); - return rv; + req_update_nonce(data, res->headers); + return APR_SUCCESS; } static md_acme_req_t *md_acme_req_create(md_acme_t *acme, const char *method, const char *url) @@ -215,6 +136,7 @@ static md_acme_req_t *md_acme_req_create(md_acme_t *acme, const char *method, co if (rv != APR_SUCCESS) { return NULL; } + apr_pool_tag(pool, "md_acme_req"); req = apr_pcalloc(pool, sizeof(*req)); if (!req) { @@ -226,54 +148,46 @@ static md_acme_req_t *md_acme_req_create(md_acme_t *acme, const char *method, co req->p = pool; req->method = method; req->url = url; - req->prot_hdrs = apr_table_make(pool, 5); - if (!req->prot_hdrs) { - apr_pool_destroy(pool); - return NULL; - } + req->prot_fields = md_json_create(pool); req->max_retries = acme->max_retries; - + req->result = md_result_make(req->p, APR_SUCCESS); return req; } -apr_status_t md_acme_req_body_init(md_acme_req_t *req, md_json_t *jpayload) +static apr_status_t acmev2_new_nonce(md_acme_t *acme) { - const char *payload; - size_t payload_len; - - if (!req->acme->acct) { - return APR_EINVAL; - } - - payload = md_json_writep(jpayload, req->p, MD_JSON_FMT_COMPACT); - if (!payload) { - return APR_EINVAL; - } + return md_http_HEAD_perform(acme->http, acme->api.v2.new_nonce, NULL, http_update_nonce, acme); +} - payload_len = strlen(payload); - md_log_perror(MD_LOG_MARK, MD_LOG_TRACE1, 0, req->p, - "acct payload(len=%" APR_SIZE_T_FMT "): %s", payload_len, payload); - return md_jws_sign(&req->req_json, req->p, payload, payload_len, - req->prot_hdrs, req->acme->acct_key, NULL); -} +apr_status_t md_acme_init(apr_pool_t *p, const char *base, int init_ssl) +{ + base_product = base; + return init_ssl? 
md_crypt_init(p) : APR_SUCCESS; +} static apr_status_t inspect_problem(md_acme_req_t *req, const md_http_response_t *res) { const char *ctype; - md_json_t *problem; - + md_json_t *problem = NULL; + apr_status_t rv; + ctype = apr_table_get(req->resp_hdrs, "content-type"); + ctype = md_util_parse_ct(res->req->pool, ctype); if (ctype && !strcmp(ctype, "application/problem+json")) { /* RFC 7807 */ - md_json_read_http(&problem, req->p, res); - if (problem) { + rv = md_json_read_http(&problem, req->p, res); + if (rv == APR_SUCCESS && problem) { const char *ptype, *pdetail; req->resp_json = problem; ptype = md_json_gets(problem, MD_KEY_TYPE, NULL); pdetail = md_json_gets(problem, MD_KEY_DETAIL, NULL); req->rv = problem_status_get(ptype); + md_result_problem_set(req->result, req->rv, ptype, pdetail, + md_json_getj(problem, MD_KEY_SUBPROBLEMS, NULL)); + + if (APR_STATUS_IS_EAGAIN(req->rv)) { md_log_perror(MD_LOG_MARK, MD_LOG_DEBUG, req->rv, req->p, @@ -287,43 +201,79 @@ static apr_status_t inspect_problem(md_acme_req_t *req, const md_http_response_t } } - if (APR_SUCCESS == res->rv) { - switch (res->status) { - case 400: - return APR_EINVAL; - case 403: - return APR_EACCES; - case 404: - return APR_ENOENT; - default: - md_log_perror(MD_LOG_MARK, MD_LOG_WARNING, 0, req->p, - "acme problem unknown: http status %d", res->status); - return APR_EGENERAL; - } + switch (res->status) { + case 400: + return APR_EINVAL; + case 401: /* sectigo returns this instead of 403 */ + case 403: + return APR_EACCES; + case 404: + return APR_ENOENT; + default: + md_log_perror(MD_LOG_MARK, MD_LOG_WARNING, 0, req->p, + "acme problem unknown: http status %d", res->status); + md_result_printf(req->result, APR_EGENERAL, "unexpected http status: %d", + res->status); + return req->result->status; } - return res->rv; + return APR_SUCCESS; } /**************************************************************************************************/ /* ACME requests with nonce handling */ -static apr_status_t md_acme_req_done(md_acme_req_t *req) +static apr_status_t acmev2_req_init(md_acme_req_t *req, md_json_t *jpayload) +{ + md_data_t payload; + + md_data_null(&payload); + if (!req->acme->acct) { + return APR_EINVAL; + } + if (jpayload) { + payload.data = md_json_writep(jpayload, req->p, MD_JSON_FMT_COMPACT); + if (!payload.data) { + return APR_EINVAL; + } + } + else { + payload.data = ""; + } + + payload.len = strlen(payload.data); + md_log_perror(MD_LOG_MARK, MD_LOG_TRACE1, 0, req->p, + "acme payload(len=%" APR_SIZE_T_FMT "): %s", payload.len, payload.data); + return md_jws_sign(&req->req_json, req->p, &payload, + req->prot_fields, req->acme->acct_key, req->acme->acct->url); +} + +apr_status_t md_acme_req_body_init(md_acme_req_t *req, md_json_t *payload) { - apr_status_t rv = req->rv; + return req->acme->req_init_fn(req, payload); +} + +static apr_status_t md_acme_req_done(md_acme_req_t *req, apr_status_t rv) +{ + if (req->result->status != APR_SUCCESS) { + if (req->on_err) { + req->on_err(req, req->result, req->baton); + } + } + /* An error in rv superceeds the result->status */ + if (APR_SUCCESS != rv) req->result->status = rv; + rv = req->result->status; + /* transfer results into the acme's central result for longer life and later inspection */ + md_result_dup(req->acme->last, req->result); if (req->p) { apr_pool_destroy(req->p); } return rv; } -static apr_status_t on_response(const md_http_response_t *res) +static apr_status_t on_response(const md_http_response_t *res, void *data) { - md_acme_req_t *req = res->req->baton; - 
apr_status_t rv = res->rv; - - if (APR_SUCCESS != rv) { - goto out; - } + md_acme_req_t *req = data; + apr_status_t rv = APR_SUCCESS; req->resp_hdrs = apr_table_clone(req->p, res->headers); req_update_nonce(req->acme, res->headers); @@ -361,9 +311,10 @@ static apr_status_t on_response(const md_http_response_t *res) if (!processed) { rv = APR_EINVAL; - md_log_perror(MD_LOG_MARK, MD_LOG_ERR, rv, req->p, - "response: %d, content-type=%s", res->status, - apr_table_get(res->headers, "Content-Type")); + md_result_printf(req->result, rv, "unable to process the response: " + "http-status=%d, content-type=%s", + res->status, apr_table_get(res->headers, "Content-Type")); + md_result_log(req->result, MD_LOG_ERR); } } else if (APR_EAGAIN == (rv = inspect_problem(req, res))) { @@ -371,85 +322,110 @@ static apr_status_t on_response(const md_http_response_t *res) return rv; } -out: - md_acme_req_done(req); + md_acme_req_done(req, rv); return rv; } +static apr_status_t acmev2_GET_as_POST_init(md_acme_req_t *req, void *baton) +{ + (void)baton; + return md_acme_req_body_init(req, NULL); +} + static apr_status_t md_acme_req_send(md_acme_req_t *req) { apr_status_t rv; md_acme_t *acme = req->acme; - const char *body = NULL; + md_data_t *body = NULL; + md_result_t *result; assert(acme->url); + md_log_perror(MD_LOG_MARK, MD_LOG_DEBUG, 0, req->p, + "sending req: %s %s", req->method, req->url); + md_result_reset(req->acme->last); + result = md_result_make(req->p, APR_SUCCESS); + + /* Whom are we talking to? */ + if (acme->version == MD_ACME_VERSION_UNKNOWN) { + rv = md_acme_setup(acme, result); + if (APR_SUCCESS != rv) goto leave; + } + + if (!strcmp("GET", req->method) && !req->on_init && !req->req_json) { + /* See + * and + * and + * We implement this change in ACMEv2 and higher as keeping the md_acme_GET() methods, + * but switching them to POSTs with a empty, JWS signed, body when we call + * our HTTP client. */ + req->method = "POST"; + req->on_init = acmev2_GET_as_POST_init; + /*req->max_retries = 0; don't do retries on these "GET"s */ + } + + /* Besides GET/HEAD, we always need a fresh nonce */ if (strcmp("GET", req->method) && strcmp("HEAD", req->method)) { - if (!acme->new_authz) { - if (APR_SUCCESS != (rv = md_acme_setup(acme))) { - return rv; - } + if (acme->version == MD_ACME_VERSION_UNKNOWN) { + rv = md_acme_setup(acme, result); + if (APR_SUCCESS != rv) goto leave; } - if (!acme->nonce) { - if (APR_SUCCESS != (rv = md_acme_new_nonce(acme))) { - md_log_perror(MD_LOG_MARK, MD_LOG_WARNING, rv, req->p, - "error retrieving new nonce from ACME server"); - return rv; - } + if (!acme->nonce && (APR_SUCCESS != (rv = acme->new_nonce_fn(acme)))) { + md_log_perror(MD_LOG_MARK, MD_LOG_WARNING, rv, req->p, + "error retrieving new nonce from ACME server"); + goto leave; } - - apr_table_set(req->prot_hdrs, "nonce", acme->nonce); + + md_json_sets(acme->nonce, req->prot_fields, "nonce", NULL); + md_json_sets(req->url, req->prot_fields, "url", NULL); acme->nonce = NULL; } rv = req->on_init? 
req->on_init(req, req->baton) : APR_SUCCESS; + if (APR_SUCCESS != rv) goto leave; - if ((rv == APR_SUCCESS) && req->req_json) { - body = md_json_writep(req->req_json, req->p, MD_JSON_FMT_INDENT); - if (!body) { - rv = APR_EINVAL; - } + if (req->req_json) { + body = apr_pcalloc(req->p, sizeof(*body)); + body->data = md_json_writep(req->req_json, req->p, MD_JSON_FMT_INDENT); + body->len = strlen(body->data); + md_log_perror(MD_LOG_MARK, MD_LOG_TRACE3, 0, req->p, + "sending JSON body: %s", body->data); } - if (rv == APR_SUCCESS) { - long id = 0; - - if (body && md_log_is_level(req->p, MD_LOG_TRACE2)) { - md_log_perror(MD_LOG_MARK, MD_LOG_TRACE2, 0, req->p, - "req: POST %s, body:\n%s", req->url, body); - } - else { - md_log_perror(MD_LOG_MARK, MD_LOG_DEBUG, 0, req->p, - "req: POST %s", req->url); - } - if (!strcmp("GET", req->method)) { - rv = md_http_GET(req->acme->http, req->url, NULL, on_response, req, &id); - } - else if (!strcmp("POST", req->method)) { - rv = md_http_POSTd(req->acme->http, req->url, NULL, "application/json", - body, body? strlen(body) : 0, on_response, req, &id); - } - else if (!strcmp("HEAD", req->method)) { - rv = md_http_HEAD(req->acme->http, req->url, NULL, on_response, req, &id); - } - else { - md_log_perror(MD_LOG_MARK, MD_LOG_ERR, 0, req->p, - "HTTP method %s against: %s", req->method, req->url); - rv = APR_ENOTIMPL; - } - md_log_perror(MD_LOG_MARK, MD_LOG_DEBUG, rv, req->p, "req sent"); - md_http_await(acme->http, id); - - if (APR_EAGAIN == rv && req->max_retries > 0) { - --req->max_retries; - return md_acme_req_send(req); - } - req = NULL; + if (body && md_log_is_level(req->p, MD_LOG_TRACE4)) { + md_log_perror(MD_LOG_MARK, MD_LOG_TRACE4, 0, req->p, + "req: %s %s, body:\n%s", req->method, req->url, body->data); } - - if (req) { - md_acme_req_done(req); + else { + md_log_perror(MD_LOG_MARK, MD_LOG_DEBUG, 0, req->p, + "req: %s %s", req->method, req->url); + } + + if (!strcmp("GET", req->method)) { + rv = md_http_GET_perform(req->acme->http, req->url, NULL, on_response, req); + } + else if (!strcmp("POST", req->method)) { + rv = md_http_POSTd_perform(req->acme->http, req->url, NULL, "application/jose+json", + body, on_response, req); + } + else if (!strcmp("HEAD", req->method)) { + rv = md_http_HEAD_perform(req->acme->http, req->url, NULL, on_response, req); + } + else { + md_log_perror(MD_LOG_MARK, MD_LOG_ERR, 0, req->p, + "HTTP method %s against: %s", req->method, req->url); + rv = APR_ENOTIMPL; + } + md_log_perror(MD_LOG_MARK, MD_LOG_DEBUG, rv, req->p, "req sent"); + + if (APR_EAGAIN == rv && req->max_retries > 0) { + --req->max_retries; + rv = md_acme_req_send(req); } + req = NULL; + +leave: + if (req) md_acme_req_done(req, rv); return rv; } @@ -457,6 +433,7 @@ apr_status_t md_acme_POST(md_acme_t *acme, const char *url, md_acme_req_init_cb *on_init, md_acme_req_json_cb *on_json, md_acme_req_res_cb *on_res, + md_acme_req_err_cb *on_err, void *baton) { md_acme_req_t *req; @@ -469,6 +446,7 @@ apr_status_t md_acme_POST(md_acme_t *acme, const char *url, req->on_init = on_init; req->on_json = on_json; req->on_res = on_res; + req->on_err = on_err; req->baton = baton; return md_acme_req_send(req); @@ -478,6 +456,7 @@ apr_status_t md_acme_GET(md_acme_t *acme, const char *url, md_acme_req_init_cb *on_init, md_acme_req_json_cb *on_json, md_acme_req_res_cb *on_res, + md_acme_req_err_cb *on_err, void *baton) { md_acme_req_t *req; @@ -490,11 +469,23 @@ apr_status_t md_acme_GET(md_acme_t *acme, const char *url, req->on_init = on_init; req->on_json = on_json; req->on_res = 
on_res; + req->on_err = on_err; req->baton = baton; return md_acme_req_send(req); } +void md_acme_report_result(md_acme_t *acme, apr_status_t rv, struct md_result_t *result) +{ + if (acme->last->status == APR_SUCCESS) { + md_result_set(result, rv, NULL); + } + else { + md_result_problem_set(result, acme->last->status, acme->last->problem, + acme->last->detail, acme->last->subproblems); + } +} + /**************************************************************************************************/ /* GET JSON */ @@ -524,8 +515,283 @@ apr_status_t md_acme_get_json(struct md_json_t **pjson, md_acme_t *acme, ctx.pool = p; ctx.json = NULL; - rv = md_acme_GET(acme, url, NULL, on_got_json, NULL, &ctx); + rv = md_acme_GET(acme, url, NULL, on_got_json, NULL, NULL, &ctx); *pjson = (APR_SUCCESS == rv)? ctx.json : NULL; return rv; } +/**************************************************************************************************/ +/* Generic ACME operations */ + +void md_acme_clear_acct(md_acme_t *acme) +{ + acme->acct_id = NULL; + acme->acct = NULL; + acme->acct_key = NULL; +} + +const char *md_acme_acct_id_get(md_acme_t *acme) +{ + return acme->acct_id; +} + +const char *md_acme_acct_url_get(md_acme_t *acme) +{ + return acme->acct? acme->acct->url : NULL; +} + +apr_status_t md_acme_use_acct(md_acme_t *acme, md_store_t *store, + apr_pool_t *p, const char *acct_id) +{ + md_acme_acct_t *acct; + md_pkey_t *pkey; + apr_status_t rv; + + if (APR_SUCCESS == (rv = md_acme_acct_load(&acct, &pkey, + store, MD_SG_ACCOUNTS, acct_id, acme->p))) { + if (md_acme_acct_matches_url(acct, acme->url)) { + acme->acct_id = apr_pstrdup(p, acct_id); + acme->acct = acct; + acme->acct_key = pkey; + rv = md_acme_acct_validate(acme, store, p); + } + else { + /* account is from another server or, more likely, from another + * protocol endpoint on the same server */ + rv = APR_ENOENT; + } + } + return rv; +} + +apr_status_t md_acme_use_acct_for_md(md_acme_t *acme, struct md_store_t *store, + apr_pool_t *p, const char *acct_id, + const md_t *md) +{ + md_acme_acct_t *acct; + md_pkey_t *pkey; + apr_status_t rv; + + if (APR_SUCCESS == (rv = md_acme_acct_load(&acct, &pkey, + store, MD_SG_ACCOUNTS, acct_id, acme->p))) { + if (md_acme_acct_matches_md(acct, md)) { + acme->acct_id = apr_pstrdup(p, acct_id); + acme->acct = acct; + acme->acct_key = pkey; + rv = md_acme_acct_validate(acme, store, p); + } + else { + /* account is from another server or, more likely, from another + * protocol endpoint on the same server */ + rv = APR_ENOENT; + } + } + return rv; +} + +apr_status_t md_acme_save_acct(md_acme_t *acme, apr_pool_t *p, md_store_t *store) +{ + return md_acme_acct_save(store, p, acme, &acme->acct_id, acme->acct, acme->acct_key); +} + +static apr_status_t acmev2_POST_new_account(md_acme_t *acme, + md_acme_req_init_cb *on_init, + md_acme_req_json_cb *on_json, + md_acme_req_res_cb *on_res, + md_acme_req_err_cb *on_err, + void *baton) +{ + return md_acme_POST(acme, acme->api.v2.new_account, on_init, on_json, on_res, on_err, baton); +} + +apr_status_t md_acme_POST_new_account(md_acme_t *acme, + md_acme_req_init_cb *on_init, + md_acme_req_json_cb *on_json, + md_acme_req_res_cb *on_res, + md_acme_req_err_cb *on_err, + void *baton) +{ + return acme->post_new_account_fn(acme, on_init, on_json, on_res, on_err, baton); +} + +/**************************************************************************************************/ +/* ACME setup */ + +apr_status_t md_acme_create(md_acme_t **pacme, apr_pool_t *p, const char *url, + const char 
*proxy_url, const char *ca_file) +{ + md_acme_t *acme; + const char *err = NULL; + apr_status_t rv; + apr_uri_t uri_parsed; + size_t len; + + if (!url) { + md_log_perror(MD_LOG_MARK, MD_LOG_ERR, APR_EINVAL, p, "create ACME without url"); + return APR_EINVAL; + } + + if (APR_SUCCESS != (rv = md_util_abs_uri_check(p, url, &err))) { + md_log_perror(MD_LOG_MARK, MD_LOG_ERR, rv, p, "invalid ACME uri (%s): %s", err, url); + return rv; + } + + acme = apr_pcalloc(p, sizeof(*acme)); + acme->url = url; + acme->p = p; + acme->user_agent = apr_psprintf(p, "%s mod_md/%s", + base_product, MOD_MD_VERSION); + acme->proxy_url = proxy_url? apr_pstrdup(p, proxy_url) : NULL; + acme->max_retries = 99; + acme->ca_file = ca_file; + + if (APR_SUCCESS != (rv = apr_uri_parse(p, url, &uri_parsed))) { + md_log_perror(MD_LOG_MARK, MD_LOG_ERR, rv, p, "parsing ACME uri: %s", url); + return APR_EINVAL; + } + + len = strlen(uri_parsed.hostname); + acme->sname = (len <= 16)? uri_parsed.hostname : apr_pstrdup(p, uri_parsed.hostname + len - 16); + acme->version = MD_ACME_VERSION_UNKNOWN; + acme->last = md_result_make(acme->p, APR_SUCCESS); + + *pacme = acme; + return rv; +} + +typedef struct { + md_acme_t *acme; + md_result_t *result; +} update_dir_ctx; + +static apr_status_t update_directory(const md_http_response_t *res, void *data) +{ + md_http_request_t *req = res->req; + md_acme_t *acme = ((update_dir_ctx *)data)->acme; + md_result_t *result = ((update_dir_ctx *)data)->result; + apr_status_t rv; + md_json_t *json; + const char *s; + + md_log_perror(MD_LOG_MARK, MD_LOG_TRACE1, 0, req->pool, "directory lookup response: %d", res->status); + if (res->status == 503) { + md_result_printf(result, APR_EAGAIN, + "The ACME server at <%s> reports that Service is Unavailable (503). This " + "may happen during maintenance for short periods of time.", acme->url); + md_result_log(result, MD_LOG_INFO); + rv = result->status; + goto leave; + } + else if (res->status < 200 || res->status >= 300) { + md_result_printf(result, APR_EAGAIN, + "The ACME server at <%s> responded with HTTP status %d. This " + "is unusual. Please verify that the URL is correct and that you can indeed " + "make request from the server to it by other means, e.g. invoking curl/wget.", + acme->url, res->status); + rv = result->status; + goto leave; + } + + rv = md_json_read_http(&json, req->pool, res); + if (APR_SUCCESS != rv) { + md_log_perror(MD_LOG_MARK, MD_LOG_ERR, rv, req->pool, "reading JSON body"); + goto leave; + } + + if (md_log_is_level(acme->p, MD_LOG_TRACE2)) { + s = md_json_writep(json, req->pool, MD_JSON_FMT_INDENT); + md_log_perror(MD_LOG_MARK, MD_LOG_TRACE2, rv, req->pool, + "response: %s", s ? s : ""); + } + + /* What have we got? */ + if ((s = md_json_dups(acme->p, json, "newAccount", NULL))) { + acme->api.v2.new_account = s; + acme->api.v2.new_order = md_json_dups(acme->p, json, "newOrder", NULL); + acme->api.v2.revoke_cert = md_json_dups(acme->p, json, "revokeCert", NULL); + acme->api.v2.key_change = md_json_dups(acme->p, json, "keyChange", NULL); + acme->api.v2.new_nonce = md_json_dups(acme->p, json, "newNonce", NULL); + /* RFC 8555 only requires "directory" and "newNonce" resources. + * mod_md uses "newAccount" and "newOrder" so check for them. + * But mod_md does not use the "revokeCert" or "keyChange" + * resources, so tolerate the absence of those keys. In the + * future if mod_md implements revocation or key rollover then + * the use of those features should be predicated on the + * server's advertised capabilities. 
*/ + if (acme->api.v2.new_account + && acme->api.v2.new_order + && acme->api.v2.new_nonce) { + acme->version = MD_ACME_VERSION_2; + } + acme->ca_agreement = md_json_dups(acme->p, json, "meta", MD_KEY_TOS, NULL); + acme->eab_required = md_json_getb(json, "meta", MD_KEY_EAB_REQUIRED, NULL); + acme->new_nonce_fn = acmev2_new_nonce; + acme->req_init_fn = acmev2_req_init; + acme->post_new_account_fn = acmev2_POST_new_account; + } + else if ((s = md_json_dups(acme->p, json, "new-authz", NULL))) { + acme->api.v1.new_authz = s; + acme->api.v1.new_cert = md_json_dups(acme->p, json, "new-cert", NULL); + acme->api.v1.new_reg = md_json_dups(acme->p, json, "new-reg", NULL); + acme->api.v1.revoke_cert = md_json_dups(acme->p, json, "revoke-cert", NULL); + if (acme->api.v1.new_authz && acme->api.v1.new_cert + && acme->api.v1.new_reg && acme->api.v1.revoke_cert) { + acme->version = MD_ACME_VERSION_1; + } + acme->ca_agreement = md_json_dups(acme->p, json, "meta", "terms-of-service", NULL); + /* we init that far, but will not use the v1 api */ + } + + if (MD_ACME_VERSION_UNKNOWN == acme->version) { + md_result_printf(result, APR_EINVAL, + "Unable to understand ACME server response from <%s>. " + "Wrong ACME protocol version or link?", acme->url); + md_result_log(result, MD_LOG_WARNING); + rv = result->status; + } +leave: + return rv; +} + +apr_status_t md_acme_setup(md_acme_t *acme, md_result_t *result) +{ + apr_status_t rv; + update_dir_ctx ctx; + + assert(acme->url); + acme->version = MD_ACME_VERSION_UNKNOWN; + + if (!acme->http && APR_SUCCESS != (rv = md_http_create(&acme->http, acme->p, + acme->user_agent, acme->proxy_url))) { + return rv; + } + /* TODO: maybe this should be configurable. Let's take some reasonable + * defaults for now that protect our client */ + md_http_set_response_limit(acme->http, 1024*1024); + md_http_set_timeout_default(acme->http, apr_time_from_sec(10 * 60)); + md_http_set_connect_timeout_default(acme->http, apr_time_from_sec(30)); + md_http_set_stalling_default(acme->http, 10, apr_time_from_sec(30)); + md_http_set_ca_file(acme->http, acme->ca_file); + + md_log_perror(MD_LOG_MARK, MD_LOG_DEBUG, 0, acme->p, "get directory from %s", acme->url); + + ctx.acme = acme; + ctx.result = result; + rv = md_http_GET_perform(acme->http, acme->url, NULL, update_directory, &ctx); + + if (APR_SUCCESS != rv && APR_SUCCESS == result->status) { + /* If the result reports no error, we never got a response from the server */ + md_result_printf(result, rv, + "Unsuccessful in contacting ACME server at <%s>. If this problem persists, " + "please check your network connectivity from your Apache server to the " + "ACME server. Also, older servers might have trouble verifying the certificates " + "of the ACME server. You can check if you are able to contact it manually via the " + "curl command. Sometimes, the ACME server might be down for maintenance, " + "so failing to contact it is not an immediate problem. 
Apache will " + "continue retrying this.", acme->url); + md_result_log(result, MD_LOG_WARNING); + } + return rv; +} + + diff --git a/modules/md/md_acme.h b/modules/md/md_acme.h index 2dcbee6..f28f2b6 100644 --- a/modules/md/md_acme.h +++ b/modules/md/md_acme.h @@ -26,14 +26,21 @@ struct md_json_t; struct md_pkey_t; struct md_t; struct md_acme_acct_t; -struct md_proto_t; +struct md_acmev2_acct_t; struct md_store_t; +struct md_result_t; #define MD_PROTO_ACME "ACME" #define MD_AUTHZ_CHA_HTTP_01 "http-01" #define MD_AUTHZ_CHA_SNI_01 "tls-sni-01" +#define MD_ACME_VERSION_UNKNOWN 0x0 +#define MD_ACME_VERSION_1 0x010000 +#define MD_ACME_VERSION_2 0x020000 + +#define MD_ACME_VERSION_MAJOR(i) (((i)&0xFF0000) >> 16) + typedef enum { MD_ACME_S_UNKNOWN, /* MD has not been analysed yet */ MD_ACME_S_REGISTERED, /* MD is registered at CA, but not more */ @@ -46,30 +53,92 @@ typedef enum { typedef struct md_acme_t md_acme_t; +typedef struct md_acme_req_t md_acme_req_t; +/** + * Request callback on a successful HTTP response (status 2xx). + */ +typedef apr_status_t md_acme_req_res_cb(md_acme_t *acme, + const struct md_http_response_t *res, void *baton); + +/** + * Request callback to initialize before sending. May be invoked more than once in + * case of retries. + */ +typedef apr_status_t md_acme_req_init_cb(md_acme_req_t *req, void *baton); + +/** + * Request callback on a successful response (HTTP response code 2xx) and content + * type matching application/.*json. + */ +typedef apr_status_t md_acme_req_json_cb(md_acme_t *acme, apr_pool_t *p, + const apr_table_t *headers, + struct md_json_t *jbody, void *baton); + +/** + * Request callback on detected errors. + */ +typedef apr_status_t md_acme_req_err_cb(md_acme_req_t *req, + const struct md_result_t *result, void *baton); + + +typedef apr_status_t md_acme_new_nonce_fn(md_acme_t *acme); +typedef apr_status_t md_acme_req_init_fn(md_acme_req_t *req, struct md_json_t *jpayload); + +typedef apr_status_t md_acme_post_fn(md_acme_t *acme, + md_acme_req_init_cb *on_init, + md_acme_req_json_cb *on_json, + md_acme_req_res_cb *on_res, + md_acme_req_err_cb *on_err, + void *baton); + struct md_acme_t { const char *url; /* directory url of the ACME service */ const char *sname; /* short name for the service, not necessarily unique */ apr_pool_t *p; const char *user_agent; const char *proxy_url; - struct md_acme_acct_t *acct; - struct md_pkey_t *acct_key; + const char *ca_file; - const char *new_authz; - const char *new_cert; - const char *new_reg; - const char *revoke_cert; + const char *acct_id; /* local storage id account was loaded from or NULL */ + struct md_acme_acct_t *acct; /* account at ACME server to use for requests */ + struct md_pkey_t *acct_key; /* private RSA key belonging to account */ + + int version; /* as detected from the server */ + union { + struct { /* obsolete */ + const char *new_authz; + const char *new_cert; + const char *new_reg; + const char *revoke_cert; + + } v1; + struct { + const char *new_account; + const char *new_order; + const char *key_change; + const char *revoke_cert; + const char *new_nonce; + } v2; + } api; + const char *ca_agreement; + const char *acct_name; + int eab_required; + + md_acme_new_nonce_fn *new_nonce_fn; + md_acme_req_init_fn *req_init_fn; + md_acme_post_fn *post_new_account_fn; struct md_http_t *http; const char *nonce; int max_retries; + struct md_result_t *last; /* result of last request */ }; /** * Global init, call once at start up. 
*/ -apr_status_t md_acme_init(apr_pool_t *pool, const char *base_version); +apr_status_t md_acme_init(apr_pool_t *pool, const char *base_version, int init_ssl); /** * Create a new ACME server instance. If path is not NULL, will use that directory @@ -82,39 +151,68 @@ apr_status_t md_acme_init(apr_pool_t *pool, const char *base_version); * @param proxy_url optional url of a HTTP(S) proxy to use */ apr_status_t md_acme_create(md_acme_t **pacme, apr_pool_t *p, const char *url, - const char *proxy_url); + const char *proxy_url, const char *ca_file); /** * Contact the ACME server and retrieve its directory information. * * @param acme the ACME server to contact */ -apr_status_t md_acme_setup(md_acme_t *acme); +apr_status_t md_acme_setup(md_acme_t *acme, struct md_result_t *result); + +void md_acme_report_result(md_acme_t *acme, apr_status_t rv, struct md_result_t *result); /**************************************************************************************************/ /* account handling */ -#define MD_ACME_ACCT_STAGED "staged" +/** + * Clear any existing account data from acme instance. + */ +void md_acme_clear_acct(md_acme_t *acme); + +apr_status_t md_acme_POST_new_account(md_acme_t *acme, + md_acme_req_init_cb *on_init, + md_acme_req_json_cb *on_json, + md_acme_req_res_cb *on_res, + md_acme_req_err_cb *on_err, + void *baton); -apr_status_t md_acme_acct_load(struct md_acme_acct_t **pacct, struct md_pkey_t **ppkey, - struct md_store_t *store, md_store_group_t group, - const char *name, apr_pool_t *p); +/** + * Get the local name of the account currently used by the acme instance. + * Will be NULL if no account has been setup successfully. + */ +const char *md_acme_acct_id_get(md_acme_t *acme); +const char *md_acme_acct_url_get(md_acme_t *acme); /** * Specify the account to use by name in local store. On success, the account - * the "current" one used by the acme instance. + * is the "current" one used by the acme instance. + * @param acme the acme instance to set the account for + * @param store the store to load accounts from + * @param p pool for allocations + * @param acct_id name of the account to load */ apr_status_t md_acme_use_acct(md_acme_t *acme, struct md_store_t *store, apr_pool_t *p, const char *acct_id); -apr_status_t md_acme_use_acct_staged(md_acme_t *acme, struct md_store_t *store, - md_t *md, apr_pool_t *p); +/** + * Specify the account to use for a specific MD by name in local store. + * On success, the account is the "current" one used by the acme instance. + * @param acme the acme instance to set the account for + * @param store the store to load accounts from + * @param p pool for allocations + * @param acct_id name of the account to load + * @param md the MD the account shall be used for + */ +apr_status_t md_acme_use_acct_for_md(md_acme_t *acme, struct md_store_t *store, + apr_pool_t *p, const char *acct_id, + const md_t *md); /** * Get the local name of the account currently used by the acme instance. * Will be NULL if no account has been setup successfully. */ -const char *md_acme_get_acct_id(md_acme_t *acme); +const char *md_acme_acct_id_get(md_acme_t *acme); /** * Agree to the given Terms-of-Service url for the current account. @@ -136,78 +234,23 @@ apr_status_t md_acme_agree(md_acme_t *acme, apr_pool_t *p, const char *tos); apr_status_t md_acme_check_agreement(md_acme_t *acme, apr_pool_t *p, const char *agreement, const char **prequired); -/** - * Get the ToS agreement for current account. 
- */ -const char *md_acme_get_agreement(md_acme_t *acme); - - -/** - * Find an existing account in the local store. On APR_SUCCESS, the acme - * instance will have a current, validated account to use. - */ -apr_status_t md_acme_find_acct(md_acme_t *acme, struct md_store_t *store, apr_pool_t *p); - -/** - * Create a new account at the ACME server. The - * new account is the one used by the acme instance afterwards, on success. - */ -apr_status_t md_acme_create_acct(md_acme_t *acme, apr_pool_t *p, apr_array_header_t *contacts, - const char *agreement); - -apr_status_t md_acme_acct_save(struct md_store_t *store, apr_pool_t *p, md_acme_t *acme, - struct md_acme_acct_t *acct, struct md_pkey_t *acct_key); +apr_status_t md_acme_save_acct(md_acme_t *acme, apr_pool_t *p, struct md_store_t *store); -apr_status_t md_acme_save(md_acme_t *acme, struct md_store_t *store, apr_pool_t *p); - -apr_status_t md_acme_acct_save_staged(md_acme_t *acme, struct md_store_t *store, - md_t *md, apr_pool_t *p); - /** - * Delete the current account at the ACME server and remove it from store. + * Deactivate the current account at the ACME server.. */ -apr_status_t md_acme_delete_acct(md_acme_t *acme, struct md_store_t *store, apr_pool_t *p); - -/** - * Delete the account from the local store without contacting the ACME server. - */ -apr_status_t md_acme_unstore_acct(struct md_store_t *store, apr_pool_t *p, const char *acct_id); +apr_status_t md_acme_acct_deactivate(md_acme_t *acme, apr_pool_t *p); /**************************************************************************************************/ /* request handling */ -/** - * Request callback on a successful HTTP response (status 2xx). - */ -typedef apr_status_t md_acme_req_res_cb(md_acme_t *acme, - const struct md_http_response_t *res, void *baton); - -/** - * A request against an ACME server - */ -typedef struct md_acme_req_t md_acme_req_t; - -/** - * Request callback to initialize before sending. May be invoked more than once in - * case of retries. - */ -typedef apr_status_t md_acme_req_init_cb(md_acme_req_t *req, void *baton); - -/** - * Request callback on a successful response (HTTP response code 2xx) and content - * type matching application/.*json. 
- */ -typedef apr_status_t md_acme_req_json_cb(md_acme_t *acme, apr_pool_t *p, - const apr_table_t *headers, - struct md_json_t *jbody, void *baton); - struct md_acme_req_t { md_acme_t *acme; /* the ACME server to talk to */ apr_pool_t *p; /* pool for the request duration */ const char *url; /* url to POST the request to */ const char *method; /* HTTP method to use */ - apr_table_t *prot_hdrs; /* JWS headers needing protection (nonce) */ + struct md_json_t *prot_fields; /* JWS protected fields */ struct md_json_t *req_json; /* JSON to be POSTed in request body */ apr_table_t *resp_hdrs; /* HTTP response headers */ @@ -218,14 +261,19 @@ struct md_acme_req_t { md_acme_req_init_cb *on_init; /* callback to initialize the request before submit */ md_acme_req_json_cb *on_json; /* callback on successful JSON response */ md_acme_req_res_cb *on_res; /* callback on generic HTTP response */ + md_acme_req_err_cb *on_err; /* callback on encountered error */ int max_retries; /* how often this might be retried */ void *baton; /* userdata for callbacks */ + struct md_result_t *result; /* result of this request */ }; +apr_status_t md_acme_req_body_init(md_acme_req_t *req, struct md_json_t *payload); + apr_status_t md_acme_GET(md_acme_t *acme, const char *url, md_acme_req_init_cb *on_init, md_acme_req_json_cb *on_json, md_acme_req_res_cb *on_res, + md_acme_req_err_cb *on_err, void *baton); /** * Perform a POST against the ACME url. If a on_json callback is given and @@ -245,14 +293,9 @@ apr_status_t md_acme_POST(md_acme_t *acme, const char *url, md_acme_req_init_cb *on_init, md_acme_req_json_cb *on_json, md_acme_req_res_cb *on_res, + md_acme_req_err_cb *on_err, void *baton); -apr_status_t md_acme_GET(md_acme_t *acme, const char *url, - md_acme_req_init_cb *on_init, - md_acme_req_json_cb *on_json, - md_acme_req_res_cb *on_res, - void *baton); - /** * Retrieve a JSON resource from the ACME server */ @@ -264,4 +307,11 @@ apr_status_t md_acme_req_body_init(md_acme_req_t *req, struct md_json_t *jpayloa apr_status_t md_acme_protos_add(struct apr_hash_t *protos, apr_pool_t *p); +/** + * Return != 0 iff the given problem identifier is an ACME error string + * indicating something is wrong with the input values, e.g. from our + * configuration. + */ +int md_acme_problem_is_input_related(const char *problem); + #endif /* md_acme_h */ diff --git a/modules/md/md_acme_acct.c b/modules/md/md_acme_acct.c index c4a2b5f..f3e043e 100644 --- a/modules/md/md_acme_acct.c +++ b/modules/md/md_acme_acct.c @@ -30,6 +30,7 @@ #include "md_json.h" #include "md_jws.h" #include "md_log.h" +#include "md_result.h" #include "md_store.h" #include "md_util.h" #include "md_version.h" @@ -38,15 +39,12 @@ #include "md_acme_acct.h" static apr_status_t acct_make(md_acme_acct_t **pacct, apr_pool_t *p, - const char *ca_url, const char *id, apr_array_header_t *contacts) + const char *ca_url, apr_array_header_t *contacts) { md_acme_acct_t *acct; acct = apr_pcalloc(p, sizeof(*acct)); - - acct->id = id? 
apr_pstrdup(p, id) : NULL; acct->ca_url = ca_url; - if (!contacts || apr_is_empty_array(contacts)) { acct->contacts = apr_array_make(p, 5, sizeof(const char *)); } @@ -72,87 +70,118 @@ static const char *mk_acct_pattern(apr_pool_t *p, md_acme_t *acme) /**************************************************************************************************/ /* json load/save */ -static md_json_t *acct_to_json(md_acme_acct_t *acct, apr_pool_t *p) +static md_acme_acct_st acct_st_from_str(const char *s) +{ + if (s) { + if (!strcmp("valid", s)) { + return MD_ACME_ACCT_ST_VALID; + } + else if (!strcmp("deactivated", s)) { + return MD_ACME_ACCT_ST_DEACTIVATED; + } + else if (!strcmp("revoked", s)) { + return MD_ACME_ACCT_ST_REVOKED; + } + } + return MD_ACME_ACCT_ST_UNKNOWN; +} + +md_json_t *md_acme_acct_to_json(md_acme_acct_t *acct, apr_pool_t *p) { md_json_t *jacct; + const char *s; assert(acct); jacct = md_json_create(p); - md_json_sets(acct->id, jacct, MD_KEY_ID, NULL); - md_json_setb(acct->disabled, jacct, MD_KEY_DISABLED, NULL); - md_json_sets(acct->url, jacct, MD_KEY_URL, NULL); - md_json_sets(acct->ca_url, jacct, MD_KEY_CA_URL, NULL); - md_json_setj(acct->registration, jacct, MD_KEY_REGISTRATION, NULL); - if (acct->agreement) { - md_json_sets(acct->agreement, jacct, MD_KEY_AGREEMENT, NULL); - } - + switch (acct->status) { + case MD_ACME_ACCT_ST_VALID: + s = "valid"; + break; + case MD_ACME_ACCT_ST_DEACTIVATED: + s = "deactivated"; + break; + case MD_ACME_ACCT_ST_REVOKED: + s = "revoked"; + break; + default: + s = NULL; + break; + } + if (s) md_json_sets(s, jacct, MD_KEY_STATUS, NULL); + if (acct->url) md_json_sets(acct->url, jacct, MD_KEY_URL, NULL); + if (acct->ca_url) md_json_sets(acct->ca_url, jacct, MD_KEY_CA_URL, NULL); + if (acct->contacts) md_json_setsa(acct->contacts, jacct, MD_KEY_CONTACT, NULL); + if (acct->registration) md_json_setj(acct->registration, jacct, MD_KEY_REGISTRATION, NULL); + if (acct->agreement) md_json_sets(acct->agreement, jacct, MD_KEY_AGREEMENT, NULL); + if (acct->orders) md_json_sets(acct->orders, jacct, MD_KEY_ORDERS, NULL); + if (acct->eab_kid) md_json_sets(acct->eab_kid, jacct, MD_KEY_EAB, MD_KEY_KID, NULL); + if (acct->eab_hmac) md_json_sets(acct->eab_hmac, jacct, MD_KEY_EAB, MD_KEY_HMAC, NULL); + return jacct; } -static apr_status_t acct_from_json(md_acme_acct_t **pacct, md_json_t *json, apr_pool_t *p) +apr_status_t md_acme_acct_from_json(md_acme_acct_t **pacct, md_json_t *json, apr_pool_t *p) { apr_status_t rv = APR_EINVAL; md_acme_acct_t *acct; - int disabled; - const char *ca_url, *url, *id; + md_acme_acct_st status = MD_ACME_ACCT_ST_UNKNOWN; + const char *ca_url, *url; apr_array_header_t *contacts; - id = md_json_gets(json, MD_KEY_ID, NULL); - disabled = md_json_getb(json, MD_KEY_DISABLED, NULL); - ca_url = md_json_gets(json, MD_KEY_CA_URL, NULL); - if (!ca_url) { - md_log_perror(MD_LOG_MARK, MD_LOG_DEBUG, 0, p, "account has no CA url: %s", id); - goto out; + if (md_json_has_key(json, MD_KEY_STATUS, NULL)) { + status = acct_st_from_str(md_json_gets(json, MD_KEY_STATUS, NULL)); } - + url = md_json_gets(json, MD_KEY_URL, NULL); if (!url) { - md_log_perror(MD_LOG_MARK, MD_LOG_DEBUG, 0, p, "account has no url: %s", id); - goto out; + md_log_perror(MD_LOG_MARK, MD_LOG_DEBUG, 0, p, "account has no url"); + goto leave; } + ca_url = md_json_gets(json, MD_KEY_CA_URL, NULL); + if (!ca_url) { + md_log_perror(MD_LOG_MARK, MD_LOG_DEBUG, 0, p, "account has no CA url: %s", url); + goto leave; + } + contacts = apr_array_make(p, 5, sizeof(const char *)); - 
md_json_getsa(contacts, json, MD_KEY_REGISTRATION, MD_KEY_CONTACT, NULL); - rv = acct_make(&acct, p, ca_url, id, contacts); - if (APR_SUCCESS == rv) { - acct->disabled = disabled; - acct->url = url; + if (md_json_has_key(json, MD_KEY_CONTACT, NULL)) { + md_json_getsa(contacts, json, MD_KEY_CONTACT, NULL); + } + else { + md_json_getsa(contacts, json, MD_KEY_REGISTRATION, MD_KEY_CONTACT, NULL); + } + rv = acct_make(&acct, p, ca_url, contacts); + if (APR_SUCCESS != rv) goto leave; + + acct->status = status; + acct->url = url; + acct->agreement = md_json_gets(json, MD_KEY_AGREEMENT, NULL); + if (!acct->agreement) { + /* backward compatible check */ acct->agreement = md_json_gets(json, "terms-of-service", NULL); } + acct->orders = md_json_gets(json, MD_KEY_ORDERS, NULL); + if (md_json_has_key(json, MD_KEY_EAB, MD_KEY_KID, NULL) + && md_json_has_key(json, MD_KEY_EAB, MD_KEY_HMAC, NULL)) { + acct->eab_kid = md_json_gets(json, MD_KEY_EAB, MD_KEY_KID, NULL); + acct->eab_hmac = md_json_gets(json, MD_KEY_EAB, MD_KEY_HMAC, NULL); + } -out: +leave: *pacct = (APR_SUCCESS == rv)? acct : NULL; return rv; } -apr_status_t md_acme_acct_save_staged(md_acme_t *acme, md_store_t *store, md_t *md, apr_pool_t *p) -{ - md_acme_acct_t *acct = acme->acct; - md_json_t *jacct; - apr_status_t rv; - - jacct = acct_to_json(acct, p); - - rv = md_store_save(store, p, MD_SG_STAGING, md->name, MD_FN_ACCOUNT, MD_SV_JSON, jacct, 0); - if (APR_SUCCESS == rv) { - rv = md_store_save(store, p, MD_SG_STAGING, md->name, MD_FN_ACCT_KEY, - MD_SV_PKEY, acme->acct_key, 0); - } - return rv; -} - apr_status_t md_acme_acct_save(md_store_t *store, apr_pool_t *p, md_acme_t *acme, - md_acme_acct_t *acct, md_pkey_t *acct_key) + const char **pid, md_acme_acct_t *acct, md_pkey_t *acct_key) { md_json_t *jacct; apr_status_t rv; int i; - const char *id; - - jacct = acct_to_json(acct, p); - id = acct->id; + const char *id = pid? 
*pid : NULL; + jacct = md_acme_acct_to_json(acct, p); if (id) { rv = md_store_save(store, p, MD_SG_ACCOUNTS, id, MD_FN_ACCOUNT, MD_SV_JSON, jacct, 0); } @@ -160,23 +189,16 @@ apr_status_t md_acme_acct_save(md_store_t *store, apr_pool_t *p, md_acme_t *acme rv = APR_EAGAIN; for (i = 0; i < 1000 && APR_SUCCESS != rv; ++i) { id = mk_acct_id(p, acme, i); - md_json_sets(id, jacct, MD_KEY_ID, NULL); rv = md_store_save(store, p, MD_SG_ACCOUNTS, id, MD_FN_ACCOUNT, MD_SV_JSON, jacct, 1); } - } if (APR_SUCCESS == rv) { - acct->id = id; + if (pid) *pid = id; rv = md_store_save(store, p, MD_SG_ACCOUNTS, id, MD_FN_ACCT_KEY, MD_SV_PKEY, acct_key, 0); } return rv; } -apr_status_t md_acme_save(md_acme_t *acme, md_store_t *store, apr_pool_t *p) -{ - return md_acme_acct_save(store, p, acme, acme->acct, acme->acct_key); -} - apr_status_t md_acme_acct_load(md_acme_acct_t **pacct, md_pkey_t **ppkey, md_store_t *store, md_store_group_t group, const char *name, apr_pool_t *p) @@ -193,11 +215,11 @@ apr_status_t md_acme_acct_load(md_acme_acct_t **pacct, md_pkey_t **ppkey, goto out; } - rv = acct_from_json(pacct, json, p); + rv = md_acme_acct_from_json(pacct, json, p); if (APR_SUCCESS == rv) { rv = md_store_load(store, group, name, MD_FN_ACCT_KEY, MD_SV_PKEY, (void**)ppkey, p); if (APR_SUCCESS != rv) { - md_log_perror(MD_LOG_MARK, MD_LOG_DEBUG, 0, p, "loading key: %s", name); + md_log_perror(MD_LOG_MARK, MD_LOG_DEBUG, rv, p, "loading key: %s", name); goto out; } } @@ -212,9 +234,36 @@ out: /**************************************************************************************************/ /* Lookup */ +int md_acme_acct_matches_url(md_acme_acct_t *acct, const char *url) +{ + /* The ACME url must match exactly */ + if (!url || !acct->ca_url || strcmp(acct->ca_url, url)) return 0; + return 1; +} + +int md_acme_acct_matches_md(md_acme_acct_t *acct, const md_t *md) +{ + if (!md_acme_acct_matches_url(acct, md->ca_effective)) return 0; + /* if eab values are not mentioned, we match an account regardless + * of whether it was registered with eab or not */ + if (!md->ca_eab_kid || !md->ca_eab_hmac) { + /* No eab is only acceptable when no eab is asked for. + * This prevents someone that has no external account binding + * from re-using an account from another MDomain that was created + * with a binding. */ + return !acct->eab_kid || !acct->eab_hmac; + } + /* But if eab is asked for, we need an acct that matches exactly. + * When someone configures a new EAB, we need + * to create a new account for it. 
*/ + if (!acct->eab_kid || !acct->eab_hmac) return 0; + return !strcmp(acct->eab_kid, md->ca_eab_kid) + && !strcmp(acct->eab_hmac, md->ca_eab_hmac); +} + typedef struct { apr_pool_t *p; - md_acme_t *acme; + const md_t *md; const char *id; } find_ctx; @@ -222,232 +271,227 @@ static int find_acct(void *baton, const char *name, const char *aspect, md_store_vtype_t vtype, void *value, apr_pool_t *ptemp) { find_ctx *ctx = baton; - int disabled; - const char *ca_url, *id; - + md_acme_acct_t *acct; + apr_status_t rv; + (void)aspect; (void)ptemp; + md_log_perror(MD_LOG_MARK, MD_LOG_DEBUG, 0, ctx->p, "account candidate %s/%s", name, aspect); if (MD_SV_JSON == vtype) { - md_json_t *json = value; - - id = md_json_gets(json, MD_KEY_ID, NULL); - disabled = md_json_getb(json, MD_KEY_DISABLED, NULL); - ca_url = md_json_gets(json, MD_KEY_CA_URL, NULL); - - if (!disabled && ca_url && !strcmp(ctx->acme->url, ca_url)) { + rv = md_acme_acct_from_json(&acct, (md_json_t*)value, ptemp); + if (APR_SUCCESS != rv) goto cleanup; + + if (MD_ACME_ACCT_ST_VALID == acct->status + && (!ctx->md || md_acme_acct_matches_md(acct, ctx->md))) { md_log_perror(MD_LOG_MARK, MD_LOG_DEBUG, 0, ctx->p, - "found account %s for %s: %s, disabled=%d, ca-url=%s", - name, ctx->acme->url, id, disabled, ca_url); - ctx->id = id; + "found account %s for %s: %s, status=%d", + acct->id, ctx->md->ca_effective, aspect, acct->status); + ctx->id = apr_pstrdup(ctx->p, name); return 0; } } +cleanup: return 1; } -static apr_status_t acct_find(md_acme_acct_t **pacct, md_pkey_t **ppkey, - md_store_t *store, md_acme_t *acme, apr_pool_t *p) +static apr_status_t acct_find(const char **pid, md_acme_acct_t **pacct, md_pkey_t **ppkey, + md_store_t *store, md_store_group_t group, + const char *name_pattern, + const md_t *md, apr_pool_t *p) { apr_status_t rv; find_ctx ctx; - + + memset(&ctx, 0, sizeof(ctx)); ctx.p = p; - ctx.acme = acme; - ctx.id = NULL; - - rv = md_store_iter(find_acct, &ctx, store, p, MD_SG_ACCOUNTS, mk_acct_pattern(p, acme), - MD_FN_ACCOUNT, MD_SV_JSON); + ctx.md = md; + + rv = md_store_iter(find_acct, &ctx, store, p, group, name_pattern, MD_FN_ACCOUNT, MD_SV_JSON); if (ctx.id) { - rv = md_acme_acct_load(pacct, ppkey, store, MD_SG_ACCOUNTS, ctx.id, p); + *pid = ctx.id; + rv = md_acme_acct_load(pacct, ppkey, store, group, ctx.id, p); + md_log_perror(MD_LOG_MARK, MD_LOG_DEBUG, rv, p, "acct_find: got account %s", ctx.id); } else { *pacct = NULL; rv = APR_ENOENT; + md_log_perror(MD_LOG_MARK, MD_LOG_TRACE1, 0, p, "acct_find: none found"); } - md_log_perror(MD_LOG_MARK, MD_LOG_DEBUG, rv, p, - "acct_find %s", (*pacct)? 
(*pacct)->id : "NULL"); return rv; } -/**************************************************************************************************/ -/* Register a new account */ - -typedef struct { - md_acme_t *acme; - apr_pool_t *p; -} acct_ctx_t; - -static apr_status_t on_init_acct_new(md_acme_req_t *req, void *baton) +static apr_status_t acct_find_and_verify(md_store_t *store, md_store_group_t group, + const char *name_pattern, + md_acme_t *acme, const md_t *md, + apr_pool_t *p) { - acct_ctx_t *ctx = baton; - md_json_t *jpayload; + md_acme_acct_t *acct; + md_pkey_t *pkey; + const char *id; + apr_status_t rv; - jpayload = md_json_create(req->p); - md_json_sets("new-reg", jpayload, MD_KEY_RESOURCE, NULL); - md_json_setsa(ctx->acme->acct->contacts, jpayload, MD_KEY_CONTACT, NULL); - if (ctx->acme->acct->agreement) { - md_json_sets(ctx->acme->acct->agreement, jpayload, MD_KEY_AGREEMENT, NULL); - } - - return md_acme_req_body_init(req, jpayload); -} + rv = acct_find(&id, &acct, &pkey, store, group, name_pattern, md, p); + if (APR_SUCCESS == rv) { + md_log_perror(MD_LOG_MARK, MD_LOG_TRACE1, 0, p, "acct_find_and_verify: found %s", + id); + acme->acct_id = (MD_SG_STAGING == group)? NULL : id; + acme->acct = acct; + acme->acct_key = pkey; + rv = md_acme_acct_validate(acme, (MD_SG_STAGING == group)? NULL : store, p); + md_log_perror(MD_LOG_MARK, MD_LOG_TRACE1, rv, p, "acct_find_and_verify: verified %s", + id); -static apr_status_t acct_upd(md_acme_t *acme, apr_pool_t *p, - const apr_table_t *hdrs, md_json_t *body, void *baton) -{ - acct_ctx_t *ctx = baton; - apr_status_t rv = APR_SUCCESS; - md_acme_acct_t *acct = acme->acct; - - if (!acct->url) { - const char *location = apr_table_get(hdrs, "location"); - if (!location) { - md_log_perror(MD_LOG_MARK, MD_LOG_WARNING, APR_EINVAL, p, "new acct without location"); - return APR_EINVAL; - } - acct->url = apr_pstrdup(ctx->p, location); - } - if (!acct->tos_required) { - acct->tos_required = md_link_find_relation(hdrs, ctx->p, "terms-of-service"); - if (acct->tos_required) { - md_log_perror(MD_LOG_MARK, MD_LOG_DEBUG, 0, p, - "server requires agreement to <%s>", acct->tos_required); + if (APR_SUCCESS != rv) { + acme->acct_id = NULL; + acme->acct = NULL; + acme->acct_key = NULL; + if (APR_STATUS_IS_ENOENT(rv)) { + /* verification failed and account has been disabled. + Indicate to caller that he may try again. 
*/ + rv = APR_EAGAIN; + } } } - - apr_array_clear(acct->contacts); - md_json_getsa(acct->contacts, body, MD_KEY_CONTACT, NULL); - acct->registration = md_json_clone(ctx->p, body); - - md_log_perror(MD_LOG_MARK, MD_LOG_DEBUG, rv, p, "updated acct %s", acct->url); return rv; } -static apr_status_t acct_register(md_acme_t *acme, apr_pool_t *p, - apr_array_header_t *contacts, const char *agreement) +apr_status_t md_acme_find_acct_for_md(md_acme_t *acme, md_store_t *store, const md_t *md) { apr_status_t rv; - md_pkey_t *pkey; - const char *err = NULL, *uri; - md_pkey_spec_t spec; - int i; - - md_log_perror(MD_LOG_MARK, MD_LOG_DEBUG, 0, p, "create new account"); - if (agreement) { - if (APR_SUCCESS != (rv = md_util_abs_uri_check(acme->p, agreement, &err))) { - md_log_perror(MD_LOG_MARK, MD_LOG_ERR, 0, p, - "invalid agreement uri (%s): %s", err, agreement); - goto out; - } + while (APR_EAGAIN == (rv = acct_find_and_verify(store, MD_SG_ACCOUNTS, + mk_acct_pattern(acme->p, acme), + acme, md, acme->p))) { + /* nop */ } - for (i = 0; i < contacts->nelts; ++i) { - uri = APR_ARRAY_IDX(contacts, i, const char *); - if (APR_SUCCESS != (rv = md_util_abs_uri_check(acme->p, uri, &err))) { - md_log_perror(MD_LOG_MARK, MD_LOG_ERR, 0, p, - "invalid contact uri (%s): %s", err, uri); - goto out; + + if (APR_STATUS_IS_ENOENT(rv)) { + /* No suitable account found in MD_SG_ACCOUNTS. Maybe a new account + * can already be found in MD_SG_STAGING? */ + md_log_perror(MD_LOG_MARK, MD_LOG_DEBUG, 0, acme->p, + "no account found, looking in STAGING"); + rv = acct_find_and_verify(store, MD_SG_STAGING, "*", acme, md, acme->p); + if (APR_EAGAIN == rv) { + rv = APR_ENOENT; } } - - spec.type = MD_PKEY_TYPE_RSA; - spec.params.rsa.bits = MD_ACME_ACCT_PKEY_BITS; - - if (APR_SUCCESS == (rv = md_pkey_gen(&pkey, acme->p, &spec)) - && APR_SUCCESS == (rv = acct_make(&acme->acct, p, acme->url, NULL, contacts))) { - acct_ctx_t ctx; + return rv; +} - acme->acct_key = pkey; - if (agreement) { - acme->acct->agreement = agreement; - } +apr_status_t md_acme_acct_id_for_md(const char **pid, md_store_t *store, + md_store_group_t group, const md_t *md, + apr_pool_t *p) +{ + apr_status_t rv; + find_ctx ctx; - ctx.acme = acme; - ctx.p = p; - rv = md_acme_POST(acme, acme->new_reg, on_init_acct_new, acct_upd, NULL, &ctx); - if (APR_SUCCESS == rv) { - md_log_perror(MD_LOG_MARK, MD_LOG_INFO, 0, p, - "registered new account %s", acme->acct->url); - } - } + memset(&ctx, 0, sizeof(ctx)); + ctx.p = p; + ctx.md = md; -out: - if (APR_SUCCESS != rv && acme->acct) { - acme->acct = NULL; + rv = md_store_iter(find_acct, &ctx, store, p, group, "*", MD_FN_ACCOUNT, MD_SV_JSON); + if (ctx.id) { + *pid = ctx.id; + rv = APR_SUCCESS; } + md_log_perror(MD_LOG_MARK, MD_LOG_DEBUG, rv, p, "acct_id_for_md %s -> %s", md->name, *pid); return rv; } /**************************************************************************************************/ -/* acct validation */ +/* acct operation context */ +typedef struct { + md_acme_t *acme; + apr_pool_t *p; + const char *agreement; + const char *eab_kid; + const char *eab_hmac; +} acct_ctx_t; -static apr_status_t on_init_acct_valid(md_acme_req_t *req, void *baton) -{ - md_json_t *jpayload; +/**************************************************************************************************/ +/* acct update */ +static apr_status_t on_init_acct_upd(md_acme_req_t *req, void *baton) +{ (void)baton; - jpayload = md_json_create(req->p); - md_json_sets("reg", jpayload, MD_KEY_RESOURCE, NULL); - - return md_acme_req_body_init(req, 
jpayload); + return md_acme_req_body_init(req, NULL); } -static apr_status_t acct_valid(md_acme_t *acme, apr_pool_t *p, const apr_table_t *hdrs, - md_json_t *body, void *baton) +static apr_status_t acct_upd(md_acme_t *acme, apr_pool_t *p, + const apr_table_t *hdrs, md_json_t *body, void *baton) { - md_acme_acct_t *acct = acme->acct; + acct_ctx_t *ctx = baton; apr_status_t rv = APR_SUCCESS; - const char *body_str; - const char *tos_required; + md_acme_acct_t *acct = acme->acct; + + if (md_log_is_level(p, MD_LOG_TRACE2)) { + md_log_perror(MD_LOG_MARK, MD_LOG_TRACE2, 0, acme->p, "acct update response: %s", + md_json_writep(body, p, MD_JSON_FMT_COMPACT)); + } + + if (!acct->url) { + const char *location = apr_table_get(hdrs, "location"); + if (!location) { + md_log_perror(MD_LOG_MARK, MD_LOG_WARNING, APR_EINVAL, p, "new acct without location"); + return APR_EINVAL; + } + acct->url = apr_pstrdup(ctx->p, location); + } - (void)p; - (void)baton; apr_array_clear(acct->contacts); - md_json_getsa(acct->contacts, body, MD_KEY_CONTACT, NULL); - acct->registration = md_json_clone(acme->p, body); - - body_str = md_json_writep(body, acme->p, MD_JSON_FMT_INDENT); - md_log_perror(MD_LOG_MARK, MD_LOG_DEBUG, rv, acme->p, "validate acct %s: %s", - acct->url, body_str ? body_str : ""); - - acct->agreement = md_json_gets(acct->registration, MD_KEY_AGREEMENT, NULL); - tos_required = md_link_find_relation(hdrs, acme->p, "terms-of-service"); - - if (tos_required) { - if (!acct->agreement || strcmp(tos_required, acct->agreement)) { - md_log_perror(MD_LOG_MARK, MD_LOG_DEBUG, rv, acme->p, - "needs to agree to terms-of-service '%s', " - "has already agreed to '%s'", - tos_required, acct->agreement); - } - acct->tos_required = tos_required; + md_json_dupsa(acct->contacts, acme->p, body, MD_KEY_CONTACT, NULL); + if (md_json_has_key(body, MD_KEY_STATUS, NULL)) { + acct->status = acct_st_from_str(md_json_gets(body, MD_KEY_STATUS, NULL)); + } + if (md_json_has_key(body, MD_KEY_AGREEMENT, NULL)) { + acct->agreement = md_json_dups(acme->p, body, MD_KEY_AGREEMENT, NULL); } + if (md_json_has_key(body, MD_KEY_ORDERS, NULL)) { + acct->orders = md_json_dups(acme->p, body, MD_KEY_ORDERS, NULL); + } + if (ctx->eab_kid && ctx->eab_hmac) { + acct->eab_kid = ctx->eab_kid; + acct->eab_hmac = ctx->eab_hmac; + } + acct->registration = md_json_clone(ctx->p, body); + md_log_perror(MD_LOG_MARK, MD_LOG_DEBUG, rv, p, "updated acct %s", acct->url); return rv; } -static apr_status_t md_acme_validate_acct(md_acme_t *acme) +apr_status_t md_acme_acct_update(md_acme_t *acme) { - md_log_perror(MD_LOG_MARK, MD_LOG_DEBUG, 0, acme->p, "acct validation"); + acct_ctx_t ctx; + + md_log_perror(MD_LOG_MARK, MD_LOG_DEBUG, 0, acme->p, "acct update"); if (!acme->acct) { return APR_EINVAL; } - return md_acme_POST(acme, acme->acct->url, on_init_acct_valid, acct_valid, NULL, NULL); + memset(&ctx, 0, sizeof(ctx)); + ctx.acme = acme; + ctx.p = acme->p; + return md_acme_POST(acme, acme->acct->url, on_init_acct_upd, acct_upd, NULL, NULL, &ctx); } -/**************************************************************************************************/ -/* account setup */ - -static apr_status_t acct_validate(md_acme_t *acme, md_store_t *store, apr_pool_t *p) +apr_status_t md_acme_acct_validate(md_acme_t *acme, md_store_t *store, apr_pool_t *p) { apr_status_t rv; - if (APR_SUCCESS != (rv = md_acme_validate_acct(acme))) { - if (acme->acct && (APR_ENOENT == rv || APR_EACCES == rv)) { - if (!acme->acct->disabled) { - acme->acct->disabled = 1; + if (APR_SUCCESS != (rv = 
md_acme_acct_update(acme))) { + md_log_perror(MD_LOG_MARK, MD_LOG_TRACE1, rv, acme->p, + "acct update failed for %s", acme->acct->url); + if (APR_EINVAL == rv && (acme->acct->agreement || !acme->ca_agreement)) { + /* Sadly, some proprietary ACME servers choke on empty POSTs + * on accounts. Try a faked ToS agreement. */ + md_log_perror(MD_LOG_MARK, MD_LOG_TRACE1, rv, acme->p, + "trying acct update via ToS agreement"); + rv = md_acme_agree(acme, p, "accepted"); + } + if (acme->acct && (APR_ENOENT == rv || APR_EACCES == rv || APR_EINVAL == rv)) { + if (MD_ACME_ACCT_ST_VALID == acme->acct->status) { + acme->acct->status = MD_ACME_ACCT_ST_UNKNOWN; if (store) { - md_acme_save(acme, store, p); + md_acme_acct_save(store, p, acme, &acme->acct_id, acme->acct, acme->acct_key); } } acme->acct = NULL; @@ -458,133 +502,187 @@ static apr_status_t acct_validate(md_acme_t *acme, md_store_t *store, apr_pool_t return rv; } -apr_status_t md_acme_use_acct(md_acme_t *acme, md_store_t *store, - apr_pool_t *p, const char *acct_id) +/**************************************************************************************************/ +/* Register a new account */ + +static apr_status_t get_eab(md_json_t **peab, md_acme_req_t *req, const char *kid, + const char *hmac64, md_pkey_t *account_key, + const char *url) { - md_acme_acct_t *acct; - md_pkey_t *pkey; + md_json_t *eab, *prot_fields, *jwk; + md_data_t payload, hmac_key; apr_status_t rv; - - if (APR_SUCCESS == (rv = md_acme_acct_load(&acct, &pkey, - store, MD_SG_ACCOUNTS, acct_id, acme->p))) { - if (acct->ca_url && !strcmp(acct->ca_url, acme->url)) { - acme->acct = acct; - acme->acct_key = pkey; - rv = acct_validate(acme, store, p); - } - else { - /* account is from a nother server or, more likely, from another - * protocol endpoint on the same server */ - rv = APR_ENOENT; - } + + prot_fields = md_json_create(req->p); + md_json_sets(url, prot_fields, "url", NULL); + md_json_sets(kid, prot_fields, "kid", NULL); + + rv = md_jws_get_jwk(&jwk, req->p, account_key); + if (APR_SUCCESS != rv) goto cleanup; + + md_data_null(&payload); + payload.data = md_json_writep(jwk, req->p, MD_JSON_FMT_COMPACT); + if (!payload.data) { + rv = APR_EINVAL; + goto cleanup; } - return rv; -} + payload.len = strlen(payload.data); -apr_status_t md_acme_use_acct_staged(md_acme_t *acme, struct md_store_t *store, - md_t *md, apr_pool_t *p) -{ - md_acme_acct_t *acct; - md_pkey_t *pkey; - apr_status_t rv; - - if (APR_SUCCESS == (rv = md_acme_acct_load(&acct, &pkey, - store, MD_SG_STAGING, md->name, acme->p))) { - acme->acct = acct; - acme->acct_key = pkey; - rv = acct_validate(acme, NULL, p); + md_util_base64url_decode(&hmac_key, hmac64, req->p); + if (!hmac_key.len) { + rv = APR_EINVAL; + md_result_problem_set(req->result, rv, "apache:eab-hmac-invalid", + "external account binding HMAC value is not valid base64", NULL); + goto cleanup; } + + rv = md_jws_hmac(&eab, req->p, &payload, prot_fields, &hmac_key); + if (APR_SUCCESS != rv) { + md_result_problem_set(req->result, rv, "apache:eab-hmac-fail", + "external account binding MAC could not be computed", NULL); + } + +cleanup: + *peab = (APR_SUCCESS == rv)? eab : NULL; return rv; } -const char *md_acme_get_acct_id(md_acme_t *acme) +static apr_status_t on_init_acct_new(md_acme_req_t *req, void *baton) { - return acme->acct? acme->acct->id : NULL; -} + acct_ctx_t *ctx = baton; + md_json_t *jpayload, *jeab; + apr_status_t rv; -const char *md_acme_get_agreement(md_acme_t *acme) -{ - return acme->acct? 
acme->acct->agreement : NULL; -} + jpayload = md_json_create(req->p); + md_json_setsa(ctx->acme->acct->contacts, jpayload, MD_KEY_CONTACT, NULL); + if (ctx->agreement) { + md_json_setb(1, jpayload, "termsOfServiceAgreed", NULL); + } + if (ctx->eab_kid && ctx->eab_hmac) { + rv = get_eab(&jeab, req, ctx->eab_kid, ctx->eab_hmac, + req->acme->acct_key, req->url); + if (APR_SUCCESS != rv) goto cleanup; + md_json_setj(jeab, jpayload, "externalAccountBinding", NULL); + } + rv = md_acme_req_body_init(req, jpayload); + +cleanup: + return rv; +} -apr_status_t md_acme_find_acct(md_acme_t *acme, md_store_t *store, apr_pool_t *p) +apr_status_t md_acme_acct_register(md_acme_t *acme, md_store_t *store, + const md_t *md, apr_pool_t *p) { - md_acme_acct_t *acct; - md_pkey_t *pkey; apr_status_t rv; + md_pkey_t *pkey; + const char *err = NULL, *uri; + md_pkey_spec_t spec; + int i; + acct_ctx_t ctx; - while (APR_SUCCESS == acct_find(&acct, &pkey, store, acme, acme->p)) { - acme->acct = acct; - acme->acct_key = pkey; - rv = acct_validate(acme, store, p); - - if (APR_SUCCESS == rv) { - return rv; + md_log_perror(MD_LOG_MARK, MD_LOG_DEBUG, 0, p, "create new account"); + + memset(&ctx, 0, sizeof(ctx)); + ctx.acme = acme; + ctx.p = p; + /* The agreement URL is submitted when the ACME server announces Terms-of-Service + * in its directory meta data. The magic value "accepted" will always use the + * advertised URL. */ + ctx.agreement = NULL; + if (acme->ca_agreement && md->ca_agreement) { + ctx.agreement = !strcmp("accepted", md->ca_agreement)? + acme->ca_agreement : md->ca_agreement; + } + + if (ctx.agreement) { + if (APR_SUCCESS != (rv = md_util_abs_uri_check(acme->p, ctx.agreement, &err))) { + md_log_perror(MD_LOG_MARK, MD_LOG_ERR, 0, p, + "invalid agreement uri (%s): %s", err, ctx.agreement); + goto out; } - else { - acme->acct = NULL; - acme->acct_key = NULL; - if (!APR_STATUS_IS_ENOENT(rv)) { - /* encountered error with server */ - return rv; + } + ctx.eab_kid = md->ca_eab_kid; + ctx.eab_hmac = md->ca_eab_hmac; + + for (i = 0; i < md->contacts->nelts; ++i) { + uri = APR_ARRAY_IDX(md->contacts, i, const char *); + if (APR_SUCCESS != (rv = md_util_abs_uri_check(acme->p, uri, &err))) { + md_log_perror(MD_LOG_MARK, MD_LOG_ERR, 0, p, + "invalid contact uri (%s): %s", err, uri); + goto out; + } + } + + /* If there is no key selected yet, try to find an existing one for the same host. + * Let's Encrypt identifies accounts by their key for their ACMEv1 and v2 services. + * Although the account appears on both services with different urls, it is + * internally the same one. + * I think this is beneficial if someone migrates from ACMEv1 to v2 and not a leak + * of identifying information. 
+ */ + if (!acme->acct_key) { + find_ctx fctx; + + memset(&fctx, 0, sizeof(fctx)); + fctx.p = p; + fctx.md = md; + + md_store_iter(find_acct, &fctx, store, p, MD_SG_ACCOUNTS, + mk_acct_pattern(p, acme), MD_FN_ACCOUNT, MD_SV_JSON); + if (fctx.id) { + rv = md_store_load(store, MD_SG_ACCOUNTS, fctx.id, MD_FN_ACCT_KEY, MD_SV_PKEY, + (void**)&acme->acct_key, p); + if (APR_SUCCESS == rv) { + md_log_perror(MD_LOG_MARK, MD_LOG_DEBUG, rv, p, + "reusing key from account %s", fctx.id); + } + else { + acme->acct_key = NULL; } } } - return APR_ENOENT; -} - -apr_status_t md_acme_create_acct(md_acme_t *acme, apr_pool_t *p, apr_array_header_t *contacts, - const char *agreement) -{ - return acct_register(acme, p, contacts, agreement); -} - -/**************************************************************************************************/ -/* Delete the account */ - -apr_status_t md_acme_unstore_acct(md_store_t *store, apr_pool_t *p, const char *acct_id) -{ - apr_status_t rv = APR_SUCCESS; - rv = md_store_remove(store, MD_SG_ACCOUNTS, acct_id, MD_FN_ACCOUNT, p, 1); + /* If we still have no key, generate a new one */ + if (!acme->acct_key) { + spec.type = MD_PKEY_TYPE_RSA; + spec.params.rsa.bits = MD_ACME_ACCT_PKEY_BITS; + + if (APR_SUCCESS != (rv = md_pkey_gen(&pkey, acme->p, &spec))) goto out; + acme->acct_key = pkey; + md_log_perror(MD_LOG_MARK, MD_LOG_DEBUG, rv, p, "created new account key"); + } + + if (APR_SUCCESS != (rv = acct_make(&acme->acct, p, acme->url, md->contacts))) goto out; + rv = md_acme_POST_new_account(acme, on_init_acct_new, acct_upd, NULL, NULL, &ctx); if (APR_SUCCESS == rv) { - md_store_remove(store, MD_SG_ACCOUNTS, acct_id, MD_FN_ACCT_KEY, p, 1); + md_log_perror(MD_LOG_MARK, MD_LOG_INFO, 0, p, + "registered new account %s", acme->acct->url); + } + +out: + if (APR_SUCCESS != rv && acme->acct) { + acme->acct = NULL; } return rv; } +/**************************************************************************************************/ +/* Deactivate the account */ + static apr_status_t on_init_acct_del(md_acme_req_t *req, void *baton) { md_json_t *jpayload; (void)baton; jpayload = md_json_create(req->p); - md_json_sets("reg", jpayload, MD_KEY_RESOURCE, NULL); - md_json_setb(1, jpayload, "delete", NULL); - + md_json_sets("deactivated", jpayload, MD_KEY_STATUS, NULL); return md_acme_req_body_init(req, jpayload); } -static apr_status_t acct_del(md_acme_t *acme, apr_pool_t *p, - const apr_table_t *hdrs, md_json_t *body, void *baton) -{ - md_store_t *store = baton; - apr_status_t rv = APR_SUCCESS; - - (void)hdrs; - (void)body; - md_log_perror(MD_LOG_MARK, MD_LOG_INFO, 0, p, "deleted account %s", acme->acct->url); - if (store) { - rv = md_acme_unstore_acct(store, p, acme->acct->id); - acme->acct = NULL; - acme->acct_key = NULL; - } - return rv; -} - -apr_status_t md_acme_delete_acct(md_acme_t *acme, md_store_t *store, apr_pool_t *p) +apr_status_t md_acme_acct_deactivate(md_acme_t *acme, apr_pool_t *p) { md_acme_acct_t *acct = acme->acct; + acct_ctx_t ctx; (void)p; if (!acct) { @@ -592,7 +690,10 @@ apr_status_t md_acme_delete_acct(md_acme_t *acme, md_store_t *store, apr_pool_t } md_log_perror(MD_LOG_MARK, MD_LOG_DEBUG, 0, acme->p, "delete account %s from %s", acct->url, acct->ca_url); - return md_acme_POST(acme, acct->url, on_init_acct_del, acct_del, NULL, store); + memset(&ctx, 0, sizeof(ctx)); + ctx.acme = acme; + ctx.p = p; + return md_acme_POST(acme, acct->url, on_init_acct_del, acct_upd, NULL, NULL, &ctx); } 
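Editorial note on the reworked account API above: the old find/create/delete entry points are replaced by md_acme_find_acct_for_md(), md_acme_acct_register(), md_acme_save_acct() and md_acme_acct_deactivate(). A minimal usage sketch follows; the helper name, the NULL proxy/CA-file arguments and the overall call sequence are illustrative assumptions, not the module's actual driver code, and error handling is reduced to early returns.

#include "md.h"
#include "md_acme.h"
#include "md_acme_acct.h"
#include "md_result.h"
#include "md_store.h"

/* Hypothetical helper: obtain a usable ACME account for "md", reusing a stored
 * one when possible and registering a new one otherwise. */
static apr_status_t ensure_account(md_acme_t **pacme, md_store_t *store,
                                   const md_t *md, apr_pool_t *p)
{
    md_result_t *result = md_result_make(p, APR_SUCCESS);
    md_acme_t *acme;
    apr_status_t rv;

    rv = md_acme_create(&acme, p, md->ca_effective, NULL, NULL);
    if (APR_SUCCESS != rv) return rv;
    rv = md_acme_setup(acme, result);                 /* fetch directory, detect ACME version */
    if (APR_SUCCESS != rv) return rv;

    rv = md_acme_find_acct_for_md(acme, store, md);   /* reuse a stored, still valid account */
    if (APR_STATUS_IS_ENOENT(rv)) {
        rv = md_acme_acct_register(acme, store, md, p);  /* POST to newAccount, honors EAB/ToS */
        if (APR_SUCCESS == rv) {
            rv = md_acme_save_acct(acme, p, store);      /* persist under MD_SG_ACCOUNTS */
        }
    }
    *pacme = (APR_SUCCESS == rv)? acme : NULL;
    return rv;
}

A later md_acme_acct_deactivate(acme, p) would then disable that account at the CA, as the function directly above does.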
/**************************************************************************************************/ @@ -604,9 +705,9 @@ static apr_status_t on_init_agree_tos(md_acme_req_t *req, void *baton) md_json_t *jpayload; jpayload = md_json_create(req->p); - md_json_sets("reg", jpayload, MD_KEY_RESOURCE, NULL); - md_json_sets(ctx->acme->acct->agreement, jpayload, MD_KEY_AGREEMENT, NULL); - + if (ctx->acme->acct->agreement) { + md_json_setb(1, jpayload, "termsOfServiceAgreed", NULL); + } return md_acme_req_body_init(req, jpayload); } @@ -615,21 +716,14 @@ apr_status_t md_acme_agree(md_acme_t *acme, apr_pool_t *p, const char *agreement acct_ctx_t ctx; acme->acct->agreement = agreement; + if (!strcmp("accepted", agreement) && acme->ca_agreement) { + acme->acct->agreement = acme->ca_agreement; + } + + memset(&ctx, 0, sizeof(ctx)); ctx.acme = acme; ctx.p = p; - return md_acme_POST(acme, acme->acct->url, on_init_agree_tos, acct_upd, NULL, &ctx); -} - -static int agreement_required(md_acme_acct_t *acct) -{ - /* We used to really check if the account agreement and the one - * indicated as valid are the very same: - * return (!acct->agreement - * || (acct->tos_required && strcmp(acct->tos_required, acct->agreement))); - * However, LE is happy if the account has agreed to a ToS in the past and - * does not required a renewed acceptance. - */ - return !acct->agreement; + return md_acme_POST(acme, acme->acct->url, on_init_agree_tos, acct_upd, NULL, NULL, &ctx); } apr_status_t md_acme_check_agreement(md_acme_t *acme, apr_pool_t *p, @@ -637,32 +731,17 @@ apr_status_t md_acme_check_agreement(md_acme_t *acme, apr_pool_t *p, { apr_status_t rv = APR_SUCCESS; - /* Check if (correct) Terms-of-Service for account were accepted */ + /* We used to really check if the account agreement and the one indicated in meta + * are the very same. However, LE is happy if the account has agreed to a ToS in + * the past and does not require a renewed acceptance. + */ *prequired = NULL; - if (agreement_required(acme->acct)) { - const char *tos = acme->acct->tos_required; - if (!tos) { - if (APR_SUCCESS != (rv = md_acme_validate_acct(acme))) { - md_log_perror(MD_LOG_MARK, MD_LOG_DEBUG, rv, acme->p, - "validate for account %s", acme->acct->id); - return rv; - } - tos = acme->acct->tos_required; - if (!tos) { - md_log_perror(MD_LOG_MARK, MD_LOG_DEBUG, rv, acme->p, "unknown terms-of-service " - "required after validation of account %s", acme->acct->id); - return APR_EGENERAL; - } - } - - if (acme->acct->agreement && !strcmp(tos, acme->acct->agreement)) { - rv = md_acme_agree(acme, p, tos); - } - else if (agreement && !strcmp(tos, agreement)) { - rv = md_acme_agree(acme, p, tos); + if (!acme->acct->agreement && acme->ca_agreement) { + if (agreement) { + rv = md_acme_agree(acme, p, acme->ca_agreement); } else { - *prequired = apr_pstrdup(p, tos); + *prequired = acme->ca_agreement; rv = APR_INCOMPLETE; } } diff --git a/modules/md/md_acme_acct.h b/modules/md/md_acme_acct.h index e200da3..b5bba63 100644 --- a/modules/md/md_acme_acct.h +++ b/modules/md/md_acme_acct.h @@ -21,22 +21,32 @@ struct md_acme_req; struct md_json_t; struct md_pkey_t; +#include "md_store.h" /** * An ACME account at an ACME server. 
 */
 typedef struct md_acme_acct_t md_acme_acct_t;

+typedef enum {
+    MD_ACME_ACCT_ST_UNKNOWN,
+    MD_ACME_ACCT_ST_VALID,
+    MD_ACME_ACCT_ST_DEACTIVATED,
+    MD_ACME_ACCT_ST_REVOKED,
+} md_acme_acct_st;
+
 struct md_acme_acct_t {
     const char *id;                 /* short, unique id for the account */
     const char *url;                /* url of the account, once registered */
     const char *ca_url;             /* url of the ACME protocol endpoint */
+    md_acme_acct_st status;         /* status of this account */
     apr_array_header_t *contacts;   /* list of contact uris, e.g. mailto:xxx */
     const char *tos_required;       /* terms of service asked for by CA */
     const char *agreement;          /* terms of service agreed to by user */
-
+    const char *orders;             /* URL where certificate orders are found (ACMEv2) */
+    const char *eab_kid;            /* external account binding keyid used or NULL */
+    const char *eab_hmac;           /* external account binding hmac used or NULL */
     struct md_json_t *registration; /* data from server registration */
-    int disabled;
 };

 #define MD_FN_ACCOUNT       "account.json"
@@ -46,4 +56,93 @@ struct md_acme_acct_t {
  * are expected to live long, better err on the safe side. */
 #define MD_ACME_ACCT_PKEY_BITS 3072

+#define MD_ACME_ACCT_STAGED "staged"
+
+/**
+ * Convert an ACME account from/to JSON.
+ */
+struct md_json_t *md_acme_acct_to_json(md_acme_acct_t *acct, apr_pool_t *p);
+apr_status_t md_acme_acct_from_json(md_acme_acct_t **pacct, struct md_json_t *json, apr_pool_t *p);
+
+/**
+ * Update the account from the ACME server.
+ * - Will update acme->acct structure from server on success
+ * - Will return error status when request failed or account is not known.
+ */
+apr_status_t md_acme_acct_update(md_acme_t *acme);
+
+/**
+ * Update the account and persist changes in the store, if given (and not NULL).
+ */
+apr_status_t md_acme_acct_validate(md_acme_t *acme, md_store_t *store, apr_pool_t *p);
+
+/**
+ * Agree to the given Terms-of-Service url for the current account.
+ */
+apr_status_t md_acme_agree(md_acme_t *acme, apr_pool_t *p, const char *tos);
+
+/**
+ * Confirm with the server that the current account agrees to the Terms-of-Service
+ * given in the agreement url.
+ * If the known agreement is equal to this, nothing is done.
+ * If it differs, the account is re-validated in the hope that the server
+ * announces the ToS URL it wants. If this is equal to the agreement specified,
+ * the server is notified of this. If the server requires a ToS that the account
+ * thinks it has already given, it is resent.
+ *
+ * If an agreement is required, different from the current one, APR_INCOMPLETE is
+ * returned and the agreement url is returned in the parameter.
+ */
+apr_status_t md_acme_check_agreement(md_acme_t *acme, apr_pool_t *p,
+                                     const char *agreement, const char **prequired);
+
+/**
+ * Get the ToS agreement for the current account.
+ */
+const char *md_acme_get_agreement(md_acme_t *acme);
+
+
+/**
+ * Find an existing account in the local store. On APR_SUCCESS, the acme
+ * instance will have a current, validated account to use.
+ */
+apr_status_t md_acme_find_acct_for_md(md_acme_t *acme, md_store_t *store, const md_t *md);
+
+/**
+ * Find the account id for a given md.
+ */
+apr_status_t md_acme_acct_id_for_md(const char **pid, md_store_t *store,
+                                    md_store_group_t group, const md_t *md, apr_pool_t *p);
+
+/**
+ * Create a new account at the ACME server for an MD. The
+ * new account is the one used by the acme instance afterwards, on success.
+ */ +apr_status_t md_acme_acct_register(md_acme_t *acme, md_store_t *store, + const md_t *md, apr_pool_t *p); + +apr_status_t md_acme_acct_save(md_store_t *store, apr_pool_t *p, md_acme_t *acme, + const char **pid, struct md_acme_acct_t *acct, + struct md_pkey_t *acct_key); + +/** + * Deactivate the current account at the ACME server. + */ +apr_status_t md_acme_acct_deactivate(md_acme_t *acme, apr_pool_t *p); + +apr_status_t md_acme_acct_load(struct md_acme_acct_t **pacct, struct md_pkey_t **ppkey, + md_store_t *store, md_store_group_t group, + const char *name, apr_pool_t *p); + +/* + * Return != 0 iff the account can be used for the ACME url. + */ +int md_acme_acct_matches_url(md_acme_acct_t *acct, const char *url); + +/* + * Return != 0 iff the account can be used for the MD, including + * its CA url and EAB settings. + */ +int md_acme_acct_matches_md(md_acme_acct_t *acct, const md_t *md); + #endif /* md_acme_acct_h */ diff --git a/modules/md/md_acme_authz.c b/modules/md/md_acme_authz.c index 2b5cbdc..f4579b3 100644 --- a/modules/md/md_acme_authz.c +++ b/modules/md/md_acme_authz.c @@ -32,6 +32,7 @@ #include "md_http.h" #include "md_log.h" #include "md_jws.h" +#include "md_result.h" #include "md_store.h" #include "md_util.h" @@ -46,64 +47,6 @@ md_acme_authz_t *md_acme_authz_create(apr_pool_t *p) return authz; } -md_acme_authz_set_t *md_acme_authz_set_create(apr_pool_t *p) -{ - md_acme_authz_set_t *authz_set; - - authz_set = apr_pcalloc(p, sizeof(*authz_set)); - authz_set->authzs = apr_array_make(p, 5, sizeof(md_acme_authz_t *)); - - return authz_set; -} - -md_acme_authz_t *md_acme_authz_set_get(md_acme_authz_set_t *set, const char *domain) -{ - md_acme_authz_t *authz; - int i; - - assert(domain); - for (i = 0; i < set->authzs->nelts; ++i) { - authz = APR_ARRAY_IDX(set->authzs, i, md_acme_authz_t *); - if (!apr_strnatcasecmp(domain, authz->domain)) { - return authz; - } - } - return NULL; -} - -apr_status_t md_acme_authz_set_add(md_acme_authz_set_t *set, md_acme_authz_t *authz) -{ - md_acme_authz_t *existing; - - assert(authz->domain); - if (NULL != (existing = md_acme_authz_set_get(set, authz->domain))) { - return APR_EINVAL; - } - APR_ARRAY_PUSH(set->authzs, md_acme_authz_t*) = authz; - return APR_SUCCESS; -} - -apr_status_t md_acme_authz_set_remove(md_acme_authz_set_t *set, const char *domain) -{ - md_acme_authz_t *authz; - int i; - - assert(domain); - for (i = 0; i < set->authzs->nelts; ++i) { - authz = APR_ARRAY_IDX(set->authzs, i, md_acme_authz_t *); - if (!apr_strnatcasecmp(domain, authz->domain)) { - int n = i + 1; - if (n < set->authzs->nelts) { - void **elems = (void **)set->authzs->elts; - memmove(elems + i, elems + n, (size_t)(set->authzs->nelts - n) * sizeof(*elems)); - } - --set->authzs->nelts; - return APR_SUCCESS; - } - } - return APR_ENOENT; -} - /**************************************************************************************************/ /* Register a new authorization */ @@ -133,88 +76,65 @@ static void authz_req_ctx_init(authz_req_ctx *ctx, md_acme_t *acme, ctx->authz = authz; } -static apr_status_t on_init_authz(md_acme_req_t *req, void *baton) -{ - authz_req_ctx *ctx = baton; - md_json_t *jpayload; - - jpayload = md_json_create(req->p); - md_json_sets("new-authz", jpayload, MD_KEY_RESOURCE, NULL); - md_json_sets("dns", jpayload, MD_KEY_IDENTIFIER, MD_KEY_TYPE, NULL); - md_json_sets(ctx->domain, jpayload, MD_KEY_IDENTIFIER, MD_KEY_VALUE, NULL); - - return md_acme_req_body_init(req, jpayload); -} 
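/*
 * Illustrative sketch, not part of the patch: with ACMEv2 an authorization is
 * no longer created via "new-authz" as the removed code above did; its URL
 * arrives with the order and is simply fetched via md_acme_authz_retrieve()
 * and md_acme_authz_update() introduced just below. A hypothetical poll step
 * (example_poll_authz() is a made-up name) could look like this:
 */
static apr_status_t example_poll_authz(md_acme_t *acme, const char *authz_url,
                                       apr_pool_t *p)
{
    md_acme_authz_t *authz;
    apr_status_t rv;

    rv = md_acme_authz_retrieve(acme, p, authz_url, &authz);
    if (APR_SUCCESS != rv) return rv;

    switch (authz->state) {
    case MD_ACME_AUTHZ_S_VALID:   return APR_SUCCESS;  /* domain validated */
    case MD_ACME_AUTHZ_S_PENDING: return APR_EAGAIN;   /* challenge still open, retry later */
    default:                      return APR_EINVAL;   /* failed/unknown, see authz->error_* fields */
    }
}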
+/**************************************************************************************************/ +/* Update an existing authorization */ -static apr_status_t authz_created(md_acme_t *acme, apr_pool_t *p, const apr_table_t *hdrs, - md_json_t *body, void *baton) +apr_status_t md_acme_authz_retrieve(md_acme_t *acme, apr_pool_t *p, const char *url, + md_acme_authz_t **pauthz) { - authz_req_ctx *ctx = baton; - const char *location = apr_table_get(hdrs, "location"); - apr_status_t rv = APR_SUCCESS; + md_acme_authz_t *authz; + apr_status_t rv; - (void)acme; - (void)p; - if (location) { - ctx->authz = md_acme_authz_create(ctx->p); - ctx->authz->domain = apr_pstrdup(ctx->p, ctx->domain); - ctx->authz->location = apr_pstrdup(ctx->p, location); - ctx->authz->resource = md_json_clone(ctx->p, body); - md_log_perror(MD_LOG_MARK, MD_LOG_TRACE1, rv, ctx->p, "authz_new at %s", location); - } - else { - rv = APR_EINVAL; - md_log_perror(MD_LOG_MARK, MD_LOG_WARNING, rv, ctx->p, "new authz, no location header"); - } + authz = apr_pcalloc(p, sizeof(*authz)); + authz->url = apr_pstrdup(p, url); + rv = md_acme_authz_update(authz, acme, p); + + *pauthz = (APR_SUCCESS == rv)? authz : NULL; return rv; } -apr_status_t md_acme_authz_register(struct md_acme_authz_t **pauthz, md_acme_t *acme, - md_store_t *store, const char *domain, apr_pool_t *p) +typedef struct { + apr_pool_t *p; + md_acme_authz_t *authz; +} error_ctx_t; + +static int copy_challenge_error(void *baton, size_t index, md_json_t *json) { - apr_status_t rv; - authz_req_ctx ctx; + error_ctx_t *ctx = baton; - (void)store; - authz_req_ctx_init(&ctx, acme, domain, NULL, p); - - md_log_perror(MD_LOG_MARK, MD_LOG_DEBUG, 0, acme->p, "create new authz"); - rv = md_acme_POST(acme, acme->new_authz, on_init_authz, authz_created, NULL, &ctx); - - *pauthz = (APR_SUCCESS == rv)? 
ctx.authz : NULL; - return rv; + (void)index; + if (md_json_has_key(json, MD_KEY_ERROR, NULL)) { + ctx->authz->error_type = md_json_dups(ctx->p, json, MD_KEY_ERROR, MD_KEY_TYPE, NULL); + ctx->authz->error_detail = md_json_dups(ctx->p, json, MD_KEY_ERROR, MD_KEY_DETAIL, NULL); + ctx->authz->error_subproblems = md_json_dupj(ctx->p, json, MD_KEY_ERROR, MD_KEY_SUBPROBLEMS, NULL); + } + return 1; } -/**************************************************************************************************/ -/* Update an existing authorization */ - -apr_status_t md_acme_authz_update(md_acme_authz_t *authz, md_acme_t *acme, - md_store_t *store, apr_pool_t *p) +apr_status_t md_acme_authz_update(md_acme_authz_t *authz, md_acme_t *acme, apr_pool_t *p) { md_json_t *json; const char *s, *err; md_log_level_t log_level; apr_status_t rv; - MD_CHK_VARS; + error_ctx_t ctx; - (void)store; assert(acme); assert(acme->http); assert(authz); - assert(authz->location); + assert(authz->url); authz->state = MD_ACME_AUTHZ_S_UNKNOWN; json = NULL; + authz->error_type = authz->error_detail = NULL; + authz->error_subproblems = NULL; err = "unable to parse response"; log_level = MD_LOG_ERR; - if (MD_OK(md_acme_get_json(&json, acme, authz->location, p)) - && (s = md_json_gets(json, MD_KEY_IDENTIFIER, MD_KEY_TYPE, NULL)) - && !strcmp(s, "dns") - && (s = md_json_gets(json, MD_KEY_IDENTIFIER, MD_KEY_VALUE, NULL)) - && !strcmp(s, authz->domain) + if (APR_SUCCESS == (rv = md_acme_get_json(&json, acme, authz->url, p)) && (s = md_json_gets(json, MD_KEY_STATUS, NULL))) { - + + authz->domain = md_json_gets(json, MD_KEY_IDENTIFIER, MD_KEY_VALUE, NULL); authz->resource = json; if (!strcmp(s, "pending")) { authz->state = MD_ACME_AUTHZ_S_PENDING; @@ -227,7 +147,10 @@ apr_status_t md_acme_authz_update(md_acme_authz_t *authz, md_acme_t *acme, log_level = MD_LOG_DEBUG; } else if (!strcmp(s, "invalid")) { + ctx.p = p; + ctx.authz = authz; authz->state = MD_ACME_AUTHZ_S_INVALID; + md_json_itera(copy_challenge_error, &ctx, json, MD_KEY_CHALLENGES, NULL); err = "challenge 'invalid'"; } } @@ -239,7 +162,7 @@ apr_status_t md_acme_authz_update(md_acme_authz_t *authz, md_acme_t *acme, if (md_log_is_level(p, log_level)) { md_log_perror(MD_LOG_MARK, log_level, rv, p, "ACME server authz: %s for %s at %s. " - "Exact repsonse was: %s", err? err : "", authz->domain, authz->location, + "Exact response was: %s", err, authz->domain, authz->url, json? 
md_json_writep(json, p, MD_JSON_FMT_COMPACT) : "not available"); } @@ -256,7 +179,12 @@ static md_acme_authz_cha_t *cha_from_json(apr_pool_t *p, size_t index, md_json_t cha = apr_pcalloc(p, sizeof(*cha)); cha->index = index; cha->type = md_json_dups(p, json, MD_KEY_TYPE, NULL); - cha->uri = md_json_dups(p, json, MD_KEY_URI, NULL); + if (md_json_has_key(json, MD_KEY_URL, NULL)) { /* ACMEv2 */ + cha->uri = md_json_dups(p, json, MD_KEY_URL, NULL); + } + else { /* ACMEv1 */ + cha->uri = md_json_dups(p, json, MD_KEY_URI, NULL); + } cha->token = md_json_dups(p, json, MD_KEY_TOKEN, NULL); cha->key_authz = md_json_dups(p, json, MD_KEY_KEYAUTHZ, NULL); @@ -265,13 +193,10 @@ static md_acme_authz_cha_t *cha_from_json(apr_pool_t *p, size_t index, md_json_t static apr_status_t on_init_authz_resp(md_acme_req_t *req, void *baton) { - authz_req_ctx *ctx = baton; md_json_t *jpayload; + (void)baton; jpayload = md_json_create(req->p); - md_json_sets("challenge", jpayload, MD_KEY_RESOURCE, NULL); - md_json_sets(ctx->challenge->key_authz, jpayload, MD_KEY_KEYAUTHZ, NULL); - return md_acme_req_body_init(req, jpayload); } @@ -284,7 +209,7 @@ static apr_status_t authz_http_set(md_acme_t *acme, apr_pool_t *p, const apr_tab (void)p; (void)hdrs; (void)body; - md_log_perror(MD_LOG_MARK, MD_LOG_INFO, 0, ctx->p, "updated authz %s", ctx->authz->location); + md_log_perror(MD_LOG_MARK, MD_LOG_TRACE1, 0, ctx->p, "updated authz %s", ctx->authz->url); return APR_SUCCESS; } @@ -293,14 +218,13 @@ static apr_status_t setup_key_authz(md_acme_authz_cha_t *cha, md_acme_authz_t *a { const char *thumb64, *key_authz; apr_status_t rv; - MD_CHK_VARS; (void)authz; assert(cha); assert(cha->token); *pchanged = 0; - if (MD_OK(md_jws_pkey_thumb(&thumb64, p, acme->acct_key))) { + if (APR_SUCCESS == (rv = md_jws_pkey_thumb(&thumb64, p, acme->acct_key))) { key_authz = apr_psprintf(p, "%s.%s", cha->token, thumb64); if (cha->key_authz) { if (strcmp(key_authz, cha->key_authz)) { @@ -316,136 +240,334 @@ static apr_status_t setup_key_authz(md_acme_authz_cha_t *cha, md_acme_authz_t *a return rv; } -static apr_status_t cha_http_01_setup(md_acme_authz_cha_t *cha, md_acme_authz_t *authz, +static apr_status_t cha_http_01_setup(md_acme_authz_cha_t *cha, md_acme_authz_t *authz, md_acme_t *acme, md_store_t *store, - md_pkey_spec_t *key_spec, apr_pool_t *p) + md_pkeys_spec_t *key_specs, + apr_array_header_t *acme_tls_1_domains, const md_t *md, + apr_table_t *env, md_result_t *result, + const char **psetup_token, apr_pool_t *p) { const char *data; apr_status_t rv; int notify_server; - MD_CHK_VARS; - (void)key_spec; - if (!MD_OK(setup_key_authz(cha, authz, acme, p, ¬ify_server))) { + (void)key_specs; + (void)env; + (void)acme_tls_1_domains; + (void)md; + + if (APR_SUCCESS != (rv = setup_key_authz(cha, authz, acme, p, ¬ify_server))) { goto out; } rv = md_store_load(store, MD_SG_CHALLENGES, authz->domain, MD_FN_HTTP01, MD_SV_TEXT, (void**)&data, p); if ((APR_SUCCESS == rv && strcmp(cha->key_authz, data)) || APR_STATUS_IS_ENOENT(rv)) { + const char *content = apr_psprintf(p, "%s\n", cha->key_authz); rv = md_store_save(store, p, MD_SG_CHALLENGES, authz->domain, MD_FN_HTTP01, - MD_SV_TEXT, (void*)cha->key_authz, 0); - authz->dir = authz->domain; + MD_SV_TEXT, (void*)content, 0); notify_server = 1; } if (APR_SUCCESS == rv && notify_server) { authz_req_ctx ctx; - + const char *event; + + /* Raise event that challenge data has been set up before we tell the + ACME server. Clusters might want to distribute it. 
*/ + event = apr_psprintf(p, "challenge-setup:%s:%s", MD_AUTHZ_TYPE_HTTP01, authz->domain); + rv = md_result_raise(result, event, p); + if (APR_SUCCESS != rv) { + md_log_perror(MD_LOG_MARK, MD_LOG_DEBUG, rv, p, + "%s: event '%s' failed. aborting challenge setup", + authz->domain, event); + goto out; + } /* challenge is setup or was changed from previous data, tell ACME server * so it may (re)try verification */ authz_req_ctx_init(&ctx, acme, NULL, authz, p); ctx.challenge = cha; - rv = md_acme_POST(acme, cha->uri, on_init_authz_resp, authz_http_set, NULL, &ctx); + rv = md_acme_POST(acme, cha->uri, on_init_authz_resp, authz_http_set, NULL, NULL, &ctx); } out: + *psetup_token = (APR_SUCCESS == rv)? + apr_psprintf(p, "%s:%s", MD_AUTHZ_TYPE_HTTP01, authz->domain) : NULL; return rv; } -static apr_status_t setup_cha_dns(const char **pdns, md_acme_authz_cha_t *cha, apr_pool_t *p) +void tls_alpn01_fnames(apr_pool_t *p, md_pkey_spec_t *kspec, char **keyfn, char **certfn ) { - const char *dhex; - char *dns; - apr_size_t dhex_len; - apr_status_t rv; - - rv = md_crypt_sha256_digest_hex(&dhex, p, cha->key_authz, strlen(cha->key_authz)); - if (APR_SUCCESS == rv) { - dhex = md_util_str_tolower((char*)dhex); - dhex_len = strlen(dhex); - assert(dhex_len > 32); - dns = apr_pcalloc(p, dhex_len + 1 + sizeof(MD_TLSSNI01_DNS_SUFFIX)); - strncpy(dns, dhex, 32); - dns[32] = '.'; - strncpy(dns+33, dhex+32, dhex_len-32); - memcpy(dns+(dhex_len+1), MD_TLSSNI01_DNS_SUFFIX, sizeof(MD_TLSSNI01_DNS_SUFFIX)); - } - *pdns = (APR_SUCCESS == rv)? dns : NULL; - return rv; + *keyfn = apr_pstrcat(p, "acme-tls-alpn-01-", md_pkey_filename(kspec, p), NULL); + *certfn = apr_pstrcat(p, "acme-tls-alpn-01-", md_chain_filename(kspec, p), NULL); } -static apr_status_t cha_tls_sni_01_setup(md_acme_authz_cha_t *cha, md_acme_authz_t *authz, - md_acme_t *acme, md_store_t *store, - md_pkey_spec_t *key_spec, apr_pool_t *p) +static apr_status_t cha_tls_alpn_01_setup(md_acme_authz_cha_t *cha, md_acme_authz_t *authz, + md_acme_t *acme, md_store_t *store, + md_pkeys_spec_t *key_specs, + apr_array_header_t *acme_tls_1_domains, const md_t *md, + apr_table_t *env, md_result_t *result, + const char **psetup_token, apr_pool_t *p) { - md_cert_t *cha_cert; - md_pkey_t *cha_key; - const char *cha_dns; + const char *acme_id, *token; apr_status_t rv; int notify_server; - apr_array_header_t *domains; - MD_CHK_VARS; - - if ( !MD_OK(setup_key_authz(cha, authz, acme, p, ¬ify_server)) - || !MD_OK(setup_cha_dns(&cha_dns, cha, p))) { - goto out; - } + md_data_t data; + int i; - rv = md_store_load(store, MD_SG_CHALLENGES, cha_dns, MD_FN_TLSSNI01_CERT, - MD_SV_CERT, (void**)&cha_cert, p); - if ((APR_SUCCESS == rv && !md_cert_covers_domain(cha_cert, cha_dns)) - || APR_STATUS_IS_ENOENT(rv)) { - - if (APR_SUCCESS != (rv = md_pkey_gen(&cha_key, p, key_spec))) { - md_log_perror(MD_LOG_MARK, MD_LOG_ERR, rv, p, "%s: create tls-sni-01 challenge key", - authz->domain); - goto out; + (void)env; + (void)md; + if (md_array_str_index(acme_tls_1_domains, authz->domain, 0, 0) < 0) { + rv = APR_ENOTIMPL; + if (acme_tls_1_domains->nelts) { + md_log_perror(MD_LOG_MARK, MD_LOG_DEBUG, 0, p, + "%s: protocol 'acme-tls/1' seems not enabled for this domain, " + "but is enabled for other associated domains. 
" + "Continuing with fingers crossed.", authz->domain); } - - /* setup a certificate containing the challenge dns */ - domains = apr_array_make(p, 5, sizeof(const char*)); - APR_ARRAY_PUSH(domains, const char*) = cha_dns; - if (!MD_OK(md_cert_self_sign(&cha_cert, authz->domain, domains, cha_key, - apr_time_from_sec(7 * MD_SECS_PER_DAY), p))) { - md_log_perror(MD_LOG_MARK, MD_LOG_ERR, rv, p, "%s: setup self signed cert for %s", - authz->domain, cha_dns); + else { + md_log_perror(MD_LOG_MARK, MD_LOG_INFO, 0, p, + "%s: protocol 'acme-tls/1' seems not enabled for this or " + "any other associated domain. Not attempting challenge " + "type tls-alpn-01.", authz->domain); goto out; } + } + if (APR_SUCCESS != (rv = setup_key_authz(cha, authz, acme, p, ¬ify_server))) { + goto out; + } + + /* Create a "tls-alpn-01" certificate for the domain we want to authenticate. + * The server will need to answer a TLS connection with SNI == authz->domain + * and ALPN protocol "acme-tls/1" with this certificate. + */ + md_data_init_str(&data, cha->key_authz); + rv = md_crypt_sha256_digest_hex(&token, p, &data); + if (APR_SUCCESS != rv) { + md_log_perror(MD_LOG_MARK, MD_LOG_ERR, rv, p, "%s: create tls-alpn-01 validation token", + authz->domain); + goto out; + } + acme_id = apr_psprintf(p, "critical,DER:04:20:%s", token); + + /* Each configured key type must be generated to ensure: + * that any fallback certs already given to mod_ssl are replaced. + * We expect that the validation client (at the CA) can deal with at + * least one of them. + */ + + for (i = 0; i < md_pkeys_spec_count(key_specs); ++i) { + char *kfn, *cfn; + md_cert_t *cha_cert; + md_pkey_t *cha_key; + md_pkey_spec_t *key_spec; + + key_spec = md_pkeys_spec_get(key_specs, i); + tls_alpn01_fnames(p, key_spec, &kfn, &cfn); + + rv = md_store_load(store, MD_SG_CHALLENGES, authz->domain, cfn, + MD_SV_CERT, (void**)&cha_cert, p); + if ((APR_SUCCESS == rv && !md_cert_covers_domain(cha_cert, authz->domain)) + || APR_STATUS_IS_ENOENT(rv)) { + if (APR_SUCCESS != (rv = md_pkey_gen(&cha_key, p, key_spec))) { + md_log_perror(MD_LOG_MARK, MD_LOG_ERR, rv, p, "%s: create tls-alpn-01 %s challenge key", + authz->domain, md_pkey_spec_name(key_spec)); + goto out; + } + + if (APR_SUCCESS != (rv = md_cert_make_tls_alpn_01(&cha_cert, authz->domain, acme_id, cha_key, + apr_time_from_sec(7 * MD_SECS_PER_DAY), p))) { + md_log_perror(MD_LOG_MARK, MD_LOG_ERR, rv, p, "%s: create tls-alpn-01 %s challenge cert", + authz->domain, md_pkey_spec_name(key_spec)); + goto out; + } - if (MD_OK(md_store_save(store, p, MD_SG_CHALLENGES, cha_dns, MD_FN_TLSSNI01_PKEY, - MD_SV_PKEY, (void*)cha_key, 0))) { - rv = md_store_save(store, p, MD_SG_CHALLENGES, cha_dns, MD_FN_TLSSNI01_CERT, - MD_SV_CERT, (void*)cha_cert, 0); + if (APR_SUCCESS == (rv = md_store_save(store, p, MD_SG_CHALLENGES, authz->domain, kfn, + MD_SV_PKEY, (void*)cha_key, 0))) { + rv = md_store_save(store, p, MD_SG_CHALLENGES, authz->domain, cfn, + MD_SV_CERT, (void*)cha_cert, 0); + } + ++notify_server; } - authz->dir = cha_dns; - notify_server = 1; } if (APR_SUCCESS == rv && notify_server) { authz_req_ctx ctx; - + const char *event; + + /* Raise event that challenge data has been set up before we tell the + ACME server. Clusters might want to distribute it. */ + event = apr_psprintf(p, "challenge-setup:%s:%s", MD_AUTHZ_TYPE_TLSALPN01, authz->domain); + rv = md_result_raise(result, event, p); + if (APR_SUCCESS != rv) { + md_log_perror(MD_LOG_MARK, MD_LOG_DEBUG, rv, p, + "%s: event '%s' failed. 
aborting challenge setup", + authz->domain, event); + goto out; + } /* challenge is setup or was changed from previous data, tell ACME server * so it may (re)try verification */ authz_req_ctx_init(&ctx, acme, NULL, authz, p); ctx.challenge = cha; - rv = md_acme_POST(acme, cha->uri, on_init_authz_resp, authz_http_set, NULL, &ctx); + rv = md_acme_POST(acme, cha->uri, on_init_authz_resp, authz_http_set, NULL, NULL, &ctx); } out: + *psetup_token = (APR_SUCCESS == rv)? + apr_psprintf(p, "%s:%s", MD_AUTHZ_TYPE_TLSALPN01, authz->domain) : NULL; return rv; } -typedef apr_status_t cha_starter(md_acme_authz_cha_t *cha, md_acme_authz_t *authz, - md_acme_t *acme, md_store_t *store, - md_pkey_spec_t *key_spec, apr_pool_t *p); +static apr_status_t cha_dns_01_setup(md_acme_authz_cha_t *cha, md_acme_authz_t *authz, + md_acme_t *acme, md_store_t *store, + md_pkeys_spec_t *key_specs, + apr_array_header_t *acme_tls_1_domains, const md_t *md, + apr_table_t *env, md_result_t *result, + const char **psetup_token, apr_pool_t *p) +{ + const char *token; + const char * const *argv; + const char *cmdline, *dns01_cmd; + apr_status_t rv; + int exit_code, notify_server; + authz_req_ctx ctx; + md_data_t data; + const char *event; + + (void)store; + (void)key_specs; + (void)acme_tls_1_domains; + + dns01_cmd = md->dns01_cmd; + if (!dns01_cmd) + dns01_cmd = apr_table_get(env, MD_KEY_CMD_DNS01); + if (!dns01_cmd) { + rv = APR_ENOTIMPL; + md_log_perror(MD_LOG_MARK, MD_LOG_DEBUG, rv, p, "%s: dns-01 command not set", + authz->domain); + goto out; + } + + if (APR_SUCCESS != (rv = setup_key_authz(cha, authz, acme, p, ¬ify_server))) { + goto out; + } + + md_data_init_str(&data, cha->key_authz); + rv = md_crypt_sha256_digest64(&token, p, &data); + if (APR_SUCCESS != rv) { + md_log_perror(MD_LOG_MARK, MD_LOG_ERR, rv, p, "%s: create dns-01 token for %s", + md->name, authz->domain); + goto out; + } + + cmdline = apr_psprintf(p, "%s setup %s %s", dns01_cmd, authz->domain, token); + md_log_perror(MD_LOG_MARK, MD_LOG_DEBUG, 0, p, + "%s: dns-01 setup command: %s", authz->domain, cmdline); + + apr_tokenize_to_argv(cmdline, (char***)&argv, p); + if (APR_SUCCESS != (rv = md_util_exec(p, argv[0], argv, &exit_code))) { + md_log_perror(MD_LOG_MARK, MD_LOG_WARNING, rv, p, + "%s: dns-01 setup command failed to execute for %s", md->name, authz->domain); + goto out; + } + if (exit_code) { + rv = APR_EGENERAL; + md_log_perror(MD_LOG_MARK, MD_LOG_INFO, rv, p, + "%s: dns-01 setup command returns %d for %s", md->name, exit_code, authz->domain); + goto out; + } + + /* Raise event that challenge data has been set up before we tell the + ACME server. Clusters might want to distribute it. */ + event = apr_psprintf(p, "challenge-setup:%s:%s", MD_AUTHZ_TYPE_DNS01, authz->domain); + rv = md_result_raise(result, event, p); + if (APR_SUCCESS != rv) { + md_log_perror(MD_LOG_MARK, MD_LOG_DEBUG, rv, p, + "%s: event '%s' failed. aborting challenge setup", + authz->domain, event); + goto out; + } + /* challenge is setup, tell ACME server so it may (re)try verification */ + md_log_perror(MD_LOG_MARK, MD_LOG_DEBUG, rv, p, "%s: dns-01 setup succeeded for %s", + md->name, authz->domain); + authz_req_ctx_init(&ctx, acme, NULL, authz, p); + ctx.challenge = cha; + rv = md_acme_POST(acme, cha->uri, on_init_authz_resp, authz_http_set, NULL, NULL, &ctx); + +out: + *psetup_token = (APR_SUCCESS == rv)? 
+ apr_psprintf(p, "%s:%s %s", MD_AUTHZ_TYPE_DNS01, authz->domain, token) : NULL; + return rv; +} + +static apr_status_t cha_dns_01_teardown(md_store_t *store, const char *domain, const md_t *md, + apr_table_t *env, apr_pool_t *p) +{ + const char * const *argv; + const char *cmdline, *dns01_cmd, *dns01v; + char *tmp, *s; + apr_status_t rv; + int exit_code; + + (void)store; + + dns01_cmd = md->dns01_cmd; + if (!dns01_cmd) + dns01_cmd = apr_table_get(env, MD_KEY_CMD_DNS01); + if (!dns01_cmd) { + rv = APR_ENOTIMPL; + md_log_perror(MD_LOG_MARK, MD_LOG_DEBUG, 0, p, "%s: dns-01 command not set for %s", + md->name, domain); + goto out; + } + dns01v = apr_table_get(env, MD_KEY_DNS01_VERSION); + if (!dns01v || strcmp(dns01v, "2")) { + /* use older version of teardown args with only domain, remove token */ + tmp = apr_pstrdup(p, domain); + s = strchr(tmp, ' '); + if (s) { + *s = '\0'; + domain = tmp; + } + } + + cmdline = apr_psprintf(p, "%s teardown %s", dns01_cmd, domain); + apr_tokenize_to_argv(cmdline, (char***)&argv, p); + if (APR_SUCCESS != (rv = md_util_exec(p, argv[0], argv, &exit_code)) || exit_code) { + md_log_perror(MD_LOG_MARK, MD_LOG_WARNING, rv, p, + "%s: dns-01 teardown command failed (exit code=%d) for %s", + md->name, exit_code, domain); + } +out: + return rv; +} + +static apr_status_t cha_teardown_dir(md_store_t *store, const char *domain, const md_t *md, + apr_table_t *env, apr_pool_t *p) +{ + (void)md; + (void)env; + return md_store_purge(store, p, MD_SG_CHALLENGES, domain); +} + +typedef apr_status_t cha_setup(md_acme_authz_cha_t *cha, md_acme_authz_t *authz, + md_acme_t *acme, md_store_t *store, + md_pkeys_spec_t *key_specs, + apr_array_header_t *acme_tls_1_domains, const md_t *md, + apr_table_t *env, md_result_t *result, + const char **psetup_token, apr_pool_t *p); + +typedef apr_status_t cha_teardown(md_store_t *store, const char *domain, const md_t *md, + apr_table_t *env, apr_pool_t *p); typedef struct { const char *name; - cha_starter *start; + cha_setup *setup; + cha_teardown *teardown; } cha_type; static const cha_type CHA_TYPES[] = { - { MD_AUTHZ_TYPE_HTTP01, cha_http_01_setup }, - { MD_AUTHZ_TYPE_TLSSNI01, cha_tls_sni_01_setup }, + { MD_AUTHZ_TYPE_HTTP01, cha_http_01_setup, cha_teardown_dir }, + { MD_AUTHZ_TYPE_TLSALPN01, cha_tls_alpn_01_setup, cha_teardown_dir }, + { MD_AUTHZ_TYPE_DNS01, cha_dns_01_setup, cha_dns_01_teardown }, }; static const apr_size_t CHA_TYPES_LEN = (sizeof(CHA_TYPES)/sizeof(CHA_TYPES[0])); @@ -481,13 +603,15 @@ static apr_status_t find_type(void *baton, size_t index, md_json_t *json) } apr_status_t md_acme_authz_respond(md_acme_authz_t *authz, md_acme_t *acme, md_store_t *store, - apr_array_header_t *challenges, - md_pkey_spec_t *key_spec, apr_pool_t *p) + apr_array_header_t *challenges, md_pkeys_spec_t *key_specs, + apr_array_header_t *acme_tls_1_domains, const md_t *md, + apr_table_t *env, apr_pool_t *p, const char **psetup_token, + md_result_t *result) { apr_status_t rv; - int i; + int i, j; cha_find_ctx fctx; - + assert(acme); assert(authz); assert(authz->resource); @@ -495,229 +619,98 @@ apr_status_t md_acme_authz_respond(md_acme_authz_t *authz, md_acme_t *acme, md_s fctx.p = p; fctx.accepted = NULL; - /* Look in the order challenge types are defined */ - for (i = 0; i < challenges->nelts && !fctx.accepted; ++i) { + /* Look in the order challenge types are defined: + * - if they are offered by the CA, try to set it up + * - if setup was successful, we are done and the CA will evaluate us + * - if setup failed, continue to look for another 
supported challenge type + * - if there is no overlap in types, tell the user that she has to configure + * either more types (dns, tls-alpn-01), make ports available or refrain + * from using wildcard domains when dns is not available. etc. + * - if there was an overlap, but no setup was successful, report that. We + * will retry this, maybe the failure is temporary (e.g. command to setup DNS + */ + md_result_printf(result, 0, "%s: selecting suitable authorization challenge " + "type, this domain supports %s", + authz->domain, apr_array_pstrcat(p, challenges, ' ')); + rv = APR_ENOTIMPL; + *psetup_token = NULL; + for (i = 0; i < challenges->nelts; ++i) { fctx.type = APR_ARRAY_IDX(challenges, i, const char *); + fctx.accepted = NULL; md_json_itera(find_type, &fctx, authz->resource, MD_KEY_CHALLENGES, NULL); + md_log_perror(MD_LOG_MARK, MD_LOG_TRACE1, 0, p, + "%s: challenge type '%s' for %s: %s", + authz->domain, fctx.type, md->name, + fctx.accepted? "maybe acceptable" : "not applicable"); + + if (fctx.accepted) { + for (j = 0; j < (int)CHA_TYPES_LEN; ++j) { + if (!apr_strnatcasecmp(CHA_TYPES[j].name, fctx.accepted->type)) { + md_result_activity_printf(result, "Setting up challenge '%s' for domain %s", + fctx.accepted->type, authz->domain); + rv = CHA_TYPES[j].setup(fctx.accepted, authz, acme, store, key_specs, + acme_tls_1_domains, md, env, result, + psetup_token, p); + if (APR_SUCCESS == rv) { + md_log_perror(MD_LOG_MARK, MD_LOG_DEBUG, rv, p, + "%s: set up challenge '%s' for %s", + authz->domain, fctx.accepted->type, md->name); + goto out; + } + md_result_printf(result, rv, "error setting up challenge '%s' for %s, " + "for domain %s, looking for other option", + fctx.accepted->type, authz->domain, md->name); + md_result_log(result, MD_LOG_INFO); + } + } + } } - if (!fctx.accepted) { +out: + if (!fctx.accepted || APR_ENOTIMPL == rv) { rv = APR_EINVAL; fctx.offered = apr_array_make(p, 5, sizeof(const char*)); md_json_itera(collect_offered, &fctx, authz->resource, MD_KEY_CHALLENGES, NULL); - md_log_perror(MD_LOG_MARK, MD_LOG_WARNING, rv, p, - "%s: the server offers no ACME challenge that is configured " - "for this MD. The server offered '%s' and available for this " - "MD are: '%s' (via %s).", + md_result_printf(result, rv, "None of offered challenge types for domain %s are supported. " + "The server offered '%s' and available are: '%s'.", authz->domain, apr_array_pstrcat(p, fctx.offered, ' '), - apr_array_pstrcat(p, challenges, ' '), - authz->location); - return rv; + apr_array_pstrcat(p, challenges, ' ')); + result->problem = "challenge-mismatch"; + md_result_log(result, MD_LOG_ERR); } - - for (i = 0; i < (int)CHA_TYPES_LEN; ++i) { - if (!apr_strnatcasecmp(CHA_TYPES[i].name, fctx.accepted->type)) { - return CHA_TYPES[i].start(fctx.accepted, authz, acme, store, key_spec, p); - } + else if (APR_SUCCESS != rv) { + fctx.offered = apr_array_make(p, 5, sizeof(const char*)); + md_json_itera(collect_offered, &fctx, authz->resource, MD_KEY_CHALLENGES, NULL); + md_result_printf(result, rv, "None of the offered challenge types %s offered " + "for domain %s could be setup successfully. 
Please check the " + "log for errors.", authz->domain, + apr_array_pstrcat(p, fctx.offered, ' ')); + result->problem = "challenge-setup-failure"; + md_result_log(result, MD_LOG_ERR); } - - rv = APR_ENOTIMPL; - md_log_perror(MD_LOG_MARK, MD_LOG_ERR, rv, p, - "%s: no implementation found for challenge '%s'", - authz->domain, fctx.accepted->type); return rv; } -/**************************************************************************************************/ -/* Delete an existing authz resource */ - -typedef struct { - apr_pool_t *p; - md_acme_authz_t *authz; -} del_ctx; - -static apr_status_t on_init_authz_del(md_acme_req_t *req, void *baton) -{ - md_json_t *jpayload; - - (void)baton; - jpayload = md_json_create(req->p); - md_json_sets("deactivated", jpayload, MD_KEY_STATUS, NULL); - - return md_acme_req_body_init(req, jpayload); -} - -static apr_status_t authz_del(md_acme_t *acme, apr_pool_t *p, const apr_table_t *hdrs, - md_json_t *body, void *baton) -{ - authz_req_ctx *ctx = baton; - - (void)p; - (void)body; - (void)hdrs; - md_log_perror(MD_LOG_MARK, MD_LOG_INFO, 0, ctx->p, "deleted authz %s", ctx->authz->location); - acme->acct = NULL; - return APR_SUCCESS; -} - -apr_status_t md_acme_authz_del(md_acme_authz_t *authz, md_acme_t *acme, - md_store_t *store, apr_pool_t *p) -{ - authz_req_ctx ctx; - - (void)store; - ctx.p = p; - ctx.authz = authz; - - md_log_perror(MD_LOG_MARK, MD_LOG_DEBUG, 0, p, "delete authz for %s from %s", - authz->domain, authz->location); - return md_acme_POST(acme, authz->location, on_init_authz_del, authz_del, NULL, &ctx); -} - -/**************************************************************************************************/ -/* authz conversion */ - -md_json_t *md_acme_authz_to_json(md_acme_authz_t *a, apr_pool_t *p) -{ - md_json_t *json = md_json_create(p); - if (json) { - md_json_sets(a->domain, json, MD_KEY_DOMAIN, NULL); - md_json_sets(a->location, json, MD_KEY_LOCATION, NULL); - md_json_sets(a->dir, json, MD_KEY_DIR, NULL); - md_json_setl(a->state, json, MD_KEY_STATE, NULL); - return json; - } - return NULL; -} - -md_acme_authz_t *md_acme_authz_from_json(struct md_json_t *json, apr_pool_t *p) +apr_status_t md_acme_authz_teardown(struct md_store_t *store, const char *token, + const md_t *md, apr_table_t *env, apr_pool_t *p) { - md_acme_authz_t *authz = md_acme_authz_create(p); - if (authz) { - authz->domain = md_json_dups(p, json, MD_KEY_DOMAIN, NULL); - authz->location = md_json_dups(p, json, MD_KEY_LOCATION, NULL); - authz->dir = md_json_dups(p, json, MD_KEY_DIR, NULL); - authz->state = (md_acme_authz_state_t)md_json_getl(json, MD_KEY_STATE, NULL); - return authz; - } - return NULL; -} - -/**************************************************************************************************/ -/* authz_set conversion */ - -#define MD_KEY_ACCOUNT "account" -#define MD_KEY_AUTHZS "authorizations" - -static apr_status_t authz_to_json(void *value, md_json_t *json, apr_pool_t *p, void *baton) -{ - (void)baton; - return md_json_setj(md_acme_authz_to_json(value, p), json, NULL); -} - -static apr_status_t authz_from_json(void **pvalue, md_json_t *json, apr_pool_t *p, void *baton) -{ - (void)baton; - *pvalue = md_acme_authz_from_json(json, p); - return (*pvalue)? 
APR_SUCCESS : APR_EINVAL; -} - -md_json_t *md_acme_authz_set_to_json(md_acme_authz_set_t *set, apr_pool_t *p) -{ - md_json_t *json = md_json_create(p); - if (json) { - md_json_seta(set->authzs, authz_to_json, NULL, json, MD_KEY_AUTHZS, NULL); - return json; - } - return NULL; -} - -md_acme_authz_set_t *md_acme_authz_set_from_json(md_json_t *json, apr_pool_t *p) -{ - md_acme_authz_set_t *set = md_acme_authz_set_create(p); - if (set) { - md_json_geta(set->authzs, authz_from_json, NULL, json, MD_KEY_AUTHZS, NULL); - return set; - } - return NULL; -} - -/**************************************************************************************************/ -/* persistence */ - -apr_status_t md_acme_authz_set_load(struct md_store_t *store, md_store_group_t group, - const char *md_name, md_acme_authz_set_t **pauthz_set, - apr_pool_t *p) -{ - apr_status_t rv; - md_json_t *json; - md_acme_authz_set_t *authz_set; - - rv = md_store_load_json(store, group, md_name, MD_FN_AUTHZ, &json, p); - if (APR_SUCCESS == rv) { - authz_set = md_acme_authz_set_from_json(json, p); - } - *pauthz_set = (APR_SUCCESS == rv)? authz_set : NULL; - return rv; -} - -static apr_status_t p_save(void *baton, apr_pool_t *p, apr_pool_t *ptemp, va_list ap) -{ - md_store_t *store = baton; - md_json_t *json; - md_store_group_t group; - md_acme_authz_set_t *set; - const char *md_name; - int create; - - (void)p; - group = (md_store_group_t)va_arg(ap, int); - md_name = va_arg(ap, const char *); - set = va_arg(ap, md_acme_authz_set_t *); - create = va_arg(ap, int); - - json = md_acme_authz_set_to_json(set, ptemp); - assert(json); - return md_store_save_json(store, ptemp, group, md_name, MD_FN_AUTHZ, json, create); -} - -apr_status_t md_acme_authz_set_save(struct md_store_t *store, apr_pool_t *p, - md_store_group_t group, const char *md_name, - md_acme_authz_set_t *authz_set, int create) -{ - return md_util_pool_vdo(p_save, store, p, group, md_name, authz_set, create, NULL); -} - -static apr_status_t p_purge(void *baton, apr_pool_t *p, apr_pool_t *ptemp, va_list ap) -{ - md_store_t *store = baton; - md_acme_authz_set_t *authz_set; - const md_acme_authz_t *authz; - md_store_group_t group; - const char *md_name; + char *challenge, *domain; int i; - - group = (md_store_group_t)va_arg(ap, int); - md_name = va_arg(ap, const char *); - - if (APR_SUCCESS == md_acme_authz_set_load(store, group, md_name, &authz_set, p)) { - md_log_perror(MD_LOG_MARK, MD_LOG_DEBUG, 0, p, "authz_set loaded for %s", md_name); - for (i = 0; i < authz_set->authzs->nelts; ++i) { - authz = APR_ARRAY_IDX(authz_set->authzs, i, const md_acme_authz_t*); - md_log_perror(MD_LOG_MARK, MD_LOG_DEBUG, 0, p, "authz check %s", authz->domain); - if (authz->dir) { - md_log_perror(MD_LOG_MARK, MD_LOG_DEBUG, 0, p, "authz purge %s", authz->dir); - md_store_purge(store, p, MD_SG_CHALLENGES, authz->dir); + + if (strchr(token, ':')) { + challenge = apr_pstrdup(p, token); + domain = strchr(challenge, ':'); + *domain = '\0'; domain++; + for (i = 0; i < (int)CHA_TYPES_LEN; ++i) { + if (!apr_strnatcasecmp(CHA_TYPES[i].name, challenge)) { + if (CHA_TYPES[i].teardown) { + return CHA_TYPES[i].teardown(store, domain, md, env, p); + } + break; } } } - return md_store_remove(store, group, md_name, MD_FN_AUTHZ, ptemp, 1); -} - -apr_status_t md_acme_authz_set_purge(md_store_t *store, md_store_group_t group, - apr_pool_t *p, const char *md_name) -{ - return md_util_pool_vdo(p_purge, store, p, group, md_name, NULL); + return APR_SUCCESS; } diff --git a/modules/md/md_acme_authz.h 
b/modules/md/md_acme_authz.h index aa33f23..d74beeb 100644 --- a/modules/md/md_acme_authz.h +++ b/modules/md/md_acme_authz.h @@ -18,19 +18,22 @@ #define mod_md_md_acme_authz_h struct apr_array_header_t; +struct apr_table_t; struct md_acme_t; struct md_acme_acct_t; struct md_json_t; struct md_store_t; struct md_pkey_spec_t; +struct md_result_t; typedef struct md_acme_challenge_t md_acme_challenge_t; /**************************************************************************************************/ /* authorization request for a specific domain name */ +#define MD_AUTHZ_TYPE_DNS01 "dns-01" #define MD_AUTHZ_TYPE_HTTP01 "http-01" -#define MD_AUTHZ_TYPE_TLSSNI01 "tls-sni-01" +#define MD_AUTHZ_TYPE_TLSALPN01 "tls-alpn-01" typedef enum { MD_ACME_AUTHZ_S_UNKNOWN, @@ -43,62 +46,34 @@ typedef struct md_acme_authz_t md_acme_authz_t; struct md_acme_authz_t { const char *domain; - const char *location; - const char *dir; + const char *url; md_acme_authz_state_t state; apr_time_t expires; + const char *error_type; + const char *error_detail; + const struct md_json_t *error_subproblems; struct md_json_t *resource; }; #define MD_FN_HTTP01 "acme-http-01.txt" -#define MD_FN_TLSSNI01_CERT "acme-tls-sni-01.cert.pem" -#define MD_FN_TLSSNI01_PKEY "acme-tls-sni-01.key.pem" -#define MD_FN_AUTHZ "authz.json" +void tls_alpn01_fnames(apr_pool_t *p, struct md_pkey_spec_t *kspec, char **keyfn, char **certfn ); md_acme_authz_t *md_acme_authz_create(apr_pool_t *p); -struct md_json_t *md_acme_authz_to_json(md_acme_authz_t *a, apr_pool_t *p); -md_acme_authz_t *md_acme_authz_from_json(struct md_json_t *json, apr_pool_t *p); - -/* authz interaction with ACME server */ -apr_status_t md_acme_authz_register(struct md_acme_authz_t **pauthz, struct md_acme_t *acme, - struct md_store_t *store, const char *domain, apr_pool_t *p); - -apr_status_t md_acme_authz_update(md_acme_authz_t *authz, struct md_acme_t *acme, - struct md_store_t *store, apr_pool_t *p); +apr_status_t md_acme_authz_retrieve(md_acme_t *acme, apr_pool_t *p, const char *url, + md_acme_authz_t **pauthz); +apr_status_t md_acme_authz_update(md_acme_authz_t *authz, struct md_acme_t *acme, apr_pool_t *p); apr_status_t md_acme_authz_respond(md_acme_authz_t *authz, struct md_acme_t *acme, struct md_store_t *store, apr_array_header_t *challenges, - struct md_pkey_spec_t *key_spec, apr_pool_t *p); -apr_status_t md_acme_authz_del(md_acme_authz_t *authz, struct md_acme_t *acme, - struct md_store_t *store, apr_pool_t *p); - -/**************************************************************************************************/ -/* set of authz data for a managed domain */ - -typedef struct md_acme_authz_set_t md_acme_authz_set_t; - -struct md_acme_authz_set_t { - struct apr_array_header_t *authzs; -}; - -md_acme_authz_set_t *md_acme_authz_set_create(apr_pool_t *p); -md_acme_authz_t *md_acme_authz_set_get(md_acme_authz_set_t *set, const char *domain); -apr_status_t md_acme_authz_set_add(md_acme_authz_set_t *set, md_acme_authz_t *authz); -apr_status_t md_acme_authz_set_remove(md_acme_authz_set_t *set, const char *domain); - -struct md_json_t *md_acme_authz_set_to_json(md_acme_authz_set_t *set, apr_pool_t *p); -md_acme_authz_set_t *md_acme_authz_set_from_json(struct md_json_t *json, apr_pool_t *p); - -apr_status_t md_acme_authz_set_load(struct md_store_t *store, md_store_group_t group, - const char *md_name, md_acme_authz_set_t **pauthz_set, - apr_pool_t *p); -apr_status_t md_acme_authz_set_save(struct md_store_t *store, apr_pool_t *p, - md_store_group_t group, const char *md_name, 
- md_acme_authz_set_t *authz_set, int create); - -apr_status_t md_acme_authz_set_purge(struct md_store_t *store, md_store_group_t group, - apr_pool_t *p, const char *md_name); + struct md_pkeys_spec_t *key_spec, + apr_array_header_t *acme_tls_1_domains, const md_t *md, + struct apr_table_t *env, + apr_pool_t *p, const char **setup_token, + struct md_result_t *result); + +apr_status_t md_acme_authz_teardown(struct md_store_t *store, const char *setup_token, + const md_t *md, struct apr_table_t *env, apr_pool_t *p); #endif /* md_acme_authz_h */ diff --git a/modules/md/md_acme_drive.c b/modules/md/md_acme_drive.c index ba4e865..4bb04f3 100644 --- a/modules/md/md_acme_drive.c +++ b/modules/md/md_acme_drive.c @@ -29,6 +29,7 @@ #include "md_jws.h" #include "md_http.h" #include "md_log.h" +#include "md_result.h" #include "md_reg.h" #include "md_store.h" #include "md_util.h" @@ -36,315 +37,160 @@ #include "md_acme.h" #include "md_acme_acct.h" #include "md_acme_authz.h" +#include "md_acme_order.h" -typedef struct { - md_proto_driver_t *driver; - - const char *phase; - int complete; +#include "md_acme_drive.h" +#include "md_acmev2_drive.h" - md_pkey_t *privkey; /* the new private key */ - apr_array_header_t *pubcert; /* the new certificate + chain certs */ - - md_cert_t *cert; /* the new certificate */ - apr_array_header_t *chain; /* the chain certificates */ - const char *next_up_link; /* where the next chain cert is */ - - md_acme_t *acme; - md_t *md; - const md_creds_t *ncreds; +/**************************************************************************************************/ +/* account setup */ + +static apr_status_t use_staged_acct(md_acme_t *acme, struct md_store_t *store, + const md_t *md, apr_pool_t *p) +{ + md_acme_acct_t *acct; + md_pkey_t *pkey; + apr_status_t rv; - apr_array_header_t *ca_challenges; - md_acme_authz_set_t *authz_set; - apr_interval_time_t authz_monitor_timeout; + if (APR_SUCCESS == (rv = md_acme_acct_load(&acct, &pkey, store, + MD_SG_STAGING, md->name, acme->p))) { + acme->acct_id = NULL; + acme->acct = acct; + acme->acct_key = pkey; + rv = md_acme_acct_validate(acme, NULL, p); + } + return rv; +} + +static apr_status_t save_acct_staged(md_acme_t *acme, md_store_t *store, + const char *md_name, apr_pool_t *p) +{ + md_json_t *jacct; + apr_status_t rv; - const char *csr_der_64; - apr_interval_time_t cert_poll_timeout; + jacct = md_acme_acct_to_json(acme->acct, p); -} md_acme_driver_t; - -/**************************************************************************************************/ -/* account setup */ + rv = md_store_save(store, p, MD_SG_STAGING, md_name, MD_FN_ACCOUNT, MD_SV_JSON, jacct, 0); + if (APR_SUCCESS == rv) { + rv = md_store_save(store, p, MD_SG_STAGING, md_name, MD_FN_ACCT_KEY, + MD_SV_PKEY, acme->acct_key, 0); + } + return rv; +} -static apr_status_t ad_set_acct(md_proto_driver_t *d) +apr_status_t md_acme_drive_set_acct(md_proto_driver_t *d, md_result_t *result) { md_acme_driver_t *ad = d->baton; md_t *md = ad->md; apr_status_t rv = APR_SUCCESS; - int update = 0, acct_installed = 0; + int update_md = 0, update_acct = 0; + + md_result_activity_printf(result, "Selecting account to use for %s", d->md->name); + md_acme_clear_acct(ad->acme); - ad->phase = "setup acme"; - if (!ad->acme - && APR_SUCCESS != (rv = md_acme_create(&ad->acme, d->p, md->ca_url, d->proxy_url))) { - goto out; - } - - ad->phase = "choose account"; /* Do we have a staged (modified) account? 
*/ - if (APR_SUCCESS == (rv = md_acme_use_acct_staged(ad->acme, d->store, md, d->p))) { + if (APR_SUCCESS == (rv = use_staged_acct(ad->acme, d->store, md, d->p))) { md_log_perror(MD_LOG_MARK, MD_LOG_DEBUG, rv, d->p, "re-using staged account"); - md->ca_account = MD_ACME_ACCT_STAGED; - acct_installed = 1; } - else if (APR_STATUS_IS_ENOENT(rv)) { - rv = APR_SUCCESS; + else if (!APR_STATUS_IS_ENOENT(rv)) { + goto leave; } /* Get an account for the ACME server for this MD */ - if (md->ca_account && !acct_installed) { + if (!ad->acme->acct && md->ca_account) { md_log_perror(MD_LOG_MARK, MD_LOG_DEBUG, rv, d->p, "re-use account '%s'", md->ca_account); - rv = md_acme_use_acct(ad->acme, d->store, d->p, md->ca_account); + rv = md_acme_use_acct_for_md(ad->acme, d->store, d->p, md->ca_account, md); if (APR_STATUS_IS_ENOENT(rv) || APR_STATUS_IS_EINVAL(rv)) { md_log_perror(MD_LOG_MARK, MD_LOG_DEBUG, rv, d->p, "rejected %s", md->ca_account); md->ca_account = NULL; - update = 1; - rv = APR_SUCCESS; + update_md = 1; + } + else if (APR_SUCCESS != rv) { + goto leave; } } - if (APR_SUCCESS == rv && !md->ca_account) { + if (!ad->acme->acct && !md->ca_account) { /* Find a local account for server, store at MD */ md_log_perror(MD_LOG_MARK, MD_LOG_DEBUG, rv, d->p, "%s: looking at existing accounts", d->proto->protocol); - if (APR_SUCCESS == md_acme_find_acct(ad->acme, d->store, d->p)) { - md->ca_account = md_acme_get_acct_id(ad->acme); - update = 1; + if (APR_SUCCESS == (rv = md_acme_find_acct_for_md(ad->acme, d->store, md))) { + md->ca_account = md_acme_acct_id_get(ad->acme); + update_md = 1; + md_log_perror(MD_LOG_MARK, MD_LOG_DEBUG, rv, d->p, "%s: using account %s (id=%s)", + d->proto->protocol, ad->acme->acct->url, md->ca_account); } } - if (APR_SUCCESS == rv && !md->ca_account) { - /* 2.2 No local account exists, create a new one */ + if (!ad->acme->acct) { + /* No account staged, no suitable found in store, register a new one */ + md_result_activity_printf(result, "Creating new ACME account for %s", d->md->name); md_log_perror(MD_LOG_MARK, MD_LOG_DEBUG, rv, d->p, "%s: creating new account", d->proto->protocol); if (!ad->md->contacts || apr_is_empty_array(md->contacts)) { - md_log_perror(MD_LOG_MARK, MD_LOG_ERR, APR_EINVAL, d->p, - "no contact information for md %s", md->name); rv = APR_EINVAL; - goto out; + md_result_printf(result, rv, "No contact information is available for MD %s. " + "Configure one using the MDContactEmail or ServerAdmin directive.", md->name); + md_result_log(result, MD_LOG_ERR); + goto leave; } - - if (APR_SUCCESS == (rv = md_acme_create_acct(ad->acme, d->p, md->contacts, - md->ca_agreement)) - && APR_SUCCESS == (rv = md_acme_acct_save_staged(ad->acme, d->store, md, d->p))) { - md->ca_account = MD_ACME_ACCT_STAGED; - update = 1; + + /* ACMEv1 allowed registration of accounts without accepted Terms-of-Service. + * ACMEv2 requires it. Fail early in this case with a meaningful error message. + */ + if (!md->ca_agreement) { + md_result_printf(result, APR_EINVAL, + "the CA requires you to accept the terms-of-service " + "as specified in <%s>. " + "Please read the document that you find at that URL and, " + "if you agree to the conditions, configure " + "\"MDCertificateAgreement accepted\" " + "in your Apache. 
Then (graceful) restart the server to activate.", + ad->acme->ca_agreement); + md_result_log(result, MD_LOG_ERR); + rv = result->status; + goto leave; } - } -out: - if (APR_SUCCESS == rv) { - const char *agreement = md_acme_get_agreement(ad->acme); - /* Persist the account chosen at the md so we use the same on future runs */ - if (agreement && !md->ca_agreement) { - md->ca_agreement = agreement; - update = 1; + if (ad->acme->eab_required && (!md->ca_eab_kid || !strcmp("none", md->ca_eab_kid))) { + md_result_printf(result, APR_EINVAL, + "the CA requires 'External Account Binding' which is not " + "configured. This means you need to obtain a 'Key ID' and a " + "'HMAC' from the CA and configure that using the " + "MDExternalAccountBinding directive in your config. " + "The creation of a new ACME account will most likely fail, " + "but an attempt is made anyway.", + ad->acme->ca_agreement); + md_result_log(result, MD_LOG_INFO); } - if (update) { - rv = md_save(d->store, d->p, MD_SG_STAGING, ad->md, 0); - } - } - return rv; -} - -/**************************************************************************************************/ -/* authz/challenge setup */ -/** - * Pre-Req: we have an account for the ACME server that has accepted the current license agreement - * For each domain in MD: - * - check if there already is a valid AUTHZ resource - * - if ot, create an AUTHZ resource with challenge data - */ -static apr_status_t ad_setup_authz(md_proto_driver_t *d) -{ - md_acme_driver_t *ad = d->baton; - apr_status_t rv; - md_t *md = ad->md; - md_acme_authz_t *authz; - int i; - int changed = 0; - - assert(ad->md); - assert(ad->acme); - - ad->phase = "check authz"; - - /* For each domain in MD: AUTHZ setup - * if an AUTHZ resource is known, check if it is still valid - * if known AUTHZ resource is not valid, remove, goto 4.1.1 - * if no AUTHZ available, create a new one for the domain, store it - */ - rv = md_acme_authz_set_load(d->store, MD_SG_STAGING, md->name, &ad->authz_set, d->p); - if (!ad->authz_set || APR_STATUS_IS_ENOENT(rv)) { - ad->authz_set = md_acme_authz_set_create(d->p); - rv = APR_SUCCESS; - } - else if (APR_SUCCESS != rv) { - md_log_perror(MD_LOG_MARK, MD_LOG_DEBUG, rv, d->p, "%s: loading authz data", md->name); - md_acme_authz_set_purge(d->store, MD_SG_STAGING, d->p, md->name); - return APR_EAGAIN; - } - - /* Remove anything we no longer need */ - for (i = 0; i < ad->authz_set->authzs->nelts;) { - authz = APR_ARRAY_IDX(ad->authz_set->authzs, i, md_acme_authz_t*); - if (!md_contains(md, authz->domain, 0)) { - md_acme_authz_set_remove(ad->authz_set, authz->domain); - changed = 1; - } - else { - ++i; - } - } - - /* Add anything we do not already have */ - for (i = 0; i < md->domains->nelts && APR_SUCCESS == rv; ++i) { - const char *domain = APR_ARRAY_IDX(md->domains, i, const char *); - authz = md_acme_authz_set_get(ad->authz_set, domain); - if (authz) { - /* check valid */ - rv = md_acme_authz_update(authz, ad->acme, d->store, d->p); - md_log_perror(MD_LOG_MARK, MD_LOG_DEBUG, rv, d->p, "%s: updated authz for %s", - md->name, domain); - if (APR_SUCCESS != rv) { - md_acme_authz_set_remove(ad->authz_set, domain); - authz = NULL; - changed = 1; - } - } - if (!authz) { - /* create new one */ - rv = md_acme_authz_register(&authz, ad->acme, d->store, domain, d->p); - md_log_perror(MD_LOG_MARK, MD_LOG_DEBUG, rv, d->p, "%s: created authz for %s", - md->name, domain); - if (APR_SUCCESS == rv) { - rv = md_acme_authz_set_add(ad->authz_set, authz); - changed = 1; + rv = 
md_acme_acct_register(ad->acme, d->store, md, d->p); + if (APR_SUCCESS != rv) { + if (APR_SUCCESS != ad->acme->last->status) { + md_result_dup(result, ad->acme->last); + md_result_log(result, MD_LOG_ERR); } - } - } - - /* Save any changes */ - if (APR_SUCCESS == rv && changed) { - rv = md_acme_authz_set_save(d->store, d->p, MD_SG_STAGING, md->name, ad->authz_set, 0); - md_log_perror(MD_LOG_MARK, MD_LOG_TRACE1, rv, d->p, "%s: saved", md->name); - } - - return rv; -} - -/** - * Pre-Req: all domains have a AUTHZ resources at the ACME server - * For each domain in MD: - * - if AUTHZ resource is 'valid' -> continue - * - if AUTHZ resource is 'pending': - * - find preferred challenge choice - * - calculate challenge data for httpd to find - * - POST challenge start to ACME server - * For each domain in MD where AUTHZ is 'pending', until overall timeout: - * - wait a certain time, check status again - * If not all AUTHZ are valid, fail - */ -static apr_status_t ad_start_challenges(md_proto_driver_t *d) -{ - md_acme_driver_t *ad = d->baton; - apr_status_t rv = APR_SUCCESS; - md_acme_authz_t *authz; - int i, changed = 0; - - assert(ad->md); - assert(ad->acme); - assert(ad->authz_set); - - ad->phase = "start challenges"; - - for (i = 0; i < ad->authz_set->authzs->nelts && APR_SUCCESS == rv; ++i) { - authz = APR_ARRAY_IDX(ad->authz_set->authzs, i, md_acme_authz_t*); - md_log_perror(MD_LOG_MARK, MD_LOG_DEBUG, rv, d->p, "%s: check AUTHZ for %s", - ad->md->name, authz->domain); - if (APR_SUCCESS != (rv = md_acme_authz_update(authz, ad->acme, d->store, d->p))) { - md_log_perror(MD_LOG_MARK, MD_LOG_DEBUG, 0, d->p, "%s: check authz for %s", - ad->md->name, authz->domain); - break; + goto leave; } - switch (authz->state) { - case MD_ACME_AUTHZ_S_VALID: - break; - - case MD_ACME_AUTHZ_S_PENDING: - rv = md_acme_authz_respond(authz, ad->acme, d->store, ad->ca_challenges, - d->md->pkey_spec, d->p); - changed = 1; - break; - - default: - rv = APR_EINVAL; - md_log_perror(MD_LOG_MARK, MD_LOG_ERR, rv, d->p, - "%s: unexpected AUTHZ state %d at %s", - authz->domain, authz->state, authz->location); - break; - } + md->ca_account = NULL; + update_md = 1; + update_acct = 1; } - if (APR_SUCCESS == rv && changed) { - rv = md_acme_authz_set_save(d->store, d->p, MD_SG_STAGING, ad->md->name, ad->authz_set, 0); - md_log_perror(MD_LOG_MARK, MD_LOG_TRACE1, rv, d->p, "%s: saved", ad->md->name); +leave: + /* Persist MD changes in STAGING, so we pick them up on next run */ + if (APR_SUCCESS == rv && update_md) { + rv = md_save(d->store, d->p, MD_SG_STAGING, ad->md, 0); } - return rv; -} - -static apr_status_t check_challenges(void *baton, int attempt) -{ - md_proto_driver_t *d = baton; - md_acme_driver_t *ad = d->baton; - md_acme_authz_t *authz; - apr_status_t rv = APR_SUCCESS; - int i; - - for (i = 0; i < ad->authz_set->authzs->nelts && APR_SUCCESS == rv; ++i) { - authz = APR_ARRAY_IDX(ad->authz_set->authzs, i, md_acme_authz_t*); - md_log_perror(MD_LOG_MARK, MD_LOG_DEBUG, rv, d->p, "%s: check AUTHZ for %s(%d. 
attempt)", - ad->md->name, authz->domain, attempt); - if (APR_SUCCESS == (rv = md_acme_authz_update(authz, ad->acme, d->store, d->p))) { - switch (authz->state) { - case MD_ACME_AUTHZ_S_VALID: - break; - case MD_ACME_AUTHZ_S_PENDING: - rv = APR_EAGAIN; - md_log_perror(MD_LOG_MARK, MD_LOG_DEBUG, rv, d->p, - "%s: status pending at %s", authz->domain, authz->location); - break; - default: - rv = APR_EINVAL; - md_log_perror(MD_LOG_MARK, MD_LOG_ERR, rv, d->p, - "%s: unexpected AUTHZ state %d at %s", - authz->domain, authz->state, authz->location); - break; - } - } + /* Persist account changes in STAGING, so we pick them up on next run */ + if (APR_SUCCESS == rv && update_acct) { + rv = save_acct_staged(ad->acme, d->store, md->name, d->p); } return rv; } -static apr_status_t ad_monitor_challenges(md_proto_driver_t *d) -{ - md_acme_driver_t *ad = d->baton; - apr_status_t rv; - - assert(ad->md); - assert(ad->acme); - assert(ad->authz_set); - - ad->phase = "monitor challenges"; - rv = md_util_try(check_challenges, d, 0, ad->authz_monitor_timeout, 0, 0, 1); - - md_log_perror(MD_LOG_MARK, MD_LOG_INFO, rv, d->p, - "%s: checked all domain authorizations", ad->md->name); - return rv; -} - /**************************************************************************************************/ /* poll cert */ @@ -352,41 +198,52 @@ static void get_up_link(md_proto_driver_t *d, apr_table_t *headers) { md_acme_driver_t *ad = d->baton; - ad->next_up_link = md_link_find_relation(headers, d->p, "up"); - if (ad->next_up_link) { + ad->chain_up_link = md_link_find_relation(headers, d->p, "up"); + if (ad->chain_up_link) { md_log_perror(MD_LOG_MARK, MD_LOG_DEBUG, 0, d->p, - "server reports up link as %s", ad->next_up_link); + "server reports up link as %s", ad->chain_up_link); } } -static apr_status_t read_http_cert(md_cert_t **pcert, apr_pool_t *p, +static apr_status_t add_http_certs(apr_array_header_t *chain, apr_pool_t *p, const md_http_response_t *res) { apr_status_t rv = APR_SUCCESS; + const char *ct; - if (APR_SUCCESS != (rv = md_cert_read_http(pcert, p, res)) + ct = apr_table_get(res->headers, "Content-Type"); + ct = md_util_parse_ct(res->req->pool, ct); + md_log_perror(MD_LOG_MARK, MD_LOG_TRACE1, rv, p, + "parse certs from %s -> %d (%s)", res->req->url, res->status, ct); + if (ct && !strcmp("application/x-pkcs7-mime", ct)) { + /* this looks like a root cert and we do not want those in our chain */ + goto out; + } + + /* Lets try to read one or more certificates */ + if (APR_SUCCESS != (rv = md_cert_chain_read_http(chain, p, res)) && APR_STATUS_IS_ENOENT(rv)) { rv = APR_EAGAIN; md_log_perror(MD_LOG_MARK, MD_LOG_DEBUG, rv, p, "cert not in response from %s", res->req->url); } +out: return rv; } -static apr_status_t on_got_cert(md_acme_t *acme, const md_http_response_t *res, void *baton) +static apr_status_t on_add_cert(md_acme_t *acme, const md_http_response_t *res, void *baton) { md_proto_driver_t *d = baton; md_acme_driver_t *ad = d->baton; apr_status_t rv = APR_SUCCESS; + int count; (void)acme; - if (APR_SUCCESS == (rv = read_http_cert(&ad->cert, d->p, res))) { - rv = md_store_save(d->store, d->p, MD_SG_STAGING, ad->md->name, MD_FN_CERT, - MD_SV_CERT, ad->cert, 0); - md_log_perror(MD_LOG_MARK, MD_LOG_DEBUG, rv, d->p, "cert parsed and saved"); - if (APR_SUCCESS == rv) { - get_up_link(d, res->headers); - } + count = ad->cred->chain->nelts; + if (APR_SUCCESS == (rv = add_http_certs(ad->cred->chain, d->p, res))) { + md_log_perror(MD_LOG_MARK, MD_LOG_DEBUG, rv, d->p, "%d certs parsed", + ad->cred->chain->nelts - 
count); + get_up_link(d, res->headers); } return rv; } @@ -397,19 +254,21 @@ static apr_status_t get_cert(void *baton, int attempt) md_acme_driver_t *ad = d->baton; (void)attempt; - return md_acme_GET(ad->acme, ad->md->cert_url, NULL, NULL, on_got_cert, d); + md_log_perror(MD_LOG_MARK, MD_LOG_TRACE1, 0, d->p, "retrieving cert from %s", + ad->order->certificate); + return md_acme_GET(ad->acme, ad->order->certificate, NULL, NULL, on_add_cert, NULL, d); } -static apr_status_t ad_cert_poll(md_proto_driver_t *d, int only_once) +apr_status_t md_acme_drive_cert_poll(md_proto_driver_t *d, int only_once) { md_acme_driver_t *ad = d->baton; apr_status_t rv; assert(ad->md); assert(ad->acme); - assert(ad->md->cert_url); + assert(ad->order); + assert(ad->order->certificate); - ad->phase = "poll certificate"; if (only_once) { rv = get_cert(d, 0); } @@ -417,12 +276,12 @@ static apr_status_t ad_cert_poll(md_proto_driver_t *d, int only_once) rv = md_util_try(get_cert, d, 1, ad->cert_poll_timeout, 0, 0, 1); } - md_log_perror(MD_LOG_MARK, MD_LOG_INFO, 0, d->p, "poll for cert at %s", ad->md->cert_url); + md_log_perror(MD_LOG_MARK, MD_LOG_DEBUG, 0, d->p, "poll for cert at %s", ad->order->certificate); return rv; } /**************************************************************************************************/ -/* cert setup */ +/* order finalization */ static apr_status_t on_init_csr_req(md_acme_req_t *req, void *baton) { @@ -431,7 +290,6 @@ static apr_status_t on_init_csr_req(md_acme_req_t *req, void *baton) md_json_t *jpayload; jpayload = md_json_create(req->p); - md_json_sets("new-cert", jpayload, MD_KEY_RESOURCE, NULL); md_json_sets(ad->csr_der_64, jpayload, MD_KEY_CSR, NULL); return md_acme_req_body_init(req, jpayload); @@ -441,34 +299,39 @@ static apr_status_t csr_req(md_acme_t *acme, const md_http_response_t *res, void { md_proto_driver_t *d = baton; md_acme_driver_t *ad = d->baton; + const char *location; + md_cert_t *cert; apr_status_t rv = APR_SUCCESS; (void)acme; - ad->md->cert_url = apr_table_get(res->headers, "location"); - if (!ad->md->cert_url) { + location = apr_table_get(res->headers, "location"); + if (!location) { md_log_perror(MD_LOG_MARK, MD_LOG_ERR, APR_EINVAL, d->p, "cert created without giving its location header"); return APR_EINVAL; } - if (APR_SUCCESS != (rv = md_save(d->store, d->p, MD_SG_STAGING, ad->md, 0))) { + ad->order->certificate = apr_pstrdup(d->p, location); + if (APR_SUCCESS != (rv = md_acme_order_save(d->store, d->p, MD_SG_STAGING, + d->md->name, ad->order, 0))) { md_log_perror(MD_LOG_MARK, MD_LOG_ERR, APR_EINVAL, d->p, - "%s: saving cert url %s", ad->md->name, ad->md->cert_url); + "%s: saving cert url %s", d->md->name, location); return rv; } /* Check if it already was sent with this response */ - ad->next_up_link = NULL; - if (APR_SUCCESS == (rv = md_cert_read_http(&ad->cert, d->p, res))) { - rv = md_cert_save(d->store, d->p, MD_SG_STAGING, ad->md->name, ad->cert, 0); - md_log_perror(MD_LOG_MARK, MD_LOG_DEBUG, rv, d->p, "cert parsed and saved"); - if (APR_SUCCESS == rv) { - get_up_link(d, res->headers); - } + ad->chain_up_link = NULL; + if (APR_SUCCESS == (rv = md_cert_read_http(&cert, d->p, res))) { + md_log_perror(MD_LOG_MARK, MD_LOG_DEBUG, rv, d->p, "cert parsed"); + apr_array_clear(ad->cred->chain); + APR_ARRAY_PUSH(ad->cred->chain, md_cert_t*) = cert; + get_up_link(d, res->headers); } else if (APR_STATUS_IS_ENOENT(rv)) { rv = APR_SUCCESS; - md_log_perror(MD_LOG_MARK, MD_LOG_DEBUG, rv, d->p, - "cert not in response, need to poll %s", ad->md->cert_url); + if 
(location) { + md_log_perror(MD_LOG_MARK, MD_LOG_DEBUG, rv, d->p, + "cert not in response, need to poll %s", location); + } } return rv; @@ -477,6 +340,7 @@ static apr_status_t csr_req(md_acme_t *acme, const md_http_response_t *res, void /** * Pre-Req: all domains have been validated by the ACME server, e.g. all have AUTHZ * resources that have status 'valid' + * - acme_driver->cred keeps the credentials to setup (key spec) * - Setup private key, if not already there * - Generate a CSR with org, contact, etc * - Optionally enable must-staple OCSP extension @@ -487,38 +351,41 @@ static apr_status_t csr_req(md_acme_t *acme, const md_http_response_t *res, void * - GET cert chain * - store cert chain */ -static apr_status_t ad_setup_certificate(md_proto_driver_t *d) +apr_status_t md_acme_drive_setup_cred_chain(md_proto_driver_t *d, md_result_t *result) { md_acme_driver_t *ad = d->baton; + md_pkey_spec_t *spec; md_pkey_t *privkey; apr_status_t rv; - ad->phase = "setup cert privkey"; - - rv = md_pkey_load(d->store, MD_SG_STAGING, ad->md->name, &privkey, d->p); + md_result_activity_printf(result, "Finalizing order for %s", ad->md->name); + + assert(ad->cred); + spec = ad->cred->spec; + + rv = md_pkey_load(d->store, MD_SG_STAGING, d->md->name, spec, &privkey, d->p); if (APR_STATUS_IS_ENOENT(rv)) { - if (APR_SUCCESS == (rv = md_pkey_gen(&privkey, d->p, d->md->pkey_spec))) { - rv = md_pkey_save(d->store, d->p, MD_SG_STAGING, ad->md->name, privkey, 1); + if (APR_SUCCESS == (rv = md_pkey_gen(&privkey, d->p, spec))) { + rv = md_pkey_save(d->store, d->p, MD_SG_STAGING, d->md->name, spec, privkey, 1); } - md_log_perror(MD_LOG_MARK, MD_LOG_DEBUG, rv, d->p, "%s: generate privkey", ad->md->name); - } - - if (APR_SUCCESS == rv) { - ad->phase = "setup csr"; - rv = md_cert_req_create(&ad->csr_der_64, ad->md, privkey, d->p); - md_log_perror(MD_LOG_MARK, MD_LOG_DEBUG, rv, d->p, "%s: create CSR", ad->md->name); - } - - if (APR_SUCCESS == rv) { - ad->phase = "submit csr"; - rv = md_acme_POST(ad->acme, ad->acme->new_cert, on_init_csr_req, NULL, csr_req, d); + md_log_perror(MD_LOG_MARK, MD_LOG_DEBUG, rv, d->p, + "%s: generate %s privkey", d->md->name, md_pkey_spec_name(spec)); } + if (APR_SUCCESS != rv) goto leave; + + md_result_activity_printf(result, "Creating %s CSR", md_pkey_spec_name(spec)); + rv = md_cert_req_create(&ad->csr_der_64, d->md->name, ad->domains, + ad->md->must_staple, privkey, d->p); + md_log_perror(MD_LOG_MARK, MD_LOG_DEBUG, rv, d->p, "%s: create %s CSR", + d->md->name, md_pkey_spec_name(spec)); + if (APR_SUCCESS != rv) goto leave; + + md_result_activity_printf(result, "Submitting %s CSR to CA", md_pkey_spec_name(spec)); + assert(ad->order->finalize); + rv = md_acme_POST(ad->acme, ad->order->finalize, on_init_csr_req, NULL, csr_req, NULL, d); - if (APR_SUCCESS == rv) { - if (!ad->cert) { - rv = ad_cert_poll(d, 0); - } - } +leave: + md_acme_report_result(ad->acme, rv, result); return rv; } @@ -530,22 +397,19 @@ static apr_status_t on_add_chain(md_acme_t *acme, const md_http_response_t *res, md_proto_driver_t *d = baton; md_acme_driver_t *ad = d->baton; apr_status_t rv = APR_SUCCESS; - md_cert_t *cert; const char *ct; (void)acme; ct = apr_table_get(res->headers, "Content-Type"); + ct = md_util_parse_ct(res->req->pool, ct); if (ct && !strcmp("application/x-pkcs7-mime", ct)) { /* root cert most likely, end it here */ return APR_SUCCESS; } - if (APR_SUCCESS == (rv = read_http_cert(&cert, d->p, res))) { + if (APR_SUCCESS == (rv = add_http_certs(ad->cred->chain, d->p, res))) { md_log_perror(MD_LOG_MARK, 
MD_LOG_DEBUG, rv, d->p, "chain cert parsed"); - APR_ARRAY_PUSH(ad->chain, md_cert_t *) = cert; - if (APR_SUCCESS == rv) { - get_up_link(d, res->headers); - } + get_up_link(d, res->headers); } return rv; } @@ -557,19 +421,32 @@ static apr_status_t get_chain(void *baton, int attempt) const char *prev_link = NULL; apr_status_t rv = APR_SUCCESS; - while (APR_SUCCESS == rv && ad->chain->nelts < 10) { - int nelts = ad->chain->nelts; + while (APR_SUCCESS == rv && ad->cred->chain->nelts < 10) { + int nelts = ad->cred->chain->nelts; - if (ad->next_up_link && (!prev_link || strcmp(prev_link, ad->next_up_link))) { - prev_link = ad->next_up_link; + if (ad->chain_up_link && (!prev_link || strcmp(prev_link, ad->chain_up_link))) { + prev_link = ad->chain_up_link; md_log_perror(MD_LOG_MARK, MD_LOG_DEBUG, rv, d->p, - "next issuer is %s", ad->next_up_link); - rv = md_acme_GET(ad->acme, ad->next_up_link, NULL, NULL, on_add_chain, d); + "next chain cert at %s", ad->chain_up_link); + rv = md_acme_GET(ad->acme, ad->chain_up_link, NULL, NULL, on_add_chain, NULL, d); - if (APR_SUCCESS == rv && nelts == ad->chain->nelts) { + if (APR_SUCCESS == rv && nelts == ad->cred->chain->nelts) { break; } + else if (APR_SUCCESS != rv) { + md_log_perror(MD_LOG_MARK, MD_LOG_ERR, rv, d->p, + "error retrieving certificate from %s", ad->chain_up_link); + return rv; + } + } + else if (ad->cred->chain->nelts <= 1) { + /* This cannot be the complete chain (no one signs new web certs with their root) + * and we did not see a "Link: ...rel=up", so we do not know how to continue. */ + md_log_perror(MD_LOG_MARK, MD_LOG_ERR, rv, d->p, + "no link header 'up' for new certificate, unable to retrieve chain"); + rv = APR_EINVAL; + break; } else { rv = APR_SUCCESS; @@ -577,63 +454,103 @@ static apr_status_t get_chain(void *baton, int attempt) } } md_log_perror(MD_LOG_MARK, MD_LOG_TRACE1, rv, d->p, - "got chain with %d certs (%d. attempt)", ad->chain->nelts, attempt); + "got chain with %d certs (%d. attempt)", ad->cred->chain->nelts, attempt); return rv; } -static apr_status_t ad_chain_install(md_proto_driver_t *d) +static apr_status_t ad_chain_retrieve(md_proto_driver_t *d) { md_acme_driver_t *ad = d->baton; apr_status_t rv; - /* We should have that from initial cert retrieval, but if we restarted - * or switched child process, we need to retrieve this again from the - * certificate resources. */ - if (!ad->next_up_link) { - if (APR_SUCCESS != (rv = ad_cert_poll(d, 0))) { - return rv; + /* This may be called repeatedly and needs to progress. The relevant state is in + * ad->cred->chain the certificate chain, starting with the new cert for the md + * ad->order->certificate the url where ACME offers us the new md certificate. 
This may + * be a single one or even the complete chain + * ad->chain_up_link in case the last certificate retrieval did not end the chain, + * the link header with relation "up" gives us the location + * for the next cert in the chain + */ + if (md_array_is_empty(ad->cred->chain)) { + /* Need to start at the order */ + ad->chain_up_link = NULL; + if (!ad->order) { + rv = APR_EGENERAL; + md_log_perror(MD_LOG_MARK, MD_LOG_ERR, rv, d->p, + "%s: asked to retrieve chain, but no order in context", d->md->name); + goto out; } - if (!ad->next_up_link) { + if (!ad->order->certificate) { + rv = APR_EGENERAL; md_log_perror(MD_LOG_MARK, MD_LOG_ERR, rv, d->p, - "server reports no link header 'up' for certificate at %s", ad->md->cert_url); - return APR_EINVAL; + "%s: asked to retrieve chain, but no certificate url part of order", d->md->name); + goto out; + } + + if (APR_SUCCESS != (rv = md_acme_drive_cert_poll(d, 0))) { + goto out; } } - md_log_perror(MD_LOG_MARK, MD_LOG_DEBUG, 0, d->p, - "chain starts at %s", ad->next_up_link); - ad->chain = apr_array_make(d->p, 5, sizeof(md_cert_t *)); - if (APR_SUCCESS == (rv = md_util_try(get_chain, d, 0, ad->cert_poll_timeout, 0, 0, 0))) { - rv = md_store_save(d->store, d->p, MD_SG_STAGING, ad->md->name, MD_FN_CHAIN, - MD_SV_CHAIN, ad->chain, 0); - md_log_perror(MD_LOG_MARK, MD_LOG_DEBUG, rv, d->p, "chain fetched and saved"); - } + rv = md_util_try(get_chain, d, 0, ad->cert_poll_timeout, 0, 0, 0); + md_log_perror(MD_LOG_MARK, MD_LOG_DEBUG, rv, d->p, "chain retrieved"); + +out: return rv; } /**************************************************************************************************/ /* ACME driver init */ -static apr_status_t acme_driver_init(md_proto_driver_t *d) +static apr_status_t acme_driver_preload_init(md_proto_driver_t *d, md_result_t *result) { md_acme_driver_t *ad; - apr_status_t rv = APR_SUCCESS; - + md_credentials_t *cred; + int i; + + md_result_set(result, APR_SUCCESS, NULL); + ad = apr_pcalloc(d->p, sizeof(*ad)); d->baton = ad; - ad->driver = d; + ad->driver = d; ad->authz_monitor_timeout = apr_time_from_sec(30); ad->cert_poll_timeout = apr_time_from_sec(30); + ad->ca_challenges = apr_array_make(d->p, 3, sizeof(const char*)); + + /* We want to obtain credentials (key+certificate) for every key spec in this MD */ + ad->creds = apr_array_make(d->p, md_pkeys_spec_count(d->md->pks), sizeof(md_credentials_t*)); + for (i = 0; i < md_pkeys_spec_count(d->md->pks); ++i) { + cred = apr_pcalloc(d->p, sizeof(*cred)); + cred->spec = md_pkeys_spec_get(d->md->pks, i); + cred->chain = apr_array_make(d->p, 5, sizeof(md_cert_t*)); + APR_ARRAY_PUSH(ad->creds, md_credentials_t*) = cred; + } + + md_log_perror(MD_LOG_MARK, MD_LOG_TRACE1, result->status, d->p, + "%s: init_base driver", d->md->name); + return result->status; +} + +static apr_status_t acme_driver_init(md_proto_driver_t *d, md_result_t *result) +{ + md_acme_driver_t *ad; + int dis_http, dis_https, dis_alpn_acme, dis_dns; + const char *challenge; + + acme_driver_preload_init(d, result); + md_result_set(result, APR_SUCCESS, NULL); + if (APR_SUCCESS != result->status) goto leave; + + ad = d->baton; /* We can only support challenges if the server is reachable from the outside * via port 80 and/or 443. These ports might be mapped for httpd to something * else, but a mapping needs to exist. 
*/ - ad->ca_challenges = apr_array_make(d->p, 3, sizeof(const char *)); - if (d->challenge) { - /* we have been told to use this type */ - APR_ARRAY_PUSH(ad->ca_challenges, const char*) = apr_pstrdup(d->p, d->challenge); + challenge = apr_table_get(d->env, MD_KEY_CHALLENGE); + if (challenge) { + APR_ARRAY_PUSH(ad->ca_challenges, const char*) = apr_pstrdup(d->p, challenge); } else if (d->md->ca_challenges && d->md->ca_challenges->nelts > 0) { /* pre-configured set for this managed domain */ @@ -641,56 +558,119 @@ static apr_status_t acme_driver_init(md_proto_driver_t *d) } else { /* free to chose. Add all we support and see what we get offered */ + APR_ARRAY_PUSH(ad->ca_challenges, const char*) = MD_AUTHZ_TYPE_TLSALPN01; APR_ARRAY_PUSH(ad->ca_challenges, const char*) = MD_AUTHZ_TYPE_HTTP01; - APR_ARRAY_PUSH(ad->ca_challenges, const char*) = MD_AUTHZ_TYPE_TLSSNI01; - } - - if (!d->can_http && !d->can_https) { - md_log_perror(MD_LOG_MARK, MD_LOG_ERR, 0, d->p, "%s: the server seems neither " - "reachable via http (port 80) nor https (port 443). The ACME protocol " - "needs at least one of those so the CA can talk to the server and verify " - "a domain ownership.", d->md->name); - return APR_EGENERAL; - } - - if (!d->can_http) { - ad->ca_challenges = md_array_str_remove(d->p, ad->ca_challenges, MD_AUTHZ_TYPE_HTTP01, 0); - } - if (!d->can_https) { - ad->ca_challenges = md_array_str_remove(d->p, ad->ca_challenges, MD_AUTHZ_TYPE_TLSSNI01, 0); - } + APR_ARRAY_PUSH(ad->ca_challenges, const char*) = MD_AUTHZ_TYPE_DNS01; + + if (!d->can_http && !d->can_https + && md_array_str_index(ad->ca_challenges, MD_AUTHZ_TYPE_DNS01, 0, 0) < 0) { + md_result_printf(result, APR_EGENERAL, + "the server seems neither reachable via http (port 80) nor https (port 443). " + "Please look at the MDPortMap configuration directive on how to correct this. " + "The ACME protocol needs at least one of those so the CA can talk to the server " + "and verify a domain ownership. Alternatively, you may configure support " + "for the %s challenge directive.", MD_AUTHZ_TYPE_DNS01); + goto leave; + } - if (apr_is_empty_array(ad->ca_challenges)) { - md_log_perror(MD_LOG_MARK, MD_LOG_ERR, 0, d->p, "%s: specific CA challenge methods " - "have been configured, but the server is unable to use any of those. 
" - "For 'http-01' it needs to be reachable on port 80, for 'tls-sni-01'" - " port 443 is needed.", d->md->name); - return APR_EGENERAL; + dis_http = dis_https = dis_alpn_acme = dis_dns = 0; + if (!d->can_http && md_array_str_index(ad->ca_challenges, MD_AUTHZ_TYPE_HTTP01, 0, 1) >= 0) { + ad->ca_challenges = md_array_str_remove(d->p, ad->ca_challenges, MD_AUTHZ_TYPE_HTTP01, 0); + dis_http = 1; + } + if (!d->can_https && md_array_str_index(ad->ca_challenges, MD_AUTHZ_TYPE_TLSALPN01, 0, 1) >= 0) { + ad->ca_challenges = md_array_str_remove(d->p, ad->ca_challenges, MD_AUTHZ_TYPE_TLSALPN01, 0); + dis_https = 1; + } + if (apr_is_empty_array(d->md->acme_tls_1_domains) + && md_array_str_index(ad->ca_challenges, MD_AUTHZ_TYPE_TLSALPN01, 0, 1) >= 0) { + ad->ca_challenges = md_array_str_remove(d->p, ad->ca_challenges, MD_AUTHZ_TYPE_TLSALPN01, 0); + dis_alpn_acme = 1; + } + if (!apr_table_get(d->env, MD_KEY_CMD_DNS01) + && NULL == d->md->dns01_cmd + && md_array_str_index(ad->ca_challenges, MD_AUTHZ_TYPE_DNS01, 0, 1) >= 0) { + ad->ca_challenges = md_array_str_remove(d->p, ad->ca_challenges, MD_AUTHZ_TYPE_DNS01, 0); + dis_dns = 1; + } + + if (apr_is_empty_array(ad->ca_challenges)) { + md_result_printf(result, APR_EGENERAL, + "None of the ACME challenge methods configured for this domain are suitable.%s%s%s%s", + dis_http? " The http: challenge 'http-01' is disabled because the server seems not reachable on public port 80." : "", + dis_https? " The https: challenge 'tls-alpn-01' is disabled because the server seems not reachable on public port 443." : "", + dis_alpn_acme? " The https: challenge 'tls-alpn-01' is disabled because the Protocols configuration does not include the 'acme-tls/1' protocol." : "", + dis_dns? " The DNS challenge 'dns-01' is disabled because the directive 'MDChallengeDns01' is not configured." : "" + ); + goto leave; + } } - - md_log_perror(MD_LOG_MARK, MD_LOG_TRACE1, 0, d->p, "%s: init driver", d->md->name); - - return rv; + + md_result_printf(result, 0, "MDomain %s initialized with support for ACME challenges %s", + d->md->name, apr_array_pstrcat(d->p, ad->ca_challenges, ' ')); + +leave: + md_log_perror(MD_LOG_MARK, MD_LOG_TRACE1, result->status, d->p, "%s: init driver", d->md->name); + return result->status; } /**************************************************************************************************/ /* ACME staging */ -static apr_status_t acme_stage(md_proto_driver_t *d) +static apr_status_t load_missing_creds(md_proto_driver_t *d) +{ + md_acme_driver_t *ad = d->baton; + md_credentials_t *cred; + apr_array_header_t *chain; + int i, complete; + apr_status_t rv; + + complete = 1; + for (i = 0; i < ad->creds->nelts; ++i) { + rv = APR_SUCCESS; + cred = APR_ARRAY_IDX(ad->creds, i, md_credentials_t*); + if (!cred->pkey) { + rv = md_pkey_load(d->store, MD_SG_STAGING, d->md->name, cred->spec, &cred->pkey, d->p); + } + if (APR_SUCCESS == rv && md_array_is_empty(cred->chain)) { + rv = md_pubcert_load(d->store, MD_SG_STAGING, d->md->name, cred->spec, &chain, d->p); + if (APR_SUCCESS == rv) { + apr_array_cat(cred->chain, chain); + } + } + if (APR_SUCCESS == rv) { + md_log_perror(MD_LOG_MARK, MD_LOG_TRACE1, 0, d->p, "%s: credentials staged for %s certificate", + d->md->name, md_pkey_spec_name(cred->spec)); + } + else { + complete = 0; + } + } + return complete? 
APR_SUCCESS : APR_EAGAIN; +} + +static apr_status_t acme_renew(md_proto_driver_t *d, md_result_t *result) { md_acme_driver_t *ad = d->baton; int reset_staging = d->reset; apr_status_t rv = APR_SUCCESS; - int renew = 1; - - if (md_log_is_level(d->p, MD_LOG_DEBUG)) { - md_log_perror(MD_LOG_MARK, MD_LOG_DEBUG, 0, d->p, "%s: staging started, " - "state=%d, can_http=%d, can_https=%d, challenges='%s'", - d->md->name, d->md->state, d->can_http, d->can_https, - apr_array_pstrcat(d->p, ad->ca_challenges, ' ')); + apr_time_t now, t, t2; + md_credentials_t *cred; + const char *ca_effective = NULL; + char ts[APR_RFC822_DATE_LEN]; + int i, first = 0; + + if (!d->md->ca_urls || d->md->ca_urls->nelts <= 0) { + /* No CA defined? This is checked in several other places, but lets be sure */ + md_result_printf(result, APR_INCOMPLETE, + "The managed domain %s is missing MDCertificateAuthority", d->md->name); + goto out; } + /* When not explicitly told to reset, we check the existing data. If + * it is incomplete or old, we trigger the reset for a clean start. */ if (!reset_staging) { + md_result_activity_setn(result, "Checking staging area"); rv = md_load(d->store, MD_SG_STAGING, d->md->name, &ad->md, d->p); if (APR_SUCCESS == rv) { /* So, we have a copy in staging, but is it a recent or an old one? */ @@ -702,318 +682,420 @@ static apr_status_t acme_stage(md_proto_driver_t *d) reset_staging = 1; rv = APR_SUCCESS; } - md_log_perror(MD_LOG_MARK, MD_LOG_TRACE1, rv, d->p, - "%s: checked staging area, will%s reset", - d->md->name, reset_staging? "" : " not"); } - + + /* What CA are we using this time? */ + if (ad->md && ad->md->ca_effective) { + /* There was one chosen on the previous run. Do we stick to it? */ + ca_effective = ad->md->ca_effective; + if (d->md->ca_urls->nelts > 1 && d->attempt >= d->retry_failover) { + /* We have more than one CA to choose from and this is the (at least) + * third attempt with the same CA. Let's switch to the next one. */ + int last_idx = md_array_str_index(d->md->ca_urls, ca_effective, 0, 1); + if (last_idx >= 0) { + int next_idx = (last_idx+1) % d->md->ca_urls->nelts; + ca_effective = APR_ARRAY_IDX(d->md->ca_urls, next_idx, const char*); + } + else { + /* not part of current configuration? */ + ca_effective = NULL; + } + /* switching CA means we need to wipe the staging area */ + reset_staging = 1; + } + } + + if (!ca_effective) { + /* None chosen yet, pick the first one configured */ + ca_effective = APR_ARRAY_IDX(d->md->ca_urls, 0, const char*); + } + + if (md_log_is_level(d->p, MD_LOG_DEBUG)) { + md_log_perror(MD_LOG_MARK, MD_LOG_DEBUG, 0, d->p, "%s: staging started, " + "state=%d, attempt=%d, acme=%s, challenges='%s'", + d->md->name, d->md->state, d->attempt, ca_effective, + apr_array_pstrcat(d->p, ad->ca_challenges, ' ')); + } + if (reset_staging) { + md_result_activity_setn(result, "Resetting staging area"); /* reset the staging area for this domain */ rv = md_store_purge(d->store, d->p, MD_SG_STAGING, d->md->name); + md_log_perror(MD_LOG_MARK, MD_LOG_TRACE1, rv, d->p, + "%s: reset staging area", d->md->name); if (APR_SUCCESS != rv && !APR_STATUS_IS_ENOENT(rv)) { - return rv; + md_result_printf(result, rv, "resetting staging area"); + goto out; } rv = APR_SUCCESS; ad->md = NULL; + ad->order = NULL; } - if (ad->md && ad->md->state == MD_S_MISSING) { - /* There is config information missing. 
It makes no sense to drive this MD further */ - rv = APR_INCOMPLETE; + md_result_activity_setn(result, "Assessing current status"); + if (ad->md && ad->md->state == MD_S_MISSING_INFORMATION) { + /* ToS agreement is missing. It makes no sense to drive this MD further */ + md_result_printf(result, APR_INCOMPLETE, + "The managed domain %s is missing required information", d->md->name); goto out; } - if (ad->md) { - /* staging in progress. look for new ACME account information collected there */ - rv = md_reg_creds_get(&ad->ncreds, d->reg, MD_SG_STAGING, d->md, d->p); - md_log_perror(MD_LOG_MARK, MD_LOG_DEBUG, rv, d->p, "%s: checked creds", d->md->name); - if (APR_STATUS_IS_ENOENT(rv)) { - rv = APR_SUCCESS; - } + if (ad->md && APR_SUCCESS == load_missing_creds(d)) { + md_log_perror(MD_LOG_MARK, MD_LOG_DEBUG, 0, d->p, "%s: all credentials staged", d->md->name); + goto ready; } - /* Find out where we're at with this managed domain */ - if (ad->ncreds && ad->ncreds->privkey && ad->ncreds->pubcert) { - /* There is a full set staged, to be loaded */ - md_log_perror(MD_LOG_MARK, MD_LOG_INFO, 0, d->p, "%s: all data staged", d->md->name); - renew = 0; + /* Need to renew */ + if (!ad->md || !md_array_str_eq(ad->md->ca_urls, d->md->ca_urls, 1)) { + md_result_activity_printf(result, "Resetting staging for %s", d->md->name); + /* re-initialize staging */ + md_log_perror(MD_LOG_MARK, MD_LOG_DEBUG, 0, d->p, "%s: setup staging", d->md->name); + md_store_purge(d->store, d->p, MD_SG_STAGING, d->md->name); + ad->md = md_copy(d->p, d->md); + ad->md->ca_effective = ca_effective; + ad->md->ca_account = NULL; + ad->order = NULL; + rv = md_save(d->store, d->p, MD_SG_STAGING, ad->md, 0); + if (APR_SUCCESS != rv) { + md_result_printf(result, rv, "Saving MD information in staging area."); + md_result_log(result, MD_LOG_ERR); + goto out; + } + } + if (!ad->domains) { + ad->domains = md_dns_make_minimal(d->p, ad->md->domains); } - if (renew) { - if (APR_SUCCESS != (rv = md_acme_create(&ad->acme, d->p, d->md->ca_url, d->proxy_url)) - || APR_SUCCESS != (rv = md_acme_setup(ad->acme))) { - md_log_perror(MD_LOG_MARK, MD_LOG_ERR, rv, d->p, "%s: setup ACME(%s)", - d->md->name, d->md->ca_url); - return rv; - } - - if (!ad->md) { - /* re-initialize staging */ - md_log_perror(MD_LOG_MARK, MD_LOG_INFO, 0, d->p, "%s: setup staging", d->md->name); - md_store_purge(d->store, d->p, MD_SG_STAGING, d->md->name); - ad->md = md_copy(d->p, d->md); - ad->md->cert_url = NULL; /* do not retrieve the old cert */ - rv = md_save(d->store, d->p, MD_SG_STAGING, ad->md, 0); - md_log_perror(MD_LOG_MARK, MD_LOG_DEBUG, rv, d->p, "%s: save staged md", - ad->md->name); - } - - if (APR_SUCCESS == rv && !ad->cert) { - md_cert_load(d->store, MD_SG_STAGING, ad->md->name, &ad->cert, d->p); - } + md_result_activity_printf(result, "Contacting ACME server for %s at %s", + d->md->name, ca_effective); + if (APR_SUCCESS != (rv = md_acme_create(&ad->acme, d->p, ca_effective, + d->proxy_url, d->ca_file))) { + md_result_printf(result, rv, "setup ACME communications"); + md_result_log(result, MD_LOG_ERR); + goto out; + } + if (APR_SUCCESS != (rv = md_acme_setup(ad->acme, result))) { + md_result_log(result, MD_LOG_ERR); + goto out; + } - if (APR_SUCCESS == rv && !ad->cert) { - ad->phase = "get certificate"; - md_log_perror(MD_LOG_MARK, MD_LOG_INFO, 0, d->p, "%s: need certificate", d->md->name); - - /* Chose (or create) and ACME account to use */ - rv = ad_set_acct(d); - - /* Check that the account agreed to the terms-of-service, otherwise - * requests for new 
authorizations are denied. ToS may change during the - * lifetime of an account */ - if (APR_SUCCESS == rv) { - const char *required; - - ad->phase = "check agreement"; - md_log_perror(MD_LOG_MARK, MD_LOG_INFO, 0, d->p, - "%s: check Terms-of-Service agreement", d->md->name); - - rv = md_acme_check_agreement(ad->acme, d->p, ad->md->ca_agreement, &required); - - if (APR_STATUS_IS_INCOMPLETE(rv) && required) { - /* The CA wants the user to agree to Terms-of-Services. Until the user - * has reconfigured and restarted the server, this MD cannot be - * driven further */ - ad->md->state = MD_S_MISSING; - md_save(d->store, d->p, MD_SG_STAGING, ad->md, 0); - - md_log_perror(MD_LOG_MARK, MD_LOG_ERR, rv, d->p, - "%s: the CA requires you to accept the terms-of-service " - "as specified in <%s>. " - "Please read the document that you find at that URL and, " - "if you agree to the conditions, configure " - "\"MDCertificateAgreement url\" " - "with exactly that URL in your Apache. " - "Then (graceful) restart the server to activate.", - ad->md->name, required); - goto out; + if (APR_SUCCESS != load_missing_creds(d)) { + for (i = 0; i < ad->creds->nelts; ++i) { + ad->cred = APR_ARRAY_IDX(ad->creds, i, md_credentials_t*); + if (!ad->cred->pkey || md_array_is_empty(ad->cred->chain)) { + md_result_activity_printf(result, "Driving ACME to renew %s certificate for %s", + md_pkey_spec_name(ad->cred->spec),d->md->name); + /* The process of setting up challenges and verifying domain + * names differs between ACME versions. */ + switch (MD_ACME_VERSION_MAJOR(ad->acme->version)) { + case 1: + md_result_printf(result, APR_EINVAL, + "ACME server speaks version 1, an obsolete version of the ACME " + "protocol that is no longer supported."); + rv = result->status; + break; + default: + /* In principle, we only know ACME version 2. But we assume + that a new protocol which announces a directory with all members + from version 2 will act backward compatible. + This is, of course, an assumption... + */ + rv = md_acmev2_drive_renew(ad, d, result); + break; } - } - - /* If we know a cert's location, try to get it. Previous download might - * have failed. If server 404 it, we clear our memory of it. */ - if (APR_SUCCESS == rv && ad->md->cert_url) { - md_log_perror(MD_LOG_MARK, MD_LOG_INFO, 0, d->p, - "%s: polling certificate", d->md->name); - rv = ad_cert_poll(d, 1); - if (APR_STATUS_IS_ENOENT(rv)) { - /* Server reports to know nothing about it. 
*/ - ad->md->cert_url = NULL; - rv = md_reg_update(d->reg, d->p, ad->md->name, ad->md, MD_UPD_CERT_URL); - } - } - - if (APR_SUCCESS == rv && !ad->cert) { - md_log_perror(MD_LOG_MARK, MD_LOG_INFO, 0, d->p, - "%s: setup new authorization", d->md->name); - if (APR_SUCCESS != (rv = ad_setup_authz(d))) { - md_log_perror(MD_LOG_MARK, MD_LOG_DEBUG, rv, d->p, "%s: setup authz resource", - ad->md->name); - goto out; - } - md_log_perror(MD_LOG_MARK, MD_LOG_INFO, 0, d->p, - "%s: setup new challenges", d->md->name); - if (APR_SUCCESS != (rv = ad_start_challenges(d))) { - md_log_perror(MD_LOG_MARK, MD_LOG_DEBUG, rv, d->p, "%s: start challenges", - ad->md->name); - goto out; - } - md_log_perror(MD_LOG_MARK, MD_LOG_INFO, 0, d->p, - "%s: monitoring challenge status", d->md->name); - if (APR_SUCCESS != (rv = ad_monitor_challenges(d))) { - md_log_perror(MD_LOG_MARK, MD_LOG_DEBUG, rv, d->p, "%s: monitor challenges", - ad->md->name); - goto out; - } - md_log_perror(MD_LOG_MARK, MD_LOG_INFO, 0, d->p, - "%s: creating certificate request", d->md->name); - if (APR_SUCCESS != (rv = ad_setup_certificate(d))) { - md_log_perror(MD_LOG_MARK, MD_LOG_DEBUG, rv, d->p, "%s: setup certificate", - ad->md->name); - goto out; + if (APR_SUCCESS != rv) goto out; + + if (md_array_is_empty(ad->cred->chain) || ad->chain_up_link) { + md_result_activity_printf(result, "Retrieving %s certificate chain for %s", + md_pkey_spec_name(ad->cred->spec), d->md->name); + md_log_perror(MD_LOG_MARK, MD_LOG_DEBUG, 0, d->p, + "%s: retrieving %s certificate chain", + d->md->name, md_pkey_spec_name(ad->cred->spec)); + rv = ad_chain_retrieve(d); + if (APR_SUCCESS != rv) { + md_result_printf(result, rv, "Unable to retrieve %s certificate chain.", + md_pkey_spec_name(ad->cred->spec)); + goto out; + } + + if (!md_array_is_empty(ad->cred->chain)) { + + if (!ad->cred->pkey) { + rv = md_pkey_load(d->store, MD_SG_STAGING, d->md->name, ad->cred->spec, &ad->cred->pkey, d->p); + if (APR_SUCCESS != rv) { + md_result_printf(result, rv, "Loading the private key."); + goto out; + } + } + + if (ad->cred->pkey) { + rv = md_check_cert_and_pkey(ad->cred->chain, ad->cred->pkey); + if (APR_SUCCESS != rv) { + md_result_printf(result, rv, "Certificate and private key do not match."); + + /* Delete the order */ + md_acme_order_purge(d->store, d->p, MD_SG_STAGING, d->md, d->env); + + goto out; + } + } + + rv = md_pubcert_save(d->store, d->p, MD_SG_STAGING, d->md->name, + ad->cred->spec, ad->cred->chain, 0); + if (APR_SUCCESS != rv) { + md_result_printf(result, rv, "Saving new %s certificate chain.", + md_pkey_spec_name(ad->cred->spec)); + goto out; + } + } } - md_log_perror(MD_LOG_MARK, MD_LOG_INFO, 0, d->p, - "%s: received certificate", d->md->name); + + /* Clean up the order, so the next pkey spec sets up a new one */ + md_acme_order_purge(d->store, d->p, MD_SG_STAGING, d->md, d->env); } - } + } + + + /* As last step, cleanup any order we created so that challenge data + * may be removed asap. 
*/
+ md_acme_order_purge(d->store, d->p, MD_SG_STAGING, d->md, d->env);
+
+ /* first time this job ran through */
+ first = 1;
+ready:
+ md_result_activity_setn(result, NULL);
+ /* we should have the complete cert chain now */
+ assert(APR_SUCCESS == load_missing_creds(d));
+ md_log_perror(MD_LOG_MARK, MD_LOG_TRACE1, rv, d->p,
+ "%s: certificates ready, activation delay set to %s",
+ d->md->name, md_duration_format(d->p, d->activation_delay));
+
+ /* determine when it should be activated */
+ t = apr_time_now();
+ for (i = 0; i < ad->creds->nelts; ++i) {
+ cred = APR_ARRAY_IDX(ad->creds, i, md_credentials_t*);
+ t2 = md_cert_get_not_before(APR_ARRAY_IDX(cred->chain, 0, md_cert_t*));
+ if (t2 > t) t = t2;
+ }
+ md_result_delay_set(result, t);
+
+ /* If the existing MD is complete and un-expired, delay the activation
+ * to 24 hours after new cert is valid (if there is enough time left), so
+ * that clients with skewed clocks do not see a problem. */
+ now = apr_time_now();
+ if (d->md->state == MD_S_COMPLETE) {
+ apr_time_t valid_until, delay_activation;
- if (APR_SUCCESS == rv && !ad->chain) {
- /* have we created this already? */
- md_chain_load(d->store, MD_SG_STAGING, ad->md->name, &ad->chain, d->p);
- }
- if (APR_SUCCESS == rv && !ad->chain) {
- ad->phase = "install chain";
- md_log_perror(MD_LOG_MARK, MD_LOG_INFO, 0, d->p,
- "%s: retrieving certificate chain", d->md->name);
- rv = ad_chain_install(d);
- }
-
- if (APR_SUCCESS == rv && !ad->pubcert) {
- /* have we created this already? */
- md_pubcert_load(d->store, MD_SG_STAGING, ad->md->name, &ad->pubcert, d->p);
- }
- if (APR_SUCCESS == rv && !ad->pubcert) {
- /* combine cert + chain into the pubcert */
- ad->pubcert = apr_array_make(d->p, ad->chain->nelts + 1, sizeof(md_cert_t*));
- APR_ARRAY_PUSH(ad->pubcert, md_cert_t *) = ad->cert;
- apr_array_cat(ad->pubcert, ad->chain);
- rv = md_pubcert_save(d->store, d->p, MD_SG_STAGING, ad->md->name, ad->pubcert, 0);
+ md_log_perror(MD_LOG_MARK, MD_LOG_TRACE1, rv, d->p,
+ "%s: state is COMPLETE, checking existing certificates", d->md->name);
+ valid_until = md_reg_valid_until(d->reg, d->md, d->p);
+ if (d->activation_delay < 0) {
+ /* special simulation for test case */
+ if (first) {
+ md_log_perror(MD_LOG_MARK, MD_LOG_DEBUG, rv, d->p,
+ "%s: delay ready_at to now+1s", d->md->name);
+ md_result_delay_set(result, apr_time_now() + apr_time_from_sec(1));
+ }
 }
-
- if (APR_SUCCESS == rv && ad->cert) {
- apr_time_t now = apr_time_now();
- apr_interval_time_t max_delay, delay_activation;
-
- /* determine when this cert should be activated */
- d->stage_valid_from = md_cert_get_not_before(ad->cert);
- if (d->md->state == MD_S_COMPLETE && d->md->expires > now) {
- /**
- * The MD is complete and un-expired. This is a renewal run.
- * Give activation 24 hours leeway (if we have that time) to
- * accommodate for clients with somewhat weird clocks.
- */
- delay_activation = apr_time_from_sec(MD_SECS_PER_DAY);
- if (delay_activation > (max_delay = d->md->expires - now)) {
- delay_activation = max_delay;
- }
- d->stage_valid_from += delay_activation;
+ else if (valid_until > now) {
+ delay_activation = d->activation_delay;
+ if (delay_activation > (valid_until - now)) {
+ delay_activation = (valid_until - now);
 }
+ md_result_delay_set(result, result->ready_at + delay_activation);
 }
 }
-out:
+
+ /* There is a full set staged, to be loaded */
+ apr_rfc822_date(ts, result->ready_at);
+ if (result->ready_at > now) {
+ md_result_printf(result, APR_SUCCESS,
+ "The certificate for the managed domain has been renewed successfully and can "
+ "be used from %s on.", ts);
+ }
+ else {
+ md_result_printf(result, APR_SUCCESS,
+ "The certificate for the managed domain has been renewed successfully and can "
+ "be used (valid since %s). A graceful server restart now is recommended.", ts);
+ }
+
+out:
 return rv;
 }
-static apr_status_t acme_driver_stage(md_proto_driver_t *d)
+static apr_status_t acme_driver_renew(md_proto_driver_t *d, md_result_t *result)
 {
- md_acme_driver_t *ad = d->baton;
 apr_status_t rv;
- ad->phase = "ACME staging";
- if (APR_SUCCESS == (rv = acme_stage(d))) {
- ad->phase = "staging done";
- }
-
- md_log_perror(MD_LOG_MARK, MD_LOG_DEBUG, rv, d->p, "%s: %s, %s",
- d->md->name, d->proto->protocol, ad->phase);
+ rv = acme_renew(d, result);
+ md_result_log(result, MD_LOG_DEBUG);
 return rv;
 }
 /**************************************************************************************************/
 /* ACME preload */
-static apr_status_t acme_preload(md_store_t *store, md_store_group_t load_group,
- const char *name, const char *proxy_url, apr_pool_t *p)
+static apr_status_t acme_preload(md_proto_driver_t *d, md_store_group_t load_group,
- const char *name, md_result_t *result)
 {
 apr_status_t rv;
- md_pkey_t *privkey, *acct_key;
+ md_pkey_t *acct_key;
 md_t *md;
- apr_array_header_t *pubcert;
+ md_pkey_spec_t *pkspec;
+ md_credentials_t *creds;
+ apr_array_header_t *all_creds;
 struct md_acme_acct_t *acct;
+ const char *id;
+ int i;
- md_log_perror(MD_LOG_MARK, MD_LOG_DEBUG, 0, p, "%s: preload start", name);
- /* Load all data which will be taken into the DOMAIN storage group.
+ md_log_perror(MD_LOG_MARK, MD_LOG_DEBUG, 0, d->p, "%s: preload start", name);
+ /* Load data from MD_SG_STAGING and save it into "load_group".
 * This serves several purposes:
 * 1. It's a format check on the input data.
 * 2. We write back what we read, creating data with our own access permissions
 * 3. We ignore any other accumulated data in STAGING
- * 4. Once TMP is verified, we can swap/archive groups with a rename
+ * 4. Once "load_group" is complete and ok, we can swap/archive groups with a rename
 * 5. Reading/Writing the data will apply/remove any group specific data encryption.
- * With the exemption that DOMAINS and TMP must apply the same policy/keys.
*/ - if (APR_SUCCESS != (rv = md_load(store, MD_SG_STAGING, name, &md, p))) { - md_log_perror(MD_LOG_MARK, MD_LOG_DEBUG, rv, p, "%s: loading md json", name); - return rv; + if (APR_SUCCESS != (rv = md_load(d->store, MD_SG_STAGING, name, &md, d->p))) { + md_result_set(result, rv, "loading staged md.json"); + goto leave; } - if (APR_SUCCESS != (rv = md_pkey_load(store, MD_SG_STAGING, name, &privkey, p))) { - md_log_perror(MD_LOG_MARK, MD_LOG_DEBUG, rv, p, "%s: loading staging private key", name); - return rv; - } - if (APR_SUCCESS != (rv = md_pubcert_load(store, MD_SG_STAGING, name, &pubcert, p))) { - md_log_perror(MD_LOG_MARK, MD_LOG_DEBUG, rv, p, "%s: loading pubcert", name); - return rv; + if (!md->ca_effective) { + rv = APR_ENOENT; + md_result_set(result, rv, "effective CA url not set"); + goto leave; } + all_creds = apr_array_make(d->p, 5, sizeof(md_credentials_t*)); + for (i = 0; i < md_pkeys_spec_count(md->pks); ++i) { + pkspec = md_pkeys_spec_get(md->pks, i); + if (APR_SUCCESS != (rv = md_creds_load(d->store, MD_SG_STAGING, name, pkspec, &creds, d->p))) { + md_result_printf(result, rv, "loading staged credentials #%d", i); + goto leave; + } + if (!creds->chain) { + rv = APR_ENOENT; + md_result_printf(result, rv, "no certificate in staged credentials #%d", i); + goto leave; + } + if (APR_SUCCESS != (rv = md_check_cert_and_pkey(creds->chain, creds->pkey))) { + md_result_printf(result, rv, "certificate and private key do not match in staged credentials #%d", i); + goto leave; + } + APR_ARRAY_PUSH(all_creds, md_credentials_t*) = creds; + } + /* See if staging holds a new or modified account data */ - rv = md_acme_acct_load(&acct, &acct_key, store, MD_SG_STAGING, name, p); + rv = md_acme_acct_load(&acct, &acct_key, d->store, MD_SG_STAGING, name, d->p); if (APR_STATUS_IS_ENOENT(rv)) { acct = NULL; acct_key = NULL; rv = APR_SUCCESS; } else if (APR_SUCCESS != rv) { - return rv; + md_result_set(result, rv, "loading staged account"); + goto leave; } - /* Remove any authz information we have here or in MD_SG_CHALLENGES */ - md_acme_authz_set_purge(store, MD_SG_STAGING, p, name); + md_result_activity_setn(result, "purging order information"); + md_acme_order_purge(d->store, d->p, MD_SG_STAGING, md, d->env); - md_log_perror(MD_LOG_MARK, MD_LOG_DEBUG, rv, p, - "%s: staged data load, purging tmp space", name); - rv = md_store_purge(store, p, load_group, name); + md_result_activity_setn(result, "purging store tmp space"); + rv = md_store_purge(d->store, d->p, load_group, name); if (APR_SUCCESS != rv) { - md_log_perror(MD_LOG_MARK, MD_LOG_ERR, rv, p, "%s: error purging preload storage", name); - return rv; + md_result_set(result, rv, NULL); + goto leave; } if (acct) { md_acme_t *acme; + + /* We may have STAGED the same account several times. This happens when + * several MDs are renewed at once and need a new account. They will all store + * the new account in their own STAGING area. By checking for accounts with + * the same url, we save them all into a single one. 
+ */ + md_result_activity_setn(result, "saving staged account"); + id = md->ca_account; + if (!id) { + rv = md_acme_acct_id_for_md(&id, d->store, MD_SG_ACCOUNTS, md, d->p); + if (APR_STATUS_IS_ENOENT(rv)) { + id = NULL; + } + else if (APR_SUCCESS != rv) { + md_result_set(result, rv, "error searching for existing account by url"); + goto leave; + } + } + + if (APR_SUCCESS != (rv = md_acme_create(&acme, d->p, md->ca_effective, + d->proxy_url, d->ca_file))) { + md_result_set(result, rv, "error setting up acme"); + goto leave; + } - if (APR_SUCCESS != (rv = md_acme_create(&acme, p, md->ca_url, proxy_url)) - || APR_SUCCESS != (rv = md_acme_acct_save(store, p, acme, acct, acct_key))) { - md_log_perror(MD_LOG_MARK, MD_LOG_ERR, rv, p, "%s: error saving acct", name); - return rv; + if (APR_SUCCESS != (rv = md_acme_acct_save(d->store, d->p, acme, &id, acct, acct_key))) { + md_result_set(result, rv, "error saving account"); + goto leave; } - md->ca_account = acct->id; - md_log_perror(MD_LOG_MARK, MD_LOG_DEBUG, rv, p, "%s: saved ACME account %s", - name, acct->id); + md->ca_account = id; } - - if (APR_SUCCESS != (rv = md_save(store, p, load_group, md, 1))) { - md_log_perror(MD_LOG_MARK, MD_LOG_ERR, rv, p, "%s: saving md json", name); - return rv; + else if (!md->ca_account) { + /* staging reused another account and did not create a new one. find + * the account, if it is already there */ + rv = md_acme_acct_id_for_md(&id, d->store, MD_SG_ACCOUNTS, md, d->p); + if (APR_SUCCESS == rv) { + md->ca_account = id; + } } - if (APR_SUCCESS != (rv = md_pubcert_save(store, p, load_group, name, pubcert, 1))) { - md_log_perror(MD_LOG_MARK, MD_LOG_ERR, rv, p, "%s: saving cert chain", name); - return rv; + + md_result_activity_setn(result, "saving staged md/privkey/pubcert"); + if (APR_SUCCESS != (rv = md_save(d->store, d->p, load_group, md, 1))) { + md_result_set(result, rv, "writing md.json"); + goto leave; } - if (APR_SUCCESS != (rv = md_pkey_save(store, p, load_group, name, privkey, 1))) { - md_log_perror(MD_LOG_MARK, MD_LOG_ERR, rv, p, "%s: saving private key", name); - return rv; + + for (i = 0; i < all_creds->nelts; ++i) { + creds = APR_ARRAY_IDX(all_creds, i, md_credentials_t*); + if (APR_SUCCESS != (rv = md_creds_save(d->store, d->p, load_group, name, creds, 1))) { + md_result_printf(result, rv, "writing credentials #%d", i); + goto leave; + } } + md_result_set(result, APR_SUCCESS, "saved staged data successfully"); + +leave: + md_result_log(result, MD_LOG_DEBUG); return rv; } -static apr_status_t acme_driver_preload(md_proto_driver_t *d, md_store_group_t group) +static apr_status_t acme_driver_preload(md_proto_driver_t *d, + md_store_group_t group, md_result_t *result) { - md_acme_driver_t *ad = d->baton; apr_status_t rv; - ad->phase = "ACME preload"; - if (APR_SUCCESS == (rv = acme_preload(d->store, group, d->md->name, d->proxy_url, d->p))) { - ad->phase = "preload done"; - } - - md_log_perror(MD_LOG_MARK, MD_LOG_DEBUG, rv, d->p, "%s: %s, %s", - d->md->name, d->proto->protocol, ad->phase); + rv = acme_preload(d, group, d->md->name, result); + md_result_log(result, MD_LOG_DEBUG); return rv; } +static apr_status_t acme_complete_md(md_t *md, apr_pool_t *p) +{ + (void)p; + if (!md->ca_urls || apr_is_empty_array(md->ca_urls)) { + md->ca_urls = apr_array_make(p, 3, sizeof(const char *)); + APR_ARRAY_PUSH(md->ca_urls, const char*) = MD_ACME_DEF_URL; + } + return APR_SUCCESS; +} + static md_proto_t ACME_PROTO = { - MD_PROTO_ACME, acme_driver_init, acme_driver_stage, acme_driver_preload + MD_PROTO_ACME, 
acme_driver_init, acme_driver_renew, + acme_driver_preload_init, acme_driver_preload, + acme_complete_md, }; apr_status_t md_acme_protos_add(apr_hash_t *protos, apr_pool_t *p) diff --git a/modules/md/md_acme_drive.h b/modules/md/md_acme_drive.h new file mode 100644 index 0000000..88761fa --- /dev/null +++ b/modules/md/md_acme_drive.h @@ -0,0 +1,55 @@ +/* Copyright 2019 greenbytes GmbH (https://www.greenbytes.de) + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef md_acme_drive_h +#define md_acme_drive_h + +struct apr_array_header_t; +struct md_acme_order_t; +struct md_credentials_t; +struct md_result_t; + +typedef struct md_acme_driver_t { + md_proto_driver_t *driver; + void *sub_driver; + + md_acme_t *acme; + md_t *md; + struct apr_array_header_t *domains; + apr_array_header_t *ca_challenges; + + int complete; + apr_array_header_t *creds; /* the new md_credentials_t */ + + struct md_credentials_t *cred; /* credentials currently being processed */ + const char *chain_up_link; /* Link header "up" from last chain retrieval, + needs to be followed */ + + struct md_acme_order_t *order; + apr_interval_time_t authz_monitor_timeout; + + const char *csr_der_64; + apr_interval_time_t cert_poll_timeout; + +} md_acme_driver_t; + +apr_status_t md_acme_drive_set_acct(struct md_proto_driver_t *d, + struct md_result_t *result); +apr_status_t md_acme_drive_setup_cred_chain(struct md_proto_driver_t *d, + struct md_result_t *result); +apr_status_t md_acme_drive_cert_poll(struct md_proto_driver_t *d, int only_once); + +#endif /* md_acme_drive_h */ + diff --git a/modules/md/md_acme_order.c b/modules/md/md_acme_order.c new file mode 100644 index 0000000..061093a --- /dev/null +++ b/modules/md/md_acme_order.c @@ -0,0 +1,562 @@ +/* Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "md.h" +#include "md_crypt.h" +#include "md_json.h" +#include "md_http.h" +#include "md_log.h" +#include "md_jws.h" +#include "md_result.h" +#include "md_store.h" +#include "md_util.h" + +#include "md_acme.h" +#include "md_acme_authz.h" +#include "md_acme_order.h" + + +md_acme_order_t *md_acme_order_create(apr_pool_t *p) +{ + md_acme_order_t *order; + + order = apr_pcalloc(p, sizeof(*order)); + order->p = p; + order->authz_urls = apr_array_make(p, 5, sizeof(const char *)); + order->challenge_setups = apr_array_make(p, 5, sizeof(const char *)); + + return order; +} + +/**************************************************************************************************/ +/* order conversion */ + +#define MD_KEY_CHALLENGE_SETUPS "challenge-setups" + +static md_acme_order_st order_st_from_str(const char *s) +{ + if (s) { + if (!strcmp("valid", s)) { + return MD_ACME_ORDER_ST_VALID; + } + else if (!strcmp("invalid", s)) { + return MD_ACME_ORDER_ST_INVALID; + } + else if (!strcmp("ready", s)) { + return MD_ACME_ORDER_ST_READY; + } + else if (!strcmp("pending", s)) { + return MD_ACME_ORDER_ST_PENDING; + } + else if (!strcmp("processing", s)) { + return MD_ACME_ORDER_ST_PROCESSING; + } + } + return MD_ACME_ORDER_ST_PENDING; +} + +static const char *order_st_to_str(md_acme_order_st status) +{ + switch (status) { + case MD_ACME_ORDER_ST_PENDING: + return "pending"; + case MD_ACME_ORDER_ST_READY: + return "ready"; + case MD_ACME_ORDER_ST_PROCESSING: + return "processing"; + case MD_ACME_ORDER_ST_VALID: + return "valid"; + case MD_ACME_ORDER_ST_INVALID: + return "invalid"; + default: + return "invalid"; + } +} + +md_json_t *md_acme_order_to_json(md_acme_order_t *order, apr_pool_t *p) +{ + md_json_t *json = md_json_create(p); + + if (order->url) { + md_json_sets(order->url, json, MD_KEY_URL, NULL); + } + md_json_sets(order_st_to_str(order->status), json, MD_KEY_STATUS, NULL); + md_json_setsa(order->authz_urls, json, MD_KEY_AUTHORIZATIONS, NULL); + md_json_setsa(order->challenge_setups, json, MD_KEY_CHALLENGE_SETUPS, NULL); + if (order->finalize) { + md_json_sets(order->finalize, json, MD_KEY_FINALIZE, NULL); + } + if (order->certificate) { + md_json_sets(order->certificate, json, MD_KEY_CERTIFICATE, NULL); + } + return json; +} + +static void order_update_from_json(md_acme_order_t *order, md_json_t *json, apr_pool_t *p) +{ + if (!order->url && md_json_has_key(json, MD_KEY_URL, NULL)) { + order->url = md_json_dups(p, json, MD_KEY_URL, NULL); + } + order->status = order_st_from_str(md_json_gets(json, MD_KEY_STATUS, NULL)); + if (md_json_has_key(json, MD_KEY_AUTHORIZATIONS, NULL)) { + md_json_dupsa(order->authz_urls, p, json, MD_KEY_AUTHORIZATIONS, NULL); + } + if (md_json_has_key(json, MD_KEY_CHALLENGE_SETUPS, NULL)) { + md_json_dupsa(order->challenge_setups, p, json, MD_KEY_CHALLENGE_SETUPS, NULL); + } + if (md_json_has_key(json, MD_KEY_FINALIZE, NULL)) { + order->finalize = md_json_dups(p, json, MD_KEY_FINALIZE, NULL); + } + if (md_json_has_key(json, MD_KEY_CERTIFICATE, NULL)) { + order->certificate = md_json_dups(p, json, MD_KEY_CERTIFICATE, NULL); + } +} + +md_acme_order_t *md_acme_order_from_json(md_json_t *json, apr_pool_t *p) +{ + md_acme_order_t *order = md_acme_order_create(p); + + order_update_from_json(order, json, p); + return order; +} + +apr_status_t md_acme_order_add(md_acme_order_t *order, const char *authz_url) +{ + assert(authz_url); + if 
(md_array_str_index(order->authz_urls, authz_url, 0, 1) < 0) { + APR_ARRAY_PUSH(order->authz_urls, const char*) = apr_pstrdup(order->p, authz_url); + } + return APR_SUCCESS; +} + +apr_status_t md_acme_order_remove(md_acme_order_t *order, const char *authz_url) +{ + int i; + + assert(authz_url); + i = md_array_str_index(order->authz_urls, authz_url, 0, 1); + if (i >= 0) { + order->authz_urls = md_array_str_remove(order->p, order->authz_urls, authz_url, 1); + return APR_SUCCESS; + } + return APR_ENOENT; +} + +static apr_status_t add_setup_token(md_acme_order_t *order, const char *token) +{ + if (md_array_str_index(order->challenge_setups, token, 0, 1) < 0) { + APR_ARRAY_PUSH(order->challenge_setups, const char*) = apr_pstrdup(order->p, token); + } + return APR_SUCCESS; +} + +/**************************************************************************************************/ +/* persistence */ + +apr_status_t md_acme_order_load(struct md_store_t *store, md_store_group_t group, + const char *md_name, md_acme_order_t **pauthz_set, + apr_pool_t *p) +{ + apr_status_t rv; + md_json_t *json; + md_acme_order_t *authz_set; + + rv = md_store_load_json(store, group, md_name, MD_FN_ORDER, &json, p); + if (APR_SUCCESS == rv) { + authz_set = md_acme_order_from_json(json, p); + } + *pauthz_set = (APR_SUCCESS == rv)? authz_set : NULL; + return rv; +} + +static apr_status_t p_save(void *baton, apr_pool_t *p, apr_pool_t *ptemp, va_list ap) +{ + md_store_t *store = baton; + md_json_t *json; + md_store_group_t group; + md_acme_order_t *set; + const char *md_name; + int create; + + (void)p; + group = (md_store_group_t)va_arg(ap, int); + md_name = va_arg(ap, const char *); + set = va_arg(ap, md_acme_order_t *); + create = va_arg(ap, int); + + json = md_acme_order_to_json(set, ptemp); + assert(json); + return md_store_save_json(store, ptemp, group, md_name, MD_FN_ORDER, json, create); +} + +apr_status_t md_acme_order_save(struct md_store_t *store, apr_pool_t *p, + md_store_group_t group, const char *md_name, + md_acme_order_t *authz_set, int create) +{ + return md_util_pool_vdo(p_save, store, p, group, md_name, authz_set, create, NULL); +} + +static apr_status_t p_purge(void *baton, apr_pool_t *p, apr_pool_t *ptemp, va_list ap) +{ + md_store_t *store = baton; + md_acme_order_t *order; + md_store_group_t group; + const md_t *md; + const char *setup_token; + apr_table_t *env; + int i; + + group = (md_store_group_t)va_arg(ap, int); + md = va_arg(ap, const md_t *); + env = va_arg(ap, apr_table_t *); + + if (APR_SUCCESS == md_acme_order_load(store, group, md->name, &order, p)) { + md_log_perror(MD_LOG_MARK, MD_LOG_DEBUG, 0, p, "order loaded for %s", md->name); + for (i = 0; i < order->challenge_setups->nelts; ++i) { + setup_token = APR_ARRAY_IDX(order->challenge_setups, i, const char*); + if (setup_token) { + md_log_perror(MD_LOG_MARK, MD_LOG_DEBUG, 0, p, + "order teardown setup %s", setup_token); + md_acme_authz_teardown(store, setup_token, md, env, p); + } + } + } + return md_store_remove(store, group, md->name, MD_FN_ORDER, ptemp, 1); +} + +apr_status_t md_acme_order_purge(md_store_t *store, apr_pool_t *p, md_store_group_t group, + const md_t *md, apr_table_t *env) +{ + return md_util_pool_vdo(p_purge, store, p, group, md, env, NULL); +} + +/**************************************************************************************************/ +/* ACMEv2 order requests */ + +typedef struct { + apr_pool_t *p; + md_acme_order_t *order; + md_acme_t *acme; + const char *name; + apr_array_header_t *domains; + md_result_t 
*result; +} order_ctx_t; + +#define ORDER_CTX_INIT(ctx, p, o, a, n, d, r) \ + (ctx)->p = (p); (ctx)->order = (o); (ctx)->acme = (a); \ + (ctx)->name = (n); (ctx)->domains = d; (ctx)->result = r + +static apr_status_t identifier_to_json(void *value, md_json_t *json, apr_pool_t *p, void *baton) +{ + md_json_t *jid; + + (void)baton; + jid = md_json_create(p); + md_json_sets("dns", jid, "type", NULL); + md_json_sets(value, jid, "value", NULL); + return md_json_setj(jid, json, NULL); +} + +static apr_status_t on_init_order_register(md_acme_req_t *req, void *baton) +{ + order_ctx_t *ctx = baton; + md_json_t *jpayload; + + jpayload = md_json_create(req->p); + md_json_seta(ctx->domains, identifier_to_json, NULL, jpayload, "identifiers", NULL); + + return md_acme_req_body_init(req, jpayload); +} + +static apr_status_t on_order_upd(md_acme_t *acme, apr_pool_t *p, const apr_table_t *hdrs, + md_json_t *body, void *baton) +{ + order_ctx_t *ctx = baton; + const char *location = apr_table_get(hdrs, "location"); + apr_status_t rv = APR_SUCCESS; + + (void)acme; + (void)p; + if (!ctx->order) { + if (location) { + ctx->order = md_acme_order_create(ctx->p); + ctx->order->url = apr_pstrdup(ctx->p, location); + md_log_perror(MD_LOG_MARK, MD_LOG_TRACE1, rv, ctx->p, "new order at %s", location); + } + else { + rv = APR_EINVAL; + md_log_perror(MD_LOG_MARK, MD_LOG_WARNING, rv, ctx->p, "new order, no location header"); + goto out; + } + } + + order_update_from_json(ctx->order, body, ctx->p); +out: + return rv; +} + +apr_status_t md_acme_order_register(md_acme_order_t **porder, md_acme_t *acme, apr_pool_t *p, + const char *name, apr_array_header_t *domains) +{ + order_ctx_t ctx; + apr_status_t rv; + + assert(MD_ACME_VERSION_MAJOR(acme->version) > 1); + ORDER_CTX_INIT(&ctx, p, NULL, acme, name, domains, NULL); + rv = md_acme_POST(acme, acme->api.v2.new_order, on_init_order_register, on_order_upd, NULL, NULL, &ctx); + *porder = (APR_SUCCESS == rv)? 
ctx.order : NULL; + return rv; +} + +apr_status_t md_acme_order_update(md_acme_order_t *order, md_acme_t *acme, + md_result_t *result, apr_pool_t *p) +{ + order_ctx_t ctx; + apr_status_t rv; + + assert(MD_ACME_VERSION_MAJOR(acme->version) > 1); + ORDER_CTX_INIT(&ctx, p, order, acme, NULL, NULL, result); + rv = md_acme_GET(acme, order->url, NULL, on_order_upd, NULL, NULL, &ctx); + if (APR_SUCCESS != rv && APR_SUCCESS != acme->last->status) { + md_result_dup(result, acme->last); + } + return rv; +} + +static apr_status_t await_ready(void *baton, int attempt) +{ + order_ctx_t *ctx = baton; + apr_status_t rv = APR_SUCCESS; + + (void)attempt; + if (APR_SUCCESS != (rv = md_acme_order_update(ctx->order, ctx->acme, + ctx->result, ctx->p))) goto out; + switch (ctx->order->status) { + case MD_ACME_ORDER_ST_READY: + case MD_ACME_ORDER_ST_PROCESSING: + case MD_ACME_ORDER_ST_VALID: + break; + case MD_ACME_ORDER_ST_PENDING: + rv = APR_EAGAIN; + break; + default: + rv = APR_EINVAL; + break; + } +out: + return rv; +} + +apr_status_t md_acme_order_await_ready(md_acme_order_t *order, md_acme_t *acme, + const md_t *md, apr_interval_time_t timeout, + md_result_t *result, apr_pool_t *p) +{ + order_ctx_t ctx; + apr_status_t rv; + + assert(MD_ACME_VERSION_MAJOR(acme->version) > 1); + ORDER_CTX_INIT(&ctx, p, order, acme, md->name, NULL, result); + + md_result_activity_setn(result, "Waiting for order to become ready"); + rv = md_util_try(await_ready, &ctx, 0, timeout, 0, 0, 1); + md_result_log(result, MD_LOG_DEBUG); + return rv; +} + +static apr_status_t await_valid(void *baton, int attempt) +{ + order_ctx_t *ctx = baton; + apr_status_t rv = APR_SUCCESS; + + (void)attempt; + if (APR_SUCCESS != (rv = md_acme_order_update(ctx->order, ctx->acme, + ctx->result, ctx->p))) goto out; + switch (ctx->order->status) { + case MD_ACME_ORDER_ST_VALID: + md_result_set(ctx->result, APR_EINVAL, "ACME server order status is 'valid'."); + break; + case MD_ACME_ORDER_ST_PROCESSING: + rv = APR_EAGAIN; + break; + case MD_ACME_ORDER_ST_INVALID: + md_result_set(ctx->result, APR_EINVAL, "ACME server order status is 'invalid'."); + rv = APR_EINVAL; + break; + default: + rv = APR_EINVAL; + break; + } +out: + return rv; +} + +apr_status_t md_acme_order_await_valid(md_acme_order_t *order, md_acme_t *acme, + const md_t *md, apr_interval_time_t timeout, + md_result_t *result, apr_pool_t *p) +{ + order_ctx_t ctx; + apr_status_t rv; + + assert(MD_ACME_VERSION_MAJOR(acme->version) > 1); + ORDER_CTX_INIT(&ctx, p, order, acme, md->name, NULL, result); + + md_result_activity_setn(result, "Waiting for finalized order to become valid"); + rv = md_util_try(await_valid, &ctx, 0, timeout, 0, 0, 1); + md_result_log(result, MD_LOG_DEBUG); + return rv; +} + +/**************************************************************************************************/ +/* processing */ + +apr_status_t md_acme_order_start_challenges(md_acme_order_t *order, md_acme_t *acme, + apr_array_header_t *challenge_types, + md_store_t *store, const md_t *md, + apr_table_t *env, md_result_t *result, + apr_pool_t *p) +{ + apr_status_t rv = APR_SUCCESS; + md_acme_authz_t *authz; + const char *url, *setup_token; + int i; + + md_result_activity_printf(result, "Starting challenges for domains"); + for (i = 0; i < order->authz_urls->nelts; ++i) { + url = APR_ARRAY_IDX(order->authz_urls, i, const char*); + md_log_perror(MD_LOG_MARK, MD_LOG_DEBUG, rv, p, "%s: check AUTHZ at %s", md->name, url); + + if (APR_SUCCESS != (rv = md_acme_authz_retrieve(acme, p, url, &authz))) { + 
md_log_perror(MD_LOG_MARK, MD_LOG_DEBUG, rv, p, "%s: check authz for %s", + md->name, authz->domain); + goto leave; + } + + switch (authz->state) { + case MD_ACME_AUTHZ_S_VALID: + break; + + case MD_ACME_AUTHZ_S_PENDING: + md_log_perror(MD_LOG_MARK, MD_LOG_DEBUG, rv, p, + "%s: authorization pending for %s", + md->name, authz->domain); + rv = md_acme_authz_respond(authz, acme, store, challenge_types, + md->pks, + md->acme_tls_1_domains, md, + env, p, &setup_token, result); + if (APR_SUCCESS != rv) { + goto leave; + } + add_setup_token(order, setup_token); + md_acme_order_save(store, p, MD_SG_STAGING, md->name, order, 0); + break; + + case MD_ACME_AUTHZ_S_INVALID: + rv = APR_EINVAL; + if (authz->error_type) { + md_result_problem_set(result, rv, authz->error_type, authz->error_detail, NULL); + goto leave; + } + /* fall through */ + default: + rv = APR_EINVAL; + md_result_printf(result, rv, "unexpected AUTHZ state %d for domain %s", + authz->state, authz->domain); + md_result_log(result, MD_LOG_ERR); + goto leave; + } + } +leave: + return rv; +} + +static apr_status_t check_challenges(void *baton, int attempt) +{ + order_ctx_t *ctx = baton; + const char *url; + md_acme_authz_t *authz; + apr_status_t rv = APR_SUCCESS; + int i; + + for (i = 0; i < ctx->order->authz_urls->nelts; ++i) { + url = APR_ARRAY_IDX(ctx->order->authz_urls, i, const char*); + md_log_perror(MD_LOG_MARK, MD_LOG_DEBUG, rv, ctx->p, "%s: check AUTHZ at %s (attempt %d)", + ctx->name, url, attempt); + + rv = md_acme_authz_retrieve(ctx->acme, ctx->p, url, &authz); + if (APR_SUCCESS == rv) { + switch (authz->state) { + case MD_ACME_AUTHZ_S_VALID: + md_result_printf(ctx->result, rv, + "domain authorization for %s is valid", authz->domain); + break; + case MD_ACME_AUTHZ_S_PENDING: + rv = APR_EAGAIN; + md_log_perror(MD_LOG_MARK, MD_LOG_DEBUG, rv, ctx->p, + "%s: status pending at %s", authz->domain, authz->url); + goto leave; + case MD_ACME_AUTHZ_S_INVALID: + rv = APR_EINVAL; + md_result_printf(ctx->result, rv, + "domain authorization for %s failed, CA considers " + "answer to challenge invalid%s.", + authz->domain, authz->error_type? "" : ", no error given"); + md_result_log(ctx->result, MD_LOG_ERR); + goto leave; + default: + rv = APR_EINVAL; + md_result_printf(ctx->result, rv, + "domain authorization for %s failed with state %d", + authz->domain, authz->state); + md_result_log(ctx->result, MD_LOG_ERR); + goto leave; + } + } + else { + md_result_printf(ctx->result, rv, "authorization retrieval failed for %s on <%s>", + ctx->name, url); + } + } +leave: + return rv; +} + +apr_status_t md_acme_order_monitor_authzs(md_acme_order_t *order, md_acme_t *acme, + const md_t *md, apr_interval_time_t timeout, + md_result_t *result, apr_pool_t *p) +{ + order_ctx_t ctx; + apr_status_t rv; + + ORDER_CTX_INIT(&ctx, p, order, acme, md->name, NULL, result); + + md_result_activity_printf(result, "Monitoring challenge status for %s", md->name); + rv = md_util_try(check_challenges, &ctx, 0, timeout, 0, 0, 1); + md_log_perror(MD_LOG_MARK, MD_LOG_DEBUG, rv, p, "%s: checked authorizations", md->name); + return rv; +} + diff --git a/modules/md/md_acme_order.h b/modules/md/md_acme_order.h new file mode 100644 index 0000000..4170440 --- /dev/null +++ b/modules/md/md_acme_order.h @@ -0,0 +1,91 @@ +/* Copyright 2019 greenbytes GmbH (https://www.greenbytes.de) + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef md_acme_order_h +#define md_acme_order_h + +struct md_json_t; +struct md_result_t; + +typedef struct md_acme_order_t md_acme_order_t; + +typedef enum { + MD_ACME_ORDER_ST_PENDING, + MD_ACME_ORDER_ST_READY, + MD_ACME_ORDER_ST_PROCESSING, + MD_ACME_ORDER_ST_VALID, + MD_ACME_ORDER_ST_INVALID, +} md_acme_order_st; + +struct md_acme_order_t { + apr_pool_t *p; + const char *url; + md_acme_order_st status; + struct apr_array_header_t *authz_urls; + struct apr_array_header_t *challenge_setups; + struct md_json_t *json; + const char *finalize; + const char *certificate; +}; + +#define MD_FN_ORDER "order.json" + +/**************************************************************************************************/ + +md_acme_order_t *md_acme_order_create(apr_pool_t *p); + +apr_status_t md_acme_order_add(md_acme_order_t *order, const char *authz_url); +apr_status_t md_acme_order_remove(md_acme_order_t *order, const char *authz_url); + +struct md_json_t *md_acme_order_to_json(md_acme_order_t *set, apr_pool_t *p); +md_acme_order_t *md_acme_order_from_json(struct md_json_t *json, apr_pool_t *p); + +apr_status_t md_acme_order_load(struct md_store_t *store, md_store_group_t group, + const char *md_name, md_acme_order_t **pauthz_set, + apr_pool_t *p); +apr_status_t md_acme_order_save(struct md_store_t *store, apr_pool_t *p, + md_store_group_t group, const char *md_name, + md_acme_order_t *authz_set, int create); + +apr_status_t md_acme_order_purge(struct md_store_t *store, apr_pool_t *p, + md_store_group_t group, const md_t *md, + apr_table_t *env); + +apr_status_t md_acme_order_start_challenges(md_acme_order_t *order, md_acme_t *acme, + apr_array_header_t *challenge_types, + md_store_t *store, const md_t *md, + apr_table_t *env, struct md_result_t *result, + apr_pool_t *p); + +apr_status_t md_acme_order_monitor_authzs(md_acme_order_t *order, md_acme_t *acme, + const md_t *md, apr_interval_time_t timeout, + struct md_result_t *result, apr_pool_t *p); + +/* ACMEv2 only ************************************************************************************/ + +apr_status_t md_acme_order_register(md_acme_order_t **porder, md_acme_t *acme, apr_pool_t *p, + const char *name, struct apr_array_header_t *domains); + +apr_status_t md_acme_order_update(md_acme_order_t *order, md_acme_t *acme, + struct md_result_t *result, apr_pool_t *p); + +apr_status_t md_acme_order_await_ready(md_acme_order_t *order, md_acme_t *acme, + const md_t *md, apr_interval_time_t timeout, + struct md_result_t *result, apr_pool_t *p); +apr_status_t md_acme_order_await_valid(md_acme_order_t *order, md_acme_t *acme, + const md_t *md, apr_interval_time_t timeout, + struct md_result_t *result, apr_pool_t *p); + +#endif /* md_acme_order_h */ diff --git a/modules/md/md_acmev2_drive.c b/modules/md/md_acmev2_drive.c new file mode 100644 index 0000000..9dfca96 --- /dev/null +++ b/modules/md/md_acmev2_drive.c @@ -0,0 +1,181 @@ +/* Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include +#include + +#include +#include +#include +#include +#include + +#include "md.h" +#include "md_crypt.h" +#include "md_json.h" +#include "md_jws.h" +#include "md_http.h" +#include "md_log.h" +#include "md_result.h" +#include "md_reg.h" +#include "md_store.h" +#include "md_util.h" + +#include "md_acme.h" +#include "md_acme_acct.h" +#include "md_acme_authz.h" +#include "md_acme_order.h" + +#include "md_acme_drive.h" +#include "md_acmev2_drive.h" + + + +/**************************************************************************************************/ +/* order setup */ + +/** + * Either we have an order stored in the STAGING area, or we need to create a + * new one at the ACME server. + */ +static apr_status_t ad_setup_order(md_proto_driver_t *d, md_result_t *result, int *pis_new) +{ + md_acme_driver_t *ad = d->baton; + apr_status_t rv; + md_t *md = ad->md; + + assert(ad->md); + assert(ad->acme); + + /* For each domain in MD: AUTHZ setup + * if an AUTHZ resource is known, check if it is still valid + * if known AUTHZ resource is not valid, remove, goto 4.1.1 + * if no AUTHZ available, create a new one for the domain, store it + */ + if (pis_new) *pis_new = 0; + rv = md_acme_order_load(d->store, MD_SG_STAGING, md->name, &ad->order, d->p); + if (APR_SUCCESS == rv) { + md_result_activity_setn(result, "Loaded order from staging"); + goto leave; + } + else if (!APR_STATUS_IS_ENOENT(rv)) { + md_log_perror(MD_LOG_MARK, MD_LOG_DEBUG, rv, d->p, "%s: loading order", md->name); + md_acme_order_purge(d->store, d->p, MD_SG_STAGING, md, d->env); + } + + md_result_activity_setn(result, "Creating new order"); + rv = md_acme_order_register(&ad->order, ad->acme, d->p, d->md->name, ad->domains); + if (APR_SUCCESS != rv) goto leave; + rv = md_acme_order_save(d->store, d->p, MD_SG_STAGING, d->md->name, ad->order, 0); + if (APR_SUCCESS != rv) { + md_result_set(result, rv, "saving order in staging"); + } + if (pis_new) *pis_new = 1; + +leave: + md_acme_report_result(ad->acme, rv, result); + return rv; +} + +/**************************************************************************************************/ +/* ACMEv2 renewal */ + +apr_status_t md_acmev2_drive_renew(md_acme_driver_t *ad, md_proto_driver_t *d, md_result_t *result) +{ + apr_status_t rv = APR_SUCCESS; + int is_new_order = 0; + + md_log_perror(MD_LOG_MARK, MD_LOG_DEBUG, 0, d->p, "%s: (ACMEv2) need certificate", d->md->name); + + /* Choose (or create) an ACME account to use */ + rv = md_acme_drive_set_acct(d, result); + if (APR_SUCCESS != rv) goto leave; + + if (!md_array_is_empty(ad->cred->chain)) goto leave; + + /* ACMEv2 strategy: + * 1. load an md_acme_order_t from STAGING, if present + * 2. if no order found, register a new order at ACME server + * 3. update the order from the server + * 4. 
Switch order state: + * * PENDING: process authz challenges + * * READY: finalize the order + * * PROCESSING: wait and re-assess later + * * VALID: retrieve certificate + * * COMPLETE: all done, return success + * * INVALID and otherwise: fail renewal, delete local order + */ + if (APR_SUCCESS != (rv = ad_setup_order(d, result, &is_new_order))) { + goto leave; + } + + rv = md_acme_order_update(ad->order, ad->acme, result, d->p); + if (APR_STATUS_IS_ENOENT(rv) + || APR_STATUS_IS_EACCES(rv) + || MD_ACME_ORDER_ST_INVALID == ad->order->status) { + /* order is invalid or no longer known at the ACME server */ + ad->order = NULL; + md_acme_order_purge(d->store, d->p, MD_SG_STAGING, d->md, d->env); + } + else if (APR_SUCCESS != rv) { + goto leave; + } + +retry: + if (!ad->order) { + rv = ad_setup_order(d, result, &is_new_order); + if (APR_SUCCESS != rv) goto leave; + } + + rv = md_acme_order_start_challenges(ad->order, ad->acme, ad->ca_challenges, + d->store, d->md, d->env, result, d->p); + if (!is_new_order && APR_STATUS_IS_EINVAL(rv)) { + /* found 'invalid' domains in previous order, need to start over */ + ad->order = NULL; + md_acme_order_purge(d->store, d->p, MD_SG_STAGING, d->md, d->env); + goto retry; + } + if (APR_SUCCESS != rv) goto leave; + + rv = md_acme_order_monitor_authzs(ad->order, ad->acme, d->md, + ad->authz_monitor_timeout, result, d->p); + if (APR_SUCCESS != rv) goto leave; + + rv = md_acme_order_await_ready(ad->order, ad->acme, d->md, + ad->authz_monitor_timeout, result, d->p); + if (APR_SUCCESS != rv) goto leave; + + if (MD_ACME_ORDER_ST_READY == ad->order->status) { + rv = md_acme_drive_setup_cred_chain(d, result); + if (APR_SUCCESS != rv) goto leave; + md_log_perror(MD_LOG_MARK, MD_LOG_DEBUG, 0, d->p, "%s: finalized order", d->md->name); + } + + rv = md_acme_order_await_valid(ad->order, ad->acme, d->md, + ad->authz_monitor_timeout, result, d->p); + if (APR_SUCCESS != rv) goto leave; + + if (!ad->order->certificate) { + md_result_set(result, APR_EINVAL, "Order valid, but certificate url is missing."); + goto leave; + } + md_result_set(result, APR_SUCCESS, NULL); + +leave: + md_result_log(result, MD_LOG_DEBUG); + return result->status; +} + diff --git a/modules/md/md_acmev2_drive.h b/modules/md/md_acmev2_drive.h new file mode 100644 index 0000000..7552c4f --- /dev/null +++ b/modules/md/md_acmev2_drive.h @@ -0,0 +1,27 @@ +/* Copyright 2019 greenbytes GmbH (https://www.greenbytes.de) + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef md_acmev2_drive_h +#define md_acmev2_drive_h + +struct md_json_t; +struct md_proto_driver_t; +struct md_result_t; + +apr_status_t md_acmev2_drive_renew(struct md_acme_driver_t *ad, + struct md_proto_driver_t *d, + struct md_result_t *result); + +#endif /* md_acmev2_drive_h */ diff --git a/modules/md/md_core.c b/modules/md/md_core.c index 51ad005..7aacff0 100644 --- a/modules/md/md_core.c +++ b/modules/md/md_core.c @@ -19,6 +19,7 @@ #include #include +#include #include #include #include @@ -33,7 +34,10 @@ int md_contains(const md_t *md, const char *domain, int case_sensitive) { - return md_array_str_index(md->domains, domain, 0, case_sensitive) >= 0; + if (md_array_str_index(md->domains, domain, 0, case_sensitive) >= 0) { + return 1; + } + return md_dns_domains_match(md->domains, domain); } const char *md_common_name(const md_t *md1, const md_t *md2) @@ -79,16 +83,35 @@ apr_size_t md_common_name_count(const md_t *md1, const md_t *md2) return hits; } +int md_is_covered_by_alt_names(const md_t *md, const struct apr_array_header_t* alt_names) +{ + const char *name; + int i; + + if (alt_names) { + for (i = 0; i < md->domains->nelts; ++i) { + name = APR_ARRAY_IDX(md->domains, i, const char *); + if (!md_dns_domains_match(alt_names, name)) { + return 0; + } + } + return 1; + } + return 0; +} + md_t *md_create_empty(apr_pool_t *p) { md_t *md = apr_pcalloc(p, sizeof(*md)); if (md) { md->domains = apr_array_make(p, 5, sizeof(const char *)); md->contacts = apr_array_make(p, 5, sizeof(const char *)); - md->drive_mode = MD_DRIVE_DEFAULT; + md->renew_mode = MD_RENEW_DEFAULT; md->require_https = MD_REQUIRE_UNSET; md->must_staple = -1; md->transitive = -1; + md->acme_tls_1_domains = apr_array_make(p, 5, sizeof(const char *)); + md->stapling = -1; md->defn_name = "unknown"; md->defn_line_number = 0; } @@ -125,38 +148,6 @@ int md_contains_domains(const md_t *md1, const md_t *md2) return 0; } -md_t *md_find_closest_match(apr_array_header_t *mds, const md_t *md) -{ - md_t *candidate, *m; - apr_size_t cand_n, n; - int i; - - candidate = md_get_by_name(mds, md->name); - if (!candidate) { - /* try to find an instance that contains all domain names from md */ - for (i = 0; i < mds->nelts; ++i) { - m = APR_ARRAY_IDX(mds, i, md_t *); - if (md_contains_domains(m, md)) { - return m; - } - } - /* no matching name and no md in the list has all domains. - * We consider that managed domain as closest match that contains at least one - * domain name from md, ONLY if there is no other one that also has. 
- */ - cand_n = 0; - for (i = 0; i < mds->nelts; ++i) { - m = APR_ARRAY_IDX(mds, i, md_t *); - n = md_common_name_count(md, m); - if (n > cand_n) { - candidate = m; - cand_n = n; - } - } - } - return candidate; -} - md_t *md_get_by_name(struct apr_array_header_t *mds, const char *name) { int i; @@ -193,6 +184,15 @@ md_t *md_get_by_dns_overlap(struct apr_array_header_t *mds, const md_t *md) return NULL; } +int md_cert_count(const md_t *md) +{ + /* cert are defined as a list of static files or a list of private key specs */ + if (md->cert_files && md->cert_files->nelts) { + return md->cert_files->nelts; + } + return md_pkeys_spec_count(md->pks); +} + md_t *md_create(apr_pool_t *p, apr_array_header_t *domains) { md_t *md; @@ -204,34 +204,6 @@ md_t *md_create(apr_pool_t *p, apr_array_header_t *domains) return md; } -int md_should_renew(const md_t *md) -{ - apr_time_t now = apr_time_now(); - - if (md->expires <= now) { - return 1; - } - else if (md->expires > 0) { - double renew_win, life; - apr_interval_time_t left; - - renew_win = (double)md->renew_window; - if (md->renew_norm > 0 - && md->renew_norm > renew_win - && md->expires > md->valid_from) { - /* Calc renewal days as fraction of cert lifetime - if known */ - life = (double)(md->expires - md->valid_from); - renew_win = life * renew_win / (double)md->renew_norm; - } - - left = md->expires - now; - if (left <= renew_win) { - return 1; - } - } - return 0; -} - /**************************************************************************************************/ /* lifetime */ @@ -247,6 +219,8 @@ md_t *md_copy(apr_pool_t *p, const md_t *src) if (src->ca_challenges) { md->ca_challenges = apr_array_copy(p, src->ca_challenges); } + md->acme_tls_1_domains = apr_array_copy(p, src->acme_tls_1_domains); + md->pks = md_pkeys_spec_clone(p, src->pks); } return md; } @@ -261,49 +235,33 @@ md_t *md_clone(apr_pool_t *p, const md_t *src) md->name = apr_pstrdup(p, src->name); md->require_https = src->require_https; md->must_staple = src->must_staple; - md->drive_mode = src->drive_mode; + md->renew_mode = src->renew_mode; md->domains = md_array_str_compact(p, src->domains, 0); - md->pkey_spec = src->pkey_spec; - md->renew_norm = src->renew_norm; + md->pks = md_pkeys_spec_clone(p, src->pks); md->renew_window = src->renew_window; + md->warn_window = src->warn_window; md->contacts = md_array_str_clone(p, src->contacts); - if (src->ca_url) md->ca_url = apr_pstrdup(p, src->ca_url); if (src->ca_proto) md->ca_proto = apr_pstrdup(p, src->ca_proto); + if (src->ca_urls) { + md->ca_urls = md_array_str_clone(p, src->ca_urls); + } + if (src->ca_effective) md->ca_effective = apr_pstrdup(p, src->ca_effective); if (src->ca_account) md->ca_account = apr_pstrdup(p, src->ca_account); if (src->ca_agreement) md->ca_agreement = apr_pstrdup(p, src->ca_agreement); if (src->defn_name) md->defn_name = apr_pstrdup(p, src->defn_name); - if (src->cert_url) md->cert_url = apr_pstrdup(p, src->cert_url); md->defn_line_number = src->defn_line_number; if (src->ca_challenges) { md->ca_challenges = md_array_str_clone(p, src->ca_challenges); } + md->acme_tls_1_domains = md_array_str_compact(p, src->acme_tls_1_domains, 0); + md->stapling = src->stapling; + if (src->dns01_cmd) md->dns01_cmd = apr_pstrdup(p, src->dns01_cmd); + if (src->cert_files) md->cert_files = md_array_str_clone(p, src->cert_files); + if (src->pkey_files) md->pkey_files = md_array_str_clone(p, src->pkey_files); } return md; } -md_t *md_merge(apr_pool_t *p, const md_t *add, const md_t *base) -{ - md_t *n = apr_pcalloc(p, 
sizeof(*n)); - - n->ca_url = add->ca_url? add->ca_url : base->ca_url; - n->ca_proto = add->ca_proto? add->ca_proto : base->ca_proto; - n->ca_agreement = add->ca_agreement? add->ca_agreement : base->ca_agreement; - n->require_https = (add->require_https != MD_REQUIRE_UNSET)? add->require_https : base->require_https; - n->must_staple = (add->must_staple >= 0)? add->must_staple : base->must_staple; - n->drive_mode = (add->drive_mode != MD_DRIVE_DEFAULT)? add->drive_mode : base->drive_mode; - n->pkey_spec = add->pkey_spec? add->pkey_spec : base->pkey_spec; - n->renew_norm = (add->renew_norm > 0)? add->renew_norm : base->renew_norm; - n->renew_window = (add->renew_window > 0)? add->renew_window : base->renew_window; - n->transitive = (add->transitive >= 0)? add->transitive : base->transitive; - if (add->ca_challenges) { - n->ca_challenges = apr_array_copy(p, add->ca_challenges); - } - else if (base->ca_challenges) { - n->ca_challenges = apr_array_copy(p, base->ca_challenges); - } - return n; -} - /**************************************************************************************************/ /* format conversion */ @@ -318,34 +276,22 @@ md_json_t *md_to_json(const md_t *md, apr_pool_t *p) md_json_setl(md->transitive, json, MD_KEY_TRANSITIVE, NULL); md_json_sets(md->ca_account, json, MD_KEY_CA, MD_KEY_ACCOUNT, NULL); md_json_sets(md->ca_proto, json, MD_KEY_CA, MD_KEY_PROTO, NULL); - md_json_sets(md->ca_url, json, MD_KEY_CA, MD_KEY_URL, NULL); - md_json_sets(md->ca_agreement, json, MD_KEY_CA, MD_KEY_AGREEMENT, NULL); - if (md->cert_url) { - md_json_sets(md->cert_url, json, MD_KEY_CERT, MD_KEY_URL, NULL); + md_json_sets(md->ca_effective, json, MD_KEY_CA, MD_KEY_URL, NULL); + if (md->ca_urls && !apr_is_empty_array(md->ca_urls)) { + md_json_setsa(md->ca_urls, json, MD_KEY_CA, MD_KEY_URLS, NULL); } - if (md->pkey_spec) { - md_json_setj(md_pkey_spec_to_json(md->pkey_spec, p), json, MD_KEY_PKEY, NULL); + md_json_sets(md->ca_agreement, json, MD_KEY_CA, MD_KEY_AGREEMENT, NULL); + if (!md_pkeys_spec_is_empty(md->pks)) { + md_json_setj(md_pkeys_spec_to_json(md->pks, p), json, MD_KEY_PKEY, NULL); } md_json_setl(md->state, json, MD_KEY_STATE, NULL); - md_json_setl(md->drive_mode, json, MD_KEY_DRIVE_MODE, NULL); - if (md->expires > 0) { - char *ts = apr_pcalloc(p, APR_RFC822_DATE_LEN); - apr_rfc822_date(ts, md->expires); - md_json_sets(ts, json, MD_KEY_CERT, MD_KEY_EXPIRES, NULL); - } - if (md->valid_from > 0) { - char *ts = apr_pcalloc(p, APR_RFC822_DATE_LEN); - apr_rfc822_date(ts, md->valid_from); - md_json_sets(ts, json, MD_KEY_CERT, MD_KEY_VALID_FROM, NULL); - } - if (md->renew_norm > 0) { - md_json_sets(apr_psprintf(p, "%ld%%", (long)(md->renew_window * 100L / md->renew_norm)), - json, MD_KEY_RENEW_WINDOW, NULL); - } - else { - md_json_setl((long)apr_time_sec(md->renew_window), json, MD_KEY_RENEW_WINDOW, NULL); - } - md_json_setb(md_should_renew(md), json, MD_KEY_RENEW, NULL); + if (md->state_descr) + md_json_sets(md->state_descr, json, MD_KEY_STATE_DESCR, NULL); + md_json_setl(md->renew_mode, json, MD_KEY_RENEW_MODE, NULL); + if (md->renew_window) + md_json_sets(md_timeslice_format(md->renew_window, p), json, MD_KEY_RENEW_WINDOW, NULL); + if (md->warn_window) + md_json_sets(md_timeslice_format(md->warn_window, p), json, MD_KEY_WARN_WINDOW, NULL); if (md->ca_challenges && md->ca_challenges->nelts > 0) { apr_array_header_t *na; na = md_array_str_compact(p, md->ca_challenges, 0); @@ -362,6 +308,15 @@ md_json_t *md_to_json(const md_t *md, apr_pool_t *p) break; } md_json_setb(md->must_staple > 0, json, 
MD_KEY_MUST_STAPLE, NULL); + md_json_setsa(md->acme_tls_1_domains, json, MD_KEY_PROTO, MD_KEY_ACME_TLS_1, NULL); + if (md->cert_files) md_json_setsa(md->cert_files, json, MD_KEY_CERT_FILES, NULL); + if (md->pkey_files) md_json_setsa(md->pkey_files, json, MD_KEY_PKEY_FILES, NULL); + md_json_setb(md->stapling > 0, json, MD_KEY_STAPLING, NULL); + if (md->dns01_cmd) md_json_sets(md->dns01_cmd, json, MD_KEY_CMD_DNS01, NULL); + if (md->ca_eab_kid && strcmp("none", md->ca_eab_kid)) { + md_json_sets(md->ca_eab_kid, json, MD_KEY_EAB, MD_KEY_KID, NULL); + if (md->ca_eab_hmac) md_json_sets(md->ca_eab_hmac, json, MD_KEY_EAB, MD_KEY_HMAC, NULL); + } return json; } return NULL; @@ -377,36 +332,30 @@ md_t *md_from_json(md_json_t *json, apr_pool_t *p) md_json_dupsa(md->contacts, p, json, MD_KEY_CONTACTS, NULL); md->ca_account = md_json_dups(p, json, MD_KEY_CA, MD_KEY_ACCOUNT, NULL); md->ca_proto = md_json_dups(p, json, MD_KEY_CA, MD_KEY_PROTO, NULL); - md->ca_url = md_json_dups(p, json, MD_KEY_CA, MD_KEY_URL, NULL); + md->ca_effective = md_json_dups(p, json, MD_KEY_CA, MD_KEY_URL, NULL); + if (md_json_has_key(json, MD_KEY_CA, MD_KEY_URLS, NULL)) { + md->ca_urls = apr_array_make(p, 5, sizeof(const char*)); + md_json_dupsa(md->ca_urls, p, json, MD_KEY_CA, MD_KEY_URLS, NULL); + } + else if (md->ca_effective) { + /* compat for old format where we had only a single url */ + md->ca_urls = apr_array_make(p, 5, sizeof(const char*)); + APR_ARRAY_PUSH(md->ca_urls, const char*) = md->ca_effective; + } md->ca_agreement = md_json_dups(p, json, MD_KEY_CA, MD_KEY_AGREEMENT, NULL); - md->cert_url = md_json_dups(p, json, MD_KEY_CERT, MD_KEY_URL, NULL); - if (md_json_has_key(json, MD_KEY_PKEY, MD_KEY_TYPE, NULL)) { - md->pkey_spec = md_pkey_spec_from_json(md_json_getj(json, MD_KEY_PKEY, NULL), p); + if (md_json_has_key(json, MD_KEY_PKEY, NULL)) { + md->pks = md_pkeys_spec_from_json(md_json_getj(json, MD_KEY_PKEY, NULL), p); } md->state = (md_state_t)md_json_getl(json, MD_KEY_STATE, NULL); - md->drive_mode = (int)md_json_getl(json, MD_KEY_DRIVE_MODE, NULL); + md->state_descr = md_json_dups(p, json, MD_KEY_STATE_DESCR, NULL); + if (MD_S_EXPIRED_DEPRECATED == md->state) md->state = MD_S_COMPLETE; + md->renew_mode = (int)md_json_getl(json, MD_KEY_RENEW_MODE, NULL); md->domains = md_array_str_compact(p, md->domains, 0); md->transitive = (int)md_json_getl(json, MD_KEY_TRANSITIVE, NULL); - s = md_json_dups(p, json, MD_KEY_CERT, MD_KEY_EXPIRES, NULL); - if (s && *s) { - md->expires = apr_date_parse_rfc(s); - } - s = md_json_dups(p, json, MD_KEY_CERT, MD_KEY_VALID_FROM, NULL); - if (s && *s) { - md->valid_from = apr_date_parse_rfc(s); - } - md->renew_norm = 0; - md->renew_window = apr_time_from_sec(md_json_getl(json, MD_KEY_RENEW_WINDOW, NULL)); - if (md->renew_window <= 0) { - s = md_json_gets(json, MD_KEY_RENEW_WINDOW, NULL); - if (s && strchr(s, '%')) { - int percent = atoi(s); - if (0 < percent && percent < 100) { - md->renew_norm = apr_time_from_sec(100 * MD_SECS_PER_DAY); - md->renew_window = apr_time_from_sec(percent * MD_SECS_PER_DAY); - } - } - } + s = md_json_gets(json, MD_KEY_RENEW_WINDOW, NULL); + md_timeslice_parse(&md->renew_window, p, s, MD_TIME_LIFE_NORM); + s = md_json_gets(json, MD_KEY_WARN_WINDOW, NULL); + md_timeslice_parse(&md->warn_window, p, s, MD_TIME_LIFE_NORM); if (md_json_has_key(json, MD_KEY_CA, MD_KEY_CHALLENGES, NULL)) { md->ca_challenges = apr_array_make(p, 5, sizeof(const char*)); md_json_dupsa(md->ca_challenges, p, json, MD_KEY_CA, MD_KEY_CHALLENGES, NULL); @@ -420,9 +369,94 @@ md_t 
*md_from_json(md_json_t *json, apr_pool_t *p) md->require_https = MD_REQUIRE_PERMANENT; } md->must_staple = (int)md_json_getb(json, MD_KEY_MUST_STAPLE, NULL); - + md_json_dupsa(md->acme_tls_1_domains, p, json, MD_KEY_PROTO, MD_KEY_ACME_TLS_1, NULL); + + if (md_json_has_key(json, MD_KEY_CERT_FILES, NULL)) { + md->cert_files = apr_array_make(p, 3, sizeof(char*)); + md->pkey_files = apr_array_make(p, 3, sizeof(char*)); + md_json_dupsa(md->cert_files, p, json, MD_KEY_CERT_FILES, NULL); + md_json_dupsa(md->pkey_files, p, json, MD_KEY_PKEY_FILES, NULL); + } + md->stapling = (int)md_json_getb(json, MD_KEY_STAPLING, NULL); + md->dns01_cmd = md_json_dups(p, json, MD_KEY_CMD_DNS01, NULL); + if (md_json_has_key(json, MD_KEY_EAB, NULL)) { + md->ca_eab_kid = md_json_dups(p, json, MD_KEY_EAB, MD_KEY_KID, NULL); + md->ca_eab_hmac = md_json_dups(p, json, MD_KEY_EAB, MD_KEY_HMAC, NULL); + } return md; } return NULL; } +md_json_t *md_to_public_json(const md_t *md, apr_pool_t *p) +{ + md_json_t *json = md_to_json(md, p); + if (md_json_has_key(json, MD_KEY_EAB, MD_KEY_HMAC, NULL)) { + md_json_sets("***", json, MD_KEY_EAB, MD_KEY_HMAC, NULL); + } + return json; +} + +typedef struct { + const char *name; + const char *url; +} md_ca_t; + +#define LE_ACMEv2_PROD "https://acme-v02.api.letsencrypt.org/directory" +#define LE_ACMEv2_STAGING "https://acme-staging-v02.api.letsencrypt.org/directory" +#define BUYPASS_ACME "https://api.buypass.com/acme/directory" +#define BUYPASS_ACME_TEST "https://api.test4.buypass.no/acme/directory" + +static md_ca_t KNOWN_CAs[] = { + { "LetsEncrypt", LE_ACMEv2_PROD }, + { "LetsEncrypt-Test", LE_ACMEv2_STAGING }, + { "Buypass", BUYPASS_ACME }, + { "Buypass-Test", BUYPASS_ACME_TEST }, +}; + +const char *md_get_ca_name_from_url(apr_pool_t *p, const char *url) +{ + apr_uri_t uri_parsed; + unsigned int i; + + for (i = 0; i < sizeof(KNOWN_CAs)/sizeof(KNOWN_CAs[0]); ++i) { + if (!apr_strnatcasecmp(KNOWN_CAs[i].url, url)) { + return KNOWN_CAs[i].name; + } + } + if (APR_SUCCESS == apr_uri_parse(p, url, &uri_parsed)) { + return uri_parsed.hostname; + } + return apr_pstrdup(p, url); +} + +apr_status_t md_get_ca_url_from_name(const char **purl, apr_pool_t *p, const char *name) +{ + const char *err; + unsigned int i; + apr_status_t rv = APR_SUCCESS; + + *purl = NULL; + for (i = 0; i < sizeof(KNOWN_CAs)/sizeof(KNOWN_CAs[0]); ++i) { + if (!apr_strnatcasecmp(KNOWN_CAs[i].name, name)) { + *purl = KNOWN_CAs[i].url; + goto leave; + } + } + *purl = name; + rv = md_util_abs_uri_check(p, name, &err); + if (APR_SUCCESS != rv) { + apr_array_header_t *names; + + names = apr_array_make(p, 10, sizeof(const char*)); + for (i = 0; i < sizeof(KNOWN_CAs)/sizeof(KNOWN_CAs[0]); ++i) { + APR_ARRAY_PUSH(names, const char *) = KNOWN_CAs[i].name; + } + *purl = apr_psprintf(p, + "The CA name '%s' is not known and it is not a URL either (%s). 
" + "Known CA names are: %s.", + name, err, apr_array_pstrcat(p, names, ' ')); + } +leave: + return rv; +} diff --git a/modules/md/md_crypt.c b/modules/md/md_crypt.c index e0aac3e..4b2af89 100644 --- a/modules/md/md_crypt.c +++ b/modules/md/md_crypt.c @@ -22,19 +22,26 @@ #include #include #include +#include +#include #include #include +#include #include #include #include #include +#if OPENSSL_VERSION_NUMBER >= 0x30000000L +#include +#endif #include "md.h" #include "md_crypt.h" #include "md_json.h" #include "md_log.h" #include "md_http.h" +#include "md_time.h" #include "md_util.h" /* getpid for *NIX */ @@ -57,6 +64,17 @@ #define MD_USE_OPENSSL_PRE_1_1_API (OPENSSL_VERSION_NUMBER < 0x10100000L) #endif +#if (defined(LIBRESSL_VERSION_NUMBER) && (LIBRESSL_VERSION_NUMBER < 0x3050000fL)) || (OPENSSL_VERSION_NUMBER < 0x10100000L) +/* Missing from LibreSSL < 3.5.0 and only available since OpenSSL v1.1.x */ +#ifndef OPENSSL_NO_CT +#define OPENSSL_NO_CT +#endif +#endif + +#ifndef OPENSSL_NO_CT +#include +#endif + static int initialized; struct md_pkey_t { @@ -142,17 +160,13 @@ apr_status_t md_crypt_init(apr_pool_t *pool) return APR_SUCCESS; } -typedef struct { - char *data; - apr_size_t len; -} buffer_rec; - static apr_status_t fwrite_buffer(void *baton, apr_file_t *f, apr_pool_t *p) { - buffer_rec *buf = baton; + md_data_t *buf = baton; + apr_size_t wlen; (void)p; - return apr_file_write_full(f, buf->data, buf->len, &buf->len); + return apr_file_write_full(f, buf->data, buf->len, &wlen); } apr_status_t md_rand_bytes(unsigned char *buf, apr_size_t len, apr_pool_t *p) @@ -183,8 +197,10 @@ static int pem_passwd(char *buf, int size, int rwflag, void *baton) size = (int)ctx->pass_len; } memcpy(buf, ctx->pass_phrase, (size_t)size); + } else { + return 0; } - return ctx->pass_len; + return size; } /**************************************************************************************************/ @@ -197,7 +213,8 @@ static int pem_passwd(char *buf, int size, int rwflag, void *baton) */ static apr_time_t md_asn1_time_get(const ASN1_TIME* time) { -#if OPENSSL_VERSION_NUMBER < 0x10002000L || defined(LIBRESSL_VERSION_NUMBER) +#if OPENSSL_VERSION_NUMBER < 0x10002000L || (defined(LIBRESSL_VERSION_NUMBER) && \ + LIBRESSL_VERSION_NUMBER < 0x3060000fL) /* courtesy: https://stackoverflow.com/questions/10975542/asn1-time-to-time-t-conversion#11263731 * all bugs are mine */ apr_time_exp_t t; @@ -246,10 +263,98 @@ static apr_time_t md_asn1_time_get(const ASN1_TIME* time) #endif } +apr_time_t md_asn1_generalized_time_get(void *ASN1_GENERALIZEDTIME) +{ + return md_asn1_time_get(ASN1_GENERALIZEDTIME); +} + +/**************************************************************************************************/ +/* OID/NID things */ + +static int get_nid(const char *num, const char *sname, const char *lname) +{ + /* Funny API, an OID for a feature might be configured or + * maybe not. In the second case, we need to add it. But adding + * when it already is there is an error... 
*/ + int nid = OBJ_txt2nid(num); + if (NID_undef == nid) { + nid = OBJ_create(num, sname, lname); + } + return nid; +} + +#define MD_GET_NID(x) get_nid(MD_OID_##x##_NUM, MD_OID_##x##_SNAME, MD_OID_##x##_LNAME) /**************************************************************************************************/ /* private keys */ +md_pkeys_spec_t *md_pkeys_spec_make(apr_pool_t *p) +{ + md_pkeys_spec_t *pks; + + pks = apr_pcalloc(p, sizeof(*pks)); + pks->p = p; + pks->specs = apr_array_make(p, 2, sizeof(md_pkey_spec_t*)); + return pks; +} + +void md_pkeys_spec_add(md_pkeys_spec_t *pks, md_pkey_spec_t *spec) +{ + APR_ARRAY_PUSH(pks->specs, md_pkey_spec_t*) = spec; +} + +void md_pkeys_spec_add_default(md_pkeys_spec_t *pks) +{ + md_pkey_spec_t *spec; + + spec = apr_pcalloc(pks->p, sizeof(*spec)); + spec->type = MD_PKEY_TYPE_DEFAULT; + md_pkeys_spec_add(pks, spec); +} + +int md_pkeys_spec_contains_rsa(md_pkeys_spec_t *pks) +{ + md_pkey_spec_t *spec; + int i; + for (i = 0; i < pks->specs->nelts; ++i) { + spec = APR_ARRAY_IDX(pks->specs, i, md_pkey_spec_t*); + if (MD_PKEY_TYPE_RSA == spec->type) return 1; + } + return 0; +} + +void md_pkeys_spec_add_rsa(md_pkeys_spec_t *pks, unsigned int bits) +{ + md_pkey_spec_t *spec; + + spec = apr_pcalloc(pks->p, sizeof(*spec)); + spec->type = MD_PKEY_TYPE_RSA; + spec->params.rsa.bits = bits; + md_pkeys_spec_add(pks, spec); +} + +int md_pkeys_spec_contains_ec(md_pkeys_spec_t *pks, const char *curve) +{ + md_pkey_spec_t *spec; + int i; + for (i = 0; i < pks->specs->nelts; ++i) { + spec = APR_ARRAY_IDX(pks->specs, i, md_pkey_spec_t*); + if (MD_PKEY_TYPE_EC == spec->type + && !apr_strnatcasecmp(curve, spec->params.ec.curve)) return 1; + } + return 0; +} + +void md_pkeys_spec_add_ec(md_pkeys_spec_t *pks, const char *curve) +{ + md_pkey_spec_t *spec; + + spec = apr_pcalloc(pks->p, sizeof(*spec)); + spec->type = MD_PKEY_TYPE_EC; + spec->params.ec.curve = apr_pstrdup(pks->p, curve); + md_pkeys_spec_add(pks, spec); +} + md_json_t *md_pkey_spec_to_json(const md_pkey_spec_t *spec, apr_pool_t *p) { md_json_t *json = md_json_create(p); @@ -264,6 +369,12 @@ md_json_t *md_pkey_spec_to_json(const md_pkey_spec_t *spec, apr_pool_t *p) md_json_setl((long)spec->params.rsa.bits, json, MD_KEY_BITS, NULL); } break; + case MD_PKEY_TYPE_EC: + md_json_sets("EC", json, MD_KEY_TYPE, NULL); + if (spec->params.ec.curve) { + md_json_sets(spec->params.ec.curve, json, MD_KEY_CURVE, NULL); + } + break; default: md_json_sets("Unsupported", json, MD_KEY_TYPE, NULL); break; @@ -272,6 +383,27 @@ md_json_t *md_pkey_spec_to_json(const md_pkey_spec_t *spec, apr_pool_t *p) return json; } +static apr_status_t spec_to_json(void *value, md_json_t *json, apr_pool_t *p, void *baton) +{ + md_json_t *jspec; + + (void)baton; + jspec = md_pkey_spec_to_json((md_pkey_spec_t*)value, p); + return md_json_setj(jspec, json, NULL); +} + +md_json_t *md_pkeys_spec_to_json(const md_pkeys_spec_t *pks, apr_pool_t *p) +{ + md_json_t *j; + + if (pks->specs->nelts == 1) { + return md_pkey_spec_to_json(md_pkeys_spec_get(pks, 0), p); + } + j = md_json_create(p); + md_json_seta(pks->specs, spec_to_json, (void*)pks, j, "specs", NULL); + return md_json_getj(j, "specs", NULL); +} + md_pkey_spec_t *md_pkey_spec_from_json(struct md_json_t *json, apr_pool_t *p) { md_pkey_spec_t *spec = apr_pcalloc(p, sizeof(*spec)); @@ -293,29 +425,161 @@ md_pkey_spec_t *md_pkey_spec_from_json(struct md_json_t *json, apr_pool_t *p) spec->params.rsa.bits = MD_PKEY_RSA_BITS_DEF; } } + else if (!apr_strnatcasecmp("EC", s)) { + spec->type = 
MD_PKEY_TYPE_EC; + s = md_json_gets(json, MD_KEY_CURVE, NULL); + if (s) { + spec->params.ec.curve = apr_pstrdup(p, s); + } + else { + spec->params.ec.curve = NULL; + } + } } return spec; } -int md_pkey_spec_eq(md_pkey_spec_t *spec1, md_pkey_spec_t *spec2) +static apr_status_t spec_from_json(void **pvalue, md_json_t *json, apr_pool_t *p, void *baton) +{ + (void)baton; + *pvalue = md_pkey_spec_from_json(json, p); + return APR_SUCCESS; +} + +md_pkeys_spec_t *md_pkeys_spec_from_json(struct md_json_t *json, apr_pool_t *p) +{ + md_pkeys_spec_t *pks; + md_pkey_spec_t *spec; + + pks = md_pkeys_spec_make(p); + if (md_json_is(MD_JSON_TYPE_ARRAY, json, NULL)) { + md_json_geta(pks->specs, spec_from_json, pks, json, NULL); + } + else { + spec = md_pkey_spec_from_json(json, p); + md_pkeys_spec_add(pks, spec); + } + return pks; +} + +static int pkey_spec_eq(md_pkey_spec_t *s1, md_pkey_spec_t *s2) { - if (spec1 == spec2) { + if (s1 == s2) { return 1; } - if (spec1 && spec2 && spec1->type == spec2->type) { - switch (spec1->type) { + if (s1 && s2 && s1->type == s2->type) { + switch (s1->type) { case MD_PKEY_TYPE_DEFAULT: return 1; case MD_PKEY_TYPE_RSA: - if (spec1->params.rsa.bits == spec2->params.rsa.bits) { + if (s1->params.rsa.bits == s2->params.rsa.bits) { return 1; } break; + case MD_PKEY_TYPE_EC: + if (s1->params.ec.curve == s2->params.ec.curve) { + return 1; + } + else if (!s1->params.ec.curve || !s2->params.ec.curve) { + return 0; + } + return !strcmp(s1->params.ec.curve, s2->params.ec.curve); } } return 0; } +int md_pkeys_spec_eq(md_pkeys_spec_t *pks1, md_pkeys_spec_t *pks2) +{ + int i; + + if (pks1 == pks2) { + return 1; + } + if (pks1 && pks2 && pks1->specs->nelts == pks2->specs->nelts) { + for(i = 0; i < pks1->specs->nelts; ++i) { + if (!pkey_spec_eq(APR_ARRAY_IDX(pks1->specs, i, md_pkey_spec_t *), + APR_ARRAY_IDX(pks2->specs, i, md_pkey_spec_t *))) { + return 0; + } + } + return 1; + } + return 0; +} + +static md_pkey_spec_t *pkey_spec_clone(apr_pool_t *p, md_pkey_spec_t *spec) +{ + md_pkey_spec_t *nspec; + + nspec = apr_pcalloc(p, sizeof(*nspec)); + nspec->type = spec->type; + switch (spec->type) { + case MD_PKEY_TYPE_DEFAULT: + break; + case MD_PKEY_TYPE_RSA: + nspec->params.rsa.bits = spec->params.rsa.bits; + break; + case MD_PKEY_TYPE_EC: + nspec->params.ec.curve = apr_pstrdup(p, spec->params.ec.curve); + break; + } + return nspec; +} + +const char *md_pkey_spec_name(const md_pkey_spec_t *spec) +{ + if (!spec) return "rsa"; + switch (spec->type) { + case MD_PKEY_TYPE_DEFAULT: + case MD_PKEY_TYPE_RSA: + return "rsa"; + case MD_PKEY_TYPE_EC: + return spec->params.ec.curve; + } + return "unknown"; +} + +int md_pkeys_spec_is_empty(const md_pkeys_spec_t *pks) +{ + return NULL == pks || 0 == pks->specs->nelts; +} + +md_pkeys_spec_t *md_pkeys_spec_clone(apr_pool_t *p, const md_pkeys_spec_t *pks) +{ + md_pkeys_spec_t *npks = NULL; + md_pkey_spec_t *spec; + int i; + + if (pks && pks->specs->nelts > 0) { + npks = apr_pcalloc(p, sizeof(*npks)); + npks->specs = apr_array_make(p, pks->specs->nelts, sizeof(md_pkey_spec_t*)); + for (i = 0; i < pks->specs->nelts; ++i) { + spec = APR_ARRAY_IDX(pks->specs, i, md_pkey_spec_t*); + APR_ARRAY_PUSH(npks->specs, md_pkey_spec_t*) = pkey_spec_clone(p, spec); + } + } + return npks; +} + +int md_pkeys_spec_count(const md_pkeys_spec_t *pks) +{ + return md_pkeys_spec_is_empty(pks)? 
1 : pks->specs->nelts; +} + +static md_pkey_spec_t PkeySpecDef = { MD_PKEY_TYPE_DEFAULT, {{ 0 }} }; + +md_pkey_spec_t *md_pkeys_spec_get(const md_pkeys_spec_t *pks, int index) +{ + if (md_pkeys_spec_is_empty(pks)) { + return index == 0? &PkeySpecDef : NULL; + } + else if (pks && index >= 0 && index < pks->specs->nelts) { + return APR_ARRAY_IDX(pks->specs, index, md_pkey_spec_t*); + } + return NULL; +} + static md_pkey_t *make_pkey(apr_pool_t *p) { md_pkey_t *pkey = apr_pcalloc(p, sizeof(*pkey)); @@ -377,13 +641,14 @@ apr_status_t md_pkey_fload(md_pkey_t **ppkey, apr_pool_t *p, return rv; } -static apr_status_t pkey_to_buffer(buffer_rec *buffer, md_pkey_t *pkey, apr_pool_t *p, +static apr_status_t pkey_to_buffer(md_data_t *buf, md_pkey_t *pkey, apr_pool_t *p, const char *pass, apr_size_t pass_len) { BIO *bio = BIO_new(BIO_s_mem()); const EVP_CIPHER *cipher = NULL; pem_password_cb *cb = NULL; void *cb_baton = NULL; + apr_status_t rv = APR_SUCCESS; passwd_ctx ctx; unsigned long err; int i; @@ -392,7 +657,8 @@ static apr_status_t pkey_to_buffer(buffer_rec *buffer, md_pkey_t *pkey, apr_pool return APR_ENOMEM; } if (pass_len > INT_MAX) { - return APR_EINVAL; + rv = APR_EINVAL; + goto cleanup; } if (pass && pass_len > 0) { ctx.pass_phrase = pass; @@ -401,35 +667,42 @@ static apr_status_t pkey_to_buffer(buffer_rec *buffer, md_pkey_t *pkey, apr_pool cb_baton = &ctx; cipher = EVP_aes_256_cbc(); if (!cipher) { - return APR_ENOTIMPL; + rv = APR_ENOTIMPL; + goto cleanup; } } ERR_clear_error(); +#if 1 + if (!PEM_write_bio_PKCS8PrivateKey(bio, pkey->pkey, cipher, NULL, 0, cb, cb_baton)) { +#else if (!PEM_write_bio_PrivateKey(bio, pkey->pkey, cipher, NULL, 0, cb, cb_baton)) { - BIO_free(bio); +#endif err = ERR_get_error(); md_log_perror(MD_LOG_MARK, MD_LOG_ERR, 0, p, "PEM_write key: %ld %s", err, ERR_error_string(err, NULL)); - return APR_EINVAL; + rv = APR_EINVAL; + goto cleanup; } + md_data_null(buf); i = BIO_pending(bio); if (i > 0) { - buffer->data = apr_palloc(p, (apr_size_t)i + 1); - i = BIO_read(bio, buffer->data, i); - buffer->data[i] = '\0'; - buffer->len = (apr_size_t)i; + buf->data = apr_palloc(p, (apr_size_t)i); + i = BIO_read(bio, (char*)buf->data, i); + buf->len = (apr_size_t)i; } + +cleanup: BIO_free(bio); - return APR_SUCCESS; + return rv; } apr_status_t md_pkey_fsave(md_pkey_t *pkey, apr_pool_t *p, const char *pass_phrase, apr_size_t pass_len, const char *fname, apr_fileperms_t perms) { - buffer_rec buffer; + md_data_t buffer; apr_status_t rv; if (APR_SUCCESS == (rv = pkey_to_buffer(&buffer, pkey, p, pass_phrase, pass_len))) { @@ -440,6 +713,71 @@ apr_status_t md_pkey_fsave(md_pkey_t *pkey, apr_pool_t *p, return rv; } +apr_status_t md_pkey_read_http(md_pkey_t **ppkey, apr_pool_t *pool, + const struct md_http_response_t *res) +{ + apr_status_t rv; + apr_off_t data_len; + char *pem_data; + apr_size_t pem_len; + md_pkey_t *pkey; + BIO *bf; + passwd_ctx ctx; + + rv = apr_brigade_length(res->body, 1, &data_len); + if (APR_SUCCESS != rv) goto leave; + if (data_len > 1024*1024) { /* certs usually are <2k each */ + rv = APR_EINVAL; + goto leave; + } + rv = apr_brigade_pflatten(res->body, &pem_data, &pem_len, res->req->pool); + if (APR_SUCCESS != rv) goto leave; + + if (NULL == (bf = BIO_new_mem_buf(pem_data, (int)pem_len))) { + rv = APR_ENOMEM; + goto leave; + } + pkey = make_pkey(pool); + ctx.pass_phrase = NULL; + ctx.pass_len = 0; + ERR_clear_error(); + pkey->pkey = PEM_read_bio_PrivateKey(bf, NULL, NULL, &ctx); + BIO_free(bf); + + if (pkey->pkey == NULL) { + unsigned long err = 
ERR_get_error(); + rv = APR_EINVAL; + md_log_perror(MD_LOG_MARK, MD_LOG_WARNING, rv, pool, + "error loading pkey from http response: %s", + ERR_error_string(err, NULL)); + goto leave; + } + rv = APR_SUCCESS; + apr_pool_cleanup_register(pool, pkey, pkey_cleanup, apr_pool_cleanup_null); + +leave: + *ppkey = (APR_SUCCESS == rv)? pkey : NULL; + return rv; +} + +/* Determine the message digest used for signing with the given private key. + */ +static const EVP_MD *pkey_get_MD(md_pkey_t *pkey) +{ + switch (EVP_PKEY_id(pkey->pkey)) { +#ifdef NID_ED25519 + case NID_ED25519: + return NULL; +#endif +#ifdef NID_ED448 + case NID_ED448: + return NULL; +#endif + default: + return EVP_sha256(); + } +} + static apr_status_t gen_rsa(md_pkey_t **ppkey, apr_pool_t *p, unsigned int bits) { EVP_PKEY_CTX *ctx = NULL; @@ -465,6 +803,143 @@ static apr_status_t gen_rsa(md_pkey_t **ppkey, apr_pool_t *p, unsigned int bits) return rv; } +static apr_status_t check_EC_curve(int nid, apr_pool_t *p) { + EC_builtin_curve *curves = NULL; + size_t nc, i; + int rv = APR_ENOENT; + + nc = EC_get_builtin_curves(NULL, 0); + if (NULL == (curves = OPENSSL_malloc(sizeof(*curves) * nc)) || + nc != EC_get_builtin_curves(curves, nc)) { + rv = APR_EGENERAL; + md_log_perror(MD_LOG_MARK, MD_LOG_ERR, rv, p, + "error looking up OpenSSL builtin EC curves"); + goto leave; + } + for (i = 0; i < nc; ++i) { + if (nid == curves[i].nid) { + rv = APR_SUCCESS; + break; + } + } +leave: + OPENSSL_free(curves); + return rv; +} + +static apr_status_t gen_ec(md_pkey_t **ppkey, apr_pool_t *p, const char *curve) +{ + EVP_PKEY_CTX *ctx = NULL; + apr_status_t rv; + int curve_nid = NID_undef; + + /* 1. Convert the curve into its registered identifier. Curves can be known under + * different names. + * 2. Determine if the curve is supported by OpenSSL (or whatever is linked). + * 3. Generate the key, respecting the specific quirks some curves require. 
+ */ + curve_nid = EC_curve_nist2nid(curve); + /* In case this fails, try some names from other standards, like SECG */ +#ifdef NID_secp384r1 + if (NID_undef == curve_nid && !apr_strnatcasecmp("secp384r1", curve)) { + curve_nid = NID_secp384r1; + curve = EC_curve_nid2nist(curve_nid); + } +#endif +#ifdef NID_X9_62_prime256v1 + if (NID_undef == curve_nid && !apr_strnatcasecmp("secp256r1", curve)) { + curve_nid = NID_X9_62_prime256v1; + curve = EC_curve_nid2nist(curve_nid); + } +#endif +#ifdef NID_X9_62_prime192v1 + if (NID_undef == curve_nid && !apr_strnatcasecmp("secp192r1", curve)) { + curve_nid = NID_X9_62_prime192v1; + curve = EC_curve_nid2nist(curve_nid); + } +#endif +#if defined(NID_X25519) && (!defined(LIBRESSL_VERSION_NUMBER) || \ + LIBRESSL_VERSION_NUMBER >= 0x3070000fL) + if (NID_undef == curve_nid && !apr_strnatcasecmp("X25519", curve)) { + curve_nid = NID_X25519; + curve = EC_curve_nid2nist(curve_nid); + } +#endif + if (NID_undef == curve_nid) { + /* OpenSSL object/curve names */ + curve_nid = OBJ_sn2nid(curve); + } + if (NID_undef == curve_nid) { + md_log_perror(MD_LOG_MARK, MD_LOG_ERR, 0, p, "ec curve unknown: %s", curve); + rv = APR_ENOTIMPL; goto leave; + } + + *ppkey = make_pkey(p); + switch (curve_nid) { + +#if defined(NID_X25519) && (!defined(LIBRESSL_VERSION_NUMBER) || \ + LIBRESSL_VERSION_NUMBER >= 0x3070000fL) + case NID_X25519: + /* no parameters */ + if (NULL == (ctx = EVP_PKEY_CTX_new_id(EVP_PKEY_X25519, NULL)) + || EVP_PKEY_keygen_init(ctx) <= 0 + || EVP_PKEY_keygen(ctx, &(*ppkey)->pkey) <= 0) { + md_log_perror(MD_LOG_MARK, MD_LOG_WARNING, 0, p, + "error generate EC key for group: %s", curve); + rv = APR_EGENERAL; goto leave; + } + rv = APR_SUCCESS; + break; +#endif + +#if defined(NID_X448) && !defined(LIBRESSL_VERSION_NUMBER) + case NID_X448: + /* no parameters */ + if (NULL == (ctx = EVP_PKEY_CTX_new_id(EVP_PKEY_X448, NULL)) + || EVP_PKEY_keygen_init(ctx) <= 0 + || EVP_PKEY_keygen(ctx, &(*ppkey)->pkey) <= 0) { + md_log_perror(MD_LOG_MARK, MD_LOG_WARNING, 0, p, + "error generate EC key for group: %s", curve); + rv = APR_EGENERAL; goto leave; + } + rv = APR_SUCCESS; + break; +#endif + + default: +#if OPENSSL_VERSION_NUMBER < 0x30000000L + if (APR_SUCCESS != (rv = check_EC_curve(curve_nid, p))) goto leave; + if (NULL == (ctx = EVP_PKEY_CTX_new_id(EVP_PKEY_EC, NULL)) + || EVP_PKEY_paramgen_init(ctx) <= 0 + || EVP_PKEY_CTX_set_ec_paramgen_curve_nid(ctx, curve_nid) <= 0 + || EVP_PKEY_CTX_set_ec_param_enc(ctx, OPENSSL_EC_NAMED_CURVE) <= 0 + || EVP_PKEY_keygen_init(ctx) <= 0 + || EVP_PKEY_keygen(ctx, &(*ppkey)->pkey) <= 0) { + md_log_perror(MD_LOG_MARK, MD_LOG_WARNING, 0, p, + "error generate EC key for group: %s", curve); + rv = APR_EGENERAL; goto leave; + } +#else + if (APR_SUCCESS != (rv = check_EC_curve(curve_nid, p))) goto leave; + if (NULL == (ctx = EVP_PKEY_CTX_new_id(EVP_PKEY_EC, NULL)) + || EVP_PKEY_keygen_init(ctx) <= 0 + || EVP_PKEY_CTX_ctrl_str(ctx, "ec_paramgen_curve", curve) <= 0 + || EVP_PKEY_keygen(ctx, &(*ppkey)->pkey) <= 0) { + md_log_perror(MD_LOG_MARK, MD_LOG_WARNING, 0, p, + "error generate EC key for group: %s", curve); + rv = APR_EGENERAL; goto leave; + } +#endif + rv = APR_SUCCESS; + break; + } + +leave: + if (APR_SUCCESS != rv) *ppkey = NULL; + EVP_PKEY_CTX_free(ctx); + return rv; +} + apr_status_t md_pkey_gen(md_pkey_t **ppkey, apr_pool_t *p, md_pkey_spec_t *spec) { md_pkey_type_t ptype = spec? 
spec->type : MD_PKEY_TYPE_DEFAULT; @@ -473,6 +948,8 @@ apr_status_t md_pkey_gen(md_pkey_t **ppkey, apr_pool_t *p, md_pkey_spec_t *spec) return gen_rsa(ppkey, p, MD_PKEY_RSA_BITS_DEF); case MD_PKEY_TYPE_RSA: return gen_rsa(ppkey, p, spec->params.rsa.bits); + case MD_PKEY_TYPE_EC: + return gen_ec(ppkey, p, spec->params.ec.curve); default: return APR_ENOTIMPL; } @@ -501,59 +978,77 @@ static void RSA_get0_key(const RSA *r, static const char *bn64(const BIGNUM *b, apr_pool_t *p) { if (b) { - apr_size_t len = (apr_size_t)BN_num_bytes(b); - char *buffer = apr_pcalloc(p, len); - if (buffer) { - BN_bn2bin(b, (unsigned char *)buffer); - return md_util_base64url_encode(buffer, len, p); - } + md_data_t buffer; + + md_data_pinit(&buffer, (apr_size_t)BN_num_bytes(b), p); + if (buffer.data) { + BN_bn2bin(b, (unsigned char *)buffer.data); + return md_util_base64url_encode(&buffer, p); + } } return NULL; } const char *md_pkey_get_rsa_e64(md_pkey_t *pkey, apr_pool_t *p) { - const BIGNUM *e; - RSA *rsa = EVP_PKEY_get1_RSA(pkey->pkey); - - if (!rsa) { - return NULL; +#if OPENSSL_VERSION_NUMBER < 0x30000000L + const RSA *rsa = EVP_PKEY_get0_RSA(pkey->pkey); + if (rsa) { + const BIGNUM *e; + RSA_get0_key(rsa, NULL, &e, NULL); + return bn64(e, p); + } +#else + BIGNUM *e = NULL; + if (EVP_PKEY_get_bn_param(pkey->pkey, OSSL_PKEY_PARAM_RSA_E, &e)) { + const char *e64 = bn64(e, p); + BN_free(e); + return e64; } - RSA_get0_key(rsa, NULL, &e, NULL); - return bn64(e, p); +#endif + return NULL; } const char *md_pkey_get_rsa_n64(md_pkey_t *pkey, apr_pool_t *p) { - const BIGNUM *n; - RSA *rsa = EVP_PKEY_get1_RSA(pkey->pkey); - - if (!rsa) { - return NULL; +#if OPENSSL_VERSION_NUMBER < 0x30000000L + const RSA *rsa = EVP_PKEY_get0_RSA(pkey->pkey); + if (rsa) { + const BIGNUM *n; + RSA_get0_key(rsa, &n, NULL, NULL); + return bn64(n, p); + } +#else + BIGNUM *n = NULL; + if (EVP_PKEY_get_bn_param(pkey->pkey, OSSL_PKEY_PARAM_RSA_N, &n)) { + const char *n64 = bn64(n, p); + BN_free(n); + return n64; } - RSA_get0_key(rsa, &n, NULL, NULL); - return bn64(n, p); +#endif + return NULL; } apr_status_t md_crypt_sign64(const char **psign64, md_pkey_t *pkey, apr_pool_t *p, const char *d, size_t dlen) { EVP_MD_CTX *ctx = NULL; - char *buffer; + md_data_t buffer; unsigned int blen; const char *sign64 = NULL; apr_status_t rv = APR_ENOMEM; - - buffer = apr_pcalloc(p, (apr_size_t)EVP_PKEY_size(pkey->pkey)); - if (buffer) { + + md_data_pinit(&buffer, (apr_size_t)EVP_PKEY_size(pkey->pkey), p); + if (buffer.data) { ctx = EVP_MD_CTX_create(); if (ctx) { rv = APR_ENOTIMPL; if (EVP_SignInit_ex(ctx, EVP_sha256(), NULL)) { rv = APR_EGENERAL; if (EVP_SignUpdate(ctx, d, dlen)) { - if (EVP_SignFinal(ctx, (unsigned char*)buffer, &blen, pkey->pkey)) { - sign64 = md_util_base64url_encode(buffer, blen, p); + if (EVP_SignFinal(ctx, (unsigned char*)buffer.data, &blen, pkey->pkey)) { + buffer.len = blen; + sign64 = md_util_base64url_encode(&buffer, p); if (sign64) { rv = APR_SUCCESS; } @@ -575,56 +1070,42 @@ apr_status_t md_crypt_sign64(const char **psign64, md_pkey_t *pkey, apr_pool_t * return rv; } -static apr_status_t sha256_digest(unsigned char **pdigest, size_t *pdigest_len, - apr_pool_t *p, const char *d, size_t dlen) +static apr_status_t sha256_digest(md_data_t **pdigest, apr_pool_t *p, const md_data_t *buf) { EVP_MD_CTX *ctx = NULL; - unsigned char *buffer; + md_data_t *digest; apr_status_t rv = APR_ENOMEM; - unsigned int blen; - - buffer = apr_pcalloc(p, EVP_MAX_MD_SIZE); - if (buffer) { - ctx = EVP_MD_CTX_create(); - if (ctx) { - rv = APR_ENOTIMPL; - 
if (EVP_DigestInit_ex(ctx, EVP_sha256(), NULL)) { - rv = APR_EGENERAL; - if (EVP_DigestUpdate(ctx, d, dlen)) { - if (EVP_DigestFinal(ctx, buffer, &blen)) { - rv = APR_SUCCESS; - } + unsigned int dlen; + + digest = md_data_pmake(EVP_MAX_MD_SIZE, p); + ctx = EVP_MD_CTX_create(); + if (ctx) { + rv = APR_ENOTIMPL; + if (EVP_DigestInit_ex(ctx, EVP_sha256(), NULL)) { + rv = APR_EGENERAL; + if (EVP_DigestUpdate(ctx, (unsigned char*)buf->data, buf->len)) { + if (EVP_DigestFinal(ctx, (unsigned char*)digest->data, &dlen)) { + digest->len = dlen; + rv = APR_SUCCESS; } } } - - if (ctx) { - EVP_MD_CTX_destroy(ctx); - } } - - if (APR_SUCCESS == rv) { - *pdigest = buffer; - *pdigest_len = blen; - } - else { - md_log_perror(MD_LOG_MARK, MD_LOG_WARNING, rv, p, "digest"); - *pdigest = NULL; - *pdigest_len = 0; + if (ctx) { + EVP_MD_CTX_destroy(ctx); } + *pdigest = (APR_SUCCESS == rv)? digest : NULL; return rv; } -apr_status_t md_crypt_sha256_digest64(const char **pdigest64, apr_pool_t *p, - const char *d, size_t dlen) +apr_status_t md_crypt_sha256_digest64(const char **pdigest64, apr_pool_t *p, const md_data_t *d) { const char *digest64 = NULL; - unsigned char *buffer; - size_t blen; + md_data_t *digest; apr_status_t rv; - if (APR_SUCCESS == (rv = sha256_digest(&buffer, &blen, p, d, dlen))) { - if (NULL == (digest64 = md_util_base64url_encode((const char*)buffer, blen, p))) { + if (APR_SUCCESS == (rv = sha256_digest(&digest, p, d))) { + if (NULL == (digest64 = md_util_base64url_encode(digest, p))) { rv = APR_EGENERAL; } } @@ -632,47 +1113,41 @@ apr_status_t md_crypt_sha256_digest64(const char **pdigest64, apr_pool_t *p, return rv; } -static const char * const hex_const[] = { - "00", "01", "02", "03", "04", "05", "06", "07", "08", "09", "0a", "0b", "0c", "0d", "0e", "0f", - "10", "11", "12", "13", "14", "15", "16", "17", "18", "19", "1a", "1b", "1c", "1d", "1e", "1f", - "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", "2a", "2b", "2c", "2d", "2e", "2f", - "30", "31", "32", "33", "34", "35", "36", "37", "38", "39", "3a", "3b", "3c", "3d", "3e", "3f", - "40", "41", "42", "43", "44", "45", "46", "47", "48", "49", "4a", "4b", "4c", "4d", "4e", "4f", - "50", "51", "52", "53", "54", "55", "56", "57", "58", "59", "5a", "5b", "5c", "5d", "5e", "5f", - "60", "61", "62", "63", "64", "65", "66", "67", "68", "69", "6a", "6b", "6c", "6d", "6e", "6f", - "70", "71", "72", "73", "74", "75", "76", "77", "78", "79", "7a", "7b", "7c", "7d", "7e", "7f", - "80", "81", "82", "83", "84", "85", "86", "87", "88", "89", "8a", "8b", "8c", "8d", "8e", "8f", - "90", "91", "92", "93", "94", "95", "96", "97", "98", "99", "9a", "9b", "9c", "9d", "9e", "9f", - "a0", "a1", "a2", "a3", "a4", "a5", "a6", "a7", "a8", "a9", "aa", "ab", "ac", "ad", "ae", "af", - "b0", "b1", "b2", "b3", "b4", "b5", "b6", "b7", "b8", "b9", "ba", "bb", "bc", "bd", "be", "bf", - "c0", "c1", "c2", "c3", "c4", "c5", "c6", "c7", "c8", "c9", "ca", "cb", "cc", "cd", "ce", "cf", - "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7", "d8", "d9", "da", "db", "dc", "dd", "de", "df", - "e0", "e1", "e2", "e3", "e4", "e5", "e6", "e7", "e8", "e9", "ea", "eb", "ec", "ed", "ee", "ef", - "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7", "f8", "f9", "fa", "fb", "fc", "fd", "fe", "ff", -}; - apr_status_t md_crypt_sha256_digest_hex(const char **pdigesthex, apr_pool_t *p, - const char *d, size_t dlen) + const md_data_t *data) { - char *dhex = NULL, *cp; - const char * x; - unsigned char *buffer; - size_t blen; + md_data_t *digest; apr_status_t rv; - unsigned int i; - if (APR_SUCCESS 
== (rv = sha256_digest(&buffer, &blen, p, d, dlen))) { - cp = dhex = apr_pcalloc(p, 2 * blen + 1); - if (!dhex) { - rv = APR_EGENERAL; - } - for (i = 0; i < blen; ++i, cp += 2) { - x = hex_const[buffer[i]]; - cp[0] = x[0]; - cp[1] = x[1]; - } + if (APR_SUCCESS == (rv = sha256_digest(&digest, p, data))) { + return md_data_to_hex(pdigesthex, 0, p, digest); } - *pdigesthex = dhex; + *pdigesthex = NULL; + return rv; +} + +apr_status_t md_crypt_hmac64(const char **pmac64, const md_data_t *hmac_key, + apr_pool_t *p, const char *d, size_t dlen) +{ + const char *mac64 = NULL; + unsigned char *s; + unsigned int digest_len = 0; + md_data_t *digest; + apr_status_t rv = APR_SUCCESS; + + digest = md_data_pmake(EVP_MAX_MD_SIZE, p); + s = HMAC(EVP_sha256(), (const unsigned char*)hmac_key->data, (int)hmac_key->len, + (const unsigned char*)d, (size_t)dlen, + (unsigned char*)digest->data, &digest_len); + if (!s) { + rv = APR_EINVAL; + goto cleanup; + } + digest->len = digest_len; + mac64 = md_util_base64url_encode(digest, p); + +cleanup: + *pmac64 = (APR_SUCCESS == rv)? mac64 : NULL; return rv; } @@ -695,26 +1170,47 @@ static apr_status_t cert_cleanup(void *data) return APR_SUCCESS; } -static md_cert_t *make_cert(apr_pool_t *p, X509 *x509) +md_cert_t *md_cert_wrap(apr_pool_t *p, void *x509) { md_cert_t *cert = apr_pcalloc(p, sizeof(*cert)); cert->pool = p; cert->x509 = x509; - apr_pool_cleanup_register(p, cert, cert_cleanup, apr_pool_cleanup_null); - return cert; } -void md_cert_free(md_cert_t *cert) +md_cert_t *md_cert_make(apr_pool_t *p, void *x509) { - cert_cleanup(cert); + md_cert_t *cert = md_cert_wrap(p, x509); + apr_pool_cleanup_register(p, cert, cert_cleanup, apr_pool_cleanup_null); + return cert; } -void *md_cert_get_X509(struct md_cert_t *cert) +void *md_cert_get_X509(const md_cert_t *cert) { return cert->x509; } +const char *md_cert_get_serial_number(const md_cert_t *cert, apr_pool_t *p) +{ + const char *s = ""; + BIGNUM *bn; + const char *serial; + const ASN1_INTEGER *ai = X509_get_serialNumber(cert->x509); + if (ai) { + bn = ASN1_INTEGER_to_BN(ai, NULL); + serial = BN_bn2hex(bn); + s = apr_pstrdup(p, serial); + OPENSSL_free((void*)serial); + OPENSSL_free((void*)bn); + } + return s; +} + +int md_certs_are_equal(const md_cert_t *a, const md_cert_t *b) +{ + return X509_cmp(a->x509, b->x509) == 0; +} + int md_cert_is_valid_now(const md_cert_t *cert) { return ((X509_cmp_current_time(X509_get_notBefore(cert->x509)) < 0) @@ -726,23 +1222,31 @@ int md_cert_has_expired(const md_cert_t *cert) return (X509_cmp_current_time(X509_get_notAfter(cert->x509)) <= 0); } -apr_time_t md_cert_get_not_after(md_cert_t *cert) +apr_time_t md_cert_get_not_after(const md_cert_t *cert) { return md_asn1_time_get(X509_get_notAfter(cert->x509)); } -apr_time_t md_cert_get_not_before(md_cert_t *cert) +apr_time_t md_cert_get_not_before(const md_cert_t *cert) { return md_asn1_time_get(X509_get_notBefore(cert->x509)); } +md_timeperiod_t md_cert_get_valid(const md_cert_t *cert) +{ + md_timeperiod_t p; + p.start = md_cert_get_not_before(cert); + p.end = md_cert_get_not_after(cert); + return p; +} + int md_cert_covers_domain(md_cert_t *cert, const char *domain_name) { - if (!cert->alt_names) { - md_cert_get_alt_names(&cert->alt_names, cert, cert->pool); - } - if (cert->alt_names) { - return md_array_str_index(cert->alt_names, domain_name, 0, 0) >= 0; + apr_array_header_t *alt_names; + + md_cert_get_alt_names(&alt_names, cert, cert->pool); + if (alt_names) { + return md_array_str_index(alt_names, domain_name, 0, 0) >= 0; } return 0; } 
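[Illustrative note, not part of the upstream patch: the md_crypt helpers in the hunks above now work on an md_data_t buffer (fields .data/.len) instead of a raw pointer/length pair. A minimal caller sketch, assuming an existing apr_pool_t *p and using only the signatures visible in this diff; the header locations, the input bytes and the HMAC key below are placeholders/assumptions:]

    #include <string.h>
    #include <apr_pools.h>
    #include "md_util.h"   /* md_data_t; assumed to declare md_data_null() */
    #include "md_crypt.h"  /* assumed to declare the digest/HMAC helpers shown above */

    static apr_status_t digest_sketch(apr_pool_t *p)
    {
        md_data_t in, key;
        const char *digest64, *hex, *mac64;
        apr_status_t rv;

        md_data_null(&in);
        in.data = "hello, acme";              /* placeholder payload */
        in.len = strlen(in.data);
        md_data_null(&key);
        key.data = "0123456789abcdef";        /* placeholder HMAC key material */
        key.len = 16;

        /* SHA-256 of the buffer, base64url encoded */
        rv = md_crypt_sha256_digest64(&digest64, p, &in);
        if (APR_SUCCESS != rv) return rv;
        /* the same digest, hex encoded (fingerprint-style string) */
        rv = md_crypt_sha256_digest_hex(&hex, p, &in);
        if (APR_SUCCESS != rv) return rv;
        /* HMAC-SHA256 over the raw bytes, base64url encoded */
        return md_crypt_hmac64(&mac64, &key, p, in.data, in.len);
    }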
@@ -760,7 +1264,7 @@ int md_cert_covers_md(md_cert_t *cert, const md_t *md) cert->alt_names->nelts); for (i = 0; i < md->domains->nelts; ++i) { name = APR_ARRAY_IDX(md->domains, i, const char *); - if (md_array_str_index(cert->alt_names, name, 0, 0) < 0) { + if (!md_dns_domains_match(cert->alt_names, name)) { md_log_perror(MD_LOG_MARK, MD_LOG_TRACE1, 0, cert->pool, "md domain %s not covered by cert", name); return 0; @@ -774,7 +1278,7 @@ int md_cert_covers_md(md_cert_t *cert, const md_t *md) return 0; } -apr_status_t md_cert_get_issuers_uri(const char **puri, md_cert_t *cert, apr_pool_t *p) +apr_status_t md_cert_get_issuers_uri(const char **puri, const md_cert_t *cert, apr_pool_t *p) { apr_status_t rv = APR_ENOENT; STACK_OF(ACCESS_DESCRIPTION) *xinfos; @@ -801,17 +1305,19 @@ apr_status_t md_cert_get_issuers_uri(const char **puri, md_cert_t *cert, apr_poo return rv; } -apr_status_t md_cert_get_alt_names(apr_array_header_t **pnames, md_cert_t *cert, apr_pool_t *p) +apr_status_t md_cert_get_alt_names(apr_array_header_t **pnames, const md_cert_t *cert, apr_pool_t *p) { apr_array_header_t *names; apr_status_t rv = APR_ENOENT; STACK_OF(GENERAL_NAME) *xalt_names; unsigned char *buf; int i; - + xalt_names = X509_get_ext_d2i(cert->x509, NID_subject_alt_name, NULL, NULL); if (xalt_names) { GENERAL_NAME *cval; + const unsigned char *ip; + int len; names = apr_array_make(p, sk_GENERAL_NAME_num(xalt_names), sizeof(char *)); for (i = 0; i < sk_GENERAL_NAME_num(xalt_names); ++i) { @@ -819,11 +1325,33 @@ apr_status_t md_cert_get_alt_names(apr_array_header_t **pnames, md_cert_t *cert, switch (cval->type) { case GEN_DNS: case GEN_URI: - case GEN_IPADD: ASN1_STRING_to_UTF8(&buf, cval->d.ia5); APR_ARRAY_PUSH(names, const char *) = apr_pstrdup(p, (char*)buf); OPENSSL_free(buf); break; + case GEN_IPADD: + len = ASN1_STRING_length(cval->d.iPAddress); +#if OPENSSL_VERSION_NUMBER < 0x10100000L + ip = ASN1_STRING_data(cval->d.iPAddress); +#else + ip = ASN1_STRING_get0_data(cval->d.iPAddress); +#endif + if (len == 4) /* IPv4 address */ + APR_ARRAY_PUSH(names, const char *) = apr_psprintf(p, "%u.%u.%u.%u", + ip[0], ip[1], ip[2], ip[3]); + else if (len == 16) /* IPv6 address */ + APR_ARRAY_PUSH(names, const char *) = apr_psprintf(p, "%02x%02x%02x%02x:" + "%02x%02x%02x%02x:" + "%02x%02x%02x%02x:" + "%02x%02x%02x%02x", + ip[0], ip[1], ip[2], ip[3], + ip[4], ip[5], ip[6], ip[7], + ip[8], ip[9], ip[10], ip[11], + ip[12], ip[13], ip[14], ip[15]); + else { + ; /* Unknown address type - Log? Assert? 
*/ + } + break; default: break; } @@ -848,7 +1376,7 @@ apr_status_t md_cert_fload(md_cert_t **pcert, apr_pool_t *p, const char *fname) x509 = PEM_read_X509(f, NULL, NULL, NULL); rv = fclose(f); if (x509 != NULL) { - cert = make_cert(p, x509); + cert = md_cert_make(p, x509); } else { rv = APR_EINVAL; @@ -859,7 +1387,7 @@ apr_status_t md_cert_fload(md_cert_t **pcert, apr_pool_t *p, const char *fname) return rv; } -static apr_status_t cert_to_buffer(buffer_rec *buffer, md_cert_t *cert, apr_pool_t *p) +static apr_status_t cert_to_buffer(md_data_t *buffer, const md_cert_t *cert, apr_pool_t *p) { BIO *bio = BIO_new(BIO_s_mem()); int i; @@ -877,9 +1405,8 @@ static apr_status_t cert_to_buffer(buffer_rec *buffer, md_cert_t *cert, apr_pool i = BIO_pending(bio); if (i > 0) { - buffer->data = apr_palloc(p, (apr_size_t)i + 1); - i = BIO_read(bio, buffer->data, i); - buffer->data[i] = '\0'; + buffer->data = apr_palloc(p, (apr_size_t)i); + i = BIO_read(bio, (char*)buffer->data, i); buffer->len = (apr_size_t)i; } BIO_free(bio); @@ -889,64 +1416,194 @@ static apr_status_t cert_to_buffer(buffer_rec *buffer, md_cert_t *cert, apr_pool apr_status_t md_cert_fsave(md_cert_t *cert, apr_pool_t *p, const char *fname, apr_fileperms_t perms) { - buffer_rec buffer; + md_data_t buffer; apr_status_t rv; - + + md_data_null(&buffer); if (APR_SUCCESS == (rv = cert_to_buffer(&buffer, cert, p))) { return md_util_freplace(fname, perms, p, fwrite_buffer, &buffer); } return rv; } -apr_status_t md_cert_to_base64url(const char **ps64, md_cert_t *cert, apr_pool_t *p) +apr_status_t md_cert_to_base64url(const char **ps64, const md_cert_t *cert, apr_pool_t *p) { - buffer_rec buffer; + md_data_t buffer; apr_status_t rv; - + + md_data_null(&buffer); if (APR_SUCCESS == (rv = cert_to_buffer(&buffer, cert, p))) { - *ps64 = md_util_base64url_encode(buffer.data, buffer.len, p); + *ps64 = md_util_base64url_encode(&buffer, p); return APR_SUCCESS; } *ps64 = NULL; return rv; } +apr_status_t md_cert_to_sha256_digest(md_data_t **pdigest, const md_cert_t *cert, apr_pool_t *p) +{ + md_data_t *digest; + unsigned int dlen; + + digest = md_data_pmake(EVP_MAX_MD_SIZE, p); + X509_digest(cert->x509, EVP_sha256(), (unsigned char*)digest->data, &dlen); + digest->len = dlen; + + *pdigest = digest; + return APR_SUCCESS; +} + +apr_status_t md_cert_to_sha256_fingerprint(const char **pfinger, const md_cert_t *cert, apr_pool_t *p) +{ + md_data_t *digest; + apr_status_t rv; + + rv = md_cert_to_sha256_digest(&digest, cert, p); + if (APR_SUCCESS == rv) { + return md_data_to_hex(pfinger, 0, p, digest); + } + *pfinger = NULL; + return rv; +} + +static int md_cert_read_pem(BIO *bf, apr_pool_t *p, md_cert_t **pcert) +{ + md_cert_t *cert; + X509 *x509; + apr_status_t rv = APR_ENOENT; + + ERR_clear_error(); + x509 = PEM_read_bio_X509(bf, NULL, NULL, NULL); + if (x509 == NULL) goto cleanup; + cert = md_cert_make(p, x509); + rv = APR_SUCCESS; +cleanup: + *pcert = (APR_SUCCESS == rv)? 
cert : NULL; + return rv; +} + +apr_status_t md_cert_read_chain(apr_array_header_t *chain, apr_pool_t *p, + const char *pem, apr_size_t pem_len) +{ + BIO *bf = NULL; + apr_status_t rv = APR_SUCCESS; + md_cert_t *cert; + int added = 0; + + if (NULL == (bf = BIO_new_mem_buf(pem, (int)pem_len))) { + rv = APR_ENOMEM; + goto cleanup; + } + while (APR_SUCCESS == (rv = md_cert_read_pem(bf, chain->pool, &cert))) { + APR_ARRAY_PUSH(chain, md_cert_t *) = cert; + added = 1; + } + if (APR_ENOENT == rv && added) { + rv = APR_SUCCESS; + } + +cleanup: + md_log_perror(MD_LOG_MARK, MD_LOG_TRACE2, rv, p, "read chain with %d certs", chain->nelts); + if (bf) BIO_free(bf); + return rv; +} + apr_status_t md_cert_read_http(md_cert_t **pcert, apr_pool_t *p, const md_http_response_t *res) { const char *ct; apr_off_t data_len; + char *der; apr_size_t der_len; + md_cert_t *cert = NULL; apr_status_t rv; ct = apr_table_get(res->headers, "Content-Type"); - if (!res->body || !ct || strcmp("application/pkix-cert", ct)) { - return APR_ENOENT; + ct = md_util_parse_ct(res->req->pool, ct); + if (!res->body || !ct || strcmp("application/pkix-cert", ct)) { + rv = APR_ENOENT; + goto out; } if (APR_SUCCESS == (rv = apr_brigade_length(res->body, 1, &data_len))) { - char *der; if (data_len > 1024*1024) { /* certs usually are <2k each */ return APR_EINVAL; } - if (APR_SUCCESS == (rv = apr_brigade_pflatten(res->body, &der, &der_len, p))) { + if (APR_SUCCESS == (rv = apr_brigade_pflatten(res->body, &der, &der_len, res->req->pool))) { const unsigned char *bf = (const unsigned char*)der; X509 *x509; if (NULL == (x509 = d2i_X509(NULL, &bf, (long)der_len))) { rv = APR_EINVAL; + goto out; } else { - *pcert = make_cert(p, x509); + cert = md_cert_make(p, x509); rv = APR_SUCCESS; + md_log_perror(MD_LOG_MARK, MD_LOG_TRACE2, rv, p, + "parsing cert from content-type=%s, content-length=%ld", ct, (long)data_len); } } - md_log_perror(MD_LOG_MARK, MD_LOG_TRACE3, rv, p, "cert parsed"); } +out: + *pcert = (APR_SUCCESS == rv)? 
cert : NULL; return rv; } -md_cert_state_t md_cert_state_get(md_cert_t *cert) +apr_status_t md_cert_chain_read_http(struct apr_array_header_t *chain, + apr_pool_t *p, const struct md_http_response_t *res) +{ + const char *ct = NULL; + apr_off_t blen; + apr_size_t data_len = 0; + char *data; + md_cert_t *cert; + apr_status_t rv = APR_ENOENT; + + md_log_perror(MD_LOG_MARK, MD_LOG_TRACE2, 0, p, + "chain_read, processing %d response", res->status); + if (APR_SUCCESS != (rv = apr_brigade_length(res->body, 1, &blen))) goto cleanup; + if (blen > 1024*1024) { /* certs usually are <2k each */ + rv = APR_EINVAL; + goto cleanup; + } + + data_len = (apr_size_t)blen; + ct = apr_table_get(res->headers, "Content-Type"); + if (!res->body || !ct) goto cleanup; + ct = md_util_parse_ct(res->req->pool, ct); + if (!strcmp("application/pkix-cert", ct)) { + rv = md_cert_read_http(&cert, p, res); + if (APR_SUCCESS != rv) goto cleanup; + APR_ARRAY_PUSH(chain, md_cert_t *) = cert; + } + else if (!strcmp("application/pem-certificate-chain", ct) + || !strncmp("text/plain", ct, sizeof("text/plain")-1)) { + /* Some servers seem to think 'text/plain' is sufficient, see #232 */ + rv = apr_brigade_pflatten(res->body, &data, &data_len, res->req->pool); + if (APR_SUCCESS != rv) goto cleanup; + rv = md_cert_read_chain(chain, res->req->pool, data, data_len); + } + else { + md_log_perror(MD_LOG_MARK, MD_LOG_DEBUG, 0, p, + "attempting to parse certificates from unrecognized content-type: %s", ct); + rv = apr_brigade_pflatten(res->body, &data, &data_len, res->req->pool); + if (APR_SUCCESS != rv) goto cleanup; + rv = md_cert_read_chain(chain, res->req->pool, data, data_len); + if (APR_SUCCESS == rv && chain->nelts == 0) { + md_log_perror(MD_LOG_MARK, MD_LOG_ERR, 0, p, + "certificate chain response did not contain any certificates " + "(suspicious content-type: %s)", ct); + rv = APR_ENOENT; + } + } +cleanup: + md_log_perror(MD_LOG_MARK, MD_LOG_TRACE2, rv, p, + "parsed certs from content-type=%s, content-length=%ld", ct, (long)data_len); + return rv; +} + +md_cert_state_t md_cert_state_get(const md_cert_t *cert) { if (cert->x509) { return md_cert_is_valid_now(cert)? MD_CERT_VALID : MD_CERT_EXPIRED; @@ -966,7 +1623,7 @@ apr_status_t md_chain_fappend(struct apr_array_header_t *certs, apr_pool_t *p, c if (rv == APR_SUCCESS) { ERR_clear_error(); while (NULL != (x509 = PEM_read_X509(f, NULL, NULL, NULL))) { - cert = make_cert(p, x509); + cert = md_cert_make(p, x509); APR_ARRAY_PUSH(certs, md_cert_t *) = cert; } fclose(f); @@ -1064,18 +1721,22 @@ static apr_status_t add_ext(X509 *x, int nid, const char *value, apr_pool_t *p) X509V3_CTX ctx; apr_status_t rv; + ERR_clear_error(); X509V3_set_ctx_nodb(&ctx); X509V3_set_ctx(&ctx, x, x, NULL, NULL, 0); if (NULL == (ext = X509V3_EXT_conf_nid(NULL, &ctx, nid, (char*)value))) { + unsigned long err = ERR_get_error(); + md_log_perror(MD_LOG_MARK, MD_LOG_ERR, 0, p, "add_ext, create, nid=%d value='%s' " + "(lib=%d, reason=%d)", nid, value, ERR_GET_LIB(err), ERR_GET_REASON(err)); return APR_EGENERAL; } ERR_clear_error(); rv = X509_add_ext(x, ext, -1)? 
APR_SUCCESS : APR_EINVAL; if (APR_SUCCESS != rv) { - md_log_perror(MD_LOG_MARK, MD_LOG_ERR, 0, p, "add_ext nid=%dd value='%s'", - nid, value); - + unsigned long err = ERR_get_error(); + md_log_perror(MD_LOG_MARK, MD_LOG_ERR, 0, p, "add_ext, add, nid=%d value='%s' " + "(lib=%d, reason=%d)", nid, value, ERR_GET_LIB(err), ERR_GET_REASON(err)); } X509_EXTENSION_free(ext); return rv; @@ -1100,116 +1761,114 @@ static apr_status_t sk_add_alt_names(STACK_OF(X509_EXTENSION) *exts, #define MD_OID_MUST_STAPLE_SNAME "tlsfeature" #define MD_OID_MUST_STAPLE_LNAME "TLS Feature" -static int get_must_staple_nid(void) -{ - /* Funny API, the OID for must staple might be configured or - * might be not. In the second case, we need to add it. But adding - * when it already is there is an error... */ - int nid = OBJ_txt2nid(MD_OID_MUST_STAPLE_NUM); - if (NID_undef == nid) { - nid = OBJ_create(MD_OID_MUST_STAPLE_NUM, - MD_OID_MUST_STAPLE_SNAME, MD_OID_MUST_STAPLE_LNAME); - } - return nid; -} - -int md_cert_must_staple(md_cert_t *cert) +int md_cert_must_staple(const md_cert_t *cert) { /* In case we do not get the NID for it, we treat this as not set. */ - int nid = get_must_staple_nid(); + int nid = MD_GET_NID(MUST_STAPLE); return ((NID_undef != nid)) && X509_get_ext_by_NID(cert->x509, nid, -1) >= 0; } -static apr_status_t add_must_staple(STACK_OF(X509_EXTENSION) *exts, const md_t *md, apr_pool_t *p) +static apr_status_t add_must_staple(STACK_OF(X509_EXTENSION) *exts, const char *name, apr_pool_t *p) { + X509_EXTENSION *x; + int nid; - if (md->must_staple) { - X509_EXTENSION *x; - int nid; - - nid = get_must_staple_nid(); - if (NID_undef == nid) { - md_log_perror(MD_LOG_MARK, MD_LOG_ERR, 0, p, - "%s: unable to get NID for v3 must-staple TLS feature", md->name); - return APR_ENOTIMPL; - } - x = X509V3_EXT_conf_nid(NULL, NULL, nid, (char*)"DER:30:03:02:01:05"); - if (NULL == x) { - md_log_perror(MD_LOG_MARK, MD_LOG_ERR, 0, p, - "%s: unable to create x509 extension for must-staple", md->name); - return APR_EGENERAL; - } - sk_X509_EXTENSION_push(exts, x); + nid = MD_GET_NID(MUST_STAPLE); + if (NID_undef == nid) { + md_log_perror(MD_LOG_MARK, MD_LOG_ERR, 0, p, + "%s: unable to get NID for v3 must-staple TLS feature", name); + return APR_ENOTIMPL; + } + x = X509V3_EXT_conf_nid(NULL, NULL, nid, (char*)"DER:30:03:02:01:05"); + if (NULL == x) { + md_log_perror(MD_LOG_MARK, MD_LOG_ERR, 0, p, + "%s: unable to create x509 extension for must-staple", name); + return APR_EGENERAL; } + sk_X509_EXTENSION_push(exts, x); return APR_SUCCESS; } -apr_status_t md_cert_req_create(const char **pcsr_der_64, const md_t *md, +apr_status_t md_cert_req_create(const char **pcsr_der_64, const char *name, + apr_array_header_t *domains, int must_staple, md_pkey_t *pkey, apr_pool_t *p) { - const char *s, *csr_der, *csr_der_64 = NULL; + const char *s, *csr_der_64 = NULL; const unsigned char *domain; X509_REQ *csr; X509_NAME *n = NULL; STACK_OF(X509_EXTENSION) *exts = NULL; apr_status_t rv; + md_data_t csr_der; int csr_der_len; - assert(md->domains->nelts > 0); - + assert(domains->nelts > 0); + md_data_null(&csr_der); + if (NULL == (csr = X509_REQ_new()) || NULL == (exts = sk_X509_EXTENSION_new_null()) || NULL == (n = X509_NAME_new())) { rv = APR_ENOMEM; - md_log_perror(MD_LOG_MARK, MD_LOG_ERR, rv, p, "%s: openssl alloc X509 things", md->name); + md_log_perror(MD_LOG_MARK, MD_LOG_ERR, rv, p, "%s: openssl alloc X509 things", name); goto out; } /* subject name == first domain */ - domain = APR_ARRAY_IDX(md->domains, 0, const unsigned char *); - if 
(!X509_NAME_add_entry_by_txt(n, "CN", MBSTRING_ASC, domain, -1, -1, 0) - || !X509_REQ_set_subject_name(csr, n)) { - md_log_perror(MD_LOG_MARK, MD_LOG_ERR, 0, p, "%s: REQ name add entry", md->name); + domain = APR_ARRAY_IDX(domains, 0, const unsigned char *); + /* Do not set the domain in the CN if it is longer than 64 octets. + * Instead, let the CA choose a 'proper' name. At the moment (2021-01), LE will + * inspect all SAN names and use one < 64 chars if it can be found. It will fail + * otherwise. + * The reason we do not check this beforehand is that the restrictions on CNs + * are in flux. They used to be authoritative, now browsers no longer do that, but + * no one wants to hand out a cert with "google.com" as CN either. So, we leave + * it for the CA to decide if and how it hands out a cert for this or fails. + * This solves issue where the name is too long, see #227 */ + if (strlen((const char*)domain) < 64 + && (!X509_NAME_add_entry_by_txt(n, "CN", MBSTRING_ASC, domain, -1, -1, 0) + || !X509_REQ_set_subject_name(csr, n))) { + md_log_perror(MD_LOG_MARK, MD_LOG_ERR, 0, p, "%s: REQ name add entry", name); rv = APR_EGENERAL; goto out; } /* collect extensions, such as alt names and must staple */ - if (APR_SUCCESS != (rv = sk_add_alt_names(exts, md->domains, p))) { - md_log_perror(MD_LOG_MARK, MD_LOG_ERR, rv, p, "%s: collecting alt names", md->name); + if (APR_SUCCESS != (rv = sk_add_alt_names(exts, domains, p))) { + md_log_perror(MD_LOG_MARK, MD_LOG_ERR, rv, p, "%s: collecting alt names", name); rv = APR_EGENERAL; goto out; } - if (APR_SUCCESS != (rv = add_must_staple(exts, md, p))) { + if (must_staple && APR_SUCCESS != (rv = add_must_staple(exts, name, p))) { md_log_perror(MD_LOG_MARK, MD_LOG_ERR, rv, p, "%s: you requested that a certificate " "is created with the 'must-staple' extension, however the SSL library was " "unable to initialized that extension. Please file a bug report on which platform " "and with which library this happens. 
To continue before this problem is resolved, " - "configure 'MDMustStaple off' for your domains", md->name); + "configure 'MDMustStaple off' for your domains", name); rv = APR_EGENERAL; goto out; } /* add extensions to csr */ if (sk_X509_EXTENSION_num(exts) > 0 && !X509_REQ_add_extensions(csr, exts)) { - md_log_perror(MD_LOG_MARK, MD_LOG_ERR, rv, p, "%s: adding exts", md->name); + md_log_perror(MD_LOG_MARK, MD_LOG_ERR, rv, p, "%s: adding exts", name); rv = APR_EGENERAL; goto out; } /* add our key */ if (!X509_REQ_set_pubkey(csr, pkey->pkey)) { - md_log_perror(MD_LOG_MARK, MD_LOG_ERR, rv, p, "%s: set pkey in csr", md->name); + md_log_perror(MD_LOG_MARK, MD_LOG_ERR, rv, p, "%s: set pkey in csr", name); rv = APR_EGENERAL; goto out; } /* sign, der encode and base64url encode */ - if (!X509_REQ_sign(csr, pkey->pkey, EVP_sha256())) { - md_log_perror(MD_LOG_MARK, MD_LOG_ERR, rv, p, "%s: sign csr", md->name); + if (!X509_REQ_sign(csr, pkey->pkey, pkey_get_MD(pkey))) { + md_log_perror(MD_LOG_MARK, MD_LOG_ERR, rv, p, "%s: sign csr", name); rv = APR_EGENERAL; goto out; } if ((csr_der_len = i2d_X509_REQ(csr, NULL)) < 0) { - md_log_perror(MD_LOG_MARK, MD_LOG_ERR, rv, p, "%s: der length", md->name); + md_log_perror(MD_LOG_MARK, MD_LOG_ERR, rv, p, "%s: der length", name); rv = APR_EGENERAL; goto out; } - s = csr_der = apr_pcalloc(p, (apr_size_t)csr_der_len + 1); + csr_der.len = (apr_size_t)csr_der_len; + s = csr_der.data = apr_pcalloc(p, csr_der.len + 1); if (i2d_X509_REQ(csr, (unsigned char**)&s) != csr_der_len) { - md_log_perror(MD_LOG_MARK, MD_LOG_ERR, rv, p, "%s: csr der enc", md->name); + md_log_perror(MD_LOG_MARK, MD_LOG_ERR, rv, p, "%s: csr der enc", name); rv = APR_EGENERAL; goto out; } - csr_der_64 = md_util_base64url_encode(csr_der, (apr_size_t)csr_der_len, p); + csr_der_64 = md_util_base64url_encode(&csr_der, p); rv = APR_SUCCESS; out: @@ -1226,20 +1885,16 @@ out: return rv; } -apr_status_t md_cert_self_sign(md_cert_t **pcert, const char *cn, - apr_array_header_t *domains, md_pkey_t *pkey, - apr_interval_time_t valid_for, apr_pool_t *p) +static apr_status_t mk_x509(X509 **px, md_pkey_t *pkey, const char *cn, + apr_interval_time_t valid_for, apr_pool_t *p) { - X509 *x; + X509 *x = NULL; X509_NAME *n = NULL; - md_cert_t *cert = NULL; - apr_status_t rv; - int days; BIGNUM *big_rnd = NULL; ASN1_INTEGER *asn1_rnd = NULL; unsigned char rnd[20]; - - assert(domains); + int days; + apr_status_t rv; if (NULL == (x = X509_new()) || NULL == (n = X509_NAME_new())) { @@ -1247,24 +1902,22 @@ apr_status_t md_cert_self_sign(md_cert_t **pcert, const char *cn, md_log_perror(MD_LOG_MARK, MD_LOG_ERR, 0, p, "%s: openssl alloc X509 things", cn); goto out; } - + if (APR_SUCCESS != (rv = md_rand_bytes(rnd, sizeof(rnd), p)) || !(big_rnd = BN_bin2bn(rnd, sizeof(rnd), NULL)) || !(asn1_rnd = BN_to_ASN1_INTEGER(big_rnd, NULL))) { md_log_perror(MD_LOG_MARK, MD_LOG_ERR, 0, p, "%s: setup random serial", cn); rv = APR_EGENERAL; goto out; } - - if (1 != X509_set_version(x, 2L)) { - md_log_perror(MD_LOG_MARK, MD_LOG_ERR, 0, p, "%s: setting x.509v3", cn); - rv = APR_EGENERAL; goto out; - } - if (!X509_set_serialNumber(x, asn1_rnd)) { md_log_perror(MD_LOG_MARK, MD_LOG_ERR, 0, p, "%s: set serial number", cn); rv = APR_EGENERAL; goto out; } - /* set common name and issue */ + if (1 != X509_set_version(x, 2L)) { + md_log_perror(MD_LOG_MARK, MD_LOG_ERR, 0, p, "%s: setting x.509v3", cn); + rv = APR_EGENERAL; goto out; + } + /* set common name and issuer */ if (!X509_NAME_add_entry_by_txt(n, "CN", MBSTRING_ASC, (const unsigned char*)cn, 
-1, -1, 0) || !X509_set_subject_name(x, n) || !X509_set_issuer_name(x, n)) { @@ -1272,21 +1925,16 @@ apr_status_t md_cert_self_sign(md_cert_t **pcert, const char *cn, rv = APR_EGENERAL; goto out; } /* cert are unconstrained (but not very trustworthy) */ - if (APR_SUCCESS != (rv = add_ext(x, NID_basic_constraints, "CA:FALSE, pathlen:0", p))) { + if (APR_SUCCESS != (rv = add_ext(x, NID_basic_constraints, "critical,CA:FALSE", p))) { md_log_perror(MD_LOG_MARK, MD_LOG_ERR, rv, p, "%s: set basic constraints ext", cn); goto out; } - /* add the domain as alt name */ - if (APR_SUCCESS != (rv = add_ext(x, NID_subject_alt_name, alt_names(domains, p), p))) { - md_log_perror(MD_LOG_MARK, MD_LOG_ERR, rv, p, "%s: set alt_name ext", cn); - goto out; - } /* add our key */ if (!X509_set_pubkey(x, pkey->pkey)) { md_log_perror(MD_LOG_MARK, MD_LOG_ERR, rv, p, "%s: set pkey in x509", cn); rv = APR_EGENERAL; goto out; } - + /* validity */ days = (int)((apr_time_sec(valid_for) + MD_SECS_PER_DAY - 1)/ MD_SECS_PER_DAY); if (!X509_set_notBefore(x, ASN1_TIME_set(NULL, time(NULL)))) { rv = APR_EGENERAL; goto out; @@ -1295,29 +1943,217 @@ apr_status_t md_cert_self_sign(md_cert_t **pcert, const char *cn, rv = APR_EGENERAL; goto out; } +out: + *px = (APR_SUCCESS == rv)? x : NULL; + if (APR_SUCCESS != rv && x) X509_free(x); + if (big_rnd) BN_free(big_rnd); + if (asn1_rnd) ASN1_INTEGER_free(asn1_rnd); + if (n) X509_NAME_free(n); + return rv; +} + +apr_status_t md_cert_self_sign(md_cert_t **pcert, const char *cn, + apr_array_header_t *domains, md_pkey_t *pkey, + apr_interval_time_t valid_for, apr_pool_t *p) +{ + X509 *x; + md_cert_t *cert = NULL; + apr_status_t rv; + + assert(domains); + + if (APR_SUCCESS != (rv = mk_x509(&x, pkey, cn, valid_for, p))) goto out; + + /* add the domain as alt name */ + if (APR_SUCCESS != (rv = add_ext(x, NID_subject_alt_name, alt_names(domains, p), p))) { + md_log_perror(MD_LOG_MARK, MD_LOG_ERR, rv, p, "%s: set alt_name ext", cn); + goto out; + } + + /* keyUsage, ExtendedKeyUsage */ + + if (APR_SUCCESS != (rv = add_ext(x, NID_key_usage, "critical,digitalSignature", p))) { + md_log_perror(MD_LOG_MARK, MD_LOG_ERR, rv, p, "%s: set keyUsage", cn); + goto out; + } + if (APR_SUCCESS != (rv = add_ext(x, NID_ext_key_usage, "serverAuth", p))) { + md_log_perror(MD_LOG_MARK, MD_LOG_ERR, rv, p, "%s: set extKeyUsage", cn); + goto out; + } + /* sign with same key */ - if (!X509_sign(x, pkey->pkey, EVP_sha256())) { + if (!X509_sign(x, pkey->pkey, pkey_get_MD(pkey))) { md_log_perror(MD_LOG_MARK, MD_LOG_ERR, rv, p, "%s: sign x509", cn); rv = APR_EGENERAL; goto out; } - cert = make_cert(p, x); + cert = md_cert_make(p, x); + rv = APR_SUCCESS; + +out: + *pcert = (APR_SUCCESS == rv)? 
cert : NULL; + if (!cert && x) X509_free(x); + return rv; +} + +#define MD_OID_ACME_VALIDATION_NUM "1.3.6.1.5.5.7.1.31" +#define MD_OID_ACME_VALIDATION_SNAME "pe-acmeIdentifier" +#define MD_OID_ACME_VALIDATION_LNAME "ACME Identifier" + +static int get_acme_validation_nid(void) +{ + int nid = OBJ_txt2nid(MD_OID_ACME_VALIDATION_NUM); + if (NID_undef == nid) { + nid = OBJ_create(MD_OID_ACME_VALIDATION_NUM, + MD_OID_ACME_VALIDATION_SNAME, MD_OID_ACME_VALIDATION_LNAME); + } + return nid; +} + +apr_status_t md_cert_make_tls_alpn_01(md_cert_t **pcert, const char *domain, + const char *acme_id, md_pkey_t *pkey, + apr_interval_time_t valid_for, apr_pool_t *p) +{ + X509 *x; + md_cert_t *cert = NULL; + const char *alts; + apr_status_t rv; + + if (APR_SUCCESS != (rv = mk_x509(&x, pkey, "tls-alpn-01-challenge", valid_for, p))) goto out; + + /* add the domain as alt name */ + alts = apr_psprintf(p, "DNS:%s", domain); + if (APR_SUCCESS != (rv = add_ext(x, NID_subject_alt_name, alts, p))) { + md_log_perror(MD_LOG_MARK, MD_LOG_ERR, rv, p, "%s: set alt_name ext", domain); + goto out; + } + + if (APR_SUCCESS != (rv = add_ext(x, get_acme_validation_nid(), acme_id, p))) { + md_log_perror(MD_LOG_MARK, MD_LOG_ERR, rv, p, "%s: set pe-acmeIdentifier", domain); + goto out; + } + + /* sign with same key */ + if (!X509_sign(x, pkey->pkey, pkey_get_MD(pkey))) { + md_log_perror(MD_LOG_MARK, MD_LOG_ERR, rv, p, "%s: sign x509", domain); + rv = APR_EGENERAL; goto out; + } + + cert = md_cert_make(p, x); rv = APR_SUCCESS; out: if (!cert && x) { X509_free(x); } - if (n) { - X509_NAME_free(n); + *pcert = (APR_SUCCESS == rv)? cert : NULL; + return rv; +} + +#define MD_OID_CT_SCTS_NUM "1.3.6.1.4.1.11129.2.4.2" +#define MD_OID_CT_SCTS_SNAME "CT-SCTs" +#define MD_OID_CT_SCTS_LNAME "CT Certificate SCTs" + +#ifndef OPENSSL_NO_CT +static int get_ct_scts_nid(void) +{ + int nid = OBJ_txt2nid(MD_OID_CT_SCTS_NUM); + if (NID_undef == nid) { + nid = OBJ_create(MD_OID_CT_SCTS_NUM, + MD_OID_CT_SCTS_SNAME, MD_OID_CT_SCTS_LNAME); } - if (big_rnd) { - BN_free(big_rnd); + return nid; +} +#endif + +const char *md_nid_get_sname(int nid) +{ + return OBJ_nid2sn(nid); +} + +const char *md_nid_get_lname(int nid) +{ + return OBJ_nid2ln(nid); +} + +apr_status_t md_cert_get_ct_scts(apr_array_header_t *scts, apr_pool_t *p, const md_cert_t *cert) +{ +#ifndef OPENSSL_NO_CT + int nid, i, idx, critical; + STACK_OF(SCT) *sct_list; + SCT *sct_handle; + md_sct *sct; + size_t len; + const char *data; + + nid = get_ct_scts_nid(); + if (NID_undef == nid) return APR_ENOTIMPL; + + idx = -1; + while (1) { + sct_list = X509_get_ext_d2i(cert->x509, nid, &critical, &idx); + if (sct_list) { + for (i = 0; i < sk_SCT_num(sct_list); i++) { + sct_handle = sk_SCT_value(sct_list, i); + if (sct_handle) { + sct = apr_pcalloc(p, sizeof(*sct)); + sct->version = SCT_get_version(sct_handle); + sct->timestamp = apr_time_from_msec(SCT_get_timestamp(sct_handle)); + len = SCT_get0_log_id(sct_handle, (unsigned char**)&data); + sct->logid = md_data_make_pcopy(p, data, len); + sct->signature_type_nid = SCT_get_signature_nid(sct_handle); + len = SCT_get0_signature(sct_handle, (unsigned char**)&data); + sct->signature = md_data_make_pcopy(p, data, len); + + APR_ARRAY_PUSH(scts, md_sct*) = sct; + } + } + } + if (idx < 0) break; } - if (asn1_rnd) { - ASN1_INTEGER_free(asn1_rnd); + md_log_perror(MD_LOG_MARK, MD_LOG_TRACE3, 0, p, "ct_sct, found %d SCT extensions", scts->nelts); + return APR_SUCCESS; +#else + (void)scts; + (void)p; + (void)cert; + return APR_ENOTIMPL; +#endif +} + +apr_status_t 
md_cert_get_ocsp_responder_url(const char **purl, apr_pool_t *p, const md_cert_t *cert) +{ + STACK_OF(OPENSSL_STRING) *ssk; + apr_status_t rv = APR_SUCCESS; + const char *url = NULL; + + ssk = X509_get1_ocsp(md_cert_get_X509(cert)); + if (!ssk) { + rv = APR_ENOENT; + goto cleanup; } - *pcert = (APR_SUCCESS == rv)? cert : NULL; + url = apr_pstrdup(p, sk_OPENSSL_STRING_value(ssk, 0)); + md_log_perror(MD_LOG_MARK, MD_LOG_TRACE2, 0, p, "ocsp responder found '%s'", url); + +cleanup: + if (ssk) X509_email_free(ssk); + *purl = url; return rv; } +apr_status_t md_check_cert_and_pkey(struct apr_array_header_t *certs, md_pkey_t *pkey) +{ + const md_cert_t *cert; + + if (certs->nelts == 0) { + return APR_ENOENT; + } + + cert = APR_ARRAY_IDX(certs, 0, const md_cert_t*); + + if (1 != X509_check_private_key(cert->x509, pkey->pkey)) { + return APR_EGENERAL; + } + + return APR_SUCCESS; +} diff --git a/modules/md/md_crypt.h b/modules/md/md_crypt.h index e03c296..a892e00 100644 --- a/modules/md/md_crypt.h +++ b/modules/md/md_crypt.h @@ -24,18 +24,22 @@ struct md_t; struct md_http_response_t; struct md_cert_t; struct md_pkey_t; +struct md_data_t; +struct md_timeperiod_t; /**************************************************************************************************/ /* random */ apr_status_t md_rand_bytes(unsigned char *buf, apr_size_t len, apr_pool_t *p); +apr_time_t md_asn1_generalized_time_get(void *ASN1_GENERALIZEDTIME); + /**************************************************************************************************/ /* digests */ apr_status_t md_crypt_sha256_digest64(const char **pdigest64, apr_pool_t *p, - const char *d, size_t dlen); + const struct md_data_t *data); apr_status_t md_crypt_sha256_digest_hex(const char **pdigesthex, apr_pool_t *p, - const char *d, size_t dlen); + const struct md_data_t *data); /**************************************************************************************************/ /* private keys */ @@ -45,22 +49,54 @@ typedef struct md_pkey_t md_pkey_t; typedef enum { MD_PKEY_TYPE_DEFAULT, MD_PKEY_TYPE_RSA, + MD_PKEY_TYPE_EC, } md_pkey_type_t; -typedef struct md_pkey_rsa_spec_t { +typedef struct md_pkey_rsa_params_t { apr_uint32_t bits; -} md_pkey_rsa_spec_t; +} md_pkey_rsa_params_t; + +typedef struct md_pkey_ec_params_t { + const char *curve; +} md_pkey_ec_params_t; typedef struct md_pkey_spec_t { md_pkey_type_t type; union { - md_pkey_rsa_spec_t rsa; + md_pkey_rsa_params_t rsa; + md_pkey_ec_params_t ec; } params; } md_pkey_spec_t; +typedef struct md_pkeys_spec_t { + apr_pool_t *p; + struct apr_array_header_t *specs; +} md_pkeys_spec_t; + apr_status_t md_crypt_init(apr_pool_t *pool); -apr_status_t md_pkey_gen(md_pkey_t **ppkey, apr_pool_t *p, md_pkey_spec_t *spec); +const char *md_pkey_spec_name(const md_pkey_spec_t *spec); + +md_pkeys_spec_t *md_pkeys_spec_make(apr_pool_t *p); +void md_pkeys_spec_add_default(md_pkeys_spec_t *pks); +int md_pkeys_spec_contains_rsa(md_pkeys_spec_t *pks); +void md_pkeys_spec_add_rsa(md_pkeys_spec_t *pks, unsigned int bits); +int md_pkeys_spec_contains_ec(md_pkeys_spec_t *pks, const char *curve); +void md_pkeys_spec_add_ec(md_pkeys_spec_t *pks, const char *curve); +int md_pkeys_spec_eq(md_pkeys_spec_t *pks1, md_pkeys_spec_t *pks2); +md_pkeys_spec_t *md_pkeys_spec_clone(apr_pool_t *p, const md_pkeys_spec_t *pks); +int md_pkeys_spec_is_empty(const md_pkeys_spec_t *pks); +md_pkey_spec_t *md_pkeys_spec_get(const md_pkeys_spec_t *pks, int index); +int md_pkeys_spec_count(const md_pkeys_spec_t *pks); +void md_pkeys_spec_add(md_pkeys_spec_t 
*pks, md_pkey_spec_t *spec); + +struct md_json_t *md_pkey_spec_to_json(const md_pkey_spec_t *spec, apr_pool_t *p); +md_pkey_spec_t *md_pkey_spec_from_json(struct md_json_t *json, apr_pool_t *p); +struct md_json_t *md_pkeys_spec_to_json(const md_pkeys_spec_t *pks, apr_pool_t *p); +md_pkeys_spec_t *md_pkeys_spec_from_json(struct md_json_t *json, apr_pool_t *p); + + +apr_status_t md_pkey_gen(md_pkey_t **ppkey, apr_pool_t *p, md_pkey_spec_t *key_props); void md_pkey_free(md_pkey_t *pkey); const char *md_pkey_get_rsa_e64(md_pkey_t *pkey, apr_pool_t *p); @@ -76,12 +112,16 @@ apr_status_t md_pkey_fsave(md_pkey_t *pkey, apr_pool_t *p, apr_status_t md_crypt_sign64(const char **psign64, md_pkey_t *pkey, apr_pool_t *p, const char *d, size_t dlen); -void *md_cert_get_X509(struct md_cert_t *cert); void *md_pkey_get_EVP_PKEY(struct md_pkey_t *pkey); -struct md_json_t *md_pkey_spec_to_json(const md_pkey_spec_t *spec, apr_pool_t *p); -md_pkey_spec_t *md_pkey_spec_from_json(struct md_json_t *json, apr_pool_t *p); -int md_pkey_spec_eq(md_pkey_spec_t *spec1, md_pkey_spec_t *spec2); +apr_status_t md_crypt_hmac64(const char **pmac64, const struct md_data_t *hmac_key, + apr_pool_t *p, const char *d, size_t dlen); + +/** + * Read a private key from a http response. + */ +apr_status_t md_pkey_read_http(md_pkey_t **ppkey, apr_pool_t *pool, + const struct md_http_response_t *res); /**************************************************************************************************/ /* X509 certificates */ @@ -94,30 +134,73 @@ typedef enum { MD_CERT_EXPIRED } md_cert_state_t; -void md_cert_free(md_cert_t *cert); +/** + * Create a holder of the certificate that will free its memory when the + * pool is destroyed. + */ +md_cert_t *md_cert_make(apr_pool_t *p, void *x509); + +/** + * Wrap a x509 certificate into our own structure, without taking ownership + * of its memory. The caller remains responsible. + */ +md_cert_t *md_cert_wrap(apr_pool_t *p, void *x509); + +void *md_cert_get_X509(const md_cert_t *cert); apr_status_t md_cert_fload(md_cert_t **pcert, apr_pool_t *p, const char *fname); apr_status_t md_cert_fsave(md_cert_t *cert, apr_pool_t *p, const char *fname, apr_fileperms_t perms); +/** + * Read a x509 certificate from a http response. + * Will return APR_ENOENT if content-type is not recognized (currently + * only "application/pkix-cert" is supported). + */ apr_status_t md_cert_read_http(md_cert_t **pcert, apr_pool_t *pool, const struct md_http_response_t *res); -md_cert_state_t md_cert_state_get(md_cert_t *cert); +/** + * Read at least one certificate from the given PEM data. + */ +apr_status_t md_cert_read_chain(apr_array_header_t *chain, apr_pool_t *p, + const char *pem, apr_size_t pem_len); + +/** + * Read one or even a chain of certificates from a http response. + * Will return APR_ENOENT if content-type is not recognized (currently + * supports only "application/pem-certificate-chain" and "application/pkix-cert"). + * @param chain must be non-NULL, retrieved certificates will be added. 
+ */ +apr_status_t md_cert_chain_read_http(struct apr_array_header_t *chain, + apr_pool_t *pool, const struct md_http_response_t *res); + +md_cert_state_t md_cert_state_get(const md_cert_t *cert); int md_cert_is_valid_now(const md_cert_t *cert); int md_cert_has_expired(const md_cert_t *cert); int md_cert_covers_domain(md_cert_t *cert, const char *domain_name); int md_cert_covers_md(md_cert_t *cert, const struct md_t *md); -int md_cert_must_staple(md_cert_t *cert); -apr_time_t md_cert_get_not_after(md_cert_t *cert); -apr_time_t md_cert_get_not_before(md_cert_t *cert); +int md_cert_must_staple(const md_cert_t *cert); +apr_time_t md_cert_get_not_after(const md_cert_t *cert); +apr_time_t md_cert_get_not_before(const md_cert_t *cert); +struct md_timeperiod_t md_cert_get_valid(const md_cert_t *cert); -apr_status_t md_cert_get_issuers_uri(const char **puri, md_cert_t *cert, apr_pool_t *p); -apr_status_t md_cert_get_alt_names(apr_array_header_t **pnames, md_cert_t *cert, apr_pool_t *p); +/** + * Return != 0 iff the hash values of the certificates are equal. + */ +int md_certs_are_equal(const md_cert_t *a, const md_cert_t *b); + +apr_status_t md_cert_get_issuers_uri(const char **puri, const md_cert_t *cert, apr_pool_t *p); +apr_status_t md_cert_get_alt_names(apr_array_header_t **pnames, const md_cert_t *cert, apr_pool_t *p); -apr_status_t md_cert_to_base64url(const char **ps64, md_cert_t *cert, apr_pool_t *p); +apr_status_t md_cert_to_base64url(const char **ps64, const md_cert_t *cert, apr_pool_t *p); apr_status_t md_cert_from_base64url(md_cert_t **pcert, const char *s64, apr_pool_t *p); +apr_status_t md_cert_to_sha256_digest(struct md_data_t **pdigest, const md_cert_t *cert, apr_pool_t *p); +apr_status_t md_cert_to_sha256_fingerprint(const char **pfinger, const md_cert_t *cert, apr_pool_t *p); + +const char *md_cert_get_serial_number(const md_cert_t *cert, apr_pool_t *p); + apr_status_t md_chain_fload(struct apr_array_header_t **pcerts, apr_pool_t *p, const char *fname); apr_status_t md_chain_fsave(struct apr_array_header_t *certs, @@ -125,11 +208,46 @@ apr_status_t md_chain_fsave(struct apr_array_header_t *certs, apr_status_t md_chain_fappend(struct apr_array_header_t *certs, apr_pool_t *p, const char *fname); -apr_status_t md_cert_req_create(const char **pcsr_der_64, const struct md_t *md, +apr_status_t md_cert_req_create(const char **pcsr_der_64, const char *name, + apr_array_header_t *domains, int must_staple, md_pkey_t *pkey, apr_pool_t *p); +/** + * Create a self-signed cerftificate with the given cn, key and list + * of alternate domain names. + */ apr_status_t md_cert_self_sign(md_cert_t **pcert, const char *cn, struct apr_array_header_t *domains, md_pkey_t *pkey, apr_interval_time_t valid_for, apr_pool_t *p); + +/** + * Create a certificate for answering "tls-alpn-01" ACME challenges + * (see ). 
+ */ +apr_status_t md_cert_make_tls_alpn_01(md_cert_t **pcert, const char *domain, + const char *acme_id, md_pkey_t *pkey, + apr_interval_time_t valid_for, apr_pool_t *p); + +apr_status_t md_cert_get_ct_scts(apr_array_header_t *scts, apr_pool_t *p, const md_cert_t *cert); + +apr_status_t md_cert_get_ocsp_responder_url(const char **purl, apr_pool_t *p, const md_cert_t *cert); + +apr_status_t md_check_cert_and_pkey(struct apr_array_header_t *certs, md_pkey_t *pkey); + + +/**************************************************************************************************/ +/* X509 certificate transparency */ + +const char *md_nid_get_sname(int nid); +const char *md_nid_get_lname(int nid); + +typedef struct md_sct md_sct; +struct md_sct { + int version; + apr_time_t timestamp; + struct md_data_t *logid; + int signature_type_nid; + struct md_data_t *signature; +}; #endif /* md_crypt_h */ diff --git a/modules/md/md_curl.c b/modules/md/md_curl.c index f3585da..217e857 100644 --- a/modules/md/md_curl.c +++ b/modules/md/md_curl.c @@ -24,13 +24,14 @@ #include "md_http.h" #include "md_log.h" +#include "md_util.h" #include "md_curl.h" /**************************************************************************************************/ /* md_http curl implementation */ -static apr_status_t curl_status(int curl_code) +static apr_status_t curl_status(unsigned int curl_code) { switch (curl_code) { case CURLE_OK: return APR_SUCCESS; @@ -49,11 +50,21 @@ static apr_status_t curl_status(int curl_code) } } +typedef struct { + CURL *curl; + CURLM *curlm; + struct curl_slist *req_hdrs; + md_http_response_t *response; + apr_status_t rv; + int status_fired; +} md_curl_internals_t; + static size_t req_data_cb(void *data, size_t len, size_t nmemb, void *baton) { apr_bucket_brigade *body = baton; size_t blen, read_len = 0, max_len = len * nmemb; const char *bdata; + char *rdata = data; apr_bucket *b; apr_status_t rv; @@ -71,9 +82,10 @@ static size_t req_data_cb(void *data, size_t len, size_t nmemb, void *baton) apr_bucket_split(b, max_len); blen = max_len; } - memcpy(data, bdata, blen); + memcpy(rdata, bdata, blen); read_len += blen; max_len -= blen; + rdata += blen; } else { body = NULL; @@ -92,7 +104,8 @@ static size_t req_data_cb(void *data, size_t len, size_t nmemb, void *baton) static size_t resp_data_cb(void *data, size_t len, size_t nmemb, void *baton) { - md_http_response_t *res = baton; + md_curl_internals_t *internals = baton; + md_http_response_t *res = internals->response; size_t blen = len * nmemb; apr_status_t rv; @@ -100,7 +113,7 @@ static size_t resp_data_cb(void *data, size_t len, size_t nmemb, void *baton) if (res->req->resp_limit) { apr_off_t body_len = 0; apr_brigade_length(res->body, 0, &body_len); - if (body_len + (apr_off_t)len > res->req->resp_limit) { + if (body_len + (apr_off_t)blen > res->req->resp_limit) { return 0; /* signal curl failure */ } } @@ -115,7 +128,8 @@ static size_t resp_data_cb(void *data, size_t len, size_t nmemb, void *baton) static size_t header_cb(void *buffer, size_t elen, size_t nmemb, void *baton) { - md_http_response_t *res = baton; + md_curl_internals_t *internals = baton; + md_http_response_t *res = internals->response; size_t len, clen = elen * nmemb; const char *name = NULL, *value = "", *b = buffer; apr_size_t i; @@ -142,24 +156,6 @@ static size_t header_cb(void *buffer, size_t elen, size_t nmemb, void *baton) return clen; } -static apr_status_t curl_init(md_http_request_t *req) -{ - CURL *curl = curl_easy_init(); - if (!curl) { - return APR_EGENERAL; - } - - 
curl_easy_setopt(curl, CURLOPT_HEADERFUNCTION, header_cb); - curl_easy_setopt(curl, CURLOPT_HEADERDATA, NULL); - curl_easy_setopt(curl, CURLOPT_READFUNCTION, req_data_cb); - curl_easy_setopt(curl, CURLOPT_READDATA, NULL); - curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, resp_data_cb); - curl_easy_setopt(curl, CURLOPT_WRITEDATA, NULL); - - req->internals = curl; - return APR_SUCCESS; -} - typedef struct { md_http_request_t *req; struct curl_slist *hdrs; @@ -181,24 +177,101 @@ static int curlify_headers(void *baton, const char *key, const char *value) return 1; } -static apr_status_t curl_perform(md_http_request_t *req) +/* Convert timeout values for curl. Since curl uses 0 to disable + * timeout, return at least 1 if the apr_time_t value is non-zero. */ +static long timeout_msec(apr_time_t timeout) { - apr_status_t rv = APR_SUCCESS; - CURLcode curle; - md_http_response_t *res; - CURL *curl; - struct curl_slist *req_hdrs = NULL; + long ms = (long)apr_time_as_msec(timeout); + return ms? ms : (timeout? 1 : 0); +} - rv = curl_init(req); - curl = req->internals; - - res = apr_pcalloc(req->pool, sizeof(*res)); +static long timeout_sec(apr_time_t timeout) +{ + long s = (long)apr_time_sec(timeout); + return s? s : (timeout? 1 : 0); +} + +static int curl_debug_log(CURL *curl, curl_infotype type, char *data, size_t size, void *baton) +{ + md_http_request_t *req = baton; - res->req = req; - res->rv = APR_SUCCESS; - res->status = 400; - res->headers = apr_table_make(req->pool, 5); - res->body = apr_brigade_create(req->pool, req->bucket_alloc); + (void)curl; + switch (type) { + case CURLINFO_TEXT: + md_log_perror(MD_LOG_MARK, MD_LOG_TRACE4, 0, req->pool, + "req[%d]: info %s", req->id, apr_pstrndup(req->pool, data, size)); + break; + case CURLINFO_HEADER_OUT: + md_log_perror(MD_LOG_MARK, MD_LOG_TRACE4, 0, req->pool, + "req[%d]: header --> %s", req->id, apr_pstrndup(req->pool, data, size)); + break; + case CURLINFO_HEADER_IN: + md_log_perror(MD_LOG_MARK, MD_LOG_TRACE4, 0, req->pool, + "req[%d]: header <-- %s", req->id, apr_pstrndup(req->pool, data, size)); + break; + case CURLINFO_DATA_OUT: + md_log_perror(MD_LOG_MARK, MD_LOG_TRACE4, 0, req->pool, + "req[%d]: data --> %ld bytes", req->id, (long)size); + if (md_log_is_level(req->pool, MD_LOG_TRACE5)) { + md_data_t d; + const char *s; + md_data_init(&d, data, size); + md_data_to_hex(&s, 0, req->pool, &d); + md_log_perror(MD_LOG_MARK, MD_LOG_TRACE5, 0, req->pool, + "req[%d]: data(hex) --> %s", req->id, s); + } + break; + case CURLINFO_DATA_IN: + md_log_perror(MD_LOG_MARK, MD_LOG_TRACE4, 0, req->pool, + "req[%d]: data <-- %ld bytes", req->id, (long)size); + if (md_log_is_level(req->pool, MD_LOG_TRACE5)) { + md_data_t d; + const char *s; + md_data_init(&d, data, size); + md_data_to_hex(&s, 0, req->pool, &d); + md_log_perror(MD_LOG_MARK, MD_LOG_TRACE5, 0, req->pool, + "req[%d]: data(hex) <-- %s", req->id, s); + } + break; + default: + break; + } + return 0; +} + +static apr_status_t internals_setup(md_http_request_t *req) +{ + md_curl_internals_t *internals; + CURL *curl; + apr_status_t rv = APR_SUCCESS; + + curl = md_http_get_impl_data(req->http); + if (!curl) { + md_log_perror(MD_LOG_MARK, MD_LOG_TRACE3, 0, req->pool, "creating curl instance"); + curl = curl_easy_init(); + if (!curl) { + rv = APR_EGENERAL; + goto leave; + } + curl_easy_setopt(curl, CURLOPT_HEADERFUNCTION, header_cb); + curl_easy_setopt(curl, CURLOPT_HEADERDATA, NULL); + curl_easy_setopt(curl, CURLOPT_READFUNCTION, req_data_cb); + curl_easy_setopt(curl, CURLOPT_READDATA, NULL); + 
curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, resp_data_cb); + curl_easy_setopt(curl, CURLOPT_WRITEDATA, NULL); + } + else { + md_log_perror(MD_LOG_MARK, MD_LOG_TRACE3, 0, req->pool, "reusing curl instance from http"); + } + + internals = apr_pcalloc(req->pool, sizeof(*internals)); + internals->curl = curl; + + internals->response = apr_pcalloc(req->pool, sizeof(md_http_response_t)); + internals->response->req = req; + internals->response->status = 400; + internals->response->headers = apr_table_make(req->pool, 5); + internals->response->body = apr_brigade_create(req->pool, req->bucket_alloc); curl_easy_setopt(curl, CURLOPT_URL, req->url); if (!apr_strnatcasecmp("GET", req->method)) { @@ -213,9 +286,32 @@ static apr_status_t curl_perform(md_http_request_t *req) else { curl_easy_setopt(curl, CURLOPT_CUSTOMREQUEST, req->method); } - curl_easy_setopt(curl, CURLOPT_HEADERDATA, res); + curl_easy_setopt(curl, CURLOPT_HEADERDATA, internals); curl_easy_setopt(curl, CURLOPT_READDATA, req->body); - curl_easy_setopt(curl, CURLOPT_WRITEDATA, res); + curl_easy_setopt(curl, CURLOPT_WRITEDATA, internals); + + if (req->timeout.overall > 0) { + curl_easy_setopt(curl, CURLOPT_TIMEOUT_MS, timeout_msec(req->timeout.overall)); + } + if (req->timeout.connect > 0) { + curl_easy_setopt(curl, CURLOPT_CONNECTTIMEOUT_MS, timeout_msec(req->timeout.connect)); + } + if (req->timeout.stalled > 0) { + curl_easy_setopt(curl, CURLOPT_LOW_SPEED_LIMIT, req->timeout.stall_bytes_per_sec); + curl_easy_setopt(curl, CURLOPT_LOW_SPEED_TIME, timeout_sec(req->timeout.stalled)); + } + if (req->ca_file) { + curl_easy_setopt(curl, CURLOPT_CAINFO, req->ca_file); + } + if (req->unix_socket_path) { + curl_easy_setopt(curl, CURLOPT_UNIX_SOCKET_PATH, req->unix_socket_path); + } + + if (req->body_len >= 0) { + /* set the Content-Length */ + curl_easy_setopt(curl, CURLOPT_INFILESIZE_LARGE, (curl_off_t)req->body_len); + curl_easy_setopt(curl, CURLOPT_POSTFIELDSIZE_LARGE, (curl_off_t)req->body_len); + } if (req->user_agent) { curl_easy_setopt(curl, CURLOPT_USERAGENT, req->user_agent); @@ -230,47 +326,267 @@ static apr_status_t curl_perform(md_http_request_t *req) ctx.hdrs = NULL; ctx.rv = APR_SUCCESS; apr_table_do(curlify_headers, &ctx, req->headers, NULL); - req_hdrs = ctx.hdrs; + internals->req_hdrs = ctx.hdrs; if (ctx.rv == APR_SUCCESS) { - curl_easy_setopt(curl, CURLOPT_HTTPHEADER, req_hdrs); + curl_easy_setopt(curl, CURLOPT_HTTPHEADER, internals->req_hdrs); } } - md_log_perror(MD_LOG_MARK, MD_LOG_TRACE1, 0, req->pool, - "request %ld --> %s %s", req->id, req->method, req->url); + md_log_perror(MD_LOG_MARK, MD_LOG_TRACE3, 0, req->pool, + "req[%d]: %s %s", req->id, req->method, req->url); - if (md_log_is_level(req->pool, MD_LOG_TRACE3)) { + if (md_log_is_level(req->pool, MD_LOG_TRACE4)) { curl_easy_setopt(curl, CURLOPT_VERBOSE, 1L); + curl_easy_setopt(curl, CURLOPT_DEBUGFUNCTION, curl_debug_log); + curl_easy_setopt(curl, CURLOPT_DEBUGDATA, req); } - curle = curl_easy_perform(curl); - res->rv = curl_status(curle); +leave: + req->internals = (APR_SUCCESS == rv)? 
internals : NULL; + return rv; +} + +static apr_status_t update_status(md_http_request_t *req) +{ + md_curl_internals_t *internals = req->internals; + long l; + apr_status_t rv = APR_SUCCESS; + + if (internals) { + rv = curl_status(curl_easy_getinfo(internals->curl, CURLINFO_RESPONSE_CODE, &l)); + if (APR_SUCCESS == rv) { + internals->response->status = (int)l; + md_log_perror(MD_LOG_MARK, MD_LOG_TRACE3, rv, req->pool, + "req[%d]: http status is %d", + req->id, internals->response->status); + } + } + return rv; +} + +static void fire_status(md_http_request_t *req, apr_status_t rv) +{ + md_curl_internals_t *internals = req->internals; + + if (internals && !internals->status_fired) { + internals->status_fired = 1; + + md_log_perror(MD_LOG_MARK, MD_LOG_TRACE3, rv, req->pool, + "req[%d] fire callbacks", req->id); + if ((APR_SUCCESS == rv) && req->cb.on_response) { + rv = req->cb.on_response(internals->response, req->cb.on_response_data); + } - if (APR_SUCCESS == res->rv) { - long l; - res->rv = curl_status(curl_easy_getinfo(curl, CURLINFO_RESPONSE_CODE, &l)); - if (APR_SUCCESS == res->rv) { - res->status = (int)l; + internals->rv = rv; + if (req->cb.on_status) { + req->cb.on_status(req, rv, req->cb.on_status_data); } - md_log_perror(MD_LOG_MARK, MD_LOG_TRACE1, res->rv, req->pool, - "request %ld <-- %d", req->id, res->status); } - else { - md_log_perror(MD_LOG_MARK, MD_LOG_DEBUG, res->rv, req->pool, - "request %ld failed(%d): %s", req->id, curle, - curl_easy_strerror(curle)); +} + +static apr_status_t md_curl_perform(md_http_request_t *req) +{ + apr_status_t rv = APR_SUCCESS; + CURLcode curle; + md_curl_internals_t *internals; + long l; + + if (APR_SUCCESS != (rv = internals_setup(req))) goto leave; + internals = req->internals; + + curle = curl_easy_perform(internals->curl); + + rv = curl_status(curle); + if (APR_SUCCESS != rv) { + md_log_perror(MD_LOG_MARK, MD_LOG_DEBUG, rv, req->pool, + "request failed(%d): %s", curle, curl_easy_strerror(curle)); + goto leave; + } + + rv = curl_status(curl_easy_getinfo(internals->curl, CURLINFO_RESPONSE_CODE, &l)); + if (APR_SUCCESS == rv) { + internals->response->status = (int)l; } + md_log_perror(MD_LOG_MARK, MD_LOG_TRACE1, rv, req->pool, "request <-- %d", + internals->response->status); + + if (req->cb.on_response) { + rv = req->cb.on_response(internals->response, req->cb.on_response_data); + req->cb.on_response = NULL; + } + +leave: + fire_status(req, rv); + md_http_req_destroy(req); + return rv; +} + +static md_http_request_t *find_curl_request(apr_array_header_t *requests, CURL *curl) +{ + md_http_request_t *req; + md_curl_internals_t *internals; + int i; - if (req->cb) { - res->rv = req->cb(res); + for (i = 0; i < requests->nelts; ++i) { + req = APR_ARRAY_IDX(requests, i, md_http_request_t*); + internals = req->internals; + if (internals && internals->curl == curl) { + return req; + } } + return NULL; +} + +static void add_to_curlm(md_http_request_t *req, CURLM *curlm) +{ + md_curl_internals_t *internals = req->internals; - rv = res->rv; + assert(curlm); + assert(internals); + if (internals->curlm == NULL) { + internals->curlm = curlm; + } + assert(internals->curlm == curlm); + curl_multi_add_handle(curlm, internals->curl); +} + +static void remove_from_curlm_and_destroy(md_http_request_t *req, CURLM *curlm) +{ + md_curl_internals_t *internals = req->internals; + + assert(curlm); + assert(internals); + assert(internals->curlm == curlm); + curl_multi_remove_handle(curlm, internals->curl); + internals->curlm = NULL; md_http_req_destroy(req); - if 
(req_hdrs) { - curl_slist_free_all(req_hdrs); +} + +static apr_status_t md_curl_multi_perform(md_http_t *http, apr_pool_t *p, + md_http_next_req *nextreq, void *baton) +{ + md_http_t *sub_http; + md_http_request_t *req; + CURLM *curlm = NULL; + CURLMcode mc; + struct CURLMsg *curlmsg; + apr_array_header_t *http_spares; + apr_array_header_t *requests; + int i, running, numfds, slowdown, msgcount; + apr_status_t rv; + + http_spares = apr_array_make(p, 10, sizeof(md_http_t*)); + requests = apr_array_make(p, 10, sizeof(md_http_request_t*)); + curlm = curl_multi_init(); + if (!curlm) { + rv = APR_ENOMEM; + goto leave; } + running = 1; + slowdown = 0; + while(1) { + while (1) { + /* fetch as many requests as nextreq gives us */ + if (http_spares->nelts > 0) { + sub_http = *(md_http_t **)(apr_array_pop(http_spares)); + } + else { + rv = md_http_clone(&sub_http, p, http); + if (APR_SUCCESS != rv) { + md_log_perror(MD_LOG_MARK, MD_LOG_ERR, rv, p, + "multi_perform[%d reqs]: setup failed", requests->nelts); + goto leave; + } + } + + rv = nextreq(&req, baton, sub_http, requests->nelts); + if (APR_STATUS_IS_ENOENT(rv)) { + md_log_perror(MD_LOG_MARK, MD_LOG_TRACE3, 0, p, + "multi_perform[%d reqs]: no more requests", requests->nelts); + if (!requests->nelts) { + goto leave; + } + break; + } + else if (APR_SUCCESS != rv) { + md_log_perror(MD_LOG_MARK, MD_LOG_TRACE3, rv, p, + "multi_perform[%d reqs]: nextreq() failed", requests->nelts); + APR_ARRAY_PUSH(http_spares, md_http_t*) = sub_http; + goto leave; + } + + if (APR_SUCCESS != (rv = internals_setup(req))) { + if (req->cb.on_status) req->cb.on_status(req, rv, req->cb.on_status_data); + md_log_perror(MD_LOG_MARK, MD_LOG_TRACE3, rv, p, + "multi_perform[%d reqs]: setup failed", requests->nelts); + APR_ARRAY_PUSH(http_spares, md_http_t*) = sub_http; + goto leave; + } + + APR_ARRAY_PUSH(requests, md_http_request_t*) = req; + add_to_curlm(req, curlm); + md_log_perror(MD_LOG_MARK, MD_LOG_TRACE3, rv, p, + "multi_perform[%d reqs]: added request", requests->nelts); + } + + mc = curl_multi_perform(curlm, &running); + if (CURLM_OK == mc) { + mc = curl_multi_wait(curlm, NULL, 0, 1000, &numfds); + if (numfds) slowdown = 0; + } + if (CURLM_OK != mc) { + rv = APR_ECONNABORTED; + md_log_perror(MD_LOG_MARK, MD_LOG_DEBUG, rv, p, + "multi_perform[%d reqs] failed(%d): %s", + requests->nelts, mc, curl_multi_strerror(mc)); + goto leave; + } + if (!numfds) { + /* no activity on any connection, timeout */ + md_log_perror(MD_LOG_MARK, MD_LOG_TRACE3, 0, p, + "multi_perform[%d reqs]: slowdown %d", requests->nelts, slowdown); + if (slowdown) apr_sleep(apr_time_from_msec(100)); + ++slowdown; + } + + /* process status messages, e.g. 
that a request is done */ + while (running < requests->nelts) { + curlmsg = curl_multi_info_read(curlm, &msgcount); + if (!curlmsg) break; + if (curlmsg->msg == CURLMSG_DONE) { + req = find_curl_request(requests, curlmsg->easy_handle); + if (req) { + md_log_perror(MD_LOG_MARK, MD_LOG_TRACE2, 0, p, + "multi_perform[%d reqs]: req[%d] done", + requests->nelts, req->id); + update_status(req); + fire_status(req, curl_status(curlmsg->data.result)); + md_array_remove(requests, req); + sub_http = req->http; + APR_ARRAY_PUSH(http_spares, md_http_t*) = sub_http; + remove_from_curlm_and_destroy(req, curlm); + } + else { + md_log_perror(MD_LOG_MARK, MD_LOG_DEBUG, 0, p, + "multi_perform[%d reqs]: req done, but not found by handle", + requests->nelts); + } + } + } + }; + +leave: + md_log_perror(MD_LOG_MARK, MD_LOG_TRACE3, rv, p, + "multi_perform[%d reqs]: leaving", requests->nelts); + for (i = 0; i < requests->nelts; ++i) { + req = APR_ARRAY_IDX(requests, i, md_http_request_t*); + fire_status(req, APR_SUCCESS); + sub_http = req->http; + APR_ARRAY_PUSH(http_spares, md_http_t*) = sub_http; + remove_from_curlm_and_destroy(req, curlm); + } + if (curlm) curl_multi_cleanup(curlm); return rv; } @@ -284,18 +600,48 @@ static apr_status_t md_curl_init(void) { return APR_SUCCESS; } -static void curl_req_cleanup(md_http_request_t *req) +static void md_curl_req_cleanup(md_http_request_t *req) { - if (req->internals) { - curl_easy_cleanup(req->internals); + md_curl_internals_t *internals = req->internals; + if (internals) { + if (internals->curl) { + CURL *curl = md_http_get_impl_data(req->http); + if (curl == internals->curl) { + /* NOP: we have this curl at the md_http_t already */ + } + else if (!curl) { + /* no curl at the md_http_t yet, install this one */ + md_log_perror(MD_LOG_MARK, MD_LOG_TRACE3, 0, req->pool, "register curl instance at http"); + md_http_set_impl_data(req->http, internals->curl); + } + else { + /* There already is a curl at the md_http_t and it's not this one. */ + curl_easy_cleanup(internals->curl); + } + } + if (internals->req_hdrs) curl_slist_free_all(internals->req_hdrs); req->internals = NULL; } } +static void md_curl_cleanup(md_http_t *http, apr_pool_t *pool) +{ + CURL *curl; + + curl = md_http_get_impl_data(http); + if (curl) { + md_log_perror(MD_LOG_MARK, MD_LOG_TRACE3, 0, pool, "cleanup curl instance"); + md_http_set_impl_data(http, NULL); + curl_easy_cleanup(curl); + } +} + static md_http_impl_t impl = { md_curl_init, - curl_req_cleanup, - curl_perform + md_curl_req_cleanup, + md_curl_perform, + md_curl_multi_perform, + md_curl_cleanup, }; md_http_impl_t * md_curl_get_impl(apr_pool_t *p) diff --git a/modules/md/md_event.c b/modules/md/md_event.c new file mode 100644 index 0000000..c731d55 --- /dev/null +++ b/modules/md/md_event.c @@ -0,0 +1,89 @@ +/* Licensed to the Apache Software Foundation (ASF) under one or more +* contributor license agreements. See the NOTICE file distributed with +* this work for additional information regarding copyright ownership. +* The ASF licenses this file to You under the Apache License, Version 2.0 +* (the "License"); you may not use this file except in compliance with +* the License. You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+* See the License for the specific language governing permissions and +* limitations under the License. +*/ + +#include +#include +#include + +#include "md.h" +#include "md_event.h" + + +typedef struct md_subscription { + struct md_subscription *next; + md_event_cb *cb; + void *baton; +} md_subscription; + +static struct { + apr_pool_t *p; + md_subscription *subs; +} EVNT; + +static apr_status_t cleanup_setup(void *dummy) +{ + (void)dummy; + memset(&EVNT, 0, sizeof(EVNT)); + return APR_SUCCESS; +} + +void md_event_init(apr_pool_t *p) +{ + memset(&EVNT, 0, sizeof(EVNT)); + EVNT.p = p; + apr_pool_cleanup_register(p, NULL, cleanup_setup, apr_pool_cleanup_null); +} + +void md_event_subscribe(md_event_cb *cb, void *baton) +{ + md_subscription *sub; + + sub = apr_pcalloc(EVNT.p, sizeof(*sub)); + sub->cb = cb; + sub->baton = baton; + sub->next = EVNT.subs; + EVNT.subs = sub; +} + +apr_status_t md_event_raise(const char *event, + const char *mdomain, + struct md_job_t *job, + struct md_result_t *result, + apr_pool_t *p) +{ + md_subscription *sub = EVNT.subs; + apr_status_t rv; + + while (sub) { + rv = sub->cb(event, mdomain, sub->baton, job, result, p); + if (APR_SUCCESS != rv) return rv; + sub = sub->next; + } + return APR_SUCCESS; +} + +void md_event_holler(const char *event, + const char *mdomain, + struct md_job_t *job, + struct md_result_t *result, + apr_pool_t *p) +{ + md_subscription *sub = EVNT.subs; + while (sub) { + sub->cb(event, mdomain, sub->baton, job, result, p); + sub = sub->next; + } +} diff --git a/modules/md/md_event.h b/modules/md/md_event.h new file mode 100644 index 0000000..e66c3c2 --- /dev/null +++ b/modules/md/md_event.h @@ -0,0 +1,46 @@ +/* Licensed to the Apache Software Foundation (ASF) under one or more +* contributor license agreements. See the NOTICE file distributed with +* this work for additional information regarding copyright ownership. +* The ASF licenses this file to You under the Apache License, Version 2.0 +* (the "License"); you may not use this file except in compliance with +* the License. You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. 
+*/ + +#ifndef md_event_h +#define md_event_h + +struct md_job_t; +struct md_result_t; + +typedef apr_status_t md_event_cb(const char *event, + const char *mdomain, + void *baton, + struct md_job_t *job, + struct md_result_t *result, + apr_pool_t *p); + +void md_event_init(apr_pool_t *p); + +void md_event_subscribe(md_event_cb *cb, void *baton); + +apr_status_t md_event_raise(const char *event, + const char *mdomain, + struct md_job_t *job, + struct md_result_t *result, + apr_pool_t *p); + +void md_event_holler(const char *event, + const char *mdomain, + struct md_job_t *job, + struct md_result_t *result, + apr_pool_t *p); + +#endif /* md_event_h */ diff --git a/modules/md/md_http.c b/modules/md/md_http.c index 310fc55..0d21e7b 100644 --- a/modules/md/md_http.c +++ b/modules/md/md_http.c @@ -22,14 +22,20 @@ #include "md_http.h" #include "md_log.h" +#include "md_util.h" struct md_http_t { apr_pool_t *pool; apr_bucket_alloc_t *bucket_alloc; + int next_id; apr_off_t resp_limit; md_http_impl_t *impl; + void *impl_data; /* to be used by the implementation */ const char *user_agent; const char *proxy_url; + const char *unix_socket_path; + md_http_timeouts_t timeout; + const char *ca_file; }; static md_http_impl_t *cur_impl; @@ -43,7 +49,14 @@ void md_http_use_implementation(md_http_impl_t *impl) } } -static long next_req_id; +static apr_status_t http_cleanup(void *data) +{ + md_http_t *http = data; + if (http && http->impl && http->impl->cleanup) { + http->impl->cleanup(http, http->pool); + } + return APR_SUCCESS; +} apr_status_t md_http_create(md_http_t **phttp, apr_pool_t *p, const char *user_agent, const char *proxy_url) @@ -74,18 +87,130 @@ apr_status_t md_http_create(md_http_t **phttp, apr_pool_t *p, const char *user_a if (!http->bucket_alloc) { return APR_EGENERAL; } + apr_pool_cleanup_register(p, http, http_cleanup, apr_pool_cleanup_null); *phttp = http; return APR_SUCCESS; } +apr_status_t md_http_clone(md_http_t **phttp, + apr_pool_t *p, md_http_t *source_http) +{ + apr_status_t rv; + + rv = md_http_create(phttp, p, source_http->user_agent, source_http->proxy_url); + if (APR_SUCCESS == rv) { + (*phttp)->resp_limit = source_http->resp_limit; + (*phttp)->timeout = source_http->timeout; + if (source_http->unix_socket_path) { + (*phttp)->unix_socket_path = apr_pstrdup(p, source_http->unix_socket_path); + } + if (source_http->ca_file) { + (*phttp)->ca_file = apr_pstrdup(p, source_http->ca_file); + } + } + return rv; +} + +void md_http_set_impl_data(md_http_t *http, void *data) +{ + http->impl_data = data; +} + +void *md_http_get_impl_data(md_http_t *http) +{ + return http->impl_data; +} + void md_http_set_response_limit(md_http_t *http, apr_off_t resp_limit) { http->resp_limit = resp_limit; } +void md_http_set_timeout_default(md_http_t *http, apr_time_t timeout) +{ + http->timeout.overall = timeout; +} + +void md_http_set_timeout(md_http_request_t *req, apr_time_t timeout) +{ + req->timeout.overall = timeout; +} + +void md_http_set_connect_timeout_default(md_http_t *http, apr_time_t timeout) +{ + http->timeout.connect = timeout; +} + +void md_http_set_connect_timeout(md_http_request_t *req, apr_time_t timeout) +{ + req->timeout.connect = timeout; +} + +void md_http_set_stalling_default(md_http_t *http, long bytes_per_sec, apr_time_t timeout) +{ + http->timeout.stall_bytes_per_sec = bytes_per_sec; + http->timeout.stalled = timeout; +} + +void md_http_set_stalling(md_http_request_t *req, long bytes_per_sec, apr_time_t timeout) +{ + req->timeout.stall_bytes_per_sec = bytes_per_sec; + 
req->timeout.stalled = timeout; +} + +void md_http_set_ca_file(md_http_t *http, const char *ca_file) +{ + http->ca_file = ca_file; +} + +void md_http_set_unix_socket_path(md_http_t *http, const char *path) +{ + http->unix_socket_path = path; +} + +static apr_status_t req_set_body(md_http_request_t *req, const char *content_type, + apr_bucket_brigade *body, apr_off_t body_len, + int detect_len) +{ + apr_status_t rv = APR_SUCCESS; + + if (body && detect_len) { + rv = apr_brigade_length(body, 1, &body_len); + if (rv != APR_SUCCESS) { + return rv; + } + } + + req->body = body; + req->body_len = body? body_len : 0; + if (content_type) { + apr_table_set(req->headers, "Content-Type", content_type); + } + else { + apr_table_unset(req->headers, "Content-Type"); + } + return rv; +} + +static apr_status_t req_set_body_data(md_http_request_t *req, const char *content_type, + const md_data_t *body) +{ + apr_bucket_brigade *bbody = NULL; + apr_status_t rv; + + if (body && body->len > 0) { + bbody = apr_brigade_create(req->pool, req->http->bucket_alloc); + rv = apr_brigade_write(bbody, NULL, NULL, body->data, body->len); + if (rv != APR_SUCCESS) { + return rv; + } + } + return req_set_body(req, content_type, bbody, body? (apr_off_t)body->len : 0, 0); +} + static apr_status_t req_create(md_http_request_t **preq, md_http_t *http, - const char *method, const char *url, struct apr_table_t *headers, - md_http_cb *cb, void *baton) + const char *method, const char *url, + struct apr_table_t *headers) { md_http_request_t *req; apr_pool_t *pool; @@ -95,21 +220,22 @@ static apr_status_t req_create(md_http_request_t **preq, md_http_t *http, if (rv != APR_SUCCESS) { return rv; } + apr_pool_tag(pool, "md_http_req"); req = apr_pcalloc(pool, sizeof(*req)); - req->id = next_req_id++; req->pool = pool; + req->id = http->next_id++; req->bucket_alloc = http->bucket_alloc; req->http = http; req->method = method; req->url = url; req->headers = headers? apr_table_copy(req->pool, headers) : apr_table_make(req->pool, 5); req->resp_limit = http->resp_limit; - req->cb = cb; - req->baton = baton; req->user_agent = http->user_agent; req->proxy_url = http->proxy_url; - + req->timeout = http->timeout; + req->ca_file = http->ca_file; + req->unix_socket_path = http->unix_socket_path; *preq = req; return rv; } @@ -123,123 +249,149 @@ void md_http_req_destroy(md_http_request_t *req) apr_pool_destroy(req->pool); } -static apr_status_t schedule(md_http_request_t *req, - apr_bucket_brigade *body, int detect_clen, - long *preq_id) +void md_http_set_on_status_cb(md_http_request_t *req, md_http_status_cb *cb, void *baton) { - apr_status_t rv; - - req->body = body; - req->body_len = body? 
-1 : 0; + req->cb.on_status = cb; + req->cb.on_status_data = baton; +} - if (req->body && detect_clen) { - rv = apr_brigade_length(req->body, 1, &req->body_len); - if (rv != APR_SUCCESS) { - md_http_req_destroy(req); - return rv; - } - } - - if (req->body_len == 0 && apr_strnatcasecmp("GET", req->method)) { - apr_table_setn(req->headers, "Content-Length", "0"); - } - else if (req->body_len > 0) { - apr_table_setn(req->headers, "Content-Length", apr_off_t_toa(req->pool, req->body_len)); - } +void md_http_set_on_response_cb(md_http_request_t *req, md_http_response_cb *cb, void *baton) +{ + req->cb.on_response = cb; + req->cb.on_response_data = baton; +} + +apr_status_t md_http_perform(md_http_request_t *req) +{ + return req->http->impl->perform(req); +} + +typedef struct { + md_http_next_req *nextreq; + void *baton; +} nextreq_proxy_t; + +static apr_status_t proxy_nextreq(md_http_request_t **preq, void *baton, + md_http_t *http, int in_flight) +{ + nextreq_proxy_t *proxy = baton; - if (preq_id) { - *preq_id = req->id; - } + return proxy->nextreq(preq, proxy->baton, http, in_flight); +} + +apr_status_t md_http_multi_perform(md_http_t *http, md_http_next_req *nextreq, void *baton) +{ + nextreq_proxy_t proxy; - /* we send right away */ - rv = req->http->impl->perform(req); + proxy.nextreq = nextreq; + proxy.baton = baton; + return http->impl->multi_perform(http, http->pool, proxy_nextreq, &proxy); +} + +apr_status_t md_http_GET_create(md_http_request_t **preq, md_http_t *http, const char *url, + struct apr_table_t *headers) +{ + md_http_request_t *req; + apr_status_t rv; + rv = req_create(&req, http, "GET", url, headers); + *preq = (APR_SUCCESS == rv)? req : NULL; return rv; } -apr_status_t md_http_GET(struct md_http_t *http, - const char *url, struct apr_table_t *headers, - md_http_cb *cb, void *baton, long *preq_id) +apr_status_t md_http_HEAD_create(md_http_request_t **preq, md_http_t *http, const char *url, + struct apr_table_t *headers) { md_http_request_t *req; apr_status_t rv; - rv = req_create(&req, http, "GET", url, headers, cb, baton); - if (rv != APR_SUCCESS) { - return rv; - } - - return schedule(req, NULL, 0, preq_id); + rv = req_create(&req, http, "HEAD", url, headers); + *preq = (APR_SUCCESS == rv)? req : NULL; + return rv; } -apr_status_t md_http_HEAD(struct md_http_t *http, - const char *url, struct apr_table_t *headers, - md_http_cb *cb, void *baton, long *preq_id) +apr_status_t md_http_POST_create(md_http_request_t **preq, md_http_t *http, const char *url, + struct apr_table_t *headers, const char *content_type, + struct apr_bucket_brigade *body, int detect_len) { md_http_request_t *req; apr_status_t rv; - rv = req_create(&req, http, "HEAD", url, headers, cb, baton); - if (rv != APR_SUCCESS) { - return rv; + rv = req_create(&req, http, "POST", url, headers); + if (APR_SUCCESS == rv) { + rv = req_set_body(req, content_type, body, -1, detect_len); } - - return schedule(req, NULL, 0, preq_id); + *preq = (APR_SUCCESS == rv)? 
req : NULL; + return rv; } -apr_status_t md_http_POST(struct md_http_t *http, const char *url, - struct apr_table_t *headers, const char *content_type, - apr_bucket_brigade *body, - md_http_cb *cb, void *baton, long *preq_id) +apr_status_t md_http_POSTd_create(md_http_request_t **preq, md_http_t *http, const char *url, + struct apr_table_t *headers, const char *content_type, + const struct md_data_t *body) { md_http_request_t *req; apr_status_t rv; - rv = req_create(&req, http, "POST", url, headers, cb, baton); - if (rv != APR_SUCCESS) { - return rv; + rv = req_create(&req, http, "POST", url, headers); + if (APR_SUCCESS != rv) goto cleanup; + rv = req_set_body_data(req, content_type, body); +cleanup: + if (APR_SUCCESS == rv) { + *preq = req; } - - if (content_type) { - apr_table_set(req->headers, "Content-Type", content_type); + else { + *preq = NULL; + if (req) md_http_req_destroy(req); } - return schedule(req, body, 1, preq_id); + return rv; } -apr_status_t md_http_POSTd(md_http_t *http, const char *url, - struct apr_table_t *headers, const char *content_type, - const char *data, size_t data_len, - md_http_cb *cb, void *baton, long *preq_id) +apr_status_t md_http_GET_perform(struct md_http_t *http, + const char *url, struct apr_table_t *headers, + md_http_response_cb *cb, void *baton) { md_http_request_t *req; apr_status_t rv; - apr_bucket_brigade *body = NULL; - - rv = req_create(&req, http, "POST", url, headers, cb, baton); - if (rv != APR_SUCCESS) { - return rv; - } - if (data && data_len > 0) { - body = apr_brigade_create(req->pool, req->http->bucket_alloc); - rv = apr_brigade_write(body, NULL, NULL, data, data_len); - if (rv != APR_SUCCESS) { - md_http_req_destroy(req); - return rv; - } - } - - if (content_type) { - apr_table_set(req->headers, "Content-Type", content_type); - } - - return schedule(req, body, 1, preq_id); + rv = md_http_GET_create(&req, http, url, headers); + if (APR_SUCCESS == rv) md_http_set_on_response_cb(req, cb, baton); + return (APR_SUCCESS == rv)? md_http_perform(req) : rv; } -apr_status_t md_http_await(md_http_t *http, long req_id) +apr_status_t md_http_HEAD_perform(struct md_http_t *http, + const char *url, struct apr_table_t *headers, + md_http_response_cb *cb, void *baton) { - (void)http; - (void)req_id; - return APR_SUCCESS; + md_http_request_t *req; + apr_status_t rv; + + rv = md_http_HEAD_create(&req, http, url, headers); + if (APR_SUCCESS == rv) md_http_set_on_response_cb(req, cb, baton); + return (APR_SUCCESS == rv)? md_http_perform(req) : rv; } +apr_status_t md_http_POST_perform(struct md_http_t *http, const char *url, + struct apr_table_t *headers, const char *content_type, + apr_bucket_brigade *body, int detect_len, + md_http_response_cb *cb, void *baton) +{ + md_http_request_t *req; + apr_status_t rv; + + rv = md_http_POST_create(&req, http, url, headers, content_type, body, detect_len); + if (APR_SUCCESS == rv) md_http_set_on_response_cb(req, cb, baton); + return (APR_SUCCESS == rv)? md_http_perform(req) : rv; +} + +apr_status_t md_http_POSTd_perform(md_http_t *http, const char *url, + struct apr_table_t *headers, const char *content_type, + const md_data_t *body, + md_http_response_cb *cb, void *baton) +{ + md_http_request_t *req; + apr_status_t rv; + + rv = md_http_POSTd_create(&req, http, url, headers, content_type, body); + if (APR_SUCCESS == rv) md_http_set_on_response_cb(req, cb, baton); + return (APR_SUCCESS == rv)? 
md_http_perform(req) : rv; +} diff --git a/modules/md/md_http.h b/modules/md/md_http.h index c6d94bb..2f250f6 100644 --- a/modules/md/md_http.h +++ b/modules/md/md_http.h @@ -20,35 +20,63 @@ struct apr_table_t; struct apr_bucket_brigade; struct apr_bucket_alloc_t; +struct md_data_t; typedef struct md_http_t md_http_t; typedef struct md_http_request_t md_http_request_t; typedef struct md_http_response_t md_http_response_t; -typedef apr_status_t md_http_cb(const md_http_response_t *res); +/** + * Callback invoked once per request, either when an error was encountered + * or when everything succeeded and the request is about to be released. Only + * in the last case will the status be APR_SUCCESS. + */ +typedef apr_status_t md_http_status_cb(const md_http_request_t *req, apr_status_t status, void *data); + +/** + * Callback invoked when the complete response has been received. + */ +typedef apr_status_t md_http_response_cb(const md_http_response_t *res, void *data); + +typedef struct md_http_callbacks_t md_http_callbacks_t; +struct md_http_callbacks_t { + md_http_status_cb *on_status; + void *on_status_data; + md_http_response_cb *on_response; + void *on_response_data; +}; + +typedef struct md_http_timeouts_t md_http_timeouts_t; +struct md_http_timeouts_t { + apr_time_t overall; + apr_time_t connect; + long stall_bytes_per_sec; + apr_time_t stalled; +}; struct md_http_request_t { - long id; md_http_t *http; apr_pool_t *pool; + int id; struct apr_bucket_alloc_t *bucket_alloc; const char *method; const char *url; const char *user_agent; const char *proxy_url; + const char *ca_file; + const char *unix_socket_path; apr_table_t *headers; struct apr_bucket_brigade *body; apr_off_t body_len; apr_off_t resp_limit; - md_http_cb *cb; - void *baton; + md_http_timeouts_t timeout; + md_http_callbacks_t cb; void *internals; }; struct md_http_response_t { md_http_request_t *req; - apr_status_t rv; int status; apr_table_t *headers; struct apr_bucket_brigade *body; @@ -59,44 +87,186 @@ apr_status_t md_http_create(md_http_t **phttp, apr_pool_t *p, const char *user_a void md_http_set_response_limit(md_http_t *http, apr_off_t resp_limit); -apr_status_t md_http_GET(md_http_t *http, - const char *url, struct apr_table_t *headers, - md_http_cb *cb, void *baton, long *preq_id); +/** + * Clone a http instance, inheriting all settings from source_http. + * The cloned instance is not tied in any way to the source. + */ +apr_status_t md_http_clone(md_http_t **phttp, + apr_pool_t *p, md_http_t *source_http); + +/** + * Set the timeout for the complete request. This needs to take everything from + * DNS lookups, to connects, to transfer of all data into account and should + * be sufficiently large. + * Set to 0 to have no timeout for this. + */ +void md_http_set_timeout_default(md_http_t *http, apr_time_t timeout); +void md_http_set_timeout(md_http_request_t *req, apr_time_t timeout); + +/** + * Set the timeout for establishing a connection. + * Set to 0 to have no special timeout for this. + */ +void md_http_set_connect_timeout_default(md_http_t *http, apr_time_t timeout); +void md_http_set_connect_timeout(md_http_request_t *req, apr_time_t timeout); + +/** + * Set the condition for when a transfer is considered "stalled", e.g. does not + * progress at a sufficient rate and will be aborted. + * Set to 0 to have no stall detection in place. 
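A hedged sketch of how the timeout defaults documented above might be applied to an md_http_t instance; the concrete values are invented for illustration, and apr_time_from_sec() is APR's seconds-to-apr_time_t helper:

    md_http_set_timeout_default(http, apr_time_from_sec(60));          /* whole request, incl. lookup and transfer */
    md_http_set_connect_timeout_default(http, apr_time_from_sec(10));  /* connection establishment only */
    md_http_set_stalling_default(http, 100, apr_time_from_sec(30));    /* abort when below 100 bytes/sec for 30s */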
+ */ +void md_http_set_stalling_default(md_http_t *http, long bytes_per_sec, apr_time_t timeout); +void md_http_set_stalling(md_http_request_t *req, long bytes_per_sec, apr_time_t timeout); + +/** + * Set a CA file (in PEM format) to use for root certificates when + * verifying SSL connections. If not set (or set to NULL), the system's + * certificate store will be used. + */ +void md_http_set_ca_file(md_http_t *http, const char *ca_file); + +/** + * Set the path of a unix domain socket for use instead of TCP + * in a connection. Disable by providing NULL as path. + */ +void md_http_set_unix_socket_path(md_http_t *http, const char *path); + +/** + * Perform the request. When this function returns, the request and + * all its memory have been freed and must no longer be used. + */ +apr_status_t md_http_perform(md_http_request_t *request); -apr_status_t md_http_HEAD(md_http_t *http, - const char *url, struct apr_table_t *headers, - md_http_cb *cb, void *baton, long *preq_id); +/** + * Set the callback to be invoked once the status of a request is known. + * @param req the request + * @param cb the callback to invoke on the response + * @param baton data passed to the callback + */ +void md_http_set_on_status_cb(md_http_request_t *req, md_http_status_cb *cb, void *baton); + +/** + * Set the callback to be invoked when the complete + * response has been successfully received. The HTTP status may + * be 500, however. + * @param req the request + * @param cb the callback to invoke on the response + * @param baton data passed to the callback + */ +void md_http_set_on_response_cb(md_http_request_t *req, md_http_response_cb *cb, void *baton); + +/** + * Create a GET request. + * @param preq the created request after success + * @param http the md_http instance + * @param url the url to GET + * @param headers request headers + */ +apr_status_t md_http_GET_create(md_http_request_t **preq, md_http_t *http, const char *url, + struct apr_table_t *headers); + +/** + * Create a HEAD request. + * @param preq the created request after success + * @param http the md_http instance + * @param url the url to HEAD + * @param headers request headers + */ +apr_status_t md_http_HEAD_create(md_http_request_t **preq, md_http_t *http, const char *url, + struct apr_table_t *headers); -apr_status_t md_http_POST(md_http_t *http, const char *url, - struct apr_table_t *headers, const char *content_type, - struct apr_bucket_brigade *body, - md_http_cb *cb, void *baton, long *preq_id); +/** + * Create a POST request with a bucket brigade as request body. + * @param preq the created request after success + * @param http the md_http instance + * @param url the url to POST + * @param headers request headers + * @param content_type the content_type of the body or NULL + * @param body the body of the request or NULL + * @param detect_len scan the body to detect its length + */ +apr_status_t md_http_POST_create(md_http_request_t **preq, md_http_t *http, const char *url, + struct apr_table_t *headers, const char *content_type, + struct apr_bucket_brigade *body, int detect_len); -apr_status_t md_http_POSTd(md_http_t *http, const char *url, - struct apr_table_t *headers, const char *content_type, - const char *data, size_t data_len, - md_http_cb *cb, void *baton, long *preq_id); +/** + * Create a POST request with known request body data. 
+ * @param preq the created request after success + * @param http the md_http instance + * @param url the url to POST + * @param headers request headers + * @param content_type the content_type of the body or NULL + * @param body the body of the request or NULL + */ +apr_status_t md_http_POSTd_create(md_http_request_t **preq, md_http_t *http, const char *url, + struct apr_table_t *headers, const char *content_type, + const struct md_data_t *body); -apr_status_t md_http_await(md_http_t *http, long req_id); +/* + * Convenience functions for create+perform. + */ +apr_status_t md_http_GET_perform(md_http_t *http, const char *url, + struct apr_table_t *headers, + md_http_response_cb *cb, void *baton); +apr_status_t md_http_HEAD_perform(md_http_t *http, const char *url, + struct apr_table_t *headers, + md_http_response_cb *cb, void *baton); +apr_status_t md_http_POST_perform(md_http_t *http, const char *url, + struct apr_table_t *headers, const char *content_type, + struct apr_bucket_brigade *body, int detect_len, + md_http_response_cb *cb, void *baton); +apr_status_t md_http_POSTd_perform(md_http_t *http, const char *url, + struct apr_table_t *headers, const char *content_type, + const struct md_data_t *body, + md_http_response_cb *cb, void *baton); void md_http_req_destroy(md_http_request_t *req); +/** Return the next request for processing on APR_SUCCESS. Return APR_ENOENT + * when no request is available. Anything else is an error. + */ +typedef apr_status_t md_http_next_req(md_http_request_t **preq, void *baton, + md_http_t *http, int in_flight); + +/** + * Perform requests in parallel as retrieved from the nextreq function. + * There are as many requests in flight as the nextreq function provides. + * + * To limit the number of parallel requests, nextreq should return APR_ENOENT when the limit + * is reached. It will be called again when the number of in_flight requests changes. + * + * When all requests are done, nextreq will be called one more time. Should it not + * provide another request, this function returns. + */ +apr_status_t md_http_multi_perform(md_http_t *http, md_http_next_req *nextreq, void *baton); + /**************************************************************************************************/ /* interface to implementation */ typedef apr_status_t md_http_init_cb(void); +typedef void md_http_cleanup_cb(md_http_t *req, apr_pool_t *p); typedef void md_http_req_cleanup_cb(md_http_request_t *req); typedef apr_status_t md_http_perform_cb(md_http_request_t *req); +typedef apr_status_t md_http_multi_perform_cb(md_http_t *http, apr_pool_t *p, + md_http_next_req *nextreq, void *baton); typedef struct md_http_impl_t md_http_impl_t; struct md_http_impl_t { md_http_init_cb *init; md_http_req_cleanup_cb *req_cleanup; md_http_perform_cb *perform; + md_http_multi_perform_cb *multi_perform; + md_http_cleanup_cb *cleanup; }; void md_http_use_implementation(md_http_impl_t *impl); +/** + * get/set data the implementation wants to remember between requests + * in the same md_http_t instance. 
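A hedged usage sketch of the create/set-callback/perform flow and of the nextreq contract documented above; the response callback, the url_queue_t type and all variable names are invented for illustration:

    /* single request: create it, attach a response callback, perform it */
    static apr_status_t on_resp(const md_http_response_t *res, void *baton)
    {
        *(int *)baton = res->status;    /* res->body carries the response brigade */
        return APR_SUCCESS;
    }

    static apr_status_t fetch_one(md_http_t *http, const char *url, int *phttp_status)
    {
        md_http_request_t *req;
        apr_status_t rv;

        rv = md_http_GET_create(&req, http, url, NULL);
        if (APR_SUCCESS == rv) {
            md_http_set_on_response_cb(req, on_resp, phttp_status);
            rv = md_http_perform(req);  /* req and its pool are gone when this returns */
        }
        return rv;  /* shorthand: md_http_GET_perform(http, url, NULL, on_resp, phttp_status) */
    }

    /* parallel requests: hand them out through a nextreq callback */
    typedef struct { apr_array_header_t *urls; int next; int max_parallel; } url_queue_t;

    static apr_status_t next_from_queue(md_http_request_t **preq, void *baton,
                                        md_http_t *http, int in_flight)
    {
        url_queue_t *q = baton;
        if (in_flight >= q->max_parallel || q->next >= q->urls->nelts)
            return APR_ENOENT;          /* nothing (more) to schedule right now */
        return md_http_GET_create(preq, http,
                                  APR_ARRAY_IDX(q->urls, q->next++, const char *), NULL);
    }

    /* rv = md_http_multi_perform(http, next_from_queue, &queue); */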
+ */ +void md_http_set_impl_data(md_http_t *http, void *data); +void *md_http_get_impl_data(md_http_t *http); #endif /* md_http_h */ diff --git a/modules/md/md_json.c b/modules/md/md_json.c index f73ab14..e0f977e 100644 --- a/modules/md/md_json.c +++ b/modules/md/md_json.c @@ -18,10 +18,12 @@ #include #include #include +#include #include "md_json.h" #include "md_log.h" #include "md_http.h" +#include "md_time.h" #include "md_util.h" /* jansson thinks everyone compiles with the platform's cc in its fullest capabilities @@ -106,12 +108,12 @@ void md_json_destroy(md_json_t *json) } } -md_json_t *md_json_copy(apr_pool_t *pool, md_json_t *json) +md_json_t *md_json_copy(apr_pool_t *pool, const md_json_t *json) { return json_create(pool, json_copy(json->j)); } -md_json_t *md_json_clone(apr_pool_t *pool, md_json_t *json) +md_json_t *md_json_clone(apr_pool_t *pool, const md_json_t *json) { return json_create(pool, json_deep_copy(json->j)); } @@ -120,7 +122,7 @@ md_json_t *md_json_clone(apr_pool_t *pool, md_json_t *json) /* selectors */ -static json_t *jselect(md_json_t *json, va_list ap) +static json_t *jselect(const md_json_t *json, va_list ap) { json_t *j; const char *key; @@ -167,6 +169,31 @@ static apr_status_t jselect_add(json_t *val, md_json_t *json, va_list ap) j = jselect_parent(&key, 1, json, ap); + if (!j || !json_is_object(j)) { + return APR_EINVAL; + } + + aj = json_object_get(j, key); + if (!aj) { + aj = json_array(); + json_object_set_new(j, key, aj); + } + + if (!json_is_array(aj)) { + return APR_EINVAL; + } + + json_array_append(aj, val); + return APR_SUCCESS; +} + +static apr_status_t jselect_insert(json_t *val, size_t index, md_json_t *json, va_list ap) +{ + const char *key; + json_t *j, *aj; + + j = jselect_parent(&key, 1, json, ap); + if (!j || !json_is_object(j)) { json_decref(val); return APR_EINVAL; @@ -183,7 +210,12 @@ static apr_status_t jselect_add(json_t *val, md_json_t *json, va_list ap) return APR_EINVAL; } - json_array_append(aj, val); + if (json_array_size(aj) <= index) { + json_array_append(aj, val); + } + else { + json_array_insert(aj, index, val); + } return APR_SUCCESS; } @@ -195,13 +227,11 @@ static apr_status_t jselect_set(json_t *val, md_json_t *json, va_list ap) j = jselect_parent(&key, 1, json, ap); if (!j) { - json_decref(val); return APR_EINVAL; } if (key) { if (!json_is_object(j)) { - json_decref(val); return APR_EINVAL; } json_object_set(j, key, val); @@ -246,7 +276,7 @@ static apr_status_t jselect_set_new(json_t *val, md_json_t *json, va_list ap) return APR_SUCCESS; } -int md_json_has_key(md_json_t *json, ...) +int md_json_has_key(const md_json_t *json, ...) { json_t *j; va_list ap; @@ -258,10 +288,46 @@ int md_json_has_key(md_json_t *json, ...) return j != NULL; } +/**************************************************************************************************/ +/* type things */ + +int md_json_is(const md_json_type_t jtype, md_json_t *json, ...) 
+{ + json_t *j; + va_list ap; + + va_start(ap, json); + j = jselect(json, ap); + va_end(ap); + switch (jtype) { + case MD_JSON_TYPE_OBJECT: return (j && json_is_object(j)); + case MD_JSON_TYPE_ARRAY: return (j && json_is_array(j)); + case MD_JSON_TYPE_STRING: return (j && json_is_string(j)); + case MD_JSON_TYPE_REAL: return (j && json_is_real(j)); + case MD_JSON_TYPE_INT: return (j && json_is_integer(j)); + case MD_JSON_TYPE_BOOL: return (j && (json_is_true(j) || json_is_false(j))); + case MD_JSON_TYPE_NULL: return (j == NULL); + } + return 0; +} + +static const char *md_json_type_name(const md_json_t *json) +{ + json_t *j = json->j; + if (json_is_object(j)) return "object"; + if (json_is_array(j)) return "array"; + if (json_is_string(j)) return "string"; + if (json_is_real(j)) return "real"; + if (json_is_integer(j)) return "integer"; + if (json_is_true(j)) return "true"; + if (json_is_false(j)) return "false"; + return "unknown"; +} + /**************************************************************************************************/ /* booleans */ -int md_json_getb(md_json_t *json, ...) +int md_json_getb(const md_json_t *json, ...) { json_t *j; va_list ap; @@ -287,7 +353,7 @@ apr_status_t md_json_setb(int value, md_json_t *json, ...) /**************************************************************************************************/ /* numbers */ -double md_json_getn(md_json_t *json, ...) +double md_json_getn(const md_json_t *json, ...) { json_t *j; va_list ap; @@ -312,7 +378,7 @@ apr_status_t md_json_setn(double value, md_json_t *json, ...) /**************************************************************************************************/ /* longs */ -long md_json_getl(md_json_t *json, ...) +long md_json_getl(const md_json_t *json, ...) { json_t *j; va_list ap; @@ -337,7 +403,7 @@ apr_status_t md_json_setl(long value, md_json_t *json, ...) /**************************************************************************************************/ /* strings */ -const char *md_json_gets(md_json_t *json, ...) +const char *md_json_gets(const md_json_t *json, ...) { json_t *j; va_list ap; @@ -349,7 +415,7 @@ const char *md_json_gets(md_json_t *json, ...) return (j && json_is_string(j))? json_string_value(j) : NULL; } -const char *md_json_dups(apr_pool_t *p, md_json_t *json, ...) +const char *md_json_dups(apr_pool_t *p, const md_json_t *json, ...) { json_t *j; va_list ap; @@ -372,6 +438,35 @@ apr_status_t md_json_sets(const char *value, md_json_t *json, ...) return rv; } +/**************************************************************************************************/ +/* time */ + +apr_time_t md_json_get_time(const md_json_t *json, ...) +{ + json_t *j; + va_list ap; + + va_start(ap, json); + j = jselect(json, ap); + va_end(ap); + + if (!j || !json_is_string(j)) return 0; + return apr_date_parse_rfc(json_string_value(j)); +} + +apr_status_t md_json_set_time(apr_time_t value, md_json_t *json, ...) +{ + char ts[APR_RFC822_DATE_LEN]; + va_list ap; + apr_status_t rv; + + apr_rfc822_date(ts, value); + va_start(ap, json); + rv = jselect_set_new(json_string(ts), json, ap); + va_end(ap); + return rv; +} + /**************************************************************************************************/ /* json itself */ @@ -394,7 +489,42 @@ md_json_t *md_json_getj(md_json_t *json, ...) return NULL; } -apr_status_t md_json_setj(md_json_t *value, md_json_t *json, ...) +md_json_t *md_json_dupj(apr_pool_t *p, const md_json_t *json, ...) 
+{ + json_t *j; + va_list ap; + + va_start(ap, json); + j = jselect(json, ap); + va_end(ap); + + if (j) { + json_incref(j); + return json_create(p, j); + } + return NULL; +} + +const md_json_t *md_json_getcj(const md_json_t *json, ...) +{ + json_t *j; + va_list ap; + + va_start(ap, json); + j = jselect(json, ap); + va_end(ap); + + if (j) { + if (j == json->j) { + return json; + } + json_incref(j); + return json_create(json->p, j); + } + return NULL; +} + +apr_status_t md_json_setj(const md_json_t *value, md_json_t *json, ...) { va_list ap; apr_status_t rv; @@ -422,7 +552,7 @@ apr_status_t md_json_setj(md_json_t *value, md_json_t *json, ...) return rv; } -apr_status_t md_json_addj(md_json_t *value, md_json_t *json, ...) +apr_status_t md_json_addj(const md_json_t *value, md_json_t *json, ...) { va_list ap; apr_status_t rv; @@ -433,6 +563,36 @@ apr_status_t md_json_addj(md_json_t *value, md_json_t *json, ...) return rv; } +apr_status_t md_json_insertj(md_json_t *value, size_t index, md_json_t *json, ...) +{ + va_list ap; + apr_status_t rv; + + va_start(ap, json); + rv = jselect_insert(value->j, index, json, ap); + va_end(ap); + return rv; +} + +apr_size_t md_json_limita(size_t max_elements, md_json_t *json, ...) +{ + json_t *j; + va_list ap; + apr_size_t n = 0; + + va_start(ap, json); + j = jselect(json, ap); + va_end(ap); + + if (j && json_is_array(j)) { + n = json_array_size(j); + while (n > max_elements) { + json_array_remove(j, n-1); + n = json_array_size(j); + } + } + return n; +} /**************************************************************************************************/ /* arrays / objects */ @@ -474,7 +634,7 @@ apr_status_t md_json_del(md_json_t *json, ...) /**************************************************************************************************/ /* object strings */ -apr_status_t md_json_gets_dict(apr_table_t *dict, md_json_t *json, ...) +apr_status_t md_json_gets_dict(apr_table_t *dict, const md_json_t *json, ...) { json_t *j; va_list ap; @@ -557,7 +717,7 @@ apr_status_t md_json_clone_to(void *value, md_json_t *json, apr_pool_t *p, void return md_json_setj(md_json_clone(p, value), json, NULL); } -apr_status_t md_json_clone_from(void **pvalue, md_json_t *json, apr_pool_t *p, void *baton) +apr_status_t md_json_clone_from(void **pvalue, const md_json_t *json, apr_pool_t *p, void *baton) { (void)baton; *pvalue = md_json_clone(p, json); @@ -568,7 +728,7 @@ apr_status_t md_json_clone_from(void **pvalue, md_json_t *json, apr_pool_t *p, v /* array generic */ apr_status_t md_json_geta(apr_array_header_t *a, md_json_from_cb *cb, void *baton, - md_json_t *json, ...) + const md_json_t *json, ...) { json_t *j; va_list ap; @@ -672,10 +832,36 @@ int md_json_itera(md_json_itera_cb *cb, void *baton, md_json_t *json, ...) return 1; } +int md_json_iterkey(md_json_iterkey_cb *cb, void *baton, md_json_t *json, ...) +{ + json_t *j; + va_list ap; + const char *key; + json_t *val; + md_json_t wrap; + + va_start(ap, json); + j = jselect(json, ap); + va_end(ap); + + if (!j || !json_is_object(j)) { + return 0; + } + + wrap.p = json->p; + json_object_foreach(j, key, val) { + wrap.j = val; + if (!cb(baton, key, &wrap)) { + return 0; + } + } + return 1; +} + /**************************************************************************************************/ /* array strings */ -apr_status_t md_json_getsa(apr_array_header_t *a, md_json_t *json, ...) +apr_status_t md_json_getsa(apr_array_header_t *a, const md_json_t *json, ...) 
{ json_t *j; va_list ap; @@ -711,6 +897,7 @@ apr_status_t md_json_dupsa(apr_array_header_t *a, apr_pool_t *p, md_json_t *json size_t index; json_t *val; + apr_array_clear(a); json_array_foreach(j, index, val) { if (json_is_string(val)) { APR_ARRAY_PUSH(a, const char *) = apr_pstrdup(p, json_string_value(val)); @@ -757,7 +944,7 @@ apr_status_t md_json_setsa(apr_array_header_t *a, md_json_t *json, ...) /* formatting, parsing */ typedef struct { - md_json_t *json; + const md_json_t *json; md_json_fmt_t fmt; const char *fname; apr_file_t *f; @@ -782,7 +969,7 @@ static int dump_cb(const char *buffer, size_t len, void *baton) return (rv == APR_SUCCESS)? 0 : -1; } -apr_status_t md_json_writeb(md_json_t *json, md_json_fmt_t fmt, apr_bucket_brigade *bb) +apr_status_t md_json_writeb(const md_json_t *json, md_json_fmt_t fmt, apr_bucket_brigade *bb) { int rv = json_dump_callback(json->j, dump_cb, bb, fmt_to_flags(fmt)); return rv? APR_EGENERAL : APR_SUCCESS; @@ -791,22 +978,25 @@ apr_status_t md_json_writeb(md_json_t *json, md_json_fmt_t fmt, apr_bucket_briga static int chunk_cb(const char *buffer, size_t len, void *baton) { apr_array_header_t *chunks = baton; - char *chunk = apr_pcalloc(chunks->pool, len+1); + char *chunk; - memcpy(chunk, buffer, len); - APR_ARRAY_PUSH(chunks, const char *) = chunk; + if (len > 0) { + chunk = apr_palloc(chunks->pool, len+1); + memcpy(chunk, buffer, len); + chunk[len] = '\0'; + APR_ARRAY_PUSH(chunks, const char*) = chunk; + } return 0; } -const char *md_json_writep(md_json_t *json, apr_pool_t *p, md_json_fmt_t fmt) +const char *md_json_writep(const md_json_t *json, apr_pool_t *p, md_json_fmt_t fmt) { apr_array_header_t *chunks; int rv; chunks = apr_array_make(p, 10, sizeof(char *)); rv = json_dump_callback(json->j, chunk_cb, chunks, fmt_to_flags(fmt)); - - if (rv) { + if (APR_SUCCESS != rv) { md_log_perror(MD_LOG_MARK, MD_LOG_ERR, 0, p, "md_json_writep failed to dump JSON"); return NULL; @@ -816,33 +1006,32 @@ const char *md_json_writep(md_json_t *json, apr_pool_t *p, md_json_fmt_t fmt) case 0: return ""; case 1: - return APR_ARRAY_IDX(chunks, 0, const char *); + return APR_ARRAY_IDX(chunks, 0, const char*); default: return apr_array_pstrcat(p, chunks, 0); } } -apr_status_t md_json_writef(md_json_t *json, apr_pool_t *p, md_json_fmt_t fmt, apr_file_t *f) +apr_status_t md_json_writef(const md_json_t *json, apr_pool_t *p, md_json_fmt_t fmt, apr_file_t *f) { apr_status_t rv; const char *s; - s = md_json_writep(json, p, fmt); - - if (s) { + if ((s = md_json_writep(json, p, fmt))) { rv = apr_file_write_full(f, s, strlen(s), NULL); + if (APR_SUCCESS != rv) { + md_log_perror(MD_LOG_MARK, MD_LOG_ERR, rv, json->p, "md_json_writef: error writing file"); + } } else { rv = APR_EINVAL; - } - - if (APR_SUCCESS != rv) { - md_log_perror(MD_LOG_MARK, MD_LOG_ERR, rv, json->p, "md_json_writef"); + md_log_perror(MD_LOG_MARK, MD_LOG_ERR, rv, json->p, + "md_json_writef: error dumping json (%s)", md_json_dump_state(json, p)); } return rv; } -apr_status_t md_json_fcreatex(md_json_t *json, apr_pool_t *p, md_json_fmt_t fmt, +apr_status_t md_json_fcreatex(const md_json_t *json, apr_pool_t *p, md_json_fmt_t fmt, const char *fpath, apr_fileperms_t perms) { apr_status_t rv; @@ -866,7 +1055,7 @@ static apr_status_t write_json(void *baton, apr_file_t *f, apr_pool_t *p) return rv; } -apr_status_t md_json_freplace(md_json_t *json, apr_pool_t *p, md_json_fmt_t fmt, +apr_status_t md_json_freplace(const md_json_t *json, apr_pool_t *p, md_json_fmt_t fmt, const char *fpath, apr_fileperms_t perms) { 
j_write_ctx ctx; @@ -938,11 +1127,14 @@ apr_status_t md_json_readb(md_json_t **pjson, apr_pool_t *pool, apr_bucket_briga json_t *j; j = json_load_callback(load_cb, bb, 0, &error); - if (!j) { - return APR_EINVAL; + if (j) { + *pjson = json_create(pool, j); + } else { + md_log_perror(MD_LOG_MARK, MD_LOG_ERR, 0, pool, + "failed to load JSON file: %s (line %d:%d)", + error.text, error.line, error.column); } - *pjson = json_create(pool, j); - return APR_SUCCESS; + return (j && *pjson) ? APR_SUCCESS : APR_EINVAL; } static size_t load_file_cb(void *data, size_t max_len, void *baton) @@ -993,12 +1185,18 @@ apr_status_t md_json_readf(md_json_t **pjson, apr_pool_t *p, const char *fpath) apr_status_t md_json_read_http(md_json_t **pjson, apr_pool_t *pool, const md_http_response_t *res) { apr_status_t rv = APR_ENOENT; - if (res->rv == APR_SUCCESS) { - const char *ctype = apr_table_get(res->headers, "content-type"); - if (ctype && res->body && (strstr(ctype, "/json") || strstr(ctype, "+json"))) { - rv = md_json_readb(pjson, pool, res->body); - } + const char *ctype, *p; + + *pjson = NULL; + if (!res->body) goto cleanup; + ctype = md_util_parse_ct(res->req->pool, apr_table_get(res->headers, "content-type")); + if (!ctype) goto cleanup; + p = ctype + strlen(ctype) +1; + if (!strcmp(p - sizeof("/json"), "/json") + || !strcmp(p - sizeof("+json"), "+json")) { + rv = md_json_readb(pjson, pool, res->body); } +cleanup: return rv; } @@ -1008,26 +1206,24 @@ typedef struct { md_json_t *json; } resp_data; -static apr_status_t json_resp_cb(const md_http_response_t *res) +static apr_status_t json_resp_cb(const md_http_response_t *res, void *data) { - resp_data *resp = res->req->baton; + resp_data *resp = data; return md_json_read_http(&resp->json, resp->pool, res); } apr_status_t md_json_http_get(md_json_t **pjson, apr_pool_t *pool, struct md_http_t *http, const char *url) { - long req_id; apr_status_t rv; resp_data resp; memset(&resp, 0, sizeof(resp)); resp.pool = pool; - rv = md_http_GET(http, url, NULL, json_resp_cb, &resp, &req_id); + rv = md_http_GET_perform(http, url, NULL, json_resp_cb, &resp); if (rv == APR_SUCCESS) { - md_http_await(http, req_id); *pjson = resp.json; return resp.rv; } @@ -1035,3 +1231,81 @@ apr_status_t md_json_http_get(md_json_t **pjson, apr_pool_t *pool, return rv; } + +apr_status_t md_json_copy_to(md_json_t *dest, const md_json_t *src, ...) +{ + json_t *j; + va_list ap; + apr_status_t rv = APR_SUCCESS; + + va_start(ap, src); + j = jselect(src, ap); + va_end(ap); + + if (j) { + va_start(ap, src); + rv = jselect_set(j, dest, ap); + va_end(ap); + } + return rv; +} + +const char *md_json_dump_state(const md_json_t *json, apr_pool_t *p) +{ + if (!json) return "NULL"; + return apr_psprintf(p, "%s, refc=%ld", md_json_type_name(json), (long)json->j->refcount); +} + +apr_status_t md_json_set_timeperiod(const md_timeperiod_t *tp, md_json_t *json, ...) 
+{ + char ts[APR_RFC822_DATE_LEN]; + json_t *jn, *j; + va_list ap; + const char *key; + apr_status_t rv; + + if (tp && tp->start && tp->end) { + jn = json_object(); + apr_rfc822_date(ts, tp->start); + json_object_set_new(jn, "from", json_string(ts)); + apr_rfc822_date(ts, tp->end); + json_object_set_new(jn, "until", json_string(ts)); + + va_start(ap, json); + rv = jselect_set_new(jn, json, ap); + va_end(ap); + return rv; + } + else { + va_start(ap, json); + j = jselect_parent(&key, 0, json, ap); + va_end(ap); + + if (key && j && json_is_object(j)) { + json_object_del(j, key); + } + return APR_SUCCESS; + } +} + +apr_status_t md_json_get_timeperiod(md_timeperiod_t *tp, md_json_t *json, ...) +{ + json_t *j, *jts; + va_list ap; + + va_start(ap, json); + j = jselect(json, ap); + va_end(ap); + + memset(tp, 0, sizeof(*tp)); + if (!j) goto not_found; + jts = json_object_get(j, "from"); + if (!jts || !json_is_string(jts)) goto not_found; + tp->start = apr_date_parse_rfc(json_string_value(jts)); + jts = json_object_get(j, "until"); + if (!jts || !json_is_string(jts)) goto not_found; + tp->end = apr_date_parse_rfc(json_string_value(jts)); + return APR_SUCCESS; +not_found: + return APR_ENOENT; +} diff --git a/modules/md/md_json.h b/modules/md/md_json.h index 7f2e4f3..50b8828 100644 --- a/modules/md/md_json.h +++ b/modules/md/md_json.h @@ -24,10 +24,21 @@ struct apr_file_t; struct md_http_t; struct md_http_response_t; - +struct md_timeperiod_t; typedef struct md_json_t md_json_t; +typedef enum { + MD_JSON_TYPE_OBJECT, + MD_JSON_TYPE_ARRAY, + MD_JSON_TYPE_STRING, + MD_JSON_TYPE_REAL, + MD_JSON_TYPE_INT, + MD_JSON_TYPE_BOOL, + MD_JSON_TYPE_NULL, +} md_json_type_t; + + typedef enum { MD_JSON_FMT_COMPACT, MD_JSON_FMT_INDENT, @@ -36,38 +47,50 @@ typedef enum { md_json_t *md_json_create(apr_pool_t *pool); void md_json_destroy(md_json_t *json); -md_json_t *md_json_copy(apr_pool_t *pool, md_json_t *json); -md_json_t *md_json_clone(apr_pool_t *pool, md_json_t *json); +md_json_t *md_json_copy(apr_pool_t *pool, const md_json_t *json); +md_json_t *md_json_clone(apr_pool_t *pool, const md_json_t *json); + -int md_json_has_key(md_json_t *json, ...); +int md_json_has_key(const md_json_t *json, ...); +int md_json_is(const md_json_type_t type, md_json_t *json, ...); /* boolean manipulation */ -int md_json_getb(md_json_t *json, ...); +int md_json_getb(const md_json_t *json, ...); apr_status_t md_json_setb(int value, md_json_t *json, ...); /* number manipulation */ -double md_json_getn(md_json_t *json, ...); +double md_json_getn(const md_json_t *json, ...); apr_status_t md_json_setn(double value, md_json_t *json, ...); /* long manipulation */ -long md_json_getl(md_json_t *json, ...); +long md_json_getl(const md_json_t *json, ...); apr_status_t md_json_setl(long value, md_json_t *json, ...); /* string manipulation */ md_json_t *md_json_create_s(apr_pool_t *pool, const char *s); -const char *md_json_gets(md_json_t *json, ...); -const char *md_json_dups(apr_pool_t *p, md_json_t *json, ...); +const char *md_json_gets(const md_json_t *json, ...); +const char *md_json_dups(apr_pool_t *p, const md_json_t *json, ...); apr_status_t md_json_sets(const char *s, md_json_t *json, ...); +/* timestamp manipulation */ +apr_time_t md_json_get_time(const md_json_t *json, ...); +apr_status_t md_json_set_time(apr_time_t value, md_json_t *json, ...); + /* json manipulation */ md_json_t *md_json_getj(md_json_t *json, ...); -apr_status_t md_json_setj(md_json_t *value, md_json_t *json, ...); -apr_status_t md_json_addj(md_json_t *value, 
md_json_t *json, ...); +md_json_t *md_json_dupj(apr_pool_t *p, const md_json_t *json, ...); +const md_json_t *md_json_getcj(const md_json_t *json, ...); +apr_status_t md_json_setj(const md_json_t *value, md_json_t *json, ...); +apr_status_t md_json_addj(const md_json_t *value, md_json_t *json, ...); +apr_status_t md_json_insertj(md_json_t *value, size_t index, md_json_t *json, ...); /* Array/Object manipulation */ apr_status_t md_json_clr(md_json_t *json, ...); apr_status_t md_json_del(md_json_t *json, ...); +/* Remove all array elements beyond max_elements */ +apr_size_t md_json_limita(size_t max_elements, md_json_t *json, ...); + /* conversion function from and to json */ typedef apr_status_t md_json_to_cb(void *value, md_json_t *json, apr_pool_t *p, void *baton); typedef apr_status_t md_json_from_cb(void **pvalue, md_json_t *json, apr_pool_t *p, void *baton); @@ -78,34 +101,39 @@ apr_status_t md_json_pass_from(void **pvalue, md_json_t *json, apr_pool_t *p, vo /* conversions from json to json in specified pool */ apr_status_t md_json_clone_to(void *value, md_json_t *json, apr_pool_t *p, void *baton); -apr_status_t md_json_clone_from(void **pvalue, md_json_t *json, apr_pool_t *p, void *baton); +apr_status_t md_json_clone_from(void **pvalue, const md_json_t *json, apr_pool_t *p, void *baton); /* Manipulating/Iteration on generic Arrays */ apr_status_t md_json_geta(apr_array_header_t *a, md_json_from_cb *cb, - void *baton, md_json_t *json, ...); + void *baton, const md_json_t *json, ...); apr_status_t md_json_seta(apr_array_header_t *a, md_json_to_cb *cb, void *baton, md_json_t *json, ...); +/* Called on each array element, aborts iteration when returning 0 */ typedef int md_json_itera_cb(void *baton, size_t index, md_json_t *json); int md_json_itera(md_json_itera_cb *cb, void *baton, md_json_t *json, ...); +/* Called on each object key, aborts iteration when returning 0 */ +typedef int md_json_iterkey_cb(void *baton, const char* key, md_json_t *json); +int md_json_iterkey(md_json_iterkey_cb *cb, void *baton, md_json_t *json, ...); + /* Manipulating Object String values */ -apr_status_t md_json_gets_dict(apr_table_t *dict, md_json_t *json, ...); +apr_status_t md_json_gets_dict(apr_table_t *dict, const md_json_t *json, ...); apr_status_t md_json_sets_dict(apr_table_t *dict, md_json_t *json, ...); /* Manipulating String Arrays */ -apr_status_t md_json_getsa(apr_array_header_t *a, md_json_t *json, ...); +apr_status_t md_json_getsa(apr_array_header_t *a, const md_json_t *json, ...); apr_status_t md_json_dupsa(apr_array_header_t *a, apr_pool_t *p, md_json_t *json, ...); apr_status_t md_json_setsa(apr_array_header_t *a, md_json_t *json, ...); /* serialization & parsing */ -apr_status_t md_json_writeb(md_json_t *json, md_json_fmt_t fmt, struct apr_bucket_brigade *bb); -const char *md_json_writep(md_json_t *json, apr_pool_t *p, md_json_fmt_t fmt); -apr_status_t md_json_writef(md_json_t *json, apr_pool_t *p, +apr_status_t md_json_writeb(const md_json_t *json, md_json_fmt_t fmt, struct apr_bucket_brigade *bb); +const char *md_json_writep(const md_json_t *json, apr_pool_t *p, md_json_fmt_t fmt); +apr_status_t md_json_writef(const md_json_t *json, apr_pool_t *p, md_json_fmt_t fmt, struct apr_file_t *f); -apr_status_t md_json_fcreatex(md_json_t *json, apr_pool_t *p, md_json_fmt_t fmt, +apr_status_t md_json_fcreatex(const md_json_t *json, apr_pool_t *p, md_json_fmt_t fmt, const char *fpath, apr_fileperms_t perms); -apr_status_t md_json_freplace(md_json_t *json, apr_pool_t *p, md_json_fmt_t fmt, 
+apr_status_t md_json_freplace(const md_json_t *json, apr_pool_t *p, md_json_fmt_t fmt, const char *fpath, apr_fileperms_t perms); apr_status_t md_json_readb(md_json_t **pjson, apr_pool_t *pool, struct apr_bucket_brigade *bb); @@ -119,4 +147,11 @@ apr_status_t md_json_http_get(md_json_t **pjson, apr_pool_t *pool, apr_status_t md_json_read_http(md_json_t **pjson, apr_pool_t *pool, const struct md_http_response_t *res); +apr_status_t md_json_copy_to(md_json_t *dest, const md_json_t *src, ...); + +const char *md_json_dump_state(const md_json_t *json, apr_pool_t *p); + +apr_status_t md_json_set_timeperiod(const struct md_timeperiod_t *tp, md_json_t *json, ...); +apr_status_t md_json_get_timeperiod(struct md_timeperiod_t *tp, md_json_t *json, ...); + #endif /* md_json_h */ diff --git a/modules/md/md_jws.c b/modules/md/md_jws.c index 37c1b0e..c0e8c1b 100644 --- a/modules/md/md_jws.c +++ b/modules/md/md_jws.c @@ -25,65 +25,67 @@ #include "md_log.h" #include "md_util.h" -static int header_set(void *data, const char *key, const char *val) +apr_status_t md_jws_get_jwk(md_json_t **pjwk, apr_pool_t *p, struct md_pkey_t *pkey) { - md_json_sets(val, (md_json_t *)data, key, NULL); - return 1; + md_json_t *jwk; + + if (!pkey) return APR_EINVAL; + + jwk = md_json_create(p); + md_json_sets(md_pkey_get_rsa_e64(pkey, p), jwk, "e", NULL); + md_json_sets("RSA", jwk, "kty", NULL); + md_json_sets(md_pkey_get_rsa_n64(pkey, p), jwk, "n", NULL); + *pjwk = jwk; + return APR_SUCCESS; } apr_status_t md_jws_sign(md_json_t **pmsg, apr_pool_t *p, - const char *payload, size_t len, - struct apr_table_t *protected, + md_data_t *payload, md_json_t *prot_fields, struct md_pkey_t *pkey, const char *key_id) { - md_json_t *msg, *jprotected; + md_json_t *msg, *jprotected, *jwk; const char *prot64, *pay64, *sign64, *sign, *prot; - apr_status_t rv = APR_SUCCESS; + md_data_t data; + apr_status_t rv; - *pmsg = NULL; - msg = md_json_create(p); - - jprotected = md_json_create(p); + jprotected = md_json_clone(p, prot_fields); md_json_sets("RS256", jprotected, "alg", NULL); if (key_id) { md_json_sets(key_id, jprotected, "kid", NULL); } else { - md_json_sets(md_pkey_get_rsa_e64(pkey, p), jprotected, "jwk", "e", NULL); - md_json_sets("RSA", jprotected, "jwk", "kty", NULL); - md_json_sets(md_pkey_get_rsa_n64(pkey, p), jprotected, "jwk", "n", NULL); + rv = md_jws_get_jwk(&jwk, p, pkey); + if (APR_SUCCESS != rv) { + md_log_perror(MD_LOG_MARK, MD_LOG_WARNING, rv, p, "get jwk"); + goto cleanup; + } + md_json_setj(jwk, jprotected, "jwk", NULL); } - apr_table_do(header_set, jprotected, protected, NULL); - prot = md_json_writep(jprotected, p, MD_JSON_FMT_COMPACT); - md_log_perror(MD_LOG_MARK, MD_LOG_TRACE4, 0, p, "protected: %s", - prot ? 
prot : ""); + prot = md_json_writep(jprotected, p, MD_JSON_FMT_COMPACT); if (!prot) { rv = APR_EINVAL; + md_log_perror(MD_LOG_MARK, MD_LOG_WARNING, rv, p, "serialize protected"); + goto cleanup; } - - if (rv == APR_SUCCESS) { - prot64 = md_util_base64url_encode(prot, strlen(prot), p); - md_json_sets(prot64, msg, "protected", NULL); - pay64 = md_util_base64url_encode(payload, len, p); - md_json_sets(pay64, msg, "payload", NULL); - sign = apr_psprintf(p, "%s.%s", prot64, pay64); + md_data_init(&data, prot, strlen(prot)); + prot64 = md_util_base64url_encode(&data, p); + md_json_sets(prot64, msg, "protected", NULL); - rv = md_crypt_sign64(&sign64, pkey, p, sign, strlen(sign)); - } + pay64 = md_util_base64url_encode(payload, p); + md_json_sets(pay64, msg, "payload", NULL); + sign = apr_psprintf(p, "%s.%s", prot64, pay64); - if (rv == APR_SUCCESS) { - md_log_perror(MD_LOG_MARK, MD_LOG_TRACE3, 0, p, - "jws pay64=%s\nprot64=%s\nsign64=%s", pay64, prot64, sign64); - - md_json_sets(sign64, msg, "signature", NULL); - } - else { + rv = md_crypt_sign64(&sign64, pkey, p, sign, strlen(sign)); + if (APR_SUCCESS != rv) { md_log_perror(MD_LOG_MARK, MD_LOG_WARNING, rv, p, "jwk signed message"); - } - + goto cleanup; + } + md_json_sets(sign64, msg, "signature", NULL); + +cleanup: *pmsg = (APR_SUCCESS == rv)? msg : NULL; return rv; } @@ -91,6 +93,7 @@ apr_status_t md_jws_sign(md_json_t **pmsg, apr_pool_t *p, apr_status_t md_jws_pkey_thumb(const char **pthumb, apr_pool_t *p, struct md_pkey_t *pkey) { const char *e64, *n64, *s; + md_data_t data; apr_status_t rv; e64 = md_pkey_get_rsa_e64(pkey, p); @@ -101,6 +104,45 @@ apr_status_t md_jws_pkey_thumb(const char **pthumb, apr_pool_t *p, struct md_pke /* whitespace and order is relevant, since we hand out a digest of this */ s = apr_psprintf(p, "{\"e\":\"%s\",\"kty\":\"RSA\",\"n\":\"%s\"}", e64, n64); - rv = md_crypt_sha256_digest64(pthumb, p, s, strlen(s)); + md_data_init_str(&data, s); + rv = md_crypt_sha256_digest64(pthumb, p, &data); + return rv; +} + +apr_status_t md_jws_hmac(md_json_t **pmsg, apr_pool_t *p, + md_data_t *payload, md_json_t *prot_fields, + const md_data_t *hmac_key) +{ + md_json_t *msg, *jprotected; + const char *prot64, *pay64, *mac64, *sign, *prot; + md_data_t data; + apr_status_t rv; + + msg = md_json_create(p); + jprotected = md_json_clone(p, prot_fields); + md_json_sets("HS256", jprotected, "alg", NULL); + prot = md_json_writep(jprotected, p, MD_JSON_FMT_COMPACT); + if (!prot) { + rv = APR_EINVAL; + md_log_perror(MD_LOG_MARK, MD_LOG_WARNING, rv, p, "serialize protected"); + goto cleanup; + } + + md_data_init(&data, prot, strlen(prot)); + prot64 = md_util_base64url_encode(&data, p); + md_json_sets(prot64, msg, "protected", NULL); + + pay64 = md_util_base64url_encode(payload, p); + md_json_sets(pay64, msg, "payload", NULL); + sign = apr_psprintf(p, "%s.%s", prot64, pay64); + + rv = md_crypt_hmac64(&mac64, hmac_key, p, sign, strlen(sign)); + if (APR_SUCCESS != rv) { + goto cleanup; + } + md_json_sets(mac64, msg, "signature", NULL); + +cleanup: + *pmsg = (APR_SUCCESS == rv)? msg : NULL; return rv; } diff --git a/modules/md/md_jws.h b/modules/md/md_jws.h index e7c145e..466f2df 100644 --- a/modules/md/md_jws.h +++ b/modules/md/md_jws.h @@ -20,11 +20,33 @@ struct apr_table_t; struct md_json_t; struct md_pkey_t; +struct md_data_t; +/** + * Get the JSON value of the 'jwk' field for the given key. 
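A sketch of how a caller might drive md_jws_sign as implemented above for an ACME-style request; the "nonce" and "url" protected fields and all variable names (p, nonce, req_url, payload_text, acct_key, acct_kid) are assumptions for illustration:

    md_json_t *prot_fields, *msg;
    md_data_t payload;
    const char *body_text;
    apr_status_t rv;

    prot_fields = md_json_create(p);            /* "alg" and "jwk"/"kid" are filled in by md_jws_sign() */
    md_json_sets(nonce, prot_fields, "nonce", NULL);
    md_json_sets(req_url, prot_fields, "url", NULL);
    md_data_init_str(&payload, payload_text);   /* the serialized request payload */

    rv = md_jws_sign(&msg, p, &payload, prot_fields, acct_key, acct_kid /* NULL embeds the jwk */);
    if (APR_SUCCESS == rv) {
        body_text = md_json_writep(msg, p, MD_JSON_FMT_COMPACT);
    }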
+ */ +apr_status_t md_jws_get_jwk(md_json_t **pjwk, apr_pool_t *p, struct md_pkey_t *pkey); + +/** + * Get the JWS key signed JSON message with given payload and protected fields, signed + * using the given key and optional key_id. + */ apr_status_t md_jws_sign(md_json_t **pmsg, apr_pool_t *p, - const char *payload, size_t len, struct apr_table_t *protected, + struct md_data_t *payload, md_json_t *prot_fields, struct md_pkey_t *pkey, const char *key_id); +/** + * Get the 'Thumbprint' as defined in RFC8555 for the given key in + * base64 encoding. + */ +apr_status_t md_jws_pkey_thumb(const char **pthumb64, apr_pool_t *p, struct md_pkey_t *pkey); + +/** + * Get the JWS HS256 signed message for given payload and protected fields, + * using the base64 encoded MAC key. + */ +apr_status_t md_jws_hmac(md_json_t **pmsg, apr_pool_t *p, + struct md_data_t *payload, md_json_t *prot_fields, + const struct md_data_t *hmac_key); -apr_status_t md_jws_pkey_thumb(const char **pthumb, apr_pool_t *p, struct md_pkey_t *pkey); #endif /* md_jws_h */ diff --git a/modules/md/md_log.h b/modules/md/md_log.h index 73885f2..19e688f 100644 --- a/modules/md/md_log.h +++ b/modules/md/md_log.h @@ -38,6 +38,10 @@ typedef enum { #define MD_LOG_MARK __FILE__,__LINE__ +#ifndef APLOGNO +#define APLOGNO(n) "AH" #n ": " +#endif + const char *md_log_level_name(md_log_level_t level); int md_log_is_level(apr_pool_t *p, md_log_level_t level); diff --git a/modules/md/md_ocsp.c b/modules/md/md_ocsp.c new file mode 100644 index 0000000..8cbf05b --- /dev/null +++ b/modules/md/md_ocsp.c @@ -0,0 +1,1063 @@ +/* Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#if defined(LIBRESSL_VERSION_NUMBER) +/* Missing from LibreSSL */ +#define MD_USE_OPENSSL_PRE_1_1_API (LIBRESSL_VERSION_NUMBER < 0x2070000f) +#else /* defined(LIBRESSL_VERSION_NUMBER) */ +#define MD_USE_OPENSSL_PRE_1_1_API (OPENSSL_VERSION_NUMBER < 0x10100000L) +#endif + +#include "md.h" +#include "md_crypt.h" +#include "md_event.h" +#include "md_json.h" +#include "md_log.h" +#include "md_http.h" +#include "md_json.h" +#include "md_result.h" +#include "md_status.h" +#include "md_store.h" +#include "md_util.h" +#include "md_ocsp.h" + +#define MD_OCSP_ID_LENGTH SHA_DIGEST_LENGTH + +struct md_ocsp_reg_t { + apr_pool_t *p; + md_store_t *store; + const char *user_agent; + const char *proxy_url; + apr_hash_t *id_by_external_id; + apr_hash_t *ostat_by_id; + apr_thread_mutex_t *mutex; + md_timeslice_t renew_window; + md_job_notify_cb *notify; + void *notify_ctx; + apr_time_t min_delay; +}; + +typedef struct md_ocsp_status_t md_ocsp_status_t; +struct md_ocsp_status_t { + md_data_t id; + const char *hexid; + const char *hex_sha256; + OCSP_CERTID *certid; + const char *responder_url; + + apr_time_t next_run; /* when the responder shall be asked again */ + int errors; /* consecutive failed attempts */ + + md_ocsp_cert_stat_t resp_stat; + md_data_t resp_der; + md_timeperiod_t resp_valid; + + md_data_t req_der; + OCSP_REQUEST *ocsp_req; + md_ocsp_reg_t *reg; + + const char *md_name; + const char *file_name; + + apr_time_t resp_mtime; + apr_time_t resp_last_check; +}; + +typedef struct md_ocsp_id_map_t md_ocsp_id_map_t; +struct md_ocsp_id_map_t { + md_data_t id; + md_data_t external_id; +}; + +static void md_openssl_free(void *d) +{ + OPENSSL_free(d); +} + +const char *md_ocsp_cert_stat_name(md_ocsp_cert_stat_t stat) +{ + switch (stat) { + case MD_OCSP_CERT_ST_GOOD: return "good"; + case MD_OCSP_CERT_ST_REVOKED: return "revoked"; + default: return "unknown"; + } +} + +md_ocsp_cert_stat_t md_ocsp_cert_stat_value(const char *name) +{ + if (name && !strcmp("good", name)) return MD_OCSP_CERT_ST_GOOD; + if (name && !strcmp("revoked", name)) return MD_OCSP_CERT_ST_REVOKED; + return MD_OCSP_CERT_ST_UNKNOWN; +} + +apr_status_t md_ocsp_init_id(md_data_t *id, apr_pool_t *p, const md_cert_t *cert) +{ + unsigned char iddata[SHA_DIGEST_LENGTH]; + X509 *x = md_cert_get_X509(cert); + unsigned int ulen = 0; + + md_data_null(id); + if (X509_digest(x, EVP_sha1(), iddata, &ulen) != 1) { + return APR_EGENERAL; + } + md_data_assign_pcopy(id, (const char*)iddata, ulen, p); + return APR_SUCCESS; +} + +static void ostat_req_cleanup(md_ocsp_status_t *ostat) +{ + if (ostat->ocsp_req) { + OCSP_REQUEST_free(ostat->ocsp_req); + ostat->ocsp_req = NULL; + } + md_data_clear(&ostat->req_der); +} + +static int ostat_cleanup(void *ctx, const void *key, apr_ssize_t klen, const void *val) +{ + md_ocsp_reg_t *reg = ctx; + md_ocsp_status_t *ostat = (md_ocsp_status_t *)val; + + (void)reg; + (void)key; + (void)klen; + ostat_req_cleanup(ostat); + if (ostat->certid) { + OCSP_CERTID_free(ostat->certid); + ostat->certid = NULL; + } + md_data_clear(&ostat->resp_der); + return 1; +} + +static int ostat_should_renew(md_ocsp_status_t *ostat) +{ + md_timeperiod_t renewal; + + renewal = md_timeperiod_slice_before_end(&ostat->resp_valid, &ostat->reg->renew_window); + return md_timeperiod_has_started(&renewal, apr_time_now()); +} + +static apr_status_t ostat_set(md_ocsp_status_t *ostat, 
md_ocsp_cert_stat_t stat, + md_data_t *der, md_timeperiod_t *valid, apr_time_t mtime) +{ + apr_status_t rv; + + rv = md_data_assign_copy(&ostat->resp_der, der->data, der->len); + if (APR_SUCCESS != rv) goto cleanup; + + ostat->resp_stat = stat; + ostat->resp_valid = *valid; + ostat->resp_mtime = mtime; + + ostat->errors = 0; + ostat->next_run = md_timeperiod_slice_before_end( + &ostat->resp_valid, &ostat->reg->renew_window).start; + +cleanup: + return rv; +} + +static apr_status_t ostat_from_json(md_ocsp_cert_stat_t *pstat, + md_data_t *resp_der, md_timeperiod_t *resp_valid, + md_json_t *json, apr_pool_t *p) +{ + const char *s; + md_timeperiod_t valid; + apr_status_t rv = APR_ENOENT; + + memset(resp_der, 0, sizeof(*resp_der)); + memset(resp_valid, 0, sizeof(*resp_valid)); + s = md_json_dups(p, json, MD_KEY_VALID, MD_KEY_FROM, NULL); + if (s && *s) valid.start = apr_date_parse_rfc(s); + s = md_json_dups(p, json, MD_KEY_VALID, MD_KEY_UNTIL, NULL); + if (s && *s) valid.end = apr_date_parse_rfc(s); + s = md_json_dups(p, json, MD_KEY_RESPONSE, NULL); + if (!s || !*s) goto cleanup; + md_util_base64url_decode(resp_der, s, p); + *pstat = md_ocsp_cert_stat_value(md_json_gets(json, MD_KEY_STATUS, NULL)); + *resp_valid = valid; + rv = APR_SUCCESS; +cleanup: + return rv; +} + +static void ostat_to_json(md_json_t *json, md_ocsp_cert_stat_t stat, + const md_data_t *resp_der, const md_timeperiod_t *resp_valid, + apr_pool_t *p) +{ + const char *s = NULL; + + if (resp_der->len > 0) { + md_json_sets(md_util_base64url_encode(resp_der, p), json, MD_KEY_RESPONSE, NULL); + s = md_ocsp_cert_stat_name(stat); + if (s) md_json_sets(s, json, MD_KEY_STATUS, NULL); + md_json_set_timeperiod(resp_valid, json, MD_KEY_VALID, NULL); + } +} + +static apr_status_t ocsp_status_refresh(md_ocsp_status_t *ostat, apr_pool_t *ptemp) +{ + md_store_t *store = ostat->reg->store; + md_json_t *jprops; + apr_time_t mtime; + apr_status_t rv = APR_EAGAIN; + md_data_t resp_der; + md_timeperiod_t resp_valid; + md_ocsp_cert_stat_t resp_stat; + /* Check if the store holds a newer response than the one we have */ + mtime = md_store_get_modified(store, MD_SG_OCSP, ostat->md_name, ostat->file_name, ptemp); + if (mtime <= ostat->resp_mtime) goto cleanup; + rv = md_store_load_json(store, MD_SG_OCSP, ostat->md_name, ostat->file_name, &jprops, ptemp); + if (APR_SUCCESS != rv) goto cleanup; + rv = ostat_from_json(&resp_stat, &resp_der, &resp_valid, jprops, ptemp); + if (APR_SUCCESS != rv) goto cleanup; + rv = ostat_set(ostat, resp_stat, &resp_der, &resp_valid, mtime); + if (APR_SUCCESS != rv) goto cleanup; +cleanup: + return rv; +} + + +static apr_status_t ocsp_status_save(md_ocsp_cert_stat_t stat, const md_data_t *resp_der, + const md_timeperiod_t *resp_valid, + md_ocsp_status_t *ostat, apr_pool_t *ptemp) +{ + md_store_t *store = ostat->reg->store; + md_json_t *jprops; + apr_time_t mtime; + apr_status_t rv; + + jprops = md_json_create(ptemp); + ostat_to_json(jprops, stat, resp_der, resp_valid, ptemp); + rv = md_store_save_json(store, ptemp, MD_SG_OCSP, ostat->md_name, ostat->file_name, jprops, 0); + if (APR_SUCCESS != rv) goto cleanup; + mtime = md_store_get_modified(store, MD_SG_OCSP, ostat->md_name, ostat->file_name, ptemp); + if (mtime) ostat->resp_mtime = mtime; +cleanup: + return rv; +} + +static apr_status_t ocsp_reg_cleanup(void *data) +{ + md_ocsp_reg_t *reg = data; + + /* free all OpenSSL structures that we hold */ + apr_hash_do(ostat_cleanup, reg, reg->ostat_by_id); + return APR_SUCCESS; +} + +apr_status_t md_ocsp_reg_make(md_ocsp_reg_t 
**preg, apr_pool_t *p, md_store_t *store, + const md_timeslice_t *renew_window, + const char *user_agent, const char *proxy_url, + apr_time_t min_delay) +{ + md_ocsp_reg_t *reg; + apr_status_t rv = APR_SUCCESS; + + reg = apr_palloc(p, sizeof(*reg)); + if (!reg) { + rv = APR_ENOMEM; + goto cleanup; + } + reg->p = p; + reg->store = store; + reg->user_agent = user_agent; + reg->proxy_url = proxy_url; + reg->id_by_external_id = apr_hash_make(p); + reg->ostat_by_id = apr_hash_make(p); + reg->renew_window = *renew_window; + reg->min_delay = min_delay; + + rv = apr_thread_mutex_create(&reg->mutex, APR_THREAD_MUTEX_NESTED, p); + if (APR_SUCCESS != rv) goto cleanup; + + apr_pool_cleanup_register(p, reg, ocsp_reg_cleanup, apr_pool_cleanup_null); +cleanup: + *preg = (APR_SUCCESS == rv)? reg : NULL; + return rv; +} + +apr_status_t md_ocsp_prime(md_ocsp_reg_t *reg, const char *ext_id, apr_size_t ext_id_len, + md_cert_t *cert, md_cert_t *issuer, const md_t *md) +{ + md_ocsp_status_t *ostat; + const char *name; + md_data_t id; + apr_status_t rv = APR_SUCCESS; + + /* Called during post_config. no mutex protection needed */ + name = md? md->name : MD_OTHER; + md_log_perror(MD_LOG_MARK, MD_LOG_DEBUG, 0, reg->p, + "md[%s]: priming OCSP status", name); + + rv = md_ocsp_init_id(&id, reg->p, cert); + if (APR_SUCCESS != rv) goto cleanup; + + ostat = apr_hash_get(reg->ostat_by_id, id.data, (apr_ssize_t)id.len); + if (ostat) goto cleanup; /* already seen it, cert is used in >1 server_rec */ + + ostat = apr_pcalloc(reg->p, sizeof(*ostat)); + ostat->id = id; + ostat->reg = reg; + ostat->md_name = name; + md_data_to_hex(&ostat->hexid, 0, reg->p, &ostat->id); + ostat->file_name = apr_psprintf(reg->p, "ocsp-%s.json", ostat->hexid); + rv = md_cert_to_sha256_fingerprint(&ostat->hex_sha256, cert, reg->p); + if (APR_SUCCESS != rv) goto cleanup; + + md_log_perror(MD_LOG_MARK, MD_LOG_TRACE2, 0, reg->p, + "md[%s]: getting ocsp responder from cert", name); + rv = md_cert_get_ocsp_responder_url(&ostat->responder_url, reg->p, cert); + if (APR_SUCCESS != rv) { + md_log_perror(MD_LOG_MARK, MD_LOG_ERR, rv, reg->p, + "md[%s]: certificate with serial %s has no OCSP responder URL", + name, md_cert_get_serial_number(cert, reg->p)); + goto cleanup; + } + + ostat->certid = OCSP_cert_to_id(NULL, md_cert_get_X509(cert), md_cert_get_X509(issuer)); + if (!ostat->certid) { + rv = APR_EGENERAL; + md_log_perror(MD_LOG_MARK, MD_LOG_ERR, rv, reg->p, + "md[%s]: unable to create OCSP certid for certificate with serial %s", + name, md_cert_get_serial_number(cert, reg->p)); + goto cleanup; + } + + /* See, if we have something in store */ + ocsp_status_refresh(ostat, reg->p); + md_log_perror(MD_LOG_MARK, MD_LOG_DEBUG, rv, reg->p, + "md[%s]: adding ocsp info (responder=%s)", + name, ostat->responder_url); + apr_hash_set(reg->ostat_by_id, ostat->id.data, (apr_ssize_t)ostat->id.len, ostat); + if (ext_id) { + md_ocsp_id_map_t *id_map; + + id_map = apr_pcalloc(reg->p, sizeof(*id_map)); + id_map->id = id; + md_data_assign_pcopy(&id_map->external_id, ext_id, ext_id_len, reg->p); + /* check for collision/uniqueness? 
*/ + apr_hash_set(reg->id_by_external_id, id_map->external_id.data, + (apr_ssize_t)id_map->external_id.len, id_map); + } + rv = APR_SUCCESS; +cleanup: + return rv; +} + +apr_status_t md_ocsp_get_status(md_ocsp_copy_der *cb, void *userdata, md_ocsp_reg_t *reg, + const char *ext_id, apr_size_t ext_id_len, + apr_pool_t *p, const md_t *md) +{ + md_ocsp_status_t *ostat; + const char *name; + apr_status_t rv = APR_SUCCESS; + md_ocsp_id_map_t *id_map; + const char *id; + apr_size_t id_len; + int locked = 0; + + (void)p; + (void)md; + name = md? md->name : MD_OTHER; + md_log_perror(MD_LOG_MARK, MD_LOG_TRACE2, 0, reg->p, + "md[%s]: OCSP, get_status", name); + + id_map = apr_hash_get(reg->id_by_external_id, ext_id, (apr_ssize_t)ext_id_len); + id = id_map? id_map->id.data : ext_id; + id_len = id_map? id_map->id.len : ext_id_len; + ostat = apr_hash_get(reg->ostat_by_id, id, (apr_ssize_t)id_len); + if (!ostat) { + rv = APR_ENOENT; + goto cleanup; + } + + /* While the ostat instance itself always exists, the response data it holds + * may vary over time and we need locked access to make a copy. */ + apr_thread_mutex_lock(reg->mutex); + locked = 1; + + if (ostat->resp_der.len <= 0) { + /* No response known, check store for new response. */ + ocsp_status_refresh(ostat, p); + if (ostat->resp_der.len <= 0) { + md_log_perror(MD_LOG_MARK, MD_LOG_TRACE2, 0, reg->p, + "md[%s]: OCSP, no response available", name); + cb(NULL, 0, userdata); + goto cleanup; + } + } + /* We have a response */ + if (ostat_should_renew(ostat)) { + /* But it is up for renewal. A watchdog should be busy with + * retrieving a new one. In case of outages, this might take + * a while, however. Pace the frequency of checks with the + * urgency of a new response based on the remaining time. */ + long secs = (long)apr_time_sec(md_timeperiod_remaining(&ostat->resp_valid, apr_time_now())); + apr_time_t waiting_time; + + /* every hour, every minute, every second */ + waiting_time = ((secs >= MD_SECS_PER_DAY)? + apr_time_from_sec(60 * 60) : ((secs >= 60)? + apr_time_from_sec(60) : apr_time_from_sec(1))); + if ((apr_time_now() - ostat->resp_last_check) >= waiting_time) { + ostat->resp_last_check = apr_time_now(); + ocsp_status_refresh(ostat, p); + } + } + + cb((const unsigned char*)ostat->resp_der.data, ostat->resp_der.len, userdata); + md_log_perror(MD_LOG_MARK, MD_LOG_TRACE2, 0, reg->p, + "md[%s]: OCSP, provided %ld bytes of response", + name, (long)ostat->resp_der.len); +cleanup: + if (locked) apr_thread_mutex_unlock(reg->mutex); + return rv; +} + +static void ocsp_get_meta(md_ocsp_cert_stat_t *pstat, md_timeperiod_t *pvalid, + md_ocsp_reg_t *reg, md_ocsp_status_t *ostat, apr_pool_t *p) +{ + apr_thread_mutex_lock(reg->mutex); + if (ostat->resp_der.len <= 0) { + /* No response known, check the store if out watchdog retrieved one + * in the meantime. */ + ocsp_status_refresh(ostat, p); + } + *pvalid = ostat->resp_valid; + *pstat = ostat->resp_stat; + apr_thread_mutex_unlock(reg->mutex); +} + +apr_status_t md_ocsp_get_meta(md_ocsp_cert_stat_t *pstat, md_timeperiod_t *pvalid, + md_ocsp_reg_t *reg, const md_cert_t *cert, + apr_pool_t *p, const md_t *md) +{ + md_ocsp_status_t *ostat; + const char *name; + apr_status_t rv; + md_timeperiod_t valid; + md_ocsp_cert_stat_t stat; + md_data_t id; + + (void)p; + (void)md; + name = md? 
md->name : MD_OTHER; + memset(&valid, 0, sizeof(valid)); + stat = MD_OCSP_CERT_ST_UNKNOWN; + md_log_perror(MD_LOG_MARK, MD_LOG_TRACE2, 0, reg->p, + "md[%s]: OCSP, get_status", name); + + rv = md_ocsp_init_id(&id, p, cert); + if (APR_SUCCESS != rv) goto cleanup; + + ostat = apr_hash_get(reg->ostat_by_id, id.data, (apr_ssize_t)id.len); + if (!ostat) { + rv = APR_ENOENT; + goto cleanup; + } + ocsp_get_meta(&stat, &valid, reg, ostat, p); +cleanup: + *pstat = stat; + *pvalid = valid; + return rv; +} + +apr_size_t md_ocsp_count(md_ocsp_reg_t *reg) +{ + return apr_hash_count(reg->ostat_by_id); +} + +static const char *certid_as_hex(const OCSP_CERTID *certid, apr_pool_t *p) +{ + md_data_t der; + const char *hex; + + memset(&der, 0, sizeof(der)); + der.len = (apr_size_t)i2d_OCSP_CERTID((OCSP_CERTID*)certid, (unsigned char**)&der.data); + der.free_data = md_openssl_free; + md_data_to_hex(&hex, 0, p, &der); + md_data_clear(&der); + return hex; +} + +static const char *certid_summary(const OCSP_CERTID *certid, apr_pool_t *p) +{ + const char *serial, *issuer, *key, *s; + ASN1_INTEGER *aserial; + ASN1_OCTET_STRING *aname_hash, *akey_hash; + ASN1_OBJECT *amd_nid; + BIGNUM *bn; + md_data_t data; + + serial = issuer = key = "???"; + OCSP_id_get0_info(&aname_hash, &amd_nid, &akey_hash, &aserial, (OCSP_CERTID*)certid); + if (aname_hash) { + data.len = (apr_size_t)aname_hash->length; + data.data = (const char*)aname_hash->data; + md_data_to_hex(&issuer, 0, p, &data); + } + if (akey_hash) { + data.len = (apr_size_t)akey_hash->length; + data.data = (const char*)akey_hash->data; + md_data_to_hex(&key, 0, p, &data); + } + if (aserial) { + bn = ASN1_INTEGER_to_BN(aserial, NULL); + s = BN_bn2hex(bn); + serial = apr_pstrdup(p, s); + OPENSSL_free((void*)bn); + OPENSSL_free((void*)s); + } + return apr_psprintf(p, "certid[der=%s, issuer=%s, key=%s, serial=%s]", + certid_as_hex(certid, p), issuer, key, serial); +} + +static const char *certstatus_string(int status) +{ + switch (status) { + case V_OCSP_CERTSTATUS_GOOD: return "good"; + case V_OCSP_CERTSTATUS_REVOKED: return "revoked"; + case V_OCSP_CERTSTATUS_UNKNOWN: return "unknown"; + default: return "???"; + } + +} + +static const char *single_resp_summary(OCSP_SINGLERESP* resp, apr_pool_t *p) +{ + const OCSP_CERTID *certid; + int status, reason = 0; + ASN1_GENERALIZEDTIME *bup = NULL, *bnextup = NULL; + md_timeperiod_t valid; + +#if MD_USE_OPENSSL_PRE_1_1_API + certid = resp->certId; +#else + certid = OCSP_SINGLERESP_get0_id(resp); +#endif + status = OCSP_single_get0_status(resp, &reason, NULL, &bup, &bnextup); + valid.start = bup? 
md_asn1_generalized_time_get(bup) : apr_time_now(); + valid.end = md_asn1_generalized_time_get(bnextup); + + return apr_psprintf(p, "ocsp-single-resp[%s, status=%s, reason=%d, valid=%s]", + certid_summary(certid, p), + certstatus_string(status), reason, + md_timeperiod_print(p, &valid)); +} + +typedef struct { + apr_pool_t *p; + md_ocsp_status_t *ostat; + md_result_t *result; + md_job_t *job; +} md_ocsp_update_t; + +static apr_status_t ostat_on_resp(const md_http_response_t *resp, void *baton) +{ + md_ocsp_update_t *update = baton; + md_ocsp_status_t *ostat = update->ostat; + md_http_request_t *req = resp->req; + OCSP_RESPONSE *ocsp_resp = NULL; + OCSP_BASICRESP *basic_resp = NULL; + OCSP_SINGLERESP *single_resp; + apr_status_t rv = APR_SUCCESS; + int n, breason = 0, bstatus; + ASN1_GENERALIZEDTIME *bup = NULL, *bnextup = NULL; + md_data_t der, new_der; + md_timeperiod_t valid; + md_ocsp_cert_stat_t nstat; + + der.data = new_der.data = NULL; + der.len = new_der.len = 0; + + md_result_activity_printf(update->result, "status of certid %s, reading response", + ostat->hexid); + if (APR_SUCCESS != (rv = apr_brigade_pflatten(resp->body, (char**)&der.data, + &der.len, req->pool))) { + goto cleanup; + } + if (NULL == (ocsp_resp = d2i_OCSP_RESPONSE(NULL, (const unsigned char**)&der.data, + (long)der.len))) { + rv = APR_EINVAL; + + md_result_set(update->result, rv, + apr_psprintf(req->pool, "req[%d] response body does not parse as " + "OCSP response, status=%d, body brigade length=%ld", + resp->req->id, resp->status, (long)der.len)); + md_result_log(update->result, MD_LOG_DEBUG); + goto cleanup; + } + /* got a response! but what does it say? */ + n = OCSP_response_status(ocsp_resp); + if (OCSP_RESPONSE_STATUS_SUCCESSFUL != n) { + rv = APR_EINVAL; + md_result_printf(update->result, rv, "OCSP response status is, unsuccessfully, %d", n); + md_result_log(update->result, MD_LOG_DEBUG); + goto cleanup; + } + basic_resp = OCSP_response_get1_basic(ocsp_resp); + if (!basic_resp) { + rv = APR_EINVAL; + md_result_set(update->result, rv, "OCSP response has no basicresponse"); + md_result_log(update->result, MD_LOG_DEBUG); + goto cleanup; + } + /* The notion of nonce enabled freshness in OCSP responses, e.g. that the response + * contains the signed nonce we sent to the responder, does not scale well. Responders + * like to return cached response bytes and therefore do not add a nonce to it. + * So, in reality, we can only detect a mismatch when present and otherwise have + * to accept it. 
*/ + switch ((n = OCSP_check_nonce(ostat->ocsp_req, basic_resp))) { + case 1: + md_log_perror(MD_LOG_MARK, MD_LOG_TRACE3, 0, req->pool, + "req[%d]: OCSP response nonce does match", req->id); + break; + case 0: + rv = APR_EINVAL; + md_result_printf(update->result, rv, "OCSP nonce mismatch in response", n); + md_result_log(update->result, MD_LOG_WARNING); + goto cleanup; + + case -1: + md_log_perror(MD_LOG_MARK, MD_LOG_TRACE3, 0, req->pool, + "req[%d]: OCSP response did not return the nonce", req->id); + break; + default: + break; + } + + if (!OCSP_resp_find_status(basic_resp, ostat->certid, &bstatus, + &breason, NULL, &bup, &bnextup)) { + const char *prefix, *slist = "", *sep = ""; + int i; + + rv = APR_EINVAL; + prefix = apr_psprintf(req->pool, "OCSP response, no matching status reported for %s", + certid_summary(ostat->certid, req->pool)); + for (i = 0; i < OCSP_resp_count(basic_resp); ++i) { + single_resp = OCSP_resp_get0(basic_resp, i); + slist = apr_psprintf(req->pool, "%s%s%s", slist, sep, + single_resp_summary(single_resp, req->pool)); + sep = ", "; + } + md_result_printf(update->result, rv, "%s, status list [%s]", prefix, slist); + md_result_log(update->result, MD_LOG_DEBUG); + goto cleanup; + } + if (V_OCSP_CERTSTATUS_UNKNOWN == bstatus) { + rv = APR_ENOENT; + md_result_set(update->result, rv, "OCSP basicresponse says cert is unknown"); + md_result_log(update->result, MD_LOG_DEBUG); + goto cleanup; + } + if (!bnextup) { + rv = APR_EINVAL; + md_result_set(update->result, rv, "OCSP basicresponse reports not valid dates"); + md_result_log(update->result, MD_LOG_DEBUG); + goto cleanup; + } + + /* Coming here, we have a response for our certid and it is either GOOD + * or REVOKED. Both cases we want to remember and use in stapling. */ + n = i2d_OCSP_RESPONSE(ocsp_resp, (unsigned char**)&new_der.data); + if (n <= 0) { + rv = APR_EGENERAL; + md_result_set(update->result, rv, "error DER encoding OCSP response"); + md_result_log(update->result, MD_LOG_WARNING); + goto cleanup; + } + new_der.len = (apr_size_t)n; + new_der.free_data = md_openssl_free; + nstat = (bstatus == V_OCSP_CERTSTATUS_GOOD)? MD_OCSP_CERT_ST_GOOD : MD_OCSP_CERT_ST_REVOKED; + valid.start = bup? md_asn1_generalized_time_get(bup) : apr_time_now(); + valid.end = md_asn1_generalized_time_get(bnextup); + + /* First, update the instance with a copy */ + apr_thread_mutex_lock(ostat->reg->mutex); + ostat_set(ostat, nstat, &new_der, &valid, apr_time_now()); + apr_thread_mutex_unlock(ostat->reg->mutex); + + /* Next, save the original response */ + rv = ocsp_status_save(nstat, &new_der, &valid, ostat, req->pool); + if (APR_SUCCESS != rv) { + md_result_set(update->result, rv, "error saving OCSP status"); + md_result_log(update->result, MD_LOG_ERR); + goto cleanup; + } + + md_result_printf(update->result, rv, "certificate status is %s, status valid %s", + (nstat == MD_OCSP_CERT_ST_GOOD)? 
"GOOD" : "REVOKED", + md_timeperiod_print(req->pool, &ostat->resp_valid)); + md_result_log(update->result, MD_LOG_DEBUG); + +cleanup: + md_data_clear(&new_der); + if (basic_resp) OCSP_BASICRESP_free(basic_resp); + if (ocsp_resp) OCSP_RESPONSE_free(ocsp_resp); + return rv; +} + +static apr_status_t ostat_on_req_status(const md_http_request_t *req, apr_status_t status, + void *baton) +{ + md_ocsp_update_t *update = baton; + md_ocsp_status_t *ostat = update->ostat; + + (void)req; + md_job_end_run(update->job, update->result); + if (APR_SUCCESS != status) { + ++ostat->errors; + ostat->next_run = apr_time_now() + md_job_delay_on_errors(update->job, ostat->errors, NULL); + md_result_printf(update->result, status, "OCSP status update failed (%d. time)", + ostat->errors); + md_result_log(update->result, MD_LOG_DEBUG); + md_job_log_append(update->job, "ocsp-error", + update->result->problem, update->result->detail); + md_event_holler("ocsp-errored", update->job->mdomain, update->job, update->result, update->p); + goto cleanup; + } + md_event_holler("ocsp-renewed", update->job->mdomain, update->job, update->result, update->p); + +cleanup: + md_job_save(update->job, update->result, update->p); + ostat_req_cleanup(ostat); + return APR_SUCCESS; +} + +typedef struct { + md_ocsp_reg_t *reg; + apr_array_header_t *todos; + apr_pool_t *ptemp; + apr_time_t time; + int max_parallel; +} md_ocsp_todo_ctx_t; + +static apr_status_t ocsp_req_make(OCSP_REQUEST **pocsp_req, OCSP_CERTID *certid) +{ + OCSP_REQUEST *req = NULL; + OCSP_CERTID *id_copy = NULL; + apr_status_t rv = APR_ENOMEM; + + req = OCSP_REQUEST_new(); + if (!req) goto cleanup; + id_copy = OCSP_CERTID_dup(certid); + if (!id_copy) goto cleanup; + if (!OCSP_request_add0_id(req, id_copy)) goto cleanup; + id_copy = NULL; + OCSP_request_add1_nonce(req, 0, -1); + rv = APR_SUCCESS; +cleanup: + if (id_copy) OCSP_CERTID_free(id_copy); + if (APR_SUCCESS != rv && req) { + OCSP_REQUEST_free(req); + req = NULL; + } + *pocsp_req = req; + return rv; +} + +static apr_status_t ocsp_req_assign_der(md_data_t *d, OCSP_REQUEST *ocsp_req) +{ + int len; + + md_data_clear(d); + len = i2d_OCSP_REQUEST(ocsp_req, (unsigned char**)&d->data); + if (len < 0) return APR_ENOMEM; + d->len = (apr_size_t)len; + d->free_data = md_openssl_free; + return APR_SUCCESS; +} + +static apr_status_t next_todo(md_http_request_t **preq, void *baton, + md_http_t *http, int in_flight) +{ + md_ocsp_todo_ctx_t *ctx = baton; + md_ocsp_update_t *update, **pupdate; + md_ocsp_status_t *ostat; + md_http_request_t *req = NULL; + apr_status_t rv = APR_ENOENT; + apr_table_t *headers; + + if (in_flight < ctx->max_parallel) { + pupdate = apr_array_pop(ctx->todos); + if (pupdate) { + update = *pupdate; + ostat = update->ostat; + + update->job = md_ocsp_job_make(ctx->reg, ostat->md_name, update->p); + md_job_load(update->job); + md_job_start_run(update->job, update->result, ctx->reg->store); + + if (!ostat->ocsp_req) { + rv = ocsp_req_make(&ostat->ocsp_req, ostat->certid); + if (APR_SUCCESS != rv) goto cleanup; + } + if (0 == ostat->req_der.len) { + rv = ocsp_req_assign_der(&ostat->req_der, ostat->ocsp_req); + if (APR_SUCCESS != rv) goto cleanup; + } + md_result_activity_printf(update->result, "status of certid %s, " + "contacting %s", ostat->hexid, ostat->responder_url); + headers = apr_table_make(ctx->ptemp, 5); + apr_table_set(headers, "Expect", ""); + rv = md_http_POSTd_create(&req, http, ostat->responder_url, headers, + "application/ocsp-request", &ostat->req_der); + if (APR_SUCCESS != rv) goto cleanup; + 
md_http_set_on_status_cb(req, ostat_on_req_status, update); + md_http_set_on_response_cb(req, ostat_on_resp, update); + rv = APR_SUCCESS; + md_log_perror(MD_LOG_MARK, MD_LOG_TRACE2, 0, req->pool, + "scheduling OCSP request[%d] for %s, %d request in flight", + req->id, ostat->md_name, in_flight); + } + } +cleanup: + *preq = (APR_SUCCESS == rv)? req : NULL; + return rv; +} + +static int select_updates(void *baton, const void *key, apr_ssize_t klen, const void *val) +{ + md_ocsp_todo_ctx_t *ctx = baton; + md_ocsp_status_t *ostat = (md_ocsp_status_t *)val; + md_ocsp_update_t *update; + + (void)key; + (void)klen; + if (ostat->next_run <= ctx->time) { + update = apr_pcalloc(ctx->ptemp, sizeof(*update)); + update->p = ctx->ptemp; + update->ostat = ostat; + update->result = md_result_md_make(update->p, ostat->md_name); + update->job = NULL; + APR_ARRAY_PUSH(ctx->todos, md_ocsp_update_t*) = update; + } + return 1; +} + +static int select_next_run(void *baton, const void *key, apr_ssize_t klen, const void *val) +{ + md_ocsp_todo_ctx_t *ctx = baton; + md_ocsp_status_t *ostat = (md_ocsp_status_t *)val; + + (void)key; + (void)klen; + if (ostat->next_run < ctx->time && ostat->next_run > apr_time_now()) { + ctx->time = ostat->next_run; + } + return 1; +} + +void md_ocsp_renew(md_ocsp_reg_t *reg, apr_pool_t *p, apr_pool_t *ptemp, apr_time_t *pnext_run) +{ + md_ocsp_todo_ctx_t ctx; + md_http_t *http; + apr_status_t rv = APR_SUCCESS; + + (void)p; + (void)pnext_run; + + ctx.reg = reg; + ctx.ptemp = ptemp; + ctx.todos = apr_array_make(ptemp, (int)md_ocsp_count(reg), sizeof(md_ocsp_status_t*)); + ctx.max_parallel = 6; /* the magic number in HTTP */ + + /* Create a list of update tasks that are needed now or in the next minute */ + ctx.time = apr_time_now() + apr_time_from_sec(60);; + apr_hash_do(select_updates, &ctx, reg->ostat_by_id); + md_log_perror(MD_LOG_MARK, MD_LOG_DEBUG, 0, p, + "OCSP status updates due: %d", ctx.todos->nelts); + if (!ctx.todos->nelts) goto cleanup; + + rv = md_http_create(&http, ptemp, reg->user_agent, reg->proxy_url); + if (APR_SUCCESS != rv) goto cleanup; + + rv = md_http_multi_perform(http, next_todo, &ctx); + +cleanup: + /* When do we need to run next? *pnext_run contains the planned schedule from + * the watchdog. We can make that earlier if we need it. 
*/ + ctx.time = *pnext_run; + apr_hash_do(select_next_run, &ctx, reg->ostat_by_id); + + /* sanity check and return */ + if (ctx.time < apr_time_now()) ctx.time = apr_time_now() + apr_time_from_sec(1); + *pnext_run = ctx.time; + + if (APR_SUCCESS != rv && APR_ENOENT != rv) { + md_log_perror(MD_LOG_MARK, MD_LOG_DEBUG, rv, p, "ocsp_renew done"); + } + return; +} + +apr_status_t md_ocsp_remove_responses_older_than(md_ocsp_reg_t *reg, apr_pool_t *p, + apr_time_t timestamp) +{ + return md_store_remove_not_modified_since(reg->store, p, timestamp, + MD_SG_OCSP, "*", "ocsp*.json"); +} + +typedef struct { + apr_pool_t *p; + md_ocsp_reg_t *reg; + int good; + int revoked; + int unknown; +} ocsp_summary_ctx_t; + +static int add_to_summary(void *baton, const void *key, apr_ssize_t klen, const void *val) +{ + ocsp_summary_ctx_t *ctx = baton; + md_ocsp_status_t *ostat = (md_ocsp_status_t *)val; + md_ocsp_cert_stat_t stat; + md_timeperiod_t valid; + + (void)key; + (void)klen; + ocsp_get_meta(&stat, &valid, ctx->reg, ostat, ctx->p); + switch (stat) { + case MD_OCSP_CERT_ST_GOOD: ++ctx->good; break; + case MD_OCSP_CERT_ST_REVOKED: ++ctx->revoked; break; + case MD_OCSP_CERT_ST_UNKNOWN: ++ctx->unknown; break; + } + return 1; +} + +void md_ocsp_get_summary(md_json_t **pjson, md_ocsp_reg_t *reg, apr_pool_t *p) +{ + md_json_t *json; + ocsp_summary_ctx_t ctx; + + memset(&ctx, 0, sizeof(ctx)); + ctx.p = p; + ctx.reg = reg; + apr_hash_do(add_to_summary, &ctx, reg->ostat_by_id); + + json = md_json_create(p); + md_json_setl(ctx.good+ctx.revoked+ctx.unknown, json, MD_KEY_TOTAL, NULL); + md_json_setl(ctx.good, json, MD_KEY_GOOD, NULL); + md_json_setl(ctx.revoked, json, MD_KEY_REVOKED, NULL); + md_json_setl(ctx.unknown, json, MD_KEY_UNKNOWN, NULL); + *pjson = json; +} + +static apr_status_t job_loadj(md_json_t **pjson, const char *name, + md_ocsp_reg_t *reg, apr_pool_t *p) +{ + return md_store_load_json(reg->store, MD_SG_OCSP, name, MD_FN_JOB, pjson, p); +} + +typedef struct { + apr_pool_t *p; + md_ocsp_reg_t *reg; + apr_array_header_t *ostats; +} ocsp_status_ctx_t; + +static md_json_t *mk_jstat(md_ocsp_status_t *ostat, md_ocsp_reg_t *reg, apr_pool_t *p) +{ + md_ocsp_cert_stat_t stat; + md_timeperiod_t valid, renewal; + md_json_t *json, *jobj; + apr_status_t rv; + + json = md_json_create(p); + md_json_sets(ostat->md_name, json, MD_KEY_DOMAIN, NULL); + md_json_sets(ostat->hexid, json, MD_KEY_ID, NULL); + ocsp_get_meta(&stat, &valid, reg, ostat, p); + md_json_sets(md_ocsp_cert_stat_name(stat), json, MD_KEY_STATUS, NULL); + md_json_sets(ostat->hex_sha256, json, MD_KEY_CERT, MD_KEY_SHA256_FINGERPRINT, NULL); + md_json_sets(ostat->responder_url, json, MD_KEY_URL, NULL); + md_json_set_timeperiod(&valid, json, MD_KEY_VALID, NULL); + renewal = md_timeperiod_slice_before_end(&valid, &reg->renew_window); + md_json_set_time(renewal.start, json, MD_KEY_RENEW_AT, NULL); + if ((MD_OCSP_CERT_ST_UNKNOWN == stat) || renewal.start < apr_time_now()) { + /* We have no answer yet, or it should be in renew now. 
Add job information */ + rv = job_loadj(&jobj, ostat->md_name, reg, p); + if (APR_SUCCESS == rv) { + md_json_setj(jobj, json, MD_KEY_RENEWAL, NULL); + } + } + return json; +} + +static int add_ostat(void *baton, const void *key, apr_ssize_t klen, const void *val) +{ + ocsp_status_ctx_t *ctx = baton; + const md_ocsp_status_t *ostat = val; + + (void)key; + (void)klen; + APR_ARRAY_PUSH(ctx->ostats, const md_ocsp_status_t*) = ostat; + return 1; +} + +static int md_ostat_cmp(const void *v1, const void *v2) +{ + int n; + n = strcmp((*(md_ocsp_status_t**)v1)->md_name, (*(md_ocsp_status_t**)v2)->md_name); + if (!n) { + n = strcmp((*(md_ocsp_status_t**)v1)->hexid, (*(md_ocsp_status_t**)v2)->hexid); + } + return n; +} + +void md_ocsp_get_status_all(md_json_t **pjson, md_ocsp_reg_t *reg, apr_pool_t *p) +{ + md_json_t *json; + ocsp_status_ctx_t ctx; + md_ocsp_status_t *ostat; + int i; + + memset(&ctx, 0, sizeof(ctx)); + ctx.p = p; + ctx.reg = reg; + ctx.ostats = apr_array_make(p, (int)apr_hash_count(reg->ostat_by_id), sizeof(md_ocsp_status_t*)); + json = md_json_create(p); + + apr_hash_do(add_ostat, &ctx, reg->ostat_by_id); + qsort(ctx.ostats->elts, (size_t)ctx.ostats->nelts, sizeof(md_json_t*), md_ostat_cmp); + + for (i = 0; i < ctx.ostats->nelts; ++i) { + ostat = APR_ARRAY_IDX(ctx.ostats, i, md_ocsp_status_t*); + md_json_addj(mk_jstat(ostat, reg, p), json, MD_KEY_OCSPS, NULL); + } + *pjson = json; +} + +md_job_t *md_ocsp_job_make(md_ocsp_reg_t *ocsp, const char *mdomain, apr_pool_t *p) +{ + return md_job_make(p, ocsp->store, MD_SG_OCSP, mdomain, ocsp->min_delay); +} diff --git a/modules/md/md_ocsp.h b/modules/md/md_ocsp.h new file mode 100644 index 0000000..c91dc54 --- /dev/null +++ b/modules/md/md_ocsp.h @@ -0,0 +1,71 @@ +/* Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef md_ocsp_h +#define md_ocsp_h + +struct md_data_t; +struct md_job_t; +struct md_json_t; +struct md_result_t; +struct md_store_t; +struct md_timeslice_t; + +typedef enum { + MD_OCSP_CERT_ST_UNKNOWN, + MD_OCSP_CERT_ST_GOOD, + MD_OCSP_CERT_ST_REVOKED, +} md_ocsp_cert_stat_t; + +const char *md_ocsp_cert_stat_name(md_ocsp_cert_stat_t stat); +md_ocsp_cert_stat_t md_ocsp_cert_stat_value(const char *name); + +typedef struct md_ocsp_reg_t md_ocsp_reg_t; + +apr_status_t md_ocsp_reg_make(md_ocsp_reg_t **preg, apr_pool_t *p, + struct md_store_t *store, + const md_timeslice_t *renew_window, + const char *user_agent, const char *proxy_url, + apr_time_t min_delay); + +apr_status_t md_ocsp_init_id(struct md_data_t *id, apr_pool_t *p, const md_cert_t *cert); + +apr_status_t md_ocsp_prime(md_ocsp_reg_t *reg, const char *ext_id, apr_size_t ext_id_len, + md_cert_t *x, md_cert_t *issuer, const md_t *md); + +typedef void md_ocsp_copy_der(const unsigned char *der, apr_size_t der_len, void *userdata); + +apr_status_t md_ocsp_get_status(md_ocsp_copy_der *cb, void *userdata, md_ocsp_reg_t *reg, + const char *ext_id, apr_size_t ext_id_len, + apr_pool_t *p, const md_t *md); + +apr_status_t md_ocsp_get_meta(md_ocsp_cert_stat_t *pstat, md_timeperiod_t *pvalid, + md_ocsp_reg_t *reg, const md_cert_t *cert, + apr_pool_t *p, const md_t *md); + +apr_size_t md_ocsp_count(md_ocsp_reg_t *reg); + +void md_ocsp_renew(md_ocsp_reg_t *reg, apr_pool_t *p, apr_pool_t *ptemp, apr_time_t *pnext_run); + +apr_status_t md_ocsp_remove_responses_older_than(md_ocsp_reg_t *reg, apr_pool_t *p, + apr_time_t timestamp); + +void md_ocsp_get_summary(struct md_json_t **pjson, md_ocsp_reg_t *reg, apr_pool_t *p); +void md_ocsp_get_status_all(struct md_json_t **pjson, md_ocsp_reg_t *reg, apr_pool_t *p); + +struct md_job_t *md_ocsp_job_make(md_ocsp_reg_t *ocsp, const char *mdomain, apr_pool_t *p); + +#endif /* md_ocsp_h */ diff --git a/modules/md/md_reg.c b/modules/md/md_reg.c index 233fea7..8bceb0e 100644 --- a/modules/md/md_reg.c +++ b/modules/md/md_reg.c @@ -26,21 +26,37 @@ #include "md.h" #include "md_crypt.h" +#include "md_event.h" #include "md_log.h" #include "md_json.h" +#include "md_result.h" #include "md_reg.h" #include "md_store.h" +#include "md_status.h" +#include "md_tailscale.h" #include "md_util.h" #include "md_acme.h" #include "md_acme_acct.h" struct md_reg_t { + apr_pool_t *p; struct md_store_t *store; struct apr_hash_t *protos; + struct apr_hash_t *certs; int can_http; int can_https; const char *proxy_url; + const char *ca_file; + int domains_frozen; + md_timeslice_t *renew_window; + md_timeslice_t *warn_window; + md_job_notify_cb *notify; + void *notify_ctx; + apr_time_t min_delay; + int retry_failover; + int use_store_locks; + apr_time_t lock_wait_timeout; }; /**************************************************************************************************/ @@ -67,20 +83,34 @@ static apr_status_t load_props(md_reg_t *reg, apr_pool_t *p) return rv; } -apr_status_t md_reg_init(md_reg_t **preg, apr_pool_t *p, struct md_store_t *store, - const char *proxy_url) +apr_status_t md_reg_create(md_reg_t **preg, apr_pool_t *p, struct md_store_t *store, + const char *proxy_url, const char *ca_file, + apr_time_t min_delay, int retry_failover, + int use_store_locks, apr_time_t lock_wait_timeout) { md_reg_t *reg; apr_status_t rv; reg = apr_pcalloc(p, sizeof(*reg)); + reg->p = p; reg->store = store; reg->protos = apr_hash_make(p); + reg->certs = apr_hash_make(p); reg->can_http = 1; reg->can_https = 1; reg->proxy_url = proxy_url? 
apr_pstrdup(p, proxy_url) : NULL; + reg->ca_file = (ca_file && apr_strnatcasecmp("none", ca_file))? + apr_pstrdup(p, ca_file) : NULL; + reg->min_delay = min_delay; + reg->retry_failover = retry_failover; + reg->use_store_locks = use_store_locks; + reg->lock_wait_timeout = lock_wait_timeout; + + md_timeslice_create(&reg->renew_window, p, MD_TIME_LIFE_NORM, MD_TIME_RENEW_WINDOW_DEF); + md_timeslice_create(&reg->warn_window, p, MD_TIME_LIFE_NORM, MD_TIME_WARN_WINDOW_DEF); - if (APR_SUCCESS == (rv = md_acme_protos_add(reg->protos, p))) { + if (APR_SUCCESS == (rv = md_acme_protos_add(reg->protos, p)) + && APR_SUCCESS == (rv = md_tailscale_protos_add(reg->protos, p))) { rv = load_props(reg, p); } @@ -114,7 +144,7 @@ static apr_status_t check_values(md_reg_t *reg, apr_pool_t *p, const md_t *md, i for (i = 0; i < md->domains->nelts; ++i) { domain = APR_ARRAY_IDX(md->domains, i, const char *); - if (!md_util_is_dns_name(p, domain, 1)) { + if (!md_dns_is_name(p, domain, 1) && !md_dns_is_wildcard(p, domain)) { md_log_perror(MD_LOG_MARK, MD_LOG_ERR, APR_EINVAL, p, "md %s with invalid domain name: %s", md->name, domain); return APR_EINVAL; @@ -145,12 +175,17 @@ static apr_status_t check_values(md_reg_t *reg, apr_pool_t *p, const md_t *md, i } } - if ((MD_UPD_CA_URL & fields) && md->ca_url) { /* setting to empty is ok */ - rv = md_util_abs_uri_check(p, md->ca_url, &err); - if (err) { - md_log_perror(MD_LOG_MARK, MD_LOG_ERR, APR_EINVAL, p, - "CA url for %s invalid (%s): %s", md->name, err, md->ca_url); - return APR_EINVAL; + if ((MD_UPD_CA_URL & fields) && md->ca_urls) { /* setting to empty is ok */ + int i; + const char *url; + for (i = 0; i < md->ca_urls->nelts; ++i) { + url = APR_ARRAY_IDX(md->ca_urls, i, const char*); + rv = md_util_abs_uri_check(p, url, &err); + if (err) { + md_log_perror(MD_LOG_MARK, MD_LOG_ERR, APR_EINVAL, p, + "CA url for %s invalid (%s): %s", md->name, err, url); + return APR_EINVAL; + } } } @@ -162,7 +197,8 @@ static apr_status_t check_values(md_reg_t *reg, apr_pool_t *p, const md_t *md, i /* hmm, in case we know the protocol, some checks could be done */ } - if ((MD_UPD_AGREEMENT & fields) && md->ca_agreement) { /* setting to empty is ok */ + if ((MD_UPD_AGREEMENT & fields) && md->ca_agreement + && strcmp("accepted", md->ca_agreement)) { /* setting to empty is ok */ rv = md_util_abs_uri_check(p, md->ca_agreement, &err); if (err) { md_log_perror(MD_LOG_MARK, MD_LOG_ERR, APR_EINVAL, p, @@ -177,138 +213,72 @@ static apr_status_t check_values(md_reg_t *reg, apr_pool_t *p, const md_t *md, i /**************************************************************************************************/ /* state assessment */ -static apr_status_t state_init(md_reg_t *reg, apr_pool_t *p, md_t *md, int save_changes) +static apr_status_t state_init(md_reg_t *reg, apr_pool_t *p, md_t *md) { - md_state_t state = MD_S_UNKNOWN; - const md_creds_t *creds; + md_state_t state = MD_S_COMPLETE; + const char *state_descr = NULL; + const md_pubcert_t *pub; const md_cert_t *cert; - apr_time_t expires = 0, valid_from = 0; - apr_status_t rv; + const md_pkey_spec_t *spec; + apr_status_t rv = APR_SUCCESS; int i; - if (APR_SUCCESS == (rv = md_reg_creds_get(&creds, reg, MD_SG_DOMAINS, md, p))) { - state = MD_S_INCOMPLETE; - if (!creds->privkey) { - md_log_perror(MD_LOG_MARK, MD_LOG_DEBUG, rv, p, - "md{%s}: incomplete, without private key", md->name); - } - else if (!creds->cert) { - md_log_perror(MD_LOG_MARK, MD_LOG_DEBUG, rv, p, - "md{%s}: incomplete, has key but no certificate", md->name); - } - else { - valid_from = 
md_cert_get_not_before(creds->cert); - expires = md_cert_get_not_after(creds->cert); - if (md_cert_has_expired(creds->cert)) { - state = MD_S_EXPIRED; - md_log_perror(MD_LOG_MARK, MD_LOG_DEBUG, rv, p, - "md{%s}: expired, certificate has expired", md->name); - goto out; - } - if (!md_cert_is_valid_now(creds->cert)) { - state = MD_S_ERROR; - md_log_perror(MD_LOG_MARK, MD_LOG_ERR, rv, p, - "md{%s}: error, certificate valid in future (clock wrong?)", - md->name); - goto out; - } - if (!md_cert_covers_md(creds->cert, md)) { + if (md->renew_window == NULL) md->renew_window = reg->renew_window; + if (md->warn_window == NULL) md->warn_window = reg->warn_window; + + if (md->domains && md->domains->pool != p) { + md_log_perror(MD_LOG_MARK, MD_LOG_ERR, 0, p, + "md{%s}: state_init called with foreign pool", md->name); + } + + for (i = 0; i < md_cert_count(md); ++i) { + spec = md_pkeys_spec_get(md->pks, i); + md_log_perror(MD_LOG_MARK, MD_LOG_TRACE2, rv, p, + "md{%s}: check cert %s", md->name, md_pkey_spec_name(spec)); + rv = md_reg_get_pubcert(&pub, reg, md, i, p); + if (APR_SUCCESS == rv) { + cert = APR_ARRAY_IDX(pub->certs, 0, const md_cert_t*); + if (!md_is_covered_by_alt_names(md, pub->alt_names)) { state = MD_S_INCOMPLETE; - md_log_perror(MD_LOG_MARK, MD_LOG_INFO, rv, p, - "md{%s}: incomplete, cert no longer covers all domains, " - "needs sign up for a new certificate", md->name); - goto out; + state_descr = apr_psprintf(p, "certificate(%s) does not cover all domains.", + md_pkey_spec_name(spec)); + goto cleanup; } - if (!md->must_staple != !md_cert_must_staple(creds->cert)) { + if (!md->must_staple != !md_cert_must_staple(cert)) { state = MD_S_INCOMPLETE; - md_log_perror(MD_LOG_MARK, MD_LOG_INFO, rv, p, - "md{%s}: OCSP Stapling is%s requested, but certificate " - "has it%s enabled. Need to get a new certificate.", md->name, - md->must_staple? "" : " not", + state_descr = apr_psprintf(p, "'must-staple' is%s requested, but " + "certificate(%s) has it%s enabled.", + md->must_staple? "" : " not", + md_pkey_spec_name(spec), !md->must_staple? "" : " not"); - goto out; + goto cleanup; } - - for (i = 1; i < creds->pubcert->nelts; ++i) { - cert = APR_ARRAY_IDX(creds->pubcert, i, const md_cert_t *); - if (!md_cert_is_valid_now(cert)) { - state = MD_S_ERROR; - md_log_perror(MD_LOG_MARK, MD_LOG_ERR, rv, p, - "md{%s}: error, the certificate itself is valid, however the %d. 
" - "certificate in the chain is not valid now (clock wrong?).", - md->name, i); - goto out; - } - } - - state = MD_S_COMPLETE; - md_log_perror(MD_LOG_MARK, MD_LOG_DEBUG, rv, p, "md{%s}: is complete", md->name); + md_log_perror(MD_LOG_MARK, MD_LOG_DEBUG, rv, p, "md{%s}: certificate(%d) is ok", + md->name, i); + } + else if (APR_STATUS_IS_ENOENT(rv)) { + state = MD_S_INCOMPLETE; + state_descr = apr_psprintf(p, "certificate(%s) is missing", + md_pkey_spec_name(spec)); + rv = APR_SUCCESS; + goto cleanup; + } + else { + state = MD_S_ERROR; + state_descr = "error initializing"; + md_log_perror(MD_LOG_MARK, MD_LOG_WARNING, rv, p, "md{%s}: error", md->name); + goto cleanup; } } -out: - if (APR_SUCCESS != rv) { - state = MD_S_ERROR; - md_log_perror(MD_LOG_MARK, MD_LOG_WARNING, rv, p, "md{%s}: error", md->name); - } - - if (save_changes && md->state == state - && md->valid_from == valid_from && md->expires == expires) { - save_changes = 0; - } +cleanup: + md_log_perror(MD_LOG_MARK, MD_LOG_TRACE2, rv, p, "md{%s}: state=%d, %s", + md->name, state, state_descr); md->state = state; - md->valid_from = valid_from; - md->expires = expires; - if (save_changes && APR_SUCCESS == rv) { - return md_save(reg->store, p, MD_SG_DOMAINS, md, 0); - } + md->state_descr = state_descr; return rv; } -apr_status_t md_reg_assess(md_reg_t *reg, md_t *md, int *perrored, int *prenew, apr_pool_t *p) -{ - int renew = 0; - int errored = 0; - - (void)reg; - switch (md->state) { - case MD_S_UNKNOWN: - md_log_perror( MD_LOG_MARK, MD_LOG_ERR, 0, p, "md(%s): in unknown state.", md->name); - break; - case MD_S_ERROR: - md_log_perror( MD_LOG_MARK, MD_LOG_ERR, 0, p, - "md(%s): in error state, unable to drive forward. If unable to " - " detect the cause, you may remove the staging or even domain " - " sub-directory for this MD and start all over.", md->name); - errored = 1; - break; - case MD_S_COMPLETE: - if (!md->expires) { - md_log_perror( MD_LOG_MARK, MD_LOG_WARNING, 0, p, - "md(%s): looks complete, but has unknown expiration date.", md->name); - errored = 1; - } - else if (md->expires <= apr_time_now()) { - /* Maybe we hibernated in the meantime? 
*/ - md->state = MD_S_EXPIRED; - renew = 1; - } - else { - renew = md_should_renew(md); - } - break; - case MD_S_INCOMPLETE: - case MD_S_EXPIRED: - renew = 1; - break; - case MD_S_MISSING: - break; - } - *prenew = renew; - *perrored = errored; - return APR_SUCCESS; -} - /**************************************************************************************************/ /* iteration */ @@ -326,7 +296,7 @@ static int reg_md_iter(void *baton, md_store_t *store, md_t *md, apr_pool_t *pte (void)store; if (!ctx->exclude || strcmp(ctx->exclude, md->name)) { - state_init(ctx->reg, ptemp, (md_t*)md, 1); + state_init(ctx->reg, ptemp, (md_t*)md); return ctx->cb(ctx->baton, ctx->reg, md); } return 1; @@ -357,7 +327,7 @@ md_t *md_reg_get(md_reg_t *reg, const char *name, apr_pool_t *p) md_t *md; if (APR_SUCCESS == md_load(reg->store, MD_SG_DOMAINS, name, &md, p)) { - state_init(reg, p, md, 1); + state_init(reg, p, md); return md; } return NULL; @@ -389,7 +359,7 @@ md_t *md_reg_find(md_reg_t *reg, const char *domain, apr_pool_t *p) md_reg_do(find_domain, &ctx, reg, p); if (ctx.md) { - state_init(reg, p, ctx.md, 1); + state_init(reg, p, ctx.md); } return ctx.md; } @@ -427,23 +397,11 @@ md_t *md_reg_find_overlap(md_reg_t *reg, const md_t *md, const char **pdomain, a *pdomain = ctx.s; } if (ctx.md) { - state_init(reg, p, ctx.md, 1); + state_init(reg, p, ctx.md); } return ctx.md; } -apr_status_t md_reg_get_cred_files(md_reg_t *reg, const md_t *md, apr_pool_t *p, - const char **pkeyfile, const char **pcertfile) -{ - apr_status_t rv; - - rv = md_store_get_fname(pkeyfile, reg->store, MD_SG_DOMAINS, md->name, MD_FN_PRIVKEY, p); - if (APR_SUCCESS == rv) { - rv = md_store_get_fname(pcertfile, reg->store, MD_SG_DOMAINS, md->name, MD_FN_PUBCERT, p); - } - return rv; -} - /**************************************************************************************************/ /* manipulation */ @@ -452,19 +410,28 @@ static apr_status_t p_md_add(void *baton, apr_pool_t *p, apr_pool_t *ptemp, va_l md_reg_t *reg = baton; apr_status_t rv = APR_SUCCESS; md_t *md, *mine; + int do_check; md = va_arg(ap, md_t *); + do_check = va_arg(ap, int); + + if (reg->domains_frozen) return APR_EACCES; mine = md_clone(ptemp, md); - if (APR_SUCCESS == (rv = check_values(reg, ptemp, md, MD_UPD_ALL)) - && APR_SUCCESS == (rv = state_init(reg, ptemp, mine, 0)) - && APR_SUCCESS == (rv = md_save(reg->store, p, MD_SG_DOMAINS, mine, 1))) { - } + if (do_check && APR_SUCCESS != (rv = check_values(reg, ptemp, md, MD_UPD_ALL))) goto leave; + if (APR_SUCCESS != (rv = state_init(reg, ptemp, mine))) goto leave; + rv = md_save(reg->store, p, MD_SG_DOMAINS, mine, 1); +leave: return rv; } +static apr_status_t add_md(md_reg_t *reg, md_t *md, apr_pool_t *p, int do_checks) +{ + return md_util_pool_vdo(p_md_add, reg, p, md, do_checks, NULL); +} + apr_status_t md_reg_add(md_reg_t *reg, md_t *md, apr_pool_t *p) { - return md_util_pool_vdo(p_md_add, reg, p, md, NULL); + return add_md(reg, md, p, 1); } static apr_status_t p_md_update(void *baton, apr_pool_t *p, apr_pool_t *ptemp, va_list ap) @@ -473,31 +440,34 @@ static apr_status_t p_md_update(void *baton, apr_pool_t *p, apr_pool_t *ptemp, v apr_status_t rv = APR_SUCCESS; const char *name; const md_t *md, *updates; - int fields; + int fields, do_checks; md_t *nmd; name = va_arg(ap, const char *); updates = va_arg(ap, const md_t *); fields = va_arg(ap, int); + do_checks = va_arg(ap, int); if (NULL == (md = md_reg_get(reg, name, ptemp))) { md_log_perror(MD_LOG_MARK, MD_LOG_DEBUG, APR_ENOENT, ptemp, "md %s", name); 
return APR_ENOENT; } - md_log_perror(MD_LOG_MARK, MD_LOG_DEBUG, 0, ptemp, "update md %s", name); + md_log_perror(MD_LOG_MARK, MD_LOG_DEBUG, 0, ptemp, "md[%s]: update store", name); - if (APR_SUCCESS != (rv = check_values(reg, ptemp, updates, fields))) { + if (do_checks && APR_SUCCESS != (rv = check_values(reg, ptemp, updates, fields))) { return rv; } + if (reg->domains_frozen) return APR_EACCES; nmd = md_copy(ptemp, md); if (MD_UPD_DOMAINS & fields) { nmd->domains = updates->domains; md_log_perror(MD_LOG_MARK, MD_LOG_TRACE1, 0, ptemp, "update domains: %s", name); } if (MD_UPD_CA_URL & fields) { - nmd->ca_url = updates->ca_url; + nmd->ca_urls = (updates->ca_urls? + apr_array_copy(p, updates->ca_urls) : NULL); md_log_perror(MD_LOG_MARK, MD_LOG_TRACE1, 0, ptemp, "update ca url: %s", name); } if (MD_UPD_CA_PROTO & fields) { @@ -516,18 +486,17 @@ static apr_status_t p_md_update(void *baton, apr_pool_t *p, apr_pool_t *ptemp, v md_log_perror(MD_LOG_MARK, MD_LOG_TRACE1, 0, ptemp, "update agreement: %s", name); nmd->ca_agreement = updates->ca_agreement; } - if (MD_UPD_CERT_URL & fields) { - md_log_perror(MD_LOG_MARK, MD_LOG_TRACE1, 0, ptemp, "update cert url: %s", name); - nmd->cert_url = updates->cert_url; - } if (MD_UPD_DRIVE_MODE & fields) { md_log_perror(MD_LOG_MARK, MD_LOG_TRACE1, 0, ptemp, "update drive-mode: %s", name); - nmd->drive_mode = updates->drive_mode; + nmd->renew_mode = updates->renew_mode; } if (MD_UPD_RENEW_WINDOW & fields) { md_log_perror(MD_LOG_MARK, MD_LOG_TRACE1, 0, ptemp, "update renew-window: %s", name); - nmd->renew_norm = updates->renew_norm; - nmd->renew_window = updates->renew_window; + *nmd->renew_window = *updates->renew_window; + } + if (MD_UPD_WARN_WINDOW & fields) { + md_log_perror(MD_LOG_MARK, MD_LOG_TRACE1, 0, ptemp, "update warn-window: %s", name); + *nmd->warn_window = *updates->warn_window; } if (MD_UPD_CA_CHALLENGES & fields) { md_log_perror(MD_LOG_MARK, MD_LOG_TRACE1, 0, ptemp, "update ca challenges: %s", name); @@ -536,10 +505,7 @@ static apr_status_t p_md_update(void *baton, apr_pool_t *p, apr_pool_t *ptemp, v } if (MD_UPD_PKEY_SPEC & fields) { md_log_perror(MD_LOG_MARK, MD_LOG_TRACE1, 0, ptemp, "update pkey spec: %s", name); - nmd->pkey_spec = NULL; - if (updates->pkey_spec) { - nmd->pkey_spec = apr_pmemdup(p, updates->pkey_spec, sizeof(md_pkey_spec_t)); - } + nmd->pks = md_pkeys_spec_clone(p, updates->pks); } if (MD_UPD_REQUIRE_HTTPS & fields) { md_log_perror(MD_LOG_MARK, MD_LOG_TRACE1, 0, ptemp, "update require-https: %s", name); @@ -553,118 +519,239 @@ static apr_status_t p_md_update(void *baton, apr_pool_t *p, apr_pool_t *ptemp, v md_log_perror(MD_LOG_MARK, MD_LOG_TRACE1, 0, ptemp, "update must-staple: %s", name); nmd->must_staple = updates->must_staple; } + if (MD_UPD_PROTO & fields) { + md_log_perror(MD_LOG_MARK, MD_LOG_TRACE1, 0, ptemp, "update proto: %s", name); + nmd->acme_tls_1_domains = updates->acme_tls_1_domains; + } + if (MD_UPD_STAPLING & fields) { + md_log_perror(MD_LOG_MARK, MD_LOG_TRACE1, 0, ptemp, "update stapling: %s", name); + nmd->stapling = updates->stapling; + } if (fields && APR_SUCCESS == (rv = md_save(reg->store, p, MD_SG_DOMAINS, nmd, 0))) { - rv = state_init(reg, ptemp, nmd, 0); + rv = state_init(reg, ptemp, nmd); } return rv; } apr_status_t md_reg_update(md_reg_t *reg, apr_pool_t *p, - const char *name, const md_t *md, int fields) + const char *name, const md_t *md, int fields, + int do_checks) { - return md_util_pool_vdo(p_md_update, reg, p, name, md, fields, NULL); + return md_util_pool_vdo(p_md_update, reg, p, name, md, 
fields, do_checks, NULL); } -/**************************************************************************************************/ -/* certificate related */ - -static int ok_or_noent(apr_status_t rv) +apr_status_t md_reg_delete_acct(md_reg_t *reg, apr_pool_t *p, const char *acct_id) { - return (APR_SUCCESS == rv || APR_ENOENT == rv); + apr_status_t rv = APR_SUCCESS; + + rv = md_store_remove(reg->store, MD_SG_ACCOUNTS, acct_id, MD_FN_ACCOUNT, p, 1); + if (APR_SUCCESS == rv) { + md_store_remove(reg->store, MD_SG_ACCOUNTS, acct_id, MD_FN_ACCT_KEY, p, 1); + } + return rv; } -static apr_status_t creds_load(void *baton, apr_pool_t *p, apr_pool_t *ptemp, va_list ap) +/**************************************************************************************************/ +/* certificate related */ + +static apr_status_t pubcert_load(void *baton, apr_pool_t *p, apr_pool_t *ptemp, va_list ap) { md_reg_t *reg = baton; - md_pkey_t *privkey; - apr_array_header_t *pubcert; - md_creds_t *creds, **pcreds; + apr_array_header_t *certs; + md_pubcert_t *pubcert, **ppubcert; const md_t *md; + int index; + const md_cert_t *cert; md_cert_state_t cert_state; md_store_group_t group; apr_status_t rv; - pcreds = va_arg(ap, md_creds_t **); + ppubcert = va_arg(ap, md_pubcert_t **); group = (md_store_group_t)va_arg(ap, int); md = va_arg(ap, const md_t *); + index = va_arg(ap, int); - if (ok_or_noent(rv = md_pkey_load(reg->store, group, md->name, &privkey, p)) - && ok_or_noent(rv = md_pubcert_load(reg->store, group, md->name, &pubcert, p))) { - rv = APR_SUCCESS; - - creds = apr_pcalloc(p, sizeof(*creds)); - creds->privkey = privkey; - if (pubcert && pubcert->nelts > 0) { - creds->pubcert = pubcert; - creds->cert = APR_ARRAY_IDX(pubcert, 0, md_cert_t *); - } - if (creds->cert) { - switch ((cert_state = md_cert_state_get(creds->cert))) { - case MD_CERT_VALID: - creds->expired = 0; - break; - case MD_CERT_EXPIRED: - creds->expired = 1; - break; - default: - md_log_perror(MD_LOG_MARK, MD_LOG_ERR, APR_EINVAL, ptemp, - "md %s has unexpected cert state: %d", md->name, cert_state); - rv = APR_ENOTIMPL; - break; - } - } + if (md->cert_files && md->cert_files->nelts) { + rv = md_chain_fload(&certs, p, APR_ARRAY_IDX(md->cert_files, index, const char *)); + } + else { + md_pkey_spec_t *spec = md_pkeys_spec_get(md->pks, index);; + rv = md_pubcert_load(reg->store, group, md->name, spec, &certs, p); + } + if (APR_SUCCESS != rv) goto leave; + if (certs->nelts == 0) { + rv = APR_ENOENT; + goto leave; + } + + pubcert = apr_pcalloc(p, sizeof(*pubcert)); + pubcert->certs = certs; + cert = APR_ARRAY_IDX(certs, 0, const md_cert_t *); + if (APR_SUCCESS != (rv = md_cert_get_alt_names(&pubcert->alt_names, cert, p))) goto leave; + switch ((cert_state = md_cert_state_get(cert))) { + case MD_CERT_VALID: + case MD_CERT_EXPIRED: + break; + default: + md_log_perror(MD_LOG_MARK, MD_LOG_ERR, APR_EINVAL, ptemp, + "md %s has unexpected cert state: %d", md->name, cert_state); + rv = APR_ENOTIMPL; + break; } - *pcreds = (APR_SUCCESS == rv)? creds : NULL; +leave: + *ppubcert = (APR_SUCCESS == rv)? pubcert : NULL; return rv; } -apr_status_t md_reg_creds_get(const md_creds_t **pcreds, md_reg_t *reg, - md_store_group_t group, const md_t *md, apr_pool_t *p) +apr_status_t md_reg_get_pubcert(const md_pubcert_t **ppubcert, md_reg_t *reg, + const md_t *md, int i, apr_pool_t *p) { apr_status_t rv = APR_SUCCESS; - md_creds_t *creds; - - rv = md_util_pool_vdo(creds_load, reg, p, &creds, group, md, NULL); - *pcreds = (APR_SUCCESS == rv)? 
creds : NULL; + const md_pubcert_t *pubcert; + const char *name; + + name = apr_psprintf(p, "%s[%d]", md->name, i); + pubcert = apr_hash_get(reg->certs, name, (apr_ssize_t)strlen(name)); + if (!pubcert && !reg->domains_frozen) { + rv = md_util_pool_vdo(pubcert_load, reg, reg->p, &pubcert, MD_SG_DOMAINS, md, i, NULL); + if (APR_STATUS_IS_ENOENT(rv)) { + /* We cache it missing with an empty record */ + pubcert = apr_pcalloc(reg->p, sizeof(*pubcert)); + } + else if (APR_SUCCESS != rv) goto leave; + if (p != reg->p) name = apr_pstrdup(reg->p, name); + apr_hash_set(reg->certs, name, (apr_ssize_t)strlen(name), pubcert); + } +leave: + if (APR_SUCCESS == rv && (!pubcert || !pubcert->certs)) { + rv = APR_ENOENT; + } + *ppubcert = (APR_SUCCESS == rv)? pubcert : NULL; return rv; } -/**************************************************************************************************/ -/* synching */ +apr_status_t md_reg_get_cred_files(const char **pkeyfile, const char **pcertfile, + md_reg_t *reg, md_store_group_t group, + const md_t *md, md_pkey_spec_t *spec, apr_pool_t *p) +{ + apr_status_t rv; + + rv = md_store_get_fname(pkeyfile, reg->store, group, md->name, md_pkey_filename(spec, p), p); + if (APR_SUCCESS != rv) return rv; + if (!md_file_exists(*pkeyfile, p)) return APR_ENOENT; + rv = md_store_get_fname(pcertfile, reg->store, group, md->name, md_chain_filename(spec, p), p); + if (APR_SUCCESS != rv) return rv; + if (!md_file_exists(*pcertfile, p)) return APR_ENOENT; + return APR_SUCCESS; +} -typedef struct { - apr_pool_t *p; - apr_array_header_t *store_mds; -} sync_ctx; +apr_time_t md_reg_valid_until(md_reg_t *reg, const md_t *md, apr_pool_t *p) +{ + const md_pubcert_t *pub; + const md_cert_t *cert; + int i; + apr_time_t t, valid_until = 0; + apr_status_t rv; + + for (i = 0; i < md_cert_count(md); ++i) { + rv = md_reg_get_pubcert(&pub, reg, md, i, p); + if (APR_SUCCESS == rv) { + cert = APR_ARRAY_IDX(pub->certs, 0, const md_cert_t*); + t = md_cert_get_not_after(cert); + if (valid_until == 0 || t < valid_until) { + valid_until = t; + } + } + } + return valid_until; +} -static int do_add_md(void *baton, md_store_t *store, md_t *md, apr_pool_t *ptemp) +apr_time_t md_reg_renew_at(md_reg_t *reg, const md_t *md, apr_pool_t *p) { - sync_ctx *ctx = baton; + const md_pubcert_t *pub; + const md_cert_t *cert; + md_timeperiod_t certlife, renewal; + int i; + apr_time_t renew_at = 0; + apr_status_t rv; + + if (md->state == MD_S_INCOMPLETE) return apr_time_now(); + for (i = 0; i < md_cert_count(md); ++i) { + rv = md_reg_get_pubcert(&pub, reg, md, i, p); + if (APR_STATUS_IS_ENOENT(rv)) return apr_time_now(); + if (APR_SUCCESS == rv) { + cert = APR_ARRAY_IDX(pub->certs, 0, const md_cert_t*); + certlife.start = md_cert_get_not_before(cert); + certlife.end = md_cert_get_not_after(cert); + + renewal = md_timeperiod_slice_before_end(&certlife, md->renew_window); + if (md_log_is_level(p, MD_LOG_TRACE1)) { + md_log_perror(MD_LOG_MARK, MD_LOG_TRACE2, 0, p, + "md[%s]: certificate(%d) valid[%s] renewal[%s]", + md->name, i, + md_timeperiod_print(p, &certlife), + md_timeperiod_print(p, &renewal)); + } + + if (renew_at == 0 || renewal.start < renew_at) { + renew_at = renewal.start; + } + } + } + return renew_at; +} - (void)store; - (void)ptemp; - APR_ARRAY_PUSH(ctx->store_mds, const md_t*) = md_clone(ctx->p, md); - return 1; +int md_reg_should_renew(md_reg_t *reg, const md_t *md, apr_pool_t *p) +{ + apr_time_t renew_at; + + renew_at = md_reg_renew_at(reg, md, p); + return renew_at && (renew_at <= apr_time_now()); } -static 
apr_status_t read_store_mds(md_reg_t *reg, sync_ctx *ctx) +int md_reg_should_warn(md_reg_t *reg, const md_t *md, apr_pool_t *p) { - int rv; + const md_pubcert_t *pub; + const md_cert_t *cert; + md_timeperiod_t certlife, warn; + int i; + apr_status_t rv; - apr_array_clear(ctx->store_mds); - rv = md_store_md_iter(do_add_md, ctx, reg->store, ctx->p, MD_SG_DOMAINS, "*"); - if (APR_STATUS_IS_ENOENT(rv)) { - rv = APR_SUCCESS; + if (md->state == MD_S_INCOMPLETE) return 0; + for (i = 0; i < md_cert_count(md); ++i) { + rv = md_reg_get_pubcert(&pub, reg, md, i, p); + if (APR_STATUS_IS_ENOENT(rv)) return 0; + if (APR_SUCCESS == rv) { + cert = APR_ARRAY_IDX(pub->certs, 0, const md_cert_t*); + certlife.start = md_cert_get_not_before(cert); + certlife.end = md_cert_get_not_after(cert); + + warn = md_timeperiod_slice_before_end(&certlife, md->warn_window); + if (md_log_is_level(p, MD_LOG_TRACE1)) { + md_log_perror(MD_LOG_MARK, MD_LOG_TRACE2, 0, p, + "md[%s]: certificate(%d) life[%s] warn[%s]", + md->name, i, + md_timeperiod_print(p, &certlife), + md_timeperiod_print(p, &warn)); + } + if (md_timeperiod_has_started(&warn, apr_time_now())) { + return 1; + } + } } - return rv; + return 0; } +/**************************************************************************************************/ +/* syncing */ + apr_status_t md_reg_set_props(md_reg_t *reg, apr_pool_t *p, int can_http, int can_https) { if (reg->can_http != can_http || reg->can_https != can_https) { md_json_t *json; + if (reg->domains_frozen) return APR_EACCES; reg->can_http = can_http; reg->can_https = can_https; @@ -676,329 +763,561 @@ apr_status_t md_reg_set_props(md_reg_t *reg, apr_pool_t *p, int can_http, int ca } return APR_SUCCESS; } - -/** - * Procedure: - * 1. Collect all defined "managed domains" (MD). It does not matter where a MD is defined. - * All MDs need to be unique and have no overlaps in their domain names. - * Fail the config otherwise. Also, if a vhost matches an MD, it - * needs to *only* have ServerAliases from that MD. There can be no more than one - * matching MD for a vhost. But an MD can apply to several vhosts. - * 2. Synchronize with the persistent store. Iterate over all configured MDs and - * a. create them in the store if they do not already exist, neither under the - * name or with a common domain. - * b. compare domain lists from store and config, if - * - store has dns name in other MD than from config, remove dns name from store def, - * issue WARNING. - * - store misses dns name from config, add dns name and update store - * c. compare MD acme url/protocol, update if changed + +static md_t *find_closest_match(apr_array_header_t *mds, const md_t *md) +{ + md_t *candidate, *m; + apr_size_t cand_n, n; + int i; + + candidate = md_get_by_name(mds, md->name); + if (!candidate) { + /* try to find an instance that contains all domain names from md */ + for (i = 0; i < mds->nelts; ++i) { + m = APR_ARRAY_IDX(mds, i, md_t *); + if (md_contains_domains(m, md)) { + return m; + } + } + /* no matching name and no md in the list has all domains. + * We consider that managed domain as closest match that contains at least one + * domain name from md, ONLY if there is no other one that also has. 
+ */ + cand_n = 0; + for (i = 0; i < mds->nelts; ++i) { + m = APR_ARRAY_IDX(mds, i, md_t *); + n = md_common_name_count(md, m); + if (n > cand_n) { + candidate = m; + cand_n = n; + } + } + } + return candidate; +} + +typedef struct { + apr_pool_t *p; + apr_array_header_t *master_mds; + apr_array_header_t *store_names; + apr_array_header_t *maybe_new_mds; + apr_array_header_t *new_mds; + apr_array_header_t *unassigned_mds; +} sync_ctx_v2; + +static int iter_add_name(void *baton, const char *dir, const char *name, + md_store_vtype_t vtype, void *value, apr_pool_t *ptemp) +{ + sync_ctx_v2 *ctx = baton; + + (void)dir; + (void)value; + (void)ptemp; + (void)vtype; + APR_ARRAY_PUSH(ctx->store_names, const char*) = apr_pstrdup(ctx->p, name); + return APR_SUCCESS; +} + +/* A better scaling version: + * 1. The consistency of the MDs in 'master_mds' has already been verified. E.g. + * that no domain lists overlap etc. + * 2. All MD storage that exists will be overwritten by the settings we have. + * And "exists" meaning that "store/MD_SG_DOMAINS/name" exists. + * 3. For MDs that have no directory in "store/MD_SG_DOMAINS", we load all MDs + * outside the list of known names from MD_SG_DOMAINS. In this list, we + * look for the MD with the most domain overlap. + * - if we find it, we assume this is a rename and move the old MD to the new name. + * - if not, MD is completely new. + * 4. Any MD in store that does not match the "master_mds" will just be left as is. */ -apr_status_t md_reg_sync(md_reg_t *reg, apr_pool_t *p, apr_pool_t *ptemp, - apr_array_header_t *master_mds) +apr_status_t md_reg_sync_start(md_reg_t *reg, apr_array_header_t *master_mds, apr_pool_t *p) { - sync_ctx ctx; + sync_ctx_v2 ctx; apr_status_t rv; - - ctx.p = ptemp; - ctx.store_mds = apr_array_make(ptemp,100, sizeof(md_t *)); - rv = read_store_mds(reg, &ctx); + md_t *md, *oldmd; + const char *name; + int i, idx; - md_log_perror(MD_LOG_MARK, MD_LOG_DEBUG, rv, p, - "sync: found %d mds in store", ctx.store_mds->nelts); - if (APR_SUCCESS == rv) { - int i, fields; - md_t *md, *config_md, *smd, *omd; - const char *common; - - for (i = 0; i < master_mds->nelts; ++i) { - md = APR_ARRAY_IDX(master_mds, i, md_t *); - - /* find the store md that is closest match for the configured md */ - smd = md_find_closest_match(ctx.store_mds, md); - if (smd) { - fields = 0; - - /* Once stored, we keep the name */ - if (strcmp(md->name, smd->name)) { - md->name = apr_pstrdup(p, smd->name); - } - - /* Make the stored domain list *exactly* the same, even if - * someone only changed upper/lowercase, we'd like to persist that. */ - if (!md_equal_domains(md, smd, 1)) { - md_log_perror(MD_LOG_MARK, MD_LOG_DEBUG, rv, p, - "%s: domains changed", smd->name); - smd->domains = md_array_str_clone(ptemp, md->domains); - fields |= MD_UPD_DOMAINS; - } - - /* Look for other store mds which have domains now being part of smd */ - while (APR_SUCCESS == rv && (omd = md_get_by_dns_overlap(ctx.store_mds, md))) { - /* find the name now duplicate */ - common = md_common_name(md, omd); - assert(common); - - /* Is this md still configured or has it been abandoned in the config? 
*/ - config_md = md_get_by_name(master_mds, omd->name); - if (config_md && md_contains(config_md, common, 0)) { - /* domain used in two configured mds, not allowed */ - rv = APR_EINVAL; - md_log_perror(MD_LOG_MARK, MD_LOG_ERR, rv, p, - "domain %s used in md %s and %s", - common, md->name, omd->name); - } - else { - /* remove it from the other md and update store, or, if it - * is now empty, move it into the archive */ - omd->domains = md_array_str_remove(ptemp, omd->domains, common, 0); - if (apr_is_empty_array(omd->domains)) { - md_log_perror(MD_LOG_MARK, MD_LOG_WARNING, rv, p, - "All domains of the MD %s have moved elsewhere, " - " moving it to the archive. ", omd->name); - md_reg_remove(reg, ptemp, omd->name, 1); /* best effort */ - } - else { - rv = md_reg_update(reg, ptemp, omd->name, omd, MD_UPD_DOMAINS); - } - } - } - - if (MD_SVAL_UPDATE(md, smd, ca_url)) { - smd->ca_url = md->ca_url; - fields |= MD_UPD_CA_URL; - } - if (MD_SVAL_UPDATE(md, smd, ca_proto)) { - smd->ca_proto = md->ca_proto; - fields |= MD_UPD_CA_PROTO; - } - if (MD_SVAL_UPDATE(md, smd, ca_agreement)) { - smd->ca_agreement = md->ca_agreement; - fields |= MD_UPD_AGREEMENT; - } - if (MD_VAL_UPDATE(md, smd, transitive)) { - smd->transitive = md->transitive; - fields |= MD_UPD_TRANSITIVE; - } - if (MD_VAL_UPDATE(md, smd, drive_mode)) { - smd->drive_mode = md->drive_mode; - fields |= MD_UPD_DRIVE_MODE; - } - if (!apr_is_empty_array(md->contacts) - && !md_array_str_eq(md->contacts, smd->contacts, 0)) { - smd->contacts = md->contacts; - fields |= MD_UPD_CONTACTS; - } - if (MD_VAL_UPDATE(md, smd, renew_window) - || MD_VAL_UPDATE(md, smd, renew_norm)) { - md_log_perror(MD_LOG_MARK, MD_LOG_DEBUG, rv, p, - "%s: update renew norm=%ld, window=%ld", - smd->name, (long)md->renew_norm, (long)md->renew_window); - smd->renew_norm = md->renew_norm; - smd->renew_window = md->renew_window; - fields |= MD_UPD_RENEW_WINDOW; - } - if (md->ca_challenges) { - md->ca_challenges = md_array_str_compact(p, md->ca_challenges, 0); - if (!smd->ca_challenges - || !md_array_str_eq(md->ca_challenges, smd->ca_challenges, 0)) { - smd->ca_challenges = apr_array_copy(ptemp, md->ca_challenges); - fields |= MD_UPD_CA_CHALLENGES; - } - } - else if (smd->ca_challenges) { - smd->ca_challenges = NULL; - fields |= MD_UPD_CA_CHALLENGES; - } - if (!md_pkey_spec_eq(md->pkey_spec, smd->pkey_spec)) { - fields |= MD_UPD_PKEY_SPEC; - smd->pkey_spec = NULL; - if (md->pkey_spec) { - smd->pkey_spec = apr_pmemdup(p, md->pkey_spec, sizeof(md_pkey_spec_t)); - } - } - if (MD_VAL_UPDATE(md, smd, require_https)) { - smd->require_https = md->require_https; - fields |= MD_UPD_REQUIRE_HTTPS; - } - if (MD_VAL_UPDATE(md, smd, must_staple)) { - smd->must_staple = md->must_staple; - fields |= MD_UPD_MUST_STAPLE; - } - - if (fields) { - rv = md_reg_update(reg, ptemp, smd->name, smd, fields); - md_log_perror(MD_LOG_MARK, MD_LOG_DEBUG, rv, p, "md %s updated", smd->name); - } - } - else { - /* new managed domain */ - rv = md_reg_add(reg, md, ptemp); - md_log_perror(MD_LOG_MARK, MD_LOG_DEBUG, rv, p, "new md %s added", md->name); + md_log_perror(MD_LOG_MARK, MD_LOG_DEBUG, 0, p, "sync MDs, start"); + + ctx.p = p; + ctx.master_mds = master_mds; + ctx.store_names = apr_array_make(p, master_mds->nelts + 100, sizeof(const char*)); + ctx.maybe_new_mds = apr_array_make(p, master_mds->nelts, sizeof(md_t*)); + ctx.new_mds = apr_array_make(p, master_mds->nelts, sizeof(md_t*)); + ctx.unassigned_mds = apr_array_make(p, master_mds->nelts, sizeof(md_t*)); + + rv = md_store_iter_names(iter_add_name, &ctx, 
reg->store, p, MD_SG_DOMAINS, "*"); + if (APR_SUCCESS != rv) { + md_log_perror(MD_LOG_MARK, MD_LOG_ERR, rv, p, "listing existing store MD names"); + goto leave; + } + + /* Get all MDs that are not already present in store */ + for (i = 0; i < ctx.master_mds->nelts; ++i) { + md = APR_ARRAY_IDX(ctx.master_mds, i, md_t*); + idx = md_array_str_index(ctx.store_names, md->name, 0, 1); + if (idx < 0) { + APR_ARRAY_PUSH(ctx.maybe_new_mds, md_t*) = md; + md_array_remove_at(ctx.store_names, idx); + } + } + + if (ctx.maybe_new_mds->nelts == 0) goto leave; /* none new */ + if (ctx.store_names->nelts == 0) goto leave; /* all new */ + + md_log_perror(MD_LOG_MARK, MD_LOG_DEBUG, 0, p, + "sync MDs, %d potentially new MDs detected, looking for renames among " + "the %d unassigned store domains", (int)ctx.maybe_new_mds->nelts, + (int)ctx.store_names->nelts); + for (i = 0; i < ctx.store_names->nelts; ++i) { + name = APR_ARRAY_IDX(ctx.store_names, i, const char*); + if (APR_SUCCESS == md_load(reg->store, MD_SG_DOMAINS, name, &md, p)) { + APR_ARRAY_PUSH(ctx.unassigned_mds, md_t*) = md; + } + } + + md_log_perror(MD_LOG_MARK, MD_LOG_DEBUG, 0, p, + "sync MDs, %d MDs maybe new, checking store", (int)ctx.maybe_new_mds->nelts); + for (i = 0; i < ctx.maybe_new_mds->nelts; ++i) { + md = APR_ARRAY_IDX(ctx.maybe_new_mds, i, md_t*); + oldmd = find_closest_match(ctx.unassigned_mds, md); + if (oldmd) { + /* found the rename, move the domains and possible staging directory */ + md_log_perror(MD_LOG_MARK, MD_LOG_DEBUG, 0, p, + "sync MDs, found MD %s under previous name %s", md->name, oldmd->name); + rv = md_store_rename(reg->store, p, MD_SG_DOMAINS, oldmd->name, md->name); + if (APR_SUCCESS != rv) { + md_log_perror(MD_LOG_MARK, MD_LOG_ERR, 0, p, + "sync MDs, renaming MD %s to %s failed", oldmd->name, md->name); + /* ignore it? */ } + md_store_rename(reg->store, p, MD_SG_STAGING, oldmd->name, md->name); + md_array_remove(ctx.unassigned_mds, oldmd); + } + else { + APR_ARRAY_PUSH(ctx.new_mds, md_t*) = md; } } - else { - md_log_perror(MD_LOG_MARK, MD_LOG_ERR, rv, p, "loading mds"); + +leave: + md_log_perror(MD_LOG_MARK, MD_LOG_DEBUG, 0, p, + "sync MDs, %d existing, %d moved, %d new.", + (int)ctx.master_mds->nelts - ctx.maybe_new_mds->nelts, + (int)ctx.maybe_new_mds->nelts - ctx.new_mds->nelts, + (int)ctx.new_mds->nelts); + return rv; +} + +/** + * Finish syncing an MD with the store. + * 1. if there are changed properties (or if the MD is new), save it. + * 2. 
read any existing certificate and init the state of the memory MD + */ +apr_status_t md_reg_sync_finish(md_reg_t *reg, md_t *md, apr_pool_t *p, apr_pool_t *ptemp) +{ + md_t *old; + apr_status_t rv; + int changed = 1; + md_proto_t *proto; + + if (!md->ca_proto) { + md->ca_proto = MD_PROTO_ACME; } + proto = apr_hash_get(reg->protos, md->ca_proto, (apr_ssize_t)strlen(md->ca_proto)); + if (!proto) { + rv = APR_ENOTIMPL; + md_log_perror(MD_LOG_MARK, MD_LOG_ERR, rv, ptemp, + "[%s] uses unknown CA protocol '%s'", + md->name, md->ca_proto); + goto leave; + } + rv = proto->complete_md(md, p); + if (APR_SUCCESS != rv) goto leave; + + rv = state_init(reg, p, md); + if (APR_SUCCESS != rv) goto leave; + md_log_perror(MD_LOG_MARK, MD_LOG_DEBUG, rv, ptemp, "loading md %s", md->name); + if (APR_SUCCESS == md_load(reg->store, MD_SG_DOMAINS, md->name, &old, ptemp)) { + md_log_perror(MD_LOG_MARK, MD_LOG_DEBUG, rv, ptemp, "loaded md %s", md->name); + /* Some parts are kept from old, lacking new values */ + if ((!md->contacts || apr_is_empty_array(md->contacts)) && old->contacts) { + md->contacts = md_array_str_clone(p, old->contacts); + } + if (md->ca_challenges && old->ca_challenges) { + if (!md_array_str_eq(md->ca_challenges, old->ca_challenges, 0)) { + md->ca_challenges = md_array_str_compact(p, md->ca_challenges, 0); + } + } + if (!md->ca_effective && old->ca_effective) { + md->ca_effective = apr_pstrdup(p, old->ca_effective); + } + if (!md->ca_account && old->ca_account) { + md->ca_account = apr_pstrdup(p, old->ca_account); + } + + /* if everything remains the same, spare the write back */ + if (!MD_VAL_UPDATE(md, old, state) + && md_array_str_eq(md->ca_urls, old->ca_urls, 0) + && !MD_SVAL_UPDATE(md, old, ca_proto) + && !MD_SVAL_UPDATE(md, old, ca_agreement) + && !MD_VAL_UPDATE(md, old, transitive) + && md_equal_domains(md, old, 1) + && !MD_VAL_UPDATE(md, old, renew_mode) + && md_timeslice_eq(md->renew_window, old->renew_window) + && md_timeslice_eq(md->warn_window, old->warn_window) + && md_pkeys_spec_eq(md->pks, old->pks) + && !MD_VAL_UPDATE(md, old, require_https) + && !MD_VAL_UPDATE(md, old, must_staple) + && md_array_str_eq(md->acme_tls_1_domains, old->acme_tls_1_domains, 0) + && !MD_VAL_UPDATE(md, old, stapling) + && md_array_str_eq(md->contacts, old->contacts, 0) + && md_array_str_eq(md->cert_files, old->cert_files, 0) + && md_array_str_eq(md->pkey_files, old->pkey_files, 0) + && md_array_str_eq(md->ca_challenges, old->ca_challenges, 0)) { + changed = 0; + } + } + if (changed) { + md_log_perror(MD_LOG_MARK, MD_LOG_DEBUG, rv, ptemp, "saving md %s", md->name); + rv = md_save(reg->store, ptemp, MD_SG_DOMAINS, md, 0); + } +leave: + md_log_perror(MD_LOG_MARK, MD_LOG_DEBUG, rv, ptemp, "sync MDs, finish done"); return rv; } apr_status_t md_reg_remove(md_reg_t *reg, apr_pool_t *p, const char *name, int archive) { + if (reg->domains_frozen) return APR_EACCES; return md_store_move(reg->store, p, MD_SG_DOMAINS, MD_SG_ARCHIVE, name, archive); } +typedef struct { + md_reg_t *reg; + apr_pool_t *p; + apr_array_header_t *mds; +} cleanup_challenge_ctx; + +static apr_status_t cleanup_challenge_inspector(void *baton, const char *dir, const char *name, + md_store_vtype_t vtype, void *value, + apr_pool_t *ptemp) +{ + cleanup_challenge_ctx *ctx = baton; + const md_t *md; + int i, used; + apr_status_t rv; + + (void)value; + (void)vtype; + (void)dir; + for (used = 0, i = 0; i < ctx->mds->nelts && !used; ++i) { + md = APR_ARRAY_IDX(ctx->mds, i, const md_t *); + used = !strcmp(name, md->name); + } + if (!used) { + 
md_log_perror(MD_LOG_MARK, MD_LOG_DEBUG, 0, ptemp, + "challenges/%s: not in use, purging", name); + rv = md_store_purge(ctx->reg->store, ctx->p, MD_SG_CHALLENGES, name); + if (APR_SUCCESS != rv) { + md_log_perror(MD_LOG_MARK, MD_LOG_WARNING, rv, ptemp, + "challenges/%s: unable to purge", name); + } + } + return APR_SUCCESS; +} + +apr_status_t md_reg_cleanup_challenges(md_reg_t *reg, apr_pool_t *p, apr_pool_t *ptemp, + apr_array_header_t *mds) +{ + apr_status_t rv; + cleanup_challenge_ctx ctx; + + (void)p; + ctx.reg = reg; + ctx.p = ptemp; + ctx.mds = mds; + rv = md_store_iter_names(cleanup_challenge_inspector, &ctx, reg->store, ptemp, + MD_SG_CHALLENGES, "*"); + return rv; +} + /**************************************************************************************************/ /* driving */ -static apr_status_t init_proto_driver(md_proto_driver_t *driver, const md_proto_t *proto, - md_reg_t *reg, const md_t *md, - const char *challenge, int reset, apr_pool_t *p) +static apr_status_t run_init(void *baton, apr_pool_t *p, ...) { - apr_status_t rv = APR_SUCCESS; + va_list ap; + md_reg_t *reg = baton; + const md_t *md; + md_proto_driver_t *driver, **pdriver; + md_result_t *result; + apr_table_t *env; + const char *s; + int preload; + + (void)p; + va_start(ap, p); + pdriver = va_arg(ap, md_proto_driver_t **); + md = va_arg(ap, const md_t *); + preload = va_arg(ap, int); + env = va_arg(ap, apr_table_t *); + result = va_arg(ap, md_result_t *); + va_end(ap); + + *pdriver = driver = apr_pcalloc(p, sizeof(*driver)); - /* If this registry instance was not synched before (and obtained server - * properties that way), read them from the store. - */ - driver->proto = proto; driver->p = p; - driver->challenge = challenge; - driver->can_http = reg->can_http; - driver->can_https = reg->can_https; + driver->env = env? apr_table_copy(p, env) : apr_table_make(p, 10); driver->reg = reg; driver->store = md_reg_store_get(reg); driver->proxy_url = reg->proxy_url; + driver->ca_file = reg->ca_file; driver->md = md; - driver->reset = reset; + driver->can_http = reg->can_http; + driver->can_https = reg->can_https; + + s = apr_table_get(driver->env, MD_KEY_ACTIVATION_DELAY); + if (!s || APR_SUCCESS != md_duration_parse(&driver->activation_delay, s, "d")) { + driver->activation_delay = 0; + } - return rv; + if (!md->ca_proto) { + md_result_printf(result, APR_EGENERAL, "CA protocol is not defined"); + md_log_perror(MD_LOG_MARK, MD_LOG_WARNING, 0, p, "md[%s]: %s", md->name, result->detail); + goto leave; + } + + driver->proto = apr_hash_get(reg->protos, md->ca_proto, (apr_ssize_t)strlen(md->ca_proto)); + if (!driver->proto) { + md_result_printf(result, APR_EGENERAL, "Unknown CA protocol '%s'", md->ca_proto); + goto leave; + } + + if (preload) { + result->status = driver->proto->init_preload(driver, result); + } + else { + result->status = driver->proto->init(driver, result); + } + +leave: + if (APR_SUCCESS != result->status) { + md_log_perror(MD_LOG_MARK, MD_LOG_WARNING, result->status, p, "md[%s]: %s", md->name, + result->detail? 
result->detail : ""); + } + else { + md_log_perror(MD_LOG_MARK, MD_LOG_DEBUG, 0, p, "%s: init done", md->name); + } + return result->status; +} + +static apr_status_t run_test_init(void *baton, apr_pool_t *p, apr_pool_t *ptemp, va_list ap) +{ + const md_t *md; + apr_table_t *env; + md_result_t *result; + md_proto_driver_t *driver; + + (void)p; + md = va_arg(ap, const md_t *); + env = va_arg(ap, apr_table_t *); + result = va_arg(ap, md_result_t *); + + return run_init(baton, ptemp, &driver, md, 0, env, result, NULL); } -static apr_status_t run_stage(void *baton, apr_pool_t *p, apr_pool_t *ptemp, va_list ap) +apr_status_t md_reg_test_init(md_reg_t *reg, const md_t *md, struct apr_table_t *env, + md_result_t *result, apr_pool_t *p) +{ + return md_util_pool_vdo(run_test_init, reg, p, md, env, result, NULL); +} + +static apr_status_t run_renew(void *baton, apr_pool_t *p, apr_pool_t *ptemp, va_list ap) { md_reg_t *reg = baton; - const md_proto_t *proto; const md_t *md; - int reset; + int reset, attempt; md_proto_driver_t *driver; - const char *challenge; - apr_time_t *pvalid_from; + apr_table_t *env; apr_status_t rv; + md_result_t *result; (void)p; - proto = va_arg(ap, const md_proto_t *); md = va_arg(ap, const md_t *); - challenge = va_arg(ap, const char *); + env = va_arg(ap, apr_table_t *); reset = va_arg(ap, int); - pvalid_from = va_arg(ap, apr_time_t*); - - driver = apr_pcalloc(ptemp, sizeof(*driver)); - rv = init_proto_driver(driver, proto, reg, md, challenge, reset, ptemp); - if (APR_SUCCESS == rv && - APR_SUCCESS == (rv = proto->init(driver))) { - - md_log_perror(MD_LOG_MARK, MD_LOG_DEBUG, 0, ptemp, "%s: run staging", md->name); - rv = proto->stage(driver); + attempt = va_arg(ap, int); + result = va_arg(ap, md_result_t *); - if (APR_SUCCESS == rv && pvalid_from) { - *pvalid_from = driver->stage_valid_from; - } + rv = run_init(reg, ptemp, &driver, md, 0, env, result, NULL); + if (APR_SUCCESS == rv) { + md_log_perror(MD_LOG_MARK, MD_LOG_DEBUG, 0, ptemp, "%s: run staging", md->name); + driver->reset = reset; + driver->attempt = attempt; + driver->retry_failover = reg->retry_failover; + rv = driver->proto->renew(driver, result); } md_log_perror(MD_LOG_MARK, MD_LOG_DEBUG, rv, ptemp, "%s: staging done", md->name); return rv; } -apr_status_t md_reg_stage(md_reg_t *reg, const md_t *md, const char *challenge, - int reset, apr_time_t *pvalid_from, apr_pool_t *p) +apr_status_t md_reg_renew(md_reg_t *reg, const md_t *md, apr_table_t *env, + int reset, int attempt, + md_result_t *result, apr_pool_t *p) { - const md_proto_t *proto; - - if (!md->ca_proto) { - md_log_perror(MD_LOG_MARK, MD_LOG_WARNING, 0, p, "md %s has no CA protocol", md->name); - ((md_t *)md)->state = MD_S_ERROR; - return APR_SUCCESS; - } - - proto = apr_hash_get(reg->protos, md->ca_proto, (apr_ssize_t)strlen(md->ca_proto)); - if (!proto) { - md_log_perror(MD_LOG_MARK, MD_LOG_WARNING, 0, p, - "md %s has unknown CA protocol: %s", md->name, md->ca_proto); - ((md_t *)md)->state = MD_S_ERROR; - return APR_EINVAL; - } - - return md_util_pool_vdo(run_stage, reg, p, proto, md, challenge, reset, pvalid_from, NULL); + return md_util_pool_vdo(run_renew, reg, p, md, env, reset, attempt, result, NULL); } -static apr_status_t run_load(void *baton, apr_pool_t *p, apr_pool_t *ptemp, va_list ap) +static apr_status_t run_load_staging(void *baton, apr_pool_t *p, apr_pool_t *ptemp, va_list ap) { md_reg_t *reg = baton; - const char *name; - const md_proto_t *proto; - const md_t *md, *nmd; + const md_t *md; md_proto_driver_t *driver; + md_result_t 
*result; + apr_table_t *env; + md_job_t *job; apr_status_t rv; - name = va_arg(ap, const char *); + /* For the MD, check if something is in the STAGING area. If none is there, + * return that status. Otherwise ask the protocol driver to preload it into + * a new, temporary area. + * If that succeeds, we move the TEMP area over the DOMAINS (causing the + * existing one go to ARCHIVE). + * Finally, we clean up the data from CHALLENGES and STAGING. + */ + md = va_arg(ap, const md_t*); + env = va_arg(ap, apr_table_t*); + result = va_arg(ap, md_result_t*); - if (APR_STATUS_IS_ENOENT(rv = md_load(reg->store, MD_SG_STAGING, name, NULL, ptemp))) { - md_log_perror(MD_LOG_MARK, MD_LOG_TRACE3, rv, ptemp, "%s: nothing staged", name); - return APR_ENOENT; + if (APR_STATUS_IS_ENOENT(rv = md_load(reg->store, MD_SG_STAGING, md->name, NULL, ptemp))) { + md_log_perror(MD_LOG_MARK, MD_LOG_TRACE2, 0, ptemp, "%s: nothing staged", md->name); + goto out; } - md = md_reg_get(reg, name, p); - if (!md) { - return APR_ENOENT; - } + rv = run_init(baton, ptemp, &driver, md, 1, env, result, NULL); + if (APR_SUCCESS != rv) goto out; - if (!md->ca_proto) { - md_log_perror(MD_LOG_MARK, MD_LOG_WARNING, 0, p, "md %s has no CA protocol", name); - ((md_t *)md)->state = MD_S_ERROR; - return APR_EINVAL; + apr_hash_set(reg->certs, md->name, (apr_ssize_t)strlen(md->name), NULL); + md_result_activity_setn(result, "preloading staged to tmp"); + rv = driver->proto->preload(driver, MD_SG_TMP, result); + if (APR_SUCCESS != rv) goto out; + + /* If we had a job saved in STAGING, copy it over too */ + job = md_reg_job_make(reg, md->name, ptemp); + if (APR_SUCCESS == md_job_load(job)) { + md_job_set_group(job, MD_SG_TMP); + md_job_save(job, NULL, ptemp); } - proto = apr_hash_get(reg->protos, md->ca_proto, (apr_ssize_t)strlen(md->ca_proto)); - if (!proto) { - md_log_perror(MD_LOG_MARK, MD_LOG_WARNING, 0, p, - "md %s has unknown CA protocol: %s", md->name, md->ca_proto); - ((md_t *)md)->state = MD_S_ERROR; - return APR_EINVAL; + /* swap */ + md_result_activity_setn(result, "moving tmp to become new domains"); + rv = md_store_move(reg->store, p, MD_SG_TMP, MD_SG_DOMAINS, md->name, 1); + if (APR_SUCCESS != rv) { + md_result_set(result, rv, NULL); + goto out; } - driver = apr_pcalloc(ptemp, sizeof(*driver)); - init_proto_driver(driver, proto, reg, md, NULL, 0, ptemp); + md_store_purge(reg->store, p, MD_SG_STAGING, md->name); + md_store_purge(reg->store, p, MD_SG_CHALLENGES, md->name); + md_result_set(result, APR_SUCCESS, "new certificate successfully saved in domains"); + md_event_holler("installed", md->name, job, result, ptemp); + if (job->dirty) md_job_save(job, result, ptemp); + +out: + if (!APR_STATUS_IS_ENOENT(rv)) { + md_log_perror(MD_LOG_MARK, MD_LOG_TRACE1, rv, ptemp, "%s: load done", md->name); + } + return rv; +} - if (APR_SUCCESS == (rv = proto->init(driver))) { - md_log_perror(MD_LOG_MARK, MD_LOG_DEBUG, 0, ptemp, "%s: run load", md->name); - - if (APR_SUCCESS == (rv = proto->preload(driver, MD_SG_TMP))) { - /* swap */ - rv = md_store_move(reg->store, p, MD_SG_TMP, MD_SG_DOMAINS, md->name, 1); - if (APR_SUCCESS == rv) { - /* load again */ - nmd = md_reg_get(reg, md->name, p); - if (!nmd) { - rv = APR_ENOENT; - md_log_perror(MD_LOG_MARK, MD_LOG_ERR, rv, p, "loading md after staging"); - } - else if (nmd->state != MD_S_COMPLETE) { - md_log_perror(MD_LOG_MARK, MD_LOG_WARNING, rv, p, - "md has state %d after load", nmd->state); - } - - md_store_purge(reg->store, p, MD_SG_STAGING, md->name); - md_store_purge(reg->store, p, 
MD_SG_CHALLENGES, md->name); - } +apr_status_t md_reg_load_staging(md_reg_t *reg, const md_t *md, apr_table_t *env, + md_result_t *result, apr_pool_t *p) +{ + if (reg->domains_frozen) return APR_EACCES; + return md_util_pool_vdo(run_load_staging, reg, p, md, env, result, NULL); +} + +apr_status_t md_reg_load_stagings(md_reg_t *reg, apr_array_header_t *mds, + apr_table_t *env, apr_pool_t *p) +{ + apr_status_t rv = APR_SUCCESS; + md_t *md; + md_result_t *result; + int i; + + for (i = 0; i < mds->nelts; ++i) { + md = APR_ARRAY_IDX(mds, i, md_t *); + result = md_result_md_make(p, md->name); + rv = md_reg_load_staging(reg, md, env, result, p); + if (APR_SUCCESS == rv) { + md_log_perror(MD_LOG_MARK, MD_LOG_INFO, rv, p, APLOGNO(10068) + "%s: staged set activated", md->name); + } + else if (!APR_STATUS_IS_ENOENT(rv)) { + md_log_perror(MD_LOG_MARK, MD_LOG_ERR, rv, p, APLOGNO(10069) + "%s: error loading staged set", md->name); + } + } + + return rv; +} + +apr_status_t md_reg_lock_global(md_reg_t *reg, apr_pool_t *p) +{ + apr_status_t rv = APR_SUCCESS; + + if (reg->use_store_locks) { + rv = md_store_lock_global(reg->store, p, reg->lock_wait_timeout); + if (APR_SUCCESS != rv) { + md_log_perror(MD_LOG_MARK, MD_LOG_DEBUG, rv, p, + "unable to acquire global store lock"); + } + } + return rv; +} + +void md_reg_unlock_global(md_reg_t *reg, apr_pool_t *p) +{ + if (reg->use_store_locks) { + md_store_unlock_global(reg->store, p); + } +} + +apr_status_t md_reg_freeze_domains(md_reg_t *reg, apr_array_header_t *mds) +{ + apr_status_t rv = APR_SUCCESS; + md_t *md; + const md_pubcert_t *pubcert; + int i, j; + + assert(!reg->domains_frozen); + /* prefill the certs cache for all mds */ + for (i = 0; i < mds->nelts; ++i) { + md = APR_ARRAY_IDX(mds, i, md_t*); + for (j = 0; j < md_cert_count(md); ++j) { + rv = md_reg_get_pubcert(&pubcert, reg, md, i, reg->p); + if (APR_SUCCESS != rv && !APR_STATUS_IS_ENOENT(rv)) goto leave; } } - md_log_perror(MD_LOG_MARK, MD_LOG_DEBUG, rv, ptemp, "%s: load done", md->name); + reg->domains_frozen = 1; +leave: return rv; } -apr_status_t md_reg_load(md_reg_t *reg, const char *name, apr_pool_t *p) +void md_reg_set_renew_window_default(md_reg_t *reg, md_timeslice_t *renew_window) { - return md_util_pool_vdo(run_load, reg, p, name, NULL); + *reg->renew_window = *renew_window; } +void md_reg_set_warn_window_default(md_reg_t *reg, md_timeslice_t *warn_window) +{ + *reg->warn_window = *warn_window; +} + +md_job_t *md_reg_job_make(md_reg_t *reg, const char *mdomain, apr_pool_t *p) +{ + return md_job_make(p, reg->store, MD_SG_STAGING, mdomain, reg->min_delay); +} diff --git a/modules/md/md_reg.h b/modules/md/md_reg.h index d976b7f..58ee16a 100644 --- a/modules/md/md_reg.h +++ b/modules/md/md_reg.h @@ -19,9 +19,12 @@ struct apr_hash_t; struct apr_array_header_t; -struct md_store_t; struct md_pkey_t; struct md_cert_t; +struct md_result_t; +struct md_pkey_spec_t; + +#include "md_store.h" /** * A registry for managed domains with a md_store_t as persistence. @@ -30,13 +33,21 @@ struct md_cert_t; typedef struct md_reg_t md_reg_t; /** - * Initialize the registry, using the pool and loading any existing information - * from the store. + * Create the MD registry, using the pool and store. 
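+ *
+ * Minimal call sketch (illustrative only; all values below are placeholders,
+ * error handling omitted):
+ *
+ *     md_reg_t *reg = NULL;
+ *     rv = md_reg_create(&reg, pm, store, NULL, NULL,
+ *                        apr_time_from_sec(1), 2, 1, apr_time_from_sec(5));
+ *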
+ * @param preg on APR_SUCCESS, the create md_reg_t + * @param pm memory pool to use for creation + * @param store the store to base on + * @param proxy_url optional URL of a proxy to use for requests + * @param ca_file optioinal CA trust anchor file to use + * @param min_delay minimum delay between renewal attempts for a domain + * @param retry_failover numer of failed renewals attempt to fail over to alternate ACME ca */ -apr_status_t md_reg_init(md_reg_t **preg, apr_pool_t *pm, struct md_store_t *store, - const char *proxy_url); +apr_status_t md_reg_create(md_reg_t **preg, apr_pool_t *pm, md_store_t *store, + const char *proxy_url, const char *ca_file, + apr_time_t min_delay, int retry_failover, + int use_store_locks, apr_time_t lock_wait_timeout); -struct md_store_t *md_reg_store_get(md_reg_t *reg); +md_store_t *md_reg_store_get(md_reg_t *reg); apr_status_t md_reg_set_props(md_reg_t *reg, apr_pool_t *p, int can_http, int can_https); @@ -65,11 +76,6 @@ md_t *md_reg_find_overlap(md_reg_t *reg, const md_t *md, const char **pdomain, a */ md_t *md_reg_get(md_reg_t *reg, const char *name, apr_pool_t *p); -/** - * Assess the capability and need to driving this managed domain. - */ -apr_status_t md_reg_assess(md_reg_t *reg, md_t *md, int *perrored, int *prenew, apr_pool_t *p); - /** * Callback invoked for every md in the registry. If 0 is returned, iteration stops. */ @@ -85,20 +91,22 @@ int md_reg_do(md_reg_do_cb *cb, void *baton, md_reg_t *reg, apr_pool_t *p); /** * Bitmask for fields that are updated. */ -#define MD_UPD_DOMAINS 0x0001 -#define MD_UPD_CA_URL 0x0002 -#define MD_UPD_CA_PROTO 0x0004 -#define MD_UPD_CA_ACCOUNT 0x0008 -#define MD_UPD_CONTACTS 0x0010 -#define MD_UPD_AGREEMENT 0x0020 -#define MD_UPD_CERT_URL 0x0040 -#define MD_UPD_DRIVE_MODE 0x0080 -#define MD_UPD_RENEW_WINDOW 0x0100 -#define MD_UPD_CA_CHALLENGES 0x0200 -#define MD_UPD_PKEY_SPEC 0x0400 -#define MD_UPD_REQUIRE_HTTPS 0x0800 -#define MD_UPD_TRANSITIVE 0x1000 -#define MD_UPD_MUST_STAPLE 0x2000 +#define MD_UPD_DOMAINS 0x00001 +#define MD_UPD_CA_URL 0x00002 +#define MD_UPD_CA_PROTO 0x00004 +#define MD_UPD_CA_ACCOUNT 0x00008 +#define MD_UPD_CONTACTS 0x00010 +#define MD_UPD_AGREEMENT 0x00020 +#define MD_UPD_DRIVE_MODE 0x00080 +#define MD_UPD_RENEW_WINDOW 0x00100 +#define MD_UPD_CA_CHALLENGES 0x00200 +#define MD_UPD_PKEY_SPEC 0x00400 +#define MD_UPD_REQUIRE_HTTPS 0x00800 +#define MD_UPD_TRANSITIVE 0x01000 +#define MD_UPD_MUST_STAPLE 0x02000 +#define MD_UPD_PROTO 0x04000 +#define MD_UPD_WARN_WINDOW 0x08000 +#define MD_UPD_STAPLING 0x10000 #define MD_UPD_ALL 0x7FFFFFFF /** @@ -106,26 +114,87 @@ int md_reg_do(md_reg_do_cb *cb, void *baton, md_reg_t *reg, apr_pool_t *p); * values from the given md, all other values remain unchanged. */ apr_status_t md_reg_update(md_reg_t *reg, apr_pool_t *p, - const char *name, const md_t *md, int fields); + const char *name, const md_t *md, + int fields, int check_consistency); /** - * Get the credentials available for the managed domain md. Returns APR_ENOENT - * when none is available. The returned values are immutable. + * Get the chain of public certificates of the managed domain md, starting with the cert + * of the domain and going up the issuers. Returns APR_ENOENT when not available. 
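+ *
+ * Usage sketch (illustrative; mirrors the lookup performed in md_status.c):
+ *
+ *     const md_pubcert_t *pub;
+ *     const md_cert_t *leaf = NULL;
+ *     if (APR_SUCCESS == md_reg_get_pubcert(&pub, reg, md, i, p)) {
+ *         leaf = APR_ARRAY_IDX(pub->certs, 0, const md_cert_t*);
+ *     }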
*/ -apr_status_t md_reg_creds_get(const md_creds_t **pcreds, md_reg_t *reg, - md_store_group_t group, const md_t *md, apr_pool_t *p); +apr_status_t md_reg_get_pubcert(const md_pubcert_t **ppubcert, md_reg_t *reg, + const md_t *md, int i, apr_pool_t *p); -apr_status_t md_reg_get_cred_files(md_reg_t *reg, const md_t *md, apr_pool_t *p, - const char **pkeyfile, const char **pcertfile); +/** + * Get the filenames of private key and pubcert of the MD - if they exist. + * @return APR_ENOENT if one or both do not exist. + */ +apr_status_t md_reg_get_cred_files(const char **pkeyfile, const char **pcertfile, + md_reg_t *reg, md_store_group_t group, + const md_t *md, struct md_pkey_spec_t *spec, apr_pool_t *p); /** - * Synchronise the give master mds with the store. + * Synchronize the given master mds with the store. */ -apr_status_t md_reg_sync(md_reg_t *reg, apr_pool_t *p, apr_pool_t *ptemp, - apr_array_header_t *master_mds); +apr_status_t md_reg_sync_start(md_reg_t *reg, apr_array_header_t *master_mds, apr_pool_t *p); + +/** + * Re-compute the state of the MD, given current store contents. + */ +apr_status_t md_reg_sync_finish(md_reg_t *reg, md_t *md, apr_pool_t *p, apr_pool_t *ptemp); + apr_status_t md_reg_remove(md_reg_t *reg, apr_pool_t *p, const char *name, int archive); +/** + * Delete the account from the local store. + */ +apr_status_t md_reg_delete_acct(md_reg_t *reg, apr_pool_t *p, const char *acct_id); + + +/** + * Cleanup any challenges that are no longer in use. + * + * @param reg the registry + * @param p pool for permanent storage + * @param ptemp pool for temporary storage + * @param mds the list of configured MDs + */ +apr_status_t md_reg_cleanup_challenges(md_reg_t *reg, apr_pool_t *p, apr_pool_t *ptemp, + apr_array_header_t *mds); + +/** + * Mark all information from group MD_SG_DOMAINS as readonly, deny future modifications + * (MD_SG_STAGING and MD_SG_CHALLENGES remain writeable). For the given MDs, cache + * the public information (MDs themselves and their pubcerts or lack of). + */ +apr_status_t md_reg_freeze_domains(md_reg_t *reg, apr_array_header_t *mds); + +/** + * Return if the certificate of the MD should be renewed. This includes reaching + * the renewal window of an otherwise valid certificate. It return also !0 iff + * no certificate has been obtained yet. + */ +int md_reg_should_renew(md_reg_t *reg, const md_t *md, apr_pool_t *p); + +/** + * Return the timestamp when the certificate should be renewed. A value of 0 + * indicates that that renewal is not configured (see renew_mode). + */ +apr_time_t md_reg_renew_at(md_reg_t *reg, const md_t *md, apr_pool_t *p); + +/** + * Return the timestamp up to which *all* certificates for the MD can be used. + * A value of 0 indicates that there is no certificate. + */ +apr_time_t md_reg_valid_until(md_reg_t *reg, const md_t *md, apr_pool_t *p); + +/** + * Return if a warning should be issued about the certificate expiration. + * This applies the configured warn window to the remaining lifetime of the + * current certiciate. If no certificate is present, this returns 0. + */ +int md_reg_should_warn(md_reg_t *reg, const md_t *md, apr_pool_t *p); + /**************************************************************************************************/ /* protocol drivers */ @@ -133,47 +202,112 @@ typedef struct md_proto_t md_proto_t; typedef struct md_proto_driver_t md_proto_driver_t; +/** + * Operating environment for a protocol driver. This is valid only for the + * duration of one run (init + renew, init + preload). 
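+ *
+ * Call sequences as driven from md_reg.c (sketch):
+ *   renewal:  proto->init(driver, result),         then proto->renew(driver, result)
+ *   preload:  proto->init_preload(driver, result), then proto->preload(driver, group, result)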
+ */ struct md_proto_driver_t { const md_proto_t *proto; apr_pool_t *p; - const char *challenge; - int can_http; - int can_https; - struct md_store_t *store; + void *baton; + struct apr_table_t *env; + md_reg_t *reg; + md_store_t *store; + const char *proxy_url; + const char *ca_file; const md_t *md; - void *baton; + + int can_http; + int can_https; int reset; - apr_time_t stage_valid_from; - const char *proxy_url; + int attempt; + int retry_failover; + apr_interval_time_t activation_delay; }; -typedef apr_status_t md_proto_init_cb(md_proto_driver_t *driver); -typedef apr_status_t md_proto_stage_cb(md_proto_driver_t *driver); -typedef apr_status_t md_proto_preload_cb(md_proto_driver_t *driver, md_store_group_t group); +typedef apr_status_t md_proto_init_cb(md_proto_driver_t *driver, struct md_result_t *result); +typedef apr_status_t md_proto_renew_cb(md_proto_driver_t *driver, struct md_result_t *result); +typedef apr_status_t md_proto_init_preload_cb(md_proto_driver_t *driver, struct md_result_t *result); +typedef apr_status_t md_proto_preload_cb(md_proto_driver_t *driver, + md_store_group_t group, struct md_result_t *result); +typedef apr_status_t md_proto_complete_md_cb(md_t *md, apr_pool_t *p); struct md_proto_t { const char *protocol; md_proto_init_cb *init; - md_proto_stage_cb *stage; + md_proto_renew_cb *renew; + md_proto_init_preload_cb *init_preload; md_proto_preload_cb *preload; + md_proto_complete_md_cb *complete_md; }; +/** + * Run a test initialization of the renew protocol for the given MD. This verifies + * basic parameter settings and is expected to return a description of encountered + * problems in when != APR_SUCCESS. + * A message return is allocated fromt the given pool. + */ +apr_status_t md_reg_test_init(md_reg_t *reg, const md_t *md, struct apr_table_t *env, + struct md_result_t *result, apr_pool_t *p); /** - * Stage a new credentials set for the given managed domain in a separate location - * without interfering with any existing credentials. + * Obtain new credentials for the given managed domain in STAGING. + * @param reg the registry instance + * @param md the mdomain to renew + * @param env global environment of settings + * @param reset != 0 if any previous, partial information should be wiped + * @param attempt the number of attempts made this far (for this md) + * @param result for reporting results of the renewal + * @param p the memory pool to use + * @return APR_SUCCESS if new credentials have been staged successfully */ -apr_status_t md_reg_stage(md_reg_t *reg, const md_t *md, - const char *challenge, int reset, - apr_time_t *pvalid_from, apr_pool_t *p); +apr_status_t md_reg_renew(md_reg_t *reg, const md_t *md, + struct apr_table_t *env, int reset, int attempt, + struct md_result_t *result, apr_pool_t *p); /** - * Load a staged set of new credentials for the managed domain. This will archive - * any existing credential data and make the staged set the new live one. + * Load a new set of credentials for the managed domain from STAGING - if it exists. + * This will archive any existing credential data and make the staged set the new one + * in DOMAINS. * If staging is incomplete or missing, the load will fail and all credentials remain * as they are. + * + * @return APR_SUCCESS on loading new data, APR_ENOENT when nothing is staged, error otherwise. 
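+ *
+ * Caller sketch (illustrative; APR_ENOENT simply means nothing is there to
+ * activate, which is how md_reg_load_stagings() below treats it):
+ *
+ *     rv = md_reg_load_staging(reg, md, env, result, p);
+ *     if (APR_STATUS_IS_ENOENT(rv)) rv = APR_SUCCESS;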
+ */ +apr_status_t md_reg_load_staging(md_reg_t *reg, const md_t *md, struct apr_table_t *env, + struct md_result_t *result, apr_pool_t *p); + +/** + * Check given MDomains for new data in staging areas and, if it exists, load + * the new credentials. On encountering errors, leave the credentails as + * they are. + */ +apr_status_t md_reg_load_stagings(md_reg_t *reg, apr_array_header_t *mds, + apr_table_t *env, apr_pool_t *p); + +void md_reg_set_renew_window_default(md_reg_t *reg, md_timeslice_t *renew_window); +void md_reg_set_warn_window_default(md_reg_t *reg, md_timeslice_t *warn_window); + +struct md_job_t *md_reg_job_make(md_reg_t *reg, const char *mdomain, apr_pool_t *p); + +/** + * Acquire a cooperative, global lock on registry modifications. Will + * do nothing if locking is not configured. + * + * This will only prevent other children/processes/cluster nodes from + * doing the same and does not protect individual store functions from + * being called without it. + * @param reg the registy + * @param p memory pool to use + * @param max_wait maximum time to wait in order to acquire + * @return APR_SUCCESS when lock was obtained + */ +apr_status_t md_reg_lock_global(md_reg_t *reg, apr_pool_t *p); + +/** + * Realease the global registry lock. Will do nothing if there is no lock. */ -apr_status_t md_reg_load(md_reg_t *reg, const char *name, apr_pool_t *p); +void md_reg_unlock_global(md_reg_t *reg, apr_pool_t *p); #endif /* mod_md_md_reg_h */ diff --git a/modules/md/md_result.c b/modules/md/md_result.c new file mode 100644 index 0000000..64a2f70 --- /dev/null +++ b/modules/md/md_result.c @@ -0,0 +1,285 @@ +/* Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "md.h" +#include "md_json.h" +#include "md_log.h" +#include "md_result.h" + +static const char *dup_trim(apr_pool_t *p, const char *s) +{ + char *d = apr_pstrdup(p, s); + if (d) apr_collapse_spaces(d, d); + return d; +} + +md_result_t *md_result_make(apr_pool_t *p, apr_status_t status) +{ + md_result_t *result; + + result = apr_pcalloc(p, sizeof(*result)); + result->p = p; + result->md_name = MD_OTHER; + result->status = status; + return result; +} + +md_result_t *md_result_md_make(apr_pool_t *p, const char *md_name) +{ + md_result_t *result = md_result_make(p, APR_SUCCESS); + result->md_name = md_name; + return result; +} + +void md_result_reset(md_result_t *result) +{ + apr_pool_t *p = result->p; + memset(result, 0, sizeof(*result)); + result->p = p; +} + +static void on_change(md_result_t *result) +{ + if (result->on_change) result->on_change(result, result->on_change_data); +} + +void md_result_activity_set(md_result_t *result, const char *activity) +{ + md_result_activity_setn(result, activity? 
apr_pstrdup(result->p, activity) : NULL); +} + +void md_result_activity_setn(md_result_t *result, const char *activity) +{ + result->activity = activity; + result->problem = result->detail = NULL; + result->subproblems = NULL; + on_change(result); +} + +void md_result_activity_printf(md_result_t *result, const char *fmt, ...) +{ + va_list ap; + + va_start(ap, fmt); + md_result_activity_setn(result, apr_pvsprintf(result->p, fmt, ap)); + va_end(ap); +} + +void md_result_set(md_result_t *result, apr_status_t status, const char *detail) +{ + result->status = status; + result->problem = NULL; + result->detail = detail? apr_pstrdup(result->p, detail) : NULL; + result->subproblems = NULL; + on_change(result); +} + +void md_result_problem_set(md_result_t *result, apr_status_t status, + const char *problem, const char *detail, + const md_json_t *subproblems) +{ + result->status = status; + result->problem = dup_trim(result->p, problem); + result->detail = apr_pstrdup(result->p, detail); + result->subproblems = subproblems? md_json_clone(result->p, subproblems) : NULL; + on_change(result); +} + +void md_result_problem_printf(md_result_t *result, apr_status_t status, + const char *problem, const char *fmt, ...) +{ + va_list ap; + + result->status = status; + result->problem = dup_trim(result->p, problem); + + va_start(ap, fmt); + result->detail = apr_pvsprintf(result->p, fmt, ap); + va_end(ap); + result->subproblems = NULL; + on_change(result); +} + +void md_result_printf(md_result_t *result, apr_status_t status, const char *fmt, ...) +{ + va_list ap; + + result->status = status; + va_start(ap, fmt); + result->detail = apr_pvsprintf(result->p, fmt, ap); + va_end(ap); + result->subproblems = NULL; + on_change(result); +} + +void md_result_delay_set(md_result_t *result, apr_time_t ready_at) +{ + result->ready_at = ready_at; + on_change(result); +} + +md_result_t*md_result_from_json(const struct md_json_t *json, apr_pool_t *p) +{ + md_result_t *result; + const char *s; + + result = md_result_make(p, APR_SUCCESS); + result->status = (int)md_json_getl(json, MD_KEY_STATUS, NULL); + result->problem = md_json_dups(p, json, MD_KEY_PROBLEM, NULL); + result->detail = md_json_dups(p, json, MD_KEY_DETAIL, NULL); + result->activity = md_json_dups(p, json, MD_KEY_ACTIVITY, NULL); + s = md_json_dups(p, json, MD_KEY_VALID_FROM, NULL); + if (s && *s) result->ready_at = apr_date_parse_rfc(s); + result->subproblems = md_json_dupj(p, json, MD_KEY_SUBPROBLEMS, NULL); + return result; +} + +struct md_json_t *md_result_to_json(const md_result_t *result, apr_pool_t *p) +{ + md_json_t *json; + char ts[APR_RFC822_DATE_LEN]; + + json = md_json_create(p); + md_json_setl(result->status, json, MD_KEY_STATUS, NULL); + if (result->status > 0) { + char buffer[HUGE_STRING_LEN]; + apr_strerror(result->status, buffer, sizeof(buffer)); + md_json_sets(buffer, json, "status-description", NULL); + } + if (result->problem) md_json_sets(result->problem, json, MD_KEY_PROBLEM, NULL); + if (result->detail) md_json_sets(result->detail, json, MD_KEY_DETAIL, NULL); + if (result->activity) md_json_sets(result->activity, json, MD_KEY_ACTIVITY, NULL); + if (result->ready_at > 0) { + apr_rfc822_date(ts, result->ready_at); + md_json_sets(ts, json, MD_KEY_VALID_FROM, NULL); + } + if (result->subproblems) { + md_json_setj(result->subproblems, json, MD_KEY_SUBPROBLEMS, NULL); + } + return json; +} + +static int str_cmp(const char *s1, const char *s2) +{ + if (s1 == s2) return 0; + if (!s1) return -1; + if (!s2) return 1; + return strcmp(s1, s2); +} + +int 
md_result_cmp(const md_result_t *r1, const md_result_t *r2) +{ + int n; + if (r1 == r2) return 0; + if (!r1) return -1; + if (!r2) return 1; + if ((n = r1->status - r2->status)) return n; + if ((n = str_cmp(r1->problem, r2->problem))) return n; + if ((n = str_cmp(r1->detail, r2->detail))) return n; + if ((n = str_cmp(r1->activity, r2->activity))) return n; + return (int)(r1->ready_at - r2->ready_at); +} + +void md_result_assign(md_result_t *dest, const md_result_t *src) +{ + dest->status = src->status; + dest->problem = src->problem; + dest->detail = src->detail; + dest->activity = src->activity; + dest->ready_at = src->ready_at; + dest->subproblems = src->subproblems; +} + +void md_result_dup(md_result_t *dest, const md_result_t *src) +{ + dest->status = src->status; + dest->problem = src->problem? dup_trim(dest->p, src->problem) : NULL; + dest->detail = src->detail? apr_pstrdup(dest->p, src->detail) : NULL; + dest->activity = src->activity? apr_pstrdup(dest->p, src->activity) : NULL; + dest->ready_at = src->ready_at; + dest->subproblems = src->subproblems? md_json_clone(dest->p, src->subproblems) : NULL; + on_change(dest); +} + +void md_result_log(md_result_t *result, unsigned int level) +{ + if (md_log_is_level(result->p, (md_log_level_t)level)) { + const char *sep = ""; + const char *msg = ""; + + if (result->md_name) { + msg = apr_psprintf(result->p, "md[%s]", result->md_name); + sep = " "; + } + if (result->activity) { + msg = apr_psprintf(result->p, "%s%swhile[%s]", msg, sep, result->activity); + sep = " "; + } + if (result->problem) { + msg = apr_psprintf(result->p, "%s%sproblem[%s]", msg, sep, result->problem); + sep = " "; + } + if (result->detail) { + msg = apr_psprintf(result->p, "%s%sdetail[%s]", msg, sep, result->detail); + sep = " "; + } + if (result->subproblems) { + msg = apr_psprintf(result->p, "%s%ssubproblems[%s]", msg, sep, + md_json_writep(result->subproblems, result->p, MD_JSON_FMT_COMPACT)); + sep = " "; + } + md_log_perror(MD_LOG_MARK, (md_log_level_t)level, result->status, result->p, "%s", msg); + } +} + +void md_result_on_change(md_result_t *result, md_result_change_cb *cb, void *data) +{ + result->on_change = cb; + result->on_change_data = data; +} + +apr_status_t md_result_raise(md_result_t *result, const char *event, apr_pool_t *p) +{ + if (result->on_raise) return result->on_raise(result, result->on_raise_data, event, p); + return APR_SUCCESS; +} + +void md_result_holler(md_result_t *result, const char *event, apr_pool_t *p) +{ + if (result->on_holler) result->on_holler(result, result->on_holler_data, event, p); +} + +void md_result_on_raise(md_result_t *result, md_result_raise_cb *cb, void *data) +{ + result->on_raise = cb; + result->on_raise_data = data; +} + +void md_result_on_holler(md_result_t *result, md_result_holler_cb *cb, void *data) +{ + result->on_holler = cb; + result->on_holler_data = data; +} diff --git a/modules/md/md_result.h b/modules/md/md_result.h new file mode 100644 index 0000000..e83bdd2 --- /dev/null +++ b/modules/md/md_result.h @@ -0,0 +1,87 @@ +/* Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef mod_md_md_result_h +#define mod_md_md_result_h + +struct md_json_t; +struct md_t; + +typedef struct md_result_t md_result_t; + +typedef void md_result_change_cb(md_result_t *result, void *data); +typedef apr_status_t md_result_raise_cb(md_result_t *result, void *data, const char *event, apr_pool_t *p); +typedef void md_result_holler_cb(md_result_t *result, void *data, const char *event, apr_pool_t *p); + +struct md_result_t { + apr_pool_t *p; + const char *md_name; + apr_status_t status; + const char *problem; + const char *detail; + const struct md_json_t *subproblems; + const char *activity; + apr_time_t ready_at; + md_result_change_cb *on_change; + void *on_change_data; + md_result_raise_cb *on_raise; + void *on_raise_data; + md_result_holler_cb *on_holler; + void *on_holler_data; +}; + +md_result_t *md_result_make(apr_pool_t *p, apr_status_t status); +md_result_t *md_result_md_make(apr_pool_t *p, const char *md_name); +void md_result_reset(md_result_t *result); + +void md_result_activity_set(md_result_t *result, const char *activity); +void md_result_activity_setn(md_result_t *result, const char *activity); +void md_result_activity_printf(md_result_t *result, const char *fmt, ...); + +void md_result_set(md_result_t *result, apr_status_t status, const char *detail); +void md_result_problem_set(md_result_t *result, apr_status_t status, + const char *problem, const char *detail, + const struct md_json_t *subproblems); +void md_result_problem_printf(md_result_t *result, apr_status_t status, + const char *problem, const char *fmt, ...); + +#define MD_RESULT_LOG_ID(logno) "urn:org:apache:httpd:log:"logno + +void md_result_printf(md_result_t *result, apr_status_t status, const char *fmt, ...); + +void md_result_delay_set(md_result_t *result, apr_time_t ready_at); + +md_result_t*md_result_from_json(const struct md_json_t *json, apr_pool_t *p); +struct md_json_t *md_result_to_json(const md_result_t *result, apr_pool_t *p); + +int md_result_cmp(const md_result_t *r1, const md_result_t *r2); + +void md_result_assign(md_result_t *dest, const md_result_t *src); +void md_result_dup(md_result_t *dest, const md_result_t *src); + +void md_result_log(md_result_t *result, unsigned int level); + +void md_result_on_change(md_result_t *result, md_result_change_cb *cb, void *data); + +/* events in the context of a result genesis */ + +apr_status_t md_result_raise(md_result_t *result, const char *event, apr_pool_t *p); +void md_result_holler(md_result_t *result, const char *event, apr_pool_t *p); + +void md_result_on_raise(md_result_t *result, md_result_raise_cb *cb, void *data); +void md_result_on_holler(md_result_t *result, md_result_holler_cb *cb, void *data); + +#endif /* mod_md_md_result_h */ diff --git a/modules/md/md_status.c b/modules/md/md_status.c new file mode 100644 index 0000000..936c653 --- /dev/null +++ b/modules/md/md_status.c @@ -0,0 +1,653 @@ +/* Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. 
+ * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include +#include + +#include +#include +#include +#include +#include + +#include "md_json.h" +#include "md.h" +#include "md_acme.h" +#include "md_crypt.h" +#include "md_event.h" +#include "md_log.h" +#include "md_ocsp.h" +#include "md_store.h" +#include "md_result.h" +#include "md_reg.h" +#include "md_util.h" +#include "md_status.h" + +#define MD_STATUS_WITH_SCTS 0 + +/**************************************************************************************************/ +/* certificate status information */ + +static apr_status_t status_get_cert_json(md_json_t **pjson, const md_cert_t *cert, apr_pool_t *p) +{ + const char *finger; + apr_status_t rv = APR_SUCCESS; + md_timeperiod_t valid; + md_json_t *json; + + json = md_json_create(p); + valid.start = md_cert_get_not_before(cert); + valid.end = md_cert_get_not_after(cert); + md_json_set_timeperiod(&valid, json, MD_KEY_VALID, NULL); + md_json_sets(md_cert_get_serial_number(cert, p), json, MD_KEY_SERIAL, NULL); + if (APR_SUCCESS != (rv = md_cert_to_sha256_fingerprint(&finger, cert, p))) goto leave; + md_json_sets(finger, json, MD_KEY_SHA256_FINGERPRINT, NULL); + +#if MD_STATUS_WITH_SCTS + do { + apr_array_header_t *scts; + const char *hex; + const md_sct *sct; + md_json_t *sctj; + int i; + + scts = apr_array_make(p, 5, sizeof(const md_sct*)); + if (APR_SUCCESS == md_cert_get_ct_scts(scts, p, cert)) { + for (i = 0; i < scts->nelts; ++i) { + sct = APR_ARRAY_IDX(scts, i, const md_sct*); + sctj = md_json_create(p); + + apr_rfc822_date(ts, sct->timestamp); + md_json_sets(ts, sctj, "signed", NULL); + md_json_setl(sct->version, sctj, MD_KEY_VERSION, NULL); + md_data_to_hex(&hex, 0, p, sct->logid); + md_json_sets(hex, sctj, "logid", NULL); + md_data_to_hex(&hex, 0, p, sct->signature); + md_json_sets(hex, sctj, "signature", NULL); + md_json_sets(md_nid_get_sname(sct->signature_type_nid), sctj, "signature-type", NULL); + md_json_addj(sctj, json, "scts", NULL); + } + } + while (0); +#endif +leave: + *pjson = (APR_SUCCESS == rv)? 
json : NULL; + return rv; +} + +static apr_status_t job_loadj(md_json_t **pjson, md_store_group_t group, const char *name, + struct md_reg_t *reg, int with_log, apr_pool_t *p) +{ + apr_status_t rv; + + md_store_t *store = md_reg_store_get(reg); + rv = md_store_load_json(store, group, name, MD_FN_JOB, pjson, p); + if (APR_SUCCESS == rv && !with_log) md_json_del(*pjson, MD_KEY_LOG, NULL); + return rv; +} + +static apr_status_t status_get_cert_json_ex( + md_json_t **pjson, + const md_cert_t *cert, + const md_t *md, + md_reg_t *reg, + md_ocsp_reg_t *ocsp, + int with_logs, + apr_pool_t *p) +{ + md_json_t *certj, *jobj; + md_timeperiod_t ocsp_valid; + md_ocsp_cert_stat_t cert_stat; + apr_status_t rv; + + if (APR_SUCCESS != (rv = status_get_cert_json(&certj, cert, p))) goto leave; + if (md->stapling && ocsp) { + rv = md_ocsp_get_meta(&cert_stat, &ocsp_valid, ocsp, cert, p, md); + if (APR_SUCCESS == rv) { + md_json_sets(md_ocsp_cert_stat_name(cert_stat), certj, MD_KEY_OCSP, MD_KEY_STATUS, NULL); + md_json_set_timeperiod(&ocsp_valid, certj, MD_KEY_OCSP, MD_KEY_VALID, NULL); + } + else if (!APR_STATUS_IS_ENOENT(rv)) goto leave; + rv = APR_SUCCESS; + if (APR_SUCCESS == job_loadj(&jobj, MD_SG_OCSP, md->name, reg, with_logs, p)) { + md_json_setj(jobj, certj, MD_KEY_OCSP, MD_KEY_RENEWAL, NULL); + } + } +leave: + *pjson = (APR_SUCCESS == rv)? certj : NULL; + return rv; +} + +static int get_cert_count(const md_t *md, int from_staging) +{ + if (!from_staging && md->cert_files && md->cert_files->nelts) { + return md->cert_files->nelts; + } + return md_pkeys_spec_count(md->pks); +} + +static const char *get_cert_name(const md_t *md, int i, int from_staging, apr_pool_t *p) +{ + if (!from_staging && md->cert_files && md->cert_files->nelts) { + /* static files configured, not from staging, used index names */ + return apr_psprintf(p, "%d", i); + } + return md_pkey_spec_name(md_pkeys_spec_get(md->pks, i)); +} + +static apr_status_t status_get_certs_json(md_json_t **pjson, apr_array_header_t *certs, + int from_staging, + const md_t *md, md_reg_t *reg, + md_ocsp_reg_t *ocsp, int with_logs, + apr_pool_t *p) +{ + md_json_t *json, *certj; + md_timeperiod_t certs_valid = {0, 0}, valid; + md_cert_t *cert; + int i; + apr_status_t rv = APR_SUCCESS; + + json = md_json_create(p); + for (i = 0; i < get_cert_count(md, from_staging); ++i) { + cert = APR_ARRAY_IDX(certs, i, md_cert_t*); + if (!cert) continue; + + rv = status_get_cert_json_ex(&certj, cert, md, reg, ocsp, with_logs, p); + if (APR_SUCCESS != rv) goto leave; + valid = md_cert_get_valid(cert); + certs_valid = i? md_timeperiod_common(&certs_valid, &valid) : valid; + md_json_setj(certj, json, get_cert_name(md, i, from_staging, p), NULL); + } + + if (certs_valid.start) { + md_json_set_timeperiod(&certs_valid, json, MD_KEY_VALID, NULL); + } +leave: + *pjson = (APR_SUCCESS == rv)? 
json : NULL; + return rv; +} + +static apr_status_t get_staging_certs_json(md_json_t **pjson, const md_t *md, + md_reg_t *reg, apr_pool_t *p) +{ + md_pkey_spec_t *spec; + int i; + apr_array_header_t *chain, *certs; + const md_cert_t *cert; + apr_status_t rv; + + certs = apr_array_make(p, 5, sizeof(md_cert_t*)); + for (i = 0; i < get_cert_count(md, 1); ++i) { + spec = md_pkeys_spec_get(md->pks, i); + cert = NULL; + rv = md_pubcert_load(md_reg_store_get(reg), MD_SG_STAGING, md->name, spec, &chain, p); + if (APR_SUCCESS == rv) { + cert = APR_ARRAY_IDX(chain, 0, const md_cert_t*); + } + APR_ARRAY_PUSH(certs, const md_cert_t*) = cert; + } + return status_get_certs_json(pjson, certs, 1, md, reg, NULL, 0, p); +} + +static apr_status_t status_get_md_json(md_json_t **pjson, const md_t *md, + md_reg_t *reg, md_ocsp_reg_t *ocsp, + int with_logs, apr_pool_t *p) +{ + md_json_t *mdj, *certsj, *jobj; + int renew; + const md_pubcert_t *pubcert; + const md_cert_t *cert = NULL; + apr_array_header_t *certs; + apr_status_t rv = APR_SUCCESS; + apr_time_t renew_at; + int i; + + mdj = md_to_public_json(md, p); + certs = apr_array_make(p, 5, sizeof(md_cert_t*)); + for (i = 0; i < get_cert_count(md, 0); ++i) { + cert = NULL; + if (APR_SUCCESS == md_reg_get_pubcert(&pubcert, reg, md, i, p)) { + cert = APR_ARRAY_IDX(pubcert->certs, 0, const md_cert_t*); + } + APR_ARRAY_PUSH(certs, const md_cert_t*) = cert; + } + + rv = status_get_certs_json(&certsj, certs, 0, md, reg, ocsp, with_logs, p); + if (APR_SUCCESS != rv) goto leave; + md_json_setj(certsj, mdj, MD_KEY_CERT, NULL); + + renew_at = md_reg_renew_at(reg, md, p); + if (renew_at > 0) { + md_json_set_time(renew_at, mdj, MD_KEY_RENEW_AT, NULL); + } + + md_json_setb(md->stapling, mdj, MD_KEY_STAPLING, NULL); + md_json_setb(md->watched, mdj, MD_KEY_WATCHED, NULL); + renew = md_reg_should_renew(reg, md, p); + if (renew) { + md_json_setb(renew, mdj, MD_KEY_RENEW, NULL); + rv = job_loadj(&jobj, MD_SG_STAGING, md->name, reg, with_logs, p); + if (APR_SUCCESS == rv) { + if (APR_SUCCESS == get_staging_certs_json(&certsj, md, reg, p)) { + md_json_setj(certsj, jobj, MD_KEY_CERT, NULL); + } + md_json_setj(jobj, mdj, MD_KEY_RENEWAL, NULL); + } + else if (APR_STATUS_IS_ENOENT(rv)) rv = APR_SUCCESS; + else goto leave; + } + +leave: + if (APR_SUCCESS != rv) { + md_json_setl(rv, mdj, MD_KEY_ERROR, NULL); + } + *pjson = mdj; + return rv; +} + +apr_status_t md_status_get_md_json(md_json_t **pjson, const md_t *md, + md_reg_t *reg, md_ocsp_reg_t *ocsp, apr_pool_t *p) +{ + return status_get_md_json(pjson, md, reg, ocsp, 1, p); +} + +apr_status_t md_status_get_json(md_json_t **pjson, apr_array_header_t *mds, + md_reg_t *reg, md_ocsp_reg_t *ocsp, apr_pool_t *p) +{ + md_json_t *json, *mdj; + const md_t *md; + int i; + + json = md_json_create(p); + md_json_sets(MOD_MD_VERSION, json, MD_KEY_VERSION, NULL); + for (i = 0; i < mds->nelts; ++i) { + md = APR_ARRAY_IDX(mds, i, const md_t *); + status_get_md_json(&mdj, md, reg, ocsp, 0, p); + md_json_addj(mdj, json, MD_KEY_MDS, NULL); + } + *pjson = json; + return APR_SUCCESS; +} + +/**************************************************************************************************/ +/* drive job persistence */ + +md_job_t *md_job_make(apr_pool_t *p, md_store_t *store, + md_store_group_t group, const char *name, + apr_time_t min_delay) +{ + md_job_t *job = apr_pcalloc(p, sizeof(*job)); + job->group = group; + job->mdomain = apr_pstrdup(p, name); + job->store = store; + job->p = p; + job->max_log = 128; + job->min_delay = min_delay; + return job; +} + 
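+/* Typical persistence flow for a job, pieced together from the calls in this
+ * file and in md_reg.c (illustrative sketch only):
+ *
+ *     md_job_t *job = md_reg_job_make(reg, md->name, p);
+ *     if (APR_SUCCESS == md_job_load(job)) {
+ *         md_job_log_append(job, "progress", NULL, "renewal attempted");
+ *         if (job->dirty) md_job_save(job, NULL, p);
+ *     }
+ */
+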
+void md_job_set_group(md_job_t *job, md_store_group_t group) +{ + job->group = group; +} + +static void md_job_from_json(md_job_t *job, md_json_t *json, apr_pool_t *p) +{ + const char *s; + + /* not good, this is malloced from a temp pool */ + /*job->mdomain = md_json_gets(json, MD_KEY_NAME, NULL);*/ + job->finished = md_json_getb(json, MD_KEY_FINISHED, NULL); + job->notified = md_json_getb(json, MD_KEY_NOTIFIED, NULL); + job->notified_renewed = md_json_getb(json, MD_KEY_NOTIFIED_RENEWED, NULL); + s = md_json_dups(p, json, MD_KEY_NEXT_RUN, NULL); + if (s && *s) job->next_run = apr_date_parse_rfc(s); + s = md_json_dups(p, json, MD_KEY_LAST_RUN, NULL); + if (s && *s) job->last_run = apr_date_parse_rfc(s); + s = md_json_dups(p, json, MD_KEY_VALID_FROM, NULL); + if (s && *s) job->valid_from = apr_date_parse_rfc(s); + job->error_runs = (int)md_json_getl(json, MD_KEY_ERRORS, NULL); + if (md_json_has_key(json, MD_KEY_LAST, NULL)) { + job->last_result = md_result_from_json(md_json_getcj(json, MD_KEY_LAST, NULL), p); + } + job->log = md_json_getj(json, MD_KEY_LOG, NULL); +} + +static void job_to_json(md_json_t *json, const md_job_t *job, + md_result_t *result, apr_pool_t *p) +{ + char ts[APR_RFC822_DATE_LEN]; + + md_json_sets(job->mdomain, json, MD_KEY_NAME, NULL); + md_json_setb(job->finished, json, MD_KEY_FINISHED, NULL); + md_json_setb(job->notified, json, MD_KEY_NOTIFIED, NULL); + md_json_setb(job->notified_renewed, json, MD_KEY_NOTIFIED_RENEWED, NULL); + if (job->next_run > 0) { + apr_rfc822_date(ts, job->next_run); + md_json_sets(ts, json, MD_KEY_NEXT_RUN, NULL); + } + if (job->last_run > 0) { + apr_rfc822_date(ts, job->last_run); + md_json_sets(ts, json, MD_KEY_LAST_RUN, NULL); + } + if (job->valid_from > 0) { + apr_rfc822_date(ts, job->valid_from); + md_json_sets(ts, json, MD_KEY_VALID_FROM, NULL); + } + md_json_setl(job->error_runs, json, MD_KEY_ERRORS, NULL); + if (!result) result = job->last_result; + if (result) { + md_json_setj(md_result_to_json(result, p), json, MD_KEY_LAST, NULL); + } + if (job->log) md_json_setj(job->log, json, MD_KEY_LOG, NULL); +} + +apr_status_t md_job_load(md_job_t *job) +{ + md_json_t *jprops; + apr_status_t rv; + + rv = md_store_load_json(job->store, job->group, job->mdomain, MD_FN_JOB, &jprops, job->p); + if (APR_SUCCESS == rv) { + md_job_from_json(job, jprops, job->p); + } + return rv; +} + +apr_status_t md_job_save(md_job_t *job, md_result_t *result, apr_pool_t *p) +{ + md_json_t *jprops; + apr_status_t rv; + + jprops = md_json_create(p); + job_to_json(jprops, job, result, p); + rv = md_store_save_json(job->store, p, job->group, job->mdomain, MD_FN_JOB, jprops, 0); + if (APR_SUCCESS == rv) job->dirty = 0; + return rv; +} + +void md_job_log_append(md_job_t *job, const char *type, + const char *status, const char *detail) +{ + md_json_t *entry; + char ts[APR_RFC822_DATE_LEN]; + + entry = md_json_create(job->p); + apr_rfc822_date(ts, apr_time_now()); + md_json_sets(ts, entry, MD_KEY_WHEN, NULL); + md_json_sets(type, entry, MD_KEY_TYPE, NULL); + if (status) md_json_sets(status, entry, MD_KEY_STATUS, NULL); + if (detail) md_json_sets(detail, entry, MD_KEY_DETAIL, NULL); + if (!job->log) job->log = md_json_create(job->p); + md_json_insertj(entry, 0, job->log, MD_KEY_ENTRIES, NULL); + md_json_limita(job->max_log, job->log, MD_KEY_ENTRIES, NULL); + job->dirty = 1; +} + +typedef struct { + md_job_t *job; + const char *type; + md_json_t *entry; + size_t index; +} log_find_ctx; + +static int find_first_log_entry(void *baton, size_t index, md_json_t *entry) +{ + 
log_find_ctx *ctx = baton; + const char *etype; + + etype = md_json_gets(entry, MD_KEY_TYPE, NULL); + if (etype == ctx->type || (etype && ctx->type && !strcmp(etype, ctx->type))) { + ctx->entry = entry; + ctx->index = index; + return 0; + } + return 1; +} + +md_json_t *md_job_log_get_latest(md_job_t *job, const char *type) + +{ + log_find_ctx ctx; + + memset(&ctx, 0, sizeof(ctx)); + ctx.job = job; + ctx.type = type; + if (job->log) md_json_itera(find_first_log_entry, &ctx, job->log, MD_KEY_ENTRIES, NULL); + return ctx.entry; +} + +apr_time_t md_job_log_get_time_of_latest(md_job_t *job, const char *type) +{ + md_json_t *entry; + const char *s; + + entry = md_job_log_get_latest(job, type); + if (entry) { + s = md_json_gets(entry, MD_KEY_WHEN, NULL); + if (s) return apr_date_parse_rfc(s); + } + return 0; +} + +void md_status_take_stock(md_json_t **pjson, apr_array_header_t *mds, + md_reg_t *reg, apr_pool_t *p) +{ + const md_t *md; + md_job_t *job; + int i, complete, renewing, errored, ready, total; + md_json_t *json; + + json = md_json_create(p); + complete = renewing = errored = ready = total = 0; + for (i = 0; i < mds->nelts; ++i) { + md = APR_ARRAY_IDX(mds, i, const md_t *); + ++total; + switch (md->state) { + case MD_S_COMPLETE: ++complete; /* fall through */ + case MD_S_INCOMPLETE: + if (md_reg_should_renew(reg, md, p)) { + ++renewing; + job = md_reg_job_make(reg, md->name, p); + if (APR_SUCCESS == md_job_load(job)) { + if (job->error_runs > 0 + || (job->last_result && job->last_result->status != APR_SUCCESS)) { + ++errored; + } + else if (job->finished) { + ++ready; + } + } + } + break; + default: ++errored; break; + } + } + md_json_setl(total, json, MD_KEY_TOTAL, NULL); + md_json_setl(complete, json, MD_KEY_COMPLETE, NULL); + md_json_setl(renewing, json, MD_KEY_RENEWING, NULL); + md_json_setl(errored, json, MD_KEY_ERRORED, NULL); + md_json_setl(ready, json, MD_KEY_READY, NULL); + *pjson = json; +} + +typedef struct { + apr_pool_t *p; + md_job_t *job; + md_store_t *store; + md_result_t *last; + apr_time_t last_save; +} md_job_result_ctx; + +static void job_result_update(md_result_t *result, void *data) +{ + md_job_result_ctx *ctx = data; + apr_time_t now; + const char *msg, *sep; + + if (md_result_cmp(ctx->last, result)) { + now = apr_time_now(); + md_result_assign(ctx->last, result); + if (result->activity || result->problem || result->detail) { + msg = sep = ""; + if (result->activity) { + msg = apr_psprintf(result->p, "%s", result->activity); + sep = ": "; + } + if (result->detail) { + msg = apr_psprintf(result->p, "%s%s%s", msg, sep, result->detail); + sep = ", "; + } + if (result->problem) { + msg = apr_psprintf(result->p, "%s%sproblem: %s", msg, sep, result->problem); + sep = " "; + } + md_job_log_append(ctx->job, "progress", NULL, msg); + + if (ctx->store && apr_time_as_msec(now - ctx->last_save) > 500) { + md_job_save(ctx->job, result, ctx->p); + ctx->last_save = now; + } + } + } +} + +static apr_status_t job_result_raise(md_result_t *result, void *data, const char *event, apr_pool_t *p) +{ + md_job_result_ctx *ctx = data; + (void)p; + if (result == ctx->job->observing) { + return md_job_notify(ctx->job, event, result); + } + return APR_SUCCESS; +} + +static void job_result_holler(md_result_t *result, void *data, const char *event, apr_pool_t *p) +{ + md_job_result_ctx *ctx = data; + if (result == ctx->job->observing) { + md_event_holler(event, ctx->job->mdomain, ctx->job, result, p); + } +} + +static void job_observation_start(md_job_t *job, md_result_t *result, md_store_t 
*store) +{ + md_job_result_ctx *ctx; + + if (job->observing) md_result_on_change(job->observing, NULL, NULL); + job->observing = result; + + ctx = apr_pcalloc(result->p, sizeof(*ctx)); + ctx->p = result->p; + ctx->job = job; + ctx->store = store; + ctx->last = md_result_md_make(result->p, APR_SUCCESS); + md_result_assign(ctx->last, result); + md_result_on_change(result, job_result_update, ctx); + md_result_on_raise(result, job_result_raise, ctx); + md_result_on_holler(result, job_result_holler, ctx); +} + +static void job_observation_end(md_job_t *job) +{ + if (job->observing) md_result_on_change(job->observing, NULL, NULL); + job->observing = NULL; +} + +void md_job_start_run(md_job_t *job, md_result_t *result, md_store_t *store) +{ + job->fatal_error = 0; + job->last_run = apr_time_now(); + job_observation_start(job, result, store); + md_job_log_append(job, "starting", NULL, NULL); +} + +apr_time_t md_job_delay_on_errors(md_job_t *job, int err_count, const char *last_problem) +{ + apr_time_t delay = 0, max_delay = apr_time_from_sec(24*60*60); /* daily */ + unsigned char c; + + if (last_problem && md_acme_problem_is_input_related(last_problem)) { + /* If the ACME server reported a problem and that problem indicates that our + * input values, e.g. our configuration, have something wrong, we always + * go to max delay as frequent retries are unlikely to resolve the situation. + * However, we should nevertheless retry daily, because it might be that there + * is a bug in the server. Unlikely, but... */ + delay = max_delay; + } + else if (err_count > 0) { + /* back-off duration, depending on the errors we encounter in a row */ + delay = job->min_delay << (err_count - 1); + if (delay > max_delay) { + delay = max_delay; + } + } + if (delay > 0) { + /* jitter the delay by +/- 0-50%. + * Background: we see retries of jobs being too regular (e.g. all at midnight), + * possibly accumulating from many installations that restart their Apache at a + * fixed hour. This can contribute to an overload at the CA and a continuation + * of failure. 
+ */ + md_rand_bytes(&c, sizeof(c), job->p); + delay += apr_time_from_sec((apr_time_sec(delay) * (c - 128)) / 256); + } + return delay; +} + +void md_job_end_run(md_job_t *job, md_result_t *result) +{ + if (APR_SUCCESS == result->status) { + job->finished = 1; + job->valid_from = result->ready_at; + job->error_runs = 0; + job->dirty = 1; + md_job_log_append(job, "finished", NULL, NULL); + } + else { + ++job->error_runs; + job->dirty = 1; + job->next_run = apr_time_now() + md_job_delay_on_errors(job, job->error_runs, result->problem); + } + job_observation_end(job); +} + +void md_job_retry_at(md_job_t *job, apr_time_t later) +{ + job->next_run = later; + job->dirty = 1; +} + +apr_status_t md_job_notify(md_job_t *job, const char *reason, md_result_t *result) +{ + apr_status_t rv; + + md_result_set(result, APR_SUCCESS, NULL); + rv = md_event_raise(reason, job->mdomain, job, result, job->p); + job->dirty = 1; + if (APR_SUCCESS == rv && APR_SUCCESS == result->status) { + job->notified = 1; + if (!strcmp("renewed", reason)) { + job->notified_renewed = 1; + } + } + else { + ++job->error_runs; + job->next_run = apr_time_now() + md_job_delay_on_errors(job, job->error_runs, result->problem); + } + return result->status; +} + diff --git a/modules/md/md_status.h b/modules/md/md_status.h new file mode 100644 index 0000000..f4d09bd --- /dev/null +++ b/modules/md/md_status.h @@ -0,0 +1,126 @@ +/* Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef md_status_h +#define md_status_h + +struct md_json_t; +struct md_reg_t; +struct md_result_t; +struct md_ocsp_reg_t; + +#include "md_store.h" + +/** + * Get a JSON summary of the MD and its status (certificates, jobs, etc.). + */ +apr_status_t md_status_get_md_json(struct md_json_t **pjson, const md_t *md, + struct md_reg_t *reg, struct md_ocsp_reg_t *ocsp, + apr_pool_t *p); + +/** + * Get a JSON summary of all MDs and their status. + */ +apr_status_t md_status_get_json(struct md_json_t **pjson, apr_array_header_t *mds, + struct md_reg_t *reg, struct md_ocsp_reg_t *ocsp, + apr_pool_t *p); + +/** + * Take stock of all MDs given for a short overview. The JSON returned + * will carry integers for MD_KEY_COMPLETE, MD_KEY_RENEWING, + * MD_KEY_ERRORED, MD_KEY_READY and MD_KEY_TOTAL. 
+ */ +void md_status_take_stock(struct md_json_t **pjson, apr_array_header_t *mds, + struct md_reg_t *reg, apr_pool_t *p); + + +typedef struct md_job_t md_job_t; + +struct md_job_t { + md_store_group_t group;/* group where job is persisted */ + const char *mdomain; /* Name of the MD this job is about */ + md_store_t *store; /* store where it is persisted */ + apr_pool_t *p; + apr_time_t next_run; /* Time this job wants to be processed next */ + apr_time_t last_run; /* Time this job ran last (or 0) */ + struct md_result_t *last_result; /* Result from last run */ + int finished; /* true iff the job finished successfully */ + int notified; /* true iff notifications were handled successfully */ + int notified_renewed; /* true iff a 'renewed' notification was handled successfully */ + apr_time_t valid_from; /* at which time the finished job results become valid, 0 if immediate */ + int error_runs; /* Number of errored runs of an unfinished job */ + int fatal_error; /* a fatal error is remedied by retrying */ + md_json_t *log; /* array of log objects with minimum fields + MD_KEY_WHEN (timestamp) and MD_KEY_TYPE (string) */ + apr_size_t max_log; /* max number of log entries, new ones replace oldest */ + int dirty; + struct md_result_t *observing; + apr_time_t min_delay; /* smallest delay a repeated attempt should have */ +}; + +/** + * Create a new job instance for the given MD name. + * Job load/save will work using the name. + */ +md_job_t *md_job_make(apr_pool_t *p, md_store_t *store, + md_store_group_t group, const char *name, + apr_time_t min_delay); + +void md_job_set_group(md_job_t *job, md_store_group_t group); + +/** + * Update the job from storage in /job->mdomain. + */ +apr_status_t md_job_load(md_job_t *job); + +/** + * Update storage from job in /job->mdomain. + */ +apr_status_t md_job_save(md_job_t *job, struct md_result_t *result, apr_pool_t *p); + +/** + * Append to the job's log. Timestamp is automatically added. + * @param type type of log entry + * @param status status of entry (maybe NULL) + * @param detail description of what happened + */ +void md_job_log_append(md_job_t *job, const char *type, + const char *status, const char *detail); + +/** + * Retrieve the latest log entry of a certain type. + */ +md_json_t *md_job_log_get_latest(md_job_t *job, const char *type); + +/** + * Get the time the latest log entry of the given type happened, or 0 if + * none is found. 
+ */ +apr_time_t md_job_log_get_time_of_latest(md_job_t *job, const char *type); + +void md_job_start_run(md_job_t *job, struct md_result_t *result, md_store_t *store); +void md_job_end_run(md_job_t *job, struct md_result_t *result); +void md_job_retry_at(md_job_t *job, apr_time_t later); + +/** + * Given the number of errors and the last problem encountered, + * recommend a delay for the next attempt of job + */ +apr_time_t md_job_delay_on_errors(md_job_t *job, int err_count, const char *last_problem); + +apr_status_t md_job_notify(md_job_t *job, const char *reason, struct md_result_t *result); + +#endif /* md_status_h */ diff --git a/modules/md/md_store.c b/modules/md/md_store.c index a047ff3..59dbd67 100644 --- a/modules/md/md_store.c +++ b/modules/md/md_store.c @@ -55,22 +55,18 @@ static const char *GROUP_NAME[] = { "staging", "archive", "tmp", + "ocsp", NULL }; -const char *md_store_group_name(int group) +const char *md_store_group_name(unsigned int group) { - if ((size_t)group < sizeof(GROUP_NAME)/sizeof(GROUP_NAME[0])) { + if (group < sizeof(GROUP_NAME)/sizeof(GROUP_NAME[0])) { return GROUP_NAME[group]; } return "UNKNOWN"; } -void md_store_destroy(md_store_t *store) -{ - if (store->destroy) store->destroy(store); -} - apr_status_t md_store_load(md_store_t *store, md_store_group_t group, const char *name, const char *aspect, md_store_vtype_t vtype, void **pdata, @@ -145,6 +141,33 @@ int md_store_is_newer(md_store_t *store, md_store_group_t group1, md_store_group return store->is_newer(store, group1, group2, name, aspect, p); } +apr_time_t md_store_get_modified(md_store_t *store, md_store_group_t group, + const char *name, const char *aspect, apr_pool_t *p) +{ + return store->get_modified(store, group, name, aspect, p); +} + +apr_status_t md_store_iter_names(md_store_inspect *inspect, void *baton, md_store_t *store, + apr_pool_t *p, md_store_group_t group, const char *pattern) +{ + return store->iterate_names(inspect, baton, store, p, group, pattern); +} + +apr_status_t md_store_remove_not_modified_since(md_store_t *store, apr_pool_t *p, + apr_time_t modified, + md_store_group_t group, + const char *name, + const char *aspect) +{ + return store->remove_nms(store, p, modified, group, name, aspect); +} + +apr_status_t md_store_rename(md_store_t *store, apr_pool_t *p, + md_store_group_t group, const char *name, const char *to) +{ + return store->rename(store, p, group, name, to); +} + /**************************************************************************************************/ /* convenience */ @@ -231,55 +254,89 @@ typedef struct { apr_array_header_t *mds; } md_load_ctx; -apr_status_t md_pkey_load(md_store_t *store, md_store_group_t group, const char *name, - md_pkey_t **ppkey, apr_pool_t *p) -{ - return md_store_load(store, group, name, MD_FN_PRIVKEY, MD_SV_PKEY, (void**)ppkey, p); -} - -apr_status_t md_pkey_save(md_store_t *store, apr_pool_t *p, md_store_group_t group, const char *name, - struct md_pkey_t *pkey, int create) +static const char *pk_filename(const char *keyname, const char *base, apr_pool_t *p) { - return md_store_save(store, p, group, name, MD_FN_PRIVKEY, MD_SV_PKEY, pkey, create); + char *s, *t; + /* We also run on various filesystems with difference upper/lower preserve matching + * rules. Normalize the names we use, since private key specifications are basically + * user input. */ + s = (keyname && apr_strnatcasecmp("rsa", keyname))? 
+ apr_pstrcat(p, base, ".", keyname, ".pem", NULL) + : apr_pstrcat(p, base, ".pem", NULL); + for (t = s; *t; t++ ) + *t = (char)apr_tolower(*t); + return s; } -apr_status_t md_cert_load(md_store_t *store, md_store_group_t group, const char *name, - struct md_cert_t **pcert, apr_pool_t *p) +const char *md_pkey_filename(md_pkey_spec_t *spec, apr_pool_t *p) { - return md_store_load(store, group, name, MD_FN_CERT, MD_SV_CERT, (void**)pcert, p); + return pk_filename(md_pkey_spec_name(spec), "privkey", p); } -apr_status_t md_cert_save(md_store_t *store, apr_pool_t *p, - md_store_group_t group, const char *name, - struct md_cert_t *cert, int create) +const char *md_chain_filename(md_pkey_spec_t *spec, apr_pool_t *p) { - return md_store_save(store, p, group, name, MD_FN_CERT, MD_SV_CERT, cert, create); + return pk_filename(md_pkey_spec_name(spec), "pubcert", p); } -apr_status_t md_chain_load(md_store_t *store, md_store_group_t group, const char *name, - struct apr_array_header_t **pchain, apr_pool_t *p) +apr_status_t md_pkey_load(md_store_t *store, md_store_group_t group, const char *name, + md_pkey_spec_t *spec, md_pkey_t **ppkey, apr_pool_t *p) { - return md_store_load(store, group, name, MD_FN_CHAIN, MD_SV_CHAIN, (void**)pchain, p); + const char *fname = md_pkey_filename(spec, p); + return md_store_load(store, group, name, fname, MD_SV_PKEY, (void**)ppkey, p); } -apr_status_t md_chain_save(md_store_t *store, apr_pool_t *p, - md_store_group_t group, const char *name, - struct apr_array_header_t *chain, int create) +apr_status_t md_pkey_save(md_store_t *store, apr_pool_t *p, md_store_group_t group, const char *name, + md_pkey_spec_t *spec, struct md_pkey_t *pkey, int create) { - return md_store_save(store, p, group, name, MD_FN_CHAIN, MD_SV_CHAIN, chain, create); + const char *fname = md_pkey_filename(spec, p); + return md_store_save(store, p, group, name, fname, MD_SV_PKEY, pkey, create); } apr_status_t md_pubcert_load(md_store_t *store, md_store_group_t group, const char *name, - struct apr_array_header_t **ppubcert, apr_pool_t *p) + md_pkey_spec_t *spec, struct apr_array_header_t **ppubcert, + apr_pool_t *p) { - return md_store_load(store, group, name, MD_FN_PUBCERT, MD_SV_CHAIN, (void**)ppubcert, p); + const char *fname = md_chain_filename(spec, p); + return md_store_load(store, group, name, fname, MD_SV_CHAIN, (void**)ppubcert, p); } apr_status_t md_pubcert_save(md_store_t *store, apr_pool_t *p, md_store_group_t group, const char *name, - struct apr_array_header_t *pubcert, int create) + md_pkey_spec_t *spec, struct apr_array_header_t *pubcert, int create) +{ + const char *fname = md_chain_filename(spec, p); + return md_store_save(store, p, group, name, fname, MD_SV_CHAIN, pubcert, create); +} + +apr_status_t md_creds_load(md_store_t *store, md_store_group_t group, const char *name, + md_pkey_spec_t *spec, md_credentials_t **pcreds, apr_pool_t *p) +{ + md_credentials_t *creds = apr_pcalloc(p, sizeof(*creds)); + apr_status_t rv; + + creds->spec = spec; + if (APR_SUCCESS != (rv = md_pkey_load(store, group, name, spec, &creds->pkey, p))) { + goto leave; + } + /* chain is optional */ + rv = md_pubcert_load(store, group, name, spec, &creds->chain, p); + if (APR_STATUS_IS_ENOENT(rv)) rv = APR_SUCCESS; +leave: + *pcreds = (APR_SUCCESS == rv)? 
creds : NULL; + return rv; +} + +apr_status_t md_creds_save(md_store_t *store, apr_pool_t *p, md_store_group_t group, + const char *name, md_credentials_t *creds, int create) { - return md_store_save(store, p, group, name, MD_FN_PUBCERT, MD_SV_CHAIN, pubcert, create); + apr_status_t rv; + + if (APR_SUCCESS != (rv = md_pkey_save(store, p, group, name, creds->spec, creds->pkey, create))) { + goto leave; + } + rv = md_pubcert_save(store, p, group, name, creds->spec, creds->chain, create); +leave: + return rv; } typedef struct { @@ -317,3 +374,12 @@ apr_status_t md_store_md_iter(md_store_md_inspect *inspect, void *baton, md_stor return md_store_iter(insp_md, &ctx, store, p, group, pattern, MD_FN_MD, MD_SV_JSON); } +apr_status_t md_store_lock_global(md_store_t *store, apr_pool_t *p, apr_time_t max_wait) +{ + return store->lock_global(store, p, max_wait); +} + +void md_store_unlock_global(md_store_t *store, apr_pool_t *p) +{ + store->unlock_global(store, p); +} diff --git a/modules/md/md_store.h b/modules/md/md_store.h index 5825189..73c840f 100644 --- a/modules/md/md_store.h +++ b/modules/md/md_store.h @@ -20,101 +20,208 @@ struct apr_array_header_t; struct md_cert_t; struct md_pkey_t; +struct md_pkey_spec_t; -typedef struct md_store_t md_store_t; - -typedef void md_store_destroy_cb(md_store_t *store); - -const char *md_store_group_name(int group); - - -typedef apr_status_t md_store_load_cb(md_store_t *store, md_store_group_t group, - const char *name, const char *aspect, - md_store_vtype_t vtype, void **pvalue, - apr_pool_t *p); -typedef apr_status_t md_store_save_cb(md_store_t *store, apr_pool_t *p, md_store_group_t group, - const char *name, const char *aspect, - md_store_vtype_t vtype, void *value, - int create); -typedef apr_status_t md_store_remove_cb(md_store_t *store, md_store_group_t group, - const char *name, const char *aspect, - apr_pool_t *p, int force); -typedef apr_status_t md_store_purge_cb(md_store_t *store, apr_pool_t *p, md_store_group_t group, - const char *name); +const char *md_store_group_name(unsigned int group); -typedef int md_store_inspect(void *baton, const char *name, const char *aspect, - md_store_vtype_t vtype, void *value, apr_pool_t *ptemp); - -typedef apr_status_t md_store_iter_cb(md_store_inspect *inspect, void *baton, md_store_t *store, - apr_pool_t *p, md_store_group_t group, const char *pattern, - const char *aspect, md_store_vtype_t vtype); - -typedef apr_status_t md_store_move_cb(md_store_t *store, apr_pool_t *p, md_store_group_t from, - md_store_group_t to, const char *name, int archive); - -typedef apr_status_t md_store_get_fname_cb(const char **pfname, - md_store_t *store, md_store_group_t group, - const char *name, const char *aspect, - apr_pool_t *p); - -typedef int md_store_is_newer_cb(md_store_t *store, - md_store_group_t group1, md_store_group_t group2, - const char *name, const char *aspect, apr_pool_t *p); +typedef struct md_store_t md_store_t; -struct md_store_t { - md_store_destroy_cb *destroy; +/** + * A store for domain related data. + * + * The Key for a piece of data is the set of 3 items + * + + + * + * Examples: + * "domains" + "greenbytes.de" + "pubcert.pem" + * "ocsp" + "greenbytes.de" + "ocsp-XXXXX.json" + * + * Storage groups are pre-defined, domain and aspect names can be freely chosen. + * + * Groups reflect use cases and come with security restrictions. The groups + * DOMAINS, ARCHIVE and NONE are only accessible during the startup + * phase of httpd. + * + * Private key are stored unencrypted only in restricted groups. 
Meaning that certificate + * keys in group DOMAINS are not encrypted, but only readable at httpd start/reload. + * Keys in unrestricted groups are encrypted using a pass phrase generated once and stored + * in NONE. + */ - md_store_save_cb *save; - md_store_load_cb *load; - md_store_remove_cb *remove; - md_store_move_cb *move; - md_store_iter_cb *iterate; - md_store_purge_cb *purge; - md_store_get_fname_cb *get_fname; - md_store_is_newer_cb *is_newer; -}; +/** Value types handled by a store */ +typedef enum { + MD_SV_TEXT, /* plain text, value is (char*) */ + MD_SV_JSON, /* JSON serialization, value is (md_json_t*) */ + MD_SV_CERT, /* PEM x509 certificate, value is (md_cert_t*) */ + MD_SV_PKEY, /* PEM private key, value is (md_pkey_t*) */ + MD_SV_CHAIN, /* list of PEM x509 certificates, value is + (apr_array_header_t*) of (md_cert*) */ +} md_store_vtype_t; + +/** Store storage groups */ +typedef enum { + MD_SG_NONE, /* top level of store, name MUST be NULL in calls */ + MD_SG_ACCOUNTS, /* ACME accounts */ + MD_SG_CHALLENGES, /* challenge response data for a domain */ + MD_SG_DOMAINS, /* live certificates and settings for a domain */ + MD_SG_STAGING, /* staged set of certificate and settings, maybe incomplete */ + MD_SG_ARCHIVE, /* Archived live sets of a domain */ + MD_SG_TMP, /* temporary domain storage */ + MD_SG_OCSP, /* OCSP stapling related domain data */ + MD_SG_COUNT, /* number of storage groups, used in setups */ +} md_store_group_t; + +#define MD_FN_MD "md.json" +#define MD_FN_JOB "job.json" +#define MD_FN_HTTPD_JSON "httpd.json" + +/* The corresponding names for current cert & key files are constructed + * in md_store and md_crypt. + */ -void md_store_destroy(md_store_t *store); +/* These three legacy filenames are only used in md_store_fs to + * upgrade 1.0 directories. They should not be used for any other + * purpose. + */ +#define MD_FN_PRIVKEY "privkey.pem" +#define MD_FN_PUBCERT "pubcert.pem" +#define MD_FN_CERT "cert.pem" +/** + * Load the JSON value at key "group/name/aspect", allocated from pool p. + * @return APR_ENOENT if there is no such value + */ apr_status_t md_store_load_json(md_store_t *store, md_store_group_t group, const char *name, const char *aspect, struct md_json_t **pdata, apr_pool_t *p); +/** + * Save the JSON value at key "group/name/aspect". If create != 0, fail if there + * already is a value for this key. + */ apr_status_t md_store_save_json(md_store_t *store, apr_pool_t *p, md_store_group_t group, const char *name, const char *aspect, struct md_json_t *data, int create); - +/** + * Load the value of type at key "group/name/aspect", allocated from pool p. Usually, the + * type is expected to be the same as used in saving the value. Some conversions will work, + * others will fail the format. + * @return APR_ENOENT if there is no such value + */ apr_status_t md_store_load(md_store_t *store, md_store_group_t group, const char *name, const char *aspect, md_store_vtype_t vtype, void **pdata, apr_pool_t *p); +/** + * Save the JSON value at key "group/name/aspect". If create != 0, fail if there + * already is a value for this key. The provided data MUST be of the correct type. + */ apr_status_t md_store_save(md_store_t *store, apr_pool_t *p, md_store_group_t group, const char *name, const char *aspect, md_store_vtype_t vtype, void *data, int create); + +/** + * Remove the value stored at key "group/name/aspect". Unless force != 0, a missing + * value will cause the call to fail with APR_ENOENT. 
+ */ apr_status_t md_store_remove(md_store_t *store, md_store_group_t group, const char *name, const char *aspect, apr_pool_t *p, int force); +/** + * Remove everything matching key "group/name". + */ apr_status_t md_store_purge(md_store_t *store, apr_pool_t *p, md_store_group_t group, const char *name); +/** + * Remove all items matching the name/aspect patterns that have not been + * modified since the given timestamp. + */ +apr_status_t md_store_remove_not_modified_since(md_store_t *store, apr_pool_t *p, + apr_time_t modified, + md_store_group_t group, + const char *name, + const char *aspect); + +/** + * Inspect callback function. Invoked for each matched value. Values allocated from + * ptemp may disappear any time after the call returns. If this function returns + * 0, the iteration is aborted. + */ +typedef int md_store_inspect(void *baton, const char *name, const char *aspect, + md_store_vtype_t vtype, void *value, apr_pool_t *ptemp); +/** + * Iterator over all existing values matching the name pattern. Patterns are evaluated + * using apr_fnmatch() without flags. + */ apr_status_t md_store_iter(md_store_inspect *inspect, void *baton, md_store_t *store, apr_pool_t *p, md_store_group_t group, const char *pattern, const char *aspect, md_store_vtype_t vtype); +/** + * Move everything matching key "from/name" from one group to another. If archive != 0, + * move any existing "to/name" into a new "archive/new_name" location. + */ apr_status_t md_store_move(md_store_t *store, apr_pool_t *p, md_store_group_t from, md_store_group_t to, const char *name, int archive); +/** + * Rename a group member. + */ +apr_status_t md_store_rename(md_store_t *store, apr_pool_t *p, + md_store_group_t group, const char *name, const char *to); + +/** + * Get the filename of an item stored in "group/name/aspect". The item does + * not have to exist. + */ apr_status_t md_store_get_fname(const char **pfname, md_store_t *store, md_store_group_t group, const char *name, const char *aspect, apr_pool_t *p); +/** + * Compare the modification times of "group1/name/aspect" and "group2/name/aspect". + */ int md_store_is_newer(md_store_t *store, md_store_group_t group1, md_store_group_t group2, const char *name, const char *aspect, apr_pool_t *p); +/** + * Iterate over all names that exist in a group, i.e. there are items matching + * "group/pattern". The inspect function is called with the name and NULL aspect + * and value. + */ +apr_status_t md_store_iter_names(md_store_inspect *inspect, void *baton, md_store_t *store, + apr_pool_t *p, md_store_group_t group, const char *pattern); + +/** + * Get the modification time of the item stored under "group/name/aspect". + * @return modification time or 0 if the item does not exist. + */ +apr_time_t md_store_get_modified(md_store_t *store, md_store_group_t group, + const char *name, const char *aspect, apr_pool_t *p); + +/** + * Acquire a cooperative, global lock on store modifications. + * This will only prevent other children/processes/cluster nodes from + * doing the same and does not protect individual store functions from + * being called without it. + * @param store the store + * @param p memory pool to use + * @param max_wait maximum time to wait in order to acquire + * @return APR_SUCCESS when the lock was obtained + */ +apr_status_t md_store_lock_global(md_store_t *store, apr_pool_t *p, apr_time_t max_wait); + +/** + * Release the global store lock. Will do nothing if there is no lock. 
+ */ +void md_store_unlock_global(md_store_t *store, apr_pool_t *p); + /**************************************************************************************************/ /* Storage handling utils */ @@ -134,24 +241,103 @@ apr_status_t md_store_md_iter(md_store_md_inspect *inspect, void *baton, md_stor apr_pool_t *p, md_store_group_t group, const char *pattern); +const char *md_pkey_filename(struct md_pkey_spec_t *spec, apr_pool_t *p); +const char *md_chain_filename(struct md_pkey_spec_t *spec, apr_pool_t *p); + apr_status_t md_pkey_load(md_store_t *store, md_store_group_t group, - const char *name, struct md_pkey_t **ppkey, apr_pool_t *p); + const char *name, struct md_pkey_spec_t *spec, + struct md_pkey_t **ppkey, apr_pool_t *p); apr_status_t md_pkey_save(md_store_t *store, apr_pool_t *p, md_store_group_t group, - const char *name, struct md_pkey_t *pkey, int create); -apr_status_t md_cert_load(md_store_t *store, md_store_group_t group, - const char *name, struct md_cert_t **pcert, apr_pool_t *p); -apr_status_t md_cert_save(md_store_t *store, apr_pool_t *p, md_store_group_t group, - const char *name, struct md_cert_t *cert, int create); -apr_status_t md_chain_load(md_store_t *store, md_store_group_t group, - const char *name, struct apr_array_header_t **pchain, apr_pool_t *p); -apr_status_t md_chain_save(md_store_t *store, apr_pool_t *p, md_store_group_t group, - const char *name, struct apr_array_header_t *chain, int create); + const char *name, struct md_pkey_spec_t *spec, + struct md_pkey_t *pkey, int create); apr_status_t md_pubcert_load(md_store_t *store, md_store_group_t group, const char *name, - struct apr_array_header_t **ppubcert, apr_pool_t *p); + struct md_pkey_spec_t *spec, struct apr_array_header_t **ppubcert, + apr_pool_t *p); apr_status_t md_pubcert_save(md_store_t *store, apr_pool_t *p, md_store_group_t group, const char *name, + struct md_pkey_spec_t *spec, struct apr_array_header_t *pubcert, int create); +/**************************************************************************************************/ +/* X509 complete credentials */ + +typedef struct md_credentials_t md_credentials_t; +struct md_credentials_t { + struct md_pkey_spec_t *spec; + struct md_pkey_t *pkey; + struct apr_array_header_t *chain; +}; + +apr_status_t md_creds_load(md_store_t *store, md_store_group_t group, const char *name, + struct md_pkey_spec_t *spec, md_credentials_t **pcreds, apr_pool_t *p); +apr_status_t md_creds_save(md_store_t *store, apr_pool_t *p, md_store_group_t group, + const char *name, md_credentials_t *creds, int create); + +/**************************************************************************************************/ +/* implementation interface */ + +typedef apr_status_t md_store_load_cb(md_store_t *store, md_store_group_t group, + const char *name, const char *aspect, + md_store_vtype_t vtype, void **pvalue, + apr_pool_t *p); +typedef apr_status_t md_store_save_cb(md_store_t *store, apr_pool_t *p, md_store_group_t group, + const char *name, const char *aspect, + md_store_vtype_t vtype, void *value, + int create); +typedef apr_status_t md_store_remove_cb(md_store_t *store, md_store_group_t group, + const char *name, const char *aspect, + apr_pool_t *p, int force); +typedef apr_status_t md_store_purge_cb(md_store_t *store, apr_pool_t *p, md_store_group_t group, + const char *name); + +typedef apr_status_t md_store_iter_cb(md_store_inspect *inspect, void *baton, md_store_t *store, + apr_pool_t *p, md_store_group_t group, const char *pattern, + const char *aspect, 
md_store_vtype_t vtype); + +typedef apr_status_t md_store_names_iter_cb(md_store_inspect *inspect, void *baton, md_store_t *store, + apr_pool_t *p, md_store_group_t group, const char *pattern); + +typedef apr_status_t md_store_move_cb(md_store_t *store, apr_pool_t *p, md_store_group_t from, + md_store_group_t to, const char *name, int archive); + +typedef apr_status_t md_store_rename_cb(md_store_t *store, apr_pool_t *p, md_store_group_t group, + const char *from, const char *to); + +typedef apr_status_t md_store_get_fname_cb(const char **pfname, + md_store_t *store, md_store_group_t group, + const char *name, const char *aspect, + apr_pool_t *p); + +typedef int md_store_is_newer_cb(md_store_t *store, + md_store_group_t group1, md_store_group_t group2, + const char *name, const char *aspect, apr_pool_t *p); + +typedef apr_time_t md_store_get_modified_cb(md_store_t *store, md_store_group_t group, + const char *name, const char *aspect, apr_pool_t *p); + +typedef apr_status_t md_store_remove_nms_cb(md_store_t *store, apr_pool_t *p, + apr_time_t modified, md_store_group_t group, + const char *name, const char *aspect); +typedef apr_status_t md_store_lock_global_cb(md_store_t *store, apr_pool_t *p, apr_time_t max_wait); +typedef void md_store_unlock_global_cb(md_store_t *store, apr_pool_t *p); + +struct md_store_t { + md_store_save_cb *save; + md_store_load_cb *load; + md_store_remove_cb *remove; + md_store_move_cb *move; + md_store_rename_cb *rename; + md_store_iter_cb *iterate; + md_store_names_iter_cb *iterate_names; + md_store_purge_cb *purge; + md_store_get_fname_cb *get_fname; + md_store_is_newer_cb *is_newer; + md_store_get_modified_cb *get_modified; + md_store_remove_nms_cb *remove_nms; + md_store_lock_global_cb *lock_global; + md_store_unlock_global_cb *unlock_global; +}; + #endif /* mod_md_md_store_h */ diff --git a/modules/md/md_store_fs.c b/modules/md/md_store_fs.c index f399cea..35c24b4 100644 --- a/modules/md/md_store_fs.c +++ b/modules/md/md_store_fs.c @@ -39,6 +39,7 @@ /* file system based implementation of md_store_t */ #define MD_STORE_VERSION 3 +#define MD_FS_LOCK_NAME "store.lock" typedef struct { apr_fileperms_t dir; @@ -55,12 +56,13 @@ struct md_store_fs_t { md_store_fs_cb *event_cb; void *event_baton; - const unsigned char *key; - apr_size_t key_len; + md_data_t key; int plain_pkey[MD_SG_COUNT]; int port_80; int port_443; + + apr_file_t *global_lock; }; #define FS_STORE(store) (md_store_fs_t*)(((char*)store)-offsetof(md_store_fs_t, s)) @@ -78,12 +80,19 @@ static apr_status_t fs_remove(md_store_t *store, md_store_group_t group, apr_pool_t *p, int force); static apr_status_t fs_purge(md_store_t *store, apr_pool_t *p, md_store_group_t group, const char *name); +static apr_status_t fs_remove_nms(md_store_t *store, apr_pool_t *p, + apr_time_t modified, md_store_group_t group, + const char *name, const char *aspect); static apr_status_t fs_move(md_store_t *store, apr_pool_t *p, md_store_group_t from, md_store_group_t to, const char *name, int archive); +static apr_status_t fs_rename(md_store_t *store, apr_pool_t *p, + md_store_group_t group, const char *from, const char *to); static apr_status_t fs_iterate(md_store_inspect *inspect, void *baton, md_store_t *store, apr_pool_t *p, md_store_group_t group, const char *pattern, const char *aspect, md_store_vtype_t vtype); +static apr_status_t fs_iterate_names(md_store_inspect *inspect, void *baton, md_store_t *store, + apr_pool_t *p, md_store_group_t group, const char *pattern); static apr_status_t fs_get_fname(const char **pfname, 
md_store_t *store, md_store_group_t group, @@ -92,23 +101,27 @@ static apr_status_t fs_get_fname(const char **pfname, static int fs_is_newer(md_store_t *store, md_store_group_t group1, md_store_group_t group2, const char *name, const char *aspect, apr_pool_t *p); +static apr_time_t fs_get_modified(md_store_t *store, md_store_group_t group, + const char *name, const char *aspect, apr_pool_t *p); + +static apr_status_t fs_lock_global(md_store_t *store, apr_pool_t *p, apr_time_t max_wait); +static void fs_unlock_global(md_store_t *store, apr_pool_t *p); + static apr_status_t init_store_file(md_store_fs_t *s_fs, const char *fname, apr_pool_t *p, apr_pool_t *ptemp) { md_json_t *json = md_json_create(p); const char *key64; - unsigned char *key; apr_status_t rv; md_json_setn(MD_STORE_VERSION, json, MD_KEY_STORE, MD_KEY_VERSION, NULL); - s_fs->key_len = FS_STORE_KLEN; - s_fs->key = key = apr_pcalloc(p, FS_STORE_KLEN); - if (APR_SUCCESS != (rv = md_rand_bytes(key, s_fs->key_len, p))) { + md_data_pinit(&s_fs->key, FS_STORE_KLEN, p); + if (APR_SUCCESS != (rv = md_rand_bytes((unsigned char*)s_fs->key.data, s_fs->key.len, p))) { return rv; } - key64 = md_util_base64url_encode((char *)key, s_fs->key_len, ptemp); + key64 = md_util_base64url_encode(&s_fs->key, ptemp); md_json_sets(key64, json, MD_KEY_KEY, NULL); rv = md_json_fcreatex(json, ptemp, MD_JSON_FMT_INDENT, fname, MD_FPROT_F_UONLY); memset((char*)key64, 0, strlen(key64)); @@ -122,13 +135,12 @@ static apr_status_t rename_pkey(void *baton, apr_pool_t *p, apr_pool_t *ptemp, { const char *from, *to; apr_status_t rv = APR_SUCCESS; - MD_CHK_VARS; (void)baton; (void)ftype; if ( MD_OK(md_util_path_merge(&from, ptemp, dir, name, NULL)) && MD_OK(md_util_path_merge(&to, ptemp, dir, MD_FN_PRIVKEY, NULL))) { - md_log_perror(MD_LOG_MARK, MD_LOG_DEBUG, 0, p, "renaming %s/%s to %s", + md_log_perror(MD_LOG_MARK, MD_LOG_TRACE1, 0, p, "renaming %s/%s to %s", dir, name, MD_FN_PRIVKEY); return apr_file_rename(from, to, ptemp); } @@ -143,16 +155,15 @@ static apr_status_t mk_pubcert(void *baton, apr_pool_t *p, apr_pool_t *ptemp, apr_array_header_t *chain, *pubcert; const char *fname, *fpubcert; apr_status_t rv = APR_SUCCESS; - MD_CHK_VARS; (void)baton; (void)ftype; (void)p; if ( MD_OK(md_util_path_merge(&fpubcert, ptemp, dir, MD_FN_PUBCERT, NULL)) - && MD_IS_ERR(md_chain_fload(&pubcert, ptemp, fpubcert), ENOENT) + && APR_STATUS_IS_ENOENT(rv = md_chain_fload(&pubcert, ptemp, fpubcert)) && MD_OK(md_util_path_merge(&fname, ptemp, dir, name, NULL)) && MD_OK(md_cert_fload(&cert, ptemp, fname)) - && MD_OK(md_util_path_merge(&fname, ptemp, dir, MD_FN_CHAIN, NULL))) { + && MD_OK(md_util_path_merge(&fname, ptemp, dir, "chain.pem", NULL))) { rv = md_chain_fload(&chain, ptemp, fname); if (APR_STATUS_IS_ENOENT(rv)) { @@ -193,10 +204,9 @@ static apr_status_t read_store_file(md_store_fs_t *s_fs, const char *fname, apr_pool_t *p, apr_pool_t *ptemp) { md_json_t *json; - const char *key64, *key; + const char *key64; apr_status_t rv; double store_version; - MD_CHK_VARS; if (MD_OK(md_json_readf(&json, p, fname))) { store_version = md_json_getn(json, MD_KEY_STORE, MD_KEY_VERSION, NULL); @@ -215,11 +225,10 @@ static apr_status_t read_store_file(md_store_fs_t *s_fs, const char *fname, return APR_EINVAL; } - s_fs->key_len = md_util_base64url_decode(&key, key64, p); - s_fs->key = (const unsigned char*)key; - if (s_fs->key_len != FS_STORE_KLEN) { + md_util_base64url_decode(&s_fs->key, key64, p); + if (s_fs->key.len != FS_STORE_KLEN) { md_log_perror(MD_LOG_MARK, MD_LOG_ERR, 0, p, "key length 
unexpected: %" APR_SIZE_T_FMT, - s_fs->key_len); + s_fs->key.len); return APR_EINVAL; } @@ -237,8 +246,8 @@ static apr_status_t read_store_file(md_store_fs_t *s_fs, const char *fname, if (APR_SUCCESS == rv) { md_json_setn(MD_STORE_VERSION, json, MD_KEY_STORE, MD_KEY_VERSION, NULL); rv = md_json_freplace(json, ptemp, MD_JSON_FMT_INDENT, fname, MD_FPROT_F_UONLY); - } - md_log_perror(MD_LOG_MARK, MD_LOG_INFO, rv, p, "migrated store"); + } + md_log_perror(MD_LOG_MARK, MD_LOG_INFO, rv, p, "migrated store"); } } return rv; @@ -249,10 +258,14 @@ static apr_status_t setup_store_file(void *baton, apr_pool_t *p, apr_pool_t *pte md_store_fs_t *s_fs = baton; const char *fname; apr_status_t rv; - MD_CHK_VARS; (void)ap; s_fs->plain_pkey[MD_SG_DOMAINS] = 1; + /* Added: the encryption of tls-alpn-01 certificate keys is not a security issue + * for these self-signed, short-lived certificates. Having them unencrypted lets + * us pass around the files instead of an *SSL implementation dependent PKEY_something. + */ + s_fs->plain_pkey[MD_SG_CHALLENGES] = 1; s_fs->plain_pkey[MD_SG_TMP] = 1; if (!MD_OK(md_util_path_merge(&fname, ptemp, s_fs->base, FS_STORE_JSON, NULL))) { @@ -264,7 +277,7 @@ read: rv = read_store_file(s_fs, fname, p, ptemp); } else if (APR_STATUS_IS_ENOENT(rv) - && MD_IS_ERR(init_store_file(s_fs, fname, p, ptemp), EEXIST)) { + && APR_STATUS_IS_EEXIST(rv = init_store_file(s_fs, fname, p, ptemp))) { goto read; } return rv; @@ -274,7 +287,6 @@ apr_status_t md_store_fs_init(md_store_t **pstore, apr_pool_t *p, const char *pa { md_store_fs_t *s_fs; apr_status_t rv = APR_SUCCESS; - MD_CHK_VARS; s_fs = apr_pcalloc(p, sizeof(*s_fs)); @@ -282,11 +294,17 @@ apr_status_t md_store_fs_init(md_store_t **pstore, apr_pool_t *p, const char *pa s_fs->s.save = fs_save; s_fs->s.remove = fs_remove; s_fs->s.move = fs_move; + s_fs->s.rename = fs_rename; s_fs->s.purge = fs_purge; s_fs->s.iterate = fs_iterate; + s_fs->s.iterate_names = fs_iterate_names; s_fs->s.get_fname = fs_get_fname; s_fs->s.is_newer = fs_is_newer; - + s_fs->s.get_modified = fs_get_modified; + s_fs->s.remove_nms = fs_remove_nms; + s_fs->s.lock_global = fs_lock_global; + s_fs->s.unlock_global = fs_unlock_global; + /* by default, everything is only readable by the current user */ s_fs->def_perms.dir = MD_FPROT_D_UONLY; s_fs->def_perms.file = MD_FPROT_F_UONLY; @@ -300,20 +318,34 @@ apr_status_t md_store_fs_init(md_store_t **pstore, apr_pool_t *p, const char *pa /* challenges dir and files are readable by all, no secrets involved */ s_fs->group_perms[MD_SG_CHALLENGES].dir = MD_FPROT_D_UALL_WREAD; s_fs->group_perms[MD_SG_CHALLENGES].file = MD_FPROT_F_UALL_WREAD; + /* OCSP data is readable by all, no secrets involved */ + s_fs->group_perms[MD_SG_OCSP].dir = MD_FPROT_D_UALL_WREAD; + s_fs->group_perms[MD_SG_OCSP].file = MD_FPROT_F_UALL_WREAD; s_fs->base = apr_pstrdup(p, path); - - if (MD_IS_ERR(md_util_is_dir(s_fs->base, p), ENOENT) - && MD_OK(apr_dir_make_recursive(s_fs->base, s_fs->def_perms.dir, p))) { + + rv = md_util_is_dir(s_fs->base, p); + if (APR_STATUS_IS_ENOENT(rv)) { + md_log_perror(MD_LOG_MARK, MD_LOG_INFO, rv, p, + "store directory does not exist, creating %s", s_fs->base); + rv = apr_dir_make_recursive(s_fs->base, s_fs->def_perms.dir, p); + if (APR_SUCCESS != rv) goto cleanup; rv = apr_file_perms_set(s_fs->base, MD_FPROT_D_UALL_WREAD); if (APR_STATUS_IS_ENOTIMPL(rv)) { rv = APR_SUCCESS; } + if (APR_SUCCESS != rv) goto cleanup; } - - if ((APR_SUCCESS != rv) || !MD_OK(md_util_pool_vdo(setup_store_file, s_fs, p, NULL))) { - 
md_log_perror(MD_LOG_MARK, MD_LOG_ERR, rv, p, "init fs store at %s", path); + else if (APR_SUCCESS != rv) { + md_log_perror(MD_LOG_MARK, MD_LOG_DEBUG, rv, p, + "not a plain directory, maybe a symlink? %s", s_fs->base); + } + + rv = md_util_pool_vdo(setup_store_file, s_fs, p, NULL); + if (APR_SUCCESS != rv) { + md_log_perror(MD_LOG_MARK, MD_LOG_ERR, rv, p, "init fs store at %s", s_fs->base); } +cleanup: *pstore = (rv == APR_SUCCESS)? &(s_fs->s) : NULL; return rv; } @@ -394,8 +426,8 @@ static void get_pass(const char **ppass, apr_size_t *plen, *plen = 0; } else { - *ppass = (const char *)s_fs->key; - *plen = s_fs->key_len; + *ppass = (const char *)s_fs->key.data; + *plen = s_fs->key.len; } } @@ -446,7 +478,6 @@ static apr_status_t pfs_load(void *baton, apr_pool_t *p, apr_pool_t *ptemp, va_l md_store_group_t group; void **pvalue; apr_status_t rv; - MD_CHK_VARS; group = (md_store_group_t)va_arg(ap, int); name = va_arg(ap, const char *); @@ -460,7 +491,7 @@ static apr_status_t pfs_load(void *baton, apr_pool_t *p, apr_pool_t *ptemp, va_l return rv; } -static apr_status_t dispatch(md_store_fs_t *s_fs, md_store_fs_ev_t ev, int group, +static apr_status_t dispatch(md_store_fs_t *s_fs, md_store_fs_ev_t ev, unsigned int group, const char *fname, apr_filetype_e ftype, apr_pool_t *p) { (void)ev; @@ -477,25 +508,31 @@ static apr_status_t mk_group_dir(const char **pdir, md_store_fs_t *s_fs, { const perms_t *perms; apr_status_t rv; - MD_CHK_VARS; perms = gperms(s_fs, group); - if (MD_OK(fs_get_dname(pdir, &s_fs->s, group, name, p)) && (MD_SG_NONE != group)) { - if ( !MD_OK(md_util_is_dir(*pdir, p)) - && MD_OK(apr_dir_make_recursive(*pdir, perms->dir, p))) { - rv = dispatch(s_fs, MD_S_FS_EV_CREATED, group, *pdir, APR_DIR, p); - } - - if (APR_SUCCESS == rv) { - rv = apr_file_perms_set(*pdir, perms->dir); - md_log_perror(MD_LOG_MARK, MD_LOG_TRACE3, 0, p, "mk_group_dir %s perm set", *pdir); - if (APR_STATUS_IS_ENOTIMPL(rv)) { - rv = APR_SUCCESS; - } - } + *pdir = NULL; + rv = fs_get_dname(pdir, &s_fs->s, group, name, p); + if ((APR_SUCCESS != rv) || (MD_SG_NONE == group)) goto cleanup; + + rv = md_util_is_dir(*pdir, p); + if (APR_STATUS_IS_ENOENT(rv)) { + md_log_perror(MD_LOG_MARK, MD_LOG_TRACE3, rv, p, "not a directory, creating %s", *pdir); + rv = apr_dir_make_recursive(*pdir, perms->dir, p); + if (APR_SUCCESS != rv) goto cleanup; + dispatch(s_fs, MD_S_FS_EV_CREATED, group, *pdir, APR_DIR, p); + } + + rv = apr_file_perms_set(*pdir, perms->dir); + md_log_perror(MD_LOG_MARK, MD_LOG_TRACE3, rv, p, "mk_group_dir %s perm set", *pdir); + if (APR_STATUS_IS_ENOTIMPL(rv)) { + rv = APR_SUCCESS; + } +cleanup: + if (APR_SUCCESS != rv) { + md_log_perror(MD_LOG_MARK, MD_LOG_ERR, rv, p, "mk_group_dir %d %s", + group, (*pdir? *pdir : (name? 
name : "(null)"))); } - md_log_perror(MD_LOG_MARK, MD_LOG_TRACE3, 0, p, "mk_group_dir %d %s", group, name); return rv; } @@ -507,7 +544,6 @@ static apr_status_t pfs_is_newer(void *baton, apr_pool_t *p, apr_pool_t *ptemp, apr_finfo_t inf1, inf2; int *pnewer; apr_status_t rv; - MD_CHK_VARS; (void)p; group1 = (md_store_group_t)va_arg(ap, int); @@ -527,7 +563,6 @@ static apr_status_t pfs_is_newer(void *baton, apr_pool_t *p, apr_pool_t *ptemp, return rv; } - static int fs_is_newer(md_store_t *store, md_store_group_t group1, md_store_group_t group2, const char *name, const char *aspect, apr_pool_t *p) { @@ -542,6 +577,44 @@ static int fs_is_newer(md_store_t *store, md_store_group_t group1, md_store_grou return 0; } +static apr_status_t pfs_get_modified(void *baton, apr_pool_t *p, apr_pool_t *ptemp, va_list ap) +{ + md_store_fs_t *s_fs = baton; + const char *fname, *name, *aspect; + md_store_group_t group; + apr_finfo_t inf; + apr_time_t *pmtime; + apr_status_t rv; + + (void)p; + group = (md_store_group_t)va_arg(ap, int); + name = va_arg(ap, const char*); + aspect = va_arg(ap, const char*); + pmtime = va_arg(ap, apr_time_t*); + + *pmtime = 0; + if ( MD_OK(fs_get_fname(&fname, &s_fs->s, group, name, aspect, ptemp)) + && MD_OK(apr_stat(&inf, fname, APR_FINFO_MTIME, ptemp))) { + *pmtime = inf.mtime; + } + + return rv; +} + +static apr_time_t fs_get_modified(md_store_t *store, md_store_group_t group, + const char *name, const char *aspect, apr_pool_t *p) +{ + md_store_fs_t *s_fs = FS_STORE(store); + apr_time_t mtime; + apr_status_t rv; + + rv = md_util_pool_vdo(pfs_get_modified, s_fs, p, group, name, aspect, &mtime, NULL); + if (APR_SUCCESS == rv) { + return mtime; + } + return 0; +} + static apr_status_t pfs_save(void *baton, apr_pool_t *p, apr_pool_t *ptemp, va_list ap) { md_store_fs_t *s_fs = baton; @@ -554,7 +627,6 @@ static apr_status_t pfs_save(void *baton, apr_pool_t *p, apr_pool_t *ptemp, va_l const perms_t *perms; const char *pass; apr_size_t pass_len; - MD_CHK_VARS; group = (md_store_group_t)va_arg(ap, int); name = va_arg(ap, const char*); @@ -569,7 +641,7 @@ static apr_status_t pfs_save(void *baton, apr_pool_t *p, apr_pool_t *ptemp, va_l && MD_OK(mk_group_dir(&dir, s_fs, group, name, p)) && MD_OK(md_util_path_merge(&fpath, ptemp, dir, aspect, NULL))) { - md_log_perror(MD_LOG_MARK, MD_LOG_TRACE3, 0, ptemp, "storing in %s", fpath); + md_log_perror(MD_LOG_MARK, MD_LOG_TRACE2, 0, ptemp, "storing in %s", fpath); switch (vtype) { case MD_SV_TEXT: rv = (create? 
md_text_fcreatex(fpath, perms->file, p, value) @@ -612,7 +684,6 @@ static apr_status_t pfs_remove(void *baton, apr_pool_t *p, apr_pool_t *ptemp, va int force; apr_finfo_t info; md_store_group_t group; - MD_CHK_VARS; (void)p; group = (md_store_group_t)va_arg(ap, int); @@ -624,7 +695,7 @@ static apr_status_t pfs_remove(void *baton, apr_pool_t *p, apr_pool_t *ptemp, va if ( MD_OK(md_util_path_merge(&dir, ptemp, s_fs->base, groupname, name, NULL)) && MD_OK(md_util_path_merge(&fpath, ptemp, dir, aspect, NULL))) { - md_log_perror(MD_LOG_MARK, MD_LOG_DEBUG, 0, ptemp, "start remove of md %s/%s/%s", + md_log_perror(MD_LOG_MARK, MD_LOG_TRACE1, 0, ptemp, "start remove of md %s/%s/%s", groupname, name, aspect); if (!MD_OK(apr_stat(&info, dir, APR_FINFO_TYPE, ptemp))) { @@ -673,7 +744,6 @@ static apr_status_t pfs_purge(void *baton, apr_pool_t *p, apr_pool_t *ptemp, va_ const char *dir, *name, *groupname; md_store_group_t group; apr_status_t rv; - MD_CHK_VARS; (void)p; group = (md_store_group_t)va_arg(ap, int); @@ -685,7 +755,9 @@ static apr_status_t pfs_purge(void *baton, apr_pool_t *p, apr_pool_t *ptemp, va_ /* Remove all files in dir, there should be no sub-dirs */ rv = md_util_rm_recursive(dir, ptemp, 1); } - md_log_perror(MD_LOG_MARK, MD_LOG_DEBUG, rv, ptemp, "purge %s/%s (%s)", groupname, name, dir); + if (!APR_STATUS_IS_ENOENT(rv)) { + md_log_perror(MD_LOG_MARK, MD_LOG_TRACE2, rv, ptemp, "purge %s/%s (%s)", groupname, name, dir); + } return APR_SUCCESS; } @@ -706,7 +778,9 @@ typedef struct { const char *aspect; md_store_vtype_t vtype; md_store_inspect *inspect; + const char *dirname; void *baton; + apr_time_t ts; } inspect_ctx; static apr_status_t insp(void *baton, apr_pool_t *p, apr_pool_t *ptemp, @@ -716,15 +790,38 @@ static apr_status_t insp(void *baton, apr_pool_t *p, apr_pool_t *ptemp, apr_status_t rv; void *value; const char *fpath; - MD_CHK_VARS; (void)ftype; md_log_perror(MD_LOG_MARK, MD_LOG_TRACE3, 0, ptemp, "inspecting value at: %s/%s", dir, name); - if ( MD_OK(md_util_path_merge(&fpath, ptemp, dir, name, NULL)) - && MD_OK(fs_fload(&value, ctx->s_fs, fpath, ctx->group, ctx->vtype, p, ptemp)) - && !ctx->inspect(ctx->baton, name, ctx->aspect, ctx->vtype, value, ptemp)) { - return APR_EOF; - } + if (APR_SUCCESS == (rv = md_util_path_merge(&fpath, ptemp, dir, name, NULL))) { + rv = fs_fload(&value, ctx->s_fs, fpath, ctx->group, ctx->vtype, p, ptemp); + if (APR_SUCCESS == rv + && !ctx->inspect(ctx->baton, ctx->dirname, name, ctx->vtype, value, p)) { + return APR_EOF; + } + else if (APR_STATUS_IS_ENOENT(rv)) { + rv = APR_SUCCESS; + } + } + return rv; +} + +static apr_status_t insp_dir(void *baton, apr_pool_t *p, apr_pool_t *ptemp, + const char *dir, const char *name, apr_filetype_e ftype) +{ + inspect_ctx *ctx = baton; + apr_status_t rv; + const char *fpath; + + (void)ftype; + md_log_perror(MD_LOG_MARK, MD_LOG_TRACE3, 0, ptemp, "inspecting dir at: %s/%s", dir, name); + if (MD_OK(md_util_path_merge(&fpath, p, dir, name, NULL))) { + ctx->dirname = name; + rv = md_util_files_do(insp, ctx, p, fpath, ctx->aspect, NULL); + if (APR_STATUS_IS_ENOENT(rv)) { + rv = APR_SUCCESS; + } + } return rv; } @@ -745,7 +842,97 @@ static apr_status_t fs_iterate(md_store_inspect *inspect, void *baton, md_store_ ctx.baton = baton; groupname = md_store_group_name(group); - rv = md_util_files_do(insp, &ctx, p, ctx.s_fs->base, groupname, ctx.pattern, aspect, NULL); + rv = md_util_files_do(insp_dir, &ctx, p, ctx.s_fs->base, groupname, pattern, NULL); + + return rv; +} + +static apr_status_t insp_name(void *baton, 
apr_pool_t *p, apr_pool_t *ptemp, + const char *dir, const char *name, apr_filetype_e ftype) +{ + inspect_ctx *ctx = baton; + + (void)ftype; + (void)p; + md_log_perror(MD_LOG_MARK, MD_LOG_TRACE3, 0, ptemp, "inspecting name at: %s/%s", dir, name); + return ctx->inspect(ctx->baton, dir, name, 0, NULL, ptemp); +} + +static apr_status_t fs_iterate_names(md_store_inspect *inspect, void *baton, md_store_t *store, + apr_pool_t *p, md_store_group_t group, const char *pattern) +{ + const char *groupname; + apr_status_t rv; + inspect_ctx ctx; + + ctx.s_fs = FS_STORE(store); + ctx.group = group; + ctx.pattern = pattern; + ctx.inspect = inspect; + ctx.baton = baton; + groupname = md_store_group_name(group); + + rv = md_util_files_do(insp_name, &ctx, p, ctx.s_fs->base, groupname, pattern, NULL); + + return rv; +} + +static apr_status_t remove_nms_file(void *baton, apr_pool_t *p, apr_pool_t *ptemp, + const char *dir, const char *name, apr_filetype_e ftype) +{ + inspect_ctx *ctx = baton; + const char *fname; + apr_finfo_t inf; + apr_status_t rv = APR_SUCCESS; + + (void)p; + if (APR_DIR == ftype) goto leave; + if (APR_SUCCESS != (rv = md_util_path_merge(&fname, ptemp, dir, name, NULL))) goto leave; + if (APR_SUCCESS != (rv = apr_stat(&inf, fname, APR_FINFO_MTIME, ptemp))) goto leave; + if (inf.mtime >= ctx->ts) goto leave; + + md_log_perror(MD_LOG_MARK, MD_LOG_TRACE3, 0, ptemp, "remove_nms file: %s/%s", dir, name); + rv = apr_file_remove(fname, ptemp); + +leave: + return rv; +} + +static apr_status_t remove_nms_dir(void *baton, apr_pool_t *p, apr_pool_t *ptemp, + const char *dir, const char *name, apr_filetype_e ftype) +{ + inspect_ctx *ctx = baton; + apr_status_t rv; + const char *fpath; + + (void)ftype; + md_log_perror(MD_LOG_MARK, MD_LOG_TRACE3, 0, ptemp, "remove_nms dir at: %s/%s", dir, name); + if (MD_OK(md_util_path_merge(&fpath, p, dir, name, NULL))) { + ctx->dirname = name; + rv = md_util_files_do(remove_nms_file, ctx, p, fpath, ctx->aspect, NULL); + if (APR_STATUS_IS_ENOENT(rv)) { + rv = APR_SUCCESS; + } + } + return rv; +} + +static apr_status_t fs_remove_nms(md_store_t *store, apr_pool_t *p, + apr_time_t modified, md_store_group_t group, + const char *name, const char *aspect) +{ + const char *groupname; + apr_status_t rv; + inspect_ctx ctx; + + ctx.s_fs = FS_STORE(store); + ctx.group = group; + ctx.pattern = name; + ctx.aspect = aspect; + ctx.ts = modified; + groupname = md_store_group_name(group); + + rv = md_util_files_do(remove_nms_dir, &ctx, p, ctx.s_fs->base, groupname, name, NULL); return rv; } @@ -760,7 +947,6 @@ static apr_status_t pfs_move(void *baton, apr_pool_t *p, apr_pool_t *ptemp, va_l md_store_group_t from, to; int archive; apr_status_t rv; - MD_CHK_VARS; (void)p; from = (md_store_group_t)va_arg(ap, int); @@ -802,7 +988,7 @@ static apr_status_t pfs_move(void *baton, apr_pool_t *p, apr_pool_t *ptemp, va_l narch_dir = apr_psprintf(ptemp, "%s.%d", arch_dir, n); rv = md_util_is_dir(narch_dir, ptemp); if (APR_STATUS_IS_ENOENT(rv)) { - md_log_perror(MD_LOG_MARK, MD_LOG_DEBUG, rv, ptemp, "using archive dir: %s", + md_log_perror(MD_LOG_MARK, MD_LOG_TRACE1, rv, ptemp, "using archive dir: %s", narch_dir); break; } @@ -817,7 +1003,7 @@ static apr_status_t pfs_move(void *baton, apr_pool_t *p, apr_pool_t *ptemp, va_l while (n < 1000) { narch_dir = apr_psprintf(ptemp, "%s.%d", arch_dir, n); if (MD_OK(apr_dir_make(narch_dir, MD_FPROT_D_UONLY, ptemp))) { - md_log_perror(MD_LOG_MARK, MD_LOG_DEBUG, rv, ptemp, "using archive dir: %s", + md_log_perror(MD_LOG_MARK, MD_LOG_TRACE1, rv, ptemp, "using 
archive dir: %s", narch_dir); break; } @@ -844,12 +1030,12 @@ static apr_status_t pfs_move(void *baton, apr_pool_t *p, apr_pool_t *ptemp, va_l } if (!MD_OK(apr_file_rename(to_dir, narch_dir, ptemp))) { - md_log_perror(MD_LOG_MARK, MD_LOG_ERR, rv, ptemp, "rename from %s to %s", + md_log_perror(MD_LOG_MARK, MD_LOG_ERR, rv, ptemp, "rename from %s to %s", to_dir, narch_dir); goto out; } if (!MD_OK(apr_file_rename(from_dir, to_dir, ptemp))) { - md_log_perror(MD_LOG_MARK, MD_LOG_ERR, rv, ptemp, "rename from %s to %s", + md_log_perror(MD_LOG_MARK, MD_LOG_ERR, rv, ptemp, "rename from %s to %s", from_dir, to_dir); apr_file_rename(narch_dir, to_dir, ptemp); goto out; @@ -860,7 +1046,7 @@ static apr_status_t pfs_move(void *baton, apr_pool_t *p, apr_pool_t *ptemp, va_l } else if (APR_STATUS_IS_ENOENT(rv)) { if (APR_SUCCESS != (rv = apr_file_rename(from_dir, to_dir, ptemp))) { - md_log_perror(MD_LOG_MARK, MD_LOG_ERR, rv, ptemp, "rename from %s to %s", + md_log_perror(MD_LOG_MARK, MD_LOG_ERR, rv, ptemp, "rename from %s to %s", from_dir, to_dir); goto out; } @@ -881,3 +1067,103 @@ static apr_status_t fs_move(md_store_t *store, apr_pool_t *p, md_store_fs_t *s_fs = FS_STORE(store); return md_util_pool_vdo(pfs_move, s_fs, p, from, to, name, archive, NULL); } + +static apr_status_t pfs_rename(void *baton, apr_pool_t *p, apr_pool_t *ptemp, va_list ap) +{ + md_store_fs_t *s_fs = baton; + const char *group_name, *from_dir, *to_dir; + md_store_group_t group; + const char *from, *to; + apr_status_t rv; + + (void)p; + group = (md_store_group_t)va_arg(ap, int); + from = va_arg(ap, const char*); + to = va_arg(ap, const char*); + + group_name = md_store_group_name(group); + if ( !MD_OK(md_util_path_merge(&from_dir, ptemp, s_fs->base, group_name, from, NULL)) + || !MD_OK(md_util_path_merge(&to_dir, ptemp, s_fs->base, group_name, to, NULL))) { + goto out; + } + + if (APR_SUCCESS != (rv = apr_file_rename(from_dir, to_dir, ptemp)) + && !APR_STATUS_IS_ENOENT(rv)) { + md_log_perror(MD_LOG_MARK, MD_LOG_ERR, rv, ptemp, "rename from %s to %s", + from_dir, to_dir); + goto out; + } +out: + return rv; +} + +static apr_status_t fs_rename(md_store_t *store, apr_pool_t *p, + md_store_group_t group, const char *from, const char *to) +{ + md_store_fs_t *s_fs = FS_STORE(store); + return md_util_pool_vdo(pfs_rename, s_fs, p, group, from, to, NULL); +} + +static apr_status_t fs_lock_global(md_store_t *store, apr_pool_t *p, apr_time_t max_wait) +{ + md_store_fs_t *s_fs = FS_STORE(store); + apr_status_t rv; + const char *lpath; + apr_time_t end; + + if (s_fs->global_lock) { + rv = APR_EEXIST; + md_log_perror(MD_LOG_MARK, MD_LOG_ERR, rv, p, "already locked globally"); + goto cleanup; + } + + rv = md_util_path_merge(&lpath, p, s_fs->base, MD_FS_LOCK_NAME, NULL); + if (APR_SUCCESS != rv) goto cleanup; + end = apr_time_now() + max_wait; + + md_log_perror(MD_LOG_MARK, MD_LOG_TRACE1, 0, p, + "acquire global lock: %s", lpath); + while (apr_time_now() < end) { + rv = apr_file_open(&s_fs->global_lock, lpath, + (APR_FOPEN_WRITE|APR_FOPEN_CREATE), + MD_FPROT_F_UALL_GREAD, p); + if (APR_SUCCESS != rv) { + md_log_perror(MD_LOG_MARK, MD_LOG_TRACE1, rv, p, + "unable to create/open lock file: %s", + lpath); + goto next_try; + } + rv = apr_file_lock(s_fs->global_lock, + APR_FLOCK_EXCLUSIVE|APR_FLOCK_NONBLOCK); + if (APR_SUCCESS == rv) { + goto cleanup; + } + md_log_perror(MD_LOG_MARK, MD_LOG_TRACE1, rv, p, + "unable to obtain lock on: %s", + lpath); + + next_try: + if (s_fs->global_lock) { + apr_file_close(s_fs->global_lock); + s_fs->global_lock = NULL; + 
} + apr_sleep(apr_time_from_msec(100)); + } + rv = APR_EGENERAL; + md_log_perror(MD_LOG_MARK, MD_LOG_TRACE1, rv, p, + "acquire global lock: %s", lpath); + +cleanup: + return rv; +} + +static void fs_unlock_global(md_store_t *store, apr_pool_t *p) +{ + md_store_fs_t *s_fs = FS_STORE(store); + + (void)p; + if (s_fs->global_lock) { + apr_file_close(s_fs->global_lock); + s_fs->global_lock = NULL; + } +} diff --git a/modules/md/md_store_fs.h b/modules/md/md_store_fs.h index 4167c9b..dcdb897 100644 --- a/modules/md/md_store_fs.h +++ b/modules/md/md_store_fs.h @@ -56,7 +56,7 @@ typedef enum { } md_store_fs_ev_t; typedef apr_status_t md_store_fs_cb(void *baton, struct md_store_t *store, - md_store_fs_ev_t ev, int group, + md_store_fs_ev_t ev, unsigned int group, const char *fname, apr_filetype_e ftype, apr_pool_t *p); diff --git a/modules/md/md_tailscale.c b/modules/md/md_tailscale.c new file mode 100644 index 0000000..c8d2bad --- /dev/null +++ b/modules/md/md_tailscale.c @@ -0,0 +1,383 @@ +/* Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include +#include + +#include +#include +#include +#include + +#include "md.h" +#include "md_crypt.h" +#include "md_json.h" +#include "md_http.h" +#include "md_log.h" +#include "md_result.h" +#include "md_reg.h" +#include "md_store.h" +#include "md_util.h" + +#include "md_tailscale.h" + +typedef struct { + apr_pool_t *pool; + md_proto_driver_t *driver; + const char *unix_socket_path; + md_t *md; + apr_array_header_t *chain; + md_pkey_t *pkey; +} ts_ctx_t; + +static apr_status_t ts_init(md_proto_driver_t *d, md_result_t *result) +{ + ts_ctx_t *ts_ctx; + apr_uri_t uri; + const char *ca_url; + apr_status_t rv = APR_SUCCESS; + + md_result_set(result, APR_SUCCESS, NULL); + ts_ctx = apr_pcalloc(d->p, sizeof(*ts_ctx)); + ts_ctx->pool = d->p; + ts_ctx->driver = d; + ts_ctx->chain = apr_array_make(d->p, 5, sizeof(md_cert_t *)); + + ca_url = (d->md->ca_urls && !apr_is_empty_array(d->md->ca_urls))? 
+ APR_ARRAY_IDX(d->md->ca_urls, 0, const char*) : NULL; + if (!ca_url) { + ca_url = MD_TAILSCALE_DEF_URL; + } + rv = apr_uri_parse(d->p, ca_url, &uri); + if (APR_SUCCESS != rv) { + md_result_printf(result, rv, "error parsing CA URL `%s`", ca_url); + goto leave; + } + if (uri.scheme && uri.scheme[0] && strcmp("file", uri.scheme)) { + rv = APR_ENOTIMPL; + md_result_printf(result, rv, "non `file` URLs not supported, CA URL is `%s`", + ca_url); + goto leave; + } + if (uri.hostname && uri.hostname[0] && strcmp("localhost", uri.hostname)) { + rv = APR_ENOTIMPL; + md_result_printf(result, rv, "non `localhost` URLs not supported, CA URL is `%s`", + ca_url); + goto leave; + } + ts_ctx->unix_socket_path = uri.path; + d->baton = ts_ctx; + +leave: + return rv; +} + +static apr_status_t ts_preload_init(md_proto_driver_t *d, md_result_t *result) +{ + return ts_init(d, result); +} + +static apr_status_t ts_preload(md_proto_driver_t *d, + md_store_group_t load_group, md_result_t *result) +{ + apr_status_t rv; + md_t *md; + md_credentials_t *creds; + md_pkey_spec_t *pkspec; + apr_array_header_t *all_creds; + const char *name; + int i; + + name = d->md->name; + md_log_perror(MD_LOG_MARK, MD_LOG_DEBUG, 0, d->p, "%s: preload start", name); + /* Load data from MD_SG_STAGING and save it into "load_group". + */ + if (APR_SUCCESS != (rv = md_load(d->store, MD_SG_STAGING, name, &md, d->p))) { + md_result_set(result, rv, "loading staged md.json"); + goto leave; + } + + /* tailscale generates one cert+key with key specification being whatever + * it chooses. Use the NULL spec here. + */ + all_creds = apr_array_make(d->p, 5, sizeof(md_credentials_t*)); + pkspec = NULL; + if (APR_SUCCESS != (rv = md_creds_load(d->store, MD_SG_STAGING, name, pkspec, &creds, d->p))) { + md_result_printf(result, rv, "loading staged credentials"); + goto leave; + } + if (!creds->chain) { + rv = APR_ENOENT; + md_result_printf(result, rv, "no certificate in staged credentials"); + goto leave; + } + if (APR_SUCCESS != (rv = md_check_cert_and_pkey(creds->chain, creds->pkey))) { + md_result_printf(result, rv, "certificate and private key do not match in staged credentials"); + goto leave; + } + APR_ARRAY_PUSH(all_creds, md_credentials_t*) = creds; + + md_result_activity_setn(result, "purging store tmp space"); + rv = md_store_purge(d->store, d->p, load_group, name); + if (APR_SUCCESS != rv) { + md_result_set(result, rv, NULL); + goto leave; + } + + md_result_activity_setn(result, "saving staged md/privkey/pubcert"); + if (APR_SUCCESS != (rv = md_save(d->store, d->p, load_group, md, 1))) { + md_result_set(result, rv, "writing md.json"); + goto leave; + } + + for (i = 0; i < all_creds->nelts; ++i) { + creds = APR_ARRAY_IDX(all_creds, i, md_credentials_t*); + if (APR_SUCCESS != (rv = md_creds_save(d->store, d->p, load_group, name, creds, 1))) { + md_result_printf(result, rv, "writing credentials #%d", i); + goto leave; + } + } + + md_result_set(result, APR_SUCCESS, "saved staged data successfully"); + +leave: + md_result_log(result, MD_LOG_DEBUG); + return rv; +} + +static apr_status_t rv_of_response(const md_http_response_t *res) +{ + switch (res->status) { + case 200: + return APR_SUCCESS; + case 400: + return APR_EINVAL; + case 401: /* sectigo returns this instead of 403 */ + case 403: + return APR_EACCES; + case 404: + return APR_ENOENT; + default: + return APR_EGENERAL; + } + return APR_SUCCESS; +} + +static apr_status_t on_get_cert(const md_http_response_t *res, void *baton) +{ + ts_ctx_t *ts_ctx = baton; + apr_status_t rv; + + rv = 
rv_of_response(res); + if (APR_SUCCESS != rv) goto leave; + apr_array_clear(ts_ctx->chain); + rv = md_cert_chain_read_http(ts_ctx->chain, ts_ctx->pool, res); + if (APR_SUCCESS != rv) goto leave; + +leave: + return rv; +} + +static apr_status_t on_get_key(const md_http_response_t *res, void *baton) +{ + ts_ctx_t *ts_ctx = baton; + apr_status_t rv; + + rv = rv_of_response(res); + if (APR_SUCCESS != rv) goto leave; + rv = md_pkey_read_http(&ts_ctx->pkey, ts_ctx->pool, res); + if (APR_SUCCESS != rv) goto leave; + +leave: + return rv; +} + +static apr_status_t ts_renew(md_proto_driver_t *d, md_result_t *result) +{ + const char *name, *domain, *url; + apr_status_t rv = APR_ENOENT; + ts_ctx_t *ts_ctx = d->baton; + md_http_t *http; + const md_pubcert_t *pubcert; + md_cert_t *old_cert, *new_cert; + int reset_staging = d->reset; + + /* "renewing" the certificate from tailscale. Since tailscale has its + * own ideas on when to do this, we can only inspect the certificate + * it gives us and see if it is different from the current one we have. + * (if we have any. first time, lacking a cert, any it gives us is + * considered as 'renewed'.) + */ + name = d->md->name; + md_log_perror(MD_LOG_MARK, MD_LOG_DEBUG, 0, d->p, "%s: renewing cert", name); + + /* When not explicitly told to reset, we check the existing data. If + * it is incomplete or old, we trigger the reset for a clean start. */ + if (!reset_staging) { + md_result_activity_setn(result, "Checking staging area"); + rv = md_load(d->store, MD_SG_STAGING, d->md->name, &ts_ctx->md, d->p); + if (APR_SUCCESS == rv) { + /* So, we have a copy in staging, but is it a recent or an old one? */ + if (md_is_newer(d->store, MD_SG_DOMAINS, MD_SG_STAGING, d->md->name, d->p)) { + reset_staging = 1; + } + } + else if (APR_STATUS_IS_ENOENT(rv)) { + reset_staging = 1; + rv = APR_SUCCESS; + } + } + + if (reset_staging) { + md_result_activity_setn(result, "Resetting staging area"); + /* reset the staging area for this domain */ + rv = md_store_purge(d->store, d->p, MD_SG_STAGING, d->md->name); + md_log_perror(MD_LOG_MARK, MD_LOG_TRACE1, rv, d->p, + "%s: reset staging area", d->md->name); + if (APR_SUCCESS != rv && !APR_STATUS_IS_ENOENT(rv)) { + md_result_printf(result, rv, "resetting staging area"); + goto leave; + } + rv = APR_SUCCESS; + ts_ctx->md = NULL; + } + + if (!ts_ctx->md || !md_array_str_eq(ts_ctx->md->ca_urls, d->md->ca_urls, 1)) { + md_result_activity_printf(result, "Resetting staging for %s", d->md->name); + /* re-initialize staging */ + md_log_perror(MD_LOG_MARK, MD_LOG_DEBUG, 0, d->p, "%s: setup staging", d->md->name); + md_store_purge(d->store, d->p, MD_SG_STAGING, d->md->name); + ts_ctx->md = md_copy(d->p, d->md); + rv = md_save(d->store, d->p, MD_SG_STAGING, ts_ctx->md, 0); + if (APR_SUCCESS != rv) { + md_result_printf(result, rv, "Saving MD information in staging area."); + md_result_log(result, MD_LOG_ERR); + goto leave; + } + } + + if (!ts_ctx->unix_socket_path) { + rv = APR_ENOTIMPL; + md_result_set(result, rv, "only unix sockets are supported for tailscale connections"); + goto leave; + } + + rv = md_util_is_unix_socket(ts_ctx->unix_socket_path, d->p); + if (APR_SUCCESS != rv) { + md_result_printf(result, rv, "tailscale socket not available, may not be up: %s", + ts_ctx->unix_socket_path); + goto leave; + } + + rv = md_http_create(&http, d->p, + apr_psprintf(d->p, "Apache mod_md/%s", MOD_MD_VERSION), + NULL); + if (APR_SUCCESS != rv) { + md_result_set(result, rv, "creating http context"); + goto leave; + } + md_http_set_unix_socket_path(http, 
ts_ctx->unix_socket_path); + + domain = (d->md->domains->nelts > 0)? + APR_ARRAY_IDX(d->md->domains, 0, const char*) : NULL; + if (!domain) { + rv = APR_EINVAL; + md_result_set(result, rv, "no domain names available"); + } + + url = apr_psprintf(d->p, "http://localhost/localapi/v0/cert/%s?type=crt", + domain); + rv = md_http_GET_perform(http, url, NULL, on_get_cert, ts_ctx); + if (APR_SUCCESS != rv) { + md_result_set(result, rv, "retrieving certificate from tailscale"); + goto leave; + } + if (ts_ctx->chain->nelts <= 0) { + rv = APR_ENOENT; + md_result_set(result, rv, "tailscale returned no certificates"); + goto leave; + } + + /* Got the key and the chain, is it new? */ + rv = md_reg_get_pubcert(&pubcert, d->reg,d->md, 0, d->p); + if (APR_SUCCESS == rv) { + old_cert = APR_ARRAY_IDX(pubcert->certs, 0, md_cert_t*); + new_cert = APR_ARRAY_IDX(ts_ctx->chain, 0, md_cert_t*); + if (md_certs_are_equal(old_cert, new_cert)) { + /* tailscale has not renewed the certificate, yet */ + rv = APR_ENOENT; + md_result_set(result, rv, "tailscale has not renewed the certificate yet"); + /* let's check this daily */ + md_result_delay_set(result, apr_time_now() + apr_time_from_sec(MD_SECS_PER_DAY)); + goto leave; + } + } + + /* We have a new certificate (or had none before). + * Get the key and store both in STAGING. + */ + url = apr_psprintf(d->p, "http://localhost/localapi/v0/cert/%s?type=key", + domain); + rv = md_http_GET_perform(http, url, NULL, on_get_key, ts_ctx); + if (APR_SUCCESS != rv) { + md_result_set(result, rv, "retrieving key from tailscale"); + goto leave; + } + + rv = md_pkey_save(d->store, d->p, MD_SG_STAGING, name, NULL, ts_ctx->pkey, 1); + if (APR_SUCCESS != rv) { + md_result_set(result, rv, "saving private key"); + goto leave; + } + + rv = md_pubcert_save(d->store, d->p, MD_SG_STAGING, name, + NULL, ts_ctx->chain, 1); + if (APR_SUCCESS != rv) { + md_result_printf(result, rv, "saving new certificate chain."); + goto leave; + } + + md_result_set(result, APR_SUCCESS, + "A new tailscale certificate has been retrieved successfully and can " + "be used. A graceful server restart is recommended."); + +leave: + md_result_log(result, MD_LOG_DEBUG); + return rv; +} + +static apr_status_t ts_complete_md(md_t *md, apr_pool_t *p) +{ + (void)p; + if (!md->ca_urls) { + md->ca_urls = apr_array_make(p, 3, sizeof(const char *)); + APR_ARRAY_PUSH(md->ca_urls, const char*) = MD_TAILSCALE_DEF_URL; + } + return APR_SUCCESS; +} + + +static md_proto_t TAILSCALE_PROTO = { + MD_PROTO_TAILSCALE, ts_init, ts_renew, + ts_preload_init, ts_preload, ts_complete_md, +}; + +apr_status_t md_tailscale_protos_add(apr_hash_t *protos, apr_pool_t *p) +{ + (void)p; + apr_hash_set(protos, MD_PROTO_TAILSCALE, sizeof(MD_PROTO_TAILSCALE)-1, &TAILSCALE_PROTO); + return APR_SUCCESS; +} diff --git a/modules/md/md_tailscale.h b/modules/md/md_tailscale.h new file mode 100644 index 0000000..67a874d --- /dev/null +++ b/modules/md/md_tailscale.h @@ -0,0 +1,25 @@ +/* Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef mod_md_md_tailscale_h +#define mod_md_md_tailscale_h + +#define MD_PROTO_TAILSCALE "tailscale" + +apr_status_t md_tailscale_protos_add(struct apr_hash_t *protos, apr_pool_t *p); + +#endif /* mod_md_md_tailscale_h */ + diff --git a/modules/md/md_time.c b/modules/md/md_time.c new file mode 100644 index 0000000..268ca83 --- /dev/null +++ b/modules/md/md_time.c @@ -0,0 +1,325 @@ +/* Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include + +#include +#include +#include + +#include "md.h" +#include "md_time.h" + +apr_time_t md_timeperiod_length(const md_timeperiod_t *period) +{ + return (period->start < period->end)? (period->end - period->start) : 0; +} + +int md_timeperiod_contains(const md_timeperiod_t *period, apr_time_t time) +{ + return md_timeperiod_has_started(period, time) + && !md_timeperiod_has_ended(period, time); +} + +int md_timeperiod_has_started(const md_timeperiod_t *period, apr_time_t time) +{ + return (time >= period->start); +} + +int md_timeperiod_has_ended(const md_timeperiod_t *period, apr_time_t time) +{ + return (time >= period->start) && (time <= period->end); +} + +apr_interval_time_t md_timeperiod_remaining(const md_timeperiod_t *period, apr_time_t time) +{ + if (time < period->start) return md_timeperiod_length(period); + if (time < period->end) return period->end - time; + return 0; +} + +char *md_timeperiod_print(apr_pool_t *p, const md_timeperiod_t *period) +{ + char tstart[APR_RFC822_DATE_LEN]; + char tend[APR_RFC822_DATE_LEN]; + + apr_rfc822_date(tstart, period->start); + apr_rfc822_date(tend, period->end); + return apr_pstrcat(p, tstart, " - ", tend, NULL); +} + +static const char *duration_print(apr_pool_t *p, int roughly, apr_interval_time_t duration) +{ + const char *s = "", *sep = ""; + long days = (long)(apr_time_sec(duration) / MD_SECS_PER_DAY); + int rem = (int)(apr_time_sec(duration) % MD_SECS_PER_DAY); + + s = roughly? 
"~" : ""; + if (days > 0) { + s = apr_psprintf(p, "%s%ld days", s, days); + if (roughly) return s; + sep = " "; + } + if (rem > 0) { + int hours = (rem / MD_SECS_PER_HOUR); + rem = (rem % MD_SECS_PER_HOUR); + if (hours > 0) { + s = apr_psprintf(p, "%s%s%d hours", s, sep, hours); + if (roughly) return s; + sep = " "; + } + if (rem > 0) { + int minutes = (rem / 60); + rem = (rem % 60); + if (minutes > 0) { + s = apr_psprintf(p, "%s%s%d minutes", s, sep, minutes); + if (roughly) return s; + sep = " "; + } + if (rem > 0) { + s = apr_psprintf(p, "%s%s%d seconds", s, sep, rem); + if (roughly) return s; + sep = " "; + } + } + } + else if (days == 0) { + s = "0 seconds"; + if (duration != 0) { + s = apr_psprintf(p, "%d ms", (int)apr_time_msec(duration)); + } + } + return s; +} + +const char *md_duration_print(apr_pool_t *p, apr_interval_time_t duration) +{ + return duration_print(p, 0, duration); +} + +const char *md_duration_roughly(apr_pool_t *p, apr_interval_time_t duration) +{ + return duration_print(p, 1, duration); +} + +static const char *duration_format(apr_pool_t *p, apr_interval_time_t duration) +{ + const char *s = "0"; + int units = (int)(apr_time_sec(duration) / MD_SECS_PER_DAY); + int rem = (int)(apr_time_sec(duration) % MD_SECS_PER_DAY); + + if (rem == 0) { + s = apr_psprintf(p, "%dd", units); + } + else { + units = (int)(apr_time_sec(duration) / MD_SECS_PER_HOUR); + rem = (int)(apr_time_sec(duration) % MD_SECS_PER_HOUR); + if (rem == 0) { + s = apr_psprintf(p, "%dh", units); + } + else { + units = (int)(apr_time_sec(duration) / 60); + rem = (int)(apr_time_sec(duration) % 60); + if (rem == 0) { + s = apr_psprintf(p, "%dmi", units); + } + else { + units = (int)(apr_time_sec(duration)); + rem = (int)(apr_time_msec(duration) % 1000); + if (rem == 0) { + s = apr_psprintf(p, "%ds", units); + } + else { + s = apr_psprintf(p, "%dms", (int)(apr_time_msec(duration))); + } + } + } + } + return s; +} + +const char *md_duration_format(apr_pool_t *p, apr_interval_time_t duration) +{ + return duration_format(p, duration); +} + +apr_status_t md_duration_parse(apr_interval_time_t *ptimeout, const char *value, + const char *def_unit) +{ + char *endp; + apr_int64_t n; + + n = apr_strtoi64(value, &endp, 10); + if (errno) { + return errno; + } + if (!endp || !*endp) { + if (!def_unit) def_unit = "s"; + } + else if (endp == value) { + return APR_EINVAL; + } + else { + def_unit = endp; + } + + switch (*def_unit) { + case 'D': + case 'd': + *ptimeout = apr_time_from_sec(n * MD_SECS_PER_DAY); + break; + case 's': + case 'S': + *ptimeout = (apr_interval_time_t) apr_time_from_sec(n); + break; + case 'h': + case 'H': + /* Time is in hours */ + *ptimeout = (apr_interval_time_t) apr_time_from_sec(n * MD_SECS_PER_HOUR); + break; + case 'm': + case 'M': + switch (*(++def_unit)) { + /* Time is in milliseconds */ + case 's': + case 'S': + *ptimeout = (apr_interval_time_t) n * 1000; + break; + /* Time is in minutes */ + case 'i': + case 'I': + *ptimeout = (apr_interval_time_t) apr_time_from_sec(n * 60); + break; + default: + return APR_EGENERAL; + } + break; + default: + return APR_EGENERAL; + } + return APR_SUCCESS; +} + +static apr_status_t percentage_parse(const char *value, int *ppercent) +{ + char *endp; + apr_int64_t n; + + n = apr_strtoi64(value, &endp, 10); + if (errno) { + return errno; + } + if (*endp == '%') { + if (n < 0) { + return APR_BADARG; + } + *ppercent = (int)n; + return APR_SUCCESS; + } + return APR_EINVAL; +} + +apr_status_t md_timeslice_create(md_timeslice_t **pts, apr_pool_t *p, + 
apr_interval_time_t norm, apr_interval_time_t len) +{ + md_timeslice_t *ts; + + ts = apr_pcalloc(p, sizeof(*ts)); + ts->norm = norm; + ts->len = len; + *pts = ts; + return APR_SUCCESS; +} + +const char *md_timeslice_parse(md_timeslice_t **pts, apr_pool_t *p, + const char *val, apr_interval_time_t norm) +{ + md_timeslice_t *ts; + int percent = 0; + + *pts = NULL; + if (!val) { + return "cannot parse NULL value"; + } + + ts = apr_pcalloc(p, sizeof(*ts)); + if (md_duration_parse(&ts->len, val, "d") == APR_SUCCESS) { + *pts = ts; + return NULL; + } + else { + switch (percentage_parse(val, &percent)) { + case APR_SUCCESS: + ts->norm = norm; + ts->len = apr_time_from_sec((apr_time_sec(norm) * percent / 100L)); + *pts = ts; + return NULL; + case APR_BADARG: + return "percent must be less than 100"; + } + } + return "has unrecognized format"; +} + +const char *md_timeslice_format(const md_timeslice_t *ts, apr_pool_t *p) { + if (ts->norm > 0) { + int percent = (int)(((long)apr_time_sec(ts->len)) * 100L + / ((long)apr_time_sec(ts->norm))); + return apr_psprintf(p, "%d%%", percent); + } + return duration_format(p, ts->len); +} + +md_timeperiod_t md_timeperiod_slice_before_end(const md_timeperiod_t *period, + const md_timeslice_t *ts) +{ + md_timeperiod_t r; + apr_time_t duration = ts->len; + + if (ts->norm > 0) { + int percent = (int)(((long)apr_time_sec(ts->len)) * 100L + / ((long)apr_time_sec(ts->norm))); + apr_time_t plen = md_timeperiod_length(period); + if (apr_time_sec(plen) > 100) { + duration = apr_time_from_sec(apr_time_sec(plen) * percent / 100); + } + else { + duration = plen * percent / 100; + } + } + r.start = period->end - duration; + r.end = period->end; + return r; +} + +int md_timeslice_eq(const md_timeslice_t *ts1, const md_timeslice_t *ts2) +{ + if (ts1 == ts2) return 1; + if (!ts1 || !ts2) return 0; + return (ts1->norm == ts2->norm) && (ts1->len == ts2->len); +} + +md_timeperiod_t md_timeperiod_common(const md_timeperiod_t *a, const md_timeperiod_t *b) +{ + md_timeperiod_t c; + + c.start = (a->start > b->start)? a->start : b->start; + c.end = (a->end < b->end)? a->end : b->end; + if (c.start > c.end) { + c.start = c.end = 0; + } + return c; +} diff --git a/modules/md/md_time.h b/modules/md/md_time.h new file mode 100644 index 0000000..92bd9d8 --- /dev/null +++ b/modules/md/md_time.h @@ -0,0 +1,77 @@ +/* Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef mod_md_md_time_h +#define mod_md_md_time_h + +#include + +#define MD_SECS_PER_HOUR (60*60) +#define MD_SECS_PER_DAY (24*MD_SECS_PER_HOUR) + +typedef struct md_timeperiod_t md_timeperiod_t; + +struct md_timeperiod_t { + apr_time_t start; + apr_time_t end; +}; + +apr_time_t md_timeperiod_length(const md_timeperiod_t *period); + +int md_timeperiod_contains(const md_timeperiod_t *period, apr_time_t time); +int md_timeperiod_has_started(const md_timeperiod_t *period, apr_time_t time); +int md_timeperiod_has_ended(const md_timeperiod_t *period, apr_time_t time); +apr_interval_time_t md_timeperiod_remaining(const md_timeperiod_t *period, apr_time_t time); + +/** + * Return the timeperiod common between a and b. If both do not overlap, return {0,0}. + */ +md_timeperiod_t md_timeperiod_common(const md_timeperiod_t *a, const md_timeperiod_t *b); + +char *md_timeperiod_print(apr_pool_t *p, const md_timeperiod_t *period); + +/** + * Print a human readable form of the give duration in days/hours/min/sec + */ +const char *md_duration_print(apr_pool_t *p, apr_interval_time_t duration); +const char *md_duration_roughly(apr_pool_t *p, apr_interval_time_t duration); + +/** + * Parse a machine readable string duration in the form of NN[unit], where + * unit is d/h/mi/s/ms with the default given should the unit not be specified. + */ +apr_status_t md_duration_parse(apr_interval_time_t *ptimeout, const char *value, + const char *def_unit); +const char *md_duration_format(apr_pool_t *p, apr_interval_time_t duration); + +typedef struct { + apr_interval_time_t norm; /* if > 0, normalized base length */ + apr_interval_time_t len; /* length of the timespan */ +} md_timeslice_t; + +apr_status_t md_timeslice_create(md_timeslice_t **pts, apr_pool_t *p, + apr_interval_time_t norm, apr_interval_time_t len); + +int md_timeslice_eq(const md_timeslice_t *ts1, const md_timeslice_t *ts2); + +const char *md_timeslice_parse(md_timeslice_t **pts, apr_pool_t *p, + const char *val, apr_interval_time_t defnorm); +const char *md_timeslice_format(const md_timeslice_t *ts, apr_pool_t *p); + +md_timeperiod_t md_timeperiod_slice_before_end(const md_timeperiod_t *period, + const md_timeslice_t *ts); + +#endif /* md_util_h */ diff --git a/modules/md/md_util.c b/modules/md/md_util.c index 4e97d92..95ecc27 100644 --- a/modules/md/md_util.c +++ b/modules/md/md_util.c @@ -14,6 +14,7 @@ * limitations under the License. */ +#include #include #include @@ -24,6 +25,11 @@ #include #include +#if APR_HAVE_STDLIB_H +#include +#endif + +#include "md.h" #include "md_log.h" #include "md_util.h" @@ -35,8 +41,8 @@ apr_status_t md_util_pool_do(md_util_action *cb, void *baton, apr_pool_t *p) apr_pool_t *ptemp; apr_status_t rv = apr_pool_create(&ptemp, p); if (APR_SUCCESS == rv) { + apr_pool_tag(ptemp, "md_pool_do"); rv = cb(baton, p, ptemp); - apr_pool_destroy(ptemp); } return rv; @@ -49,6 +55,7 @@ static apr_status_t pool_vado(md_util_vaction *cb, void *baton, apr_pool_t *p, v rv = apr_pool_create(&ptemp, p); if (APR_SUCCESS == rv) { + apr_pool_tag(ptemp, "md_pool_vado"); rv = cb(baton, p, ptemp, ap); apr_pool_destroy(ptemp); } @@ -66,9 +73,170 @@ apr_status_t md_util_pool_vdo(md_util_vaction *cb, void *baton, apr_pool_t *p, . 
return rv; } +/**************************************************************************************************/ +/* data chunks */ + +void md_data_pinit(md_data_t *d, apr_size_t len, apr_pool_t *p) +{ + md_data_null(d); + d->data = apr_pcalloc(p, len); + d->len = len; +} + +md_data_t *md_data_pmake(apr_size_t len, apr_pool_t *p) +{ + md_data_t *d; + + d = apr_palloc(p, sizeof(*d)); + md_data_pinit(d, len, p); + return d; +} + +void md_data_init(md_data_t *d, const char *data, apr_size_t len) +{ + md_data_null(d); + d->len = len; + d->data = data; +} + +void md_data_init_str(md_data_t *d, const char *str) +{ + md_data_init(d, str, strlen(str)); +} + +void md_data_null(md_data_t *d) +{ + memset(d, 0, sizeof(*d)); +} + +void md_data_clear(md_data_t *d) +{ + if (d) { + if (d->data && d->free_data) d->free_data((void*)d->data); + memset(d, 0, sizeof(*d)); + } +} + +md_data_t *md_data_make_pcopy(apr_pool_t *p, const char *data, apr_size_t len) +{ + md_data_t *d; + + d = apr_palloc(p, sizeof(*d)); + d->len = len; + d->data = len? apr_pmemdup(p, data, len) : NULL; + return d; +} + +apr_status_t md_data_assign_copy(md_data_t *dest, const char *src, apr_size_t src_len) +{ + md_data_clear(dest); + if (src && src_len) { + dest->data = malloc(src_len); + if (!dest->data) return APR_ENOMEM; + memcpy((void*)dest->data, src, src_len); + dest->len = src_len; + dest->free_data = free; + } + return APR_SUCCESS; +} + +void md_data_assign_pcopy(md_data_t *dest, const char *src, apr_size_t src_len, apr_pool_t *p) +{ + md_data_clear(dest); + dest->data = (src && src_len)? apr_pmemdup(p, src, src_len) : NULL; + dest->len = dest->data? src_len : 0; +} + +static const char * const hex_const[] = { + "00", "01", "02", "03", "04", "05", "06", "07", "08", "09", "0a", "0b", "0c", "0d", "0e", "0f", + "10", "11", "12", "13", "14", "15", "16", "17", "18", "19", "1a", "1b", "1c", "1d", "1e", "1f", + "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", "2a", "2b", "2c", "2d", "2e", "2f", + "30", "31", "32", "33", "34", "35", "36", "37", "38", "39", "3a", "3b", "3c", "3d", "3e", "3f", + "40", "41", "42", "43", "44", "45", "46", "47", "48", "49", "4a", "4b", "4c", "4d", "4e", "4f", + "50", "51", "52", "53", "54", "55", "56", "57", "58", "59", "5a", "5b", "5c", "5d", "5e", "5f", + "60", "61", "62", "63", "64", "65", "66", "67", "68", "69", "6a", "6b", "6c", "6d", "6e", "6f", + "70", "71", "72", "73", "74", "75", "76", "77", "78", "79", "7a", "7b", "7c", "7d", "7e", "7f", + "80", "81", "82", "83", "84", "85", "86", "87", "88", "89", "8a", "8b", "8c", "8d", "8e", "8f", + "90", "91", "92", "93", "94", "95", "96", "97", "98", "99", "9a", "9b", "9c", "9d", "9e", "9f", + "a0", "a1", "a2", "a3", "a4", "a5", "a6", "a7", "a8", "a9", "aa", "ab", "ac", "ad", "ae", "af", + "b0", "b1", "b2", "b3", "b4", "b5", "b6", "b7", "b8", "b9", "ba", "bb", "bc", "bd", "be", "bf", + "c0", "c1", "c2", "c3", "c4", "c5", "c6", "c7", "c8", "c9", "ca", "cb", "cc", "cd", "ce", "cf", + "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7", "d8", "d9", "da", "db", "dc", "dd", "de", "df", + "e0", "e1", "e2", "e3", "e4", "e5", "e6", "e7", "e8", "e9", "ea", "eb", "ec", "ed", "ee", "ef", + "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7", "f8", "f9", "fa", "fb", "fc", "fd", "fe", "ff", +}; + +apr_status_t md_data_to_hex(const char **phex, char separator, + apr_pool_t *p, const md_data_t *data) +{ + char *hex, *cp; + const char * x; + unsigned int i; + + cp = hex = apr_pcalloc(p, ((separator? 
3 : 2) * data->len) + 1); + if (!hex) { + *phex = NULL; + return APR_ENOMEM; + } + for (i = 0; i < data->len; ++i) { + x = hex_const[(unsigned char)data->data[i]]; + if (i && separator) *cp++ = separator; + *cp++ = x[0]; + *cp++ = x[1]; + } + *phex = hex; + return APR_SUCCESS; +} + +/**************************************************************************************************/ +/* generic arrays */ + +int md_array_remove_at(struct apr_array_header_t *a, int idx) +{ + char *ps, *pe; + + if (idx < 0 || idx >= a->nelts) return 0; + if (idx+1 == a->nelts) { + --a->nelts; + } + else { + ps = (a->elts + (idx * a->elt_size)); + pe = ps + a->elt_size; + memmove(ps, pe, (size_t)((a->nelts - (idx+1)) * a->elt_size)); + --a->nelts; + } + return 1; +} + +int md_array_remove(struct apr_array_header_t *a, void *elem) +{ + int i, n, m; + void **pe; + + assert(sizeof(void*) == a->elt_size); + n = i = 0; + while (i < a->nelts) { + pe = &APR_ARRAY_IDX(a, i, void*); + if (*pe == elem) { + m = a->nelts - (i+1); + if (m > 0) memmove(pe, pe+1, (unsigned)m*sizeof(void*)); + a->nelts--; + n++; + continue; + } + ++i; + } + return n; +} + /**************************************************************************************************/ /* string related */ +int md_array_is_empty(const struct apr_array_header_t *array) +{ + return (array == NULL) || (array->nelts == 0); +} + char *md_util_str_tolower(char *s) { char *orig = s; @@ -104,7 +272,7 @@ int md_array_str_eq(const struct apr_array_header_t *a1, const char *s1, *s2; if (a1 == a2) return 1; - if (!a1) return 0; + if (!a1 || !a2) return 0; if (a1->nelts != a2->nelts) return 0; for (i = 0; i < a1->nelts; ++i) { s1 = APR_ARRAY_IDX(a1, i, const char *); @@ -194,8 +362,20 @@ apr_status_t md_util_fopen(FILE **pf, const char *fn, const char *mode) apr_status_t md_util_fcreatex(apr_file_t **pf, const char *fn, apr_fileperms_t perms, apr_pool_t *p) { - return apr_file_open(pf, fn, (APR_FOPEN_WRITE|APR_FOPEN_CREATE|APR_FOPEN_EXCL), - perms, p); + apr_status_t rv; + rv = apr_file_open(pf, fn, (APR_FOPEN_WRITE|APR_FOPEN_CREATE|APR_FOPEN_EXCL), + perms, p); + if (APR_SUCCESS == rv) { + /* See + * Some people set umask 007 to deny all world read/writability to files + * created by apache. While this is a noble effort, we need the store files + * to have the permissions as specified. */ + rv = apr_file_perms_set(fn, perms); + if (APR_STATUS_IS_ENOTIMPL(rv)) { + rv = APR_SUCCESS; + } + } + return rv; } apr_status_t md_util_is_dir(const char *path, apr_pool_t *pool) @@ -218,6 +398,21 @@ apr_status_t md_util_is_file(const char *path, apr_pool_t *pool) return rv; } +apr_status_t md_util_is_unix_socket(const char *path, apr_pool_t *pool) +{ + apr_finfo_t info; + apr_status_t rv = apr_stat(&info, path, APR_FINFO_TYPE, pool); + if (rv == APR_SUCCESS) { + rv = (info.filetype == APR_SOCK)? APR_SUCCESS : APR_EINVAL; + } + return rv; +} + +int md_file_exists(const char *fname, apr_pool_t *p) +{ + return (fname && *fname && APR_SUCCESS == md_util_is_file(fname, p)); +} + apr_status_t md_util_path_merge(const char **ppath, apr_pool_t *p, ...) 
{ const char *segment, *path; @@ -248,7 +443,7 @@ apr_status_t md_util_freplace(const char *fpath, apr_fileperms_t perms, apr_pool creat: while (i < max && APR_EEXIST == (rv = md_util_fcreatex(&f, tmp, perms, p))) { ++i; - apr_sleep(apr_time_msec(50)); + apr_sleep(apr_time_from_msec(50)); } if (APR_EEXIST == rv && APR_SUCCESS == (rv = apr_file_remove(tmp, p)) @@ -312,6 +507,13 @@ apr_status_t md_text_fcreatex(const char *fpath, apr_fileperms_t perms, if (APR_SUCCESS == rv) { rv = write_text((void*)text, f, p); apr_file_close(f); + /* See : when a umask + * is set, files need to be assigned permissions explicitly. + * Otherwise, as in the issues reported, it will break our access model. */ + rv = apr_file_perms_set(fpath, perms); + if (APR_STATUS_IS_ENOTIMPL(rv)) { + rv = APR_SUCCESS; + } } return rv; } @@ -401,17 +603,25 @@ static apr_status_t match_and_do(md_util_fwalk_t *ctx, const char *path, int dep } pattern = APR_ARRAY_IDX(ctx->patterns, depth, const char *); + md_log_perror(MD_LOG_MARK, MD_LOG_TRACE4, 0, ptemp, "match_and_do " + "path=%s depth=%d pattern=%s", path, depth, pattern); rv = apr_dir_open(&d, path, ptemp); if (APR_SUCCESS != rv) { return rv; } while (APR_SUCCESS == (rv = apr_dir_read(&finfo, wanted, d))) { + md_log_perror(MD_LOG_MARK, MD_LOG_TRACE4, 0, ptemp, "match_and_do " + "candidate=%s", finfo.name); if (!strcmp(".", finfo.name) || !strcmp("..", finfo.name)) { continue; } if (APR_SUCCESS == apr_fnmatch(pattern, finfo.name, 0)) { + md_log_perror(MD_LOG_MARK, MD_LOG_TRACE4, 0, ptemp, "match_and_do " + "candidate=%s matches pattern", finfo.name); if (ndepth < ctx->patterns->nelts) { + md_log_perror(MD_LOG_MARK, MD_LOG_TRACE4, 0, ptemp, "match_and_do " + "need to go deeper"); if (APR_DIR == finfo.filetype) { /* deeper and deeper, irgendwo in der tiefe leuchtet ein licht */ rv = md_util_path_merge(&npath, ptemp, path, finfo.name, NULL); @@ -421,6 +631,8 @@ static apr_status_t match_and_do(md_util_fwalk_t *ctx, const char *path, int dep } } else { + md_log_perror(MD_LOG_MARK, MD_LOG_TRACE4, 0, ptemp, "match_and_do " + "invoking inspector on name=%s", finfo.name); rv = ctx->cb(ctx->baton, p, ptemp, path, finfo.name, finfo.filetype); } } @@ -596,7 +808,7 @@ apr_status_t md_util_ftree_remove(const char *path, apr_pool_t *p) /* DNS name checks ********************************************************************************/ -int md_util_is_dns_name(apr_pool_t *p, const char *hostname, int need_fqdn) +int md_dns_is_name(apr_pool_t *p, const char *hostname, int need_fqdn) { char c, last = 0; const char *cp = hostname; @@ -637,6 +849,86 @@ int md_util_is_dns_name(apr_pool_t *p, const char *hostname, int need_fqdn) return 1; /* empty string not allowed */ } +int md_dns_is_wildcard(apr_pool_t *p, const char *domain) +{ + if (domain[0] != '*' || domain[1] != '.') return 0; + return md_dns_is_name(p, domain+2, 1); +} + +int md_dns_matches(const char *pattern, const char *domain) +{ + const char *s; + + if (!apr_strnatcasecmp(pattern, domain)) return 1; + if (pattern[0] == '*' && pattern[1] == '.') { + s = strchr(domain, '.'); + if (s && !apr_strnatcasecmp(pattern+1, s)) return 1; + } + return 0; +} + +apr_array_header_t *md_dns_make_minimal(apr_pool_t *p, apr_array_header_t *domains) +{ + apr_array_header_t *minimal; + const char *domain, *pattern; + int i, j, duplicate; + + minimal = apr_array_make(p, domains->nelts, sizeof(const char *)); + for (i = 0; i < domains->nelts; ++i) { + domain = APR_ARRAY_IDX(domains, i, const char*); + duplicate = 0; + /* is it matched in minimal 
already? */ + for (j = 0; j < minimal->nelts; ++j) { + pattern = APR_ARRAY_IDX(minimal, j, const char*); + if (md_dns_matches(pattern, domain)) { + duplicate = 1; + break; + } + } + if (!duplicate) { + if (!md_dns_is_wildcard(p, domain)) { + /* plain name, will we see a wildcard that replaces it? */ + for (j = i+1; j < domains->nelts; ++j) { + pattern = APR_ARRAY_IDX(domains, j, const char*); + if (md_dns_is_wildcard(p, pattern) && md_dns_matches(pattern, domain)) { + duplicate = 1; + break; + } + } + } + if (!duplicate) { + APR_ARRAY_PUSH(minimal, const char *) = domain; + } + } + } + return minimal; +} + +int md_dns_domains_match(const apr_array_header_t *domains, const char *name) +{ + const char *domain; + int i; + + for (i = 0; i < domains->nelts; ++i) { + domain = APR_ARRAY_IDX(domains, i, const char*); + if (md_dns_matches(domain, name)) return 1; + } + return 0; +} + +int md_is_wild_match(const apr_array_header_t *domains, const char *name) +{ + const char *domain; + int i; + + for (i = 0; i < domains->nelts; ++i) { + domain = APR_ARRAY_IDX(domains, i, const char*); + if (md_dns_matches(domain, name)) + return (domain[0] == '*' && domain[1] == '.'); + } + return 0; +} + const char *md_util_schemify(apr_pool_t *p, const char *s, const char *def_scheme) { const char *cp = s; @@ -670,7 +962,7 @@ static apr_status_t uri_check(apr_uri_t *uri_parsed, apr_pool_t *p, if (!uri_parsed->hostname) { err = "missing hostname"; } - else if (!md_util_is_dns_name(p, uri_parsed->hostname, 0)) { + else if (!md_dns_is_name(p, uri_parsed->hostname, 0)) { err = "invalid hostname"; } if (uri_parsed->port_str @@ -789,45 +1081,44 @@ apr_status_t md_util_try(md_util_try_fn *fn, void *baton, int ignore_errs, /* execute process ********************************************************************************/ -apr_status_t md_util_exec(apr_pool_t *p, const char *cmd, const char * const *argv, - int *exit_code) +apr_status_t md_util_exec(apr_pool_t *p, const char *cmd, + const char * const *argv, int *exit_code) { apr_status_t rv; apr_procattr_t *procattr; apr_proc_t *proc; apr_exit_why_e ewhy; - + char buffer[1024]; + *exit_code = 0; if (!(proc = apr_pcalloc(p, sizeof(*proc)))) { return APR_ENOMEM; } if ( APR_SUCCESS == (rv = apr_procattr_create(&procattr, p)) && APR_SUCCESS == (rv = apr_procattr_io_set(procattr, APR_NO_FILE, - APR_NO_PIPE, APR_NO_PIPE)) - && APR_SUCCESS == (rv = apr_procattr_cmdtype_set(procattr, APR_PROGRAM)) - && APR_SUCCESS == (rv = apr_proc_create(proc, cmd, argv, NULL, procattr, p)) - && APR_CHILD_DONE == (rv = apr_proc_wait(proc, exit_code, &ewhy, APR_WAIT))) { - /* let's not dwell on exit stati, but core should signal something's bad */ - if (*exit_code > 127 || APR_PROC_SIGNAL_CORE == ewhy) { - return APR_EINCOMPLETE; + APR_NO_PIPE, APR_FULL_BLOCK)) + && APR_SUCCESS == (rv = apr_procattr_cmdtype_set(procattr, APR_PROGRAM_ENV)) + && APR_SUCCESS == (rv = apr_proc_create(proc, cmd, argv, NULL, procattr, p))) { + + /* read stderr and log on INFO for possible fault analysis. 
*/ + while(APR_SUCCESS == (rv = apr_file_gets(buffer, sizeof(buffer)-1, proc->err))) { + md_log_perror(MD_LOG_MARK, MD_LOG_INFO, 0, p, "cmd(%s) stderr: %s", cmd, buffer); + } + if (!APR_STATUS_IS_EOF(rv)) goto out; + apr_file_close(proc->err); + + if (APR_CHILD_DONE == (rv = apr_proc_wait(proc, exit_code, &ewhy, APR_WAIT))) { + /* let's not dwell on exit stati, but core should signal something's bad */ + if (*exit_code > 127 || APR_PROC_SIGNAL_CORE == ewhy) { + return APR_EINCOMPLETE; + } + return APR_SUCCESS; } - return APR_SUCCESS; } +out: return rv; } - -/* date/time encoding *****************************************************************************/ - -const char *md_print_duration(apr_pool_t *p, apr_interval_time_t duration) -{ - int secs = (int)(apr_time_sec(duration) % MD_SECS_PER_DAY); - return apr_psprintf(p, "%2d:%02d:%02d hours", - (int)secs/MD_SECS_PER_HOUR, (int)(secs%(MD_SECS_PER_HOUR))/60, - (int)(secs%60)); -} - - /* base64 url encoding ****************************************************************************/ #define N6 (unsigned int)-1 @@ -863,7 +1154,7 @@ static const unsigned char BASE64URL_CHARS[] = { #define BASE64URL_CHAR(x) BASE64URL_CHARS[ (unsigned int)(x) & 0x3fu ] -apr_size_t md_util_base64url_decode(const char **decoded, const char *encoded, +apr_size_t md_util_base64url_decode(md_data_t *decoded, const char *encoded, apr_pool_t *pool) { const unsigned char *e = (const unsigned char *)encoded; @@ -877,10 +1168,10 @@ apr_size_t md_util_base64url_decode(const char **decoded, const char *encoded, } len = (int)(p - e); mlen = (len/4)*4; - *decoded = apr_pcalloc(pool, (apr_size_t)len + 1); + decoded->data = apr_pcalloc(pool, (apr_size_t)len + 1); i = 0; - d = (unsigned char*)*decoded; + d = (unsigned char*)decoded->data; for (; i < mlen; i += 4) { n = ((BASE64URL_UINT6[ e[i+0] ] << 18) + (BASE64URL_UINT6[ e[i+1] ] << 12) + @@ -909,14 +1200,15 @@ apr_size_t md_util_base64url_decode(const char **decoded, const char *encoded, default: /* do nothing */ break; } - return (apr_size_t)(mlen/4*3 + remain); + decoded->len = (apr_size_t)(mlen/4*3 + remain); + return decoded->len; } -const char *md_util_base64url_encode(const char *data, apr_size_t dlen, apr_pool_t *pool) +const char *md_util_base64url_encode(const md_data_t *data, apr_pool_t *pool) { - int i, len = (int)dlen; - apr_size_t slen = ((dlen+2)/3)*4 + 1; /* 0 terminated */ - const unsigned char *udata = (const unsigned char*)data; + int i, len = (int)data->len; + apr_size_t slen = ((data->len+2)/3)*4 + 1; /* 0 terminated */ + const unsigned char *udata = (const unsigned char*)data->data; unsigned char *enc, *p = apr_pcalloc(pool, slen); enc = p; @@ -1252,3 +1544,23 @@ const char *md_link_find_relation(const apr_table_t *headers, return ctx.url; } +const char *md_util_parse_ct(apr_pool_t *pool, const char *cth) +{ + char *type; + const char *p; + apr_size_t hlen; + + if (!cth) return NULL; + + for( p = cth; *p && *p != ' ' && *p != ';'; ++p) + ; + hlen = (apr_size_t)(p - cth); + type = apr_pcalloc( pool, hlen + 1 ); + assert(type); + memcpy(type, cth, hlen); + type[hlen] = '\0'; + + return type; + /* Could parse and return parameters here, but we don't need any at present. 
+ */ +} diff --git a/modules/md/md_util.h b/modules/md/md_util.h index 5b3a2ea..d974788 100644 --- a/modules/md/md_util.h +++ b/modules/md/md_util.h @@ -32,10 +32,80 @@ typedef apr_status_t md_util_vaction(void *baton, apr_pool_t *p, apr_pool_t *pte apr_status_t md_util_pool_do(md_util_action *cb, void *baton, apr_pool_t *p); apr_status_t md_util_pool_vdo(md_util_vaction *cb, void *baton, apr_pool_t *p, ...); +/**************************************************************************************************/ +/* data chunks */ + +typedef void md_data_free_fn(void *data); + +typedef struct md_data_t md_data_t; +struct md_data_t { + const char *data; + apr_size_t len; + md_data_free_fn *free_data; +}; + +/** + * Init the data to empty, overwriting any content. + */ +void md_data_null(md_data_t *d); + +/** + * Create a new md_data_t, providing `len` bytes allocated from pool `p`. + */ +md_data_t *md_data_pmake(apr_size_t len, apr_pool_t *p); +/** + * Initialize md_data_t 'd', providing `len` bytes allocated from pool `p`. + */ +void md_data_pinit(md_data_t *d, apr_size_t len, apr_pool_t *p); +/** + * Initialize md_data_t 'd', by borrowing 'len' bytes in `data` without copying. + * `d` will not take ownership. + */ +void md_data_init(md_data_t *d, const char *data, apr_size_t len); + +/** + * Initialize md_data_t 'd', by borrowing the NUL-terminated `str`. + * `d` will not take ownership. + */ +void md_data_init_str(md_data_t *d, const char *str); + +/** + * Free any present data and clear (NULL) it. Passing NULL is permitted. + */ +void md_data_clear(md_data_t *d); + +md_data_t *md_data_make_pcopy(apr_pool_t *p, const char *data, apr_size_t len); + +apr_status_t md_data_assign_copy(md_data_t *dest, const char *src, apr_size_t src_len); +void md_data_assign_pcopy(md_data_t *dest, const char *src, apr_size_t src_len, apr_pool_t *p); + +apr_status_t md_data_to_hex(const char **phex, char separator, + apr_pool_t *p, const md_data_t *data); + +/**************************************************************************************************/ +/* generic arrays */ + +/** + * In an array of pointers, remove all entries == elem. Returns the number + * of entries removed. + */ +int md_array_remove(struct apr_array_header_t *a, void *elem); + +/* + * Remove the ith entry from the array. + * @return != 0 iff an entry was removed, e.g. idx was not outside range + */ +int md_array_remove_at(struct apr_array_header_t *a, int idx); + /**************************************************************************************************/ /* string related */ char *md_util_str_tolower(char *s); +/** + * Return != 0 iff array is either NULL or empty + */ +int md_array_is_empty(const struct apr_array_header_t *array); + int md_array_str_index(const struct apr_array_header_t *array, const char *s, int start, int case_sensitive); @@ -44,9 +114,15 @@ int md_array_str_eq(const struct apr_array_header_t *a1, struct apr_array_header_t *md_array_str_clone(apr_pool_t *p, struct apr_array_header_t *array); +/** + * Create a new array with duplicates removed. + */ struct apr_array_header_t *md_array_str_compact(apr_pool_t *p, struct apr_array_header_t *src, int case_sensitive); +/** + * Create a new array with all occurrences of removed. 
+ */ struct apr_array_header_t *md_array_str_remove(apr_pool_t *p, struct apr_array_header_t *src, const char *exclude, int case_sensitive); @@ -55,13 +131,53 @@ int md_array_str_add_missing(struct apr_array_header_t *dest, /**************************************************************************************************/ /* process execution */ + apr_status_t md_util_exec(apr_pool_t *p, const char *cmd, const char * const *argv, int *exit_code); /**************************************************************************************************/ /* dns name check */ -int md_util_is_dns_name(apr_pool_t *p, const char *hostname, int need_fqdn); +/** + * Is a host/domain name using allowed characters. Not a wildcard. + * @param domain name to check + * @param need_fqdn iff != 0, check that domain contains '.' + * @return != 0 iff domain looks like a non-wildcard, legal DNS domain name. + */ +int md_dns_is_name(apr_pool_t *p, const char *domain, int need_fqdn); + +/** + * Check if the given domain is a valid wildcard DNS name, e.g. *.example.org + * @param domain name to check + * @return != 0 iff domain is a DNS wildcard. + */ +int md_dns_is_wildcard(apr_pool_t *p, const char *domain); + +/** + * Determine iff pattern matches domain, including case-ignore and wildcard domains. + * It is assumed that both names follow dns syntax. + * @return != 0 iff pattern matches domain + */ +int md_dns_matches(const char *pattern, const char *domain); + +/** + * Create a new array with the minimal set out of the given domain names that match all + * of them. If none of the domains is a wildcard, only duplicates are removed. + * If domains contain a wildcard, any name matching the wildcard will be removed. + */ +struct apr_array_header_t *md_dns_make_minimal(apr_pool_t *p, + struct apr_array_header_t *domains); + +/** + * Determine if the given domains cover the name, including wildcard matching. 
+ * @return != 0 iff name is matched by list of domains + */ +int md_dns_domains_match(const apr_array_header_t *domains, const char *name); + +/** + * @return != 0 iff `name` is matched by a wildcard pattern in `domains` + */ +int md_is_wild_match(const apr_array_header_t *domains, const char *name); /**************************************************************************************************/ /* file system related */ @@ -78,6 +194,8 @@ apr_status_t md_util_path_merge(const char **ppath, apr_pool_t *p, ...); apr_status_t md_util_is_dir(const char *path, apr_pool_t *pool); apr_status_t md_util_is_file(const char *path, apr_pool_t *pool); +apr_status_t md_util_is_unix_socket(const char *path, apr_pool_t *pool); +int md_file_exists(const char *fname, apr_pool_t *p); typedef apr_status_t md_util_file_cb(void *baton, struct apr_file_t *f, apr_pool_t *p); @@ -113,9 +231,8 @@ apr_status_t md_text_freplace(const char *fpath, apr_fileperms_t perms, /**************************************************************************************************/ /* base64 url encodings */ -const char *md_util_base64url_encode(const char *data, - apr_size_t len, apr_pool_t *pool); -apr_size_t md_util_base64url_decode(const char **decoded, const char *encoded, +const char *md_util_base64url_encode(const md_data_t *data, apr_pool_t *pool); +apr_size_t md_util_base64url_decode(md_data_t *decoded, const char *encoded, apr_pool_t *pool); /**************************************************************************************************/ @@ -128,6 +245,7 @@ apr_status_t md_util_abs_http_uri_check(apr_pool_t *p, const char *uri, const ch const char *md_link_find_relation(const struct apr_table_t *headers, apr_pool_t *pool, const char *relation); +const char *md_util_parse_ct(apr_pool_t *pool, const char *cth); /**************************************************************************************************/ /* retry logic */ @@ -137,12 +255,4 @@ apr_status_t md_util_try(md_util_try_fn *fn, void *baton, int ignore_errs, apr_interval_time_t timeout, apr_interval_time_t start_delay, apr_interval_time_t max_delay, int backoff); -/**************************************************************************************************/ -/* date/time related */ - -#define MD_SECS_PER_HOUR (60*60) -#define MD_SECS_PER_DAY (24*MD_SECS_PER_HOUR) - -const char *md_print_duration(apr_pool_t *p, apr_interval_time_t duration); - #endif /* md_util_h */ diff --git a/modules/md/md_version.h b/modules/md/md_version.h index 48e91a0..86a1821 100644 --- a/modules/md/md_version.h +++ b/modules/md/md_version.h @@ -27,7 +27,7 @@ * @macro * Version number of the md module as c string */ -#define MOD_MD_VERSION "1.1.17" +#define MOD_MD_VERSION "2.4.25" /** * @macro @@ -35,8 +35,9 @@ * release. This is a 24 bit number with 8 bits for major number, 8 bits * for minor and 8 bits for patch. Version 1.2.3 becomes 0x010203. */ -#define MOD_MD_VERSION_NUM 0x010111 +#define MOD_MD_VERSION_NUM 0x020419 -#define MD_ACME_DEF_URL "https://acme-v01.api.letsencrypt.org/directory" +#define MD_ACME_DEF_URL "https://acme-v02.api.letsencrypt.org/directory" +#define MD_TAILSCALE_DEF_URL "file://localhost/var/run/tailscale/tailscaled.sock" #endif /* mod_md_md_version_h */ diff --git a/modules/md/mod_md.c b/modules/md/mod_md.c index 249a0f0..6d3f5b7 100644 --- a/modules/md/mod_md.c +++ b/modules/md/mod_md.c @@ -13,33 +13,36 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - + #include #include #include -#include -#ifndef AP_ENABLE_EXCEPTION_HOOK -#define AP_ENABLE_EXCEPTION_HOOK 0 -#endif #include #include #include #include #include +#include #include #include #include +#include "mod_status.h" + #include "md.h" #include "md_curl.h" #include "md_crypt.h" +#include "md_event.h" #include "md_http.h" #include "md_json.h" #include "md_store.h" #include "md_store_fs.h" #include "md_log.h" +#include "md_ocsp.h" +#include "md_result.h" #include "md_reg.h" +#include "md_status.h" #include "md_util.h" #include "md_version.h" #include "md_acme.h" @@ -47,9 +50,10 @@ #include "mod_md.h" #include "mod_md_config.h" +#include "mod_md_drive.h" +#include "mod_md_ocsp.h" #include "mod_md_os.h" -#include "mod_ssl.h" -#include "mod_watchdog.h" +#include "mod_md_status.h" static void md_hooks(apr_pool_t *pool); @@ -66,14 +70,251 @@ AP_DECLARE_MODULE(md) = { #endif }; -static void md_merge_srv(md_t *md, md_srv_conf_t *base_sc, apr_pool_t *p) +/**************************************************************************************************/ +/* logging setup */ + +static server_rec *log_server; + +static int log_is_level(void *baton, apr_pool_t *p, md_log_level_t level) +{ + (void)baton; + (void)p; + if (log_server) { + return APLOG_IS_LEVEL(log_server, (int)level); + } + return level <= MD_LOG_INFO; +} + +#define LOG_BUF_LEN 16*1024 + +static void log_print(const char *file, int line, md_log_level_t level, + apr_status_t rv, void *baton, apr_pool_t *p, const char *fmt, va_list ap) +{ + if (log_is_level(baton, p, level)) { + char buffer[LOG_BUF_LEN]; + + memset(buffer, 0, sizeof(buffer)); + apr_vsnprintf(buffer, LOG_BUF_LEN-1, fmt, ap); + buffer[LOG_BUF_LEN-1] = '\0'; + + if (log_server) { + ap_log_error(file, line, APLOG_MODULE_INDEX, (int)level, rv, log_server, "%s", buffer); + } + else { + ap_log_perror(file, line, APLOG_MODULE_INDEX, (int)level, rv, p, "%s", buffer); + } + } +} + +/**************************************************************************************************/ +/* mod_ssl interface */ + +static void init_ssl(void) +{ + /* nop */ +} + +/**************************************************************************************************/ +/* lifecycle */ + +static apr_status_t cleanup_setups(void *dummy) +{ + (void)dummy; + log_server = NULL; + return APR_SUCCESS; +} + +static void init_setups(apr_pool_t *p, server_rec *base_server) +{ + log_server = base_server; + apr_pool_cleanup_register(p, NULL, cleanup_setups, apr_pool_cleanup_null); +} + +/**************************************************************************************************/ +/* notification handling */ + +typedef struct { + const char *reason; /* what the notification is about */ + apr_time_t min_interim; /* minimum time between notifying for this reason */ +} notify_rate; + +static notify_rate notify_rates[] = { + { "renewing", apr_time_from_sec(MD_SECS_PER_HOUR) }, /* once per hour */ + { "renewed", apr_time_from_sec(MD_SECS_PER_DAY) }, /* once per day */ + { "installed", apr_time_from_sec(MD_SECS_PER_DAY) }, /* once per day */ + { "expiring", apr_time_from_sec(MD_SECS_PER_DAY) }, /* once per day */ + { "errored", apr_time_from_sec(MD_SECS_PER_HOUR) }, /* once per hour */ + { "ocsp-renewed", apr_time_from_sec(MD_SECS_PER_DAY) }, /* once per day */ + { "ocsp-errored", apr_time_from_sec(MD_SECS_PER_HOUR) }, /* once per hour */ +}; + +static apr_status_t notify(md_job_t *job, const char *reason, + md_result_t *result, apr_pool_t *p, void *baton) +{ + md_mod_conf_t *mc = baton; + const 
char * const *argv; + const char *cmdline; + int exit_code; + apr_status_t rv = APR_SUCCESS; + apr_time_t min_interim = 0; + md_timeperiod_t since_last; + const char *log_msg_reason; + int i; + + log_msg_reason = apr_psprintf(p, "message-%s", reason); + for (i = 0; i < (int)(sizeof(notify_rates)/sizeof(notify_rates[0])); ++i) { + if (!strcmp(reason, notify_rates[i].reason)) { + min_interim = notify_rates[i].min_interim; + } + } + if (min_interim > 0) { + since_last.start = md_job_log_get_time_of_latest(job, log_msg_reason); + since_last.end = apr_time_now(); + if (since_last.start > 0 && md_timeperiod_length(&since_last) < min_interim) { + /* not enough time has passed since we sent the last notification + * for this reason. */ + md_log_perror(MD_LOG_MARK, MD_LOG_DEBUG, 0, p, APLOGNO(10267) + "%s: rate limiting notification about '%s'", job->mdomain, reason); + return APR_SUCCESS; + } + } + + if (!strcmp("renewed", reason)) { + if (mc->notify_cmd) { + cmdline = apr_psprintf(p, "%s %s", mc->notify_cmd, job->mdomain); + apr_tokenize_to_argv(cmdline, (char***)&argv, p); + rv = md_util_exec(p, argv[0], argv, &exit_code); + + if (APR_SUCCESS == rv && exit_code) rv = APR_EGENERAL; + if (APR_SUCCESS != rv) { + md_result_problem_printf(result, rv, MD_RESULT_LOG_ID(APLOGNO(10108)), + "MDNotifyCmd %s failed with exit code %d.", + mc->notify_cmd, exit_code); + md_result_log(result, MD_LOG_ERR); + md_job_log_append(job, "notify-error", result->problem, result->detail); + return rv; + } + } + md_log_perror(MD_LOG_MARK, MD_LOG_NOTICE, 0, p, APLOGNO(10059) + "The Managed Domain %s has been setup and changes " + "will be activated on next (graceful) server restart.", job->mdomain); + } + if (mc->message_cmd) { + cmdline = apr_psprintf(p, "%s %s %s", mc->message_cmd, reason, job->mdomain); + apr_tokenize_to_argv(cmdline, (char***)&argv, p); + rv = md_util_exec(p, argv[0], argv, &exit_code); + + if (APR_SUCCESS == rv && exit_code) rv = APR_EGENERAL; + if (APR_SUCCESS != rv) { + md_result_problem_printf(result, rv, MD_RESULT_LOG_ID(APLOGNO(10109)), + "MDMessageCmd %s failed with exit code %d.", + mc->message_cmd, exit_code); + md_result_log(result, MD_LOG_ERR); + md_job_log_append(job, "message-error", reason, result->detail); + return rv; + } + } + + md_job_log_append(job, log_msg_reason, NULL, NULL); + return APR_SUCCESS; +} + +static apr_status_t on_event(const char *event, const char *mdomain, void *baton, + md_job_t *job, md_result_t *result, apr_pool_t *p) +{ + (void)mdomain; + return notify(job, event, result, p, baton); +} + +/**************************************************************************************************/ +/* store setup */ + +static apr_status_t store_file_ev(void *baton, struct md_store_t *store, + md_store_fs_ev_t ev, unsigned int group, + const char *fname, apr_filetype_e ftype, + apr_pool_t *p) +{ + server_rec *s = baton; + apr_status_t rv; + + (void)store; + ap_log_error(APLOG_MARK, APLOG_TRACE3, 0, s, "store event=%d on %s %s (group %d)", + ev, (ftype == APR_DIR)? "dir" : "file", fname, group); + + /* Directories in group CHALLENGES, STAGING and OCSP are written to + * under a different user. Give her ownership. 
+ */ + if (ftype == APR_DIR) { + switch (group) { + case MD_SG_CHALLENGES: + case MD_SG_STAGING: + case MD_SG_OCSP: + rv = md_make_worker_accessible(fname, p); + if (APR_ENOTIMPL != rv) { + return rv; + } + break; + default: + break; + } + } + return APR_SUCCESS; +} + +static apr_status_t check_group_dir(md_store_t *store, md_store_group_t group, + apr_pool_t *p, server_rec *s) { + const char *dir; + apr_status_t rv; + + if (APR_SUCCESS == (rv = md_store_get_fname(&dir, store, group, NULL, NULL, p)) + && APR_SUCCESS == (rv = apr_dir_make_recursive(dir, MD_FPROT_D_UALL_GREAD, p))) { + rv = store_file_ev(s, store, MD_S_FS_EV_CREATED, group, dir, APR_DIR, p); + } + return rv; +} + +static apr_status_t setup_store(md_store_t **pstore, md_mod_conf_t *mc, + apr_pool_t *p, server_rec *s) +{ + const char *base_dir; + apr_status_t rv; + + base_dir = ap_server_root_relative(p, mc->base_dir); + + if (APR_SUCCESS != (rv = md_store_fs_init(pstore, p, base_dir))) { + ap_log_error(APLOG_MARK, APLOG_ERR, rv, s, APLOGNO(10046)"setup store for %s", base_dir); + goto leave; + } + + md_store_fs_set_event_cb(*pstore, store_file_ev, s); + if (APR_SUCCESS != (rv = check_group_dir(*pstore, MD_SG_CHALLENGES, p, s)) + || APR_SUCCESS != (rv = check_group_dir(*pstore, MD_SG_STAGING, p, s)) + || APR_SUCCESS != (rv = check_group_dir(*pstore, MD_SG_ACCOUNTS, p, s)) + || APR_SUCCESS != (rv = check_group_dir(*pstore, MD_SG_OCSP, p, s)) + ) { + ap_log_error(APLOG_MARK, APLOG_ERR, rv, s, APLOGNO(10047) + "setup challenges directory"); + goto leave; + } + +leave: + return rv; +} + +/**************************************************************************************************/ +/* post config handling */ + +static void merge_srv_config(md_t *md, md_srv_conf_t *base_sc, apr_pool_t *p) +{ + const char *contact; + if (!md->sc) { md->sc = base_sc; } - if (!md->ca_url) { - md->ca_url = md_config_gets(md->sc, MD_CONFIG_CA_URL); + if (!md->ca_urls && md->sc->ca_urls) { + md->ca_urls = apr_array_copy(p, md->sc->ca_urls); } if (!md->ca_proto) { md->ca_proto = md_config_gets(md->sc, MD_CONFIG_CA_PROTO); @@ -81,90 +322,92 @@ static void md_merge_srv(md_t *md, md_srv_conf_t *base_sc, apr_pool_t *p) if (!md->ca_agreement) { md->ca_agreement = md_config_gets(md->sc, MD_CONFIG_CA_AGREEMENT); } - if (md->sc->s->server_admin && strcmp(DEFAULT_ADMIN, md->sc->s->server_admin)) { + contact = md_config_gets(md->sc, MD_CONFIG_CA_CONTACT); + if (md->contacts && md->contacts->nelts > 0) { + /* set explicitly */ + } + else if (contact && contact[0]) { apr_array_clear(md->contacts); - APR_ARRAY_PUSH(md->contacts, const char *) = - md_util_schemify(p, md->sc->s->server_admin, "mailto"); + APR_ARRAY_PUSH(md->contacts, const char *) = + md_util_schemify(p, contact, "mailto"); } - if (md->drive_mode == MD_DRIVE_DEFAULT) { - md->drive_mode = md_config_geti(md->sc, MD_CONFIG_DRIVE_MODE); + else if( md->sc->s->server_admin && strcmp(DEFAULT_ADMIN, md->sc->s->server_admin)) { + apr_array_clear(md->contacts); + APR_ARRAY_PUSH(md->contacts, const char *) = + md_util_schemify(p, md->sc->s->server_admin, "mailto"); } - if (md->renew_norm <= 0 && md->renew_window <= 0) { - md->renew_norm = md_config_get_interval(md->sc, MD_CONFIG_RENEW_NORM); - md->renew_window = md_config_get_interval(md->sc, MD_CONFIG_RENEW_WINDOW); + if (md->renew_mode == MD_RENEW_DEFAULT) { + md->renew_mode = md_config_geti(md->sc, MD_CONFIG_DRIVE_MODE); } + if (!md->renew_window) md_config_get_timespan(&md->renew_window, md->sc, MD_CONFIG_RENEW_WINDOW); + if (!md->warn_window) 
md_config_get_timespan(&md->warn_window, md->sc, MD_CONFIG_WARN_WINDOW); if (md->transitive < 0) { md->transitive = md_config_geti(md->sc, MD_CONFIG_TRANSITIVE); } if (!md->ca_challenges && md->sc->ca_challenges) { md->ca_challenges = apr_array_copy(p, md->sc->ca_challenges); - } - if (!md->pkey_spec) { - md->pkey_spec = md->sc->pkey_spec; - + } + if (md_pkeys_spec_is_empty(md->pks)) { + md->pks = md->sc->pks; } if (md->require_https < 0) { md->require_https = md_config_geti(md->sc, MD_CONFIG_REQUIRE_HTTPS); } + if (!md->ca_eab_kid) { + md->ca_eab_kid = md->sc->ca_eab_kid; + md->ca_eab_hmac = md->sc->ca_eab_hmac; + } if (md->must_staple < 0) { md->must_staple = md_config_geti(md->sc, MD_CONFIG_MUST_STAPLE); } + if (md->stapling < 0) { + md->stapling = md_config_geti(md->sc, MD_CONFIG_STAPLING); + } } -static apr_status_t check_coverage(md_t *md, const char *domain, server_rec *s, apr_pool_t *p) +static apr_status_t check_coverage(md_t *md, const char *domain, server_rec *s, + int *pupdates, apr_pool_t *p) { if (md_contains(md, domain, 0)) { return APR_SUCCESS; } else if (md->transitive) { APR_ARRAY_PUSH(md->domains, const char*) = apr_pstrdup(p, domain); + *pupdates |= MD_UPD_DOMAINS; return APR_SUCCESS; } else { - ap_log_error(APLOG_MARK, APLOG_ERR, 0, s, APLOGNO(10040) + ap_log_error(APLOG_MARK, APLOG_WARNING, 0, s, APLOGNO(10040) "Virtual Host %s:%d matches Managed Domain '%s', but the " "name/alias %s itself is not managed. A requested MD certificate " "will not match ServerName.", s->server_hostname, s->port, md->name, domain); - return APR_EINVAL; + return APR_SUCCESS; } } -static apr_status_t md_covers_server(md_t *md, server_rec *s, apr_pool_t *p) +static apr_status_t md_cover_server(md_t *md, server_rec *s, int *pupdates, apr_pool_t *p) { apr_status_t rv; const char *name; int i; - - if (APR_SUCCESS == (rv = check_coverage(md, s->server_hostname, s, p)) && s->names) { - for (i = 0; i < s->names->nelts; ++i) { + + if (APR_SUCCESS == (rv = check_coverage(md, s->server_hostname, s, pupdates, p))) { + ap_log_error(APLOG_MARK, APLOG_TRACE1, 0, s, + "md[%s]: auto add, covers name %s", md->name, s->server_hostname); + for (i = 0; s->names && i < s->names->nelts; ++i) { name = APR_ARRAY_IDX(s->names, i, const char*); - if (APR_SUCCESS != (rv = check_coverage(md, name, s, p))) { + if (APR_SUCCESS != (rv = check_coverage(md, name, s, pupdates, p))) { break; } + ap_log_error(APLOG_MARK, APLOG_TRACE1, 0, s, + "md[%s]: auto add, covers alias %s", md->name, name); } } return rv; } -static int matches_port_somewhere(server_rec *s, int port) -{ - server_addr_rec *sa; - - for (sa = s->addrs; sa; sa = sa->next) { - if (sa->host_port == port) { - /* host_addr might be general (0.0.0.0) or specific, we count this as match */ - return 1; - } - if (sa->host_port == 0) { - /* wildcard port, answers to all ports. Rare, but may work. 
*/ - return 1; - } - } - return 0; -} - -static int uses_port_only(server_rec *s, int port) +static int uses_port(server_rec *s, int port) { server_addr_rec *sa; int match = 0; @@ -181,839 +424,444 @@ static int uses_port_only(server_rec *s, int port) return match; } -static apr_status_t assign_to_servers(md_t *md, server_rec *base_server, - apr_pool_t *p, apr_pool_t *ptemp) +static apr_status_t detect_supported_protocols(md_mod_conf_t *mc, server_rec *s, + apr_pool_t *p, int log_level) { - server_rec *s, *s_https; - request_rec r; - md_srv_conf_t *sc; - md_mod_conf_t *mc; - apr_status_t rv = APR_SUCCESS; - int i; - const char *domain; - apr_array_header_t *servers; - - sc = md_config_get(base_server); - mc = sc->mc; - - /* Assign the MD to all server_rec configs that it matches. If there already - * is an assigned MD not equal this one, the configuration is in error. - */ - memset(&r, 0, sizeof(r)); - servers = apr_array_make(ptemp, 5, sizeof(server_rec*)); - - for (s = base_server; s; s = s->next) { - if (!mc->manage_base_server && s == base_server) { - /* we shall not assign ourselves to the base server */ - continue; - } - - r.server = s; - for (i = 0; i < md->domains->nelts; ++i) { - domain = APR_ARRAY_IDX(md->domains, i, const char*); - - if (ap_matches_request_vhost(&r, domain, s->port)) { - /* Create a unique md_srv_conf_t record for this server, if there is none yet */ - sc = md_config_get_unique(s, p); - - ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, base_server, APLOGNO(10041) - "Server %s:%d matches md %s (config %s)", - s->server_hostname, s->port, md->name, sc->name); - - if (sc->assigned == md) { - /* already matched via another domain name */ - goto next_server; - } - else if (sc->assigned) { - ap_log_error(APLOG_MARK, APLOG_ERR, 0, base_server, APLOGNO(10042) - "conflict: MD %s matches server %s, but MD %s also matches.", - md->name, s->server_hostname, sc->assigned->name); - return APR_EINVAL; - } - - /* If this server_rec is only for http: requests. Defined - * alias names to not matter for this MD. - * (see gh issue https://github.com/icing/mod_md/issues/57) - * Otherwise, if server has name or an alias not covered, - * it is by default auto-added (config transitive). - * If mode is "manual", a generated certificate will not match - * all necessary names. */ - if ((!mc->local_80 || !uses_port_only(s, mc->local_80)) - && APR_SUCCESS != (rv = md_covers_server(md, s, p))) { - return rv; - } - - sc->assigned = md; - APR_ARRAY_PUSH(servers, server_rec*) = s; - - ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, base_server, APLOGNO(10043) - "Managed Domain %s applies to vhost %s:%d", md->name, - s->server_hostname, s->port); - - goto next_server; - } - } - next_server: - continue; - } - - if (APR_SUCCESS == rv) { - if (apr_is_empty_array(servers)) { - if (md->drive_mode != MD_DRIVE_ALWAYS) { - /* Not an error, but looks suspicious */ - ap_log_error(APLOG_MARK, APLOG_WARNING, 0, base_server, APLOGNO(10045) - "No VirtualHost matches Managed Domain %s", md->name); - APR_ARRAY_PUSH(mc->unused_names, const char*) = md->name; - } - } - else { - const char *uri; - - /* Found matching server_rec's. 
Collect all 'ServerAdmin's into MD's contact list */ - apr_array_clear(md->contacts); - for (i = 0; i < servers->nelts; ++i) { - s = APR_ARRAY_IDX(servers, i, server_rec*); - if (s->server_admin && strcmp(DEFAULT_ADMIN, s->server_admin)) { - uri = md_util_schemify(p, s->server_admin, "mailto"); - if (md_array_str_index(md->contacts, uri, 0, 0) < 0) { - APR_ARRAY_PUSH(md->contacts, const char *) = uri; - ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, base_server, APLOGNO(10044) - "%s: added contact %s", md->name, uri); - } - } - } - - if (md->require_https > MD_REQUIRE_OFF) { - /* We require https for this MD, but do we have port 443 (or a mapped one) - * available? */ - if (mc->local_443 <= 0) { - ap_log_error(APLOG_MARK, APLOG_ERR, 0, base_server, APLOGNO(10105) - "MDPortMap says there is no port for https (443), " - "but MD %s is configured to require https. This " - "only works when a 443 port is available.", md->name); - return APR_EINVAL; - - } - - /* Ok, we know which local port represents 443, do we have a server_rec - * for MD that has addresses with port 443? */ - s_https = NULL; - for (i = 0; i < servers->nelts; ++i) { - s = APR_ARRAY_IDX(servers, i, server_rec*); - if (matches_port_somewhere(s, mc->local_443)) { - s_https = s; - break; - } - } - - if (!s_https) { - /* Did not find any server_rec that matches this MD *and* has an - * s->addrs match for the https port. Suspicious. */ - ap_log_error(APLOG_MARK, APLOG_WARNING, 0, base_server, APLOGNO(10106) - "MD %s is configured to require https, but there seems to be " - "no VirtualHost for it that has port %d in its address list. " - "This looks as if it will not work.", - md->name, mc->local_443); - } - } - } - - } - return rv; -} - -static apr_status_t md_calc_md_list(apr_pool_t *p, apr_pool_t *plog, - apr_pool_t *ptemp, server_rec *base_server) -{ - md_srv_conf_t *sc; - md_mod_conf_t *mc; - md_t *md, *omd; - const char *domain; - apr_status_t rv = APR_SUCCESS; ap_listen_rec *lr; apr_sockaddr_t *sa; - int i, j; + int can_http, can_https; - (void)plog; - sc = md_config_get(base_server); - mc = sc->mc; - - mc->can_http = 0; - mc->can_https = 0; + if (mc->can_http >= 0 && mc->can_https >= 0) goto set_and_leave; + can_http = can_https = 0; for (lr = ap_listeners; lr; lr = lr->next) { for (sa = lr->bind_addr; sa; sa = sa->next) { - if (sa->port == mc->local_80 + if (sa->port == mc->local_80 && (!lr->protocol || !strncmp("http", lr->protocol, 4))) { - mc->can_http = 1; + can_http = 1; } else if (sa->port == mc->local_443 && (!lr->protocol || !strncmp("http", lr->protocol, 4))) { - mc->can_https = 1; + can_https = 1; } } } - - ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, base_server, APLOGNO(10037) - "server seems%s reachable via http: (port 80->%d) " - "and%s reachable via https: (port 443->%d) ", - mc->can_http? "" : " not", mc->local_80, - mc->can_https? "" : " not", mc->local_443); - - /* Complete the properties of the MDs, now that we have the complete, merged - * server configurations. 
- */ - for (i = 0; i < mc->mds->nelts; ++i) { - md = APR_ARRAY_IDX(mc->mds, i, md_t*); - md_merge_srv(md, sc, p); - - /* Check that we have no overlap with the MDs already completed */ - for (j = 0; j < i; ++j) { - omd = APR_ARRAY_IDX(mc->mds, j, md_t*); - if ((domain = md_common_name(md, omd)) != NULL) { - ap_log_error(APLOG_MARK, APLOG_ERR, 0, base_server, APLOGNO(10038) - "two Managed Domains have an overlap in domain '%s'" - ", first definition in %s(line %d), second in %s(line %d)", - domain, md->defn_name, md->defn_line_number, - omd->defn_name, omd->defn_line_number); - return APR_EINVAL; - } - } + if (mc->can_http < 0) mc->can_http = can_http; + if (mc->can_https < 0) mc->can_https = can_https; + ap_log_error(APLOG_MARK, log_level, 0, s, APLOGNO(10037) + "server seems%s reachable via http: and%s reachable via https:", + mc->can_http? "" : " not", mc->can_https? "" : " not"); +set_and_leave: + return md_reg_set_props(mc->reg, p, mc->can_http, mc->can_https); +} - /* Assign MD to the server_rec configs that it matches. Perform some - * last finishing touches on the MD. */ - if (APR_SUCCESS != (rv = assign_to_servers(md, base_server, p, ptemp))) { - return rv; - } +static server_rec *get_public_https_server(md_t *md, const char *domain, server_rec *base_server) +{ + md_srv_conf_t *sc; + md_mod_conf_t *mc; + server_rec *s; + server_rec *res = NULL; + request_rec r; + int i; + int check_port = 1; + + sc = md_config_get(base_server); + mc = sc->mc; + memset(&r, 0, sizeof(r)); - ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, base_server, APLOGNO(10039) - "Completed MD[%s, CA=%s, Proto=%s, Agreement=%s, Drive=%d, renew=%ld]", - md->name, md->ca_url, md->ca_proto, md->ca_agreement, - md->drive_mode, (long)md->renew_window); + if (md->ca_challenges && md->ca_challenges->nelts > 0) { + /* skip the port check if "tls-alpn-01" is pre-configured */ + check_port = !(md_array_str_index(md->ca_challenges, MD_AUTHZ_TYPE_TLSALPN01, 0, 0) >= 0); } - - return rv; -} -/**************************************************************************************************/ -/* store & registry setup */ + if (check_port && !mc->can_https) return NULL; -static apr_status_t store_file_ev(void *baton, struct md_store_t *store, - md_store_fs_ev_t ev, int group, - const char *fname, apr_filetype_e ftype, - apr_pool_t *p) -{ - server_rec *s = baton; - apr_status_t rv; - - (void)store; - ap_log_error(APLOG_MARK, APLOG_TRACE3, 0, s, "store event=%d on %s %s (group %d)", - ev, (ftype == APR_DIR)? "dir" : "file", fname, group); - - /* Directories in group CHALLENGES and STAGING are written to by our watchdog, - * running on certain mpms in a child process under a different user. Give them - * ownership. - */ - if (ftype == APR_DIR) { - switch (group) { - case MD_SG_CHALLENGES: - case MD_SG_STAGING: - rv = md_make_worker_accessible(fname, p); - if (APR_ENOTIMPL != rv) { - return rv; + /* find an ssl server matching domain from MD */ + for (s = base_server; s; s = s->next) { + sc = md_config_get(s); + if (!sc || !sc->is_ssl || !sc->assigned) continue; + if (base_server == s && !mc->manage_base_server) continue; + if (base_server != s && check_port && mc->local_443 > 0 && !uses_port(s, mc->local_443)) continue; + for (i = 0; i < sc->assigned->nelts; ++i) { + if (md == APR_ARRAY_IDX(sc->assigned, i, md_t*)) { + r.server = s; + if (ap_matches_request_vhost(&r, domain, s->port)) { + if (check_port) { + return s; + } + else { + /* there may be multiple matching servers because we ignore the port. 
+ if possible, choose a server that supports the acme-tls/1 protocol */ + if (ap_is_allowed_protocol(NULL, NULL, s, PROTO_ACME_TLS_1)) { + return s; + } + res = s; + } } - break; - default: - break; + } } } - return APR_SUCCESS; + return res; } -static apr_status_t check_group_dir(md_store_t *store, md_store_group_t group, - apr_pool_t *p, server_rec *s) +static apr_status_t auto_add_domains(md_t *md, server_rec *base_server, apr_pool_t *p) { - const char *dir; - apr_status_t rv; - - if (APR_SUCCESS == (rv = md_store_get_fname(&dir, store, group, NULL, NULL, p)) - && APR_SUCCESS == (rv = apr_dir_make_recursive(dir, MD_FPROT_D_UALL_GREAD, p))) { - rv = store_file_ev(s, store, MD_S_FS_EV_CREATED, group, dir, APR_DIR, p); - } - return rv; -} + md_srv_conf_t *sc; + server_rec *s; + apr_status_t rv = APR_SUCCESS; + int updates; -static apr_status_t setup_store(md_store_t **pstore, md_mod_conf_t *mc, - apr_pool_t *p, server_rec *s) -{ - const char *base_dir; - apr_status_t rv; - MD_CHK_VARS; - - base_dir = ap_server_root_relative(p, mc->base_dir); - - if (!MD_OK(md_store_fs_init(pstore, p, base_dir))) { - ap_log_error(APLOG_MARK, APLOG_ERR, rv, s, APLOGNO(10046)"setup store for %s", base_dir); - goto out; + /* Ad all domain names used in SSL VirtualHosts, if not already there */ + ap_log_error(APLOG_MARK, APLOG_TRACE1, 0, base_server, + "md[%s]: auto add domains", md->name); + updates = 0; + for (s = base_server; s; s = s->next) { + sc = md_config_get(s); + if (!sc || !sc->is_ssl || !sc->assigned || sc->assigned->nelts != 1) continue; + if (md != APR_ARRAY_IDX(sc->assigned, 0, md_t*)) continue; + if (APR_SUCCESS != (rv = md_cover_server(md, s, &updates, p))) { + return rv; + } } - - md_store_fs_set_event_cb(*pstore, store_file_ev, s); - if ( !MD_OK(check_group_dir(*pstore, MD_SG_CHALLENGES, p, s)) - || !MD_OK(check_group_dir(*pstore, MD_SG_STAGING, p, s)) - || !MD_OK(check_group_dir(*pstore, MD_SG_ACCOUNTS, p, s))) { - ap_log_error(APLOG_MARK, APLOG_ERR, rv, s, APLOGNO(10047) - "setup challenges directory, call %s", MD_LAST_CHK); - } - -out: return rv; } -static apr_status_t setup_reg(md_reg_t **preg, apr_pool_t *p, server_rec *s, - int can_http, int can_https) +static void init_acme_tls_1_domains(md_t *md, server_rec *base_server) { md_srv_conf_t *sc; md_mod_conf_t *mc; - md_store_t *store; - apr_status_t rv; - MD_CHK_VARS; - - sc = md_config_get(s); - mc = sc->mc; - - if ( MD_OK(setup_store(&store, mc, p, s)) - && MD_OK(md_reg_init(preg, p, store, mc->proxy_url))) { - mc->reg = *preg; - return md_reg_set_props(*preg, p, can_http, can_https); - } - return rv; -} - -/**************************************************************************************************/ -/* logging setup */ - -static server_rec *log_server; - -static int log_is_level(void *baton, apr_pool_t *p, md_log_level_t level) -{ - (void)baton; - (void)p; - if (log_server) { - return APLOG_IS_LEVEL(log_server, (int)level); - } - return level <= MD_LOG_INFO; -} - -#define LOG_BUF_LEN 16*1024 - -static void log_print(const char *file, int line, md_log_level_t level, - apr_status_t rv, void *baton, apr_pool_t *p, const char *fmt, va_list ap) -{ - if (log_is_level(baton, p, level)) { - char buffer[LOG_BUF_LEN]; - - memset(buffer, 0, sizeof(buffer)); - apr_vsnprintf(buffer, LOG_BUF_LEN-1, fmt, ap); - buffer[LOG_BUF_LEN-1] = '\0'; + server_rec *s; + int i; + const char *domain; - if (log_server) { - ap_log_error(file, line, APLOG_MODULE_INDEX, level, rv, log_server, "%s",buffer); + /* Collect those domains that support the 
"acme-tls/1" protocol. This + * is part of the MD (and not tested dynamically), since challenge selection + * may be done outside the server, e.g. in the a2md command. */ + sc = md_config_get(base_server); + mc = sc->mc; + apr_array_clear(md->acme_tls_1_domains); + for (i = 0; i < md->domains->nelts; ++i) { + domain = APR_ARRAY_IDX(md->domains, i, const char*); + s = get_public_https_server(md, domain, base_server); + /* If we did not find a specific virtualhost for md and manage + * the base_server, that one is inspected */ + if (NULL == s && mc->manage_base_server) s = base_server; + if (NULL == s) { + ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, base_server, APLOGNO(10168) + "%s: no https server_rec found for %s", md->name, domain); + continue; } - else { - ap_log_perror(file, line, APLOG_MODULE_INDEX, level, rv, p, "%s", buffer); + if (!ap_is_allowed_protocol(NULL, NULL, s, PROTO_ACME_TLS_1)) { + ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, base_server, APLOGNO(10169) + "%s: https server_rec for %s does not have protocol %s enabled", + md->name, domain, PROTO_ACME_TLS_1); + continue; } + APR_ARRAY_PUSH(md->acme_tls_1_domains, const char*) = domain; } } -/**************************************************************************************************/ -/* lifecycle */ - -static apr_status_t cleanup_setups(void *dummy) -{ - (void)dummy; - log_server = NULL; - return APR_SUCCESS; -} - -static void init_setups(apr_pool_t *p, server_rec *base_server) -{ - log_server = base_server; - apr_pool_cleanup_register(p, NULL, cleanup_setups, apr_pool_cleanup_null); -} - -/**************************************************************************************************/ -/* mod_ssl interface */ - -static APR_OPTIONAL_FN_TYPE(ssl_is_https) *opt_ssl_is_https; - -static void init_ssl(void) +static apr_status_t link_md_to_servers(md_mod_conf_t *mc, md_t *md, server_rec *base_server, + apr_pool_t *p) { - opt_ssl_is_https = APR_RETRIEVE_OPTIONAL_FN(ssl_is_https); -} - -/**************************************************************************************************/ -/* watchdog based impl. */ - -#define MD_WATCHDOG_NAME "_md_" - -static APR_OPTIONAL_FN_TYPE(ap_watchdog_get_instance) *wd_get_instance; -static APR_OPTIONAL_FN_TYPE(ap_watchdog_register_callback) *wd_register_callback; -static APR_OPTIONAL_FN_TYPE(ap_watchdog_set_callback_interval) *wd_set_interval; + server_rec *s; + request_rec r; + md_srv_conf_t *sc; + int i; + const char *domain, *uri; -typedef struct { - md_t *md; + sc = md_config_get(base_server); - int stalled; - int renewed; - int renewal_notified; - apr_time_t restart_at; - int need_restart; - int restart_processed; + /* Assign the MD to all server_rec configs that it matches. If there already + * is an assigned MD not equal this one, the configuration is in error. 
+ */ + memset(&r, 0, sizeof(r)); + for (s = base_server; s; s = s->next) { + if (!mc->manage_base_server && s == base_server) { + /* we shall not assign ourselves to the base server */ + continue; + } - apr_status_t last_rv; - apr_time_t next_check; - int error_runs; -} md_job_t; + r.server = s; + for (i = 0; i < md->domains->nelts; ++i) { + domain = APR_ARRAY_IDX(md->domains, i, const char*); -typedef struct { - apr_pool_t *p; - server_rec *s; - md_mod_conf_t *mc; - ap_watchdog_t *watchdog; - - apr_time_t next_change; - - apr_array_header_t *jobs; - md_reg_t *reg; -} md_watchdog; + if ((mc->match_mode == MD_MATCH_ALL && + ap_matches_request_vhost(&r, domain, s->port)) + || (((mc->match_mode == MD_MATCH_SERVERNAMES) || md_dns_is_wildcard(p, domain)) && + md_dns_matches(domain, s->server_hostname))) { + /* Create a unique md_srv_conf_t record for this server, if there is none yet */ + sc = md_config_get_unique(s, p); + if (!sc->assigned) sc->assigned = apr_array_make(p, 2, sizeof(md_t*)); + if (sc->assigned->nelts == 1 && mc->match_mode == MD_MATCH_SERVERNAMES) { + /* there is already an MD assigned for this server. But in + * this match mode, wildcard matches are pre-empted by non-wildcards */ + int existing_wild = md_is_wild_match( + APR_ARRAY_IDX(sc->assigned, 0, const md_t*)->domains, + s->server_hostname); + if (!existing_wild && md_dns_is_wildcard(p, domain)) + continue; /* do not add */ + if (existing_wild && !md_dns_is_wildcard(p, domain)) + sc->assigned->nelts = 0; /* overwrite existing */ + } + APR_ARRAY_PUSH(sc->assigned, md_t*) = md; + ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, base_server, APLOGNO(10041) + "Server %s:%d matches md %s (config %s, match-mode=%d) " + "for domain %s, has now %d MDs", + s->server_hostname, s->port, md->name, sc->name, + mc->match_mode, domain, (int)sc->assigned->nelts); -static void assess_renewal(md_watchdog *wd, md_job_t *job, apr_pool_t *ptemp) -{ - apr_time_t now = apr_time_now(); - if (now >= job->restart_at) { - job->need_restart = 1; - ap_log_error( APLOG_MARK, APLOG_TRACE1, 0, wd->s, - "md(%s): has been renewed, needs restart now", job->md->name); - } - else { - job->next_check = job->restart_at; - - if (job->renewal_notified) { - ap_log_error(APLOG_MARK, APLOG_TRACE1, 0, wd->s, - "%s: renewed cert valid in %s", - job->md->name, md_print_duration(ptemp, job->restart_at - now)); - } - else { - char ts[APR_RFC822_DATE_LEN]; - - apr_rfc822_date(ts, job->restart_at); - ap_log_error(APLOG_MARK, APLOG_NOTICE, 0, wd->s, APLOGNO(10051) - "%s: has been renewed successfully and should be activated at %s" - " (this requires a server restart latest in %s)", - job->md->name, ts, md_print_duration(ptemp, job->restart_at - now)); - job->renewal_notified = 1; + if (md->contacts && md->contacts->nelts > 0) { + /* set explicitly */ + } + else if (sc->ca_contact && sc->ca_contact[0]) { + uri = md_util_schemify(p, sc->ca_contact, "mailto"); + if (md_array_str_index(md->contacts, uri, 0, 0) < 0) { + APR_ARRAY_PUSH(md->contacts, const char *) = uri; + ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, base_server, APLOGNO(10044) + "%s: added contact %s", md->name, uri); + } + } + else if (s->server_admin && strcmp(DEFAULT_ADMIN, s->server_admin)) { + uri = md_util_schemify(p, s->server_admin, "mailto"); + if (md_array_str_index(md->contacts, uri, 0, 0) < 0) { + APR_ARRAY_PUSH(md->contacts, const char *) = uri; + ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, base_server, APLOGNO(10237) + "%s: added contact %s", md->name, uri); + } + } + break; + } } } + return APR_SUCCESS; } 
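
The MD_MATCH_SERVERNAMES branch above gives a non-wildcard MD domain precedence over an already assigned wildcard match for the same ServerName. The following standalone sketch only illustrates that precedence rule; it uses a hypothetical matches_servername() helper instead of the md_dns_matches()/md_is_wild_match() calls in the patch, and it assumes single-label wildcard semantics as with ACME wildcard certificates.

    #include <stdio.h>
    #include <string.h>

    /* Hypothetical helper: does `pattern` cover `host`?  A leading "*."
     * is assumed to match exactly one additional label. */
    static int matches_servername(const char *pattern, const char *host)
    {
        if (strncmp(pattern, "*.", 2) == 0) {
            const char *dot = strchr(host, '.');
            /* one non-empty label before the dot, exact match on the rest */
            return dot && dot != host && strcmp(dot + 1, pattern + 2) == 0;
        }
        return strcmp(pattern, host) == 0;
    }

    int main(void)
    {
        const char *host = "www.example.org";
        const char *assigned = NULL;   /* pattern of the MD currently assigned */
        const char *candidates[] = { "*.example.org", "www.example.org" };
        size_t i;

        for (i = 0; i < sizeof(candidates)/sizeof(candidates[0]); ++i) {
            const char *c = candidates[i];
            if (!matches_servername(c, host)) continue;
            if (!assigned) {
                assigned = c;                    /* first match wins for now */
            }
            else if (!strncmp(assigned, "*.", 2) && strncmp(c, "*.", 2)) {
                assigned = c;                    /* exact name pre-empts wildcard */
            }
            /* a wildcard never replaces an already assigned exact name */
        }
        printf("assigned pattern for %s: %s\n", host, assigned ? assigned : "(none)");
        return 0;
    }

With these two candidates the exact name wins, mirroring how link_md_to_servers() above resets sc->assigned when a non-wildcard domain matches after a wildcard one.
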
-static apr_status_t load_job_props(md_reg_t *reg, md_job_t *job, apr_pool_t *p) -{ - md_store_t *store = md_reg_store_get(reg); - md_json_t *jprops; - apr_status_t rv; - - rv = md_store_load_json(store, MD_SG_STAGING, job->md->name, - MD_FN_JOB, &jprops, p); - if (APR_SUCCESS == rv) { - job->restart_processed = md_json_getb(jprops, MD_KEY_PROCESSED, NULL); - job->error_runs = (int)md_json_getl(jprops, MD_KEY_ERRORS, NULL); - } - return rv; -} - -static apr_status_t save_job_props(md_reg_t *reg, md_job_t *job, apr_pool_t *p) +static apr_status_t link_mds_to_servers(md_mod_conf_t *mc, server_rec *s, apr_pool_t *p) { - md_store_t *store = md_reg_store_get(reg); - md_json_t *jprops; - apr_status_t rv; - - rv = md_store_load_json(store, MD_SG_STAGING, job->md->name, MD_FN_JOB, &jprops, p); - if (APR_STATUS_IS_ENOENT(rv)) { - jprops = md_json_create(p); - rv = APR_SUCCESS; - } - if (APR_SUCCESS == rv) { - md_json_setb(job->restart_processed, jprops, MD_KEY_PROCESSED, NULL); - md_json_setl(job->error_runs, jprops, MD_KEY_ERRORS, NULL); - rv = md_store_save_json(store, p, MD_SG_STAGING, job->md->name, - MD_FN_JOB, jprops, 0); + int i; + md_t *md; + apr_status_t rv = APR_SUCCESS; + + apr_array_clear(mc->unused_names); + for (i = 0; i < mc->mds->nelts; ++i) { + md = APR_ARRAY_IDX(mc->mds, i, md_t*); + if (APR_SUCCESS != (rv = link_md_to_servers(mc, md, s, p))) { + goto leave; + } } +leave: return rv; } -static apr_status_t check_job(md_watchdog *wd, md_job_t *job, apr_pool_t *ptemp) +static apr_status_t merge_mds_with_conf(md_mod_conf_t *mc, apr_pool_t *p, + server_rec *base_server, int log_level) { + md_srv_conf_t *base_conf; + md_t *md, *omd; + const char *domain; + md_timeslice_t *ts; apr_status_t rv = APR_SUCCESS; - apr_time_t valid_from, delay; - int errored, renew, error_runs; - char ts[APR_RFC822_DATE_LEN]; - - if (apr_time_now() < job->next_check) { - /* Job needs to wait */ - return APR_EAGAIN; - } - - job->next_check = 0; - error_runs = job->error_runs; - - if (job->md->state == MD_S_MISSING) { - job->stalled = 1; - } - - if (job->stalled) { - /* Missing information, this will not change until configuration - * is changed and server restarted */ - rv = APR_INCOMPLETE; - ++job->error_runs; - goto out; - } - else if (job->renewed) { - assess_renewal(wd, job, ptemp); - } - else if (APR_SUCCESS == (rv = md_reg_assess(wd->reg, job->md, &errored, &renew, wd->p))) { - if (errored) { - ap_log_error( APLOG_MARK, APLOG_DEBUG, 0, wd->s, APLOGNO(10050) - "md(%s): in error state", job->md->name); + int i, j; + + /* The global module configuration 'mc' keeps a list of all configured MDomains + * in the server. This list is collected during configuration processing and, + * in the post config phase, get updated from all merged server configurations + * before the server starts processing. + */ + base_conf = md_config_get(base_server); + md_config_get_timespan(&ts, base_conf, MD_CONFIG_RENEW_WINDOW); + if (ts) md_reg_set_renew_window_default(mc->reg, ts); + md_config_get_timespan(&ts, base_conf, MD_CONFIG_WARN_WINDOW); + if (ts) md_reg_set_warn_window_default(mc->reg, ts); + + /* Complete the properties of the MDs, now that we have the complete, merged + * server configurations. 
+ */ + for (i = 0; i < mc->mds->nelts; ++i) { + md = APR_ARRAY_IDX(mc->mds, i, md_t*); + merge_srv_config(md, base_conf, p); + + if (mc->match_mode == MD_MATCH_ALL) { + /* Check that we have no overlap with the MDs already completed */ + for (j = 0; j < i; ++j) { + omd = APR_ARRAY_IDX(mc->mds, j, md_t*); + if ((domain = md_common_name(md, omd)) != NULL) { + ap_log_error(APLOG_MARK, APLOG_ERR, 0, base_server, APLOGNO(10038) + "two Managed Domains have an overlap in domain '%s'" + ", first definition in %s(line %d), second in %s(line %d)", + domain, md->defn_name, md->defn_line_number, + omd->defn_name, omd->defn_line_number); + return APR_EINVAL; + } + } } - else if (renew) { - ap_log_error( APLOG_MARK, APLOG_DEBUG, 0, wd->s, APLOGNO(10052) - "md(%s): state=%d, driving", job->md->name, job->md->state); - - rv = md_reg_stage(wd->reg, job->md, NULL, 0, &valid_from, ptemp); - - if (APR_SUCCESS == rv) { - job->renewed = 1; - job->restart_at = valid_from; - assess_renewal(wd, job, ptemp); + + if (md->cert_files && md->cert_files->nelts) { + if (!md->pkey_files || (md->cert_files->nelts != md->pkey_files->nelts)) { + ap_log_error(APLOG_MARK, APLOG_ERR, 0, base_server, APLOGNO(10170) + "The Managed Domain '%s' " + "needs one MDCertificateKeyFile for each MDCertificateFile.", + md->name); + return APR_EINVAL; } } - else { - /* Renew is not necessary yet, leave job->next_check as 0 since - * that keeps the default schedule of running twice a day. */ - apr_rfc822_date(ts, job->md->expires); - ap_log_error( APLOG_MARK, APLOG_DEBUG, 0, wd->s, APLOGNO(10053) - "md(%s): no need to renew yet, cert expires %s", job->md->name, ts); + else if (md->pkey_files && md->pkey_files->nelts + && (!md->cert_files || !md->cert_files->nelts)) { + ap_log_error(APLOG_MARK, APLOG_ERR, 0, base_server, APLOGNO(10171) + "The Managed Domain '%s' " + "has MDCertificateKeyFile(s) but no MDCertificateFile.", + md->name); + return APR_EINVAL; } - } - - if (APR_SUCCESS == rv) { - job->error_runs = 0; - } - else { - ap_log_error( APLOG_MARK, APLOG_ERR, rv, wd->s, APLOGNO(10056) - "processing %s", job->md->name); - ++job->error_runs; - /* back off duration, depending on the errors we encounter in a row */ - delay = apr_time_from_sec(5 << (job->error_runs - 1)); - if (delay > apr_time_from_sec(60*60)) { - delay = apr_time_from_sec(60*60); + + if (APLOG_IS_LEVEL(base_server, log_level)) { + ap_log_error(APLOG_MARK, log_level, 0, base_server, APLOGNO(10039) + "Completed MD[%s, CA=%s, Proto=%s, Agreement=%s, renew-mode=%d " + "renew_window=%s, warn_window=%s", + md->name, md->ca_effective, md->ca_proto, md->ca_agreement, md->renew_mode, + md->renew_window? md_timeslice_format(md->renew_window, p) : "unset", + md->warn_window? md_timeslice_format(md->warn_window, p) : "unset"); } - job->next_check = apr_time_now() + delay; - ap_log_error(APLOG_MARK, APLOG_INFO, 0, wd->s, APLOGNO(10057) - "%s: encountered error for the %d. 
time, next run in %s", - job->md->name, job->error_runs, md_print_duration(ptemp, delay)); - } - -out: - if (error_runs != job->error_runs) { - apr_status_t rv2 = save_job_props(wd->reg, job, ptemp); - ap_log_error(APLOG_MARK, APLOG_TRACE1, rv2, wd->s, "%s: saving job props", job->md->name); } - - job->last_rv = rv; return rv; } -static apr_status_t run_watchdog(int state, void *baton, apr_pool_t *ptemp) +static apr_status_t check_invalid_duplicates(server_rec *base_server) { - md_watchdog *wd = baton; - apr_status_t rv = APR_SUCCESS; - md_job_t *job; - apr_time_t next_run, now; - int restart = 0; - int i; - - switch (state) { - case AP_WATCHDOG_STATE_STARTING: - ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, wd->s, APLOGNO(10054) - "md watchdog start, auto drive %d mds", wd->jobs->nelts); - assert(wd->reg); - - for (i = 0; i < wd->jobs->nelts; ++i) { - job = APR_ARRAY_IDX(wd->jobs, i, md_job_t *); - load_job_props(wd->reg, job, ptemp); - } - break; - case AP_WATCHDOG_STATE_RUNNING: - - wd->next_change = 0; - ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, wd->s, APLOGNO(10055) - "md watchdog run, auto drive %d mds", wd->jobs->nelts); - - /* normally, we'd like to run at least twice a day */ - next_run = apr_time_now() + apr_time_from_sec(MD_SECS_PER_DAY / 2); - - /* Check on all the jobs we have */ - for (i = 0; i < wd->jobs->nelts; ++i) { - job = APR_ARRAY_IDX(wd->jobs, i, md_job_t *); - - rv = check_job(wd, job, ptemp); - - if (job->need_restart && !job->restart_processed) { - restart = 1; - } - if (job->next_check && job->next_check < next_run) { - next_run = job->next_check; - } - } - - now = apr_time_now(); - if (APLOGdebug(wd->s)) { - ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, wd->s, APLOGNO(10107) - "next run in %s", md_print_duration(ptemp, next_run - now)); - } - wd_set_interval(wd->watchdog, next_run - now, wd, run_watchdog); - break; - - case AP_WATCHDOG_STATE_STOPPING: - ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, wd->s, APLOGNO(10058) - "md watchdog stopping"); - break; - } - - if (restart) { - const char *action, *names = ""; - int n; - - for (i = 0, n = 0; i < wd->jobs->nelts; ++i) { - job = APR_ARRAY_IDX(wd->jobs, i, md_job_t *); - if (job->need_restart && !job->restart_processed) { - names = apr_psprintf(ptemp, "%s%s%s", names, n? " " : "", job->md->name); - ++n; - } - } + server_rec *s; + md_srv_conf_t *sc; - if (n > 0) { - int notified = 1; - - /* Run notify command for ready MDs (if configured) and persist that - * we have done so. This process might be reaped after n requests or die - * of another cause. The one taking over the watchdog need to notify again. 
- */ - if (wd->mc->notify_cmd) { - const char * const *argv; - const char *cmdline; - int exit_code; - - cmdline = apr_psprintf(ptemp, "%s %s", wd->mc->notify_cmd, names); - apr_tokenize_to_argv(cmdline, (char***)&argv, ptemp); - if (APR_SUCCESS == (rv = md_util_exec(ptemp, argv[0], argv, &exit_code))) { - ap_log_error(APLOG_MARK, APLOG_DEBUG, rv, wd->s, APLOGNO(10108) - "notify command '%s' returned %d", - wd->mc->notify_cmd, exit_code); - } - else { - if (APR_EINCOMPLETE == rv && exit_code) { - rv = 0; - } - ap_log_error(APLOG_MARK, APLOG_ERR, rv, wd->s, APLOGNO(10109) - "executing MDNotifyCmd %s returned %d", - wd->mc->notify_cmd, exit_code); - notified = 0; - } - } - - if (notified) { - /* persist the jobs that were notified */ - for (i = 0, n = 0; i < wd->jobs->nelts; ++i) { - job = APR_ARRAY_IDX(wd->jobs, i, md_job_t *); - if (job->need_restart && !job->restart_processed) { - job->restart_processed = 1; - save_job_props(wd->reg, job, ptemp); - } - } - } - - /* FIXME: the server needs to start gracefully to take the new certificate in. - * This poses a variety of problems to solve satisfactory for everyone: - * - I myself, have no implementation for Windows - * - on *NIX, child processes run with less privileges, preventing - * the signal based restart trigger to work - * - admins want better control of timing windows for restarts, e.g. - * during less busy hours/days. - */ - rv = md_server_graceful(ptemp, wd->s); - if (APR_ENOTIMPL == rv) { - /* self-graceful restart not supported in this setup */ - action = " and changes will be activated on next (graceful) server restart."; - } - else { - action = " and server has been asked to restart now."; - } - ap_log_error(APLOG_MARK, APLOG_NOTICE, 0, wd->s, APLOGNO(10059) - "The Managed Domain%s %s %s been setup%s", - (n > 1)? "s" : "", names, (n > 1)? 
"have" : "has", action); + ap_log_error( APLOG_MARK, APLOG_TRACE1, 0, base_server, + "checking duplicate ssl assignments"); + for (s = base_server; s; s = s->next) { + sc = md_config_get(s); + if (!sc || !sc->assigned) continue; + + if (sc->assigned->nelts > 1 && sc->is_ssl) { + /* duplicate assignment to SSL VirtualHost, not allowed */ + ap_log_error(APLOG_MARK, APLOG_ERR, 0, base_server, APLOGNO(10042) + "conflict: %d MDs match to SSL VirtualHost %s, there can at most be one.", + (int)sc->assigned->nelts, s->server_hostname); + return APR_EINVAL; } } - return APR_SUCCESS; } -static apr_status_t start_watchdog(apr_array_header_t *names, apr_pool_t *p, - md_reg_t *reg, server_rec *s, md_mod_conf_t *mc) +static apr_status_t check_usage(md_mod_conf_t *mc, md_t *md, server_rec *base_server, + apr_pool_t *p, apr_pool_t *ptemp) { - apr_allocator_t *allocator; - md_watchdog *wd; - apr_pool_t *wdp; - apr_status_t rv; - const char *name; - md_t *md; - md_job_t *job; - int i, errored, renew; - - wd_get_instance = APR_RETRIEVE_OPTIONAL_FN(ap_watchdog_get_instance); - wd_register_callback = APR_RETRIEVE_OPTIONAL_FN(ap_watchdog_register_callback); - wd_set_interval = APR_RETRIEVE_OPTIONAL_FN(ap_watchdog_set_callback_interval); - - if (!wd_get_instance || !wd_register_callback || !wd_set_interval) { - ap_log_error(APLOG_MARK, APLOG_CRIT, 0, s, APLOGNO(10061) "mod_watchdog is required"); - return !OK; - } - - /* We want our own pool with own allocator to keep data across watchdog invocations */ - apr_allocator_create(&allocator); - apr_allocator_max_free_set(allocator, ap_max_mem_free); - rv = apr_pool_create_ex(&wdp, p, NULL, allocator); - if (rv != APR_SUCCESS) { - ap_log_error(APLOG_MARK, APLOG_ERR, rv, s, APLOGNO(10062) "md_watchdog: create pool"); - return rv; - } - apr_allocator_owner_set(allocator, wdp); - apr_pool_tag(wdp, "md_watchdog"); - - wd = apr_pcalloc(wdp, sizeof(*wd)); - wd->p = wdp; - wd->reg = reg; - wd->s = s; - wd->mc = mc; - - wd->jobs = apr_array_make(wd->p, 10, sizeof(md_job_t *)); - for (i = 0; i < names->nelts; ++i) { - name = APR_ARRAY_IDX(names, i, const char *); - md = md_reg_get(wd->reg, name, wd->p); - if (md) { - md_reg_assess(wd->reg, md, &errored, &renew, wd->p); - if (errored) { - ap_log_error( APLOG_MARK, APLOG_WARNING, 0, wd->s, APLOGNO(10063) - "md(%s): seems errored. Will not process this any further.", name); - } - else { - job = apr_pcalloc(wd->p, sizeof(*job)); - - job->md = md; - APR_ARRAY_PUSH(wd->jobs, md_job_t*) = job; - - ap_log_error( APLOG_MARK, APLOG_DEBUG, 0, wd->s, APLOGNO(10064) - "md(%s): state=%d, driving", name, md->state); - - load_job_props(reg, job, wd->p); - if (job->error_runs) { - /* We are just restarting. If we encounter jobs that had errors - * running the protocol on previous staging runs, we reset - * the staging area for it, in case we persisted something that - * causes a loop. 
*/ - md_store_t *store = md_reg_store_get(wd->reg); - - md_store_purge(store, p, MD_SG_STAGING, job->md->name); - md_store_purge(store, p, MD_SG_CHALLENGES, job->md->name); - } + server_rec *s; + md_srv_conf_t *sc; + apr_status_t rv = APR_SUCCESS; + int i, has_ssl; + apr_array_header_t *servers; + + (void)p; + servers = apr_array_make(ptemp, 5, sizeof(server_rec*)); + has_ssl = 0; + for (s = base_server; s; s = s->next) { + sc = md_config_get(s); + if (!sc || !sc->assigned) continue; + for (i = 0; i < sc->assigned->nelts; ++i) { + if (md == APR_ARRAY_IDX(sc->assigned, i, md_t*)) { + APR_ARRAY_PUSH(servers, server_rec*) = s; + if (sc->is_ssl) has_ssl = 1; } } } - if (!wd->jobs->nelts) { - ap_log_error( APLOG_MARK, APLOG_DEBUG, 0, s, APLOGNO(10065) - "no managed domain in state to drive, no watchdog needed, " - "will check again on next server (graceful) restart"); - apr_pool_destroy(wd->p); - return APR_SUCCESS; + if (!has_ssl && md->require_https > MD_REQUIRE_OFF) { + /* We require https for this MD, but do we have a SSL vhost? */ + ap_log_error(APLOG_MARK, APLOG_WARNING, 0, base_server, APLOGNO(10105) + "MD %s does not match any VirtualHost with 'SSLEngine on', " + "but is configured to require https. This cannot work.", md->name); } - - if (APR_SUCCESS != (rv = wd_get_instance(&wd->watchdog, MD_WATCHDOG_NAME, 0, 1, wd->p))) { - ap_log_error(APLOG_MARK, APLOG_CRIT, rv, s, APLOGNO(10066) - "create md watchdog(%s)", MD_WATCHDOG_NAME); - return rv; + if (apr_is_empty_array(servers)) { + if (md->renew_mode != MD_RENEW_ALWAYS) { + /* Not an error, but looks suspicious */ + ap_log_error(APLOG_MARK, APLOG_WARNING, 0, base_server, APLOGNO(10045) + "No VirtualHost matches Managed Domain %s", md->name); + APR_ARRAY_PUSH(mc->unused_names, const char*) = md->name; + } } - rv = wd_register_callback(wd->watchdog, 0, wd, run_watchdog); - ap_log_error(APLOG_MARK, rv? APLOG_CRIT : APLOG_DEBUG, rv, s, APLOGNO(10067) - "register md watchdog(%s)", MD_WATCHDOG_NAME); return rv; } - -static void load_stage_sets(apr_array_header_t *names, apr_pool_t *p, - md_reg_t *reg, server_rec *s) + +static int init_cert_watch_status(md_mod_conf_t *mc, apr_pool_t *p, apr_pool_t *ptemp, server_rec *s) { - const char *name; - apr_status_t rv; - int i; - - for (i = 0; i < names->nelts; ++i) { - name = APR_ARRAY_IDX(names, i, const char*); - if (APR_SUCCESS == (rv = md_reg_load(reg, name, p))) { - ap_log_error( APLOG_MARK, APLOG_INFO, rv, s, APLOGNO(10068) - "%s: staged set activated", name); + md_t *md; + md_result_t *result; + int i, count; + + /* Calculate the list of MD names which we need to watch: + * - all MDs that are used somewhere + * - all MDs in drive mode 'AUTO' that are not in 'unused_names' + */ + count = 0; + result = md_result_make(ptemp, APR_SUCCESS); + for (i = 0; i < mc->mds->nelts; ++i) { + md = APR_ARRAY_IDX(mc->mds, i, md_t*); + md_result_set(result, APR_SUCCESS, NULL); + md->watched = 0; + if (md->state == MD_S_ERROR) { + md_result_set(result, APR_EGENERAL, + "in error state, unable to drive forward. This " + "indicates an incomplete or inconsistent configuration. 
" + "Please check the log for warnings in this regard."); + continue; } - else if (!APR_STATUS_IS_ENOENT(rv)) { - ap_log_error( APLOG_MARK, APLOG_ERR, rv, s, APLOGNO(10069) - "%s: error loading staged set", name); + + if (md->renew_mode == MD_RENEW_AUTO + && md_array_str_index(mc->unused_names, md->name, 0, 0) >= 0) { + /* This MD is not used in any virtualhost, do not watch */ + continue; + } + + if (md_will_renew_cert(md)) { + /* make a test init to detect early errors. */ + md_reg_test_init(mc->reg, md, mc->env, result, p); + if (APR_SUCCESS != result->status && result->detail) { + apr_hash_set(mc->init_errors, md->name, APR_HASH_KEY_STRING, apr_pstrdup(p, result->detail)); + ap_log_error(APLOG_MARK, APLOG_ERR, 0, s, APLOGNO(10173) + "md[%s]: %s", md->name, result->detail); + } } + + md->watched = 1; + ++count; } - return; + return count; } -static apr_status_t md_post_config(apr_pool_t *p, apr_pool_t *plog, - apr_pool_t *ptemp, server_rec *s) +static apr_status_t md_post_config_before_ssl(apr_pool_t *p, apr_pool_t *plog, + apr_pool_t *ptemp, server_rec *s) { void *data = NULL; const char *mod_md_init_key = "mod_md_init_counter"; md_srv_conf_t *sc; md_mod_conf_t *mc; - md_reg_t *reg; - const md_t *md; - apr_array_header_t *drive_names; apr_status_t rv = APR_SUCCESS; - int i, dry_run = 0; + int dry_run = 0, log_level = APLOG_DEBUG; + md_store_t *store; apr_pool_userdata_get(&data, mod_md_init_key, s->process->pool); if (data == NULL) { /* At the first start, httpd makes a config check dry run. It * runs all config hooks to check if it can. If so, it does * this all again and starts serving requests. - * - * This is known. * * On a dry run, we therefore do all the cheap config things we - * need to do. Because otherwise mod_ssl fails because it calls - * us unprepared. - * But synching our configuration with the md store - * and determining which domains to drive and start a watchdog - * and all that, we do not. + * need to do to find out if the settings are ok. More expensive + * things we delay to the real run. */ - ap_log_error( APLOG_MARK, APLOG_DEBUG, 0, s, APLOGNO(10070) + dry_run = 1; + log_level = APLOG_TRACE1; + ap_log_error( APLOG_MARK, log_level, 0, s, APLOGNO(10070) "initializing post config dry run"); apr_pool_userdata_set((const void *)1, mod_md_init_key, apr_pool_cleanup_null, s->process->pool); - dry_run = 1; } else { ap_log_error( APLOG_MARK, APLOG_INFO, 0, s, APLOGNO(10071) @@ -1024,278 +872,478 @@ static apr_status_t md_post_config(apr_pool_t *p, apr_pool_t *plog, init_setups(p, s); md_log_set(log_is_level, log_print, NULL); - /* Check uniqueness of MDs, calculate global, configured MD list. - * If successful, we have a list of MD definitions that do not overlap. */ - /* We also need to find out if we can be reached on 80/443 from the outside (e.g. the CA) */ - if (APR_SUCCESS != (rv = md_calc_md_list(p, plog, ptemp, s))) { - return rv; - } - md_config_post_config(s, p); sc = md_config_get(s); mc = sc->mc; + mc->dry_run = dry_run; + + md_event_init(p); + md_event_subscribe(on_event, mc); - /* Synchronize the definitions we now have with the store via a registry (reg). 
*/ - if (APR_SUCCESS != (rv = setup_reg(®, p, s, mc->can_http, mc->can_https))) { - ap_log_error(APLOG_MARK, APLOG_ERR, rv, s, APLOGNO(10072) - "setup md registry"); - goto out; + rv = setup_store(&store, mc, p, s); + if (APR_SUCCESS != rv) goto leave; + + rv = md_reg_create(&mc->reg, p, store, mc->proxy_url, mc->ca_certs, + mc->min_delay, mc->retry_failover, + mc->use_store_locks, mc->lock_wait_timeout); + if (APR_SUCCESS != rv) { + ap_log_error(APLOG_MARK, APLOG_ERR, rv, s, APLOGNO(10072) "setup md registry"); + goto leave; } - - if (APR_SUCCESS != (rv = md_reg_sync(reg, p, ptemp, mc->mds))) { - ap_log_error(APLOG_MARK, APLOG_ERR, rv, s, APLOGNO(10073) - "synching %d mds to registry", mc->mds->nelts); - } - - /* Determine the managed domains that are in auto drive_mode. For those, - * determine in which state they are: - * - UNKNOWN: should not happen, report, don't drive - * - ERROR: something we do not know how to fix, report, don't drive - * - INCOMPLETE/EXPIRED: need to drive them right away - * - COMPLETE: determine when cert expires, drive when the time comes - * - * Start the watchdog if we have anything, now or in the future. + + /* renew on 30% remaining /*/ + rv = md_ocsp_reg_make(&mc->ocsp, p, store, mc->ocsp_renew_window, + AP_SERVER_BASEVERSION, mc->proxy_url, + mc->min_delay); + if (APR_SUCCESS != rv) { + ap_log_error(APLOG_MARK, APLOG_ERR, rv, s, APLOGNO(10196) "setup ocsp registry"); + goto leave; + } + + init_ssl(); + + /* How to bootstrap this module: + * 1. find out if we know if http: and/or https: requests will arrive + * 2. apply the now complete configuration settings to the MDs + * 3. Link MDs to the server_recs they are used in. Detect unused MDs. + * 4. Update the store with the MDs. Change domain names, create new MDs, etc. + * Basically all MD properties that are configured directly. + * WARNING: this may change the name of an MD. If an MD loses the first + * of its domain names, it first gets the new first one as name. The + * store will find the old settings and "recover" the previous name. + * 5. Load any staged data from previous driving. + * 6. on a dry run, this is all we do + * 7. Read back the MD properties that reflect the existence and aspect of + * credentials that are in the store (or missing there). + * Expiry times, MD state, etc. + * 8. Determine the list of MDs that need driving/supervision. + * 9. Cleanup any left-overs in registry/store that are no longer needed for + * the list of MDs as we know it now. + * 10. If this list is non-empty, setup a watchdog to run. */ - drive_names = apr_array_make(ptemp, mc->mds->nelts+1, sizeof(const char *)); + /*1*/ + if (APR_SUCCESS != (rv = detect_supported_protocols(mc, s, p, log_level))) goto leave; + /*2*/ + if (APR_SUCCESS != (rv = merge_mds_with_conf(mc, p, s, log_level))) goto leave; + /*3*/ + if (APR_SUCCESS != (rv = link_mds_to_servers(mc, s, p))) goto leave; + /*4*/ + if (APR_SUCCESS != (rv = md_reg_lock_global(mc->reg, ptemp))) { + ap_log_error(APLOG_MARK, APLOG_ERR, rv, s, APLOGNO(10398) + "unable to obtain global registry lock, " + "renewed certificates may remain inactive on " + "this httpd instance!"); + /* FIXME: or should we fail the server start/reload here? 
*/ + rv = APR_SUCCESS; + goto leave; + } + if (APR_SUCCESS != (rv = md_reg_sync_start(mc->reg, mc->mds, ptemp))) { + ap_log_error(APLOG_MARK, APLOG_ERR, rv, s, APLOGNO(10073) + "syncing %d mds to registry", mc->mds->nelts); + goto leave; + } + /*5*/ + md_reg_load_stagings(mc->reg, mc->mds, mc->env, p); +leave: + md_reg_unlock_global(mc->reg, ptemp); + return rv; +} + +static apr_status_t md_post_config_after_ssl(apr_pool_t *p, apr_pool_t *plog, + apr_pool_t *ptemp, server_rec *s) +{ + md_srv_conf_t *sc; + apr_status_t rv = APR_SUCCESS; + md_mod_conf_t *mc; + int watched, i; + md_t *md; + + (void)ptemp; + (void)plog; + sc = md_config_get(s); + + /*6*/ + if (!sc || !sc->mc || sc->mc->dry_run) goto leave; + mc = sc->mc; + + /*7*/ + if (APR_SUCCESS != (rv = check_invalid_duplicates(s))) { + goto leave; + } + apr_array_clear(mc->unused_names); for (i = 0; i < mc->mds->nelts; ++i) { - md = APR_ARRAY_IDX(mc->mds, i, const md_t *); - switch (md->drive_mode) { - case MD_DRIVE_AUTO: - if (md_array_str_index(mc->unused_names, md->name, 0, 0) >= 0) { - break; - } - /* fall through */ - case MD_DRIVE_ALWAYS: - APR_ARRAY_PUSH(drive_names, const char *) = md->name; - break; - default: - /* leave out */ - break; + md = APR_ARRAY_IDX(mc->mds, i, md_t *); + + ap_log_error( APLOG_MARK, APLOG_TRACE2, rv, s, "md{%s}: auto_add", md->name); + if (APR_SUCCESS != (rv = auto_add_domains(md, s, p))) { + goto leave; + } + init_acme_tls_1_domains(md, s); + ap_log_error( APLOG_MARK, APLOG_TRACE2, rv, s, "md{%s}: check_usage", md->name); + if (APR_SUCCESS != (rv = check_usage(mc, md, s, p, ptemp))) { + goto leave; + } + ap_log_error( APLOG_MARK, APLOG_TRACE2, rv, s, "md{%s}: sync_finish", md->name); + if (APR_SUCCESS != (rv = md_reg_sync_finish(mc->reg, md, p, ptemp))) { + ap_log_error( APLOG_MARK, APLOG_ERR, rv, s, APLOGNO(10172) + "md[%s]: error syncing to store", md->name); + goto leave; } } - - init_ssl(); - - if (dry_run) { - goto out; - } - - /* If there are MDs to drive, start a watchdog to check on them regularly */ - if (drive_names->nelts > 0) { + /*8*/ + ap_log_error( APLOG_MARK, APLOG_TRACE2, rv, s, "init_cert_watch"); + watched = init_cert_watch_status(mc, p, ptemp, s); + /*9*/ + ap_log_error( APLOG_MARK, APLOG_TRACE2, rv, s, "cleanup challenges"); + md_reg_cleanup_challenges(mc->reg, p, ptemp, mc->mds); + + /* From here on, the domains in the registry are readonly + * and only staging/challenges may be manipulated */ + md_reg_freeze_domains(mc->reg, mc->mds); + + if (watched) { + /*10*/ ap_log_error(APLOG_MARK, APLOG_DEBUG, rv, s, APLOGNO(10074) - "%d out of %d mds are configured for auto-drive", - drive_names->nelts, mc->mds->nelts); - - load_stage_sets(drive_names, p, reg, s); + "%d out of %d mds need watching", watched, mc->mds->nelts); + md_http_use_implementation(md_curl_get_impl(p)); - rv = start_watchdog(drive_names, p, reg, s, mc); + rv = md_renew_start_watching(mc, s, p); } else { - ap_log_error( APLOG_MARK, APLOG_DEBUG, 0, s, APLOGNO(10075) - "no mds to auto drive, no watchdog needed"); + ap_log_error( APLOG_MARK, APLOG_DEBUG, 0, s, APLOGNO(10075) "no mds to supervise"); } -out: + + if (!mc->ocsp || md_ocsp_count(mc->ocsp) == 0) { + ap_log_error( APLOG_MARK, APLOG_TRACE1, 0, s, "no ocsp to manage"); + goto leave; + } + + md_http_use_implementation(md_curl_get_impl(p)); + rv = md_ocsp_start_watching(mc, s, p); + +leave: + ap_log_error( APLOG_MARK, APLOG_TRACE2, rv, s, "post_config done"); return rv; } 
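
md_post_config_before_ssl() and md_post_config_after_ssl() above depend on being called before and after, respectively, mod_ssl's own post_config hook. The registration itself happens in md_hooks(), which is outside this hunk; the sketch below only illustrates how such ordering is commonly expressed with httpd's predecessor/successor lists, and the exact arrays and ordering constants used by the module may differ.

    #include <httpd.h>
    #include <http_config.h>

    /* Sketch only: order two post_config callbacks around mod_ssl.
     * "mod_ssl.c" in aszSucc makes the callback run before mod_ssl,
     * "mod_ssl.c" in aszPre makes it run after mod_ssl. */
    static void md_hooks_sketch(apr_pool_t *pool)
    {
        static const char *const mod_ssl[] = { "mod_ssl.c", NULL };

        (void)pool;
        ap_hook_post_config(md_post_config_before_ssl, NULL, mod_ssl, APR_HOOK_MIDDLE);
        ap_hook_post_config(md_post_config_after_ssl, mod_ssl, NULL, APR_HOOK_MIDDLE);
    }

Splitting the work this way keeps the cheap checks (and everything the dry run needs) in the before-ssl phase, while the after-ssl phase can rely on sc->is_ssl having been set by get_certificates() for every VirtualHost that mod_ssl requested certificates for.
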
/**************************************************************************************************/ -/* Access API to other httpd components */ +/* connection context */ + +typedef struct { + const char *protocol; +} md_conn_ctx; + +static const char *md_protocol_get(const conn_rec *c) +{ + md_conn_ctx *ctx; + + ctx = (md_conn_ctx*)ap_get_module_config(c->conn_config, &md_module); + return ctx? ctx->protocol : NULL; +} + +/**************************************************************************************************/ +/* ALPN handling */ + +static int md_protocol_propose(conn_rec *c, request_rec *r, + server_rec *s, + const apr_array_header_t *offers, + apr_array_header_t *proposals) +{ + (void)s; + if (!r && offers && ap_ssl_conn_is_ssl(c) + && ap_array_str_contains(offers, PROTO_ACME_TLS_1)) { + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c, + "proposing protocol '%s'", PROTO_ACME_TLS_1); + APR_ARRAY_PUSH(proposals, const char*) = PROTO_ACME_TLS_1; + return OK; + } + return DECLINED; +} -static int md_is_managed(server_rec *s) +static int md_protocol_switch(conn_rec *c, request_rec *r, server_rec *s, + const char *protocol) { - md_srv_conf_t *conf = md_config_get(s); + md_conn_ctx *ctx; + + (void)s; + if (!r && ap_ssl_conn_is_ssl(c) && !strcmp(PROTO_ACME_TLS_1, protocol)) { + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c, + "switching protocol '%s'", PROTO_ACME_TLS_1); + ctx = apr_pcalloc(c->pool, sizeof(*ctx)); + ctx->protocol = PROTO_ACME_TLS_1; + ap_set_module_config(c->conn_config, &md_module, ctx); - if (conf && conf->assigned) { - ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s, APLOGNO(10076) - "%s: manages server %s", conf->assigned->name, s->server_hostname); - return 1; + c->keepalive = AP_CONN_CLOSE; + return OK; } - ap_log_error(APLOG_MARK, APLOG_TRACE1, 0, s, - "server %s is not managed", s->server_hostname); - return 0; + return DECLINED; +} + + +/**************************************************************************************************/ +/* Access API to other httpd components */ + +static void fallback_fnames(apr_pool_t *p, md_pkey_spec_t *kspec, char **keyfn, char **certfn ) +{ + *keyfn = apr_pstrcat(p, "fallback-", md_pkey_filename(kspec, p), NULL); + *certfn = apr_pstrcat(p, "fallback-", md_chain_filename(kspec, p), NULL); } -static apr_status_t setup_fallback_cert(md_store_t *store, const md_t *md, - server_rec *s, apr_pool_t *p) +static apr_status_t make_fallback_cert(md_store_t *store, const md_t *md, md_pkey_spec_t *kspec, + server_rec *s, apr_pool_t *p, char *keyfn, char *crtfn) { md_pkey_t *pkey; md_cert_t *cert; - md_pkey_spec_t spec; apr_status_t rv; - MD_CHK_VARS; - - spec.type = MD_PKEY_TYPE_RSA; - spec.params.rsa.bits = MD_PKEY_RSA_BITS_DEF; - - if ( !MD_OK(md_pkey_gen(&pkey, p, &spec)) - || !MD_OK(md_store_save(store, p, MD_SG_DOMAINS, md->name, - MD_FN_FALLBACK_PKEY, MD_SV_PKEY, (void*)pkey, 0)) - || !MD_OK(md_cert_self_sign(&cert, "Apache Managed Domain Fallback", + + if (APR_SUCCESS != (rv = md_pkey_gen(&pkey, p, kspec)) + || APR_SUCCESS != (rv = md_store_save(store, p, MD_SG_DOMAINS, md->name, + keyfn, MD_SV_PKEY, (void*)pkey, 0)) + || APR_SUCCESS != (rv = md_cert_self_sign(&cert, "Apache Managed Domain Fallback", md->domains, pkey, apr_time_from_sec(14 * MD_SECS_PER_DAY), p)) - || !MD_OK(md_store_save(store, p, MD_SG_DOMAINS, md->name, - MD_FN_FALLBACK_CERT, MD_SV_CERT, (void*)cert, 0))) { - ap_log_error(APLOG_MARK, APLOG_ERR, rv, s, - "%s: setup fallback certificate, call %s", md->name, MD_LAST_CHK); + || APR_SUCCESS != (rv = 
md_store_save(store, p, MD_SG_DOMAINS, md->name, + crtfn, MD_SV_CERT, (void*)cert, 0))) { + ap_log_error(APLOG_MARK, APLOG_ERR, rv, s, APLOGNO(10174) + "%s: make fallback %s certificate", md->name, md_pkey_spec_name(kspec)); } return rv; } -static int fexists(const char *fname, apr_pool_t *p) -{ - return (*fname && APR_SUCCESS == md_util_is_file(fname, p)); -} - -static apr_status_t md_get_certificate(server_rec *s, apr_pool_t *p, - const char **pkeyfile, const char **pcertfile) +static apr_status_t get_certificates(server_rec *s, apr_pool_t *p, int fallback, + apr_array_header_t **pcert_files, + apr_array_header_t **pkey_files) { - apr_status_t rv = APR_ENOENT; + apr_status_t rv = APR_ENOENT; md_srv_conf_t *sc; md_reg_t *reg; md_store_t *store; const md_t *md; - MD_CHK_VARS; - - *pkeyfile = NULL; - *pcertfile = NULL; + apr_array_header_t *key_files, *chain_files; + const char *keyfile, *chainfile; + int i; + + *pkey_files = *pcert_files = NULL; + key_files = apr_array_make(p, 5, sizeof(const char*)); + chain_files = apr_array_make(p, 5, sizeof(const char*)); ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s, APLOGNO(10113) - "md_get_certificate called for vhost %s.", s->server_hostname); + "get_certificates called for vhost %s.", s->server_hostname); sc = md_config_get(s); if (!sc) { - ap_log_error(APLOG_MARK, APLOG_TRACE1, 0, s, - "asked for certificate of server %s which has no md config", - s->server_hostname); - return APR_ENOENT; - } - - if (!sc->assigned) { - /* Hmm, mod_ssl (or someone like it) asks for certificates for a server - * where we did not assign a MD to. Either the user forgot to configure - * that server with SSL certs, has misspelled a server name or we have - * a bug that prevented us from taking responsibility for this server. - * Either way, make some polite noise */ - ap_log_error(APLOG_MARK, APLOG_NOTICE, 0, s, APLOGNO(10114) - "asked for certificate of server %s which has no MD assigned. This " - "could be ok, but most likely it is either a misconfiguration or " - "a bug. Please check server names and MD names carefully and if " - "everything checks open, please open an issue.", + ap_log_error(APLOG_MARK, APLOG_TRACE2, 0, s, + "asked for certificate of server %s which has no md config", s->server_hostname); return APR_ENOENT; } - + assert(sc->mc); reg = sc->mc->reg; assert(reg); - - md = md_reg_get(reg, sc->assigned->name, p); - if (!md) { - ap_log_error(APLOG_MARK, APLOG_WARNING, 0, s, APLOGNO(10115) - "unable to hand out certificates, as registry can no longer " - "find MD '%s'.", sc->assigned->name); + + sc->is_ssl = 1; + + if (!sc->assigned) { + /* With the new hooks in mod_ssl, we are invoked for all server_rec. It is + * therefore normal, when we have nothing to add here. */ return APR_ENOENT; } - - if (!MD_OK(md_reg_get_cred_files(reg, md, p, pkeyfile, pcertfile))) { - ap_log_error(APLOG_MARK, APLOG_ERR, rv, s, APLOGNO(10110) - "retrieving credentials for MD %s", md->name); - return rv; - } - - if (!fexists(*pkeyfile, p) || !fexists(*pcertfile, p)) { - /* Provide temporary, self-signed certificate as fallback, so that - * clients do not get obscure TLS handshake errors or will see a fallback - * virtual host that is not intended to be served here. 
*/ - store = md_reg_store_get(reg); - assert(store); + else if (sc->assigned->nelts != 1) { + if (!fallback) { + ap_log_error(APLOG_MARK, APLOG_ERR, 0, s, APLOGNO(10238) + "conflict: %d MDs match Virtualhost %s which uses SSL, however " + "there can be at most 1.", + (int)sc->assigned->nelts, s->server_hostname); + } + return APR_EINVAL; + } + md = APR_ARRAY_IDX(sc->assigned, 0, const md_t*); + + if (md->cert_files && md->cert_files->nelts) { + apr_array_cat(chain_files, md->cert_files); + apr_array_cat(key_files, md->pkey_files); + rv = APR_SUCCESS; + } + else { + md_pkey_spec_t *spec; - md_store_get_fname(pkeyfile, store, MD_SG_DOMAINS, - md->name, MD_FN_FALLBACK_PKEY, p); - md_store_get_fname(pcertfile, store, MD_SG_DOMAINS, - md->name, MD_FN_FALLBACK_CERT, p); - if (!fexists(*pkeyfile, p) || !fexists(*pcertfile, p)) { - if (!MD_OK(setup_fallback_cert(store, md, s, p))) { + for (i = 0; i < md_cert_count(md); ++i) { + spec = md_pkeys_spec_get(md->pks, i); + rv = md_reg_get_cred_files(&keyfile, &chainfile, reg, MD_SG_DOMAINS, md, spec, p); + if (APR_SUCCESS == rv) { + APR_ARRAY_PUSH(key_files, const char*) = keyfile; + APR_ARRAY_PUSH(chain_files, const char*) = chainfile; + } + else if (APR_STATUS_IS_ENOENT(rv)) { + /* certificate for this pkey is not available, others might + * if pkeys have been added for a running mdomain. + * see issue #260 */ + rv = APR_SUCCESS; + } + else if (!APR_STATUS_IS_ENOENT(rv)) { + ap_log_error(APLOG_MARK, APLOG_ERR, rv, s, APLOGNO(10110) + "retrieving credentials for MD %s (%s)", + md->name, md_pkey_spec_name(spec)); return rv; } } - - ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s, APLOGNO(10116) - "%s: providing fallback certificate for server %s", - md->name, s->server_hostname); - return APR_EAGAIN; - } - - /* We have key and cert files, but they might no longer be valid or not - * match all domain names. Still use these files for now, but indicate that - * resources should no longer be served until we have a new certificate again. */ - if (md->state != MD_S_COMPLETE) { - rv = APR_EAGAIN; - } - ap_log_error(APLOG_MARK, APLOG_DEBUG, rv, s, APLOGNO(10077) - "%s: providing certificate for server %s", md->name, s->server_hostname); + + if (md_array_is_empty(key_files)) { + if (fallback) { + /* Provide temporary, self-signed certificate as fallback, so that + * clients do not get obscure TLS handshake errors or will see a fallback + * virtual host that is not intended to be served here. 
*/ + char *kfn, *cfn; + + store = md_reg_store_get(reg); + assert(store); + + for (i = 0; i < md_cert_count(md); ++i) { + spec = md_pkeys_spec_get(md->pks, i); + fallback_fnames(p, spec, &kfn, &cfn); + + md_store_get_fname(&keyfile, store, MD_SG_DOMAINS, md->name, kfn, p); + md_store_get_fname(&chainfile, store, MD_SG_DOMAINS, md->name, cfn, p); + if (!md_file_exists(keyfile, p) || !md_file_exists(chainfile, p)) { + if (APR_SUCCESS != (rv = make_fallback_cert(store, md, spec, s, p, kfn, cfn))) { + return rv; + } + } + ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s, APLOGNO(10116) + "%s: providing %s fallback certificate for server %s", + md->name, md_pkey_spec_name(spec), s->server_hostname); + APR_ARRAY_PUSH(key_files, const char*) = keyfile; + APR_ARRAY_PUSH(chain_files, const char*) = chainfile; + } + rv = APR_EAGAIN; + goto leave; + } + } + } + ap_log_error(APLOG_MARK, APLOG_DEBUG, rv, s, APLOGNO(10077) + "%s[state=%d]: providing certificates for server %s", + md->name, md->state, s->server_hostname); +leave: + if (!md_array_is_empty(key_files) && !md_array_is_empty(chain_files)) { + *pkey_files = key_files; + *pcert_files = chain_files; + } + else if (APR_SUCCESS == rv) { + rv = APR_ENOENT; + } return rv; } -static int compat_warned; -static apr_status_t md_get_credentials(server_rec *s, apr_pool_t *p, - const char **pkeyfile, - const char **pcertfile, - const char **pchainfile) +static int md_add_cert_files(server_rec *s, apr_pool_t *p, + apr_array_header_t *cert_files, + apr_array_header_t *key_files) { - *pchainfile = NULL; - if (!compat_warned) { - compat_warned = 1; - ap_log_error(APLOG_MARK, APLOG_WARNING, 0, s, /* no APLOGNO */ - "You are using mod_md with an old patch to mod_ssl. This will " - " work for now, but support will be dropped in a future release."); - } - return md_get_certificate(s, p, pkeyfile, pcertfile); + apr_array_header_t *md_cert_files; + apr_array_header_t *md_key_files; + apr_status_t rv; + + ap_log_error(APLOG_MARK, APLOG_TRACE1, 0, s, "hook ssl_add_cert_files for %s", + s->server_hostname); + rv = get_certificates(s, p, 0, &md_cert_files, &md_key_files); + if (APR_SUCCESS == rv) { + if (!apr_is_empty_array(cert_files)) { + /* downgraded from WARNING to DEBUG, since installing separate certificates + * may be a valid use case. 
*/ + ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s, APLOGNO(10084) + "host '%s' is covered by a Managed Domain, but " + "certificate/key files are already configured " + "for it (most likely via SSLCertificateFile).", + s->server_hostname); + } + ap_log_error(APLOG_MARK, APLOG_TRACE1, 0, s, + "host '%s' is covered by a Managed Domain and " + "is being provided with %d key/certificate files.", + s->server_hostname, md_cert_files->nelts); + apr_array_cat(cert_files, md_cert_files); + apr_array_cat(key_files, md_key_files); + return DONE; + } + return DECLINED; } -static int md_is_challenge(conn_rec *c, const char *servername, - X509 **pcert, EVP_PKEY **pkey) +static int md_add_fallback_cert_files(server_rec *s, apr_pool_t *p, + apr_array_header_t *cert_files, + apr_array_header_t *key_files) { - md_srv_conf_t *sc; - apr_size_t slen, sufflen = sizeof(MD_TLSSNI01_DNS_SUFFIX) - 1; + apr_array_header_t *md_cert_files; + apr_array_header_t *md_key_files; apr_status_t rv; - slen = strlen(servername); - if (slen <= sufflen - || apr_strnatcasecmp(MD_TLSSNI01_DNS_SUFFIX, servername + slen - sufflen)) { - return 0; + ap_log_error(APLOG_MARK, APLOG_TRACE1, 0, s, "hook ssl_add_fallback_cert_files for %s", + s->server_hostname); + rv = get_certificates(s, p, 1, &md_cert_files, &md_key_files); + if (APR_EAGAIN == rv) { + apr_array_cat(cert_files, md_cert_files); + apr_array_cat(key_files, md_key_files); + return DONE; + } + return DECLINED; +} + +static int md_answer_challenge(conn_rec *c, const char *servername, + const char **pcert_pem, const char **pkey_pem) +{ + const char *protocol; + int hook_rv = DECLINED; + apr_status_t rv = APR_ENOENT; + md_srv_conf_t *sc; + md_store_t *store; + char *cert_name, *pkey_name; + const char *cert_pem, *key_pem; + int i; + + if (!servername + || !(protocol = md_protocol_get(c)) + || strcmp(PROTO_ACME_TLS_1, protocol)) { + goto cleanup; + } - sc = md_config_get(c->base_server); - if (sc && sc->mc->reg) { - md_store_t *store = md_reg_store_get(sc->mc->reg); - md_cert_t *mdcert; - md_pkey_t *mdpkey; - - rv = md_store_load(store, MD_SG_CHALLENGES, servername, - MD_FN_TLSSNI01_CERT, MD_SV_CERT, (void**)&mdcert, c->pool); - if (APR_SUCCESS == rv && (*pcert = md_cert_get_X509(mdcert))) { - rv = md_store_load(store, MD_SG_CHALLENGES, servername, - MD_FN_TLSSNI01_PKEY, MD_SV_PKEY, (void**)&mdpkey, c->pool); - if (APR_SUCCESS == rv && (*pkey = md_pkey_get_EVP_PKEY(mdpkey))) { - ap_log_cerror(APLOG_MARK, APLOG_INFO, 0, c, APLOGNO(10078) - "%s: is a tls-sni-01 challenge host", servername); - return 1; - } - ap_log_cerror(APLOG_MARK, APLOG_WARNING, rv, c, APLOGNO(10079) - "%s: challenge data not complete, key unavailable", servername); - } - else { - ap_log_cerror(APLOG_MARK, APLOG_INFO, rv, c, APLOGNO(10080) - "%s: unknown TLS SNI challenge host", servername); - } + if (!sc || !sc->mc->reg) goto cleanup; + + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c, + "Answer challenge[tls-alpn-01] for %s", servername); + store = md_reg_store_get(sc->mc->reg); + + for (i = 0; i < md_pkeys_spec_count( sc->pks ); i++) { + tls_alpn01_fnames(c->pool, md_pkeys_spec_get(sc->pks,i), + &pkey_name, &cert_name); + + rv = md_store_load(store, MD_SG_CHALLENGES, servername, cert_name, MD_SV_TEXT, + (void**)&cert_pem, c->pool); + if (APR_STATUS_IS_ENOENT(rv)) continue; + if (APR_SUCCESS != rv) goto cleanup; + + rv = md_store_load(store, MD_SG_CHALLENGES, servername, pkey_name, MD_SV_TEXT, + (void**)&key_pem, c->pool); + if (APR_STATUS_IS_ENOENT(rv)) continue; + if (APR_SUCCESS != rv) goto cleanup; + + 
ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c, + "Found challenge cert %s, key %s for %s", + cert_name, pkey_name, servername); + *pcert_pem = cert_pem; + *pkey_pem = key_pem; + hook_rv = OK; + break; } - *pcert = NULL; - *pkey = NULL; - return 0; + + if (DECLINED == hook_rv) { + ap_log_cerror(APLOG_MARK, APLOG_INFO, rv, c, APLOGNO(10080) + "%s: unknown tls-alpn-01 challenge host", servername); + } + +cleanup: + return hook_rv; } + /**************************************************************************************************/ -/* ACME challenge responses */ +/* ACME 'http-01' challenge responses */ #define WELL_KNOWN_PREFIX "/.well-known/" #define ACME_CHALLENGE_PREFIX WELL_KNOWN_PREFIX"acme-challenge/" @@ -1306,30 +1354,39 @@ static int md_http_challenge_pr(request_rec *r) const md_srv_conf_t *sc; const char *name, *data; md_reg_t *reg; - int configured; + const md_t *md; apr_status_t rv; - - if (r->parsed_uri.path + + if (r->parsed_uri.path && !strncmp(ACME_CHALLENGE_PREFIX, r->parsed_uri.path, sizeof(ACME_CHALLENGE_PREFIX)-1)) { sc = ap_get_module_config(r->server->module_config, &md_module); if (sc && sc->mc) { - ap_log_rerror(APLOG_MARK, APLOG_TRACE1, 0, r, - "access inside /.well-known/acme-challenge for %s%s", + ap_log_rerror(APLOG_MARK, APLOG_TRACE1, 0, r, + "access inside /.well-known/acme-challenge for %s%s", r->hostname, r->parsed_uri.path); - configured = (NULL != md_get_by_domain(sc->mc->mds, r->hostname)); + md = md_get_by_domain(sc->mc->mds, r->hostname); name = r->parsed_uri.path + sizeof(ACME_CHALLENGE_PREFIX)-1; reg = sc && sc->mc? sc->mc->reg : NULL; - + + if (md && md->ca_challenges + && md_array_str_index(md->ca_challenges, MD_AUTHZ_CHA_HTTP_01, 0, 1) < 0) { + /* The MD this challenge is for does not allow http-01 challenges, + * we have to decline. See #279 for a setup example where this + * is necessary. + */ + return DECLINED; + } + if (strlen(name) && !ap_strchr_c(name, '/') && reg) { md_store_t *store = md_reg_store_get(reg); - - rv = md_store_load(store, MD_SG_CHALLENGES, r->hostname, + + rv = md_store_load(store, MD_SG_CHALLENGES, r->hostname, MD_FN_HTTP01, MD_SV_TEXT, (void**)&data, r->pool); - ap_log_rerror(APLOG_MARK, APLOG_DEBUG, rv, r, + ap_log_rerror(APLOG_MARK, APLOG_DEBUG, rv, r, "loading challenge for %s (%s)", r->hostname, r->uri); if (APR_SUCCESS == rv) { apr_size_t len = strlen(data); - + if (r->method_number != M_GET) { return HTTP_NOT_IMPLEMENTED; } @@ -1337,29 +1394,31 @@ static int md_http_challenge_pr(request_rec *r) * configured for. Let's send the content back */ r->status = HTTP_OK; apr_table_setn(r->headers_out, "Content-Length", apr_ltoa(r->pool, (long)len)); - + bb = apr_brigade_create(r->pool, r->connection->bucket_alloc); apr_brigade_write(bb, NULL, NULL, data, len); ap_pass_brigade(r->output_filters, bb); apr_brigade_cleanup(bb); - + return DONE; } - else if (!configured) { - /* The request hostname is not for a configured domain. We are not + else if (!md || md->renew_mode == MD_RENEW_MANUAL + || (md->cert_files && md->cert_files->nelts + && md->renew_mode == MD_RENEW_AUTO)) { + /* The request hostname is not for a domain - or at least not for + * a domain that we renew ourselves. We are not * the sole authority here for /.well-known/acme-challenge (see PR62189). - * So, we decline to handle this and let others step in. + * So, we decline to handle this and give others a chance to provide + * the answer. 
*/ return DECLINED; } else if (APR_STATUS_IS_ENOENT(rv)) { return HTTP_NOT_FOUND; } - else { - ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(10081) - "loading challenge %s from store", name); - return HTTP_INTERNAL_SERVER_ERROR; - } + ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(10081) + "loading challenge %s from store", name); + return HTTP_INTERNAL_SERVER_ERROR; } } } @@ -1373,55 +1432,65 @@ static int md_require_https_maybe(request_rec *r) { const md_srv_conf_t *sc; apr_uri_t uri; - const char *s; + const char *s, *host; + const md_t *md; int status; - - if (opt_ssl_is_https && r->parsed_uri.path - && strncmp(WELL_KNOWN_PREFIX, r->parsed_uri.path, sizeof(WELL_KNOWN_PREFIX)-1)) { - - sc = ap_get_module_config(r->server->module_config, &md_module); - if (sc && sc->assigned && sc->assigned->require_https > MD_REQUIRE_OFF) { - if (opt_ssl_is_https(r->connection)) { - /* Using https: - * if 'permanent' and no one else set a HSTS header already, do it */ - if (sc->assigned->require_https == MD_REQUIRE_PERMANENT - && sc->mc->hsts_header && !apr_table_get(r->headers_out, MD_HSTS_HEADER)) { - apr_table_setn(r->headers_out, MD_HSTS_HEADER, sc->mc->hsts_header); - } + + /* Requests outside the /.well-known path are subject to possible + * https: redirects or HSTS header additions. + */ + sc = ap_get_module_config(r->server->module_config, &md_module); + if (!sc || !sc->assigned || !sc->assigned->nelts || !r->parsed_uri.path + || !strncmp(WELL_KNOWN_PREFIX, r->parsed_uri.path, sizeof(WELL_KNOWN_PREFIX)-1)) { + goto declined; + } + + host = ap_get_server_name_for_url(r); + md = md_get_for_domain(r->server, host); + if (!md) goto declined; + + if (ap_ssl_conn_is_ssl(r->connection)) { + /* Using https: + * if 'permanent' and no one else set a HSTS header already, do it */ + if (md->require_https == MD_REQUIRE_PERMANENT + && sc->mc->hsts_header && !apr_table_get(r->headers_out, MD_HSTS_HEADER)) { + apr_table_setn(r->headers_out, MD_HSTS_HEADER, sc->mc->hsts_header); + } + } + else { + if (md->require_https > MD_REQUIRE_OFF) { + /* Not using https:, but require it. Redirect. */ + if (r->method_number == M_GET) { + /* safe to use the old-fashioned codes */ + status = ((MD_REQUIRE_PERMANENT == md->require_https)? + HTTP_MOVED_PERMANENTLY : HTTP_MOVED_TEMPORARILY); } else { - /* Not using https:, but require it. Redirect. */ - if (r->method_number == M_GET) { - /* safe to use the old-fashioned codes */ - status = ((MD_REQUIRE_PERMANENT == sc->assigned->require_https)? - HTTP_MOVED_PERMANENTLY : HTTP_MOVED_TEMPORARILY); - } - else { - /* these should keep the method unchanged on retry */ - status = ((MD_REQUIRE_PERMANENT == sc->assigned->require_https)? - HTTP_PERMANENT_REDIRECT : HTTP_TEMPORARY_REDIRECT); - } - - s = ap_construct_url(r->pool, r->uri, r); - if (APR_SUCCESS == apr_uri_parse(r->pool, s, &uri)) { - uri.scheme = (char*)"https"; - uri.port = 443; - uri.port_str = (char*)"443"; - uri.query = r->parsed_uri.query; - uri.fragment = r->parsed_uri.fragment; - s = apr_uri_unparse(r->pool, &uri, APR_URI_UNP_OMITUSERINFO); - if (s && *s) { - apr_table_setn(r->headers_out, "Location", s); - return status; - } + /* these should keep the method unchanged on retry */ + status = ((MD_REQUIRE_PERMANENT == md->require_https)? 
+ HTTP_PERMANENT_REDIRECT : HTTP_TEMPORARY_REDIRECT); + } + + s = ap_construct_url(r->pool, r->uri, r); + if (APR_SUCCESS == apr_uri_parse(r->pool, s, &uri)) { + uri.scheme = (char*)"https"; + uri.port = 443; + uri.port_str = (char*)"443"; + uri.query = r->parsed_uri.query; + uri.fragment = r->parsed_uri.fragment; + s = apr_uri_unparse(r->pool, &uri, APR_URI_UNP_OMITUSERINFO); + if (s && *s) { + apr_table_setn(r->headers_out, "Location", s); + return status; } } } } +declined: return DECLINED; } -/* Runs once per created child process. Perform any process +/* Runs once per created child process. Perform any process * related initialization here. */ static void md_child_init(apr_pool_t *pool, server_rec *s) @@ -1434,27 +1503,47 @@ static void md_child_init(apr_pool_t *pool, server_rec *s) */ static void md_hooks(apr_pool_t *pool) { - static const char *const mod_ssl[] = { "mod_ssl.c", NULL}; + static const char *const mod_ssl[] = { "mod_ssl.c", "mod_tls.c", NULL}; + static const char *const mod_wd[] = { "mod_watchdog.c", NULL}; + + /* Leave the ssl initialization to mod_ssl or friends. */ + md_acme_init(pool, AP_SERVER_BASEVERSION, 0); - md_acme_init(pool, AP_SERVER_BASEVERSION); - ap_log_perror(APLOG_MARK, APLOG_TRACE1, 0, pool, "installing hooks"); - + /* Run once after configuration is set, before mod_ssl. + * Run again after mod_ssl is done. */ - ap_hook_post_config(md_post_config, NULL, mod_ssl, APR_HOOK_MIDDLE); - + ap_hook_post_config(md_post_config_before_ssl, NULL, mod_ssl, APR_HOOK_MIDDLE); + ap_hook_post_config(md_post_config_after_ssl, mod_ssl, mod_wd, APR_HOOK_LAST); + /* Run once after a child process has been created. */ ap_hook_child_init(md_child_init, NULL, mod_ssl, APR_HOOK_MIDDLE); /* answer challenges *very* early, before any configured authentication may strike */ - ap_hook_post_read_request(md_require_https_maybe, NULL, NULL, APR_HOOK_FIRST); + ap_hook_post_read_request(md_require_https_maybe, mod_ssl, NULL, APR_HOOK_MIDDLE); ap_hook_post_read_request(md_http_challenge_pr, NULL, NULL, APR_HOOK_MIDDLE); - APR_REGISTER_OPTIONAL_FN(md_is_managed); - APR_REGISTER_OPTIONAL_FN(md_get_certificate); - APR_REGISTER_OPTIONAL_FN(md_is_challenge); - APR_REGISTER_OPTIONAL_FN(md_get_credentials); + ap_hook_protocol_propose(md_protocol_propose, NULL, NULL, APR_HOOK_MIDDLE); + ap_hook_protocol_switch(md_protocol_switch, NULL, NULL, APR_HOOK_MIDDLE); + ap_hook_protocol_get(md_protocol_get, NULL, NULL, APR_HOOK_MIDDLE); + + /* Status request handlers and contributors */ + ap_hook_post_read_request(md_http_cert_status, NULL, mod_ssl, APR_HOOK_MIDDLE); + APR_OPTIONAL_HOOK(ap, status_hook, md_domains_status_hook, NULL, NULL, APR_HOOK_MIDDLE); + APR_OPTIONAL_HOOK(ap, status_hook, md_ocsp_status_hook, NULL, NULL, APR_HOOK_MIDDLE); + ap_hook_handler(md_status_handler, NULL, NULL, APR_HOOK_MIDDLE); + + ap_hook_ssl_answer_challenge(md_answer_challenge, NULL, NULL, APR_HOOK_MIDDLE); + ap_hook_ssl_add_cert_files(md_add_cert_files, NULL, NULL, APR_HOOK_MIDDLE); + ap_hook_ssl_add_fallback_cert_files(md_add_fallback_cert_files, NULL, NULL, APR_HOOK_MIDDLE); + +#if AP_MODULE_MAGIC_AT_LEAST(20120211, 105) + ap_hook_ssl_ocsp_prime_hook(md_ocsp_prime_status, NULL, NULL, APR_HOOK_MIDDLE); + ap_hook_ssl_ocsp_get_resp_hook(md_ocsp_provide_status, NULL, NULL, APR_HOOK_MIDDLE); +#else +#error "This version of mod_md requires Apache httpd 2.4.48 or newer." 
+#endif /* AP_MODULE_MAGIC_AT_LEAST() */ } diff --git a/modules/md/mod_md.dsp b/modules/md/mod_md.dsp index c685f54..d99fb1c 100644 --- a/modules/md/mod_md.dsp +++ b/modules/md/mod_md.dsp @@ -43,7 +43,7 @@ RSC=rc.exe # PROP Ignore_Export_Lib 0 # PROP Target_Dir "" # ADD BASE CPP /nologo /MD /W3 /O2 /D "WIN32" /D "NDEBUG" /D "_WINDOWS" /D "ssize_t=long" /FD /c -# ADD CPP /nologo /MD /W3 /O2 /Oy- /Zi /I "../../server/mpm/winnt" "/I ../ssl" /I "../../include" /I "../../srclib/apr/include" /I "../../srclib/apr-util/include" /I "../../srclib/openssl/inc32" /I "../../srclib/jansson/include" /I "../../srclib/curl/include" /I "../core" /D "NDEBUG" /D "WIN32" /D "_WINDOWS" /D "ssize_t=long" /Fd"Release\mod_md_src" /FD /c +# ADD CPP /nologo /MD /W3 /O2 /Oy- /Zi /I "../../server/mpm/winnt" "/I ../ssl" /I "../../include" /I "../generators" /I "../../srclib/apr/include" /I "../../srclib/apr-util/include" /I "../../srclib/openssl/inc32" /I "../../srclib/jansson/include" /I "../../srclib/curl/include" /I "../core" /D "NDEBUG" /D "WIN32" /D "_WINDOWS" /D "ssize_t=long" /Fd"Release\mod_md_src" /FD /c # ADD BASE MTL /nologo /D "NDEBUG" /win32 # ADD MTL /nologo /D "NDEBUG" /mktyplib203 /win32 # ADD BASE RSC /l 0x409 /d "NDEBUG" @@ -75,7 +75,7 @@ PostBuild_Cmds=if exist $(TargetPath).manifest mt.exe -manifest $(TargetPath).ma # PROP Ignore_Export_Lib 0 # PROP Target_Dir "" # ADD BASE CPP /nologo /MDd /W3 /EHsc /Zi /Od /D "WIN32" /D "_DEBUG" /D "_WINDOWS" /D "ssize_t=long" /FD /c -# ADD CPP /nologo /MDd /W3 /EHsc /Zi /Od /I "../ssl" /I "../../include" /I "../../srclib/apr/include" /I "../../srclib/apr-util/include" /I "../../srclib/openssl/inc32" /I "../../srclib/jansson/include" /I "../../srclib/curl/include" /I "../core" /src" /D "_DEBUG" /D "WIN32" /D "_WINDOWS" /D "ssize_t=long" /Fd"Debug\mod_md_src" /FD /c +# ADD CPP /nologo /MDd /W3 /EHsc /Zi /Od /I "../ssl" /I "../../include" /I "../../srclib/apr/include" /I "../generators" /I "../../srclib/apr-util/include" /I "../../srclib/openssl/inc32" /I "../../srclib/jansson/include" /I "../../srclib/curl/include" /I "../core" /src" /D "_DEBUG" /D "WIN32" /D "_WINDOWS" /D "ssize_t=long" /Fd"Debug\mod_md_src" /FD /c # ADD BASE MTL /nologo /D "_DEBUG" /win32 # ADD MTL /nologo /D "_DEBUG" /mktyplib203 /win32 # ADD BASE RSC /l 0x409 /d "_DEBUG" @@ -109,10 +109,46 @@ SOURCE=./mod_md_config.c # End Source File # Begin Source File +SOURCE=./mod_md_drive.c +# End Source File +# Begin Source File + +SOURCE=./mod_md_ocsp.c +# End Source File +# Begin Source File + SOURCE=./mod_md_os.c # End Source File # Begin Source File +SOURCE=./mod_md_status.c +# End Source File +# Begin Source File + +SOURCE=./md_acme.c +# End Source File +# Begin Source File + +SOURCE=./md_acme_acct.c +# End Source File +# Begin Source File + +SOURCE=./md_acme_authz.c +# End Source File +# Begin Source File + +SOURCE=./md_acme_drive.c +# End Source File +# Begin Source File + +SOURCE=./md_acme_order.c +# End Source File +# Begin Source File + +SOURCE=./md_acmev2_drive.c +# End Source File +# Begin Source File + SOURCE=./md_core.c # End Source File # Begin Source File @@ -129,6 +165,10 @@ SOURCE=./md_http.c # End Source File # Begin Source File +SOURCE=./md_event.c +# End Source File +# Begin Source File + SOURCE=./md_json.c # End Source File # Begin Source File @@ -141,38 +181,41 @@ SOURCE=./md_log.c # End Source File # Begin Source File -SOURCE=./md_reg.c +SOURCE=./md_ocsp.c # End Source File # Begin Source File -SOURCE=./md_store.c +SOURCE=./md_reg.c # End Source File # Begin Source File 
-SOURCE=./md_store_fs.c +SOURCE=./md_result.c # End Source File # Begin Source File -SOURCE=./md_util.c +SOURCE=./md_status.c # End Source File # Begin Source File -SOURCE=./md_acme.c +SOURCE=./md_store.c # End Source File # Begin Source File -SOURCE=./md_acme_acct.c +SOURCE=./md_store_fs.c # End Source File # Begin Source File -SOURCE=./md_acme_authz.c +SOURCE=./md_tailscale.c # End Source File # Begin Source File -SOURCE=./md_acme_drive.c +SOURCE=./md_time.c # End Source File # Begin Source File +SOURCE=./md_util.c +# End Source File +# Begin Source File SOURCE=..\..\build\win32\httpd.rc # End Source File diff --git a/modules/md/mod_md.h b/modules/md/mod_md.h index 5ff8f52..805737d 100644 --- a/modules/md/mod_md.h +++ b/modules/md/mod_md.h @@ -17,34 +17,4 @@ #ifndef mod_md_mod_md_h #define mod_md_mod_md_h -#include -#include - -struct server_rec; - -APR_DECLARE_OPTIONAL_FN(int, - md_is_managed, (struct server_rec *)); - -/** - * Get the certificate/key for the managed domain (md_is_managed != 0). - * - * @return APR_EAGAIN if the real certificate is not available yet - */ -APR_DECLARE_OPTIONAL_FN(apr_status_t, - md_get_certificate, (struct server_rec *, apr_pool_t *, - const char **pkeyfile, - const char **pcertfile)); - -APR_DECLARE_OPTIONAL_FN(int, - md_is_challenge, (struct conn_rec *, const char *, - X509 **pcert, EVP_PKEY **pkey)); - -/* Backward compatibility to older mod_ssl patches, will generate - * a WARNING in the logs, use 'md_get_certificate' instead */ -APR_DECLARE_OPTIONAL_FN(apr_status_t, - md_get_credentials, (struct server_rec *, apr_pool_t *, - const char **pkeyfile, - const char **pcertfile, - const char **pchainfile)); - #endif /* mod_md_mod_md_h */ diff --git a/modules/md/mod_md.mak b/modules/md/mod_md.mak index 9d5881e..9779e6b 100644 --- a/modules/md/mod_md.mak +++ b/modules/md/mod_md.mak @@ -64,20 +64,31 @@ CLEAN : -@erase "$(INTDIR)\md_acme_acct.obj" -@erase "$(INTDIR)\md_acme_authz.obj" -@erase "$(INTDIR)\md_acme_drive.obj" + -@erase "$(INTDIR)\md_acme_order.obj" + -@erase "$(INTDIR)\md_acmev2_drive.obj" -@erase "$(INTDIR)\md_core.obj" -@erase "$(INTDIR)\md_crypt.obj" -@erase "$(INTDIR)\md_curl.obj" + -@erase "$(INTDIR)\md_event.obj" -@erase "$(INTDIR)\md_http.obj" -@erase "$(INTDIR)\md_json.obj" -@erase "$(INTDIR)\md_jws.obj" -@erase "$(INTDIR)\md_log.obj" + -@erase "$(INTDIR)\md_ocsp.obj" -@erase "$(INTDIR)\md_reg.obj" + -@erase "$(INTDIR)\md_result.obj" + -@erase "$(INTDIR)\md_status.obj" -@erase "$(INTDIR)\md_store.obj" -@erase "$(INTDIR)\md_store_fs.obj" + -@erase "$(INTDIR)\md_tailscale.obj" + -@erase "$(INTDIR)\md_time.obj" -@erase "$(INTDIR)\md_util.obj" -@erase "$(INTDIR)\mod_md.obj" -@erase "$(INTDIR)\mod_md.res" -@erase "$(INTDIR)\mod_md_config.obj" + -@erase "$(INTDIR)\mod_md_drive.obj" + -@erase "$(INTDIR)\mod_md_status.obj" + -@erase "$(INTDIR)\mod_md_ocsp.obj" -@erase "$(INTDIR)\mod_md_os.obj" -@erase "$(INTDIR)\mod_md_src.idb" -@erase "$(INTDIR)\mod_md_src.pdb" @@ -90,7 +101,7 @@ CLEAN : if not exist "$(OUTDIR)/$(NULL)" mkdir "$(OUTDIR)" CPP=cl.exe -CPP_PROJ=/nologo /MD /W3 /Zi /O2 /Oy- /I "../../server/mpm/winnt" /I "../../include" /I "../../srclib/apr/include" /I "../../srclib/apr-util/include" $(SSLINC) /I "../../srclib/jansson/include" /I "../../srclib/curl/include" /I "../ssl" /I "../core" /D "NDEBUG" /D "WIN32" /D "_WINDOWS" /D ssize_t=long /Fo"$(INTDIR)\\" /Fd"$(INTDIR)\mod_md_src" /FD /I " ../ssl" /c +CPP_PROJ=/nologo /MD /W3 /Zi /O2 /Oy- /I "../../server/mpm/winnt" /I "../../include" /I "../../srclib/apr/include" /I 
"../../srclib/apr-util/include" $(SSLINC) /I "../../srclib/jansson/include" /I "../../srclib/curl/include" /I "../ssl" /I "../core" /I "../generators" /D "NDEBUG" /D "WIN32" /D "_WINDOWS" /D ssize_t=long /Fo"$(INTDIR)\\" /Fd"$(INTDIR)\mod_md_src" /FD /I " ../ssl" /c .c{$(INTDIR)}.obj:: $(CPP) @<< @@ -135,22 +146,33 @@ LINK32_FLAGS=kernel32.lib libhttpd.lib libapr-1.lib libaprutil-1.lib $(SSLCRP).l LINK32_OBJS= \ "$(INTDIR)\mod_md.obj" \ "$(INTDIR)\mod_md_config.obj" \ + "$(INTDIR)\mod_md_drive.obj" \ + "$(INTDIR)\mod_md_ocsp.obj" \ "$(INTDIR)\mod_md_os.obj" \ + "$(INTDIR)\mod_md_status.obj" \ "$(INTDIR)\md_core.obj" \ "$(INTDIR)\md_crypt.obj" \ "$(INTDIR)\md_curl.obj" \ + "$(INTDIR)\md_event.obj" \ "$(INTDIR)\md_http.obj" \ "$(INTDIR)\md_json.obj" \ "$(INTDIR)\md_jws.obj" \ "$(INTDIR)\md_log.obj" \ + "$(INTDIR)\md_ocsp.obj" \ "$(INTDIR)\md_reg.obj" \ + "$(INTDIR)\md_result.obj" \ + "$(INTDIR)\md_status.obj" \ "$(INTDIR)\md_store.obj" \ "$(INTDIR)\md_store_fs.obj" \ + "$(INTDIR)\md_tailscale.obj" \ + "$(INTDIR)\md_time.obj" \ "$(INTDIR)\md_util.obj" \ "$(INTDIR)\md_acme.obj" \ "$(INTDIR)\md_acme_acct.obj" \ "$(INTDIR)\md_acme_authz.obj" \ "$(INTDIR)\md_acme_drive.obj" \ + "$(INTDIR)\md_acme_order.obj" \ + "$(INTDIR)\md_acmev2_drive.obj" \ "$(INTDIR)\mod_md.res" \ "..\..\srclib\apr\Release\libapr-1.lib" \ "..\..\srclib\apr-util\Release\libaprutil-1.lib" \ @@ -203,20 +225,31 @@ CLEAN : -@erase "$(INTDIR)\md_acme_acct.obj" -@erase "$(INTDIR)\md_acme_authz.obj" -@erase "$(INTDIR)\md_acme_drive.obj" + -@erase "$(INTDIR)\md_acme_order.obj" + -@erase "$(INTDIR)\md_acmev2_drive.obj" -@erase "$(INTDIR)\md_core.obj" -@erase "$(INTDIR)\md_crypt.obj" -@erase "$(INTDIR)\md_curl.obj" + -@erase "$(INTDIR)\md_event.obj" -@erase "$(INTDIR)\md_http.obj" -@erase "$(INTDIR)\md_json.obj" -@erase "$(INTDIR)\md_jws.obj" -@erase "$(INTDIR)\md_log.obj" + -@erase "$(INTDIR)\md_ocsp.obj" -@erase "$(INTDIR)\md_reg.obj" + -@erase "$(INTDIR)\md_result.obj" + -@erase "$(INTDIR)\md_status.obj" -@erase "$(INTDIR)\md_store.obj" -@erase "$(INTDIR)\md_store_fs.obj" + -@erase "$(INTDIR)\md_tailscale.obj" + -@erase "$(INTDIR)\md_time.obj" -@erase "$(INTDIR)\md_util.obj" -@erase "$(INTDIR)\mod_md.obj" -@erase "$(INTDIR)\mod_md.res" -@erase "$(INTDIR)\mod_md_config.obj" + -@erase "$(INTDIR)\mod_md_drive.obj" + -@erase "$(INTDIR)\mod_md_status.obj" + -@erase "$(INTDIR)\mod_md_ocsp.obj" -@erase "$(INTDIR)\mod_md_os.obj" -@erase "$(INTDIR)\mod_md_src.idb" -@erase "$(INTDIR)\mod_md_src.pdb" @@ -229,7 +262,7 @@ CLEAN : if not exist "$(OUTDIR)/$(NULL)" mkdir "$(OUTDIR)" CPP=cl.exe -CPP_PROJ=/nologo /MDd /W3 /Zi /Od /I "../../include" /I "../../srclib/apr/include" /I "../../srclib/apr-util/include" $(SSLINC) /I "../../srclib/jansson/include" /I "../../srclib/curl/include" /I "../core" /I "../ssl" /D "_DEBUG" /D "WIN32" /D "_WINDOWS" /D ssize_t=long /Fo"$(INTDIR)\\" /Fd"$(INTDIR)\mod_md_src" /FD /EHsc /c +CPP_PROJ=/nologo /MDd /W3 /Zi /Od /I "../../include" /I "../../srclib/apr/include" /I "../../srclib/apr-util/include" $(SSLINC) /I "../../srclib/jansson/include" /I "../../srclib/curl/include" /I "../core" /I "../generators" /I "../ssl" /D "_DEBUG" /D "WIN32" /D "_WINDOWS" /D ssize_t=long /Fo"$(INTDIR)\\" /Fd"$(INTDIR)\mod_md_src" /FD /EHsc /c .c{$(INTDIR)}.obj:: $(CPP) @<< @@ -274,22 +307,33 @@ LINK32_FLAGS=kernel32.lib libhttpd.lib libapr-1.lib libaprutil-1.lib $(SSLCRP).l LINK32_OBJS= \ "$(INTDIR)\mod_md.obj" \ "$(INTDIR)\mod_md_config.obj" \ + "$(INTDIR)\mod_md_drive.obj" \ + "$(INTDIR)\mod_md_ocsp.obj" \ "$(INTDIR)\mod_md_os.obj" \ 
+ "$(INTDIR)\mod_md_status.obj" \ "$(INTDIR)\md_core.obj" \ "$(INTDIR)\md_crypt.obj" \ "$(INTDIR)\md_curl.obj" \ + "$(INTDIR)\md_event.obj" \ "$(INTDIR)\md_http.obj" \ "$(INTDIR)\md_json.obj" \ "$(INTDIR)\md_jws.obj" \ "$(INTDIR)\md_log.obj" \ + "$(INTDIR)\md_ocsp.obj" \ "$(INTDIR)\md_reg.obj" \ + "$(INTDIR)\md_result.obj" \ + "$(INTDIR)\md_status.obj" \ "$(INTDIR)\md_store.obj" \ "$(INTDIR)\md_store_fs.obj" \ + "$(INTDIR)\md_tailscale.obj" \ + "$(INTDIR)\md_time.obj" \ "$(INTDIR)\md_util.obj" \ "$(INTDIR)\md_acme.obj" \ "$(INTDIR)\md_acme_acct.obj" \ "$(INTDIR)\md_acme_authz.obj" \ "$(INTDIR)\md_acme_drive.obj" \ + "$(INTDIR)\md_acme_order.obj" \ + "$(INTDIR)\md_acmev2_drive.obj" \ "$(INTDIR)\mod_md.res" \ "..\..\srclib\apr\Debug\libapr-1.lib" \ "..\..\srclib\apr-util\Debug\libaprutil-1.lib" \ @@ -445,6 +489,16 @@ SOURCE=./md_acme_drive.c "$(INTDIR)\md_acme_drive.obj" : $(SOURCE) "$(INTDIR)" +SOURCE=./md_acme_order.c + +"$(INTDIR)\md_acme_order.obj" : $(SOURCE) "$(INTDIR)" + + +SOURCE=./md_acmev2_drive.c + +"$(INTDIR)\md_acmev2_drive.obj" : $(SOURCE) "$(INTDIR)" + + SOURCE=./md_core.c "$(INTDIR)\md_core.obj" : $(SOURCE) "$(INTDIR)" @@ -460,6 +514,11 @@ SOURCE=./md_curl.c "$(INTDIR)\md_curl.obj" : $(SOURCE) "$(INTDIR)" +SOURCE=./md_event.c + +"$(INTDIR)\md_event.obj" : $(SOURCE) "$(INTDIR)" + + SOURCE=./md_http.c "$(INTDIR)\md_http.obj" : $(SOURCE) "$(INTDIR)" @@ -480,11 +539,26 @@ SOURCE=./md_log.c "$(INTDIR)\md_log.obj" : $(SOURCE) "$(INTDIR)" +SOURCE=./md_ocsp.c + +"$(INTDIR)\md_ocsp.obj" : $(SOURCE) "$(INTDIR)" + + SOURCE=./md_reg.c "$(INTDIR)\md_reg.obj" : $(SOURCE) "$(INTDIR)" +SOURCE=./md_result.c + +"$(INTDIR)\md_result.obj" : $(SOURCE) "$(INTDIR)" + + +SOURCE=./md_status.c + +"$(INTDIR)\md_status.obj" : $(SOURCE) "$(INTDIR)" + + SOURCE=./md_store.c "$(INTDIR)\md_store.obj" : $(SOURCE) "$(INTDIR)" @@ -495,6 +569,16 @@ SOURCE=./md_store_fs.c "$(INTDIR)\md_store_fs.obj" : $(SOURCE) "$(INTDIR)" +SOURCE=./md_tailscale.c + +"$(INTDIR)\md_tailscale.obj" : $(SOURCE) "$(INTDIR)" + + +SOURCE=./md_time.c + +"$(INTDIR)\md_time.obj" : $(SOURCE) "$(INTDIR)" + + SOURCE=./md_util.c "$(INTDIR)\md_util.obj" : $(SOURCE) "$(INTDIR)" @@ -510,11 +594,25 @@ SOURCE=./mod_md_config.c "$(INTDIR)\mod_md_config.obj" : $(SOURCE) "$(INTDIR)" +SOURCE=./mod_md_drive.c + +"$(INTDIR)\mod_md_drive.obj" : $(SOURCE) "$(INTDIR)" + + +SOURCE=./mod_md_ocsp.c + +"$(INTDIR)\mod_md_ocsp.obj" : $(SOURCE) "$(INTDIR)" + + SOURCE=./mod_md_os.c "$(INTDIR)\mod_md_os.obj" : $(SOURCE) "$(INTDIR)" +SOURCE=./mod_md_status.c + +"$(INTDIR)\mod_md_status.obj" : $(SOURCE) "$(INTDIR)" + !ENDIF diff --git a/modules/md/mod_md_config.c b/modules/md/mod_md_config.c index 336a21b..31d06b4 100644 --- a/modules/md/mod_md_config.c +++ b/modules/md/mod_md_config.c @@ -26,70 +26,105 @@ #include #include "md.h" +#include "md_acme.h" #include "md_crypt.h" +#include "md_log.h" +#include "md_json.h" #include "md_util.h" #include "mod_md_private.h" #include "mod_md_config.h" -#define MD_CMD_MD "MDomain" -#define MD_CMD_OLD_MD "ManagedDomain" #define MD_CMD_MD_SECTION "mds = apr_array_make(pool, 5, sizeof(const md_t *)); mod_md_config->unused_names = apr_array_make(pool, 5, sizeof(const md_t *)); - + mod_md_config->env = apr_table_make(pool, 10); + mod_md_config->init_errors = apr_hash_make(pool); + apr_pool_cleanup_register(pool, NULL, cleanup_mod_config, apr_pool_cleanup_null); } @@ -125,46 +162,66 @@ static void srv_conf_props_clear(md_srv_conf_t *sc) { sc->transitive = DEF_VAL; sc->require_https = MD_REQUIRE_UNSET; - sc->drive_mode = DEF_VAL; + 
sc->renew_mode = DEF_VAL; sc->must_staple = DEF_VAL; - sc->pkey_spec = NULL; - sc->renew_norm = DEF_VAL; - sc->renew_window = DEF_VAL; - sc->ca_url = NULL; + sc->pks = NULL; + sc->renew_window = NULL; + sc->warn_window = NULL; + sc->ca_urls = NULL; + sc->ca_contact = NULL; sc->ca_proto = NULL; sc->ca_agreement = NULL; sc->ca_challenges = NULL; + sc->ca_eab_kid = NULL; + sc->ca_eab_hmac = NULL; + sc->stapling = DEF_VAL; + sc->staple_others = DEF_VAL; + sc->dns01_cmd = NULL; } static void srv_conf_props_copy(md_srv_conf_t *to, const md_srv_conf_t *from) { to->transitive = from->transitive; to->require_https = from->require_https; - to->drive_mode = from->drive_mode; + to->renew_mode = from->renew_mode; to->must_staple = from->must_staple; - to->pkey_spec = from->pkey_spec; - to->renew_norm = from->renew_norm; + to->pks = from->pks; + to->warn_window = from->warn_window; to->renew_window = from->renew_window; - to->ca_url = from->ca_url; + to->ca_urls = from->ca_urls; + to->ca_contact = from->ca_contact; to->ca_proto = from->ca_proto; to->ca_agreement = from->ca_agreement; to->ca_challenges = from->ca_challenges; + to->ca_eab_kid = from->ca_eab_kid; + to->ca_eab_hmac = from->ca_eab_hmac; + to->stapling = from->stapling; + to->staple_others = from->staple_others; + to->dns01_cmd = from->dns01_cmd; } static void srv_conf_props_apply(md_t *md, const md_srv_conf_t *from, apr_pool_t *p) { if (from->require_https != MD_REQUIRE_UNSET) md->require_https = from->require_https; if (from->transitive != DEF_VAL) md->transitive = from->transitive; - if (from->drive_mode != DEF_VAL) md->drive_mode = from->drive_mode; + if (from->renew_mode != DEF_VAL) md->renew_mode = from->renew_mode; if (from->must_staple != DEF_VAL) md->must_staple = from->must_staple; - if (from->pkey_spec) md->pkey_spec = from->pkey_spec; - if (from->renew_norm != DEF_VAL) md->renew_norm = from->renew_norm; - if (from->renew_window != DEF_VAL) md->renew_window = from->renew_window; - - if (from->ca_url) md->ca_url = from->ca_url; + if (from->pks) md->pks = md_pkeys_spec_clone(p, from->pks); + if (from->renew_window) md->renew_window = from->renew_window; + if (from->warn_window) md->warn_window = from->warn_window; + if (from->ca_urls) md->ca_urls = apr_array_copy(p, from->ca_urls); if (from->ca_proto) md->ca_proto = from->ca_proto; if (from->ca_agreement) md->ca_agreement = from->ca_agreement; + if (from->ca_contact) { + apr_array_clear(md->contacts); + APR_ARRAY_PUSH(md->contacts, const char *) = + md_util_schemify(p, from->ca_contact, "mailto"); + } if (from->ca_challenges) md->ca_challenges = apr_array_copy(p, from->ca_challenges); + if (from->ca_eab_kid) md->ca_eab_kid = from->ca_eab_kid; + if (from->ca_eab_hmac) md->ca_eab_hmac = from->ca_eab_hmac; + if (from->stapling != DEF_VAL) md->stapling = from->stapling; + if (from->dns01_cmd) md->dns01_cmd = from->dns01_cmd; } void *md_config_create_svr(apr_pool_t *pool, server_rec *s) @@ -190,23 +247,28 @@ static void *md_config_merge(apr_pool_t *pool, void *basev, void *addv) nsc = (md_srv_conf_t *)apr_pcalloc(pool, sizeof(md_srv_conf_t)); nsc->name = name; nsc->mc = add->mc? add->mc : base->mc; - nsc->assigned = add->assigned? add->assigned : base->assigned; nsc->transitive = (add->transitive != DEF_VAL)? add->transitive : base->transitive; nsc->require_https = (add->require_https != MD_REQUIRE_UNSET)? add->require_https : base->require_https; - nsc->drive_mode = (add->drive_mode != DEF_VAL)? add->drive_mode : base->drive_mode; + nsc->renew_mode = (add->renew_mode != DEF_VAL)? 
add->renew_mode : base->renew_mode; nsc->must_staple = (add->must_staple != DEF_VAL)? add->must_staple : base->must_staple; - nsc->pkey_spec = add->pkey_spec? add->pkey_spec : base->pkey_spec; - nsc->renew_window = (add->renew_norm != DEF_VAL)? add->renew_norm : base->renew_norm; - nsc->renew_window = (add->renew_window != DEF_VAL)? add->renew_window : base->renew_window; + nsc->pks = (!md_pkeys_spec_is_empty(add->pks))? add->pks : base->pks; + nsc->renew_window = add->renew_window? add->renew_window : base->renew_window; + nsc->warn_window = add->warn_window? add->warn_window : base->warn_window; - nsc->ca_url = add->ca_url? add->ca_url : base->ca_url; + nsc->ca_urls = add->ca_urls? apr_array_copy(pool, add->ca_urls) + : (base->ca_urls? apr_array_copy(pool, base->ca_urls) : NULL); + nsc->ca_contact = add->ca_contact? add->ca_contact : base->ca_contact; nsc->ca_proto = add->ca_proto? add->ca_proto : base->ca_proto; nsc->ca_agreement = add->ca_agreement? add->ca_agreement : base->ca_agreement; nsc->ca_challenges = (add->ca_challenges? apr_array_copy(pool, add->ca_challenges) : (base->ca_challenges? apr_array_copy(pool, base->ca_challenges) : NULL)); + nsc->ca_eab_kid = add->ca_eab_kid? add->ca_eab_kid : base->ca_eab_kid; + nsc->ca_eab_hmac = add->ca_eab_hmac? add->ca_eab_hmac : base->ca_eab_hmac; + nsc->stapling = (add->stapling != DEF_VAL)? add->stapling : base->stapling; + nsc->staple_others = (add->staple_others != DEF_VAL)? add->staple_others : base->staple_others; + nsc->dns01_cmd = (add->dns01_cmd)? add->dns01_cmd : base->dns01_cmd; nsc->current = NULL; - nsc->assigned = NULL; return nsc; } @@ -227,7 +289,7 @@ static int inside_section(cmd_parms *cmd, const char *section) { } static int inside_md_section(cmd_parms *cmd) { - return (inside_section(cmd, MD_CMD_MD_SECTION) || inside_section(cmd, MD_CMD_MD_OLD_SECTION)); + return (inside_section(cmd, MD_CMD_MD_SECTION) || inside_section(cmd, MD_CMD_MD2_SECTION)); } static const char *md_section_check(cmd_parms *cmd) { @@ -238,6 +300,46 @@ static const char *md_section_check(cmd_parms *cmd) { return NULL; } +#define MD_LOC_GLOBAL (0x01) +#define MD_LOC_MD (0x02) +#define MD_LOC_ELSE (0x04) +#define MD_LOC_ALL (0x07) +#define MD_LOC_NOT_MD (0x102) + +static const char *md_conf_check_location(cmd_parms *cmd, int flags) +{ + if (MD_LOC_GLOBAL == flags) { + return ap_check_cmd_context(cmd, GLOBAL_ONLY); + } + if (MD_LOC_NOT_MD == flags && inside_md_section(cmd)) { + return apr_pstrcat(cmd->pool, cmd->cmd->name, " is not allowed inside an '", + MD_CMD_MD_SECTION, "' context", NULL); + } + if (MD_LOC_MD == flags) { + return md_section_check(cmd); + } + else if ((MD_LOC_MD & flags) && inside_md_section(cmd)) { + return NULL; + } + return ap_check_cmd_context(cmd, NOT_IN_DIRECTORY|NOT_IN_LOCATION); +} + +static const char *set_on_off(int *pvalue, const char *s, apr_pool_t *p) +{ + if (!apr_strnatcasecmp("off", s)) { + *pvalue = 0; + } + else if (!apr_strnatcasecmp("on", s)) { + *pvalue = 1; + } + else { + return apr_pstrcat(p, "unknown '", s, + "', supported parameter values are 'on' and 'off'", NULL); + } + return NULL; +} + + static void add_domain_name(apr_array_header_t *domains, const char *name, apr_pool_t *p) { if (md_array_str_index(domains, name, 0, 0) < 0) { @@ -269,7 +371,7 @@ static const char *md_config_sec_start(cmd_parms *cmd, void *mconfig, const char int transitive = -1; (void)mconfig; - if ((err = ap_check_cmd_context(cmd, GLOBAL_ONLY))) { + if ((err = md_conf_check_location(cmd, MD_LOC_NOT_MD))) { return err; } @@ -284,11 
+386,11 @@ static const char *md_config_sec_start(cmd_parms *cmd, void *mconfig, const char return MD_CMD_MD_SECTION " > section must specify a unique domain name"; } - name = ap_getword_white(cmd->pool, &arg); + name = ap_getword_conf(cmd->pool, &arg); domains = apr_array_make(cmd->pool, 5, sizeof(const char *)); add_domain_name(domains, name, cmd->pool); while (*arg != '\0') { - name = ap_getword_white(cmd->pool, &arg); + name = ap_getword_conf(cmd->pool, &arg); if (NULL != set_transitive(&transitive, name)) { add_domain_name(domains, name, cmd->pool); } @@ -355,8 +457,7 @@ static const char *md_config_set_names(cmd_parms *cmd, void *dc, int i, transitive = -1; (void)dc; - err = ap_check_cmd_context(cmd, NOT_IN_DIR_LOC_FILE); - if (err) { + if ((err = md_conf_check_location(cmd, MD_LOC_NOT_MD))) { return err; } @@ -385,16 +486,42 @@ static const char *md_config_set_names(cmd_parms *cmd, void *dc, return NULL; } -static const char *md_config_set_ca(cmd_parms *cmd, void *dc, const char *value) +static const char *md_config_set_ca(cmd_parms *cmd, void *dc, + int argc, char *const argv[]) +{ + md_srv_conf_t *sc = md_config_get(cmd->server); + const char *err, *url; + int i; + + (void)dc; + if ((err = md_conf_check_location(cmd, MD_LOC_ALL))) { + return err; + } + if (!sc->ca_urls) { + sc->ca_urls = apr_array_make(cmd->pool, 3, sizeof(const char *)); + } + else { + apr_array_clear(sc->ca_urls); + } + for (i = 0; i < argc; ++i) { + if (APR_SUCCESS != md_get_ca_url_from_name(&url, cmd->pool, argv[i])) { + return url; + } + APR_ARRAY_PUSH(sc->ca_urls, const char *) = url; + } + return NULL; +} + +static const char *md_config_set_contact(cmd_parms *cmd, void *dc, const char *value) { md_srv_conf_t *sc = md_config_get(cmd->server); const char *err; (void)dc; - if (!inside_md_section(cmd) && (err = ap_check_cmd_context(cmd, GLOBAL_ONLY))) { + if ((err = md_conf_check_location(cmd, MD_LOC_ALL))) { return err; } - sc->ca_url = value; + sc->ca_contact = value; return NULL; } @@ -404,7 +531,7 @@ static const char *md_config_set_ca_proto(cmd_parms *cmd, void *dc, const char * const char *err; (void)dc; - if (!inside_md_section(cmd) && (err = ap_check_cmd_context(cmd, GLOBAL_ONLY))) { + if ((err = md_conf_check_location(cmd, MD_LOC_ALL))) { return err; } config->ca_proto = value; @@ -417,37 +544,37 @@ static const char *md_config_set_agreement(cmd_parms *cmd, void *dc, const char const char *err; (void)dc; - if (!inside_md_section(cmd) && (err = ap_check_cmd_context(cmd, GLOBAL_ONLY))) { + if ((err = md_conf_check_location(cmd, MD_LOC_ALL))) { return err; } config->ca_agreement = value; return NULL; } -static const char *md_config_set_drive_mode(cmd_parms *cmd, void *dc, const char *value) +static const char *md_config_set_renew_mode(cmd_parms *cmd, void *dc, const char *value) { md_srv_conf_t *config = md_config_get(cmd->server); const char *err; - md_drive_mode_t drive_mode; + md_renew_mode_t renew_mode; (void)dc; if (!apr_strnatcasecmp("auto", value) || !apr_strnatcasecmp("automatic", value)) { - drive_mode = MD_DRIVE_AUTO; + renew_mode = MD_RENEW_AUTO; } else if (!apr_strnatcasecmp("always", value)) { - drive_mode = MD_DRIVE_ALWAYS; + renew_mode = MD_RENEW_ALWAYS; } else if (!apr_strnatcasecmp("manual", value) || !apr_strnatcasecmp("stick", value)) { - drive_mode = MD_DRIVE_MANUAL; + renew_mode = MD_RENEW_MANUAL; } else { return apr_pstrcat(cmd->pool, "unknown MDDriveMode ", value, NULL); } - if (!inside_md_section(cmd) && (err = ap_check_cmd_context(cmd, GLOBAL_ONLY))) { + if ((err = 
md_conf_check_location(cmd, MD_LOC_ALL))) { return err; } - config->drive_mode = drive_mode; + config->renew_mode = renew_mode; return NULL; } @@ -457,54 +584,137 @@ static const char *md_config_set_must_staple(cmd_parms *cmd, void *dc, const cha const char *err; (void)dc; - if (!inside_md_section(cmd) && (err = ap_check_cmd_context(cmd, GLOBAL_ONLY))) { + if ((err = md_conf_check_location(cmd, MD_LOC_ALL))) { return err; } + return set_on_off(&config->must_staple, value, cmd->pool); +} - if (!apr_strnatcasecmp("off", value)) { - config->must_staple = 0; +static const char *md_config_set_stapling(cmd_parms *cmd, void *dc, const char *value) +{ + md_srv_conf_t *config = md_config_get(cmd->server); + const char *err; + + (void)dc; + if ((err = md_conf_check_location(cmd, MD_LOC_ALL))) { + return err; } - else if (!apr_strnatcasecmp("on", value)) { - config->must_staple = 1; + return set_on_off(&config->stapling, value, cmd->pool); +} + +static const char *md_config_set_staple_others(cmd_parms *cmd, void *dc, const char *value) +{ + md_srv_conf_t *config = md_config_get(cmd->server); + const char *err; + + (void)dc; + if ((err = md_conf_check_location(cmd, MD_LOC_ALL))) { + return err; } - else { - return apr_pstrcat(cmd->pool, "unknown '", value, - "', supported parameter values are 'on' and 'off'", NULL); + return set_on_off(&config->staple_others, value, cmd->pool); +} + +static const char *md_config_set_base_server(cmd_parms *cmd, void *dc, const char *value) +{ + md_srv_conf_t *config = md_config_get(cmd->server); + const char *err = md_conf_check_location(cmd, MD_LOC_NOT_MD); + + (void)dc; + if (err) return err; + return set_on_off(&config->mc->manage_base_server, value, cmd->pool); +} + +static const char *md_config_set_min_delay(cmd_parms *cmd, void *dc, const char *value) +{ + md_srv_conf_t *config = md_config_get(cmd->server); + const char *err = md_conf_check_location(cmd, MD_LOC_NOT_MD); + apr_time_t delay; + + (void)dc; + if (err) return err; + if (md_duration_parse(&delay, value, "s") != APR_SUCCESS) { + return "unrecognized duration format"; } + config->mc->min_delay = delay; return NULL; } -static const char *md_config_set_base_server(cmd_parms *cmd, void *dc, const char *value) +static const char *md_config_set_retry_failover(cmd_parms *cmd, void *dc, const char *value) { md_srv_conf_t *config = md_config_get(cmd->server); - const char *err = ap_check_cmd_context(cmd, GLOBAL_ONLY); + const char *err = md_conf_check_location(cmd, MD_LOC_NOT_MD); + int retry_failover; (void)dc; - if (!err) { - if (!apr_strnatcasecmp("off", value)) { - config->mc->manage_base_server = 0; - } - else if (!apr_strnatcasecmp("on", value)) { - config->mc->manage_base_server = 1; - } - else { - err = apr_pstrcat(cmd->pool, "unknown '", value, - "', supported parameter values are 'on' and 'off'", NULL); + if (err) return err; + retry_failover = atoi(value); + if (retry_failover <= 0) { + return "invalid argument, must be a number > 0"; + } + config->mc->retry_failover = retry_failover; + return NULL; +} + +static const char *md_config_set_store_locks(cmd_parms *cmd, void *dc, const char *s) +{ + md_srv_conf_t *config = md_config_get(cmd->server); + const char *err = md_conf_check_location(cmd, MD_LOC_NOT_MD); + int use_store_locks; + apr_time_t wait_time = 0; + + (void)dc; + if (err) { + return err; + } + else if (!apr_strnatcasecmp("off", s)) { + use_store_locks = 0; + } + else if (!apr_strnatcasecmp("on", s)) { + use_store_locks = 1; + } + else { + if (md_duration_parse(&wait_time, s, "s") != 
APR_SUCCESS) { + return "neither 'on', 'off' or a duration specified"; } + use_store_locks = (wait_time != 0); } - return err; + config->mc->use_store_locks = use_store_locks; + if (wait_time) { + config->mc->lock_wait_timeout = wait_time; + } + return NULL; } -static const char *md_config_set_require_https(cmd_parms *cmd, void *dc, const char *value) +static const char *md_config_set_match_mode(cmd_parms *cmd, void *dc, const char *s) { md_srv_conf_t *config = md_config_get(cmd->server); - const char *err; + const char *err = md_conf_check_location(cmd, MD_LOC_NOT_MD); (void)dc; - if (!inside_md_section(cmd) && (err = ap_check_cmd_context(cmd, GLOBAL_ONLY))) { + if (err) { return err; } + else if (!apr_strnatcasecmp("all", s)) { + config->mc->match_mode = MD_MATCH_ALL; + } + else if (!apr_strnatcasecmp("servernames", s)) { + config->mc->match_mode = MD_MATCH_SERVERNAMES; + } + else { + return "invalid argument, must be a 'all' or 'servernames'"; + } + return NULL; +} +static const char *md_config_set_require_https(cmd_parms *cmd, void *dc, const char *value) +{ + md_srv_conf_t *config = md_config_get(cmd->server); + const char *err; + + if ((err = md_conf_check_location(cmd, MD_LOC_ALL))) { + return err; + } + (void)dc; if (!apr_strnatcasecmp("off", value)) { config->require_https = MD_REQUIRE_OFF; } @@ -521,98 +731,48 @@ static const char *md_config_set_require_https(cmd_parms *cmd, void *dc, const c return NULL; } -static apr_status_t duration_parse(const char *value, apr_interval_time_t *ptimeout, - const char *def_unit) -{ - char *endp; - long funits = 1; - apr_status_t rv; - apr_int64_t n; - - n = apr_strtoi64(value, &endp, 10); - if (errno) { - return errno; - } - if (!endp || !*endp) { - if (strcmp(def_unit, "d") == 0) { - def_unit = "s"; - funits = MD_SECS_PER_DAY; - } - } - else if (endp == value) { - return APR_EINVAL; - } - else if (*endp == 'd') { - *ptimeout = apr_time_from_sec(n * MD_SECS_PER_DAY); - return APR_SUCCESS; - } - else { - def_unit = endp; - } - rv = ap_timeout_parameter_parse(value, ptimeout, def_unit); - if (APR_SUCCESS == rv && funits > 1) { - *ptimeout *= funits; - } - return rv; -} - -static apr_status_t percentage_parse(const char *value, int *ppercent) +static const char *md_config_set_renew_window(cmd_parms *cmd, void *dc, const char *value) { - char *endp; - apr_int64_t n; + md_srv_conf_t *config = md_config_get(cmd->server); + const char *err; - n = apr_strtoi64(value, &endp, 10); - if (errno) { - return errno; + (void)dc; + if ((err = md_conf_check_location(cmd, MD_LOC_ALL))) { + return err; } - if (*endp == '%') { - if (n < 0 || n >= 100) { - return APR_BADARG; - } - *ppercent = (int)n; - return APR_SUCCESS; + err = md_timeslice_parse(&config->renew_window, cmd->pool, value, MD_TIME_LIFE_NORM); + if (!err && config->renew_window->norm + && (config->renew_window->len >= config->renew_window->norm)) { + err = "a length of 100% or more is not allowed."; } - return APR_EINVAL; + if (err) return apr_psprintf(cmd->pool, "MDRenewWindow %s", err); + return NULL; } -static const char *md_config_set_renew_window(cmd_parms *cmd, void *dc, const char *value) +static const char *md_config_set_warn_window(cmd_parms *cmd, void *dc, const char *value) { md_srv_conf_t *config = md_config_get(cmd->server); const char *err; - apr_interval_time_t timeout; - int percent = 0; (void)dc; - if (!inside_md_section(cmd) - && (err = ap_check_cmd_context(cmd, GLOBAL_ONLY))) { + if ((err = md_conf_check_location(cmd, MD_LOC_ALL))) { return err; } - - /* Inspired by http_core.c */ 
- if (duration_parse(value, &timeout, "d") == APR_SUCCESS) { - config->renew_norm = 0; - config->renew_window = timeout; - return NULL; - } - else { - switch (percentage_parse(value, &percent)) { - case APR_SUCCESS: - config->renew_norm = apr_time_from_sec(100 * MD_SECS_PER_DAY); - config->renew_window = apr_time_from_sec(percent * MD_SECS_PER_DAY); - return NULL; - case APR_BADARG: - return "MDRenewWindow as percent must be less than 100"; - } + err = md_timeslice_parse(&config->warn_window, cmd->pool, value, MD_TIME_LIFE_NORM); + if (!err && config->warn_window->norm + && (config->warn_window->len >= config->warn_window->norm)) { + err = "a length of 100% or more is not allowed."; } - return "MDRenewWindow has unrecognized format"; + if (err) return apr_psprintf(cmd->pool, "MDWarnWindow %s", err); + return NULL; } static const char *md_config_set_proxy(cmd_parms *cmd, void *arg, const char *value) { md_srv_conf_t *sc = md_config_get(cmd->server); - const char *err = ap_check_cmd_context(cmd, GLOBAL_ONLY); + const char *err; - if (err) { + if ((err = md_conf_check_location(cmd, MD_LOC_NOT_MD))) { return err; } md_util_abs_http_uri_check(cmd->pool, value, &err); @@ -627,9 +787,9 @@ static const char *md_config_set_proxy(cmd_parms *cmd, void *arg, const char *va static const char *md_config_set_store_dir(cmd_parms *cmd, void *arg, const char *value) { md_srv_conf_t *sc = md_config_get(cmd->server); - const char *err = ap_check_cmd_context(cmd, GLOBAL_ONLY); + const char *err; - if (err) { + if ((err = md_conf_check_location(cmd, MD_LOC_NOT_MD))) { return err; } sc->mc->base_dir = value; @@ -640,11 +800,19 @@ static const char *md_config_set_store_dir(cmd_parms *cmd, void *arg, const char static const char *set_port_map(md_mod_conf_t *mc, const char *value) { int net_port, local_port; - char *endp; + const char *endp; - net_port = (int)apr_strtoi64(value, &endp, 10); - if (errno) { - return "unable to parse first port number"; + if (!strncmp("http:", value, sizeof("http:") - 1)) { + net_port = 80; endp = value + sizeof("http") - 1; + } + else if (!strncmp("https:", value, sizeof("https:") - 1)) { + net_port = 443; endp = value + sizeof("https") - 1; + } + else { + net_port = (int)apr_strtoi64(value, (char**)&endp, 10); + if (errno) { + return "unable to parse first port number"; + } } if (!endp || *endp != ':') { return "no ':' after first port number"; @@ -654,7 +822,7 @@ static const char *set_port_map(md_mod_conf_t *mc, const char *value) local_port = 0; } else { - local_port = (int)apr_strtoi64(endp, &endp, 10); + local_port = (int)apr_strtoi64(endp, (char**)&endp, 10); if (errno) { return "unable to parse second port number"; } @@ -679,10 +847,10 @@ static const char *md_config_set_port_map(cmd_parms *cmd, void *arg, const char *v1, const char *v2) { md_srv_conf_t *sc = md_config_get(cmd->server); - const char *err = ap_check_cmd_context(cmd, GLOBAL_ONLY); + const char *err; (void)arg; - if (!err) { + if (!(err = md_conf_check_location(cmd, MD_LOC_NOT_MD))) { err = set_port_map(sc->mc, v1); } if (!err && v2) { @@ -700,14 +868,16 @@ static const char *md_config_set_cha_tyes(cmd_parms *cmd, void *dc, int i; (void)dc; - if (!inside_md_section(cmd) - && (err = ap_check_cmd_context(cmd, GLOBAL_ONLY))) { + if ((err = md_conf_check_location(cmd, MD_LOC_ALL))) { return err; } pcha = &config->ca_challenges; ca_challenges = *pcha; - if (!ca_challenges) { + if (ca_challenges) { + apr_array_clear(ca_challenges); + } + else { *pcha = ca_challenges = apr_array_make(cmd->pool, 5, sizeof(const char 
*)); } for (i = 0; i < argc; ++i) { @@ -723,60 +893,81 @@ static const char *md_config_set_pkeys(cmd_parms *cmd, void *dc, md_srv_conf_t *config = md_config_get(cmd->server); const char *err, *ptype; apr_int64_t bits; + int i; (void)dc; - if (!inside_md_section(cmd) - && (err = ap_check_cmd_context(cmd, GLOBAL_ONLY))) { + if ((err = md_conf_check_location(cmd, MD_LOC_ALL))) { return err; } if (argc <= 0) { return "needs to specify the private key type"; } - ptype = argv[0]; - if (!apr_strnatcasecmp("Default", ptype)) { - if (argc > 1) { - return "type 'Default' takes no parameter"; - } - if (!config->pkey_spec) { - config->pkey_spec = apr_pcalloc(cmd->pool, sizeof(*config->pkey_spec)); + config->pks = md_pkeys_spec_make(cmd->pool); + for (i = 0; i < argc; ++i) { + ptype = argv[i]; + if (!apr_strnatcasecmp("Default", ptype)) { + if (argc > 1) { + return "'Default' allows no other parameter"; + } + md_pkeys_spec_add_default(config->pks); } - config->pkey_spec->type = MD_PKEY_TYPE_DEFAULT; - return NULL; - } - else if (!apr_strnatcasecmp("RSA", ptype)) { - if (argc == 1) { - bits = MD_PKEY_RSA_BITS_DEF; + else if (strlen(ptype) > 3 + && (ptype[0] == 'R' || ptype[0] == 'r') + && (ptype[1] == 'S' || ptype[1] == 's') + && (ptype[2] == 'A' || ptype[2] == 'a') + && isdigit(ptype[3])) { + bits = (int)apr_atoi64(ptype+3); + if (bits < MD_PKEY_RSA_BITS_MIN) { + return apr_psprintf(cmd->pool, + "must be %d or higher in order to be considered safe.", + MD_PKEY_RSA_BITS_MIN); + } + if (bits >= INT_MAX) { + return apr_psprintf(cmd->pool, "is too large for an RSA key length."); + } + if (md_pkeys_spec_contains_rsa(config->pks)) { + return "two keys of type 'RSA' are not possible."; + } + md_pkeys_spec_add_rsa(config->pks, (unsigned int)bits); } - else if (argc == 2) { - bits = (int)apr_atoi64(argv[1]); - if (bits < MD_PKEY_RSA_BITS_MIN || bits >= INT_MAX) { - return apr_psprintf(cmd->pool, "must be %d or higher in order to be considered " - "safe. Too large a value will slow down everything. 
Larger then 4096 probably does " - "not make sense unless quantum cryptography really changes spin.", - MD_PKEY_RSA_BITS_MIN); + else if (!apr_strnatcasecmp("RSA", ptype)) { + if (i+1 >= argc || !isdigit(argv[i+1][0])) { + bits = MD_PKEY_RSA_BITS_DEF; + } + else { + ++i; + bits = (int)apr_atoi64(argv[i]); + if (bits < MD_PKEY_RSA_BITS_MIN) { + return apr_psprintf(cmd->pool, + "must be %d or higher in order to be considered safe.", + MD_PKEY_RSA_BITS_MIN); + } + if (bits >= INT_MAX) { + return apr_psprintf(cmd->pool, "is too large for an RSA key length."); + } + } + if (md_pkeys_spec_contains_rsa(config->pks)) { + return "two keys of type 'RSA' are not possible."; } + md_pkeys_spec_add_rsa(config->pks, (unsigned int)bits); } else { - return "key type 'RSA' has only one optional parameter, the number of bits"; - } - - if (!config->pkey_spec) { - config->pkey_spec = apr_pcalloc(cmd->pool, sizeof(*config->pkey_spec)); + if (md_pkeys_spec_contains_ec(config->pks, argv[i])) { + return apr_psprintf(cmd->pool, "two keys of type '%s' are not possible.", argv[i]); + } + md_pkeys_spec_add_ec(config->pks, argv[i]); } - config->pkey_spec->type = MD_PKEY_TYPE_RSA; - config->pkey_spec->params.rsa.bits = (unsigned int)bits; - return NULL; } - return apr_pstrcat(cmd->pool, "unsupported private key type \"", ptype, "\"", NULL); + return NULL; } static const char *md_config_set_notify_cmd(cmd_parms *cmd, void *mconfig, const char *arg) { md_srv_conf_t *sc = md_config_get(cmd->server); - const char *err = ap_check_cmd_context(cmd, GLOBAL_ONLY); + const char *err; - if (err) { + if ((err = md_conf_check_location(cmd, MD_LOC_NOT_MD))) { return err; } sc->mc->notify_cmd = arg; @@ -784,70 +975,335 @@ static const char *md_config_set_notify_cmd(cmd_parms *cmd, void *mconfig, const return NULL; } -static const char *md_config_set_names_old(cmd_parms *cmd, void *dc, - int argc, char *const argv[]) +static const char *md_config_set_msg_cmd(cmd_parms *cmd, void *mconfig, const char *arg) { - ap_log_error( APLOG_MARK, APLOG_WARNING, 0, cmd->server, - "mod_md: directive 'ManagedDomain' is deprecated, replace with 'MDomain'."); - return md_config_set_names(cmd, dc, argc, argv); + md_srv_conf_t *sc = md_config_get(cmd->server); + const char *err; + + if ((err = md_conf_check_location(cmd, MD_LOC_NOT_MD))) { + return err; + } + sc->mc->message_cmd = arg; + (void)mconfig; + return NULL; } -static const char *md_config_sec_start_old(cmd_parms *cmd, void *mconfig, const char *arg) +static const char *md_config_set_dns01_cmd(cmd_parms *cmd, void *mconfig, const char *arg) { - ap_log_error( APLOG_MARK, APLOG_WARNING, 0, cmd->server, - "mod_md: directive 'server); + const char *err; + + if ((err = md_conf_check_location(cmd, MD_LOC_ALL))) { + return err; + } + + if (inside_md_section(cmd)) { + sc->dns01_cmd = arg; + } else { + apr_table_set(sc->mc->env, MD_KEY_CMD_DNS01, arg); + } + + (void)mconfig; + return NULL; +} + +static const char *md_config_set_dns01_version(cmd_parms *cmd, void *mconfig, const char *value) +{ + md_srv_conf_t *sc = md_config_get(cmd->server); + const char *err; + + (void)mconfig; + if ((err = md_conf_check_location(cmd, MD_LOC_NOT_MD))) { + return err; + } + if (!strcmp("1", value) || !strcmp("2", value)) { + apr_table_set(sc->mc->env, MD_KEY_DNS01_VERSION, value); + } + else { + return "Only versions `1` and `2` are supported"; + } + return NULL; +} + +static const char *md_config_add_cert_file(cmd_parms *cmd, void *mconfig, const char *arg) +{ + md_srv_conf_t *sc = md_config_get(cmd->server); + const 
char *err, *fpath; + + (void)mconfig; + if ((err = md_conf_check_location(cmd, MD_LOC_MD))) return err; + assert(sc->current); + fpath = ap_server_root_relative(cmd->pool, arg); + if (!fpath) return apr_psprintf(cmd->pool, "certificate file not found: %s", arg); + if (!sc->current->cert_files) { + sc->current->cert_files = apr_array_make(cmd->pool, 3, sizeof(char*)); + } + APR_ARRAY_PUSH(sc->current->cert_files, const char*) = fpath; + return NULL; +} + +static const char *md_config_add_key_file(cmd_parms *cmd, void *mconfig, const char *arg) +{ + md_srv_conf_t *sc = md_config_get(cmd->server); + const char *err, *fpath; + + (void)mconfig; + if ((err = md_conf_check_location(cmd, MD_LOC_MD))) return err; + assert(sc->current); + fpath = ap_server_root_relative(cmd->pool, arg); + if (!fpath) return apr_psprintf(cmd->pool, "certificate key file not found: %s", arg); + if (!sc->current->pkey_files) { + sc->current->pkey_files = apr_array_make(cmd->pool, 3, sizeof(char*)); + } + APR_ARRAY_PUSH(sc->current->pkey_files, const char*) = fpath; + return NULL; +} + +static const char *md_config_set_server_status(cmd_parms *cmd, void *dc, const char *value) +{ + md_srv_conf_t *sc = md_config_get(cmd->server); + const char *err; + + (void)dc; + if ((err = md_conf_check_location(cmd, MD_LOC_ALL))) { + return err; + } + return set_on_off(&sc->mc->server_status_enabled, value, cmd->pool); +} + +static const char *md_config_set_certificate_status(cmd_parms *cmd, void *dc, const char *value) +{ + md_srv_conf_t *sc = md_config_get(cmd->server); + const char *err; + + (void)dc; + if ((err = md_conf_check_location(cmd, MD_LOC_ALL))) { + return err; + } + return set_on_off(&sc->mc->certificate_status_enabled, value, cmd->pool); +} + +static const char *md_config_set_ocsp_keep_window(cmd_parms *cmd, void *dc, const char *value) +{ + md_srv_conf_t *sc = md_config_get(cmd->server); + const char *err; + + (void)dc; + if ((err = md_conf_check_location(cmd, MD_LOC_ALL))) { + return err; + } + err = md_timeslice_parse(&sc->mc->ocsp_keep_window, cmd->pool, value, MD_TIME_OCSP_KEEP_NORM); + if (err) return apr_psprintf(cmd->pool, "MDStaplingKeepResponse %s", err); + return NULL; +} + +static const char *md_config_set_ocsp_renew_window(cmd_parms *cmd, void *dc, const char *value) +{ + md_srv_conf_t *sc = md_config_get(cmd->server); + const char *err; + + (void)dc; + if ((err = md_conf_check_location(cmd, MD_LOC_ALL))) { + return err; + } + err = md_timeslice_parse(&sc->mc->ocsp_renew_window, cmd->pool, value, MD_TIME_LIFE_NORM); + if (!err && sc->mc->ocsp_renew_window->norm + && (sc->mc->ocsp_renew_window->len >= sc->mc->ocsp_renew_window->norm)) { + err = "with a length of 100% or more is not allowed."; + } + if (err) return apr_psprintf(cmd->pool, "MDStaplingRenewWindow %s", err); + return NULL; +} + +static const char *md_config_set_cert_check(cmd_parms *cmd, void *dc, + const char *name, const char *url) +{ + md_srv_conf_t *sc = md_config_get(cmd->server); + const char *err; + + (void)dc; + if ((err = md_conf_check_location(cmd, MD_LOC_ALL))) { + return err; + } + sc->mc->cert_check_name = name; + sc->mc->cert_check_url = url; + return NULL; +} + +static const char *md_config_set_activation_delay(cmd_parms *cmd, void *mconfig, const char *arg) +{ + md_srv_conf_t *sc = md_config_get(cmd->server); + const char *err; + apr_interval_time_t delay; + + (void)mconfig; + if ((err = md_conf_check_location(cmd, MD_LOC_NOT_MD))) { + return err; + } + if (md_duration_parse(&delay, arg, "d") != APR_SUCCESS) { + return 
"unrecognized duration format"; + } + apr_table_set(sc->mc->env, MD_KEY_ACTIVATION_DELAY, md_duration_format(cmd->pool, delay)); + return NULL; +} + +static const char *md_config_set_ca_certs(cmd_parms *cmd, void *dc, const char *path) +{ + md_srv_conf_t *sc = md_config_get(cmd->server); + + (void)dc; + sc->mc->ca_certs = path; + return NULL; +} + +static const char *md_config_set_eab(cmd_parms *cmd, void *dc, + const char *keyid, const char *hmac) +{ + md_srv_conf_t *sc = md_config_get(cmd->server); + const char *err; + + (void)dc; + if ((err = md_conf_check_location(cmd, MD_LOC_ALL))) { + return err; + } + if (!hmac) { + if (!apr_strnatcasecmp("None", keyid)) { + keyid = "none"; + } + else { + /* a JSON file keeping keyid and hmac */ + const char *fpath; + apr_status_t rv; + md_json_t *json; + + /* If only dumping the config, don't verify the file */ + if (ap_state_query(AP_SQ_RUN_MODE) == AP_SQ_RM_CONFIG_DUMP) { + goto leave; + } + + fpath = ap_server_root_relative(cmd->pool, keyid); + if (!fpath) { + return apr_pstrcat(cmd->pool, cmd->cmd->name, + ": Invalid file path ", keyid, NULL); + } + if (!md_file_exists(fpath, cmd->pool)) { + return apr_pstrcat(cmd->pool, cmd->cmd->name, + ": file not found: ", fpath, NULL); + } + + rv = md_json_readf(&json, cmd->pool, fpath); + if (APR_SUCCESS != rv) { + return apr_pstrcat(cmd->pool, cmd->cmd->name, + ": error reading JSON file ", fpath, NULL); + } + keyid = md_json_gets(json, MD_KEY_KID, NULL); + if (!keyid || !*keyid) { + return apr_pstrcat(cmd->pool, cmd->cmd->name, + ": JSON does not contain '", MD_KEY_KID, + "' element in file ", fpath, NULL); + } + hmac = md_json_gets(json, MD_KEY_HMAC, NULL); + if (!hmac || !*hmac) { + return apr_pstrcat(cmd->pool, cmd->cmd->name, + ": JSON does not contain '", MD_KEY_HMAC, + "' element in file ", fpath, NULL); + } + } + } +leave: + sc->ca_eab_kid = keyid; + sc->ca_eab_hmac = hmac; + return NULL; } const command_rec md_cmds[] = { - AP_INIT_TAKE1( MD_CMD_CA, md_config_set_ca, NULL, RSRC_CONF, - "URL of CA issuing the certificates"), - AP_INIT_TAKE1( MD_CMD_CAAGREEMENT, md_config_set_agreement, NULL, RSRC_CONF, - "URL of CA Terms-of-Service agreement you accept"), - AP_INIT_TAKE_ARGV( MD_CMD_CACHALLENGES, md_config_set_cha_tyes, NULL, RSRC_CONF, + AP_INIT_TAKE_ARGV("MDCertificateAuthority", md_config_set_ca, NULL, RSRC_CONF, + "URL(s) or known name(s) of CA issuing the certificates"), + AP_INIT_TAKE1("MDCertificateAgreement", md_config_set_agreement, NULL, RSRC_CONF, + "either 'accepted' or the URL of CA Terms-of-Service agreement you accept"), + AP_INIT_TAKE_ARGV("MDCAChallenges", md_config_set_cha_tyes, NULL, RSRC_CONF, "A list of challenge types to be used."), - AP_INIT_TAKE1( MD_CMD_CAPROTO, md_config_set_ca_proto, NULL, RSRC_CONF, + AP_INIT_TAKE1("MDCertificateProtocol", md_config_set_ca_proto, NULL, RSRC_CONF, "Protocol used to obtain/renew certificates"), - AP_INIT_TAKE1( MD_CMD_DRIVEMODE, md_config_set_drive_mode, NULL, RSRC_CONF, - "method of obtaining certificates for the managed domain"), - AP_INIT_TAKE_ARGV( MD_CMD_MD, md_config_set_names, NULL, RSRC_CONF, + AP_INIT_TAKE1("MDContactEmail", md_config_set_contact, NULL, RSRC_CONF, + "Email address used for account registration"), + AP_INIT_TAKE1("MDDriveMode", md_config_set_renew_mode, NULL, RSRC_CONF, + "deprecated, older name for MDRenewMode"), + AP_INIT_TAKE1("MDRenewMode", md_config_set_renew_mode, NULL, RSRC_CONF, + "Controls how renewal of Managed Domain certificates shall be handled."), + AP_INIT_TAKE_ARGV("MDomain", md_config_set_names, 
NULL, RSRC_CONF, "A group of server names with one certificate"), - AP_INIT_RAW_ARGS( MD_CMD_MD_SECTION, md_config_sec_start, NULL, RSRC_CONF, + AP_INIT_RAW_ARGS(MD_CMD_MD_SECTION, md_config_sec_start, NULL, RSRC_CONF, "Container for a managed domain with common settings and certificate."), - AP_INIT_TAKE_ARGV( MD_CMD_MEMBER, md_config_sec_add_members, NULL, RSRC_CONF, + AP_INIT_RAW_ARGS(MD_CMD_MD2_SECTION, md_config_sec_start, NULL, RSRC_CONF, + "Short form for container."), + AP_INIT_TAKE_ARGV("MDMember", md_config_sec_add_members, NULL, RSRC_CONF, "Define domain name(s) part of the Managed Domain. Use 'auto' or " "'manual' to enable/disable auto adding names from virtual hosts."), - AP_INIT_TAKE_ARGV( MD_CMD_MEMBERS, md_config_sec_add_members, NULL, RSRC_CONF, + AP_INIT_TAKE_ARGV("MDMembers", md_config_sec_add_members, NULL, RSRC_CONF, "Define domain name(s) part of the Managed Domain. Use 'auto' or " "'manual' to enable/disable auto adding names from virtual hosts."), - AP_INIT_TAKE1( MD_CMD_MUSTSTAPLE, md_config_set_must_staple, NULL, RSRC_CONF, + AP_INIT_TAKE1("MDMustStaple", md_config_set_must_staple, NULL, RSRC_CONF, "Enable/Disable the Must-Staple flag for new certificates."), - AP_INIT_TAKE12( MD_CMD_PORTMAP, md_config_set_port_map, NULL, RSRC_CONF, + AP_INIT_TAKE12("MDPortMap", md_config_set_port_map, NULL, RSRC_CONF, "Declare the mapped ports 80 and 443 on the local server. E.g. 80:8000 " "to indicate that the server port 8000 is reachable as port 80 from the " "internet. Use 80:- to indicate that port 80 is not reachable from " "the outside."), - AP_INIT_TAKE_ARGV( MD_CMD_PKEYS, md_config_set_pkeys, NULL, RSRC_CONF, + AP_INIT_TAKE_ARGV("MDPrivateKeys", md_config_set_pkeys, NULL, RSRC_CONF, "set the type and parameters for private key generation"), - AP_INIT_TAKE1( MD_CMD_PROXY, md_config_set_proxy, NULL, RSRC_CONF, + AP_INIT_TAKE1("MDHttpProxy", md_config_set_proxy, NULL, RSRC_CONF, "URL of a HTTP(S) proxy to use for outgoing connections"), - AP_INIT_TAKE1( MD_CMD_STOREDIR, md_config_set_store_dir, NULL, RSRC_CONF, + AP_INIT_TAKE1("MDStoreDir", md_config_set_store_dir, NULL, RSRC_CONF, "the directory for file system storage of managed domain data."), - AP_INIT_TAKE1( MD_CMD_RENEWWINDOW, md_config_set_renew_window, NULL, RSRC_CONF, - "Time length for renewal before certificate expires (defaults to days)"), - AP_INIT_TAKE1( MD_CMD_REQUIREHTTPS, md_config_set_require_https, NULL, RSRC_CONF, + AP_INIT_TAKE1("MDRenewWindow", md_config_set_renew_window, NULL, RSRC_CONF, + "Time length for renewal before certificate expires (defaults to days)."), + AP_INIT_TAKE1("MDRequireHttps", md_config_set_require_https, NULL, RSRC_CONF|OR_AUTHCFG, "Redirect non-secure requests to the https: equivalent."), - AP_INIT_RAW_ARGS(MD_CMD_NOTIFYCMD, md_config_set_notify_cmd, NULL, RSRC_CONF, - "set the command and optional arguments to run when signup/renew of domain is complete."), - AP_INIT_TAKE1( MD_CMD_BASE_SERVER, md_config_set_base_server, NULL, RSRC_CONF, - "allow managing of base server outside virtual hosts."), - -/* This will disappear soon */ - AP_INIT_TAKE_ARGV( MD_CMD_OLD_MD, md_config_set_names_old, NULL, RSRC_CONF, - "Deprecated, replace with 'MDomain'."), - AP_INIT_RAW_ARGS( MD_CMD_MD_OLD_SECTION, md_config_sec_start_old, NULL, RSRC_CONF, - "Deprecated, replace with 'hsts_max_age > 0) { mc->hsts_header = apr_psprintf(p, "max-age=%d", mc->hsts_max_age); } + +#if AP_MODULE_MAGIC_AT_LEAST(20180906, 2) + if (mc->base_dir == NULL) { + mc->base_dir = ap_state_dir_relative(p, 
MD_DEFAULT_BASE_DIR); + } +#endif return APR_SUCCESS; } @@ -874,6 +1336,7 @@ static md_srv_conf_t *config_get_int(server_rec *s, apr_pool_t *p) ap_assert(sc); if (sc->s != s && p) { sc = md_config_merge(p, &defconf, sc); + sc->s = s; sc->name = apr_pstrcat(p, CONF_S_NAME(s), sc->name, NULL); sc->mc = md_mod_conf_get(p, 1); ap_set_module_config(s->module_config, &md_module, sc); @@ -900,8 +1363,8 @@ md_srv_conf_t *md_config_cget(conn_rec *c) const char *md_config_gets(const md_srv_conf_t *sc, md_config_var_t var) { switch (var) { - case MD_CONFIG_CA_URL: - return sc->ca_url? sc->ca_url : defconf.ca_url; + case MD_CONFIG_CA_CONTACT: + return sc->ca_contact? sc->ca_contact : defconf.ca_contact; case MD_CONFIG_CA_PROTO: return sc->ca_proto? sc->ca_proto : defconf.ca_proto; case MD_CONFIG_BASE_DIR: @@ -921,30 +1384,49 @@ int md_config_geti(const md_srv_conf_t *sc, md_config_var_t var) { switch (var) { case MD_CONFIG_DRIVE_MODE: - return (sc->drive_mode != DEF_VAL)? sc->drive_mode : defconf.drive_mode; - case MD_CONFIG_LOCAL_80: - return sc->mc->local_80; - case MD_CONFIG_LOCAL_443: - return sc->mc->local_443; + return (sc->renew_mode != DEF_VAL)? sc->renew_mode : defconf.renew_mode; case MD_CONFIG_TRANSITIVE: return (sc->transitive != DEF_VAL)? sc->transitive : defconf.transitive; case MD_CONFIG_REQUIRE_HTTPS: return (sc->require_https != MD_REQUIRE_UNSET)? sc->require_https : defconf.require_https; case MD_CONFIG_MUST_STAPLE: return (sc->must_staple != DEF_VAL)? sc->must_staple : defconf.must_staple; + case MD_CONFIG_STAPLING: + return (sc->stapling != DEF_VAL)? sc->stapling : defconf.stapling; + case MD_CONFIG_STAPLE_OTHERS: + return (sc->staple_others != DEF_VAL)? sc->staple_others : defconf.staple_others; default: return 0; } } -apr_interval_time_t md_config_get_interval(const md_srv_conf_t *sc, md_config_var_t var) +void md_config_get_timespan(md_timeslice_t **pspan, const md_srv_conf_t *sc, md_config_var_t var) { switch (var) { - case MD_CONFIG_RENEW_NORM: - return (sc->renew_norm != DEF_VAL)? sc->renew_norm : defconf.renew_norm; case MD_CONFIG_RENEW_WINDOW: - return (sc->renew_window != DEF_VAL)? sc->renew_window : defconf.renew_window; + *pspan = sc->renew_window? sc->renew_window : defconf.renew_window; + break; + case MD_CONFIG_WARN_WINDOW: + *pspan = sc->warn_window? 
sc->warn_window : defconf.warn_window; + break; default: - return 0; + break; + } +} + +const md_t *md_get_for_domain(server_rec *s, const char *domain) +{ + md_srv_conf_t *sc; + const md_t *md; + int i; + + sc = md_config_get(s); + for (i = 0; sc && sc->assigned && i < sc->assigned->nelts; ++i) { + md = APR_ARRAY_IDX(sc->assigned, i, const md_t*); + if (md_contains(md, domain, 0)) goto leave; } + md = NULL; +leave: + return md; } + diff --git a/modules/md/mod_md_config.h b/modules/md/mod_md_config.h index 7c7df51..7e87440 100644 --- a/modules/md/mod_md_config.h +++ b/modules/md/mod_md_config.h @@ -17,32 +17,42 @@ #ifndef mod_md_md_config_h #define mod_md_md_config_h +struct apr_hash_t; struct md_store_t; struct md_reg_t; -struct md_pkey_spec_t; +struct md_ocsp_reg_t; +struct md_pkeys_spec_t; typedef enum { - MD_CONFIG_CA_URL, + MD_CONFIG_CA_CONTACT, MD_CONFIG_CA_PROTO, MD_CONFIG_BASE_DIR, MD_CONFIG_CA_AGREEMENT, MD_CONFIG_DRIVE_MODE, - MD_CONFIG_LOCAL_80, - MD_CONFIG_LOCAL_443, - MD_CONFIG_RENEW_NORM, MD_CONFIG_RENEW_WINDOW, + MD_CONFIG_WARN_WINDOW, MD_CONFIG_TRANSITIVE, MD_CONFIG_PROXY, MD_CONFIG_REQUIRE_HTTPS, MD_CONFIG_MUST_STAPLE, MD_CONFIG_NOTIFY_CMD, + MD_CONFIG_MESSGE_CMD, + MD_CONFIG_STAPLING, + MD_CONFIG_STAPLE_OTHERS, } md_config_var_t; -typedef struct { +typedef enum { + MD_MATCH_ALL, + MD_MATCH_SERVERNAMES, +} md_match_mode_t; + +typedef struct md_mod_conf_t md_mod_conf_t; +struct md_mod_conf_t { apr_array_header_t *mds; /* all md_t* defined in the config, shared */ const char *base_dir; /* base dir for store */ const char *proxy_url; /* proxy url to use (or NULL) */ - struct md_reg_t *reg; /* md registry instance, singleton, shared */ + struct md_reg_t *reg; /* md registry instance */ + struct md_ocsp_reg_t *ocsp; /* ocsp status registry */ int local_80; /* On which port http:80 arrives */ int local_443; /* On which port https:443 arrives */ @@ -52,9 +62,25 @@ typedef struct { int hsts_max_age; /* max-age of HSTS (rfc6797) header */ const char *hsts_header; /* computed HTST header to use or NULL */ apr_array_header_t *unused_names; /* post config, names of all MDs not assigned to a vhost */ + struct apr_hash_t *init_errors; /* init errors reported with MD name as key */ const char *notify_cmd; /* notification command to execute on signup/renew */ -} md_mod_conf_t; + const char *message_cmd; /* message command to execute on signup/renew/warnings */ + struct apr_table_t *env; /* environment for operation */ + int dry_run; /* != 0 iff config dry run */ + int server_status_enabled; /* if module should add to server-status handler */ + int certificate_status_enabled; /* if module should expose /.httpd/certificate-status */ + md_timeslice_t *ocsp_keep_window; /* time that we keep ocsp responses around */ + md_timeslice_t *ocsp_renew_window; /* time before exp. that we start renewing ocsp resp. 
*/ + const char *cert_check_name; /* name of the linked certificate check site */ + const char *cert_check_url; /* url "template for" checking a certificate */ + const char *ca_certs; /* root certificates to use for connections */ + apr_time_t min_delay; /* minimum delay for retries */ + int retry_failover; /* number of errors to trigger CA failover */ + int use_store_locks; /* use locks when updating store */ + apr_time_t lock_wait_timeout; /* fail after this time when unable to obtain lock */ + md_match_mode_t match_mode; /* how dns names are match to vhosts */ +}; typedef struct md_srv_conf_t { const char *name; @@ -63,21 +89,28 @@ typedef struct md_srv_conf_t { int transitive; /* != 0 iff VirtualHost names/aliases are auto-added */ md_require_t require_https; /* If MDs require https: access */ - int drive_mode; /* mode of obtaining credentials */ + int renew_mode; /* mode of obtaining credentials */ int must_staple; /* certificates should set the OCSP Must Staple extension */ - struct md_pkey_spec_t *pkey_spec; /* specification for generating private keys */ - apr_interval_time_t renew_norm; /* If > 0, use as normalizing value for cert lifetime - * Example: renew_norm=90d renew_win=30d, cert lives - * for 12 days => renewal 4 days before */ - apr_interval_time_t renew_window; /* time before expiration that starts renewal */ + struct md_pkeys_spec_t *pks; /* specification for private keys */ + md_timeslice_t *renew_window; /* time before expiration that starts renewal */ + md_timeslice_t *warn_window; /* time before expiration that warning are sent out */ - const char *ca_url; /* url of CA certificate service */ + struct apr_array_header_t *ca_urls; /* urls of CAs */ + const char *ca_contact; /* contact email registered to account */ const char *ca_proto; /* protocol used vs CA (e.g. ACME) */ const char *ca_agreement; /* accepted agreement uri between CA and user */ struct apr_array_header_t *ca_challenges; /* challenge types configured */ + const char *ca_eab_kid; /* != NULL, external account binding keyid */ + const char *ca_eab_hmac; /* != NULL, external account binding hmac */ + + int stapling; /* OCSP stapling enabled */ + int staple_others; /* Provide OCSP stapling for non-MD certificates */ + + const char *dns01_cmd; /* DNS challenge command, override global command */ md_t *current; /* md currently defined in section */ - md_t *assigned; /* post_config: MD that applies to this server or NULL */ + struct apr_array_header_t *assigned; /* post_config: MDs that apply to this server */ + int is_ssl; /* SSLEngine is enabled here */ } md_srv_conf_t; void *md_config_create_svr(apr_pool_t *pool, server_rec *s); @@ -97,6 +130,9 @@ md_srv_conf_t *md_config_get_unique(server_rec *s, apr_pool_t *p); const char *md_config_gets(const md_srv_conf_t *config, md_config_var_t var); int md_config_geti(const md_srv_conf_t *config, md_config_var_t var); -apr_interval_time_t md_config_get_interval(const md_srv_conf_t *config, md_config_var_t var); + +void md_config_get_timespan(md_timeslice_t **pspan, const md_srv_conf_t *sc, md_config_var_t var); + +const md_t *md_get_for_domain(server_rec *s, const char *domain); #endif /* md_config_h */ diff --git a/modules/md/mod_md_drive.c b/modules/md/mod_md_drive.c new file mode 100644 index 0000000..5565f44 --- /dev/null +++ b/modules/md/mod_md_drive.c @@ -0,0 +1,345 @@ +/* Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include "mod_watchdog.h" + +#include "md.h" +#include "md_curl.h" +#include "md_crypt.h" +#include "md_event.h" +#include "md_http.h" +#include "md_json.h" +#include "md_status.h" +#include "md_store.h" +#include "md_store_fs.h" +#include "md_log.h" +#include "md_result.h" +#include "md_reg.h" +#include "md_util.h" +#include "md_version.h" +#include "md_acme.h" +#include "md_acme_authz.h" + +#include "mod_md.h" +#include "mod_md_private.h" +#include "mod_md_config.h" +#include "mod_md_status.h" +#include "mod_md_drive.h" + +/**************************************************************************************************/ +/* watchdog based impl. */ + +#define MD_RENEW_WATCHDOG_NAME "_md_renew_" + +static APR_OPTIONAL_FN_TYPE(ap_watchdog_get_instance) *wd_get_instance; +static APR_OPTIONAL_FN_TYPE(ap_watchdog_register_callback) *wd_register_callback; +static APR_OPTIONAL_FN_TYPE(ap_watchdog_set_callback_interval) *wd_set_interval; + +struct md_renew_ctx_t { + apr_pool_t *p; + server_rec *s; + md_mod_conf_t *mc; + ap_watchdog_t *watchdog; + + apr_array_header_t *jobs; +}; + +static void process_drive_job(md_renew_ctx_t *dctx, md_job_t *job, apr_pool_t *ptemp) +{ + const md_t *md; + md_result_t *result = NULL; + apr_status_t rv; + + md_job_load(job); + /* Evaluate again on loaded value. Values will change when watchdog switches child process */ + if (apr_time_now() < job->next_run) return; + + job->next_run = 0; + if (job->finished && job->notified_renewed) { + /* finished and notification handled, nothing to do. */ + goto leave; + } + + md = md_get_by_name(dctx->mc->mds, job->mdomain); + AP_DEBUG_ASSERT(md); + + result = md_result_md_make(ptemp, md->name); + if (job->last_result) md_result_assign(result, job->last_result); + + if (md->state == MD_S_MISSING_INFORMATION) { + /* Missing information, this will not change until configuration + * is changed and server reloaded. */ + job->fatal_error = 1; + job->next_run = 0; + goto leave; + } + + if (md_will_renew_cert(md)) { + /* Renew the MDs credentials in a STAGING area. Might be invoked repeatedly + * without discarding previous/intermediate results. + * Only returns SUCCESS when the renewal is complete, e.g. STAGING has a + * complete set of new credentials. + */ + ap_log_error( APLOG_MARK, APLOG_DEBUG, 0, dctx->s, APLOGNO(10052) + "md(%s): state=%d, driving", job->mdomain, md->state); + + if (!md_reg_should_renew(dctx->mc->reg, md, dctx->p)) { + ap_log_error( APLOG_MARK, APLOG_DEBUG, 0, dctx->s, APLOGNO(10053) + "md(%s): no need to renew", job->mdomain); + goto expiry; + } + + /* The (possibly configured) event handler may veto renewals. This + * is used in cluster installtations, see #233. 
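/* Editorial sketch, not part of the patch: the "renewing" event raised just below lets
 * a configured handler veto the renewal, useful in clusters where only one node should
 * talk to the CA. A standalone illustration of that veto pattern; the callback type,
 * the "primary" policy and all names are invented. */
#include <stdio.h>
#include <string.h>

typedef int (*event_cb)(const char *event, const char *mdomain, void *userdata);

/* 0 means "proceed"; anything else vetoes the action */
static int raise_event(event_cb cb, const char *event, const char *mdomain, void *ud)
{
    return cb ? cb(event, mdomain, ud) : 0;
}

static int cluster_gate(const char *event, const char *mdomain, void *ud)
{
    (void)event; (void)mdomain;
    return strcmp((const char *)ud, "primary") == 0 ? 0 : 1;
}

int main(void)
{
    char node[] = "secondary";
    if (raise_event(cluster_gate, "renewing", "example.org", node) != 0)
        puts("renewal vetoed on this node");
    return 0;
}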
*/ + rv = md_event_raise("renewing", md->name, job, result, ptemp); + if (APR_SUCCESS != rv) { + ap_log_error(APLOG_MARK, APLOG_INFO, 0, dctx->s, APLOGNO(10060) + "%s: event-handler for 'renewing' returned %d, preventing renewal to proceed.", + job->mdomain, rv); + goto leave; + } + + md_job_start_run(job, result, md_reg_store_get(dctx->mc->reg)); + md_reg_renew(dctx->mc->reg, md, dctx->mc->env, 0, job->error_runs, result, ptemp); + md_job_end_run(job, result); + + if (APR_SUCCESS == result->status) { + /* Finished jobs might take a while before the results become valid. + * If that is in the future, request to run then */ + if (apr_time_now() < result->ready_at) { + md_job_retry_at(job, result->ready_at); + goto leave; + } + + if (!job->notified_renewed) { + md_job_save(job, result, ptemp); + md_job_notify(job, "renewed", result); + } + } + else { + ap_log_error( APLOG_MARK, APLOG_ERR, result->status, dctx->s, APLOGNO(10056) + "processing %s: %s", job->mdomain, result->detail); + md_job_log_append(job, "renewal-error", result->problem, result->detail); + md_event_holler("errored", job->mdomain, job, result, ptemp); + ap_log_error(APLOG_MARK, APLOG_INFO, 0, dctx->s, APLOGNO(10057) + "%s: encountered error for the %d. time, next run in %s", + job->mdomain, job->error_runs, + md_duration_print(ptemp, job->next_run - apr_time_now())); + } + } + +expiry: + if (!job->finished && md_reg_should_warn(dctx->mc->reg, md, dctx->p)) { + ap_log_error( APLOG_MARK, APLOG_TRACE1, 0, dctx->s, + "md(%s): warn about expiration", md->name); + md_job_start_run(job, result, md_reg_store_get(dctx->mc->reg)); + md_job_notify(job, "expiring", result); + md_job_end_run(job, result); + } + +leave: + if (job->dirty && result) { + rv = md_job_save(job, result, ptemp); + ap_log_error(APLOG_MARK, APLOG_TRACE1, rv, dctx->s, "%s: saving job props", job->mdomain); + } +} + +int md_will_renew_cert(const md_t *md) +{ + if (md->renew_mode == MD_RENEW_MANUAL) { + return 0; + } + else if (md->renew_mode == MD_RENEW_AUTO && md->cert_files && md->cert_files->nelts) { + return 0; + } + return 1; +} + +static apr_time_t next_run_default(void) +{ + /* we'd like to run at least twice a day by default */ + return apr_time_now() + apr_time_from_sec(MD_SECS_PER_DAY / 2); +} + +static apr_status_t run_watchdog(int state, void *baton, apr_pool_t *ptemp) +{ + md_renew_ctx_t *dctx = baton; + md_job_t *job; + apr_time_t next_run, wait_time; + int i; + + /* mod_watchdog invoked us as a single thread inside the whole server (on this machine). + * This might be a repeated run inside the same child (mod_watchdog keeps affinity as + * long as the child lives) or another/new child. + */ + switch (state) { + case AP_WATCHDOG_STATE_STARTING: + ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, dctx->s, APLOGNO(10054) + "md watchdog start, auto drive %d mds", dctx->jobs->nelts); + break; + + case AP_WATCHDOG_STATE_RUNNING: + ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, dctx->s, APLOGNO(10055) + "md watchdog run, auto drive %d mds", dctx->jobs->nelts); + + /* Process all drive jobs. They will update their next_run property + * and we schedule ourself at the earliest of all. A job may specify 0 + * as next_run to indicate that it wants to participate in the normal + * regular runs. 
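/* Editorial sketch, not part of the patch: the loop that follows re-arms the watchdog
 * at the earliest next_run of all jobs, falling back to a twice-a-day default. A
 * standalone illustration of that selection; the job struct and constants are
 * simplified stand-ins. */
#include <stdio.h>
#include <time.h>

#define SECS_PER_DAY (24 * 60 * 60)

struct job { const char *mdomain; time_t next_run; };

static time_t next_wakeup(const struct job *jobs, int n, time_t now)
{
    time_t next = now + SECS_PER_DAY / 2;     /* default: run twice a day */
    int i;
    for (i = 0; i < n; ++i) {
        if (jobs[i].next_run && jobs[i].next_run < next)
            next = jobs[i].next_run;
    }
    return next;
}

int main(void)
{
    time_t now = time(NULL);
    struct job jobs[2] = {
        { "example.org", now + 3600 },        /* wants to run again in an hour */
        { "example.net", 0 }                  /* 0: participate in regular runs */
    };
    printf("sleep for %.0f seconds\n", difftime(next_wakeup(jobs, 2, now), now));
    return 0;
}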
*/ + next_run = next_run_default(); + for (i = 0; i < dctx->jobs->nelts; ++i) { + job = APR_ARRAY_IDX(dctx->jobs, i, md_job_t *); + + if (apr_time_now() >= job->next_run) { + process_drive_job(dctx, job, ptemp); + } + + if (job->next_run && job->next_run < next_run) { + next_run = job->next_run; + } + } + + wait_time = next_run - apr_time_now(); + if (APLOGdebug(dctx->s)) { + ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, dctx->s, APLOGNO(10107) + "next run in %s", md_duration_print(ptemp, wait_time)); + } + wd_set_interval(dctx->watchdog, wait_time, dctx, run_watchdog); + break; + + case AP_WATCHDOG_STATE_STOPPING: + ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, dctx->s, APLOGNO(10058) + "md watchdog stopping"); + break; + } + + return APR_SUCCESS; +} + +apr_status_t md_renew_start_watching(md_mod_conf_t *mc, server_rec *s, apr_pool_t *p) +{ + apr_allocator_t *allocator; + md_renew_ctx_t *dctx; + apr_pool_t *dctxp; + apr_status_t rv; + md_t *md; + md_job_t *job; + int i; + + /* We use mod_watchdog to run a single thread in one of the child processes + * to monitor the MDs marked as watched, using the const data in the list + * mc->mds of our MD structures. + * + * The data in mc cannot be changed, as we may spawn copies in new child processes + * of the original data at any time. The child which hosts the watchdog thread + * may also die or be recycled, which causes a new watchdog thread to run + * in another process with the original data. + * + * Instead, we use our store to persist changes in group STAGING. This is + * kept writable to child processes, but the data stored there is not live. + * However, mod_watchdog makes sure that we only ever have a single thread in + * our server (on this machine) that writes there. Other processes, e.g. informing + * the user about progress, only read from there. + * + * All changes during driving an MD are stored as files in MG_SG_STAGING/. + * All will have "md.json" and "job.json". There may be a range of other files used + * by the protocol obtaining the certificate/keys. + * + * + */ + wd_get_instance = APR_RETRIEVE_OPTIONAL_FN(ap_watchdog_get_instance); + wd_register_callback = APR_RETRIEVE_OPTIONAL_FN(ap_watchdog_register_callback); + wd_set_interval = APR_RETRIEVE_OPTIONAL_FN(ap_watchdog_set_callback_interval); + + if (!wd_get_instance || !wd_register_callback || !wd_set_interval) { + ap_log_error(APLOG_MARK, APLOG_CRIT, 0, s, APLOGNO(10061) "mod_watchdog is required"); + return !OK; + } + + /* We want our own pool with own allocator to keep data across watchdog invocations. + * Since we'll run in a single watchdog thread, using our own allocator will prevent + * any confusion in the parent pool. 
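/* Editorial sketch, not part of the patch: as the comment above explains, the watchdog
 * child may die or be recycled, so job progress is persisted in the store (md.json and
 * job.json under STAGING) rather than kept in memory. A much-simplified standalone
 * save/load illustration; the file name and format here are invented. */
#include <stdio.h>

struct job_state { long next_run; int error_runs; };

static int job_save(const char *fname, const struct job_state *js)
{
    FILE *f = fopen(fname, "w");
    if (!f) return -1;
    fprintf(f, "%ld %d\n", js->next_run, js->error_runs);
    return fclose(f);
}

static int job_load(const char *fname, struct job_state *js)
{
    FILE *f = fopen(fname, "r");
    int ok;
    if (!f) return -1;
    ok = (fscanf(f, "%ld %d", &js->next_run, &js->error_runs) == 2);
    fclose(f);
    return ok ? 0 : -1;
}

int main(void)
{
    struct job_state js = { 1735689600L, 0 }, back;
    if (job_save("job.state", &js) != 0) return 1;
    return job_load("job.state", &back) == 0 ? 0 : 1;
}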
*/ + apr_allocator_create(&allocator); + apr_allocator_max_free_set(allocator, 1); + rv = apr_pool_create_ex(&dctxp, p, NULL, allocator); + if (rv != APR_SUCCESS) { + ap_log_error(APLOG_MARK, APLOG_ERR, rv, s, APLOGNO(10062) "md_renew_watchdog: create pool"); + return rv; + } + apr_allocator_owner_set(allocator, dctxp); + apr_pool_tag(dctxp, "md_renew_watchdog"); + + dctx = apr_pcalloc(dctxp, sizeof(*dctx)); + dctx->p = dctxp; + dctx->s = s; + dctx->mc = mc; + + dctx->jobs = apr_array_make(dctx->p, mc->mds->nelts, sizeof(md_job_t *)); + for (i = 0; i < mc->mds->nelts; ++i) { + md = APR_ARRAY_IDX(mc->mds, i, md_t*); + if (!md || !md->watched) continue; + + job = md_reg_job_make(mc->reg, md->name, p); + APR_ARRAY_PUSH(dctx->jobs, md_job_t*) = job; + ap_log_error( APLOG_MARK, APLOG_TRACE1, 0, dctx->s, + "md(%s): state=%d, created drive job", md->name, md->state); + + md_job_load(job); + if (job->error_runs) { + /* Server has just restarted. If we encounter an MD job with errors + * on a previous driving, we purge its STAGING area. + * This will reset the driving for the MD. It may run into the same + * error again, or in case of race/confusion/our error/CA error, it + * might allow the MD to succeed by a fresh start. + */ + ap_log_error( APLOG_MARK, APLOG_NOTICE, 0, dctx->s, APLOGNO(10064) + "md(%s): previous drive job showed %d errors, purging STAGING " + "area to reset.", md->name, job->error_runs); + md_store_purge(md_reg_store_get(dctx->mc->reg), p, MD_SG_STAGING, md->name); + md_store_purge(md_reg_store_get(dctx->mc->reg), p, MD_SG_CHALLENGES, md->name); + job->error_runs = 0; + } + } + + if (!dctx->jobs->nelts) { + ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s, APLOGNO(10065) + "no managed domain to drive, no watchdog needed."); + apr_pool_destroy(dctx->p); + return APR_SUCCESS; + } + + if (APR_SUCCESS != (rv = wd_get_instance(&dctx->watchdog, MD_RENEW_WATCHDOG_NAME, 0, 1, dctx->p))) { + ap_log_error(APLOG_MARK, APLOG_CRIT, rv, s, APLOGNO(10066) + "create md renew watchdog(%s)", MD_RENEW_WATCHDOG_NAME); + return rv; + } + rv = wd_register_callback(dctx->watchdog, 0, dctx, run_watchdog); + ap_log_error(APLOG_MARK, rv? APLOG_CRIT : APLOG_DEBUG, rv, s, APLOGNO(10067) + "register md renew watchdog(%s)", MD_RENEW_WATCHDOG_NAME); + return rv; +} diff --git a/modules/md/mod_md_drive.h b/modules/md/mod_md_drive.h new file mode 100644 index 0000000..40d6d67 --- /dev/null +++ b/modules/md/mod_md_drive.h @@ -0,0 +1,35 @@ +/* Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef mod_md_md_drive_h +#define mod_md_md_drive_h + +struct md_mod_conf_t; +struct md_reg_t; + +typedef struct md_renew_ctx_t md_renew_ctx_t; + +int md_will_renew_cert(const md_t *md); + +/** + * Start driving the certificate renewal for MDs marked with watched. 
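/* Editorial sketch, not part of the patch: as logged above (APLOGNO(10064)), a job that
 * recorded errors in a previous run has its STAGING area purged on restart so renewal
 * starts from a clean slate. A standalone illustration of that reset; the on-disk path
 * layout is invented and remove() merely stands in for purging the store groups. */
#include <stdio.h>

struct drive_job { const char *mdomain; int error_runs; };

static void reset_if_errored(struct drive_job *job)
{
    char path[256];

    if (job->error_runs <= 0)
        return;
    snprintf(path, sizeof(path), "staging/%s", job->mdomain);
    remove(path);          /* stand-in for purging MD_SG_STAGING and MD_SG_CHALLENGES */
    job->error_runs = 0;
}

int main(void)
{
    struct drive_job job = { "example.org", 3 };
    reset_if_errored(&job);
    return job.error_runs;
}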
+ */ +apr_status_t md_renew_start_watching(struct md_mod_conf_t *mc, server_rec *s, apr_pool_t *p); + + + + +#endif /* mod_md_md_drive_h */ diff --git a/modules/md/mod_md_ocsp.c b/modules/md/mod_md_ocsp.c new file mode 100644 index 0000000..1d1e282 --- /dev/null +++ b/modules/md/mod_md_ocsp.c @@ -0,0 +1,272 @@ +/* Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "mod_watchdog.h" + +#include "md.h" +#include "md_crypt.h" +#include "md_http.h" +#include "md_json.h" +#include "md_ocsp.h" +#include "md_store.h" +#include "md_log.h" +#include "md_reg.h" +#include "md_time.h" +#include "md_util.h" + +#include "mod_md.h" +#include "mod_md_config.h" +#include "mod_md_private.h" +#include "mod_md_ocsp.h" + +static int staple_here(md_srv_conf_t *sc) +{ + if (!sc || !sc->mc->ocsp) return 0; + if (sc->assigned + && sc->assigned->nelts == 1 + && APR_ARRAY_IDX(sc->assigned, 0, const md_t*)->stapling) return 1; + return (md_config_geti(sc, MD_CONFIG_STAPLING) + && md_config_geti(sc, MD_CONFIG_STAPLE_OTHERS)); +} + +int md_ocsp_prime_status(server_rec *s, apr_pool_t *p, + const char *id, apr_size_t id_len, const char *pem) +{ + md_srv_conf_t *sc; + const md_t *md; + apr_array_header_t *chain; + apr_status_t rv = APR_ENOENT; + + sc = md_config_get(s); + if (!staple_here(sc)) goto cleanup; + + md = ((sc->assigned && sc->assigned->nelts == 1)? + APR_ARRAY_IDX(sc->assigned, 0, const md_t*) : NULL); + chain = apr_array_make(p, 5, sizeof(md_cert_t*)); + rv = md_cert_read_chain(chain, p, pem, strlen(pem)); + if (APR_SUCCESS != rv) { + ap_log_error(APLOG_MARK, APLOG_ERR, rv, s, APLOGNO(10268) "init stapling for: %s, " + "unable to parse PEM data", md? md->name : s->server_hostname); + goto cleanup; + } + else if (chain->nelts < 2) { + ap_log_error(APLOG_MARK, APLOG_ERR, rv, s, APLOGNO(10269) "init stapling for: %s, " + "need at least 2 certificates in PEM data", md? md->name : s->server_hostname); + rv = APR_EINVAL; + goto cleanup; + } + + rv = md_ocsp_prime(sc->mc->ocsp, id, id_len, + APR_ARRAY_IDX(chain, 0, md_cert_t*), + APR_ARRAY_IDX(chain, 1, md_cert_t*), md); + ap_log_error(APLOG_MARK, APLOG_TRACE1, rv, s, "init stapling for: %s", + md? md->name : s->server_hostname); + +cleanup: + return (APR_SUCCESS == rv)? OK : DECLINED; +} + +typedef struct { + unsigned char *der; + apr_size_t der_len; +} ocsp_copy_ctx_t; + +int md_ocsp_provide_status(server_rec *s, conn_rec *c, + const char *id, apr_size_t id_len, + ap_ssl_ocsp_copy_resp *cb, void *userdata) +{ + md_srv_conf_t *sc; + const md_t *md; + apr_status_t rv; + + sc = md_config_get(s); + if (!staple_here(sc)) goto declined; + + md = ((sc->assigned && sc->assigned->nelts == 1)? 
+ APR_ARRAY_IDX(sc->assigned, 0, const md_t*) : NULL); + ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, c, "get stapling for: %s", + md? md->name : s->server_hostname); + + rv = md_ocsp_get_status(cb, userdata, sc->mc->ocsp, id, id_len, c->pool, md); + if (APR_STATUS_IS_ENOENT(rv)) goto declined; + return OK; + +declined: + return DECLINED; +} + + +/**************************************************************************************************/ +/* watchdog based impl. */ + +#define MD_OCSP_WATCHDOG_NAME "_md_ocsp_" + +static APR_OPTIONAL_FN_TYPE(ap_watchdog_get_instance) *wd_get_instance; +static APR_OPTIONAL_FN_TYPE(ap_watchdog_register_callback) *wd_register_callback; +static APR_OPTIONAL_FN_TYPE(ap_watchdog_set_callback_interval) *wd_set_interval; + +typedef struct md_ocsp_ctx_t md_ocsp_ctx_t; + +struct md_ocsp_ctx_t { + apr_pool_t *p; + server_rec *s; + md_mod_conf_t *mc; + ap_watchdog_t *watchdog; +}; + +static apr_time_t next_run_default(void) +{ + /* we'd like to run at least hourly */ + return apr_time_now() + apr_time_from_sec(MD_SECS_PER_HOUR); +} + +static apr_status_t run_watchdog(int state, void *baton, apr_pool_t *ptemp) +{ + md_ocsp_ctx_t *octx = baton; + apr_time_t next_run, wait_time; + + /* mod_watchdog invoked us as a single thread inside the whole server (on this machine). + * This might be a repeated run inside the same child (mod_watchdog keeps affinity as + * long as the child lives) or another/new child. + */ + switch (state) { + case AP_WATCHDOG_STATE_STARTING: + ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, octx->s, APLOGNO(10197) + "md ocsp watchdog start, ocsp stapling %d certificates", + (int)md_ocsp_count(octx->mc->ocsp)); + break; + + case AP_WATCHDOG_STATE_RUNNING: + ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, octx->s, APLOGNO(10198) + "md ocsp watchdog run, ocsp stapling %d certificates", + (int)md_ocsp_count(octx->mc->ocsp)); + + /* Process all drive jobs. They will update their next_run property + * and we schedule ourself at the earliest of all. A job may specify 0 + * as next_run to indicate that it wants to participate in the normal + * regular runs. 
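/* Editorial sketch, not part of the patch: md_ocsp_renew() below refreshes a stapled
 * response when only a configurable fraction of its validity window remains (the
 * stapling renew window). A standalone illustration of that decision; the field names
 * and the 33% fraction are assumptions. */
#include <stdio.h>
#include <time.h>

static int ocsp_needs_refresh(time_t this_update, time_t next_update,
                              double frac, time_t now)
{
    double total = difftime(next_update, this_update);
    double left  = difftime(next_update, now);
    return left <= total * frac;
}

int main(void)
{
    time_t now = time(NULL);
    time_t tu = now - 6 * 24 * 3600;        /* response issued six days ago */
    time_t nu = now + 1 * 24 * 3600;        /* valid for one more day */
    printf("refresh: %s\n", ocsp_needs_refresh(tu, nu, 0.33, now) ? "yes" : "no");
    return 0;
}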
*/ + next_run = next_run_default(); + + md_ocsp_renew(octx->mc->ocsp, octx->p, ptemp, &next_run); + + wait_time = next_run - apr_time_now(); + if (APLOGdebug(octx->s)) { + ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, octx->s, APLOGNO(10199) + "md ocsp watchdog next run in %s", + md_duration_print(ptemp, wait_time)); + } + wd_set_interval(octx->watchdog, wait_time, octx, run_watchdog); + break; + + case AP_WATCHDOG_STATE_STOPPING: + ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, octx->s, APLOGNO(10200) + "md ocsp watchdog stopping"); + break; + } + + return APR_SUCCESS; +} + +static apr_status_t ocsp_remove_old_responses(md_mod_conf_t *mc, apr_pool_t *p) +{ + md_timeperiod_t keep_norm, keep; + + keep_norm.end = apr_time_now(); + keep_norm.start = keep_norm.end - MD_TIME_OCSP_KEEP_NORM; + keep = md_timeperiod_slice_before_end(&keep_norm, mc->ocsp_keep_window); + /* remove any ocsp response older than keep.start */ + return md_ocsp_remove_responses_older_than(mc->ocsp, p, keep.start); +} + +apr_status_t md_ocsp_start_watching(md_mod_conf_t *mc, server_rec *s, apr_pool_t *p) +{ + apr_allocator_t *allocator; + md_ocsp_ctx_t *octx; + apr_pool_t *octxp; + apr_status_t rv; + + wd_get_instance = APR_RETRIEVE_OPTIONAL_FN(ap_watchdog_get_instance); + wd_register_callback = APR_RETRIEVE_OPTIONAL_FN(ap_watchdog_register_callback); + wd_set_interval = APR_RETRIEVE_OPTIONAL_FN(ap_watchdog_set_callback_interval); + + if (!wd_get_instance || !wd_register_callback || !wd_set_interval) { + ap_log_error(APLOG_MARK, APLOG_CRIT, 0, s, APLOGNO(10201) + "mod_watchdog is required for OCSP stapling"); + return APR_EGENERAL; + } + + /* We want our own pool with own allocator to keep data across watchdog invocations. + * Since we'll run in a single watchdog thread, using our own allocator will prevent + * any confusion in the parent pool. */ + apr_allocator_create(&allocator); + apr_allocator_max_free_set(allocator, 1); + rv = apr_pool_create_ex(&octxp, p, NULL, allocator); + if (rv != APR_SUCCESS) { + ap_log_error(APLOG_MARK, APLOG_ERR, rv, s, APLOGNO(10205) "md_ocsp_watchdog: create pool"); + return rv; + } + apr_allocator_owner_set(allocator, octxp); + apr_pool_tag(octxp, "md_ocsp_watchdog"); + + octx = apr_pcalloc(octxp, sizeof(*octx)); + octx->p = octxp; + octx->s = s; + octx->mc = mc; + + /* Time for some house keeping, before the server goes live (again): + * - we store OCSP responses for each certificate individually by its SHA-1 id + * - this means, as long as certificate do not change, the number of response + * files remains stable. + * - But when a certificate changes (is replaced), the response is obsolete + * - we do not get notified when a certificate is no longer used. An admin + * might just reconfigure or change the content of a file (backup/restore etc.) + * - also, certificates might be added by some openssl config commands or other + * modules that we do not immediately see right at startup. We cannot assume + * that any OCSP response we cannot relate to a certificate RIGHT NOW, is no + * longer needed. + * - since the response files are relatively small, we have no problem with + * keeping them around for a while. We just do not want an ever growing store. + * - The simplest and effective way seems to be to just remove files older + * a certain amount of time. Take a 7 day default and let the admin configure + * it for very special setups. 
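/* Editorial sketch, not part of the patch: ocsp_remove_old_responses() above implements
 * the housekeeping described in this comment, dropping cached responses older than the
 * keep window (a 7-day default). A standalone POSIX illustration of the cutoff test;
 * the file path is hypothetical. */
#include <stdio.h>
#include <time.h>
#include <sys/stat.h>

static int older_than(const char *path, time_t cutoff)
{
    struct stat st;
    return stat(path, &st) == 0 && st.st_mtime < cutoff;
}

int main(void)
{
    time_t cutoff = time(NULL) - 7 * 24 * 3600;   /* keep window: 7 days */
    const char *resp = "ocsp/abc123.resp";        /* hypothetical response file */
    if (older_than(resp, cutoff))
        remove(resp);
    return 0;
}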
+ */ + ocsp_remove_old_responses(mc, octx->p); + + rv = wd_get_instance(&octx->watchdog, MD_OCSP_WATCHDOG_NAME, 0, 1, octx->p); + if (APR_SUCCESS != rv) { + ap_log_error(APLOG_MARK, APLOG_CRIT, rv, s, APLOGNO(10202) + "create md ocsp watchdog(%s)", MD_OCSP_WATCHDOG_NAME); + return rv; + } + rv = wd_register_callback(octx->watchdog, 0, octx, run_watchdog); + ap_log_error(APLOG_MARK, rv? APLOG_CRIT : APLOG_DEBUG, rv, s, APLOGNO(10203) + "register md ocsp watchdog(%s)", MD_OCSP_WATCHDOG_NAME); + return rv; +} + + + diff --git a/modules/md/mod_md_ocsp.h b/modules/md/mod_md_ocsp.h new file mode 100644 index 0000000..a3f9502 --- /dev/null +++ b/modules/md/mod_md_ocsp.h @@ -0,0 +1,33 @@ +/* Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef mod_md_md_ocsp_h +#define mod_md_md_ocsp_h + + +int md_ocsp_prime_status(server_rec *s, apr_pool_t *p, + const char *id, apr_size_t id_len, const char *pem); + +int md_ocsp_provide_status(server_rec *s, conn_rec *c, const char *id, apr_size_t id_len, + ap_ssl_ocsp_copy_resp *cb, void *userdata); + +/** + * Start watchdog for retrieving/updating ocsp status. + */ +apr_status_t md_ocsp_start_watching(struct md_mod_conf_t *mc, server_rec *s, apr_pool_t *p); + + +#endif /* mod_md_md_ocsp_h */ diff --git a/modules/md/mod_md_os.c b/modules/md/mod_md_os.c index f96d566..06a5bee 100644 --- a/modules/md/mod_md_os.c +++ b/modules/md/mod_md_os.c @@ -17,10 +17,6 @@ #include #include -#ifndef AP_ENABLE_EXCEPTION_HOOK -#define AP_ENABLE_EXCEPTION_HOOK 0 -#endif - #include #include #include @@ -29,9 +25,6 @@ #if APR_HAVE_UNISTD_H #include #endif -#ifdef WIN32 -#include "mpm_winnt.h" -#endif #if AP_NEED_SET_MUTEX_PERMS #include "unixd.h" #endif @@ -41,14 +34,20 @@ apr_status_t md_try_chown(const char *fname, unsigned int uid, int gid, apr_pool_t *p) { -#if AP_NEED_SET_MUTEX_PERMS - if (-1 == chown(fname, (uid_t)uid, (gid_t)gid)) { - apr_status_t rv = APR_FROM_OS_ERROR(errno); - if (!APR_STATUS_IS_ENOENT(rv)) { - ap_log_perror(APLOG_MARK, APLOG_ERR, rv, p, APLOGNO(10082) - "Can't change owner of %s", fname); +#if AP_NEED_SET_MUTEX_PERMS && HAVE_UNISTD_H + /* Since we only switch user when running as root, we only need to chown directories + * in that case. Otherwise, the server will ignore any "user/group" directives and + * child processes have the same privileges as the parent. 
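/* Editorial sketch, not part of the patch: the md_try_chown() change in this hunk only
 * attempts chown() when the server is effectively root, since otherwise user/group
 * directives are ignored and child processes already share the parent's identity. A
 * standalone POSIX distillation of that guard; the path and the 33/33 ids are
 * hypothetical. */
#include <errno.h>
#include <unistd.h>
#include <sys/types.h>

static int try_chown(const char *fname, uid_t uid, gid_t gid)
{
    if (geteuid() != 0)
        return 0;                              /* unprivileged: nothing to adjust */
    if (chown(fname, uid, gid) == -1 && errno != ENOENT)
        return errno;                          /* a missing file is not an error */
    return 0;
}

int main(void)
{
    return try_chown("/tmp/md-store", (uid_t)33, (gid_t)33);
}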
+ */ + if (!geteuid()) { + if (-1 == chown(fname, (uid_t)uid, (gid_t)gid)) { + apr_status_t rv = APR_FROM_OS_ERROR(errno); + if (!APR_STATUS_IS_ENOENT(rv)) { + ap_log_perror(APLOG_MARK, APLOG_ERR, rv, p, APLOGNO(10082) + "Can't change owner of %s", fname); + } + return rv; } - return rv; } return APR_SUCCESS; #else @@ -58,10 +57,10 @@ apr_status_t md_try_chown(const char *fname, unsigned int uid, int gid, apr_pool apr_status_t md_make_worker_accessible(const char *fname, apr_pool_t *p) { -#if AP_NEED_SET_MUTEX_PERMS - return md_try_chown(fname, ap_unixd_config.user_id, -1, p); -#else +#ifdef WIN32 return APR_ENOTIMPL; +#else + return md_try_chown(fname, ap_unixd_config.user_id, -1, p); #endif } diff --git a/modules/md/mod_md_status.c b/modules/md/mod_md_status.c new file mode 100644 index 0000000..6b29256 --- /dev/null +++ b/modules/md/mod_md_status.c @@ -0,0 +1,987 @@ +/* Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include "mod_status.h" + +#include "md.h" +#include "md_curl.h" +#include "md_crypt.h" +#include "md_http.h" +#include "md_ocsp.h" +#include "md_json.h" +#include "md_status.h" +#include "md_store.h" +#include "md_store_fs.h" +#include "md_log.h" +#include "md_reg.h" +#include "md_util.h" +#include "md_version.h" +#include "md_acme.h" +#include "md_acme_authz.h" + +#include "mod_md.h" +#include "mod_md_private.h" +#include "mod_md_config.h" +#include "mod_md_drive.h" +#include "mod_md_status.h" + +/**************************************************************************************************/ +/* Certificate status */ + +#define APACHE_PREFIX "/.httpd/" +#define MD_STATUS_RESOURCE APACHE_PREFIX"certificate-status" +#define HTML_STATUS(X) (!((X)->flags & AP_STATUS_SHORT)) + +int md_http_cert_status(request_rec *r) +{ + int i; + md_json_t *resp, *mdj, *cj; + const md_srv_conf_t *sc; + const md_t *md; + md_pkey_spec_t *spec; + const char *keyname; + apr_bucket_brigade *bb; + apr_status_t rv; + + if (!r->parsed_uri.path || strcmp(MD_STATUS_RESOURCE, r->parsed_uri.path)) + return DECLINED; + + ap_log_rerror(APLOG_MARK, APLOG_TRACE2, 0, r, + "requesting status for: %s", r->hostname); + + /* We are looking for information about a staged certificate */ + sc = ap_get_module_config(r->server->module_config, &md_module); + if (!sc || !sc->mc || !sc->mc->reg || !sc->mc->certificate_status_enabled) return DECLINED; + md = md_get_by_domain(sc->mc->mds, r->hostname); + if (!md) return DECLINED; + + if (r->method_number != M_GET) { + ap_log_rerror(APLOG_MARK, APLOG_TRACE2, 0, r, + "md(%s): status supports only GET", md->name); + return HTTP_NOT_IMPLEMENTED; + } + + ap_log_rerror(APLOG_MARK, APLOG_TRACE2, 0, r, + "requesting status for MD: 
%s", md->name); + + rv = md_status_get_md_json(&mdj, md, sc->mc->reg, sc->mc->ocsp, r->pool); + if (APR_SUCCESS != rv) { + ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(10204) + "loading md status for %s", md->name); + return HTTP_INTERNAL_SERVER_ERROR; + } + + ap_log_rerror(APLOG_MARK, APLOG_TRACE2, 0, r, + "status for MD: %s is %s", md->name, md_json_writep(mdj, r->pool, MD_JSON_FMT_INDENT)); + + resp = md_json_create(r->pool); + + if (md_json_has_key(mdj, MD_KEY_CERT, MD_KEY_VALID, NULL)) { + md_json_setj(md_json_getj(mdj, MD_KEY_CERT, MD_KEY_VALID, NULL), resp, MD_KEY_VALID, NULL); + } + + for (i = 0; i < md_cert_count(md); ++i) { + spec = md_pkeys_spec_get(md->pks, i); + keyname = md_pkey_spec_name(spec); + cj = md_json_create(r->pool); + + if (md_json_has_key(mdj, MD_KEY_CERT, keyname, MD_KEY_VALID, NULL)) { + md_json_setj(md_json_getj(mdj, MD_KEY_CERT, keyname, MD_KEY_VALID, NULL), + cj, MD_KEY_VALID, NULL); + } + + if (md_json_has_key(mdj, MD_KEY_CERT, keyname, MD_KEY_SERIAL, NULL)) { + md_json_sets(md_json_gets(mdj, MD_KEY_CERT, keyname, MD_KEY_SERIAL, NULL), + cj, MD_KEY_SERIAL, NULL); + } + if (md_json_has_key(mdj, MD_KEY_CERT, keyname, MD_KEY_SHA256_FINGERPRINT, NULL)) { + md_json_sets(md_json_gets(mdj, MD_KEY_CERT, keyname, MD_KEY_SHA256_FINGERPRINT, NULL), + cj, MD_KEY_SHA256_FINGERPRINT, NULL); + } + md_json_setj(cj, resp, keyname, NULL ); + } + + if (md_json_has_key(mdj, MD_KEY_RENEWAL, NULL)) { + /* copy over the information we want to make public about this: + * - when not finished, add an empty object to indicate something is going on + * - when a certificate is staged, add the information from that */ + cj = md_json_getj(mdj, MD_KEY_RENEWAL, MD_KEY_CERT, NULL); + cj = cj? cj : md_json_create(r->pool); + md_json_setj(cj, resp, MD_KEY_RENEWAL, MD_KEY_CERT, NULL); + } + + ap_log_rerror(APLOG_MARK, APLOG_TRACE2, 0, r, "md[%s]: sending status", md->name); + apr_table_set(r->headers_out, "Content-Type", "application/json"); + bb = apr_brigade_create(r->pool, r->connection->bucket_alloc); + md_json_writeb(resp, MD_JSON_FMT_INDENT, bb); + ap_pass_brigade(r->output_filters, bb); + apr_brigade_cleanup(bb); + + return DONE; +} + +/**************************************************************************************************/ +/* Status hook */ + +typedef struct { + apr_pool_t *p; + const md_mod_conf_t *mc; + apr_bucket_brigade *bb; + int flags; + const char *prefix; + const char *separator; +} status_ctx; + +typedef struct status_info status_info; + +static void add_json_val(status_ctx *ctx, md_json_t *j); + +typedef void add_status_fn(status_ctx *ctx, md_json_t *mdj, const status_info *info); + +struct status_info { + const char *label; + const char *key; + add_status_fn *fn; +}; + +static void si_val_status(status_ctx *ctx, md_json_t *mdj, const status_info *info) +{ + const char *s = "unknown"; + apr_time_t until; + (void)info; + switch (md_json_getl(mdj, info->key, NULL)) { + case MD_S_INCOMPLETE: + s = md_json_gets(mdj, MD_KEY_STATE_DESCR, NULL); + s = s? apr_psprintf(ctx->p, "incomplete: %s", s) : "incomplete"; + break; + case MD_S_EXPIRED_DEPRECATED: + case MD_S_COMPLETE: + until = md_json_get_time(mdj, MD_KEY_CERT, MD_KEY_VALID, MD_KEY_UNTIL, NULL); + s = (!until || until > apr_time_now())? 
"good" : "expired"; + break; + case MD_S_ERROR: s = "error"; break; + case MD_S_MISSING_INFORMATION: s = "missing information"; break; + default: break; + } + if (HTML_STATUS(ctx)) { + apr_brigade_puts(ctx->bb, NULL, NULL, s); + } + else { + apr_brigade_printf(ctx->bb, NULL, NULL, "%s%s: %s\n", + ctx->prefix, info->label, s); + } +} + +static void si_val_url(status_ctx *ctx, md_json_t *mdj, const status_info *info) +{ + const char *url, *s; + + s = url = md_json_gets(mdj, info->key, NULL); + if (!url) return; + s = md_get_ca_name_from_url(ctx->p, url); + if (HTML_STATUS(ctx)) { + apr_brigade_printf(ctx->bb, NULL, NULL, "%s", + ap_escape_html2(ctx->p, url, 1), + ap_escape_html2(ctx->p, s, 1)); + } + else { + apr_brigade_printf(ctx->bb, NULL, NULL, "%s%sName: %s\n", + ctx->prefix, info->label, s); + apr_brigade_printf(ctx->bb, NULL, NULL, "%s%sURL: %s\n", + ctx->prefix, info->label, url); + } +} + +static void print_date(status_ctx *ctx, apr_time_t timestamp, const char *title) +{ + apr_bucket_brigade *bb = ctx->bb; + if (timestamp > 0) { + char ts[128]; + char ts2[128]; + apr_time_exp_t texp; + apr_size_t len; + + apr_time_exp_gmt(&texp, timestamp); + apr_strftime(ts, &len, sizeof(ts2)-1, "%Y-%m-%d", &texp); + ts[len] = '\0'; + if (!title) { + apr_strftime(ts2, &len, sizeof(ts)-1, "%Y-%m-%dT%H:%M:%SZ", &texp); + ts2[len] = '\0'; + title = ts2; + } + if (HTML_STATUS(ctx)) { + apr_brigade_printf(bb, NULL, NULL, + "%s", + ap_escape_html2(bb->p, title, 1), ts); + } + else { + apr_brigade_printf(bb, NULL, NULL, "%s%s: %s\n", + ctx->prefix, title, ts); + } + } +} + +static void print_time(status_ctx *ctx, const char *label, apr_time_t t) +{ + apr_bucket_brigade *bb = ctx->bb; + apr_time_t now; + const char *pre, *post, *sep; + char ts[APR_RFC822_DATE_LEN]; + char ts2[128]; + apr_time_exp_t texp; + apr_size_t len; + apr_interval_time_t delta; + + if (t == 0) { + /* timestamp is 0, we use that for "not set" */ + return; + } + apr_time_exp_gmt(&texp, t); + now = apr_time_now(); + pre = post = ""; + sep = (label && strlen(label))? " " : ""; + delta = 0; + if (HTML_STATUS(ctx)) { + apr_rfc822_date(ts, t); + if (t > now) { + delta = t - now; + pre = "in "; + } + else { + delta = now - t; + post = " ago"; + } + if (delta >= (4 * apr_time_from_sec(MD_SECS_PER_DAY))) { + apr_strftime(ts2, &len, sizeof(ts2)-1, "%Y-%m-%d", &texp); + ts2[len] = '\0'; + apr_brigade_printf(bb, NULL, NULL, "%s%s%s", + label, sep, ts, ts2); + } + else { + apr_brigade_printf(bb, NULL, NULL, "%s%s%s%s%s", + label, sep, ts, pre, md_duration_roughly(bb->p, delta), post); + } + } + else { + delta = t - now; + apr_brigade_printf(bb, NULL, NULL, "%s%s: %" APR_TIME_T_FMT "\n", + ctx->prefix, label, apr_time_sec(delta)); + } +} + +static void si_val_valid_time(status_ctx *ctx, md_json_t *mdj, const status_info *info) +{ + const char *sfrom, *suntil, *sep, *title; + apr_time_t from, until; + + sep = NULL; + sfrom = md_json_gets(mdj, info->key, MD_KEY_FROM, NULL); + from = sfrom? apr_date_parse_rfc(sfrom) : 0; + suntil = md_json_gets(mdj, info->key, MD_KEY_UNTIL, NULL); + until = suntil?apr_date_parse_rfc(suntil) : 0; + + if (HTML_STATUS(ctx)) { + if (from > apr_time_now()) { + apr_brigade_puts(ctx->bb, NULL, NULL, "from "); + print_date(ctx, from, sfrom); + sep = " "; + } + if (until) { + if (sep) apr_brigade_puts(ctx->bb, NULL, NULL, sep); + apr_brigade_puts(ctx->bb, NULL, NULL, "until "); + title = sfrom? 
apr_psprintf(ctx->p, "%s - %s", sfrom, suntil) : suntil; + print_date(ctx, until, title); + } + } + else { + if (from > apr_time_now()) { + print_date(ctx, from, + apr_pstrcat(ctx->p, info->label, "From", NULL)); + } + if (until) { + print_date(ctx, until, + apr_pstrcat(ctx->p, info->label, "Until", NULL)); + } + } +} + +static void si_add_header(status_ctx *ctx, const status_info *info) +{ + if (HTML_STATUS(ctx)) { + const char *html = ap_escape_html2(ctx->p, info->label, 1); + apr_brigade_printf(ctx->bb, NULL, NULL, "%s", html, html); + } +} + +static void si_val_cert_valid_time(status_ctx *ctx, md_json_t *mdj, const status_info *info) +{ + md_json_t *jcert; + status_info sub = *info; + + sub.key = MD_KEY_VALID; + jcert = md_json_getj(mdj, info->key, NULL); + if (jcert) si_val_valid_time(ctx, jcert, &sub); +} + +static void val_url_print(status_ctx *ctx, const status_info *info, + const char*url, const char *proto, int i) +{ + const char *s; + + if (proto && !strcmp(proto, "tailscale")) { + s = "tailscale"; + } + else if (url) { + s = md_get_ca_name_from_url(ctx->p, url); + } + else { + return; + } + if (HTML_STATUS(ctx)) { + apr_brigade_printf(ctx->bb, NULL, NULL, "%s%s", + i? " " : "", + ap_escape_html2(ctx->p, url, 1), + ap_escape_html2(ctx->p, s, 1)); + } + else if (i == 0) { + apr_brigade_printf(ctx->bb, NULL, NULL, "%s%sName: %s\n", + ctx->prefix, info->label, s); + apr_brigade_printf(ctx->bb, NULL, NULL, "%s%sURL: %s\n", + ctx->prefix, info->label, url); + } + else { + apr_brigade_printf(ctx->bb, NULL, NULL, "%s%sName%d: %s\n", + ctx->prefix, info->label, i, s); + apr_brigade_printf(ctx->bb, NULL, NULL, "%s%sURL%d: %s\n", + ctx->prefix, info->label, i, url); + } +} + +static void si_val_ca_urls(status_ctx *ctx, md_json_t *mdj, const status_info *info) +{ + md_json_t *jcert; + const char *proto, *url; + apr_array_header_t *urls; + int i; + + jcert = md_json_getj(mdj, info->key, NULL); + if (!jcert) { + return; + } + + proto = md_json_gets(jcert, MD_KEY_PROTO, NULL); + url = md_json_gets(jcert, MD_KEY_URL, NULL); + if (url) { + /* print the effective CA url used, if set */ + val_url_print(ctx, info, url, proto, 0); + } + else { + /* print the available CA urls configured */ + urls = apr_array_make(ctx->p, 3, sizeof(const char*)); + md_json_getsa(urls, jcert, MD_KEY_URLS, NULL); + for (i = 0; i < urls->nelts; ++i) { + url = APR_ARRAY_IDX(urls, i, const char*); + val_url_print(ctx, info, url, proto, i); + } + } +} + +static int count_certs(void *baton, const char *key, md_json_t *json) +{ + int *pcount = baton; + + (void)json; + if (strcmp(key, MD_KEY_VALID)) { + *pcount += 1; + } + return 1; +} + +static void print_job_summary(status_ctx *ctx, md_json_t *mdj, const char *key, + const char *separator) +{ + apr_bucket_brigade *bb = ctx->bb; + char buffer[HUGE_STRING_LEN]; + apr_status_t rv; + int finished, errors, cert_count; + apr_time_t t; + const char *s, *line; + + if (!md_json_has_key(mdj, key, NULL)) { + return; + } + + finished = md_json_getb(mdj, key, MD_KEY_FINISHED, NULL); + errors = (int)md_json_getl(mdj, key, MD_KEY_ERRORS, NULL); + rv = (apr_status_t)md_json_getl(mdj, key, MD_KEY_LAST, MD_KEY_STATUS, NULL); + + line = separator? separator : ""; + + if (rv != APR_SUCCESS) { + char *errstr = apr_strerror(rv, buffer, sizeof(buffer)); + s = md_json_gets(mdj, key, MD_KEY_LAST, MD_KEY_PROBLEM, NULL); + if (HTML_STATUS(ctx)) { + line = apr_psprintf(bb->p, "%s Error[%s]: %s", line, + errstr, s? 
s : ""); + } + else { + apr_brigade_printf(bb, NULL, NULL, "%sLastStatus: %s\n", ctx->prefix, errstr); + apr_brigade_printf(bb, NULL, NULL, "%sLastProblem: %s\n", ctx->prefix, s); + } + } + + if (!HTML_STATUS(ctx)) { + apr_brigade_printf(bb, NULL, NULL, "%sFinished: %s\n", ctx->prefix, + finished ? "yes" : "no"); + } + if (finished) { + cert_count = 0; + md_json_iterkey(count_certs, &cert_count, mdj, key, MD_KEY_CERT, NULL); + if (HTML_STATUS(ctx)) { + if (cert_count > 0) { + line =apr_psprintf(bb->p, "%s finished, %d new certificate%s staged.", + line, cert_count, cert_count > 1? "s" : ""); + } + else { + line = apr_psprintf(bb->p, "%s finished successfully.", line); + } + } + else { + apr_brigade_printf(bb, NULL, NULL, "%sNewStaged: %d\n", ctx->prefix, cert_count); + } + } + else { + s = md_json_gets(mdj, key, MD_KEY_LAST, MD_KEY_DETAIL, NULL); + if (s) { + if (HTML_STATUS(ctx)) { + line = apr_psprintf(bb->p, "%s %s", line, s); + } + else { + apr_brigade_printf(bb, NULL, NULL, "%sLastDetail: %s\n", ctx->prefix, s); + } + } + } + + errors = (int)md_json_getl(mdj, MD_KEY_ERRORS, NULL); + if (errors > 0) { + if (HTML_STATUS(ctx)) { + line = apr_psprintf(bb->p, "%s (%d retr%s) ", line, + errors, (errors > 1)? "y" : "ies"); + } + else { + apr_brigade_printf(bb, NULL, NULL, "%sRetries: %d\n", ctx->prefix, errors); + } + } + + if (HTML_STATUS(ctx)) { + apr_brigade_puts(bb, NULL, NULL, line); + } + + t = md_json_get_time(mdj, key, MD_KEY_NEXT_RUN, NULL); + if (t > apr_time_now() && !finished) { + print_time(ctx, + HTML_STATUS(ctx) ? "\nNext run" : "NextRun", + t); + } + else if (line[0] != '\0') { + if (HTML_STATUS(ctx)) { + apr_brigade_puts(bb, NULL, NULL, "\nOngoing..."); + } + else { + apr_brigade_printf(bb, NULL, NULL, "%s: Ongoing\n", ctx->prefix); + } + } +} + +static void si_val_activity(status_ctx *ctx, md_json_t *mdj, const status_info *info) +{ + apr_time_t t; + const char *prefix = ctx->prefix; + + (void)info; + if (!HTML_STATUS(ctx)) { + ctx->prefix = apr_pstrcat(ctx->p, prefix, info->label, NULL); + } + + if (md_json_has_key(mdj, MD_KEY_RENEWAL, NULL)) { + print_job_summary(ctx, mdj, MD_KEY_RENEWAL, NULL); + return; + } + + t = md_json_get_time(mdj, MD_KEY_RENEW_AT, NULL); + if (t > apr_time_now()) { + print_time(ctx, "Renew", t); + } + else if (t) { + if (HTML_STATUS(ctx)) { + apr_brigade_puts(ctx->bb, NULL, NULL, "Pending"); + } + else { + apr_brigade_printf(ctx->bb, NULL, NULL, "%s: %s", ctx->prefix, "Pending"); + } + } + else if (MD_RENEW_MANUAL == md_json_getl(mdj, MD_KEY_RENEW_MODE, NULL)) { + if (HTML_STATUS(ctx)) { + apr_brigade_puts(ctx->bb, NULL, NULL, "Manual renew"); + } + else { + apr_brigade_printf(ctx->bb, NULL, NULL, "%s: %s", ctx->prefix, "Manual renew"); + } + } + if (!HTML_STATUS(ctx)) { + ctx->prefix = prefix; + } +} + +static int cert_check_iter(void *baton, const char *key, md_json_t *json) +{ + status_ctx *ctx = baton; + const char *fingerprint; + + fingerprint = md_json_gets(json, MD_KEY_SHA256_FINGERPRINT, NULL); + if (fingerprint) { + if (HTML_STATUS(ctx)) { + apr_brigade_printf(ctx->bb, NULL, NULL, + "%s[%s]
", + ctx->mc->cert_check_url, fingerprint, + ctx->mc->cert_check_name, key); + } + else { + apr_brigade_printf(ctx->bb, NULL, NULL, + "%sType: %s\n", + ctx->prefix, + key); + apr_brigade_printf(ctx->bb, NULL, NULL, + "%sName: %s\n", + ctx->prefix, + ctx->mc->cert_check_name); + apr_brigade_printf(ctx->bb, NULL, NULL, + "%sURL: %s%s\n", + ctx->prefix, + ctx->mc->cert_check_url, fingerprint); + apr_brigade_printf(ctx->bb, NULL, NULL, + "%sFingerprint: %s\n", + ctx->prefix, + fingerprint); + } + } + return 1; +} + +static void si_val_remote_check(status_ctx *ctx, md_json_t *mdj, const status_info *info) +{ + (void)info; + if (ctx->mc->cert_check_name && ctx->mc->cert_check_url) { + const char *prefix = ctx->prefix; + if (!HTML_STATUS(ctx)) { + ctx->prefix = apr_pstrcat(ctx->p, prefix, info->label, NULL); + } + md_json_iterkey(cert_check_iter, ctx, mdj, MD_KEY_CERT, NULL); + if (!HTML_STATUS(ctx)) { + ctx->prefix = prefix; + } + } +} + +static void si_val_stapling(status_ctx *ctx, md_json_t *mdj, const status_info *info) +{ + (void)info; + if (!md_json_getb(mdj, MD_KEY_STAPLING, NULL)) return; + if (HTML_STATUS(ctx)) { + apr_brigade_puts(ctx->bb, NULL, NULL, "on"); + } + else { + apr_brigade_printf(ctx->bb, NULL, NULL, "%s: on", ctx->prefix); + } +} + +static int json_iter_val(void *data, size_t index, md_json_t *json) +{ + status_ctx *ctx = data; + const char *prefix = ctx->prefix; + if (HTML_STATUS(ctx)) { + if (index) apr_brigade_puts(ctx->bb, NULL, NULL, ctx->separator); + } + else { + ctx->prefix = apr_pstrcat(ctx->p, prefix, apr_psprintf(ctx->p, "[%" APR_SIZE_T_FMT "]", index), NULL); + } + add_json_val(ctx, json); + if (!HTML_STATUS(ctx)) { + ctx->prefix = prefix; + } + return 1; +} + +static void add_json_val(status_ctx *ctx, md_json_t *j) +{ + if (!j) return; + if (md_json_is(MD_JSON_TYPE_ARRAY, j, NULL)) { + md_json_itera(json_iter_val, ctx, j, NULL); + return; + } + if (!HTML_STATUS(ctx)) { + apr_brigade_puts(ctx->bb, NULL, NULL, ctx->prefix); + apr_brigade_puts(ctx->bb, NULL, NULL, ": "); + } + if (md_json_is(MD_JSON_TYPE_INT, j, NULL)) { + md_json_writeb(j, MD_JSON_FMT_COMPACT, ctx->bb); + } + else if (md_json_is(MD_JSON_TYPE_STRING, j, NULL)) { + apr_brigade_puts(ctx->bb, NULL, NULL, md_json_gets(j, NULL)); + } + else if (md_json_is(MD_JSON_TYPE_OBJECT, j, NULL)) { + md_json_writeb(j, MD_JSON_FMT_COMPACT, ctx->bb); + } + else if (md_json_is(MD_JSON_TYPE_BOOL, j, NULL)) { + apr_brigade_puts(ctx->bb, NULL, NULL, md_json_getb(j, NULL)? "on" : "off"); + } + if (!HTML_STATUS(ctx)) { + apr_brigade_puts(ctx->bb, NULL, NULL, "\n"); + } +} + +static void si_val_names(status_ctx *ctx, md_json_t *mdj, const status_info *info) +{ + const char *prefix = ctx->prefix; + if (HTML_STATUS(ctx)) { + apr_brigade_puts(ctx->bb, NULL, NULL, "
"); + } + else { + ctx->prefix = apr_pstrcat(ctx->p, prefix, info->label, NULL); + } + add_json_val(ctx, md_json_getj(mdj, info->key, NULL)); + if (HTML_STATUS(ctx)) { + apr_brigade_puts(ctx->bb, NULL, NULL, "
"); + } + else { + ctx->prefix = prefix; + } +} + +static void add_status_cell(status_ctx *ctx, md_json_t *mdj, const status_info *info) +{ + if (info->fn) { + info->fn(ctx, mdj, info); + } + else { + const char *prefix = ctx->prefix; + if (!HTML_STATUS(ctx)) { + ctx->prefix = apr_pstrcat(ctx->p, prefix, info->label, NULL); + } + add_json_val(ctx, md_json_getj(mdj, info->key, NULL)); + if (!HTML_STATUS(ctx)) { + ctx->prefix = prefix; + } + } +} + +static const status_info status_infos[] = { + { "Domain", MD_KEY_NAME, NULL }, + { "Names", MD_KEY_DOMAINS, si_val_names }, + { "Status", MD_KEY_STATE, si_val_status }, + { "Valid", MD_KEY_CERT, si_val_cert_valid_time }, + { "CA", MD_KEY_CA, si_val_ca_urls }, + { "Stapling", MD_KEY_STAPLING, si_val_stapling }, + { "CheckAt", MD_KEY_SHA256_FINGERPRINT, si_val_remote_check }, + { "Activity", MD_KEY_NOTIFIED, si_val_activity }, +}; + +static int add_md_row(void *baton, apr_size_t index, md_json_t *mdj) +{ + status_ctx *ctx = baton; + const char *prefix = ctx->prefix; + int i; + + if (HTML_STATUS(ctx)) { + apr_brigade_printf(ctx->bb, NULL, NULL, "", (index % 2)? "odd" : "even"); + for (i = 0; i < (int)(sizeof(status_infos)/sizeof(status_infos[0])); ++i) { + apr_brigade_puts(ctx->bb, NULL, NULL, ""); + add_status_cell(ctx, mdj, &status_infos[i]); + apr_brigade_puts(ctx->bb, NULL, NULL, ""); + } + apr_brigade_puts(ctx->bb, NULL, NULL, ""); + } else { + for (i = 0; i < (int)(sizeof(status_infos)/sizeof(status_infos[0])); ++i) { + ctx->prefix = apr_pstrcat(ctx->p, prefix, apr_psprintf(ctx->p, "[%" APR_SIZE_T_FMT "]", index), NULL); + add_status_cell(ctx, mdj, &status_infos[i]); + ctx->prefix = prefix; + } + } + return 1; +} + +static int md_name_cmp(const void *v1, const void *v2) +{ + return strcmp((*(const md_t**)v1)->name, (*(const md_t**)v2)->name); +} + +int md_domains_status_hook(request_rec *r, int flags) +{ + const md_srv_conf_t *sc; + const md_mod_conf_t *mc; + int i; + status_ctx ctx; + apr_array_header_t *mds; + md_json_t *jstatus, *jstock; + + ap_log_rerror(APLOG_MARK, APLOG_TRACE1, 0, r, "server-status for managed domains, start"); + sc = ap_get_module_config(r->server->module_config, &md_module); + if (!sc) return DECLINED; + mc = sc->mc; + if (!mc || !mc->server_status_enabled) return DECLINED; + + ctx.p = r->pool; + ctx.mc = mc; + ctx.bb = apr_brigade_create(r->pool, r->connection->bucket_alloc); + ctx.flags = flags; + ctx.prefix = "ManagedCertificates"; + ctx.separator = " "; + + mds = apr_array_copy(r->pool, mc->mds); + qsort(mds->elts, (size_t)mds->nelts, sizeof(md_t *), md_name_cmp); + + if (!HTML_STATUS(&ctx)) { + int total = 0, complete = 0, renewing = 0, errored = 0, ready = 0; + ap_log_rerror(APLOG_MARK, APLOG_TRACE1, 0, r, "no-html managed domain status summary"); + if (mc->mds->nelts > 0) { + md_status_take_stock(&jstock, mds, mc->reg, r->pool); + ap_log_rerror(APLOG_MARK, APLOG_TRACE1, 0, r, "got JSON managed domain status summary"); + total = (int)md_json_getl(jstock, MD_KEY_TOTAL, NULL); + complete = (int)md_json_getl(jstock, MD_KEY_COMPLETE, NULL); + renewing = (int)md_json_getl(jstock, MD_KEY_RENEWING, NULL); + errored = (int)md_json_getl(jstock, MD_KEY_ERRORED, NULL); + ready = (int)md_json_getl(jstock, MD_KEY_READY, NULL); + } + apr_brigade_printf(ctx.bb, NULL, NULL, "%sTotal: %d\n", ctx.prefix, total); + apr_brigade_printf(ctx.bb, NULL, NULL, "%sOK: %d\n", ctx.prefix, complete); + apr_brigade_printf(ctx.bb, NULL, NULL, "%sRenew: %d\n", ctx.prefix, renewing); + apr_brigade_printf(ctx.bb, NULL, NULL, "%sErrored: %d\n", 
ctx.prefix, errored); + apr_brigade_printf(ctx.bb, NULL, NULL, "%sReady: %d\n", ctx.prefix, ready); + } + if (mc->mds->nelts > 0) { + md_status_get_json(&jstatus, mds, mc->reg, mc->ocsp, r->pool); + ap_log_rerror(APLOG_MARK, APLOG_TRACE1, 0, r, "got JSON managed domain status"); + if (HTML_STATUS(&ctx)) { + ap_log_rerror(APLOG_MARK, APLOG_TRACE1, 0, r, "html managed domain status table"); + apr_brigade_puts(ctx.bb, NULL, NULL, + "
\nManaged Certificates
\n\n"); + for (i = 0; i < (int)(sizeof(status_infos)/sizeof(status_infos[0])); ++i) { + si_add_header(&ctx, &status_infos[i]); + } + apr_brigade_puts(ctx.bb, NULL, NULL, "\n"); + } + else { + ctx.prefix = "ManagedDomain"; + } + ap_log_rerror(APLOG_MARK, APLOG_TRACE1, 0, r, "iterating JSON managed domain status"); + md_json_itera(add_md_row, &ctx, jstatus, MD_KEY_MDS, NULL); + if (HTML_STATUS(&ctx)) { + apr_brigade_puts(ctx.bb, NULL, NULL, "\n\n
\n"); + } + } + + ap_pass_brigade(r->output_filters, ctx.bb); + apr_brigade_cleanup(ctx.bb); + ap_log_rerror(APLOG_MARK, APLOG_TRACE1, 0, r, "server-status for managed domains, end"); + + return OK; +} + +static void si_val_ocsp_activity(status_ctx *ctx, md_json_t *mdj, const status_info *info) +{ + apr_time_t t; + const char *prefix = ctx->prefix; + + (void)info; + if (!HTML_STATUS(ctx)) { + ctx->prefix = apr_pstrcat(ctx->p, prefix, info->label, NULL); + } + t = md_json_get_time(mdj, MD_KEY_RENEW_AT, NULL); + print_time(ctx, "Refresh", t); + print_job_summary(ctx, mdj, MD_KEY_RENEWAL, ": "); + if (!HTML_STATUS(ctx)) { + ctx->prefix = prefix; + } +} + +static const status_info ocsp_status_infos[] = { + { "Domain", MD_KEY_DOMAIN, NULL }, + { "CertificateID", MD_KEY_ID, NULL }, + { "OCSPStatus", MD_KEY_STATUS, NULL }, + { "StaplingValid", MD_KEY_VALID, si_val_valid_time }, + { "Responder", MD_KEY_URL, si_val_url }, + { "Activity", MD_KEY_NOTIFIED, si_val_ocsp_activity }, +}; + +static int add_ocsp_row(void *baton, apr_size_t index, md_json_t *mdj) +{ + status_ctx *ctx = baton; + const char *prefix = ctx->prefix; + int i; + + if (HTML_STATUS(ctx)) { + apr_brigade_printf(ctx->bb, NULL, NULL, "", (index % 2)? "odd" : "even"); + for (i = 0; i < (int)(sizeof(ocsp_status_infos)/sizeof(ocsp_status_infos[0])); ++i) { + apr_brigade_puts(ctx->bb, NULL, NULL, ""); + add_status_cell(ctx, mdj, &ocsp_status_infos[i]); + apr_brigade_puts(ctx->bb, NULL, NULL, ""); + } + apr_brigade_puts(ctx->bb, NULL, NULL, ""); + } else { + for (i = 0; i < (int)(sizeof(ocsp_status_infos)/sizeof(ocsp_status_infos[0])); ++i) { + ctx->prefix = apr_pstrcat(ctx->p, prefix, apr_psprintf(ctx->p, "[%" APR_SIZE_T_FMT "]", index), NULL); + add_status_cell(ctx, mdj, &ocsp_status_infos[i]); + ctx->prefix = prefix; + } + } + return 1; +} + +int md_ocsp_status_hook(request_rec *r, int flags) +{ + const md_srv_conf_t *sc; + const md_mod_conf_t *mc; + int i; + status_ctx ctx; + md_json_t *jstatus, *jstock; + + ap_log_rerror(APLOG_MARK, APLOG_TRACE1, 0, r, "server-status for ocsp stapling, start"); + sc = ap_get_module_config(r->server->module_config, &md_module); + if (!sc) return DECLINED; + mc = sc->mc; + if (!mc || !mc->server_status_enabled) return DECLINED; + + ctx.p = r->pool; + ctx.mc = mc; + ctx.bb = apr_brigade_create(r->pool, r->connection->bucket_alloc); + ctx.flags = flags; + ctx.prefix = "ManagedStaplings"; + ctx.separator = " "; + + if (!HTML_STATUS(&ctx)) { + int total = 0, good = 0, revoked = 0, unknown = 0; + ap_log_rerror(APLOG_MARK, APLOG_TRACE1, 0, r, "no-html ocsp stapling status summary"); + if (md_ocsp_count(mc->ocsp) > 0) { + md_ocsp_get_summary(&jstock, mc->ocsp, r->pool); + ap_log_rerror(APLOG_MARK, APLOG_TRACE1, 0, r, "got JSON ocsp stapling status summary"); + total = (int)md_json_getl(jstock, MD_KEY_TOTAL, NULL); + good = (int)md_json_getl(jstock, MD_KEY_GOOD, NULL); + revoked = (int)md_json_getl(jstock, MD_KEY_REVOKED, NULL); + unknown = (int)md_json_getl(jstock, MD_KEY_UNKNOWN, NULL); + } + apr_brigade_printf(ctx.bb, NULL, NULL, "%sTotal: %d\n", ctx.prefix, total); + apr_brigade_printf(ctx.bb, NULL, NULL, "%sOK: %d\n", ctx.prefix, good); + apr_brigade_printf(ctx.bb, NULL, NULL, "%sRenew: %d\n", ctx.prefix, revoked); + apr_brigade_printf(ctx.bb, NULL, NULL, "%sErrored: %d\n", ctx.prefix, unknown); + } + if (md_ocsp_count(mc->ocsp) > 0) { + md_ocsp_get_status_all(&jstatus, mc->ocsp, r->pool); + ap_log_rerror(APLOG_MARK, APLOG_TRACE1, 0, r, "got JSON ocsp stapling status"); + if (HTML_STATUS(&ctx)) { + 
ap_log_rerror(APLOG_MARK, APLOG_TRACE1, 0, r, "html ocsp stapling status table"); + apr_brigade_puts(ctx.bb, NULL, NULL, + "
\nManaged Staplings
\n\n"); + for (i = 0; i < (int)(sizeof(ocsp_status_infos)/sizeof(ocsp_status_infos[0])); ++i) { + si_add_header(&ctx, &ocsp_status_infos[i]); + } + apr_brigade_puts(ctx.bb, NULL, NULL, "\n"); + } + else { + ctx.prefix = "ManagedStapling"; + } + ap_log_rerror(APLOG_MARK, APLOG_TRACE1, 0, r, "iterating JSON ocsp stapling status"); + md_json_itera(add_ocsp_row, &ctx, jstatus, MD_KEY_OCSPS, NULL); + if (HTML_STATUS(&ctx)) { + apr_brigade_puts(ctx.bb, NULL, NULL, "\n\n
\n"); + } + } + + ap_pass_brigade(r->output_filters, ctx.bb); + apr_brigade_cleanup(ctx.bb); + ap_log_rerror(APLOG_MARK, APLOG_TRACE1, 0, r, "server-status for ocsp stapling, end"); + + return OK; +} + +/**************************************************************************************************/ +/* Status handlers */ + +int md_status_handler(request_rec *r) +{ + const md_srv_conf_t *sc; + const md_mod_conf_t *mc; + apr_array_header_t *mds; + md_json_t *jstatus; + apr_bucket_brigade *bb; + const md_t *md; + const char *name; + + if (strcmp(r->handler, "md-status")) { + return DECLINED; + } + + sc = ap_get_module_config(r->server->module_config, &md_module); + if (!sc) return DECLINED; + mc = sc->mc; + if (!mc) return DECLINED; + + if (r->method_number != M_GET) { + ap_log_rerror(APLOG_MARK, APLOG_TRACE2, 0, r, "md-status supports only GET"); + return HTTP_NOT_IMPLEMENTED; + } + + jstatus = NULL; + md = NULL; + if (r->path_info && r->path_info[0] == '/' && r->path_info[1] != '\0') { + name = strrchr(r->path_info, '/') + 1; + md = md_get_by_name(mc->mds, name); + if (!md) md = md_get_by_domain(mc->mds, name); + } + + if (md) { + md_status_get_md_json(&jstatus, md, mc->reg, mc->ocsp, r->pool); + } + else { + mds = apr_array_copy(r->pool, mc->mds); + qsort(mds->elts, (size_t)mds->nelts, sizeof(md_t *), md_name_cmp); + md_status_get_json(&jstatus, mds, mc->reg, mc->ocsp, r->pool); + } + + if (jstatus) { + apr_table_set(r->headers_out, "Content-Type", "application/json"); + bb = apr_brigade_create(r->pool, r->connection->bucket_alloc); + md_json_writeb(jstatus, MD_JSON_FMT_INDENT, bb); + ap_pass_brigade(r->output_filters, bb); + apr_brigade_cleanup(bb); + + return DONE; + } + return DECLINED; +} + diff --git a/modules/md/mod_md_status.h b/modules/md/mod_md_status.h new file mode 100644 index 0000000..f347826 --- /dev/null +++ b/modules/md/mod_md_status.h @@ -0,0 +1,27 @@ +/* Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef mod_md_md_status_h +#define mod_md_md_status_h + +int md_http_cert_status(request_rec *r); + +int md_domains_status_hook(request_rec *r, int flags); +int md_ocsp_status_hook(request_rec *r, int flags); + +int md_status_handler(request_rec *r); + +#endif /* mod_md_md_status_h */ diff --git a/modules/metadata/mod_cern_meta.c b/modules/metadata/mod_cern_meta.c index 09a41e1..3f36b2d 100644 --- a/modules/metadata/mod_cern_meta.c +++ b/modules/metadata/mod_cern_meta.c @@ -240,7 +240,7 @@ static int scan_meta_file(request_rec *r, apr_file_t *f) while (apr_isspace(*l)) ++l; - if (!strcasecmp(w, "Content-type")) { + if (!ap_cstr_casecmp(w, "Content-type")) { char *tmp; /* Nuke trailing whitespace */ @@ -252,7 +252,7 @@ static int scan_meta_file(request_rec *r, apr_file_t *f) ap_content_type_tolower(tmp); ap_set_content_type(r, tmp); } - else if (!strcasecmp(w, "Status")) { + else if (!ap_cstr_casecmp(w, "Status")) { sscanf(l, "%d", &r->status); r->status_line = apr_pstrdup(r->pool, l); } diff --git a/modules/metadata/mod_headers.c b/modules/metadata/mod_headers.c index 1ea970d..ef812cd 100644 --- a/modules/metadata/mod_headers.c +++ b/modules/metadata/mod_headers.c @@ -78,13 +78,12 @@ #include "httpd.h" #include "http_config.h" #include "http_request.h" +#include "http_ssl.h" #include "http_log.h" #include "util_filter.h" #include "http_protocol.h" #include "ap_expr.h" -#include "mod_ssl.h" /* for the ssl_var_lookup optional function defn */ - /* format_tag_hash is initialized during pre-config */ static apr_hash_t *format_tag_hash; @@ -161,9 +160,6 @@ typedef struct { module AP_MODULE_DECLARE_DATA headers_module; -/* Pointer to ssl_var_lookup, if available. */ -static APR_OPTIONAL_FN_TYPE(ssl_var_lookup) *header_ssl_lookup = NULL; - /* * Tag formatting functions */ @@ -210,17 +206,12 @@ static const char *header_request_env_var(request_rec *r, char *a) static const char *header_request_ssl_var(request_rec *r, char *name) { - if (header_ssl_lookup) { - const char *val = header_ssl_lookup(r->pool, r->server, - r->connection, r, name); - if (val && val[0]) - return unwrap_header(r->pool, val); - else - return "(null)"; - } - else { + const char *val = ap_ssl_var_lookup(r->pool, r->server, + r->connection, r, name); + if (val && val[0]) + return unwrap_header(r->pool, val); + else return "(null)"; - } } static const char *header_request_loadavg(request_rec *r, char *a) @@ -668,7 +659,7 @@ static const char *process_regexp(header_entry *hdr, const char *value, static int echo_header(void *v, const char *key, const char *val) { - edit_do *ed = v; + echo_do *ed = (echo_do *)v; /* If the input header (key) matches the regex, echo it intact to * r->headers_out. 
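The strcasecmp()-to-ap_cstr_casecmp() conversions above in mod_cern_meta.c (and repeated below in mod_headers.c, ajp_header.c and mod_proxy.c) swap a locale-sensitive libc comparison for an ASCII-only one, so protocol tokens such as "Content-Type" or "Status" compare identically no matter which locale the server process runs under. The following is a minimal standalone sketch of that folding behaviour, written for illustration only; it is not httpd's own implementation.

static int ascii_tolower(int c)
{
    /* Fold only the ASCII range; bytes outside 'A'..'Z' pass through
     * untouched, which is what makes the compare locale-independent. */
    return (c >= 'A' && c <= 'Z') ? c + ('a' - 'A') : c;
}

/* Case-insensitive compare restricted to ASCII, in the spirit of
 * ap_cstr_casecmp(); returns <0, 0 or >0 like strcmp(). */
static int ascii_casecmp(const char *s1, const char *s2)
{
    for (;; ++s1, ++s2) {
        int c1 = ascii_tolower((unsigned char)*s1);
        int c2 = ascii_tolower((unsigned char)*s2);
        if (c1 != c2 || c1 == '\0') {
            return c1 - c2;
        }
    }
}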
@@ -791,14 +782,14 @@ static int do_headers_fixup(request_rec *r, apr_table_t *headers, } break; case hdr_set: - if (!strcasecmp(hdr->header, "Content-Type")) { + if (!ap_cstr_casecmp(hdr->header, "Content-Type")) { ap_set_content_type(r, process_tags(hdr, r)); } apr_table_setn(headers, hdr->header, process_tags(hdr, r)); break; case hdr_setifempty: if (NULL == apr_table_get(headers, hdr->header)) { - if (!strcasecmp(hdr->header, "Content-Type")) { + if (!ap_cstr_casecmp(hdr->header, "Content-Type")) { ap_set_content_type(r, process_tags(hdr, r)); } apr_table_setn(headers, hdr->header, process_tags(hdr, r)); @@ -814,7 +805,7 @@ static int do_headers_fixup(request_rec *r, apr_table_t *headers, break; case hdr_edit: case hdr_edit_r: - if (!strcasecmp(hdr->header, "Content-Type") && r->content_type) { + if (!ap_cstr_casecmp(hdr->header, "Content-Type") && r->content_type) { const char *repl = process_regexp(hdr, r->content_type, r); if (repl == NULL) return 0; @@ -989,7 +980,6 @@ static int header_pre_config(apr_pool_t *p, apr_pool_t *plog, apr_pool_t *ptemp) static int header_post_config(apr_pool_t *pconf, apr_pool_t *plog, apr_pool_t *ptemp, server_rec *s) { - header_ssl_lookup = APR_RETRIEVE_OPTIONAL_FN(ssl_var_lookup); return OK; } diff --git a/modules/metadata/mod_mime_magic.c b/modules/metadata/mod_mime_magic.c index 22dadaf..7dac4fd 100644 --- a/modules/metadata/mod_mime_magic.c +++ b/modules/metadata/mod_mime_magic.c @@ -257,7 +257,7 @@ static int fsmagic(request_rec *r, const char *fn); #define L_MAIL 8 /* Electronic mail */ #define L_NEWS 9 /* Usenet Netnews */ -static const char *types[] = +static const char *const types[] = { "text/html", /* HTML */ "text/plain", /* "c program text", */ @@ -462,7 +462,6 @@ typedef struct { typedef struct { magic_rsl *head; /* result string list */ magic_rsl *tail; - unsigned suf_recursion; /* recursion depth in suffix check */ } magic_req_rec; /* @@ -606,7 +605,7 @@ static int magic_rsl_putchar(request_rec *r, char c) /* high overhead for 1 char - just hope they don't do this much */ str[0] = c; str[1] = '\0'; - return magic_rsl_add(r, str); + return magic_rsl_add(r, apr_pstrdup(r->pool, str)); } /* allocate and copy a contiguous string from a result string list */ @@ -984,7 +983,7 @@ static int apprentice(server_rec *s, apr_pool_t *p) #if MIME_MAGIC_DEBUG ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s, APLOGNO(01516) - MODNAME ": apprentice conf=%x file=%s m=%s m->next=%s last=%s", + MODNAME ": apprentice conf=%pp file=%s m=%s m->next=%s last=%s", conf, conf->magicfile ? conf->magicfile : "NULL", conf->magic ? "set" : "NULL", @@ -1276,7 +1275,7 @@ static int parse(server_rec *serv, apr_pool_t *p, char *l, int lineno) #if MIME_MAGIC_DEBUG ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, serv, APLOGNO(01525) - MODNAME ": parse line=%d m=%x next=%x cont=%d desc=%s", + MODNAME ": parse line=%d m=%pp next=%pp cont=%d desc=%s", lineno, m, m->next, m->cont_level, m->desc); #endif /* MIME_MAGIC_DEBUG */ @@ -1541,7 +1540,7 @@ static int match(request_rec *r, unsigned char *s, apr_size_t nbytes) #if MIME_MAGIC_DEBUG ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(01529) - MODNAME ": match conf=%x file=%s m=%s m->next=%s last=%s", + MODNAME ": match conf=%pp file=%s m=%s m->next=%s last=%s", conf, conf->magicfile ? conf->magicfile : "NULL", conf->magic ? 
"set" : "NULL", @@ -1591,7 +1590,7 @@ static int match(request_rec *r, unsigned char *s, apr_size_t nbytes) #if MIME_MAGIC_DEBUG rule_counter++; ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(01532) - MODNAME ": line=%d mc=%x mc->next=%x cont=%d desc=%s", + MODNAME ": line=%d mc=%pp mc->next=%pp cont=%d desc=%s", m_cont->lineno, m_cont, m_cont->next, m_cont->cont_level, m_cont->desc); @@ -2044,12 +2043,12 @@ static int ascmagic(request_rec *r, unsigned char *buf, apr_size_t nbytes) * - uncompress old into new, using method, return sizeof new */ -static struct { - char *magic; +static const struct { + const char *magic; apr_size_t maglen; - char *argv[3]; + const char *argv[3]; int silent; - char *encoding; /* MUST be lowercase */ + const char *encoding; /* MUST be lowercase */ } compr[] = { /* we use gzip here rather than uncompress because we have to pass @@ -2077,7 +2076,7 @@ static struct { }, }; -static int ncompr = sizeof(compr) / sizeof(compr[0]); +#define ncompr (sizeof(compr) / sizeof(compr[0])) static int zmagic(request_rec *r, unsigned char *buf, apr_size_t nbytes) { @@ -2177,11 +2176,12 @@ static int uncompress(request_rec *r, int method, parm.method = method; /* We make a sub_pool so that we can collect our child early, otherwise - * there are cases (i.e. generating directory indicies with mod_autoindex) + * there are cases (i.e. generating directory indices with mod_autoindex) * where we would end up with LOTS of zombies. */ if (apr_pool_create(&sub_context, r->pool) != APR_SUCCESS) return -1; + apr_pool_tag(sub_context, "magic_uncompress"); if ((rv = create_uncompress_child(&parm, sub_context, &pipe_out)) != APR_SUCCESS) { ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01553) diff --git a/modules/metadata/mod_remoteip.c b/modules/metadata/mod_remoteip.c index 4572ce1..045e988 100644 --- a/modules/metadata/mod_remoteip.c +++ b/modules/metadata/mod_remoteip.c @@ -393,7 +393,7 @@ static void remoteip_warn_enable_conflict(remoteip_addr_info *prev, server_rec * ap_log_error(APLOG_MARK, APLOG_NOTICE, 0, new, APLOGNO(03491) "RemoteIPProxyProtocol: previous setting for %s:%hu from virtual " - "host {%s:%hu in %s} is being overriden by virtual host " + "host {%s:%hu in %s} is being overridden by virtual host " "{%s:%hu in %s}; new setting is '%s'", buf, prev->addr->port, prev->source->server_hostname, prev->source->addrs->host_port, prev->source->defn_name, @@ -987,15 +987,13 @@ static remoteip_parse_status_t remoteip_process_v2_header(conn_rec *c, return HDR_ERROR; #endif default: - /* unsupported protocol, keep local connection address */ - return HDR_DONE; + /* unsupported protocol */ + ap_log_cerror(APLOG_MARK, APLOG_ERR, 0, c, APLOGNO(10183) + "RemoteIPProxyProtocol: unsupported protocol %.2hx", + (unsigned short)hdr->v2.fam); + return HDR_ERROR; } break; /* we got a sockaddr now */ - - case 0x00: /* LOCAL command */ - /* keep local connection address for LOCAL */ - return HDR_DONE; - default: /* not a supported command */ ap_log_cerror(APLOG_MARK, APLOG_ERR, 0, c, APLOGNO(03507) @@ -1087,11 +1085,24 @@ static apr_status_t remoteip_input_filter(ap_filter_t *f, /* try to read a header's worth of data */ while (!ctx->done) { if (APR_BRIGADE_EMPTY(ctx->bb)) { - ret = ap_get_brigade(f->next, ctx->bb, ctx->mode, block, - ctx->need - ctx->rcvd); + apr_off_t got, want = ctx->need - ctx->rcvd; + + ret = ap_get_brigade(f->next, ctx->bb, ctx->mode, block, want); if (ret != APR_SUCCESS) { + ap_log_cerror(APLOG_MARK, APLOG_ERR, ret, f->c, APLOGNO(10184) + "failed reading input"); 
return ret; } + + ret = apr_brigade_length(ctx->bb, 1, &got); + if (ret || got > want) { + ap_log_cerror(APLOG_MARK, APLOG_ERR, ret, f->c, APLOGNO(10185) + "RemoteIPProxyProtocol header too long, " + "got %" APR_OFF_T_FMT " expected %" APR_OFF_T_FMT, + got, want); + f->c->aborted = 1; + return APR_ECONNABORTED; + } } if (APR_BRIGADE_EMPTY(ctx->bb)) { return block == APR_NONBLOCK_READ ? APR_SUCCESS : APR_EOF; @@ -1139,6 +1150,13 @@ static apr_status_t remoteip_input_filter(ap_filter_t *f, if (ctx->rcvd >= MIN_V2_HDR_LEN) { ctx->need = MIN_V2_HDR_LEN + remoteip_get_v2_len((proxy_header *) ctx->header); + if (ctx->need > sizeof(proxy_v2)) { + ap_log_cerror(APLOG_MARK, APLOG_ERR, 0, f->c, APLOGNO(10186) + "RemoteIPProxyProtocol protocol header length too long"); + f->c->aborted = 1; + apr_brigade_destroy(ctx->bb); + return APR_ECONNABORTED; + } } if (ctx->rcvd >= ctx->need) { psts = remoteip_process_v2_header(f->c, conn_conf, diff --git a/modules/metadata/mod_unique_id.c b/modules/metadata/mod_unique_id.c index 0b05fbf..2555749 100644 --- a/modules/metadata/mod_unique_id.c +++ b/modules/metadata/mod_unique_id.c @@ -26,6 +26,11 @@ #include "apr_general.h" /* for APR_OFFSETOF */ #include "apr_network_io.h" +#ifdef APR_HAS_THREADS +#include "apr_atomic.h" /* for apr_atomic_inc32 */ +#include "mpm_common.h" /* for ap_mpm_query */ +#endif + #include "httpd.h" #include "http_config.h" #include "http_log.h" @@ -104,7 +109,7 @@ typedef struct { /* * Sun Jun 7 05:43:49 CEST 1998 -- Alvaro * More comments: - * 1) The UUencoding prodecure is now done in a general way, avoiding the problems + * 1) The UUencoding procedure is now done in a general way, avoiding the problems * with sizes and paddings that can arise depending on the architecture. Now the * offsets and sizes of the elements of the unique_id_rec structure are calculated * in unique_id_global_init; and then used to duplicate the structure without the @@ -123,6 +128,10 @@ typedef struct { * XXX: thrashing. */ static unique_id_rec cur_unique_id; +static apr_uint32_t cur_unique_counter; +#ifdef APR_HAS_THREADS +static int is_threaded_mpm; +#endif /* * Number of elements in the structure unique_id_rec. @@ -160,6 +169,11 @@ static int unique_id_global_init(apr_pool_t *p, apr_pool_t *plog, apr_pool_t *pt static void unique_id_child_init(apr_pool_t *p, server_rec *s) { +#ifdef APR_HAS_THREADS + is_threaded_mpm = 0; + ap_mpm_query(AP_MPMQ_IS_THREADED, &is_threaded_mpm); +#endif + ap_random_insecure_bytes(&cur_unique_id.root, sizeof(cur_unique_id.root)); @@ -168,28 +182,29 @@ static void unique_id_child_init(apr_pool_t *p, server_rec *s) * against restart problems, and a little less protection against a clock * going backwards in time. */ - ap_random_insecure_bytes(&cur_unique_id.counter, - sizeof(cur_unique_id.counter)); + ap_random_insecure_bytes(&cur_unique_counter, + sizeof(cur_unique_counter)); } -/* NOTE: This is *NOT* the same encoding used by base64encode ... the last two - * characters should be + and /. But those two characters have very special - * meanings in URLs, and we want to make it easy to use identifiers in - * URLs. So we replace them with @ and -. - */ +/* Use the base64url encoding per RFC 4648, avoiding characters which + * are not safe in URLs. ### TODO: can switch to apr_encode_*. 
*/ static const char uuencoder[64] = { 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', - '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '@', '-', + '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '-', '_', }; +#ifndef APR_UINT16_MAX +#define APR_UINT16_MAX 0xffffu +#endif + static const char *gen_unique_id(const request_rec *r) { char *str; /* - * Buffer padded with two final bytes, used to copy the unique_id_red + * Buffer padded with two final bytes, used to copy the unique_id_rec * structure without the internal paddings that it could have. */ unique_id_rec new_unique_id; @@ -197,14 +212,24 @@ static const char *gen_unique_id(const request_rec *r) unique_id_rec foo; unsigned char pad[2]; } paddedbuf; + apr_uint32_t counter; unsigned char *x,*y; - unsigned short counter; int i,j,k; memcpy(&new_unique_id.root, &cur_unique_id.root, ROOT_SIZE); - new_unique_id.counter = cur_unique_id.counter; new_unique_id.stamp = htonl((unsigned int)apr_time_sec(r->request_time)); new_unique_id.thread_index = htonl((unsigned int)r->connection->id); +#ifdef APR_HAS_THREADS + if (is_threaded_mpm) + counter = apr_atomic_inc32(&cur_unique_counter); + else +#endif + counter = cur_unique_counter++; + + /* The counter is two bytes for the uuencoded unique id, in network + * byte order. + */ + new_unique_id.counter = htons(counter % APR_UINT16_MAX); /* we'll use a temporal buffer to avoid uuencoding the possible internal * paddings of the original structure */ @@ -236,11 +261,6 @@ static const char *gen_unique_id(const request_rec *r) } str[k++] = '\0'; - /* and increment the identifier for the next call */ - - counter = ntohs(new_unique_id.counter) + 1; - cur_unique_id.counter = htons(counter); - return str; } diff --git a/modules/metadata/mod_usertrack.c b/modules/metadata/mod_usertrack.c index 73a9f45..55252ec 100644 --- a/modules/metadata/mod_usertrack.c +++ b/modules/metadata/mod_usertrack.c @@ -86,6 +86,9 @@ typedef struct { const char *cookie_domain; char *regexp_string; /* used to compile regexp; save for debugging */ ap_regex_t *regexp; /* used to find usertrack cookie in cookie header */ + int is_secure; + int is_httponly; + const char *samesite; } cookie_dir_rec; /* Make Cookie: Now we have to generate something that is going to be @@ -143,6 +146,21 @@ static void make_cookie(request_rec *r) : ""), NULL); } + if (dcfg->samesite != NULL) { + new_cookie = apr_pstrcat(r->pool, new_cookie, "; ", + dcfg->samesite, + NULL); + } + if (dcfg->is_secure) { + new_cookie = apr_pstrcat(r->pool, new_cookie, "; Secure", + NULL); + } + if (dcfg->is_httponly) { + new_cookie = apr_pstrcat(r->pool, new_cookie, "; HttpOnly", + NULL); + } + + apr_table_addn(r->err_headers_out, (dcfg->style == CT_COOKIE2 ? "Set-Cookie2" : "Set-Cookie"), @@ -266,9 +284,9 @@ static void *make_cookie_dir(apr_pool_t *p, char *d) dcfg = (cookie_dir_rec *) apr_pcalloc(p, sizeof(cookie_dir_rec)); dcfg->cookie_name = COOKIE_NAME; - dcfg->cookie_domain = NULL; dcfg->style = CT_UNSET; - dcfg->enabled = 0; + /* calloc'ed to disabled: enabled, cookie_domain, samesite, is_secure, + * is_httponly */ /* In case the user does not use the CookieName directive, * we need to compile the regexp for the default cookie name. 
*/ @@ -277,14 +295,6 @@ static void *make_cookie_dir(apr_pool_t *p, char *d) return dcfg; } -static const char *set_cookie_enable(cmd_parms *cmd, void *mconfig, int arg) -{ - cookie_dir_rec *dcfg = mconfig; - - dcfg->enabled = arg; - return NULL; -} - static const char *set_cookie_exp(cmd_parms *parms, void *dummy, const char *arg) { @@ -429,6 +439,31 @@ static const char *set_cookie_style(cmd_parms *cmd, void *mconfig, return NULL; } +/* + * SameSite enabled disabled + */ + +static const char *set_samesite_value(cmd_parms *cmd, void *mconfig, + const char *name) +{ + cookie_dir_rec *dcfg; + + dcfg = (cookie_dir_rec *) mconfig; + + if (strcasecmp(name, "strict") == 0) { + dcfg->samesite = "SameSite=Strict"; + } else if (strcasecmp(name, "lax") == 0) { + dcfg->samesite = "SameSite=Lax"; + } else if (strcasecmp(name, "none") == 0) { + dcfg->samesite = "SameSite=None"; + } else { + return "CookieSameSite accepts 'Strict', 'Lax', or 'None'"; + } + + + return NULL; +} + static const command_rec cookie_log_cmds[] = { AP_INIT_TAKE1("CookieExpires", set_cookie_exp, NULL, OR_FILEINFO, "an expiry date code"), @@ -436,10 +471,20 @@ static const command_rec cookie_log_cmds[] = { "domain to which this cookie applies"), AP_INIT_TAKE1("CookieStyle", set_cookie_style, NULL, OR_FILEINFO, "'Netscape', 'Cookie' (RFC2109), or 'Cookie2' (RFC2965)"), - AP_INIT_FLAG("CookieTracking", set_cookie_enable, NULL, OR_FILEINFO, + AP_INIT_FLAG("CookieTracking", ap_set_flag_slot, + (void *)APR_OFFSETOF(cookie_dir_rec, enabled), OR_FILEINFO, "whether or not to enable cookies"), AP_INIT_TAKE1("CookieName", set_cookie_name, NULL, OR_FILEINFO, "name of the tracking cookie"), + AP_INIT_TAKE1("CookieSameSite", set_samesite_value, NULL, OR_FILEINFO, + "SameSite setting"), + AP_INIT_FLAG("CookieSecure", ap_set_flag_slot, + (void *)APR_OFFSETOF(cookie_dir_rec, is_secure), OR_FILEINFO, + "is cookie secure"), + AP_INIT_FLAG("CookieHttpOnly", ap_set_flag_slot, + (void *)APR_OFFSETOF(cookie_dir_rec, is_httponly),OR_FILEINFO, + "is cookie http only"), + {NULL} }; diff --git a/modules/proxy/ajp.h b/modules/proxy/ajp.h index c119a7e..a950ee9 100644 --- a/modules/proxy/ajp.h +++ b/modules/proxy/ajp.h @@ -414,11 +414,13 @@ apr_status_t ajp_ilink_receive(apr_socket_t *sock, ajp_msg_t *msg); * @param r current request * @param buffsize max size of the AJP packet. * @param uri requested uri + * @param secret authentication secret * @return APR_SUCCESS or error */ apr_status_t ajp_send_header(apr_socket_t *sock, request_rec *r, apr_size_t buffsize, - apr_uri_t *uri); + apr_uri_t *uri, + const char *secret); /** * Read the ajp message and return the type of the message. 
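The new mod_usertrack directives above (CookieSameSite, CookieSecure, CookieHttpOnly) simply append the standard attribute strings to the tracking cookie built in make_cookie(). Below is a small standalone sketch of the resulting Set-Cookie value; the cookie value is made up for illustration and plain libc string handling stands in for the module's apr_pstrcat() calls.

#include <stdio.h>
#include <string.h>
#include <strings.h>

/* Mirrors set_samesite_value(): only Strict, Lax and None are accepted. */
static const char *samesite_attr(const char *arg)
{
    if (!strcasecmp(arg, "strict")) return "; SameSite=Strict";
    if (!strcasecmp(arg, "lax"))    return "; SameSite=Lax";
    if (!strcasecmp(arg, "none"))   return "; SameSite=None";
    return NULL;
}

int main(void)
{
    /* Hypothetical tracking cookie; the value format is illustrative only. */
    char cookie[256] = "Apache=127.0.0.1.1300000000000000; path=/";
    const char *ss = samesite_attr("Lax");                              /* CookieSameSite Lax */

    if (ss) strncat(cookie, ss, sizeof(cookie) - strlen(cookie) - 1);
    strncat(cookie, "; Secure", sizeof(cookie) - strlen(cookie) - 1);   /* CookieSecure On    */
    strncat(cookie, "; HttpOnly", sizeof(cookie) - strlen(cookie) - 1); /* CookieHttpOnly On  */

    printf("Set-Cookie: %s\n", cookie);
    return 0;
}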
diff --git a/modules/proxy/ajp_header.c b/modules/proxy/ajp_header.c index 67353a7..0266a7d 100644 --- a/modules/proxy/ajp_header.c +++ b/modules/proxy/ajp_header.c @@ -17,6 +17,8 @@ #include "ajp_header.h" #include "ajp.h" +#include "util_script.h" + APLOG_USE_MODULE(proxy_ajp); static const char *response_trans_headers[] = { @@ -59,6 +61,7 @@ static int sc_for_req_header(const char *header_name) if (len < 4 || len > 15) return UNKNOWN_METHOD; + memset(header, 0, sizeof header); while (*p) header[i++] = apr_toupper(*p++); header[i] = '\0'; @@ -213,7 +216,8 @@ AJPV13_REQUEST/AJPV14_REQUEST= static apr_status_t ajp_marshal_into_msgb(ajp_msg_t *msg, request_rec *r, - apr_uri_t *uri) + apr_uri_t *uri, + const char *secret) { int method; apr_uint32_t i, num_headers = 0; @@ -293,17 +297,15 @@ static apr_status_t ajp_marshal_into_msgb(ajp_msg_t *msg, i, elts[i].key, elts[i].val); } -/* XXXX need to figure out how to do this - if (s->secret) { + if (secret) { if (ajp_msg_append_uint8(msg, SC_A_SECRET) || - ajp_msg_append_string(msg, s->secret)) { + ajp_msg_append_string(msg, secret)) { ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(03228) - "Error ajp_marshal_into_msgb - " + "ajp_marshal_into_msgb: " "Error appending secret"); return APR_EGENERAL; } } - */ if (r->user) { if (ajp_msg_append_uint8(msg, SC_A_REMOTE_USER) || @@ -584,8 +586,15 @@ static apr_status_t ajp_unmarshal_response(ajp_msg_t *msg, r->headers_out = save_table; } else { - r->headers_out = NULL; + /* + * Reset headers, but not to NULL because things below the chain expect + * this to be non NULL e.g. the ap_content_length_filter. + */ + r->headers_out = apr_table_make(r->pool, 1); num_headers = 0; + ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(10405) + "ajp_unmarshal_response: Bad number of headers"); + return rc; } ap_log_rerror(APLOG_MARK, APLOG_TRACE4, 0, r, @@ -633,15 +642,15 @@ static apr_status_t ajp_unmarshal_response(ajp_msg_t *msg, } /* Set-Cookie need additional processing */ - if (!strcasecmp(stringname, "Set-Cookie")) { + if (!ap_cstr_casecmp(stringname, "Set-Cookie")) { value = ap_proxy_cookie_reverse_map(r, dconf, value); } /* Location, Content-Location, URI and Destination need additional * processing */ - else if (!strcasecmp(stringname, "Location") - || !strcasecmp(stringname, "Content-Location") - || !strcasecmp(stringname, "URI") - || !strcasecmp(stringname, "Destination")) + else if (!ap_cstr_casecmp(stringname, "Location") + || !ap_cstr_casecmp(stringname, "Content-Location") + || !ap_cstr_casecmp(stringname, "URI") + || !ap_cstr_casecmp(stringname, "Destination")) { value = ap_proxy_location_reverse_map(r, dconf, value); } @@ -654,7 +663,7 @@ static apr_status_t ajp_unmarshal_response(ajp_msg_t *msg, apr_table_add(r->headers_out, stringname, value); /* Content-type needs an additional handling */ - if (strcasecmp(stringname, "Content-Type") == 0) { + if (ap_cstr_casecmp(stringname, "Content-Type") == 0) { /* add corresponding filter */ ap_set_content_type(r, apr_pstrdup(r->pool, value)); ap_log_rerror(APLOG_MARK, APLOG_TRACE5, 0, r, @@ -662,6 +671,14 @@ static apr_status_t ajp_unmarshal_response(ajp_msg_t *msg, } } + /* AJP has its own body framing mechanism which we don't + * match against any provided Content-Length, so let the + * core determine C-L vs T-E based on what's actually sent. 
+ */ + if (!apr_table_get(r->subprocess_env, AP_TRUST_CGILIKE_CL_ENVVAR)) + apr_table_unset(r->headers_out, "Content-Length"); + apr_table_unset(r->headers_out, "Transfer-Encoding"); + return APR_SUCCESS; } @@ -671,7 +688,8 @@ static apr_status_t ajp_unmarshal_response(ajp_msg_t *msg, apr_status_t ajp_send_header(apr_socket_t *sock, request_rec *r, apr_size_t buffsize, - apr_uri_t *uri) + apr_uri_t *uri, + const char *secret) { ajp_msg_t *msg; apr_status_t rc; @@ -683,7 +701,7 @@ apr_status_t ajp_send_header(apr_socket_t *sock, return rc; } - rc = ajp_marshal_into_msgb(msg, r, uri); + rc = ajp_marshal_into_msgb(msg, r, uri, secret); if (rc != APR_SUCCESS) { ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(00988) "ajp_send_header: ajp_marshal_into_msgb failed"); diff --git a/modules/proxy/balancers/mod_lbmethod_heartbeat.c b/modules/proxy/balancers/mod_lbmethod_heartbeat.c index 7aeaf71..0534e5b 100644 --- a/modules/proxy/balancers/mod_lbmethod_heartbeat.c +++ b/modules/proxy/balancers/mod_lbmethod_heartbeat.c @@ -115,7 +115,6 @@ static apr_status_t readfile_heartbeats(const char *path, apr_hash_t *servers, { char *t; - int lineno = 0; apr_bucket_alloc_t *ba = apr_bucket_alloc_create(pool); apr_bucket_brigade *bb = apr_brigade_create(pool, ba); apr_bucket_brigade *tmpbb = apr_brigade_create(pool, ba); @@ -137,7 +136,6 @@ static apr_status_t readfile_heartbeats(const char *path, apr_hash_t *servers, rv = apr_brigade_split_line(tmpbb, bb, APR_BLOCK_READ, sizeof(buf)); - lineno++; if (rv) { return rv; @@ -281,6 +279,7 @@ static proxy_worker *find_best_hb(proxy_balancer *balancer, } apr_pool_create(&tpool, r->pool); + apr_pool_tag(tpool, "lb_heartbeat_tpool"); servers = apr_hash_make(tpool); @@ -302,7 +301,7 @@ static proxy_worker *find_best_hb(proxy_balancer *balancer, if (!server) { ap_log_rerror(APLOG_MARK, APLOG_DEBUG, rv, r, APLOGNO(01214) - "lb_heartbeat: No server for worker %s", (*worker)->s->name); + "lb_heartbeat: No server for worker %s", (*worker)->s->name_ex); continue; } diff --git a/modules/proxy/mod_proxy.c b/modules/proxy/mod_proxy.c index 69a35ce..c9cef7c 100644 --- a/modules/proxy/mod_proxy.c +++ b/modules/proxy/mod_proxy.c @@ -17,6 +17,7 @@ #include "mod_proxy.h" #include "mod_core.h" #include "apr_optional.h" +#include "apr_strings.h" #include "scoreboard.h" #include "mod_status.h" #include "proxy_util.h" @@ -29,10 +30,6 @@ APR_DECLARE_OPTIONAL_FN(int, ssl_engine_disable, (conn_rec *)); APR_DECLARE_OPTIONAL_FN(int, ssl_engine_set, (conn_rec *, ap_conf_vector_t *, int proxy, int enable)); -APR_DECLARE_OPTIONAL_FN(int, ssl_is_https, (conn_rec *)); -APR_DECLARE_OPTIONAL_FN(char *, ssl_var_lookup, - (apr_pool_t *, server_rec *, - conn_rec *, request_rec *, char *)); #endif #ifndef MAX @@ -55,6 +52,9 @@ proxy_hcmethods_t PROXY_DECLARE_DATA proxy_hcmethods[] = { {GET, "GET", 1}, {CPING, "CPING", 0}, {PROVIDER, "PROVIDER", 0}, + {OPTIONS11, "OPTIONS11", 1}, + {HEAD11, "HEAD11", 1}, + {GET11, "GET11", 1}, {EOT, NULL, 1} }; @@ -149,7 +149,7 @@ static const char *set_worker_param(apr_pool_t *p, return "Max must be a positive number"; worker->s->hmax = ival; } - /* XXX: More inteligent naming needed */ + /* XXX: More intelligent naming needed */ else if (!strcasecmp(key, "smax")) { /* Maximum number of connections to remote that * will not be destroyed @@ -224,6 +224,24 @@ static const char *set_worker_param(apr_pool_t *p, return "EnableReuse must be On|Off"; worker->s->disablereuse_set = 1; } + else if (!strcasecmp(key, "addressttl")) { + /* Address TTL in seconds + */ + 
apr_interval_time_t ttl; + if (strcmp(val, "-1") == 0) { + worker->s->address_ttl = -1; + } + else if (ap_timeout_parameter_parse(val, &ttl, "s") == APR_SUCCESS + && (ttl <= apr_time_from_sec(APR_INT32_MAX)) + && (ttl % apr_time_from_sec(1)) == 0) { + worker->s->address_ttl = apr_time_sec(ttl); + } + else { + return "AddressTTL must be -1 or a number of seconds not " + "exceeding " APR_STRINGIFY(APR_INT32_MAX); + } + worker->s->address_ttl_set = 1; + } else if (!strcasecmp(key, "route")) { /* Worker route. */ @@ -308,13 +326,14 @@ static const char *set_worker_param(apr_pool_t *p, worker->s->conn_timeout_set = 1; } else if (!strcasecmp(key, "flusher")) { - if (strlen(val) >= sizeof(worker->s->flusher)) - apr_psprintf(p, "flusher name length must be < %d characters", - (int)sizeof(worker->s->flusher)); - PROXY_STRNCPY(worker->s->flusher, val); + if (PROXY_STRNCPY(worker->s->flusher, val) != APR_SUCCESS) { + return apr_psprintf(p, "flusher name length must be < %d characters", + (int)sizeof(worker->s->flusher)); + } } else if (!strcasecmp(key, "upgrade")) { - if (PROXY_STRNCPY(worker->s->upgrade, val) != APR_SUCCESS) { + if (PROXY_STRNCPY(worker->s->upgrade, + strcasecmp(val, "ANY") ? val : "*") != APR_SUCCESS) { return apr_psprintf(p, "upgrade protocol length must be < %d characters", (int)sizeof(worker->s->upgrade)); } @@ -327,6 +346,12 @@ static const char *set_worker_param(apr_pool_t *p, worker->s->response_field_size = (s ? s : HUGE_STRING_LEN); worker->s->response_field_size_set = 1; } + else if (!strcasecmp(key, "secret")) { + if (PROXY_STRNCPY(worker->s->secret, val) != APR_SUCCESS) { + return apr_psprintf(p, "Secret length must be < %d characters", + (int)sizeof(worker->s->secret)); + } + } else { if (set_worker_hc_param_f) { return set_worker_hc_param_f(p, s, worker, key, val, NULL); @@ -557,6 +582,201 @@ static int alias_match(const char *uri, const char *alias_fakename) return urip - uri; } +/* + * Inspired by mod_jk's jk_servlet_normalize(). + */ +static int alias_match_servlet(apr_pool_t *p, + const char **urip, + const char *alias) +{ + char *map; + const char *uri = *urip; + apr_array_header_t *stack; + int map_pos, uri_pos, alias_pos, first_pos; + int alias_depth = 0, depth; + + /* Both uri and alias should start with '/' */ + if (uri[0] != '/' || alias[0] != '/') { + return 0; + } + + stack = apr_array_make(p, 5, sizeof(int)); + map = apr_palloc(p, strlen(uri) + 1); + map[0] = '/'; + map[1] = '\0'; + + map_pos = uri_pos = alias_pos = first_pos = 1; + while (uri[uri_pos] != '\0') { + /* Remove path parameters ;foo=bar/ from any path segment */ + if (uri[uri_pos] == ';') { + do { + uri_pos++; + } while (uri[uri_pos] != '/' && uri[uri_pos] != '\0'); + continue; + } + + if (map[map_pos - 1] == '/') { + /* Collapse ///// sequences to / */ + if (uri[uri_pos] == '/') { + do { + uri_pos++; + } while (uri[uri_pos] == '/'); + continue; + } + + if (uri[uri_pos] == '.') { + /* Remove /./ segments */ + if (uri[uri_pos + 1] == '/' + || uri[uri_pos + 1] == ';' + || uri[uri_pos + 1] == '\0') { + uri_pos++; + if (uri[uri_pos] == '/') { + uri_pos++; + } + continue; + } + + /* Remove /xx/../ segments */ + if (uri[uri_pos + 1] == '.' 
+ && (uri[uri_pos + 2] == '/' + || uri[uri_pos + 2] == ';' + || uri[uri_pos + 2] == '\0')) { + /* Wind map segment back the previous one */ + if (map_pos == 1) { + /* Above root */ + return 0; + } + do { + map_pos--; + } while (map[map_pos - 1] != '/'); + map[map_pos] = '\0'; + + /* Wind alias segment back, unless in deeper segment */ + if (alias_depth == stack->nelts) { + if (alias[alias_pos] == '\0') { + alias_pos--; + } + while (alias_pos > 0 && alias[alias_pos] == '/') { + alias_pos--; + } + while (alias_pos > 0 && alias[alias_pos - 1] != '/') { + alias_pos--; + } + AP_DEBUG_ASSERT(alias_pos > 0); + alias_depth--; + } + apr_array_pop(stack); + + /* Move uri forward to the next segment */ + uri_pos += 2; + if (uri[uri_pos] == '/') { + uri_pos++; + } + first_pos = 0; + continue; + } + } + if (first_pos) { + while (uri[first_pos] == '/') { + first_pos++; + } + } + + /* New segment */ + APR_ARRAY_PUSH(stack, int) = first_pos ? first_pos : uri_pos; + if (alias[alias_pos] != '\0') { + if (alias[alias_pos - 1] != '/') { + /* Remain in pair with uri segments */ + do { + alias_pos++; + } while (alias[alias_pos - 1] != '/' && alias[alias_pos]); + } + while (alias[alias_pos] == '/') { + alias_pos++; + } + if (alias[alias_pos] != '\0') { + alias_depth++; + } + } + } + + if (alias[alias_pos] != '\0') { + int *match = &APR_ARRAY_IDX(stack, alias_depth - 1, int); + if (*match) { + if (alias[alias_pos] != uri[uri_pos]) { + /* Current segment does not match */ + *match = 0; + } + else if (alias[alias_pos + 1] == '\0' + && alias[alias_pos] != '/') { + if (uri[uri_pos + 1] == ';') { + /* We'll preserve the parameters of the last + * segment if it does not end with '/', so mark + * the match as negative for below handling. + */ + *match = -(uri_pos + 1); + } + else if (uri[uri_pos + 1] != '/' + && uri[uri_pos + 1] != '\0') { + /* Last segment does not match all the way */ + *match = 0; + } + } + } + /* Don't go past the segment if the uri isn't there yet */ + if (alias[alias_pos] != '/' || uri[uri_pos] == '/') { + alias_pos++; + } + } + + if (uri[uri_pos] == '/') { + first_pos = uri_pos + 1; + } + map[map_pos++] = uri[uri_pos++]; + map[map_pos] = '\0'; + } + + /* Can't reach the end of uri before the end of the alias, + * for example if uri is "/" and alias is "/examples" + */ + if (alias[alias_pos] != '\0') { + return 0; + } + + /* Check whether each alias segment matched */ + for (depth = 0; depth < alias_depth; ++depth) { + if (!APR_ARRAY_IDX(stack, depth, int)) { + return 0; + } + } + + /* If alias_depth == stack->nelts we have a full match, i.e. + * uri == alias so we can return uri_pos as is (the end of uri) + */ + if (alias_depth < stack->nelts) { + /* Return the segment following the alias */ + uri_pos = APR_ARRAY_IDX(stack, alias_depth, int); + if (alias_depth) { + /* But if the last segment of the alias does not end with '/' + * and the corresponding segment of the uri has parameters, + * we want to forward those parameters (see above for the + * negative pos trick/mark). + */ + int pos = APR_ARRAY_IDX(stack, alias_depth - 1, int); + if (pos < 0) { + uri_pos = -pos; + } + } + } + /* If the alias lacks a trailing slash, take it from the uri (if any) */ + if (alias[alias_pos - 1] != '/' && uri[uri_pos - 1] == '/') { + uri_pos--; + } + + *urip = map; + return uri_pos; +} + /* Detect if an absoluteURI should be proxied or not. Note that we * have to do this during this phase because later phases are * "short-circuiting"... i.e. 
translate_names will end when the first @@ -578,11 +798,12 @@ static int proxy_detect(request_rec *r) if (conf->req && r->parsed_uri.scheme) { /* but it might be something vhosted */ - if (!(r->parsed_uri.hostname - && !strcasecmp(r->parsed_uri.scheme, ap_http_scheme(r)) - && ap_matches_request_vhost(r, r->parsed_uri.hostname, - (apr_port_t)(r->parsed_uri.port_str ? r->parsed_uri.port - : ap_default_port(r))))) { + if (!r->parsed_uri.hostname + || ap_cstr_casecmp(r->parsed_uri.scheme, ap_http_scheme(r)) != 0 + || !ap_matches_request_vhost(r, r->parsed_uri.hostname, + (apr_port_t)(r->parsed_uri.port_str + ? r->parsed_uri.port + : ap_default_port(r)))) { r->proxyreq = PROXYREQ_PROXY; r->uri = r->unparsed_uri; r->filename = apr_pstrcat(r->pool, "proxy:", r->uri, NULL); @@ -667,6 +888,7 @@ PROXY_DECLARE(int) ap_proxy_trans_match(request_rec *r, struct proxy_alias *ent, int mismatch = 0; unsigned int nocanon = ent->flags & PROXYPASS_NOCANON; const char *use_uri = nocanon ? r->unparsed_uri : r->uri; + const char *servlet_uri = NULL; if (dconf && (dconf->interpolate_env == 1) && (ent->flags & PROXYPASS_INTERPOLATE)) { fake = proxy_interpolate(r, ent->fake); @@ -727,7 +949,14 @@ PROXY_DECLARE(int) ap_proxy_trans_match(request_rec *r, struct proxy_alias *ent, } } else { - len = alias_match(r->uri, fake); + if ((ent->flags & PROXYPASS_MAP_SERVLET) == PROXYPASS_MAP_SERVLET) { + servlet_uri = r->uri; + len = alias_match_servlet(r->pool, &servlet_uri, fake); + nocanon = 0; /* ignored since servlet's normalization applies */ + } + else { + len = alias_match(r->uri, fake); + } if (len != 0) { if ((real[0] == '!') && (real[1] == '\0')) { @@ -736,7 +965,7 @@ PROXY_DECLARE(int) ap_proxy_trans_match(request_rec *r, struct proxy_alias *ent, "'%s'; declining", r->uri); return DECLINED; } - if (nocanon && len != alias_match(r->unparsed_uri, ent->fake)) { + if (nocanon && len != alias_match(r->unparsed_uri, fake)) { mismatch = 1; use_uri = r->uri; } @@ -752,6 +981,17 @@ PROXY_DECLARE(int) ap_proxy_trans_match(request_rec *r, struct proxy_alias *ent, } if (found) { + unsigned int encoded = ent->flags & PROXYPASS_MAP_ENCODED; + + /* A proxy module is assigned this URL, check whether it's interested + * in the request itself (e.g. proxy_wstunnel cares about Upgrade + * requests only, and could hand over to proxy_http otherwise). + */ + int rc = proxy_run_check_trans(r, found + 6); + if (rc != OK && rc != DECLINED) { + return HTTP_CONTINUE; + } + r->filename = found; r->handler = "proxy-server"; r->proxyreq = PROXYREQ_REVERSE; @@ -762,29 +1002,67 @@ PROXY_DECLARE(int) ap_proxy_trans_match(request_rec *r, struct proxy_alias *ent, if (ent->flags & PROXYPASS_NOQUERY) { apr_table_setn(r->notes, "proxy-noquery", "1"); } + if (encoded) { + apr_table_setn(r->notes, "proxy-noencode", "1"); + } - ap_log_rerror(APLOG_MARK, APLOG_TRACE1, 0, r, APLOGNO(03464) - "URI path '%s' matches proxy handler '%s'", r->uri, - found); - - return OK; + if (servlet_uri) { + ap_log_rerror(APLOG_MARK, APLOG_TRACE1, 0, r, APLOGNO(10248) + "Servlet path '%s' (%s) matches proxy handler '%s'", + r->uri, servlet_uri, found); + /* Apply servlet normalization to r->uri so that or any + * directory context match does not have to handle path parameters. + * We change r->uri in-place so that r->parsed_uri.path is updated + * too. Since normalized servlet_uri is necessarily shorter than + * the original r->uri, strcpy() is fine. 
+ */ + AP_DEBUG_ASSERT(strlen(r->uri) >= strlen(servlet_uri)); + strcpy(r->uri, servlet_uri); + } + else { + ap_log_rerror(APLOG_MARK, APLOG_TRACE1, 0, r, APLOGNO(03464) + "URI path '%s' matches proxy handler '%s'", r->uri, + found); + } + return (encoded) ? DONE : OK; } - return DONE; + return HTTP_CONTINUE; } -static int proxy_trans(request_rec *r) +static int proxy_trans(request_rec *r, int pre_trans) { - int i; + int i, enc; struct proxy_alias *ent; proxy_dir_conf *dconf; proxy_server_conf *conf; if (r->proxyreq) { /* someone has already set up the proxy, it was possibly ourselves - * in proxy_detect + * in proxy_detect (DONE will prevent further decoding of r->uri, + * only if proxyreq is set before pre_trans already). */ - return OK; + return pre_trans ? DONE : OK; + } + + /* In early pre_trans hook, r->uri was not manipulated yet so we are + * compliant with RFC1945 at this point. Otherwise, it probably isn't + * an issue because this is a hybrid proxy/origin server. + */ + + dconf = ap_get_module_config(r->per_dir_config, &proxy_module); + conf = (proxy_server_conf *) ap_get_module_config(r->server->module_config, + &proxy_module); + + /* Always and only do PROXY_MAP_ENCODED mapping in pre_trans, when + * r->uri is still encoded, or we might consider for instance that + * a decoded sub-delim is now a delimiter (e.g. "%3B" => ';' for + * path parameters), which it's not. + */ + if ((pre_trans && !conf->map_encoded_one) + || (!pre_trans && conf->map_encoded_all)) { + /* Fast path, nothing at this stage */ + return DECLINED; } if ((r->unparsed_uri[0] == '*' && r->unparsed_uri[1] == '\0') @@ -796,37 +1074,42 @@ static int proxy_trans(request_rec *r) return DECLINED; } - /* XXX: since r->uri has been manipulated already we're not really - * compliant with RFC1945 at this point. But this probably isn't - * an issue because this is a hybrid proxy/origin server. - */ - - dconf = ap_get_module_config(r->per_dir_config, &proxy_module); - /* short way - this location is reverse proxied? 
*/ if (dconf->alias) { - int rv = ap_proxy_trans_match(r, dconf->alias, dconf); - if (DONE != rv) { - return rv; + enc = (dconf->alias->flags & PROXYPASS_MAP_ENCODED) != 0; + if (!(pre_trans ^ enc)) { + int rv = ap_proxy_trans_match(r, dconf->alias, dconf); + if (rv != HTTP_CONTINUE) { + return rv; + } } } - conf = (proxy_server_conf *) ap_get_module_config(r->server->module_config, - &proxy_module); - /* long way - walk the list of aliases, find a match */ - if (conf->aliases->nelts) { - ent = (struct proxy_alias *) conf->aliases->elts; - for (i = 0; i < conf->aliases->nelts; i++) { - int rv = ap_proxy_trans_match(r, &ent[i], dconf); - if (DONE != rv) { + for (i = 0; i < conf->aliases->nelts; i++) { + ent = &((struct proxy_alias *)conf->aliases->elts)[i]; + enc = (ent->flags & PROXYPASS_MAP_ENCODED) != 0; + if (!(pre_trans ^ enc)) { + int rv = ap_proxy_trans_match(r, ent, dconf); + if (rv != HTTP_CONTINUE) { return rv; } } } + return DECLINED; } +static int proxy_pre_translate_name(request_rec *r) +{ + return proxy_trans(r, 1); +} + +static int proxy_translate_name(request_rec *r) +{ + return proxy_trans(r, 0); +} + static int proxy_walk(request_rec *r) { proxy_server_conf *sconf = ap_get_module_config(r->server->module_config, @@ -857,6 +1140,7 @@ static int proxy_walk(request_rec *r) if (entry_proxy->refs && entry_proxy->refs->nelts) { if (!rxpool) { apr_pool_create(&rxpool, r->pool); + apr_pool_tag(rxpool, "proxy_rxpool"); } nmatch = entry_proxy->refs->nelts; pmatch = apr_palloc(rxpool, nmatch*sizeof(ap_regmatch_t)); @@ -979,7 +1263,7 @@ static int proxy_needsdomain(request_rec *r, const char *url, const char *domain /* If host does contain a dot already, or it is "localhost", decline */ if (strchr(r->parsed_uri.hostname, '.') != NULL /* has domain, or IPv4 literal */ || strchr(r->parsed_uri.hostname, ':') != NULL /* IPv6 literal */ - || strcasecmp(r->parsed_uri.hostname, "localhost") == 0) + || ap_cstr_casecmp(r->parsed_uri.hostname, "localhost") == 0) return DECLINED; /* host name has a dot already */ ref = apr_table_get(r->headers_in, "Referer"); @@ -1049,9 +1333,10 @@ static int proxy_handler(request_rec *r) char *end; maxfwd = apr_strtoi64(str, &end, 10); if (maxfwd < 0 || maxfwd == APR_INT64_MAX || *end) { - return ap_proxyerror(r, HTTP_BAD_REQUEST, - apr_psprintf(r->pool, - "Max-Forwards value '%s' could not be parsed", str)); + ap_log_rerror(APLOG_MARK, APLOG_WARNING, 0, r, APLOGNO(10188) + "Max-Forwards value '%s' could not be parsed", str); + return ap_proxyerror(r, HTTP_BAD_REQUEST, + "Max-Forwards request header could not be parsed"); } else if (maxfwd == 0) { switch (r->method_number) { @@ -1185,23 +1470,31 @@ static int proxy_handler(request_rec *r) if (strcmp(ents[i].scheme, "*") == 0 || (ents[i].use_regex && ap_regexec(ents[i].regexp, url, 0, NULL, 0) == 0) || - (p2 == NULL && strcasecmp(scheme, ents[i].scheme) == 0) || + (p2 == NULL && ap_cstr_casecmp(scheme, ents[i].scheme) == 0) || (p2 != NULL && - strncasecmp(url, ents[i].scheme, + ap_cstr_casecmpn(url, ents[i].scheme, strlen(ents[i].scheme)) == 0)) { /* handle the scheme */ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(01142) "Trying to run scheme_handler against proxy"); + + if (ents[i].creds) { + apr_table_set(r->notes, "proxy-basic-creds", ents[i].creds); + ap_log_rerror(APLOG_MARK, APLOG_TRACE1, 0, r, + "Using proxy auth creds %s", ents[i].creds); + } + access_status = proxy_run_scheme_handler(r, worker, conf, url, ents[i].hostname, ents[i].port); + if (ents[i].creds) apr_table_unset(r->notes, 
"proxy-basic-creds"); + /* Did the scheme handler process the request? */ if (access_status != DECLINED) { const char *cl_a; - char *end; apr_off_t cl; /* @@ -1211,18 +1504,17 @@ static int proxy_handler(request_rec *r) if (access_status != HTTP_BAD_GATEWAY) { goto cleanup; } + cl_a = apr_table_get(r->headers_in, "Content-Length"); - if (cl_a) { - apr_strtoff(&cl, cl_a, &end, 10); + if (cl_a && (!ap_parse_strict_length(&cl, cl_a) + || cl > 0)) { /* * The request body is of length > 0. We cannot * retry with a direct connection since we already * sent (parts of) the request body to the proxy * and do not have any longer. */ - if (cl > 0) { - goto cleanup; - } + goto cleanup; } /* * Transfer-Encoding was set as input header, so we had @@ -1347,6 +1639,8 @@ static void * create_proxy_config(apr_pool_t *p, server_rec *s) ps->forward = NULL; ps->reverse = NULL; ps->domain = NULL; + ps->map_encoded_one = 0; + ps->map_encoded_all = 1; ps->id = apr_psprintf(p, "p%x", 1); /* simply for storage size */ ps->viaopt = via_off; /* initially backward compatible with 1.3.1 */ ps->viaopt_set = 0; /* 0 means default */ @@ -1373,6 +1667,7 @@ static void * create_proxy_config(apr_pool_t *p, server_rec *s) ps->source_address = NULL; ps->source_address_set = 0; apr_pool_create_ex(&ps->pool, p, NULL, NULL); + apr_pool_tag(ps->pool, "proxy_server_conf"); return ps; } @@ -1503,6 +1798,9 @@ static void * merge_proxy_config(apr_pool_t *p, void *basev, void *overridesv) ps->forward = overrides->forward ? overrides->forward : base->forward; ps->reverse = overrides->reverse ? overrides->reverse : base->reverse; + ps->map_encoded_one = overrides->map_encoded_one || base->map_encoded_one; + ps->map_encoded_all = overrides->map_encoded_all && base->map_encoded_all; + ps->domain = (overrides->domain == NULL) ? base->domain : overrides->domain; ps->id = (overrides->id == NULL) ? base->id : overrides->id; ps->viaopt = (overrides->viaopt_set == 0) ? base->viaopt : overrides->viaopt; @@ -1560,6 +1858,7 @@ static void *create_proxy_dir_config(apr_pool_t *p, char *dummy) new->raliases = apr_array_make(p, 10, sizeof(struct proxy_alias)); new->cookie_paths = apr_array_make(p, 10, sizeof(struct proxy_alias)); new->cookie_domains = apr_array_make(p, 10, sizeof(struct proxy_alias)); + new->error_override_codes = apr_array_make(p, 10, sizeof(int)); new->preserve_host_set = 0; new->preserve_host = 0; new->interpolate_env = -1; /* unset */ @@ -1567,10 +1866,17 @@ static void *create_proxy_dir_config(apr_pool_t *p, char *dummy) new->error_override_set = 0; new->add_forwarded_headers = 1; new->add_forwarded_headers_set = 0; + new->forward_100_continue = 1; + new->forward_100_continue_set = 0; return (void *) new; } +static int int_order(const void *i1, const void *i2) +{ + return *(const int *)i1 - *(const int *)i2; +} + static void *merge_proxy_dir_config(apr_pool_t *p, void *basev, void *addv) { proxy_dir_conf *new = (proxy_dir_conf *) apr_pcalloc(p, sizeof(proxy_dir_conf)); @@ -1588,6 +1894,17 @@ static void *merge_proxy_dir_config(apr_pool_t *p, void *basev, void *addv) = apr_array_append(p, base->cookie_paths, add->cookie_paths); new->cookie_domains = apr_array_append(p, base->cookie_domains, add->cookie_domains); + new->error_override_codes + = apr_array_append(p, base->error_override_codes, add->error_override_codes); + /* Keep the array sorted for binary search (since "base" and "add" are + * already sorted, it's only needed only if both are merged). 
+ */ + if (base->error_override_codes->nelts + && add->error_override_codes->nelts) { + qsort(new->error_override_codes->elts, + new->error_override_codes->nelts, + sizeof(int), int_order); + } new->interpolate_env = (add->interpolate_env == -1) ? base->interpolate_env : add->interpolate_env; new->preserve_host = (add->preserve_host_set == 0) ? base->preserve_host @@ -1603,12 +1920,17 @@ static void *merge_proxy_dir_config(apr_pool_t *p, void *basev, void *addv) : add->add_forwarded_headers; new->add_forwarded_headers_set = add->add_forwarded_headers_set || base->add_forwarded_headers_set; - + new->forward_100_continue = + (add->forward_100_continue_set == 0) ? base->forward_100_continue + : add->forward_100_continue; + new->forward_100_continue_set = add->forward_100_continue_set + || base->forward_100_continue_set; + return new; } -static const char * - add_proxy(cmd_parms *cmd, void *dummy, const char *f1, const char *r1, int regex) +static const char *add_proxy(cmd_parms *cmd, void *dummy, const char *f1, + const char *r1, const char *creds, int regex) { server_rec *s = cmd->server; proxy_server_conf *conf = @@ -1666,19 +1988,24 @@ static const char * new->port = port; new->regexp = reg; new->use_regex = regex; + if (creds) { + new->creds = apr_pstrcat(cmd->pool, "Basic ", + ap_pbase64encode(cmd->pool, (char *)creds), + NULL); + } return NULL; } -static const char * - add_proxy_noregex(cmd_parms *cmd, void *dummy, const char *f1, const char *r1) +static const char *add_proxy_noregex(cmd_parms *cmd, void *dummy, const char *f1, + const char *r1, const char *creds) { - return add_proxy(cmd, dummy, f1, r1, 0); + return add_proxy(cmd, dummy, f1, r1, creds, 0); } -static const char * - add_proxy_regex(cmd_parms *cmd, void *dummy, const char *f1, const char *r1) +static const char *add_proxy_regex(cmd_parms *cmd, void *dummy, const char *f1, + const char *r1, const char *creds) { - return add_proxy(cmd, dummy, f1, r1, 1); + return add_proxy(cmd, dummy, f1, r1, creds, 1); } PROXY_DECLARE(const char *) ap_proxy_de_socketfy(apr_pool_t *p, const char *url) @@ -1688,8 +2015,8 @@ PROXY_DECLARE(const char *) ap_proxy_de_socketfy(apr_pool_t *p, const char *url) * We could be passed a URL during the config stage that contains * the UDS path... ignore it */ - if (!strncasecmp(url, "unix:", 5) && - ((ptr = ap_strchr_c(url, '|')) != NULL)) { + if (!ap_cstr_casecmpn(url, "unix:", 5) && + ((ptr = ap_strchr_c(url + 5, '|')) != NULL)) { /* move past the 'unix:...|' UDS path info */ const char *ret, *c; @@ -1721,12 +2048,14 @@ static const char * struct proxy_alias *new; char *f = cmd->path; char *r = NULL; + const char *real; char *word; apr_table_t *params = apr_table_make(cmd->pool, 5); const apr_array_header_t *arr; const apr_table_entry_t *elts; int i; - int use_regex = is_regex; + unsigned int worker_type = (is_regex) ? 
AP_PROXY_WORKER_IS_MATCH + : AP_PROXY_WORKER_IS_PREFIX; unsigned int flags = 0; const char *err; @@ -1742,7 +2071,7 @@ static const char * if (is_regex) { return "ProxyPassMatch invalid syntax ('~' usage)."; } - use_regex = 1; + worker_type = AP_PROXY_WORKER_IS_MATCH; continue; } f = word; @@ -1777,15 +2106,39 @@ static const char * "in the form 'key=value'."; } } - else + else { *val++ = '\0'; - apr_table_setn(params, word, val); + } + if (!strcasecmp(word, "mapping")) { + if (!strcasecmp(val, "encoded")) { + flags |= PROXYPASS_MAP_ENCODED; + } + else if (!strcasecmp(val, "servlet")) { + flags |= PROXYPASS_MAP_SERVLET; + } + else { + return "unknown mapping"; + } + } + else { + apr_table_setn(params, word, val); + } } - }; + } + if (flags & PROXYPASS_MAP_ENCODED) { + conf->map_encoded_one = 1; + } + else { + conf->map_encoded_all = 0; + } if (r == NULL) { return "ProxyPass|ProxyPassMatch needs a path when not defined in a location"; } + if (!(real = ap_proxy_de_socketfy(cmd->temp_pool, r))) { + return "ProxyPass|ProxyPassMatch uses an invalid \"unix:\" URL"; + } + /* if per directory, save away the single alias */ if (cmd->path) { @@ -1793,7 +2146,7 @@ static const char * dconf->alias_set = 1; new = dconf->alias; if (apr_fnmatch_test(f)) { - use_regex = 1; + worker_type = AP_PROXY_WORKER_IS_MATCH; } } /* if per server, add to the alias array */ @@ -1802,9 +2155,9 @@ static const char * } new->fake = apr_pstrdup(cmd->pool, f); - new->real = apr_pstrdup(cmd->pool, ap_proxy_de_socketfy(cmd->pool, r)); + new->real = apr_pstrdup(cmd->pool, real); new->flags = flags; - if (use_regex) { + if (worker_type & AP_PROXY_WORKER_IS_MATCH) { new->regex = ap_pregcomp(cmd->pool, f, AP_REG_EXTENDED); if (new->regex == NULL) return "Regular expression could not be compiled."; @@ -1828,7 +2181,7 @@ static const char * * cannot be parsed anyway with apr_uri_parse later on in * ap_proxy_define_balancer / ap_proxy_update_balancer */ - if (use_regex) { + if (worker_type & AP_PROXY_WORKER_IS_MATCH) { fake_copy = NULL; } else { @@ -1851,15 +2204,20 @@ static const char * new->balancer = balancer; } else { - proxy_worker *worker = ap_proxy_get_worker(cmd->temp_pool, NULL, conf, ap_proxy_de_socketfy(cmd->pool, r)); int reuse = 0; + proxy_worker *worker = ap_proxy_get_worker_ex(cmd->temp_pool, NULL, + conf, new->real, + worker_type); if (!worker) { - const char *err = ap_proxy_define_worker(cmd->pool, &worker, NULL, conf, r, 0); + const char *err; + err = ap_proxy_define_worker_ex(cmd->pool, &worker, NULL, + conf, r, worker_type); if (err) return apr_pstrcat(cmd->temp_pool, "ProxyPass ", err, NULL); PROXY_COPY_CONF_PARAMS(worker, conf); - } else { + } + else { reuse = 1; ap_log_error(APLOG_MARK, APLOG_INFO, 0, cmd->server, APLOGNO(01145) "Sharing worker '%s' instead of creating new worker '%s'", @@ -2078,14 +2436,50 @@ static const char * } static const char * - set_proxy_error_override(cmd_parms *parms, void *dconf, int flag) + set_proxy_error_override(cmd_parms *parms, void *dconf, const char *arg) { proxy_dir_conf *conf = dconf; - conf->error_override = flag; - conf->error_override_set = 1; + if (strcasecmp(arg, "Off") == 0) { + conf->error_override = 0; + conf->error_override_set = 1; + } + else if (strcasecmp(arg, "On") == 0) { + conf->error_override = 1; + conf->error_override_set = 1; + } + else if (conf->error_override_set == 1) { + int *newcode; + int argcode, i; + if (!apr_isdigit(arg[0])) + return "ProxyErrorOverride: status codes to intercept must be numeric"; + if (!conf->error_override) + return 
"ProxyErrorOverride: status codes must follow a value of 'on'"; + + argcode = strtol(arg, NULL, 10); + if (!ap_is_HTTP_ERROR(argcode)) + return "ProxyErrorOverride: status codes to intercept must be valid HTTP Status Codes >=400 && <600"; + + newcode = apr_array_push(conf->error_override_codes); + *newcode = argcode; + + /* Keep the array sorted for binary search. */ + for (i = conf->error_override_codes->nelts - 1; i > 0; --i) { + int *oldcode = &((int *)conf->error_override_codes->elts)[i - 1]; + if (*oldcode <= argcode) { + break; + } + *newcode = *oldcode; + *oldcode = argcode; + newcode = oldcode; + } + } + else + return "ProxyErrorOverride first parameter must be one of: off | on"; + return NULL; } + static const char * add_proxy_http_headers(cmd_parms *parms, void *dconf, int flag) { @@ -2103,6 +2497,14 @@ static const char * conf->preserve_host_set = 1; return NULL; } +static const char * + forward_100_continue(cmd_parms *parms, void *dconf, int flag) +{ + proxy_dir_conf *conf = dconf; + conf->forward_100_continue = flag; + conf->forward_100_continue_set = 1; + return NULL; +} static const char * set_recv_buffer_size(cmd_parms *parms, void *dummy, const char *arg) @@ -2279,6 +2681,7 @@ static const char *add_member(cmd_parms *cmd, void *dummy, const char *arg) proxy_worker *worker; char *path = cmd->path; char *name = NULL; + const char *real; char *word; apr_table_t *params = apr_table_make(cmd->pool, 5); const apr_array_header_t *arr; @@ -2319,6 +2722,9 @@ static const char *add_member(cmd_parms *cmd, void *dummy, const char *arg) return "BalancerMember must define balancer name when outside section"; if (!name) return "BalancerMember must define remote proxy server"; + if (!(real = ap_proxy_de_socketfy(cmd->temp_pool, name))) { + return "BalancerMember uses an invalid \"unix:\" URL"; + } ap_str_tolower(path); /* lowercase scheme://hostname */ @@ -2331,7 +2737,7 @@ static const char *add_member(cmd_parms *cmd, void *dummy, const char *arg) } /* Try to find existing worker */ - worker = ap_proxy_get_worker(cmd->temp_pool, balancer, conf, ap_proxy_de_socketfy(cmd->temp_pool, name)); + worker = ap_proxy_get_worker(cmd->temp_pool, balancer, conf, real); if (!worker) { ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, cmd->server, APLOGNO(01147) "Defining worker '%s' for balancer '%s'", @@ -2361,7 +2767,7 @@ static const char *add_member(cmd_parms *cmd, void *dummy, const char *arg) elts[i].key, elts[i].val, ap_proxy_worker_name(cmd->pool, worker)); } else { err = set_worker_param(cmd->pool, cmd->server, worker, elts[i].key, - elts[i].val); + elts[i].val); if (err) return apr_pstrcat(cmd->temp_pool, "BalancerMember ", err, NULL); } @@ -2380,6 +2786,7 @@ static const char * char *word, *val; proxy_balancer *balancer = NULL; proxy_worker *worker = NULL; + unsigned int worker_type = 0; int in_proxy_section = 0; /* XXX: Should this be NOT_IN_DIRECTORY|NOT_IN_FILES? 
*/ const char *err = ap_check_cmd_context(cmd, NOT_IN_HTACCESS); @@ -2396,6 +2803,13 @@ static const char * name = ap_getword_conf(cmd->temp_pool, &pargs); if ((word = ap_strchr(name, '>'))) *word = '\0'; + if (strncasecmp(cmd->directive->parent->directive + 6, + "Match", 5) == 0) { + worker_type = AP_PROXY_WORKER_IS_MATCH; + } + else { + worker_type = AP_PROXY_WORKER_IS_PREFIX; + } in_proxy_section = 1; } else { @@ -2420,11 +2834,18 @@ static const char * } } else { - worker = ap_proxy_get_worker(cmd->temp_pool, NULL, conf, ap_proxy_de_socketfy(cmd->temp_pool, name)); + const char *real; + + if (!(real = ap_proxy_de_socketfy(cmd->temp_pool, name))) { + return "ProxySet uses an invalid \"unix:\" URL"; + } + + worker = ap_proxy_get_worker_ex(cmd->temp_pool, NULL, conf, + real, worker_type); if (!worker) { if (in_proxy_section) { - err = ap_proxy_define_worker(cmd->pool, &worker, NULL, - conf, name, 0); + err = ap_proxy_define_worker_ex(cmd->pool, &worker, NULL, + conf, name, worker_type); if (err) return apr_pstrcat(cmd->temp_pool, "ProxySet ", err, NULL); @@ -2478,7 +2899,7 @@ static const char *proxysection(cmd_parms *cmd, void *mconfig, const char *arg) char *word, *val; proxy_balancer *balancer = NULL; proxy_worker *worker = NULL; - + unsigned int worker_type = AP_PROXY_WORKER_IS_PREFIX; const char *err = ap_check_cmd_context(cmd, NOT_IN_DIR_CONTEXT); proxy_server_conf *sconf = (proxy_server_conf *) ap_get_module_config(cmd->server->module_config, &proxy_module); @@ -2516,6 +2937,7 @@ static const char *proxysection(cmd_parms *cmd, void *mconfig, const char *arg) if (!r) { return "Regex could not be compiled"; } + worker_type = AP_PROXY_WORKER_IS_MATCH; } /* initialize our config and fetch it */ @@ -2562,11 +2984,17 @@ static const char *proxysection(cmd_parms *cmd, void *mconfig, const char *arg) } } else { - worker = ap_proxy_get_worker(cmd->temp_pool, NULL, sconf, - ap_proxy_de_socketfy(cmd->temp_pool, (char*)conf->p)); + const char *real; + + if (!(real = ap_proxy_de_socketfy(cmd->temp_pool, conf->p))) { + return " uses an invalid \"unix:\" URL"; + } + + worker = ap_proxy_get_worker_ex(cmd->temp_pool, NULL, sconf, + real, worker_type); if (!worker) { - err = ap_proxy_define_worker(cmd->pool, &worker, NULL, - sconf, conf->p, 0); + err = ap_proxy_define_worker_ex(cmd->pool, &worker, NULL, sconf, + conf->p, worker_type); if (err) return apr_pstrcat(cmd->temp_pool, thiscmd->name, " ", err, NULL); @@ -2616,9 +3044,9 @@ static const command_rec proxy_cmds[] = "location, in regular expression syntax"), AP_INIT_FLAG("ProxyRequests", set_proxy_req, NULL, RSRC_CONF, "on if the true proxy requests should be accepted"), - AP_INIT_TAKE2("ProxyRemote", add_proxy_noregex, NULL, RSRC_CONF, + AP_INIT_TAKE23("ProxyRemote", add_proxy_noregex, NULL, RSRC_CONF, "a scheme, partial URL or '*' and a proxy server"), - AP_INIT_TAKE2("ProxyRemoteMatch", add_proxy_regex, NULL, RSRC_CONF, + AP_INIT_TAKE23("ProxyRemoteMatch", add_proxy_regex, NULL, RSRC_CONF, "a regex pattern and a proxy server"), AP_INIT_FLAG("ProxyPassInterpolateEnv", ap_set_flag_slot_char, (void*)APR_OFFSETOF(proxy_dir_conf, interpolate_env), @@ -2647,7 +3075,7 @@ static const command_rec proxy_cmds[] = "The default intranet domain name (in absence of a domain in the URL)"), AP_INIT_TAKE1("ProxyVia", set_via_opt, NULL, RSRC_CONF, "Configure Via: proxy header header to one of: on | off | block | full"), - AP_INIT_FLAG("ProxyErrorOverride", set_proxy_error_override, NULL, RSRC_CONF|ACCESS_CONF, + AP_INIT_ITERATE("ProxyErrorOverride", 
set_proxy_error_override, NULL, RSRC_CONF|ACCESS_CONF, "use our error handling pages instead of the servers' we are proxying"), AP_INIT_FLAG("ProxyPreserveHost", set_preserve_host, NULL, RSRC_CONF|ACCESS_CONF, "on if we should preserve host header while proxying"), @@ -2676,14 +3104,15 @@ static const command_rec proxy_cmds[] = "Configure local source IP used for request forward"), AP_INIT_FLAG("ProxyAddHeaders", add_proxy_http_headers, NULL, RSRC_CONF|ACCESS_CONF, "on if X-Forwarded-* headers should be added or completed"), + AP_INIT_FLAG("Proxy100Continue", forward_100_continue, NULL, RSRC_CONF|ACCESS_CONF, + "on if 100-Continue should be forwarded to the origin server, off if the " + "proxy should handle it by itself"), {NULL} }; static APR_OPTIONAL_FN_TYPE(ssl_proxy_enable) *proxy_ssl_enable = NULL; static APR_OPTIONAL_FN_TYPE(ssl_engine_disable) *proxy_ssl_disable = NULL; static APR_OPTIONAL_FN_TYPE(ssl_engine_set) *proxy_ssl_engine = NULL; -static APR_OPTIONAL_FN_TYPE(ssl_is_https) *proxy_is_https = NULL; -static APR_OPTIONAL_FN_TYPE(ssl_var_lookup) *proxy_ssl_val = NULL; PROXY_DECLARE(int) ap_proxy_ssl_enable(conn_rec *c) { @@ -2691,20 +3120,15 @@ PROXY_DECLARE(int) ap_proxy_ssl_enable(conn_rec *c) * if c == NULL just check if the optional function was imported * else run the optional function so ssl filters are inserted */ - if (proxy_ssl_enable) { - return c ? proxy_ssl_enable(c) : 1; + if (c == NULL) { + return ap_ssl_has_outgoing_handlers(); } - - return 0; + return ap_ssl_bind_outgoing(c, NULL, 1) == OK; } PROXY_DECLARE(int) ap_proxy_ssl_disable(conn_rec *c) { - if (proxy_ssl_disable) { - return proxy_ssl_disable(c); - } - - return 0; + return ap_ssl_bind_outgoing(c, NULL, 0) == OK; } PROXY_DECLARE(int) ap_proxy_ssl_engine(conn_rec *c, @@ -2715,41 +3139,22 @@ PROXY_DECLARE(int) ap_proxy_ssl_engine(conn_rec *c, * if c == NULL just check if the optional function was imported * else run the optional function so ssl filters are inserted */ - if (proxy_ssl_engine) { - return c ? 
proxy_ssl_engine(c, per_dir_config, 1, enable) : 1; + if (c == NULL) { + return ap_ssl_has_outgoing_handlers(); } - - if (!per_dir_config) { - if (enable) { - return ap_proxy_ssl_enable(c); - } - else { - return ap_proxy_ssl_disable(c); - } - } - - return 0; + return ap_ssl_bind_outgoing(c, per_dir_config, enable) == OK; } PROXY_DECLARE(int) ap_proxy_conn_is_https(conn_rec *c) { - if (proxy_is_https) { - return proxy_is_https(c); - } - else - return 0; + return ap_ssl_conn_is_ssl(c); } PROXY_DECLARE(const char *) ap_proxy_ssl_val(apr_pool_t *p, server_rec *s, conn_rec *c, request_rec *r, const char *var) { - if (proxy_ssl_val) { - /* XXX Perhaps the casting useless */ - return (const char *)proxy_ssl_val(p, s, c, r, (char *)var); - } - else - return NULL; + return ap_ssl_var_lookup(p, s, c, r, var); } static int proxy_post_config(apr_pool_t *pconf, apr_pool_t *plog, @@ -2767,8 +3172,6 @@ static int proxy_post_config(apr_pool_t *pconf, apr_pool_t *plog, proxy_ssl_enable = APR_RETRIEVE_OPTIONAL_FN(ssl_proxy_enable); proxy_ssl_disable = APR_RETRIEVE_OPTIONAL_FN(ssl_engine_disable); proxy_ssl_engine = APR_RETRIEVE_OPTIONAL_FN(ssl_engine_set); - proxy_is_https = APR_RETRIEVE_OPTIONAL_FN(ssl_is_https); - proxy_ssl_val = APR_RETRIEVE_OPTIONAL_FN(ssl_var_lookup); ap_proxy_strmatch_path = apr_strmatch_precompile(pconf, "path=", 0); ap_proxy_strmatch_domain = apr_strmatch_precompile(pconf, "domain=", 0); @@ -2867,7 +3270,7 @@ static int proxy_status_hook(request_rec *r, int flags) } else { ap_rprintf(r, "ProxyBalancer[%d]Worker[%d]Name: %s\n", - i, n, (*worker)->s->name); + i, n, (*worker)->s->name_ex); ap_rprintf(r, "ProxyBalancer[%d]Worker[%d]Status: %s\n", i, n, ap_proxy_parse_wstatus(r->pool, *worker)); ap_rprintf(r, "ProxyBalancer[%d]Worker[%d]Elected: %" @@ -2939,45 +3342,49 @@ static void child_init(apr_pool_t *p, server_rec *s) */ worker = (proxy_worker *)conf->workers->elts; for (i = 0; i < conf->workers->nelts; i++, worker++) { - ap_proxy_initialize_worker(worker, s, conf->pool); + ap_proxy_initialize_worker(worker, s, p); } /* Create and initialize forward worker if defined */ if (conf->req_set && conf->req) { proxy_worker *forward; - ap_proxy_define_worker(p, &forward, NULL, NULL, "http://www.apache.org", 0); + ap_proxy_define_worker(conf->pool, &forward, NULL, NULL, + "http://www.apache.org", 0); conf->forward = forward; PROXY_STRNCPY(conf->forward->s->name, "proxy:forward"); + PROXY_STRNCPY(conf->forward->s->name_ex, "proxy:forward"); PROXY_STRNCPY(conf->forward->s->hostname, "*"); /* for compatibility */ PROXY_STRNCPY(conf->forward->s->hostname_ex, "*"); PROXY_STRNCPY(conf->forward->s->scheme, "*"); conf->forward->hash.def = conf->forward->s->hash.def = - ap_proxy_hashfunc(conf->forward->s->name, PROXY_HASHFUNC_DEFAULT); + ap_proxy_hashfunc(conf->forward->s->name_ex, PROXY_HASHFUNC_DEFAULT); conf->forward->hash.fnv = conf->forward->s->hash.fnv = - ap_proxy_hashfunc(conf->forward->s->name, PROXY_HASHFUNC_FNV); + ap_proxy_hashfunc(conf->forward->s->name_ex, PROXY_HASHFUNC_FNV); /* Do not disable worker in case of errors */ conf->forward->s->status |= PROXY_WORKER_IGNORE_ERRORS; /* Mark as the "generic" worker */ conf->forward->s->status |= PROXY_WORKER_GENERIC; - ap_proxy_initialize_worker(conf->forward, s, conf->pool); + ap_proxy_initialize_worker(conf->forward, s, p); /* Disable address cache for generic forward worker */ conf->forward->s->is_address_reusable = 0; } if (!reverse) { - ap_proxy_define_worker(p, &reverse, NULL, NULL, "http://www.apache.org", 0); + 
ap_proxy_define_worker(conf->pool, &reverse, NULL, NULL, + "http://www.apache.org", 0); PROXY_STRNCPY(reverse->s->name, "proxy:reverse"); + PROXY_STRNCPY(reverse->s->name_ex, "proxy:reverse"); PROXY_STRNCPY(reverse->s->hostname, "*"); /* for compatibility */ PROXY_STRNCPY(reverse->s->hostname_ex, "*"); PROXY_STRNCPY(reverse->s->scheme, "*"); reverse->hash.def = reverse->s->hash.def = - ap_proxy_hashfunc(reverse->s->name, PROXY_HASHFUNC_DEFAULT); + ap_proxy_hashfunc(reverse->s->name_ex, PROXY_HASHFUNC_DEFAULT); reverse->hash.fnv = reverse->s->hash.fnv = - ap_proxy_hashfunc(reverse->s->name, PROXY_HASHFUNC_FNV); + ap_proxy_hashfunc(reverse->s->name_ex, PROXY_HASHFUNC_FNV); /* Do not disable worker in case of errors */ reverse->s->status |= PROXY_WORKER_IGNORE_ERRORS; /* Mark as the "generic" worker */ reverse->s->status |= PROXY_WORKER_GENERIC; conf->reverse = reverse; - ap_proxy_initialize_worker(conf->reverse, s, conf->pool); + ap_proxy_initialize_worker(conf->reverse, s, p); /* Disable address cache for generic reverse worker */ reverse->s->is_address_reusable = 0; } @@ -3003,7 +3410,7 @@ static int proxy_pre_config(apr_pool_t *pconf, apr_pool_t *plog, APR_OPTIONAL_HOOK(ap, status_hook, proxy_status_hook, NULL, NULL, APR_HOOK_MIDDLE); - /* Reset workers count on gracefull restart */ + /* Reset workers count on graceful restart */ proxy_lb_workers = 0; set_worker_hc_param_f = APR_RETRIEVE_OPTIONAL_FN(set_worker_hc_param); return OK; @@ -3023,7 +3430,10 @@ static void register_hooks(apr_pool_t *p) /* handler */ ap_hook_handler(proxy_handler, NULL, NULL, APR_HOOK_FIRST); /* filename-to-URI translation */ - ap_hook_translate_name(proxy_trans, aszSucc, NULL, APR_HOOK_FIRST); + ap_hook_pre_translate_name(proxy_pre_translate_name, NULL, NULL, + APR_HOOK_MIDDLE); + ap_hook_translate_name(proxy_translate_name, aszSucc, NULL, + APR_HOOK_FIRST); /* walk entries and suppress default TRACE behavior */ ap_hook_map_to_storage(proxy_map_location, NULL,NULL, APR_HOOK_FIRST); /* fixups */ @@ -3058,6 +3468,7 @@ APR_HOOK_STRUCT( APR_HOOK_LINK(pre_request) APR_HOOK_LINK(post_request) APR_HOOK_LINK(request_status) + APR_HOOK_LINK(check_trans) ) APR_IMPLEMENT_EXTERNAL_HOOK_RUN_FIRST(proxy, PROXY, int, scheme_handler, @@ -3066,6 +3477,9 @@ APR_IMPLEMENT_EXTERNAL_HOOK_RUN_FIRST(proxy, PROXY, int, scheme_handler, char *url, const char *proxyhost, apr_port_t proxyport),(r,worker,conf, url,proxyhost,proxyport),DECLINED) +APR_IMPLEMENT_EXTERNAL_HOOK_RUN_FIRST(proxy, PROXY, int, check_trans, + (request_rec *r, const char *url), + (r, url), DECLINED) APR_IMPLEMENT_EXTERNAL_HOOK_RUN_FIRST(proxy, PROXY, int, canon_handler, (request_rec *r, char *url),(r, url),DECLINED) diff --git a/modules/proxy/mod_proxy.h b/modules/proxy/mod_proxy.h index aabd09f..51a55f8 100644 --- a/modules/proxy/mod_proxy.h +++ b/modules/proxy/mod_proxy.h @@ -58,6 +58,7 @@ #include "http_main.h" #include "http_log.h" #include "http_connection.h" +#include "http_ssl.h" #include "util_filter.h" #include "util_ebcdic.h" #include "ap_provider.h" @@ -75,8 +76,12 @@ enum enctype { enc_path, enc_search, enc_user, enc_fpath, enc_parm }; +/* Flags for ap_proxy_canonenc_ex */ +#define PROXY_CANONENC_FORCEDEC 0x01 +#define PROXY_CANONENC_NOENCODEDSLASHENCODING 0x02 + typedef enum { - NONE, TCP, OPTIONS, HEAD, GET, CPING, PROVIDER, EOT + NONE, TCP, OPTIONS, HEAD, GET, CPING, PROVIDER, OPTIONS11, HEAD11, GET11, EOT } hcmethod_t; typedef struct { @@ -116,6 +121,7 @@ struct proxy_remote { const char *protocol; /* the scheme used to talk to this proxy */ const char 
*hostname; /* the hostname of this proxy */ ap_regex_t *regexp; /* compiled regex (if any) for the remote */ + const char *creds; /* auth credentials (if any) for the proxy */ int use_regex; /* simple boolean. True if we have a regex pattern */ apr_port_t port; /* the port for this proxy */ }; @@ -123,6 +129,8 @@ struct proxy_remote { #define PROXYPASS_NOCANON 0x01 #define PROXYPASS_INTERPOLATE 0x02 #define PROXYPASS_NOQUERY 0x04 +#define PROXYPASS_MAP_ENCODED 0x08 +#define PROXYPASS_MAP_SERVLET 0x18 /* + MAP_ENCODED */ struct proxy_alias { const char *real; const char *fake; @@ -199,9 +207,10 @@ typedef struct { unsigned int inherit_set:1; unsigned int ppinherit:1; unsigned int ppinherit_set:1; + unsigned int map_encoded_one:1; + unsigned int map_encoded_all:1; } proxy_server_conf; - typedef struct { const char *p; /* The path */ ap_regex_t *r; /* Is this a regex? */ @@ -240,6 +249,10 @@ typedef struct { /** Named back references */ apr_array_header_t *refs; + unsigned int forward_100_continue:1; + unsigned int forward_100_continue_set:1; + + apr_array_header_t *error_override_codes; } proxy_dir_conf; /* if we interpolate env vars per-request, we'll need a per-request @@ -251,6 +264,8 @@ typedef struct { apr_array_header_t* cookie_domains; } proxy_req_conf; +struct proxy_address; /* opaque TTL'ed and refcount'ed address */ + typedef struct { conn_rec *connection; request_rec *r; /* Request record of the backend request @@ -276,6 +291,9 @@ typedef struct { * and its scpool/bucket_alloc (NULL before), * must be left cleaned when used (locally). */ + apr_pool_t *uds_pool; /* Subpool for reusing UDS paths */ + apr_pool_t *fwd_pool; /* Subpool for reusing ProxyRemote infos */ + struct proxy_address *address; /* Current remote address */ } proxy_conn_rec; typedef struct { @@ -285,12 +303,15 @@ typedef struct { /* Connection pool */ struct proxy_conn_pool { - apr_pool_t *pool; /* The pool used in constructor and destructor calls */ - apr_sockaddr_t *addr; /* Preparsed remote address info */ - apr_reslist_t *res; /* Connection resource list */ - proxy_conn_rec *conn; /* Single connection for prefork mpm */ + apr_pool_t *pool; /* The pool used in constructor and destructor calls */ + apr_sockaddr_t *addr; /* Preparsed remote address info */ + apr_reslist_t *res; /* Connection resource list */ + proxy_conn_rec *conn; /* Single connection for prefork mpm */ + apr_pool_t *dns_pool; /* The pool used for worker scoped DNS resolutions */ }; +#define AP_VOLATILIZE_T(T, x) (*(T volatile *)&(x)) + /* worker status bits */ /* * NOTE: Keep up-to-date w/ proxy_wstat_tbl[] @@ -343,6 +364,8 @@ PROXY_WORKER_HC_FAIL ) #define PROXY_WORKER_IS_HCFAILED(f) ( (f)->s->status & PROXY_WORKER_HC_FAIL ) +#define PROXY_WORKER_IS_ERROR(f) ( (f)->s->status & PROXY_WORKER_IN_ERROR ) + #define PROXY_WORKER_IS(f, b) ( (f)->s->status & (b) ) /* default worker retry timeout in seconds */ @@ -357,8 +380,10 @@ PROXY_WORKER_HC_FAIL ) #define PROXY_WORKER_MAX_HOSTNAME_SIZE 64 #define PROXY_BALANCER_MAX_HOSTNAME_SIZE PROXY_WORKER_MAX_HOSTNAME_SIZE #define PROXY_BALANCER_MAX_STICKY_SIZE 64 +#define PROXY_WORKER_MAX_SECRET_SIZE 64 #define PROXY_RFC1035_HOSTNAME_SIZE 256 +#define PROXY_WORKER_EXT_NAME_SIZE 384 /* RFC-1035 mentions limits of 255 for host-names and 253 for domain-names, * dotted together(?) this would fit the below size (+ trailing NUL). 
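The new creds member above pairs with the optional third argument that ProxyRemote and ProxyRemoteMatch now accept (see the AP_INIT_TAKE23 change and add_proxy() earlier in this patch): the "user:password" value is stored pre-encoded as "Basic <base64>" and exposed to scheme handlers through the "proxy-basic-creds" request note. How that note is ultimately consumed is not part of this hunk; the sketch below only illustrates the intended shape, and the helper name and insertion point are assumptions.

/* Hedged sketch only: forward the pre-encoded credentials from the
 * "proxy-basic-creds" note as a Proxy-Authorization header when talking
 * to the remote (forward) proxy. */
static void add_forward_proxy_auth(request_rec *r, apr_table_t *proxy_headers)
{
    const char *creds = apr_table_get(r->notes, "proxy-basic-creds");

    if (creds) {
        /* creds already has the form "Basic <base64(user:password)>" */
        apr_table_setn(proxy_headers, "Proxy-Authorization", creds);
    }
}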
@@ -379,6 +404,15 @@ do { \ (w)->s->io_buffer_size_set = (c)->io_buffer_size_set; \ } while (0) +#define PROXY_SHOULD_PING_100_CONTINUE(w, r) \ + ((w)->s->ping_timeout_set \ + && (PROXYREQ_REVERSE == (r)->proxyreq) \ + && ap_request_has_body((r))) + +#define PROXY_DO_100_CONTINUE(w, r) \ + (PROXY_SHOULD_PING_100_CONTINUE(w, r) \ + && !apr_table_get((r)->subprocess_env, "force-proxy-request-1.0")) + /* use 2 hashes */ typedef struct { unsigned int def; @@ -441,6 +475,7 @@ typedef struct { unsigned int keepalive_set:1; unsigned int disablereuse_set:1; unsigned int was_malloced:1; + unsigned int is_name_matchable:1; char hcuri[PROXY_WORKER_MAX_ROUTE_SIZE]; /* health check uri */ char hcexpr[PROXY_WORKER_MAX_SCHEME_SIZE]; /* name of condition expr for health check */ int passes; /* number of successes for check to pass */ @@ -453,6 +488,11 @@ typedef struct { char hostname_ex[PROXY_RFC1035_HOSTNAME_SIZE]; /* RFC1035 compliant version of the remote backend address */ apr_size_t response_field_size; /* Size of proxy response buffer in bytes. */ unsigned int response_field_size_set:1; + char secret[PROXY_WORKER_MAX_SECRET_SIZE]; /* authentication secret (e.g. AJP13) */ + char name_ex[PROXY_WORKER_EXT_NAME_SIZE]; /* Extended name (>96 chars for 2.4.x) */ + unsigned int address_ttl_set:1; + apr_int32_t address_ttl; /* backend address' TTL (seconds) */ + apr_uint32_t address_expiry; /* backend address' next expiry time */ } proxy_worker_shared; #define ALIGNED_PROXY_WORKER_SHARED_SIZE (APR_ALIGN_DEFAULT(sizeof(proxy_worker_shared))) @@ -464,9 +504,12 @@ struct proxy_worker { proxy_conn_pool *cp; /* Connection pool to use */ proxy_worker_shared *s; /* Shared data */ proxy_balancer *balancer; /* which balancer am I in? */ +#if APR_HAS_THREADS apr_thread_mutex_t *tmutex; /* Thread lock for updating address cache */ +#endif void *context; /* general purpose storage */ ap_conf_vector_t *section_config; /* -section wherein defined */ + struct proxy_address *volatile address; /* current worker address (if reusable) */ }; /* default to health check every 30 seconds */ @@ -523,7 +566,9 @@ struct proxy_balancer { apr_time_t wupdated; /* timestamp of last change to workers list */ proxy_balancer_method *lbmethod; apr_global_mutex_t *gmutex; /* global lock for updating list of workers */ +#if APR_HAS_THREADS apr_thread_mutex_t *tmutex; /* Thread lock for updating shm */ +#endif proxy_server_conf *sconf; void *context; /* general purpose storage */ proxy_balancer_shared *s; /* Shared data */ @@ -602,6 +647,8 @@ APR_DECLARE_EXTERNAL_HOOK(proxy, PROXY, int, scheme_handler, (request_rec *r, proxy_worker *worker, proxy_server_conf *conf, char *url, const char *proxyhost, apr_port_t proxyport)) +APR_DECLARE_EXTERNAL_HOOK(proxy, PROXY, int, check_trans, + (request_rec *r, const char *url)) APR_DECLARE_EXTERNAL_HOOK(proxy, PROXY, int, canon_handler, (request_rec *r, char *url)) @@ -643,6 +690,8 @@ PROXY_DECLARE(apr_status_t) ap_proxy_strncpy(char *dst, const char *src, apr_size_t dlen); PROXY_DECLARE(int) ap_proxy_hex2c(const char *x); PROXY_DECLARE(void) ap_proxy_c2hex(int ch, char *x); +PROXY_DECLARE(char *)ap_proxy_canonenc_ex(apr_pool_t *p, const char *x, int len, enum enctype t, + int flags, int proxyreq); PROXY_DECLARE(char *)ap_proxy_canonenc(apr_pool_t *p, const char *x, int len, enum enctype t, int forcedec, int proxyreq); PROXY_DECLARE(char *)ap_proxy_canon_netloc(apr_pool_t *p, char **const urlp, char **userp, @@ -656,7 +705,7 @@ PROXY_DECLARE(int) ap_proxy_checkproxyblock(request_rec *r, proxy_server_conf 
*c * @param conf server configuration * @param hostname hostname from request URI * @param addr resolved address of hostname, or NULL if not known - * @return OK on success, or else an errro + * @return OK on success, or else an error */ PROXY_DECLARE(int) ap_proxy_checkproxyblock2(request_rec *r, proxy_server_conf *conf, const char *hostname, apr_sockaddr_t *addr); @@ -708,7 +757,42 @@ PROXY_DECLARE(char *) ap_proxy_worker_name(apr_pool_t *p, proxy_worker *worker); /** - * Get the worker from proxy configuration + * Return whether a worker upgrade configuration matches Upgrade header + * @param p memory pool used for displaying worker name + * @param worker the worker + * @param upgrade the Upgrade header to match + * @param dflt default protocol (NULL for none) + * @return 1 (true) or 0 (false) + */ +PROXY_DECLARE(int) ap_proxy_worker_can_upgrade(apr_pool_t *p, + const proxy_worker *worker, + const char *upgrade, + const char *dflt); + +/* Bitmask for ap_proxy_{define,get}_worker_ex(). */ +#define AP_PROXY_WORKER_IS_PREFIX (1u << 0) +#define AP_PROXY_WORKER_IS_MATCH (1u << 1) +#define AP_PROXY_WORKER_IS_MALLOCED (1u << 2) +#define AP_PROXY_WORKER_NO_UDS (1u << 3) + +/** + * Get the worker from proxy configuration, looking for either PREFIXED or + * MATCHED or both types of workers according to given mask + * @param p memory pool used for finding worker + * @param balancer the balancer that the worker belongs to + * @param conf current proxy server configuration + * @param url url to find the worker from + * @param mask bitmask of AP_PROXY_WORKER_IS_* + * @return proxy_worker or NULL if not found + */ +PROXY_DECLARE(proxy_worker *) ap_proxy_get_worker_ex(apr_pool_t *p, + proxy_balancer *balancer, + proxy_server_conf *conf, + const char *url, + unsigned int mask); + +/** + * Get the worker from proxy configuration, both types * @param p memory pool used for finding worker * @param balancer the balancer that the worker belongs to * @param conf current proxy server configuration @@ -719,7 +803,26 @@ PROXY_DECLARE(proxy_worker *) ap_proxy_get_worker(apr_pool_t *p, proxy_balancer *balancer, proxy_server_conf *conf, const char *url); + /** + * Define and Allocate space for the worker to proxy configuration, of either + * PREFIXED or MATCHED type according to given mask + * @param p memory pool to allocate worker from + * @param worker the new worker + * @param balancer the balancer that the worker belongs to + * @param conf current proxy server configuration + * @param url url containing worker name + * @param mask bitmask of AP_PROXY_WORKER_IS_* + * @return error message or NULL if successful (*worker is new worker) + */ +PROXY_DECLARE(char *) ap_proxy_define_worker_ex(apr_pool_t *p, + proxy_worker **worker, + proxy_balancer *balancer, + proxy_server_conf *conf, + const char *url, + unsigned int mask); + + /** * Define and Allocate space for the worker to proxy configuration * @param p memory pool to allocate worker from * @param worker the new worker @@ -736,6 +839,25 @@ PROXY_DECLARE(char *) ap_proxy_define_worker(apr_pool_t *p, const char *url, int do_malloc); +/** + * Define and Allocate space for the ap_strcmp_match()able worker to proxy + * configuration. 
+ * @param p memory pool to allocate worker from + * @param worker the new worker + * @param balancer the balancer that the worker belongs to + * @param conf current proxy server configuration + * @param url url containing worker name (produces match pattern) + * @param do_malloc true if shared struct should be malloced + * @return error message or NULL if successful (*worker is new worker) + * @deprecated Replaced by ap_proxy_define_worker_ex() + */ +PROXY_DECLARE(char *) ap_proxy_define_match_worker(apr_pool_t *p, + proxy_worker **worker, + proxy_balancer *balancer, + proxy_server_conf *conf, + const char *url, + int do_malloc); + /** * Share a defined proxy worker via shm * @param worker worker to be shared @@ -912,6 +1034,29 @@ PROXY_DECLARE(int) ap_proxy_post_request(proxy_worker *worker, request_rec *r, proxy_server_conf *conf); +/* Bitmask for ap_proxy_determine_address() */ +#define PROXY_DETERMINE_ADDRESS_CHECK (1u << 0) +/** + * Resolve an address, reusing the one of the worker if any. + * @param proxy_function calling proxy scheme (http, ajp, ...) + * @param conn proxy connection the address is used for + * @param hostname host to resolve (should be the worker's if reusable) + * @param hostport port to resolve (should be the worker's if reusable) + * @param flags bitmask of PROXY_DETERMINE_ADDRESS_* + * @param r current request (if any) + * @param s current server (or NULL if r != NULL and ap_proxyerror() + * should be called on error) + * @return APR_SUCCESS or an error, APR_EEXIST if the address is still + * the same and PROXY_DETERMINE_ADDRESS_CHECK is asked + */ +PROXY_DECLARE(apr_status_t) ap_proxy_determine_address(const char *proxy_function, + proxy_conn_rec *conn, + const char *hostname, + apr_port_t hostport, + unsigned int flags, + request_rec *r, + server_rec *s); + /** * Determine backend hostname and port * @param p memory pool used for processing @@ -1112,13 +1257,27 @@ PROXY_DECLARE(apr_status_t) ap_proxy_sync_balancer(proxy_balancer *b, server_rec *s, proxy_server_conf *conf); +/** + * Configure and create workers (and balancer) in mod_balancer. + * @param r request + * @param params table with the parameters like b=mycluster etc. + * @return 404 when the worker/balancer doesn't exist, + * 400 if something is invalid + * 200 for success. + */ +APR_DECLARE_OPTIONAL_FN(apr_status_t, balancer_manage, + (request_rec *, apr_table_t *params)); /** * Find the matched alias for this request and setup for proxy handler * @param r request * @param ent proxy_alias record * @param dconf per-dir config or NULL - * @return DECLINED, DONE or OK if matched + * @return OK if the alias matched, + * DONE if the alias matched and r->uri was normalized so + * no further transformation should happen on it, + * DECLINED if proxying is disabled for this alias, + * HTTP_CONTINUE if the alias did not match */ PROXY_DECLARE(int) ap_proxy_trans_match(request_rec *r, struct proxy_alias *ent, @@ -1150,6 +1309,55 @@ PROXY_DECLARE(int) ap_proxy_create_hdrbrgd(apr_pool_t *p, char **old_cl_val, char **old_te_val); +/** + * Prefetch the client request body (in memory), up to a limit. + * Read what's in the client pipe. If nonblocking is set and read is EAGAIN, + * pass a FLUSH bucket to the backend and read again in blocking mode. 
+ * @param r client request + * @param backend backend connection + * @param input_brigade input brigade to use/fill + * @param block blocking or non-blocking mode + * @param bytes_read number of bytes read + * @param max_read maximum number of bytes to read + * @return OK or HTTP_* error code + * @note max_read is rounded up to APR_BUCKET_BUFF_SIZE + */ +PROXY_DECLARE(int) ap_proxy_prefetch_input(request_rec *r, + proxy_conn_rec *backend, + apr_bucket_brigade *input_brigade, + apr_read_type_e block, + apr_off_t *bytes_read, + apr_off_t max_read); + +/** + * Spool the client request body to memory, or disk above given limit. + * @param r client request + * @param backend backend connection + * @param input_brigade input brigade to use/fill + * @param bytes_spooled number of bytes spooled + * @param max_mem_spool maximum number of in-memory bytes + * @return OK or HTTP_* error code + */ +PROXY_DECLARE(int) ap_proxy_spool_input(request_rec *r, + proxy_conn_rec *backend, + apr_bucket_brigade *input_brigade, + apr_off_t *bytes_spooled, + apr_off_t max_mem_spool); + +/** + * Read what's in the client pipe. If the read would block (EAGAIN), + * pass a FLUSH bucket to the backend and read again in blocking mode. + * @param r client request + * @param backend backend connection + * @param input_brigade brigade to use/fill + * @param max_read maximum number of bytes to read + * @return OK or HTTP_* error code + */ +PROXY_DECLARE(int) ap_proxy_read_input(request_rec *r, + proxy_conn_rec *backend, + apr_bucket_brigade *input_brigade, + apr_off_t max_read); + /** * @param bucket_alloc bucket allocator * @param r request @@ -1164,6 +1372,41 @@ PROXY_DECLARE(int) ap_proxy_pass_brigade(apr_bucket_alloc_t *bucket_alloc, conn_rec *origin, apr_bucket_brigade *bb, int flush); +struct proxy_tunnel_conn; /* opaque */ +typedef struct { + request_rec *r; + const char *scheme; + apr_pollset_t *pollset; + apr_array_header_t *pfds; + apr_interval_time_t timeout; + struct proxy_tunnel_conn *client, + *origin; + apr_size_t read_buf_size; + int replied; /* TODO 2.5+: one bit to merge in below bitmask */ + unsigned int nohalfclose :1; +} proxy_tunnel_rec; + +/** + * Create a tunnel, to be activated by ap_proxy_tunnel_run(). + * @param tunnel tunnel created + * @param r client request + * @param c_o connection to origin + * @param scheme caller proxy scheme (connect, ws(s), http(s), ...) + * @return APR_SUCCESS or error status + */ +PROXY_DECLARE(apr_status_t) ap_proxy_tunnel_create(proxy_tunnel_rec **tunnel, + request_rec *r, conn_rec *c_o, + const char *scheme); + +/** + * Forward anything from either side of the tunnel to the other, + * until one end aborts or a polling timeout/error occurs. + * @param tunnel tunnel to run + * @return OK if completion is full, HTTP_GATEWAY_TIME_OUT on timeout + * or another HTTP_ error otherwise. + */ +PROXY_DECLARE(int) ap_proxy_tunnel_run(proxy_tunnel_rec *tunnel); + /** * Clear the headers referenced by the Connection header from the given * table, and remove the Connection header. 
@@ -1174,6 +1417,15 @@ PROXY_DECLARE(int) ap_proxy_pass_brigade(apr_bucket_alloc_t *bucket_alloc, APR_DECLARE_OPTIONAL_FN(int, ap_proxy_clear_connection, (request_rec *r, apr_table_t *headers)); +/** + * Do a AJP CPING and wait for CPONG on the socket + * + */ +APR_DECLARE_OPTIONAL_FN(apr_status_t, ajp_handle_cping_cpong, + (apr_socket_t *sock, request_rec *r, + apr_interval_time_t timeout)); + + /** * @param socket socket to test * @return TRUE if socket is connected/active @@ -1193,6 +1445,15 @@ PROXY_DECLARE(int) ap_proxy_is_socket_connected(apr_socket_t *socket); */ int ap_proxy_lb_workers(void); +/** + * Returns 1 if a response with the given status should be overridden. + * + * @param conf proxy directory configuration + * @param code http status code + * @return 1 if code is considered an error-code, 0 otherwise + */ +PROXY_DECLARE(int) ap_proxy_should_override(proxy_dir_conf *conf, int code); + /** * Return the port number of a known scheme (eg: http -> 80). * @param scheme scheme to test @@ -1238,6 +1499,15 @@ PROXY_DECLARE(apr_status_t) ap_proxy_buckets_lifetime_transform(request_rec *r, apr_bucket_brigade *from, apr_bucket_brigade *to); +/* + * The flags for ap_proxy_transfer_between_connections(), where for legacy and + * compatibility reasons FLUSH_EACH and FLUSH_AFTER are boolean values. + */ +#define AP_PROXY_TRANSFER_FLUSH_EACH (0x00) +#define AP_PROXY_TRANSFER_FLUSH_AFTER (0x01) +#define AP_PROXY_TRANSFER_YIELD_PENDING (0x02) +#define AP_PROXY_TRANSFER_YIELD_MAX_READS (0x04) + /* * Sends all data that can be read non blocking from the input filter chain of * c_i and send it down the output filter chain of c_o. For reading it uses @@ -1255,10 +1525,12 @@ PROXY_DECLARE(apr_status_t) ap_proxy_buckets_lifetime_transform(request_rec *r, * @param name string for logging from where data was pulled * @param sent if not NULL will be set to 1 if data was sent through c_o * @param bsize maximum amount of data pulled in one iteration from c_i - * @param after if set flush data on c_o only once after the loop + * @param flags AP_PROXY_TRANSFER_* bitmask * @return apr_status_t of the operation. Could be any error returned from * either the input filter chain of c_i or the output filter chain - * of c_o. APR_EPIPE if the outgoing connection was aborted. + * of c_o, APR_EPIPE if the outgoing connection was aborted, or + * APR_INCOMPLETE if AP_PROXY_TRANSFER_YIELD_PENDING was set and + * the output stack gets full before the input stack is exhausted. 
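The tunnel API declared above (proxy_tunnel_rec with ap_proxy_tunnel_create() and ap_proxy_tunnel_run()) gives scheme handlers a shared bidirectional forwarding loop once both connections exist. A minimal hedged sketch of the calling pattern follows; the "ws" scheme label and the 60 second timeout are illustrative choices, not values taken from this patch.

/* Sketch only: drive the tunnel once the origin connection is set up.
 * ap_proxy_tunnel_run() forwards data both ways until one side closes
 * or the poll times out, returning OK or an HTTP_* error as documented
 * above. */
static int run_tunnel_example(request_rec *r, conn_rec *c_origin)
{
    proxy_tunnel_rec *tunnel;
    apr_status_t rv;

    rv = ap_proxy_tunnel_create(&tunnel, r, c_origin, "ws");
    if (rv != APR_SUCCESS) {
        return HTTP_INTERNAL_SERVER_ERROR;
    }
    tunnel->timeout = apr_time_from_sec(60);

    return ap_proxy_tunnel_run(tunnel);
}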
*/ PROXY_DECLARE(apr_status_t) ap_proxy_transfer_between_connections( request_rec *r, @@ -1269,7 +1541,7 @@ PROXY_DECLARE(apr_status_t) ap_proxy_transfer_between_connections( const char *name, int *sent, apr_off_t bsize, - int after); + int flags); extern module PROXY_DECLARE_DATA proxy_module; diff --git a/modules/proxy/mod_proxy_ajp.c b/modules/proxy/mod_proxy_ajp.c index 73716af..32ec912 100644 --- a/modules/proxy/mod_proxy_ajp.c +++ b/modules/proxy/mod_proxy_ajp.c @@ -35,7 +35,7 @@ static int proxy_ajp_canon(request_rec *r, char *url) apr_port_t port, def_port; /* ap_port_of_scheme() */ - if (strncasecmp(url, "ajp:", 4) == 0) { + if (ap_cstr_casecmpn(url, "ajp:", 4) == 0) { url += 4; } else { @@ -65,13 +65,37 @@ static int proxy_ajp_canon(request_rec *r, char *url) if (apr_table_get(r->notes, "proxy-nocanon")) { path = url; /* this is the raw path */ } + else if (apr_table_get(r->notes, "proxy-noencode")) { + path = url; /* this is the encoded path already */ + search = r->args; + } else { - path = ap_proxy_canonenc(r->pool, url, strlen(url), enc_path, 0, - r->proxyreq); + core_dir_config *d = ap_get_core_module_config(r->per_dir_config); + int flags = d->allow_encoded_slashes && !d->decode_encoded_slashes ? PROXY_CANONENC_NOENCODEDSLASHENCODING : 0; + + path = ap_proxy_canonenc_ex(r->pool, url, strlen(url), enc_path, flags, + r->proxyreq); + if (!path) { + return HTTP_BAD_REQUEST; + } search = r->args; } - if (path == NULL) - return HTTP_BAD_REQUEST; + /* + * If we have a raw control character or a ' ' in nocanon path or + * r->args, correct encoding was missed. + */ + if (path == url && *ap_scan_vchar_obstext(path)) { + ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(10418) + "To be forwarded path contains control " + "characters or spaces"); + return HTTP_FORBIDDEN; + } + if (search && *ap_scan_vchar_obstext(search)) { + ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(10406) + "To be forwarded query string contains control " + "characters or spaces"); + return HTTP_FORBIDDEN; + } if (port != def_port) apr_snprintf(sport, sizeof(sport), ":%d", port); @@ -126,11 +150,8 @@ static apr_off_t get_content_length(request_rec * r) if (r->main == NULL) { const char *clp = apr_table_get(r->headers_in, "Content-Length"); - if (clp) { - char *errp; - if (apr_strtoff(&len, clp, &errp, 10) || *errp || len < 0) { - len = 0; /* parse error */ - } + if (clp && !ap_parse_strict_length(&len, clp)) { + len = -1; /* parse error */ } } @@ -193,6 +214,7 @@ static int ap_proxy_ajp_request(apr_pool_t *p, request_rec *r, apr_off_t content_length = 0; int original_status = r->status; const char *original_status_line = r->status_line; + const char *secret = NULL; if (psf->io_buffer_size_set) maxsize = psf->io_buffer_size; @@ -202,18 +224,20 @@ static int ap_proxy_ajp_request(apr_pool_t *p, request_rec *r, maxsize = AJP_MSG_BUFFER_SZ; maxsize = APR_ALIGN(maxsize, 1024); + if (*conn->worker->s->secret) + secret = conn->worker->s->secret; + /* * Send the AJP request to the remote server */ /* send request headers */ - status = ajp_send_header(conn->sock, r, maxsize, uri); + status = ajp_send_header(conn->sock, r, maxsize, uri, secret); if (status != APR_SUCCESS) { conn->close = 1; ap_log_rerror(APLOG_MARK, APLOG_ERR, status, r, APLOGNO(00868) - "request failed to %pI (%s)", - conn->worker->cp->addr, - conn->worker->s->hostname_ex); + "request failed to %pI (%s:%hu)", + conn->addr, conn->hostname, conn->port); if (status == AJP_EOVERFLOW) return HTTP_BAD_REQUEST; else if (status == AJP_EBAD_METHOD) { @@ 
-242,19 +266,34 @@ static int ap_proxy_ajp_request(apr_pool_t *p, request_rec *r, return HTTP_INTERNAL_SERVER_ERROR; } - /* read the first bloc of data */ + /* read the first block of data */ input_brigade = apr_brigade_create(p, r->connection->bucket_alloc); tenc = apr_table_get(r->headers_in, "Transfer-Encoding"); - if (tenc && (strcasecmp(tenc, "chunked") == 0)) { - /* The AJP protocol does not want body data yet */ - ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(00870) "request is chunked"); + if (tenc) { + if (ap_cstr_casecmp(tenc, "chunked") == 0) { + /* The AJP protocol does not want body data yet */ + ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(00870) + "request is chunked"); + } + else { + ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(10396) + "%s Transfer-Encoding is not supported", + tenc); + /* We had a failure: Close connection to backend */ + conn->close = 1; + return HTTP_INTERNAL_SERVER_ERROR; + } } else { /* Get client provided Content-Length header */ content_length = get_content_length(r); - status = ap_get_brigade(r->input_filters, input_brigade, - AP_MODE_READBYTES, APR_BLOCK_READ, - maxsize - AJP_HEADER_SZ); - + if (content_length < 0) { + status = APR_EINVAL; + } + else { + status = ap_get_brigade(r->input_filters, input_brigade, + AP_MODE_READBYTES, APR_BLOCK_READ, + maxsize - AJP_HEADER_SZ); + } if (status != APR_SUCCESS) { /* We had a failure: Close connection to backend */ conn->close = 1; @@ -295,9 +334,8 @@ static int ap_proxy_ajp_request(apr_pool_t *p, request_rec *r, conn->close = 1; apr_brigade_destroy(input_brigade); ap_log_rerror(APLOG_MARK, APLOG_ERR, status, r, APLOGNO(00876) - "send failed to %pI (%s)", - conn->worker->cp->addr, - conn->worker->s->hostname_ex); + "send failed to %pI (%s:%hu)", + conn->addr, conn->hostname, conn->port); /* * It is fatal when we failed to send a (part) of the request * body. @@ -336,15 +374,15 @@ static int ap_proxy_ajp_request(apr_pool_t *p, request_rec *r, conn->close = 1; apr_brigade_destroy(input_brigade); ap_log_rerror(APLOG_MARK, APLOG_ERR, status, r, APLOGNO(00878) - "read response failed from %pI (%s)", - conn->worker->cp->addr, - conn->worker->s->hostname_ex); + "read response failed from %pI (%s:%hu)", + conn->addr, conn->hostname, conn->port); /* If we had a successful cping/cpong and then a timeout * we assume it is a request that cause a back-end timeout, * but doesn't affect the whole worker. */ - if (APR_STATUS_IS_TIMEUP(status) && conn->worker->s->ping_timeout_set) { + if (APR_STATUS_IS_TIMEUP(status) && + conn->worker->s->ping_timeout_set) { return HTTP_GATEWAY_TIME_OUT; } @@ -470,7 +508,7 @@ static int ap_proxy_ajp_request(apr_pool_t *p, request_rec *r, /* If we are overriding the errors, we can't put the content * of the page into the brigade. */ - if (!conf->error_override || !ap_is_HTTP_ERROR(r->status)) { + if (!ap_proxy_should_override(conf, r->status)) { /* AJP13_SEND_BODY_CHUNK with zero length * is explicit flush message */ @@ -493,8 +531,7 @@ static int ap_proxy_ajp_request(apr_pool_t *p, request_rec *r, * error status so that an underlying error (eg HTTP_NOT_FOUND) * doesn't become an HTTP_OK. 
*/ - if (conf->error_override && !ap_is_HTTP_ERROR(r->status) - && ap_is_HTTP_ERROR(original_status)) { + if (ap_proxy_should_override(conf, original_status)) { r->status = original_status; r->status_line = original_status_line; } @@ -543,7 +580,7 @@ static int ap_proxy_ajp_request(apr_pool_t *p, request_rec *r, if (status != APR_SUCCESS) { backend_failed = 1; } - if (!conf->error_override || !ap_is_HTTP_ERROR(r->status)) { + if (!ap_proxy_should_override(conf, r->status)) { e = apr_bucket_eos_create(r->connection->bucket_alloc); APR_BRIGADE_INSERT_TAIL(output_brigade, e); if (ap_pass_brigade(r->output_filters, @@ -634,11 +671,10 @@ static int ap_proxy_ajp_request(apr_pool_t *p, request_rec *r, } else { ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(00892) - "got response from %pI (%s)", - conn->worker->cp->addr, - conn->worker->s->hostname_ex); + "got response from %pI (%s:%hu)", + conn->addr, conn->hostname, conn->port); - if (conf->error_override && ap_is_HTTP_ERROR(r->status)) { + if (ap_proxy_should_override(conf, r->status)) { /* clear r->status for override error, otherwise ErrorDocument * thinks that this is a recursive error, and doesn't find the * custom error page @@ -658,9 +694,8 @@ static int ap_proxy_ajp_request(apr_pool_t *p, request_rec *r, if (backend_failed) { ap_log_rerror(APLOG_MARK, APLOG_ERR, status, r, APLOGNO(00893) - "dialog to %pI (%s) failed", - conn->worker->cp->addr, - conn->worker->s->hostname_ex); + "dialog to %pI (%s:%hu) failed", + conn->addr, conn->hostname, conn->port); /* * If we already send data, signal a broken backend connection * upwards in the chain. @@ -676,7 +711,18 @@ static int ap_proxy_ajp_request(apr_pool_t *p, request_rec *r, */ rv = HTTP_SERVICE_UNAVAILABLE; } else { - rv = HTTP_INTERNAL_SERVER_ERROR; + /* If we had a successful cping/cpong and then a timeout + * we assume it is a request that cause a back-end timeout, + * but doesn't affect the whole worker. 
+ */ + if (APR_STATUS_IS_TIMEUP(status) && + conn->worker->s->ping_timeout_set) { + apr_table_setn(r->notes, "proxy_timedout", "1"); + rv = HTTP_GATEWAY_TIME_OUT; + } + else { + rv = HTTP_INTERNAL_SERVER_ERROR; + } } } else if (client_failed) { @@ -735,7 +781,7 @@ static int proxy_ajp_handler(request_rec *r, proxy_worker *worker, apr_pool_t *p = r->pool; apr_uri_t *uri; - if (strncasecmp(url, "ajp:", 4) != 0) { + if (ap_cstr_casecmpn(url, "ajp:", 4) != 0) { ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(00894) "declining URL %s", url); return DECLINED; } @@ -794,8 +840,8 @@ static int proxy_ajp_handler(request_rec *r, proxy_worker *worker, if (status != APR_SUCCESS) { backend->close = 1; ap_log_rerror(APLOG_MARK, APLOG_ERR, status, r, APLOGNO(00897) - "cping/cpong failed to %pI (%s)", - worker->cp->addr, worker->s->hostname_ex); + "cping/cpong failed to %pI (%s:%hu)", + backend->addr, backend->hostname, backend->port); status = HTTP_SERVICE_UNAVAILABLE; retry++; continue; @@ -816,6 +862,7 @@ static void ap_proxy_http_register_hook(apr_pool_t *p) { proxy_hook_scheme_handler(proxy_ajp_handler, NULL, NULL, APR_HOOK_FIRST); proxy_hook_canon_handler(proxy_ajp_canon, NULL, NULL, APR_HOOK_FIRST); + APR_REGISTER_OPTIONAL_FN(ajp_handle_cping_cpong); } AP_DECLARE_MODULE(proxy_ajp) = { diff --git a/modules/proxy/mod_proxy_balancer.c b/modules/proxy/mod_proxy_balancer.c index c59f5e9..b8b452d 100644 --- a/modules/proxy/mod_proxy_balancer.c +++ b/modules/proxy/mod_proxy_balancer.c @@ -75,7 +75,7 @@ static int proxy_balancer_canon(request_rec *r, char *url) apr_port_t port = 0; /* TODO: offset of BALANCER_PREFIX ?? */ - if (strncasecmp(url, "balancer:", 9) == 0) { + if (ap_cstr_casecmpn(url, "balancer:", 9) == 0) { url += 9; } else { @@ -102,13 +102,37 @@ static int proxy_balancer_canon(request_rec *r, char *url) if (apr_table_get(r->notes, "proxy-nocanon")) { path = url; /* this is the raw path */ } + else if (apr_table_get(r->notes, "proxy-noencode")) { + path = url; /* this is the encoded path already */ + search = r->args; + } else { - path = ap_proxy_canonenc(r->pool, url, strlen(url), enc_path, 0, - r->proxyreq); + core_dir_config *d = ap_get_core_module_config(r->per_dir_config); + int flags = d->allow_encoded_slashes && !d->decode_encoded_slashes ? PROXY_CANONENC_NOENCODEDSLASHENCODING : 0; + + path = ap_proxy_canonenc_ex(r->pool, url, strlen(url), enc_path, flags, + r->proxyreq); + if (!path) { + return HTTP_BAD_REQUEST; + } search = r->args; } - if (path == NULL) - return HTTP_BAD_REQUEST; + /* + * If we have a raw control character or a ' ' in nocanon path or + * r->args, correct encoding was missed. + */ + if (path == url && *ap_scan_vchar_obstext(path)) { + ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(10416) + "To be forwarded path contains control " + "characters or spaces"); + return HTTP_FORBIDDEN; + } + if (search && *ap_scan_vchar_obstext(search)) { + ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(10407) + "To be forwarded query string contains control " + "characters or spaces"); + return HTTP_FORBIDDEN; + } r->filename = apr_pstrcat(r->pool, "proxy:" BALANCER_PREFIX, host, "/", path, (search) ? "?" : "", (search) ? 
search : "", NULL); @@ -346,23 +370,27 @@ static proxy_worker *find_best_worker(proxy_balancer *balancer, proxy_worker *candidate = NULL; apr_status_t rv; +#if APR_HAS_THREADS if ((rv = PROXY_THREAD_LOCK(balancer)) != APR_SUCCESS) { ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01163) "%s: Lock failed for find_best_worker()", balancer->s->name); return NULL; } +#endif candidate = (*balancer->lbmethod->finder)(balancer, r); if (candidate) candidate->s->elected++; +#if APR_HAS_THREADS if ((rv = PROXY_THREAD_UNLOCK(balancer)) != APR_SUCCESS) { ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01164) "%s: Unlock failed for find_best_worker()", balancer->s->name); } +#endif if (candidate == NULL) { /* All the workers are in error state or disabled. @@ -376,7 +404,7 @@ static proxy_worker *find_best_worker(proxy_balancer *balancer, /* XXX: This can perhaps be build using some * smarter mechanism, like tread_cond. * But since the statuses can came from - * different childs, use the provided algo. + * different children, use the provided algo. */ apr_interval_time_t timeout = balancer->s->timeout; apr_interval_time_t step, tval = 0; @@ -417,7 +445,7 @@ static int rewrite_url(request_rec *r, proxy_worker *worker, NULL)); } - *url = apr_pstrcat(r->pool, worker->s->name, path, NULL); + *url = apr_pstrcat(r->pool, worker->s->name_ex, path, NULL); return OK; } @@ -451,8 +479,9 @@ static void force_recovery(proxy_balancer *balancer, server_rec *s) ++(*worker)->s->retries; (*worker)->s->status &= ~PROXY_WORKER_IN_ERROR; ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s, APLOGNO(01165) - "%s: Forcing recovery for worker (%s)", - balancer->s->name, (*worker)->s->hostname_ex); + "%s: Forcing recovery for worker (%s:%d)", + balancer->s->name, (*worker)->s->hostname_ex, + (int)(*worker)->s->port); } } } @@ -492,11 +521,13 @@ static int proxy_balancer_pre_request(proxy_worker **worker, /* Step 2: Lock the LoadBalancer * XXX: perhaps we need the process lock here */ +#if APR_HAS_THREADS if ((rv = PROXY_THREAD_LOCK(*balancer)) != APR_SUCCESS) { ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01166) "%s: Lock failed for pre_request", (*balancer)->s->name); return DECLINED; } +#endif /* Step 3: force recovery */ force_recovery(*balancer, r->server); @@ -557,20 +588,24 @@ static int proxy_balancer_pre_request(proxy_worker **worker, ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(01167) "%s: All workers are in error state for route (%s)", (*balancer)->s->name, route); +#if APR_HAS_THREADS if ((rv = PROXY_THREAD_UNLOCK(*balancer)) != APR_SUCCESS) { ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01168) "%s: Unlock failed for pre_request", (*balancer)->s->name); } +#endif return HTTP_SERVICE_UNAVAILABLE; } } +#if APR_HAS_THREADS if ((rv = PROXY_THREAD_UNLOCK(*balancer)) != APR_SUCCESS) { ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01169) "%s: Unlock failed for pre_request", (*balancer)->s->name); } +#endif if (!*worker) { runtime = find_best_worker(*balancer, r); if (!runtime) { @@ -608,7 +643,7 @@ static int proxy_balancer_pre_request(proxy_worker **worker, apr_table_setn(r->subprocess_env, "BALANCER_NAME", (*balancer)->s->name); apr_table_setn(r->subprocess_env, - "BALANCER_WORKER_NAME", (*worker)->s->name); + "BALANCER_WORKER_NAME", (*worker)->s->name_ex); apr_table_setn(r->subprocess_env, "BALANCER_WORKER_ROUTE", (*worker)->s->route); @@ -631,7 +666,7 @@ static int proxy_balancer_pre_request(proxy_worker **worker, } ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(01172) "%s: worker (%s) 
rewritten to %s", - (*balancer)->s->name, (*worker)->s->name, *url); + (*balancer)->s->name, (*worker)->s->name_ex, *url); return access_status; } @@ -644,12 +679,14 @@ static int proxy_balancer_post_request(proxy_worker *worker, apr_status_t rv; +#if APR_HAS_THREADS if ((rv = PROXY_THREAD_LOCK(balancer)) != APR_SUCCESS) { ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01173) "%s: Lock failed for post_request", balancer->s->name); return HTTP_INTERNAL_SERVER_ERROR; } +#endif if (!apr_is_empty_array(balancer->errstatuses) && !(worker->s->status & PROXY_WORKER_IGNORE_ERRORS)) { @@ -681,11 +718,12 @@ static int proxy_balancer_post_request(proxy_worker *worker, worker->s->error_time = apr_time_now(); } - +#if APR_HAS_THREADS if ((rv = PROXY_THREAD_UNLOCK(balancer)) != APR_SUCCESS) { ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01175) "%s: Unlock failed for post_request", balancer->s->name); } +#endif ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(01176) "proxy_balancer_post_request for (%s)", balancer->s->name); @@ -784,7 +822,7 @@ static apr_status_t lock_remove(void *data) /* * First try to compute an unique ID for each vhost with minimal criteria, * that is the first Host/IP:port and ServerName. For most cases this should - * be enough and avoids changing the ID unnecessarily accross restart (or + * be enough and avoids changing the ID unnecessarily across restart (or * stop/start w.r.t. persisted files) for things that this module does not * care about. * @@ -945,7 +983,6 @@ static int balancer_post_config(apr_pool_t *pconf, apr_pool_t *plog, PROXY_STRNCPY(balancer->s->sname, sname); /* We know this will succeed */ balancer->max_workers = balancer->workers->nelts + balancer->growth; - /* Create global mutex */ rv = ap_global_mutex_create(&(balancer->gmutex), NULL, balancer_mutex_type, balancer->s->sname, s, pconf, 0); @@ -955,7 +992,6 @@ static int balancer_post_config(apr_pool_t *pconf, apr_pool_t *plog, balancer->s->sname); return HTTP_INTERNAL_SERVER_ERROR; } - apr_pool_cleanup_register(pconf, (void *)s, lock_remove, apr_pool_cleanup_null); @@ -1078,6 +1114,8 @@ static void push2table(const char *input, apr_table_t *params, } ap_unescape_url(key); ap_unescape_url(val); + /* hcuri, worker name, balancer name, at least are escaped when building the form, so twice */ + ap_unescape_url(val); if (allowed == NULL) { /* allow all */ apr_table_set(params, key, val); } @@ -1095,105 +1133,32 @@ static void push2table(const char *input, apr_table_t *params, } } -/* Manages the loadfactors and member status - * The balancer, worker and nonce are obtained from - * the request args (?b=...&w=...&nonce=....). - * All other params are pulled from any POST - * data that exists. - * TODO: - * /...//balancer/worker/nonce - */ -static int balancer_handler(request_rec *r) +/* Returns non-zero if the Referer: header value passed matches the + * host of the request. */ +static int safe_referer(request_rec *r, const char *ref) { - void *sconf; - proxy_server_conf *conf; - proxy_balancer *balancer, *bsel = NULL; - proxy_worker *worker, *wsel = NULL; - proxy_worker **workers = NULL; - apr_table_t *params; - int i, n; - int ok2change = 1; - const char *name; - const char *action; - apr_status_t rv; - - /* is this for us? 
*/ - if (strcmp(r->handler, "balancer-manager")) { - return DECLINED; - } - - r->allowed = 0 - | (AP_METHOD_BIT << M_GET) - | (AP_METHOD_BIT << M_POST); - if ((r->method_number != M_GET) && (r->method_number != M_POST)) { - return DECLINED; - } - - sconf = r->server->module_config; - conf = (proxy_server_conf *) ap_get_module_config(sconf, &proxy_module); - params = apr_table_make(r->pool, 10); - - balancer = (proxy_balancer *)conf->balancers->elts; - for (i = 0; i < conf->balancers->nelts; i++, balancer++) { - if ((rv = PROXY_THREAD_LOCK(balancer)) != APR_SUCCESS) { - ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01189) - "%s: Lock failed for balancer_handler", - balancer->s->name); - } - ap_proxy_sync_balancer(balancer, r->server, conf); - if ((rv = PROXY_THREAD_UNLOCK(balancer)) != APR_SUCCESS) { - ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01190) - "%s: Unlock failed for balancer_handler", - balancer->s->name); - } - } + apr_uri_t uri; - if (r->args && (r->method_number == M_GET)) { - const char *allowed[] = { "w", "b", "nonce", "xml", NULL }; - ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(01191) "parsing r->args"); - - push2table(r->args, params, allowed, r->pool); - } - if (r->method_number == M_POST) { - apr_bucket_brigade *ib; - apr_size_t len = 1024; - char *buf = apr_pcalloc(r->pool, len+1); - - ib = apr_brigade_create(r->connection->pool, r->connection->bucket_alloc); - rv = ap_get_brigade(r->input_filters, ib, AP_MODE_READBYTES, - APR_BLOCK_READ, len); - if (rv != APR_SUCCESS) { - return ap_map_http_request_error(rv, HTTP_BAD_REQUEST); - } - apr_brigade_flatten(ib, buf, &len); - buf[len] = '\0'; - push2table(buf, params, NULL, r->pool); - } - if ((name = apr_table_get(params, "b"))) - bsel = ap_proxy_get_balancer(r->pool, conf, - apr_pstrcat(r->pool, BALANCER_PREFIX, name, NULL), 0); - - if ((name = apr_table_get(params, "w"))) { - wsel = ap_proxy_get_worker(r->pool, bsel, conf, name); - } + if (apr_uri_parse(r->pool, ref, &uri) || !uri.hostname) + return 0; + return strcasecmp(uri.hostname, ap_get_server_name(r)) == 0; +} - /* Check that the supplied nonce matches this server's nonce; - * otherwise ignore all parameters, to prevent a CSRF attack. */ - if (!bsel || - (*bsel->s->nonce && - ( - (name = apr_table_get(params, "nonce")) == NULL || - strcmp(bsel->s->nonce, name) != 0 - ) - ) - ) { - apr_table_clear(params); - ok2change = 0; - } +/* + * Process the paramters and add or update the worker of the + * balancer. Must only be called if the nonce has been validated to + * match, to avoid XSS attacks. 
+ */ +static int balancer_process_balancer_worker(request_rec *r, proxy_server_conf *conf, + proxy_balancer *bsel, + proxy_worker *wsel, + apr_table_t *params) +{ + apr_status_t rv; /* First set the params */ - if (wsel && ok2change) { + if (wsel) { const char *val; int was_usable = PROXY_WORKER_IS_USABLE(wsel); @@ -1275,7 +1240,7 @@ static int balancer_handler(request_rec *r) if ((val = apr_table_get(params, "w_hm"))) { proxy_hcmethods_t *method = proxy_hcmethods; for (; method->name; method++) { - if (!strcasecmp(method->name, val) && method->implemented) + if (!ap_cstr_casecmp(method->name, val) && method->implemented) wsel->s->method = method->method; } } @@ -1292,7 +1257,7 @@ static int balancer_handler(request_rec *r) *wsel->s->hcexpr = '\0'; } /* If the health check method doesn't support an expr, then null it */ - if (wsel->s->method == NONE || wsel->s->method == TCP) { + if (wsel->s->method == NONE || wsel->s->method == TCP || wsel->s->method == CPING) { *wsel->s->hcexpr = '\0'; } /* if enabling, we need to reset all lb params */ @@ -1302,7 +1267,7 @@ static int balancer_handler(request_rec *r) } - if (bsel && ok2change) { + if (bsel) { const char *val; int ival; ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(01193) @@ -1359,11 +1324,13 @@ static int balancer_handler(request_rec *r) proxy_worker *nworker; nworker = ap_proxy_get_worker(r->pool, bsel, conf, val); if (!nworker && storage->num_free_slots(bsel->wslot)) { +#if APR_HAS_THREADS if ((rv = PROXY_GLOBAL_LOCK(bsel)) != APR_SUCCESS) { ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01194) "%s: Lock failed for adding worker", bsel->s->name); } +#endif ret = ap_proxy_define_worker(conf->pool, &nworker, bsel, conf, val, 0); if (!ret) { unsigned int index; @@ -1372,63 +1339,137 @@ static int balancer_handler(request_rec *r) if ((rv = storage->grab(bsel->wslot, &index)) != APR_SUCCESS) { ap_log_rerror(APLOG_MARK, APLOG_EMERG, rv, r, APLOGNO(01195) "worker slotmem_grab failed"); +#if APR_HAS_THREADS if ((rv = PROXY_GLOBAL_UNLOCK(bsel)) != APR_SUCCESS) { ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01196) "%s: Unlock failed for adding worker", bsel->s->name); } +#endif return HTTP_BAD_REQUEST; } if ((rv = storage->dptr(bsel->wslot, index, (void *)&shm)) != APR_SUCCESS) { ap_log_rerror(APLOG_MARK, APLOG_EMERG, rv, r, APLOGNO(01197) "worker slotmem_dptr failed"); +#if APR_HAS_THREADS if ((rv = PROXY_GLOBAL_UNLOCK(bsel)) != APR_SUCCESS) { ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01198) "%s: Unlock failed for adding worker", bsel->s->name); } +#endif return HTTP_BAD_REQUEST; } if ((rv = ap_proxy_share_worker(nworker, shm, index)) != APR_SUCCESS) { ap_log_rerror(APLOG_MARK, APLOG_EMERG, rv, r, APLOGNO(01199) "Cannot share worker"); +#if APR_HAS_THREADS if ((rv = PROXY_GLOBAL_UNLOCK(bsel)) != APR_SUCCESS) { ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01200) "%s: Unlock failed for adding worker", bsel->s->name); } +#endif return HTTP_BAD_REQUEST; } if ((rv = ap_proxy_initialize_worker(nworker, r->server, conf->pool)) != APR_SUCCESS) { ap_log_rerror(APLOG_MARK, APLOG_EMERG, rv, r, APLOGNO(01201) "Cannot init worker"); +#if APR_HAS_THREADS if ((rv = PROXY_GLOBAL_UNLOCK(bsel)) != APR_SUCCESS) { ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01202) "%s: Unlock failed for adding worker", bsel->s->name); } +#endif return HTTP_BAD_REQUEST; } /* sync all timestamps */ bsel->wupdated = bsel->s->wupdated = nworker->s->updated = apr_time_now(); /* by default, all new workers are disabled */ 
ap_proxy_set_wstatus(PROXY_WORKER_DISABLED_FLAG, 1, nworker); + } else { + ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(10163) + "%s: failed to add worker %s", + bsel->s->name, val); +#if APR_HAS_THREADS + PROXY_GLOBAL_UNLOCK(bsel); +#endif + return HTTP_BAD_REQUEST; } +#if APR_HAS_THREADS if ((rv = PROXY_GLOBAL_UNLOCK(bsel)) != APR_SUCCESS) { ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01203) "%s: Unlock failed for adding worker", bsel->s->name); } +#endif + } else { + ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(10164) + "%s: failed to add worker %s", + bsel->s->name, val); + return HTTP_BAD_REQUEST; } } } + return APR_SUCCESS; +} + +/* + * Process a request for balancer or worker management from another module + */ +static apr_status_t balancer_manage(request_rec *r, apr_table_t *params) +{ + void *sconf; + proxy_server_conf *conf; + proxy_balancer *bsel = NULL; + proxy_worker *wsel = NULL; + const char *name; + sconf = r->server->module_config; + conf = (proxy_server_conf *) ap_get_module_config(sconf, &proxy_module); + + /* Process the parameters */ + if ((name = apr_table_get(params, "b"))) { + ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, "balancer_manage " + "balancer: %s", name); + bsel = ap_proxy_get_balancer(r->pool, conf, + apr_pstrcat(r->pool, BALANCER_PREFIX, name, NULL), 0); + } + + if ((name = apr_table_get(params, "w"))) { + ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, "balancer_manage " + "worker: %s", name); + wsel = ap_proxy_get_worker(r->pool, bsel, conf, name); + } + if (bsel) { + ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, "balancer_manage " + "balancer: %s", bsel->s->name); + return(balancer_process_balancer_worker(r, conf, bsel, wsel, params)); + } + ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, "balancer_manage failed: " + "No balancer!"); + return HTTP_BAD_REQUEST; +} +/* + * builds the page and links to configure via HTLM or XML. + */ +static void balancer_display_page(request_rec *r, proxy_server_conf *conf, + proxy_balancer *bsel, + proxy_worker *wsel, + int usexml) +{ + const char *action; + proxy_balancer *balancer; + proxy_worker *worker; + proxy_worker **workers; + int i, n; action = ap_construct_url(r->pool, r->uri, r); ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(01204) "genning page"); - if (apr_table_get(params, "xml")) { + if (usexml) { char date[APR_RFC822_DATE_LEN]; ap_set_content_type(r, "text/xml"); ap_rputs("\n", r); @@ -1440,7 +1481,7 @@ static int balancer_handler(request_rec *r) /* Start proxy_balancer */ ap_rvputs(r, " ", balancer->s->name, "\n", NULL); if (*balancer->s->sticky) { - ap_rvputs(r, " ", balancer->s->sticky, + ap_rvputs(r, " ", ap_escape_html(r->pool, balancer->s->sticky), "\n", NULL); ap_rprintf(r, " %s\n", @@ -1551,7 +1592,7 @@ static int balancer_handler(request_rec *r) ap_rprintf(r, " %d\n", worker->s->lbset); /* End proxy_worker_stat */ - if (!strcasecmp(worker->s->scheme, "ajp")) { + if (!ap_cstr_casecmp(worker->s->scheme, "ajp")) { ap_rputs(" ", r); switch (worker->s->flush_packets) { case flush_off: @@ -1650,10 +1691,10 @@ static int balancer_handler(request_rec *r) for (i = 0; i < conf->balancers->nelts; i++) { ap_rputs("
\n

LoadBalancer Status for ", r); - ap_rvputs(r, "", NULL); + "\">", NULL); ap_rvputs(r, balancer->s->name, " [",balancer->s->sname, "]

\n", NULL); ap_rputs("\n\n" "" @@ -1664,11 +1705,11 @@ static int balancer_handler(request_rec *r) balancer->max_workers - (int)storage->num_free_slots(balancer->wslot)); if (*balancer->s->sticky) { if (strcmp(balancer->s->sticky, balancer->s->sticky_path)) { - ap_rvputs(r, "\n", NULL); ap_rprintf(r, "\n", !balancer->s->inactive ? "Yes" : "No"); - ap_rputs("
MaxMembersStickySessionDisableFailoverTimeoutFailoverAttemptsMethod", balancer->s->sticky, " | ", - balancer->s->sticky_path, NULL); + ap_rvputs(r, "", ap_escape_html(r->pool, balancer->s->sticky), " | ", + ap_escape_html(r->pool, balancer->s->sticky_path), NULL); } else { - ap_rvputs(r, "", balancer->s->sticky, NULL); + ap_rvputs(r, "", ap_escape_html(r->pool, balancer->s->sticky), NULL); } } else { @@ -1688,7 +1729,7 @@ static int balancer_handler(request_rec *r) ap_rvputs(r, balancer->s->vpath, "%s
\n
", r); + ap_rputs("\n\n
", r); ap_rputs("\n\n" "" "" @@ -1703,12 +1744,12 @@ static int balancer_handler(request_rec *r) for (n = 0; n < balancer->workers->nelts; n++) { char fbuf[50]; worker = *workers; - ap_rvputs(r, "\n", NULL); ap_rvputs(r, "", apr_time_as_msec(worker->s->interval)); ap_rprintf(r, "", worker->s->passes,worker->s->pcount); ap_rprintf(r, "", worker->s->fails, worker->s->fcount); - ap_rprintf(r, "", worker->s->hcuri); + ap_rprintf(r, "", ap_escape_html(r->pool, worker->s->hcuri)); ap_rprintf(r, "\n", r); @@ -1747,20 +1788,20 @@ static int balancer_handler(request_rec *r) if (wsel && bsel) { ap_rputs("

Edit worker settings for ", r); ap_rvputs(r, (*wsel->s->uds_path?"":""), ap_proxy_worker_name(r->pool, wsel), (*wsel->s->uds_path?"":""), "

\n", NULL); - ap_rputs("\n", NULL); + ap_rputs("pool, action), "\">\n", NULL); ap_rputs("
Worker URLRouteRouteRedir
", NULL); + "\">", NULL); ap_rvputs(r, (*worker->s->uds_path ? "" : ""), ap_proxy_worker_name(r->pool, worker), (*worker->s->uds_path ? "" : ""), "", ap_escape_html(r->pool, worker->s->route), @@ -1730,7 +1771,7 @@ static int balancer_handler(request_rec *r) ap_rprintf(r, "%" APR_TIME_T_FMT "ms%d (%d)%d (%d)%s%s%s", worker->s->hcexpr); } ap_rputs("
\n", (float)(wsel->s->lbfactor)/100.0); ap_rputs("\n", wsel->s->lbset); ap_rputs("\n", r); + ap_rputs("\">\n", r); ap_rputs("\n", r); + ap_rputs("\">\n", r); ap_rputs("", r); ap_rputs("\n", r); } ap_rputs("\n", r); ap_rvputs(r, "
Load factor:
LB Set:
Route:
Route Redirect:
Status:" "" @@ -1798,22 +1839,22 @@ static int balancer_handler(request_rec *r) ap_rputs("\n", r); - ap_rprintf(r, "\n", apr_time_as_msec(wsel->s->interval)); - ap_rprintf(r, "\n", wsel->s->passes); - ap_rprintf(r, "\n", wsel->s->fails); - ap_rprintf(r, "\n", ap_escape_html(r->pool, wsel->s->hcuri)); + ap_rprintf(r, "\n", ap_escape_html(r->pool, wsel->s->hcuri)); ap_rputs("
Ignore Errors
Expr\n
Interval (ms)Interval (ms)
Passes triggerPasses trigger
Fails trigger)Fails trigger)
HC uri
HC uri
\n
\n\n", NULL); + ap_rvputs(r, "value=\"", ap_escape_uri(r->pool, wsel->s->name_ex), "\">\n", NULL); ap_rvputs(r, "\n", NULL); + ap_rvputs(r, "value=\"", ap_escape_html(r->pool, bsel->s->name + sizeof(BALANCER_PREFIX) - 1), + "\">\n", NULL); ap_rvputs(r, "\n", NULL); ap_rputs("\n", r); @@ -1823,9 +1864,9 @@ static int balancer_handler(request_rec *r) const ap_list_provider_names_t *pname; int i; ap_rputs("

Edit balancer settings for ", r); - ap_rvputs(r, bsel->s->name, "

\n", NULL); - ap_rputs("
\n", NULL); + ap_rvputs(r, ap_escape_html(r->pool, bsel->s->name), "\n", NULL); + ap_rputs("pool, action), "\">\n", NULL); ap_rputs("\n", r); provs = ap_list_provider_names(r->pool, PROXY_LBMETHOD, "0"); if (provs) { @@ -1846,15 +1887,16 @@ static int balancer_handler(request_rec *r) ap_rprintf(r, "value='%d'>\n", bsel->s->max_attempts); ap_rputs("", r); create_radio("b_sforce", bsel->s->sticky_force, r); + ap_rputs("\n", r); ap_rputs("\n", r); + ap_rputs("\">    (Use '-' to delete)\n", r); if (storage->num_free_slots(bsel->wslot) != 0) { ap_rputs("\n", r); ap_rvputs(r, "
Disable Failover:
Sticky Session:s->sticky, bsel->s->sticky_path)) { - ap_rvputs(r, "value ='", bsel->s->sticky, " | ", - bsel->s->sticky_path, NULL); + ap_rvputs(r, "value =\"", ap_escape_html(r->pool, bsel->s->sticky), " | ", + ap_escape_html(r->pool, bsel->s->sticky_path), NULL); } else { - ap_rvputs(r, "value ='", bsel->s->sticky, NULL); + ap_rvputs(r, "value =\"", ap_escape_html(r->pool, bsel->s->sticky), NULL); } - ap_rputs("'>    (Use '-' to delete)
Add New Worker:" "    Are you sure? " @@ -1862,8 +1904,8 @@ static int balancer_handler(request_rec *r) } ap_rputs("
\n\n", NULL); + ap_rvputs(r, "value=\"", ap_escape_html(r->pool, bsel->s->name + sizeof(BALANCER_PREFIX) - 1), + "\">\n", NULL); ap_rvputs(r, "\n", NULL); ap_rputs("
\n", r); @@ -1873,6 +1915,123 @@ static int balancer_handler(request_rec *r) ap_rputs("\n", r); ap_rflush(r); } +} + +/* Manages the loadfactors and member status + * The balancer, worker and nonce are obtained from + * the request args (?b=...&w=...&nonce=....). + * All other params are pulled from any POST + * data that exists. + * TODO: + * /...//balancer/worker/nonce + */ +static int balancer_handler(request_rec *r) +{ + void *sconf; + proxy_server_conf *conf; + proxy_balancer *balancer, *bsel = NULL; + proxy_worker *wsel = NULL; + apr_table_t *params; + int i; + const char *name, *ref; + apr_status_t rv; + + /* is this for us? */ + if (strcmp(r->handler, "balancer-manager")) { + return DECLINED; + } + + r->allowed = 0 + | (AP_METHOD_BIT << M_GET) + | (AP_METHOD_BIT << M_POST); + if ((r->method_number != M_GET) && (r->method_number != M_POST)) { + return DECLINED; + } + + sconf = r->server->module_config; + conf = (proxy_server_conf *) ap_get_module_config(sconf, &proxy_module); + params = apr_table_make(r->pool, 10); + + balancer = (proxy_balancer *)conf->balancers->elts; + for (i = 0; i < conf->balancers->nelts; i++, balancer++) { +#if APR_HAS_THREADS + if ((rv = PROXY_THREAD_LOCK(balancer)) != APR_SUCCESS) { + ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01189) + "%s: Lock failed for balancer_handler", + balancer->s->name); + } +#endif + ap_proxy_sync_balancer(balancer, r->server, conf); +#if APR_HAS_THREADS + if ((rv = PROXY_THREAD_UNLOCK(balancer)) != APR_SUCCESS) { + ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01190) + "%s: Unlock failed for balancer_handler", + balancer->s->name); + } +#endif + } + + if (r->args && (r->method_number == M_GET)) { + const char *allowed[] = { "w", "b", "nonce", "xml", NULL }; + ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(01191) "parsing r->args"); + + push2table(r->args, params, allowed, r->pool); + } + if (r->method_number == M_POST) { + apr_bucket_brigade *ib; + apr_size_t len = 1024; + char *buf = apr_pcalloc(r->pool, len+1); + + ib = apr_brigade_create(r->connection->pool, r->connection->bucket_alloc); + rv = ap_get_brigade(r->input_filters, ib, AP_MODE_READBYTES, + APR_BLOCK_READ, len); + if (rv != APR_SUCCESS) { + return ap_map_http_request_error(rv, HTTP_BAD_REQUEST); + } + apr_brigade_flatten(ib, buf, &len); + buf[len] = '\0'; + push2table(buf, params, NULL, r->pool); + } + + /* Ignore parameters if this looks like XSRF */ + ref = apr_table_get(r->headers_in, "Referer"); + if (apr_table_elts(params) + && (!ref || !safe_referer(r, ref))) { + ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(10187) + "ignoring params in balancer-manager cross-site access %s: %s", ref, ap_get_server_name(r)); + apr_table_clear(params); + } + + /* Process the parameters */ + if ((name = apr_table_get(params, "b"))) + bsel = ap_proxy_get_balancer(r->pool, conf, + apr_pstrcat(r->pool, BALANCER_PREFIX, name, NULL), 0); + + if ((name = apr_table_get(params, "w"))) { + wsel = ap_proxy_get_worker(r->pool, bsel, conf, name); + } + + + /* Check that the supplied nonce matches this server's nonce; + * otherwise ignore all parameters, to prevent a CSRF + * attack. 
*/ + if (bsel + && (*bsel->s->nonce + && ((name = apr_table_get(params, "nonce")) != NULL + && strcmp(bsel->s->nonce, name) == 0))) { + /* Process the parameters and add the worker to the balancer */ + rv = balancer_process_balancer_worker(r, conf, bsel, wsel, params); + if (rv != APR_SUCCESS) { + return HTTP_BAD_REQUEST; + } + } + + /* display the HTML or XML page */ + if (apr_table_get(params, "xml")) { + balancer_display_page(r, conf, bsel, wsel, 1); + } else { + balancer_display_page(r, conf, bsel, wsel, 0); + } return DONE; } @@ -1905,7 +2064,7 @@ static void balancer_child_init(apr_pool_t *p, server_rec *s) balancer->s->name); exit(1); /* Ugly, but what else? */ } - init_balancer_members(conf->pool, s, balancer); + init_balancer_members(p, s, balancer); } s = s->next; } @@ -1921,6 +2080,7 @@ static void ap_proxy_balancer_register_hook(apr_pool_t *p) static const char *const aszPred[] = { "mpm_winnt.c", "mod_slotmem_shm.c", NULL}; static const char *const aszPred2[] = { "mod_proxy.c", NULL}; /* manager handler */ + APR_REGISTER_OPTIONAL_FN(balancer_manage); ap_hook_post_config(balancer_post_config, aszPred2, NULL, APR_HOOK_MIDDLE); ap_hook_pre_config(balancer_pre_config, NULL, NULL, APR_HOOK_MIDDLE); ap_hook_handler(balancer_handler, NULL, NULL, APR_HOOK_FIRST); diff --git a/modules/proxy/mod_proxy_connect.c b/modules/proxy/mod_proxy_connect.c index 7be6a6a..5a68135 100644 --- a/modules/proxy/mod_proxy_connect.c +++ b/modules/proxy/mod_proxy_connect.c @@ -39,7 +39,7 @@ module AP_MODULE_DECLARE_DATA proxy_connect_module; * that may be okay, since the data is supposed to * be transparent. In fact, this doesn't log at all * yet. 8^) - * FIXME: doesn't check any headers initally sent from the + * FIXME: doesn't check any headers initially sent from the * client. * FIXME: should allow authentication, but hopefully the * generic proxy authentication is good enough. @@ -156,25 +156,21 @@ static int proxy_connect_handler(request_rec *r, proxy_worker *worker, apr_socket_t *sock; conn_rec *c = r->connection; conn_rec *backconn; - int done = 0; - apr_bucket_brigade *bb_front; - apr_bucket_brigade *bb_back; apr_status_t rv; apr_size_t nbytes; char buffer[HUGE_STRING_LEN]; - apr_socket_t *client_socket = ap_get_conn_socket(c); + + apr_bucket_brigade *bb; + proxy_tunnel_rec *tunnel; int failed, rc; - apr_pollset_t *pollset; - apr_pollfd_t pollfd; - const apr_pollfd_t *signalled; - apr_int32_t pollcnt, pi; - apr_int16_t pollevent; - apr_sockaddr_t *nexthop; apr_uri_t uri; const char *connectname; apr_port_t connectport = 0; + apr_sockaddr_t *nexthop; + + apr_interval_time_t current_timeout; /* is this for us? 
*/ if (r->method_number != M_CONNECT) { @@ -261,28 +257,6 @@ static int proxy_connect_handler(request_rec *r, proxy_worker *worker, } } - /* setup polling for connection */ - ap_log_rerror(APLOG_MARK, APLOG_TRACE2, 0, r, "setting up poll()"); - - if ((rv = apr_pollset_create(&pollset, 2, r->pool, 0)) != APR_SUCCESS) { - apr_socket_close(sock); - ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01020) - "error apr_pollset_create()"); - return HTTP_INTERNAL_SERVER_ERROR; - } - - /* Add client side to the poll */ - pollfd.p = r->pool; - pollfd.desc_type = APR_POLL_SOCKET; - pollfd.reqevents = APR_POLLIN | APR_POLLHUP; - pollfd.desc.s = client_socket; - pollfd.client_data = NULL; - apr_pollset_add(pollset, &pollfd); - - /* Add the server side to the poll */ - pollfd.desc.s = sock; - apr_pollset_add(pollset, &pollfd); - /* * Step Three: Send the Request * @@ -300,13 +274,22 @@ static int proxy_connect_handler(request_rec *r, proxy_worker *worker, return HTTP_INTERNAL_SERVER_ERROR; } ap_proxy_ssl_engine(backconn, r->per_dir_config, 0); + + /* + * save the timeout of the socket because core_pre_connection + * will set it to base_server->timeout + * (core TimeOut directive). + */ + apr_socket_timeout_get(sock, ¤t_timeout); rc = ap_run_pre_connection(backconn, sock); if (rc != OK && rc != DONE) { backconn->aborted = 1; ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(01022) "pre_connection setup failed (%d)", rc); + apr_socket_close(sock); return HTTP_INTERNAL_SERVER_ERROR; } + apr_socket_timeout_set(sock, current_timeout); ap_log_rerror(APLOG_MARK, APLOG_TRACE3, 0, r, "connection complete to %pI (%s)", @@ -314,9 +297,7 @@ static int proxy_connect_handler(request_rec *r, proxy_worker *worker, apr_table_setn(r->notes, "proxy-source-port", apr_psprintf(r->pool, "%hu", backconn->local_addr->port)); - - bb_front = apr_brigade_create(p, c->bucket_alloc); - bb_back = apr_brigade_create(p, backconn->bucket_alloc); + bb = apr_brigade_create(p, c->bucket_alloc); /* If we are connecting through a remote proxy, we need to pass * the CONNECT request on to it. @@ -326,24 +307,24 @@ static int proxy_connect_handler(request_rec *r, proxy_worker *worker, */ ap_log_rerror(APLOG_MARK, APLOG_TRACE2, 0, r, "sending the CONNECT request to the remote proxy"); - ap_fprintf(backconn->output_filters, bb_back, + ap_fprintf(backconn->output_filters, bb, "CONNECT %s HTTP/1.0" CRLF, r->uri); - ap_fprintf(backconn->output_filters, bb_back, + ap_fprintf(backconn->output_filters, bb, "Proxy-agent: %s" CRLF CRLF, ap_get_server_banner()); - ap_fflush(backconn->output_filters, bb_back); + ap_fflush(backconn->output_filters, bb); } else { ap_log_rerror(APLOG_MARK, APLOG_TRACE1, 0, r, "Returning 200 OK"); nbytes = apr_snprintf(buffer, sizeof(buffer), "HTTP/1.0 200 Connection Established" CRLF); ap_xlate_proto_to_ascii(buffer, nbytes); - ap_fwrite(c->output_filters, bb_front, buffer, nbytes); + ap_fwrite(c->output_filters, bb, buffer, nbytes); nbytes = apr_snprintf(buffer, sizeof(buffer), "Proxy-agent: %s" CRLF CRLF, ap_get_server_banner()); ap_xlate_proto_to_ascii(buffer, nbytes); - ap_fwrite(c->output_filters, bb_front, buffer, nbytes); - ap_fflush(c->output_filters, bb_front); + ap_fwrite(c->output_filters, bb, buffer, nbytes); + ap_fflush(c->output_filters, bb); #if 0 /* This is safer code, but it doesn't work yet. I'm leaving it * here so that I can fix it later. 
@@ -354,8 +335,7 @@ static int proxy_connect_handler(request_rec *r, proxy_worker *worker, ap_rflush(r); #endif } - - ap_log_rerror(APLOG_MARK, APLOG_TRACE2, 0, r, "setting up poll()"); + apr_brigade_cleanup(bb); /* * Step Four: Handle Data Transfer @@ -363,88 +343,28 @@ static int proxy_connect_handler(request_rec *r, proxy_worker *worker, * Handle two way transfer of data over the socket (this is a tunnel). */ - /* we are now acting as a tunnel - the input/output filter stacks should - * not contain any non-connection filters. - */ - r->output_filters = c->output_filters; - r->proto_output_filters = c->output_filters; - r->input_filters = c->input_filters; - r->proto_input_filters = c->input_filters; -/* r->sent_bodyct = 1;*/ - - do { /* Loop until done (one side closes the connection, or an error) */ - rv = apr_pollset_poll(pollset, -1, &pollcnt, &signalled); - if (rv != APR_SUCCESS) { - if (APR_STATUS_IS_EINTR(rv)) { - continue; - } - apr_socket_close(sock); - ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01023) "error apr_poll()"); - return HTTP_INTERNAL_SERVER_ERROR; - } -#ifdef DEBUGGING - ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(01024) - "woke from poll(), i=%d", pollcnt); -#endif - - for (pi = 0; pi < pollcnt; pi++) { - const apr_pollfd_t *cur = &signalled[pi]; + /* r->sent_bodyct = 1; */ - if (cur->desc.s == sock) { - pollevent = cur->rtnevents; - if (pollevent & (APR_POLLIN | APR_POLLHUP)) { -#ifdef DEBUGGING - ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(01025) - "sock was readable"); -#endif - done |= ap_proxy_transfer_between_connections(r, backconn, - c, bb_back, - bb_front, - "sock", NULL, - CONN_BLKSZ, 1) - != APR_SUCCESS; - } - else if (pollevent & APR_POLLERR) { - ap_log_rerror(APLOG_MARK, APLOG_NOTICE, 0, r, APLOGNO(01026) - "err on backconn"); - backconn->aborted = 1; - done = 1; - } - } - else if (cur->desc.s == client_socket) { - pollevent = cur->rtnevents; - if (pollevent & (APR_POLLIN | APR_POLLHUP)) { -#ifdef DEBUGGING - ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(01027) - "client was readable"); -#endif - done |= ap_proxy_transfer_between_connections(r, c, - backconn, - bb_front, - bb_back, - "client", - NULL, - CONN_BLKSZ, 1) - != APR_SUCCESS; - } - else if (pollevent & APR_POLLERR) { - ap_log_rerror(APLOG_MARK, APLOG_NOTICE, 0, r, APLOGNO(02827) - "err on client"); - c->aborted = 1; - done = 1; - } - } - else { - ap_log_rerror(APLOG_MARK, APLOG_INFO, 0, r, APLOGNO(01028) - "unknown socket in pollset"); - done = 1; - } + rv = ap_proxy_tunnel_create(&tunnel, r, backconn, "CONNECT"); + if (rv != APR_SUCCESS) { + ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(10208) + "can't create tunnel for %pI (%s)", + nexthop, connectname); + return HTTP_INTERNAL_SERVER_ERROR; + } + rc = ap_proxy_tunnel_run(tunnel); + if (ap_is_HTTP_ERROR(rc)) { + if (rc == HTTP_GATEWAY_TIME_OUT) { + /* ap_proxy_tunnel_run() didn't log this */ + ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(10224) + "tunnel timed out"); } - } while (!done); - - ap_log_rerror(APLOG_MARK, APLOG_TRACE2, 0, r, - "finished with poll() - cleaning up"); + /* Don't send an error page if we sent data already */ + if (proxyport && !tunnel->replied) { + return rc; + } + } /* * Step Five: Clean Up @@ -457,8 +377,6 @@ static int proxy_connect_handler(request_rec *r, proxy_worker *worker, else ap_lingering_close(backconn); - c->keepalive = AP_CONN_CLOSE; - return OK; } diff --git a/modules/proxy/mod_proxy_express.c b/modules/proxy/mod_proxy_express.c index 0f5d604..5d458c4 100644 
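The mod_proxy_connect.c hunks above replace the module's private apr_pollset relay loop with the shared tunnel helper from proxy_util.c. For a scheme handler adopting the same API, the flow reduces to the two calls used in this patch. The fragment below is only a sketch under that assumption: the function name and the "EXAMPLE" scheme label are invented, logging and cleanup are trimmed, and an already-connected backend conn_rec is taken for granted, as proxy_connect_handler() has at this point.

    #include "httpd.h"
    #include "http_log.h"
    #include "mod_proxy.h"

    /* Illustrative only: relay r->connection <-> backconn until one side
     * closes, mirroring the rewritten proxy_connect_handler() above. */
    static int example_tunnel(request_rec *r, conn_rec *backconn)
    {
        proxy_tunnel_rec *tunnel;
        apr_status_t rv;
        int rc;

        rv = ap_proxy_tunnel_create(&tunnel, r, backconn, "EXAMPLE");
        if (rv != APR_SUCCESS) {
            ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r,
                          "example: cannot create tunnel");
            return HTTP_INTERNAL_SERVER_ERROR;
        }

        rc = ap_proxy_tunnel_run(tunnel);       /* pumps both directions */
        if (ap_is_HTTP_ERROR(rc) && !tunnel->replied) {
            return rc;                          /* nothing sent yet, error page OK */
        }
        return OK;                              /* caller still closes/releases conns */
    }

ap_proxy_tunnel_run() only returns once a side has closed or an error occurred, which is why the old done/poll bookkeeping and the separate bb_front/bb_back brigades disappear from the handler.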
--- a/modules/proxy/mod_proxy_express.c +++ b/modules/proxy/mod_proxy_express.c @@ -19,6 +19,11 @@ module AP_MODULE_DECLARE_DATA proxy_express_module; +#include "apr_version.h" +#if !APR_VERSION_AT_LEAST(2,0,0) +#include "apu_version.h" +#endif + static int proxy_available = 0; typedef struct { @@ -115,6 +120,10 @@ static int xlate_name(request_rec *r) struct proxy_alias *ralias; proxy_dir_conf *dconf; express_server_conf *sconf; +#if APU_MAJOR_VERSION > 1 || (APU_MAJOR_VERSION == 1 && APU_MINOR_VERSION >= 7) + const apr_dbm_driver_t *driver; + const apu_err_t *err; +#endif sconf = ap_get_module_config(r->server->module_config, &proxy_express_module); dconf = ap_get_module_config(r->per_dir_config, &proxy_module); @@ -132,11 +141,31 @@ static int xlate_name(request_rec *r) ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(01002) "proxy_express: Opening DBM file: %s (%s)", sconf->dbmfile, sconf->dbmtype); + +#if APU_MAJOR_VERSION > 1 || (APU_MAJOR_VERSION == 1 && APU_MINOR_VERSION >= 7) + rv = apr_dbm_get_driver(&driver, sconf->dbmtype, &err, r->pool); + if (rv != APR_SUCCESS) { + ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, + APLOGNO(10275) "The dbm library '%s' could not be loaded: %s (%s: %d)", + sconf->dbmtype, err->msg, err->reason, err->rc); + return DECLINED; + } + + rv = apr_dbm_open2(&db, driver, sconf->dbmfile, APR_DBM_READONLY, + APR_OS_DEFAULT, r->pool); + if (rv != APR_SUCCESS) { + ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, + APLOGNO(10276) "The '%s' file '%s' could not be loaded", + sconf->dbmtype, sconf->dbmfile); + return DECLINED; + } +#else rv = apr_dbm_open_ex(&db, sconf->dbmtype, sconf->dbmfile, APR_DBM_READONLY, APR_OS_DEFAULT, r->pool); if (rv != APR_SUCCESS) { return DECLINED; } +#endif name = ap_get_server_name(r); ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(01003) diff --git a/modules/proxy/mod_proxy_fcgi.c b/modules/proxy/mod_proxy_fcgi.c index 2e97408..d420df6 100644 --- a/modules/proxy/mod_proxy_fcgi.c +++ b/modules/proxy/mod_proxy_fcgi.c @@ -92,15 +92,30 @@ static int proxy_fcgi_canon(request_rec *r, char *url) host = apr_pstrcat(r->pool, "[", host, "]", NULL); } - if (apr_table_get(r->notes, "proxy-nocanon")) { - path = url; /* this is the raw path */ + if (apr_table_get(r->notes, "proxy-nocanon") + || apr_table_get(r->notes, "proxy-noencode")) { + path = url; /* this is the raw/encoded path */ } else { - path = ap_proxy_canonenc(r->pool, url, strlen(url), enc_path, 0, - r->proxyreq); + core_dir_config *d = ap_get_core_module_config(r->per_dir_config); + int flags = d->allow_encoded_slashes && !d->decode_encoded_slashes ? PROXY_CANONENC_NOENCODEDSLASHENCODING : 0; + + path = ap_proxy_canonenc_ex(r->pool, url, strlen(url), enc_path, flags, + r->proxyreq); + if (!path) { + return HTTP_BAD_REQUEST; + } + } + /* + * If we have a raw control character or a ' ' in nocanon path, + * correct encoding was missed. 
+ */ + if (path == url && *ap_scan_vchar_obstext(path)) { + ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(10414) + "To be forwarded path contains control " + "characters or spaces"); + return HTTP_FORBIDDEN; } - if (path == NULL) - return HTTP_BAD_REQUEST; r->filename = apr_pstrcat(r->pool, "proxy:fcgi://", host, sport, "/", path, NULL); @@ -164,7 +179,7 @@ static int proxy_fcgi_canon(request_rec *r, char *url) ProxyFCGISetEnvIf "reqenv('PATH_INFO') =~ m#/foo(\d+)\.php$#" PATH_INFO "/foo.php" ProxyFCGISetEnvIf "reqenv('PATH_TRANSLATED') =~ m#(/.*foo)(\d+)(.*)#" PATH_TRANSLATED "$1$3" */ -static void fix_cgivars(request_rec *r, fcgi_dirconf_t *dconf) +static apr_status_t fix_cgivars(request_rec *r, fcgi_dirconf_t *dconf) { sei_entry *entries; const char *err, *src; @@ -175,10 +190,21 @@ static void fix_cgivars(request_rec *r, fcgi_dirconf_t *dconf) for (i = 0; i < dconf->env_fixups->nelts; i++) { sei_entry *entry = &entries[i]; + rc = ap_expr_exec_re(r, entry->cond, AP_MAX_REG_MATCH, regm, &src, &err); + if (rc < 0) { + ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(10241) + "fix_cgivars: Condition eval returned %d: %s", + rc, err); + return APR_EGENERAL; + } + else if (rc == 0) { + continue; /* evaluated false */ + } + if (entry->envname[0] == '!') { apr_table_unset(r->subprocess_env, entry->envname+1); } - else if (0 < (rc = ap_expr_exec_re(r, entry->cond, AP_MAX_REG_MATCH, regm, &src, &err))) { + else { const char *val = ap_expr_str_exec_re(r, entry->subst, AP_MAX_REG_MATCH, regm, &src, &err); if (err) { ap_log_rerror(APLOG_MARK, APLOG_INFO, 0, r, APLOGNO(03514) @@ -195,10 +221,8 @@ static void fix_cgivars(request_rec *r, fcgi_dirconf_t *dconf) } apr_table_setn(r->subprocess_env, entry->envname, val); } - else { - ap_log_rerror(APLOG_MARK, APLOG_TRACE8, 0, r, "fix_cgivars: Condition returned %d", rc); - } } + return APR_SUCCESS; } /* Wrapper for apr_socket_sendv that handles updating the worker stats. */ @@ -367,7 +391,9 @@ static apr_status_t send_environment(proxy_conn_rec *conn, request_rec *r, /* XXX are there any FastCGI specific env vars we need to send? */ /* Give admins final option to fine-tune env vars */ - fix_cgivars(r, dconf); + if (APR_SUCCESS != (rv = fix_cgivars(r, dconf))) { + return rv; + } /* XXX mod_cgi/mod_cgid use ap_create_environment here, which fills in * the TZ value specially. 
We could use that, but it would mean @@ -521,7 +547,8 @@ static int handle_headers(request_rec *r, int *state, static apr_status_t dispatch(proxy_conn_rec *conn, proxy_dir_conf *conf, request_rec *r, apr_pool_t *setaside_pool, apr_uint16_t request_id, const char **err, - int *bad_request, int *has_responded) + int *bad_request, int *has_responded, + apr_bucket_brigade *input_brigade) { apr_bucket_brigade *ib, *ob; int seen_end_of_headers = 0, done = 0, ignore_body = 0; @@ -583,9 +610,26 @@ static apr_status_t dispatch(proxy_conn_rec *conn, proxy_dir_conf *conf, int last_stdin = 0; char *iobuf_cursor; - rv = ap_get_brigade(r->input_filters, ib, - AP_MODE_READBYTES, APR_BLOCK_READ, - iobuf_size); + if (APR_BRIGADE_EMPTY(input_brigade)) { + rv = ap_get_brigade(r->input_filters, ib, + AP_MODE_READBYTES, APR_BLOCK_READ, + iobuf_size); + } + else { + apr_bucket *e; + APR_BRIGADE_CONCAT(ib, input_brigade); + rv = apr_brigade_partition(ib, iobuf_size, &e); + if (rv == APR_SUCCESS) { + while (e != APR_BRIGADE_SENTINEL(ib) + && APR_BUCKET_IS_METADATA(e)) { + e = APR_BUCKET_NEXT(e); + } + apr_brigade_split_ex(ib, e, input_brigade); + } + else if (rv == APR_INCOMPLETE) { + rv = APR_SUCCESS; + } + } if (rv != APR_SUCCESS) { *err = "reading input brigade"; *bad_request = 1; @@ -735,6 +779,15 @@ recv_again: status = ap_scan_script_header_err_brigade_ex(r, ob, NULL, APLOG_MODULE_INDEX); + + /* FCGI has its own body framing mechanism which we don't + * match against any provided Content-Length, so let the + * core determine C-L vs T-E based on what's actually sent. + */ + if (!apr_table_get(r->subprocess_env, AP_TRUST_CGILIKE_CL_ENVVAR)) + apr_table_unset(r->headers_out, "Content-Length"); + apr_table_unset(r->headers_out, "Transfer-Encoding"); + /* suck in all the rest */ if (status != OK) { apr_bucket *tmp_b; @@ -771,8 +824,7 @@ recv_again: } } - if (conf->error_override - && ap_is_HTTP_ERROR(r->status) && ap_is_initial_req(r)) { + if (ap_proxy_should_override(conf, r->status) && ap_is_initial_req(r)) { /* * set script_error_status to discard * everything after the headers @@ -924,7 +976,8 @@ static int fcgi_do_request(apr_pool_t *p, request_rec *r, conn_rec *origin, proxy_dir_conf *conf, apr_uri_t *uri, - char *url, char *server_portstr) + char *url, char *server_portstr, + apr_bucket_brigade *input_brigade) { /* Request IDs are arbitrary numbers that we assign to a * single request. This would allow multiplex/pipelining of @@ -948,6 +1001,7 @@ static int fcgi_do_request(apr_pool_t *p, request_rec *r, } apr_pool_create(&temp_pool, r->pool); + apr_pool_tag(temp_pool, "proxy_fcgi_do_request"); /* Step 2: Send Environment via FCGI_PARAMS */ rv = send_environment(conn, r, temp_pool, request_id); @@ -960,7 +1014,8 @@ static int fcgi_do_request(apr_pool_t *p, request_rec *r, /* Step 3: Read records from the back end server and handle them. 
*/ rv = dispatch(conn, conf, r, temp_pool, request_id, - &err, &bad_request, &has_responded); + &err, &bad_request, &has_responded, + input_brigade); if (rv != APR_SUCCESS) { /* If the client aborted the connection during retrieval or (partially) * sending the response, don't return a HTTP_SERVICE_UNAVAILABLE, since @@ -996,6 +1051,8 @@ static int fcgi_do_request(apr_pool_t *p, request_rec *r, #define FCGI_SCHEME "FCGI" +#define MAX_MEM_SPOOL 16384 + /* * This handles fcgi:(dest) URLs */ @@ -1008,6 +1065,8 @@ static int proxy_fcgi_handler(request_rec *r, proxy_worker *worker, char server_portstr[32]; conn_rec *origin = NULL; proxy_conn_rec *backend = NULL; + apr_bucket_brigade *input_brigade; + apr_off_t input_bytes = 0; apr_uri_t *uri; proxy_dir_conf *dconf = ap_get_module_config(r->per_dir_config, @@ -1050,6 +1109,101 @@ static int proxy_fcgi_handler(request_rec *r, proxy_worker *worker, goto cleanup; } + /* We possibly reuse input data prefetched in previous call(s), e.g. for a + * balancer fallback scenario. + */ + apr_pool_userdata_get((void **)&input_brigade, "proxy-fcgi-input", p); + if (input_brigade == NULL) { + const char *old_te = apr_table_get(r->headers_in, "Transfer-Encoding"); + const char *old_cl = NULL; + if (old_te) { + apr_table_unset(r->headers_in, "Content-Length"); + } + else { + old_cl = apr_table_get(r->headers_in, "Content-Length"); + } + + input_brigade = apr_brigade_create(p, r->connection->bucket_alloc); + apr_pool_userdata_setn(input_brigade, "proxy-fcgi-input", NULL, p); + + /* Prefetch (nonlocking) the request body so to increase the chance + * to get the whole (or enough) body and determine Content-Length vs + * chunked or spooled. By doing this before connecting or reusing the + * backend, we want to minimize the delay between this connection is + * considered alive and the first bytes sent (should the client's link + * be slow or some input filter retain the data). This is a best effort + * to prevent the backend from closing (from under us) what it thinks is + * an idle connection, hence to reduce to the minimum the unavoidable + * local is_socket_connected() vs remote keepalive race condition. + */ + status = ap_proxy_prefetch_input(r, backend, input_brigade, + APR_NONBLOCK_READ, &input_bytes, + MAX_MEM_SPOOL); + if (status != OK) { + goto cleanup; + } + + /* + * The request body is streamed by default, using either C-L or + * chunked T-E, like this: + * + * The whole body (including no body) was received on prefetch, i.e. + * the input brigade ends with EOS => C-L = input_bytes. + * + * C-L is known and reliable, i.e. only protocol filters in the input + * chain thus none should change the body => use C-L from client. + * + * The administrator has not "proxy-sendcl" which prevents T-E => use + * T-E and chunks. + * + * Otherwise we need to determine and set a content-length, so spool + * the entire request body to memory/temporary file (MAX_MEM_SPOOL), + * such that we finally know its length => C-L = input_bytes. + */ + if (!APR_BRIGADE_EMPTY(input_brigade) + && APR_BUCKET_IS_EOS(APR_BRIGADE_LAST(input_brigade))) { + /* The whole thing fit, so our decision is trivial, use the input + * bytes for the Content-Length. If we expected no body, and read + * no body, do not set the Content-Length. 
+ */ + if (old_cl || old_te || input_bytes) { + apr_table_setn(r->headers_in, "Content-Length", + apr_off_t_toa(p, input_bytes)); + if (old_te) { + apr_table_unset(r->headers_in, "Transfer-Encoding"); + } + } + } + else if (old_cl && r->input_filters == r->proto_input_filters) { + /* Streaming is possible by preserving the existing C-L */ + } + else if (!apr_table_get(r->subprocess_env, "proxy-sendcl")) { + /* Streaming is possible using T-E: chunked */ + } + else { + /* No streaming, C-L is the only option so spool to memory/file */ + apr_bucket_brigade *tmp_bb; + apr_off_t remaining_bytes = 0; + + AP_DEBUG_ASSERT(MAX_MEM_SPOOL >= input_bytes); + tmp_bb = apr_brigade_create(p, r->connection->bucket_alloc); + status = ap_proxy_spool_input(r, backend, tmp_bb, &remaining_bytes, + MAX_MEM_SPOOL - input_bytes); + if (status != OK) { + goto cleanup; + } + + APR_BRIGADE_CONCAT(input_brigade, tmp_bb); + input_bytes += remaining_bytes; + + apr_table_setn(r->headers_in, "Content-Length", + apr_off_t_toa(p, input_bytes)); + if (old_te) { + apr_table_unset(r->headers_in, "Transfer-Encoding"); + } + } + } + /* This scheme handler does not reuse connections by default, to * avoid tying up a fastcgi that isn't expecting to work on * parallel requests. But if the user went out of their way to @@ -1074,7 +1228,7 @@ static int proxy_fcgi_handler(request_rec *r, proxy_worker *worker, /* Step Three: Process the Request */ status = fcgi_do_request(p, r, backend, origin, dconf, uri, url, - server_portstr); + server_portstr, input_brigade); cleanup: ap_proxy_release_connection(FCGI_SCHEME, backend, r->server); diff --git a/modules/proxy/mod_proxy_fdpass.c b/modules/proxy/mod_proxy_fdpass.c index 195b0fd..8f9893d 100644 --- a/modules/proxy/mod_proxy_fdpass.c +++ b/modules/proxy/mod_proxy_fdpass.c @@ -32,7 +32,7 @@ static int proxy_fdpass_canon(request_rec *r, char *url) { const char *path; - if (strncasecmp(url, "fd://", 5) == 0) { + if (ap_cstr_casecmpn(url, "fd://", 5) == 0) { url += 5; } else { @@ -129,7 +129,7 @@ static int proxy_fdpass_handler(request_rec *r, proxy_worker *worker, apr_socket_t *sock; apr_socket_t *clientsock; - if (strncasecmp(url, "fd://", 5) == 0) { + if (ap_cstr_casecmpn(url, "fd://", 5) == 0) { url += 5; } else { diff --git a/modules/proxy/mod_proxy_ftp.c b/modules/proxy/mod_proxy_ftp.c index 4a10987..e0032e5 100644 --- a/modules/proxy/mod_proxy_ftp.c +++ b/modules/proxy/mod_proxy_ftp.c @@ -23,11 +23,6 @@ #endif #include "apr_version.h" -#if (APR_MAJOR_VERSION < 1) -#undef apr_socket_create -#define apr_socket_create apr_socket_create_ex -#endif - #define AUTODETECT_PWD /* Automatic timestamping (Last-Modified header) based on MDTM is used if: * 1) the FTP server supports the MDTM command and @@ -218,7 +213,7 @@ static int ftp_check_string(const char *x) * (EBCDIC) machines either. 
*/ static apr_status_t ftp_string_read(conn_rec *c, apr_bucket_brigade *bb, - char *buff, apr_size_t bufflen, int *eos) + char *buff, apr_size_t bufflen, int *eos, apr_size_t *outlen) { apr_bucket *e; apr_status_t rv; @@ -230,6 +225,7 @@ static apr_status_t ftp_string_read(conn_rec *c, apr_bucket_brigade *bb, /* start with an empty string */ buff[0] = 0; *eos = 0; + *outlen = 0; /* loop through each brigade */ while (!found) { @@ -273,6 +269,7 @@ static apr_status_t ftp_string_read(conn_rec *c, apr_bucket_brigade *bb, if (len > 0) { memcpy(pos, response, len); pos += len; + *outlen += len; } } apr_bucket_delete(e); @@ -292,9 +289,11 @@ static int proxy_ftp_canon(request_rec *r, char *url) apr_pool_t *p = r->pool; const char *err; apr_port_t port, def_port; + core_dir_config *d = ap_get_core_module_config(r->per_dir_config); + int flags = d->allow_encoded_slashes && !d->decode_encoded_slashes ? PROXY_CANONENC_NOENCODEDSLASHENCODING : 0; /* */ - if (strncasecmp(url, "ftp:", 4) == 0) { + if (ap_cstr_casecmpn(url, "ftp:", 4) == 0) { url += 4; } else { @@ -330,7 +329,8 @@ static int proxy_ftp_canon(request_rec *r, char *url) else parms = ""; - path = ap_proxy_canonenc(p, url, strlen(url), enc_path, 0, r->proxyreq); + path = ap_proxy_canonenc_ex(p, url, strlen(url), enc_path, flags, + r->proxyreq); if (path == NULL) return HTTP_BAD_REQUEST; if (!ftp_check_string(path)) @@ -385,28 +385,36 @@ static int ftp_getrc_msg(conn_rec *ftp_ctrl, apr_bucket_brigade *bb, char *msgbu char buff[5]; char *mb = msgbuf, *me = &msgbuf[msglen]; apr_status_t rv; + apr_size_t nread; + int eos; - if (APR_SUCCESS != (rv = ftp_string_read(ftp_ctrl, bb, response, sizeof(response), &eos))) { + if (APR_SUCCESS != (rv = ftp_string_read(ftp_ctrl, bb, response, sizeof(response), &eos, &nread))) { return -1; } /* ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, NULL, APLOGNO(03233) "<%s", response); */ + if (nread < 4) { + ap_log_error(APLOG_MARK, APLOG_INFO, 0, NULL, APLOGNO(10229) "Malformed FTP response '%s'", response); + *mb = '\0'; + return -1; + } + if (!apr_isdigit(response[0]) || !apr_isdigit(response[1]) || - !apr_isdigit(response[2]) || (response[3] != ' ' && response[3] != '-')) + !apr_isdigit(response[2]) || (response[3] != ' ' && response[3] != '-')) status = 0; else status = 100 * response[0] + 10 * response[1] + response[2] - 111 * '0'; mb = apr_cpystrn(mb, response + 4, me - mb); - if (response[3] == '-') { + if (response[3] == '-') { /* multi-line reply "123-foo\nbar\n123 baz" */ memcpy(buff, response, 3); buff[3] = ' '; do { - if (APR_SUCCESS != (rv = ftp_string_read(ftp_ctrl, bb, response, sizeof(response), &eos))) { + if (APR_SUCCESS != (rv = ftp_string_read(ftp_ctrl, bb, response, sizeof(response), &eos, &nread))) { return -1; } mb = apr_cpystrn(mb, response + (' ' == response[0] ? 
1 : 4), me - mb); @@ -494,7 +502,7 @@ static apr_status_t proxy_send_dir_filter(ap_filter_t *f, path = apr_uri_unparse(p, &f->r->parsed_uri, APR_URI_UNP_OMITSITEPART | APR_URI_UNP_OMITQUERY); /* If path began with /%2f, change the basedir */ - if (strncasecmp(path, "/%2f", 4) == 0) { + if (ap_cstr_casecmpn(path, "/%2f", 4) == 0) { basedir = "/%2f"; } @@ -813,17 +821,19 @@ proxy_ftp_command(const char *cmd, request_rec *r, conn_rec *ftp_ctrl, APR_BRIGADE_INSERT_TAIL(bb, apr_bucket_flush_create(c->bucket_alloc)); ap_pass_brigade(ftp_ctrl->output_filters, bb); - /* strip off the CRLF for logging */ - apr_cpystrn(message, cmd, sizeof(message)); - if ((crlf = strchr(message, '\r')) != NULL || - (crlf = strchr(message, '\n')) != NULL) - *crlf = '\0'; - if (strncmp(message,"PASS ", 5) == 0) - strcpy(&message[5], "****"); - ap_log_rerror(APLOG_MARK, APLOG_TRACE2, 0, r, ">%s", message); + if (APLOGrtrace2(r)) { + /* strip off the CRLF for logging */ + apr_cpystrn(message, cmd, sizeof(message)); + if ((crlf = strchr(message, '\r')) != NULL || + (crlf = strchr(message, '\n')) != NULL) + *crlf = '\0'; + if (strncmp(message,"PASS ", 5) == 0) + strcpy(&message[5], "****"); + ap_log_rerror(APLOG_MARK, APLOG_TRACE2, 0, r, ">%s", message); + } } - rc = ftp_getrc_msg(ftp_ctrl, bb, message, sizeof message); + rc = ftp_getrc_msg(ftp_ctrl, bb, message, sizeof(message)); if (rc == -1 || rc == 421) strcpy(message,""); if ((crlf = strchr(message, '\r')) != NULL || @@ -909,7 +919,7 @@ static char *ftp_get_PWD(request_rec *r, conn_rec *ftp_ctrl, apr_bucket_brigade * with username and password (which was presumably queried from the user) * supplied in the Authorization: header. * Note that we "invent" a realm name which consists of the - * ftp://user@host part of the reqest (sans password -if supplied but invalid-) + * ftp://user@host part of the request (sans password -if supplied but invalid-) */ static int ftp_unauthorized(request_rec *r, int log_it) { @@ -965,12 +975,9 @@ static int proxy_ftp_handler(request_rec *r, proxy_worker *worker, conn_rec *c = r->connection; proxy_conn_rec *backend; apr_socket_t *sock, *local_sock, *data_sock = NULL; - apr_sockaddr_t *connect_addr = NULL; - apr_status_t rv; conn_rec *origin, *data = NULL; apr_status_t err = APR_SUCCESS; - apr_status_t uerr = APR_SUCCESS; - apr_bucket_brigade *bb = apr_brigade_create(p, c->bucket_alloc); + apr_bucket_brigade *bb; char *buf, *connectname; apr_port_t connectport; char *ftpmessage = NULL; @@ -993,8 +1000,8 @@ static int proxy_ftp_handler(request_rec *r, proxy_worker *worker, /* stuff for PASV mode */ int connect = 0, use_port = 0; char dates[APR_RFC822_DATE_LEN]; + apr_status_t rv; int status; - apr_pool_t *address_pool; /* is this for us? 
*/ if (proxyhost) { @@ -1003,7 +1010,7 @@ static int proxy_ftp_handler(request_rec *r, proxy_worker *worker, proxyhost); return DECLINED; /* proxy connections are via HTTP */ } - if (strncasecmp(url, "ftp:", 4)) { + if (ap_cstr_casecmpn(url, "ftp:", 4)) { ap_log_rerror(APLOG_MARK, APLOG_TRACE3, 0, r, "declining URL %s - not ftp:", url); return DECLINED; /* only interested in FTP */ @@ -1024,8 +1031,9 @@ static int proxy_ftp_handler(request_rec *r, proxy_worker *worker, /* We break the URL into host, port, path-search */ if (r->parsed_uri.hostname == NULL) { if (APR_SUCCESS != apr_uri_parse(p, url, &uri)) { - return ap_proxyerror(r, HTTP_BAD_REQUEST, - apr_psprintf(p, "URI cannot be parsed: %s", url)); + ap_log_rerror(APLOG_MARK, APLOG_WARNING, 0, r, APLOGNO(10189) + "URI cannot be parsed: %s", url); + return ap_proxyerror(r, HTTP_BAD_REQUEST, "URI cannot be parsed"); } connectname = uri.hostname; connectport = uri.port; @@ -1074,7 +1082,7 @@ static int proxy_ftp_handler(request_rec *r, proxy_worker *worker, * still smaller that the URL is logged regularly. */ if ((password = apr_table_get(r->headers_in, "Authorization")) != NULL - && strcasecmp(ap_getword(r->pool, &password, ' '), "Basic") == 0 + && ap_cstr_casecmp(ap_getword(r->pool, &password, ' '), "Basic") == 0 && (password = ap_pbase64decode(r->pool, password))[0] != ':') { /* Check the decoded string for special characters. */ if (!ftp_check_string(password)) { @@ -1107,61 +1115,35 @@ static int proxy_ftp_handler(request_rec *r, proxy_worker *worker, ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(01036) "connecting %s to %s:%d", url, connectname, connectport); - if (worker->s->is_address_reusable) { - if (!worker->cp->addr) { - if ((err = PROXY_THREAD_LOCK(worker->balancer)) != APR_SUCCESS) { - ap_log_rerror(APLOG_MARK, APLOG_ERR, err, r, APLOGNO(01037) "lock"); - return HTTP_INTERNAL_SERVER_ERROR; + /* create space for state information */ + backend = ap_get_module_config(c->conn_config, &proxy_ftp_module); + if (!backend) { + status = ap_proxy_acquire_connection("FTP", &backend, worker, r->server); + if (status != OK) { + if (backend) { + backend->close = 1; + ap_proxy_release_connection("FTP", backend, r->server); } + return status; } - connect_addr = worker->cp->addr; - address_pool = worker->cp->pool; + ap_set_module_config(c->conn_config, &proxy_ftp_module, backend); } - else - address_pool = r->pool; - /* do a DNS lookup for the destination host */ - if (!connect_addr) - err = apr_sockaddr_info_get(&(connect_addr), - connectname, APR_UNSPEC, - connectport, 0, - address_pool); - if (worker->s->is_address_reusable && !worker->cp->addr) { - worker->cp->addr = connect_addr; - if ((uerr = PROXY_THREAD_UNLOCK(worker->balancer)) != APR_SUCCESS) { - ap_log_rerror(APLOG_MARK, APLOG_ERR, uerr, r, APLOGNO(01038) "unlock"); - } - } /* * get all the possible IP addresses for the destname and loop through * them until we get a successful connection */ + err = ap_proxy_determine_address("FTP", backend, connectname, connectport, + 0, r, r->server); if (APR_SUCCESS != err) { - return ap_proxyerror(r, HTTP_BAD_GATEWAY, apr_pstrcat(p, - "DNS lookup failure for: ", - connectname, NULL)); + return ftp_proxyerror(r, backend, HTTP_BAD_GATEWAY, + "Error resolving backend address"); } /* check if ProxyBlock directive on this host */ - if (OK != ap_proxy_checkproxyblock2(r, conf, connectname, connect_addr)) { - return ap_proxyerror(r, HTTP_FORBIDDEN, - "Connect to remote machine blocked"); - } - - /* create space for state information */ - 
backend = (proxy_conn_rec *) ap_get_module_config(c->conn_config, &proxy_ftp_module); - if (!backend) { - status = ap_proxy_acquire_connection("FTP", &backend, worker, r->server); - if (status != OK) { - if (backend) { - backend->close = 1; - ap_proxy_release_connection("FTP", backend, r->server); - } - return status; - } - /* TODO: see if ftp could use determine_connection */ - backend->addr = connect_addr; - ap_set_module_config(c->conn_config, &proxy_ftp_module, backend); + if (OK != ap_proxy_checkproxyblock2(r, conf, connectname, backend->addr)) { + return ftp_proxyerror(r, backend, HTTP_FORBIDDEN, + "Connect to remote machine blocked"); } @@ -1171,21 +1153,15 @@ static int proxy_ftp_handler(request_rec *r, proxy_worker *worker, * We have determined who to connect to. Now make the connection. */ - if (ap_proxy_connect_backend("FTP", backend, worker, r->server)) { - ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(01039) - "an error occurred creating a new connection to %pI (%s)", - connect_addr, connectname); proxy_ftp_cleanup(r, backend); return HTTP_SERVICE_UNAVAILABLE; } - if (!backend->connection) { - status = ap_proxy_connection_create_ex("FTP", backend, r); - if (status != OK) { - proxy_ftp_cleanup(r, backend); - return status; - } + status = ap_proxy_connection_create_ex("FTP", backend, r); + if (status != OK) { + proxy_ftp_cleanup(r, backend); + return status; } /* Use old naming */ @@ -1203,6 +1179,7 @@ static int proxy_ftp_handler(request_rec *r, proxy_worker *worker, * correct directory... */ + bb = apr_brigade_create(p, c->bucket_alloc); /* possible results: */ /* 120 Service ready in nnn minutes. */ @@ -1306,7 +1283,7 @@ static int proxy_ftp_handler(request_rec *r, proxy_worker *worker, /* Special handling for leading "%2f": this enforces a "cwd /" * out of the $HOME directory which was the starting point after login */ - if (strncasecmp(path, "%2f", 3) == 0) { + if (ap_cstr_casecmpn(path, "%2f", 3) == 0) { path += 3; while (*path == '/') /* skip leading '/' (after root %2f) */ ++path; @@ -1520,7 +1497,8 @@ static int proxy_ftp_handler(request_rec *r, proxy_worker *worker, "PASV contacting host %d.%d.%d.%d:%d", h3, h2, h1, h0, pasvport); - if ((rv = apr_socket_create(&data_sock, connect_addr->family, SOCK_STREAM, 0, r->pool)) != APR_SUCCESS) { + if ((rv = apr_socket_create(&data_sock, backend->addr->family, + SOCK_STREAM, 0, r->pool)) != APR_SUCCESS) { ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01045) "error creating PASV socket"); proxy_ftp_cleanup(r, backend); @@ -1542,7 +1520,14 @@ static int proxy_ftp_handler(request_rec *r, proxy_worker *worker, } /* make the connection */ - apr_sockaddr_info_get(&pasv_addr, apr_psprintf(p, "%d.%d.%d.%d", h3, h2, h1, h0), connect_addr->family, pasvport, 0, p); + err = apr_sockaddr_info_get(&pasv_addr, apr_psprintf(p, "%d.%d.%d.%d", + h3, h2, h1, h0), + backend->addr->family, pasvport, 0, p); + if (APR_SUCCESS != err) { + return ftp_proxyerror(r, backend, HTTP_BAD_GATEWAY, + apr_pstrcat(p, "DNS lookup failure for: ", + connectname, NULL)); + } rv = apr_socket_connect(data_sock, pasv_addr); if (rv != APR_SUCCESS) { ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01048) @@ -1565,7 +1550,8 @@ static int proxy_ftp_handler(request_rec *r, proxy_worker *worker, apr_port_t local_port; unsigned int h0, h1, h2, h3, p0, p1; - if ((rv = apr_socket_create(&local_sock, connect_addr->family, SOCK_STREAM, 0, r->pool)) != APR_SUCCESS) { + if ((rv = apr_socket_create(&local_sock, backend->addr->family, + SOCK_STREAM, 0, r->pool)) != 
APR_SUCCESS) { ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01049) "error creating local socket"); proxy_ftp_cleanup(r, backend); @@ -1585,7 +1571,12 @@ static int proxy_ftp_handler(request_rec *r, proxy_worker *worker, #endif /* _OSD_POSIX */ } - apr_sockaddr_info_get(&local_addr, local_ip, APR_UNSPEC, local_port, 0, r->pool); + err = apr_sockaddr_info_get(&local_addr, local_ip, APR_UNSPEC, local_port, 0, r->pool); + if (APR_SUCCESS != err) { + return ftp_proxyerror(r, backend, HTTP_BAD_GATEWAY, + apr_pstrcat(p, "DNS lookup failure for: ", + connectname, NULL)); + } if ((rv = apr_socket_bind(local_sock, local_addr)) != APR_SUCCESS) { ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01051) diff --git a/modules/proxy/mod_proxy_hcheck.c b/modules/proxy/mod_proxy_hcheck.c index 2783a58..70f1de8 100644 --- a/modules/proxy/mod_proxy_hcheck.c +++ b/modules/proxy/mod_proxy_hcheck.c @@ -20,6 +20,7 @@ #if APR_HAS_THREADS #include "apr_thread_pool.h" #endif +#include "http_ssl.h" module AP_MODULE_DECLARE_DATA proxy_hcheck_module; @@ -33,7 +34,6 @@ module AP_MODULE_DECLARE_DATA proxy_hcheck_module; #endif #else #define HC_USE_THREADS 0 -typedef void apr_thread_pool_t; #endif typedef struct { @@ -65,6 +65,7 @@ typedef struct { const char *method; /* Method string for the HTTP/AJP request */ const char *req; /* pre-formatted HTTP/AJP request */ proxy_worker *w; /* Pointer to the actual worker */ + const char *protocol; /* HTTP 1.0 or 1.1? */ } wctx_t; typedef struct { @@ -73,9 +74,11 @@ typedef struct { proxy_balancer *balancer; proxy_worker *worker; proxy_worker *hc; - apr_time_t now; + apr_time_t *now; } baton_t; +static APR_OPTIONAL_FN_TYPE(ajp_handle_cping_cpong) *ajp_handle_cping_cpong = NULL; + static void *hc_create_config(apr_pool_t *p, server_rec *s) { sctx_t *ctx = apr_pcalloc(p, sizeof(sctx_t)); @@ -89,7 +92,10 @@ static void *hc_create_config(apr_pool_t *p, server_rec *s) } static ap_watchdog_t *watchdog; -static int tpsize = HC_THREADPOOL_SIZE; +#if HC_USE_THREADS +static apr_thread_pool_t *hctp; +static int tpsize; +#endif /* * This serves double duty by not only validating (and creating) @@ -110,6 +116,10 @@ static const char *set_worker_hc_param(apr_pool_t *p, if (!worker && !v) { return "Bad call to set_worker_hc_param()"; } + if (!ctx) { + ctx = hc_create_config(p, s); + ap_set_module_config(s->module_config, &proxy_hcheck_module, ctx); + } temp = (hc_template_t *)v; if (!strcasecmp(key, "hctemplate")) { hc_template_t *template; @@ -333,7 +343,8 @@ static const char *set_hc_tpsize (cmd_parms *cmd, void *dummy, const char *arg) */ static request_rec *create_request_rec(apr_pool_t *p, server_rec *s, proxy_balancer *balancer, - const char *method) + const char *method, + const char *protocol) { request_rec *r; @@ -391,10 +402,12 @@ static request_rec *create_request_rec(apr_pool_t *p, server_rec *s, else { r->header_only = 0; } - r->protocol = "HTTP/1.0"; r->proto_num = HTTP_VERSION(1, 0); - + if ( protocol && (protocol[7] == '1') ) { + r->protocol = "HTTP/1.1"; + r->proto_num = HTTP_VERSION(1, 1); + } r->hostname = NULL; return r; @@ -418,31 +431,43 @@ static void create_hcheck_req(wctx_t *wctx, proxy_worker *hc, { char *req = NULL; const char *method = NULL; + const char *protocol = NULL; + + /* TODO: Fold into switch/case below? 
This seems more obvious */ + if ( (hc->s->method == OPTIONS11) || (hc->s->method == HEAD11) || (hc->s->method == GET11) ) { + protocol = "HTTP/1.1"; + } else { + protocol = "HTTP/1.0"; + } switch (hc->s->method) { case OPTIONS: + case OPTIONS11: method = "OPTIONS"; req = apr_psprintf(p, - "OPTIONS * HTTP/1.0\r\n" + "OPTIONS * %s\r\n" "Host: %s:%d\r\n" - "\r\n", + "\r\n", protocol, hc->s->hostname_ex, (int)hc->s->port); break; case HEAD: + case HEAD11: method = "HEAD"; /* fallthru */ case GET: + case GET11: if (!method) { /* did we fall thru? If not, we are GET */ method = "GET"; } req = apr_psprintf(p, - "%s %s%s%s HTTP/1.0\r\n" + "%s %s%s%s %s\r\n" "Host: %s:%d\r\n" "\r\n", method, (wctx->path ? wctx->path : ""), (wctx->path && *hc->s->hcuri ? "/" : "" ), (*hc->s->hcuri ? hc->s->hcuri : ""), + protocol, hc->s->hostname_ex, (int)hc->s->port); break; @@ -451,6 +476,7 @@ static void create_hcheck_req(wctx_t *wctx, proxy_worker *hc, } wctx->req = req; wctx->method = method; + wctx->protocol = protocol; } static proxy_worker *hc_get_hcworker(sctx_t *ctx, proxy_worker *worker, @@ -463,7 +489,7 @@ static proxy_worker *hc_get_hcworker(sctx_t *ctx, proxy_worker *worker, if (!hc) { apr_uri_t uri; apr_status_t rv; - const char *url = worker->s->name; + const char *url = worker->s->name_ex; wctx_t *wctx = apr_pcalloc(ctx->p, sizeof(wctx_t)); port = (worker->s->port ? worker->s->port @@ -473,16 +499,25 @@ static proxy_worker *hc_get_hcworker(sctx_t *ctx, proxy_worker *worker, worker, worker->s->scheme, worker->s->hostname_ex, (int)port); - ap_proxy_define_worker(ctx->p, &hc, NULL, NULL, worker->s->name, 0); + ap_proxy_define_worker(ctx->p, &hc, NULL, NULL, worker->s->name_ex, 0); apr_snprintf(hc->s->name, sizeof hc->s->name, "%pp", worker); + apr_snprintf(hc->s->name_ex, sizeof hc->s->name_ex, "%pp", worker); PROXY_STRNCPY(hc->s->hostname, worker->s->hostname); /* for compatibility */ PROXY_STRNCPY(hc->s->hostname_ex, worker->s->hostname_ex); PROXY_STRNCPY(hc->s->scheme, worker->s->scheme); PROXY_STRNCPY(hc->s->hcuri, worker->s->hcuri); PROXY_STRNCPY(hc->s->hcexpr, worker->s->hcexpr); - hc->hash.def = hc->s->hash.def = ap_proxy_hashfunc(hc->s->name, PROXY_HASHFUNC_DEFAULT); - hc->hash.fnv = hc->s->hash.fnv = ap_proxy_hashfunc(hc->s->name, PROXY_HASHFUNC_FNV); + hc->hash.def = hc->s->hash.def = ap_proxy_hashfunc(hc->s->name_ex, + PROXY_HASHFUNC_DEFAULT); + hc->hash.fnv = hc->s->hash.fnv = ap_proxy_hashfunc(hc->s->name_ex, + PROXY_HASHFUNC_FNV); hc->s->port = port; + hc->s->conn_timeout_set = worker->s->conn_timeout_set; + hc->s->conn_timeout = worker->s->conn_timeout; + hc->s->ping_timeout_set = worker->s->ping_timeout_set; + hc->s->ping_timeout = worker->s->ping_timeout; + hc->s->timeout_set = worker->s->timeout_set; + hc->s->timeout = worker->s->timeout; /* Do not disable worker in case of errors */ hc->s->status |= PROXY_WORKER_IGNORE_ERRORS; /* Mark as the "generic" worker */ @@ -516,33 +551,78 @@ static proxy_worker *hc_get_hcworker(sctx_t *ctx, proxy_worker *worker, return hc; } -static int hc_determine_connection(sctx_t *ctx, proxy_worker *worker, - apr_sockaddr_t **addr, apr_pool_t *p) +static int hc_determine_connection(const char *proxy_function, + proxy_conn_rec *backend, + server_rec *s) { - apr_status_t rv = APR_SUCCESS; + proxy_worker *worker = backend->worker; + apr_status_t rv; + /* * normally, this is done in ap_proxy_determine_connection(). 
* TODO: Look at using ap_proxy_determine_connection() with a * fake request_rec */ - if (worker->cp->addr) { - *addr = worker->cp->addr; + rv = ap_proxy_determine_address(proxy_function, backend, + worker->s->hostname_ex, worker->s->port, + 0, NULL, s); + if (rv != APR_SUCCESS) { + ap_log_error(APLOG_MARK, APLOG_DEBUG, rv, s, APLOGNO(03249) + "DNS lookup failure for: %s:%hu", + worker->s->hostname_ex, worker->s->port); + return !OK; } - else { - rv = apr_sockaddr_info_get(addr, worker->s->hostname_ex, - APR_UNSPEC, worker->s->port, 0, p); - if (rv != APR_SUCCESS) { - ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, ctx->s, APLOGNO(03249) - "DNS lookup failure for: %s:%d", - worker->s->hostname_ex, (int)worker->s->port); + + return OK; +} + +static apr_status_t backend_cleanup(const char *proxy_function, proxy_conn_rec *backend, + server_rec *s, int status) +{ + if (backend) { + backend->close = 1; + ap_proxy_release_connection(proxy_function, backend, s); + ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s, APLOGNO(03251) + "Health check %s Status (%d) for %s.", + ap_proxy_show_hcmethod(backend->worker->s->method), + status, + backend->worker->s->name_ex); + } + if (status != OK) { + return APR_EGENERAL; + } + return APR_SUCCESS; +} + +static int hc_get_backend(const char *proxy_function, proxy_conn_rec **backend, + proxy_worker *hc, sctx_t *ctx) +{ + int status; + + status = ap_proxy_acquire_connection(proxy_function, backend, hc, ctx->s); + if (status != OK) { + return status; + } + + if (strcmp(hc->s->scheme, "https") == 0 || strcmp(hc->s->scheme, "wss") == 0 ) { + if (!ap_ssl_has_outgoing_handlers()) { + ap_log_error(APLOG_MARK, APLOG_WARNING, 0, ctx->s, APLOGNO(03252) + "mod_ssl not configured?"); + return !OK; } + (*backend)->is_ssl = 1; } - return (rv == APR_SUCCESS ? 
OK : !OK); + + return hc_determine_connection(proxy_function, *backend, ctx->s); } -static apr_status_t hc_init_worker(sctx_t *ctx, proxy_worker *worker) +static apr_status_t hc_init_baton(baton_t *baton) { + sctx_t *ctx = baton->ctx; + proxy_worker *worker = baton->worker, *hc; apr_status_t rv = APR_SUCCESS; + int once = 0; + /* * Since this is the watchdog, workers never actually handle a * request here, and so the local data isn't initialized (of @@ -555,52 +635,67 @@ static apr_status_t hc_init_worker(sctx_t *ctx, proxy_worker *worker) ap_log_error(APLOG_MARK, APLOG_EMERG, rv, ctx->s, APLOGNO(03250) "Cannot init worker"); return rv; } - if (worker->s->is_address_reusable && !worker->s->disablereuse && - hc_determine_connection(ctx, worker, &worker->cp->addr, - worker->cp->pool) != OK) { + once = 1; + } + + baton->hc = hc = hc_get_hcworker(ctx, worker, baton->ptemp); + + /* Try to resolve the worker address once if it's reusable */ + if (once && worker->s->is_address_reusable) { + proxy_conn_rec *backend = NULL; + if (hc_get_backend("HCHECK", &backend, hc, ctx)) { rv = APR_EGENERAL; } + if (backend) { + backend->close = 1; + ap_proxy_release_connection("HCHECK", backend, ctx->s); + } } - return rv; -} -static apr_status_t backend_cleanup(const char *proxy_function, proxy_conn_rec *backend, - server_rec *s, int status) -{ - if (backend) { - backend->close = 1; - ap_proxy_release_connection(proxy_function, backend, s); - ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s, APLOGNO(03251) - "Health check %s Status (%d) for %s.", - ap_proxy_show_hcmethod(backend->worker->s->method), - status, - backend->worker->s->name); - } - if (status != OK) { - return APR_EGENERAL; - } - return APR_SUCCESS; + return rv; } -static int hc_get_backend(const char *proxy_function, proxy_conn_rec **backend, - proxy_worker *hc, sctx_t *ctx, apr_pool_t *ptemp) +static apr_status_t hc_check_cping(baton_t *baton, apr_thread_t *thread) { int status; - status = ap_proxy_acquire_connection(proxy_function, backend, hc, ctx->s); - if (status == OK) { - (*backend)->addr = hc->cp->addr; - (*backend)->hostname = hc->s->hostname_ex; - if (strcmp(hc->s->scheme, "https") == 0) { - if (!ap_proxy_ssl_enable(NULL)) { - ap_log_error(APLOG_MARK, APLOG_WARNING, 0, ctx->s, APLOGNO(03252) - "mod_ssl not configured?"); - return !OK; - } - (*backend)->is_ssl = 1; - } + sctx_t *ctx = baton->ctx; + proxy_worker *hc = baton->hc; + proxy_conn_rec *backend = NULL; + apr_pool_t *ptemp = baton->ptemp; + request_rec *r; + apr_interval_time_t timeout; + if (!ajp_handle_cping_cpong) { + return APR_ENOTIMPL; + } + + ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, baton->ctx->s, "HCCPING starting"); + if ((status = hc_get_backend("HCCPING", &backend, hc, ctx)) != OK) { + return backend_cleanup("HCCPING", backend, ctx->s, status); + } + if ((status = ap_proxy_connect_backend("HCCPING", backend, hc, ctx->s)) != OK) { + return backend_cleanup("HCCPING", backend, ctx->s, status); } - return hc_determine_connection(ctx, hc, &(*backend)->addr, ptemp); + r = create_request_rec(ptemp, ctx->s, baton->balancer, "CPING", NULL); + if ((status = ap_proxy_connection_create_ex("HCCPING", backend, r)) != OK) { + return backend_cleanup("HCCPING", backend, ctx->s, status); + } + set_request_connection(r, backend->connection); + backend->connection->current_thread = thread; + + if (hc->s->ping_timeout_set) { + timeout = hc->s->ping_timeout; + } else if ( hc->s->conn_timeout_set) { + timeout = hc->s->conn_timeout; + } else if ( hc->s->timeout_set) { + timeout = hc->s->timeout; + } 
else { + /* default to socket timeout */ + apr_socket_timeout_get(backend->sock, &timeout); + } + status = ajp_handle_cping_cpong(backend->sock, r, timeout); + ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, baton->ctx->s, "HCCPING done %d", status); + return backend_cleanup("HCCPING", backend, ctx->s, status); } static apr_status_t hc_check_tcp(baton_t *baton) @@ -610,7 +705,7 @@ static apr_status_t hc_check_tcp(baton_t *baton) proxy_worker *hc = baton->hc; proxy_conn_rec *backend = NULL; - status = hc_get_backend("HCTCP", &backend, hc, ctx, baton->ptemp); + status = hc_get_backend("HCTCP", &backend, hc, ctx); if (status == OK) { status = ap_proxy_connect_backend("HCTCP", backend, hc, ctx->s); /* does an unconditional ap_proxy_is_socket_connected() */ @@ -636,6 +731,7 @@ static int hc_read_headers(request_rec *r) { char buffer[HUGE_STRING_LEN]; int len; + const char *ct; len = ap_getline(buffer, sizeof(buffer), r, 1); if (len <= 0) { @@ -670,6 +766,7 @@ static int hc_read_headers(request_rec *r) } else { return !OK; } + /* OK, 1st line is OK... scarf in the headers */ while ((len = ap_getline(buffer, sizeof(buffer), r, 1)) > 0) { char *value, *end; @@ -686,6 +783,11 @@ static int hc_read_headers(request_rec *r) *end = '\0'; apr_table_add(r->headers_out, buffer, value); } + + /* Set the Content-Type for the request if set */ + if ((ct = apr_table_get(r->headers_out, "Content-Type")) != NULL) + ap_set_content_type(r, ct); + return OK; } @@ -736,7 +838,7 @@ static int hc_read_body(request_rec *r, apr_bucket_brigade *bb) * then apply those to the resulting response, otherwise * any status code 2xx or 3xx is considered "passing" */ -static apr_status_t hc_check_http(baton_t *baton) +static apr_status_t hc_check_http(baton_t *baton, apr_thread_t *thread) { int status; proxy_conn_rec *backend = NULL; @@ -754,20 +856,19 @@ static apr_status_t hc_check_http(baton_t *baton) return APR_ENOTIMPL; } - if ((status = hc_get_backend("HCOH", &backend, hc, ctx, ptemp)) != OK) { + if ((status = hc_get_backend("HCOH", &backend, hc, ctx)) != OK) { return backend_cleanup("HCOH", backend, ctx->s, status); } if ((status = ap_proxy_connect_backend("HCOH", backend, hc, ctx->s)) != OK) { return backend_cleanup("HCOH", backend, ctx->s, status); } - r = create_request_rec(ptemp, ctx->s, baton->balancer, wctx->method); - if (!backend->connection) { - if ((status = ap_proxy_connection_create_ex("HCOH", backend, r)) != OK) { - return backend_cleanup("HCOH", backend, ctx->s, status); - } + r = create_request_rec(ptemp, ctx->s, baton->balancer, wctx->method, wctx->protocol); + if ((status = ap_proxy_connection_create_ex("HCOH", backend, r)) != OK) { + return backend_cleanup("HCOH", backend, ctx->s, status); } set_request_connection(r, backend->connection); + backend->connection->current_thread = thread; bb = apr_brigade_create(r->pool, r->connection->bucket_alloc); @@ -796,22 +897,22 @@ static apr_status_t hc_check_http(baton_t *baton) if (ok > 0) { ap_log_error(APLOG_MARK, APLOG_TRACE2, 0, ctx->s, "Condition %s for %s (%s): passed", worker->s->hcexpr, - hc->s->name, worker->s->name); + hc->s->name_ex, worker->s->name_ex); } else if (ok < 0 || err) { ap_log_error(APLOG_MARK, APLOG_INFO, 0, ctx->s, APLOGNO(03301) "Error on checking condition %s for %s (%s): %s", worker->s->hcexpr, - hc->s->name, worker->s->name, err); + hc->s->name_ex, worker->s->name_ex, err); status = !OK; } else { ap_log_error(APLOG_MARK, APLOG_TRACE2, 0, ctx->s, "Condition %s for %s (%s) : failed", worker->s->hcexpr, - hc->s->name, worker->s->name); + 
hc->s->name_ex, worker->s->name_ex); status = !OK; } } else if (r->status < 200 || r->status > 399) { ap_log_error(APLOG_MARK, APLOG_TRACE2, 0, ctx->s, "Response status %i for %s (%s): failed", r->status, - hc->s->name, worker->s->name); + hc->s->name_ex, worker->s->name_ex); status = !OK; } return backend_cleanup("HCOH", backend, ctx->s, status); @@ -823,29 +924,31 @@ static void * APR_THREAD_FUNC hc_check(apr_thread_t *thread, void *b) server_rec *s = baton->ctx->s; proxy_worker *worker = baton->worker; proxy_worker *hc = baton->hc; - apr_time_t now = baton->now; + apr_time_t now; apr_status_t rv; ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s, APLOGNO(03256) "%sHealth checking %s", (thread ? "Threaded " : ""), - worker->s->name); + worker->s->name_ex); - worker->s->updated = now; if (hc->s->method == TCP) { rv = hc_check_tcp(baton); } + else if (hc->s->method == CPING) { + rv = hc_check_cping(baton, thread); + } else { - rv = hc_check_http(baton); + rv = hc_check_http(baton, thread); } + + now = apr_time_now(); if (rv == APR_ENOTIMPL) { ap_log_error(APLOG_MARK, APLOG_ERR, 0, s, APLOGNO(03257) "Somehow tried to use unimplemented hcheck method: %d", (int)hc->s->method); - apr_pool_destroy(baton->ptemp); - return NULL; } /* what state are we in ? */ - if (PROXY_WORKER_IS_HCFAILED(worker)) { + else if (PROXY_WORKER_IS_HCFAILED(worker) || PROXY_WORKER_IS_ERROR(worker)) { if (rv == APR_SUCCESS) { worker->s->pcount += 1; if (worker->s->pcount >= worker->s->passes) { @@ -854,11 +957,12 @@ static void * APR_THREAD_FUNC hc_check(apr_thread_t *thread, void *b) worker->s->pcount = 0; ap_log_error(APLOG_MARK, APLOG_INFO, 0, s, APLOGNO(03302) "%sHealth check ENABLING %s", (thread ? "Threaded " : ""), - worker->s->name); + worker->s->name_ex); } } - } else { + } + else { if (rv != APR_SUCCESS) { worker->s->error_time = now; worker->s->fcount += 1; @@ -867,11 +971,16 @@ static void * APR_THREAD_FUNC hc_check(apr_thread_t *thread, void *b) worker->s->fcount = 0; ap_log_error(APLOG_MARK, APLOG_INFO, 0, s, APLOGNO(03303) "%sHealth check DISABLING %s", (thread ? "Threaded " : ""), - worker->s->name); + worker->s->name_ex); } } } + if (baton->now) { + *baton->now = now; + } apr_pool_destroy(baton->ptemp); + worker->s->updated = now; + return NULL; } @@ -879,12 +988,10 @@ static apr_status_t hc_watchdog_callback(int state, void *data, apr_pool_t *pool) { apr_status_t rv = APR_SUCCESS; - apr_time_t now = apr_time_now(); proxy_balancer *balancer; sctx_t *ctx = (sctx_t *)data; server_rec *s = ctx->s; proxy_server_conf *conf; - static apr_thread_pool_t *hctp = NULL; switch (state) { case AP_WATCHDOG_STATE_STARTING: @@ -911,15 +1018,11 @@ static apr_status_t hc_watchdog_callback(int state, void *data, "Skipping apr_thread_pool_create()"); hctp = NULL; } - #endif break; case AP_WATCHDOG_STATE_RUNNING: /* loop thru all workers */ - ap_log_error(APLOG_MARK, APLOG_TRACE5, 0, s, - "Run of %s watchdog.", - HCHECK_WATHCHDOG_NAME); if (s) { int i; conf = (proxy_server_conf *) ap_get_module_config(s->module_config, &proxy_module); @@ -927,45 +1030,52 @@ static apr_status_t hc_watchdog_callback(int state, void *data, ctx->s = s; for (i = 0; i < conf->balancers->nelts; i++, balancer++) { int n; + apr_time_t now; proxy_worker **workers; proxy_worker *worker; /* Have any new balancers or workers been added dynamically? 
*/ ap_proxy_sync_balancer(balancer, s, conf); workers = (proxy_worker **)balancer->workers->elts; + now = apr_time_now(); for (n = 0; n < balancer->workers->nelts; n++) { worker = *workers; if (!PROXY_WORKER_IS(worker, PROXY_WORKER_STOPPED) && - (worker->s->method != NONE) && - (now > worker->s->updated + worker->s->interval)) { + (worker->s->method != NONE) && + (worker->s->updated != 0) && + (now > worker->s->updated + worker->s->interval)) { baton_t *baton; apr_pool_t *ptemp; + ap_log_error(APLOG_MARK, APLOG_TRACE3, 0, s, "Checking %s worker: %s [%d] (%pp)", balancer->s->name, - worker->s->name, worker->s->method, worker); + worker->s->name_ex, worker->s->method, worker); - if ((rv = hc_init_worker(ctx, worker)) != APR_SUCCESS) { - return rv; - } - /* This pool must last the lifetime of the (possible) thread */ + /* This pool has the lifetime of the check */ apr_pool_create(&ptemp, ctx->p); apr_pool_tag(ptemp, "hc_request"); - baton = apr_palloc(ptemp, sizeof(baton_t)); + baton = apr_pcalloc(ptemp, sizeof(baton_t)); baton->ctx = ctx; - baton->now = now; baton->balancer = balancer; baton->worker = worker; baton->ptemp = ptemp; - baton->hc = hc_get_hcworker(ctx, worker, ptemp); - - if (!hctp) { - hc_check(NULL, baton); + if ((rv = hc_init_baton(baton))) { + worker->s->updated = now; + apr_pool_destroy(ptemp); + return rv; } + worker->s->updated = 0; #if HC_USE_THREADS - else { - rv = apr_thread_pool_push(hctp, hc_check, (void *)baton, - APR_THREAD_TASK_PRIORITY_NORMAL, NULL); + if (hctp) { + apr_thread_pool_push(hctp, hc_check, (void *)baton, + APR_THREAD_TASK_PRIORITY_NORMAL, + NULL); } + else #endif + { + baton->now = &now; + hc_check(NULL, baton); + } } workers++; } @@ -984,9 +1094,9 @@ static apr_status_t hc_watchdog_callback(int state, void *data, ap_log_error(APLOG_MARK, APLOG_INFO, rv, s, APLOGNO(03315) "apr_thread_pool_destroy() failed"); } + hctp = NULL; } #endif - hctp = NULL; break; } return rv; @@ -994,7 +1104,22 @@ static apr_status_t hc_watchdog_callback(int state, void *data, static int hc_pre_config(apr_pool_t *pconf, apr_pool_t *plog, apr_pool_t *ptemp) { +#if HC_USE_THREADS + hctp = NULL; tpsize = HC_THREADPOOL_SIZE; +#endif + + ajp_handle_cping_cpong = APR_RETRIEVE_OPTIONAL_FN(ajp_handle_cping_cpong); + if (ajp_handle_cping_cpong) { + proxy_hcmethods_t *method = proxy_hcmethods; + for (; method->name; method++) { + if (method->method == CPING) { + method->implemented = 1; + break; + } + } + } + return OK; } static int hc_post_config(apr_pool_t *p, apr_pool_t *plog, @@ -1050,6 +1175,7 @@ static int hc_post_config(apr_pool_t *p, apr_pool_t *plog, "watchdog callback registered (%s for %s)", HCHECK_WATHCHDOG_NAME, s->server_hostname); s = s->next; } + return OK; } @@ -1060,6 +1186,8 @@ static void hc_show_exprs(request_rec *r) int i; sctx_t *ctx = (sctx_t *) ap_get_module_config(r->server->module_config, &proxy_hcheck_module); + if (!ctx) + return; if (apr_is_empty_table(ctx->conditions)) return; @@ -1089,6 +1217,8 @@ static void hc_select_exprs(request_rec *r, const char *expr) int i; sctx_t *ctx = (sctx_t *) ap_get_module_config(r->server->module_config, &proxy_hcheck_module); + if (!ctx) + return; if (apr_is_empty_table(ctx->conditions)) return; @@ -1112,6 +1242,8 @@ static int hc_valid_expr(request_rec *r, const char *expr) int i; sctx_t *ctx = (sctx_t *) ap_get_module_config(r->server->module_config, &proxy_hcheck_module); + if (!ctx) + return 0; if (apr_is_empty_table(ctx->conditions)) return 0; diff --git a/modules/proxy/mod_proxy_http.c 
b/modules/proxy/mod_proxy_http.c index 56af9a8..bd57b4d 100644 --- a/modules/proxy/mod_proxy_http.c +++ b/modules/proxy/mod_proxy_http.c @@ -31,36 +31,71 @@ static apr_status_t ap_proxy_http_cleanup(const char *scheme, static apr_status_t ap_proxygetline(apr_bucket_brigade *bb, char *s, int n, request_rec *r, int flags, int *read); +static const char *get_url_scheme(const char **url, int *is_ssl) +{ + const char *u = *url; + + switch (u[0]) { + case 'h': + case 'H': + if (strncasecmp(u + 1, "ttp", 3) == 0) { + if (u[4] == ':') { + *is_ssl = 0; + *url = u + 5; + return "http"; + } + if (apr_tolower(u[4]) == 's' && u[5] == ':') { + *is_ssl = 1; + *url = u + 6; + return "https"; + } + } + break; + + case 'w': + case 'W': + if (apr_tolower(u[1]) == 's') { + if (u[2] == ':') { + *is_ssl = 0; + *url = u + 3; + return "ws"; + } + if (apr_tolower(u[2]) == 's' && u[3] == ':') { + *is_ssl = 1; + *url = u + 4; + return "wss"; + } + } + break; + } + + *is_ssl = 0; + return NULL; +} + /* * Canonicalise http-like URLs. * scheme is the scheme for the URL * url is the URL starting with the first '/' - * def_port is the default port for this scheme. */ static int proxy_http_canon(request_rec *r, char *url) { + const char *base_url = url; char *host, *path, sport[7]; char *search = NULL; const char *err; const char *scheme; apr_port_t port, def_port; + int is_ssl = 0; - /* ap_port_of_scheme() */ - if (strncasecmp(url, "http:", 5) == 0) { - url += 5; - scheme = "http"; - } - else if (strncasecmp(url, "https:", 6) == 0) { - url += 6; - scheme = "https"; - } - else { + scheme = get_url_scheme((const char **)&url, &is_ssl); + if (!scheme) { return DECLINED; } - port = def_port = ap_proxy_port_of_scheme(scheme); + port = def_port = (is_ssl) ? DEFAULT_HTTPS_PORT : DEFAULT_HTTP_PORT; ap_log_rerror(APLOG_MARK, APLOG_TRACE1, 0, r, - "HTTP: canonicalising URL %s", url); + "HTTP: canonicalising URL %s", base_url); /* do syntatic check. * We break the URL into host, port, path, search @@ -68,7 +103,7 @@ static int proxy_http_canon(request_rec *r, char *url) err = ap_proxy_canon_netloc(r->pool, &url, NULL, NULL, &host, &port); if (err) { ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(01083) - "error parsing URL %s: %s", url, err); + "error parsing URL %s: %s", base_url, err); return HTTP_BAD_REQUEST; } @@ -86,9 +121,19 @@ static int proxy_http_canon(request_rec *r, char *url) if (apr_table_get(r->notes, "proxy-nocanon")) { path = url; /* this is the raw path */ } + else if (apr_table_get(r->notes, "proxy-noencode")) { + path = url; /* this is the encoded path already */ + search = r->args; + } else { - path = ap_proxy_canonenc(r->pool, url, strlen(url), - enc_path, 0, r->proxyreq); + core_dir_config *d = ap_get_core_module_config(r->per_dir_config); + int flags = d->allow_encoded_slashes && !d->decode_encoded_slashes ? PROXY_CANONENC_NOENCODEDSLASHENCODING : 0; + + path = ap_proxy_canonenc_ex(r->pool, url, strlen(url), enc_path, + flags, r->proxyreq); + if (!path) { + return HTTP_BAD_REQUEST; + } search = r->args; } break; @@ -96,9 +141,22 @@ static int proxy_http_canon(request_rec *r, char *url) path = url; break; } - - if (path == NULL) - return HTTP_BAD_REQUEST; + /* + * If we have a raw control character or a ' ' in nocanon path or + * r->args, correct encoding was missed. 
+ */ + if (path == url && *ap_scan_vchar_obstext(path)) { + ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(10415) + "To be forwarded path contains control " + "characters or spaces"); + return HTTP_FORBIDDEN; + } + if (search && *ap_scan_vchar_obstext(search)) { + ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(10408) + "To be forwarded query string contains control " + "characters or spaces"); + return HTTP_FORBIDDEN; + } if (port != def_port) apr_snprintf(sport, sizeof(sport), ":%d", port); @@ -108,8 +166,9 @@ static int proxy_http_canon(request_rec *r, char *url) if (ap_strchr_c(host, ':')) { /* if literal IPv6 address */ host = apr_pstrcat(r->pool, "[", host, "]", NULL); } + r->filename = apr_pstrcat(r->pool, "proxy:", scheme, "://", host, sport, - "/", path, (search) ? "?" : "", (search) ? search : "", NULL); + "/", path, (search) ? "?" : "", search, NULL); return OK; } @@ -216,486 +275,230 @@ static void add_cl(apr_pool_t *p, APR_BRIGADE_INSERT_TAIL(header_brigade, e); } -#define ASCII_CRLF "\015\012" -#define ASCII_ZERO "\060" - -static void terminate_headers(apr_bucket_alloc_t *bucket_alloc, - apr_bucket_brigade *header_brigade) -{ - apr_bucket *e; - - /* add empty line at the end of the headers */ - e = apr_bucket_immortal_create(ASCII_CRLF, 2, bucket_alloc); - APR_BRIGADE_INSERT_TAIL(header_brigade, e); -} - +#ifndef CRLF_ASCII +#define CRLF_ASCII "\015\012" +#endif +#ifndef ZERO_ASCII +#define ZERO_ASCII "\060" +#endif #define MAX_MEM_SPOOL 16384 -static int stream_reqbody_chunked(apr_pool_t *p, - request_rec *r, - proxy_conn_rec *p_conn, - conn_rec *origin, - apr_bucket_brigade *header_brigade, - apr_bucket_brigade *input_brigade) -{ - int seen_eos = 0, rv = OK; - apr_size_t hdr_len; - apr_off_t bytes; - apr_status_t status; - apr_bucket_alloc_t *bucket_alloc = r->connection->bucket_alloc; - apr_bucket_brigade *bb; - apr_bucket *e; - - add_te_chunked(p, bucket_alloc, header_brigade); - terminate_headers(bucket_alloc, header_brigade); +typedef enum { + RB_INIT = 0, + RB_STREAM_CL, + RB_STREAM_CHUNKED, + RB_SPOOL_CL +} rb_methods; - while (!APR_BUCKET_IS_EOS(APR_BRIGADE_FIRST(input_brigade))) - { - char chunk_hdr[20]; /* must be here due to transient bucket. */ +typedef struct { + apr_pool_t *p; + request_rec *r; + const char *proto; + proxy_worker *worker; + proxy_server_conf *sconf; - /* If this brigade contains EOS, either stop or remove it. */ - if (APR_BUCKET_IS_EOS(APR_BRIGADE_LAST(input_brigade))) { - seen_eos = 1; - - /* We can't pass this EOS to the output_filters. 
*/ - e = APR_BRIGADE_LAST(input_brigade); - apr_bucket_delete(e); - } + char server_portstr[32]; + proxy_conn_rec *backend; + conn_rec *origin; - apr_brigade_length(input_brigade, 1, &bytes); + apr_bucket_alloc_t *bucket_alloc; + apr_bucket_brigade *header_brigade; + apr_bucket_brigade *input_brigade; + char *old_cl_val, *old_te_val; + apr_off_t cl_val; - hdr_len = apr_snprintf(chunk_hdr, sizeof(chunk_hdr), - "%" APR_UINT64_T_HEX_FMT CRLF, - (apr_uint64_t)bytes); + rb_methods rb_method; - ap_xlate_proto_to_ascii(chunk_hdr, hdr_len); - e = apr_bucket_transient_create(chunk_hdr, hdr_len, - bucket_alloc); - APR_BRIGADE_INSERT_HEAD(input_brigade, e); + const char *upgrade; - /* - * Append the end-of-chunk CRLF - */ - e = apr_bucket_immortal_create(ASCII_CRLF, 2, bucket_alloc); - APR_BRIGADE_INSERT_TAIL(input_brigade, e); + unsigned int do_100_continue :1, + prefetch_nonblocking :1, + force10 :1; +} proxy_http_req_t; - if (header_brigade) { - /* we never sent the header brigade, so go ahead and - * take care of that now - */ - bb = header_brigade; +static int stream_reqbody(proxy_http_req_t *req) +{ + request_rec *r = req->r; + int seen_eos = 0, rv = OK; + apr_size_t hdr_len; + char chunk_hdr[20]; /* must be here due to transient bucket. */ + conn_rec *origin = req->origin; + proxy_conn_rec *p_conn = req->backend; + apr_bucket_alloc_t *bucket_alloc = req->bucket_alloc; + apr_bucket_brigade *header_brigade = req->header_brigade; + apr_bucket_brigade *input_brigade = req->input_brigade; + rb_methods rb_method = req->rb_method; + apr_off_t bytes, bytes_streamed = 0; + apr_bucket *e; - /* - * Save input_brigade in bb brigade. (At least) in the SSL case - * input_brigade contains transient buckets whose data would get - * overwritten during the next call of ap_get_brigade in the loop. - * ap_save_brigade ensures these buckets to be set aside. - * Calling ap_save_brigade with NULL as filter is OK, because - * bb brigade already has been created and does not need to get - * created by ap_save_brigade. - */ - status = ap_save_brigade(NULL, &bb, &input_brigade, p); - if (status != APR_SUCCESS) { - return HTTP_INTERNAL_SERVER_ERROR; + do { + if (APR_BRIGADE_EMPTY(input_brigade) + && APR_BRIGADE_EMPTY(header_brigade)) { + rv = ap_proxy_read_input(r, p_conn, input_brigade, + HUGE_STRING_LEN); + if (rv != OK) { + return rv; } - - header_brigade = NULL; - } - else { - bb = input_brigade; } - /* The request is flushed below this loop with chunk EOS header */ - rv = ap_proxy_pass_brigade(bucket_alloc, r, p_conn, origin, bb, 0); - if (rv != OK) { - return rv; - } - - if (seen_eos) { - break; - } - - status = ap_get_brigade(r->input_filters, input_brigade, - AP_MODE_READBYTES, APR_BLOCK_READ, - HUGE_STRING_LEN); - - if (status != APR_SUCCESS) { - conn_rec *c = r->connection; - ap_log_rerror(APLOG_MARK, APLOG_ERR, status, r, APLOGNO(02608) - "read request body failed to %pI (%s)" - " from %s (%s)", p_conn->addr, - p_conn->hostname ? p_conn->hostname: "", - c->client_ip, c->remote_host ? c->remote_host: ""); - return ap_map_http_request_error(status, HTTP_BAD_REQUEST); - } - } - - if (header_brigade) { - /* we never sent the header brigade because there was no request body; - * send it now - */ - bb = header_brigade; - } - else { if (!APR_BRIGADE_EMPTY(input_brigade)) { - /* input brigade still has an EOS which we can't pass to the output_filters. 
*/ - e = APR_BRIGADE_LAST(input_brigade); - AP_DEBUG_ASSERT(APR_BUCKET_IS_EOS(e)); - apr_bucket_delete(e); - } - bb = input_brigade; - } - - e = apr_bucket_immortal_create(ASCII_ZERO ASCII_CRLF - /* */ - ASCII_CRLF, - 5, bucket_alloc); - APR_BRIGADE_INSERT_TAIL(bb, e); - - if (apr_table_get(r->subprocess_env, "proxy-sendextracrlf")) { - e = apr_bucket_immortal_create(ASCII_CRLF, 2, bucket_alloc); - APR_BRIGADE_INSERT_TAIL(bb, e); - } - - /* Now we have headers-only, or the chunk EOS mark; flush it */ - rv = ap_proxy_pass_brigade(bucket_alloc, r, p_conn, origin, bb, 1); - return rv; -} - -static int stream_reqbody_cl(apr_pool_t *p, - request_rec *r, - proxy_conn_rec *p_conn, - conn_rec *origin, - apr_bucket_brigade *header_brigade, - apr_bucket_brigade *input_brigade, - char *old_cl_val) -{ - int seen_eos = 0, rv = 0; - apr_status_t status = APR_SUCCESS; - apr_bucket_alloc_t *bucket_alloc = r->connection->bucket_alloc; - apr_bucket_brigade *bb; - apr_bucket *e; - apr_off_t cl_val = 0; - apr_off_t bytes; - apr_off_t bytes_streamed = 0; - - if (old_cl_val) { - char *endstr; - - add_cl(p, bucket_alloc, header_brigade, old_cl_val); - status = apr_strtoff(&cl_val, old_cl_val, &endstr, 10); - - if (status || *endstr || endstr == old_cl_val || cl_val < 0) { - ap_log_rerror(APLOG_MARK, APLOG_ERR, status, r, APLOGNO(01085) - "could not parse request Content-Length (%s)", - old_cl_val); - return HTTP_BAD_REQUEST; - } - } - terminate_headers(bucket_alloc, header_brigade); + /* If this brigade contains EOS, remove it and be done. */ + if (APR_BUCKET_IS_EOS(APR_BRIGADE_LAST(input_brigade))) { + seen_eos = 1; - while (!APR_BUCKET_IS_EOS(APR_BRIGADE_FIRST(input_brigade))) - { - apr_brigade_length(input_brigade, 1, &bytes); - bytes_streamed += bytes; + /* We can't pass this EOS to the output_filters. */ + e = APR_BRIGADE_LAST(input_brigade); + apr_bucket_delete(e); + } - /* If this brigade contains EOS, either stop or remove it. */ - if (APR_BUCKET_IS_EOS(APR_BRIGADE_LAST(input_brigade))) { - seen_eos = 1; + apr_brigade_length(input_brigade, 1, &bytes); + bytes_streamed += bytes; - /* We can't pass this EOS to the output_filters. */ - e = APR_BRIGADE_LAST(input_brigade); - apr_bucket_delete(e); + if (rb_method == RB_STREAM_CHUNKED) { + if (bytes) { + /* + * Prepend the size of the chunk + */ + hdr_len = apr_snprintf(chunk_hdr, sizeof(chunk_hdr), + "%" APR_UINT64_T_HEX_FMT CRLF, + (apr_uint64_t)bytes); + ap_xlate_proto_to_ascii(chunk_hdr, hdr_len); + e = apr_bucket_transient_create(chunk_hdr, hdr_len, + bucket_alloc); + APR_BRIGADE_INSERT_HEAD(input_brigade, e); + + /* + * Append the end-of-chunk CRLF + */ + e = apr_bucket_immortal_create(CRLF_ASCII, 2, bucket_alloc); + APR_BRIGADE_INSERT_TAIL(input_brigade, e); + } + if (seen_eos) { + /* + * Append the tailing 0-size chunk + */ + e = apr_bucket_immortal_create(ZERO_ASCII CRLF_ASCII + /* */ + CRLF_ASCII, + 5, bucket_alloc); + APR_BRIGADE_INSERT_TAIL(input_brigade, e); + } + } + else if (rb_method == RB_STREAM_CL + && (bytes_streamed > req->cl_val + || (seen_eos && bytes_streamed < req->cl_val))) { + /* C-L != bytes streamed?!? + * + * Prevent HTTP Request/Response Splitting. + * + * We can't stream more (or less) bytes at the back end since + * they could be interpreted in separate requests (more bytes + * now would start a new request, less bytes would make the + * first bytes of the next request be part of the current one). 
+ * + * It can't happen from the client connection here thanks to + * ap_http_filter(), but some module's filter may be playing + * bad games, hence the HTTP_INTERNAL_SERVER_ERROR. + */ + ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(01086) + "read %s bytes of request body than expected " + "(got %" APR_OFF_T_FMT ", expected " + "%" APR_OFF_T_FMT ")", + bytes_streamed > req->cl_val ? "more" : "less", + bytes_streamed, req->cl_val); + return HTTP_INTERNAL_SERVER_ERROR; + } - if (apr_table_get(r->subprocess_env, "proxy-sendextracrlf")) { - e = apr_bucket_immortal_create(ASCII_CRLF, 2, bucket_alloc); + if (seen_eos && apr_table_get(r->subprocess_env, + "proxy-sendextracrlf")) { + e = apr_bucket_immortal_create(CRLF_ASCII, 2, bucket_alloc); APR_BRIGADE_INSERT_TAIL(input_brigade, e); } } - /* C-L < bytes streamed?!? - * We will error out after the body is completely - * consumed, but we can't stream more bytes at the - * back end since they would in part be interpreted - * as another request! If nothing is sent, then - * just send nothing. - * - * Prevents HTTP Response Splitting. + /* If we never sent the header brigade, go ahead and take care of + * that now by prepending it (once only since header_brigade will be + * empty afterward). */ - if (bytes_streamed > cl_val) { - ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(01086) - "read more bytes of request body than expected " - "(got %" APR_OFF_T_FMT ", expected %" APR_OFF_T_FMT ")", - bytes_streamed, cl_val); - return HTTP_INTERNAL_SERVER_ERROR; - } - - if (header_brigade) { - /* we never sent the header brigade, so go ahead and - * take care of that now - */ - bb = header_brigade; - - /* - * Save input_brigade in bb brigade. (At least) in the SSL case - * input_brigade contains transient buckets whose data would get - * overwritten during the next call of ap_get_brigade in the loop. - * ap_save_brigade ensures these buckets to be set aside. - * Calling ap_save_brigade with NULL as filter is OK, because - * bb brigade already has been created and does not need to get - * created by ap_save_brigade. - */ - status = ap_save_brigade(NULL, &bb, &input_brigade, p); - if (status != APR_SUCCESS) { - return HTTP_INTERNAL_SERVER_ERROR; - } - - header_brigade = NULL; - } - else { - bb = input_brigade; - } + APR_BRIGADE_PREPEND(input_brigade, header_brigade); - /* Once we hit EOS, we are ready to flush. */ - rv = ap_proxy_pass_brigade(bucket_alloc, r, p_conn, origin, bb, seen_eos); + /* Flush here on EOS because we won't ap_proxy_read_input() again. */ + rv = ap_proxy_pass_brigade(bucket_alloc, r, p_conn, origin, + input_brigade, seen_eos); if (rv != OK) { - return rv ; - } - - if (seen_eos) { - break; - } - - status = ap_get_brigade(r->input_filters, input_brigade, - AP_MODE_READBYTES, APR_BLOCK_READ, - HUGE_STRING_LEN); - - if (status != APR_SUCCESS) { - conn_rec *c = r->connection; - ap_log_rerror(APLOG_MARK, APLOG_ERR, status, r, APLOGNO(02609) - "read request body failed to %pI (%s)" - " from %s (%s)", p_conn->addr, - p_conn->hostname ? p_conn->hostname: "", - c->client_ip, c->remote_host ? 
c->remote_host: ""); - return ap_map_http_request_error(status, HTTP_BAD_REQUEST); + return rv; } - } - - if (bytes_streamed != cl_val) { - ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(01087) - "client %s given Content-Length did not match" - " number of body bytes read", r->connection->client_ip); - return HTTP_BAD_REQUEST; - } - - if (header_brigade) { - /* we never sent the header brigade since there was no request - * body; send it now with the flush flag - */ - bb = header_brigade; - return(ap_proxy_pass_brigade(bucket_alloc, r, p_conn, origin, bb, 1)); - } + } while (!seen_eos); return OK; } -static int spool_reqbody_cl(apr_pool_t *p, - request_rec *r, - proxy_conn_rec *p_conn, - conn_rec *origin, - apr_bucket_brigade *header_brigade, - apr_bucket_brigade *input_brigade, - int force_cl) +static void terminate_headers(proxy_http_req_t *req) { - int seen_eos = 0; - apr_status_t status; - apr_bucket_alloc_t *bucket_alloc = r->connection->bucket_alloc; - apr_bucket_brigade *body_brigade; + apr_bucket_alloc_t *bucket_alloc = req->bucket_alloc; apr_bucket *e; - apr_off_t bytes, bytes_spooled = 0, fsize = 0; - apr_file_t *tmpfile = NULL; - apr_off_t limit; - - body_brigade = apr_brigade_create(p, bucket_alloc); - - limit = ap_get_limit_req_body(r); - - while (!APR_BUCKET_IS_EOS(APR_BRIGADE_FIRST(input_brigade))) - { - /* If this brigade contains EOS, either stop or remove it. */ - if (APR_BUCKET_IS_EOS(APR_BRIGADE_LAST(input_brigade))) { - seen_eos = 1; + char *buf; - /* We can't pass this EOS to the output_filters. */ - e = APR_BRIGADE_LAST(input_brigade); - apr_bucket_delete(e); + /* + * Handle Connection: header if we do HTTP/1.1 request: + * If we plan to close the backend connection sent Connection: close + * otherwise sent Connection: Keep-Alive. + */ + if (!req->force10) { + if (req->upgrade) { + buf = apr_pstrdup(req->p, "Connection: Upgrade" CRLF); + ap_xlate_proto_to_ascii(buf, strlen(buf)); + e = apr_bucket_pool_create(buf, strlen(buf), req->p, bucket_alloc); + APR_BRIGADE_INSERT_TAIL(req->header_brigade, e); + + /* Tell the backend that it can upgrade the connection. */ + buf = apr_pstrcat(req->p, "Upgrade: ", req->upgrade, CRLF, NULL); } - - apr_brigade_length(input_brigade, 1, &bytes); - - if (bytes_spooled + bytes > MAX_MEM_SPOOL) { - /* - * LimitRequestBody does not affect Proxy requests (Should it?). - * Let it take effect if we decide to store the body in a - * temporary file on disk. 
- */ - if (limit && (bytes_spooled + bytes > limit)) { - ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(01088) - "Request body is larger than the configured " - "limit of %" APR_OFF_T_FMT, limit); - return HTTP_REQUEST_ENTITY_TOO_LARGE; - } - /* can't spool any more in memory; write latest brigade to disk */ - if (tmpfile == NULL) { - const char *temp_dir; - char *template; - - status = apr_temp_dir_get(&temp_dir, p); - if (status != APR_SUCCESS) { - ap_log_rerror(APLOG_MARK, APLOG_ERR, status, r, APLOGNO(01089) - "search for temporary directory failed"); - return HTTP_INTERNAL_SERVER_ERROR; - } - apr_filepath_merge(&template, temp_dir, - "modproxy.tmp.XXXXXX", - APR_FILEPATH_NATIVE, p); - status = apr_file_mktemp(&tmpfile, template, 0, p); - if (status != APR_SUCCESS) { - ap_log_rerror(APLOG_MARK, APLOG_ERR, status, r, APLOGNO(01090) - "creation of temporary file in directory " - "%s failed", temp_dir); - return HTTP_INTERNAL_SERVER_ERROR; - } - } - for (e = APR_BRIGADE_FIRST(input_brigade); - e != APR_BRIGADE_SENTINEL(input_brigade); - e = APR_BUCKET_NEXT(e)) { - const char *data; - apr_size_t bytes_read, bytes_written; - - apr_bucket_read(e, &data, &bytes_read, APR_BLOCK_READ); - status = apr_file_write_full(tmpfile, data, bytes_read, &bytes_written); - if (status != APR_SUCCESS) { - const char *tmpfile_name; - - if (apr_file_name_get(&tmpfile_name, tmpfile) != APR_SUCCESS) { - tmpfile_name = "(unknown)"; - } - ap_log_rerror(APLOG_MARK, APLOG_ERR, status, r, APLOGNO(01091) - "write to temporary file %s failed", - tmpfile_name); - return HTTP_INTERNAL_SERVER_ERROR; - } - AP_DEBUG_ASSERT(bytes_read == bytes_written); - fsize += bytes_written; - } - apr_brigade_cleanup(input_brigade); + else if (ap_proxy_connection_reusable(req->backend)) { + buf = apr_pstrdup(req->p, "Connection: Keep-Alive" CRLF); } else { - - /* - * Save input_brigade in body_brigade. (At least) in the SSL case - * input_brigade contains transient buckets whose data would get - * overwritten during the next call of ap_get_brigade in the loop. - * ap_save_brigade ensures these buckets to be set aside. - * Calling ap_save_brigade with NULL as filter is OK, because - * body_brigade already has been created and does not need to get - * created by ap_save_brigade. - */ - status = ap_save_brigade(NULL, &body_brigade, &input_brigade, p); - if (status != APR_SUCCESS) { - return HTTP_INTERNAL_SERVER_ERROR; - } - - } - - bytes_spooled += bytes; - - if (seen_eos) { - break; - } - - status = ap_get_brigade(r->input_filters, input_brigade, - AP_MODE_READBYTES, APR_BLOCK_READ, - HUGE_STRING_LEN); - - if (status != APR_SUCCESS) { - conn_rec *c = r->connection; - ap_log_rerror(APLOG_MARK, APLOG_ERR, status, r, APLOGNO(02610) - "read request body failed to %pI (%s)" - " from %s (%s)", p_conn->addr, - p_conn->hostname ? p_conn->hostname: "", - c->client_ip, c->remote_host ? 
c->remote_host: ""); - return ap_map_http_request_error(status, HTTP_BAD_REQUEST); + buf = apr_pstrdup(req->p, "Connection: close" CRLF); } + ap_xlate_proto_to_ascii(buf, strlen(buf)); + e = apr_bucket_pool_create(buf, strlen(buf), req->p, bucket_alloc); + APR_BRIGADE_INSERT_TAIL(req->header_brigade, e); } - if (bytes_spooled || force_cl) { - add_cl(p, bucket_alloc, header_brigade, apr_off_t_toa(p, bytes_spooled)); - } - terminate_headers(bucket_alloc, header_brigade); - APR_BRIGADE_CONCAT(header_brigade, body_brigade); - if (tmpfile) { - apr_brigade_insert_file(header_brigade, tmpfile, 0, fsize, p); - } - if (apr_table_get(r->subprocess_env, "proxy-sendextracrlf")) { - e = apr_bucket_immortal_create(ASCII_CRLF, 2, bucket_alloc); - APR_BRIGADE_INSERT_TAIL(header_brigade, e); - } - /* This is all a single brigade, pass with flush flagged */ - return(ap_proxy_pass_brigade(bucket_alloc, r, p_conn, origin, header_brigade, 1)); + /* add empty line at the end of the headers */ + e = apr_bucket_immortal_create(CRLF_ASCII, 2, bucket_alloc); + APR_BRIGADE_INSERT_TAIL(req->header_brigade, e); } -static -int ap_proxy_http_request(apr_pool_t *p, request_rec *r, - proxy_conn_rec *p_conn, proxy_worker *worker, - proxy_server_conf *conf, - apr_uri_t *uri, - char *url, char *server_portstr) +static int ap_proxy_http_prefetch(proxy_http_req_t *req, + apr_uri_t *uri, char *url) { + apr_pool_t *p = req->p; + request_rec *r = req->r; conn_rec *c = r->connection; - apr_bucket_alloc_t *bucket_alloc = c->bucket_alloc; - apr_bucket_brigade *header_brigade; - apr_bucket_brigade *input_brigade; - apr_bucket_brigade *temp_brigade; + proxy_conn_rec *p_conn = req->backend; + apr_bucket_alloc_t *bucket_alloc = req->bucket_alloc; + apr_bucket_brigade *header_brigade = req->header_brigade; + apr_bucket_brigade *input_brigade = req->input_brigade; apr_bucket *e; - char *buf; - apr_status_t status; - enum rb_methods {RB_INIT, RB_STREAM_CL, RB_STREAM_CHUNKED, RB_SPOOL_CL}; - enum rb_methods rb_method = RB_INIT; - char *old_cl_val = NULL; - char *old_te_val = NULL; apr_off_t bytes_read = 0; apr_off_t bytes; - int force10, rv; - conn_rec *origin = p_conn->connection; - - if (apr_table_get(r->subprocess_env, "force-proxy-request-1.0")) { - if (r->expecting_100) { - return HTTP_EXPECTATION_FAILED; - } - force10 = 1; - } else { - force10 = 0; - } + int rv; - header_brigade = apr_brigade_create(p, bucket_alloc); rv = ap_proxy_create_hdrbrgd(p, header_brigade, r, p_conn, - worker, conf, uri, url, server_portstr, - &old_cl_val, &old_te_val); + req->worker, req->sconf, + uri, url, req->server_portstr, + &req->old_cl_val, &req->old_te_val); if (rv != OK) { return rv; } - /* We have headers, let's figure out our request body... */ - input_brigade = apr_brigade_create(p, bucket_alloc); - /* sub-requests never use keepalives, and mustn't pass request bodies. * Because the new logic looks at input_brigade, we will self-terminate * input_brigade and jump past all of the request body logic... * Reading anything with ap_get_brigade is likely to consume the - * main request's body or read beyond EOS - which would be unplesant. + * main request's body or read beyond EOS - which would be unpleasant. * * An exception: when a kept_body is present, then subrequest CAN use * pass request bodies, and we DONT skip the body. @@ -703,9 +506,9 @@ int ap_proxy_http_request(apr_pool_t *p, request_rec *r, if (!r->kept_body && r->main) { /* XXX: Why DON'T sub-requests use keepalives? 
*/ p_conn->close = 1; - old_cl_val = NULL; - old_te_val = NULL; - rb_method = RB_STREAM_CL; + req->old_te_val = NULL; + req->old_cl_val = NULL; + req->rb_method = RB_STREAM_CL; e = apr_bucket_eos_create(input_brigade->bucket_alloc); APR_BRIGADE_INSERT_TAIL(input_brigade, e); goto skip_body; @@ -719,73 +522,29 @@ int ap_proxy_http_request(apr_pool_t *p, request_rec *r, * encoding has been done by the extensions' handler, and * do not modify add_te_chunked's logic */ - if (old_te_val && strcasecmp(old_te_val, "chunked") != 0) { + if (req->old_te_val && ap_cstr_casecmp(req->old_te_val, "chunked") != 0) { ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(01093) - "%s Transfer-Encoding is not supported", old_te_val); + "%s Transfer-Encoding is not supported", + req->old_te_val); return HTTP_INTERNAL_SERVER_ERROR; } - if (old_cl_val && old_te_val) { + if (req->old_cl_val && req->old_te_val) { ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(01094) "client %s (%s) requested Transfer-Encoding " "chunked body with Content-Length (C-L ignored)", c->client_ip, c->remote_host ? c->remote_host: ""); - old_cl_val = NULL; - origin->keepalive = AP_CONN_CLOSE; + req->old_cl_val = NULL; p_conn->close = 1; } - /* Prefetch MAX_MEM_SPOOL bytes - * - * This helps us avoid any election of C-L v.s. T-E - * request bodies, since we are willing to keep in - * memory this much data, in any case. This gives - * us an instant C-L election if the body is of some - * reasonable size. - */ - temp_brigade = apr_brigade_create(p, bucket_alloc); - do { - status = ap_get_brigade(r->input_filters, temp_brigade, - AP_MODE_READBYTES, APR_BLOCK_READ, - MAX_MEM_SPOOL - bytes_read); - if (status != APR_SUCCESS) { - ap_log_rerror(APLOG_MARK, APLOG_ERR, status, r, APLOGNO(01095) - "prefetch request body failed to %pI (%s)" - " from %s (%s)", - p_conn->addr, p_conn->hostname ? p_conn->hostname: "", - c->client_ip, c->remote_host ? c->remote_host: ""); - return ap_map_http_request_error(status, HTTP_BAD_REQUEST); - } - - apr_brigade_length(temp_brigade, 1, &bytes); - bytes_read += bytes; - - /* - * Save temp_brigade in input_brigade. (At least) in the SSL case - * temp_brigade contains transient buckets whose data would get - * overwritten during the next call of ap_get_brigade in the loop. - * ap_save_brigade ensures these buckets to be set aside. - * Calling ap_save_brigade with NULL as filter is OK, because - * input_brigade already has been created and does not need to get - * created by ap_save_brigade. - */ - status = ap_save_brigade(NULL, &input_brigade, &temp_brigade, p); - if (status != APR_SUCCESS) { - ap_log_rerror(APLOG_MARK, APLOG_ERR, status, r, APLOGNO(01096) - "processing prefetched request body failed" - " to %pI (%s) from %s (%s)", - p_conn->addr, p_conn->hostname ? p_conn->hostname: "", - c->client_ip, c->remote_host ? c->remote_host: ""); - return HTTP_INTERNAL_SERVER_ERROR; - } - - /* Ensure we don't hit a wall where we have a buffer too small - * for ap_get_brigade's filters to fetch us another bucket, - * surrender once we hit 80 bytes less than MAX_MEM_SPOOL - * (an arbitrary value.) - */ - } while ((bytes_read < MAX_MEM_SPOOL - 80) - && !APR_BUCKET_IS_EOS(APR_BRIGADE_LAST(input_brigade))); + rv = ap_proxy_prefetch_input(r, req->backend, input_brigade, + req->prefetch_nonblocking ? APR_NONBLOCK_READ + : APR_BLOCK_READ, + &bytes_read, MAX_MEM_SPOOL); + if (rv != OK) { + return rv; + } /* Use chunked request body encoding or send a content-length body? 
* @@ -812,7 +571,7 @@ int ap_proxy_http_request(apr_pool_t *p, request_rec *r, * To reduce server resource use, setenv proxy-sendchunked * * Then address specific servers with conditional setenv - * options to restore the default behavior where desireable. + * options to restore the default behavior where desirable. * * We have to compute content length by reading the entire request * body; if request body is not small, we'll spool the remaining @@ -822,7 +581,8 @@ int ap_proxy_http_request(apr_pool_t *p, request_rec *r, * is absent, and the filters are unchanged (the body won't * be resized by another content filter). */ - if (APR_BUCKET_IS_EOS(APR_BRIGADE_LAST(input_brigade))) { + if (!APR_BRIGADE_EMPTY(input_brigade) + && APR_BUCKET_IS_EOS(APR_BRIGADE_LAST(input_brigade))) { /* The whole thing fit, so our decision is trivial, use * the filtered bytes read from the client for the request * body Content-Length. @@ -830,34 +590,41 @@ int ap_proxy_http_request(apr_pool_t *p, request_rec *r, * If we expected no body, and read no body, do not set * the Content-Length. */ - if (old_cl_val || old_te_val || bytes_read) { - old_cl_val = apr_off_t_toa(r->pool, bytes_read); + if (req->old_cl_val || req->old_te_val || bytes_read) { + req->old_cl_val = apr_off_t_toa(r->pool, bytes_read); + req->cl_val = bytes_read; } - rb_method = RB_STREAM_CL; + req->rb_method = RB_STREAM_CL; } - else if (old_te_val) { - if (force10 + else if (req->old_te_val) { + if (req->force10 || (apr_table_get(r->subprocess_env, "proxy-sendcl") && !apr_table_get(r->subprocess_env, "proxy-sendchunks") && !apr_table_get(r->subprocess_env, "proxy-sendchunked"))) { - rb_method = RB_SPOOL_CL; + req->rb_method = RB_SPOOL_CL; } else { - rb_method = RB_STREAM_CHUNKED; + req->rb_method = RB_STREAM_CHUNKED; } } - else if (old_cl_val) { + else if (req->old_cl_val) { if (r->input_filters == r->proto_input_filters) { - rb_method = RB_STREAM_CL; + if (!ap_parse_strict_length(&req->cl_val, req->old_cl_val)) { + ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(01085) + "could not parse request Content-Length (%s)", + req->old_cl_val); + return HTTP_BAD_REQUEST; + } + req->rb_method = RB_STREAM_CL; } - else if (!force10 + else if (!req->force10 && (apr_table_get(r->subprocess_env, "proxy-sendchunks") || apr_table_get(r->subprocess_env, "proxy-sendchunked")) && !apr_table_get(r->subprocess_env, "proxy-sendcl")) { - rb_method = RB_STREAM_CHUNKED; + req->rb_method = RB_STREAM_CHUNKED; } else { - rb_method = RB_SPOOL_CL; + req->rb_method = RB_SPOOL_CL; } } else { @@ -865,44 +632,60 @@ int ap_proxy_http_request(apr_pool_t *p, request_rec *r, * requests, and has the behavior that it will not add any C-L * when the old_cl_val is NULL. */ - rb_method = RB_SPOOL_CL; + req->rb_method = RB_SPOOL_CL; } -/* Yes I hate gotos. This is the subrequest shortcut */ -skip_body: - /* - * Handle Connection: header if we do HTTP/1.1 request: - * If we plan to close the backend connection sent Connection: close - * otherwise sent Connection: Keep-Alive. - */ - if (!force10) { - if (!ap_proxy_connection_reusable(p_conn)) { - buf = apr_pstrdup(p, "Connection: close" CRLF); - } - else { - buf = apr_pstrdup(p, "Connection: Keep-Alive" CRLF); - } - ap_xlate_proto_to_ascii(buf, strlen(buf)); - e = apr_bucket_pool_create(buf, strlen(buf), p, c->bucket_alloc); - APR_BRIGADE_INSERT_TAIL(header_brigade, e); - } - - /* send the request body, if any. 
*/ - switch(rb_method) { + switch (req->rb_method) { case RB_STREAM_CHUNKED: - rv = stream_reqbody_chunked(p, r, p_conn, origin, header_brigade, - input_brigade); + add_te_chunked(req->p, bucket_alloc, header_brigade); break; + case RB_STREAM_CL: - rv = stream_reqbody_cl(p, r, p_conn, origin, header_brigade, - input_brigade, old_cl_val); + if (req->old_cl_val) { + add_cl(req->p, bucket_alloc, header_brigade, req->old_cl_val); + } break; + + default: /* => RB_SPOOL_CL */ + /* If we have to spool the body, do it now, before connecting or + * reusing the backend connection. + */ + rv = ap_proxy_spool_input(r, p_conn, input_brigade, + &bytes, MAX_MEM_SPOOL); + if (rv != OK) { + return rv; + } + if (bytes || req->old_te_val || req->old_cl_val) { + add_cl(p, bucket_alloc, header_brigade, apr_off_t_toa(p, bytes)); + } + } + +/* Yes I hate gotos. This is the subrequest shortcut */ +skip_body: + terminate_headers(req); + + return OK; +} + +static int ap_proxy_http_request(proxy_http_req_t *req) +{ + int rv; + request_rec *r = req->r; + + /* send the request header/body, if any. */ + switch (req->rb_method) { case RB_SPOOL_CL: - rv = spool_reqbody_cl(p, r, p_conn, origin, header_brigade, - input_brigade, (old_cl_val != NULL) - || (old_te_val != NULL) - || (bytes_read > 0)); + case RB_STREAM_CL: + case RB_STREAM_CHUNKED: + if (req->do_100_continue) { + rv = ap_proxy_pass_brigade(req->bucket_alloc, r, req->backend, + req->origin, req->header_brigade, 1); + } + else { + rv = stream_reqbody(req); + } break; + default: /* shouldn't be possible */ rv = HTTP_INTERNAL_SERVER_ERROR; @@ -910,10 +693,12 @@ skip_body: } if (rv != OK) { + conn_rec *c = r->connection; /* apr_status_t value has been logged in lower level method */ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(01097) "pass request body failed to %pI (%s) from %s (%s)", - p_conn->addr, p_conn->hostname ? p_conn->hostname: "", + req->backend->addr, + req->backend->hostname ? req->backend->hostname: "", c->client_ip, c->remote_host ? c->remote_host: ""); return rv; } @@ -950,6 +735,7 @@ static request_rec *make_fake_req(conn_rec *c, request_rec *r) request_rec *rp; apr_pool_create(&pool, c->pool); + apr_pool_tag(pool, "proxy_http_rp"); rp = apr_pcalloc(pool, sizeof(*r)); @@ -1001,14 +787,14 @@ static void process_proxy_header(request_rec *r, proxy_dir_conf *c, }; int i; for (i = 0; date_hdrs[i]; ++i) { - if (!strcasecmp(date_hdrs[i], key)) { + if (!ap_cstr_casecmp(date_hdrs[i], key)) { apr_table_add(r->headers_out, key, date_canon(r->pool, value)); return; } } for (i = 0; transform_hdrs[i].name; ++i) { - if (!strcasecmp(transform_hdrs[i].name, key)) { + if (!ap_cstr_casecmp(transform_hdrs[i].name, key)) { apr_table_add(r->headers_out, key, (*transform_hdrs[i].func)(r, c, value)); return; @@ -1025,7 +811,7 @@ static void process_proxy_header(request_rec *r, proxy_dir_conf *c, * any sense at all, since we depend on buffer still containing * what was read by ap_getline() upon return. */ -static void ap_proxy_read_headers(request_rec *r, request_rec *rr, +static apr_status_t ap_proxy_read_headers(request_rec *r, request_rec *rr, char *buffer, int size, conn_rec *c, int *pread_len) { @@ -1057,19 +843,26 @@ static void ap_proxy_read_headers(request_rec *r, request_rec *rr, rc = ap_proxygetline(tmp_bb, buffer, size, rr, AP_GETLINE_FOLD | AP_GETLINE_NOSPC_EOL, &len); - if (len <= 0) - break; - if (APR_STATUS_IS_ENOSPC(rc)) { - /* The header could not fit in the provided buffer, warn. - * XXX: falls through with the truncated header, 5xx instead? 
- */ - int trunc = (len > 128 ? 128 : len) / 2; - ap_log_rerror(APLOG_MARK, APLOG_WARNING, rc, r, APLOGNO(10124) - "header size is over the limit allowed by " - "ResponseFieldSize (%d bytes). " - "Bad response header: '%.*s[...]%s'", - size, trunc, buffer, buffer + len - trunc); + if (rc != APR_SUCCESS) { + if (APR_STATUS_IS_ENOSPC(rc)) { + int trunc = (len > 128 ? 128 : len) / 2; + ap_log_rerror(APLOG_MARK, APLOG_WARNING, rc, r, APLOGNO(10124) + "header size is over the limit allowed by " + "ResponseFieldSize (%d bytes). " + "Bad response header: '%.*s[...]%s'", + size, trunc, buffer, buffer + len - trunc); + } + else { + ap_log_rerror(APLOG_MARK, APLOG_WARNING, rc, r, APLOGNO(10404) + "Error reading headers from backend"); + } + r->headers_out = NULL; + return rc; + } + + if (len <= 0) { + break; } else { ap_log_rerror(APLOG_MARK, APLOG_TRACE4, 0, r, "%s", buffer); @@ -1092,7 +885,7 @@ static void ap_proxy_read_headers(request_rec *r, request_rec *rr, if (psc->badopt == bad_error) { /* Nope, it wasn't even an extra HTTP header. Give up. */ r->headers_out = NULL; - return; + return APR_EINVAL; } else if (psc->badopt == bad_body) { /* if we've already started loading headers_out, then @@ -1106,13 +899,13 @@ static void ap_proxy_read_headers(request_rec *r, request_rec *rr, "in headers returned by %s (%s)", r->uri, r->method); *pread_len = len; - return; + return APR_SUCCESS; } else { ap_log_rerror(APLOG_MARK, APLOG_WARNING, 0, r, APLOGNO(01099) "No HTTP headers returned by %s (%s)", r->uri, r->method); - return; + return APR_SUCCESS; } } } @@ -1142,6 +935,7 @@ static void ap_proxy_read_headers(request_rec *r, request_rec *rr, process_proxy_header(r, dconf, buffer, value); saw_headers = 1; } + return APR_SUCCESS; } @@ -1188,13 +982,48 @@ static int add_trailers(void *data, const char *key, const char *val) return 1; } +static int send_continue_body(proxy_http_req_t *req) +{ + int status; + + /* Send the request body (fully). */ + switch(req->rb_method) { + case RB_SPOOL_CL: + case RB_STREAM_CL: + case RB_STREAM_CHUNKED: + status = stream_reqbody(req); + break; + default: + /* Shouldn't happen */ + status = HTTP_INTERNAL_SERVER_ERROR; + break; + } + if (status != OK) { + conn_rec *c = req->r->connection; + ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, req->r, + APLOGNO(10154) "pass request body failed " + "to %pI (%s) from %s (%s) with status %i", + req->backend->addr, + req->backend->hostname ? req->backend->hostname : "", + c->client_ip, c->remote_host ? 
c->remote_host : "", + status); + req->backend->close = 1; + } + return status; +} + static -apr_status_t ap_proxy_http_process_response(apr_pool_t * p, request_rec *r, - proxy_conn_rec **backend_ptr, - proxy_worker *worker, - proxy_server_conf *conf, - char *server_portstr) { +int ap_proxy_http_process_response(proxy_http_req_t *req) +{ + apr_pool_t *p = req->p; + request_rec *r = req->r; conn_rec *c = r->connection; + proxy_worker *worker = req->worker; + proxy_conn_rec *backend = req->backend; + conn_rec *origin = req->origin; + int do_100_continue = req->do_100_continue; + int status; + char *buffer; char fixed_buffer[HUGE_STRING_LEN]; const char *buf; @@ -1217,19 +1046,11 @@ apr_status_t ap_proxy_http_process_response(apr_pool_t * p, request_rec *r, int proxy_status = OK; const char *original_status_line = r->status_line; const char *proxy_status_line = NULL; - proxy_conn_rec *backend = *backend_ptr; - conn_rec *origin = backend->connection; apr_interval_time_t old_timeout = 0; proxy_dir_conf *dconf; - int do_100_continue; dconf = ap_get_module_config(r->per_dir_config, &proxy_module); - do_100_continue = (worker->s->ping_timeout_set - && ap_request_has_body(r) - && (PROXYREQ_REVERSE == r->proxyreq) - && !(apr_table_get(r->subprocess_env, "force-proxy-request-1.0"))); - bb = apr_brigade_create(p, c->bucket_alloc); pass_bb = apr_brigade_create(p, c->bucket_alloc); @@ -1248,7 +1069,7 @@ apr_status_t ap_proxy_http_process_response(apr_pool_t * p, request_rec *r, } /* Setup for 100-Continue timeout if appropriate */ - if (do_100_continue) { + if (do_100_continue && worker->s->ping_timeout_set) { apr_socket_timeout_get(backend->sock, &old_timeout); if (worker->s->ping_timeout != old_timeout) { apr_status_t rc; @@ -1273,6 +1094,9 @@ apr_status_t ap_proxy_http_process_response(apr_pool_t * p, request_rec *r, origin->local_addr->port)); do { apr_status_t rc; + const char *upgrade = NULL; + int major = 0, minor = 0; + int toclose = 0; apr_brigade_cleanup(bb); @@ -1291,7 +1115,8 @@ apr_status_t ap_proxy_http_process_response(apr_pool_t * p, request_rec *r, apr_table_setn(r->notes, "proxy_timedout", "1"); ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(01103) "read timeout"); if (do_100_continue) { - return ap_proxyerror(r, HTTP_SERVICE_UNAVAILABLE, "Timeout on 100-Continue"); + return ap_proxyerror(r, HTTP_SERVICE_UNAVAILABLE, + "Timeout on 100-Continue"); } } /* @@ -1340,15 +1165,30 @@ apr_status_t ap_proxy_http_process_response(apr_pool_t * p, request_rec *r, ap_pass_brigade(r->output_filters, bb); /* Mark the backend connection for closing */ backend->close = 1; - /* Need to return OK to avoid sending an error message */ - return OK; + if (origin->keepalives) { + /* We already had a request on this backend connection and + * might just have run into a keepalive race. Hence we + * think positive and assume that the backend is fine and + * we do not need to signal an error on backend side. + */ + return OK; + } + /* + * This happened on our first request on this connection to the + * backend. This indicates something fishy with the backend. + * Return HTTP_INTERNAL_SERVER_ERROR to signal an unrecoverable + * server error. We do not worry about r->status code and a + * possible error response here as the ap_http_outerror_filter + * will fix all of this for us. 
+ */ + return HTTP_INTERNAL_SERVER_ERROR; } - else if (!c->keepalives) { - ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(01105) - "NOT Closing connection to client" - " although reading from backend server %s:%d" - " failed.", - backend->hostname, backend->port); + if (!c->keepalives) { + ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(01105) + "NOT Closing connection to client" + " although reading from backend server %s:%d" + " failed.", + backend->hostname, backend->port); } return ap_proxyerror(r, HTTP_BAD_GATEWAY, "Error reading from remote server"); @@ -1360,9 +1200,6 @@ apr_status_t ap_proxy_http_process_response(apr_pool_t * p, request_rec *r, * This is buggy if we ever see an HTTP/1.10 */ if (apr_date_checkmask(buffer, "HTTP/#.# ###*")) { - int major, minor; - int toclose; - major = buffer[5] - '0'; minor = buffer[7] - '0'; @@ -1371,8 +1208,8 @@ apr_status_t ap_proxy_http_process_response(apr_pool_t * p, request_rec *r, */ if ((major != 1) || (len >= response_field_size - 1)) { return ap_proxyerror(r, HTTP_BAD_GATEWAY, - apr_pstrcat(p, "Corrupt status line returned by remote " - "server: ", buffer, NULL)); + apr_pstrcat(p, "Corrupt status line returned " + "by remote server: ", buffer, NULL)); } backasswards = 0; @@ -1404,7 +1241,7 @@ apr_status_t ap_proxy_http_process_response(apr_pool_t * p, request_rec *r, /* read the headers. */ /* N.B. for HTTP/1.0 clients, we have to fold line-wrapped headers*/ - /* Also, take care with headers with multiple occurences. */ + /* Also, take care with headers with multiple occurrences. */ /* First, tuck away all already existing cookies */ save_table = apr_table_make(r->pool, 2); @@ -1412,10 +1249,10 @@ apr_status_t ap_proxy_http_process_response(apr_pool_t * p, request_rec *r, "Set-Cookie", NULL); /* shove the headers direct into r->headers_out */ - ap_proxy_read_headers(r, backend->r, buffer, response_field_size, origin, - &pread_len); + rc = ap_proxy_read_headers(r, backend->r, buffer, response_field_size, + origin, &pread_len); - if (r->headers_out == NULL) { + if (rc != APR_SUCCESS || r->headers_out == NULL) { ap_log_rerror(APLOG_MARK, APLOG_WARNING, 0, r, APLOGNO(01106) "bad HTTP/%d.%d header returned by %s (%s)", major, minor, r->uri, r->method); @@ -1443,9 +1280,14 @@ apr_status_t ap_proxy_http_process_response(apr_pool_t * p, request_rec *r, save_table); } + /* + * Save a possible Transfer-Encoding header as we need it later for + * ap_http_filter to know where to end. + */ + te = apr_table_get(r->headers_out, "Transfer-Encoding"); + /* can't have both Content-Length and Transfer-Encoding */ - if (apr_table_get(r->headers_out, "Transfer-Encoding") - && apr_table_get(r->headers_out, "Content-Length")) { + if (te && apr_table_get(r->headers_out, "Content-Length")) { /* * 2616 section 4.4, point 3: "if both Transfer-Encoding * and Content-Length are received, the latter MUST be @@ -1463,16 +1305,29 @@ apr_status_t ap_proxy_http_process_response(apr_pool_t * p, request_rec *r, backend->close = 1; } - /* - * Save a possible Transfer-Encoding header as we need it later for - * ap_http_filter to know where to end. - */ - te = apr_table_get(r->headers_out, "Transfer-Encoding"); + upgrade = apr_table_get(r->headers_out, "Upgrade"); + if (proxy_status == HTTP_SWITCHING_PROTOCOLS) { + if (!upgrade || !req->upgrade || (strcasecmp(req->upgrade, + upgrade) != 0)) { + return ap_proxyerror(r, HTTP_BAD_GATEWAY, + apr_pstrcat(p, "Unexpected Upgrade: ", + upgrade ? upgrade : "n/a", + " (expecting ", + req->upgrade ? 
req->upgrade + : "n/a", ")", + NULL)); + } + backend->close = 1; + } /* strip connection listed hop-by-hop headers from response */ toclose = ap_proxy_clear_connection_fn(r, r->headers_out); if (toclose) { backend->close = 1; + if (toclose < 0) { + return ap_proxyerror(r, HTTP_BAD_GATEWAY, + "Malformed connection header"); + } } if ((buf = apr_table_get(r->headers_out, "Content-Type"))) { @@ -1491,7 +1346,8 @@ apr_status_t ap_proxy_http_process_response(apr_pool_t * p, request_rec *r, r->headers_out = ap_proxy_clean_warnings(p, r->headers_out); /* handle Via header in response */ - if (conf->viaopt != via_off && conf->viaopt != via_block) { + if (req->sconf->viaopt != via_off + && req->sconf->viaopt != via_block) { const char *server_name = ap_get_server_name(r); /* If USE_CANONICAL_NAME_OFF was configured for the proxy virtual host, * then the server name returned by ap_get_server_name() is the @@ -1502,18 +1358,18 @@ apr_status_t ap_proxy_http_process_response(apr_pool_t * p, request_rec *r, server_name = r->server->server_hostname; /* create a "Via:" response header entry and merge it */ apr_table_addn(r->headers_out, "Via", - (conf->viaopt == via_full) + (req->sconf->viaopt == via_full) ? apr_psprintf(p, "%d.%d %s%s (%s)", HTTP_VERSION_MAJOR(r->proto_num), HTTP_VERSION_MINOR(r->proto_num), server_name, - server_portstr, + req->server_portstr, AP_SERVER_BASEVERSION) : apr_psprintf(p, "%d.%d %s%s", HTTP_VERSION_MAJOR(r->proto_num), HTTP_VERSION_MINOR(r->proto_num), server_name, - server_portstr) + req->server_portstr) ); } @@ -1522,27 +1378,25 @@ apr_status_t ap_proxy_http_process_response(apr_pool_t * p, request_rec *r, backend->close = 1; origin->keepalive = AP_CONN_CLOSE; } + else { + /* + * Keep track of the number of keepalives we processed on this + * connection. + */ + origin->keepalives++; + } + } else { /* an http/0.9 response */ backasswards = 1; - r->status = 200; + r->status = proxy_status = 200; r->status_line = "200 OK"; backend->close = 1; } if (ap_is_HTTP_INFO(proxy_status)) { - interim_response++; - /* Reset to old timeout iff we've adjusted it */ - if (do_100_continue - && (r->status == HTTP_CONTINUE) - && (worker->s->ping_timeout != old_timeout)) { - apr_socket_timeout_set(backend->sock, old_timeout); - } - } - else { - interim_response = 0; - } - if (interim_response) { + const char *policy = NULL; + /* RFC2616 tells us to forward this. * * OTOH, an interim response here may mean the backend @@ -1555,15 +1409,32 @@ apr_status_t ap_proxy_http_process_response(apr_pool_t * p, request_rec *r, * * So let's make it configurable. * - * We need to set "r->expecting_100 = 1" otherwise origin - * server behaviour will apply. + * We need to force "r->expecting_100 = 1" for RFC behaviour + * otherwise ap_send_interim_response() does nothing when + * the client did not ask for 100-continue. + * + * 101 Switching Protocol has its own configuration which + * shouldn't be interfered by "proxy-interim-response". */ - const char *policy = apr_table_get(r->subprocess_env, - "proxy-interim-response"); + if (proxy_status != HTTP_SWITCHING_PROTOCOLS) { + policy = apr_table_get(r->subprocess_env, + "proxy-interim-response"); + } ap_log_rerror(APLOG_MARK, APLOG_TRACE2, 0, r, - "HTTP: received interim %d response", r->status); + "HTTP: received interim %d response (policy: %s)", + r->status, policy ? 
policy : "n/a"); if (!policy - || (!strcasecmp(policy, "RFC") && ((r->expecting_100 = 1)))) { + || (!strcasecmp(policy, "RFC") + && (proxy_status != HTTP_CONTINUE + || (r->expecting_100 = 1)))) { + switch (proxy_status) { + case HTTP_SWITCHING_PROTOCOLS: + AP_DEBUG_ASSERT(upgrade != NULL); + apr_table_setn(r->headers_out, "Connection", "Upgrade"); + apr_table_setn(r->headers_out, "Upgrade", + apr_pstrdup(p, upgrade)); + break; + } ap_send_interim_response(r, 1); } /* FIXME: refine this to be able to specify per-response-status @@ -1573,57 +1444,144 @@ apr_status_t ap_proxy_http_process_response(apr_pool_t * p, request_rec *r, ap_log_rerror(APLOG_MARK, APLOG_WARNING, 0, r, APLOGNO(01108) "undefined proxy interim response policy"); } + interim_response++; } - /* Moved the fixups of Date headers and those affected by - * ProxyPassReverse/etc from here to ap_proxy_read_headers + else { + interim_response = 0; + } + + /* If we still do 100-continue (end-to-end or ping), either the + * current response is the expected "100 Continue" and we are done + * with this mode, or this is another interim response and we'll wait + * for the next one, or this is a final response and hence the backend + * did not honor our expectation. */ + if (do_100_continue && (!interim_response + || proxy_status == HTTP_CONTINUE)) { + /* RFC 7231 - Section 5.1.1 - Expect - Requirement for servers + * A server that responds with a final status code before + * reading the entire message body SHOULD indicate in that + * response whether it intends to close the connection or + * continue reading and discarding the request message. + * + * So, if this response is not an interim 100 Continue, we can + * avoid sending the request body if the backend responded with + * "Connection: close" or HTTP < 1.1, and either let the core + * discard it or the caller try another balancer member with the + * same body (given status 503, though not implemented yet). + */ + int do_send_body = (proxy_status == HTTP_CONTINUE + || (!toclose && major > 0 && minor > 0)); - if ((proxy_status == 401) && (dconf->error_override)) { - const char *buf; - const char *wa = "WWW-Authenticate"; - if ((buf = apr_table_get(r->headers_out, wa))) { - apr_table_set(r->err_headers_out, wa, buf); - } else { - ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(01109) - "origin server sent 401 without " - "WWW-Authenticate header"); + /* Reset to old timeout iff we've adjusted it. */ + if (worker->s->ping_timeout_set) { + apr_socket_timeout_set(backend->sock, old_timeout); } - } - r->sent_bodyct = 1; - /* - * Is it an HTTP/0.9 response or did we maybe preread the 1st line of - * the response? If so, load the extra data. These are 2 mutually - * exclusive possibilities, that just happen to require very - * similar behavior. - */ - if (backasswards || pread_len) { - apr_ssize_t cntr = (apr_ssize_t)pread_len; - if (backasswards) { - /*@@@FIXME: - * At this point in response processing of a 0.9 response, - * we don't know yet whether data is binary or not. - * mod_charset_lite will get control later on, so it cannot - * decide on the conversion of this buffer full of data. - * However, chances are that we are not really talking to an - * HTTP/0.9 server, but to some different protocol, therefore - * the best guess IMHO is to always treat the buffer as "text/x": + ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(10153) + "HTTP: %s100 continue sent by %pI (%s): " + "%ssending body (response: HTTP/%i.%i %s)", + proxy_status != HTTP_CONTINUE ? 
"no " : "", + backend->addr, + backend->hostname ? backend->hostname : "", + do_send_body ? "" : "not ", + major, minor, proxy_status_line); + + if (do_send_body) { + status = send_continue_body(req); + if (status != OK) { + return status; + } + } + else { + /* If we don't read the client connection any further, since + * there are pending data it should be "Connection: close"d to + * prevent reuse. We don't exactly c->keepalive = AP_CONN_CLOSE + * here though, because error_override or a potential retry on + * another backend could finally read that data and finalize + * the request processing, making keep-alive possible. So what + * we do is leaving r->expecting_100 alone, ap_set_keepalive() + * will do the right thing according to the final response and + * any later update of r->expecting_100. */ - ap_xlate_proto_to_ascii(buffer, len); - cntr = (apr_ssize_t)len; } - e = apr_bucket_heap_create(buffer, cntr, NULL, c->bucket_alloc); - APR_BRIGADE_INSERT_TAIL(bb, e); + + /* Once only! */ + do_100_continue = 0; + } + + if (proxy_status == HTTP_SWITCHING_PROTOCOLS) { + apr_status_t rv; + proxy_tunnel_rec *tunnel; + apr_interval_time_t client_timeout = -1, + backend_timeout = -1; + + /* If we didn't send the full body yet, do it now */ + if (do_100_continue) { + r->expecting_100 = 0; + status = send_continue_body(req); + if (status != OK) { + return status; + } + } + + ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(10239) + "HTTP: tunneling protocol %s", upgrade); + + rv = ap_proxy_tunnel_create(&tunnel, r, origin, upgrade); + if (rv != APR_SUCCESS) { + ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(10240) + "can't create tunnel for %s", upgrade); + return HTTP_INTERNAL_SERVER_ERROR; + } + + /* Set timeout to the highest configured for client or backend */ + apr_socket_timeout_get(backend->sock, &backend_timeout); + apr_socket_timeout_get(ap_get_conn_socket(c), &client_timeout); + if (backend_timeout >= 0 && backend_timeout > client_timeout) { + tunnel->timeout = backend_timeout; + } + else { + tunnel->timeout = client_timeout; + } + + /* Let proxy tunnel forward everything */ + status = ap_proxy_tunnel_run(tunnel); + + /* We are done with both connections */ + return DONE; + } + + if (interim_response) { + /* Already forwarded above, read next response */ + continue; } + + /* Moved the fixups of Date headers and those affected by + * ProxyPassReverse/etc from here to ap_proxy_read_headers + */ + /* PR 41646: get HEAD right with ProxyErrorOverride */ - if (ap_is_HTTP_ERROR(r->status) && dconf->error_override) { + if (ap_proxy_should_override(dconf, proxy_status)) { + if (proxy_status == HTTP_UNAUTHORIZED) { + const char *buf; + const char *wa = "WWW-Authenticate"; + if ((buf = apr_table_get(r->headers_out, wa))) { + apr_table_set(r->err_headers_out, wa, buf); + } else { + ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(01109) + "origin server sent 401 without " + "WWW-Authenticate header"); + } + } + /* clear r->status for override error, otherwise ErrorDocument * thinks that this is a recursive error, and doesn't find the * custom error page */ r->status = HTTP_OK; /* Discard body, if one is expected */ - if (!r->header_only && !AP_STATUS_IS_HEADER_ONLY(proxy_status)) { + if (!r->header_only && !AP_STATUS_IS_HEADER_ONLY(proxy_status)) { const char *tmp; /* Add minimal headers needed to allow http_in filter * detecting end of body without waiting for a timeout. 
*/ @@ -1646,11 +1604,49 @@ apr_status_t ap_proxy_http_process_response(apr_pool_t * p, request_rec *r, return proxy_status; } + /* Forward back Upgrade header if it matches the configured one(s), it + * may be an HTTP_UPGRADE_REQUIRED response or some other status where + * Upgrade makes sense to negotiate the protocol by other means. + */ + if (upgrade && ap_proxy_worker_can_upgrade(p, worker, upgrade, + (*req->proto == 'w') + ? "WebSocket" : NULL)) { + apr_table_setn(r->headers_out, "Connection", "Upgrade"); + apr_table_setn(r->headers_out, "Upgrade", apr_pstrdup(p, upgrade)); + } + + r->sent_bodyct = 1; + /* + * Is it an HTTP/0.9 response or did we maybe preread the 1st line of + * the response? If so, load the extra data. These are 2 mutually + * exclusive possibilities, that just happen to require very + * similar behavior. + */ + if (backasswards || pread_len) { + apr_ssize_t cntr = (apr_ssize_t)pread_len; + if (backasswards) { + /*@@@FIXME: + * At this point in response processing of a 0.9 response, + * we don't know yet whether data is binary or not. + * mod_charset_lite will get control later on, so it cannot + * decide on the conversion of this buffer full of data. + * However, chances are that we are not really talking to an + * HTTP/0.9 server, but to some different protocol, therefore + * the best guess IMHO is to always treat the buffer as "text/x": + */ + ap_xlate_proto_to_ascii(buffer, len); + cntr = (apr_ssize_t)len; + } + e = apr_bucket_heap_create(buffer, cntr, NULL, c->bucket_alloc); + APR_BRIGADE_INSERT_TAIL(bb, e); + } + /* send body - but only if a body is expected */ if ((!r->header_only) && /* not HEAD request */ - !interim_response && /* not any 1xx response */ (proxy_status != HTTP_NO_CONTENT) && /* not 204 */ (proxy_status != HTTP_NOT_MODIFIED)) { /* not 304 */ + apr_read_type_e mode; + int finish; /* We need to copy the output headers and treat them as input * headers as well. BUT, we need to do this before we remove @@ -1671,152 +1667,148 @@ apr_status_t ap_proxy_http_process_response(apr_pool_t * p, request_rec *r, ap_log_rerror(APLOG_MARK, APLOG_TRACE3, 0, r, "start body send"); - /* - * if we are overriding the errors, we can't put the content - * of the page into the brigade + /* read the body, pass it to the output filters */ + + /* Handle the case where the error document is itself reverse + * proxied and was successful. We must maintain any previous + * error status so that an underlying error (eg HTTP_NOT_FOUND) + * doesn't become an HTTP_OK. */ - if (!dconf->error_override || !ap_is_HTTP_ERROR(proxy_status)) { - /* read the body, pass it to the output filters */ - apr_read_type_e mode = APR_NONBLOCK_READ; - int finish = FALSE; - - /* Handle the case where the error document is itself reverse - * proxied and was successful. We must maintain any previous - * error status so that an underlying error (eg HTTP_NOT_FOUND) - * doesn't become an HTTP_OK. 
- */ - if (dconf->error_override && !ap_is_HTTP_ERROR(proxy_status) - && ap_is_HTTP_ERROR(original_status)) { - r->status = original_status; - r->status_line = original_status_line; - } + if (ap_proxy_should_override(dconf, original_status)) { + r->status = original_status; + r->status_line = original_status_line; + } - do { - apr_off_t readbytes; - apr_status_t rv; - - rv = ap_get_brigade(backend->r->input_filters, bb, - AP_MODE_READBYTES, mode, - conf->io_buffer_size); - - /* ap_get_brigade will return success with an empty brigade - * for a non-blocking read which would block: */ - if (mode == APR_NONBLOCK_READ - && (APR_STATUS_IS_EAGAIN(rv) - || (rv == APR_SUCCESS && APR_BRIGADE_EMPTY(bb)))) { - /* flush to the client and switch to blocking mode */ - e = apr_bucket_flush_create(c->bucket_alloc); - APR_BRIGADE_INSERT_TAIL(bb, e); - if (ap_pass_brigade(r->output_filters, bb) - || c->aborted) { - backend->close = 1; - break; - } - apr_brigade_cleanup(bb); - mode = APR_BLOCK_READ; - continue; - } - else if (rv == APR_EOF) { - backend->close = 1; - break; - } - else if (rv != APR_SUCCESS) { - /* In this case, we are in real trouble because - * our backend bailed on us. Pass along a 502 error - * error bucket - */ - ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01110) - "error reading response"); - ap_proxy_backend_broke(r, bb); - ap_pass_brigade(r->output_filters, bb); - backend_broke = 1; + mode = APR_NONBLOCK_READ; + finish = FALSE; + do { + apr_off_t readbytes; + apr_status_t rv; + + rv = ap_get_brigade(backend->r->input_filters, bb, + AP_MODE_READBYTES, mode, + req->sconf->io_buffer_size); + + /* ap_get_brigade will return success with an empty brigade + * for a non-blocking read which would block: */ + if (mode == APR_NONBLOCK_READ + && (APR_STATUS_IS_EAGAIN(rv) + || (rv == APR_SUCCESS && APR_BRIGADE_EMPTY(bb)))) { + /* flush to the client and switch to blocking mode */ + e = apr_bucket_flush_create(c->bucket_alloc); + APR_BRIGADE_INSERT_TAIL(bb, e); + if (ap_pass_brigade(r->output_filters, bb) + || c->aborted) { backend->close = 1; break; } - /* next time try a non-blocking read */ - mode = APR_NONBLOCK_READ; + apr_brigade_cleanup(bb); + mode = APR_BLOCK_READ; + continue; + } + else if (rv == APR_EOF) { + backend->close = 1; + break; + } + else if (rv != APR_SUCCESS) { + /* In this case, we are in real trouble because + * our backend bailed on us. 
Pass along a 502 error + * error bucket + */ + ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01110) + "error reading response"); + apr_brigade_cleanup(bb); + ap_proxy_backend_broke(r, bb); + ap_pass_brigade(r->output_filters, bb); + backend_broke = 1; + backend->close = 1; + break; + } + /* next time try a non-blocking read */ + mode = APR_NONBLOCK_READ; - if (!apr_is_empty_table(backend->r->trailers_in)) { - apr_table_do(add_trailers, r->trailers_out, - backend->r->trailers_in, NULL); - apr_table_clear(backend->r->trailers_in); - } + if (!apr_is_empty_table(backend->r->trailers_in)) { + apr_table_do(add_trailers, r->trailers_out, + backend->r->trailers_in, NULL); + apr_table_clear(backend->r->trailers_in); + } - apr_brigade_length(bb, 0, &readbytes); - backend->worker->s->read += readbytes; + apr_brigade_length(bb, 0, &readbytes); + backend->worker->s->read += readbytes; #if DEBUGGING - { - ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(01111) - "readbytes: %#x", readbytes); - } + { + ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(01111) + "readbytes: %#x", readbytes); + } #endif - /* sanity check */ - if (APR_BRIGADE_EMPTY(bb)) { - break; - } + /* sanity check */ + if (APR_BRIGADE_EMPTY(bb)) { + break; + } - /* Switch the allocator lifetime of the buckets */ - ap_proxy_buckets_lifetime_transform(r, bb, pass_bb); - - /* found the last brigade? */ - if (APR_BUCKET_IS_EOS(APR_BRIGADE_LAST(pass_bb))) { - - /* signal that we must leave */ - finish = TRUE; - - /* the brigade may contain transient buckets that contain - * data that lives only as long as the backend connection. - * Force a setaside so these transient buckets become heap - * buckets that live as long as the request. - */ - for (e = APR_BRIGADE_FIRST(pass_bb); e - != APR_BRIGADE_SENTINEL(pass_bb); e - = APR_BUCKET_NEXT(e)) { - apr_bucket_setaside(e, r->pool); - } - - /* finally it is safe to clean up the brigade from the - * connection pool, as we have forced a setaside on all - * buckets. - */ - apr_brigade_cleanup(bb); - - /* make sure we release the backend connection as soon - * as we know we are done, so that the backend isn't - * left waiting for a slow client to eventually - * acknowledge the data. - */ - ap_proxy_release_connection(backend->worker->s->scheme, - backend, r->server); - /* Ensure that the backend is not reused */ - *backend_ptr = NULL; + /* Switch the allocator lifetime of the buckets */ + ap_proxy_buckets_lifetime_transform(r, bb, pass_bb); - } + /* found the last brigade? */ + if (APR_BUCKET_IS_EOS(APR_BRIGADE_LAST(pass_bb))) { - /* try send what we read */ - if (ap_pass_brigade(r->output_filters, pass_bb) != APR_SUCCESS - || c->aborted) { - /* Ack! Phbtt! Die! User aborted! */ - /* Only close backend if we haven't got all from the - * backend. Furthermore if *backend_ptr is NULL it is no - * longer safe to fiddle around with backend as it might - * be already in use by another thread. - */ - if (*backend_ptr) { - backend->close = 1; /* this causes socket close below */ - } - finish = TRUE; + /* signal that we must leave */ + finish = TRUE; + + /* the brigade may contain transient buckets that contain + * data that lives only as long as the backend connection. + * Force a setaside so these transient buckets become heap + * buckets that live as long as the request. 
+ */ + for (e = APR_BRIGADE_FIRST(pass_bb); e + != APR_BRIGADE_SENTINEL(pass_bb); e + = APR_BUCKET_NEXT(e)) { + apr_bucket_setaside(e, r->pool); } - /* make sure we always clean up after ourselves */ - apr_brigade_cleanup(pass_bb); + /* finally it is safe to clean up the brigade from the + * connection pool, as we have forced a setaside on all + * buckets. + */ apr_brigade_cleanup(bb); - } while (!finish); - } + /* make sure we release the backend connection as soon + * as we know we are done, so that the backend isn't + * left waiting for a slow client to eventually + * acknowledge the data. + */ + ap_proxy_release_connection(backend->worker->s->scheme, + backend, r->server); + /* Ensure that the backend is not reused */ + req->backend = NULL; + + } + + /* try send what we read */ + if (ap_pass_brigade(r->output_filters, pass_bb) != APR_SUCCESS + || c->aborted) { + /* Ack! Phbtt! Die! User aborted! */ + /* Only close backend if we haven't got all from the + * backend. Furthermore if req->backend is NULL it is no + * longer safe to fiddle around with backend as it might + * be already in use by another thread. + */ + if (req->backend) { + /* this causes socket close below */ + req->backend->close = 1; + } + finish = TRUE; + } + + /* make sure we always clean up after ourselves */ + apr_brigade_cleanup(pass_bb); + apr_brigade_cleanup(bb); + + } while (!finish); + ap_log_rerror(APLOG_MARK, APLOG_TRACE2, 0, r, "end body send"); } - else if (!interim_response) { + else { ap_log_rerror(APLOG_MARK, APLOG_TRACE2, 0, r, "header only"); /* make sure we release the backend connection as soon @@ -1826,7 +1818,8 @@ apr_status_t ap_proxy_http_process_response(apr_pool_t * p, request_rec *r, */ ap_proxy_release_connection(backend->worker->s->scheme, backend, r->server); - *backend_ptr = NULL; + /* Ensure that the backend is not reused */ + req->backend = NULL; /* Pass EOS bucket down the filter chain. */ e = apr_bucket_eos_create(c->bucket_alloc); @@ -1880,62 +1873,108 @@ static int proxy_http_handler(request_rec *r, proxy_worker *worker, apr_port_t proxyport) { int status; - char server_portstr[32]; - char *scheme; - const char *proxy_function; - const char *u; + const char *scheme; + const char *u = url; + proxy_http_req_t *req = NULL; proxy_conn_rec *backend = NULL; + apr_bucket_brigade *input_brigade = NULL; int is_ssl = 0; conn_rec *c = r->connection; + proxy_dir_conf *dconf; int retry = 0; + char *locurl = url; + int toclose = 0; /* * Use a shorter-lived pool to reduce memory usage * and avoid a memory leak */ apr_pool_t *p = r->pool; - apr_uri_t *uri = apr_palloc(p, sizeof(*uri)); + apr_uri_t *uri; - /* find the scheme */ - u = strchr(url, ':'); - if (u == NULL || u[1] != '/' || u[2] != '/' || u[3] == '\0') - return DECLINED; - if ((u - url) > 14) - return HTTP_BAD_REQUEST; - scheme = apr_pstrmemdup(p, url, u - url); - /* scheme is lowercase */ - ap_str_tolower(scheme); - /* is it for us? 
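/*
 * Illustrative sketch (not part of the patch): the new get_url_scheme()
 * helper called below is presumably added earlier in mod_proxy_http.c by
 * this patch and is not visible here.  For reading the call site, a
 * plausible shape is: match a known scheme prefix case-insensitively,
 * report whether it is TLS, and advance the cursor past "scheme:" so the
 * caller can check for the "//host" part.  Hypothetical name and body,
 * assuming httpd.h for ap_cstr_casecmpn():
 */
static const char *get_url_scheme_sketch(const char **url, int *is_ssl)
{
    static const struct { const char *name; int len; int ssl; } schemes[] = {
        { "https", 5, 1 }, { "http", 4, 0 },
        { "wss",   3, 1 }, { "ws",   2, 0 },
    };
    int i;

    for (i = 0; i < (int)(sizeof(schemes) / sizeof(schemes[0])); ++i) {
        if (ap_cstr_casecmpn(*url, schemes[i].name, schemes[i].len) == 0
            && (*url)[schemes[i].len] == ':') {
            *is_ssl = schemes[i].ssl;
            *url += schemes[i].len + 1;  /* leave "//host..." for the caller */
            return schemes[i].name;
        }
    }
    *is_ssl = 0;
    return NULL;  /* not an http(s)/ws(s) URL; caller may still try ftp */
}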
*/ - if (strcmp(scheme, "https") == 0) { - if (!ap_proxy_ssl_enable(NULL)) { - ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(01112) - "HTTPS: declining URL %s (mod_ssl not configured?)", - url); - return DECLINED; - } - is_ssl = 1; - proxy_function = "HTTPS"; + scheme = get_url_scheme(&u, &is_ssl); + if (!scheme && proxyname && strncasecmp(url, "ftp:", 4) == 0) { + u = url + 4; + scheme = "ftp"; + is_ssl = 0; } - else if (!(strcmp(scheme, "http") == 0 || (strcmp(scheme, "ftp") == 0 && proxyname))) { - ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(01113) "HTTP: declining URL %s", - url); - return DECLINED; /* only interested in HTTP, or FTP via proxy */ + if (!scheme || u[0] != '/' || u[1] != '/' || u[2] == '\0') { + ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(01113) + "HTTP: declining URL %s", url); + return DECLINED; /* only interested in HTTP, WS or FTP via proxy */ } - else { - if (*scheme == 'h') - proxy_function = "HTTP"; - else - proxy_function = "FTP"; + if (is_ssl && !ap_ssl_has_outgoing_handlers()) { + ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(01112) + "HTTP: declining URL %s (mod_ssl not configured?)", url); + return DECLINED; } ap_log_rerror(APLOG_MARK, APLOG_TRACE1, 0, r, "HTTP: serving URL %s", url); - /* create space for state information */ - if ((status = ap_proxy_acquire_connection(proxy_function, &backend, - worker, r->server)) != OK) - goto cleanup; + if ((status = ap_proxy_acquire_connection(scheme, &backend, + worker, r->server)) != OK) { + return status; + } backend->is_ssl = is_ssl; + req = apr_pcalloc(p, sizeof(*req)); + req->p = p; + req->r = r; + req->sconf = conf; + req->worker = worker; + req->backend = backend; + req->proto = scheme; + req->bucket_alloc = c->bucket_alloc; + req->rb_method = RB_INIT; + + dconf = ap_get_module_config(r->per_dir_config, &proxy_module); + + if (apr_table_get(r->subprocess_env, "force-proxy-request-1.0")) { + req->force10 = 1; + } + else if (*worker->s->upgrade || *req->proto == 'w') { + /* Forward Upgrade header if it matches the configured one(s), + * the default being "WebSocket" for ws[s] schemes. + */ + const char *upgrade = apr_table_get(r->headers_in, "Upgrade"); + if (upgrade && ap_proxy_worker_can_upgrade(p, worker, upgrade, + (*req->proto == 'w') + ? "WebSocket" : NULL)) { + req->upgrade = upgrade; + } + } + + /* We possibly reuse input data prefetched in previous call(s), e.g. for a + * balancer fallback scenario, and in this case the 100 continue settings + * should be consistent between balancer members. If not, we need to ignore + * Proxy100Continue on=>off once we tried to prefetch already, otherwise + * the HTTP_IN filter won't send 100 Continue for us anymore, and we might + * deadlock with the client waiting for each other. Note that off=>on is + * not an issue because in this case r->expecting_100 is false (the 100 + * Continue is out already), but we make sure that prefetch will be + * nonblocking to avoid passing more time there. + */ + apr_pool_userdata_get((void **)&input_brigade, "proxy-req-input", p); + + /* Should we handle end-to-end or ping 100-continue? */ + if (!req->force10 + && ((r->expecting_100 && (dconf->forward_100_continue || input_brigade)) + || PROXY_SHOULD_PING_100_CONTINUE(worker, r))) { + /* Tell ap_proxy_create_hdrbrgd() to preserve/add the Expect header */ + apr_table_setn(r->notes, "proxy-100-continue", "1"); + req->do_100_continue = 1; + } + + /* Should we block while prefetching the body or try nonblocking and flush + * data to the backend ASAP? 
+ */ + if (input_brigade + || req->do_100_continue + || apr_table_get(r->subprocess_env, + "proxy-prefetch-nonblocking")) { + req->prefetch_nonblocking = 1; + } + /* * In the case that we are handling a reverse proxy connection and this * is not a request that is coming over an already kept alive connection @@ -1949,20 +1988,68 @@ static int proxy_http_handler(request_rec *r, proxy_worker *worker, backend->close = 1; } + /* Step One: Determine Who To Connect To */ + uri = apr_palloc(p, sizeof(*uri)); + if ((status = ap_proxy_determine_connection(p, r, conf, worker, backend, + uri, &locurl, proxyname, + proxyport, req->server_portstr, + sizeof(req->server_portstr)))) + goto cleanup; + + /* The header is always (re-)built since it depends on worker settings, + * but the body can be fetched only once (even partially), so it's saved + * in between proxy_http_handler() calls should we come back here. + */ + req->header_brigade = apr_brigade_create(p, req->bucket_alloc); + if (input_brigade == NULL) { + input_brigade = apr_brigade_create(p, req->bucket_alloc); + apr_pool_userdata_setn(input_brigade, "proxy-req-input", NULL, p); + } + req->input_brigade = input_brigade; + + /* Prefetch (nonlocking) the request body so to increase the chance to get + * the whole (or enough) body and determine Content-Length vs chunked or + * spooled. By doing this before connecting or reusing the backend, we want + * to minimize the delay between this connection is considered alive and + * the first bytes sent (should the client's link be slow or some input + * filter retain the data). This is a best effort to prevent the backend + * from closing (from under us) what it thinks is an idle connection, hence + * to reduce to the minimum the unavoidable local is_socket_connected() vs + * remote keepalive race condition. + */ + if ((status = ap_proxy_http_prefetch(req, uri, locurl)) != OK) + goto cleanup; + + /* We need to reset backend->close now, since ap_proxy_http_prefetch() set + * it to disable the reuse of the connection *after* this request (no keep- + * alive), not to close any reusable connection before this request. However + * assure what is expected later by using a local flag and do the right thing + * when ap_proxy_connect_backend() below provides the connection to close. + */ + toclose = backend->close; + backend->close = 0; + while (retry < 2) { - char *locurl = url; + if (retry) { + char *newurl = url; - /* Step One: Determine Who To Connect To */ - if ((status = ap_proxy_determine_connection(p, r, conf, worker, backend, - uri, &locurl, proxyname, - proxyport, server_portstr, - sizeof(server_portstr))) != OK) - break; + /* Step One (again): (Re)Determine Who To Connect To */ + if ((status = ap_proxy_determine_connection(p, r, conf, worker, + backend, uri, &newurl, proxyname, proxyport, + req->server_portstr, sizeof(req->server_portstr)))) + break; + + /* The code assumes locurl is not changed during the loop, or + * ap_proxy_http_prefetch() would have to be called every time, + * and header_brigade be changed accordingly... 
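/*
 * Illustrative sketch (not part of the patch): the prefetched request body
 * is remembered as userdata on r->pool under the "proxy-req-input" key, so
 * that a later invocation of the handler (e.g. a balancer retrying another
 * member) reuses the already-read brigade instead of reading the client a
 * second time.  The lookup-or-create pattern, with a hypothetical helper
 * name and the APR brigade/pool types from the existing includes:
 */
static apr_bucket_brigade *get_or_create_req_input(apr_pool_t *p,
                                                   apr_bucket_alloc_t *ba)
{
    apr_bucket_brigade *bb = NULL;

    apr_pool_userdata_get((void **)&bb, "proxy-req-input", p);
    if (bb == NULL) {
        bb = apr_brigade_create(p, ba);
        /* _setn: the key is not copied and no cleanup is registered,
         * because the brigade lives in the same pool anyway. */
        apr_pool_userdata_setn(bb, "proxy-req-input", NULL, p);
    }
    return bb;
}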
+ */ + AP_DEBUG_ASSERT(strcmp(newurl, locurl) == 0); + } /* Step Two: Make the Connection */ - if (ap_proxy_check_connection(proxy_function, backend, r->server, 1, + if (ap_proxy_check_connection(scheme, backend, r->server, 1, PROXY_CHECK_CONN_EMPTY) - && ap_proxy_connect_backend(proxy_function, backend, worker, + && ap_proxy_connect_backend(scheme, backend, worker, r->server)) { ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(01114) "HTTP: failed to make connection to backend: %s", @@ -1972,54 +2059,45 @@ static int proxy_http_handler(request_rec *r, proxy_worker *worker, } /* Step Three: Create conn_rec */ - if (!backend->connection) { - if ((status = ap_proxy_connection_create_ex(proxy_function, - backend, r)) != OK) - break; - /* - * On SSL connections set a note on the connection what CN is - * requested, such that mod_ssl can check if it is requested to do - * so. - */ - if (backend->ssl_hostname) { - apr_table_setn(backend->connection->notes, - "proxy-request-hostname", - backend->ssl_hostname); - } + if ((status = ap_proxy_connection_create_ex(scheme, backend, r)) != OK) + break; + req->origin = backend->connection; + + /* Don't recycle the connection if prefetch (above) told not to do so */ + if (toclose) { + backend->close = 1; + req->origin->keepalive = AP_CONN_CLOSE; } /* Step Four: Send the Request * On the off-chance that we forced a 100-Continue as a * kinda HTTP ping test, allow for retries */ - if ((status = ap_proxy_http_request(p, r, backend, worker, - conf, uri, locurl, server_portstr)) != OK) { - if ((status == HTTP_SERVICE_UNAVAILABLE) && worker->s->ping_timeout_set) { - backend->close = 1; + status = ap_proxy_http_request(req); + if (status != OK) { + if (req->do_100_continue && status == HTTP_SERVICE_UNAVAILABLE) { ap_log_rerror(APLOG_MARK, APLOG_INFO, status, r, APLOGNO(01115) - "HTTP: 100-Continue failed to %pI (%s)", - worker->cp->addr, worker->s->hostname_ex); + "HTTP: 100-Continue failed to %pI (%s:%d)", + backend->addr, backend->hostname, backend->port); + backend->close = 1; retry++; continue; - } else { - break; } - + break; } /* Step Five: Receive the Response... Fall thru to cleanup */ - status = ap_proxy_http_process_response(p, r, &backend, worker, - conf, server_portstr); + status = ap_proxy_http_process_response(req); break; } /* Step Six: Clean Up */ cleanup: - if (backend) { + if (req->backend) { if (status != OK) - backend->close = 1; - ap_proxy_http_cleanup(proxy_function, r, backend); + req->backend->close = 1; + ap_proxy_http_cleanup(scheme, r, req->backend); } return status; } diff --git a/modules/proxy/mod_proxy_scgi.c b/modules/proxy/mod_proxy_scgi.c index 11f75de..d63c833 100644 --- a/modules/proxy/mod_proxy_scgi.c +++ b/modules/proxy/mod_proxy_scgi.c @@ -179,8 +179,10 @@ static int scgi_canon(request_rec *r, char *url) char *host, sport[sizeof(":65535")]; const char *err, *path; apr_port_t port, def_port; + core_dir_config *d = ap_get_core_module_config(r->per_dir_config); + int flags = d->allow_encoded_slashes && !d->decode_encoded_slashes ? 
PROXY_CANONENC_NOENCODEDSLASHENCODING : 0; - if (strncasecmp(url, SCHEME "://", sizeof(SCHEME) + 2)) { + if (ap_cstr_casecmpn(url, SCHEME "://", sizeof(SCHEME) + 2)) { return DECLINED; } url += sizeof(SCHEME); /* Keep slashes */ @@ -205,8 +207,8 @@ static int scgi_canon(request_rec *r, char *url) host = apr_pstrcat(r->pool, "[", host, "]", NULL); } - path = ap_proxy_canonenc(r->pool, url, strlen(url), enc_path, 0, - r->proxyreq); + path = ap_proxy_canonenc_ex(r->pool, url, strlen(url), enc_path, flags, + r->proxyreq); if (!path) { return HTTP_BAD_REQUEST; } @@ -388,6 +390,14 @@ static int pass_response(request_rec *r, proxy_conn_rec *conn) return status; } + /* SCGI has its own body framing mechanism which we don't + * match against any provided Content-Length, so let the + * core determine C-L vs T-E based on what's actually sent. + */ + if (!apr_table_get(r->subprocess_env, AP_TRUST_CGILIKE_CL_ENVVAR)) + apr_table_unset(r->headers_out, "Content-Length"); + apr_table_unset(r->headers_out, "Transfer-Encoding"); + conf = ap_get_module_config(r->per_dir_config, &proxy_scgi_module); if (conf->sendfile && conf->sendfile != scgi_sendfile_off) { short err = 1; @@ -434,7 +444,7 @@ static int pass_response(request_rec *r, proxy_conn_rec *conn) if (location && *location == '/') { scgi_request_config *req_conf = apr_palloc(r->pool, sizeof(*req_conf)); - if (strcasecmp(location_header, "Location")) { + if (ap_cstr_casecmp(location_header, "Location")) { if (err) { apr_table_unset(r->err_headers_out, location_header); } @@ -533,7 +543,7 @@ static int scgi_handler(request_rec *r, proxy_worker *worker, apr_uri_t *uri; char dummy; - if (strncasecmp(url, SCHEME "://", sizeof(SCHEME) + 2)) { + if (ap_cstr_casecmpn(url, SCHEME "://", sizeof(SCHEME) + 2)) { ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(00865) "declining URL %s", url); return DECLINED; diff --git a/modules/proxy/mod_proxy_uwsgi.c b/modules/proxy/mod_proxy_uwsgi.c index c5d4f8e..4e57196 100644 --- a/modules/proxy/mod_proxy_uwsgi.c +++ b/modules/proxy/mod_proxy_uwsgi.c @@ -84,10 +84,29 @@ static int uwsgi_canon(request_rec *r, char *url) host = apr_pstrcat(r->pool, "[", host, "]", NULL); } - path = ap_proxy_canonenc(r->pool, url, strlen(url), enc_path, 0, - r->proxyreq); - if (!path) { - return HTTP_BAD_REQUEST; + if (apr_table_get(r->notes, "proxy-nocanon") + || apr_table_get(r->notes, "proxy-noencode")) { + path = url; /* this is the raw/encoded path */ + } + else { + core_dir_config *d = ap_get_core_module_config(r->per_dir_config); + int flags = d->allow_encoded_slashes && !d->decode_encoded_slashes ? PROXY_CANONENC_NOENCODEDSLASHENCODING : 0; + + path = ap_proxy_canonenc_ex(r->pool, url, strlen(url), enc_path, flags, + r->proxyreq); + if (!path) { + return HTTP_BAD_REQUEST; + } + } + /* + * If we have a raw control character or a ' ' in nocanon path, + * correct encoding was missed. 
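/*
 * Illustrative sketch (not part of the patch): a uwsgi request starts with
 * a 4-byte header -- two modifier bytes around a little-endian 16-bit
 * datasize -- followed by length-prefixed key/value pairs.  Because the
 * datasize field is only 16 bits wide, uwsgi_send_headers() below now
 * fails with HTTP_INTERNAL_SERVER_ERROR when the encoded environment
 * exceeds APR_UINT16_MAX instead of letting the size wrap.  A minimal
 * encoder for one key/value pair under those assumptions (hypothetical
 * helper, matching the headerlen arithmetic in the code below):
 */
static apr_size_t uwsgi_put_pair(unsigned char *ptr,
                                 const char *key, const char *val)
{
    apr_size_t klen = strlen(key);
    apr_size_t vlen = val ? strlen(val) : 0;

    *ptr++ = (unsigned char)(klen & 0xff);         /* key length, LE16 */
    *ptr++ = (unsigned char)((klen >> 8) & 0xff);
    memcpy(ptr, key, klen);
    ptr += klen;

    *ptr++ = (unsigned char)(vlen & 0xff);         /* value length, LE16 */
    *ptr++ = (unsigned char)((vlen >> 8) & 0xff);
    if (val) {
        memcpy(ptr, val, vlen);
    }

    return 2 + klen + 2 + vlen;  /* bytes consumed, as counted in headerlen */
}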
+ */ + if (path == url && *ap_scan_vchar_obstext(path)) { + ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(10417) + "To be forwarded path contains control " + "characters or spaces"); + return HTTP_FORBIDDEN; } r->filename = @@ -136,7 +155,7 @@ static int uwsgi_send_headers(request_rec *r, proxy_conn_rec * conn) int j; apr_size_t headerlen = 4; - apr_uint16_t pktsize, keylen, vallen; + apr_size_t pktsize, keylen, vallen; const char *script_name; const char *path_info; const char *auth; @@ -175,7 +194,16 @@ static int uwsgi_send_headers(request_rec *r, proxy_conn_rec * conn) env = (apr_table_entry_t *) env_table->elts; for (j = 0; j < env_table->nelts; ++j) { - headerlen += 2 + strlen(env[j].key) + 2 + strlen(env[j].val); + headerlen += 2 + strlen(env[j].key) + 2 + (env[j].val ? strlen(env[j].val) : 0); + } + + pktsize = headerlen - 4; + if (pktsize > APR_UINT16_MAX) { + ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(10259) + "can't send headers to %s:%u: packet size too " + "large (%" APR_SIZE_T_FMT ")", + conn->hostname, conn->port, pktsize); + return HTTP_INTERNAL_SERVER_ERROR; } ptr = buf = apr_palloc(r->pool, headerlen); @@ -189,15 +217,15 @@ static int uwsgi_send_headers(request_rec *r, proxy_conn_rec * conn) memcpy(ptr, env[j].key, keylen); ptr += keylen; - vallen = strlen(env[j].val); + vallen = env[j].val ? strlen(env[j].val) : 0; *ptr++ = (apr_byte_t) (vallen & 0xff); *ptr++ = (apr_byte_t) ((vallen >> 8) & 0xff); - memcpy(ptr, env[j].val, vallen); + if (env[j].val) { + memcpy(ptr, env[j].val, vallen); + } ptr += vallen; } - pktsize = headerlen - 4; - buf[0] = 0; buf[1] = (apr_byte_t) (pktsize & 0xff); buf[2] = (apr_byte_t) ((pktsize >> 8) & 0xff); @@ -238,6 +266,7 @@ static request_rec *make_fake_req(conn_rec *c, request_rec *r) request_rec *rp; apr_pool_create(&pool, c->pool); + apr_pool_tag(pool, "proxy_uwsgi_rp"); rp = apr_pcalloc(pool, sizeof(*r)); @@ -297,18 +326,16 @@ static int uwsgi_response(request_rec *r, proxy_conn_rec * backend, pass_bb = apr_brigade_create(r->pool, c->bucket_alloc); len = ap_getline(buffer, sizeof(buffer), rp, 1); - if (len <= 0) { - /* oops */ + /* invalid or empty */ return HTTP_INTERNAL_SERVER_ERROR; } - backend->worker->s->read += len; - - if (len >= sizeof(buffer) - 1) { - /* oops */ + if ((apr_size_t)len >= sizeof(buffer)) { + /* too long */ return HTTP_INTERNAL_SERVER_ERROR; } + /* Position of http status code */ if (apr_date_checkmask(buffer, "HTTP/#.# ###*")) { status_start = 9; @@ -317,8 +344,8 @@ static int uwsgi_response(request_rec *r, proxy_conn_rec * backend, status_start = 7; } else { - /* oops */ - return HTTP_INTERNAL_SERVER_ERROR; + /* not HTTP */ + return HTTP_BAD_GATEWAY; } status_end = status_start + 3; @@ -338,21 +365,50 @@ static int uwsgi_response(request_rec *r, proxy_conn_rec * backend, } r->status_line = apr_pstrdup(r->pool, &buffer[status_start]); - /* start parsing headers */ + /* parse headers */ while ((len = ap_getline(buffer, sizeof(buffer), rp, 1)) > 0) { + if ((apr_size_t)len >= sizeof(buffer)) { + /* too long */ + len = -1; + break; + } value = strchr(buffer, ':'); - /* invalid header skip */ - if (!value) - continue; - *value = '\0'; - ++value; + if (!value) { + /* invalid header */ + len = -1; + break; + } + *value++ = '\0'; + if (*ap_scan_http_token(buffer)) { + /* invalid name */ + len = -1; + break; + } while (apr_isspace(*value)) ++value; for (end = &value[strlen(value) - 1]; end > value && apr_isspace(*end); --end) *end = '\0'; + if (*ap_scan_http_field_content(value)) { + /* invalid value */ + len = 
-1; + break; + } apr_table_add(r->headers_out, buffer, value); } + if (len < 0) { + /* Reset headers, but not to NULL because things below the chain expect + * this to be non NULL e.g. the ap_content_length_filter. + */ + r->headers_out = apr_table_make(r->pool, 1); + return HTTP_BAD_GATEWAY; + } + + /* T-E wins over C-L */ + if (apr_table_get(r->headers_out, "Transfer-Encoding")) { + apr_table_unset(r->headers_out, "Content-Length"); + backend->close = 1; + } if ((buf = apr_table_get(r->headers_out, "Content-Type"))) { ap_set_content_type(r, apr_pstrdup(r->pool, buf)); @@ -362,9 +418,9 @@ static int uwsgi_response(request_rec *r, proxy_conn_rec * backend, #if AP_MODULE_MAGIC_AT_LEAST(20101106,0) dconf = ap_get_module_config(r->per_dir_config, &proxy_module); - if (dconf->error_override && ap_is_HTTP_ERROR(r->status)) { + if (ap_proxy_should_override(dconf, r->status)) { #else - if (conf->error_override && ap_is_HTTP_ERROR(r->status)) { + if (ap_proxy_should_override(conf, r->status)) { #endif int status = r->status; r->status = HTTP_OK; @@ -446,11 +502,8 @@ static int uwsgi_handler(request_rec *r, proxy_worker * worker, const char *proxyname, apr_port_t proxyport) { int status; - int delta = 0; - int decode_status; proxy_conn_rec *backend = NULL; apr_pool_t *p = r->pool; - size_t w_len; char server_portstr[32]; char *u_path_info; apr_uri_t *uri; @@ -462,24 +515,23 @@ static int uwsgi_handler(request_rec *r, proxy_worker * worker, uri = apr_palloc(r->pool, sizeof(*uri)); - /* ADD PATH_INFO */ -#if AP_MODULE_MAGIC_AT_LEAST(20111130,0) - w_len = strlen(worker->s->name); -#else - w_len = strlen(worker->name); -#endif - u_path_info = r->filename + 6 + w_len; - if (u_path_info[0] != '/') { - delta = 1; + /* ADD PATH_INFO (unescaped) */ + u_path_info = ap_strchr(url + sizeof(UWSGI_SCHEME) + 2, '/'); + if (!u_path_info) { + u_path_info = apr_pstrdup(r->pool, "/"); } - decode_status = ap_unescape_url(url + w_len - delta); - if (decode_status) { + else if (ap_unescape_url(u_path_info) != OK) { ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(10100) - "unable to decode uri: %s", url + w_len - delta); + "unable to decode uwsgi uri: %s", url); return HTTP_INTERNAL_SERVER_ERROR; } - apr_table_add(r->subprocess_env, "PATH_INFO", url + w_len - delta); - + else { + /* Remove duplicate slashes at the beginning of PATH_INFO */ + while (u_path_info[1] == '/') { + u_path_info++; + } + } + apr_table_add(r->subprocess_env, "PATH_INFO", u_path_info); /* Create space for state information */ status = ap_proxy_acquire_connection(UWSGI_SCHEME, &backend, worker, @@ -509,12 +561,10 @@ static int uwsgi_handler(request_rec *r, proxy_worker * worker, } /* Step Three: Create conn_rec */ - if (!backend->connection) { - if ((status = ap_proxy_connection_create(UWSGI_SCHEME, backend, - r->connection, - r->server)) != OK) - goto cleanup; - } + if ((status = ap_proxy_connection_create(UWSGI_SCHEME, backend, + r->connection, + r->server)) != OK) + goto cleanup; /* Step Four: Process the Request */ if (((status = ap_setup_client_block(r, REQUEST_CHUNKED_ERROR)) != OK) diff --git a/modules/proxy/mod_proxy_wstunnel.c b/modules/proxy/mod_proxy_wstunnel.c index 9dda010..30ba1b4 100644 --- a/modules/proxy/mod_proxy_wstunnel.c +++ b/modules/proxy/mod_proxy_wstunnel.c @@ -15,9 +15,43 @@ */ #include "mod_proxy.h" +#include "http_config.h" module AP_MODULE_DECLARE_DATA proxy_wstunnel_module; +typedef struct { + unsigned int fallback_to_proxy_http :1, + fallback_to_proxy_http_set :1; +} proxyws_dir_conf; + +static int 
can_fallback_to_proxy_http; + +static int proxy_wstunnel_check_trans(request_rec *r, const char *url) +{ + proxyws_dir_conf *dconf = ap_get_module_config(r->per_dir_config, + &proxy_wstunnel_module); + + if (can_fallback_to_proxy_http && dconf->fallback_to_proxy_http) { + ap_log_rerror(APLOG_MARK, APLOG_TRACE5, 0, r, "check_trans fallback"); + return DECLINED; + } + + if (ap_cstr_casecmpn(url, "ws:", 3) != 0 + && ap_cstr_casecmpn(url, "wss:", 4) != 0) { + return DECLINED; + } + + if (!apr_table_get(r->headers_in, "Upgrade")) { + /* No Upgrade, let mod_proxy_http handle it (for instance). + * Note: anything but OK/DECLINED will do (i.e. bypass wstunnel w/o + * aborting the request), HTTP_UPGRADE_REQUIRED is documentary... + */ + return HTTP_UPGRADE_REQUIRED; + } + + return OK; +} + /* * Canonicalise http-like URLs. * scheme is the scheme for the URL @@ -26,19 +60,26 @@ module AP_MODULE_DECLARE_DATA proxy_wstunnel_module; */ static int proxy_wstunnel_canon(request_rec *r, char *url) { + proxyws_dir_conf *dconf = ap_get_module_config(r->per_dir_config, + &proxy_wstunnel_module); char *host, *path, sport[7]; char *search = NULL; const char *err; char *scheme; apr_port_t port, def_port; + if (can_fallback_to_proxy_http && dconf->fallback_to_proxy_http) { + ap_log_rerror(APLOG_MARK, APLOG_TRACE5, 0, r, "canon fallback"); + return DECLINED; + } + /* ap_port_of_scheme() */ - if (strncasecmp(url, "ws:", 3) == 0) { + if (ap_cstr_casecmpn(url, "ws:", 3) == 0) { url += 3; scheme = "ws:"; def_port = apr_uri_port_of_scheme("http"); } - else if (strncasecmp(url, "wss:", 4) == 0) { + else if (ap_cstr_casecmpn(url, "wss:", 4) == 0) { url += 4; scheme = "wss:"; def_port = apr_uri_port_of_scheme("https"); @@ -69,15 +110,42 @@ static int proxy_wstunnel_canon(request_rec *r, char *url) if (apr_table_get(r->notes, "proxy-nocanon")) { path = url; /* this is the raw path */ } + else if (apr_table_get(r->notes, "proxy-noencode")) { + path = url; /* this is the encoded path already */ + search = r->args; + } else { - path = ap_proxy_canonenc(r->pool, url, strlen(url), enc_path, 0, - r->proxyreq); + core_dir_config *d = ap_get_core_module_config(r->per_dir_config); + int flags = d->allow_encoded_slashes && !d->decode_encoded_slashes ? PROXY_CANONENC_NOENCODEDSLASHENCODING : 0; + + path = ap_proxy_canonenc_ex(r->pool, url, strlen(url), enc_path, flags, + r->proxyreq); + if (!path) { + return HTTP_BAD_REQUEST; + } search = r->args; } - if (path == NULL) - return HTTP_BAD_REQUEST; + /* + * If we have a raw control character or a ' ' in nocanon path or + * r->args, correct encoding was missed. 
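/*
 * Illustrative sketch (not part of the patch): ap_scan_vchar_obstext()
 * (httpd.h) returns a pointer to the first byte of its argument that is
 * neither a visible ASCII character nor obs-text, i.e. it stops at the
 * terminating NUL, at spaces and at control characters.  So dereferencing
 * its result, as the nocanon/noencode checks here do, is non-zero exactly
 * when the path or query string still contains a byte that must not be
 * forwarded unencoded:
 */
static int contains_forbidden_byte(const char *s)
{
    return *ap_scan_vchar_obstext(s) != '\0';
}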
+ */ + if (path == url && *ap_scan_vchar_obstext(path)) { + ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(10419) + "To be forwarded path contains control " + "characters or spaces"); + return HTTP_FORBIDDEN; + } + if (search && *ap_scan_vchar_obstext(search)) { + ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(10409) + "To be forwarded query string contains control " + "characters or spaces"); + return HTTP_FORBIDDEN; + } - apr_snprintf(sport, sizeof(sport), ":%d", port); + if (port != def_port) + apr_snprintf(sport, sizeof(sport), ":%d", port); + else + sport[0] = '\0'; if (ap_strchr_c(host, ':')) { /* if literal IPv6 address */ @@ -280,112 +348,166 @@ static int proxy_wstunnel_handler(request_rec *r, proxy_worker *worker, char *url, const char *proxyname, apr_port_t proxyport) { + proxyws_dir_conf *dconf = ap_get_module_config(r->per_dir_config, + &proxy_wstunnel_module); int status; char server_portstr[32]; proxy_conn_rec *backend = NULL; + const char *upgrade; char *scheme; - int retry; apr_pool_t *p = r->pool; + char *locurl = url; apr_uri_t *uri; int is_ssl = 0; - const char *upgrade_method = *worker->s->upgrade ? worker->s->upgrade : "WebSocket"; - if (strncasecmp(url, "wss:", 4) == 0) { + if (can_fallback_to_proxy_http && dconf->fallback_to_proxy_http) { + ap_log_rerror(APLOG_MARK, APLOG_TRACE5, 0, r, "handler fallback"); + return DECLINED; + } + + if (ap_cstr_casecmpn(url, "wss:", 4) == 0) { scheme = "WSS"; is_ssl = 1; } - else if (strncasecmp(url, "ws:", 3) == 0) { + else if (ap_cstr_casecmpn(url, "ws:", 3) == 0) { scheme = "WS"; } else { - ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(02450) "declining URL %s", url); + ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(02450) + "declining URL %s", url); return DECLINED; } - - if (ap_cstr_casecmp(upgrade_method, "NONE") != 0) { - const char *upgrade; - upgrade = apr_table_get(r->headers_in, "Upgrade"); - if (!upgrade || (ap_cstr_casecmp(upgrade, upgrade_method) != 0 && - ap_cstr_casecmp(upgrade_method, "ANY") !=0)) { - ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(02900) - "declining URL %s (not %s, Upgrade: header is %s)", - url, upgrade_method, upgrade ? upgrade : "missing"); - return DECLINED; - } + ap_log_rerror(APLOG_MARK, APLOG_TRACE1, 0, r, "serving URL %s", url); + + upgrade = apr_table_get(r->headers_in, "Upgrade"); + if (!upgrade || !ap_proxy_worker_can_upgrade(p, worker, upgrade, + "WebSocket")) { + const char *worker_upgrade = *worker->s->upgrade ? worker->s->upgrade + : "WebSocket"; + ap_log_rerror(APLOG_MARK, APLOG_INFO, 0, r, APLOGNO(02900) + "require upgrade for URL %s " + "(Upgrade header is %s, expecting %s)", + url, upgrade ? 
upgrade : "missing", worker_upgrade); + apr_table_setn(r->err_headers_out, "Connection", "Upgrade"); + apr_table_setn(r->err_headers_out, "Upgrade", worker_upgrade); + return HTTP_UPGRADE_REQUIRED; } uri = apr_palloc(p, sizeof(*uri)); ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(02451) "serving URL %s", url); /* create space for state information */ - status = ap_proxy_acquire_connection(scheme, &backend, worker, - r->server); + status = ap_proxy_acquire_connection(scheme, &backend, worker, r->server); if (status != OK) { - if (backend) { - backend->close = 1; - ap_proxy_release_connection(scheme, backend, r->server); - } - return status; + goto cleanup; } backend->is_ssl = is_ssl; backend->close = 0; - retry = 0; - while (retry < 2) { - char *locurl = url; - /* Step One: Determine Who To Connect To */ - status = ap_proxy_determine_connection(p, r, conf, worker, backend, - uri, &locurl, proxyname, proxyport, - server_portstr, - sizeof(server_portstr)); - - if (status != OK) - break; - - /* Step Two: Make the Connection */ - if (ap_proxy_connect_backend(scheme, backend, worker, r->server)) { - ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(02452) - "failed to make connection to backend: %s", - backend->hostname); - status = HTTP_SERVICE_UNAVAILABLE; - break; - } - - /* Step Three: Create conn_rec */ - if (!backend->connection) { - status = ap_proxy_connection_create_ex(scheme, backend, r); - if (status != OK) { - break; - } - } - - backend->close = 1; /* must be after ap_proxy_determine_connection */ + /* Step One: Determine Who To Connect To */ + status = ap_proxy_determine_connection(p, r, conf, worker, backend, + uri, &locurl, proxyname, proxyport, + server_portstr, + sizeof(server_portstr)); + if (status != OK) { + goto cleanup; + } + /* Step Two: Make the Connection */ + if (ap_proxy_connect_backend(scheme, backend, worker, r->server)) { + ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(02452) + "failed to make connection to backend: %s", + backend->hostname); + status = HTTP_SERVICE_UNAVAILABLE; + goto cleanup; + } - /* Step Three: Process the Request */ - status = proxy_wstunnel_request(p, r, backend, worker, conf, uri, locurl, - server_portstr); - break; + /* Step Three: Create conn_rec */ + status = ap_proxy_connection_create_ex(scheme, backend, r); + if (status != OK) { + goto cleanup; } + /* Step Four: Process the Request */ + status = proxy_wstunnel_request(p, r, backend, worker, conf, uri, locurl, + server_portstr); + +cleanup: /* Do not close the socket */ - ap_proxy_release_connection(scheme, backend, r->server); + if (backend) { + backend->close = 1; + ap_proxy_release_connection(scheme, backend, r->server); + } return status; } -static void ap_proxy_http_register_hook(apr_pool_t *p) +static void *create_proxyws_dir_config(apr_pool_t *p, char *dummy) +{ + proxyws_dir_conf *new = + (proxyws_dir_conf *) apr_pcalloc(p, sizeof(proxyws_dir_conf)); + + new->fallback_to_proxy_http = 1; + + return (void *) new; +} + +static void *merge_proxyws_dir_config(apr_pool_t *p, void *vbase, void *vadd) +{ + proxyws_dir_conf *new = apr_pcalloc(p, sizeof(proxyws_dir_conf)), + *add = vadd, *base = vbase; + + new->fallback_to_proxy_http = (add->fallback_to_proxy_http_set) + ? 
add->fallback_to_proxy_http + : base->fallback_to_proxy_http; + new->fallback_to_proxy_http_set = (add->fallback_to_proxy_http_set + || base->fallback_to_proxy_http_set); + + return new; +} + +static const char * proxyws_fallback_to_proxy_http(cmd_parms *cmd, void *conf, int arg) +{ + proxyws_dir_conf *dconf = conf; + dconf->fallback_to_proxy_http = !!arg; + dconf->fallback_to_proxy_http_set = 1; + return NULL; +} + +static int proxy_wstunnel_post_config(apr_pool_t *pconf, apr_pool_t *plog, + apr_pool_t *ptemp, server_rec *s) +{ + can_fallback_to_proxy_http = + (ap_find_linked_module("mod_proxy_http.c") != NULL); + + return OK; +} + +static const command_rec ws_proxy_cmds[] = +{ + AP_INIT_FLAG("ProxyWebsocketFallbackToProxyHttp", + proxyws_fallback_to_proxy_http, NULL, RSRC_CONF|ACCESS_CONF, + "whether to let mod_proxy_http handle the upgrade and tunneling, " + "On by default"), + + {NULL} +}; + +static void ws_proxy_hooks(apr_pool_t *p) { - proxy_hook_scheme_handler(proxy_wstunnel_handler, NULL, NULL, APR_HOOK_FIRST); - proxy_hook_canon_handler(proxy_wstunnel_canon, NULL, NULL, APR_HOOK_FIRST); + static const char * const aszSucc[] = { "mod_proxy_http.c", NULL}; + ap_hook_post_config(proxy_wstunnel_post_config, NULL, NULL, APR_HOOK_MIDDLE); + proxy_hook_scheme_handler(proxy_wstunnel_handler, NULL, aszSucc, APR_HOOK_FIRST); + proxy_hook_check_trans(proxy_wstunnel_check_trans, NULL, aszSucc, APR_HOOK_MIDDLE); + proxy_hook_canon_handler(proxy_wstunnel_canon, NULL, aszSucc, APR_HOOK_FIRST); } AP_DECLARE_MODULE(proxy_wstunnel) = { STANDARD20_MODULE_STUFF, - NULL, /* create per-directory config structure */ - NULL, /* merge per-directory config structures */ + create_proxyws_dir_config, /* create per-directory config structure */ + merge_proxyws_dir_config, /* merge per-directory config structures */ NULL, /* create per-server config structure */ NULL, /* merge per-server config structures */ - NULL, /* command apr_table_t */ - ap_proxy_http_register_hook /* register hooks */ + ws_proxy_cmds, /* command apr_table_t */ + ws_proxy_hooks /* register hooks */ }; diff --git a/modules/proxy/proxy_util.c b/modules/proxy/proxy_util.c index cbf8826..a54a4fa 100644 --- a/modules/proxy/proxy_util.c +++ b/modules/proxy/proxy_util.c @@ -19,22 +19,22 @@ #include "ap_mpm.h" #include "scoreboard.h" #include "apr_version.h" +#include "apr_strings.h" #include "apr_hash.h" +#include "apr_atomic.h" +#include "http_core.h" #include "proxy_util.h" #include "ajp.h" #include "scgi.h" +#include "mpm_common.h" /* for ap_max_mem_free */ + #include "mod_http2.h" /* for http2_get_num_workers() */ #if APR_HAVE_UNISTD_H #include /* for getpid() */ #endif -#if (APR_MAJOR_VERSION < 1) -#undef apr_socket_create -#define apr_socket_create apr_socket_create_ex -#endif - #if APR_HAVE_SYS_UN_H #include #endif @@ -47,7 +47,7 @@ APLOG_USE_MODULE(proxy); /* * Opaque structure containing target server info when * using a forward proxy. - * Up to now only used in combination with HTTP CONNECT. + * Up to now only used in combination with HTTP CONNECT to ProxyRemote */ typedef struct { int use_http_connect; /* Use SSL Tunneling via HTTP CONNECT */ @@ -56,6 +56,17 @@ typedef struct { const char *proxy_auth; /* Proxy authorization */ } forward_info; +/* + * Opaque structure containing a refcounted and TTL'ed address. 
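Illustration only, not part of this patch: the refcount discipline implied here (one reference held by the worker, one per connection using the address, destroy the backing pool when the count drops to zero) can be sketched with plain APR atomics. All names below are hypothetical; the real helpers are proxy_address_inc()/proxy_address_dec() further down in this diff.

#include <apr_general.h>
#include <apr_pools.h>
#include <apr_atomic.h>
#include <apr_network_io.h>

typedef struct {
    apr_sockaddr_t *addr;     /* resolved address chain, lives in its own pool */
    apr_uint32_t refcount;    /* worker's reference + one per connection       */
    apr_uint32_t expiry;      /* seconds relative to some fixed start time     */
} demo_address;

static void demo_address_ref(demo_address *a)
{
    apr_atomic_inc32(&a->refcount);
}

static void demo_address_unref(demo_address *a)
{
    /* apr_atomic_add32() returns the previous value, so old == 1 means this
     * caller just dropped the last reference and must free the backing pool.
     */
    if (apr_atomic_add32(&a->refcount, -1) == 1) {
        apr_pool_destroy(a->addr->pool);
    }
}

int main(void)
{
    apr_pool_t *p;
    demo_address a = { NULL, 0, 0 };

    apr_initialize();
    apr_pool_create(&p, NULL);
    if (apr_sockaddr_info_get(&a.addr, "localhost", APR_UNSPEC, 80, 0, p)
            != APR_SUCCESS) {
        return 1;
    }
    apr_atomic_set32(&a.refcount, 1);   /* the worker's reference             */

    demo_address_ref(&a);               /* a connection starts using it       */
    demo_address_unref(&a);             /* ...and drops it                    */
    demo_address_unref(&a);             /* worker lets go: pool is destroyed  */

    apr_terminate();
    return 0;
}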
+ */ +typedef struct proxy_address { + apr_sockaddr_t *addr; /* Remote address info */ + const char *hostname; /* Remote host name */ + apr_port_t hostport; /* Remote host port */ + apr_uint32_t refcount; /* Number of conns and/or worker using it */ + apr_uint32_t expiry; /* Expiry timestamp (seconds to proxy_start_time) */ +} proxy_address; + /* Global balancer counter */ int PROXY_DECLARE_DATA proxy_lb_workers = 0; static int lb_workers_limit = 0; @@ -64,6 +75,8 @@ const apr_strmatch_pattern PROXY_DECLARE_DATA *ap_proxy_strmatch_domain; extern apr_global_mutex_t *proxy_mutex; +static const apr_time_t *proxy_start_time; /* epoch for expiring addresses */ + static int proxy_match_ipaddr(struct dirconn_entry *This, request_rec *r); static int proxy_match_domainname(struct dirconn_entry *This, request_rec *r); static int proxy_match_hostname(struct dirconn_entry *This, request_rec *r); @@ -204,14 +217,16 @@ PROXY_DECLARE(void) ap_proxy_c2hex(int ch, char *x) * and encodes those which must be encoded, and does not touch * those which must not be touched. */ -PROXY_DECLARE(char *)ap_proxy_canonenc(apr_pool_t *p, const char *x, int len, - enum enctype t, int forcedec, - int proxyreq) +PROXY_DECLARE(char *)ap_proxy_canonenc_ex(apr_pool_t *p, const char *x, int len, + enum enctype t, int flags, + int proxyreq) { int i, j, ch; char *y; char *allowed; /* characters which should not be encoded */ char *reserved; /* characters which much not be en/de-coded */ + int forcedec = flags & PROXY_CANONENC_FORCEDEC; + int noencslashesenc = flags & PROXY_CANONENC_NOENCODEDSLASHENCODING; /* * N.B. in addition to :@&=, this allows ';' in an http path @@ -260,17 +275,29 @@ PROXY_DECLARE(char *)ap_proxy_canonenc(apr_pool_t *p, const char *x, int len, * decode it if not already done. do not decode reverse proxied URLs * unless specifically forced */ - if ((forcedec || (proxyreq && proxyreq != PROXYREQ_REVERSE)) && ch == '%') { + if ((forcedec || noencslashesenc + || (proxyreq && proxyreq != PROXYREQ_REVERSE)) && ch == '%') { if (!apr_isxdigit(x[i + 1]) || !apr_isxdigit(x[i + 2])) { return NULL; } ch = ap_proxy_hex2c(&x[i + 1]); - i += 2; if (ch != 0 && strchr(reserved, ch)) { /* keep it encoded */ - ap_proxy_c2hex(ch, &y[j]); - j += 2; + y[j++] = x[i++]; + y[j++] = x[i++]; + y[j] = x[i]; continue; } + if (noencslashesenc && !forcedec && (proxyreq == PROXYREQ_REVERSE)) { + /* + * In the reverse proxy case when we only want to keep encoded + * slashes untouched revert back to '%' which will cause + * '%' to be encoded in the following. + */ + ch = '%'; + } + else { + i += 2; + } } /* recode it, if necessary */ if (!apr_isalnum(ch) && !strchr(allowed, ch)) { @@ -285,6 +312,22 @@ PROXY_DECLARE(char *)ap_proxy_canonenc(apr_pool_t *p, const char *x, int len, return y; } +/* + * Convert a URL-encoded string to canonical form. + * It decodes characters which need not be encoded, + * and encodes those which must be encoded, and does not touch + * those which must not be touched. + */ +PROXY_DECLARE(char *)ap_proxy_canonenc(apr_pool_t *p, const char *x, int len, + enum enctype t, int forcedec, + int proxyreq) +{ + int flags; + + flags = forcedec ? PROXY_CANONENC_FORCEDEC : 0; + return ap_proxy_canonenc_ex(p, x, len, t, flags, proxyreq); +} + /* * Parses network-location. 
* urlp on input the URL; on output the path, after the leading / @@ -366,14 +409,15 @@ PROXY_DECLARE(char *) return NULL; } -PROXY_DECLARE(int) ap_proxyerror(request_rec *r, int statuscode, const char *message) +static int proxyerror_core(request_rec *r, int statuscode, const char *message, + apr_status_t rv) { - const char *uri = ap_escape_html(r->pool, r->uri); + ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(00898) + "%s returned by %s", message, r->uri); + apr_table_setn(r->notes, "error-notes", apr_pstrcat(r->pool, - "The proxy server could not handle the request ", ap_escape_html(r->pool, r->method), " ", uri, - ".

<p>\n" + "The proxy server could not handle the request<p>" "Reason: <strong>", ap_escape_html(r->pool, message), "</strong></p>
", NULL)); @@ -382,11 +426,14 @@ PROXY_DECLARE(int) ap_proxyerror(request_rec *r, int statuscode, const char *mes apr_table_setn(r->notes, "verbose-error-to", "*"); r->status_line = apr_psprintf(r->pool, "%3.3u Proxy Error", statuscode); - ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(00898) "%s returned by %s", message, - r->uri); return statuscode; } +PROXY_DECLARE(int) ap_proxyerror(request_rec *r, int statuscode, const char *message) +{ + return proxyerror_core(r, statuscode, message, 0); +} + static const char * proxy_get_host_of_request(request_rec *r) { @@ -890,20 +937,20 @@ PROXY_DECLARE(const char *) ap_proxy_location_reverse_map(request_rec *r, * translate url http://example.com/foo/bar/that to /bash/that */ for (n = 0; n < balancer->workers->nelts; n++) { - l2 = strlen((*worker)->s->name); + l2 = strlen((*worker)->s->name_ex); if (urlpart) { /* urlpart (l3) assuredly starts with its own '/' */ - if ((*worker)->s->name[l2 - 1] == '/') + if ((*worker)->s->name_ex[l2 - 1] == '/') --l2; if (l1 >= l2 + l3 - && strncasecmp((*worker)->s->name, url, l2) == 0 + && strncasecmp((*worker)->s->name_ex, url, l2) == 0 && strncmp(urlpart, url + l2, l3) == 0) { u = apr_pstrcat(r->pool, ent[i].fake, &url[l2 + l3], NULL); return ap_is_url(u) ? u : ap_construct_url(r->pool, u, r); } } - else if (l1 >= l2 && strncasecmp((*worker)->s->name, url, l2) == 0) { + else if (l1 >= l2 && strncasecmp((*worker)->s->name_ex, url, l2) == 0) { /* edge case where fake is just "/"... avoid double slash */ if ((ent[i].fake[0] == '/') && (ent[i].fake[1] == 0) && (url[l2] == '/')) { u = apr_pstrdup(r->pool, &url[l2]); @@ -1080,7 +1127,7 @@ PROXY_DECLARE(int) ap_proxy_valid_balancer_name(char *name, int i) { if (!i) i = sizeof(BALANCER_PREFIX)-1; - return (!strncasecmp(name, BALANCER_PREFIX, i)); + return (!ap_cstr_casecmpn(name, BALANCER_PREFIX, i)); } @@ -1172,11 +1219,13 @@ PROXY_DECLARE(char *) ap_proxy_define_balancer(apr_pool_t *p, * exist, that's OK at this time. We check when we share and sync */ lbmethod = ap_lookup_provider(PROXY_LBMETHOD, "byrequests", "0"); - + (*balancer)->lbmethod = lbmethod; + (*balancer)->workers = apr_array_make(p, 5, sizeof(proxy_worker *)); +#if APR_HAS_THREADS (*balancer)->gmutex = NULL; (*balancer)->tmutex = NULL; - (*balancer)->lbmethod = lbmethod; +#endif if (do_malloc) bshared = ap_malloc(sizeof(proxy_balancer_shared)); @@ -1188,8 +1237,11 @@ PROXY_DECLARE(char *) ap_proxy_define_balancer(apr_pool_t *p, bshared->was_malloced = (do_malloc != 0); PROXY_STRNCPY(bshared->lbpname, "byrequests"); if (PROXY_STRNCPY(bshared->name, uri) != APR_SUCCESS) { + if (do_malloc) free(bshared); return apr_psprintf(p, "balancer name (%s) too long", uri); } + (*balancer)->lbmethod_set = 1; + /* * We do the below for verification. 
The real sname will be * done post_config @@ -1198,6 +1250,7 @@ PROXY_DECLARE(char *) ap_proxy_define_balancer(apr_pool_t *p, &sname); sname = apr_pstrcat(p, conf->id, "_", sname, NULL); if (PROXY_STRNCPY(bshared->sname, sname) != APR_SUCCESS) { + if (do_malloc) free(bshared); return apr_psprintf(p, "balancer safe-name (%s) too long", sname); } bshared->hash.def = ap_proxy_hashfunc(bshared->name, PROXY_HASHFUNC_DEFAULT); @@ -1244,6 +1297,7 @@ PROXY_DECLARE(apr_status_t) ap_proxy_share_balancer(proxy_balancer *balancer, lbmethod = ap_lookup_provider(PROXY_LBMETHOD, balancer->s->lbpname, "0"); if (lbmethod) { balancer->lbmethod = lbmethod; + balancer->lbmethod_set = 1; } else { ap_log_error(APLOG_MARK, APLOG_CRIT, 0, ap_server_conf, APLOGNO(02432) "Cannot find LB Method: %s", balancer->s->lbpname); @@ -1252,10 +1306,11 @@ PROXY_DECLARE(apr_status_t) ap_proxy_share_balancer(proxy_balancer *balancer, if (*balancer->s->nonce == PROXY_UNSET_NONCE) { char nonce[APR_UUID_FORMATTED_LENGTH + 1]; apr_uuid_t uuid; - /* Retrieve a UUID and store the nonce for the lifetime of - * the process. - */ - apr_uuid_get(&uuid); + + /* Generate a pseudo-UUID from the PRNG to use as a nonce for + * the lifetime of the process. uuid.data is a char array so + * this is an adequate substitute for apr_uuid_get(). */ + ap_random_insecure_bytes(uuid.data, sizeof uuid.data); apr_uuid_format(nonce, &uuid); rv = PROXY_STRNCPY(balancer->s->nonce, nonce); } @@ -1264,7 +1319,9 @@ PROXY_DECLARE(apr_status_t) ap_proxy_share_balancer(proxy_balancer *balancer, PROXY_DECLARE(apr_status_t) ap_proxy_initialize_balancer(proxy_balancer *balancer, server_rec *s, apr_pool_t *p) { +#if APR_HAS_THREADS apr_status_t rv = APR_SUCCESS; +#endif ap_slotmem_provider_t *storage = balancer->storage; apr_size_t size; unsigned int num; @@ -1304,6 +1361,7 @@ PROXY_DECLARE(apr_status_t) ap_proxy_initialize_balancer(proxy_balancer *balance if (balancer->lbmethod && balancer->lbmethod->reset) balancer->lbmethod->reset(balancer, s); +#if APR_HAS_THREADS if (balancer->tmutex == NULL) { rv = apr_thread_mutex_create(&(balancer->tmutex), APR_THREAD_MUTEX_DEFAULT, p); if (rv != APR_SUCCESS) { @@ -1312,6 +1370,7 @@ PROXY_DECLARE(apr_status_t) ap_proxy_initialize_balancer(proxy_balancer *balance return rv; } } +#endif return APR_SUCCESS; } @@ -1335,6 +1394,7 @@ static proxy_worker *proxy_balancer_get_best_worker(proxy_balancer *balancer, balancer->lbmethod->name, balancer->s->name); apr_pool_create(&tpool, r->pool); + apr_pool_tag(tpool, "proxy_lb_best"); spares = apr_array_make(tpool, 1, sizeof(proxy_worker*)); standbys = apr_array_make(tpool, 1, sizeof(proxy_worker*)); @@ -1424,7 +1484,8 @@ static proxy_worker *proxy_balancer_get_best_worker(proxy_balancer *balancer, if (best_worker) { ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server, APLOGNO(10123) "proxy: %s selected worker \"%s\" : busy %" APR_SIZE_T_FMT " : lbstatus %d", - balancer->lbmethod->name, best_worker->s->name, best_worker->s->busy, best_worker->s->lbstatus); + balancer->lbmethod->name, best_worker->s->name_ex, + best_worker->s->busy, best_worker->s->lbstatus); } return best_worker; @@ -1451,61 +1512,136 @@ static void socket_cleanup(proxy_conn_rec *conn) apr_pool_clear(conn->scpool); } -static apr_status_t conn_pool_cleanup(void *theworker) +static void address_cleanup(proxy_conn_rec *conn) { - proxy_worker *worker = (proxy_worker *)theworker; - if (worker->cp->res) { - worker->cp->pool = NULL; + conn->address = NULL; + conn->addr = NULL; + conn->hostname = NULL; + conn->port = 0; + 
conn->uds_path = NULL; + if (conn->uds_pool) { + apr_pool_clear(conn->uds_pool); + } + if (conn->sock) { + socket_cleanup(conn); } +} + +static apr_status_t conn_pool_cleanup(void *theworker) +{ + ((proxy_worker *)theworker)->cp = NULL; return APR_SUCCESS; } -static void init_conn_pool(apr_pool_t *p, proxy_worker *worker) +static apr_pool_t *make_conn_subpool(apr_pool_t *p, const char *tag, + server_rec *s) +{ + apr_pool_t *sp = NULL; + apr_allocator_t *alloc; + apr_thread_mutex_t *mutex; + apr_status_t rv; + + rv = apr_allocator_create(&alloc); + if (rv == APR_SUCCESS) { + rv = apr_thread_mutex_create(&mutex, APR_THREAD_MUTEX_DEFAULT, p); + if (rv == APR_SUCCESS) { + apr_allocator_mutex_set(alloc, mutex); + apr_allocator_max_free_set(alloc, ap_max_mem_free); + rv = apr_pool_create_ex(&sp, p, NULL, alloc); + } + else { + apr_allocator_destroy(alloc); + } + } + if (rv != APR_SUCCESS) { + ap_log_error(APLOG_MARK, APLOG_CRIT, rv, s, APLOGNO(10474) + "failed to create %s pool", tag); + ap_abort_on_oom(); + return NULL; /* not reached */ + } + apr_allocator_owner_set(alloc, sp); + apr_pool_tag(sp, tag); + + return sp; +} + +static void init_conn_pool(apr_pool_t *p, proxy_worker *worker, server_rec *s) { - apr_pool_t *pool; proxy_conn_pool *cp; - /* - * Create a connection pool's subpool. - * This pool is used for connection recycling. - * Once the worker is added it is never removed but - * it can be disabled. - */ - apr_pool_create(&pool, p); - apr_pool_tag(pool, "proxy_worker_cp"); /* * Alloc from the same pool as worker. * proxy_conn_pool is permanently attached to the worker. */ cp = (proxy_conn_pool *)apr_pcalloc(p, sizeof(proxy_conn_pool)); - cp->pool = pool; worker->cp = cp; + + /* + * We need a first pool (cp->pool) to maintain the connections attached to + * the worker and a second one (cp->dns_pool) to maintain the DNS addresses + * in use (TTL'ed, refcounted). New connections are created as/on a subpool + * of cp->pool and new addresses as/on a subpool of cp->dns_pool, such that + * both leaks (the subpools can be destroyed when the connections and/or + * addresses are over) and race conditions (the creation/destruction of + * subpools is protected by the parent pool's mutex) can be avoided. + * + * cp->dns_pool is created before cp->pool because when a connection on the + * latter is destroyed it might destroy an address on the former, so when + * the base pools are destroyed (e.g. child exit) we thusly make sure that + * cp->dns_pool and its subpools are still alive when cp->pool gets killed. + * + * Both cp->dns_pool and cp->pool have their own allocator/mutex too since + * acquiring connections and addresses don't need to contend. + */ + cp->dns_pool = make_conn_subpool(p, "proxy_worker_dns", s); + cp->pool = make_conn_subpool(p, "proxy_worker_cp", s); + + /* When p is cleaning up the child is exiting, signal that to e.g. avoid + * destroying the subpools explicitely in connection_destructor() when + * they have been destroyed already by the reslist cleanup. + */ + apr_pool_pre_cleanup_register(p, worker, conn_pool_cleanup); } PROXY_DECLARE(int) ap_proxy_connection_reusable(proxy_conn_rec *conn) { proxy_worker *worker = conn->worker; - return ! 
(conn->close || !worker->s->is_address_reusable || worker->s->disablereuse); + return !(conn->close + || conn->forward + || worker->s->disablereuse + || !worker->s->is_address_reusable); } -static apr_status_t connection_cleanup(void *theconn) +static proxy_conn_rec *connection_make(apr_pool_t *p, proxy_worker *worker) { - proxy_conn_rec *conn = (proxy_conn_rec *)theconn; - proxy_worker *worker = conn->worker; + proxy_conn_rec *conn; + + conn = apr_pcalloc(p, sizeof(proxy_conn_rec)); + conn->pool = p; + conn->worker = worker; /* - * If the connection pool is NULL the worker - * cleanup has been run. Just return. + * Create another subpool that manages the data for the + * socket and the connection member of the proxy_conn_rec struct as we + * destroy this data more frequently than other data in the proxy_conn_rec + * struct like hostname and addr (at least in the case where we have + * keepalive connections that timed out). + * + * XXX: this is really needed only when worker->s->is_address_reusable, + * otherwise conn->scpool = conn->pool would be fine. For now we + * can't change it since it's (kind of) part of the API. */ - if (!worker->cp->pool) { - return APR_SUCCESS; - } + apr_pool_create(&conn->scpool, p); + apr_pool_tag(conn->scpool, "proxy_conn_scpool"); - if (conn->r) { - apr_pool_destroy(conn->r->pool); - conn->r = NULL; - } + return conn; +} + +static void connection_cleanup(void *theconn) +{ + proxy_conn_rec *conn = (proxy_conn_rec *)theconn; + proxy_worker *worker = conn->worker; /* Sanity check: Did we already return the pooled connection? */ if (conn->inreslist) { @@ -1513,37 +1649,43 @@ static apr_status_t connection_cleanup(void *theconn) "Pooled connection 0x%pp for worker %s has been" " already returned to the connection pool.", conn, ap_proxy_worker_name(conn->pool, worker)); - return APR_SUCCESS; + return; } - /* determine if the connection need to be closed */ - if (!worker->s->is_address_reusable || worker->s->disablereuse) { + if (conn->r) { + apr_pool_destroy(conn->r->pool); + conn->r = NULL; + } + + /* determine if the connection should be cleared, closed or reused */ + if (!worker->s->is_address_reusable) { apr_pool_t *p = conn->pool; apr_pool_clear(p); - conn = apr_pcalloc(p, sizeof(proxy_conn_rec)); - conn->pool = p; - conn->worker = worker; - apr_pool_create(&(conn->scpool), p); - apr_pool_tag(conn->scpool, "proxy_conn_scpool"); + conn = connection_make(p, worker); } else if (conn->close - || (conn->connection - && conn->connection->keepalive == AP_CONN_CLOSE)) { + || conn->forward + || (conn->connection + && conn->connection->keepalive == AP_CONN_CLOSE) + || worker->s->disablereuse) { socket_cleanup(conn); conn->close = 0; } + else if (conn->is_ssl) { + /* Unbind/reset the SSL connection dir config (sslconn->dc) from + * r->per_dir_config, r will likely get destroyed before this proxy + * conn is reused. 
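Illustration only, not part of this patch: the decision tree in connection_cleanup() above boils down to three outcomes (the SSL dir-config unbind belongs to the "keep" path). The enum and helper below are hypothetical names restating that logic, not code from the patch.

#include <stdio.h>

typedef enum {
    CONN_RECYCLE_POOL,   /* address not reusable: clear conn->pool and rebuild   */
    CONN_CLOSE_SOCKET,   /* close/forward/keepalive-close/disablereuse: drop the
                          * socket but keep the proxy_conn_rec                   */
    CONN_KEEP            /* healthy keepalive connection: back into the pool     */
} conn_disposition;

static conn_disposition dispose_of(int address_reusable, int close_flag,
                                   int forward_proxy, int keepalive_close,
                                   int disablereuse)
{
    if (!address_reusable)
        return CONN_RECYCLE_POOL;
    if (close_flag || forward_proxy || keepalive_close || disablereuse)
        return CONN_CLOSE_SOCKET;
    return CONN_KEEP;
}

int main(void)
{
    printf("%d\n", dispose_of(1, 0, 0, 0, 0));  /* CONN_KEEP          */
    printf("%d\n", dispose_of(1, 1, 0, 0, 0));  /* CONN_CLOSE_SOCKET  */
    printf("%d\n", dispose_of(0, 0, 0, 0, 0));  /* CONN_RECYCLE_POOL  */
    return 0;
}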
+ */ + ap_proxy_ssl_engine(conn->connection, worker->section_config, 1); + } if (worker->s->hmax && worker->cp->res) { conn->inreslist = 1; apr_reslist_release(worker->cp->res, (void *)conn); } - else - { + else { worker->cp->conn = conn; } - - /* Always return the SUCCESS */ - return APR_SUCCESS; } /* DEPRECATED */ @@ -1584,35 +1726,21 @@ PROXY_DECLARE(apr_status_t) ap_proxy_ssl_connection_cleanup(proxy_conn_rec *conn static apr_status_t connection_constructor(void **resource, void *params, apr_pool_t *pool) { - apr_pool_t *ctx; - apr_pool_t *scpool; + apr_pool_t *p; proxy_conn_rec *conn; proxy_worker *worker = (proxy_worker *)params; /* - * Create the subpool for each connection + * Create a subpool for each connection * This keeps the memory consumption constant - * when disconnecting from backend. - */ - apr_pool_create(&ctx, pool); - apr_pool_tag(ctx, "proxy_conn_pool"); - /* - * Create another subpool that manages the data for the - * socket and the connection member of the proxy_conn_rec struct as we - * destroy this data more frequently than other data in the proxy_conn_rec - * struct like hostname and addr (at least in the case where we have - * keepalive connections that timed out). + * when it's recycled or destroyed. */ - apr_pool_create(&scpool, ctx); - apr_pool_tag(scpool, "proxy_conn_scpool"); - conn = apr_pcalloc(ctx, sizeof(proxy_conn_rec)); - - conn->pool = ctx; - conn->scpool = scpool; - conn->worker = worker; + apr_pool_create(&p, pool); + apr_pool_tag(p, "proxy_conn_pool"); + conn = connection_make(p, worker); conn->inreslist = 1; - *resource = conn; + *resource = conn; return APR_SUCCESS; } @@ -1623,7 +1751,7 @@ static apr_status_t connection_destructor(void *resource, void *params, proxy_worker *worker = params; /* Destroy the pool only if not called from reslist_destroy */ - if (worker->cp->pool) { + if (worker->cp) { proxy_conn_rec *conn = resource; apr_pool_destroy(conn->pool); } @@ -1640,15 +1768,73 @@ PROXY_DECLARE(char *) ap_proxy_worker_name(apr_pool_t *p, { if (!(*worker->s->uds_path) || !p) { /* just in case */ - return worker->s->name; + return worker->s->name_ex; } - return apr_pstrcat(p, "unix:", worker->s->uds_path, "|", worker->s->name, NULL); + return apr_pstrcat(p, "unix:", worker->s->uds_path, "|", worker->s->name_ex, NULL); } -PROXY_DECLARE(proxy_worker *) ap_proxy_get_worker(apr_pool_t *p, - proxy_balancer *balancer, - proxy_server_conf *conf, - const char *url) +PROXY_DECLARE(int) ap_proxy_worker_can_upgrade(apr_pool_t *p, + const proxy_worker *worker, + const char *upgrade, + const char *dflt) +{ + /* Find in worker->s->upgrade list (if any) */ + const char *worker_upgrade = worker->s->upgrade; + if (*worker_upgrade) { + return (strcmp(worker_upgrade, "*") == 0 + || ap_cstr_casecmp(worker_upgrade, upgrade) == 0 + || ap_find_token(p, worker_upgrade, upgrade)); + } + + /* Compare to the provided default (if any) */ + return (dflt && ap_cstr_casecmp(dflt, upgrade) == 0); +} + +/* + * Taken from ap_strcmp_match() : + * Match = 0, NoMatch = 1, Abort = -1, Inval = -2 + * Based loosely on sections of wildmat.c by Rich Salz + * Hmmm... shouldn't this really go component by component? + * + * Adds handling of the "\" => "" unescaping. 
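Illustration only, not part of this patch: the matcher defined just below treats every "$<digit>" in the expected worker name as a wildcard, much like "*" in ap_strcmp_match(), and returns 0 on a match. The simplified standalone checker below (which returns 1 on a match) shows the intended effect on ProxyPassMatch-style worker names; it is not the httpd algorithm.

#include <ctype.h>
#include <stdio.h>

/* Simplified stand-in: "$<digit>" in the pattern matches any (possibly empty)
 * run of characters; everything else must match literally.  Returns 1 on match.
 */
static int ematch_demo(const char *str, const char *pat)
{
    if (pat[0] == '$' && isdigit((unsigned char)pat[1])) {
        const char *rest = pat + 2;
        do {
            if (ematch_demo(str, rest))
                return 1;
        } while (*str++);
        return 0;
    }
    if (*pat == '\0')
        return *str == '\0';
    return *str == *pat && ematch_demo(str + 1, pat + 1);
}

int main(void)
{
    /* A worker defined by e.g. "ProxyPassMatch ^/app/(.*) http://backend/app/$1"
     * has the name "http://backend/app/$1"; URLs mapped through it should be
     * matched back to that worker.
     */
    printf("%d\n", ematch_demo("http://backend/app/img/x.png", "http://backend/app/$1")); /* 1 */
    printf("%d\n", ematch_demo("http://other/app/x",           "http://backend/app/$1")); /* 0 */
    return 0;
}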
+ */ +static int ap_proxy_strcmp_ematch(const char *str, const char *expected) +{ + apr_size_t x, y; + + for (x = 0, y = 0; expected[y]; ++y, ++x) { + if (expected[y] == '$' && apr_isdigit(expected[y + 1])) { + do { + y += 2; + } while (expected[y] == '$' && apr_isdigit(expected[y + 1])); + if (!expected[y]) + return 0; + while (str[x]) { + int ret; + if ((ret = ap_proxy_strcmp_ematch(&str[x++], &expected[y])) != 1) + return ret; + } + return -1; + } + else if (!str[x]) { + return -1; + } + else if (expected[y] == '\\' && !expected[++y]) { + /* NUL is an invalid char! */ + return -2; + } + if (str[x] != expected[y]) + return 1; + } + /* We got all the way through the worker path without a difference */ + return 0; +} + +PROXY_DECLARE(proxy_worker *) ap_proxy_get_worker_ex(apr_pool_t *p, + proxy_balancer *balancer, + proxy_server_conf *conf, + const char *url, + unsigned int mask) { proxy_worker *worker; proxy_worker *max_worker = NULL; @@ -1664,7 +1850,12 @@ PROXY_DECLARE(proxy_worker *) ap_proxy_get_worker(apr_pool_t *p, return NULL; } - url = ap_proxy_de_socketfy(p, url); + if (!(mask & AP_PROXY_WORKER_NO_UDS)) { + url = ap_proxy_de_socketfy(p, url); + if (!url) { + return NULL; + } + } c = ap_strchr_c(url, ':'); if (c == NULL || c[1] != '/' || c[2] != '/' || c[3] == '\0') { @@ -1674,6 +1865,11 @@ PROXY_DECLARE(proxy_worker *) ap_proxy_get_worker(apr_pool_t *p, url_length = strlen(url); url_copy = apr_pstrmemdup(p, url, url_length); + /* Default to lookup for both _PREFIX and _MATCH workers */ + if (!(mask & (AP_PROXY_WORKER_IS_PREFIX | AP_PROXY_WORKER_IS_MATCH))) { + mask |= AP_PROXY_WORKER_IS_PREFIX | AP_PROXY_WORKER_IS_MATCH; + } + /* * We need to find the start of the path and * therefore we know the length of the scheme://hostname/ @@ -1704,22 +1900,35 @@ PROXY_DECLARE(proxy_worker *) ap_proxy_get_worker(apr_pool_t *p, proxy_worker **workers = (proxy_worker **)balancer->workers->elts; for (i = 0; i < balancer->workers->nelts; i++, workers++) { worker = *workers; - if ( ((worker_name_length = strlen(worker->s->name)) <= url_length) + if ( ((worker_name_length = strlen(worker->s->name_ex)) <= url_length) && (worker_name_length >= min_match) && (worker_name_length > max_match) - && (strncmp(url_copy, worker->s->name, worker_name_length) == 0) ) { + && (worker->s->is_name_matchable + || ((mask & AP_PROXY_WORKER_IS_PREFIX) + && strncmp(url_copy, worker->s->name_ex, + worker_name_length) == 0)) + && (!worker->s->is_name_matchable + || ((mask & AP_PROXY_WORKER_IS_MATCH) + && ap_proxy_strcmp_ematch(url_copy, + worker->s->name_ex) == 0)) ) { max_worker = worker; max_match = worker_name_length; } - } } else { worker = (proxy_worker *)conf->workers->elts; for (i = 0; i < conf->workers->nelts; i++, worker++) { - if ( ((worker_name_length = strlen(worker->s->name)) <= url_length) + if ( ((worker_name_length = strlen(worker->s->name_ex)) <= url_length) && (worker_name_length >= min_match) && (worker_name_length > max_match) - && (strncmp(url_copy, worker->s->name, worker_name_length) == 0) ) { + && (worker->s->is_name_matchable + || ((mask & AP_PROXY_WORKER_IS_PREFIX) + && strncmp(url_copy, worker->s->name_ex, + worker_name_length) == 0)) + && (!worker->s->is_name_matchable + || ((mask & AP_PROXY_WORKER_IS_MATCH) + && ap_proxy_strcmp_ematch(url_copy, + worker->s->name_ex) == 0)) ) { max_worker = worker; max_match = worker_name_length; } @@ -1729,6 +1938,14 @@ PROXY_DECLARE(proxy_worker *) ap_proxy_get_worker(apr_pool_t *p, return max_worker; } +PROXY_DECLARE(proxy_worker *) 
ap_proxy_get_worker(apr_pool_t *p, + proxy_balancer *balancer, + proxy_server_conf *conf, + const char *url) +{ + return ap_proxy_get_worker_ex(p, balancer, conf, url, 0); +} + /* * To create a worker from scratch first we define the * specifics of the worker; this is all local data. @@ -1736,46 +1953,98 @@ PROXY_DECLARE(proxy_worker *) ap_proxy_get_worker(apr_pool_t *p, * shared. This allows for dynamic addition during * config and runtime. */ -PROXY_DECLARE(char *) ap_proxy_define_worker(apr_pool_t *p, +PROXY_DECLARE(char *) ap_proxy_define_worker_ex(apr_pool_t *p, proxy_worker **worker, proxy_balancer *balancer, proxy_server_conf *conf, const char *url, - int do_malloc) + unsigned int mask) { - int rv; - apr_uri_t uri, urisock; + apr_status_t rv; proxy_worker_shared *wshared; - char *ptr, *sockpath = NULL; + const char *ptr = NULL, *sockpath = NULL, *pdollars = NULL; + apr_port_t port_of_scheme; + int address_not_reusable = 0; + apr_uri_t uri; /* * Look to see if we are using UDS: * require format: unix:/path/foo/bar.sock|http://ignored/path2/ * This results in talking http to the socket at /path/foo/bar.sock */ - ptr = ap_strchr((char *)url, '|'); - if (ptr) { - *ptr = '\0'; - rv = apr_uri_parse(p, url, &urisock); - if (rv == APR_SUCCESS && !strcasecmp(urisock.scheme, "unix")) { - sockpath = ap_runtime_dir_relative(p, urisock.path);; - url = ptr+1; /* so we get the scheme for the uds */ + if (!ap_cstr_casecmpn(url, "unix:", 5) + && (ptr = ap_strchr_c(url + 5, '|'))) { + rv = apr_uri_parse(p, apr_pstrmemdup(p, url, ptr - url), &uri); + if (rv == APR_SUCCESS) { + sockpath = ap_runtime_dir_relative(p, uri.path);; + ptr++; /* so we get the scheme for the uds */ } else { - *ptr = '|'; + ptr = url; + } + } + else { + ptr = url; + } + + if (mask & AP_PROXY_WORKER_IS_MATCH) { + /* apr_uri_parse() will accept the '$' sign anywhere in the URL but + * in the :port part, and we don't want scheme://host:port$1$2/path + * to fail (e.g. "ProxyPassMatch ^/(a|b)(/.*)? http://host:port$2"). + * So we trim all the $n from the :port and prepend them in uri.path + * afterward for apr_uri_unparse() to restore the original URL below. + * If a dollar substitution is found in the hostname[:port] part of + * the URL, reusing address and connections in the same worker is not + * possible (the current implementation of active connections cache + * handles/assumes a single origin server:port per worker only), so + * we set address_not_reusable here during parsing to take that into + * account in the worker settings below. 
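Illustration only, not part of this patch: the parsing just below has to detect whether a dollar reference falls inside the host[:port] part of the worker URL, since that is what rules out address and connection reuse. A minimal standalone check for that condition, under a hypothetical name:

#include <ctype.h>
#include <string.h>
#include <stdio.h>

/* Return 1 if a "$<digit>" reference appears before the path starts,
 * i.e. inside the host[:port] part of scheme://host[:port]/path.
 */
static int authority_has_substitution(const char *url)
{
    const char *p = strstr(url, "://");
    if (!p)
        return 0;
    for (p += 3; *p && *p != '/'; ++p) {
        if (p[0] == '$' && isdigit((unsigned char)p[1]))
            return 1;
    }
    return 0;
}

int main(void)
{
    printf("%d\n", authority_has_substitution("http://backend:8080/app/$1"));  /* 0 */
    printf("%d\n", authority_has_substitution("http://backend:$1/app"));       /* 1 */
    printf("%d\n", authority_has_substitution("http://host$1.example.com/"));  /* 1 */
    return 0;
}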
+ */ +#define IS_REF(x) (x[0] == '$' && apr_isdigit(x[1])) + const char *pos = ap_strstr_c(ptr, "://"); + if (pos) { + pos += 3; + while (*pos && *pos != ':' && *pos != '/') { + if (*pos == '$') { + address_not_reusable = 1; + } + pos++; + } + if (*pos == ':') { + pos++; + while (*pos && !IS_REF(pos) && *pos != '/') { + pos++; + } + if (IS_REF(pos)) { + struct iovec vec[2]; + const char *path = pos + 2; + while (*path && *path != '/') { + path++; + } + pdollars = apr_pstrmemdup(p, pos, path - pos); + vec[0].iov_base = (void *)ptr; + vec[0].iov_len = pos - ptr; + vec[1].iov_base = (void *)path; + vec[1].iov_len = strlen(path); + ptr = apr_pstrcatv(p, vec, 2, NULL); + address_not_reusable = 1; + } + } } +#undef IS_REF } - rv = apr_uri_parse(p, url, &uri); + /* Normalize the url (worker name) */ + rv = apr_uri_parse(p, ptr, &uri); if (rv != APR_SUCCESS) { return apr_pstrcat(p, "Unable to parse URL: ", url, NULL); } if (!uri.scheme) { return apr_pstrcat(p, "URL must be absolute!: ", url, NULL); } - /* allow for unix:/path|http: */ if (!uri.hostname) { if (sockpath) { + /* allow for unix:/path|http: */ uri.hostname = "localhost"; } else { @@ -1786,6 +2055,16 @@ PROXY_DECLARE(char *) ap_proxy_define_worker(apr_pool_t *p, ap_str_tolower(uri.hostname); } ap_str_tolower(uri.scheme); + port_of_scheme = ap_proxy_port_of_scheme(uri.scheme); + if (uri.port && uri.port == port_of_scheme) { + uri.port = 0; + } + if (pdollars) { + /* Restore/prepend pdollars into the path. */ + uri.path = apr_pstrcat(p, pdollars, uri.path, NULL); + } + ptr = apr_uri_unparse(p, &uri, APR_URI_UNP_REVEALPASSWORD); + /* * Workers can be associated w/ balancers or on their * own; ie: the generic reverse-proxy or a worker @@ -1809,26 +2088,25 @@ PROXY_DECLARE(char *) ap_proxy_define_worker(apr_pool_t *p, /* we need to allocate space here */ *worker = apr_palloc(p, sizeof(proxy_worker)); } - memset(*worker, 0, sizeof(proxy_worker)); + /* right here we just want to tuck away the worker info. * if called during config, we don't have shm setup yet, * so just note the info for later. */ - if (do_malloc) + if (mask & AP_PROXY_WORKER_IS_MALLOCED) wshared = ap_malloc(sizeof(proxy_worker_shared)); /* will be freed ap_proxy_share_worker */ else wshared = apr_palloc(p, sizeof(proxy_worker_shared)); - memset(wshared, 0, sizeof(proxy_worker_shared)); - wshared->port = (uri.port ? uri.port : ap_proxy_port_of_scheme(uri.scheme)); - if (uri.port && uri.port == ap_proxy_port_of_scheme(uri.scheme)) { - uri.port = 0; + if (PROXY_STRNCPY(wshared->name_ex, ptr) != APR_SUCCESS) { + ap_log_error(APLOG_MARK, APLOG_ERR, 0, ap_server_conf, APLOGNO(10366) + "Alert! worker name (%s) too long; truncated to: %s", ptr, wshared->name_ex); } - ptr = apr_uri_unparse(p, &uri, APR_URI_UNP_REVEALPASSWORD); if (PROXY_STRNCPY(wshared->name, ptr) != APR_SUCCESS) { - ap_log_error(APLOG_MARK, APLOG_ERR, 0, ap_server_conf, APLOGNO(02808) - "Alert! 
worker name (%s) too long; truncated to: %s", ptr, wshared->name); + ap_log_error(APLOG_MARK, APLOG_INFO, 0, ap_server_conf, APLOGNO(010118) + "worker name (%s) too long; truncated for legacy modules that do not use " + "proxy_worker_shared->name_ex: %s", ptr, wshared->name); } if (PROXY_STRNCPY(wshared->scheme, uri.scheme) != APR_SUCCESS) { ap_log_error(APLOG_MARK, APLOG_ERR, 0, ap_server_conf, APLOGNO(010117) @@ -1842,17 +2120,44 @@ PROXY_DECLARE(char *) ap_proxy_define_worker(apr_pool_t *p, "worker hostname (%s) too long; truncated for legacy modules that do not use " "proxy_worker_shared->hostname_ex: %s", uri.hostname, wshared->hostname); } + wshared->port = (uri.port) ? uri.port : port_of_scheme; wshared->flush_packets = flush_off; wshared->flush_wait = PROXY_FLUSH_WAIT; - wshared->is_address_reusable = 1; + wshared->address_ttl = (address_not_reusable) ? 0 : -1; + wshared->is_address_reusable = (address_not_reusable == 0); + wshared->disablereuse = (address_not_reusable != 0); wshared->lbfactor = 100; wshared->passes = 1; wshared->fails = 1; wshared->interval = apr_time_from_sec(HCHECK_WATHCHDOG_DEFAULT_INTERVAL); wshared->smax = -1; - wshared->hash.def = ap_proxy_hashfunc(wshared->name, PROXY_HASHFUNC_DEFAULT); - wshared->hash.fnv = ap_proxy_hashfunc(wshared->name, PROXY_HASHFUNC_FNV); - wshared->was_malloced = (do_malloc != 0); + wshared->hash.def = ap_proxy_hashfunc(wshared->name_ex, PROXY_HASHFUNC_DEFAULT); + wshared->hash.fnv = ap_proxy_hashfunc(wshared->name_ex, PROXY_HASHFUNC_FNV); + wshared->was_malloced = (mask & AP_PROXY_WORKER_IS_MALLOCED) != 0; + if (mask & AP_PROXY_WORKER_IS_MATCH) { + wshared->is_name_matchable = 1; + + /* Before AP_PROXY_WORKER_IS_MATCH (< 2.4.47), a regex worker with + * dollar substitution was never matched against any actual URL, thus + * the requests fell through the generic worker. Now if a ProyPassMatch + * matches, a worker (and its parameters) is always used to determine + * the properties of the connection with the origin server. So for + * instance the same "timeout=" will be enforced for all the requests + * matched by the same ProyPassMatch worker, which is an improvement + * compared to the global/vhost [Proxy]Timeout applied by the generic + * worker. Likewise, address and connection reuse is the default for + * a ProyPassMatch worker with no dollar substitution, just like a + * "normal" worker. However to avoid DNS and connection reuse compat + * issues, connection reuse is disabled by default if there is any + * substitution in the uri-path (an explicit enablereuse=on can still + * opt-in), and reuse is even disabled definitively for substitutions + * happening in the hostname[:port] (is_address_reusable was unset + * above so it will prevent enablereuse=on to apply anyway). + */ + if (ap_strchr_c(wshared->name, '$')) { + wshared->disablereuse = 1; + } + } if (sockpath) { if (PROXY_STRNCPY(wshared->uds_path, sockpath) != APR_SUCCESS) { return apr_psprintf(p, "worker uds path (%s) too long", sockpath); @@ -1875,6 +2180,33 @@ PROXY_DECLARE(char *) ap_proxy_define_worker(apr_pool_t *p, return NULL; } +PROXY_DECLARE(char *) ap_proxy_define_worker(apr_pool_t *p, + proxy_worker **worker, + proxy_balancer *balancer, + proxy_server_conf *conf, + const char *url, + int do_malloc) +{ + return ap_proxy_define_worker_ex(p, worker, balancer, conf, url, + AP_PROXY_WORKER_IS_PREFIX | + (do_malloc ? 
AP_PROXY_WORKER_IS_MALLOCED + : 0)); +} + +/* DEPRECATED */ +PROXY_DECLARE(char *) ap_proxy_define_match_worker(apr_pool_t *p, + proxy_worker **worker, + proxy_balancer *balancer, + proxy_server_conf *conf, + const char *url, + int do_malloc) +{ + return ap_proxy_define_worker_ex(p, worker, balancer, conf, url, + AP_PROXY_WORKER_IS_MATCH | + (do_malloc ? AP_PROXY_WORKER_IS_MALLOCED + : 0)); +} + /* * Create an already defined worker and free up memory */ @@ -1899,6 +2231,7 @@ PROXY_DECLARE(apr_status_t) ap_proxy_share_worker(proxy_worker *worker, proxy_wo if (APLOGdebug(ap_server_conf)) { apr_pool_t *pool; apr_pool_create(&pool, ap_server_conf->process->pool); + apr_pool_tag(pool, "proxy_worker_name"); ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, ap_server_conf, APLOGNO(02338) "%s shm[%d] (0x%pp) for worker: %s", action, i, (void *)shm, ap_proxy_worker_name(pool, worker)); @@ -1929,12 +2262,23 @@ PROXY_DECLARE(apr_status_t) ap_proxy_initialize_worker(proxy_worker *worker, ser if (!worker->s->retry_set) { worker->s->retry = apr_time_from_sec(PROXY_WORKER_DEFAULT_RETRY); } - /* By default address is reusable unless DisableReuse is set */ - if (worker->s->disablereuse) { + /* Consistently set address and connection reusabilty: when reuse + * is disabled by configuration, or when the address is known already + * to not be reusable for this worker (in any case, thus ignore/force + * DisableReuse). + */ + if (!worker->s->address_ttl || (!worker->s->address_ttl_set + && worker->s->disablereuse)) { worker->s->is_address_reusable = 0; } - else { - worker->s->is_address_reusable = 1; + if (!worker->s->is_address_reusable && !worker->s->disablereuse) { + /* Explicit enablereuse=on can't work in this case, warn user. */ + if (worker->s->disablereuse_set) { + ap_log_error(APLOG_MARK, APLOG_WARNING, 0, s, APLOGNO(10400) + "enablereuse/disablereuse ignored for worker %s", + ap_proxy_worker_name(p, worker)); + } + worker->s->disablereuse = 1; } /* @@ -1979,67 +2323,71 @@ PROXY_DECLARE(apr_status_t) ap_proxy_initialize_worker(proxy_worker *worker, ser ap_proxy_worker_name(p, worker)); } else { - ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s, APLOGNO(00927) - "initializing worker %s local", - ap_proxy_worker_name(p, worker)); apr_global_mutex_lock(proxy_mutex); - /* Now init local worker data */ - if (worker->tmutex == NULL) { - rv = apr_thread_mutex_create(&(worker->tmutex), APR_THREAD_MUTEX_DEFAULT, p); - if (rv != APR_SUCCESS) { - ap_log_error(APLOG_MARK, APLOG_ERR, 0, s, APLOGNO(00928) - "can not create worker thread mutex"); + /* Check again after we got the lock if we are still uninitialized */ + if (!(AP_VOLATILIZE_T(unsigned int, worker->local_status) & PROXY_WORKER_INITIALIZED)) { + ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s, APLOGNO(00927) + "initializing worker %s local", + ap_proxy_worker_name(p, worker)); + /* Now init local worker data */ +#if APR_HAS_THREADS + if (worker->tmutex == NULL) { + rv = apr_thread_mutex_create(&(worker->tmutex), APR_THREAD_MUTEX_DEFAULT, p); + if (rv != APR_SUCCESS) { + ap_log_error(APLOG_MARK, APLOG_ERR, rv, s, APLOGNO(00928) + "can not create worker thread mutex"); + apr_global_mutex_unlock(proxy_mutex); + return rv; + } + } +#endif + if (worker->cp == NULL) + init_conn_pool(p, worker, s); + if (worker->cp == NULL) { + ap_log_error(APLOG_MARK, APLOG_ERR, 0, s, APLOGNO(00929) + "can not create connection pool"); apr_global_mutex_unlock(proxy_mutex); - return rv; + return APR_EGENERAL; } - } - if (worker->cp == NULL) - init_conn_pool(p, worker); - if (worker->cp == NULL) { 
- ap_log_error(APLOG_MARK, APLOG_ERR, 0, s, APLOGNO(00929) - "can not create connection pool"); - apr_global_mutex_unlock(proxy_mutex); - return APR_EGENERAL; - } - - if (worker->s->hmax) { - rv = apr_reslist_create(&(worker->cp->res), - worker->s->min, worker->s->smax, - worker->s->hmax, worker->s->ttl, - connection_constructor, connection_destructor, - worker, worker->cp->pool); - apr_pool_cleanup_register(worker->cp->pool, (void *)worker, - conn_pool_cleanup, - apr_pool_cleanup_null); - - ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s, APLOGNO(00930) - "initialized pool in child %" APR_PID_T_FMT " for (%s) min=%d max=%d smax=%d", - getpid(), worker->s->hostname_ex, worker->s->min, - worker->s->hmax, worker->s->smax); + if (worker->s->hmax) { + rv = apr_reslist_create(&(worker->cp->res), + worker->s->min, worker->s->smax, + worker->s->hmax, worker->s->ttl, + connection_constructor, connection_destructor, + worker, worker->cp->pool); + + ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s, APLOGNO(00930) + "initialized pool in child %" APR_PID_T_FMT " for (%s:%d) min=%d max=%d smax=%d", + getpid(), worker->s->hostname_ex, (int)worker->s->port, + worker->s->min, worker->s->hmax, worker->s->smax); + + /* Set the acquire timeout */ + if (rv == APR_SUCCESS && worker->s->acquire_set) { + apr_reslist_timeout_set(worker->cp->res, worker->s->acquire); + } - /* Set the acquire timeout */ - if (rv == APR_SUCCESS && worker->s->acquire_set) { - apr_reslist_timeout_set(worker->cp->res, worker->s->acquire); } + else { + void *conn; - } - else { - void *conn; - - rv = connection_constructor(&conn, worker, worker->cp->pool); - worker->cp->conn = conn; + rv = connection_constructor(&conn, worker, worker->cp->pool); + worker->cp->conn = conn; - ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s, APLOGNO(00931) - "initialized single connection worker in child %" APR_PID_T_FMT " for (%s)", - getpid(), worker->s->hostname_ex); + ap_log_error(APLOG_MARK, APLOG_DEBUG, rv, s, APLOGNO(00931) + "initialized single connection worker in child %" APR_PID_T_FMT " for (%s:%d)", + getpid(), worker->s->hostname_ex, + (int)worker->s->port); + } + if (rv == APR_SUCCESS) { + worker->local_status |= (PROXY_WORKER_INITIALIZED); + } } apr_global_mutex_unlock(proxy_mutex); } if (rv == APR_SUCCESS) { worker->s->status |= (PROXY_WORKER_INITIALIZED); - worker->local_status |= (PROXY_WORKER_INITIALIZED); } return rv; } @@ -2050,8 +2398,9 @@ static int ap_proxy_retry_worker(const char *proxy_function, proxy_worker *worke if (worker->s->status & PROXY_WORKER_IN_ERROR) { if (PROXY_WORKER_IS(worker, PROXY_WORKER_STOPPED)) { ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s, APLOGNO(3305) - "%s: Won't retry worker (%s): stopped", - proxy_function, worker->s->hostname_ex); + "%s: Won't retry worker (%s:%d): stopped", + proxy_function, worker->s->hostname_ex, + (int)worker->s->port); return DECLINED; } if ((worker->s->status & PROXY_WORKER_IGNORE_ERRORS) @@ -2059,14 +2408,16 @@ static int ap_proxy_retry_worker(const char *proxy_function, proxy_worker *worke ++worker->s->retries; worker->s->status &= ~PROXY_WORKER_IN_ERROR; ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s, APLOGNO(00932) - "%s: worker for (%s) has been marked for retry", - proxy_function, worker->s->hostname_ex); + "%s: worker for (%s:%d) has been marked for retry", + proxy_function, worker->s->hostname_ex, + (int)worker->s->port); return OK; } else { ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s, APLOGNO(00933) - "%s: too soon to retry worker for (%s)", - proxy_function, worker->s->hostname_ex); + "%s: too 
soon to retry worker for (%s:%d)", + proxy_function, worker->s->hostname_ex, + (int)worker->s->port); return DECLINED; } } @@ -2080,33 +2431,43 @@ static int ap_proxy_retry_worker(const char *proxy_function, proxy_worker *worke * were passed a UDS url (eg: from mod_proxy) and adjust uds_path * as required. */ -static void fix_uds_filename(request_rec *r, char **url) +static int fix_uds_filename(request_rec *r, char **url) { - char *ptr, *ptr2; - if (!r || !r->filename) return; + char *uds_url = r->filename + 6, *origin_url; if (!strncmp(r->filename, "proxy:", 6) && - (ptr2 = ap_strcasestr(r->filename, "unix:")) && - (ptr = ap_strchr(ptr2, '|'))) { + !ap_cstr_casecmpn(uds_url, "unix:", 5) && + (origin_url = ap_strchr(uds_url + 5, '|'))) { + char *uds_path = NULL; + apr_size_t url_len; apr_uri_t urisock; apr_status_t rv; - *ptr = '\0'; - rv = apr_uri_parse(r->pool, ptr2, &urisock); - if (rv == APR_SUCCESS) { - char *rurl = ptr+1; - char *sockpath = ap_runtime_dir_relative(r->pool, urisock.path); - apr_table_setn(r->notes, "uds_path", sockpath); - *url = apr_pstrdup(r->pool, rurl); /* so we get the scheme for the uds */ - /* r->filename starts w/ "proxy:", so add after that */ - memmove(r->filename+6, rurl, strlen(rurl)+1); - ap_log_rerror(APLOG_MARK, APLOG_TRACE2, 0, r, - "*: rewrite of url due to UDS(%s): %s (%s)", - sockpath, *url, r->filename); + + *origin_url = '\0'; + rv = apr_uri_parse(r->pool, uds_url, &urisock); + *origin_url++ = '|'; + + if (rv == APR_SUCCESS && urisock.path && (!urisock.hostname + || !urisock.hostname[0])) { + uds_path = ap_runtime_dir_relative(r->pool, urisock.path); } - else { - *ptr = '|'; + if (!uds_path) { + ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(10292) + "Invalid proxy UDS filename (%s)", r->filename); + return 0; } + apr_table_setn(r->notes, "uds_path", uds_path); + + /* Remove the UDS path from *url and r->filename */ + url_len = strlen(origin_url); + *url = apr_pstrmemdup(r->pool, origin_url, url_len); + memcpy(uds_url, *url, url_len + 1); + + ap_log_rerror(APLOG_MARK, APLOG_TRACE2, 0, r, + "*: rewrite of url due to UDS(%s): %s (%s)", + uds_path, *url, r->filename); } + return 1; } PROXY_DECLARE(int) ap_proxy_pre_request(proxy_worker **worker, @@ -2118,20 +2479,22 @@ PROXY_DECLARE(int) ap_proxy_pre_request(proxy_worker **worker, access_status = proxy_run_pre_request(worker, balancer, r, conf, url); if (access_status == DECLINED && *balancer == NULL) { - *worker = ap_proxy_get_worker(r->pool, NULL, conf, *url); + const int forward = (r->proxyreq == PROXYREQ_PROXY); + *worker = ap_proxy_get_worker_ex(r->pool, NULL, conf, *url, + forward ? 
AP_PROXY_WORKER_NO_UDS : 0); if (*worker) { ap_log_rerror(APLOG_MARK, APLOG_TRACE2, 0, r, "%s: found worker %s for %s", - (*worker)->s->scheme, (*worker)->s->name, *url); - *balancer = NULL; - fix_uds_filename(r, url); + (*worker)->s->scheme, (*worker)->s->name_ex, *url); + if (!forward && !fix_uds_filename(r, url)) { + return HTTP_INTERNAL_SERVER_ERROR; + } access_status = OK; } - else if (r->proxyreq == PROXYREQ_PROXY) { + else if (forward) { if (conf->forward) { ap_log_rerror(APLOG_MARK, APLOG_TRACE2, 0, r, "*: found forward proxy worker for %s", *url); - *balancer = NULL; *worker = conf->forward; access_status = OK; /* @@ -2145,8 +2508,8 @@ PROXY_DECLARE(int) ap_proxy_pre_request(proxy_worker **worker, else if (r->proxyreq == PROXYREQ_REVERSE) { if (conf->reverse) { ap_log_rerror(APLOG_MARK, APLOG_TRACE2, 0, r, - "*: using default reverse proxy worker for %s (no keepalive)", *url); - *balancer = NULL; + "*: using default reverse proxy worker for %s " + "(no keepalive)", *url); *worker = conf->reverse; access_status = OK; /* @@ -2155,7 +2518,9 @@ PROXY_DECLARE(int) ap_proxy_pre_request(proxy_worker **worker, * regarding the Connection header in the request. */ apr_table_setn(r->subprocess_env, "proxy-nokeepalive", "1"); - fix_uds_filename(r, url); + if (!fix_uds_filename(r, url)) { + return HTTP_INTERNAL_SERVER_ERROR; + } } } } @@ -2287,8 +2652,9 @@ PROXY_DECLARE(int) ap_proxy_acquire_connection(const char *proxy_function, if (!PROXY_WORKER_IS_USABLE(worker)) { ap_log_error(APLOG_MARK, APLOG_ERR, 0, s, APLOGNO(00940) - "%s: disabled connection for (%s)", - proxy_function, worker->s->hostname_ex); + "%s: disabled connection for (%s:%d)", + proxy_function, worker->s->hostname_ex, + (int)worker->s->port); return HTTP_SERVICE_UNAVAILABLE; } } @@ -2299,24 +2665,26 @@ PROXY_DECLARE(int) ap_proxy_acquire_connection(const char *proxy_function, else { /* create the new connection if the previous was destroyed */ if (!worker->cp->conn) { - connection_constructor((void **)conn, worker, worker->cp->pool); + rv = connection_constructor((void **)conn, worker, worker->cp->pool); } else { *conn = worker->cp->conn; worker->cp->conn = NULL; + rv = APR_SUCCESS; } - rv = APR_SUCCESS; } if (rv != APR_SUCCESS) { ap_log_error(APLOG_MARK, APLOG_ERR, rv, s, APLOGNO(00941) - "%s: failed to acquire connection for (%s)", - proxy_function, worker->s->hostname_ex); + "%s: failed to acquire connection for (%s:%d)", + proxy_function, worker->s->hostname_ex, + (int)worker->s->port); return HTTP_SERVICE_UNAVAILABLE; } ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s, APLOGNO(00942) - "%s: has acquired connection for (%s)", - proxy_function, worker->s->hostname_ex); + "%s: has acquired connection for (%s:%d)", + proxy_function, worker->s->hostname_ex, + (int)worker->s->port); (*conn)->worker = worker; (*conn)->close = 0; @@ -2330,33 +2698,380 @@ PROXY_DECLARE(int) ap_proxy_release_connection(const char *proxy_function, server_rec *s) { ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s, APLOGNO(00943) - "%s: has released connection for (%s)", - proxy_function, conn->worker->s->hostname_ex); + "%s: has released connection for (%s:%d)", + proxy_function, conn->worker->s->hostname_ex, + (int)conn->worker->s->port); connection_cleanup(conn); return OK; } -PROXY_DECLARE(int) -ap_proxy_determine_connection(apr_pool_t *p, request_rec *r, - proxy_server_conf *conf, - proxy_worker *worker, - proxy_conn_rec *conn, - apr_uri_t *uri, - char **url, - const char *proxyname, - apr_port_t proxyport, - char *server_portstr, - int server_portstr_size) 
+static APR_INLINE void proxy_address_inc(proxy_address *address) { - int server_port; - apr_status_t err = APR_SUCCESS; - apr_status_t uerr = APR_SUCCESS; - const char *uds_path; + apr_uint32_t old = apr_atomic_inc32(&address->refcount); + ap_assert(old > 0 && old < APR_UINT32_MAX); +} - /* - * Break up the URL to determine the host to connect to - */ +static APR_INLINE void proxy_address_dec(proxy_address *address) +{ + /* Use _add32(, -1) since _dec32()'s returned value does not help */ + apr_uint32_t old = apr_atomic_add32(&address->refcount, -1); + ap_assert(old > 0); + if (old == 1) { + apr_pool_destroy(address->addr->pool); + } +} + +static apr_status_t proxy_address_cleanup(void *address) +{ + proxy_address_dec(address); + return APR_SUCCESS; +} + +static APR_INLINE proxy_address *worker_address_get(proxy_worker *worker) +{ + /* No _readptr() so let's _casptr(, NULL, NULL) instead */ + return apr_atomic_casptr((void *)&worker->address, NULL, NULL); +} + +/* XXX: Call when PROXY_THREAD_LOCK()ed only! */ +static APR_INLINE void worker_address_set(proxy_worker *worker, + proxy_address *to) +{ + proxy_address *old = apr_atomic_xchgptr((void *)&worker->address, to); + if (old && old != to) { + proxy_address_dec(old); + } +} + +static apr_status_t worker_address_resolve(proxy_worker *worker, + apr_sockaddr_t **paddr, + const char *hostname, + apr_port_t hostport, + const char *proxy_function, + request_rec *r, server_rec *s) +{ + apr_status_t rv; + apr_pool_t *pool = NULL; + + apr_pool_create(&pool, worker->cp->dns_pool); + rv = apr_sockaddr_info_get(paddr, hostname, APR_UNSPEC, + hostport, 0, pool); + if (rv != APR_SUCCESS) { + if (r && !s) { + proxyerror_core(r, HTTP_INTERNAL_SERVER_ERROR, + apr_pstrcat(pool, + "DNS lookup failure for: ", + hostname, NULL), + rv); + } + else if (r) { + ap_log_rerror(APLOG_MARK, APLOG_DEBUG, rv, r, APLOGNO(10477) + "%s: resolving worker %s address", + proxy_function, hostname); + } + else { + ap_log_error(APLOG_MARK, APLOG_DEBUG, rv, s, APLOGNO(10478) + "%s: resolving worker %s address", + proxy_function, hostname); + } + apr_pool_destroy(pool); + return rv; + } + + if (r ? APLOGrdebug(r) : APLOGdebug(s)) { + char *addrs = NULL; + apr_sockaddr_t *addr = *paddr; + for (; addr; addr = addr->next) { + addrs = apr_psprintf(pool, "%s%s%pI", + addrs ? ", " : "", + addrs ? addrs : "", + addr); + } + if (r) { + ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(10479) + "%s: %s resolved to %s", + proxy_function, hostname, addrs); + } + else { + ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s, APLOGNO(10480) + "%s: %s resolved to %s", + proxy_function, hostname, addrs); + } + } + + return APR_SUCCESS; +} + +static int proxy_addrs_equal(const apr_sockaddr_t *addr1, + const apr_sockaddr_t *addr2) +{ + const apr_sockaddr_t *base2 = addr2, *pos2; + while (addr1 && addr2) { + for (pos2 = base2; pos2; pos2 = pos2->next) { + if (apr_sockaddr_equal(pos2, addr1)) { + break; + } + } + if (!pos2) { + return 0; + } + addr1 = addr1->next; + addr2 = addr2->next; + } + if (addr1 || addr2) { + return 0; + } + return 1; +} + +PROXY_DECLARE(apr_status_t) ap_proxy_determine_address(const char *proxy_function, + proxy_conn_rec *conn, + const char *hostname, + apr_port_t hostport, + unsigned int flags, + request_rec *r, + server_rec *s) +{ + proxy_worker *worker = conn->worker; + apr_status_t rv; + + /* + * Worker can have the single constant backend adress. + * The single DNS lookup is used once per worker. 
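Illustration only, not part of this patch: the lookup referred to here is apr_sockaddr_info_get(), which resolves a host/port into a chain of apr_sockaddr_t records allocated from the given pool. A minimal standalone use, with "www.example.com" as a placeholder host:

#include <stdio.h>
#include <apr_general.h>
#include <apr_pools.h>
#include <apr_network_io.h>

int main(void)
{
    apr_pool_t *p;
    apr_sockaddr_t *sa = NULL;
    apr_status_t rv;

    apr_initialize();
    apr_pool_create(&p, NULL);

    /* One resolution yields the whole address chain (IPv4 and IPv6). */
    rv = apr_sockaddr_info_get(&sa, "www.example.com", APR_UNSPEC, 80, 0, p);
    if (rv == APR_SUCCESS) {
        for (; sa; sa = sa->next) {
            char *ip = NULL;
            apr_sockaddr_ip_get(&ip, sa);
            printf("resolved: %s port %d\n", ip, (int)sa->port);
        }
    }
    else {
        char err[128];
        printf("lookup failed: %s\n", apr_strerror(rv, err, sizeof(err)));
    }

    apr_pool_destroy(p);
    apr_terminate();
    return 0;
}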
+ * If dynamic change is needed then set the addr to NULL + * inside dynamic config to force the lookup. + * The worker's addressTTL parameter may also be configured + * to perform the DNS lookups only when the TTL expires, + * or each time if that TTL is zero. + */ + if (!worker->s->is_address_reusable) { + conn->hostname = apr_pstrdup(conn->pool, hostname); + conn->port = hostport; + + rv = apr_sockaddr_info_get(&conn->addr, hostname, APR_UNSPEC, + hostport, 0, conn->pool); + if (rv != APR_SUCCESS) { + if (r && !s) { + proxyerror_core(r, HTTP_INTERNAL_SERVER_ERROR, + apr_pstrcat(r->pool, "DNS lookup failure for: ", + hostname, NULL), rv); + } + else if (r) { + ap_log_rerror(APLOG_MARK, APLOG_DEBUG, rv, r, APLOGNO(10475) + "%s: resolving backend %s address", + proxy_function, hostname); + } + else { + ap_log_error(APLOG_MARK, APLOG_DEBUG, rv, s, APLOGNO(10476) + "%s: resolving backend %s address", + proxy_function, hostname); + } + return rv; + } + } + else { + apr_sockaddr_t *addr = NULL; + proxy_address *address = NULL; + apr_int32_t ttl = worker->s->address_ttl; + apr_uint32_t now = 0; + + if (flags & PROXY_DETERMINE_ADDRESS_CHECK) { + /* The caller wants to check if the address changed, return + * APR_EEXIST if not, otherwise fall through to update the + * worker's for everyone to switch. + */ + if (!conn->addr) { + /* Need something to compare with */ + return APR_EINVAL; + } + rv = worker_address_resolve(worker, &addr, + hostname, hostport, + proxy_function, r, s); + if (rv != APR_SUCCESS) { + return rv; + } + if (proxy_addrs_equal(conn->addr, addr)) { + apr_pool_destroy(addr->pool); + return APR_EEXIST; + } + } + + AP_DEBUG_ASSERT(ttl != 0); + if (ttl > 0) { + /* TODO: use a monotonic clock here */ + now = apr_time_sec(apr_time_now() - *proxy_start_time); + } + + /* Addresses are refcounted, destroyed when their refcount reaches 0. + * + * One ref is taken by worker->address as the worker's current/latest + * address, it's dropped when that address expires/changes (see below). + * The other refs are taken by the connections when using/switching to + * the current worker address (also below), they are dropped when the + * conns are destroyed (by the reslist though it should never happen + * if hmax is greater than the number of threads) OR for an expired + * conn->address when it's replaced by the new worker->address below. + * + * Dereferencing worker->address requires holding the worker mutex or + * some concurrent connection processing might change/destroy it at any + * time. So only conn->address is safe to dereference anywhere (unless + * NULL..) since it has at least the lifetime of the connection. 
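+ *
+ * Illustrative lifecycle (names as above, not a literal code path):
+ *   worker_address_resolve()         resolves into its own subpool of
+ *                                    worker->cp->dns_pool
+ *   worker_address_set(worker, new)  worker keeps its single ref on "new"
+ *                                    and drops the one held on the old one
+ *   proxy_address_inc(new)           one more ref taken for conn->address
+ *   proxy_address_cleanup(old)       conn drops its ref on the old address,
+ *                                    the last ref destroys the subpool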
+ */ + if (!addr) { + address = worker_address_get(worker); + } + if (!address + || conn->address != address + || apr_atomic_read32(&address->expiry) <= now) { + PROXY_THREAD_LOCK(worker); + + /* Re-check while locked, might be a new address already */ + if (!addr) { + address = worker_address_get(worker); + } + if (!address || apr_atomic_read32(&address->expiry) <= now) { + if (!addr) { + rv = worker_address_resolve(worker, &addr, + hostname, hostport, + proxy_function, r, s); + if (rv != APR_SUCCESS) { + PROXY_THREAD_UNLOCK(worker); + return rv; + } + + /* Recompute "now" should the DNS be slow + * TODO: use a monotonic clock here + */ + now = apr_time_sec(apr_time_now() - *proxy_start_time); + } + + address = apr_pcalloc(addr->pool, sizeof(*address)); + address->hostname = apr_pstrdup(addr->pool, hostname); + address->hostport = hostport; + address->addr = addr; + + if (ttl > 0) { + /* We keep each worker's expiry date shared accross all the + * children so that they update their address at the same + * time, regardless of whether a specific child forced an + * address to expire at some point (for connect() issues). + */ + address->expiry = apr_atomic_read32(&worker->s->address_expiry); + if (address->expiry <= now) { + apr_uint32_t new_expiry = address->expiry + ttl; + while (new_expiry <= now) { + new_expiry += ttl; + } + new_expiry = apr_atomic_cas32(&worker->s->address_expiry, + new_expiry, address->expiry); + /* race lost? well the expiry should grow anyway.. */ + AP_DEBUG_ASSERT(new_expiry > now); + address->expiry = new_expiry; + } + } + else { + /* Never expires */ + address->expiry = APR_UINT32_MAX; + } + + /* One ref is for worker->address in any case */ + if (worker->address || worker->cp->addr) { + apr_atomic_set32(&address->refcount, 1); + } + else { + /* Set worker->cp->addr once for compat with third-party + * modules. This addr never changed before and can't change + * underneath users now because of some TTL configuration. + * So we take one more ref for worker->cp->addr to remain + * allocated forever (though it might not be up to date..). + * Modules should use conn->addr instead of worker->cp-addr + * to get the actual address used by each conn, determined + * at connect() time. + */ + apr_atomic_set32(&address->refcount, 2); + worker->cp->addr = address->addr; + } + + /* Publish the changes. The old worker address (if any) is no + * longer used by this worker, it will be destroyed now if the + * worker is the last user (refcount == 1) or by the last conn + * using it (refcount > 1). + */ + worker_address_set(worker, address); + } + + /* Take the ref for conn->address (before dropping the mutex so to + * let no chance for this address be killed before it's used!) + */ + proxy_address_inc(address); + + PROXY_THREAD_UNLOCK(worker); + + /* Kill any socket using the old address */ + if (conn->sock) { + if (r ? APLOGrdebug(r) : APLOGdebug(s)) { + /* XXX: this requires the old conn->addr[ess] to still + * be alive since it's not copied by apr_socket_connect() + * in ap_proxy_connect_backend(). 
+ */ + apr_sockaddr_t *local_addr = NULL; + apr_sockaddr_t *remote_addr = NULL; + apr_socket_addr_get(&local_addr, APR_LOCAL, conn->sock); + apr_socket_addr_get(&remote_addr, APR_REMOTE, conn->sock); + if (r) { + ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(10481) + "%s: closing connection to %s (%pI<>%pI) on " + "address change", proxy_function, hostname, + local_addr, remote_addr); + } + else { + ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s, APLOGNO(10482) + "%s: closing connection to %s (%pI<>%pI) on " + "address change", proxy_function, hostname, + local_addr, remote_addr); + } + } + socket_cleanup(conn); + } + + /* Kill the old address (if any) and use the new one */ + if (conn->address) { + apr_pool_cleanup_run(conn->pool, conn->address, + proxy_address_cleanup); + } + apr_pool_cleanup_register(conn->pool, address, + proxy_address_cleanup, + apr_pool_cleanup_null); + address_cleanup(conn); + conn->address = address; + conn->hostname = address->hostname; + conn->port = address->hostport; + conn->addr = address->addr; + } + } + + return APR_SUCCESS; +} + +PROXY_DECLARE(int) +ap_proxy_determine_connection(apr_pool_t *p, request_rec *r, + proxy_server_conf *conf, + proxy_worker *worker, + proxy_conn_rec *conn, + apr_uri_t *uri, + char **url, + const char *proxyname, + apr_port_t proxyport, + char *server_portstr, + int server_portstr_size) +{ + int server_port; + const char *uds_path; + + /* + * Break up the URL to determine the host to connect to + */ /* we break the URL into host, port, uri */ if (APR_SUCCESS != apr_uri_parse(p, *url, uri)) { @@ -2371,6 +3086,12 @@ ap_proxy_determine_connection(apr_pool_t *p, request_rec *r, ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(00944) "connecting %s to %s:%d", *url, uri->hostname, uri->port); + /* Close a possible existing socket if we are told to do so */ + if (conn->close) { + socket_cleanup(conn); + conn->close = 0; + } + /* * allocate these out of the specified connection pool * The scheme handler decides if this is permanent or @@ -2397,129 +3118,122 @@ ap_proxy_determine_connection(apr_pool_t *p, request_rec *r, * to check host and port on the conn and be careful about * spilling the cached addr from the worker. */ - uds_path = (*worker->s->uds_path ? worker->s->uds_path : apr_table_get(r->notes, "uds_path")); + uds_path = (*worker->s->uds_path + ? worker->s->uds_path + : apr_table_get(r->notes, "uds_path")); if (uds_path) { - if (conn->uds_path == NULL) { - /* use (*conn)->pool instead of worker->cp->pool to match lifetime */ - conn->uds_path = apr_pstrdup(conn->pool, uds_path); - } - if (conn->uds_path) { - ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(02545) - "%s: has determined UDS as %s", - uri->scheme, conn->uds_path); - } - else { - /* should never happen */ - ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(02546) - "%s: cannot determine UDS (%s)", - uri->scheme, uds_path); - - } - /* - * In UDS cases, some structs are NULL. Protect from de-refs - * and provide info for logging at the same time. - */ - if (!conn->addr) { - apr_sockaddr_t *sa; - apr_sockaddr_info_get(&sa, NULL, APR_UNSPEC, 0, 0, conn->pool); - conn->addr = sa; + if (!conn->uds_path || strcmp(conn->uds_path, uds_path) != 0) { + apr_pool_t *pool = conn->pool; + if (conn->uds_path) { + address_cleanup(conn); + if (!conn->uds_pool) { + apr_pool_create(&conn->uds_pool, worker->cp->dns_pool); + } + pool = conn->uds_pool; + } + /* + * In UDS cases, some structs are NULL. Protect from de-refs + * and provide info for logging at the same time. 
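+ * With APR_HAVE_SOCKADDR_UN the UDS path itself is resolved as an APR_UNIX
+ * sockaddr, so conn->addr carries the socket file (and conn->uds_path its
+ * hostname); otherwise a dummy APR_UNSPEC sockaddr is created so that
+ * "%pI"-style logging never dereferences a NULL conn->addr.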
+ */ +#if APR_HAVE_SOCKADDR_UN + apr_sockaddr_info_get(&conn->addr, uds_path, APR_UNIX, 0, 0, pool); + if (conn->addr && conn->addr->hostname) { + conn->uds_path = conn->addr->hostname; + } + else { + conn->uds_path = apr_pstrdup(pool, uds_path); + } +#else + apr_sockaddr_info_get(&conn->addr, NULL, APR_UNSPEC, 0, 0, pool); + conn->uds_path = apr_pstrdup(pool, uds_path); +#endif + conn->hostname = apr_pstrdup(pool, uri->hostname); + conn->port = uri->port; } - conn->hostname = "httpd-UDS"; - conn->port = 0; + ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(02545) + "%s: has determined UDS as %s (for %s:%hu)", + uri->scheme, conn->uds_path, conn->hostname, conn->port); } else { - int will_reuse = worker->s->is_address_reusable && !worker->s->disablereuse; - if (!conn->hostname || !will_reuse) { - if (proxyname) { - conn->hostname = apr_pstrdup(conn->pool, proxyname); - conn->port = proxyport; - /* - * If we have a forward proxy and the protocol is HTTPS, - * then we need to prepend a HTTP CONNECT request before - * sending our actual HTTPS requests. - * Save our real backend data for using it later during HTTP CONNECT. + const char *hostname = uri->hostname; + apr_port_t hostport = uri->port; + + /* Not a remote CONNECT until further notice */ + conn->forward = NULL; + + if (proxyname) { + hostname = proxyname; + hostport = proxyport; + + /* + * If we have a remote proxy and the protocol is HTTPS, + * then we need to prepend a HTTP CONNECT request before + * sending our actual HTTPS requests. + */ + if (conn->is_ssl) { + forward_info *forward; + const char *proxy_auth; + + /* Do we want to pass Proxy-Authorization along? + * If we haven't used it, then YES + * If we have used it then MAYBE: RFC2616 says we MAY propagate it. + * So let's make it configurable by env. + * The logic here is the same used in mod_proxy_http. */ - if (conn->is_ssl) { - const char *proxy_auth; + proxy_auth = apr_table_get(r->notes, "proxy-basic-creds"); + if (proxy_auth == NULL + && (r->user == NULL /* we haven't yet authenticated */ + || apr_table_get(r->subprocess_env, "Proxy-Chain-Auth"))) { + proxy_auth = apr_table_get(r->headers_in, "Proxy-Authorization"); + } + if (proxy_auth != NULL && proxy_auth[0] == '\0') { + proxy_auth = NULL; + } - forward_info *forward = apr_pcalloc(conn->pool, sizeof(forward_info)); + /* Reset forward info if they changed */ + if (!(forward = conn->forward) + || forward->target_port != uri->port + || ap_cstr_casecmp(forward->target_host, uri->hostname) != 0 + || (forward->proxy_auth != NULL) != (proxy_auth != NULL) + || (forward->proxy_auth != NULL && proxy_auth != NULL && + strcmp(forward->proxy_auth, proxy_auth) != 0)) { + apr_pool_t *fwd_pool = conn->pool; + if (worker->s->is_address_reusable) { + if (conn->fwd_pool) { + apr_pool_clear(conn->fwd_pool); + } + else { + apr_pool_create(&conn->fwd_pool, conn->pool); + } + fwd_pool = conn->fwd_pool; + } + forward = apr_pcalloc(fwd_pool, sizeof(forward_info)); conn->forward = forward; + + /* + * Save our real backend data for using it later during HTTP CONNECT. + */ forward->use_http_connect = 1; - forward->target_host = apr_pstrdup(conn->pool, uri->hostname); + forward->target_host = apr_pstrdup(fwd_pool, uri->hostname); forward->target_port = uri->port; - /* Do we want to pass Proxy-Authorization along? - * If we haven't used it, then YES - * If we have used it then MAYBE: RFC2616 says we MAY propagate it. - * So let's make it configurable by env. - * The logic here is the same used in mod_proxy_http. 
- */ - proxy_auth = apr_table_get(r->headers_in, "Proxy-Authorization"); - if (proxy_auth != NULL && - proxy_auth[0] != '\0' && - r->user == NULL && /* we haven't yet authenticated */ - apr_table_get(r->subprocess_env, "Proxy-Chain-Auth")) { - forward->proxy_auth = apr_pstrdup(conn->pool, proxy_auth); + if (proxy_auth) { + forward->proxy_auth = apr_pstrdup(fwd_pool, proxy_auth); } } } - else { - conn->hostname = apr_pstrdup(conn->pool, uri->hostname); - conn->port = uri->port; - } - if (!will_reuse) { - /* - * Only do a lookup if we should not reuse the backend address. - * Otherwise we will look it up once for the worker. - */ - err = apr_sockaddr_info_get(&(conn->addr), - conn->hostname, APR_UNSPEC, - conn->port, 0, - conn->pool); - } - socket_cleanup(conn); - conn->close = 0; } - if (will_reuse) { - /* - * Looking up the backend address for the worker only makes sense if - * we can reuse the address. - */ - if (!worker->cp->addr) { - if ((err = PROXY_THREAD_LOCK(worker)) != APR_SUCCESS) { - ap_log_rerror(APLOG_MARK, APLOG_ERR, err, r, APLOGNO(00945) "lock"); - return HTTP_INTERNAL_SERVER_ERROR; - } - /* - * Worker can have the single constant backend address. - * The single DNS lookup is used once per worker. - * If dynamic change is needed then set the addr to NULL - * inside dynamic config to force the lookup. - */ - err = apr_sockaddr_info_get(&(worker->cp->addr), - conn->hostname, APR_UNSPEC, - conn->port, 0, - worker->cp->pool); - conn->addr = worker->cp->addr; - if ((uerr = PROXY_THREAD_UNLOCK(worker)) != APR_SUCCESS) { - ap_log_rerror(APLOG_MARK, APLOG_ERR, uerr, r, APLOGNO(00946) "unlock"); - } - } - else { - conn->addr = worker->cp->addr; - } + if (conn->hostname + && (conn->port != hostport + || ap_cstr_casecmp(conn->hostname, hostname) != 0)) { + address_cleanup(conn); } - } - /* Close a possible existing socket if we are told to do so */ - if (conn->close) { - socket_cleanup(conn); - conn->close = 0; - } - if (err != APR_SUCCESS) { - return ap_proxyerror(r, HTTP_BAD_GATEWAY, - apr_pstrcat(p, "DNS lookup failure for: ", - conn->hostname, NULL)); + /* Resolve the connection address with the determined hostname/port */ + if (ap_proxy_determine_address(uri->scheme, conn, hostname, hostport, + 0, r, NULL)) { + return HTTP_INTERNAL_SERVER_ERROR; + } } /* Get the server port for the Via headers */ @@ -2578,7 +3292,8 @@ ap_proxy_determine_connection(apr_pool_t *p, request_rec *r, } } ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(00947) - "connected %s to %s:%d", *url, conn->hostname, conn->port); + "connecting %s to %pI (%s:%hu)", *url, + conn->addr, conn->hostname, conn->port); return OK; } @@ -2679,7 +3394,8 @@ static apr_status_t send_http_connect(proxy_conn_rec *backend, nbytes = apr_snprintf(buffer, sizeof(buffer), "CONNECT %s:%d HTTP/1.0" CRLF, forward->target_host, forward->target_port); - /* Add proxy authorization from the initial request if necessary */ + /* Add proxy authorization from the configuration, or initial + * request if necessary */ if (forward->proxy_auth != NULL) { nbytes += apr_snprintf(buffer + nbytes, sizeof(buffer) - nbytes, "Proxy-Authorization: %s" CRLF, @@ -2835,7 +3551,7 @@ PROXY_DECLARE(apr_status_t) ap_proxy_check_connection(const char *scheme, /* Filter chain is OK and empty, yet we can't determine from * ap_check_pipeline (actually ap_core_input_filter) whether * an empty non-blocking read is EAGAIN or EOF on the socket - * side (it's always SUCCESS), so check it explicitely here. + * side (it's always SUCCESS), so check it explicitly here. 
*/ if (ap_proxy_is_socket_connected(conn->sock)) { rv = APR_SUCCESS; @@ -2882,7 +3598,7 @@ PROXY_DECLARE(apr_status_t) ap_proxy_check_connection(const char *scheme, "%s: backend socket is disconnected.", scheme); } else { - ap_log_error(APLOG_MARK, APLOG_WARNING, 0, server, APLOGNO(03408) + ap_log_error(APLOG_MARK, APLOG_INFO, 0, server, APLOGNO(03408) "%s: reusable backend connection is not empty: " "forcibly closed", scheme); } @@ -2902,11 +3618,14 @@ PROXY_DECLARE(int) ap_proxy_connect_backend(const char *proxy_function, { apr_status_t rv; int loglevel; - apr_sockaddr_t *backend_addr = conn->addr; + forward_info *forward = conn->forward; + apr_sockaddr_t *backend_addr; /* the local address to use for the outgoing connection */ apr_sockaddr_t *local_addr; apr_socket_t *newsock; void *sconf = s->module_config; + int address_reusable = worker->s->is_address_reusable; + int did_dns_lookup = 0; proxy_server_conf *conf = (proxy_server_conf *) ap_get_module_config(sconf, &proxy_module); @@ -2915,6 +3634,16 @@ PROXY_DECLARE(int) ap_proxy_connect_backend(const char *proxy_function, return DECLINED; } + /* We'll set conn->addr to the address actually connect()ed, so if the + * network connection is not reused (per ap_proxy_check_connection() + * above) we need to reset conn->addr to the first resolved address + * and try to connect it first. + */ + if (conn->address && rv != APR_SUCCESS) { + conn->addr = conn->address->addr; + } + backend_addr = conn->addr; + while (rv != APR_SUCCESS && (backend_addr || conn->uds_path)) { #if APR_HAVE_SYS_UN_H if (conn->uds_path) @@ -2924,10 +3653,11 @@ PROXY_DECLARE(int) ap_proxy_connect_backend(const char *proxy_function, if (rv != APR_SUCCESS) { loglevel = APLOG_ERR; ap_log_error(APLOG_MARK, loglevel, rv, s, APLOGNO(02453) - "%s: error creating Unix domain socket for " - "target %s", + "%s: error creating Unix domain socket " + "%s (%s:%hu)", proxy_function, - worker->s->hostname_ex); + conn->uds_path, + conn->hostname, conn->port); break; } conn->connection = NULL; @@ -2937,19 +3667,18 @@ PROXY_DECLARE(int) ap_proxy_connect_backend(const char *proxy_function, apr_socket_close(newsock); ap_log_error(APLOG_MARK, APLOG_ERR, rv, s, APLOGNO(02454) "%s: attempt to connect to Unix domain socket " - "%s (%s) failed", - proxy_function, - conn->uds_path, - worker->s->hostname_ex); + "%s (%s:%hu) failed", + proxy_function, conn->uds_path, + conn->hostname, conn->port); break; } ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s, APLOGNO(02823) "%s: connection established with Unix domain socket " - "%s (%s)", + "%s (%s:%hu)", proxy_function, conn->uds_path, - worker->s->hostname_ex); + conn->hostname, conn->port); } else #endif @@ -2959,11 +3688,11 @@ PROXY_DECLARE(int) ap_proxy_connect_backend(const char *proxy_function, conn->scpool)) != APR_SUCCESS) { loglevel = backend_addr->next ? 
APLOG_DEBUG : APLOG_ERR; ap_log_error(APLOG_MARK, loglevel, rv, s, APLOGNO(00952) - "%s: error creating fam %d socket for " - "target %s", + "%s: error creating fam %d socket to %pI for " + "(%s:%hu)", proxy_function, - backend_addr->family, - worker->s->hostname_ex); + backend_addr->family, backend_addr, + conn->hostname, conn->port); /* * this could be an IPv6 address from the DNS but the * local machine won't give us an IPv6 socket; hopefully the @@ -3012,8 +3741,9 @@ PROXY_DECLARE(int) ap_proxy_connect_backend(const char *proxy_function, } } ap_log_error(APLOG_MARK, APLOG_TRACE2, 0, s, - "%s: fam %d socket created to connect to %s", - proxy_function, backend_addr->family, worker->s->hostname_ex); + "%s: fam %d socket created for %pI (%s:%hu)", + proxy_function, backend_addr->family, backend_addr, + conn->hostname, conn->port); if (conf->source_address_set) { local_addr = apr_pmemdup(conn->scpool, conf->source_address, @@ -3035,19 +3765,45 @@ PROXY_DECLARE(int) ap_proxy_connect_backend(const char *proxy_function, apr_socket_close(newsock); loglevel = backend_addr->next ? APLOG_DEBUG : APLOG_ERR; ap_log_error(APLOG_MARK, loglevel, rv, s, APLOGNO(00957) - "%s: attempt to connect to %pI (%s) failed", - proxy_function, - backend_addr, - worker->s->hostname_ex); + "%s: attempt to connect to %pI (%s:%hu) failed", + proxy_function, backend_addr, + conn->hostname, conn->port); backend_addr = backend_addr->next; + /* + * If we run out of resolved IP's when connecting and if + * we cache the resolution in the worker the resolution + * might have changed. Hence try a DNS lookup to see if this + * helps. + */ + if (!backend_addr && address_reusable && !did_dns_lookup) { + /* Issue a new DNS lookup to check if the address changed, + * in which case (SUCCESS) restart the loop with the new + * one(s), otherwise leave (nothing we can do about it). + */ + if (ap_proxy_determine_address(proxy_function, conn, + conn->hostname, conn->port, + PROXY_DETERMINE_ADDRESS_CHECK, + NULL, s) == APR_SUCCESS) { + backend_addr = conn->addr; + } + + /* + * In case of an error backend_addr will be NULL which + * is enough to leave the loop. If successful we'll retry + * the new addresses only once. + */ + did_dns_lookup = 1; + } continue; } ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s, APLOGNO(02824) - "%s: connection established with %pI (%s)", - proxy_function, - backend_addr, - worker->s->hostname_ex); + "%s: connection established with %pI (%s:%hu)", + proxy_function, backend_addr, + conn->hostname, conn->port); + + /* Set the actual sockaddr we are connected to */ + conn->addr = backend_addr; } /* Set a timeout on the socket */ @@ -3063,13 +3819,12 @@ PROXY_DECLARE(int) ap_proxy_connect_backend(const char *proxy_function, conn->sock = newsock; - if (!conn->uds_path && conn->forward) { - forward_info *forward = (forward_info *)conn->forward; + if (forward && forward->use_http_connect) { /* * For HTTP CONNECT we need to prepend CONNECT request before * sending our actual HTTPS requests. */ - if (forward->use_http_connect) { + { rv = send_http_connect(conn, s); /* If an error occurred, loop round and try again */ if (rv != APR_SUCCESS) { @@ -3077,11 +3832,11 @@ PROXY_DECLARE(int) ap_proxy_connect_backend(const char *proxy_function, apr_socket_close(newsock); loglevel = backend_addr->next ? 
APLOG_DEBUG : APLOG_ERR; ap_log_error(APLOG_MARK, loglevel, rv, s, APLOGNO(00958) - "%s: attempt to connect to %s:%d " - "via http CONNECT through %pI (%s) failed", + "%s: attempt to connect to %s:%hu " + "via http CONNECT through %pI (%s:%hu) failed", proxy_function, forward->target_host, forward->target_port, - backend_addr, worker->s->hostname_ex); + backend_addr, conn->hostname, conn->port); backend_addr = backend_addr->next; continue; } @@ -3101,9 +3856,10 @@ PROXY_DECLARE(int) ap_proxy_connect_backend(const char *proxy_function, worker->s->error_time = apr_time_now(); worker->s->status |= PROXY_WORKER_IN_ERROR; ap_log_error(APLOG_MARK, APLOG_ERR, 0, s, APLOGNO(00959) - "ap_proxy_connect_backend disabling worker for (%s) for %" - APR_TIME_T_FMT "s", - worker->s->hostname_ex, apr_time_sec(worker->s->retry)); + "ap_proxy_connect_backend disabling worker for (%s:%hu) " + "for %" APR_TIME_T_FMT "s", + worker->s->hostname_ex, (int)worker->s->port, + apr_time_sec(worker->s->retry)); } } else { @@ -3172,6 +3928,12 @@ static int proxy_connection_create(const char *proxy_function, apr_bucket_alloc_t *bucket_alloc; if (conn->connection) { + if (conn->is_ssl) { + /* on reuse, reinit the SSL connection dir config with the current + * r->per_dir_config, the previous one was reset on release. + */ + ap_proxy_ssl_engine(conn->connection, per_dir_config, 1); + } return OK; } @@ -3189,7 +3951,7 @@ static int proxy_connection_create(const char *proxy_function, * the peer reset the connection already; ap_run_create_connection() * closed the socket */ - ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, + ap_log_error(APLOG_MARK, APLOG_ERR, 0, s, APLOGNO(00960) "%s: an error occurred creating a " "new connection to %pI (%s)", proxy_function, backend_addr, conn->hostname); @@ -3207,6 +3969,16 @@ static int proxy_connection_create(const char *proxy_function, backend_addr, conn->hostname); return HTTP_INTERNAL_SERVER_ERROR; } + if (conn->ssl_hostname) { + /* Set a note on the connection about what CN is requested, + * such that mod_ssl can check if it is requested to do so. + */ + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, conn->connection, + "%s: set SNI to %s for (%s)", proxy_function, + conn->ssl_hostname, conn->hostname); + apr_table_setn(conn->connection->notes, "proxy-request-hostname", + conn->ssl_hostname); + } } else { /* TODO: See if this will break FTP */ @@ -3269,6 +4041,45 @@ int ap_proxy_lb_workers(void) return lb_workers_limit; } +static APR_INLINE int error_code_overridden(const int *elts, int nelts, + int code) +{ + int min = 0; + int max = nelts - 1; + AP_DEBUG_ASSERT(max >= 0); + + while (min < max) { + int mid = (min + max) / 2; + int val = elts[mid]; + + if (val < code) { + min = mid + 1; + } + else if (val > code) { + max = mid - 1; + } + else { + return 1; + } + } + + return elts[min] == code; +} + +PROXY_DECLARE(int) ap_proxy_should_override(proxy_dir_conf *conf, int code) +{ + if (!conf->error_override) + return 0; + + if (apr_is_empty_array(conf->error_override_codes)) + return ap_is_HTTP_ERROR(code); + + /* Since error_override_codes is sorted, apply binary search. 
*/ + return error_code_overridden((int *)conf->error_override_codes->elts, + conf->error_override_codes->nelts, + code); +} + PROXY_DECLARE(void) ap_proxy_backend_broke(request_rec *r, apr_bucket_brigade *brigade) { @@ -3416,16 +4227,15 @@ PROXY_DECLARE(apr_status_t) ap_proxy_sync_balancer(proxy_balancer *b, server_rec } if (!found) { proxy_worker **runtime; + /* XXX: a thread mutex is maybe enough here */ apr_global_mutex_lock(proxy_mutex); runtime = apr_array_push(b->workers); - *runtime = apr_palloc(conf->pool, sizeof(proxy_worker)); + *runtime = apr_pcalloc(conf->pool, sizeof(proxy_worker)); apr_global_mutex_unlock(proxy_mutex); (*runtime)->hash = shm->hash; - (*runtime)->context = NULL; - (*runtime)->cp = NULL; (*runtime)->balancer = b; (*runtime)->s = shm; - (*runtime)->tmutex = NULL; + rv = ap_proxy_initialize_worker(*runtime, s, conf->pool); if (rv != APR_SUCCESS) { ap_log_error(APLOG_MARK, APLOG_EMERG, rv, s, APLOGNO(00966) "Cannot init worker"); @@ -3433,7 +4243,7 @@ PROXY_DECLARE(apr_status_t) ap_proxy_sync_balancer(proxy_balancer *b, server_rec } ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s, APLOGNO(02403) "grabbing shm[%d] (0x%pp) for worker: %s", i, (void *)shm, - (*runtime)->s->name); + (*runtime)->s->name_ex); } } if (b->s->need_reset) { @@ -3565,98 +4375,126 @@ PROXY_DECLARE(int) ap_proxy_create_hdrbrgd(apr_pool_t *p, char **old_cl_val, char **old_te_val) { + int rc = OK; conn_rec *c = r->connection; int counter; char *buf; + apr_table_t *saved_headers_in = r->headers_in; + const char *saved_host = apr_table_get(saved_headers_in, "Host"); const apr_array_header_t *headers_in_array; const apr_table_entry_t *headers_in; - apr_table_t *saved_headers_in; apr_bucket *e; - int do_100_continue; + int force10 = 0, do_100_continue = 0; conn_rec *origin = p_conn->connection; + const char *host, *creds, *val; proxy_dir_conf *dconf = ap_get_module_config(r->per_dir_config, &proxy_module); /* + * HTTP "Ping" test? Easiest is 100-Continue. However: * To be compliant, we only use 100-Continue for requests with bodies. * We also make sure we won't be talking HTTP/1.0 as well. */ - do_100_continue = (worker->s->ping_timeout_set - && ap_request_has_body(r) - && (PROXYREQ_REVERSE == r->proxyreq) - && !(apr_table_get(r->subprocess_env, "force-proxy-request-1.0"))); - if (apr_table_get(r->subprocess_env, "force-proxy-request-1.0")) { - /* - * According to RFC 2616 8.2.3 we are not allowed to forward an - * Expect: 100-continue to an HTTP/1.0 server. 
Instead we MUST return - * a HTTP_EXPECTATION_FAILED - */ - if (r->expecting_100) { - return HTTP_EXPECTATION_FAILED; + force10 = 1; + } + else if (apr_table_get(r->notes, "proxy-100-continue") + || PROXY_SHOULD_PING_100_CONTINUE(worker, r)) { + do_100_continue = 1; + } + if (force10 || apr_table_get(r->subprocess_env, "proxy-nokeepalive")) { + if (origin) { + origin->keepalive = AP_CONN_CLOSE; } - buf = apr_pstrcat(p, r->method, " ", url, " HTTP/1.0" CRLF, NULL); p_conn->close = 1; - } else { - buf = apr_pstrcat(p, r->method, " ", url, " HTTP/1.1" CRLF, NULL); } - if (apr_table_get(r->subprocess_env, "proxy-nokeepalive")) { - origin->keepalive = AP_CONN_CLOSE; - p_conn->close = 1; - } - ap_xlate_proto_to_ascii(buf, strlen(buf)); - e = apr_bucket_pool_create(buf, strlen(buf), p, c->bucket_alloc); - APR_BRIGADE_INSERT_TAIL(header_brigade, e); - if (dconf->preserve_host == 0) { - if (ap_strchr_c(uri->hostname, ':')) { /* if literal IPv6 address */ - if (uri->port_str && uri->port != DEFAULT_HTTP_PORT) { - buf = apr_pstrcat(p, "Host: [", uri->hostname, "]:", - uri->port_str, CRLF, NULL); - } else { - buf = apr_pstrcat(p, "Host: [", uri->hostname, "]", CRLF, NULL); - } - } else { - if (uri->port_str && uri->port != DEFAULT_HTTP_PORT) { - buf = apr_pstrcat(p, "Host: ", uri->hostname, ":", - uri->port_str, CRLF, NULL); - } else { - buf = apr_pstrcat(p, "Host: ", uri->hostname, CRLF, NULL); - } - } - } - else { - /* don't want to use r->hostname, as the incoming header might have a - * port attached - */ - const char* hostname = apr_table_get(r->headers_in,"Host"); - if (!hostname) { - hostname = r->server->server_hostname; - ap_log_rerror(APLOG_MARK, APLOG_WARNING, 0, r, APLOGNO(01092) - "no HTTP 0.9 request (with no host line) " - "on incoming request and preserve host set " - "forcing hostname to be %s for uri %s", - hostname, r->uri); - } - buf = apr_pstrcat(p, "Host: ", hostname, CRLF, NULL); + + if (force10) { + buf = apr_pstrcat(p, r->method, " ", url, " HTTP/1.0" CRLF, NULL); + } + else { + buf = apr_pstrcat(p, r->method, " ", url, " HTTP/1.1" CRLF, NULL); } ap_xlate_proto_to_ascii(buf, strlen(buf)); e = apr_bucket_pool_create(buf, strlen(buf), p, c->bucket_alloc); APR_BRIGADE_INSERT_TAIL(header_brigade, e); /* - * Save the original headers in here and restore them when leaving, since - * we will apply proxy purpose only modifications (eg. clearing hop-by-hop - * headers, add Via or X-Forwarded-* or Expect...), whereas the originals - * will be needed later to prepare the correct response and logging. + * Make a copy on r->headers_in for the request we make to the backend, + * modify the copy in place according to our configuration and connection + * handling, use it to fill in the forwarded headers' brigade, and finally + * restore the saved/original ones in r->headers_in. * * Note: We need to take r->pool for apr_table_copy as the key / value * pairs in r->headers_in have been created out of r->pool and * p might be (and actually is) a longer living pool. * This would trigger the bad pool ancestry abort in apr_table_copy if * apr is compiled with APR_POOL_DEBUG. + * + * icing: if p indeed lives longer than r->pool, we should allocate + * all new header values from r->pool as well and avoid leakage. */ - saved_headers_in = r->headers_in; r->headers_in = apr_table_copy(r->pool, saved_headers_in); + /* Return the original Transfer-Encoding and/or Content-Length values + * then drop the headers, they must be set by the proxy handler based + * on the actual body being forwarded. 
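+ * (For illustration only: a scheme handler will typically re-add either a
+ * Content-Length computed from the prefetched/spooled body, or a
+ * "Transfer-Encoding: chunked" header, depending on how it streams the
+ * body to the backend.)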
+ */ + if ((*old_te_val = (char *)apr_table_get(r->headers_in, + "Transfer-Encoding"))) { + apr_table_unset(r->headers_in, "Transfer-Encoding"); + } + if ((*old_cl_val = (char *)apr_table_get(r->headers_in, + "Content-Length"))) { + apr_table_unset(r->headers_in, "Content-Length"); + } + + /* Clear out hop-by-hop request headers not to forward */ + if (ap_proxy_clear_connection(r, r->headers_in) < 0) { + rc = HTTP_BAD_REQUEST; + goto cleanup; + } + + /* RFC2616 13.5.1 says we should strip these */ + apr_table_unset(r->headers_in, "Keep-Alive"); + apr_table_unset(r->headers_in, "Upgrade"); + apr_table_unset(r->headers_in, "Trailer"); + apr_table_unset(r->headers_in, "TE"); + + /* Compute Host header */ + if (dconf->preserve_host == 0) { + if (ap_strchr_c(uri->hostname, ':')) { /* if literal IPv6 address */ + if (uri->port_str && uri->port != DEFAULT_HTTP_PORT) { + host = apr_pstrcat(r->pool, "[", uri->hostname, "]:", + uri->port_str, NULL); + } else { + host = apr_pstrcat(r->pool, "[", uri->hostname, "]", NULL); + } + } else { + if (uri->port_str && uri->port != DEFAULT_HTTP_PORT) { + host = apr_pstrcat(r->pool, uri->hostname, ":", + uri->port_str, NULL); + } else { + host = uri->hostname; + } + } + apr_table_setn(r->headers_in, "Host", host); + } + else { + /* don't want to use r->hostname as the incoming header might have a + * port attached, let's use the original header. + */ + host = saved_host; + if (!host) { + host = r->server->server_hostname; + ap_log_rerror(APLOG_MARK, APLOG_WARNING, 0, r, APLOGNO(01092) + "no HTTP 0.9 request (with no host line) " + "on incoming request and preserve host set " + "forcing hostname to be %s for uri %s", + host, r->uri); + apr_table_setn(r->headers_in, "Host", host); + } + } + /* handle Via */ if (conf->viaopt == via_block) { /* Block all outgoing Via: headers */ @@ -3690,23 +4528,19 @@ PROXY_DECLARE(int) ap_proxy_create_hdrbrgd(apr_pool_t *p, * to backend */ if (do_100_continue) { - const char *val; - - if (!r->expecting_100) { - /* Don't forward any "100 Continue" response if the client is - * not expecting it. - */ - apr_table_setn(r->subprocess_env, "proxy-interim-response", - "Suppress"); - } - /* Add the Expect header if not already there. */ - if (((val = apr_table_get(r->headers_in, "Expect")) == NULL) - || (strcasecmp(val, "100-Continue") != 0 /* fast path */ - && !ap_find_token(r->pool, val, "100-Continue"))) { + if (!(val = apr_table_get(r->headers_in, "Expect")) + || (ap_cstr_casecmp(val, "100-Continue") != 0 /* fast path */ + && !ap_find_token(r->pool, val, "100-Continue"))) { apr_table_mergen(r->headers_in, "Expect", "100-Continue"); } } + else { + /* XXX: we should strip the 100-continue token only from the + * Expect header, but are there others actually used anywhere? + */ + apr_table_unset(r->headers_in, "Expect"); + } /* X-Forwarded-*: handling * @@ -3730,8 +4564,6 @@ PROXY_DECLARE(int) ap_proxy_create_hdrbrgd(apr_pool_t *p, */ if (dconf->add_forwarded_headers) { if (PROXYREQ_REVERSE == r->proxyreq) { - const char *buf; - /* Add X-Forwarded-For: so that the upstream has a chance to * determine, where the original request came from. */ @@ -3741,8 +4573,9 @@ PROXY_DECLARE(int) ap_proxy_create_hdrbrgd(apr_pool_t *p, /* Add X-Forwarded-Host: so that upstream knows what the * original request hostname was. 
*/ - if ((buf = apr_table_get(r->headers_in, "Host"))) { - apr_table_mergen(r->headers_in, "X-Forwarded-Host", buf); + if (saved_host) { + apr_table_mergen(r->headers_in, "X-Forwarded-Host", + saved_host); } /* Add X-Forwarded-Server: so that upstream knows what the @@ -3754,79 +4587,315 @@ PROXY_DECLARE(int) ap_proxy_create_hdrbrgd(apr_pool_t *p, } } + /* Do we want to strip Proxy-Authorization ? + * If we haven't used it, then NO + * If we have used it then MAYBE: RFC2616 says we MAY propagate it. + * So let's make it configurable by env. + */ + if (r->user != NULL /* we've authenticated */ + && !apr_table_get(r->subprocess_env, "Proxy-Chain-Auth")) { + apr_table_unset(r->headers_in, "Proxy-Authorization"); + } + + /* for sub-requests, ignore freshness/expiry headers */ + if (r->main) { + apr_table_unset(r->headers_in, "If-Match"); + apr_table_unset(r->headers_in, "If-Modified-Since"); + apr_table_unset(r->headers_in, "If-Range"); + apr_table_unset(r->headers_in, "If-Unmodified-Since"); + apr_table_unset(r->headers_in, "If-None-Match"); + } + + creds = apr_table_get(r->notes, "proxy-basic-creds"); + if (creds) { + apr_table_mergen(r->headers_in, "Proxy-Authorization", creds); + } + + /* run hook to fixup the request we are about to send */ proxy_run_fixups(r); - if (ap_proxy_clear_connection(r, r->headers_in) < 0) { - return HTTP_BAD_REQUEST; + + /* We used to send `Host: ` always first, so let's keep it that + * way. No telling which legacy backend is relying on this. + * If proxy_run_fixups() changed the value, use it (though removal + * is ignored). + */ + val = apr_table_get(r->headers_in, "Host"); + if (val) { + apr_table_unset(r->headers_in, "Host"); + host = val; } + buf = apr_pstrcat(p, "Host: ", host, CRLF, NULL); + ap_xlate_proto_to_ascii(buf, strlen(buf)); + e = apr_bucket_pool_create(buf, strlen(buf), p, c->bucket_alloc); + APR_BRIGADE_INSERT_TAIL(header_brigade, e); - /* send request headers */ + /* Append the (remaining) headers to the brigade */ headers_in_array = apr_table_elts(r->headers_in); headers_in = (const apr_table_entry_t *) headers_in_array->elts; for (counter = 0; counter < headers_in_array->nelts; counter++) { if (headers_in[counter].key == NULL - || headers_in[counter].val == NULL + || headers_in[counter].val == NULL) { + continue; + } - /* Already sent */ - || !strcasecmp(headers_in[counter].key, "Host") + buf = apr_pstrcat(p, headers_in[counter].key, ": ", + headers_in[counter].val, CRLF, + NULL); + ap_xlate_proto_to_ascii(buf, strlen(buf)); + e = apr_bucket_pool_create(buf, strlen(buf), p, c->bucket_alloc); + APR_BRIGADE_INSERT_TAIL(header_brigade, e); + } - /* Clear out hop-by-hop request headers not to send - * RFC2616 13.5.1 says we should strip these headers - */ - || !strcasecmp(headers_in[counter].key, "Keep-Alive") - || !strcasecmp(headers_in[counter].key, "TE") - || !strcasecmp(headers_in[counter].key, "Trailer") - || !strcasecmp(headers_in[counter].key, "Upgrade") +cleanup: + r->headers_in = saved_headers_in; + return rc; +} - ) { - continue; +PROXY_DECLARE(int) ap_proxy_prefetch_input(request_rec *r, + proxy_conn_rec *backend, + apr_bucket_brigade *input_brigade, + apr_read_type_e block, + apr_off_t *bytes_read, + apr_off_t max_read) +{ + apr_pool_t *p = r->pool; + conn_rec *c = r->connection; + apr_bucket_brigade *temp_brigade; + apr_status_t status; + apr_off_t bytes; + + *bytes_read = 0; + if (max_read < APR_BUCKET_BUFF_SIZE) { + max_read = APR_BUCKET_BUFF_SIZE; + } + + /* Prefetch max_read bytes + * + * This helps us avoid any election of C-L 
v.s. T-E + * request bodies, since we are willing to keep in + * memory this much data, in any case. This gives + * us an instant C-L election if the body is of some + * reasonable size. + */ + temp_brigade = apr_brigade_create(p, input_brigade->bucket_alloc); + + /* Account for saved input, if any. */ + apr_brigade_length(input_brigade, 0, bytes_read); + + /* Ensure we don't hit a wall where we have a buffer too small for + * ap_get_brigade's filters to fetch us another bucket, surrender + * once we hit 80 bytes (an arbitrary value) less than max_read. + */ + while (*bytes_read < max_read - 80 + && (APR_BRIGADE_EMPTY(input_brigade) + || !APR_BUCKET_IS_EOS(APR_BRIGADE_LAST(input_brigade)))) { + status = ap_get_brigade(r->input_filters, temp_brigade, + AP_MODE_READBYTES, block, + max_read - *bytes_read); + /* ap_get_brigade may return success with an empty brigade + * for a non-blocking read which would block + */ + if (block == APR_NONBLOCK_READ + && ((status == APR_SUCCESS && APR_BRIGADE_EMPTY(temp_brigade)) + || APR_STATUS_IS_EAGAIN(status))) { + break; + } + if (status != APR_SUCCESS) { + ap_log_rerror(APLOG_MARK, APLOG_ERR, status, r, APLOGNO(01095) + "prefetch request body failed to %pI (%s)" + " from %s (%s)", backend->addr, + backend->hostname ? backend->hostname : "", + c->client_ip, c->remote_host ? c->remote_host : ""); + return ap_map_http_request_error(status, HTTP_BAD_REQUEST); } - /* Do we want to strip Proxy-Authorization ? - * If we haven't used it, then NO - * If we have used it then MAYBE: RFC2616 says we MAY propagate it. - * So let's make it configurable by env. + + apr_brigade_length(temp_brigade, 1, &bytes); + *bytes_read += bytes; + + /* + * Save temp_brigade in input_brigade. (At least) in the SSL case + * temp_brigade contains transient buckets whose data would get + * overwritten during the next call of ap_get_brigade in the loop. + * ap_save_brigade ensures these buckets to be set aside. + * Calling ap_save_brigade with NULL as filter is OK, because + * input_brigade already has been created and does not need to get + * created by ap_save_brigade. */ - if (!strcasecmp(headers_in[counter].key,"Proxy-Authorization")) { - if (r->user != NULL) { /* we've authenticated */ - if (!apr_table_get(r->subprocess_env, "Proxy-Chain-Auth")) { - continue; - } + status = ap_save_brigade(NULL, &input_brigade, &temp_brigade, p); + if (status != APR_SUCCESS) { + ap_log_rerror(APLOG_MARK, APLOG_ERR, status, r, APLOGNO(01096) + "processing prefetched request body failed" + " to %pI (%s) from %s (%s)", backend->addr, + backend->hostname ? backend->hostname : "", + c->client_ip, c->remote_host ? c->remote_host : ""); + return HTTP_INTERNAL_SERVER_ERROR; + } + } + + return OK; +} + +PROXY_DECLARE(int) ap_proxy_read_input(request_rec *r, + proxy_conn_rec *backend, + apr_bucket_brigade *bb, + apr_off_t max_read) +{ + apr_bucket_alloc_t *bucket_alloc = bb->bucket_alloc; + apr_read_type_e block = (backend->connection) ? 
APR_NONBLOCK_READ + : APR_BLOCK_READ; + apr_status_t status; + int rv; + + for (;;) { + apr_brigade_cleanup(bb); + status = ap_get_brigade(r->input_filters, bb, AP_MODE_READBYTES, + block, max_read); + if (block == APR_BLOCK_READ + || (!(status == APR_SUCCESS && APR_BRIGADE_EMPTY(bb)) + && !APR_STATUS_IS_EAGAIN(status))) { + break; + } + + /* Flush and retry (blocking) */ + apr_brigade_cleanup(bb); + rv = ap_proxy_pass_brigade(bucket_alloc, r, backend, + backend->connection, bb, 1); + if (rv != OK) { + return rv; + } + block = APR_BLOCK_READ; + } + + if (status != APR_SUCCESS) { + conn_rec *c = r->connection; + ap_log_rerror(APLOG_MARK, APLOG_ERR, status, r, APLOGNO(02608) + "read request body failed to %pI (%s)" + " from %s (%s)", backend->addr, + backend->hostname ? backend->hostname : "", + c->client_ip, c->remote_host ? c->remote_host : ""); + return ap_map_http_request_error(status, HTTP_BAD_REQUEST); + } + + return OK; +} + +PROXY_DECLARE(int) ap_proxy_spool_input(request_rec *r, + proxy_conn_rec *backend, + apr_bucket_brigade *input_brigade, + apr_off_t *bytes_spooled, + apr_off_t max_mem_spool) +{ + apr_pool_t *p = r->pool; + int seen_eos = 0, rv = OK; + apr_status_t status = APR_SUCCESS; + apr_bucket_alloc_t *bucket_alloc = input_brigade->bucket_alloc; + apr_bucket_brigade *body_brigade; + apr_bucket *e; + apr_off_t bytes, fsize = 0; + apr_file_t *tmpfile = NULL; + + *bytes_spooled = 0; + body_brigade = apr_brigade_create(p, bucket_alloc); + + do { + if (APR_BRIGADE_EMPTY(input_brigade)) { + rv = ap_proxy_read_input(r, backend, input_brigade, + HUGE_STRING_LEN); + if (rv != OK) { + return rv; } } - /* Skip Transfer-Encoding and Content-Length for now. - */ - if (!strcasecmp(headers_in[counter].key, "Transfer-Encoding")) { - *old_te_val = headers_in[counter].val; - continue; + /* If this brigade contains EOS, either stop or remove it. 
*/ + if (APR_BUCKET_IS_EOS(APR_BRIGADE_LAST(input_brigade))) { + seen_eos = 1; } - if (!strcasecmp(headers_in[counter].key, "Content-Length")) { - *old_cl_val = headers_in[counter].val; - continue; + + apr_brigade_length(input_brigade, 1, &bytes); + + if (*bytes_spooled + bytes > max_mem_spool) { + /* can't spool any more in memory; write latest brigade to disk */ + if (tmpfile == NULL) { + const char *temp_dir; + char *template; + + status = apr_temp_dir_get(&temp_dir, p); + if (status != APR_SUCCESS) { + ap_log_rerror(APLOG_MARK, APLOG_ERR, status, r, APLOGNO(01089) + "search for temporary directory failed"); + return HTTP_INTERNAL_SERVER_ERROR; + } + apr_filepath_merge(&template, temp_dir, + "modproxy.tmp.XXXXXX", + APR_FILEPATH_NATIVE, p); + status = apr_file_mktemp(&tmpfile, template, 0, p); + if (status != APR_SUCCESS) { + ap_log_rerror(APLOG_MARK, APLOG_ERR, status, r, APLOGNO(01090) + "creation of temporary file in directory " + "%s failed", temp_dir); + return HTTP_INTERNAL_SERVER_ERROR; + } + } + for (e = APR_BRIGADE_FIRST(input_brigade); + e != APR_BRIGADE_SENTINEL(input_brigade); + e = APR_BUCKET_NEXT(e)) { + const char *data; + apr_size_t bytes_read, bytes_written; + + apr_bucket_read(e, &data, &bytes_read, APR_BLOCK_READ); + status = apr_file_write_full(tmpfile, data, bytes_read, &bytes_written); + if (status != APR_SUCCESS) { + const char *tmpfile_name; + + if (apr_file_name_get(&tmpfile_name, tmpfile) != APR_SUCCESS) { + tmpfile_name = "(unknown)"; + } + ap_log_rerror(APLOG_MARK, APLOG_ERR, status, r, APLOGNO(01091) + "write to temporary file %s failed", + tmpfile_name); + return HTTP_INTERNAL_SERVER_ERROR; + } + AP_DEBUG_ASSERT(bytes_read == bytes_written); + fsize += bytes_written; + } + apr_brigade_cleanup(input_brigade); } + else { - /* for sub-requests, ignore freshness/expiry headers */ - if (r->main) { - if ( !strcasecmp(headers_in[counter].key, "If-Match") - || !strcasecmp(headers_in[counter].key, "If-Modified-Since") - || !strcasecmp(headers_in[counter].key, "If-Range") - || !strcasecmp(headers_in[counter].key, "If-Unmodified-Since") - || !strcasecmp(headers_in[counter].key, "If-None-Match")) { - continue; + /* + * Save input_brigade in body_brigade. (At least) in the SSL case + * input_brigade contains transient buckets whose data would get + * overwritten during the next call of ap_get_brigade in the loop. + * ap_save_brigade ensures these buckets to be set aside. + * Calling ap_save_brigade with NULL as filter is OK, because + * body_brigade already has been created and does not need to get + * created by ap_save_brigade. + */ + status = ap_save_brigade(NULL, &body_brigade, &input_brigade, p); + if (status != APR_SUCCESS) { + return HTTP_INTERNAL_SERVER_ERROR; } + } - buf = apr_pstrcat(p, headers_in[counter].key, ": ", - headers_in[counter].val, CRLF, - NULL); - ap_xlate_proto_to_ascii(buf, strlen(buf)); - e = apr_bucket_pool_create(buf, strlen(buf), p, c->bucket_alloc); - APR_BRIGADE_INSERT_TAIL(header_brigade, e); - } + *bytes_spooled += bytes; + } while (!seen_eos); - /* Restore the original headers in (see comment above), - * we won't modify them anymore. 
- */ - r->headers_in = saved_headers_in; + APR_BRIGADE_CONCAT(input_brigade, body_brigade); + if (tmpfile) { + apr_brigade_insert_file(input_brigade, tmpfile, 0, fsize, p); + } + if (apr_table_get(r->subprocess_env, "proxy-sendextracrlf")) { + e = apr_bucket_immortal_create(CRLF_ASCII, 2, bucket_alloc); + APR_BRIGADE_INSERT_TAIL(input_brigade, e); + } + if (tmpfile) { + /* We dropped metadata buckets when spooling to tmpfile, + * terminate with EOS to allow for flushing in a one go. + */ + e = apr_bucket_eos_create(bucket_alloc); + APR_BRIGADE_INSERT_TAIL(input_brigade, e); + } return OK; } @@ -3899,7 +4968,7 @@ PROXY_DECLARE(apr_port_t) ap_proxy_port_of_scheme(const char *scheme) } else { proxy_schemes_t *pscheme; for (pscheme = pschemes; pscheme->name != NULL; ++pscheme) { - if (strcasecmp(scheme, pscheme->name) == 0) { + if (ap_cstr_casecmp(scheme, pscheme->name) == 0) { return pscheme->default_port; } } @@ -3908,6 +4977,23 @@ PROXY_DECLARE(apr_port_t) ap_proxy_port_of_scheme(const char *scheme) return 0; } +static APR_INLINE int ap_filter_should_yield(ap_filter_t *f) +{ + return f->c->data_in_output_filters; +} + +static APR_INLINE int ap_filter_output_pending(conn_rec *c) +{ + ap_filter_t *f = c->output_filters; + while (f->next) { + f = f->next; + } + if (f->frec->filter_func.out_func(f, NULL)) { + return AP_FILTER_ERROR; + } + return c->data_in_output_filters ? OK : DECLINED; +} + PROXY_DECLARE(apr_status_t) ap_proxy_buckets_lifetime_transform(request_rec *r, apr_bucket_brigade *from, apr_bucket_brigade *to) @@ -3946,6 +5032,16 @@ PROXY_DECLARE(apr_status_t) ap_proxy_buckets_lifetime_transform(request_rec *r, return rv; } +/* An arbitrary large value to address pathological case where we keep + * reading from one side only, without scheduling the other direction for + * too long. This can happen with large MTU and small read buffers, like + * micro-benchmarking huge files bidirectional transfer with client, proxy + * and backend on localhost for instance. Though we could just ignore the + * case and let the sender stop by itself at some point when/if it needs to + * receive data, or the receiver stop when/if it needs to send... + */ +#define PROXY_TRANSFER_MAX_READS 10000 + PROXY_DECLARE(apr_status_t) ap_proxy_transfer_between_connections( request_rec *r, conn_rec *c_i, @@ -3955,81 +5051,576 @@ PROXY_DECLARE(apr_status_t) ap_proxy_transfer_between_connections( const char *name, int *sent, apr_off_t bsize, - int after) + int flags) { apr_status_t rv; + int flush_each = 0; + unsigned int num_reads = 0; #ifdef DEBUGGING apr_off_t len; #endif - do { + /* + * Compat: since FLUSH_EACH is default (and zero) for legacy reasons, we + * pretend it's no FLUSH_AFTER nor YIELD_PENDING flags, the latter because + * flushing would defeat the purpose of checking for pending data (hence + * determine whether or not the output chain/stack is full for stopping). 
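+ *
+ * Sketch of the resulting behaviour (see the loop below):
+ *   neither flag set                 -> flush_each = 1, flush every pass
+ *   AP_PROXY_TRANSFER_FLUSH_AFTER    -> single ap_fflush() after the loop
+ *   AP_PROXY_TRANSFER_YIELD_PENDING  -> no flush here, yield APR_INCOMPLETE
+ *                                       when the output stack is full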
+ */ + if (!(flags & (AP_PROXY_TRANSFER_FLUSH_AFTER | + AP_PROXY_TRANSFER_YIELD_PENDING))) { + flush_each = 1; + } + + for (;;) { apr_brigade_cleanup(bb_i); rv = ap_get_brigade(c_i->input_filters, bb_i, AP_MODE_READBYTES, APR_NONBLOCK_READ, bsize); - if (rv == APR_SUCCESS) { - if (c_o->aborted) { - return APR_EPIPE; - } - if (APR_BRIGADE_EMPTY(bb_i)) { - break; + if (rv != APR_SUCCESS) { + if (!APR_STATUS_IS_EAGAIN(rv) && !APR_STATUS_IS_EOF(rv)) { + ap_log_rerror(APLOG_MARK, APLOG_DEBUG, rv, r, APLOGNO(03308) + "ap_proxy_transfer_between_connections: " + "error on %s - ap_get_brigade", + name); + if (rv == APR_INCOMPLETE) { + /* Don't return APR_INCOMPLETE, it'd mean "should yield" + * for the caller, while it means "incomplete body" here + * from ap_http_filter(), which is an error. + */ + rv = APR_EGENERAL; + } } + break; + } + + if (c_o->aborted) { + apr_brigade_cleanup(bb_i); + flags &= ~AP_PROXY_TRANSFER_FLUSH_AFTER; + rv = APR_EPIPE; + break; + } + if (APR_BRIGADE_EMPTY(bb_i)) { + break; + } #ifdef DEBUGGING - len = -1; - apr_brigade_length(bb_i, 0, &len); - ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(03306) - "ap_proxy_transfer_between_connections: " - "read %" APR_OFF_T_FMT - " bytes from %s", len, name); + len = -1; + apr_brigade_length(bb_i, 0, &len); + ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(03306) + "ap_proxy_transfer_between_connections: " + "read %" APR_OFF_T_FMT + " bytes from %s", len, name); #endif - if (sent) { - *sent = 1; - } - ap_proxy_buckets_lifetime_transform(r, bb_i, bb_o); - if (!after) { - apr_bucket *b; + if (sent) { + *sent = 1; + } + ap_proxy_buckets_lifetime_transform(r, bb_i, bb_o); + if (flush_each) { + apr_bucket *b; + /* + * Do not use ap_fflush here since this would cause the flush + * bucket to be sent in a separate brigade afterwards which + * causes some filters to set aside the buckets from the first + * brigade and process them when FLUSH arrives in the second + * brigade. As set asides of our transformed buckets involve + * memory copying we try to avoid this. If we have the flush + * bucket in the first brigade they directly process the + * buckets without setting them aside. + */ + b = apr_bucket_flush_create(bb_o->bucket_alloc); + APR_BRIGADE_INSERT_TAIL(bb_o, b); + } + rv = ap_pass_brigade(c_o->output_filters, bb_o); + apr_brigade_cleanup(bb_o); + if (rv != APR_SUCCESS) { + ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(03307) + "ap_proxy_transfer_between_connections: " + "error on %s - ap_pass_brigade", + name); + flags &= ~AP_PROXY_TRANSFER_FLUSH_AFTER; + break; + } - /* - * Do not use ap_fflush here since this would cause the flush - * bucket to be sent in a separate brigade afterwards which - * causes some filters to set aside the buckets from the first - * brigade and process them when the flush arrives in the second - * brigade. As set asides of our transformed buckets involve - * memory copying we try to avoid this. If we have the flush - * bucket in the first brigade they directly process the - * buckets without setting them aside. - */ - b = apr_bucket_flush_create(bb_o->bucket_alloc); - APR_BRIGADE_INSERT_TAIL(bb_o, b); - } - rv = ap_pass_brigade(c_o->output_filters, bb_o); - if (rv != APR_SUCCESS) { - ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(03307) + /* Yield if the output filters stack is full? This is to avoid + * blocking and give the caller a chance to POLLOUT async. 
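+ * ap_filter_output_pending() (local helper above) returns OK while data is
+ * still buffered in the output chain, DECLINED once it has drained, and
+ * AP_FILTER_ERROR on failure; only the OK case makes us yield here with
+ * APR_INCOMPLETE so the caller can poll for writability.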
+ */ + if ((flags & AP_PROXY_TRANSFER_YIELD_PENDING) + && ap_filter_should_yield(c_o->output_filters)) { + int rc = ap_filter_output_pending(c_o); + if (rc == OK) { + ap_log_rerror(APLOG_MARK, APLOG_TRACE2, 0, r, "ap_proxy_transfer_between_connections: " - "error on %s - ap_pass_brigade", - name); + "yield (output pending)"); + rv = APR_INCOMPLETE; + break; + } + if (rc != DECLINED) { + rv = AP_FILTER_ERROR; + break; } - } else if (!APR_STATUS_IS_EAGAIN(rv) && !APR_STATUS_IS_EOF(rv)) { - ap_log_rerror(APLOG_MARK, APLOG_DEBUG, rv, r, APLOGNO(03308) + } + + /* Yield if we keep hold of the thread for too long? This gives + * the caller a chance to schedule the other direction too. + */ + if ((flags & AP_PROXY_TRANSFER_YIELD_MAX_READS) + && ++num_reads > PROXY_TRANSFER_MAX_READS) { + ap_log_rerror(APLOG_MARK, APLOG_TRACE2, 0, r, "ap_proxy_transfer_between_connections: " - "error on %s - ap_get_brigade", - name); + "yield (max reads)"); + rv = APR_SUCCESS; + break; } - } while (rv == APR_SUCCESS); + } - if (after) { + if (flags & AP_PROXY_TRANSFER_FLUSH_AFTER) { ap_fflush(c_o->output_filters, bb_o); + apr_brigade_cleanup(bb_o); } + apr_brigade_cleanup(bb_i); ap_log_rerror(APLOG_MARK, APLOG_TRACE2, rv, r, - "ap_proxy_transfer_between_connections complete"); + "ap_proxy_transfer_between_connections complete (%s %pI)", + (c_i == r->connection) ? "to" : "from", + (c_i == r->connection) ? c_o->client_addr + : c_i->client_addr); if (APR_STATUS_IS_EAGAIN(rv)) { rv = APR_SUCCESS; } - return rv; } +struct proxy_tunnel_conn { + /* the other side of the tunnel */ + struct proxy_tunnel_conn *other; + + conn_rec *c; + const char *name; + + apr_pollfd_t *pfd; + apr_bucket_brigade *bb; + + unsigned int down_in:1, + down_out:1; +}; + +PROXY_DECLARE(apr_status_t) ap_proxy_tunnel_create(proxy_tunnel_rec **ptunnel, + request_rec *r, conn_rec *c_o, + const char *scheme) +{ + apr_status_t rv; + conn_rec *c_i = r->connection; + apr_interval_time_t client_timeout = -1, origin_timeout = -1; + proxy_tunnel_rec *tunnel; + + *ptunnel = NULL; + + tunnel = apr_pcalloc(r->pool, sizeof(*tunnel)); + + rv = apr_pollset_create(&tunnel->pollset, 2, r->pool, APR_POLLSET_NOCOPY); + if (rv != APR_SUCCESS) { + return rv; + } + + tunnel->r = r; + tunnel->scheme = apr_pstrdup(r->pool, scheme); + tunnel->client = apr_pcalloc(r->pool, sizeof(struct proxy_tunnel_conn)); + tunnel->origin = apr_pcalloc(r->pool, sizeof(struct proxy_tunnel_conn)); + tunnel->pfds = apr_array_make(r->pool, 2, sizeof(apr_pollfd_t)); + tunnel->read_buf_size = ap_get_read_buf_size(r); + tunnel->client->other = tunnel->origin; + tunnel->origin->other = tunnel->client; + tunnel->timeout = -1; + + tunnel->client->c = c_i; + tunnel->client->name = "client"; + tunnel->client->bb = apr_brigade_create(c_i->pool, c_i->bucket_alloc); + tunnel->client->pfd = &APR_ARRAY_PUSH(tunnel->pfds, apr_pollfd_t); + tunnel->client->pfd->p = r->pool; + tunnel->client->pfd->desc_type = APR_NO_DESC; + rv = ap_get_pollfd_from_conn(tunnel->client->c, + tunnel->client->pfd, &client_timeout); + if (rv != APR_SUCCESS) { + return rv; + } + tunnel->client->pfd->client_data = tunnel->client; + if (tunnel->client->pfd->desc_type == APR_POLL_SOCKET) { + apr_socket_opt_set(tunnel->client->pfd->desc.s, APR_SO_NONBLOCK, 1); + } + + tunnel->origin->c = c_o; + tunnel->origin->name = "origin"; + tunnel->origin->bb = apr_brigade_create(c_o->pool, c_o->bucket_alloc); + tunnel->origin->pfd = &APR_ARRAY_PUSH(tunnel->pfds, apr_pollfd_t); + tunnel->origin->pfd->p = r->pool; + tunnel->origin->pfd->desc_type = 
APR_POLL_SOCKET; + tunnel->origin->pfd->desc.s = ap_get_conn_socket(c_o); + tunnel->origin->pfd->client_data = tunnel->origin; + apr_socket_timeout_get(tunnel->origin->pfd->desc.s, &origin_timeout); + apr_socket_opt_set(tunnel->origin->pfd->desc.s, APR_SO_NONBLOCK, 1); + + /* Defaults to the largest timeout of both connections */ + tunnel->timeout = (client_timeout >= 0 && client_timeout > origin_timeout ? + client_timeout : origin_timeout); + + /* No coalescing filters */ + ap_remove_output_filter_byhandle(c_i->output_filters, + "SSL/TLS Coalescing Filter"); + ap_remove_output_filter_byhandle(c_o->output_filters, + "SSL/TLS Coalescing Filter"); + + /* Bidirectional non-HTTP stream will confuse mod_reqtimeoout */ + ap_remove_input_filter_byhandle(c_i->input_filters, "reqtimeout"); + + /* The input/output filter stacks should contain connection filters only */ + r->input_filters = r->proto_input_filters = c_i->input_filters; + r->output_filters = r->proto_output_filters = c_i->output_filters; + + /* Won't be reused after tunneling */ + c_i->keepalive = AP_CONN_CLOSE; + c_o->keepalive = AP_CONN_CLOSE; + + /* Disable half-close forwarding for this request? */ + if (apr_table_get(r->subprocess_env, "proxy-nohalfclose")) { + tunnel->nohalfclose = 1; + } + + if (tunnel->client->pfd->desc_type == APR_POLL_SOCKET) { + /* Both ends are sockets, the poll strategy is: + * - poll both sides POLLOUT + * - when one side is writable, remove the POLLOUT + * and add POLLIN to the other side. + * - tunnel arriving data, remove POLLIN from the source + * again and add POLLOUT to the receiving side + * - on EOF on read, remove the POLLIN from that side + * Repeat until both sides are down */ + tunnel->client->pfd->reqevents = APR_POLLOUT | APR_POLLERR; + tunnel->origin->pfd->reqevents = APR_POLLOUT | APR_POLLERR; + if ((rv = apr_pollset_add(tunnel->pollset, tunnel->origin->pfd)) || + (rv = apr_pollset_add(tunnel->pollset, tunnel->client->pfd))) { + return rv; + } + } + else if (tunnel->client->pfd->desc_type == APR_POLL_FILE) { + /* Input is a PIPE fd, the poll strategy is: + * - always POLLIN on origin + * - use socket strategy described above for client only + * otherwise the same + */ + tunnel->client->pfd->reqevents = 0; + tunnel->origin->pfd->reqevents = APR_POLLIN | APR_POLLHUP | + APR_POLLOUT | APR_POLLERR; + if ((rv = apr_pollset_add(tunnel->pollset, tunnel->origin->pfd))) { + return rv; + } + } + else { + /* input is already closed, unsual, but we know nothing about + * the tunneled protocol. 
*/ + tunnel->client->down_in = 1; + tunnel->origin->pfd->reqevents = APR_POLLIN | APR_POLLHUP; + if ((rv = apr_pollset_add(tunnel->pollset, tunnel->origin->pfd))) { + return rv; + } + } + + *ptunnel = tunnel; + return APR_SUCCESS; +} + +static void add_pollset(apr_pollset_t *pollset, apr_pollfd_t *pfd, + apr_int16_t events) +{ + apr_status_t rv; + + AP_DEBUG_ASSERT((pfd->reqevents & events) == 0); + + if (pfd->reqevents) { + rv = apr_pollset_remove(pollset, pfd); + if (rv != APR_SUCCESS) { + AP_DEBUG_ASSERT(1); + } + } + + if (events & APR_POLLIN) { + events |= APR_POLLHUP; + } + pfd->reqevents |= events | APR_POLLERR; + rv = apr_pollset_add(pollset, pfd); + if (rv != APR_SUCCESS) { + AP_DEBUG_ASSERT(1); + } +} + +static void del_pollset(apr_pollset_t *pollset, apr_pollfd_t *pfd, + apr_int16_t events) +{ + apr_status_t rv; + + AP_DEBUG_ASSERT((pfd->reqevents & events) != 0); + + rv = apr_pollset_remove(pollset, pfd); + if (rv != APR_SUCCESS) { + AP_DEBUG_ASSERT(0); + return; + } + + if (events & APR_POLLIN) { + events |= APR_POLLHUP; + } + if (pfd->reqevents & ~(events | APR_POLLERR)) { + pfd->reqevents &= ~events; + rv = apr_pollset_add(pollset, pfd); + if (rv != APR_SUCCESS) { + AP_DEBUG_ASSERT(0); + return; + } + } + else { + pfd->reqevents = 0; + } +} + +static int proxy_tunnel_forward(proxy_tunnel_rec *tunnel, + struct proxy_tunnel_conn *in) +{ + struct proxy_tunnel_conn *out = in->other; + apr_status_t rv; + int sent = 0; + + ap_log_rerror(APLOG_MARK, APLOG_TRACE8, 0, tunnel->r, + "proxy: %s: %s input ready", + tunnel->scheme, in->name); + + rv = ap_proxy_transfer_between_connections(tunnel->r, + in->c, out->c, + in->bb, out->bb, + in->name, &sent, + tunnel->read_buf_size, + AP_PROXY_TRANSFER_YIELD_PENDING | + AP_PROXY_TRANSFER_YIELD_MAX_READS); + if (sent && out == tunnel->client) { + tunnel->replied = 1; + } + if (rv != APR_SUCCESS) { + if (APR_STATUS_IS_INCOMPLETE(rv)) { + /* Pause POLLIN while waiting for POLLOUT on the other + * side, hence avoid filling the output filters even + * more to avoid blocking there. + */ + ap_log_rerror(APLOG_MARK, APLOG_TRACE5, 0, tunnel->r, + "proxy: %s: %s wait writable", + tunnel->scheme, out->name); + } + else if (APR_STATUS_IS_EOF(rv)) { + /* Stop POLLIN and wait for POLLOUT (flush) on the + * other side to shut it down. + */ + ap_log_rerror(APLOG_MARK, APLOG_TRACE3, 0, tunnel->r, + "proxy: %s: %s read shutdown", + tunnel->scheme, in->name); + if (tunnel->nohalfclose) { + /* No half-close forwarding, we are done both ways as + * soon as one side shuts down. + */ + return DONE; + } + in->down_in = 1; + } + else { + /* Real failure, bail out */ + return HTTP_INTERNAL_SERVER_ERROR; + } + + del_pollset(tunnel->pollset, in->pfd, APR_POLLIN); + if (out->pfd->desc_type == APR_POLL_SOCKET) { + /* if the output is a SOCKET, we can stop polling the input + * until the output signals POLLOUT again. */ + add_pollset(tunnel->pollset, out->pfd, APR_POLLOUT); + } + else { + /* We can't use POLLOUT in this direction for the only + * APR_POLL_FILE case we have so far (mod_h2's "signal" pipe), + * we assume that the client's ouput filters chain will block/flush + * if necessary (i.e. no pending data), hence that the origin + * is EOF when reaching here. This direction is over. 
*/ + ap_assert(in->down_in && APR_STATUS_IS_EOF(rv)); + ap_log_rerror(APLOG_MARK, APLOG_TRACE3, 0, tunnel->r, + "proxy: %s: %s write shutdown", + tunnel->scheme, out->name); + out->down_out = 1; + } + } + + return OK; +} + +PROXY_DECLARE(int) ap_proxy_tunnel_run(proxy_tunnel_rec *tunnel) +{ + int status = OK, rc; + request_rec *r = tunnel->r; + apr_pollset_t *pollset = tunnel->pollset; + struct proxy_tunnel_conn *client = tunnel->client, + *origin = tunnel->origin; + apr_interval_time_t timeout = tunnel->timeout >= 0 ? tunnel->timeout : -1; + const char *scheme = tunnel->scheme; + apr_status_t rv; + + ap_log_rerror(APLOG_MARK, APLOG_TRACE1, 0, r, APLOGNO(10212) + "proxy: %s: tunnel running (timeout %lf)", + scheme, timeout >= 0 ? (double)timeout / APR_USEC_PER_SEC + : (double)-1.0); + + /* Loop until both directions of the connection are closed, + * or a failure occurs. + */ + do { + const apr_pollfd_t *results; + apr_int32_t nresults, i; + + ap_log_rerror(APLOG_MARK, APLOG_TRACE8, 0, r, + "proxy: %s: polling (client=%hx, origin=%hx)", + scheme, client->pfd->reqevents, origin->pfd->reqevents); + do { + rv = apr_pollset_poll(pollset, timeout, &nresults, &results); + } while (APR_STATUS_IS_EINTR(rv)); + + if (rv != APR_SUCCESS) { + if (APR_STATUS_IS_TIMEUP(rv)) { + ap_log_rerror(APLOG_MARK, APLOG_TRACE2, 0, r, APLOGNO(10213) + "proxy: %s: polling timed out " + "(client=%hx, origin=%hx)", + scheme, client->pfd->reqevents, + origin->pfd->reqevents); + status = HTTP_GATEWAY_TIME_OUT; + } + else { + ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(10214) + "proxy: %s: polling failed", scheme); + status = HTTP_INTERNAL_SERVER_ERROR; + } + goto done; + } + + ap_log_rerror(APLOG_MARK, APLOG_TRACE8, 0, r, APLOGNO(10215) + "proxy: %s: woken up, %i result(s)", scheme, nresults); + + for (i = 0; i < nresults; i++) { + const apr_pollfd_t *pfd = &results[i]; + struct proxy_tunnel_conn *tc = pfd->client_data; + + ap_log_rerror(APLOG_MARK, APLOG_TRACE8, 0, r, + "proxy: %s: #%i: %s: %hx/%hx", scheme, i, + tc->name, pfd->rtnevents, tc->pfd->reqevents); + + /* sanity check */ + if (pfd->desc.s != client->pfd->desc.s + && pfd->desc.s != origin->pfd->desc.s) { + ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(10222) + "proxy: %s: unknown socket in pollset", scheme); + status = HTTP_INTERNAL_SERVER_ERROR; + goto done; + } + + if (!(pfd->rtnevents & (APR_POLLIN | APR_POLLOUT | + APR_POLLHUP | APR_POLLERR))) { + /* this catches POLLNVAL etc.. */ + ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(10220) + "proxy: %s: polling events error (%x)", + scheme, pfd->rtnevents); + status = HTTP_INTERNAL_SERVER_ERROR; + goto done; + } + + /* We want to write if we asked for POLLOUT and got: + * - POLLOUT: the socket is ready for write; + * - !POLLIN: the socket is in error state (POLLERR) so we let + * the user know by failing the write and log, OR the socket + * is shutdown for read already (POLLHUP) so we have to + * shutdown for write. 
+ */ + if ((tc->pfd->reqevents & APR_POLLOUT) + && ((pfd->rtnevents & APR_POLLOUT) + || !(tc->pfd->reqevents & APR_POLLIN) + || !(pfd->rtnevents & (APR_POLLIN | APR_POLLHUP)))) { + struct proxy_tunnel_conn *out = tc, *in = tc->other; + + ap_log_rerror(APLOG_MARK, APLOG_TRACE8, 0, r, + "proxy: %s: %s output ready", + scheme, out->name); + + rc = ap_filter_output_pending(out->c); + if (rc == OK) { + /* Keep polling out (only) */ + continue; + } + if (rc != DECLINED) { + /* Real failure, bail out */ + ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(10221) + "proxy: %s: %s flushing failed (%i)", + scheme, out->name, rc); + status = rc; + goto done; + } + + /* No more pending data. If the other side is not readable + * anymore it's time to shutdown for write (this direction + * is over). Otherwise back to normal business. + */ + del_pollset(pollset, out->pfd, APR_POLLOUT); + if (in->down_in) { + ap_log_rerror(APLOG_MARK, APLOG_TRACE3, 0, r, + "proxy: %s: %s write shutdown", + scheme, out->name); + apr_socket_shutdown(out->pfd->desc.s, 1); + out->down_out = 1; + } + else { + ap_log_rerror(APLOG_MARK, APLOG_TRACE5, 0, r, + "proxy: %s: %s resume writable", + scheme, out->name); + add_pollset(pollset, in->pfd, APR_POLLIN); + + /* Flush any pending input data now, we don't know when + * the next POLLIN will trigger and retaining data might + * deadlock the underlying protocol. We don't check for + * pending data first with ap_filter_input_pending() since + * the read from proxy_tunnel_forward() is nonblocking + * anyway and returning OK if there's no data. + */ + rc = proxy_tunnel_forward(tunnel, in); + if (rc != OK) { + status = rc; + goto done; + } + } + } + + /* We want to read if we asked for POLLIN|HUP and got: + * - POLLIN|HUP: the socket is ready for read or EOF (POLLHUP); + * - !POLLOUT: the socket is in error state (POLLERR) so we let + * the user know by failing the read and log. 
+ */ + if ((tc->pfd->reqevents & APR_POLLIN) + && ((pfd->rtnevents & (APR_POLLIN | APR_POLLHUP)) + || !(pfd->rtnevents & APR_POLLOUT))) { + rc = proxy_tunnel_forward(tunnel, tc); + if (rc != OK) { + status = rc; + goto done; + } + } + } + } while (!client->down_out || !origin->down_out); + +done: + ap_log_rerror(APLOG_MARK, APLOG_TRACE1, 0, r, APLOGNO(10223) + "proxy: %s: tunneling returns (%i)", scheme, status); + if (status == DONE) { + status = OK; + } + return status; +} + PROXY_DECLARE (const char *) ap_proxy_show_hcmethod(hcmethod_t method) { proxy_hcmethods_t *m = proxy_hcmethods; @@ -4046,4 +5637,14 @@ void proxy_util_register_hooks(apr_pool_t *p) APR_REGISTER_OPTIONAL_FN(ap_proxy_retry_worker); APR_REGISTER_OPTIONAL_FN(ap_proxy_clear_connection); APR_REGISTER_OPTIONAL_FN(proxy_balancer_get_best_worker); + + { + apr_time_t *start_time = ap_retained_data_get("proxy_start_time"); + if (start_time == NULL) { + start_time = ap_retained_data_create("proxy_start_time", + sizeof(*start_time)); + *start_time = apr_time_now(); + } + proxy_start_time = start_time; + } } diff --git a/modules/proxy/proxy_util.h b/modules/proxy/proxy_util.h index 202be8d..bc131da 100644 --- a/modules/proxy/proxy_util.h +++ b/modules/proxy/proxy_util.h @@ -31,9 +31,9 @@ PROXY_DECLARE(int) ap_proxy_is_domainname(struct dirconn_entry *This, apr_pool_t PROXY_DECLARE(int) ap_proxy_is_hostname(struct dirconn_entry *This, apr_pool_t *p); PROXY_DECLARE(int) ap_proxy_is_word(struct dirconn_entry *This, apr_pool_t *p); -PROXY_DECLARE_DATA extern int proxy_lb_workers; -PROXY_DECLARE_DATA extern const apr_strmatch_pattern *ap_proxy_strmatch_path; -PROXY_DECLARE_DATA extern const apr_strmatch_pattern *ap_proxy_strmatch_domain; +extern PROXY_DECLARE_DATA int proxy_lb_workers; +extern PROXY_DECLARE_DATA const apr_strmatch_pattern *ap_proxy_strmatch_path; +extern PROXY_DECLARE_DATA const apr_strmatch_pattern *ap_proxy_strmatch_domain; /** * Register optional functions declared within proxy_util.c. 
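Editorial note: the mod_session hunks that follow introduce a SessionExpiryUpdateInterval directive so that otherwise-unchanged sessions are not re-encoded and rewritten on every request merely because their expiry time slid forward a little. A minimal standalone C sketch of that save/skip decision is given here for orientation; the helper name should_rewrite_session(), the apr_time_t typedef stand-in and the values in main() are assumptions of this sketch and are not part of the patch — only the two guard conditions mirror the added code in ap_session_save().

/*
 * Illustrative only, not part of the patch: models the "skip the write"
 * decision added to ap_session_save() by the mod_session.c hunk below.
 */
#include <stdio.h>
#include <stdint.h>

#define APR_USEC_PER_SEC 1000000LL      /* expiry values are in microseconds */

typedef int64_t apr_time_t;             /* stand-in for the APR type */

/* Returns 1 if the session must be re-encoded and written out, 0 otherwise. */
static int should_rewrite_session(int dirty, int maxage,
                                  apr_time_t initial_expiry,
                                  apr_time_t new_expiry,
                                  apr_time_t expiry_update_time)
{
    /* don't save if the only change is the expiry, by a small amount */
    if (!dirty && expiry_update_time
        && (new_expiry - initial_expiry < expiry_update_time)) {
        return 0;
    }
    /* also don't save sessions that didn't change at all */
    if (!dirty && !maxage) {
        return 0;
    }
    return 1;
}

int main(void)
{
    apr_time_t interval = 60 * APR_USEC_PER_SEC;  /* SessionExpiryUpdateInterval 60 */

    /* clean session, expiry advanced by only 10s: write is skipped */
    printf("%d\n", should_rewrite_session(0, 300, 1000 * APR_USEC_PER_SEC,
                                          1010 * APR_USEC_PER_SEC, interval));
    /* clean session, expiry advanced by 120s: rewritten */
    printf("%d\n", should_rewrite_session(0, 300, 1000 * APR_USEC_PER_SEC,
                                          1120 * APR_USEC_PER_SEC, interval));
    /* dirty session is always rewritten */
    printf("%d\n", should_rewrite_session(1, 300, 0, 0, interval));
    return 0;
}

Compiled on its own, the three cases print 0, 1 and 1: a clean session whose expiry advanced by less than the configured interval is skipped, while a larger jump or any dirty session is written out.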
diff --git a/modules/session/mod_session.c b/modules/session/mod_session.c index 64e6e4a..fa8d406 100644 --- a/modules/session/mod_session.c +++ b/modules/session/mod_session.c @@ -128,7 +128,7 @@ static apr_status_t ap_session_load(request_rec * r, session_rec ** z) now = apr_time_now(); if (zz) { - /* load the session attibutes */ + /* load the session attributes */ rv = ap_run_session_decode(r, zz); /* having a session we cannot decode is just as good as having @@ -142,6 +142,7 @@ static apr_status_t ap_session_load(request_rec * r, session_rec ** z) /* invalidate session if session is expired */ if (zz && zz->expiry && zz->expiry < now) { + ap_log_rerror(APLOG_MARK, APLOG_TRACE2, 0, r, "session is expired"); zz = NULL; } } @@ -180,6 +181,7 @@ static apr_status_t ap_session_save(request_rec * r, session_rec * z) { if (z) { apr_time_t now = apr_time_now(); + apr_time_t initialExpiry = z->expiry; int rv = 0; session_dir_conf *dconf = ap_get_module_config(r->per_dir_config, @@ -210,6 +212,17 @@ static apr_status_t ap_session_save(request_rec * r, session_rec * z) z->expiry = now + z->maxage * APR_USEC_PER_SEC; } + /* don't save if the only change is the expiry by a small amount */ + if (!z->dirty && dconf->expiry_update_time + && (z->expiry - initialExpiry < dconf->expiry_update_time)) { + return APR_SUCCESS; + } + + /* also don't save sessions that didn't change at all */ + if (!z->dirty && !z->maxage) { + return APR_SUCCESS; + } + /* encode the session */ rv = ap_run_session_encode(r, z); if (OK != rv) { @@ -304,15 +317,17 @@ static apr_status_t ap_session_set(request_rec * r, session_rec * z, static int identity_count(void *v, const char *key, const char *val) { - int *count = v; - *count += strlen(key) * 3 + strlen(val) * 3 + 1; + apr_size_t *count = v; + + *count += strlen(key) * 3 + strlen(val) * 3 + 2; return 1; } static int identity_concat(void *v, const char *key, const char *val) { char *slider = v; - int length = strlen(slider); + apr_size_t length = strlen(slider); + slider += length; if (length) { *slider = '&'; @@ -341,9 +356,9 @@ static int identity_concat(void *v, const char *key, const char *val) */ static apr_status_t session_identity_encode(request_rec * r, session_rec * z) { - char *buffer = NULL; - int length = 0; + apr_size_t length = 0; + if (z->expiry) { char *expiry = apr_psprintf(z->pool, "%" APR_INT64_T_FMT, z->expiry); apr_table_setn(z->entries, SESSION_EXPIRY, expiry); @@ -392,8 +407,8 @@ static apr_status_t session_identity_decode(request_rec * r, session_rec * z) char *plast = NULL; const char *psep = "="; char *key = apr_strtok(pair, psep, &plast); - char *val = apr_strtok(NULL, psep, &plast); if (key && *key) { + char *val = apr_strtok(NULL, sep, &plast); if (!val || !*val) { apr_table_unset(z->entries, key); } @@ -556,6 +571,10 @@ static void *merge_session_dir_config(apr_pool_t * p, void *basev, void *addv) new->env_set = add->env_set || base->env_set; new->includes = apr_array_append(p, base->includes, add->includes); new->excludes = apr_array_append(p, base->excludes, add->excludes); + new->expiry_update_time = (add->expiry_update_set == 0) + ? 
base->expiry_update_time + : add->expiry_update_time; + new->expiry_update_set = add->expiry_update_set || base->expiry_update_set; return new; } @@ -625,6 +644,21 @@ static const char *add_session_exclude(cmd_parms * cmd, void *dconf, const char return NULL; } +static const char * + set_session_expiry_update(cmd_parms * parms, void *dconf, const char *arg) +{ + session_dir_conf *conf = dconf; + + conf->expiry_update_time = atoi(arg); + if (conf->expiry_update_time < 0) { + return "SessionExpiryUpdateInterval must be zero (disable) or a positive value"; + } + conf->expiry_update_time = apr_time_from_sec(conf->expiry_update_time); + conf->expiry_update_set = 1; + + return NULL; +} + static const command_rec session_cmds[] = { @@ -640,6 +674,9 @@ static const command_rec session_cmds[] = "URL prefixes to include in the session. Defaults to all URLs"), AP_INIT_TAKE1("SessionExclude", add_session_exclude, NULL, RSRC_CONF|OR_AUTHCFG, "URL prefixes to exclude from the session. Defaults to no URLs"), + AP_INIT_TAKE1("SessionExpiryUpdateInterval", set_session_expiry_update, NULL, RSRC_CONF|OR_AUTHCFG, + "time interval for which a session's expiry time may change " + "without having to be rewritten. Zero to disable"), {NULL} }; diff --git a/modules/session/mod_session.h b/modules/session/mod_session.h index a6dd5e9..bdeb532 100644 --- a/modules/session/mod_session.h +++ b/modules/session/mod_session.h @@ -115,6 +115,9 @@ typedef struct { * URLs included if empty */ apr_array_header_t *excludes; /* URL prefixes to be excluded. No * URLs excluded if empty */ + apr_time_t expiry_update_time; /* seconds the session expiry may change and + * not have to be rewritten */ + int expiry_update_set; } session_dir_conf; /** diff --git a/modules/session/mod_session_cookie.c b/modules/session/mod_session_cookie.c index a010ee7..36168b7 100644 --- a/modules/session/mod_session_cookie.c +++ b/modules/session/mod_session_cookie.c @@ -60,9 +60,6 @@ static apr_status_t session_cookie_save(request_rec * r, session_rec * z) session_cookie_dir_conf *conf = ap_get_module_config(r->per_dir_config, &session_cookie_module); - /* don't cache auth protected pages */ - apr_table_addn(r->headers_out, "Cache-Control", "no-cache"); - /* create RFC2109 compliant cookie */ if (conf->name_set) { if (z->encoded && z->encoded[0]) { @@ -162,6 +159,9 @@ static apr_status_t session_cookie_load(request_rec * r, session_rec ** z) /* put the session in the notes so we don't have to parse it again */ apr_table_setn(m->notes, note, (char *)zz); + /* don't cache auth protected pages */ + apr_table_addn(r->headers_out, "Cache-Control", "no-cache, private"); + return OK; } diff --git a/modules/session/mod_session_crypto.c b/modules/session/mod_session_crypto.c index 996620d..fe39f2c 100644 --- a/modules/session/mod_session_crypto.c +++ b/modules/session/mod_session_crypto.c @@ -293,7 +293,7 @@ static apr_status_t encrypt_string(request_rec * r, const apr_crypto_t *f, *cipher, APR_MODE_CBC, 1, 4096, f, r->pool); if (APR_STATUS_IS_ENOKEY(res)) { ap_log_rerror(APLOG_MARK, APLOG_ERR, res, r, APLOGNO(01825) - "the passphrase '%s' was empty", passphrase); + "failure generating key from passphrase"); } if (APR_STATUS_IS_EPADDING(res)) { ap_log_rerror(APLOG_MARK, APLOG_ERR, res, r, APLOGNO(01826) @@ -391,6 +391,8 @@ static apr_status_t decrypt_string(request_rec * r, const apr_crypto_t *f, return res; } + res = APR_ECRYPT; /* in case we exhaust all passphrases */ + /* try each passphrase in turn */ for (; i < dconf->passphrases->nelts; i++) { const char 
*passphrase = APR_ARRAY_IDX(dconf->passphrases, i, char *); @@ -415,7 +417,7 @@ static apr_status_t decrypt_string(request_rec * r, const apr_crypto_t *f, f, r->pool); if (APR_STATUS_IS_ENOKEY(res)) { ap_log_rerror(APLOG_MARK, APLOG_DEBUG, res, r, APLOGNO(01832) - "the passphrase '%s' was empty", passphrase); + "failure generating key from passphrase"); continue; } else if (APR_STATUS_IS_EPADDING(res)) { diff --git a/modules/session/mod_session_dbd.c b/modules/session/mod_session_dbd.c index 0be7306..f683da2 100644 --- a/modules/session/mod_session_dbd.c +++ b/modules/session/mod_session_dbd.c @@ -245,6 +245,9 @@ static apr_status_t session_dbd_load(request_rec * r, session_rec ** z) /* put the session in the notes so we don't have to parse it again */ apr_table_setn(m->notes, note, (char *)zz); + /* don't cache pages with a session */ + apr_table_addn(r->headers_out, "Cache-Control", "no-cache, private"); + return OK; } @@ -409,9 +412,6 @@ static apr_status_t session_dbd_save(request_rec * r, session_rec * z) if (conf->name_set || conf->name2_set) { char *oldkey = NULL, *newkey = NULL; - /* don't cache pages with a session */ - apr_table_addn(r->headers_out, "Cache-Control", "no-cache"); - /* if the session is new or changed, make a new session ID */ if (z->uuid) { oldkey = apr_pcalloc(r->pool, APR_UUID_FORMATTED_LENGTH + 1); @@ -458,7 +458,7 @@ static apr_status_t session_dbd_save(request_rec * r, session_rec * z) else if (conf->peruser) { /* don't cache pages with a session */ - apr_table_addn(r->headers_out, "Cache-Control", "no-cache"); + apr_table_addn(r->headers_out, "Cache-Control", "no-cache, private"); if (r->user) { ret = dbd_save(r, r->user, r->user, z->encoded, z->expiry); diff --git a/modules/slotmem/mod_slotmem_shm.c b/modules/slotmem/mod_slotmem_shm.c index 6dda8f6..4d14faf 100644 --- a/modules/slotmem/mod_slotmem_shm.c +++ b/modules/slotmem/mod_slotmem_shm.c @@ -56,7 +56,7 @@ struct ap_slotmem_instance_t { }; /* - * Layout for SHM and persited file : + * Layout for SHM and persisted file : * * +-------------------------------------------------------------+~> * | desc | num_free | base (slots) | inuse (array) | md5 | desc | compat.. @@ -92,7 +92,7 @@ static int slotmem_filenames(apr_pool_t *pool, const char *fname = NULL, *pname = NULL; if (slotname && *slotname && strcasecmp(slotname, "none") != 0) { - if (slotname[0] != '/') { + if (!ap_os_is_path_absolute(pool, slotname)) { /* Each generation needs its own file name. */ int generation = 0; ap_mpm_query(AP_MPMQ_GENERATION, &generation); @@ -109,7 +109,7 @@ static int slotmem_filenames(apr_pool_t *pool, if (persistname) { /* Persisted file names are immutable... 
*/ - if (slotname[0] != '/') { + if (!ap_os_is_path_absolute(pool, slotname)) { pname = apr_pstrcat(pool, DEFAULT_SLOTMEM_PREFIX, slotname, DEFAULT_SLOTMEM_SUFFIX, DEFAULT_SLOTMEM_PERSIST_SUFFIX, diff --git a/modules/ssl/mod_ssl.c b/modules/ssl/mod_ssl.c index 9fdf9e0..fb66d18 100644 --- a/modules/ssl/mod_ssl.c +++ b/modules/ssl/mod_ssl.c @@ -25,8 +25,7 @@ */ #include "ssl_private.h" -#include "mod_ssl.h" -#include "mod_ssl_openssl.h" + #include "util_md5.h" #include "util_mutex.h" #include "ap_provider.h" @@ -75,11 +74,9 @@ static const command_rec ssl_config_cmds[] = { SSL_CMD_SRV(SessionCache, TAKE1, "SSL Session Cache storage " "('none', 'nonenotnull', 'dbm:/path/to/file')") -#if defined(HAVE_OPENSSL_ENGINE_H) && defined(HAVE_ENGINE_INIT) SSL_CMD_SRV(CryptoDevice, TAKE1, "SSL external Crypto Device usage " "('builtin', '...')") -#endif SSL_CMD_SRV(RandomSeed, TAKE23, "SSL Pseudo Random Number Generator (PRNG) seeding source " "('startup|connect builtin|file:/path|exec:/path [bytes]')") @@ -94,7 +91,7 @@ static const command_rec ssl_config_cmds[] = { "Enable FIPS-140 mode " "(`on', `off')") SSL_CMD_ALL(CipherSuite, TAKE12, - "Colon-delimited list of permitted SSL Ciphers, optional preceeded " + "Colon-delimited list of permitted SSL Ciphers, optional preceded " "by protocol identifier ('XXX:...:XXX' - see manual)") SSL_CMD_SRV(CertificateFile, TAKE1, "SSL Server Certificate file " @@ -187,7 +184,7 @@ static const command_rec ssl_config_cmds[] = { "('[+-][" SSL_PROTOCOLS "] ...' - see manual)") SSL_CMD_PXY(ProxyCipherSuite, TAKE12, "SSL Proxy: colon-delimited list of permitted SSL ciphers " - ", optionally preceeded by protocol specifier ('XXX:...:XXX' - see manual)") + ", optionally preceded by protocol specifier ('XXX:...:XXX' - see manual)") SSL_CMD_PXY(ProxyVerify, TAKE1, "SSL Proxy: whether to verify the remote certificate " "('on' or 'off')") @@ -328,12 +325,17 @@ static int modssl_is_prelinked(void) static apr_status_t ssl_cleanup_pre_config(void *data) { - /* - * Try to kill the internals of the SSL library. +#if HAVE_OPENSSL_INIT_SSL || (OPENSSL_VERSION_NUMBER >= 0x10100000L && \ + !defined(LIBRESSL_VERSION_NUMBER)) + /* Openssl v1.1+ handles all termination automatically from + * OPENSSL_init_ssl(). Do nothing in this case. */ -#ifdef HAVE_FIPS - FIPS_mode_set(0); -#endif + +#else + /* Termination below is for legacy Openssl versions v1.0.x and + * older. + */ + /* Corresponds to OBJ_create()s */ OBJ_cleanup(); /* Corresponds to OPENSSL_load_builtin_modules() */ @@ -373,12 +375,14 @@ static apr_status_t ssl_cleanup_pre_config(void *data) if (!modssl_running_statically) { CRYPTO_cleanup_all_ex_data(); } +#endif /* * TODO: determine somewhere we can safely shove out diagnostics * (when enabled) at this late stage in the game: * CRYPTO_mem_leaks_fp(stderr); */ + return APR_SUCCESS; } @@ -388,16 +392,23 @@ static int ssl_hook_pre_config(apr_pool_t *pconf, { modssl_running_statically = modssl_is_prelinked(); - /* Some OpenSSL internals are allocated per-thread, make sure they - * are associated to the/our same thread-id until cleaned up. +#if HAVE_OPENSSL_INIT_SSL || (OPENSSL_VERSION_NUMBER >= 0x10100000L && \ + !defined(LIBRESSL_VERSION_NUMBER)) + /* Openssl v1.1+ handles all initialisation automatically, apart + * from hints as to how we want to use the library. + * + * We tell openssl we want to include engine support. + */ + OPENSSL_init_ssl(OPENSSL_INIT_ENGINE_ALL_BUILTIN, NULL); + +#else + /* Configuration below is for legacy versions Openssl v1.0 and + * older. 
*/ + #if APR_HAS_THREADS && MODSSL_USE_OPENSSL_PRE_1_1_API ssl_util_thread_id_setup(pconf); #endif - - /* We must register the library in full, to ensure our configuration - * code can successfully test the SSL environment. - */ #if MODSSL_USE_OPENSSL_PRE_1_1_API || defined(LIBRESSL_VERSION_NUMBER) (void)CRYPTO_malloc_init(); #else @@ -411,6 +422,7 @@ static int ssl_hook_pre_config(apr_pool_t *pconf, #endif OpenSSL_add_all_algorithms(); OPENSSL_load_builtin_modules(); +#endif if (OBJ_txt2nid("id-on-dnsSRV") == NID_undef) { (void)OBJ_create("1.3.6.1.5.5.7.8.7", "id-on-dnsSRV", @@ -445,17 +457,30 @@ static int ssl_hook_pre_config(apr_pool_t *pconf, } static SSLConnRec *ssl_init_connection_ctx(conn_rec *c, - ap_conf_vector_t *per_dir_config) + ap_conf_vector_t *per_dir_config, + int reinit) { SSLConnRec *sslconn = myConnConfig(c); - SSLSrvConfigRec *sc; - - if (sslconn) { + int need_setup = 0; + + /* mod_proxy's (r->)per_dir_config has the lifetime of the request, thus + * it uses ssl_engine_set() to reset sslconn->dc when reusing SSL backend + * connections, so we must fall through here. But in the case where we are + * called from ssl_init_ssl_connection() with no per_dir_config (which also + * includes mod_proxy's later run_pre_connection call), sslconn->dc should + * be preserved if it's already set. + */ + if (!sslconn) { + sslconn = apr_pcalloc(c->pool, sizeof(*sslconn)); + need_setup = 1; + } + else if (!reinit) { return sslconn; } - sslconn = apr_pcalloc(c->pool, sizeof(*sslconn)); - + /* Reinit dc in any case because it may be r->per_dir_config scoped + * and thus a caller like mod_proxy needs to update it per request. + */ if (per_dir_config) { sslconn->dc = ap_get_module_config(per_dir_config, &ssl_module); } @@ -464,12 +489,19 @@ static SSLConnRec *ssl_init_connection_ctx(conn_rec *c, &ssl_module); } - sslconn->server = c->base_server; - sslconn->verify_depth = UNSET; - sc = mySrvConfig(c->base_server); - sslconn->cipher_suite = sc->server->auth.cipher_suite; + if (need_setup) { + sslconn->server = c->base_server; + sslconn->verify_depth = UNSET; + if (c->outgoing) { + sslconn->cipher_suite = sslconn->dc->proxy->auth.cipher_suite; + } + else { + SSLSrvConfigRec *sc = mySrvConfig(c->base_server); + sslconn->cipher_suite = sc->server->auth.cipher_suite; + } - myConnConfigSet(c, sslconn); + myConnConfigSet(c, sslconn); + } return sslconn; } @@ -480,10 +512,11 @@ static int ssl_engine_status(conn_rec *c, SSLConnRec *sslconn) return DECLINED; } if (sslconn) { + /* This connection has already been configured. Check what applies. */ if (sslconn->disabled) { return SUSPENDED; } - if (sslconn->is_proxy) { + if (c->outgoing) { if (!sslconn->dc->proxy_enabled) { return DECLINED; } @@ -495,54 +528,48 @@ static int ssl_engine_status(conn_rec *c, SSLConnRec *sslconn) } } else { - if (mySrvConfig(c->base_server)->enabled != SSL_ENABLED_TRUE) { + /* we decline by default for outgoing connections and for incoming + * where the base_server is not enabled. 
*/ + if (c->outgoing || mySrvConfig(c->base_server)->enabled != SSL_ENABLED_TRUE) { return DECLINED; } } return OK; } -static int ssl_engine_set(conn_rec *c, - ap_conf_vector_t *per_dir_config, - int proxy, int enable) +static int ssl_hook_ssl_bind_outgoing(conn_rec *c, + ap_conf_vector_t *per_dir_config, + int enable_ssl) { SSLConnRec *sslconn; int status; - - if (proxy) { - sslconn = ssl_init_connection_ctx(c, per_dir_config); - sslconn->is_proxy = 1; - } - else { - sslconn = myConnConfig(c); + + sslconn = ssl_init_connection_ctx(c, per_dir_config, 1); + if (sslconn->ssl) { + /* we are already bound to this connection. We have rebound + * or removed the reference to a previous per_dir_config, + * there is nothing more to do. */ + return OK; } status = ssl_engine_status(c, sslconn); - - if (proxy && status == DECLINED) { - if (enable) { + if (enable_ssl) { + if (status != OK) { SSLSrvConfigRec *sc = mySrvConfig(sslconn->server); - ap_log_cerror(APLOG_MARK, APLOG_ERR, 0, c, APLOGNO(01961) - "SSL Proxy requested for %s but not enabled " - "[Hint: SSLProxyEngine]", sc->vhost_id); + sslconn->disabled = 1; + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, c, APLOGNO(10272) + "SSL Proxy requested for %s but not enabled for us.", + sc->vhost_id); + } + else { + sslconn->disabled = 0; + return OK; } - sslconn->disabled = 1; } - else if (sslconn) { - sslconn->disabled = !enable; + else { + sslconn->disabled = 1; } - - return status != DECLINED; -} - -static int ssl_proxy_enable(conn_rec *c) -{ - return ssl_engine_set(c, NULL, 1, 1); -} - -static int ssl_engine_disable(conn_rec *c) -{ - return ssl_engine_set(c, NULL, 0, 0); + return DECLINED; } int ssl_init_ssl_connection(conn_rec *c, request_rec *r) @@ -558,7 +585,7 @@ int ssl_init_ssl_connection(conn_rec *c, request_rec *r) /* * Create or retrieve SSL context */ - sslconn = ssl_init_connection_ctx(c, r ? r->per_dir_config : NULL); + sslconn = ssl_init_connection_ctx(c, r ? r->per_dir_config : NULL, 0); server = sslconn->server; sc = mySrvConfig(server); @@ -566,9 +593,9 @@ int ssl_init_ssl_connection(conn_rec *c, request_rec *r) * Seed the Pseudo Random Number Generator (PRNG) */ ssl_rand_seed(server, c->pool, SSL_RSCTX_CONNECT, - sslconn->is_proxy ? "Proxy: " : "Server: "); + c->outgoing ? "Proxy: " : "Server: "); - mctx = myCtxConfig(sslconn, sc); + mctx = myConnCtxConfig(c, sc); /* * Create a new SSL connection with the configured server SSL context and @@ -586,7 +613,7 @@ int ssl_init_ssl_connection(conn_rec *c, request_rec *r) return DECLINED; /* XXX */ } - rc = ssl_run_pre_handshake(c, ssl, sslconn->is_proxy ? 1 : 0); + rc = ssl_run_pre_handshake(c, ssl, c->outgoing ? 
1 : 0); if (rc != OK && rc != DECLINED) { return rc; } @@ -718,10 +745,7 @@ static void ssl_register_hooks(apr_pool_t *p) APR_HOOK_MIDDLE); ssl_var_register(p); - - APR_REGISTER_OPTIONAL_FN(ssl_proxy_enable); - APR_REGISTER_OPTIONAL_FN(ssl_engine_disable); - APR_REGISTER_OPTIONAL_FN(ssl_engine_set); + ap_hook_ssl_bind_outgoing (ssl_hook_ssl_bind_outgoing, NULL, NULL, APR_HOOK_MIDDLE); ap_register_auth_provider(p, AUTHZ_PROVIDER_GROUP, "ssl", AUTHZ_PROVIDER_VERSION, diff --git a/modules/ssl/mod_ssl.h b/modules/ssl/mod_ssl.h index 24a65a0..a360911 100644 --- a/modules/ssl/mod_ssl.h +++ b/modules/ssl/mod_ssl.h @@ -29,6 +29,7 @@ #include "httpd.h" #include "http_config.h" #include "apr_optional.h" +#include "apr_tables.h" /* for apr_array_header_t */ /* Create a set of SSL_DECLARE(type), SSL_DECLARE_NONSTD(type) and * SSL_DECLARE_DATA with appropriate export and import tags for the platform @@ -86,6 +87,34 @@ APR_DECLARE_OPTIONAL_FN(int, ssl_engine_disable, (conn_rec *)); APR_DECLARE_OPTIONAL_FN(int, ssl_engine_set, (conn_rec *, ap_conf_vector_t *, int proxy, int enable)); + +/* Check for availability of new hooks */ +#define SSL_CERT_HOOKS +#ifdef SSL_CERT_HOOKS + +/** Lets others add certificate and key files to the given server. + * For each cert a key must also be added. + * @param cert_file and array of const char* with the path to the certificate chain + * @param key_file and array of const char* with the path to the private key file + */ +APR_DECLARE_EXTERNAL_HOOK(ssl, SSL, int, add_cert_files, + (server_rec *s, apr_pool_t *p, + apr_array_header_t *cert_files, + apr_array_header_t *key_files)) + +/** In case no certificates are available for a server, this + * lets other modules add a fallback certificate for the time + * being. Regular requests against this server will be answered + * with a 503. + * @param cert_file and array of const char* with the path to the certificate chain + * @param key_file and array of const char* with the path to the private key file + */ +APR_DECLARE_EXTERNAL_HOOK(ssl, SSL, int, add_fallback_cert_files, + (server_rec *s, apr_pool_t *p, + apr_array_header_t *cert_files, + apr_array_header_t *key_files)) + +#endif /* SSL_CERT_HOOKS */ #endif /* __MOD_SSL_H__ */ /** @} */ diff --git a/modules/ssl/mod_ssl_openssl.h b/modules/ssl/mod_ssl_openssl.h index 0fa654a..e251bd9 100644 --- a/modules/ssl/mod_ssl_openssl.h +++ b/modules/ssl/mod_ssl_openssl.h @@ -30,14 +30,17 @@ /* OpenSSL headers */ -#ifndef SSL_PRIVATE_H #include -#if (OPENSSL_VERSION_NUMBER >= 0x10001000) +#if OPENSSL_VERSION_NUMBER >= 0x30000000 +#include /* for OPENSSL_API_LEVEL */ +#endif +#if OPENSSL_VERSION_NUMBER >= 0x10001000 /* must be defined before including ssl.h */ #define OPENSSL_NO_SSL_INTERN #endif #include -#endif +#include +#include /** * init_server hook -- allow SSL_CTX-specific initialization to be performed by @@ -69,5 +72,45 @@ APR_DECLARE_EXTERNAL_HOOK(ssl, SSL, int, pre_handshake, APR_DECLARE_EXTERNAL_HOOK(ssl, SSL, int, proxy_post_handshake, (conn_rec *c, SSL *ssl)) +/** On TLS connections that do not relate to a configured virtual host, + * allow other modules to provide a X509 certificate and EVP_PKEY to + * be used on the connection. This first hook which does not + * return DECLINED will determine the outcome. 
*/ +APR_DECLARE_EXTERNAL_HOOK(ssl, SSL, int, answer_challenge, + (conn_rec *c, const char *server_name, + X509 **pcert, EVP_PKEY **pkey)) + +/** During post_config phase, ask around if someone wants to provide + * OCSP stapling status information for the given cert (with the also + * provided issuer certificate). The first hook which does not + * return DECLINED promises to take responsibility (and respond + * in later calls via hook ssl_get_stapling_status). + * If no hook takes over, mod_ssl's own stapling implementation will + * be applied (if configured). + */ +APR_DECLARE_EXTERNAL_HOOK(ssl, SSL, int, init_stapling_status, + (server_rec *s, apr_pool_t *p, + X509 *cert, X509 *issuer)) + +/** Anyone answering positive to ssl_init_stapling_status for a + * certificate, needs to register here and supply the actual OCSP stapling + * status data (OCSP_RESP) for a new connection. + * A hook supplying the response data must return APR_SUCCESS. + * The data is returned in DER encoded bytes via pder and pderlen. The + * returned pointer may be NULL, which indicates that data is (currently) + * unavailable. + * If DER data is returned, it MUST come from a response with + * status OCSP_RESPONSE_STATUS_SUCCESSFUL and V_OCSP_CERTSTATUS_GOOD + * or V_OCSP_CERTSTATUS_REVOKED, not V_OCSP_CERTSTATUS_UNKNOWN. This means + * errors in OCSP retrieval are to be handled/logged by the hook and + * are not done by mod_ssl. + * Any DER bytes returned MUST be allocated via malloc() and ownership + * passes to mod_ssl. Meaning, the hook must return a malloced copy of + * the data it has. mod_ssl (or OpenSSL) will free it. + */ +APR_DECLARE_EXTERNAL_HOOK(ssl, SSL, int, get_stapling_status, + (unsigned char **pder, int *pderlen, + conn_rec *c, server_rec *s, X509 *cert)) + #endif /* __MOD_SSL_OPENSSL_H__ */ /** @} */ diff --git a/modules/ssl/ssl_engine_config.c b/modules/ssl/ssl_engine_config.c index 6c10bb5..9af6f70 100644 --- a/modules/ssl/ssl_engine_config.c +++ b/modules/ssl/ssl_engine_config.c @@ -27,6 +27,7 @@ damned if you don't.'' -- Unknown */ #include "ssl_private.h" + #include "util_mutex.h" #include "ap_provider.h" @@ -75,6 +76,13 @@ SSLModConfigRec *ssl_config_global_create(server_rec *s) mc->stapling_refresh_mutex = NULL; #endif +#ifdef HAVE_OPENSSL_KEYLOG + mc->keylog_file = NULL; +#endif +#ifdef HAVE_FIPS + mc->fips = UNSET; +#endif + apr_pool_userdata_set(mc, SSL_MOD_CONFIG_KEY, apr_pool_cleanup_null, pool); @@ -220,9 +228,6 @@ static SSLSrvConfigRec *ssl_config_server_new(apr_pool_t *p) #ifdef HAVE_TLSEXT sc->strict_sni_vhost_check = SSL_ENABLED_UNSET; #endif -#ifdef HAVE_FIPS - sc->fips = UNSET; -#endif #ifndef OPENSSL_NO_COMP sc->compression = UNSET; #endif @@ -261,9 +266,11 @@ static void modssl_ctx_cfg_merge(apr_pool_t *p, modssl_ctx_t *mrg) { if (add->protocol_set) { + mrg->protocol_set = 1; mrg->protocol = add->protocol; } else { + mrg->protocol_set = base->protocol_set; mrg->protocol = base->protocol; } @@ -393,9 +400,6 @@ void *ssl_config_server_merge(apr_pool_t *p, void *basev, void *addv) #ifdef HAVE_TLSEXT cfgMerge(strict_sni_vhost_check, SSL_ENABLED_UNSET); #endif -#ifdef HAVE_FIPS - cfgMergeBool(fips); -#endif #ifndef OPENSSL_NO_COMP cfgMergeBool(compression); #endif @@ -589,14 +593,15 @@ const char *ssl_cmd_SSLPassPhraseDialog(cmd_parms *cmd, return NULL; } -#if defined(HAVE_OPENSSL_ENGINE_H) && defined(HAVE_ENGINE_INIT) const char *ssl_cmd_SSLCryptoDevice(cmd_parms *cmd, void *dcfg, const char *arg) { SSLModConfigRec *mc = myModConfig(cmd->server); const char *err; +#if 
MODSSL_HAVE_ENGINE_API ENGINE *e; +#endif if ((err = ap_check_cmd_context(cmd, GLOBAL_ONLY))) { return err; @@ -605,13 +610,16 @@ const char *ssl_cmd_SSLCryptoDevice(cmd_parms *cmd, if (strcEQ(arg, "builtin")) { mc->szCryptoDevice = NULL; } +#if MODSSL_HAVE_ENGINE_API else if ((e = ENGINE_by_id(arg))) { mc->szCryptoDevice = arg; ENGINE_free(e); } +#endif else { err = "SSLCryptoDevice: Invalid argument; must be one of: " "'builtin' (none)"; +#if MODSSL_HAVE_ENGINE_API e = ENGINE_get_first(); while (e) { err = apr_pstrcat(cmd->pool, err, ", '", ENGINE_get_id(e), @@ -620,12 +628,12 @@ const char *ssl_cmd_SSLCryptoDevice(cmd_parms *cmd, * on the 'old' e, per the docs in engine.h. */ e = ENGINE_get_next(e); } +#endif return err; } return NULL; } -#endif const char *ssl_cmd_SSLRandomSeed(cmd_parms *cmd, void *dcfg, @@ -743,7 +751,7 @@ const char *ssl_cmd_SSLEngine(cmd_parms *cmd, void *dcfg, const char *arg) const char *ssl_cmd_SSLFIPS(cmd_parms *cmd, void *dcfg, int flag) { #ifdef HAVE_FIPS - SSLSrvConfigRec *sc = mySrvConfig(cmd->server); + SSLModConfigRec *mc = myModConfig(cmd->server); #endif const char *err; @@ -752,9 +760,9 @@ const char *ssl_cmd_SSLFIPS(cmd_parms *cmd, void *dcfg, int flag) } #ifdef HAVE_FIPS - if ((sc->fips != UNSET) && (sc->fips != (BOOL)(flag ? TRUE : FALSE))) + if ((mc->fips != UNSET) && (mc->fips != (BOOL)(flag ? TRUE : FALSE))) return "Conflicting SSLFIPS options, cannot be both On and Off"; - sc->fips = flag ? TRUE : FALSE; + mc->fips = flag ? TRUE : FALSE; #else if (flag) return "SSLFIPS invalid, rebuild httpd and openssl compiled for FIPS"; @@ -795,7 +803,7 @@ const char *ssl_cmd_SSLCipherSuite(cmd_parms *cmd, return NULL; } #endif - return apr_pstrcat(cmd->pool, "procotol '", arg1, "' not supported", NULL); + return apr_pstrcat(cmd->pool, "protocol '", arg1, "' not supported", NULL); } #define SSL_FLAGS_CHECK_FILE \ @@ -807,8 +815,14 @@ const char *ssl_cmd_SSLCipherSuite(cmd_parms *cmd, static const char *ssl_cmd_check_file(cmd_parms *parms, const char **file) { - const char *filepath = ap_server_root_relative(parms->pool, *file); + const char *filepath; + + /* If only dumping the config, don't verify the paths */ + if (ap_state_query(AP_SQ_RUN_MODE) == AP_SQ_RM_CONFIG_DUMP) { + return NULL; + } + filepath = ap_server_root_relative(parms->pool, *file); if (!filepath) { return apr_pstrcat(parms->pool, parms->cmd->name, ": Invalid file path ", *file, NULL); @@ -847,10 +861,12 @@ const char *ssl_cmd_SSLCompression(cmd_parms *cmd, void *dcfg, int flag) } } sc->compression = flag ? TRUE : FALSE; - return NULL; #else - return "Setting Compression mode unsupported; not implemented by the SSL library"; + if (flag) { + return "Setting Compression mode unsupported; not implemented by the SSL library"; + } #endif + return NULL; } const char *ssl_cmd_SSLHonorCipherOrder(cmd_parms *cmd, void *dcfg, int flag) @@ -916,7 +932,9 @@ const char *ssl_cmd_SSLCertificateFile(cmd_parms *cmd, SSLSrvConfigRec *sc = mySrvConfig(cmd->server); const char *err; - if ((err = ssl_cmd_check_file(cmd, &arg))) { + /* Only check for non-ENGINE based certs. */ + if (!modssl_is_engine_id(arg) + && (err = ssl_cmd_check_file(cmd, &arg))) { return err; } @@ -932,7 +950,9 @@ const char *ssl_cmd_SSLCertificateKeyFile(cmd_parms *cmd, SSLSrvConfigRec *sc = mySrvConfig(cmd->server); const char *err; - if ((err = ssl_cmd_check_file(cmd, &arg))) { + /* Check keyfile exists for non-ENGINE keys. 
*/ + if (!modssl_is_engine_id(arg) + && (err = ssl_cmd_check_file(cmd, &arg))) { return err; } @@ -1549,7 +1569,7 @@ const char *ssl_cmd_SSLProxyCipherSuite(cmd_parms *cmd, return NULL; } #endif - return apr_pstrcat(cmd->pool, "procotol '", arg1, "' not supported", NULL); + return apr_pstrcat(cmd->pool, "protocol '", arg1, "' not supported", NULL); } const char *ssl_cmd_SSLProxyVerify(cmd_parms *cmd, diff --git a/modules/ssl/ssl_engine_init.c b/modules/ssl/ssl_engine_init.c index 18d18c6..c2ec048 100644 --- a/modules/ssl/ssl_engine_init.c +++ b/modules/ssl/ssl_engine_init.c @@ -27,15 +27,36 @@ see Recursive.'' -- Unknown */ #include "ssl_private.h" -#include "mod_ssl.h" -#include "mod_ssl_openssl.h" + #include "mpm_common.h" #include "mod_md.h" +static apr_status_t ssl_init_ca_cert_path(server_rec *, apr_pool_t *, const char *, + STACK_OF(X509_NAME) *, STACK_OF(X509_INFO) *); + APR_IMPLEMENT_OPTIONAL_HOOK_RUN_ALL(ssl, SSL, int, init_server, (server_rec *s,apr_pool_t *p,int is_proxy,SSL_CTX *ctx), (s,p,is_proxy,ctx), OK, DECLINED) +APR_IMPLEMENT_OPTIONAL_HOOK_RUN_ALL(ssl, SSL, int, add_cert_files, + (server_rec *s, apr_pool_t *p, + apr_array_header_t *cert_files, apr_array_header_t *key_files), + (s, p, cert_files, key_files), + OK, DECLINED) + +APR_IMPLEMENT_OPTIONAL_HOOK_RUN_ALL(ssl, SSL, int, add_fallback_cert_files, + (server_rec *s, apr_pool_t *p, + apr_array_header_t *cert_files, apr_array_header_t *key_files), + (s, p, cert_files, key_files), + OK, DECLINED) + +APR_IMPLEMENT_OPTIONAL_HOOK_RUN_ALL(ssl, SSL, int, answer_challenge, + (conn_rec *c, const char *server_name, + X509 **pcert, EVP_PKEY **pkey), + (c, server_name, pcert, pkey), + DECLINED, DECLINED) + + /* _________________________________________________________________ ** ** Module Initialization @@ -69,7 +90,6 @@ static int DH_set0_pqg(DH *dh, BIGNUM *p, BIGNUM *q, BIGNUM *g) return 1; } -#endif /* * Grab well-defined DH parameters from OpenSSL, see the BN_get_rfc* @@ -149,40 +169,64 @@ DH *modssl_get_dh_params(unsigned keylen) return NULL; /* impossible to reach. */ } +#endif -static void ssl_add_version_components(apr_pool_t *p, +static void ssl_add_version_components(apr_pool_t *ptemp, apr_pool_t *pconf, server_rec *s) { - char *modver = ssl_var_lookup(p, s, NULL, NULL, "SSL_VERSION_INTERFACE"); - char *libver = ssl_var_lookup(p, s, NULL, NULL, "SSL_VERSION_LIBRARY"); - char *incver = ssl_var_lookup(p, s, NULL, NULL, + char *modver = ssl_var_lookup(ptemp, s, NULL, NULL, "SSL_VERSION_INTERFACE"); + char *libver = ssl_var_lookup(ptemp, s, NULL, NULL, "SSL_VERSION_LIBRARY"); + char *incver = ssl_var_lookup(ptemp, s, NULL, NULL, "SSL_VERSION_LIBRARY_INTERFACE"); - ap_add_version_component(p, libver); + ap_add_version_component(pconf, libver); ap_log_error(APLOG_MARK, APLOG_INFO, 0, s, APLOGNO(01876) "%s compiled against Server: %s, Library: %s", modver, AP_SERVER_BASEVERSION, incver); } -/**************************************************************************************************/ -/* Managed Domains Interface */ - -static APR_OPTIONAL_FN_TYPE(md_is_managed) *md_is_managed; -static APR_OPTIONAL_FN_TYPE(md_get_certificate) *md_get_certificate; -static APR_OPTIONAL_FN_TYPE(md_is_challenge) *md_is_challenge; +/* _________________________________________________________________ +** +** Let other answer special connection attempts. +** Used in ACME challenge handling by mod_md. 
+** _________________________________________________________________ +*/ int ssl_is_challenge(conn_rec *c, const char *servername, - X509 **pcert, EVP_PKEY **pkey) + X509 **pcert, EVP_PKEY **pkey, + const char **pcert_pem, const char **pkey_pem) { - if (md_is_challenge) { - return md_is_challenge(c, servername, pcert, pkey); - } *pcert = NULL; *pkey = NULL; + *pcert_pem = *pkey_pem = NULL; + if (ap_ssl_answer_challenge(c, servername, pcert_pem, pkey_pem)) { + return 1; + } + else if (OK == ssl_run_answer_challenge(c, servername, pcert, pkey)) { + return 1; + } return 0; } +#ifdef HAVE_FIPS +static apr_status_t modssl_fips_cleanup(void *data) +{ + modssl_fips_enable(0); + return APR_SUCCESS; +} +#endif + +static APR_INLINE unsigned long modssl_runtime_lib_version(void) +{ +#if MODSSL_USE_OPENSSL_PRE_1_1_API + return SSLeay(); +#else + return OpenSSL_version_num(); +#endif +} + + /* * Per-module initialization */ @@ -190,18 +234,22 @@ apr_status_t ssl_init_Module(apr_pool_t *p, apr_pool_t *plog, apr_pool_t *ptemp, server_rec *base_server) { + unsigned long runtime_lib_version = modssl_runtime_lib_version(); SSLModConfigRec *mc = myModConfig(base_server); SSLSrvConfigRec *sc; server_rec *s; apr_status_t rv; apr_array_header_t *pphrases; - if (SSLeay() < MODSSL_LIBRARY_VERSION) { + AP_DEBUG_ASSERT(mc); + + if (runtime_lib_version < MODSSL_LIBRARY_VERSION) { ap_log_error(APLOG_MARK, APLOG_WARNING, 0, base_server, APLOGNO(01882) "Init: this version of mod_ssl was compiled against " - "a newer library (%s, version currently loaded is %s)" + "a newer library (%s (%s), version currently loaded is 0x%lX)" " - may result in undefined or erroneous behavior", - MODSSL_LIBRARY_TEXT, MODSSL_LIBRARY_DYNTEXT); + MODSSL_LIBRARY_TEXT, MODSSL_LIBRARY_DYNTEXT, + runtime_lib_version); } /* We initialize mc->pid per-process in the child init, @@ -223,16 +271,6 @@ apr_status_t ssl_init_Module(apr_pool_t *p, apr_pool_t *plog, ssl_config_global_create(base_server); /* just to avoid problems */ ssl_config_global_fix(mc); - /* Initialize our interface to mod_md, if it is loaded - */ - md_is_managed = APR_RETRIEVE_OPTIONAL_FN(md_is_managed); - md_get_certificate = APR_RETRIEVE_OPTIONAL_FN(md_get_certificate); - md_is_challenge = APR_RETRIEVE_OPTIONAL_FN(md_is_challenge); - if (!md_is_managed || !md_get_certificate) { - md_is_managed = NULL; - md_get_certificate = NULL; - } - /* * try to fix the configuration and open the dedicated SSL * logfile as early as possible @@ -279,12 +317,6 @@ apr_status_t ssl_init_Module(apr_pool_t *p, apr_pool_t *plog, if (sc->server && sc->server->pphrase_dialog_type == SSL_PPTYPE_UNSET) { sc->server->pphrase_dialog_type = SSL_PPTYPE_BUILTIN; } - -#ifdef HAVE_FIPS - if (sc->fips == UNSET) { - sc->fips = FALSE; - } -#endif } #if APR_HAS_THREADS && MODSSL_USE_OPENSSL_PRE_1_1_API @@ -294,13 +326,11 @@ apr_status_t ssl_init_Module(apr_pool_t *p, apr_pool_t *plog, /* * SSL external crypto device ("engine") support */ -#if defined(HAVE_OPENSSL_ENGINE_H) && defined(HAVE_ENGINE_INIT) if ((rv = ssl_init_Engine(base_server, p)) != APR_SUCCESS) { return rv; } -#endif - ap_log_error(APLOG_MARK, APLOG_INFO, 0, s, APLOGNO(01883) + ap_log_error(APLOG_MARK, APLOG_INFO, 0, base_server, APLOGNO(01883) "Init: Initialized %s library", MODSSL_LIBRARY_NAME); /* @@ -311,22 +341,28 @@ apr_status_t ssl_init_Module(apr_pool_t *p, apr_pool_t *plog, ssl_rand_seed(base_server, ptemp, SSL_RSCTX_STARTUP, "Init: "); #ifdef HAVE_FIPS - if(sc->fips) { - if (!FIPS_mode()) { - if (FIPS_mode_set(1)) { - 
ap_log_error(APLOG_MARK, APLOG_NOTICE, 0, s, APLOGNO(01884) - "Operating in SSL FIPS mode"); - } - else { - ap_log_error(APLOG_MARK, APLOG_EMERG, 0, s, APLOGNO(01885) "FIPS mode failed"); - ssl_log_ssl_error(SSLLOG_MARK, APLOG_EMERG, s); - return ssl_die(s); - } + if (!modssl_fips_is_enabled() && mc->fips == TRUE) { + if (!modssl_fips_enable(1)) { + ap_log_error(APLOG_MARK, APLOG_EMERG, 0, base_server, APLOGNO(01885) + "Could not enable FIPS mode"); + ssl_log_ssl_error(SSLLOG_MARK, APLOG_EMERG, base_server); + return ssl_die(base_server); } + + apr_pool_cleanup_register(p, NULL, modssl_fips_cleanup, + apr_pool_cleanup_null); + } + + /* Log actual FIPS mode which the SSL library is operating under, + * which may have been set outside of the mod_ssl + * configuration. */ + if (modssl_fips_is_enabled()) { + ap_log_error(APLOG_MARK, APLOG_NOTICE, 0, base_server, APLOGNO(01884) + MODSSL_LIBRARY_NAME " has FIPS mode enabled"); } else { - ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s, APLOGNO(01886) - "SSL FIPS mode disabled"); + ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, base_server, APLOGNO(01886) + MODSSL_LIBRARY_NAME " has FIPS mode disabled"); } #endif @@ -409,25 +445,48 @@ apr_status_t ssl_init_Module(apr_pool_t *p, apr_pool_t *plog, * Announce mod_ssl and SSL library in HTTP Server field * as ``mod_ssl/X.X.X OpenSSL/X.X.X'' */ - ssl_add_version_components(p, base_server); + ssl_add_version_components(ptemp, p, base_server); modssl_init_app_data2_idx(); /* for modssl_get_app_data2() at request time */ +#if MODSSL_USE_OPENSSL_PRE_1_1_API init_dh_params(); -#if !MODSSL_USE_OPENSSL_PRE_1_1_API +#else init_bio_methods(); #endif +#ifdef HAVE_OPENSSL_KEYLOG + { + const char *logfn = getenv("SSLKEYLOGFILE"); + + if (logfn) { + rv = apr_file_open(&mc->keylog_file, logfn, + APR_FOPEN_CREATE|APR_FOPEN_WRITE|APR_FOPEN_APPEND|APR_FOPEN_LARGEFILE, + APR_FPROT_UREAD|APR_FPROT_UWRITE, + mc->pPool); + if (rv) { + ap_log_error(APLOG_MARK, APLOG_NOTICE, rv, s, APLOGNO(10226) + "Could not open log file '%s' configured via SSLKEYLOGFILE", + logfn); + return rv; + } + + ap_log_error(APLOG_MARK, APLOG_NOTICE, 0, s, APLOGNO(10227) + "Init: Logging SSL private key material to %s", logfn); + } + } +#endif + return OK; } /* * Support for external a Crypto Device ("engine"), usually - * a hardware accellerator card for crypto operations. + * a hardware accelerator card for crypto operations. */ -#if defined(HAVE_OPENSSL_ENGINE_H) && defined(HAVE_ENGINE_INIT) apr_status_t ssl_init_Engine(server_rec *s, apr_pool_t *p) { +#if MODSSL_HAVE_ENGINE_API SSLModConfigRec *mc = myModConfig(s); ENGINE *e; @@ -459,10 +518,9 @@ apr_status_t ssl_init_Engine(server_rec *s, apr_pool_t *p) ENGINE_free(e); } - +#endif return APR_SUCCESS; } -#endif #ifdef HAVE_TLSEXT static apr_status_t ssl_init_ctx_tls_extensions(server_rec *s, @@ -479,7 +537,9 @@ static apr_status_t ssl_init_ctx_tls_extensions(server_rec *s, "Configuring TLS extension handling"); /* - * Server name indication (SNI) + * The Server Name Indication (SNI) provided by the ClientHello can be + * used to select the right (name-based-)vhost and its SSL configuration + * before the handshake takes place. 
*/ if (!SSL_CTX_set_tlsext_servername_callback(mctx->ssl_ctx, ssl_callback_ServerNameIndication) || @@ -491,6 +551,16 @@ static apr_status_t ssl_init_ctx_tls_extensions(server_rec *s, return ssl_die(s); } +#if OPENSSL_VERSION_NUMBER >= 0x10101000L && !defined(LIBRESSL_VERSION_NUMBER) + /* + * The ClientHello callback also allows to retrieve the SNI, but since it + * runs at the earliest possible connection stage we can even set the TLS + * protocol version(s) according to the selected (name-based-)vhost, which + * is not possible at the SNI callback stage (due to OpenSSL internals). + */ + SSL_CTX_set_client_hello_cb(mctx->ssl_ctx, ssl_callback_ClientHello, NULL); +#endif + #ifdef HAVE_OCSP_STAPLING /* * OCSP Stapling support, status_request extension @@ -659,9 +729,9 @@ static apr_status_t ssl_init_ctx_protocol(server_rec *s, #else /* #if OPENSSL_VERSION_NUMBER < 0x10100000L */ /* We first determine the maximum protocol version we should provide */ #if SSL_HAVE_PROTOCOL_TLSV1_3 - if (SSL_HAVE_PROTOCOL_TLSV1_3 && (protocol & SSL_PROTOCOL_TLSV1_3)) { + if (protocol & SSL_PROTOCOL_TLSV1_3) { prot = TLS1_3_VERSION; - } else + } else #endif if (protocol & SSL_PROTOCOL_TLSV1_2) { prot = TLS1_2_VERSION; @@ -767,6 +837,20 @@ static apr_status_t ssl_init_ctx_protocol(server_rec *s, * https://github.com/openssl/openssl/issues/7178 */ SSL_CTX_clear_mode(ctx, SSL_MODE_AUTO_RETRY); #endif + +#ifdef HAVE_OPENSSL_KEYLOG + if (mctx->sc->mc->keylog_file) { + SSL_CTX_set_keylog_callback(ctx, modssl_callback_keylog); + } +#endif + +#ifdef SSL_OP_IGNORE_UNEXPECTED_EOF + /* For server-side SSL_CTX, enable ignoring unexpected EOF */ + /* (OpenSSL 1.1.1 behavioural compatibility).. */ + if (!mctx->pkp) { + SSL_CTX_set_options(ctx, SSL_OP_IGNORE_UNEXPECTED_EOF); + } +#endif return APR_SUCCESS; } @@ -795,7 +879,11 @@ static void ssl_init_ctx_callbacks(server_rec *s, { SSL_CTX *ctx = mctx->ssl_ctx; +#if MODSSL_USE_OPENSSL_PRE_1_1_API + /* Note that for OpenSSL>=1.1, auto selection is enabled via + * SSL_CTX_set_dh_auto(,1) if no parameter is configured. 
*/ SSL_CTX_set_tmp_dh_callback(ctx, ssl_callback_TmpDH); +#endif SSL_CTX_set_info_callback(ctx, ssl_callback_Info); @@ -804,6 +892,23 @@ static void ssl_init_ctx_callbacks(server_rec *s, #endif } +static APR_INLINE +int modssl_CTX_load_verify_locations(SSL_CTX *ctx, + const char *file, + const char *path) +{ +#if OPENSSL_VERSION_NUMBER < 0x30000000L + if (!SSL_CTX_load_verify_locations(ctx, file, path)) + return 0; +#else + if (file && !SSL_CTX_load_verify_file(ctx, file)) + return 0; + if (path && !SSL_CTX_load_verify_dir(ctx, path)) + return 0; +#endif + return 1; +} + static apr_status_t ssl_init_ctx_verify(server_rec *s, apr_pool_t *p, apr_pool_t *ptemp, @@ -844,10 +949,8 @@ static apr_status_t ssl_init_ctx_verify(server_rec *s, ap_log_error(APLOG_MARK, APLOG_TRACE1, 0, s, "Configuring client authentication"); - if (!SSL_CTX_load_verify_locations(ctx, - mctx->auth.ca_cert_file, - mctx->auth.ca_cert_path)) - { + if (!modssl_CTX_load_verify_locations(ctx, mctx->auth.ca_cert_file, + mctx->auth.ca_cert_path)) { ap_log_error(APLOG_MARK, APLOG_EMERG, 0, s, APLOGNO(01895) "Unable to configure verify locations " "for client authentication"); @@ -932,6 +1035,23 @@ static apr_status_t ssl_init_ctx_cipher_suite(server_rec *s, return APR_SUCCESS; } +static APR_INLINE +int modssl_X509_STORE_load_locations(X509_STORE *store, + const char *file, + const char *path) +{ +#if OPENSSL_VERSION_NUMBER < 0x30000000L + if (!X509_STORE_load_locations(store, file, path)) + return 0; +#else + if (file && !X509_STORE_load_file(store, file)) + return 0; + if (path && !X509_STORE_load_path(store, path)) + return 0; +#endif + return 1; +} + static apr_status_t ssl_init_ctx_crl(server_rec *s, apr_pool_t *p, apr_pool_t *ptemp, @@ -970,8 +1090,8 @@ static apr_status_t ssl_init_ctx_crl(server_rec *s, ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s, APLOGNO(01900) "Configuring certificate revocation facility"); - if (!store || !X509_STORE_load_locations(store, mctx->crl_file, - mctx->crl_path)) { + if (!store || !modssl_X509_STORE_load_locations(store, mctx->crl_file, + mctx->crl_path)) { ap_log_error(APLOG_MARK, APLOG_EMERG, 0, s, APLOGNO(01901) "Host %s: unable to configure X.509 CRL storage " "for certificate revocation", mctx->sc->vhost_id); @@ -1200,6 +1320,22 @@ static int ssl_no_passwd_prompt_cb(char *buf, int size, int rwflag, return 0; } +/* SSL_CTX_use_PrivateKey_file() can fail either because the private + * key was encrypted, or due to a mismatch between an already-loaded + * cert and the key - a common misconfiguration - from calling + * X509_check_private_key(). This macro is passed the last error code + * off the OpenSSL stack and evaluates to true only for the first + * case. With OpenSSL < 3 the second case is identifiable by the + * function code, but function codes are not used from 3.0. 
*/ +#if OPENSSL_VERSION_NUMBER < 0x30000000L +#define CHECK_PRIVKEY_ERROR(ec) (ERR_GET_FUNC(ec) != X509_F_X509_CHECK_PRIVATE_KEY) +#else +#define CHECK_PRIVKEY_ERROR(ec) (ERR_GET_LIB(ec) != ERR_LIB_X509 \ + || (ERR_GET_REASON(ec) != X509_R_KEY_TYPE_MISMATCH \ + && ERR_GET_REASON(ec) != X509_R_KEY_VALUES_MISMATCH \ + && ERR_GET_REASON(ec) != X509_R_UNKNOWN_KEY_TYPE)) +#endif + static apr_status_t ssl_init_server_certs(server_rec *s, apr_pool_t *p, apr_pool_t *ptemp, @@ -1209,15 +1345,10 @@ static apr_status_t ssl_init_server_certs(server_rec *s, SSLModConfigRec *mc = myModConfig(s); const char *vhost_id = mctx->sc->vhost_id, *key_id, *certfile, *keyfile; int i; - X509 *cert; - DH *dhparams; + EVP_PKEY *pkey; #ifdef HAVE_ECC - EC_GROUP *ecparams = NULL; - int nid; - EC_KEY *eckey = NULL; -#endif -#ifndef HAVE_SSL_CONF_CMD - SSL *ssl; + EC_GROUP *ecgroup = NULL; + int curve_nid = 0; #endif /* no OpenSSL default prompts for any of the SSL_CTX_use_* calls, please */ @@ -1228,12 +1359,18 @@ static apr_status_t ssl_init_server_certs(server_rec *s, (certfile = APR_ARRAY_IDX(mctx->pks->cert_files, i, const char *)); i++) { + X509 *cert = NULL; + const char *engine_certfile = NULL; + key_id = apr_psprintf(ptemp, "%s:%d", vhost_id, i); ERR_clear_error(); /* first the certificate (public key) */ - if (mctx->cert_chain) { + if (modssl_is_engine_id(certfile)) { + engine_certfile = certfile; + } + else if (mctx->cert_chain) { if ((SSL_CTX_use_certificate_file(mctx->ssl_ctx, certfile, SSL_FILETYPE_PEM) < 1)) { ap_log_error(APLOG_MARK, APLOG_EMERG, 0, s, APLOGNO(02561) @@ -1262,12 +1399,43 @@ static apr_status_t ssl_init_server_certs(server_rec *s, ERR_clear_error(); - if ((SSL_CTX_use_PrivateKey_file(mctx->ssl_ctx, keyfile, - SSL_FILETYPE_PEM) < 1) && - (ERR_GET_FUNC(ERR_peek_last_error()) - != X509_F_X509_CHECK_PRIVATE_KEY)) { + if (modssl_is_engine_id(keyfile)) { + apr_status_t rv; + + if ((rv = modssl_load_engine_keypair(s, ptemp, vhost_id, + engine_certfile, keyfile, + &cert, &pkey))) { + return rv; + } + + if (cert) { + if (SSL_CTX_use_certificate(mctx->ssl_ctx, cert) < 1) { + ap_log_error(APLOG_MARK, APLOG_EMERG, 0, s, APLOGNO(10137) + "Failed to configure engine certificate %s, check %s", + key_id, certfile); + ssl_log_ssl_error(SSLLOG_MARK, APLOG_EMERG, s); + return APR_EGENERAL; + } + + /* SSL_CTX now owns the cert. */ + X509_free(cert); + } + + if (SSL_CTX_use_PrivateKey(mctx->ssl_ctx, pkey) < 1) { + ap_log_error(APLOG_MARK, APLOG_EMERG, 0, s, APLOGNO(10130) + "Failed to configure private key %s from engine", + keyfile); + ssl_log_ssl_error(SSLLOG_MARK, APLOG_EMERG, s); + return APR_EGENERAL; + } + + /* SSL_CTX now owns the key */ + EVP_PKEY_free(pkey); + } + else if ((SSL_CTX_use_PrivateKey_file(mctx->ssl_ctx, keyfile, + SSL_FILETYPE_PEM) < 1) + && CHECK_PRIVKEY_ERROR(ERR_peek_last_error())) { ssl_asn1_t *asn1; - EVP_PKEY *pkey; const unsigned char *ptr; ERR_clear_error(); @@ -1304,22 +1472,21 @@ static apr_status_t ssl_init_server_certs(server_rec *s, * assume that if SSL_CONF is available, it's OpenSSL 1.0.2 or later, * and SSL_CTX_get0_certificate is implemented.) 
*/ - if (!(cert = SSL_CTX_get0_certificate(mctx->ssl_ctx))) { + cert = SSL_CTX_get0_certificate(mctx->ssl_ctx); #else - ssl = SSL_new(mctx->ssl_ctx); - if (ssl) { - /* Workaround bug in SSL_get_certificate in OpenSSL 0.9.8y */ - SSL_set_connect_state(ssl); - cert = SSL_get_certificate(ssl); + { + SSL *ssl = SSL_new(mctx->ssl_ctx); + if (ssl) { + /* Workaround bug in SSL_get_certificate in OpenSSL 0.9.8y */ + SSL_set_connect_state(ssl); + cert = SSL_get_certificate(ssl); + SSL_free(ssl); + } } - if (!ssl || !cert) { #endif + if (!cert) { ap_log_error(APLOG_MARK, APLOG_ERR, 0, s, APLOGNO(02566) "Unable to retrieve certificate %s", key_id); -#ifndef HAVE_SSL_CONF_CMD - if (ssl) - SSL_free(ssl); -#endif return APR_EGENERAL; } @@ -1334,18 +1501,13 @@ static apr_status_t ssl_init_server_certs(server_rec *s, * loaded via SSLOpenSSLConfCmd Certificate), so for 1.0.2 and * later, we defer to the code in ssl_init_server_ctx. */ - if ((mctx->stapling_enabled == TRUE) && - !ssl_stapling_init_cert(s, p, ptemp, mctx, cert)) { + if (!ssl_stapling_init_cert(s, p, ptemp, mctx, cert)) { ap_log_error(APLOG_MARK, APLOG_ERR, 0, s, APLOGNO(02567) "Unable to configure certificate %s for stapling", key_id); } #endif -#ifndef HAVE_SSL_CONF_CMD - SSL_free(ssl); -#endif - ap_log_error(APLOG_MARK, APLOG_INFO, 0, s, APLOGNO(02568) "Certificate and private key %s configured from %s and %s", key_id, certfile, keyfile); @@ -1354,27 +1516,69 @@ static apr_status_t ssl_init_server_certs(server_rec *s, /* * Try to read DH parameters from the (first) SSLCertificateFile */ - if ((certfile = APR_ARRAY_IDX(mctx->pks->cert_files, 0, const char *)) && - (dhparams = ssl_dh_GetParamFromFile(certfile))) { - SSL_CTX_set_tmp_dh(mctx->ssl_ctx, dhparams); - ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s, APLOGNO(02540) - "Custom DH parameters (%d bits) for %s loaded from %s", - DH_bits(dhparams), vhost_id, certfile); - DH_free(dhparams); + certfile = APR_ARRAY_IDX(mctx->pks->cert_files, 0, const char *); + if (certfile && !modssl_is_engine_id(certfile)) { + int done = 0, num_bits = 0; +#if OPENSSL_VERSION_NUMBER < 0x30000000L + DH *dh = modssl_dh_from_file(certfile); + if (dh) { + num_bits = DH_bits(dh); + SSL_CTX_set_tmp_dh(mctx->ssl_ctx, dh); + DH_free(dh); + done = 1; + } +#else + pkey = modssl_dh_pkey_from_file(certfile); + if (pkey) { + num_bits = EVP_PKEY_get_bits(pkey); + if (!SSL_CTX_set0_tmp_dh_pkey(mctx->ssl_ctx, pkey)) { + EVP_PKEY_free(pkey); + } + else { + done = 1; + } + } +#endif + if (done) { + ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s, APLOGNO(02540) + "Custom DH parameters (%d bits) for %s loaded from %s", + num_bits, vhost_id, certfile); + } + } +#if !MODSSL_USE_OPENSSL_PRE_1_1_API + else { + /* If no parameter is manually configured, enable auto + * selection. */ + SSL_CTX_set_dh_auto(mctx->ssl_ctx, 1); } +#endif #ifdef HAVE_ECC /* * Similarly, try to read the ECDH curve name from SSLCertificateFile... 
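With OpenSSL 3.x the DH parameters travel as an EVP_PKEY. The loader used below, modssl_dh_pkey_from_file(), presumably reads the PEM parameter block along these lines; this is a sketch under that assumption, not the actual helper.

    #include <openssl/pem.h>

    static EVP_PKEY *load_dh_params(const char *pemfile)
    {
        BIO *bio = BIO_new_file(pemfile, "r");
        EVP_PKEY *pkey = NULL;

        if (bio) {
            /* Returns key *parameters* only, no private/public key material. */
            pkey = PEM_read_bio_Parameters(bio, NULL);
            BIO_free(bio);
        }
        /* SSL_CTX_set0_tmp_dh_pkey() takes ownership on success, so the
         * EVP_PKEY is only freed explicitly when that call fails. */
        return pkey;
    }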
*/ - if ((certfile != NULL) && - (ecparams = ssl_ec_GetParamFromFile(certfile)) && - (nid = EC_GROUP_get_curve_name(ecparams)) && - (eckey = EC_KEY_new_by_curve_name(nid))) { - SSL_CTX_set_tmp_ecdh(mctx->ssl_ctx, eckey); - ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s, APLOGNO(02541) - "ECDH curve %s for %s specified in %s", - OBJ_nid2sn(nid), vhost_id, certfile); + if (certfile && !modssl_is_engine_id(certfile) + && (ecgroup = modssl_ec_group_from_file(certfile)) + && (curve_nid = EC_GROUP_get_curve_name(ecgroup))) { +#if OPENSSL_VERSION_NUMBER < 0x30000000L + EC_KEY *eckey = EC_KEY_new_by_curve_name(curve_nid); + if (eckey) { + SSL_CTX_set_tmp_ecdh(mctx->ssl_ctx, eckey); + EC_KEY_free(eckey); + } + else { + curve_nid = 0; + } +#else + if (!SSL_CTX_set1_curves(mctx->ssl_ctx, &curve_nid, 1)) { + curve_nid = 0; + } +#endif + if (curve_nid) { + ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s, APLOGNO(02541) + "ECDH curve %s for %s specified in %s", + OBJ_nid2sn(curve_nid), vhost_id, certfile); + } } /* * ...otherwise, enable auto curve selection (OpenSSL 1.0.2) @@ -1382,18 +1586,20 @@ static apr_status_t ssl_init_server_certs(server_rec *s, * ECDH is always enabled in 1.1.0 unless excluded from SSLCipherList */ #if MODSSL_USE_OPENSSL_PRE_1_1_API - else { + if (!curve_nid) { #if defined(SSL_CTX_set_ecdh_auto) SSL_CTX_set_ecdh_auto(mctx->ssl_ctx, 1); #else - eckey = EC_KEY_new_by_curve_name(NID_X9_62_prime256v1); - SSL_CTX_set_tmp_ecdh(mctx->ssl_ctx, eckey); + EC_KEY *eckey = EC_KEY_new_by_curve_name(NID_X9_62_prime256v1); + if (eckey) { + SSL_CTX_set_tmp_ecdh(mctx->ssl_ctx, eckey); + EC_KEY_free(eckey); + } #endif } #endif /* OpenSSL assures us that _free() is NULL-safe */ - EC_KEY_free(eckey); - EC_GROUP_free(ecparams); + EC_GROUP_free(ecgroup); #endif return APR_SUCCESS; @@ -1411,6 +1617,7 @@ static apr_status_t ssl_init_ticket_key(server_rec *s, char buf[TLSEXT_TICKET_KEY_LEN]; char *path; modssl_ticket_key_t *ticket_key = mctx->ticket_key; + int res; if (!ticket_key->file_path) { return APR_SUCCESS; @@ -1438,11 +1645,22 @@ static apr_status_t ssl_init_ticket_key(server_rec *s, } memcpy(ticket_key->key_name, buf, 16); - memcpy(ticket_key->hmac_secret, buf + 16, 16); memcpy(ticket_key->aes_key, buf + 32, 16); - - if (!SSL_CTX_set_tlsext_ticket_key_cb(mctx->ssl_ctx, - ssl_callback_SessionTicket)) { +#if OPENSSL_VERSION_NUMBER < 0x30000000L + memcpy(ticket_key->hmac_secret, buf + 16, 16); + res = SSL_CTX_set_tlsext_ticket_key_cb(mctx->ssl_ctx, + ssl_callback_SessionTicket); +#else + ticket_key->mac_params[0] = + OSSL_PARAM_construct_octet_string(OSSL_MAC_PARAM_KEY, buf + 16, 16); + ticket_key->mac_params[1] = + OSSL_PARAM_construct_utf8_string(OSSL_MAC_PARAM_DIGEST, "sha256", 0); + ticket_key->mac_params[2] = + OSSL_PARAM_construct_end(); + res = SSL_CTX_set_tlsext_ticket_key_evp_cb(mctx->ssl_ctx, + ssl_callback_SessionTicket); +#endif + if (!res) { ap_log_error(APLOG_MARK, APLOG_EMERG, 0, s, APLOGNO(01913) "Unable to initialize TLS session ticket key callback " "(incompatible OpenSSL version?)"); @@ -1493,8 +1711,12 @@ static apr_status_t ssl_init_proxy_certs(server_rec *s, STACK_OF(X509) *chain; X509_STORE_CTX *sctx; X509_STORE *store = SSL_CTX_get_cert_store(mctx->ssl_ctx); + int addl_chain = 0; /* non-zero if additional chain certs were + * added to store */ -#if OPENSSL_VERSION_NUMBER >= 0x1010100fL + ap_assert(store != NULL); /* safe to assume always non-NULL? 
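The OSSL_PARAM array built above is later consumed by the ticket callback through EVP_MAC_CTX_set_params(). A standalone HMAC-SHA256 sketch of the same OpenSSL 3.x API, for illustration only (names and buffer handling are not from the module).

    #include <openssl/evp.h>
    #include <openssl/core_names.h>
    #include <openssl/params.h>

    /* out must hold at least EVP_MAX_MD_SIZE bytes. */
    static int hmac_sha256(const unsigned char *key, size_t keylen,
                           const unsigned char *in, size_t inlen,
                           unsigned char *out, size_t *outlen)
    {
        EVP_MAC *mac = EVP_MAC_fetch(NULL, "HMAC", NULL);
        EVP_MAC_CTX *ctx = mac ? EVP_MAC_CTX_new(mac) : NULL;
        OSSL_PARAM params[2];
        int ok = 0;

        params[0] = OSSL_PARAM_construct_utf8_string(OSSL_MAC_PARAM_DIGEST,
                                                     "SHA256", 0);
        params[1] = OSSL_PARAM_construct_end();

        if (ctx
            && EVP_MAC_init(ctx, key, keylen, params)
            && EVP_MAC_update(ctx, in, inlen)
            && EVP_MAC_final(ctx, out, outlen, EVP_MAX_MD_SIZE))
            ok = 1;

        EVP_MAC_CTX_free(ctx);
        EVP_MAC_free(mac);
        return ok;
    }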
*/ + +#if OPENSSL_VERSION_NUMBER >= 0x1010100fL && !defined(LIBRESSL_VERSION_NUMBER) /* For OpenSSL >=1.1.1, turn on client cert support which is * otherwise turned off by default (by design). * https://github.com/openssl/openssl/issues/6933 */ @@ -1515,42 +1737,31 @@ static apr_status_t ssl_init_proxy_certs(server_rec *s, } if (pkp->cert_path) { - apr_dir_t *dir; - apr_finfo_t dirent; - apr_int32_t finfo_flags = APR_FINFO_TYPE|APR_FINFO_NAME; - - if (apr_dir_open(&dir, pkp->cert_path, ptemp) == APR_SUCCESS) { - while ((apr_dir_read(&dirent, finfo_flags, dir)) == APR_SUCCESS) { - const char *fullname; - - if (dirent.filetype == APR_DIR) { - continue; /* don't try to load directories */ - } - - fullname = apr_pstrcat(ptemp, - pkp->cert_path, "/", dirent.name, - NULL); - load_x509_info(ptemp, sk, fullname); - } - - apr_dir_close(dir); - } - } - - if ((ncerts = sk_X509_INFO_num(sk)) <= 0) { - sk_X509_INFO_free(sk); - ap_log_error(APLOG_MARK, APLOG_WARNING, 0, s, APLOGNO(02206) - "no client certs found for SSL proxy"); - return APR_SUCCESS; + ssl_init_ca_cert_path(s, ptemp, pkp->cert_path, NULL, sk); } /* Check that all client certs have got certificates and private - * keys. */ - for (n = 0; n < ncerts; n++) { + * keys. Note the number of certs in the stack may decrease + * during the loop. */ + for (n = 0; n < sk_X509_INFO_num(sk); n++) { X509_INFO *inf = sk_X509_INFO_value(sk, n); + int has_privkey = inf->x_pkey && inf->x_pkey->dec_pkey; + + /* For a lone certificate in the file, trust it as a + * CA/intermediate certificate. */ + if (inf->x509 && !has_privkey && !inf->enc_data) { + ssl_log_xerror(SSLLOG_MARK, APLOG_DEBUG, 0, ptemp, s, inf->x509, + APLOGNO(10261) "Trusting non-leaf certificate"); + X509_STORE_add_cert(store, inf->x509); /* increments inf->x509 */ + /* Delete from the stack and iterate again. */ + X509_INFO_free(inf); + sk_X509_INFO_delete(sk, n); + n--; + addl_chain = 1; + continue; + } - if (!inf->x509 || !inf->x_pkey || !inf->x_pkey->dec_pkey || - inf->enc_data) { + if (!has_privkey || inf->enc_data) { sk_X509_INFO_free(sk); ap_log_error(APLOG_MARK, APLOG_STARTUP, 0, s, APLOGNO(02252) "incomplete client cert configured for SSL proxy " @@ -1567,13 +1778,21 @@ static apr_status_t ssl_init_proxy_certs(server_rec *s, } } + if ((ncerts = sk_X509_INFO_num(sk)) <= 0) { + sk_X509_INFO_free(sk); + ap_log_error(APLOG_MARK, APLOG_WARNING, 0, s, APLOGNO(02206) + "no client certs found for SSL proxy"); + return APR_SUCCESS; + } + ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s, APLOGNO(02207) "loaded %d client certs for SSL proxy", ncerts); pkp->certs = sk; - - if (!pkp->ca_cert_file || !store) { + /* If any chain certs are configured, build the ->ca_certs chains + * corresponding to the loaded keypairs. 
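The lone non-leaf certificates collected below go into the store so that per-leaf chains can be assembled afterwards. Roughly, that assembly follows the standard verify-context pattern; the sketch below is illustrative and not the patch's own code.

    #include <openssl/x509_vfy.h>

    static STACK_OF(X509) *build_chain(X509_STORE *store, X509 *leaf)
    {
        X509_STORE_CTX *sctx = X509_STORE_CTX_new();
        STACK_OF(X509) *chain = NULL;

        if (sctx && X509_STORE_CTX_init(sctx, store, leaf, NULL)) {
            /* Verification may fail (e.g. no trusted root); the partially
             * built chain is still retrievable afterwards. */
            X509_verify_cert(sctx);
            chain = X509_STORE_CTX_get1_chain(sctx);
        }
        X509_STORE_CTX_free(sctx);
        return chain;
    }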
*/ + if (!pkp->ca_cert_file && !addl_chain) { return APR_SUCCESS; } @@ -1589,16 +1808,21 @@ static apr_status_t ssl_init_proxy_certs(server_rec *s, ap_log_error(APLOG_MARK, APLOG_EMERG, 0, s, APLOGNO(02208) "SSL proxy client cert initialization failed"); ssl_log_ssl_error(SSLLOG_MARK, APLOG_EMERG, s); + sk_X509_INFO_free(sk); return ssl_die(s); } - X509_STORE_load_locations(store, pkp->ca_cert_file, NULL); + modssl_X509_STORE_load_locations(store, pkp->ca_cert_file, NULL); for (n = 0; n < ncerts; n++) { int i; X509_INFO *inf = sk_X509_INFO_value(pkp->certs, n); - X509_STORE_CTX_init(sctx, store, inf->x509, NULL); + if (!X509_STORE_CTX_init(sctx, store, inf->x509, NULL)) { + sk_X509_INFO_free(sk); + X509_STORE_CTX_free(sctx); + return ssl_die(s); + } /* Attempt to verify the client cert */ if (X509_verify_cert(sctx) != 1) { @@ -1729,11 +1953,13 @@ static apr_status_t ssl_init_server_ctx(server_rec *s, apr_array_header_t *pphrases) { apr_status_t rv; + modssl_pk_server_t *pks; #ifdef HAVE_SSL_CONF_CMD ssl_ctx_param_t *param = (ssl_ctx_param_t *)sc->server->ssl_ctx_param->elts; SSL_CONF_CTX *cctx = sc->server->ssl_ctx_config; int i; #endif + int n; /* * Check for problematic re-initializations @@ -1745,52 +1971,30 @@ static apr_status_t ssl_init_server_ctx(server_rec *s, return APR_EGENERAL; } - ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s, APLOGNO(10083) - "Init: (%s) mod_md support is %s.", ssl_util_vhostid(p, s), - md_is_managed? "available" : "unavailable"); - if (md_is_managed && md_is_managed(s)) { - modssl_pk_server_t *const pks = sc->server->pks; - if (pks->cert_files->nelts > 0 || pks->key_files->nelts > 0) { - ap_log_error(APLOG_MARK, APLOG_WARNING, 0, s, APLOGNO(10084) - "Init: (%s) You configured certificate/key files on this host, but " - "is is covered by a Managed Domain. You need to remove these directives " - "for the Managed Domain to take over.", ssl_util_vhostid(p, s)); - } - else { - const char *key_file, *cert_file, *chain_file; - - key_file = cert_file = chain_file = NULL; - - if (md_get_certificate) { - rv = md_get_certificate(s, p, &key_file, &cert_file); - } - else { - rv = APR_ENOTIMPL; - } - - if (key_file && cert_file) { - ap_log_error(APLOG_MARK, APLOG_TRACE1, 0, s, - "%s: installing key=%s, cert=%s, chain=%s", - ssl_util_vhostid(p, s), key_file, cert_file, chain_file); - APR_ARRAY_PUSH(pks->key_files, const char *) = key_file; - APR_ARRAY_PUSH(pks->cert_files, const char *) = cert_file; - sc->server->cert_chain = chain_file; - } - - if (APR_STATUS_IS_EAGAIN(rv)) { - /* Managed Domain not ready yet. This is not a reason to fail the config */ - ap_log_error(APLOG_MARK, APLOG_WARNING, 0, s, APLOGNO(10085) - "Init: %s will respond with '503 Service Unavailable' for now. This " - "host is part of a Managed Domain, but no SSL certificate is " - "available (yet).", ssl_util_vhostid(p, s)); - pks->service_unavailable = 1; - } - else if (rv != APR_SUCCESS) { - return rv; - } + /* Allow others to provide certificate files */ + pks = sc->server->pks; + n = pks->cert_files->nelts; + ap_ssl_add_cert_files(s, p, pks->cert_files, pks->key_files); + ssl_run_add_cert_files(s, p, pks->cert_files, pks->key_files); + + if (apr_is_empty_array(pks->cert_files)) { + /* does someone propose a certiciate to fall back on here? 
*/ + ap_ssl_add_fallback_cert_files(s, p, pks->cert_files, pks->key_files); + ssl_run_add_fallback_cert_files(s, p, pks->cert_files, pks->key_files); + if (n < pks->cert_files->nelts) { + pks->service_unavailable = 1; + ap_log_error(APLOG_MARK, APLOG_WARNING, 0, s, APLOGNO(10085) + "Init: %s will respond with '503 Service Unavailable' for now. There " + "are no SSL certificates configured and no other module contributed any.", + ssl_util_vhostid(p, s)); } } + if (n < pks->cert_files->nelts) { + /* additionally installed certs overrides any old chain configuration */ + sc->server->cert_chain = NULL; + } + if ((rv = ssl_init_ctx(s, p, ptemp, sc->server)) != APR_SUCCESS) { return rv; } @@ -1841,7 +2045,7 @@ static apr_status_t ssl_init_server_ctx(server_rec *s, * (late) point makes sure that we catch both certificates loaded * via SSLCertificateFile and SSLOpenSSLConfCmd Certificate. */ - if (sc->server->stapling_enabled == TRUE) { + do { X509 *cert; int i = 0; int ret = SSL_CTX_set_current_cert(sc->server->ssl_ctx, @@ -1858,7 +2062,7 @@ static apr_status_t ssl_init_server_ctx(server_rec *s, SSL_CERT_SET_NEXT); i++; } - } + } while(0); #endif #ifdef HAVE_TLS_SESSION_TICKETS @@ -2038,50 +2242,38 @@ int ssl_proxy_section_post_config(apr_pool_t *p, apr_pool_t *plog, return OK; } -static int ssl_init_FindCAList_X509NameCmp(const X509_NAME * const *a, - const X509_NAME * const *b) -{ - return(X509_NAME_cmp(*a, *b)); -} - -static void ssl_init_PushCAList(STACK_OF(X509_NAME) *ca_list, - server_rec *s, apr_pool_t *ptemp, - const char *file) +static apr_status_t ssl_init_ca_cert_path(server_rec *s, + apr_pool_t *ptemp, + const char *path, + STACK_OF(X509_NAME) *ca_list, + STACK_OF(X509_INFO) *xi_list) { - int n; - STACK_OF(X509_NAME) *sk; + apr_dir_t *dir; + apr_finfo_t direntry; + apr_int32_t finfo_flags = APR_FINFO_TYPE|APR_FINFO_NAME; - sk = (STACK_OF(X509_NAME) *) - SSL_load_client_CA_file(file); - - if (!sk) { - return; + if (!path || (!ca_list && !xi_list) || + (apr_dir_open(&dir, path, ptemp) != APR_SUCCESS)) { + return APR_EGENERAL; } - for (n = 0; n < sk_X509_NAME_num(sk); n++) { - X509_NAME *name = sk_X509_NAME_value(sk, n); - - ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s, APLOGNO(02209) - "CA certificate: %s", - modssl_X509_NAME_to_string(ptemp, name, 0)); - - /* - * note that SSL_load_client_CA_file() checks for duplicates, - * but since we call it multiple times when reading a directory - * we must also check for duplicates ourselves. - */ - - if (sk_X509_NAME_find(ca_list, name) < 0) { - /* this will be freed when ca_list is */ - sk_X509_NAME_push(ca_list, name); + while ((apr_dir_read(&direntry, finfo_flags, dir)) == APR_SUCCESS) { + const char *file; + if (direntry.filetype == APR_DIR) { + continue; /* don't try to load directories */ } - else { - /* need to free this ourselves, else it will leak */ - X509_NAME_free(name); + file = apr_pstrcat(ptemp, path, "/", direntry.name, NULL); + if (ca_list) { + SSL_add_file_cert_subjects_to_stack(ca_list, file); + } + if (xi_list) { + load_x509_info(ptemp, xi_list, file); } } - sk_X509_NAME_free(sk); + apr_dir_close(dir); + + return APR_SUCCESS; } STACK_OF(X509_NAME) *ssl_init_FindCAList(server_rec *s, @@ -2089,19 +2281,13 @@ STACK_OF(X509_NAME) *ssl_init_FindCAList(server_rec *s, const char *ca_file, const char *ca_path) { - STACK_OF(X509_NAME) *ca_list; - - /* - * Start with a empty stack/list where new - * entries get added in sorted order. 
- */ - ca_list = sk_X509_NAME_new(ssl_init_FindCAList_X509NameCmp); + STACK_OF(X509_NAME) *ca_list = sk_X509_NAME_new_null();; /* * Process CA certificate bundle file */ if (ca_file) { - ssl_init_PushCAList(ca_list, s, ptemp, ca_file); + SSL_add_file_cert_subjects_to_stack(ca_list, ca_file); /* * If ca_list is still empty after trying to load ca_file * then the file failed to load, and users should hear about that. @@ -2116,37 +2302,15 @@ STACK_OF(X509_NAME) *ssl_init_FindCAList(server_rec *s, /* * Process CA certificate path files */ - if (ca_path) { - apr_dir_t *dir; - apr_finfo_t direntry; - apr_int32_t finfo_flags = APR_FINFO_TYPE|APR_FINFO_NAME; - apr_status_t rv; - - if ((rv = apr_dir_open(&dir, ca_path, ptemp)) != APR_SUCCESS) { - ap_log_error(APLOG_MARK, APLOG_EMERG, rv, s, APLOGNO(02211) - "Failed to open Certificate Path `%s'", - ca_path); - sk_X509_NAME_pop_free(ca_list, X509_NAME_free); - return NULL; - } - - while ((apr_dir_read(&direntry, finfo_flags, dir)) == APR_SUCCESS) { - const char *file; - if (direntry.filetype == APR_DIR) { - continue; /* don't try to load directories */ - } - file = apr_pstrcat(ptemp, ca_path, "/", direntry.name, NULL); - ssl_init_PushCAList(ca_list, s, ptemp, file); - } - - apr_dir_close(dir); + if (ca_path && + ssl_init_ca_cert_path(s, ptemp, + ca_path, ca_list, NULL) != APR_SUCCESS) { + ap_log_error(APLOG_MARK, APLOG_EMERG, 0, s, APLOGNO(02211) + "Failed to open Certificate Path `%s'", ca_path); + sk_X509_NAME_pop_free(ca_list, X509_NAME_free); + return NULL; } - /* - * Cleanup - */ - (void) sk_X509_NAME_set_cmp_func(ca_list, NULL); - return ca_list; } @@ -2192,10 +2356,11 @@ apr_status_t ssl_init_ModuleKill(void *data) } -#if !MODSSL_USE_OPENSSL_PRE_1_1_API +#if MODSSL_USE_OPENSSL_PRE_1_1_API + free_dh_params(); +#else free_bio_methods(); #endif - free_dh_params(); return APR_SUCCESS; } diff --git a/modules/ssl/ssl_engine_io.c b/modules/ssl/ssl_engine_io.c index 6da8f10..b91f784 100644 --- a/modules/ssl/ssl_engine_io.c +++ b/modules/ssl/ssl_engine_io.c @@ -28,8 +28,7 @@ core keeps dumping.'' -- Unknown */ #include "ssl_private.h" -#include "mod_ssl.h" -#include "mod_ssl_openssl.h" + #include "apr_date.h" APR_IMPLEMENT_OPTIONAL_HOOK_RUN_ALL(ssl, SSL, int, proxy_post_handshake, @@ -152,6 +151,9 @@ static int bio_filter_out_flush(BIO *bio) bio_filter_out_ctx_t *outctx = (bio_filter_out_ctx_t *)BIO_get_data(bio); apr_bucket *e; + ap_log_cerror(APLOG_MARK, APLOG_TRACE6, 0, outctx->c, + "bio_filter_out_write: flush"); + AP_DEBUG_ASSERT(APR_BRIGADE_EMPTY(outctx->bb)); e = apr_bucket_flush_create(outctx->bb->bucket_alloc); @@ -191,6 +193,10 @@ static int bio_filter_destroy(BIO *bio) static int bio_filter_out_read(BIO *bio, char *out, int outl) { /* this is never called */ + bio_filter_out_ctx_t *outctx = (bio_filter_out_ctx_t *)BIO_get_data(bio); + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, outctx->c, + "BUG: %s() should not be called", "bio_filter_out_read"); + AP_DEBUG_ASSERT(0); return -1; } @@ -208,6 +214,9 @@ static int bio_filter_out_write(BIO *bio, const char *in, int inl) return -1; } + ap_log_cerror(APLOG_MARK, APLOG_TRACE6, 0, outctx->c, + "bio_filter_out_write: %i bytes", inl); + /* Use a transient bucket for the output data - any downstream * filter must setaside if necessary. 
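The transient bucket used above only borrows the caller's buffer, which is why any downstream filter that keeps the data across the call must set it aside. A minimal sketch of that contract, with illustrative names only.

    #include "apr_buckets.h"

    static apr_status_t queue_output(apr_bucket_brigade *bb,
                                     const char *data, apr_size_t len,
                                     apr_pool_t *pool)
    {
        apr_bucket *e = apr_bucket_transient_create(data, len,
                                                    bb->bucket_alloc);
        APR_BRIGADE_INSERT_TAIL(bb, e);
        /* Setting aside morphs the bucket into one owning stable storage. */
        return apr_bucket_setaside(e, pool);
    }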
*/ e = apr_bucket_transient_create(in, inl, outctx->bb->bucket_alloc); @@ -287,12 +296,20 @@ static long bio_filter_out_ctrl(BIO *bio, int cmd, long num, void *ptr) static int bio_filter_out_gets(BIO *bio, char *buf, int size) { /* this is never called */ + bio_filter_out_ctx_t *outctx = (bio_filter_out_ctx_t *)BIO_get_data(bio); + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, outctx->c, + "BUG: %s() should not be called", "bio_filter_out_gets"); + AP_DEBUG_ASSERT(0); return -1; } static int bio_filter_out_puts(BIO *bio, const char *str) { /* this is never called */ + bio_filter_out_ctx_t *outctx = (bio_filter_out_ctx_t *)BIO_get_data(bio); + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, outctx->c, + "BUG: %s() should not be called", "bio_filter_out_puts"); + AP_DEBUG_ASSERT(0); return -1; } @@ -527,22 +544,46 @@ static int bio_filter_in_read(BIO *bio, char *in, int inlen) static int bio_filter_in_write(BIO *bio, const char *in, int inl) { + bio_filter_in_ctx_t *inctx = (bio_filter_in_ctx_t *)BIO_get_data(bio); + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, inctx->f->c, + "BUG: %s() should not be called", "bio_filter_in_write"); + AP_DEBUG_ASSERT(0); return -1; } static int bio_filter_in_puts(BIO *bio, const char *str) { + bio_filter_in_ctx_t *inctx = (bio_filter_in_ctx_t *)BIO_get_data(bio); + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, inctx->f->c, + "BUG: %s() should not be called", "bio_filter_in_puts"); + AP_DEBUG_ASSERT(0); return -1; } static int bio_filter_in_gets(BIO *bio, char *buf, int size) { + bio_filter_in_ctx_t *inctx = (bio_filter_in_ctx_t *)BIO_get_data(bio); + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, inctx->f->c, + "BUG: %s() should not be called", "bio_filter_in_gets"); + AP_DEBUG_ASSERT(0); return -1; } static long bio_filter_in_ctrl(BIO *bio, int cmd, long num, void *ptr) { - return -1; + bio_filter_in_ctx_t *inctx = (bio_filter_in_ctx_t *)BIO_get_data(bio); + switch (cmd) { +#ifdef BIO_CTRL_EOF + case BIO_CTRL_EOF: + return inctx->rc == APR_EOF; +#endif + default: + break; + } + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, inctx->f->c, + "BUG: bio_filter_in_ctrl() should not be called with cmd=%i", + cmd); + return 0; } #if MODSSL_USE_OPENSSL_PRE_1_1_API @@ -567,7 +608,7 @@ static BIO_METHOD bio_filter_in_method = { bio_filter_in_read, bio_filter_in_puts, /* puts is never called */ bio_filter_in_gets, /* gets is never called */ - bio_filter_in_ctrl, /* ctrl is never called */ + bio_filter_in_ctrl, /* ctrl is called for EOF check */ bio_filter_create, bio_filter_destroy, NULL @@ -846,6 +887,9 @@ static apr_status_t ssl_filter_write(ap_filter_t *f, return APR_EGENERAL; } + ap_log_cerror(APLOG_MARK, APLOG_TRACE6, 0, f->c, + "ssl_filter_write: %"APR_SIZE_T_FMT" bytes", len); + /* We rely on SSL_get_error() after the write, which requires an empty error * queue before the write in order to work properly. */ @@ -934,7 +978,7 @@ static apr_status_t ssl_filter_write(ap_filter_t *f, alloc) /* Custom apr_status_t error code, used when a plain HTTP request is - * recevied on an SSL port. */ + * received on an SSL port. 
*/ #define MODSSL_ERROR_HTTP_ON_HTTPS (APR_OS_START_USERERR + 0) /* Custom apr_status_t error code, used when the proxy cannot @@ -1162,11 +1206,13 @@ static apr_status_t ssl_io_filter_handshake(ssl_filter_ctx_t *filter_ctx) } server = sslconn->server; - if (sslconn->is_proxy) { + if (c->outgoing) { #ifdef HAVE_TLSEXT apr_ipsubnet_t *ip; #ifdef HAVE_TLS_ALPN const char *alpn_note; + apr_array_header_t *alpn_proposed = NULL; + int alpn_empty_ok = 1; #endif #endif const char *hostname_note = apr_table_get(c->notes, @@ -1182,9 +1228,16 @@ static apr_status_t ssl_io_filter_handshake(ssl_filter_ctx_t *filter_ctx) #ifdef HAVE_TLS_ALPN alpn_note = apr_table_get(c->notes, "proxy-request-alpn-protos"); if (alpn_note) { - char *protos, *s, *p, *last; + char *protos, *s, *p, *last, *proto; apr_size_t len; + /* Transform the note into a protocol formatted byte array: + * (len-byte proto-char+)* + * We need the remote server to agree on one of these, unless 'http/1.1' + * is also among our proposals. Because pre-ALPN remotes will speak this. + */ + alpn_proposed = apr_array_make(c->pool, 3, sizeof(const char*)); + alpn_empty_ok = 0; s = protos = apr_pcalloc(c->pool, strlen(alpn_note)+1); p = apr_pstrdup(c->pool, alpn_note); while ((p = apr_strtok(p, ", ", &last))) { @@ -1196,6 +1249,11 @@ static apr_status_t ssl_io_filter_handshake(ssl_filter_ctx_t *filter_ctx) ssl_log_ssl_error(SSLLOG_MARK, APLOG_ERR, server); return APR_EGENERAL; } + proto = apr_pstrndup(c->pool, p, len); + APR_ARRAY_PUSH(alpn_proposed, const char*) = proto; + if (!strcmp("http/1.1", proto)) { + alpn_empty_ok = 1; + } *s++ = (unsigned char)len; while (len--) { *s++ = *p++; @@ -1211,6 +1269,8 @@ static apr_status_t ssl_io_filter_handshake(ssl_filter_ctx_t *filter_ctx) ap_log_cerror(APLOG_MARK, APLOG_WARNING, 0, c, APLOGNO(03310) "error setting alpn protos from '%s'", alpn_note); ssl_log_ssl_error(SSLLOG_MARK, APLOG_WARNING, server); + /* If ALPN was requested and we cannot do it, we must fail */ + return MODSSL_ERROR_BAD_GATEWAY; } } #endif /* defined HAVE_TLS_ALPN */ @@ -1238,7 +1298,7 @@ static apr_status_t ssl_io_filter_handshake(ssl_filter_ctx_t *filter_ctx) ssl_log_ssl_error(SSLLOG_MARK, APLOG_WARNING, server); } } -#endif +#endif /* defined HAVE_TLSEXT */ if ((n = SSL_connect(filter_ctx->pssl)) <= 0) { ap_log_cerror(APLOG_MARK, APLOG_INFO, 0, c, APLOGNO(02003) @@ -1267,7 +1327,6 @@ static apr_status_t ssl_io_filter_handshake(ssl_filter_ctx_t *filter_ctx) ((dc->proxy->ssl_check_peer_cn != FALSE) || (dc->proxy->ssl_check_peer_name == TRUE)) && hostname_note) { - apr_table_unset(c->notes, "proxy-request-hostname"); if (!cert || modssl_X509_match_name(c->pool, cert, hostname_note, TRUE, server) == FALSE) { @@ -1284,7 +1343,6 @@ static apr_status_t ssl_io_filter_handshake(ssl_filter_ctx_t *filter_ctx) hostname = ssl_var_lookup(NULL, server, c, NULL, "SSL_CLIENT_S_DN_CN"); - apr_table_unset(c->notes, "proxy-request-hostname"); /* Do string match or simplest wildcard match if that * fails. */ @@ -1304,6 +1362,50 @@ static apr_status_t ssl_io_filter_handshake(ssl_filter_ctx_t *filter_ctx) } } +#ifdef HAVE_TLS_ALPN + /* If we proposed ALPN protocol(s), we need to check if the server + * agreed to one of them. While + * chapter 3.2 says the server SHALL error the handshake in such a case, + * the reality is that some servers fall back to their default, e.g. http/1.1. + * (we also do this right now) + * We need to treat this as an error for security reasons. 
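The note parsing above produces the length-prefixed protocol list that SSL_set_alpn_protos() expects. A self-contained encoding example follows; the protocol names are just examples, not values taken from the configuration.

    #include <openssl/ssl.h>

    /* Wire format: (1-byte length, that many protocol bytes)* */
    static int propose_alpn(SSL *ssl)
    {
        static const unsigned char protos[] =
            "\x02" "h2"          /* 2-byte name */
            "\x08" "http/1.1";   /* 8-byte name */

        /* SSL_set_alpn_protos() returns 0 on success. */
        return SSL_set_alpn_protos(ssl, protos, sizeof(protos) - 1) == 0;
    }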
+ */ + if (alpn_proposed && alpn_proposed->nelts > 0) { + const char *selected; + unsigned int slen; + + SSL_get0_alpn_selected(filter_ctx->pssl, (const unsigned char**)&selected, &slen); + if (!selected || !slen) { + /* No ALPN selection reported by the remote server. This could mean + * it does not support ALPN (old server) or that it does not support + * any of our proposals (Apache itself up to 2.4.48 at least did that). */ + if (!alpn_empty_ok) { + ap_log_cerror(APLOG_MARK, APLOG_INFO, 0, c, APLOGNO(10273) + "SSL Proxy: Peer did not select any of our ALPN protocols [%s].", + alpn_note); + proxy_ssl_check_peer_ok = FALSE; + } + } + else { + const char *proto; + int i, found = 0; + for (i = 0; !found && i < alpn_proposed->nelts; ++i) { + proto = APR_ARRAY_IDX(alpn_proposed, i, const char *); + found = !strncmp(selected, proto, slen); + } + if (!found) { + /* From a conforming peer, this should never happen, + * but life always finds a way... */ + proto = apr_pstrndup(c->pool, selected, slen); + ap_log_cerror(APLOG_MARK, APLOG_INFO, 0, c, APLOGNO(10274) + "SSL Proxy: Peer proposed ALPN protocol %s which is none " + "of our proposals [%s].", proto, alpn_note); + proxy_ssl_check_peer_ok = FALSE; + } + } + } +#endif + if (proxy_ssl_check_peer_ok == TRUE) { /* another chance to fail */ post_handshake_rc = ssl_run_proxy_post_handshake(c, filter_ctx->pssl); @@ -1587,18 +1689,32 @@ static apr_status_t ssl_io_filter_input(ap_filter_t *f, } -/* ssl_io_filter_output() produces one SSL/TLS message per bucket +/* ssl_io_filter_output() produces one SSL/TLS record per bucket * passed down the output filter stack. This results in a high - * overhead (network packets) for any output comprising many small - * buckets. SSI page applied through the HTTP chunk filter, for - * example, may produce many brigades containing small buckets - - * [chunk-size CRLF] [chunk-data] [CRLF]. + * overhead (more network packets & TLS processing) for any output + * comprising many small buckets. SSI output passed through the HTTP + * chunk filter, for example, may produce many brigades containing + * small buckets - [chunk-size CRLF] [chunk-data] [CRLF]. * - * The coalescing filter merges many small buckets into larger buckets - * where possible, allowing the SSL I/O output filter to handle them - * more efficiently. */ + * Sending HTTP response headers as a separate TLS record to the + * response body also reveals information to a network observer (the + * size of headers) which can be significant. + * + * The coalescing filter merges data buckets with the aim of producing + * fewer, larger TLS records - without copying/buffering all content + * and introducing unnecessary overhead. + * + * ### This buffering could be probably be done more comprehensively + * ### in ssl_io_filter_output itself. + * + * ### Another possible performance optimisation in particular for the + * ### [HEAP] [FILE] HTTP response case is using a brigade rather than + * ### a char array to buffer; using apr_brigade_write() to append + * ### will use already-allocated memory from the HEAP, reducing # of + * ### copies. + */ -#define COALESCE_BYTES (2048) +#define COALESCE_BYTES (AP_IOBUFSIZE) struct coalesce_ctx { char buffer[COALESCE_BYTES]; @@ -1611,11 +1727,12 @@ static apr_status_t ssl_io_filter_coalesce(ap_filter_t *f, apr_bucket *e, *upto; apr_size_t bytes = 0; struct coalesce_ctx *ctx = f->ctx; + apr_size_t buffered = ctx ? 
ctx->bytes : 0; /* space used on entry */ unsigned count = 0; /* The brigade consists of zero-or-more small data buckets which - * can be coalesced (the prefix), followed by the remainder of the - * brigade. + * can be coalesced (referred to as the "prefix"), followed by the + * remainder of the brigade. * * Find the last bucket - if any - of that prefix. count gives * the number of buckets in the prefix. The "prefix" must contain @@ -1630,24 +1747,100 @@ static apr_status_t ssl_io_filter_coalesce(ap_filter_t *f, e != APR_BRIGADE_SENTINEL(bb) && !APR_BUCKET_IS_METADATA(e) && e->length != (apr_size_t)-1 - && e->length < COALESCE_BYTES - && (bytes + e->length) < COALESCE_BYTES - && (ctx == NULL - || bytes + ctx->bytes + e->length < COALESCE_BYTES); + && e->length <= COALESCE_BYTES + && (buffered + bytes + e->length) <= COALESCE_BYTES; e = APR_BUCKET_NEXT(e)) { - if (e->length) count++; /* don't count zero-length buckets */ - bytes += e->length; + /* don't count zero-length buckets */ + if (e->length) { + bytes += e->length; + count++; + } + } + + /* If there is room remaining and the next bucket is a data + * bucket, try to include it in the prefix to coalesce. For a + * typical [HEAP] [FILE] HTTP response brigade, this handles + * merging the headers and the start of the body into a single TLS + * record. */ + if (bytes + buffered > 0 + && bytes + buffered < COALESCE_BYTES + && e != APR_BRIGADE_SENTINEL(bb) + && !APR_BUCKET_IS_METADATA(e)) { + apr_status_t rv = APR_SUCCESS; + + /* For an indeterminate length bucket (PIPE/CGI/...), try a + * non-blocking read to have it morph into a HEAP. If the + * read fails with EAGAIN, it is harmless to try a split + * anyway, split is ENOTIMPL for most PIPE-like buckets. */ + if (e->length == (apr_size_t)-1) { + const char *discard; + apr_size_t ignore; + + rv = apr_bucket_read(e, &discard, &ignore, APR_NONBLOCK_READ); + if (rv != APR_SUCCESS && !APR_STATUS_IS_EAGAIN(rv)) { + ap_log_cerror(APLOG_MARK, APLOG_ERR, rv, f->c, APLOGNO(10232) + "coalesce failed to read from %s bucket", + e->type->name); + return AP_FILTER_ERROR; + } + } + + if (rv == APR_SUCCESS) { + /* If the read above made the bucket morph, it may now fit + * entirely within the buffer. Otherwise, split it so it does + * fit. */ + if (e->length > COALESCE_BYTES + || e->length + buffered + bytes > COALESCE_BYTES) { + rv = apr_bucket_split(e, COALESCE_BYTES - (buffered + bytes)); + } + + if (rv == APR_SUCCESS && e->length == 0) { + /* As above, don't count in the prefix if the bucket is + * now zero-length. */ + } + else if (rv == APR_SUCCESS) { + ap_log_cerror(APLOG_MARK, APLOG_TRACE4, 0, f->c, + "coalesce: adding %" APR_SIZE_T_FMT " bytes " + "from split %s bucket, total %" APR_SIZE_T_FMT, + e->length, e->type->name, bytes + buffered); + + count++; + bytes += e->length; + e = APR_BUCKET_NEXT(e); + } + else if (rv != APR_ENOTIMPL) { + ap_log_cerror(APLOG_MARK, APLOG_ERR, rv, f->c, APLOGNO(10233) + "coalesce: failed to split data bucket"); + return AP_FILTER_ERROR; + } + } } + + /* The prefix is zero or more buckets. upto now points to the + * bucket AFTER the end of the prefix, which may be the brigade + * sentinel. */ upto = e; - /* Coalesce the prefix, if: - * a) more than one bucket is found to coalesce, or - * b) the brigade contains only a single data bucket, or - * c) the data bucket is not last but we have buffered data already. 
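The prefix scan above morphs indeterminate-length buckets with a non-blocking read and then splits the next data bucket to fit the remaining buffer space. A condensed sketch of those two steps; the helper name is illustrative.

    #include "apr_buckets.h"

    static apr_status_t cap_bucket(apr_bucket *e, apr_size_t space_left)
    {
        if (e->length == (apr_size_t)-1) {
            const char *data;
            apr_size_t len;
            /* A non-blocking read may morph PIPE/CGI buckets into HEAP,
             * giving them a known length. */
            apr_status_t rv = apr_bucket_read(e, &data, &len,
                                              APR_NONBLOCK_READ);
            if (rv != APR_SUCCESS && !APR_STATUS_IS_EAGAIN(rv))
                return rv;
        }
        if (e->length != (apr_size_t)-1 && e->length > space_left)
            return apr_bucket_split(e, space_left); /* may be APR_ENOTIMPL */
        return APR_SUCCESS;
    }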
+ /* Coalesce the prefix, if any of the following are true: + * + * a) the prefix is more than one bucket + * OR + * b) the prefix is the entire brigade, which is a single bucket + * AND the prefix length is smaller than the buffer size, + * OR + * c) the prefix is a single bucket + * AND there is buffered data from a previous pass. + * + * The aim with (b) is to buffer a small bucket so it can be + * coalesced with future invocations of this filter. e.g. three + * calls each with a single 100 byte HEAP bucket should get + * coalesced together. But an invocation with a 8192 byte HEAP + * should pass through untouched. */ if (bytes > 0 && (count > 1 - || (upto == APR_BRIGADE_SENTINEL(bb)) + || (upto == APR_BRIGADE_SENTINEL(bb) + && bytes < COALESCE_BYTES) || (ctx && ctx->bytes > 0))) { /* If coalescing some bytes, ensure a context has been * created. */ @@ -1658,7 +1851,8 @@ static apr_status_t ssl_io_filter_coalesce(ap_filter_t *f, ap_log_cerror(APLOG_MARK, APLOG_TRACE4, 0, f->c, "coalesce: have %" APR_SIZE_T_FMT " bytes, " - "adding %" APR_SIZE_T_FMT " more", ctx->bytes, bytes); + "adding %" APR_SIZE_T_FMT " more (buckets=%u)", + ctx->bytes, bytes, count); /* Iterate through the prefix segment. For non-fatal errors * in this loop it is safe to break out and fall back to the @@ -1673,7 +1867,8 @@ static apr_status_t ssl_io_filter_coalesce(ap_filter_t *f, if (APR_BUCKET_IS_METADATA(e) || e->length == (apr_size_t)-1) { ap_log_cerror(APLOG_MARK, APLOG_ERR, 0, f->c, APLOGNO(02012) - "unexpected bucket type during coalesce"); + "unexpected %s bucket during coalesce", + e->type->name); break; /* non-fatal error; break out */ } @@ -1945,7 +2140,7 @@ static apr_status_t ssl_io_filter_buffer(ap_filter_t *f, } if (APR_BRIGADE_EMPTY(ctx->bb)) { - /* Suprisingly (and perhaps, wrongly), the request body can be + /* Surprisingly (and perhaps, wrongly), the request body can be * pulled from the input filter stack more than once; a * handler may read it, and ap_discard_request_body() will * attempt to do so again after *every* request. 
So input @@ -2087,14 +2282,7 @@ void ssl_io_filter_init(conn_rec *c, request_rec *r, SSL *ssl) ssl_io_filter_cleanup, apr_pool_cleanup_null); if (APLOG_CS_IS_LEVEL(c, mySrvFromConn(c), APLOG_TRACE4)) { - BIO *rbio = SSL_get_rbio(ssl), - *wbio = SSL_get_wbio(ssl); - BIO_set_callback(rbio, ssl_io_data_cb); - BIO_set_callback_arg(rbio, (void *)ssl); - if (wbio && wbio != rbio) { - BIO_set_callback(wbio, ssl_io_data_cb); - BIO_set_callback_arg(wbio, (void *)ssl); - } + modssl_set_io_callbacks(ssl); } return; @@ -2123,19 +2311,18 @@ static void ssl_io_data_dump(conn_rec *c, server_rec *s, const char *b, long len) { char buf[256]; - char tmp[64]; - int i, j, rows, trunc; + int i, j, rows, trunc, pos; unsigned char ch; trunc = 0; - for(; (len > 0) && ((b[len-1] == ' ') || (b[len-1] == '\0')); len--) + for (; (len > 0) && ((b[len-1] == ' ') || (b[len-1] == '\0')); len--) trunc++; rows = (len / DUMP_WIDTH); if ((rows * DUMP_WIDTH) < len) rows++; ap_log_cserror(APLOG_MARK, APLOG_TRACE7, 0, c, s, "+-------------------------------------------------------------------------+"); - for(i = 0 ; i< rows; i++) { + for (i = 0 ; i < rows; i++) { #if APR_CHARSET_EBCDIC char ebcdic_text[DUMP_WIDTH]; j = DUMP_WIDTH; @@ -2146,32 +2333,30 @@ static void ssl_io_data_dump(conn_rec *c, server_rec *s, memcpy(ebcdic_text,(char *)(b) + i * DUMP_WIDTH, j); ap_xlate_proto_from_ascii(ebcdic_text, j); #endif /* APR_CHARSET_EBCDIC */ - apr_snprintf(tmp, sizeof(tmp), "| %04x: ", i * DUMP_WIDTH); - apr_cpystrn(buf, tmp, sizeof(buf)); + pos = 0; + pos += apr_snprintf(buf, sizeof(buf)-pos, "| %04x: ", i * DUMP_WIDTH); for (j = 0; j < DUMP_WIDTH; j++) { if (((i * DUMP_WIDTH) + j) >= len) - apr_cpystrn(buf+strlen(buf), " ", sizeof(buf)-strlen(buf)); + pos += apr_snprintf(buf+pos, sizeof(buf)-pos, " "); else { ch = ((unsigned char)*((char *)(b) + i * DUMP_WIDTH + j)) & 0xff; - apr_snprintf(tmp, sizeof(tmp), "%02x%c", ch , j==7 ? '-' : ' '); - apr_cpystrn(buf+strlen(buf), tmp, sizeof(buf)-strlen(buf)); + pos += apr_snprintf(buf+pos, sizeof(buf)-pos, "%02x%c", ch , j==7 ? '-' : ' '); } } - apr_cpystrn(buf+strlen(buf), " ", sizeof(buf)-strlen(buf)); + pos += apr_snprintf(buf+pos, sizeof(buf)-pos, " "); for (j = 0; j < DUMP_WIDTH; j++) { if (((i * DUMP_WIDTH) + j) >= len) - apr_cpystrn(buf+strlen(buf), " ", sizeof(buf)-strlen(buf)); + pos += apr_snprintf(buf+pos, sizeof(buf)-pos, " "); else { ch = ((unsigned char)*((char *)(b) + i * DUMP_WIDTH + j)) & 0xff; #if APR_CHARSET_EBCDIC - apr_snprintf(tmp, sizeof(tmp), "%c", (ch >= 0x20 && ch <= 0x7F) ? ebcdic_text[j] : '.'); + pos += apr_snprintf(buf+pos, sizeof(buf)-pos, "%c", (ch >= 0x20 && ch <= 0x7F) ? ebcdic_text[j] : '.'); #else /* APR_CHARSET_EBCDIC */ - apr_snprintf(tmp, sizeof(tmp), "%c", ((ch >= ' ') && (ch <= '~')) ? ch : '.'); + pos += apr_snprintf(buf+pos, sizeof(buf)-pos, "%c", ((ch >= ' ') && (ch <= '~')) ? 
ch : '.'); #endif /* APR_CHARSET_EBCDIC */ - apr_cpystrn(buf+strlen(buf), tmp, sizeof(buf)-strlen(buf)); } } - apr_cpystrn(buf+strlen(buf), " |", sizeof(buf)-strlen(buf)); + pos += apr_snprintf(buf+pos, sizeof(buf)-pos, " |"); ap_log_cserror(APLOG_MARK, APLOG_TRACE7, 0, c, s, "%s", buf); } if (trunc > 0) @@ -2179,16 +2364,24 @@ static void ssl_io_data_dump(conn_rec *c, server_rec *s, "| %04ld - ", len + trunc); ap_log_cserror(APLOG_MARK, APLOG_TRACE7, 0, c, s, "+-------------------------------------------------------------------------+"); - return; } -long ssl_io_data_cb(BIO *bio, int cmd, - const char *argp, - int argi, long argl, long rc) +#if OPENSSL_VERSION_NUMBER >= 0x30000000L +static long modssl_io_cb(BIO *bio, int cmd, const char *argp, + size_t len, int argi, long argl, int rc, + size_t *processed) +#else +static long modssl_io_cb(BIO *bio, int cmd, const char *argp, + int argi, long argl, long rc) +#endif { SSL *ssl; conn_rec *c; server_rec *s; +#if OPENSSL_VERSION_NUMBER >= 0x30000000L + (void)len; + (void)processed; +#endif if ((ssl = (SSL *)BIO_get_callback_arg(bio)) == NULL) return rc; @@ -2210,7 +2403,7 @@ long ssl_io_data_cb(BIO *bio, int cmd, "%s: %s %ld/%d bytes %s BIO#%pp [mem: %pp] %s", MODSSL_LIBRARY_NAME, (cmd == (BIO_CB_WRITE|BIO_CB_RETURN) ? "write" : "read"), - rc, argi, (cmd == (BIO_CB_WRITE|BIO_CB_RETURN) ? "to" : "from"), + (long)rc, argi, (cmd == (BIO_CB_WRITE|BIO_CB_RETURN) ? "to" : "from"), bio, argp, dump); if (*dump != '\0' && argp != NULL) ssl_io_data_dump(c, s, argp, rc); @@ -2225,3 +2418,25 @@ long ssl_io_data_cb(BIO *bio, int cmd, } return rc; } + +static APR_INLINE void set_bio_callback(BIO *bio, void *arg) +{ +#if OPENSSL_VERSION_NUMBER >= 0x30000000L + BIO_set_callback_ex(bio, modssl_io_cb); +#else + BIO_set_callback(bio, modssl_io_cb); +#endif + BIO_set_callback_arg(bio, arg); +} + +void modssl_set_io_callbacks(SSL *ssl) +{ + BIO *rbio = SSL_get_rbio(ssl), + *wbio = SSL_get_wbio(ssl); + if (rbio) { + set_bio_callback(rbio, ssl); + } + if (wbio && wbio != rbio) { + set_bio_callback(wbio, ssl); + } +} diff --git a/modules/ssl/ssl_engine_kernel.c b/modules/ssl/ssl_engine_kernel.c index 81c0f63..fe0496f 100644 --- a/modules/ssl/ssl_engine_kernel.c +++ b/modules/ssl/ssl_engine_kernel.c @@ -114,6 +114,45 @@ static int has_buffered_data(request_rec *r) return result; } +/* If a renegotiation is required for the location, and the request + * includes a message body (and the client has not requested a "100 + * Continue" response), then the client will be streaming the request + * body over the wire already. In that case, it is not possible to + * stop and perform a new SSL handshake immediately; once the SSL + * library moves to the "accept" state, it will reject the SSL packets + * which the client is sending for the request body. + * + * To allow authentication to complete in the hook, the solution used + * here is to fill a (bounded) buffer with the request body, and then + * to reinject that request body later. + * + * This function is called to fill the renegotiation buffer for the + * location as required, or fail. Returns zero on success or HTTP_ + * error code on failure. + */ +static int fill_reneg_buffer(request_rec *r, SSLDirConfigRec *dc) +{ + int rv; + apr_size_t rsize; + + /* ### this is HTTP/1.1 specific, special case for protocol? */ + if (r->expecting_100 || !ap_request_has_body(r)) { + return 0; + } + + rsize = dc->nRenegBufferSize == UNSET ? 
DEFAULT_RENEG_BUFFER_SIZE : dc->nRenegBufferSize; + if (rsize > 0) { + /* Fill the I/O buffer with the request body if possible. */ + rv = ssl_io_buffer_fill(r, rsize); + } + else { + /* If the reneg buffer size is set to zero, just fail. */ + rv = HTTP_REQUEST_ENTITY_TOO_LARGE; + } + + return rv; +} + #ifdef HAVE_TLSEXT static int ap_array_same_str_set(apr_array_header_t *s1, apr_array_header_t *s2) { @@ -814,41 +853,14 @@ static int ssl_hook_Access_classic(request_rec *r, SSLSrvConfigRec *sc, SSLDirCo } } - /* If a renegotiation is now required for this location, and the - * request includes a message body (and the client has not - * requested a "100 Continue" response), then the client will be - * streaming the request body over the wire already. In that - * case, it is not possible to stop and perform a new SSL - * handshake immediately; once the SSL library moves to the - * "accept" state, it will reject the SSL packets which the client - * is sending for the request body. - * - * To allow authentication to complete in this auth hook, the - * solution used here is to fill a (bounded) buffer with the - * request body, and then to reinject that request body later. - */ - if (renegotiate && !renegotiate_quick - && !r->expecting_100 - && ap_request_has_body(r)) { - int rv; - apr_size_t rsize; - - rsize = dc->nRenegBufferSize == UNSET ? DEFAULT_RENEG_BUFFER_SIZE : - dc->nRenegBufferSize; - if (rsize > 0) { - /* Fill the I/O buffer with the request body if possible. */ - rv = ssl_io_buffer_fill(r, rsize); - } - else { - /* If the reneg buffer size is set to zero, just fail. */ - rv = HTTP_REQUEST_ENTITY_TOO_LARGE; - } - - if (rv) { + /* Fill reneg buffer if required. */ + if (renegotiate && !renegotiate_quick) { + rc = fill_reneg_buffer(r, dc); + if (rc) { ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(02257) "could not buffer message body to allow " "SSL renegotiation to proceed"); - return rv; + return rc; } } @@ -1132,6 +1144,7 @@ static int ssl_hook_Access_modern(request_rec *r, SSLSrvConfigRec *sc, SSLDirCon } } + /* Fill reneg buffer if required. */ if (change_vmode) { char peekbuf[1]; @@ -1144,7 +1157,16 @@ static int ssl_hook_Access_modern(request_rec *r, SSLSrvConfigRec *sc, SSLDirCon return HTTP_FORBIDDEN; } - ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(10129) "verify client post handshake"); + rc = fill_reneg_buffer(r, dc); + if (rc) { + ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(10228) + "could not buffer message body to allow " + "TLS Post-Handshake Authentication to proceed"); + return rc; + } + + ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(10129) + "verify client post handshake"); SSL_set_verify(ssl, vmode_needed, ssl_callback_SSLVerify); @@ -1154,6 +1176,7 @@ static int ssl_hook_Access_modern(request_rec *r, SSLSrvConfigRec *sc, SSLDirCon ssl_log_ssl_error(SSLLOG_MARK, APLOG_ERR, r->server); apr_table_setn(r->notes, "error-notes", "Reason: Cannot perform Post-Handshake Authentication.
"); + SSL_set_verify(ssl, vmode_inplace, NULL); return HTTP_FORBIDDEN; } @@ -1175,6 +1198,7 @@ static int ssl_hook_Access_modern(request_rec *r, SSLSrvConfigRec *sc, SSLDirCon * Finally check for acceptable renegotiation results */ if (OK != (rc = ssl_check_post_client_verify(r, sc, dc, sslconn, ssl))) { + SSL_set_verify(ssl, vmode_inplace, NULL); return rc; } } @@ -1661,6 +1685,7 @@ const authz_provider ssl_authz_provider_verify_client = ** _________________________________________________________________ */ +#if MODSSL_USE_OPENSSL_PRE_1_1_API /* * Hand out standard DH parameters, based on the authentication strength */ @@ -1706,6 +1731,7 @@ DH *ssl_callback_TmpDH(SSL *ssl, int export, int keylen) return modssl_get_dh_params(keylen); } +#endif /* * This OpenSSL callback function is called when OpenSSL @@ -1723,7 +1749,7 @@ int ssl_callback_SSLVerify(int ok, X509_STORE_CTX *ctx) SSLSrvConfigRec *sc = mySrvConfig(s); SSLConnRec *sslconn = myConnConfig(conn); SSLDirConfigRec *dc = r ? myDirConfig(r) : sslconn->dc; - modssl_ctx_t *mctx = myCtxConfig(sslconn, sc); + modssl_ctx_t *mctx = myConnCtxConfig(conn, sc); int crl_check_mode = mctx->crl_check_mask & ~SSL_CRLCHECK_FLAGS; /* Get verify ingredients */ @@ -1747,7 +1773,7 @@ int ssl_callback_SSLVerify(int ok, X509_STORE_CTX *ctx) * Check for optionally acceptable non-verifiable issuer situation */ if (dc) { - if (sslconn->is_proxy) { + if (conn->outgoing) { verify = dc->proxy->auth.verify_mode; } else { @@ -1810,8 +1836,8 @@ int ssl_callback_SSLVerify(int ok, X509_STORE_CTX *ctx) /* * Perform OCSP-based revocation checks */ - if (ok && ((sc->server->ocsp_mask & SSL_OCSPCHECK_CHAIN) || - (errdepth == 0 && (sc->server->ocsp_mask & SSL_OCSPCHECK_LEAF)))) { + if (ok && ((mctx->ocsp_mask & SSL_OCSPCHECK_CHAIN) || + (errdepth == 0 && (mctx->ocsp_mask & SSL_OCSPCHECK_LEAF)))) { /* If there was an optional verification error, it's not * possible to perform OCSP validation since the issuer may be * missing/untrusted. Fail in that case. */ @@ -1859,7 +1885,7 @@ int ssl_callback_SSLVerify(int ok, X509_STORE_CTX *ctx) * Finally check the depth of the certificate verification */ if (dc) { - if (sslconn->is_proxy) { + if (conn->outgoing) { depth = dc->proxy->auth.verify_depth; } else { @@ -1911,7 +1937,7 @@ static void modssl_proxy_info_log(conn_rec *c, *cert = info->x509; \ CRYPTO_add(&(*cert)->references, +1, CRYPTO_LOCK_X509); \ *pkey = info->x_pkey->dec_pkey; \ - CRYPTO_add(&(*pkey)->references, +1, CRYPTO_LOCK_X509_PKEY) + CRYPTO_add(&(*pkey)->references, +1, CRYPTO_LOCK_EVP_PKEY) #else #define modssl_set_cert_info(info, cert, pkey) \ *cert = info->x509; \ @@ -2268,7 +2294,7 @@ void ssl_callback_Info(const SSL *ssl, int where, int rc) /* If the reneg state is to reject renegotiations, check the SSL * state machine and move to ABORT if a Client Hello is being * read. 
*/ - if (!sslconn->is_proxy && + if (!c->outgoing && (where & SSL_CB_HANDSHAKE_START) && sslconn->reneg_state == RENEG_REJECT) { sslconn->reneg_state = RENEG_ABORT; @@ -2290,60 +2316,89 @@ void ssl_callback_Info(const SSL *ssl, int where, int rc) } #ifdef HAVE_TLSEXT + +static apr_status_t set_challenge_creds(conn_rec *c, const char *servername, + SSL *ssl, X509 *cert, EVP_PKEY *key, + const char *cert_pem, const char *key_pem) +{ + SSLConnRec *sslcon = myConnConfig(c); + apr_status_t rv = APR_SUCCESS; + int our_data = 0; + + sslcon->service_unavailable = 1; + if (cert_pem) { + cert = NULL; + key = NULL; + our_data = 1; + + rv = modssl_read_cert(c->pool, cert_pem, key_pem, NULL, NULL, &cert, &key); + if (rv != APR_SUCCESS) { + ap_log_cerror(APLOG_MARK, APLOG_WARNING, 0, c, APLOGNO(10266) + "Failed to parse PEM of challenge certificate %s", + servername); + goto cleanup; + } + } + + if ((SSL_use_certificate(ssl, cert) < 1)) { + ap_log_cerror(APLOG_MARK, APLOG_WARNING, 0, c, APLOGNO(10086) + "Failed to configure challenge certificate %s", + servername); + rv = APR_EGENERAL; + goto cleanup; + } + + if (!SSL_use_PrivateKey(ssl, key)) { + ap_log_cerror(APLOG_MARK, APLOG_WARNING, 0, c, APLOGNO(10087) + "error '%s' using Challenge key: %s", + ERR_error_string(ERR_peek_last_error(), NULL), + servername); + rv = APR_EGENERAL; + goto cleanup; + } + + if (SSL_check_private_key(ssl) < 1) { + ap_log_cerror(APLOG_MARK, APLOG_WARNING, 0, c, APLOGNO(10088) + "Challenge certificate and private key %s " + "do not match", servername); + rv = APR_EGENERAL; + goto cleanup; + } + +cleanup: + if (our_data && cert) X509_free(cert); + if (our_data && key) EVP_PKEY_free(key); + return APR_SUCCESS; +} + /* * This function sets the virtual host from an extended * client hello with a server name indication extension ("SNI", cf. RFC 6066). */ -static apr_status_t init_vhost(conn_rec *c, SSL *ssl) +static apr_status_t init_vhost(conn_rec *c, SSL *ssl, const char *servername) { - const char *servername; - X509 *cert; - EVP_PKEY *key; - if (c) { SSLConnRec *sslcon = myConnConfig(c); - - if (sslcon->server != c->base_server) { - /* already found the vhost */ - return APR_SUCCESS; + + if (sslcon->vhost_found) { + /* already found the vhost? */ + return sslcon->vhost_found > 0 ? 
APR_SUCCESS : APR_NOTFOUND; } + sslcon->vhost_found = -1; - servername = SSL_get_servername(ssl, TLSEXT_NAMETYPE_host_name); + if (!servername) { + servername = SSL_get_servername(ssl, TLSEXT_NAMETYPE_host_name); + } if (servername) { if (ap_vhost_iterate_given_conn(c, ssl_find_vhost, (void *)servername)) { ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, c, APLOGNO(02043) "SSL virtual host for servername %s found", servername); - + + sslcon->vhost_found = +1; return APR_SUCCESS; } - else if (ssl_is_challenge(c, servername, &cert, &key)) { - - sslcon->service_unavailable = 1; - if ((SSL_use_certificate(ssl, cert) < 1)) { - ap_log_cerror(APLOG_MARK, APLOG_WARNING, 0, c, APLOGNO(10086) - "Failed to configure challenge certificate %s", - servername); - return APR_EGENERAL; - } - - if (!SSL_use_PrivateKey(ssl, key)) { - ap_log_cerror(APLOG_MARK, APLOG_WARNING, 0, c, APLOGNO(10087) - "error '%s' using Challenge key: %s", - ERR_error_string(ERR_peek_last_error(), NULL), - servername); - return APR_EGENERAL; - } - - if (SSL_check_private_key(ssl) < 1) { - ap_log_cerror(APLOG_MARK, APLOG_WARNING, 0, c, APLOGNO(10088) - "Challenge certificate and private key %s " - "do not match", servername); - return APR_EGENERAL; - } - - } else { ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, c, APLOGNO(02044) "No matching SSL virtual host for servername " @@ -2383,11 +2438,71 @@ static apr_status_t init_vhost(conn_rec *c, SSL *ssl) int ssl_callback_ServerNameIndication(SSL *ssl, int *al, modssl_ctx_t *mctx) { conn_rec *c = (conn_rec *)SSL_get_app_data(ssl); - apr_status_t status = init_vhost(c, ssl); + apr_status_t status = init_vhost(c, ssl, NULL); return (status == APR_SUCCESS)? SSL_TLSEXT_ERR_OK : SSL_TLSEXT_ERR_NOACK; } +#if OPENSSL_VERSION_NUMBER >= 0x10101000L && !defined(LIBRESSL_VERSION_NUMBER) +/* + * This callback function is called when the ClientHello is received. + */ +int ssl_callback_ClientHello(SSL *ssl, int *al, void *arg) +{ + char *servername = NULL; + conn_rec *c = (conn_rec *)SSL_get_app_data(ssl); + const unsigned char *pos; + size_t len, remaining; + (void)arg; + + /* We can't use SSL_get_servername() at this earliest OpenSSL connection + * stage, and there is no SSL_client_hello_get0_servername() provided as + * of OpenSSL 1.1.1. So the code below, that extracts the SNI from the + * ClientHello's TLS extensions, is taken from some test code in OpenSSL, + * i.e. client_hello_select_server_ctx() in "test/handshake_helper.c". + */ + + /* + * The server_name extension was given too much extensibility when it + * was written, so parsing the normal case is a bit complex. + */ + if (!SSL_client_hello_get0_ext(ssl, TLSEXT_TYPE_server_name, &pos, + &remaining) + || remaining <= 2) + goto give_up; + + /* Extract the length of the supplied list of names. */ + len = (*(pos++) << 8); + len += *(pos++); + if (len + 2 != remaining) + goto give_up; + remaining = len; + + /* + * The list in practice only has a single element, so we only consider + * the first one. + */ + if (remaining <= 3 || *pos++ != TLSEXT_NAMETYPE_host_name) + goto give_up; + remaining--; + + /* Now we can finally pull out the byte array with the actual hostname. */ + len = (*(pos++) << 8); + len += *(pos++); + if (len + 2 != remaining) + goto give_up; + + /* Use the SNI to switch to the relevant vhost, should it differ from + * c->base_server. 
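The ClientHello callback above walks the raw server_name extension by hand. The same layout, condensed into a free-standing parser for illustration; like the comment notes, it assumes a single host_name entry.

    #include <stddef.h>

    /* Layout: [2: list length] [1: type = 0 (host_name)] [2: name length] [name] */
    static int parse_sni(const unsigned char *ext, size_t extlen,
                         const unsigned char **name, size_t *namelen)
    {
        size_t list_len, host_len;

        if (extlen < 5)
            return 0;
        list_len = ((size_t)ext[0] << 8) | ext[1];
        if (list_len + 2 != extlen || ext[2] != 0)
            return 0;
        host_len = ((size_t)ext[3] << 8) | ext[4];
        if (host_len + 3 != list_len)
            return 0;
        *name = ext + 5;
        *namelen = host_len;
        return 1;
    }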
+ */ + servername = apr_pstrmemdup(c->pool, (const char *)pos, len); + +give_up: + init_vhost(c, ssl, servername); + return SSL_CLIENT_HELLO_SUCCESS; +} +#endif /* OPENSSL_VERSION_NUMBER < 0x10101000L */ + /* * Find a (name-based) SSL virtual host where either the ServerName * or one of the ServerAliases matches the supplied name (to be used @@ -2407,12 +2522,25 @@ static int ssl_find_vhost(void *servername, conn_rec *c, server_rec *s) if (found && (ssl = sslcon->ssl) && (sc = mySrvConfig(s))) { SSL_CTX *ctx = SSL_set_SSL_CTX(ssl, sc->server->ssl_ctx); + /* * SSL_set_SSL_CTX() only deals with the server cert, * so we need to duplicate a few additional settings * from the ctx by hand */ SSL_set_options(ssl, SSL_CTX_get_options(ctx)); +#if OPENSSL_VERSION_NUMBER >= 0x1010007fL \ + && (!defined(LIBRESSL_VERSION_NUMBER) \ + || LIBRESSL_VERSION_NUMBER >= 0x20800000L) + /* + * Don't switch the protocol if none is configured for this vhost, + * the default in this case is still the base server's SSLProtocol. + */ + if (myConnCtxConfig(c, sc)->protocol_set) { + SSL_set_min_proto_version(ssl, SSL_CTX_get_min_proto_version(ctx)); + SSL_set_max_proto_version(ssl, SSL_CTX_get_max_proto_version(ctx)); + } +#endif if ((SSL_get_verify_mode(ssl) == SSL_VERIFY_NONE) || (SSL_num_renegotiations(ssl) == 0)) { /* @@ -2453,6 +2581,7 @@ static int ssl_find_vhost(void *servername, conn_rec *c, server_rec *s) sc->server->pks->service_unavailable : 0; ap_update_child_status_from_server(c->sbh, SERVER_BUSY_READ, c, s); + /* * There is one special filter callback, which is set * very early depending on the base_server's log level. @@ -2461,14 +2590,7 @@ static int ssl_find_vhost(void *servername, conn_rec *c, server_rec *s) * we need to set that callback here. */ if (APLOGtrace4(s)) { - BIO *rbio = SSL_get_rbio(ssl), - *wbio = SSL_get_wbio(ssl); - BIO_set_callback(rbio, ssl_io_data_cb); - BIO_set_callback_arg(rbio, (void *)ssl); - if (wbio && wbio != rbio) { - BIO_set_callback(wbio, ssl_io_data_cb); - BIO_set_callback_arg(wbio, (void *)ssl); - } + modssl_set_io_callbacks(ssl); } return 1; @@ -2488,14 +2610,17 @@ int ssl_callback_SessionTicket(SSL *ssl, unsigned char *keyname, unsigned char *iv, EVP_CIPHER_CTX *cipher_ctx, - HMAC_CTX *hctx, +#if OPENSSL_VERSION_NUMBER < 0x30000000L + HMAC_CTX *hmac_ctx, +#else + EVP_MAC_CTX *mac_ctx, +#endif int mode) { conn_rec *c = (conn_rec *)SSL_get_app_data(ssl); server_rec *s = mySrvFromConn(c); SSLSrvConfigRec *sc = mySrvConfig(s); - SSLConnRec *sslconn = myConnConfig(c); - modssl_ctx_t *mctx = myCtxConfig(sslconn, sc); + modssl_ctx_t *mctx = myConnCtxConfig(c, sc); modssl_ticket_key_t *ticket_key = mctx->ticket_key; if (mode == 1) { @@ -2515,7 +2640,13 @@ int ssl_callback_SessionTicket(SSL *ssl, } EVP_EncryptInit_ex(cipher_ctx, EVP_aes_128_cbc(), NULL, ticket_key->aes_key, iv); - HMAC_Init_ex(hctx, ticket_key->hmac_secret, 16, tlsext_tick_md(), NULL); + +#if OPENSSL_VERSION_NUMBER < 0x30000000L + HMAC_Init_ex(hmac_ctx, ticket_key->hmac_secret, 16, + tlsext_tick_md(), NULL); +#else + EVP_MAC_CTX_set_params(mac_ctx, ticket_key->mac_params); +#endif ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, c, APLOGNO(02289) "TLS session ticket key for %s successfully set, " @@ -2536,7 +2667,13 @@ int ssl_callback_SessionTicket(SSL *ssl, EVP_DecryptInit_ex(cipher_ctx, EVP_aes_128_cbc(), NULL, ticket_key->aes_key, iv); - HMAC_Init_ex(hctx, ticket_key->hmac_secret, 16, tlsext_tick_md(), NULL); + +#if OPENSSL_VERSION_NUMBER < 0x30000000L + HMAC_Init_ex(hmac_ctx, ticket_key->hmac_secret, 16, + 
tlsext_tick_md(), NULL); +#else + EVP_MAC_CTX_set_params(mac_ctx, ticket_key->mac_params); +#endif ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, c, APLOGNO(02290) "TLS session ticket key for %s successfully set, " @@ -2609,7 +2746,7 @@ int ssl_callback_alpn_select(SSL *ssl, * they callback the SNI. We need to make sure that we know which vhost * we are dealing with so we respect the correct protocols. */ - init_vhost(c, ssl); + init_vhost(c, ssl, NULL); proposed = ap_select_protocol(c, NULL, sslconn->server, client_protos); if (!proposed) { @@ -2635,6 +2772,26 @@ int ssl_callback_alpn_select(SSL *ssl, proposed); return SSL_TLSEXT_ERR_ALERT_FATAL; } + + /* protocol was switched, this could be a challenge protocol such as "acme-tls/1". + * For that to work, we need to allow overrides to our ssl certificate. + * However, exclude challenge checks on our best known traffic protocol. + * (http/1.1 is the default, we never switch to it anyway.) + */ + if (strcmp("h2", proposed)) { + const char *servername = SSL_get_servername(ssl, TLSEXT_NAMETYPE_host_name); + X509 *cert; + EVP_PKEY *key; + const char *cert_pem, *key_pem; + + if (ssl_is_challenge(c, servername, &cert, &key, &cert_pem, &key_pem)) { + if (set_challenge_creds(c, servername, ssl, cert, key, + cert_pem, key_pem) != APR_SUCCESS) { + return SSL_TLSEXT_ERR_ALERT_FATAL; + } + SSL_set_verify(ssl, SSL_VERIFY_NONE, ssl_callback_SSLVerify); + } + } } return SSL_TLSEXT_ERR_OK; @@ -2676,3 +2833,17 @@ int ssl_callback_SRPServerParams(SSL *ssl, int *ad, void *arg) } #endif /* HAVE_SRP */ + + +#ifdef HAVE_OPENSSL_KEYLOG +/* Callback used with SSL_CTX_set_keylog_callback. */ +void modssl_callback_keylog(const SSL *ssl, const char *line) +{ + conn_rec *conn = SSL_get_app_data(ssl); + SSLSrvConfigRec *sc = mySrvConfig(conn->base_server); + + if (sc && sc->mc->keylog_file) { + apr_file_printf(sc->mc->keylog_file, "%s\n", line); + } +} +#endif diff --git a/modules/ssl/ssl_engine_log.c b/modules/ssl/ssl_engine_log.c index d2f9ed0..3b3ceac 100644 --- a/modules/ssl/ssl_engine_log.c +++ b/modules/ssl/ssl_engine_log.c @@ -78,6 +78,16 @@ apr_status_t ssl_die(server_rec *s) return APR_EGENERAL; } +static APR_INLINE +unsigned long modssl_ERR_peek_error_data(const char **data, int *flags) +{ +#if OPENSSL_VERSION_NUMBER < 0x30000000L + return ERR_peek_error_line_data(NULL, NULL, data, flags); +#else + return ERR_peek_error_data(data, flags); +#endif +} + /* * Prints the SSL library error information. 
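The keylog callback added below is registered elsewhere with SSL_CTX_set_keylog_callback(). A minimal, self-contained wiring example; the FILE handle and helper are assumptions for illustration, not the module's own plumbing.

    #include <stdio.h>
    #include <openssl/ssl.h>

    static FILE *keylog_fp;

    static void keylog_cb(const SSL *ssl, const char *line)
    {
        (void)ssl;
        if (keylog_fp) {
            fprintf(keylog_fp, "%s\n", line);
            fflush(keylog_fp);
        }
    }

    static void enable_keylog(SSL_CTX *ctx, const char *path)
    {
        keylog_fp = fopen(path, "a");
        SSL_CTX_set_keylog_callback(ctx, keylog_cb);
    }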
*/ @@ -87,7 +97,7 @@ void ssl_log_ssl_error(const char *file, int line, int level, server_rec *s) const char *data; int flags; - while ((e = ERR_peek_error_line_data(NULL, NULL, &data, &flags))) { + while ((e = modssl_ERR_peek_error_data(&data, &flags))) { const char *annotation; char err[256]; @@ -123,10 +133,8 @@ static void ssl_log_cert_error(const char *file, int line, int level, int msglen, n; char *name; - apr_vsnprintf(buf, sizeof buf, format, ap); - - msglen = strlen(buf); - + msglen = apr_vsnprintf(buf, sizeof buf, format, ap); + if (cert) { BIO *bio = BIO_new(BIO_s_mem()); diff --git a/modules/ssl/ssl_engine_ocsp.c b/modules/ssl/ssl_engine_ocsp.c index ef92c37..5e04512 100644 --- a/modules/ssl/ssl_engine_ocsp.c +++ b/modules/ssl/ssl_engine_ocsp.c @@ -86,7 +86,7 @@ static apr_uri_t *determine_responder_uri(SSLSrvConfigRec *sc, X509 *cert, return NULL; } - if (strcasecmp(u->scheme, "http") != 0) { + if (ap_cstr_casecmp(u->scheme, "http") != 0) { ap_log_cerror(APLOG_MARK, APLOG_DEBUG, rv, c, APLOGNO(01920) "cannot handle OCSP responder URI '%s'", s); return NULL; @@ -284,6 +284,7 @@ int modssl_verify_ocsp(X509_STORE_CTX *ctx, SSLSrvConfigRec *sc, /* Create a temporary pool to constrain memory use (the passed-in * pool may be e.g. a connection pool). */ apr_pool_create(&vpool, pool); + apr_pool_tag(vpool, "modssl_verify_ocsp"); rv = verify_ocsp_status(cert, ctx, c, sc, s, vpool); diff --git a/modules/ssl/ssl_engine_pphrase.c b/modules/ssl/ssl_engine_pphrase.c index 8c29443..699019f 100644 --- a/modules/ssl/ssl_engine_pphrase.c +++ b/modules/ssl/ssl_engine_pphrase.c @@ -30,6 +30,8 @@ -- Clifford Stoll */ #include "ssl_private.h" +#include + typedef struct { server_rec *s; apr_pool_t *p; @@ -143,8 +145,6 @@ apr_status_t ssl_load_encrypted_pkey(server_rec *s, apr_pool_t *p, int idx, const char *key_id = asn1_table_vhost_key(mc, p, sc->vhost_id, idx); EVP_PKEY *pPrivateKey = NULL; ssl_asn1_t *asn1; - unsigned char *ucp; - long int length; int nPassPhrase = (*pphrases)->nelts; int nPassPhraseRetry = 0; apr_time_t pkey_mtime = 0; @@ -221,7 +221,7 @@ apr_status_t ssl_load_encrypted_pkey(server_rec *s, apr_pool_t *p, int idx, * is not empty. */ ERR_clear_error(); - pPrivateKey = modssl_read_privatekey(ppcb_arg.pkey_file, NULL, + pPrivateKey = modssl_read_privatekey(ppcb_arg.pkey_file, ssl_pphrase_Handle_CB, &ppcb_arg); /* If the private key was successfully read, nothing more to do here. */ @@ -351,19 +351,12 @@ apr_status_t ssl_load_encrypted_pkey(server_rec *s, apr_pool_t *p, int idx, nPassPhrase++; } - /* - * Insert private key into the global module configuration - * (we convert it to a stand-alone DER byte sequence - * because the SSL library uses static variables inside a - * RSA structure which do not survive DSO reloads!) - */ - length = i2d_PrivateKey(pPrivateKey, NULL); - ucp = ssl_asn1_table_set(mc->tPrivateKey, key_id, length); - (void)i2d_PrivateKey(pPrivateKey, &ucp); /* 2nd arg increments */ + /* Cache the private key in the global module configuration so it + * can be used after subsequent reloads. 
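A sketch, not part of the change itself: the DER round-trip that the reworked ssl_asn1_table_set() (see the ssl_util.c hunk later in this patch) performs internally so a decrypted key can outlive a module reload. "pkey" stands for an existing EVP_PKEY *, and the standalone variables are illustrative only.

    /* First pass asks only for the encoded length, second pass encodes
     * into a process-lifetime buffer; i2d_PrivateKey() advances "p". */
    int len = i2d_PrivateKey(pkey, NULL);
    unsigned char *der = ap_malloc(len), *p = der;
    i2d_PrivateKey(pkey, &p);

    /* After a reload the EVP_PKEY can be rebuilt from the cached bytes. */
    const unsigned char *cp = der;
    EVP_PKEY *restored = d2i_AutoPrivateKey(NULL, &cp, len);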
*/ + asn1 = ssl_asn1_table_set(mc->tPrivateKey, key_id, pPrivateKey); if (ppcb_arg.nPassPhraseDialogCur != 0) { /* remember mtime of encrypted keys */ - asn1 = ssl_asn1_table_get(mc->tPrivateKey, key_id); asn1->source_mtime = pkey_mtime; } @@ -614,3 +607,306 @@ int ssl_pphrase_Handle_CB(char *buf, int bufsize, int verify, void *srv) */ return (len); } + +#if MODSSL_HAVE_ENGINE_API + +/* OpenSSL UI implementation for passphrase entry; largely duplicated + * from ssl_pphrase_Handle_CB but adjusted for UI API. TODO: Might be + * worth trying to shift pphrase handling over to the UI API + * completely. */ +static int passphrase_ui_open(UI *ui) +{ + pphrase_cb_arg_t *ppcb = UI_get0_user_data(ui); + SSLSrvConfigRec *sc = mySrvConfig(ppcb->s); + + ppcb->nPassPhraseDialog++; + ppcb->nPassPhraseDialogCur++; + + /* + * Builtin or Pipe dialog + */ + if (sc->server->pphrase_dialog_type == SSL_PPTYPE_BUILTIN + || sc->server->pphrase_dialog_type == SSL_PPTYPE_PIPE) { + if (sc->server->pphrase_dialog_type == SSL_PPTYPE_PIPE) { + if (!readtty) { + ap_log_error(APLOG_MARK, APLOG_INFO, 0, ppcb->s, + APLOGNO(10143) + "Init: Creating pass phrase dialog pipe child " + "'%s'", sc->server->pphrase_dialog_path); + if (ssl_pipe_child_create(ppcb->p, + sc->server->pphrase_dialog_path) + != APR_SUCCESS) { + ap_log_error(APLOG_MARK, APLOG_ERR, 0, ppcb->s, + APLOGNO(10144) + "Init: Failed to create pass phrase pipe '%s'", + sc->server->pphrase_dialog_path); + return 0; + } + } + ap_log_error(APLOG_MARK, APLOG_INFO, 0, ppcb->s, APLOGNO(10145) + "Init: Requesting pass phrase via piped dialog"); + } + else { /* sc->server->pphrase_dialog_type == SSL_PPTYPE_BUILTIN */ +#ifdef WIN32 + ap_log_error(APLOG_MARK, APLOG_ERR, 0, ppcb->s, APLOGNO(10146) + "Init: Failed to create pass phrase pipe '%s'", + sc->server->pphrase_dialog_path); + return 0; +#else + /* + * stderr has already been redirected to the error_log. + * rather than attempting to temporarily rehook it to the terminal, + * we print the prompt to stdout before EVP_read_pw_string turns + * off tty echo + */ + apr_file_open_stdout(&writetty, ppcb->p); + + ap_log_error(APLOG_MARK, APLOG_INFO, 0, ppcb->s, APLOGNO(10147) + "Init: Requesting pass phrase via builtin terminal " + "dialog"); +#endif + } + + /* + * The first time display a header to inform the user about what + * program he actually speaks to, which module is responsible for + * this terminal dialog and why to the hell he has to enter + * something... 
+ */ + if (ppcb->nPassPhraseDialog == 1) { + apr_file_printf(writetty, "%s mod_ssl (Pass Phrase Dialog)\n", + AP_SERVER_BASEVERSION); + apr_file_printf(writetty, + "A pass phrase is required to access the private key.\n"); + } + if (ppcb->bPassPhraseDialogOnce) { + ppcb->bPassPhraseDialogOnce = FALSE; + apr_file_printf(writetty, "\n"); + apr_file_printf(writetty, "Private key %s (%s)\n", + ppcb->key_id, ppcb->pkey_file); + } + } + + return 1; +} + +static int passphrase_ui_read(UI *ui, UI_STRING *uis) +{ + pphrase_cb_arg_t *ppcb = UI_get0_user_data(ui); + SSLSrvConfigRec *sc = mySrvConfig(ppcb->s); + const char *prompt; + int i; + int bufsize; + int len; + char *buf; + + prompt = UI_get0_output_string(uis); + if (prompt == NULL) { + prompt = "Enter pass phrase:"; + } + + /* + * Get the maximum expected size and allocate the buffer + */ + bufsize = UI_get_result_maxsize(uis); + buf = apr_pcalloc(ppcb->p, bufsize); + + if (sc->server->pphrase_dialog_type == SSL_PPTYPE_BUILTIN + || sc->server->pphrase_dialog_type == SSL_PPTYPE_PIPE) { + /* + * Get the pass phrase through a callback. + * Empty input is not accepted. + */ + for (;;) { + if (sc->server->pphrase_dialog_type == SSL_PPTYPE_PIPE) { + i = pipe_get_passwd_cb(buf, bufsize, "", FALSE); + } + else { /* sc->server->pphrase_dialog_type == SSL_PPTYPE_BUILTIN */ + i = EVP_read_pw_string(buf, bufsize, "", FALSE); + } + if (i != 0) { + OPENSSL_cleanse(buf, bufsize); + return 0; + } + len = strlen(buf); + if (len < 1){ + apr_file_printf(writetty, "Apache:mod_ssl:Error: Pass phrase " + "empty (needs to be at least 1 character).\n"); + apr_file_puts(prompt, writetty); + } + else { + break; + } + } + } + /* + * Filter program + */ + else if (sc->server->pphrase_dialog_type == SSL_PPTYPE_FILTER) { + const char *cmd = sc->server->pphrase_dialog_path; + const char **argv = apr_palloc(ppcb->p, sizeof(char *) * 3); + char *result; + + ap_log_error(APLOG_MARK, APLOG_INFO, 0, ppcb->s, APLOGNO(10148) + "Init: Requesting pass phrase from dialog filter " + "program (%s)", cmd); + + argv[0] = cmd; + argv[1] = ppcb->key_id; + argv[2] = NULL; + + result = ssl_util_readfilter(ppcb->s, ppcb->p, cmd, argv); + apr_cpystrn(buf, result, bufsize); + len = strlen(buf); + } + + /* + * Ok, we now have the pass phrase, so give it back + */ + ppcb->cpPassPhraseCur = apr_pstrdup(ppcb->p, buf); + UI_set_result(ui, uis, buf); + + /* Clear sensitive data.
*/ + OPENSSL_cleanse(buf, bufsize); + return 1; +} + +static int passphrase_ui_write(UI *ui, UI_STRING *uis) +{ + pphrase_cb_arg_t *ppcb = UI_get0_user_data(ui); + SSLSrvConfigRec *sc; + const char *prompt; + + sc = mySrvConfig(ppcb->s); + + if (sc->server->pphrase_dialog_type == SSL_PPTYPE_BUILTIN + || sc->server->pphrase_dialog_type == SSL_PPTYPE_PIPE) { + prompt = UI_get0_output_string(uis); + apr_file_puts(prompt, writetty); + } + + return 1; +} + +static int passphrase_ui_close(UI *ui) +{ + /* + * Close the pipes if they were opened + */ + if (readtty) { + apr_file_close(readtty); + apr_file_close(writetty); + readtty = writetty = NULL; + } + return 1; +} + +static apr_status_t pp_ui_method_cleanup(void *uip) +{ + UI_METHOD *uim = uip; + + UI_destroy_method(uim); + + return APR_SUCCESS; +} + +static UI_METHOD *get_passphrase_ui(apr_pool_t *p) +{ + UI_METHOD *ui_method = UI_create_method("Passphrase UI"); + + UI_method_set_opener(ui_method, passphrase_ui_open); + UI_method_set_reader(ui_method, passphrase_ui_read); + UI_method_set_writer(ui_method, passphrase_ui_write); + UI_method_set_closer(ui_method, passphrase_ui_close); + + apr_pool_cleanup_register(p, ui_method, pp_ui_method_cleanup, + pp_ui_method_cleanup); + + return ui_method; +} +#endif + + +apr_status_t modssl_load_engine_keypair(server_rec *s, apr_pool_t *p, + const char *vhostid, + const char *certid, const char *keyid, + X509 **pubkey, EVP_PKEY **privkey) +{ +#if MODSSL_HAVE_ENGINE_API + const char *c, *scheme; + ENGINE *e; + UI_METHOD *ui_method = get_passphrase_ui(p); + pphrase_cb_arg_t ppcb; + + memset(&ppcb, 0, sizeof ppcb); + ppcb.s = s; + ppcb.p = p; + ppcb.bPassPhraseDialogOnce = TRUE; + ppcb.key_id = vhostid; + ppcb.pkey_file = keyid; + + c = ap_strchr_c(keyid, ':'); + if (!c || c == keyid) { + ap_log_error(APLOG_MARK, APLOG_EMERG, 0, s, APLOGNO(10131) + "Init: Unrecognized private key identifier `%s'", + keyid); + return ssl_die(s); + } + + scheme = apr_pstrmemdup(p, keyid, c - keyid); + if (!(e = ENGINE_by_id(scheme))) { + ap_log_error(APLOG_MARK, APLOG_EMERG, 0, s, APLOGNO(10132) + "Init: Failed to load engine for private key %s", + keyid); + ssl_log_ssl_error(SSLLOG_MARK, APLOG_EMERG, s); + return ssl_die(s); + } + + if (!ENGINE_init(e)) { + ap_log_error(APLOG_MARK, APLOG_EMERG, 0, s, APLOGNO(10149) + "Init: Failed to initialize engine %s for private key %s", + scheme, keyid); + ssl_log_ssl_error(SSLLOG_MARK, APLOG_EMERG, s); + return ssl_die(s); + } + + ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s, + "Init: Initialized engine %s for private key %s", + scheme, keyid); + + if (APLOGdebug(s)) { + ENGINE_ctrl_cmd_string(e, "VERBOSE", NULL, 0); + } + + if (certid) { + struct { + const char *cert_id; + X509 *cert; + } params = { certid, NULL }; + + if (!ENGINE_ctrl_cmd(e, "LOAD_CERT_CTRL", 0, ¶ms, NULL, 1)) { + ap_log_error(APLOG_MARK, APLOG_EMERG, 0, s, APLOGNO(10136) + "Init: Unable to get the certificate"); + ssl_log_ssl_error(SSLLOG_MARK, APLOG_EMERG, s); + return ssl_die(s); + } + + *pubkey = params.cert; + } + + *privkey = ENGINE_load_private_key(e, keyid, ui_method, &ppcb); + if (*privkey == NULL) { + ap_log_error(APLOG_MARK, APLOG_EMERG, 0, s, APLOGNO(10133) + "Init: Unable to get the private key"); + ssl_log_ssl_error(SSLLOG_MARK, APLOG_EMERG, s); + return ssl_die(s); + } + + ENGINE_finish(e); + ENGINE_free(e); + + return APR_SUCCESS; +#else + return APR_ENOTIMPL; +#endif +} diff --git a/modules/ssl/ssl_engine_vars.c b/modules/ssl/ssl_engine_vars.c index 5724f18..418d849 100644 --- 
a/modules/ssl/ssl_engine_vars.c +++ b/modules/ssl/ssl_engine_vars.c @@ -65,10 +65,10 @@ static SSLConnRec *ssl_get_effective_config(conn_rec *c) return sslconn; } -static int ssl_is_https(conn_rec *c) +static int ssl_conn_is_ssl(conn_rec *c) { - SSLConnRec *sslconn = ssl_get_effective_config(c); - return sslconn && sslconn->ssl; + const SSLConnRec *sslconn = ssl_get_effective_config(c); + return (sslconn && sslconn->ssl)? OK : DECLINED; } static const char var_interface[] = "mod_ssl/" AP_SERVER_BASEREVISION; @@ -137,7 +137,7 @@ void ssl_var_register(apr_pool_t *p) { char *cp, *cp2; - APR_REGISTER_OPTIONAL_FN(ssl_is_https); + ap_hook_ssl_conn_is_ssl(ssl_conn_is_ssl, NULL, NULL, APR_HOOK_MIDDLE); APR_REGISTER_OPTIONAL_FN(ssl_var_lookup); APR_REGISTER_OPTIONAL_FN(ssl_ext_list); @@ -460,18 +460,13 @@ static char *ssl_var_lookup_ssl_cert_dn_oneline(apr_pool_t *p, request_rec *r, } else { BIO* bio; - int n; unsigned long flags = XN_FLAG_RFC2253 & ~ASN1_STRFLGS_ESC_MSB; + if ((bio = BIO_new(BIO_s_mem())) == NULL) return NULL; X509_NAME_print_ex(bio, xsname, 0, flags); - n = BIO_pending(bio); - if (n > 0) { - result = apr_palloc(p, n+1); - n = BIO_read(bio, result, n); - result[n] = NUL; - } - BIO_free(bio); + + result = modssl_bio_free_read(p, bio); } return result; } @@ -635,7 +630,8 @@ static char *ssl_var_lookup_ssl_cert_dn(apr_pool_t *p, X509_NAME *xsname, static char *ssl_var_lookup_ssl_cert_san(apr_pool_t *p, X509 *xs, char *var) { - int type, numlen; + int type; + apr_size_t numlen; const char *onf = NULL; apr_array_header_t *entries; @@ -678,19 +674,13 @@ static char *ssl_var_lookup_ssl_cert_san(apr_pool_t *p, X509 *xs, char *var) static char *ssl_var_lookup_ssl_cert_valid(apr_pool_t *p, ASN1_TIME *tm) { - char *result; BIO* bio; - int n; if ((bio = BIO_new(BIO_s_mem())) == NULL) return NULL; ASN1_TIME_print(bio, tm); - n = BIO_pending(bio); - result = apr_pcalloc(p, n+1); - n = BIO_read(bio, result, n); - result[n] = NUL; - BIO_free(bio); - return result; + + return modssl_bio_free_read(p, bio); } #define DIGIT2NUM(x) (((x)[0] - '0') * 10 + (x)[1] - '0') @@ -739,19 +729,13 @@ static char *ssl_var_lookup_ssl_cert_remain(apr_pool_t *p, ASN1_TIME *tm) static char *ssl_var_lookup_ssl_cert_serial(apr_pool_t *p, X509 *xs) { - char *result; BIO *bio; - int n; if ((bio = BIO_new(BIO_s_mem())) == NULL) return NULL; i2a_ASN1_INTEGER(bio, X509_get_serialNumber(xs)); - n = BIO_pending(bio); - result = apr_pcalloc(p, n+1); - n = BIO_read(bio, result, n); - result[n] = NUL; - BIO_free(bio); - return result; + + return modssl_bio_free_read(p, bio); } static char *ssl_var_lookup_ssl_cert_chain(apr_pool_t *p, STACK_OF(X509) *sk, char *var) @@ -806,19 +790,13 @@ static char *ssl_var_lookup_ssl_cert_rfc4523_cea(apr_pool_t *p, SSL *ssl) static char *ssl_var_lookup_ssl_cert_PEM(apr_pool_t *p, X509 *xs) { - char *result; BIO *bio; - int n; if ((bio = BIO_new(BIO_s_mem())) == NULL) return NULL; PEM_write_bio_X509(bio, xs); - n = BIO_pending(bio); - result = apr_pcalloc(p, n+1); - n = BIO_read(bio, result, n); - result[n] = NUL; - BIO_free(bio); - return result; + + return modssl_bio_free_read(p, bio); } static char *ssl_var_lookup_ssl_cert_verify(apr_pool_t *p, SSLConnRec *sslconn) diff --git a/modules/ssl/ssl_private.h b/modules/ssl/ssl_private.h index f46814d..859e932 100644 --- a/modules/ssl/ssl_private.h +++ b/modules/ssl/ssl_private.h @@ -27,6 +27,7 @@ */ /** Apache headers */ +#include "ap_config.h" #include "httpd.h" #include "http_config.h" #include "http_core.h" @@ -35,6 +36,7 @@ #include 
"http_connection.h" #include "http_request.h" #include "http_protocol.h" +#include "http_ssl.h" #include "http_vhost.h" #include "util_script.h" #include "util_filter.h" @@ -81,13 +83,13 @@ #include "ap_expr.h" -/* OpenSSL headers */ -#include -#if (OPENSSL_VERSION_NUMBER >= 0x10001000) -/* must be defined before including ssl.h */ -#define OPENSSL_NO_SSL_INTERN +/* keep first for compat API */ +#ifndef OPENSSL_API_COMPAT +#define OPENSSL_API_COMPAT 0x10101000 /* for ENGINE_ API */ #endif -#include +#include "mod_ssl_openssl.h" + +/* OpenSSL headers */ #include #include #include @@ -97,12 +99,23 @@ #include #include #include +#include +#if OPENSSL_VERSION_NUMBER >= 0x30000000 +#include +#endif /* Avoid tripping over an engine build installed globally and detected * when the user points at an explicit non-engine flavor of OpenSSL */ -#if defined(HAVE_OPENSSL_ENGINE_H) && defined(HAVE_ENGINE_INIT) +#if defined(HAVE_OPENSSL_ENGINE_H) && defined(HAVE_ENGINE_INIT) \ + && (OPENSSL_VERSION_NUMBER < 0x30000000 \ + || (defined(OPENSSL_API_LEVEL) && OPENSSL_API_LEVEL < 30000)) \ + && !defined(OPENSSL_NO_ENGINE) #include +#define MODSSL_HAVE_ENGINE_API 1 +#endif +#ifndef MODSSL_HAVE_ENGINE_API +#define MODSSL_HAVE_ENGINE_API 0 #endif #if (OPENSSL_VERSION_NUMBER < 0x0090801f) @@ -132,18 +145,25 @@ SSL_CTX_ctrl(ctx, SSL_CTRL_SET_MIN_PROTO_VERSION, version, NULL) #define SSL_CTX_set_max_proto_version(ctx, version) \ SSL_CTX_ctrl(ctx, SSL_CTRL_SET_MAX_PROTO_VERSION, version, NULL) -#elif LIBRESSL_VERSION_NUMBER < 0x2070000f +#endif /* LIBRESSL_VERSION_NUMBER < 0x2060000f */ /* LibreSSL before 2.7 declares OPENSSL_VERSION_NUMBER == 2.0 but does not * include most changes from OpenSSL >= 1.1 (new functions, macros, * deprecations, ...), so we have to work around this... 
*/ -#define MODSSL_USE_OPENSSL_PRE_1_1_API (1) -#endif /* LIBRESSL_VERSION_NUMBER < 0x2060000f */ +#if LIBRESSL_VERSION_NUMBER < 0x2070000f +#define MODSSL_USE_OPENSSL_PRE_1_1_API 1 +#else +#define MODSSL_USE_OPENSSL_PRE_1_1_API 0 +#endif #else /* defined(LIBRESSL_VERSION_NUMBER) */ -#define MODSSL_USE_OPENSSL_PRE_1_1_API (OPENSSL_VERSION_NUMBER < 0x10100000L) +#if OPENSSL_VERSION_NUMBER < 0x10100000L +#define MODSSL_USE_OPENSSL_PRE_1_1_API 1 +#else +#define MODSSL_USE_OPENSSL_PRE_1_1_API 0 #endif +#endif /* defined(LIBRESSL_VERSION_NUMBER) */ -#if defined(OPENSSL_FIPS) +#if defined(OPENSSL_FIPS) || OPENSSL_VERSION_NUMBER >= 0x30000000L #define HAVE_FIPS #endif @@ -207,7 +227,10 @@ #endif /* Secure Remote Password */ -#if !defined(OPENSSL_NO_SRP) && defined(SSL_CTRL_SET_TLS_EXT_SRP_USERNAME_CB) +#if !defined(OPENSSL_NO_SRP) \ + && (OPENSSL_VERSION_NUMBER < 0x30000000L \ + || (defined(OPENSSL_API_LEVEL) && OPENSSL_API_LEVEL < 30000)) \ + && defined(SSL_CTRL_SET_TLS_EXT_SRP_USERNAME_CB) #define HAVE_SRP #include #endif @@ -250,6 +273,28 @@ void free_bio_methods(void); #endif #endif +/* those may be deprecated */ +#ifndef X509_get_notBefore +#define X509_get_notBefore X509_getm_notBefore +#endif +#ifndef X509_get_notAfter +#define X509_get_notAfter X509_getm_notAfter +#endif + +#if OPENSSL_VERSION_NUMBER >= 0x10101000L && !defined(LIBRESSL_VERSION_NUMBER) +#define HAVE_OPENSSL_KEYLOG +#endif + +#ifdef HAVE_FIPS +#if OPENSSL_VERSION_NUMBER >= 0x30000000L +#define modssl_fips_is_enabled() EVP_default_properties_is_fips_enabled(NULL) +#define modssl_fips_enable(to) EVP_default_properties_enable_fips(NULL, (to)) +#else +#define modssl_fips_is_enabled() FIPS_mode() +#define modssl_fips_enable(to) FIPS_mode_set((to)) +#endif +#endif /* HAVE_FIPS */ + /* mod_ssl headers */ #include "ssl_util_ssl.h" @@ -305,8 +350,8 @@ APLOG_USE_MODULE(ssl); ((SSLSrvConfigRec *)ap_get_module_config(srv->module_config, &ssl_module)) #define myDirConfig(req) \ ((SSLDirConfigRec *)ap_get_module_config(req->per_dir_config, &ssl_module)) -#define myCtxConfig(sslconn, sc) \ - (sslconn->is_proxy ? sslconn->dc->proxy : sc->server) +#define myConnCtxConfig(c, sc) \ + (c->outgoing ? myConnConfig(c)->dc->proxy : sc->server) #define myModConfig(srv) mySrvConfig((srv))->mc #define mySrvFromConn(c) myConnConfig(c)->server #define myDirConfigFromConn(c) myConnConfig(c)->dc @@ -527,7 +572,6 @@ typedef struct { const char *verify_info; const char *verify_error; int verify_depth; - int is_proxy; int disabled; enum { NON_SSL_OK = 0, /* is SSL request, or error handling completed */ @@ -554,6 +598,7 @@ typedef struct { const char *cipher_suite; /* cipher suite used in last reneg */ int service_unavailable; /* though we negotiate SSL, no requests will be served */ + int vhost_found; /* whether we found vhost from SNI already */ } SSLConnRec; /* BIG FAT WARNING: SSLModConfigRec has unusual memory lifetime: it is @@ -607,9 +652,7 @@ typedef struct { * index), for example the string "vhost.example.com:443:0". */ apr_hash_t *tPrivateKey; -#if defined(HAVE_OPENSSL_ENGINE_H) && defined(HAVE_ENGINE_INIT) - const char *szCryptoDevice; -#endif + const char *szCryptoDevice; /* ENGINE device (if available) */ #ifdef HAVE_OCSP_STAPLING const ap_socache_provider_t *stapling_cache; @@ -617,6 +660,14 @@ typedef struct { apr_global_mutex_t *stapling_cache_mutex; apr_global_mutex_t *stapling_refresh_mutex; #endif +#ifdef HAVE_OPENSSL_KEYLOG + /* Used for logging if SSLKEYLOGFILE is set at startup.
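A sketch, not part of the patch, of how a keylog callback of this shape is typically wired up during context initialization; the "ctx" variable and the surrounding check are assumptions about the caller, only modssl_callback_keylog and the keylog_file member come from this change:

    #ifdef HAVE_OPENSSL_KEYLOG
        if (mc->keylog_file) {
            /* OpenSSL >= 1.1.1: invoked once per NSS-format key log line */
            SSL_CTX_set_keylog_callback(ctx, modssl_callback_keylog);
        }
    #endif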
*/ + apr_file_t *keylog_file; +#endif + +#ifdef HAVE_FIPS + BOOL fips; +#endif } SSLModConfigRec; /** Structure representing configured filenames for certs and keys for @@ -640,10 +691,13 @@ typedef struct { const char *cert_file; const char *cert_path; const char *ca_cert_file; - STACK_OF(X509_INFO) *certs; /* Contains End Entity certs */ - STACK_OF(X509) **ca_certs; /* Contains ONLY chain certs for - * each item in certs. - * (ptr to array of ptrs) */ + /* certs is a stack of configured cert, key pairs. */ + STACK_OF(X509_INFO) *certs; + /* ca_certs contains ONLY chain certs for each item in certs. + * ca_certs[n] is a pointer to the (STACK_OF(X509) *) stack which + * holds the cert chain for the 'n'th cert in the certs stack, or + * NULL if no chain is configured. */ + STACK_OF(X509) **ca_certs; } modssl_pk_proxy_t; /** stuff related to authentication that can also be per-dir */ @@ -668,7 +722,11 @@ typedef struct { typedef struct { const char *file_path; unsigned char key_name[16]; +#if OPENSSL_VERSION_NUMBER < 0x30000000L unsigned char hmac_secret[16]; +#else + OSSL_PARAM mac_params[3]; +#endif unsigned char aes_key[16]; } modssl_ticket_key_t; #endif @@ -765,9 +823,6 @@ struct SSLSrvConfigRec { #ifdef HAVE_TLSEXT ssl_enabled_t strict_sni_vhost_check; #endif -#ifdef HAVE_FIPS - BOOL fips; -#endif #ifndef OPENSSL_NO_COMP BOOL compression; #endif @@ -928,9 +983,20 @@ void ssl_callback_Info(const SSL *, int, int); #ifdef HAVE_TLSEXT int ssl_callback_ServerNameIndication(SSL *, int *, modssl_ctx_t *); #endif +#if OPENSSL_VERSION_NUMBER >= 0x10101000L && !defined(LIBRESSL_VERSION_NUMBER) +int ssl_callback_ClientHello(SSL *, int *, void *); +#endif #ifdef HAVE_TLS_SESSION_TICKETS -int ssl_callback_SessionTicket(SSL *, unsigned char *, unsigned char *, - EVP_CIPHER_CTX *, HMAC_CTX *, int); +int ssl_callback_SessionTicket(SSL *ssl, + unsigned char *keyname, + unsigned char *iv, + EVP_CIPHER_CTX *cipher_ctx, +#if OPENSSL_VERSION_NUMBER < 0x30000000L + HMAC_CTX *hmac_ctx, +#else + EVP_MAC_CTX *mac_ctx, +#endif + int mode); #endif #ifdef HAVE_TLS_ALPN @@ -970,10 +1036,15 @@ int ssl_stapling_init_cert(server_rec *, apr_pool_t *, apr_pool_t *, int ssl_callback_SRPServerParams(SSL *, int *, void *); #endif +#ifdef HAVE_OPENSSL_KEYLOG +/* Callback used with SSL_CTX_set_keylog_callback. */ +void modssl_callback_keylog(const SSL *ssl, const char *line); +#endif + /** I/O */ void ssl_io_filter_init(conn_rec *, request_rec *r, SSL *); void ssl_io_filter_register(apr_pool_t *); -long ssl_io_data_cb(BIO *, int, const char *, int, long, long); +void modssl_set_io_callbacks(SSL *ssl); /* ssl_io_buffer_fill fills the setaside buffering of the HTTP request * to allow an SSL renegotiation to take place. */ @@ -1002,21 +1073,32 @@ BOOL ssl_util_vhost_matches(const char *servername, server_rec *s); apr_status_t ssl_load_encrypted_pkey(server_rec *, apr_pool_t *, int, const char *, apr_array_header_t **); +/* Load public and/or private key from the configured ENGINE. Private + * key returned as *pkey. certid can be NULL, in which case *pubkey + * is not altered. Errors logged on failure. 
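For context on the helper declared just below, a sketch of a possible call site (not part of the patch; "keyfile", "certfile", "sc", "ptemp" and the error path are assumed from the caller):

    if (modssl_is_engine_id(keyfile)) {
        X509 *cert = NULL;
        EVP_PKEY *pkey = NULL;

        if (modssl_load_engine_keypair(s, ptemp, sc->vhost_id,
                                       certfile, keyfile,
                                       &cert, &pkey) != APR_SUCCESS) {
            return ssl_die(s);
        }
    }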
*/ +apr_status_t modssl_load_engine_keypair(server_rec *s, apr_pool_t *p, + const char *vhostid, + const char *certid, const char *keyid, + X509 **pubkey, EVP_PKEY **privkey); + /** Diffie-Hellman Parameter Support */ -DH *ssl_dh_GetParamFromFile(const char *); +#if OPENSSL_VERSION_NUMBER < 0x30000000L +DH *modssl_dh_from_file(const char *); +#else +EVP_PKEY *modssl_dh_pkey_from_file(const char *); +#endif #ifdef HAVE_ECC -EC_GROUP *ssl_ec_GetParamFromFile(const char *); +EC_GROUP *modssl_ec_group_from_file(const char *); #endif -unsigned char *ssl_asn1_table_set(apr_hash_t *table, - const char *key, - long int length); - -ssl_asn1_t *ssl_asn1_table_get(apr_hash_t *table, - const char *key); - -void ssl_asn1_table_unset(apr_hash_t *table, - const char *key); +/* Store the EVP_PKEY key (serialized into DER) in the hash table with + * key, returning the ssl_asn1_t structure pointer. */ +ssl_asn1_t *ssl_asn1_table_set(apr_hash_t *table, const char *key, + EVP_PKEY *pkey); +/* Retrieve the ssl_asn1_t structure with given key from the hash. */ +ssl_asn1_t *ssl_asn1_table_get(apr_hash_t *table, const char *key); +/* Remove and free the ssl_asn1_t structure with given key. */ +void ssl_asn1_table_unset(apr_hash_t *table, const char *key); /** Mutex Support */ int ssl_mutex_init(server_rec *, apr_pool_t *); @@ -1096,10 +1178,12 @@ void ssl_init_ocsp_certificates(server_rec *s, modssl_ctx_t *mctx); #endif +#if MODSSL_USE_OPENSSL_PRE_1_1_API /* Retrieve DH parameters for given key length. Return value should * be treated as unmutable, since it is stored in process-global * memory. */ DH *modssl_get_dh_params(unsigned keylen); +#endif /* Returns non-zero if the request was made over SSL/TLS. If sslconn * is non-NULL and the request is using SSL/TLS, sets *sslconn to the @@ -1107,7 +1191,12 @@ DH *modssl_get_dh_params(unsigned keylen); int modssl_request_is_tls(const request_rec *r, SSLConnRec **sslconn); int ssl_is_challenge(conn_rec *c, const char *servername, - X509 **pcert, EVP_PKEY **pkey); + X509 **pcert, EVP_PKEY **pkey, + const char **pcert_file, const char **pkey_file); + +/* Returns non-zero if the cert/key filename should be handled through + * the configured ENGINE. */ +int modssl_is_engine_id(const char *name); #endif /* SSL_PRIVATE_H */ /** @} */ diff --git a/modules/ssl/ssl_scache.c b/modules/ssl/ssl_scache.c index 7b4a203..c0a0950 100644 --- a/modules/ssl/ssl_scache.c +++ b/modules/ssl/ssl_scache.c @@ -59,7 +59,7 @@ apr_status_t ssl_scache_init(server_rec *s, apr_pool_t *p) hints.expiry_interval = 300; rv = mc->stapling_cache->init(mc->stapling_cache_context, - "mod_ssl-stapling", &hints, s, p); + "mod_ssl-staple", &hints, s, p); if (rv) { ap_log_error(APLOG_MARK, APLOG_EMERG, 0, s, APLOGNO(01872) "Could not initialize stapling cache. Exiting."); @@ -84,7 +84,7 @@ apr_status_t ssl_scache_init(server_rec *s, apr_pool_t *p) hints.avg_id_len = 30; hints.expiry_interval = 30; - rv = mc->sesscache->init(mc->sesscache_context, "mod_ssl-session", &hints, s, p); + rv = mc->sesscache->init(mc->sesscache_context, "mod_ssl-sess", &hints, s, p); if (rv) { ap_log_error(APLOG_MARK, APLOG_EMERG, 0, s, APLOGNO(01874) "Could not initialize session cache. 
Exiting."); diff --git a/modules/ssl/ssl_util.c b/modules/ssl/ssl_util.c index 0d23465..87ddfa7 100644 --- a/modules/ssl/ssl_util.c +++ b/modules/ssl/ssl_util.c @@ -192,45 +192,37 @@ BOOL ssl_util_path_check(ssl_pathcheck_t pcm, const char *path, apr_pool_t *p) return TRUE; } -/* - * certain key data needs to survive restarts, - * which are stored in the user data table of s->process->pool. - * to prevent "leaking" of this data, we use malloc/free - * rather than apr_palloc and these wrappers to help make sure - * we do not leak the malloc-ed data. - */ -unsigned char *ssl_asn1_table_set(apr_hash_t *table, - const char *key, - long int length) +/* Decrypted private keys are cached to survive restarts. The cached + * data must have lifetime of the process (hence malloc/free rather + * than pools), and uses raw DER since the EVP_PKEY structure + * internals may not survive across a module reload. */ +ssl_asn1_t *ssl_asn1_table_set(apr_hash_t *table, const char *key, + EVP_PKEY *pkey) { apr_ssize_t klen = strlen(key); ssl_asn1_t *asn1 = apr_hash_get(table, key, klen); + apr_size_t length = i2d_PrivateKey(pkey, NULL); + unsigned char *p; - /* - * if a value for this key already exists, - * reuse as much of the already malloc-ed data - * as possible. - */ + /* Re-use structure if cached previously. */ if (asn1) { if (asn1->nData != length) { - free(asn1->cpData); /* XXX: realloc? */ - asn1->cpData = NULL; + asn1->cpData = ap_realloc(asn1->cpData, length); } } else { asn1 = ap_malloc(sizeof(*asn1)); asn1->source_mtime = 0; /* used as a note for encrypted private keys */ - asn1->cpData = NULL; - } - - asn1->nData = length; - if (!asn1->cpData) { asn1->cpData = ap_malloc(length); + + apr_hash_set(table, key, klen, asn1); } - apr_hash_set(table, key, klen, asn1); + asn1->nData = length; + p = asn1->cpData; + i2d_PrivateKey(pkey, &p); /* increases p by length */ - return asn1->cpData; /* caller will assign a value to this */ + return asn1; } ssl_asn1_t *ssl_asn1_table_get(apr_hash_t *table, @@ -306,6 +298,7 @@ static struct CRYPTO_dynlock_value *ssl_dyn_create_function(const char *file, * away in the destruction callback. */ apr_pool_create(&p, dynlockpool); + apr_pool_tag(p, "modssl_dynlock_value"); ap_log_perror(file, line, APLOG_MODULE_INDEX, APLOG_TRACE1, 0, p, "Creating dynamic lock"); @@ -480,3 +473,13 @@ void ssl_util_thread_id_setup(apr_pool_t *p) } #endif /* #if APR_HAS_THREADS && MODSSL_USE_OPENSSL_PRE_1_1_API */ + +int modssl_is_engine_id(const char *name) +{ +#if MODSSL_HAVE_ENGINE_API + /* ### Can handle any other special ENGINE key names here? */ + return strncmp(name, "pkcs11:", 7) == 0; +#else + return 0; +#endif +} diff --git a/modules/ssl/ssl_util_ocsp.c b/modules/ssl/ssl_util_ocsp.c index b66e151..a202a72 100644 --- a/modules/ssl/ssl_util_ocsp.c +++ b/modules/ssl/ssl_util_ocsp.c @@ -46,6 +46,7 @@ static BIO *serialize_request(OCSP_REQUEST *req, const apr_uri_t *uri, BIO_printf(bio, "%s%s%s HTTP/1.0\r\n" "Host: %s:%d\r\n" "Content-Type: application/ocsp-request\r\n" + "Connection: close\r\n" "Content-Length: %d\r\n" "\r\n", uri->path ? 
uri->path : "/", @@ -369,8 +370,11 @@ static STACK_OF(X509) *modssl_read_ocsp_certificates(const char *file) while ((x509 = PEM_read_bio_X509(bio, NULL, NULL, NULL)) != NULL) { if (!other_certs) { other_certs = sk_X509_new_null(); - if (!other_certs) + if (!other_certs) { + X509_free(x509); + BIO_free(bio); return NULL; + } } if (!sk_X509_push(other_certs, x509)) { diff --git a/modules/ssl/ssl_util_ssl.c b/modules/ssl/ssl_util_ssl.c index b7f0eca..44930b7 100644 --- a/modules/ssl/ssl_util_ssl.c +++ b/modules/ssl/ssl_util_ssl.c @@ -74,7 +74,7 @@ void modssl_set_app_data2(SSL *ssl, void *arg) ** _________________________________________________________________ */ -EVP_PKEY *modssl_read_privatekey(const char* filename, EVP_PKEY **key, pem_password_cb *cb, void *s) +EVP_PKEY *modssl_read_privatekey(const char *filename, pem_password_cb *cb, void *s) { EVP_PKEY *rc; BIO *bioS; @@ -83,7 +83,7 @@ EVP_PKEY *modssl_read_privatekey(const char* filename, EVP_PKEY **key, pem_passw /* 1. try PEM (= DER+Base64+headers) */ if ((bioS=BIO_new_file(filename, "r")) == NULL) return NULL; - rc = PEM_read_bio_PrivateKey(bioS, key, cb, s); + rc = PEM_read_bio_PrivateKey(bioS, NULL, cb, s); BIO_free(bioS); if (rc == NULL) { @@ -107,41 +107,9 @@ EVP_PKEY *modssl_read_privatekey(const char* filename, EVP_PKEY **key, pem_passw BIO_free(bioS); } } - if (rc != NULL && key != NULL) { - if (*key != NULL) - EVP_PKEY_free(*key); - *key = rc; - } return rc; } -typedef struct { - const char *pass; - int pass_len; -} pass_ctx; - -static int provide_pass(char *buf, int size, int rwflag, void *baton) -{ - pass_ctx *ctx = baton; - if (ctx->pass_len > 0) { - if (ctx->pass_len < size) { - size = (int)ctx->pass_len; - } - memcpy(buf, ctx->pass, size); - } - return ctx->pass_len; -} - -EVP_PKEY *modssl_read_encrypted_pkey(const char *filename, EVP_PKEY **key, - const char *pass, apr_size_t pass_len) -{ - pass_ctx ctx; - - ctx.pass = pass; - ctx.pass_len = pass_len; - return modssl_read_privatekey(filename, key, provide_pass, &ctx); -} - /* _________________________________________________________________ ** ** Smart shutdown @@ -156,7 +124,7 @@ int modssl_smart_shutdown(SSL *ssl) /* * Repeat the calls, because SSL_shutdown internally dispatches through a - * little state machine. Usually only one or two interation should be + * little state machine. Usually only one or two iterations should be * needed, so we restrict the total number of restrictions in order to * avoid process hangs in case the client played bad with the socket * connection and OpenSSL cannot recognize it. @@ -166,7 +134,7 @@ int modssl_smart_shutdown(SSL *ssl) for (i = 0; i < 4 /* max 2x pending + 2x data = 4 */; i++) { rc = SSL_shutdown(ssl); if (rc >= 0 && flush && (SSL_get_shutdown(ssl) & SSL_SENT_SHUTDOWN)) { - /* Once the close notity is sent through the output filters, + /* Once the close notify is sent through the output filters, * ensure it is flushed through the socket. */ if (BIO_flush(SSL_get_wbio(ssl)) <= 0) { @@ -217,14 +185,27 @@ BOOL modssl_X509_getBC(X509 *cert, int *ca, int *pathlen) return TRUE; } +char *modssl_bio_free_read(apr_pool_t *p, BIO *bio) +{ + int len = BIO_pending(bio); + char *result = NULL; + + if (len > 0) { + result = apr_palloc(p, len+1); + len = BIO_read(bio, result, len); + result[len] = NUL; + } + BIO_free(bio); + return result; +} + /* Convert ASN.1 string to a pool-allocated char * string, escaping * control characters. If raw is zero, convert to UTF-8, otherwise * unchanged from the character set. 
*/ static char *asn1_string_convert(apr_pool_t *p, ASN1_STRING *asn1str, int raw) { - char *result = NULL; BIO *bio; - int len, flags = ASN1_STRFLGS_ESC_CTRL; + int flags = ASN1_STRFLGS_ESC_CTRL; if ((bio = BIO_new(BIO_s_mem())) == NULL) return NULL; @@ -232,14 +213,8 @@ static char *asn1_string_convert(apr_pool_t *p, ASN1_STRING *asn1str, int raw) if (!raw) flags |= ASN1_STRFLGS_UTF8_CONVERT; ASN1_STRING_print_ex(bio, asn1str, flags); - len = BIO_pending(bio); - if (len > 0) { - result = apr_palloc(p, len+1); - len = BIO_read(bio, result, len); - result[len] = NUL; - } - BIO_free(bio); - return result; + + return modssl_bio_free_read(p, bio); } #define asn1_string_to_utf8(p, a) asn1_string_convert(p, a, 0) @@ -489,29 +464,52 @@ BOOL modssl_X509_match_name(apr_pool_t *p, X509 *x509, const char *name, ** _________________________________________________________________ */ -DH *ssl_dh_GetParamFromFile(const char *file) +#if OPENSSL_VERSION_NUMBER < 0x30000000L +DH *modssl_dh_from_file(const char *file) { - DH *dh = NULL; + DH *dh; BIO *bio; if ((bio = BIO_new_file(file, "r")) == NULL) return NULL; dh = PEM_read_bio_DHparams(bio, NULL, NULL, NULL); BIO_free(bio); - return (dh); + + return dh; +} +#else +EVP_PKEY *modssl_dh_pkey_from_file(const char *file) +{ + EVP_PKEY *pkey; + BIO *bio; + + if ((bio = BIO_new_file(file, "r")) == NULL) + return NULL; + pkey = PEM_read_bio_Parameters(bio, NULL); + BIO_free(bio); + + return pkey; } +#endif #ifdef HAVE_ECC -EC_GROUP *ssl_ec_GetParamFromFile(const char *file) +EC_GROUP *modssl_ec_group_from_file(const char *file) { - EC_GROUP *group = NULL; + EC_GROUP *group; BIO *bio; if ((bio = BIO_new_file(file, "r")) == NULL) return NULL; +#if OPENSSL_VERSION_NUMBER < 0x30000000L group = PEM_read_bio_ECPKParameters(bio, NULL, NULL, NULL); +#else + group = PEM_ASN1_read_bio((void *)d2i_ECPKParameters, + PEM_STRING_ECPARAMETERS, bio, + NULL, NULL, NULL); +#endif BIO_free(bio); - return (group); + + return group; } #endif @@ -536,3 +534,81 @@ char *modssl_SSL_SESSION_id2sz(IDCONST unsigned char *id, int idlen, return str; } + +/* _________________________________________________________________ +** +** Certificate/Key Stuff +** _________________________________________________________________ +*/ + +apr_status_t modssl_read_cert(apr_pool_t *p, + const char *cert_pem, const char *key_pem, + pem_password_cb *cb, void *ud, + X509 **pcert, EVP_PKEY **pkey) +{ + BIO *in; + X509 *x = NULL; + EVP_PKEY *key = NULL; + apr_status_t rv = APR_SUCCESS; + + in = BIO_new_mem_buf(cert_pem, -1); + if (in == NULL) { + rv = APR_ENOMEM; + goto cleanup; + } + + x = PEM_read_bio_X509(in, NULL, cb, ud); + if (x == NULL) { + rv = APR_ENOENT; + goto cleanup; + } + + BIO_free(in); + in = BIO_new_mem_buf(key_pem? 
key_pem : cert_pem, -1); + if (in == NULL) { + rv = APR_ENOMEM; + goto cleanup; + } + key = PEM_read_bio_PrivateKey(in, NULL, cb, ud); + if (key == NULL) { + rv = APR_ENOENT; + goto cleanup; + } + +cleanup: + if (rv == APR_SUCCESS) { + *pcert = x; + *pkey = key; + } + else { + *pcert = NULL; + *pkey = NULL; + if (x) X509_free(x); + if (key) EVP_PKEY_free(key); + } + if (in != NULL) BIO_free(in); + return rv; +} + +apr_status_t modssl_cert_get_pem(apr_pool_t *p, + X509 *cert1, X509 *cert2, + const char **ppem) +{ + apr_status_t rv = APR_ENOMEM; + BIO *bio; + + if ((bio = BIO_new(BIO_s_mem())) == NULL) goto cleanup; + if (PEM_write_bio_X509(bio, cert1) != 1) goto cleanup; + if (cert2 && PEM_write_bio_X509(bio, cert2) != 1) goto cleanup; + rv = APR_SUCCESS; + +cleanup: + if (rv != APR_SUCCESS) { + *ppem = NULL; + if (bio) BIO_free(bio); + } + else { + *ppem = modssl_bio_free_read(p, bio); + } + return rv; +} diff --git a/modules/ssl/ssl_util_ssl.h b/modules/ssl/ssl_util_ssl.h index c67dacf..443c1b7 100644 --- a/modules/ssl/ssl_util_ssl.h +++ b/modules/ssl/ssl_util_ssl.h @@ -64,8 +64,11 @@ void modssl_init_app_data2_idx(void); void *modssl_get_app_data2(SSL *); void modssl_set_app_data2(SSL *, void *); -EVP_PKEY *modssl_read_privatekey(const char *, EVP_PKEY **, pem_password_cb *, void *); -EVP_PKEY *modssl_read_encrypted_pkey(const char *, EVP_PKEY **, const char *, apr_size_t); + +/* Read private key from filename in either PEM or raw base64(DER) + * format, using password entry callback cb and userdata. */ +EVP_PKEY *modssl_read_privatekey(const char *filename, pem_password_cb *cb, void *ud); + int modssl_smart_shutdown(SSL *ssl); BOOL modssl_X509_getBC(X509 *, int *, int *); char *modssl_X509_NAME_ENTRY_to_string(apr_pool_t *p, X509_NAME_ENTRY *xsne, @@ -75,6 +78,30 @@ BOOL modssl_X509_getSAN(apr_pool_t *, X509 *, int, const char *, int, apr BOOL modssl_X509_match_name(apr_pool_t *, X509 *, const char *, BOOL, server_rec *); char *modssl_SSL_SESSION_id2sz(IDCONST unsigned char *, int, char *, int); +/* Reads the remaining data in BIO, if not empty, and copies it into a + * pool-allocated string. If empty, returns NULL. BIO_free(bio) is + * called for both cases. */ +char *modssl_bio_free_read(apr_pool_t *p, BIO *bio); + +/* Read a single certificate and its private key from the given string in PEM format. + * If `key_pem` is NULL, it will expect the key in `cert_pem`. + */ +apr_status_t modssl_read_cert(apr_pool_t *p, + const char *cert_pem, const char *key_pem, + pem_password_cb *cb, void *ud, + X509 **pcert, EVP_PKEY **pkey); + +/* Convert a certificate (and optionally a second) into a PEM string. + * @param p pool for allocations + * @param cert1 the certificate to convert + * @param cert2 a second cert to add to the PEM afterwards or NULL. + * @param ppem the certificate(s) in PEM format, NUL-terminated. + * @return APR_SUCCESS if ppem is valid. 
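Taken together, the two helpers declared in this header allow a PEM round-trip. A sketch of how a caller might combine them (not part of the patch; "p", "cert_pem" and "key_pem" are assumed inputs):

    X509 *cert = NULL;
    EVP_PKEY *key = NULL;
    const char *pem = NULL;

    if (modssl_read_cert(p, cert_pem, key_pem, NULL, NULL, &cert, &key) == APR_SUCCESS
        && modssl_cert_get_pem(p, cert, NULL, &pem) == APR_SUCCESS) {
        /* pem now holds the certificate re-encoded as a NUL-terminated PEM string */
    }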
+ */ +apr_status_t modssl_cert_get_pem(apr_pool_t *p, + X509 *cert1, X509 *cert2, + const char **ppem); + #endif /* __SSL_UTIL_SSL_H__ */ /** @} */ diff --git a/modules/ssl/ssl_util_stapling.c b/modules/ssl/ssl_util_stapling.c index c3e2cfa..563de55 100644 --- a/modules/ssl/ssl_util_stapling.c +++ b/modules/ssl/ssl_util_stapling.c @@ -29,16 +29,32 @@ -- Alexei Sayle */ #include "ssl_private.h" + #include "ap_mpm.h" #include "apr_thread_mutex.h" +APR_IMPLEMENT_OPTIONAL_HOOK_RUN_ALL(ssl, SSL, int, init_stapling_status, + (server_rec *s, apr_pool_t *p, + X509 *cert, X509 *issuer), + (s, p, cert, issuer), + DECLINED, DECLINED) + +APR_IMPLEMENT_OPTIONAL_HOOK_RUN_ALL(ssl, SSL, int, get_stapling_status, + (unsigned char **pder, int *pderlen, + conn_rec *c, server_rec *s, X509 *cert), + (pder, pderlen, c, s, cert), + DECLINED, DECLINED) + + #ifdef HAVE_OCSP_STAPLING static int stapling_cache_mutex_on(server_rec *s); static int stapling_cache_mutex_off(server_rec *s); +static int stapling_cb(SSL *ssl, void *arg); + /** - * Maxiumum OCSP stapling response size. This should be the response for a + * Maximum OCSP stapling response size. This should be the response for a * single certificate and will typically include the responder certificate chain * so 10K should be more than enough. * @@ -101,8 +117,10 @@ static X509 *stapling_get_issuer(modssl_ctx_t *mctx, X509 *x) } inctx = X509_STORE_CTX_new(); - if (!X509_STORE_CTX_init(inctx, st, NULL, NULL)) + if (!X509_STORE_CTX_init(inctx, st, NULL, NULL)) { + X509_STORE_CTX_free(inctx); return 0; + } if (X509_STORE_CTX_get1_issuer(&issuer, inctx, x) <= 0) issuer = NULL; X509_STORE_CTX_cleanup(inctx); @@ -118,10 +136,51 @@ int ssl_stapling_init_cert(server_rec *s, apr_pool_t *p, apr_pool_t *ptemp, X509 *issuer = NULL; OCSP_CERTID *cid = NULL; STACK_OF(OPENSSL_STRING) *aia = NULL; + const char *pem = NULL; + int rv = 1; /* until further notice */ - if ((x == NULL) || (X509_digest(x, EVP_sha1(), idx, NULL) != 1)) + if (x == NULL) return 0; + if (!(issuer = stapling_get_issuer(mctx, x))) { + /* In Apache pre 2.4.40, we used to come here only when mod_ssl stapling + * was enabled. With the new hooks, we give other modules the chance + * to provide stapling status. However, we do not want to log ssl errors + * where we did not do so in the past.
*/ + if (mctx->stapling_enabled == TRUE) { + ssl_log_xerror(SSLLOG_MARK, APLOG_ERR, 0, ptemp, s, x, APLOGNO(02217) + "ssl_stapling_init_cert: can't retrieve issuer " + "certificate!"); + return 0; + } + return 1; + } + + if (X509_digest(x, EVP_sha1(), idx, NULL) != 1) { + rv = 0; + goto cleanup; + } + + if (modssl_cert_get_pem(ptemp, x, issuer, &pem) != APR_SUCCESS) { + rv = 0; + goto cleanup; + } + + if (ap_ssl_ocsp_prime(s, p, (const char*)idx, sizeof(idx), pem) == APR_SUCCESS + || ssl_run_init_stapling_status(s, p, x, issuer) == OK) { + /* Someone's taken over or mod_ssl's own implementation is not enabled */ + if (mctx->stapling_enabled != TRUE) { + SSL_CTX_set_tlsext_status_cb(mctx->ssl_ctx, stapling_cb); + ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s, APLOGNO(10177) "OCSP stapling added via hook"); + } + goto cleanup; + } + + if (mctx->stapling_enabled != TRUE) { + /* mod_ssl's own implementation is not enabled */ + goto cleanup; + } + cinf = apr_hash_get(stapling_certinfo, idx, sizeof(idx)); if (cinf) { /* @@ -134,25 +193,18 @@ int ssl_stapling_init_cert(server_rec *s, apr_pool_t *p, apr_pool_t *ptemp, APLOGNO(02814) "ssl_stapling_init_cert: no OCSP URI " "in certificate and no SSLStaplingForceURL " "configured for server %s", mctx->sc->vhost_id); - return 0; + rv = 0; } - return 1; - } - - if (!(issuer = stapling_get_issuer(mctx, x))) { - ssl_log_xerror(SSLLOG_MARK, APLOG_ERR, 0, ptemp, s, x, APLOGNO(02217) - "ssl_stapling_init_cert: can't retrieve issuer " - "certificate!"); - return 0; + goto cleanup; } cid = OCSP_cert_to_id(NULL, x, issuer); - X509_free(issuer); if (!cid) { ssl_log_xerror(SSLLOG_MARK, APLOG_ERR, 0, ptemp, s, x, APLOGNO(02815) "ssl_stapling_init_cert: can't create CertID " "for OCSP request"); - return 0; + rv = 0; + goto cleanup; } aia = X509_get1_ocsp(x); @@ -161,7 +213,8 @@ int ssl_stapling_init_cert(server_rec *s, apr_pool_t *p, apr_pool_t *ptemp, ssl_log_xerror(SSLLOG_MARK, APLOG_ERR, 0, ptemp, s, x, APLOGNO(02218) "ssl_stapling_init_cert: no OCSP URI " "in certificate and no SSLStaplingForceURL set"); - return 0; + rv = 0; + goto cleanup; } /* At this point, we have determined that there's something to store */ @@ -183,19 +236,16 @@ int ssl_stapling_init_cert(server_rec *s, apr_pool_t *p, apr_pool_t *ptemp, apr_hash_set(stapling_certinfo, cinf->idx, sizeof(cinf->idx), cinf); - return 1; +cleanup: + X509_free(issuer); + return rv; } -static certinfo *stapling_get_certinfo(server_rec *s, modssl_ctx_t *mctx, - SSL *ssl) +static certinfo *stapling_get_certinfo(server_rec *s, UCHAR *idx, apr_size_t idx_len, + modssl_ctx_t *mctx, SSL *ssl) { certinfo *cinf; - X509 *x; - UCHAR idx[SHA_DIGEST_LENGTH]; - x = SSL_get_certificate(ssl); - if ((x == NULL) || (X509_digest(x, EVP_sha1(), idx, NULL) != 1)) - return NULL; - cinf = apr_hash_get(stapling_certinfo, idx, sizeof(idx)); + cinf = apr_hash_get(stapling_certinfo, idx, idx_len); if (cinf && cinf->cid) return cinf; ap_log_error(APLOG_MARK, APLOG_INFO, 0, s, APLOGNO(01926) @@ -397,7 +447,7 @@ static int stapling_check_response(server_rec *s, modssl_ctx_t *mctx, rv = SSL_TLSEXT_ERR_NOACK; } - if (status != V_OCSP_CERTSTATUS_GOOD) { + if (status != V_OCSP_CERTSTATUS_GOOD && pok) { char snum[MAX_STRING_LEN] = { '\0' }; BIO *bio = BIO_new(BIO_s_mem()); @@ -418,12 +468,6 @@ static int stapling_check_response(server_rec *s, modssl_ctx_t *mctx, (reason != OCSP_REVOKED_STATUS_NOSTATUS) ? OCSP_crl_reason_str(reason) : "n/a", snum[0] ? 
snum : "[n/a]"); - - if (mctx->stapling_return_errors == FALSE) { - if (pok) - *pok = FALSE; - rv = SSL_TLSEXT_ERR_NOACK; - } } } @@ -482,6 +526,7 @@ static BOOL stapling_renew_response(server_rec *s, modssl_ctx_t *mctx, SSL *ssl, /* Create a temporary pool to constrain memory use */ apr_pool_create(&vpool, conn->pool); + apr_pool_tag(vpool, "modssl_stapling_renew"); if (apr_uri_parse(vpool, ocspuri, &uri) != APR_SUCCESS) { ap_log_error(APLOG_MARK, APLOG_ERR, 0, s, APLOGNO(01939) @@ -732,6 +777,23 @@ static int get_and_check_cached_response(server_rec *s, modssl_ctx_t *mctx, return 0; } +typedef struct { + unsigned char *data; + apr_size_t len; +} ocsp_resp; + +static void copy_ocsp_resp(const unsigned char *der, apr_size_t der_len, void *userdata) +{ + ocsp_resp *resp = userdata; + + resp->len = 0; + resp->data = der? OPENSSL_malloc(der_len) : NULL; + if (resp->data) { + memcpy(resp->data, der, der_len); + resp->len = der_len; + } +} + /* Certificate Status callback. This is called when a client includes a * certificate status request extension. * @@ -744,24 +806,53 @@ static int stapling_cb(SSL *ssl, void *arg) conn_rec *conn = (conn_rec *)SSL_get_app_data(ssl); server_rec *s = mySrvFromConn(conn); SSLSrvConfigRec *sc = mySrvConfig(s); - SSLConnRec *sslconn = myConnConfig(conn); - modssl_ctx_t *mctx = myCtxConfig(sslconn, sc); + modssl_ctx_t *mctx = myConnCtxConfig(conn, sc); + UCHAR idx[SHA_DIGEST_LENGTH]; + ocsp_resp resp; certinfo *cinf = NULL; OCSP_RESPONSE *rsp = NULL; int rv; BOOL ok = TRUE; + X509 *x; + int rspderlen, provided = 0; + + ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s, APLOGNO(01951) + "stapling_cb: OCSP Stapling callback called"); + + x = SSL_get_certificate(ssl); + if (x == NULL) { + return SSL_TLSEXT_ERR_NOACK; + } + + if (X509_digest(x, EVP_sha1(), idx, NULL) != 1) { + return SSL_TLSEXT_ERR_NOACK; + } + + if (ap_ssl_ocsp_get_resp(s, conn, (const char*)idx, sizeof(idx), + copy_ocsp_resp, &resp) == APR_SUCCESS) { + provided = 1; + } + else if (ssl_run_get_stapling_status(&resp.data, &rspderlen, conn, s, x) == APR_SUCCESS) { + resp.len = (apr_size_t)rspderlen; + provided = 1; + } + if (provided) { + /* a hook handles stapling for this certificate and determines the response */ + if (resp.data == NULL || resp.len == 0) { + return SSL_TLSEXT_ERR_NOACK; + } + SSL_set_tlsext_status_ocsp_resp(ssl, resp.data, (int)resp.len); + return SSL_TLSEXT_ERR_OK; + } + if (sc->server->stapling_enabled != TRUE) { ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s, APLOGNO(01950) "stapling_cb: OCSP Stapling disabled"); return SSL_TLSEXT_ERR_NOACK; } - ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s, APLOGNO(01951) - "stapling_cb: OCSP Stapling callback called"); - - cinf = stapling_get_certinfo(s, mctx, ssl); - if (cinf == NULL) { + if ((cinf = stapling_get_certinfo(s, idx, sizeof(idx), mctx, ssl)) == NULL) { return SSL_TLSEXT_ERR_NOACK; } @@ -818,15 +909,21 @@ static int stapling_cb(SSL *ssl, void *arg) if (rsp && ((ok == TRUE) || (mctx->stapling_return_errors == TRUE))) { ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s, APLOGNO(01956) "stapling_cb: setting response"); - if (!stapling_set_response(ssl, rsp)) - return SSL_TLSEXT_ERR_ALERT_FATAL; - return SSL_TLSEXT_ERR_OK; + if (!stapling_set_response(ssl, rsp)) { + rv = SSL_TLSEXT_ERR_ALERT_FATAL; + } + else { + rv = SSL_TLSEXT_ERR_OK; + } } - ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s, APLOGNO(01957) - "stapling_cb: no suitable response available"); - - return SSL_TLSEXT_ERR_NOACK; + else { + ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s, APLOGNO(01957) + 
"stapling_cb: no suitable response available"); + rv = SSL_TLSEXT_ERR_NOACK; + } + OCSP_RESPONSE_free(rsp); /* NULL safe */ + return rv; } apr_status_t modssl_init_stapling(server_rec *s, apr_pool_t *p, @@ -864,9 +961,10 @@ apr_status_t modssl_init_stapling(server_rec *s, apr_pool_t *p, if (mctx->stapling_responder_timeout == UNSET) { mctx->stapling_responder_timeout = 10 * APR_USEC_PER_SEC; } + SSL_CTX_set_tlsext_status_cb(ctx, stapling_cb); ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s, APLOGNO(01960) "OCSP stapling initialized"); - + return APR_SUCCESS; } diff --git a/modules/test/mod_dialup.c b/modules/test/mod_dialup.c index 330c7c3..d018d9a 100644 --- a/modules/test/mod_dialup.c +++ b/modules/test/mod_dialup.c @@ -171,7 +171,7 @@ dialup_handler(request_rec *r) /* copied from default handler: */ ap_update_mtime(r, r->finfo.mtime); ap_set_last_modified(r); - ap_set_etag(r); + ap_set_etag_fd(r, fd); ap_set_accept_ranges(r); ap_set_content_length(r, r->finfo.size); @@ -269,7 +269,7 @@ cmd_modem_standard(cmd_parms *cmd, } if (dcfg->bytes_per_second == 0) { - return "mod_diaulup: Unkonwn Modem Standard specified."; + return "mod_dialup: Unknown Modem Standard specified."; } return NULL; diff --git a/modules/test/mod_optional_hook_import.c b/modules/test/mod_optional_hook_import.c index 12da318..f921d64 100644 --- a/modules/test/mod_optional_hook_import.c +++ b/modules/test/mod_optional_hook_import.c @@ -21,8 +21,8 @@ static int ImportOptionalHookTestHook(const char *szStr) { - ap_log_error(APLOG_MARK,APLOG_ERR,OK,NULL, APLOGNO(01866)"Optional hook test said: %s", - szStr); + ap_log_error(APLOG_MARK,APLOG_DEBUG,OK,NULL, APLOGNO(01866) + "Optional hook test said: %s", szStr); return OK; } diff --git a/modules/tls/Makefile.in b/modules/tls/Makefile.in new file mode 100644 index 0000000..4395bc3 --- /dev/null +++ b/modules/tls/Makefile.in @@ -0,0 +1,20 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# +# standard stuff +# + +include $(top_srcdir)/build/special.mk diff --git a/modules/tls/config2.m4 b/modules/tls/config2.m4 new file mode 100644 index 0000000..8a32490 --- /dev/null +++ b/modules/tls/config2.m4 @@ -0,0 +1,173 @@ +dnl Licensed to the Apache Software Foundation (ASF) under one or more +dnl contributor license agreements. See the NOTICE file distributed with +dnl this work for additional information regarding copyright ownership. +dnl The ASF licenses this file to You under the Apache License, Version 2.0 +dnl (the "License"); you may not use this file except in compliance with +dnl the License. 
You may obtain a copy of the License at +dnl +dnl http://www.apache.org/licenses/LICENSE-2.0 +dnl +dnl Unless required by applicable law or agreed to in writing, software +dnl distributed under the License is distributed on an "AS IS" BASIS, +dnl WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +dnl See the License for the specific language governing permissions and +dnl limitations under the License. + +dnl # start of module specific part +APACHE_MODPATH_INIT(tls) + +dnl # list of module object files +tls_objs="dnl +mod_tls.lo dnl +tls_cache.lo dnl +tls_cert.lo dnl +tls_conf.lo dnl +tls_core.lo dnl +tls_filter.lo dnl +tls_ocsp.lo dnl +tls_proto.lo dnl +tls_util.lo dnl +tls_var.lo dnl +" + +dnl +dnl APACHE_CHECK_TLS +dnl +dnl Configure for rustls, giving preference to +dnl "--with-rustls=" if it was specified. +dnl +AC_DEFUN([APACHE_CHECK_RUSTLS],[ + AC_CACHE_CHECK([for rustls], [ac_cv_rustls], [ + dnl initialise the variables we use + ac_cv_rustls=no + ap_rustls_found="" + ap_rustls_base="" + ap_rustls_libs="" + + dnl Determine the rustls base directory, if any + AC_MSG_CHECKING([for user-provided rustls base directory]) + AC_ARG_WITH(rustls, APACHE_HELP_STRING(--with-rustls=PATH, rustls installation directory), [ + dnl If --with-rustls specifies a directory, we use that directory + if test "x$withval" != "xyes" -a "x$withval" != "x"; then + dnl This ensures $withval is actually a directory and that it is absolute + ap_rustls_base="`cd $withval ; pwd`" + fi + ]) + if test "x$ap_rustls_base" = "x"; then + AC_MSG_RESULT(none) + else + AC_MSG_RESULT($ap_rustls_base) + fi + + dnl Run header and version checks + saved_CPPFLAGS="$CPPFLAGS" + saved_LIBS="$LIBS" + saved_LDFLAGS="$LDFLAGS" + + dnl Before doing anything else, load in pkg-config variables + if test -n "$PKGCONFIG"; then + saved_PKG_CONFIG_PATH="$PKG_CONFIG_PATH" + AC_MSG_CHECKING([for pkg-config along $PKG_CONFIG_PATH]) + if test "x$ap_rustls_base" != "x" ; then + if test -f "${ap_rustls_base}/lib/pkgconfig/librustls.pc"; then + dnl Ensure that the given path is used by pkg-config too, otherwise + dnl the system librustls.pc might be picked up instead. + PKG_CONFIG_PATH="${ap_rustls_base}/lib/pkgconfig${PKG_CONFIG_PATH+:}${PKG_CONFIG_PATH}" + export PKG_CONFIG_PATH + elif test -f "${ap_rustls_base}/lib64/pkgconfig/librustls.pc"; then + dnl Ensure that the given path is used by pkg-config too, otherwise + dnl the system librustls.pc might be picked up instead. + PKG_CONFIG_PATH="${ap_rustls_base}/lib64/pkgconfig${PKG_CONFIG_PATH+:}${PKG_CONFIG_PATH}" + export PKG_CONFIG_PATH + fi + fi + ap_rustls_libs="`$PKGCONFIG $PKGCONFIG_LIBOPTS --libs-only-l --silence-errors librustls`" + if test $? 
-eq 0; then + ap_rustls_found="yes" + pkglookup="`$PKGCONFIG --cflags-only-I librustls`" + APR_ADDTO(CPPFLAGS, [$pkglookup]) + APR_ADDTO(MOD_CFLAGS, [$pkglookup]) + pkglookup="`$PKGCONFIG $PKGCONFIG_LIBOPTS --libs-only-L librustls`" + APR_ADDTO(LDFLAGS, [$pkglookup]) + APR_ADDTO(MOD_LDFLAGS, [$pkglookup]) + pkglookup="`$PKGCONFIG $PKGCONFIG_LIBOPTS --libs-only-other librustls`" + APR_ADDTO(LDFLAGS, [$pkglookup]) + APR_ADDTO(MOD_LDFLAGS, [$pkglookup]) + fi + PKG_CONFIG_PATH="$saved_PKG_CONFIG_PATH" + fi + + dnl fall back to the user-supplied directory if not found via pkg-config + if test "x$ap_rustls_base" != "x" -a "x$ap_rustls_found" = "x"; then + APR_ADDTO(CPPFLAGS, [-I$ap_rustls_base/include]) + APR_ADDTO(MOD_CFLAGS, [-I$ap_rustls_base/include]) + APR_ADDTO(LDFLAGS, [-L$ap_rustls_base/lib]) + APR_ADDTO(MOD_LDFLAGS, [-L$ap_rustls_base/lib]) + if test "x$ap_platform_runtime_link_flag" != "x"; then + APR_ADDTO(LDFLAGS, [$ap_platform_runtime_link_flag$ap_rustls_base/lib]) + APR_ADDTO(MOD_LDFLAGS, [$ap_platform_runtime_link_flag$ap_rustls_base/lib]) + fi + fi + + AC_MSG_CHECKING([for rustls version >= 0.9.2]) + AC_TRY_COMPILE([#include ],[ +rustls_version(); +rustls_acceptor_new(); +], + [AC_MSG_RESULT(OK) + ac_cv_rustls=yes], + [AC_MSG_RESULT(FAILED)]) + + dnl restore + CPPFLAGS="$saved_CPPFLAGS" + LIBS="$saved_LIBS" + LDFLAGS="$saved_LDFLAGS" + ]) + if test "x$ac_cv_rustls" = "xyes"; then + AC_DEFINE(HAVE_RUSTLS, 1, [Define if rustls is available]) + fi +]) + + +dnl # hook module into the Autoconf mechanism (--enable-http2) +APACHE_MODULE(tls, [TLS protocol handling using rustls. Implemented by mod_tls. +This module requires a librustls installation. +See --with-rustls on how to manage non-standard locations. This module +is usually linked shared and requires loading. ], $tls_objs, , most, [ + APACHE_CHECK_RUSTLS + if test "$ac_cv_rustls" = "yes" ; then + if test "x$enable_tls" = "xshared"; then + case `uname` in + "Darwin") + MOD_TLS_LINK_LIBS="-lrustls -framework Security -framework Foundation" + ;; + *) + MOD_TLS_LINK_LIBS="-lrustls" + ;; + esac + + # Some rustls versions need an extra -lm when linked + # See https://github.com/rustls/rustls-ffi/issues/133 + rustls_version=`rustc --version` + case "$rustls_version" in + *1.55*) need_lm="yes" ;; + *1.56*) need_lm="yes" ;; + *1.57*) need_lm="yes" ;; + esac + if test "$need_lm" = "yes" ; then + MOD_TLS_LINK_LIBS="$MOD_TLS_LINK_LIBS -lm" + fi + + # The only symbol which needs to be exported is the module + # structure, so ask libtool to hide everything else: + APR_ADDTO(MOD_TLS_LDADD, [$MOD_TLS_LINK_LIBS -export-symbols-regex tls_module]) + fi + else + enable_tls=no + fi +]) + + +dnl # end of module specific part +APACHE_MODPATH_FINISH + diff --git a/modules/tls/mod_tls.c b/modules/tls/mod_tls.c new file mode 100644 index 0000000..9d79521 --- /dev/null +++ b/modules/tls/mod_tls.c @@ -0,0 +1,288 @@ +/* Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "mod_tls.h" +#include "tls_conf.h" +#include "tls_core.h" +#include "tls_cache.h" +#include "tls_proto.h" +#include "tls_filter.h" +#include "tls_var.h" +#include "tls_version.h" + +#include "mod_proxy.h" + +static void tls_hooks(apr_pool_t *pool); + +AP_DECLARE_MODULE(tls) = { + STANDARD20_MODULE_STUFF, + tls_conf_create_dir, /* create per dir config */ + tls_conf_merge_dir, /* merge per dir config */ + tls_conf_create_svr, /* create per server config */ + tls_conf_merge_svr, /* merge per server config (inheritance) */ + tls_conf_cmds, /* command handlers */ + tls_hooks, +#if defined(AP_MODULE_FLAG_NONE) + AP_MODULE_FLAG_ALWAYS_MERGE +#endif +}; + +static const char* crustls_version(apr_pool_t *p) +{ + struct rustls_str rversion; + + rversion = rustls_version(); + return apr_pstrndup(p, rversion.data, rversion.len); +} + +static int tls_pre_config(apr_pool_t *pconf, apr_pool_t *plog, apr_pool_t *ptemp) +{ + tls_proto_pre_config(pconf, ptemp); + tls_cache_pre_config(pconf, plog, ptemp); + return OK; +} + +static apr_status_t tls_post_config(apr_pool_t *p, apr_pool_t *plog, + apr_pool_t *ptemp, server_rec *s) +{ + const char *tls_init_key = "mod_tls_init_counter"; + tls_conf_server_t *sc; + void *data = NULL; + + (void)plog; + sc = tls_conf_server_get(s); + assert(sc); + assert(sc->global); + sc->global->module_version = "mod_tls/" MOD_TLS_VERSION; + sc->global->crustls_version = crustls_version(p); + + apr_pool_userdata_get(&data, tls_init_key, s->process->pool); + if (data == NULL) { + /* At the first start, httpd makes a config check dry run + * to see if the config is ok in principle. 
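+ * We detect which pass this is via userdata stored in the process pool:
+ * it is absent on the first (dry run) pass and present afterwards, so the
+ * "initializing" log message below is only emitted for real (re)starts.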
+ */ + ap_log_error(APLOG_MARK, APLOG_TRACE1, 0, s, "post config dry run"); + apr_pool_userdata_set((const void *)1, tls_init_key, + apr_pool_cleanup_null, s->process->pool); + } + else { + ap_log_error(APLOG_MARK, APLOG_INFO, 0, s, APLOGNO(10365) + "%s (%s), initializing...", + sc->global->module_version, + sc->global->crustls_version); + } + + return tls_core_init(p, ptemp, s); +} + +static apr_status_t tls_post_proxy_config( + apr_pool_t *p, apr_pool_t *plog, apr_pool_t *ptemp, server_rec *s) +{ + tls_conf_server_t *sc = tls_conf_server_get(s); + (void)plog; + sc->global->mod_proxy_post_config_done = 1; + return tls_core_init(p, ptemp, s); +} + +#if AP_MODULE_MAGIC_AT_LEAST(20120211, 109) +static int tls_ssl_outgoing(conn_rec *c, ap_conf_vector_t *dir_conf, int enable_ssl) +{ + /* we are not handling proxy connections - for now */ + tls_core_conn_bind(c, dir_conf); + if (enable_ssl && tls_core_setup_outgoing(c) == OK) { + ap_log_error(APLOG_MARK, APLOG_TRACE2, 0, c->base_server, + "accepted ssl_bind_outgoing(enable=%d) for %s", + enable_ssl, c->base_server->server_hostname); + return OK; + } + tls_core_conn_disable(c); + ap_log_error(APLOG_MARK, APLOG_TRACE2, 0, c->base_server, + "declined ssl_bind_outgoing(enable=%d) for %s", + enable_ssl, c->base_server->server_hostname); + return DECLINED; +} + +#else /* #if AP_MODULE_MAGIC_AT_LEAST(20120211, 109) */ + +APR_DECLARE_OPTIONAL_FN(int, ssl_proxy_enable, (conn_rec *)); +APR_DECLARE_OPTIONAL_FN(int, ssl_engine_disable, (conn_rec *)); +APR_DECLARE_OPTIONAL_FN(int, ssl_engine_set, (conn_rec *, + ap_conf_vector_t *, + int proxy, int enable)); +static APR_OPTIONAL_FN_TYPE(ssl_engine_set) *module_ssl_engine_set; + +static int ssl_engine_set( + conn_rec *c, ap_conf_vector_t *dir_conf, int proxy, int enable) +{ + ap_log_error(APLOG_MARK, APLOG_TRACE3, 0, c->base_server, + "ssl_engine_set(proxy=%d, enable=%d) for %s", + proxy, enable, c->base_server->server_hostname); + tls_core_conn_bind(c, dir_conf); + if (enable && tls_core_setup_outgoing(c) == OK) { + if (module_ssl_engine_set) { + module_ssl_engine_set(c, dir_conf, proxy, 0); + } + return 1; + } + if (proxy || !enable) { + /* we are not handling proxy connections - for now */ + tls_core_conn_disable(c); + } + if (module_ssl_engine_set) { + return module_ssl_engine_set(c, dir_conf, proxy, enable); + } + return 0; +} + +static int ssl_proxy_enable(conn_rec *c) +{ + return ssl_engine_set(c, NULL, 1, 1); +} + +static int ssl_engine_disable(conn_rec *c) +{ + return ssl_engine_set(c, NULL, 0, 0); +} + +static apr_status_t tls_post_config_proxy_ssl( + apr_pool_t *p, apr_pool_t *plog, apr_pool_t *ptemp, server_rec *s) +{ + if (1) { + const char *tls_init_key = "mod_tls_proxy_ssl_counter"; + void *data = NULL; + APR_OPTIONAL_FN_TYPE(ssl_engine_set) *fn_ssl_engine_set; + + (void)p; + (void)plog; + (void)ptemp; + apr_pool_userdata_get(&data, tls_init_key, s->process->pool); + if (data == NULL) { + /* At the first start, httpd makes a config check dry run + * to see if the config is ok in principle. + */ + apr_pool_userdata_set((const void *)1, tls_init_key, + apr_pool_cleanup_null, s->process->pool); + return APR_SUCCESS; + } + + /* mod_ssl (if so loaded, has registered its optional functions. + * When mod_proxy runs in post-config, it looks up those functions and uses + * them to manipulate SSL status for backend connections. + * We provide our own implementations to avoid becoming active on such + * connections for now. 
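+ * If mod_ssl already registered an ssl_engine_set function, we remember it
+ * in module_ssl_engine_set and chain to it from our wrapper, so mod_ssl's
+ * own backend handling keeps working wherever we decline the connection.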
+ * */ + fn_ssl_engine_set = APR_RETRIEVE_OPTIONAL_FN(ssl_engine_set); + module_ssl_engine_set = (fn_ssl_engine_set + && fn_ssl_engine_set != ssl_engine_set)? fn_ssl_engine_set : NULL; + APR_REGISTER_OPTIONAL_FN(ssl_engine_set); + APR_REGISTER_OPTIONAL_FN(ssl_proxy_enable); + APR_REGISTER_OPTIONAL_FN(ssl_engine_disable); + } + return APR_SUCCESS; +} +#endif /* #if AP_MODULE_MAGIC_AT_LEAST(20120211, 109) */ + +static void tls_init_child(apr_pool_t *p, server_rec *s) +{ + tls_cache_init_child(p, s); +} + +static int hook_pre_connection(conn_rec *c, void *csd) +{ + (void)csd; /* mpm specific socket data, not used */ + + /* are we on a primary connection? */ + if (c->master) return DECLINED; + + /* Decide connection TLS stats and install our + * input/output filters for handling TLS/application data + * if enabled. + */ + return tls_filter_pre_conn_init(c); +} + +static int hook_connection(conn_rec* c) +{ + tls_filter_conn_init(c); + /* we do *not* take over. we are not processing requests. */ + return DECLINED; +} + +static const char *tls_hook_http_scheme(const request_rec *r) +{ + return (tls_conn_check_ssl(r->connection) == OK)? "https" : NULL; +} + +static apr_port_t tls_hook_default_port(const request_rec *r) +{ + return (tls_conn_check_ssl(r->connection) == OK) ? 443 : 0; +} + +static const char* const mod_http2[] = { "mod_http2.c", NULL}; + +static void tls_hooks(apr_pool_t *pool) +{ + /* If our request check denies further processing, certain things + * need to be in place for the response to be correctly generated. */ + static const char *dep_req_check[] = { "mod_setenvif.c", NULL }; + static const char *dep_proxy[] = { "mod_proxy.c", NULL }; + + ap_log_perror(APLOG_MARK, APLOG_TRACE1, 0, pool, "installing hooks"); + tls_filter_register(pool); + + ap_hook_pre_config(tls_pre_config, NULL,NULL, APR_HOOK_MIDDLE); + /* run post-config hooks one before, one after mod_proxy, as the + * mod_proxy's own one calls us in its "section_post_config" hook. */ + ap_hook_post_config(tls_post_config, NULL, dep_proxy, APR_HOOK_MIDDLE); + APR_OPTIONAL_HOOK(proxy, section_post_config, + tls_proxy_section_post_config, NULL, NULL, + APR_HOOK_MIDDLE); + ap_hook_post_config(tls_post_proxy_config, dep_proxy, NULL, APR_HOOK_MIDDLE); + ap_hook_child_init(tls_init_child, NULL,NULL, APR_HOOK_MIDDLE); + /* connection things */ + ap_hook_pre_connection(hook_pre_connection, NULL, NULL, APR_HOOK_MIDDLE); + ap_hook_process_connection(hook_connection, NULL, mod_http2, APR_HOOK_MIDDLE); + /* request things */ + ap_hook_default_port(tls_hook_default_port, NULL,NULL, APR_HOOK_MIDDLE); + ap_hook_http_scheme(tls_hook_http_scheme, NULL,NULL, APR_HOOK_MIDDLE); + ap_hook_post_read_request(tls_core_request_check, dep_req_check, NULL, APR_HOOK_MIDDLE); + ap_hook_fixups(tls_var_request_fixup, NULL,NULL, APR_HOOK_MIDDLE); + + ap_hook_ssl_conn_is_ssl(tls_conn_check_ssl, NULL, NULL, APR_HOOK_MIDDLE); + ap_hook_ssl_var_lookup(tls_var_lookup, NULL, NULL, APR_HOOK_MIDDLE); + +#if AP_MODULE_MAGIC_AT_LEAST(20120211, 109) + ap_hook_ssl_bind_outgoing(tls_ssl_outgoing, NULL, NULL, APR_HOOK_MIDDLE); +#else + ap_hook_post_config(tls_post_config_proxy_ssl, NULL, dep_proxy, APR_HOOK_MIDDLE); +#endif + +} diff --git a/modules/tls/mod_tls.h b/modules/tls/mod_tls.h new file mode 100644 index 0000000..db7dc41 --- /dev/null +++ b/modules/tls/mod_tls.h @@ -0,0 +1,19 @@ +/* Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef mod_tls_h +#define mod_tls_h + +#endif /* mod_tls_h */ \ No newline at end of file diff --git a/modules/tls/tls_cache.c b/modules/tls/tls_cache.c new file mode 100644 index 0000000..de4be18 --- /dev/null +++ b/modules/tls/tls_cache.c @@ -0,0 +1,310 @@ +/* Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include + +#include "tls_conf.h" +#include "tls_core.h" +#include "tls_cache.h" + +extern module AP_MODULE_DECLARE_DATA tls_module; +APLOG_USE_MODULE(tls); + +#define TLS_CACHE_DEF_PROVIDER "shmcb" +#define TLS_CACHE_DEF_DIR "tls" +#define TLS_CACHE_DEF_FILE "session_cache" +#define TLS_CACHE_DEF_SIZE 512000 + +static const char *cache_provider_unknown(const char *name, apr_pool_t *p) +{ + apr_array_header_t *known; + const char *known_names; + + known = ap_list_provider_names(p, AP_SOCACHE_PROVIDER_GROUP, + AP_SOCACHE_PROVIDER_VERSION); + known_names = apr_array_pstrcat(p, known, ','); + return apr_psprintf(p, "cache type '%s' not supported " + "(known names: %s). Maybe you need to load the " + "appropriate socache module (mod_socache_%s?).", + name, known_names, name); +} + +void tls_cache_pre_config(apr_pool_t *pconf, apr_pool_t *plog, apr_pool_t *ptemp) +{ + (void)plog; + (void)ptemp; + /* we make this visible, in case someone wants to configure it. + * this does not mean that we will really use it, which is determined + * by configuration and cache provider capabilities. 
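+ * Registering the mutex type here also lets administrators tune it via the
+ * core 'Mutex' directive before it is known whether the selected cache
+ * provider will need the lock at all.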
*/ + ap_mutex_register(pconf, TLS_SESSION_CACHE_MUTEX_TYPE, NULL, APR_LOCK_DEFAULT, 0); +} + +static const char *cache_init(tls_conf_global_t *gconf, apr_pool_t *p, apr_pool_t *ptemp) +{ + const char *err = NULL; + const char *name, *args = NULL; + apr_status_t rv; + + if (gconf->session_cache) { + goto cleanup; + } + else if (!apr_strnatcasecmp("none", gconf->session_cache_spec)) { + gconf->session_cache_provider = NULL; + gconf->session_cache = NULL; + ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, gconf->ap_server, APLOGNO(10346) + "session cache explicitly disabled"); + goto cleanup; + } + else if (!apr_strnatcasecmp("default", gconf->session_cache_spec)) { + const char *path = TLS_CACHE_DEF_DIR; + +#if AP_MODULE_MAGIC_AT_LEAST(20180906, 2) + path = ap_state_dir_relative(p, path); +#endif + gconf->session_cache_spec = apr_psprintf(p, "%s:%s/%s(%ld)", + TLS_CACHE_DEF_PROVIDER, path, TLS_CACHE_DEF_FILE, (long)TLS_CACHE_DEF_SIZE); + gconf->session_cache_spec = "shmcb:mod_tls-sesss(64000)"; + } + + ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, gconf->ap_server, APLOGNO(10347) + "Using session cache: %s", gconf->session_cache_spec); + name = gconf->session_cache_spec; + args = ap_strchr((char*)name, ':'); + if (args) { + name = apr_pstrmemdup(p, name, (apr_size_t)(args - name)); + ++args; + } + gconf->session_cache_provider = ap_lookup_provider(AP_SOCACHE_PROVIDER_GROUP, + name, AP_SOCACHE_PROVIDER_VERSION); + if (!gconf->session_cache_provider) { + err = cache_provider_unknown(name, p); + goto cleanup; + } + err = gconf->session_cache_provider->create(&gconf->session_cache, args, ptemp, p); + if (err != NULL) goto cleanup; + + if (gconf->session_cache_provider->flags & AP_SOCACHE_FLAG_NOTMPSAFE + && !gconf->session_cache_mutex) { + /* we need a global lock to access the cache */ + rv = ap_global_mutex_create(&gconf->session_cache_mutex, NULL, + TLS_SESSION_CACHE_MUTEX_TYPE, NULL, gconf->ap_server, p, 0); + if (APR_SUCCESS != rv) { + err = apr_psprintf(p, "error setting up global %s mutex: %d", + TLS_SESSION_CACHE_MUTEX_TYPE, rv); + gconf->session_cache_mutex = NULL; + goto cleanup; + } + } + +cleanup: + if (NULL != err) { + gconf->session_cache_provider = NULL; + gconf->session_cache = NULL; + } + return err; +} + +const char *tls_cache_set_specification( + const char *spec, tls_conf_global_t *gconf, apr_pool_t *p, apr_pool_t *ptemp) +{ + gconf->session_cache_spec = spec; + return cache_init(gconf, p, ptemp); +} + +apr_status_t tls_cache_post_config(apr_pool_t *p, apr_pool_t *ptemp, server_rec *s) +{ + tls_conf_server_t *sc = tls_conf_server_get(s); + const char *err; + apr_status_t rv = APR_SUCCESS; + + err = cache_init(sc->global, p, ptemp); + if (err) { + ap_log_error(APLOG_MARK, APLOG_WARNING, 0, s, APLOGNO(10348) + "session cache [%s] could not be initialized, will continue " + "without session one. Since this will impact performance, " + "consider making use of the 'TLSSessionCache' directive. 
The " + "error was: %s", sc->global->session_cache_spec, err); + } + + if (sc->global->session_cache) { + struct ap_socache_hints hints; + + ap_log_error(APLOG_MARK, APLOG_TRACE1, 0, s, "provider init session cache [%s]", + sc->global->session_cache_spec); + memset(&hints, 0, sizeof(hints)); + hints.avg_obj_size = 100; + hints.avg_id_len = 33; + hints.expiry_interval = 30; + + rv = sc->global->session_cache_provider->init( + sc->global->session_cache, "mod_tls-sess", &hints, s, p); + if (APR_SUCCESS != rv) { + ap_log_error(APLOG_MARK, APLOG_EMERG, 0, s, APLOGNO(10349) + "error initializing session cache."); + } + } + return rv; +} + +void tls_cache_init_child(apr_pool_t *p, server_rec *s) +{ + tls_conf_server_t *sc = tls_conf_server_get(s); + const char *lockfile; + apr_status_t rv; + + if (sc->global->session_cache_mutex) { + lockfile = apr_global_mutex_lockfile(sc->global->session_cache_mutex); + rv = apr_global_mutex_child_init(&sc->global->session_cache_mutex, lockfile, p); + if (APR_SUCCESS != rv) { + ap_log_error(APLOG_MARK, APLOG_ERR, rv, s, APLOGNO(10350) + "Cannot reinit %s mutex (file `%s`)", + TLS_SESSION_CACHE_MUTEX_TYPE, lockfile? lockfile : "-"); + } + } +} + +void tls_cache_free(server_rec *s) +{ + tls_conf_server_t *sc = tls_conf_server_get(s); + if (sc->global->session_cache_provider) { + sc->global->session_cache_provider->destroy(sc->global->session_cache, s); + } +} + +static void tls_cache_lock(tls_conf_global_t *gconf) +{ + if (gconf->session_cache_mutex) { + apr_status_t rv = apr_global_mutex_lock(gconf->session_cache_mutex); + if (APR_SUCCESS != rv) { + ap_log_error(APLOG_MARK, APLOG_WARNING, rv, gconf->ap_server, APLOGNO(10351) + "Failed to acquire TLS session cache lock"); + } + } +} + +static void tls_cache_unlock(tls_conf_global_t *gconf) +{ + if (gconf->session_cache_mutex) { + apr_status_t rv = apr_global_mutex_unlock(gconf->session_cache_mutex); + if (APR_SUCCESS != rv) { + ap_log_error(APLOG_MARK, APLOG_WARNING, rv, gconf->ap_server, APLOGNO(10352) + "Failed to release TLS session cache lock"); + } + } +} + +static rustls_result tls_cache_get( + void *userdata, + const rustls_slice_bytes *key, + int remove_after, + unsigned char *buf, + size_t count, + size_t *out_n) +{ + conn_rec *c = userdata; + tls_conf_conn_t *cc = tls_conf_conn_get(c); + tls_conf_server_t *sc = tls_conf_server_get(cc->server); + apr_status_t rv = APR_ENOENT; + unsigned int vlen, klen; + const unsigned char *kdata; + + if (!sc->global->session_cache) goto not_found; + tls_cache_lock(sc->global); + + kdata = key->data; + klen = (unsigned int)key->len; + vlen = (unsigned int)count; + rv = sc->global->session_cache_provider->retrieve( + sc->global->session_cache, cc->server, kdata, klen, buf, &vlen, c->pool); + + if (APLOGctrace4(c)) { + apr_ssize_t n = klen; + ap_log_cerror(APLOG_MARK, APLOG_TRACE4, rv, c, "retrieve key %d[%8x], found %d val", + klen, apr_hashfunc_default((const char*)kdata, &n), vlen); + } + if (remove_after || (APR_SUCCESS != rv && !APR_STATUS_IS_NOTFOUND(rv))) { + sc->global->session_cache_provider->remove( + sc->global->session_cache, cc->server, key->data, klen, c->pool); + } + + tls_cache_unlock(sc->global); + if (APR_SUCCESS != rv) goto not_found; + cc->session_id_cache_hit = 1; + *out_n = count; + return RUSTLS_RESULT_OK; + +not_found: + *out_n = 0; + return RUSTLS_RESULT_NOT_FOUND; +} + +static rustls_result tls_cache_put( + void *userdata, + const rustls_slice_bytes *key, + const rustls_slice_bytes *val) +{ + conn_rec *c = userdata; + tls_conf_conn_t *cc = 
tls_conf_conn_get(c); + tls_conf_server_t *sc = tls_conf_server_get(cc->server); + apr_status_t rv = APR_ENOENT; + apr_time_t expires_at; + unsigned int klen, vlen; + const unsigned char *kdata; + + if (!sc->global->session_cache) goto not_stored; + tls_cache_lock(sc->global); + + expires_at = apr_time_now() + apr_time_from_sec(300); + kdata = key->data; + klen = (unsigned int)key->len; + vlen = (unsigned int)val->len; + rv = sc->global->session_cache_provider->store(sc->global->session_cache, cc->server, + kdata, klen, expires_at, + (unsigned char*)val->data, vlen, c->pool); + if (APLOGctrace4(c)) { + ap_log_cerror(APLOG_MARK, APLOG_TRACE4, rv, c, + "stored %d key bytes, with %d val bytes", klen, vlen); + } + tls_cache_unlock(sc->global); + if (APR_SUCCESS != rv) goto not_stored; + return RUSTLS_RESULT_OK; + +not_stored: + return RUSTLS_RESULT_NOT_FOUND; +} + +apr_status_t tls_cache_init_server( + rustls_server_config_builder *builder, server_rec *s) +{ + tls_conf_server_t *sc = tls_conf_server_get(s); + + if (sc && sc->global->session_cache) { + ap_log_error(APLOG_MARK, APLOG_TRACE3, 0, s, "adding session persistence to rustls"); + rustls_server_config_builder_set_persistence( + builder, tls_cache_get, tls_cache_put); + } + return APR_SUCCESS; +} diff --git a/modules/tls/tls_cache.h b/modules/tls/tls_cache.h new file mode 100644 index 0000000..64ca077 --- /dev/null +++ b/modules/tls/tls_cache.h @@ -0,0 +1,63 @@ +/* Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef tls_cache_h +#define tls_cache_h + +/* name of the global session cache mutex, should we need it */ +#define TLS_SESSION_CACHE_MUTEX_TYPE "tls-session-cache" + + +/** + * Set the specification of the session cache to use. The syntax is + * "default|none|(:)?" + * + * @param spec the cache specification + * @param gconf the modules global configuration + * @param p pool for permanent allocations + * @param ptemp pool for temporary allocations + * @return NULL on success or an error message + */ +const char *tls_cache_set_specification( + const char *spec, tls_conf_global_t *gconf, apr_pool_t *p, apr_pool_t *ptemp); + +/** + * Setup before configuration runs, announces our potential global mutex. + */ +void tls_cache_pre_config(apr_pool_t *pconf, apr_pool_t *plog, apr_pool_t *ptemp); + +/** + * Verify the cache settings at the end of the configuration and + * create the default session cache, if not already done. + */ +apr_status_t tls_cache_post_config(apr_pool_t *p, apr_pool_t *ptemp, server_rec *s); + +/** + * Started a new child, make sure that global mutex we might use is set up. + */ +void tls_cache_init_child(apr_pool_t *p, server_rec *s); + +/** + * Free all cache related resources. 
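+ * This asks the configured socache provider to destroy its instance; it is
+ * a no-op when no session cache was set up.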
+ */ +void tls_cache_free(server_rec *s); + +/** + * Initialize the session store for the server's config builder. + */ +apr_status_t tls_cache_init_server( + rustls_server_config_builder *builder, server_rec *s); + +#endif /* tls_cache_h */ \ No newline at end of file diff --git a/modules/tls/tls_cert.c b/modules/tls/tls_cert.c new file mode 100644 index 0000000..624535a --- /dev/null +++ b/modules/tls/tls_cert.c @@ -0,0 +1,564 @@ +/* Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#include +#include +#include +#include + +#include +#include +#include +#include + +#include + +#include "tls_cert.h" +#include "tls_util.h" + +extern module AP_MODULE_DECLARE_DATA tls_module; +APLOG_USE_MODULE(tls); + + +apr_status_t tls_cert_load_pem( + apr_pool_t *p, const tls_cert_spec_t *cert, tls_cert_pem_t **ppem) +{ + apr_status_t rv; + const char *fpath; + tls_cert_pem_t *cpem; + + ap_assert(cert->cert_file); + cpem = apr_pcalloc(p, sizeof(*cpem)); + fpath = ap_server_root_relative(p, cert->cert_file); + if (NULL == fpath) { + rv = APR_ENOENT; goto cleanup; + } + rv = tls_util_file_load(p, fpath, 0, 100*1024, &cpem->cert_pem); + if (APR_SUCCESS != rv) goto cleanup; + + if (cert->pkey_file) { + fpath = ap_server_root_relative(p, cert->pkey_file); + if (NULL == fpath) { + rv = APR_ENOENT; goto cleanup; + } + rv = tls_util_file_load(p, fpath, 0, 100*1024, &cpem->pkey_pem); + if (APR_SUCCESS != rv) goto cleanup; + } + else { + cpem->pkey_pem = cpem->cert_pem; + } +cleanup: + *ppem = (APR_SUCCESS == rv)? cpem : NULL; + return rv; +} + +#define PEM_IN_CHUNK 48 /* PEM demands at most 64 chars per line */ + +static apr_status_t tls_der_to_pem( + const char **ppem, apr_pool_t *p, + const unsigned char *der_data, apr_size_t der_len, + const char *header, const char *footer) +{ + apr_status_t rv = APR_SUCCESS; + char *pem = NULL, *s; + apr_size_t b64_len, n, hd_len, ft_len; + apr_ssize_t in_len, i; + + if (der_len > INT_MAX) { + rv = APR_ENOMEM; + goto cleanup; + } + in_len = (apr_ssize_t)der_len; + rv = apr_encode_base64(NULL, (const char*)der_data, in_len, APR_ENCODE_NONE, &b64_len); + if (APR_SUCCESS != rv) goto cleanup; + if (b64_len > INT_MAX) { + rv = APR_ENOMEM; + goto cleanup; + } + hd_len = header? strlen(header) : 0; + ft_len = footer? strlen(footer) : 0; + s = pem = apr_pcalloc(p, + + b64_len + (der_len/PEM_IN_CHUNK) + 1 /* \n per chunk */ + + hd_len +1 + ft_len + 1 /* adding \n */ + + 1); /* NUL-terminated */ + if (header) { + strcpy(s, header); + s += hd_len; + *s++ = '\n'; + } + for (i = 0; in_len > 0; i += PEM_IN_CHUNK, in_len -= PEM_IN_CHUNK) { + rv = apr_encode_base64(s, + (const char*)der_data + i, in_len > PEM_IN_CHUNK? 
PEM_IN_CHUNK : in_len, + APR_ENCODE_NONE, &n); + s += n; + *s++ = '\n'; + } + if (footer) { + strcpy(s, footer); + s += ft_len; + *s++ = '\n'; + } +cleanup: + *ppem = (APR_SUCCESS == rv)? pem : NULL; + return rv; +} + +#define PEM_CERT_HD "-----BEGIN CERTIFICATE-----" +#define PEM_CERT_FT "-----END CERTIFICATE-----" + +apr_status_t tls_cert_to_pem(const char **ppem, apr_pool_t *p, const rustls_certificate *cert) +{ + const unsigned char* der_data; + size_t der_len; + rustls_result rr = RUSTLS_RESULT_OK; + apr_status_t rv = APR_SUCCESS; + const char *pem = NULL; + + rr = rustls_certificate_get_der(cert, &der_data, &der_len); + if (RUSTLS_RESULT_OK != rr) goto cleanup; + rv = tls_der_to_pem(&pem, p, der_data, der_len, PEM_CERT_HD, PEM_CERT_FT); +cleanup: + if (RUSTLS_RESULT_OK != rr) { + rv = tls_util_rustls_error(p, rr, NULL); + } + *ppem = (APR_SUCCESS == rv)? pem : NULL; + return rv; +} + +static void nullify_key_pem(tls_cert_pem_t *pems) +{ + if (pems->pkey_pem.len) { + memset((void*)pems->pkey_pem.data, 0, pems->pkey_pem.len); + } +} + +static apr_status_t make_certified_key( + apr_pool_t *p, const char *name, + const tls_data_t *cert_pem, const tls_data_t *pkey_pem, + const rustls_certified_key **pckey) +{ + const rustls_certified_key *ckey = NULL; + rustls_result rr = RUSTLS_RESULT_OK; + apr_status_t rv = APR_SUCCESS; + + rr = rustls_certified_key_build( + cert_pem->data, cert_pem->len, + pkey_pem->data, pkey_pem->len, + &ckey); + + if (RUSTLS_RESULT_OK != rr) { + const char *err_descr; + rv = tls_util_rustls_error(p, rr, &err_descr); + ap_log_perror(APLOG_MARK, APLOG_ERR, rv, p, APLOGNO(10363) + "Failed to load certified key %s: [%d] %s", + name, (int)rr, err_descr); + } + if (APR_SUCCESS == rv) { + *pckey = ckey; + } + else if (ckey) { + rustls_certified_key_free(ckey); + } + return rv; +} + +apr_status_t tls_cert_load_cert_key( + apr_pool_t *p, const tls_cert_spec_t *spec, + const char **pcert_pem, const rustls_certified_key **pckey) +{ + apr_status_t rv = APR_SUCCESS; + + if (spec->cert_file) { + tls_cert_pem_t *pems; + + rv = tls_cert_load_pem(p, spec, &pems); + if (APR_SUCCESS != rv) goto cleanup; + if (pcert_pem) *pcert_pem = tls_data_to_str(p, &pems->cert_pem); + rv = make_certified_key(p, spec->cert_file, &pems->cert_pem, &pems->pkey_pem, pckey); + /* dont want them hanging around in memory unnecessarily. 
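+ * (nullify_key_pem() below simply overwrites the loaded private key bytes
+ * with zeros now that the rustls certified key has been built from them.)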
*/ + nullify_key_pem(pems); + } + else if (spec->cert_pem) { + tls_data_t pkey_pem, pem; + pem = tls_data_from_str(spec->cert_pem); + if (spec->pkey_pem) { + pkey_pem = tls_data_from_str(spec->pkey_pem); + } + else { + pkey_pem = pem; + } + if (pcert_pem) *pcert_pem = spec->cert_pem; + rv = make_certified_key(p, "memory", &pem, &pkey_pem, pckey); + /* pems provided from outside are responsibility of the caller */ + } + else { + rv = APR_ENOENT; goto cleanup; + } +cleanup: + return rv; +} + +typedef struct { + const char *id; + const char *cert_pem; + server_rec *server; + const rustls_certified_key *certified_key; +} tls_cert_reg_entry_t; + +static int reg_entry_cleanup(void *ctx, const void *key, apr_ssize_t klen, const void *val) +{ + tls_cert_reg_entry_t *entry = (tls_cert_reg_entry_t*)val; + (void)ctx; (void)key; (void)klen; + if (entry->certified_key) { + rustls_certified_key_free(entry->certified_key); + entry->certified_key = NULL; + } + return 1; +} + +static apr_status_t reg_cleanup(void *data) +{ + tls_cert_reg_t *reg = data; + if (reg->id2entry) { + apr_hash_do(reg_entry_cleanup, reg, reg->id2entry); + apr_hash_clear(reg->id2entry); + if (reg->key2entry) apr_hash_clear(reg->key2entry); + } + return APR_SUCCESS; +} + +tls_cert_reg_t *tls_cert_reg_make(apr_pool_t *p) +{ + tls_cert_reg_t *reg; + + reg = apr_pcalloc(p, sizeof(*reg)); + reg->pool = p; + reg->id2entry = apr_hash_make(p); + reg->key2entry = apr_hash_make(p); + apr_pool_cleanup_register(p, reg, reg_cleanup, apr_pool_cleanup_null); + return reg; +} + +apr_size_t tls_cert_reg_count(tls_cert_reg_t *reg) +{ + return apr_hash_count(reg->id2entry); +} + +static const char *cert_spec_to_id(const tls_cert_spec_t *spec) +{ + if (spec->cert_file) return spec->cert_file; + if (spec->cert_pem) return spec->cert_pem; + return NULL; +} + +apr_status_t tls_cert_reg_get_certified_key( + tls_cert_reg_t *reg, server_rec *s, const tls_cert_spec_t *spec, + const rustls_certified_key **pckey) +{ + apr_status_t rv = APR_SUCCESS; + const char *id; + tls_cert_reg_entry_t *entry; + + id = cert_spec_to_id(spec); + assert(id); + entry = apr_hash_get(reg->id2entry, id, APR_HASH_KEY_STRING); + if (!entry) { + const rustls_certified_key *certified_key; + const char *cert_pem; + rv = tls_cert_load_cert_key(reg->pool, spec, &cert_pem, &certified_key); + if (APR_SUCCESS != rv) goto cleanup; + entry = apr_pcalloc(reg->pool, sizeof(*entry)); + entry->id = apr_pstrdup(reg->pool, id); + entry->cert_pem = cert_pem; + entry->server = s; + entry->certified_key = certified_key; + apr_hash_set(reg->id2entry, entry->id, APR_HASH_KEY_STRING, entry); + /* associates the pointer value */ + apr_hash_set(reg->key2entry, &entry->certified_key, sizeof(entry->certified_key), entry); + } + +cleanup: + if (APR_SUCCESS == rv) { + *pckey = entry->certified_key; + } + else { + *pckey = NULL; + } + return rv; +} + +typedef struct { + void *userdata; + tls_cert_reg_visitor *visitor; +} reg_visit_ctx_t; + +static int reg_visit(void *vctx, const void *key, apr_ssize_t klen, const void *val) +{ + reg_visit_ctx_t *ctx = vctx; + tls_cert_reg_entry_t *entry = (tls_cert_reg_entry_t*)val; + + (void)key; (void)klen; + return ctx->visitor(ctx->userdata, entry->server, entry->id, entry->cert_pem, entry->certified_key); +} + +void tls_cert_reg_do( + tls_cert_reg_visitor *visitor, void *userdata, tls_cert_reg_t *reg) +{ + reg_visit_ctx_t ctx; + ctx.visitor = visitor; + ctx.userdata = userdata; + apr_hash_do(reg_visit, &ctx, reg->id2entry); +} + +const char 
*tls_cert_reg_get_id(tls_cert_reg_t *reg, const rustls_certified_key *certified_key) +{ + tls_cert_reg_entry_t *entry; + + entry = apr_hash_get(reg->key2entry, &certified_key, sizeof(certified_key)); + return entry? entry->id : NULL; +} + +apr_status_t tls_cert_load_root_store( + apr_pool_t *p, const char *store_file, rustls_root_cert_store **pstore) +{ + const char *fpath; + tls_data_t pem; + rustls_root_cert_store *store = NULL; + rustls_result rr = RUSTLS_RESULT_OK; + apr_pool_t *ptemp = NULL; + apr_status_t rv; + + ap_assert(store_file); + + rv = apr_pool_create(&ptemp, p); + if (APR_SUCCESS != rv) goto cleanup; + apr_pool_tag(ptemp, "tls_load_root_cert_store"); + fpath = ap_server_root_relative(ptemp, store_file); + if (NULL == fpath) { + rv = APR_ENOENT; goto cleanup; + } + /* we use this for client auth CAs. 1MB seems large enough. */ + rv = tls_util_file_load(ptemp, fpath, 0, 1024*1024, &pem); + if (APR_SUCCESS != rv) goto cleanup; + + store = rustls_root_cert_store_new(); + rr = rustls_root_cert_store_add_pem(store, pem.data, pem.len, 1); + if (RUSTLS_RESULT_OK != rr) goto cleanup; + +cleanup: + if (RUSTLS_RESULT_OK != rr) { + const char *err_descr; + rv = tls_util_rustls_error(p, rr, &err_descr); + ap_log_perror(APLOG_MARK, APLOG_ERR, rv, p, APLOGNO(10364) + "Failed to load root store %s: [%d] %s", + store_file, (int)rr, err_descr); + } + if (APR_SUCCESS == rv) { + *pstore = store; + } + else { + *pstore = NULL; + if (store) rustls_root_cert_store_free(store); + } + if (ptemp) apr_pool_destroy(ptemp); + return rv; +} + +typedef struct { + const char *id; + rustls_root_cert_store *store; +} tls_cert_root_stores_entry_t; + +static int stores_entry_cleanup(void *ctx, const void *key, apr_ssize_t klen, const void *val) +{ + tls_cert_root_stores_entry_t *entry = (tls_cert_root_stores_entry_t*)val; + (void)ctx; (void)key; (void)klen; + if (entry->store) { + rustls_root_cert_store_free(entry->store); + entry->store = NULL; + } + return 1; +} + +static apr_status_t stores_cleanup(void *data) +{ + tls_cert_root_stores_t *stores = data; + tls_cert_root_stores_clear(stores); + return APR_SUCCESS; +} + +tls_cert_root_stores_t *tls_cert_root_stores_make(apr_pool_t *p) +{ + tls_cert_root_stores_t *stores; + + stores = apr_pcalloc(p, sizeof(*stores)); + stores->pool = p; + stores->file2store = apr_hash_make(p); + apr_pool_cleanup_register(p, stores, stores_cleanup, apr_pool_cleanup_null); + return stores; +} + +void tls_cert_root_stores_clear(tls_cert_root_stores_t *stores) +{ + if (stores->file2store) { + apr_hash_do(stores_entry_cleanup, stores, stores->file2store); + apr_hash_clear(stores->file2store); + } +} + +apr_status_t tls_cert_root_stores_get( + tls_cert_root_stores_t *stores, + const char *store_file, + rustls_root_cert_store **pstore) +{ + apr_status_t rv = APR_SUCCESS; + tls_cert_root_stores_entry_t *entry; + + entry = apr_hash_get(stores->file2store, store_file, APR_HASH_KEY_STRING); + if (!entry) { + rustls_root_cert_store *store; + rv = tls_cert_load_root_store(stores->pool, store_file, &store); + if (APR_SUCCESS != rv) goto cleanup; + entry = apr_pcalloc(stores->pool, sizeof(*entry)); + entry->id = apr_pstrdup(stores->pool, store_file); + entry->store = store; + apr_hash_set(stores->file2store, entry->id, APR_HASH_KEY_STRING, entry); + } + +cleanup: + if (APR_SUCCESS == rv) { + *pstore = entry->store; + } + else { + *pstore = NULL; + } + return rv; +} + +typedef struct { + const char *id; + const rustls_client_cert_verifier *client_verifier; + const 
rustls_client_cert_verifier_optional *client_verifier_opt; +} tls_cert_verifiers_entry_t; + +static int verifiers_entry_cleanup(void *ctx, const void *key, apr_ssize_t klen, const void *val) +{ + tls_cert_verifiers_entry_t *entry = (tls_cert_verifiers_entry_t*)val; + (void)ctx; (void)key; (void)klen; + if (entry->client_verifier) { + rustls_client_cert_verifier_free(entry->client_verifier); + entry->client_verifier = NULL; + } + if (entry->client_verifier_opt) { + rustls_client_cert_verifier_optional_free(entry->client_verifier_opt); + entry->client_verifier_opt = NULL; + } + return 1; +} + +static apr_status_t verifiers_cleanup(void *data) +{ + tls_cert_verifiers_t *verifiers = data; + tls_cert_verifiers_clear(verifiers); + return APR_SUCCESS; +} + +tls_cert_verifiers_t *tls_cert_verifiers_make( + apr_pool_t *p, tls_cert_root_stores_t *stores) +{ + tls_cert_verifiers_t *verifiers; + + verifiers = apr_pcalloc(p, sizeof(*verifiers)); + verifiers->pool = p; + verifiers->stores = stores; + verifiers->file2verifier = apr_hash_make(p); + apr_pool_cleanup_register(p, verifiers, verifiers_cleanup, apr_pool_cleanup_null); + return verifiers; +} + +void tls_cert_verifiers_clear(tls_cert_verifiers_t *verifiers) +{ + if (verifiers->file2verifier) { + apr_hash_do(verifiers_entry_cleanup, verifiers, verifiers->file2verifier); + apr_hash_clear(verifiers->file2verifier); + } +} + +static tls_cert_verifiers_entry_t * verifiers_get_or_make_entry( + tls_cert_verifiers_t *verifiers, + const char *store_file) +{ + tls_cert_verifiers_entry_t *entry; + + entry = apr_hash_get(verifiers->file2verifier, store_file, APR_HASH_KEY_STRING); + if (!entry) { + entry = apr_pcalloc(verifiers->pool, sizeof(*entry)); + entry->id = apr_pstrdup(verifiers->pool, store_file); + apr_hash_set(verifiers->file2verifier, entry->id, APR_HASH_KEY_STRING, entry); + } + return entry; +} + +apr_status_t tls_cert_client_verifiers_get( + tls_cert_verifiers_t *verifiers, + const char *store_file, + const rustls_client_cert_verifier **pverifier) +{ + apr_status_t rv = APR_SUCCESS; + tls_cert_verifiers_entry_t *entry; + + entry = verifiers_get_or_make_entry(verifiers, store_file); + if (!entry->client_verifier) { + rustls_root_cert_store *store; + rv = tls_cert_root_stores_get(verifiers->stores, store_file, &store); + if (APR_SUCCESS != rv) goto cleanup; + entry->client_verifier = rustls_client_cert_verifier_new(store); + } + +cleanup: + if (APR_SUCCESS == rv) { + *pverifier = entry->client_verifier; + } + else { + *pverifier = NULL; + } + return rv; +} + +apr_status_t tls_cert_client_verifiers_get_optional( + tls_cert_verifiers_t *verifiers, + const char *store_file, + const rustls_client_cert_verifier_optional **pverifier) +{ + apr_status_t rv = APR_SUCCESS; + tls_cert_verifiers_entry_t *entry; + + entry = verifiers_get_or_make_entry(verifiers, store_file); + if (!entry->client_verifier_opt) { + rustls_root_cert_store *store; + rv = tls_cert_root_stores_get(verifiers->stores, store_file, &store); + if (APR_SUCCESS != rv) goto cleanup; + entry->client_verifier_opt = rustls_client_cert_verifier_optional_new(store); + } + +cleanup: + if (APR_SUCCESS == rv) { + *pverifier = entry->client_verifier_opt; + } + else { + *pverifier = NULL; + } + return rv; +} diff --git a/modules/tls/tls_cert.h b/modules/tls/tls_cert.h new file mode 100644 index 0000000..6ab3f48 --- /dev/null +++ b/modules/tls/tls_cert.h @@ -0,0 +1,211 @@ +/* Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef tls_cert_h +#define tls_cert_h + +#include "tls_util.h" + +/** + * The PEM data of a certificate and its key. + */ +typedef struct { + tls_data_t cert_pem; + tls_data_t pkey_pem; +} tls_cert_pem_t; + +/** + * Specify a certificate via files or PEM data. + */ +typedef struct { + const char *cert_file; /* file path, relative to ap_root */ + const char *pkey_file; /* file path, relative to ap_root */ + const char *cert_pem; /* NUL-terminated PEM string */ + const char *pkey_pem; /* NUL-terminated PEM string */ +} tls_cert_spec_t; + +/** + * Load the PEM data for a certificate file and key file as given in `cert`. + */ +apr_status_t tls_cert_load_pem( + apr_pool_t *p, const tls_cert_spec_t *cert, tls_cert_pem_t **ppem); + +apr_status_t tls_cert_to_pem(const char **ppem, apr_pool_t *p, const rustls_certificate *cert); + +/** + * Load a rustls certified key from a certificate specification. + * The returned `rustls_certified_key` is owned by the caller. + * @param p the memory pool to use + * @param spec the specification for the certificate (file or PEM data) + * @param cert_pem return the PEM data used for loading the certificates, optional + * @param pckey the loaded certified key on return + */ +apr_status_t tls_cert_load_cert_key( + apr_pool_t *p, const tls_cert_spec_t *spec, + const char **pcert_pem, const rustls_certified_key **pckey); + +/** + * A registry of rustls_certified_key* by identifier. + */ +typedef struct tls_cert_reg_t tls_cert_reg_t; +struct tls_cert_reg_t{ + apr_pool_t *pool; + apr_hash_t *id2entry; + apr_hash_t *key2entry; +}; + +/** + * Create a new registry with lifetime based on the memory pool. + * The registry will take care of its memory and allocated keys when + * the pool is destroyed. + */ +tls_cert_reg_t *tls_cert_reg_make(apr_pool_t *p); + +/** + * Return the number of certified keys in the registry. + */ +apr_size_t tls_cert_reg_count(tls_cert_reg_t *reg); + +/** + * Get a the `rustls_certified_key` identified by `spec` from the registry. + * This will load the key the first time it is requested. + * The returned `rustls_certified_key` is owned by the registry. + * @param reg the certified key registry + * @param s the server_rec this is loaded into, useful for error logging + * @param spec the specification of the certified key + * @param pckey the certified key instance on return + */ +apr_status_t tls_cert_reg_get_certified_key( + tls_cert_reg_t *reg, server_rec *s, const tls_cert_spec_t *spec, const rustls_certified_key **pckey); + +/** + * Visit all certified keys in the registry. + * The callback may return 0 to abort the iteration. 
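+ * Returning non-zero continues the iteration with the next entry.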
+ * @param userdata supplied by the visit invocation + * @param s the server_rec the certified was load into first + * @param id internal identifier of the certified key + * @param cert_pem the PEM data of the certificate and its chain + * @param certified_key the key instance itself + */ +typedef int tls_cert_reg_visitor( + void *userdata, server_rec *s, + const char *id, const char *cert_pem, const rustls_certified_key *certified_key); + +/** + * Visit all certified_key entries in the registry. + * @param visitor callback invoked on each entry until it returns 0. + * @param userdata passed to callback + * @param reg the registry to iterate over + */ +void tls_cert_reg_do( + tls_cert_reg_visitor *visitor, void *userdata, tls_cert_reg_t *reg); + +/** + * Get the identity assigned to a loaded, certified key. Returns NULL, if the + * key is not part of the registry. The returned bytes are owned by the registry + * entry. + * @param reg the registry to look in. + * @param certified_key the key to get the identifier for + */ +const char *tls_cert_reg_get_id(tls_cert_reg_t *reg, const rustls_certified_key *certified_key); + +/** + * Load all root certificates from a PEM file into a rustls_root_cert_store. + * @param p the memory pool to use + * @param store_file the (server relative) path of the PEM file + * @param pstore the loaded root store on success + */ +apr_status_t tls_cert_load_root_store( + apr_pool_t *p, const char *store_file, rustls_root_cert_store **pstore); + +typedef struct tls_cert_root_stores_t tls_cert_root_stores_t; +struct tls_cert_root_stores_t { + apr_pool_t *pool; + apr_hash_t *file2store; +}; + +/** + * Create a new root stores registry with lifetime based on the memory pool. + * The registry will take care of its memory and allocated stores when + * the pool is destroyed. + */ +tls_cert_root_stores_t *tls_cert_root_stores_make(apr_pool_t *p); + +/** + * Clear the root stores registry, freeing all stores. + */ +void tls_cert_root_stores_clear(tls_cert_root_stores_t *stores); + +/** + * Load all root certificates from a PEM file into a rustls_root_cert_store. + * @param p the memory pool to use + * @param store_file the (server relative) path of the PEM file + * @param pstore the loaded root store on success + */ +apr_status_t tls_cert_root_stores_get( + tls_cert_root_stores_t *stores, + const char *store_file, + rustls_root_cert_store **pstore); + +typedef struct tls_cert_verifiers_t tls_cert_verifiers_t; +struct tls_cert_verifiers_t { + apr_pool_t *pool; + tls_cert_root_stores_t *stores; + apr_hash_t *file2verifier; +}; + +/** + * Create a new registry for certificate verifiers with lifetime based on the memory pool. + * The registry will take care of its memory and allocated verifiers when + * the pool is destroyed. + * @param p the memory pool to use + * @param stores the store registry for lookups + */ +tls_cert_verifiers_t *tls_cert_verifiers_make( + apr_pool_t *p, tls_cert_root_stores_t *stores); + +/** + * Clear the verifiers registry, freeing all verifiers. + */ +void tls_cert_verifiers_clear( + tls_cert_verifiers_t *verifiers); + +/** + * Get the mandatory client certificate verifier for the + * root certificate store in `store_file`. Will create + * the verifier if not already known. 
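+ * Created verifiers are cached per store_file and freed when the registry's
+ * pool is destroyed.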
+ * @param verifiers the registry of certificate verifiers + * @param store_file the (server relative) path of the PEM file with certificates + * @param pverifiers the verifier on success + */ +apr_status_t tls_cert_client_verifiers_get( + tls_cert_verifiers_t *verifiers, + const char *store_file, + const rustls_client_cert_verifier **pverifier); + +/** + * Get the optional client certificate verifier for the + * root certificate store in `store_file`. Will create + * the verifier if not already known. + * @param verifiers the registry of certificate verifiers + * @param store_file the (server relative) path of the PEM file with certificates + * @param pverifiers the verifier on success + */ +apr_status_t tls_cert_client_verifiers_get_optional( + tls_cert_verifiers_t *verifiers, + const char *store_file, + const rustls_client_cert_verifier_optional **pverifier); + +#endif /* tls_cert_h */ \ No newline at end of file diff --git a/modules/tls/tls_conf.c b/modules/tls/tls_conf.c new file mode 100644 index 0000000..a9f27de --- /dev/null +++ b/modules/tls/tls_conf.c @@ -0,0 +1,780 @@ +/* Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#include + +#include "tls_cert.h" +#include "tls_proto.h" +#include "tls_conf.h" +#include "tls_util.h" +#include "tls_var.h" +#include "tls_cache.h" + + +extern module AP_MODULE_DECLARE_DATA tls_module; +APLOG_USE_MODULE(tls); + +static tls_conf_global_t *conf_global_get_or_make(apr_pool_t *pool, server_rec *s) +{ + tls_conf_global_t *gconf; + + /* we create this only once for apache's one ap_server_conf. + * If this gets called for another server, we should already have + * done it for ap_server_conf. */ + if (ap_server_conf && s != ap_server_conf) { + tls_conf_server_t *sconf = tls_conf_server_get(ap_server_conf); + ap_assert(sconf); + ap_assert(sconf->global); + return sconf->global; + } + + gconf = apr_pcalloc(pool, sizeof(*gconf)); + gconf->ap_server = ap_server_conf; + gconf->status = TLS_CONF_ST_INIT; + gconf->proto = tls_proto_init(pool, s); + gconf->proxy_configs = apr_array_make(pool, 10, sizeof(tls_conf_proxy_t*)); + + gconf->var_lookups = apr_hash_make(pool); + tls_var_init_lookup_hash(pool, gconf->var_lookups); + gconf->session_cache_spec = "default"; + + return gconf; +} + +tls_conf_server_t *tls_conf_server_get(server_rec *s) +{ + tls_conf_server_t *sc = ap_get_module_config(s->module_config, &tls_module); + ap_assert(sc); + return sc; +} + + +#define CONF_S_NAME(s) (s && s->server_hostname? 
s->server_hostname : "default") + +void *tls_conf_create_svr(apr_pool_t *pool, server_rec *s) +{ + tls_conf_server_t *conf; + + conf = apr_pcalloc(pool, sizeof(*conf)); + conf->global = conf_global_get_or_make(pool, s); + conf->server = s; + + conf->enabled = TLS_FLAG_UNSET; + conf->cert_specs = apr_array_make(pool, 3, sizeof(tls_cert_spec_t*)); + conf->honor_client_order = TLS_FLAG_UNSET; + conf->strict_sni = TLS_FLAG_UNSET; + conf->tls_protocol_min = TLS_FLAG_UNSET; + conf->tls_pref_ciphers = apr_array_make(pool, 3, sizeof(apr_uint16_t));; + conf->tls_supp_ciphers = apr_array_make(pool, 3, sizeof(apr_uint16_t));; + return conf; +} + +#define MERGE_INT(base, add, field) \ + (add->field == TLS_FLAG_UNSET)? base->field : add->field; + +void *tls_conf_merge_svr(apr_pool_t *pool, void *basev, void *addv) +{ + tls_conf_server_t *base = basev; + tls_conf_server_t *add = addv; + tls_conf_server_t *nconf; + + nconf = apr_pcalloc(pool, sizeof(*nconf)); + nconf->server = add->server; + nconf->global = add->global? add->global : base->global; + + nconf->enabled = MERGE_INT(base, add, enabled); + nconf->cert_specs = apr_array_append(pool, base->cert_specs, add->cert_specs); + nconf->tls_protocol_min = MERGE_INT(base, add, tls_protocol_min); + nconf->tls_pref_ciphers = add->tls_pref_ciphers->nelts? + add->tls_pref_ciphers : base->tls_pref_ciphers; + nconf->tls_supp_ciphers = add->tls_supp_ciphers->nelts? + add->tls_supp_ciphers : base->tls_supp_ciphers; + nconf->honor_client_order = MERGE_INT(base, add, honor_client_order); + nconf->client_ca = add->client_ca? add->client_ca : base->client_ca; + nconf->client_auth = (add->client_auth != TLS_CLIENT_AUTH_UNSET)? + add->client_auth : base->client_auth; + nconf->var_user_name = add->var_user_name? add->var_user_name : base->var_user_name; + return nconf; +} + +tls_conf_dir_t *tls_conf_dir_get(request_rec *r) +{ + tls_conf_dir_t *dc = ap_get_module_config(r->per_dir_config, &tls_module); + ap_assert(dc); + return dc; +} + +tls_conf_dir_t *tls_conf_dir_server_get(server_rec *s) +{ + tls_conf_dir_t *dc = ap_get_module_config(s->lookup_defaults, &tls_module); + ap_assert(dc); + return dc; +} + +void *tls_conf_create_dir(apr_pool_t *pool, char *dir) +{ + tls_conf_dir_t *conf; + + (void)dir; + conf = apr_pcalloc(pool, sizeof(*conf)); + conf->std_env_vars = TLS_FLAG_UNSET; + conf->proxy_enabled = TLS_FLAG_UNSET; + conf->proxy_protocol_min = TLS_FLAG_UNSET; + conf->proxy_pref_ciphers = apr_array_make(pool, 3, sizeof(apr_uint16_t));; + conf->proxy_supp_ciphers = apr_array_make(pool, 3, sizeof(apr_uint16_t));; + conf->proxy_machine_cert_specs = apr_array_make(pool, 3, sizeof(tls_cert_spec_t*)); + return conf; +} + + +static int same_proxy_settings(tls_conf_dir_t *a, tls_conf_dir_t *b) +{ + return a->proxy_ca == b->proxy_ca; +} + +static void dir_assign_merge( + tls_conf_dir_t *dest, apr_pool_t *pool, tls_conf_dir_t *base, tls_conf_dir_t *add) +{ + tls_conf_dir_t local; + + memset(&local, 0, sizeof(local)); + local.std_env_vars = MERGE_INT(base, add, std_env_vars); + local.export_cert_vars = MERGE_INT(base, add, export_cert_vars); + local.proxy_enabled = MERGE_INT(base, add, proxy_enabled); + local.proxy_ca = add->proxy_ca? add->proxy_ca : base->proxy_ca; + local.proxy_protocol_min = MERGE_INT(base, add, proxy_protocol_min); + local.proxy_pref_ciphers = add->proxy_pref_ciphers->nelts? + add->proxy_pref_ciphers : base->proxy_pref_ciphers; + local.proxy_supp_ciphers = add->proxy_supp_ciphers->nelts? 
+ add->proxy_supp_ciphers : base->proxy_supp_ciphers; + local.proxy_machine_cert_specs = apr_array_append(pool, + base->proxy_machine_cert_specs, add->proxy_machine_cert_specs); + if (local.proxy_enabled == TLS_FLAG_TRUE) { + if (add->proxy_config) { + local.proxy_config = same_proxy_settings(&local, add)? add->proxy_config : NULL; + } + else if (base->proxy_config) { + local.proxy_config = same_proxy_settings(&local, base)? add->proxy_config : NULL; + } + } + memcpy(dest, &local, sizeof(*dest)); +} + +void *tls_conf_merge_dir(apr_pool_t *pool, void *basev, void *addv) +{ + tls_conf_dir_t *base = basev; + tls_conf_dir_t *add = addv; + tls_conf_dir_t *nconf = apr_pcalloc(pool, sizeof(*nconf)); + dir_assign_merge(nconf, pool, base, add); + return nconf; +} + +static void tls_conf_dir_set_options_defaults(apr_pool_t *pool, tls_conf_dir_t *dc) +{ + (void)pool; + dc->std_env_vars = TLS_FLAG_FALSE; + dc->export_cert_vars = TLS_FLAG_FALSE; +} + +apr_status_t tls_conf_server_apply_defaults(tls_conf_server_t *sc, apr_pool_t *p) +{ + (void)p; + if (sc->enabled == TLS_FLAG_UNSET) sc->enabled = TLS_FLAG_FALSE; + if (sc->tls_protocol_min == TLS_FLAG_UNSET) sc->tls_protocol_min = 0; + if (sc->honor_client_order == TLS_FLAG_UNSET) sc->honor_client_order = TLS_FLAG_TRUE; + if (sc->strict_sni == TLS_FLAG_UNSET) sc->strict_sni = TLS_FLAG_TRUE; + if (sc->client_auth == TLS_CLIENT_AUTH_UNSET) sc->client_auth = TLS_CLIENT_AUTH_NONE; + return APR_SUCCESS; +} + +apr_status_t tls_conf_dir_apply_defaults(tls_conf_dir_t *dc, apr_pool_t *p) +{ + (void)p; + if (dc->std_env_vars == TLS_FLAG_UNSET) dc->std_env_vars = TLS_FLAG_FALSE; + if (dc->export_cert_vars == TLS_FLAG_UNSET) dc->export_cert_vars = TLS_FLAG_FALSE; + if (dc->proxy_enabled == TLS_FLAG_UNSET) dc->proxy_enabled = TLS_FLAG_FALSE; + return APR_SUCCESS; +} + +tls_conf_proxy_t *tls_conf_proxy_make( + apr_pool_t *p, tls_conf_dir_t *dc, tls_conf_global_t *gc, server_rec *s) +{ + tls_conf_proxy_t *pc = apr_pcalloc(p, sizeof(*pc)); + pc->defined_in = s; + pc->global = gc; + pc->proxy_ca = dc->proxy_ca; + pc->proxy_protocol_min = dc->proxy_protocol_min; + pc->proxy_pref_ciphers = dc->proxy_pref_ciphers; + pc->proxy_supp_ciphers = dc->proxy_supp_ciphers; + pc->machine_cert_specs = dc->proxy_machine_cert_specs; + pc->machine_certified_keys = apr_array_make(p, 3, sizeof(const rustls_certified_key*)); + return pc; +} + +int tls_proxy_section_post_config( + apr_pool_t *p, apr_pool_t *plog, apr_pool_t *ptemp, server_rec *s, + ap_conf_vector_t *section_config) +{ + tls_conf_dir_t *proxy_dc, *server_dc; + tls_conf_server_t *sc; + + /* mod_proxy collects the ... sections per server (base server or virtualhost) + * and in its post_config hook, calls our function registered at its hook for each with + * s - the server they were define in + * section_config - the set of dir_configs for a section + * + * If none of _our_ config directives had been used, here or in the server, we get a NULL. + * Which means we have to do nothing. Otherwise, we add to `proxy_dc` the + * settings from `server_dc` - since this is not automagically done by apache. + * + * `proxy_dc` is then complete and tells us if we handle outgoing connections + * here and with what parameter settings. 
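+ * A purely illustrative example: a <Proxy "https://backend.example.com/">
+ * section that switches on TLS for outgoing connections ends up as one such
+ * section_config, which we complete with the server-level defaults here.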
+ */ + (void)ptemp; (void)plog; + ap_log_error(APLOG_MARK, APLOG_TRACE3, 0, s, + "%s: tls_proxy_section_post_config called", s->server_hostname); + proxy_dc = ap_get_module_config(section_config, &tls_module); + if (!proxy_dc) goto cleanup; + server_dc = ap_get_module_config(s->lookup_defaults, &tls_module); + ap_assert(server_dc); + dir_assign_merge(proxy_dc, p, server_dc, proxy_dc); + tls_conf_dir_apply_defaults(proxy_dc, p); + if (proxy_dc->proxy_enabled && !proxy_dc->proxy_config) { + /* remember `proxy_dc` for subsequent configuration of outoing TLS setups */ + sc = tls_conf_server_get(s); + proxy_dc->proxy_config = tls_conf_proxy_make(p, proxy_dc, sc->global, s); + ap_log_error(APLOG_MARK, APLOG_TRACE3, 0, s, + "%s: adding proxy_conf to globals in proxy_post_config_section", + s->server_hostname); + APR_ARRAY_PUSH(sc->global->proxy_configs, tls_conf_proxy_t*) = proxy_dc->proxy_config; + } +cleanup: + return OK; +} + +static const char *cmd_check_file(cmd_parms *cmd, const char *fpath) +{ + char *real_path; + + /* just a dump of the configuration, dont resolve/check */ + if (ap_state_query(AP_SQ_RUN_MODE) == AP_SQ_RM_CONFIG_DUMP) { + return NULL; + } + real_path = ap_server_root_relative(cmd->pool, fpath); + if (!real_path) { + return apr_pstrcat(cmd->pool, cmd->cmd->name, + ": Invalid file path ", fpath, NULL); + } + if (!tls_util_is_file(cmd->pool, real_path)) { + return apr_pstrcat(cmd->pool, cmd->cmd->name, + ": file '", real_path, + "' does not exist or is empty", NULL); + } + return NULL; +} + +static const char *tls_conf_add_engine(cmd_parms *cmd, void *dc, const char*v) +{ + tls_conf_server_t *sc = tls_conf_server_get(cmd->server); + tls_conf_global_t *gc = sc->global; + const char *err = NULL; + char *host, *scope_id; + apr_port_t port; + apr_sockaddr_t *sa; + server_addr_rec *sar; + apr_status_t rv; + + (void)dc; + /* Example of use: + * TLSEngine 443 + * TLSEngine hostname:443 + * TLSEngine 91.0.0.1:443 + * TLSEngine [::0]:443 + */ + rv = apr_parse_addr_port(&host, &scope_id, &port, v, cmd->pool); + if (APR_SUCCESS != rv) { + err = apr_pstrcat(cmd->pool, cmd->cmd->name, + ": invalid address/port in '", v, "'", NULL); + goto cleanup; + } + + /* translate host/port to a sockaddr that we can match with incoming connections */ + rv = apr_sockaddr_info_get(&sa, host, APR_UNSPEC, port, 0, cmd->pool); + if (APR_SUCCESS != rv) { + err = apr_pstrcat(cmd->pool, cmd->cmd->name, + ": unable to get sockaddr for '", host, "'", NULL); + goto cleanup; + } + + if (scope_id) { +#if APR_VERSION_AT_LEAST(1,7,0) + rv = apr_sockaddr_zone_set(sa, scope_id); + if (APR_SUCCESS != rv) { + err = apr_pstrcat(cmd->pool, cmd->cmd->name, + ": error setting ipv6 scope id: '", scope_id, "'", NULL); + goto cleanup; + } +#else + err = apr_pstrcat(cmd->pool, cmd->cmd->name, + ": IPv6 scopes not supported by your APR: '", scope_id, "'", NULL); + goto cleanup; +#endif + } + + sar = apr_pcalloc(cmd->pool, sizeof(*sar)); + sar->host_addr = sa; + sar->virthost = host; + sar->host_port = port; + + sar->next = gc->tls_addresses; + gc->tls_addresses = sar; +cleanup: + return err; +} + +static int flag_value( + const char *arg) +{ + if (!strcasecmp(arg, "On")) { + return TLS_FLAG_TRUE; + } + else if (!strcasecmp(arg, "Off")) { + return TLS_FLAG_FALSE; + } + return TLS_FLAG_UNSET; +} + +static const char *flag_err( + cmd_parms *cmd, const char *v) +{ + return apr_pstrcat(cmd->pool, cmd->cmd->name, + ": value must be 'On' or 'Off': '", v, "'", NULL); +} + +static const char *tls_conf_add_certificate( + cmd_parms *cmd, 
void *dc, const char *cert_file, const char *pkey_file) +{ + tls_conf_server_t *sc = tls_conf_server_get(cmd->server); + const char *err = NULL, *fpath; + tls_cert_spec_t *cert; + + (void)dc; + if (NULL != (err = cmd_check_file(cmd, cert_file))) goto cleanup; + /* key file may be NULL, in which case cert_file must contain the key PEM */ + if (pkey_file && NULL != (err = cmd_check_file(cmd, pkey_file))) goto cleanup; + + cert = apr_pcalloc(cmd->pool, sizeof(*cert)); + fpath = ap_server_root_relative(cmd->pool, cert_file); + if (!tls_util_is_file(cmd->pool, fpath)) { + return apr_pstrcat(cmd->pool, cmd->cmd->name, + ": unable to find certificate file: '", fpath, "'", NULL); + } + cert->cert_file = cert_file; + if (pkey_file) { + fpath = ap_server_root_relative(cmd->pool, pkey_file); + if (!tls_util_is_file(cmd->pool, fpath)) { + return apr_pstrcat(cmd->pool, cmd->cmd->name, + ": unable to find certificate key file: '", fpath, "'", NULL); + } + } + cert->pkey_file = pkey_file; + *(const tls_cert_spec_t **)apr_array_push(sc->cert_specs) = cert; + +cleanup: + return err; +} + +static const char *parse_ciphers( + cmd_parms *cmd, + tls_conf_global_t *gc, + const char *nop_name, + int argc, char *const argv[], + apr_array_header_t *ciphers) +{ + apr_array_clear(ciphers); + if (argc > 1 || apr_strnatcasecmp(nop_name, argv[0])) { + apr_uint16_t cipher; + int i; + + for (i = 0; i < argc; ++i) { + char *name, *last = NULL; + const char *value = argv[i]; + + name = apr_strtok(apr_pstrdup(cmd->pool, value), ":", &last); + while (name) { + if (tls_proto_get_cipher_by_name(gc->proto, name, &cipher) != APR_SUCCESS) { + return apr_pstrcat(cmd->pool, cmd->cmd->name, + ": cipher not recognized '", name, "'", NULL); + } + APR_ARRAY_PUSH(ciphers, apr_uint16_t) = cipher; + name = apr_strtok(NULL, ":", &last); + } + } + } + return NULL; +} + +static const char *tls_conf_set_preferred_ciphers( + cmd_parms *cmd, void *dc, int argc, char *const argv[]) +{ + tls_conf_server_t *sc = tls_conf_server_get(cmd->server); + const char *err = NULL; + + (void)dc; + if (!argc) { + err = "specify the TLS ciphers to prefer or 'default' for the rustls default ordering."; + goto cleanup; + } + err = parse_ciphers(cmd, sc->global, "default", argc, argv, sc->tls_pref_ciphers); +cleanup: + return err; +} + +static const char *tls_conf_set_suppressed_ciphers( + cmd_parms *cmd, void *dc, int argc, char *const argv[]) +{ + tls_conf_server_t *sc = tls_conf_server_get(cmd->server); + const char *err = NULL; + + (void)dc; + if (!argc) { + err = "specify the TLS ciphers to never use or 'none'."; + goto cleanup; + } + err = parse_ciphers(cmd, sc->global, "none", argc, argv, sc->tls_supp_ciphers); +cleanup: + return err; +} + +static const char *tls_conf_set_honor_client_order( + cmd_parms *cmd, void *dc, const char *v) +{ + tls_conf_server_t *sc = tls_conf_server_get(cmd->server); + int flag = flag_value(v); + + (void)dc; + if (TLS_FLAG_UNSET == flag) return flag_err(cmd, v); + sc->honor_client_order = flag; + return NULL; +} + +static const char *tls_conf_set_strict_sni( + cmd_parms *cmd, void *dc, const char *v) +{ + tls_conf_server_t *sc = tls_conf_server_get(cmd->server); + int flag = flag_value(v); + + (void)dc; + if (TLS_FLAG_UNSET == flag) return flag_err(cmd, v); + sc->strict_sni = flag; + return NULL; +} + +static const char *get_min_protocol( + cmd_parms *cmd, const char *v, int *pmin) +{ + tls_conf_server_t *sc = tls_conf_server_get(cmd->server); + const char *err = NULL; + + if (!apr_strnatcasecmp("default", v)) { + *pmin = 0; + 
} + else if (*v && v[strlen(v)-1] == '+') { + char *name = apr_pstrdup(cmd->pool, v); + name[strlen(name)-1] = '\0'; + *pmin = tls_proto_get_version_by_name(sc->global->proto, name); + if (!*pmin) { + err = apr_pstrcat(cmd->pool, cmd->cmd->name, + ": unrecognized protocol version specifier (try TLSv1.2+ or TLSv1.3+): '", v, "'", NULL); + goto cleanup; + } + } + else { + err = apr_pstrcat(cmd->pool, cmd->cmd->name, + ": value must be 'default', 'TLSv1.2+' or 'TLSv1.3+': '", v, "'", NULL); + goto cleanup; + } +cleanup: + return err; +} + +static const char *tls_conf_set_protocol( + cmd_parms *cmd, void *dc, const char *v) +{ + tls_conf_server_t *sc = tls_conf_server_get(cmd->server); + (void)dc; + return get_min_protocol(cmd, v, &sc->tls_protocol_min); +} + +static const char *tls_conf_set_options( + cmd_parms *cmd, void *dcv, int argc, char *const argv[]) +{ + tls_conf_dir_t *dc = dcv; + const char *err = NULL, *option; + int i, val; + + /* Are we only having deltas (+/-) or do we reset the options? */ + for (i = 0; i < argc; ++i) { + if (argv[i][0] != '+' && argv[i][0] != '-') { + tls_conf_dir_set_options_defaults(cmd->pool, dc); + break; + } + } + + for (i = 0; i < argc; ++i) { + option = argv[i]; + if (!apr_strnatcasecmp("Defaults", option)) { + dc->std_env_vars = TLS_FLAG_FALSE; + dc->export_cert_vars = TLS_FLAG_FALSE; + } + else { + val = TLS_FLAG_TRUE; + if (*option == '+' || *option == '-') { + val = (*option == '+')? TLS_FLAG_TRUE : TLS_FLAG_FALSE; + ++option; + } + + if (!apr_strnatcasecmp("StdEnvVars", option)) { + dc->std_env_vars = val; + } + else if (!apr_strnatcasecmp("ExportCertData", option)) { + dc->export_cert_vars = val; + } + else { + err = apr_pstrcat(cmd->pool, cmd->cmd->name, + ": unknown option '", option, "'", NULL); + goto cleanup; + } + } + } +cleanup: + return err; +} + +static const char *tls_conf_set_session_cache( + cmd_parms *cmd, void *dc, const char *value) +{ + tls_conf_server_t *sc = tls_conf_server_get(cmd->server); + const char *err = NULL; + + (void)dc; + if ((err = ap_check_cmd_context(cmd, GLOBAL_ONLY))) goto cleanup; + + err = tls_cache_set_specification(value, sc->global, cmd->pool, cmd->temp_pool); +cleanup: + return err; +} + +static const char *tls_conf_set_proxy_engine(cmd_parms *cmd, void *dir_conf, int flag) +{ + tls_conf_dir_t *dc = dir_conf; + (void)cmd; + dc->proxy_enabled = flag ? 
TLS_FLAG_TRUE : TLS_FLAG_FALSE; + return NULL; +} + +static const char *tls_conf_set_proxy_ca( + cmd_parms *cmd, void *dir_conf, const char *proxy_ca) +{ + tls_conf_dir_t *dc = dir_conf; + const char *err = NULL; + + if (strcasecmp(proxy_ca, "default") && NULL != (err = cmd_check_file(cmd, proxy_ca))) goto cleanup; + dc->proxy_ca = proxy_ca; +cleanup: + return err; +} + +static const char *tls_conf_set_proxy_protocol( + cmd_parms *cmd, void *dir_conf, const char *v) +{ + tls_conf_dir_t *dc = dir_conf; + return get_min_protocol(cmd, v, &dc->proxy_protocol_min); +} + +static const char *tls_conf_set_proxy_preferred_ciphers( + cmd_parms *cmd, void *dir_conf, int argc, char *const argv[]) +{ + tls_conf_server_t *sc = tls_conf_server_get(cmd->server); + tls_conf_dir_t *dc = dir_conf; + const char *err = NULL; + + if (!argc) { + err = "specify the proxy TLS ciphers to prefer or 'default' for the rustls default ordering."; + goto cleanup; + } + err = parse_ciphers(cmd, sc->global, "default", argc, argv, dc->proxy_pref_ciphers); +cleanup: + return err; +} + +static const char *tls_conf_set_proxy_suppressed_ciphers( + cmd_parms *cmd, void *dir_conf, int argc, char *const argv[]) +{ + tls_conf_server_t *sc = tls_conf_server_get(cmd->server); + tls_conf_dir_t *dc = dir_conf; + const char *err = NULL; + + if (!argc) { + err = "specify the proxy TLS ciphers to never use or 'none'."; + goto cleanup; + } + err = parse_ciphers(cmd, sc->global, "none", argc, argv, dc->proxy_supp_ciphers); +cleanup: + return err; +} + +#if TLS_CLIENT_CERTS + +static const char *tls_conf_set_client_ca( + cmd_parms *cmd, void *dc, const char *client_ca) +{ + tls_conf_server_t *sc = tls_conf_server_get(cmd->server); + const char *err; + + (void)dc; + if (NULL != (err = cmd_check_file(cmd, client_ca))) goto cleanup; + sc->client_ca = client_ca; +cleanup: + return err; +} + +static const char *tls_conf_set_client_auth( + cmd_parms *cmd, void *dc, const char *mode) +{ + tls_conf_server_t *sc = tls_conf_server_get(cmd->server); + const char *err = NULL; + (void)dc; + if (!strcasecmp(mode, "required")) { + sc->client_auth = TLS_CLIENT_AUTH_REQUIRED; + } + else if (!strcasecmp(mode, "optional")) { + sc->client_auth = TLS_CLIENT_AUTH_OPTIONAL; + } + else if (!strcasecmp(mode, "none")) { + sc->client_auth = TLS_CLIENT_AUTH_NONE; + } + else { + err = apr_pstrcat(cmd->pool, cmd->cmd->name, + ": unknown value: '", mode, "', use required/optional/none.", NULL); + } + return err; +} + +static const char *tls_conf_set_user_name( + cmd_parms *cmd, void *dc, const char *var_user_name) +{ + tls_conf_server_t *sc = tls_conf_server_get(cmd->server); + (void)dc; + sc->var_user_name = var_user_name; + return NULL; +} + +#endif /* if TLS_CLIENT_CERTS */ + +#if TLS_MACHINE_CERTS + +static const char *tls_conf_add_proxy_machine_certificate( + cmd_parms *cmd, void *dir_conf, const char *cert_file, const char *pkey_file) +{ + tls_conf_dir_t *dc = dir_conf; + const char *err = NULL, *fpath; + tls_cert_spec_t *cert; + + (void)dc; + if (NULL != (err = cmd_check_file(cmd, cert_file))) goto cleanup; + /* key file may be NULL, in which case cert_file must contain the key PEM */ + if (pkey_file && NULL != (err = cmd_check_file(cmd, pkey_file))) goto cleanup; + + cert = apr_pcalloc(cmd->pool, sizeof(*cert)); + fpath = ap_server_root_relative(cmd->pool, cert_file); + if (!tls_util_is_file(cmd->pool, fpath)) { + return apr_pstrcat(cmd->pool, cmd->cmd->name, + ": unable to find certificate file: '", fpath, "'", NULL); + } + cert->cert_file = cert_file; + if 
(pkey_file) { + fpath = ap_server_root_relative(cmd->pool, pkey_file); + if (!tls_util_is_file(cmd->pool, fpath)) { + return apr_pstrcat(cmd->pool, cmd->cmd->name, + ": unable to find certificate key file: '", fpath, "'", NULL); + } + } + cert->pkey_file = pkey_file; + *(const tls_cert_spec_t **)apr_array_push(dc->proxy_machine_cert_specs) = cert; + +cleanup: + return err; +} + +#endif /* if TLS_MACHINE_CERTS */ + +const command_rec tls_conf_cmds[] = { + AP_INIT_TAKE12("TLSCertificate", tls_conf_add_certificate, NULL, RSRC_CONF, + "Add a certificate to the server by specifying a file containing the " + "certificate PEM, followed by its chain PEMs. The PEM of the key must " + "either also be there or can be given as a separate file."), + AP_INIT_TAKE_ARGV("TLSCiphersPrefer", tls_conf_set_preferred_ciphers, NULL, RSRC_CONF, + "Set the TLS ciphers to prefer when negotiating with a client."), + AP_INIT_TAKE_ARGV("TLSCiphersSuppress", tls_conf_set_suppressed_ciphers, NULL, RSRC_CONF, + "Set the TLS ciphers to never use when negotiating with a client."), + AP_INIT_TAKE1("TLSHonorClientOrder", tls_conf_set_honor_client_order, NULL, RSRC_CONF, + "Set 'on' to have the server honor client preferences in cipher suites, default on."), + AP_INIT_TAKE1("TLSEngine", tls_conf_add_engine, NULL, RSRC_CONF, + "Specify an address+port where the module shall handle incoming TLS connections."), + AP_INIT_TAKE_ARGV("TLSOptions", tls_conf_set_options, NULL, OR_OPTIONS, + "En-/disables optional features in the module."), + AP_INIT_TAKE1("TLSProtocol", tls_conf_set_protocol, NULL, RSRC_CONF, + "Set the minimum TLS protocol version to use."), + AP_INIT_TAKE1("TLSStrictSNI", tls_conf_set_strict_sni, NULL, RSRC_CONF, + "Set strictness of client server name (SNI) check against hosts, default on."), + AP_INIT_TAKE1("TLSSessionCache", tls_conf_set_session_cache, NULL, RSRC_CONF, + "Set which cache to use for TLS sessions."), + AP_INIT_FLAG("TLSProxyEngine", tls_conf_set_proxy_engine, NULL, RSRC_CONF|PROXY_CONF, + "Enable TLS encryption of outgoing connections in this location/server."), + AP_INIT_TAKE1("TLSProxyCA", tls_conf_set_proxy_ca, NULL, RSRC_CONF|PROXY_CONF, + "Set the trust anchors for certificates from proxied backend servers from a PEM file."), + AP_INIT_TAKE1("TLSProxyProtocol", tls_conf_set_proxy_protocol, NULL, RSRC_CONF|PROXY_CONF, + "Set the minimum TLS protocol version to use for proxy connections."), + AP_INIT_TAKE_ARGV("TLSProxyCiphersPrefer", tls_conf_set_proxy_preferred_ciphers, NULL, RSRC_CONF|PROXY_CONF, + "Set the TLS ciphers to prefer when negotiating a proxy connection."), + AP_INIT_TAKE_ARGV("TLSProxyCiphersSuppress", tls_conf_set_proxy_suppressed_ciphers, NULL, RSRC_CONF|PROXY_CONF, + "Set the TLS ciphers to never use when negotiating a proxy connection."), +#if TLS_CLIENT_CERTS + AP_INIT_TAKE1("TLSClientCA", tls_conf_set_client_ca, NULL, RSRC_CONF, + "Set the trust anchors for client certificates from a PEM file."), + AP_INIT_TAKE1("TLSClientCertificate", tls_conf_set_client_auth, NULL, RSRC_CONF, + "Set whether TLS client authentication is 'required', 'optional' or 'none'."), + AP_INIT_TAKE1("TLSUserName", tls_conf_set_user_name, NULL, RSRC_CONF, + "Set the SSL variable to be used as user name."), +#endif /* if TLS_CLIENT_CERTS */ +#if TLS_MACHINE_CERTS + AP_INIT_TAKE12("TLSProxyMachineCertificate", tls_conf_add_proxy_machine_certificate, NULL, RSRC_CONF|PROXY_CONF, + "Add a certificate to be used as client certificate on a proxy connection. 
"), +#endif /* if TLS_MACHINE_CERTS */ + AP_INIT_TAKE1(NULL, NULL, NULL, RSRC_CONF, NULL) +}; diff --git a/modules/tls/tls_conf.h b/modules/tls/tls_conf.h new file mode 100644 index 0000000..e924412 --- /dev/null +++ b/modules/tls/tls_conf.h @@ -0,0 +1,185 @@ +/* Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef tls_conf_h +#define tls_conf_h + +/* Configuration flags */ +#define TLS_FLAG_UNSET (-1) +#define TLS_FLAG_FALSE (0) +#define TLS_FLAG_TRUE (1) + +struct tls_proto_conf_t; +struct tls_cert_reg_t; +struct tls_cert_root_stores_t; +struct tls_cert_verifiers_t; +struct ap_socache_instance_t; +struct ap_socache_provider_t; +struct apr_global_mutex_t; + + +/* disabled, since rustls support is lacking + * - x.509 retrieval of certificate fields and extensions + * - certificate revocation lists (CRL) + * - x.509 access to issuer of trust chain in x.509 CA store: + * server CA has ca1, ca2, ca3 + * client present certA + * rustls verifies that it is signed by *one of* ca* certs + * OCSP check needs (certA, issuing cert) for query + */ +#define TLS_CLIENT_CERTS 0 + +/* support for this exists as PR + */ +#define TLS_MACHINE_CERTS 1 + + +typedef enum { + TLS_CLIENT_AUTH_UNSET, + TLS_CLIENT_AUTH_NONE, + TLS_CLIENT_AUTH_REQUIRED, + TLS_CLIENT_AUTH_OPTIONAL, +} tls_client_auth_t; + +typedef enum { + TLS_CONF_ST_INIT, + TLS_CONF_ST_INCOMING_DONE, + TLS_CONF_ST_OUTGOING_DONE, + TLS_CONF_ST_DONE, +} tls_conf_status_t; + +/* The global module configuration, created after post-config + * and then readonly. 
+ */ +typedef struct { + server_rec *ap_server; /* the global server we initialized on */ + const char *module_version; + const char *crustls_version; + + tls_conf_status_t status; + int mod_proxy_post_config_done; /* if mod_proxy did its post-config things */ + + server_addr_rec *tls_addresses; /* the addresses/ports our engine is enabled on */ + apr_array_header_t *proxy_configs; /* tls_conf_proxy_t* collected from everywhere */ + + struct tls_proto_conf_t *proto; /* TLS protocol/rustls specific globals */ + apr_hash_t *var_lookups; /* variable lookup functions by var name */ + struct tls_cert_reg_t *cert_reg; /* all certified keys loaded */ + struct tls_cert_root_stores_t *stores; /* loaded certificate stores */ + struct tls_cert_verifiers_t *verifiers; /* registry of certificate verifiers */ + + const char *session_cache_spec; /* how the session cache was specified */ + const struct ap_socache_provider_t *session_cache_provider; /* provider used for session cache */ + struct ap_socache_instance_t *session_cache; /* session cache instance */ + struct apr_global_mutex_t *session_cache_mutex; /* global mutex for access to session cache */ + + const rustls_server_config *rustls_hello_config; /* used for initial client hello parsing */ +} tls_conf_global_t; + +/* The module configuration for a server (vhost). + * Populated during config parsing, merged and completed + * in the post config phase. Readonly after that. + */ +typedef struct { + server_rec *server; /* server this config belongs to */ + tls_conf_global_t *global; /* global module config, singleton */ + + int enabled; /* TLS_FLAG_TRUE if mod_tls is active on this server */ + apr_array_header_t *cert_specs; /* array of (tls_cert_spec_t*) of configured certificates */ + int tls_protocol_min; /* the minimum TLS protocol version to use */ + apr_array_header_t *tls_pref_ciphers; /* List of apr_uint16_t cipher ids to prefer */ + apr_array_header_t *tls_supp_ciphers; /* List of apr_uint16_t cipher ids to suppress */ + const apr_array_header_t *ciphersuites; /* Computed post-config, ordered list of rustls cipher suites */ + int honor_client_order; /* honor client cipher ordering */ + int strict_sni; + + const char *client_ca; /* PEM file with trust anchors for client certs */ + tls_client_auth_t client_auth; /* how client authentication with certificates is used */ + const char *var_user_name; /* which SSL variable to use as user name */ + + apr_array_header_t *certified_keys; /* rustls_certified_key list configured */ + int base_server; /* != 0 iff this is the base server */ + int service_unavailable; /* TLS not trustworthy configured, return 503s */ +} tls_conf_server_t; + +typedef struct { + server_rec *defined_in; /* the server/host defining this dir_conf */ + tls_conf_global_t *global; /* global module config, singleton */ + const char *proxy_ca; /* PEM file with trust anchors for proxied remote server certs */ + int proxy_protocol_min; /* the minimum TLS protocol version to use for proxy connections */ + apr_array_header_t *proxy_pref_ciphers; /* List of apr_uint16_t cipher ids to prefer */ + apr_array_header_t *proxy_supp_ciphers; /* List of apr_uint16_t cipher ids to suppress */ + apr_array_header_t *machine_cert_specs; /* configured machine certificates specs */ + apr_array_header_t *machine_certified_keys; /* rustls_certified_key list */ + const rustls_client_config *rustls_config; +} tls_conf_proxy_t; + +typedef struct { + int std_env_vars; + int export_cert_vars; + int proxy_enabled; /* TLS_FLAG_TRUE if mod_tls is active on 
outgoing connections */ + const char *proxy_ca; /* PEM file with trust anchors for proxied remote server certs */ + int proxy_protocol_min; /* the minimum TLS protocol version to use for proxy connections */ + apr_array_header_t *proxy_pref_ciphers; /* List of apr_uint16_t cipher ids to prefer */ + apr_array_header_t *proxy_supp_ciphers; /* List of apr_uint16_t cipher ids to suppress */ + apr_array_header_t *proxy_machine_cert_specs; /* configured machine certificates specs */ + + tls_conf_proxy_t *proxy_config; +} tls_conf_dir_t; + +/* our static registry of configuration directives. */ +extern const command_rec tls_conf_cmds[]; + +/* create the modules configuration for a server_rec. */ +void *tls_conf_create_svr(apr_pool_t *pool, server_rec *s); + +/* merge (inherit) server configurations for the module. + * Settings in 'add' overwrite the ones in 'base' and unspecified + * settings shine through. */ +void *tls_conf_merge_svr(apr_pool_t *pool, void *basev, void *addv); + +/* create the modules configuration for a directory. */ +void *tls_conf_create_dir(apr_pool_t *pool, char *dir); + +/* merge (inherit) directory configurations for the module. + * Settings in 'add' overwrite the ones in 'base' and unspecified + * settings shine through. */ +void *tls_conf_merge_dir(apr_pool_t *pool, void *basev, void *addv); + + +/* Get the server specific module configuration. */ +tls_conf_server_t *tls_conf_server_get(server_rec *s); + +/* Get the directory specific module configuration for the request. */ +tls_conf_dir_t *tls_conf_dir_get(request_rec *r); + +/* Get the directory specific module configuration for the server. */ +tls_conf_dir_t *tls_conf_dir_server_get(server_rec *s); + +/* If any configuration values are unset, supply the global server defaults. */ +apr_status_t tls_conf_server_apply_defaults(tls_conf_server_t *sc, apr_pool_t *p); + +/* If any configuration values are unset, supply the global dir defaults. */ +apr_status_t tls_conf_dir_apply_defaults(tls_conf_dir_t *dc, apr_pool_t *p); + +/* create a new proxy configuration from directory config in server */ +tls_conf_proxy_t *tls_conf_proxy_make( + apr_pool_t *p, tls_conf_dir_t *dc, tls_conf_global_t *gc, server_rec *s); + +int tls_proxy_section_post_config( + apr_pool_t *p, apr_pool_t *plog, apr_pool_t *ptemp, server_rec *s, + ap_conf_vector_t *section_config); + +#endif /* tls_conf_h */ diff --git a/modules/tls/tls_core.c b/modules/tls/tls_core.c new file mode 100644 index 0000000..2547939 --- /dev/null +++ b/modules/tls/tls_core.c @@ -0,0 +1,1433 @@ +/* Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "tls_proto.h" +#include "tls_cert.h" +#include "tls_conf.h" +#include "tls_core.h" +#include "tls_ocsp.h" +#include "tls_util.h" +#include "tls_cache.h" +#include "tls_var.h" + + +extern module AP_MODULE_DECLARE_DATA tls_module; +APLOG_USE_MODULE(tls); + +tls_conf_conn_t *tls_conf_conn_get(conn_rec *c) +{ + return ap_get_module_config(c->conn_config, &tls_module); +} + +void tls_conf_conn_set(conn_rec *c, tls_conf_conn_t *cc) +{ + ap_set_module_config(c->conn_config, &tls_module, cc); +} + +int tls_conn_check_ssl(conn_rec *c) +{ + tls_conf_conn_t *cc = tls_conf_conn_get(c->master? c->master : c); + if (TLS_CONN_ST_IS_ENABLED(cc)) { + return OK; + } + return DECLINED; +} + +static int we_listen_on(tls_conf_global_t *gc, server_rec *s, tls_conf_server_t *sc) +{ + server_addr_rec *sa, *la; + + if (gc->tls_addresses && sc->base_server) { + /* The base server listens to every port and may be selected via SNI */ + return 1; + } + for (la = gc->tls_addresses; la; la = la->next) { + for (sa = s->addrs; sa; sa = sa->next) { + if (la->host_port == sa->host_port + && la->host_addr->ipaddr_len == sa->host_addr->ipaddr_len + && !memcmp(la->host_addr->ipaddr_ptr, + sa->host_addr->ipaddr_ptr, (size_t)la->host_addr->ipaddr_len)) { + /* exact match */ + return 1; + } + } + } + return 0; +} + +static apr_status_t tls_core_free(void *data) +{ + server_rec *base_server = (server_rec *)data; + tls_conf_server_t *sc = tls_conf_server_get(base_server); + + if (sc && sc->global && sc->global->rustls_hello_config) { + rustls_server_config_free(sc->global->rustls_hello_config); + sc->global->rustls_hello_config = NULL; + } + tls_cache_free(base_server); + return APR_SUCCESS; +} + +static apr_status_t load_certified_keys( + apr_array_header_t *keys, server_rec *s, + apr_array_header_t *cert_specs, + tls_cert_reg_t *cert_reg) +{ + apr_status_t rv = APR_SUCCESS; + const rustls_certified_key *ckey; + tls_cert_spec_t *spec; + int i; + + if (cert_specs && cert_specs->nelts > 0) { + for (i = 0; i < cert_specs->nelts; ++i) { + spec = APR_ARRAY_IDX(cert_specs, i, tls_cert_spec_t*); + rv = tls_cert_reg_get_certified_key(cert_reg, s, spec, &ckey); + if (APR_SUCCESS != rv) { + ap_log_error(APLOG_MARK, APLOG_ERR, rv, s, APLOGNO(10318) + "Failed to load certificate %d[cert=%s(%d), key=%s(%d)] for %s", + i, spec->cert_file, (int)(spec->cert_pem? strlen(spec->cert_pem) : 0), + spec->pkey_file, (int)(spec->pkey_pem?
strlen(spec->pkey_pem) : 0), + s->server_hostname); + goto cleanup; + } + assert(ckey); + APR_ARRAY_PUSH(keys, const rustls_certified_key*) = ckey; + } + } +cleanup: + return rv; +} + +static apr_status_t use_local_key( + conn_rec *c, const char *cert_pem, const char *pkey_pem) +{ + tls_conf_conn_t *cc = tls_conf_conn_get(c); + const rustls_certified_key *ckey = NULL; + tls_cert_spec_t spec; + apr_status_t rv = APR_SUCCESS; + + memset(&spec, 0, sizeof(spec)); + spec.cert_pem = cert_pem; + spec.pkey_pem = pkey_pem; + rv = tls_cert_load_cert_key(c->pool, &spec, NULL, &ckey); + if (APR_SUCCESS != rv) goto cleanup; + + cc->local_keys = apr_array_make(c->pool, 2, sizeof(const rustls_certified_key*)); + APR_ARRAY_PUSH(cc->local_keys, const rustls_certified_key*) = ckey; + ckey = NULL; + +cleanup: + if (ckey != NULL) rustls_certified_key_free(ckey); + return rv; +} + +static void add_file_specs( + apr_array_header_t *certificates, + apr_pool_t *p, + apr_array_header_t *cert_files, + apr_array_header_t *key_files) +{ + tls_cert_spec_t *spec; + int i; + + for (i = 0; i < cert_files->nelts; ++i) { + spec = apr_pcalloc(p, sizeof(*spec)); + spec->cert_file = APR_ARRAY_IDX(cert_files, i, const char*); + spec->pkey_file = (i < key_files->nelts)? APR_ARRAY_IDX(key_files, i, const char*) : NULL; + *(const tls_cert_spec_t**)apr_array_push(certificates) = spec; + } +} + +static apr_status_t calc_ciphers( + apr_pool_t *pool, + server_rec *s, + tls_conf_global_t *gc, + const char *proxy, + apr_array_header_t *pref_ciphers, + apr_array_header_t *supp_ciphers, + const apr_array_header_t **pciphers) +{ + apr_array_header_t *ordered_ciphers; + const apr_array_header_t *ciphers; + apr_array_header_t *unsupported = NULL; + rustls_result rr = RUSTLS_RESULT_OK; + apr_status_t rv = APR_SUCCESS; + apr_uint16_t id; + int i; + + + /* remove all suppressed ciphers from the ones supported by rustls */ + ciphers = tls_util_array_uint16_remove(pool, gc->proto->supported_cipher_ids, supp_ciphers); + ordered_ciphers = NULL; + /* if preferred ciphers are actually still present in allowed_ciphers, put + * them into `ciphers` in this order */ + for (i = 0; i < pref_ciphers->nelts; ++i) { + id = APR_ARRAY_IDX(pref_ciphers, i, apr_uint16_t); + ap_log_error(APLOG_MARK, APLOG_TRACE4, rv, s, + "checking preferred cipher %s: %d", + s->server_hostname, id); + if (tls_util_array_uint16_contains(ciphers, id)) { + ap_log_error(APLOG_MARK, APLOG_TRACE4, rv, s, + "checking preferred cipher %s: %d is known", + s->server_hostname, id); + if (ordered_ciphers == NULL) { + ordered_ciphers = apr_array_make(pool, ciphers->nelts, sizeof(apr_uint16_t)); + } + APR_ARRAY_PUSH(ordered_ciphers, apr_uint16_t) = id; + } + else if (!tls_proto_is_cipher_supported(gc->proto, id)) { + ap_log_error(APLOG_MARK, APLOG_TRACE4, rv, s, + "checking preferred cipher %s: %d is unsupported", + s->server_hostname, id); + if (!unsupported) unsupported = apr_array_make(pool, 5, sizeof(apr_uint16_t)); + APR_ARRAY_PUSH(unsupported, apr_uint16_t) = id; + } + } + /* if we found ciphers with preference among allowed_ciphers, + * append all other allowed ciphers. 
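+ * Illustration with made-up cipher ids: with allowed ciphers [a, b, c, d] and
+ * TLSCiphersPrefer naming [c, a], the resulting order is [c, a, b, d];
+ * suppressed ciphers have already been removed above.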
*/ + if (ordered_ciphers) { + for (i = 0; i < ciphers->nelts; ++i) { + id = APR_ARRAY_IDX(ciphers, i, apr_uint16_t); + if (!tls_util_array_uint16_contains(ordered_ciphers, id)) { + APR_ARRAY_PUSH(ordered_ciphers, apr_uint16_t) = id; + } + } + ciphers = ordered_ciphers; + } + + if (ciphers == gc->proto->supported_cipher_ids) { + ciphers = NULL; + } + + if (unsupported && unsupported->nelts) { + ap_log_error(APLOG_MARK, APLOG_WARNING, rv, s, APLOGNO(10319) + "Server '%s' has TLS%sCiphersPrefer configured that are not " + "supported by rustls. These will not have an effect: %s", + s->server_hostname, proxy, + tls_proto_get_cipher_names(gc->proto, unsupported, pool)); + } + + if (RUSTLS_RESULT_OK != rr) { + const char *err_descr; + rv = tls_util_rustls_error(pool, rr, &err_descr); + ap_log_error(APLOG_MARK, APLOG_ERR, rv, s, APLOGNO(10320) + "Failed to configure ciphers %s: [%d] %s", + s->server_hostname, (int)rr, err_descr); + } + *pciphers = (APR_SUCCESS == rv)? ciphers : NULL; + return rv; +} + +static apr_status_t get_server_ciphersuites( + const apr_array_header_t **pciphersuites, + apr_pool_t *pool, tls_conf_server_t *sc) +{ + const apr_array_header_t *ciphers, *suites = NULL; + apr_status_t rv = APR_SUCCESS; + + rv = calc_ciphers(pool, sc->server, sc->global, + "", sc->tls_pref_ciphers, sc->tls_supp_ciphers, + &ciphers); + if (APR_SUCCESS != rv) goto cleanup; + + if (ciphers) { + suites = tls_proto_get_rustls_suites( + sc->global->proto, ciphers, pool); + if (APLOGtrace2(sc->server)) { + tls_proto_conf_t *conf = sc->global->proto; + ap_log_error(APLOG_MARK, APLOG_TRACE2, 0, sc->server, + "tls ciphers configured[%s]: %s", + sc->server->server_hostname, + tls_proto_get_cipher_names(conf, ciphers, pool)); + } + } + +cleanup: + *pciphersuites = (APR_SUCCESS == rv)? suites : NULL; + return rv; +} + +static apr_array_header_t *complete_cert_specs( + apr_pool_t *p, tls_conf_server_t *sc) +{ + apr_array_header_t *cert_adds, *key_adds, *specs; + + /* Take the configured certificate specifications and ask + * around for other modules to add specifications to this server. + * This is the way mod_md provides certificates. + * + * If the server then still has no cert specifications, ask + * around for `fallback` certificates which are commonly self-signed, + * temporary ones which let the server start up in order to + * obtain the `real` certificates from sources like ACME. + * Servers with fallbacks will answer all requests with 503. + */ + specs = apr_array_copy(p, sc->cert_specs); + cert_adds = apr_array_make(p, 2, sizeof(const char*)); + key_adds = apr_array_make(p, 2, sizeof(const char*)); + + ap_ssl_add_cert_files(sc->server, p, cert_adds, key_adds); + ap_log_error(APLOG_MARK, APLOG_TRACE1, 0, sc->server, + "init server: complete_cert_specs added %d certs", cert_adds->nelts); + add_file_specs(specs, p, cert_adds, key_adds); + + if (apr_is_empty_array(specs)) { + ap_log_error(APLOG_MARK, APLOG_TRACE1, 0, sc->server, + "init server: no certs configured, looking for fallback"); + ap_ssl_add_fallback_cert_files(sc->server, p, cert_adds, key_adds); + if (cert_adds->nelts > 0) { + add_file_specs(specs, p, cert_adds, key_adds); + sc->service_unavailable = 1; + ap_log_error(APLOG_MARK, APLOG_INFO, 0, sc->server, APLOGNO(10321) + "Init: %s will respond with '503 Service Unavailable' for now. 
There " + "are no SSL certificates configured and no other module contributed any.", + sc->server->server_hostname); + } + else if (!sc->base_server) { + ap_log_error(APLOG_MARK, APLOG_ERR, 0, sc->server, APLOGNO(10322) + "Init: %s has no certificates configured. Use 'TLSCertificate' to " + "configure a certificate and key file.", + sc->server->server_hostname); + } + } + return specs; +} + +static const rustls_certified_key *select_certified_key( + void* userdata, const rustls_client_hello *hello) +{ + conn_rec *c = userdata; + tls_conf_conn_t *cc; + tls_conf_server_t *sc; + apr_array_header_t *keys; + const rustls_certified_key *clone; + rustls_result rr = RUSTLS_RESULT_OK; + apr_status_t rv; + + ap_assert(c); + cc = tls_conf_conn_get(c); + ap_assert(cc); + ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, c, "client hello select certified key"); + if (!cc || !cc->server) goto cleanup; + sc = tls_conf_server_get(cc->server); + if (!sc) goto cleanup; + + cc->key = NULL; + cc->key_cloned = 0; + if (cc->local_keys && cc->local_keys->nelts > 0) { + keys = cc->local_keys; + } + else { + keys = sc->certified_keys; + } + if (!keys || keys->nelts <= 0) goto cleanup; + + rr = rustls_client_hello_select_certified_key(hello, + (const rustls_certified_key**)keys->elts, (size_t)keys->nelts, &cc->key); + if (RUSTLS_RESULT_OK != rr) goto cleanup; + + if (APR_SUCCESS == tls_ocsp_update_key(c, cc->key, &clone)) { + /* got OCSP response data for it, meaning the key was cloned and we need to remember */ + cc->key_cloned = 1; + cc->key = clone; + } + if (APLOGctrace2(c)) { + const char *key_id = tls_cert_reg_get_id(sc->global->cert_reg, cc->key); + ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, c, APLOGNO(10323) + "client hello selected key: %s", key_id? key_id : "unknown"); + } + return cc->key; + +cleanup: + if (RUSTLS_RESULT_OK != rr) { + const char *err_descr; + rv = tls_util_rustls_error(c->pool, rr, &err_descr); + ap_log_cerror(APLOG_MARK, APLOG_ERR, rv, c, APLOGNO(10324) + "Failed to select certified key: [%d] %s", (int)rr, err_descr); + } + return NULL; +} + +static apr_status_t server_conf_setup( + apr_pool_t *p, apr_pool_t *ptemp, tls_conf_server_t *sc, tls_conf_global_t *gc) +{ + apr_array_header_t *cert_specs; + apr_status_t rv = APR_SUCCESS; + + /* TODO: most code has been stripped here with the changes in rustls-ffi v0.8.0 + * and this means that any errors are only happening at connection setup, which + * is too late. + */ + (void)p; + ap_log_error(APLOG_MARK, APLOG_TRACE1, rv, sc->server, + "init server: %s", sc->server->server_hostname); + + if (sc->client_auth != TLS_CLIENT_AUTH_NONE && !sc->client_ca) { + ap_log_error(APLOG_MARK, APLOG_ERR, 0, sc->server, APLOGNO(10325) + "TLSClientAuthentication is enabled for %s, but no client CA file is set. 
" + "Use 'TLSClientCA ' to specify the trust anchors.", + sc->server->server_hostname); + rv = APR_EINVAL; goto cleanup; + } + + cert_specs = complete_cert_specs(ptemp, sc); + sc->certified_keys = apr_array_make(p, 3, sizeof(rustls_certified_key *)); + rv = load_certified_keys(sc->certified_keys, sc->server, cert_specs, gc->cert_reg); + if (APR_SUCCESS != rv) goto cleanup; + + rv = get_server_ciphersuites(&sc->ciphersuites, p, sc); + if (APR_SUCCESS != rv) goto cleanup; + + ap_log_error(APLOG_MARK, APLOG_TRACE1, rv, sc->server, + "init server: %s with %d certificates loaded", + sc->server->server_hostname, sc->certified_keys->nelts); +cleanup: + return rv; +} + +static apr_status_t get_proxy_ciphers(const apr_array_header_t **pciphersuites, + apr_pool_t *pool, tls_conf_proxy_t *pc) +{ + const apr_array_header_t *ciphers, *suites = NULL; + apr_status_t rv = APR_SUCCESS; + + rv = calc_ciphers(pool, pc->defined_in, pc->global, + "", pc->proxy_pref_ciphers, pc->proxy_supp_ciphers, &ciphers); + if (APR_SUCCESS != rv) goto cleanup; + + if (ciphers) { + suites = tls_proto_get_rustls_suites(pc->global->proto, ciphers, pool); + /* this changed the default rustls ciphers, configure it. */ + if (APLOGtrace2(pc->defined_in)) { + tls_proto_conf_t *conf = pc->global->proto; + ap_log_error(APLOG_MARK, APLOG_TRACE2, 0, pc->defined_in, + "tls proxy ciphers configured[%s]: %s", + pc->defined_in->server_hostname, + tls_proto_get_cipher_names(conf, ciphers, pool)); + } + } + +cleanup: + *pciphersuites = (APR_SUCCESS == rv)? suites : NULL; + return rv; +} + +static apr_status_t proxy_conf_setup( + apr_pool_t *p, apr_pool_t *ptemp, tls_conf_proxy_t *pc, tls_conf_global_t *gc) +{ + apr_status_t rv = APR_SUCCESS; + + (void)p; (void)ptemp; + ap_assert(pc->defined_in); + pc->global = gc; + + if (pc->proxy_ca && strcasecmp(pc->proxy_ca, "default")) { + ap_log_error(APLOG_MARK, APLOG_TRACE2, rv, pc->defined_in, + "proxy: will use roots in %s from %s", + pc->defined_in->server_hostname, pc->proxy_ca); + } + else { + ap_log_error(APLOG_MARK, APLOG_WARNING, rv, pc->defined_in, + "proxy: there is no TLSProxyCA configured in %s which means " + "the certificates of remote servers contacted from here will not be trusted.", + pc->defined_in->server_hostname); + } + + if (pc->proxy_protocol_min > 0) { + apr_array_header_t *tls_versions; + + ap_log_error(APLOG_MARK, APLOG_TRACE1, rv, pc->defined_in, + "init server: set proxy protocol min version %04x", pc->proxy_protocol_min); + tls_versions = tls_proto_create_versions_plus( + gc->proto, (apr_uint16_t)pc->proxy_protocol_min, ptemp); + if (tls_versions->nelts > 0) { + if (pc->proxy_protocol_min != APR_ARRAY_IDX(tls_versions, 0, apr_uint16_t)) { + ap_log_error(APLOG_MARK, APLOG_WARNING, 0, pc->defined_in, APLOGNO(10326) + "Init: the minimum proxy protocol version configured for %s (%04x) " + "is not supported and version %04x was selected instead.", + pc->defined_in->server_hostname, pc->proxy_protocol_min, + APR_ARRAY_IDX(tls_versions, 0, apr_uint16_t)); + } + } + else { + ap_log_error(APLOG_MARK, APLOG_ERR, 0, pc->defined_in, APLOGNO(10327) + "Unable to configure the proxy protocol version for %s: " + "neither the configured minimum version (%04x), nor any higher one is " + "available.", pc->defined_in->server_hostname, pc->proxy_protocol_min); + rv = APR_ENOTIMPL; goto cleanup; + } + } + +#if TLS_MACHINE_CERTS + rv = load_certified_keys(pc->machine_certified_keys, pc->defined_in, + pc->machine_cert_specs, gc->cert_reg); + if (APR_SUCCESS != rv) goto cleanup; +#endif + 
+cleanup: + return rv; +} + +static const rustls_certified_key *extract_client_hello_values( + void* userdata, const rustls_client_hello *hello) +{ + conn_rec *c = userdata; + tls_conf_conn_t *cc = tls_conf_conn_get(c); + size_t i, len; + unsigned short n; + + ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, c, "extract client hello values"); + if (!cc) goto cleanup; + cc->client_hello_seen = 1; + if (hello->server_name.len > 0) { + cc->sni_hostname = apr_pstrndup(c->pool, hello->server_name.data, hello->server_name.len); + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c, "sni detected: %s", cc->sni_hostname); + } + else { + cc->sni_hostname = NULL; + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c, "no sni from client"); + } + if (APLOGctrace4(c) && hello->signature_schemes.len > 0) { + for (i = 0; i < hello->signature_schemes.len; ++i) { + n = hello->signature_schemes.data[i]; + ap_log_cerror(APLOG_MARK, APLOG_TRACE4, 0, c, + "client supports signature scheme: %x", (int)n); + } + } + if ((len = rustls_slice_slice_bytes_len(hello->alpn)) > 0) { + apr_array_header_t *alpn = apr_array_make(c->pool, 5, sizeof(const char*)); + const char *protocol; + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c, "ALPN: client proposes %d protocols", (int)len); + for (i = 0; i < len; ++i) { + rustls_slice_bytes rs = rustls_slice_slice_bytes_get(hello->alpn, i); + protocol = apr_pstrndup(c->pool, (const char*)rs.data, rs.len); + APR_ARRAY_PUSH(alpn, const char*) = protocol; + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c, + "ALPN: client proposes %d: `%s`", (int)i, protocol); + } + cc->alpn = alpn; + } + else { + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, c, "ALPN: no alpn proposed by client"); + } +cleanup: + return NULL; +} + +static apr_status_t setup_hello_config(apr_pool_t *p, server_rec *base_server, tls_conf_global_t *gc) +{ + rustls_server_config_builder *builder; + rustls_result rr = RUSTLS_RESULT_OK; + apr_status_t rv = APR_SUCCESS; + + builder = rustls_server_config_builder_new(); + if (!builder) { + rr = RUSTLS_RESULT_PANIC; goto cleanup; + } + rustls_server_config_builder_set_hello_callback(builder, extract_client_hello_values); + gc->rustls_hello_config = rustls_server_config_builder_build(builder); + if (!gc->rustls_hello_config) { + rr = RUSTLS_RESULT_PANIC; goto cleanup; + } + +cleanup: + if (RUSTLS_RESULT_OK != rr) { + const char *err_descr = NULL; + rv = tls_util_rustls_error(p, rr, &err_descr); + ap_log_error(APLOG_MARK, APLOG_ERR, rv, base_server, APLOGNO(10328) + "Failed to init generic hello config: [%d] %s", (int)rr, err_descr); + goto cleanup; + } + return rv; +} + +static apr_status_t init_incoming(apr_pool_t *p, apr_pool_t *ptemp, server_rec *base_server) +{ + tls_conf_server_t *sc = tls_conf_server_get(base_server); + tls_conf_global_t *gc = sc->global; + server_rec *s; + apr_status_t rv = APR_ENOMEM; + + ap_log_error(APLOG_MARK, APLOG_TRACE2, 0, base_server, "tls_core_init incoming"); + apr_pool_cleanup_register(p, base_server, tls_core_free, + apr_pool_cleanup_null); + + rv = tls_proto_post_config(p, ptemp, base_server); + if (APR_SUCCESS != rv) goto cleanup; + + for (s = base_server; s; s = s->next) { + sc = tls_conf_server_get(s); + assert(sc); + ap_assert(sc->global == gc); + + /* If 'TLSEngine' has been configured, use those addresses to + * decide if we are enabled on this server. */ + sc->base_server = (s == base_server); + sc->enabled = we_listen_on(gc, s, sc)? 
TLS_FLAG_TRUE : TLS_FLAG_FALSE; + } + + rv = tls_cache_post_config(p, ptemp, base_server); + if (APR_SUCCESS != rv) goto cleanup; + + rv = setup_hello_config(p, base_server, gc); + if (APR_SUCCESS != rv) goto cleanup; + + /* Setup server configs and collect all certificates we use. */ + gc->cert_reg = tls_cert_reg_make(p); + gc->stores = tls_cert_root_stores_make(p); + gc->verifiers = tls_cert_verifiers_make(p, gc->stores); + for (s = base_server; s; s = s->next) { + sc = tls_conf_server_get(s); + rv = tls_conf_server_apply_defaults(sc, p); + if (APR_SUCCESS != rv) goto cleanup; + if (sc->enabled != TLS_FLAG_TRUE) continue; + rv = server_conf_setup(p, ptemp, sc, gc); + if (APR_SUCCESS != rv) { + ap_log_error(APLOG_MARK, APLOG_ERR, rv, s, "server setup failed: %s", + s->server_hostname); + goto cleanup; + } + } + +cleanup: + if (APR_SUCCESS != rv) { + ap_log_error(APLOG_MARK, APLOG_ERR, rv, base_server, "error during post_config"); + } + return rv; +} + +static apr_status_t init_outgoing(apr_pool_t *p, apr_pool_t *ptemp, server_rec *base_server) +{ + tls_conf_server_t *sc = tls_conf_server_get(base_server); + tls_conf_global_t *gc = sc->global; + tls_conf_dir_t *dc; + tls_conf_proxy_t *pc; + server_rec *s; + apr_status_t rv = APR_SUCCESS; + int i; + + (void)p; (void)ptemp; + (void)gc; + ap_log_error(APLOG_MARK, APLOG_TRACE2, 0, base_server, "tls_core_init outgoing"); + ap_assert(gc->mod_proxy_post_config_done); + /* Collect all proxy'ing default server dir configs. + * All section dir_configs should already be there - if there were any. */ + for (s = base_server; s; s = s->next) { + dc = tls_conf_dir_server_get(s); + rv = tls_conf_dir_apply_defaults(dc, p); + if (APR_SUCCESS != rv) goto cleanup; + if (dc->proxy_enabled != TLS_FLAG_TRUE) continue; + dc->proxy_config = tls_conf_proxy_make(p, dc, gc, s); + ap_log_error(APLOG_MARK, APLOG_TRACE3, 0, s, "%s: adding proxy_conf to globals", + s->server_hostname); + APR_ARRAY_PUSH(gc->proxy_configs, tls_conf_proxy_t*) = dc->proxy_config; + } + /* Now gc->proxy_configs contains all configurations we need to possibly + * act on for outgoing connections. */ + for (i = 0; i < gc->proxy_configs->nelts; ++i) { + pc = APR_ARRAY_IDX(gc->proxy_configs, i, tls_conf_proxy_t*); + rv = proxy_conf_setup(p, ptemp, pc, gc); + if (APR_SUCCESS != rv) goto cleanup; + } + +cleanup: + return rv; +} + +apr_status_t tls_core_init(apr_pool_t *p, apr_pool_t *ptemp, server_rec *base_server) +{ + tls_conf_server_t *sc = tls_conf_server_get(base_server); + tls_conf_global_t *gc = sc->global; + apr_status_t rv = APR_SUCCESS; + + ap_assert(gc); + if (TLS_CONF_ST_INIT == gc->status) { + rv = init_incoming(p, ptemp, base_server); + if (APR_SUCCESS != rv) goto cleanup; + gc->status = TLS_CONF_ST_INCOMING_DONE; + } + if (TLS_CONF_ST_INCOMING_DONE == gc->status) { + if (!gc->mod_proxy_post_config_done) goto cleanup; + + rv = init_outgoing(p, ptemp, base_server); + if (APR_SUCCESS != rv) goto cleanup; + gc->status = TLS_CONF_ST_OUTGOING_DONE; + } + if (TLS_CONF_ST_OUTGOING_DONE == gc->status) { + /* register all loaded certificates for OCSP stapling */ + rv = tls_ocsp_prime_certs(gc, p, base_server); + if (APR_SUCCESS != rv) goto cleanup; + + if (gc->verifiers) tls_cert_verifiers_clear(gc->verifiers); + if (gc->stores) tls_cert_root_stores_clear(gc->stores); + gc->status = TLS_CONF_ST_DONE; + } +cleanup: + return rv; +} + +static apr_status_t tls_core_conn_free(void *data) +{ + tls_conf_conn_t *cc = data; + + /* free all rustls things we are owning. 
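+ * These objects are allocated by the rustls library, not from APR pools, which
+ * is why this cleanup is registered on the connection pool (see cc_get_or_make()
+ * below) to free them explicitly.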
*/ + if (cc->rustls_connection) { + rustls_connection_free(cc->rustls_connection); + cc->rustls_connection = NULL; + } + if (cc->rustls_server_config) { + rustls_server_config_free(cc->rustls_server_config); + cc->rustls_server_config = NULL; + } + if (cc->rustls_client_config) { + rustls_client_config_free(cc->rustls_client_config); + cc->rustls_client_config = NULL; + } + if (cc->key_cloned && cc->key) { + rustls_certified_key_free(cc->key); + cc->key = NULL; + } + if (cc->local_keys) { + const rustls_certified_key *key; + int i; + + for (i = 0; i < cc->local_keys->nelts; ++i) { + key = APR_ARRAY_IDX(cc->local_keys, i, const rustls_certified_key*); + rustls_certified_key_free(key); + } + apr_array_clear(cc->local_keys); + } + return APR_SUCCESS; +} + +static tls_conf_conn_t *cc_get_or_make(conn_rec *c) +{ + tls_conf_conn_t *cc = tls_conf_conn_get(c); + if (!cc) { + cc = apr_pcalloc(c->pool, sizeof(*cc)); + cc->server = c->base_server; + cc->state = TLS_CONN_ST_INIT; + tls_conf_conn_set(c, cc); + apr_pool_cleanup_register(c->pool, cc, tls_core_conn_free, + apr_pool_cleanup_null); + } + return cc; +} + +void tls_core_conn_disable(conn_rec *c) +{ + tls_conf_conn_t *cc; + cc = cc_get_or_make(c); + if (cc->state == TLS_CONN_ST_INIT) { + cc->state = TLS_CONN_ST_DISABLED; + } +} + +void tls_core_conn_bind(conn_rec *c, ap_conf_vector_t *dir_conf) +{ + tls_conf_conn_t *cc = cc_get_or_make(c); + cc->dc = dir_conf? ap_get_module_config(dir_conf, &tls_module) : NULL; +} + + +static apr_status_t init_outgoing_connection(conn_rec *c) +{ + tls_conf_conn_t *cc = tls_conf_conn_get(c); + tls_conf_proxy_t *pc; + const apr_array_header_t *ciphersuites = NULL; + apr_array_header_t *tls_versions = NULL; + rustls_client_config_builder *builder = NULL; + rustls_root_cert_store *ca_store = NULL; + const char *hostname = NULL, *alpn_note = NULL; + rustls_result rr = RUSTLS_RESULT_OK; + apr_status_t rv = APR_SUCCESS; + + ap_assert(cc->outgoing); + ap_assert(cc->dc); + pc = cc->dc->proxy_config; + ap_assert(pc); + + hostname = apr_table_get(c->notes, "proxy-request-hostname"); + alpn_note = apr_table_get(c->notes, "proxy-request-alpn-protos"); + ap_log_error(APLOG_MARK, APLOG_TRACE1, 0, c->base_server, + "setup_outgoing: to %s [ALPN: %s] from configuration in %s" + " using CA %s", hostname, alpn_note, pc->defined_in->server_hostname, pc->proxy_ca); + + rv = get_proxy_ciphers(&ciphersuites, c->pool, pc); + if (APR_SUCCESS != rv) goto cleanup; + + if (pc->proxy_protocol_min > 0) { + tls_versions = tls_proto_create_versions_plus( + pc->global->proto, (apr_uint16_t)pc->proxy_protocol_min, c->pool); + } + + if (ciphersuites && ciphersuites->nelts > 0 + && tls_versions && tls_versions->nelts >= 0) { + rr = rustls_client_config_builder_new_custom( + (const struct rustls_supported_ciphersuite *const *)ciphersuites->elts, + (size_t)ciphersuites->nelts, + (const uint16_t *)tls_versions->elts, (size_t)tls_versions->nelts, + &builder); + if (RUSTLS_RESULT_OK != rr) goto cleanup; + } + else { + builder = rustls_client_config_builder_new(); + if (NULL == builder) { + rv = APR_ENOMEM; + goto cleanup; + } + } + + if (pc->proxy_ca && strcasecmp(pc->proxy_ca, "default")) { + rv = tls_cert_root_stores_get(pc->global->stores, pc->proxy_ca, &ca_store); + if (APR_SUCCESS != rv) goto cleanup; + rustls_client_config_builder_use_roots(builder, ca_store); + } + +#if TLS_MACHINE_CERTS + if (pc->machine_certified_keys->nelts > 0) { + ap_log_error(APLOG_MARK, APLOG_TRACE1, 0, c->base_server, + "setup_outgoing: adding %d client certificate", 
(int)pc->machine_certified_keys->nelts); + rr = rustls_client_config_builder_set_certified_key( + builder, (const rustls_certified_key**)pc->machine_certified_keys->elts, + (size_t)pc->machine_certified_keys->nelts); + if (RUSTLS_RESULT_OK != rr) goto cleanup; + } +#endif + + if (hostname) { + rustls_client_config_builder_set_enable_sni(builder, true); + } + else { + hostname = "unknown.proxy.local"; + rustls_client_config_builder_set_enable_sni(builder, false); + } + + if (alpn_note) { + apr_array_header_t *alpn_proposed = NULL; + char *p, *last; + apr_size_t len; + + alpn_proposed = apr_array_make(c->pool, 3, sizeof(const char*)); + p = apr_pstrdup(c->pool, alpn_note); + while ((p = apr_strtok(p, ", ", &last))) { + len = (apr_size_t)(last - p - (*last? 1 : 0)); + if (len > 255) { + ap_log_cerror(APLOG_MARK, APLOG_ERR, 0, c, APLOGNO(10329) + "ALPN proxy protocol identifier too long: %s", p); + rv = APR_EGENERAL; + goto cleanup; + } + APR_ARRAY_PUSH(alpn_proposed, const char*) = apr_pstrndup(c->pool, p, len); + p = NULL; + } + if (alpn_proposed->nelts > 0) { + apr_array_header_t *rustls_protocols; + const char* proto; + rustls_slice_bytes bytes; + int i; + + rustls_protocols = apr_array_make(c->pool, alpn_proposed->nelts, sizeof(rustls_slice_bytes)); + for (i = 0; i < alpn_proposed->nelts; ++i) { + proto = APR_ARRAY_IDX(alpn_proposed, i, const char*); + bytes.data = (const unsigned char*)proto; + bytes.len = strlen(proto); + APR_ARRAY_PUSH(rustls_protocols, rustls_slice_bytes) = bytes; + } + + rr = rustls_client_config_builder_set_alpn_protocols(builder, + (rustls_slice_bytes*)rustls_protocols->elts, (size_t)rustls_protocols->nelts); + if (RUSTLS_RESULT_OK != rr) goto cleanup; + + ap_log_error(APLOG_MARK, APLOG_TRACE2, 0, c->base_server, + "setup_outgoing: to %s, added %d ALPN protocols from %s", + hostname, rustls_protocols->nelts, alpn_note); + } + } + + cc->rustls_client_config = rustls_client_config_builder_build(builder); + builder = NULL; + + rr = rustls_client_connection_new(cc->rustls_client_config, hostname, &cc->rustls_connection); + if (RUSTLS_RESULT_OK != rr) goto cleanup; + rustls_connection_set_userdata(cc->rustls_connection, c); + +cleanup: + if (builder != NULL) rustls_client_config_builder_free(builder); + if (RUSTLS_RESULT_OK != rr) { + const char *err_descr = NULL; + rv = tls_util_rustls_error(c->pool, rr, &err_descr); + ap_log_error(APLOG_MARK, APLOG_ERR, rv, cc->server, APLOGNO(10330) + "Failed to init pre_session for outgoing %s to %s: [%d] %s", + cc->server->server_hostname, hostname, (int)rr, err_descr); + c->aborted = 1; + cc->state = TLS_CONN_ST_DISABLED; + goto cleanup; + } + return rv; +} + +int tls_core_pre_conn_init(conn_rec *c) +{ + tls_conf_server_t *sc = tls_conf_server_get(c->base_server); + tls_conf_conn_t *cc; + + cc = cc_get_or_make(c); + if (cc->state == TLS_CONN_ST_INIT) { + /* Need to decide if we TLS this connection or not */ + int enabled = +#if AP_MODULE_MAGIC_AT_LEAST(20120211, 109) + !c->outgoing && +#endif + sc->enabled == TLS_FLAG_TRUE; + cc->state = enabled? TLS_CONN_ST_CLIENT_HELLO : TLS_CONN_ST_DISABLED; + cc->client_auth = sc->client_auth; + ap_log_error(APLOG_MARK, APLOG_TRACE3, 0, c->base_server, + "tls_core_conn_init: %s for tls: %s", + enabled? 
"enabled" : "disabled", c->base_server->server_hostname); + } + else if (cc->state == TLS_CONN_ST_DISABLED) { + ap_log_error(APLOG_MARK, APLOG_TRACE4, 0, c->base_server, + "tls_core_conn_init, not our connection: %s", + c->base_server->server_hostname); + goto cleanup; + } + +cleanup: + return TLS_CONN_ST_IS_ENABLED(cc)? OK : DECLINED; +} + +apr_status_t tls_core_conn_init(conn_rec *c) +{ + tls_conf_server_t *sc = tls_conf_server_get(c->base_server); + tls_conf_conn_t *cc; + rustls_result rr = RUSTLS_RESULT_OK; + apr_status_t rv = APR_SUCCESS; + + cc = tls_conf_conn_get(c); + if (cc && TLS_CONN_ST_IS_ENABLED(cc) && !cc->rustls_connection) { + if (cc->outgoing) { + rv = init_outgoing_connection(c); + if (APR_SUCCESS != rv) goto cleanup; + } + else { + /* Use a generic rustls_connection with its defaults, which we feed + * the first TLS bytes from the client. Its Hello message will trigger + * our callback where we can inspect the (possibly) supplied SNI and + * select another server. + */ + rr = rustls_server_connection_new(sc->global->rustls_hello_config, &cc->rustls_connection); + if (RUSTLS_RESULT_OK != rr) goto cleanup; + /* we might refuse requests on this connection, e.g. ACME challenge */ + cc->service_unavailable = sc->service_unavailable; + } + rustls_connection_set_userdata(cc->rustls_connection, c); + } + +cleanup: + if (RUSTLS_RESULT_OK != rr) { + const char *err_descr = NULL; + rv = tls_util_rustls_error(c->pool, rr, &err_descr); + ap_log_error(APLOG_MARK, APLOG_ERR, rv, sc->server, APLOGNO(10331) + "Failed to init TLS connection for server %s: [%d] %s", + sc->server->server_hostname, (int)rr, err_descr); + c->aborted = 1; + cc->state = TLS_CONN_ST_DISABLED; + goto cleanup; + } + return rv; +} + +static int find_vhost(void *sni_hostname, conn_rec *c, server_rec *s) +{ + if (tls_util_name_matches_server(sni_hostname, s)) { + tls_conf_conn_t *cc = tls_conf_conn_get(c); + cc->server = s; + return 1; + } + return 0; +} + +static apr_status_t select_application_protocol( + conn_rec *c, server_rec *s, rustls_server_config_builder *builder) +{ + tls_conf_conn_t *cc = tls_conf_conn_get(c); + const char *proposed; + rustls_result rr = RUSTLS_RESULT_OK; + apr_status_t rv = APR_SUCCESS; + + /* The server always has a protocol it uses, normally "http/1.1". + * if the client, via ALPN, proposes protocols, they are in + * order of preference. + * We propose those to modules registered in the server and + * get the protocol back that someone is willing to run on this + * connection. + * If this is different from what the connection already does, + * we tell the server (and all protocol modules) to switch. + * If successful, we announce that protocol back to the client as + * our only ALPN protocol and then do the 'real' handshake. 
+ */ + cc->application_protocol = ap_get_protocol(c); + if (cc->alpn && cc->alpn->nelts > 0) { + rustls_slice_bytes rsb; + + proposed = ap_select_protocol(c, NULL, s, cc->alpn); + if (!proposed) { + ap_log_cerror(APLOG_MARK, APLOG_TRACE2, rv, c, + "ALPN: no protocol selected in server"); + goto cleanup; + } + + if (strcmp(proposed, cc->application_protocol)) { + ap_log_cerror(APLOG_MARK, APLOG_TRACE2, rv, c, + "ALPN: switching protocol from `%s` to `%s`", cc->application_protocol, proposed); + rv = ap_switch_protocol(c, NULL, cc->server, proposed); + if (APR_SUCCESS != rv) goto cleanup; + } + + rsb.data = (const unsigned char*)proposed; + rsb.len = strlen(proposed); + rr = rustls_server_config_builder_set_alpn_protocols(builder, &rsb, 1); + if (RUSTLS_RESULT_OK != rr) goto cleanup; + + cc->application_protocol = proposed; + ap_log_cerror(APLOG_MARK, APLOG_TRACE2, rv, c, + "ALPN: using connection protocol `%s`", cc->application_protocol); + + /* protocol was switched, this could be a challenge protocol + * such as "acme-tls/1". Give handlers the opportunity to + * override the certificate for this connection. */ + if (strcmp("h2", proposed) && strcmp("http/1.1", proposed)) { + const char *cert_pem = NULL, *key_pem = NULL; + if (ap_ssl_answer_challenge(c, cc->sni_hostname, &cert_pem, &key_pem)) { + /* With ACME we can have challenge connections to a unknown domains + * that need to be answered with a special certificate and will + * otherwise not answer any requests. See RFC 8555 */ + rv = use_local_key(c, cert_pem, key_pem); + if (APR_SUCCESS != rv) goto cleanup; + + cc->service_unavailable = 1; + cc->client_auth = TLS_CLIENT_AUTH_NONE; + } + } + } + +cleanup: + if (rr != RUSTLS_RESULT_OK) { + const char *err_descr = NULL; + rv = tls_util_rustls_error(c->pool, rr, &err_descr); + ap_log_error(APLOG_MARK, APLOG_ERR, rv, s, APLOGNO(10332) + "Failed to init session for server %s: [%d] %s", + s->server_hostname, (int)rr, err_descr); + c->aborted = 1; + goto cleanup; + } + return rv; +} + +static apr_status_t build_server_connection(rustls_connection **pconnection, + const rustls_server_config **pconfig, + conn_rec *c) +{ + tls_conf_conn_t *cc = tls_conf_conn_get(c); + tls_conf_server_t *sc; + const apr_array_header_t *tls_versions = NULL; + rustls_server_config_builder *builder = NULL; + const rustls_server_config *config = NULL; + rustls_connection *rconnection = NULL; + rustls_result rr = RUSTLS_RESULT_OK; + apr_status_t rv = APR_SUCCESS; + + sc = tls_conf_server_get(cc->server); + + if (sc->tls_protocol_min > 0) { + ap_log_error(APLOG_MARK, APLOG_TRACE1, rv, sc->server, + "init server: set protocol min version %04x", sc->tls_protocol_min); + tls_versions = tls_proto_create_versions_plus( + sc->global->proto, (apr_uint16_t)sc->tls_protocol_min, c->pool); + if (tls_versions->nelts > 0) { + if (sc->tls_protocol_min != APR_ARRAY_IDX(tls_versions, 0, apr_uint16_t)) { + ap_log_error(APLOG_MARK, APLOG_WARNING, 0, sc->server, APLOGNO(10333) + "Init: the minimum protocol version configured for %s (%04x) " + "is not supported and version %04x was selected instead.", + sc->server->server_hostname, sc->tls_protocol_min, + APR_ARRAY_IDX(tls_versions, 0, apr_uint16_t)); + } + } + else { + ap_log_error(APLOG_MARK, APLOG_ERR, 0, sc->server, APLOGNO(10334) + "Unable to configure the protocol version for %s: " + "neither the configured minimum version (%04x), nor any higher one is " + "available.", sc->server->server_hostname, sc->tls_protocol_min); + rv = APR_ENOTIMPL; goto cleanup; + } + } + else if 
(sc->ciphersuites && sc->ciphersuites->nelts > 0) { + /* FIXME: rustls-ffi current has not way to make a builder with ALL_PROTOCOL_VERSIONS */ + tls_versions = tls_proto_create_versions_plus(sc->global->proto, 0, c->pool); + } + + if (sc->ciphersuites && sc->ciphersuites->nelts > 0 + && tls_versions && tls_versions->nelts >= 0) { + rr = rustls_server_config_builder_new_custom( + (const struct rustls_supported_ciphersuite *const *)sc->ciphersuites->elts, + (size_t)sc->ciphersuites->nelts, + (const uint16_t *)tls_versions->elts, (size_t)tls_versions->nelts, + &builder); + if (RUSTLS_RESULT_OK != rr) goto cleanup; + } + else { + builder = rustls_server_config_builder_new(); + if (NULL == builder) { + rv = APR_ENOMEM; + goto cleanup; + } + } + + /* decide on the application protocol, this may change other + * settings like client_auth. */ + rv = select_application_protocol(c, cc->server, builder); + + if (cc->client_auth != TLS_CLIENT_AUTH_NONE) { + ap_assert(sc->client_ca); /* checked in server_setup */ + if (cc->client_auth == TLS_CLIENT_AUTH_REQUIRED) { + const rustls_client_cert_verifier *verifier; + rv = tls_cert_client_verifiers_get(sc->global->verifiers, sc->client_ca, &verifier); + if (APR_SUCCESS != rv) goto cleanup; + rustls_server_config_builder_set_client_verifier(builder, verifier); + } + else { + const rustls_client_cert_verifier_optional *verifier; + rv = tls_cert_client_verifiers_get_optional(sc->global->verifiers, sc->client_ca, &verifier); + if (APR_SUCCESS != rv) goto cleanup; + rustls_server_config_builder_set_client_verifier_optional(builder, verifier); + } + } + + rustls_server_config_builder_set_hello_callback(builder, select_certified_key); + + rr = rustls_server_config_builder_set_ignore_client_order( + builder, sc->honor_client_order == TLS_FLAG_FALSE); + if (RUSTLS_RESULT_OK != rr) goto cleanup; + + rv = tls_cache_init_server(builder, sc->server); + if (APR_SUCCESS != rv) goto cleanup; + + config = rustls_server_config_builder_build(builder); + builder = NULL; + if (!config) { + rv = APR_ENOMEM; goto cleanup; + } + + rr = rustls_server_connection_new(config, &rconnection); + if (RUSTLS_RESULT_OK != rr) goto cleanup; + rustls_connection_set_userdata(rconnection, c); + +cleanup: + if (rr != RUSTLS_RESULT_OK) { + const char *err_descr = NULL; + rv = tls_util_rustls_error(c->pool, rr, &err_descr); + ap_log_error(APLOG_MARK, APLOG_DEBUG, rv, sc->server, APLOGNO(10335) + "Failed to init session for server %s: [%d] %s", + sc->server->server_hostname, (int)rr, err_descr); + } + if (APR_SUCCESS == rv) { + *pconfig = config; + *pconnection = rconnection; + ap_log_error(APLOG_MARK, APLOG_TRACE1, 0, sc->server, + "tls_core_conn_server_init done: %s", + sc->server->server_hostname); + } + else { + ap_log_error(APLOG_MARK, APLOG_ERR, rv, sc->server, APLOGNO(10336) + "Failed to init session for server %s", + sc->server->server_hostname); + c->aborted = 1; + if (config) rustls_server_config_free(config); + if (builder) rustls_server_config_builder_free(builder); + } + return rv; +} + +apr_status_t tls_core_conn_seen_client_hello(conn_rec *c) +{ + tls_conf_conn_t *cc = tls_conf_conn_get(c); + tls_conf_server_t *sc; + apr_status_t rv = APR_SUCCESS; + int sni_match = 0; + + /* The initial rustls generic session has been fed the client hello and + * we have extracted SNI and ALPN values (so present). + * Time to select the actual server_rec and application protocol that + * will be used on this connection. 
*/ + ap_assert(cc); + sc = tls_conf_server_get(cc->server); + if (!cc->client_hello_seen) goto cleanup; + + if (cc->sni_hostname) { + if (ap_vhost_iterate_given_conn(c, find_vhost, (void*)cc->sni_hostname)) { + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, rv, c, APLOGNO(10337) + "vhost_init: virtual host found for SNI '%s'", cc->sni_hostname); + sni_match = 1; + } + else if (tls_util_name_matches_server(cc->sni_hostname, ap_server_conf)) { + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, rv, c, APLOGNO(10338) + "vhost_init: virtual host NOT found, but base server[%s] matches SNI '%s'", + ap_server_conf->server_hostname, cc->sni_hostname); + cc->server = ap_server_conf; + sni_match = 1; + } + else if (sc->strict_sni == TLS_FLAG_FALSE) { + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, rv, c, APLOGNO(10339) + "vhost_init: no virtual host found, relaxed SNI checking enabled, SNI '%s'", + cc->sni_hostname); + } + else { + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, rv, c, APLOGNO(10340) + "vhost_init: no virtual host, nor base server[%s] matches SNI '%s'", + c->base_server->server_hostname, cc->sni_hostname); + cc->server = sc->global->ap_server; + rv = APR_NOTFOUND; goto cleanup; + } + } + else { + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, c, APLOGNO(10341) + "vhost_init: no SNI hostname provided by client"); + } + + /* reinit, we might have a new server selected */ + sc = tls_conf_server_get(cc->server); + /* on relaxed SNI matches, we do not enforce the 503 of fallback + * certificates. */ + if (!cc->service_unavailable) { + cc->service_unavailable = sni_match? sc->service_unavailable : 0; + } + + /* if found or not, cc->server will be the server we use now to do + * the real handshake and, if successful, the traffic after that. + * Free the current session and create the real one for the + * selected server. 
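+ * (The TLS bytes already read for the pre-handshake session were buffered by our input filter and are replayed into the new rustls_connection, see filter_recv_client_hello() in tls_filter.c.)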
*/ + if (cc->rustls_server_config) { + rustls_server_config_free(cc->rustls_server_config); + cc->rustls_server_config = NULL; + } + rustls_connection_free(cc->rustls_connection); + cc->rustls_connection = NULL; + + rv = build_server_connection(&cc->rustls_connection, &cc->rustls_server_config, c); + +cleanup: + return rv; +} + +apr_status_t tls_core_conn_post_handshake(conn_rec *c) +{ + tls_conf_conn_t *cc = tls_conf_conn_get(c); + tls_conf_server_t *sc = tls_conf_server_get(cc->server); + const rustls_supported_ciphersuite *rsuite; + const rustls_certificate *cert; + apr_status_t rv = APR_SUCCESS; + + if (rustls_connection_is_handshaking(cc->rustls_connection)) { + rv = APR_EGENERAL; + ap_log_error(APLOG_MARK, APLOG_ERR, rv, cc->server, APLOGNO(10342) + "post handshake, but rustls claims to still be handshaking: %s", + cc->server->server_hostname); + goto cleanup; + } + + cc->tls_protocol_id = rustls_connection_get_protocol_version(cc->rustls_connection); + cc->tls_protocol_name = tls_proto_get_version_name(sc->global->proto, + cc->tls_protocol_id, c->pool); + rsuite = rustls_connection_get_negotiated_ciphersuite(cc->rustls_connection); + if (!rsuite) { + rv = APR_EGENERAL; + ap_log_error(APLOG_MARK, APLOG_ERR, rv, cc->server, APLOGNO(10343) + "post handshake, but rustls does not report negotiated cipher suite: %s", + cc->server->server_hostname); + goto cleanup; + } + cc->tls_cipher_id = rustls_supported_ciphersuite_get_suite(rsuite); + cc->tls_cipher_name = tls_proto_get_cipher_name(sc->global->proto, + cc->tls_cipher_id, c->pool); + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c, "post_handshake %s: %s [%s]", + cc->server->server_hostname, cc->tls_protocol_name, cc->tls_cipher_name); + + cert = rustls_connection_get_peer_certificate(cc->rustls_connection, 0); + if (cert) { + size_t i = 0; + + cc->peer_certs = apr_array_make(c->pool, 5, sizeof(const rustls_certificate*)); + while (cert) { + APR_ARRAY_PUSH(cc->peer_certs, const rustls_certificate*) = cert; + cert = rustls_connection_get_peer_certificate(cc->rustls_connection, ++i); + } + } + if (!cc->peer_certs && sc->client_auth == TLS_CLIENT_AUTH_REQUIRED) { + ap_log_cerror(APLOG_MARK, APLOG_INFO, 0, c, APLOGNO(10344) + "A client certificate is required, but no acceptable certificate was presented."); + rv = APR_ECONNABORTED; + } + + rv = tls_var_handshake_done(c); +cleanup: + return rv; +} + +/** + * Return != 0, if a connection also serve requests for server . + */ +static int tls_conn_compatible_for(tls_conf_conn_t *cc, server_rec *other) +{ + tls_conf_server_t *oc, *sc; + const rustls_certified_key *sk, *ok; + int i; + + /* - differences in certificates are the responsibility of the client. + * if it thinks the SNI server works for r->server, we are fine with that. + * - if there are differences in requirements to client certificates, we + * need to deny the request. + */ + if (!cc->server || !other) return 0; + if (cc->server == other) return 1; + oc = tls_conf_server_get(other); + if (!oc) return 0; + sc = tls_conf_server_get(cc->server); + if (!sc) return 0; + + /* same certified keys used? 
*/ + if (sc->certified_keys->nelts != oc->certified_keys->nelts) return 0; + for (i = 0; i < sc->certified_keys->nelts; ++i) { + sk = APR_ARRAY_IDX(sc->certified_keys, i, const rustls_certified_key*); + ok = APR_ARRAY_IDX(oc->certified_keys, i, const rustls_certified_key*); + if (sk != ok) return 0; + } + + /* If the connection TLS version is below other other min one, no */ + if (oc->tls_protocol_min > 0 && cc->tls_protocol_id < oc->tls_protocol_min) return 0; + /* If the connection TLS cipher is listed as suppressed by other, no */ + if (oc->tls_supp_ciphers && tls_util_array_uint16_contains( + oc->tls_supp_ciphers, cc->tls_cipher_id)) return 0; + return 1; +} + +int tls_core_request_check(request_rec *r) +{ + conn_rec *c = r->connection; + tls_conf_conn_t *cc = tls_conf_conn_get(c->master? c->master : c); + int rv = DECLINED; /* do not object to the request */ + + /* If we are not enabled on this connection, leave. We are not renegotiating. + * Otherwise: + * - service is unavailable when we have only a fallback certificate or + * when a challenge protocol is active (ACME tls-alpn-01 for example). + * - with vhosts configured and no SNI from the client, deny access. + * - are servers compatible for connection sharing? + */ + if (!TLS_CONN_ST_IS_ENABLED(cc)) goto cleanup; + + ap_log_rerror(APLOG_MARK, APLOG_TRACE3, 0, r, + "tls_core_request_check[%s, %d]: %s", r->hostname, + cc? cc->service_unavailable : 2, r->the_request); + if (cc->service_unavailable) { + rv = HTTP_SERVICE_UNAVAILABLE; goto cleanup; + } + if (!cc->sni_hostname && r->connection->vhost_lookup_data) { + rv = HTTP_FORBIDDEN; goto cleanup; + } + if (!tls_conn_compatible_for(cc, r->server)) { + rv = HTTP_MISDIRECTED_REQUEST; + ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(10345) + "Connection host %s, selected via SNI, and request host %s" + " have incompatible TLS configurations.", + cc->server->server_hostname, r->hostname); + goto cleanup; + } +cleanup: + return rv; +} + +apr_status_t tls_core_error(conn_rec *c, rustls_result rr, const char **perrstr) +{ + tls_conf_conn_t *cc = tls_conf_conn_get(c); + apr_status_t rv; + + rv = tls_util_rustls_error(c->pool, rr, perrstr); + if (cc) { + cc->last_error = rr; + cc->last_error_descr = *perrstr; + } + return rv; +} + +int tls_core_setup_outgoing(conn_rec *c) +{ + tls_conf_conn_t *cc; + int rv = DECLINED; + + ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, c, + "tls_core_setup_outgoing called"); +#if AP_MODULE_MAGIC_AT_LEAST(20120211, 109) + if (!c->outgoing) goto cleanup; +#endif + cc = cc_get_or_make(c); + if (cc->state == TLS_CONN_ST_DISABLED) { + ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, c, + "tls_core_setup_outgoing: already disabled"); + goto cleanup; + } + if (TLS_CONN_ST_IS_ENABLED(cc)) { + /* we already handle it, allow repeated calls */ + ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, c, + "tls_core_setup_outgoing: already enabled"); + rv = OK; goto cleanup; + } + cc->outgoing = 1; + if (!cc->dc) { + /* In case there is not dir_conf bound for this connection, we fallback + * to the defaults in the base server (we have no virtual host config to use) */ + cc->dc = ap_get_module_config(c->base_server->lookup_defaults, &tls_module); + } + if (cc->dc->proxy_enabled != TLS_FLAG_TRUE) { + cc->state = TLS_CONN_ST_DISABLED; + ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, c, + "tls_core_setup_outgoing: TLSProxyEngine not configured"); + goto cleanup; + } + /* we handle this connection */ + cc->state = TLS_CONN_ST_CLIENT_HELLO; + rv = OK; + +cleanup: + ap_log_cerror(APLOG_MARK, 
APLOG_TRACE2, 0, c, + "tls_core_setup_outgoing returns %s", rv == OK? "OK" : "DECLINED"); + return rv; +} diff --git a/modules/tls/tls_core.h b/modules/tls/tls_core.h new file mode 100644 index 0000000..6ee1713 --- /dev/null +++ b/modules/tls/tls_core.h @@ -0,0 +1,184 @@ +/* Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef tls_core_h +#define tls_core_h + +/* The module's state handling of a connection in normal chronological order. + */ +typedef enum { + TLS_CONN_ST_INIT, /* being initialized */ + TLS_CONN_ST_DISABLED, /* TLS is disabled here */ + TLS_CONN_ST_CLIENT_HELLO, /* TLS is enabled, prep handshake */ + TLS_CONN_ST_HANDSHAKE, /* TLS is enabled, handshake ongoing */ + TLS_CONN_ST_TRAFFIC, /* TLS is enabled, handshake done */ + TLS_CONN_ST_NOTIFIED, /* TLS is enabled, notification to end sent */ + TLS_CONN_ST_DONE, /* TLS is enabled, TLS has shut down */ +} tls_conn_state_t; + +#define TLS_CONN_ST_IS_ENABLED(cc) (cc && cc->state >= TLS_CONN_ST_CLIENT_HELLO) + +struct tls_filter_ctx_t; + +/* The module's configuration for a connection. Created at connection + * start and mutable during the lifetime of the connection. + * (A conn_rec is only ever processed by one thread at a time.) + */ +typedef struct { + server_rec *server; /* the server_rec selected for this connection, + * initially c->base_server, to be negotiated via SNI.
*/ + tls_conf_dir_t *dc; /* directory config applying here */ + tls_conn_state_t state; + int outgoing; /* != 0 iff outgoing connection (redundant once c->outgoing is everywhere) */ + int service_unavailable; /* we 503 all requests on this connection */ + tls_client_auth_t client_auth; /* how client authentication with certificates is used */ + int client_hello_seen; /* the client hello has been inspected */ + + rustls_connection *rustls_connection; /* the session used on this connection or NULL */ + const rustls_server_config *rustls_server_config; /* the config made for this connection (incoming) or NULL */ + const rustls_client_config *rustls_client_config; /* the config made for this connection (outgoing) or NULL */ + struct tls_filter_ctx_t *filter_ctx; /* the context used by this connection's tls filters */ + + apr_array_header_t *local_keys; /* rustls_certified_key* array of connection specific keys */ + const rustls_certified_key *key; /* the key selected for the session */ + int key_cloned; /* != 0 iff the key is a unique clone, to be freed */ + apr_array_header_t *peer_certs; /* handshaked peer certificates or NULL */ + const char *sni_hostname; /* the SNI value from the client hello, or NULL */ + const apr_array_header_t *alpn; /* the protocols proposed via ALPN by the client */ + const char *application_protocol; /* the ALPN selected protocol or NULL */ + + int session_id_cache_hit; /* if a submitted session id was found in our cache */ + + apr_uint16_t tls_protocol_id; /* the TLS version negotiated */ + const char *tls_protocol_name; /* the name of the TLS version negotiated */ + apr_uint16_t tls_cipher_id; /* the TLS cipher suite negotiated */ + const char *tls_cipher_name; /* the name of the TLS cipher suite negotiated */ + + const char *user_name; /* != NULL if we derived a TLSUserName from the client_cert */ + apr_table_t *subprocess_env; /* common TLS variables for this connection */ + + rustls_result last_error; + const char *last_error_descr; + +} tls_conf_conn_t; + +/* Get the connection specific module configuration. */ +tls_conf_conn_t *tls_conf_conn_get(conn_rec *c); + +/* Set the module configuration for a connection. */ +void tls_conf_conn_set(conn_rec *c, tls_conf_conn_t *cc); + +/* Return OK iff this connection is a TLS connection (or a secondary on a TLS connection). */ +int tls_conn_check_ssl(conn_rec *c); + +/** + * Initialize the module's global and server specific settings. This runs + * in Apache's "post-config" phase, meaning the configuration has been read + * and checked for syntactic and other easily verifiable errors and now + * it is time to load everything in and make it ready for traffic. + *

@param p a memory pool staying with us the whole time until the server stops/reloads. + * @param ptemp a temporary pool as a scratch buffer that will be destroyed shortly after. + * @param base_server the server for the global configuration which links, via ->next, to + * all contained virtual hosts configured. + */ +apr_status_t tls_core_init(apr_pool_t *p, apr_pool_t *ptemp, server_rec *base_server); + +/** + * Initialize the module's outgoing connection settings. This runs + * in Apache's "post-config" phase after mod_proxy. + */ +apr_status_t tls_core_init_outgoing(apr_pool_t *p, apr_pool_t *ptemp, server_rec *base_server); + +/** + * Supply a directory configuration for the connection to work with. This + * may be NULL. This can be called several times during the lifetime of a + * connection and must not change the current TLS state. + * @param c the connection + * @param dir_conf optional directory configuration that applies + */ +void tls_core_conn_bind(conn_rec *c, ap_conf_vector_t *dir_conf); + +/** + * Disable TLS on a new connection. Will do nothing on already initialized + * connections. + * @param c a new connection + */ +void tls_core_conn_disable(conn_rec *c); + +/** + * Initialize the tls_conf_conn_t for the connection + * and decide if TLS is enabled or not. + * @return OK if enabled, DECLINED otherwise + */ +int tls_core_pre_conn_init(conn_rec *c); + +/** + * Initialize the module for a TLS enabled connection. + * @param c a new connection + */ +apr_status_t tls_core_conn_init(conn_rec *c); + +/** + * Called when the ClientHello has been received and values from it + * have been extracted into the `tls_conf_conn_t` of the connection. + * + * Decides: + * - which `server_rec` this connection is for (SNI) + * - which application protocol to use (ALPN) + * This may be unsuccessful for several reasons: the SNI + * from the client may not be known, or the selected server + * has no certificates available, etc. + * On success, a proper `rustls_connection` will have been + * created and set in the `tls_conf_conn_t` of the connection. + */ +apr_status_t tls_core_conn_seen_client_hello(conn_rec *c); + +/** + * The TLS handshake for the connection has been successfully performed. + * This means that TLS related properties, such as TLS version and cipher, + * are known and the props in `tls_conf_conn_t` of the connection + * can be set. + */ +apr_status_t tls_core_conn_post_handshake(conn_rec *c); + +/** + * After a request has been read, but before processing is started, we + * check if everything looks good to us: + * - was an SNI hostname provided by the client when we have vhosts to choose from? + * If not, we deny it. + * - if the SNI hostname and request host are not the same, are they - from TLS + * point of view - 'compatible' enough? For example, if one server requires + * client certificates and the other does not (or with different settings), such + * a request will also be denied. + * Returns DECLINED if everything is ok, otherwise an HTTP response code to + * generate an error page for. + */ +int tls_core_request_check(request_rec *r); + +/** + * A Rustls error happened while processing the connection. Look up an + * error description, determine the apr_status_t to use for it and remember + * this as the last error at tls_conf_conn_t. + */ +apr_status_t tls_core_error(conn_rec *c, rustls_result rr, const char **perrstr); + +/** + * Determine if we handle the TLS for an outgoing connection or not. + * @param c the connection + * @return OK if we handle the TLS, DECLINED otherwise.
+ */ +int tls_core_setup_outgoing(conn_rec *c); + +#endif /* tls_core_h */ diff --git a/modules/tls/tls_filter.c b/modules/tls/tls_filter.c new file mode 100644 index 0000000..0ee6be6 --- /dev/null +++ b/modules/tls/tls_filter.c @@ -0,0 +1,1017 @@ +/* Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#include + +#include "tls_proto.h" +#include "tls_conf.h" +#include "tls_core.h" +#include "tls_filter.h" +#include "tls_util.h" + + +extern module AP_MODULE_DECLARE_DATA tls_module; +APLOG_USE_MODULE(tls); + + +static rustls_io_result tls_read_callback( + void *userdata, unsigned char *buf, size_t n, size_t *out_n) +{ + tls_data_t *d = userdata; + size_t len = d->len > n? n : d->len; + memcpy(buf, d->data, len); + *out_n = len; + return 0; +} + +/** + * Provide TLS encrypted data to the rustls server_session in cc->rustls_connection. + * + * If fin_tls_bb holds data, take it from there. Otherwise perform a + * read via the network filters below us into that brigade. + * + * fin_block determines if we do a blocking read initially or not. + * If the first read did not produce enough data, any secondary read is done + * non-blocking. + * + * If any data has been added to cc->rustls_connection, call its "processing" + * function to handle the added data before leaving.
+ */ +static apr_status_t read_tls_to_rustls( + tls_filter_ctx_t *fctx, apr_size_t len, apr_read_type_e block, int errors_expected) +{ + tls_data_t d; + apr_size_t rlen; + apr_off_t passed = 0; + rustls_result rr = RUSTLS_RESULT_OK; + int os_err; + apr_status_t rv = APR_SUCCESS; + + if (APR_BRIGADE_EMPTY(fctx->fin_tls_bb)) { + ap_log_error(APLOG_MARK, APLOG_TRACE2, rv, fctx->cc->server, + "read_tls_to_rustls, get data from network, block=%d", block); + rv = ap_get_brigade(fctx->fin_ctx->next, fctx->fin_tls_bb, + AP_MODE_READBYTES, block, (apr_off_t)len); + if (APR_SUCCESS != rv) { + goto cleanup; + } + } + + while (!APR_BRIGADE_EMPTY(fctx->fin_tls_bb) && passed < (apr_off_t)len) { + apr_bucket *b = APR_BRIGADE_FIRST(fctx->fin_tls_bb); + + if (APR_BUCKET_IS_EOS(b)) { + ap_log_error(APLOG_MARK, APLOG_TRACE2, rv, fctx->cc->server, + "read_tls_to_rustls, EOS"); + if (fctx->fin_tls_buffer_bb) { + apr_brigade_cleanup(fctx->fin_tls_buffer_bb); + } + rv = APR_EOF; goto cleanup; + } + + rv = apr_bucket_read(b, (const char**)&d.data, &d.len, block); + if (APR_STATUS_IS_EOF(rv)) { + apr_bucket_delete(b); + continue; + } + else if (APR_SUCCESS != rv) { + goto cleanup; + } + + if (d.len > 0) { + /* got something, do not block on getting more */ + block = APR_NONBLOCK_READ; + + os_err = rustls_connection_read_tls(fctx->cc->rustls_connection, + tls_read_callback, &d, &rlen); + if (os_err) { + rv = APR_FROM_OS_ERROR(os_err); + goto cleanup; + } + + if (fctx->fin_tls_buffer_bb) { + /* we buffer for later replay on the 'real' rustls_connection */ + apr_brigade_write(fctx->fin_tls_buffer_bb, NULL, NULL, (const char*)d.data, rlen); + } + if (rlen >= d.len) { + apr_bucket_delete(b); + } + else { + b->start += (apr_off_t)rlen; + b->length -= rlen; + } + fctx->fin_bytes_in_rustls += (apr_off_t)d.len; + passed += (apr_off_t)rlen; + } + else if (d.len == 0) { + apr_bucket_delete(b); + } + } + + if (passed > 0) { + rr = rustls_connection_process_new_packets(fctx->cc->rustls_connection); + if (rr != RUSTLS_RESULT_OK) goto cleanup; + } + +cleanup: + if (rr != RUSTLS_RESULT_OK) { + rv = APR_ECONNRESET; + if (!errors_expected) { + const char *err_descr = ""; + rv = tls_core_error(fctx->c, rr, &err_descr); + ap_log_cerror(APLOG_MARK, APLOG_WARNING, rv, fctx->c, APLOGNO(10353) + "processing TLS data: [%d] %s", (int)rr, err_descr); + } + } + else if (APR_STATUS_IS_EOF(rv) && passed > 0) { + /* encountering EOF while actually having read sth is a success. 
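+ * The EOS bucket remains in the brigade, so the EOF will surface again on the next read.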
*/ + rv = APR_SUCCESS; + } + else if (APR_SUCCESS == rv && passed == 0 && fctx->fin_block == APR_NONBLOCK_READ) { + rv = APR_EAGAIN; + } + else { + ap_log_error(APLOG_MARK, APLOG_TRACE2, rv, fctx->cc->server, + "read_tls_to_rustls, passed %ld bytes to rustls", (long)passed); + } + return rv; +} + +static apr_status_t fout_pass_tls_to_net(tls_filter_ctx_t *fctx) +{ + apr_status_t rv = APR_SUCCESS; + + if (!APR_BRIGADE_EMPTY(fctx->fout_tls_bb)) { + rv = ap_pass_brigade(fctx->fout_ctx->next, fctx->fout_tls_bb); + if (APR_SUCCESS == rv && fctx->c->aborted) { + rv = APR_ECONNRESET; + } + fctx->fout_bytes_in_tls_bb = 0; + apr_brigade_cleanup(fctx->fout_tls_bb); + } + return rv; +} + +static apr_status_t fout_pass_all_to_net( + tls_filter_ctx_t *fctx, int flush); + +static apr_status_t filter_abort( + tls_filter_ctx_t *fctx) +{ + apr_status_t rv; + + if (fctx->cc->state != TLS_CONN_ST_DONE) { + if (fctx->cc->state > TLS_CONN_ST_CLIENT_HELLO) { + rustls_connection_send_close_notify(fctx->cc->rustls_connection); + rv = fout_pass_all_to_net(fctx, 1); + ap_log_cerror(APLOG_MARK, APLOG_TRACE2, rv, fctx->c, "filter_abort, flushed output"); + } + fctx->c->aborted = 1; + fctx->cc->state = TLS_CONN_ST_DONE; + } + return APR_ECONNABORTED; +} + +static apr_status_t filter_recv_client_hello(tls_filter_ctx_t *fctx) +{ + apr_status_t rv = APR_SUCCESS; + + ap_log_error(APLOG_MARK, APLOG_TRACE2, 0, fctx->cc->server, + "tls_filter, server=%s, recv client hello", fctx->cc->server->server_hostname); + /* only for incoming connections */ + ap_assert(!fctx->cc->outgoing); + + if (rustls_connection_is_handshaking(fctx->cc->rustls_connection)) { + apr_bucket_brigade *bb_tmp; + + ap_log_cerror(APLOG_MARK, APLOG_TRACE2, rv, fctx->c, "filter_recv_client_hello: start"); + fctx->fin_tls_buffer_bb = apr_brigade_create(fctx->c->pool, fctx->c->bucket_alloc); + do { + if (rustls_connection_wants_read(fctx->cc->rustls_connection)) { + rv = read_tls_to_rustls(fctx, fctx->fin_max_in_rustls, APR_BLOCK_READ, 1); + if (APR_SUCCESS != rv) { + if (fctx->cc->client_hello_seen) { + rv = APR_EAGAIN; /* we got what we needed */ + break; + } + /* Something went wrong before we saw the client hello. + * This is a real error on which we should not continue. */ + goto cleanup; + } + } + /* Notice: we never write here to the client. We just want to inspect + * the client hello. */ + } while (!fctx->cc->client_hello_seen); + + /* We have seen the client hello and selected the server (vhost) to use + * on this connection. Set up the 'real' rustls_connection based on the + * servers 'real' rustls_config. 
*/ + rv = tls_core_conn_seen_client_hello(fctx->c); + if (APR_SUCCESS != rv) goto cleanup; + + bb_tmp = fctx->fin_tls_bb; /* data we have yet to feed to rustls */ + fctx->fin_tls_bb = fctx->fin_tls_buffer_bb; /* data we already fed to the pre_session */ + fctx->fin_tls_buffer_bb = NULL; + APR_BRIGADE_CONCAT(fctx->fin_tls_bb, bb_tmp); /* all tls data from the client so far, reloaded */ + apr_brigade_destroy(bb_tmp); + rv = APR_SUCCESS; + } + +cleanup: + ap_log_cerror(APLOG_MARK, APLOG_TRACE2, rv, fctx->c, "filter_recv_client_hello: done"); + return rv; +} + +static apr_status_t filter_send_client_hello(tls_filter_ctx_t *fctx) +{ + apr_status_t rv = APR_SUCCESS; + + ap_log_error(APLOG_MARK, APLOG_TRACE2, 0, fctx->cc->server, + "tls_filter, server=%s, send client hello", fctx->cc->server->server_hostname); + /* Only for outgoing connections */ + ap_assert(fctx->cc->outgoing); + if (rustls_connection_is_handshaking(fctx->cc->rustls_connection)) { + while (rustls_connection_wants_write(fctx->cc->rustls_connection)) { + /* write flushed, so it really gets out */ + rv = fout_pass_all_to_net(fctx, 1); + if (APR_SUCCESS != rv) goto cleanup; + } + } + +cleanup: + return rv; +} + +/** + * While cc->rustls_connection indicates that a handshake is ongoing, + * write TLS data from it and read network TLS data into the server session. + * + * @return APR_SUCCESS when the handshake is completed + */ +static apr_status_t filter_do_handshake( + tls_filter_ctx_t *fctx) +{ + apr_status_t rv = APR_SUCCESS; + + ap_log_error(APLOG_MARK, APLOG_TRACE2, 0, fctx->cc->server, + "tls_filter, server=%s, do handshake", fctx->cc->server->server_hostname); + if (rustls_connection_is_handshaking(fctx->cc->rustls_connection)) { + do { + if (rustls_connection_wants_write(fctx->cc->rustls_connection)) { + rv = fout_pass_all_to_net(fctx, 1); + if (APR_SUCCESS != rv) goto cleanup; + } + else if (rustls_connection_wants_read(fctx->cc->rustls_connection)) { + rv = read_tls_to_rustls(fctx, fctx->fin_max_in_rustls, APR_BLOCK_READ, 0); + if (APR_SUCCESS != rv) goto cleanup; + } + } + while (rustls_connection_is_handshaking(fctx->cc->rustls_connection)); + + /* rustls reports the TLS handshake to be done when it has *internally* + * processed everything into its buffers, not when those buffers have been + * sent to the other side.
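+ * Flush once more below so that the peer actually receives our final handshake records.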
*/ + if (rustls_connection_wants_write(fctx->cc->rustls_connection)) { + rv = fout_pass_all_to_net(fctx, 1); + if (APR_SUCCESS != rv) goto cleanup; + } + } +cleanup: + ap_log_error(APLOG_MARK, APLOG_TRACE2, rv, fctx->cc->server, + "tls_filter, server=%s, handshake done", fctx->cc->server->server_hostname); + if (APR_SUCCESS != rv) { + if (fctx->cc->last_error_descr) { + ap_log_cerror(APLOG_MARK, APLOG_INFO, APR_ECONNABORTED, fctx->c, APLOGNO(10354) + "handshake failed: %s", fctx->cc->last_error_descr); + } + } + return rv; +} + +static apr_status_t progress_tls_atleast_to(tls_filter_ctx_t *fctx, tls_conn_state_t state) +{ + apr_status_t rv = APR_SUCCESS; + + /* handle termination immediately */ + if (state == TLS_CONN_ST_DONE) { + rv = APR_ECONNABORTED; + goto cleanup; + } + + if (state > TLS_CONN_ST_CLIENT_HELLO + && TLS_CONN_ST_CLIENT_HELLO == fctx->cc->state) { + rv = tls_core_conn_init(fctx->c); + if (APR_SUCCESS != rv) goto cleanup; + + if (fctx->cc->outgoing) { + rv = filter_send_client_hello(fctx); + } + else { + rv = filter_recv_client_hello(fctx); + } + if (APR_SUCCESS != rv) goto cleanup; + fctx->cc->state = TLS_CONN_ST_HANDSHAKE; + } + + if (state > TLS_CONN_ST_HANDSHAKE + && TLS_CONN_ST_HANDSHAKE == fctx->cc->state) { + rv = filter_do_handshake(fctx); + if (APR_SUCCESS != rv) goto cleanup; + rv = tls_core_conn_post_handshake(fctx->c); + if (APR_SUCCESS != rv) goto cleanup; + fctx->cc->state = TLS_CONN_ST_TRAFFIC; + } + + if (state < fctx->cc->state) { + rv = APR_ECONNABORTED; + } + +cleanup: + if (APR_SUCCESS != rv) { + filter_abort(fctx); /* does change the state itself */ + } + return rv; +} + +/** + * The connection filter converting TLS encrypted network data into plain, unencrypted + * traffic data to be processed by filters above it in the filter chain. + * + * Unfortunately, Apache's filter infrastructure places a heavy implementation + * complexity on its input filters for the various use cases its HTTP/1.x parser + * (mainly) finds convenient: + * + * @param bb the bucket brigade to place the data into. + * @param mode one of + * - AP_MODE_READBYTES: just add up to readbytes of data into bb + * - AP_MODE_GETLINE: make a best effort to get data up to and including a CRLF. + * It can be less, but not more than that. + * - AP_MODE_EATCRLF: never used, we puke on it. + * - AP_MODE_SPECULATIVE: read data without consuming it. + * - AP_MODE_EXHAUSTIVE: never used, we puke on it. + * - AP_MODE_INIT: called once on a connection. Needs to pass down the filter + * chain, giving every filter the chance to "INIT".
+ * @param block do blocking or non-blocking reads + * @param readbytes max amount of data to add to bb, seems to be 0 for GETLINE + */ +static apr_status_t filter_conn_input( + ap_filter_t *f, apr_bucket_brigade *bb, ap_input_mode_t mode, + apr_read_type_e block, apr_off_t readbytes) +{ + tls_filter_ctx_t *fctx = f->ctx; + apr_status_t rv = APR_SUCCESS; + apr_off_t passed = 0, nlen; + rustls_result rr = RUSTLS_RESULT_OK; + apr_size_t in_buf_len; + char *in_buf = NULL; + + fctx->fin_block = block; + if (f->c->aborted) { + rv = filter_abort(fctx); goto cleanup; + } + + ap_log_error(APLOG_MARK, APLOG_TRACE2, 0, fctx->cc->server, + "tls_filter_conn_input, server=%s, mode=%d, block=%d, readbytes=%ld", + fctx->cc->server->server_hostname, mode, block, (long)readbytes); + + rv = progress_tls_atleast_to(fctx, TLS_CONN_ST_TRAFFIC); + if (APR_SUCCESS != rv) goto cleanup; /* this also leaves on APR_EAGAIN */ + + if (!fctx->cc->rustls_connection) { + return ap_get_brigade(f->next, bb, mode, block, readbytes); + } + +#if AP_MODULE_MAGIC_AT_LEAST(20200420, 1) + ap_filter_reinstate_brigade(f, fctx->fin_plain_bb, NULL); +#endif + + if (AP_MODE_INIT == mode) { + /* INIT is used to trigger the handshake, it does not return any traffic data. */ + goto cleanup; + } + + /* If we have nothing buffered, try getting more input. + * a) ask rustls_connection for decrypted data, if it has any. + * Note that only full records can be decrypted. We might have + * written TLS data to the session, but that does not mean it + * can give unencrypted data out again. + * b) read TLS bytes from the network and feed them to the rustls session. + * c) go back to a) if b) added data. + */ + while (APR_BRIGADE_EMPTY(fctx->fin_plain_bb)) { + apr_size_t rlen = 0; + apr_bucket *b; + + if (fctx->fin_bytes_in_rustls > 0) { + in_buf_len = APR_BUCKET_BUFF_SIZE; + in_buf = ap_calloc(in_buf_len, sizeof(char)); + rr = rustls_connection_read(fctx->cc->rustls_connection, + (unsigned char*)in_buf, in_buf_len, &rlen); + if (rr == RUSTLS_RESULT_PLAINTEXT_EMPTY) { + rr = RUSTLS_RESULT_OK; + rlen = 0; + } + if (rr != RUSTLS_RESULT_OK) goto cleanup; + ap_log_cerror(APLOG_MARK, APLOG_TRACE2, rv, fctx->c, + "tls_filter_conn_input: got %ld plain bytes from rustls", (long)rlen); + if (rlen > 0) { + b = apr_bucket_heap_create(in_buf, rlen, free, fctx->c->bucket_alloc); + APR_BRIGADE_INSERT_TAIL(fctx->fin_plain_bb, b); + } + else { + free(in_buf); + } + in_buf = NULL; + } + if (rlen == 0) { + /* That did not produce anything either. Try getting more + * TLS data from the network into the rustls session.
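+ * The enclosing loop then retries the plaintext read with the newly fed data.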
*/ + fctx->fin_bytes_in_rustls = 0; + rv = read_tls_to_rustls(fctx, fctx->fin_max_in_rustls, block, 0); + if (APR_SUCCESS != rv) goto cleanup; /* this also leave on APR_EAGAIN */ + } + } + + if (AP_MODE_GETLINE == mode) { + if (readbytes <= 0) readbytes = HUGE_STRING_LEN; + rv = tls_util_brigade_split_line(bb, fctx->fin_plain_bb, block, readbytes, &nlen); + if (APR_SUCCESS != rv) goto cleanup; + passed += nlen; + } + else if (AP_MODE_READBYTES == mode) { + ap_assert(readbytes > 0); + rv = tls_util_brigade_transfer(bb, fctx->fin_plain_bb, readbytes, &nlen); + if (APR_SUCCESS != rv) goto cleanup; + passed += nlen; + } + else if (AP_MODE_SPECULATIVE == mode) { + ap_assert(readbytes > 0); + rv = tls_util_brigade_copy(bb, fctx->fin_plain_bb, readbytes, &nlen); + if (APR_SUCCESS != rv) goto cleanup; + passed += nlen; + } + else if (AP_MODE_EXHAUSTIVE == mode) { + /* return all we have */ + APR_BRIGADE_CONCAT(bb, fctx->fin_plain_bb); + } + else { + /* We do support any other mode */ + rv = APR_ENOTIMPL; goto cleanup; + } + + fout_pass_all_to_net(fctx, 0); + +cleanup: + if (NULL != in_buf) free(in_buf); + + if (APLOGctrace3(fctx->c)) { + tls_util_bb_log(fctx->c, APLOG_TRACE3, "tls_input, fctx->fin_plain_bb", fctx->fin_plain_bb); + tls_util_bb_log(fctx->c, APLOG_TRACE3, "tls_input, bb", bb); + } + if (rr != RUSTLS_RESULT_OK) { + const char *err_descr = ""; + + rv = tls_core_error(fctx->c, rr, &err_descr); + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, rv, fctx->c, APLOGNO(10355) + "tls_filter_conn_input: [%d] %s", (int)rr, err_descr); + } + else if (APR_STATUS_IS_EAGAIN(rv)) { + ap_log_cerror(APLOG_MARK, APLOG_TRACE4, rv, fctx->c, + "tls_filter_conn_input: no data available"); + } + else if (APR_SUCCESS != rv) { + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, rv, fctx->c, APLOGNO(10356) + "tls_filter_conn_input"); + } + else { + ap_log_cerror(APLOG_MARK, APLOG_TRACE2, rv, fctx->c, + "tls_filter_conn_input: passed %ld bytes", (long)passed); + } + +#if AP_MODULE_MAGIC_AT_LEAST(20200420, 1) + if (APR_SUCCESS == rv || APR_STATUS_IS_EAGAIN(rv)) { + ap_filter_setaside_brigade(f, fctx->fin_plain_bb); + } +#endif + return rv; +} + +static rustls_io_result tls_write_callback( + void *userdata, const unsigned char *buf, size_t n, size_t *out_n) +{ + tls_filter_ctx_t *fctx = userdata; + apr_status_t rv; + + if ((apr_off_t)n + fctx->fout_bytes_in_tls_bb >= (apr_off_t)fctx->fout_auto_flush_size) { + apr_bucket *b = apr_bucket_transient_create((const char*)buf, n, fctx->fout_tls_bb->bucket_alloc); + APR_BRIGADE_INSERT_TAIL(fctx->fout_tls_bb, b); + fctx->fout_bytes_in_tls_bb += (apr_off_t)n; + rv = fout_pass_tls_to_net(fctx); + *out_n = n; + } + else { + rv = apr_brigade_write(fctx->fout_tls_bb, NULL, NULL, (const char*)buf, n); + if (APR_SUCCESS != rv) goto cleanup; + fctx->fout_bytes_in_tls_bb += (apr_off_t)n; + *out_n = n; + } +cleanup: + ap_log_error(APLOG_MARK, APLOG_TRACE5, rv, fctx->cc->server, + "tls_write_callback: %ld bytes", (long)n); + return APR_TO_OS_ERROR(rv); +} + +static rustls_io_result tls_write_vectored_callback( + void *userdata, const rustls_iovec *riov, size_t count, size_t *out_n) +{ + tls_filter_ctx_t *fctx = userdata; + const struct iovec *iov = (const struct iovec*)riov; + apr_status_t rv; + size_t i, n = 0; + apr_bucket *b; + + for (i = 0; i < count; ++i, ++iov) { + b = apr_bucket_transient_create((const char*)iov->iov_base, iov->iov_len, fctx->fout_tls_bb->bucket_alloc); + APR_BRIGADE_INSERT_TAIL(fctx->fout_tls_bb, b); + n += iov->iov_len; + } + fctx->fout_bytes_in_tls_bb += (apr_off_t)n; + rv = 
fout_pass_tls_to_net(fctx); + *out_n = n; + ap_log_error(APLOG_MARK, APLOG_TRACE5, rv, fctx->cc->server, + "tls_write_vectored_callback: %ld bytes in %d slices", (long)n, (int)count); + return APR_TO_OS_ERROR(rv); +} + +#define TLS_WRITE_VECTORED 1 +/** + * Read TLS encrypted data from cc->rustls_connection and pass it down + * Apache's filter chain to the network. + * + * For now, we always FLUSH the data, since that is what we need during handshakes. + */ +static apr_status_t fout_pass_rustls_to_tls(tls_filter_ctx_t *fctx) +{ + apr_status_t rv = APR_SUCCESS; + + if (rustls_connection_wants_write(fctx->cc->rustls_connection)) { + size_t dlen; + int os_err; + + if (TLS_WRITE_VECTORED) { + do { + os_err = rustls_connection_write_tls_vectored( + fctx->cc->rustls_connection, tls_write_vectored_callback, fctx, &dlen); + if (os_err) { + rv = APR_FROM_OS_ERROR(os_err); + goto cleanup; + } + } + while (rustls_connection_wants_write(fctx->cc->rustls_connection)); + } + else { + do { + os_err = rustls_connection_write_tls( + fctx->cc->rustls_connection, tls_write_callback, fctx, &dlen); + if (os_err) { + rv = APR_FROM_OS_ERROR(os_err); + goto cleanup; + } + } + while (rustls_connection_wants_write(fctx->cc->rustls_connection)); + ap_log_cerror(APLOG_MARK, APLOG_TRACE3, rv, fctx->c, + "fout_pass_rustls_to_tls, %ld bytes ready for network", (long)fctx->fout_bytes_in_tls_bb); + fctx->fout_bytes_in_rustls = 0; + } + } +cleanup: + return rv; +} + +static apr_status_t fout_pass_buf_to_rustls( + tls_filter_ctx_t *fctx, const char *buf, apr_size_t len) +{ + apr_status_t rv = APR_SUCCESS; + rustls_result rr = RUSTLS_RESULT_OK; + apr_size_t written; + + while (len) { + /* check if we will exceed the limit of data in rustls. + * rustls does not guarantee that it will accept all data, so we + * iterate and flush when needed.
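+ * Flushing moves pending TLS records from rustls into our output brigade and makes room for more plain data.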
*/ + if (fctx->fout_bytes_in_rustls + (apr_off_t)len > (apr_off_t)fctx->fout_max_in_rustls) { + rv = fout_pass_rustls_to_tls(fctx); + if (APR_SUCCESS != rv) goto cleanup; + } + + rr = rustls_connection_write(fctx->cc->rustls_connection, + (const unsigned char*)buf, len, &written); + if (rr != RUSTLS_RESULT_OK) goto cleanup; + ap_assert(written <= len); + fctx->fout_bytes_in_rustls += (apr_off_t)written; + buf += written; + len -= written; + if (written == 0) { + rv = APR_EAGAIN; + ap_log_cerror(APLOG_MARK, APLOG_ERR, 0, fctx->c, APLOGNO(10357) + "fout_pass_buf_to_rustls: not read by rustls at all"); + goto cleanup; + } + } +cleanup: + if (rr != RUSTLS_RESULT_OK) { + const char *err_descr = ""; + rv = tls_core_error(fctx->c, rr, &err_descr); + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, rv, fctx->c, APLOGNO(10358) + "fout_pass_buf_to_tls to rustls: [%d] %s", (int)rr, err_descr); + } + return rv; +} + +static apr_status_t fout_pass_all_to_tls(tls_filter_ctx_t *fctx) +{ + apr_status_t rv = APR_SUCCESS; + + if (fctx->fout_buf_plain_len) { + rv = fout_pass_buf_to_rustls(fctx, fctx->fout_buf_plain, fctx->fout_buf_plain_len); + ap_log_cerror(APLOG_MARK, APLOG_TRACE2, rv, fctx->c, + "fout_pass_all_to_tls: %ld plain bytes written to rustls", + (long)fctx->fout_buf_plain_len); + if (APR_SUCCESS != rv) goto cleanup; + fctx->fout_buf_plain_len = 0; + } + + rv = fout_pass_rustls_to_tls(fctx); +cleanup: + return rv; +} + +static apr_status_t fout_pass_all_to_net(tls_filter_ctx_t *fctx, int flush) +{ + apr_status_t rv; + + rv = fout_pass_all_to_tls(fctx); + if (APR_SUCCESS != rv) goto cleanup; + if (flush) { + apr_bucket *b = apr_bucket_flush_create(fctx->fout_tls_bb->bucket_alloc); + APR_BRIGADE_INSERT_TAIL(fctx->fout_tls_bb, b); + } + rv = fout_pass_tls_to_net(fctx); +cleanup: + return rv; +} + +static apr_status_t fout_add_bucket_to_plain(tls_filter_ctx_t *fctx, apr_bucket *b) +{ + const char *data; + apr_size_t dlen, buf_remain; + apr_status_t rv = APR_SUCCESS; + + ap_assert((apr_size_t)-1 != b->length); + if (b->length == 0) { + apr_bucket_delete(b); + goto cleanup; + } + + buf_remain = fctx->fout_buf_plain_size - fctx->fout_buf_plain_len; + if (buf_remain == 0) { + rv = fout_pass_all_to_tls(fctx); + if (APR_SUCCESS != rv) goto cleanup; + buf_remain = fctx->fout_buf_plain_size - fctx->fout_buf_plain_len; + ap_assert(buf_remain > 0); + } + if (b->length > buf_remain) { + apr_bucket_split(b, buf_remain); + } + rv = apr_bucket_read(b, &data, &dlen, APR_BLOCK_READ); + if (APR_SUCCESS != rv) goto cleanup; + /*if (dlen > TLS_PREF_PLAIN_CHUNK_SIZE)*/ + ap_assert(dlen <= buf_remain); + memcpy(fctx->fout_buf_plain + fctx->fout_buf_plain_len, data, dlen); + fctx->fout_buf_plain_len += dlen; + apr_bucket_delete(b); +cleanup: + return rv; +} + +static apr_status_t fout_add_bucket_to_tls(tls_filter_ctx_t *fctx, apr_bucket *b) +{ + apr_status_t rv; + + rv = fout_pass_all_to_tls(fctx); + if (APR_SUCCESS != rv) goto cleanup; + APR_BUCKET_REMOVE(b); + APR_BRIGADE_INSERT_TAIL(fctx->fout_tls_bb, b); + if (AP_BUCKET_IS_EOC(b)) { + rustls_connection_send_close_notify(fctx->cc->rustls_connection); + fctx->cc->state = TLS_CONN_ST_NOTIFIED; + rv = fout_pass_rustls_to_tls(fctx); + if (APR_SUCCESS != rv) goto cleanup; + } +cleanup: + return rv; +} + +static apr_status_t fout_append_plain(tls_filter_ctx_t *fctx, apr_bucket *b) +{ + const char *data; + apr_size_t dlen, buf_remain; + rustls_result rr = RUSTLS_RESULT_OK; + apr_status_t rv = APR_SUCCESS; + const char *lbuf = NULL; + int flush = 0; + + if (b) { + /* if our plain buffer 
is full, now is a good time to flush it. */ + buf_remain = fctx->fout_buf_plain_size - fctx->fout_buf_plain_len; + if (buf_remain == 0) { + rv = fout_pass_all_to_tls(fctx); + if (APR_SUCCESS != rv) goto cleanup; + buf_remain = fctx->fout_buf_plain_size - fctx->fout_buf_plain_len; + ap_assert(buf_remain > 0); + } + + /* Resolve any indeterminate bucket to a "real" one by reading it. */ + if ((apr_size_t)-1 == b->length) { + rv = apr_bucket_read(b, &data, &dlen, APR_BLOCK_READ); + if (APR_STATUS_IS_EOF(rv)) { + apr_bucket_delete(b); + goto maybe_flush; + } + else if (APR_SUCCESS != rv) goto cleanup; + } + /* Now `b` is the bucket that we need to append and consume */ + if (APR_BUCKET_IS_METADATA(b)) { + /* outgoing buckets: + * [PLAINDATA META PLAINDATA META META] + * need to become: + * [TLSDATA META TLSDATA META META] + * because we need to send the meta buckets down the + * network filters. */ + rv = fout_add_bucket_to_tls(fctx, b); + flush = 1; + } + else if (b->length == 0) { + apr_bucket_delete(b); + } + else if (b->length < 1024 || fctx->fout_buf_plain_len > 0) { + /* we want to buffer small chunks to create larger TLS records and + * not leak security relevant information. So, we buffer small + * chunks and add (parts of) later, larger chunks if the plain + * buffer contains data. */ + rv = fout_add_bucket_to_plain(fctx, b); + if (APR_SUCCESS != rv) goto cleanup; + } + else { + /* we have a large chunk and our plain buffer is empty, write it + * directly into rustls. */ +#define TLS_FILE_CHUNK_SIZE 4 * TLS_PREF_PLAIN_CHUNK_SIZE + if (b->length > TLS_FILE_CHUNK_SIZE) { + apr_bucket_split(b, TLS_FILE_CHUNK_SIZE); + } + + if (APR_BUCKET_IS_FILE(b) + && (lbuf = malloc(b->length))) { + /* A file bucket is a most wondrous thing. Since the dawn of time, + * it has been subject to many optimizations for efficient handling + * of large data in the server: + * - unless one reads from it, it will just consist of a file handle + * and the offset+length information. + * - on apr_bucket_read() it will transform itself into a bucket holding + * some 8000 bytes of data (APR_BUCKET_BUFF_SIZE), plus a following + * bucket that continues to hold the file handle and updated offsets/length + * information. + * Using standard bucket brigade handling, one would send 8000 byte + * chunks to the network and that is fine for many occasions. + * - for improved performance, the http: network handler takes + * the file handle directly and uses sendfile() when the OS supports it. + * - But there is no sendfile() for TLS (Netflix did some experiments). + * So. + * rustls will try to collect max length traffic data into one TLS + * message, but it can only work with what we give it. If we give it buffers + * that fit what it wants to assemble already, its work is much easier. + * + * We can read file buckets in larger chunks than APR_BUCKET_BUFF_SIZE, + * with a bit of knowledge about how they work.
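+ * Below, we seek to the bucket's file offset, read its whole range into lbuf and hand it to rustls in one call; lbuf is released in the cleanup section.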
+ */ + apr_bucket_file *f = (apr_bucket_file *)b->data; + apr_file_t *fd = f->fd; + apr_off_t offset = b->start; + + dlen = b->length; + rv = apr_file_seek(fd, APR_SET, &offset); + if (APR_SUCCESS != rv) goto cleanup; + rv = apr_file_read(fd, (void*)lbuf, &dlen); + if (APR_SUCCESS != rv && !APR_STATUS_IS_EOF(rv)) goto cleanup; + rv = fout_pass_buf_to_rustls(fctx, lbuf, dlen); + if (APR_SUCCESS != rv) goto cleanup; + apr_bucket_delete(b); + } + else { + rv = apr_bucket_read(b, &data, &dlen, APR_BLOCK_READ); + if (APR_SUCCESS != rv) goto cleanup; + rv = fout_pass_buf_to_rustls(fctx, data, dlen); + if (APR_SUCCESS != rv) goto cleanup; + apr_bucket_delete(b); + } + } + } + +maybe_flush: + if (flush) { + rv = fout_pass_all_to_net(fctx, 1); + if (APR_SUCCESS != rv) goto cleanup; + } + +cleanup: + if (lbuf) free((void*)lbuf); + if (rr != RUSTLS_RESULT_OK) { + const char *err_descr = ""; + rv = tls_core_error(fctx->c, rr, &err_descr); + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, rv, fctx->c, APLOGNO(10359) + "write_bucket_to_rustls: [%d] %s", (int)rr, err_descr); + } + return rv; +} + +/** + * The connection filter converting plain, unencrypted traffic data into TLS + * encrypted bytes and sending them down the Apache filter chain out to the network. + * + * @param bb the data to send, including "meta data" such as FLUSH indicators + * to force filters to write any data set aside (an Apache term for + * 'buffering'). + * The buckets in bb need to be completely consumed, i.e. bb will be + * empty on a successful return, but unless FLUSHed, filters may hold + * buckets back internally, for various reasons. However they always + * need to be processed in the order they arrive. + */ +static apr_status_t filter_conn_output( + ap_filter_t *f, apr_bucket_brigade *bb) +{ + tls_filter_ctx_t *fctx = f->ctx; + apr_status_t rv = APR_SUCCESS; + rustls_result rr = RUSTLS_RESULT_OK; + + if (f->c->aborted) { + ap_log_cerror(APLOG_MARK, APLOG_TRACE4, 0, fctx->c, + "tls_filter_conn_output: aborted conn"); + apr_brigade_cleanup(bb); + rv = APR_ECONNABORTED; goto cleanup; + } + + rv = progress_tls_atleast_to(fctx, TLS_CONN_ST_TRAFFIC); + if (APR_SUCCESS != rv) goto cleanup; /* this also leaves on APR_EAGAIN */ + + if (fctx->cc->state == TLS_CONN_ST_DONE) { + /* have done everything, just pass through */ + ap_log_cerror(APLOG_MARK, APLOG_TRACE4, 0, fctx->c, + "tls_filter_conn_output: tls session is already done"); + rv = ap_pass_brigade(f->next, bb); + goto cleanup; + } + + ap_log_error(APLOG_MARK, APLOG_TRACE2, 0, fctx->cc->server, + "tls_filter_conn_output, server=%s", fctx->cc->server->server_hostname); + if (APLOGctrace5(fctx->c)) { + tls_util_bb_log(fctx->c, APLOG_TRACE5, "filter_conn_output", bb); + } + + while (!APR_BRIGADE_EMPTY(bb)) { + rv = fout_append_plain(fctx, APR_BRIGADE_FIRST(bb)); + if (APR_SUCCESS != rv) goto cleanup; + } + + if (APLOGctrace5(fctx->c)) { + tls_util_bb_log(fctx->c, APLOG_TRACE5, "filter_conn_output, processed plain", bb); + tls_util_bb_log(fctx->c, APLOG_TRACE5, "filter_conn_output, tls", fctx->fout_tls_bb); + } + +cleanup: + if (rr != RUSTLS_RESULT_OK) { + const char *err_descr = ""; + rv = tls_core_error(fctx->c, rr, &err_descr); + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, rv, fctx->c, APLOGNO(10360) + "tls_filter_conn_output: [%d] %s", (int)rr, err_descr); + } + else { + ap_log_cerror(APLOG_MARK, APLOG_TRACE2, rv, fctx->c, + "tls_filter_conn_output: done"); + } + return rv; +} + +int tls_filter_pre_conn_init(conn_rec *c) +{ + tls_conf_conn_t *cc; + tls_filter_ctx_t *fctx; + + if (OK !=
tls_core_pre_conn_init(c)) { + return DECLINED; + } + + ap_log_error(APLOG_MARK, APLOG_TRACE2, 0, c->base_server, + "tls_filter_pre_conn_init on %s", c->base_server->server_hostname); + + cc = tls_conf_conn_get(c); + ap_assert(cc); + + fctx = apr_pcalloc(c->pool, sizeof(*fctx)); + fctx->c = c; + fctx->cc = cc; + cc->filter_ctx = fctx; + + /* A bit tricky: registering our filters returns the ap_filter_t* + * that was created for them. The ->next field always points + * to the filter "below" our filter. That will be other registered + * filters and, last but not least, the network filter on the socket. + * + * Therefore, when we need to read/write TLS data during the handshake, we can + * pass the data to / call on ->next. Since ->next can change during the setup of + * a connection (other modules also register filters), we keep the ap_filter_t* + * returned here; httpd core will update the ->next whenever someone + * adds a filter or removes one. This can potentially happen all the time. + */ + fctx->fin_ctx = ap_add_input_filter(TLS_FILTER_RAW, fctx, NULL, c); + fctx->fin_tls_bb = apr_brigade_create(c->pool, c->bucket_alloc); + fctx->fin_tls_buffer_bb = NULL; + fctx->fin_plain_bb = apr_brigade_create(c->pool, c->bucket_alloc); + fctx->fout_ctx = ap_add_output_filter(TLS_FILTER_RAW, fctx, NULL, c); + fctx->fout_tls_bb = apr_brigade_create(c->pool, c->bucket_alloc); + fctx->fout_buf_plain_size = APR_BUCKET_BUFF_SIZE; + fctx->fout_buf_plain = apr_pcalloc(c->pool, fctx->fout_buf_plain_size); + fctx->fout_buf_plain_len = 0; + + /* Let the filters have 2 max-length TLS messages in the rustls buffers. + * The effects we would like to achieve here are: + * 1. pass data out, so that every bucket becomes its own TLS message. + * This hides, if possible, the length of response parts. + * If we give rustls enough plain data, it will use the max TLS message + * size and things are more hidden. But we can only write what the application + * or protocol gives us. + * 2. max length records result in less overhead for all layers involved. + * 3. a TLS message from the client can only be decrypted when it has + * completely arrived. If we provide rustls with enough data (if the + * network has it for us), it should always be able to decrypt at least + * one TLS message and we have plain bytes to forward to the protocol + * handler. + */ + fctx->fin_max_in_rustls = 4 * TLS_REC_MAX_SIZE; + fctx->fout_max_in_rustls = 4 * TLS_PREF_PLAIN_CHUNK_SIZE; + fctx->fout_auto_flush_size = 2 * TLS_REC_MAX_SIZE; + + return OK; +} + +void tls_filter_conn_init(conn_rec *c) +{ + tls_conf_conn_t *cc = tls_conf_conn_get(c); + + if (cc && cc->filter_ctx && !cc->outgoing) { + /* We are one in a row of hooks that - possibly - want to process this + * connection, the (HTTP) protocol handlers among them. + * + * For incoming connections, we need to select the protocol to use NOW, + * so that the later protocol handlers do the right thing. + * Send an INIT down the input filter chain to trigger the TLS handshake, + * which will select a protocol via ALPN.
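+ * The brigade used below is only a placeholder; AP_MODE_INIT returns no traffic data (see filter_conn_input()).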
*/ + apr_bucket_brigade* temp; + + ap_log_error(APLOG_MARK, APLOG_TRACE2, 0, c->base_server, + "tls_filter_conn_init on %s, triggering handshake", c->base_server->server_hostname); + temp = apr_brigade_create(c->pool, c->bucket_alloc); + ap_get_brigade(c->input_filters, temp, AP_MODE_INIT, APR_BLOCK_READ, 0); + apr_brigade_destroy(temp); + } +} + +void tls_filter_register( + apr_pool_t *pool) +{ + (void)pool; + ap_register_input_filter(TLS_FILTER_RAW, filter_conn_input, NULL, AP_FTYPE_CONNECTION + 5); + ap_register_output_filter(TLS_FILTER_RAW, filter_conn_output, NULL, AP_FTYPE_CONNECTION + 5); +} diff --git a/modules/tls/tls_filter.h b/modules/tls/tls_filter.h new file mode 100644 index 0000000..4f3d38b --- /dev/null +++ b/modules/tls/tls_filter.h @@ -0,0 +1,90 @@ +/* Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef tls_filter_h +#define tls_filter_h + +#define TLS_FILTER_RAW "TLS raw" + +typedef struct tls_filter_ctx_t tls_filter_ctx_t; + +struct tls_filter_ctx_t { + conn_rec *c; /* connection this context is for */ + tls_conf_conn_t *cc; /* tls module configuration of connection */ + + ap_filter_t *fin_ctx; /* Apache's entry into the input filter chain */ + apr_bucket_brigade *fin_tls_bb; /* TLS encrypted, incoming network data */ + apr_bucket_brigade *fin_tls_buffer_bb; /* TLS encrypted, incoming network data buffering */ + apr_bucket_brigade *fin_plain_bb; /* decrypted, incoming traffic data */ + apr_off_t fin_bytes_in_rustls; /* # of input TLS bytes in rustls_connection */ + apr_read_type_e fin_block; /* Do we block on input reads or not? */ + + ap_filter_t *fout_ctx; /* Apache's entry into the output filter chain */ + char *fout_buf_plain; /* a buffer to collect plain bytes for output */ + apr_size_t fout_buf_plain_len; /* the number of bytes in the buffer */ + apr_size_t fout_buf_plain_size; /* the total size of the buffer */ + apr_bucket_brigade *fout_tls_bb; /* TLS encrypted, outgoing network data */ + apr_off_t fout_bytes_in_rustls; /* # of output plain bytes in rustls_connection */ + apr_off_t fout_bytes_in_tls_bb; /* # of output tls bytes in our brigade */ + + apr_size_t fin_max_in_rustls; /* how much TLS data we like to read into rustls */ + apr_size_t fout_max_in_rustls; /* how many plain bytes we like to have in rustls */ + apr_size_t fout_max_bucket_size; /* how large bucket chunks we handle before splitting */ + apr_size_t fout_auto_flush_size; /* how much outgoing TLS data we collect before flushing to the network */ +}; + +/** + * Register the in-/output filters for converting TLS to application data and vice versa. + */ +void tls_filter_register(apr_pool_t *pool); + +/** + * Initialize the pre_connection state. Install all filters.
+ * + * @return OK if TLS on connection is enabled, DECLINED otherwise + */ +int tls_filter_pre_conn_init(conn_rec *c); + +/** + * Initialize the connection for use, perform the TLS handshake. + * + * Any failure will lead to the connection becoming aborted. + */ +void tls_filter_conn_init(conn_rec *c); + +/* + * says: + * "For large data transfers, small record sizes can materially affect performance." + * and + * "For TLS 1.2 and earlier, that limit is 2^14 octets. TLS 1.3 uses a limit of + * 2^14+1 octets." + * Maybe future TLS versions will raise that value, but for now these limits stand. + * Given the choice, we would like rustls to provide traffic data in those chunks. + */ +#define TLS_PREF_PLAIN_CHUNK_SIZE (16384) + +/* + * When retrieving TLS chunks for rustls, or providing it a buffer + * to pass out TLS chunks (which are then bucketed and written to the + * network filters), we ideally would do that in multiples of TLS + * messages sizes. + * That would be TLS_PREF_WRITE_SIZE + TLS Message Overhead, such as + * MAC and padding. But these vary with protocol and ciphers chosen, so + * we define something which should be "large enough", but not overly so. + */ +#define TLS_REC_EXTRA (1024) +#define TLS_REC_MAX_SIZE (TLS_PREF_PLAIN_CHUNK_SIZE + TLS_REC_EXTRA) + +#endif /* tls_filter_h */ \ No newline at end of file diff --git a/modules/tls/tls_ocsp.c b/modules/tls/tls_ocsp.c new file mode 100644 index 0000000..37e95b1 --- /dev/null +++ b/modules/tls/tls_ocsp.c @@ -0,0 +1,120 @@ +/* Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#include +#include +#include + +#include +#include +#include +#include +#include + +#include + +#include "tls_cert.h" +#include "tls_conf.h" +#include "tls_core.h" +#include "tls_proto.h" +#include "tls_ocsp.h" + +extern module AP_MODULE_DECLARE_DATA tls_module; +APLOG_USE_MODULE(tls); + + +static int prime_cert( + void *userdata, server_rec *s, const char *cert_id, const char *cert_pem, + const rustls_certified_key *certified_key) +{ + apr_pool_t *p = userdata; + apr_status_t rv; + + (void)certified_key; + rv = ap_ssl_ocsp_prime(s, p, cert_id, strlen(cert_id), cert_pem); + ap_log_error(APLOG_MARK, APLOG_TRACE1, rv, s, "ocsp prime of cert [%s] from %s", + cert_id, s->server_hostname); + return 1; +} + +apr_status_t tls_ocsp_prime_certs(tls_conf_global_t *gc, apr_pool_t *p, server_rec *s) +{ + ap_log_error(APLOG_MARK, APLOG_TRACE1, 0, s, "ocsp priming of %d certs", + (int)tls_cert_reg_count(gc->cert_reg)); + tls_cert_reg_do(prime_cert, p, gc->cert_reg); + return APR_SUCCESS; +} + +typedef struct { + conn_rec *c; + const rustls_certified_key *key_in; + const rustls_certified_key *key_out; +} ocsp_copy_ctx_t; + +static void ocsp_clone_key(const unsigned char *der, apr_size_t der_len, void *userdata) +{ + ocsp_copy_ctx_t *ctx = userdata; + rustls_slice_bytes rslice; + rustls_result rr; + + rslice.data = der; + rslice.len = der_len; + + rr = rustls_certified_key_clone_with_ocsp(ctx->key_in, der_len? &rslice : NULL, &ctx->key_out); + if (RUSTLS_RESULT_OK != rr) { + const char *err_descr = NULL; + apr_status_t rv = tls_util_rustls_error(ctx->c->pool, rr, &err_descr); + ap_log_cerror(APLOG_MARK, APLOG_ERR, rv, ctx->c, APLOGNO(10362) + "Failed add OCSP data to certificate: [%d] %s", (int)rr, err_descr); + } + else { + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, ctx->c, + "provided %ld bytes of ocsp response DER data to key.", (long)der_len); + } +} + +apr_status_t tls_ocsp_update_key( + conn_rec *c, const rustls_certified_key *certified_key, + const rustls_certified_key **pkey_out) +{ + tls_conf_conn_t *cc = tls_conf_conn_get(c); + tls_conf_server_t *sc; + const char *key_id; + apr_status_t rv = APR_SUCCESS; + ocsp_copy_ctx_t ctx; + + assert(cc); + assert(cc->server); + sc = tls_conf_server_get(cc->server); + key_id = tls_cert_reg_get_id(sc->global->cert_reg, certified_key); + if (!key_id) { + rv = APR_ENOENT; + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, rv, c, "certified key not registered"); + goto cleanup; + } + + ctx.c = c; + ctx.key_in = certified_key; + ctx.key_out = NULL; + rv = ap_ssl_ocsp_get_resp(cc->server, c, key_id, strlen(key_id), ocsp_clone_key, &ctx); + if (APR_SUCCESS != rv) { + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, rv, c, + "ocsp response not available for cert %s", key_id); + } + +cleanup: + *pkey_out = (APR_SUCCESS == rv)? ctx.key_out : NULL; + return rv; +} diff --git a/modules/tls/tls_ocsp.h b/modules/tls/tls_ocsp.h new file mode 100644 index 0000000..60770a9 --- /dev/null +++ b/modules/tls/tls_ocsp.h @@ -0,0 +1,47 @@ +/* Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
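On the connection side, tls_ocsp_update_key() yields a clone of the certified key with the cached OCSP response attached, suitable for stapling. A hedged sketch of a caller (example_key_for_handshake is hypothetical; how the presented key is selected and when the clone is released with rustls_certified_key_free() are assumptions about surrounding module code):

#include "tls_ocsp.h"

/* return the key to present on connection `c`, preferring an OCSP-stapled clone */
static const rustls_certified_key *example_key_for_handshake(
    conn_rec *c, const rustls_certified_key *selected)
{
    const rustls_certified_key *key_ocsp = NULL;

    if (tls_ocsp_update_key(c, selected, &key_ocsp) == APR_SUCCESS && key_ocsp) {
        return key_ocsp;   /* clone carrying the OCSP DER data; free when done */
    }
    return selected;       /* no response cached, present the key as is */
}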
You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef tls_ocsp_h
+#define tls_ocsp_h
+
+/**
+ * Prime the collected certified keys for OCSP response provisioning (aka Stapling).
+ *
+ * To be called in the post-config phase of the server before connections are handled.
+ * @param gc the global module configuration with the certified_key registry
+ * @param p the pool to use for allocations
+ * @param s the base server record
+ */
+apr_status_t tls_ocsp_prime_certs(tls_conf_global_t *gc, apr_pool_t *p, server_rec *s);
+
+/**
+ * Get the OCSP response data for the given certified key, if available, and
+ * create a clone of that key which carries the response for stapling.
+ * If no response data is available, `*key_out` is set to NULL and an error
+ * status is returned.
+ *
+ * Note that only keys that have been primed initially will have OCSP data available.
+ * @param c the current connection
+ * @param certified_key the key to get the OCSP response data for
+ * @param key_out the clone of the key carrying the OCSP response, or NULL
+ */
+apr_status_t tls_ocsp_update_key(
+    conn_rec *c, const rustls_certified_key *certified_key,
+    const rustls_certified_key **key_out);
+
+#endif /* tls_ocsp_h */
diff --git a/modules/tls/tls_proto.c b/modules/tls/tls_proto.c
new file mode 100644
index 0000000..95a903b
--- /dev/null
+++ b/modules/tls/tls_proto.c
@@ -0,0 +1,603 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
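Priming belongs in the post-config phase, once the certified_key registry has been populated. A minimal sketch of the hook side (example_post_config and its error handling are assumptions for illustration, not part of this patch):

#include "httpd.h"
#include "http_config.h"
#include "http_log.h"
#include "tls_conf.h"
#include "tls_ocsp.h"

static int example_post_config(apr_pool_t *p, apr_pool_t *plog,
                               apr_pool_t *ptemp, server_rec *s)
{
    tls_conf_server_t *sc = tls_conf_server_get(s);
    apr_status_t rv;

    (void)plog; (void)ptemp;
    rv = tls_ocsp_prime_certs(sc->global, p, s);
    if (APR_SUCCESS != rv) {
        ap_log_error(APLOG_MARK, APLOG_ERR, rv, s, "priming OCSP responses failed");
        return HTTP_INTERNAL_SERVER_ERROR;
    }
    return OK;
}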
+ */ +#include +#include +#include + +#include +#include +#include +#include + +#include + +#include "tls_proto.h" +#include "tls_conf.h" +#include "tls_util.h" + +extern module AP_MODULE_DECLARE_DATA tls_module; +APLOG_USE_MODULE(tls); + + + +/** + * Known cipher as registered in + * + */ +static tls_cipher_t KNOWN_CIPHERS[] = { + { 0x0000, "TLS_NULL_WITH_NULL_NULL", NULL }, + { 0x0001, "TLS_RSA_WITH_NULL_MD5", NULL }, + { 0x0002, "TLS_RSA_WITH_NULL_SHA", NULL }, + { 0x0003, "TLS_RSA_EXPORT_WITH_RC4_40_MD5", NULL }, + { 0x0004, "TLS_RSA_WITH_RC4_128_MD5", NULL }, + { 0x0005, "TLS_RSA_WITH_RC4_128_SHA", NULL }, + { 0x0006, "TLS_RSA_EXPORT_WITH_RC2_CBC_40_MD5", NULL }, + { 0x0007, "TLS_RSA_WITH_IDEA_CBC_SHA", NULL }, + { 0x0008, "TLS_RSA_EXPORT_WITH_DES40_CBC_SHA", NULL }, + { 0x0009, "TLS_RSA_WITH_DES_CBC_SHA", NULL }, + { 0x000a, "TLS_RSA_WITH_3DES_EDE_CBC_SHA", NULL }, + { 0x000b, "TLS_DH_DSS_EXPORT_WITH_DES40_CBC_SHA", NULL }, + { 0x000c, "TLS_DH_DSS_WITH_DES_CBC_SHA", NULL }, + { 0x000d, "TLS_DH_DSS_WITH_3DES_EDE_CBC_SHA", NULL }, + { 0x000e, "TLS_DH_RSA_EXPORT_WITH_DES40_CBC_SHA", NULL }, + { 0x000f, "TLS_DH_RSA_WITH_DES_CBC_SHA", NULL }, + { 0x0010, "TLS_DH_RSA_WITH_3DES_EDE_CBC_SHA", NULL }, + { 0x0011, "TLS_DHE_DSS_EXPORT_WITH_DES40_CBC_SHA", NULL }, + { 0x0012, "TLS_DHE_DSS_WITH_DES_CBC_SHA", NULL }, + { 0x0013, "TLS_DHE_DSS_WITH_3DES_EDE_CBC_SHA", NULL }, + { 0x0014, "TLS_DHE_RSA_EXPORT_WITH_DES40_CBC_SHA", NULL }, + { 0x0015, "TLS_DHE_RSA_WITH_DES_CBC_SHA", NULL }, + { 0x0016, "TLS_DHE_RSA_WITH_3DES_EDE_CBC_SHA", NULL }, + { 0x0017, "TLS_DH_anon_EXPORT_WITH_RC4_40_MD5", NULL }, + { 0x0018, "TLS_DH_anon_WITH_RC4_128_MD5", NULL }, + { 0x0019, "TLS_DH_anon_EXPORT_WITH_DES40_CBC_SHA", NULL }, + { 0x001a, "TLS_DH_anon_WITH_DES_CBC_SHA", NULL }, + { 0x001b, "TLS_DH_anon_WITH_3DES_EDE_CBC_SHA", NULL }, + { 0x001c, "SSL_FORTEZZA_KEA_WITH_NULL_SHA", NULL }, + { 0x001d, "SSL_FORTEZZA_KEA_WITH_FORTEZZA_CBC_SHA", NULL }, + { 0x001e, "TLS_KRB5_WITH_DES_CBC_SHA_or_SSL_FORTEZZA_KEA_WITH_RC4_128_SHA", NULL }, + { 0x001f, "TLS_KRB5_WITH_3DES_EDE_CBC_SHA", NULL }, + { 0x0020, "TLS_KRB5_WITH_RC4_128_SHA", NULL }, + { 0x0021, "TLS_KRB5_WITH_IDEA_CBC_SHA", NULL }, + { 0x0022, "TLS_KRB5_WITH_DES_CBC_MD5", NULL }, + { 0x0023, "TLS_KRB5_WITH_3DES_EDE_CBC_MD5", NULL }, + { 0x0024, "TLS_KRB5_WITH_RC4_128_MD5", NULL }, + { 0x0025, "TLS_KRB5_WITH_IDEA_CBC_MD5", NULL }, + { 0x0026, "TLS_KRB5_EXPORT_WITH_DES_CBC_40_SHA", NULL }, + { 0x0027, "TLS_KRB5_EXPORT_WITH_RC2_CBC_40_SHA", NULL }, + { 0x0028, "TLS_KRB5_EXPORT_WITH_RC4_40_SHA", NULL }, + { 0x0029, "TLS_KRB5_EXPORT_WITH_DES_CBC_40_MD5", NULL }, + { 0x002a, "TLS_KRB5_EXPORT_WITH_RC2_CBC_40_MD5", NULL }, + { 0x002b, "TLS_KRB5_EXPORT_WITH_RC4_40_MD5", NULL }, + { 0x002c, "TLS_PSK_WITH_NULL_SHA", NULL }, + { 0x002d, "TLS_DHE_PSK_WITH_NULL_SHA", NULL }, + { 0x002e, "TLS_RSA_PSK_WITH_NULL_SHA", NULL }, + { 0x002f, "TLS_RSA_WITH_AES_128_CBC_SHA", NULL }, + { 0x0030, "TLS_DH_DSS_WITH_AES_128_CBC_SHA", NULL }, + { 0x0031, "TLS_DH_RSA_WITH_AES_128_CBC_SHA", NULL }, + { 0x0032, "TLS_DHE_DSS_WITH_AES_128_CBC_SHA", NULL }, + { 0x0033, "TLS_DHE_RSA_WITH_AES_128_CBC_SHA", NULL }, + { 0x0034, "TLS_DH_anon_WITH_AES_128_CBC_SHA", NULL }, + { 0x0035, "TLS_RSA_WITH_AES_256_CBC_SHA", NULL }, + { 0x0036, "TLS_DH_DSS_WITH_AES_256_CBC_SHA", NULL }, + { 0x0037, "TLS_DH_RSA_WITH_AES_256_CBC_SHA", NULL }, + { 0x0038, "TLS_DHE_DSS_WITH_AES_256_CBC_SHA", NULL }, + { 0x0039, "TLS_DHE_RSA_WITH_AES_256_CBC_SHA", NULL }, + { 0x003a, "TLS_DH_anon_WITH_AES_256_CBC_SHA", NULL }, + { 
0x003b, "TLS_RSA_WITH_NULL_SHA256", "NULL-SHA256" }, + { 0x003c, "TLS_RSA_WITH_AES_128_CBC_SHA256", "AES128-SHA256" }, + { 0x003d, "TLS_RSA_WITH_AES_256_CBC_SHA256", "AES256-SHA256" }, + { 0x003e, "TLS_DH_DSS_WITH_AES_128_CBC_SHA256", "DH-DSS-AES128-SHA256" }, + { 0x003f, "TLS_DH_RSA_WITH_AES_128_CBC_SHA256", "DH-RSA-AES128-SHA256" }, + { 0x0040, "TLS_DHE_DSS_WITH_AES_128_CBC_SHA256", "DHE-DSS-AES128-SHA256" }, + { 0x0041, "TLS_RSA_WITH_CAMELLIA_128_CBC_SHA", NULL }, + { 0x0042, "TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA", NULL }, + { 0x0043, "TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA", NULL }, + { 0x0044, "TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA", NULL }, + { 0x0045, "TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA", NULL }, + { 0x0046, "TLS_DH_anon_WITH_CAMELLIA_128_CBC_SHA", NULL }, + { 0x0047, "TLS_ECDH_ECDSA_WITH_NULL_SHA_draft", NULL }, + { 0x0048, "TLS_ECDH_ECDSA_WITH_RC4_128_SHA_draft", NULL }, + { 0x0049, "TLS_ECDH_ECDSA_WITH_DES_CBC_SHA_draft", NULL }, + { 0x004a, "TLS_ECDH_ECDSA_WITH_3DES_EDE_CBC_SHA_draft", NULL }, + { 0x004b, "TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA_draft", NULL }, + { 0x004c, "TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA_draft", NULL }, + { 0x004d, "TLS_ECDH_ECNRA_WITH_DES_CBC_SHA_draft", NULL }, + { 0x004e, "TLS_ECDH_ECNRA_WITH_3DES_EDE_CBC_SHA_draft", NULL }, + { 0x004f, "TLS_ECMQV_ECDSA_NULL_SHA_draft", NULL }, + { 0x0050, "TLS_ECMQV_ECDSA_WITH_RC4_128_SHA_draft", NULL }, + { 0x0051, "TLS_ECMQV_ECDSA_WITH_DES_CBC_SHA_draft", NULL }, + { 0x0052, "TLS_ECMQV_ECDSA_WITH_3DES_EDE_CBC_SHA_draft", NULL }, + { 0x0053, "TLS_ECMQV_ECNRA_NULL_SHA_draft", NULL }, + { 0x0054, "TLS_ECMQV_ECNRA_WITH_RC4_128_SHA_draft", NULL }, + { 0x0055, "TLS_ECMQV_ECNRA_WITH_DES_CBC_SHA_draft", NULL }, + { 0x0056, "TLS_ECMQV_ECNRA_WITH_3DES_EDE_CBC_SHA_draft", NULL }, + { 0x0057, "TLS_ECDH_anon_NULL_WITH_SHA_draft", NULL }, + { 0x0058, "TLS_ECDH_anon_WITH_RC4_128_SHA_draft", NULL }, + { 0x0059, "TLS_ECDH_anon_WITH_DES_CBC_SHA_draft", NULL }, + { 0x005a, "TLS_ECDH_anon_WITH_3DES_EDE_CBC_SHA_draft", NULL }, + { 0x005b, "TLS_ECDH_anon_EXPORT_WITH_DES40_CBC_SHA_draft", NULL }, + { 0x005c, "TLS_ECDH_anon_EXPORT_WITH_RC4_40_SHA_draft", NULL }, + { 0x0060, "TLS_RSA_EXPORT1024_WITH_RC4_56_MD5", NULL }, + { 0x0061, "TLS_RSA_EXPORT1024_WITH_RC2_CBC_56_MD5", NULL }, + { 0x0062, "TLS_RSA_EXPORT1024_WITH_DES_CBC_SHA", NULL }, + { 0x0063, "TLS_DHE_DSS_EXPORT1024_WITH_DES_CBC_SHA", NULL }, + { 0x0064, "TLS_RSA_EXPORT1024_WITH_RC4_56_SHA", NULL }, + { 0x0065, "TLS_DHE_DSS_EXPORT1024_WITH_RC4_56_SHA", NULL }, + { 0x0066, "TLS_DHE_DSS_WITH_RC4_128_SHA", NULL }, + { 0x0067, "TLS_DHE_RSA_WITH_AES_128_CBC_SHA256", "DHE-RSA-AES128-SHA256" }, + { 0x0068, "TLS_DH_DSS_WITH_AES_256_CBC_SHA256", "DH-DSS-AES256-SHA256" }, + { 0x0069, "TLS_DH_RSA_WITH_AES_256_CBC_SHA256", "DH-RSA-AES256-SHA256" }, + { 0x006a, "TLS_DHE_DSS_WITH_AES_256_CBC_SHA256", "DHE-DSS-AES256-SHA256" }, + { 0x006b, "TLS_DHE_RSA_WITH_AES_256_CBC_SHA256", "DHE-RSA-AES256-SHA256" }, + { 0x006c, "TLS_DH_anon_WITH_AES_128_CBC_SHA256", "ADH-AES128-SHA256" }, + { 0x006d, "TLS_DH_anon_WITH_AES_256_CBC_SHA256", "ADH-AES256-SHA256" }, + { 0x0072, "TLS_DHE_DSS_WITH_3DES_EDE_CBC_RMD", NULL }, + { 0x0073, "TLS_DHE_DSS_WITH_AES_128_CBC_RMD", NULL }, + { 0x0074, "TLS_DHE_DSS_WITH_AES_256_CBC_RMD", NULL }, + { 0x0077, "TLS_DHE_RSA_WITH_3DES_EDE_CBC_RMD", NULL }, + { 0x0078, "TLS_DHE_RSA_WITH_AES_128_CBC_RMD", NULL }, + { 0x0079, "TLS_DHE_RSA_WITH_AES_256_CBC_RMD", NULL }, + { 0x007c, "TLS_RSA_WITH_3DES_EDE_CBC_RMD", NULL }, + { 0x007d, "TLS_RSA_WITH_AES_128_CBC_RMD", NULL }, + { 0x007e, 
"TLS_RSA_WITH_AES_256_CBC_RMD", NULL }, + { 0x0080, "TLS_GOSTR341094_WITH_28147_CNT_IMIT", NULL }, + { 0x0081, "TLS_GOSTR341001_WITH_28147_CNT_IMIT", NULL }, + { 0x0082, "TLS_GOSTR341094_WITH_NULL_GOSTR3411", NULL }, + { 0x0083, "TLS_GOSTR341001_WITH_NULL_GOSTR3411", NULL }, + { 0x0084, "TLS_RSA_WITH_CAMELLIA_256_CBC_SHA", NULL }, + { 0x0085, "TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA", NULL }, + { 0x0086, "TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA", NULL }, + { 0x0087, "TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA", NULL }, + { 0x0088, "TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA", NULL }, + { 0x0089, "TLS_DH_anon_WITH_CAMELLIA_256_CBC_SHA", NULL }, + { 0x008a, "TLS_PSK_WITH_RC4_128_SHA", "PSK-RC4-SHA" }, + { 0x008b, "TLS_PSK_WITH_3DES_EDE_CBC_SHA", "PSK-3DES-EDE-CBC-SHA" }, + { 0x008c, "TLS_PSK_WITH_AES_128_CBC_SHA", NULL }, + { 0x008d, "TLS_PSK_WITH_AES_256_CBC_SHA", NULL }, + { 0x008e, "TLS_DHE_PSK_WITH_RC4_128_SHA", NULL }, + { 0x008f, "TLS_DHE_PSK_WITH_3DES_EDE_CBC_SHA", NULL }, + { 0x0090, "TLS_DHE_PSK_WITH_AES_128_CBC_SHA", NULL }, + { 0x0091, "TLS_DHE_PSK_WITH_AES_256_CBC_SHA", NULL }, + { 0x0092, "TLS_RSA_PSK_WITH_RC4_128_SHA", NULL }, + { 0x0093, "TLS_RSA_PSK_WITH_3DES_EDE_CBC_SHA", NULL }, + { 0x0094, "TLS_RSA_PSK_WITH_AES_128_CBC_SHA", NULL }, + { 0x0095, "TLS_RSA_PSK_WITH_AES_256_CBC_SHA", NULL }, + { 0x0096, "TLS_RSA_WITH_SEED_CBC_SHA", NULL }, + { 0x0097, "TLS_DH_DSS_WITH_SEED_CBC_SHA", NULL }, + { 0x0098, "TLS_DH_RSA_WITH_SEED_CBC_SHA", NULL }, + { 0x0099, "TLS_DHE_DSS_WITH_SEED_CBC_SHA", NULL }, + { 0x009a, "TLS_DHE_RSA_WITH_SEED_CBC_SHA", NULL }, + { 0x009b, "TLS_DH_anon_WITH_SEED_CBC_SHA", NULL }, + { 0x009c, "TLS_RSA_WITH_AES_128_GCM_SHA256", "AES128-GCM-SHA256" }, + { 0x009d, "TLS_RSA_WITH_AES_256_GCM_SHA384", "AES256-GCM-SHA384" }, + { 0x009e, "TLS_DHE_RSA_WITH_AES_128_GCM_SHA256", "DHE-RSA-AES128-GCM-SHA256" }, + { 0x009f, "TLS_DHE_RSA_WITH_AES_256_GCM_SHA384", "DHE-RSA-AES256-GCM-SHA384" }, + { 0x00a0, "TLS_DH_RSA_WITH_AES_128_GCM_SHA256", "DH-RSA-AES128-GCM-SHA256" }, + { 0x00a1, "TLS_DH_RSA_WITH_AES_256_GCM_SHA384", "DH-RSA-AES256-GCM-SHA384" }, + { 0x00a2, "TLS_DHE_DSS_WITH_AES_128_GCM_SHA256", "DHE-DSS-AES128-GCM-SHA256" }, + { 0x00a3, "TLS_DHE_DSS_WITH_AES_256_GCM_SHA384", "DHE-DSS-AES256-GCM-SHA384" }, + { 0x00a4, "TLS_DH_DSS_WITH_AES_128_GCM_SHA256", "DH-DSS-AES128-GCM-SHA256" }, + { 0x00a5, "TLS_DH_DSS_WITH_AES_256_GCM_SHA384", "DH-DSS-AES256-GCM-SHA384" }, + { 0x00a6, "TLS_DH_anon_WITH_AES_128_GCM_SHA256", "ADH-AES128-GCM-SHA256" }, + { 0x00a7, "TLS_DH_anon_WITH_AES_256_GCM_SHA384", "ADH-AES256-GCM-SHA384" }, + { 0x00a8, "TLS_PSK_WITH_AES_128_GCM_SHA256", NULL }, + { 0x00a9, "TLS_PSK_WITH_AES_256_GCM_SHA384", NULL }, + { 0x00aa, "TLS_DHE_PSK_WITH_AES_128_GCM_SHA256", NULL }, + { 0x00ab, "TLS_DHE_PSK_WITH_AES_256_GCM_SHA384", NULL }, + { 0x00ac, "TLS_RSA_PSK_WITH_AES_128_GCM_SHA256", NULL }, + { 0x00ad, "TLS_RSA_PSK_WITH_AES_256_GCM_SHA384", NULL }, + { 0x00ae, "TLS_PSK_WITH_AES_128_CBC_SHA256", "PSK-AES128-CBC-SHA" }, + { 0x00af, "TLS_PSK_WITH_AES_256_CBC_SHA384", "PSK-AES256-CBC-SHA" }, + { 0x00b0, "TLS_PSK_WITH_NULL_SHA256", NULL }, + { 0x00b1, "TLS_PSK_WITH_NULL_SHA384", NULL }, + { 0x00b2, "TLS_DHE_PSK_WITH_AES_128_CBC_SHA256", NULL }, + { 0x00b3, "TLS_DHE_PSK_WITH_AES_256_CBC_SHA384", NULL }, + { 0x00b4, "TLS_DHE_PSK_WITH_NULL_SHA256", NULL }, + { 0x00b5, "TLS_DHE_PSK_WITH_NULL_SHA384", NULL }, + { 0x00b6, "TLS_RSA_PSK_WITH_AES_128_CBC_SHA256", NULL }, + { 0x00b7, "TLS_RSA_PSK_WITH_AES_256_CBC_SHA384", NULL }, + { 0x00b8, "TLS_RSA_PSK_WITH_NULL_SHA256", NULL }, + { 0x00b9, 
"TLS_RSA_PSK_WITH_NULL_SHA384", NULL }, + { 0x00ba, "TLS_RSA_WITH_CAMELLIA_128_CBC_SHA256", NULL }, + { 0x00bb, "TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA256", NULL }, + { 0x00bc, "TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA256", NULL }, + { 0x00bd, "TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA256", NULL }, + { 0x00be, "TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA256", NULL }, + { 0x00bf, "TLS_DH_anon_WITH_CAMELLIA_128_CBC_SHA256", NULL }, + { 0x00c0, "TLS_RSA_WITH_CAMELLIA_256_CBC_SHA256", NULL }, + { 0x00c1, "TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA256", NULL }, + { 0x00c2, "TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA256", NULL }, + { 0x00c3, "TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA256", NULL }, + { 0x00c4, "TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA256", NULL }, + { 0x00c5, "TLS_DH_anon_WITH_CAMELLIA_256_CBC_SHA256", NULL }, + { 0x00ff, "TLS_EMPTY_RENEGOTIATION_INFO_SCSV", NULL }, + { 0x1301, "TLS_AES_128_GCM_SHA256", "TLS13_AES_128_GCM_SHA256" }, + { 0x1302, "TLS_AES_256_GCM_SHA384", "TLS13_AES_256_GCM_SHA384" }, + { 0x1303, "TLS_CHACHA20_POLY1305_SHA256", "TLS13_CHACHA20_POLY1305_SHA256" }, + { 0x1304, "TLS_AES_128_CCM_SHA256", "TLS13_AES_128_CCM_SHA256" }, + { 0x1305, "TLS_AES_128_CCM_8_SHA256", "TLS13_AES_128_CCM_8_SHA256" }, + { 0xc001, "TLS_ECDH_ECDSA_WITH_NULL_SHA", NULL }, + { 0xc002, "TLS_ECDH_ECDSA_WITH_RC4_128_SHA", NULL }, + { 0xc003, "TLS_ECDH_ECDSA_WITH_3DES_EDE_CBC_SHA", NULL }, + { 0xc004, "TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA", NULL }, + { 0xc005, "TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA", NULL }, + { 0xc006, "TLS_ECDHE_ECDSA_WITH_NULL_SHA", NULL }, + { 0xc007, "TLS_ECDHE_ECDSA_WITH_RC4_128_SHA", NULL }, + { 0xc008, "TLS_ECDHE_ECDSA_WITH_3DES_EDE_CBC_SHA", NULL }, + { 0xc009, "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA", NULL }, + { 0xc00a, "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA", NULL }, + { 0xc00b, "TLS_ECDH_RSA_WITH_NULL_SHA", NULL }, + { 0xc00c, "TLS_ECDH_RSA_WITH_RC4_128_SHA", NULL }, + { 0xc00d, "TLS_ECDH_RSA_WITH_3DES_EDE_CBC_SHA", NULL }, + { 0xc00e, "TLS_ECDH_RSA_WITH_AES_128_CBC_SHA", NULL }, + { 0xc00f, "TLS_ECDH_RSA_WITH_AES_256_CBC_SHA", NULL }, + { 0xc010, "TLS_ECDHE_RSA_WITH_NULL_SHA", NULL }, + { 0xc011, "TLS_ECDHE_RSA_WITH_RC4_128_SHA", NULL }, + { 0xc012, "TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA", NULL }, + { 0xc013, "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA", NULL }, + { 0xc014, "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA", NULL }, + { 0xc015, "TLS_ECDH_anon_WITH_NULL_SHA", NULL }, + { 0xc016, "TLS_ECDH_anon_WITH_RC4_128_SHA", NULL }, + { 0xc017, "TLS_ECDH_anon_WITH_3DES_EDE_CBC_SHA", NULL }, + { 0xc018, "TLS_ECDH_anon_WITH_AES_128_CBC_SHA", NULL }, + { 0xc019, "TLS_ECDH_anon_WITH_AES_256_CBC_SHA", NULL }, + { 0xc01a, "TLS_SRP_SHA_WITH_3DES_EDE_CBC_SHA", NULL }, + { 0xc01b, "TLS_SRP_SHA_RSA_WITH_3DES_EDE_CBC_SHA", NULL }, + { 0xc01c, "TLS_SRP_SHA_DSS_WITH_3DES_EDE_CBC_SHA", NULL }, + { 0xc01d, "TLS_SRP_SHA_WITH_AES_128_CBC_SHA", NULL }, + { 0xc01e, "TLS_SRP_SHA_RSA_WITH_AES_128_CBC_SHA", NULL }, + { 0xc01f, "TLS_SRP_SHA_DSS_WITH_AES_128_CBC_SHA", NULL }, + { 0xc020, "TLS_SRP_SHA_WITH_AES_256_CBC_SHA", NULL }, + { 0xc021, "TLS_SRP_SHA_RSA_WITH_AES_256_CBC_SHA", NULL }, + { 0xc022, "TLS_SRP_SHA_DSS_WITH_AES_256_CBC_SHA", NULL }, + { 0xc023, "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256", "ECDHE-ECDSA-AES128-SHA256" }, + { 0xc024, "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384", "ECDHE-ECDSA-AES256-SHA384" }, + { 0xc025, "TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA256", "ECDH-ECDSA-AES128-SHA256" }, + { 0xc026, "TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA384", "ECDH-ECDSA-AES256-SHA384" }, + { 0xc027, "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256", 
"ECDHE-RSA-AES128-SHA256" }, + { 0xc028, "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384", "ECDHE-RSA-AES256-SHA384" }, + { 0xc029, "TLS_ECDH_RSA_WITH_AES_128_CBC_SHA256", "ECDH-RSA-AES128-SHA256" }, + { 0xc02a, "TLS_ECDH_RSA_WITH_AES_256_CBC_SHA384", "ECDH-RSA-AES256-SHA384" }, + { 0xc02b, "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", "ECDHE-ECDSA-AES128-GCM-SHA256" }, + { 0xc02c, "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", "ECDHE-ECDSA-AES256-GCM-SHA384" }, + { 0xc02d, "TLS_ECDH_ECDSA_WITH_AES_128_GCM_SHA256", "ECDH-ECDSA-AES128-GCM-SHA256" }, + { 0xc02e, "TLS_ECDH_ECDSA_WITH_AES_256_GCM_SHA384", "ECDH-ECDSA-AES256-GCM-SHA384" }, + { 0xc02f, "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", "ECDHE-RSA-AES128-GCM-SHA256" }, + { 0xc030, "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384", "ECDHE-RSA-AES256-GCM-SHA384" }, + { 0xc031, "TLS_ECDH_RSA_WITH_AES_128_GCM_SHA256", "ECDH-RSA-AES128-GCM-SHA256" }, + { 0xc032, "TLS_ECDH_RSA_WITH_AES_256_GCM_SHA384", "ECDH-RSA-AES256-GCM-SHA384" }, + { 0xc033, "TLS_ECDHE_PSK_WITH_RC4_128_SHA", NULL }, + { 0xc034, "TLS_ECDHE_PSK_WITH_3DES_EDE_CBC_SHA", NULL }, + { 0xc035, "TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA", NULL }, + { 0xc036, "TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA", NULL }, + { 0xc037, "TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA256", NULL }, + { 0xc038, "TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA384", NULL }, + { 0xc039, "TLS_ECDHE_PSK_WITH_NULL_SHA", NULL }, + { 0xc03a, "TLS_ECDHE_PSK_WITH_NULL_SHA256", NULL }, + { 0xc03b, "TLS_ECDHE_PSK_WITH_NULL_SHA384", NULL }, + { 0xc03c, "TLS_RSA_WITH_ARIA_128_CBC_SHA256", NULL }, + { 0xc03d, "TLS_RSA_WITH_ARIA_256_CBC_SHA384", NULL }, + { 0xc03e, "TLS_DH_DSS_WITH_ARIA_128_CBC_SHA256", NULL }, + { 0xc03f, "TLS_DH_DSS_WITH_ARIA_256_CBC_SHA384", NULL }, + { 0xc040, "TLS_DH_RSA_WITH_ARIA_128_CBC_SHA256", NULL }, + { 0xc041, "TLS_DH_RSA_WITH_ARIA_256_CBC_SHA384", NULL }, + { 0xc042, "TLS_DHE_DSS_WITH_ARIA_128_CBC_SHA256", NULL }, + { 0xc043, "TLS_DHE_DSS_WITH_ARIA_256_CBC_SHA384", NULL }, + { 0xc044, "TLS_DHE_RSA_WITH_ARIA_128_CBC_SHA256", NULL }, + { 0xc045, "TLS_DHE_RSA_WITH_ARIA_256_CBC_SHA384", NULL }, + { 0xc046, "TLS_DH_anon_WITH_ARIA_128_CBC_SHA256", NULL }, + { 0xc047, "TLS_DH_anon_WITH_ARIA_256_CBC_SHA384", NULL }, + { 0xc048, "TLS_ECDHE_ECDSA_WITH_ARIA_128_CBC_SHA256", NULL }, + { 0xc049, "TLS_ECDHE_ECDSA_WITH_ARIA_256_CBC_SHA384", NULL }, + { 0xc04a, "TLS_ECDH_ECDSA_WITH_ARIA_128_CBC_SHA256", NULL }, + { 0xc04b, "TLS_ECDH_ECDSA_WITH_ARIA_256_CBC_SHA384", NULL }, + { 0xc04c, "TLS_ECDHE_RSA_WITH_ARIA_128_CBC_SHA256", NULL }, + { 0xc04d, "TLS_ECDHE_RSA_WITH_ARIA_256_CBC_SHA384", NULL }, + { 0xc04e, "TLS_ECDH_RSA_WITH_ARIA_128_CBC_SHA256", NULL }, + { 0xc04f, "TLS_ECDH_RSA_WITH_ARIA_256_CBC_SHA384", NULL }, + { 0xc050, "TLS_RSA_WITH_ARIA_128_GCM_SHA256", NULL }, + { 0xc051, "TLS_RSA_WITH_ARIA_256_GCM_SHA384", NULL }, + { 0xc052, "TLS_DHE_RSA_WITH_ARIA_128_GCM_SHA256", NULL }, + { 0xc053, "TLS_DHE_RSA_WITH_ARIA_256_GCM_SHA384", NULL }, + { 0xc054, "TLS_DH_RSA_WITH_ARIA_128_GCM_SHA256", NULL }, + { 0xc055, "TLS_DH_RSA_WITH_ARIA_256_GCM_SHA384", NULL }, + { 0xc056, "TLS_DHE_DSS_WITH_ARIA_128_GCM_SHA256", NULL }, + { 0xc057, "TLS_DHE_DSS_WITH_ARIA_256_GCM_SHA384", NULL }, + { 0xc058, "TLS_DH_DSS_WITH_ARIA_128_GCM_SHA256", NULL }, + { 0xc059, "TLS_DH_DSS_WITH_ARIA_256_GCM_SHA384", NULL }, + { 0xc05a, "TLS_DH_anon_WITH_ARIA_128_GCM_SHA256", NULL }, + { 0xc05b, "TLS_DH_anon_WITH_ARIA_256_GCM_SHA384", NULL }, + { 0xc05c, "TLS_ECDHE_ECDSA_WITH_ARIA_128_GCM_SHA256", NULL }, + { 0xc05d, "TLS_ECDHE_ECDSA_WITH_ARIA_256_GCM_SHA384", NULL }, + { 0xc05e, 
"TLS_ECDH_ECDSA_WITH_ARIA_128_GCM_SHA256", NULL }, + { 0xc05f, "TLS_ECDH_ECDSA_WITH_ARIA_256_GCM_SHA384", NULL }, + { 0xc060, "TLS_ECDHE_RSA_WITH_ARIA_128_GCM_SHA256", NULL }, + { 0xc061, "TLS_ECDHE_RSA_WITH_ARIA_256_GCM_SHA384", NULL }, + { 0xc062, "TLS_ECDH_RSA_WITH_ARIA_128_GCM_SHA256", NULL }, + { 0xc063, "TLS_ECDH_RSA_WITH_ARIA_256_GCM_SHA384", NULL }, + { 0xc064, "TLS_PSK_WITH_ARIA_128_CBC_SHA256", NULL }, + { 0xc065, "TLS_PSK_WITH_ARIA_256_CBC_SHA384", NULL }, + { 0xc066, "TLS_DHE_PSK_WITH_ARIA_128_CBC_SHA256", NULL }, + { 0xc067, "TLS_DHE_PSK_WITH_ARIA_256_CBC_SHA384", NULL }, + { 0xc068, "TLS_RSA_PSK_WITH_ARIA_128_CBC_SHA256", NULL }, + { 0xc069, "TLS_RSA_PSK_WITH_ARIA_256_CBC_SHA384", NULL }, + { 0xc06a, "TLS_PSK_WITH_ARIA_128_GCM_SHA256", NULL }, + { 0xc06b, "TLS_PSK_WITH_ARIA_256_GCM_SHA384", NULL }, + { 0xc06c, "TLS_DHE_PSK_WITH_ARIA_128_GCM_SHA256", NULL }, + { 0xc06d, "TLS_DHE_PSK_WITH_ARIA_256_GCM_SHA384", NULL }, + { 0xc06e, "TLS_RSA_PSK_WITH_ARIA_128_GCM_SHA256", NULL }, + { 0xc06f, "TLS_RSA_PSK_WITH_ARIA_256_GCM_SHA384", NULL }, + { 0xc070, "TLS_ECDHE_PSK_WITH_ARIA_128_CBC_SHA256", NULL }, + { 0xc071, "TLS_ECDHE_PSK_WITH_ARIA_256_CBC_SHA384", NULL }, + { 0xc072, "TLS_ECDHE_ECDSA_WITH_CAMELLIA_128_CBC_SHA256", NULL }, + { 0xc073, "TLS_ECDHE_ECDSA_WITH_CAMELLIA_256_CBC_SHA384", NULL }, + { 0xc074, "TLS_ECDH_ECDSA_WITH_CAMELLIA_128_CBC_SHA256", NULL }, + { 0xc075, "TLS_ECDH_ECDSA_WITH_CAMELLIA_256_CBC_SHA384", NULL }, + { 0xc076, "TLS_ECDHE_RSA_WITH_CAMELLIA_128_CBC_SHA256", NULL }, + { 0xc077, "TLS_ECDHE_RSA_WITH_CAMELLIA_256_CBC_SHA384", NULL }, + { 0xc078, "TLS_ECDH_RSA_WITH_CAMELLIA_128_CBC_SHA256", NULL }, + { 0xc079, "TLS_ECDH_RSA_WITH_CAMELLIA_256_CBC_SHA384", NULL }, + { 0xc07a, "TLS_RSA_WITH_CAMELLIA_128_GCM_SHA256", NULL }, + { 0xc07b, "TLS_RSA_WITH_CAMELLIA_256_GCM_SHA384", NULL }, + { 0xc07c, "TLS_DHE_RSA_WITH_CAMELLIA_128_GCM_SHA256", NULL }, + { 0xc07d, "TLS_DHE_RSA_WITH_CAMELLIA_256_GCM_SHA384", NULL }, + { 0xc07e, "TLS_DH_RSA_WITH_CAMELLIA_128_GCM_SHA256", NULL }, + { 0xc07f, "TLS_DH_RSA_WITH_CAMELLIA_256_GCM_SHA384", NULL }, + { 0xc080, "TLS_DHE_DSS_WITH_CAMELLIA_128_GCM_SHA256", NULL }, + { 0xc081, "TLS_DHE_DSS_WITH_CAMELLIA_256_GCM_SHA384", NULL }, + { 0xc082, "TLS_DH_DSS_WITH_CAMELLIA_128_GCM_SHA256", NULL }, + { 0xc083, "TLS_DH_DSS_WITH_CAMELLIA_256_GCM_SHA384", NULL }, + { 0xc084, "TLS_DH_anon_WITH_CAMELLIA_128_GCM_SHA256", NULL }, + { 0xc085, "TLS_DH_anon_WITH_CAMELLIA_256_GCM_SHA384", NULL }, + { 0xc086, "TLS_ECDHE_ECDSA_WITH_CAMELLIA_128_GCM_SHA256", NULL }, + { 0xc087, "TLS_ECDHE_ECDSA_WITH_CAMELLIA_256_GCM_SHA384", NULL }, + { 0xc088, "TLS_ECDH_ECDSA_WITH_CAMELLIA_128_GCM_SHA256", NULL }, + { 0xc089, "TLS_ECDH_ECDSA_WITH_CAMELLIA_256_GCM_SHA384", NULL }, + { 0xc08a, "TLS_ECDHE_RSA_WITH_CAMELLIA_128_GCM_SHA256", NULL }, + { 0xc08b, "TLS_ECDHE_RSA_WITH_CAMELLIA_256_GCM_SHA384", NULL }, + { 0xc08c, "TLS_ECDH_RSA_WITH_CAMELLIA_128_GCM_SHA256", NULL }, + { 0xc08d, "TLS_ECDH_RSA_WITH_CAMELLIA_256_GCM_SHA384", NULL }, + { 0xc08e, "TLS_PSK_WITH_CAMELLIA_128_GCM_SHA256", NULL }, + { 0xc08f, "TLS_PSK_WITH_CAMELLIA_256_GCM_SHA384", NULL }, + { 0xc090, "TLS_DHE_PSK_WITH_CAMELLIA_128_GCM_SHA256", NULL }, + { 0xc091, "TLS_DHE_PSK_WITH_CAMELLIA_256_GCM_SHA384", NULL }, + { 0xc092, "TLS_RSA_PSK_WITH_CAMELLIA_128_GCM_SHA256", NULL }, + { 0xc093, "TLS_RSA_PSK_WITH_CAMELLIA_256_GCM_SHA384", NULL }, + { 0xc094, "TLS_PSK_WITH_CAMELLIA_128_CBC_SHA256", NULL }, + { 0xc095, "TLS_PSK_WITH_CAMELLIA_256_CBC_SHA384", NULL }, + { 0xc096, 
"TLS_DHE_PSK_WITH_CAMELLIA_128_CBC_SHA256", NULL }, + { 0xc097, "TLS_DHE_PSK_WITH_CAMELLIA_256_CBC_SHA384", NULL }, + { 0xc098, "TLS_RSA_PSK_WITH_CAMELLIA_128_CBC_SHA256", NULL }, + { 0xc099, "TLS_RSA_PSK_WITH_CAMELLIA_256_CBC_SHA384", NULL }, + { 0xc09a, "TLS_ECDHE_PSK_WITH_CAMELLIA_128_CBC_SHA256", NULL }, + { 0xc09b, "TLS_ECDHE_PSK_WITH_CAMELLIA_256_CBC_SHA384", NULL }, + { 0xc09c, "TLS_RSA_WITH_AES_128_CCM", NULL }, + { 0xc09d, "TLS_RSA_WITH_AES_256_CCM", NULL }, + { 0xc09e, "TLS_DHE_RSA_WITH_AES_128_CCM", NULL }, + { 0xc09f, "TLS_DHE_RSA_WITH_AES_256_CCM", NULL }, + { 0xc0a0, "TLS_RSA_WITH_AES_128_CCM_8", NULL }, + { 0xc0a1, "TLS_RSA_WITH_AES_256_CCM_8", NULL }, + { 0xc0a2, "TLS_DHE_RSA_WITH_AES_128_CCM_8", NULL }, + { 0xc0a3, "TLS_DHE_RSA_WITH_AES_256_CCM_8", NULL }, + { 0xc0a4, "TLS_PSK_WITH_AES_128_CCM", NULL }, + { 0xc0a5, "TLS_PSK_WITH_AES_256_CCM", NULL }, + { 0xc0a6, "TLS_DHE_PSK_WITH_AES_128_CCM", NULL }, + { 0xc0a7, "TLS_DHE_PSK_WITH_AES_256_CCM", NULL }, + { 0xc0a8, "TLS_PSK_WITH_AES_128_CCM_8", NULL }, + { 0xc0a9, "TLS_PSK_WITH_AES_256_CCM_8", NULL }, + { 0xc0aa, "TLS_PSK_DHE_WITH_AES_128_CCM_8", NULL }, + { 0xc0ab, "TLS_PSK_DHE_WITH_AES_256_CCM_8", NULL }, + { 0xcca8, "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256", "ECDHE-RSA-CHACHA20-POLY1305" }, + { 0xcca9, "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256", "ECDHE-ECDSA-CHACHA20-POLY1305" }, + { 0xccaa, "TLS_DHE_RSA_WITH_CHACHA20_POLY1305_SHA256", "DHE-RSA-CHACHA20-POLY1305" }, + { 0xccab, "TLS_PSK_WITH_CHACHA20_POLY1305_SHA256", "PSK-CHACHA20-POLY1305" }, + { 0xccac, "TLS_ECDHE_PSK_WITH_CHACHA20_POLY1305_SHA256", "ECDHE-PSK-CHACHA20-POLY1305" }, + { 0xccad, "TLS_DHE_PSK_WITH_CHACHA20_POLY1305_SHA256", "DHE-PSK-CHACHA20-POLY1305" }, + { 0xccae, "TLS_RSA_PSK_WITH_CHACHA20_POLY1305_SHA256", "RSA-PSK-CHACHA20-POLY1305" }, + { 0xfefe, "SSL_RSA_FIPS_WITH_DES_CBC_SHA", NULL }, + { 0xfeff, "SSL_RSA_FIPS_WITH_3DES_EDE_CBC_SHA", NULL }, +}; + +typedef struct { + apr_uint16_t id; + const rustls_supported_ciphersuite *rustls_suite; +} rustls_cipher_t; + +tls_proto_conf_t *tls_proto_init(apr_pool_t *pool, server_rec *s) +{ + tls_proto_conf_t *conf; + tls_cipher_t *cipher; + const rustls_supported_ciphersuite *rustls_suite; + rustls_cipher_t *rcipher; + apr_uint16_t id; + apr_size_t i; + + (void)s; + conf = apr_pcalloc(pool, sizeof(*conf)); + + conf->supported_versions = apr_array_make(pool, 3, sizeof(apr_uint16_t)); + /* Until we can look that up at crustls, we assume what we currently know */ + APR_ARRAY_PUSH(conf->supported_versions, apr_uint16_t) = TLS_VERSION_1_2; + APR_ARRAY_PUSH(conf->supported_versions, apr_uint16_t) = TLS_VERSION_1_3; + + conf->known_ciphers_by_name = apr_hash_make(pool); + conf->known_ciphers_by_id = apr_hash_make(pool); + for (i = 0; i < TLS_DIM(KNOWN_CIPHERS); ++i) { + cipher = &KNOWN_CIPHERS[i]; + apr_hash_set(conf->known_ciphers_by_id, &cipher->id, sizeof(apr_uint16_t), cipher); + apr_hash_set(conf->known_ciphers_by_name, cipher->name, APR_HASH_KEY_STRING, cipher); + if (cipher->alias) { + apr_hash_set(conf->known_ciphers_by_name, cipher->alias, APR_HASH_KEY_STRING, cipher); + } + } + + conf->supported_cipher_ids = apr_array_make(pool, 10, sizeof(apr_uint16_t)); + conf->rustls_ciphers_by_id = apr_hash_make(pool); + i = 0; + while ((rustls_suite = rustls_all_ciphersuites_get_entry(i++))) { + id = rustls_supported_ciphersuite_get_suite(rustls_suite); + rcipher = apr_pcalloc(pool, sizeof(*rcipher)); + rcipher->id = id; + rcipher->rustls_suite = rustls_suite; + APR_ARRAY_PUSH(conf->supported_cipher_ids, 
apr_uint16_t) = id; + apr_hash_set(conf->rustls_ciphers_by_id, &rcipher->id, sizeof(apr_uint16_t), rcipher); + + } + + return conf; +} + +const char *tls_proto_get_cipher_names( + tls_proto_conf_t *conf, const apr_array_header_t *ciphers, apr_pool_t *pool) +{ + apr_array_header_t *names; + int n; + + names = apr_array_make(pool, ciphers->nelts, sizeof(const char*)); + for (n = 0; n < ciphers->nelts; ++n) { + apr_uint16_t id = APR_ARRAY_IDX(ciphers, n, apr_uint16_t); + APR_ARRAY_PUSH(names, const char *) = tls_proto_get_cipher_name(conf, id, pool); + } + return apr_array_pstrcat(pool, names, ':'); +} + +apr_status_t tls_proto_pre_config(apr_pool_t *pool, apr_pool_t *ptemp) +{ + (void)pool; + (void)ptemp; + return APR_SUCCESS; +} + +apr_status_t tls_proto_post_config(apr_pool_t *pool, apr_pool_t *ptemp, server_rec *s) +{ + tls_conf_server_t *sc = tls_conf_server_get(s); + tls_proto_conf_t *conf = sc->global->proto; + + (void)pool; + if (APLOGdebug(s)) { + ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s, APLOGNO(10314) + "tls ciphers supported: %s", + tls_proto_get_cipher_names(conf, conf->supported_cipher_ids, ptemp)); + } + return APR_SUCCESS; +} + +static apr_status_t get_uint16_from(const char *name, const char *prefix, apr_uint16_t *pint) +{ + apr_size_t plen = strlen(prefix); + if (strlen(name) == plen+4 && !strncmp(name, prefix, plen)) { + /* may be a hex notation cipher id */ + char *end = NULL; + apr_int64_t code = apr_strtoi64(name + plen, &end, 16); + if ((!end || !*end) && code && code <= APR_UINT16_MAX) { + *pint = (apr_uint16_t)code; + return APR_SUCCESS; + } + } + return APR_ENOENT; +} + +apr_uint16_t tls_proto_get_version_by_name(tls_proto_conf_t *conf, const char *name) +{ + apr_uint16_t version; + (void)conf; + if (!apr_strnatcasecmp(name, "TLSv1.2")) { + return TLS_VERSION_1_2; + } + else if (!apr_strnatcasecmp(name, "TLSv1.3")) { + return TLS_VERSION_1_3; + } + if (APR_SUCCESS == get_uint16_from(name, "TLSv0x", &version)) { + return version; + } + return 0; +} + +const char *tls_proto_get_version_name( + tls_proto_conf_t *conf, apr_uint16_t id, apr_pool_t *pool) +{ + (void)conf; + switch (id) { + case TLS_VERSION_1_2: + return "TLSv1.2"; + case TLS_VERSION_1_3: + return "TLSv1.3"; + default: + return apr_psprintf(pool, "TLSv0x%04x", id); + } +} + +apr_array_header_t *tls_proto_create_versions_plus( + tls_proto_conf_t *conf, apr_uint16_t min_version, apr_pool_t *pool) +{ + apr_array_header_t *versions = apr_array_make(pool, 3, sizeof(apr_uint16_t)); + apr_uint16_t version; + int i; + + for (i = 0; i < conf->supported_versions->nelts; ++i) { + version = APR_ARRAY_IDX(conf->supported_versions, i, apr_uint16_t); + if (version >= min_version) { + APR_ARRAY_PUSH(versions, apr_uint16_t) = version; + } + } + return versions; +} + +int tls_proto_is_cipher_supported(tls_proto_conf_t *conf, apr_uint16_t cipher) +{ + return tls_util_array_uint16_contains(conf->supported_cipher_ids, cipher); +} + +apr_status_t tls_proto_get_cipher_by_name( + tls_proto_conf_t *conf, const char *name, apr_uint16_t *pcipher) +{ + tls_cipher_t *cipher = apr_hash_get(conf->known_ciphers_by_name, name, APR_HASH_KEY_STRING); + if (cipher) { + *pcipher = cipher->id; + return APR_SUCCESS; + } + return get_uint16_from(name, "TLS_CIPHER_0x", pcipher); +} + +const char *tls_proto_get_cipher_name( + tls_proto_conf_t *conf, apr_uint16_t id, apr_pool_t *pool) +{ + tls_cipher_t *cipher = apr_hash_get(conf->known_ciphers_by_id, &id, sizeof(apr_uint16_t)); + if (cipher) { + return cipher->name; + } + return 
apr_psprintf(pool, "TLS_CIPHER_0x%04x", id); +} + +apr_array_header_t *tls_proto_get_rustls_suites( + tls_proto_conf_t *conf, const apr_array_header_t *ids, apr_pool_t *pool) +{ + apr_array_header_t *suites; + rustls_cipher_t *rcipher; + apr_uint16_t id; + int i; + + suites = apr_array_make(pool, ids->nelts, sizeof(const rustls_supported_ciphersuite*)); + for (i = 0; i < ids->nelts; ++i) { + id = APR_ARRAY_IDX(ids, i, apr_uint16_t); + rcipher = apr_hash_get(conf->rustls_ciphers_by_id, &id, sizeof(apr_uint16_t)); + if (rcipher) { + APR_ARRAY_PUSH(suites, const rustls_supported_ciphersuite *) = rcipher->rustls_suite; + } + } + return suites; +} diff --git a/modules/tls/tls_proto.h b/modules/tls/tls_proto.h new file mode 100644 index 0000000..a3fe881 --- /dev/null +++ b/modules/tls/tls_proto.h @@ -0,0 +1,124 @@ +/* Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef tls_proto_h +#define tls_proto_h + +#include "tls_util.h" + + +#define TLS_VERSION_1_2 0x0303 +#define TLS_VERSION_1_3 0x0304 + +/** + * Specification of a TLS cipher by name, possible alias and its 16 bit value + * as assigned by IANA. + */ +typedef struct { + apr_uint16_t id; /* IANA 16-bit assigned value as used on the wire */ + const char *name; /* IANA given name of the cipher */ + const char *alias; /* Optional, commonly known alternate name */ +} tls_cipher_t; + +/** + * TLS protocol related definitions constructed + * by querying crustls lib. + */ +typedef struct tls_proto_conf_t tls_proto_conf_t; +struct tls_proto_conf_t { + apr_array_header_t *supported_versions; /* supported protocol versions (apr_uint16_t) */ + apr_hash_t *known_ciphers_by_name; /* hash by name of known tls_cipher_t* */ + apr_hash_t *known_ciphers_by_id; /* hash by id of known tls_cipher_t* */ + apr_hash_t *rustls_ciphers_by_id; /* hash by id of rustls rustls_supported_ciphersuite* */ + apr_array_header_t *supported_cipher_ids; /* cipher ids (apr_uint16_t) supported by rustls */ + const rustls_root_cert_store *native_roots; +}; + +/** + * Create and populate the protocol configuration. + */ +tls_proto_conf_t *tls_proto_init(apr_pool_t *p, server_rec *s); + +/** + * Called during pre-config phase to start initialization + * of the tls protocol configuration. + */ +apr_status_t tls_proto_pre_config(apr_pool_t *pool, apr_pool_t *ptemp); + +/** + * Called during post-config phase to conclude the initialization + * of the tls protocol configuration. + */ +apr_status_t tls_proto_post_config(apr_pool_t *p, apr_pool_t *ptemp, server_rec *s); + +/** + * Get the TLS protocol identifier (as used on the wire) for the TLS + * protocol of the given name. Returns 0 if protocol is unknown. 
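Both the version and the cipher lookups accept the registered names and aliases as well as a generic hex fallback. A short sketch of what a caller sees (example_lookups is hypothetical; the expected results in the comments follow from the table and parsing code above):

#include "tls_proto.h"

static void example_lookups(tls_proto_conf_t *conf)
{
    apr_uint16_t v, cipher = 0;

    v = tls_proto_get_version_by_name(conf, "TLSv1.3");      /* -> 0x0304 */
    v = tls_proto_get_version_by_name(conf, "TLSv0x0305");   /* hex fallback -> 0x0305 */
    v = tls_proto_get_version_by_name(conf, "SSLv3");        /* unknown -> 0 */
    (void)v;

    if (APR_SUCCESS == tls_proto_get_cipher_by_name(
            conf, "TLS13_AES_128_GCM_SHA256", &cipher)) {
        /* the rustls alias resolves to the IANA id 0x1301, TLS_AES_128_GCM_SHA256 */
    }
}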
+ */ +apr_uint16_t tls_proto_get_version_by_name(tls_proto_conf_t *conf, const char *name); + +/** + * Get the name of the protocol version identified by its identifier. This + * will return the name from the protocol configuration or, if unknown, create + * the string `TLSv0x%04x` from the 16bit identifier. + */ +const char *tls_proto_get_version_name( + tls_proto_conf_t *conf, apr_uint16_t id, apr_pool_t *pool); + +/** + * Create an array of the given TLS protocol version identifier `min_version` + * and all supported new ones. The array carries apr_uint16_t values. + */ +apr_array_header_t *tls_proto_create_versions_plus( + tls_proto_conf_t *conf, apr_uint16_t min_version, apr_pool_t *pool); + +/** + * Get a TLS cipher spec by name/alias. + */ +apr_status_t tls_proto_get_cipher_by_name( + tls_proto_conf_t *conf, const char *name, apr_uint16_t *pcipher); + +/** + * Return != 0 iff the cipher is supported by the rustls library. + */ +int tls_proto_is_cipher_supported(tls_proto_conf_t *conf, apr_uint16_t cipher); + +/** + * Get the name of a TLS cipher for the IANA assigned 16bit value. This will + * return the name in the protocol configuration, if the cipher is known, and + * create the string `TLS_CIPHER_0x%04x` for the 16bit cipher value. + */ +const char *tls_proto_get_cipher_name( + tls_proto_conf_t *conf, apr_uint16_t cipher, apr_pool_t *pool); + +/** + * Get the concatenated names with ':' as separator of all TLS cipher identifiers + * as given in `ciphers`. + * @param conf the TLS protocol configuration + * @param ciphers the 16bit values of the TLS ciphers + * @param pool to use for allocation the string. + */ +const char *tls_proto_get_cipher_names( + tls_proto_conf_t *conf, const apr_array_header_t *ciphers, apr_pool_t *pool); + +/** + * Convert an array of TLS cipher 16bit identifiers into the `rustls_supported_ciphersuite` + * instances that can be passed to crustls in session configurations. + * Any cipher identifier not supported by rustls we be silently omitted. + */ +apr_array_header_t *tls_proto_get_rustls_suites( + tls_proto_conf_t *conf, const apr_array_header_t *ids, apr_pool_t *pool); + +#endif /* tls_proto_h */ diff --git a/modules/tls/tls_util.c b/modules/tls/tls_util.c new file mode 100644 index 0000000..9eac212 --- /dev/null +++ b/modules/tls/tls_util.c @@ -0,0 +1,367 @@ +/* Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#include +#include +#include +#include + +#include +#include +#include + +#include + +#include "tls_proto.h" +#include "tls_util.h" + + +extern module AP_MODULE_DECLARE_DATA tls_module; +APLOG_USE_MODULE(tls); + + +tls_data_t tls_data_from_str(const char *s) +{ + tls_data_t d; + d.data = (const unsigned char*)s; + d.len = s? 
strlen(s) : 0; + return d; +} + +tls_data_t tls_data_assign_copy(apr_pool_t *p, const tls_data_t *d) +{ + tls_data_t copy; + copy.data = apr_pmemdup(p, d->data, d->len); + copy.len = d->len; + return copy; +} + +tls_data_t *tls_data_copy(apr_pool_t *p, const tls_data_t *d) +{ + tls_data_t *copy; + copy = apr_pcalloc(p, sizeof(*copy)); + *copy = tls_data_assign_copy(p, d); + return copy; +} + +const char *tls_data_to_str(apr_pool_t *p, const tls_data_t *d) +{ + char *s = apr_pcalloc(p, d->len+1); + memcpy(s, d->data, d->len); + return s; +} + +apr_status_t tls_util_rustls_error( + apr_pool_t *p, rustls_result rr, const char **perr_descr) +{ + if (perr_descr) { + char buffer[HUGE_STRING_LEN]; + apr_size_t len = 0; + + rustls_error(rr, buffer, sizeof(buffer), &len); + *perr_descr = apr_pstrndup(p, buffer, len); + } + return APR_EGENERAL; +} + +int tls_util_is_file( + apr_pool_t *p, const char *fpath) +{ + apr_finfo_t finfo; + + return (fpath != NULL + && apr_stat(&finfo, fpath, APR_FINFO_TYPE|APR_FINFO_SIZE, p) == 0 + && finfo.filetype == APR_REG); +} + +apr_status_t tls_util_file_load( + apr_pool_t *p, const char *fpath, apr_size_t min_len, apr_size_t max_len, tls_data_t *data) +{ + apr_finfo_t finfo; + apr_status_t rv; + apr_file_t *f = NULL; + unsigned char *buffer; + apr_size_t len; + const char *err = NULL; + tls_data_t *d; + + rv = apr_stat(&finfo, fpath, APR_FINFO_TYPE|APR_FINFO_SIZE, p); + if (APR_SUCCESS != rv) { + err = "cannot stat"; goto cleanup; + } + if (finfo.filetype != APR_REG) { + err = "not a plain file"; + rv = APR_EINVAL; goto cleanup; + } + if (finfo.size > LONG_MAX) { + err = "file is too large"; + rv = APR_EINVAL; goto cleanup; + } + len = (apr_size_t)finfo.size; + if (len < min_len || len > max_len) { + err = "file size not in allowed range"; + rv = APR_EINVAL; goto cleanup; + } + d = apr_pcalloc(p, sizeof(*d)); + buffer = apr_pcalloc(p, len+1); /* keep it NUL terminated in any case */ + rv = apr_file_open(&f, fpath, APR_FOPEN_READ, 0, p); + if (APR_SUCCESS != rv) { + err = "error opening"; goto cleanup; + } + rv = apr_file_read(f, buffer, &len); + if (APR_SUCCESS != rv) { + err = "error reading"; goto cleanup; + } +cleanup: + if (f) apr_file_close(f); + if (APR_SUCCESS == rv) { + data->data = buffer; + data->len = len; + } + else { + memset(data, 0, sizeof(*data)); + ap_log_perror(APLOG_MARK, APLOG_ERR, rv, p, APLOGNO(10361) + "Failed to load file %s: %s", fpath, err? err: "-"); + } + return rv; +} + +int tls_util_array_uint16_contains(const apr_array_header_t* a, apr_uint16_t n) +{ + int i; + for (i = 0; i < a->nelts; ++i) { + if (APR_ARRAY_IDX(a, i, apr_uint16_t) == n) return 1; + } + return 0; +} + +const apr_array_header_t *tls_util_array_uint16_remove( + apr_pool_t *pool, const apr_array_header_t* from, const apr_array_header_t* others) +{ + apr_array_header_t *na = NULL; + apr_uint16_t id; + int i, j; + + for (i = 0; i < from->nelts; ++i) { + id = APR_ARRAY_IDX(from, i, apr_uint16_t); + if (tls_util_array_uint16_contains(others, id)) { + if (na == NULL) { + /* first removal, make a new result array, copy elements before */ + na = apr_array_make(pool, from->nelts, sizeof(apr_uint16_t)); + for (j = 0; j < i; ++j) { + APR_ARRAY_PUSH(na, apr_uint16_t) = APR_ARRAY_IDX(from, j, apr_uint16_t); + } + } + } + else if (na) { + APR_ARRAY_PUSH(na, apr_uint16_t) = id; + } + } + return na? 
na : from; +} + +apr_status_t tls_util_brigade_transfer( + apr_bucket_brigade *dest, apr_bucket_brigade *src, apr_off_t length, + apr_off_t *pnout) +{ + apr_bucket *b; + apr_off_t remain = length; + apr_status_t rv = APR_SUCCESS; + const char *ign; + apr_size_t ilen; + + *pnout = 0; + while (!APR_BRIGADE_EMPTY(src)) { + b = APR_BRIGADE_FIRST(src); + + if (APR_BUCKET_IS_METADATA(b)) { + APR_BUCKET_REMOVE(b); + APR_BRIGADE_INSERT_TAIL(dest, b); + } + else { + if (remain == (apr_off_t)b->length) { + /* fall through */ + } + else if (remain <= 0) { + goto cleanup; + } + else { + if (b->length == ((apr_size_t)-1)) { + rv= apr_bucket_read(b, &ign, &ilen, APR_BLOCK_READ); + if (APR_SUCCESS != rv) goto cleanup; + } + if (remain < (apr_off_t)b->length) { + apr_bucket_split(b, (apr_size_t)remain); + } + } + APR_BUCKET_REMOVE(b); + APR_BRIGADE_INSERT_TAIL(dest, b); + remain -= (apr_off_t)b->length; + *pnout += (apr_off_t)b->length; + } + } +cleanup: + return rv; +} + +apr_status_t tls_util_brigade_copy( + apr_bucket_brigade *dest, apr_bucket_brigade *src, apr_off_t length, + apr_off_t *pnout) +{ + apr_bucket *b, *next; + apr_off_t remain = length; + apr_status_t rv = APR_SUCCESS; + const char *ign; + apr_size_t ilen; + + *pnout = 0; + for (b = APR_BRIGADE_FIRST(src); + b != APR_BRIGADE_SENTINEL(src); + b = next) { + next = APR_BUCKET_NEXT(b); + + if (APR_BUCKET_IS_METADATA(b)) { + /* fall through */ + } + else { + if (remain == (apr_off_t)b->length) { + /* fall through */ + } + else if (remain <= 0) { + goto cleanup; + } + else { + if (b->length == ((apr_size_t)-1)) { + rv = apr_bucket_read(b, &ign, &ilen, APR_BLOCK_READ); + if (APR_SUCCESS != rv) goto cleanup; + } + if (remain < (apr_off_t)b->length) { + apr_bucket_split(b, (apr_size_t)remain); + } + } + } + rv = apr_bucket_copy(b, &b); + if (APR_SUCCESS != rv) goto cleanup; + APR_BRIGADE_INSERT_TAIL(dest, b); + remain -= (apr_off_t)b->length; + *pnout += (apr_off_t)b->length; + } +cleanup: + return rv; +} + +apr_status_t tls_util_brigade_split_line( + apr_bucket_brigade *dest, apr_bucket_brigade *src, + apr_read_type_e block, apr_off_t length, + apr_off_t *pnout) +{ + apr_off_t nstart, nend; + apr_status_t rv; + + apr_brigade_length(dest, 0, &nstart); + rv = apr_brigade_split_line(dest, src, block, length); + if (APR_SUCCESS != rv) goto cleanup; + apr_brigade_length(dest, 0, &nend); + /* apr_brigade_split_line() has the nasty habit of leaving a 0-length bucket + * at the start of the brigade when it transferred the whole content. Get rid of it. + */ + if (!APR_BRIGADE_EMPTY(src)) { + apr_bucket *b = APR_BRIGADE_FIRST(src); + if (!APR_BUCKET_IS_METADATA(b) && 0 == b->length) { + APR_BUCKET_REMOVE(b); + apr_bucket_delete(b); + } + } +cleanup: + *pnout = (APR_SUCCESS == rv)? 
(nend - nstart) : 0; + return rv; +} + +int tls_util_name_matches_server(const char *name, server_rec *s) +{ + apr_array_header_t *names; + char **alias; + int i; + + if (!s || !s->server_hostname) return 0; + if (!strcasecmp(name, s->server_hostname)) return 1; + /* first the fast equality match, then the pattern wild_name matches */ + names = s->names; + if (!names) return 0; + alias = (char **)names->elts; + for (i = 0; i < names->nelts; ++i) { + if (alias[i] && !strcasecmp(name, alias[i])) return 1; + } + names = s->wild_names; + if (!names) return 0; + alias = (char **)names->elts; + for (i = 0; i < names->nelts; ++i) { + if (alias[i] && !ap_strcasecmp_match(name, alias[i])) return 1; + } + return 0; +} + +apr_size_t tls_util_bucket_print(char *buffer, apr_size_t bmax, + apr_bucket *b, const char *sep) +{ + apr_size_t off = 0; + if (sep && *sep) { + off += (size_t)apr_snprintf(buffer+off, bmax-off, "%s", sep); + } + + if (bmax <= off) { + return off; + } + else if (APR_BUCKET_IS_METADATA(b)) { + off += (size_t)apr_snprintf(buffer+off, bmax-off, "%s", b->type->name); + } + else if (bmax > off) { + off += (size_t)apr_snprintf(buffer+off, bmax-off, "%s[%ld]", + b->type->name, (long)(b->length == ((apr_size_t)-1)? + -1 : (int)b->length)); + } + return off; +} + +apr_size_t tls_util_bb_print(char *buffer, apr_size_t bmax, + const char *tag, const char *sep, + apr_bucket_brigade *bb) +{ + apr_size_t off = 0; + const char *sp = ""; + apr_bucket *b; + + if (bmax > 1) { + if (bb) { + memset(buffer, 0, bmax--); + off += (size_t)apr_snprintf(buffer+off, bmax-off, "%s(", tag); + for (b = APR_BRIGADE_FIRST(bb); + (bmax > off) && (b != APR_BRIGADE_SENTINEL(bb)); + b = APR_BUCKET_NEXT(b)) { + + off += tls_util_bucket_print(buffer+off, bmax-off, b, sp); + sp = " "; + } + if (bmax > off) { + off += (size_t)apr_snprintf(buffer+off, bmax-off, ")%s", sep); + } + } + else { + off += (size_t)apr_snprintf(buffer+off, bmax-off, "%s(null)%s", tag, sep); + } + } + return off; +} + diff --git a/modules/tls/tls_util.h b/modules/tls/tls_util.h new file mode 100644 index 0000000..18ae4df --- /dev/null +++ b/modules/tls/tls_util.h @@ -0,0 +1,157 @@ +/* Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef tls_util_h +#define tls_util_h + +#define TLS_DIM(a) (sizeof(a)/sizeof(a[0])) + + +/** + * Simple struct to hold a range of bytes and its length together. + */ +typedef struct tls_data_t tls_data_t; +struct tls_data_t { + const unsigned char* data; + apr_size_t len; +}; + +/** + * Return a tls_data_t for a string. + */ +tls_data_t tls_data_from_str(const char *s); + +/** + * Create a copy of a tls_data_t using the given pool. + */ +tls_data_t *tls_data_copy(apr_pool_t *p, const tls_data_t *d); + +/** + * Return a copy of a tls_data_t bytes allocated from pool. 
+ */
+tls_data_t tls_data_assign_copy(apr_pool_t *p, const tls_data_t *d);
+
+/**
+ * Convert the data bytes in `d` into a NUL-terminated string.
+ * There is no check if the data bytes already contain NUL.
+ */
+const char *tls_data_to_str(apr_pool_t *p, const tls_data_t *d);
+
+/**
+ * Return != 0 if fpath is a 'real' file.
+ */
+int tls_util_is_file(apr_pool_t *p, const char *fpath);
+
+/**
+ * Inspect a 'rustls_result', retrieve the error description for it and
+ * return the apr_status_t to use as our error status.
+ */
+apr_status_t tls_util_rustls_error(apr_pool_t *p, rustls_result rr, const char **perr_descr);
+
+/**
+ * Load up to `max_len` bytes into a buffer allocated from the pool.
+ * @return APR_SUCCESS on successful load.
+ *         APR_EINVAL when the file was not a regular file or is too large.
+ */
+apr_status_t tls_util_file_load(
+    apr_pool_t *p, const char *fpath, size_t min_len, size_t max_len, tls_data_t *data);
+
+/**
+ * Return != 0 iff the array of apr_uint16_t contains value n.
+ */
+int tls_util_array_uint16_contains(const apr_array_header_t* a, apr_uint16_t n);
+
+/**
+ * Remove all apr_uint16_t in `others` from array `from`.
+ * Returns the new array or, if no overlap was found, the `from` array unchanged.
+ */
+const apr_array_header_t *tls_util_array_uint16_remove(
+    apr_pool_t *pool, const apr_array_header_t* from, const apr_array_header_t* others);
+
+/**
+ * Transfer up to `length` bytes from `src` to `dest`, including all
+ * encountered meta data buckets. The transferred buckets/data are
+ * removed from `src`.
+ * Return the actual byte count transferred in `pnout`.
+ */
+apr_status_t tls_util_brigade_transfer(
+    apr_bucket_brigade *dest, apr_bucket_brigade *src, apr_off_t length,
+    apr_off_t *pnout);
+
+/**
+ * Copy up to `length` bytes from `src` to `dest`, including all
+ * encountered meta data buckets. `src` remains semantically unchanged,
+ * meaning there might have been buckets split or changed while reading
+ * their content.
+ * Return the actual byte count copied in `pnout`.
+ */
+apr_status_t tls_util_brigade_copy(
+    apr_bucket_brigade *dest, apr_bucket_brigade *src, apr_off_t length,
+    apr_off_t *pnout);
+
+/**
+ * Get a line of max `length` bytes from `src` into `dest`.
+ * Return the number of bytes transferred in `pnout`.
+ */
+apr_status_t tls_util_brigade_split_line(
+    apr_bucket_brigade *dest, apr_bucket_brigade *src,
+    apr_read_type_e block, apr_off_t length,
+    apr_off_t *pnout);
+
+/**
+ * Return != 0 iff the given `name` matches the configured 'ServerName'
+ * or one of the 'ServerAlias' names of `s`, including wildcard patterns
+ * as understood by ap_strcasecmp_match().
+ */
+int tls_util_name_matches_server(const char *name, server_rec *s);
+
+
+/**
+ * Print a bucket's meta data (type and length) to the buffer.
+ * @return number of characters printed
+ */
+apr_size_t tls_util_bucket_print(char *buffer, apr_size_t bmax,
+                                 apr_bucket *b, const char *sep);
+
+/**
+ * Prints the brigade bucket types and lengths into the given buffer
+ * up to bmax.
+ * @return number of characters printed
+ */
+apr_size_t tls_util_bb_print(char *buffer, apr_size_t bmax,
+                             const char *tag, const char *sep,
+                             apr_bucket_brigade *bb);
+/**
+ * Logs the bucket brigade (which bucket types with what length)
+ * to the log at the given level.
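A brief usage sketch for the brigade helpers declared above (example_drain, the brigade names and the 16 KiB limit are illustrative only):

#include "tls_util.h"

static void example_drain(apr_bucket_brigade *dest, apr_bucket_brigade *src)
{
    apr_off_t moved = 0;
    apr_status_t rv;

    /* move at most 16 KiB of data from `src` to `dest`; metadata buckets
     * (FLUSH, EOS) travel along but do not count against the limit */
    rv = tls_util_brigade_transfer(dest, src, 16 * 1024, &moved);
    if (APR_SUCCESS == rv && moved > 0) {
        /* `moved` data bytes are now queued in `dest`, e.g. for encryption */
    }
}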
+ * @param c the connection to log for + * @param sid the stream identifier this brigade belongs to + * @param level the log level (as in APLOG_*) + * @param tag a short message text about the context + * @param bb the brigade to log + */ +#define tls_util_bb_log(c, level, tag, bb) \ +do { \ + char buffer[4 * 1024]; \ + const char *line = "(null)"; \ + apr_size_t len, bmax = sizeof(buffer)/sizeof(buffer[0]); \ + len = tls_util_bb_print(buffer, bmax, (tag), "", (bb)); \ + ap_log_cerror(APLOG_MARK, level, 0, (c), "bb_dump(%ld): %s", \ + ((c)->master? (c)->master->id : (c)->id), (len? buffer : line)); \ +} while(0) + + + +#endif /* tls_util_h */ diff --git a/modules/tls/tls_var.c b/modules/tls/tls_var.c new file mode 100644 index 0000000..fa4ae2a --- /dev/null +++ b/modules/tls/tls_var.c @@ -0,0 +1,397 @@ +/* Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#include + +#include "tls_conf.h" +#include "tls_core.h" +#include "tls_cert.h" +#include "tls_util.h" +#include "tls_var.h" +#include "tls_version.h" + + +extern module AP_MODULE_DECLARE_DATA tls_module; +APLOG_USE_MODULE(tls); + +typedef struct { + apr_pool_t *p; + server_rec *s; + conn_rec *c; + request_rec *r; + tls_conf_conn_t *cc; + const char *name; + const char *arg_s; + int arg_i; +} tls_var_lookup_ctx_t; + +typedef const char *var_lookup(const tls_var_lookup_ctx_t *ctx); + +static const char *var_get_ssl_protocol(const tls_var_lookup_ctx_t *ctx) +{ + return ctx->cc->tls_protocol_name; +} + +static const char *var_get_ssl_cipher(const tls_var_lookup_ctx_t *ctx) +{ + return ctx->cc->tls_cipher_name; +} + +static const char *var_get_sni_hostname(const tls_var_lookup_ctx_t *ctx) +{ + return ctx->cc->sni_hostname; +} + +static const char *var_get_version_interface(const tls_var_lookup_ctx_t *ctx) +{ + tls_conf_server_t *sc = tls_conf_server_get(ctx->s); + return sc->global->module_version; +} + +static const char *var_get_version_library(const tls_var_lookup_ctx_t *ctx) +{ + tls_conf_server_t *sc = tls_conf_server_get(ctx->s); + return sc->global->crustls_version; +} + +static const char *var_get_false(const tls_var_lookup_ctx_t *ctx) +{ + (void)ctx; + return "false"; +} + +static const char *var_get_null(const tls_var_lookup_ctx_t *ctx) +{ + (void)ctx; + return "NULL"; +} + +static const char *var_get_client_s_dn_cn(const tls_var_lookup_ctx_t *ctx) +{ + /* There is no support in the crustls/rustls/webpki APIs to + * parse X.509 certificates and extract information about + * subject, issuer, etc. 
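The lookup table above is what ultimately answers variable queries; other modules reach it through the server's generic SSL variable indirection rather than calling into mod_tls directly. A hedged sketch of such a consumer (example_log_tls_protocol is hypothetical; ap_ssl_var_lookup() is the httpd 2.4.48+ API declared in http_ssl.h):

#include "httpd.h"
#include "http_log.h"
#include "http_ssl.h"

static void example_log_tls_protocol(request_rec *r)
{
    const char *proto = ap_ssl_var_lookup(r->pool, r->server,
                                          r->connection, r, "SSL_PROTOCOL");
    ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r,
                  "negotiated TLS protocol: %s", proto? proto : "(none)");
}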
+     */
+    if (!ctx->cc->peer_certs || !ctx->cc->peer_certs->nelts) return NULL;
+    return "Not Implemented";
+}
+
+static const char *var_get_client_verify(const tls_var_lookup_ctx_t *ctx)
+{
+    return ctx->cc->peer_certs? "SUCCESS" : "NONE";
+}
+
+static const char *var_get_session_resumed(const tls_var_lookup_ctx_t *ctx)
+{
+    return ctx->cc->session_id_cache_hit? "Resumed" : "Initial";
+}
+
+static const char *var_get_client_cert(const tls_var_lookup_ctx_t *ctx)
+{
+    const rustls_certificate *cert;
+    const char *pem;
+    apr_status_t rv;
+    int cert_idx = 0;
+
+    if (ctx->arg_s) {
+        if (strcmp(ctx->arg_s, "chain")) return NULL;
+        /* the ctx->arg_i'th chain cert, which sits at index arg_i + 1 in our list */
+        cert_idx = ctx->arg_i + 1;
+    }
+    if (!ctx->cc->peer_certs || cert_idx >= ctx->cc->peer_certs->nelts) return NULL;
+    cert = APR_ARRAY_IDX(ctx->cc->peer_certs, cert_idx, const rustls_certificate*);
+    if (APR_SUCCESS != (rv = tls_cert_to_pem(&pem, ctx->p, cert))) {
+        ap_log_error(APLOG_MARK, APLOG_DEBUG, rv, ctx->s, APLOGNO(10315)
+                     "Failed to create client certificate PEM");
+        return NULL;
+    }
+    return pem;
+}
+
+static const char *var_get_server_cert(const tls_var_lookup_ctx_t *ctx)
+{
+    const rustls_certificate *cert;
+    const char *pem;
+    apr_status_t rv;
+
+    if (!ctx->cc->key) return NULL;
+    cert = rustls_certified_key_get_certificate(ctx->cc->key, 0);
+    if (!cert) return NULL;
+    if (APR_SUCCESS != (rv = tls_cert_to_pem(&pem, ctx->p, cert))) {
+        ap_log_error(APLOG_MARK, APLOG_DEBUG, rv, ctx->s, APLOGNO(10316)
+                     "Failed to create server certificate PEM");
+        return NULL;
+    }
+    return pem;
+}
+
+typedef struct {
+    const char *name;
+    var_lookup* fn;
+    const char *arg_s;
+    int arg_i;
+} var_def_t;
+
+static const var_def_t VAR_DEFS[] = {
+    { "SSL_PROTOCOL", var_get_ssl_protocol, NULL, 0 },
+    { "SSL_CIPHER", var_get_ssl_cipher, NULL, 0 },
+    { "SSL_TLS_SNI", var_get_sni_hostname, NULL, 0 },
+    { "SSL_CLIENT_S_DN_CN", var_get_client_s_dn_cn, NULL, 0 },
+    { "SSL_VERSION_INTERFACE", var_get_version_interface, NULL, 0 },
+    { "SSL_VERSION_LIBRARY", var_get_version_library, NULL, 0 },
+    { "SSL_SECURE_RENEG", var_get_false, NULL, 0 },
+    { "SSL_COMPRESS_METHOD", var_get_null, NULL, 0 },
+    { "SSL_CIPHER_EXPORT", var_get_false, NULL, 0 },
+    { "SSL_CLIENT_VERIFY", var_get_client_verify, NULL, 0 },
+    { "SSL_SESSION_RESUMED", var_get_session_resumed, NULL, 0 },
+    { "SSL_CLIENT_CERT", var_get_client_cert, NULL, 0 },
+    { "SSL_CLIENT_CHAIN_0", var_get_client_cert, "chain", 0 },
+    { "SSL_CLIENT_CHAIN_1", var_get_client_cert, "chain", 1 },
+    { "SSL_CLIENT_CHAIN_2", var_get_client_cert, "chain", 2 },
+    { "SSL_CLIENT_CHAIN_3", var_get_client_cert, "chain", 3 },
+    { "SSL_CLIENT_CHAIN_4", var_get_client_cert, "chain", 4 },
+    { "SSL_CLIENT_CHAIN_5", var_get_client_cert, "chain", 5 },
+    { "SSL_CLIENT_CHAIN_6", var_get_client_cert, "chain", 6 },
+    { "SSL_CLIENT_CHAIN_7", var_get_client_cert, "chain", 7 },
+    { "SSL_CLIENT_CHAIN_8", var_get_client_cert, "chain", 8 },
+    { "SSL_CLIENT_CHAIN_9", var_get_client_cert, "chain", 9 },
+    { "SSL_SERVER_CERT", var_get_server_cert, NULL, 0 },
+};
+
+static const char *const TlsAlwaysVars[] = {
+    "SSL_TLS_SNI",
+    "SSL_PROTOCOL",
+    "SSL_CIPHER",
+    "SSL_CLIENT_S_DN_CN",
+};
+
+/* what mod_ssl defines, plus server cert and client cert DN and SAN entries */
+static const char *const StdEnvVars[] = {
+    "SSL_VERSION_INTERFACE", /* implemented: module version string */
+    "SSL_VERSION_LIBRARY", /* implemented: crustls/rustls version string */
+    "SSL_SECURE_RENEG", /* implemented: always "false" */
+    "SSL_COMPRESS_METHOD", /* implemented: always "NULL" */
+    "SSL_CIPHER_EXPORT", /* implemented: always "false" */
+    "SSL_CIPHER_USEKEYSIZE",
+    "SSL_CIPHER_ALGKEYSIZE",
+    "SSL_CLIENT_VERIFY", /* implemented: always "SUCCESS" or "NONE" */
+    "SSL_CLIENT_M_VERSION",
+    "SSL_CLIENT_M_SERIAL",
+    "SSL_CLIENT_V_START",
+    "SSL_CLIENT_V_END",
+    "SSL_CLIENT_V_REMAIN",
+    "SSL_CLIENT_S_DN",
+    "SSL_CLIENT_I_DN",
+    "SSL_CLIENT_A_KEY",
+    "SSL_CLIENT_A_SIG",
+    "SSL_CLIENT_CERT_RFC4523_CEA",
+    "SSL_SERVER_M_VERSION",
+    "SSL_SERVER_M_SERIAL",
+    "SSL_SERVER_V_START",
+    "SSL_SERVER_V_END",
+    "SSL_SERVER_S_DN",
+    "SSL_SERVER_I_DN",
+    "SSL_SERVER_A_KEY",
+    "SSL_SERVER_A_SIG",
+    "SSL_SESSION_ID", /* not implemented: highly sensitive data we do not expose */
+    "SSL_SESSION_RESUMED", /* implemented: if our cache was hit successfully */
+};
+
+/* Cert related variables, export when TLSOption ExportCertData is set */
+static const char *const ExportCertVars[] = {
+    "SSL_CLIENT_CERT", /* implemented: */
+    "SSL_CLIENT_CHAIN_0", /* implemented: */
+    "SSL_CLIENT_CHAIN_1", /* implemented: */
+    "SSL_CLIENT_CHAIN_2", /* implemented: */
+    "SSL_CLIENT_CHAIN_3", /* implemented: */
+    "SSL_CLIENT_CHAIN_4", /* implemented: */
+    "SSL_CLIENT_CHAIN_5", /* implemented: */
+    "SSL_CLIENT_CHAIN_6", /* implemented: */
+    "SSL_CLIENT_CHAIN_7", /* implemented: */
+    "SSL_CLIENT_CHAIN_8", /* implemented: */
+    "SSL_CLIENT_CHAIN_9", /* implemented: */
+    "SSL_SERVER_CERT", /* implemented: */
+};
+
+void tls_var_init_lookup_hash(apr_pool_t *pool, apr_hash_t *map)
+{
+    const var_def_t *def;
+    apr_size_t i;
+
+    (void)pool;
+    for (i = 0; i < TLS_DIM(VAR_DEFS); ++i) {
+        def = &VAR_DEFS[i];
+        apr_hash_set(map, def->name, APR_HASH_KEY_STRING, def);
+    }
+}
+
+static const char *invoke(var_def_t* def, tls_var_lookup_ctx_t *ctx)
+{
+    if (TLS_CONN_ST_IS_ENABLED(ctx->cc)) {
+        const char *val = ctx->cc->subprocess_env?
+            apr_table_get(ctx->cc->subprocess_env, def->name) : NULL;
+        if (val && *val) return val;
+        ctx->arg_s = def->arg_s;
+        ctx->arg_i = def->arg_i;
+        return def->fn(ctx);
+    }
+    return NULL;
+}
+
+static void set_var(
+    tls_var_lookup_ctx_t *ctx, apr_hash_t *lookups, apr_table_t *table)
+{
+    var_def_t* def = apr_hash_get(lookups, ctx->name, APR_HASH_KEY_STRING);
+    if (def) {
+        const char *val = invoke(def, ctx);
+        if (val && *val) {
+            apr_table_setn(table, ctx->name, val);
+        }
+    }
+}
+
+const char *tls_var_lookup(
+    apr_pool_t *p, server_rec *s, conn_rec *c, request_rec *r, const char *name)
+{
+    const char *val = NULL;
+    tls_conf_server_t *sc;
+    var_def_t* def;
+
+    ap_assert(p);
+    ap_assert(name);
+    s = s? s : (r? r->server : (c? c->base_server : NULL));
+    c = c? c : (r? r->connection : NULL);
+
+    sc = tls_conf_server_get(s? s : ap_server_conf);
+    def = apr_hash_get(sc->global->var_lookups, name, APR_HASH_KEY_STRING);
+    if (def) {
+        tls_var_lookup_ctx_t ctx;
+        ctx.p = p;
+        ctx.s = s;
+        ctx.c = c;
+        ctx.r = r;
+        ctx.cc = c? tls_conf_conn_get(c->master? c->master : c) : NULL;
+        ctx.name = name;
+        val = invoke(def, &ctx);
+        ap_log_cerror(APLOG_MARK, APLOG_TRACE3, 0, c, "tls lookup of var '%s' -> '%s'", name, val);
+    }
+    return val;
+}
+
+static void add_vars(apr_table_t *env, conn_rec *c, server_rec *s, request_rec *r)
+{
+    tls_conf_server_t *sc;
+    tls_conf_dir_t *dc, *sdc;
+    tls_var_lookup_ctx_t ctx;
+    apr_size_t i;
+    int overlap;
+
+    sc = tls_conf_server_get(s);
+    dc = r? tls_conf_dir_get(r) : tls_conf_dir_server_get(s);
+    sdc = r? tls_conf_dir_server_get(s): dc;
+    ctx.p = r? r->pool : c->pool;
+    ctx.s = s;
+    ctx.c = c;
+    ctx.r = r;
+    ctx.cc = tls_conf_conn_get(c->master? c->master : c);
+    /* Can we re-use the precomputed connection values? */
+    overlap = (r && ctx.cc->subprocess_env && r->server == ctx.cc->server);
+    if (overlap) {
+        apr_table_overlap(env, ctx.cc->subprocess_env, APR_OVERLAP_TABLES_SET);
+    }
+    else {
+        apr_table_setn(env, "HTTPS", "on");
+        for (i = 0; i < TLS_DIM(TlsAlwaysVars); ++i) {
+            ctx.name = TlsAlwaysVars[i];
+            set_var(&ctx, sc->global->var_lookups, env);
+        }
+    }
+    if (dc->std_env_vars == TLS_FLAG_TRUE) {
+        for (i = 0; i < TLS_DIM(StdEnvVars); ++i) {
+            ctx.name = StdEnvVars[i];
+            set_var(&ctx, sc->global->var_lookups, env);
+        }
+    }
+    else if (overlap && sdc->std_env_vars == TLS_FLAG_TRUE) {
+        /* Remove variables added on connection init that are disabled here */
+        for (i = 0; i < TLS_DIM(StdEnvVars); ++i) {
+            apr_table_unset(env, StdEnvVars[i]);
+        }
+    }
+    if (dc->export_cert_vars == TLS_FLAG_TRUE) {
+        for (i = 0; i < TLS_DIM(ExportCertVars); ++i) {
+            ctx.name = ExportCertVars[i];
+            set_var(&ctx, sc->global->var_lookups, env);
+        }
+    }
+    else if (overlap && sdc->export_cert_vars == TLS_FLAG_TRUE) {
+        /* Remove variables added on connection init that are disabled here */
+        for (i = 0; i < TLS_DIM(ExportCertVars); ++i) {
+            apr_table_unset(env, ExportCertVars[i]);
+        }
+    }
+}
+
+apr_status_t tls_var_handshake_done(conn_rec *c)
+{
+    tls_conf_conn_t *cc;
+    tls_conf_server_t *sc;
+    apr_status_t rv = APR_SUCCESS;
+
+    cc = tls_conf_conn_get(c);
+    if (!TLS_CONN_ST_IS_ENABLED(cc)) goto cleanup;
+
+    sc = tls_conf_server_get(cc->server);
+    if (cc->peer_certs && sc->var_user_name) {
+        cc->user_name = tls_var_lookup(c->pool, cc->server, c, NULL, sc->var_user_name);
+        if (!cc->user_name) {
+            ap_log_error(APLOG_MARK, APLOG_WARNING, 0, cc->server, APLOGNO(10317)
+                         "Failed to set r->user to '%s'", sc->var_user_name);
+        }
+    }
+    cc->subprocess_env = apr_table_make(c->pool, 5);
+    add_vars(cc->subprocess_env, c, cc->server, NULL);
+
+cleanup:
+    return rv;
+}
+
+int tls_var_request_fixup(request_rec *r)
+{
+    conn_rec *c = r->connection;
+    tls_conf_conn_t *cc;
+
+    cc = tls_conf_conn_get(c->master? c->master : c);
+    if (!TLS_CONN_ST_IS_ENABLED(cc)) goto cleanup;
+    if (cc->user_name) {
+        /* why is r->user a char* and not const? */
+        r->user = apr_pstrdup(r->pool, cc->user_name);
+    }
+    add_vars(r->subprocess_env, c, r->server, r);
+
+cleanup:
+    return DECLINED;
+}
diff --git a/modules/tls/tls_var.h b/modules/tls/tls_var.h
new file mode 100644
index 0000000..2e8c0bb
--- /dev/null
+++ b/modules/tls/tls_var.h
@@ -0,0 +1,39 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef tls_var_h
+#define tls_var_h
+
+void tls_var_init_lookup_hash(apr_pool_t *pool, apr_hash_t *map);
+
+/**
+ * Callback for installation in Apache's 'ssl_var_lookup' hook to provide
+ * SSL related variable lookups to other modules.
+ */
+const char *tls_var_lookup(
+    apr_pool_t *p, server_rec *s, conn_rec *c, request_rec *r, const char *name);
+
+/**
+ * The TLS handshake on a connection is done. Prepare the common TLS variables on this connection.
+ */
+apr_status_t tls_var_handshake_done(conn_rec *c);
+
+/**
+ * A request is ready for processing, add TLS variables to r->subprocess_env if applicable.
+ * This is a hook function returning OK/DECLINED.
+ */
+int tls_var_request_fixup(request_rec *r);
+
+#endif /* tls_var_h */
\ No newline at end of file
diff --git a/modules/tls/tls_version.h b/modules/tls/tls_version.h
new file mode 100644
index 0000000..811d6f1
--- /dev/null
+++ b/modules/tls/tls_version.h
@@ -0,0 +1,39 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef mod_tls_version_h
+#define mod_tls_version_h
+
+#undef PACKAGE_VERSION
+#undef PACKAGE_TARNAME
+#undef PACKAGE_STRING
+#undef PACKAGE_NAME
+#undef PACKAGE_BUGREPORT
+
+/**
+ * @macro
+ * Version number of the tls module as C string
+ */
+#define MOD_TLS_VERSION "0.8.3"
+
+/**
+ * @macro
+ * Numerical representation of the version number of the tls module
+ * release. This is a 24 bit number with 8 bits for major number, 8 bits
+ * for minor and 8 bits for patch. Version 1.2.3 becomes 0x010203.
+ */
+#define MOD_TLS_VERSION_NUM 0x000802
+
+#endif /* mod_tls_version_h */
-- 
cgit v1.2.3