author    | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-05-07 02:04:06 +0000
committer | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-05-07 02:04:06 +0000
commit    | 5dff2d61cc1c27747ee398e04d8e02843aabb1f8
tree      | a67c336b406c8227bac912beb74a1ad3cdc55100 /modules/http2
parent    | Initial commit.
Adding upstream version 2.4.38.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'modules/http2')
66 files changed, 27315 insertions, 0 deletions
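
The bundled README.h2 (included in this diff) explains that HTTP/2 support is enabled per server or per virtual host through the Protocols directive once httpd has been built with --enable-http2 against libnghttp2. A minimal configuration sketch along those lines, assuming mod_http2 was built as a shared module and loaded from the default modules directory (the module path and hostname are illustrative, not taken from this commit):

    # Load the module added by this commit (shared build)
    LoadModule http2_module modules/mod_http2.so

    # Offer only HTTP/1.1 server-wide, add h2 (HTTP/2 over TLS) on one TLS vhost
    Protocols http/1.1
    <VirtualHost *:443>
        ServerName test.example.org
        Protocols h2 http/1.1
    </VirtualHost>
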
diff --git a/modules/http2/.gitignore b/modules/http2/.gitignore new file mode 100644 index 0000000..ca49620 --- /dev/null +++ b/modules/http2/.gitignore @@ -0,0 +1,35 @@ +*.xcuserstate +sandbox/httpd/packages/httpd-2.4.x.tar.gz +sandbox/test/conf/sites/mod-h2.greenbytes.de.conf +*.o +*.slo +*.lo +*.la +*.pcap +.libs +.configured +.deps +compile +aclocal.m4 +autom4te.cache +autoscan.log +config.guess +config.log +config.status +config.sub +config.h +config.h.in +config.h.in~ +configure +configure.scan +depcomp +install-sh +libtool +ltmain.sh +missing +stamp-h1 +Makefile.in +Makefile +mod_h2-*.tar.gz +mod_h2/h2_version.h +m4 diff --git a/modules/http2/Makefile.in b/modules/http2/Makefile.in new file mode 100644 index 0000000..4395bc3 --- /dev/null +++ b/modules/http2/Makefile.in @@ -0,0 +1,20 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# +# standard stuff +# + +include $(top_srcdir)/build/special.mk diff --git a/modules/http2/NWGNUmakefile b/modules/http2/NWGNUmakefile new file mode 100644 index 0000000..d4a51ed --- /dev/null +++ b/modules/http2/NWGNUmakefile @@ -0,0 +1,246 @@ +# +# Declare the sub-directories to be built here +# + +SUBDIRS = \ + $(EOLIST) + +# +# Get the 'head' of the build environment. This includes default targets and +# paths to tools +# + +include $(AP_WORK)/build/NWGNUhead.inc + +# +# build this level's files + +# +# Make sure all needed macro's are defined +# + +# +# These directories will be at the beginning of the include list, followed by +# INCDIRS +# +XINCDIRS += \ + $(EOLIST) + +# +# These flags will come after CFLAGS +# +XCFLAGS += \ + $(EOLIST) + +# +# These defines will come after DEFINES +# +XDEFINES += \ + $(EOLIST) + +# +# These flags will be added to the link.opt file +# +XLFLAGS += \ + $(EOLIST) + +# +# These values will be appended to the correct variables based on the value of +# RELEASE +# +ifeq "$(RELEASE)" "debug" +XINCDIRS += \ + $(EOLIST) + +XCFLAGS += \ + $(EOLIST) + +XDEFINES += \ + $(EOLIST) + +XLFLAGS += \ + $(EOLIST) +endif + +ifeq "$(RELEASE)" "noopt" +XINCDIRS += \ + $(EOLIST) + +XCFLAGS += \ + $(EOLIST) + +XDEFINES += \ + $(EOLIST) + +XLFLAGS += \ + $(EOLIST) +endif + +ifeq "$(RELEASE)" "release" +XINCDIRS += \ + $(EOLIST) + +XCFLAGS += \ + $(EOLIST) + +XDEFINES += \ + $(EOLIST) + +XLFLAGS += \ + $(EOLIST) +endif + +# +# These are used by the link target if an NLM is being generated +# This is used by the link 'name' directive to name the nlm. If left blank +# TARGET_nlm (see below) will be used. +# +NLM_NAME = + +# +# This is used by the link '-desc ' directive. +# If left blank, NLM_NAME will be used. +# +NLM_DESCRIPTION = + +# +# This is used by the '-threadname' directive. If left blank, +# NLM_NAME Thread will be used. 
+# +NLM_THREAD_NAME = + +# +# If this is specified, it will override VERSION value in +# $(AP_WORK)/build/NWGNUenvironment.inc +# +NLM_VERSION = + +# +# If this is specified, it will override the default of 64K +# +NLM_STACK_SIZE = + + +# +# If this is specified it will be used by the link '-entry' directive +# +NLM_ENTRY_SYM = + +# +# If this is specified it will be used by the link '-exit' directive +# +NLM_EXIT_SYM = + +# +# If this is specified it will be used by the link '-check' directive +# +NLM_CHECK_SYM = + +# +# If these are specified it will be used by the link '-flags' directive +# +NLM_FLAGS = + +# +# If this is specified it will be linked in with the XDCData option in the def +# file instead of the default of $(NWOS)/apache.xdc. XDCData can be disabled +# by setting APACHE_UNIPROC in the environment +# +XDCDATA = + +# +# If there is an NLM target, put it here +# +TARGET_nlm = \ + $(OBJDIR)/mod_http2.nlm \ + $(OBJDIR)/proxyht2.nlm \ + $(EOLIST) + +# +# If there is an LIB target, put it here +# +TARGET_lib = \ + $(EOLIST) + +# +# These are the OBJ files needed to create the NLM target above. +# Paths must all use the '/' character +# +FILES_nlm_objs = \ + $(EOLIST) + +# +# These are the LIB files needed to create the NLM target above. +# These will be added as a library command in the link.opt file. +# +FILES_nlm_libs = \ + $(EOLIST) + +# +# These are the modules that the above NLM target depends on to load. +# These will be added as a module command in the link.opt file. +# +FILES_nlm_modules = \ + $(EOLIST) + +# +# If the nlm has a msg file, put it's path here +# +FILE_nlm_msg = + +# +# If the nlm has a hlp file put it's path here +# +FILE_nlm_hlp = + +# +# If this is specified, it will override $(NWOS)\copyright.txt. +# +FILE_nlm_copyright = + +# +# Any additional imports go here +# +FILES_nlm_Ximports = \ + $(EOLIST) + +# +# Any symbols exported to here +# +FILES_nlm_exports = \ + $(EOLIST) + +# +# These are the OBJ files needed to create the LIB target above. +# Paths must all use the '/' character +# +FILES_lib_objs = \ + $(EOLIST) + +# +# implement targets and dependancies (leave this section alone) +# + +libs :: $(OBJDIR) $(TARGET_lib) + +nlms :: libs $(TARGET_nlm) + +# +# Updated this target to create necessary directories and copy files to the +# correct place. (See $(AP_WORK)/build/NWGNUhead.inc for examples) +# +install :: nlms FORCE + $(call COPY,$(OBJDIR)/*.nlm, $(INSTALLBASE)/modules/) + +# +# Any specialized rules here +# + +# +# Include the 'tail' makefile that has targets that depend on variables defined +# in this makefile +# + +include $(APBUILD)/NWGNUtail.inc + + diff --git a/modules/http2/NWGNUmod_http2 b/modules/http2/NWGNUmod_http2 new file mode 100644 index 0000000..f6d4a38 --- /dev/null +++ b/modules/http2/NWGNUmod_http2 @@ -0,0 +1,395 @@ +# +# This Makefile requires the environment var NGH2SRC +# pointing to the base directory of nghttp2 source tree. +# + +# +# Declare the sub-directories to be built here +# + +SUBDIRS = \ + $(EOLIST) + +# +# Get the 'head' of the build environment. 
This includes default targets and +# paths to tools +# + +include $(AP_WORK)/build/NWGNUhead.inc + +# +# build this level's files +# +# Make sure all needed macro's are defined +# + +# +# These directories will be at the beginning of the include list, followed by +# INCDIRS +# +XINCDIRS += \ + $(APR)/include \ + $(APRUTIL)/include \ + $(SRC)/include \ + $(NGH2SRC)/lib/ \ + $(NGH2SRC)/lib/includes \ + $(SERVER)/mpm/NetWare \ + $(STDMOD)/ssl \ + $(NWOS) \ + $(EOLIST) + +# +# These flags will come after CFLAGS +# +XCFLAGS += \ + $(EOLIST) + +# +# These defines will come after DEFINES +# +XDEFINES += \ + -DHAVE_CONFIG_H \ + $(EOLIST) + +# +# These flags will be added to the link.opt file +# +XLFLAGS += \ + -L$(OBJDIR) \ + $(EOLIST) + +# +# These values will be appended to the correct variables based on the value of +# RELEASE +# +ifeq "$(RELEASE)" "debug" +XINCDIRS += \ + $(EOLIST) + +XCFLAGS += \ + $(EOLIST) + +XDEFINES += \ + $(EOLIST) + +XLFLAGS += \ + $(EOLIST) +endif + +ifeq "$(RELEASE)" "noopt" +XINCDIRS += \ + $(EOLIST) + +XCFLAGS += \ + $(EOLIST) + +XDEFINES += \ + $(EOLIST) + +XLFLAGS += \ + $(EOLIST) +endif + +ifeq "$(RELEASE)" "release" +XINCDIRS += \ + $(EOLIST) + +XCFLAGS += \ + $(EOLIST) + +XDEFINES += \ + $(EOLIST) + +XLFLAGS += \ + $(EOLIST) +endif + +# +# These are used by the link target if an NLM is being generated +# This is used by the link 'name' directive to name the nlm. If left blank +# TARGET_nlm (see below) will be used. +# +NLM_NAME = mod_http2 + +# +# This is used by the link '-desc ' directive. +# If left blank, NLM_NAME will be used. +# +NLM_DESCRIPTION = Apache $(VERSION_STR) HTTP2 Support module (w/ NGHTTP2 Lib) + +# +# This is used by the '-threadname' directive. If left blank, +# NLM_NAME Thread will be used. +# +NLM_THREAD_NAME = $(NLM_NAME) + +# +# If this is specified, it will override VERSION value in +# $(AP_WORK)/build/NWGNUenvironment.inc +# +NLM_VERSION = + +# +# If this is specified, it will override the default of 64K +# +NLM_STACK_SIZE = 65536 + +# +# If this is specified it will be used by the link '-entry' directive +# +NLM_ENTRY_SYM = + +# +# If this is specified it will be used by the link '-exit' directive +# +NLM_EXIT_SYM = + +# +# If this is specified it will be used by the link '-check' directive +# +NLM_CHECK_SYM = + +# +# If this is specified it will be used by the link '-flags' directive +# +NLM_FLAGS = + +# +# If this is specified it will be linked in with the XDCData option in the def +# file instead of the default of $(NWOS)/apache.xdc. XDCData can be disabled +# by setting APACHE_UNIPROC in the environment +# +XDCDATA = + +# +# Declare all target files (you must add your files here) +# + +# +# If there is an NLM target, put it here +# +TARGET_nlm = \ + $(OBJDIR)/$(NLM_NAME).nlm \ + $(EOLIST) + +# +# If there is an LIB target, put it here +# +TARGET_lib = \ + $(OBJDIR)/nghttp2.lib \ + $(EOLIST) + +# +# These are the OBJ files needed to create the NLM target above. 
+# Paths must all use the '/' character +# +FILES_nlm_objs = \ + $(OBJDIR)/h2_alt_svc.o \ + $(OBJDIR)/h2_bucket_beam.o \ + $(OBJDIR)/h2_bucket_eos.o \ + $(OBJDIR)/h2_config.o \ + $(OBJDIR)/h2_conn.o \ + $(OBJDIR)/h2_conn_io.o \ + $(OBJDIR)/h2_ctx.o \ + $(OBJDIR)/h2_filter.o \ + $(OBJDIR)/h2_from_h1.o \ + $(OBJDIR)/h2_h2.o \ + $(OBJDIR)/h2_mplx.o \ + $(OBJDIR)/h2_ngn_shed.o \ + $(OBJDIR)/h2_push.o \ + $(OBJDIR)/h2_request.o \ + $(OBJDIR)/h2_headers.o \ + $(OBJDIR)/h2_session.o \ + $(OBJDIR)/h2_stream.o \ + $(OBJDIR)/h2_switch.o \ + $(OBJDIR)/h2_task.o \ + $(OBJDIR)/h2_util.o \ + $(OBJDIR)/h2_workers.o \ + $(OBJDIR)/mod_http2.o \ + $(EOLIST) + +# +# These are the LIB files needed to create the NLM target above. +# These will be added as a library command in the link.opt file. +# +FILES_nlm_libs = \ + $(PRELUDE) \ + $(OBJDIR)/nghttp2.lib \ + $(EOLIST) + +# +# These are the modules that the above NLM target depends on to load. +# These will be added as a module command in the link.opt file. +# +FILES_nlm_modules = \ + Libc \ + Apache2 \ + $(EOLIST) + +# +# If the nlm has a msg file, put it's path here +# +FILE_nlm_msg = + +# +# If the nlm has a hlp file put it's path here +# +FILE_nlm_hlp = + +# +# If this is specified, it will override $(NWOS)\copyright.txt. +# +FILE_nlm_copyright = + +# +# Any additional imports go here +# +FILES_nlm_Ximports = \ + @libc.imp \ + @aprlib.imp \ + @httpd.imp \ + $(EOLIST) + +# +# Any symbols exported to here +# +FILES_nlm_exports = \ + @$(OBJDIR)/mod_http2.imp \ + $(EOLIST) + +# +# These are the OBJ files needed to create the LIB target above. +# Paths must all use the '/' character +# +FILES_lib_objs := $(sort $(patsubst $(NGH2SRC)/lib/%.c,$(OBJDIR)/%.o,$(wildcard $(NGH2SRC)/lib/*.c))) +# +# implement targets and dependancies (leave this section alone) +# + +libs :: $(OBJDIR) $(NGH2SRC)/lib/config.h $(TARGET_lib) + +nlms :: libs $(OBJDIR)/mod_http2.imp $(TARGET_nlm) + +# +# Updated this target to create necessary directories and copy files to the +# correct place. 
(See $(AP_WORK)/build/NWGNUhead.inc for examples) +# +install :: nlms FORCE + $(call COPY,$(OBJDIR)/*.nlm, $(INSTALLBASE)/modules/) + +clean :: + $(call DEL,$(NGH2SRC)/lib/config.h) +# +# Any specialized rules here +# +vpath %.c $(NGH2SRC)/lib + +$(NGH2SRC)/lib/config.h : NWGNUmod_http2 + @-$(RM) $@ + @echo $(DL)GEN $@$(DL) + @echo $(DL)/* For NetWare target.$(DL) > $@ + @echo $(DL)** Do not edit - created by Make!$(DL) >> $@ + @echo $(DL)*/$(DL) >> $@ + @echo $(DL)#ifndef NGH2_CONFIG_H$(DL) >> $@ + @echo $(DL)#define NGH2_CONFIG_H$(DL) >> $@ + @echo #define HAVE_ARPA_INET_H 1 >> $@ + @echo #define HAVE_CHOWN 1 >> $@ + @echo #define HAVE_DECL_STRERROR_R 1 >> $@ + @echo #define HAVE_DLFCN_H 1 >> $@ + @echo #define HAVE_DUP2 1 >> $@ + @echo #define HAVE_FCNTL_H 1 >> $@ + @echo #define HAVE_GETCWD 1 >> $@ + @echo #define HAVE_INTTYPES_H 1 >> $@ + @echo #define HAVE_LIMITS_H 1 >> $@ + @echo #define HAVE_LOCALTIME_R 1 >> $@ + @echo #define HAVE_MALLOC 1 >> $@ + @echo #define HAVE_MEMCHR 1 >> $@ + @echo #define HAVE_MEMMOVE 1 >> $@ + @echo #define HAVE_MEMORY_H 1 >> $@ + @echo #define HAVE_MEMSET 1 >> $@ + @echo #define HAVE_NETDB_H 1 >> $@ + @echo #define HAVE_NETINET_IN_H 1 >> $@ + @echo #define HAVE_PTRDIFF_T 1 >> $@ + @echo #define HAVE_PWD_H 1 >> $@ + @echo #define HAVE_SOCKET 1 >> $@ + @echo #define HAVE_SQRT 1 >> $@ + @echo #define HAVE_STDDEF_H 1 >> $@ + @echo #define HAVE_STDINT_H 1 >> $@ + @echo #define HAVE_STDLIB_H 1 >> $@ + @echo #define HAVE_STRCHR 1 >> $@ + @echo #define HAVE_STRDUP 1 >> $@ + @echo #define HAVE_STRERROR 1 >> $@ + @echo #define HAVE_STRERROR_R 1 >> $@ + @echo #define HAVE_STRINGS_H 1 >> $@ + @echo #define HAVE_STRING_H 1 >> $@ + @echo #define HAVE_STRSTR 1 >> $@ + @echo #define HAVE_STRTOL 1 >> $@ + @echo #define HAVE_STRTOUL 1 >> $@ + @echo #define HAVE_SYSLOG_H 1 >> $@ + @echo #define HAVE_SYS_SOCKET_H 1 >> $@ + @echo #define HAVE_SYS_STAT_H 1 >> $@ + @echo #define HAVE_SYS_TIME_H 1 >> $@ + @echo #define HAVE_SYS_TYPES_H 1 >> $@ + @echo #define HAVE_TIME_H 1 >> $@ + @echo #define HAVE_UNISTD_H 1 >> $@ + + @echo #define SIZEOF_INT_P 4 >> $@ + @echo #define STDC_HEADERS 1 >> $@ + @echo #define STRERROR_R_CHAR_P 4 >> $@ + +# Hint to compiler a function parameter is not used + @echo #define _U_ >> $@ + + @echo #ifndef __cplusplus >> $@ + @echo #define inline __inline >> $@ + @echo #endif >> $@ + + @echo $(DL)#endif /* NGH2_CONFIG_H */$(DL) >> $@ + +# +# Exports from mod_http2 for mod_proxy_http2 +$(OBJDIR)/mod_http2.imp : NWGNUmod_http2 + @-$(RM) $@ + @echo $(DL)GEN $@$(DL) + @echo $(DL) (HTTP2)$(DL) > $@ + @echo $(DL) http2_module,$(DL) >> $@ + @echo $(DL) nghttp2_is_fatal,$(DL) >> $@ + @echo $(DL) nghttp2_option_del,$(DL) >> $@ + @echo $(DL) nghttp2_option_new,$(DL) >> $@ + @echo $(DL) nghttp2_option_set_no_auto_window_update,$(DL) >> $@ + @echo $(DL) nghttp2_option_set_peer_max_concurrent_streams,$(DL) >> $@ + @echo $(DL) nghttp2_session_callbacks_del,$(DL) >> $@ + @echo $(DL) nghttp2_session_callbacks_new,$(DL) >> $@ + @echo $(DL) nghttp2_session_callbacks_set_before_frame_send_callback,$(DL) >> $@ + @echo $(DL) nghttp2_session_callbacks_set_on_data_chunk_recv_callback,$(DL) >> $@ + @echo $(DL) nghttp2_session_callbacks_set_on_frame_recv_callback,$(DL) >> $@ + @echo $(DL) nghttp2_session_callbacks_set_on_header_callback,$(DL) >> $@ + @echo $(DL) nghttp2_session_callbacks_set_on_stream_close_callback,$(DL) >> $@ + @echo $(DL) nghttp2_session_callbacks_set_send_callback,$(DL) >> $@ + @echo $(DL) nghttp2_session_client_new2,$(DL) >> $@ + @echo $(DL) 
nghttp2_session_consume,$(DL) >> $@ + @echo $(DL) nghttp2_session_consume_connection,$(DL) >> $@ + @echo $(DL) nghttp2_session_del,$(DL) >> $@ + @echo $(DL) nghttp2_session_get_remote_settings,$(DL) >> $@ + @echo $(DL) nghttp2_session_get_stream_user_data,$(DL) >> $@ + @echo $(DL) nghttp2_session_mem_recv,$(DL) >> $@ + @echo $(DL) nghttp2_session_resume_data,$(DL) >> $@ + @echo $(DL) nghttp2_session_send,$(DL) >> $@ + @echo $(DL) nghttp2_session_want_read,$(DL) >> $@ + @echo $(DL) nghttp2_session_want_write,$(DL) >> $@ + @echo $(DL) nghttp2_strerror,$(DL) >> $@ + @echo $(DL) nghttp2_submit_goaway,$(DL) >> $@ + @echo $(DL) nghttp2_submit_ping,$(DL) >> $@ + @echo $(DL) nghttp2_submit_request,$(DL) >> $@ + @echo $(DL) nghttp2_submit_rst_stream,$(DL) >> $@ + @echo $(DL) nghttp2_submit_settings,$(DL) >> $@ + @echo $(DL) nghttp2_submit_window_update,$(DL) >> $@ + @echo $(DL) nghttp2_version$(DL) >> $@ + +# Include the 'tail' makefile that has targets that depend on variables defined +# in this makefile +# + +include $(APBUILD)/NWGNUtail.inc + + diff --git a/modules/http2/NWGNUproxyht2 b/modules/http2/NWGNUproxyht2 new file mode 100644 index 0000000..ca44ad7 --- /dev/null +++ b/modules/http2/NWGNUproxyht2 @@ -0,0 +1,288 @@ +# +# This Makefile requires the environment var NGH2SRC +# pointing to the base directory of nghttp2 source tree. +# + +# +# Declare the sub-directories to be built here +# + +SUBDIRS = \ + $(EOLIST) + +# +# Get the 'head' of the build environment. This includes default targets and +# paths to tools +# + +include $(AP_WORK)/build/NWGNUhead.inc + +# +# build this level's files +# +# Make sure all needed macro's are defined +# + +# +# These directories will be at the beginning of the include list, followed by +# INCDIRS +# +XINCDIRS += \ + $(APR)/include \ + $(APRUTIL)/include \ + $(SRC)/include \ + $(NGH2SRC)/lib/includes \ + $(STDMOD)/proxy \ + $(SERVER)/mpm/NetWare \ + $(NWOS) \ + $(EOLIST) + +# +# These flags will come after CFLAGS +# +XCFLAGS += \ + $(EOLIST) + +# +# These defines will come after DEFINES +# +XDEFINES += \ + $(EOLIST) + +# +# These flags will be added to the link.opt file +# +XLFLAGS += \ + -L$(OBJDIR) \ + $(EOLIST) + +# +# These values will be appended to the correct variables based on the value of +# RELEASE +# +ifeq "$(RELEASE)" "debug" +XINCDIRS += \ + $(EOLIST) + +XCFLAGS += \ + $(EOLIST) + +XDEFINES += \ + $(EOLIST) + +XLFLAGS += \ + $(EOLIST) +endif + +ifeq "$(RELEASE)" "noopt" +XINCDIRS += \ + $(EOLIST) + +XCFLAGS += \ + $(EOLIST) + +XDEFINES += \ + $(EOLIST) + +XLFLAGS += \ + $(EOLIST) +endif + +ifeq "$(RELEASE)" "release" +XINCDIRS += \ + $(EOLIST) + +XCFLAGS += \ + $(EOLIST) + +XDEFINES += \ + $(EOLIST) + +XLFLAGS += \ + $(EOLIST) +endif + +# +# These are used by the link target if an NLM is being generated +# This is used by the link 'name' directive to name the nlm. If left blank +# TARGET_nlm (see below) will be used. +# +NLM_NAME = proxyht2 + +# +# This is used by the link '-desc ' directive. +# If left blank, NLM_NAME will be used. +# +NLM_DESCRIPTION = Apache $(VERSION_STR) HTTP2 Proxy module +# +# This is used by the '-threadname' directive. If left blank, +# NLM_NAME Thread will be used. 
+# +NLM_THREAD_NAME = $(NLM_NAME) + +# +# If this is specified, it will override VERSION value in +# $(AP_WORK)/build/NWGNUenvironment.inc +# +NLM_VERSION = + +# +# If this is specified, it will override the default of 64K +# +NLM_STACK_SIZE = 65536 + +# +# If this is specified it will be used by the link '-entry' directive +# +NLM_ENTRY_SYM = + +# +# If this is specified it will be used by the link '-exit' directive +# +NLM_EXIT_SYM = + +# +# If this is specified it will be used by the link '-check' directive +# +NLM_CHECK_SYM = + +# +# If this is specified it will be used by the link '-flags' directive +# +NLM_FLAGS = + +# +# If this is specified it will be linked in with the XDCData option in the def +# file instead of the default of $(NWOS)/apache.xdc. XDCData can be disabled +# by setting APACHE_UNIPROC in the environment +# +XDCDATA = + +# +# Declare all target files (you must add your files here) +# + +# +# If there is an NLM target, put it here +# +TARGET_nlm = \ + $(OBJDIR)/$(NLM_NAME).nlm \ + $(EOLIST) + +# +# If there is an LIB target, put it here +# +TARGET_lib = \ + $(EOLIST) + +# +# These are the OBJ files needed to create the NLM target above. +# Paths must all use the '/' character +# +FILES_nlm_objs = \ + $(OBJDIR)/mod_proxy_http2.o \ + $(OBJDIR)/h2_proxy_session.o \ + $(OBJDIR)/h2_proxy_util.o \ + $(EOLIST) + +# +# These are the LIB files needed to create the NLM target above. +# These will be added as a library command in the link.opt file. +# +FILES_nlm_libs = \ + $(PRELUDE) \ + $(EOLIST) + +# +# These are the modules that the above NLM target depends on to load. +# These will be added as a module command in the link.opt file. +# +FILES_nlm_modules = \ + Libc \ + Apache2 \ + mod_proxy \ + mod_http2 \ + $(EOLIST) + +# +# If the nlm has a msg file, put it's path here +# +FILE_nlm_msg = + +# +# If the nlm has a hlp file put it's path here +# +FILE_nlm_hlp = + +# +# If this is specified, it will override $(NWOS)\copyright.txt. +# +FILE_nlm_copyright = + +# +# Any additional imports go here +# +FILES_nlm_Ximports = \ + @libc.imp \ + @aprlib.imp \ + @httpd.imp \ + @$(OBJDIR)/mod_http2.imp \ + ap_proxy_acquire_connection \ + ap_proxy_canon_netloc \ + ap_proxy_canonenc \ + ap_proxy_connect_backend \ + ap_proxy_connection_create \ + ap_proxy_cookie_reverse_map \ + ap_proxy_determine_connection \ + ap_proxy_location_reverse_map \ + ap_proxy_port_of_scheme \ + ap_proxy_release_connection \ + ap_proxy_ssl_connection_cleanup \ + ap_sock_disable_nagle \ + proxy_hook_canon_handler \ + proxy_hook_scheme_handler \ + proxy_module \ + proxy_run_detach_backend \ + $(EOLIST) + +# +# Any symbols exported to here +# +FILES_nlm_exports = \ + proxy_http2_module \ + $(EOLIST) + +# +# These are the OBJ files needed to create the LIB target above. +# Paths must all use the '/' character +# +FILES_lib_objs := +# +# implement targets and dependancies (leave this section alone) +# + +libs :: $(OBJDIR) $(TARGET_lib) + +nlms :: libs $(TARGET_nlm) + +# +# Updated this target to create necessary directories and copy files to the +# correct place. 
(See $(AP_WORK)/build/NWGNUhead.inc for examples) +# +install :: nlms FORCE + $(call COPY,$(OBJDIR)/*.nlm, $(INSTALLBASE)/modules/) + +clean :: + +# +# Any specialized rules here +# + +# +# Include the 'tail' makefile that has targets that depend on variables defined +# in this makefile +# + +include $(APBUILD)/NWGNUtail.inc + + diff --git a/modules/http2/README.h2 b/modules/http2/README.h2 new file mode 100644 index 0000000..f2956f3 --- /dev/null +++ b/modules/http2/README.h2 @@ -0,0 +1,70 @@ +The http2 module adds support for the HTTP/2 protocol to the server. + +Specifically, it supports the protocols "h2" (HTTP2 over TLS) and "h2c" +(HTTP2 over plain HTTP connections via Upgrade). Additionally it offers +the "direct" mode for both encrypted and unencrypted connections. + +You may enable it for the whole server or specific virtual hosts only. + + +BUILD + +If you have libnghttp2 (https://nghttp2.org) installed on your system, simply +add + + --enable-http2 + +to your httpd ./configure invocation. Should libnghttp2 reside in a unusual +location, add + + --with-nghttp2=<path> + +to ./configure. <path> is expected to be the installation prefix, so there +should be a <path>/lib/libnghttp2.*. If your system support pkg-config, +<path>/lib/pkgconfig/libnghttp2.pc will be inspected. + +If you want to link nghttp2 statically into the mod_http2 module, you may +similarly to mod_ssl add + + --enable-nghttp2-staticlib-deps + +For this, the lib directory should only contain the libnghttp2.a, not its +shared cousins. + + +CONFIGURATION + +If mod_http2 is enabled for a site or not depends on the new "Protocols" +directive. This directive list all protocols enabled for a server or +virtual host. + +If you do not specify "Protocols" all available protocols are enabled. For +sites using TLS, the protocol supported by mod_http2 is "h2". For cleartext +http:, the offered protocol is "h2c". + +The following is an example of a server that only supports http/1.1 in +general and offers h2 for a specific virtual host. + + ... + Protocols http/1.1 + <virtualhost *:443> + Protocols h2 http/1.1 + ... + </virtualhost> + +Please see the documentation of mod_http2 for a complete list and explanation +of all options. + + +TLS CONFIGURATION + +If you want to use HTTP/2 with a browser, most modern browsers will support +it without further configuration. However, browsers so far only support +HTTP/2 over TLS and are especially picky about the certificate and +encryption ciphers used. + +Server admins may look for up-to-date information about "modern" TLS +compatibility under: + + https://wiki.mozilla.org/Security/Server_Side_TLS#Modern_compatibility + diff --git a/modules/http2/config2.m4 b/modules/http2/config2.m4 new file mode 100644 index 0000000..e8cefe3 --- /dev/null +++ b/modules/http2/config2.m4 @@ -0,0 +1,235 @@ +dnl Licensed to the Apache Software Foundation (ASF) under one or more +dnl contributor license agreements. See the NOTICE file distributed with +dnl this work for additional information regarding copyright ownership. +dnl The ASF licenses this file to You under the Apache License, Version 2.0 +dnl (the "License"); you may not use this file except in compliance with +dnl the License. You may obtain a copy of the License at +dnl +dnl http://www.apache.org/licenses/LICENSE-2.0 +dnl +dnl Unless required by applicable law or agreed to in writing, software +dnl distributed under the License is distributed on an "AS IS" BASIS, +dnl WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+dnl See the License for the specific language governing permissions and +dnl limitations under the License. + +dnl # start of module specific part +APACHE_MODPATH_INIT(http2) + +dnl # list of module object files +http2_objs="dnl +mod_http2.lo dnl +h2_alt_svc.lo dnl +h2_bucket_beam.lo dnl +h2_bucket_eos.lo dnl +h2_config.lo dnl +h2_conn.lo dnl +h2_conn_io.lo dnl +h2_ctx.lo dnl +h2_filter.lo dnl +h2_from_h1.lo dnl +h2_h2.lo dnl +h2_headers.lo dnl +h2_mplx.lo dnl +h2_ngn_shed.lo dnl +h2_push.lo dnl +h2_request.lo dnl +h2_session.lo dnl +h2_stream.lo dnl +h2_switch.lo dnl +h2_task.lo dnl +h2_util.lo dnl +h2_workers.lo dnl +" + +dnl +dnl APACHE_CHECK_NGHTTP2 +dnl +dnl Configure for nghttp2, giving preference to +dnl "--with-nghttp2=<path>" if it was specified. +dnl +AC_DEFUN([APACHE_CHECK_NGHTTP2],[ + AC_CACHE_CHECK([for nghttp2], [ac_cv_nghttp2], [ + dnl initialise the variables we use + ac_cv_nghttp2=no + ap_nghttp2_found="" + ap_nghttp2_base="" + ap_nghttp2_libs="" + + dnl Determine the nghttp2 base directory, if any + AC_MSG_CHECKING([for user-provided nghttp2 base directory]) + AC_ARG_WITH(nghttp2, APACHE_HELP_STRING(--with-nghttp2=PATH, nghttp2 installation directory), [ + dnl If --with-nghttp2 specifies a directory, we use that directory + if test "x$withval" != "xyes" -a "x$withval" != "x"; then + dnl This ensures $withval is actually a directory and that it is absolute + ap_nghttp2_base="`cd $withval ; pwd`" + fi + ]) + if test "x$ap_nghttp2_base" = "x"; then + AC_MSG_RESULT(none) + else + AC_MSG_RESULT($ap_nghttp2_base) + fi + + dnl Run header and version checks + saved_CPPFLAGS="$CPPFLAGS" + saved_LIBS="$LIBS" + saved_LDFLAGS="$LDFLAGS" + + dnl Before doing anything else, load in pkg-config variables + if test -n "$PKGCONFIG"; then + saved_PKG_CONFIG_PATH="$PKG_CONFIG_PATH" + AC_MSG_CHECKING([for pkg-config along $PKG_CONFIG_PATH]) + if test "x$ap_nghttp2_base" != "x" ; then + if test -f "${ap_nghttp2_base}/lib/pkgconfig/libnghttp2.pc"; then + dnl Ensure that the given path is used by pkg-config too, otherwise + dnl the system libnghttp2.pc might be picked up instead. + PKG_CONFIG_PATH="${ap_nghttp2_base}/lib/pkgconfig${PKG_CONFIG_PATH+:}${PKG_CONFIG_PATH}" + export PKG_CONFIG_PATH + elif test -f "${ap_nghttp2_base}/lib64/pkgconfig/libnghttp2.pc"; then + dnl Ensure that the given path is used by pkg-config too, otherwise + dnl the system libnghttp2.pc might be picked up instead. + PKG_CONFIG_PATH="${ap_nghttp2_base}/lib64/pkgconfig${PKG_CONFIG_PATH+:}${PKG_CONFIG_PATH}" + export PKG_CONFIG_PATH + fi + fi + AC_ARG_ENABLE(nghttp2-staticlib-deps,APACHE_HELP_STRING(--enable-nghttp2-staticlib-deps,[link mod_http2 with dependencies of libnghttp2's static libraries (as indicated by "pkg-config --static"). Must be specified in addition to --enable-http2.]), [ + if test "$enableval" = "yes"; then + PKGCONFIG_LIBOPTS="--static" + fi + ]) + ap_nghttp2_libs="`$PKGCONFIG $PKGCONFIG_LIBOPTS --libs-only-l --silence-errors libnghttp2`" + if test $? 
-eq 0; then + ap_nghttp2_found="yes" + pkglookup="`$PKGCONFIG --cflags-only-I libnghttp2`" + APR_ADDTO(CPPFLAGS, [$pkglookup]) + APR_ADDTO(MOD_CFLAGS, [$pkglookup]) + pkglookup="`$PKGCONFIG $PKGCONFIG_LIBOPTS --libs-only-L libnghttp2`" + APR_ADDTO(LDFLAGS, [$pkglookup]) + APR_ADDTO(MOD_LDFLAGS, [$pkglookup]) + pkglookup="`$PKGCONFIG $PKGCONFIG_LIBOPTS --libs-only-other libnghttp2`" + APR_ADDTO(LDFLAGS, [$pkglookup]) + APR_ADDTO(MOD_LDFLAGS, [$pkglookup]) + fi + PKG_CONFIG_PATH="$saved_PKG_CONFIG_PATH" + fi + + dnl fall back to the user-supplied directory if not found via pkg-config + if test "x$ap_nghttp2_base" != "x" -a "x$ap_nghttp2_found" = "x"; then + APR_ADDTO(CPPFLAGS, [-I$ap_nghttp2_base/include]) + APR_ADDTO(MOD_CFLAGS, [-I$ap_nghttp2_base/include]) + APR_ADDTO(LDFLAGS, [-L$ap_nghttp2_base/lib]) + APR_ADDTO(MOD_LDFLAGS, [-L$ap_nghttp2_base/lib]) + if test "x$ap_platform_runtime_link_flag" != "x"; then + APR_ADDTO(LDFLAGS, [$ap_platform_runtime_link_flag$ap_nghttp2_base/lib]) + APR_ADDTO(MOD_LDFLAGS, [$ap_platform_runtime_link_flag$ap_nghttp2_base/lib]) + fi + fi + + AC_MSG_CHECKING([for nghttp2 version >= 1.2.1]) + AC_TRY_COMPILE([#include <nghttp2/nghttp2ver.h>],[ +#if !defined(NGHTTP2_VERSION_NUM) +#error "Missing nghttp2 version" +#endif +#if NGHTTP2_VERSION_NUM < 0x010201 +#error "Unsupported nghttp2 version " NGHTTP2_VERSION_TEXT +#endif], + [AC_MSG_RESULT(OK) + ac_cv_nghttp2=yes], + [AC_MSG_RESULT(FAILED)]) + + if test "x$ac_cv_nghttp2" = "xyes"; then + ap_nghttp2_libs="${ap_nghttp2_libs:--lnghttp2} `$apr_config --libs`" + APR_ADDTO(MOD_LDFLAGS, [$ap_nghttp2_libs]) + APR_ADDTO(LIBS, [$ap_nghttp2_libs]) + + dnl Run library and function checks + liberrors="" + AC_CHECK_HEADERS([nghttp2/nghttp2.h]) + AC_CHECK_FUNCS([nghttp2_session_server_new2], [], [liberrors="yes"]) + if test "x$liberrors" != "x"; then + AC_MSG_WARN([nghttp2 library is unusable]) + fi +dnl # nghttp2 >= 1.3.0: access to stream weights + AC_CHECK_FUNCS([nghttp2_stream_get_weight], [], [liberrors="yes"]) + if test "x$liberrors" != "x"; then + AC_MSG_WARN([nghttp2 version >= 1.3.0 is required]) + fi +dnl # nghttp2 >= 1.5.0: changing stream priorities + AC_CHECK_FUNCS([nghttp2_session_change_stream_priority], + [APR_ADDTO(MOD_CPPFLAGS, ["-DH2_NG2_CHANGE_PRIO"])], []) +dnl # nghttp2 >= 1.14.0: invalid header callback + AC_CHECK_FUNCS([nghttp2_session_callbacks_set_on_invalid_header_callback], + [APR_ADDTO(MOD_CPPFLAGS, ["-DH2_NG2_INVALID_HEADER_CB"])], []) +dnl # nghttp2 >= 1.15.0: get/set stream window sizes + AC_CHECK_FUNCS([nghttp2_session_get_stream_local_window_size], + [APR_ADDTO(MOD_CPPFLAGS, ["-DH2_NG2_LOCAL_WIN_SIZE"])], []) + else + AC_MSG_WARN([nghttp2 version is too old]) + fi + + dnl restore + CPPFLAGS="$saved_CPPFLAGS" + LIBS="$saved_LIBS" + LDFLAGS="$saved_LDFLAGS" + ]) + if test "x$ac_cv_nghttp2" = "xyes"; then + AC_DEFINE(HAVE_NGHTTP2, 1, [Define if nghttp2 is available]) + fi +]) + + +dnl # hook module into the Autoconf mechanism (--enable-http2) +APACHE_MODULE(http2, [HTTP/2 protocol handling in addition to HTTP protocol +handling. Implemented by mod_http2. This module requires a libnghttp2 installation. +See --with-nghttp2 on how to manage non-standard locations. This module +is usually linked shared and requires loading. 
], $http2_objs, , most, [ + APACHE_CHECK_OPENSSL + if test "$ac_cv_openssl" = "yes" ; then + APR_ADDTO(MOD_CPPFLAGS, ["-DH2_OPENSSL"]) + fi + + APACHE_CHECK_NGHTTP2 + if test "$ac_cv_nghttp2" = "yes" ; then + if test "x$enable_http2" = "xshared"; then + # The only symbol which needs to be exported is the module + # structure, so ask libtool to hide everything else: + APR_ADDTO(MOD_HTTP2_LDADD, [-export-symbols-regex http2_module]) + fi + else + enable_http2=no + fi +]) + +# Ensure that other modules can pick up mod_http2.h +# icing: hold back for now until it is more stable +#APR_ADDTO(INCLUDES, [-I\$(top_srcdir)/$modpath_current]) + + + +dnl # list of module object files +proxy_http2_objs="dnl +mod_proxy_http2.lo dnl +h2_proxy_session.lo dnl +h2_proxy_util.lo dnl +" + +dnl # hook module into the Autoconf mechanism (--enable-proxy_http2) +APACHE_MODULE(proxy_http2, [HTTP/2 proxy module. This module requires a libnghttp2 installation. +See --with-nghttp2 on how to manage non-standard locations. Also requires --enable-proxy.], $proxy_http2_objs, , no, [ + APACHE_CHECK_NGHTTP2 + if test "$ac_cv_nghttp2" = "yes" ; then + if test "x$enable_http2" = "xshared"; then + # The only symbol which needs to be exported is the module + # structure, so ask libtool to hide everything else: + APR_ADDTO(MOD_PROXY_HTTP2_LDADD, [-export-symbols-regex proxy_http2_module]) + fi + else + enable_proxy_http2=no + fi +], proxy) + + +dnl # end of module specific part +APACHE_MODPATH_FINISH + diff --git a/modules/http2/h2.h b/modules/http2/h2.h new file mode 100644 index 0000000..38b4019 --- /dev/null +++ b/modules/http2/h2.h @@ -0,0 +1,166 @@ +/* Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef __mod_h2__h2__ +#define __mod_h2__h2__ + +/** + * The magic PRIamble of RFC 7540 that is always sent when starting + * a h2 communication. 
+ */ +extern const char *H2_MAGIC_TOKEN; + +#define H2_ERR_NO_ERROR (0x00) +#define H2_ERR_PROTOCOL_ERROR (0x01) +#define H2_ERR_INTERNAL_ERROR (0x02) +#define H2_ERR_FLOW_CONTROL_ERROR (0x03) +#define H2_ERR_SETTINGS_TIMEOUT (0x04) +#define H2_ERR_STREAM_CLOSED (0x05) +#define H2_ERR_FRAME_SIZE_ERROR (0x06) +#define H2_ERR_REFUSED_STREAM (0x07) +#define H2_ERR_CANCEL (0x08) +#define H2_ERR_COMPRESSION_ERROR (0x09) +#define H2_ERR_CONNECT_ERROR (0x0a) +#define H2_ERR_ENHANCE_YOUR_CALM (0x0b) +#define H2_ERR_INADEQUATE_SECURITY (0x0c) +#define H2_ERR_HTTP_1_1_REQUIRED (0x0d) + +#define H2_HEADER_METHOD ":method" +#define H2_HEADER_METHOD_LEN 7 +#define H2_HEADER_SCHEME ":scheme" +#define H2_HEADER_SCHEME_LEN 7 +#define H2_HEADER_AUTH ":authority" +#define H2_HEADER_AUTH_LEN 10 +#define H2_HEADER_PATH ":path" +#define H2_HEADER_PATH_LEN 5 +#define H2_CRLF "\r\n" + +/* Max data size to write so it fits inside a TLS record */ +#define H2_DATA_CHUNK_SIZE ((16*1024) - 100 - 9) + +/* Size of the frame header itself in HTTP/2 */ +#define H2_FRAME_HDR_LEN 9 + +/* Maximum number of padding bytes in a frame, rfc7540 */ +#define H2_MAX_PADLEN 256 +/* Initial default window size, RFC 7540 ch. 6.5.2 */ +#define H2_INITIAL_WINDOW_SIZE ((64*1024)-1) + +#define H2_STREAM_CLIENT_INITIATED(id) (id&0x01) + +#define H2_ALEN(a) (sizeof(a)/sizeof((a)[0])) + +#define H2MAX(x,y) ((x) > (y) ? (x) : (y)) +#define H2MIN(x,y) ((x) < (y) ? (x) : (y)) + +typedef enum { + H2_DEPENDANT_AFTER, + H2_DEPENDANT_INTERLEAVED, + H2_DEPENDANT_BEFORE, +} h2_dependency; + +typedef struct h2_priority { + h2_dependency dependency; + int weight; +} h2_priority; + +typedef enum { + H2_PUSH_NONE, + H2_PUSH_DEFAULT, + H2_PUSH_HEAD, + H2_PUSH_FAST_LOAD, +} h2_push_policy; + +typedef enum { + H2_SESSION_ST_INIT, /* send initial SETTINGS, etc. */ + H2_SESSION_ST_DONE, /* finished, connection close */ + H2_SESSION_ST_IDLE, /* nothing to write, expecting data inc */ + H2_SESSION_ST_BUSY, /* read/write without stop */ + H2_SESSION_ST_WAIT, /* waiting for tasks reporting back */ + H2_SESSION_ST_CLEANUP, /* pool is being cleaned up */ +} h2_session_state; + +typedef struct h2_session_props { + int accepted_max; /* the highest remote stream id was/will be handled */ + int completed_max; /* the highest remote stream completed */ + int emitted_count; /* the number of local streams sent */ + int emitted_max; /* the highest local stream id sent */ + int error; /* the last session error encountered */ + unsigned int accepting : 1; /* if the session is accepting new streams */ + unsigned int shutdown : 1; /* if the final GOAWAY has been sent */ +} h2_session_props; + +typedef enum h2_stream_state_t { + H2_SS_IDLE, + H2_SS_RSVD_R, + H2_SS_RSVD_L, + H2_SS_OPEN, + H2_SS_CLOSED_R, + H2_SS_CLOSED_L, + H2_SS_CLOSED, + H2_SS_CLEANUP, + H2_SS_MAX +} h2_stream_state_t; + +typedef enum { + H2_SEV_CLOSED_L, + H2_SEV_CLOSED_R, + H2_SEV_CANCELLED, + H2_SEV_EOS_SENT, + H2_SEV_IN_DATA_PENDING, +} h2_stream_event_t; + + +/* h2_request is the transformer of HTTP2 streams into HTTP/1.1 internal + * format that will be fed to various httpd input filters to finally + * become a request_rec to be handled by soemone. + */ +typedef struct h2_request h2_request; + +struct h2_request { + const char *method; /* pseudo header values, see ch. 
8.1.2.3 */ + const char *scheme; + const char *authority; + const char *path; + apr_table_t *headers; + + apr_time_t request_time; + unsigned int chunked : 1; /* iff requst body needs to be forwarded as chunked */ + unsigned int serialize : 1; /* iff this request is written in HTTP/1.1 serialization */ + apr_off_t raw_bytes; /* RAW network bytes that generated this request - if known. */ +}; + +typedef struct h2_headers h2_headers; + +struct h2_headers { + int status; + apr_table_t *headers; + apr_table_t *notes; + apr_off_t raw_bytes; /* RAW network bytes that generated this request - if known. */ +}; + +typedef apr_status_t h2_io_data_cb(void *ctx, const char *data, apr_off_t len); + +typedef int h2_stream_pri_cmp(int stream_id1, int stream_id2, void *ctx); + +/* Note key to attach connection task id to conn_rec/request_rec instances */ + +#define H2_TASK_ID_NOTE "http2-task-id" +#define H2_FILTER_DEBUG_NOTE "http2-debug" +#define H2_HDR_CONFORMANCE "http2-hdr-conformance" +#define H2_HDR_CONFORMANCE_UNSAFE "unsafe" + +#endif /* defined(__mod_h2__h2__) */ diff --git a/modules/http2/h2_alt_svc.c b/modules/http2/h2_alt_svc.c new file mode 100644 index 0000000..295a16d --- /dev/null +++ b/modules/http2/h2_alt_svc.c @@ -0,0 +1,131 @@ +/* Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include <apr_strings.h> +#include <httpd.h> +#include <http_core.h> +#include <http_connection.h> +#include <http_protocol.h> +#include <http_log.h> + +#include "h2_private.h" +#include "h2_alt_svc.h" +#include "h2_ctx.h" +#include "h2_config.h" +#include "h2_h2.h" +#include "h2_util.h" + +static int h2_alt_svc_handler(request_rec *r); + +void h2_alt_svc_register_hooks(void) +{ + ap_hook_post_read_request(h2_alt_svc_handler, NULL, NULL, APR_HOOK_MIDDLE); +} + +/** + * Parse an Alt-Svc specifier as described in "HTTP Alternative Services" + * (https://tools.ietf.org/html/draft-ietf-httpbis-alt-svc-04) + * with the following changes: + * - do not percent encode token values + * - do not use quotation marks + */ +h2_alt_svc *h2_alt_svc_parse(const char *s, apr_pool_t *pool) +{ + const char *sep = ap_strchr_c(s, '='); + if (sep) { + const char *alpn = apr_pstrmemdup(pool, s, sep - s); + const char *host = NULL; + int port = 0; + s = sep + 1; + sep = ap_strchr_c(s, ':'); /* mandatory : */ + if (sep) { + if (sep != s) { /* optional host */ + host = apr_pstrmemdup(pool, s, sep - s); + } + s = sep + 1; + if (*s) { /* must be a port number */ + port = (int)apr_atoi64(s); + if (port > 0 && port < (0x1 << 16)) { + h2_alt_svc *as = apr_pcalloc(pool, sizeof(*as)); + as->alpn = alpn; + as->host = host; + as->port = port; + return as; + } + } + } + } + return NULL; +} + +#define h2_alt_svc_IDX(list, i) ((h2_alt_svc**)(list)->elts)[i] + +static int h2_alt_svc_handler(request_rec *r) +{ + const h2_config *cfg; + int i; + + if (r->connection->keepalives > 0) { + /* Only announce Alt-Svc on the first response */ + return DECLINED; + } + + if (h2_ctx_rget(r)) { + return DECLINED; + } + + cfg = h2_config_sget(r->server); + if (r->hostname && cfg && cfg->alt_svcs && cfg->alt_svcs->nelts > 0) { + const char *alt_svc_used = apr_table_get(r->headers_in, "Alt-Svc-Used"); + if (!alt_svc_used) { + /* We have alt-svcs defined and client is not already using + * one, announce the services that were configured and match. + * The security of this connection determines if we allow + * other host names or ports only. + */ + const char *alt_svc = ""; + const char *svc_ma = ""; + int secure = h2_h2_is_tls(r->connection); + int ma = h2_config_geti(cfg, H2_CONF_ALT_SVC_MAX_AGE); + if (ma >= 0) { + svc_ma = apr_psprintf(r->pool, "; ma=%d", ma); + } + ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(03043) + "h2_alt_svc: announce %s for %s:%d", + (secure? "secure" : "insecure"), + r->hostname, (int)r->server->port); + for (i = 0; i < cfg->alt_svcs->nelts; ++i) { + h2_alt_svc *as = h2_alt_svc_IDX(cfg->alt_svcs, i); + const char *ahost = as->host; + if (ahost && !apr_strnatcasecmp(ahost, r->hostname)) { + ahost = NULL; + } + if (secure || !ahost) { + alt_svc = apr_psprintf(r->pool, "%s%s%s=\"%s:%d\"%s", + alt_svc, + (*alt_svc? ", " : ""), as->alpn, + ahost? ahost : "", as->port, + svc_ma); + } + } + if (*alt_svc) { + apr_table_setn(r->headers_out, "Alt-Svc", alt_svc); + } + } + } + + return DECLINED; +} diff --git a/modules/http2/h2_alt_svc.h b/modules/http2/h2_alt_svc.h new file mode 100644 index 0000000..479e4d1 --- /dev/null +++ b/modules/http2/h2_alt_svc.h @@ -0,0 +1,40 @@ +/* Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. 
+ * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef __mod_h2__h2_alt_svc__ +#define __mod_h2__h2_alt_svc__ + +typedef struct h2_alt_svc h2_alt_svc; + +struct h2_alt_svc { + const char *alpn; + const char *host; + int port; +}; + +void h2_alt_svc_register_hooks(void); + +/** + * Parse an Alt-Svc specifier as described in "HTTP Alternative Services" + * (https://tools.ietf.org/html/draft-ietf-httpbis-alt-svc-04) + * with the following changes: + * - do not percent encode token values + * - do not use quotation marks + */ +h2_alt_svc *h2_alt_svc_parse(const char *s, apr_pool_t *pool); + + +#endif /* defined(__mod_h2__h2_alt_svc__) */ diff --git a/modules/http2/h2_bucket_beam.c b/modules/http2/h2_bucket_beam.c new file mode 100644 index 0000000..f79cbe3 --- /dev/null +++ b/modules/http2/h2_bucket_beam.c @@ -0,0 +1,1283 @@ +/* Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include <apr_lib.h> +#include <apr_atomic.h> +#include <apr_strings.h> +#include <apr_time.h> +#include <apr_buckets.h> +#include <apr_thread_mutex.h> +#include <apr_thread_cond.h> + +#include <httpd.h> +#include <http_protocol.h> +#include <http_log.h> + +#include "h2_private.h" +#include "h2_util.h" +#include "h2_bucket_beam.h" + +static void h2_beam_emitted(h2_bucket_beam *beam, h2_beam_proxy *proxy); + +#define H2_BPROXY_NEXT(e) APR_RING_NEXT((e), link) +#define H2_BPROXY_PREV(e) APR_RING_PREV((e), link) +#define H2_BPROXY_REMOVE(e) APR_RING_REMOVE((e), link) + +#define H2_BPROXY_LIST_INIT(b) APR_RING_INIT(&(b)->list, h2_beam_proxy, link); +#define H2_BPROXY_LIST_SENTINEL(b) APR_RING_SENTINEL(&(b)->list, h2_beam_proxy, link) +#define H2_BPROXY_LIST_EMPTY(b) APR_RING_EMPTY(&(b)->list, h2_beam_proxy, link) +#define H2_BPROXY_LIST_FIRST(b) APR_RING_FIRST(&(b)->list) +#define H2_BPROXY_LIST_LAST(b) APR_RING_LAST(&(b)->list) +#define H2_PROXY_BLIST_INSERT_HEAD(b, e) do { \ + h2_beam_proxy *ap__b = (e); \ + APR_RING_INSERT_HEAD(&(b)->list, ap__b, h2_beam_proxy, link); \ + } while (0) +#define H2_BPROXY_LIST_INSERT_TAIL(b, e) do { \ + h2_beam_proxy *ap__b = (e); \ + APR_RING_INSERT_TAIL(&(b)->list, ap__b, h2_beam_proxy, link); \ + } while (0) +#define H2_BPROXY_LIST_CONCAT(a, b) do { \ + APR_RING_CONCAT(&(a)->list, &(b)->list, h2_beam_proxy, link); \ + } while (0) +#define H2_BPROXY_LIST_PREPEND(a, b) do { \ + APR_RING_PREPEND(&(a)->list, &(b)->list, h2_beam_proxy, link); \ + } while (0) + + +/******************************************************************************* + * beam bucket with reference to beam and bucket it represents + ******************************************************************************/ + +const apr_bucket_type_t h2_bucket_type_beam; + +#define H2_BUCKET_IS_BEAM(e) (e->type == &h2_bucket_type_beam) + +struct h2_beam_proxy { + apr_bucket_refcount refcount; + APR_RING_ENTRY(h2_beam_proxy) link; + h2_bucket_beam *beam; + apr_bucket *bsender; + apr_size_t n; +}; + +static const char Dummy = '\0'; + +static apr_status_t beam_bucket_read(apr_bucket *b, const char **str, + apr_size_t *len, apr_read_type_e block) +{ + h2_beam_proxy *d = b->data; + if (d->bsender) { + const char *data; + apr_status_t status = apr_bucket_read(d->bsender, &data, len, block); + if (status == APR_SUCCESS) { + *str = data + b->start; + *len = b->length; + } + return status; + } + *str = &Dummy; + *len = 0; + return APR_ECONNRESET; +} + +static void beam_bucket_destroy(void *data) +{ + h2_beam_proxy *d = data; + + if (apr_bucket_shared_destroy(d)) { + /* When the beam gets destroyed before this bucket, it will + * NULLify its reference here. This is not protected by a mutex, + * so it will not help with race conditions. + * But it lets us shut down memory pool with circulare beam + * references. */ + if (d->beam) { + h2_beam_emitted(d->beam, d); + } + apr_bucket_free(d); + } +} + +static apr_bucket * h2_beam_bucket_make(apr_bucket *b, + h2_bucket_beam *beam, + apr_bucket *bsender, apr_size_t n) +{ + h2_beam_proxy *d; + + d = apr_bucket_alloc(sizeof(*d), b->list); + H2_BPROXY_LIST_INSERT_TAIL(&beam->proxies, d); + d->beam = beam; + d->bsender = bsender; + d->n = n; + + b = apr_bucket_shared_make(b, d, 0, bsender? 
bsender->length : 0); + b->type = &h2_bucket_type_beam; + + return b; +} + +static apr_bucket *h2_beam_bucket_create(h2_bucket_beam *beam, + apr_bucket *bsender, + apr_bucket_alloc_t *list, + apr_size_t n) +{ + apr_bucket *b = apr_bucket_alloc(sizeof(*b), list); + + APR_BUCKET_INIT(b); + b->free = apr_bucket_free; + b->list = list; + return h2_beam_bucket_make(b, beam, bsender, n); +} + +const apr_bucket_type_t h2_bucket_type_beam = { + "BEAM", 5, APR_BUCKET_DATA, + beam_bucket_destroy, + beam_bucket_read, + apr_bucket_setaside_noop, + apr_bucket_shared_split, + apr_bucket_shared_copy +}; + +/******************************************************************************* + * h2_blist, a brigade without allocations + ******************************************************************************/ + +static apr_array_header_t *beamers; + +static apr_status_t cleanup_beamers(void *dummy) +{ + (void)dummy; + beamers = NULL; + return APR_SUCCESS; +} + +void h2_register_bucket_beamer(h2_bucket_beamer *beamer) +{ + if (!beamers) { + apr_pool_cleanup_register(apr_hook_global_pool, NULL, + cleanup_beamers, apr_pool_cleanup_null); + beamers = apr_array_make(apr_hook_global_pool, 10, + sizeof(h2_bucket_beamer*)); + } + APR_ARRAY_PUSH(beamers, h2_bucket_beamer*) = beamer; +} + +static apr_bucket *h2_beam_bucket(h2_bucket_beam *beam, + apr_bucket_brigade *dest, + const apr_bucket *src) +{ + apr_bucket *b = NULL; + int i; + if (beamers) { + for (i = 0; i < beamers->nelts && b == NULL; ++i) { + h2_bucket_beamer *beamer; + + beamer = APR_ARRAY_IDX(beamers, i, h2_bucket_beamer*); + b = beamer(beam, dest, src); + } + } + return b; +} + + +/******************************************************************************* + * bucket beam that can transport buckets across threads + ******************************************************************************/ + +static void mutex_leave(void *ctx, apr_thread_mutex_t *lock) +{ + apr_thread_mutex_unlock(lock); +} + +static apr_status_t mutex_enter(void *ctx, h2_beam_lock *pbl) +{ + h2_bucket_beam *beam = ctx; + pbl->mutex = beam->lock; + pbl->leave = mutex_leave; + return apr_thread_mutex_lock(pbl->mutex); +} + +static apr_status_t enter_yellow(h2_bucket_beam *beam, h2_beam_lock *pbl) +{ + return mutex_enter(beam, pbl); +} + +static void leave_yellow(h2_bucket_beam *beam, h2_beam_lock *pbl) +{ + if (pbl->leave) { + pbl->leave(pbl->leave_ctx, pbl->mutex); + } +} + +static apr_off_t bucket_mem_used(apr_bucket *b) +{ + if (APR_BUCKET_IS_FILE(b)) { + return 0; + } + else { + /* should all have determinate length */ + return b->length; + } +} + +static int report_consumption(h2_bucket_beam *beam, h2_beam_lock *pbl) +{ + int rv = 0; + apr_off_t len = beam->received_bytes - beam->cons_bytes_reported; + h2_beam_io_callback *cb = beam->cons_io_cb; + + if (len > 0) { + if (cb) { + void *ctx = beam->cons_ctx; + + if (pbl) leave_yellow(beam, pbl); + cb(ctx, beam, len); + if (pbl) enter_yellow(beam, pbl); + rv = 1; + } + beam->cons_bytes_reported += len; + } + return rv; +} + +static void report_prod_io(h2_bucket_beam *beam, int force, h2_beam_lock *pbl) +{ + apr_off_t len = beam->sent_bytes - beam->prod_bytes_reported; + if (force || len > 0) { + h2_beam_io_callback *cb = beam->prod_io_cb; + if (cb) { + void *ctx = beam->prod_ctx; + + leave_yellow(beam, pbl); + cb(ctx, beam, len); + enter_yellow(beam, pbl); + } + beam->prod_bytes_reported += len; + } +} + +static apr_size_t calc_buffered(h2_bucket_beam *beam) +{ + apr_size_t len = 0; + apr_bucket *b; + for (b = 
H2_BLIST_FIRST(&beam->send_list); + b != H2_BLIST_SENTINEL(&beam->send_list); + b = APR_BUCKET_NEXT(b)) { + if (b->length == ((apr_size_t)-1)) { + /* do not count */ + } + else if (APR_BUCKET_IS_FILE(b)) { + /* if unread, has no real mem footprint. */ + } + else { + len += b->length; + } + } + return len; +} + +static void r_purge_sent(h2_bucket_beam *beam) +{ + apr_bucket *b; + /* delete all sender buckets in purge brigade, needs to be called + * from sender thread only */ + while (!H2_BLIST_EMPTY(&beam->purge_list)) { + b = H2_BLIST_FIRST(&beam->purge_list); + apr_bucket_delete(b); + } +} + +static apr_size_t calc_space_left(h2_bucket_beam *beam) +{ + if (beam->max_buf_size > 0) { + apr_off_t len = calc_buffered(beam); + return (beam->max_buf_size > len? (beam->max_buf_size - len) : 0); + } + return APR_SIZE_MAX; +} + +static int buffer_is_empty(h2_bucket_beam *beam) +{ + return ((!beam->recv_buffer || APR_BRIGADE_EMPTY(beam->recv_buffer)) + && H2_BLIST_EMPTY(&beam->send_list)); +} + +static apr_status_t wait_empty(h2_bucket_beam *beam, apr_read_type_e block, + apr_thread_mutex_t *lock) +{ + apr_status_t rv = APR_SUCCESS; + + while (!buffer_is_empty(beam) && APR_SUCCESS == rv) { + if (APR_BLOCK_READ != block || !lock) { + rv = APR_EAGAIN; + } + else if (beam->timeout > 0) { + rv = apr_thread_cond_timedwait(beam->change, lock, beam->timeout); + } + else { + rv = apr_thread_cond_wait(beam->change, lock); + } + } + return rv; +} + +static apr_status_t wait_not_empty(h2_bucket_beam *beam, apr_read_type_e block, + apr_thread_mutex_t *lock) +{ + apr_status_t rv = APR_SUCCESS; + + while (buffer_is_empty(beam) && APR_SUCCESS == rv) { + if (beam->aborted) { + rv = APR_ECONNABORTED; + } + else if (beam->closed) { + rv = APR_EOF; + } + else if (APR_BLOCK_READ != block || !lock) { + rv = APR_EAGAIN; + } + else if (beam->timeout > 0) { + rv = apr_thread_cond_timedwait(beam->change, lock, beam->timeout); + } + else { + rv = apr_thread_cond_wait(beam->change, lock); + } + } + return rv; +} + +static apr_status_t wait_not_full(h2_bucket_beam *beam, apr_read_type_e block, + apr_size_t *pspace_left, h2_beam_lock *bl) +{ + apr_status_t rv = APR_SUCCESS; + apr_size_t left; + + while (0 == (left = calc_space_left(beam)) && APR_SUCCESS == rv) { + if (beam->aborted) { + rv = APR_ECONNABORTED; + } + else if (block != APR_BLOCK_READ || !bl->mutex) { + rv = APR_EAGAIN; + } + else { + if (beam->timeout > 0) { + rv = apr_thread_cond_timedwait(beam->change, bl->mutex, beam->timeout); + } + else { + rv = apr_thread_cond_wait(beam->change, bl->mutex); + } + } + } + *pspace_left = left; + return rv; +} + +static void h2_beam_emitted(h2_bucket_beam *beam, h2_beam_proxy *proxy) +{ + h2_beam_lock bl; + apr_bucket *b, *next; + + if (enter_yellow(beam, &bl) == APR_SUCCESS) { + /* even when beam buckets are split, only the one where + * refcount drops to 0 will call us */ + H2_BPROXY_REMOVE(proxy); + /* invoked from receiver thread, the last beam bucket for the send + * bucket is about to be destroyed. + * remove it from the hold, where it should be now */ + if (proxy->bsender) { + for (b = H2_BLIST_FIRST(&beam->hold_list); + b != H2_BLIST_SENTINEL(&beam->hold_list); + b = APR_BUCKET_NEXT(b)) { + if (b == proxy->bsender) { + break; + } + } + if (b != H2_BLIST_SENTINEL(&beam->hold_list)) { + /* bucket is in hold as it should be, mark this one + * and all before it for purging. 
We might have placed meta + * buckets without a receiver proxy into the hold before it + * and schedule them for purging now */ + for (b = H2_BLIST_FIRST(&beam->hold_list); + b != H2_BLIST_SENTINEL(&beam->hold_list); + b = next) { + next = APR_BUCKET_NEXT(b); + if (b == proxy->bsender) { + APR_BUCKET_REMOVE(b); + H2_BLIST_INSERT_TAIL(&beam->purge_list, b); + break; + } + else if (APR_BUCKET_IS_METADATA(b)) { + APR_BUCKET_REMOVE(b); + H2_BLIST_INSERT_TAIL(&beam->purge_list, b); + } + else { + /* another data bucket before this one in hold. this + * is normal since DATA buckets need not be destroyed + * in order */ + } + } + + proxy->bsender = NULL; + } + else { + /* it should be there unless we screwed up */ + ap_log_perror(APLOG_MARK, APLOG_WARNING, 0, beam->send_pool, + APLOGNO(03384) "h2_beam(%d-%s): emitted bucket not " + "in hold, n=%d", beam->id, beam->tag, + (int)proxy->n); + ap_assert(!proxy->bsender); + } + } + /* notify anyone waiting on space to become available */ + if (!bl.mutex) { + r_purge_sent(beam); + } + else { + apr_thread_cond_broadcast(beam->change); + } + leave_yellow(beam, &bl); + } +} + +static void h2_blist_cleanup(h2_blist *bl) +{ + apr_bucket *e; + + while (!H2_BLIST_EMPTY(bl)) { + e = H2_BLIST_FIRST(bl); + apr_bucket_delete(e); + } +} + +static apr_status_t beam_close(h2_bucket_beam *beam) +{ + if (!beam->closed) { + beam->closed = 1; + apr_thread_cond_broadcast(beam->change); + } + return APR_SUCCESS; +} + +int h2_beam_is_closed(h2_bucket_beam *beam) +{ + return beam->closed; +} + +static int pool_register(h2_bucket_beam *beam, apr_pool_t *pool, + apr_status_t (*cleanup)(void *)) +{ + if (pool && pool != beam->pool) { + apr_pool_pre_cleanup_register(pool, beam, cleanup); + return 1; + } + return 0; +} + +static int pool_kill(h2_bucket_beam *beam, apr_pool_t *pool, + apr_status_t (*cleanup)(void *)) { + if (pool && pool != beam->pool) { + apr_pool_cleanup_kill(pool, beam, cleanup); + return 1; + } + return 0; +} + +static apr_status_t beam_recv_cleanup(void *data) +{ + h2_bucket_beam *beam = data; + /* receiver pool has gone away, clear references */ + beam->recv_buffer = NULL; + beam->recv_pool = NULL; + return APR_SUCCESS; +} + +static apr_status_t beam_send_cleanup(void *data) +{ + h2_bucket_beam *beam = data; + /* sender is going away, clear up all references to its memory */ + r_purge_sent(beam); + h2_blist_cleanup(&beam->send_list); + report_consumption(beam, NULL); + while (!H2_BPROXY_LIST_EMPTY(&beam->proxies)) { + h2_beam_proxy *proxy = H2_BPROXY_LIST_FIRST(&beam->proxies); + H2_BPROXY_REMOVE(proxy); + proxy->beam = NULL; + proxy->bsender = NULL; + } + h2_blist_cleanup(&beam->purge_list); + h2_blist_cleanup(&beam->hold_list); + beam->send_pool = NULL; + return APR_SUCCESS; +} + +static void beam_set_send_pool(h2_bucket_beam *beam, apr_pool_t *pool) +{ + if (beam->send_pool != pool) { + if (beam->send_pool && beam->send_pool != beam->pool) { + pool_kill(beam, beam->send_pool, beam_send_cleanup); + beam_send_cleanup(beam); + } + beam->send_pool = pool; + pool_register(beam, beam->send_pool, beam_send_cleanup); + } +} + +static void recv_buffer_cleanup(h2_bucket_beam *beam, h2_beam_lock *bl) +{ + if (beam->recv_buffer && !APR_BRIGADE_EMPTY(beam->recv_buffer)) { + apr_bucket_brigade *bb = beam->recv_buffer; + apr_off_t bblen = 0; + + beam->recv_buffer = NULL; + apr_brigade_length(bb, 0, &bblen); + beam->received_bytes += bblen; + + /* need to do this unlocked since bucket destroy might + * call this beam again. 
*/ + if (bl) leave_yellow(beam, bl); + apr_brigade_destroy(bb); + if (bl) enter_yellow(beam, bl); + + apr_thread_cond_broadcast(beam->change); + if (beam->cons_ev_cb) { + beam->cons_ev_cb(beam->cons_ctx, beam); + } + } +} + +static apr_status_t beam_cleanup(h2_bucket_beam *beam, int from_pool) +{ + apr_status_t status = APR_SUCCESS; + int safe_send = (beam->owner == H2_BEAM_OWNER_SEND); + int safe_recv = (beam->owner == H2_BEAM_OWNER_RECV); + + /* + * Owner of the beam is going away, depending on which side it owns, + * cleanup strategies will differ. + * + * In general, receiver holds references to memory from sender. + * Clean up receiver first, if safe, then cleanup sender, if safe. + */ + + /* When called from pool destroy, io callbacks are disabled */ + if (from_pool) { + beam->cons_io_cb = NULL; + } + + /* When modify send is not safe, this means we still have multi-thread + * protection and the owner is receiving the buckets. If the sending + * side has not gone away, this means we could have dangling buckets + * in our lists that never get destroyed. This should not happen. */ + ap_assert(safe_send || !beam->send_pool); + if (!H2_BLIST_EMPTY(&beam->send_list)) { + ap_assert(beam->send_pool); + } + + if (safe_recv) { + if (beam->recv_pool) { + pool_kill(beam, beam->recv_pool, beam_recv_cleanup); + beam->recv_pool = NULL; + } + recv_buffer_cleanup(beam, NULL); + } + else { + beam->recv_buffer = NULL; + beam->recv_pool = NULL; + } + + if (safe_send && beam->send_pool) { + pool_kill(beam, beam->send_pool, beam_send_cleanup); + status = beam_send_cleanup(beam); + } + + if (safe_recv) { + ap_assert(H2_BPROXY_LIST_EMPTY(&beam->proxies)); + ap_assert(H2_BLIST_EMPTY(&beam->send_list)); + ap_assert(H2_BLIST_EMPTY(&beam->hold_list)); + ap_assert(H2_BLIST_EMPTY(&beam->purge_list)); + } + return status; +} + +static apr_status_t beam_pool_cleanup(void *data) +{ + return beam_cleanup(data, 1); +} + +apr_status_t h2_beam_destroy(h2_bucket_beam *beam) +{ + apr_pool_cleanup_kill(beam->pool, beam, beam_pool_cleanup); + return beam_cleanup(beam, 0); +} + +apr_status_t h2_beam_create(h2_bucket_beam **pbeam, apr_pool_t *pool, + int id, const char *tag, + h2_beam_owner_t owner, + apr_size_t max_buf_size, + apr_interval_time_t timeout) +{ + h2_bucket_beam *beam; + apr_status_t rv = APR_SUCCESS; + + beam = apr_pcalloc(pool, sizeof(*beam)); + if (!beam) { + return APR_ENOMEM; + } + + beam->id = id; + beam->tag = tag; + beam->pool = pool; + beam->owner = owner; + H2_BLIST_INIT(&beam->send_list); + H2_BLIST_INIT(&beam->hold_list); + H2_BLIST_INIT(&beam->purge_list); + H2_BPROXY_LIST_INIT(&beam->proxies); + beam->tx_mem_limits = 1; + beam->max_buf_size = max_buf_size; + beam->timeout = timeout; + + rv = apr_thread_mutex_create(&beam->lock, APR_THREAD_MUTEX_DEFAULT, pool); + if (APR_SUCCESS == rv) { + rv = apr_thread_cond_create(&beam->change, pool); + if (APR_SUCCESS == rv) { + apr_pool_pre_cleanup_register(pool, beam, beam_pool_cleanup); + *pbeam = beam; + } + } + return rv; +} + +void h2_beam_buffer_size_set(h2_bucket_beam *beam, apr_size_t buffer_size) +{ + h2_beam_lock bl; + + if (enter_yellow(beam, &bl) == APR_SUCCESS) { + beam->max_buf_size = buffer_size; + leave_yellow(beam, &bl); + } +} + +apr_size_t h2_beam_buffer_size_get(h2_bucket_beam *beam) +{ + h2_beam_lock bl; + apr_size_t buffer_size = 0; + + if (beam && enter_yellow(beam, &bl) == APR_SUCCESS) { + buffer_size = beam->max_buf_size; + leave_yellow(beam, &bl); + } + return buffer_size; +} + +void h2_beam_timeout_set(h2_bucket_beam *beam, 
apr_interval_time_t timeout) +{ + h2_beam_lock bl; + + if (enter_yellow(beam, &bl) == APR_SUCCESS) { + beam->timeout = timeout; + leave_yellow(beam, &bl); + } +} + +apr_interval_time_t h2_beam_timeout_get(h2_bucket_beam *beam) +{ + h2_beam_lock bl; + apr_interval_time_t timeout = 0; + + if (enter_yellow(beam, &bl) == APR_SUCCESS) { + timeout = beam->timeout; + leave_yellow(beam, &bl); + } + return timeout; +} + +void h2_beam_abort(h2_bucket_beam *beam) +{ + h2_beam_lock bl; + + if (beam && enter_yellow(beam, &bl) == APR_SUCCESS) { + beam->aborted = 1; + r_purge_sent(beam); + h2_blist_cleanup(&beam->send_list); + report_consumption(beam, &bl); + apr_thread_cond_broadcast(beam->change); + leave_yellow(beam, &bl); + } +} + +apr_status_t h2_beam_close(h2_bucket_beam *beam) +{ + h2_beam_lock bl; + + if (beam && enter_yellow(beam, &bl) == APR_SUCCESS) { + r_purge_sent(beam); + beam_close(beam); + report_consumption(beam, &bl); + leave_yellow(beam, &bl); + } + return beam->aborted? APR_ECONNABORTED : APR_SUCCESS; +} + +apr_status_t h2_beam_leave(h2_bucket_beam *beam) +{ + h2_beam_lock bl; + + if (beam && enter_yellow(beam, &bl) == APR_SUCCESS) { + recv_buffer_cleanup(beam, &bl); + beam->aborted = 1; + beam_close(beam); + leave_yellow(beam, &bl); + } + return APR_SUCCESS; +} + +apr_status_t h2_beam_wait_empty(h2_bucket_beam *beam, apr_read_type_e block) +{ + apr_status_t status; + h2_beam_lock bl; + + if ((status = enter_yellow(beam, &bl)) == APR_SUCCESS) { + status = wait_empty(beam, block, bl.mutex); + leave_yellow(beam, &bl); + } + return status; +} + +static void move_to_hold(h2_bucket_beam *beam, + apr_bucket_brigade *sender_bb) +{ + apr_bucket *b; + while (sender_bb && !APR_BRIGADE_EMPTY(sender_bb)) { + b = APR_BRIGADE_FIRST(sender_bb); + APR_BUCKET_REMOVE(b); + H2_BLIST_INSERT_TAIL(&beam->send_list, b); + } +} + +static apr_status_t append_bucket(h2_bucket_beam *beam, + apr_bucket *b, + apr_read_type_e block, + apr_size_t *pspace_left, + h2_beam_lock *pbl) +{ + const char *data; + apr_size_t len; + apr_status_t status; + int can_beam = 0, check_len; + + if (beam->aborted) { + return APR_ECONNABORTED; + } + + if (APR_BUCKET_IS_METADATA(b)) { + if (APR_BUCKET_IS_EOS(b)) { + beam->closed = 1; + } + APR_BUCKET_REMOVE(b); + H2_BLIST_INSERT_TAIL(&beam->send_list, b); + return APR_SUCCESS; + } + else if (APR_BUCKET_IS_FILE(b)) { + /* For file buckets the problem is their internal readpool that + * is used on the first read to allocate buffer/mmap. + * Since setting aside a file bucket will de-register the + * file cleanup function from the previous pool, we need to + * call that only from the sender thread. + * + * Currently, we do not handle file bucket with refcount > 1 as + * the beam is then not in complete control of the file's lifetime. + * Which results in the bug that a file get closed by the receiver + * while the sender or the beam still have buckets using it. + * + * Additionally, we allow callbacks to prevent beaming file + * handles across. The use case for this is to limit the number + * of open file handles and rather use a less efficient beam + * transport. 
*/ + apr_bucket_file *bf = b->data; + apr_file_t *fd = bf->fd; + can_beam = (bf->refcount.refcount == 1); + if (can_beam && beam->can_beam_fn) { + can_beam = beam->can_beam_fn(beam->can_beam_ctx, beam, fd); + } + check_len = !can_beam; + } + else { + if (b->length == ((apr_size_t)-1)) { + const char *data; + status = apr_bucket_read(b, &data, &len, APR_BLOCK_READ); + if (status != APR_SUCCESS) { + return status; + } + } + check_len = 1; + } + + if (check_len) { + if (b->length > *pspace_left) { + apr_bucket_split(b, *pspace_left); + } + *pspace_left -= b->length; + } + + /* The fundamental problem is that reading a sender bucket from + * a receiver thread is a total NO GO, because the bucket might use + * its pool/bucket_alloc from a foreign thread and that will + * corrupt. */ + status = APR_ENOTIMPL; + if (APR_BUCKET_IS_TRANSIENT(b)) { + /* this takes care of transient buckets and converts them + * into heap ones. Other bucket types might or might not be + * affected by this. */ + status = apr_bucket_setaside(b, beam->send_pool); + } + else if (APR_BUCKET_IS_HEAP(b)) { + /* For heap buckets read from a receiver thread is fine. The + * data will be there and live until the bucket itself is + * destroyed. */ + status = APR_SUCCESS; + } + else if (APR_BUCKET_IS_POOL(b)) { + /* pool buckets are bastards that register at pool cleanup + * to morph themselves into heap buckets. That may happen anytime, + * even after the bucket data pointer has been read. So at + * any time inside the receiver thread, the pool bucket memory + * may disappear. yikes. */ + status = apr_bucket_read(b, &data, &len, APR_BLOCK_READ); + if (status == APR_SUCCESS) { + apr_bucket_heap_make(b, data, len, NULL); + } + } + else if (APR_BUCKET_IS_FILE(b) && can_beam) { + status = apr_bucket_setaside(b, beam->send_pool); + } + + if (status == APR_ENOTIMPL) { + /* we have no knowledge about the internals of this bucket, + * but hope that after read, its data stays immutable for the + * lifetime of the bucket. (see pool bucket handling above for + * a counter example). + * We do the read while in the sender thread, so that the bucket may + * use pools/allocators safely. 
*/ + status = apr_bucket_read(b, &data, &len, APR_BLOCK_READ); + if (status == APR_SUCCESS) { + status = apr_bucket_setaside(b, beam->send_pool); + } + } + + if (status != APR_SUCCESS && status != APR_ENOTIMPL) { + return status; + } + + APR_BUCKET_REMOVE(b); + H2_BLIST_INSERT_TAIL(&beam->send_list, b); + beam->sent_bytes += b->length; + + return APR_SUCCESS; +} + +void h2_beam_send_from(h2_bucket_beam *beam, apr_pool_t *p) +{ + h2_beam_lock bl; + /* Called from the sender thread to add buckets to the beam */ + if (enter_yellow(beam, &bl) == APR_SUCCESS) { + r_purge_sent(beam); + beam_set_send_pool(beam, p); + leave_yellow(beam, &bl); + } +} + +apr_status_t h2_beam_send(h2_bucket_beam *beam, + apr_bucket_brigade *sender_bb, + apr_read_type_e block) +{ + apr_bucket *b; + apr_status_t rv = APR_SUCCESS; + apr_size_t space_left = 0; + h2_beam_lock bl; + + /* Called from the sender thread to add buckets to the beam */ + if (enter_yellow(beam, &bl) == APR_SUCCESS) { + ap_assert(beam->send_pool); + r_purge_sent(beam); + + if (beam->aborted) { + move_to_hold(beam, sender_bb); + rv = APR_ECONNABORTED; + } + else if (sender_bb) { + int force_report = !APR_BRIGADE_EMPTY(sender_bb); + + space_left = calc_space_left(beam); + while (!APR_BRIGADE_EMPTY(sender_bb) && APR_SUCCESS == rv) { + if (space_left <= 0) { + report_prod_io(beam, force_report, &bl); + r_purge_sent(beam); + rv = wait_not_full(beam, block, &space_left, &bl); + if (APR_SUCCESS != rv) { + break; + } + } + b = APR_BRIGADE_FIRST(sender_bb); + rv = append_bucket(beam, b, block, &space_left, &bl); + } + + report_prod_io(beam, force_report, &bl); + apr_thread_cond_broadcast(beam->change); + } + report_consumption(beam, &bl); + leave_yellow(beam, &bl); + } + return rv; +} + +apr_status_t h2_beam_receive(h2_bucket_beam *beam, + apr_bucket_brigade *bb, + apr_read_type_e block, + apr_off_t readbytes) +{ + h2_beam_lock bl; + apr_bucket *bsender, *brecv, *ng; + int transferred = 0; + apr_status_t status = APR_SUCCESS; + apr_off_t remain; + int transferred_buckets = 0; + + /* Called from the receiver thread to take buckets from the beam */ + if (enter_yellow(beam, &bl) == APR_SUCCESS) { + if (readbytes <= 0) { + readbytes = APR_SIZE_MAX; + } + remain = readbytes; + +transfer: + if (beam->aborted) { + recv_buffer_cleanup(beam, &bl); + status = APR_ECONNABORTED; + goto leave; + } + + /* transfer enough buckets from our receiver brigade, if we have one */ + while (remain >= 0 + && beam->recv_buffer + && !APR_BRIGADE_EMPTY(beam->recv_buffer)) { + + brecv = APR_BRIGADE_FIRST(beam->recv_buffer); + if (brecv->length > 0 && remain <= 0) { + break; + } + APR_BUCKET_REMOVE(brecv); + APR_BRIGADE_INSERT_TAIL(bb, brecv); + remain -= brecv->length; + ++transferred; + } + + /* transfer from our sender brigade, transforming sender buckets to + * receiver ones until we have enough */ + while (remain >= 0 && !H2_BLIST_EMPTY(&beam->send_list)) { + + brecv = NULL; + bsender = H2_BLIST_FIRST(&beam->send_list); + if (bsender->length > 0 && remain <= 0) { + break; + } + + if (APR_BUCKET_IS_METADATA(bsender)) { + if (APR_BUCKET_IS_EOS(bsender)) { + brecv = apr_bucket_eos_create(bb->bucket_alloc); + beam->close_sent = 1; + } + else if (APR_BUCKET_IS_FLUSH(bsender)) { + brecv = apr_bucket_flush_create(bb->bucket_alloc); + } + else if (AP_BUCKET_IS_ERROR(bsender)) { + ap_bucket_error *eb = (ap_bucket_error *)bsender; + brecv = ap_bucket_error_create(eb->status, eb->data, + bb->p, bb->bucket_alloc); + } + } + else if (bsender->length == 0) { + APR_BUCKET_REMOVE(bsender); + 
H2_BLIST_INSERT_TAIL(&beam->hold_list, bsender); + continue; + } + else if (APR_BUCKET_IS_FILE(bsender)) { + /* This is set aside into the target brigade pool so that + * any read operation messes with that pool and not + * the sender one. */ + apr_bucket_file *f = (apr_bucket_file *)bsender->data; + apr_file_t *fd = f->fd; + int setaside = (f->readpool != bb->p); + + if (setaside) { + status = apr_file_setaside(&fd, fd, bb->p); + if (status != APR_SUCCESS) { + goto leave; + } + ++beam->files_beamed; + } + ng = apr_brigade_insert_file(bb, fd, bsender->start, bsender->length, + bb->p); +#if APR_HAS_MMAP + /* disable mmap handling as this leads to segfaults when + * the underlying file is changed while memory pointer has + * been handed out. See also PR 59348 */ + apr_bucket_file_enable_mmap(ng, 0); +#endif + APR_BUCKET_REMOVE(bsender); + H2_BLIST_INSERT_TAIL(&beam->hold_list, bsender); + + remain -= bsender->length; + ++transferred; + ++transferred_buckets; + continue; + } + else { + /* create a "receiver" standin bucket. we took care about the + * underlying sender bucket and its data when we placed it into + * the sender brigade. + * the beam bucket will notify us on destruction that bsender is + * no longer needed. */ + brecv = h2_beam_bucket_create(beam, bsender, bb->bucket_alloc, + beam->buckets_sent++); + } + + /* Place the sender bucket into our hold, to be destroyed when no + * receiver bucket references it any more. */ + APR_BUCKET_REMOVE(bsender); + H2_BLIST_INSERT_TAIL(&beam->hold_list, bsender); + + beam->received_bytes += bsender->length; + ++transferred_buckets; + + if (brecv) { + APR_BRIGADE_INSERT_TAIL(bb, brecv); + remain -= brecv->length; + ++transferred; + } + else { + /* let outside hook determine how bucket is beamed */ + leave_yellow(beam, &bl); + brecv = h2_beam_bucket(beam, bb, bsender); + enter_yellow(beam, &bl); + + while (brecv && brecv != APR_BRIGADE_SENTINEL(bb)) { + ++transferred; + remain -= brecv->length; + brecv = APR_BUCKET_NEXT(brecv); + } + } + } + + if (remain < 0) { + /* too much, put some back into out recv_buffer */ + remain = readbytes; + for (brecv = APR_BRIGADE_FIRST(bb); + brecv != APR_BRIGADE_SENTINEL(bb); + brecv = APR_BUCKET_NEXT(brecv)) { + remain -= (beam->tx_mem_limits? 
bucket_mem_used(brecv) + : brecv->length); + if (remain < 0) { + apr_bucket_split(brecv, brecv->length+remain); + beam->recv_buffer = apr_brigade_split_ex(bb, + APR_BUCKET_NEXT(brecv), + beam->recv_buffer); + break; + } + } + } + + if (beam->closed && buffer_is_empty(beam)) { + /* beam is closed and we have nothing more to receive */ + if (!beam->close_sent) { + apr_bucket *b = apr_bucket_eos_create(bb->bucket_alloc); + APR_BRIGADE_INSERT_TAIL(bb, b); + beam->close_sent = 1; + ++transferred; + status = APR_SUCCESS; + } + } + + if (transferred_buckets > 0) { + if (beam->cons_ev_cb) { + beam->cons_ev_cb(beam->cons_ctx, beam); + } + } + + if (transferred) { + apr_thread_cond_broadcast(beam->change); + status = APR_SUCCESS; + } + else { + status = wait_not_empty(beam, block, bl.mutex); + if (status != APR_SUCCESS) { + goto leave; + } + goto transfer; + } +leave: + leave_yellow(beam, &bl); + } + return status; +} + +void h2_beam_on_consumed(h2_bucket_beam *beam, + h2_beam_ev_callback *ev_cb, + h2_beam_io_callback *io_cb, void *ctx) +{ + h2_beam_lock bl; + if (enter_yellow(beam, &bl) == APR_SUCCESS) { + beam->cons_ev_cb = ev_cb; + beam->cons_io_cb = io_cb; + beam->cons_ctx = ctx; + leave_yellow(beam, &bl); + } +} + +void h2_beam_on_produced(h2_bucket_beam *beam, + h2_beam_io_callback *io_cb, void *ctx) +{ + h2_beam_lock bl; + if (enter_yellow(beam, &bl) == APR_SUCCESS) { + beam->prod_io_cb = io_cb; + beam->prod_ctx = ctx; + leave_yellow(beam, &bl); + } +} + +void h2_beam_on_file_beam(h2_bucket_beam *beam, + h2_beam_can_beam_callback *cb, void *ctx) +{ + h2_beam_lock bl; + + if (enter_yellow(beam, &bl) == APR_SUCCESS) { + beam->can_beam_fn = cb; + beam->can_beam_ctx = ctx; + leave_yellow(beam, &bl); + } +} + + +apr_off_t h2_beam_get_buffered(h2_bucket_beam *beam) +{ + apr_bucket *b; + apr_off_t l = 0; + h2_beam_lock bl; + + if (beam && enter_yellow(beam, &bl) == APR_SUCCESS) { + for (b = H2_BLIST_FIRST(&beam->send_list); + b != H2_BLIST_SENTINEL(&beam->send_list); + b = APR_BUCKET_NEXT(b)) { + /* should all have determinate length */ + l += b->length; + } + leave_yellow(beam, &bl); + } + return l; +} + +apr_off_t h2_beam_get_mem_used(h2_bucket_beam *beam) +{ + apr_bucket *b; + apr_off_t l = 0; + h2_beam_lock bl; + + if (beam && enter_yellow(beam, &bl) == APR_SUCCESS) { + for (b = H2_BLIST_FIRST(&beam->send_list); + b != H2_BLIST_SENTINEL(&beam->send_list); + b = APR_BUCKET_NEXT(b)) { + l += bucket_mem_used(b); + } + leave_yellow(beam, &bl); + } + return l; +} + +int h2_beam_empty(h2_bucket_beam *beam) +{ + int empty = 1; + h2_beam_lock bl; + + if (beam && enter_yellow(beam, &bl) == APR_SUCCESS) { + empty = (H2_BLIST_EMPTY(&beam->send_list) + && (!beam->recv_buffer || APR_BRIGADE_EMPTY(beam->recv_buffer))); + leave_yellow(beam, &bl); + } + return empty; +} + +int h2_beam_holds_proxies(h2_bucket_beam *beam) +{ + int has_proxies = 1; + h2_beam_lock bl; + + if (beam && enter_yellow(beam, &bl) == APR_SUCCESS) { + has_proxies = !H2_BPROXY_LIST_EMPTY(&beam->proxies); + leave_yellow(beam, &bl); + } + return has_proxies; +} + +int h2_beam_was_received(h2_bucket_beam *beam) +{ + int happend = 0; + h2_beam_lock bl; + + if (beam && enter_yellow(beam, &bl) == APR_SUCCESS) { + happend = (beam->received_bytes > 0); + leave_yellow(beam, &bl); + } + return happend; +} + +apr_size_t h2_beam_get_files_beamed(h2_bucket_beam *beam) +{ + apr_size_t n = 0; + h2_beam_lock bl; + + if (beam && enter_yellow(beam, &bl) == APR_SUCCESS) { + n = beam->files_beamed; + leave_yellow(beam, &bl); + } + return n; +} + +int 
h2_beam_no_files(void *ctx, h2_bucket_beam *beam, apr_file_t *file) +{ + return 0; +} + +int h2_beam_report_consumption(h2_bucket_beam *beam) +{ + h2_beam_lock bl; + int rv = 0; + if (enter_yellow(beam, &bl) == APR_SUCCESS) { + rv = report_consumption(beam, &bl); + leave_yellow(beam, &bl); + } + return rv; +} + +void h2_beam_log(h2_bucket_beam *beam, conn_rec *c, int level, const char *msg) +{ + if (beam && APLOG_C_IS_LEVEL(c,level)) { + ap_log_cerror(APLOG_MARK, level, 0, c, + "beam(%ld-%d,%s,closed=%d,aborted=%d,empty=%d,buf=%ld): %s", + (c->master? c->master->id : c->id), beam->id, beam->tag, + beam->closed, beam->aborted, h2_beam_empty(beam), + (long)h2_beam_get_buffered(beam), msg); + } +} + + diff --git a/modules/http2/h2_bucket_beam.h b/modules/http2/h2_bucket_beam.h new file mode 100644 index 0000000..f260762 --- /dev/null +++ b/modules/http2/h2_bucket_beam.h @@ -0,0 +1,406 @@ +/* Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef h2_bucket_beam_h +#define h2_bucket_beam_h + +struct apr_thread_mutex_t; +struct apr_thread_cond_t; + +/******************************************************************************* + * apr_bucket list without bells and whistles + ******************************************************************************/ + +/** + * h2_blist can hold a list of buckets just like apr_bucket_brigade, but + * does not to any allocations or related features. + */ +typedef struct { + APR_RING_HEAD(h2_bucket_list, apr_bucket) list; +} h2_blist; + +#define H2_BLIST_INIT(b) APR_RING_INIT(&(b)->list, apr_bucket, link); +#define H2_BLIST_SENTINEL(b) APR_RING_SENTINEL(&(b)->list, apr_bucket, link) +#define H2_BLIST_EMPTY(b) APR_RING_EMPTY(&(b)->list, apr_bucket, link) +#define H2_BLIST_FIRST(b) APR_RING_FIRST(&(b)->list) +#define H2_BLIST_LAST(b) APR_RING_LAST(&(b)->list) +#define H2_BLIST_INSERT_HEAD(b, e) do { \ + apr_bucket *ap__b = (e); \ + APR_RING_INSERT_HEAD(&(b)->list, ap__b, apr_bucket, link); \ + } while (0) +#define H2_BLIST_INSERT_TAIL(b, e) do { \ + apr_bucket *ap__b = (e); \ + APR_RING_INSERT_TAIL(&(b)->list, ap__b, apr_bucket, link); \ + } while (0) +#define H2_BLIST_CONCAT(a, b) do { \ + APR_RING_CONCAT(&(a)->list, &(b)->list, apr_bucket, link); \ + } while (0) +#define H2_BLIST_PREPEND(a, b) do { \ + APR_RING_PREPEND(&(a)->list, &(b)->list, apr_bucket, link); \ + } while (0) + +/******************************************************************************* + * h2_bucket_beam + ******************************************************************************/ + +/** + * A h2_bucket_beam solves the task of transferring buckets, esp. their data, + * across threads with zero buffer copies. 
+ *
+ * When a thread, let's call it the sender thread, wants to send buckets to
+ * another, the green thread, it creates an h2_bucket_beam and adds buckets
+ * via h2_beam_send(). It gives the beam to the green thread which then
+ * can receive buckets into its own brigade via h2_beam_receive().
+ *
+ * Sending and receiving can happen concurrently.
+ *
+ * The beam can limit the amount of data it accepts via the buffer_size. This
+ * can also be adjusted during its lifetime. Sends and receives can be done blocking.
+ * A timeout can be set for such blocks.
+ *
+ * Care needs to be taken when terminating the beam. The beam registers at
+ * the pool it was created with and will clean up after itself. However, if
+ * received buckets still exist, already freed memory might be accessed.
+ * The beam does an assertion on this condition.
+ *
+ * The proper way of shutting down a beam is to first make sure there are no
+ * more green buckets out there, then clean up the beam to purge any still
+ * existing sender buckets and then, possibly, terminate the beam itself
+ * (or the pool it was created with).
+ *
+ * The following restrictions apply to bucket transport:
+ * - only EOS and FLUSH meta buckets are copied through. All other meta buckets
+ *   are kept in the beam's hold.
+ * - all kinds of data buckets are transported through:
+ *   - transient buckets are converted to heap ones on send
+ *   - heap and pool buckets require no extra handling
+ *   - buckets with indeterminate length are read on send
+ *   - file buckets will transfer the file itself into a new bucket, if allowed
+ *   - all other buckets are read on send to make sure data is present
+ *
+ * This assures that when the sender thread sends its sender buckets, the data
+ * is made accessible while still on the sender side. The sender bucket then enters
+ * the beam's hold storage.
+ * When the green thread calls receive, sender buckets in the hold are wrapped
+ * into special beam buckets. Beam buckets on read present the data directly
+ * from the internal sender one, but otherwise live on the green side. When a
+ * beam bucket gets destroyed, it notifies its beam that the corresponding
+ * sender bucket from the hold may be destroyed.
+ * Since the destruction of green buckets happens in the green thread, any
+ * corresponding sender bucket cannot immediately be destroyed, as that would
+ * result in race conditions.
+ * Instead, the beam transfers such sender buckets from the hold to the purge
+ * storage. Next time there is a call from the sender side, the buckets in
+ * purge will be deleted.
+ *
+ * There are callbacks that can be registered with a beam:
+ * - a "consumed" callback that gets called on the sender side with the
+ *   amount of data that has been received by the green side. The amount
+ *   is a delta from the last callback invocation. The sender side can trigger
+ *   these callbacks by calling h2_beam_send() with a NULL brigade.
+ * - a "can_beam_file" callback that can prohibit the transfer of file handles
+ *   through the beam. This will cause file buckets to be read on send and
+ *   their data buffers will then be transported just like a heap bucket would.
+ *   When no callback is registered, no restrictions apply and all files are
+ *   passed through.
+ *   File handles transferred to the green side will stay there until the
+ *   receiving brigade's pool is destroyed/cleared. If the pool lives very
+ *   long or if many different files are beamed, the process might run out
+ *   of available file handles.
+ * + * The name "beam" of course is inspired by good old transporter + * technology where humans are kept inside the transporter's memory + * buffers until the transmission is complete. Star gates use a similar trick. + */ + +typedef void h2_beam_mutex_leave(void *ctx, struct apr_thread_mutex_t *lock); + +typedef struct { + apr_thread_mutex_t *mutex; + h2_beam_mutex_leave *leave; + void *leave_ctx; +} h2_beam_lock; + +typedef struct h2_bucket_beam h2_bucket_beam; + +typedef apr_status_t h2_beam_mutex_enter(void *ctx, h2_beam_lock *pbl); + +typedef void h2_beam_io_callback(void *ctx, h2_bucket_beam *beam, + apr_off_t bytes); +typedef void h2_beam_ev_callback(void *ctx, h2_bucket_beam *beam); + +typedef struct h2_beam_proxy h2_beam_proxy; +typedef struct { + APR_RING_HEAD(h2_beam_proxy_list, h2_beam_proxy) list; +} h2_bproxy_list; + +typedef int h2_beam_can_beam_callback(void *ctx, h2_bucket_beam *beam, + apr_file_t *file); + +typedef enum { + H2_BEAM_OWNER_SEND, + H2_BEAM_OWNER_RECV +} h2_beam_owner_t; + +/** + * Will deny all transfer of apr_file_t across the beam and force + * a data copy instead. + */ +int h2_beam_no_files(void *ctx, h2_bucket_beam *beam, apr_file_t *file); + +struct h2_bucket_beam { + int id; + const char *tag; + apr_pool_t *pool; + h2_beam_owner_t owner; + h2_blist send_list; + h2_blist hold_list; + h2_blist purge_list; + apr_bucket_brigade *recv_buffer; + h2_bproxy_list proxies; + apr_pool_t *send_pool; + apr_pool_t *recv_pool; + + apr_size_t max_buf_size; + apr_interval_time_t timeout; + + apr_off_t sent_bytes; /* amount of bytes send */ + apr_off_t received_bytes; /* amount of bytes received */ + + apr_size_t buckets_sent; /* # of beam buckets sent */ + apr_size_t files_beamed; /* how many file handles have been set aside */ + + unsigned int aborted : 1; + unsigned int closed : 1; + unsigned int close_sent : 1; + unsigned int tx_mem_limits : 1; /* only memory size counts on transfers */ + + struct apr_thread_mutex_t *lock; + struct apr_thread_cond_t *change; + + apr_off_t cons_bytes_reported; /* amount of bytes reported as consumed */ + h2_beam_ev_callback *cons_ev_cb; + h2_beam_io_callback *cons_io_cb; + void *cons_ctx; + + apr_off_t prod_bytes_reported; /* amount of bytes reported as produced */ + h2_beam_io_callback *prod_io_cb; + void *prod_ctx; + + h2_beam_can_beam_callback *can_beam_fn; + void *can_beam_ctx; +}; + +/** + * Creates a new bucket beam for transfer of buckets across threads. + * + * The pool the beam is created with will be protected by the given + * mutex and will be used in multiple threads. It needs a pool allocator + * that is only used inside that same mutex. + * + * @param pbeam will hold the created beam on return + * @param pool pool owning the beam, beam will cleanup when pool released + * @param id identifier of the beam + * @param tag tag identifying beam for logging + * @param owner if the beam is owned by the sender or receiver, e.g. if + * the pool owner is using this beam for sending or receiving + * @param buffer_size maximum memory footprint of buckets buffered in beam, or + * 0 for no limitation + * @param timeout timeout for blocking operations + */ +apr_status_t h2_beam_create(h2_bucket_beam **pbeam, + apr_pool_t *pool, + int id, const char *tag, + h2_beam_owner_t owner, + apr_size_t buffer_size, + apr_interval_time_t timeout); + +/** + * Destroys the beam immediately without cleanup. + */ +apr_status_t h2_beam_destroy(h2_bucket_beam *beam); + +/** + * Send buckets from the given brigade through the beam. 
Will hold buckets
+ * internally as long as they have not been processed by the receiving side.
+ * All accepted buckets are removed from the given brigade. Will return with
+ * APR_EAGAIN on non-blocking sends when not all buckets could be accepted.
+ *
+ * Call from the sender side only.
+ */
+apr_status_t h2_beam_send(h2_bucket_beam *beam,
+                          apr_bucket_brigade *bb,
+                          apr_read_type_e block);
+
+/**
+ * Register the pool from which future buckets are sent. This defines
+ * the lifetime of the buckets, e.g. the pool should not be cleared/destroyed
+ * until the data is no longer needed (or has been received).
+ */
+void h2_beam_send_from(h2_bucket_beam *beam, apr_pool_t *p);
+
+/**
+ * Receive buckets from the beam into the given brigade. Will return APR_EOF
+ * when reading past an EOS bucket. Reads can be blocking until data is
+ * available or the beam has been closed. Non-blocking calls return APR_EAGAIN
+ * if no data is available.
+ *
+ * Call from the receiver side only.
+ */
+apr_status_t h2_beam_receive(h2_bucket_beam *beam,
+                             apr_bucket_brigade *green_buckets,
+                             apr_read_type_e block,
+                             apr_off_t readbytes);
+
+/**
+ * Determine if beam is empty.
+ */
+int h2_beam_empty(h2_bucket_beam *beam);
+
+/**
+ * Determine if beam has handed out proxy buckets that are not destroyed.
+ */
+int h2_beam_holds_proxies(h2_bucket_beam *beam);
+
+/**
+ * Abort the beam. Will clean up any buffered buckets and answer all sends
+ * and receives with APR_ECONNABORTED.
+ *
+ * Call from the sender side only.
+ */
+void h2_beam_abort(h2_bucket_beam *beam);
+
+/**
+ * Close the beam. Sending an EOS bucket serves the same purpose.
+ *
+ * Call from the sender side only.
+ */
+apr_status_t h2_beam_close(h2_bucket_beam *beam);
+
+/**
+ * The receiver leaves the beam, e.g. it will no longer read. This will
+ * interrupt any sender blocked writing and fail future sends.
+ *
+ * Call from the receiver side only.
+ */
+apr_status_t h2_beam_leave(h2_bucket_beam *beam);
+
+int h2_beam_is_closed(h2_bucket_beam *beam);
+
+/**
+ * Return APR_SUCCESS when all buckets in transit have been handled.
+ * When called with APR_BLOCK_READ and a mutex set, will wait until the green
+ * side has consumed all data. Otherwise APR_EAGAIN is returned.
+ * Buffered data is not discarded; the call only waits for it to be consumed.
+ * If a timeout is set on the beam, waiting might also time out and
+ * return APR_TIMEUP.
+ *
+ * Call from the sender side only.
+ */
+apr_status_t h2_beam_wait_empty(h2_bucket_beam *beam, apr_read_type_e block);
+
+/**
+ * Set/get the timeout for blocking read/write operations. Only works
+ * if a mutex has been set for the beam.
+ */
+void h2_beam_timeout_set(h2_bucket_beam *beam,
+                         apr_interval_time_t timeout);
+apr_interval_time_t h2_beam_timeout_get(h2_bucket_beam *beam);
+
+/**
+ * Set/get the maximum buffer size for beam data (memory footprint).
+ */
+void h2_beam_buffer_size_set(h2_bucket_beam *beam,
+                             apr_size_t buffer_size);
+apr_size_t h2_beam_buffer_size_get(h2_bucket_beam *beam);
+
+/**
+ * Register a callback to be invoked on the sender side with the
+ * amount of bytes that have been consumed by the receiver, since the
+ * last callback invocation or reset.
+ * @param beam the beam to set the callback on
+ * @param ev_cb the callback or NULL, called when bytes are consumed
+ * @param io_cb the callback or NULL, called on sender with bytes consumed
+ * @param ctx the context to use in callback invocation
+ *
+ * Call from the sender side, io callbacks invoked on sender side, ev callback
+ * from any side.
+ */
+void h2_beam_on_consumed(h2_bucket_beam *beam,
+                         h2_beam_ev_callback *ev_cb,
+                         h2_beam_io_callback *io_cb, void *ctx);
+
+/**
+ * Call any registered consumed handler, if any changes have happened
+ * since the last invocation.
+ * @return !=0 iff a handler has been called
+ *
+ * Needs to be invoked from the sending side.
+ */
+int h2_beam_report_consumption(h2_bucket_beam *beam);
+
+/**
+ * Register a callback to be invoked on the receiver side with the
+ * amount of bytes that have been produced by the sender, since the
+ * last callback invocation or reset.
+ * @param beam the beam to set the callback on
+ * @param io_cb the callback or NULL, called on receiver with bytes produced
+ * @param ctx the context to use in callback invocation
+ *
+ * Call from the receiver side, callbacks invoked on either side.
+ */
+void h2_beam_on_produced(h2_bucket_beam *beam,
+                         h2_beam_io_callback *io_cb, void *ctx);
+
+/**
+ * Register a callback that may prevent a file from being beamed as
+ * a file handle, forcing the file content to be copied. When no callback
+ * is set (NULL), file handles are transferred directly.
+ * @param beam the beam to set the callback on
+ * @param cb the callback or NULL, called to decide whether a file may be beamed
+ * @param ctx the context to use in callback invocation
+ *
+ * Call from the receiver side, callbacks invoked on either side.
+ */
+void h2_beam_on_file_beam(h2_bucket_beam *beam,
+                          h2_beam_can_beam_callback *cb, void *ctx);
+
+/**
+ * Get the amount of bytes currently buffered in the beam (unread).
+ */
+apr_off_t h2_beam_get_buffered(h2_bucket_beam *beam);
+
+/**
+ * Get the memory used by the buffered buckets, approximately.
+ */
+apr_off_t h2_beam_get_mem_used(h2_bucket_beam *beam);
+
+/**
+ * Return != 0 iff (some) data from the beam has been received.
+ */
+int h2_beam_was_received(h2_bucket_beam *beam);
+
+apr_size_t h2_beam_get_files_beamed(h2_bucket_beam *beam);
+
+typedef apr_bucket *h2_bucket_beamer(h2_bucket_beam *beam,
+                                     apr_bucket_brigade *dest,
+                                     const apr_bucket *src);
+
+void h2_register_bucket_beamer(h2_bucket_beamer *beamer);
+
+void h2_beam_log(h2_bucket_beam *beam, conn_rec *c, int level, const char *msg);
+
+#endif /* h2_bucket_beam_h */
diff --git a/modules/http2/h2_bucket_eos.c b/modules/http2/h2_bucket_eos.c
new file mode 100644
index 0000000..c89d499
--- /dev/null
+++ b/modules/http2/h2_bucket_eos.c
@@ -0,0 +1,127 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include <assert.h> +#include <stddef.h> + +#include <httpd.h> +#include <http_core.h> +#include <http_connection.h> +#include <http_log.h> + +#include "h2_private.h" +#include "h2.h" +#include "h2_mplx.h" +#include "h2_stream.h" +#include "h2_bucket_eos.h" + +typedef struct { + apr_bucket_refcount refcount; + h2_stream *stream; +} h2_bucket_eos; + +static apr_status_t bucket_cleanup(void *data) +{ + h2_stream **pstream = data; + + if (*pstream) { + /* If bucket_destroy is called after us, this prevents + * bucket_destroy from trying to destroy the stream again. */ + *pstream = NULL; + } + return APR_SUCCESS; +} + +static apr_status_t bucket_read(apr_bucket *b, const char **str, + apr_size_t *len, apr_read_type_e block) +{ + (void)b; + (void)block; + *str = NULL; + *len = 0; + return APR_SUCCESS; +} + +apr_bucket *h2_bucket_eos_make(apr_bucket *b, h2_stream *stream) +{ + h2_bucket_eos *h; + + h = apr_bucket_alloc(sizeof(*h), b->list); + h->stream = stream; + + b = apr_bucket_shared_make(b, h, 0, 0); + b->type = &h2_bucket_type_eos; + + return b; +} + +apr_bucket *h2_bucket_eos_create(apr_bucket_alloc_t *list, + h2_stream *stream) +{ + apr_bucket *b = apr_bucket_alloc(sizeof(*b), list); + + APR_BUCKET_INIT(b); + b->free = apr_bucket_free; + b->list = list; + b = h2_bucket_eos_make(b, stream); + if (stream) { + h2_bucket_eos *h = b->data; + apr_pool_pre_cleanup_register(stream->pool, &h->stream, bucket_cleanup); + } + return b; +} + +static void bucket_destroy(void *data) +{ + h2_bucket_eos *h = data; + + if (apr_bucket_shared_destroy(h)) { + h2_stream *stream = h->stream; + if (stream && stream->pool) { + apr_pool_cleanup_kill(stream->pool, &h->stream, bucket_cleanup); + } + apr_bucket_free(h); + if (stream) { + h2_stream_dispatch(stream, H2_SEV_EOS_SENT); + } + } +} + +const apr_bucket_type_t h2_bucket_type_eos = { + "H2EOS", 5, APR_BUCKET_METADATA, + bucket_destroy, + bucket_read, + apr_bucket_setaside_noop, + apr_bucket_split_notimpl, + apr_bucket_shared_copy +}; + diff --git a/modules/http2/h2_bucket_eos.h b/modules/http2/h2_bucket_eos.h new file mode 100644 index 0000000..04e32e3 --- /dev/null +++ b/modules/http2/h2_bucket_eos.h @@ -0,0 +1,32 @@ +/* Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef mod_http2_h2_bucket_stream_eos_h +#define mod_http2_h2_bucket_stream_eos_h + +struct h2_stream; + +/** End Of HTTP/2 STREAM (H2EOS) bucket */ +extern const apr_bucket_type_t h2_bucket_type_eos; + +#define H2_BUCKET_IS_H2EOS(e) (e->type == &h2_bucket_type_eos) + +apr_bucket *h2_bucket_eos_make(apr_bucket *b, struct h2_stream *stream); + +apr_bucket *h2_bucket_eos_create(apr_bucket_alloc_t *list, + struct h2_stream *stream); + +#endif /* mod_http2_h2_bucket_stream_eos_h */ diff --git a/modules/http2/h2_config.c b/modules/http2/h2_config.c new file mode 100644 index 0000000..8766355 --- /dev/null +++ b/modules/http2/h2_config.c @@ -0,0 +1,702 @@ +/* Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include <assert.h> + +#include <apr_hash.h> +#include <apr_lib.h> + +#include <httpd.h> +#include <http_core.h> +#include <http_config.h> +#include <http_log.h> +#include <http_vhost.h> + +#include <ap_mpm.h> + +#include <apr_strings.h> + +#include "h2.h" +#include "h2_alt_svc.h" +#include "h2_ctx.h" +#include "h2_conn.h" +#include "h2_config.h" +#include "h2_h2.h" +#include "h2_private.h" + +#define DEF_VAL (-1) + +#define H2_CONFIG_GET(a, b, n) \ + (((a)->n == DEF_VAL)? (b) : (a))->n + +static h2_config defconf = { + "default", + 100, /* max_streams */ + H2_INITIAL_WINDOW_SIZE, /* window_size */ + -1, /* min workers */ + -1, /* max workers */ + 10 * 60, /* max workers idle secs */ + 32 * 1024, /* stream max mem size */ + NULL, /* no alt-svcs */ + -1, /* alt-svc max age */ + 0, /* serialize headers */ + -1, /* h2 direct mode */ + 1, /* modern TLS only */ + -1, /* HTTP/1 Upgrade support */ + 1024*1024, /* TLS warmup size */ + 1, /* TLS cooldown secs */ + 1, /* HTTP/2 server push enabled */ + NULL, /* map of content-type to priorities */ + 256, /* push diary size */ + 0, /* copy files across threads */ + NULL, /* push list */ + 0, /* early hints, http status 103 */ +}; + +void h2_config_init(apr_pool_t *pool) +{ + (void)pool; +} + +static void *h2_config_create(apr_pool_t *pool, + const char *prefix, const char *x) +{ + h2_config *conf = (h2_config *)apr_pcalloc(pool, sizeof(h2_config)); + const char *s = x? 
x : "unknown"; + char *name = apr_pstrcat(pool, prefix, "[", s, "]", NULL); + + conf->name = name; + conf->h2_max_streams = DEF_VAL; + conf->h2_window_size = DEF_VAL; + conf->min_workers = DEF_VAL; + conf->max_workers = DEF_VAL; + conf->max_worker_idle_secs = DEF_VAL; + conf->stream_max_mem_size = DEF_VAL; + conf->alt_svc_max_age = DEF_VAL; + conf->serialize_headers = DEF_VAL; + conf->h2_direct = DEF_VAL; + conf->modern_tls_only = DEF_VAL; + conf->h2_upgrade = DEF_VAL; + conf->tls_warmup_size = DEF_VAL; + conf->tls_cooldown_secs = DEF_VAL; + conf->h2_push = DEF_VAL; + conf->priorities = NULL; + conf->push_diary_size = DEF_VAL; + conf->copy_files = DEF_VAL; + conf->push_list = NULL; + conf->early_hints = DEF_VAL; + return conf; +} + +void *h2_config_create_svr(apr_pool_t *pool, server_rec *s) +{ + return h2_config_create(pool, "srv", s->defn_name); +} + +void *h2_config_create_dir(apr_pool_t *pool, char *x) +{ + return h2_config_create(pool, "dir", x); +} + +static void *h2_config_merge(apr_pool_t *pool, void *basev, void *addv) +{ + h2_config *base = (h2_config *)basev; + h2_config *add = (h2_config *)addv; + h2_config *n = (h2_config *)apr_pcalloc(pool, sizeof(h2_config)); + char *name = apr_pstrcat(pool, "merged[", add->name, ", ", base->name, "]", NULL); + n->name = name; + + n->h2_max_streams = H2_CONFIG_GET(add, base, h2_max_streams); + n->h2_window_size = H2_CONFIG_GET(add, base, h2_window_size); + n->min_workers = H2_CONFIG_GET(add, base, min_workers); + n->max_workers = H2_CONFIG_GET(add, base, max_workers); + n->max_worker_idle_secs = H2_CONFIG_GET(add, base, max_worker_idle_secs); + n->stream_max_mem_size = H2_CONFIG_GET(add, base, stream_max_mem_size); + n->alt_svcs = add->alt_svcs? add->alt_svcs : base->alt_svcs; + n->alt_svc_max_age = H2_CONFIG_GET(add, base, alt_svc_max_age); + n->serialize_headers = H2_CONFIG_GET(add, base, serialize_headers); + n->h2_direct = H2_CONFIG_GET(add, base, h2_direct); + n->modern_tls_only = H2_CONFIG_GET(add, base, modern_tls_only); + n->h2_upgrade = H2_CONFIG_GET(add, base, h2_upgrade); + n->tls_warmup_size = H2_CONFIG_GET(add, base, tls_warmup_size); + n->tls_cooldown_secs = H2_CONFIG_GET(add, base, tls_cooldown_secs); + n->h2_push = H2_CONFIG_GET(add, base, h2_push); + if (add->priorities && base->priorities) { + n->priorities = apr_hash_overlay(pool, add->priorities, base->priorities); + } + else { + n->priorities = add->priorities? add->priorities : base->priorities; + } + n->push_diary_size = H2_CONFIG_GET(add, base, push_diary_size); + n->copy_files = H2_CONFIG_GET(add, base, copy_files); + if (add->push_list && base->push_list) { + n->push_list = apr_array_append(pool, base->push_list, add->push_list); + } + else { + n->push_list = add->push_list? 
add->push_list : base->push_list; + } + n->early_hints = H2_CONFIG_GET(add, base, early_hints); + return n; +} + +void *h2_config_merge_dir(apr_pool_t *pool, void *basev, void *addv) +{ + return h2_config_merge(pool, basev, addv); +} + +void *h2_config_merge_svr(apr_pool_t *pool, void *basev, void *addv) +{ + return h2_config_merge(pool, basev, addv); +} + +int h2_config_geti(const h2_config *conf, h2_config_var_t var) +{ + return (int)h2_config_geti64(conf, var); +} + +apr_int64_t h2_config_geti64(const h2_config *conf, h2_config_var_t var) +{ + switch(var) { + case H2_CONF_MAX_STREAMS: + return H2_CONFIG_GET(conf, &defconf, h2_max_streams); + case H2_CONF_WIN_SIZE: + return H2_CONFIG_GET(conf, &defconf, h2_window_size); + case H2_CONF_MIN_WORKERS: + return H2_CONFIG_GET(conf, &defconf, min_workers); + case H2_CONF_MAX_WORKERS: + return H2_CONFIG_GET(conf, &defconf, max_workers); + case H2_CONF_MAX_WORKER_IDLE_SECS: + return H2_CONFIG_GET(conf, &defconf, max_worker_idle_secs); + case H2_CONF_STREAM_MAX_MEM: + return H2_CONFIG_GET(conf, &defconf, stream_max_mem_size); + case H2_CONF_ALT_SVC_MAX_AGE: + return H2_CONFIG_GET(conf, &defconf, alt_svc_max_age); + case H2_CONF_SER_HEADERS: + return H2_CONFIG_GET(conf, &defconf, serialize_headers); + case H2_CONF_MODERN_TLS_ONLY: + return H2_CONFIG_GET(conf, &defconf, modern_tls_only); + case H2_CONF_UPGRADE: + return H2_CONFIG_GET(conf, &defconf, h2_upgrade); + case H2_CONF_DIRECT: + return H2_CONFIG_GET(conf, &defconf, h2_direct); + case H2_CONF_TLS_WARMUP_SIZE: + return H2_CONFIG_GET(conf, &defconf, tls_warmup_size); + case H2_CONF_TLS_COOLDOWN_SECS: + return H2_CONFIG_GET(conf, &defconf, tls_cooldown_secs); + case H2_CONF_PUSH: + return H2_CONFIG_GET(conf, &defconf, h2_push); + case H2_CONF_PUSH_DIARY_SIZE: + return H2_CONFIG_GET(conf, &defconf, push_diary_size); + case H2_CONF_COPY_FILES: + return H2_CONFIG_GET(conf, &defconf, copy_files); + case H2_CONF_EARLY_HINTS: + return H2_CONFIG_GET(conf, &defconf, early_hints); + default: + return DEF_VAL; + } +} + +const h2_config *h2_config_sget(server_rec *s) +{ + h2_config *cfg = (h2_config *)ap_get_module_config(s->module_config, + &http2_module); + ap_assert(cfg); + return cfg; +} + +const struct h2_priority *h2_config_get_priority(const h2_config *conf, + const char *content_type) +{ + if (content_type && conf->priorities) { + size_t len = strcspn(content_type, "; \t"); + h2_priority *prio = apr_hash_get(conf->priorities, content_type, len); + return prio? 
prio : apr_hash_get(conf->priorities, "*", 1); + } + return NULL; +} + +static const char *h2_conf_set_max_streams(cmd_parms *parms, + void *arg, const char *value) +{ + h2_config *cfg = (h2_config *)h2_config_sget(parms->server); + cfg->h2_max_streams = (int)apr_atoi64(value); + (void)arg; + if (cfg->h2_max_streams < 1) { + return "value must be > 0"; + } + return NULL; +} + +static const char *h2_conf_set_window_size(cmd_parms *parms, + void *arg, const char *value) +{ + h2_config *cfg = (h2_config *)h2_config_sget(parms->server); + cfg->h2_window_size = (int)apr_atoi64(value); + (void)arg; + if (cfg->h2_window_size < 1024) { + return "value must be >= 1024"; + } + return NULL; +} + +static const char *h2_conf_set_min_workers(cmd_parms *parms, + void *arg, const char *value) +{ + h2_config *cfg = (h2_config *)h2_config_sget(parms->server); + cfg->min_workers = (int)apr_atoi64(value); + (void)arg; + if (cfg->min_workers < 1) { + return "value must be > 0"; + } + return NULL; +} + +static const char *h2_conf_set_max_workers(cmd_parms *parms, + void *arg, const char *value) +{ + h2_config *cfg = (h2_config *)h2_config_sget(parms->server); + cfg->max_workers = (int)apr_atoi64(value); + (void)arg; + if (cfg->max_workers < 1) { + return "value must be > 0"; + } + return NULL; +} + +static const char *h2_conf_set_max_worker_idle_secs(cmd_parms *parms, + void *arg, const char *value) +{ + h2_config *cfg = (h2_config *)h2_config_sget(parms->server); + cfg->max_worker_idle_secs = (int)apr_atoi64(value); + (void)arg; + if (cfg->max_worker_idle_secs < 1) { + return "value must be > 0"; + } + return NULL; +} + +static const char *h2_conf_set_stream_max_mem_size(cmd_parms *parms, + void *arg, const char *value) +{ + h2_config *cfg = (h2_config *)h2_config_sget(parms->server); + + + cfg->stream_max_mem_size = (int)apr_atoi64(value); + (void)arg; + if (cfg->stream_max_mem_size < 1024) { + return "value must be >= 1024"; + } + return NULL; +} + +static const char *h2_add_alt_svc(cmd_parms *parms, + void *arg, const char *value) +{ + if (value && *value) { + h2_config *cfg = (h2_config *)h2_config_sget(parms->server); + h2_alt_svc *as = h2_alt_svc_parse(value, parms->pool); + if (!as) { + return "unable to parse alt-svc specifier"; + } + if (!cfg->alt_svcs) { + cfg->alt_svcs = apr_array_make(parms->pool, 5, sizeof(h2_alt_svc*)); + } + APR_ARRAY_PUSH(cfg->alt_svcs, h2_alt_svc*) = as; + } + (void)arg; + return NULL; +} + +static const char *h2_conf_set_alt_svc_max_age(cmd_parms *parms, + void *arg, const char *value) +{ + h2_config *cfg = (h2_config *)h2_config_sget(parms->server); + cfg->alt_svc_max_age = (int)apr_atoi64(value); + (void)arg; + return NULL; +} + +static const char *h2_conf_set_session_extra_files(cmd_parms *parms, + void *arg, const char *value) +{ + /* deprecated, ignore */ + (void)arg; + (void)value; + ap_log_perror(APLOG_MARK, APLOG_WARNING, 0, parms->pool, /* NO LOGNO */ + "H2SessionExtraFiles is obsolete and will be ignored"); + return NULL; +} + +static const char *h2_conf_set_serialize_headers(cmd_parms *parms, + void *arg, const char *value) +{ + h2_config *cfg = (h2_config *)h2_config_sget(parms->server); + if (!strcasecmp(value, "On")) { + cfg->serialize_headers = 1; + return NULL; + } + else if (!strcasecmp(value, "Off")) { + cfg->serialize_headers = 0; + return NULL; + } + + (void)arg; + return "value must be On or Off"; +} + +static const char *h2_conf_set_direct(cmd_parms *parms, + void *arg, const char *value) +{ + h2_config *cfg = (h2_config *)h2_config_sget(parms->server); 
+ if (!strcasecmp(value, "On")) { + cfg->h2_direct = 1; + return NULL; + } + else if (!strcasecmp(value, "Off")) { + cfg->h2_direct = 0; + return NULL; + } + + (void)arg; + return "value must be On or Off"; +} + +static const char *h2_conf_set_push(cmd_parms *parms, + void *arg, const char *value) +{ + h2_config *cfg = (h2_config *)h2_config_sget(parms->server); + if (!strcasecmp(value, "On")) { + cfg->h2_push = 1; + return NULL; + } + else if (!strcasecmp(value, "Off")) { + cfg->h2_push = 0; + return NULL; + } + + (void)arg; + return "value must be On or Off"; +} + +static const char *h2_conf_add_push_priority(cmd_parms *cmd, void *_cfg, + const char *ctype, const char *sdependency, + const char *sweight) +{ + h2_config *cfg = (h2_config *)h2_config_sget(cmd->server); + const char *sdefweight = "16"; /* default AFTER weight */ + h2_dependency dependency; + h2_priority *priority; + int weight; + + if (!*ctype) { + return "1st argument must be a mime-type, like 'text/css' or '*'"; + } + + if (!sweight) { + /* 2 args only, but which one? */ + if (apr_isdigit(sdependency[0])) { + sweight = sdependency; + sdependency = "AFTER"; /* default dependency */ + } + } + + if (!strcasecmp("AFTER", sdependency)) { + dependency = H2_DEPENDANT_AFTER; + } + else if (!strcasecmp("BEFORE", sdependency)) { + dependency = H2_DEPENDANT_BEFORE; + if (sweight) { + return "dependecy 'Before' does not allow a weight"; + } + } + else if (!strcasecmp("INTERLEAVED", sdependency)) { + dependency = H2_DEPENDANT_INTERLEAVED; + sdefweight = "256"; /* default INTERLEAVED weight */ + } + else { + return "dependency must be one of 'After', 'Before' or 'Interleaved'"; + } + + weight = (int)apr_atoi64(sweight? sweight : sdefweight); + if (weight < NGHTTP2_MIN_WEIGHT) { + return apr_psprintf(cmd->pool, "weight must be a number >= %d", + NGHTTP2_MIN_WEIGHT); + } + + priority = apr_pcalloc(cmd->pool, sizeof(*priority)); + priority->dependency = dependency; + priority->weight = weight; + + if (!cfg->priorities) { + cfg->priorities = apr_hash_make(cmd->pool); + } + apr_hash_set(cfg->priorities, ctype, strlen(ctype), priority); + return NULL; +} + +static const char *h2_conf_set_modern_tls_only(cmd_parms *parms, + void *arg, const char *value) +{ + h2_config *cfg = (h2_config *)h2_config_sget(parms->server); + if (!strcasecmp(value, "On")) { + cfg->modern_tls_only = 1; + return NULL; + } + else if (!strcasecmp(value, "Off")) { + cfg->modern_tls_only = 0; + return NULL; + } + + (void)arg; + return "value must be On or Off"; +} + +static const char *h2_conf_set_upgrade(cmd_parms *parms, + void *arg, const char *value) +{ + h2_config *cfg = (h2_config *)h2_config_sget(parms->server); + if (!strcasecmp(value, "On")) { + cfg->h2_upgrade = 1; + return NULL; + } + else if (!strcasecmp(value, "Off")) { + cfg->h2_upgrade = 0; + return NULL; + } + + (void)arg; + return "value must be On or Off"; +} + +static const char *h2_conf_set_tls_warmup_size(cmd_parms *parms, + void *arg, const char *value) +{ + h2_config *cfg = (h2_config *)h2_config_sget(parms->server); + cfg->tls_warmup_size = apr_atoi64(value); + (void)arg; + return NULL; +} + +static const char *h2_conf_set_tls_cooldown_secs(cmd_parms *parms, + void *arg, const char *value) +{ + h2_config *cfg = (h2_config *)h2_config_sget(parms->server); + cfg->tls_cooldown_secs = (int)apr_atoi64(value); + (void)arg; + return NULL; +} + +static const char *h2_conf_set_push_diary_size(cmd_parms *parms, + void *arg, const char *value) +{ + h2_config *cfg = (h2_config *)h2_config_sget(parms->server); 
+ (void)arg; + cfg->push_diary_size = (int)apr_atoi64(value); + if (cfg->push_diary_size < 0) { + return "value must be >= 0"; + } + if (cfg->push_diary_size > 0 && (cfg->push_diary_size & (cfg->push_diary_size-1))) { + return "value must a power of 2"; + } + if (cfg->push_diary_size > (1 << 15)) { + return "value must <= 65536"; + } + return NULL; +} + +static const char *h2_conf_set_copy_files(cmd_parms *parms, + void *arg, const char *value) +{ + h2_config *cfg = (h2_config *)arg; + if (!strcasecmp(value, "On")) { + cfg->copy_files = 1; + return NULL; + } + else if (!strcasecmp(value, "Off")) { + cfg->copy_files = 0; + return NULL; + } + + (void)arg; + return "value must be On or Off"; +} + +static void add_push(apr_pool_t *pool, h2_config *conf, h2_push_res *push) +{ + h2_push_res *new; + if (!conf->push_list) { + conf->push_list = apr_array_make(pool, 10, sizeof(*push)); + } + new = apr_array_push(conf->push_list); + new->uri_ref = push->uri_ref; + new->critical = push->critical; +} + +static const char *h2_conf_add_push_res(cmd_parms *cmd, void *dirconf, + const char *arg1, const char *arg2, + const char *arg3) +{ + h2_config *dconf = (h2_config*)dirconf ; + h2_config *sconf = (h2_config*)h2_config_sget(cmd->server); + h2_push_res push; + const char *last = arg3; + + memset(&push, 0, sizeof(push)); + if (!strcasecmp("add", arg1)) { + push.uri_ref = arg2; + } + else { + push.uri_ref = arg1; + last = arg2; + if (arg3) { + return "too many parameter"; + } + } + + if (last) { + if (!strcasecmp("critical", last)) { + push.critical = 1; + } + else { + return "unknown last parameter"; + } + } + + /* server command? set both */ + if (cmd->path == NULL) { + add_push(cmd->pool, sconf, &push); + add_push(cmd->pool, dconf, &push); + } + else { + add_push(cmd->pool, dconf, &push); + } + + return NULL; +} + +static const char *h2_conf_set_early_hints(cmd_parms *parms, + void *arg, const char *value) +{ + h2_config *cfg = (h2_config *)h2_config_sget(parms->server); + if (!strcasecmp(value, "On")) { + cfg->early_hints = 1; + return NULL; + } + else if (!strcasecmp(value, "Off")) { + cfg->early_hints = 0; + return NULL; + } + + (void)arg; + return "value must be On or Off"; +} + +void h2_get_num_workers(server_rec *s, int *minw, int *maxw) +{ + int threads_per_child = 0; + const h2_config *config = h2_config_sget(s); + + *minw = h2_config_geti(config, H2_CONF_MIN_WORKERS); + *maxw = h2_config_geti(config, H2_CONF_MAX_WORKERS); + ap_mpm_query(AP_MPMQ_MAX_THREADS, &threads_per_child); + + if (*minw <= 0) { + *minw = threads_per_child; + } + if (*maxw <= 0) { + /* As a default, this seems to work quite well under mpm_event. + * For people enabling http2 under mpm_prefork, start 4 threads unless + * configured otherwise. People get unhappy if their http2 requests are + * blocking each other. 
*/ + *maxw = 3 * (*minw) / 2; + if (*maxw < 4) { + *maxw = 4; + } + } +} + +#define AP_END_CMD AP_INIT_TAKE1(NULL, NULL, NULL, RSRC_CONF, NULL) + +const command_rec h2_cmds[] = { + AP_INIT_TAKE1("H2MaxSessionStreams", h2_conf_set_max_streams, NULL, + RSRC_CONF, "maximum number of open streams per session"), + AP_INIT_TAKE1("H2WindowSize", h2_conf_set_window_size, NULL, + RSRC_CONF, "window size on client DATA"), + AP_INIT_TAKE1("H2MinWorkers", h2_conf_set_min_workers, NULL, + RSRC_CONF, "minimum number of worker threads per child"), + AP_INIT_TAKE1("H2MaxWorkers", h2_conf_set_max_workers, NULL, + RSRC_CONF, "maximum number of worker threads per child"), + AP_INIT_TAKE1("H2MaxWorkerIdleSeconds", h2_conf_set_max_worker_idle_secs, NULL, + RSRC_CONF, "maximum number of idle seconds before a worker shuts down"), + AP_INIT_TAKE1("H2StreamMaxMemSize", h2_conf_set_stream_max_mem_size, NULL, + RSRC_CONF, "maximum number of bytes buffered in memory for a stream"), + AP_INIT_TAKE1("H2AltSvc", h2_add_alt_svc, NULL, + RSRC_CONF, "adds an Alt-Svc for this server"), + AP_INIT_TAKE1("H2AltSvcMaxAge", h2_conf_set_alt_svc_max_age, NULL, + RSRC_CONF, "set the maximum age (in seconds) that client can rely on alt-svc information"), + AP_INIT_TAKE1("H2SerializeHeaders", h2_conf_set_serialize_headers, NULL, + RSRC_CONF, "on to enable header serialization for compatibility"), + AP_INIT_TAKE1("H2ModernTLSOnly", h2_conf_set_modern_tls_only, NULL, + RSRC_CONF, "off to not impose RFC 7540 restrictions on TLS"), + AP_INIT_TAKE1("H2Upgrade", h2_conf_set_upgrade, NULL, + RSRC_CONF, "on to allow HTTP/1 Upgrades to h2/h2c"), + AP_INIT_TAKE1("H2Direct", h2_conf_set_direct, NULL, + RSRC_CONF, "on to enable direct HTTP/2 mode"), + AP_INIT_TAKE1("H2SessionExtraFiles", h2_conf_set_session_extra_files, NULL, + RSRC_CONF, "number of extra file a session might keep open (obsolete)"), + AP_INIT_TAKE1("H2TLSWarmUpSize", h2_conf_set_tls_warmup_size, NULL, + RSRC_CONF, "number of bytes on TLS connection before doing max writes"), + AP_INIT_TAKE1("H2TLSCoolDownSecs", h2_conf_set_tls_cooldown_secs, NULL, + RSRC_CONF, "seconds of idle time on TLS before shrinking writes"), + AP_INIT_TAKE1("H2Push", h2_conf_set_push, NULL, + RSRC_CONF, "off to disable HTTP/2 server push"), + AP_INIT_TAKE23("H2PushPriority", h2_conf_add_push_priority, NULL, + RSRC_CONF, "define priority of PUSHed resources per content type"), + AP_INIT_TAKE1("H2PushDiarySize", h2_conf_set_push_diary_size, NULL, + RSRC_CONF, "size of push diary"), + AP_INIT_TAKE1("H2CopyFiles", h2_conf_set_copy_files, NULL, + OR_FILEINFO, "on to perform copy of file data"), + AP_INIT_TAKE123("H2PushResource", h2_conf_add_push_res, NULL, + OR_FILEINFO, "add a resource to be pushed in this location/on this server."), + AP_INIT_TAKE1("H2EarlyHints", h2_conf_set_early_hints, NULL, + RSRC_CONF, "on to enable interim status 103 responses"), + AP_END_CMD +}; + + +const h2_config *h2_config_rget(request_rec *r) +{ + h2_config *cfg = (h2_config *)ap_get_module_config(r->per_dir_config, + &http2_module); + return cfg? 
cfg : h2_config_sget(r->server); +} + +const h2_config *h2_config_get(conn_rec *c) +{ + h2_ctx *ctx = h2_ctx_get(c, 0); + + if (ctx) { + if (ctx->config) { + return ctx->config; + } + else if (ctx->server) { + ctx->config = h2_config_sget(ctx->server); + return ctx->config; + } + } + + return h2_config_sget(c->base_server); +} diff --git a/modules/http2/h2_config.h b/modules/http2/h2_config.h new file mode 100644 index 0000000..17d75d6 --- /dev/null +++ b/modules/http2/h2_config.h @@ -0,0 +1,106 @@ +/* Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef __mod_h2__h2_config_h__ +#define __mod_h2__h2_config_h__ + +#undef PACKAGE_VERSION +#undef PACKAGE_TARNAME +#undef PACKAGE_STRING +#undef PACKAGE_NAME +#undef PACKAGE_BUGREPORT + +typedef enum { + H2_CONF_MAX_STREAMS, + H2_CONF_WIN_SIZE, + H2_CONF_MIN_WORKERS, + H2_CONF_MAX_WORKERS, + H2_CONF_MAX_WORKER_IDLE_SECS, + H2_CONF_STREAM_MAX_MEM, + H2_CONF_ALT_SVCS, + H2_CONF_ALT_SVC_MAX_AGE, + H2_CONF_SER_HEADERS, + H2_CONF_DIRECT, + H2_CONF_MODERN_TLS_ONLY, + H2_CONF_UPGRADE, + H2_CONF_TLS_WARMUP_SIZE, + H2_CONF_TLS_COOLDOWN_SECS, + H2_CONF_PUSH, + H2_CONF_PUSH_DIARY_SIZE, + H2_CONF_COPY_FILES, + H2_CONF_EARLY_HINTS, +} h2_config_var_t; + +struct apr_hash_t; +struct h2_priority; +struct h2_push_res; + +typedef struct h2_push_res { + const char *uri_ref; + int critical; +} h2_push_res; + +/* Apache httpd module configuration for h2. 
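+ *
+ * An illustrative (not normative) mapping from configuration directives to
+ * the fields below:
+ *
+ *     H2MaxSessionStreams 100              -> h2_max_streams
+ *     H2WindowSize        65535            -> h2_window_size
+ *     H2Push              on               -> h2_push
+ *     H2PushPriority      text/css 32      -> priorities ("After", weight 32)
+ *     H2PushResource      /style.css critical  -> push_list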
*/ +typedef struct h2_config { + const char *name; + int h2_max_streams; /* max concurrent # streams (http2) */ + int h2_window_size; /* stream window size (http2) */ + int min_workers; /* min # of worker threads/child */ + int max_workers; /* max # of worker threads/child */ + int max_worker_idle_secs; /* max # of idle seconds for worker */ + int stream_max_mem_size; /* max # bytes held in memory/stream */ + apr_array_header_t *alt_svcs; /* h2_alt_svc specs for this server */ + int alt_svc_max_age; /* seconds clients can rely on alt-svc info*/ + int serialize_headers; /* Use serialized HTTP/1.1 headers for + processing, better compatibility */ + int h2_direct; /* if mod_h2 is active directly */ + int modern_tls_only; /* Accept only modern TLS in HTTP/2 connections */ + int h2_upgrade; /* Allow HTTP/1 upgrade to h2/h2c */ + apr_int64_t tls_warmup_size; /* Amount of TLS data to send before going full write size */ + int tls_cooldown_secs; /* Seconds of idle time before going back to small TLS records */ + int h2_push; /* if HTTP/2 server push is enabled */ + struct apr_hash_t *priorities;/* map of content-type to h2_priority records */ + + int push_diary_size; /* # of entries in push diary */ + int copy_files; /* if files shall be copied vs setaside on output */ + apr_array_header_t *push_list;/* list of h2_push_res configurations */ + int early_hints; /* support status code 103 */ +} h2_config; + + +void *h2_config_create_dir(apr_pool_t *pool, char *x); +void *h2_config_merge_dir(apr_pool_t *pool, void *basev, void *addv); +void *h2_config_create_svr(apr_pool_t *pool, server_rec *s); +void *h2_config_merge_svr(apr_pool_t *pool, void *basev, void *addv); + +extern const command_rec h2_cmds[]; + +const h2_config *h2_config_get(conn_rec *c); +const h2_config *h2_config_sget(server_rec *s); +const h2_config *h2_config_rget(request_rec *r); + +int h2_config_geti(const h2_config *conf, h2_config_var_t var); +apr_int64_t h2_config_geti64(const h2_config *conf, h2_config_var_t var); + +void h2_get_num_workers(server_rec *s, int *minw, int *maxw); + +void h2_config_init(apr_pool_t *pool); + +const struct h2_priority *h2_config_get_priority(const h2_config *conf, + const char *content_type); + +#endif /* __mod_h2__h2_config_h__ */ + diff --git a/modules/http2/h2_conn.c b/modules/http2/h2_conn.c new file mode 100644 index 0000000..88da2ba --- /dev/null +++ b/modules/http2/h2_conn.c @@ -0,0 +1,370 @@ +/* Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include <assert.h> +#include <apr_strings.h> + +#include <ap_mpm.h> + +#include <httpd.h> +#include <http_core.h> +#include <http_config.h> +#include <http_log.h> +#include <http_connection.h> +#include <http_protocol.h> +#include <http_request.h> + +#include <mpm_common.h> + +#include "h2_private.h" +#include "h2.h" +#include "h2_config.h" +#include "h2_ctx.h" +#include "h2_filter.h" +#include "h2_mplx.h" +#include "h2_session.h" +#include "h2_stream.h" +#include "h2_h2.h" +#include "h2_task.h" +#include "h2_workers.h" +#include "h2_conn.h" +#include "h2_version.h" + +static struct h2_workers *workers; + +static h2_mpm_type_t mpm_type = H2_MPM_UNKNOWN; +static module *mpm_module; +static int async_mpm; +static int mpm_supported = 1; +static apr_socket_t *dummy_socket; + +static void check_modules(int force) +{ + static int checked = 0; + int i; + + if (force || !checked) { + for (i = 0; ap_loaded_modules[i]; ++i) { + module *m = ap_loaded_modules[i]; + + if (!strcmp("event.c", m->name)) { + mpm_type = H2_MPM_EVENT; + mpm_module = m; + break; + } + else if (!strcmp("motorz.c", m->name)) { + mpm_type = H2_MPM_MOTORZ; + mpm_module = m; + break; + } + else if (!strcmp("mpm_netware.c", m->name)) { + mpm_type = H2_MPM_NETWARE; + mpm_module = m; + break; + } + else if (!strcmp("prefork.c", m->name)) { + mpm_type = H2_MPM_PREFORK; + mpm_module = m; + /* While http2 can work really well on prefork, it collides + * today's use case for prefork: runnning single-thread app engines + * like php. If we restrict h2_workers to 1 per process, php will + * work fine, but browser will be limited to 1 active request at a + * time. */ + mpm_supported = 0; + break; + } + else if (!strcmp("simple_api.c", m->name)) { + mpm_type = H2_MPM_SIMPLE; + mpm_module = m; + mpm_supported = 0; + break; + } + else if (!strcmp("mpm_winnt.c", m->name)) { + mpm_type = H2_MPM_WINNT; + mpm_module = m; + break; + } + else if (!strcmp("worker.c", m->name)) { + mpm_type = H2_MPM_WORKER; + mpm_module = m; + break; + } + } + checked = 1; + } +} + +apr_status_t h2_conn_child_init(apr_pool_t *pool, server_rec *s) +{ + const h2_config *config = h2_config_sget(s); + apr_status_t status = APR_SUCCESS; + int minw, maxw; + int max_threads_per_child = 0; + int idle_secs = 0; + + check_modules(1); + ap_mpm_query(AP_MPMQ_MAX_THREADS, &max_threads_per_child); + + status = ap_mpm_query(AP_MPMQ_IS_ASYNC, &async_mpm); + if (status != APR_SUCCESS) { + /* some MPMs do not implemnent this */ + async_mpm = 0; + status = APR_SUCCESS; + } + + h2_config_init(pool); + + h2_get_num_workers(s, &minw, &maxw); + + idle_secs = h2_config_geti(config, H2_CONF_MAX_WORKER_IDLE_SECS); + ap_log_error(APLOG_MARK, APLOG_TRACE3, 0, s, + "h2_workers: min=%d max=%d, mthrpchild=%d, idle_secs=%d", + minw, maxw, max_threads_per_child, idle_secs); + workers = h2_workers_create(s, pool, minw, maxw, idle_secs); + + ap_register_input_filter("H2_IN", h2_filter_core_input, + NULL, AP_FTYPE_CONNECTION); + + status = h2_mplx_child_init(pool, s); + + if (status == APR_SUCCESS) { + status = apr_socket_create(&dummy_socket, APR_INET, SOCK_STREAM, + APR_PROTO_TCP, pool); + } + + return status; +} + +h2_mpm_type_t h2_conn_mpm_type(void) +{ + check_modules(0); + return mpm_type; +} + +const char *h2_conn_mpm_name(void) +{ + check_modules(0); + return mpm_module? 
mpm_module->name : "unknown"; +} + +int h2_mpm_supported(void) +{ + check_modules(0); + return mpm_supported; +} + +static module *h2_conn_mpm_module(void) +{ + check_modules(0); + return mpm_module; +} + +apr_status_t h2_conn_setup(h2_ctx *ctx, conn_rec *c, request_rec *r) +{ + h2_session *session; + apr_status_t status; + + if (!workers) { + ap_log_cerror(APLOG_MARK, APLOG_ERR, 0, c, APLOGNO(02911) + "workers not initialized"); + return APR_EGENERAL; + } + + if (r) { + status = h2_session_rcreate(&session, r, ctx, workers); + } + else { + status = h2_session_create(&session, c, ctx, workers); + } + + if (status == APR_SUCCESS) { + h2_ctx_session_set(ctx, session); + } + return status; +} + +apr_status_t h2_conn_run(struct h2_ctx *ctx, conn_rec *c) +{ + apr_status_t status; + int mpm_state = 0; + h2_session *session = h2_ctx_session_get(ctx); + + ap_assert(session); + do { + if (c->cs) { + c->cs->sense = CONN_SENSE_DEFAULT; + c->cs->state = CONN_STATE_HANDLER; + } + + status = h2_session_process(session, async_mpm); + + if (APR_STATUS_IS_EOF(status)) { + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, c, + H2_SSSN_LOG(APLOGNO(03045), session, + "process, closing conn")); + c->keepalive = AP_CONN_CLOSE; + } + else { + c->keepalive = AP_CONN_KEEPALIVE; + } + + if (ap_mpm_query(AP_MPMQ_MPM_STATE, &mpm_state)) { + break; + } + } while (!async_mpm + && c->keepalive == AP_CONN_KEEPALIVE + && mpm_state != AP_MPMQ_STOPPING); + + if (c->cs) { + switch (session->state) { + case H2_SESSION_ST_INIT: + case H2_SESSION_ST_IDLE: + case H2_SESSION_ST_BUSY: + case H2_SESSION_ST_WAIT: + c->cs->state = CONN_STATE_WRITE_COMPLETION; + break; + case H2_SESSION_ST_CLEANUP: + case H2_SESSION_ST_DONE: + default: + c->cs->state = CONN_STATE_LINGER; + break; + } + } + + return APR_SUCCESS; +} + +apr_status_t h2_conn_pre_close(struct h2_ctx *ctx, conn_rec *c) +{ + h2_session *session = h2_ctx_session_get(ctx); + if (session) { + apr_status_t status = h2_session_pre_close(session, async_mpm); + return (status == APR_SUCCESS)? DONE : status; + } + return DONE; +} + +conn_rec *h2_slave_create(conn_rec *master, int slave_id, apr_pool_t *parent) +{ + apr_allocator_t *allocator; + apr_status_t status; + apr_pool_t *pool; + conn_rec *c; + void *cfg; + module *mpm; + + ap_assert(master); + ap_log_cerror(APLOG_MARK, APLOG_TRACE3, 0, master, + "h2_stream(%ld-%d): create slave", master->id, slave_id); + + /* We create a pool with its own allocator to be used for + * processing a request. This is the only way to have the processing + * independant of its parent pool in the sense that it can work in + * another thread. Also, the new allocator needs its own mutex to + * synchronize sub-pools. 
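+     * The calls below follow the usual APR ownership pattern: the allocator
+     * is created first, the pool is created on top of it, and
+     * apr_allocator_owner_set() then ties the allocator's lifetime to the
+     * pool, so the apr_pool_destroy() in h2_slave_destroy() also releases
+     * the allocator.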
+ */ + apr_allocator_create(&allocator); + apr_allocator_max_free_set(allocator, ap_max_mem_free); + status = apr_pool_create_ex(&pool, parent, NULL, allocator); + if (status != APR_SUCCESS) { + ap_log_cerror(APLOG_MARK, APLOG_ERR, status, master, + APLOGNO(10004) "h2_session(%ld-%d): create slave pool", + master->id, slave_id); + return NULL; + } + apr_allocator_owner_set(allocator, pool); + apr_pool_tag(pool, "h2_slave_conn"); + + c = (conn_rec *) apr_palloc(pool, sizeof(conn_rec)); + if (c == NULL) { + ap_log_cerror(APLOG_MARK, APLOG_ERR, APR_ENOMEM, master, + APLOGNO(02913) "h2_session(%ld-%d): create slave", + master->id, slave_id); + apr_pool_destroy(pool); + return NULL; + } + + memcpy(c, master, sizeof(conn_rec)); + + c->master = master; + c->pool = pool; + c->conn_config = ap_create_conn_config(pool); + c->notes = apr_table_make(pool, 5); + c->input_filters = NULL; + c->output_filters = NULL; + c->bucket_alloc = apr_bucket_alloc_create(pool); + c->data_in_input_filters = 0; + c->data_in_output_filters = 0; + /* prevent mpm_event from making wrong assumptions about this connection, + * like e.g. using its socket for an async read check. */ + c->clogging_input_filters = 1; + c->log = NULL; + c->log_id = apr_psprintf(pool, "%ld-%d", + master->id, slave_id); + c->aborted = 0; + /* We cannot install the master connection socket on the slaves, as + * modules mess with timeouts/blocking of the socket, with + * unwanted side effects to the master connection processing. + * Fortunately, since we never use the slave socket, we can just install + * a single, process-wide dummy and everyone is happy. + */ + ap_set_module_config(c->conn_config, &core_module, dummy_socket); + /* TODO: these should be unique to this thread */ + c->sbh = master->sbh; + /* TODO: not all mpm modules have learned about slave connections yet. + * copy their config from master to slave. + */ + if ((mpm = h2_conn_mpm_module()) != NULL) { + cfg = ap_get_module_config(master->conn_config, mpm); + ap_set_module_config(c->conn_config, mpm, cfg); + } + + ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, c, + "h2_stream(%ld-%d): created slave", master->id, slave_id); + return c; +} + +void h2_slave_destroy(conn_rec *slave) +{ + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, slave, + "h2_stream(%s): destroy slave", + apr_table_get(slave->notes, H2_TASK_ID_NOTE)); + slave->sbh = NULL; + apr_pool_destroy(slave->pool); +} + +apr_status_t h2_slave_run_pre_connection(conn_rec *slave, apr_socket_t *csd) +{ + if (slave->keepalives == 0) { + /* Simulate that we had already a request on this connection. Some + * hooks trigger special behaviour when keepalives is 0. + * (Not necessarily in pre_connection, but later. Set it here, so it + * is in place.) */ + slave->keepalives = 1; + /* We signal that this connection will be closed after the request. + * Which is true in that sense that we throw away all traffic data + * on this slave connection after each requests. Although we might + * reuse internal structures like memory pools. + * The wanted effect of this is that httpd does not try to clean up + * any dangling data on this connection when a request is done. Which + * is unneccessary on a h2 stream. 
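+         * Note that two distinct conn_rec fields are involved here:
+         * keepalives (set to 1 above) counts requests already handled on
+         * the connection, while keepalive (set to AP_CONN_CLOSE below)
+         * carries the keep-alive decision itself.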
+ */ + slave->keepalive = AP_CONN_CLOSE; + return ap_run_pre_connection(slave, csd); + } + return APR_SUCCESS; +} + diff --git a/modules/http2/h2_conn.h b/modules/http2/h2_conn.h new file mode 100644 index 0000000..e45ff31 --- /dev/null +++ b/modules/http2/h2_conn.h @@ -0,0 +1,77 @@ +/* Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef __mod_h2__h2_conn__ +#define __mod_h2__h2_conn__ + +struct h2_ctx; +struct h2_task; + +/** + * Setup the connection and our context for HTTP/2 processing + * + * @param ctx the http2 context to setup + * @param c the connection HTTP/2 is starting on + * @param r the upgrade request that still awaits an answer, optional + */ +apr_status_t h2_conn_setup(struct h2_ctx *ctx, conn_rec *c, request_rec *r); + +/** + * Run the HTTP/2 connection in synchronous fashion. + * Return when the HTTP/2 session is done + * and the connection will close or a fatal error occurred. + * + * @param ctx the http2 context to run + * @return APR_SUCCESS when session is done. + */ +apr_status_t h2_conn_run(struct h2_ctx *ctx, conn_rec *c); + +/** + * The connection is about to close. If we have not send a GOAWAY + * yet, this is the last chance. + */ +apr_status_t h2_conn_pre_close(struct h2_ctx *ctx, conn_rec *c); + +/* Initialize this child process for h2 connection work, + * to be called once during child init before multi processing + * starts. + */ +apr_status_t h2_conn_child_init(apr_pool_t *pool, server_rec *s); + + +typedef enum { + H2_MPM_UNKNOWN, + H2_MPM_WORKER, + H2_MPM_EVENT, + H2_MPM_PREFORK, + H2_MPM_MOTORZ, + H2_MPM_SIMPLE, + H2_MPM_NETWARE, + H2_MPM_WINNT, +} h2_mpm_type_t; + +/* Returns the type of MPM module detected */ +h2_mpm_type_t h2_conn_mpm_type(void); +const char *h2_conn_mpm_name(void); +int h2_mpm_supported(void); + +conn_rec *h2_slave_create(conn_rec *master, int slave_id, apr_pool_t *parent); +void h2_slave_destroy(conn_rec *slave); + +apr_status_t h2_slave_run_pre_connection(conn_rec *slave, apr_socket_t *csd); +void h2_slave_run_connection(conn_rec *slave); + +#endif /* defined(__mod_h2__h2_conn__) */ diff --git a/modules/http2/h2_conn_io.c b/modules/http2/h2_conn_io.c new file mode 100644 index 0000000..eb6ec92 --- /dev/null +++ b/modules/http2/h2_conn_io.c @@ -0,0 +1,389 @@ +/* Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include <assert.h> +#include <apr_strings.h> +#include <ap_mpm.h> + +#include <httpd.h> +#include <http_core.h> +#include <http_log.h> +#include <http_connection.h> +#include <http_request.h> + +#include "h2_private.h" +#include "h2_bucket_eos.h" +#include "h2_config.h" +#include "h2_conn_io.h" +#include "h2_h2.h" +#include "h2_session.h" +#include "h2_util.h" + +#define TLS_DATA_MAX (16*1024) + +/* Calculated like this: assuming MTU 1500 bytes + * 1500 - 40 (IP) - 20 (TCP) - 40 (TCP options) + * - TLS overhead (60-100) + * ~= 1300 bytes */ +#define WRITE_SIZE_INITIAL 1300 + +/* Calculated like this: max TLS record size 16*1024 + * - 40 (IP) - 20 (TCP) - 40 (TCP options) + * - TLS overhead (60-100) + * which seems to create less TCP packets overall + */ +#define WRITE_SIZE_MAX (TLS_DATA_MAX - 100) + + +static void h2_conn_io_bb_log(conn_rec *c, int stream_id, int level, + const char *tag, apr_bucket_brigade *bb) +{ + char buffer[16 * 1024]; + const char *line = "(null)"; + apr_size_t bmax = sizeof(buffer)/sizeof(buffer[0]); + int off = 0; + apr_bucket *b; + + if (bb) { + memset(buffer, 0, bmax--); + for (b = APR_BRIGADE_FIRST(bb); + bmax && (b != APR_BRIGADE_SENTINEL(bb)); + b = APR_BUCKET_NEXT(b)) { + + if (APR_BUCKET_IS_METADATA(b)) { + if (APR_BUCKET_IS_EOS(b)) { + off += apr_snprintf(buffer+off, bmax-off, "eos "); + } + else if (APR_BUCKET_IS_FLUSH(b)) { + off += apr_snprintf(buffer+off, bmax-off, "flush "); + } + else if (AP_BUCKET_IS_EOR(b)) { + off += apr_snprintf(buffer+off, bmax-off, "eor "); + } + else if (H2_BUCKET_IS_H2EOS(b)) { + off += apr_snprintf(buffer+off, bmax-off, "h2eos "); + } + else { + off += apr_snprintf(buffer+off, bmax-off, "meta(unknown) "); + } + } + else { + const char *btype = "data"; + if (APR_BUCKET_IS_FILE(b)) { + btype = "file"; + } + else if (APR_BUCKET_IS_PIPE(b)) { + btype = "pipe"; + } + else if (APR_BUCKET_IS_SOCKET(b)) { + btype = "socket"; + } + else if (APR_BUCKET_IS_HEAP(b)) { + btype = "heap"; + } + else if (APR_BUCKET_IS_TRANSIENT(b)) { + btype = "transient"; + } + else if (APR_BUCKET_IS_IMMORTAL(b)) { + btype = "immortal"; + } +#if APR_HAS_MMAP + else if (APR_BUCKET_IS_MMAP(b)) { + btype = "mmap"; + } +#endif + else if (APR_BUCKET_IS_POOL(b)) { + btype = "pool"; + } + + off += apr_snprintf(buffer+off, bmax-off, "%s[%ld] ", + btype, + (long)(b->length == ((apr_size_t)-1)? + -1 : b->length)); + } + } + line = *buffer? 
buffer : "(empty)"; + } + /* Intentional no APLOGNO */ + ap_log_cerror(APLOG_MARK, level, 0, c, "h2_session(%ld)-%s: %s", + c->id, tag, line); + +} + +apr_status_t h2_conn_io_init(h2_conn_io *io, conn_rec *c, + const h2_config *cfg) +{ + io->c = c; + io->output = apr_brigade_create(c->pool, c->bucket_alloc); + io->is_tls = h2_h2_is_tls(c); + io->buffer_output = io->is_tls; + io->flush_threshold = (apr_size_t)h2_config_geti64(cfg, H2_CONF_STREAM_MAX_MEM); + + if (io->is_tls) { + /* This is what we start with, + * see https://issues.apache.org/jira/browse/TS-2503 + */ + io->warmup_size = h2_config_geti64(cfg, H2_CONF_TLS_WARMUP_SIZE); + io->cooldown_usecs = (h2_config_geti(cfg, H2_CONF_TLS_COOLDOWN_SECS) + * APR_USEC_PER_SEC); + io->write_size = (io->cooldown_usecs > 0? + WRITE_SIZE_INITIAL : WRITE_SIZE_MAX); + } + else { + io->warmup_size = 0; + io->cooldown_usecs = 0; + io->write_size = 0; + } + + if (APLOGctrace1(c)) { + ap_log_cerror(APLOG_MARK, APLOG_TRACE4, 0, io->c, + "h2_conn_io(%ld): init, buffering=%d, warmup_size=%ld, " + "cd_secs=%f", io->c->id, io->buffer_output, + (long)io->warmup_size, + ((float)io->cooldown_usecs/APR_USEC_PER_SEC)); + } + + return APR_SUCCESS; +} + +static void append_scratch(h2_conn_io *io) +{ + if (io->scratch && io->slen > 0) { + apr_bucket *b = apr_bucket_heap_create(io->scratch, io->slen, + apr_bucket_free, + io->c->bucket_alloc); + APR_BRIGADE_INSERT_TAIL(io->output, b); + io->scratch = NULL; + io->slen = io->ssize = 0; + } +} + +static apr_size_t assure_scratch_space(h2_conn_io *io) { + apr_size_t remain = io->ssize - io->slen; + if (io->scratch && remain == 0) { + append_scratch(io); + } + if (!io->scratch) { + /* we control the size and it is larger than what buckets usually + * allocate. */ + io->scratch = apr_bucket_alloc(io->write_size, io->c->bucket_alloc); + io->ssize = io->write_size; + io->slen = 0; + remain = io->ssize; + } + return remain; +} + +static apr_status_t read_to_scratch(h2_conn_io *io, apr_bucket *b) +{ + apr_status_t status; + const char *data; + apr_size_t len; + + if (!b->length) { + return APR_SUCCESS; + } + + ap_assert(b->length <= (io->ssize - io->slen)); + if (APR_BUCKET_IS_FILE(b)) { + apr_bucket_file *f = (apr_bucket_file *)b->data; + apr_file_t *fd = f->fd; + apr_off_t offset = b->start; + apr_size_t len = b->length; + + /* file buckets will either mmap (which we do not want) or + * read 8000 byte chunks and split themself. However, we do + * know *exactly* how many bytes we need where. 
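+         * For example, a 12000 byte file bucket would normally be read as
+         * an 8000 byte chunk plus a split remainder; here a single
+         * apr_file_seek()/apr_file_read() pulls exactly b->length bytes
+         * straight into io->scratch + io->slen.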
+ */ + status = apr_file_seek(fd, APR_SET, &offset); + if (status != APR_SUCCESS) { + return status; + } + status = apr_file_read(fd, io->scratch + io->slen, &len); + if (status != APR_SUCCESS && status != APR_EOF) { + return status; + } + io->slen += len; + } + else { + status = apr_bucket_read(b, &data, &len, APR_BLOCK_READ); + if (status == APR_SUCCESS) { + memcpy(io->scratch+io->slen, data, len); + io->slen += len; + } + } + return status; +} + +static void check_write_size(h2_conn_io *io) +{ + if (io->write_size > WRITE_SIZE_INITIAL + && (io->cooldown_usecs > 0) + && (apr_time_now() - io->last_write) >= io->cooldown_usecs) { + /* long time not written, reset write size */ + io->write_size = WRITE_SIZE_INITIAL; + io->bytes_written = 0; + } + else if (io->write_size < WRITE_SIZE_MAX + && io->bytes_written >= io->warmup_size) { + /* connection is hot, use max size */ + io->write_size = WRITE_SIZE_MAX; + } +} + +static apr_status_t pass_output(h2_conn_io *io, int flush) +{ + conn_rec *c = io->c; + apr_bucket_brigade *bb = io->output; + apr_bucket *b; + apr_off_t bblen; + apr_status_t status; + + append_scratch(io); + if (flush && !io->is_flushed) { + b = apr_bucket_flush_create(c->bucket_alloc); + APR_BRIGADE_INSERT_TAIL(bb, b); + } + + if (APR_BRIGADE_EMPTY(bb)) { + return APR_SUCCESS; + } + + ap_update_child_status(c->sbh, SERVER_BUSY_WRITE, NULL); + apr_brigade_length(bb, 0, &bblen); + h2_conn_io_bb_log(c, 0, APLOG_TRACE2, "out", bb); + + status = ap_pass_brigade(c->output_filters, bb); + if (status == APR_SUCCESS) { + io->bytes_written += (apr_size_t)bblen; + io->last_write = apr_time_now(); + if (flush) { + io->is_flushed = 1; + } + } + apr_brigade_cleanup(bb); + + if (status != APR_SUCCESS) { + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, c, APLOGNO(03044) + "h2_conn_io(%ld): pass_out brigade %ld bytes", + c->id, (long)bblen); + } + return status; +} + +int h2_conn_io_needs_flush(h2_conn_io *io) +{ + if (!io->is_flushed) { + apr_off_t len = h2_brigade_mem_size(io->output); + if (len > io->flush_threshold) { + return 1; + } + /* if we do not exceed flush length due to memory limits, + * we want at least flush when we have that amount of data. */ + apr_brigade_length(io->output, 0, &len); + return len > (4 * io->flush_threshold); + } + return 0; +} + +apr_status_t h2_conn_io_flush(h2_conn_io *io) +{ + apr_status_t status; + status = pass_output(io, 1); + check_write_size(io); + return status; +} + +apr_status_t h2_conn_io_write(h2_conn_io *io, const char *data, size_t length) +{ + apr_status_t status = APR_SUCCESS; + apr_size_t remain; + + if (length > 0) { + io->is_flushed = 0; + } + + if (io->buffer_output) { + while (length > 0) { + remain = assure_scratch_space(io); + if (remain >= length) { + memcpy(io->scratch + io->slen, data, length); + io->slen += length; + length = 0; + } + else { + memcpy(io->scratch + io->slen, data, remain); + io->slen += remain; + data += remain; + length -= remain; + } + } + } + else { + status = apr_brigade_write(io->output, NULL, NULL, data, length); + } + return status; +} + +apr_status_t h2_conn_io_pass(h2_conn_io *io, apr_bucket_brigade *bb) +{ + apr_bucket *b; + apr_status_t status = APR_SUCCESS; + + if (!APR_BRIGADE_EMPTY(bb)) { + io->is_flushed = 0; + } + + while (!APR_BRIGADE_EMPTY(bb) && status == APR_SUCCESS) { + b = APR_BRIGADE_FIRST(bb); + + if (APR_BUCKET_IS_METADATA(b)) { + /* need to finish any open scratch bucket, as meta data + * needs to be forward "in order". 
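+             * For example, a FLUSH or EOS bucket must not overtake bytes
+             * still sitting in io->scratch, so the scratch bucket is
+             * appended to io->output first and only then is the metadata
+             * bucket moved over.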
*/ + append_scratch(io); + APR_BUCKET_REMOVE(b); + APR_BRIGADE_INSERT_TAIL(io->output, b); + } + else if (io->buffer_output) { + apr_size_t remain = assure_scratch_space(io); + if (b->length > remain) { + apr_bucket_split(b, remain); + if (io->slen == 0) { + /* complete write_size bucket, append unchanged */ + APR_BUCKET_REMOVE(b); + APR_BRIGADE_INSERT_TAIL(io->output, b); + continue; + } + } + else { + /* bucket fits in remain, copy to scratch */ + status = read_to_scratch(io, b); + apr_bucket_delete(b); + continue; + } + } + else { + /* no buffering, forward buckets setaside on flush */ + if (APR_BUCKET_IS_TRANSIENT(b)) { + apr_bucket_setaside(b, io->c->pool); + } + APR_BUCKET_REMOVE(b); + APR_BRIGADE_INSERT_TAIL(io->output, b); + } + } + return status; +} + diff --git a/modules/http2/h2_conn_io.h b/modules/http2/h2_conn_io.h new file mode 100644 index 0000000..2c3be1c --- /dev/null +++ b/modules/http2/h2_conn_io.h @@ -0,0 +1,77 @@ +/* Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef __mod_h2__h2_conn_io__ +#define __mod_h2__h2_conn_io__ + +struct h2_config; +struct h2_session; + +/* h2_io is the basic handler of a httpd connection. It keeps two brigades, + * one for input, one for output and works with the installed connection + * filters. + * The read is done via a callback function, so that input can be processed + * directly without copying. + */ +typedef struct { + conn_rec *c; + apr_bucket_brigade *output; + + int is_tls; + apr_time_t cooldown_usecs; + apr_int64_t warmup_size; + + apr_size_t write_size; + apr_time_t last_write; + apr_int64_t bytes_read; + apr_int64_t bytes_written; + + int buffer_output; + apr_size_t flush_threshold; + unsigned int is_flushed : 1; + + char *scratch; + apr_size_t ssize; + apr_size_t slen; +} h2_conn_io; + +apr_status_t h2_conn_io_init(h2_conn_io *io, conn_rec *c, + const struct h2_config *cfg); + +/** + * Append data to the buffered output. + * @param buf the data to append + * @param length the length of the data to append + */ +apr_status_t h2_conn_io_write(h2_conn_io *io, + const char *buf, + size_t length); + +apr_status_t h2_conn_io_pass(h2_conn_io *io, apr_bucket_brigade *bb); + +/** + * Pass any buffered data on to the connection output filters. + * @param io the connection io + * @param flush if a flush bucket should be appended to any output + */ +apr_status_t h2_conn_io_flush(h2_conn_io *io); + +/** + * Check if the buffered amount of data needs flushing. 
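+ * Returns non-zero when enough output has been buffered (relative to the
+ * configured flush threshold), and 0 if everything has already been flushed.
+ * A minimal usage sketch:
+ *
+ *     if (h2_conn_io_needs_flush(io)) {
+ *         status = h2_conn_io_flush(io);
+ *     }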
+ */ +int h2_conn_io_needs_flush(h2_conn_io *io); + +#endif /* defined(__mod_h2__h2_conn_io__) */ diff --git a/modules/http2/h2_ctx.c b/modules/http2/h2_ctx.c new file mode 100644 index 0000000..d5ccc24 --- /dev/null +++ b/modules/http2/h2_ctx.c @@ -0,0 +1,121 @@ +/* Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include <assert.h> + +#include <httpd.h> +#include <http_core.h> +#include <http_config.h> + +#include "h2_private.h" +#include "h2_session.h" +#include "h2_task.h" +#include "h2_ctx.h" + +static h2_ctx *h2_ctx_create(const conn_rec *c) +{ + h2_ctx *ctx = apr_pcalloc(c->pool, sizeof(h2_ctx)); + ap_assert(ctx); + ap_set_module_config(c->conn_config, &http2_module, ctx); + h2_ctx_server_set(ctx, c->base_server); + return ctx; +} + +void h2_ctx_clear(const conn_rec *c) +{ + ap_assert(c); + ap_set_module_config(c->conn_config, &http2_module, NULL); +} + +h2_ctx *h2_ctx_create_for(const conn_rec *c, h2_task *task) +{ + h2_ctx *ctx = h2_ctx_create(c); + if (ctx) { + ctx->task = task; + } + return ctx; +} + +h2_ctx *h2_ctx_get(const conn_rec *c, int create) +{ + h2_ctx *ctx = (h2_ctx*)ap_get_module_config(c->conn_config, &http2_module); + if (ctx == NULL && create) { + ctx = h2_ctx_create(c); + } + return ctx; +} + +h2_ctx *h2_ctx_rget(const request_rec *r) +{ + return h2_ctx_get(r->connection, 0); +} + +const char *h2_ctx_protocol_get(const conn_rec *c) +{ + h2_ctx *ctx; + if (c->master) { + c = c->master; + } + ctx = (h2_ctx*)ap_get_module_config(c->conn_config, &http2_module); + return ctx? ctx->protocol : NULL; +} + +h2_ctx *h2_ctx_protocol_set(h2_ctx *ctx, const char *proto) +{ + ctx->protocol = proto; + return ctx; +} + +h2_session *h2_ctx_session_get(h2_ctx *ctx) +{ + return ctx? ctx->session : NULL; +} + +void h2_ctx_session_set(h2_ctx *ctx, struct h2_session *session) +{ + ctx->session = session; +} + +server_rec *h2_ctx_server_get(h2_ctx *ctx) +{ + return ctx? ctx->server : NULL; +} + +h2_ctx *h2_ctx_server_set(h2_ctx *ctx, server_rec *s) +{ + ctx->server = s; + return ctx; +} + +int h2_ctx_is_task(h2_ctx *ctx) +{ + return ctx && ctx->task; +} + +h2_task *h2_ctx_get_task(h2_ctx *ctx) +{ + return ctx? ctx->task : NULL; +} + +h2_task *h2_ctx_cget_task(conn_rec *c) +{ + return h2_ctx_get_task(h2_ctx_get(c, 0)); +} + +h2_task *h2_ctx_rget_task(request_rec *r) +{ + return h2_ctx_get_task(h2_ctx_rget(r)); +} diff --git a/modules/http2/h2_ctx.h b/modules/http2/h2_ctx.h new file mode 100644 index 0000000..cb111c9 --- /dev/null +++ b/modules/http2/h2_ctx.h @@ -0,0 +1,78 @@ +/* Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. 
+ * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef __mod_h2__h2_ctx__ +#define __mod_h2__h2_ctx__ + +struct h2_session; +struct h2_task; +struct h2_config; + +/** + * The h2 module context associated with a connection. + * + * It keeps track of the different types of connections: + * - those from clients that use HTTP/2 protocol + * - those from clients that do not use HTTP/2 + * - those created by ourself to perform work on HTTP/2 streams + */ +typedef struct h2_ctx { + const char *protocol; /* the protocol negotiated */ + struct h2_session *session; /* the session established */ + struct h2_task *task; /* the h2_task executing or NULL */ + const char *hostname; /* hostname negotiated via SNI, optional */ + server_rec *server; /* httpd server config selected. */ + const struct h2_config *config; /* effective config in this context */ +} h2_ctx; + +/** + * Get (or create) a h2 context record for this connection. + * @param c the connection to look at + * @param create != 0 iff missing context shall be created + * @return h2 context of this connection + */ +h2_ctx *h2_ctx_get(const conn_rec *c, int create); +void h2_ctx_clear(const conn_rec *c); + +h2_ctx *h2_ctx_rget(const request_rec *r); +h2_ctx *h2_ctx_create_for(const conn_rec *c, struct h2_task *task); + + +/* Set the h2 protocol established on this connection context or + * NULL when other protocols are in place. + */ +h2_ctx *h2_ctx_protocol_set(h2_ctx *ctx, const char *proto); + +/* Set the server_rec relevant for this context. + */ +h2_ctx *h2_ctx_server_set(h2_ctx *ctx, server_rec *s); +server_rec *h2_ctx_server_get(h2_ctx *ctx); + +struct h2_session *h2_ctx_session_get(h2_ctx *ctx); +void h2_ctx_session_set(h2_ctx *ctx, struct h2_session *session); + +/** + * Get the h2 protocol negotiated for this connection, or NULL. + */ +const char *h2_ctx_protocol_get(const conn_rec *c); + +int h2_ctx_is_task(h2_ctx *ctx); + +struct h2_task *h2_ctx_get_task(h2_ctx *ctx); +struct h2_task *h2_ctx_cget_task(conn_rec *c); +struct h2_task *h2_ctx_rget_task(request_rec *r); + +#endif /* defined(__mod_h2__h2_ctx__) */ diff --git a/modules/http2/h2_filter.c b/modules/http2/h2_filter.c new file mode 100644 index 0000000..8b254b1 --- /dev/null +++ b/modules/http2/h2_filter.c @@ -0,0 +1,568 @@ +/* Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include <assert.h> + +#include <apr_strings.h> +#include <httpd.h> +#include <http_core.h> +#include <http_protocol.h> +#include <http_log.h> +#include <http_connection.h> +#include <scoreboard.h> + +#include "h2_private.h" +#include "h2.h" +#include "h2_config.h" +#include "h2_conn_io.h" +#include "h2_ctx.h" +#include "h2_mplx.h" +#include "h2_push.h" +#include "h2_task.h" +#include "h2_stream.h" +#include "h2_request.h" +#include "h2_headers.h" +#include "h2_stream.h" +#include "h2_session.h" +#include "h2_util.h" +#include "h2_version.h" + +#include "h2_filter.h" + +#define UNSET -1 +#define H2MIN(x,y) ((x) < (y) ? (x) : (y)) + +static apr_status_t recv_RAW_DATA(conn_rec *c, h2_filter_cin *cin, + apr_bucket *b, apr_read_type_e block) +{ + h2_session *session = cin->session; + apr_status_t status = APR_SUCCESS; + apr_size_t len; + const char *data; + ssize_t n; + + status = apr_bucket_read(b, &data, &len, block); + + while (status == APR_SUCCESS && len > 0) { + n = nghttp2_session_mem_recv(session->ngh2, (const uint8_t *)data, len); + + ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, session->c, + H2_SSSN_MSG(session, "fed %ld bytes to nghttp2, %ld read"), + (long)len, (long)n); + if (n < 0) { + if (nghttp2_is_fatal((int)n)) { + h2_session_event(session, H2_SESSION_EV_PROTO_ERROR, + (int)n, nghttp2_strerror((int)n)); + status = APR_EGENERAL; + } + } + else { + session->io.bytes_read += n; + if (len <= n) { + break; + } + len -= n; + data += n; + } + } + + return status; +} + +static apr_status_t recv_RAW_brigade(conn_rec *c, h2_filter_cin *cin, + apr_bucket_brigade *bb, + apr_read_type_e block) +{ + apr_status_t status = APR_SUCCESS; + apr_bucket* b; + int consumed = 0; + + h2_util_bb_log(c, c->id, APLOG_TRACE2, "RAW_in", bb); + while (status == APR_SUCCESS && !APR_BRIGADE_EMPTY(bb)) { + b = APR_BRIGADE_FIRST(bb); + + if (APR_BUCKET_IS_METADATA(b)) { + /* nop */ + } + else { + status = recv_RAW_DATA(c, cin, b, block); + } + consumed = 1; + apr_bucket_delete(b); + } + + if (!consumed && status == APR_SUCCESS && block == APR_NONBLOCK_READ) { + return APR_EAGAIN; + } + return status; +} + +h2_filter_cin *h2_filter_cin_create(h2_session *session) +{ + h2_filter_cin *cin; + + cin = apr_pcalloc(session->pool, sizeof(*cin)); + if (!cin) { + return NULL; + } + cin->session = session; + return cin; +} + +void h2_filter_cin_timeout_set(h2_filter_cin *cin, apr_interval_time_t timeout) +{ + cin->timeout = timeout; +} + +apr_status_t h2_filter_core_input(ap_filter_t* f, + apr_bucket_brigade* brigade, + ap_input_mode_t mode, + apr_read_type_e block, + apr_off_t readbytes) +{ + h2_filter_cin *cin = f->ctx; + apr_status_t status = APR_SUCCESS; + apr_interval_time_t saved_timeout = UNSET; + const int trace1 = APLOGctrace1(f->c); + + if (trace1) { + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, f->c, + "h2_session(%ld): read, %s, mode=%d, readbytes=%ld", + (long)f->c->id, (block == APR_BLOCK_READ)? + "BLOCK_READ" : "NONBLOCK_READ", mode, (long)readbytes); + } + + if (mode == AP_MODE_INIT || mode == AP_MODE_SPECULATIVE) { + return ap_get_brigade(f->next, brigade, mode, block, readbytes); + } + + if (mode != AP_MODE_READBYTES) { + return (block == APR_BLOCK_READ)? 
APR_SUCCESS : APR_EAGAIN; + } + + if (!cin->bb) { + cin->bb = apr_brigade_create(cin->session->pool, f->c->bucket_alloc); + } + + if (!cin->socket) { + cin->socket = ap_get_conn_socket(f->c); + } + + if (APR_BRIGADE_EMPTY(cin->bb)) { + /* We only do a blocking read when we have no streams to process. So, + * in httpd scoreboard lingo, we are in a KEEPALIVE connection state. + */ + if (block == APR_BLOCK_READ) { + if (cin->timeout > 0) { + apr_socket_timeout_get(cin->socket, &saved_timeout); + apr_socket_timeout_set(cin->socket, cin->timeout); + } + } + status = ap_get_brigade(f->next, cin->bb, AP_MODE_READBYTES, + block, readbytes); + if (saved_timeout != UNSET) { + apr_socket_timeout_set(cin->socket, saved_timeout); + } + } + + switch (status) { + case APR_SUCCESS: + status = recv_RAW_brigade(f->c, cin, cin->bb, block); + break; + case APR_EOF: + case APR_EAGAIN: + case APR_TIMEUP: + if (trace1) { + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, f->c, + "h2_session(%ld): read", f->c->id); + } + break; + default: + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, f->c, APLOGNO(03046) + "h2_session(%ld): error reading", f->c->id); + break; + } + return status; +} + +/******************************************************************************* + * http2 connection status handler + stream out source + ******************************************************************************/ + +typedef struct { + apr_bucket_refcount refcount; + h2_bucket_event_cb *cb; + void *ctx; +} h2_bucket_observer; + +static apr_status_t bucket_read(apr_bucket *b, const char **str, + apr_size_t *len, apr_read_type_e block) +{ + (void)b; + (void)block; + *str = NULL; + *len = 0; + return APR_SUCCESS; +} + +static void bucket_destroy(void *data) +{ + h2_bucket_observer *h = data; + if (apr_bucket_shared_destroy(h)) { + if (h->cb) { + h->cb(h->ctx, H2_BUCKET_EV_BEFORE_DESTROY, NULL); + } + apr_bucket_free(h); + } +} + +apr_bucket * h2_bucket_observer_make(apr_bucket *b, h2_bucket_event_cb *cb, + void *ctx) +{ + h2_bucket_observer *br; + + br = apr_bucket_alloc(sizeof(*br), b->list); + br->cb = cb; + br->ctx = ctx; + + b = apr_bucket_shared_make(b, br, 0, 0); + b->type = &h2_bucket_type_observer; + return b; +} + +apr_bucket * h2_bucket_observer_create(apr_bucket_alloc_t *list, + h2_bucket_event_cb *cb, void *ctx) +{ + apr_bucket *b = apr_bucket_alloc(sizeof(*b), list); + + APR_BUCKET_INIT(b); + b->free = apr_bucket_free; + b->list = list; + b = h2_bucket_observer_make(b, cb, ctx); + return b; +} + +apr_status_t h2_bucket_observer_fire(apr_bucket *b, h2_bucket_event event) +{ + if (H2_BUCKET_IS_OBSERVER(b)) { + h2_bucket_observer *l = (h2_bucket_observer *)b->data; + return l->cb(l->ctx, event, b); + } + return APR_EINVAL; +} + +const apr_bucket_type_t h2_bucket_type_observer = { + "H2OBS", 5, APR_BUCKET_METADATA, + bucket_destroy, + bucket_read, + apr_bucket_setaside_noop, + apr_bucket_split_notimpl, + apr_bucket_shared_copy +}; + +apr_bucket *h2_bucket_observer_beam(struct h2_bucket_beam *beam, + apr_bucket_brigade *dest, + const apr_bucket *src) +{ + if (H2_BUCKET_IS_OBSERVER(src)) { + h2_bucket_observer *l = (h2_bucket_observer *)src->data; + apr_bucket *b = h2_bucket_observer_create(dest->bucket_alloc, + l->cb, l->ctx); + APR_BRIGADE_INSERT_TAIL(dest, b); + l->cb = NULL; + l->ctx = NULL; + h2_bucket_observer_fire(b, H2_BUCKET_EV_BEFORE_MASTER_SEND); + return b; + } + return NULL; +} + +static apr_status_t bbout(apr_bucket_brigade *bb, const char *fmt, ...) 
+ __attribute__((format(printf,2,3))); +static apr_status_t bbout(apr_bucket_brigade *bb, const char *fmt, ...) +{ + va_list args; + apr_status_t rv; + + va_start(args, fmt); + rv = apr_brigade_vprintf(bb, NULL, NULL, fmt, args); + va_end(args); + + return rv; +} + +static void add_settings(apr_bucket_brigade *bb, h2_session *s, int last) +{ + h2_mplx *m = s->mplx; + + bbout(bb, " \"settings\": {\n"); + bbout(bb, " \"SETTINGS_MAX_CONCURRENT_STREAMS\": %d,\n", m->max_streams); + bbout(bb, " \"SETTINGS_MAX_FRAME_SIZE\": %d,\n", 16*1024); + bbout(bb, " \"SETTINGS_INITIAL_WINDOW_SIZE\": %d,\n", + h2_config_geti(s->config, H2_CONF_WIN_SIZE)); + bbout(bb, " \"SETTINGS_ENABLE_PUSH\": %d\n", h2_session_push_enabled(s)); + bbout(bb, " }%s\n", last? "" : ","); +} + +static void add_peer_settings(apr_bucket_brigade *bb, h2_session *s, int last) +{ + bbout(bb, " \"peerSettings\": {\n"); + bbout(bb, " \"SETTINGS_MAX_CONCURRENT_STREAMS\": %d,\n", + nghttp2_session_get_remote_settings(s->ngh2, NGHTTP2_SETTINGS_MAX_CONCURRENT_STREAMS)); + bbout(bb, " \"SETTINGS_MAX_FRAME_SIZE\": %d,\n", + nghttp2_session_get_remote_settings(s->ngh2, NGHTTP2_SETTINGS_MAX_FRAME_SIZE)); + bbout(bb, " \"SETTINGS_INITIAL_WINDOW_SIZE\": %d,\n", + nghttp2_session_get_remote_settings(s->ngh2, NGHTTP2_SETTINGS_INITIAL_WINDOW_SIZE)); + bbout(bb, " \"SETTINGS_ENABLE_PUSH\": %d,\n", + nghttp2_session_get_remote_settings(s->ngh2, NGHTTP2_SETTINGS_ENABLE_PUSH)); + bbout(bb, " \"SETTINGS_HEADER_TABLE_SIZE\": %d,\n", + nghttp2_session_get_remote_settings(s->ngh2, NGHTTP2_SETTINGS_HEADER_TABLE_SIZE)); + bbout(bb, " \"SETTINGS_MAX_HEADER_LIST_SIZE\": %d\n", + nghttp2_session_get_remote_settings(s->ngh2, NGHTTP2_SETTINGS_MAX_HEADER_LIST_SIZE)); + bbout(bb, " }%s\n", last? "" : ","); +} + +typedef struct { + apr_bucket_brigade *bb; + h2_session *s; + int idx; +} stream_ctx_t; + +static int add_stream(h2_stream *stream, void *ctx) +{ + stream_ctx_t *x = ctx; + int32_t flowIn, flowOut; + + flowIn = nghttp2_session_get_stream_effective_local_window_size(x->s->ngh2, stream->id); + flowOut = nghttp2_session_get_stream_remote_window_size(x->s->ngh2, stream->id); + bbout(x->bb, "%s\n \"%d\": {\n", (x->idx? "," : ""), stream->id); + bbout(x->bb, " \"state\": \"%s\",\n", h2_stream_state_str(stream)); + bbout(x->bb, " \"created\": %f,\n", ((double)stream->created)/APR_USEC_PER_SEC); + bbout(x->bb, " \"flowIn\": %d,\n", flowIn); + bbout(x->bb, " \"flowOut\": %d,\n", flowOut); + bbout(x->bb, " \"dataIn\": %"APR_OFF_T_FMT",\n", stream->in_data_octets); + bbout(x->bb, " \"dataOut\": %"APR_OFF_T_FMT"\n", stream->out_data_octets); + bbout(x->bb, " }"); + + ++x->idx; + return 1; +} + +static void add_streams(apr_bucket_brigade *bb, h2_session *s, int last) +{ + stream_ctx_t x; + + x.bb = bb; + x.s = s; + x.idx = 0; + bbout(bb, " \"streams\": {"); + h2_mplx_stream_do(s->mplx, add_stream, &x); + bbout(bb, "\n }%s\n", last? 
"" : ","); +} + +static void add_push(apr_bucket_brigade *bb, h2_session *s, + h2_stream *stream, int last) +{ + h2_push_diary *diary; + apr_status_t status; + + bbout(bb, " \"push\": {\n"); + diary = s->push_diary; + if (diary) { + const char *data; + const char *base64_digest; + apr_size_t len; + + status = h2_push_diary_digest_get(diary, bb->p, 256, + stream->request->authority, + &data, &len); + if (status == APR_SUCCESS) { + base64_digest = h2_util_base64url_encode(data, len, bb->p); + bbout(bb, " \"cacheDigest\": \"%s\",\n", base64_digest); + } + } + bbout(bb, " \"promises\": %d,\n", s->pushes_promised); + bbout(bb, " \"submits\": %d,\n", s->pushes_submitted); + bbout(bb, " \"resets\": %d\n", s->pushes_reset); + bbout(bb, " }%s\n", last? "" : ","); +} + +static void add_in(apr_bucket_brigade *bb, h2_session *s, int last) +{ + bbout(bb, " \"in\": {\n"); + bbout(bb, " \"requests\": %d,\n", s->remote.emitted_count); + bbout(bb, " \"resets\": %d, \n", s->streams_reset); + bbout(bb, " \"frames\": %ld,\n", (long)s->frames_received); + bbout(bb, " \"octets\": %"APR_UINT64_T_FMT"\n", s->io.bytes_read); + bbout(bb, " }%s\n", last? "" : ","); +} + +static void add_out(apr_bucket_brigade *bb, h2_session *s, int last) +{ + bbout(bb, " \"out\": {\n"); + bbout(bb, " \"responses\": %d,\n", s->responses_submitted); + bbout(bb, " \"frames\": %ld,\n", (long)s->frames_sent); + bbout(bb, " \"octets\": %"APR_UINT64_T_FMT"\n", s->io.bytes_written); + bbout(bb, " }%s\n", last? "" : ","); +} + +static void add_stats(apr_bucket_brigade *bb, h2_session *s, + h2_stream *stream, int last) +{ + bbout(bb, " \"stats\": {\n"); + add_in(bb, s, 0); + add_out(bb, s, 0); + add_push(bb, s, stream, 1); + bbout(bb, " }%s\n", last? "" : ","); +} + +static apr_status_t h2_status_insert(h2_task *task, apr_bucket *b) +{ + conn_rec *c = task->c->master; + h2_ctx *h2ctx = h2_ctx_get(c, 0); + h2_session *session; + h2_stream *stream; + apr_bucket_brigade *bb; + apr_bucket *e; + int32_t connFlowIn, connFlowOut; + + + if (!h2ctx || (session = h2_ctx_session_get(h2ctx)) == NULL) { + return APR_SUCCESS; + } + + stream = h2_session_stream_get(session, task->stream_id); + if (!stream) { + /* stream already done */ + return APR_SUCCESS; + } + + bb = apr_brigade_create(stream->pool, c->bucket_alloc); + + connFlowIn = nghttp2_session_get_effective_local_window_size(session->ngh2); + connFlowOut = nghttp2_session_get_remote_window_size(session->ngh2); + + bbout(bb, "{\n"); + bbout(bb, " \"version\": \"draft-01\",\n"); + add_settings(bb, session, 0); + add_peer_settings(bb, session, 0); + bbout(bb, " \"connFlowIn\": %d,\n", connFlowIn); + bbout(bb, " \"connFlowOut\": %d,\n", connFlowOut); + bbout(bb, " \"sentGoAway\": %d,\n", session->local.shutdown); + + add_streams(bb, session, 0); + + add_stats(bb, session, stream, 1); + bbout(bb, "}\n"); + + while ((e = APR_BRIGADE_FIRST(bb)) != APR_BRIGADE_SENTINEL(bb)) { + APR_BUCKET_REMOVE(e); + APR_BUCKET_INSERT_AFTER(b, e); + b = e; + } + apr_brigade_destroy(bb); + + return APR_SUCCESS; +} + +static apr_status_t status_event(void *ctx, h2_bucket_event event, + apr_bucket *b) +{ + h2_task *task = ctx; + + ap_log_cerror(APLOG_MARK, APLOG_TRACE3, 0, task->c->master, + "status_event(%s): %d", task->id, event); + switch (event) { + case H2_BUCKET_EV_BEFORE_MASTER_SEND: + h2_status_insert(task, b); + break; + default: + break; + } + return APR_SUCCESS; +} + +int h2_filter_h2_status_handler(request_rec *r) +{ + h2_ctx *ctx = h2_ctx_rget(r); + conn_rec *c = r->connection; + h2_task *task; + 
apr_bucket_brigade *bb; + apr_bucket *b; + apr_status_t status; + + if (strcmp(r->handler, "http2-status")) { + return DECLINED; + } + if (r->method_number != M_GET && r->method_number != M_POST) { + return DECLINED; + } + + task = ctx? h2_ctx_get_task(ctx) : NULL; + if (task) { + + if ((status = ap_discard_request_body(r)) != OK) { + return status; + } + + /* We need to handle the actual output on the main thread, as + * we need to access h2_session information. */ + r->status = 200; + r->clength = -1; + r->chunked = 1; + apr_table_unset(r->headers_out, "Content-Length"); + /* Discourage content-encodings */ + apr_table_unset(r->headers_out, "Content-Encoding"); + apr_table_setn(r->subprocess_env, "no-brotli", "1"); + apr_table_setn(r->subprocess_env, "no-gzip", "1"); + + ap_set_content_type(r, "application/json"); + apr_table_setn(r->notes, H2_FILTER_DEBUG_NOTE, "on"); + + bb = apr_brigade_create(r->pool, c->bucket_alloc); + b = h2_bucket_observer_create(c->bucket_alloc, status_event, task); + APR_BRIGADE_INSERT_TAIL(bb, b); + b = apr_bucket_eos_create(c->bucket_alloc); + APR_BRIGADE_INSERT_TAIL(bb, b); + + ap_log_rerror(APLOG_MARK, APLOG_TRACE2, 0, r, + "status_handler(%s): checking for incoming trailers", + task->id); + if (r->trailers_in && !apr_is_empty_table(r->trailers_in)) { + ap_log_rerror(APLOG_MARK, APLOG_TRACE2, 0, r, + "status_handler(%s): seeing incoming trailers", + task->id); + apr_table_setn(r->trailers_out, "h2-trailers-in", + apr_itoa(r->pool, 1)); + } + + status = ap_pass_brigade(r->output_filters, bb); + if (status == APR_SUCCESS + || r->status != HTTP_OK + || c->aborted) { + return OK; + } + else { + /* no way to know what type of error occurred */ + ap_log_rerror(APLOG_MARK, APLOG_TRACE1, status, r, + "status_handler(%s): ap_pass_brigade failed", + task->id); + return AP_FILTER_ERROR; + } + } + return DECLINED; +} + diff --git a/modules/http2/h2_filter.h b/modules/http2/h2_filter.h new file mode 100644 index 0000000..12810d8 --- /dev/null +++ b/modules/http2/h2_filter.h @@ -0,0 +1,73 @@ +/* Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef __mod_h2__h2_filter__ +#define __mod_h2__h2_filter__ + +struct h2_bucket_beam; +struct h2_headers; +struct h2_stream; +struct h2_session; + +typedef struct h2_filter_cin { + apr_pool_t *pool; + apr_socket_t *socket; + apr_interval_time_t timeout; + apr_bucket_brigade *bb; + struct h2_session *session; + apr_bucket *cur; +} h2_filter_cin; + +h2_filter_cin *h2_filter_cin_create(struct h2_session *session); + +void h2_filter_cin_timeout_set(h2_filter_cin *cin, apr_interval_time_t timeout); + +apr_status_t h2_filter_core_input(ap_filter_t* filter, + apr_bucket_brigade* brigade, + ap_input_mode_t mode, + apr_read_type_e block, + apr_off_t readbytes); + +/******* observer bucket ******************************************************/ + +typedef enum { + H2_BUCKET_EV_BEFORE_DESTROY, + H2_BUCKET_EV_BEFORE_MASTER_SEND +} h2_bucket_event; + +extern const apr_bucket_type_t h2_bucket_type_observer; + +typedef apr_status_t h2_bucket_event_cb(void *ctx, h2_bucket_event event, apr_bucket *b); + +#define H2_BUCKET_IS_OBSERVER(e) (e->type == &h2_bucket_type_observer) + +apr_bucket * h2_bucket_observer_make(apr_bucket *b, h2_bucket_event_cb *cb, + void *ctx); + +apr_bucket * h2_bucket_observer_create(apr_bucket_alloc_t *list, + h2_bucket_event_cb *cb, void *ctx); + +apr_status_t h2_bucket_observer_fire(apr_bucket *b, h2_bucket_event event); + +apr_bucket *h2_bucket_observer_beam(struct h2_bucket_beam *beam, + apr_bucket_brigade *dest, + const apr_bucket *src); + +/******* /.well-known/h2/state handler ****************************************/ + +int h2_filter_h2_status_handler(request_rec *r); + +#endif /* __mod_h2__h2_filter__ */ diff --git a/modules/http2/h2_from_h1.c b/modules/http2/h2_from_h1.c new file mode 100644 index 0000000..d69c53c --- /dev/null +++ b/modules/http2/h2_from_h1.c @@ -0,0 +1,875 @@ +/* Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include <assert.h> +#include <stdio.h> + +#include <apr_date.h> +#include <apr_lib.h> +#include <apr_strings.h> + +#include <httpd.h> +#include <http_core.h> +#include <http_log.h> +#include <http_connection.h> +#include <http_protocol.h> +#include <http_request.h> +#include <util_time.h> + +#include "h2_private.h" +#include "h2_headers.h" +#include "h2_from_h1.h" +#include "h2_task.h" +#include "h2_util.h" + + +/* This routine is called by apr_table_do and merges all instances of + * the passed field values into a single array that will be further + * processed by some later routine. Originally intended to help split + * and recombine multiple Vary fields, though it is generic to any field + * consisting of comma/space-separated tokens. 
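+ *
+ * For example, the two response fields
+ *
+ *     Vary: Accept-Encoding, User-Agent
+ *     Vary: accept-encoding
+ *
+ * are recombined by fix_vary() below into the single field
+ *
+ *     Vary: Accept-Encoding,User-Agent
+ *
+ * since tokens are compared case-insensitively and only the first spelling
+ * of a duplicate is kept.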
+ */ +static int uniq_field_values(void *d, const char *key, const char *val) +{ + apr_array_header_t *values; + char *start; + char *e; + char **strpp; + int i; + + (void)key; + values = (apr_array_header_t *)d; + + e = apr_pstrdup(values->pool, val); + + do { + /* Find a non-empty fieldname */ + + while (*e == ',' || apr_isspace(*e)) { + ++e; + } + if (*e == '\0') { + break; + } + start = e; + while (*e != '\0' && *e != ',' && !apr_isspace(*e)) { + ++e; + } + if (*e != '\0') { + *e++ = '\0'; + } + + /* Now add it to values if it isn't already represented. + * Could be replaced by a ap_array_strcasecmp() if we had one. + */ + for (i = 0, strpp = (char **) values->elts; i < values->nelts; + ++i, ++strpp) { + if (*strpp && apr_strnatcasecmp(*strpp, start) == 0) { + break; + } + } + if (i == values->nelts) { /* if not found */ + *(char **)apr_array_push(values) = start; + } + } while (*e != '\0'); + + return 1; +} + +/* + * Since some clients choke violently on multiple Vary fields, or + * Vary fields with duplicate tokens, combine any multiples and remove + * any duplicates. + */ +static void fix_vary(request_rec *r) +{ + apr_array_header_t *varies; + + varies = apr_array_make(r->pool, 5, sizeof(char *)); + + /* Extract all Vary fields from the headers_out, separate each into + * its comma-separated fieldname values, and then add them to varies + * if not already present in the array. + */ + apr_table_do(uniq_field_values, varies, r->headers_out, "Vary", NULL); + + /* If we found any, replace old Vary fields with unique-ified value */ + + if (varies->nelts > 0) { + apr_table_setn(r->headers_out, "Vary", + apr_array_pstrcat(r->pool, varies, ',')); + } +} + +static void set_basic_http_header(apr_table_t *headers, request_rec *r, + apr_pool_t *pool) +{ + char *date = NULL; + const char *proxy_date = NULL; + const char *server = NULL; + const char *us = ap_get_server_banner(); + + /* + * keep the set-by-proxy server and date headers, otherwise + * generate a new server header / date header + */ + if (r && r->proxyreq != PROXYREQ_NONE) { + proxy_date = apr_table_get(r->headers_out, "Date"); + if (!proxy_date) { + /* + * proxy_date needs to be const. So use date for the creation of + * our own Date header and pass it over to proxy_date later to + * avoid a compiler warning. + */ + date = apr_palloc(pool, APR_RFC822_DATE_LEN); + ap_recent_rfc822_date(date, r->request_time); + } + server = apr_table_get(r->headers_out, "Server"); + } + else { + date = apr_palloc(pool, APR_RFC822_DATE_LEN); + ap_recent_rfc822_date(date, r? r->request_time : apr_time_now()); + } + + apr_table_setn(headers, "Date", proxy_date ? proxy_date : date ); + if (r) { + apr_table_unset(r->headers_out, "Date"); + } + + if (!server && *us) { + server = us; + } + if (server) { + apr_table_setn(headers, "Server", server); + if (r) { + apr_table_unset(r->headers_out, "Server"); + } + } +} + +static int copy_header(void *ctx, const char *name, const char *value) +{ + apr_table_t *headers = ctx; + + apr_table_add(headers, name, value); + return 1; +} + +static h2_headers *create_response(h2_task *task, request_rec *r) +{ + const char *clheader; + const char *ctype; + apr_table_t *headers; + /* + * Now that we are ready to send a response, we need to combine the two + * header field tables into a single table. If we don't do this, our + * later attempts to set or unset a given fieldname might be bypassed. 
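+ *
+ * Editorial example (not part of the upstream source): if one module added a
+ * "Cache-Control" entry to r->err_headers_out and another to r->headers_out,
+ * the overlay below keeps both entries, with the err_headers_out one listed
+ * first so it is what apr_table_get() returns; err_headers_out is then cleared.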
+ */ + if (!apr_is_empty_table(r->err_headers_out)) { + r->headers_out = apr_table_overlay(r->pool, r->err_headers_out, + r->headers_out); + apr_table_clear(r->err_headers_out); + } + + /* + * Remove the 'Vary' header field if the client can't handle it. + * Since this will have nasty effects on HTTP/1.1 caches, force + * the response into HTTP/1.0 mode. + */ + if (apr_table_get(r->subprocess_env, "force-no-vary") != NULL) { + apr_table_unset(r->headers_out, "Vary"); + r->proto_num = HTTP_VERSION(1,0); + apr_table_setn(r->subprocess_env, "force-response-1.0", "1"); + } + else { + fix_vary(r); + } + + /* + * Now remove any ETag response header field if earlier processing + * says so (such as a 'FileETag None' directive). + */ + if (apr_table_get(r->notes, "no-etag") != NULL) { + apr_table_unset(r->headers_out, "ETag"); + } + + /* determine the protocol and whether we should use keepalives. */ + ap_set_keepalive(r); + + if (AP_STATUS_IS_HEADER_ONLY(r->status)) { + apr_table_unset(r->headers_out, "Transfer-Encoding"); + apr_table_unset(r->headers_out, "Content-Length"); + r->content_type = r->content_encoding = NULL; + r->content_languages = NULL; + r->clength = r->chunked = 0; + } + else if (r->chunked) { + apr_table_mergen(r->headers_out, "Transfer-Encoding", "chunked"); + apr_table_unset(r->headers_out, "Content-Length"); + } + + ctype = ap_make_content_type(r, r->content_type); + if (ctype) { + apr_table_setn(r->headers_out, "Content-Type", ctype); + } + + if (r->content_encoding) { + apr_table_setn(r->headers_out, "Content-Encoding", + r->content_encoding); + } + + if (!apr_is_empty_array(r->content_languages)) { + unsigned int i; + char *token; + char **languages = (char **)(r->content_languages->elts); + const char *field = apr_table_get(r->headers_out, "Content-Language"); + + while (field && (token = ap_get_list_item(r->pool, &field)) != NULL) { + for (i = 0; i < r->content_languages->nelts; ++i) { + if (!apr_strnatcasecmp(token, languages[i])) + break; + } + if (i == r->content_languages->nelts) { + *((char **) apr_array_push(r->content_languages)) = token; + } + } + + field = apr_array_pstrcat(r->pool, r->content_languages, ','); + apr_table_setn(r->headers_out, "Content-Language", field); + } + + /* + * Control cachability for non-cachable responses if not already set by + * some other part of the server configuration. + */ + if (r->no_cache && !apr_table_get(r->headers_out, "Expires")) { + char *date = apr_palloc(r->pool, APR_RFC822_DATE_LEN); + ap_recent_rfc822_date(date, r->request_time); + apr_table_add(r->headers_out, "Expires", date); + } + + /* This is a hack, but I can't find anyway around it. The idea is that + * we don't want to send out 0 Content-Lengths if it is a head request. + * This happens when modules try to outsmart the server, and return + * if they see a HEAD request. Apache 1.3 handlers were supposed to + * just return in that situation, and the core handled the HEAD. In + * 2.0, if a handler returns, then the core sends an EOS bucket down + * the filter stack, and the content-length filter computes a C-L of + * zero and that gets put in the headers, and we end up sending a + * zero C-L to the client. We can't just remove the C-L filter, + * because well behaved 2.0 handlers will send their data down the stack, + * and we will compute a real C-L for the head request. 
RBB + */ + if (r->header_only + && (clheader = apr_table_get(r->headers_out, "Content-Length")) + && !strcmp(clheader, "0")) { + apr_table_unset(r->headers_out, "Content-Length"); + } + + headers = apr_table_make(r->pool, 10); + + set_basic_http_header(headers, r, r->pool); + if (r->status == HTTP_NOT_MODIFIED) { + apr_table_do(copy_header, headers, r->headers_out, + "ETag", + "Content-Location", + "Expires", + "Cache-Control", + "Vary", + "Warning", + "WWW-Authenticate", + "Proxy-Authenticate", + "Set-Cookie", + "Set-Cookie2", + NULL); + } + else { + apr_table_do(copy_header, headers, r->headers_out, NULL); + } + + return h2_headers_rcreate(r, r->status, headers, r->pool); +} + +typedef enum { + H2_RP_STATUS_LINE, + H2_RP_HEADER_LINE, + H2_RP_DONE +} h2_rp_state_t; + +typedef struct h2_response_parser { + h2_rp_state_t state; + h2_task *task; + int http_status; + apr_array_header_t *hlines; + apr_bucket_brigade *tmp; +} h2_response_parser; + +static apr_status_t parse_header(h2_response_parser *parser, char *line) { + const char *hline; + if (line[0] == ' ' || line[0] == '\t') { + char **plast; + /* continuation line from the header before this */ + while (line[0] == ' ' || line[0] == '\t') { + ++line; + } + + plast = apr_array_pop(parser->hlines); + if (plast == NULL) { + /* not well formed */ + return APR_EINVAL; + } + hline = apr_psprintf(parser->task->pool, "%s %s", *plast, line); + } + else { + /* new header line */ + hline = apr_pstrdup(parser->task->pool, line); + } + APR_ARRAY_PUSH(parser->hlines, const char*) = hline; + return APR_SUCCESS; +} + +static apr_status_t get_line(h2_response_parser *parser, apr_bucket_brigade *bb, + char *line, apr_size_t len) +{ + h2_task *task = parser->task; + apr_status_t status; + + if (!parser->tmp) { + parser->tmp = apr_brigade_create(task->pool, task->c->bucket_alloc); + } + status = apr_brigade_split_line(parser->tmp, bb, APR_BLOCK_READ, + HUGE_STRING_LEN); + if (status == APR_SUCCESS) { + --len; + status = apr_brigade_flatten(parser->tmp, line, &len); + if (status == APR_SUCCESS) { + /* we assume a non-0 containing line and remove trailing crlf. 
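+ * Editorial example (not part of the upstream source): a complete line such
+ * as "HTTP/1.1 204 No Content\r\n" is NUL-terminated and handed back without
+ * its CRLF, while a fragment that does not yet end in CRLF is answered with
+ * APR_EAGAIN below so the caller retries once more data has arrived.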
*/ + line[len] = '\0'; + if (len >= 2 && !strcmp(H2_CRLF, line + len - 2)) { + len -= 2; + line[len] = '\0'; + apr_brigade_cleanup(parser->tmp); + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, task->c, + "h2_task(%s): read response line: %s", + task->id, line); + } + else { + /* this does not look like a complete line yet */ + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, task->c, + "h2_task(%s): read response, incomplete line: %s", + task->id, line); + return APR_EAGAIN; + } + } + } + apr_brigade_cleanup(parser->tmp); + return status; +} + +static apr_table_t *make_table(h2_response_parser *parser) +{ + h2_task *task = parser->task; + apr_array_header_t *hlines = parser->hlines; + if (hlines) { + apr_table_t *headers = apr_table_make(task->pool, hlines->nelts); + int i; + + for (i = 0; i < hlines->nelts; ++i) { + char *hline = ((char **)hlines->elts)[i]; + char *sep = ap_strchr(hline, ':'); + if (!sep) { + ap_log_cerror(APLOG_MARK, APLOG_WARNING, APR_EINVAL, task->c, + APLOGNO(02955) "h2_task(%s): invalid header[%d] '%s'", + task->id, i, (char*)hline); + /* not valid format, abort */ + return NULL; + } + (*sep++) = '\0'; + while (*sep == ' ' || *sep == '\t') { + ++sep; + } + + if (!h2_util_ignore_header(hline)) { + apr_table_merge(headers, hline, sep); + } + } + return headers; + } + else { + return apr_table_make(task->pool, 0); + } +} + +static apr_status_t pass_response(h2_task *task, ap_filter_t *f, + h2_response_parser *parser) +{ + apr_bucket *b; + apr_status_t status; + + h2_headers *response = h2_headers_create(parser->http_status, + make_table(parser), + NULL, 0, task->pool); + apr_brigade_cleanup(parser->tmp); + b = h2_bucket_headers_create(task->c->bucket_alloc, response); + APR_BRIGADE_INSERT_TAIL(parser->tmp, b); + b = apr_bucket_flush_create(task->c->bucket_alloc); + APR_BRIGADE_INSERT_TAIL(parser->tmp, b); + status = ap_pass_brigade(f->next, parser->tmp); + apr_brigade_cleanup(parser->tmp); + + /* reset parser for possible next response */ + parser->state = H2_RP_STATUS_LINE; + apr_array_clear(parser->hlines); + + if (response->status >= 200) { + task->output.sent_response = 1; + } + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, task->c, + APLOGNO(03197) "h2_task(%s): passed response %d", + task->id, response->status); + return status; +} + +static apr_status_t parse_status(h2_task *task, char *line) +{ + h2_response_parser *parser = task->output.rparser; + int sindex = (apr_date_checkmask(line, "HTTP/#.# ###*")? 9 : + (apr_date_checkmask(line, "HTTP/# ###*")? 7 : 0)); + if (sindex > 0) { + int k = sindex + 3; + char keepchar = line[k]; + line[k] = '\0'; + parser->http_status = atoi(&line[sindex]); + line[k] = keepchar; + parser->state = H2_RP_HEADER_LINE; + + return APR_SUCCESS; + } + /* Seems like there is garbage on the connection. May be a leftover + * from a previous proxy request. + * This should only happen if the H2_RESPONSE filter is not yet in + * place (post_read_request has not been reached and the handler wants + * to write something. Probably just the interim response we are + * waiting for. But if there is other data hanging around before + * that, this needs to fail. 
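+ * Editorial example (not part of the upstream source): the interim response
+ * "HTTP/1.1 100 Continue" matches the "HTTP/#.# ###*" mask above, sets
+ * http_status to 100 and switches the parser to header lines; any other
+ * leading bytes fall through to the APR_EINVAL rejection below.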
*/ + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, task->c, APLOGNO(03467) + "h2_task(%s): unable to parse status line: %s", + task->id, line); + return APR_EINVAL; +} + +apr_status_t h2_from_h1_parse_response(h2_task *task, ap_filter_t *f, + apr_bucket_brigade *bb) +{ + h2_response_parser *parser = task->output.rparser; + char line[HUGE_STRING_LEN]; + apr_status_t status = APR_SUCCESS; + + if (!parser) { + parser = apr_pcalloc(task->pool, sizeof(*parser)); + parser->task = task; + parser->state = H2_RP_STATUS_LINE; + parser->hlines = apr_array_make(task->pool, 10, sizeof(char *)); + task->output.rparser = parser; + } + + while (!APR_BRIGADE_EMPTY(bb) && status == APR_SUCCESS) { + switch (parser->state) { + case H2_RP_STATUS_LINE: + case H2_RP_HEADER_LINE: + status = get_line(parser, bb, line, sizeof(line)); + if (status == APR_EAGAIN) { + /* need more data */ + return APR_SUCCESS; + } + else if (status != APR_SUCCESS) { + return status; + } + if (parser->state == H2_RP_STATUS_LINE) { + /* instead of parsing, just take it directly */ + status = parse_status(task, line); + } + else if (line[0] == '\0') { + /* end of headers, pass response onward */ + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, f->c, + "h2_task(%s): end of response", task->id); + return pass_response(task, f, parser); + } + else { + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, f->c, + "h2_task(%s): response header %s", task->id, line); + status = parse_header(parser, line); + } + break; + + default: + return status; + } + } + return status; +} + +apr_status_t h2_filter_headers_out(ap_filter_t *f, apr_bucket_brigade *bb) +{ + h2_task *task = f->ctx; + request_rec *r = f->r; + apr_bucket *b, *bresp, *body_bucket = NULL, *next; + ap_bucket_error *eb = NULL; + h2_headers *response = NULL; + int headers_passing = 0; + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, f->c, + "h2_task(%s): output_filter called", task->id); + + if (!task->output.sent_response && !f->c->aborted) { + /* check, if we need to send the response now. Until we actually + * see a DATA bucket or some EOS/EOR, we do not do so. */ + for (b = APR_BRIGADE_FIRST(bb); + b != APR_BRIGADE_SENTINEL(bb); + b = APR_BUCKET_NEXT(b)) + { + if (AP_BUCKET_IS_ERROR(b) && !eb) { + eb = b->data; + } + else if (AP_BUCKET_IS_EOC(b)) { + /* If we see an EOC bucket it is a signal that we should get out + * of the way doing nothing. + */ + ap_remove_output_filter(f); + ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, f->c, + "h2_task(%s): eoc bucket passed", task->id); + return ap_pass_brigade(f->next, bb); + } + else if (H2_BUCKET_IS_HEADERS(b)) { + headers_passing = 1; + } + else if (!APR_BUCKET_IS_FLUSH(b)) { + body_bucket = b; + break; + } + } + + if (eb) { + int st = eb->status; + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, f->c, APLOGNO(03047) + "h2_task(%s): err bucket status=%d", task->id, st); + /* throw everything away and replace it with the error response + * generated by ap_die() */ + apr_brigade_cleanup(bb); + ap_die(st, r); + return AP_FILTER_ERROR; + } + + if (body_bucket || !headers_passing) { + /* time to insert the response bucket before the body or if + * no h2_headers is passed, e.g. 
the response is empty */ + response = create_response(task, r); + if (response == NULL) { + ap_log_cerror(APLOG_MARK, APLOG_NOTICE, 0, f->c, APLOGNO(03048) + "h2_task(%s): unable to create response", task->id); + return APR_ENOMEM; + } + + bresp = h2_bucket_headers_create(f->c->bucket_alloc, response); + if (body_bucket) { + APR_BUCKET_INSERT_BEFORE(body_bucket, bresp); + } + else { + APR_BRIGADE_INSERT_HEAD(bb, bresp); + } + task->output.sent_response = 1; + r->sent_bodyct = 1; + } + } + + if (r->header_only) { + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, f->c, + "h2_task(%s): header_only, cleanup output brigade", + task->id); + b = body_bucket? body_bucket : APR_BRIGADE_FIRST(bb); + while (b != APR_BRIGADE_SENTINEL(bb)) { + next = APR_BUCKET_NEXT(b); + if (APR_BUCKET_IS_EOS(b) || AP_BUCKET_IS_EOR(b)) { + break; + } + APR_BUCKET_REMOVE(b); + apr_bucket_destroy(b); + b = next; + } + } + else if (task->output.sent_response) { + /* lets get out of the way, our task is done */ + ap_remove_output_filter(f); + } + return ap_pass_brigade(f->next, bb); +} + +static void make_chunk(h2_task *task, apr_bucket_brigade *bb, + apr_bucket *first, apr_off_t chunk_len, + apr_bucket *tail) +{ + /* Surround the buckets [first, tail[ with new buckets carrying the + * HTTP/1.1 chunked encoding format. If tail is NULL, the chunk extends + * to the end of the brigade. */ + char buffer[128]; + apr_bucket *c; + int len; + + len = apr_snprintf(buffer, H2_ALEN(buffer), + "%"APR_UINT64_T_HEX_FMT"\r\n", (apr_uint64_t)chunk_len); + c = apr_bucket_heap_create(buffer, len, NULL, bb->bucket_alloc); + APR_BUCKET_INSERT_BEFORE(first, c); + c = apr_bucket_heap_create("\r\n", 2, NULL, bb->bucket_alloc); + if (tail) { + APR_BUCKET_INSERT_BEFORE(tail, c); + } + else { + APR_BRIGADE_INSERT_TAIL(bb, c); + } + task->input.chunked_total += chunk_len; + ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, task->c, + "h2_task(%s): added chunk %ld, total %ld", + task->id, (long)chunk_len, (long)task->input.chunked_total); +} + +static int ser_header(void *ctx, const char *name, const char *value) +{ + apr_bucket_brigade *bb = ctx; + apr_brigade_printf(bb, NULL, NULL, "%s: %s\r\n", name, value); + return 1; +} + +static apr_status_t read_and_chunk(ap_filter_t *f, h2_task *task, + apr_read_type_e block) { + request_rec *r = f->r; + apr_status_t status = APR_SUCCESS; + apr_bucket_brigade *bb = task->input.bbchunk; + + if (!bb) { + bb = apr_brigade_create(r->pool, f->c->bucket_alloc); + task->input.bbchunk = bb; + } + + if (APR_BRIGADE_EMPTY(bb)) { + apr_bucket *b, *next, *first_data = NULL; + apr_bucket_brigade *tmp; + apr_off_t bblen = 0; + + /* get more data from the lower layer filters. Always do this + * in larger pieces, since we handle the read modes ourself. 
*/ + status = ap_get_brigade(f->next, bb, + AP_MODE_READBYTES, block, 32*1024); + if (status == APR_EOF) { + if (!task->input.eos) { + status = apr_brigade_puts(bb, NULL, NULL, "0\r\n\r\n"); + task->input.eos = 1; + return APR_SUCCESS; + } + ap_remove_input_filter(f); + return status; + + } + else if (status != APR_SUCCESS) { + return status; + } + + for (b = APR_BRIGADE_FIRST(bb); + b != APR_BRIGADE_SENTINEL(bb) && !task->input.eos; + b = next) { + next = APR_BUCKET_NEXT(b); + if (APR_BUCKET_IS_METADATA(b)) { + if (first_data) { + make_chunk(task, bb, first_data, bblen, b); + first_data = NULL; + } + + if (H2_BUCKET_IS_HEADERS(b)) { + h2_headers *headers = h2_bucket_headers_get(b); + + ap_assert(headers); + ap_log_rerror(APLOG_MARK, APLOG_TRACE2, 0, r, + "h2_task(%s): receiving trailers", task->id); + tmp = apr_brigade_split_ex(bb, b, NULL); + if (!apr_is_empty_table(headers->headers)) { + status = apr_brigade_puts(bb, NULL, NULL, "0\r\n"); + apr_table_do(ser_header, bb, headers->headers, NULL); + status = apr_brigade_puts(bb, NULL, NULL, "\r\n"); + } + else { + status = apr_brigade_puts(bb, NULL, NULL, "0\r\n\r\n"); + } + r->trailers_in = apr_table_clone(r->pool, headers->headers); + APR_BUCKET_REMOVE(b); + apr_bucket_destroy(b); + APR_BRIGADE_CONCAT(bb, tmp); + apr_brigade_destroy(tmp); + task->input.eos = 1; + } + else if (APR_BUCKET_IS_EOS(b)) { + tmp = apr_brigade_split_ex(bb, b, NULL); + status = apr_brigade_puts(bb, NULL, NULL, "0\r\n\r\n"); + APR_BRIGADE_CONCAT(bb, tmp); + apr_brigade_destroy(tmp); + task->input.eos = 1; + } + } + else if (b->length == 0) { + APR_BUCKET_REMOVE(b); + apr_bucket_destroy(b); + } + else { + if (!first_data) { + first_data = b; + bblen = 0; + } + bblen += b->length; + } + } + + if (first_data) { + make_chunk(task, bb, first_data, bblen, NULL); + } + } + return status; +} + +apr_status_t h2_filter_request_in(ap_filter_t* f, + apr_bucket_brigade* bb, + ap_input_mode_t mode, + apr_read_type_e block, + apr_off_t readbytes) +{ + h2_task *task = f->ctx; + request_rec *r = f->r; + apr_status_t status = APR_SUCCESS; + apr_bucket *b, *next; + core_server_config *conf = + (core_server_config *) ap_get_module_config(r->server->module_config, + &core_module); + + ap_log_rerror(APLOG_MARK, APLOG_TRACE2, 0, f->r, + "h2_task(%s): request filter, exp=%d", task->id, r->expecting_100); + if (!task->request->chunked) { + status = ap_get_brigade(f->next, bb, mode, block, readbytes); + /* pipe data through, just take care of trailers */ + for (b = APR_BRIGADE_FIRST(bb); + b != APR_BRIGADE_SENTINEL(bb); b = next) { + next = APR_BUCKET_NEXT(b); + if (H2_BUCKET_IS_HEADERS(b)) { + h2_headers *headers = h2_bucket_headers_get(b); + ap_assert(headers); + ap_log_rerror(APLOG_MARK, APLOG_TRACE2, 0, r, + "h2_task(%s): receiving trailers", task->id); + r->trailers_in = headers->headers; + if (conf && conf->merge_trailers == AP_MERGE_TRAILERS_ENABLE) { + r->headers_in = apr_table_overlay(r->pool, r->headers_in, + r->trailers_in); + } + APR_BUCKET_REMOVE(b); + apr_bucket_destroy(b); + ap_remove_input_filter(f); + + if (headers->raw_bytes && h2_task_logio_add_bytes_in) { + h2_task_logio_add_bytes_in(task->c, headers->raw_bytes); + } + break; + } + } + return status; + } + + /* Things are more complicated. The standard HTTP input filter, which + * does a lot what we do not want to duplicate, also cares about chunked + * transfer encoding and trailers. + * We need to simulate chunked encoding for it to be happy. 
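+ *
+ * Editorial example (not part of the upstream source): an 11 byte DATA
+ * payload "hello world" followed by EOS is presented to the HTTP input
+ * filter as
+ *
+ *     b\r\n
+ *     hello world\r\n
+ *     0\r\n
+ *     \r\n
+ *
+ * and incoming HEADERS buckets (trailers) are serialized after the final
+ * "0\r\n" instead, see read_and_chunk() above.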
+ */ + if ((status = read_and_chunk(f, task, block)) != APR_SUCCESS) { + return status; + } + + if (mode == AP_MODE_EXHAUSTIVE) { + /* return all we have */ + APR_BRIGADE_CONCAT(bb, task->input.bbchunk); + } + else if (mode == AP_MODE_READBYTES) { + status = h2_brigade_concat_length(bb, task->input.bbchunk, readbytes); + } + else if (mode == AP_MODE_SPECULATIVE) { + status = h2_brigade_copy_length(bb, task->input.bbchunk, readbytes); + } + else if (mode == AP_MODE_GETLINE) { + /* we are reading a single LF line, e.g. the HTTP headers. + * this has the nasty side effect to split the bucket, even + * though it ends with CRLF and creates a 0 length bucket */ + status = apr_brigade_split_line(bb, task->input.bbchunk, block, + HUGE_STRING_LEN); + if (APLOGctrace1(f->c)) { + char buffer[1024]; + apr_size_t len = sizeof(buffer)-1; + apr_brigade_flatten(bb, buffer, &len); + buffer[len] = 0; + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, f->c, + "h2_task(%s): getline: %s", + task->id, buffer); + } + } + else { + /* Hmm, well. There is mode AP_MODE_EATCRLF, but we chose not + * to support it. Seems to work. */ + ap_log_cerror(APLOG_MARK, APLOG_ERR, APR_ENOTIMPL, f->c, + APLOGNO(02942) + "h2_task, unsupported READ mode %d", mode); + status = APR_ENOTIMPL; + } + + h2_util_bb_log(f->c, task->stream_id, APLOG_TRACE2, "forwarding input", bb); + return status; +} + +apr_status_t h2_filter_trailers_out(ap_filter_t *f, apr_bucket_brigade *bb) +{ + h2_task *task = f->ctx; + request_rec *r = f->r; + apr_bucket *b, *e; + + if (task && r) { + /* Detect the EOS/EOR bucket and forward any trailers that may have + * been set to our h2_headers. + */ + for (b = APR_BRIGADE_FIRST(bb); + b != APR_BRIGADE_SENTINEL(bb); + b = APR_BUCKET_NEXT(b)) + { + if ((APR_BUCKET_IS_EOS(b) || AP_BUCKET_IS_EOR(b)) + && r->trailers_out && !apr_is_empty_table(r->trailers_out)) { + h2_headers *headers; + apr_table_t *trailers; + + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, f->c, APLOGNO(03049) + "h2_task(%s): sending trailers", task->id); + trailers = apr_table_clone(r->pool, r->trailers_out); + headers = h2_headers_rcreate(r, HTTP_OK, trailers, r->pool); + e = h2_bucket_headers_create(bb->bucket_alloc, headers); + APR_BUCKET_INSERT_BEFORE(b, e); + apr_table_clear(r->trailers_out); + ap_remove_output_filter(f); + break; + } + } + } + + return ap_pass_brigade(f->next, bb); +} + diff --git a/modules/http2/h2_from_h1.h b/modules/http2/h2_from_h1.h new file mode 100644 index 0000000..68a24fd --- /dev/null +++ b/modules/http2/h2_from_h1.h @@ -0,0 +1,50 @@ +/* Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef __mod_h2__h2_from_h1__ +#define __mod_h2__h2_from_h1__ + +/** + * h2_from_h1 parses a HTTP/1.1 response into + * - response status + * - a list of header values + * - a series of bytes that represent the response body alone, without + * any meta data, such as inserted by chunked transfer encoding. + * + * All data is allocated from the stream memory pool. + * + * Again, see comments in h2_request: ideally we would take the headers + * and status from the httpd structures instead of parsing them here, but + * we need to have all handlers and filters involved in request/response + * processing, so this seems to be the way for now. + */ +struct h2_headers; +struct h2_task; + +apr_status_t h2_from_h1_parse_response(struct h2_task *task, ap_filter_t *f, + apr_bucket_brigade *bb); + +apr_status_t h2_filter_headers_out(ap_filter_t *f, apr_bucket_brigade *bb); + +apr_status_t h2_filter_request_in(ap_filter_t* f, + apr_bucket_brigade* brigade, + ap_input_mode_t mode, + apr_read_type_e block, + apr_off_t readbytes); + +apr_status_t h2_filter_trailers_out(ap_filter_t *f, apr_bucket_brigade *bb); + +#endif /* defined(__mod_h2__h2_from_h1__) */ diff --git a/modules/http2/h2_h2.c b/modules/http2/h2_h2.c new file mode 100644 index 0000000..5580cef --- /dev/null +++ b/modules/http2/h2_h2.c @@ -0,0 +1,765 @@ +/* Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include <assert.h> + +#include <apr_strings.h> +#include <apr_optional.h> +#include <apr_optional_hooks.h> + +#include <httpd.h> +#include <http_core.h> +#include <http_config.h> +#include <http_connection.h> +#include <http_protocol.h> +#include <http_request.h> +#include <http_log.h> + +#include "mod_ssl.h" + +#include "mod_http2.h" +#include "h2_private.h" + +#include "h2_bucket_beam.h" +#include "h2_stream.h" +#include "h2_task.h" +#include "h2_config.h" +#include "h2_ctx.h" +#include "h2_conn.h" +#include "h2_filter.h" +#include "h2_request.h" +#include "h2_headers.h" +#include "h2_session.h" +#include "h2_util.h" +#include "h2_h2.h" +#include "mod_http2.h" + +const char *h2_tls_protos[] = { + "h2", NULL +}; + +const char *h2_clear_protos[] = { + "h2c", NULL +}; + +const char *H2_MAGIC_TOKEN = "PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n"; + +/******************************************************************************* + * The optional mod_ssl functions we need. 
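+ *
+ * Editorial note (not part of the upstream source): both pointers are filled
+ * in at initialization in h2_h2_init() below, e.g. via
+ * APR_RETRIEVE_OPTIONAL_FN(ssl_is_https); when mod_ssl is not loaded they
+ * stay NULL, h2_h2_is_tls() reports cleartext and the TLS checks further
+ * down are skipped.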
+ */ +static APR_OPTIONAL_FN_TYPE(ssl_is_https) *opt_ssl_is_https; +static APR_OPTIONAL_FN_TYPE(ssl_var_lookup) *opt_ssl_var_lookup; + + +/******************************************************************************* + * HTTP/2 error stuff + */ +static const char *h2_err_descr[] = { + "no error", /* 0x0 */ + "protocol error", + "internal error", + "flow control error", + "settings timeout", + "stream closed", /* 0x5 */ + "frame size error", + "refused stream", + "cancel", + "compression error", + "connect error", /* 0xa */ + "enhance your calm", + "inadequate security", + "http/1.1 required", +}; + +const char *h2_h2_err_description(unsigned int h2_error) +{ + if (h2_error < (sizeof(h2_err_descr)/sizeof(h2_err_descr[0]))) { + return h2_err_descr[h2_error]; + } + return "unknown http/2 error code"; +} + +/******************************************************************************* + * Check connection security requirements of RFC 7540 + */ + +/* + * Black Listed Ciphers from RFC 7549 Appendix A + * + */ +static const char *RFC7540_names[] = { + /* ciphers with NULL encrpytion */ + "NULL-MD5", /* TLS_NULL_WITH_NULL_NULL */ + /* same */ /* TLS_RSA_WITH_NULL_MD5 */ + "NULL-SHA", /* TLS_RSA_WITH_NULL_SHA */ + "NULL-SHA256", /* TLS_RSA_WITH_NULL_SHA256 */ + "PSK-NULL-SHA", /* TLS_PSK_WITH_NULL_SHA */ + "DHE-PSK-NULL-SHA", /* TLS_DHE_PSK_WITH_NULL_SHA */ + "RSA-PSK-NULL-SHA", /* TLS_RSA_PSK_WITH_NULL_SHA */ + "PSK-NULL-SHA256", /* TLS_PSK_WITH_NULL_SHA256 */ + "PSK-NULL-SHA384", /* TLS_PSK_WITH_NULL_SHA384 */ + "DHE-PSK-NULL-SHA256", /* TLS_DHE_PSK_WITH_NULL_SHA256 */ + "DHE-PSK-NULL-SHA384", /* TLS_DHE_PSK_WITH_NULL_SHA384 */ + "RSA-PSK-NULL-SHA256", /* TLS_RSA_PSK_WITH_NULL_SHA256 */ + "RSA-PSK-NULL-SHA384", /* TLS_RSA_PSK_WITH_NULL_SHA384 */ + "ECDH-ECDSA-NULL-SHA", /* TLS_ECDH_ECDSA_WITH_NULL_SHA */ + "ECDHE-ECDSA-NULL-SHA", /* TLS_ECDHE_ECDSA_WITH_NULL_SHA */ + "ECDH-RSA-NULL-SHA", /* TLS_ECDH_RSA_WITH_NULL_SHA */ + "ECDHE-RSA-NULL-SHA", /* TLS_ECDHE_RSA_WITH_NULL_SHA */ + "AECDH-NULL-SHA", /* TLS_ECDH_anon_WITH_NULL_SHA */ + "ECDHE-PSK-NULL-SHA", /* TLS_ECDHE_PSK_WITH_NULL_SHA */ + "ECDHE-PSK-NULL-SHA256", /* TLS_ECDHE_PSK_WITH_NULL_SHA256 */ + "ECDHE-PSK-NULL-SHA384", /* TLS_ECDHE_PSK_WITH_NULL_SHA384 */ + + /* DES/3DES ciphers */ + "PSK-3DES-EDE-CBC-SHA", /* TLS_PSK_WITH_3DES_EDE_CBC_SHA */ + "DHE-PSK-3DES-EDE-CBC-SHA", /* TLS_DHE_PSK_WITH_3DES_EDE_CBC_SHA */ + "RSA-PSK-3DES-EDE-CBC-SHA", /* TLS_RSA_PSK_WITH_3DES_EDE_CBC_SHA */ + "ECDH-ECDSA-DES-CBC3-SHA", /* TLS_ECDH_ECDSA_WITH_3DES_EDE_CBC_SHA */ + "ECDHE-ECDSA-DES-CBC3-SHA", /* TLS_ECDHE_ECDSA_WITH_3DES_EDE_CBC_SHA */ + "ECDH-RSA-DES-CBC3-SHA", /* TLS_ECDH_RSA_WITH_3DES_EDE_CBC_SHA */ + "ECDHE-RSA-DES-CBC3-SHA", /* TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA */ + "AECDH-DES-CBC3-SHA", /* TLS_ECDH_anon_WITH_3DES_EDE_CBC_SHA */ + "SRP-3DES-EDE-CBC-SHA", /* TLS_SRP_SHA_WITH_3DES_EDE_CBC_SHA */ + "SRP-RSA-3DES-EDE-CBC-SHA", /* TLS_SRP_SHA_RSA_WITH_3DES_EDE_CBC_SHA */ + "SRP-DSS-3DES-EDE-CBC-SHA", /* TLS_SRP_SHA_DSS_WITH_3DES_EDE_CBC_SHA */ + "ECDHE-PSK-3DES-EDE-CBC-SHA", /* TLS_ECDHE_PSK_WITH_3DES_EDE_CBC_SHA */ + "DES-CBC-SHA", /* TLS_RSA_WITH_DES_CBC_SHA */ + "DES-CBC3-SHA", /* TLS_RSA_WITH_3DES_EDE_CBC_SHA */ + "DHE-DSS-DES-CBC3-SHA", /* TLS_DHE_DSS_WITH_3DES_EDE_CBC_SHA */ + "DHE-RSA-DES-CBC-SHA", /* TLS_DHE_RSA_WITH_DES_CBC_SHA */ + "DHE-RSA-DES-CBC3-SHA", /* TLS_DHE_RSA_WITH_3DES_EDE_CBC_SHA */ + "ADH-DES-CBC-SHA", /* TLS_DH_anon_WITH_DES_CBC_SHA */ + "ADH-DES-CBC3-SHA", /* TLS_DH_anon_WITH_3DES_EDE_CBC_SHA */ + 
"EXP-DH-DSS-DES-CBC-SHA", /* TLS_DH_DSS_EXPORT_WITH_DES40_CBC_SHA */ + "DH-DSS-DES-CBC-SHA", /* TLS_DH_DSS_WITH_DES_CBC_SHA */ + "DH-DSS-DES-CBC3-SHA", /* TLS_DH_DSS_WITH_3DES_EDE_CBC_SHA */ + "EXP-DH-RSA-DES-CBC-SHA", /* TLS_DH_RSA_EXPORT_WITH_DES40_CBC_SHA */ + "DH-RSA-DES-CBC-SHA", /* TLS_DH_RSA_WITH_DES_CBC_SHA */ + "DH-RSA-DES-CBC3-SHA", /* TLS_DH_RSA_WITH_3DES_EDE_CBC_SHA */ + + /* blacklisted EXPORT ciphers */ + "EXP-RC4-MD5", /* TLS_RSA_EXPORT_WITH_RC4_40_MD5 */ + "EXP-RC2-CBC-MD5", /* TLS_RSA_EXPORT_WITH_RC2_CBC_40_MD5 */ + "EXP-DES-CBC-SHA", /* TLS_RSA_EXPORT_WITH_DES40_CBC_SHA */ + "EXP-DHE-DSS-DES-CBC-SHA", /* TLS_DHE_DSS_EXPORT_WITH_DES40_CBC_SHA */ + "EXP-DHE-RSA-DES-CBC-SHA", /* TLS_DHE_RSA_EXPORT_WITH_DES40_CBC_SHA */ + "EXP-ADH-DES-CBC-SHA", /* TLS_DH_anon_EXPORT_WITH_DES40_CBC_SHA */ + "EXP-ADH-RC4-MD5", /* TLS_DH_anon_EXPORT_WITH_RC4_40_MD5 */ + + /* blacklisted RC4 encryption */ + "RC4-MD5", /* TLS_RSA_WITH_RC4_128_MD5 */ + "RC4-SHA", /* TLS_RSA_WITH_RC4_128_SHA */ + "ADH-RC4-MD5", /* TLS_DH_anon_WITH_RC4_128_MD5 */ + "KRB5-RC4-SHA", /* TLS_KRB5_WITH_RC4_128_SHA */ + "KRB5-RC4-MD5", /* TLS_KRB5_WITH_RC4_128_MD5 */ + "EXP-KRB5-RC4-SHA", /* TLS_KRB5_EXPORT_WITH_RC4_40_SHA */ + "EXP-KRB5-RC4-MD5", /* TLS_KRB5_EXPORT_WITH_RC4_40_MD5 */ + "PSK-RC4-SHA", /* TLS_PSK_WITH_RC4_128_SHA */ + "DHE-PSK-RC4-SHA", /* TLS_DHE_PSK_WITH_RC4_128_SHA */ + "RSA-PSK-RC4-SHA", /* TLS_RSA_PSK_WITH_RC4_128_SHA */ + "ECDH-ECDSA-RC4-SHA", /* TLS_ECDH_ECDSA_WITH_RC4_128_SHA */ + "ECDHE-ECDSA-RC4-SHA", /* TLS_ECDHE_ECDSA_WITH_RC4_128_SHA */ + "ECDH-RSA-RC4-SHA", /* TLS_ECDH_RSA_WITH_RC4_128_SHA */ + "ECDHE-RSA-RC4-SHA", /* TLS_ECDHE_RSA_WITH_RC4_128_SHA */ + "AECDH-RC4-SHA", /* TLS_ECDH_anon_WITH_RC4_128_SHA */ + "ECDHE-PSK-RC4-SHA", /* TLS_ECDHE_PSK_WITH_RC4_128_SHA */ + + /* blacklisted AES128 encrpytion ciphers */ + "AES128-SHA256", /* TLS_RSA_WITH_AES_128_CBC_SHA */ + "DH-DSS-AES128-SHA", /* TLS_DH_DSS_WITH_AES_128_CBC_SHA */ + "DH-RSA-AES128-SHA", /* TLS_DH_RSA_WITH_AES_128_CBC_SHA */ + "DHE-DSS-AES128-SHA", /* TLS_DHE_DSS_WITH_AES_128_CBC_SHA */ + "DHE-RSA-AES128-SHA", /* TLS_DHE_RSA_WITH_AES_128_CBC_SHA */ + "ADH-AES128-SHA", /* TLS_DH_anon_WITH_AES_128_CBC_SHA */ + "AES128-SHA256", /* TLS_RSA_WITH_AES_128_CBC_SHA256 */ + "DH-DSS-AES128-SHA256", /* TLS_DH_DSS_WITH_AES_128_CBC_SHA256 */ + "DH-RSA-AES128-SHA256", /* TLS_DH_RSA_WITH_AES_128_CBC_SHA256 */ + "DHE-DSS-AES128-SHA256", /* TLS_DHE_DSS_WITH_AES_128_CBC_SHA256 */ + "DHE-RSA-AES128-SHA256", /* TLS_DHE_RSA_WITH_AES_128_CBC_SHA256 */ + "ECDH-ECDSA-AES128-SHA", /* TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA */ + "ECDHE-ECDSA-AES128-SHA", /* TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA */ + "ECDH-RSA-AES128-SHA", /* TLS_ECDH_RSA_WITH_AES_128_CBC_SHA */ + "ECDHE-RSA-AES128-SHA", /* TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA */ + "AECDH-AES128-SHA", /* TLS_ECDH_anon_WITH_AES_128_CBC_SHA */ + "ECDHE-ECDSA-AES128-SHA256", /* TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256 */ + "ECDH-ECDSA-AES128-SHA256", /* TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA256 */ + "ECDHE-RSA-AES128-SHA256", /* TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256 */ + "ECDH-RSA-AES128-SHA256", /* TLS_ECDH_RSA_WITH_AES_128_CBC_SHA256 */ + "ADH-AES128-SHA256", /* TLS_DH_anon_WITH_AES_128_CBC_SHA256 */ + "PSK-AES128-CBC-SHA", /* TLS_PSK_WITH_AES_128_CBC_SHA */ + "DHE-PSK-AES128-CBC-SHA", /* TLS_DHE_PSK_WITH_AES_128_CBC_SHA */ + "RSA-PSK-AES128-CBC-SHA", /* TLS_RSA_PSK_WITH_AES_128_CBC_SHA */ + "PSK-AES128-CBC-SHA256", /* TLS_PSK_WITH_AES_128_CBC_SHA256 */ + "DHE-PSK-AES128-CBC-SHA256", /* TLS_DHE_PSK_WITH_AES_128_CBC_SHA256 
*/ + "RSA-PSK-AES128-CBC-SHA256", /* TLS_RSA_PSK_WITH_AES_128_CBC_SHA256 */ + "ECDHE-PSK-AES128-CBC-SHA", /* TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA */ + "ECDHE-PSK-AES128-CBC-SHA256", /* TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA256 */ + "AES128-CCM", /* TLS_RSA_WITH_AES_128_CCM */ + "AES128-CCM8", /* TLS_RSA_WITH_AES_128_CCM_8 */ + "PSK-AES128-CCM", /* TLS_PSK_WITH_AES_128_CCM */ + "PSK-AES128-CCM8", /* TLS_PSK_WITH_AES_128_CCM_8 */ + "AES128-GCM-SHA256", /* TLS_RSA_WITH_AES_128_GCM_SHA256 */ + "DH-RSA-AES128-GCM-SHA256", /* TLS_DH_RSA_WITH_AES_128_GCM_SHA256 */ + "DH-DSS-AES128-GCM-SHA256", /* TLS_DH_DSS_WITH_AES_128_GCM_SHA256 */ + "ADH-AES128-GCM-SHA256", /* TLS_DH_anon_WITH_AES_128_GCM_SHA256 */ + "PSK-AES128-GCM-SHA256", /* TLS_PSK_WITH_AES_128_GCM_SHA256 */ + "RSA-PSK-AES128-GCM-SHA256", /* TLS_RSA_PSK_WITH_AES_128_GCM_SHA256 */ + "ECDH-ECDSA-AES128-GCM-SHA256", /* TLS_ECDH_ECDSA_WITH_AES_128_GCM_SHA256 */ + "ECDH-RSA-AES128-GCM-SHA256", /* TLS_ECDH_RSA_WITH_AES_128_GCM_SHA256 */ + "SRP-AES-128-CBC-SHA", /* TLS_SRP_SHA_WITH_AES_128_CBC_SHA */ + "SRP-RSA-AES-128-CBC-SHA", /* TLS_SRP_SHA_RSA_WITH_AES_128_CBC_SHA */ + "SRP-DSS-AES-128-CBC-SHA", /* TLS_SRP_SHA_DSS_WITH_AES_128_CBC_SHA */ + + /* blacklisted AES256 encrpytion ciphers */ + "AES256-SHA", /* TLS_RSA_WITH_AES_256_CBC_SHA */ + "DH-DSS-AES256-SHA", /* TLS_DH_DSS_WITH_AES_256_CBC_SHA */ + "DH-RSA-AES256-SHA", /* TLS_DH_RSA_WITH_AES_256_CBC_SHA */ + "DHE-DSS-AES256-SHA", /* TLS_DHE_DSS_WITH_AES_256_CBC_SHA */ + "DHE-RSA-AES256-SHA", /* TLS_DHE_RSA_WITH_AES_256_CBC_SHA */ + "ADH-AES256-SHA", /* TLS_DH_anon_WITH_AES_256_CBC_SHA */ + "AES256-SHA256", /* TLS_RSA_WITH_AES_256_CBC_SHA256 */ + "DH-DSS-AES256-SHA256", /* TLS_DH_DSS_WITH_AES_256_CBC_SHA256 */ + "DH-RSA-AES256-SHA256", /* TLS_DH_RSA_WITH_AES_256_CBC_SHA256 */ + "DHE-DSS-AES256-SHA256", /* TLS_DHE_DSS_WITH_AES_256_CBC_SHA256 */ + "DHE-RSA-AES256-SHA256", /* TLS_DHE_RSA_WITH_AES_256_CBC_SHA256 */ + "ADH-AES256-SHA256", /* TLS_DH_anon_WITH_AES_256_CBC_SHA256 */ + "ECDH-ECDSA-AES256-SHA", /* TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA */ + "ECDHE-ECDSA-AES256-SHA", /* TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA */ + "ECDH-RSA-AES256-SHA", /* TLS_ECDH_RSA_WITH_AES_256_CBC_SHA */ + "ECDHE-RSA-AES256-SHA", /* TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA */ + "AECDH-AES256-SHA", /* TLS_ECDH_anon_WITH_AES_256_CBC_SHA */ + "ECDHE-ECDSA-AES256-SHA384", /* TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384 */ + "ECDH-ECDSA-AES256-SHA384", /* TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA384 */ + "ECDHE-RSA-AES256-SHA384", /* TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384 */ + "ECDH-RSA-AES256-SHA384", /* TLS_ECDH_RSA_WITH_AES_256_CBC_SHA384 */ + "PSK-AES256-CBC-SHA", /* TLS_PSK_WITH_AES_256_CBC_SHA */ + "DHE-PSK-AES256-CBC-SHA", /* TLS_DHE_PSK_WITH_AES_256_CBC_SHA */ + "RSA-PSK-AES256-CBC-SHA", /* TLS_RSA_PSK_WITH_AES_256_CBC_SHA */ + "PSK-AES256-CBC-SHA384", /* TLS_PSK_WITH_AES_256_CBC_SHA384 */ + "DHE-PSK-AES256-CBC-SHA384", /* TLS_DHE_PSK_WITH_AES_256_CBC_SHA384 */ + "RSA-PSK-AES256-CBC-SHA384", /* TLS_RSA_PSK_WITH_AES_256_CBC_SHA384 */ + "ECDHE-PSK-AES256-CBC-SHA", /* TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA */ + "ECDHE-PSK-AES256-CBC-SHA384", /* TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA384 */ + "SRP-AES-256-CBC-SHA", /* TLS_SRP_SHA_WITH_AES_256_CBC_SHA */ + "SRP-RSA-AES-256-CBC-SHA", /* TLS_SRP_SHA_RSA_WITH_AES_256_CBC_SHA */ + "SRP-DSS-AES-256-CBC-SHA", /* TLS_SRP_SHA_DSS_WITH_AES_256_CBC_SHA */ + "AES256-CCM", /* TLS_RSA_WITH_AES_256_CCM */ + "AES256-CCM8", /* TLS_RSA_WITH_AES_256_CCM_8 */ + "PSK-AES256-CCM", /* TLS_PSK_WITH_AES_256_CCM */ + 
"PSK-AES256-CCM8", /* TLS_PSK_WITH_AES_256_CCM_8 */ + "AES256-GCM-SHA384", /* TLS_RSA_WITH_AES_256_GCM_SHA384 */ + "DH-RSA-AES256-GCM-SHA384", /* TLS_DH_RSA_WITH_AES_256_GCM_SHA384 */ + "DH-DSS-AES256-GCM-SHA384", /* TLS_DH_DSS_WITH_AES_256_GCM_SHA384 */ + "ADH-AES256-GCM-SHA384", /* TLS_DH_anon_WITH_AES_256_GCM_SHA384 */ + "PSK-AES256-GCM-SHA384", /* TLS_PSK_WITH_AES_256_GCM_SHA384 */ + "RSA-PSK-AES256-GCM-SHA384", /* TLS_RSA_PSK_WITH_AES_256_GCM_SHA384 */ + "ECDH-ECDSA-AES256-GCM-SHA384", /* TLS_ECDH_ECDSA_WITH_AES_256_GCM_SHA384 */ + "ECDH-RSA-AES256-GCM-SHA384", /* TLS_ECDH_RSA_WITH_AES_256_GCM_SHA384 */ + + /* blacklisted CAMELLIA128 encrpytion ciphers */ + "CAMELLIA128-SHA", /* TLS_RSA_WITH_CAMELLIA_128_CBC_SHA */ + "DH-DSS-CAMELLIA128-SHA", /* TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA */ + "DH-RSA-CAMELLIA128-SHA", /* TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA */ + "DHE-DSS-CAMELLIA128-SHA", /* TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA */ + "DHE-RSA-CAMELLIA128-SHA", /* TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA */ + "ADH-CAMELLIA128-SHA", /* TLS_DH_anon_WITH_CAMELLIA_128_CBC_SHA */ + "ECDHE-ECDSA-CAMELLIA128-SHA256", /* TLS_ECDHE_ECDSA_WITH_CAMELLIA_128_CBC_SHA256 */ + "ECDH-ECDSA-CAMELLIA128-SHA256", /* TLS_ECDH_ECDSA_WITH_CAMELLIA_128_CBC_SHA256 */ + "ECDHE-RSA-CAMELLIA128-SHA256", /* TLS_ECDHE_RSA_WITH_CAMELLIA_128_CBC_SHA256 */ + "ECDH-RSA-CAMELLIA128-SHA256", /* TLS_ECDH_RSA_WITH_CAMELLIA_128_CBC_SHA256 */ + "PSK-CAMELLIA128-SHA256", /* TLS_PSK_WITH_CAMELLIA_128_CBC_SHA256 */ + "DHE-PSK-CAMELLIA128-SHA256", /* TLS_DHE_PSK_WITH_CAMELLIA_128_CBC_SHA256 */ + "RSA-PSK-CAMELLIA128-SHA256", /* TLS_RSA_PSK_WITH_CAMELLIA_128_CBC_SHA256 */ + "ECDHE-PSK-CAMELLIA128-SHA256", /* TLS_ECDHE_PSK_WITH_CAMELLIA_128_CBC_SHA256 */ + "CAMELLIA128-GCM-SHA256", /* TLS_RSA_WITH_CAMELLIA_128_GCM_SHA256 */ + "DH-RSA-CAMELLIA128-GCM-SHA256", /* TLS_DH_RSA_WITH_CAMELLIA_128_GCM_SHA256 */ + "DH-DSS-CAMELLIA128-GCM-SHA256", /* TLS_DH_DSS_WITH_CAMELLIA_128_GCM_SHA256 */ + "ADH-CAMELLIA128-GCM-SHA256", /* TLS_DH_anon_WITH_CAMELLIA_128_GCM_SHA256 */ + "ECDH-ECDSA-CAMELLIA128-GCM-SHA256",/* TLS_ECDH_ECDSA_WITH_CAMELLIA_128_GCM_SHA256 */ + "ECDH-RSA-CAMELLIA128-GCM-SHA256", /* TLS_ECDH_RSA_WITH_CAMELLIA_128_GCM_SHA256 */ + "PSK-CAMELLIA128-GCM-SHA256", /* TLS_PSK_WITH_CAMELLIA_128_GCM_SHA256 */ + "RSA-PSK-CAMELLIA128-GCM-SHA256", /* TLS_RSA_PSK_WITH_CAMELLIA_128_GCM_SHA256 */ + "CAMELLIA128-SHA256", /* TLS_RSA_WITH_CAMELLIA_128_CBC_SHA256 */ + "DH-DSS-CAMELLIA128-SHA256", /* TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA256 */ + "DH-RSA-CAMELLIA128-SHA256", /* TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA256 */ + "DHE-DSS-CAMELLIA128-SHA256", /* TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA256 */ + "DHE-RSA-CAMELLIA128-SHA256", /* TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA256 */ + "ADH-CAMELLIA128-SHA256", /* TLS_DH_anon_WITH_CAMELLIA_128_CBC_SHA256 */ + + /* blacklisted CAMELLIA256 encrpytion ciphers */ + "CAMELLIA256-SHA", /* TLS_RSA_WITH_CAMELLIA_256_CBC_SHA */ + "DH-RSA-CAMELLIA256-SHA", /* TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA */ + "DH-DSS-CAMELLIA256-SHA", /* TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA */ + "DHE-DSS-CAMELLIA256-SHA", /* TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA */ + "DHE-RSA-CAMELLIA256-SHA", /* TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA */ + "ADH-CAMELLIA256-SHA", /* TLS_DH_anon_WITH_CAMELLIA_256_CBC_SHA */ + "ECDHE-ECDSA-CAMELLIA256-SHA384", /* TLS_ECDHE_ECDSA_WITH_CAMELLIA_256_CBC_SHA384 */ + "ECDH-ECDSA-CAMELLIA256-SHA384", /* TLS_ECDH_ECDSA_WITH_CAMELLIA_256_CBC_SHA384 */ + "ECDHE-RSA-CAMELLIA256-SHA384", /* TLS_ECDHE_RSA_WITH_CAMELLIA_256_CBC_SHA384 
*/ + "ECDH-RSA-CAMELLIA256-SHA384", /* TLS_ECDH_RSA_WITH_CAMELLIA_256_CBC_SHA384 */ + "PSK-CAMELLIA256-SHA384", /* TLS_PSK_WITH_CAMELLIA_256_CBC_SHA384 */ + "DHE-PSK-CAMELLIA256-SHA384", /* TLS_DHE_PSK_WITH_CAMELLIA_256_CBC_SHA384 */ + "RSA-PSK-CAMELLIA256-SHA384", /* TLS_RSA_PSK_WITH_CAMELLIA_256_CBC_SHA384 */ + "ECDHE-PSK-CAMELLIA256-SHA384", /* TLS_ECDHE_PSK_WITH_CAMELLIA_256_CBC_SHA384 */ + "CAMELLIA256-SHA256", /* TLS_RSA_WITH_CAMELLIA_256_CBC_SHA256 */ + "DH-DSS-CAMELLIA256-SHA256", /* TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA256 */ + "DH-RSA-CAMELLIA256-SHA256", /* TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA256 */ + "DHE-DSS-CAMELLIA256-SHA256", /* TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA256 */ + "DHE-RSA-CAMELLIA256-SHA256", /* TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA256 */ + "ADH-CAMELLIA256-SHA256", /* TLS_DH_anon_WITH_CAMELLIA_256_CBC_SHA256 */ + "CAMELLIA256-GCM-SHA384", /* TLS_RSA_WITH_CAMELLIA_256_GCM_SHA384 */ + "DH-RSA-CAMELLIA256-GCM-SHA384", /* TLS_DH_RSA_WITH_CAMELLIA_256_GCM_SHA384 */ + "DH-DSS-CAMELLIA256-GCM-SHA384", /* TLS_DH_DSS_WITH_CAMELLIA_256_GCM_SHA384 */ + "ADH-CAMELLIA256-GCM-SHA384", /* TLS_DH_anon_WITH_CAMELLIA_256_GCM_SHA384 */ + "ECDH-ECDSA-CAMELLIA256-GCM-SHA384",/* TLS_ECDH_ECDSA_WITH_CAMELLIA_256_GCM_SHA384 */ + "ECDH-RSA-CAMELLIA256-GCM-SHA384", /* TLS_ECDH_RSA_WITH_CAMELLIA_256_GCM_SHA384 */ + "PSK-CAMELLIA256-GCM-SHA384", /* TLS_PSK_WITH_CAMELLIA_256_GCM_SHA384 */ + "RSA-PSK-CAMELLIA256-GCM-SHA384", /* TLS_RSA_PSK_WITH_CAMELLIA_256_GCM_SHA384 */ + + /* The blacklisted ARIA encrpytion ciphers */ + "ARIA128-SHA256", /* TLS_RSA_WITH_ARIA_128_CBC_SHA256 */ + "ARIA256-SHA384", /* TLS_RSA_WITH_ARIA_256_CBC_SHA384 */ + "DH-DSS-ARIA128-SHA256", /* TLS_DH_DSS_WITH_ARIA_128_CBC_SHA256 */ + "DH-DSS-ARIA256-SHA384", /* TLS_DH_DSS_WITH_ARIA_256_CBC_SHA384 */ + "DH-RSA-ARIA128-SHA256", /* TLS_DH_RSA_WITH_ARIA_128_CBC_SHA256 */ + "DH-RSA-ARIA256-SHA384", /* TLS_DH_RSA_WITH_ARIA_256_CBC_SHA384 */ + "DHE-DSS-ARIA128-SHA256", /* TLS_DHE_DSS_WITH_ARIA_128_CBC_SHA256 */ + "DHE-DSS-ARIA256-SHA384", /* TLS_DHE_DSS_WITH_ARIA_256_CBC_SHA384 */ + "DHE-RSA-ARIA128-SHA256", /* TLS_DHE_RSA_WITH_ARIA_128_CBC_SHA256 */ + "DHE-RSA-ARIA256-SHA384", /* TLS_DHE_RSA_WITH_ARIA_256_CBC_SHA384 */ + "ADH-ARIA128-SHA256", /* TLS_DH_anon_WITH_ARIA_128_CBC_SHA256 */ + "ADH-ARIA256-SHA384", /* TLS_DH_anon_WITH_ARIA_256_CBC_SHA384 */ + "ECDHE-ECDSA-ARIA128-SHA256", /* TLS_ECDHE_ECDSA_WITH_ARIA_128_CBC_SHA256 */ + "ECDHE-ECDSA-ARIA256-SHA384", /* TLS_ECDHE_ECDSA_WITH_ARIA_256_CBC_SHA384 */ + "ECDH-ECDSA-ARIA128-SHA256", /* TLS_ECDH_ECDSA_WITH_ARIA_128_CBC_SHA256 */ + "ECDH-ECDSA-ARIA256-SHA384", /* TLS_ECDH_ECDSA_WITH_ARIA_256_CBC_SHA384 */ + "ECDHE-RSA-ARIA128-SHA256", /* TLS_ECDHE_RSA_WITH_ARIA_128_CBC_SHA256 */ + "ECDHE-RSA-ARIA256-SHA384", /* TLS_ECDHE_RSA_WITH_ARIA_256_CBC_SHA384 */ + "ECDH-RSA-ARIA128-SHA256", /* TLS_ECDH_RSA_WITH_ARIA_128_CBC_SHA256 */ + "ECDH-RSA-ARIA256-SHA384", /* TLS_ECDH_RSA_WITH_ARIA_256_CBC_SHA384 */ + "ARIA128-GCM-SHA256", /* TLS_RSA_WITH_ARIA_128_GCM_SHA256 */ + "ARIA256-GCM-SHA384", /* TLS_RSA_WITH_ARIA_256_GCM_SHA384 */ + "DH-DSS-ARIA128-GCM-SHA256", /* TLS_DH_DSS_WITH_ARIA_128_GCM_SHA256 */ + "DH-DSS-ARIA256-GCM-SHA384", /* TLS_DH_DSS_WITH_ARIA_256_GCM_SHA384 */ + "DH-RSA-ARIA128-GCM-SHA256", /* TLS_DH_RSA_WITH_ARIA_128_GCM_SHA256 */ + "DH-RSA-ARIA256-GCM-SHA384", /* TLS_DH_RSA_WITH_ARIA_256_GCM_SHA384 */ + "ADH-ARIA128-GCM-SHA256", /* TLS_DH_anon_WITH_ARIA_128_GCM_SHA256 */ + "ADH-ARIA256-GCM-SHA384", /* TLS_DH_anon_WITH_ARIA_256_GCM_SHA384 */ + 
"ECDH-ECDSA-ARIA128-GCM-SHA256", /* TLS_ECDH_ECDSA_WITH_ARIA_128_GCM_SHA256 */ + "ECDH-ECDSA-ARIA256-GCM-SHA384", /* TLS_ECDH_ECDSA_WITH_ARIA_256_GCM_SHA384 */ + "ECDH-RSA-ARIA128-GCM-SHA256", /* TLS_ECDH_RSA_WITH_ARIA_128_GCM_SHA256 */ + "ECDH-RSA-ARIA256-GCM-SHA384", /* TLS_ECDH_RSA_WITH_ARIA_256_GCM_SHA384 */ + "PSK-ARIA128-SHA256", /* TLS_PSK_WITH_ARIA_128_CBC_SHA256 */ + "PSK-ARIA256-SHA384", /* TLS_PSK_WITH_ARIA_256_CBC_SHA384 */ + "DHE-PSK-ARIA128-SHA256", /* TLS_DHE_PSK_WITH_ARIA_128_CBC_SHA256 */ + "DHE-PSK-ARIA256-SHA384", /* TLS_DHE_PSK_WITH_ARIA_256_CBC_SHA384 */ + "RSA-PSK-ARIA128-SHA256", /* TLS_RSA_PSK_WITH_ARIA_128_CBC_SHA256 */ + "RSA-PSK-ARIA256-SHA384", /* TLS_RSA_PSK_WITH_ARIA_256_CBC_SHA384 */ + "ARIA128-GCM-SHA256", /* TLS_PSK_WITH_ARIA_128_GCM_SHA256 */ + "ARIA256-GCM-SHA384", /* TLS_PSK_WITH_ARIA_256_GCM_SHA384 */ + "RSA-PSK-ARIA128-GCM-SHA256", /* TLS_RSA_PSK_WITH_ARIA_128_GCM_SHA256 */ + "RSA-PSK-ARIA256-GCM-SHA384", /* TLS_RSA_PSK_WITH_ARIA_256_GCM_SHA384 */ + "ECDHE-PSK-ARIA128-SHA256", /* TLS_ECDHE_PSK_WITH_ARIA_128_CBC_SHA256 */ + "ECDHE-PSK-ARIA256-SHA384", /* TLS_ECDHE_PSK_WITH_ARIA_256_CBC_SHA384 */ + + /* blacklisted SEED encryptions */ + "SEED-SHA", /*TLS_RSA_WITH_SEED_CBC_SHA */ + "DH-DSS-SEED-SHA", /* TLS_DH_DSS_WITH_SEED_CBC_SHA */ + "DH-RSA-SEED-SHA", /* TLS_DH_RSA_WITH_SEED_CBC_SHA */ + "DHE-DSS-SEED-SHA", /* TLS_DHE_DSS_WITH_SEED_CBC_SHA */ + "DHE-RSA-SEED-SHA", /* TLS_DHE_RSA_WITH_SEED_CBC_SHA */ + "ADH-SEED-SHA", /* TLS_DH_anon_WITH_SEED_CBC_SHA */ + + /* blacklisted KRB5 ciphers */ + "KRB5-DES-CBC-SHA", /* TLS_KRB5_WITH_DES_CBC_SHA */ + "KRB5-DES-CBC3-SHA", /* TLS_KRB5_WITH_3DES_EDE_CBC_SHA */ + "KRB5-IDEA-CBC-SHA", /* TLS_KRB5_WITH_IDEA_CBC_SHA */ + "KRB5-DES-CBC-MD5", /* TLS_KRB5_WITH_DES_CBC_MD5 */ + "KRB5-DES-CBC3-MD5", /* TLS_KRB5_WITH_3DES_EDE_CBC_MD5 */ + "KRB5-IDEA-CBC-MD5", /* TLS_KRB5_WITH_IDEA_CBC_MD5 */ + "EXP-KRB5-DES-CBC-SHA", /* TLS_KRB5_EXPORT_WITH_DES_CBC_40_SHA */ + "EXP-KRB5-DES-CBC-MD5", /* TLS_KRB5_EXPORT_WITH_DES_CBC_40_MD5 */ + "EXP-KRB5-RC2-CBC-SHA", /* TLS_KRB5_EXPORT_WITH_RC2_CBC_40_SHA */ + "EXP-KRB5-RC2-CBC-MD5", /* TLS_KRB5_EXPORT_WITH_RC2_CBC_40_MD5 */ + + /* blacklisted exoticas */ + "DHE-DSS-CBC-SHA", /* TLS_DHE_DSS_WITH_DES_CBC_SHA */ + "IDEA-CBC-SHA", /* TLS_RSA_WITH_IDEA_CBC_SHA */ + + /* not really sure if the following names are correct */ + "SSL3_CK_SCSV", /* TLS_EMPTY_RENEGOTIATION_INFO_SCSV */ + "SSL3_CK_FALLBACK_SCSV" +}; +static size_t RFC7540_names_LEN = sizeof(RFC7540_names)/sizeof(RFC7540_names[0]); + + +static apr_hash_t *BLCNames; + +static void cipher_init(apr_pool_t *pool) +{ + apr_hash_t *hash = apr_hash_make(pool); + const char *source; + unsigned int i; + + source = "rfc7540"; + for (i = 0; i < RFC7540_names_LEN; ++i) { + apr_hash_set(hash, RFC7540_names[i], APR_HASH_KEY_STRING, source); + } + + BLCNames = hash; +} + +static int cipher_is_blacklisted(const char *cipher, const char **psource) +{ + *psource = apr_hash_get(BLCNames, cipher, APR_HASH_KEY_STRING); + return !!*psource; +} + +/******************************************************************************* + * Hooks for processing incoming connections: + * - process_conn take over connection in case of h2 + */ +static int h2_h2_process_conn(conn_rec* c); +static int h2_h2_pre_close_conn(conn_rec* c); +static int h2_h2_post_read_req(request_rec *r); +static int h2_h2_late_fixups(request_rec *r); + +/******************************************************************************* + * Once per lifetime init, retrieve optional functions + */ 
+apr_status_t h2_h2_init(apr_pool_t *pool, server_rec *s) +{ + (void)pool; + ap_log_error(APLOG_MARK, APLOG_TRACE1, 0, s, "h2_h2, child_init"); + opt_ssl_is_https = APR_RETRIEVE_OPTIONAL_FN(ssl_is_https); + opt_ssl_var_lookup = APR_RETRIEVE_OPTIONAL_FN(ssl_var_lookup); + + if (!opt_ssl_is_https || !opt_ssl_var_lookup) { + ap_log_error(APLOG_MARK, APLOG_WARNING, 0, s, + APLOGNO(02951) "mod_ssl does not seem to be enabled"); + } + + cipher_init(pool); + + return APR_SUCCESS; +} + +int h2_h2_is_tls(conn_rec *c) +{ + return opt_ssl_is_https && opt_ssl_is_https(c); +} + +int h2_is_acceptable_connection(conn_rec *c, int require_all) +{ + int is_tls = h2_h2_is_tls(c); + const h2_config *cfg = h2_config_get(c); + + if (is_tls && h2_config_geti(cfg, H2_CONF_MODERN_TLS_ONLY) > 0) { + /* Check TLS connection for modern TLS parameters, as defined in + * RFC 7540 and https://wiki.mozilla.org/Security/Server_Side_TLS#Modern_compatibility + */ + apr_pool_t *pool = c->pool; + server_rec *s = c->base_server; + char *val; + + if (!opt_ssl_var_lookup) { + /* unable to check */ + return 0; + } + + /* Need Tlsv1.2 or higher, rfc 7540, ch. 9.2 + */ + val = opt_ssl_var_lookup(pool, s, c, NULL, (char*)"SSL_PROTOCOL"); + if (val && *val) { + if (strncmp("TLS", val, 3) + || !strcmp("TLSv1", val) + || !strcmp("TLSv1.1", val)) { + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, c, APLOGNO(03050) + "h2_h2(%ld): tls protocol not suitable: %s", + (long)c->id, val); + return 0; + } + } + else if (require_all) { + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, c, APLOGNO(03051) + "h2_h2(%ld): tls protocol is indetermined", (long)c->id); + return 0; + } + + /* Check TLS cipher blacklist + */ + val = opt_ssl_var_lookup(pool, s, c, NULL, (char*)"SSL_CIPHER"); + if (val && *val) { + const char *source; + if (cipher_is_blacklisted(val, &source)) { + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, c, APLOGNO(03052) + "h2_h2(%ld): tls cipher %s blacklisted by %s", + (long)c->id, val, source); + return 0; + } + } + else if (require_all) { + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, c, APLOGNO(03053) + "h2_h2(%ld): tls cipher is indetermined", (long)c->id); + return 0; + } + } + return 1; +} + +int h2_allows_h2_direct(conn_rec *c) +{ + const h2_config *cfg = h2_config_get(c); + int is_tls = h2_h2_is_tls(c); + const char *needed_protocol = is_tls? "h2" : "h2c"; + int h2_direct = h2_config_geti(cfg, H2_CONF_DIRECT); + + if (h2_direct < 0) { + h2_direct = is_tls? 0 : 1; + } + return (h2_direct + && ap_is_allowed_protocol(c, NULL, NULL, needed_protocol)); +} + +int h2_allows_h2_upgrade(conn_rec *c) +{ + const h2_config *cfg = h2_config_get(c); + int h2_upgrade = h2_config_geti(cfg, H2_CONF_UPGRADE); + + return h2_upgrade > 0 || (h2_upgrade < 0 && !h2_h2_is_tls(c)); +} + +/******************************************************************************* + * Register various hooks + */ +static const char* const mod_ssl[] = { "mod_ssl.c", NULL}; +static const char* const mod_reqtimeout[] = { "mod_reqtimeout.c", NULL}; + +void h2_h2_register_hooks(void) +{ + /* Our main processing needs to run quite late. Definitely after mod_ssl, + * as we need its connection filters, but also before reqtimeout as its + * method of timeouts is specific to HTTP/1.1 (as of now). + * The core HTTP/1 processing run as REALLY_LAST, so we will have + * a chance to take over before it. + */ + ap_hook_process_connection(h2_h2_process_conn, + mod_ssl, mod_reqtimeout, APR_HOOK_LAST); + + /* One last chance to properly say goodbye if we have not done so + * already. 
*/ + ap_hook_pre_close_connection(h2_h2_pre_close_conn, NULL, mod_ssl, APR_HOOK_LAST); + + /* With "H2SerializeHeaders On", we install the filter in this hook + * that parses the response. This needs to happen before any other post + * read function terminates the request with an error. Otherwise we will + * never see the response. + */ + ap_hook_post_read_request(h2_h2_post_read_req, NULL, NULL, APR_HOOK_REALLY_FIRST); + ap_hook_fixups(h2_h2_late_fixups, NULL, NULL, APR_HOOK_LAST); + + /* special bucket type transfer through a h2_bucket_beam */ + h2_register_bucket_beamer(h2_bucket_headers_beam); + h2_register_bucket_beamer(h2_bucket_observer_beam); +} + +int h2_h2_process_conn(conn_rec* c) +{ + apr_status_t status; + h2_ctx *ctx; + + if (c->master) { + return DECLINED; + } + + ctx = h2_ctx_get(c, 0); + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c, "h2_h2, process_conn"); + if (h2_ctx_is_task(ctx)) { + /* our stream pseudo connection */ + ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, c, "h2_h2, task, declined"); + return DECLINED; + } + + if (!ctx && c->keepalives == 0) { + const char *proto = ap_get_protocol(c); + + if (APLOGctrace1(c)) { + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c, "h2_h2, process_conn, " + "new connection using protocol '%s', direct=%d, " + "tls acceptable=%d", proto, h2_allows_h2_direct(c), + h2_is_acceptable_connection(c, 1)); + } + + if (!strcmp(AP_PROTOCOL_HTTP1, proto) + && h2_allows_h2_direct(c) + && h2_is_acceptable_connection(c, 1)) { + /* Fresh connection still is on http/1.1 and H2Direct is enabled. + * Otherwise connection is in a fully acceptable state. + * -> peek at the first 24 incoming bytes + */ + apr_bucket_brigade *temp; + char *s = NULL; + apr_size_t slen; + + temp = apr_brigade_create(c->pool, c->bucket_alloc); + status = ap_get_brigade(c->input_filters, temp, + AP_MODE_SPECULATIVE, APR_BLOCK_READ, 24); + + if (status != APR_SUCCESS) { + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, c, APLOGNO(03054) + "h2_h2, error reading 24 bytes speculative"); + apr_brigade_destroy(temp); + return DECLINED; + } + + apr_brigade_pflatten(temp, &s, &slen, c->pool); + if ((slen >= 24) && !memcmp(H2_MAGIC_TOKEN, s, 24)) { + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c, + "h2_h2, direct mode detected"); + if (!ctx) { + ctx = h2_ctx_get(c, 1); + } + h2_ctx_protocol_set(ctx, h2_h2_is_tls(c)? "h2" : "h2c"); + } + else if (APLOGctrace2(c)) { + ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, c, + "h2_h2, not detected in %d bytes(base64): %s", + (int)slen, h2_util_base64url_encode(s, slen, c->pool)); + } + + apr_brigade_destroy(temp); + } + } + + if (ctx) { + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c, "process_conn"); + if (!h2_ctx_session_get(ctx)) { + status = h2_conn_setup(ctx, c, NULL); + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, c, "conn_setup"); + if (status != APR_SUCCESS) { + h2_ctx_clear(c); + return !OK; + } + } + h2_conn_run(ctx, c); + return OK; + } + + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c, "h2_h2, declined"); + return DECLINED; +} + +static int h2_h2_pre_close_conn(conn_rec *c) +{ + h2_ctx *ctx; + + /* slave connection? */ + if (c->master) { + return DECLINED; + } + + ctx = h2_ctx_get(c, 0); + if (ctx) { + /* If the session has been closed correctly already, we will not + * find a h2_ctx here. The presence indicates that the session + * is still ongoing. 
*/ + return h2_conn_pre_close(ctx, c); + } + return DECLINED; +} + +static void check_push(request_rec *r, const char *tag) +{ + const h2_config *conf = h2_config_rget(r); + if (!r->expecting_100 + && conf && conf->push_list && conf->push_list->nelts > 0) { + int i, old_status; + const char *old_line; + ap_log_rerror(APLOG_MARK, APLOG_TRACE1, 0, r, + "%s, early announcing %d resources for push", + tag, conf->push_list->nelts); + for (i = 0; i < conf->push_list->nelts; ++i) { + h2_push_res *push = &APR_ARRAY_IDX(conf->push_list, i, h2_push_res); + apr_table_add(r->headers_out, "Link", + apr_psprintf(r->pool, "<%s>; rel=preload%s", + push->uri_ref, push->critical? "; critical" : "")); + } + old_status = r->status; + old_line = r->status_line; + r->status = 103; + r->status_line = "103 Early Hints"; + ap_send_interim_response(r, 1); + r->status = old_status; + r->status_line = old_line; + } +} + +static int h2_h2_post_read_req(request_rec *r) +{ + /* slave connection? */ + if (r->connection->master) { + h2_ctx *ctx = h2_ctx_rget(r); + struct h2_task *task = h2_ctx_get_task(ctx); + /* This hook will get called twice on internal redirects. Take care + * that we manipulate filters only once. */ + if (task && !task->filters_set) { + ap_filter_t *f; + ap_log_rerror(APLOG_MARK, APLOG_TRACE3, 0, r, + "h2_task(%s): adding request filters", task->id); + + /* setup the correct filters to process the request for h2 */ + ap_add_input_filter("H2_REQUEST", task, r, r->connection); + + /* replace the core http filter that formats response headers + * in HTTP/1 with our own that collects status and headers */ + ap_remove_output_filter_byhandle(r->output_filters, "HTTP_HEADER"); + ap_add_output_filter("H2_RESPONSE", task, r, r->connection); + + for (f = r->input_filters; f; f = f->next) { + if (!strcmp("H2_SLAVE_IN", f->frec->name)) { + f->r = r; + break; + } + } + ap_add_output_filter("H2_TRAILERS_OUT", task, r, r->connection); + task->filters_set = 1; + } + } + return DECLINED; +} + +static int h2_h2_late_fixups(request_rec *r) +{ + /* slave connection? */ + if (r->connection->master) { + h2_ctx *ctx = h2_ctx_rget(r); + struct h2_task *task = h2_ctx_get_task(ctx); + if (task) { + /* check if we copy vs. setaside files in this location */ + task->output.copy_files = h2_config_geti(h2_config_rget(r), + H2_CONF_COPY_FILES); + if (task->output.copy_files) { + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, task->c, + "h2_slave_out(%s): copy_files on", task->id); + h2_beam_on_file_beam(task->output.beam, h2_beam_no_files, NULL); + } + check_push(r, "late_fixup"); + } + } + return DECLINED; +} + diff --git a/modules/http2/h2_h2.h b/modules/http2/h2_h2.h new file mode 100644 index 0000000..367823d --- /dev/null +++ b/modules/http2/h2_h2.h @@ -0,0 +1,79 @@ +/* Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef __mod_h2__h2_h2__ +#define __mod_h2__h2_h2__ + +/** + * List of ALPN protocol identifiers that we support in cleartext + * negotiations. NULL terminated. + */ +extern const char *h2_clear_protos[]; + +/** + * List of ALPN protocol identifiers that we support in TLS encrypted + * negotiations. NULL terminated. + */ +extern const char *h2_tls_protos[]; + +/** + * Provide a user readable description of the HTTP/2 error code- + * @param h2_error http/2 error code, as in rfc 7540, ch. 7 + * @return textual description of code or that it is unknown. + */ +const char *h2_h2_err_description(unsigned int h2_error); + +/* + * One time, post config initialization. + */ +apr_status_t h2_h2_init(apr_pool_t *pool, server_rec *s); + +/* Is the connection a TLS connection? + */ +int h2_h2_is_tls(conn_rec *c); + +/* Register apache hooks for h2 protocol + */ +void h2_h2_register_hooks(void); + +/** + * Check if the given connection fulfills the requirements as configured. + * @param c the connection + * @param require_all != 0 iff any missing connection properties make + * the test fail. For example, a cipher might not have been selected while + * the handshake is still ongoing. + * @return != 0 iff connection requirements are met + */ +int h2_is_acceptable_connection(conn_rec *c, int require_all); + +/** + * Check if the "direct" HTTP/2 mode of protocol handling is enabled + * for the given connection. + * @param c the connection to check + * @return != 0 iff direct mode is enabled + */ +int h2_allows_h2_direct(conn_rec *c); + +/** + * Check if the "Upgrade" HTTP/1.1 mode of protocol switching is enabled + * for the given connection. + * @param c the connection to check + * @return != 0 iff Upgrade switching is enabled + */ +int h2_allows_h2_upgrade(conn_rec *c); + + +#endif /* defined(__mod_h2__h2_h2__) */ diff --git a/modules/http2/h2_headers.c b/modules/http2/h2_headers.c new file mode 100644 index 0000000..8b7add6 --- /dev/null +++ b/modules/http2/h2_headers.c @@ -0,0 +1,178 @@ +/* Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include <assert.h> +#include <stdio.h> + +#include <apr_strings.h> + +#include <httpd.h> +#include <http_core.h> +#include <http_log.h> +#include <util_time.h> + +#include <nghttp2/nghttp2.h> + +#include "h2_private.h" +#include "h2_h2.h" +#include "h2_util.h" +#include "h2_request.h" +#include "h2_headers.h" + + +static int is_unsafe(server_rec *s) +{ + core_server_config *conf = ap_get_core_module_config(s->module_config); + return (conf->http_conformance == AP_HTTP_CONFORMANCE_UNSAFE); +} + +typedef struct { + apr_bucket_refcount refcount; + h2_headers *headers; +} h2_bucket_headers; + +static apr_status_t bucket_read(apr_bucket *b, const char **str, + apr_size_t *len, apr_read_type_e block) +{ + (void)b; + (void)block; + *str = NULL; + *len = 0; + return APR_SUCCESS; +} + +apr_bucket * h2_bucket_headers_make(apr_bucket *b, h2_headers *r) +{ + h2_bucket_headers *br; + + br = apr_bucket_alloc(sizeof(*br), b->list); + br->headers = r; + + b = apr_bucket_shared_make(b, br, 0, 0); + b->type = &h2_bucket_type_headers; + + return b; +} + +apr_bucket * h2_bucket_headers_create(apr_bucket_alloc_t *list, + h2_headers *r) +{ + apr_bucket *b = apr_bucket_alloc(sizeof(*b), list); + + APR_BUCKET_INIT(b); + b->free = apr_bucket_free; + b->list = list; + b = h2_bucket_headers_make(b, r); + return b; +} + +h2_headers *h2_bucket_headers_get(apr_bucket *b) +{ + if (H2_BUCKET_IS_HEADERS(b)) { + return ((h2_bucket_headers *)b->data)->headers; + } + return NULL; +} + +const apr_bucket_type_t h2_bucket_type_headers = { + "H2HEADERS", 5, APR_BUCKET_METADATA, + apr_bucket_destroy_noop, + bucket_read, + apr_bucket_setaside_noop, + apr_bucket_split_notimpl, + apr_bucket_shared_copy +}; + +apr_bucket *h2_bucket_headers_beam(struct h2_bucket_beam *beam, + apr_bucket_brigade *dest, + const apr_bucket *src) +{ + if (H2_BUCKET_IS_HEADERS(src)) { + h2_headers *r = ((h2_bucket_headers *)src->data)->headers; + apr_bucket *b = h2_bucket_headers_create(dest->bucket_alloc, r); + APR_BRIGADE_INSERT_TAIL(dest, b); + return b; + } + return NULL; +} + + +h2_headers *h2_headers_create(int status, apr_table_t *headers_in, + apr_table_t *notes, apr_off_t raw_bytes, + apr_pool_t *pool) +{ + h2_headers *headers = apr_pcalloc(pool, sizeof(h2_headers)); + headers->status = status; + headers->headers = (headers_in? apr_table_clone(pool, headers_in) + : apr_table_make(pool, 5)); + headers->notes = (notes? apr_table_clone(pool, notes) + : apr_table_make(pool, 5)); + return headers; +} + +h2_headers *h2_headers_rcreate(request_rec *r, int status, + apr_table_t *header, apr_pool_t *pool) +{ + h2_headers *headers = h2_headers_create(status, header, r->notes, 0, pool); + if (headers->status == HTTP_FORBIDDEN) { + const char *cause = apr_table_get(r->notes, "ssl-renegotiate-forbidden"); + if (cause) { + /* This request triggered a TLS renegotiation that is now allowed + * in HTTP/2. Tell the client that it should use HTTP/1.1 for this. 
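 * Using H2_ERR_HTTP_1_1_REQUIRED here presumably maps to the
 * HTTP_1_1_REQUIRED (0x0d) stream error of RFC 7540, section 7, telling
 * the client to retry the request over HTTP/1.1, where renegotiation
 * is permitted.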
+ */ + ap_log_rerror(APLOG_MARK, APLOG_DEBUG, headers->status, r, + APLOGNO(03061) + "h2_headers(%ld): renegotiate forbidden, cause: %s", + (long)r->connection->id, cause); + headers->status = H2_ERR_HTTP_1_1_REQUIRED; + } + } + if (is_unsafe(r->server)) { + apr_table_setn(headers->notes, H2_HDR_CONFORMANCE, + H2_HDR_CONFORMANCE_UNSAFE); + } + return headers; +} + +h2_headers *h2_headers_copy(apr_pool_t *pool, h2_headers *h) +{ + return h2_headers_create(h->status, h->headers, h->notes, h->raw_bytes, pool); +} + +h2_headers *h2_headers_die(apr_status_t type, + const h2_request *req, apr_pool_t *pool) +{ + h2_headers *headers; + char *date; + + headers = apr_pcalloc(pool, sizeof(h2_headers)); + headers->status = (type >= 200 && type < 600)? type : 500; + headers->headers = apr_table_make(pool, 5); + headers->notes = apr_table_make(pool, 5); + + date = apr_palloc(pool, APR_RFC822_DATE_LEN); + ap_recent_rfc822_date(date, req? req->request_time : apr_time_now()); + apr_table_setn(headers->headers, "Date", date); + apr_table_setn(headers->headers, "Server", ap_get_server_banner()); + + return headers; +} + +int h2_headers_are_response(h2_headers *headers) +{ + return headers->status >= 200; +} + diff --git a/modules/http2/h2_headers.h b/modules/http2/h2_headers.h new file mode 100644 index 0000000..840e8c4 --- /dev/null +++ b/modules/http2/h2_headers.h @@ -0,0 +1,79 @@ +/* Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef __mod_h2__h2_headers__ +#define __mod_h2__h2_headers__ + +#include "h2.h" + +struct h2_bucket_beam; + +extern const apr_bucket_type_t h2_bucket_type_headers; + +#define H2_BUCKET_IS_HEADERS(e) (e->type == &h2_bucket_type_headers) + +apr_bucket * h2_bucket_headers_make(apr_bucket *b, h2_headers *r); + +apr_bucket * h2_bucket_headers_create(apr_bucket_alloc_t *list, + h2_headers *r); + +h2_headers *h2_bucket_headers_get(apr_bucket *b); + +apr_bucket *h2_bucket_headers_beam(struct h2_bucket_beam *beam, + apr_bucket_brigade *dest, + const apr_bucket *src); + +/** + * Create the headers from the given status and headers + * @param status the headers status + * @param header the headers of the headers + * @param notes the notes carried by the headers + * @param raw_bytes the raw network bytes (if known) used to transmit these + * @param pool the memory pool to use + */ +h2_headers *h2_headers_create(int status, apr_table_t *header, + apr_table_t *notes, apr_off_t raw_bytes, + apr_pool_t *pool); + +/** + * Create the headers from the given request_rec. 
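 * A minimal illustrative call (variable names hypothetical):
 *
 *   h2_headers *resp = h2_headers_rcreate(r, r->status,
 *                                         r->headers_out, r->pool);
 *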
+ * @param r the request record which was processed + * @param status the headers status + * @param header the headers of the headers + * @param pool the memory pool to use + */ +h2_headers *h2_headers_rcreate(request_rec *r, int status, + apr_table_t *header, apr_pool_t *pool); + +/** + * Clone the headers into another pool. This will not copy any + * header strings. + */ +h2_headers *h2_headers_copy(apr_pool_t *pool, h2_headers *h); + +/** + * Create the headers for the given error. + * @param stream_id id of the stream to create the headers for + * @param type the error code + * @param req the original h2_request + * @param pool the memory pool to use + */ +h2_headers *h2_headers_die(apr_status_t type, + const struct h2_request *req, apr_pool_t *pool); + +int h2_headers_are_response(h2_headers *headers); + +#endif /* defined(__mod_h2__h2_headers__) */ diff --git a/modules/http2/h2_mplx.c b/modules/http2/h2_mplx.c new file mode 100644 index 0000000..0fae117 --- /dev/null +++ b/modules/http2/h2_mplx.c @@ -0,0 +1,1282 @@ +/* Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include <assert.h> +#include <stddef.h> +#include <stdlib.h> + +#include <apr_atomic.h> +#include <apr_thread_mutex.h> +#include <apr_thread_cond.h> +#include <apr_strings.h> +#include <apr_time.h> + +#include <httpd.h> +#include <http_core.h> +#include <http_log.h> + +#include <mpm_common.h> + +#include "mod_http2.h" + +#include "h2.h" +#include "h2_private.h" +#include "h2_bucket_beam.h" +#include "h2_config.h" +#include "h2_conn.h" +#include "h2_ctx.h" +#include "h2_h2.h" +#include "h2_mplx.h" +#include "h2_ngn_shed.h" +#include "h2_request.h" +#include "h2_stream.h" +#include "h2_session.h" +#include "h2_task.h" +#include "h2_workers.h" +#include "h2_util.h" + + +/* utility for iterating over ihash stream sets */ +typedef struct { + h2_mplx *m; + h2_stream *stream; + apr_time_t now; +} stream_iter_ctx; + +apr_status_t h2_mplx_child_init(apr_pool_t *pool, server_rec *s) +{ + return APR_SUCCESS; +} + +#define H2_MPLX_ENTER(m) \ + do { apr_status_t rv; if ((rv = apr_thread_mutex_lock(m->lock)) != APR_SUCCESS) {\ + return rv;\ + } } while(0) + +#define H2_MPLX_LEAVE(m) \ + apr_thread_mutex_unlock(m->lock) + +#define H2_MPLX_ENTER_ALWAYS(m) \ + apr_thread_mutex_lock(m->lock) + +#define H2_MPLX_ENTER_MAYBE(m, lock) \ + if (lock) apr_thread_mutex_lock(m->lock) + +#define H2_MPLX_LEAVE_MAYBE(m, lock) \ + if (lock) apr_thread_mutex_unlock(m->lock) + +static void check_data_for(h2_mplx *m, h2_stream *stream, int lock); + +static void stream_output_consumed(void *ctx, + h2_bucket_beam *beam, apr_off_t length) +{ + h2_stream *stream = ctx; + h2_task *task = stream->task; + + if (length > 0 && task && task->assigned) { + h2_req_engine_out_consumed(task->assigned, task->c, length); + } +} + +static void stream_input_ev(void *ctx, h2_bucket_beam *beam) +{ + h2_stream *stream = ctx; + h2_mplx *m = stream->session->mplx; + apr_atomic_set32(&m->event_pending, 1); +} + +static void stream_input_consumed(void *ctx, h2_bucket_beam *beam, apr_off_t length) +{ + h2_stream_in_consumed(ctx, length); +} + +static void stream_joined(h2_mplx *m, h2_stream *stream) +{ + ap_assert(!stream->task || stream->task->worker_done); + + h2_ihash_remove(m->shold, stream->id); + h2_ihash_add(m->spurge, stream); +} + +static void stream_cleanup(h2_mplx *m, h2_stream *stream) +{ + ap_assert(stream->state == H2_SS_CLEANUP); + + if (stream->input) { + h2_beam_on_consumed(stream->input, NULL, NULL, NULL); + h2_beam_abort(stream->input); + } + if (stream->output) { + h2_beam_on_produced(stream->output, NULL, NULL); + h2_beam_leave(stream->output); + } + + h2_stream_cleanup(stream); + + h2_ihash_remove(m->streams, stream->id); + h2_iq_remove(m->q, stream->id); + h2_ififo_remove(m->readyq, stream->id); + h2_ihash_add(m->shold, stream); + + if (!stream->task || stream->task->worker_done) { + stream_joined(m, stream); + } + else if (stream->task) { + stream->task->c->aborted = 1; + apr_thread_cond_broadcast(m->task_thawed); + } +} + +/** + * A h2_mplx needs to be thread-safe *and* if will be called by + * the h2_session thread *and* the h2_worker threads. Therefore: + * - calls are protected by a mutex lock, m->lock + * - the pool needs its own allocator, since apr_allocator_t are + * not re-entrant. The separate allocator works without a + * separate lock since we already protect h2_mplx itself. + * Since HTTP/2 connections can be expected to live longer than + * their HTTP/1 cousins, the separate allocator seems to work better + * than protecting a shared h2_session one with an own lock. 
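 *
 * A condensed sketch of the allocator-per-pool pattern that
 * h2_mplx_create() below implements (error handling omitted):
 *
 *   apr_allocator_t *allocator;
 *   apr_thread_mutex_t *mutex;
 *   apr_pool_t *pool;
 *
 *   apr_allocator_create(&allocator);
 *   apr_allocator_max_free_set(allocator, ap_max_mem_free);
 *   apr_pool_create_ex(&pool, parent, NULL, allocator);
 *   apr_allocator_owner_set(allocator, pool);
 *   apr_thread_mutex_create(&mutex, APR_THREAD_MUTEX_DEFAULT, pool);
 *   apr_allocator_mutex_set(allocator, mutex);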
+ */ +h2_mplx *h2_mplx_create(conn_rec *c, apr_pool_t *parent, + const h2_config *conf, + h2_workers *workers) +{ + apr_status_t status = APR_SUCCESS; + apr_allocator_t *allocator; + apr_thread_mutex_t *mutex; + h2_mplx *m; + h2_ctx *ctx = h2_ctx_get(c, 0); + ap_assert(conf); + + m = apr_pcalloc(parent, sizeof(h2_mplx)); + if (m) { + m->id = c->id; + m->c = c; + m->s = (ctx? h2_ctx_server_get(ctx) : NULL); + if (!m->s) { + m->s = c->base_server; + } + + /* We create a pool with its own allocator to be used for + * processing slave connections. This is the only way to have the + * processing independant of its parent pool in the sense that it + * can work in another thread. Also, the new allocator needs its own + * mutex to synchronize sub-pools. + */ + status = apr_allocator_create(&allocator); + if (status != APR_SUCCESS) { + return NULL; + } + apr_allocator_max_free_set(allocator, ap_max_mem_free); + apr_pool_create_ex(&m->pool, parent, NULL, allocator); + if (!m->pool) { + apr_allocator_destroy(allocator); + return NULL; + } + apr_pool_tag(m->pool, "h2_mplx"); + apr_allocator_owner_set(allocator, m->pool); + status = apr_thread_mutex_create(&mutex, APR_THREAD_MUTEX_DEFAULT, + m->pool); + if (status != APR_SUCCESS) { + apr_pool_destroy(m->pool); + return NULL; + } + apr_allocator_mutex_set(allocator, mutex); + + status = apr_thread_mutex_create(&m->lock, APR_THREAD_MUTEX_DEFAULT, + m->pool); + if (status != APR_SUCCESS) { + apr_pool_destroy(m->pool); + return NULL; + } + + status = apr_thread_cond_create(&m->task_thawed, m->pool); + if (status != APR_SUCCESS) { + apr_pool_destroy(m->pool); + return NULL; + } + + m->max_streams = h2_config_geti(conf, H2_CONF_MAX_STREAMS); + m->stream_max_mem = h2_config_geti(conf, H2_CONF_STREAM_MAX_MEM); + + m->streams = h2_ihash_create(m->pool, offsetof(h2_stream,id)); + m->sredo = h2_ihash_create(m->pool, offsetof(h2_stream,id)); + m->shold = h2_ihash_create(m->pool, offsetof(h2_stream,id)); + m->spurge = h2_ihash_create(m->pool, offsetof(h2_stream,id)); + m->q = h2_iq_create(m->pool, m->max_streams); + + status = h2_ififo_set_create(&m->readyq, m->pool, m->max_streams); + if (status != APR_SUCCESS) { + apr_pool_destroy(m->pool); + return NULL; + } + + m->workers = workers; + m->max_active = workers->max_workers; + m->limit_active = 6; /* the original h1 max parallel connections */ + m->last_limit_change = m->last_idle_block = apr_time_now(); + m->limit_change_interval = apr_time_from_msec(100); + + m->spare_slaves = apr_array_make(m->pool, 10, sizeof(conn_rec*)); + + m->ngn_shed = h2_ngn_shed_create(m->pool, m->c, m->max_streams, + m->stream_max_mem); + h2_ngn_shed_set_ctx(m->ngn_shed , m); + } + return m; +} + +int h2_mplx_shutdown(h2_mplx *m) +{ + int max_stream_started = 0; + + H2_MPLX_ENTER(m); + + max_stream_started = m->max_stream_started; + /* Clear schedule queue, disabling existing streams from starting */ + h2_iq_clear(m->q); + + H2_MPLX_LEAVE(m); + return max_stream_started; +} + +static int input_consumed_signal(h2_mplx *m, h2_stream *stream) +{ + if (stream->input) { + return h2_beam_report_consumption(stream->input); + } + return 0; +} + +static int report_consumption_iter(void *ctx, void *val) +{ + h2_stream *stream = val; + h2_mplx *m = ctx; + + input_consumed_signal(m, stream); + if (stream->state == H2_SS_CLOSED_L + && (!stream->task || stream->task->worker_done)) { + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, m->c, + H2_STRM_LOG(APLOGNO(10026), stream, "remote close missing")); + nghttp2_submit_rst_stream(stream->session->ngh2, 
NGHTTP2_FLAG_NONE, + stream->id, NGHTTP2_NO_ERROR); + } + return 1; +} + +static int output_consumed_signal(h2_mplx *m, h2_task *task) +{ + if (task->output.beam) { + return h2_beam_report_consumption(task->output.beam); + } + return 0; +} + +static int stream_destroy_iter(void *ctx, void *val) +{ + h2_mplx *m = ctx; + h2_stream *stream = val; + + h2_ihash_remove(m->spurge, stream->id); + ap_assert(stream->state == H2_SS_CLEANUP); + + if (stream->input) { + /* Process outstanding events before destruction */ + input_consumed_signal(m, stream); + h2_beam_log(stream->input, m->c, APLOG_TRACE2, "stream_destroy"); + h2_beam_destroy(stream->input); + stream->input = NULL; + } + + if (stream->task) { + h2_task *task = stream->task; + conn_rec *slave; + int reuse_slave = 0; + + stream->task = NULL; + slave = task->c; + if (slave) { + /* On non-serialized requests, the IO logging has not accounted for any + * meta data send over the network: response headers and h2 frame headers. we + * counted this on the stream and need to add this now. + * This is supposed to happen before the EOR bucket triggers the + * logging of the transaction. *fingers crossed* */ + if (task->request && !task->request->serialize && h2_task_logio_add_bytes_out) { + apr_off_t unaccounted = stream->out_frame_octets - stream->out_data_octets; + if (unaccounted > 0) { + h2_task_logio_add_bytes_out(slave, unaccounted); + } + } + + if (m->s->keep_alive_max == 0 || slave->keepalives < m->s->keep_alive_max) { + reuse_slave = ((m->spare_slaves->nelts < (m->limit_active * 3 / 2)) + && !task->rst_error); + } + + if (reuse_slave && slave->keepalive == AP_CONN_KEEPALIVE) { + h2_beam_log(task->output.beam, m->c, APLOG_DEBUG, + APLOGNO(03385) "h2_task_destroy, reuse slave"); + h2_task_destroy(task); + APR_ARRAY_PUSH(m->spare_slaves, conn_rec*) = slave; + } + else { + h2_beam_log(task->output.beam, m->c, APLOG_TRACE1, + "h2_task_destroy, destroy slave"); + h2_slave_destroy(slave); + } + } + } + h2_stream_destroy(stream); + return 0; +} + +static void purge_streams(h2_mplx *m, int lock) +{ + if (!h2_ihash_empty(m->spurge)) { + H2_MPLX_ENTER_MAYBE(m, lock); + while (!h2_ihash_iter(m->spurge, stream_destroy_iter, m)) { + /* repeat until empty */ + } + H2_MPLX_LEAVE_MAYBE(m, lock); + } +} + +typedef struct { + h2_mplx_stream_cb *cb; + void *ctx; +} stream_iter_ctx_t; + +static int stream_iter_wrap(void *ctx, void *stream) +{ + stream_iter_ctx_t *x = ctx; + return x->cb(stream, x->ctx); +} + +apr_status_t h2_mplx_stream_do(h2_mplx *m, h2_mplx_stream_cb *cb, void *ctx) +{ + stream_iter_ctx_t x; + + H2_MPLX_ENTER(m); + + x.cb = cb; + x.ctx = ctx; + h2_ihash_iter(m->streams, stream_iter_wrap, &x); + + H2_MPLX_LEAVE(m); + return APR_SUCCESS; +} + +static int report_stream_iter(void *ctx, void *val) { + h2_mplx *m = ctx; + h2_stream *stream = val; + h2_task *task = stream->task; + if (APLOGctrace1(m->c)) { + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c, + H2_STRM_MSG(stream, "started=%d, scheduled=%d, ready=%d, out_buffer=%ld"), + !!stream->task, stream->scheduled, h2_stream_is_ready(stream), + (long)h2_beam_get_buffered(stream->output)); + } + if (task) { + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, m->c, /* NO APLOGNO */ + H2_STRM_MSG(stream, "->03198: %s %s %s" + "[started=%d/done=%d/frozen=%d]"), + task->request->method, task->request->authority, + task->request->path, task->worker_started, + task->worker_done, task->frozen); + } + else { + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, m->c, /* NO APLOGNO */ + H2_STRM_MSG(stream, "->03198: no 
task")); + } + return 1; +} + +static int unexpected_stream_iter(void *ctx, void *val) { + h2_mplx *m = ctx; + h2_stream *stream = val; + ap_log_cerror(APLOG_MARK, APLOG_WARNING, 0, m->c, /* NO APLOGNO */ + H2_STRM_MSG(stream, "unexpected, started=%d, scheduled=%d, ready=%d"), + !!stream->task, stream->scheduled, h2_stream_is_ready(stream)); + return 1; +} + +static int stream_cancel_iter(void *ctx, void *val) { + h2_mplx *m = ctx; + h2_stream *stream = val; + + /* disabled input consumed reporting */ + if (stream->input) { + h2_beam_on_consumed(stream->input, NULL, NULL, NULL); + } + /* take over event monitoring */ + h2_stream_set_monitor(stream, NULL); + /* Reset, should transit to CLOSED state */ + h2_stream_rst(stream, H2_ERR_NO_ERROR); + /* All connection data has been sent, simulate cleanup */ + h2_stream_dispatch(stream, H2_SEV_EOS_SENT); + stream_cleanup(m, stream); + return 0; +} + +void h2_mplx_release_and_join(h2_mplx *m, apr_thread_cond_t *wait) +{ + apr_status_t status; + int i, wait_secs = 60; + + /* How to shut down a h2 connection: + * 0. abort and tell the workers that no more tasks will come from us */ + m->aborted = 1; + h2_workers_unregister(m->workers, m); + + H2_MPLX_ENTER_ALWAYS(m); + + /* How to shut down a h2 connection: + * 1. cancel all streams still active */ + while (!h2_ihash_iter(m->streams, stream_cancel_iter, m)) { + /* until empty */ + } + + /* 2. terminate ngn_shed, no more streams + * should be scheduled or in the active set */ + h2_ngn_shed_abort(m->ngn_shed); + ap_assert(h2_ihash_empty(m->streams)); + ap_assert(h2_iq_empty(m->q)); + + /* 3. while workers are busy on this connection, meaning they + * are processing tasks from this connection, wait on them finishing + * in order to wake us and let us check again. + * Eventually, this has to succeed. */ + m->join_wait = wait; + for (i = 0; h2_ihash_count(m->shold) > 0; ++i) { + status = apr_thread_cond_timedwait(wait, m->lock, apr_time_from_sec(wait_secs)); + + if (APR_STATUS_IS_TIMEUP(status)) { + /* This can happen if we have very long running requests + * that do not time out on IO. */ + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, m->c, APLOGNO(03198) + "h2_mplx(%ld): waited %d sec for %d tasks", + m->id, i*wait_secs, (int)h2_ihash_count(m->shold)); + h2_ihash_iter(m->shold, report_stream_iter, m); + } + } + ap_assert(m->tasks_active == 0); + m->join_wait = NULL; + + /* 4. close the h2_req_enginge shed */ + h2_ngn_shed_destroy(m->ngn_shed); + m->ngn_shed = NULL; + + /* 4. 
With all workers done, all streams should be in spurge */ + if (!h2_ihash_empty(m->shold)) { + ap_log_cerror(APLOG_MARK, APLOG_WARNING, 0, m->c, APLOGNO(03516) + "h2_mplx(%ld): unexpected %d streams in hold", + m->id, (int)h2_ihash_count(m->shold)); + h2_ihash_iter(m->shold, unexpected_stream_iter, m); + } + + H2_MPLX_LEAVE(m); + + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c, + "h2_mplx(%ld): released", m->id); +} + +apr_status_t h2_mplx_stream_cleanup(h2_mplx *m, h2_stream *stream) +{ + H2_MPLX_ENTER(m); + + ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, m->c, + H2_STRM_MSG(stream, "cleanup")); + stream_cleanup(m, stream); + + H2_MPLX_LEAVE(m); + return APR_SUCCESS; +} + +h2_stream *h2_mplx_stream_get(h2_mplx *m, int id) +{ + h2_stream *s = NULL; + + H2_MPLX_ENTER_ALWAYS(m); + + s = h2_ihash_get(m->streams, id); + + H2_MPLX_LEAVE(m); + return s; +} + +static void output_produced(void *ctx, h2_bucket_beam *beam, apr_off_t bytes) +{ + h2_stream *stream = ctx; + h2_mplx *m = stream->session->mplx; + + check_data_for(m, stream, 1); +} + +static apr_status_t out_open(h2_mplx *m, int stream_id, h2_bucket_beam *beam) +{ + apr_status_t status = APR_SUCCESS; + h2_stream *stream = h2_ihash_get(m->streams, stream_id); + + if (!stream || !stream->task || m->aborted) { + return APR_ECONNABORTED; + } + + ap_assert(stream->output == NULL); + stream->output = beam; + + if (APLOGctrace2(m->c)) { + h2_beam_log(beam, m->c, APLOG_TRACE2, "out_open"); + } + else { + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, m->c, + "h2_mplx(%s): out open", stream->task->id); + } + + h2_beam_on_consumed(stream->output, NULL, stream_output_consumed, stream); + h2_beam_on_produced(stream->output, output_produced, stream); + if (stream->task->output.copy_files) { + h2_beam_on_file_beam(stream->output, h2_beam_no_files, NULL); + } + + /* we might see some file buckets in the output, see + * if we have enough handles reserved. 
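 * (check_data_for() pushes the stream id onto m->readyq, marks
 * event_pending and, if the session thread is blocked in
 * h2_mplx_out_trywait(), signals m->added_output.)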
*/ + check_data_for(m, stream, 0); + return status; +} + +apr_status_t h2_mplx_out_open(h2_mplx *m, int stream_id, h2_bucket_beam *beam) +{ + apr_status_t status; + + H2_MPLX_ENTER(m); + + if (m->aborted) { + status = APR_ECONNABORTED; + } + else { + status = out_open(m, stream_id, beam); + } + + H2_MPLX_LEAVE(m); + return status; +} + +static apr_status_t out_close(h2_mplx *m, h2_task *task) +{ + apr_status_t status = APR_SUCCESS; + h2_stream *stream; + + if (!task) { + return APR_ECONNABORTED; + } + if (task->c) { + ++task->c->keepalives; + } + + stream = h2_ihash_get(m->streams, task->stream_id); + if (!stream) { + return APR_ECONNABORTED; + } + + ap_log_cerror(APLOG_MARK, APLOG_TRACE2, status, m->c, + "h2_mplx(%s): close", task->id); + status = h2_beam_close(task->output.beam); + h2_beam_log(task->output.beam, m->c, APLOG_TRACE2, "out_close"); + output_consumed_signal(m, task); + check_data_for(m, stream, 0); + return status; +} + +apr_status_t h2_mplx_out_trywait(h2_mplx *m, apr_interval_time_t timeout, + apr_thread_cond_t *iowait) +{ + apr_status_t status; + + H2_MPLX_ENTER(m); + + if (m->aborted) { + status = APR_ECONNABORTED; + } + else if (h2_mplx_has_master_events(m)) { + status = APR_SUCCESS; + } + else { + purge_streams(m, 0); + h2_ihash_iter(m->streams, report_consumption_iter, m); + m->added_output = iowait; + status = apr_thread_cond_timedwait(m->added_output, m->lock, timeout); + if (APLOGctrace2(m->c)) { + ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, m->c, + "h2_mplx(%ld): trywait on data for %f ms)", + m->id, timeout/1000.0); + } + m->added_output = NULL; + } + + H2_MPLX_LEAVE(m); + return status; +} + +static void check_data_for(h2_mplx *m, h2_stream *stream, int lock) +{ + if (h2_ififo_push(m->readyq, stream->id) == APR_SUCCESS) { + apr_atomic_set32(&m->event_pending, 1); + H2_MPLX_ENTER_MAYBE(m, lock); + if (m->added_output) { + apr_thread_cond_signal(m->added_output); + } + H2_MPLX_LEAVE_MAYBE(m, lock); + } +} + +apr_status_t h2_mplx_reprioritize(h2_mplx *m, h2_stream_pri_cmp *cmp, void *ctx) +{ + apr_status_t status; + + H2_MPLX_ENTER(m); + + if (m->aborted) { + status = APR_ECONNABORTED; + } + else { + h2_iq_sort(m->q, cmp, ctx); + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c, + "h2_mplx(%ld): reprioritize tasks", m->id); + status = APR_SUCCESS; + } + + H2_MPLX_LEAVE(m); + return status; +} + +static void register_if_needed(h2_mplx *m) +{ + if (!m->aborted && !m->is_registered && !h2_iq_empty(m->q)) { + apr_status_t status = h2_workers_register(m->workers, m); + if (status == APR_SUCCESS) { + m->is_registered = 1; + } + else { + ap_log_cerror(APLOG_MARK, APLOG_ERR, status, m->c, APLOGNO(10021) + "h2_mplx(%ld): register at workers", m->id); + } + } +} + +apr_status_t h2_mplx_process(h2_mplx *m, struct h2_stream *stream, + h2_stream_pri_cmp *cmp, void *ctx) +{ + apr_status_t status; + + H2_MPLX_ENTER(m); + + if (m->aborted) { + status = APR_ECONNABORTED; + } + else { + status = APR_SUCCESS; + h2_ihash_add(m->streams, stream); + if (h2_stream_is_ready(stream)) { + /* already have a response */ + check_data_for(m, stream, 0); + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c, + H2_STRM_MSG(stream, "process, add to readyq")); + } + else { + h2_iq_add(m->q, stream->id, cmp, ctx); + register_if_needed(m); + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c, + H2_STRM_MSG(stream, "process, added to q")); + } + } + + H2_MPLX_LEAVE(m); + return status; +} + +static h2_task *next_stream_task(h2_mplx *m) +{ + h2_stream *stream; + int sid; + while (!m->aborted && (m->tasks_active 
< m->limit_active) + && (sid = h2_iq_shift(m->q)) > 0) { + + stream = h2_ihash_get(m->streams, sid); + if (stream) { + conn_rec *slave, **pslave; + + pslave = (conn_rec **)apr_array_pop(m->spare_slaves); + if (pslave) { + slave = *pslave; + slave->aborted = 0; + } + else { + slave = h2_slave_create(m->c, stream->id, m->pool); + } + + if (!stream->task) { + + if (sid > m->max_stream_started) { + m->max_stream_started = sid; + } + if (stream->input) { + h2_beam_on_consumed(stream->input, stream_input_ev, + stream_input_consumed, stream); + } + + stream->task = h2_task_create(slave, stream->id, + stream->request, m, stream->input, + stream->session->s->timeout, + m->stream_max_mem); + if (!stream->task) { + ap_log_cerror(APLOG_MARK, APLOG_ERR, APR_ENOMEM, slave, + H2_STRM_LOG(APLOGNO(02941), stream, + "create task")); + return NULL; + } + + } + + ++m->tasks_active; + return stream->task; + } + } + return NULL; +} + +apr_status_t h2_mplx_pop_task(h2_mplx *m, h2_task **ptask) +{ + apr_status_t rv = APR_EOF; + + *ptask = NULL; + ap_assert(m); + ap_assert(m->lock); + + if (APR_SUCCESS != (rv = apr_thread_mutex_lock(m->lock))) { + return rv; + } + + if (m->aborted) { + rv = APR_EOF; + } + else { + *ptask = next_stream_task(m); + rv = (*ptask != NULL && !h2_iq_empty(m->q))? APR_EAGAIN : APR_SUCCESS; + } + if (APR_EAGAIN != rv) { + m->is_registered = 0; /* h2_workers will discard this mplx */ + } + H2_MPLX_LEAVE(m); + return rv; +} + +static void task_done(h2_mplx *m, h2_task *task, h2_req_engine *ngn) +{ + h2_stream *stream; + + if (task->frozen) { + /* this task was handed over to an engine for processing + * and the original worker has finished. That means the + * engine may start processing now. */ + h2_task_thaw(task); + apr_thread_cond_broadcast(m->task_thawed); + return; + } + + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c, + "h2_mplx(%ld): task(%s) done", m->id, task->id); + out_close(m, task); + + if (ngn) { + apr_off_t bytes = 0; + h2_beam_send(task->output.beam, NULL, APR_NONBLOCK_READ); + bytes += h2_beam_get_buffered(task->output.beam); + if (bytes > 0) { + /* we need to report consumed and current buffered output + * to the engine. The request will be streamed out or cancelled, + * no more data is coming from it and the engine should update + * its calculations before we destroy this information. */ + h2_req_engine_out_consumed(ngn, task->c, bytes); + } + } + + if (task->engine) { + if (!m->aborted && !task->c->aborted + && !h2_req_engine_is_shutdown(task->engine)) { + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, m->c, APLOGNO(10022) + "h2_mplx(%ld): task(%s) has not-shutdown " + "engine(%s)", m->id, task->id, + h2_req_engine_get_id(task->engine)); + } + h2_ngn_shed_done_ngn(m->ngn_shed, task->engine); + } + + task->worker_done = 1; + task->done_at = apr_time_now(); + ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, m->c, + "h2_mplx(%s): request done, %f ms elapsed", task->id, + (task->done_at - task->started_at) / 1000.0); + + if (task->started_at > m->last_idle_block) { + /* this task finished without causing an 'idle block', e.g. + * a block by flow control. 
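 * The adjustment below is roughly multiplicative-increase/sharp-decrease:
 * starting from the initial limit_active of 6, a well behaving connection
 * is doubled towards max_active (6 -> 12 -> 24, ...), while an 'idle
 * block' detected in h2_mplx_idle() steps the limit down along 16/8/4/2.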
+ */ + if (task->done_at- m->last_limit_change >= m->limit_change_interval + && m->limit_active < m->max_active) { + /* Well behaving stream, allow it more workers */ + m->limit_active = H2MIN(m->limit_active * 2, + m->max_active); + m->last_limit_change = task->done_at; + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c, + "h2_mplx(%ld): increase worker limit to %d", + m->id, m->limit_active); + } + } + + stream = h2_ihash_get(m->streams, task->stream_id); + if (stream) { + /* stream not done yet. */ + if (!m->aborted && h2_ihash_get(m->sredo, stream->id)) { + /* reset and schedule again */ + h2_task_redo(task); + h2_ihash_remove(m->sredo, stream->id); + h2_iq_add(m->q, stream->id, NULL, NULL); + } + else { + /* stream not cleaned up, stay around */ + ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, m->c, + H2_STRM_MSG(stream, "task_done, stream open")); + if (stream->input) { + h2_beam_leave(stream->input); + } + + /* more data will not arrive, resume the stream */ + check_data_for(m, stream, 0); + } + } + else if ((stream = h2_ihash_get(m->shold, task->stream_id)) != NULL) { + /* stream is done, was just waiting for this. */ + ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, m->c, + H2_STRM_MSG(stream, "task_done, in hold")); + if (stream->input) { + h2_beam_leave(stream->input); + } + stream_joined(m, stream); + } + else if ((stream = h2_ihash_get(m->spurge, task->stream_id)) != NULL) { + ap_log_cerror(APLOG_MARK, APLOG_WARNING, 0, m->c, + H2_STRM_LOG(APLOGNO(03517), stream, "already in spurge")); + ap_assert("stream should not be in spurge" == NULL); + } + else { + ap_log_cerror(APLOG_MARK, APLOG_WARNING, 0, m->c, APLOGNO(03518) + "h2_mplx(%s): task_done, stream not found", + task->id); + ap_assert("stream should still be available" == NULL); + } +} + +void h2_mplx_task_done(h2_mplx *m, h2_task *task, h2_task **ptask) +{ + H2_MPLX_ENTER_ALWAYS(m); + + task_done(m, task, NULL); + --m->tasks_active; + + if (m->join_wait) { + apr_thread_cond_signal(m->join_wait); + } + if (ptask) { + /* caller wants another task */ + *ptask = next_stream_task(m); + } + register_if_needed(m); + + H2_MPLX_LEAVE(m); +} + +/******************************************************************************* + * h2_mplx DoS protection + ******************************************************************************/ + +static int latest_repeatable_unsubmitted_iter(void *data, void *val) +{ + stream_iter_ctx *ctx = data; + h2_stream *stream = val; + + if (stream->task && !stream->task->worker_done + && h2_task_can_redo(stream->task) + && !h2_ihash_get(ctx->m->sredo, stream->id)) { + if (!h2_stream_is_ready(stream)) { + /* this task occupies a worker, the response has not been submitted + * yet, not been cancelled and it is a repeatable request + * -> it can be re-scheduled later */ + if (!ctx->stream + || (ctx->stream->task->started_at < stream->task->started_at)) { + /* we did not have one or this one was started later */ + ctx->stream = stream; + } + } + } + return 1; +} + +static h2_stream *get_latest_repeatable_unsubmitted_stream(h2_mplx *m) +{ + stream_iter_ctx ctx; + ctx.m = m; + ctx.stream = NULL; + h2_ihash_iter(m->streams, latest_repeatable_unsubmitted_iter, &ctx); + return ctx.stream; +} + +static int timed_out_busy_iter(void *data, void *val) +{ + stream_iter_ctx *ctx = data; + h2_stream *stream = val; + if (stream->task && !stream->task->worker_done + && (ctx->now - stream->task->started_at) > stream->task->timeout) { + /* timed out stream occupying a worker, found */ + ctx->stream = stream; + return 0; + } + return 
1; +} + +static h2_stream *get_timed_out_busy_stream(h2_mplx *m) +{ + stream_iter_ctx ctx; + ctx.m = m; + ctx.stream = NULL; + ctx.now = apr_time_now(); + h2_ihash_iter(m->streams, timed_out_busy_iter, &ctx); + return ctx.stream; +} + +static apr_status_t unschedule_slow_tasks(h2_mplx *m) +{ + h2_stream *stream; + int n; + + /* Try to get rid of streams that occupy workers. Look for safe requests + * that are repeatable. If none found, fail the connection. + */ + n = (m->tasks_active - m->limit_active - (int)h2_ihash_count(m->sredo)); + while (n > 0 && (stream = get_latest_repeatable_unsubmitted_stream(m))) { + h2_task_rst(stream->task, H2_ERR_CANCEL); + h2_ihash_add(m->sredo, stream); + --n; + } + + if ((m->tasks_active - h2_ihash_count(m->sredo)) > m->limit_active) { + h2_stream *stream = get_timed_out_busy_stream(m); + if (stream) { + /* Too many busy workers, unable to cancel enough streams + * and with a busy, timed out stream, we tell the client + * to go away... */ + return APR_TIMEUP; + } + } + return APR_SUCCESS; +} + +apr_status_t h2_mplx_idle(h2_mplx *m) +{ + apr_status_t status = APR_SUCCESS; + apr_time_t now; + apr_size_t scount; + + H2_MPLX_ENTER(m); + + scount = h2_ihash_count(m->streams); + if (scount > 0) { + if (m->tasks_active) { + /* If we have streams in connection state 'IDLE', meaning + * all streams are ready to sent data out, but lack + * WINDOW_UPDATEs. + * + * This is ok, unless we have streams that still occupy + * h2 workers. As worker threads are a scarce resource, + * we need to take measures that we do not get DoSed. + * + * This is what we call an 'idle block'. Limit the amount + * of busy workers we allow for this connection until it + * well behaves. + */ + now = apr_time_now(); + m->last_idle_block = now; + if (m->limit_active > 2 + && now - m->last_limit_change >= m->limit_change_interval) { + if (m->limit_active > 16) { + m->limit_active = 16; + } + else if (m->limit_active > 8) { + m->limit_active = 8; + } + else if (m->limit_active > 4) { + m->limit_active = 4; + } + else if (m->limit_active > 2) { + m->limit_active = 2; + } + m->last_limit_change = now; + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c, + "h2_mplx(%ld): decrease worker limit to %d", + m->id, m->limit_active); + } + + if (m->tasks_active > m->limit_active) { + status = unschedule_slow_tasks(m); + } + } + else if (!h2_iq_empty(m->q)) { + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c, + "h2_mplx(%ld): idle, but %d streams to process", + m->id, (int)h2_iq_count(m->q)); + status = APR_EAGAIN; + } + else { + /* idle, have streams, but no tasks active. what are we waiting for? + * WINDOW_UPDATEs from client? */ + h2_stream *stream = NULL; + + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c, + "h2_mplx(%ld): idle, no tasks ongoing, %d streams", + m->id, (int)h2_ihash_count(m->streams)); + h2_ihash_shift(m->streams, (void**)&stream, 1); + if (stream) { + h2_ihash_add(m->streams, stream); + if (stream->output && !stream->out_checked) { + /* FIXME: this looks like a race between the session thinking + * it is idle and the EOF on a stream not being sent. + * Signal to caller to leave IDLE state. 
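 * (h2_ihash_shift() pulls at most one arbitrary stream out of m->streams
 * for inspection and it is added back right away; returning APR_EAGAIN
 * makes the caller leave the IDLE state and process again.)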
+ */ + ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, m->c, + H2_STRM_MSG(stream, "output closed=%d, mplx idle" + ", out has %ld bytes buffered"), + h2_beam_is_closed(stream->output), + (long)h2_beam_get_buffered(stream->output)); + h2_ihash_add(m->streams, stream); + check_data_for(m, stream, 0); + stream->out_checked = 1; + status = APR_EAGAIN; + } + } + } + } + register_if_needed(m); + + H2_MPLX_LEAVE(m); + return status; +} + +/******************************************************************************* + * HTTP/2 request engines + ******************************************************************************/ + +typedef struct { + h2_mplx * m; + h2_req_engine *ngn; + int streams_updated; +} ngn_update_ctx; + +static int ngn_update_window(void *ctx, void *val) +{ + ngn_update_ctx *uctx = ctx; + h2_stream *stream = val; + if (stream->task && stream->task->assigned == uctx->ngn + && output_consumed_signal(uctx->m, stream->task)) { + ++uctx->streams_updated; + } + return 1; +} + +static apr_status_t ngn_out_update_windows(h2_mplx *m, h2_req_engine *ngn) +{ + ngn_update_ctx ctx; + + ctx.m = m; + ctx.ngn = ngn; + ctx.streams_updated = 0; + h2_ihash_iter(m->streams, ngn_update_window, &ctx); + + return ctx.streams_updated? APR_SUCCESS : APR_EAGAIN; +} + +apr_status_t h2_mplx_req_engine_push(const char *ngn_type, + request_rec *r, + http2_req_engine_init *einit) +{ + apr_status_t status; + h2_mplx *m; + h2_task *task; + h2_stream *stream; + + task = h2_ctx_rget_task(r); + if (!task) { + return APR_ECONNABORTED; + } + m = task->mplx; + + H2_MPLX_ENTER(m); + + stream = h2_ihash_get(m->streams, task->stream_id); + if (stream) { + status = h2_ngn_shed_push_request(m->ngn_shed, ngn_type, r, einit); + } + else { + status = APR_ECONNABORTED; + } + + H2_MPLX_LEAVE(m); + return status; +} + +apr_status_t h2_mplx_req_engine_pull(h2_req_engine *ngn, + apr_read_type_e block, + int capacity, + request_rec **pr) +{ + h2_ngn_shed *shed = h2_ngn_shed_get_shed(ngn); + h2_mplx *m = h2_ngn_shed_get_ctx(shed); + apr_status_t status; + int want_shutdown; + + H2_MPLX_ENTER(m); + + want_shutdown = (block == APR_BLOCK_READ); + + /* Take this opportunity to update output consummation + * for this engine */ + ngn_out_update_windows(m, ngn); + + if (want_shutdown && !h2_iq_empty(m->q)) { + /* For a blocking read, check first if requests are to be + * had and, if not, wait a short while before doing the + * blocking, and if unsuccessful, terminating read. 
+ */ + status = h2_ngn_shed_pull_request(shed, ngn, capacity, 1, pr); + if (APR_STATUS_IS_EAGAIN(status)) { + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c, + "h2_mplx(%ld): start block engine pull", m->id); + apr_thread_cond_timedwait(m->task_thawed, m->lock, + apr_time_from_msec(20)); + status = h2_ngn_shed_pull_request(shed, ngn, capacity, 1, pr); + } + } + else { + status = h2_ngn_shed_pull_request(shed, ngn, capacity, + want_shutdown, pr); + } + + H2_MPLX_LEAVE(m); + return status; +} + +void h2_mplx_req_engine_done(h2_req_engine *ngn, conn_rec *r_conn, + apr_status_t status) +{ + h2_task *task = h2_ctx_cget_task(r_conn); + + if (task) { + h2_mplx *m = task->mplx; + h2_stream *stream; + + H2_MPLX_ENTER_ALWAYS(m); + + stream = h2_ihash_get(m->streams, task->stream_id); + + ngn_out_update_windows(m, ngn); + h2_ngn_shed_done_task(m->ngn_shed, ngn, task); + + if (status != APR_SUCCESS && stream + && h2_task_can_redo(task) + && !h2_ihash_get(m->sredo, stream->id)) { + h2_ihash_add(m->sredo, stream); + } + + if (task->engine) { + /* cannot report that as done until engine returns */ + } + else { + task_done(m, task, ngn); + } + + H2_MPLX_LEAVE(m); + } +} + +/******************************************************************************* + * mplx master events dispatching + ******************************************************************************/ + +int h2_mplx_has_master_events(h2_mplx *m) +{ + return apr_atomic_read32(&m->event_pending) > 0; +} + +apr_status_t h2_mplx_dispatch_master_events(h2_mplx *m, + stream_ev_callback *on_resume, + void *on_ctx) +{ + h2_stream *stream; + int n, id; + + ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, m->c, + "h2_mplx(%ld): dispatch events", m->id); + apr_atomic_set32(&m->event_pending, 0); + + /* update input windows for streams */ + h2_ihash_iter(m->streams, report_consumption_iter, m); + purge_streams(m, 1); + + n = h2_ififo_count(m->readyq); + while (n > 0 + && (h2_ififo_try_pull(m->readyq, &id) == APR_SUCCESS)) { + --n; + stream = h2_ihash_get(m->streams, id); + if (stream) { + on_resume(on_ctx, stream); + } + } + + return APR_SUCCESS; +} + +apr_status_t h2_mplx_keep_active(h2_mplx *m, h2_stream *stream) +{ + check_data_for(m, stream, 1); + return APR_SUCCESS; +} + +int h2_mplx_awaits_data(h2_mplx *m) +{ + int waiting = 1; + + H2_MPLX_ENTER_ALWAYS(m); + + if (h2_ihash_empty(m->streams)) { + waiting = 0; + } + else if (!m->tasks_active && !h2_ififo_count(m->readyq) + && h2_iq_empty(m->q)) { + waiting = 0; + } + + H2_MPLX_LEAVE(m); + return waiting; +} diff --git a/modules/http2/h2_mplx.h b/modules/http2/h2_mplx.h new file mode 100644 index 0000000..2890b98 --- /dev/null +++ b/modules/http2/h2_mplx.h @@ -0,0 +1,330 @@ +/* Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef __mod_h2__h2_mplx__ +#define __mod_h2__h2_mplx__ + +/** + * The stream multiplexer. It pushes buckets from the connection + * thread to the stream threads and vice versa. It's thread-safe + * to use. + * + * There is one h2_mplx instance for each h2_session, which sits on top + * of a particular httpd conn_rec. Input goes from the connection to + * the stream tasks. Output goes from the stream tasks to the connection, + * e.g. the client. + * + * For each stream, there can be at most "H2StreamMaxMemSize" output bytes + * queued in the multiplexer. If a task thread tries to write more + * data, it is blocked until space becomes available. + * + * Writing input is never blocked. In order to use flow control on the input, + * the mplx can be polled for input data consumption. + */ + +struct apr_pool_t; +struct apr_thread_mutex_t; +struct apr_thread_cond_t; +struct h2_bucket_beam; +struct h2_config; +struct h2_ihash_t; +struct h2_task; +struct h2_stream; +struct h2_request; +struct apr_thread_cond_t; +struct h2_workers; +struct h2_iqueue; +struct h2_ngn_shed; +struct h2_req_engine; + +#include <apr_queue.h> + +typedef struct h2_mplx h2_mplx; + +struct h2_mplx { + long id; + conn_rec *c; + apr_pool_t *pool; + server_rec *s; /* server for master conn */ + + unsigned int event_pending; + unsigned int aborted; + unsigned int is_registered; /* is registered at h2_workers */ + + struct h2_ihash_t *streams; /* all streams currently processing */ + struct h2_ihash_t *sredo; /* all streams that need to be re-started */ + struct h2_ihash_t *shold; /* all streams done with task ongoing */ + struct h2_ihash_t *spurge; /* all streams done, ready for destroy */ + + struct h2_iqueue *q; /* all stream ids that need to be started */ + struct h2_ififo *readyq; /* all stream ids ready for output */ + + struct h2_ihash_t *redo_tasks; /* all tasks that need to be redone */ + + int max_streams; /* max # of concurrent streams */ + int max_stream_started; /* highest stream id that started processing */ + int tasks_active; /* # of tasks being processed from this mplx */ + int limit_active; /* current limit on active tasks, dynamic */ + int max_active; /* max, hard limit # of active tasks in a process */ + apr_time_t last_idle_block; /* last time, this mplx entered IDLE while + * streams were ready */ + apr_time_t last_limit_change; /* last time, worker limit changed */ + apr_interval_time_t limit_change_interval; + + apr_thread_mutex_t *lock; + struct apr_thread_cond_t *added_output; + struct apr_thread_cond_t *task_thawed; + struct apr_thread_cond_t *join_wait; + + apr_size_t stream_max_mem; + + apr_pool_t *spare_io_pool; + apr_array_header_t *spare_slaves; /* spare slave connections */ + + struct h2_workers *workers; + + struct h2_ngn_shed *ngn_shed; +}; + + + +/******************************************************************************* + * Object lifecycle and information. + ******************************************************************************/ + +apr_status_t h2_mplx_child_init(apr_pool_t *pool, server_rec *s); + +/** + * Create the multiplexer for the given HTTP2 session. + * Implicitly has reference count 1. + */ +h2_mplx *h2_mplx_create(conn_rec *c, apr_pool_t *master, + const struct h2_config *conf, + struct h2_workers *workers); + +/** + * Decreases the reference counter of this mplx and waits for it + * to reached 0, destroy the mplx afterwards. + * This is to be called from the thread that created the mplx in + * the first place. 
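 *
 * A rough lifecycle sketch from the session thread's point of view
 * ('wait' is a condition variable owned by the caller, other names are
 * purely illustrative):
 *
 *   h2_mplx *m = h2_mplx_create(c, session_pool, conf, workers);
 *   h2_mplx_process(m, stream, cmp, ctx);              - schedule a stream
 *   h2_mplx_dispatch_master_events(m, on_resume, ctx); - drive output
 *   h2_mplx_release_and_join(m, wait);                 - at connection end
 *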
+ * @param m the mplx to be released and destroyed + * @param wait condition var to wait on for ref counter == 0 + */ +void h2_mplx_release_and_join(h2_mplx *m, struct apr_thread_cond_t *wait); + +apr_status_t h2_mplx_pop_task(h2_mplx *m, struct h2_task **ptask); + +void h2_mplx_task_done(h2_mplx *m, struct h2_task *task, struct h2_task **ptask); + +/** + * Shut down the multiplexer gracefully. Will no longer schedule new streams + * but let the ongoing ones finish normally. + * @return the highest stream id being/been processed + */ +int h2_mplx_shutdown(h2_mplx *m); + +int h2_mplx_is_busy(h2_mplx *m); + +/******************************************************************************* + * IO lifetime of streams. + ******************************************************************************/ + +struct h2_stream *h2_mplx_stream_get(h2_mplx *m, int id); + +/** + * Notifies mplx that a stream has been completely handled on the main + * connection and is ready for cleanup. + * + * @param m the mplx itself + * @param stream the stream ready for cleanup + */ +apr_status_t h2_mplx_stream_cleanup(h2_mplx *m, struct h2_stream *stream); + +/** + * Waits on output data from any stream in this session to become available. + * Returns APR_TIMEUP if no data arrived in the given time. + */ +apr_status_t h2_mplx_out_trywait(h2_mplx *m, apr_interval_time_t timeout, + struct apr_thread_cond_t *iowait); + +apr_status_t h2_mplx_keep_active(h2_mplx *m, struct h2_stream *stream); + +/******************************************************************************* + * Stream processing. + ******************************************************************************/ + +/** + * Process a stream request. + * + * @param m the multiplexer + * @param stream the identifier of the stream + * @param r the request to be processed + * @param cmp the stream priority compare function + * @param ctx context data for the compare function + */ +apr_status_t h2_mplx_process(h2_mplx *m, struct h2_stream *stream, + h2_stream_pri_cmp *cmp, void *ctx); + +/** + * Stream priorities have changed, reschedule pending requests. + * + * @param m the multiplexer + * @param cmp the stream priority compare function + * @param ctx context data for the compare function + */ +apr_status_t h2_mplx_reprioritize(h2_mplx *m, h2_stream_pri_cmp *cmp, void *ctx); + +typedef apr_status_t stream_ev_callback(void *ctx, struct h2_stream *stream); + +/** + * Check if the multiplexer has events for the master connection pending. + * @return != 0 iff there are events pending + */ +int h2_mplx_has_master_events(h2_mplx *m); + +/** + * Dispatch events for the master connection, such as + ± @param m the multiplexer + * @param on_resume new output data has arrived for a suspended stream + * @param ctx user supplied argument to invocation. + */ +apr_status_t h2_mplx_dispatch_master_events(h2_mplx *m, + stream_ev_callback *on_resume, + void *ctx); + +int h2_mplx_awaits_data(h2_mplx *m); + +typedef int h2_mplx_stream_cb(struct h2_stream *s, void *ctx); + +apr_status_t h2_mplx_stream_do(h2_mplx *m, h2_mplx_stream_cb *cb, void *ctx); + +/******************************************************************************* + * Output handling of streams. + ******************************************************************************/ + +/** + * Opens the output for the given stream with the specified response. 
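 * Presumably invoked from the task (worker) side once a response is
 * available; the beam then carries response buckets back to the session.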
+ */ +apr_status_t h2_mplx_out_open(h2_mplx *mplx, int stream_id, + struct h2_bucket_beam *beam); + +/******************************************************************************* + * h2_mplx list Manipulation. + ******************************************************************************/ + +/** + * The magic pointer value that indicates the head of a h2_mplx list + * @param b The mplx list + * @return The magic pointer value + */ +#define H2_MPLX_LIST_SENTINEL(b) APR_RING_SENTINEL((b), h2_mplx, link) + +/** + * Determine if the mplx list is empty + * @param b The list to check + * @return true or false + */ +#define H2_MPLX_LIST_EMPTY(b) APR_RING_EMPTY((b), h2_mplx, link) + +/** + * Return the first mplx in a list + * @param b The list to query + * @return The first mplx in the list + */ +#define H2_MPLX_LIST_FIRST(b) APR_RING_FIRST(b) + +/** + * Return the last mplx in a list + * @param b The list to query + * @return The last mplx int he list + */ +#define H2_MPLX_LIST_LAST(b) APR_RING_LAST(b) + +/** + * Insert a single mplx at the front of a list + * @param b The list to add to + * @param e The mplx to insert + */ +#define H2_MPLX_LIST_INSERT_HEAD(b, e) do { \ +h2_mplx *ap__b = (e); \ +APR_RING_INSERT_HEAD((b), ap__b, h2_mplx, link); \ +} while (0) + +/** + * Insert a single mplx at the end of a list + * @param b The list to add to + * @param e The mplx to insert + */ +#define H2_MPLX_LIST_INSERT_TAIL(b, e) do { \ +h2_mplx *ap__b = (e); \ +APR_RING_INSERT_TAIL((b), ap__b, h2_mplx, link); \ +} while (0) + +/** + * Get the next mplx in the list + * @param e The current mplx + * @return The next mplx + */ +#define H2_MPLX_NEXT(e) APR_RING_NEXT((e), link) +/** + * Get the previous mplx in the list + * @param e The current mplx + * @return The previous mplx + */ +#define H2_MPLX_PREV(e) APR_RING_PREV((e), link) + +/** + * Remove a mplx from its list + * @param e The mplx to remove + */ +#define H2_MPLX_REMOVE(e) APR_RING_REMOVE((e), link) + +/******************************************************************************* + * h2_mplx DoS protection + ******************************************************************************/ + +/** + * Master connection has entered idle mode. 
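 * (As implemented in h2_mplx.c: the busy-worker limit is stepped down,
 * repeatable requests beyond the limit are reset for later re-scheduling,
 * and APR_TIMEUP is returned when a timed-out busy stream cannot be
 * resolved otherwise.)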
+ * @param m the mplx instance of the master connection + * @return != SUCCESS iff connection should be terminated + */ +apr_status_t h2_mplx_idle(h2_mplx *m); + +/******************************************************************************* + * h2_req_engine handling + ******************************************************************************/ + +typedef void h2_output_consumed(void *ctx, conn_rec *c, apr_off_t consumed); +typedef apr_status_t h2_mplx_req_engine_init(struct h2_req_engine *engine, + const char *id, + const char *type, + apr_pool_t *pool, + apr_size_t req_buffer_size, + request_rec *r, + h2_output_consumed **pconsumed, + void **pbaton); + +apr_status_t h2_mplx_req_engine_push(const char *ngn_type, + request_rec *r, + h2_mplx_req_engine_init *einit); +apr_status_t h2_mplx_req_engine_pull(struct h2_req_engine *ngn, + apr_read_type_e block, + int capacity, + request_rec **pr); +void h2_mplx_req_engine_done(struct h2_req_engine *ngn, conn_rec *r_conn, + apr_status_t status); + +#endif /* defined(__mod_h2__h2_mplx__) */ diff --git a/modules/http2/h2_ngn_shed.c b/modules/http2/h2_ngn_shed.c new file mode 100644 index 0000000..fb85776 --- /dev/null +++ b/modules/http2/h2_ngn_shed.c @@ -0,0 +1,392 @@ +/* Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include <assert.h> +#include <stddef.h> +#include <stdlib.h> + +#include <apr_thread_mutex.h> +#include <apr_thread_cond.h> +#include <apr_strings.h> +#include <apr_time.h> + +#include <httpd.h> +#include <http_core.h> +#include <http_log.h> + +#include "mod_http2.h" + +#include "h2_private.h" +#include "h2.h" +#include "h2_config.h" +#include "h2_conn.h" +#include "h2_ctx.h" +#include "h2_h2.h" +#include "h2_mplx.h" +#include "h2_request.h" +#include "h2_task.h" +#include "h2_util.h" +#include "h2_ngn_shed.h" + + +typedef struct h2_ngn_entry h2_ngn_entry; +struct h2_ngn_entry { + APR_RING_ENTRY(h2_ngn_entry) link; + h2_task *task; + request_rec *r; +}; + +#define H2_NGN_ENTRY_NEXT(e) APR_RING_NEXT((e), link) +#define H2_NGN_ENTRY_PREV(e) APR_RING_PREV((e), link) +#define H2_NGN_ENTRY_REMOVE(e) APR_RING_REMOVE((e), link) + +#define H2_REQ_ENTRIES_SENTINEL(b) APR_RING_SENTINEL((b), h2_ngn_entry, link) +#define H2_REQ_ENTRIES_EMPTY(b) APR_RING_EMPTY((b), h2_ngn_entry, link) +#define H2_REQ_ENTRIES_FIRST(b) APR_RING_FIRST(b) +#define H2_REQ_ENTRIES_LAST(b) APR_RING_LAST(b) + +#define H2_REQ_ENTRIES_INSERT_HEAD(b, e) do { \ +h2_ngn_entry *ap__b = (e); \ +APR_RING_INSERT_HEAD((b), ap__b, h2_ngn_entry, link); \ +} while (0) + +#define H2_REQ_ENTRIES_INSERT_TAIL(b, e) do { \ +h2_ngn_entry *ap__b = (e); \ +APR_RING_INSERT_TAIL((b), ap__b, h2_ngn_entry, link); \ +} while (0) + +struct h2_req_engine { + const char *id; /* identifier */ + const char *type; /* name of the engine type */ + apr_pool_t *pool; /* pool for engine specific allocations */ + conn_rec *c; /* connection this engine is assigned to */ + h2_task *task; /* the task this engine is based on, running in */ + h2_ngn_shed *shed; + + unsigned int shutdown : 1; /* engine is being shut down */ + unsigned int done : 1; /* engine has finished */ + + APR_RING_HEAD(h2_req_entries, h2_ngn_entry) entries; + int capacity; /* maximum concurrent requests */ + int no_assigned; /* # of assigned requests */ + int no_live; /* # of live */ + int no_finished; /* # of finished */ + + h2_output_consumed *out_consumed; + void *out_consumed_ctx; +}; + +const char *h2_req_engine_get_id(h2_req_engine *engine) +{ + return engine->id; +} + +int h2_req_engine_is_shutdown(h2_req_engine *engine) +{ + return engine->shutdown; +} + +void h2_req_engine_out_consumed(h2_req_engine *engine, conn_rec *c, + apr_off_t bytes) +{ + if (engine->out_consumed) { + engine->out_consumed(engine->out_consumed_ctx, c, bytes); + } +} + +h2_ngn_shed *h2_ngn_shed_create(apr_pool_t *pool, conn_rec *c, + int default_capacity, + apr_size_t req_buffer_size) +{ + h2_ngn_shed *shed; + + shed = apr_pcalloc(pool, sizeof(*shed)); + shed->c = c; + shed->pool = pool; + shed->default_capacity = default_capacity; + shed->req_buffer_size = req_buffer_size; + shed->ngns = apr_hash_make(pool); + + return shed; +} + +void h2_ngn_shed_set_ctx(h2_ngn_shed *shed, void *user_ctx) +{ + shed->user_ctx = user_ctx; +} + +void *h2_ngn_shed_get_ctx(h2_ngn_shed *shed) +{ + return shed->user_ctx; +} + +h2_ngn_shed *h2_ngn_shed_get_shed(h2_req_engine *ngn) +{ + return ngn->shed; +} + +void h2_ngn_shed_abort(h2_ngn_shed *shed) +{ + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, shed->c, APLOGNO(03394) + "h2_ngn_shed(%ld): abort", shed->c->id); + shed->aborted = 1; +} + +static void ngn_add_task(h2_req_engine *ngn, h2_task *task, request_rec *r) +{ + h2_ngn_entry *entry = apr_pcalloc(task->pool, sizeof(*entry)); + APR_RING_ELEM_INIT(entry, link); + entry->task = task; + entry->r = r; + 
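    /* append the new entry at the tail of this engine's request ring */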
H2_REQ_ENTRIES_INSERT_TAIL(&ngn->entries, entry); + ngn->no_assigned++; +} + + +apr_status_t h2_ngn_shed_push_request(h2_ngn_shed *shed, const char *ngn_type, + request_rec *r, + http2_req_engine_init *einit) +{ + h2_req_engine *ngn; + h2_task *task = h2_ctx_rget_task(r); + + ap_assert(task); + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, shed->c, + "h2_ngn_shed(%ld): PUSHing request (task=%s)", shed->c->id, + task->id); + if (task->request->serialize) { + /* Max compatibility, deny processing of this */ + return APR_EOF; + } + + if (task->assigned) { + --task->assigned->no_assigned; + --task->assigned->no_live; + task->assigned = NULL; + } + + if (task->engine) { + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, task->c, + "h2_ngn_shed(%ld): push task(%s) hosting engine %s " + "already with %d tasks", + shed->c->id, task->id, task->engine->id, + task->engine->no_assigned); + task->assigned = task->engine; + ngn_add_task(task->engine, task, r); + return APR_SUCCESS; + } + + ngn = apr_hash_get(shed->ngns, ngn_type, APR_HASH_KEY_STRING); + if (ngn && !ngn->shutdown) { + /* this task will be processed in another thread, + * freeze any I/O for the time being. */ + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, task->c, + "h2_ngn_shed(%ld): pushing request %s to %s", + shed->c->id, task->id, ngn->id); + if (!h2_task_has_thawed(task)) { + h2_task_freeze(task); + } + ngn_add_task(ngn, task, r); + return APR_SUCCESS; + } + + /* no existing engine or being shut down, start a new one */ + if (einit) { + apr_status_t status; + apr_pool_t *pool = task->pool; + h2_req_engine *newngn; + + newngn = apr_pcalloc(pool, sizeof(*ngn)); + newngn->pool = pool; + newngn->id = apr_psprintf(pool, "ngn-%s", task->id); + newngn->type = apr_pstrdup(pool, ngn_type); + newngn->c = task->c; + newngn->shed = shed; + newngn->capacity = shed->default_capacity; + newngn->no_assigned = 1; + newngn->no_live = 1; + APR_RING_INIT(&newngn->entries, h2_ngn_entry, link); + + status = einit(newngn, newngn->id, newngn->type, newngn->pool, + shed->req_buffer_size, r, + &newngn->out_consumed, &newngn->out_consumed_ctx); + + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, task->c, APLOGNO(03395) + "h2_ngn_shed(%ld): create engine %s (%s)", + shed->c->id, newngn->id, newngn->type); + if (status == APR_SUCCESS) { + newngn->task = task; + task->engine = newngn; + task->assigned = newngn; + apr_hash_set(shed->ngns, newngn->type, APR_HASH_KEY_STRING, newngn); + } + return status; + } + return APR_EOF; +} + +static h2_ngn_entry *pop_detached(h2_req_engine *ngn) +{ + h2_ngn_entry *entry; + for (entry = H2_REQ_ENTRIES_FIRST(&ngn->entries); + entry != H2_REQ_ENTRIES_SENTINEL(&ngn->entries); + entry = H2_NGN_ENTRY_NEXT(entry)) { + if (h2_task_has_thawed(entry->task) + || (entry->task->engine == ngn)) { + /* The task hosting this engine can always be pulled by it. + * For other task, they need to become detached, e.g. no longer + * assigned to another worker. 
*/ + H2_NGN_ENTRY_REMOVE(entry); + return entry; + } + } + return NULL; +} + +apr_status_t h2_ngn_shed_pull_request(h2_ngn_shed *shed, + h2_req_engine *ngn, + int capacity, + int want_shutdown, + request_rec **pr) +{ + h2_ngn_entry *entry; + + ap_assert(ngn); + *pr = NULL; + ap_log_cerror(APLOG_MARK, APLOG_TRACE3, 0, shed->c, APLOGNO(03396) + "h2_ngn_shed(%ld): pull task for engine %s, shutdown=%d", + shed->c->id, ngn->id, want_shutdown); + if (shed->aborted) { + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, shed->c, APLOGNO(03397) + "h2_ngn_shed(%ld): abort while pulling requests %s", + shed->c->id, ngn->id); + ngn->shutdown = 1; + return APR_ECONNABORTED; + } + + ngn->capacity = capacity; + if (H2_REQ_ENTRIES_EMPTY(&ngn->entries)) { + if (want_shutdown) { + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, shed->c, + "h2_ngn_shed(%ld): emtpy queue, shutdown engine %s", + shed->c->id, ngn->id); + ngn->shutdown = 1; + } + return ngn->shutdown? APR_EOF : APR_EAGAIN; + } + + if ((entry = pop_detached(ngn))) { + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, entry->task->c, APLOGNO(03398) + "h2_ngn_shed(%ld): pulled request %s for engine %s", + shed->c->id, entry->task->id, ngn->id); + ngn->no_live++; + *pr = entry->r; + entry->task->assigned = ngn; + /* task will now run in ngn's own thread. Modules like lua + * seem to require the correct thread set in the conn_rec. + * See PR 59542. */ + if (entry->task->c && ngn->c) { + entry->task->c->current_thread = ngn->c->current_thread; + } + if (entry->task->engine == ngn) { + /* If an engine pushes its own base task, and then pulls + * it back to itself again, it needs to be thawed. + */ + h2_task_thaw(entry->task); + } + return APR_SUCCESS; + } + + if (1) { + h2_ngn_entry *entry = H2_REQ_ENTRIES_FIRST(&ngn->entries); + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, shed->c, APLOGNO(03399) + "h2_ngn_shed(%ld): pull task, nothing, first task %s", + shed->c->id, entry->task->id); + } + return APR_EAGAIN; +} + +static apr_status_t ngn_done_task(h2_ngn_shed *shed, h2_req_engine *ngn, + h2_task *task, int waslive, int aborted) +{ + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, shed->c, APLOGNO(03400) + "h2_ngn_shed(%ld): task %s %s by %s", + shed->c->id, task->id, aborted? 
"aborted":"done", ngn->id); + ngn->no_finished++; + if (waslive) ngn->no_live--; + ngn->no_assigned--; + task->assigned = NULL; + + return APR_SUCCESS; +} + +apr_status_t h2_ngn_shed_done_task(h2_ngn_shed *shed, + struct h2_req_engine *ngn, h2_task *task) +{ + return ngn_done_task(shed, ngn, task, 1, 0); +} + +void h2_ngn_shed_done_ngn(h2_ngn_shed *shed, struct h2_req_engine *ngn) +{ + if (ngn->done) { + return; + } + + if (!shed->aborted && !H2_REQ_ENTRIES_EMPTY(&ngn->entries)) { + h2_ngn_entry *entry; + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, shed->c, + "h2_ngn_shed(%ld): exit engine %s (%s), " + "has still requests queued, shutdown=%d," + "assigned=%ld, live=%ld, finished=%ld", + shed->c->id, ngn->id, ngn->type, + ngn->shutdown, + (long)ngn->no_assigned, (long)ngn->no_live, + (long)ngn->no_finished); + for (entry = H2_REQ_ENTRIES_FIRST(&ngn->entries); + entry != H2_REQ_ENTRIES_SENTINEL(&ngn->entries); + entry = H2_NGN_ENTRY_NEXT(entry)) { + h2_task *task = entry->task; + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, shed->c, + "h2_ngn_shed(%ld): engine %s has queued task %s, " + "frozen=%d, aborting", + shed->c->id, ngn->id, task->id, task->frozen); + ngn_done_task(shed, ngn, task, 0, 1); + task->engine = task->assigned = NULL; + } + } + if (!shed->aborted && (ngn->no_assigned > 1 || ngn->no_live > 1)) { + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, shed->c, + "h2_ngn_shed(%ld): exit engine %s (%s), " + "assigned=%ld, live=%ld, finished=%ld", + shed->c->id, ngn->id, ngn->type, + (long)ngn->no_assigned, (long)ngn->no_live, + (long)ngn->no_finished); + } + else { + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, shed->c, + "h2_ngn_shed(%ld): exit engine %s", + shed->c->id, ngn->id); + } + + apr_hash_set(shed->ngns, ngn->type, APR_HASH_KEY_STRING, NULL); + ngn->done = 1; +} + +void h2_ngn_shed_destroy(h2_ngn_shed *shed) +{ + ap_assert(apr_hash_count(shed->ngns) == 0); +} + diff --git a/modules/http2/h2_ngn_shed.h b/modules/http2/h2_ngn_shed.h new file mode 100644 index 0000000..7764c18 --- /dev/null +++ b/modules/http2/h2_ngn_shed.h @@ -0,0 +1,79 @@ +/* Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef h2_req_shed_h +#define h2_req_shed_h + +struct h2_req_engine; +struct h2_task; + +typedef struct h2_ngn_shed h2_ngn_shed; +struct h2_ngn_shed { + conn_rec *c; + apr_pool_t *pool; + apr_hash_t *ngns; + void *user_ctx; + + unsigned int aborted : 1; + + int default_capacity; + apr_size_t req_buffer_size; /* preferred buffer size for responses */ +}; + +const char *h2_req_engine_get_id(h2_req_engine *engine); +int h2_req_engine_is_shutdown(h2_req_engine *engine); + +void h2_req_engine_out_consumed(h2_req_engine *engine, conn_rec *c, + apr_off_t bytes); + +typedef apr_status_t h2_shed_ngn_init(h2_req_engine *engine, + const char *id, + const char *type, + apr_pool_t *pool, + apr_size_t req_buffer_size, + request_rec *r, + h2_output_consumed **pconsumed, + void **pbaton); + +h2_ngn_shed *h2_ngn_shed_create(apr_pool_t *pool, conn_rec *c, + int default_capactiy, + apr_size_t req_buffer_size); + +void h2_ngn_shed_destroy(h2_ngn_shed *shed); + +void h2_ngn_shed_set_ctx(h2_ngn_shed *shed, void *user_ctx); +void *h2_ngn_shed_get_ctx(h2_ngn_shed *shed); + +h2_ngn_shed *h2_ngn_shed_get_shed(struct h2_req_engine *ngn); + +void h2_ngn_shed_abort(h2_ngn_shed *shed); + +apr_status_t h2_ngn_shed_push_request(h2_ngn_shed *shed, const char *ngn_type, + request_rec *r, + h2_shed_ngn_init *init_cb); + +apr_status_t h2_ngn_shed_pull_request(h2_ngn_shed *shed, h2_req_engine *pub_ngn, + int capacity, + int want_shutdown, request_rec **pr); + +apr_status_t h2_ngn_shed_done_task(h2_ngn_shed *shed, + struct h2_req_engine *ngn, + struct h2_task *task); + +void h2_ngn_shed_done_ngn(h2_ngn_shed *shed, struct h2_req_engine *ngn); + + +#endif /* h2_req_shed_h */ diff --git a/modules/http2/h2_private.h b/modules/http2/h2_private.h new file mode 100644 index 0000000..516be13 --- /dev/null +++ b/modules/http2/h2_private.h @@ -0,0 +1,28 @@ +/* Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef mod_h2_h2_private_h +#define mod_h2_h2_private_h + +#include <apr_time.h> + +#include <nghttp2/nghttp2.h> + +extern module AP_MODULE_DECLARE_DATA http2_module; + +APLOG_USE_MODULE(http2); + +#endif diff --git a/modules/http2/h2_proxy_session.c b/modules/http2/h2_proxy_session.c new file mode 100644 index 0000000..8389c7c --- /dev/null +++ b/modules/http2/h2_proxy_session.c @@ -0,0 +1,1584 @@ +/* Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
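The entry queue managed in h2_ngn_shed.c above (ngn_add_task, pop_detached) is built on APR's intrusive ring macros rather than on a separate container. The following standalone sketch is not part of the patch; it shows the same insert/iterate/remove pattern with an invented item type and needs only APR to compile.

/* Minimal standalone sketch of the apr_ring.h pattern used by the shed:
 * an intrusive, sentinel-based doubly linked ring. */
#include <stdio.h>
#include <apr_general.h>
#include <apr_pools.h>
#include <apr_ring.h>

typedef struct item item;
struct item {
    APR_RING_ENTRY(item) link;   /* intrusive prev/next pointers */
    int id;
};

APR_RING_HEAD(item_list, item);

int main(void)
{
    apr_pool_t *pool;
    struct item_list list;
    item *e;
    int i;

    apr_initialize();
    apr_pool_create(&pool, NULL);

    APR_RING_INIT(&list, item, link);
    for (i = 1; i <= 3; ++i) {
        e = apr_pcalloc(pool, sizeof(*e));
        APR_RING_ELEM_INIT(e, link);
        e->id = i;
        APR_RING_INSERT_TAIL(&list, e, item, link);
    }

    /* iterate from the first element up to the sentinel, like pop_detached() */
    for (e = APR_RING_FIRST(&list);
         e != APR_RING_SENTINEL(&list, item, link);
         e = APR_RING_NEXT(e, link)) {
        printf("item %d\n", e->id);
    }

    /* detach the head element, as the shed does when handing out work */
    if (!APR_RING_EMPTY(&list, item, link)) {
        e = APR_RING_FIRST(&list);
        APR_RING_REMOVE(e, link);
    }

    apr_pool_destroy(pool);
    apr_terminate();
    return 0;
}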
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include <stddef.h> +#include <apr_strings.h> +#include <nghttp2/nghttp2.h> + +#include <mpm_common.h> +#include <httpd.h> +#include <mod_proxy.h> + +#include "mod_http2.h" +#include "h2.h" +#include "h2_proxy_util.h" +#include "h2_proxy_session.h" + +APLOG_USE_MODULE(proxy_http2); + +typedef struct h2_proxy_stream { + int id; + apr_pool_t *pool; + h2_proxy_session *session; + + const char *url; + request_rec *r; + h2_proxy_request *req; + const char *real_server_uri; + const char *p_server_uri; + int standalone; + + h2_proxy_stream_state_t state; + unsigned int suspended : 1; + unsigned int waiting_on_100 : 1; + unsigned int waiting_on_ping : 1; + uint32_t error_code; + + apr_bucket_brigade *input; + apr_off_t data_sent; + apr_bucket_brigade *output; + apr_off_t data_received; + + apr_table_t *saves; +} h2_proxy_stream; + + +static void dispatch_event(h2_proxy_session *session, h2_proxys_event_t ev, + int arg, const char *msg); +static void ping_arrived(h2_proxy_session *session); +static apr_status_t check_suspended(h2_proxy_session *session); +static void stream_resume(h2_proxy_stream *stream); + + +static apr_status_t proxy_session_pre_close(void *theconn) +{ + proxy_conn_rec *p_conn = (proxy_conn_rec *)theconn; + h2_proxy_session *session = p_conn->data; + + if (session && session->ngh2) { + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, session->c, + "proxy_session(%s): pool cleanup, state=%d, streams=%d", + session->id, session->state, + (int)h2_proxy_ihash_count(session->streams)); + session->aborted = 1; + dispatch_event(session, H2_PROXYS_EV_PRE_CLOSE, 0, NULL); + nghttp2_session_del(session->ngh2); + session->ngh2 = NULL; + p_conn->data = NULL; + } + return APR_SUCCESS; +} + +static int proxy_pass_brigade(apr_bucket_alloc_t *bucket_alloc, + proxy_conn_rec *p_conn, + conn_rec *origin, apr_bucket_brigade *bb, + int flush) +{ + apr_status_t status; + apr_off_t transferred; + + if (flush) { + apr_bucket *e = apr_bucket_flush_create(bucket_alloc); + APR_BRIGADE_INSERT_TAIL(bb, e); + } + apr_brigade_length(bb, 0, &transferred); + if (transferred != -1) + p_conn->worker->s->transferred += transferred; + status = ap_pass_brigade(origin->output_filters, bb); + /* Cleanup the brigade now to avoid buckets lifetime + * issues in case of error returned below. 
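proxy_pass_brigade() here follows a common httpd output idiom: append the payload, add an explicit FLUSH bucket, pass the brigade down the connection's output filters, then clean the brigade so no bucket outlives the data it points at. A minimal illustration of that idiom, assuming it runs inside a module compiled against httpd; send_raw_block is an invented name.

/* Illustrative helper, not from the patch: push a block of bytes down a
 * connection's output filter chain the way proxy_pass_brigade()/raw_send()
 * do, then clean the brigade regardless of outcome. */
#include <httpd.h>
#include <util_filter.h>
#include <apr_buckets.h>

static apr_status_t send_raw_block(conn_rec *c, apr_bucket_brigade *bb,
                                   const char *data, apr_size_t len)
{
    apr_status_t rv;
    apr_bucket *b;

    /* transient: the caller's buffer must stay valid until the pass returns */
    b = apr_bucket_transient_create(data, len, c->bucket_alloc);
    APR_BRIGADE_INSERT_TAIL(bb, b);
    b = apr_bucket_flush_create(c->bucket_alloc);
    APR_BRIGADE_INSERT_TAIL(bb, b);

    rv = ap_pass_brigade(c->output_filters, bb);
    /* empty the brigade now so no bucket keeps pointing at caller memory */
    apr_brigade_cleanup(bb);
    return rv;
}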
*/ + apr_brigade_cleanup(bb); + if (status != APR_SUCCESS) { + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, origin, APLOGNO(03357) + "pass output failed to %pI (%s)", + p_conn->addr, p_conn->hostname); + } + return status; +} + +static ssize_t raw_send(nghttp2_session *ngh2, const uint8_t *data, + size_t length, int flags, void *user_data) +{ + h2_proxy_session *session = user_data; + apr_bucket *b; + apr_status_t status; + int flush = 1; + + if (data) { + b = apr_bucket_transient_create((const char*)data, length, + session->c->bucket_alloc); + APR_BRIGADE_INSERT_TAIL(session->output, b); + } + + status = proxy_pass_brigade(session->c->bucket_alloc, + session->p_conn, session->c, + session->output, flush); + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, session->c, + "h2_proxy_sesssion(%s): raw_send %d bytes, flush=%d", + session->id, (int)length, flush); + if (status != APR_SUCCESS) { + return NGHTTP2_ERR_CALLBACK_FAILURE; + } + return length; +} + +static int on_frame_recv(nghttp2_session *ngh2, const nghttp2_frame *frame, + void *user_data) +{ + h2_proxy_session *session = user_data; + h2_proxy_stream *stream; + request_rec *r; + int n; + + if (APLOGcdebug(session->c)) { + char buffer[256]; + + h2_proxy_util_frame_print(frame, buffer, sizeof(buffer)/sizeof(buffer[0])); + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, APLOGNO(03341) + "h2_proxy_session(%s): recv FRAME[%s]", + session->id, buffer); + } + + session->last_frame_received = apr_time_now(); + switch (frame->hd.type) { + case NGHTTP2_HEADERS: + stream = nghttp2_session_get_stream_user_data(ngh2, frame->hd.stream_id); + if (!stream) { + return NGHTTP2_ERR_CALLBACK_FAILURE; + } + r = stream->r; + if (r->status >= 100 && r->status < 200) { + /* By default, we will forward all interim responses when + * we are sitting on a HTTP/2 connection to the client */ + int forward = session->h2_front; + switch(r->status) { + case 100: + if (stream->waiting_on_100) { + stream->waiting_on_100 = 0; + r->status_line = ap_get_status_line(r->status); + forward = 1; + } + break; + case 103: + /* workaround until we get this into http protocol base + * parts. without this, unknown codes are converted to + * 500... */ + r->status_line = "103 Early Hints"; + break; + default: + r->status_line = ap_get_status_line(r->status); + break; + } + ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(03487) + "h2_proxy_session(%s): got interim HEADERS, " + "status=%d, will forward=%d", + session->id, r->status, forward); + if (forward) { + ap_send_interim_response(r, 1); + } + } + stream_resume(stream); + break; + case NGHTTP2_PING: + if (session->check_ping) { + session->check_ping = 0; + ping_arrived(session); + } + break; + case NGHTTP2_PUSH_PROMISE: + break; + case NGHTTP2_SETTINGS: + if (frame->settings.niv > 0) { + n = nghttp2_session_get_remote_settings(ngh2, NGHTTP2_SETTINGS_MAX_CONCURRENT_STREAMS); + if (n > 0) { + session->remote_max_concurrent = n; + } + } + break; + case NGHTTP2_GOAWAY: + /* we expect the remote server to tell us the highest stream id + * that it has started processing. 
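The handler above is a regular nghttp2 on_frame_recv callback; its GOAWAY branch records the peer's last processed stream id so unprocessed requests can be retried elsewhere. A stripped-down sketch of the same wiring for a hypothetical client follows; my_client_t, my_on_frame_recv and my_client_new are invented names.

/* Hypothetical, trimmed-down nghttp2 client setup, not from the patch. */
#include <string.h>
#include <nghttp2/nghttp2.h>

typedef struct {
    int32_t last_stream_id;   /* highest stream id the peer accepted */
    int     goaway_received;
} my_client_t;

static int my_on_frame_recv(nghttp2_session *ngh2,
                            const nghttp2_frame *frame, void *user_data)
{
    my_client_t *cl = user_data;
    (void)ngh2;
    if (frame->hd.type == NGHTTP2_GOAWAY) {
        cl->last_stream_id = frame->goaway.last_stream_id;
        cl->goaway_received = 1;
    }
    return 0;
}

static int my_client_new(my_client_t *cl, nghttp2_session **psession)
{
    nghttp2_session_callbacks *cbs;
    int rv;

    memset(cl, 0, sizeof(*cl));
    rv = nghttp2_session_callbacks_new(&cbs);
    if (rv != 0) {
        return rv;   /* nghttp2 error codes are negative */
    }
    nghttp2_session_callbacks_set_on_frame_recv_callback(cbs, my_on_frame_recv);
    rv = nghttp2_session_client_new(psession, cbs, cl);
    nghttp2_session_callbacks_del(cbs);
    return rv;
}

The patch registers further callbacks on the same nghttp2_session_callbacks object, notably the send callback (raw_send) and a data-provider read callback (stream_request_data); when that read callback has no request body available yet it returns NGHTTP2_ERR_DEFERRED, and stream_resume() later re-arms the stream via nghttp2_session_resume_data().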
*/ + session->last_stream_id = frame->goaway.last_stream_id; + dispatch_event(session, H2_PROXYS_EV_REMOTE_GOAWAY, 0, NULL); + break; + default: + break; + } + return 0; +} + +static int before_frame_send(nghttp2_session *ngh2, + const nghttp2_frame *frame, void *user_data) +{ + h2_proxy_session *session = user_data; + if (APLOGcdebug(session->c)) { + char buffer[256]; + + h2_proxy_util_frame_print(frame, buffer, sizeof(buffer)/sizeof(buffer[0])); + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, APLOGNO(03343) + "h2_proxy_session(%s): sent FRAME[%s]", + session->id, buffer); + } + return 0; +} + +static int add_header(void *table, const char *n, const char *v) +{ + apr_table_add(table, n, v); + return 1; +} + +static void process_proxy_header(h2_proxy_stream *stream, const char *n, const char *v) +{ + static const struct { + const char *name; + ap_proxy_header_reverse_map_fn func; + } transform_hdrs[] = { + { "Location", ap_proxy_location_reverse_map }, + { "Content-Location", ap_proxy_location_reverse_map }, + { "URI", ap_proxy_location_reverse_map }, + { "Destination", ap_proxy_location_reverse_map }, + { "Set-Cookie", ap_proxy_cookie_reverse_map }, + { NULL, NULL } + }; + request_rec *r = stream->r; + proxy_dir_conf *dconf; + int i; + + dconf = ap_get_module_config(r->per_dir_config, &proxy_module); + if (!dconf->preserve_host) { + for (i = 0; transform_hdrs[i].name; ++i) { + if (!ap_cstr_casecmp(transform_hdrs[i].name, n)) { + apr_table_add(r->headers_out, n, + (*transform_hdrs[i].func)(r, dconf, v)); + return; + } + } + if (!ap_cstr_casecmp("Link", n)) { + dconf = ap_get_module_config(r->per_dir_config, &proxy_module); + apr_table_add(r->headers_out, n, + h2_proxy_link_reverse_map(r, dconf, + stream->real_server_uri, stream->p_server_uri, v)); + return; + } + } + apr_table_add(r->headers_out, n, v); +} + +static apr_status_t h2_proxy_stream_add_header_out(h2_proxy_stream *stream, + const char *n, apr_size_t nlen, + const char *v, apr_size_t vlen) +{ + if (n[0] == ':') { + if (!stream->data_received && !strncmp(":status", n, nlen)) { + char *s = apr_pstrndup(stream->r->pool, v, vlen); + + apr_table_setn(stream->r->notes, "proxy-status", s); + ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, stream->session->c, + "h2_proxy_stream(%s-%d): got status %s", + stream->session->id, stream->id, s); + stream->r->status = (int)apr_atoi64(s); + if (stream->r->status <= 0) { + stream->r->status = 500; + return APR_EGENERAL; + } + } + return APR_SUCCESS; + } + + if (!h2_proxy_res_ignore_header(n, nlen)) { + char *hname, *hvalue; + + hname = apr_pstrndup(stream->pool, n, nlen); + h2_proxy_util_camel_case_header(hname, nlen); + hvalue = apr_pstrndup(stream->pool, v, vlen); + + ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, stream->session->c, + "h2_proxy_stream(%s-%d): got header %s: %s", + stream->session->id, stream->id, hname, hvalue); + process_proxy_header(stream, hname, hvalue); + } + return APR_SUCCESS; +} + +static int log_header(void *ctx, const char *key, const char *value) +{ + h2_proxy_stream *stream = ctx; + ap_log_rerror(APLOG_MARK, APLOG_TRACE2, 0, stream->r, + "h2_proxy_stream(%s-%d), header_out %s: %s", + stream->session->id, stream->id, key, value); + return 1; +} + +static void h2_proxy_stream_end_headers_out(h2_proxy_stream *stream) +{ + h2_proxy_session *session = stream->session; + request_rec *r = stream->r; + apr_pool_t *p = r->pool; + + /* Now, add in the cookies from the response to the ones already saved */ + apr_table_do(add_header, stream->saves, r->headers_out, 
"Set-Cookie", NULL); + + /* and now load 'em all in */ + if (!apr_is_empty_table(stream->saves)) { + apr_table_unset(r->headers_out, "Set-Cookie"); + r->headers_out = apr_table_overlay(p, r->headers_out, stream->saves); + } + + /* handle Via header in response */ + if (session->conf->viaopt != via_off + && session->conf->viaopt != via_block) { + const char *server_name = ap_get_server_name(stream->r); + apr_port_t port = ap_get_server_port(stream->r); + char portstr[32]; + + /* If USE_CANONICAL_NAME_OFF was configured for the proxy virtual host, + * then the server name returned by ap_get_server_name() is the + * origin server name (which doesn't make sense with Via: headers) + * so we use the proxy vhost's name instead. + */ + if (server_name == stream->r->hostname) { + server_name = stream->r->server->server_hostname; + } + if (ap_is_default_port(port, stream->r)) { + portstr[0] = '\0'; + } + else { + apr_snprintf(portstr, sizeof(portstr), ":%d", port); + } + + /* create a "Via:" response header entry and merge it */ + apr_table_add(r->headers_out, "Via", + (session->conf->viaopt == via_full) + ? apr_psprintf(p, "%d.%d %s%s (%s)", + HTTP_VERSION_MAJOR(r->proto_num), + HTTP_VERSION_MINOR(r->proto_num), + server_name, portstr, + AP_SERVER_BASEVERSION) + : apr_psprintf(p, "%d.%d %s%s", + HTTP_VERSION_MAJOR(r->proto_num), + HTTP_VERSION_MINOR(r->proto_num), + server_name, portstr) + ); + } + + if (APLOGrtrace2(stream->r)) { + ap_log_rerror(APLOG_MARK, APLOG_TRACE2, 0, stream->r, + "h2_proxy_stream(%s-%d), header_out after merging", + stream->session->id, stream->id); + apr_table_do(log_header, stream, stream->r->headers_out, NULL); + } +} + +static int stream_response_data(nghttp2_session *ngh2, uint8_t flags, + int32_t stream_id, const uint8_t *data, + size_t len, void *user_data) +{ + h2_proxy_session *session = user_data; + h2_proxy_stream *stream; + apr_bucket *b; + apr_status_t status; + + stream = nghttp2_session_get_stream_user_data(ngh2, stream_id); + if (!stream) { + ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, ap_server_conf, APLOGNO(03358) + "h2_proxy_session(%s): recv data chunk for " + "unknown stream %d, ignored", + session->id, stream_id); + return 0; + } + + if (!stream->data_received) { + /* last chance to manipulate response headers. 
+ * after this, only trailers */ + h2_proxy_stream_end_headers_out(stream); + } + stream->data_received += len; + + b = apr_bucket_transient_create((const char*)data, len, + stream->r->connection->bucket_alloc); + APR_BRIGADE_INSERT_TAIL(stream->output, b); + /* always flush after a DATA frame, as we have no other indication + * of buffer use */ + b = apr_bucket_flush_create(stream->r->connection->bucket_alloc); + APR_BRIGADE_INSERT_TAIL(stream->output, b); + + status = ap_pass_brigade(stream->r->output_filters, stream->output); + ap_log_rerror(APLOG_MARK, APLOG_DEBUG, status, stream->r, APLOGNO(03359) + "h2_proxy_session(%s): stream=%d, response DATA %ld, %ld" + " total", session->id, stream_id, (long)len, + (long)stream->data_received); + if (status != APR_SUCCESS) { + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, session->c, APLOGNO(03344) + "h2_proxy_session(%s): passing output on stream %d", + session->id, stream->id); + nghttp2_submit_rst_stream(ngh2, NGHTTP2_FLAG_NONE, + stream_id, NGHTTP2_STREAM_CLOSED); + return NGHTTP2_ERR_STREAM_CLOSING; + } + if (stream->standalone) { + nghttp2_session_consume(ngh2, stream_id, len); + ap_log_rerror(APLOG_MARK, APLOG_TRACE2, 0, stream->r, + "h2_proxy_session(%s): stream %d, win_update %d bytes", + session->id, stream_id, (int)len); + } + return 0; +} + +static int on_stream_close(nghttp2_session *ngh2, int32_t stream_id, + uint32_t error_code, void *user_data) +{ + h2_proxy_session *session = user_data; + h2_proxy_stream *stream; + if (!session->aborted) { + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, APLOGNO(03360) + "h2_proxy_session(%s): stream=%d, closed, err=%d", + session->id, stream_id, error_code); + stream = h2_proxy_ihash_get(session->streams, stream_id); + if (stream) { + stream->error_code = error_code; + } + dispatch_event(session, H2_PROXYS_EV_STREAM_DONE, stream_id, NULL); + } + return 0; +} + +static int on_header(nghttp2_session *ngh2, const nghttp2_frame *frame, + const uint8_t *namearg, size_t nlen, + const uint8_t *valuearg, size_t vlen, uint8_t flags, + void *user_data) +{ + h2_proxy_session *session = user_data; + h2_proxy_stream *stream; + const char *n = (const char*)namearg; + const char *v = (const char*)valuearg; + + (void)session; + if (frame->hd.type == NGHTTP2_HEADERS && nlen) { + stream = nghttp2_session_get_stream_user_data(ngh2, frame->hd.stream_id); + if (stream) { + if (h2_proxy_stream_add_header_out(stream, n, nlen, v, vlen)) { + return NGHTTP2_ERR_CALLBACK_FAILURE; + } + } + } + else if (frame->hd.type == NGHTTP2_PUSH_PROMISE) { + } + + return 0; +} + +static ssize_t stream_request_data(nghttp2_session *ngh2, int32_t stream_id, + uint8_t *buf, size_t length, + uint32_t *data_flags, + nghttp2_data_source *source, void *user_data) +{ + h2_proxy_stream *stream; + apr_status_t status = APR_SUCCESS; + + *data_flags = 0; + stream = nghttp2_session_get_stream_user_data(ngh2, stream_id); + if (!stream) { + ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, ap_server_conf, APLOGNO(03361) + "h2_proxy_stream(%s): data_read, stream %d not found", + stream->session->id, stream_id); + return NGHTTP2_ERR_CALLBACK_FAILURE; + } + + if (stream->session->check_ping) { + /* suspend until we hear from the other side */ + stream->waiting_on_ping = 1; + status = APR_EAGAIN; + } + else if (stream->r->expecting_100) { + /* suspend until the answer comes */ + stream->waiting_on_100 = 1; + status = APR_EAGAIN; + } + else if (APR_BRIGADE_EMPTY(stream->input)) { + status = ap_get_brigade(stream->r->input_filters, stream->input, + 
AP_MODE_READBYTES, APR_NONBLOCK_READ, + H2MAX(APR_BUCKET_BUFF_SIZE, length)); + ap_log_rerror(APLOG_MARK, APLOG_TRACE2, status, stream->r, + "h2_proxy_stream(%s-%d): request body read", + stream->session->id, stream->id); + } + + if (status == APR_SUCCESS) { + ssize_t readlen = 0; + while (status == APR_SUCCESS + && (readlen < length) + && !APR_BRIGADE_EMPTY(stream->input)) { + apr_bucket* b = APR_BRIGADE_FIRST(stream->input); + if (APR_BUCKET_IS_METADATA(b)) { + if (APR_BUCKET_IS_EOS(b)) { + *data_flags |= NGHTTP2_DATA_FLAG_EOF; + } + else { + /* we do nothing more regarding any meta here */ + } + } + else { + const char *bdata = NULL; + apr_size_t blen = 0; + status = apr_bucket_read(b, &bdata, &blen, APR_BLOCK_READ); + + if (status == APR_SUCCESS && blen > 0) { + ssize_t copylen = H2MIN(length - readlen, blen); + memcpy(buf, bdata, copylen); + buf += copylen; + readlen += copylen; + if (copylen < blen) { + /* We have data left in the bucket. Split it. */ + status = apr_bucket_split(b, copylen); + } + } + } + apr_bucket_delete(b); + } + + stream->data_sent += readlen; + ap_log_rerror(APLOG_MARK, APLOG_DEBUG, status, stream->r, APLOGNO(03468) + "h2_proxy_stream(%d): request DATA %ld, %ld" + " total, flags=%d", + stream->id, (long)readlen, (long)stream->data_sent, + (int)*data_flags); + return readlen; + } + else if (APR_STATUS_IS_EAGAIN(status)) { + /* suspended stream, needs to be re-awakened */ + ap_log_rerror(APLOG_MARK, APLOG_TRACE2, status, stream->r, + "h2_proxy_stream(%s-%d): suspending", + stream->session->id, stream_id); + stream->suspended = 1; + h2_proxy_iq_add(stream->session->suspended, stream->id, NULL, NULL); + return NGHTTP2_ERR_DEFERRED; + } + else { + nghttp2_submit_rst_stream(ngh2, NGHTTP2_FLAG_NONE, + stream_id, NGHTTP2_STREAM_CLOSED); + return NGHTTP2_ERR_STREAM_CLOSING; + } +} + +#ifdef H2_NG2_INVALID_HEADER_CB +static int on_invalid_header_cb(nghttp2_session *ngh2, + const nghttp2_frame *frame, + const uint8_t *name, size_t namelen, + const uint8_t *value, size_t valuelen, + uint8_t flags, void *user_data) +{ + h2_proxy_session *session = user_data; + if (APLOGcdebug(session->c)) { + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, APLOGNO(03469) + "h2_proxy_session(%s-%d): denying stream with invalid header " + "'%s: %s'", session->id, (int)frame->hd.stream_id, + apr_pstrndup(session->pool, (const char *)name, namelen), + apr_pstrndup(session->pool, (const char *)value, valuelen)); + } + return nghttp2_submit_rst_stream(session->ngh2, NGHTTP2_FLAG_NONE, + frame->hd.stream_id, + NGHTTP2_PROTOCOL_ERROR); +} +#endif + +h2_proxy_session *h2_proxy_session_setup(const char *id, proxy_conn_rec *p_conn, + proxy_server_conf *conf, + int h2_front, + unsigned char window_bits_connection, + unsigned char window_bits_stream, + h2_proxy_request_done *done) +{ + if (!p_conn->data) { + apr_pool_t *pool = p_conn->scpool; + h2_proxy_session *session; + nghttp2_session_callbacks *cbs; + nghttp2_option *option; + + session = apr_pcalloc(pool, sizeof(*session)); + apr_pool_pre_cleanup_register(pool, p_conn, proxy_session_pre_close); + p_conn->data = session; + + session->id = apr_pstrdup(p_conn->scpool, id); + session->c = p_conn->connection; + session->p_conn = p_conn; + session->conf = conf; + session->pool = p_conn->scpool; + session->state = H2_PROXYS_ST_INIT; + session->h2_front = h2_front; + session->window_bits_stream = window_bits_stream; + session->window_bits_connection = window_bits_connection; + session->streams = h2_proxy_ihash_create(pool, offsetof(h2_proxy_stream, 
id)); + session->suspended = h2_proxy_iq_create(pool, 5); + session->done = done; + + session->input = apr_brigade_create(session->pool, session->c->bucket_alloc); + session->output = apr_brigade_create(session->pool, session->c->bucket_alloc); + + nghttp2_session_callbacks_new(&cbs); + nghttp2_session_callbacks_set_on_frame_recv_callback(cbs, on_frame_recv); + nghttp2_session_callbacks_set_on_data_chunk_recv_callback(cbs, stream_response_data); + nghttp2_session_callbacks_set_on_stream_close_callback(cbs, on_stream_close); + nghttp2_session_callbacks_set_on_header_callback(cbs, on_header); + nghttp2_session_callbacks_set_before_frame_send_callback(cbs, before_frame_send); + nghttp2_session_callbacks_set_send_callback(cbs, raw_send); +#ifdef H2_NG2_INVALID_HEADER_CB + nghttp2_session_callbacks_set_on_invalid_header_callback(cbs, on_invalid_header_cb); +#endif + + nghttp2_option_new(&option); + nghttp2_option_set_peer_max_concurrent_streams(option, 100); + nghttp2_option_set_no_auto_window_update(option, 1); + + nghttp2_session_client_new2(&session->ngh2, cbs, session, option); + + nghttp2_option_del(option); + nghttp2_session_callbacks_del(cbs); + + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, APLOGNO(03362) + "setup session for %s", p_conn->hostname); + } + else { + h2_proxy_session *session = p_conn->data; + apr_interval_time_t age = apr_time_now() - session->last_frame_received; + if (age > apr_time_from_sec(1)) { + session->check_ping = 1; + nghttp2_submit_ping(session->ngh2, 0, (const uint8_t *)"nevergonnagiveyouup"); + } + } + return p_conn->data; +} + +static apr_status_t session_start(h2_proxy_session *session) +{ + nghttp2_settings_entry settings[2]; + int rv, add_conn_window; + apr_socket_t *s; + + s = ap_get_conn_socket(session->c); +#if (!defined(WIN32) && !defined(NETWARE)) || defined(DOXYGEN) + if (s) { + ap_sock_disable_nagle(s); + } +#endif + + settings[0].settings_id = NGHTTP2_SETTINGS_ENABLE_PUSH; + settings[0].value = 0; + settings[1].settings_id = NGHTTP2_SETTINGS_INITIAL_WINDOW_SIZE; + settings[1].value = (1 << session->window_bits_stream) - 1; + + rv = nghttp2_submit_settings(session->ngh2, NGHTTP2_FLAG_NONE, settings, + H2_ALEN(settings)); + + /* If the connection window is larger than our default, trigger a WINDOW_UPDATE */ + add_conn_window = ((1 << session->window_bits_connection) - 1 - + NGHTTP2_INITIAL_CONNECTION_WINDOW_SIZE); + if (!rv && add_conn_window != 0) { + rv = nghttp2_submit_window_update(session->ngh2, NGHTTP2_FLAG_NONE, 0, add_conn_window); + } + return rv? APR_EGENERAL : APR_SUCCESS; +} + +static apr_status_t open_stream(h2_proxy_session *session, const char *url, + request_rec *r, int standalone, + h2_proxy_stream **pstream) +{ + h2_proxy_stream *stream; + apr_uri_t puri; + const char *authority, *scheme, *path; + apr_status_t status; + proxy_dir_conf *dconf; + + stream = apr_pcalloc(r->pool, sizeof(*stream)); + + stream->pool = r->pool; + stream->url = url; + stream->r = r; + stream->standalone = standalone; + stream->session = session; + stream->state = H2_STREAM_ST_IDLE; + + stream->input = apr_brigade_create(stream->pool, session->c->bucket_alloc); + stream->output = apr_brigade_create(stream->pool, session->c->bucket_alloc); + + stream->req = h2_proxy_req_create(1, stream->pool, 0); + + status = apr_uri_parse(stream->pool, url, &puri); + if (status != APR_SUCCESS) + return status; + + scheme = (strcmp(puri.scheme, "h2")? 
"http" : "https"); + + dconf = ap_get_module_config(r->per_dir_config, &proxy_module); + if (dconf->preserve_host) { + authority = r->hostname; + } + else { + authority = puri.hostname; + if (!ap_strchr_c(authority, ':') && puri.port + && apr_uri_port_of_scheme(scheme) != puri.port) { + /* port info missing and port is not default for scheme: append */ + authority = apr_psprintf(stream->pool, "%s:%d", authority, puri.port); + } + } + + /* we need this for mapping relative uris in headers ("Link") back + * to local uris */ + stream->real_server_uri = apr_psprintf(stream->pool, "%s://%s", scheme, authority); + stream->p_server_uri = apr_psprintf(stream->pool, "%s://%s", puri.scheme, authority); + path = apr_uri_unparse(stream->pool, &puri, APR_URI_UNP_OMITSITEPART); + h2_proxy_req_make(stream->req, stream->pool, r->method, scheme, + authority, path, r->headers_in); + + if (dconf->add_forwarded_headers) { + if (PROXYREQ_REVERSE == r->proxyreq) { + const char *buf; + + /* Add X-Forwarded-For: so that the upstream has a chance to + * determine, where the original request came from. + */ + apr_table_mergen(stream->req->headers, "X-Forwarded-For", + r->useragent_ip); + + /* Add X-Forwarded-Host: so that upstream knows what the + * original request hostname was. + */ + if ((buf = apr_table_get(r->headers_in, "Host"))) { + apr_table_mergen(stream->req->headers, "X-Forwarded-Host", buf); + } + + /* Add X-Forwarded-Server: so that upstream knows what the + * name of this proxy server is (if there are more than one) + * XXX: This duplicates Via: - do we strictly need it? + */ + apr_table_mergen(stream->req->headers, "X-Forwarded-Server", + r->server->server_hostname); + } + } + + /* Tuck away all already existing cookies */ + stream->saves = apr_table_make(r->pool, 2); + apr_table_do(add_header, stream->saves, r->headers_out, "Set-Cookie", NULL); + + *pstream = stream; + + return APR_SUCCESS; +} + +static apr_status_t submit_stream(h2_proxy_session *session, h2_proxy_stream *stream) +{ + h2_proxy_ngheader *hd; + nghttp2_data_provider *pp = NULL; + nghttp2_data_provider provider; + int rv, may_have_request_body = 1; + apr_status_t status; + + hd = h2_proxy_util_nghd_make_req(stream->pool, stream->req); + + /* If we expect a 100-continue response, we must refrain from reading + any input until we get it. Reading the input will possibly trigger + HTTP_IN filter to generate the 100-continue itself. 
*/ + if (stream->waiting_on_100 || stream->waiting_on_ping) { + /* make a small test if we get an EOF/EOS immediately */ + status = ap_get_brigade(stream->r->input_filters, stream->input, + AP_MODE_READBYTES, APR_NONBLOCK_READ, + APR_BUCKET_BUFF_SIZE); + may_have_request_body = APR_STATUS_IS_EAGAIN(status) + || (status == APR_SUCCESS + && !APR_BUCKET_IS_EOS(APR_BRIGADE_FIRST(stream->input))); + } + + if (may_have_request_body) { + provider.source.fd = 0; + provider.source.ptr = NULL; + provider.read_callback = stream_request_data; + pp = &provider; + } + + rv = nghttp2_submit_request(session->ngh2, NULL, + hd->nv, hd->nvlen, pp, stream); + + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, APLOGNO(03363) + "h2_proxy_session(%s): submit %s%s -> %d", + session->id, stream->req->authority, stream->req->path, + rv); + if (rv > 0) { + stream->id = rv; + stream->state = H2_STREAM_ST_OPEN; + h2_proxy_ihash_add(session->streams, stream); + dispatch_event(session, H2_PROXYS_EV_STREAM_SUBMITTED, rv, NULL); + + return APR_SUCCESS; + } + return APR_EGENERAL; +} + +static apr_status_t feed_brigade(h2_proxy_session *session, apr_bucket_brigade *bb) +{ + apr_status_t status = APR_SUCCESS; + apr_size_t readlen = 0; + ssize_t n; + + while (status == APR_SUCCESS && !APR_BRIGADE_EMPTY(bb)) { + apr_bucket* b = APR_BRIGADE_FIRST(bb); + + if (APR_BUCKET_IS_METADATA(b)) { + /* nop */ + } + else { + const char *bdata = NULL; + apr_size_t blen = 0; + + status = apr_bucket_read(b, &bdata, &blen, APR_BLOCK_READ); + if (status == APR_SUCCESS && blen > 0) { + n = nghttp2_session_mem_recv(session->ngh2, (const uint8_t *)bdata, blen); + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, session->c, + "h2_proxy_session(%s): feeding %ld bytes -> %ld", + session->id, (long)blen, (long)n); + if (n < 0) { + if (nghttp2_is_fatal((int)n)) { + status = APR_EGENERAL; + } + } + else { + readlen += n; + if (n < blen) { + apr_bucket_split(b, n); + } + } + } + } + apr_bucket_delete(b); + } + + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, session->c, + "h2_proxy_session(%s): fed %ld bytes of input to session", + session->id, (long)readlen); + if (readlen == 0 && status == APR_SUCCESS) { + return APR_EAGAIN; + } + return status; +} + +static apr_status_t h2_proxy_session_read(h2_proxy_session *session, int block, + apr_interval_time_t timeout) +{ + apr_status_t status = APR_SUCCESS; + + if (APR_BRIGADE_EMPTY(session->input)) { + apr_socket_t *socket = NULL; + apr_time_t save_timeout = -1; + + if (block) { + socket = ap_get_conn_socket(session->c); + if (socket) { + apr_socket_timeout_get(socket, &save_timeout); + apr_socket_timeout_set(socket, timeout); + } + else { + /* cannot block on timeout */ + ap_log_cerror(APLOG_MARK, APLOG_WARNING, 0, session->c, APLOGNO(03379) + "h2_proxy_session(%s): unable to get conn socket", + session->id); + return APR_ENOTIMPL; + } + } + + status = ap_get_brigade(session->c->input_filters, session->input, + AP_MODE_READBYTES, + block? 
APR_BLOCK_READ : APR_NONBLOCK_READ, + 64 * 1024); + ap_log_cerror(APLOG_MARK, APLOG_TRACE3, status, session->c, + "h2_proxy_session(%s): read from conn", session->id); + if (socket && save_timeout != -1) { + apr_socket_timeout_set(socket, save_timeout); + } + } + + if (status == APR_SUCCESS) { + status = feed_brigade(session, session->input); + } + else if (APR_STATUS_IS_TIMEUP(status)) { + /* nop */ + } + else if (!APR_STATUS_IS_EAGAIN(status)) { + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, session->c, APLOGNO(03380) + "h2_proxy_session(%s): read error", session->id); + dispatch_event(session, H2_PROXYS_EV_CONN_ERROR, status, NULL); + } + + return status; +} + +apr_status_t h2_proxy_session_submit(h2_proxy_session *session, + const char *url, request_rec *r, + int standalone) +{ + h2_proxy_stream *stream; + apr_status_t status; + + status = open_stream(session, url, r, standalone, &stream); + if (status == APR_SUCCESS) { + ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(03381) + "process stream(%d): %s %s%s, original: %s", + stream->id, stream->req->method, + stream->req->authority, stream->req->path, + r->the_request); + status = submit_stream(session, stream); + } + return status; +} + +static void stream_resume(h2_proxy_stream *stream) +{ + h2_proxy_session *session = stream->session; + ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, session->c, + "h2_proxy_stream(%s-%d): resuming", + session->id, stream->id); + stream->suspended = 0; + h2_proxy_iq_remove(session->suspended, stream->id); + nghttp2_session_resume_data(session->ngh2, stream->id); + dispatch_event(session, H2_PROXYS_EV_STREAM_RESUMED, 0, NULL); +} + +static apr_status_t check_suspended(h2_proxy_session *session) +{ + h2_proxy_stream *stream; + int i, stream_id; + apr_status_t status; + + for (i = 0; i < session->suspended->nelts; ++i) { + stream_id = session->suspended->elts[i]; + stream = nghttp2_session_get_stream_user_data(session->ngh2, stream_id); + if (stream) { + if (stream->waiting_on_100 || stream->waiting_on_ping) { + status = APR_EAGAIN; + } + else { + status = ap_get_brigade(stream->r->input_filters, stream->input, + AP_MODE_READBYTES, APR_NONBLOCK_READ, + APR_BUCKET_BUFF_SIZE); + } + if (status == APR_SUCCESS && !APR_BRIGADE_EMPTY(stream->input)) { + stream_resume(stream); + check_suspended(session); + return APR_SUCCESS; + } + else if (status != APR_SUCCESS && !APR_STATUS_IS_EAGAIN(status)) { + ap_log_cerror(APLOG_MARK, APLOG_WARNING, status, session->c, + APLOGNO(03382) "h2_proxy_stream(%s-%d): check input", + session->id, stream_id); + stream_resume(stream); + check_suspended(session); + return APR_SUCCESS; + } + } + else { + /* gone? */ + h2_proxy_iq_remove(session->suspended, stream_id); + check_suspended(session); + return APR_SUCCESS; + } + } + return APR_EAGAIN; +} + +static apr_status_t session_shutdown(h2_proxy_session *session, int reason, + const char *msg) +{ + apr_status_t status = APR_SUCCESS; + const char *err = msg; + + ap_assert(session); + if (!err && reason) { + err = nghttp2_strerror(reason); + } + nghttp2_submit_goaway(session->ngh2, NGHTTP2_FLAG_NONE, 0, + reason, (uint8_t*)err, err? 
strlen(err):0); + status = nghttp2_session_send(session->ngh2); + dispatch_event(session, H2_PROXYS_EV_LOCAL_GOAWAY, reason, err); + return status; +} + + +static const char *StateNames[] = { + "INIT", /* H2_PROXYS_ST_INIT */ + "DONE", /* H2_PROXYS_ST_DONE */ + "IDLE", /* H2_PROXYS_ST_IDLE */ + "BUSY", /* H2_PROXYS_ST_BUSY */ + "WAIT", /* H2_PROXYS_ST_WAIT */ + "LSHUTDOWN", /* H2_PROXYS_ST_LOCAL_SHUTDOWN */ + "RSHUTDOWN", /* H2_PROXYS_ST_REMOTE_SHUTDOWN */ +}; + +static const char *state_name(h2_proxys_state state) +{ + if (state >= (sizeof(StateNames)/sizeof(StateNames[0]))) { + return "unknown"; + } + return StateNames[state]; +} + +static int is_accepting_streams(h2_proxy_session *session) +{ + switch (session->state) { + case H2_PROXYS_ST_IDLE: + case H2_PROXYS_ST_BUSY: + case H2_PROXYS_ST_WAIT: + return 1; + default: + return 0; + } +} + +static void transit(h2_proxy_session *session, const char *action, + h2_proxys_state nstate) +{ + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, APLOGNO(03345) + "h2_proxy_session(%s): transit [%s] -- %s --> [%s]", session->id, + state_name(session->state), action, state_name(nstate)); + session->state = nstate; +} + +static void ev_init(h2_proxy_session *session, int arg, const char *msg) +{ + switch (session->state) { + case H2_PROXYS_ST_INIT: + if (h2_proxy_ihash_empty(session->streams)) { + transit(session, "init", H2_PROXYS_ST_IDLE); + } + else { + transit(session, "init", H2_PROXYS_ST_BUSY); + } + break; + + default: + /* nop */ + break; + } +} + +static void ev_local_goaway(h2_proxy_session *session, int arg, const char *msg) +{ + switch (session->state) { + case H2_PROXYS_ST_LOCAL_SHUTDOWN: + /* already did that? */ + break; + case H2_PROXYS_ST_IDLE: + case H2_PROXYS_ST_REMOTE_SHUTDOWN: + /* all done */ + transit(session, "local goaway", H2_PROXYS_ST_DONE); + break; + default: + transit(session, "local goaway", H2_PROXYS_ST_LOCAL_SHUTDOWN); + break; + } +} + +static void ev_remote_goaway(h2_proxy_session *session, int arg, const char *msg) +{ + switch (session->state) { + case H2_PROXYS_ST_REMOTE_SHUTDOWN: + /* already received that? 
*/ + break; + case H2_PROXYS_ST_IDLE: + case H2_PROXYS_ST_LOCAL_SHUTDOWN: + /* all done */ + transit(session, "remote goaway", H2_PROXYS_ST_DONE); + break; + default: + transit(session, "remote goaway", H2_PROXYS_ST_REMOTE_SHUTDOWN); + break; + } +} + +static void ev_conn_error(h2_proxy_session *session, int arg, const char *msg) +{ + switch (session->state) { + case H2_PROXYS_ST_INIT: + case H2_PROXYS_ST_DONE: + case H2_PROXYS_ST_LOCAL_SHUTDOWN: + /* just leave */ + transit(session, "conn error", H2_PROXYS_ST_DONE); + break; + + default: + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, arg, session->c, + "h2_proxy_session(%s): conn error -> shutdown", session->id); + session_shutdown(session, arg, msg); + break; + } +} + +static void ev_proto_error(h2_proxy_session *session, int arg, const char *msg) +{ + switch (session->state) { + case H2_PROXYS_ST_DONE: + case H2_PROXYS_ST_LOCAL_SHUTDOWN: + /* just leave */ + transit(session, "proto error", H2_PROXYS_ST_DONE); + break; + + default: + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, session->c, + "h2_proxy_session(%s): proto error -> shutdown", session->id); + session_shutdown(session, arg, msg); + break; + } +} + +static void ev_conn_timeout(h2_proxy_session *session, int arg, const char *msg) +{ + switch (session->state) { + case H2_PROXYS_ST_LOCAL_SHUTDOWN: + transit(session, "conn timeout", H2_PROXYS_ST_DONE); + break; + default: + session_shutdown(session, arg, msg); + transit(session, "conn timeout", H2_PROXYS_ST_DONE); + break; + } +} + +static void ev_no_io(h2_proxy_session *session, int arg, const char *msg) +{ + switch (session->state) { + case H2_PROXYS_ST_BUSY: + case H2_PROXYS_ST_LOCAL_SHUTDOWN: + case H2_PROXYS_ST_REMOTE_SHUTDOWN: + /* nothing for input and output to do. If we remain + * in this state, we go into a tight loop and suck up + * CPU cycles. Ideally, we'd like to do a blocking read, but that + * is not possible if we have scheduled tasks and wait + * for them to produce something. */ + if (h2_proxy_ihash_empty(session->streams)) { + if (!is_accepting_streams(session)) { + /* We are no longer accepting new streams and have + * finished processing existing ones. Time to leave. */ + session_shutdown(session, arg, msg); + transit(session, "no io", H2_PROXYS_ST_DONE); + } + else { + /* When we have no streams, no task events are possible, + * switch to blocking reads */ + transit(session, "no io", H2_PROXYS_ST_IDLE); + } + } + else { + /* Unable to do blocking reads, as we wait on events from + * task processing in other threads. Do a busy wait with + * backoff timer. */ + transit(session, "no io", H2_PROXYS_ST_WAIT); + } + break; + default: + /* nop */ + break; + } +} + +static void ev_stream_submitted(h2_proxy_session *session, int stream_id, + const char *msg) +{ + switch (session->state) { + case H2_PROXYS_ST_IDLE: + case H2_PROXYS_ST_WAIT: + transit(session, "stream submitted", H2_PROXYS_ST_BUSY); + break; + default: + /* nop */ + break; + } +} + +static void ev_stream_done(h2_proxy_session *session, int stream_id, + const char *msg) +{ + h2_proxy_stream *stream; + + stream = nghttp2_session_get_stream_user_data(session->ngh2, stream_id); + if (stream) { + int touched = (stream->data_sent || + stream_id <= session->last_stream_id); + apr_status_t status = (stream->error_code == 0)? 
APR_SUCCESS : APR_EINVAL; + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, APLOGNO(03364) + "h2_proxy_sesssion(%s): stream(%d) closed " + "(touched=%d, error=%d)", + session->id, stream_id, touched, stream->error_code); + + if (status != APR_SUCCESS) { + stream->r->status = 500; + } + else if (!stream->data_received) { + apr_bucket *b; + /* if the response had no body, this is the time to flush + * an empty brigade which will also write the resonse + * headers */ + h2_proxy_stream_end_headers_out(stream); + stream->data_received = 1; + b = apr_bucket_flush_create(stream->r->connection->bucket_alloc); + APR_BRIGADE_INSERT_TAIL(stream->output, b); + b = apr_bucket_eos_create(stream->r->connection->bucket_alloc); + APR_BRIGADE_INSERT_TAIL(stream->output, b); + ap_pass_brigade(stream->r->output_filters, stream->output); + } + + stream->state = H2_STREAM_ST_CLOSED; + h2_proxy_ihash_remove(session->streams, stream_id); + h2_proxy_iq_remove(session->suspended, stream_id); + if (session->done) { + session->done(session, stream->r, status, touched); + } + } + + switch (session->state) { + default: + /* nop */ + break; + } +} + +static void ev_stream_resumed(h2_proxy_session *session, int arg, const char *msg) +{ + switch (session->state) { + case H2_PROXYS_ST_WAIT: + transit(session, "stream resumed", H2_PROXYS_ST_BUSY); + break; + default: + /* nop */ + break; + } +} + +static void ev_data_read(h2_proxy_session *session, int arg, const char *msg) +{ + switch (session->state) { + case H2_PROXYS_ST_IDLE: + case H2_PROXYS_ST_WAIT: + transit(session, "data read", H2_PROXYS_ST_BUSY); + break; + default: + /* nop */ + break; + } +} + +static void ev_ngh2_done(h2_proxy_session *session, int arg, const char *msg) +{ + switch (session->state) { + case H2_PROXYS_ST_DONE: + /* nop */ + break; + default: + transit(session, "nghttp2 done", H2_PROXYS_ST_DONE); + break; + } +} + +static void ev_pre_close(h2_proxy_session *session, int arg, const char *msg) +{ + switch (session->state) { + case H2_PROXYS_ST_DONE: + case H2_PROXYS_ST_LOCAL_SHUTDOWN: + /* nop */ + break; + default: + session_shutdown(session, arg, msg); + break; + } +} + +static void dispatch_event(h2_proxy_session *session, h2_proxys_event_t ev, + int arg, const char *msg) +{ + switch (ev) { + case H2_PROXYS_EV_INIT: + ev_init(session, arg, msg); + break; + case H2_PROXYS_EV_LOCAL_GOAWAY: + ev_local_goaway(session, arg, msg); + break; + case H2_PROXYS_EV_REMOTE_GOAWAY: + ev_remote_goaway(session, arg, msg); + break; + case H2_PROXYS_EV_CONN_ERROR: + ev_conn_error(session, arg, msg); + break; + case H2_PROXYS_EV_PROTO_ERROR: + ev_proto_error(session, arg, msg); + break; + case H2_PROXYS_EV_CONN_TIMEOUT: + ev_conn_timeout(session, arg, msg); + break; + case H2_PROXYS_EV_NO_IO: + ev_no_io(session, arg, msg); + break; + case H2_PROXYS_EV_STREAM_SUBMITTED: + ev_stream_submitted(session, arg, msg); + break; + case H2_PROXYS_EV_STREAM_DONE: + ev_stream_done(session, arg, msg); + break; + case H2_PROXYS_EV_STREAM_RESUMED: + ev_stream_resumed(session, arg, msg); + break; + case H2_PROXYS_EV_DATA_READ: + ev_data_read(session, arg, msg); + break; + case H2_PROXYS_EV_NGH2_DONE: + ev_ngh2_done(session, arg, msg); + break; + case H2_PROXYS_EV_PRE_CLOSE: + ev_pre_close(session, arg, msg); + break; + default: + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, session->c, + "h2_proxy_session(%s): unknown event %d", + session->id, ev); + break; + } +} + +static int send_loop(h2_proxy_session *session) +{ + while (nghttp2_session_want_write(session->ngh2)) { + int 
rv = nghttp2_session_send(session->ngh2); + if (rv < 0 && nghttp2_is_fatal(rv)) { + ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, session->c, + "h2_proxy_session(%s): write, rv=%d", session->id, rv); + dispatch_event(session, H2_PROXYS_EV_CONN_ERROR, rv, NULL); + break; + } + return 1; + } + return 0; +} + +apr_status_t h2_proxy_session_process(h2_proxy_session *session) +{ + apr_status_t status; + int have_written = 0, have_read = 0; + + ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, session->c, + "h2_proxy_session(%s): process", session->id); + +run_loop: + switch (session->state) { + case H2_PROXYS_ST_INIT: + status = session_start(session); + if (status == APR_SUCCESS) { + dispatch_event(session, H2_PROXYS_EV_INIT, 0, NULL); + goto run_loop; + } + else { + dispatch_event(session, H2_PROXYS_EV_CONN_ERROR, status, NULL); + } + break; + + case H2_PROXYS_ST_BUSY: + case H2_PROXYS_ST_LOCAL_SHUTDOWN: + case H2_PROXYS_ST_REMOTE_SHUTDOWN: + have_written = send_loop(session); + + if (nghttp2_session_want_read(session->ngh2)) { + status = h2_proxy_session_read(session, 0, 0); + if (status == APR_SUCCESS) { + have_read = 1; + } + } + + if (!have_written && !have_read + && !nghttp2_session_want_write(session->ngh2)) { + dispatch_event(session, H2_PROXYS_EV_NO_IO, 0, NULL); + goto run_loop; + } + break; + + case H2_PROXYS_ST_WAIT: + if (check_suspended(session) == APR_EAGAIN) { + /* no stream has become resumed. Do a blocking read with + * ever increasing timeouts... */ + if (session->wait_timeout < 25) { + session->wait_timeout = 25; + } + else { + session->wait_timeout = H2MIN(apr_time_from_msec(100), + 2*session->wait_timeout); + } + + status = h2_proxy_session_read(session, 1, session->wait_timeout); + ap_log_cerror(APLOG_MARK, APLOG_TRACE3, status, session->c, + APLOGNO(03365) + "h2_proxy_session(%s): WAIT read, timeout=%fms", + session->id, (float)session->wait_timeout/1000.0); + if (status == APR_SUCCESS) { + have_read = 1; + dispatch_event(session, H2_PROXYS_EV_DATA_READ, 0, NULL); + } + else if (APR_STATUS_IS_TIMEUP(status) + || APR_STATUS_IS_EAGAIN(status)) { + /* go back to checking all inputs again */ + transit(session, "wait cycle", H2_PROXYS_ST_BUSY); + } + } + break; + + case H2_PROXYS_ST_IDLE: + break; + + case H2_PROXYS_ST_DONE: /* done, session terminated */ + return APR_EOF; + + default: + ap_log_cerror(APLOG_MARK, APLOG_ERR, APR_EGENERAL, session->c, + APLOGNO(03346)"h2_proxy_session(%s): unknown state %d", + session->id, session->state); + dispatch_event(session, H2_PROXYS_EV_PROTO_ERROR, 0, NULL); + break; + } + + + if (have_read || have_written) { + session->wait_timeout = 0; + } + + if (!nghttp2_session_want_read(session->ngh2) + && !nghttp2_session_want_write(session->ngh2)) { + dispatch_event(session, H2_PROXYS_EV_NGH2_DONE, 0, NULL); + } + + return APR_SUCCESS; /* needs to be called again */ +} + +typedef struct { + h2_proxy_session *session; + h2_proxy_request_done *done; +} cleanup_iter_ctx; + +static int cancel_iter(void *udata, void *val) +{ + cleanup_iter_ctx *ctx = udata; + h2_proxy_stream *stream = val; + nghttp2_submit_rst_stream(ctx->session->ngh2, NGHTTP2_FLAG_NONE, + stream->id, 0); + return 1; +} + +void h2_proxy_session_cancel_all(h2_proxy_session *session) +{ + if (!h2_proxy_ihash_empty(session->streams)) { + cleanup_iter_ctx ctx; + ctx.session = session; + ctx.done = session->done; + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, APLOGNO(03366) + "h2_proxy_session(%s): cancel %d streams", + session->id, (int)h2_proxy_ihash_count(session->streams)); + 
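cancel_iter() here and done_iter(), ping_arrived_iter() and win_update_iter() further down all use the same callback-iteration pattern over the int-keyed stream hash: the iterator receives the user context and the element and, judging by how these iterators behave, returning 1 continues the walk while 0 stops it. A small hypothetical sketch of that pattern; thing_t, count_open and open_things are invented, and the h2_proxy_ihash_* helpers are assumed to come from h2_proxy_util.h, which this file also includes.

/* Hypothetical sketch, not from the patch, of the ihash iterate pattern. */
#include <stddef.h>
#include <httpd.h>
#include <apr_pools.h>
#include "h2_proxy_util.h"   /* assumed home of the h2_proxy_ihash_* helpers */

typedef struct {
    int id;      /* key field, registered via offsetof() at create time */
    int open;
} thing_t;

static int count_open(void *udata, void *val)
{
    int *n = udata;
    thing_t *t = val;
    if (t->open) {
        (*n)++;
    }
    return 1;    /* keep iterating */
}

static int open_things(apr_pool_t *pool)
{
    int n = 0;
    struct h2_proxy_ihash_t *by_id;
    thing_t *t;

    by_id = h2_proxy_ihash_create(pool, offsetof(thing_t, id));
    t = apr_pcalloc(pool, sizeof(*t));
    t->id = 1;
    t->open = 1;
    h2_proxy_ihash_add(by_id, t);

    h2_proxy_ihash_iter(by_id, count_open, &n);
    return n;
}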
h2_proxy_ihash_iter(session->streams, cancel_iter, &ctx); + session_shutdown(session, 0, NULL); + } +} + +static int done_iter(void *udata, void *val) +{ + cleanup_iter_ctx *ctx = udata; + h2_proxy_stream *stream = val; + int touched = (stream->data_sent || + stream->id <= ctx->session->last_stream_id); + ctx->done(ctx->session, stream->r, APR_ECONNABORTED, touched); + return 1; +} + +void h2_proxy_session_cleanup(h2_proxy_session *session, + h2_proxy_request_done *done) +{ + if (!h2_proxy_ihash_empty(session->streams)) { + cleanup_iter_ctx ctx; + ctx.session = session; + ctx.done = done; + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, APLOGNO(03519) + "h2_proxy_session(%s): terminated, %d streams unfinished", + session->id, (int)h2_proxy_ihash_count(session->streams)); + h2_proxy_ihash_iter(session->streams, done_iter, &ctx); + h2_proxy_ihash_clear(session->streams); + } +} + +static int ping_arrived_iter(void *udata, void *val) +{ + h2_proxy_stream *stream = val; + if (stream->waiting_on_ping) { + stream->waiting_on_ping = 0; + stream_resume(stream); + } + return 1; +} + +static void ping_arrived(h2_proxy_session *session) +{ + if (!h2_proxy_ihash_empty(session->streams)) { + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, APLOGNO(03470) + "h2_proxy_session(%s): ping arrived, unblocking streams", + session->id); + h2_proxy_ihash_iter(session->streams, ping_arrived_iter, &session); + } +} + +typedef struct { + h2_proxy_session *session; + conn_rec *c; + apr_off_t bytes; + int updated; +} win_update_ctx; + +static int win_update_iter(void *udata, void *val) +{ + win_update_ctx *ctx = udata; + h2_proxy_stream *stream = val; + + if (stream->r && stream->r->connection == ctx->c) { + ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, ctx->session->c, + "h2_proxy_session(%s-%d): win_update %ld bytes", + ctx->session->id, (int)stream->id, (long)ctx->bytes); + nghttp2_session_consume(ctx->session->ngh2, stream->id, ctx->bytes); + ctx->updated = 1; + return 0; + } + return 1; +} + + +void h2_proxy_session_update_window(h2_proxy_session *session, + conn_rec *c, apr_off_t bytes) +{ + if (!h2_proxy_ihash_empty(session->streams)) { + win_update_ctx ctx; + ctx.session = session; + ctx.c = c; + ctx.bytes = bytes; + ctx.updated = 0; + h2_proxy_ihash_iter(session->streams, win_update_iter, &ctx); + + if (!ctx.updated) { + /* could not find the stream any more, possibly closed, update + * the connection window at least */ + ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, session->c, + "h2_proxy_session(%s): win_update conn %ld bytes", + session->id, (long)bytes); + nghttp2_session_consume_connection(session->ngh2, (size_t)bytes); + } + } +} + diff --git a/modules/http2/h2_proxy_session.h b/modules/http2/h2_proxy_session.h new file mode 100644 index 0000000..ecebb61 --- /dev/null +++ b/modules/http2/h2_proxy_session.h @@ -0,0 +1,128 @@ +/* Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef h2_proxy_session_h +#define h2_proxy_session_h + +#define H2_ALEN(a) (sizeof(a)/sizeof((a)[0])) + +#include <nghttp2/nghttp2.h> + +struct h2_proxy_iqueue; +struct h2_proxy_ihash_t; + +typedef enum { + H2_STREAM_ST_IDLE, + H2_STREAM_ST_OPEN, + H2_STREAM_ST_RESV_LOCAL, + H2_STREAM_ST_RESV_REMOTE, + H2_STREAM_ST_CLOSED_INPUT, + H2_STREAM_ST_CLOSED_OUTPUT, + H2_STREAM_ST_CLOSED, +} h2_proxy_stream_state_t; + +typedef enum { + H2_PROXYS_ST_INIT, /* send initial SETTINGS, etc. */ + H2_PROXYS_ST_DONE, /* finished, connection close */ + H2_PROXYS_ST_IDLE, /* no streams to process */ + H2_PROXYS_ST_BUSY, /* read/write without stop */ + H2_PROXYS_ST_WAIT, /* waiting for tasks reporting back */ + H2_PROXYS_ST_LOCAL_SHUTDOWN, /* we announced GOAWAY */ + H2_PROXYS_ST_REMOTE_SHUTDOWN, /* client announced GOAWAY */ +} h2_proxys_state; + +typedef enum { + H2_PROXYS_EV_INIT, /* session was initialized */ + H2_PROXYS_EV_LOCAL_GOAWAY, /* we send a GOAWAY */ + H2_PROXYS_EV_REMOTE_GOAWAY, /* remote send us a GOAWAY */ + H2_PROXYS_EV_CONN_ERROR, /* connection error */ + H2_PROXYS_EV_PROTO_ERROR, /* protocol error */ + H2_PROXYS_EV_CONN_TIMEOUT, /* connection timeout */ + H2_PROXYS_EV_NO_IO, /* nothing has been read or written */ + H2_PROXYS_EV_STREAM_SUBMITTED, /* stream has been submitted */ + H2_PROXYS_EV_STREAM_DONE, /* stream has been finished */ + H2_PROXYS_EV_STREAM_RESUMED, /* stream signalled availability of headers/data */ + H2_PROXYS_EV_DATA_READ, /* connection data has been read */ + H2_PROXYS_EV_NGH2_DONE, /* nghttp2 wants neither read nor write anything */ + H2_PROXYS_EV_PRE_CLOSE, /* connection will close after this */ +} h2_proxys_event_t; + + +typedef struct h2_proxy_session h2_proxy_session; +typedef void h2_proxy_request_done(h2_proxy_session *s, request_rec *r, + apr_status_t status, int touched); + +struct h2_proxy_session { + const char *id; + conn_rec *c; + proxy_conn_rec *p_conn; + proxy_server_conf *conf; + apr_pool_t *pool; + nghttp2_session *ngh2; /* the nghttp2 session itself */ + + unsigned int aborted : 1; + unsigned int check_ping : 1; + unsigned int h2_front : 1; /* if front-end connection is HTTP/2 */ + + h2_proxy_request_done *done; + void *user_data; + + unsigned char window_bits_stream; + unsigned char window_bits_connection; + + h2_proxys_state state; + apr_interval_time_t wait_timeout; + + struct h2_proxy_ihash_t *streams; + struct h2_proxy_iqueue *suspended; + apr_size_t remote_max_concurrent; + int last_stream_id; /* last stream id processed by backend, or 0 */ + apr_time_t last_frame_received; + + apr_bucket_brigade *input; + apr_bucket_brigade *output; +}; + +h2_proxy_session *h2_proxy_session_setup(const char *id, proxy_conn_rec *p_conn, + proxy_server_conf *conf, + int h2_front, + unsigned char window_bits_connection, + unsigned char window_bits_stream, + h2_proxy_request_done *done); + +apr_status_t h2_proxy_session_submit(h2_proxy_session *s, const char *url, + request_rec *r, int standalone); + +/** + * Perform a step in processing the proxy session. Will return aftert + * one read/write cycle and indicate session status by status code. 
+ * @param s the session to process + * @return APR_EAGAIN when processing needs to be invoked again + * APR_SUCCESS when all streams have been processed, session still live + * APR_EOF when the session has been terminated + */ +apr_status_t h2_proxy_session_process(h2_proxy_session *s); + +void h2_proxy_session_cancel_all(h2_proxy_session *s); + +void h2_proxy_session_cleanup(h2_proxy_session *s, h2_proxy_request_done *done); + +void h2_proxy_session_update_window(h2_proxy_session *s, + conn_rec *c, apr_off_t bytes); + +#define H2_PROXY_REQ_URL_NOTE "h2-proxy-req-url" + +#endif /* h2_proxy_session_h */ diff --git a/modules/http2/h2_proxy_util.c b/modules/http2/h2_proxy_util.c new file mode 100644 index 0000000..bd45294 --- /dev/null +++ b/modules/http2/h2_proxy_util.c @@ -0,0 +1,1335 @@ +/* Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include <assert.h> +#include <apr_lib.h> +#include <apr_strings.h> +#include <apr_thread_mutex.h> +#include <apr_thread_cond.h> + +#include <httpd.h> +#include <http_core.h> +#include <http_log.h> +#include <http_request.h> +#include <mod_proxy.h> + +#include <nghttp2/nghttp2.h> + +#include "h2.h" +#include "h2_proxy_util.h" + +APLOG_USE_MODULE(proxy_http2); + +/* h2_log2(n) iff n is a power of 2 */ +unsigned char h2_proxy_log2(int n) +{ + int lz = 0; + if (!n) { + return 0; + } + if (!(n & 0xffff0000u)) { + lz += 16; + n = (n << 16); + } + if (!(n & 0xff000000u)) { + lz += 8; + n = (n << 8); + } + if (!(n & 0xf0000000u)) { + lz += 4; + n = (n << 4); + } + if (!(n & 0xc0000000u)) { + lz += 2; + n = (n << 2); + } + if (!(n & 0x80000000u)) { + lz += 1; + } + + return 31 - lz; +} + +/******************************************************************************* + * ihash - hash for structs with int identifier + ******************************************************************************/ +struct h2_proxy_ihash_t { + apr_hash_t *hash; + size_t ioff; +}; + +static unsigned int ihash(const char *key, apr_ssize_t *klen) +{ + return (unsigned int)(*((int*)key)); +} + +h2_proxy_ihash_t *h2_proxy_ihash_create(apr_pool_t *pool, size_t offset_of_int) +{ + h2_proxy_ihash_t *ih = apr_pcalloc(pool, sizeof(h2_proxy_ihash_t)); + ih->hash = apr_hash_make_custom(pool, ihash); + ih->ioff = offset_of_int; + return ih; +} + +size_t h2_proxy_ihash_count(h2_proxy_ihash_t *ih) +{ + return apr_hash_count(ih->hash); +} + +int h2_proxy_ihash_empty(h2_proxy_ihash_t *ih) +{ + return apr_hash_count(ih->hash) == 0; +} + +void *h2_proxy_ihash_get(h2_proxy_ihash_t *ih, int id) +{ + return apr_hash_get(ih->hash, &id, sizeof(id)); +} + +typedef struct { + h2_proxy_ihash_iter_t *iter; + void *ctx; +} iter_ctx; + +static int ihash_iter(void *ctx, const void *key, apr_ssize_t klen, + const void *val) +{ + iter_ctx *ictx = ctx; + return 
ictx->iter(ictx->ctx, (void*)val); /* why is this passed const?*/ +} + +int h2_proxy_ihash_iter(h2_proxy_ihash_t *ih, h2_proxy_ihash_iter_t *fn, void *ctx) +{ + iter_ctx ictx; + ictx.iter = fn; + ictx.ctx = ctx; + return apr_hash_do(ihash_iter, &ictx, ih->hash); +} + +void h2_proxy_ihash_add(h2_proxy_ihash_t *ih, void *val) +{ + apr_hash_set(ih->hash, ((char *)val + ih->ioff), sizeof(int), val); +} + +void h2_proxy_ihash_remove(h2_proxy_ihash_t *ih, int id) +{ + apr_hash_set(ih->hash, &id, sizeof(id), NULL); +} + +void h2_proxy_ihash_remove_val(h2_proxy_ihash_t *ih, void *val) +{ + int id = *((int*)((char *)val + ih->ioff)); + apr_hash_set(ih->hash, &id, sizeof(id), NULL); +} + + +void h2_proxy_ihash_clear(h2_proxy_ihash_t *ih) +{ + apr_hash_clear(ih->hash); +} + +typedef struct { + h2_proxy_ihash_t *ih; + void **buffer; + size_t max; + size_t len; +} collect_ctx; + +static int collect_iter(void *x, void *val) +{ + collect_ctx *ctx = x; + if (ctx->len < ctx->max) { + ctx->buffer[ctx->len++] = val; + return 1; + } + return 0; +} + +size_t h2_proxy_ihash_shift(h2_proxy_ihash_t *ih, void **buffer, size_t max) +{ + collect_ctx ctx; + size_t i; + + ctx.ih = ih; + ctx.buffer = buffer; + ctx.max = max; + ctx.len = 0; + h2_proxy_ihash_iter(ih, collect_iter, &ctx); + for (i = 0; i < ctx.len; ++i) { + h2_proxy_ihash_remove_val(ih, buffer[i]); + } + return ctx.len; +} + +typedef struct { + h2_proxy_ihash_t *ih; + int *buffer; + size_t max; + size_t len; +} icollect_ctx; + +static int icollect_iter(void *x, void *val) +{ + icollect_ctx *ctx = x; + if (ctx->len < ctx->max) { + ctx->buffer[ctx->len++] = *((int*)((char *)val + ctx->ih->ioff)); + return 1; + } + return 0; +} + +size_t h2_proxy_ihash_ishift(h2_proxy_ihash_t *ih, int *buffer, size_t max) +{ + icollect_ctx ctx; + size_t i; + + ctx.ih = ih; + ctx.buffer = buffer; + ctx.max = max; + ctx.len = 0; + h2_proxy_ihash_iter(ih, icollect_iter, &ctx); + for (i = 0; i < ctx.len; ++i) { + h2_proxy_ihash_remove(ih, buffer[i]); + } + return ctx.len; +} + +/******************************************************************************* + * iqueue - sorted list of int + ******************************************************************************/ + +static void iq_grow(h2_proxy_iqueue *q, int nlen); +static void iq_swap(h2_proxy_iqueue *q, int i, int j); +static int iq_bubble_up(h2_proxy_iqueue *q, int i, int top, + h2_proxy_iq_cmp *cmp, void *ctx); +static int iq_bubble_down(h2_proxy_iqueue *q, int i, int bottom, + h2_proxy_iq_cmp *cmp, void *ctx); + +h2_proxy_iqueue *h2_proxy_iq_create(apr_pool_t *pool, int capacity) +{ + h2_proxy_iqueue *q = apr_pcalloc(pool, sizeof(h2_proxy_iqueue)); + if (q) { + q->pool = pool; + iq_grow(q, capacity); + q->nelts = 0; + } + return q; +} + +int h2_proxy_iq_empty(h2_proxy_iqueue *q) +{ + return q->nelts == 0; +} + +int h2_proxy_iq_count(h2_proxy_iqueue *q) +{ + return q->nelts; +} + + +void h2_proxy_iq_add(h2_proxy_iqueue *q, int sid, h2_proxy_iq_cmp *cmp, void *ctx) +{ + int i; + + if (q->nelts >= q->nalloc) { + iq_grow(q, q->nalloc * 2); + } + + i = (q->head + q->nelts) % q->nalloc; + q->elts[i] = sid; + ++q->nelts; + + if (cmp) { + /* bubble it to the front of the queue */ + iq_bubble_up(q, i, q->head, cmp, ctx); + } +} + +int h2_proxy_iq_remove(h2_proxy_iqueue *q, int sid) +{ + int i; + for (i = 0; i < q->nelts; ++i) { + if (sid == q->elts[(q->head + i) % q->nalloc]) { + break; + } + } + + if (i < q->nelts) { + ++i; + for (; i < q->nelts; ++i) { + q->elts[(q->head+i-1)%q->nalloc] = q->elts[(q->head+i)%q->nalloc]; + } 
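/* the id was found: the loop above already moved every later element one
 * slot towards the head, overwriting the removed entry, so reducing the
 * count below completes the removal */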
+ --q->nelts; + return 1; + } + return 0; +} + +void h2_proxy_iq_clear(h2_proxy_iqueue *q) +{ + q->nelts = 0; +} + +void h2_proxy_iq_sort(h2_proxy_iqueue *q, h2_proxy_iq_cmp *cmp, void *ctx) +{ + /* Assume that changes in ordering are minimal. This needs, + * best case, q->nelts - 1 comparisons to check that nothing + * changed. + */ + if (q->nelts > 0) { + int i, ni, prev, last; + + /* Start at the end of the queue and create a tail of sorted + * entries. Make that tail one element longer in each iteration. + */ + last = i = (q->head + q->nelts - 1) % q->nalloc; + while (i != q->head) { + prev = (q->nalloc + i - 1) % q->nalloc; + + ni = iq_bubble_up(q, i, prev, cmp, ctx); + if (ni == prev) { + /* i bubbled one up, bubble the new i down, which + * keeps all tasks below i sorted. */ + iq_bubble_down(q, i, last, cmp, ctx); + } + i = prev; + }; + } +} + + +int h2_proxy_iq_shift(h2_proxy_iqueue *q) +{ + int sid; + + if (q->nelts <= 0) { + return 0; + } + + sid = q->elts[q->head]; + q->head = (q->head + 1) % q->nalloc; + q->nelts--; + + return sid; +} + +static void iq_grow(h2_proxy_iqueue *q, int nlen) +{ + if (nlen > q->nalloc) { + int *nq = apr_pcalloc(q->pool, sizeof(int) * nlen); + if (q->nelts > 0) { + int l = ((q->head + q->nelts) % q->nalloc) - q->head; + + memmove(nq, q->elts + q->head, sizeof(int) * l); + if (l < q->nelts) { + /* elts wrapped, append elts in [0, remain] to nq */ + int remain = q->nelts - l; + memmove(nq + l, q->elts, sizeof(int) * remain); + } + } + q->elts = nq; + q->nalloc = nlen; + q->head = 0; + } +} + +static void iq_swap(h2_proxy_iqueue *q, int i, int j) +{ + int x = q->elts[i]; + q->elts[i] = q->elts[j]; + q->elts[j] = x; +} + +static int iq_bubble_up(h2_proxy_iqueue *q, int i, int top, + h2_proxy_iq_cmp *cmp, void *ctx) +{ + int prev; + while (((prev = (q->nalloc + i - 1) % q->nalloc), i != top) + && (*cmp)(q->elts[i], q->elts[prev], ctx) < 0) { + iq_swap(q, prev, i); + i = prev; + } + return i; +} + +static int iq_bubble_down(h2_proxy_iqueue *q, int i, int bottom, + h2_proxy_iq_cmp *cmp, void *ctx) +{ + int next; + while (((next = (q->nalloc + i + 1) % q->nalloc), i != bottom) + && (*cmp)(q->elts[i], q->elts[next], ctx) > 0) { + iq_swap(q, next, i); + i = next; + } + return i; +} + +/******************************************************************************* + * h2_proxy_ngheader + ******************************************************************************/ +#define H2_HD_MATCH_LIT_CS(l, name) \ + ((strlen(name) == sizeof(l) - 1) && !apr_strnatcasecmp(l, name)) + +static int h2_util_ignore_header(const char *name) +{ + /* never forward, ch. 
8.1.2.2 */ + return (H2_HD_MATCH_LIT_CS("connection", name) + || H2_HD_MATCH_LIT_CS("proxy-connection", name) + || H2_HD_MATCH_LIT_CS("upgrade", name) + || H2_HD_MATCH_LIT_CS("keep-alive", name) + || H2_HD_MATCH_LIT_CS("transfer-encoding", name)); +} + +static int count_header(void *ctx, const char *key, const char *value) +{ + if (!h2_util_ignore_header(key)) { + (*((size_t*)ctx))++; + } + return 1; +} + +#define NV_ADD_LIT_CS(nv, k, v) add_header(nv, k, sizeof(k) - 1, v, strlen(v)) +#define NV_ADD_CS_CS(nv, k, v) add_header(nv, k, strlen(k), v, strlen(v)) + +static int add_header(h2_proxy_ngheader *ngh, + const char *key, size_t key_len, + const char *value, size_t val_len) +{ + nghttp2_nv *nv = &ngh->nv[ngh->nvlen++]; + + nv->name = (uint8_t*)key; + nv->namelen = key_len; + nv->value = (uint8_t*)value; + nv->valuelen = val_len; + return 1; +} + +static int add_table_header(void *ctx, const char *key, const char *value) +{ + if (!h2_util_ignore_header(key)) { + add_header(ctx, key, strlen(key), value, strlen(value)); + } + return 1; +} + +h2_proxy_ngheader *h2_proxy_util_nghd_make_req(apr_pool_t *p, + const h2_proxy_request *req) +{ + + h2_proxy_ngheader *ngh; + size_t n; + + ap_assert(req); + ap_assert(req->scheme); + ap_assert(req->authority); + ap_assert(req->path); + ap_assert(req->method); + + n = 4; + apr_table_do(count_header, &n, req->headers, NULL); + + ngh = apr_pcalloc(p, sizeof(h2_proxy_ngheader)); + ngh->nv = apr_pcalloc(p, n * sizeof(nghttp2_nv)); + NV_ADD_LIT_CS(ngh, ":scheme", req->scheme); + NV_ADD_LIT_CS(ngh, ":authority", req->authority); + NV_ADD_LIT_CS(ngh, ":path", req->path); + NV_ADD_LIT_CS(ngh, ":method", req->method); + apr_table_do(add_table_header, ngh, req->headers, NULL); + + return ngh; +} + +/******************************************************************************* + * header HTTP/1 <-> HTTP/2 conversions + ******************************************************************************/ + +typedef struct { + const char *name; + size_t len; +} literal; + +#define H2_DEF_LITERAL(n) { (n), (sizeof(n)-1) } +#define H2_LIT_ARGS(a) (a),H2_ALEN(a) + +static literal IgnoredRequestHeaders[] = { + H2_DEF_LITERAL("upgrade"), + H2_DEF_LITERAL("connection"), + H2_DEF_LITERAL("keep-alive"), + H2_DEF_LITERAL("http2-settings"), + H2_DEF_LITERAL("proxy-connection"), + H2_DEF_LITERAL("transfer-encoding"), +}; +static literal IgnoredProxyRespHds[] = { + H2_DEF_LITERAL("alt-svc"), +}; + +static int ignore_header(const literal *lits, size_t llen, + const char *name, size_t nlen) +{ + const literal *lit; + int i; + + for (i = 0; i < llen; ++i) { + lit = &lits[i]; + if (lit->len == nlen && !apr_strnatcasecmp(lit->name, name)) { + return 1; + } + } + return 0; +} + +static int h2_proxy_req_ignore_header(const char *name, size_t len) +{ + return ignore_header(H2_LIT_ARGS(IgnoredRequestHeaders), name, len); +} + +int h2_proxy_res_ignore_header(const char *name, size_t len) +{ + return (h2_proxy_req_ignore_header(name, len) + || ignore_header(H2_LIT_ARGS(IgnoredProxyRespHds), name, len)); +} + +void h2_proxy_util_camel_case_header(char *s, size_t len) +{ + size_t start = 1; + size_t i; + for (i = 0; i < len; ++i) { + if (start) { + if (s[i] >= 'a' && s[i] <= 'z') { + s[i] -= 'a' - 'A'; + } + + start = 0; + } + else if (s[i] == '-') { + start = 1; + } + } +} + +/******************************************************************************* + * h2 request handling + ******************************************************************************/ + +/** Match a header value 
against a string constance, case insensitive */ +#define H2_HD_MATCH_LIT(l, name, nlen) \ + ((nlen == sizeof(l) - 1) && !apr_strnatcasecmp(l, name)) + +static apr_status_t h2_headers_add_h1(apr_table_t *headers, apr_pool_t *pool, + const char *name, size_t nlen, + const char *value, size_t vlen) +{ + char *hname, *hvalue; + + if (h2_proxy_req_ignore_header(name, nlen)) { + return APR_SUCCESS; + } + else if (H2_HD_MATCH_LIT("cookie", name, nlen)) { + const char *existing = apr_table_get(headers, "cookie"); + if (existing) { + char *nval; + + /* Cookie header come separately in HTTP/2, but need + * to be merged by "; " (instead of default ", ") + */ + hvalue = apr_pstrndup(pool, value, vlen); + nval = apr_psprintf(pool, "%s; %s", existing, hvalue); + apr_table_setn(headers, "Cookie", nval); + return APR_SUCCESS; + } + } + else if (H2_HD_MATCH_LIT("host", name, nlen)) { + if (apr_table_get(headers, "Host")) { + return APR_SUCCESS; /* ignore duplicate */ + } + } + + hname = apr_pstrndup(pool, name, nlen); + hvalue = apr_pstrndup(pool, value, vlen); + h2_proxy_util_camel_case_header(hname, nlen); + apr_table_mergen(headers, hname, hvalue); + + return APR_SUCCESS; +} + +static h2_proxy_request *h2_proxy_req_createn(int id, apr_pool_t *pool, const char *method, + const char *scheme, const char *authority, + const char *path, apr_table_t *header, + int serialize) +{ + h2_proxy_request *req = apr_pcalloc(pool, sizeof(h2_proxy_request)); + + req->method = method; + req->scheme = scheme; + req->authority = authority; + req->path = path; + req->headers = header? header : apr_table_make(pool, 10); + req->request_time = apr_time_now(); + req->serialize = serialize; + + return req; +} + +h2_proxy_request *h2_proxy_req_create(int id, apr_pool_t *pool, int serialize) +{ + return h2_proxy_req_createn(id, pool, NULL, NULL, NULL, NULL, NULL, serialize); +} + +typedef struct { + apr_table_t *headers; + apr_pool_t *pool; +} h1_ctx; + +static int set_h1_header(void *ctx, const char *key, const char *value) +{ + h1_ctx *x = ctx; + size_t klen = strlen(key); + if (!h2_proxy_req_ignore_header(key, klen)) { + h2_headers_add_h1(x->headers, x->pool, key, klen, value, strlen(value)); + } + return 1; +} + +apr_status_t h2_proxy_req_make(h2_proxy_request *req, apr_pool_t *pool, + const char *method, const char *scheme, + const char *authority, const char *path, + apr_table_t *headers) +{ + h1_ctx x; + + req->method = method; + req->scheme = scheme; + req->authority = authority; + req->path = path; + + ap_assert(req->scheme); + ap_assert(req->authority); + ap_assert(req->path); + ap_assert(req->method); + + x.pool = pool; + x.headers = req->headers; + apr_table_do(set_h1_header, &x, headers, NULL); + return APR_SUCCESS; +} + +/******************************************************************************* + * frame logging + ******************************************************************************/ + +int h2_proxy_util_frame_print(const nghttp2_frame *frame, char *buffer, size_t maxlen) +{ + char scratch[128]; + size_t s_len = sizeof(scratch)/sizeof(scratch[0]); + + switch (frame->hd.type) { + case NGHTTP2_DATA: { + return apr_snprintf(buffer, maxlen, + "DATA[length=%d, flags=%d, stream=%d, padlen=%d]", + (int)frame->hd.length, frame->hd.flags, + frame->hd.stream_id, (int)frame->data.padlen); + } + case NGHTTP2_HEADERS: { + return apr_snprintf(buffer, maxlen, + "HEADERS[length=%d, hend=%d, stream=%d, eos=%d]", + (int)frame->hd.length, + !!(frame->hd.flags & NGHTTP2_FLAG_END_HEADERS), + frame->hd.stream_id, + 
!!(frame->hd.flags & NGHTTP2_FLAG_END_STREAM)); + } + case NGHTTP2_PRIORITY: { + return apr_snprintf(buffer, maxlen, + "PRIORITY[length=%d, flags=%d, stream=%d]", + (int)frame->hd.length, + frame->hd.flags, frame->hd.stream_id); + } + case NGHTTP2_RST_STREAM: { + return apr_snprintf(buffer, maxlen, + "RST_STREAM[length=%d, flags=%d, stream=%d]", + (int)frame->hd.length, + frame->hd.flags, frame->hd.stream_id); + } + case NGHTTP2_SETTINGS: { + if (frame->hd.flags & NGHTTP2_FLAG_ACK) { + return apr_snprintf(buffer, maxlen, + "SETTINGS[ack=1, stream=%d]", + frame->hd.stream_id); + } + return apr_snprintf(buffer, maxlen, + "SETTINGS[length=%d, stream=%d]", + (int)frame->hd.length, frame->hd.stream_id); + } + case NGHTTP2_PUSH_PROMISE: { + return apr_snprintf(buffer, maxlen, + "PUSH_PROMISE[length=%d, hend=%d, stream=%d]", + (int)frame->hd.length, + !!(frame->hd.flags & NGHTTP2_FLAG_END_HEADERS), + frame->hd.stream_id); + } + case NGHTTP2_PING: { + return apr_snprintf(buffer, maxlen, + "PING[length=%d, ack=%d, stream=%d]", + (int)frame->hd.length, + frame->hd.flags&NGHTTP2_FLAG_ACK, + frame->hd.stream_id); + } + case NGHTTP2_GOAWAY: { + size_t len = (frame->goaway.opaque_data_len < s_len)? + frame->goaway.opaque_data_len : s_len-1; + memcpy(scratch, frame->goaway.opaque_data, len); + scratch[len] = '\0'; + return apr_snprintf(buffer, maxlen, "GOAWAY[error=%d, reason='%s', " + "last_stream=%d]", frame->goaway.error_code, + scratch, frame->goaway.last_stream_id); + } + case NGHTTP2_WINDOW_UPDATE: { + return apr_snprintf(buffer, maxlen, + "WINDOW_UPDATE[stream=%d, incr=%d]", + frame->hd.stream_id, + frame->window_update.window_size_increment); + } + default: + return apr_snprintf(buffer, maxlen, + "type=%d[length=%d, flags=%d, stream=%d]", + frame->hd.type, (int)frame->hd.length, + frame->hd.flags, frame->hd.stream_id); + } +} + +/******************************************************************************* + * link header handling + ******************************************************************************/ + +typedef struct { + apr_pool_t *pool; + request_rec *r; + proxy_dir_conf *conf; + const char *s; + int slen; + int i; + const char *server_uri; + int su_len; + const char *real_backend_uri; + int rbu_len; + const char *p_server_uri; + int psu_len; + int link_start; + int link_end; +} link_ctx; + +static int attr_char(char c) +{ + switch (c) { + case '!': + case '#': + case '$': + case '&': + case '+': + case '-': + case '.': + case '^': + case '_': + case '`': + case '|': + case '~': + return 1; + default: + return apr_isalnum(c); + } +} + +static int ptoken_char(char c) +{ + switch (c) { + case '!': + case '#': + case '$': + case '&': + case '\'': + case '(': + case ')': + case '*': + case '+': + case '-': + case '.': + case '/': + case ':': + case '<': + case '=': + case '>': + case '?': + case '@': + case '[': + case ']': + case '^': + case '_': + case '`': + case '{': + case '|': + case '}': + case '~': + return 1; + default: + return apr_isalnum(c); + } +} + +static int skip_ws(link_ctx *ctx) +{ + char c; + while (ctx->i < ctx->slen + && (((c = ctx->s[ctx->i]) == ' ') || (c == '\t'))) { + ++ctx->i; + } + return (ctx->i < ctx->slen); +} + +static int find_chr(link_ctx *ctx, char c, int *pidx) +{ + int j; + for (j = ctx->i; j < ctx->slen; ++j) { + if (ctx->s[j] == c) { + *pidx = j; + return 1; + } + } + return 0; +} + +static int read_chr(link_ctx *ctx, char c) +{ + if (ctx->i < ctx->slen && ctx->s[ctx->i] == c) { + ++ctx->i; + return 1; + } + return 0; +} + +static int 
skip_qstring(link_ctx *ctx) +{ + if (skip_ws(ctx) && read_chr(ctx, '\"')) { + int end; + if (find_chr(ctx, '\"', &end)) { + ctx->i = end + 1; + return 1; + } + } + return 0; +} + +static int skip_ptoken(link_ctx *ctx) +{ + if (skip_ws(ctx)) { + int i; + for (i = ctx->i; i < ctx->slen && ptoken_char(ctx->s[i]); ++i) { + /* nop */ + } + if (i > ctx->i) { + ctx->i = i; + return 1; + } + } + return 0; +} + + +static int read_link(link_ctx *ctx) +{ + ctx->link_start = ctx->link_end = 0; + if (skip_ws(ctx) && read_chr(ctx, '<')) { + int end; + if (find_chr(ctx, '>', &end)) { + ctx->link_start = ctx->i; + ctx->link_end = end; + ctx->i = end + 1; + return 1; + } + } + return 0; +} + +static int skip_pname(link_ctx *ctx) +{ + if (skip_ws(ctx)) { + int i; + for (i = ctx->i; i < ctx->slen && attr_char(ctx->s[i]); ++i) { + /* nop */ + } + if (i > ctx->i) { + ctx->i = i; + return 1; + } + } + return 0; +} + +static int skip_pvalue(link_ctx *ctx) +{ + if (skip_ws(ctx) && read_chr(ctx, '=')) { + if (skip_qstring(ctx) || skip_ptoken(ctx)) { + return 1; + } + } + return 0; +} + +static int skip_param(link_ctx *ctx) +{ + if (skip_ws(ctx) && read_chr(ctx, ';')) { + if (skip_pname(ctx)) { + skip_pvalue(ctx); /* value is optional */ + return 1; + } + } + return 0; +} + +static int read_sep(link_ctx *ctx) +{ + if (skip_ws(ctx) && read_chr(ctx, ',')) { + return 1; + } + return 0; +} + +static size_t subst_str(link_ctx *ctx, int start, int end, const char *ns) +{ + int olen, nlen, plen; + int delta; + char *p; + + olen = end - start; + nlen = (int)strlen(ns); + delta = nlen - olen; + plen = ctx->slen + delta + 1; + p = apr_pcalloc(ctx->pool, plen); + memcpy(p, ctx->s, start); + memcpy(p + start, ns, nlen); + strcpy(p + start + nlen, ctx->s + end); + ctx->s = p; + ctx->slen = (int)strlen(p); + if (ctx->i >= end) { + ctx->i += delta; + } + return nlen; +} + +static void map_link(link_ctx *ctx) +{ + if (ctx->link_start < ctx->link_end) { + char buffer[HUGE_STRING_LEN]; + int need_len, link_len, buffer_len, prepend_p_server; + const char *mapped; + + buffer[0] = '\0'; + buffer_len = 0; + link_len = ctx->link_end - ctx->link_start; + need_len = link_len + 1; + prepend_p_server = (ctx->s[ctx->link_start] == '/'); + if (prepend_p_server) { + /* common to use relative uris in link header, for mappings + * to work need to prefix the backend server uri */ + need_len += ctx->psu_len; + apr_cpystrn(buffer, ctx->p_server_uri, sizeof(buffer)); + buffer_len = ctx->psu_len; + } + if (need_len > sizeof(buffer)) { + ap_log_rerror(APLOG_MARK, APLOG_WARNING, 0, ctx->r, APLOGNO(03482) + "link_reverse_map uri too long, skipped: %s", ctx->s); + return; + } + apr_cpystrn(buffer + buffer_len, ctx->s + ctx->link_start, link_len + 1); + if (!prepend_p_server + && strcmp(ctx->real_backend_uri, ctx->p_server_uri) + && !strncmp(buffer, ctx->real_backend_uri, ctx->rbu_len)) { + /* the server uri and our local proxy uri we use differ, for mapping + * to work, we need to use the proxy uri */ + int path_start = ctx->link_start + ctx->rbu_len; + link_len -= ctx->rbu_len; + memcpy(buffer, ctx->p_server_uri, ctx->psu_len); + memcpy(buffer + ctx->psu_len, ctx->s + path_start, link_len); + buffer_len = ctx->psu_len + link_len; + buffer[buffer_len] = '\0'; + } + mapped = ap_proxy_location_reverse_map(ctx->r, ctx->conf, buffer); + ap_log_rerror(APLOG_MARK, APLOG_TRACE2, 0, ctx->r, + "reverse_map[%s] %s --> %s", ctx->p_server_uri, buffer, mapped); + if (mapped != buffer) { + if (prepend_p_server) { + if (ctx->server_uri == NULL) { + ctx->server_uri = 
ap_construct_url(ctx->pool, "", ctx->r); + ctx->su_len = (int)strlen(ctx->server_uri); + } + if (!strncmp(mapped, ctx->server_uri, ctx->su_len)) { + mapped += ctx->su_len; + } + } + subst_str(ctx, ctx->link_start, ctx->link_end, mapped); + } + } +} + +/* RFC 5988 <https://tools.ietf.org/html/rfc5988#section-6.2.1> + Link = "Link" ":" #link-value + link-value = "<" URI-Reference ">" *( ";" link-param ) + link-param = ( ( "rel" "=" relation-types ) + | ( "anchor" "=" <"> URI-Reference <"> ) + | ( "rev" "=" relation-types ) + | ( "hreflang" "=" Language-Tag ) + | ( "media" "=" ( MediaDesc | ( <"> MediaDesc <"> ) ) ) + | ( "title" "=" quoted-string ) + | ( "title*" "=" ext-value ) + | ( "type" "=" ( media-type | quoted-mt ) ) + | ( link-extension ) ) + link-extension = ( parmname [ "=" ( ptoken | quoted-string ) ] ) + | ( ext-name-star "=" ext-value ) + ext-name-star = parmname "*" ; reserved for RFC2231-profiled + ; extensions. Whitespace NOT + ; allowed in between. + ptoken = 1*ptokenchar + ptokenchar = "!" | "#" | "$" | "%" | "&" | "'" | "(" + | ")" | "*" | "+" | "-" | "." | "/" | DIGIT + | ":" | "<" | "=" | ">" | "?" | "@" | ALPHA + | "[" | "]" | "^" | "_" | "`" | "{" | "|" + | "}" | "~" + media-type = type-name "/" subtype-name + quoted-mt = <"> media-type <"> + relation-types = relation-type + | <"> relation-type *( 1*SP relation-type ) <"> + relation-type = reg-rel-type | ext-rel-type + reg-rel-type = LOALPHA *( LOALPHA | DIGIT | "." | "-" ) + ext-rel-type = URI + + and from <https://tools.ietf.org/html/rfc5987> + parmname = 1*attr-char + attr-char = ALPHA / DIGIT + / "!" / "#" / "$" / "&" / "+" / "-" / "." + / "^" / "_" / "`" / "|" / "~" + */ + +const char *h2_proxy_link_reverse_map(request_rec *r, + proxy_dir_conf *conf, + const char *real_backend_uri, + const char *proxy_server_uri, + const char *s) +{ + link_ctx ctx; + + if (r->proxyreq != PROXYREQ_REVERSE) { + return s; + } + memset(&ctx, 0, sizeof(ctx)); + ctx.r = r; + ctx.pool = r->pool; + ctx.conf = conf; + ctx.real_backend_uri = real_backend_uri; + ctx.rbu_len = (int)strlen(ctx.real_backend_uri); + ctx.p_server_uri = proxy_server_uri; + ctx.psu_len = (int)strlen(ctx.p_server_uri); + ctx.s = s; + ctx.slen = (int)strlen(s); + while (read_link(&ctx)) { + while (skip_param(&ctx)) { + /* nop */ + } + map_link(&ctx); + if (!read_sep(&ctx)) { + break; + } + } + ap_log_rerror(APLOG_MARK, APLOG_TRACE2, 0, r, + "link_reverse_map %s --> %s", s, ctx.s); + return ctx.s; +} + +/******************************************************************************* + * FIFO queue + ******************************************************************************/ + +struct h2_proxy_fifo { + void **elems; + int nelems; + int set; + int head; + int count; + int aborted; + apr_thread_mutex_t *lock; + apr_thread_cond_t *not_empty; + apr_thread_cond_t *not_full; +}; + +static int nth_index(h2_proxy_fifo *fifo, int n) +{ + return (fifo->head + n) % fifo->nelems; +} + +static apr_status_t fifo_destroy(void *data) +{ + h2_proxy_fifo *fifo = data; + + apr_thread_cond_destroy(fifo->not_empty); + apr_thread_cond_destroy(fifo->not_full); + apr_thread_mutex_destroy(fifo->lock); + + return APR_SUCCESS; +} + +static int index_of(h2_proxy_fifo *fifo, void *elem) +{ + int i; + + for (i = 0; i < fifo->count; ++i) { + if (elem == fifo->elems[nth_index(fifo, i)]) { + return i; + } + } + return -1; +} + +static apr_status_t create_int(h2_proxy_fifo **pfifo, apr_pool_t *pool, + int capacity, int as_set) +{ + apr_status_t rv; + h2_proxy_fifo *fifo; + + fifo = 
apr_pcalloc(pool, sizeof(*fifo)); + if (fifo == NULL) { + return APR_ENOMEM; + } + + rv = apr_thread_mutex_create(&fifo->lock, + APR_THREAD_MUTEX_UNNESTED, pool); + if (rv != APR_SUCCESS) { + return rv; + } + + rv = apr_thread_cond_create(&fifo->not_empty, pool); + if (rv != APR_SUCCESS) { + return rv; + } + + rv = apr_thread_cond_create(&fifo->not_full, pool); + if (rv != APR_SUCCESS) { + return rv; + } + + fifo->elems = apr_pcalloc(pool, capacity * sizeof(void*)); + if (fifo->elems == NULL) { + return APR_ENOMEM; + } + fifo->nelems = capacity; + fifo->set = as_set; + + *pfifo = fifo; + apr_pool_cleanup_register(pool, fifo, fifo_destroy, apr_pool_cleanup_null); + + return APR_SUCCESS; +} + +apr_status_t h2_proxy_fifo_create(h2_proxy_fifo **pfifo, apr_pool_t *pool, int capacity) +{ + return create_int(pfifo, pool, capacity, 0); +} + +apr_status_t h2_proxy_fifo_set_create(h2_proxy_fifo **pfifo, apr_pool_t *pool, int capacity) +{ + return create_int(pfifo, pool, capacity, 1); +} + +apr_status_t h2_proxy_fifo_term(h2_proxy_fifo *fifo) +{ + apr_status_t rv; + if ((rv = apr_thread_mutex_lock(fifo->lock)) == APR_SUCCESS) { + fifo->aborted = 1; + apr_thread_mutex_unlock(fifo->lock); + } + return rv; +} + +apr_status_t h2_proxy_fifo_interrupt(h2_proxy_fifo *fifo) +{ + apr_status_t rv; + if ((rv = apr_thread_mutex_lock(fifo->lock)) == APR_SUCCESS) { + apr_thread_cond_broadcast(fifo->not_empty); + apr_thread_cond_broadcast(fifo->not_full); + apr_thread_mutex_unlock(fifo->lock); + } + return rv; +} + +int h2_proxy_fifo_count(h2_proxy_fifo *fifo) +{ + return fifo->count; +} + +int h2_proxy_fifo_capacity(h2_proxy_fifo *fifo) +{ + return fifo->nelems; +} + +static apr_status_t check_not_empty(h2_proxy_fifo *fifo, int block) +{ + if (fifo->count == 0) { + if (!block) { + return APR_EAGAIN; + } + while (fifo->count == 0) { + if (fifo->aborted) { + return APR_EOF; + } + apr_thread_cond_wait(fifo->not_empty, fifo->lock); + } + } + return APR_SUCCESS; +} + +static apr_status_t fifo_push(h2_proxy_fifo *fifo, void *elem, int block) +{ + apr_status_t rv; + + if (fifo->aborted) { + return APR_EOF; + } + + if ((rv = apr_thread_mutex_lock(fifo->lock)) == APR_SUCCESS) { + if (fifo->set && index_of(fifo, elem) >= 0) { + /* set mode, elem already member */ + apr_thread_mutex_unlock(fifo->lock); + return APR_EEXIST; + } + else if (fifo->count == fifo->nelems) { + if (block) { + while (fifo->count == fifo->nelems) { + if (fifo->aborted) { + apr_thread_mutex_unlock(fifo->lock); + return APR_EOF; + } + apr_thread_cond_wait(fifo->not_full, fifo->lock); + } + } + else { + apr_thread_mutex_unlock(fifo->lock); + return APR_EAGAIN; + } + } + + ap_assert(fifo->count < fifo->nelems); + fifo->elems[nth_index(fifo, fifo->count)] = elem; + ++fifo->count; + if (fifo->count == 1) { + apr_thread_cond_broadcast(fifo->not_empty); + } + apr_thread_mutex_unlock(fifo->lock); + } + return rv; +} + +apr_status_t h2_proxy_fifo_push(h2_proxy_fifo *fifo, void *elem) +{ + return fifo_push(fifo, elem, 1); +} + +apr_status_t h2_proxy_fifo_try_push(h2_proxy_fifo *fifo, void *elem) +{ + return fifo_push(fifo, elem, 0); +} + +static void *pull_head(h2_proxy_fifo *fifo) +{ + void *elem; + + ap_assert(fifo->count > 0); + elem = fifo->elems[fifo->head]; + --fifo->count; + if (fifo->count > 0) { + fifo->head = nth_index(fifo, 1); + if (fifo->count+1 == fifo->nelems) { + apr_thread_cond_broadcast(fifo->not_full); + } + } + return elem; +} + +static apr_status_t fifo_pull(h2_proxy_fifo *fifo, void **pelem, int block) +{ + apr_status_t rv; + + if 
(fifo->aborted) { + return APR_EOF; + } + + if ((rv = apr_thread_mutex_lock(fifo->lock)) == APR_SUCCESS) { + if ((rv = check_not_empty(fifo, block)) != APR_SUCCESS) { + apr_thread_mutex_unlock(fifo->lock); + *pelem = NULL; + return rv; + } + + ap_assert(fifo->count > 0); + *pelem = pull_head(fifo); + + apr_thread_mutex_unlock(fifo->lock); + } + return rv; +} + +apr_status_t h2_proxy_fifo_pull(h2_proxy_fifo *fifo, void **pelem) +{ + return fifo_pull(fifo, pelem, 1); +} + +apr_status_t h2_proxy_fifo_try_pull(h2_proxy_fifo *fifo, void **pelem) +{ + return fifo_pull(fifo, pelem, 0); +} + +apr_status_t h2_proxy_fifo_remove(h2_proxy_fifo *fifo, void *elem) +{ + apr_status_t rv; + + if (fifo->aborted) { + return APR_EOF; + } + + if ((rv = apr_thread_mutex_lock(fifo->lock)) == APR_SUCCESS) { + int i, rc; + void *e; + + rc = 0; + for (i = 0; i < fifo->count; ++i) { + e = fifo->elems[nth_index(fifo, i)]; + if (e == elem) { + ++rc; + } + else if (rc) { + fifo->elems[nth_index(fifo, i-rc)] = e; + } + } + if (rc) { + fifo->count -= rc; + if (fifo->count + rc == fifo->nelems) { + apr_thread_cond_broadcast(fifo->not_full); + } + rv = APR_SUCCESS; + } + else { + rv = APR_EAGAIN; + } + + apr_thread_mutex_unlock(fifo->lock); + } + return rv; +} diff --git a/modules/http2/h2_proxy_util.h b/modules/http2/h2_proxy_util.h new file mode 100644 index 0000000..a88fb7e --- /dev/null +++ b/modules/http2/h2_proxy_util.h @@ -0,0 +1,256 @@ +/* Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef __mod_h2__h2_proxy_util__ +#define __mod_h2__h2_proxy_util__ + +/******************************************************************************* + * some debugging/format helpers + ******************************************************************************/ +struct h2_proxy_request; +struct nghttp2_frame; + +int h2_proxy_util_frame_print(const nghttp2_frame *frame, char *buffer, size_t maxlen); + +/******************************************************************************* + * ihash - hash for structs with int identifier + ******************************************************************************/ +typedef struct h2_proxy_ihash_t h2_proxy_ihash_t; +typedef int h2_proxy_ihash_iter_t(void *ctx, void *val); + +/** + * Create a hash for structures that have an identifying int member. + * @param pool the pool to use + * @param offset_of_int the offsetof() the int member in the struct + */ +h2_proxy_ihash_t *h2_proxy_ihash_create(apr_pool_t *pool, size_t offset_of_int); + +size_t h2_proxy_ihash_count(h2_proxy_ihash_t *ih); +int h2_proxy_ihash_empty(h2_proxy_ihash_t *ih); +void *h2_proxy_ihash_get(h2_proxy_ihash_t *ih, int id); + +/** + * Iterate over the hash members (without defined order) and invoke + * fn for each member until 0 is returned. 
+ * @param ih the hash to iterate over + * @param fn the function to invoke on each member + * @param ctx user supplied data passed into each iteration call + * @return 0 if one iteration returned 0, otherwise != 0 + */ +int h2_proxy_ihash_iter(h2_proxy_ihash_t *ih, h2_proxy_ihash_iter_t *fn, void *ctx); + +void h2_proxy_ihash_add(h2_proxy_ihash_t *ih, void *val); +void h2_proxy_ihash_remove(h2_proxy_ihash_t *ih, int id); +void h2_proxy_ihash_remove_val(h2_proxy_ihash_t *ih, void *val); +void h2_proxy_ihash_clear(h2_proxy_ihash_t *ih); + +size_t h2_proxy_ihash_shift(h2_proxy_ihash_t *ih, void **buffer, size_t max); +size_t h2_proxy_ihash_ishift(h2_proxy_ihash_t *ih, int *buffer, size_t max); + +/******************************************************************************* + * iqueue - sorted list of int with user defined ordering + ******************************************************************************/ +typedef struct h2_proxy_iqueue { + int *elts; + int head; + int nelts; + int nalloc; + apr_pool_t *pool; +} h2_proxy_iqueue; + +/** + * Comparator for two int to determine their order. + * + * @param i1 first int to compare + * @param i2 second int to compare + * @param ctx provided user data + * @return value is the same as for strcmp() and has the effect: + * == 0: s1 and s2 are treated equal in ordering + * < 0: s1 should be sorted before s2 + * > 0: s2 should be sorted before s1 + */ +typedef int h2_proxy_iq_cmp(int i1, int i2, void *ctx); + +/** + * Allocate a new queue from the pool and initialize. + * @param id the identifier of the queue + * @param pool the memory pool + */ +h2_proxy_iqueue *h2_proxy_iq_create(apr_pool_t *pool, int capacity); + +/** + * Return != 0 iff there are no tasks in the queue. + * @param q the queue to check + */ +int h2_proxy_iq_empty(h2_proxy_iqueue *q); + +/** + * Return the number of int in the queue. + * @param q the queue to get size on + */ +int h2_proxy_iq_count(h2_proxy_iqueue *q); + +/** + * Add a stream id to the queue. + * + * @param q the queue to append the task to + * @param sid the stream id to add + * @param cmp the comparator for sorting + * @param ctx user data for comparator + */ +void h2_proxy_iq_add(h2_proxy_iqueue *q, int sid, h2_proxy_iq_cmp *cmp, void *ctx); + +/** + * Remove the stream id from the queue. Return != 0 iff task + * was found in queue. + * @param q the task queue + * @param sid the stream id to remove + * @return != 0 iff task was found in queue + */ +int h2_proxy_iq_remove(h2_proxy_iqueue *q, int sid); + +/** + * Remove all entries in the queue. + */ +void h2_proxy_iq_clear(h2_proxy_iqueue *q); + +/** + * Sort the stream idqueue again. Call if the task ordering + * has changed. + * + * @param q the queue to sort + * @param cmp the comparator for sorting + * @param ctx user data for the comparator + */ +void h2_proxy_iq_sort(h2_proxy_iqueue *q, h2_proxy_iq_cmp *cmp, void *ctx); + +/** + * Get the first stream id from the queue or NULL if the queue is empty. + * The task will be removed. 
+ * + * @param q the queue to get the first task from + * @return the first stream id of the queue, 0 if empty + */ +int h2_proxy_iq_shift(h2_proxy_iqueue *q); + +/******************************************************************************* + * common helpers + ******************************************************************************/ +/* h2_proxy_log2(n) iff n is a power of 2 */ +unsigned char h2_proxy_log2(int n); + +/******************************************************************************* + * HTTP/2 header helpers + ******************************************************************************/ +void h2_proxy_util_camel_case_header(char *s, size_t len); +int h2_proxy_res_ignore_header(const char *name, size_t len); + +/******************************************************************************* + * nghttp2 helpers + ******************************************************************************/ +typedef struct h2_proxy_ngheader { + nghttp2_nv *nv; + apr_size_t nvlen; +} h2_proxy_ngheader; +h2_proxy_ngheader *h2_proxy_util_nghd_make_req(apr_pool_t *p, + const struct h2_proxy_request *req); + +/******************************************************************************* + * h2_proxy_request helpers + ******************************************************************************/ +typedef struct h2_proxy_request h2_proxy_request; + +struct h2_proxy_request { + const char *method; /* pseudo header values, see ch. 8.1.2.3 */ + const char *scheme; + const char *authority; + const char *path; + + apr_table_t *headers; + + apr_time_t request_time; + + unsigned int chunked : 1; /* iff requst body needs to be forwarded as chunked */ + unsigned int serialize : 1; /* iff this request is written in HTTP/1.1 serialization */ +}; + +h2_proxy_request *h2_proxy_req_create(int id, apr_pool_t *pool, int serialize); +apr_status_t h2_proxy_req_make(h2_proxy_request *req, apr_pool_t *pool, + const char *method, const char *scheme, + const char *authority, const char *path, + apr_table_t *headers); + +/******************************************************************************* + * reverse mapping for link headers + ******************************************************************************/ +const char *h2_proxy_link_reverse_map(request_rec *r, + proxy_dir_conf *conf, + const char *real_server_uri, + const char *proxy_server_uri, + const char *s); + +/******************************************************************************* + * FIFO queue + ******************************************************************************/ + +/** + * A thread-safe FIFO queue with some extra bells and whistles, if you + * do not need anything special, better use 'apr_queue'. + */ +typedef struct h2_proxy_fifo h2_proxy_fifo; + +/** + * Create a FIFO queue that can hold up to capacity elements. Elements can + * appear several times. + */ +apr_status_t h2_proxy_fifo_create(h2_proxy_fifo **pfifo, apr_pool_t *pool, int capacity); + +/** + * Create a FIFO set that can hold up to capacity elements. Elements only + * appear once. Pushing an element already present does not change the + * queue and is successful. + */ +apr_status_t h2_proxy_fifo_set_create(h2_proxy_fifo **pfifo, apr_pool_t *pool, int capacity); + +apr_status_t h2_proxy_fifo_term(h2_proxy_fifo *fifo); +apr_status_t h2_proxy_fifo_interrupt(h2_proxy_fifo *fifo); + +int h2_proxy_fifo_capacity(h2_proxy_fifo *fifo); +int h2_proxy_fifo_count(h2_proxy_fifo *fifo); + +/** + * Push en element into the queue. Blocks if there is no capacity left. 
+ * + * @param fifo the FIFO queue + * @param elem the element to push + * @return APR_SUCCESS on push, APR_EAGAIN on try_push on a full queue, + * APR_EEXIST when in set mode and elem already there. + */ +apr_status_t h2_proxy_fifo_push(h2_proxy_fifo *fifo, void *elem); +apr_status_t h2_proxy_fifo_try_push(h2_proxy_fifo *fifo, void *elem); + +apr_status_t h2_proxy_fifo_pull(h2_proxy_fifo *fifo, void **pelem); +apr_status_t h2_proxy_fifo_try_pull(h2_proxy_fifo *fifo, void **pelem); + +/** + * Remove the elem from the queue, will remove multiple appearances. + * @param elem the element to remove + * @return APR_SUCCESS iff > 0 elems were removed, APR_EAGAIN otherwise. + */ +apr_status_t h2_proxy_fifo_remove(h2_proxy_fifo *fifo, void *elem); + + +#endif /* defined(__mod_h2__h2_proxy_util__) */ diff --git a/modules/http2/h2_push.c b/modules/http2/h2_push.c new file mode 100644 index 0000000..9a3b19b --- /dev/null +++ b/modules/http2/h2_push.c @@ -0,0 +1,1060 @@ +/* Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include <assert.h> +#include <stdio.h> + +#include <apr_lib.h> +#include <apr_strings.h> +#include <apr_hash.h> +#include <apr_time.h> + +#ifdef H2_OPENSSL +#include <openssl/sha.h> +#endif + +#include <httpd.h> +#include <http_core.h> +#include <http_log.h> + +#include "h2_private.h" +#include "h2_h2.h" +#include "h2_util.h" +#include "h2_push.h" +#include "h2_request.h" +#include "h2_headers.h" +#include "h2_session.h" +#include "h2_stream.h" + +/******************************************************************************* + * link header handling + ******************************************************************************/ + +static const char *policy_str(h2_push_policy policy) +{ + switch (policy) { + case H2_PUSH_NONE: + return "none"; + case H2_PUSH_FAST_LOAD: + return "fast-load"; + case H2_PUSH_HEAD: + return "head"; + default: + return "default"; + } +} + +typedef struct { + const h2_request *req; + int push_policy; + apr_pool_t *pool; + apr_array_header_t *pushes; + const char *s; + size_t slen; + size_t i; + + const char *link; + apr_table_t *params; + char b[4096]; +} link_ctx; + +static int attr_char(char c) +{ + switch (c) { + case '!': + case '#': + case '$': + case '&': + case '+': + case '-': + case '.': + case '^': + case '_': + case '`': + case '|': + case '~': + return 1; + default: + return apr_isalnum(c); + } +} + +static int ptoken_char(char c) +{ + switch (c) { + case '!': + case '#': + case '$': + case '&': + case '\'': + case '(': + case ')': + case '*': + case '+': + case '-': + case '.': + case '/': + case ':': + case '<': + case '=': + case '>': + case '?': + case '@': + case '[': + case ']': + case '^': + case '_': + case '`': + case '{': + case '|': + case '}': + case '~': + return 1; + default: + 
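/* any character not listed above is accepted only if it is alphanumeric,
 * matching the ALPHA / DIGIT alternatives of the ptokenchar rule quoted
 * in inspect_link() below */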
return apr_isalnum(c); + } +} + +static int skip_ws(link_ctx *ctx) +{ + char c; + while (ctx->i < ctx->slen + && (((c = ctx->s[ctx->i]) == ' ') || (c == '\t'))) { + ++ctx->i; + } + return (ctx->i < ctx->slen); +} + +static int find_chr(link_ctx *ctx, char c, size_t *pidx) +{ + size_t j; + for (j = ctx->i; j < ctx->slen; ++j) { + if (ctx->s[j] == c) { + *pidx = j; + return 1; + } + } + return 0; +} + +static int read_chr(link_ctx *ctx, char c) +{ + if (ctx->i < ctx->slen && ctx->s[ctx->i] == c) { + ++ctx->i; + return 1; + } + return 0; +} + +static char *mk_str(link_ctx *ctx, size_t end) +{ + if (ctx->i < end) { + return apr_pstrndup(ctx->pool, ctx->s + ctx->i, end - ctx->i); + } + return (char*)""; +} + +static int read_qstring(link_ctx *ctx, const char **ps) +{ + if (skip_ws(ctx) && read_chr(ctx, '\"')) { + size_t end; + if (find_chr(ctx, '\"', &end)) { + *ps = mk_str(ctx, end); + ctx->i = end + 1; + return 1; + } + } + return 0; +} + +static int read_ptoken(link_ctx *ctx, const char **ps) +{ + if (skip_ws(ctx)) { + size_t i; + for (i = ctx->i; i < ctx->slen && ptoken_char(ctx->s[i]); ++i) { + /* nop */ + } + if (i > ctx->i) { + *ps = mk_str(ctx, i); + ctx->i = i; + return 1; + } + } + return 0; +} + + +static int read_link(link_ctx *ctx) +{ + if (skip_ws(ctx) && read_chr(ctx, '<')) { + size_t end; + if (find_chr(ctx, '>', &end)) { + ctx->link = mk_str(ctx, end); + ctx->i = end + 1; + return 1; + } + } + return 0; +} + +static int read_pname(link_ctx *ctx, const char **pname) +{ + if (skip_ws(ctx)) { + size_t i; + for (i = ctx->i; i < ctx->slen && attr_char(ctx->s[i]); ++i) { + /* nop */ + } + if (i > ctx->i) { + *pname = mk_str(ctx, i); + ctx->i = i; + return 1; + } + } + return 0; +} + +static int read_pvalue(link_ctx *ctx, const char **pvalue) +{ + if (skip_ws(ctx) && read_chr(ctx, '=')) { + if (read_qstring(ctx, pvalue) || read_ptoken(ctx, pvalue)) { + return 1; + } + } + return 0; +} + +static int read_param(link_ctx *ctx) +{ + if (skip_ws(ctx) && read_chr(ctx, ';')) { + const char *name, *value = ""; + if (read_pname(ctx, &name)) { + read_pvalue(ctx, &value); /* value is optional */ + apr_table_setn(ctx->params, name, value); + return 1; + } + } + return 0; +} + +static int read_sep(link_ctx *ctx) +{ + if (skip_ws(ctx) && read_chr(ctx, ',')) { + return 1; + } + return 0; +} + +static void init_params(link_ctx *ctx) +{ + if (!ctx->params) { + ctx->params = apr_table_make(ctx->pool, 5); + } + else { + apr_table_clear(ctx->params); + } +} + +static int same_authority(const h2_request *req, const apr_uri_t *uri) +{ + if (uri->scheme != NULL && strcmp(uri->scheme, req->scheme)) { + return 0; + } + if (uri->hostinfo != NULL && strcmp(uri->hostinfo, req->authority)) { + return 0; + } + return 1; +} + +static int set_push_header(void *ctx, const char *key, const char *value) +{ + size_t klen = strlen(key); + if (H2_HD_MATCH_LIT("User-Agent", key, klen) + || H2_HD_MATCH_LIT("Accept", key, klen) + || H2_HD_MATCH_LIT("Accept-Encoding", key, klen) + || H2_HD_MATCH_LIT("Accept-Language", key, klen) + || H2_HD_MATCH_LIT("Cache-Control", key, klen)) { + apr_table_setn(ctx, key, value); + } + return 1; +} + +static int has_param(link_ctx *ctx, const char *param) +{ + const char *p = apr_table_get(ctx->params, param); + return !!p; +} + +static int has_relation(link_ctx *ctx, const char *rel) +{ + const char *s, *val = apr_table_get(ctx->params, "rel"); + if (val) { + if (!strcmp(rel, val)) { + return 1; + } + s = ap_strstr_c(val, rel); + if (s && (s == val || s[-1] == ' ')) { + s += strlen(rel); + 
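/* the rel parameter may carry a space separated list of relation types;
 * the match above only counts when the searched relation type is delimited
 * by a blank or the end of the value, so e.g. a value of "preloading" does
 * not match a search for "preload" */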
if (!*s || *s == ' ') { + return 1; + } + } + } + return 0; +} + +static int add_push(link_ctx *ctx) +{ + /* so, we have read a Link header and need to decide + * if we transform it into a push. + */ + if (has_relation(ctx, "preload") && !has_param(ctx, "nopush")) { + apr_uri_t uri; + if (apr_uri_parse(ctx->pool, ctx->link, &uri) == APR_SUCCESS) { + if (uri.path && same_authority(ctx->req, &uri)) { + char *path; + const char *method; + apr_table_t *headers; + h2_request *req; + h2_push *push; + + /* We only want to generate pushes for resources in the + * same authority than the original request. + * icing: i think that is wise, otherwise we really need to + * check that the vhost/server is available and uses the same + * TLS (if any) parameters. + */ + path = apr_uri_unparse(ctx->pool, &uri, APR_URI_UNP_OMITSITEPART); + push = apr_pcalloc(ctx->pool, sizeof(*push)); + switch (ctx->push_policy) { + case H2_PUSH_HEAD: + method = "HEAD"; + break; + default: + method = "GET"; + break; + } + headers = apr_table_make(ctx->pool, 5); + apr_table_do(set_push_header, headers, ctx->req->headers, NULL); + req = h2_req_create(0, ctx->pool, method, ctx->req->scheme, + ctx->req->authority, path, headers, + ctx->req->serialize); + /* atm, we do not push on pushes */ + h2_request_end_headers(req, ctx->pool, 1, 0); + push->req = req; + if (has_param(ctx, "critical")) { + h2_priority *prio = apr_pcalloc(ctx->pool, sizeof(*prio)); + prio->dependency = H2_DEPENDANT_BEFORE; + push->priority = prio; + } + if (!ctx->pushes) { + ctx->pushes = apr_array_make(ctx->pool, 5, sizeof(h2_push*)); + } + APR_ARRAY_PUSH(ctx->pushes, h2_push*) = push; + } + } + } + return 0; +} + +static void inspect_link(link_ctx *ctx, const char *s, size_t slen) +{ + /* RFC 5988 <https://tools.ietf.org/html/rfc5988#section-6.2.1> + Link = "Link" ":" #link-value + link-value = "<" URI-Reference ">" *( ";" link-param ) + link-param = ( ( "rel" "=" relation-types ) + | ( "anchor" "=" <"> URI-Reference <"> ) + | ( "rev" "=" relation-types ) + | ( "hreflang" "=" Language-Tag ) + | ( "media" "=" ( MediaDesc | ( <"> MediaDesc <"> ) ) ) + | ( "title" "=" quoted-string ) + | ( "title*" "=" ext-value ) + | ( "type" "=" ( media-type | quoted-mt ) ) + | ( link-extension ) ) + link-extension = ( parmname [ "=" ( ptoken | quoted-string ) ] ) + | ( ext-name-star "=" ext-value ) + ext-name-star = parmname "*" ; reserved for RFC2231-profiled + ; extensions. Whitespace NOT + ; allowed in between. + ptoken = 1*ptokenchar + ptokenchar = "!" | "#" | "$" | "%" | "&" | "'" | "(" + | ")" | "*" | "+" | "-" | "." | "/" | DIGIT + | ":" | "<" | "=" | ">" | "?" | "@" | ALPHA + | "[" | "]" | "^" | "_" | "`" | "{" | "|" + | "}" | "~" + media-type = type-name "/" subtype-name + quoted-mt = <"> media-type <"> + relation-types = relation-type + | <"> relation-type *( 1*SP relation-type ) <"> + relation-type = reg-rel-type | ext-rel-type + reg-rel-type = LOALPHA *( LOALPHA | DIGIT | "." | "-" ) + ext-rel-type = URI + + and from <https://tools.ietf.org/html/rfc5987> + parmname = 1*attr-char + attr-char = ALPHA / DIGIT + / "!" / "#" / "$" / "&" / "+" / "-" / "." 
+ / "^" / "_" / "`" / "|" / "~" + */ + + ctx->s = s; + ctx->slen = slen; + ctx->i = 0; + + while (read_link(ctx)) { + init_params(ctx); + while (read_param(ctx)) { + /* nop */ + } + add_push(ctx); + if (!read_sep(ctx)) { + break; + } + } +} + +static int head_iter(void *ctx, const char *key, const char *value) +{ + if (!apr_strnatcasecmp("link", key)) { + inspect_link(ctx, value, strlen(value)); + } + return 1; +} + +apr_array_header_t *h2_push_collect(apr_pool_t *p, const h2_request *req, + int push_policy, const h2_headers *res) +{ + if (req && push_policy != H2_PUSH_NONE) { + /* Collect push candidates from the request/response pair. + * + * One source for pushes are "rel=preload" link headers + * in the response. + * + * TODO: This may be extended in the future by hooks or callbacks + * where other modules can provide push information directly. + */ + if (res->headers) { + link_ctx ctx; + + memset(&ctx, 0, sizeof(ctx)); + ctx.req = req; + ctx.push_policy = push_policy; + ctx.pool = p; + + apr_table_do(head_iter, &ctx, res->headers, NULL); + if (ctx.pushes) { + apr_table_setn(res->headers, "push-policy", + policy_str(push_policy)); + } + return ctx.pushes; + } + } + return NULL; +} + +/******************************************************************************* + * push diary + * + * - The push diary keeps track of resources already PUSHed via HTTP/2 on this + * connection. It records a hash value from the absolute URL of the resource + * pushed. + * - Lacking openssl, it uses 'apr_hashfunc_default' for the value + * - with openssl, it uses SHA256 to calculate the hash value + * - whatever the method to generate the hash, the diary keeps a maximum of 64 + * bits per hash, limiting the memory consumption to about + * H2PushDiarySize * 8 + * bytes. Entries are sorted by most recently used and oldest entries are + * forgotten first. + * - Clients can initialize/replace the push diary by sending a 'Cache-Digest' + * header. Currently, this is the base64url encoded value of the cache digest + * as specified in https://datatracker.ietf.org/doc/draft-kazuho-h2-cache-digest/ + * This draft can be expected to evolve and the definition of the header + * will be added there and refined. + * - The cache digest header is a Golomb Coded Set of hash values, but it may + * limit the amount of bits per hash value even further. For a good description + * of GCS, read here: + * http://giovanni.bajo.it/post/47119962313/golomb-coded-sets-smaller-than-bloom-filters + * - The means that the push diary might be initialized with hash values of much + * less than 64 bits, leading to more false positives, but smaller digest size. 
+ ******************************************************************************/ + + +#define GCSLOG_LEVEL APLOG_TRACE1 + +typedef struct h2_push_diary_entry { + apr_uint64_t hash; +} h2_push_diary_entry; + + +#ifdef H2_OPENSSL +static void sha256_update(SHA256_CTX *ctx, const char *s) +{ + SHA256_Update(ctx, s, strlen(s)); +} + +static void calc_sha256_hash(h2_push_diary *diary, apr_uint64_t *phash, h2_push *push) +{ + SHA256_CTX sha256; + apr_uint64_t val; + unsigned char hash[SHA256_DIGEST_LENGTH]; + int i; + + SHA256_Init(&sha256); + sha256_update(&sha256, push->req->scheme); + sha256_update(&sha256, "://"); + sha256_update(&sha256, push->req->authority); + sha256_update(&sha256, push->req->path); + SHA256_Final(hash, &sha256); + + val = 0; + for (i = 0; i != sizeof(val); ++i) + val = val * 256 + hash[i]; + *phash = val >> (64 - diary->mask_bits); +} +#endif + + +static unsigned int val_apr_hash(const char *str) +{ + apr_ssize_t len = strlen(str); + return apr_hashfunc_default(str, &len); +} + +static void calc_apr_hash(h2_push_diary *diary, apr_uint64_t *phash, h2_push *push) +{ + apr_uint64_t val; +#if APR_UINT64_MAX > UINT_MAX + val = ((apr_uint64_t)(val_apr_hash(push->req->scheme)) << 32); + val ^= ((apr_uint64_t)(val_apr_hash(push->req->authority)) << 16); + val ^= val_apr_hash(push->req->path); +#else + val = val_apr_hash(push->req->scheme); + val ^= val_apr_hash(push->req->authority); + val ^= val_apr_hash(push->req->path); +#endif + *phash = val; +} + +static apr_int32_t ceil_power_of_2(apr_int32_t n) +{ + if (n <= 2) return 2; + --n; + n |= n >> 1; + n |= n >> 2; + n |= n >> 4; + n |= n >> 8; + n |= n >> 16; + return ++n; +} + +static h2_push_diary *diary_create(apr_pool_t *p, h2_push_digest_type dtype, + int N) +{ + h2_push_diary *diary = NULL; + + if (N > 0) { + diary = apr_pcalloc(p, sizeof(*diary)); + + diary->NMax = ceil_power_of_2(N); + diary->N = diary->NMax; + /* the mask we use in value comparison depends on where we got + * the values from. If we calculate them ourselves, we can use + * the full 64 bits. + * If we set the diary via a compressed golomb set, we have less + * relevant bits and need to use a smaller mask. 
*/ + diary->mask_bits = 64; + /* grows by doubling, start with a power of 2 */ + diary->entries = apr_array_make(p, 16, sizeof(h2_push_diary_entry)); + + switch (dtype) { +#ifdef H2_OPENSSL + case H2_PUSH_DIGEST_SHA256: + diary->dtype = H2_PUSH_DIGEST_SHA256; + diary->dcalc = calc_sha256_hash; + break; +#endif /* ifdef H2_OPENSSL */ + default: + diary->dtype = H2_PUSH_DIGEST_APR_HASH; + diary->dcalc = calc_apr_hash; + break; + } + } + + return diary; +} + +h2_push_diary *h2_push_diary_create(apr_pool_t *p, int N) +{ + return diary_create(p, H2_PUSH_DIGEST_SHA256, N); +} + +static int h2_push_diary_find(h2_push_diary *diary, apr_uint64_t hash) +{ + if (diary) { + h2_push_diary_entry *e; + int i; + + /* search from the end, where the last accessed digests are */ + for (i = diary->entries->nelts-1; i >= 0; --i) { + e = &APR_ARRAY_IDX(diary->entries, i, h2_push_diary_entry); + if (e->hash == hash) { + return i; + } + } + } + return -1; +} + +static h2_push_diary_entry *move_to_last(h2_push_diary *diary, apr_size_t idx) +{ + h2_push_diary_entry *entries = (h2_push_diary_entry*)diary->entries->elts; + h2_push_diary_entry e; + apr_size_t lastidx = diary->entries->nelts-1; + + /* move entry[idx] to the end */ + if (idx < lastidx) { + e = entries[idx]; + memmove(entries+idx, entries+idx+1, sizeof(e) * (lastidx - idx)); + entries[lastidx] = e; + } + return &entries[lastidx]; +} + +static void h2_push_diary_append(h2_push_diary *diary, h2_push_diary_entry *e) +{ + h2_push_diary_entry *ne; + + if (diary->entries->nelts < diary->N) { + /* append a new diary entry at the end */ + APR_ARRAY_PUSH(diary->entries, h2_push_diary_entry) = *e; + ne = &APR_ARRAY_IDX(diary->entries, diary->entries->nelts-1, h2_push_diary_entry); + } + else { + /* replace content with new digest. 
keeps memory usage constant once diary is full */ + ne = move_to_last(diary, 0); + *ne = *e; + } + /* Intentional no APLOGNO */ + ap_log_perror(APLOG_MARK, GCSLOG_LEVEL, 0, diary->entries->pool, + "push_diary_append: %"APR_UINT64_T_HEX_FMT, ne->hash); +} + +apr_array_header_t *h2_push_diary_update(h2_session *session, apr_array_header_t *pushes) +{ + apr_array_header_t *npushes = pushes; + h2_push_diary_entry e; + int i, idx; + + if (session->push_diary && pushes) { + npushes = NULL; + + for (i = 0; i < pushes->nelts; ++i) { + h2_push *push; + + push = APR_ARRAY_IDX(pushes, i, h2_push*); + session->push_diary->dcalc(session->push_diary, &e.hash, push); + idx = h2_push_diary_find(session->push_diary, e.hash); + if (idx >= 0) { + /* Intentional no APLOGNO */ + ap_log_cerror(APLOG_MARK, GCSLOG_LEVEL, 0, session->c, + "push_diary_update: already there PUSH %s", push->req->path); + move_to_last(session->push_diary, idx); + } + else { + /* Intentional no APLOGNO */ + ap_log_cerror(APLOG_MARK, GCSLOG_LEVEL, 0, session->c, + "push_diary_update: adding PUSH %s", push->req->path); + if (!npushes) { + npushes = apr_array_make(pushes->pool, 5, sizeof(h2_push_diary_entry*)); + } + APR_ARRAY_PUSH(npushes, h2_push*) = push; + h2_push_diary_append(session->push_diary, &e); + } + } + } + return npushes; +} + +apr_array_header_t *h2_push_collect_update(h2_stream *stream, + const struct h2_request *req, + const struct h2_headers *res) +{ + h2_session *session = stream->session; + const char *cache_digest = apr_table_get(req->headers, "Cache-Digest"); + apr_array_header_t *pushes; + apr_status_t status; + + if (cache_digest && session->push_diary) { + status = h2_push_diary_digest64_set(session->push_diary, req->authority, + cache_digest, stream->pool); + if (status != APR_SUCCESS) { + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, session->c, + H2_SSSN_LOG(APLOGNO(03057), session, + "push diary set from Cache-Digest: %s"), cache_digest); + } + } + pushes = h2_push_collect(stream->pool, req, stream->push_policy, res); + return h2_push_diary_update(stream->session, pushes); +} + +static apr_int32_t h2_log2inv(unsigned char log2) +{ + return log2? (1 << log2) : 1; +} + + +typedef struct { + h2_push_diary *diary; + unsigned char log2p; + int mask_bits; + int delta_bits; + int fixed_bits; + apr_uint64_t fixed_mask; + apr_pool_t *pool; + unsigned char *data; + apr_size_t datalen; + apr_size_t offset; + unsigned int bit; + apr_uint64_t last; +} gset_encoder; + +static int cmp_puint64(const void *p1, const void *p2) +{ + const apr_uint64_t *pu1 = p1, *pu2 = p2; + return (*pu1 > *pu2)? 1 : ((*pu1 == *pu2)? 
0 : -1); +} + +/* in golomb bit stream encoding, bit 0 is the 8th of the first char, or + * more generally: + * char(bit/8) & cbit_mask[(bit % 8)] + */ +static unsigned char cbit_mask[] = { + 0x80u, + 0x40u, + 0x20u, + 0x10u, + 0x08u, + 0x04u, + 0x02u, + 0x01u, +}; + +static apr_status_t gset_encode_bit(gset_encoder *encoder, int bit) +{ + if (++encoder->bit >= 8) { + if (++encoder->offset >= encoder->datalen) { + apr_size_t nlen = encoder->datalen*2; + unsigned char *ndata = apr_pcalloc(encoder->pool, nlen); + if (!ndata) { + return APR_ENOMEM; + } + memcpy(ndata, encoder->data, encoder->datalen); + encoder->data = ndata; + encoder->datalen = nlen; + } + encoder->bit = 0; + encoder->data[encoder->offset] = 0xffu; + } + if (!bit) { + encoder->data[encoder->offset] &= ~cbit_mask[encoder->bit]; + } + return APR_SUCCESS; +} + +static apr_status_t gset_encode_next(gset_encoder *encoder, apr_uint64_t pval) +{ + apr_uint64_t delta, flex_bits; + apr_status_t status = APR_SUCCESS; + int i; + + delta = pval - encoder->last; + encoder->last = pval; + flex_bits = (delta >> encoder->fixed_bits); + /* Intentional no APLOGNO */ + ap_log_perror(APLOG_MARK, GCSLOG_LEVEL, 0, encoder->pool, + "h2_push_diary_enc: val=%"APR_UINT64_T_HEX_FMT", delta=%" + APR_UINT64_T_HEX_FMT" flex_bits=%"APR_UINT64_T_FMT", " + ", fixed_bits=%d, fixed_val=%"APR_UINT64_T_HEX_FMT, + pval, delta, flex_bits, encoder->fixed_bits, delta&encoder->fixed_mask); + for (; flex_bits != 0; --flex_bits) { + status = gset_encode_bit(encoder, 1); + if (status != APR_SUCCESS) { + return status; + } + } + status = gset_encode_bit(encoder, 0); + if (status != APR_SUCCESS) { + return status; + } + + for (i = encoder->fixed_bits-1; i >= 0; --i) { + status = gset_encode_bit(encoder, (delta >> i) & 1); + if (status != APR_SUCCESS) { + return status; + } + } + return APR_SUCCESS; +} + +/** + * Get a cache digest as described in + * https://datatracker.ietf.org/doc/draft-kazuho-h2-cache-digest/ + * from the contents of the push diary. + * + * @param diary the diary to calculdate the digest from + * @param p the pool to use + * @param pdata on successful return, the binary cache digest + * @param plen on successful return, the length of the binary data + */ +apr_status_t h2_push_diary_digest_get(h2_push_diary *diary, apr_pool_t *pool, + int maxP, const char *authority, + const char **pdata, apr_size_t *plen) +{ + int nelts, N, i; + unsigned char log2n, log2pmax; + gset_encoder encoder; + apr_uint64_t *hashes; + apr_size_t hash_count; + + nelts = diary->entries->nelts; + + if (nelts > APR_UINT32_MAX) { + /* should not happen */ + return APR_ENOTIMPL; + } + N = ceil_power_of_2(nelts); + log2n = h2_log2(N); + + /* Now log2p is the max number of relevant bits, so that + * log2p + log2n == mask_bits. We can uise a lower log2p + * and have a shorter set encoding... 
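     * Worked example (illustrative numbers): with fixed_bits = 3, a delta of
     * 11 (binary 1011) is emitted as the unary quotient 11 >> 3 = 1 ("1"
     * followed by the terminating "0") and then the 3 low-order bits "011",
     * i.e. the bit string "10011". Because the hashes are sorted, the deltas
     * stay small and most entries cost only a few bits.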
+ */ + log2pmax = h2_log2(ceil_power_of_2(maxP)); + + memset(&encoder, 0, sizeof(encoder)); + encoder.diary = diary; + encoder.log2p = H2MIN(diary->mask_bits - log2n, log2pmax); + encoder.mask_bits = log2n + encoder.log2p; + encoder.delta_bits = diary->mask_bits - encoder.mask_bits; + encoder.fixed_bits = encoder.log2p; + encoder.fixed_mask = 1; + encoder.fixed_mask = (encoder.fixed_mask << encoder.fixed_bits) - 1; + encoder.pool = pool; + encoder.datalen = 512; + encoder.data = apr_pcalloc(encoder.pool, encoder.datalen); + + encoder.data[0] = log2n; + encoder.data[1] = encoder.log2p; + encoder.offset = 1; + encoder.bit = 8; + encoder.last = 0; + + /* Intentional no APLOGNO */ + ap_log_perror(APLOG_MARK, GCSLOG_LEVEL, 0, pool, + "h2_push_diary_digest_get: %d entries, N=%d, log2n=%d, " + "mask_bits=%d, enc.mask_bits=%d, delta_bits=%d, enc.log2p=%d, authority=%s", + (int)nelts, (int)N, (int)log2n, diary->mask_bits, + (int)encoder.mask_bits, (int)encoder.delta_bits, + (int)encoder.log2p, authority); + + if (!authority || !diary->authority + || !strcmp("*", authority) || !strcmp(diary->authority, authority)) { + hash_count = diary->entries->nelts; + hashes = apr_pcalloc(encoder.pool, hash_count); + for (i = 0; i < hash_count; ++i) { + hashes[i] = ((&APR_ARRAY_IDX(diary->entries, i, h2_push_diary_entry))->hash + >> encoder.delta_bits); + } + + qsort(hashes, hash_count, sizeof(apr_uint64_t), cmp_puint64); + for (i = 0; i < hash_count; ++i) { + if (!i || (hashes[i] != hashes[i-1])) { + gset_encode_next(&encoder, hashes[i]); + } + } + /* Intentional no APLOGNO */ + ap_log_perror(APLOG_MARK, GCSLOG_LEVEL, 0, pool, + "h2_push_diary_digest_get: golomb compressed hashes, %d bytes", + (int)encoder.offset + 1); + } + *pdata = (const char *)encoder.data; + *plen = encoder.offset + 1; + + return APR_SUCCESS; +} + +typedef struct { + h2_push_diary *diary; + apr_pool_t *pool; + unsigned char log2p; + const unsigned char *data; + apr_size_t datalen; + apr_size_t offset; + unsigned int bit; + apr_uint64_t last_val; +} gset_decoder; + +static int gset_decode_next_bit(gset_decoder *decoder) +{ + if (++decoder->bit >= 8) { + if (++decoder->offset >= decoder->datalen) { + return -1; + } + decoder->bit = 0; + } + return (decoder->data[decoder->offset] & cbit_mask[decoder->bit])? 1 : 0; +} + +static apr_status_t gset_decode_next(gset_decoder *decoder, apr_uint64_t *phash) +{ + apr_uint64_t flex = 0, fixed = 0, delta; + int i; + + /* read 1 bits until we encounter 0, then read log2n(diary-P) bits. + * On a malformed bit-string, this will not fail, but produce results + * which are pbly too large. Luckily, the diary will modulo the hash. + */ + while (1) { + int bit = gset_decode_next_bit(decoder); + if (bit == -1) { + return APR_EINVAL; + } + if (!bit) { + break; + } + ++flex; + } + + for (i = 0; i < decoder->log2p; ++i) { + int bit = gset_decode_next_bit(decoder); + if (bit == -1) { + return APR_EINVAL; + } + fixed = (fixed << 1) | bit; + } + + delta = (flex << decoder->log2p) | fixed; + *phash = delta + decoder->last_val; + decoder->last_val = *phash; + + /* Intentional no APLOGNO */ + ap_log_perror(APLOG_MARK, GCSLOG_LEVEL, 0, decoder->pool, + "h2_push_diary_digest_dec: val=%"APR_UINT64_T_HEX_FMT", delta=%" + APR_UINT64_T_HEX_FMT", flex=%d, fixed=%"APR_UINT64_T_HEX_FMT, + *phash, delta, (int)flex, fixed); + + return APR_SUCCESS; +} + +/** + * Initialize the push diary by a cache digest as described in + * https://datatracker.ietf.org/doc/draft-kazuho-h2-cache-digest/ + * . 
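 * The layout expected here matches what h2_push_diary_digest_get() produces:
 * the first byte carries log2n, the second log2p, and the remaining bytes
 * hold the Golomb-coded deltas of the sorted hash values.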
+ * @param diary the diary to set the digest into + * @param data the binary cache digest + * @param len the length of the cache digest + * @return APR_EINVAL if digest was not successfully parsed + */ +apr_status_t h2_push_diary_digest_set(h2_push_diary *diary, const char *authority, + const char *data, apr_size_t len) +{ + gset_decoder decoder; + unsigned char log2n, log2p; + int N, i; + apr_pool_t *pool = diary->entries->pool; + h2_push_diary_entry e; + apr_status_t status = APR_SUCCESS; + + if (len < 2) { + /* at least this should be there */ + return APR_EINVAL; + } + log2n = data[0]; + log2p = data[1]; + diary->mask_bits = log2n + log2p; + if (diary->mask_bits > 64) { + /* cannot handle */ + return APR_ENOTIMPL; + } + + /* whatever is in the digest, it replaces the diary entries */ + apr_array_clear(diary->entries); + if (!authority || !strcmp("*", authority)) { + diary->authority = NULL; + } + else if (!diary->authority || strcmp(diary->authority, authority)) { + diary->authority = apr_pstrdup(diary->entries->pool, authority); + } + + N = h2_log2inv(log2n + log2p); + + decoder.diary = diary; + decoder.pool = pool; + decoder.log2p = log2p; + decoder.data = (const unsigned char*)data; + decoder.datalen = len; + decoder.offset = 1; + decoder.bit = 8; + decoder.last_val = 0; + + diary->N = N; + /* Determine effective N we use for storage */ + if (!N) { + /* a totally empty cache digest. someone tells us that she has no + * entries in the cache at all. Use our own preferences for N+mask + */ + diary->N = diary->NMax; + return APR_SUCCESS; + } + else if (N > diary->NMax) { + /* Store not more than diary is configured to hold. We open us up + * to DOS attacks otherwise. */ + diary->N = diary->NMax; + } + + /* Intentional no APLOGNO */ + ap_log_perror(APLOG_MARK, GCSLOG_LEVEL, 0, pool, + "h2_push_diary_digest_set: N=%d, log2n=%d, " + "diary->mask_bits=%d, dec.log2p=%d", + (int)diary->N, (int)log2n, diary->mask_bits, + (int)decoder.log2p); + + for (i = 0; i < diary->N; ++i) { + if (gset_decode_next(&decoder, &e.hash) != APR_SUCCESS) { + /* the data may have less than N values */ + break; + } + h2_push_diary_append(diary, &e); + } + + /* Intentional no APLOGNO */ + ap_log_perror(APLOG_MARK, GCSLOG_LEVEL, 0, pool, + "h2_push_diary_digest_set: diary now with %d entries, mask_bits=%d", + (int)diary->entries->nelts, diary->mask_bits); + return status; +} + +apr_status_t h2_push_diary_digest64_set(h2_push_diary *diary, const char *authority, + const char *data64url, apr_pool_t *pool) +{ + const char *data; + apr_size_t len = h2_util_base64url_decode(&data, data64url, pool); + /* Intentional no APLOGNO */ + ap_log_perror(APLOG_MARK, GCSLOG_LEVEL, 0, pool, + "h2_push_diary_digest64_set: digest=%s, dlen=%d", + data64url, (int)len); + return h2_push_diary_digest_set(diary, authority, data, len); +} + diff --git a/modules/http2/h2_push.h b/modules/http2/h2_push.h new file mode 100644 index 0000000..bc24e68 --- /dev/null +++ b/modules/http2/h2_push.h @@ -0,0 +1,120 @@ +/* Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef __mod_h2__h2_push__ +#define __mod_h2__h2_push__ + +#include "h2.h" + +struct h2_request; +struct h2_headers; +struct h2_ngheader; +struct h2_session; +struct h2_stream; + +typedef struct h2_push { + const struct h2_request *req; + h2_priority *priority; +} h2_push; + +typedef enum { + H2_PUSH_DIGEST_APR_HASH, + H2_PUSH_DIGEST_SHA256 +} h2_push_digest_type; + +typedef struct h2_push_diary h2_push_diary; + +typedef void h2_push_digest_calc(h2_push_diary *diary, apr_uint64_t *phash, h2_push *push); + +struct h2_push_diary { + apr_array_header_t *entries; + int NMax; /* Maximum for N, should size change be necessary */ + int N; /* Current maximum number of entries, power of 2 */ + apr_uint64_t mask; /* mask for relevant bits */ + unsigned int mask_bits; /* number of relevant bits */ + const char *authority; + h2_push_digest_type dtype; + h2_push_digest_calc *dcalc; +}; + +/** + * Determine the list of h2_push'es to send to the client on behalf of + * the given request/response pair. + * + * @param p the pool to use + * @param req the requst from the client + * @param res the response from the server + * @return array of h2_push addresses or NULL + */ +apr_array_header_t *h2_push_collect(apr_pool_t *p, + const struct h2_request *req, + int push_policy, + const struct h2_headers *res); + +/** + * Create a new push diary for the given maximum number of entries. + * + * @param p the pool to use + * @param N the max number of entries, rounded up to 2^x + * @return the created diary, might be NULL of max_entries is 0 + */ +h2_push_diary *h2_push_diary_create(apr_pool_t *p, int N); + +/** + * Filters the given pushes against the diary and returns only those pushes + * that were newly entered in the diary. + */ +apr_array_header_t *h2_push_diary_update(struct h2_session *session, apr_array_header_t *pushes); + +/** + * Collect pushes for the given request/response pair, enter them into the + * diary and return those pushes newly entered. + */ +apr_array_header_t *h2_push_collect_update(struct h2_stream *stream, + const struct h2_request *req, + const struct h2_headers *res); +/** + * Get a cache digest as described in + * https://datatracker.ietf.org/doc/draft-kazuho-h2-cache-digest/ + * from the contents of the push diary. + * + * @param diary the diary to calculdate the digest from + * @param p the pool to use + * @param authority the authority to get the data for, use NULL/"*" for all + * @param pdata on successful return, the binary cache digest + * @param plen on successful return, the length of the binary data + */ +apr_status_t h2_push_diary_digest_get(h2_push_diary *diary, apr_pool_t *p, + int maxP, const char *authority, + const char **pdata, apr_size_t *plen); + +/** + * Initialize the push diary by a cache digest as described in + * https://datatracker.ietf.org/doc/draft-kazuho-h2-cache-digest/ + * . 
+ * @param diary the diary to set the digest into + * @param authority the authority to set the data for + * @param data the binary cache digest + * @param len the length of the cache digest + * @return APR_EINVAL if digest was not successfully parsed + */ +apr_status_t h2_push_diary_digest_set(h2_push_diary *diary, const char *authority, + const char *data, apr_size_t len); + +apr_status_t h2_push_diary_digest64_set(h2_push_diary *diary, const char *authority, + const char *data64url, apr_pool_t *pool); + +#endif /* defined(__mod_h2__h2_push__) */ diff --git a/modules/http2/h2_request.c b/modules/http2/h2_request.c new file mode 100644 index 0000000..8899c4f --- /dev/null +++ b/modules/http2/h2_request.c @@ -0,0 +1,339 @@ +/* Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include <assert.h> + +#include <apr_strings.h> + +#include <httpd.h> +#include <http_core.h> +#include <http_connection.h> +#include <http_protocol.h> +#include <http_request.h> +#include <http_log.h> +#include <http_vhost.h> +#include <util_filter.h> +#include <ap_mpm.h> +#include <mod_core.h> +#include <scoreboard.h> + +#include "h2_private.h" +#include "h2_config.h" +#include "h2_push.h" +#include "h2_request.h" +#include "h2_util.h" + + +typedef struct { + apr_table_t *headers; + apr_pool_t *pool; + apr_status_t status; +} h1_ctx; + +static int set_h1_header(void *ctx, const char *key, const char *value) +{ + h1_ctx *x = ctx; + x->status = h2_req_add_header(x->headers, x->pool, key, strlen(key), + value, strlen(value)); + return (x->status == APR_SUCCESS)? 1 : 0; +} + +apr_status_t h2_request_rcreate(h2_request **preq, apr_pool_t *pool, + request_rec *r) +{ + h2_request *req; + const char *scheme, *authority, *path; + h1_ctx x; + + *preq = NULL; + scheme = apr_pstrdup(pool, r->parsed_uri.scheme? 
r->parsed_uri.scheme + : ap_http_scheme(r)); + authority = apr_pstrdup(pool, r->hostname); + path = apr_uri_unparse(pool, &r->parsed_uri, APR_URI_UNP_OMITSITEPART); + + if (!r->method || !scheme || !r->hostname || !path) { + return APR_EINVAL; + } + + if (!ap_strchr_c(authority, ':') && r->server && r->server->port) { + apr_port_t defport = apr_uri_port_of_scheme(scheme); + if (defport != r->server->port) { + /* port info missing and port is not default for scheme: append */ + authority = apr_psprintf(pool, "%s:%d", authority, + (int)r->server->port); + } + } + + req = apr_pcalloc(pool, sizeof(*req)); + req->method = apr_pstrdup(pool, r->method); + req->scheme = scheme; + req->authority = authority; + req->path = path; + req->headers = apr_table_make(pool, 10); + if (r->server) { + req->serialize = h2_config_geti(h2_config_sget(r->server), + H2_CONF_SER_HEADERS); + } + + x.pool = pool; + x.headers = req->headers; + x.status = APR_SUCCESS; + apr_table_do(set_h1_header, &x, r->headers_in, NULL); + + *preq = req; + return x.status; +} + +apr_status_t h2_request_add_header(h2_request *req, apr_pool_t *pool, + const char *name, size_t nlen, + const char *value, size_t vlen) +{ + apr_status_t status = APR_SUCCESS; + + if (nlen <= 0) { + return status; + } + + if (name[0] == ':') { + /* pseudo header, see ch. 8.1.2.3, always should come first */ + if (!apr_is_empty_table(req->headers)) { + ap_log_perror(APLOG_MARK, APLOG_ERR, 0, pool, + APLOGNO(02917) + "h2_request: pseudo header after request start"); + return APR_EGENERAL; + } + + if (H2_HEADER_METHOD_LEN == nlen + && !strncmp(H2_HEADER_METHOD, name, nlen)) { + req->method = apr_pstrndup(pool, value, vlen); + } + else if (H2_HEADER_SCHEME_LEN == nlen + && !strncmp(H2_HEADER_SCHEME, name, nlen)) { + req->scheme = apr_pstrndup(pool, value, vlen); + } + else if (H2_HEADER_PATH_LEN == nlen + && !strncmp(H2_HEADER_PATH, name, nlen)) { + req->path = apr_pstrndup(pool, value, vlen); + } + else if (H2_HEADER_AUTH_LEN == nlen + && !strncmp(H2_HEADER_AUTH, name, nlen)) { + req->authority = apr_pstrndup(pool, value, vlen); + } + else { + char buffer[32]; + memset(buffer, 0, 32); + strncpy(buffer, name, (nlen > 31)? 31 : nlen); + ap_log_perror(APLOG_MARK, APLOG_WARNING, 0, pool, + APLOGNO(02954) + "h2_request: ignoring unknown pseudo header %s", + buffer); + } + } + else { + /* non-pseudo header, append to work bucket of stream */ + status = h2_req_add_header(req->headers, pool, name, nlen, value, vlen); + } + + return status; +} + +apr_status_t h2_request_end_headers(h2_request *req, apr_pool_t *pool, int eos, size_t raw_bytes) +{ + const char *s; + + /* rfc7540, ch. 8.1.2.3: + * - if we have :authority, it overrides any Host header + * - :authority MUST be ommited when converting h1->h2, so we + * might get a stream without, but then Host needs to be there */ + if (!req->authority) { + const char *host = apr_table_get(req->headers, "Host"); + if (!host) { + return APR_BADARG; + } + req->authority = host; + } + else { + apr_table_setn(req->headers, "Host", req->authority); + } + + s = apr_table_get(req->headers, "Content-Length"); + if (!s) { + /* HTTP/2 does not need a Content-Length for framing, but our + * internal request processing is used to HTTP/1.1, so we + * need to either add a Content-Length or a Transfer-Encoding + * if any content can be expected. 
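     * For example (illustrative): a POST whose HEADERS frame lacks both a
     * Content-Length and the END_STREAM flag gets "Transfer-Encoding: chunked"
     * added below, while a request that ends the stream but still announces a
     * Content-Type gets an explicit "Content-Length: 0".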
*/ + if (!eos) { + /* We have not seen a content-length and have no eos, + * simulate a chunked encoding for our HTTP/1.1 infrastructure, + * in case we have "H2SerializeHeaders on" here + */ + req->chunked = 1; + apr_table_mergen(req->headers, "Transfer-Encoding", "chunked"); + } + else if (apr_table_get(req->headers, "Content-Type")) { + /* If we have a content-type, but already seen eos, no more + * data will come. Signal a zero content length explicitly. + */ + apr_table_setn(req->headers, "Content-Length", "0"); + } + } + req->raw_bytes += raw_bytes; + + return APR_SUCCESS; +} + +h2_request *h2_request_clone(apr_pool_t *p, const h2_request *src) +{ + h2_request *dst = apr_pmemdup(p, src, sizeof(*dst)); + dst->method = apr_pstrdup(p, src->method); + dst->scheme = apr_pstrdup(p, src->scheme); + dst->authority = apr_pstrdup(p, src->authority); + dst->path = apr_pstrdup(p, src->path); + dst->headers = apr_table_clone(p, src->headers); + return dst; +} + +request_rec *h2_request_create_rec(const h2_request *req, conn_rec *c) +{ + int access_status = HTTP_OK; + const char *rpath; + apr_pool_t *p; + request_rec *r; + const char *s; + + apr_pool_create(&p, c->pool); + apr_pool_tag(p, "request"); + r = apr_pcalloc(p, sizeof(request_rec)); + AP_READ_REQUEST_ENTRY((intptr_t)r, (uintptr_t)c); + r->pool = p; + r->connection = c; + r->server = c->base_server; + + r->user = NULL; + r->ap_auth_type = NULL; + + r->allowed_methods = ap_make_method_list(p, 2); + + r->headers_in = apr_table_clone(r->pool, req->headers); + r->trailers_in = apr_table_make(r->pool, 5); + r->subprocess_env = apr_table_make(r->pool, 25); + r->headers_out = apr_table_make(r->pool, 12); + r->err_headers_out = apr_table_make(r->pool, 5); + r->trailers_out = apr_table_make(r->pool, 5); + r->notes = apr_table_make(r->pool, 5); + + r->request_config = ap_create_request_config(r->pool); + /* Must be set before we run create request hook */ + + r->proto_output_filters = c->output_filters; + r->output_filters = r->proto_output_filters; + r->proto_input_filters = c->input_filters; + r->input_filters = r->proto_input_filters; + ap_run_create_request(r); + r->per_dir_config = r->server->lookup_defaults; + + r->sent_bodyct = 0; /* bytect isn't for body */ + + r->read_length = 0; + r->read_body = REQUEST_NO_BODY; + + r->status = HTTP_OK; /* Until further notice */ + r->header_only = 0; + r->the_request = NULL; + + /* Begin by presuming any module can make its own path_info assumptions, + * until some module interjects and changes the value. + */ + r->used_path_info = AP_REQ_DEFAULT_PATH_INFO; + + r->useragent_addr = c->client_addr; + r->useragent_ip = c->client_ip; + + ap_run_pre_read_request(r, c); + + /* Time to populate r with the data we have. */ + r->request_time = req->request_time; + r->method = req->method; + /* Provide quick information about the request method as soon as known */ + r->method_number = ap_method_number_of(r->method); + if (r->method_number == M_GET && r->method[0] == 'H') { + r->header_only = 1; + } + + rpath = (req->path ? req->path : ""); + ap_parse_uri(r, rpath); + r->protocol = (char*)"HTTP/2.0"; + r->proto_num = HTTP_VERSION(2, 0); + + r->the_request = apr_psprintf(r->pool, "%s %s %s", + r->method, rpath, r->protocol); + + /* update what we think the virtual host is based on the headers we've + * now read. may update status. + * Leave r->hostname empty, vhost will parse if form our Host: header, + * otherwise we get complains about port numbers. 
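     * (In other words: r->hostname stays NULL so that
     * ap_update_vhost_from_headers() derives host and port from the Host
     * header, which h2_request_end_headers() filled in from :authority.)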
+ */ + r->hostname = NULL; + ap_update_vhost_from_headers(r); + + /* we may have switched to another server */ + r->per_dir_config = r->server->lookup_defaults; + + s = apr_table_get(r->headers_in, "Expect"); + if (s && s[0]) { + if (ap_cstr_casecmp(s, "100-continue") == 0) { + r->expecting_100 = 1; + } + else { + r->status = HTTP_EXPECTATION_FAILED; + ap_send_error_response(r, 0); + } + } + + /* + * Add the HTTP_IN filter here to ensure that ap_discard_request_body + * called by ap_die and by ap_send_error_response works correctly on + * status codes that do not cause the connection to be dropped and + * in situations where the connection should be kept alive. + */ + ap_add_input_filter_handle(ap_http_input_filter_handle, + NULL, r, r->connection); + + if (access_status != HTTP_OK + || (access_status = ap_run_post_read_request(r))) { + /* Request check post hooks failed. An example of this would be a + * request for a vhost where h2 is disabled --> 421. + */ + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, c, APLOGNO(03367) + "h2_request: access_status=%d, request_create failed", + access_status); + ap_die(access_status, r); + ap_update_child_status(c->sbh, SERVER_BUSY_LOG, r); + ap_run_log_transaction(r); + r = NULL; + goto traceout; + } + + AP_READ_REQUEST_SUCCESS((uintptr_t)r, (char *)r->method, + (char *)r->uri, (char *)r->server->defn_name, + r->status); + return r; +traceout: + AP_READ_REQUEST_FAILURE((uintptr_t)r); + return r; +} + + diff --git a/modules/http2/h2_request.h b/modules/http2/h2_request.h new file mode 100644 index 0000000..48aee09 --- /dev/null +++ b/modules/http2/h2_request.h @@ -0,0 +1,48 @@ +/* Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef __mod_h2__h2_request__ +#define __mod_h2__h2_request__ + +#include "h2.h" + +apr_status_t h2_request_rcreate(h2_request **preq, apr_pool_t *pool, + request_rec *r); + +apr_status_t h2_request_add_header(h2_request *req, apr_pool_t *pool, + const char *name, size_t nlen, + const char *value, size_t vlen); + +apr_status_t h2_request_add_trailer(h2_request *req, apr_pool_t *pool, + const char *name, size_t nlen, + const char *value, size_t vlen); + +apr_status_t h2_request_end_headers(h2_request *req, apr_pool_t *pool, int eos, size_t raw_bytes); + +h2_request *h2_request_clone(apr_pool_t *p, const h2_request *src); + +/** + * Create a request_rec representing the h2_request to be + * processed on the given connection. 
+ * + * @param req the h2 request to process + * @param conn the connection to process the request on + * @return the request_rec representing the request + */ +request_rec *h2_request_create_rec(const h2_request *req, conn_rec *conn); + + +#endif /* defined(__mod_h2__h2_request__) */ diff --git a/modules/http2/h2_session.c b/modules/http2/h2_session.c new file mode 100644 index 0000000..ed96cf0 --- /dev/null +++ b/modules/http2/h2_session.c @@ -0,0 +1,2360 @@ +/* Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include <assert.h> +#include <stddef.h> +#include <apr_thread_cond.h> +#include <apr_base64.h> +#include <apr_strings.h> + +#include <ap_mpm.h> + +#include <httpd.h> +#include <http_core.h> +#include <http_config.h> +#include <http_log.h> +#include <scoreboard.h> + +#include <mpm_common.h> + +#include "h2_private.h" +#include "h2.h" +#include "h2_bucket_beam.h" +#include "h2_bucket_eos.h" +#include "h2_config.h" +#include "h2_ctx.h" +#include "h2_filter.h" +#include "h2_h2.h" +#include "h2_mplx.h" +#include "h2_push.h" +#include "h2_request.h" +#include "h2_headers.h" +#include "h2_stream.h" +#include "h2_task.h" +#include "h2_session.h" +#include "h2_util.h" +#include "h2_version.h" +#include "h2_workers.h" + + +static apr_status_t dispatch_master(h2_session *session); +static apr_status_t h2_session_read(h2_session *session, int block); +static void transit(h2_session *session, const char *action, + h2_session_state nstate); + +static void on_stream_state_enter(void *ctx, h2_stream *stream); +static void on_stream_state_event(void *ctx, h2_stream *stream, h2_stream_event_t ev); +static void on_stream_event(void *ctx, h2_stream *stream, h2_stream_event_t ev); + +static int h2_session_status_from_apr_status(apr_status_t rv) +{ + if (rv == APR_SUCCESS) { + return NGHTTP2_NO_ERROR; + } + else if (APR_STATUS_IS_EAGAIN(rv)) { + return NGHTTP2_ERR_WOULDBLOCK; + } + else if (APR_STATUS_IS_EOF(rv)) { + return NGHTTP2_ERR_EOF; + } + return NGHTTP2_ERR_PROTO; +} + +h2_stream *h2_session_stream_get(h2_session *session, int stream_id) +{ + return nghttp2_session_get_stream_user_data(session->ngh2, stream_id); +} + +static void dispatch_event(h2_session *session, h2_session_event_t ev, + int err, const char *msg); + +void h2_session_event(h2_session *session, h2_session_event_t ev, + int err, const char *msg) +{ + dispatch_event(session, ev, err, msg); +} + +static int rst_unprocessed_stream(h2_stream *stream, void *ctx) +{ + int unprocessed = (!h2_stream_was_closed(stream) + && (H2_STREAM_CLIENT_INITIATED(stream->id)? 
+ (!stream->session->local.accepting + && stream->id > stream->session->local.accepted_max) + : + (!stream->session->remote.accepting + && stream->id > stream->session->remote.accepted_max)) + ); + if (unprocessed) { + h2_stream_rst(stream, H2_ERR_NO_ERROR); + return 0; + } + return 1; +} + +static void cleanup_unprocessed_streams(h2_session *session) +{ + h2_mplx_stream_do(session->mplx, rst_unprocessed_stream, session); +} + +static h2_stream *h2_session_open_stream(h2_session *session, int stream_id, + int initiated_on) +{ + h2_stream * stream; + apr_pool_t *stream_pool; + + apr_pool_create(&stream_pool, session->pool); + apr_pool_tag(stream_pool, "h2_stream"); + + stream = h2_stream_create(stream_id, stream_pool, session, + session->monitor, initiated_on); + if (stream) { + nghttp2_session_set_stream_user_data(session->ngh2, stream_id, stream); + } + return stream; +} + +/** + * Determine the importance of streams when scheduling tasks. + * - if both stream depend on the same one, compare weights + * - if one stream is closer to the root, prioritize that one + * - if both are on the same level, use the weight of their root + * level ancestors + */ +static int spri_cmp(int sid1, nghttp2_stream *s1, + int sid2, nghttp2_stream *s2, h2_session *session) +{ + nghttp2_stream *p1, *p2; + + p1 = nghttp2_stream_get_parent(s1); + p2 = nghttp2_stream_get_parent(s2); + + if (p1 == p2) { + int32_t w1, w2; + + w1 = nghttp2_stream_get_weight(s1); + w2 = nghttp2_stream_get_weight(s2); + return w2 - w1; + } + else if (!p1) { + /* stream 1 closer to root */ + return -1; + } + else if (!p2) { + /* stream 2 closer to root */ + return 1; + } + return spri_cmp(sid1, p1, sid2, p2, session); +} + +static int stream_pri_cmp(int sid1, int sid2, void *ctx) +{ + h2_session *session = ctx; + nghttp2_stream *s1, *s2; + + s1 = nghttp2_session_find_stream(session->ngh2, sid1); + s2 = nghttp2_session_find_stream(session->ngh2, sid2); + + if (s1 == s2) { + return 0; + } + else if (!s1) { + return 1; + } + else if (!s2) { + return -1; + } + return spri_cmp(sid1, s1, sid2, s2, session); +} + +/* + * Callback when nghttp2 wants to send bytes back to the client. 
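 * nghttp2 expects the number of bytes accepted, NGHTTP2_ERR_WOULDBLOCK when
 * the connection cannot take more data right now, or a fatal error code,
 * which send_cb() below derives from the APR status of the write.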
+ */ +static ssize_t send_cb(nghttp2_session *ngh2, + const uint8_t *data, size_t length, + int flags, void *userp) +{ + h2_session *session = (h2_session *)userp; + apr_status_t status; + (void)ngh2; + (void)flags; + + status = h2_conn_io_write(&session->io, (const char *)data, length); + if (status == APR_SUCCESS) { + return length; + } + if (APR_STATUS_IS_EAGAIN(status)) { + return NGHTTP2_ERR_WOULDBLOCK; + } + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, session->c, APLOGNO(03062) + "h2_session: send error"); + return h2_session_status_from_apr_status(status); +} + +static int on_invalid_frame_recv_cb(nghttp2_session *ngh2, + const nghttp2_frame *frame, + int error, void *userp) +{ + h2_session *session = (h2_session *)userp; + (void)ngh2; + + if (APLOGcdebug(session->c)) { + char buffer[256]; + + h2_util_frame_print(frame, buffer, sizeof(buffer)/sizeof(buffer[0])); + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, + H2_SSSN_LOG(APLOGNO(03063), session, + "recv invalid FRAME[%s], frames=%ld/%ld (r/s)"), + buffer, (long)session->frames_received, + (long)session->frames_sent); + } + return 0; +} + +static int on_data_chunk_recv_cb(nghttp2_session *ngh2, uint8_t flags, + int32_t stream_id, + const uint8_t *data, size_t len, void *userp) +{ + h2_session *session = (h2_session *)userp; + apr_status_t status = APR_EINVAL; + h2_stream * stream; + int rv = 0; + + stream = h2_session_stream_get(session, stream_id); + if (stream) { + status = h2_stream_recv_DATA(stream, flags, data, len); + dispatch_event(session, H2_SESSION_EV_STREAM_CHANGE, 0, "stream data rcvd"); + } + else { + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, APLOGNO(03064) + "h2_stream(%ld-%d): on_data_chunk for unknown stream", + session->id, (int)stream_id); + rv = NGHTTP2_ERR_CALLBACK_FAILURE; + } + + if (status != APR_SUCCESS) { + /* count this as consumed explicitly as no one will read it */ + nghttp2_session_consume(session->ngh2, stream_id, len); + } + return rv; +} + +static int on_stream_close_cb(nghttp2_session *ngh2, int32_t stream_id, + uint32_t error_code, void *userp) +{ + h2_session *session = (h2_session *)userp; + h2_stream *stream; + + (void)ngh2; + stream = h2_session_stream_get(session, stream_id); + if (stream) { + if (error_code) { + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, + H2_STRM_LOG(APLOGNO(03065), stream, + "closing with err=%d %s"), + (int)error_code, h2_h2_err_description(error_code)); + h2_stream_rst(stream, error_code); + } + } + return 0; +} + +static int on_begin_headers_cb(nghttp2_session *ngh2, + const nghttp2_frame *frame, void *userp) +{ + h2_session *session = (h2_session *)userp; + h2_stream *s; + + /* We may see HEADERs at the start of a stream or after all DATA + * streams to carry trailers. */ + (void)ngh2; + s = h2_session_stream_get(session, frame->hd.stream_id); + if (s) { + /* nop */ + } + else { + s = h2_session_open_stream(userp, frame->hd.stream_id, 0); + } + return s? 
0 : NGHTTP2_ERR_START_STREAM_NOT_ALLOWED; +} + +static int on_header_cb(nghttp2_session *ngh2, const nghttp2_frame *frame, + const uint8_t *name, size_t namelen, + const uint8_t *value, size_t valuelen, + uint8_t flags, + void *userp) +{ + h2_session *session = (h2_session *)userp; + h2_stream * stream; + apr_status_t status; + + (void)flags; + stream = h2_session_stream_get(session, frame->hd.stream_id); + if (!stream) { + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, APLOGNO(02920) + "h2_stream(%ld-%d): on_header unknown stream", + session->id, (int)frame->hd.stream_id); + return NGHTTP2_ERR_TEMPORAL_CALLBACK_FAILURE; + } + + status = h2_stream_add_header(stream, (const char *)name, namelen, + (const char *)value, valuelen); + if (status != APR_SUCCESS && !h2_stream_is_ready(stream)) { + return NGHTTP2_ERR_TEMPORAL_CALLBACK_FAILURE; + } + return 0; +} + +/** + * nghttp2 session has received a complete frame. Most are used by nghttp2 + * for processing of internal state. Some, like HEADER and DATA frames, + * we need to act on. + */ +static int on_frame_recv_cb(nghttp2_session *ng2s, + const nghttp2_frame *frame, + void *userp) +{ + h2_session *session = (h2_session *)userp; + h2_stream *stream; + apr_status_t rv = APR_SUCCESS; + + if (APLOGcdebug(session->c)) { + char buffer[256]; + + h2_util_frame_print(frame, buffer, sizeof(buffer)/sizeof(buffer[0])); + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, + H2_SSSN_LOG(APLOGNO(03066), session, + "recv FRAME[%s], frames=%ld/%ld (r/s)"), + buffer, (long)session->frames_received, + (long)session->frames_sent); + } + + ++session->frames_received; + switch (frame->hd.type) { + case NGHTTP2_HEADERS: + /* This can be HEADERS for a new stream, defining the request, + * or HEADER may come after DATA at the end of a stream as in + * trailers */ + stream = h2_session_stream_get(session, frame->hd.stream_id); + if (stream) { + rv = h2_stream_recv_frame(stream, NGHTTP2_HEADERS, frame->hd.flags, + frame->hd.length + H2_FRAME_HDR_LEN); + } + break; + case NGHTTP2_DATA: + stream = h2_session_stream_get(session, frame->hd.stream_id); + if (stream) { + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, + H2_STRM_LOG(APLOGNO(02923), stream, + "DATA, len=%ld, flags=%d"), + (long)frame->hd.length, frame->hd.flags); + rv = h2_stream_recv_frame(stream, NGHTTP2_DATA, frame->hd.flags, + frame->hd.length + H2_FRAME_HDR_LEN); + } + break; + case NGHTTP2_PRIORITY: + session->reprioritize = 1; + ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, session->c, + "h2_stream(%ld-%d): PRIORITY frame " + " weight=%d, dependsOn=%d, exclusive=%d", + session->id, (int)frame->hd.stream_id, + frame->priority.pri_spec.weight, + frame->priority.pri_spec.stream_id, + frame->priority.pri_spec.exclusive); + break; + case NGHTTP2_WINDOW_UPDATE: + ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, session->c, + "h2_stream(%ld-%d): WINDOW_UPDATE incr=%d", + session->id, (int)frame->hd.stream_id, + frame->window_update.window_size_increment); + if (nghttp2_session_want_write(session->ngh2)) { + dispatch_event(session, H2_SESSION_EV_FRAME_RCVD, 0, "window update"); + } + break; + case NGHTTP2_RST_STREAM: + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, APLOGNO(03067) + "h2_stream(%ld-%d): RST_STREAM by client, errror=%d", + session->id, (int)frame->hd.stream_id, + (int)frame->rst_stream.error_code); + stream = h2_session_stream_get(session, frame->hd.stream_id); + if (stream && stream->initiated_on) { + ++session->pushes_reset; + } + else { + ++session->streams_reset; + } + break; + 
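            /* Per RFC 7540, ch. 6.8, a GOAWAY carrying NO_ERROR and the
             * maximum stream id (2^31-1) is only a shutdown notice: no
             * streams are being rejected yet. The case below treats it as
             * such; any other GOAWAY records the client's last accepted
             * stream id and triggers the remote-goaway event. */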
case NGHTTP2_GOAWAY: + if (frame->goaway.error_code == 0 + && frame->goaway.last_stream_id == ((1u << 31) - 1)) { + /* shutdown notice. Should not come from a client... */ + session->remote.accepting = 0; + } + else { + session->remote.accepted_max = frame->goaway.last_stream_id; + dispatch_event(session, H2_SESSION_EV_REMOTE_GOAWAY, + frame->goaway.error_code, NULL); + } + break; + case NGHTTP2_SETTINGS: + if (APLOGctrace2(session->c)) { + ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, session->c, + H2_SSSN_MSG(session, "SETTINGS, len=%ld"), (long)frame->hd.length); + } + break; + default: + if (APLOGctrace2(session->c)) { + char buffer[256]; + + h2_util_frame_print(frame, buffer, + sizeof(buffer)/sizeof(buffer[0])); + ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, session->c, + H2_SSSN_MSG(session, "on_frame_rcv %s"), buffer); + } + break; + } + + if (session->state == H2_SESSION_ST_IDLE) { + /* We received a frame, but session is in state IDLE. That means the frame + * did not really progress any of the (possibly) open streams. It was a meta + * frame, e.g. SETTINGS/WINDOW_UPDATE/unknown/etc. + * Remember: IDLE means we cannot send because either there are no streams open or + * all open streams are blocked on exhausted WINDOWs for outgoing data. + * The more frames we receive that do not change this, the less interested we + * become in serving this connection. This is expressed in increasing "idle_delays". + * Eventually, the connection will timeout and we'll close it. */ + session->idle_frames = H2MIN(session->idle_frames + 1, session->frames_received); + ap_log_cerror( APLOG_MARK, APLOG_TRACE2, 0, session->c, + H2_SSSN_MSG(session, "session has %ld idle frames"), + (long)session->idle_frames); + if (session->idle_frames > 10) { + apr_size_t busy_frames = H2MAX(session->frames_received - session->idle_frames, 1); + int idle_ratio = (int)(session->idle_frames / busy_frames); + if (idle_ratio > 100) { + session->idle_delay = apr_time_from_msec(H2MIN(1000, idle_ratio)); + } + else if (idle_ratio > 10) { + session->idle_delay = apr_time_from_msec(10); + } + else if (idle_ratio > 1) { + session->idle_delay = apr_time_from_msec(1); + } + else { + session->idle_delay = 0; + } + } + } + + if (APR_SUCCESS != rv) return NGHTTP2_ERR_PROTO; + return 0; +} + +static int h2_session_continue_data(h2_session *session) { + if (h2_mplx_has_master_events(session->mplx)) { + return 0; + } + if (h2_conn_io_needs_flush(&session->io)) { + return 0; + } + return 1; +} + +static char immortal_zeros[H2_MAX_PADLEN]; + +static int on_send_data_cb(nghttp2_session *ngh2, + nghttp2_frame *frame, + const uint8_t *framehd, + size_t length, + nghttp2_data_source *source, + void *userp) +{ + apr_status_t status = APR_SUCCESS; + h2_session *session = (h2_session *)userp; + int stream_id = (int)frame->hd.stream_id; + unsigned char padlen; + int eos; + h2_stream *stream; + apr_bucket *b; + apr_off_t len = length; + + (void)ngh2; + (void)source; + if (!h2_session_continue_data(session)) { + return NGHTTP2_ERR_WOULDBLOCK; + } + + if (frame->data.padlen > H2_MAX_PADLEN) { + return NGHTTP2_ERR_PROTO; + } + padlen = (unsigned char)frame->data.padlen; + + stream = h2_session_stream_get(session, stream_id); + if (!stream) { + ap_log_cerror(APLOG_MARK, APLOG_ERR, APR_NOTFOUND, session->c, + APLOGNO(02924) + "h2_stream(%ld-%d): send_data, stream not found", + session->id, (int)stream_id); + return NGHTTP2_ERR_CALLBACK_FAILURE; + } + + ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, session->c, + H2_STRM_MSG(stream, "send_data_cb for %ld 
bytes"), + (long)length); + + status = h2_conn_io_write(&session->io, (const char *)framehd, 9); + if (padlen && status == APR_SUCCESS) { + status = h2_conn_io_write(&session->io, (const char *)&padlen, 1); + } + + if (status != APR_SUCCESS) { + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, session->c, + H2_STRM_MSG(stream, "writing frame header")); + return NGHTTP2_ERR_CALLBACK_FAILURE; + } + + status = h2_stream_read_to(stream, session->bbtmp, &len, &eos); + if (status != APR_SUCCESS) { + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, session->c, + H2_STRM_MSG(stream, "send_data_cb, reading stream")); + apr_brigade_cleanup(session->bbtmp); + return NGHTTP2_ERR_CALLBACK_FAILURE; + } + else if (len != length) { + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, session->c, + H2_STRM_MSG(stream, "send_data_cb, wanted %ld bytes, " + "got %ld from stream"), (long)length, (long)len); + apr_brigade_cleanup(session->bbtmp); + return NGHTTP2_ERR_CALLBACK_FAILURE; + } + + if (padlen) { + b = apr_bucket_immortal_create(immortal_zeros, padlen, + session->c->bucket_alloc); + APR_BRIGADE_INSERT_TAIL(session->bbtmp, b); + } + + status = h2_conn_io_pass(&session->io, session->bbtmp); + apr_brigade_cleanup(session->bbtmp); + + if (status == APR_SUCCESS) { + stream->out_data_frames++; + stream->out_data_octets += length; + return 0; + } + else { + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, session->c, + H2_STRM_LOG(APLOGNO(02925), stream, "failed send_data_cb")); + return NGHTTP2_ERR_CALLBACK_FAILURE; + } +} + +static int on_frame_send_cb(nghttp2_session *ngh2, + const nghttp2_frame *frame, + void *user_data) +{ + h2_session *session = user_data; + h2_stream *stream; + int stream_id = frame->hd.stream_id; + + ++session->frames_sent; + switch (frame->hd.type) { + case NGHTTP2_PUSH_PROMISE: + /* PUSH_PROMISE we report on the promised stream */ + stream_id = frame->push_promise.promised_stream_id; + break; + default: + break; + } + + if (APLOGcdebug(session->c)) { + char buffer[256]; + + h2_util_frame_print(frame, buffer, sizeof(buffer)/sizeof(buffer[0])); + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, + H2_SSSN_LOG(APLOGNO(03068), session, + "sent FRAME[%s], frames=%ld/%ld (r/s)"), + buffer, (long)session->frames_received, + (long)session->frames_sent); + } + + stream = h2_session_stream_get(session, stream_id); + if (stream) { + h2_stream_send_frame(stream, frame->hd.type, frame->hd.flags, + frame->hd.length + H2_FRAME_HDR_LEN); + } + return 0; +} + +#ifdef H2_NG2_INVALID_HEADER_CB +static int on_invalid_header_cb(nghttp2_session *ngh2, + const nghttp2_frame *frame, + const uint8_t *name, size_t namelen, + const uint8_t *value, size_t valuelen, + uint8_t flags, void *user_data) +{ + h2_session *session = user_data; + h2_stream *stream; + + if (APLOGcdebug(session->c)) { + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, APLOGNO(03456) + "h2_stream(%ld-%d): invalid header '%s: %s'", + session->id, (int)frame->hd.stream_id, + apr_pstrndup(session->pool, (const char *)name, namelen), + apr_pstrndup(session->pool, (const char *)value, valuelen)); + } + stream = h2_session_stream_get(session, frame->hd.stream_id); + if (stream) { + h2_stream_rst(stream, NGHTTP2_PROTOCOL_ERROR); + } + return 0; +} +#endif + +#define NGH2_SET_CALLBACK(callbacks, name, fn)\ +nghttp2_session_callbacks_set_##name##_callback(callbacks, fn) + +static apr_status_t init_callbacks(conn_rec *c, nghttp2_session_callbacks **pcb) +{ + int rv = nghttp2_session_callbacks_new(pcb); + if (rv != 0) { + 
ap_log_cerror(APLOG_MARK, APLOG_ERR, 0, c, + APLOGNO(02926) "nghttp2_session_callbacks_new: %s", + nghttp2_strerror(rv)); + return APR_EGENERAL; + } + + NGH2_SET_CALLBACK(*pcb, send, send_cb); + NGH2_SET_CALLBACK(*pcb, on_frame_recv, on_frame_recv_cb); + NGH2_SET_CALLBACK(*pcb, on_invalid_frame_recv, on_invalid_frame_recv_cb); + NGH2_SET_CALLBACK(*pcb, on_data_chunk_recv, on_data_chunk_recv_cb); + NGH2_SET_CALLBACK(*pcb, on_stream_close, on_stream_close_cb); + NGH2_SET_CALLBACK(*pcb, on_begin_headers, on_begin_headers_cb); + NGH2_SET_CALLBACK(*pcb, on_header, on_header_cb); + NGH2_SET_CALLBACK(*pcb, send_data, on_send_data_cb); + NGH2_SET_CALLBACK(*pcb, on_frame_send, on_frame_send_cb); +#ifdef H2_NG2_INVALID_HEADER_CB + NGH2_SET_CALLBACK(*pcb, on_invalid_header, on_invalid_header_cb); +#endif + return APR_SUCCESS; +} + +static apr_status_t h2_session_shutdown_notice(h2_session *session) +{ + apr_status_t status; + + ap_assert(session); + if (!session->local.accepting) { + return APR_SUCCESS; + } + + nghttp2_submit_shutdown_notice(session->ngh2); + session->local.accepting = 0; + status = nghttp2_session_send(session->ngh2); + if (status == APR_SUCCESS) { + status = h2_conn_io_flush(&session->io); + } + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, + H2_SSSN_LOG(APLOGNO(03457), session, "sent shutdown notice")); + return status; +} + +static apr_status_t h2_session_shutdown(h2_session *session, int error, + const char *msg, int force_close) +{ + apr_status_t status = APR_SUCCESS; + + ap_assert(session); + if (session->local.shutdown) { + return APR_SUCCESS; + } + if (!msg && error) { + msg = nghttp2_strerror(error); + } + + if (error || force_close) { + /* not a graceful shutdown, we want to leave... + * Do not start further streams that are waiting to be scheduled. + * Find out the max stream id that we habe been processed or + * are still actively working on. + * Remove all streams greater than this number without submitting + * a RST_STREAM frame, since that should be clear from the GOAWAY + * we send. */ + session->local.accepted_max = h2_mplx_shutdown(session->mplx); + session->local.error = error; + } + else { + /* graceful shutdown. we will continue processing all streams + * we have, but no longer accept new ones. Report the max stream + * we have received and discard all new ones. */ + } + + session->local.accepting = 0; + session->local.shutdown = 1; + if (!session->c->aborted) { + nghttp2_submit_goaway(session->ngh2, NGHTTP2_FLAG_NONE, + session->local.accepted_max, + error, (uint8_t*)msg, msg? strlen(msg):0); + status = nghttp2_session_send(session->ngh2); + if (status == APR_SUCCESS) { + status = h2_conn_io_flush(&session->io); + } + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, + H2_SSSN_LOG(APLOGNO(03069), session, + "sent GOAWAY, err=%d, msg=%s"), error, msg? msg : ""); + } + dispatch_event(session, H2_SESSION_EV_LOCAL_GOAWAY, error, msg); + return status; +} + +static apr_status_t session_cleanup(h2_session *session, const char *trigger) +{ + conn_rec *c = session->c; + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c, + H2_SSSN_MSG(session, "pool_cleanup")); + + if (session->state != H2_SESSION_ST_DONE + && session->state != H2_SESSION_ST_INIT) { + /* Not good. The connection is being torn down and we have + * not sent a goaway. This is considered a protocol error and + * the client has to assume that any streams "in flight" may have + * been processed and are not safe to retry. 
+ * As clients with idle connection may only learn about a closed + * connection when sending the next request, this has the effect + * that at least this one request will fail. + */ + ap_log_cerror(APLOG_MARK, APLOG_WARNING, 0, c, + H2_SSSN_LOG(APLOGNO(03199), session, + "connection disappeared without proper " + "goodbye, clients will be confused, should not happen")); + } + + transit(session, trigger, H2_SESSION_ST_CLEANUP); + h2_mplx_release_and_join(session->mplx, session->iowait); + session->mplx = NULL; + + ap_assert(session->ngh2); + nghttp2_session_del(session->ngh2); + session->ngh2 = NULL; + h2_ctx_clear(c); + + + return APR_SUCCESS; +} + +static apr_status_t session_pool_cleanup(void *data) +{ + conn_rec *c = data; + h2_session *session; + h2_ctx *ctx = h2_ctx_get(c, 0); + + if (ctx && (session = h2_ctx_session_get(ctx))) { + /* if the session is still there, now is the last chance + * to perform cleanup. Normally, cleanup should have happened + * earlier in the connection pre_close. Main reason is that + * any ongoing requests on slave connections might still access + * data which has, at this time, already been freed. An example + * is mod_ssl that uses request hooks. */ + ap_log_cerror(APLOG_MARK, APLOG_WARNING, 0, c, + H2_SSSN_LOG(APLOGNO(10020), session, + "session cleanup triggered by pool cleanup. " + "this should have happened earlier already.")); + return session_cleanup(session, "pool cleanup"); + } + return APR_SUCCESS; +} + +static apr_status_t h2_session_create_int(h2_session **psession, + conn_rec *c, + request_rec *r, + h2_ctx *ctx, + h2_workers *workers) +{ + nghttp2_session_callbacks *callbacks = NULL; + nghttp2_option *options = NULL; + apr_allocator_t *allocator; + apr_thread_mutex_t *mutex; + uint32_t n; + apr_pool_t *pool = NULL; + h2_session *session; + apr_status_t status; + int rv; + + *psession = NULL; + status = apr_allocator_create(&allocator); + if (status != APR_SUCCESS) { + return status; + } + apr_allocator_max_free_set(allocator, ap_max_mem_free); + apr_pool_create_ex(&pool, c->pool, NULL, allocator); + if (!pool) { + apr_allocator_destroy(allocator); + return APR_ENOMEM; + } + apr_pool_tag(pool, "h2_session"); + apr_allocator_owner_set(allocator, pool); + status = apr_thread_mutex_create(&mutex, APR_THREAD_MUTEX_DEFAULT, pool); + if (status != APR_SUCCESS) { + apr_pool_destroy(pool); + return APR_ENOMEM; + } + apr_allocator_mutex_set(allocator, mutex); + + session = apr_pcalloc(pool, sizeof(h2_session)); + if (!session) { + return APR_ENOMEM; + } + + *psession = session; + session->id = c->id; + session->c = c; + session->r = r; + session->s = h2_ctx_server_get(ctx); + session->pool = pool; + session->config = h2_config_sget(session->s); + session->workers = workers; + + session->state = H2_SESSION_ST_INIT; + session->local.accepting = 1; + session->remote.accepting = 1; + + session->max_stream_count = h2_config_geti(session->config, + H2_CONF_MAX_STREAMS); + session->max_stream_mem = h2_config_geti(session->config, + H2_CONF_STREAM_MAX_MEM); + + status = apr_thread_cond_create(&session->iowait, session->pool); + if (status != APR_SUCCESS) { + apr_pool_destroy(pool); + return status; + } + + session->in_pending = h2_iq_create(session->pool, (int)session->max_stream_count); + if (session->in_pending == NULL) { + apr_pool_destroy(pool); + return APR_ENOMEM; + } + + session->in_process = h2_iq_create(session->pool, (int)session->max_stream_count); + if (session->in_process == NULL) { + apr_pool_destroy(pool); + return APR_ENOMEM; + } + + 
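    /* The stream monitor set up below routes per-stream state changes back
     * into this session, see on_stream_state_enter(), on_stream_state_event()
     * and on_stream_event(). */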
session->monitor = apr_pcalloc(pool, sizeof(h2_stream_monitor)); + if (session->monitor == NULL) { + apr_pool_destroy(pool); + return APR_ENOMEM; + } + session->monitor->ctx = session; + session->monitor->on_state_enter = on_stream_state_enter; + session->monitor->on_state_event = on_stream_state_event; + session->monitor->on_event = on_stream_event; + + session->mplx = h2_mplx_create(c, session->pool, session->config, + workers); + + /* connection input filter that feeds the session */ + session->cin = h2_filter_cin_create(session); + ap_add_input_filter("H2_IN", session->cin, r, c); + + h2_conn_io_init(&session->io, c, session->config); + session->bbtmp = apr_brigade_create(session->pool, c->bucket_alloc); + + status = init_callbacks(c, &callbacks); + if (status != APR_SUCCESS) { + ap_log_cerror(APLOG_MARK, APLOG_ERR, status, c, APLOGNO(02927) + "nghttp2: error in init_callbacks"); + apr_pool_destroy(pool); + return status; + } + + rv = nghttp2_option_new(&options); + if (rv != 0) { + ap_log_cerror(APLOG_MARK, APLOG_ERR, APR_EGENERAL, c, + APLOGNO(02928) "nghttp2_option_new: %s", + nghttp2_strerror(rv)); + apr_pool_destroy(pool); + return status; + } + nghttp2_option_set_peer_max_concurrent_streams( + options, (uint32_t)session->max_stream_count); + /* We need to handle window updates ourself, otherwise we + * get flooded by nghttp2. */ + nghttp2_option_set_no_auto_window_update(options, 1); + + rv = nghttp2_session_server_new2(&session->ngh2, callbacks, + session, options); + nghttp2_session_callbacks_del(callbacks); + nghttp2_option_del(options); + + if (rv != 0) { + ap_log_cerror(APLOG_MARK, APLOG_ERR, APR_EGENERAL, c, + APLOGNO(02929) "nghttp2_session_server_new: %s", + nghttp2_strerror(rv)); + apr_pool_destroy(pool); + return APR_ENOMEM; + } + + n = h2_config_geti(session->config, H2_CONF_PUSH_DIARY_SIZE); + session->push_diary = h2_push_diary_create(session->pool, n); + + if (APLOGcdebug(c)) { + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, c, + H2_SSSN_LOG(APLOGNO(03200), session, + "created, max_streams=%d, stream_mem=%d, " + "workers_limit=%d, workers_max=%d, " + "push_diary(type=%d,N=%d)"), + (int)session->max_stream_count, + (int)session->max_stream_mem, + session->mplx->limit_active, + session->mplx->max_active, + session->push_diary->dtype, + (int)session->push_diary->N); + } + + apr_pool_pre_cleanup_register(pool, c, session_pool_cleanup); + return APR_SUCCESS; +} + +apr_status_t h2_session_create(h2_session **psession, + conn_rec *c, h2_ctx *ctx, h2_workers *workers) +{ + return h2_session_create_int(psession, c, NULL, ctx, workers); +} + +apr_status_t h2_session_rcreate(h2_session **psession, + request_rec *r, h2_ctx *ctx, h2_workers *workers) +{ + return h2_session_create_int(psession, r->connection, r, ctx, workers); +} + +static apr_status_t h2_session_start(h2_session *session, int *rv) +{ + apr_status_t status = APR_SUCCESS; + nghttp2_settings_entry settings[3]; + size_t slen; + int win_size; + + ap_assert(session); + /* Start the conversation by submitting our SETTINGS frame */ + *rv = 0; + if (session->r) { + const char *s, *cs; + apr_size_t dlen; + h2_stream * stream; + + /* 'h2c' mode: we should have a 'HTTP2-Settings' header with + * base64 encoded client settings. 
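         * [Editor's note, illustrative only] A typical 'h2c' upgrade request
         * looks like this (hypothetical host and path):
         *
         *   GET /index.html HTTP/1.1
         *   Host: example.org
         *   Connection: Upgrade, HTTP2-Settings
         *   Upgrade: h2c
         *   HTTP2-Settings: <base64url of the client's SETTINGS payload>
         *
         * (see RFC 7540, section 3.2); that header value is what gets decoded
         * and handed to nghttp2_session_upgrade() below.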
*/ + s = apr_table_get(session->r->headers_in, "HTTP2-Settings"); + if (!s) { + ap_log_rerror(APLOG_MARK, APLOG_ERR, APR_EINVAL, session->r, + APLOGNO(02931) + "HTTP2-Settings header missing in request"); + return APR_EINVAL; + } + cs = NULL; + dlen = h2_util_base64url_decode(&cs, s, session->pool); + + if (APLOGrdebug(session->r)) { + char buffer[128]; + h2_util_hex_dump(buffer, 128, (char*)cs, dlen); + ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, session->r, APLOGNO(03070) + "upgrading h2c session with HTTP2-Settings: %s -> %s (%d)", + s, buffer, (int)dlen); + } + + *rv = nghttp2_session_upgrade(session->ngh2, (uint8_t*)cs, dlen, NULL); + if (*rv != 0) { + status = APR_EINVAL; + ap_log_rerror(APLOG_MARK, APLOG_ERR, status, session->r, + APLOGNO(02932) "nghttp2_session_upgrade: %s", + nghttp2_strerror(*rv)); + return status; + } + + /* Now we need to auto-open stream 1 for the request we got. */ + stream = h2_session_open_stream(session, 1, 0); + if (!stream) { + status = APR_EGENERAL; + ap_log_rerror(APLOG_MARK, APLOG_ERR, status, session->r, + APLOGNO(02933) "open stream 1: %s", + nghttp2_strerror(*rv)); + return status; + } + + status = h2_stream_set_request_rec(stream, session->r, 1); + if (status != APR_SUCCESS) { + return status; + } + } + + slen = 0; + settings[slen].settings_id = NGHTTP2_SETTINGS_MAX_CONCURRENT_STREAMS; + settings[slen].value = (uint32_t)session->max_stream_count; + ++slen; + win_size = h2_config_geti(session->config, H2_CONF_WIN_SIZE); + if (win_size != H2_INITIAL_WINDOW_SIZE) { + settings[slen].settings_id = NGHTTP2_SETTINGS_INITIAL_WINDOW_SIZE; + settings[slen].value = win_size; + ++slen; + } + + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, session->c, + H2_SSSN_LOG(APLOGNO(03201), session, + "start, INITIAL_WINDOW_SIZE=%ld, MAX_CONCURRENT_STREAMS=%d"), + (long)win_size, (int)session->max_stream_count); + *rv = nghttp2_submit_settings(session->ngh2, NGHTTP2_FLAG_NONE, + settings, slen); + if (*rv != 0) { + status = APR_EGENERAL; + ap_log_cerror(APLOG_MARK, APLOG_ERR, status, session->c, + H2_SSSN_LOG(APLOGNO(02935), session, + "nghttp2_submit_settings: %s"), nghttp2_strerror(*rv)); + } + else { + /* use maximum possible value for connection window size. We are only + * interested in per stream flow control. which have the initial window + * size configured above. + * Therefore, for our use, the connection window can only get in the + * way. Example: if we allow 100 streams with a 32KB window each, we + * buffer up to 3.2 MB of data. Unless we do separate connection window + * interim updates, any smaller connection window will lead to blocking + * in DATA flow. + */ + *rv = nghttp2_submit_window_update(session->ngh2, NGHTTP2_FLAG_NONE, + 0, NGHTTP2_MAX_WINDOW_SIZE - win_size); + if (*rv != 0) { + status = APR_EGENERAL; + ap_log_cerror(APLOG_MARK, APLOG_ERR, status, session->c, + H2_SSSN_LOG(APLOGNO(02970), session, + "nghttp2_submit_window_update: %s"), + nghttp2_strerror(*rv)); + } + } + + return status; +} + +static apr_status_t on_stream_headers(h2_session *session, h2_stream *stream, + h2_headers *headers, apr_off_t len, + int eos); + +static ssize_t stream_data_cb(nghttp2_session *ng2s, + int32_t stream_id, + uint8_t *buf, + size_t length, + uint32_t *data_flags, + nghttp2_data_source *source, + void *puser) +{ + h2_session *session = (h2_session *)puser; + apr_off_t nread = length; + int eos = 0; + apr_status_t status; + h2_stream *stream; + ap_assert(session); + + /* The session wants to send more DATA for the stream. 
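     * [Editor's note] This is the data source read callback registered with
     * nghttp2: the return value is the number of payload bytes to send,
     * NGHTTP2_ERR_DEFERRED parks the stream until nghttp2_session_resume_data()
     * is called, and NGHTTP2_DATA_FLAG_NO_COPY defers the actual write to the
     * send_data callback set up in init_callbacks().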
We need + * to find out how much of the requested length we can send without + * blocking. + * Indicate EOS when we encounter it or DEFERRED if the stream + * should be suspended. Beware of trailers. + */ + + (void)ng2s; + (void)buf; + (void)source; + stream = h2_session_stream_get(session, stream_id); + if (!stream) { + ap_log_cerror(APLOG_MARK, APLOG_ERR, 0, session->c, + APLOGNO(02937) + "h2_stream(%ld-%d): data_cb, stream not found", + session->id, (int)stream_id); + return NGHTTP2_ERR_CALLBACK_FAILURE; + } + + status = h2_stream_out_prepare(stream, &nread, &eos, NULL); + if (nread) { + ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, session->c, + H2_STRM_MSG(stream, "prepared no_copy, len=%ld, eos=%d"), + (long)nread, eos); + *data_flags |= NGHTTP2_DATA_FLAG_NO_COPY; + } + + switch (status) { + case APR_SUCCESS: + break; + + case APR_EOF: + eos = 1; + break; + + case APR_ECONNRESET: + case APR_ECONNABORTED: + return NGHTTP2_ERR_CALLBACK_FAILURE; + + case APR_EAGAIN: + /* If there is no data available, our session will automatically + * suspend this stream and not ask for more data until we resume + * it. Remember at our h2_stream that we need to do this. + */ + nread = 0; + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, + H2_STRM_LOG(APLOGNO(03071), stream, "suspending")); + return NGHTTP2_ERR_DEFERRED; + + default: + nread = 0; + ap_log_cerror(APLOG_MARK, APLOG_ERR, status, session->c, + H2_STRM_LOG(APLOGNO(02938), stream, "reading data")); + return NGHTTP2_ERR_CALLBACK_FAILURE; + } + + if (eos) { + *data_flags |= NGHTTP2_DATA_FLAG_EOF; + } + return (ssize_t)nread; +} + +struct h2_stream *h2_session_push(h2_session *session, h2_stream *is, + h2_push *push) +{ + h2_stream *stream; + h2_ngheader *ngh; + apr_status_t status; + int nid = 0; + + status = h2_req_create_ngheader(&ngh, is->pool, push->req); + if (status == APR_SUCCESS) { + nid = nghttp2_submit_push_promise(session->ngh2, 0, is->id, + ngh->nv, ngh->nvlen, NULL); + } + if (status != APR_SUCCESS || nid <= 0) { + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, session->c, + H2_STRM_LOG(APLOGNO(03075), is, + "submitting push promise fail: %s"), nghttp2_strerror(nid)); + return NULL; + } + ++session->pushes_promised; + + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, + H2_STRM_LOG(APLOGNO(03076), is, "SERVER_PUSH %d for %s %s on %d"), + nid, push->req->method, push->req->path, is->id); + + stream = h2_session_open_stream(session, nid, is->id); + if (!stream) { + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, + H2_STRM_LOG(APLOGNO(03077), stream, + "failed to create stream obj %d"), nid); + /* kill the push_promise */ + nghttp2_submit_rst_stream(session->ngh2, NGHTTP2_FLAG_NONE, nid, + NGHTTP2_INTERNAL_ERROR); + return NULL; + } + + h2_session_set_prio(session, stream, push->priority); + h2_stream_set_request(stream, push->req); + ++session->unsent_promises; + return stream; +} + +static int valid_weight(float f) +{ + int w = (int)f; + return (w < NGHTTP2_MIN_WEIGHT? NGHTTP2_MIN_WEIGHT : + (w > NGHTTP2_MAX_WEIGHT)? 
NGHTTP2_MAX_WEIGHT : w); +} + +apr_status_t h2_session_set_prio(h2_session *session, h2_stream *stream, + const h2_priority *prio) +{ + apr_status_t status = APR_SUCCESS; +#ifdef H2_NG2_CHANGE_PRIO + nghttp2_stream *s_grandpa, *s_parent, *s; + + if (prio == NULL) { + /* we treat this as a NOP */ + return APR_SUCCESS; + } + s = nghttp2_session_find_stream(session->ngh2, stream->id); + if (!s) { + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, session->c, + H2_STRM_MSG(stream, "lookup of nghttp2_stream failed")); + return APR_EINVAL; + } + + s_parent = nghttp2_stream_get_parent(s); + if (s_parent) { + nghttp2_priority_spec ps; + int id_parent, id_grandpa, w_parent, w; + int rv = 0; + const char *ptype = "AFTER"; + h2_dependency dep = prio->dependency; + + id_parent = nghttp2_stream_get_stream_id(s_parent); + s_grandpa = nghttp2_stream_get_parent(s_parent); + if (s_grandpa) { + id_grandpa = nghttp2_stream_get_stream_id(s_grandpa); + } + else { + /* parent of parent does not exist, + * only possible if parent == root */ + dep = H2_DEPENDANT_AFTER; + } + + switch (dep) { + case H2_DEPENDANT_INTERLEAVED: + /* PUSHed stream is to be interleaved with initiating stream. + * It is made a sibling of the initiating stream and gets a + * proportional weight [1, MAX_WEIGHT] of the initiating + * stream weight. + */ + ptype = "INTERLEAVED"; + w_parent = nghttp2_stream_get_weight(s_parent); + w = valid_weight(w_parent * ((float)prio->weight / NGHTTP2_MAX_WEIGHT)); + nghttp2_priority_spec_init(&ps, id_grandpa, w, 0); + break; + + case H2_DEPENDANT_BEFORE: + /* PUSHed stream is to be sent BEFORE the initiating stream. + * It gets the same weight as the initiating stream, replaces + * that stream in the dependency tree and has the initiating + * stream as child. + */ + ptype = "BEFORE"; + w = w_parent = nghttp2_stream_get_weight(s_parent); + nghttp2_priority_spec_init(&ps, stream->id, w_parent, 0); + id_grandpa = nghttp2_stream_get_stream_id(s_grandpa); + rv = nghttp2_session_change_stream_priority(session->ngh2, id_parent, &ps); + if (rv < 0) { + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, APLOGNO(03202) + "h2_stream(%ld-%d): PUSH BEFORE, weight=%d, " + "depends=%d, returned=%d", + session->id, id_parent, ps.weight, ps.stream_id, rv); + return APR_EGENERAL; + } + nghttp2_priority_spec_init(&ps, id_grandpa, w, 0); + break; + + case H2_DEPENDANT_AFTER: + /* The PUSHed stream is to be sent after the initiating stream. + * Give it the specified weight and let it depend on the initiating + * stream. + */ + /* fall through, it's the default */ + default: + nghttp2_priority_spec_init(&ps, id_parent, valid_weight(prio->weight), 0); + break; + } + + + rv = nghttp2_session_change_stream_priority(session->ngh2, stream->id, &ps); + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, + ""H2_STRM_LOG(APLOGNO(03203), stream, + "PUSH %s, weight=%d, depends=%d, returned=%d"), + ptype, ps.weight, ps.stream_id, rv); + status = (rv < 0)? 
APR_EGENERAL : APR_SUCCESS; + } +#else + (void)session; + (void)stream; + (void)prio; + (void)valid_weight; +#endif + return status; +} + +int h2_session_push_enabled(h2_session *session) +{ + /* iff we can and they can and want */ + return (session->remote.accepting /* remote GOAWAY received */ + && h2_config_geti(session->config, H2_CONF_PUSH) + && nghttp2_session_get_remote_settings(session->ngh2, + NGHTTP2_SETTINGS_ENABLE_PUSH)); +} + +static apr_status_t h2_session_send(h2_session *session) +{ + apr_interval_time_t saved_timeout; + int rv; + apr_socket_t *socket; + + socket = ap_get_conn_socket(session->c); + if (socket) { + apr_socket_timeout_get(socket, &saved_timeout); + apr_socket_timeout_set(socket, session->s->timeout); + } + + rv = nghttp2_session_send(session->ngh2); + + if (socket) { + apr_socket_timeout_set(socket, saved_timeout); + } + session->have_written = 1; + if (rv != 0 && rv != NGHTTP2_ERR_WOULDBLOCK) { + if (nghttp2_is_fatal(rv)) { + dispatch_event(session, H2_SESSION_EV_PROTO_ERROR, rv, nghttp2_strerror(rv)); + return APR_EGENERAL; + } + } + + session->unsent_promises = 0; + session->unsent_submits = 0; + + return APR_SUCCESS; +} + +/** + * headers for the stream are ready. + */ +static apr_status_t on_stream_headers(h2_session *session, h2_stream *stream, + h2_headers *headers, apr_off_t len, + int eos) +{ + apr_status_t status = APR_SUCCESS; + int rv = 0; + + ap_assert(session); + ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, session->c, + H2_STRM_MSG(stream, "on_headers")); + if (headers->status < 100) { + h2_stream_rst(stream, headers->status); + goto leave; + } + else if (stream->has_response) { + h2_ngheader *nh; + + status = h2_res_create_ngtrailer(&nh, stream->pool, headers); + + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, session->c, + H2_STRM_LOG(APLOGNO(03072), stream, "submit %d trailers"), + (int)nh->nvlen); + if (status == APR_SUCCESS) { + rv = nghttp2_submit_trailer(session->ngh2, stream->id, + nh->nv, nh->nvlen); + } + else { + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, session->c, + H2_STRM_LOG(APLOGNO(10024), stream, "invalid trailers")); + h2_stream_rst(stream, NGHTTP2_PROTOCOL_ERROR); + } + goto leave; + } + else { + nghttp2_data_provider provider, *pprovider = NULL; + h2_ngheader *ngh; + const char *note; + + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, + H2_STRM_LOG(APLOGNO(03073), stream, "submit response %d, REMOTE_WINDOW_SIZE=%u"), + headers->status, + (unsigned int)nghttp2_session_get_stream_remote_window_size(session->ngh2, stream->id)); + + if (!eos || len > 0) { + memset(&provider, 0, sizeof(provider)); + provider.source.fd = stream->id; + provider.read_callback = stream_data_cb; + pprovider = &provider; + } + + /* If this stream is not a pushed one itself, + * and HTTP/2 server push is enabled here, + * and the response HTTP status is not sth >= 400, + * and the remote side has pushing enabled, + * -> find and perform any pushes on this stream + * *before* we submit the stream response itself. + * This helps clients avoid opening new streams on Link + * headers that get pushed right afterwards. + * + * *) the response code is relevant, as we do not want to + * make pushes on 401 or 403 codes and friends. + * And if we see a 304, we do not push either + * as the client, having this resource in its cache, might + * also have the pushed ones as well. 
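             * [Editor's note, illustrative only] The pushes themselves are
             * typically derived from Link response headers marked rel=preload,
             * e.g. (hypothetical resource)
             *   Link: </assets/site.css>; rel=preload; as=style
             * which h2_stream_submit_pushes() below turns into PUSH_PROMISE
             * frames via h2_session_push().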
+ */ + if (!stream->initiated_on + && !stream->has_response + && stream->request && stream->request->method + && !strcmp("GET", stream->request->method) + && (headers->status < 400) + && (headers->status != 304) + && h2_session_push_enabled(session)) { + + h2_stream_submit_pushes(stream, headers); + } + + if (!stream->pref_priority) { + stream->pref_priority = h2_stream_get_priority(stream, headers); + } + h2_session_set_prio(session, stream, stream->pref_priority); + + note = apr_table_get(headers->notes, H2_FILTER_DEBUG_NOTE); + if (note && !strcmp("on", note)) { + int32_t connFlowIn, connFlowOut; + + connFlowIn = nghttp2_session_get_effective_local_window_size(session->ngh2); + connFlowOut = nghttp2_session_get_remote_window_size(session->ngh2); + headers = h2_headers_copy(stream->pool, headers); + apr_table_setn(headers->headers, "conn-flow-in", + apr_itoa(stream->pool, connFlowIn)); + apr_table_setn(headers->headers, "conn-flow-out", + apr_itoa(stream->pool, connFlowOut)); + } + + if (headers->status == 103 + && !h2_config_geti(session->config, H2_CONF_EARLY_HINTS)) { + /* suppress sending this to the client, it might have triggered + * pushes and served its purpose nevertheless */ + rv = 0; + goto leave; + } + + status = h2_res_create_ngheader(&ngh, stream->pool, headers); + if (status == APR_SUCCESS) { + rv = nghttp2_submit_response(session->ngh2, stream->id, + ngh->nv, ngh->nvlen, pprovider); + stream->has_response = h2_headers_are_response(headers); + session->have_written = 1; + + if (stream->initiated_on) { + ++session->pushes_submitted; + } + else { + ++session->responses_submitted; + } + } + else { + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, session->c, + H2_STRM_LOG(APLOGNO(10025), stream, "invalid response")); + h2_stream_rst(stream, NGHTTP2_PROTOCOL_ERROR); + } + } + +leave: + if (nghttp2_is_fatal(rv)) { + status = APR_EGENERAL; + dispatch_event(session, H2_SESSION_EV_PROTO_ERROR, rv, nghttp2_strerror(rv)); + ap_log_cerror(APLOG_MARK, APLOG_ERR, status, session->c, + APLOGNO(02940) "submit_response: %s", + nghttp2_strerror(rv)); + } + + ++session->unsent_submits; + + /* Unsent push promises are written immediately, as nghttp2 + * 1.5.0 realizes internal stream data structures only on + * send and we might need them for other submits. + * Also, to conserve memory, we send at least every 10 submits + * so that nghttp2 does not buffer all outbound items too + * long. + */ + if (status == APR_SUCCESS + && (session->unsent_promises || session->unsent_submits > 10)) { + status = h2_session_send(session); + } + return status; +} + +/** + * A stream was resumed as new response/output data arrived. 
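 * [Editor's note] "Resumed" here means: re-examine the stream's output; any
 * headers found are submitted via on_stream_headers(), and buffered DATA is
 * handed back to nghttp2 with nghttp2_session_resume_data(), which re-arms
 * the previously deferred stream_data_cb().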
+ */ +static apr_status_t on_stream_resume(void *ctx, h2_stream *stream) +{ + h2_session *session = ctx; + apr_status_t status = APR_EAGAIN; + int rv; + apr_off_t len = 0; + int eos = 0; + h2_headers *headers; + + ap_assert(stream); + ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, session->c, + H2_STRM_MSG(stream, "on_resume")); + +send_headers: + headers = NULL; + status = h2_stream_out_prepare(stream, &len, &eos, &headers); + ap_log_cerror(APLOG_MARK, APLOG_TRACE2, status, session->c, + H2_STRM_MSG(stream, "prepared len=%ld, eos=%d"), + (long)len, eos); + if (headers) { + status = on_stream_headers(session, stream, headers, len, eos); + if (status != APR_SUCCESS || stream->rst_error) { + return status; + } + goto send_headers; + } + else if (status != APR_EAGAIN) { + /* we have DATA to send */ + if (!stream->has_response) { + /* but no response */ + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, + H2_STRM_LOG(APLOGNO(03466), stream, + "no response, RST_STREAM")); + h2_stream_rst(stream, H2_ERR_PROTOCOL_ERROR); + return APR_SUCCESS; + } + rv = nghttp2_session_resume_data(session->ngh2, stream->id); + session->have_written = 1; + ap_log_cerror(APLOG_MARK, nghttp2_is_fatal(rv)? + APLOG_ERR : APLOG_DEBUG, 0, session->c, + H2_STRM_LOG(APLOGNO(02936), stream, "resumed")); + } + return status; +} + +static void h2_session_in_flush(h2_session *session) +{ + int id; + + while ((id = h2_iq_shift(session->in_process)) > 0) { + h2_stream *stream = h2_session_stream_get(session, id); + if (stream) { + ap_assert(!stream->scheduled); + if (h2_stream_prep_processing(stream) == APR_SUCCESS) { + h2_mplx_process(session->mplx, stream, stream_pri_cmp, session); + } + else { + h2_stream_rst(stream, H2_ERR_INTERNAL_ERROR); + } + } + } + + while ((id = h2_iq_shift(session->in_pending)) > 0) { + h2_stream *stream = h2_session_stream_get(session, id); + if (stream) { + h2_stream_flush_input(stream); + } + } +} + +static apr_status_t session_read(h2_session *session, apr_size_t readlen, int block) +{ + apr_status_t status, rstatus = APR_EAGAIN; + conn_rec *c = session->c; + apr_off_t read_start = session->io.bytes_read; + + while (1) { + /* H2_IN filter handles all incoming data against the session. + * We just pull at the filter chain to make it happen */ + status = ap_get_brigade(c->input_filters, + session->bbtmp, AP_MODE_READBYTES, + block? APR_BLOCK_READ : APR_NONBLOCK_READ, + H2MAX(APR_BUCKET_BUFF_SIZE, readlen)); + /* get rid of any possible data we do not expect to get */ + apr_brigade_cleanup(session->bbtmp); + + switch (status) { + case APR_SUCCESS: + /* successful read, reset our idle timers */ + rstatus = APR_SUCCESS; + if (block) { + /* successful blocked read, try unblocked to + * get more. */ + block = 0; + } + break; + case APR_EAGAIN: + return rstatus; + case APR_TIMEUP: + return status; + default: + if (session->io.bytes_read == read_start) { + /* first attempt failed */ + if (APR_STATUS_IS_ETIMEDOUT(status) + || APR_STATUS_IS_ECONNABORTED(status) + || APR_STATUS_IS_ECONNRESET(status) + || APR_STATUS_IS_EOF(status) + || APR_STATUS_IS_EBADF(status)) { + /* common status for a client that has left */ + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, c, + H2_SSSN_MSG(session, "input gone")); + } + else { + /* uncommon status, log on INFO so that we see this */ + ap_log_cerror( APLOG_MARK, APLOG_DEBUG, status, c, + H2_SSSN_LOG(APLOGNO(02950), session, + "error reading, terminating")); + } + return status; + } + /* subsequent failure after success(es), return initial + * status. 
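                 * [Editor's note] Example: a blocking read succeeded, the
                 * follow-up non-blocking read then failed with ECONNRESET;
                 * the earlier success is reported and the error surfaces on
                 * the next call.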
*/ + return rstatus; + } + if ((session->io.bytes_read - read_start) > readlen) { + /* read enough in one go, give write a chance */ + ap_log_cerror(APLOG_MARK, APLOG_TRACE2, status, c, + H2_SSSN_MSG(session, "read enough, returning")); + break; + } + } + return rstatus; +} + +static apr_status_t h2_session_read(h2_session *session, int block) +{ + apr_status_t status = session_read(session, session->max_stream_mem + * H2MAX(2, session->open_streams), + block); + h2_session_in_flush(session); + return status; +} + +static const char *StateNames[] = { + "INIT", /* H2_SESSION_ST_INIT */ + "DONE", /* H2_SESSION_ST_DONE */ + "IDLE", /* H2_SESSION_ST_IDLE */ + "BUSY", /* H2_SESSION_ST_BUSY */ + "WAIT", /* H2_SESSION_ST_WAIT */ + "CLEANUP", /* H2_SESSION_ST_CLEANUP */ +}; + +const char *h2_session_state_str(h2_session_state state) +{ + if (state >= (sizeof(StateNames)/sizeof(StateNames[0]))) { + return "unknown"; + } + return StateNames[state]; +} + +static void update_child_status(h2_session *session, int status, const char *msg) +{ + /* Assume that we also change code/msg when something really happened and + * avoid updating the scoreboard in between */ + if (session->last_status_code != status + || session->last_status_msg != msg) { + apr_snprintf(session->status, sizeof(session->status), + "%s, streams: %d/%d/%d/%d/%d (open/recv/resp/push/rst)", + msg? msg : "-", + (int)session->open_streams, + (int)session->remote.emitted_count, + (int)session->responses_submitted, + (int)session->pushes_submitted, + (int)session->pushes_reset + session->streams_reset); + ap_update_child_status_descr(session->c->sbh, status, session->status); + } +} + +static void transit(h2_session *session, const char *action, h2_session_state nstate) +{ + apr_time_t timeout; + int ostate, loglvl; + const char *s; + + if (session->state != nstate) { + ostate = session->state; + session->state = nstate; + + loglvl = APLOG_DEBUG; + if ((ostate == H2_SESSION_ST_BUSY && nstate == H2_SESSION_ST_WAIT) + || (ostate == H2_SESSION_ST_WAIT && nstate == H2_SESSION_ST_BUSY)){ + loglvl = APLOG_TRACE1; + } + ap_log_cerror(APLOG_MARK, loglvl, 0, session->c, + H2_SSSN_LOG(APLOGNO(03078), session, + "transit [%s] -- %s --> [%s]"), + h2_session_state_str(ostate), action, + h2_session_state_str(nstate)); + + switch (session->state) { + case H2_SESSION_ST_IDLE: + if (!session->remote.emitted_count) { + /* on fresh connections, with async mpm, do not return + * to mpm for a second. This gives the first request a better + * chance to arrive (und connection leaving IDLE state). + * If we return to mpm right away, this connection has the + * same chance of being cleaned up by the mpm as connections + * that already served requests - not fair. 
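                 * [Editor's note] Concretely: idle_sync_until is set one
                 * second into the future just below, and until it has passed
                 * h2_session_process() keeps doing blocking 1-second reads
                 * instead of handing the connection back to the async MPM.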
*/ + session->idle_sync_until = apr_time_now() + apr_time_from_sec(1); + s = "timeout"; + timeout = H2MAX(session->s->timeout, session->s->keep_alive_timeout); + update_child_status(session, SERVER_BUSY_READ, "idle"); + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, session->c, + H2_SSSN_LOG("", session, "enter idle, timeout = %d sec"), + (int)apr_time_sec(H2MAX(session->s->timeout, session->s->keep_alive_timeout))); + } + else if (session->open_streams) { + s = "timeout"; + timeout = session->s->keep_alive_timeout; + update_child_status(session, SERVER_BUSY_KEEPALIVE, "idle"); + } + else { + /* normal keepalive setup */ + s = "keepalive"; + timeout = session->s->keep_alive_timeout; + update_child_status(session, SERVER_BUSY_KEEPALIVE, "idle"); + } + session->idle_until = apr_time_now() + timeout; + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, session->c, + H2_SSSN_LOG("", session, "enter idle, %s = %d sec"), + s, (int)apr_time_sec(timeout)); + break; + case H2_SESSION_ST_DONE: + update_child_status(session, SERVER_CLOSING, "done"); + break; + default: + /* nop */ + break; + } + } +} + +static void h2_session_ev_init(h2_session *session, int arg, const char *msg) +{ + switch (session->state) { + case H2_SESSION_ST_INIT: + transit(session, "init", H2_SESSION_ST_BUSY); + break; + default: + /* nop */ + break; + } +} + +static void h2_session_ev_local_goaway(h2_session *session, int arg, const char *msg) +{ + cleanup_unprocessed_streams(session); + if (!session->remote.shutdown) { + update_child_status(session, SERVER_CLOSING, "local goaway"); + } + transit(session, "local goaway", H2_SESSION_ST_DONE); +} + +static void h2_session_ev_remote_goaway(h2_session *session, int arg, const char *msg) +{ + if (!session->remote.shutdown) { + session->remote.error = arg; + session->remote.accepting = 0; + session->remote.shutdown = 1; + cleanup_unprocessed_streams(session); + update_child_status(session, SERVER_CLOSING, "remote goaway"); + transit(session, "remote goaway", H2_SESSION_ST_DONE); + } +} + +static void h2_session_ev_conn_error(h2_session *session, int arg, const char *msg) +{ + switch (session->state) { + case H2_SESSION_ST_INIT: + case H2_SESSION_ST_DONE: + /* just leave */ + transit(session, "conn error", H2_SESSION_ST_DONE); + break; + + default: + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, + H2_SSSN_LOG(APLOGNO(03401), session, + "conn error -> shutdown")); + h2_session_shutdown(session, arg, msg, 0); + break; + } +} + +static void h2_session_ev_proto_error(h2_session *session, int arg, const char *msg) +{ + if (!session->local.shutdown) { + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, + H2_SSSN_LOG(APLOGNO(03402), session, + "proto error -> shutdown")); + h2_session_shutdown(session, arg, msg, 0); + } +} + +static void h2_session_ev_conn_timeout(h2_session *session, int arg, const char *msg) +{ + transit(session, msg, H2_SESSION_ST_DONE); + if (!session->local.shutdown) { + h2_session_shutdown(session, arg, msg, 1); + } +} + +static void h2_session_ev_no_io(h2_session *session, int arg, const char *msg) +{ + switch (session->state) { + case H2_SESSION_ST_BUSY: + /* Nothing to READ, nothing to WRITE on the master connection. 
+ * Possible causes: + * - we wait for the client to send us sth + * - we wait for started tasks to produce output + * - we have finished all streams and the client has sent GO_AWAY + */ + ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, session->c, + H2_SSSN_MSG(session, "NO_IO event, %d streams open"), + session->open_streams); + h2_conn_io_flush(&session->io); + if (session->open_streams > 0) { + if (h2_mplx_awaits_data(session->mplx)) { + /* waiting for at least one stream to produce data */ + transit(session, "no io", H2_SESSION_ST_WAIT); + } + else { + /* we have streams open, and all are submitted and none + * is suspended. The only thing keeping us from WRITEing + * more must be the flow control. + * This means we only wait for WINDOW_UPDATE from the + * client and can block on READ. */ + transit(session, "no io (flow wait)", H2_SESSION_ST_IDLE); + /* Make sure we have flushed all previously written output + * so that the client will react. */ + if (h2_conn_io_flush(&session->io) != APR_SUCCESS) { + dispatch_event(session, H2_SESSION_EV_CONN_ERROR, 0, NULL); + return; + } + } + } + else if (session->local.accepting) { + /* When we have no streams, but accept new, switch to idle */ + transit(session, "no io (keepalive)", H2_SESSION_ST_IDLE); + } + else { + /* We are no longer accepting new streams and there are + * none left. Time to leave. */ + h2_session_shutdown(session, arg, msg, 0); + transit(session, "no io", H2_SESSION_ST_DONE); + } + break; + default: + /* nop */ + break; + } +} + +static void h2_session_ev_frame_rcvd(h2_session *session, int arg, const char *msg) +{ + switch (session->state) { + case H2_SESSION_ST_IDLE: + case H2_SESSION_ST_WAIT: + transit(session, "frame received", H2_SESSION_ST_BUSY); + break; + default: + /* nop */ + break; + } +} + +static void h2_session_ev_stream_change(h2_session *session, int arg, const char *msg) +{ + switch (session->state) { + case H2_SESSION_ST_IDLE: + case H2_SESSION_ST_WAIT: + transit(session, "stream change", H2_SESSION_ST_BUSY); + break; + default: + /* nop */ + break; + } +} + +static void h2_session_ev_ngh2_done(h2_session *session, int arg, const char *msg) +{ + switch (session->state) { + case H2_SESSION_ST_DONE: + /* nop */ + break; + default: + transit(session, "nghttp2 done", H2_SESSION_ST_DONE); + break; + } +} + +static void h2_session_ev_mpm_stopping(h2_session *session, int arg, const char *msg) +{ + switch (session->state) { + case H2_SESSION_ST_DONE: + /* nop */ + break; + default: + h2_session_shutdown_notice(session); + break; + } +} + +static void h2_session_ev_pre_close(h2_session *session, int arg, const char *msg) +{ + h2_session_shutdown(session, arg, msg, 1); +} + +static void ev_stream_open(h2_session *session, h2_stream *stream) +{ + h2_iq_append(session->in_process, stream->id); +} + +static void ev_stream_closed(h2_session *session, h2_stream *stream) +{ + apr_bucket *b; + + if (H2_STREAM_CLIENT_INITIATED(stream->id) + && (stream->id > session->local.completed_max)) { + session->local.completed_max = stream->id; + } + switch (session->state) { + case H2_SESSION_ST_IDLE: + break; + default: + break; + } + + /* The stream might have data in the buffers of the main connection. + * We can only free the allocated resources once all had been written. + * Send a special buckets on the connection that gets destroyed when + * all preceding data has been handled. On its destruction, it is safe + * to purge all resources of the stream. 
*/ + b = h2_bucket_eos_create(session->c->bucket_alloc, stream); + APR_BRIGADE_INSERT_TAIL(session->bbtmp, b); + h2_conn_io_pass(&session->io, session->bbtmp); + apr_brigade_cleanup(session->bbtmp); +} + +static void on_stream_state_enter(void *ctx, h2_stream *stream) +{ + h2_session *session = ctx; + /* stream entered a new state */ + ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, session->c, + H2_STRM_MSG(stream, "entered state")); + switch (stream->state) { + case H2_SS_IDLE: /* stream was created */ + ++session->open_streams; + if (H2_STREAM_CLIENT_INITIATED(stream->id)) { + ++session->remote.emitted_count; + if (stream->id > session->remote.emitted_max) { + session->remote.emitted_max = stream->id; + session->local.accepted_max = stream->id; + } + } + else { + if (stream->id > session->local.emitted_max) { + ++session->local.emitted_count; + session->remote.emitted_max = stream->id; + } + } + break; + case H2_SS_OPEN: /* stream has request headers */ + case H2_SS_RSVD_L: /* stream has request headers */ + ev_stream_open(session, stream); + break; + case H2_SS_CLOSED_L: /* stream output was closed */ + break; + case H2_SS_CLOSED_R: /* stream input was closed */ + break; + case H2_SS_CLOSED: /* stream in+out were closed */ + --session->open_streams; + ev_stream_closed(session, stream); + break; + case H2_SS_CLEANUP: + h2_mplx_stream_cleanup(session->mplx, stream); + break; + default: + break; + } + dispatch_event(session, H2_SESSION_EV_STREAM_CHANGE, 0, "stream state change"); +} + +static void on_stream_event(void *ctx, h2_stream *stream, + h2_stream_event_t ev) +{ + h2_session *session = ctx; + switch (ev) { + case H2_SEV_IN_DATA_PENDING: + h2_iq_append(session->in_pending, stream->id); + break; + default: + /* NOP */ + break; + } +} + +static void on_stream_state_event(void *ctx, h2_stream *stream, + h2_stream_event_t ev) +{ + h2_session *session = ctx; + switch (ev) { + case H2_SEV_CANCELLED: + if (session->state != H2_SESSION_ST_DONE) { + nghttp2_submit_rst_stream(session->ngh2, NGHTTP2_FLAG_NONE, + stream->id, stream->rst_error); + } + break; + default: + /* NOP */ + break; + } +} + +static void dispatch_event(h2_session *session, h2_session_event_t ev, + int arg, const char *msg) +{ + switch (ev) { + case H2_SESSION_EV_INIT: + h2_session_ev_init(session, arg, msg); + break; + case H2_SESSION_EV_LOCAL_GOAWAY: + h2_session_ev_local_goaway(session, arg, msg); + break; + case H2_SESSION_EV_REMOTE_GOAWAY: + h2_session_ev_remote_goaway(session, arg, msg); + break; + case H2_SESSION_EV_CONN_ERROR: + h2_session_ev_conn_error(session, arg, msg); + break; + case H2_SESSION_EV_PROTO_ERROR: + h2_session_ev_proto_error(session, arg, msg); + break; + case H2_SESSION_EV_CONN_TIMEOUT: + h2_session_ev_conn_timeout(session, arg, msg); + break; + case H2_SESSION_EV_NO_IO: + h2_session_ev_no_io(session, arg, msg); + break; + case H2_SESSION_EV_FRAME_RCVD: + h2_session_ev_frame_rcvd(session, arg, msg); + break; + case H2_SESSION_EV_NGH2_DONE: + h2_session_ev_ngh2_done(session, arg, msg); + break; + case H2_SESSION_EV_MPM_STOPPING: + h2_session_ev_mpm_stopping(session, arg, msg); + break; + case H2_SESSION_EV_PRE_CLOSE: + h2_session_ev_pre_close(session, arg, msg); + break; + case H2_SESSION_EV_STREAM_CHANGE: + h2_session_ev_stream_change(session, arg, msg); + break; + default: + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, session->c, + H2_SSSN_MSG(session, "unknown event %d"), ev); + break; + } +} + +/* trigger window updates, stream resumes and submits */ +static apr_status_t dispatch_master(h2_session 
*session) { + apr_status_t status; + + status = h2_mplx_dispatch_master_events(session->mplx, + on_stream_resume, session); + if (status == APR_EAGAIN) { + ap_log_cerror(APLOG_MARK, APLOG_TRACE3, status, session->c, + H2_SSSN_MSG(session, "no master event available")); + } + else if (status != APR_SUCCESS) { + ap_log_cerror(APLOG_MARK, APLOG_TRACE3, status, session->c, + H2_SSSN_MSG(session, "dispatch error")); + dispatch_event(session, H2_SESSION_EV_CONN_ERROR, + H2_ERR_INTERNAL_ERROR, "dispatch error"); + } + return status; +} + +static const int MAX_WAIT_MICROS = 200 * 1000; + +apr_status_t h2_session_process(h2_session *session, int async) +{ + apr_status_t status = APR_SUCCESS; + conn_rec *c = session->c; + int rv, mpm_state, trace = APLOGctrace3(c); + apr_time_t now; + + if (trace) { + ap_log_cerror( APLOG_MARK, APLOG_TRACE3, status, c, + H2_SSSN_MSG(session, "process start, async=%d"), async); + } + + while (session->state != H2_SESSION_ST_DONE) { + now = apr_time_now(); + session->have_read = session->have_written = 0; + + if (session->local.accepting + && !ap_mpm_query(AP_MPMQ_MPM_STATE, &mpm_state)) { + if (mpm_state == AP_MPMQ_STOPPING) { + dispatch_event(session, H2_SESSION_EV_MPM_STOPPING, 0, NULL); + } + } + + session->status[0] = '\0'; + + switch (session->state) { + case H2_SESSION_ST_INIT: + ap_update_child_status_from_conn(c->sbh, SERVER_BUSY_READ, c); + if (!h2_is_acceptable_connection(c, 1)) { + update_child_status(session, SERVER_BUSY_READ, + "inadequate security"); + h2_session_shutdown(session, + NGHTTP2_INADEQUATE_SECURITY, NULL, 1); + } + else { + update_child_status(session, SERVER_BUSY_READ, "init"); + status = h2_session_start(session, &rv); + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, c, + H2_SSSN_LOG(APLOGNO(03079), session, + "started on %s:%d"), + session->s->server_hostname, + c->local_addr->port); + if (status != APR_SUCCESS) { + dispatch_event(session, + H2_SESSION_EV_CONN_ERROR, 0, NULL); + } + dispatch_event(session, H2_SESSION_EV_INIT, 0, NULL); + } + break; + + case H2_SESSION_ST_IDLE: + if (session->idle_until && (apr_time_now() + session->idle_delay) > session->idle_until) { + ap_log_cerror( APLOG_MARK, APLOG_TRACE1, status, c, + H2_SSSN_MSG(session, "idle, timeout reached, closing")); + if (session->idle_delay) { + apr_table_setn(session->c->notes, "short-lingering-close", "1"); + } + dispatch_event(session, H2_SESSION_EV_CONN_TIMEOUT, 0, "timeout"); + goto out; + } + + if (session->idle_delay) { + /* we are less interested in spending time on this connection */ + ap_log_cerror( APLOG_MARK, APLOG_TRACE2, status, c, + H2_SSSN_MSG(session, "session is idle (%ld ms), idle wait %ld sec left"), + (long)apr_time_as_msec(session->idle_delay), + (long)apr_time_sec(session->idle_until - now)); + apr_sleep(session->idle_delay); + session->idle_delay = 0; + } + + h2_conn_io_flush(&session->io); + if (async && !session->r && (now > session->idle_sync_until)) { + if (trace) { + ap_log_cerror(APLOG_MARK, APLOG_TRACE3, status, c, + H2_SSSN_MSG(session, + "nonblock read, %d streams open"), + session->open_streams); + } + status = h2_session_read(session, 0); + + if (status == APR_SUCCESS) { + session->have_read = 1; + } + else if (APR_STATUS_IS_EAGAIN(status) || APR_STATUS_IS_TIMEUP(status)) { + status = APR_EAGAIN; + goto out; + } + else { + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, c, + H2_SSSN_LOG(APLOGNO(03403), session, + "no data, error")); + dispatch_event(session, + H2_SESSION_EV_CONN_ERROR, 0, "timeout"); + } + } + else { + /* make certain, we 
send everything before we idle */ + if (trace) { + ap_log_cerror(APLOG_MARK, APLOG_TRACE3, status, c, + H2_SSSN_MSG(session, + "sync, stutter 1-sec, %d streams open"), + session->open_streams); + } + /* We wait in smaller increments, using a 1 second timeout. + * That gives us the chance to check for MPMQ_STOPPING often. + */ + status = h2_mplx_idle(session->mplx); + if (status == APR_EAGAIN) { + break; + } + else if (status != APR_SUCCESS) { + dispatch_event(session, H2_SESSION_EV_CONN_ERROR, + H2_ERR_ENHANCE_YOUR_CALM, "less is more"); + } + h2_filter_cin_timeout_set(session->cin, apr_time_from_sec(1)); + status = h2_session_read(session, 1); + if (status == APR_SUCCESS) { + session->have_read = 1; + } + else if (status == APR_EAGAIN) { + /* nothing to read */ + } + else if (APR_STATUS_IS_TIMEUP(status)) { + /* continue reading handling */ + } + else if (APR_STATUS_IS_ECONNABORTED(status) + || APR_STATUS_IS_ECONNRESET(status) + || APR_STATUS_IS_EOF(status) + || APR_STATUS_IS_EBADF(status)) { + ap_log_cerror( APLOG_MARK, APLOG_TRACE3, status, c, + H2_SSSN_MSG(session, "input gone")); + dispatch_event(session, H2_SESSION_EV_CONN_ERROR, 0, NULL); + } + else { + ap_log_cerror( APLOG_MARK, APLOG_TRACE3, status, c, + H2_SSSN_MSG(session, + "(1 sec timeout) read failed")); + dispatch_event(session, H2_SESSION_EV_CONN_ERROR, 0, "error"); + } + } + if (nghttp2_session_want_write(session->ngh2)) { + ap_update_child_status(session->c->sbh, SERVER_BUSY_WRITE, NULL); + status = h2_session_send(session); + if (status == APR_SUCCESS) { + status = h2_conn_io_flush(&session->io); + } + if (status != APR_SUCCESS) { + dispatch_event(session, H2_SESSION_EV_CONN_ERROR, + H2_ERR_INTERNAL_ERROR, "writing"); + break; + } + } + break; + + case H2_SESSION_ST_BUSY: + if (nghttp2_session_want_read(session->ngh2)) { + ap_update_child_status(session->c->sbh, SERVER_BUSY_READ, NULL); + h2_filter_cin_timeout_set(session->cin, session->s->timeout); + status = h2_session_read(session, 0); + if (status == APR_SUCCESS) { + session->have_read = 1; + } + else if (status == APR_EAGAIN) { + /* nothing to read */ + } + else if (APR_STATUS_IS_TIMEUP(status)) { + dispatch_event(session, H2_SESSION_EV_CONN_TIMEOUT, 0, NULL); + break; + } + else { + dispatch_event(session, H2_SESSION_EV_CONN_ERROR, 0, NULL); + } + } + + status = dispatch_master(session); + if (status != APR_SUCCESS && status != APR_EAGAIN) { + break; + } + + if (nghttp2_session_want_write(session->ngh2)) { + ap_update_child_status(session->c->sbh, SERVER_BUSY_WRITE, NULL); + status = h2_session_send(session); + if (status == APR_SUCCESS) { + status = h2_conn_io_flush(&session->io); + } + if (status != APR_SUCCESS) { + dispatch_event(session, H2_SESSION_EV_CONN_ERROR, + H2_ERR_INTERNAL_ERROR, "writing"); + break; + } + } + + if (session->have_read || session->have_written) { + if (session->wait_us) { + session->wait_us = 0; + } + } + else if (!nghttp2_session_want_write(session->ngh2)) { + dispatch_event(session, H2_SESSION_EV_NO_IO, 0, NULL); + } + break; + + case H2_SESSION_ST_WAIT: + if (session->wait_us <= 0) { + session->wait_us = 10; + if (h2_conn_io_flush(&session->io) != APR_SUCCESS) { + dispatch_event(session, H2_SESSION_EV_CONN_ERROR, 0, NULL); + break; + } + } + else { + /* repeating, increase timer for graceful backoff */ + session->wait_us = H2MIN(session->wait_us*2, MAX_WAIT_MICROS); + } + + if (trace) { + ap_log_cerror(APLOG_MARK, APLOG_TRACE3, 0, c, + "h2_session: wait for data, %ld micros", + (long)session->wait_us); + } + status = 
h2_mplx_out_trywait(session->mplx, session->wait_us, + session->iowait); + if (status == APR_SUCCESS) { + session->wait_us = 0; + dispatch_event(session, H2_SESSION_EV_STREAM_CHANGE, 0, NULL); + } + else if (APR_STATUS_IS_TIMEUP(status)) { + /* go back to checking all inputs again */ + transit(session, "wait cycle", session->local.shutdown? + H2_SESSION_ST_DONE : H2_SESSION_ST_BUSY); + } + else if (APR_STATUS_IS_ECONNRESET(status) + || APR_STATUS_IS_ECONNABORTED(status)) { + dispatch_event(session, H2_SESSION_EV_CONN_ERROR, 0, NULL); + } + else { + ap_log_cerror(APLOG_MARK, APLOG_WARNING, status, c, + H2_SSSN_LOG(APLOGNO(03404), session, + "waiting on conditional")); + h2_session_shutdown(session, H2_ERR_INTERNAL_ERROR, + "cond wait error", 0); + } + break; + + default: + ap_log_cerror(APLOG_MARK, APLOG_ERR, APR_EGENERAL, c, + H2_SSSN_LOG(APLOGNO(03080), session, + "unknown state")); + dispatch_event(session, H2_SESSION_EV_PROTO_ERROR, 0, NULL); + break; + } + + if (!nghttp2_session_want_read(session->ngh2) + && !nghttp2_session_want_write(session->ngh2)) { + dispatch_event(session, H2_SESSION_EV_NGH2_DONE, 0, NULL); + } + if (session->reprioritize) { + h2_mplx_reprioritize(session->mplx, stream_pri_cmp, session); + session->reprioritize = 0; + } + } + +out: + if (trace) { + ap_log_cerror( APLOG_MARK, APLOG_TRACE3, status, c, + H2_SSSN_MSG(session, "process returns")); + } + + if ((session->state != H2_SESSION_ST_DONE) + && (APR_STATUS_IS_EOF(status) + || APR_STATUS_IS_ECONNRESET(status) + || APR_STATUS_IS_ECONNABORTED(status))) { + dispatch_event(session, H2_SESSION_EV_CONN_ERROR, 0, NULL); + } + + return (session->state == H2_SESSION_ST_DONE)? APR_EOF : APR_SUCCESS; +} + +apr_status_t h2_session_pre_close(h2_session *session, int async) +{ + apr_status_t status; + + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, session->c, + H2_SSSN_MSG(session, "pre_close")); + dispatch_event(session, H2_SESSION_EV_PRE_CLOSE, 0, + (session->state == H2_SESSION_ST_IDLE)? "timeout" : NULL); + status = session_cleanup(session, "pre_close"); + if (status == APR_SUCCESS) { + /* no one should hold a reference to this session any longer and + * the h2_ctx was removed from the connection. + * Take the pool (and thus all subpools etc. down now, instead of + * during cleanup of main connection pool. */ + apr_pool_destroy(session->pool); + } + return status; +} diff --git a/modules/http2/h2_session.h b/modules/http2/h2_session.h new file mode 100644 index 0000000..df2a862 --- /dev/null +++ b/modules/http2/h2_session.h @@ -0,0 +1,228 @@ +/* Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef __mod_h2__h2_session__ +#define __mod_h2__h2_session__ + +#include "h2_conn_io.h" + +/** + * A HTTP/2 connection, a session with a specific client. 
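 * [Editor's sketch, not part of the original header] Simplified usage,
 * roughly as driven from h2_conn.c; c, ctx, workers and async_mpm are
 * assumed to be in scope:
 *
 *   h2_session *session;
 *   apr_status_t rv = h2_session_create(&session, c, ctx, workers);
 *   if (rv == APR_SUCCESS) {
 *       rv = h2_session_process(session, async_mpm);  // frame loop until DONE
 *       h2_session_pre_close(session, async_mpm);     // sends GOAWAY, frees the pool
 *   }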
+ * + * h2_session sits on top of a httpd conn_rec* instance and takes complete + * control of the connection data. It receives protocol frames from the + * client. For new HTTP/2 streams it creates h2_task(s) that are sent + * via callback to a dispatcher (see h2_conn.c). + * h2_session keeps h2_io's for each ongoing stream which buffer the + * payload for that stream. + * + * New incoming HEADER frames are converted into a h2_stream+h2_task instance + * that both represent a HTTP/2 stream, but may have separate lifetimes. This + * allows h2_task to be scheduled in other threads without semaphores + * all over the place. It allows task memory to be freed independent of + * session lifetime and sessions may close down while tasks are still running. + * + * + */ + +#include "h2.h" + +struct apr_thread_mutext_t; +struct apr_thread_cond_t; +struct h2_ctx; +struct h2_config; +struct h2_filter_cin; +struct h2_ihash_t; +struct h2_mplx; +struct h2_priority; +struct h2_push; +struct h2_push_diary; +struct h2_session; +struct h2_stream; +struct h2_stream_monitor; +struct h2_task; +struct h2_workers; + +struct nghttp2_session; + +typedef enum { + H2_SESSION_EV_INIT, /* session was initialized */ + H2_SESSION_EV_LOCAL_GOAWAY, /* we send a GOAWAY */ + H2_SESSION_EV_REMOTE_GOAWAY, /* remote send us a GOAWAY */ + H2_SESSION_EV_CONN_ERROR, /* connection error */ + H2_SESSION_EV_PROTO_ERROR, /* protocol error */ + H2_SESSION_EV_CONN_TIMEOUT, /* connection timeout */ + H2_SESSION_EV_NO_IO, /* nothing has been read or written */ + H2_SESSION_EV_FRAME_RCVD, /* a frame has been received */ + H2_SESSION_EV_NGH2_DONE, /* nghttp2 wants neither read nor write anything */ + H2_SESSION_EV_MPM_STOPPING, /* the process is stopping */ + H2_SESSION_EV_PRE_CLOSE, /* connection will close after this */ + H2_SESSION_EV_STREAM_CHANGE, /* a stream (state/input/output) changed */ +} h2_session_event_t; + +typedef struct h2_session { + long id; /* identifier of this session, unique + * inside a httpd process */ + conn_rec *c; /* the connection this session serves */ + request_rec *r; /* the request that started this in case + * of 'h2c', NULL otherwise */ + server_rec *s; /* server/vhost we're starting on */ + const struct h2_config *config; /* Relevant config for this session */ + apr_pool_t *pool; /* pool to use in session */ + struct h2_mplx *mplx; /* multiplexer for stream data */ + struct h2_workers *workers; /* for executing stream tasks */ + struct h2_filter_cin *cin; /* connection input filter context */ + h2_conn_io io; /* io on httpd conn filters */ + struct nghttp2_session *ngh2; /* the nghttp2 session (internal use) */ + + h2_session_state state; /* state session is in */ + + h2_session_props local; /* properties of local session */ + h2_session_props remote; /* properites of remote session */ + + unsigned int reprioritize : 1; /* scheduled streams priority changed */ + unsigned int flush : 1; /* flushing output necessary */ + unsigned int have_read : 1; /* session has read client data */ + unsigned int have_written : 1; /* session did write data to client */ + apr_interval_time_t wait_us; /* timeout during BUSY_WAIT state, micro secs */ + + struct h2_push_diary *push_diary; /* remember pushes, avoid duplicates */ + + struct h2_stream_monitor *monitor;/* monitor callbacks for streams */ + int open_streams; /* number of client streams open */ + int unsent_submits; /* number of submitted, but not yet written responses. 
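                                     * [Ed. note: on_stream_headers() flushes
                                     * these via h2_session_send() once more
                                     * than 10 accumulate or a push promise
                                     * is still unsent.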
*/ + int unsent_promises; /* number of submitted, but not yet written push promises */ + + int responses_submitted; /* number of http/2 responses submitted */ + int streams_reset; /* number of http/2 streams reset by client */ + int pushes_promised; /* number of http/2 push promises submitted */ + int pushes_submitted; /* number of http/2 pushed responses submitted */ + int pushes_reset; /* number of http/2 pushed reset by client */ + + apr_size_t frames_received; /* number of http/2 frames received */ + apr_size_t frames_sent; /* number of http/2 frames sent */ + + apr_size_t max_stream_count; /* max number of open streams */ + apr_size_t max_stream_mem; /* max buffer memory for a single stream */ + + apr_time_t idle_until; /* Time we shut down due to sheer boredom */ + apr_time_t idle_sync_until; /* Time we sync wait until keepalive handling kicks in */ + apr_size_t idle_frames; /* number of rcvd frames that kept session in idle state */ + apr_interval_time_t idle_delay; /* Time we delay processing rcvd frames in idle state */ + + apr_bucket_brigade *bbtmp; /* brigade for keeping temporary data */ + struct apr_thread_cond_t *iowait; /* our cond when trywaiting for data */ + + char status[64]; /* status message for scoreboard */ + int last_status_code; /* the one already reported */ + const char *last_status_msg; /* the one already reported */ + + struct h2_iqueue *in_pending; /* all streams with input pending */ + struct h2_iqueue *in_process; /* all streams ready for processing on slave */ + +} h2_session; + +const char *h2_session_state_str(h2_session_state state); + +/** + * Create a new h2_session for the given connection. + * The session will apply the configured parameter. + * @param psession pointer receiving the created session on success or NULL + * @param c the connection to work on + * @param cfg the module config to apply + * @param workers the worker pool to use + * @return the created session + */ +apr_status_t h2_session_create(h2_session **psession, + conn_rec *c, struct h2_ctx *ctx, + struct h2_workers *workers); + +/** + * Create a new h2_session for the given request. + * The session will apply the configured parameter. + * @param psession pointer receiving the created session on success or NULL + * @param r the request that was upgraded + * @param cfg the module config to apply + * @param workers the worker pool to use + * @return the created session + */ +apr_status_t h2_session_rcreate(h2_session **psession, + request_rec *r, struct h2_ctx *ctx, + struct h2_workers *workers); + +void h2_session_event(h2_session *session, h2_session_event_t ev, + int err, const char *msg); + +/** + * Process the given HTTP/2 session until it is ended or a fatal + * error occurred. + * + * @param session the sessionm to process + */ +apr_status_t h2_session_process(h2_session *session, int async); + +/** + * Last chance to do anything before the connection is closed. + */ +apr_status_t h2_session_pre_close(h2_session *session, int async); + +/** + * Called when a serious error occurred and the session needs to terminate + * without further connection io. + * @param session the session to abort + * @param reason the apache status that caused the abort + */ +void h2_session_abort(h2_session *session, apr_status_t reason); + +/** + * Close and deallocate the given session. + */ +void h2_session_close(h2_session *session); + +/** + * Returns if client settings have push enabled. 
+ * @param != 0 iff push is enabled in client settings + */ +int h2_session_push_enabled(h2_session *session); + +/** + * Look up the stream in this session with the given id. + */ +struct h2_stream *h2_session_stream_get(h2_session *session, int stream_id); + +/** + * Submit a push promise on the stream and schedule the new steam for + * processing.. + * + * @param session the session to work in + * @param is the stream initiating the push + * @param push the push to promise + * @return the new promised stream or NULL + */ +struct h2_stream *h2_session_push(h2_session *session, + struct h2_stream *is, struct h2_push *push); + +apr_status_t h2_session_set_prio(h2_session *session, + struct h2_stream *stream, + const struct h2_priority *prio); + +#define H2_SSSN_MSG(s, msg) \ + "h2_session(%ld,%s,%d): "msg, s->id, h2_session_state_str(s->state), \ + s->open_streams + +#define H2_SSSN_LOG(aplogno, s, msg) aplogno H2_SSSN_MSG(s, msg) + +#endif /* defined(__mod_h2__h2_session__) */ diff --git a/modules/http2/h2_stream.c b/modules/http2/h2_stream.c new file mode 100644 index 0000000..24ebc56 --- /dev/null +++ b/modules/http2/h2_stream.c @@ -0,0 +1,1078 @@ +/* Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include <assert.h> +#include <stddef.h> + +#include <apr_strings.h> + +#include <httpd.h> +#include <http_core.h> +#include <http_connection.h> +#include <http_log.h> + +#include <nghttp2/nghttp2.h> + +#include "h2_private.h" +#include "h2.h" +#include "h2_bucket_beam.h" +#include "h2_conn.h" +#include "h2_config.h" +#include "h2_h2.h" +#include "h2_mplx.h" +#include "h2_push.h" +#include "h2_request.h" +#include "h2_headers.h" +#include "h2_session.h" +#include "h2_stream.h" +#include "h2_task.h" +#include "h2_ctx.h" +#include "h2_task.h" +#include "h2_util.h" + + +static const char *h2_ss_str(h2_stream_state_t state) +{ + switch (state) { + case H2_SS_IDLE: + return "IDLE"; + case H2_SS_RSVD_L: + return "RESERVED_LOCAL"; + case H2_SS_RSVD_R: + return "RESERVED_REMOTE"; + case H2_SS_OPEN: + return "OPEN"; + case H2_SS_CLOSED_L: + return "HALF_CLOSED_LOCAL"; + case H2_SS_CLOSED_R: + return "HALF_CLOSED_REMOTE"; + case H2_SS_CLOSED: + return "CLOSED"; + case H2_SS_CLEANUP: + return "CLEANUP"; + default: + return "UNKNOWN"; + } +} + +const char *h2_stream_state_str(h2_stream *stream) +{ + return h2_ss_str(stream->state); +} + +/* Abbreviations for stream transit tables */ +#define S_XXX (-2) /* Programming Error */ +#define S_ERR (-1) /* Protocol Error */ +#define S_NOP (0) /* No Change */ +#define S_IDL (H2_SS_IDL + 1) +#define S_RS_L (H2_SS_RSVD_L + 1) +#define S_RS_R (H2_SS_RSVD_R + 1) +#define S_OPEN (H2_SS_OPEN + 1) +#define S_CL_L (H2_SS_CLOSED_L + 1) +#define S_CL_R (H2_SS_CLOSED_R + 1) +#define S_CLS (H2_SS_CLOSED + 1) +#define S_CLN (H2_SS_CLEANUP + 1) + +/* state transisitions when certain frame types are sent */ +static int trans_on_send[][H2_SS_MAX] = { +/*S_IDLE,S_RS_R, S_RS_L, S_OPEN, S_CL_R, S_CL_L, S_CLS, S_CLN, */ +{ S_ERR, S_ERR, S_ERR, S_NOP, S_NOP, S_ERR, S_NOP, S_NOP, },/* DATA */ +{ S_ERR, S_ERR, S_CL_R, S_NOP, S_NOP, S_ERR, S_NOP, S_NOP, },/* HEADERS */ +{ S_NOP, S_NOP, S_NOP, S_NOP, S_NOP, S_NOP, S_NOP, S_NOP, },/* PRIORITY */ +{ S_CLS, S_CLS, S_CLS, S_CLS, S_CLS, S_CLS, S_NOP, S_NOP, },/* RST_STREAM */ +{ S_ERR, S_ERR, S_ERR, S_ERR, S_ERR, S_ERR, S_ERR, S_ERR, },/* SETTINGS */ +{ S_RS_L,S_ERR, S_ERR, S_ERR, S_ERR, S_ERR, S_ERR, S_ERR, },/* PUSH_PROMISE */ +{ S_ERR, S_ERR, S_ERR, S_ERR, S_ERR, S_ERR, S_ERR, S_ERR, },/* PING */ +{ S_ERR, S_ERR, S_ERR, S_ERR, S_ERR, S_ERR, S_ERR, S_ERR, },/* GOAWAY */ +{ S_NOP, S_NOP, S_NOP, S_NOP, S_NOP, S_NOP, S_NOP, S_NOP, },/* WINDOW_UPDATE */ +{ S_NOP, S_NOP, S_NOP, S_NOP, S_NOP, S_NOP, S_NOP, S_NOP, },/* CONT */ +}; +/* state transisitions when certain frame types are received */ +static int trans_on_recv[][H2_SS_MAX] = { +/*S_IDLE,S_RS_R, S_RS_L, S_OPEN, S_CL_R, S_CL_L, S_CLS, S_CLN, */ +{ S_ERR, S_ERR, S_ERR, S_NOP, S_ERR, S_NOP, S_NOP, S_NOP, },/* DATA */ +{ S_OPEN,S_CL_L, S_ERR, S_NOP, S_ERR, S_NOP, S_NOP, S_NOP, },/* HEADERS */ +{ S_NOP, S_NOP, S_NOP, S_NOP, S_NOP, S_NOP, S_NOP, S_NOP, },/* PRIORITY */ +{ S_ERR, S_CLS, S_CLS, S_CLS, S_CLS, S_CLS, S_NOP, S_NOP, },/* RST_STREAM */ +{ S_ERR, S_ERR, S_ERR, S_ERR, S_ERR, S_ERR, S_ERR, S_ERR, },/* SETTINGS */ +{ S_RS_R,S_ERR, S_ERR, S_ERR, S_ERR, S_ERR, S_ERR, S_ERR, },/* PUSH_PROMISE */ +{ S_ERR, S_ERR, S_ERR, S_ERR, S_ERR, S_ERR, S_ERR, S_ERR, },/* PING */ +{ S_ERR, S_ERR, S_ERR, S_ERR, S_ERR, S_ERR, S_ERR, S_ERR, },/* GOAWAY */ +{ S_NOP, S_NOP, S_NOP, S_NOP, S_NOP, S_NOP, S_NOP, S_NOP, },/* WINDOW_UPDATE */ +{ S_NOP, S_NOP, S_NOP, S_NOP, S_NOP, S_NOP, S_NOP, S_NOP, },/* CONT */ +}; +/* state transisitions when certain events happen */ +static int 
trans_on_event[][H2_SS_MAX] = { +/*S_IDLE,S_RS_R, S_RS_L, S_OPEN, S_CL_R, S_CL_L, S_CLS, S_CLN, */ +{ S_XXX, S_ERR, S_ERR, S_CL_L, S_CLS, S_XXX, S_XXX, S_XXX, },/* EV_CLOSED_L*/ +{ S_ERR, S_ERR, S_ERR, S_CL_R, S_ERR, S_CLS, S_NOP, S_NOP, },/* EV_CLOSED_R*/ +{ S_CLS, S_CLS, S_CLS, S_CLS, S_CLS, S_CLS, S_NOP, S_NOP, },/* EV_CANCELLED*/ +{ S_NOP, S_XXX, S_XXX, S_XXX, S_XXX, S_CLS, S_CLN, S_XXX, },/* EV_EOS_SENT*/ +}; + +static int on_map(h2_stream_state_t state, int map[H2_SS_MAX]) +{ + int op = map[state]; + switch (op) { + case S_XXX: + case S_ERR: + return op; + case S_NOP: + return state; + default: + return op-1; + } +} + +static int on_frame(h2_stream_state_t state, int frame_type, + int frame_map[][H2_SS_MAX], apr_size_t maxlen) +{ + ap_assert(frame_type >= 0); + ap_assert(state >= 0); + if (frame_type >= maxlen) { + return state; /* NOP, ignore unknown frame types */ + } + return on_map(state, frame_map[frame_type]); +} + +static int on_frame_send(h2_stream_state_t state, int frame_type) +{ + return on_frame(state, frame_type, trans_on_send, H2_ALEN(trans_on_send)); +} + +static int on_frame_recv(h2_stream_state_t state, int frame_type) +{ + return on_frame(state, frame_type, trans_on_recv, H2_ALEN(trans_on_recv)); +} + +static int on_event(h2_stream* stream, h2_stream_event_t ev) +{ + if (stream->monitor && stream->monitor->on_event) { + stream->monitor->on_event(stream->monitor->ctx, stream, ev); + } + if (ev < H2_ALEN(trans_on_event)) { + return on_map(stream->state, trans_on_event[ev]); + } + return stream->state; +} + +static void H2_STREAM_OUT_LOG(int lvl, h2_stream *s, const char *tag) +{ + if (APLOG_C_IS_LEVEL(s->session->c, lvl)) { + conn_rec *c = s->session->c; + char buffer[4 * 1024]; + apr_size_t len, bmax = sizeof(buffer)/sizeof(buffer[0]); + + len = h2_util_bb_print(buffer, bmax, tag, "", s->out_buffer); + ap_log_cerror(APLOG_MARK, lvl, 0, c, + H2_STRM_MSG(s, "out-buffer(%s)"), len? 
buffer : "empty"); + } +} + +static apr_status_t setup_input(h2_stream *stream) { + if (stream->input == NULL) { + int empty = (stream->input_eof + && (!stream->in_buffer + || APR_BRIGADE_EMPTY(stream->in_buffer))); + if (!empty) { + h2_beam_create(&stream->input, stream->pool, stream->id, + "input", H2_BEAM_OWNER_SEND, 0, + stream->session->s->timeout); + h2_beam_send_from(stream->input, stream->pool); + } + } + return APR_SUCCESS; +} + +static apr_status_t close_input(h2_stream *stream) +{ + conn_rec *c = stream->session->c; + apr_status_t status = APR_SUCCESS; + + stream->input_eof = 1; + if (stream->input && h2_beam_is_closed(stream->input)) { + return APR_SUCCESS; + } + + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, stream->session->c, + H2_STRM_MSG(stream, "closing input")); + if (stream->rst_error) { + return APR_ECONNRESET; + } + + if (stream->trailers && !apr_is_empty_table(stream->trailers)) { + apr_bucket *b; + h2_headers *r; + + if (!stream->in_buffer) { + stream->in_buffer = apr_brigade_create(stream->pool, c->bucket_alloc); + } + + r = h2_headers_create(HTTP_OK, stream->trailers, NULL, + stream->in_trailer_octets, stream->pool); + stream->trailers = NULL; + b = h2_bucket_headers_create(c->bucket_alloc, r); + APR_BRIGADE_INSERT_TAIL(stream->in_buffer, b); + + b = apr_bucket_eos_create(c->bucket_alloc); + APR_BRIGADE_INSERT_TAIL(stream->in_buffer, b); + + ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, stream->session->c, + H2_STRM_MSG(stream, "added trailers")); + h2_stream_dispatch(stream, H2_SEV_IN_DATA_PENDING); + } + if (stream->input) { + h2_stream_flush_input(stream); + return h2_beam_close(stream->input); + } + return status; +} + +static apr_status_t close_output(h2_stream *stream) +{ + if (!stream->output || h2_beam_is_closed(stream->output)) { + return APR_SUCCESS; + } + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, stream->session->c, + H2_STRM_MSG(stream, "closing output")); + return h2_beam_leave(stream->output); +} + +static void on_state_enter(h2_stream *stream) +{ + if (stream->monitor && stream->monitor->on_state_enter) { + stream->monitor->on_state_enter(stream->monitor->ctx, stream); + } +} + +static void on_state_event(h2_stream *stream, h2_stream_event_t ev) +{ + if (stream->monitor && stream->monitor->on_state_event) { + stream->monitor->on_state_event(stream->monitor->ctx, stream, ev); + } +} + +static void on_state_invalid(h2_stream *stream) +{ + if (stream->monitor && stream->monitor->on_state_invalid) { + stream->monitor->on_state_invalid(stream->monitor->ctx, stream); + } + /* stream got an event/frame invalid in its state */ + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, stream->session->c, + H2_STRM_MSG(stream, "invalid state event")); + switch (stream->state) { + case H2_SS_OPEN: + case H2_SS_RSVD_L: + case H2_SS_RSVD_R: + case H2_SS_CLOSED_L: + case H2_SS_CLOSED_R: + h2_stream_rst(stream, H2_ERR_INTERNAL_ERROR); + break; + default: + break; + } +} + +static apr_status_t transit(h2_stream *stream, int new_state) +{ + if (new_state == stream->state) { + return APR_SUCCESS; + } + else if (new_state < 0) { + ap_log_cerror(APLOG_MARK, APLOG_WARNING, 0, stream->session->c, + H2_STRM_LOG(APLOGNO(03081), stream, "invalid transition")); + on_state_invalid(stream); + return APR_EINVAL; + } + + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, stream->session->c, + H2_STRM_MSG(stream, "transit to [%s]"), h2_ss_str(new_state)); + stream->state = new_state; + switch (new_state) { + case H2_SS_IDLE: + break; + case H2_SS_RSVD_L: + close_input(stream); + break; + case 
H2_SS_RSVD_R: + break; + case H2_SS_OPEN: + break; + case H2_SS_CLOSED_L: + close_output(stream); + break; + case H2_SS_CLOSED_R: + close_input(stream); + break; + case H2_SS_CLOSED: + close_input(stream); + close_output(stream); + if (stream->out_buffer) { + apr_brigade_cleanup(stream->out_buffer); + } + break; + case H2_SS_CLEANUP: + break; + } + on_state_enter(stream); + return APR_SUCCESS; +} + +void h2_stream_set_monitor(h2_stream *stream, h2_stream_monitor *monitor) +{ + stream->monitor = monitor; +} + +void h2_stream_dispatch(h2_stream *stream, h2_stream_event_t ev) +{ + int new_state; + + ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, stream->session->c, + H2_STRM_MSG(stream, "dispatch event %d"), ev); + new_state = on_event(stream, ev); + if (new_state < 0) { + ap_log_cerror(APLOG_MARK, APLOG_WARNING, 0, stream->session->c, + H2_STRM_LOG(APLOGNO(10002), stream, "invalid event %d"), ev); + on_state_invalid(stream); + AP_DEBUG_ASSERT(new_state > S_XXX); + return; + } + else if (new_state == stream->state) { + /* nop */ + ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, stream->session->c, + H2_STRM_MSG(stream, "non-state event %d"), ev); + return; + } + else { + on_state_event(stream, ev); + transit(stream, new_state); + } +} + +static void set_policy_for(h2_stream *stream, h2_request *r) +{ + int enabled = h2_session_push_enabled(stream->session); + stream->push_policy = h2_push_policy_determine(r->headers, stream->pool, + enabled); + r->serialize = h2_config_geti(stream->session->config, H2_CONF_SER_HEADERS); +} + +apr_status_t h2_stream_send_frame(h2_stream *stream, int ftype, int flags, size_t frame_len) +{ + apr_status_t status = APR_SUCCESS; + int new_state, eos = 0; + + new_state = on_frame_send(stream->state, ftype); + if (new_state < 0) { + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, stream->session->c, + H2_STRM_MSG(stream, "invalid frame %d send"), ftype); + AP_DEBUG_ASSERT(new_state > S_XXX); + return transit(stream, new_state); + } + + ++stream->out_frames; + stream->out_frame_octets += frame_len; + switch (ftype) { + case NGHTTP2_DATA: + eos = (flags & NGHTTP2_FLAG_END_STREAM); + break; + + case NGHTTP2_HEADERS: + eos = (flags & NGHTTP2_FLAG_END_STREAM); + break; + + case NGHTTP2_PUSH_PROMISE: + /* start pushed stream */ + ap_assert(stream->request == NULL); + ap_assert(stream->rtmp != NULL); + status = h2_request_end_headers(stream->rtmp, stream->pool, 1, 0); + if (status != APR_SUCCESS) { + return status; + } + set_policy_for(stream, stream->rtmp); + stream->request = stream->rtmp; + stream->rtmp = NULL; + break; + + default: + break; + } + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, stream->session->c, + H2_STRM_MSG(stream, "send frame %d, eos=%d"), ftype, eos); + status = transit(stream, new_state); + if (status == APR_SUCCESS && eos) { + status = transit(stream, on_event(stream, H2_SEV_CLOSED_L)); + } + return status; +} + +apr_status_t h2_stream_recv_frame(h2_stream *stream, int ftype, int flags, size_t frame_len) +{ + apr_status_t status = APR_SUCCESS; + int new_state, eos = 0; + + new_state = on_frame_recv(stream->state, ftype); + if (new_state < 0) { + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, stream->session->c, + H2_STRM_MSG(stream, "invalid frame %d recv"), ftype); + AP_DEBUG_ASSERT(new_state > S_XXX); + return transit(stream, new_state); + } + + switch (ftype) { + case NGHTTP2_DATA: + eos = (flags & NGHTTP2_FLAG_END_STREAM); + break; + + case NGHTTP2_HEADERS: + eos = (flags & NGHTTP2_FLAG_END_STREAM); + if (stream->state == H2_SS_OPEN) { + /* trailer HEADER */ + if 
(!eos) { + h2_stream_rst(stream, H2_ERR_PROTOCOL_ERROR); + } + stream->in_trailer_octets += frame_len; + } + else { + /* request HEADER */ + ap_assert(stream->request == NULL); + if (stream->rtmp == NULL) { + /* This can only happen, if the stream has received no header + * name/value pairs at all. The lastest nghttp2 version have become + * pretty good at detecting this early. In any case, we have + * to abort the connection here, since this is clearly a protocol error */ + return APR_EINVAL; + } + status = h2_request_end_headers(stream->rtmp, stream->pool, eos, frame_len); + if (status != APR_SUCCESS) { + return status; + } + set_policy_for(stream, stream->rtmp); + stream->request = stream->rtmp; + stream->rtmp = NULL; + } + break; + + default: + break; + } + status = transit(stream, new_state); + if (status == APR_SUCCESS && eos) { + status = transit(stream, on_event(stream, H2_SEV_CLOSED_R)); + } + return status; +} + +apr_status_t h2_stream_flush_input(h2_stream *stream) +{ + apr_status_t status = APR_SUCCESS; + + if (stream->in_buffer && !APR_BRIGADE_EMPTY(stream->in_buffer)) { + setup_input(stream); + status = h2_beam_send(stream->input, stream->in_buffer, APR_BLOCK_READ); + stream->in_last_write = apr_time_now(); + } + if (stream->input_eof + && stream->input && !h2_beam_is_closed(stream->input)) { + status = h2_beam_close(stream->input); + } + return status; +} + +apr_status_t h2_stream_recv_DATA(h2_stream *stream, uint8_t flags, + const uint8_t *data, size_t len) +{ + h2_session *session = stream->session; + apr_status_t status = APR_SUCCESS; + + stream->in_data_frames++; + if (len > 0) { + if (APLOGctrace3(session->c)) { + const char *load = apr_pstrndup(stream->pool, (const char *)data, len); + ap_log_cerror(APLOG_MARK, APLOG_TRACE3, 0, session->c, + H2_STRM_MSG(stream, "recv DATA, len=%d: -->%s<--"), + (int)len, load); + } + else { + ap_log_cerror(APLOG_MARK, APLOG_TRACE2, status, session->c, + H2_STRM_MSG(stream, "recv DATA, len=%d"), (int)len); + } + stream->in_data_octets += len; + if (!stream->in_buffer) { + stream->in_buffer = apr_brigade_create(stream->pool, + session->c->bucket_alloc); + } + apr_brigade_write(stream->in_buffer, NULL, NULL, (const char *)data, len); + h2_stream_dispatch(stream, H2_SEV_IN_DATA_PENDING); + } + return status; +} + +static void prep_output(h2_stream *stream) { + conn_rec *c = stream->session->c; + if (!stream->out_buffer) { + stream->out_buffer = apr_brigade_create(stream->pool, c->bucket_alloc); + } +} + +h2_stream *h2_stream_create(int id, apr_pool_t *pool, h2_session *session, + h2_stream_monitor *monitor, int initiated_on) +{ + h2_stream *stream = apr_pcalloc(pool, sizeof(h2_stream)); + + stream->id = id; + stream->initiated_on = initiated_on; + stream->created = apr_time_now(); + stream->state = H2_SS_IDLE; + stream->pool = pool; + stream->session = session; + stream->monitor = monitor; + stream->max_mem = session->max_stream_mem; + +#ifdef H2_NG2_LOCAL_WIN_SIZE + stream->in_window_size = + nghttp2_session_get_stream_local_window_size( + stream->session->ngh2, stream->id); +#endif + + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, + H2_STRM_LOG(APLOGNO(03082), stream, "created")); + on_state_enter(stream); + return stream; +} + +void h2_stream_cleanup(h2_stream *stream) +{ + apr_status_t status; + + ap_assert(stream); + if (stream->out_buffer) { + /* remove any left over output buckets that may still have + * references into request pools */ + apr_brigade_cleanup(stream->out_buffer); + } + if (stream->input) { + 
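+        /* Abort the input beam and make sure anything the receiving (task)
+         * side still holds has been drained; if the non-blocking check below
+         * reports APR_EAGAIN, a blocking wait follows until the task has let
+         * go of its buffers. */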
h2_beam_abort(stream->input); + status = h2_beam_wait_empty(stream->input, APR_NONBLOCK_READ); + if (status == APR_EAGAIN) { + ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, stream->session->c, + H2_STRM_MSG(stream, "wait on input drain")); + status = h2_beam_wait_empty(stream->input, APR_BLOCK_READ); + ap_log_cerror(APLOG_MARK, APLOG_TRACE2, status, stream->session->c, + H2_STRM_MSG(stream, "input drain returned")); + } + } +} + +void h2_stream_destroy(h2_stream *stream) +{ + ap_assert(stream); + ap_log_cerror(APLOG_MARK, APLOG_TRACE3, 0, stream->session->c, + H2_STRM_MSG(stream, "destroy")); + apr_pool_destroy(stream->pool); +} + +apr_status_t h2_stream_prep_processing(h2_stream *stream) +{ + if (stream->request) { + const h2_request *r = stream->request; + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, stream->session->c, + H2_STRM_MSG(stream, "schedule %s %s://%s%s chunked=%d"), + r->method, r->scheme, r->authority, r->path, r->chunked); + setup_input(stream); + stream->scheduled = 1; + return APR_SUCCESS; + } + return APR_EINVAL; +} + +void h2_stream_rst(h2_stream *stream, int error_code) +{ + stream->rst_error = error_code; + if (stream->input) { + h2_beam_abort(stream->input); + } + if (stream->output) { + h2_beam_leave(stream->output); + } + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, stream->session->c, + H2_STRM_MSG(stream, "reset, error=%d"), error_code); + h2_stream_dispatch(stream, H2_SEV_CANCELLED); +} + +apr_status_t h2_stream_set_request_rec(h2_stream *stream, + request_rec *r, int eos) +{ + h2_request *req; + apr_status_t status; + + ap_assert(stream->request == NULL); + ap_assert(stream->rtmp == NULL); + if (stream->rst_error) { + return APR_ECONNRESET; + } + status = h2_request_rcreate(&req, stream->pool, r); + if (status == APR_SUCCESS) { + ap_log_rerror(APLOG_MARK, APLOG_DEBUG, status, r, + H2_STRM_LOG(APLOGNO(03058), stream, + "set_request_rec %s host=%s://%s%s"), + req->method, req->scheme, req->authority, req->path); + stream->rtmp = req; + /* simulate the frames that led to this */ + return h2_stream_recv_frame(stream, NGHTTP2_HEADERS, + NGHTTP2_FLAG_END_STREAM, 0); + } + return status; +} + +void h2_stream_set_request(h2_stream *stream, const h2_request *r) +{ + ap_assert(stream->request == NULL); + ap_assert(stream->rtmp == NULL); + stream->rtmp = h2_request_clone(stream->pool, r); +} + +static void set_error_response(h2_stream *stream, int http_status) +{ + if (!h2_stream_is_ready(stream)) { + conn_rec *c = stream->session->c; + apr_bucket *b; + h2_headers *response; + + response = h2_headers_die(http_status, stream->request, stream->pool); + prep_output(stream); + b = apr_bucket_eos_create(c->bucket_alloc); + APR_BRIGADE_INSERT_HEAD(stream->out_buffer, b); + b = h2_bucket_headers_create(c->bucket_alloc, response); + APR_BRIGADE_INSERT_HEAD(stream->out_buffer, b); + } +} + +static apr_status_t add_trailer(h2_stream *stream, + const char *name, size_t nlen, + const char *value, size_t vlen) +{ + conn_rec *c = stream->session->c; + char *hname, *hvalue; + + if (nlen == 0 || name[0] == ':') { + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, APR_EINVAL, c, + H2_STRM_LOG(APLOGNO(03060), stream, + "pseudo header in trailer")); + return APR_EINVAL; + } + if (h2_req_ignore_trailer(name, nlen)) { + return APR_SUCCESS; + } + if (!stream->trailers) { + stream->trailers = apr_table_make(stream->pool, 5); + } + hname = apr_pstrndup(stream->pool, name, nlen); + hvalue = apr_pstrndup(stream->pool, value, vlen); + h2_util_camel_case_header(hname, nlen); + apr_table_mergen(stream->trailers, 
hname, hvalue); + + return APR_SUCCESS; +} + +apr_status_t h2_stream_add_header(h2_stream *stream, + const char *name, size_t nlen, + const char *value, size_t vlen) +{ + h2_session *session = stream->session; + int error = 0; + apr_status_t status; + + if (stream->has_response) { + return APR_EINVAL; + } + ++stream->request_headers_added; + if (name[0] == ':') { + if ((vlen) > session->s->limit_req_line) { + /* pseudo header: approximation of request line size check */ + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, session->c, + H2_STRM_MSG(stream, "pseudo %s too long"), name); + error = HTTP_REQUEST_URI_TOO_LARGE; + } + } + else if ((nlen + 2 + vlen) > session->s->limit_req_fieldsize) { + /* header too long */ + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, session->c, + H2_STRM_MSG(stream, "header %s too long"), name); + error = HTTP_REQUEST_HEADER_FIELDS_TOO_LARGE; + } + + if (stream->request_headers_added > session->s->limit_req_fields + 4) { + /* too many header lines, include 4 pseudo headers */ + if (stream->request_headers_added + > session->s->limit_req_fields + 4 + 100) { + /* yeah, right */ + h2_stream_rst(stream, H2_ERR_ENHANCE_YOUR_CALM); + return APR_ECONNRESET; + } + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, session->c, + H2_STRM_MSG(stream, "too many header lines")); + error = HTTP_REQUEST_HEADER_FIELDS_TOO_LARGE; + } + + if (error) { + set_error_response(stream, error); + return APR_EINVAL; + } + else if (H2_SS_IDLE == stream->state) { + if (!stream->rtmp) { + stream->rtmp = h2_req_create(stream->id, stream->pool, + NULL, NULL, NULL, NULL, NULL, 0); + } + status = h2_request_add_header(stream->rtmp, stream->pool, + name, nlen, value, vlen); + } + else if (H2_SS_OPEN == stream->state) { + status = add_trailer(stream, name, nlen, value, vlen); + } + else { + status = APR_EINVAL; + } + + if (status != APR_SUCCESS) { + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, session->c, + H2_STRM_MSG(stream, "header %s not accepted"), name); + h2_stream_dispatch(stream, H2_SEV_CANCELLED); + } + return status; +} + +static apr_bucket *get_first_headers_bucket(apr_bucket_brigade *bb) +{ + if (bb) { + apr_bucket *b = APR_BRIGADE_FIRST(bb); + while (b != APR_BRIGADE_SENTINEL(bb)) { + if (H2_BUCKET_IS_HEADERS(b)) { + return b; + } + b = APR_BUCKET_NEXT(b); + } + } + return NULL; +} + +static apr_status_t add_buffered_data(h2_stream *stream, apr_off_t requested, + apr_off_t *plen, int *peos, int *is_all, + h2_headers **pheaders) +{ + apr_bucket *b, *e; + + *peos = 0; + *plen = 0; + *is_all = 0; + if (pheaders) { + *pheaders = NULL; + } + + H2_STREAM_OUT_LOG(APLOG_TRACE2, stream, "add_buffered_data"); + b = APR_BRIGADE_FIRST(stream->out_buffer); + while (b != APR_BRIGADE_SENTINEL(stream->out_buffer)) { + e = APR_BUCKET_NEXT(b); + if (APR_BUCKET_IS_METADATA(b)) { + if (APR_BUCKET_IS_FLUSH(b)) { + APR_BUCKET_REMOVE(b); + apr_bucket_destroy(b); + } + else if (APR_BUCKET_IS_EOS(b)) { + *peos = 1; + return APR_SUCCESS; + } + else if (H2_BUCKET_IS_HEADERS(b)) { + if (*plen > 0) { + /* data before the response, can only return up to here */ + return APR_SUCCESS; + } + else if (pheaders) { + *pheaders = h2_bucket_headers_get(b); + APR_BUCKET_REMOVE(b); + apr_bucket_destroy(b); + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, stream->session->c, + H2_STRM_MSG(stream, "prep, -> response %d"), + (*pheaders)->status); + return APR_SUCCESS; + } + else { + return APR_EAGAIN; + } + } + } + else if (b->length == 0) { + APR_BUCKET_REMOVE(b); + apr_bucket_destroy(b); + } + else { + ap_assert(b->length != (apr_size_t)-1); 
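+            /* A plain data bucket: count its length towards the requested
+             * amount; once *plen reaches 'requested' it is capped and we
+             * return, leaving the rest buffered for a later call. */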
+ *plen += b->length; + if (*plen >= requested) { + *plen = requested; + return APR_SUCCESS; + } + } + b = e; + } + *is_all = 1; + return APR_SUCCESS; +} + +apr_status_t h2_stream_out_prepare(h2_stream *stream, apr_off_t *plen, + int *peos, h2_headers **pheaders) +{ + apr_status_t status = APR_SUCCESS; + apr_off_t requested, missing, max_chunk = H2_DATA_CHUNK_SIZE; + conn_rec *c; + int complete; + + ap_assert(stream); + + if (stream->rst_error) { + *plen = 0; + *peos = 1; + return APR_ECONNRESET; + } + + c = stream->session->c; + prep_output(stream); + + /* determine how much we'd like to send. We cannot send more than + * is requested. But we can reduce the size in case the master + * connection operates in smaller chunks. (TSL warmup) */ + if (stream->session->io.write_size > 0) { + max_chunk = stream->session->io.write_size - 9; /* header bits */ + } + requested = (*plen > 0)? H2MIN(*plen, max_chunk) : max_chunk; + + /* count the buffered data until eos or a headers bucket */ + status = add_buffered_data(stream, requested, plen, peos, &complete, pheaders); + + if (status == APR_EAGAIN) { + /* TODO: ugly, someone needs to retrieve the response first */ + h2_mplx_keep_active(stream->session->mplx, stream); + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c, + H2_STRM_MSG(stream, "prep, response eagain")); + return status; + } + else if (status != APR_SUCCESS) { + return status; + } + + if (pheaders && *pheaders) { + return APR_SUCCESS; + } + + /* If there we do not have enough buffered data to satisfy the requested + * length *and* we counted the _complete_ buffer (and did not stop in the middle + * because of meta data there), lets see if we can read more from the + * output beam */ + missing = H2MIN(requested, stream->max_mem) - *plen; + if (complete && !*peos && missing > 0) { + apr_status_t rv = APR_EOF; + + if (stream->output) { + H2_STREAM_OUT_LOG(APLOG_TRACE2, stream, "pre"); + rv = h2_beam_receive(stream->output, stream->out_buffer, + APR_NONBLOCK_READ, stream->max_mem - *plen); + H2_STREAM_OUT_LOG(APLOG_TRACE2, stream, "post"); + } + + if (rv == APR_SUCCESS) { + /* count the buffer again, now that we have read output */ + status = add_buffered_data(stream, requested, plen, peos, &complete, pheaders); + } + else if (APR_STATUS_IS_EOF(rv)) { + apr_bucket *eos = apr_bucket_eos_create(c->bucket_alloc); + APR_BRIGADE_INSERT_TAIL(stream->out_buffer, eos); + *peos = 1; + } + else if (APR_STATUS_IS_EAGAIN(rv)) { + /* we set this is the status of this call only if there + * is no buffered data, see check below */ + } + else { + /* real error reading. Give this back directly, even though + * we may have something buffered. */ + status = rv; + } + } + + if (status == APR_SUCCESS) { + if (*peos || *plen) { + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c, + H2_STRM_MSG(stream, "prepare, len=%ld eos=%d"), + (long)*plen, *peos); + } + else { + status = (stream->output && h2_beam_is_closed(stream->output))? 
APR_EOF : APR_EAGAIN; + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c, + H2_STRM_MSG(stream, "prepare, no data")); + } + } + return status; +} + +static int is_not_headers(apr_bucket *b) +{ + return !H2_BUCKET_IS_HEADERS(b); +} + +apr_status_t h2_stream_read_to(h2_stream *stream, apr_bucket_brigade *bb, + apr_off_t *plen, int *peos) +{ + conn_rec *c = stream->session->c; + apr_status_t status = APR_SUCCESS; + + if (stream->rst_error) { + return APR_ECONNRESET; + } + status = h2_append_brigade(bb, stream->out_buffer, plen, peos, is_not_headers); + if (status == APR_SUCCESS && !*peos && !*plen) { + status = APR_EAGAIN; + } + ap_log_cerror(APLOG_MARK, APLOG_TRACE2, status, c, + H2_STRM_MSG(stream, "read_to, len=%ld eos=%d"), + (long)*plen, *peos); + return status; +} + + +apr_status_t h2_stream_submit_pushes(h2_stream *stream, h2_headers *response) +{ + apr_status_t status = APR_SUCCESS; + apr_array_header_t *pushes; + int i; + + pushes = h2_push_collect_update(stream, stream->request, response); + if (pushes && !apr_is_empty_array(pushes)) { + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, stream->session->c, + H2_STRM_MSG(stream, "found %d push candidates"), + pushes->nelts); + for (i = 0; i < pushes->nelts; ++i) { + h2_push *push = APR_ARRAY_IDX(pushes, i, h2_push*); + h2_stream *s = h2_session_push(stream->session, stream, push); + if (!s) { + status = APR_ECONNRESET; + break; + } + } + } + return status; +} + +apr_table_t *h2_stream_get_trailers(h2_stream *stream) +{ + return NULL; +} + +const h2_priority *h2_stream_get_priority(h2_stream *stream, + h2_headers *response) +{ + if (response && stream->initiated_on) { + const char *ctype = apr_table_get(response->headers, "content-type"); + if (ctype) { + /* FIXME: Not good enough, config needs to come from request->server */ + return h2_config_get_priority(stream->session->config, ctype); + } + } + return NULL; +} + +int h2_stream_is_ready(h2_stream *stream) +{ + if (stream->has_response) { + return 1; + } + else if (stream->out_buffer && get_first_headers_bucket(stream->out_buffer)) { + return 1; + } + return 0; +} + +int h2_stream_was_closed(const h2_stream *stream) +{ + switch (stream->state) { + case H2_SS_CLOSED: + case H2_SS_CLEANUP: + return 1; + default: + return 0; + } +} + +apr_status_t h2_stream_in_consumed(h2_stream *stream, apr_off_t amount) +{ + h2_session *session = stream->session; + + if (amount > 0) { + apr_off_t consumed = amount; + + while (consumed > 0) { + int len = (consumed > INT_MAX)? INT_MAX : (int)consumed; + nghttp2_session_consume(session->ngh2, stream->id, len); + consumed -= len; + } + +#ifdef H2_NG2_LOCAL_WIN_SIZE + if (1) { + int cur_size = nghttp2_session_get_stream_local_window_size( + session->ngh2, stream->id); + int win = stream->in_window_size; + int thigh = win * 8/10; + int tlow = win * 2/10; + const int win_max = 2*1024*1024; + const int win_min = 32*1024; + + /* Work in progress, probably should add directives for these + * values once this stabilizes somewhat. The general idea is + * to adapt stream window sizes if the input window changes + * a) very quickly (< good RTT) from full to empty + * b) only a little bit (> bad RTT) + * where in a) it grows and in b) it shrinks again. + */ + if (cur_size > thigh && amount > thigh && win < win_max) { + /* almost empty again with one reported consumption, how + * long did this take? 
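+ * (Illustration with win = 128k: thigh is ~102k, tlow ~25k. If both the
+ * reported consumption and the current local window exceed thigh and the
+ * last input write happened less than 40 ms ago, the window grows by 64k,
+ * capped at 2MB; if both stay below tlow for more than 700 ms, it shrinks
+ * by 32k, but never below 32k.)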
*/ + long ms = apr_time_msec(apr_time_now() - stream->in_last_write); + if (ms < 40) { + win = H2MIN(win_max, win + (64*1024)); + } + } + else if (cur_size < tlow && amount < tlow && win > win_min) { + /* staying full, for how long already? */ + long ms = apr_time_msec(apr_time_now() - stream->in_last_write); + if (ms > 700) { + win = H2MAX(win_min, win - (32*1024)); + } + } + + if (win != stream->in_window_size) { + stream->in_window_size = win; + nghttp2_session_set_local_window_size(session->ngh2, + NGHTTP2_FLAG_NONE, stream->id, win); + } + ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, session->c, + "h2_stream(%ld-%d): consumed %ld bytes, window now %d/%d", + session->id, stream->id, (long)amount, + cur_size, stream->in_window_size); + } +#endif + } + return APR_SUCCESS; +} + diff --git a/modules/http2/h2_stream.h b/modules/http2/h2_stream.h new file mode 100644 index 0000000..7ecc0ad --- /dev/null +++ b/modules/http2/h2_stream.h @@ -0,0 +1,309 @@ +/* Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef __mod_h2__h2_stream__ +#define __mod_h2__h2_stream__ + +#include "h2.h" + +/** + * A HTTP/2 stream, e.g. a client request+response in HTTP/1.1 terms. + * + * A stream always belongs to a h2_session, the one managing the + * connection to the client. The h2_session writes to the h2_stream, + * adding HEADERS and DATA and finally an EOS. When headers are done, + * h2_stream is scheduled for handling, which is expected to produce + * a response h2_headers at least. + * + * The h2_headers may be followed by more h2_headers (interim responses) and + * by DATA frames read from the h2_stream until EOS is reached. Trailers + * are send when a last h2_headers is received. This always closes the stream + * output. 
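+ * A typical lifecycle in these terms: the client's HEADERS frame takes the
+ * stream from IDLE to OPEN (HALF_CLOSED_REMOTE once END_STREAM is seen),
+ * the scheduled task produces response h2_headers and DATA through the
+ * output beam, and sending the final DATA with END_STREAM moves the stream
+ * to CLOSED and eventually CLEANUP.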
+ */
+
+struct h2_mplx;
+struct h2_priority;
+struct h2_request;
+struct h2_headers;
+struct h2_session;
+struct h2_task;
+struct h2_bucket_beam;
+
+typedef struct h2_stream h2_stream;
+
+typedef void h2_stream_state_cb(void *ctx, h2_stream *stream);
+typedef void h2_stream_event_cb(void *ctx, h2_stream *stream,
+ h2_stream_event_t ev);
+
+/**
+ * Callback structure for events and stream state transitions
+ */
+typedef struct h2_stream_monitor {
+ void *ctx;
+ h2_stream_state_cb *on_state_enter; /* called when a state is entered */
+ h2_stream_state_cb *on_state_invalid; /* called when an invalid state change
+ was detected */
+ h2_stream_event_cb *on_state_event; /* called right before the given event
+ results in a new stream state */
+ h2_stream_event_cb *on_event; /* called for events that do not
+ trigger a state change */
+} h2_stream_monitor;
+
+struct h2_stream {
+ int id; /* http2 stream identifier */
+ int initiated_on; /* initiating stream id (PUSH) or 0 */
+ apr_pool_t *pool; /* the memory pool for this stream */
+ struct h2_session *session; /* the session this stream belongs to */
+ h2_stream_state_t state; /* state of this stream */
+
+ apr_time_t created; /* when stream was created */
+
+ const struct h2_request *request; /* the request made in this stream */
+ struct h2_request *rtmp; /* request being assembled */
+ apr_table_t *trailers; /* optional incoming trailers */
+ int request_headers_added; /* number of request headers added */
+
+ struct h2_bucket_beam *input;
+ apr_bucket_brigade *in_buffer;
+ int in_window_size;
+ apr_time_t in_last_write;
+
+ struct h2_bucket_beam *output;
+ apr_bucket_brigade *out_buffer;
+ apr_size_t max_mem; /* maximum amount of data buffered */
+
+ int rst_error; /* stream error for RST_STREAM */
+ unsigned int aborted : 1; /* was aborted */
+ unsigned int scheduled : 1; /* stream has been scheduled */
+ unsigned int has_response : 1; /* response headers are known */
+ unsigned int input_eof : 1; /* no more request data coming */
+ unsigned int out_checked : 1; /* output eof was double checked */
+ unsigned int push_policy; /* which push policy to use for this request */
+
+ struct h2_task *task; /* assigned task to fulfill request */
+
+ const h2_priority *pref_priority; /* preferred priority for this stream */
+ apr_off_t out_frames; /* # of frames sent out */
+ apr_off_t out_frame_octets; /* # of RAW frame octets sent out */
+ apr_off_t out_data_frames; /* # of DATA frames sent */
+ apr_off_t out_data_octets; /* # of DATA octets (payload) sent */
+ apr_off_t in_data_frames; /* # of DATA frames received */
+ apr_off_t in_data_octets; /* # of DATA octets (payload) received */
+ apr_off_t in_trailer_octets; /* # of HEADER octets (payload) received in trailers */
+
+ h2_stream_monitor *monitor; /* optional monitor for stream states */
+};
+
+
+#define H2_STREAM_RST(s, def) (s->rst_error? s->rst_error : (def))
+
+/**
+ * Create a stream in H2_SS_IDLE state.
+ * @param id the stream identifier
+ * @param pool the memory pool to use for this stream
+ * @param session the session this stream belongs to
+ * @param monitor an optional monitor to be called for events and
+ * state transitions
+ * @param initiated_on the id of the stream this one was initiated on (PUSH)
+ *
+ * @return the newly opened stream
+ */
+h2_stream *h2_stream_create(int id, apr_pool_t *pool,
+ struct h2_session *session,
+ h2_stream_monitor *monitor,
+ int initiated_on);
+
+/**
+ * Destroy memory pool if still owned by the stream.
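+ * This also releases everything allocated from the stream's pool, such as
+ * the request and any buffered brigades.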
+ */
+void h2_stream_destroy(h2_stream *stream);
+
+/**
+ * Prepare the stream so that processing may start.
+ *
+ * This is the time to allocate resources not needed before.
+ *
+ * @param stream the stream to prep
+ */
+apr_status_t h2_stream_prep_processing(h2_stream *stream);
+
+/*
+ * Set a new monitor for this stream, replacing any existing one. Can
+ * be called with NULL to have no monitor installed.
+ */
+void h2_stream_set_monitor(h2_stream *stream, h2_stream_monitor *monitor);
+
+/**
+ * Dispatch (handle) an event on the given stream.
+ * @param stream the stream the event happened on
+ * @param ev the type of event
+ */
+void h2_stream_dispatch(h2_stream *stream, h2_stream_event_t ev);
+
+/**
+ * Cleanup references into request processing.
+ *
+ * @param stream the stream to cleanup
+ */
+void h2_stream_cleanup(h2_stream *stream);
+
+/**
+ * Notify the stream that amount bytes have been consumed of its input
+ * since the last invocation of this method (delta amount).
+ */
+apr_status_t h2_stream_in_consumed(h2_stream *stream, apr_off_t amount);
+
+/**
+ * Set complete stream headers from given h2_request.
+ *
+ * @param stream stream to write request to
+ * @param r the request with all the meta data
+ * @param eos != 0 iff stream input is closed
+ */
+void h2_stream_set_request(h2_stream *stream, const h2_request *r);
+
+/**
+ * Set complete stream headers from given request_rec.
+ *
+ * @param stream stream to write request to
+ * @param r the request with all the meta data
+ * @param eos != 0 iff stream input is closed
+ */
+apr_status_t h2_stream_set_request_rec(h2_stream *stream,
+ request_rec *r, int eos);
+
+/*
+ * Add an HTTP/2 header (including pseudo headers) or trailer
+ * to the given stream, depending on stream state.
+ *
+ * @param stream stream to write the header to
+ * @param name the name of the HTTP/2 header
+ * @param nlen the number of characters in name
+ * @param value the header value
+ * @param vlen the number of characters in value
+ */
+apr_status_t h2_stream_add_header(h2_stream *stream,
+ const char *name, size_t nlen,
+ const char *value, size_t vlen);
+
+apr_status_t h2_stream_send_frame(h2_stream *stream, int frame_type, int flags, size_t frame_len);
+apr_status_t h2_stream_recv_frame(h2_stream *stream, int frame_type, int flags, size_t frame_len);
+
+/*
+ * Process a frame of received DATA.
+ *
+ * @param stream stream to write the data to
+ * @param flags the frame flags
+ * @param data the beginning of the bytes to write
+ * @param len the number of bytes to write
+ */
+apr_status_t h2_stream_recv_DATA(h2_stream *stream, uint8_t flags,
+ const uint8_t *data, size_t len);
+
+apr_status_t h2_stream_flush_input(h2_stream *stream);
+
+/**
+ * Reset the stream. Stream write/reads will return errors afterwards.
+ *
+ * @param stream the stream to reset
+ * @param error_code the HTTP/2 error code
+ */
+void h2_stream_rst(h2_stream *stream, int error_code);
+
+/**
+ * Determine if stream was closed already. This is true for
+ * states H2_SS_CLOSED, H2_SS_CLEANUP. But not true
+ * for H2_SS_CLOSED_L and H2_SS_CLOSED_R.
+ *
+ * @param stream the stream to check on
+ * @return != 0 iff stream has been closed
+ */
+int h2_stream_was_closed(const h2_stream *stream);
+
+/**
+ * Do a speculative read on the stream output to determine the
+ * amount of data that can be read.
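+ * A sketch of the calling pattern (local names and the requested length are
+ * illustrative only):
+ *
+ *     apr_off_t len = 16 * 1024; int eos = 0; h2_headers *headers = NULL;
+ *     apr_status_t rv = h2_stream_out_prepare(stream, &len, &eos, &headers);
+ *
+ * On APR_SUCCESS either a response headers object was returned or up to len
+ * bytes may then be appended to a brigade via h2_stream_read_to();
+ * APR_EAGAIN means no output is available yet.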
+ *
+ * @param stream the stream to speculatively read from
+ * @param plen (in-/out) number of bytes requested and on return amount of bytes that
+ * may be read without blocking
+ * @param peos (out) != 0 iff end of stream will be reached when reading plen
+ * bytes (out value).
+ * @param presponse (out) the response if one became available
+ * @return APR_SUCCESS if out information was computed successfully.
+ * APR_EAGAIN if no data is available and end of stream has not been
+ * reached yet.
+ */
+apr_status_t h2_stream_out_prepare(h2_stream *stream, apr_off_t *plen,
+ int *peos, h2_headers **presponse);
+
+/**
+ * Read a maximum number of bytes into the bucket brigade.
+ *
+ * @param stream the stream to read from
+ * @param bb the brigade to append output to
+ * @param plen (in-/out) max. number of bytes to append and on return actual
+ * number of bytes appended to brigade
+ * @param peos (out) != 0 iff end of stream has been reached while reading
+ * @return APR_SUCCESS if out information was computed successfully.
+ * APR_EAGAIN if no data is available and end of stream has not been
+ * reached yet.
+ */
+apr_status_t h2_stream_read_to(h2_stream *stream, apr_bucket_brigade *bb,
+ apr_off_t *plen, int *peos);
+
+/**
+ * Get optional trailers for this stream, may be NULL. Meaningful
+ * results can only be expected when the end of the response body has
+ * been reached.
+ *
+ * @param stream to ask for trailers
+ * @return trailers or NULL
+ */
+apr_table_t *h2_stream_get_trailers(h2_stream *stream);
+
+/**
+ * Submit any server push promises on this stream and schedule
+ * the tasks connected with these.
+ *
+ * @param stream the stream for which to submit
+ */
+apr_status_t h2_stream_submit_pushes(h2_stream *stream, h2_headers *response);
+
+/**
+ * Get priority information set for this stream.
+ */
+const struct h2_priority *h2_stream_get_priority(h2_stream *stream,
+ h2_headers *response);
+
+/**
+ * Return a textual representation of the stream state as in RFC 7540
+ * nomenclature, all caps, underscores.
+ */
+const char *h2_stream_state_str(h2_stream *stream);
+
+/**
+ * Determine if stream is ready for submitting a response or a RST
+ * @param stream the stream to check
+ */
+int h2_stream_is_ready(h2_stream *stream);
+
+#define H2_STRM_MSG(s, msg) \
+ "h2_stream(%ld-%d,%s): "msg, s->session->id, s->id, h2_stream_state_str(s)
+
+#define H2_STRM_LOG(aplogno, s, msg) aplogno H2_STRM_MSG(s, msg)
+
+#endif /* defined(__mod_h2__h2_stream__) */
diff --git a/modules/http2/h2_switch.c b/modules/http2/h2_switch.c
new file mode 100644
index 0000000..5e73568
--- /dev/null
+++ b/modules/http2/h2_switch.c
@@ -0,0 +1,194 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +#include <assert.h> + +#include <apr_strings.h> +#include <apr_optional.h> +#include <apr_optional_hooks.h> + +#include <httpd.h> +#include <http_core.h> +#include <http_config.h> +#include <http_connection.h> +#include <http_protocol.h> +#include <http_log.h> + +#include "h2_private.h" + +#include "h2_config.h" +#include "h2_ctx.h" +#include "h2_conn.h" +#include "h2_h2.h" +#include "h2_switch.h" + +/******************************************************************************* + * Once per lifetime init, retrieve optional functions + */ +apr_status_t h2_switch_init(apr_pool_t *pool, server_rec *s) +{ + (void)pool; + ap_log_error(APLOG_MARK, APLOG_TRACE1, 0, s, "h2_switch init"); + + return APR_SUCCESS; +} + +static int h2_protocol_propose(conn_rec *c, request_rec *r, + server_rec *s, + const apr_array_header_t *offers, + apr_array_header_t *proposals) +{ + int proposed = 0; + int is_tls = h2_h2_is_tls(c); + const char **protos = is_tls? h2_tls_protos : h2_clear_protos; + + (void)s; + if (!h2_mpm_supported()) { + return DECLINED; + } + + if (strcmp(AP_PROTOCOL_HTTP1, ap_get_protocol(c))) { + /* We do not know how to switch from anything else but http/1.1. + */ + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, c, APLOGNO(03083) + "protocol switch: current proto != http/1.1, declined"); + return DECLINED; + } + + if (!h2_is_acceptable_connection(c, 0)) { + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, c, APLOGNO(03084) + "protocol propose: connection requirements not met"); + return DECLINED; + } + + if (r) { + /* So far, this indicates an HTTP/1 Upgrade header initiated + * protocol switch. For that, the HTTP2-Settings header needs + * to be present and valid for the connection. + */ + const char *p; + + if (!h2_allows_h2_upgrade(c)) { + return DECLINED; + } + + p = apr_table_get(r->headers_in, "HTTP2-Settings"); + if (!p) { + ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(03085) + "upgrade without HTTP2-Settings declined"); + return DECLINED; + } + + p = apr_table_get(r->headers_in, "Connection"); + if (!ap_find_token(r->pool, p, "http2-settings")) { + ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(03086) + "upgrade without HTTP2-Settings declined"); + return DECLINED; + } + + /* We also allow switching only for requests that have no body. + */ + p = apr_table_get(r->headers_in, "Content-Length"); + if (p && strcmp(p, "0")) { + ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(03087) + "upgrade with content-length: %s, declined", p); + return DECLINED; + } + } + + while (*protos) { + /* Add all protocols we know (tls or clear) and that + * are part of the offerings (if there have been any). + */ + if (!offers || ap_array_str_contains(offers, *protos)) { + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c, + "proposing protocol '%s'", *protos); + APR_ARRAY_PUSH(proposals, const char*) = *protos; + proposed = 1; + } + ++protos; + } + return proposed? DECLINED : OK; +} + +static int h2_protocol_switch(conn_rec *c, request_rec *r, server_rec *s, + const char *protocol) +{ + int found = 0; + const char **protos = h2_h2_is_tls(c)? 
h2_tls_protos : h2_clear_protos; + const char **p = protos; + + (void)s; + if (!h2_mpm_supported()) { + return DECLINED; + } + + while (*p) { + if (!strcmp(*p, protocol)) { + found = 1; + break; + } + p++; + } + + if (found) { + h2_ctx *ctx = h2_ctx_get(c, 1); + + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c, + "switching protocol to '%s'", protocol); + h2_ctx_protocol_set(ctx, protocol); + h2_ctx_server_set(ctx, s); + + if (r != NULL) { + apr_status_t status; + /* Switching in the middle of a request means that + * we have to send out the response to this one in h2 + * format. So we need to take over the connection + * right away. + */ + ap_remove_input_filter_byhandle(r->input_filters, "http_in"); + ap_remove_input_filter_byhandle(r->input_filters, "reqtimeout"); + ap_remove_output_filter_byhandle(r->output_filters, "HTTP_HEADER"); + + /* Ok, start an h2_conn on this one. */ + h2_ctx_server_set(ctx, r->server); + status = h2_conn_setup(ctx, r->connection, r); + if (status != APR_SUCCESS) { + ap_log_rerror(APLOG_MARK, APLOG_DEBUG, status, r, APLOGNO(03088) + "session setup"); + h2_ctx_clear(c); + return !OK; + } + + h2_conn_run(ctx, c); + } + return OK; + } + + return DECLINED; +} + +static const char *h2_protocol_get(const conn_rec *c) +{ + return h2_ctx_protocol_get(c); +} + +void h2_switch_register_hooks(void) +{ + ap_hook_protocol_propose(h2_protocol_propose, NULL, NULL, APR_HOOK_MIDDLE); + ap_hook_protocol_switch(h2_protocol_switch, NULL, NULL, APR_HOOK_MIDDLE); + ap_hook_protocol_get(h2_protocol_get, NULL, NULL, APR_HOOK_MIDDLE); +} diff --git a/modules/http2/h2_switch.h b/modules/http2/h2_switch.h new file mode 100644 index 0000000..7be8a23 --- /dev/null +++ b/modules/http2/h2_switch.h @@ -0,0 +1,30 @@ +/* Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef __mod_h2__h2_switch__ +#define __mod_h2__h2_switch__ + +/* + * One time, post config initialization. + */ +apr_status_t h2_switch_init(apr_pool_t *pool, server_rec *s); + +/* Register apache hooks for protocol switching + */ +void h2_switch_register_hooks(void); + + +#endif /* defined(__mod_h2__h2_switch__) */ diff --git a/modules/http2/h2_task.c b/modules/http2/h2_task.c new file mode 100644 index 0000000..86fb026 --- /dev/null +++ b/modules/http2/h2_task.c @@ -0,0 +1,769 @@ +/* Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include <assert.h> +#include <stddef.h> + +#include <apr_atomic.h> +#include <apr_strings.h> + +#include <httpd.h> +#include <http_core.h> +#include <http_connection.h> +#include <http_protocol.h> +#include <http_request.h> +#include <http_log.h> +#include <http_vhost.h> +#include <util_filter.h> +#include <ap_mpm.h> +#include <mod_core.h> +#include <scoreboard.h> + +#include "h2_private.h" +#include "h2.h" +#include "h2_bucket_beam.h" +#include "h2_conn.h" +#include "h2_config.h" +#include "h2_ctx.h" +#include "h2_from_h1.h" +#include "h2_h2.h" +#include "h2_mplx.h" +#include "h2_request.h" +#include "h2_headers.h" +#include "h2_session.h" +#include "h2_stream.h" +#include "h2_task.h" +#include "h2_util.h" + +static void H2_TASK_OUT_LOG(int lvl, h2_task *task, apr_bucket_brigade *bb, + const char *tag) +{ + if (APLOG_C_IS_LEVEL(task->c, lvl)) { + conn_rec *c = task->c; + char buffer[4 * 1024]; + const char *line = "(null)"; + apr_size_t len, bmax = sizeof(buffer)/sizeof(buffer[0]); + + len = h2_util_bb_print(buffer, bmax, tag, "", bb); + ap_log_cerror(APLOG_MARK, lvl, 0, c, "bb_dump(%s): %s", + task->id, len? buffer : line); + } +} + +/******************************************************************************* + * task input handling + ******************************************************************************/ + +static int input_ser_header(void *ctx, const char *name, const char *value) +{ + h2_task *task = ctx; + apr_brigade_printf(task->input.bb, NULL, NULL, "%s: %s\r\n", name, value); + return 1; +} + +/******************************************************************************* + * task output handling + ******************************************************************************/ + +static apr_status_t open_output(h2_task *task) +{ + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, task->c, APLOGNO(03348) + "h2_task(%s): open output to %s %s %s", + task->id, task->request->method, + task->request->authority, + task->request->path); + task->output.opened = 1; + return h2_mplx_out_open(task->mplx, task->stream_id, task->output.beam); +} + +static apr_status_t send_out(h2_task *task, apr_bucket_brigade* bb, int block) +{ + apr_off_t written, left; + apr_status_t status; + + apr_brigade_length(bb, 0, &written); + H2_TASK_OUT_LOG(APLOG_TRACE2, task, bb, "h2_task send_out"); + h2_beam_log(task->output.beam, task->c, APLOG_TRACE2, "send_out(before)"); + /* engines send unblocking */ + status = h2_beam_send(task->output.beam, bb, + block? 
APR_BLOCK_READ : APR_NONBLOCK_READ); + h2_beam_log(task->output.beam, task->c, APLOG_TRACE2, "send_out(after)"); + + if (APR_STATUS_IS_EAGAIN(status)) { + apr_brigade_length(bb, 0, &left); + written -= left; + status = APR_SUCCESS; + } + if (status == APR_SUCCESS) { + if (h2_task_logio_add_bytes_out) { + h2_task_logio_add_bytes_out(task->c, written); + } + ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, task->c, + "h2_task(%s): send_out done", task->id); + } + else { + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, task->c, + "h2_task(%s): send_out (%ld bytes)", + task->id, (long)written); + } + return status; +} + +/* Bring the data from the brigade (which represents the result of the + * request_rec out filter chain) into the h2_mplx for further sending + * on the master connection. + */ +static apr_status_t slave_out(h2_task *task, ap_filter_t* f, + apr_bucket_brigade* bb) +{ + apr_bucket *b; + apr_status_t rv = APR_SUCCESS; + int flush = 0, blocking; + + if (task->frozen) { + h2_util_bb_log(task->c, task->stream_id, APLOG_TRACE2, + "frozen task output write, ignored", bb); + while (!APR_BRIGADE_EMPTY(bb)) { + b = APR_BRIGADE_FIRST(bb); + if (AP_BUCKET_IS_EOR(b)) { + APR_BUCKET_REMOVE(b); + task->eor = b; + } + else { + apr_bucket_delete(b); + } + } + return APR_SUCCESS; + } + +send: + /* we send block once we opened the output, so someone is there + * reading it *and* the task is not assigned to a h2_req_engine */ + blocking = (!task->assigned && task->output.opened); + for (b = APR_BRIGADE_FIRST(bb); + b != APR_BRIGADE_SENTINEL(bb); + b = APR_BUCKET_NEXT(b)) { + if (APR_BUCKET_IS_FLUSH(b) || APR_BUCKET_IS_EOS(b) || AP_BUCKET_IS_EOR(b)) { + flush = 1; + break; + } + } + + if (task->output.bb && !APR_BRIGADE_EMPTY(task->output.bb)) { + /* still have data buffered from previous attempt. + * setaside and append new data and try to pass the complete data */ + if (!APR_BRIGADE_EMPTY(bb)) { + if (APR_SUCCESS != (rv = ap_save_brigade(f, &task->output.bb, &bb, task->pool))) { + goto out; + } + } + rv = send_out(task, task->output.bb, blocking); + } + else { + /* no data buffered previously, pass brigade directly */ + rv = send_out(task, bb, blocking); + + if (APR_SUCCESS == rv && !APR_BRIGADE_EMPTY(bb)) { + /* output refused to buffer it all, time to open? */ + if (!task->output.opened && APR_SUCCESS == (rv = open_output(task))) { + /* Make another attempt to send the data. With the output open, + * the call might be blocking and send all data, so we do not need + * to save the brigade */ + goto send; + } + else if (blocking && flush) { + /* Need to keep on doing this. 
*/ + goto send; + } + + if (APR_SUCCESS == rv) { + /* could not write all, buffer the rest */ + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, rv, task->c, APLOGNO(03405) + "h2_slave_out(%s): saving brigade", task->id); + ap_assert(NULL); + rv = ap_save_brigade(f, &task->output.bb, &bb, task->pool); + flush = 1; + } + } + } + + if (APR_SUCCESS == rv && !task->output.opened && flush) { + /* got a flush or could not write all, time to tell someone to read */ + rv = open_output(task); + } +out: + ap_log_cerror(APLOG_MARK, APLOG_TRACE2, rv, task->c, + "h2_slave_out(%s): slave_out leave", task->id); + return rv; +} + +static apr_status_t output_finish(h2_task *task) +{ + if (!task->output.opened) { + return open_output(task); + } + return APR_SUCCESS; +} + +/******************************************************************************* + * task slave connection filters + ******************************************************************************/ + +static apr_status_t h2_filter_slave_in(ap_filter_t* f, + apr_bucket_brigade* bb, + ap_input_mode_t mode, + apr_read_type_e block, + apr_off_t readbytes) +{ + h2_task *task; + apr_status_t status = APR_SUCCESS; + apr_bucket *b, *next; + apr_off_t bblen; + const int trace1 = APLOGctrace1(f->c); + apr_size_t rmax = ((readbytes <= APR_SIZE_MAX)? + (apr_size_t)readbytes : APR_SIZE_MAX); + + task = h2_ctx_cget_task(f->c); + ap_assert(task); + + if (trace1) { + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, f->c, + "h2_slave_in(%s): read, mode=%d, block=%d, readbytes=%ld", + task->id, mode, block, (long)readbytes); + } + + if (mode == AP_MODE_INIT) { + return ap_get_brigade(f->c->input_filters, bb, mode, block, readbytes); + } + + if (f->c->aborted) { + return APR_ECONNABORTED; + } + + if (!task->input.bb) { + return APR_EOF; + } + + /* Cleanup brigades from those nasty 0 length non-meta buckets + * that apr_brigade_split_line() sometimes produces. */ + for (b = APR_BRIGADE_FIRST(task->input.bb); + b != APR_BRIGADE_SENTINEL(task->input.bb); b = next) { + next = APR_BUCKET_NEXT(b); + if (b->length == 0 && !APR_BUCKET_IS_METADATA(b)) { + apr_bucket_delete(b); + } + } + + while (APR_BRIGADE_EMPTY(task->input.bb)) { + /* Get more input data for our request. */ + if (trace1) { + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, f->c, + "h2_slave_in(%s): get more data from mplx, block=%d, " + "readbytes=%ld", task->id, block, (long)readbytes); + } + if (task->input.beam) { + status = h2_beam_receive(task->input.beam, task->input.bb, block, + 128*1024); + } + else { + status = APR_EOF; + } + + if (trace1) { + ap_log_cerror(APLOG_MARK, APLOG_TRACE2, status, f->c, + "h2_slave_in(%s): read returned", task->id); + } + if (APR_STATUS_IS_EAGAIN(status) + && (mode == AP_MODE_GETLINE || block == APR_BLOCK_READ)) { + /* chunked input handling does not seem to like it if we + * return with APR_EAGAIN from a GETLINE read... + * upload 100k test on test-ser.example.org hangs */ + status = APR_SUCCESS; + } + else if (APR_STATUS_IS_EOF(status)) { + break; + } + else if (status != APR_SUCCESS) { + return status; + } + + if (trace1) { + h2_util_bb_log(f->c, task->stream_id, APLOG_TRACE2, + "input.beam recv raw", task->input.bb); + } + if (h2_task_logio_add_bytes_in) { + apr_brigade_length(bb, 0, &bblen); + h2_task_logio_add_bytes_in(f->c, bblen); + } + } + + /* Nothing there, no more data to get. Return APR_EAGAIN on + * speculative reads, this is ap_check_pipeline()'s trick to + * see if the connection needs closing. 
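+ * (The speculative probe does not consume anything; answering APR_EAGAIN
+ * here merely reports that no further data is pending.)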
*/ + if (status == APR_EOF && APR_BRIGADE_EMPTY(task->input.bb)) { + return (mode == AP_MODE_SPECULATIVE)? APR_EAGAIN : APR_EOF; + } + + if (trace1) { + h2_util_bb_log(f->c, task->stream_id, APLOG_TRACE2, + "task_input.bb", task->input.bb); + } + + if (APR_BRIGADE_EMPTY(task->input.bb)) { + if (trace1) { + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, f->c, + "h2_slave_in(%s): no data", task->id); + } + return (block == APR_NONBLOCK_READ)? APR_EAGAIN : APR_EOF; + } + + if (mode == AP_MODE_EXHAUSTIVE) { + /* return all we have */ + APR_BRIGADE_CONCAT(bb, task->input.bb); + } + else if (mode == AP_MODE_READBYTES) { + status = h2_brigade_concat_length(bb, task->input.bb, rmax); + } + else if (mode == AP_MODE_SPECULATIVE) { + status = h2_brigade_copy_length(bb, task->input.bb, rmax); + } + else if (mode == AP_MODE_GETLINE) { + /* we are reading a single LF line, e.g. the HTTP headers. + * this has the nasty side effect to split the bucket, even + * though it ends with CRLF and creates a 0 length bucket */ + status = apr_brigade_split_line(bb, task->input.bb, block, + HUGE_STRING_LEN); + if (APLOGctrace1(f->c)) { + char buffer[1024]; + apr_size_t len = sizeof(buffer)-1; + apr_brigade_flatten(bb, buffer, &len); + buffer[len] = 0; + if (trace1) { + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, f->c, + "h2_slave_in(%s): getline: %s", + task->id, buffer); + } + } + } + else { + /* Hmm, well. There is mode AP_MODE_EATCRLF, but we chose not + * to support it. Seems to work. */ + ap_log_cerror(APLOG_MARK, APLOG_ERR, APR_ENOTIMPL, f->c, + APLOGNO(03472) + "h2_slave_in(%s), unsupported READ mode %d", + task->id, mode); + status = APR_ENOTIMPL; + } + + if (trace1) { + apr_brigade_length(bb, 0, &bblen); + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, f->c, + "h2_slave_in(%s): %ld data bytes", task->id, (long)bblen); + } + return status; +} + +static apr_status_t h2_filter_slave_output(ap_filter_t* filter, + apr_bucket_brigade* brigade) +{ + h2_task *task = h2_ctx_cget_task(filter->c); + apr_status_t status; + + ap_assert(task); + status = slave_out(task, filter, brigade); + if (status != APR_SUCCESS) { + h2_task_rst(task, H2_ERR_INTERNAL_ERROR); + } + return status; +} + +static apr_status_t h2_filter_parse_h1(ap_filter_t* f, apr_bucket_brigade* bb) +{ + h2_task *task = h2_ctx_cget_task(f->c); + apr_status_t status; + + ap_assert(task); + /* There are cases where we need to parse a serialized http/1.1 + * response. One example is a 100-continue answer in serialized mode + * or via a mod_proxy setup */ + while (bb && !task->output.sent_response) { + status = h2_from_h1_parse_response(task, f, bb); + ap_log_cerror(APLOG_MARK, APLOG_TRACE2, status, f->c, + "h2_task(%s): parsed response", task->id); + if (APR_BRIGADE_EMPTY(bb) || status != APR_SUCCESS) { + return status; + } + } + + return ap_pass_brigade(f->next, bb); +} + +/******************************************************************************* + * task things + ******************************************************************************/ + +int h2_task_can_redo(h2_task *task) { + if (task->input.beam && h2_beam_was_received(task->input.beam)) { + /* cannot repeat that. 
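+         * Body data already read from the input beam cannot be produced
+         * again, so such a request is not replayed; only the body-less
+         * methods checked below (GET, HEAD, OPTIONS) are considered safe
+         * to re-run.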
*/ + return 0; + } + return (!strcmp("GET", task->request->method) + || !strcmp("HEAD", task->request->method) + || !strcmp("OPTIONS", task->request->method)); +} + +void h2_task_redo(h2_task *task) +{ + task->rst_error = 0; +} + +void h2_task_rst(h2_task *task, int error) +{ + task->rst_error = error; + if (task->input.beam) { + h2_beam_leave(task->input.beam); + } + if (!task->worker_done) { + h2_beam_abort(task->output.beam); + } + if (task->c) { + task->c->aborted = 1; + } +} + +/******************************************************************************* + * Register various hooks + */ +static const char *const mod_ssl[] = { "mod_ssl.c", NULL}; +static int h2_task_pre_conn(conn_rec* c, void *arg); +static int h2_task_process_conn(conn_rec* c); + +APR_OPTIONAL_FN_TYPE(ap_logio_add_bytes_in) *h2_task_logio_add_bytes_in; +APR_OPTIONAL_FN_TYPE(ap_logio_add_bytes_out) *h2_task_logio_add_bytes_out; + +void h2_task_register_hooks(void) +{ + /* This hook runs on new connections before mod_ssl has a say. + * Its purpose is to prevent mod_ssl from touching our pseudo-connections + * for streams. + */ + ap_hook_pre_connection(h2_task_pre_conn, + NULL, mod_ssl, APR_HOOK_FIRST); + /* When the connection processing actually starts, we might + * take over, if the connection is for a task. + */ + ap_hook_process_connection(h2_task_process_conn, + NULL, NULL, APR_HOOK_FIRST); + + ap_register_input_filter("H2_SLAVE_IN", h2_filter_slave_in, + NULL, AP_FTYPE_NETWORK); + ap_register_output_filter("H2_SLAVE_OUT", h2_filter_slave_output, + NULL, AP_FTYPE_NETWORK); + ap_register_output_filter("H2_PARSE_H1", h2_filter_parse_h1, + NULL, AP_FTYPE_NETWORK); + + ap_register_input_filter("H2_REQUEST", h2_filter_request_in, + NULL, AP_FTYPE_PROTOCOL); + ap_register_output_filter("H2_RESPONSE", h2_filter_headers_out, + NULL, AP_FTYPE_PROTOCOL); + ap_register_output_filter("H2_TRAILERS_OUT", h2_filter_trailers_out, + NULL, AP_FTYPE_PROTOCOL); +} + +/* post config init */ +apr_status_t h2_task_init(apr_pool_t *pool, server_rec *s) +{ + h2_task_logio_add_bytes_in = APR_RETRIEVE_OPTIONAL_FN(ap_logio_add_bytes_in); + h2_task_logio_add_bytes_out = APR_RETRIEVE_OPTIONAL_FN(ap_logio_add_bytes_out); + + return APR_SUCCESS; +} + +static int h2_task_pre_conn(conn_rec* c, void *arg) +{ + h2_ctx *ctx; + + if (!c->master) { + return OK; + } + + ctx = h2_ctx_get(c, 0); + (void)arg; + if (h2_ctx_is_task(ctx)) { + ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, c, + "h2_h2, pre_connection, found stream task"); + ap_add_input_filter("H2_SLAVE_IN", NULL, NULL, c); + ap_add_output_filter("H2_PARSE_H1", NULL, NULL, c); + ap_add_output_filter("H2_SLAVE_OUT", NULL, NULL, c); + } + return OK; +} + +h2_task *h2_task_create(conn_rec *slave, int stream_id, + const h2_request *req, h2_mplx *m, + h2_bucket_beam *input, + apr_interval_time_t timeout, + apr_size_t output_max_mem) +{ + apr_pool_t *pool; + h2_task *task; + + ap_assert(slave); + ap_assert(req); + + apr_pool_create(&pool, slave->pool); + task = apr_pcalloc(pool, sizeof(h2_task)); + if (task == NULL) { + return NULL; + } + task->id = "000"; + task->stream_id = stream_id; + task->c = slave; + task->mplx = m; + task->pool = pool; + task->request = req; + task->timeout = timeout; + task->input.beam = input; + task->output.max_buffer = output_max_mem; + + return task; +} + +void h2_task_destroy(h2_task *task) +{ + if (task->output.beam) { + h2_beam_log(task->output.beam, task->c, APLOG_TRACE2, "task_destroy"); + h2_beam_destroy(task->output.beam); + task->output.beam = NULL; + } + + if 
(task->eor) { + apr_bucket_destroy(task->eor); + } + if (task->pool) { + apr_pool_destroy(task->pool); + } +} + +apr_status_t h2_task_do(h2_task *task, apr_thread_t *thread, int worker_id) +{ + conn_rec *c; + + ap_assert(task); + c = task->c; + task->worker_started = 1; + task->started_at = apr_time_now(); + + if (c->master) { + /* Each conn_rec->id is supposed to be unique at a point in time. Since + * some modules (and maybe external code) uses this id as an identifier + * for the request_rec they handle, it needs to be unique for slave + * connections also. + * The connection id is generated by the MPM and most MPMs use the formula + * id := (child_num * max_threads) + thread_num + * which means that there is a maximum id of about + * idmax := max_child_count * max_threads + * If we assume 2024 child processes with 2048 threads max, we get + * idmax ~= 2024 * 2048 = 2 ** 22 + * On 32 bit systems, we have not much space left, but on 64 bit systems + * (and higher?) we can use the upper 32 bits without fear of collision. + * 32 bits is just what we need, since a connection can only handle so + * many streams. + */ + int slave_id, free_bits; + + task->id = apr_psprintf(task->pool, "%ld-%d", c->master->id, + task->stream_id); + if (sizeof(unsigned long) >= 8) { + free_bits = 32; + slave_id = task->stream_id; + } + else { + /* Assume we have a more limited number of threads/processes + * and h2 workers on a 32-bit system. Use the worker instead + * of the stream id. */ + free_bits = 8; + slave_id = worker_id; + } + task->c->id = (c->master->id << free_bits)^slave_id; + c->keepalive = AP_CONN_KEEPALIVE; + } + + h2_beam_create(&task->output.beam, c->pool, task->stream_id, "output", + H2_BEAM_OWNER_SEND, 0, task->timeout); + if (!task->output.beam) { + return APR_ENOMEM; + } + + h2_beam_buffer_size_set(task->output.beam, task->output.max_buffer); + h2_beam_send_from(task->output.beam, task->pool); + + h2_ctx_create_for(c, task); + apr_table_setn(c->notes, H2_TASK_ID_NOTE, task->id); + + h2_slave_run_pre_connection(c, ap_get_conn_socket(c)); + + task->input.bb = apr_brigade_create(task->pool, c->bucket_alloc); + if (task->request->serialize) { + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c, + "h2_task(%s): serialize request %s %s", + task->id, task->request->method, task->request->path); + apr_brigade_printf(task->input.bb, NULL, + NULL, "%s %s HTTP/1.1\r\n", + task->request->method, task->request->path); + apr_table_do(input_ser_header, task, task->request->headers, NULL); + apr_brigade_puts(task->input.bb, NULL, NULL, "\r\n"); + } + + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c, + "h2_task(%s): process connection", task->id); + + task->c->current_thread = thread; + ap_run_process_connection(c); + + if (task->frozen) { + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c, + "h2_task(%s): process_conn returned frozen task", + task->id); + /* cleanup delayed */ + return APR_EAGAIN; + } + else { + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c, + "h2_task(%s): processing done", task->id); + return output_finish(task); + } +} + +static apr_status_t h2_task_process_request(h2_task *task, conn_rec *c) +{ + const h2_request *req = task->request; + conn_state_t *cs = c->cs; + request_rec *r; + + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c, + "h2_task(%s): create request_rec", task->id); + r = h2_request_create_rec(req, c); + if (r && (r->status == HTTP_OK)) { + /* set timeouts for virtual host of request */ + if (task->timeout != r->server->timeout) { + task->timeout = r->server->timeout; + 
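+            /* propagate the virtual host's timeout to the output beam and, if set, the input beam */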
h2_beam_timeout_set(task->output.beam, task->timeout); + if (task->input.beam) { + h2_beam_timeout_set(task->input.beam, task->timeout); + } + } + + ap_update_child_status(c->sbh, SERVER_BUSY_WRITE, r); + + if (cs) { + cs->state = CONN_STATE_HANDLER; + } + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c, + "h2_task(%s): start process_request", task->id); + + /* Add the raw bytes of the request (e.g. header frame lengths to + * the logio for this request. */ + if (req->raw_bytes && h2_task_logio_add_bytes_in) { + h2_task_logio_add_bytes_in(c, req->raw_bytes); + } + + ap_process_request(r); + + if (task->frozen) { + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c, + "h2_task(%s): process_request frozen", task->id); + } + else { + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c, + "h2_task(%s): process_request done", task->id); + } + + /* After the call to ap_process_request, the + * request pool may have been deleted. We set + * r=NULL here to ensure that any dereference + * of r that might be added later in this function + * will result in a segfault immediately instead + * of nondeterministic failures later. + */ + if (cs) + cs->state = CONN_STATE_WRITE_COMPLETION; + r = NULL; + } + else if (!r) { + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c, + "h2_task(%s): create request_rec failed, r=NULL", task->id); + } + else { + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c, + "h2_task(%s): create request_rec failed, r->status=%d", + task->id, r->status); + } + + return APR_SUCCESS; +} + +static int h2_task_process_conn(conn_rec* c) +{ + h2_ctx *ctx; + + if (!c->master) { + return DECLINED; + } + + ctx = h2_ctx_get(c, 0); + if (h2_ctx_is_task(ctx)) { + if (!ctx->task->request->serialize) { + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c, + "h2_h2, processing request directly"); + h2_task_process_request(ctx->task, c); + return DONE; + } + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c, + "h2_task(%s), serialized handling", ctx->task->id); + } + else { + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c, + "slave_conn(%ld): has no task", c->id); + } + return DECLINED; +} + +apr_status_t h2_task_freeze(h2_task *task) +{ + if (!task->frozen) { + task->frozen = 1; + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, task->c, APLOGNO(03406) + "h2_task(%s), frozen", task->id); + } + return APR_SUCCESS; +} + +apr_status_t h2_task_thaw(h2_task *task) +{ + if (task->frozen) { + task->frozen = 0; + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, task->c, APLOGNO(03407) + "h2_task(%s), thawed", task->id); + } + task->thawed = 1; + return APR_SUCCESS; +} + +int h2_task_has_thawed(h2_task *task) +{ + return task->thawed; +} diff --git a/modules/http2/h2_task.h b/modules/http2/h2_task.h new file mode 100644 index 0000000..ab6a746 --- /dev/null +++ b/modules/http2/h2_task.h @@ -0,0 +1,127 @@ +/* Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __mod_h2__h2_task__
+#define __mod_h2__h2_task__
+
+#include <http_core.h>
+
+/**
+ * An h2_task fakes an HTTP/1.1 request from the data in an HTTP/2 stream
+ * (HEADER+CONT.+DATA) the module receives.
+ *
+ * In order to answer an HTTP/2 stream, we want all Apache httpd infrastructure
+ * to be involved as usual, as if this stream had come in as a separate HTTP/1.1
+ * request. The basic trickery to do so was derived from Google's mod_spdy
+ * source. Basically, we fake a new conn_rec object, even with its own
+ * socket, and give it to ap_process_connection().
+ *
+ * Since h2_task instances are executed in separate threads, they may have
+ * different lifetimes than our h2_stream or h2_session instances. Basically,
+ * we would like to be as standalone as possible.
+ *
+ * Finally, to keep certain connection level filters, such as ourselves and
+ * especially the mod_ssl ones, from messing with our data, we need a filter
+ * of our own to disable those.
+ */
+
+struct h2_bucket_beam;
+struct h2_conn;
+struct h2_mplx;
+struct h2_task;
+struct h2_req_engine;
+struct h2_request;
+struct h2_response_parser;
+struct h2_stream;
+struct h2_worker;
+
+typedef struct h2_task h2_task;
+
+struct h2_task {
+    const char *id;
+    int stream_id;
+    conn_rec *c;
+    apr_pool_t *pool;
+
+    const struct h2_request *request;
+    apr_interval_time_t timeout;
+    int rst_error;                   /* h2 related stream abort error */
+
+    struct {
+        struct h2_bucket_beam *beam;
+        unsigned int eos : 1;
+        apr_bucket_brigade *bb;
+        apr_bucket_brigade *bbchunk;
+        apr_off_t chunked_total;
+    } input;
+    struct {
+        struct h2_bucket_beam *beam;
+        unsigned int opened : 1;
+        unsigned int sent_response : 1;
+        unsigned int copy_files : 1;
+        struct h2_response_parser *rparser;
+        apr_bucket_brigade *bb;
+        apr_size_t max_buffer;
+    } output;
+
+    struct h2_mplx *mplx;
+
+    unsigned int filters_set    : 1;
+    unsigned int frozen         : 1;
+    unsigned int thawed         : 1;
+    unsigned int worker_started : 1; /* h2_worker started processing */
+    unsigned int worker_done    : 1; /* h2_worker finished */
+
+    apr_time_t started_at;           /* when processing started */
+    apr_time_t done_at;              /* when processing was done */
+    apr_bucket *eor;
+
+    struct h2_req_engine *engine;   /* engine hosted by this task */
+    struct h2_req_engine *assigned; /* engine that task has been assigned to */
+};
+
+h2_task *h2_task_create(conn_rec *slave, int stream_id,
+                        const h2_request *req, struct h2_mplx *m,
+                        struct h2_bucket_beam *input,
+                        apr_interval_time_t timeout,
+                        apr_size_t output_max_mem);
+
+void h2_task_destroy(h2_task *task);
+
+apr_status_t h2_task_do(h2_task *task, apr_thread_t *thread, int worker_id);
+
+void h2_task_redo(h2_task *task);
+int h2_task_can_redo(h2_task *task);
+
+/**
+ * Reset the task with the given error code, resetting all input/output.
+ */
+void h2_task_rst(h2_task *task, int error);
+
+void h2_task_register_hooks(void);
+/*
+ * One time, post config initialization.
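+ * Retrieves the optional mod_logio byte-counting functions, if available.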
+ */ +apr_status_t h2_task_init(apr_pool_t *pool, server_rec *s); + +extern APR_OPTIONAL_FN_TYPE(ap_logio_add_bytes_in) *h2_task_logio_add_bytes_in; +extern APR_OPTIONAL_FN_TYPE(ap_logio_add_bytes_out) *h2_task_logio_add_bytes_out; + +apr_status_t h2_task_freeze(h2_task *task); +apr_status_t h2_task_thaw(h2_task *task); +int h2_task_has_thawed(h2_task *task); + +#endif /* defined(__mod_h2__h2_task__) */ diff --git a/modules/http2/h2_util.c b/modules/http2/h2_util.c new file mode 100644 index 0000000..9dacd8b --- /dev/null +++ b/modules/http2/h2_util.c @@ -0,0 +1,2015 @@ +/* Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include <assert.h> +#include <apr_strings.h> +#include <apr_thread_mutex.h> +#include <apr_thread_cond.h> + +#include <httpd.h> +#include <http_core.h> +#include <http_log.h> +#include <http_request.h> + +#include <nghttp2/nghttp2.h> + +#include "h2.h" +#include "h2_util.h" + +/* h2_log2(n) iff n is a power of 2 */ +unsigned char h2_log2(int n) +{ + int lz = 0; + if (!n) { + return 0; + } + if (!(n & 0xffff0000u)) { + lz += 16; + n = (n << 16); + } + if (!(n & 0xff000000u)) { + lz += 8; + n = (n << 8); + } + if (!(n & 0xf0000000u)) { + lz += 4; + n = (n << 4); + } + if (!(n & 0xc0000000u)) { + lz += 2; + n = (n << 2); + } + if (!(n & 0x80000000u)) { + lz += 1; + } + + return 31 - lz; +} + +size_t h2_util_hex_dump(char *buffer, size_t maxlen, + const char *data, size_t datalen) +{ + size_t offset = 0; + size_t maxoffset = (maxlen-4); + size_t i; + for (i = 0; i < datalen && offset < maxoffset; ++i) { + const char *sep = (i && i % 16 == 0)? "\n" : " "; + int n = apr_snprintf(buffer+offset, maxoffset-offset, + "%2x%s", ((unsigned int)data[i]&0xff), sep); + offset += n; + } + strcpy(buffer+offset, (i<datalen)? "..." 
: ""); + return strlen(buffer); +} + +size_t h2_util_header_print(char *buffer, size_t maxlen, + const char *name, size_t namelen, + const char *value, size_t valuelen) +{ + size_t offset = 0; + size_t i; + for (i = 0; i < namelen && offset < maxlen; ++i, ++offset) { + buffer[offset] = name[i]; + } + for (i = 0; i < 2 && offset < maxlen; ++i, ++offset) { + buffer[offset] = ": "[i]; + } + for (i = 0; i < valuelen && offset < maxlen; ++i, ++offset) { + buffer[offset] = value[i]; + } + buffer[offset] = '\0'; + return offset; +} + + +void h2_util_camel_case_header(char *s, size_t len) +{ + size_t start = 1; + size_t i; + for (i = 0; i < len; ++i) { + if (start) { + if (s[i] >= 'a' && s[i] <= 'z') { + s[i] -= 'a' - 'A'; + } + + start = 0; + } + else if (s[i] == '-') { + start = 1; + } + } +} + +/* base64 url encoding ****************************************************************************/ + +#define N6 (unsigned int)-1 + +static const unsigned int BASE64URL_UINT6[] = { +/* 0 1 2 3 4 5 6 7 8 9 a b c d e f */ + N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, /* 0 */ + N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, /* 1 */ + N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, 62, N6, N6, /* 2 */ + 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, N6, N6, N6, N6, N6, N6, /* 3 */ + N6, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, /* 4 */ + 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, N6, N6, N6, N6, 63, /* 5 */ + N6, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, /* 6 */ + 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, N6, N6, N6, N6, N6, /* 7 */ + N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, /* 8 */ + N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, /* 9 */ + N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, /* a */ + N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, /* b */ + N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, /* c */ + N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, /* d */ + N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, /* e */ + N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6 /* f */ +}; +static const unsigned char BASE64URL_CHARS[] = { + 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', /* 0 - 9 */ + 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', /* 10 - 19 */ + 'U', 'V', 'W', 'X', 'Y', 'Z', 'a', 'b', 'c', 'd', /* 20 - 29 */ + 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', /* 30 - 39 */ + 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', /* 40 - 49 */ + 'y', 'z', '0', '1', '2', '3', '4', '5', '6', '7', /* 50 - 59 */ + '8', '9', '-', '_', ' ', ' ', ' ', ' ', ' ', ' ', /* 60 - 69 */ +}; + +#define BASE64URL_CHAR(x) BASE64URL_CHARS[ (unsigned int)(x) & 0x3fu ] + +apr_size_t h2_util_base64url_decode(const char **decoded, const char *encoded, + apr_pool_t *pool) +{ + const unsigned char *e = (const unsigned char *)encoded; + const unsigned char *p = e; + unsigned char *d; + unsigned int n; + long len, mlen, remain, i; + + while (*p && BASE64URL_UINT6[ *p ] != N6) { + ++p; + } + len = (int)(p - e); + mlen = (len/4)*4; + *decoded = apr_pcalloc(pool, (apr_size_t)len + 1); + + i = 0; + d = (unsigned char*)*decoded; + for (; i < mlen; i += 4) { + n = ((BASE64URL_UINT6[ e[i+0] ] << 18) + + (BASE64URL_UINT6[ e[i+1] ] << 12) + + (BASE64URL_UINT6[ e[i+2] ] << 6) + + (BASE64URL_UINT6[ e[i+3] ])); + *d++ = (unsigned char)(n >> 16); + *d++ = (unsigned char)(n >> 8 & 0xffu); + *d++ = (unsigned char)(n & 0xffu); + } + remain = 
len - mlen; + switch (remain) { + case 2: + n = ((BASE64URL_UINT6[ e[mlen+0] ] << 18) + + (BASE64URL_UINT6[ e[mlen+1] ] << 12)); + *d++ = (unsigned char)(n >> 16); + remain = 1; + break; + case 3: + n = ((BASE64URL_UINT6[ e[mlen+0] ] << 18) + + (BASE64URL_UINT6[ e[mlen+1] ] << 12) + + (BASE64URL_UINT6[ e[mlen+2] ] << 6)); + *d++ = (unsigned char)(n >> 16); + *d++ = (unsigned char)(n >> 8 & 0xffu); + remain = 2; + break; + default: /* do nothing */ + break; + } + return (apr_size_t)(mlen/4*3 + remain); +} + +const char *h2_util_base64url_encode(const char *data, + apr_size_t dlen, apr_pool_t *pool) +{ + int i, len = (int)dlen; + apr_size_t slen = ((dlen+2)/3)*4 + 1; /* 0 terminated */ + const unsigned char *udata = (const unsigned char*)data; + unsigned char *enc, *p = apr_pcalloc(pool, slen); + + enc = p; + for (i = 0; i < len-2; i+= 3) { + *p++ = BASE64URL_CHAR( (udata[i] >> 2) ); + *p++ = BASE64URL_CHAR( (udata[i] << 4) + (udata[i+1] >> 4) ); + *p++ = BASE64URL_CHAR( (udata[i+1] << 2) + (udata[i+2] >> 6) ); + *p++ = BASE64URL_CHAR( (udata[i+2]) ); + } + + if (i < len) { + *p++ = BASE64URL_CHAR( (udata[i] >> 2) ); + if (i == (len - 1)) { + *p++ = BASE64URL_CHARS[ ((unsigned int)udata[i] << 4) & 0x3fu ]; + } + else { + *p++ = BASE64URL_CHAR( (udata[i] << 4) + (udata[i+1] >> 4) ); + *p++ = BASE64URL_CHAR( (udata[i+1] << 2) ); + } + } + *p++ = '\0'; + return (char *)enc; +} + +/******************************************************************************* + * ihash - hash for structs with int identifier + ******************************************************************************/ +struct h2_ihash_t { + apr_hash_t *hash; + size_t ioff; +}; + +static unsigned int ihash(const char *key, apr_ssize_t *klen) +{ + return (unsigned int)(*((int*)key)); +} + +h2_ihash_t *h2_ihash_create(apr_pool_t *pool, size_t offset_of_int) +{ + h2_ihash_t *ih = apr_pcalloc(pool, sizeof(h2_ihash_t)); + ih->hash = apr_hash_make_custom(pool, ihash); + ih->ioff = offset_of_int; + return ih; +} + +size_t h2_ihash_count(h2_ihash_t *ih) +{ + return apr_hash_count(ih->hash); +} + +int h2_ihash_empty(h2_ihash_t *ih) +{ + return apr_hash_count(ih->hash) == 0; +} + +void *h2_ihash_get(h2_ihash_t *ih, int id) +{ + return apr_hash_get(ih->hash, &id, sizeof(id)); +} + +typedef struct { + h2_ihash_iter_t *iter; + void *ctx; +} iter_ctx; + +static int ihash_iter(void *ctx, const void *key, apr_ssize_t klen, + const void *val) +{ + iter_ctx *ictx = ctx; + return ictx->iter(ictx->ctx, (void*)val); /* why is this passed const?*/ +} + +int h2_ihash_iter(h2_ihash_t *ih, h2_ihash_iter_t *fn, void *ctx) +{ + iter_ctx ictx; + ictx.iter = fn; + ictx.ctx = ctx; + return apr_hash_do(ihash_iter, &ictx, ih->hash); +} + +void h2_ihash_add(h2_ihash_t *ih, void *val) +{ + apr_hash_set(ih->hash, ((char *)val + ih->ioff), sizeof(int), val); +} + +void h2_ihash_remove(h2_ihash_t *ih, int id) +{ + apr_hash_set(ih->hash, &id, sizeof(id), NULL); +} + +void h2_ihash_remove_val(h2_ihash_t *ih, void *val) +{ + int id = *((int*)((char *)val + ih->ioff)); + apr_hash_set(ih->hash, &id, sizeof(id), NULL); +} + + +void h2_ihash_clear(h2_ihash_t *ih) +{ + apr_hash_clear(ih->hash); +} + +typedef struct { + h2_ihash_t *ih; + void **buffer; + size_t max; + size_t len; +} collect_ctx; + +static int collect_iter(void *x, void *val) +{ + collect_ctx *ctx = x; + if (ctx->len < ctx->max) { + ctx->buffer[ctx->len++] = val; + return 1; + } + return 0; +} + +size_t h2_ihash_shift(h2_ihash_t *ih, void **buffer, size_t max) +{ + collect_ctx ctx; + size_t i; + + ctx.ih 
= ih; + ctx.buffer = buffer; + ctx.max = max; + ctx.len = 0; + h2_ihash_iter(ih, collect_iter, &ctx); + for (i = 0; i < ctx.len; ++i) { + h2_ihash_remove_val(ih, buffer[i]); + } + return ctx.len; +} + +/******************************************************************************* + * iqueue - sorted list of int + ******************************************************************************/ + +static void iq_grow(h2_iqueue *q, int nlen); +static void iq_swap(h2_iqueue *q, int i, int j); +static int iq_bubble_up(h2_iqueue *q, int i, int top, + h2_iq_cmp *cmp, void *ctx); +static int iq_bubble_down(h2_iqueue *q, int i, int bottom, + h2_iq_cmp *cmp, void *ctx); + +h2_iqueue *h2_iq_create(apr_pool_t *pool, int capacity) +{ + h2_iqueue *q = apr_pcalloc(pool, sizeof(h2_iqueue)); + if (q) { + q->pool = pool; + iq_grow(q, capacity); + q->nelts = 0; + } + return q; +} + +int h2_iq_empty(h2_iqueue *q) +{ + return q->nelts == 0; +} + +int h2_iq_count(h2_iqueue *q) +{ + return q->nelts; +} + + +int h2_iq_add(h2_iqueue *q, int sid, h2_iq_cmp *cmp, void *ctx) +{ + int i; + + if (h2_iq_contains(q, sid)) { + return 0; + } + if (q->nelts >= q->nalloc) { + iq_grow(q, q->nalloc * 2); + } + i = (q->head + q->nelts) % q->nalloc; + q->elts[i] = sid; + ++q->nelts; + + if (cmp) { + /* bubble it to the front of the queue */ + iq_bubble_up(q, i, q->head, cmp, ctx); + } + return 1; +} + +int h2_iq_append(h2_iqueue *q, int sid) +{ + return h2_iq_add(q, sid, NULL, NULL); +} + +int h2_iq_remove(h2_iqueue *q, int sid) +{ + int i; + for (i = 0; i < q->nelts; ++i) { + if (sid == q->elts[(q->head + i) % q->nalloc]) { + break; + } + } + + if (i < q->nelts) { + ++i; + for (; i < q->nelts; ++i) { + q->elts[(q->head+i-1)%q->nalloc] = q->elts[(q->head+i)%q->nalloc]; + } + --q->nelts; + return 1; + } + return 0; +} + +void h2_iq_clear(h2_iqueue *q) +{ + q->nelts = 0; +} + +void h2_iq_sort(h2_iqueue *q, h2_iq_cmp *cmp, void *ctx) +{ + /* Assume that changes in ordering are minimal. This needs, + * best case, q->nelts - 1 comparisions to check that nothing + * changed. + */ + if (q->nelts > 0) { + int i, ni, prev, last; + + /* Start at the end of the queue and create a tail of sorted + * entries. Make that tail one element longer in each iteration. + */ + last = i = (q->head + q->nelts - 1) % q->nalloc; + while (i != q->head) { + prev = (q->nalloc + i - 1) % q->nalloc; + + ni = iq_bubble_up(q, i, prev, cmp, ctx); + if (ni == prev) { + /* i bubbled one up, bubble the new i down, which + * keeps all tasks below i sorted. 
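+                 * (The tail from i to last stays sorted after every iteration.)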
*/ + iq_bubble_down(q, i, last, cmp, ctx); + } + i = prev; + }; + } +} + + +int h2_iq_shift(h2_iqueue *q) +{ + int sid; + + if (q->nelts <= 0) { + return 0; + } + + sid = q->elts[q->head]; + q->head = (q->head + 1) % q->nalloc; + q->nelts--; + + return sid; +} + +size_t h2_iq_mshift(h2_iqueue *q, int *pint, size_t max) +{ + int i; + for (i = 0; i < max; ++i) { + pint[i] = h2_iq_shift(q); + if (pint[i] == 0) { + break; + } + } + return i; +} + +static void iq_grow(h2_iqueue *q, int nlen) +{ + if (nlen > q->nalloc) { + int *nq = apr_pcalloc(q->pool, sizeof(int) * nlen); + if (q->nelts > 0) { + int l = ((q->head + q->nelts) % q->nalloc) - q->head; + + memmove(nq, q->elts + q->head, sizeof(int) * l); + if (l < q->nelts) { + /* elts wrapped, append elts in [0, remain] to nq */ + int remain = q->nelts - l; + memmove(nq + l, q->elts, sizeof(int) * remain); + } + } + q->elts = nq; + q->nalloc = nlen; + q->head = 0; + } +} + +static void iq_swap(h2_iqueue *q, int i, int j) +{ + int x = q->elts[i]; + q->elts[i] = q->elts[j]; + q->elts[j] = x; +} + +static int iq_bubble_up(h2_iqueue *q, int i, int top, + h2_iq_cmp *cmp, void *ctx) +{ + int prev; + while (((prev = (q->nalloc + i - 1) % q->nalloc), i != top) + && (*cmp)(q->elts[i], q->elts[prev], ctx) < 0) { + iq_swap(q, prev, i); + i = prev; + } + return i; +} + +static int iq_bubble_down(h2_iqueue *q, int i, int bottom, + h2_iq_cmp *cmp, void *ctx) +{ + int next; + while (((next = (q->nalloc + i + 1) % q->nalloc), i != bottom) + && (*cmp)(q->elts[i], q->elts[next], ctx) > 0) { + iq_swap(q, next, i); + i = next; + } + return i; +} + +int h2_iq_contains(h2_iqueue *q, int sid) +{ + int i; + for (i = 0; i < q->nelts; ++i) { + if (sid == q->elts[(q->head + i) % q->nalloc]) { + return 1; + } + } + return 0; +} + +/******************************************************************************* + * FIFO queue + ******************************************************************************/ + +struct h2_fifo { + void **elems; + int nelems; + int set; + int head; + int count; + int aborted; + apr_thread_mutex_t *lock; + apr_thread_cond_t *not_empty; + apr_thread_cond_t *not_full; +}; + +static int nth_index(h2_fifo *fifo, int n) +{ + return (fifo->head + n) % fifo->nelems; +} + +static apr_status_t fifo_destroy(void *data) +{ + h2_fifo *fifo = data; + + apr_thread_cond_destroy(fifo->not_empty); + apr_thread_cond_destroy(fifo->not_full); + apr_thread_mutex_destroy(fifo->lock); + + return APR_SUCCESS; +} + +static int index_of(h2_fifo *fifo, void *elem) +{ + int i; + + for (i = 0; i < fifo->count; ++i) { + if (elem == fifo->elems[nth_index(fifo, i)]) { + return i; + } + } + return -1; +} + +static apr_status_t create_int(h2_fifo **pfifo, apr_pool_t *pool, + int capacity, int as_set) +{ + apr_status_t rv; + h2_fifo *fifo; + + fifo = apr_pcalloc(pool, sizeof(*fifo)); + if (fifo == NULL) { + return APR_ENOMEM; + } + + rv = apr_thread_mutex_create(&fifo->lock, + APR_THREAD_MUTEX_UNNESTED, pool); + if (rv != APR_SUCCESS) { + return rv; + } + + rv = apr_thread_cond_create(&fifo->not_empty, pool); + if (rv != APR_SUCCESS) { + return rv; + } + + rv = apr_thread_cond_create(&fifo->not_full, pool); + if (rv != APR_SUCCESS) { + return rv; + } + + fifo->elems = apr_pcalloc(pool, capacity * sizeof(void*)); + if (fifo->elems == NULL) { + return APR_ENOMEM; + } + fifo->nelems = capacity; + fifo->set = as_set; + + *pfifo = fifo; + apr_pool_cleanup_register(pool, fifo, fifo_destroy, apr_pool_cleanup_null); + + return APR_SUCCESS; +} + +apr_status_t h2_fifo_create(h2_fifo 
**pfifo, apr_pool_t *pool, int capacity) +{ + return create_int(pfifo, pool, capacity, 0); +} + +apr_status_t h2_fifo_set_create(h2_fifo **pfifo, apr_pool_t *pool, int capacity) +{ + return create_int(pfifo, pool, capacity, 1); +} + +apr_status_t h2_fifo_term(h2_fifo *fifo) +{ + apr_status_t rv; + if ((rv = apr_thread_mutex_lock(fifo->lock)) == APR_SUCCESS) { + fifo->aborted = 1; + apr_thread_mutex_unlock(fifo->lock); + } + return rv; +} + +apr_status_t h2_fifo_interrupt(h2_fifo *fifo) +{ + apr_status_t rv; + if ((rv = apr_thread_mutex_lock(fifo->lock)) == APR_SUCCESS) { + apr_thread_cond_broadcast(fifo->not_empty); + apr_thread_cond_broadcast(fifo->not_full); + apr_thread_mutex_unlock(fifo->lock); + } + return rv; +} + +int h2_fifo_count(h2_fifo *fifo) +{ + return fifo->count; +} + +static apr_status_t check_not_empty(h2_fifo *fifo, int block) +{ + while (fifo->count == 0) { + if (!block) { + return APR_EAGAIN; + } + if (fifo->aborted) { + return APR_EOF; + } + apr_thread_cond_wait(fifo->not_empty, fifo->lock); + } + return APR_SUCCESS; +} + +static apr_status_t fifo_push_int(h2_fifo *fifo, void *elem, int block) +{ + if (fifo->aborted) { + return APR_EOF; + } + + if (fifo->set && index_of(fifo, elem) >= 0) { + /* set mode, elem already member */ + return APR_EEXIST; + } + else if (fifo->count == fifo->nelems) { + if (block) { + while (fifo->count == fifo->nelems) { + if (fifo->aborted) { + return APR_EOF; + } + apr_thread_cond_wait(fifo->not_full, fifo->lock); + } + } + else { + return APR_EAGAIN; + } + } + + ap_assert(fifo->count < fifo->nelems); + fifo->elems[nth_index(fifo, fifo->count)] = elem; + ++fifo->count; + if (fifo->count == 1) { + apr_thread_cond_broadcast(fifo->not_empty); + } + return APR_SUCCESS; +} + +static apr_status_t fifo_push(h2_fifo *fifo, void *elem, int block) +{ + apr_status_t rv; + + if (fifo->aborted) { + return APR_EOF; + } + + if ((rv = apr_thread_mutex_lock(fifo->lock)) == APR_SUCCESS) { + rv = fifo_push_int(fifo, elem, block); + apr_thread_mutex_unlock(fifo->lock); + } + return rv; +} + +apr_status_t h2_fifo_push(h2_fifo *fifo, void *elem) +{ + return fifo_push(fifo, elem, 1); +} + +apr_status_t h2_fifo_try_push(h2_fifo *fifo, void *elem) +{ + return fifo_push(fifo, elem, 0); +} + +static apr_status_t pull_head(h2_fifo *fifo, void **pelem, int block) +{ + apr_status_t rv; + + if ((rv = check_not_empty(fifo, block)) != APR_SUCCESS) { + *pelem = NULL; + return rv; + } + *pelem = fifo->elems[fifo->head]; + --fifo->count; + if (fifo->count > 0) { + fifo->head = nth_index(fifo, 1); + if (fifo->count+1 == fifo->nelems) { + apr_thread_cond_broadcast(fifo->not_full); + } + } + return APR_SUCCESS; +} + +static apr_status_t fifo_pull(h2_fifo *fifo, void **pelem, int block) +{ + apr_status_t rv; + + if (fifo->aborted) { + return APR_EOF; + } + + if ((rv = apr_thread_mutex_lock(fifo->lock)) == APR_SUCCESS) { + rv = pull_head(fifo, pelem, block); + apr_thread_mutex_unlock(fifo->lock); + } + return rv; +} + +apr_status_t h2_fifo_pull(h2_fifo *fifo, void **pelem) +{ + return fifo_pull(fifo, pelem, 1); +} + +apr_status_t h2_fifo_try_pull(h2_fifo *fifo, void **pelem) +{ + return fifo_pull(fifo, pelem, 0); +} + +static apr_status_t fifo_peek(h2_fifo *fifo, h2_fifo_peek_fn *fn, void *ctx, int block) +{ + apr_status_t rv; + void *elem; + + if (fifo->aborted) { + return APR_EOF; + } + + if (APR_SUCCESS == (rv = apr_thread_mutex_lock(fifo->lock))) { + if (APR_SUCCESS == (rv = pull_head(fifo, &elem, block))) { + switch (fn(elem, ctx)) { + case H2_FIFO_OP_PULL: + break; + case 
H2_FIFO_OP_REPUSH: + rv = fifo_push_int(fifo, elem, block); + break; + } + } + apr_thread_mutex_unlock(fifo->lock); + } + return rv; +} + +apr_status_t h2_fifo_peek(h2_fifo *fifo, h2_fifo_peek_fn *fn, void *ctx) +{ + return fifo_peek(fifo, fn, ctx, 1); +} + +apr_status_t h2_fifo_try_peek(h2_fifo *fifo, h2_fifo_peek_fn *fn, void *ctx) +{ + return fifo_peek(fifo, fn, ctx, 0); +} + +apr_status_t h2_fifo_remove(h2_fifo *fifo, void *elem) +{ + apr_status_t rv; + + if (fifo->aborted) { + return APR_EOF; + } + + if ((rv = apr_thread_mutex_lock(fifo->lock)) == APR_SUCCESS) { + int i, rc; + void *e; + + rc = 0; + for (i = 0; i < fifo->count; ++i) { + e = fifo->elems[nth_index(fifo, i)]; + if (e == elem) { + ++rc; + } + else if (rc) { + fifo->elems[nth_index(fifo, i-rc)] = e; + } + } + if (rc) { + fifo->count -= rc; + if (fifo->count + rc == fifo->nelems) { + apr_thread_cond_broadcast(fifo->not_full); + } + rv = APR_SUCCESS; + } + else { + rv = APR_EAGAIN; + } + + apr_thread_mutex_unlock(fifo->lock); + } + return rv; +} + +/******************************************************************************* + * FIFO int queue + ******************************************************************************/ + +struct h2_ififo { + int *elems; + int nelems; + int set; + int head; + int count; + int aborted; + apr_thread_mutex_t *lock; + apr_thread_cond_t *not_empty; + apr_thread_cond_t *not_full; +}; + +static int inth_index(h2_ififo *fifo, int n) +{ + return (fifo->head + n) % fifo->nelems; +} + +static apr_status_t ififo_destroy(void *data) +{ + h2_ififo *fifo = data; + + apr_thread_cond_destroy(fifo->not_empty); + apr_thread_cond_destroy(fifo->not_full); + apr_thread_mutex_destroy(fifo->lock); + + return APR_SUCCESS; +} + +static int iindex_of(h2_ififo *fifo, int id) +{ + int i; + + for (i = 0; i < fifo->count; ++i) { + if (id == fifo->elems[inth_index(fifo, i)]) { + return i; + } + } + return -1; +} + +static apr_status_t icreate_int(h2_ififo **pfifo, apr_pool_t *pool, + int capacity, int as_set) +{ + apr_status_t rv; + h2_ififo *fifo; + + fifo = apr_pcalloc(pool, sizeof(*fifo)); + if (fifo == NULL) { + return APR_ENOMEM; + } + + rv = apr_thread_mutex_create(&fifo->lock, + APR_THREAD_MUTEX_UNNESTED, pool); + if (rv != APR_SUCCESS) { + return rv; + } + + rv = apr_thread_cond_create(&fifo->not_empty, pool); + if (rv != APR_SUCCESS) { + return rv; + } + + rv = apr_thread_cond_create(&fifo->not_full, pool); + if (rv != APR_SUCCESS) { + return rv; + } + + fifo->elems = apr_pcalloc(pool, capacity * sizeof(int)); + if (fifo->elems == NULL) { + return APR_ENOMEM; + } + fifo->nelems = capacity; + fifo->set = as_set; + + *pfifo = fifo; + apr_pool_cleanup_register(pool, fifo, ififo_destroy, apr_pool_cleanup_null); + + return APR_SUCCESS; +} + +apr_status_t h2_ififo_create(h2_ififo **pfifo, apr_pool_t *pool, int capacity) +{ + return icreate_int(pfifo, pool, capacity, 0); +} + +apr_status_t h2_ififo_set_create(h2_ififo **pfifo, apr_pool_t *pool, int capacity) +{ + return icreate_int(pfifo, pool, capacity, 1); +} + +apr_status_t h2_ififo_term(h2_ififo *fifo) +{ + apr_status_t rv; + if ((rv = apr_thread_mutex_lock(fifo->lock)) == APR_SUCCESS) { + fifo->aborted = 1; + apr_thread_mutex_unlock(fifo->lock); + } + return rv; +} + +apr_status_t h2_ififo_interrupt(h2_ififo *fifo) +{ + apr_status_t rv; + if ((rv = apr_thread_mutex_lock(fifo->lock)) == APR_SUCCESS) { + apr_thread_cond_broadcast(fifo->not_empty); + apr_thread_cond_broadcast(fifo->not_full); + apr_thread_mutex_unlock(fifo->lock); + } + return rv; +} + +int 
h2_ififo_count(h2_ififo *fifo) +{ + return fifo->count; +} + +static apr_status_t icheck_not_empty(h2_ififo *fifo, int block) +{ + while (fifo->count == 0) { + if (!block) { + return APR_EAGAIN; + } + if (fifo->aborted) { + return APR_EOF; + } + apr_thread_cond_wait(fifo->not_empty, fifo->lock); + } + return APR_SUCCESS; +} + +static apr_status_t ififo_push_int(h2_ififo *fifo, int id, int block) +{ + if (fifo->aborted) { + return APR_EOF; + } + + if (fifo->set && iindex_of(fifo, id) >= 0) { + /* set mode, elem already member */ + return APR_EEXIST; + } + else if (fifo->count == fifo->nelems) { + if (block) { + while (fifo->count == fifo->nelems) { + if (fifo->aborted) { + return APR_EOF; + } + apr_thread_cond_wait(fifo->not_full, fifo->lock); + } + } + else { + return APR_EAGAIN; + } + } + + ap_assert(fifo->count < fifo->nelems); + fifo->elems[inth_index(fifo, fifo->count)] = id; + ++fifo->count; + if (fifo->count == 1) { + apr_thread_cond_broadcast(fifo->not_empty); + } + return APR_SUCCESS; +} + +static apr_status_t ififo_push(h2_ififo *fifo, int id, int block) +{ + apr_status_t rv; + + if (fifo->aborted) { + return APR_EOF; + } + + if ((rv = apr_thread_mutex_lock(fifo->lock)) == APR_SUCCESS) { + rv = ififo_push_int(fifo, id, block); + apr_thread_mutex_unlock(fifo->lock); + } + return rv; +} + +apr_status_t h2_ififo_push(h2_ififo *fifo, int id) +{ + return ififo_push(fifo, id, 1); +} + +apr_status_t h2_ififo_try_push(h2_ififo *fifo, int id) +{ + return ififo_push(fifo, id, 0); +} + +static apr_status_t ipull_head(h2_ififo *fifo, int *pi, int block) +{ + apr_status_t rv; + + if ((rv = icheck_not_empty(fifo, block)) != APR_SUCCESS) { + *pi = 0; + return rv; + } + *pi = fifo->elems[fifo->head]; + --fifo->count; + if (fifo->count > 0) { + fifo->head = inth_index(fifo, 1); + if (fifo->count+1 == fifo->nelems) { + apr_thread_cond_broadcast(fifo->not_full); + } + } + return APR_SUCCESS; +} + +static apr_status_t ififo_pull(h2_ififo *fifo, int *pi, int block) +{ + apr_status_t rv; + + if (fifo->aborted) { + return APR_EOF; + } + + if ((rv = apr_thread_mutex_lock(fifo->lock)) == APR_SUCCESS) { + rv = ipull_head(fifo, pi, block); + apr_thread_mutex_unlock(fifo->lock); + } + return rv; +} + +apr_status_t h2_ififo_pull(h2_ififo *fifo, int *pi) +{ + return ififo_pull(fifo, pi, 1); +} + +apr_status_t h2_ififo_try_pull(h2_ififo *fifo, int *pi) +{ + return ififo_pull(fifo, pi, 0); +} + +static apr_status_t ififo_peek(h2_ififo *fifo, h2_ififo_peek_fn *fn, void *ctx, int block) +{ + apr_status_t rv; + int id; + + if (fifo->aborted) { + return APR_EOF; + } + + if (APR_SUCCESS == (rv = apr_thread_mutex_lock(fifo->lock))) { + if (APR_SUCCESS == (rv = ipull_head(fifo, &id, block))) { + switch (fn(id, ctx)) { + case H2_FIFO_OP_PULL: + break; + case H2_FIFO_OP_REPUSH: + rv = ififo_push_int(fifo, id, block); + break; + } + } + apr_thread_mutex_unlock(fifo->lock); + } + return rv; +} + +apr_status_t h2_ififo_peek(h2_ififo *fifo, h2_ififo_peek_fn *fn, void *ctx) +{ + return ififo_peek(fifo, fn, ctx, 1); +} + +apr_status_t h2_ififo_try_peek(h2_ififo *fifo, h2_ififo_peek_fn *fn, void *ctx) +{ + return ififo_peek(fifo, fn, ctx, 0); +} + +apr_status_t h2_ififo_remove(h2_ififo *fifo, int id) +{ + apr_status_t rv; + + if (fifo->aborted) { + return APR_EOF; + } + + if ((rv = apr_thread_mutex_lock(fifo->lock)) == APR_SUCCESS) { + int i, rc; + int e; + + rc = 0; + for (i = 0; i < fifo->count; ++i) { + e = fifo->elems[inth_index(fifo, i)]; + if (e == id) { + ++rc; + } + else if (rc) { + fifo->elems[inth_index(fifo, i-rc)] = 
e; + } + } + if (rc) { + fifo->count -= rc; + if (fifo->count + rc == fifo->nelems) { + apr_thread_cond_broadcast(fifo->not_full); + } + rv = APR_SUCCESS; + } + else { + rv = APR_EAGAIN; + } + + apr_thread_mutex_unlock(fifo->lock); + } + return rv; +} + +/******************************************************************************* + * h2_util for apt_table_t + ******************************************************************************/ + +typedef struct { + apr_size_t bytes; + apr_size_t pair_extra; +} table_bytes_ctx; + +static int count_bytes(void *x, const char *key, const char *value) +{ + table_bytes_ctx *ctx = x; + if (key) { + ctx->bytes += strlen(key); + } + if (value) { + ctx->bytes += strlen(value); + } + ctx->bytes += ctx->pair_extra; + return 1; +} + +apr_size_t h2_util_table_bytes(apr_table_t *t, apr_size_t pair_extra) +{ + table_bytes_ctx ctx; + + ctx.bytes = 0; + ctx.pair_extra = pair_extra; + apr_table_do(count_bytes, &ctx, t, NULL); + return ctx.bytes; +} + + +/******************************************************************************* + * h2_util for bucket brigades + ******************************************************************************/ + +static apr_status_t last_not_included(apr_bucket_brigade *bb, + apr_off_t maxlen, + int same_alloc, + apr_size_t *pfile_buckets_allowed, + apr_bucket **pend) +{ + apr_bucket *b; + apr_status_t status = APR_SUCCESS; + int files_allowed = pfile_buckets_allowed? (int)*pfile_buckets_allowed : 0; + + if (maxlen >= 0) { + /* Find the bucket, up to which we reach maxlen/mem bytes */ + for (b = APR_BRIGADE_FIRST(bb); + (b != APR_BRIGADE_SENTINEL(bb)); + b = APR_BUCKET_NEXT(b)) { + + if (APR_BUCKET_IS_METADATA(b)) { + /* included */ + } + else { + if (b->length == ((apr_size_t)-1)) { + const char *ign; + apr_size_t ilen; + status = apr_bucket_read(b, &ign, &ilen, APR_BLOCK_READ); + if (status != APR_SUCCESS) { + return status; + } + } + + if (maxlen == 0 && b->length > 0) { + *pend = b; + return status; + } + + if (same_alloc && APR_BUCKET_IS_FILE(b)) { + /* we like it move it, always */ + } + else if (files_allowed > 0 && APR_BUCKET_IS_FILE(b)) { + /* this has no memory footprint really unless + * it is read, disregard it in length count, + * unless we do not move the file buckets */ + --files_allowed; + } + else if (maxlen < (apr_off_t)b->length) { + apr_bucket_split(b, (apr_size_t)maxlen); + maxlen = 0; + } + else { + maxlen -= b->length; + } + } + } + } + *pend = APR_BRIGADE_SENTINEL(bb); + return status; +} + +apr_status_t h2_brigade_concat_length(apr_bucket_brigade *dest, + apr_bucket_brigade *src, + apr_off_t length) +{ + apr_bucket *b; + apr_off_t remain = length; + apr_status_t status = APR_SUCCESS; + + while (!APR_BRIGADE_EMPTY(src)) { + b = APR_BRIGADE_FIRST(src); + + if (APR_BUCKET_IS_METADATA(b)) { + APR_BUCKET_REMOVE(b); + APR_BRIGADE_INSERT_TAIL(dest, b); + } + else { + if (remain == b->length) { + /* fall through */ + } + else if (remain <= 0) { + return status; + } + else { + if (b->length == ((apr_size_t)-1)) { + const char *ign; + apr_size_t ilen; + status = apr_bucket_read(b, &ign, &ilen, APR_BLOCK_READ); + if (status != APR_SUCCESS) { + return status; + } + } + + if (remain < b->length) { + apr_bucket_split(b, remain); + } + } + APR_BUCKET_REMOVE(b); + APR_BRIGADE_INSERT_TAIL(dest, b); + remain -= b->length; + } + } + return status; +} + +apr_status_t h2_brigade_copy_length(apr_bucket_brigade *dest, + apr_bucket_brigade *src, + apr_off_t length) +{ + apr_bucket *b, *next; + apr_off_t remain = length; + 
    apr_status_t status = APR_SUCCESS;
+
+    for (b = APR_BRIGADE_FIRST(src);
+         b != APR_BRIGADE_SENTINEL(src);
+         b = next) {
+        next = APR_BUCKET_NEXT(b);
+
+        if (APR_BUCKET_IS_METADATA(b)) {
+            /* fall through */
+        }
+        else {
+            if (remain == b->length) {
+                /* fall through */
+            }
+            else if (remain <= 0) {
+                return status;
+            }
+            else {
+                if (b->length == ((apr_size_t)-1)) {
+                    const char *ign;
+                    apr_size_t ilen;
+                    status = apr_bucket_read(b, &ign, &ilen, APR_BLOCK_READ);
+                    if (status != APR_SUCCESS) {
+                        return status;
+                    }
+                }
+
+                if (remain < b->length) {
+                    apr_bucket_split(b, remain);
+                }
+            }
+        }
+        status = apr_bucket_copy(b, &b);
+        if (status != APR_SUCCESS) {
+            return status;
+        }
+        APR_BRIGADE_INSERT_TAIL(dest, b);
+        remain -= b->length;
+    }
+    return status;
+}
+
+int h2_util_has_eos(apr_bucket_brigade *bb, apr_off_t len)
+{
+    apr_bucket *b, *end;
+
+    apr_status_t status = last_not_included(bb, len, 0, 0, &end);
+    if (status != APR_SUCCESS) {
+        return status;
+    }
+
+    for (b = APR_BRIGADE_FIRST(bb);
+         b != APR_BRIGADE_SENTINEL(bb) && b != end;
+         b = APR_BUCKET_NEXT(b))
+    {
+        if (APR_BUCKET_IS_EOS(b)) {
+            return 1;
+        }
+    }
+    return 0;
+}
+
+apr_status_t h2_util_bb_avail(apr_bucket_brigade *bb,
+                              apr_off_t *plen, int *peos)
+{
+    apr_status_t status;
+    apr_off_t blen = 0;
+
+    /* test read to determine available length */
+    status = apr_brigade_length(bb, 1, &blen);
+    if (status != APR_SUCCESS) {
+        return status;
+    }
+    else if (blen == 0) {
+        /* brigade without data, does it have an EOS bucket somewhere? */
+        *plen = 0;
+        *peos = h2_util_has_eos(bb, -1);
+    }
+    else {
+        /* data in the brigade, limit the length returned. Check for EOS
+         * bucket only if we indicate data. This is required since plen == 0
+         * means "the whole brigade" for h2_util_has_eos()
+         */
+        if (blen < *plen || *plen < 0) {
+            *plen = blen;
+        }
+        *peos = h2_util_has_eos(bb, *plen);
+    }
+    return APR_SUCCESS;
+}
+
+apr_status_t h2_util_bb_readx(apr_bucket_brigade *bb,
+                              h2_util_pass_cb *cb, void *ctx,
+                              apr_off_t *plen, int *peos)
+{
+    apr_status_t status = APR_SUCCESS;
+    int consume = (cb != NULL);
+    apr_off_t written = 0;
+    apr_off_t avail = *plen;
+    apr_bucket *next, *b;
+
+    /* Pass data in our brigade through the callback until the length
+     * is satisfied or we encounter an EOS.
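+     * When cb is NULL, the buckets are only examined and nothing is consumed.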
+ */ + *peos = 0; + for (b = APR_BRIGADE_FIRST(bb); + (status == APR_SUCCESS) && (b != APR_BRIGADE_SENTINEL(bb)); + b = next) { + + if (APR_BUCKET_IS_METADATA(b)) { + if (APR_BUCKET_IS_EOS(b)) { + *peos = 1; + } + else { + /* ignore */ + } + } + else if (avail <= 0) { + break; + } + else { + const char *data = NULL; + apr_size_t data_len; + + if (b->length == ((apr_size_t)-1)) { + /* read to determine length */ + status = apr_bucket_read(b, &data, &data_len, APR_NONBLOCK_READ); + } + else { + data_len = b->length; + } + + if (data_len > avail) { + apr_bucket_split(b, avail); + data_len = (apr_size_t)avail; + } + + if (consume) { + if (!data) { + status = apr_bucket_read(b, &data, &data_len, + APR_NONBLOCK_READ); + } + if (status == APR_SUCCESS) { + status = cb(ctx, data, data_len); + } + } + else { + data_len = b->length; + } + avail -= data_len; + written += data_len; + } + + next = APR_BUCKET_NEXT(b); + if (consume) { + apr_bucket_delete(b); + } + } + + *plen = written; + if (status == APR_SUCCESS && !*peos && !*plen) { + return APR_EAGAIN; + } + return status; +} + +apr_size_t h2_util_bucket_print(char *buffer, apr_size_t bmax, + apr_bucket *b, const char *sep) +{ + apr_size_t off = 0; + if (sep && *sep) { + off += apr_snprintf(buffer+off, bmax-off, "%s", sep); + } + + if (bmax <= off) { + return off; + } + else if (APR_BUCKET_IS_METADATA(b)) { + off += apr_snprintf(buffer+off, bmax-off, "%s", b->type->name); + } + else if (bmax > off) { + off += apr_snprintf(buffer+off, bmax-off, "%s[%ld]", + b->type->name, + (long)(b->length == ((apr_size_t)-1)? + -1 : b->length)); + } + return off; +} + +apr_size_t h2_util_bb_print(char *buffer, apr_size_t bmax, + const char *tag, const char *sep, + apr_bucket_brigade *bb) +{ + apr_size_t off = 0; + const char *sp = ""; + apr_bucket *b; + + if (bmax > 1) { + if (bb) { + memset(buffer, 0, bmax--); + off += apr_snprintf(buffer+off, bmax-off, "%s(", tag); + for (b = APR_BRIGADE_FIRST(bb); + (bmax > off) && (b != APR_BRIGADE_SENTINEL(bb)); + b = APR_BUCKET_NEXT(b)) { + + off += h2_util_bucket_print(buffer+off, bmax-off, b, sp); + sp = " "; + } + if (bmax > off) { + off += apr_snprintf(buffer+off, bmax-off, ")%s", sep); + } + } + else { + off += apr_snprintf(buffer+off, bmax-off, "%s(null)%s", tag, sep); + } + } + return off; +} + +apr_status_t h2_append_brigade(apr_bucket_brigade *to, + apr_bucket_brigade *from, + apr_off_t *plen, + int *peos, + h2_bucket_gate *should_append) +{ + apr_bucket *e; + apr_off_t len = 0, remain = *plen; + apr_status_t rv; + + *peos = 0; + + while (!APR_BRIGADE_EMPTY(from)) { + e = APR_BRIGADE_FIRST(from); + + if (!should_append(e)) { + goto leave; + } + else if (APR_BUCKET_IS_METADATA(e)) { + if (APR_BUCKET_IS_EOS(e)) { + *peos = 1; + apr_bucket_delete(e); + continue; + } + } + else { + if (remain > 0 && e->length == ((apr_size_t)-1)) { + const char *ign; + apr_size_t ilen; + rv = apr_bucket_read(e, &ign, &ilen, APR_BLOCK_READ); + if (rv != APR_SUCCESS) { + return rv; + } + } + + if (remain < e->length) { + if (remain <= 0) { + goto leave; + } + apr_bucket_split(e, (apr_size_t)remain); + } + } + + APR_BUCKET_REMOVE(e); + APR_BRIGADE_INSERT_TAIL(to, e); + len += e->length; + remain -= e->length; + } +leave: + *plen = len; + return APR_SUCCESS; +} + +apr_off_t h2_brigade_mem_size(apr_bucket_brigade *bb) +{ + apr_bucket *b; + apr_off_t total = 0; + + for (b = APR_BRIGADE_FIRST(bb); + b != APR_BRIGADE_SENTINEL(bb); + b = APR_BUCKET_NEXT(b)) + { + total += sizeof(*b); + if (b->length > 0) { + if (APR_BUCKET_IS_HEAP(b) + || 
APR_BUCKET_IS_POOL(b)) { + total += b->length; + } + } + } + return total; +} + + +/******************************************************************************* + * h2_ngheader + ******************************************************************************/ + +int h2_util_ignore_header(const char *name) +{ + /* never forward, ch. 8.1.2.2 */ + return (H2_HD_MATCH_LIT_CS("connection", name) + || H2_HD_MATCH_LIT_CS("proxy-connection", name) + || H2_HD_MATCH_LIT_CS("upgrade", name) + || H2_HD_MATCH_LIT_CS("keep-alive", name) + || H2_HD_MATCH_LIT_CS("transfer-encoding", name)); +} + +static int count_header(void *ctx, const char *key, const char *value) +{ + if (!h2_util_ignore_header(key)) { + (*((size_t*)ctx))++; + } + return 1; +} + +static const char *inv_field_name_chr(const char *token) +{ + const char *p = ap_scan_http_token(token); + if (p == token && *p == ':') { + p = ap_scan_http_token(++p); + } + return (p && *p)? p : NULL; +} + +static const char *inv_field_value_chr(const char *token) +{ + const char *p = ap_scan_http_field_content(token); + return (p && *p)? p : NULL; +} + +typedef struct ngh_ctx { + apr_pool_t *p; + int unsafe; + h2_ngheader *ngh; + apr_status_t status; +} ngh_ctx; + +static int add_header(ngh_ctx *ctx, const char *key, const char *value) +{ + nghttp2_nv *nv = &(ctx->ngh)->nv[(ctx->ngh)->nvlen++]; + const char *p; + + if (!ctx->unsafe) { + if ((p = inv_field_name_chr(key))) { + ap_log_perror(APLOG_MARK, APLOG_TRACE1, APR_EINVAL, ctx->p, + "h2_request: head field '%s: %s' has invalid char %s", + key, value, p); + ctx->status = APR_EINVAL; + return 0; + } + if ((p = inv_field_value_chr(value))) { + ap_log_perror(APLOG_MARK, APLOG_TRACE1, APR_EINVAL, ctx->p, + "h2_request: head field '%s: %s' has invalid char %s", + key, value, p); + ctx->status = APR_EINVAL; + return 0; + } + } + nv->name = (uint8_t*)key; + nv->namelen = strlen(key); + nv->value = (uint8_t*)value; + nv->valuelen = strlen(value); + + return 1; +} + +static int add_table_header(void *ctx, const char *key, const char *value) +{ + if (!h2_util_ignore_header(key)) { + add_header(ctx, key, value); + } + return 1; +} + +static apr_status_t ngheader_create(h2_ngheader **ph, apr_pool_t *p, + int unsafe, size_t key_count, + const char *keys[], const char *values[], + apr_table_t *headers) +{ + ngh_ctx ctx; + size_t n, i; + + ctx.p = p; + ctx.unsafe = unsafe; + + n = key_count; + apr_table_do(count_header, &n, headers, NULL); + + *ph = ctx.ngh = apr_pcalloc(p, sizeof(h2_ngheader)); + if (!ctx.ngh) { + return APR_ENOMEM; + } + + ctx.ngh->nv = apr_pcalloc(p, n * sizeof(nghttp2_nv)); + if (!ctx.ngh->nv) { + return APR_ENOMEM; + } + + ctx.status = APR_SUCCESS; + for (i = 0; i < key_count; ++i) { + if (!add_header(&ctx, keys[i], values[i])) { + return ctx.status; + } + } + + apr_table_do(add_table_header, &ctx, headers, NULL); + + return ctx.status; +} + +static int is_unsafe(h2_headers *h) +{ + const char *v = apr_table_get(h->notes, H2_HDR_CONFORMANCE); + return (v && !strcmp(v, H2_HDR_CONFORMANCE_UNSAFE)); +} + +apr_status_t h2_res_create_ngtrailer(h2_ngheader **ph, apr_pool_t *p, + h2_headers *headers) +{ + return ngheader_create(ph, p, is_unsafe(headers), + 0, NULL, NULL, headers->headers); +} + +apr_status_t h2_res_create_ngheader(h2_ngheader **ph, apr_pool_t *p, + h2_headers *headers) +{ + const char *keys[] = { + ":status" + }; + const char *values[] = { + apr_psprintf(p, "%d", headers->status) + }; + return ngheader_create(ph, p, is_unsafe(headers), + H2_ALEN(keys), keys, values, headers->headers); 
+} + +apr_status_t h2_req_create_ngheader(h2_ngheader **ph, apr_pool_t *p, + const struct h2_request *req) +{ + + const char *keys[] = { + ":scheme", + ":authority", + ":path", + ":method", + }; + const char *values[] = { + req->scheme, + req->authority, + req->path, + req->method, + }; + + ap_assert(req->scheme); + ap_assert(req->authority); + ap_assert(req->path); + ap_assert(req->method); + + return ngheader_create(ph, p, 0, H2_ALEN(keys), keys, values, req->headers); +} + +/******************************************************************************* + * header HTTP/1 <-> HTTP/2 conversions + ******************************************************************************/ + + +typedef struct { + const char *name; + size_t len; +} literal; + +#define H2_DEF_LITERAL(n) { (n), (sizeof(n)-1) } +#define H2_LIT_ARGS(a) (a),H2_ALEN(a) + +static literal IgnoredRequestHeaders[] = { + H2_DEF_LITERAL("upgrade"), + H2_DEF_LITERAL("connection"), + H2_DEF_LITERAL("keep-alive"), + H2_DEF_LITERAL("http2-settings"), + H2_DEF_LITERAL("proxy-connection"), + H2_DEF_LITERAL("transfer-encoding"), +}; +static literal IgnoredRequestTrailers[] = { /* Ignore, see rfc7230, ch. 4.1.2 */ + H2_DEF_LITERAL("te"), + H2_DEF_LITERAL("host"), + H2_DEF_LITERAL("range"), + H2_DEF_LITERAL("cookie"), + H2_DEF_LITERAL("expect"), + H2_DEF_LITERAL("pragma"), + H2_DEF_LITERAL("max-forwards"), + H2_DEF_LITERAL("cache-control"), + H2_DEF_LITERAL("authorization"), + H2_DEF_LITERAL("content-length"), + H2_DEF_LITERAL("proxy-authorization"), +}; +static literal IgnoredResponseTrailers[] = { + H2_DEF_LITERAL("age"), + H2_DEF_LITERAL("date"), + H2_DEF_LITERAL("vary"), + H2_DEF_LITERAL("cookie"), + H2_DEF_LITERAL("expires"), + H2_DEF_LITERAL("warning"), + H2_DEF_LITERAL("location"), + H2_DEF_LITERAL("retry-after"), + H2_DEF_LITERAL("cache-control"), + H2_DEF_LITERAL("www-authenticate"), + H2_DEF_LITERAL("proxy-authenticate"), +}; + +static int ignore_header(const literal *lits, size_t llen, + const char *name, size_t nlen) +{ + const literal *lit; + size_t i; + + for (i = 0; i < llen; ++i) { + lit = &lits[i]; + if (lit->len == nlen && !apr_strnatcasecmp(lit->name, name)) { + return 1; + } + } + return 0; +} + +int h2_req_ignore_header(const char *name, size_t len) +{ + return ignore_header(H2_LIT_ARGS(IgnoredRequestHeaders), name, len); +} + +int h2_req_ignore_trailer(const char *name, size_t len) +{ + return (h2_req_ignore_header(name, len) + || ignore_header(H2_LIT_ARGS(IgnoredRequestTrailers), name, len)); +} + +int h2_res_ignore_trailer(const char *name, size_t len) +{ + return ignore_header(H2_LIT_ARGS(IgnoredResponseTrailers), name, len); +} + +apr_status_t h2_req_add_header(apr_table_t *headers, apr_pool_t *pool, + const char *name, size_t nlen, + const char *value, size_t vlen) +{ + char *hname, *hvalue; + + if (h2_req_ignore_header(name, nlen)) { + return APR_SUCCESS; + } + else if (H2_HD_MATCH_LIT("cookie", name, nlen)) { + const char *existing = apr_table_get(headers, "cookie"); + if (existing) { + char *nval; + + /* Cookie header come separately in HTTP/2, but need + * to be merged by "; " (instead of default ", ") + */ + hvalue = apr_pstrndup(pool, value, vlen); + nval = apr_psprintf(pool, "%s; %s", existing, hvalue); + apr_table_setn(headers, "Cookie", nval); + return APR_SUCCESS; + } + } + else if (H2_HD_MATCH_LIT("host", name, nlen)) { + if (apr_table_get(headers, "Host")) { + return APR_SUCCESS; /* ignore duplicate */ + } + } + + hname = apr_pstrndup(pool, name, nlen); + hvalue = apr_pstrndup(pool, value, vlen); + 
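+    /* HTTP/2 delivers header names in lower case; camel-case the copy for the HTTP/1 style table */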
h2_util_camel_case_header(hname, nlen); + apr_table_mergen(headers, hname, hvalue); + + return APR_SUCCESS; +} + +/******************************************************************************* + * h2 request handling + ******************************************************************************/ + +h2_request *h2_req_create(int id, apr_pool_t *pool, const char *method, + const char *scheme, const char *authority, + const char *path, apr_table_t *header, int serialize) +{ + h2_request *req = apr_pcalloc(pool, sizeof(h2_request)); + + req->method = method; + req->scheme = scheme; + req->authority = authority; + req->path = path; + req->headers = header? header : apr_table_make(pool, 10); + req->request_time = apr_time_now(); + req->serialize = serialize; + + return req; +} + +/******************************************************************************* + * frame logging + ******************************************************************************/ + +int h2_util_frame_print(const nghttp2_frame *frame, char *buffer, size_t maxlen) +{ + char scratch[128]; + size_t s_len = sizeof(scratch)/sizeof(scratch[0]); + + switch (frame->hd.type) { + case NGHTTP2_DATA: { + return apr_snprintf(buffer, maxlen, + "DATA[length=%d, flags=%d, stream=%d, padlen=%d]", + (int)frame->hd.length, frame->hd.flags, + frame->hd.stream_id, (int)frame->data.padlen); + } + case NGHTTP2_HEADERS: { + return apr_snprintf(buffer, maxlen, + "HEADERS[length=%d, hend=%d, stream=%d, eos=%d]", + (int)frame->hd.length, + !!(frame->hd.flags & NGHTTP2_FLAG_END_HEADERS), + frame->hd.stream_id, + !!(frame->hd.flags & NGHTTP2_FLAG_END_STREAM)); + } + case NGHTTP2_PRIORITY: { + return apr_snprintf(buffer, maxlen, + "PRIORITY[length=%d, flags=%d, stream=%d]", + (int)frame->hd.length, + frame->hd.flags, frame->hd.stream_id); + } + case NGHTTP2_RST_STREAM: { + return apr_snprintf(buffer, maxlen, + "RST_STREAM[length=%d, flags=%d, stream=%d]", + (int)frame->hd.length, + frame->hd.flags, frame->hd.stream_id); + } + case NGHTTP2_SETTINGS: { + if (frame->hd.flags & NGHTTP2_FLAG_ACK) { + return apr_snprintf(buffer, maxlen, + "SETTINGS[ack=1, stream=%d]", + frame->hd.stream_id); + } + return apr_snprintf(buffer, maxlen, + "SETTINGS[length=%d, stream=%d]", + (int)frame->hd.length, frame->hd.stream_id); + } + case NGHTTP2_PUSH_PROMISE: { + return apr_snprintf(buffer, maxlen, + "PUSH_PROMISE[length=%d, hend=%d, stream=%d]", + (int)frame->hd.length, + !!(frame->hd.flags & NGHTTP2_FLAG_END_HEADERS), + frame->hd.stream_id); + } + case NGHTTP2_PING: { + return apr_snprintf(buffer, maxlen, + "PING[length=%d, ack=%d, stream=%d]", + (int)frame->hd.length, + frame->hd.flags&NGHTTP2_FLAG_ACK, + frame->hd.stream_id); + } + case NGHTTP2_GOAWAY: { + size_t len = (frame->goaway.opaque_data_len < s_len)? 
+ frame->goaway.opaque_data_len : s_len-1; + memcpy(scratch, frame->goaway.opaque_data, len); + scratch[len] = '\0'; + return apr_snprintf(buffer, maxlen, "GOAWAY[error=%d, reason='%s', " + "last_stream=%d]", frame->goaway.error_code, + scratch, frame->goaway.last_stream_id); + } + case NGHTTP2_WINDOW_UPDATE: { + return apr_snprintf(buffer, maxlen, + "WINDOW_UPDATE[stream=%d, incr=%d]", + frame->hd.stream_id, + frame->window_update.window_size_increment); + } + default: + return apr_snprintf(buffer, maxlen, + "type=%d[length=%d, flags=%d, stream=%d]", + frame->hd.type, (int)frame->hd.length, + frame->hd.flags, frame->hd.stream_id); + } +} + +/******************************************************************************* + * push policy + ******************************************************************************/ +int h2_push_policy_determine(apr_table_t *headers, apr_pool_t *p, int push_enabled) +{ + h2_push_policy policy = H2_PUSH_NONE; + if (push_enabled) { + const char *val = apr_table_get(headers, "accept-push-policy"); + if (val) { + if (ap_find_token(p, val, "fast-load")) { + policy = H2_PUSH_FAST_LOAD; + } + else if (ap_find_token(p, val, "head")) { + policy = H2_PUSH_HEAD; + } + else if (ap_find_token(p, val, "default")) { + policy = H2_PUSH_DEFAULT; + } + else if (ap_find_token(p, val, "none")) { + policy = H2_PUSH_NONE; + } + else { + /* nothing known found in this header, go by default */ + policy = H2_PUSH_DEFAULT; + } + } + else { + policy = H2_PUSH_DEFAULT; + } + } + return policy; +} + diff --git a/modules/http2/h2_util.h b/modules/http2/h2_util.h new file mode 100644 index 0000000..1eb262d --- /dev/null +++ b/modules/http2/h2_util.h @@ -0,0 +1,544 @@ +/* Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef __mod_h2__h2_util__ +#define __mod_h2__h2_util__ + +#include <nghttp2/nghttp2.h> + +/******************************************************************************* + * some debugging/format helpers + ******************************************************************************/ +struct h2_request; +struct nghttp2_frame; + +size_t h2_util_hex_dump(char *buffer, size_t maxlen, + const char *data, size_t datalen); + +size_t h2_util_header_print(char *buffer, size_t maxlen, + const char *name, size_t namelen, + const char *value, size_t valuelen); + +void h2_util_camel_case_header(char *s, size_t len); + +int h2_util_frame_print(const nghttp2_frame *frame, char *buffer, size_t maxlen); + +/******************************************************************************* + * ihash - hash for structs with int identifier + ******************************************************************************/ +typedef struct h2_ihash_t h2_ihash_t; +typedef int h2_ihash_iter_t(void *ctx, void *val); + +/** + * Create a hash for structures that have an identifying int member. + * @param pool the pool to use + * @param offset_of_int the offsetof() the int member in the struct + */ +h2_ihash_t *h2_ihash_create(apr_pool_t *pool, size_t offset_of_int); + +size_t h2_ihash_count(h2_ihash_t *ih); +int h2_ihash_empty(h2_ihash_t *ih); +void *h2_ihash_get(h2_ihash_t *ih, int id); + +/** + * Iterate over the hash members (without defined order) and invoke + * fn for each member until 0 is returned. + * @param ih the hash to iterate over + * @param fn the function to invoke on each member + * @param ctx user supplied data passed into each iteration call + * @return 0 if one iteration returned 0, otherwise != 0 + */ +int h2_ihash_iter(h2_ihash_t *ih, h2_ihash_iter_t *fn, void *ctx); + +void h2_ihash_add(h2_ihash_t *ih, void *val); +void h2_ihash_remove(h2_ihash_t *ih, int id); +void h2_ihash_remove_val(h2_ihash_t *ih, void *val); +void h2_ihash_clear(h2_ihash_t *ih); + +size_t h2_ihash_shift(h2_ihash_t *ih, void **buffer, size_t max); + +/******************************************************************************* + * iqueue - sorted list of int with user defined ordering + ******************************************************************************/ +typedef struct h2_iqueue { + int *elts; + int head; + int nelts; + int nalloc; + apr_pool_t *pool; +} h2_iqueue; + +/** + * Comparator for two int to determine their order. + * + * @param i1 first int to compare + * @param i2 second int to compare + * @param ctx provided user data + * @return value is the same as for strcmp() and has the effect: + * == 0: s1 and s2 are treated equal in ordering + * < 0: s1 should be sorted before s2 + * > 0: s2 should be sorted before s1 + */ +typedef int h2_iq_cmp(int i1, int i2, void *ctx); + +/** + * Allocate a new queue from the pool and initialize. + * @param id the identifier of the queue + * @param pool the memory pool + */ +h2_iqueue *h2_iq_create(apr_pool_t *pool, int capacity); + +/** + * Return != 0 iff there are no tasks in the queue. + * @param q the queue to check + */ +int h2_iq_empty(h2_iqueue *q); + +/** + * Return the number of int in the queue. + * @param q the queue to get size on + */ +int h2_iq_count(h2_iqueue *q); + +/** + * Add a stream id to the queue. 
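+ * A minimal usage sketch (the stream_prio_cmp comparator and its session
+ * context are hypothetical, not part of this API):
+ *
+ *   h2_iqueue *q = h2_iq_create(pool, 16);
+ *   h2_iq_add(q, stream_id, stream_prio_cmp, session);
+ *   while ((sid = h2_iq_shift(q)) != 0) {
+ *       ... handle stream sid in the comparator's order ...
+ *   }
+ *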
+ * + * @param q the queue to append the id to + * @param sid the stream id to add + * @param cmp the comparator for sorting + * @param ctx user data for comparator + * @return != 0 iff id was not already there + */ +int h2_iq_add(h2_iqueue *q, int sid, h2_iq_cmp *cmp, void *ctx); + +/** + * Append the id to the queue if not already present. + * + * @param q the queue to append the id to + * @param sid the id to append + * @return != 0 iff id was not already there + */ +int h2_iq_append(h2_iqueue *q, int sid); + +/** + * Remove the stream id from the queue. Return != 0 iff task + * was found in queue. + * @param q the task queue + * @param sid the stream id to remove + * @return != 0 iff task was found in queue + */ +int h2_iq_remove(h2_iqueue *q, int sid); + +/** + * Remove all entries in the queue. + */ +void h2_iq_clear(h2_iqueue *q); + +/** + * Sort the stream idqueue again. Call if the task ordering + * has changed. + * + * @param q the queue to sort + * @param cmp the comparator for sorting + * @param ctx user data for the comparator + */ +void h2_iq_sort(h2_iqueue *q, h2_iq_cmp *cmp, void *ctx); + +/** + * Get the first id from the queue or 0 if the queue is empty. + * The id is being removed. + * + * @param q the queue to get the first id from + * @return the first id of the queue, 0 if empty + */ +int h2_iq_shift(h2_iqueue *q); + +/** + * Get the first max ids from the queue. All these ids will be removed. + * + * @param q the queue to get the first task from + * @param pint the int array to receive the values + * @param max the maximum number of ids to shift + * @return the actual number of ids shifted + */ +size_t h2_iq_mshift(h2_iqueue *q, int *pint, size_t max); + +/** + * Determine if int is in the queue already + * + * @parm q the queue + * @param sid the integer id to check for + * @return != 0 iff sid is already in the queue + */ +int h2_iq_contains(h2_iqueue *q, int sid); + +/******************************************************************************* + * FIFO queue (void* elements) + ******************************************************************************/ + +/** + * A thread-safe FIFO queue with some extra bells and whistles, if you + * do not need anything special, better use 'apr_queue'. + */ +typedef struct h2_fifo h2_fifo; + +/** + * Create a FIFO queue that can hold up to capacity elements. Elements can + * appear several times. + */ +apr_status_t h2_fifo_create(h2_fifo **pfifo, apr_pool_t *pool, int capacity); + +/** + * Create a FIFO set that can hold up to capacity elements. Elements only + * appear once. Pushing an element already present does not change the + * queue and is successful. + */ +apr_status_t h2_fifo_set_create(h2_fifo **pfifo, apr_pool_t *pool, int capacity); + +apr_status_t h2_fifo_term(h2_fifo *fifo); +apr_status_t h2_fifo_interrupt(h2_fifo *fifo); + +int h2_fifo_count(h2_fifo *fifo); + +/** + * Push en element into the queue. Blocks if there is no capacity left. + * + * @param fifo the FIFO queue + * @param elem the element to push + * @return APR_SUCCESS on push, APR_EAGAIN on try_push on a full queue, + * APR_EEXIST when in set mode and elem already there. 
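+ *
+ * A minimal usage sketch (elem is a hypothetical payload pointer; pull is
+ * assumed to block until an element arrives or the fifo is terminated):
+ *
+ *   h2_fifo *q;
+ *   void *elem;
+ *   h2_fifo_create(&q, pool, 64);
+ *   h2_fifo_push(q, elem);      - producer side, blocks while the fifo is full
+ *   h2_fifo_pull(q, &elem);     - consumer side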
+ */ +apr_status_t h2_fifo_push(h2_fifo *fifo, void *elem); +apr_status_t h2_fifo_try_push(h2_fifo *fifo, void *elem); + +apr_status_t h2_fifo_pull(h2_fifo *fifo, void **pelem); +apr_status_t h2_fifo_try_pull(h2_fifo *fifo, void **pelem); + +typedef enum { + H2_FIFO_OP_PULL, /* pull the element from the queue, ie discard it */ + H2_FIFO_OP_REPUSH, /* pull and immediatley re-push it */ +} h2_fifo_op_t; + +typedef h2_fifo_op_t h2_fifo_peek_fn(void *head, void *ctx); + +/** + * Call given function on the head of the queue, once it exists, and + * perform the returned operation on it. The queue will hold its lock during + * this time, so no other operations on the queue are possible. + * @param fifo the queue to peek at + * @param fn the function to call on the head, once available + * @param ctx context to pass in call to function + */ +apr_status_t h2_fifo_peek(h2_fifo *fifo, h2_fifo_peek_fn *fn, void *ctx); + +/** + * Non-blocking version of h2_fifo_peek. + */ +apr_status_t h2_fifo_try_peek(h2_fifo *fifo, h2_fifo_peek_fn *fn, void *ctx); + +/** + * Remove the elem from the queue, will remove multiple appearances. + * @param elem the element to remove + * @return APR_SUCCESS iff > 0 elems were removed, APR_EAGAIN otherwise. + */ +apr_status_t h2_fifo_remove(h2_fifo *fifo, void *elem); + +/******************************************************************************* + * iFIFO queue (int elements) + ******************************************************************************/ + +/** + * A thread-safe FIFO queue with some extra bells and whistles, if you + * do not need anything special, better use 'apr_queue'. + */ +typedef struct h2_ififo h2_ififo; + +/** + * Create a FIFO queue that can hold up to capacity int. ints can + * appear several times. + */ +apr_status_t h2_ififo_create(h2_ififo **pfifo, apr_pool_t *pool, int capacity); + +/** + * Create a FIFO set that can hold up to capacity integers. Ints only + * appear once. Pushing an int already present does not change the + * queue and is successful. + */ +apr_status_t h2_ififo_set_create(h2_ififo **pfifo, apr_pool_t *pool, int capacity); + +apr_status_t h2_ififo_term(h2_ififo *fifo); +apr_status_t h2_ififo_interrupt(h2_ififo *fifo); + +int h2_ififo_count(h2_ififo *fifo); + +/** + * Push an int into the queue. Blocks if there is no capacity left. + * + * @param fifo the FIFO queue + * @param id the int to push + * @return APR_SUCCESS on push, APR_EAGAIN on try_push on a full queue, + * APR_EEXIST when in set mode and elem already there. + */ +apr_status_t h2_ififo_push(h2_ififo *fifo, int id); +apr_status_t h2_ififo_try_push(h2_ififo *fifo, int id); + +apr_status_t h2_ififo_pull(h2_ififo *fifo, int *pi); +apr_status_t h2_ififo_try_pull(h2_ififo *fifo, int *pi); + +typedef h2_fifo_op_t h2_ififo_peek_fn(int head, void *ctx); + +/** + * Call given function on the head of the queue, once it exists, and + * perform the returned operation on it. The queue will hold its lock during + * this time, so no other operations on the queue are possible. + * @param fifo the queue to peek at + * @param fn the function to call on the head, once available + * @param ctx context to pass in call to function + */ +apr_status_t h2_ififo_peek(h2_ififo *fifo, h2_ififo_peek_fn *fn, void *ctx); + +/** + * Non-blocking version of h2_fifo_peek. + */ +apr_status_t h2_ififo_try_peek(h2_ififo *fifo, h2_ififo_peek_fn *fn, void *ctx); + +/** + * Remove the integer from the queue, will remove multiple appearances. 
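+ * As with the other h2_ififo_* declarations in this section, this mirrors
+ * the void*-based h2_fifo API above, just for plain int (stream id) elements.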
+ * @param id the integer to remove
+ * @return APR_SUCCESS iff > 0 ints were removed, APR_EAGAIN otherwise.
+ */
+apr_status_t h2_ififo_remove(h2_ififo *fifo, int id);
+
+/*******************************************************************************
+ * common helpers
+ ******************************************************************************/
+/* h2_log2(n) iff n is a power of 2 */
+unsigned char h2_log2(int n);
+
+/**
+ * Count the bytes that all key/value pairs in a table have
+ * in length (excluding terminating 0s), plus additional extra per pair.
+ *
+ * @param t the table to inspect
+ * @param pair_extra the extra amount to add per pair
+ * @return the number of bytes all key/value pairs have
+ */
+apr_size_t h2_util_table_bytes(apr_table_t *t, apr_size_t pair_extra);
+
+/** Match a header value against a string constant, case insensitive */
+#define H2_HD_MATCH_LIT(l, name, nlen) \
+ ((nlen == sizeof(l) - 1) && !apr_strnatcasecmp(l, name))
+
+/*******************************************************************************
+ * HTTP/2 header helpers
+ ******************************************************************************/
+int h2_req_ignore_header(const char *name, size_t len);
+int h2_req_ignore_trailer(const char *name, size_t len);
+int h2_res_ignore_trailer(const char *name, size_t len);
+
+/**
+ * Set the push policy for the given request. Takes request headers into
+ * account, see draft https://tools.ietf.org/html/draft-ruellan-http-accept-push-policy-00
+ * for details.
+ *
+ * @param headers the http headers to inspect
+ * @param p the pool to use
+ * @param push_enabled if HTTP/2 server push is generally enabled for this request
+ * @return the push policy desired
+ */
+int h2_push_policy_determine(apr_table_t *headers, apr_pool_t *p, int push_enabled);
+
+/*******************************************************************************
+ * base64 url encoding, different table from normal base64
+ ******************************************************************************/
+/**
+ * I always wanted to write my own base64url decoder...not. See
+ * https://tools.ietf.org/html/rfc4648#section-5 for description. 
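+ * The base64url alphabet uses '-' and '_' in place of '+' and '/', which
+ * keeps encoded values safe inside URLs and header fields; mod_http2 uses
+ * it e.g. for the HTTP2-Settings upgrade header, whose value is sent
+ * without trailing '=' padding (RFC 7540, ch. 3.2.1).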
+ */ +apr_size_t h2_util_base64url_decode(const char **decoded, + const char *encoded, + apr_pool_t *pool); +const char *h2_util_base64url_encode(const char *data, + apr_size_t len, apr_pool_t *pool); + +/******************************************************************************* + * nghttp2 helpers + ******************************************************************************/ + +#define H2_HD_MATCH_LIT_CS(l, name) \ + ((strlen(name) == sizeof(l) - 1) && !apr_strnatcasecmp(l, name)) + +#define H2_CREATE_NV_LIT_CS(nv, NAME, VALUE) nv->name = (uint8_t *)NAME; \ + nv->namelen = sizeof(NAME) - 1; \ + nv->value = (uint8_t *)VALUE; \ + nv->valuelen = strlen(VALUE) + +#define H2_CREATE_NV_CS_LIT(nv, NAME, VALUE) nv->name = (uint8_t *)NAME; \ + nv->namelen = strlen(NAME); \ + nv->value = (uint8_t *)VALUE; \ + nv->valuelen = sizeof(VALUE) - 1 + +#define H2_CREATE_NV_CS_CS(nv, NAME, VALUE) nv->name = (uint8_t *)NAME; \ + nv->namelen = strlen(NAME); \ + nv->value = (uint8_t *)VALUE; \ + nv->valuelen = strlen(VALUE) + +int h2_util_ignore_header(const char *name); + +struct h2_headers; + +typedef struct h2_ngheader { + nghttp2_nv *nv; + apr_size_t nvlen; +} h2_ngheader; + +apr_status_t h2_res_create_ngtrailer(h2_ngheader **ph, apr_pool_t *p, + struct h2_headers *headers); +apr_status_t h2_res_create_ngheader(h2_ngheader **ph, apr_pool_t *p, + struct h2_headers *headers); +apr_status_t h2_req_create_ngheader(h2_ngheader **ph, apr_pool_t *p, + const struct h2_request *req); + +apr_status_t h2_req_add_header(apr_table_t *headers, apr_pool_t *pool, + const char *name, size_t nlen, + const char *value, size_t vlen); + +/******************************************************************************* + * h2_request helpers + ******************************************************************************/ + +struct h2_request *h2_req_create(int id, apr_pool_t *pool, const char *method, + const char *scheme, const char *authority, + const char *path, apr_table_t *header, + int serialize); + +/******************************************************************************* + * apr brigade helpers + ******************************************************************************/ + +/** + * Concatenate at most length bytes from src to dest brigade, splitting + * buckets if necessary and reading buckets of indeterminate length. + */ +apr_status_t h2_brigade_concat_length(apr_bucket_brigade *dest, + apr_bucket_brigade *src, + apr_off_t length); + +/** + * Copy at most length bytes from src to dest brigade, splitting + * buckets if necessary and reading buckets of indeterminate length. + */ +apr_status_t h2_brigade_copy_length(apr_bucket_brigade *dest, + apr_bucket_brigade *src, + apr_off_t length); + +/** + * Return != 0 iff there is a FLUSH or EOS bucket in the brigade. + * @param bb the brigade to check on + * @return != 0 iff brigade holds FLUSH or EOS bucket (or both) + */ +int h2_util_has_eos(apr_bucket_brigade *bb, apr_off_t len); + +/** + * Check how many bytes of the desired amount are available and if the + * end of stream is reached by that amount. + * @param bb the brigade to check + * @param plen the desired length and, on return, the available length + * @param on return, if eos has been reached + */ +apr_status_t h2_util_bb_avail(apr_bucket_brigade *bb, + apr_off_t *plen, int *peos); + +typedef apr_status_t h2_util_pass_cb(void *ctx, + const char *data, apr_off_t len); + +/** + * Read at most *plen bytes from the brigade and pass them into the + * given callback. 
If cb is NULL, just return the amount of data that + * could have been read. + * If an EOS was/would be encountered, set *peos != 0. + * @param bb the brigade to read from + * @param cb the callback to invoke for the read data + * @param ctx optional data passed to callback + * @param plen inout, as input gives the maximum number of bytes to read, + * on return specifies the actual/would be number of bytes + * @param peos != 0 iff an EOS bucket was/would be encountered. + */ +apr_status_t h2_util_bb_readx(apr_bucket_brigade *bb, + h2_util_pass_cb *cb, void *ctx, + apr_off_t *plen, int *peos); + +/** + * Print a bucket's meta data (type and length) to the buffer. + * @return number of characters printed + */ +apr_size_t h2_util_bucket_print(char *buffer, apr_size_t bmax, + apr_bucket *b, const char *sep); + +/** + * Prints the brigade bucket types and lengths into the given buffer + * up to bmax. + * @return number of characters printed + */ +apr_size_t h2_util_bb_print(char *buffer, apr_size_t bmax, + const char *tag, const char *sep, + apr_bucket_brigade *bb); +/** + * Logs the bucket brigade (which bucket types with what length) + * to the log at the given level. + * @param c the connection to log for + * @param sid the stream identifier this brigade belongs to + * @param level the log level (as in APLOG_*) + * @param tag a short message text about the context + * @param bb the brigade to log + */ +#define h2_util_bb_log(c, sid, level, tag, bb) \ +do { \ + char buffer[4 * 1024]; \ + const char *line = "(null)"; \ + apr_size_t len, bmax = sizeof(buffer)/sizeof(buffer[0]); \ + len = h2_util_bb_print(buffer, bmax, (tag), "", (bb)); \ + ap_log_cerror(APLOG_MARK, level, 0, (c), "bb_dump(%ld): %s", \ + ((c)->master? (c)->master->id : (c)->id), (len? buffer : line)); \ +} while(0) + + +typedef int h2_bucket_gate(apr_bucket *b); +/** + * Transfer buckets from one brigade to another with a limit on the + * maximum amount of bytes transferred. Does no setaside magic, lifetime + * of brigades must fit. + * @param to brigade to transfer buckets to + * @param from brigades to remove buckets from + * @param plen maximum bytes to transfer, actual bytes transferred + * @param peos if an EOS bucket was transferred + */ +apr_status_t h2_append_brigade(apr_bucket_brigade *to, + apr_bucket_brigade *from, + apr_off_t *plen, + int *peos, + h2_bucket_gate *should_append); + +/** + * Get an approximnation of the memory footprint of the given + * brigade. This varies from apr_brigade_length as + * - no buckets are ever read + * - only buckets known to allocate memory (HEAP+POOL) are counted + * - the bucket struct itself is counted + */ +apr_off_t h2_brigade_mem_size(apr_bucket_brigade *bb); + +#endif /* defined(__mod_h2__h2_util__) */ diff --git a/modules/http2/h2_version.h b/modules/http2/h2_version.h new file mode 100644 index 0000000..7079437 --- /dev/null +++ b/modules/http2/h2_version.h @@ -0,0 +1,41 @@ +/* Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef mod_h2_h2_version_h +#define mod_h2_h2_version_h + +#undef PACKAGE_VERSION +#undef PACKAGE_TARNAME +#undef PACKAGE_STRING +#undef PACKAGE_NAME +#undef PACKAGE_BUGREPORT + +/** + * @macro + * Version number of the http2 module as c string + */ +#define MOD_HTTP2_VERSION "1.11.4" + +/** + * @macro + * Numerical representation of the version number of the http2 module + * release. This is a 24 bit number with 8 bits for major number, 8 bits + * for minor and 8 bits for patch. Version 1.2.3 becomes 0x010203. + */ +#define MOD_HTTP2_VERSION_NUM 0x010b04 + + +#endif /* mod_h2_h2_version_h */ diff --git a/modules/http2/h2_workers.c b/modules/http2/h2_workers.c new file mode 100644 index 0000000..699f533 --- /dev/null +++ b/modules/http2/h2_workers.c @@ -0,0 +1,383 @@ +/* Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include <assert.h> +#include <apr_atomic.h> +#include <apr_thread_mutex.h> +#include <apr_thread_cond.h> + +#include <mpm_common.h> +#include <httpd.h> +#include <http_core.h> +#include <http_log.h> + +#include "h2.h" +#include "h2_private.h" +#include "h2_mplx.h" +#include "h2_task.h" +#include "h2_workers.h" +#include "h2_util.h" + +typedef struct h2_slot h2_slot; +struct h2_slot { + int id; + h2_slot *next; + h2_workers *workers; + int aborted; + int sticks; + h2_task *task; + apr_thread_t *thread; + apr_thread_mutex_t *lock; + apr_thread_cond_t *not_idle; +}; + +static h2_slot *pop_slot(h2_slot **phead) +{ + /* Atomically pop a slot from the list */ + for (;;) { + h2_slot *first = *phead; + if (first == NULL) { + return NULL; + } + if (apr_atomic_casptr((void*)phead, first->next, first) == first) { + first->next = NULL; + return first; + } + } +} + +static void push_slot(h2_slot **phead, h2_slot *slot) +{ + /* Atomically push a slot to the list */ + ap_assert(!slot->next); + for (;;) { + h2_slot *next = slot->next = *phead; + if (apr_atomic_casptr((void*)phead, slot, next) == next) { + return; + } + } +} + +static void* APR_THREAD_FUNC slot_run(apr_thread_t *thread, void *wctx); + +static apr_status_t activate_slot(h2_workers *workers, h2_slot *slot) +{ + apr_status_t status; + + slot->workers = workers; + slot->aborted = 0; + slot->task = NULL; + + if (!slot->lock) { + status = apr_thread_mutex_create(&slot->lock, + APR_THREAD_MUTEX_DEFAULT, + workers->pool); + if (status != APR_SUCCESS) { + push_slot(&workers->free, slot); + return status; + } + } + + if (!slot->not_idle) { + status = apr_thread_cond_create(&slot->not_idle, workers->pool); + if (status != APR_SUCCESS) { + push_slot(&workers->free, slot); + return status; + } + } + + ap_log_error(APLOG_MARK, APLOG_TRACE2, 0, workers->s, + "h2_workers: new thread for slot %d", slot->id); + /* thread will either immediately start work or add itself + * to the idle queue */ + apr_thread_create(&slot->thread, workers->thread_attr, slot_run, slot, + workers->pool); + if (!slot->thread) { + push_slot(&workers->free, slot); + return APR_ENOMEM; + } + + apr_atomic_inc32(&workers->worker_count); + return APR_SUCCESS; +} + +static apr_status_t add_worker(h2_workers *workers) +{ + h2_slot *slot = pop_slot(&workers->free); + if (slot) { + return activate_slot(workers, slot); + } + return APR_EAGAIN; +} + +static void wake_idle_worker(h2_workers *workers) +{ + h2_slot *slot = pop_slot(&workers->idle); + if (slot) { + apr_thread_mutex_lock(slot->lock); + apr_thread_cond_signal(slot->not_idle); + apr_thread_mutex_unlock(slot->lock); + } + else if (workers->dynamic) { + add_worker(workers); + } +} + +static void cleanup_zombies(h2_workers *workers) +{ + h2_slot *slot; + while ((slot = pop_slot(&workers->zombies))) { + if (slot->thread) { + apr_status_t status; + apr_thread_join(&status, slot->thread); + slot->thread = NULL; + } + apr_atomic_dec32(&workers->worker_count); + slot->next = NULL; + push_slot(&workers->free, slot); + } +} + +static apr_status_t slot_pull_task(h2_slot *slot, h2_mplx *m) +{ + apr_status_t rv; + + rv = h2_mplx_pop_task(m, &slot->task); + if (slot->task) { + /* Ok, we got something to give back to the worker for execution. + * If we still have idle workers, we let the worker be sticky, + * e.g. making it poll the task's h2_mplx instance for more work + * before asking back here. 
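+ * The budget lives in slot->sticks: it is set to max_workers below and
+ * decremented in slot_run(); while it stays positive, h2_mplx_task_done()
+ * is asked to hand back a follow-up task from the same mplx directly.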
*/ + slot->sticks = slot->workers->max_workers; + return rv; + } + slot->sticks = 0; + return APR_EOF; +} + +static h2_fifo_op_t mplx_peek(void *head, void *ctx) +{ + h2_mplx *m = head; + h2_slot *slot = ctx; + + if (slot_pull_task(slot, m) == APR_EAGAIN) { + wake_idle_worker(slot->workers); + return H2_FIFO_OP_REPUSH; + } + return H2_FIFO_OP_PULL; +} + +/** + * Get the next task for the given worker. Will block until a task arrives + * or the max_wait timer expires and more than min workers exist. + */ +static apr_status_t get_next(h2_slot *slot) +{ + h2_workers *workers = slot->workers; + apr_status_t status; + + slot->task = NULL; + while (!slot->aborted) { + if (!slot->task) { + status = h2_fifo_try_peek(workers->mplxs, mplx_peek, slot); + if (status == APR_EOF) { + return status; + } + } + + if (slot->task) { + return APR_SUCCESS; + } + + cleanup_zombies(workers); + + apr_thread_mutex_lock(slot->lock); + push_slot(&workers->idle, slot); + apr_thread_cond_wait(slot->not_idle, slot->lock); + apr_thread_mutex_unlock(slot->lock); + } + return APR_EOF; +} + +static void slot_done(h2_slot *slot) +{ + push_slot(&(slot->workers->zombies), slot); +} + + +static void* APR_THREAD_FUNC slot_run(apr_thread_t *thread, void *wctx) +{ + h2_slot *slot = wctx; + + while (!slot->aborted) { + + /* Get a h2_task from the mplxs queue. */ + get_next(slot); + while (slot->task) { + + h2_task_do(slot->task, thread, slot->id); + + /* Report the task as done. If stickyness is left, offer the + * mplx the opportunity to give us back a new task right away. + */ + if (!slot->aborted && (--slot->sticks > 0)) { + h2_mplx_task_done(slot->task->mplx, slot->task, &slot->task); + } + else { + h2_mplx_task_done(slot->task->mplx, slot->task, NULL); + slot->task = NULL; + } + } + } + + slot_done(slot); + return NULL; +} + +static apr_status_t workers_pool_cleanup(void *data) +{ + h2_workers *workers = data; + h2_slot *slot; + + if (!workers->aborted) { + workers->aborted = 1; + /* abort all idle slots */ + for (;;) { + slot = pop_slot(&workers->idle); + if (slot) { + apr_thread_mutex_lock(slot->lock); + slot->aborted = 1; + apr_thread_cond_signal(slot->not_idle); + apr_thread_mutex_unlock(slot->lock); + } + else { + break; + } + } + + h2_fifo_term(workers->mplxs); + h2_fifo_interrupt(workers->mplxs); + + cleanup_zombies(workers); + } + return APR_SUCCESS; +} + +h2_workers *h2_workers_create(server_rec *s, apr_pool_t *server_pool, + int min_workers, int max_workers, + int idle_secs) +{ + apr_status_t status; + h2_workers *workers; + apr_pool_t *pool; + int i, n; + + ap_assert(s); + ap_assert(server_pool); + + /* let's have our own pool that will be parent to all h2_worker + * instances we create. This happens in various threads, but always + * guarded by our lock. Without this pool, all subpool creations would + * happen on the pool handed to us, which we do not guard. + */ + apr_pool_create(&pool, server_pool); + apr_pool_tag(pool, "h2_workers"); + workers = apr_pcalloc(pool, sizeof(h2_workers)); + if (!workers) { + return NULL; + } + + workers->s = s; + workers->pool = pool; + workers->min_workers = min_workers; + workers->max_workers = max_workers; + workers->max_idle_secs = (idle_secs > 0)? idle_secs : 10; + + /* FIXME: the fifo set we use here has limited capacity. Once the + * set is full, connections with new requests do a wait. Unfortunately, + * we have optimizations in place there that makes such waiting "unfair" + * in the sense that it may take connections a looong time to get scheduled. 
+ * + * Need to rewrite this to use one of our double-linked lists and a mutex + * to have unlimited capacity and fair scheduling. + * + * For now, we just make enough room to have many connections inside one + * process. + */ + status = h2_fifo_set_create(&workers->mplxs, pool, 8 * 1024); + if (status != APR_SUCCESS) { + return NULL; + } + + status = apr_threadattr_create(&workers->thread_attr, workers->pool); + if (status != APR_SUCCESS) { + return NULL; + } + + if (ap_thread_stacksize != 0) { + apr_threadattr_stacksize_set(workers->thread_attr, + ap_thread_stacksize); + ap_log_error(APLOG_MARK, APLOG_TRACE3, 0, s, + "h2_workers: using stacksize=%ld", + (long)ap_thread_stacksize); + } + + status = apr_thread_mutex_create(&workers->lock, + APR_THREAD_MUTEX_DEFAULT, + workers->pool); + if (status == APR_SUCCESS) { + n = workers->nslots = workers->max_workers; + workers->slots = apr_pcalloc(workers->pool, n * sizeof(h2_slot)); + if (workers->slots == NULL) { + workers->nslots = 0; + status = APR_ENOMEM; + } + for (i = 0; i < n; ++i) { + workers->slots[i].id = i; + } + } + if (status == APR_SUCCESS) { + /* we activate all for now, TODO: support min_workers again. + * do this in reverse for vanity reasons so slot 0 will most + * likely be at head of idle queue. */ + n = workers->max_workers; + for (i = n-1; i >= 0; --i) { + status = activate_slot(workers, &workers->slots[i]); + } + /* the rest of the slots go on the free list */ + for(i = n; i < workers->nslots; ++i) { + push_slot(&workers->free, &workers->slots[i]); + } + workers->dynamic = (workers->worker_count < workers->max_workers); + } + if (status == APR_SUCCESS) { + apr_pool_pre_cleanup_register(pool, workers, workers_pool_cleanup); + return workers; + } + return NULL; +} + +apr_status_t h2_workers_register(h2_workers *workers, struct h2_mplx *m) +{ + apr_status_t status = h2_fifo_push(workers->mplxs, m); + wake_idle_worker(workers); + return status; +} + +apr_status_t h2_workers_unregister(h2_workers *workers, struct h2_mplx *m) +{ + return h2_fifo_remove(workers->mplxs, m); +} diff --git a/modules/http2/h2_workers.h b/modules/http2/h2_workers.h new file mode 100644 index 0000000..3561582 --- /dev/null +++ b/modules/http2/h2_workers.h @@ -0,0 +1,82 @@ +/* Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef __mod_h2__h2_workers__ +#define __mod_h2__h2_workers__ + +/* Thread pool specific to executing h2_tasks. Has a minimum and maximum + * number of workers it creates. Starts with minimum workers and adds + * some on load, reduces the number again when idle. 
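+ * Each worker occupies a fixed slot; lock-free lists (free, idle, zombies)
+ * move slots between states, and registered h2_mplx instances are fed to
+ * idle workers through the mplxs fifo.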
+ * + */ +struct apr_thread_mutex_t; +struct apr_thread_cond_t; +struct h2_mplx; +struct h2_request; +struct h2_task; +struct h2_fifo; + +struct h2_slot; + +typedef struct h2_workers h2_workers; + +struct h2_workers { + server_rec *s; + apr_pool_t *pool; + + int next_worker_id; + int min_workers; + int max_workers; + int max_idle_secs; + + int aborted; + int dynamic; + + apr_threadattr_t *thread_attr; + int nslots; + struct h2_slot *slots; + + volatile apr_uint32_t worker_count; + + struct h2_slot *free; + struct h2_slot *idle; + struct h2_slot *zombies; + + struct h2_fifo *mplxs; + + struct apr_thread_mutex_t *lock; +}; + + +/* Create a worker pool with the given minimum and maximum number of + * threads. + */ +h2_workers *h2_workers_create(server_rec *s, apr_pool_t *pool, + int min_size, int max_size, int idle_secs); + +/** + * Registers a h2_mplx for task scheduling. If this h2_mplx runs + * out of tasks, it will be automatically be unregistered. Should + * new tasks arrive, it needs to be registered again. + */ +apr_status_t h2_workers_register(h2_workers *workers, struct h2_mplx *m); + +/** + * Remove a h2_mplx from the worker registry. + */ +apr_status_t h2_workers_unregister(h2_workers *workers, struct h2_mplx *m); + +#endif /* defined(__mod_h2__h2_workers__) */ diff --git a/modules/http2/mod_http2.c b/modules/http2/mod_http2.c new file mode 100644 index 0000000..3d278e9 --- /dev/null +++ b/modules/http2/mod_http2.c @@ -0,0 +1,393 @@ +/* Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include <apr_optional.h> +#include <apr_optional_hooks.h> +#include <apr_strings.h> +#include <apr_time.h> +#include <apr_want.h> + +#include <httpd.h> +#include <http_protocol.h> +#include <http_request.h> +#include <http_log.h> + +#include "mod_http2.h" + +#include <nghttp2/nghttp2.h> +#include "h2_stream.h" +#include "h2_alt_svc.h" +#include "h2_conn.h" +#include "h2_filter.h" +#include "h2_task.h" +#include "h2_session.h" +#include "h2_config.h" +#include "h2_ctx.h" +#include "h2_h2.h" +#include "h2_mplx.h" +#include "h2_push.h" +#include "h2_request.h" +#include "h2_switch.h" +#include "h2_version.h" + + +static void h2_hooks(apr_pool_t *pool); + +AP_DECLARE_MODULE(http2) = { + STANDARD20_MODULE_STUFF, + h2_config_create_dir, /* func to create per dir config */ + h2_config_merge_dir, /* func to merge per dir config */ + h2_config_create_svr, /* func to create per server config */ + h2_config_merge_svr, /* func to merge per server config */ + h2_cmds, /* command handlers */ + h2_hooks, +#if defined(AP_MODULE_FLAG_NONE) + AP_MODULE_FLAG_ALWAYS_MERGE +#endif +}; + +static int h2_h2_fixups(request_rec *r); + +typedef struct { + unsigned int change_prio : 1; + unsigned int sha256 : 1; + unsigned int inv_headers : 1; + unsigned int dyn_windows : 1; +} features; + +static features myfeats; +static int mpm_warned; + +/* The module initialization. Called once as apache hook, before any multi + * processing (threaded or not) happens. It is typically at least called twice, + * see + * http://wiki.apache.org/httpd/ModuleLife + * Since the first run is just a "practise" run, we want to initialize for real + * only on the second try. This defeats the purpose of the first dry run a bit, + * since apache wants to verify that a new configuration actually will work. + * So if we have trouble with the configuration, this will only be detected + * when the server has already switched. + * On the other hand, when we initialize lib nghttp2, all possible crazy things + * might happen and this might even eat threads. So, better init on the real + * invocation, for now at least. + */ +static int h2_post_config(apr_pool_t *p, apr_pool_t *plog, + apr_pool_t *ptemp, server_rec *s) +{ + void *data = NULL; + const char *mod_h2_init_key = "mod_http2_init_counter"; + nghttp2_info *ngh2; + apr_status_t status; + + (void)plog;(void)ptemp; +#ifdef H2_NG2_CHANGE_PRIO + myfeats.change_prio = 1; +#endif +#ifdef H2_OPENSSL + myfeats.sha256 = 1; +#endif +#ifdef H2_NG2_INVALID_HEADER_CB + myfeats.inv_headers = 1; +#endif +#ifdef H2_NG2_LOCAL_WIN_SIZE + myfeats.dyn_windows = 1; +#endif + + apr_pool_userdata_get(&data, mod_h2_init_key, s->process->pool); + if ( data == NULL ) { + ap_log_error( APLOG_MARK, APLOG_DEBUG, 0, s, APLOGNO(03089) + "initializing post config dry run"); + apr_pool_userdata_set((const void *)1, mod_h2_init_key, + apr_pool_cleanup_null, s->process->pool); + return APR_SUCCESS; + } + + ngh2 = nghttp2_version(0); + ap_log_error( APLOG_MARK, APLOG_INFO, 0, s, APLOGNO(03090) + "mod_http2 (v%s, feats=%s%s%s%s, nghttp2 %s), initializing...", + MOD_HTTP2_VERSION, + myfeats.change_prio? "CHPRIO" : "", + myfeats.sha256? "+SHA256" : "", + myfeats.inv_headers? "+INVHD" : "", + myfeats.dyn_windows? "+DWINS" : "", + ngh2? ngh2->version_str : "unknown"); + + switch (h2_conn_mpm_type()) { + case H2_MPM_SIMPLE: + case H2_MPM_MOTORZ: + case H2_MPM_NETWARE: + case H2_MPM_WINNT: + /* not sure we need something extra for those. 
*/ + break; + case H2_MPM_EVENT: + case H2_MPM_WORKER: + /* all fine, we know these ones */ + break; + case H2_MPM_PREFORK: + /* ok, we now know how to handle that one */ + break; + case H2_MPM_UNKNOWN: + /* ??? */ + ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s, APLOGNO(03091) + "post_config: mpm type unknown"); + break; + } + + if (!h2_mpm_supported() && !mpm_warned) { + mpm_warned = 1; + ap_log_error(APLOG_MARK, APLOG_WARNING, 0, s, APLOGNO(10034) + "The mpm module (%s) is not supported by mod_http2. The mpm determines " + "how things are processed in your server. HTTP/2 has more demands in " + "this regard and the currently selected mpm will just not do. " + "This is an advisory warning. Your server will continue to work, but " + "the HTTP/2 protocol will be inactive.", + h2_conn_mpm_name()); + } + + status = h2_h2_init(p, s); + if (status == APR_SUCCESS) { + status = h2_switch_init(p, s); + } + if (status == APR_SUCCESS) { + status = h2_task_init(p, s); + } + + return status; +} + +static char *http2_var_lookup(apr_pool_t *, server_rec *, + conn_rec *, request_rec *, char *name); +static int http2_is_h2(conn_rec *); + +static apr_status_t http2_req_engine_push(const char *ngn_type, + request_rec *r, + http2_req_engine_init *einit) +{ + return h2_mplx_req_engine_push(ngn_type, r, einit); +} + +static apr_status_t http2_req_engine_pull(h2_req_engine *ngn, + apr_read_type_e block, + int capacity, + request_rec **pr) +{ + return h2_mplx_req_engine_pull(ngn, block, capacity, pr); +} + +static void http2_req_engine_done(h2_req_engine *ngn, conn_rec *r_conn, + apr_status_t status) +{ + h2_mplx_req_engine_done(ngn, r_conn, status); +} + +static void http2_get_num_workers(server_rec *s, int *minw, int *maxw) +{ + h2_get_num_workers(s, minw, maxw); +} + +/* Runs once per created child process. Perform any process + * related initionalization here. + */ +static void h2_child_init(apr_pool_t *pool, server_rec *s) +{ + /* Set up our connection processing */ + apr_status_t status = h2_conn_child_init(pool, s); + if (status != APR_SUCCESS) { + ap_log_error(APLOG_MARK, APLOG_ERR, status, s, + APLOGNO(02949) "initializing connection handling"); + } + +} + +/* Install this module into the apache2 infrastructure. + */ +static void h2_hooks(apr_pool_t *pool) +{ + static const char *const mod_ssl[] = { "mod_ssl.c", NULL}; + + APR_REGISTER_OPTIONAL_FN(http2_is_h2); + APR_REGISTER_OPTIONAL_FN(http2_var_lookup); + APR_REGISTER_OPTIONAL_FN(http2_req_engine_push); + APR_REGISTER_OPTIONAL_FN(http2_req_engine_pull); + APR_REGISTER_OPTIONAL_FN(http2_req_engine_done); + APR_REGISTER_OPTIONAL_FN(http2_get_num_workers); + + ap_log_perror(APLOG_MARK, APLOG_TRACE1, 0, pool, "installing hooks"); + + /* Run once after configuration is set, but before mpm children initialize. + */ + ap_hook_post_config(h2_post_config, mod_ssl, NULL, APR_HOOK_MIDDLE); + + /* Run once after a child process has been created. + */ + ap_hook_child_init(h2_child_init, NULL, NULL, APR_HOOK_MIDDLE); + + h2_h2_register_hooks(); + h2_switch_register_hooks(); + h2_task_register_hooks(); + + h2_alt_svc_register_hooks(); + + /* Setup subprocess env for certain variables + */ + ap_hook_fixups(h2_h2_fixups, NULL,NULL, APR_HOOK_MIDDLE); + + /* test http2 connection status handler */ + ap_hook_handler(h2_filter_h2_status_handler, NULL, NULL, APR_HOOK_MIDDLE); +} + +static const char *val_HTTP2(apr_pool_t *p, server_rec *s, + conn_rec *c, request_rec *r, h2_ctx *ctx) +{ + return ctx? 
"on" : "off"; +} + +static const char *val_H2_PUSH(apr_pool_t *p, server_rec *s, + conn_rec *c, request_rec *r, h2_ctx *ctx) +{ + if (ctx) { + if (r) { + h2_task *task = h2_ctx_get_task(ctx); + if (task) { + h2_stream *stream = h2_mplx_stream_get(task->mplx, task->stream_id); + if (stream && stream->push_policy != H2_PUSH_NONE) { + return "on"; + } + } + } + else if (c && h2_session_push_enabled(ctx->session)) { + return "on"; + } + } + else if (s) { + const h2_config *cfg = h2_config_sget(s); + if (cfg && h2_config_geti(cfg, H2_CONF_PUSH)) { + return "on"; + } + } + return "off"; +} + +static const char *val_H2_PUSHED(apr_pool_t *p, server_rec *s, + conn_rec *c, request_rec *r, h2_ctx *ctx) +{ + if (ctx) { + h2_task *task = h2_ctx_get_task(ctx); + if (task && !H2_STREAM_CLIENT_INITIATED(task->stream_id)) { + return "PUSHED"; + } + } + return ""; +} + +static const char *val_H2_PUSHED_ON(apr_pool_t *p, server_rec *s, + conn_rec *c, request_rec *r, h2_ctx *ctx) +{ + if (ctx) { + h2_task *task = h2_ctx_get_task(ctx); + if (task && !H2_STREAM_CLIENT_INITIATED(task->stream_id)) { + h2_stream *stream = h2_mplx_stream_get(task->mplx, task->stream_id); + if (stream) { + return apr_itoa(p, stream->initiated_on); + } + } + } + return ""; +} + +static const char *val_H2_STREAM_TAG(apr_pool_t *p, server_rec *s, + conn_rec *c, request_rec *r, h2_ctx *ctx) +{ + if (ctx) { + h2_task *task = h2_ctx_get_task(ctx); + if (task) { + return task->id; + } + } + return ""; +} + +static const char *val_H2_STREAM_ID(apr_pool_t *p, server_rec *s, + conn_rec *c, request_rec *r, h2_ctx *ctx) +{ + const char *cp = val_H2_STREAM_TAG(p, s, c, r, ctx); + if (cp && (cp = ap_strchr_c(cp, '-'))) { + return ++cp; + } + return NULL; +} + +typedef const char *h2_var_lookup(apr_pool_t *p, server_rec *s, + conn_rec *c, request_rec *r, h2_ctx *ctx); +typedef struct h2_var_def { + const char *name; + h2_var_lookup *lookup; + unsigned int subprocess : 1; /* should be set in r->subprocess_env */ +} h2_var_def; + +static h2_var_def H2_VARS[] = { + { "HTTP2", val_HTTP2, 1 }, + { "H2PUSH", val_H2_PUSH, 1 }, + { "H2_PUSH", val_H2_PUSH, 1 }, + { "H2_PUSHED", val_H2_PUSHED, 1 }, + { "H2_PUSHED_ON", val_H2_PUSHED_ON, 1 }, + { "H2_STREAM_ID", val_H2_STREAM_ID, 1 }, + { "H2_STREAM_TAG", val_H2_STREAM_TAG, 1 }, +}; + +#ifndef H2_ALEN +#define H2_ALEN(a) (sizeof(a)/sizeof((a)[0])) +#endif + + +static int http2_is_h2(conn_rec *c) +{ + return h2_ctx_get(c->master? c->master : c, 0) != NULL; +} + +static char *http2_var_lookup(apr_pool_t *p, server_rec *s, + conn_rec *c, request_rec *r, char *name) +{ + int i; + /* If the # of vars grow, we need to put definitions in a hash */ + for (i = 0; i < H2_ALEN(H2_VARS); ++i) { + h2_var_def *vdef = &H2_VARS[i]; + if (!strcmp(vdef->name, name)) { + h2_ctx *ctx = (r? h2_ctx_rget(r) : + h2_ctx_get(c->master? 
c->master : c, 0)); + return (char *)vdef->lookup(p, s, c, r, ctx); + } + } + return (char*)""; +} + +static int h2_h2_fixups(request_rec *r) +{ + if (r->connection->master) { + h2_ctx *ctx = h2_ctx_rget(r); + int i; + + for (i = 0; ctx && i < H2_ALEN(H2_VARS); ++i) { + h2_var_def *vdef = &H2_VARS[i]; + if (vdef->subprocess) { + apr_table_setn(r->subprocess_env, vdef->name, + vdef->lookup(r->pool, r->server, r->connection, + r, ctx)); + } + } + } + return DECLINED; +} diff --git a/modules/http2/mod_http2.dep b/modules/http2/mod_http2.dep new file mode 100644 index 0000000..52f2286 --- /dev/null +++ b/modules/http2/mod_http2.dep @@ -0,0 +1,1433 @@ +# Microsoft Developer Studio Generated Dependency File, included by mod_http2.mak + +./h2_alt_svc.c : \ + "..\..\include\ap_config.h"\ + "..\..\include\ap_config_layout.h"\ + "..\..\include\ap_expr.h"\ + "..\..\include\ap_hooks.h"\ + "..\..\include\ap_mmn.h"\ + "..\..\include\ap_regex.h"\ + "..\..\include\ap_release.h"\ + "..\..\include\apache_noprobes.h"\ + "..\..\include\http_config.h"\ + "..\..\include\http_connection.h"\ + "..\..\include\http_core.h"\ + "..\..\include\http_log.h"\ + "..\..\include\http_protocol.h"\ + "..\..\include\httpd.h"\ + "..\..\include\os.h"\ + "..\..\include\util_cfgtree.h"\ + "..\..\include\util_filter.h"\ + "..\..\srclib\apr-util\include\apr_buckets.h"\ + "..\..\srclib\apr-util\include\apr_hooks.h"\ + "..\..\srclib\apr-util\include\apr_optional.h"\ + "..\..\srclib\apr-util\include\apr_optional_hooks.h"\ + "..\..\srclib\apr-util\include\apr_uri.h"\ + "..\..\srclib\apr-util\include\apu.h"\ + "..\..\srclib\apr\include\apr.h"\ + "..\..\srclib\apr\include\apr_allocator.h"\ + "..\..\srclib\apr\include\apr_dso.h"\ + "..\..\srclib\apr\include\apr_errno.h"\ + "..\..\srclib\apr\include\apr_file_info.h"\ + "..\..\srclib\apr\include\apr_file_io.h"\ + "..\..\srclib\apr\include\apr_general.h"\ + "..\..\srclib\apr\include\apr_global_mutex.h"\ + "..\..\srclib\apr\include\apr_hash.h"\ + "..\..\srclib\apr\include\apr_inherit.h"\ + "..\..\srclib\apr\include\apr_mmap.h"\ + "..\..\srclib\apr\include\apr_network_io.h"\ + "..\..\srclib\apr\include\apr_poll.h"\ + "..\..\srclib\apr\include\apr_pools.h"\ + "..\..\srclib\apr\include\apr_portable.h"\ + "..\..\srclib\apr\include\apr_proc_mutex.h"\ + "..\..\srclib\apr\include\apr_ring.h"\ + "..\..\srclib\apr\include\apr_shm.h"\ + "..\..\srclib\apr\include\apr_strings.h"\ + "..\..\srclib\apr\include\apr_tables.h"\ + "..\..\srclib\apr\include\apr_thread_mutex.h"\ + "..\..\srclib\apr\include\apr_thread_proc.h"\ + "..\..\srclib\apr\include\apr_time.h"\ + "..\..\srclib\apr\include\apr_user.h"\ + "..\..\srclib\apr\include\apr_want.h"\ + ".\h2_alt_svc.h"\ + ".\h2_config.h"\ + ".\h2_ctx.h"\ + ".\h2_h2.h"\ + ".\h2_private.h"\ + ".\h2_util.h"\ + + +./h2_bucket_eos.c : \ + "..\..\include\ap_config.h"\ + "..\..\include\ap_config_layout.h"\ + "..\..\include\ap_expr.h"\ + "..\..\include\ap_hooks.h"\ + "..\..\include\ap_mmn.h"\ + "..\..\include\ap_regex.h"\ + "..\..\include\ap_release.h"\ + "..\..\include\apache_noprobes.h"\ + "..\..\include\http_config.h"\ + "..\..\include\http_connection.h"\ + "..\..\include\http_core.h"\ + "..\..\include\http_log.h"\ + "..\..\include\httpd.h"\ + "..\..\include\os.h"\ + "..\..\include\util_cfgtree.h"\ + "..\..\include\util_filter.h"\ + "..\..\srclib\apr-util\include\apr_buckets.h"\ + "..\..\srclib\apr-util\include\apr_hooks.h"\ + "..\..\srclib\apr-util\include\apr_optional.h"\ + "..\..\srclib\apr-util\include\apr_optional_hooks.h"\ + 
"..\..\srclib\apr-util\include\apr_queue.h"\ + "..\..\srclib\apr-util\include\apr_uri.h"\ + "..\..\srclib\apr-util\include\apu.h"\ + "..\..\srclib\apr\include\apr.h"\ + "..\..\srclib\apr\include\apr_allocator.h"\ + "..\..\srclib\apr\include\apr_errno.h"\ + "..\..\srclib\apr\include\apr_file_info.h"\ + "..\..\srclib\apr\include\apr_file_io.h"\ + "..\..\srclib\apr\include\apr_general.h"\ + "..\..\srclib\apr\include\apr_hash.h"\ + "..\..\srclib\apr\include\apr_inherit.h"\ + "..\..\srclib\apr\include\apr_mmap.h"\ + "..\..\srclib\apr\include\apr_network_io.h"\ + "..\..\srclib\apr\include\apr_poll.h"\ + "..\..\srclib\apr\include\apr_pools.h"\ + "..\..\srclib\apr\include\apr_ring.h"\ + "..\..\srclib\apr\include\apr_tables.h"\ + "..\..\srclib\apr\include\apr_thread_mutex.h"\ + "..\..\srclib\apr\include\apr_thread_proc.h"\ + "..\..\srclib\apr\include\apr_time.h"\ + "..\..\srclib\apr\include\apr_user.h"\ + "..\..\srclib\apr\include\apr_want.h"\ + ".\h2.h"\ + ".\h2_bucket_eos.h"\ + ".\h2_mplx.h"\ + ".\h2_private.h"\ + ".\h2_stream.h"\ + + +./h2_config.c : \ + "..\..\include\ap_config.h"\ + "..\..\include\ap_config_layout.h"\ + "..\..\include\ap_expr.h"\ + "..\..\include\ap_hooks.h"\ + "..\..\include\ap_mmn.h"\ + "..\..\include\ap_mpm.h"\ + "..\..\include\ap_regex.h"\ + "..\..\include\ap_release.h"\ + "..\..\include\apache_noprobes.h"\ + "..\..\include\http_config.h"\ + "..\..\include\http_core.h"\ + "..\..\include\http_log.h"\ + "..\..\include\http_vhost.h"\ + "..\..\include\httpd.h"\ + "..\..\include\os.h"\ + "..\..\include\scoreboard.h"\ + "..\..\include\util_cfgtree.h"\ + "..\..\include\util_filter.h"\ + "..\..\srclib\apr-util\include\apr_buckets.h"\ + "..\..\srclib\apr-util\include\apr_hooks.h"\ + "..\..\srclib\apr-util\include\apr_optional.h"\ + "..\..\srclib\apr-util\include\apr_optional_hooks.h"\ + "..\..\srclib\apr-util\include\apr_uri.h"\ + "..\..\srclib\apr-util\include\apu.h"\ + "..\..\srclib\apr\include\apr.h"\ + "..\..\srclib\apr\include\apr_allocator.h"\ + "..\..\srclib\apr\include\apr_dso.h"\ + "..\..\srclib\apr\include\apr_errno.h"\ + "..\..\srclib\apr\include\apr_file_info.h"\ + "..\..\srclib\apr\include\apr_file_io.h"\ + "..\..\srclib\apr\include\apr_general.h"\ + "..\..\srclib\apr\include\apr_global_mutex.h"\ + "..\..\srclib\apr\include\apr_hash.h"\ + "..\..\srclib\apr\include\apr_inherit.h"\ + "..\..\srclib\apr\include\apr_lib.h"\ + "..\..\srclib\apr\include\apr_mmap.h"\ + "..\..\srclib\apr\include\apr_network_io.h"\ + "..\..\srclib\apr\include\apr_poll.h"\ + "..\..\srclib\apr\include\apr_pools.h"\ + "..\..\srclib\apr\include\apr_portable.h"\ + "..\..\srclib\apr\include\apr_proc_mutex.h"\ + "..\..\srclib\apr\include\apr_ring.h"\ + "..\..\srclib\apr\include\apr_shm.h"\ + "..\..\srclib\apr\include\apr_strings.h"\ + "..\..\srclib\apr\include\apr_tables.h"\ + "..\..\srclib\apr\include\apr_thread_mutex.h"\ + "..\..\srclib\apr\include\apr_thread_proc.h"\ + "..\..\srclib\apr\include\apr_time.h"\ + "..\..\srclib\apr\include\apr_user.h"\ + "..\..\srclib\apr\include\apr_want.h"\ + ".\h2.h"\ + ".\h2_alt_svc.h"\ + ".\h2_config.h"\ + ".\h2_conn.h"\ + ".\h2_ctx.h"\ + ".\h2_h2.h"\ + ".\h2_private.h"\ + + +./h2_conn.c : \ + "..\..\include\ap_config.h"\ + "..\..\include\ap_config_layout.h"\ + "..\..\include\ap_expr.h"\ + "..\..\include\ap_hooks.h"\ + "..\..\include\ap_mmn.h"\ + "..\..\include\ap_mpm.h"\ + "..\..\include\ap_regex.h"\ + "..\..\include\ap_release.h"\ + "..\..\include\apache_noprobes.h"\ + "..\..\include\http_config.h"\ + "..\..\include\http_connection.h"\ + 
"..\..\include\http_core.h"\ + "..\..\include\http_log.h"\ + "..\..\include\http_protocol.h"\ + "..\..\include\http_request.h"\ + "..\..\include\httpd.h"\ + "..\..\include\os.h"\ + "..\..\include\scoreboard.h"\ + "..\..\include\util_cfgtree.h"\ + "..\..\include\util_filter.h"\ + "..\..\srclib\apr-util\include\apr_buckets.h"\ + "..\..\srclib\apr-util\include\apr_hooks.h"\ + "..\..\srclib\apr-util\include\apr_optional.h"\ + "..\..\srclib\apr-util\include\apr_optional_hooks.h"\ + "..\..\srclib\apr-util\include\apr_queue.h"\ + "..\..\srclib\apr-util\include\apr_uri.h"\ + "..\..\srclib\apr-util\include\apu.h"\ + "..\..\srclib\apr\include\apr.h"\ + "..\..\srclib\apr\include\apr_allocator.h"\ + "..\..\srclib\apr\include\apr_dso.h"\ + "..\..\srclib\apr\include\apr_errno.h"\ + "..\..\srclib\apr\include\apr_file_info.h"\ + "..\..\srclib\apr\include\apr_file_io.h"\ + "..\..\srclib\apr\include\apr_general.h"\ + "..\..\srclib\apr\include\apr_global_mutex.h"\ + "..\..\srclib\apr\include\apr_hash.h"\ + "..\..\srclib\apr\include\apr_inherit.h"\ + "..\..\srclib\apr\include\apr_mmap.h"\ + "..\..\srclib\apr\include\apr_network_io.h"\ + "..\..\srclib\apr\include\apr_poll.h"\ + "..\..\srclib\apr\include\apr_pools.h"\ + "..\..\srclib\apr\include\apr_portable.h"\ + "..\..\srclib\apr\include\apr_proc_mutex.h"\ + "..\..\srclib\apr\include\apr_ring.h"\ + "..\..\srclib\apr\include\apr_shm.h"\ + "..\..\srclib\apr\include\apr_tables.h"\ + "..\..\srclib\apr\include\apr_thread_mutex.h"\ + "..\..\srclib\apr\include\apr_thread_proc.h"\ + "..\..\srclib\apr\include\apr_time.h"\ + "..\..\srclib\apr\include\apr_user.h"\ + "..\..\srclib\apr\include\apr_want.h"\ + ".\h2.h"\ + ".\h2_config.h"\ + ".\h2_conn.h"\ + ".\h2_conn_io.h"\ + ".\h2_ctx.h"\ + ".\h2_filter.h"\ + ".\h2_h2.h"\ + ".\h2_mplx.h"\ + ".\h2_private.h"\ + ".\h2_session.h"\ + ".\h2_stream.h"\ + ".\h2_task.h"\ + ".\h2_version.h"\ + ".\h2_workers.h"\ + + +./h2_conn_io.c : \ + "..\..\include\ap_config.h"\ + "..\..\include\ap_config_layout.h"\ + "..\..\include\ap_expr.h"\ + "..\..\include\ap_hooks.h"\ + "..\..\include\ap_mmn.h"\ + "..\..\include\ap_mpm.h"\ + "..\..\include\ap_regex.h"\ + "..\..\include\ap_release.h"\ + "..\..\include\apache_noprobes.h"\ + "..\..\include\http_config.h"\ + "..\..\include\http_connection.h"\ + "..\..\include\http_core.h"\ + "..\..\include\http_log.h"\ + "..\..\include\http_request.h"\ + "..\..\include\httpd.h"\ + "..\..\include\os.h"\ + "..\..\include\scoreboard.h"\ + "..\..\include\util_cfgtree.h"\ + "..\..\include\util_filter.h"\ + "..\..\srclib\apr-util\include\apr_buckets.h"\ + "..\..\srclib\apr-util\include\apr_hooks.h"\ + "..\..\srclib\apr-util\include\apr_optional.h"\ + "..\..\srclib\apr-util\include\apr_optional_hooks.h"\ + "..\..\srclib\apr-util\include\apr_uri.h"\ + "..\..\srclib\apr-util\include\apu.h"\ + "..\..\srclib\apr\include\apr.h"\ + "..\..\srclib\apr\include\apr_allocator.h"\ + "..\..\srclib\apr\include\apr_dso.h"\ + "..\..\srclib\apr\include\apr_errno.h"\ + "..\..\srclib\apr\include\apr_file_info.h"\ + "..\..\srclib\apr\include\apr_file_io.h"\ + "..\..\srclib\apr\include\apr_general.h"\ + "..\..\srclib\apr\include\apr_global_mutex.h"\ + "..\..\srclib\apr\include\apr_hash.h"\ + "..\..\srclib\apr\include\apr_inherit.h"\ + "..\..\srclib\apr\include\apr_mmap.h"\ + "..\..\srclib\apr\include\apr_network_io.h"\ + "..\..\srclib\apr\include\apr_poll.h"\ + "..\..\srclib\apr\include\apr_pools.h"\ + "..\..\srclib\apr\include\apr_portable.h"\ + "..\..\srclib\apr\include\apr_proc_mutex.h"\ + "..\..\srclib\apr\include\apr_ring.h"\ + 
"..\..\srclib\apr\include\apr_shm.h"\ + "..\..\srclib\apr\include\apr_strings.h"\ + "..\..\srclib\apr\include\apr_tables.h"\ + "..\..\srclib\apr\include\apr_thread_mutex.h"\ + "..\..\srclib\apr\include\apr_thread_proc.h"\ + "..\..\srclib\apr\include\apr_time.h"\ + "..\..\srclib\apr\include\apr_user.h"\ + "..\..\srclib\apr\include\apr_want.h"\ + ".\h2.h"\ + ".\h2_bucket_eos.h"\ + ".\h2_config.h"\ + ".\h2_conn_io.h"\ + ".\h2_h2.h"\ + ".\h2_private.h"\ + ".\h2_session.h"\ + ".\h2_util.h"\ + + +./h2_ctx.c : \ + "..\..\include\ap_config.h"\ + "..\..\include\ap_config_layout.h"\ + "..\..\include\ap_expr.h"\ + "..\..\include\ap_hooks.h"\ + "..\..\include\ap_mmn.h"\ + "..\..\include\ap_regex.h"\ + "..\..\include\ap_release.h"\ + "..\..\include\apache_noprobes.h"\ + "..\..\include\http_config.h"\ + "..\..\include\http_core.h"\ + "..\..\include\httpd.h"\ + "..\..\include\os.h"\ + "..\..\include\util_cfgtree.h"\ + "..\..\include\util_filter.h"\ + "..\..\srclib\apr-util\include\apr_buckets.h"\ + "..\..\srclib\apr-util\include\apr_hooks.h"\ + "..\..\srclib\apr-util\include\apr_optional.h"\ + "..\..\srclib\apr-util\include\apr_optional_hooks.h"\ + "..\..\srclib\apr-util\include\apr_uri.h"\ + "..\..\srclib\apr-util\include\apu.h"\ + "..\..\srclib\apr\include\apr.h"\ + "..\..\srclib\apr\include\apr_allocator.h"\ + "..\..\srclib\apr\include\apr_errno.h"\ + "..\..\srclib\apr\include\apr_file_info.h"\ + "..\..\srclib\apr\include\apr_file_io.h"\ + "..\..\srclib\apr\include\apr_general.h"\ + "..\..\srclib\apr\include\apr_hash.h"\ + "..\..\srclib\apr\include\apr_inherit.h"\ + "..\..\srclib\apr\include\apr_mmap.h"\ + "..\..\srclib\apr\include\apr_network_io.h"\ + "..\..\srclib\apr\include\apr_poll.h"\ + "..\..\srclib\apr\include\apr_pools.h"\ + "..\..\srclib\apr\include\apr_ring.h"\ + "..\..\srclib\apr\include\apr_tables.h"\ + "..\..\srclib\apr\include\apr_thread_mutex.h"\ + "..\..\srclib\apr\include\apr_thread_proc.h"\ + "..\..\srclib\apr\include\apr_time.h"\ + "..\..\srclib\apr\include\apr_user.h"\ + "..\..\srclib\apr\include\apr_want.h"\ + ".\h2.h"\ + ".\h2_conn_io.h"\ + ".\h2_ctx.h"\ + ".\h2_private.h"\ + ".\h2_session.h"\ + ".\h2_task.h"\ + + +./h2_filter.c : \ + "..\..\include\ap_config.h"\ + "..\..\include\ap_config_layout.h"\ + "..\..\include\ap_expr.h"\ + "..\..\include\ap_hooks.h"\ + "..\..\include\ap_mmn.h"\ + "..\..\include\ap_regex.h"\ + "..\..\include\ap_release.h"\ + "..\..\include\apache_noprobes.h"\ + "..\..\include\http_config.h"\ + "..\..\include\http_connection.h"\ + "..\..\include\http_core.h"\ + "..\..\include\http_log.h"\ + "..\..\include\httpd.h"\ + "..\..\include\os.h"\ + "..\..\include\scoreboard.h"\ + "..\..\include\util_cfgtree.h"\ + "..\..\include\util_filter.h"\ + "..\..\srclib\apr-util\include\apr_buckets.h"\ + "..\..\srclib\apr-util\include\apr_hooks.h"\ + "..\..\srclib\apr-util\include\apr_optional.h"\ + "..\..\srclib\apr-util\include\apr_optional_hooks.h"\ + "..\..\srclib\apr-util\include\apr_queue.h"\ + "..\..\srclib\apr-util\include\apr_uri.h"\ + "..\..\srclib\apr-util\include\apu.h"\ + "..\..\srclib\apr\include\apr.h"\ + "..\..\srclib\apr\include\apr_allocator.h"\ + "..\..\srclib\apr\include\apr_dso.h"\ + "..\..\srclib\apr\include\apr_errno.h"\ + "..\..\srclib\apr\include\apr_file_info.h"\ + "..\..\srclib\apr\include\apr_file_io.h"\ + "..\..\srclib\apr\include\apr_general.h"\ + "..\..\srclib\apr\include\apr_global_mutex.h"\ + "..\..\srclib\apr\include\apr_hash.h"\ + "..\..\srclib\apr\include\apr_inherit.h"\ + "..\..\srclib\apr\include\apr_mmap.h"\ + 
"..\..\srclib\apr\include\apr_network_io.h"\ + "..\..\srclib\apr\include\apr_poll.h"\ + "..\..\srclib\apr\include\apr_pools.h"\ + "..\..\srclib\apr\include\apr_portable.h"\ + "..\..\srclib\apr\include\apr_proc_mutex.h"\ + "..\..\srclib\apr\include\apr_ring.h"\ + "..\..\srclib\apr\include\apr_shm.h"\ + "..\..\srclib\apr\include\apr_tables.h"\ + "..\..\srclib\apr\include\apr_thread_mutex.h"\ + "..\..\srclib\apr\include\apr_thread_proc.h"\ + "..\..\srclib\apr\include\apr_time.h"\ + "..\..\srclib\apr\include\apr_user.h"\ + "..\..\srclib\apr\include\apr_want.h"\ + ".\h2.h"\ + ".\h2_conn_io.h"\ + ".\h2_ctx.h"\ + ".\h2_filter.h"\ + ".\h2_mplx.h"\ + ".\h2_private.h"\ + ".\h2_push.h"\ + ".\h2_request.h"\ + ".\h2_session.h"\ + ".\h2_stream.h"\ + ".\h2_task.h"\ + ".\h2_util.h"\ + ".\h2_version.h"\ + + +./h2_from_h1.c : \ + "..\..\include\ap_config.h"\ + "..\..\include\ap_config_layout.h"\ + "..\..\include\ap_expr.h"\ + "..\..\include\ap_hooks.h"\ + "..\..\include\ap_mmn.h"\ + "..\..\include\ap_regex.h"\ + "..\..\include\ap_release.h"\ + "..\..\include\apache_noprobes.h"\ + "..\..\include\http_config.h"\ + "..\..\include\http_connection.h"\ + "..\..\include\http_core.h"\ + "..\..\include\http_log.h"\ + "..\..\include\http_protocol.h"\ + "..\..\include\http_request.h"\ + "..\..\include\httpd.h"\ + "..\..\include\os.h"\ + "..\..\include\util_cfgtree.h"\ + "..\..\include\util_filter.h"\ + "..\..\include\util_time.h"\ + "..\..\srclib\apr-util\include\apr_buckets.h"\ + "..\..\srclib\apr-util\include\apr_hooks.h"\ + "..\..\srclib\apr-util\include\apr_optional.h"\ + "..\..\srclib\apr-util\include\apr_optional_hooks.h"\ + "..\..\srclib\apr-util\include\apr_uri.h"\ + "..\..\srclib\apr-util\include\apu.h"\ + "..\..\srclib\apr\include\apr.h"\ + "..\..\srclib\apr\include\apr_allocator.h"\ + "..\..\srclib\apr\include\apr_dso.h"\ + "..\..\srclib\apr\include\apr_errno.h"\ + "..\..\srclib\apr\include\apr_file_info.h"\ + "..\..\srclib\apr\include\apr_file_io.h"\ + "..\..\srclib\apr\include\apr_general.h"\ + "..\..\srclib\apr\include\apr_global_mutex.h"\ + "..\..\srclib\apr\include\apr_hash.h"\ + "..\..\srclib\apr\include\apr_inherit.h"\ + "..\..\srclib\apr\include\apr_lib.h"\ + "..\..\srclib\apr\include\apr_mmap.h"\ + "..\..\srclib\apr\include\apr_network_io.h"\ + "..\..\srclib\apr\include\apr_poll.h"\ + "..\..\srclib\apr\include\apr_pools.h"\ + "..\..\srclib\apr\include\apr_portable.h"\ + "..\..\srclib\apr\include\apr_proc_mutex.h"\ + "..\..\srclib\apr\include\apr_ring.h"\ + "..\..\srclib\apr\include\apr_shm.h"\ + "..\..\srclib\apr\include\apr_strings.h"\ + "..\..\srclib\apr\include\apr_tables.h"\ + "..\..\srclib\apr\include\apr_thread_mutex.h"\ + "..\..\srclib\apr\include\apr_thread_proc.h"\ + "..\..\srclib\apr\include\apr_time.h"\ + "..\..\srclib\apr\include\apr_user.h"\ + "..\..\srclib\apr\include\apr_want.h"\ + ".\h2.h"\ + ".\h2_from_h1.h"\ + ".\h2_private.h"\ + ".\h2_task.h"\ + ".\h2_util.h"\ + + +./h2_h2.c : \ + "..\..\include\ap_config.h"\ + "..\..\include\ap_config_layout.h"\ + "..\..\include\ap_expr.h"\ + "..\..\include\ap_hooks.h"\ + "..\..\include\ap_mmn.h"\ + "..\..\include\ap_regex.h"\ + "..\..\include\ap_release.h"\ + "..\..\include\apache_noprobes.h"\ + "..\..\include\http_config.h"\ + "..\..\include\http_connection.h"\ + "..\..\include\http_core.h"\ + "..\..\include\http_log.h"\ + "..\..\include\http_protocol.h"\ + "..\..\include\http_request.h"\ + "..\..\include\httpd.h"\ + "..\..\include\os.h"\ + "..\..\include\util_cfgtree.h"\ + "..\..\include\util_filter.h"\ + 
"..\..\srclib\apr-util\include\apr_buckets.h"\ + "..\..\srclib\apr-util\include\apr_hooks.h"\ + "..\..\srclib\apr-util\include\apr_optional.h"\ + "..\..\srclib\apr-util\include\apr_optional_hooks.h"\ + "..\..\srclib\apr-util\include\apr_uri.h"\ + "..\..\srclib\apr-util\include\apu.h"\ + "..\..\srclib\apr\include\apr.h"\ + "..\..\srclib\apr\include\apr_allocator.h"\ + "..\..\srclib\apr\include\apr_dso.h"\ + "..\..\srclib\apr\include\apr_errno.h"\ + "..\..\srclib\apr\include\apr_file_info.h"\ + "..\..\srclib\apr\include\apr_file_io.h"\ + "..\..\srclib\apr\include\apr_general.h"\ + "..\..\srclib\apr\include\apr_global_mutex.h"\ + "..\..\srclib\apr\include\apr_hash.h"\ + "..\..\srclib\apr\include\apr_inherit.h"\ + "..\..\srclib\apr\include\apr_mmap.h"\ + "..\..\srclib\apr\include\apr_network_io.h"\ + "..\..\srclib\apr\include\apr_poll.h"\ + "..\..\srclib\apr\include\apr_pools.h"\ + "..\..\srclib\apr\include\apr_portable.h"\ + "..\..\srclib\apr\include\apr_proc_mutex.h"\ + "..\..\srclib\apr\include\apr_ring.h"\ + "..\..\srclib\apr\include\apr_shm.h"\ + "..\..\srclib\apr\include\apr_strings.h"\ + "..\..\srclib\apr\include\apr_tables.h"\ + "..\..\srclib\apr\include\apr_thread_mutex.h"\ + "..\..\srclib\apr\include\apr_thread_proc.h"\ + "..\..\srclib\apr\include\apr_time.h"\ + "..\..\srclib\apr\include\apr_user.h"\ + "..\..\srclib\apr\include\apr_want.h"\ + "..\ssl\mod_ssl.h"\ + ".\h2.h"\ + ".\h2_config.h"\ + ".\h2_conn.h"\ + ".\h2_conn_io.h"\ + ".\h2_ctx.h"\ + ".\h2_h2.h"\ + ".\h2_private.h"\ + ".\h2_request.h"\ + ".\h2_session.h"\ + ".\h2_stream.h"\ + ".\h2_task.h"\ + ".\h2_util.h"\ + ".\mod_http2.h"\ + + +./h2_int_queue.c : \ + "..\..\srclib\apr\include\apr.h"\ + "..\..\srclib\apr\include\apr_allocator.h"\ + "..\..\srclib\apr\include\apr_errno.h"\ + "..\..\srclib\apr\include\apr_general.h"\ + "..\..\srclib\apr\include\apr_pools.h"\ + "..\..\srclib\apr\include\apr_thread_mutex.h"\ + "..\..\srclib\apr\include\apr_want.h"\ + + +./h2_io.c : \ + "..\..\include\ap_config.h"\ + "..\..\include\ap_config_layout.h"\ + "..\..\include\ap_expr.h"\ + "..\..\include\ap_hooks.h"\ + "..\..\include\ap_mmn.h"\ + "..\..\include\ap_regex.h"\ + "..\..\include\ap_release.h"\ + "..\..\include\apache_noprobes.h"\ + "..\..\include\http_config.h"\ + "..\..\include\http_connection.h"\ + "..\..\include\http_core.h"\ + "..\..\include\http_log.h"\ + "..\..\include\http_request.h"\ + "..\..\include\httpd.h"\ + "..\..\include\os.h"\ + "..\..\include\util_cfgtree.h"\ + "..\..\include\util_filter.h"\ + "..\..\srclib\apr-util\include\apr_buckets.h"\ + "..\..\srclib\apr-util\include\apr_hooks.h"\ + "..\..\srclib\apr-util\include\apr_optional.h"\ + "..\..\srclib\apr-util\include\apr_optional_hooks.h"\ + "..\..\srclib\apr-util\include\apr_queue.h"\ + "..\..\srclib\apr-util\include\apr_uri.h"\ + "..\..\srclib\apr-util\include\apu.h"\ + "..\..\srclib\apr\include\apr.h"\ + "..\..\srclib\apr\include\apr_allocator.h"\ + "..\..\srclib\apr\include\apr_errno.h"\ + "..\..\srclib\apr\include\apr_file_info.h"\ + "..\..\srclib\apr\include\apr_file_io.h"\ + "..\..\srclib\apr\include\apr_general.h"\ + "..\..\srclib\apr\include\apr_hash.h"\ + "..\..\srclib\apr\include\apr_inherit.h"\ + "..\..\srclib\apr\include\apr_mmap.h"\ + "..\..\srclib\apr\include\apr_network_io.h"\ + "..\..\srclib\apr\include\apr_poll.h"\ + "..\..\srclib\apr\include\apr_pools.h"\ + "..\..\srclib\apr\include\apr_ring.h"\ + "..\..\srclib\apr\include\apr_tables.h"\ + "..\..\srclib\apr\include\apr_thread_cond.h"\ + "..\..\srclib\apr\include\apr_thread_mutex.h"\ + 
"..\..\srclib\apr\include\apr_thread_proc.h"\ + "..\..\srclib\apr\include\apr_time.h"\ + "..\..\srclib\apr\include\apr_user.h"\ + "..\..\srclib\apr\include\apr_want.h"\ + ".\h2.h"\ + ".\h2_h2.h"\ + ".\h2_mplx.h"\ + ".\h2_private.h"\ + ".\h2_request.h"\ + ".\h2_task.h"\ + ".\h2_util.h"\ + + +./h2_io_set.c : \ + "..\..\include\ap_config.h"\ + "..\..\include\ap_config_layout.h"\ + "..\..\include\ap_expr.h"\ + "..\..\include\ap_hooks.h"\ + "..\..\include\ap_mmn.h"\ + "..\..\include\ap_regex.h"\ + "..\..\include\ap_release.h"\ + "..\..\include\apache_noprobes.h"\ + "..\..\include\http_config.h"\ + "..\..\include\http_connection.h"\ + "..\..\include\http_core.h"\ + "..\..\include\http_log.h"\ + "..\..\include\httpd.h"\ + "..\..\include\os.h"\ + "..\..\include\util_cfgtree.h"\ + "..\..\include\util_filter.h"\ + "..\..\srclib\apr-util\include\apr_buckets.h"\ + "..\..\srclib\apr-util\include\apr_hooks.h"\ + "..\..\srclib\apr-util\include\apr_optional.h"\ + "..\..\srclib\apr-util\include\apr_optional_hooks.h"\ + "..\..\srclib\apr-util\include\apr_uri.h"\ + "..\..\srclib\apr-util\include\apu.h"\ + "..\..\srclib\apr\include\apr.h"\ + "..\..\srclib\apr\include\apr_allocator.h"\ + "..\..\srclib\apr\include\apr_errno.h"\ + "..\..\srclib\apr\include\apr_file_info.h"\ + "..\..\srclib\apr\include\apr_file_io.h"\ + "..\..\srclib\apr\include\apr_general.h"\ + "..\..\srclib\apr\include\apr_hash.h"\ + "..\..\srclib\apr\include\apr_inherit.h"\ + "..\..\srclib\apr\include\apr_mmap.h"\ + "..\..\srclib\apr\include\apr_network_io.h"\ + "..\..\srclib\apr\include\apr_poll.h"\ + "..\..\srclib\apr\include\apr_pools.h"\ + "..\..\srclib\apr\include\apr_ring.h"\ + "..\..\srclib\apr\include\apr_strings.h"\ + "..\..\srclib\apr\include\apr_tables.h"\ + "..\..\srclib\apr\include\apr_thread_mutex.h"\ + "..\..\srclib\apr\include\apr_thread_proc.h"\ + "..\..\srclib\apr\include\apr_time.h"\ + "..\..\srclib\apr\include\apr_user.h"\ + "..\..\srclib\apr\include\apr_want.h"\ + ".\h2_io_set.h"\ + ".\h2_private.h"\ + + +./h2_mplx.c : \ + "..\..\include\ap_config.h"\ + "..\..\include\ap_config_layout.h"\ + "..\..\include\ap_expr.h"\ + "..\..\include\ap_hooks.h"\ + "..\..\include\ap_mmn.h"\ + "..\..\include\ap_regex.h"\ + "..\..\include\ap_release.h"\ + "..\..\include\apache_noprobes.h"\ + "..\..\include\http_config.h"\ + "..\..\include\http_core.h"\ + "..\..\include\http_log.h"\ + "..\..\include\httpd.h"\ + "..\..\include\os.h"\ + "..\..\include\util_cfgtree.h"\ + "..\..\include\util_filter.h"\ + "..\..\srclib\apr-util\include\apr_buckets.h"\ + "..\..\srclib\apr-util\include\apr_hooks.h"\ + "..\..\srclib\apr-util\include\apr_optional.h"\ + "..\..\srclib\apr-util\include\apr_optional_hooks.h"\ + "..\..\srclib\apr-util\include\apr_queue.h"\ + "..\..\srclib\apr-util\include\apr_uri.h"\ + "..\..\srclib\apr-util\include\apu.h"\ + "..\..\srclib\apr\include\apr.h"\ + "..\..\srclib\apr\include\apr_allocator.h"\ + "..\..\srclib\apr\include\apr_errno.h"\ + "..\..\srclib\apr\include\apr_file_info.h"\ + "..\..\srclib\apr\include\apr_file_io.h"\ + "..\..\srclib\apr\include\apr_general.h"\ + "..\..\srclib\apr\include\apr_hash.h"\ + "..\..\srclib\apr\include\apr_inherit.h"\ + "..\..\srclib\apr\include\apr_mmap.h"\ + "..\..\srclib\apr\include\apr_network_io.h"\ + "..\..\srclib\apr\include\apr_poll.h"\ + "..\..\srclib\apr\include\apr_pools.h"\ + "..\..\srclib\apr\include\apr_ring.h"\ + "..\..\srclib\apr\include\apr_strings.h"\ + "..\..\srclib\apr\include\apr_tables.h"\ + "..\..\srclib\apr\include\apr_thread_cond.h"\ + 
"..\..\srclib\apr\include\apr_thread_mutex.h"\ + "..\..\srclib\apr\include\apr_thread_proc.h"\ + "..\..\srclib\apr\include\apr_time.h"\ + "..\..\srclib\apr\include\apr_user.h"\ + "..\..\srclib\apr\include\apr_want.h"\ + ".\h2.h"\ + ".\h2_config.h"\ + ".\h2_conn.h"\ + ".\h2_ctx.h"\ + ".\h2_h2.h"\ + ".\h2_mplx.h"\ + ".\h2_ngn_shed.h"\ + ".\h2_private.h"\ + ".\h2_request.h"\ + ".\h2_stream.h"\ + ".\h2_task.h"\ + ".\h2_util.h"\ + ".\h2_workers.h"\ + ".\mod_http2.h"\ + + +./h2_ngn_shed.c : \ + "..\..\include\ap_config.h"\ + "..\..\include\ap_config_layout.h"\ + "..\..\include\ap_expr.h"\ + "..\..\include\ap_hooks.h"\ + "..\..\include\ap_mmn.h"\ + "..\..\include\ap_regex.h"\ + "..\..\include\ap_release.h"\ + "..\..\include\apache_noprobes.h"\ + "..\..\include\http_config.h"\ + "..\..\include\http_core.h"\ + "..\..\include\http_log.h"\ + "..\..\include\httpd.h"\ + "..\..\include\os.h"\ + "..\..\include\util_cfgtree.h"\ + "..\..\include\util_filter.h"\ + "..\..\srclib\apr-util\include\apr_buckets.h"\ + "..\..\srclib\apr-util\include\apr_hooks.h"\ + "..\..\srclib\apr-util\include\apr_optional.h"\ + "..\..\srclib\apr-util\include\apr_optional_hooks.h"\ + "..\..\srclib\apr-util\include\apr_queue.h"\ + "..\..\srclib\apr-util\include\apr_uri.h"\ + "..\..\srclib\apr-util\include\apu.h"\ + "..\..\srclib\apr\include\apr.h"\ + "..\..\srclib\apr\include\apr_allocator.h"\ + "..\..\srclib\apr\include\apr_errno.h"\ + "..\..\srclib\apr\include\apr_file_info.h"\ + "..\..\srclib\apr\include\apr_file_io.h"\ + "..\..\srclib\apr\include\apr_general.h"\ + "..\..\srclib\apr\include\apr_hash.h"\ + "..\..\srclib\apr\include\apr_inherit.h"\ + "..\..\srclib\apr\include\apr_mmap.h"\ + "..\..\srclib\apr\include\apr_network_io.h"\ + "..\..\srclib\apr\include\apr_poll.h"\ + "..\..\srclib\apr\include\apr_pools.h"\ + "..\..\srclib\apr\include\apr_ring.h"\ + "..\..\srclib\apr\include\apr_strings.h"\ + "..\..\srclib\apr\include\apr_tables.h"\ + "..\..\srclib\apr\include\apr_thread_cond.h"\ + "..\..\srclib\apr\include\apr_thread_mutex.h"\ + "..\..\srclib\apr\include\apr_thread_proc.h"\ + "..\..\srclib\apr\include\apr_time.h"\ + "..\..\srclib\apr\include\apr_user.h"\ + "..\..\srclib\apr\include\apr_want.h"\ + ".\h2.h"\ + ".\h2_config.h"\ + ".\h2_conn.h"\ + ".\h2_ctx.h"\ + ".\h2_h2.h"\ + ".\h2_mplx.h"\ + ".\h2_ngn_shed.h"\ + ".\h2_private.h"\ + ".\h2_request.h"\ + ".\h2_task.h"\ + ".\h2_util.h"\ + ".\mod_http2.h"\ + + +./h2_push.c : \ + "..\..\include\ap_config.h"\ + "..\..\include\ap_config_layout.h"\ + "..\..\include\ap_expr.h"\ + "..\..\include\ap_hooks.h"\ + "..\..\include\ap_mmn.h"\ + "..\..\include\ap_regex.h"\ + "..\..\include\ap_release.h"\ + "..\..\include\apache_noprobes.h"\ + "..\..\include\http_config.h"\ + "..\..\include\http_core.h"\ + "..\..\include\http_log.h"\ + "..\..\include\httpd.h"\ + "..\..\include\os.h"\ + "..\..\include\util_cfgtree.h"\ + "..\..\include\util_filter.h"\ + "..\..\srclib\apr-util\include\apr_buckets.h"\ + "..\..\srclib\apr-util\include\apr_hooks.h"\ + "..\..\srclib\apr-util\include\apr_optional.h"\ + "..\..\srclib\apr-util\include\apr_optional_hooks.h"\ + "..\..\srclib\apr-util\include\apr_uri.h"\ + "..\..\srclib\apr-util\include\apu.h"\ + "..\..\srclib\apr\include\apr.h"\ + "..\..\srclib\apr\include\apr_allocator.h"\ + "..\..\srclib\apr\include\apr_errno.h"\ + "..\..\srclib\apr\include\apr_file_info.h"\ + "..\..\srclib\apr\include\apr_file_io.h"\ + "..\..\srclib\apr\include\apr_general.h"\ + "..\..\srclib\apr\include\apr_hash.h"\ + "..\..\srclib\apr\include\apr_inherit.h"\ + 
"..\..\srclib\apr\include\apr_lib.h"\ + "..\..\srclib\apr\include\apr_mmap.h"\ + "..\..\srclib\apr\include\apr_network_io.h"\ + "..\..\srclib\apr\include\apr_poll.h"\ + "..\..\srclib\apr\include\apr_pools.h"\ + "..\..\srclib\apr\include\apr_ring.h"\ + "..\..\srclib\apr\include\apr_strings.h"\ + "..\..\srclib\apr\include\apr_tables.h"\ + "..\..\srclib\apr\include\apr_thread_mutex.h"\ + "..\..\srclib\apr\include\apr_thread_proc.h"\ + "..\..\srclib\apr\include\apr_time.h"\ + "..\..\srclib\apr\include\apr_user.h"\ + "..\..\srclib\apr\include\apr_want.h"\ + ".\h2.h"\ + ".\h2_conn_io.h"\ + ".\h2_h2.h"\ + ".\h2_private.h"\ + ".\h2_push.h"\ + ".\h2_request.h"\ + ".\h2_session.h"\ + ".\h2_stream.h"\ + ".\h2_util.h"\ + + +./h2_request.c : \ + "..\..\include\ap_config.h"\ + "..\..\include\ap_config_layout.h"\ + "..\..\include\ap_expr.h"\ + "..\..\include\ap_hooks.h"\ + "..\..\include\ap_mmn.h"\ + "..\..\include\ap_mpm.h"\ + "..\..\include\ap_regex.h"\ + "..\..\include\ap_release.h"\ + "..\..\include\apache_noprobes.h"\ + "..\..\include\http_config.h"\ + "..\..\include\http_connection.h"\ + "..\..\include\http_core.h"\ + "..\..\include\http_log.h"\ + "..\..\include\http_protocol.h"\ + "..\..\include\http_request.h"\ + "..\..\include\http_vhost.h"\ + "..\..\include\httpd.h"\ + "..\..\include\mod_core.h"\ + "..\..\include\os.h"\ + "..\..\include\scoreboard.h"\ + "..\..\include\util_cfgtree.h"\ + "..\..\include\util_filter.h"\ + "..\..\srclib\apr-util\include\apr_buckets.h"\ + "..\..\srclib\apr-util\include\apr_hooks.h"\ + "..\..\srclib\apr-util\include\apr_optional.h"\ + "..\..\srclib\apr-util\include\apr_optional_hooks.h"\ + "..\..\srclib\apr-util\include\apr_uri.h"\ + "..\..\srclib\apr-util\include\apu.h"\ + "..\..\srclib\apr\include\apr.h"\ + "..\..\srclib\apr\include\apr_allocator.h"\ + "..\..\srclib\apr\include\apr_dso.h"\ + "..\..\srclib\apr\include\apr_errno.h"\ + "..\..\srclib\apr\include\apr_file_info.h"\ + "..\..\srclib\apr\include\apr_file_io.h"\ + "..\..\srclib\apr\include\apr_general.h"\ + "..\..\srclib\apr\include\apr_global_mutex.h"\ + "..\..\srclib\apr\include\apr_hash.h"\ + "..\..\srclib\apr\include\apr_inherit.h"\ + "..\..\srclib\apr\include\apr_mmap.h"\ + "..\..\srclib\apr\include\apr_network_io.h"\ + "..\..\srclib\apr\include\apr_poll.h"\ + "..\..\srclib\apr\include\apr_pools.h"\ + "..\..\srclib\apr\include\apr_portable.h"\ + "..\..\srclib\apr\include\apr_proc_mutex.h"\ + "..\..\srclib\apr\include\apr_ring.h"\ + "..\..\srclib\apr\include\apr_shm.h"\ + "..\..\srclib\apr\include\apr_strings.h"\ + "..\..\srclib\apr\include\apr_tables.h"\ + "..\..\srclib\apr\include\apr_thread_mutex.h"\ + "..\..\srclib\apr\include\apr_thread_proc.h"\ + "..\..\srclib\apr\include\apr_time.h"\ + "..\..\srclib\apr\include\apr_user.h"\ + "..\..\srclib\apr\include\apr_want.h"\ + ".\h2.h"\ + ".\h2_private.h"\ + ".\h2_push.h"\ + ".\h2_request.h"\ + ".\h2_util.h"\ + + +./h2_session.c : \ + "..\..\include\ap_config.h"\ + "..\..\include\ap_config_layout.h"\ + "..\..\include\ap_expr.h"\ + "..\..\include\ap_hooks.h"\ + "..\..\include\ap_mmn.h"\ + "..\..\include\ap_mpm.h"\ + "..\..\include\ap_regex.h"\ + "..\..\include\ap_release.h"\ + "..\..\include\apache_noprobes.h"\ + "..\..\include\http_config.h"\ + "..\..\include\http_core.h"\ + "..\..\include\http_log.h"\ + "..\..\include\httpd.h"\ + "..\..\include\os.h"\ + "..\..\include\scoreboard.h"\ + "..\..\include\util_cfgtree.h"\ + "..\..\include\util_filter.h"\ + "..\..\srclib\apr-util\include\apr_base64.h"\ + "..\..\srclib\apr-util\include\apr_buckets.h"\ + 
"..\..\srclib\apr-util\include\apr_hooks.h"\ + "..\..\srclib\apr-util\include\apr_optional.h"\ + "..\..\srclib\apr-util\include\apr_optional_hooks.h"\ + "..\..\srclib\apr-util\include\apr_queue.h"\ + "..\..\srclib\apr-util\include\apr_uri.h"\ + "..\..\srclib\apr-util\include\apu.h"\ + "..\..\srclib\apr\include\apr.h"\ + "..\..\srclib\apr\include\apr_allocator.h"\ + "..\..\srclib\apr\include\apr_dso.h"\ + "..\..\srclib\apr\include\apr_errno.h"\ + "..\..\srclib\apr\include\apr_file_info.h"\ + "..\..\srclib\apr\include\apr_file_io.h"\ + "..\..\srclib\apr\include\apr_general.h"\ + "..\..\srclib\apr\include\apr_global_mutex.h"\ + "..\..\srclib\apr\include\apr_hash.h"\ + "..\..\srclib\apr\include\apr_inherit.h"\ + "..\..\srclib\apr\include\apr_mmap.h"\ + "..\..\srclib\apr\include\apr_network_io.h"\ + "..\..\srclib\apr\include\apr_poll.h"\ + "..\..\srclib\apr\include\apr_pools.h"\ + "..\..\srclib\apr\include\apr_portable.h"\ + "..\..\srclib\apr\include\apr_proc_mutex.h"\ + "..\..\srclib\apr\include\apr_ring.h"\ + "..\..\srclib\apr\include\apr_shm.h"\ + "..\..\srclib\apr\include\apr_strings.h"\ + "..\..\srclib\apr\include\apr_tables.h"\ + "..\..\srclib\apr\include\apr_thread_cond.h"\ + "..\..\srclib\apr\include\apr_thread_mutex.h"\ + "..\..\srclib\apr\include\apr_thread_proc.h"\ + "..\..\srclib\apr\include\apr_time.h"\ + "..\..\srclib\apr\include\apr_user.h"\ + "..\..\srclib\apr\include\apr_want.h"\ + ".\h2.h"\ + ".\h2_bucket_eos.h"\ + ".\h2_config.h"\ + ".\h2_conn_io.h"\ + ".\h2_ctx.h"\ + ".\h2_filter.h"\ + ".\h2_from_h1.h"\ + ".\h2_h2.h"\ + ".\h2_mplx.h"\ + ".\h2_private.h"\ + ".\h2_push.h"\ + ".\h2_request.h"\ + ".\h2_session.h"\ + ".\h2_stream.h"\ + ".\h2_task.h"\ + ".\h2_util.h"\ + ".\h2_version.h"\ + ".\h2_workers.h"\ + + +./h2_stream.c : \ + "..\..\include\ap_config.h"\ + "..\..\include\ap_config_layout.h"\ + "..\..\include\ap_expr.h"\ + "..\..\include\ap_hooks.h"\ + "..\..\include\ap_mmn.h"\ + "..\..\include\ap_regex.h"\ + "..\..\include\ap_release.h"\ + "..\..\include\apache_noprobes.h"\ + "..\..\include\http_config.h"\ + "..\..\include\http_connection.h"\ + "..\..\include\http_core.h"\ + "..\..\include\http_log.h"\ + "..\..\include\httpd.h"\ + "..\..\include\os.h"\ + "..\..\include\util_cfgtree.h"\ + "..\..\include\util_filter.h"\ + "..\..\srclib\apr-util\include\apr_buckets.h"\ + "..\..\srclib\apr-util\include\apr_hooks.h"\ + "..\..\srclib\apr-util\include\apr_optional.h"\ + "..\..\srclib\apr-util\include\apr_optional_hooks.h"\ + "..\..\srclib\apr-util\include\apr_queue.h"\ + "..\..\srclib\apr-util\include\apr_uri.h"\ + "..\..\srclib\apr-util\include\apu.h"\ + "..\..\srclib\apr\include\apr.h"\ + "..\..\srclib\apr\include\apr_allocator.h"\ + "..\..\srclib\apr\include\apr_errno.h"\ + "..\..\srclib\apr\include\apr_file_info.h"\ + "..\..\srclib\apr\include\apr_file_io.h"\ + "..\..\srclib\apr\include\apr_general.h"\ + "..\..\srclib\apr\include\apr_hash.h"\ + "..\..\srclib\apr\include\apr_inherit.h"\ + "..\..\srclib\apr\include\apr_mmap.h"\ + "..\..\srclib\apr\include\apr_network_io.h"\ + "..\..\srclib\apr\include\apr_poll.h"\ + "..\..\srclib\apr\include\apr_pools.h"\ + "..\..\srclib\apr\include\apr_ring.h"\ + "..\..\srclib\apr\include\apr_tables.h"\ + "..\..\srclib\apr\include\apr_thread_mutex.h"\ + "..\..\srclib\apr\include\apr_thread_proc.h"\ + "..\..\srclib\apr\include\apr_time.h"\ + "..\..\srclib\apr\include\apr_user.h"\ + "..\..\srclib\apr\include\apr_want.h"\ + ".\h2.h"\ + ".\h2_config.h"\ + ".\h2_conn.h"\ + ".\h2_conn_io.h"\ + ".\h2_ctx.h"\ + ".\h2_filter.h"\ + ".\h2_h2.h"\ + 
".\h2_mplx.h"\ + ".\h2_private.h"\ + ".\h2_push.h"\ + ".\h2_request.h"\ + ".\h2_session.h"\ + ".\h2_stream.h"\ + ".\h2_task.h"\ + ".\h2_util.h"\ + + +./h2_switch.c : \ + "..\..\include\ap_config.h"\ + "..\..\include\ap_config_layout.h"\ + "..\..\include\ap_expr.h"\ + "..\..\include\ap_hooks.h"\ + "..\..\include\ap_mmn.h"\ + "..\..\include\ap_regex.h"\ + "..\..\include\ap_release.h"\ + "..\..\include\apache_noprobes.h"\ + "..\..\include\http_config.h"\ + "..\..\include\http_connection.h"\ + "..\..\include\http_core.h"\ + "..\..\include\http_log.h"\ + "..\..\include\http_protocol.h"\ + "..\..\include\httpd.h"\ + "..\..\include\os.h"\ + "..\..\include\util_cfgtree.h"\ + "..\..\include\util_filter.h"\ + "..\..\srclib\apr-util\include\apr_buckets.h"\ + "..\..\srclib\apr-util\include\apr_hooks.h"\ + "..\..\srclib\apr-util\include\apr_optional.h"\ + "..\..\srclib\apr-util\include\apr_optional_hooks.h"\ + "..\..\srclib\apr-util\include\apr_uri.h"\ + "..\..\srclib\apr-util\include\apu.h"\ + "..\..\srclib\apr\include\apr.h"\ + "..\..\srclib\apr\include\apr_allocator.h"\ + "..\..\srclib\apr\include\apr_dso.h"\ + "..\..\srclib\apr\include\apr_errno.h"\ + "..\..\srclib\apr\include\apr_file_info.h"\ + "..\..\srclib\apr\include\apr_file_io.h"\ + "..\..\srclib\apr\include\apr_general.h"\ + "..\..\srclib\apr\include\apr_global_mutex.h"\ + "..\..\srclib\apr\include\apr_hash.h"\ + "..\..\srclib\apr\include\apr_inherit.h"\ + "..\..\srclib\apr\include\apr_mmap.h"\ + "..\..\srclib\apr\include\apr_network_io.h"\ + "..\..\srclib\apr\include\apr_poll.h"\ + "..\..\srclib\apr\include\apr_pools.h"\ + "..\..\srclib\apr\include\apr_portable.h"\ + "..\..\srclib\apr\include\apr_proc_mutex.h"\ + "..\..\srclib\apr\include\apr_ring.h"\ + "..\..\srclib\apr\include\apr_shm.h"\ + "..\..\srclib\apr\include\apr_strings.h"\ + "..\..\srclib\apr\include\apr_tables.h"\ + "..\..\srclib\apr\include\apr_thread_mutex.h"\ + "..\..\srclib\apr\include\apr_thread_proc.h"\ + "..\..\srclib\apr\include\apr_time.h"\ + "..\..\srclib\apr\include\apr_user.h"\ + "..\..\srclib\apr\include\apr_want.h"\ + ".\h2_config.h"\ + ".\h2_conn.h"\ + ".\h2_ctx.h"\ + ".\h2_h2.h"\ + ".\h2_private.h"\ + ".\h2_switch.h"\ + + +./h2_task.c : \ + "..\..\include\ap_config.h"\ + "..\..\include\ap_config_layout.h"\ + "..\..\include\ap_expr.h"\ + "..\..\include\ap_hooks.h"\ + "..\..\include\ap_mmn.h"\ + "..\..\include\ap_mpm.h"\ + "..\..\include\ap_regex.h"\ + "..\..\include\ap_release.h"\ + "..\..\include\apache_noprobes.h"\ + "..\..\include\http_config.h"\ + "..\..\include\http_connection.h"\ + "..\..\include\http_core.h"\ + "..\..\include\http_log.h"\ + "..\..\include\http_protocol.h"\ + "..\..\include\http_request.h"\ + "..\..\include\http_vhost.h"\ + "..\..\include\httpd.h"\ + "..\..\include\mod_core.h"\ + "..\..\include\os.h"\ + "..\..\include\scoreboard.h"\ + "..\..\include\util_cfgtree.h"\ + "..\..\include\util_filter.h"\ + "..\..\srclib\apr-util\include\apr_buckets.h"\ + "..\..\srclib\apr-util\include\apr_hooks.h"\ + "..\..\srclib\apr-util\include\apr_optional.h"\ + "..\..\srclib\apr-util\include\apr_optional_hooks.h"\ + "..\..\srclib\apr-util\include\apr_queue.h"\ + "..\..\srclib\apr-util\include\apr_uri.h"\ + "..\..\srclib\apr-util\include\apu.h"\ + "..\..\srclib\apr\include\apr.h"\ + "..\..\srclib\apr\include\apr_allocator.h"\ + "..\..\srclib\apr\include\apr_atomic.h"\ + "..\..\srclib\apr\include\apr_dso.h"\ + "..\..\srclib\apr\include\apr_errno.h"\ + "..\..\srclib\apr\include\apr_file_info.h"\ + "..\..\srclib\apr\include\apr_file_io.h"\ + 
"..\..\srclib\apr\include\apr_general.h"\ + "..\..\srclib\apr\include\apr_global_mutex.h"\ + "..\..\srclib\apr\include\apr_hash.h"\ + "..\..\srclib\apr\include\apr_inherit.h"\ + "..\..\srclib\apr\include\apr_mmap.h"\ + "..\..\srclib\apr\include\apr_network_io.h"\ + "..\..\srclib\apr\include\apr_poll.h"\ + "..\..\srclib\apr\include\apr_pools.h"\ + "..\..\srclib\apr\include\apr_portable.h"\ + "..\..\srclib\apr\include\apr_proc_mutex.h"\ + "..\..\srclib\apr\include\apr_ring.h"\ + "..\..\srclib\apr\include\apr_shm.h"\ + "..\..\srclib\apr\include\apr_strings.h"\ + "..\..\srclib\apr\include\apr_tables.h"\ + "..\..\srclib\apr\include\apr_thread_cond.h"\ + "..\..\srclib\apr\include\apr_thread_mutex.h"\ + "..\..\srclib\apr\include\apr_thread_proc.h"\ + "..\..\srclib\apr\include\apr_time.h"\ + "..\..\srclib\apr\include\apr_user.h"\ + "..\..\srclib\apr\include\apr_want.h"\ + ".\h2.h"\ + ".\h2_config.h"\ + ".\h2_conn.h"\ + ".\h2_conn_io.h"\ + ".\h2_ctx.h"\ + ".\h2_from_h1.h"\ + ".\h2_h2.h"\ + ".\h2_mplx.h"\ + ".\h2_private.h"\ + ".\h2_request.h"\ + ".\h2_session.h"\ + ".\h2_stream.h"\ + ".\h2_task.h"\ + + +./h2_task_input.c : \ + "..\..\include\ap_config.h"\ + "..\..\include\ap_config_layout.h"\ + "..\..\include\ap_expr.h"\ + "..\..\include\ap_hooks.h"\ + "..\..\include\ap_mmn.h"\ + "..\..\include\ap_regex.h"\ + "..\..\include\ap_release.h"\ + "..\..\include\apache_noprobes.h"\ + "..\..\include\http_config.h"\ + "..\..\include\http_connection.h"\ + "..\..\include\http_core.h"\ + "..\..\include\http_log.h"\ + "..\..\include\httpd.h"\ + "..\..\include\os.h"\ + "..\..\include\util_cfgtree.h"\ + "..\..\include\util_filter.h"\ + "..\..\srclib\apr-util\include\apr_buckets.h"\ + "..\..\srclib\apr-util\include\apr_hooks.h"\ + "..\..\srclib\apr-util\include\apr_optional.h"\ + "..\..\srclib\apr-util\include\apr_optional_hooks.h"\ + "..\..\srclib\apr-util\include\apr_queue.h"\ + "..\..\srclib\apr-util\include\apr_uri.h"\ + "..\..\srclib\apr-util\include\apu.h"\ + "..\..\srclib\apr\include\apr.h"\ + "..\..\srclib\apr\include\apr_allocator.h"\ + "..\..\srclib\apr\include\apr_errno.h"\ + "..\..\srclib\apr\include\apr_file_info.h"\ + "..\..\srclib\apr\include\apr_file_io.h"\ + "..\..\srclib\apr\include\apr_general.h"\ + "..\..\srclib\apr\include\apr_hash.h"\ + "..\..\srclib\apr\include\apr_inherit.h"\ + "..\..\srclib\apr\include\apr_mmap.h"\ + "..\..\srclib\apr\include\apr_network_io.h"\ + "..\..\srclib\apr\include\apr_poll.h"\ + "..\..\srclib\apr\include\apr_pools.h"\ + "..\..\srclib\apr\include\apr_ring.h"\ + "..\..\srclib\apr\include\apr_tables.h"\ + "..\..\srclib\apr\include\apr_thread_mutex.h"\ + "..\..\srclib\apr\include\apr_thread_proc.h"\ + "..\..\srclib\apr\include\apr_time.h"\ + "..\..\srclib\apr\include\apr_user.h"\ + "..\..\srclib\apr\include\apr_want.h"\ + ".\h2.h"\ + ".\h2_conn.h"\ + ".\h2_conn_io.h"\ + ".\h2_mplx.h"\ + ".\h2_private.h"\ + ".\h2_request.h"\ + ".\h2_session.h"\ + ".\h2_stream.h"\ + ".\h2_task.h"\ + ".\h2_util.h"\ + + +./h2_task_output.c : \ + "..\..\include\ap_config.h"\ + "..\..\include\ap_config_layout.h"\ + "..\..\include\ap_expr.h"\ + "..\..\include\ap_hooks.h"\ + "..\..\include\ap_mmn.h"\ + "..\..\include\ap_regex.h"\ + "..\..\include\ap_release.h"\ + "..\..\include\apache_noprobes.h"\ + "..\..\include\http_config.h"\ + "..\..\include\http_connection.h"\ + "..\..\include\http_core.h"\ + "..\..\include\http_log.h"\ + "..\..\include\http_request.h"\ + "..\..\include\httpd.h"\ + "..\..\include\os.h"\ + "..\..\include\util_cfgtree.h"\ + "..\..\include\util_filter.h"\ + 
"..\..\srclib\apr-util\include\apr_buckets.h"\ + "..\..\srclib\apr-util\include\apr_hooks.h"\ + "..\..\srclib\apr-util\include\apr_optional.h"\ + "..\..\srclib\apr-util\include\apr_optional_hooks.h"\ + "..\..\srclib\apr-util\include\apr_queue.h"\ + "..\..\srclib\apr-util\include\apr_uri.h"\ + "..\..\srclib\apr-util\include\apu.h"\ + "..\..\srclib\apr\include\apr.h"\ + "..\..\srclib\apr\include\apr_allocator.h"\ + "..\..\srclib\apr\include\apr_errno.h"\ + "..\..\srclib\apr\include\apr_file_info.h"\ + "..\..\srclib\apr\include\apr_file_io.h"\ + "..\..\srclib\apr\include\apr_general.h"\ + "..\..\srclib\apr\include\apr_hash.h"\ + "..\..\srclib\apr\include\apr_inherit.h"\ + "..\..\srclib\apr\include\apr_mmap.h"\ + "..\..\srclib\apr\include\apr_network_io.h"\ + "..\..\srclib\apr\include\apr_poll.h"\ + "..\..\srclib\apr\include\apr_pools.h"\ + "..\..\srclib\apr\include\apr_ring.h"\ + "..\..\srclib\apr\include\apr_tables.h"\ + "..\..\srclib\apr\include\apr_thread_cond.h"\ + "..\..\srclib\apr\include\apr_thread_mutex.h"\ + "..\..\srclib\apr\include\apr_thread_proc.h"\ + "..\..\srclib\apr\include\apr_time.h"\ + "..\..\srclib\apr\include\apr_user.h"\ + "..\..\srclib\apr\include\apr_want.h"\ + ".\h2.h"\ + ".\h2_conn.h"\ + ".\h2_conn_io.h"\ + ".\h2_from_h1.h"\ + ".\h2_mplx.h"\ + ".\h2_private.h"\ + ".\h2_request.h"\ + ".\h2_session.h"\ + ".\h2_stream.h"\ + ".\h2_task.h"\ + ".\h2_util.h"\ + + +./h2_util.c : \ + "..\..\include\ap_config.h"\ + "..\..\include\ap_config_layout.h"\ + "..\..\include\ap_expr.h"\ + "..\..\include\ap_hooks.h"\ + "..\..\include\ap_mmn.h"\ + "..\..\include\ap_regex.h"\ + "..\..\include\ap_release.h"\ + "..\..\include\apache_noprobes.h"\ + "..\..\include\http_config.h"\ + "..\..\include\http_core.h"\ + "..\..\include\http_log.h"\ + "..\..\include\http_request.h"\ + "..\..\include\httpd.h"\ + "..\..\include\os.h"\ + "..\..\include\util_cfgtree.h"\ + "..\..\include\util_filter.h"\ + "..\..\srclib\apr-util\include\apr_buckets.h"\ + "..\..\srclib\apr-util\include\apr_hooks.h"\ + "..\..\srclib\apr-util\include\apr_optional.h"\ + "..\..\srclib\apr-util\include\apr_optional_hooks.h"\ + "..\..\srclib\apr-util\include\apr_uri.h"\ + "..\..\srclib\apr-util\include\apu.h"\ + "..\..\srclib\apr\include\apr.h"\ + "..\..\srclib\apr\include\apr_allocator.h"\ + "..\..\srclib\apr\include\apr_errno.h"\ + "..\..\srclib\apr\include\apr_file_info.h"\ + "..\..\srclib\apr\include\apr_file_io.h"\ + "..\..\srclib\apr\include\apr_general.h"\ + "..\..\srclib\apr\include\apr_hash.h"\ + "..\..\srclib\apr\include\apr_inherit.h"\ + "..\..\srclib\apr\include\apr_mmap.h"\ + "..\..\srclib\apr\include\apr_network_io.h"\ + "..\..\srclib\apr\include\apr_poll.h"\ + "..\..\srclib\apr\include\apr_pools.h"\ + "..\..\srclib\apr\include\apr_ring.h"\ + "..\..\srclib\apr\include\apr_strings.h"\ + "..\..\srclib\apr\include\apr_tables.h"\ + "..\..\srclib\apr\include\apr_thread_mutex.h"\ + "..\..\srclib\apr\include\apr_thread_proc.h"\ + "..\..\srclib\apr\include\apr_time.h"\ + "..\..\srclib\apr\include\apr_user.h"\ + "..\..\srclib\apr\include\apr_want.h"\ + ".\h2.h"\ + ".\h2_private.h"\ + ".\h2_request.h"\ + ".\h2_util.h"\ + + +./h2_workers.c : \ + "..\..\include\ap_config.h"\ + "..\..\include\ap_config_layout.h"\ + "..\..\include\ap_expr.h"\ + "..\..\include\ap_hooks.h"\ + "..\..\include\ap_mmn.h"\ + "..\..\include\ap_mpm.h"\ + "..\..\include\ap_regex.h"\ + "..\..\include\ap_release.h"\ + "..\..\include\apache_noprobes.h"\ + "..\..\include\http_config.h"\ + "..\..\include\http_core.h"\ + "..\..\include\http_log.h"\ + 
"..\..\include\httpd.h"\ + "..\..\include\mpm_common.h"\ + "..\..\include\os.h"\ + "..\..\include\scoreboard.h"\ + "..\..\include\util_cfgtree.h"\ + "..\..\include\util_filter.h"\ + "..\..\srclib\apr-util\include\apr_buckets.h"\ + "..\..\srclib\apr-util\include\apr_hooks.h"\ + "..\..\srclib\apr-util\include\apr_optional.h"\ + "..\..\srclib\apr-util\include\apr_optional_hooks.h"\ + "..\..\srclib\apr-util\include\apr_queue.h"\ + "..\..\srclib\apr-util\include\apr_uri.h"\ + "..\..\srclib\apr-util\include\apu.h"\ + "..\..\srclib\apr\include\apr.h"\ + "..\..\srclib\apr\include\apr_allocator.h"\ + "..\..\srclib\apr\include\apr_atomic.h"\ + "..\..\srclib\apr\include\apr_dso.h"\ + "..\..\srclib\apr\include\apr_errno.h"\ + "..\..\srclib\apr\include\apr_file_info.h"\ + "..\..\srclib\apr\include\apr_file_io.h"\ + "..\..\srclib\apr\include\apr_general.h"\ + "..\..\srclib\apr\include\apr_global_mutex.h"\ + "..\..\srclib\apr\include\apr_hash.h"\ + "..\..\srclib\apr\include\apr_inherit.h"\ + "..\..\srclib\apr\include\apr_mmap.h"\ + "..\..\srclib\apr\include\apr_network_io.h"\ + "..\..\srclib\apr\include\apr_poll.h"\ + "..\..\srclib\apr\include\apr_pools.h"\ + "..\..\srclib\apr\include\apr_portable.h"\ + "..\..\srclib\apr\include\apr_proc_mutex.h"\ + "..\..\srclib\apr\include\apr_ring.h"\ + "..\..\srclib\apr\include\apr_shm.h"\ + "..\..\srclib\apr\include\apr_tables.h"\ + "..\..\srclib\apr\include\apr_thread_cond.h"\ + "..\..\srclib\apr\include\apr_thread_mutex.h"\ + "..\..\srclib\apr\include\apr_thread_proc.h"\ + "..\..\srclib\apr\include\apr_time.h"\ + "..\..\srclib\apr\include\apr_user.h"\ + "..\..\srclib\apr\include\apr_want.h"\ + ".\h2.h"\ + ".\h2_mplx.h"\ + ".\h2_private.h"\ + ".\h2_task.h"\ + ".\h2_workers.h"\ + + +..\..\build\win32\httpd.rc : \ + "..\..\include\ap_release.h"\ + + +./mod_http2.c : \ + "..\..\include\ap_config.h"\ + "..\..\include\ap_config_layout.h"\ + "..\..\include\ap_expr.h"\ + "..\..\include\ap_hooks.h"\ + "..\..\include\ap_mmn.h"\ + "..\..\include\ap_regex.h"\ + "..\..\include\ap_release.h"\ + "..\..\include\apache_noprobes.h"\ + "..\..\include\http_config.h"\ + "..\..\include\http_core.h"\ + "..\..\include\http_log.h"\ + "..\..\include\http_protocol.h"\ + "..\..\include\http_request.h"\ + "..\..\include\httpd.h"\ + "..\..\include\os.h"\ + "..\..\include\util_cfgtree.h"\ + "..\..\include\util_filter.h"\ + "..\..\srclib\apr-util\include\apr_buckets.h"\ + "..\..\srclib\apr-util\include\apr_hooks.h"\ + "..\..\srclib\apr-util\include\apr_optional.h"\ + "..\..\srclib\apr-util\include\apr_optional_hooks.h"\ + "..\..\srclib\apr-util\include\apr_queue.h"\ + "..\..\srclib\apr-util\include\apr_uri.h"\ + "..\..\srclib\apr-util\include\apu.h"\ + "..\..\srclib\apr\include\apr.h"\ + "..\..\srclib\apr\include\apr_allocator.h"\ + "..\..\srclib\apr\include\apr_dso.h"\ + "..\..\srclib\apr\include\apr_errno.h"\ + "..\..\srclib\apr\include\apr_file_info.h"\ + "..\..\srclib\apr\include\apr_file_io.h"\ + "..\..\srclib\apr\include\apr_general.h"\ + "..\..\srclib\apr\include\apr_global_mutex.h"\ + "..\..\srclib\apr\include\apr_hash.h"\ + "..\..\srclib\apr\include\apr_inherit.h"\ + "..\..\srclib\apr\include\apr_mmap.h"\ + "..\..\srclib\apr\include\apr_network_io.h"\ + "..\..\srclib\apr\include\apr_poll.h"\ + "..\..\srclib\apr\include\apr_pools.h"\ + "..\..\srclib\apr\include\apr_portable.h"\ + "..\..\srclib\apr\include\apr_proc_mutex.h"\ + "..\..\srclib\apr\include\apr_ring.h"\ + "..\..\srclib\apr\include\apr_shm.h"\ + "..\..\srclib\apr\include\apr_strings.h"\ + 
"..\..\srclib\apr\include\apr_tables.h"\ + "..\..\srclib\apr\include\apr_thread_mutex.h"\ + "..\..\srclib\apr\include\apr_thread_proc.h"\ + "..\..\srclib\apr\include\apr_time.h"\ + "..\..\srclib\apr\include\apr_user.h"\ + "..\..\srclib\apr\include\apr_want.h"\ + ".\h2.h"\ + ".\h2_alt_svc.h"\ + ".\h2_config.h"\ + ".\h2_conn.h"\ + ".\h2_conn_io.h"\ + ".\h2_ctx.h"\ + ".\h2_filter.h"\ + ".\h2_h2.h"\ + ".\h2_mplx.h"\ + ".\h2_push.h"\ + ".\h2_request.h"\ + ".\h2_session.h"\ + ".\h2_stream.h"\ + ".\h2_switch.h"\ + ".\h2_task.h"\ + ".\h2_version.h"\ + ".\mod_http2.h"\ + diff --git a/modules/http2/mod_http2.dsp b/modules/http2/mod_http2.dsp new file mode 100644 index 0000000..d1c4322 --- /dev/null +++ b/modules/http2/mod_http2.dsp @@ -0,0 +1,195 @@ +# Microsoft Developer Studio Project File - Name="mod_http2" - Package Owner=<4> +# Microsoft Developer Studio Generated Build File, Format Version 6.00 +# ** DO NOT EDIT ** + +# TARGTYPE "Win32 (x86) Dynamic-Link Library" 0x0102 + +CFG=mod_http2 - Win32 Release +!MESSAGE This is not a valid makefile. To build this project using NMAKE, +!MESSAGE use the Export Makefile command and run +!MESSAGE +!MESSAGE NMAKE /f "mod_http2.mak". +!MESSAGE +!MESSAGE You can specify a configuration when running NMAKE +!MESSAGE by defining the macro CFG on the command line. For example: +!MESSAGE +!MESSAGE NMAKE /f "mod_http2.mak" CFG="mod_http2 - Win32 Release" +!MESSAGE +!MESSAGE Possible choices for configuration are: +!MESSAGE +!MESSAGE "mod_http2 - Win32 Release" (based on "Win32 (x86) Dynamic-Link Library") +!MESSAGE "mod_http2 - Win32 Debug" (based on "Win32 (x86) Dynamic-Link Library") +!MESSAGE + +# Begin Project +# PROP AllowPerConfigDependencies 0 +# PROP Scc_ProjName "" +# PROP Scc_LocalPath "" +CPP=cl.exe +MTL=midl.exe +RSC=rc.exe + +!IF "$(CFG)" == "mod_http2 - Win32 Release" + +# PROP BASE Use_MFC 0 +# PROP BASE Use_Debug_Libraries 0 +# PROP BASE Output_Dir "Release" +# PROP BASE Intermediate_Dir "Release" +# PROP BASE Target_Dir "" +# PROP Use_MFC 0 +# PROP Use_Debug_Libraries 0 +# PROP Output_Dir "Release" +# PROP Intermediate_Dir "Release" +# PROP Ignore_Export_Lib 0 +# PROP Target_Dir "" +# ADD BASE CPP /nologo /MD /W3 /O2 /D "WIN32" /D "NDEBUG" /D "_WINDOWS" /D "ssize_t=long" /FD /c +# ADD CPP /nologo /MD /W3 /O2 /Oy- /Zi /I "../ssl" /I "../../include" /I "../../srclib/apr/include" /I "../../srclib/apr-util/include" /I "../../srclib/nghttp2/lib/includes" /D "NDEBUG" /D "WIN32" /D "_WINDOWS" /D "ssize_t=long" /Fd"Release\mod_http2_src" /FD /c +# ADD BASE MTL /nologo /D "NDEBUG" /win32 +# ADD MTL /nologo /D "NDEBUG" /mktyplib203 /win32 +# ADD BASE RSC /l 0x409 /d "NDEBUG" +# ADD RSC /l 0x409 /fo"Release/mod_http2.res" /i "../../include" /i "../../srclib/apr/include" /d "NDEBUG" /d BIN_NAME="mod_http2.so" /d LONG_NAME="http2_module for Apache" +BSC32=bscmake.exe +# ADD BASE BSC32 /nologo +# ADD BSC32 /nologo +LINK32=link.exe +# ADD BASE LINK32 kernel32.lib nghttp2.lib /nologo /subsystem:windows /dll /libpath:"..\..\srclib\nghttp2\lib\MSVC_obj" /out:".\Release\mod_http2.so" /base:@..\..\os\win32\BaseAddr.ref,mod_http2.so +# ADD LINK32 kernel32.lib nghttp2.lib /nologo /subsystem:windows /dll /libpath:"..\..\srclib\nghttp2\lib\MSVC_obj" /incremental:no /debug /out:".\Release\mod_http2.so" /base:@..\..\os\win32\BaseAddr.ref,mod_http2.so /opt:ref +# Begin Special Build Tool +TargetPath=.\Release\mod_http2.so +SOURCE="$(InputPath)" +PostBuild_Desc=Embed .manifest +PostBuild_Cmds=if exist $(TargetPath).manifest mt.exe -manifest $(TargetPath).manifest 
-outputresource:$(TargetPath);2 +# End Special Build Tool + +!ELSEIF "$(CFG)" == "mod_http2 - Win32 Debug" + +# PROP BASE Use_MFC 0 +# PROP BASE Use_Debug_Libraries 1 +# PROP BASE Output_Dir "Debug" +# PROP BASE Intermediate_Dir "Debug" +# PROP BASE Target_Dir "" +# PROP Use_MFC 0 +# PROP Use_Debug_Libraries 1 +# PROP Output_Dir "Debug" +# PROP Intermediate_Dir "Debug" +# PROP Ignore_Export_Lib 0 +# PROP Target_Dir "" +# ADD BASE CPP /nologo /MDd /W3 /EHsc /Zi /Od /D "WIN32" /D "_DEBUG" /D "_WINDOWS" /D "ssize_t=long" /FD /c +# ADD CPP /nologo /MDd /W3 /EHsc /Zi /Od /I "../ssl" /I "../../include" /I "../../srclib/apr/include" /I "../../srclib/apr-util/include" /I "../../srclib/nghttp2/lib/includes" /D "_DEBUG" /D "WIN32" /D "_WINDOWS" /D "ssize_t=long" /Fd"Debug\mod_http2_src" /FD /c +# ADD BASE MTL /nologo /D "_DEBUG" /win32 +# ADD MTL /nologo /D "_DEBUG" /mktyplib203 /win32 +# ADD BASE RSC /l 0x409 /d "_DEBUG" +# ADD RSC /l 0x409 /fo"Debug/mod_http2.res" /i "../../include" /i "../../srclib/apr/include" /d "_DEBUG" /d BIN_NAME="mod_http2.so" /d LONG_NAME="http2_module for Apache" +BSC32=bscmake.exe +# ADD BASE BSC32 /nologo +# ADD BSC32 /nologo +LINK32=link.exe +# ADD BASE LINK32 kernel32.lib nghttp2d.lib /nologo /subsystem:windows /dll /libpath:"..\..\srclib\nghttp2\lib\MSVC_obj" /incremental:no /debug /out:".\Debug\mod_http2.so" /base:@..\..\os\win32\BaseAddr.ref,mod_http2.so +# ADD LINK32 kernel32.lib nghttp2d.lib /nologo /subsystem:windows /dll /libpath:"..\..\srclib\nghttp2\lib\MSVC_obj" /incremental:no /debug /out:".\Debug\mod_http2.so" /base:@..\..\os\win32\BaseAddr.ref,mod_http2.so +# Begin Special Build Tool +TargetPath=.\Debug\mod_http2.so +SOURCE="$(InputPath)" +PostBuild_Desc=Embed .manifest +PostBuild_Cmds=if exist $(TargetPath).manifest mt.exe -manifest $(TargetPath).manifest -outputresource:$(TargetPath);2 +# End Special Build Tool + +!ENDIF + +# Begin Target + +# Name "mod_http2 - Win32 Release" +# Name "mod_http2 - Win32 Debug" +# Begin Source File + +SOURCE=./h2_alt_svc.c +# End Source File +# Begin Source File + +SOURCE=./h2_bucket_beam.c +# End Source File +# Begin Source File + +SOURCE=./h2_bucket_eos.c +# End Source File +# Begin Source File + +SOURCE=./h2_config.c +# End Source File +# Begin Source File + +SOURCE=./h2_conn.c +# End Source File +# Begin Source File + +SOURCE=./h2_conn_io.c +# End Source File +# Begin Source File + +SOURCE=./h2_ctx.c +# End Source File +# Begin Source File + +SOURCE=./h2_filter.c +# End Source File +# Begin Source File + +SOURCE=./h2_from_h1.c +# End Source File +# Begin Source File + +SOURCE=./h2_h2.c +# End Source File +# Begin Source File + +SOURCE=./h2_mplx.c +# End Source File +# Begin Source File + +SOURCE=./h2_ngn_shed.c +# End Source File +# Begin Source File + +SOURCE=./h2_push.c +# End Source File +# Begin Source File + +SOURCE=./h2_request.c +# End Source File +# Begin Source File + +SOURCE=./h2_headers.c +# End Source File +# Begin Source File + +SOURCE=./h2_session.c +# End Source File +# Begin Source File + +SOURCE=./h2_stream.c +# End Source File +# Begin Source File + +SOURCE=./h2_switch.c +# End Source File +# Begin Source File + +SOURCE=./h2_task.c +# End Source File +# Begin Source File + +SOURCE=./h2_util.c +# End Source File +# Begin Source File + +SOURCE=./h2_workers.c +# End Source File +# Begin Source File + +SOURCE=./mod_http2.c +# End Source File +# Begin Source File + +SOURCE=..\..\build\win32\httpd.rc +# End Source File +# End Target +# End Project diff --git a/modules/http2/mod_http2.h 
b/modules/http2/mod_http2.h new file mode 100644 index 0000000..7a1b49a --- /dev/null +++ b/modules/http2/mod_http2.h @@ -0,0 +1,101 @@ +/* Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef __MOD_HTTP2_H__ +#define __MOD_HTTP2_H__ + +/** The http2_var_lookup() optional function retrieves HTTP2 environment + * variables. */ +APR_DECLARE_OPTIONAL_FN(char *, + http2_var_lookup, (apr_pool_t *, server_rec *, + conn_rec *, request_rec *, char *)); + +/** An optional function which returns non-zero if the given connection + * or its master connection is using HTTP/2. */ +APR_DECLARE_OPTIONAL_FN(int, + http2_is_h2, (conn_rec *)); + + +/******************************************************************************* + * HTTP/2 request engines + ******************************************************************************/ + +struct apr_thread_cond_t; + +typedef struct h2_req_engine h2_req_engine; + +typedef void http2_output_consumed(void *ctx, conn_rec *c, apr_off_t consumed); + +/** + * Initialize an h2_req_engine. The structure will be passed in but + * only the name and master are set. The function should initialize + * all fields. + * @param engine the allocated, partially filled structure + * @param r the first request to process, or NULL + */ +typedef apr_status_t http2_req_engine_init(h2_req_engine *engine, + const char *id, + const char *type, + apr_pool_t *pool, + apr_size_t req_buffer_size, + request_rec *r, + http2_output_consumed **pconsumed, + void **pbaton); + +/** + * Push a request to an engine with the specified name for further processing. + * If no such engine is available and einit is not NULL, einit is called + * with a new engine record and the caller is responsible for running the + * new engine instance. + * @param engine_type the type of the engine to add the request to + * @param r the request to push to an engine for processing + * @param einit an optional initialization callback for a new engine + * of the requested type, should no instance be available. + * By passing a non-NULL callback, the caller is willing + * to init and run a new engine itself. + * @return APR_SUCCESS iff the slave request was successfully added to an engine + */ +APR_DECLARE_OPTIONAL_FN(apr_status_t, + http2_req_engine_push, (const char *engine_type, + request_rec *r, + http2_req_engine_init *einit)); + +/** + * Get a new request for processing in this engine.
+ * @param engine the engine asking for a new request to process + * @param block if the call should block waiting for a request to come + * @param capacity how many parallel requests are acceptable + * @param pr the request that needs processing or NULL + * @return APR_SUCCESS if a new request was assigned + * APR_EAGAIN if no new request is available + * APR_EOF if the engine may shut down, as no more requests will be scheduled + * APR_ECONNABORTED if the engine needs to shut down immediately + */ +APR_DECLARE_OPTIONAL_FN(apr_status_t, + http2_req_engine_pull, (h2_req_engine *engine, + apr_read_type_e block, + int capacity, + request_rec **pr)); +APR_DECLARE_OPTIONAL_FN(void, + http2_req_engine_done, (h2_req_engine *engine, + conn_rec *rconn, + apr_status_t status)); + +APR_DECLARE_OPTIONAL_FN(void, + http2_get_num_workers, (server_rec *s, + int *minw, int *max)); + +#endif diff --git a/modules/http2/mod_http2.mak b/modules/http2/mod_http2.mak new file mode 100644 index 0000000..10ae887 --- /dev/null +++ b/modules/http2/mod_http2.mak @@ -0,0 +1,542 @@ +# Microsoft Developer Studio Generated NMAKE File, Based on mod_http2.dsp +!IF "$(CFG)" == "" +CFG=mod_http2 - Win32 Release +!MESSAGE No configuration specified. Defaulting to mod_http2 - Win32 Release. +!ENDIF + +!IF "$(CFG)" != "mod_http2 - Win32 Release" && "$(CFG)" != "mod_http2 - Win32 Debug" +!MESSAGE Invalid configuration "$(CFG)" specified. +!MESSAGE You can specify a configuration when running NMAKE +!MESSAGE by defining the macro CFG on the command line. For example: +!MESSAGE +!MESSAGE NMAKE /f "mod_http2.mak" CFG="mod_http2 - Win32 Release" +!MESSAGE +!MESSAGE Possible choices for configuration are: +!MESSAGE +!MESSAGE "mod_http2 - Win32 Release" (based on "Win32 (x86) Dynamic-Link Library") +!MESSAGE "mod_http2 - Win32 Debug" (based on "Win32 (x86) Dynamic-Link Library") +!MESSAGE +!ERROR An invalid configuration is specified.
+!ENDIF + +!IF "$(OS)" == "Windows_NT" +NULL= +!ELSE +NULL=nul +!ENDIF + +!IF "$(CFG)" == "mod_http2 - Win32 Release" + +OUTDIR=.\Release +INTDIR=.\Release +DS_POSTBUILD_DEP=$(INTDIR)\postbld.dep +# Begin Custom Macros +OutDir=.\Release +# End Custom Macros + +!IF "$(RECURSE)" == "0" + +ALL : "$(OUTDIR)\mod_http2.so" "$(DS_POSTBUILD_DEP)" + +!ELSE + +ALL : "libhttpd - Win32 Release" "libaprutil - Win32 Release" "libapr - Win32 Release" "$(OUTDIR)\mod_http2.so" "$(DS_POSTBUILD_DEP)" + +!ENDIF + +!IF "$(RECURSE)" == "1" +CLEAN :"libapr - Win32 ReleaseCLEAN" "libaprutil - Win32 ReleaseCLEAN" "libhttpd - Win32 ReleaseCLEAN" +!ELSE +CLEAN : +!ENDIF + -@erase "$(INTDIR)\h2_alt_svc.obj" + -@erase "$(INTDIR)\h2_bucket_beam.obj" + -@erase "$(INTDIR)\h2_bucket_eos.obj" + -@erase "$(INTDIR)\h2_config.obj" + -@erase "$(INTDIR)\h2_conn.obj" + -@erase "$(INTDIR)\h2_conn_io.obj" + -@erase "$(INTDIR)\h2_ctx.obj" + -@erase "$(INTDIR)\h2_filter.obj" + -@erase "$(INTDIR)\h2_from_h1.obj" + -@erase "$(INTDIR)\h2_h2.obj" + -@erase "$(INTDIR)\h2_headers.obj" + -@erase "$(INTDIR)\h2_mplx.obj" + -@erase "$(INTDIR)\h2_ngn_shed.obj" + -@erase "$(INTDIR)\h2_push.obj" + -@erase "$(INTDIR)\h2_request.obj" + -@erase "$(INTDIR)\h2_session.obj" + -@erase "$(INTDIR)\h2_stream.obj" + -@erase "$(INTDIR)\h2_switch.obj" + -@erase "$(INTDIR)\h2_task.obj" + -@erase "$(INTDIR)\h2_util.obj" + -@erase "$(INTDIR)\h2_workers.obj" + -@erase "$(INTDIR)\mod_http2.obj" + -@erase "$(INTDIR)\mod_http2.res" + -@erase "$(INTDIR)\mod_http2_src.idb" + -@erase "$(INTDIR)\mod_http2_src.pdb" + -@erase "$(OUTDIR)\mod_http2.exp" + -@erase "$(OUTDIR)\mod_http2.lib" + -@erase "$(OUTDIR)\mod_http2.pdb" + -@erase "$(OUTDIR)\mod_http2.so" + +"$(OUTDIR)" : + if not exist "$(OUTDIR)/$(NULL)" mkdir "$(OUTDIR)" + +CPP=cl.exe +CPP_PROJ=/nologo /MD /W3 /Zi /O2 /Oy- /I "../ssl" /I "../../include" /I "../../srclib/apr/include" /I "../../srclib/apr-util/include" /I "../../srclib/nghttp2/lib/includes" /D "NDEBUG" /D "WIN32" /D "_WINDOWS" /D ssize_t=long /Fo"$(INTDIR)\\" /Fd"$(INTDIR)\mod_http2_src" /FD /c + +.c{$(INTDIR)}.obj:: + $(CPP) @<< + $(CPP_PROJ) $< +<< + +.cpp{$(INTDIR)}.obj:: + $(CPP) @<< + $(CPP_PROJ) $< +<< + +.cxx{$(INTDIR)}.obj:: + $(CPP) @<< + $(CPP_PROJ) $< +<< + +.c{$(INTDIR)}.sbr:: + $(CPP) @<< + $(CPP_PROJ) $< +<< + +.cpp{$(INTDIR)}.sbr:: + $(CPP) @<< + $(CPP_PROJ) $< +<< + +.cxx{$(INTDIR)}.sbr:: + $(CPP) @<< + $(CPP_PROJ) $< +<< + +MTL=midl.exe +MTL_PROJ=/nologo /D "NDEBUG" /mktyplib203 /win32 +RSC=rc.exe +RSC_PROJ=/l 0x409 /fo"$(INTDIR)\mod_http2.res" /i "../../include" /i "../../srclib/apr/include" /d "NDEBUG" /d BIN_NAME="mod_http2.so" /d LONG_NAME="http2_module for Apache" +BSC32=bscmake.exe +BSC32_FLAGS=/nologo /o"$(OUTDIR)\mod_http2.bsc" +BSC32_SBRS= \ + +LINK32=link.exe +LINK32_FLAGS=kernel32.lib nghttp2.lib /nologo /subsystem:windows /dll /incremental:no /pdb:"$(OUTDIR)\mod_http2.pdb" /debug /out:"$(OUTDIR)\mod_http2.so" /implib:"$(OUTDIR)\mod_http2.lib" /libpath:"..\..\srclib\nghttp2\lib\MSVC_obj" /base:@..\..\os\win32\BaseAddr.ref,mod_http2.so /opt:ref +LINK32_OBJS= \ + "$(INTDIR)\h2_alt_svc.obj" \ + "$(INTDIR)\h2_bucket_beam.obj" \ + "$(INTDIR)\h2_bucket_eos.obj" \ + "$(INTDIR)\h2_config.obj" \ + "$(INTDIR)\h2_conn.obj" \ + "$(INTDIR)\h2_conn_io.obj" \ + "$(INTDIR)\h2_ctx.obj" \ + "$(INTDIR)\h2_filter.obj" \ + "$(INTDIR)\h2_from_h1.obj" \ + "$(INTDIR)\h2_h2.obj" \ + "$(INTDIR)\h2_headers.obj" \ + "$(INTDIR)\h2_mplx.obj" \ + "$(INTDIR)\h2_ngn_shed.obj" \ + "$(INTDIR)\h2_push.obj" \ + "$(INTDIR)\h2_request.obj" \ + 
"$(INTDIR)\h2_session.obj" \ + "$(INTDIR)\h2_stream.obj" \ + "$(INTDIR)\h2_switch.obj" \ + "$(INTDIR)\h2_task.obj" \ + "$(INTDIR)\h2_util.obj" \ + "$(INTDIR)\h2_workers.obj" \ + "$(INTDIR)\mod_http2.obj" \ + "$(INTDIR)\mod_http2.res" \ + "..\..\srclib\apr\Release\libapr-1.lib" \ + "..\..\srclib\apr-util\Release\libaprutil-1.lib" \ + "..\..\Release\libhttpd.lib" + +"$(OUTDIR)\mod_http2.so" : "$(OUTDIR)" $(DEF_FILE) $(LINK32_OBJS) + $(LINK32) @<< + $(LINK32_FLAGS) $(LINK32_OBJS) +<< + +TargetPath=.\Release\mod_http2.so +SOURCE="$(InputPath)" +PostBuild_Desc=Embed .manifest +DS_POSTBUILD_DEP=$(INTDIR)\postbld.dep + +# Begin Custom Macros +OutDir=.\Release +# End Custom Macros + +"$(DS_POSTBUILD_DEP)" : "$(OUTDIR)\mod_http2.so" + if exist .\Release\mod_http2.so.manifest mt.exe -manifest .\Release\mod_http2.so.manifest -outputresource:.\Release\mod_http2.so;2 + echo Helper for Post-build step > "$(DS_POSTBUILD_DEP)" + +!ELSEIF "$(CFG)" == "mod_http2 - Win32 Debug" + +OUTDIR=.\Debug +INTDIR=.\Debug +DS_POSTBUILD_DEP=$(INTDIR)\postbld.dep +# Begin Custom Macros +OutDir=.\Debug +# End Custom Macros + +!IF "$(RECURSE)" == "0" + +ALL : "$(OUTDIR)\mod_http2.so" "$(DS_POSTBUILD_DEP)" + +!ELSE + +ALL : "libhttpd - Win32 Debug" "libaprutil - Win32 Debug" "libapr - Win32 Debug" "$(OUTDIR)\mod_http2.so" "$(DS_POSTBUILD_DEP)" + +!ENDIF + +!IF "$(RECURSE)" == "1" +CLEAN :"libapr - Win32 DebugCLEAN" "libaprutil - Win32 DebugCLEAN" "libhttpd - Win32 DebugCLEAN" +!ELSE +CLEAN : +!ENDIF + -@erase "$(INTDIR)\h2_alt_svc.obj" + -@erase "$(INTDIR)\h2_bucket_beam.obj" + -@erase "$(INTDIR)\h2_bucket_eos.obj" + -@erase "$(INTDIR)\h2_config.obj" + -@erase "$(INTDIR)\h2_conn.obj" + -@erase "$(INTDIR)\h2_conn_io.obj" + -@erase "$(INTDIR)\h2_ctx.obj" + -@erase "$(INTDIR)\h2_filter.obj" + -@erase "$(INTDIR)\h2_from_h1.obj" + -@erase "$(INTDIR)\h2_h2.obj" + -@erase "$(INTDIR)\h2_headers.obj" + -@erase "$(INTDIR)\h2_mplx.obj" + -@erase "$(INTDIR)\h2_ngn_shed.obj" + -@erase "$(INTDIR)\h2_push.obj" + -@erase "$(INTDIR)\h2_request.obj" + -@erase "$(INTDIR)\h2_session.obj" + -@erase "$(INTDIR)\h2_stream.obj" + -@erase "$(INTDIR)\h2_switch.obj" + -@erase "$(INTDIR)\h2_task.obj" + -@erase "$(INTDIR)\h2_util.obj" + -@erase "$(INTDIR)\h2_workers.obj" + -@erase "$(INTDIR)\mod_http2.obj" + -@erase "$(INTDIR)\mod_http2.res" + -@erase "$(INTDIR)\mod_http2_src.idb" + -@erase "$(INTDIR)\mod_http2_src.pdb" + -@erase "$(OUTDIR)\mod_http2.exp" + -@erase "$(OUTDIR)\mod_http2.lib" + -@erase "$(OUTDIR)\mod_http2.pdb" + -@erase "$(OUTDIR)\mod_http2.so" + +"$(OUTDIR)" : + if not exist "$(OUTDIR)/$(NULL)" mkdir "$(OUTDIR)" + +CPP=cl.exe +CPP_PROJ=/nologo /MDd /W3 /Zi /Od /I "../ssl" /I "../../include" /I "../../srclib/apr/include" /I "../../srclib/apr-util/include" /I "../../srclib/nghttp2/lib/includes" /D "_DEBUG" /D "WIN32" /D "_WINDOWS" /D ssize_t=long /Fo"$(INTDIR)\\" /Fd"$(INTDIR)\mod_http2_src" /FD /EHsc /c + +.c{$(INTDIR)}.obj:: + $(CPP) @<< + $(CPP_PROJ) $< +<< + +.cpp{$(INTDIR)}.obj:: + $(CPP) @<< + $(CPP_PROJ) $< +<< + +.cxx{$(INTDIR)}.obj:: + $(CPP) @<< + $(CPP_PROJ) $< +<< + +.c{$(INTDIR)}.sbr:: + $(CPP) @<< + $(CPP_PROJ) $< +<< + +.cpp{$(INTDIR)}.sbr:: + $(CPP) @<< + $(CPP_PROJ) $< +<< + +.cxx{$(INTDIR)}.sbr:: + $(CPP) @<< + $(CPP_PROJ) $< +<< + +MTL=midl.exe +MTL_PROJ=/nologo /D "_DEBUG" /mktyplib203 /win32 +RSC=rc.exe +RSC_PROJ=/l 0x409 /fo"$(INTDIR)\mod_http2.res" /i "../../include" /i "../../srclib/apr/include" /d "_DEBUG" /d BIN_NAME="mod_http2.so" /d LONG_NAME="http2_module for Apache" +BSC32=bscmake.exe +BSC32_FLAGS=/nologo 
/o"$(OUTDIR)\mod_http2.bsc" +BSC32_SBRS= \ + +LINK32=link.exe +LINK32_FLAGS=kernel32.lib nghttp2d.lib /nologo /subsystem:windows /dll /incremental:no /pdb:"$(OUTDIR)\mod_http2.pdb" /debug /out:"$(OUTDIR)\mod_http2.so" /implib:"$(OUTDIR)\mod_http2.lib" /libpath:"..\..\srclib\nghttp2\lib\MSVC_obj" /base:@..\..\os\win32\BaseAddr.ref,mod_http2.so +LINK32_OBJS= \ + "$(INTDIR)\h2_alt_svc.obj" \ + "$(INTDIR)\h2_bucket_beam.obj" \ + "$(INTDIR)\h2_bucket_eos.obj" \ + "$(INTDIR)\h2_config.obj" \ + "$(INTDIR)\h2_conn.obj" \ + "$(INTDIR)\h2_conn_io.obj" \ + "$(INTDIR)\h2_ctx.obj" \ + "$(INTDIR)\h2_filter.obj" \ + "$(INTDIR)\h2_from_h1.obj" \ + "$(INTDIR)\h2_h2.obj" \ + "$(INTDIR)\h2_headers.obj" \ + "$(INTDIR)\h2_mplx.obj" \ + "$(INTDIR)\h2_ngn_shed.obj" \ + "$(INTDIR)\h2_push.obj" \ + "$(INTDIR)\h2_request.obj" \ + "$(INTDIR)\h2_session.obj" \ + "$(INTDIR)\h2_stream.obj" \ + "$(INTDIR)\h2_switch.obj" \ + "$(INTDIR)\h2_task.obj" \ + "$(INTDIR)\h2_util.obj" \ + "$(INTDIR)\h2_workers.obj" \ + "$(INTDIR)\mod_http2.obj" \ + "$(INTDIR)\mod_http2.res" \ + "..\..\srclib\apr\Debug\libapr-1.lib" \ + "..\..\srclib\apr-util\Debug\libaprutil-1.lib" \ + "..\..\Debug\libhttpd.lib" + +"$(OUTDIR)\mod_http2.so" : "$(OUTDIR)" $(DEF_FILE) $(LINK32_OBJS) + $(LINK32) @<< + $(LINK32_FLAGS) $(LINK32_OBJS) +<< + +TargetPath=.\Debug\mod_http2.so +SOURCE="$(InputPath)" +PostBuild_Desc=Embed .manifest +DS_POSTBUILD_DEP=$(INTDIR)\postbld.dep + +# Begin Custom Macros +OutDir=.\Debug +# End Custom Macros + +"$(DS_POSTBUILD_DEP)" : "$(OUTDIR)\mod_http2.so" + if exist .\Debug\mod_http2.so.manifest mt.exe -manifest .\Debug\mod_http2.so.manifest -outputresource:.\Debug\mod_http2.so;2 + echo Helper for Post-build step > "$(DS_POSTBUILD_DEP)" + +!ENDIF + + +!IF "$(NO_EXTERNAL_DEPS)" != "1" +!IF EXISTS("mod_http2.dep") +!INCLUDE "mod_http2.dep" +!ELSE +!MESSAGE Warning: cannot find "mod_http2.dep" +!ENDIF +!ENDIF + + +!IF "$(CFG)" == "mod_http2 - Win32 Release" || "$(CFG)" == "mod_http2 - Win32 Debug" + +!IF "$(CFG)" == "mod_http2 - Win32 Release" + +"libapr - Win32 Release" : + cd ".\..\..\srclib\apr" + $(MAKE) /$(MAKEFLAGS) /F ".\libapr.mak" CFG="libapr - Win32 Release" + cd "..\..\modules\http2" + +"libapr - Win32 ReleaseCLEAN" : + cd ".\..\..\srclib\apr" + $(MAKE) /$(MAKEFLAGS) /F ".\libapr.mak" CFG="libapr - Win32 Release" RECURSE=1 CLEAN + cd "..\..\modules\http2" + +!ELSEIF "$(CFG)" == "mod_http2 - Win32 Debug" + +"libapr - Win32 Debug" : + cd ".\..\..\srclib\apr" + $(MAKE) /$(MAKEFLAGS) /F ".\libapr.mak" CFG="libapr - Win32 Debug" + cd "..\..\modules\http2" + +"libapr - Win32 DebugCLEAN" : + cd ".\..\..\srclib\apr" + $(MAKE) /$(MAKEFLAGS) /F ".\libapr.mak" CFG="libapr - Win32 Debug" RECURSE=1 CLEAN + cd "..\..\modules\http2" + +!ENDIF + +!IF "$(CFG)" == "mod_http2 - Win32 Release" + +"libaprutil - Win32 Release" : + cd ".\..\..\srclib\apr-util" + $(MAKE) /$(MAKEFLAGS) /F ".\libaprutil.mak" CFG="libaprutil - Win32 Release" + cd "..\..\modules\http2" + +"libaprutil - Win32 ReleaseCLEAN" : + cd ".\..\..\srclib\apr-util" + $(MAKE) /$(MAKEFLAGS) /F ".\libaprutil.mak" CFG="libaprutil - Win32 Release" RECURSE=1 CLEAN + cd "..\..\modules\http2" + +!ELSEIF "$(CFG)" == "mod_http2 - Win32 Debug" + +"libaprutil - Win32 Debug" : + cd ".\..\..\srclib\apr-util" + $(MAKE) /$(MAKEFLAGS) /F ".\libaprutil.mak" CFG="libaprutil - Win32 Debug" + cd "..\..\modules\http2" + +"libaprutil - Win32 DebugCLEAN" : + cd ".\..\..\srclib\apr-util" + $(MAKE) /$(MAKEFLAGS) /F ".\libaprutil.mak" CFG="libaprutil - Win32 Debug" RECURSE=1 CLEAN + cd 
"..\..\modules\http2" + +!ENDIF + +!IF "$(CFG)" == "mod_http2 - Win32 Release" + +"libhttpd - Win32 Release" : + cd ".\..\.." + $(MAKE) /$(MAKEFLAGS) /F ".\libhttpd.mak" CFG="libhttpd - Win32 Release" + cd ".\modules\http2" + +"libhttpd - Win32 ReleaseCLEAN" : + cd ".\..\.." + $(MAKE) /$(MAKEFLAGS) /F ".\libhttpd.mak" CFG="libhttpd - Win32 Release" RECURSE=1 CLEAN + cd ".\modules\http2" + +!ELSEIF "$(CFG)" == "mod_http2 - Win32 Debug" + +"libhttpd - Win32 Debug" : + cd ".\..\.." + $(MAKE) /$(MAKEFLAGS) /F ".\libhttpd.mak" CFG="libhttpd - Win32 Debug" + cd ".\modules\http2" + +"libhttpd - Win32 DebugCLEAN" : + cd ".\..\.." + $(MAKE) /$(MAKEFLAGS) /F ".\libhttpd.mak" CFG="libhttpd - Win32 Debug" RECURSE=1 CLEAN + cd ".\modules\http2" + +!ENDIF + +SOURCE=./h2_alt_svc.c + +"$(INTDIR)\h2_alt_svc.obj" : $(SOURCE) "$(INTDIR)" + + +SOURCE=./h2_bucket_beam.c + +"$(INTDIR)/h2_bucket_beam.obj" : $(SOURCE) "$(INTDIR)" + + +SOURCE=./h2_bucket_eos.c + +"$(INTDIR)\h2_bucket_eos.obj" : $(SOURCE) "$(INTDIR)" + + +SOURCE=./h2_config.c + +"$(INTDIR)\h2_config.obj" : $(SOURCE) "$(INTDIR)" + + +SOURCE=./h2_conn.c + +"$(INTDIR)\h2_conn.obj" : $(SOURCE) "$(INTDIR)" + + +SOURCE=./h2_conn_io.c + +"$(INTDIR)\h2_conn_io.obj" : $(SOURCE) "$(INTDIR)" + + +SOURCE=./h2_ctx.c + +"$(INTDIR)\h2_ctx.obj" : $(SOURCE) "$(INTDIR)" + + +SOURCE=./h2_filter.c + +"$(INTDIR)\h2_filter.obj" : $(SOURCE) "$(INTDIR)" + + +SOURCE=./h2_from_h1.c + +"$(INTDIR)\h2_from_h1.obj" : $(SOURCE) "$(INTDIR)" + + +SOURCE=./h2_h2.c + +"$(INTDIR)\h2_h2.obj" : $(SOURCE) "$(INTDIR)" + + +SOURCE=./h2_headers.c + +"$(INTDIR)\h2_headers.obj" : $(SOURCE) "$(INTDIR)" + + +SOURCE=./h2_mplx.c + +"$(INTDIR)\h2_mplx.obj" : $(SOURCE) "$(INTDIR)" + + +SOURCE=./h2_ngn_shed.c + +"$(INTDIR)\h2_ngn_shed.obj" : $(SOURCE) "$(INTDIR)" + + +SOURCE=./h2_push.c + +"$(INTDIR)\h2_push.obj" : $(SOURCE) "$(INTDIR)" + + +SOURCE=./h2_request.c + +"$(INTDIR)\h2_request.obj" : $(SOURCE) "$(INTDIR)" + + +SOURCE=./h2_session.c + +"$(INTDIR)\h2_session.obj" : $(SOURCE) "$(INTDIR)" + + +SOURCE=./h2_stream.c + +"$(INTDIR)\h2_stream.obj" : $(SOURCE) "$(INTDIR)" + + +SOURCE=./h2_switch.c + +"$(INTDIR)\h2_switch.obj" : $(SOURCE) "$(INTDIR)" + + +SOURCE=./h2_task.c + +"$(INTDIR)\h2_task.obj" : $(SOURCE) "$(INTDIR)" + + +SOURCE=./h2_util.c + +"$(INTDIR)\h2_util.obj" : $(SOURCE) "$(INTDIR)" + + +SOURCE=./h2_workers.c + +"$(INTDIR)\h2_workers.obj" : $(SOURCE) "$(INTDIR)" + + +SOURCE=..\..\build\win32\httpd.rc + +!IF "$(CFG)" == "mod_http2 - Win32 Release" + + +"$(INTDIR)\mod_http2.res" : $(SOURCE) "$(INTDIR)" + $(RSC) /l 0x409 /fo"$(INTDIR)\mod_http2.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "NDEBUG" /d BIN_NAME="mod_http2.so" /d LONG_NAME="http2_module for Apache" $(SOURCE) + + +!ELSEIF "$(CFG)" == "mod_http2 - Win32 Debug" + + +"$(INTDIR)\mod_http2.res" : $(SOURCE) "$(INTDIR)" + $(RSC) /l 0x409 /fo"$(INTDIR)\mod_http2.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "_DEBUG" /d BIN_NAME="mod_http2.so" /d LONG_NAME="http2_module for Apache" $(SOURCE) + + +!ENDIF + +SOURCE=./mod_http2.c + +"$(INTDIR)\mod_http2.obj" : $(SOURCE) "$(INTDIR)" + + + +!ENDIF + diff --git a/modules/http2/mod_proxy_http2.c b/modules/http2/mod_proxy_http2.c new file mode 100644 index 0000000..a7e0dcd --- /dev/null +++ b/modules/http2/mod_proxy_http2.c @@ -0,0 +1,674 @@ +/* Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include <nghttp2/nghttp2.h> + +#include <httpd.h> +#include <mod_proxy.h> +#include "mod_http2.h" + + +#include "mod_proxy_http2.h" +#include "h2_request.h" +#include "h2_proxy_util.h" +#include "h2_version.h" +#include "h2_proxy_session.h" + +#define H2MIN(x,y) ((x) < (y) ? (x) : (y)) + +static void register_hook(apr_pool_t *p); + +AP_DECLARE_MODULE(proxy_http2) = { + STANDARD20_MODULE_STUFF, + NULL, /* create per-directory config structure */ + NULL, /* merge per-directory config structures */ + NULL, /* create per-server config structure */ + NULL, /* merge per-server config structures */ + NULL, /* command apr_table_t */ + register_hook, /* register hooks */ +#if defined(AP_MODULE_FLAG_NONE) + AP_MODULE_FLAG_ALWAYS_MERGE +#endif +}; + +/* Optional functions from mod_http2 */ +static int (*is_h2)(conn_rec *c); +static apr_status_t (*req_engine_push)(const char *name, request_rec *r, + http2_req_engine_init *einit); +static apr_status_t (*req_engine_pull)(h2_req_engine *engine, + apr_read_type_e block, + int capacity, + request_rec **pr); +static void (*req_engine_done)(h2_req_engine *engine, conn_rec *r_conn, + apr_status_t status); + +typedef struct h2_proxy_ctx { + conn_rec *owner; + apr_pool_t *pool; + request_rec *rbase; + server_rec *server; + const char *proxy_func; + char server_portstr[32]; + proxy_conn_rec *p_conn; + proxy_worker *worker; + proxy_server_conf *conf; + + h2_req_engine *engine; + const char *engine_id; + const char *engine_type; + apr_pool_t *engine_pool; + apr_size_t req_buffer_size; + h2_proxy_fifo *requests; + int capacity; + + unsigned standalone : 1; + unsigned is_ssl : 1; + unsigned flushall : 1; + + apr_status_t r_status; /* status of our first request work */ + h2_proxy_session *session; /* current http2 session against backend */ +} h2_proxy_ctx; + +static int h2_proxy_post_config(apr_pool_t *p, apr_pool_t *plog, + apr_pool_t *ptemp, server_rec *s) +{ + void *data = NULL; + const char *init_key = "mod_proxy_http2_init_counter"; + nghttp2_info *ngh2; + apr_status_t status = APR_SUCCESS; + (void)plog;(void)ptemp; + + apr_pool_userdata_get(&data, init_key, s->process->pool); + if ( data == NULL ) { + apr_pool_userdata_set((const void *)1, init_key, + apr_pool_cleanup_null, s->process->pool); + return APR_SUCCESS; + } + + ngh2 = nghttp2_version(0); + ap_log_error( APLOG_MARK, APLOG_INFO, 0, s, APLOGNO(03349) + "mod_proxy_http2 (v%s, nghttp2 %s), initializing...", + MOD_HTTP2_VERSION, ngh2? 
ngh2->version_str : "unknown"); + + is_h2 = APR_RETRIEVE_OPTIONAL_FN(http2_is_h2); + req_engine_push = APR_RETRIEVE_OPTIONAL_FN(http2_req_engine_push); + req_engine_pull = APR_RETRIEVE_OPTIONAL_FN(http2_req_engine_pull); + req_engine_done = APR_RETRIEVE_OPTIONAL_FN(http2_req_engine_done); + + /* we need all of them */ + if (!req_engine_push || !req_engine_pull || !req_engine_done) { + req_engine_push = NULL; + req_engine_pull = NULL; + req_engine_done = NULL; + } + + return status; +} + +/** + * canonicalize the url into the request, if it is meant for us. + * slightly modified copy from mod_http + */ +static int proxy_http2_canon(request_rec *r, char *url) +{ + char *host, *path, sport[7]; + char *search = NULL; + const char *err; + const char *scheme; + const char *http_scheme; + apr_port_t port, def_port; + + /* ap_port_of_scheme() */ + if (ap_cstr_casecmpn(url, "h2c:", 4) == 0) { + url += 4; + scheme = "h2c"; + http_scheme = "http"; + } + else if (ap_cstr_casecmpn(url, "h2:", 3) == 0) { + url += 3; + scheme = "h2"; + http_scheme = "https"; + } + else { + return DECLINED; + } + port = def_port = ap_proxy_port_of_scheme(http_scheme); + + ap_log_rerror(APLOG_MARK, APLOG_TRACE1, 0, r, + "HTTP2: canonicalising URL %s", url); + + /* do syntatic check. + * We break the URL into host, port, path, search + */ + err = ap_proxy_canon_netloc(r->pool, &url, NULL, NULL, &host, &port); + if (err) { + ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(03350) + "error parsing URL %s: %s", url, err); + return HTTP_BAD_REQUEST; + } + + /* + * now parse path/search args, according to rfc1738: + * process the path. + * + * In a reverse proxy, our URL has been processed, so canonicalise + * unless proxy-nocanon is set to say it's raw + * In a forward proxy, we have and MUST NOT MANGLE the original. + */ + switch (r->proxyreq) { + default: /* wtf are we doing here? */ + case PROXYREQ_REVERSE: + if (apr_table_get(r->notes, "proxy-nocanon")) { + path = url; /* this is the raw path */ + } + else { + path = ap_proxy_canonenc(r->pool, url, (int)strlen(url), + enc_path, 0, r->proxyreq); + search = r->args; + } + break; + case PROXYREQ_PROXY: + path = url; + break; + } + + if (path == NULL) { + return HTTP_BAD_REQUEST; + } + + if (port != def_port) { + apr_snprintf(sport, sizeof(sport), ":%d", port); + } + else { + sport[0] = '\0'; + } + + if (ap_strchr_c(host, ':')) { /* if literal IPv6 address */ + host = apr_pstrcat(r->pool, "[", host, "]", NULL); + } + r->filename = apr_pstrcat(r->pool, "proxy:", scheme, "://", host, sport, + "/", path, (search) ? "?" : "", (search) ? 
search : "", NULL); + return OK; +} + +static void out_consumed(void *baton, conn_rec *c, apr_off_t bytes) +{ + h2_proxy_ctx *ctx = baton; + + if (ctx->session) { + h2_proxy_session_update_window(ctx->session, c, bytes); + } +} + +static apr_status_t proxy_engine_init(h2_req_engine *engine, + const char *id, + const char *type, + apr_pool_t *pool, + apr_size_t req_buffer_size, + request_rec *r, + http2_output_consumed **pconsumed, + void **pctx) +{ + h2_proxy_ctx *ctx = ap_get_module_config(r->connection->conn_config, + &proxy_http2_module); + if (!ctx) { + ap_log_rerror(APLOG_MARK, APLOG_WARNING, 0, r, APLOGNO(03368) + "h2_proxy_session, engine init, no ctx found"); + return APR_ENOTIMPL; + } + + ctx->pool = pool; + ctx->engine = engine; + ctx->engine_id = id; + ctx->engine_type = type; + ctx->engine_pool = pool; + ctx->req_buffer_size = req_buffer_size; + ctx->capacity = H2MIN(100, h2_proxy_fifo_capacity(ctx->requests)); + + *pconsumed = out_consumed; + *pctx = ctx; + return APR_SUCCESS; +} + +static apr_status_t add_request(h2_proxy_session *session, request_rec *r) +{ + h2_proxy_ctx *ctx = session->user_data; + const char *url; + apr_status_t status; + + url = apr_table_get(r->notes, H2_PROXY_REQ_URL_NOTE); + apr_table_setn(r->notes, "proxy-source-port", apr_psprintf(r->pool, "%hu", + ctx->p_conn->connection->local_addr->port)); + status = h2_proxy_session_submit(session, url, r, ctx->standalone); + if (status != APR_SUCCESS) { + ap_log_cerror(APLOG_MARK, APLOG_ERR, status, r->connection, APLOGNO(03351) + "pass request body failed to %pI (%s) from %s (%s)", + ctx->p_conn->addr, ctx->p_conn->hostname ? + ctx->p_conn->hostname: "", session->c->client_ip, + session->c->remote_host ? session->c->remote_host: ""); + } + return status; +} + +static void request_done(h2_proxy_ctx *ctx, request_rec *r, + apr_status_t status, int touched) +{ + const char *task_id = apr_table_get(r->connection->notes, H2_TASK_ID_NOTE); + + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, r->connection, + "h2_proxy_session(%s): request done %s, touched=%d", + ctx->engine_id, task_id, touched); + if (status != APR_SUCCESS) { + if (!touched) { + /* untouched request, need rescheduling */ + status = h2_proxy_fifo_push(ctx->requests, r); + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, r->connection, + APLOGNO(03369) + "h2_proxy_session(%s): rescheduled request %s", + ctx->engine_id, task_id); + return; + } + else { + const char *uri; + uri = apr_uri_unparse(r->pool, &r->parsed_uri, 0); + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, r->connection, + APLOGNO(03471) "h2_proxy_session(%s): request %s -> %s " + "not complete, cannot repeat", + ctx->engine_id, task_id, uri); + } + } + + if (r == ctx->rbase) { + ctx->r_status = ((status == APR_SUCCESS)? APR_SUCCESS + : HTTP_SERVICE_UNAVAILABLE); + } + + if (req_engine_done && ctx->engine) { + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, r->connection, + APLOGNO(03370) + "h2_proxy_session(%s): finished request %s", + ctx->engine_id, task_id); + req_engine_done(ctx->engine, r->connection, status); + } +} + +static void session_req_done(h2_proxy_session *session, request_rec *r, + apr_status_t status, int touched) +{ + request_done(session->user_data, r, status, touched); +} + +static apr_status_t next_request(h2_proxy_ctx *ctx, int before_leave) +{ + if (h2_proxy_fifo_count(ctx->requests) > 0) { + return APR_SUCCESS; + } + else if (req_engine_pull && ctx->engine) { + apr_status_t status; + request_rec *r = NULL; + + status = req_engine_pull(ctx->engine, before_leave? 
+ APR_BLOCK_READ: APR_NONBLOCK_READ, + ctx->capacity, &r); + if (status == APR_SUCCESS && r) { + ap_log_cerror(APLOG_MARK, APLOG_TRACE3, status, ctx->owner, + "h2_proxy_engine(%s): pulled request (%s) %s", + ctx->engine_id, + before_leave? "before leave" : "regular", + r->the_request); + h2_proxy_fifo_push(ctx->requests, r); + } + return APR_STATUS_IS_EAGAIN(status)? APR_SUCCESS : status; + } + return APR_EOF; +} + +static apr_status_t proxy_engine_run(h2_proxy_ctx *ctx) { + apr_status_t status = OK; + int h2_front; + request_rec *r; + + /* Step Four: Send the Request in a new HTTP/2 stream and + * loop until we got the response or encounter errors. + */ + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, ctx->owner, + "eng(%s): setup session", ctx->engine_id); + h2_front = is_h2? is_h2(ctx->owner) : 0; + ctx->session = h2_proxy_session_setup(ctx->engine_id, ctx->p_conn, ctx->conf, + h2_front, 30, + h2_proxy_log2((int)ctx->req_buffer_size), + session_req_done); + if (!ctx->session) { + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, ctx->owner, + APLOGNO(03372) "session unavailable"); + return HTTP_SERVICE_UNAVAILABLE; + } + + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, ctx->owner, APLOGNO(03373) + "eng(%s): run session %s", ctx->engine_id, ctx->session->id); + ctx->session->user_data = ctx; + + while (!ctx->owner->aborted) { + if (APR_SUCCESS == h2_proxy_fifo_try_pull(ctx->requests, (void**)&r)) { + add_request(ctx->session, r); + } + + status = h2_proxy_session_process(ctx->session); + + if (status == APR_SUCCESS) { + apr_status_t s2; + /* ongoing processing, call again */ + if (ctx->session->remote_max_concurrent > 0 + && ctx->session->remote_max_concurrent != ctx->capacity) { + ctx->capacity = H2MIN((int)ctx->session->remote_max_concurrent, + h2_proxy_fifo_capacity(ctx->requests)); + } + s2 = next_request(ctx, 0); + if (s2 == APR_ECONNABORTED) { + /* master connection gone */ + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, s2, ctx->owner, + APLOGNO(03374) "eng(%s): pull request", + ctx->engine_id); + /* give notice that we're leaving and cancel all ongoing + * streams. */ + next_request(ctx, 1); + h2_proxy_session_cancel_all(ctx->session); + h2_proxy_session_process(ctx->session); + status = ctx->r_status = APR_SUCCESS; + break; + } + if ((h2_proxy_fifo_count(ctx->requests) == 0) + && h2_proxy_ihash_empty(ctx->session->streams)) { + break; + } + } + else { + /* end of processing, maybe error */ + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, ctx->owner, + APLOGNO(03375) "eng(%s): end of session %s", + ctx->engine_id, ctx->session->id); + /* + * Any open stream of that session needs to + * a) be reopened on the new session iff safe to do so + * b) reported as done (failed) otherwise + */ + h2_proxy_session_cleanup(ctx->session, session_req_done); + break; + } + } + + ctx->session->user_data = NULL; + ctx->session = NULL; + + return status; +} + +static apr_status_t push_request_somewhere(h2_proxy_ctx *ctx, request_rec *r) +{ + conn_rec *c = ctx->owner; + const char *engine_type, *hostname; + + hostname = (ctx->p_conn->ssl_hostname? + ctx->p_conn->ssl_hostname : ctx->p_conn->hostname); + engine_type = apr_psprintf(ctx->pool, "proxy_http2 %s%s", hostname, + ctx->server_portstr); + + if (c->master && req_engine_push && r && is_h2 && is_h2(c)) { + /* If we are have req_engine capabilities, push the handling of this + * request (e.g. slave connection) to a proxy_http2 engine which + * uses the same backend. We may be called to create an engine + * ourself. 
*/ + if (req_engine_push(engine_type, r, proxy_engine_init) == APR_SUCCESS) { + if (ctx->engine == NULL) { + /* request has been assigned to an engine in another thread */ + return SUSPENDED; + } + } + } + + if (!ctx->engine) { + /* No engine was available or has been initialized, handle this + * request just by ourself. */ + ctx->engine_id = apr_psprintf(ctx->pool, "eng-proxy-%ld", c->id); + ctx->engine_type = engine_type; + ctx->engine_pool = ctx->pool; + ctx->req_buffer_size = (32*1024); + ctx->standalone = 1; + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c, + "h2_proxy_http2(%ld): setup standalone engine for type %s", + c->id, engine_type); + } + else { + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c, + "H2: hosting engine %s", ctx->engine_id); + } + + return h2_proxy_fifo_push(ctx->requests, r); +} + +static int proxy_http2_handler(request_rec *r, + proxy_worker *worker, + proxy_server_conf *conf, + char *url, + const char *proxyname, + apr_port_t proxyport) +{ + const char *proxy_func; + char *locurl = url, *u; + apr_size_t slen; + int is_ssl = 0; + apr_status_t status; + h2_proxy_ctx *ctx; + apr_uri_t uri; + int reconnects = 0; + + /* find the scheme */ + if ((url[0] != 'h' && url[0] != 'H') || url[1] != '2') { + return DECLINED; + } + u = strchr(url, ':'); + if (u == NULL || u[1] != '/' || u[2] != '/' || u[3] == '\0') { + return DECLINED; + } + slen = (u - url); + switch(slen) { + case 2: + proxy_func = "H2"; + is_ssl = 1; + break; + case 3: + if (url[2] != 'c' && url[2] != 'C') { + return DECLINED; + } + proxy_func = "H2C"; + break; + default: + return DECLINED; + } + + ctx = apr_pcalloc(r->pool, sizeof(*ctx)); + ctx->owner = r->connection; + ctx->pool = r->pool; + ctx->rbase = r; + ctx->server = r->server; + ctx->proxy_func = proxy_func; + ctx->is_ssl = is_ssl; + ctx->worker = worker; + ctx->conf = conf; + ctx->flushall = apr_table_get(r->subprocess_env, "proxy-flushall")? 1 : 0; + ctx->r_status = HTTP_SERVICE_UNAVAILABLE; + + h2_proxy_fifo_set_create(&ctx->requests, ctx->pool, 100); + + ap_set_module_config(ctx->owner->conn_config, &proxy_http2_module, ctx); + + /* scheme says, this is for us. */ + apr_table_setn(ctx->rbase->notes, H2_PROXY_REQ_URL_NOTE, url); + ap_log_rerror(APLOG_MARK, APLOG_TRACE1, 0, ctx->rbase, + "H2: serving URL %s", url); + +run_connect: + /* Get a proxy_conn_rec from the worker, might be a new one, might + * be one still open from another request, or it might fail if the + * worker is stopped or in error. */ + if ((status = ap_proxy_acquire_connection(ctx->proxy_func, &ctx->p_conn, + ctx->worker, ctx->server)) != OK) { + goto cleanup; + } + + ctx->p_conn->is_ssl = ctx->is_ssl; + if (ctx->is_ssl && ctx->p_conn->connection) { + /* If there are some metadata on the connection (e.g. TLS alert), + * let mod_ssl detect them, and create a new connection below. + */ + apr_bucket_brigade *tmp_bb; + tmp_bb = apr_brigade_create(ctx->rbase->pool, + ctx->rbase->connection->bucket_alloc); + status = ap_get_brigade(ctx->p_conn->connection->input_filters, tmp_bb, + AP_MODE_SPECULATIVE, APR_NONBLOCK_READ, 1); + if (status != APR_SUCCESS && !APR_STATUS_IS_EAGAIN(status)) { + ctx->p_conn->close = 1; + } + apr_brigade_cleanup(tmp_bb); + } + + /* Step One: Determine the URL to connect to (might be a proxy), + * initialize the backend accordingly and determine the server + * port string we can expect in responses. 
*/ + if ((status = ap_proxy_determine_connection(ctx->pool, ctx->rbase, conf, worker, + ctx->p_conn, &uri, &locurl, + proxyname, proxyport, + ctx->server_portstr, + sizeof(ctx->server_portstr))) != OK) { + goto cleanup; + } + + /* If we are not already hosting an engine, try to push the request + * to an already existing engine or host a new engine here. */ + if (r && !ctx->engine) { + ctx->r_status = push_request_somewhere(ctx, r); + r = NULL; + if (ctx->r_status == SUSPENDED) { + /* request was pushed to another thread, leave processing here */ + goto cleanup; + } + } + + /* Step Two: Make the Connection (or check that an already existing + * socket is still usable). On success, we have a socket connected to + * backend->hostname. */ + if (ap_proxy_connect_backend(ctx->proxy_func, ctx->p_conn, ctx->worker, + ctx->server)) { + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, ctx->owner, APLOGNO(03352) + "H2: failed to make connection to backend: %s", + ctx->p_conn->hostname); + goto reconnect; + } + + /* Step Three: Create conn_rec for the socket we have open now. */ + if (!ctx->p_conn->connection) { + status = ap_proxy_connection_create_ex(ctx->proxy_func, + ctx->p_conn, ctx->rbase); + if (status != OK) { + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, ctx->owner, APLOGNO(03353) + "setup new connection: is_ssl=%d %s %s %s", + ctx->p_conn->is_ssl, ctx->p_conn->ssl_hostname, + locurl, ctx->p_conn->hostname); + goto reconnect; + } + + if (!ctx->p_conn->data) { + /* New conection: set a note on the connection what CN is + * requested and what protocol we want */ + if (ctx->p_conn->ssl_hostname) { + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, ctx->owner, + "set SNI to %s for (%s)", + ctx->p_conn->ssl_hostname, + ctx->p_conn->hostname); + apr_table_setn(ctx->p_conn->connection->notes, + "proxy-request-hostname", ctx->p_conn->ssl_hostname); + } + if (ctx->is_ssl) { + apr_table_setn(ctx->p_conn->connection->notes, + "proxy-request-alpn-protos", "h2"); + } + } + } + +run_session: + status = proxy_engine_run(ctx); + if (status == APR_SUCCESS) { + /* session and connection still ok */ + if (next_request(ctx, 1) == APR_SUCCESS) { + /* more requests, run again */ + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, ctx->owner, APLOGNO(03376) + "run_session, again"); + goto run_session; + } + /* done */ + ctx->engine = NULL; + } + +reconnect: + if (next_request(ctx, 1) == APR_SUCCESS) { + /* Still more to do, tear down old conn and start over */ + if (ctx->p_conn) { + ctx->p_conn->close = 1; + /*only in trunk so far */ + /*proxy_run_detach_backend(r, ctx->p_conn);*/ + ap_proxy_release_connection(ctx->proxy_func, ctx->p_conn, ctx->server); + ctx->p_conn = NULL; + } + ++reconnects; + if (reconnects < 5 && !ctx->owner->aborted) { + goto run_connect; + } + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, ctx->owner, APLOGNO(10023) + "giving up after %d reconnects, %d requests todo", + reconnects, h2_proxy_fifo_count(ctx->requests)); + } + +cleanup: + if (ctx->p_conn) { + if (status != APR_SUCCESS) { + /* close socket when errors happened or session shut down (EOF) */ + ctx->p_conn->close = 1; + } + /*only in trunk so far */ + /*proxy_run_detach_backend(ctx->rbase, ctx->p_conn);*/ + ap_proxy_release_connection(ctx->proxy_func, ctx->p_conn, ctx->server); + ctx->p_conn = NULL; + } + + /* Any requests will still have need to fail */ + while (APR_SUCCESS == h2_proxy_fifo_try_pull(ctx->requests, (void**)&r)) { + request_done(ctx, r, HTTP_SERVICE_UNAVAILABLE, 1); + } + + ap_set_module_config(ctx->owner->conn_config, 
&proxy_http2_module, NULL); + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, ctx->owner, + APLOGNO(03377) "leaving handler"); + return ctx->r_status; +} + +static void register_hook(apr_pool_t *p) +{ + ap_hook_post_config(h2_proxy_post_config, NULL, NULL, APR_HOOK_MIDDLE); + + proxy_hook_scheme_handler(proxy_http2_handler, NULL, NULL, APR_HOOK_FIRST); + proxy_hook_canon_handler(proxy_http2_canon, NULL, NULL, APR_HOOK_FIRST); +} + diff --git a/modules/http2/mod_proxy_http2.dep b/modules/http2/mod_proxy_http2.dep new file mode 100644 index 0000000..641fca6 --- /dev/null +++ b/modules/http2/mod_proxy_http2.dep @@ -0,0 +1,208 @@ +# Microsoft Developer Studio Generated Dependency File, included by mod_proxy_http2.mak + +./h2_proxy_session.c : \ + "..\..\include\ap_config.h"\ + "..\..\include\ap_config_layout.h"\ + "..\..\include\ap_expr.h"\ + "..\..\include\ap_hooks.h"\ + "..\..\include\ap_mmn.h"\ + "..\..\include\ap_mpm.h"\ + "..\..\include\ap_provider.h"\ + "..\..\include\ap_regex.h"\ + "..\..\include\ap_release.h"\ + "..\..\include\ap_slotmem.h"\ + "..\..\include\apache_noprobes.h"\ + "..\..\include\http_config.h"\ + "..\..\include\http_connection.h"\ + "..\..\include\http_core.h"\ + "..\..\include\http_log.h"\ + "..\..\include\http_main.h"\ + "..\..\include\http_protocol.h"\ + "..\..\include\http_request.h"\ + "..\..\include\http_vhost.h"\ + "..\..\include\httpd.h"\ + "..\..\include\mod_proxy.h"\ + "..\..\include\mpm_common.h"\ + "..\..\include\os.h"\ + "..\..\include\scoreboard.h"\ + "..\..\include\util_cfgtree.h"\ + "..\..\include\util_charset.h"\ + "..\..\include\util_ebcdic.h"\ + "..\..\include\util_filter.h"\ + "..\..\include\util_mutex.h"\ + "..\..\srclib\apr-util\include\apr_buckets.h"\ + "..\..\srclib\apr-util\include\apr_date.h"\ + "..\..\srclib\apr-util\include\apr_hooks.h"\ + "..\..\srclib\apr-util\include\apr_md5.h"\ + "..\..\srclib\apr-util\include\apr_optional.h"\ + "..\..\srclib\apr-util\include\apr_optional_hooks.h"\ + "..\..\srclib\apr-util\include\apr_reslist.h"\ + "..\..\srclib\apr-util\include\apr_strmatch.h"\ + "..\..\srclib\apr-util\include\apr_uri.h"\ + "..\..\srclib\apr-util\include\apr_uuid.h"\ + "..\..\srclib\apr-util\include\apr_xlate.h"\ + "..\..\srclib\apr-util\include\apu.h"\ + "..\..\srclib\apr\include\apr.h"\ + "..\..\srclib\apr\include\apr_allocator.h"\ + "..\..\srclib\apr\include\apr_dso.h"\ + "..\..\srclib\apr\include\apr_errno.h"\ + "..\..\srclib\apr\include\apr_file_info.h"\ + "..\..\srclib\apr\include\apr_file_io.h"\ + "..\..\srclib\apr\include\apr_fnmatch.h"\ + "..\..\srclib\apr\include\apr_general.h"\ + "..\..\srclib\apr\include\apr_global_mutex.h"\ + "..\..\srclib\apr\include\apr_hash.h"\ + "..\..\srclib\apr\include\apr_inherit.h"\ + "..\..\srclib\apr\include\apr_lib.h"\ + "..\..\srclib\apr\include\apr_mmap.h"\ + "..\..\srclib\apr\include\apr_network_io.h"\ + "..\..\srclib\apr\include\apr_poll.h"\ + "..\..\srclib\apr\include\apr_pools.h"\ + "..\..\srclib\apr\include\apr_portable.h"\ + "..\..\srclib\apr\include\apr_proc_mutex.h"\ + "..\..\srclib\apr\include\apr_ring.h"\ + "..\..\srclib\apr\include\apr_shm.h"\ + "..\..\srclib\apr\include\apr_strings.h"\ + "..\..\srclib\apr\include\apr_tables.h"\ + "..\..\srclib\apr\include\apr_thread_mutex.h"\ + "..\..\srclib\apr\include\apr_thread_proc.h"\ + "..\..\srclib\apr\include\apr_time.h"\ + "..\..\srclib\apr\include\apr_user.h"\ + "..\..\srclib\apr\include\apr_want.h"\ + "..\..\srclib\nghttp2\lib\includes\nghttp2\nghttp2.h"\ + "..\..\srclib\nghttp2\lib\includes\nghttp2\nghttp2ver.h"\ + ".\h2.h"\ + 
".\h2_proxy_session.h"\ + ".\h2_proxy_util.h"\ + ".\mod_http2.h"\ + + +./h2_proxy_util.c : \ + "..\..\include\ap_config.h"\ + "..\..\include\ap_config_layout.h"\ + "..\..\include\ap_expr.h"\ + "..\..\include\ap_hooks.h"\ + "..\..\include\ap_mmn.h"\ + "..\..\include\ap_regex.h"\ + "..\..\include\ap_release.h"\ + "..\..\include\apache_noprobes.h"\ + "..\..\include\http_config.h"\ + "..\..\include\http_core.h"\ + "..\..\include\http_log.h"\ + "..\..\include\http_request.h"\ + "..\..\include\httpd.h"\ + "..\..\include\os.h"\ + "..\..\include\util_cfgtree.h"\ + "..\..\include\util_filter.h"\ + "..\..\srclib\apr-util\include\apr_buckets.h"\ + "..\..\srclib\apr-util\include\apr_hooks.h"\ + "..\..\srclib\apr-util\include\apr_optional.h"\ + "..\..\srclib\apr-util\include\apr_optional_hooks.h"\ + "..\..\srclib\apr-util\include\apr_uri.h"\ + "..\..\srclib\apr-util\include\apu.h"\ + "..\..\srclib\apr\include\apr.h"\ + "..\..\srclib\apr\include\apr_allocator.h"\ + "..\..\srclib\apr\include\apr_errno.h"\ + "..\..\srclib\apr\include\apr_file_info.h"\ + "..\..\srclib\apr\include\apr_file_io.h"\ + "..\..\srclib\apr\include\apr_general.h"\ + "..\..\srclib\apr\include\apr_hash.h"\ + "..\..\srclib\apr\include\apr_inherit.h"\ + "..\..\srclib\apr\include\apr_mmap.h"\ + "..\..\srclib\apr\include\apr_network_io.h"\ + "..\..\srclib\apr\include\apr_poll.h"\ + "..\..\srclib\apr\include\apr_pools.h"\ + "..\..\srclib\apr\include\apr_ring.h"\ + "..\..\srclib\apr\include\apr_strings.h"\ + "..\..\srclib\apr\include\apr_tables.h"\ + "..\..\srclib\apr\include\apr_thread_mutex.h"\ + "..\..\srclib\apr\include\apr_thread_proc.h"\ + "..\..\srclib\apr\include\apr_time.h"\ + "..\..\srclib\apr\include\apr_user.h"\ + "..\..\srclib\apr\include\apr_want.h"\ + "..\..\srclib\nghttp2\lib\includes\nghttp2\nghttp2.h"\ + "..\..\srclib\nghttp2\lib\includes\nghttp2\nghttp2ver.h"\ + ".\h2.h"\ + ".\h2_proxy_util.h"\ + + +..\..\build\win32\httpd.rc : \ + "..\..\include\ap_release.h"\ + + +./mod_proxy_http2.c : \ + "..\..\include\ap_config.h"\ + "..\..\include\ap_config_layout.h"\ + "..\..\include\ap_expr.h"\ + "..\..\include\ap_hooks.h"\ + "..\..\include\ap_mmn.h"\ + "..\..\include\ap_provider.h"\ + "..\..\include\ap_regex.h"\ + "..\..\include\ap_release.h"\ + "..\..\include\ap_slotmem.h"\ + "..\..\include\apache_noprobes.h"\ + "..\..\include\http_config.h"\ + "..\..\include\http_connection.h"\ + "..\..\include\http_core.h"\ + "..\..\include\http_log.h"\ + "..\..\include\http_main.h"\ + "..\..\include\http_protocol.h"\ + "..\..\include\http_request.h"\ + "..\..\include\http_vhost.h"\ + "..\..\include\httpd.h"\ + "..\..\include\mod_proxy.h"\ + "..\..\include\os.h"\ + "..\..\include\util_cfgtree.h"\ + "..\..\include\util_charset.h"\ + "..\..\include\util_ebcdic.h"\ + "..\..\include\util_filter.h"\ + "..\..\include\util_mutex.h"\ + "..\..\srclib\apr-util\include\apr_buckets.h"\ + "..\..\srclib\apr-util\include\apr_date.h"\ + "..\..\srclib\apr-util\include\apr_hooks.h"\ + "..\..\srclib\apr-util\include\apr_md5.h"\ + "..\..\srclib\apr-util\include\apr_optional.h"\ + "..\..\srclib\apr-util\include\apr_optional_hooks.h"\ + "..\..\srclib\apr-util\include\apr_reslist.h"\ + "..\..\srclib\apr-util\include\apr_strmatch.h"\ + "..\..\srclib\apr-util\include\apr_uri.h"\ + "..\..\srclib\apr-util\include\apr_uuid.h"\ + "..\..\srclib\apr-util\include\apr_xlate.h"\ + "..\..\srclib\apr-util\include\apu.h"\ + "..\..\srclib\apr\include\apr.h"\ + "..\..\srclib\apr\include\apr_allocator.h"\ + "..\..\srclib\apr\include\apr_dso.h"\ + 
"..\..\srclib\apr\include\apr_errno.h"\ + "..\..\srclib\apr\include\apr_file_info.h"\ + "..\..\srclib\apr\include\apr_file_io.h"\ + "..\..\srclib\apr\include\apr_fnmatch.h"\ + "..\..\srclib\apr\include\apr_general.h"\ + "..\..\srclib\apr\include\apr_global_mutex.h"\ + "..\..\srclib\apr\include\apr_hash.h"\ + "..\..\srclib\apr\include\apr_inherit.h"\ + "..\..\srclib\apr\include\apr_lib.h"\ + "..\..\srclib\apr\include\apr_mmap.h"\ + "..\..\srclib\apr\include\apr_network_io.h"\ + "..\..\srclib\apr\include\apr_poll.h"\ + "..\..\srclib\apr\include\apr_pools.h"\ + "..\..\srclib\apr\include\apr_portable.h"\ + "..\..\srclib\apr\include\apr_proc_mutex.h"\ + "..\..\srclib\apr\include\apr_ring.h"\ + "..\..\srclib\apr\include\apr_shm.h"\ + "..\..\srclib\apr\include\apr_strings.h"\ + "..\..\srclib\apr\include\apr_tables.h"\ + "..\..\srclib\apr\include\apr_thread_mutex.h"\ + "..\..\srclib\apr\include\apr_thread_proc.h"\ + "..\..\srclib\apr\include\apr_time.h"\ + "..\..\srclib\apr\include\apr_user.h"\ + "..\..\srclib\apr\include\apr_want.h"\ + "..\..\srclib\nghttp2\lib\includes\nghttp2\nghttp2.h"\ + "..\..\srclib\nghttp2\lib\includes\nghttp2\nghttp2ver.h"\ + ".\h2.h"\ + ".\h2_proxy_session.h"\ + ".\h2_request.h"\ + ".\h2_proxy_util.h"\ + ".\h2_version.h"\ + ".\mod_http2.h"\ + ".\mod_proxy_http2.h"\ + diff --git a/modules/http2/mod_proxy_http2.dsp b/modules/http2/mod_proxy_http2.dsp new file mode 100644 index 0000000..5d6305f --- /dev/null +++ b/modules/http2/mod_proxy_http2.dsp @@ -0,0 +1,119 @@ +# Microsoft Developer Studio Project File - Name="mod_proxy_http2" - Package Owner=<4> +# Microsoft Developer Studio Generated Build File, Format Version 6.00 +# ** DO NOT EDIT ** + +# TARGTYPE "Win32 (x86) Dynamic-Link Library" 0x0102 + +CFG=mod_proxy_http2 - Win32 Release +!MESSAGE This is not a valid makefile. To build this project using NMAKE, +!MESSAGE use the Export Makefile command and run +!MESSAGE +!MESSAGE NMAKE /f "mod_proxy_http2.mak". +!MESSAGE +!MESSAGE You can specify a configuration when running NMAKE +!MESSAGE by defining the macro CFG on the command line. 
For example: +!MESSAGE +!MESSAGE NMAKE /f "mod_proxy_http2.mak" CFG="mod_proxy_http2 - Win32 Release" +!MESSAGE +!MESSAGE Possible choices for configuration are: +!MESSAGE +!MESSAGE "mod_proxy_http2 - Win32 Release" (based on "Win32 (x86) Dynamic-Link Library") +!MESSAGE "mod_proxy_http2 - Win32 Debug" (based on "Win32 (x86) Dynamic-Link Library") +!MESSAGE + +# Begin Project +# PROP AllowPerConfigDependencies 0 +# PROP Scc_ProjName "" +# PROP Scc_LocalPath "" +CPP=cl.exe +MTL=midl.exe +RSC=rc.exe + +!IF "$(CFG)" == "mod_proxy_http2 - Win32 Release" + +# PROP BASE Use_MFC 0 +# PROP BASE Use_Debug_Libraries 0 +# PROP BASE Output_Dir "Release" +# PROP BASE Intermediate_Dir "Release" +# PROP BASE Target_Dir "" +# PROP Use_MFC 0 +# PROP Use_Debug_Libraries 0 +# PROP Output_Dir "Release" +# PROP Intermediate_Dir "Release" +# PROP Ignore_Export_Lib 0 +# PROP Target_Dir "" +# ADD BASE CPP /nologo /MD /W3 /O2 /D "WIN32" /D "NDEBUG" /D "_WINDOWS" /D "ssize_t=long" /FD /c +# ADD CPP /nologo /MD /W3 /O2 /Oy- /Zi /I "../ssl" /I "../../include" /I "../../srclib/apr/include" /I "../../srclib/apr-util/include" /I "../../srclib/nghttp2/lib/includes" /D "NDEBUG" /D "WIN32" /D "_WINDOWS" /D "ssize_t=long" /Fd"Release\mod_proxy_http2_src" /FD /c +# ADD BASE MTL /nologo /D "NDEBUG" /win32 +# ADD MTL /nologo /D "NDEBUG" /mktyplib203 /win32 +# ADD BASE RSC /l 0x409 /d "NDEBUG" +# ADD RSC /l 0x409 /fo"Release/mod_proxy_http2.res" /i "../../include" /i "../../srclib/apr/include" /d "NDEBUG" /d BIN_NAME="mod_proxy_http2.so" /d LONG_NAME="http2_module for Apache" +BSC32=bscmake.exe +# ADD BASE BSC32 /nologo +# ADD BSC32 /nologo +LINK32=link.exe +# ADD BASE LINK32 kernel32.lib nghttp2.lib /nologo /subsystem:windows /dll /libpath:"..\..\srclib\nghttp2\lib\MSVC_obj" /out:".\Release\mod_proxy_http2.so" /base:@..\..\os\win32\BaseAddr.ref,mod_proxy_http2.so +# ADD LINK32 kernel32.lib nghttp2.lib /nologo /subsystem:windows /dll /libpath:"..\..\srclib\nghttp2\lib\MSVC_obj" /incremental:no /debug /out:".\Release\mod_proxy_http2.so" /base:@..\..\os\win32\BaseAddr.ref,mod_proxy_http2.so /opt:ref +# Begin Special Build Tool +TargetPath=.\Release\mod_proxy_http2.so +SOURCE="$(InputPath)" +PostBuild_Desc=Embed .manifest +PostBuild_Cmds=if exist $(TargetPath).manifest mt.exe -manifest $(TargetPath).manifest -outputresource:$(TargetPath);2 +# End Special Build Tool + +!ELSEIF "$(CFG)" == "mod_proxy_http2 - Win32 Debug" + +# PROP BASE Use_MFC 0 +# PROP BASE Use_Debug_Libraries 1 +# PROP BASE Output_Dir "Debug" +# PROP BASE Intermediate_Dir "Debug" +# PROP BASE Target_Dir "" +# PROP Use_MFC 0 +# PROP Use_Debug_Libraries 1 +# PROP Output_Dir "Debug" +# PROP Intermediate_Dir "Debug" +# PROP Ignore_Export_Lib 0 +# PROP Target_Dir "" +# ADD BASE CPP /nologo /MDd /W3 /EHsc /Zi /Od /D "WIN32" /D "_DEBUG" /D "_WINDOWS" /D "ssize_t=long" /FD /c +# ADD CPP /nologo /MDd /W3 /EHsc /Zi /Od /I "../ssl" /I "../../include" /I "../../srclib/apr/include" /I "../../srclib/apr-util/include" /I "../../srclib/nghttp2/lib/includes" /D "_DEBUG" /D "WIN32" /D "_WINDOWS" /D "ssize_t=long" /Fd"Debug\mod_proxy_http2_src" /FD /c +# ADD BASE MTL /nologo /D "_DEBUG" /win32 +# ADD MTL /nologo /D "_DEBUG" /mktyplib203 /win32 +# ADD BASE RSC /l 0x409 /d "_DEBUG" +# ADD RSC /l 0x409 /fo"Debug/mod_proxy_http2.res" /i "../../include" /i "../../srclib/apr/include" /d "_DEBUG" /d BIN_NAME="mod_proxy_http2.so" /d LONG_NAME="http2_module for Apache" +BSC32=bscmake.exe +# ADD BASE BSC32 /nologo +# ADD BSC32 /nologo +LINK32=link.exe +# ADD BASE LINK32 kernel32.lib 
nghttp2d.lib /nologo /subsystem:windows /dll /libpath:"..\..\srclib\nghttp2\lib\MSVC_obj" /incremental:no /debug /out:".\Debug\mod_proxy_http2.so" /base:@..\..\os\win32\BaseAddr.ref,mod_proxy_http2.so +# ADD LINK32 kernel32.lib nghttp2d.lib /nologo /subsystem:windows /dll /libpath:"..\..\srclib\nghttp2\lib\MSVC_obj" /incremental:no /debug /out:".\Debug\mod_proxy_http2.so" /base:@..\..\os\win32\BaseAddr.ref,mod_proxy_http2.so +# Begin Special Build Tool +TargetPath=.\Debug\mod_proxy_http2.so +SOURCE="$(InputPath)" +PostBuild_Desc=Embed .manifest +PostBuild_Cmds=if exist $(TargetPath).manifest mt.exe -manifest $(TargetPath).manifest -outputresource:$(TargetPath);2 +# End Special Build Tool + +!ENDIF + +# Begin Target + +# Name "mod_proxy_http2 - Win32 Release" +# Name "mod_proxy_http2 - Win32 Debug" +# Begin Source File + +SOURCE=./h2_proxy_session.c +# End Source File +# Begin Source File + +SOURCE=./h2_proxy_util.c +# End Source File +# Begin Source File + +SOURCE=./mod_proxy_http2.c +# End Source File +# Begin Source File + +SOURCE=..\..\build\win32\httpd.rc +# End Source File +# End Target +# End Project diff --git a/modules/http2/mod_proxy_http2.h b/modules/http2/mod_proxy_http2.h new file mode 100644 index 0000000..0048ed9 --- /dev/null +++ b/modules/http2/mod_proxy_http2.h @@ -0,0 +1,21 @@ +/* Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef __MOD_PROXY_HTTP2_H__ +#define __MOD_PROXY_HTTP2_H__ + + +#endif diff --git a/modules/http2/mod_proxy_http2.mak b/modules/http2/mod_proxy_http2.mak new file mode 100644 index 0000000..e8e0624 --- /dev/null +++ b/modules/http2/mod_proxy_http2.mak @@ -0,0 +1,427 @@ +# Microsoft Developer Studio Generated NMAKE File, Based on mod_proxy_http2.dsp +!IF "$(CFG)" == "" +CFG=mod_proxy_http2 - Win32 Release +!MESSAGE No configuration specified. Defaulting to mod_proxy_http2 - Win32 Release. +!ENDIF + +!IF "$(CFG)" != "mod_proxy_http2 - Win32 Release" && "$(CFG)" != "mod_proxy_http2 - Win32 Debug" +!MESSAGE Invalid configuration "$(CFG)" specified. +!MESSAGE You can specify a configuration when running NMAKE +!MESSAGE by defining the macro CFG on the command line. For example: +!MESSAGE +!MESSAGE NMAKE /f "mod_proxy_http2.mak" CFG="mod_proxy_http2 - Win32 Release" +!MESSAGE +!MESSAGE Possible choices for configuration are: +!MESSAGE +!MESSAGE "mod_proxy_http2 - Win32 Release" (based on "Win32 (x86) Dynamic-Link Library") +!MESSAGE "mod_proxy_http2 - Win32 Debug" (based on "Win32 (x86) Dynamic-Link Library") +!MESSAGE +!ERROR An invalid configuration is specified. 
+!ENDIF + +!IF "$(OS)" == "Windows_NT" +NULL= +!ELSE +NULL=nul +!ENDIF + +!IF "$(CFG)" == "mod_proxy_http2 - Win32 Release" + +OUTDIR=.\Release +INTDIR=.\Release +DS_POSTBUILD_DEP=$(INTDIR)\postbld.dep +# Begin Custom Macros +OutDir=.\Release +# End Custom Macros + +!IF "$(RECURSE)" == "0" + +ALL : "$(OUTDIR)\mod_proxy_http2.so" "$(DS_POSTBUILD_DEP)" + +!ELSE + +ALL : "mod_proxy - Win32 Release" "mod_http2 - Win32 Release" "libhttpd - Win32 Release" "libaprutil - Win32 Release" "libapr - Win32 Release" "$(OUTDIR)\mod_proxy_http2.so" "$(DS_POSTBUILD_DEP)" + +!ENDIF + +!IF "$(RECURSE)" == "1" +CLEAN :"libapr - Win32 ReleaseCLEAN" "libaprutil - Win32 ReleaseCLEAN" "libhttpd - Win32 ReleaseCLEAN" "mod_http2 - Win32 ReleaseCLEAN" "mod_proxy - Win32 ReleaseCLEAN" +!ELSE +CLEAN : +!ENDIF + -@erase "$(INTDIR)\h2_proxy_session.obj" + -@erase "$(INTDIR)\h2_proxy_util.obj" + -@erase "$(INTDIR)\mod_proxy_http2.obj" + -@erase "$(INTDIR)\mod_proxy_http2.res" + -@erase "$(INTDIR)\mod_proxy_http2_src.idb" + -@erase "$(INTDIR)\mod_proxy_http2_src.pdb" + -@erase "$(OUTDIR)\mod_proxy_http2.exp" + -@erase "$(OUTDIR)\mod_proxy_http2.lib" + -@erase "$(OUTDIR)\mod_proxy_http2.pdb" + -@erase "$(OUTDIR)\mod_proxy_http2.so" + +"$(OUTDIR)" : + if not exist "$(OUTDIR)/$(NULL)" mkdir "$(OUTDIR)" + +CPP=cl.exe +CPP_PROJ=/nologo /MD /W3 /Zi /O2 /Oy- /I "../ssl" /I "../../include" /I "../../srclib/apr/include" /I "../../srclib/apr-util/include" /I "../../srclib/nghttp2/lib/includes" /D "NDEBUG" /D "WIN32" /D "_WINDOWS" /D ssize_t=long /Fo"$(INTDIR)\\" /Fd"$(INTDIR)\mod_proxy_http2_src" /FD /c + +.c{$(INTDIR)}.obj:: + $(CPP) @<< + $(CPP_PROJ) $< +<< + +.cpp{$(INTDIR)}.obj:: + $(CPP) @<< + $(CPP_PROJ) $< +<< + +.cxx{$(INTDIR)}.obj:: + $(CPP) @<< + $(CPP_PROJ) $< +<< + +.c{$(INTDIR)}.sbr:: + $(CPP) @<< + $(CPP_PROJ) $< +<< + +.cpp{$(INTDIR)}.sbr:: + $(CPP) @<< + $(CPP_PROJ) $< +<< + +.cxx{$(INTDIR)}.sbr:: + $(CPP) @<< + $(CPP_PROJ) $< +<< + +MTL=midl.exe +MTL_PROJ=/nologo /D "NDEBUG" /mktyplib203 /win32 +RSC=rc.exe +RSC_PROJ=/l 0x409 /fo"$(INTDIR)\mod_proxy_http2.res" /i "../../include" /i "../../srclib/apr/include" /d "NDEBUG" /d BIN_NAME="mod_proxy_http2.so" /d LONG_NAME="http2_module for Apache" +BSC32=bscmake.exe +BSC32_FLAGS=/nologo /o"$(OUTDIR)\mod_proxy_http2.bsc" +BSC32_SBRS= \ + +LINK32=link.exe +LINK32_FLAGS=kernel32.lib nghttp2.lib /nologo /subsystem:windows /dll /incremental:no /pdb:"$(OUTDIR)\mod_proxy_http2.pdb" /debug /out:"$(OUTDIR)\mod_proxy_http2.so" /implib:"$(OUTDIR)\mod_proxy_http2.lib" /libpath:"..\..\srclib\nghttp2\lib\MSVC_obj" /base:@..\..\os\win32\BaseAddr.ref,mod_proxy_http2.so /opt:ref +LINK32_OBJS= \ + "$(INTDIR)\h2_proxy_session.obj" \ + "$(INTDIR)\h2_proxy_util.obj" \ + "$(INTDIR)\mod_proxy_http2.obj" \ + "$(INTDIR)\mod_proxy_http2.res" \ + "..\..\srclib\apr\Release\libapr-1.lib" \ + "..\..\srclib\apr-util\Release\libaprutil-1.lib" \ + "..\..\Release\libhttpd.lib" \ + "$(OUTDIR)\mod_http2.lib" \ + "..\proxy\Release\mod_proxy.lib" + +"$(OUTDIR)\mod_proxy_http2.so" : "$(OUTDIR)" $(DEF_FILE) $(LINK32_OBJS) + $(LINK32) @<< + $(LINK32_FLAGS) $(LINK32_OBJS) +<< + +TargetPath=.\Release\mod_proxy_http2.so +SOURCE="$(InputPath)" +PostBuild_Desc=Embed .manifest +DS_POSTBUILD_DEP=$(INTDIR)\postbld.dep + +# Begin Custom Macros +OutDir=.\Release +# End Custom Macros + +"$(DS_POSTBUILD_DEP)" : "$(OUTDIR)\mod_proxy_http2.so" + if exist .\Release\mod_proxy_http2.so.manifest mt.exe -manifest .\Release\mod_proxy_http2.so.manifest -outputresource:.\Release\mod_proxy_http2.so;2 + echo Helper for Post-build step > 
"$(DS_POSTBUILD_DEP)" + +!ELSEIF "$(CFG)" == "mod_proxy_http2 - Win32 Debug" + +OUTDIR=.\Debug +INTDIR=.\Debug +DS_POSTBUILD_DEP=$(INTDIR)\postbld.dep +# Begin Custom Macros +OutDir=.\Debug +# End Custom Macros + +!IF "$(RECURSE)" == "0" + +ALL : "$(OUTDIR)\mod_proxy_http2.so" "$(DS_POSTBUILD_DEP)" + +!ELSE + +ALL : "mod_proxy - Win32 Debug" "mod_http2 - Win32 Debug" "libhttpd - Win32 Debug" "libaprutil - Win32 Debug" "libapr - Win32 Debug" "$(OUTDIR)\mod_proxy_http2.so" "$(DS_POSTBUILD_DEP)" + +!ENDIF + +!IF "$(RECURSE)" == "1" +CLEAN :"libapr - Win32 DebugCLEAN" "libaprutil - Win32 DebugCLEAN" "libhttpd - Win32 DebugCLEAN" "mod_http2 - Win32 DebugCLEAN" "mod_proxy - Win32 DebugCLEAN" +!ELSE +CLEAN : +!ENDIF + -@erase "$(INTDIR)\h2_proxy_session.obj" + -@erase "$(INTDIR)\h2_proxy_util.obj" + -@erase "$(INTDIR)\mod_proxy_http2.obj" + -@erase "$(INTDIR)\mod_proxy_http2.res" + -@erase "$(INTDIR)\mod_proxy_http2_src.idb" + -@erase "$(INTDIR)\mod_proxy_http2_src.pdb" + -@erase "$(OUTDIR)\mod_proxy_http2.exp" + -@erase "$(OUTDIR)\mod_proxy_http2.lib" + -@erase "$(OUTDIR)\mod_proxy_http2.pdb" + -@erase "$(OUTDIR)\mod_proxy_http2.so" + +"$(OUTDIR)" : + if not exist "$(OUTDIR)/$(NULL)" mkdir "$(OUTDIR)" + +CPP=cl.exe +CPP_PROJ=/nologo /MDd /W3 /Zi /Od /I "../ssl" /I "../../include" /I "../../srclib/apr/include" /I "../../srclib/apr-util/include" /I "../../srclib/nghttp2/lib/includes" /D "_DEBUG" /D "WIN32" /D "_WINDOWS" /D ssize_t=long /Fo"$(INTDIR)\\" /Fd"$(INTDIR)\mod_proxy_http2_src" /FD /EHsc /c + +.c{$(INTDIR)}.obj:: + $(CPP) @<< + $(CPP_PROJ) $< +<< + +.cpp{$(INTDIR)}.obj:: + $(CPP) @<< + $(CPP_PROJ) $< +<< + +.cxx{$(INTDIR)}.obj:: + $(CPP) @<< + $(CPP_PROJ) $< +<< + +.c{$(INTDIR)}.sbr:: + $(CPP) @<< + $(CPP_PROJ) $< +<< + +.cpp{$(INTDIR)}.sbr:: + $(CPP) @<< + $(CPP_PROJ) $< +<< + +.cxx{$(INTDIR)}.sbr:: + $(CPP) @<< + $(CPP_PROJ) $< +<< + +MTL=midl.exe +MTL_PROJ=/nologo /D "_DEBUG" /mktyplib203 /win32 +RSC=rc.exe +RSC_PROJ=/l 0x409 /fo"$(INTDIR)\mod_proxy_http2.res" /i "../../include" /i "../../srclib/apr/include" /d "_DEBUG" /d BIN_NAME="mod_proxy_http2.so" /d LONG_NAME="http2_module for Apache" +BSC32=bscmake.exe +BSC32_FLAGS=/nologo /o"$(OUTDIR)\mod_proxy_http2.bsc" +BSC32_SBRS= \ + +LINK32=link.exe +LINK32_FLAGS=kernel32.lib nghttp2d.lib /nologo /subsystem:windows /dll /incremental:no /pdb:"$(OUTDIR)\mod_proxy_http2.pdb" /debug /out:"$(OUTDIR)\mod_proxy_http2.so" /implib:"$(OUTDIR)\mod_proxy_http2.lib" /libpath:"..\..\srclib\nghttp2\lib\MSVC_obj" /base:@..\..\os\win32\BaseAddr.ref,mod_proxy_http2.so +LINK32_OBJS= \ + "$(INTDIR)\h2_proxy_session.obj" \ + "$(INTDIR)\h2_proxy_util.obj" \ + "$(INTDIR)\mod_proxy_http2.obj" \ + "$(INTDIR)\mod_proxy_http2.res" \ + "..\..\srclib\apr\Debug\libapr-1.lib" \ + "..\..\srclib\apr-util\Debug\libaprutil-1.lib" \ + "..\..\Debug\libhttpd.lib" \ + "$(OUTDIR)\mod_http2.lib" \ + "..\proxy\Debug\mod_proxy.lib" + +"$(OUTDIR)\mod_proxy_http2.so" : "$(OUTDIR)" $(DEF_FILE) $(LINK32_OBJS) + $(LINK32) @<< + $(LINK32_FLAGS) $(LINK32_OBJS) +<< + +TargetPath=.\Debug\mod_proxy_http2.so +SOURCE="$(InputPath)" +PostBuild_Desc=Embed .manifest +DS_POSTBUILD_DEP=$(INTDIR)\postbld.dep + +# Begin Custom Macros +OutDir=.\Debug +# End Custom Macros + +"$(DS_POSTBUILD_DEP)" : "$(OUTDIR)\mod_proxy_http2.so" + if exist .\Debug\mod_proxy_http2.so.manifest mt.exe -manifest .\Debug\mod_proxy_http2.so.manifest -outputresource:.\Debug\mod_proxy_http2.so;2 + echo Helper for Post-build step > "$(DS_POSTBUILD_DEP)" + +!ENDIF + + +!IF "$(NO_EXTERNAL_DEPS)" != "1" +!IF 
EXISTS("mod_proxy_http2.dep") +!INCLUDE "mod_proxy_http2.dep" +!ELSE +!MESSAGE Warning: cannot find "mod_proxy_http2.dep" +!ENDIF +!ENDIF + + +!IF "$(CFG)" == "mod_proxy_http2 - Win32 Release" || "$(CFG)" == "mod_proxy_http2 - Win32 Debug" + +!IF "$(CFG)" == "mod_proxy_http2 - Win32 Release" + +"libapr - Win32 Release" : + cd ".\..\..\srclib\apr" + $(MAKE) /$(MAKEFLAGS) /F ".\libapr.mak" CFG="libapr - Win32 Release" + cd "..\..\modules\http2" + +"libapr - Win32 ReleaseCLEAN" : + cd ".\..\..\srclib\apr" + $(MAKE) /$(MAKEFLAGS) /F ".\libapr.mak" CFG="libapr - Win32 Release" RECURSE=1 CLEAN + cd "..\..\modules\http2" + +!ELSEIF "$(CFG)" == "mod_proxy_http2 - Win32 Debug" + +"libapr - Win32 Debug" : + cd ".\..\..\srclib\apr" + $(MAKE) /$(MAKEFLAGS) /F ".\libapr.mak" CFG="libapr - Win32 Debug" + cd "..\..\modules\http2" + +"libapr - Win32 DebugCLEAN" : + cd ".\..\..\srclib\apr" + $(MAKE) /$(MAKEFLAGS) /F ".\libapr.mak" CFG="libapr - Win32 Debug" RECURSE=1 CLEAN + cd "..\..\modules\http2" + +!ENDIF + +!IF "$(CFG)" == "mod_proxy_http2 - Win32 Release" + +"libaprutil - Win32 Release" : + cd ".\..\..\srclib\apr-util" + $(MAKE) /$(MAKEFLAGS) /F ".\libaprutil.mak" CFG="libaprutil - Win32 Release" + cd "..\..\modules\http2" + +"libaprutil - Win32 ReleaseCLEAN" : + cd ".\..\..\srclib\apr-util" + $(MAKE) /$(MAKEFLAGS) /F ".\libaprutil.mak" CFG="libaprutil - Win32 Release" RECURSE=1 CLEAN + cd "..\..\modules\http2" + +!ELSEIF "$(CFG)" == "mod_proxy_http2 - Win32 Debug" + +"libaprutil - Win32 Debug" : + cd ".\..\..\srclib\apr-util" + $(MAKE) /$(MAKEFLAGS) /F ".\libaprutil.mak" CFG="libaprutil - Win32 Debug" + cd "..\..\modules\http2" + +"libaprutil - Win32 DebugCLEAN" : + cd ".\..\..\srclib\apr-util" + $(MAKE) /$(MAKEFLAGS) /F ".\libaprutil.mak" CFG="libaprutil - Win32 Debug" RECURSE=1 CLEAN + cd "..\..\modules\http2" + +!ENDIF + +!IF "$(CFG)" == "mod_proxy_http2 - Win32 Release" + +"libhttpd - Win32 Release" : + cd ".\..\.." + $(MAKE) /$(MAKEFLAGS) /F ".\libhttpd.mak" CFG="libhttpd - Win32 Release" + cd ".\modules\http2" + +"libhttpd - Win32 ReleaseCLEAN" : + cd ".\..\.." + $(MAKE) /$(MAKEFLAGS) /F ".\libhttpd.mak" CFG="libhttpd - Win32 Release" RECURSE=1 CLEAN + cd ".\modules\http2" + +!ELSEIF "$(CFG)" == "mod_proxy_http2 - Win32 Debug" + +"libhttpd - Win32 Debug" : + cd ".\..\.." + $(MAKE) /$(MAKEFLAGS) /F ".\libhttpd.mak" CFG="libhttpd - Win32 Debug" + cd ".\modules\http2" + +"libhttpd - Win32 DebugCLEAN" : + cd ".\..\.." + $(MAKE) /$(MAKEFLAGS) /F ".\libhttpd.mak" CFG="libhttpd - Win32 Debug" RECURSE=1 CLEAN + cd ".\modules\http2" + +!ENDIF + +!IF "$(CFG)" == "mod_proxy_http2 - Win32 Release" + +"mod_http2 - Win32 Release" : + cd "." + $(MAKE) /$(MAKEFLAGS) /F ".\mod_http2.mak" CFG="mod_http2 - Win32 Release" + cd "." + +"mod_http2 - Win32 ReleaseCLEAN" : + cd "." + $(MAKE) /$(MAKEFLAGS) /F ".\mod_http2.mak" CFG="mod_http2 - Win32 Release" RECURSE=1 CLEAN + cd "." + +!ELSEIF "$(CFG)" == "mod_proxy_http2 - Win32 Debug" + +"mod_http2 - Win32 Debug" : + cd "." + $(MAKE) /$(MAKEFLAGS) /F ".\mod_http2.mak" CFG="mod_http2 - Win32 Debug" + cd "." + +"mod_http2 - Win32 DebugCLEAN" : + cd "." + $(MAKE) /$(MAKEFLAGS) /F ".\mod_http2.mak" CFG="mod_http2 - Win32 Debug" RECURSE=1 CLEAN + cd "." 
+ +!ENDIF + +!IF "$(CFG)" == "mod_proxy_http2 - Win32 Release" + +"mod_proxy - Win32 Release" : + cd ".\..\proxy" + $(MAKE) /$(MAKEFLAGS) /F ".\mod_proxy.mak" CFG="mod_proxy - Win32 Release" + cd "..\http2" + +"mod_proxy - Win32 ReleaseCLEAN" : + cd ".\..\proxy" + $(MAKE) /$(MAKEFLAGS) /F ".\mod_proxy.mak" CFG="mod_proxy - Win32 Release" RECURSE=1 CLEAN + cd "..\http2" + +!ELSEIF "$(CFG)" == "mod_proxy_http2 - Win32 Debug" + +"mod_proxy - Win32 Debug" : + cd ".\..\proxy" + $(MAKE) /$(MAKEFLAGS) /F ".\mod_proxy.mak" CFG="mod_proxy - Win32 Debug" + cd "..\http2" + +"mod_proxy - Win32 DebugCLEAN" : + cd ".\..\proxy" + $(MAKE) /$(MAKEFLAGS) /F ".\mod_proxy.mak" CFG="mod_proxy - Win32 Debug" RECURSE=1 CLEAN + cd "..\http2" + +!ENDIF + +SOURCE=./h2_proxy_session.c + +"$(INTDIR)\h2_proxy_session.obj" : $(SOURCE) "$(INTDIR)" + + +SOURCE=./h2_proxy_util.c + +"$(INTDIR)\h2_proxy_util.obj" : $(SOURCE) "$(INTDIR)" + + +SOURCE=..\..\build\win32\httpd.rc + +!IF "$(CFG)" == "mod_proxy_http2 - Win32 Release" + + +"$(INTDIR)\mod_proxy_http2.res" : $(SOURCE) "$(INTDIR)" + $(RSC) /l 0x409 /fo"$(INTDIR)\mod_proxy_http2.res" /i "../../include" /i "../../srclib/apr/include" /i "\Build11\httpd-2.4.21-dev-mph2\build\win32" /d "NDEBUG" /d BIN_NAME="mod_proxy_http2.so" /d LONG_NAME="http2_module for Apache" $(SOURCE) + + +!ELSEIF "$(CFG)" == "mod_proxy_http2 - Win32 Debug" + + +"$(INTDIR)\mod_proxy_http2.res" : $(SOURCE) "$(INTDIR)" + $(RSC) /l 0x409 /fo"$(INTDIR)\mod_proxy_http2.res" /i "../../include" /i "../../srclib/apr/include" /i "\Build11\httpd-2.4.21-dev-mph2\build\win32" /d "_DEBUG" /d BIN_NAME="mod_proxy_http2.so" /d LONG_NAME="http2_module for Apache" $(SOURCE) + + +!ENDIF + +SOURCE=./mod_proxy_http2.c + +"$(INTDIR)\mod_proxy_http2.obj" : $(SOURCE) "$(INTDIR)" + + + +!ENDIF + |