summaryrefslogtreecommitdiffstats
path: root/src/seastar/doc
diff options
context:
space:
mode:
Diffstat (limited to 'src/seastar/doc')
-rw-r--r--src/seastar/doc/CMakeLists.txt58
-rw-r--r--src/seastar/doc/Doxyfile.in2367
-rw-r--r--src/seastar/doc/DoxygenLayout.xml189
-rw-r--r--src/seastar/doc/building-arch.md12
-rw-r--r--src/seastar/doc/building-centos.md15
-rw-r--r--src/seastar/doc/building-docker.md34
-rw-r--r--src/seastar/doc/building-dpdk.md11
-rw-r--r--src/seastar/doc/building-fedora.md22
-rw-r--r--src/seastar/doc/building-ubuntu.md14
-rw-r--r--src/seastar/doc/compatibility.md144
-rw-r--r--src/seastar/doc/contributing.md30
-rwxr-xr-xsrc/seastar/doc/htmlsplit.py171
-rw-r--r--src/seastar/doc/io-properties-file.md41
-rw-r--r--src/seastar/doc/io-tester.md81
-rw-r--r--src/seastar/doc/lambda-coroutine-fiasco.md100
-rwxr-xr-xsrc/seastar/doc/md2html31
-rwxr-xr-xsrc/seastar/doc/md2pdf23
-rw-r--r--src/seastar/doc/mini-tutorial.md202
-rw-r--r--src/seastar/doc/native-stack.md54
-rw-r--r--src/seastar/doc/network-configuration.md65
-rw-r--r--src/seastar/doc/network-connection-load-balancing.md49
-rw-r--r--src/seastar/doc/prometheus.md72
-rw-r--r--src/seastar/doc/rpc-compression.md26
-rw-r--r--src/seastar/doc/rpc-streaming.md124
-rw-r--r--src/seastar/doc/rpc.md170
-rw-r--r--src/seastar/doc/shared-token-bucket.md109
-rw-r--r--src/seastar/doc/template.css113
-rw-r--r--src/seastar/doc/template.tex82
-rw-r--r--src/seastar/doc/tutorial.md2318
-rw-r--r--src/seastar/doc/websocket.md45
30 files changed, 6772 insertions, 0 deletions
diff --git a/src/seastar/doc/CMakeLists.txt b/src/seastar/doc/CMakeLists.txt
new file mode 100644
index 000000000..30cb08e30
--- /dev/null
+++ b/src/seastar/doc/CMakeLists.txt
@@ -0,0 +1,58 @@
+find_program (Seastar_DOXYGEN_EXECUTABLE doxygen)
+
+configure_file (
+ ${CMAKE_CURRENT_SOURCE_DIR}/Doxyfile.in
+ ${CMAKE_CURRENT_BINARY_DIR}/Doxyfile
+ @ONLY)
+
+configure_file (
+ ${CMAKE_CURRENT_SOURCE_DIR}/DoxygenLayout.xml
+ ${CMAKE_CURRENT_BINARY_DIR}/DoxygenLayout.xml
+ COPYONLY)
+
+add_custom_target (doc_api
+ COMMAND ${Seastar_DOXYGEN_EXECUTABLE} ${CMAKE_CURRENT_BINARY_DIR}/Doxyfile)
+
+add_custom_command (
+ OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/html/tutorial.html
+ DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/tutorial.md
+ COMMAND
+ ${CMAKE_CURRENT_SOURCE_DIR}/md2html
+ ${CMAKE_CURRENT_SOURCE_DIR}/tutorial.md
+ ${CMAKE_CURRENT_BINARY_DIR}/html/tutorial.html)
+
+add_custom_target (doc_tutorial_html
+ DEPENDS ${CMAKE_CURRENT_BINARY_DIR}/html/tutorial.html)
+
+add_custom_command (
+ OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/html/split
+ DEPENDS
+ # Necessary because file-level dependencies are not propagated for custom targets.
+ ${CMAKE_CURRENT_BINARY_DIR}/html/tutorial.html
+ doc_tutorial_html
+ COMMAND ${CMAKE_COMMAND} -E make_directory ${CMAKE_CURRENT_BINARY_DIR}/html/split
+ COMMAND ${CMAKE_CURRENT_SOURCE_DIR}/htmlsplit.py
+ --input ${CMAKE_CURRENT_BINARY_DIR}/html/tutorial.html
+ --output-dir ${CMAKE_CURRENT_BINARY_DIR}/html/split)
+
+add_custom_target (doc_tutorial_html_split
+ DEPENDS ${CMAKE_CURRENT_BINARY_DIR}/html/split)
+
+add_custom_command (
+ OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/tutorial.pdf
+ DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/tutorial.md
+ COMMAND
+ ${CMAKE_CURRENT_SOURCE_DIR}/md2pdf
+ ${CMAKE_CURRENT_SOURCE_DIR}/tutorial.md
+ ${CMAKE_CURRENT_BINARY_DIR}/tutorial.pdf)
+
+add_custom_target (doc_tutorial_pdf
+ DEPENDS ${CMAKE_CURRENT_BINARY_DIR}/tutorial.pdf)
+
+# Logical target for all documentation.
+add_custom_target (docs
+ DEPENDS
+ doc_api
+ doc_tutorial_html
+ doc_tutorial_html_split
+ doc_tutorial_pdf)
diff --git a/src/seastar/doc/Doxyfile.in b/src/seastar/doc/Doxyfile.in
new file mode 100644
index 000000000..3693558e2
--- /dev/null
+++ b/src/seastar/doc/Doxyfile.in
@@ -0,0 +1,2367 @@
+# Doxyfile 1.8.9.1
+
+# This file describes the settings to be used by the documentation system
+# doxygen (www.doxygen.org) for a project.
+#
+# All text after a double hash (##) is considered a comment and is placed in
+# front of the TAG it is preceding.
+#
+# All text after a single hash (#) is considered a comment and will be ignored.
+# The format is:
+# TAG = value [value, ...]
+# For lists, items can also be appended using:
+# TAG += value [value, ...]
+# Values that contain spaces should be placed between quotes (\" \").
+
+#---------------------------------------------------------------------------
+# Project related configuration options
+#---------------------------------------------------------------------------
+
+# This tag specifies the encoding used for all characters in the config file
+# that follow. The default is UTF-8 which is also the encoding used for all text
+# before the first occurrence of this tag. Doxygen uses libiconv (or the iconv
+# built into libc) for the transcoding. See http://www.gnu.org/software/libiconv
+# for the list of possible encodings.
+# The default value is: UTF-8.
+
+DOXYFILE_ENCODING = UTF-8
+
+# The PROJECT_NAME tag is a single word (or a sequence of words surrounded by
+# double-quotes, unless you are using Doxywizard) that should identify the
+# project for which the documentation is generated. This name is used in the
+# title of most generated pages and in a few other places.
+# The default value is: My Project.
+
+PROJECT_NAME = "Seastar"
+
+# The PROJECT_NUMBER tag can be used to enter a project or revision number. This
+# could be handy for archiving the generated documentation or if some version
+# control system is used.
+
+PROJECT_NUMBER =
+
+# Using the PROJECT_BRIEF tag one can provide an optional one line description
+# for a project that appears at the top of each page and should give viewer a
+# quick idea about the purpose of the project. Keep the description short.
+
+PROJECT_BRIEF = "High performance C++ framework for concurrent servers"
+
+# With the PROJECT_LOGO tag one can specify a logo or an icon that is included
+# in the documentation. The maximum height of the logo should not exceed 55
+# pixels and the maximum width should not exceed 200 pixels. Doxygen will copy
+# the logo to the output directory.
+
+PROJECT_LOGO =
+
+# The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute) path
+# into which the generated documentation will be written. If a relative path is
+# entered, it will be relative to the location where doxygen was started. If
+# left blank the current directory will be used.
+
+OUTPUT_DIRECTORY = @CMAKE_CURRENT_BINARY_DIR@
+
+# If the CREATE_SUBDIRS tag is set to YES then doxygen will create 4096 sub-
+# directories (in 2 levels) under the output directory of each output format and
+# will distribute the generated files over these directories. Enabling this
+# option can be useful when feeding doxygen a huge amount of source files, where
+# putting all generated files in the same directory would otherwise causes
+# performance problems for the file system.
+# The default value is: NO.
+
+CREATE_SUBDIRS = NO
+
+# If the ALLOW_UNICODE_NAMES tag is set to YES, doxygen will allow non-ASCII
+# characters to appear in the names of generated files. If set to NO, non-ASCII
+# characters will be escaped, for example _xE3_x81_x84 will be used for Unicode
+# U+3044.
+# The default value is: NO.
+
+ALLOW_UNICODE_NAMES = YES
+
+# The OUTPUT_LANGUAGE tag is used to specify the language in which all
+# documentation generated by doxygen is written. Doxygen will use this
+# information to generate all constant output in the proper language.
+# Possible values are: Afrikaans, Arabic, Armenian, Brazilian, Catalan, Chinese,
+# Chinese-Traditional, Croatian, Czech, Danish, Dutch, English (United States),
+# Esperanto, Farsi (Persian), Finnish, French, German, Greek, Hungarian,
+# Indonesian, Italian, Japanese, Japanese-en (Japanese with English messages),
+# Korean, Korean-en (Korean with English messages), Latvian, Lithuanian,
+# Macedonian, Norwegian, Persian (Farsi), Polish, Portuguese, Romanian, Russian,
+# Serbian, Serbian-Cyrillic, Slovak, Slovene, Spanish, Swedish, Turkish,
+# Ukrainian and Vietnamese.
+# The default value is: English.
+
+OUTPUT_LANGUAGE = English
+
+# If the BRIEF_MEMBER_DESC tag is set to YES, doxygen will include brief member
+# descriptions after the members that are listed in the file and class
+# documentation (similar to Javadoc). Set to NO to disable this.
+# The default value is: YES.
+
+BRIEF_MEMBER_DESC = YES
+
+# If the REPEAT_BRIEF tag is set to YES, doxygen will prepend the brief
+# description of a member or function before the detailed description
+#
+# Note: If both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the
+# brief descriptions will be completely suppressed.
+# The default value is: YES.
+
+REPEAT_BRIEF = YES
+
+# This tag implements a quasi-intelligent brief description abbreviator that is
+# used to form the text in various listings. Each string in this list, if found
+# as the leading text of the brief description, will be stripped from the text
+# and the result, after processing the whole list, is used as the annotated
+# text. Otherwise, the brief description is used as-is. If left blank, the
+# following values are used ($name is automatically replaced with the name of
+# the entity):The $name class, The $name widget, The $name file, is, provides,
+# specifies, contains, represents, a, an and the.
+
+ABBREVIATE_BRIEF =
+
+# If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then
+# doxygen will generate a detailed section even if there is only a brief
+# description.
+# The default value is: NO.
+
+ALWAYS_DETAILED_SEC = NO
+
+# If the INLINE_INHERITED_MEMB tag is set to YES, doxygen will show all
+# inherited members of a class in the documentation of that class as if those
+# members were ordinary class members. Constructors, destructors and assignment
+# operators of the base classes will not be shown.
+# The default value is: NO.
+
+INLINE_INHERITED_MEMB = YES
+
+# If the FULL_PATH_NAMES tag is set to YES, doxygen will prepend the full path
+# before files name in the file list and in the header files. If set to NO the
+# shortest path that makes the file name unique will be used
+# The default value is: YES.
+
+FULL_PATH_NAMES = YES
+
+# The STRIP_FROM_PATH tag can be used to strip a user-defined part of the path.
+# Stripping is only done if one of the specified strings matches the left-hand
+# part of the path. The tag can be used to show relative paths in the file list.
+# If left blank the directory from which doxygen is run is used as the path to
+# strip.
+#
+# Note that you can specify absolute paths here, but also relative paths, which
+# will be relative from the directory where doxygen is started.
+# This tag requires that the tag FULL_PATH_NAMES is set to YES.
+
+STRIP_FROM_PATH = @Seastar_SOURCE_DIR@/include @Seastar_BINARY_DIR@/gen/include @Seastar_SOURCE_DIR@/doc
+
+# The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of the
+# path mentioned in the documentation of a class, which tells the reader which
+# header file to include in order to use a class. If left blank only the name of
+# the header file containing the class definition is used. Otherwise one should
+# specify the list of include paths that are normally passed to the compiler
+# using the -I flag.
+
+STRIP_FROM_INC_PATH = @Seastar_SOURCE_DIR@/include @Seastar_BINARY_DIR@/gen/include
+
+# If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter (but
+# less readable) file names. This can be useful is your file systems doesn't
+# support long names like on DOS, Mac, or CD-ROM.
+# The default value is: NO.
+
+SHORT_NAMES = NO
+
+# If the JAVADOC_AUTOBRIEF tag is set to YES then doxygen will interpret the
+# first line (until the first dot) of a Javadoc-style comment as the brief
+# description. If set to NO, the Javadoc-style will behave just like regular Qt-
+# style comments (thus requiring an explicit @brief command for a brief
+# description.)
+# The default value is: NO.
+
+JAVADOC_AUTOBRIEF = NO
+
+# If the QT_AUTOBRIEF tag is set to YES then doxygen will interpret the first
+# line (until the first dot) of a Qt-style comment as the brief description. If
+# set to NO, the Qt-style will behave just like regular Qt-style comments (thus
+# requiring an explicit \brief command for a brief description.)
+# The default value is: NO.
+
+QT_AUTOBRIEF = NO
+
+# The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make doxygen treat a
+# multi-line C++ special comment block (i.e. a block of //! or /// comments) as
+# a brief description. This used to be the default behavior. The new default is
+# to treat a multi-line C++ comment block as a detailed description. Set this
+# tag to YES if you prefer the old behavior instead.
+#
+# Note that setting this tag to YES also means that rational rose comments are
+# not recognized any more.
+# The default value is: NO.
+
+MULTILINE_CPP_IS_BRIEF = NO
+
+# If the INHERIT_DOCS tag is set to YES then an undocumented member inherits the
+# documentation from any documented member that it re-implements.
+# The default value is: YES.
+
+INHERIT_DOCS = YES
+
+# If the SEPARATE_MEMBER_PAGES tag is set to YES then doxygen will produce a new
+# page for each member. If set to NO, the documentation of a member will be part
+# of the file/class/namespace that contains it.
+# The default value is: NO.
+
+SEPARATE_MEMBER_PAGES = NO
+
+# The TAB_SIZE tag can be used to set the number of spaces in a tab. Doxygen
+# uses this value to replace tabs by spaces in code fragments.
+# Minimum value: 1, maximum value: 16, default value: 4.
+
+TAB_SIZE = 8
+
+# This tag can be used to specify a number of aliases that act as commands in
+# the documentation. An alias has the form:
+# name=value
+# For example adding
+# "sideeffect=@par Side Effects:\n"
+# will allow you to put the command \sideeffect (or @sideeffect) in the
+# documentation, which will result in a user-defined paragraph with heading
+# "Side Effects:". You can put \n's in the value part of an alias to insert
+# newlines.
+
+ALIASES =
+
+# This tag can be used to specify a number of word-keyword mappings (TCL only).
+# A mapping has the form "name=value". For example adding "class=itcl::class"
+# will allow you to use the command class in the itcl::class meaning.
+
+TCL_SUBST =
+
+# Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C sources
+# only. Doxygen will then generate output that is more tailored for C. For
+# instance, some of the names that are used will be different. The list of all
+# members will be omitted, etc.
+# The default value is: NO.
+
+OPTIMIZE_OUTPUT_FOR_C = NO
+
+# Set the OPTIMIZE_OUTPUT_JAVA tag to YES if your project consists of Java or
+# Python sources only. Doxygen will then generate output that is more tailored
+# for that language. For instance, namespaces will be presented as packages,
+# qualified scopes will look different, etc.
+# The default value is: NO.
+
+OPTIMIZE_OUTPUT_JAVA = NO
+
+# Set the OPTIMIZE_FOR_FORTRAN tag to YES if your project consists of Fortran
+# sources. Doxygen will then generate output that is tailored for Fortran.
+# The default value is: NO.
+
+OPTIMIZE_FOR_FORTRAN = NO
+
+# Set the OPTIMIZE_OUTPUT_VHDL tag to YES if your project consists of VHDL
+# sources. Doxygen will then generate output that is tailored for VHDL.
+# The default value is: NO.
+
+OPTIMIZE_OUTPUT_VHDL = NO
+
+# Doxygen selects the parser to use depending on the extension of the files it
+# parses. With this tag you can assign which parser to use for a given
+# extension. Doxygen has a built-in mapping, but you can override or extend it
+# using this tag. The format is ext=language, where ext is a file extension, and
+# language is one of the parsers supported by doxygen: IDL, Java, Javascript,
+# C#, C, C++, D, PHP, Objective-C, Python, Fortran (fixed format Fortran:
+# FortranFixed, free formatted Fortran: FortranFree, unknown formatted Fortran:
+# Fortran. In the later case the parser tries to guess whether the code is fixed
+# or free formatted code, this is the default for Fortran type files), VHDL. For
+# instance to make doxygen treat .inc files as Fortran files (default is PHP),
+# and .f files as C (default is Fortran), use: inc=Fortran f=C.
+#
+# Note: For files without extension you can use no_extension as a placeholder.
+#
+# Note that for custom extensions you also need to set FILE_PATTERNS otherwise
+# the files are not read by doxygen.
+
+EXTENSION_MAPPING =
+
+# If the MARKDOWN_SUPPORT tag is enabled then doxygen pre-processes all comments
+# according to the Markdown format, which allows for more readable
+# documentation. See http://daringfireball.net/projects/markdown/ for details.
+# The output of markdown processing is further processed by doxygen, so you can
+# mix doxygen, HTML, and XML commands with Markdown formatting. Disable only in
+# case of backward compatibilities issues.
+# The default value is: YES.
+
+MARKDOWN_SUPPORT = YES
+
+# When enabled doxygen tries to link words that correspond to documented
+# classes, or namespaces to their corresponding documentation. Such a link can
+# be prevented in individual cases by putting a % sign in front of the word or
+# globally by setting AUTOLINK_SUPPORT to NO.
+# The default value is: YES.
+
+AUTOLINK_SUPPORT = YES
+
+# If you use STL classes (i.e. std::string, std::vector, etc.) but do not want
+# to include (a tag file for) the STL sources as input, then you should set this
+# tag to YES in order to let doxygen match functions declarations and
+# definitions whose arguments contain STL classes (e.g. func(std::string);
+# versus func(std::string) {}). This also make the inheritance and collaboration
+# diagrams that involve STL classes more complete and accurate.
+# The default value is: NO.
+
+BUILTIN_STL_SUPPORT = YES
+
+# If you use Microsoft's C++/CLI language, you should set this option to YES to
+# enable parsing support.
+# The default value is: NO.
+
+CPP_CLI_SUPPORT = NO
+
+# Set the SIP_SUPPORT tag to YES if your project consists of sip (see:
+# http://www.riverbankcomputing.co.uk/software/sip/intro) sources only. Doxygen
+# will parse them like normal C++ but will assume all classes use public instead
+# of private inheritance when no explicit protection keyword is present.
+# The default value is: NO.
+
+SIP_SUPPORT = NO
+
+# For Microsoft's IDL there are propget and propput attributes to indicate
+# getter and setter methods for a property. Setting this option to YES will make
+# doxygen to replace the get and set methods by a property in the documentation.
+# This will only work if the methods are indeed getting or setting a simple
+# type. If this is not the case, or you want to show the methods anyway, you
+# should set this option to NO.
+# The default value is: YES.
+
+IDL_PROPERTY_SUPPORT = YES
+
+# If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC
+# tag is set to YES then doxygen will reuse the documentation of the first
+# member in the group (if any) for the other members of the group. By default
+# all members of a group must be documented explicitly.
+# The default value is: NO.
+
+DISTRIBUTE_GROUP_DOC = NO
+
+# Set the SUBGROUPING tag to YES to allow class member groups of the same type
+# (for instance a group of public functions) to be put as a subgroup of that
+# type (e.g. under the Public Functions section). Set it to NO to prevent
+# subgrouping. Alternatively, this can be done per class using the
+# \nosubgrouping command.
+# The default value is: YES.
+
+SUBGROUPING = YES
+
+# When the INLINE_GROUPED_CLASSES tag is set to YES, classes, structs and unions
+# are shown inside the group in which they are included (e.g. using \ingroup)
+# instead of on a separate page (for HTML and Man pages) or section (for LaTeX
+# and RTF).
+#
+# Note that this feature does not work in combination with
+# SEPARATE_MEMBER_PAGES.
+# The default value is: NO.
+
+INLINE_GROUPED_CLASSES = NO
+
+# When the INLINE_SIMPLE_STRUCTS tag is set to YES, structs, classes, and unions
+# with only public data fields or simple typedef fields will be shown inline in
+# the documentation of the scope in which they are defined (i.e. file,
+# namespace, or group documentation), provided this scope is documented. If set
+# to NO, structs, classes, and unions are shown on a separate page (for HTML and
+# Man pages) or section (for LaTeX and RTF).
+# The default value is: NO.
+
+INLINE_SIMPLE_STRUCTS = YES
+
+# When TYPEDEF_HIDES_STRUCT tag is enabled, a typedef of a struct, union, or
+# enum is documented as struct, union, or enum with the name of the typedef. So
+# typedef struct TypeS {} TypeT, will appear in the documentation as a struct
+# with name TypeT. When disabled the typedef will appear as a member of a file,
+# namespace, or class. And the struct will be named TypeS. This can typically be
+# useful for C code in case the coding convention dictates that all compound
+# types are typedef'ed and only the typedef is referenced, never the tag name.
+# The default value is: NO.
+
+TYPEDEF_HIDES_STRUCT = NO
+
+# The size of the symbol lookup cache can be set using LOOKUP_CACHE_SIZE. This
+# cache is used to resolve symbols given their name and scope. Since this can be
+# an expensive process and often the same symbol appears multiple times in the
+# code, doxygen keeps a cache of pre-resolved symbols. If the cache is too small
+# doxygen will become slower. If the cache is too large, memory is wasted. The
+# cache size is given by this formula: 2^(16+LOOKUP_CACHE_SIZE). The valid range
+# is 0..9, the default is 0, corresponding to a cache size of 2^16=65536
+# symbols. At the end of a run doxygen will report the cache usage and suggest
+# the optimal cache size from a speed point of view.
+# Minimum value: 0, maximum value: 9, default value: 0.
+
+LOOKUP_CACHE_SIZE = 0
+
+#---------------------------------------------------------------------------
+# Build related configuration options
+#---------------------------------------------------------------------------
+
+# If the EXTRACT_ALL tag is set to YES, doxygen will assume all entities in
+# documentation are documented, even if no documentation was available. Private
+# class members and static file members will be hidden unless the
+# EXTRACT_PRIVATE respectively EXTRACT_STATIC tags are set to YES.
+# Note: This will also disable the warnings about undocumented members that are
+# normally produced when WARNINGS is set to YES.
+# The default value is: NO.
+
+EXTRACT_ALL = NO
+
+# If the EXTRACT_PRIVATE tag is set to YES, all private members of a class will
+# be included in the documentation.
+# The default value is: NO.
+
+EXTRACT_PRIVATE = NO
+
+# If the EXTRACT_PACKAGE tag is set to YES, all members with package or internal
+# scope will be included in the documentation.
+# The default value is: NO.
+
+EXTRACT_PACKAGE = NO
+
+# If the EXTRACT_STATIC tag is set to YES, all static members of a file will be
+# included in the documentation.
+# The default value is: NO.
+
+EXTRACT_STATIC = YES
+
+# If the EXTRACT_LOCAL_CLASSES tag is set to YES, classes (and structs) defined
+# locally in source files will be included in the documentation. If set to NO,
+# only classes defined in header files are included. Does not have any effect
+# for Java sources.
+# The default value is: YES.
+
+EXTRACT_LOCAL_CLASSES = NO
+
+# This flag is only useful for Objective-C code. If set to YES, local methods,
+# which are defined in the implementation section but not in the interface are
+# included in the documentation. If set to NO, only methods in the interface are
+# included.
+# The default value is: NO.
+
+EXTRACT_LOCAL_METHODS = NO
+
+# If this flag is set to YES, the members of anonymous namespaces will be
+# extracted and appear in the documentation as a namespace called
+# 'anonymous_namespace{file}', where file will be replaced with the base name of
+# the file that contains the anonymous namespace. By default anonymous namespace
+# are hidden.
+# The default value is: NO.
+
+EXTRACT_ANON_NSPACES = NO
+
+# If the HIDE_UNDOC_MEMBERS tag is set to YES, doxygen will hide all
+# undocumented members inside documented classes or files. If set to NO these
+# members will be included in the various overviews, but no documentation
+# section is generated. This option has no effect if EXTRACT_ALL is enabled.
+# The default value is: NO.
+
+HIDE_UNDOC_MEMBERS = NO
+
+# If the HIDE_UNDOC_CLASSES tag is set to YES, doxygen will hide all
+# undocumented classes that are normally visible in the class hierarchy. If set
+# to NO, these classes will be included in the various overviews. This option
+# has no effect if EXTRACT_ALL is enabled.
+# The default value is: NO.
+
+HIDE_UNDOC_CLASSES = NO
+
+# If the HIDE_FRIEND_COMPOUNDS tag is set to YES, doxygen will hide all friend
+# (class|struct|union) declarations. If set to NO, these declarations will be
+# included in the documentation.
+# The default value is: NO.
+
+HIDE_FRIEND_COMPOUNDS = YES
+
+# If the HIDE_IN_BODY_DOCS tag is set to YES, doxygen will hide any
+# documentation blocks found inside the body of a function. If set to NO, these
+# blocks will be appended to the function's detailed documentation block.
+# The default value is: NO.
+
+HIDE_IN_BODY_DOCS = NO
+
+# The INTERNAL_DOCS tag determines if documentation that is typed after a
+# \internal command is included. If the tag is set to NO then the documentation
+# will be excluded. Set it to YES to include the internal documentation.
+# The default value is: NO.
+
+INTERNAL_DOCS = NO
+
+# If the CASE_SENSE_NAMES tag is set to NO then doxygen will only generate file
+# names in lower-case letters. If set to YES, upper-case letters are also
+# allowed. This is useful if you have classes or files whose names only differ
+# in case and if your file system supports case sensitive file names. Windows
+# and Mac users are advised to set this option to NO.
+# The default value is: system dependent.
+
+CASE_SENSE_NAMES = YES
+
+# If the HIDE_SCOPE_NAMES tag is set to NO then doxygen will show members with
+# their full class and namespace scopes in the documentation. If set to YES, the
+# scope will be hidden.
+# The default value is: NO.
+
+HIDE_SCOPE_NAMES = NO
+
+# If the HIDE_COMPOUND_REFERENCE tag is set to NO (default) then doxygen will
+# append additional text to a page's title, such as Class Reference. If set to
+# YES the compound reference will be hidden.
+# The default value is: NO.
+
+HIDE_COMPOUND_REFERENCE= NO
+
+# If the SHOW_INCLUDE_FILES tag is set to YES then doxygen will put a list of
+# the files that are included by a file in the documentation of that file.
+# The default value is: YES.
+
+SHOW_INCLUDE_FILES = YES
+
+# If the SHOW_GROUPED_MEMB_INC tag is set to YES then Doxygen will add for each
+# grouped member an include statement to the documentation, telling the reader
+# which file to include in order to use the member.
+# The default value is: NO.
+
+SHOW_GROUPED_MEMB_INC = NO
+
+# If the FORCE_LOCAL_INCLUDES tag is set to YES then doxygen will list include
+# files with double quotes in the documentation rather than with sharp brackets.
+# The default value is: NO.
+
+FORCE_LOCAL_INCLUDES = NO
+
+# If the INLINE_INFO tag is set to YES then a tag [inline] is inserted in the
+# documentation for inline members.
+# The default value is: YES.
+
+INLINE_INFO = YES
+
+# If the SORT_MEMBER_DOCS tag is set to YES then doxygen will sort the
+# (detailed) documentation of file and class members alphabetically by member
+# name. If set to NO, the members will appear in declaration order.
+# The default value is: YES.
+
+SORT_MEMBER_DOCS = YES
+
+# If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the brief
+# descriptions of file, namespace and class members alphabetically by member
+# name. If set to NO, the members will appear in declaration order. Note that
+# this will also influence the order of the classes in the class list.
+# The default value is: NO.
+
+SORT_BRIEF_DOCS = NO
+
+# If the SORT_MEMBERS_CTORS_1ST tag is set to YES then doxygen will sort the
+# (brief and detailed) documentation of class members so that constructors and
+# destructors are listed first. If set to NO the constructors will appear in the
+# respective orders defined by SORT_BRIEF_DOCS and SORT_MEMBER_DOCS.
+# Note: If SORT_BRIEF_DOCS is set to NO this option is ignored for sorting brief
+# member documentation.
+# Note: If SORT_MEMBER_DOCS is set to NO this option is ignored for sorting
+# detailed member documentation.
+# The default value is: NO.
+
+SORT_MEMBERS_CTORS_1ST = NO
+
+# If the SORT_GROUP_NAMES tag is set to YES then doxygen will sort the hierarchy
+# of group names into alphabetical order. If set to NO the group names will
+# appear in their defined order.
+# The default value is: NO.
+
+SORT_GROUP_NAMES = NO
+
+# If the SORT_BY_SCOPE_NAME tag is set to YES, the class list will be sorted by
+# fully-qualified names, including namespaces. If set to NO, the class list will
+# be sorted only by class name, not including the namespace part.
+# Note: This option is not very useful if HIDE_SCOPE_NAMES is set to YES.
+# Note: This option applies only to the class list, not to the alphabetical
+# list.
+# The default value is: NO.
+
+SORT_BY_SCOPE_NAME = NO
+
+# If the STRICT_PROTO_MATCHING option is enabled and doxygen fails to do proper
+# type resolution of all parameters of a function it will reject a match between
+# the prototype and the implementation of a member function even if there is
+# only one candidate or it is obvious which candidate to choose by doing a
+# simple string match. By disabling STRICT_PROTO_MATCHING doxygen will still
+# accept a match between prototype and implementation in such cases.
+# The default value is: NO.
+
+STRICT_PROTO_MATCHING = NO
+
+# The GENERATE_TODOLIST tag can be used to enable (YES) or disable (NO) the todo
+# list. This list is created by putting \todo commands in the documentation.
+# The default value is: YES.
+
+GENERATE_TODOLIST = YES
+
+# The GENERATE_TESTLIST tag can be used to enable (YES) or disable (NO) the test
+# list. This list is created by putting \test commands in the documentation.
+# The default value is: YES.
+
+GENERATE_TESTLIST = YES
+
+# The GENERATE_BUGLIST tag can be used to enable (YES) or disable (NO) the bug
+# list. This list is created by putting \bug commands in the documentation.
+# The default value is: YES.
+
+GENERATE_BUGLIST = YES
+
+# The GENERATE_DEPRECATEDLIST tag can be used to enable (YES) or disable (NO)
+# the deprecated list. This list is created by putting \deprecated commands in
+# the documentation.
+# The default value is: YES.
+
+GENERATE_DEPRECATEDLIST= YES
+
+# The ENABLED_SECTIONS tag can be used to enable conditional documentation
+# sections, marked by \if <section_label> ... \endif and \cond <section_label>
+# ... \endcond blocks.
+
+ENABLED_SECTIONS =
+
+# The MAX_INITIALIZER_LINES tag determines the maximum number of lines that the
+# initial value of a variable or macro / define can have for it to appear in the
+# documentation. If the initializer consists of more lines than specified here
+# it will be hidden. Use a value of 0 to hide initializers completely. The
+# appearance of the value of individual variables and macros / defines can be
+# controlled using \showinitializer or \hideinitializer command in the
+# documentation regardless of this setting.
+# Minimum value: 0, maximum value: 10000, default value: 30.
+
+MAX_INITIALIZER_LINES = 30
+
+# Set the SHOW_USED_FILES tag to NO to disable the list of files generated at
+# the bottom of the documentation of classes and structs. If set to YES, the
+# list will mention the files that were used to generate the documentation.
+# The default value is: YES.
+
+SHOW_USED_FILES = YES
+
+# Set the SHOW_FILES tag to NO to disable the generation of the Files page. This
+# will remove the Files entry from the Quick Index and from the Folder Tree View
+# (if specified).
+# The default value is: YES.
+
+SHOW_FILES = YES
+
+# Set the SHOW_NAMESPACES tag to NO to disable the generation of the Namespaces
+# page. This will remove the Namespaces entry from the Quick Index and from the
+# Folder Tree View (if specified).
+# The default value is: YES.
+
+SHOW_NAMESPACES = YES
+
+# The FILE_VERSION_FILTER tag can be used to specify a program or script that
+# doxygen should invoke to get the current version for each file (typically from
+# the version control system). Doxygen will invoke the program by executing (via
+# popen()) the command: <command> <input-file>, where <command> is the value of
+# the FILE_VERSION_FILTER tag, and <input-file> is the name of an input file
+# provided by doxygen. Whatever the program writes to standard output is used as
+# the file version. For an example see the documentation.
+# version. For an example see the documentation.
+
+FILE_VERSION_FILTER =
+
+# The LAYOUT_FILE tag can be used to specify a layout file which will be parsed
+# by doxygen. The layout file controls the global structure of the generated
+# output files in an output format independent way. To create the layout file
+# that represents doxygen's defaults, run doxygen with the -l option. You can
+# optionally specify a file name after the option, if omitted DoxygenLayout.xml
+# will be used as the name of the layout file.
+#
+# Note that if you run doxygen from a directory containing a file called
+# DoxygenLayout.xml, doxygen will parse it automatically even if the LAYOUT_FILE
+# tag is left empty.
+
+LAYOUT_FILE =
+
+# The CITE_BIB_FILES tag can be used to specify one or more bib files containing
+# the reference definitions. This must be a list of .bib files. The .bib
+# extension is automatically appended if omitted. This requires the bibtex tool
+# to be installed. See also http://en.wikipedia.org/wiki/BibTeX for more info.
+# For LaTeX the style of the bibliography can be controlled using
+# LATEX_BIB_STYLE. To use this feature you need bibtex and perl available in the
+# search path. See also \cite for info how to create references.
+
+CITE_BIB_FILES =
+
+#---------------------------------------------------------------------------
+# Configuration options related to warning and progress messages
+#---------------------------------------------------------------------------
+
+# The QUIET tag can be used to turn on/off the messages that are generated to
+# standard output by doxygen. If QUIET is set to YES this implies that the
+# messages are off.
+# The default value is: NO.
+
+QUIET = NO
+
+# The WARNINGS tag can be used to turn on/off the warning messages that are
+# generated to standard error (stderr) by doxygen. If WARNINGS is set to YES
+# this implies that the warnings are on.
+#
+# Tip: Turn warnings on while writing the documentation.
+# The default value is: YES.
+
+WARNINGS = YES
+
+# If the WARN_IF_UNDOCUMENTED tag is set to YES then doxygen will generate
+# warnings for undocumented members. If EXTRACT_ALL is set to YES then this flag
+# will automatically be disabled.
+# The default value is: YES.
+
+WARN_IF_UNDOCUMENTED = YES
+
+# If the WARN_IF_DOC_ERROR tag is set to YES, doxygen will generate warnings for
+# potential errors in the documentation, such as not documenting some parameters
+# in a documented function, or documenting parameters that don't exist or using
+# markup commands wrongly.
+# The default value is: YES.
+
+WARN_IF_DOC_ERROR = YES
+
+# This WARN_NO_PARAMDOC option can be enabled to get warnings for functions that
+# are documented, but have no documentation for their parameters or return
+# value. If set to NO, doxygen will only warn about wrong or incomplete
+# parameter documentation, but not about the absence of documentation.
+# The default value is: NO.
+
+WARN_NO_PARAMDOC = NO
+
+# The WARN_FORMAT tag determines the format of the warning messages that doxygen
+# can produce. The string should contain the $file, $line, and $text tags, which
+# will be replaced by the file and line number from which the warning originated
+# and the warning text. Optionally the format may contain $version, which will
+# be replaced by the version of the file (if it could be obtained via
+# FILE_VERSION_FILTER)
+# The default value is: $file:$line: $text.
+
+WARN_FORMAT = "$file:$line: $text"
+
+# The WARN_LOGFILE tag can be used to specify a file to which warning and error
+# messages should be written. If left blank the output is written to standard
+# error (stderr).
+
+WARN_LOGFILE =
+
+#---------------------------------------------------------------------------
+# Configuration options related to the input files
+#---------------------------------------------------------------------------
+
+# The INPUT tag is used to specify the files and/or directories that contain
+# documented source files. You may enter file names like myfile.cpp or
+# directories like /usr/src/myproject. Separate the files or directories with
+# spaces.
+# Note: If this tag is empty the current directory is searched.
+
+INPUT = @Seastar_SOURCE_DIR@/include
+INPUT += @Seastar_BINARY_DIR@/gen/include
+INPUT += @Seastar_SOURCE_DIR@/doc/rpc.md
+INPUT += @Seastar_SOURCE_DIR@/doc/rpc-streaming.md
+INPUT += @Seastar_SOURCE_DIR@/doc/rpc-compression.md
+INPUT += @Seastar_SOURCE_DIR@/doc/compatibility.md
+
+# This tag can be used to specify the character encoding of the source files
+# that doxygen parses. Internally doxygen uses the UTF-8 encoding. Doxygen uses
+# libiconv (or the iconv built into libc) for the transcoding. See the libiconv
+# documentation (see: http://www.gnu.org/software/libiconv) for the list of
+# possible encodings.
+# The default value is: UTF-8.
+
+INPUT_ENCODING = UTF-8
+
+# If the value of the INPUT tag contains directories, you can use the
+# FILE_PATTERNS tag to specify one or more wildcard patterns (like *.cpp and
+# *.h) to filter out the source-files in the directories. If left blank the
+# following patterns are tested: *.c, *.cc, *.cxx, *.cpp, *.c++, *.java, *.ii,
+# *.ixx, *.ipp, *.i++, *.inl, *.idl, *.ddl, *.odl, *.h, *.hh, *.hxx, *.hpp,
+# *.h++, *.cs, *.d, *.php, *.php4, *.php5, *.phtml, *.inc, *.m, *.markdown,
+# *.md, *.mm, *.dox, *.py, *.f90, *.f, *.for, *.tcl, *.vhd, *.vhdl, *.ucf,
+# *.qsf, *.as and *.js.
+
+FILE_PATTERNS =
+
+# The RECURSIVE tag can be used to specify whether or not subdirectories should
+# be searched for input files as well.
+# The default value is: NO.
+
+RECURSIVE = YES
+
+# The EXCLUDE tag can be used to specify files and/or directories that should be
+# excluded from the INPUT source files. This way you can easily exclude a
+# subdirectory from a directory tree whose root is specified with the INPUT tag.
+#
+# Note that relative paths are relative to the directory from which doxygen is
+# run.
+
+EXCLUDE = build dpdk tests apps scripts
+
+# The EXCLUDE_SYMLINKS tag can be used to select whether or not files or
+# directories that are symbolic links (a Unix file system feature) are excluded
+# from the input.
+# The default value is: NO.
+
+EXCLUDE_SYMLINKS = NO
+
+# If the value of the INPUT tag contains directories, you can use the
+# EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude
+# certain files from those directories.
+#
+# Note that the wildcards are matched against the file with absolute path, so to
+# exclude all test directories for example use the pattern */test/*
+
+EXCLUDE_PATTERNS = test.py
+
+# The EXCLUDE_SYMBOLS tag can be used to specify one or more symbol names
+# (namespaces, classes, functions, etc.) that should be excluded from the
+# output. The symbol name can be a fully qualified name, a word, or if the
+# wildcard * is used, a substring. Examples: ANamespace, AClass,
+# AClass::ANamespace, ANamespace::*Test
+#
+# Note that the wildcards are matched against the file with absolute path, so to
+# exclude all test directories use the pattern */test/*
+
+EXCLUDE_SYMBOLS = seastar::internal seastar::coroutine::internal
+
+# The EXAMPLE_PATH tag can be used to specify one or more files or directories
+# that contain example code fragments that are included (see the \include
+# command).
+
+EXAMPLE_PATH = @Seastar_SOURCE_DIR@/demos @Seastar_SOURCE_DIR@/tests/unit
+
+# If the value of the EXAMPLE_PATH tag contains directories, you can use the
+# EXAMPLE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp and
+# *.h) to filter out the source-files in the directories. If left blank all
+# files are included.
+
+EXAMPLE_PATTERNS =
+
+# If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be
+# searched for input files to be used with the \include or \dontinclude commands
+# irrespective of the value of the RECURSIVE tag.
+# The default value is: NO.
+
+EXAMPLE_RECURSIVE = NO
+
+# The IMAGE_PATH tag can be used to specify one or more files or directories
+# that contain images that are to be included in the documentation (see the
+# \image command).
+
+IMAGE_PATH =
+
+# The INPUT_FILTER tag can be used to specify a program that doxygen should
+# invoke to filter for each input file. Doxygen will invoke the filter program
+# by executing (via popen()) the command:
+#
+# <filter> <input-file>
+#
+# where <filter> is the value of the INPUT_FILTER tag, and <input-file> is the
+# name of an input file. Doxygen will then use the output that the filter
+# program writes to standard output. If FILTER_PATTERNS is specified, this tag
+# will be ignored.
+#
+# Note that the filter must not add or remove lines; it is applied before the
+# code is scanned, but not when the output code is generated. If lines are added
+# or removed, the anchors will not be placed correctly.
+
+INPUT_FILTER =
+
+# The FILTER_PATTERNS tag can be used to specify filters on a per file pattern
+# basis. Doxygen will compare the file name with each pattern and apply the
+# filter if there is a match. The filters are a list of the form: pattern=filter
+# (like *.cpp=my_cpp_filter). See INPUT_FILTER for further information on how
+# filters are used. If the FILTER_PATTERNS tag is empty or if none of the
+# patterns match the file name, INPUT_FILTER is applied.
+
+FILTER_PATTERNS =
+
+# If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using
+# INPUT_FILTER) will also be used to filter the input files that are used for
+# producing the source files to browse (i.e. when SOURCE_BROWSER is set to YES).
+# The default value is: NO.
+
+FILTER_SOURCE_FILES = NO
+
+# The FILTER_SOURCE_PATTERNS tag can be used to specify source filters per file
+# pattern. A pattern will override the setting for FILTER_PATTERN (if any) and
+# it is also possible to disable source filtering for a specific pattern using
+# *.ext= (so without naming a filter).
+# This tag requires that the tag FILTER_SOURCE_FILES is set to YES.
+
+FILTER_SOURCE_PATTERNS =
+
+# If the USE_MDFILE_AS_MAINPAGE tag refers to the name of a markdown file that
+# is part of the input, its contents will be placed on the main page
+# (index.html). This can be useful if you have a project on for instance GitHub
+# and want to reuse the introduction page also for the doxygen output.
+
+USE_MDFILE_AS_MAINPAGE =
+
+#---------------------------------------------------------------------------
+# Configuration options related to source browsing
+#---------------------------------------------------------------------------
+
+# If the SOURCE_BROWSER tag is set to YES then a list of source files will be
+# generated. Documented entities will be cross-referenced with these sources.
+#
+# Note: To get rid of all source code in the generated output, make sure that
+# also VERBATIM_HEADERS is set to NO.
+# The default value is: NO.
+
+SOURCE_BROWSER = NO
+
+# Setting the INLINE_SOURCES tag to YES will include the body of functions,
+# classes and enums directly into the documentation.
+# The default value is: NO.
+
+INLINE_SOURCES = NO
+
+# Setting the STRIP_CODE_COMMENTS tag to YES will instruct doxygen to hide any
+# special comment blocks from generated source code fragments. Normal C, C++ and
+# Fortran comments will always remain visible.
+# The default value is: YES.
+
+STRIP_CODE_COMMENTS = YES
+
+# If the REFERENCED_BY_RELATION tag is set to YES then for each documented
+# function all documented functions referencing it will be listed.
+# The default value is: NO.
+
+REFERENCED_BY_RELATION = NO
+
+# If the REFERENCES_RELATION tag is set to YES then for each documented function
+# all documented entities called/used by that function will be listed.
+# The default value is: NO.
+
+REFERENCES_RELATION = NO
+
+# If the REFERENCES_LINK_SOURCE tag is set to YES and SOURCE_BROWSER tag is set
+# to YES then the hyperlinks from functions in REFERENCES_RELATION and
+# REFERENCED_BY_RELATION lists will link to the source code. Otherwise they will
+# link to the documentation.
+# The default value is: YES.
+
+REFERENCES_LINK_SOURCE = YES
+
+# If SOURCE_TOOLTIPS is enabled (the default) then hovering a hyperlink in the
+# source code will show a tooltip with additional information such as prototype,
+# brief description and links to the definition and documentation. Since this
+# will make the HTML file larger and loading of large files a bit slower, you
+# can opt to disable this feature.
+# The default value is: YES.
+# This tag requires that the tag SOURCE_BROWSER is set to YES.
+
+SOURCE_TOOLTIPS = YES
+
+# If the USE_HTAGS tag is set to YES then the references to source code will
+# point to the HTML generated by the htags(1) tool instead of doxygen built-in
+# source browser. The htags tool is part of GNU's global source tagging system
+# (see http://www.gnu.org/software/global/global.html). You will need version
+# 4.8.6 or higher.
+#
+# To use it do the following:
+# - Install the latest version of global
+# - Enable SOURCE_BROWSER and USE_HTAGS in the config file
+# - Make sure the INPUT points to the root of the source tree
+# - Run doxygen as normal
+#
+# Doxygen will invoke htags (and that will in turn invoke gtags), so these
+# tools must be available from the command line (i.e. in the search path).
+#
+# The result: instead of the source browser generated by doxygen, the links to
+# source code will now point to the output of htags.
+# The default value is: NO.
+# This tag requires that the tag SOURCE_BROWSER is set to YES.
+
+USE_HTAGS = NO
+
+# If the VERBATIM_HEADERS tag is set to YES then doxygen will generate a
+# verbatim copy of the header file for each class for which an include is
+# specified. Set to NO to disable this.
+# See also: Section \class.
+# The default value is: YES.
+
+VERBATIM_HEADERS = YES
+
+#---------------------------------------------------------------------------
+# Configuration options related to the alphabetical class index
+#---------------------------------------------------------------------------
+
+# If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index of all
+# compounds will be generated. Enable this if the project contains a lot of
+# classes, structs, unions or interfaces.
+# The default value is: YES.
+
+ALPHABETICAL_INDEX = YES
+
+# The COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns in
+# which the alphabetical index list will be split.
+# Minimum value: 1, maximum value: 20, default value: 5.
+# This tag requires that the tag ALPHABETICAL_INDEX is set to YES.
+
+COLS_IN_ALPHA_INDEX = 5
+
+# In case all classes in a project start with a common prefix, all classes will
+# be put under the same header in the alphabetical index. The IGNORE_PREFIX tag
+# can be used to specify a prefix (or a list of prefixes) that should be ignored
+# while generating the index headers.
+# This tag requires that the tag ALPHABETICAL_INDEX is set to YES.
+
+IGNORE_PREFIX =
+
+#---------------------------------------------------------------------------
+# Configuration options related to the HTML output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_HTML tag is set to YES, doxygen will generate HTML output
+# The default value is: NO.
+
+GENERATE_HTML = YES
+
+# The HTML_OUTPUT tag is used to specify where the HTML docs will be put. If a
+# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of
+# it.
+# The default directory is: html.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_OUTPUT = html
+
+# The HTML_FILE_EXTENSION tag can be used to specify the file extension for each
+# generated HTML page (for example: .htm, .php, .asp).
+# The default value is: .html.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_FILE_EXTENSION = .html
+
+# The HTML_HEADER tag can be used to specify a user-defined HTML header file for
+# each generated HTML page. If the tag is left blank doxygen will generate a
+# standard header.
+#
+# To get valid HTML, the header file must include any scripts and style sheets
+# that doxygen needs, which depend on the configuration options used (e.g.
+# the setting GENERATE_TREEVIEW). It is highly recommended to start with a
+# default header using
+# doxygen -w html new_header.html new_footer.html new_stylesheet.css
+# YourConfigFile
+# and then modify the file new_header.html. See also section "Doxygen usage"
+# for information on how to generate the default header that doxygen normally
+# uses.
+# Note: The header is subject to change so you typically have to regenerate the
+# default header when upgrading to a newer version of doxygen. For a description
+# of the possible markers and block names see the documentation.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_HEADER =
+
+# The HTML_FOOTER tag can be used to specify a user-defined HTML footer for each
+# generated HTML page. If the tag is left blank doxygen will generate a standard
+# footer. See HTML_HEADER for more information on how to generate a default
+# footer and what special commands can be used inside the footer. See also
+# section "Doxygen usage" for information on how to generate the default footer
+# that doxygen normally uses.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_FOOTER =
+
+# The HTML_STYLESHEET tag can be used to specify a user-defined cascading style
+# sheet that is used by each HTML page. It can be used to fine-tune the look of
+# the HTML output. If left blank doxygen will generate a default style sheet.
+# See also section "Doxygen usage" for information on how to generate the style
+# sheet that doxygen normally uses.
+# Note: It is recommended to use HTML_EXTRA_STYLESHEET instead of this tag, as
+# it is more robust and this tag (HTML_STYLESHEET) will in the future become
+# obsolete.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_STYLESHEET =
+
+# The HTML_EXTRA_STYLESHEET tag can be used to specify additional user-defined
+# cascading style sheets that are included after the standard style sheets
+# created by doxygen. Using this option one can overrule certain style aspects.
+# This is preferred over using HTML_STYLESHEET since it does not replace the
+# standard style sheet and is therefore more robust against future updates.
+# Doxygen will copy the style sheet files to the output directory.
+# Note: The order of the extra style sheet files is of importance (e.g. the last
+# style sheet in the list overrules the setting of the previous ones in the
+# list). For an example see the documentation.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_EXTRA_STYLESHEET =
+
+# The HTML_EXTRA_FILES tag can be used to specify one or more extra images or
+# other source files which should be copied to the HTML output directory. Note
+# that these files will be copied to the base HTML output directory. Use the
+# $relpath^ marker in the HTML_HEADER and/or HTML_FOOTER files to load these
+# files. In the HTML_STYLESHEET file, use the file name only. Also note that the
+# files will be copied as-is; there are no commands or markers available.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_EXTRA_FILES =
+
+# The HTML_COLORSTYLE_HUE tag controls the color of the HTML output. Doxygen
+# will adjust the colors in the style sheet and background images according to
+# this color. Hue is specified as an angle on a colorwheel, see
+# http://en.wikipedia.org/wiki/Hue for more information. For instance the value
+# 0 represents red, 60 is yellow, 120 is green, 180 is cyan, 240 is blue, 300 is
+# purple, and 360 is red again.
+# Minimum value: 0, maximum value: 359, default value: 220.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_COLORSTYLE_HUE = 220
+
+# The HTML_COLORSTYLE_SAT tag controls the purity (or saturation) of the colors
+# in the HTML output. For a value of 0 the output will use grayscales only. A
+# value of 255 will produce the most vivid colors.
+# Minimum value: 0, maximum value: 255, default value: 100.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_COLORSTYLE_SAT = 100
+
+# The HTML_COLORSTYLE_GAMMA tag controls the gamma correction applied to the
+# luminance component of the colors in the HTML output. Values below 100
+# gradually make the output lighter, whereas values above 100 make the output
+# darker. The value divided by 100 is the actual gamma applied, so 80 represents
+# a gamma of 0.8, The value 220 represents a gamma of 2.2, and 100 does not
+# change the gamma.
+# Minimum value: 40, maximum value: 240, default value: 80.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_COLORSTYLE_GAMMA = 80
+
+# If the HTML_TIMESTAMP tag is set to YES then the footer of each generated HTML
+# page will contain the date and time when the page was generated. Setting this
+# to NO can help when comparing the output of multiple runs.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_TIMESTAMP = NO
+
+# If the HTML_DYNAMIC_SECTIONS tag is set to YES then the generated HTML
+# documentation will contain sections that can be hidden and shown after the
+# page has loaded.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_DYNAMIC_SECTIONS = NO
+
+# With HTML_INDEX_NUM_ENTRIES one can control the preferred number of entries
+# shown in the various tree structured indices initially; the user can expand
+# and collapse entries dynamically later on. Doxygen will expand the tree to
+# such a level that at most the specified number of entries are visible (unless
+# a fully collapsed tree already exceeds this amount). So setting the number of
+# entries 1 will produce a full collapsed tree by default. 0 is a special value
+# representing an infinite number of entries and will result in a full expanded
+# tree by default.
+# Minimum value: 0, maximum value: 9999, default value: 100.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_INDEX_NUM_ENTRIES = 100
+
+# If the GENERATE_DOCSET tag is set to YES, additional index files will be
+# generated that can be used as input for Apple's Xcode 3 integrated development
+# environment (see: http://developer.apple.com/tools/xcode/), introduced with
+# OSX 10.5 (Leopard). To create a documentation set, doxygen will generate a
+# Makefile in the HTML output directory. Running make will produce the docset in
+# that directory and running make install will install the docset in
+# ~/Library/Developer/Shared/Documentation/DocSets so that Xcode will find it at
+# startup. See http://developer.apple.com/tools/creatingdocsetswithdoxygen.html
+# for more information.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+GENERATE_DOCSET = NO
+
+# This tag determines the name of the docset feed. A documentation feed provides
+# an umbrella under which multiple documentation sets from a single provider
+# (such as a company or product suite) can be grouped.
+# The default value is: Doxygen generated docs.
+# This tag requires that the tag GENERATE_DOCSET is set to YES.
+
+DOCSET_FEEDNAME = "Doxygen generated docs"
+
+# This tag specifies a string that should uniquely identify the documentation
+# set bundle. This should be a reverse domain-name style string, e.g.
+# com.mycompany.MyDocSet. Doxygen will append .docset to the name.
+# The default value is: org.doxygen.Project.
+# This tag requires that the tag GENERATE_DOCSET is set to YES.
+
+DOCSET_BUNDLE_ID = org.doxygen.Project
+
+# The DOCSET_PUBLISHER_ID tag specifies a string that should uniquely identify
+# the documentation publisher. This should be a reverse domain-name style
+# string, e.g. com.mycompany.MyDocSet.documentation.
+# The default value is: org.doxygen.Publisher.
+# This tag requires that the tag GENERATE_DOCSET is set to YES.
+
+DOCSET_PUBLISHER_ID = org.doxygen.Publisher
+
+# The DOCSET_PUBLISHER_NAME tag identifies the documentation publisher.
+# The default value is: Publisher.
+# This tag requires that the tag GENERATE_DOCSET is set to YES.
+
+DOCSET_PUBLISHER_NAME = Publisher
+
+# If the GENERATE_HTMLHELP tag is set to YES then doxygen generates three
+# additional HTML index files: index.hhp, index.hhc, and index.hhk. The
+# index.hhp is a project file that can be read by Microsoft's HTML Help Workshop
+# (see: http://www.microsoft.com/en-us/download/details.aspx?id=21138) on
+# Windows.
+#
+# The HTML Help Workshop contains a compiler that can convert all HTML output
+# generated by doxygen into a single compiled HTML file (.chm). Compiled HTML
+# files are now used as the Windows 98 help format, and will replace the old
+# Windows help format (.hlp) on all Windows platforms in the future. Compressed
+# HTML files also contain an index, a table of contents, and you can search for
+# words in the documentation. The HTML workshop also contains a viewer for
+# compressed HTML files.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+GENERATE_HTMLHELP = NO
+
+# The CHM_FILE tag can be used to specify the file name of the resulting .chm
+# file. You can add a path in front of the file if the result should not be
+# written to the html output directory.
+# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
+
+CHM_FILE =
+
+# The HHC_LOCATION tag can be used to specify the location (absolute path
+# including file name) of the HTML help compiler (hhc.exe). If non-empty,
+# doxygen will try to run the HTML help compiler on the generated index.hhp.
+# The file has to be specified with full path.
+# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
+
+HHC_LOCATION =
+
+# The GENERATE_CHI flag controls if a separate .chi index file is generated
+# (YES) or that it should be included in the master .chm file (NO).
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
+
+GENERATE_CHI = NO
+
+# The CHM_INDEX_ENCODING is used to encode HtmlHelp index (hhk), content (hhc)
+# and project file content.
+# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
+
+CHM_INDEX_ENCODING =
+
+# The BINARY_TOC flag controls whether a binary table of contents is generated
+# (YES) or a normal table of contents (NO) in the .chm file. Furthermore it
+# enables the Previous and Next buttons.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
+
+BINARY_TOC = NO
+
+# The TOC_EXPAND flag can be set to YES to add extra items for group members to
+# the table of contents of the HTML help documentation and to the tree view.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
+
+TOC_EXPAND = NO
+
+# If the GENERATE_QHP tag is set to YES and both QHP_NAMESPACE and
+# QHP_VIRTUAL_FOLDER are set, an additional index file will be generated that
+# can be used as input for Qt's qhelpgenerator to generate a Qt Compressed Help
+# (.qch) of the generated HTML documentation.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+GENERATE_QHP = NO
+
+# If the QHG_LOCATION tag is specified, the QCH_FILE tag can be used to specify
+# the file name of the resulting .qch file. The path specified is relative to
+# the HTML output folder.
+# This tag requires that the tag GENERATE_QHP is set to YES.
+
+QCH_FILE =
+
+# The QHP_NAMESPACE tag specifies the namespace to use when generating Qt Help
+# Project output. For more information please see Qt Help Project / Namespace
+# (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#namespace).
+# The default value is: org.doxygen.Project.
+# This tag requires that the tag GENERATE_QHP is set to YES.
+
+QHP_NAMESPACE = org.doxygen.Project
+
+# The QHP_VIRTUAL_FOLDER tag specifies the namespace to use when generating Qt
+# Help Project output. For more information please see Qt Help Project / Virtual
+# Folders (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#virtual-
+# folders).
+# The default value is: doc.
+# This tag requires that the tag GENERATE_QHP is set to YES.
+
+QHP_VIRTUAL_FOLDER = doc
+
+# If the QHP_CUST_FILTER_NAME tag is set, it specifies the name of a custom
+# filter to add. For more information please see Qt Help Project / Custom
+# Filters (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#custom-
+# filters).
+# This tag requires that the tag GENERATE_QHP is set to YES.
+
+QHP_CUST_FILTER_NAME =
+
+# The QHP_CUST_FILTER_ATTRS tag specifies the list of the attributes of the
+# custom filter to add. For more information please see Qt Help Project / Custom
+# Filters (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#custom-
+# filters).
+# This tag requires that the tag GENERATE_QHP is set to YES.
+
+QHP_CUST_FILTER_ATTRS =
+
+# The QHP_SECT_FILTER_ATTRS tag specifies the list of the attributes this
+# project's filter section matches. Qt Help Project / Filter Attributes (see:
+# http://qt-project.org/doc/qt-4.8/qthelpproject.html#filter-attributes).
+# This tag requires that the tag GENERATE_QHP is set to YES.
+
+QHP_SECT_FILTER_ATTRS =
+
+# The QHG_LOCATION tag can be used to specify the location of Qt's
+# qhelpgenerator. If non-empty doxygen will try to run qhelpgenerator on the
+# generated .qhp file.
+# This tag requires that the tag GENERATE_QHP is set to YES.
+
+QHG_LOCATION =
+
+# If the GENERATE_ECLIPSEHELP tag is set to YES, additional index files will be
+# generated, together with the HTML files, they form an Eclipse help plugin. To
+# install this plugin and make it available under the help contents menu in
+# Eclipse, the contents of the directory containing the HTML and XML files needs
+# to be copied into the plugins directory of eclipse. The name of the directory
+# within the plugins directory should be the same as the ECLIPSE_DOC_ID value.
+# After copying Eclipse needs to be restarted before the help appears.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+GENERATE_ECLIPSEHELP = NO
+
+# A unique identifier for the Eclipse help plugin. When installing the plugin
+# the directory name containing the HTML and XML files should also have this
+# name. Each documentation set should have its own identifier.
+# The default value is: org.doxygen.Project.
+# This tag requires that the tag GENERATE_ECLIPSEHELP is set to YES.
+
+ECLIPSE_DOC_ID = org.doxygen.Project
+
+# If you want full control over the layout of the generated HTML pages it might
+# be necessary to disable the index and replace it with your own. The
+# DISABLE_INDEX tag can be used to turn on/off the condensed index (tabs) at top
+# of each HTML page. A value of NO enables the index and the value YES disables
+# it. Since the tabs in the index contain the same information as the navigation
+# tree, you can set this option to YES if you also set GENERATE_TREEVIEW to YES.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+DISABLE_INDEX = NO
+
+# The GENERATE_TREEVIEW tag is used to specify whether a tree-like index
+# structure should be generated to display hierarchical information. If the tag
+# value is set to YES, a side panel will be generated containing a tree-like
+# index structure (just like the one that is generated for HTML Help). For this
+# to work a browser that supports JavaScript, DHTML, CSS and frames is required
+# (i.e. any modern browser). Windows users are probably better off using the
+# HTML help feature. Via custom style sheets (see HTML_EXTRA_STYLESHEET) one can
+# further fine-tune the look of the index. As an example, the default style
+# sheet generated by doxygen has an example that shows how to put an image at
+# the root of the tree instead of the PROJECT_NAME. Since the tree basically has
+# the same information as the tab index, you could consider setting
+# DISABLE_INDEX to YES when enabling this option.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+GENERATE_TREEVIEW = NO
+
+# The ENUM_VALUES_PER_LINE tag can be used to set the number of enum values that
+# doxygen will group on one line in the generated HTML documentation.
+#
+# Note that a value of 0 will completely suppress the enum values from appearing
+# in the overview section.
+# Minimum value: 0, maximum value: 20, default value: 4.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+ENUM_VALUES_PER_LINE = 4
+
+# If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be used
+# to set the initial width (in pixels) of the frame in which the tree is shown.
+# Minimum value: 0, maximum value: 1500, default value: 250.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+TREEVIEW_WIDTH = 250
+
+# If the EXT_LINKS_IN_WINDOW option is set to YES, doxygen will open links to
+# external symbols imported via tag files in a separate window.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+EXT_LINKS_IN_WINDOW = NO
+
+# Use this tag to change the font size of LaTeX formulas included as images in
+# the HTML documentation. When you change the font size after a successful
+# doxygen run you need to manually remove any form_*.png images from the HTML
+# output directory to force them to be regenerated.
+# Minimum value: 8, maximum value: 50, default value: 10.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+FORMULA_FONTSIZE = 10
+
+# Use the FORMULA_TRANSPARENT tag to determine whether or not the images
+# generated for formulas are transparent PNGs. Transparent PNGs are not
+# supported properly for IE 6.0, but are supported on all modern browsers.
+#
+# Note that when changing this option you need to delete any form_*.png files in
+# the HTML output directory before the changes have effect.
+# The default value is: YES.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+FORMULA_TRANSPARENT = YES
+
+# Enable the USE_MATHJAX option to render LaTeX formulas using MathJax (see
+# http://www.mathjax.org) which uses client side Javascript for the rendering
+# instead of using pre-rendered bitmaps. Use this if you do not have LaTeX
+# installed or if you want the formulas to look prettier in the HTML output. When
+# enabled you may also need to install MathJax separately and configure the path
+# to it using the MATHJAX_RELPATH option.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+USE_MATHJAX = NO
+
+# When MathJax is enabled you can set the default output format to be used for
+# the MathJax output. See the MathJax site (see:
+# http://docs.mathjax.org/en/latest/output.html) for more details.
+# Possible values are: HTML-CSS (which is slower, but has the best
+# compatibility), NativeMML (i.e. MathML) and SVG.
+# The default value is: HTML-CSS.
+# This tag requires that the tag USE_MATHJAX is set to YES.
+
+MATHJAX_FORMAT = HTML-CSS
+
+# When MathJax is enabled you need to specify the location relative to the HTML
+# output directory using the MATHJAX_RELPATH option. The destination directory
+# should contain the MathJax.js script. For instance, if the mathjax directory
+# is located at the same level as the HTML output directory, then
+# MATHJAX_RELPATH should be ../mathjax. The default value points to the MathJax
+# Content Delivery Network so you can quickly see the result without installing
+# MathJax. However, it is strongly recommended to install a local copy of
+# MathJax from http://www.mathjax.org before deployment.
+# The default value is: http://cdn.mathjax.org/mathjax/latest.
+# This tag requires that the tag USE_MATHJAX is set to YES.
+
+MATHJAX_RELPATH = http://cdn.mathjax.org/mathjax/latest
+
+# The MATHJAX_EXTENSIONS tag can be used to specify one or more MathJax
+# extension names that should be enabled during MathJax rendering. For example
+# MATHJAX_EXTENSIONS = TeX/AMSmath TeX/AMSsymbols
+# This tag requires that the tag USE_MATHJAX is set to YES.
+
+MATHJAX_EXTENSIONS =
+
+# The MATHJAX_CODEFILE tag can be used to specify a file with javascript pieces
+# of code that will be used on startup of the MathJax code. See the MathJax site
+# (see: http://docs.mathjax.org/en/latest/output.html) for more details. For an
+# example see the documentation.
+# This tag requires that the tag USE_MATHJAX is set to YES.
+
+MATHJAX_CODEFILE =
+
+# When the SEARCHENGINE tag is enabled doxygen will generate a search box for
+# the HTML output. The underlying search engine uses javascript and DHTML and
+# should work on any modern browser. Note that when using HTML help
+# (GENERATE_HTMLHELP), Qt help (GENERATE_QHP), or docsets (GENERATE_DOCSET)
+# there is already a search function so this one should typically be disabled.
+# For large projects the javascript based search engine can be slow, then
+# enabling SERVER_BASED_SEARCH may provide a better solution. It is possible to
+# search using the keyboard; to jump to the search box use <access key> + S
+# (what the <access key> is depends on the OS and browser, but it is typically
+# <CTRL>, <ALT>/<option>, or both). Inside the search box use the <cursor down
+# key> to jump into the search results window, the results can be navigated
+# using the <cursor keys>. Press <Enter> to select an item or <escape> to cancel
+# the search. The filter options can be selected when the cursor is inside the
+# search box by pressing <Shift>+<cursor down>. Also here use the <cursor keys>
+# to select a filter and <Enter> or <escape> to activate or cancel the filter
+# option.
+# The default value is: YES.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+SEARCHENGINE = YES
+
+# When the SERVER_BASED_SEARCH tag is enabled the search engine will be
+# implemented using a web server instead of a web client using Javascript. There
+# are two flavors of web server based searching depending on the EXTERNAL_SEARCH
+# setting. When disabled, doxygen will generate a PHP script for searching and
+# an index file used by the script. When EXTERNAL_SEARCH is enabled the indexing
+# and searching needs to be provided by external tools. See the section
+# "External Indexing and Searching" for details.
+# The default value is: NO.
+# This tag requires that the tag SEARCHENGINE is set to YES.
+
+SERVER_BASED_SEARCH = NO
+
+# When EXTERNAL_SEARCH tag is enabled doxygen will no longer generate the PHP
+# script for searching. Instead the search results are written to an XML file
+# which needs to be processed by an external indexer. Doxygen will invoke an
+# external search engine pointed to by the SEARCHENGINE_URL option to obtain the
+# search results.
+#
+# Doxygen ships with an example indexer (doxyindexer) and search engine
+# (doxysearch.cgi) which are based on the open source search engine library
+# Xapian (see: http://xapian.org/).
+#
+# See the section "External Indexing and Searching" for details.
+# The default value is: NO.
+# This tag requires that the tag SEARCHENGINE is set to YES.
+
+EXTERNAL_SEARCH = NO
+
+# The SEARCHENGINE_URL should point to a search engine hosted by a web server
+# which will return the search results when EXTERNAL_SEARCH is enabled.
+#
+# Doxygen ships with an example indexer (doxyindexer) and search engine
+# (doxysearch.cgi) which are based on the open source search engine library
+# Xapian (see: http://xapian.org/). See the section "External Indexing and
+# Searching" for details.
+# This tag requires that the tag SEARCHENGINE is set to YES.
+
+SEARCHENGINE_URL =
+
+# When SERVER_BASED_SEARCH and EXTERNAL_SEARCH are both enabled the unindexed
+# search data is written to a file for indexing by an external tool. With the
+# SEARCHDATA_FILE tag the name of this file can be specified.
+# The default file is: searchdata.xml.
+# This tag requires that the tag SEARCHENGINE is set to YES.
+
+SEARCHDATA_FILE = searchdata.xml
+
+# When SERVER_BASED_SEARCH and EXTERNAL_SEARCH are both enabled the
+# EXTERNAL_SEARCH_ID tag can be used as an identifier for the project. This is
+# useful in combination with EXTRA_SEARCH_MAPPINGS to search through multiple
+# projects and redirect the results back to the right project.
+# This tag requires that the tag SEARCHENGINE is set to YES.
+
+EXTERNAL_SEARCH_ID =
+
+# The EXTRA_SEARCH_MAPPINGS tag can be used to enable searching through doxygen
+# projects other than the one defined by this configuration file, but that are
+# all added to the same external search index. Each project needs to have a
+# unique id set via EXTERNAL_SEARCH_ID. The search mapping then maps the id
+# to a relative location where the documentation can be found. The format is:
+# EXTRA_SEARCH_MAPPINGS = tagname1=loc1 tagname2=loc2 ...
+# This tag requires that the tag SEARCHENGINE is set to YES.
+
+EXTRA_SEARCH_MAPPINGS =
+
+#---------------------------------------------------------------------------
+# Configuration options related to the LaTeX output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_LATEX tag is set to YES, doxygen will generate LaTeX output.
+# The default value is: YES.
+
+GENERATE_LATEX = NO
+
+# The LATEX_OUTPUT tag is used to specify where the LaTeX docs will be put. If a
+# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of
+# it.
+# The default directory is: latex.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_OUTPUT = latex
+
+# The LATEX_CMD_NAME tag can be used to specify the LaTeX command name to be
+# invoked.
+#
+# Note that when enabling USE_PDFLATEX this option is only used for generating
+# bitmaps for formulas in the HTML output, but not in the Makefile that is
+# written to the output directory.
+# The default file is: latex.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_CMD_NAME = latex
+
+# The MAKEINDEX_CMD_NAME tag can be used to specify the command name to generate
+# index for LaTeX.
+# The default file is: makeindex.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+MAKEINDEX_CMD_NAME = makeindex
+
+# If the COMPACT_LATEX tag is set to YES, doxygen generates more compact LaTeX
+# documents. This may be useful for small projects and may help to save some
+# trees in general.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+COMPACT_LATEX = NO
+
+# The PAPER_TYPE tag can be used to set the paper type that is used by the
+# printer.
+# Possible values are: a4 (210 x 297 mm), letter (8.5 x 11 inches), legal (8.5 x
+# 14 inches) and executive (7.25 x 10.5 inches).
+# The default value is: a4.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+PAPER_TYPE = a4
+
+# The EXTRA_PACKAGES tag can be used to specify one or more LaTeX package names
+# that should be included in the LaTeX output. To get the times font for
+# instance you can specify
+# EXTRA_PACKAGES=times
+# If left blank no extra packages will be included.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+EXTRA_PACKAGES =
+
+# The LATEX_HEADER tag can be used to specify a personal LaTeX header for the
+# generated LaTeX document. The header should contain everything until the first
+# chapter. If it is left blank doxygen will generate a standard header. See
+# section "Doxygen usage" for information on how to let doxygen write the
+# default header to a separate file.
+#
+# Note: Only use a user-defined header if you know what you are doing! The
+# following commands have a special meaning inside the header: $title,
+# $datetime, $date, $doxygenversion, $projectname, $projectnumber,
+# $projectbrief, $projectlogo. Doxygen will replace $title with the empty
+# string, for the replacement values of the other commands the user is referred
+# to HTML_HEADER.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_HEADER =
+
+# The LATEX_FOOTER tag can be used to specify a personal LaTeX footer for the
+# generated LaTeX document. The footer should contain everything after the last
+# chapter. If it is left blank doxygen will generate a standard footer. See
+# LATEX_HEADER for more information on how to generate a default footer and what
+# special commands can be used inside the footer.
+#
+# Note: Only use a user-defined footer if you know what you are doing!
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_FOOTER =
+
+# The LATEX_EXTRA_STYLESHEET tag can be used to specify additional user-defined
+# LaTeX style sheets that are included after the standard style sheets created
+# by doxygen. Using this option one can overrule certain style aspects. Doxygen
+# will copy the style sheet files to the output directory.
+# Note: The order of the extra style sheet files is of importance (e.g. the last
+# style sheet in the list overrules the setting of the previous ones in the
+# list).
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_EXTRA_STYLESHEET =
+
+# The LATEX_EXTRA_FILES tag can be used to specify one or more extra images or
+# other source files which should be copied to the LATEX_OUTPUT output
+# directory. Note that the files will be copied as-is; there are no commands or
+# markers available.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_EXTRA_FILES =
+
+# If the PDF_HYPERLINKS tag is set to YES, the LaTeX that is generated is
+# prepared for conversion to PDF (using ps2pdf or pdflatex). The PDF file will
+# contain links (just like the HTML output) instead of page references. This
+# makes the output suitable for online browsing using a PDF viewer.
+# The default value is: YES.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+PDF_HYPERLINKS = YES
+
+# If the USE_PDFLATEX tag is set to YES, doxygen will use pdflatex to generate
+# the PDF file directly from the LaTeX files. Set this option to YES, to get a
+# higher quality PDF documentation.
+# The default value is: YES.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+USE_PDFLATEX = YES
+
+# If the LATEX_BATCHMODE tag is set to YES, doxygen will add the \batchmode
+# command to the generated LaTeX files. This will instruct LaTeX to keep running
+# if errors occur, instead of asking the user for help. This option is also used
+# when generating formulas in HTML.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_BATCHMODE = NO
+
+# If the LATEX_HIDE_INDICES tag is set to YES then doxygen will not include the
+# index chapters (such as File Index, Compound Index, etc.) in the output.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_HIDE_INDICES = NO
+
+# If the LATEX_SOURCE_CODE tag is set to YES then doxygen will include source
+# code with syntax highlighting in the LaTeX output.
+#
+# Note that which sources are shown also depends on other settings such as
+# SOURCE_BROWSER.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_SOURCE_CODE = NO
+
+# The LATEX_BIB_STYLE tag can be used to specify the style to use for the
+# bibliography, e.g. plainnat, or ieeetr. See
+# http://en.wikipedia.org/wiki/BibTeX and \cite for more info.
+# The default value is: plain.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_BIB_STYLE = plain
+
+#---------------------------------------------------------------------------
+# Configuration options related to the RTF output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_RTF tag is set to YES, doxygen will generate RTF output. The
+# RTF output is optimized for Word 97 and may not look too pretty with other RTF
+# readers/editors.
+# The default value is: NO.
+
+GENERATE_RTF = NO
+
+# The RTF_OUTPUT tag is used to specify where the RTF docs will be put. If a
+# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of
+# it.
+# The default directory is: rtf.
+# This tag requires that the tag GENERATE_RTF is set to YES.
+
+RTF_OUTPUT = rtf
+
+# If the COMPACT_RTF tag is set to YES, doxygen generates more compact RTF
+# documents. This may be useful for small projects and may help to save some
+# trees in general.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_RTF is set to YES.
+
+COMPACT_RTF = NO
+
+# If the RTF_HYPERLINKS tag is set to YES, the RTF that is generated will
+# contain hyperlink fields. The RTF file will contain links (just like the HTML
+# output) instead of page references. This makes the output suitable for online
+# browsing using Word or some other Word compatible readers that support those
+# fields.
+#
+# Note: WordPad (write) and others do not support links.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_RTF is set to YES.
+
+RTF_HYPERLINKS = NO
+
+# Load stylesheet definitions from file. Syntax is similar to doxygen's config
+# file, i.e. a series of assignments. You only have to provide replacements,
+# missing definitions are set to their default value.
+#
+# See also section "Doxygen usage" for information on how to generate the
+# default style sheet that doxygen normally uses.
+# This tag requires that the tag GENERATE_RTF is set to YES.
+
+RTF_STYLESHEET_FILE =
+
+# Set optional variables used in the generation of an RTF document. Syntax is
+# similar to doxygen's config file. A template extensions file can be generated
+# using doxygen -e rtf extensionFile.
+# This tag requires that the tag GENERATE_RTF is set to YES.
+
+RTF_EXTENSIONS_FILE =
+
+# If the RTF_SOURCE_CODE tag is set to YES then doxygen will include source code
+# with syntax highlighting in the RTF output.
+#
+# Note that which sources are shown also depends on other settings such as
+# SOURCE_BROWSER.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_RTF is set to YES.
+
+RTF_SOURCE_CODE = NO
+
+#---------------------------------------------------------------------------
+# Configuration options related to the man page output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_MAN tag is set to YES, doxygen will generate man pages for
+# classes and files.
+# The default value is: NO.
+
+GENERATE_MAN = NO
+
+# The MAN_OUTPUT tag is used to specify where the man pages will be put. If a
+# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of
+# it. A directory man3 will be created inside the directory specified by
+# MAN_OUTPUT.
+# The default directory is: man.
+# This tag requires that the tag GENERATE_MAN is set to YES.
+
+MAN_OUTPUT = man
+
+# The MAN_EXTENSION tag determines the extension that is added to the generated
+# man pages. In case the manual section does not start with a number, the number
+# 3 is prepended. The dot (.) at the beginning of the MAN_EXTENSION tag is
+# optional.
+# The default value is: .3.
+# This tag requires that the tag GENERATE_MAN is set to YES.
+
+MAN_EXTENSION = .3
+
+# The MAN_SUBDIR tag determines the name of the directory created within
+# MAN_OUTPUT in which the man pages are placed. It defaults to man followed by
+# MAN_EXTENSION with the initial . removed.
+# This tag requires that the tag GENERATE_MAN is set to YES.
+
+MAN_SUBDIR =
+
+# If the MAN_LINKS tag is set to YES and doxygen generates man output, then it
+# will generate one additional man file for each entity documented in the real
+# man page(s). These additional files only source the real man page, but without
+# them the man command would be unable to find the correct page.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_MAN is set to YES.
+
+MAN_LINKS = NO
+
+#---------------------------------------------------------------------------
+# Configuration options related to the XML output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_XML tag is set to YES, doxygen will generate an XML file that
+# captures the structure of the code including all documentation.
+# The default value is: NO.
+
+GENERATE_XML = NO
+
+# The XML_OUTPUT tag is used to specify where the XML pages will be put. If a
+# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of
+# it.
+# The default directory is: xml.
+# This tag requires that the tag GENERATE_XML is set to YES.
+
+XML_OUTPUT = xml
+
+# If the XML_PROGRAMLISTING tag is set to YES, doxygen will dump the program
+# listings (including syntax highlighting and cross-referencing information) to
+# the XML output. Note that enabling this will significantly increase the size
+# of the XML output.
+# The default value is: YES.
+# This tag requires that the tag GENERATE_XML is set to YES.
+
+XML_PROGRAMLISTING = YES
+
+#---------------------------------------------------------------------------
+# Configuration options related to the DOCBOOK output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_DOCBOOK tag is set to YES, doxygen will generate Docbook files
+# that can be used to generate PDF.
+# The default value is: NO.
+
+GENERATE_DOCBOOK = NO
+
+# The DOCBOOK_OUTPUT tag is used to specify where the Docbook pages will be put.
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be put in
+# front of it.
+# The default directory is: docbook.
+# This tag requires that the tag GENERATE_DOCBOOK is set to YES.
+
+DOCBOOK_OUTPUT = docbook
+
+# If the DOCBOOK_PROGRAMLISTING tag is set to YES, doxygen will include the
+# program listings (including syntax highlighting and cross-referencing
+# information) to the DOCBOOK output. Note that enabling this will significantly
+# increase the size of the DOCBOOK output.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_DOCBOOK is set to YES.
+
+DOCBOOK_PROGRAMLISTING = NO
+
+#---------------------------------------------------------------------------
+# Configuration options for the AutoGen Definitions output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_AUTOGEN_DEF tag is set to YES, doxygen will generate an
+# AutoGen Definitions (see http://autogen.sf.net) file that captures the
+# structure of the code including all documentation. Note that this feature is
+# still experimental and incomplete at the moment.
+# The default value is: NO.
+
+GENERATE_AUTOGEN_DEF = NO
+
+#---------------------------------------------------------------------------
+# Configuration options related to the Perl module output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_PERLMOD tag is set to YES, doxygen will generate a Perl module
+# file that captures the structure of the code including all documentation.
+#
+# Note that this feature is still experimental and incomplete at the moment.
+# The default value is: NO.
+
+GENERATE_PERLMOD = NO
+
+# If the PERLMOD_LATEX tag is set to YES, doxygen will generate the necessary
+# Makefile rules, Perl scripts and LaTeX code to be able to generate PDF and DVI
+# output from the Perl module output.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_PERLMOD is set to YES.
+
+PERLMOD_LATEX = NO
+
+# If the PERLMOD_PRETTY tag is set to YES, the Perl module output will be nicely
+# formatted so it can be parsed by a human reader. This is useful if you want to
+# understand what is going on. On the other hand, if this tag is set to NO, the
+# size of the Perl module output will be much smaller and Perl will parse it
+# just the same.
+# The default value is: YES.
+# This tag requires that the tag GENERATE_PERLMOD is set to YES.
+
+PERLMOD_PRETTY = YES
+
+# The names of the make variables in the generated doxyrules.make file are
+# prefixed with the string contained in PERLMOD_MAKEVAR_PREFIX. This is useful
+# so different doxyrules.make files included by the same Makefile don't
+# overwrite each other's variables.
+# This tag requires that the tag GENERATE_PERLMOD is set to YES.
+
+PERLMOD_MAKEVAR_PREFIX =
+
+#---------------------------------------------------------------------------
+# Configuration options related to the preprocessor
+#---------------------------------------------------------------------------
+
+# If the ENABLE_PREPROCESSING tag is set to YES, doxygen will evaluate all
+# C-preprocessor directives found in the sources and include files.
+# The default value is: YES.
+
+ENABLE_PREPROCESSING = YES
+
+# If the MACRO_EXPANSION tag is set to YES, doxygen will expand all macro names
+# in the source code. If set to NO, only conditional compilation will be
+# performed. Macro expansion can be done in a controlled way by setting
+# EXPAND_ONLY_PREDEF to YES.
+# The default value is: NO.
+# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
+
+MACRO_EXPANSION = YES
+
+# If the EXPAND_ONLY_PREDEF and MACRO_EXPANSION tags are both set to YES then
+# the macro expansion is limited to the macros specified with the PREDEFINED and
+# EXPAND_AS_DEFINED tags.
+# The default value is: NO.
+# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
+
+EXPAND_ONLY_PREDEF = NO
+
+# If the SEARCH_INCLUDES tag is set to YES, the include files in the
+# INCLUDE_PATH will be searched if a #include is found.
+# The default value is: YES.
+# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
+
+SEARCH_INCLUDES = YES
+
+# The INCLUDE_PATH tag can be used to specify one or more directories that
+# contain include files that are not input files but should be processed by the
+# preprocessor.
+# This tag requires that the tag SEARCH_INCLUDES is set to YES.
+
+INCLUDE_PATH = @Seastar_SOURCE_DIR@/include @Seastar_BINARY_DIR@/gen/include
+
+# You can use the INCLUDE_FILE_PATTERNS tag to specify one or more wildcard
+# patterns (like *.h and *.hpp) to filter out the header-files in the
+# directories. If left blank, the patterns specified with FILE_PATTERNS will be
+# used.
+# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
+
+INCLUDE_FILE_PATTERNS =
+
+# The PREDEFINED tag can be used to specify one or more macro names that are
+# defined before the preprocessor is started (similar to the -D option of e.g.
+# gcc). The argument of the tag is a list of macros of the form: name or
+# name=definition (no spaces). If the definition and the "=" are omitted, "=1"
+# is assumed. To prevent a macro definition from being undefined via #undef or
+# recursively expanded use the := operator instead of the = operator.
+# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
+
+PREDEFINED = SEASTAR_API_LEVEL=6
+
+# If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then this
+# tag can be used to specify a list of macro names that should be expanded. The
+# macro definition that is found in the sources will be used. Use the PREDEFINED
+# tag if you want to use a different macro definition that overrules the
+# definition found in the source code.
+# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
+
+EXPAND_AS_DEFINED =
+
+# If the SKIP_FUNCTION_MACROS tag is set to YES then doxygen's preprocessor will
+# remove all references to function-like macros that are alone on a line, have
+# an all uppercase name, and do not end with a semicolon. Such function macros
+# are typically used for boiler-plate code, and will confuse the parser if not
+# removed.
+# The default value is: YES.
+# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
+
+SKIP_FUNCTION_MACROS = YES
+
+#---------------------------------------------------------------------------
+# Configuration options related to external references
+#---------------------------------------------------------------------------
+
+# The TAGFILES tag can be used to specify one or more tag files. For each tag
+# file the location of the external documentation should be added. The format of
+# a tag file without this location is as follows:
+# TAGFILES = file1 file2 ...
+# Adding location for the tag files is done as follows:
+# TAGFILES = file1=loc1 "file2 = loc2" ...
+# where loc1 and loc2 can be relative or absolute paths or URLs. See the
+# section "Linking to external documentation" for more information about the use
+# of tag files.
+# Note: Each tag file must have a unique name (where the name does NOT include
+# the path). If a tag file is not located in the directory in which doxygen is
+# run, you must also specify the path to the tagfile here.
+
+TAGFILES =
+
+# When a file name is specified after GENERATE_TAGFILE, doxygen will create a
+# tag file that is based on the input files it reads. See section "Linking to
+# external documentation" for more information about the usage of tag files.
+
+GENERATE_TAGFILE =
+
+# If the ALLEXTERNALS tag is set to YES, all external class will be listed in
+# the class index. If set to NO, only the inherited external classes will be
+# listed.
+# The default value is: NO.
+
+ALLEXTERNALS = NO
+
+# If the EXTERNAL_GROUPS tag is set to YES, all external groups will be listed
+# in the modules index. If set to NO, only the current project's groups will be
+# listed.
+# The default value is: YES.
+
+EXTERNAL_GROUPS = YES
+
+# If the EXTERNAL_PAGES tag is set to YES, all external pages will be listed in
+# the related pages index. If set to NO, only the current project's pages will
+# be listed.
+# The default value is: YES.
+
+EXTERNAL_PAGES = YES
+
+# The PERL_PATH should be the absolute path and name of the perl script
+# interpreter (i.e. the result of 'which perl').
+# The default file (with absolute path) is: /usr/bin/perl.
+
+PERL_PATH = /usr/bin/perl
+
+#---------------------------------------------------------------------------
+# Configuration options related to the dot tool
+#---------------------------------------------------------------------------
+
+# If the CLASS_DIAGRAMS tag is set to YES, doxygen will generate a class diagram
+# (in HTML and LaTeX) for classes with base or super classes. Setting the tag to
+# NO turns the diagrams off. Note that this option also works with HAVE_DOT
+# disabled, but it is recommended to install and use dot, since it yields more
+# powerful graphs.
+# The default value is: YES.
+
+CLASS_DIAGRAMS = YES
+
+# You can define message sequence charts within doxygen comments using the \msc
+# command. Doxygen will then run the mscgen tool (see:
+# http://www.mcternan.me.uk/mscgen/) to produce the chart and insert it in the
+# documentation. The MSCGEN_PATH tag allows you to specify the directory where
+# the mscgen tool resides. If left empty the tool is assumed to be found in the
+# default search path.
+
+MSCGEN_PATH =
+
+# You can include diagrams made with dia in doxygen documentation. Doxygen will
+# then run dia to produce the diagram and insert it in the documentation. The
+# DIA_PATH tag allows you to specify the directory where the dia binary resides.
+# If left empty dia is assumed to be found in the default search path.
+
+DIA_PATH =
+
+# If set to YES the inheritance and collaboration graphs will hide inheritance
+# and usage relations if the target is undocumented or is not a class.
+# The default value is: YES.
+
+HIDE_UNDOC_RELATIONS = YES
+
+# If you set the HAVE_DOT tag to YES then doxygen will assume the dot tool is
+# available from the path. This tool is part of Graphviz (see:
+# http://www.graphviz.org/), a graph visualization toolkit from AT&T and Lucent
+# Bell Labs. The other options in this section have no effect if this option is
+# set to NO
+# The default value is: NO.
+
+HAVE_DOT = NO
+
+# The DOT_NUM_THREADS specifies the number of dot invocations doxygen is allowed
+# to run in parallel. When set to 0 doxygen will base this on the number of
+# processors available in the system. You can set it explicitly to a value
+# larger than 0 to get control over the balance between CPU load and processing
+# speed.
+# Minimum value: 0, maximum value: 32, default value: 0.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DOT_NUM_THREADS = 0
+
+# When you want a differently looking font in the dot files that doxygen
+# generates you can specify the font name using DOT_FONTNAME. You need to make
+# sure dot is able to find the font, which can be done by putting it in a
+# standard location or by setting the DOTFONTPATH environment variable or by
+# setting DOT_FONTPATH to the directory containing the font.
+# The default value is: Helvetica.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DOT_FONTNAME = Helvetica
+
+# The DOT_FONTSIZE tag can be used to set the size (in points) of the font of
+# dot graphs.
+# Minimum value: 4, maximum value: 24, default value: 10.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DOT_FONTSIZE = 10
+
+# By default doxygen will tell dot to use the default font as specified with
+# DOT_FONTNAME. If you specify a different font using DOT_FONTNAME you can set
+# the path where dot can find it using this tag.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DOT_FONTPATH =
+
+# If the CLASS_GRAPH tag is set to YES then doxygen will generate a graph for
+# each documented class showing the direct and indirect inheritance relations.
+# Setting this tag to YES will force the CLASS_DIAGRAMS tag to NO.
+# The default value is: YES.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+CLASS_GRAPH = YES
+
+# If the COLLABORATION_GRAPH tag is set to YES then doxygen will generate a
+# graph for each documented class showing the direct and indirect implementation
+# dependencies (inheritance, containment, and class references variables) of the
+# class with other documented classes.
+# The default value is: YES.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+COLLABORATION_GRAPH = YES
+
+# If the GROUP_GRAPHS tag is set to YES then doxygen will generate a graph for
+# groups, showing the direct groups dependencies.
+# The default value is: YES.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+GROUP_GRAPHS = YES
+
+# If the UML_LOOK tag is set to YES, doxygen will generate inheritance and
+# collaboration diagrams in a style similar to the OMG's Unified Modeling
+# Language.
+# The default value is: NO.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+UML_LOOK = NO
+
+# If the UML_LOOK tag is enabled, the fields and methods are shown inside the
+# class node. If there are many fields or methods and many nodes the graph may
+# become too big to be useful. The UML_LIMIT_NUM_FIELDS threshold limits the
+# number of items for each type to make the size more manageable. Set this to 0
+# for no limit. Note that the threshold may be exceeded by 50% before the limit
+# is enforced. So when you set the threshold to 10, up to 15 fields may appear,
+# but if the number exceeds 15, the total amount of fields shown is limited to
+# 10.
+# Minimum value: 0, maximum value: 100, default value: 10.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+UML_LIMIT_NUM_FIELDS = 10
+
+# If the TEMPLATE_RELATIONS tag is set to YES then the inheritance and
+# collaboration graphs will show the relations between templates and their
+# instances.
+# The default value is: NO.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+TEMPLATE_RELATIONS = NO
+
+# If the INCLUDE_GRAPH, ENABLE_PREPROCESSING and SEARCH_INCLUDES tags are set to
+# YES then doxygen will generate a graph for each documented file showing the
+# direct and indirect include dependencies of the file with other documented
+# files.
+# The default value is: YES.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+INCLUDE_GRAPH = YES
+
+# If the INCLUDED_BY_GRAPH, ENABLE_PREPROCESSING and SEARCH_INCLUDES tags are
+# set to YES then doxygen will generate a graph for each documented file showing
+# the direct and indirect include dependencies of the file with other documented
+# files.
+# The default value is: YES.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+INCLUDED_BY_GRAPH = YES
+
+# If the CALL_GRAPH tag is set to YES then doxygen will generate a call
+# dependency graph for every global function or class method.
+#
+# Note that enabling this option will significantly increase the time of a run.
+# So in most cases it will be better to enable call graphs for selected
+# functions only using the \callgraph command.
+# The default value is: NO.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+CALL_GRAPH = NO
+
+# If the CALLER_GRAPH tag is set to YES then doxygen will generate a caller
+# dependency graph for every global function or class method.
+#
+# Note that enabling this option will significantly increase the time of a run.
+# So in most cases it will be better to enable caller graphs for selected
+# functions only using the \callergraph command.
+# The default value is: NO.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+CALLER_GRAPH = NO
+
+# If the GRAPHICAL_HIERARCHY tag is set to YES then doxygen will graphical
+# hierarchy of all classes instead of a textual one.
+# The default value is: YES.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+GRAPHICAL_HIERARCHY = YES
+
+# If the DIRECTORY_GRAPH tag is set to YES then doxygen will show the
+# dependencies a directory has on other directories in a graphical way. The
+# dependency relations are determined by the #include relations between the
+# files in the directories.
+# The default value is: YES.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DIRECTORY_GRAPH = YES
+
+# The DOT_IMAGE_FORMAT tag can be used to set the image format of the images
+# generated by dot.
+# Note: If you choose svg you need to set HTML_FILE_EXTENSION to xhtml in order
+# to make the SVG files visible in IE 9+ (other browsers do not have this
+# requirement).
+# Possible values are: png, jpg, gif and svg.
+# The default value is: png.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DOT_IMAGE_FORMAT = png
+
+# If DOT_IMAGE_FORMAT is set to svg, then this option can be set to YES to
+# enable generation of interactive SVG images that allow zooming and panning.
+#
+# Note that this requires a modern browser other than Internet Explorer. Tested
+# and working are Firefox, Chrome, Safari, and Opera.
+# Note: For IE 9+ you need to set HTML_FILE_EXTENSION to xhtml in order to make
+# the SVG files visible. Older versions of IE do not have SVG support.
+# The default value is: NO.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+INTERACTIVE_SVG = NO
+
+# The DOT_PATH tag can be used to specify the path where the dot tool can be
+# found. If left blank, it is assumed the dot tool can be found in the path.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DOT_PATH =
+
+# The DOTFILE_DIRS tag can be used to specify one or more directories that
+# contain dot files that are included in the documentation (see the \dotfile
+# command).
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DOTFILE_DIRS =
+
+# The MSCFILE_DIRS tag can be used to specify one or more directories that
+# contain msc files that are included in the documentation (see the \mscfile
+# command).
+
+MSCFILE_DIRS =
+
+# The DIAFILE_DIRS tag can be used to specify one or more directories that
+# contain dia files that are included in the documentation (see the \diafile
+# command).
+
+DIAFILE_DIRS =
+
+# When using plantuml, the PLANTUML_JAR_PATH tag should be used to specify the
+# path where java can find the plantuml.jar file. If left blank, it is assumed
+# PlantUML is not used or called during a preprocessing step. Doxygen will
+# generate a warning when it encounters a \startuml command in this case and
+# will not generate output for the diagram.
+
+PLANTUML_JAR_PATH =
+
+# When using plantuml, the specified paths are searched for files specified by
+# the !include statement in a plantuml block.
+
+PLANTUML_INCLUDE_PATH =
+
+# The DOT_GRAPH_MAX_NODES tag can be used to set the maximum number of nodes
+# that will be shown in the graph. If the number of nodes in a graph becomes
+# larger than this value, doxygen will truncate the graph, which is visualized
+# by representing a node as a red box. Note that if the number of direct
+# children of the root node in a graph is already larger than
+# DOT_GRAPH_MAX_NODES, then the graph will not be shown at all. Also note that
+# the size of a graph can be further restricted by MAX_DOT_GRAPH_DEPTH.
+# Minimum value: 0, maximum value: 10000, default value: 50.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DOT_GRAPH_MAX_NODES = 50
+
+# The MAX_DOT_GRAPH_DEPTH tag can be used to set the maximum depth of the graphs
+# generated by dot. A depth value of 3 means that only nodes reachable from the
+# root by following a path via at most 3 edges will be shown. Nodes that lay
+# further from the root node will be omitted. Note that setting this option to 1
+# or 2 may greatly reduce the computation time needed for large code bases. Also
+# note that the size of a graph can be further restricted by
+# DOT_GRAPH_MAX_NODES. Using a depth of 0 means no depth restriction.
+# Minimum value: 0, maximum value: 1000, default value: 0.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+MAX_DOT_GRAPH_DEPTH = 0
+
+# Set the DOT_TRANSPARENT tag to YES to generate images with a transparent
+# background. This is disabled by default, because dot on Windows does not seem
+# to support this out of the box.
+#
+# Warning: Depending on the platform used, enabling this option may lead to
+# badly anti-aliased labels on the edges of a graph (i.e. they become hard to
+# read).
+# The default value is: NO.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DOT_TRANSPARENT = NO
+
+# Set the DOT_MULTI_TARGETS tag to YES to allow dot to generate multiple output
+# files in one run (i.e. multiple -o and -T options on the command line). This
+# makes dot run faster, but since only newer versions of dot (>1.8.10) support
+# this, this feature is disabled by default.
+# The default value is: NO.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DOT_MULTI_TARGETS = NO
+
+# If the GENERATE_LEGEND tag is set to YES doxygen will generate a legend page
+# explaining the meaning of the various boxes and arrows in the dot generated
+# graphs.
+# The default value is: YES.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+GENERATE_LEGEND = YES
+
+# If the DOT_CLEANUP tag is set to YES, doxygen will remove the intermediate dot
+# files that are used to generate the various graphs.
+# The default value is: YES.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DOT_CLEANUP = YES
diff --git a/src/seastar/doc/DoxygenLayout.xml b/src/seastar/doc/DoxygenLayout.xml
new file mode 100644
index 000000000..3056ba501
--- /dev/null
+++ b/src/seastar/doc/DoxygenLayout.xml
@@ -0,0 +1,189 @@
+<doxygenlayout version="1.0">
+ <!-- Generated by doxygen 1.8.13 -->
+ <!-- Navigation index tabs for HTML output -->
+ <navindex>
+ <tab type="mainpage" visible="yes" title=""/>
+ <tab type="pages" visible="yes" title="" intro=""/>
+ <tab type="modules" visible="yes" title="" intro=""/>
+ <tab type="namespaces" visible="yes" title="">
+ <tab type="namespacelist" visible="yes" title="" intro=""/>
+ <tab type="namespacemembers" visible="yes" title="" intro=""/>
+ </tab>
+ <tab type="classes" visible="yes" title="">
+ <tab type="classlist" visible="yes" title="" intro=""/>
+ <tab type="classindex" visible="$ALPHABETICAL_INDEX" title=""/>
+ <tab type="hierarchy" visible="yes" title="" intro=""/>
+ <tab type="classmembers" visible="yes" title="" intro=""/>
+ </tab>
+ <tab type="files" visible="yes" title="">
+ <tab type="filelist" visible="yes" title="" intro=""/>
+ <tab type="globals" visible="yes" title="" intro=""/>
+ </tab>
+ <tab type="examples" visible="yes" title="" intro=""/>
+ </navindex>
+
+ <!-- Layout definition for a class page -->
+ <class>
+ <detaileddescription title=""/>
+ <includes visible="$SHOW_INCLUDE_FILES"/>
+ <inheritancegraph visible="$CLASS_GRAPH"/>
+ <collaborationgraph visible="$COLLABORATION_GRAPH"/>
+ <memberdecl>
+ <nestedclasses visible="yes" title=""/>
+ <publictypes title=""/>
+ <services title=""/>
+ <interfaces title=""/>
+ <publicslots title=""/>
+ <signals title=""/>
+ <publicmethods title=""/>
+ <publicstaticmethods title=""/>
+ <publicattributes title=""/>
+ <publicstaticattributes title=""/>
+ <protectedtypes title=""/>
+ <protectedslots title=""/>
+ <protectedmethods title=""/>
+ <protectedstaticmethods title=""/>
+ <protectedattributes title=""/>
+ <protectedstaticattributes title=""/>
+ <packagetypes title=""/>
+ <packagemethods title=""/>
+ <packagestaticmethods title=""/>
+ <packageattributes title=""/>
+ <packagestaticattributes title=""/>
+ <properties title=""/>
+ <events title=""/>
+ <privatetypes title=""/>
+ <privateslots title=""/>
+ <privatemethods title=""/>
+ <privatestaticmethods title=""/>
+ <privateattributes title=""/>
+ <privatestaticattributes title=""/>
+ <friends title=""/>
+ <related title="" subtitle=""/>
+ <membergroups visible="yes"/>
+ </memberdecl>
+ <memberdef>
+ <inlineclasses title=""/>
+ <typedefs title=""/>
+ <enums title=""/>
+ <services title=""/>
+ <interfaces title=""/>
+ <constructors title=""/>
+ <functions title=""/>
+ <related title=""/>
+ <variables title=""/>
+ <properties title=""/>
+ <events title=""/>
+ </memberdef>
+ <allmemberslink visible="yes"/>
+ <usedfiles visible="$SHOW_USED_FILES"/>
+ <authorsection visible="yes"/>
+ </class>
+
+ <!-- Layout definition for a namespace page -->
+ <namespace>
+ <detaileddescription title=""/>
+ <memberdecl>
+ <nestednamespaces visible="yes" title=""/>
+ <constantgroups visible="yes" title=""/>
+ <classes visible="yes" title=""/>
+ <typedefs title=""/>
+ <enums title=""/>
+ <functions title=""/>
+ <variables title=""/>
+ <membergroups visible="yes"/>
+ </memberdecl>
+ <memberdef>
+ <inlineclasses title=""/>
+ <typedefs title=""/>
+ <enums title=""/>
+ <functions title=""/>
+ <variables title=""/>
+ </memberdef>
+ <authorsection visible="yes"/>
+ </namespace>
+
+ <!-- Layout definition for a file page -->
+ <file>
+ <detaileddescription title=""/>
+ <includes visible="$SHOW_INCLUDE_FILES"/>
+ <includegraph visible="$INCLUDE_GRAPH"/>
+ <includedbygraph visible="$INCLUDED_BY_GRAPH"/>
+ <sourcelink visible="yes"/>
+ <memberdecl>
+ <classes visible="yes" title=""/>
+ <namespaces visible="yes" title=""/>
+ <constantgroups visible="yes" title=""/>
+ <defines title=""/>
+ <typedefs title=""/>
+ <enums title=""/>
+ <functions title=""/>
+ <variables title=""/>
+ <membergroups visible="yes"/>
+ </memberdecl>
+ <memberdef>
+ <inlineclasses title=""/>
+ <defines title=""/>
+ <typedefs title=""/>
+ <enums title=""/>
+ <functions title=""/>
+ <variables title=""/>
+ </memberdef>
+ <authorsection/>
+ </file>
+
+ <!-- Layout definition for a group page -->
+ <group>
+ <detaileddescription title=""/>
+ <groupgraph visible="$GROUP_GRAPHS"/>
+ <memberdecl>
+ <nestedgroups visible="yes" title=""/>
+ <dirs visible="yes" title=""/>
+ <files visible="yes" title=""/>
+ <namespaces visible="yes" title=""/>
+ <classes visible="yes" title=""/>
+ <defines title=""/>
+ <typedefs title=""/>
+ <enums title=""/>
+ <enumvalues title=""/>
+ <functions title=""/>
+ <variables title=""/>
+ <signals title=""/>
+ <publicslots title=""/>
+ <protectedslots title=""/>
+ <privateslots title=""/>
+ <events title=""/>
+ <properties title=""/>
+ <friends title=""/>
+ <membergroups visible="yes"/>
+ </memberdecl>
+ <memberdef>
+ <pagedocs/>
+ <inlineclasses title=""/>
+ <defines title=""/>
+ <typedefs title=""/>
+ <enums title=""/>
+ <enumvalues title=""/>
+ <functions title=""/>
+ <variables title=""/>
+ <signals title=""/>
+ <publicslots title=""/>
+ <protectedslots title=""/>
+ <privateslots title=""/>
+ <events title=""/>
+ <properties title=""/>
+ <friends title=""/>
+ </memberdef>
+ <authorsection visible="yes"/>
+ </group>
+
+ <!-- Layout definition for a directory page -->
+ <directory>
+ <detaileddescription title=""/>
+ <directorygraph visible="yes"/>
+ <memberdecl>
+ <dirs visible="yes"/>
+ <files visible="yes"/>
+ </memberdecl>
+ </directory>
+</doxygenlayout>
diff --git a/src/seastar/doc/building-arch.md b/src/seastar/doc/building-arch.md
new file mode 100644
index 000000000..a565de89d
--- /dev/null
+++ b/src/seastar/doc/building-arch.md
@@ -0,0 +1,12 @@
+## Building Seastar on Arch
+
+Installing required packages:
+```
+sudo ./install-dependencies.sh
+```
+
+To compile Seastar use:
+```
+./configure.py --mode=release
+ninja -C build
+```
diff --git a/src/seastar/doc/building-centos.md b/src/seastar/doc/building-centos.md
new file mode 100644
index 000000000..b07761271
--- /dev/null
+++ b/src/seastar/doc/building-centos.md
@@ -0,0 +1,15 @@
+## Building Seastar on CentOS
+
+### Building seastar on CentOS 7
+
+Installing required packages:
+```
+sudo ./install-dependencies.sh
+./cooking.sh -r dev -i c-ares -i fmt -t Release
+```
+
+To compile Seastar explicitly using gcc 5, use:
+```
+CXX=/opt/scylladb/bin/g++ ./cooking.sh -i c-ares -i fmt -t Release
+ninja-build -C build
+```
diff --git a/src/seastar/doc/building-docker.md b/src/seastar/doc/building-docker.md
new file mode 100644
index 000000000..ef95c7903
--- /dev/null
+++ b/src/seastar/doc/building-docker.md
@@ -0,0 +1,34 @@
+## Building seastar in Docker container
+
+To build a Docker image:
+
+```
+docker build -t seastar-dev -f docker/dev/Dockerfile .
+```
+
+Building is done with two commands:
+
+```
+$ ./configure.py
+$ ninja -C build/release
+```
+
+You can run them inside container, e.g. like this
+
+```
+$ seabuild() { docker run -v $HOME/seastar/:/seastar -u $(id -u):$(id -g) -w /seastar -t seastar-dev "$@"; }
+$ seabuild ./configure.py
+$ seabuild ninja -C build/release
+```
+
+Alternatively there's a `scripts/build.sh` script with the usage of
+
+```
+build.sh <mode> [<compiler>] [<compiler version>] [<c++ dialect>]
+```
+
+that will do the above steps itself, e.g. the above example would be like
+
+```
+$ scripts/build.sh release
+```
diff --git a/src/seastar/doc/building-dpdk.md b/src/seastar/doc/building-dpdk.md
new file mode 100644
index 000000000..45491feef
--- /dev/null
+++ b/src/seastar/doc/building-dpdk.md
@@ -0,0 +1,11 @@
+## Building with a DPDK network backend
+
+ 1. Setup host to compile DPDK:
+ - Ubuntu
+ `sudo apt-get install -y build-essential linux-image-extra-$(uname -r$)`
+ 2. Configure the project with DPDK enabled: `./configure.py --mode=release --enable-dpdk`
+ 3. Run `ninja-build build/release`.
+
+To run with the DPDK backend for a native stack give the seastar application `--dpdk-pmd 1` parameter.
+
+You can also configure DPDK as an [external package](README-DPDK.md).
diff --git a/src/seastar/doc/building-fedora.md b/src/seastar/doc/building-fedora.md
new file mode 100644
index 000000000..c4a0965bb
--- /dev/null
+++ b/src/seastar/doc/building-fedora.md
@@ -0,0 +1,22 @@
+## Building Seastar on Fedora
+
+### Building seastar on Fedora 21 and later
+
+Installing required packages:
+```
+sudo ./install-dependencies.sh
+```
+
+You then need to run the following to create the build directory:
+```
+./configure.py --mode=release
+```
+Note it is enough to run this once, and you don't need to repeat it before
+every build.
+
+Then finally:
+```
+ninja-build -C build/release
+```
+
+In case there are compilation issues, especially ones like ```g++: internal compiler error: Killed (program cc1plus)```, try giving more memory to gcc, either by limiting the number of threads (`-j1`) and/or allowing at least 4 GB of RAM to your machine.
diff --git a/src/seastar/doc/building-ubuntu.md b/src/seastar/doc/building-ubuntu.md
new file mode 100644
index 000000000..4499492e2
--- /dev/null
+++ b/src/seastar/doc/building-ubuntu.md
@@ -0,0 +1,14 @@
+## Building Seastar on Ubuntu
+
+### Building seastar on Ubuntu 14.04/15.10/16.04
+
+Installing required packages:
+```
+sudo ./install-dependencies.sh
+```
+
+To compile Seastar explicitly using gcc 5, use:
+```
+CXX=g++-5 ./cooking.sh -i c-ares -i fmt -t Release
+ninja -C build
+```
diff --git a/src/seastar/doc/compatibility.md b/src/seastar/doc/compatibility.md
new file mode 100644
index 000000000..4d0d4fb38
--- /dev/null
+++ b/src/seastar/doc/compatibility.md
@@ -0,0 +1,144 @@
+Compatibility
+=============
+
+As a library, Seastar aims to maintain backwards compatibility
+in terms of the source (application code should continue to
+build with newer versions of Seastar) and any binary protocols
+that Seastar exposes (e.g. rpc).
+
+Link compatibility is not maintained - you cannot link an
+application built with one version of Seastar with another
+version of Seastar.
+
+Language standards
+==================
+
+Seastar will support the last two standards approved by the
+ISO C++ committee. For example, after C++20 is released,
+Seastar supports C++17 and C++20. Similarly, when C++23 is released,
+Seastar will support C++20 and C++23.
+
+Some features may only be enabled for newer dialects.
+
+
+Platforms
+=========
+
+Seastar supports Linux. There is no known minimum kernel version,
+but very old kernels might not work. Performance can be significantly
+better for newer kernels.
+
+Filesystem implementation quality can have significant effect on
+file I/O performance. XFS is known to be working, ext4 may work well
+too. Test your filesystem and kernel versions to be sure.
+
+Patches for new platforms (e.g, Windows) are welcome.
+
+
+Compilers
+=========
+
+Seastar supports gcc and clang. Ports to other compilers are
+welcome.
+
+The last two major releases of a compiler are supported (e.g.
+gcc 9 and gcc 10). Patches to support older versions are welcome,
+as long as they don't require onerous compromises.
+
+Deprecation
+===========
+
+Occasionally, we discover that we took the wrong approach with
+an API. In these cases we will offer a new API and tag the old
+API with the [[deprecated]] attribute. The deprecated API will
+be removed after a transition period (which can vary depending on
+how central the deprecated API is).
+
+Breaking changes
+================
+
+Rarely, we have to make breaking changes. We try to limit those,
+but sometimes there is no choice.
+
+To support a transition period for breaking changes, Seastar
+offers the Seastar_API_LEVEL cmake variable (and corresponding
+--api-level configure.py option). An API level selects different
+versions of the API. For example.
+
+ - Seastar_API_LEVEL=1 selects an old version of the
+ server_socket::accept() API that returns a variadic
+ future (which is deprecated)
+ - Seastar_API_LEVEL=2 selects a new version of the
+ server_socket::accept() API that returns a non-variadic
+ future
+ - Seastar_API_LEVEL=6 makes futures non-variadic
+
+Applications can use an old API_LEVEL during a transition
+period, fix their code, and move to the new API_LEVEL.
+
+Old API levels only live for a transition period, so if
+you are using an API level below the latest, you should
+upgrade quickly.
+
+Note the application should not refer to the `api_vN`
+sub-namespaces that Seastar defines as part of the API_LEVEL
+mechanism; these are internal.
+
+Internal namespace
+==================
+
+Identifiers in the `seastar::internal` namespace are not subject
+to source level compatibility and are subject to change or removal
+without notice. In addition the `api_vN` sub-namespaces are also
+internal.
+
+Accidentally exposed internal identifiers
+=========================================
+
+Some identifiers predate the internal namespace, and are only
+exposed accidentally. These can also be removed or changed. Exposed
+identifiers are documented using doxygen, but not all exposed
+APIs are documented. In case of doubt, ask on the mailing list.
+
+
+API Level History
+=================
+
+|Level|Introduced |Mandatory|Description |
+|:---:|:---------:|:-------:| -------------------------------------------- |
+| 2 | 2019-07 | 2020-04 | Non-variadic futures in socket::accept() |
+| 3 | 2020-05 | | make_file_data_sink() closes file and returns a future<> |
+| 4 | 2020-06 | | Non-variadic futures in when_all_succeed() |
+
+
+Note: The "mandatory" column indicates when backwards compatibility
+support for the API preceding the new level was removed.
+
+Implementation notes for API levels
+===================================
+
+API levels are implemented by defining internal sub-namespaces
+for each API level: `seastar::api_v1`, `seastar::api_v2` etc. `#ifdef`s
+are used to inline the user-selected API level namespace into the
+main `seastar` namespace, making it visible.
+
+Usually, the old API is implemented in terms of the new API to
+avoid code duplication.
+
+Here is an example about the transition from API_LEVEL 1 to 2. The
+transition from 2 to 3 and similar is analogous.
+
+Unconditionally:
+ - the new API is defined in sub-namespace `api_v2`
+
+If API_LEVEL is 2:
+ - `api_v2` namespace is inlined into the `seastar` namespace
+
+If API_LEVEL is 1:
+ - the old API is defined in sub-namespace `api_v1`
+ - `api_v1` is implemented in terms of `api_v2` to prevent code duplication
+ - `api_v1` namespace is inlined into the `seastar` namespace
+
+After a transition period:
+ - everything in `api_v1` is dropped
+ - `api_v2` is removed, and its contents is placed in the parent namespace
diff --git a/src/seastar/doc/contributing.md b/src/seastar/doc/contributing.md
new file mode 100644
index 000000000..d6a0cbce3
--- /dev/null
+++ b/src/seastar/doc/contributing.md
@@ -0,0 +1,30 @@
+Contributing to Seastar
+=======================
+
+# Sending Patches
+Seastar follows a patch submission similar to Linux. Send patches to seastar-dev, with a DCO signed off message. Use git send-email to send your patch.
+
+Example:
+
+1. When you commit, use "-s " in your git commit command, which adds a DCO signed off message. DCO is a "Developer's Certificate of Origin" http://elinux.org/Developer_Certificate_Of_Origin
+
+For the commit message, you can prefix a tag for an area of the codebase the patch is addressing
+
+ git commit -s -m "core: some descriptive commit message"
+
+2. then send an email to the google group
+
+ git send-email <revision>..<final_revision> --to seastar-dev@googlegroups.com
+
+NOTE: for sending replies to patches, use --in-reply-to with the message ID of the original message. Also, if you are sending out a new version of the change, use git rebase and then a `git send-email` with a `-v2`, for instance, to denote that it is a second version.
+
+# Testing and Approval
+Run test.py and ensure tests are passing (at least) as well as before the patch.
+
+
+
+
+
+
+
+
diff --git a/src/seastar/doc/htmlsplit.py b/src/seastar/doc/htmlsplit.py
new file mode 100755
index 000000000..119b9f325
--- /dev/null
+++ b/src/seastar/doc/htmlsplit.py
@@ -0,0 +1,171 @@
+#!/usr/bin/env python3
+
+# This script takes the single-page HTML output from pandoc - tutorial.html -
+# and splits it into many pages in split/: one page index.html for the table
+# of contents, and an additional page for each chapter. We make sure that
+# links from the TOC to each chapter, and also links across chapters,
+# continue to work correctly, and also had links from each chapter back to
+# the TOC, as well as to the next and previous chapters.
+
+
+# Copyright (C) 2018 ScyllaDB.
+#
+# This file is open source software, licensed to you under the terms
+# of the Apache License, Version 2.0 (the "License"). See the NOTICE file
+# distributed with this work for additional information regarding copyright
+# ownership. You may not use this file except in compliance with the License.
+#
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+from xml.etree import ElementTree
+import argparse
+import copy
+import os
+
# chapter number (int) -> chapter title; populated by handle_toc()
titles = {}
# section href (e.g. '#some-title') -> number of the chapter that
# contains the section; populated by handle_toc()
sections = {}
+
+
def add_elem_to_body(tree, e):
    """Append element *e* as the last child of the document's <body>."""
    matches = tree.iterfind('./body')
    next(matches).append(e)
+
+
def add_nav_to_body(tree, chap_num):
    # Append a navigation line to the page's <body>: a link back to the
    # table of contents, plus "Previous"/"Next" chapter links when the
    # neighbouring chapter number exists in the module-level `titles`
    # map (filled in earlier by handle_toc()).
    body = next(tree.iterfind('./body'))

    nav = ElementTree.SubElement(body, 'div')
    e = ElementTree.SubElement(nav, 'a',
                               href='index.html')
    e.text = 'Back to table of contents'
    e.tail = '.'
    # The separator text between links is built by appending to the
    # .tail of the previously-created <a>, so statement order matters.
    prev_index = chap_num - 1
    if prev_index in titles:
        e.tail += " Previous: "
        prev_title = titles[prev_index]
        e = ElementTree.SubElement(nav, 'a',
                                   href=f'{prev_index}.html')
        e.text = f'{prev_index} {prev_title}'
        e.tail = '.'
    next_index = chap_num + 1
    if next_index in titles:
        e.tail += " Next: "
        next_title = titles[next_index]
        e = ElementTree.SubElement(nav, 'a',
                                   href=f'{next_index}.html')
        e.text = f'{next_index} {next_title}'
        e.tail = '.'
+
+
def handle_toc(toc):
    """Scan the table of contents and populate the module-level maps.

    For each top-level chapter entry, record chapter number -> title in
    ``titles`` (used later for the Previous/Next navigation links), and
    record every '#anchor' href -> owning chapter number in ``sections``
    (used later by fix_links() to retarget cross-section links).
    """
    for chapter_item in toc.iterfind('./ul/li'):
        link = next(chapter_item.iterfind('./a[@href]'))
        number_span = next(link.iterfind(
            './span[@class="toc-section-number"]'))
        # The chapter title is the plain text following the number span.
        number = int(number_span.text)
        titles[number] = number_span.tail.strip()

        # The chapter's own anchor maps to its chapter number, and so
        # does every subsection anchor nested inside the chapter entry.
        sections[link.get('href')] = number
        for sub_link in chapter_item.iterfind('.//ul/li/a[@href]'):
            anchor = sub_link.get('href')
            if anchor.startswith('#'):
                sections[anchor] = number
+
+
def fix_links(e):
    """Retarget intra-document '#anchor' links under *e* for the split.

    A link like <a href="#some-title"> is valid in the single-page HTML
    but must become <a href="N.html#some-title"> after splitting, where
    N is the chapter containing the target section (looked up in the
    module-level ``sections`` map).
    """
    for anchor_elem in e.findall('.//a[@href]'):
        target = anchor_elem.get('href')
        if not target.startswith('#'):
            continue
        if target in sections:
            anchor_elem.set('href', f'{sections[target]}.html{target}')
+
+
def remove_ns_prefix(tree):
    """Strip the XHTML namespace prefix from every tag under *tree*, in place."""
    xhtml_ns = '{http://www.w3.org/1999/xhtml}'
    ns_len = len(xhtml_ns)
    for node in tree.iter():
        tag = node.tag
        if tag.startswith(xhtml_ns):
            node.tag = tag[ns_len:]
+
+
def get_chap_num(element):
    """Return the chapter number of a chapter heading (<h1>) element.

    Pandoc emits the number either as a 'data-number' attribute on the
    heading itself or as a child <span class="header-section-number">.
    Raises AssertionError if neither form carries a number.
    """
    # BUG FIX: the original body read the global loop variable `e`
    # instead of the `element` parameter; it only worked because the
    # sole caller happened to pass that same global.
    data_num = element.get('data-number')
    if data_num:
        return int(data_num)
    data_num = element.findtext('./span[@class="header-section-number"]')
    if data_num:
        return int(data_num)
    assert data_num, "section number not found"
+
+
parser = argparse.ArgumentParser()
parser.add_argument('--input')       # single-page HTML produced by pandoc
parser.add_argument('--output-dir')  # directory receiving index.html + N.html
args = parser.parse_args()

tree = ElementTree.parse(args.input)
# Strip the XHTML namespace so tags can be matched by bare name below.
# NOTE(review): remove_ns_prefix() already walks the whole subtree, so
# calling it for every element yielded by tree.iter() is redundant
# (though harmless); a single call on the root would suffice.
for e in tree.iter():
    remove_ns_prefix(e)
# The template is a copy of the document with an emptied <body>; each
# output page starts from this shell (preserving <head>, styles, etc.).
template = copy.deepcopy(tree.getroot())
template_body = next(template.iterfind('./body'))
template_body.clear()

# iterate through the children elements in body
# body element is composed of
# - header
# - toc
# - h1,h2,p,...
# h1 marks the beginning of a chapter

chap_num = 0      # number of the chapter currently being accumulated
chap_tree = None  # page under construction for that chapter
for e in next(tree.iterfind('./body')):
    if e.tag == 'header':
        # The document header (title block) goes into every page.
        template_body.append(e)
    elif e.get('id') == 'TOC':
        # The table of contents becomes index.html; handle_toc() also
        # fills the titles/sections maps used by all later pages.
        handle_toc(e)
        fix_links(e)
        toc_tree = ElementTree.ElementTree(copy.deepcopy(template))
        add_elem_to_body(toc_tree, e)
        toc_tree.write(os.path.join(args.output_dir, 'index.html'),
                       method='html')
    elif e.tag == 'h1':
        # A new chapter starts: flush the previous chapter page (if any)
        # using the old chap_num, then open a fresh page for this one.
        assert titles
        assert sections
        if chap_num > 0:
            add_nav_to_body(chap_tree, chap_num)
            chap_tree.write(os.path.join(args.output_dir, f'{chap_num}.html'),
                            method='html')
        chap_num = get_chap_num(e)
        chap_tree = ElementTree.ElementTree(copy.deepcopy(template))
        # Navigation links appear at the top of the page, before content.
        add_nav_to_body(chap_tree, chap_num)
        add_elem_to_body(chap_tree, e)
    else:
        # Ordinary chapter content: rewrite cross-section links, then
        # append to the current chapter page.
        assert chap_tree is not None
        fix_links(e)
        add_elem_to_body(chap_tree, e)

# Flush the final chapter (the loop above only writes a chapter page
# when the next chapter begins).
add_nav_to_body(chap_tree, chap_num)
chap_tree.write(os.path.join(args.output_dir, f'{chap_num}.html'),
                method='html')
diff --git a/src/seastar/doc/io-properties-file.md b/src/seastar/doc/io-properties-file.md
new file mode 100644
index 000000000..750319077
--- /dev/null
+++ b/src/seastar/doc/io-properties-file.md
@@ -0,0 +1,41 @@
+# Specifying the I/O properties of a system
+
+The I/O properties of a system can be specified as a YAML string, by
+using the option --io-properties, or as a YAML file with the option
+--io-properties-file.
+
+The expected format starts with a map of sections at the top level.
+Currently only `disks` is supported.
+
+## The disks section
+
+Inside the `disks` section, the user can specify a list of mount points.
+
+For each mount point, 4 properties have to be specified (none are
+optional):
+
+* `read_iops`: read IOPS speed of the device
+* `read_bandwidth`: read bandwidth speed of the device
+* `write_iops`: write IOPS speed of the device
+* `write_bandwidth`: write bandwidth speed of the device
+
+
+Additionally the following optional properties can be added:
+
+* `read_saturation_length`: read buffer length to saturate the device throughput
+* `write_saturation_length`: write buffer length to saturate the device throughput
+
+Those quantities can be specified in raw form, or followed with a
+suffix (k, M, G, or T).
+
+Example:
+
+```
+disks:
+ - mountpoint: /var/lib/some_seastar_app
+ read_iops: 95000
+ read_bandwidth: 545M
+ write_iops: 85000
+ write_bandwidth: 510M
+ write_saturation_length: 64k
+```
diff --git a/src/seastar/doc/io-tester.md b/src/seastar/doc/io-tester.md
new file mode 100644
index 000000000..6c75072e1
--- /dev/null
+++ b/src/seastar/doc/io-tester.md
@@ -0,0 +1,81 @@
+## I/O Tester utility
+
+The I/O Tester utility, `io_tester` generates a user-defined I/O pattern
+spanning one or multiple shards that is designed to simulate the I/O behavior
+of a complex Seastar application.
+
+# Running I/O tester:
+
+I/O tester takes the same options as Seastar, and those options may be used
+to test the behavior of I/O under the circumstances established by those
+options. For instance, one may adjust the `--task-quota-ms` option to see
+if that affects higher percentile latencies.
+
+Aside from the usual seastar options, I/O tester accepts the following options:
+
+* `duration`: for how long to run the evaluation,
+* `directory`: a directory where to run the evaluation (it must be on XFS),
+* `conf`: the path to a YAML file describing the evaluation.
+
+# Describing the evaluation
+
+The evaluation is described in a YAML file that contains multiple classes.
+Each class spans jobs of similar characteristics in different shards and (for now)
+all jobs run concurrently.
+
+The YAML file contains a list of maps where each element of the list describes a class.
+A class has some properties that are common to all elements of the class, and a nested map
+that contains properties of a job (a class instance in a shard).
+
+For example:
+
+```
+- name: big_writes
+ type: seqread
+ shards: all
+ shard_info:
+ parallelism: 10
+ reqsize: 256kB
+ shares: 10
+ think_time: 0
+```
+
+* `name`: mandatory property, a string that identifies jobs of this class
+* `type`: mandatory property, one of seqread, seqwrite, randread, randwrite, append, cpu
+* `shards`: mandatory property, either the string "all" or a list of shards where this class should place jobs.
+
+The properties under `shard_info` represent properties of the job that will
+be replicated to each shard. All properties under `shard_info` are optional, and in case not specified, defaults are used.
+
+* `parallelism`: the amount of parallel requests this job will generate in a specific shard. Requests can be either active or thinking (see `think_time`)
+* `reqsize` : (I/O loads only) the size of requests generated by this job
+* `shares` : how many shares requests in this job will have in the scheduler
+* `think_time`: how long to wait before submitting another request in this job once one finishes.
+* `execution_time`: (cpu loads only) for how long to execute a CPU loop
+
+# Example output
+
+```
+ Creating initial files...
+ Starting evaluation...
+ Shard 0
+ Class 0(big_writes: 10 shares, 262144-byte SEQ WRITE, 10 concurrent requests, NO think time)
+ Throughput : 436556 KB/s
+ Lat average : 5847 usec
+ Lat quantile= 0.5 : 2678 usec
+ Lat quantile= 0.95 : 13029 usec
+ Lat quantile= 0.99 : 20835 usec
+ Lat quantile=0.999 : 246090 usec
+ Lat max : 450785 usec
+```
+
+# Future
+
+Some ideas for extending I/O tester:
+
+* allow properties like think time, request size, etc, to be specified as distributions instead of a fixed number
+* allow classes to have class-wide properties. For instance, we could define a class with parallelism of 100, and distribute those 100 requests over all shards in which this class is placed
+* allow some jobs to be executed sequentially in relationship to others, so we can have preparation jobs.
+* support other types, like delete, fsync, etc.
+* provide functionality similar to diskplorer.
+
diff --git a/src/seastar/doc/lambda-coroutine-fiasco.md b/src/seastar/doc/lambda-coroutine-fiasco.md
new file mode 100644
index 000000000..e679ccd63
--- /dev/null
+++ b/src/seastar/doc/lambda-coroutine-fiasco.md
@@ -0,0 +1,100 @@
+# The Lambda Coroutine Fiasco
+
+Lambda coroutines and Seastar APIs that accept continuations interact badly. This
+document explains the bad interaction and how it is mitigated.
+
+## Lambda coroutines revisited
+
+A lambda coroutine is a lambda function that is also a coroutine due
+to the use of the coroutine keywords (typically co_await). A lambda
+coroutine is notionally translated by the compiler into a struct with a
+function call operator:
+
+```cpp
+[captures] (arguments) -> seastar::future<> {
+ body
+ co_return result
+}
+```
+
+becomes (more or less)
+
+```cpp
+struct lambda {
+ captures;
+ seastar::future<> operator()(arguments) const {
+ body
+ }
+};
+```
+
+## Lambda coroutines and coroutine argument capture
+
+In addition to a lambda capturing variables from its environment, the
+coroutine also captures its arguments. This capture can happen by value
+or reference, depending on how each argument is declared.
+
+The lambda's captures however are captured by reference. To understand why,
+consider that the coroutine translation process notionally transforms a member function
+(`lambda::operator()`) to a free function:
+
+```cpp
+// before
+seastar::future<> lambda::operator()(arguments) const;
+
+// after
+seastar::future<> lambda_call_operator(const lambda& self, arguments);
+```
+
+This transform means that the lambda structure, which contains all the captured variables,
+is itself captured by the coroutine by reference.
+
+## Interaction with Seastar APIs accepting continuations
+
+Consider a Seastar API that accepts a continuation, such as
+`seastar::future::then(Func continuation)`. The behavior
+is that `continuation` is moved or copied into a private memory
+area managed by `then()`. Sometime later, the continuation is
+executed (`Func::operator()`) and the memory area is freed.
+Crucially, the memory area is freed as soon as `Func::operator()`
+returns, which can be before the future returned by it becomes
+ready. However, the coroutine can access the lambda captures
+stored in this memory area after the future is returned and before
+it becomes ready. This is a use-after-free.
+
+## Solution
+
+The solution is to avoid copying or moving the lambda into
+the memory area managed by `seastar::future::then()`. Instead,
+the lambda spends its life as a temporary. We then rely on C++
+temporary lifetime extension rules to extend its life until the
+future returned is ready, at which point the captures can no longer
+be accessed.
+
+```cpp
+ co_await seastar::yield().then(seastar::coroutine::lambda([captures] () -> future<> {
+ co_await seastar::coroutine::maybe_yield();
+ // Can use `captures` here safely.
+ }));
+```
+
+`seastar::coroutine::lambda` is very similar to `std::reference_wrapper` (the
+only difference is that it works with temporaries); it can be safely moved to
+the memory area managed by `seastar::future::then()` since it's only used
+to call the real lambda, and then is safe to discard.
+
+## Alternative solution when lifetime extension cannot be used.
+
+If the lambda coroutine is not co_await'ed immediately, we cannot rely on
+lifetime extension and so we must name the coroutine and use `std::ref()` to
+refer to it without copying it from the coroutine frame:
+
+```cpp
+ auto a_lambda = [captures] () -> future<> {
+ co_await seastar::coroutine::maybe_yield();
+ // Can use `captures` here safely.
+ };
+ auto f = seastar::yield().then(std::ref(a_lambda));
+ co_await std::move(f);
+```
+
diff --git a/src/seastar/doc/md2html b/src/seastar/doc/md2html
new file mode 100755
index 000000000..68a06aa1a
--- /dev/null
+++ b/src/seastar/doc/md2html
@@ -0,0 +1,31 @@
+#!/bin/sh
+# This file is open source software, licensed to you under the terms
+# of the Apache License, Version 2.0 (the "License"). See the NOTICE file
+# distributed with this work for additional information regarding copyright
+# ownership. You may not use this file except in compliance with the License.
+#
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+# Copyright (C) 2018 ScyllaDB
+
+# Usage: md2html <input.md> <output.html>
+dir=$(dirname "$0")
+
+# pandoc 1.x selects smart typography with the --smart option; pandoc 2.x
+# replaced it with the "+smart" markdown extension.  Pick the right form.
+case `pandoc --version` in
+"pandoc 1."*)
+	SMART_OPT=--smart
+	SMART_EXT=
+	;;
+*)
+	SMART_OPT=
+	SMART_EXT=+smart
+esac
+# Render a standalone HTML page with the embedded stylesheet, a table of
+# contents, and numbered sections.
+pandoc --self-contained $SMART_OPT --toc -c ${dir}/template.css --number-sections -f markdown+pandoc_title_block+implicit_header_references$SMART_EXT -V lang=en --highlight-style tango "$1" -o "$2"
diff --git a/src/seastar/doc/md2pdf b/src/seastar/doc/md2pdf
new file mode 100755
index 000000000..19c9e094a
--- /dev/null
+++ b/src/seastar/doc/md2pdf
@@ -0,0 +1,23 @@
+#!/bin/sh
+# This file is open source software, licensed to you under the terms
+# of the Apache License, Version 2.0 (the "License"). See the NOTICE file
+# distributed with this work for additional information regarding copyright
+# ownership. You may not use this file except in compliance with the License.
+#
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+# Copyright (C) 2018 ScyllaDB
+
+dir=$(dirname "$0")
+
+pandoc -f markdown+pandoc_title_block+implicit_header_references --highlight-style tango --template=${dir}/template.tex "$1" -o "$2"
+
diff --git a/src/seastar/doc/mini-tutorial.md b/src/seastar/doc/mini-tutorial.md
new file mode 100644
index 000000000..9c6eb2575
--- /dev/null
+++ b/src/seastar/doc/mini-tutorial.md
@@ -0,0 +1,202 @@
+Futures and promises
+--------------------
+
+A *future* is a result of a computation that may not be available yet.
+Examples include:
+
+ * a data buffer that we are reading from the network
+ * the expiration of a timer
+ * the completion of a disk write
+ * the result of a computation that requires the values from
+ one or more other futures.
+
+a *promise* is an object or function that provides you with a future,
+with the expectation that it will fulfill the future.
+
+Promises and futures simplify asynchronous programming since they decouple
+the event producer (the promise) and the event consumer (whoever uses the
+future). Whether the promise is fulfilled before the future is consumed,
+or vice versa, does not change the outcome of the code.
+
+Consuming a future
+------------------
+
+You consume a future by using its *then()* method, providing it with a
+callback (typically a lambda). For example, consider the following
+operation:
+
+```C++
+future<int> get(); // promises an int will be produced eventually
+future<> put(int); // promises to store an int
+
+void f() {
+ get().then([] (int value) {
+ put(value + 1).then([] {
+ std::cout << "value stored successfully\n";
+ });
+ });
+}
+```
+
+Here, we initiate a *get()* operation, requesting that when it completes, a
+*put()* operation will be scheduled with an incremented value. We also
+request that when the *put()* completes, some text will be printed out.
+
+Chaining futures
+----------------
+
+If a *then()* lambda returns a future (call it x), then that *then()*
+will return a future (call it y) that will receive the same value. This
+removes the need for nesting lambda blocks; for example the code above
+could be rewritten as:
+
+```C++
+future<int> get(); // promises an int will be produced eventually
+future<> put(int); // promises to store an int
+
+void f() {
+ get().then([] (int value) {
+ return put(value + 1);
+ }).then([] {
+ std::cout << "value stored successfully\n";
+ });
+}
+```
+
+Loops
+-----
+
+Loops are achieved with a tail call; for example:
+
+```C++
+future<int> get(); // promises an int will be produced eventually
+future<> put(int); // promises to store an int
+
+future<> loop_to(int end) {
+ if (value == end) {
+ return make_ready_future<>();
+ }
+    return get().then([end] (int value) {
+ return put(value + 1);
+ }).then([end] {
+ return loop_to(end);
+ });
+}
+```
+
+The *make_ready_future()* function returns a future that is already
+available --- corresponding to the loop termination condition, where
+no further I/O needs to take place.
+
+Under the hood
+--------------
+
+When the loop above runs, both *then* method calls execute immediately
+--- but without executing the bodies. What happens is the following:
+
+1. `get()` is called, initiates the I/O operation, and allocates a
+ temporary structure (call it `f1`).
+2. The first `then()` call chains its body to `f1` and allocates
+ another temporary structure, `f2`.
+3. The second `then()` call chains its body to `f2`.
+
+Again, all this runs immediately without waiting for anything.
+
+After the I/O operation initiated by `get()` completes, it calls the
+continuation stored in `f1`, calls it, and frees `f1`. The continuation
+calls `put()`, which initiates the I/O operation required to perform
+the store, and allocates a temporary object `f12`, and chains some glue
+code to it.
+
+After the I/O operation initiated by `put()` completes, it calls the
+continuation associated with `f12`, which simply tells it to call the
+continuation associated with `f2`. This continuation simply calls
+`loop_to()`. Both `f12` and `f2` are freed. `loop_to()` then calls
+`get()`, which starts the process all over again, allocating new versions
+of `f1` and `f2`.
+
+Handling exceptions
+-------------------
+
+If a `.then()` clause throws an exception, the scheduler will catch it
+and cancel any dependent `.then()` clauses. If you want to trap the
+exception, add a `.then_wrapped()` clause at the end:
+
+```C++
+future<buffer> receive();
+request parse(buffer buf);
+future<response> process(request req);
+future<> send(response resp);
+
+void f() {
+ receive().then([] (buffer buf) {
+        return process(parse(std::move(buf)));
+ }).then([] (response resp) {
+ return send(std::move(resp));
+ }).then([] {
+ f();
+ }).then_wrapped([] (auto&& f) {
+ try {
+ f.get();
+ } catch (std::exception& e) {
+ // your handler goes here
+ }
+ });
+}
+```
+
+The previous future is passed as a parameter to the lambda, and its value can
+be inspected with `f.get()`. When `get()` is called, it will re-throw
+the exception that aborted processing, and you can
+then apply any needed error handling. It is essentially a transformation of
+
+```C++
+buffer receive();
+request parse(buffer buf);
+response process(request req);
+void send(response resp);
+
+void f() {
+ try {
+ while (true) {
+ auto req = parse(receive());
+ auto resp = process(std::move(req));
+ send(std::move(resp));
+ }
+ } catch (std::exception& e) {
+ // your handler goes here
+ }
+}
+```
+
+Note, however, that the `.then_wrapped()` clause will be scheduled whether an
+exception occurs or not. Therefore, the mere fact that `.then_wrapped()` is
+executed does not mean that an exception was thrown. Only the execution of the
+catch block can guarantee that.
+
+
+This is shown below:
+
+```C++
+
+future<my_type> receive();
+
+void f() {
+ receive().then_wrapped([] (future<my_type> f) {
+ try {
+ my_type x = f.get();
+ return do_something(x);
+ } catch (std::exception& e) {
+ // your handler goes here
+ }
+ });
+}
+```
+### Setup notes
+
+SeaStar is a high performance framework and tuned to get the best
+performance by default. As such, we're tuned towards polling vs interrupt
+driven. Our assumption is that applications written for SeaStar will be
+busy handling 100,000 IOPS and beyond. Polling means that each of our
+cores will consume 100% cpu even when no work is given to it.
+
diff --git a/src/seastar/doc/native-stack.md b/src/seastar/doc/native-stack.md
new file mode 100644
index 000000000..92c519f97
--- /dev/null
+++ b/src/seastar/doc/native-stack.md
@@ -0,0 +1,54 @@
+Seastar Native TCP/IP Stack
+---------------------------
+
+Seastar comes with a native, sharded TCP/IP stack. Usually it is used with the [DPDK](building-dpdk.md) environment, but there are also vhost drivers for testing in a development environment.
+
+To enable the native network stack, pass the `--network-stack native` parameter to a seastar application.
+
+To test the native stack without dpdk, install and start the `libvirt` daemon. This will create a bridge device named `virbr0`, which seastar will connect to.
+
+Seastar's vhost driver will need a tap device to connect to. The scripts `scripts/tap.sh` will set up a tap device and bind it to `virbr0`:
+
+ $ sh ./scripts/tap.sh
+ Set 'tap0' nonpersistent
+ bridge name bridge id STP enabled interfaces
+ virbr0 8000.5254008be729 no tap0
+ virbr0-nic
+ virbr0: flags=4163<UP,BROADCAST,RUNNING,MULTICAST> mtu 1500
+ inet 192.168.122.1 netmask 255.255.255.0 broadcast 192.168.122.255
+ ether 52:54:00:8b:e7:29 txqueuelen 1000 (Ethernet)
+ RX packets 384938 bytes 21866184 (20.8 MiB)
+ RX errors 0 dropped 0 overruns 0 frame 0
+ TX packets 547098 bytes 2508723098 (2.3 GiB)
+ TX errors 0 dropped 0 overruns 0 carrier 0 collisions 0
+
+You can now run a seastar application; for example, the http server:
+
+ $ ./build/release/apps/httpd/httpd --network-stack native
+ DHCP sending discover
+ DHCP Got offer for 192.168.122.18
+ DHCP sending request for 192.168.122.18
+ DHCP Got ack on request
+ DHCP ip: 192.168.122.18
+ DHCP nm: 255.255.255.0
+ DHCP gw: 192.168.122.1
+ Seastar HTTP server listening on port 10000 ...
+
+You can now ping the IP address shown (`192.168.122.18`) or connect to it:
+
+ $ ping 192.168.122.18
+ PING 192.168.122.18 (192.168.122.18) 56(84) bytes of data.
+ 64 bytes from 192.168.122.18: icmp_seq=1 ttl=64 time=0.160 ms
+ 64 bytes from 192.168.122.18: icmp_seq=2 ttl=64 time=0.110 ms
+ 64 bytes from 192.168.122.18: icmp_seq=3 ttl=64 time=0.116 ms
+ 64 bytes from 192.168.122.18: icmp_seq=4 ttl=64 time=0.112 ms
+ 64 bytes from 192.168.122.18: icmp_seq=5 ttl=64 time=0.093 ms
+ 64 bytes from 192.168.122.18: icmp_seq=6 ttl=64 time=0.108 ms
+ ^C
+ --- 192.168.122.18 ping statistics ---
+ 6 packets transmitted, 6 received, 0% packet loss, time 4999ms
+ rtt min/avg/max/mdev = 0.093/0.116/0.160/0.023 ms
+
+ $ curl http://192.168.122.18:10000/
+ "hello"
+
diff --git a/src/seastar/doc/network-configuration.md b/src/seastar/doc/network-configuration.md
new file mode 100644
index 000000000..404b4690a
--- /dev/null
+++ b/src/seastar/doc/network-configuration.md
@@ -0,0 +1,65 @@
+Network Configuration
+---------------------
+
+In order to support multiple network devices in Seastar, a new network configuration format has been introduced.
+The new format is YAML based and contains a list of network devices along with their IP parameters and optional (if DPDK is used) hardware parameters.
+The new configuration can be provided either on the command line with the --net-config option or in a configuration file with --net-config-file.
+
+### DPDK access
+Network device (called port in DPDK) can be accessed by either port index ( zero based index of device shown by dpdk-setup.sh ) or its PCI address (shown by lspci, lshw tools)
+
+Example config line with pci address given:
+
+```
+eth0: {pci_address: 0000:06:00.0, ip: 192.168.100.10, gateway: 192.168.100.1, netmask: 255.255.255.0 }
+```
+
+Example config line with port index given:
+
+```
+eth0: {port_index: 0, ip: 192.168.100.10, gateway: 192.168.100.1, netmask: 255.255.255.0 }
+```
+
+Please note that device name - eth0 above, is not used by DPDK itself, it remains only for configuration consistency.
+The hardware configuration has to be specified in the same way across all network devices, so for example if pci_address is specified for one network device, port_index cannot be specified for any other.
+
+
+## Non-DPDK access
+When neither pci_address nor port_index is defined, non-DPDK access provided by the libvirt daemon is assumed (see native-stack.md), e.g.:
+
+```
+virbr0: { ip: 192.168.100.10, gateway: 192.168.100.1, netmask: 255.255.255.0 }
+```
+
+## Other hardware related options
+
+There are other hardware related optional options, some of the are common for both DPDK and non-DPDK mode
+- lro ( large receive offload ), boolean, default true
+- tso ( tcp segmentation offload ), boolean, default true
+- ufo ( udp fragmentation offload ), boolean, default true
+- hw-fc ( hardware flow control ), boolean, default true,
+- csum-offload ( IP checksum offload ), boolean, default true
+- ring-size ( device ring buffer size ), unsigned, default 256, libvirt only
+- event-index ( VIRTIO_RING_F_EVENT_IDX support enabled ), boolean, default true, libvirt only
+
+
+## DHCP
+
+IP configuration can be set by either IP/gateway/netmask (as seen in examples above), but also by DHCP with dhcp=true setting, eg:
+
+```
+eth0: {pci_address: 0000:06:00.0, dhcp=true}
+```
+
+DHCP can be selected per network device, so it would also be perfectly valid to define dhcp for eth0, but ip/netmask/gateway for eth1.
+
+
+## Multiple devices
+Configuration format for multiple devices is a comma separated list of single devices with added YAML brackets, e.g.:
+
+```
+{virbr0: { ip: 192.168.100.10, gateway: 192.168.100.1, netmask: 255.255.255.0 } , virbr1: { dhcp: true } }
+```
+
+
+
diff --git a/src/seastar/doc/network-connection-load-balancing.md b/src/seastar/doc/network-connection-load-balancing.md
new file mode 100644
index 000000000..2069ae913
--- /dev/null
+++ b/src/seastar/doc/network-connection-load-balancing.md
@@ -0,0 +1,49 @@
+# Motivation
+
+In sharded systems like seastar it is important for work to be
+distributed equally between all shards to achieve maximum performance
+from the system. Networking subsystem has its part in distributing work
+equally. For instance if on a server all connections will be served by
+single shard only, the system will be working with the speed of this
+one shard and all other shards will be underutilized.
+
+# Common ways to distribute work received over network between shards
+
+Two common ways to distribute work between shards are:
+ - do the work at a shard that received it
+ - shard that does actual work depends on the data being processed
+ (one way to do it is to hash(data) % smp_count = shard,
+ another way is to bind shards to different server addresses)
+
+# Load Balancing
+
+Those two ways ask for different strategies to distribute connections
+between shards. The first one will work best if each cpu will have the
+same amount of connections (assuming each connection gets same amount of
+works) the second will work best if data will arrive to a shard where
+it is going to be processed and actual connection distribution does
+not matter.
+
+Seastar's posix stack supports both of those strategies. Desired
+one can be chosen by specifying load balancing algorithm in
+listen_options provided to reactor::listen() call. Available options
+are:
+
+- load_balancing_algorithm::connection_distribution
+
+ Make sure that new connection will be placed to a shard with smallest
+ amount of connections of the same type.
+
+- load_balancing_algorithm::port
+
+ Destination shard is chosen as a function of client's local port:
+ shard = port_number % num_shards. This allows a client to make sure that
+ a connection will be processed by a specific shard by choosing its local
+ port accordingly (the knowledge about amount of shards in the server is
+ needed and can be negotiated by different channel).
+
+- load_balancing_algorithm::fixed
+
+ Destination shard is statically configured in listen_options::fixed_cpu. This
+ allows a client to make sure that a connection to a server address will be
+ established in a specific shard, without any further negotiations.
diff --git a/src/seastar/doc/prometheus.md b/src/seastar/doc/prometheus.md
new file mode 100644
index 000000000..6ae30ed46
--- /dev/null
+++ b/src/seastar/doc/prometheus.md
@@ -0,0 +1,72 @@
+# The Prometheus Protocol
+
+Seastar supports the Prometheus protocol for metrics reporting.
+Supported exposition formats are the 0.0.4 text and protocol buffer formats.
+
+More on the formats can be found at the [Prometheus documentations](https://prometheus.io/docs/instrumenting/exposition_formats/)
+
+By default, Seastar would listen on port `9180` and the `localhost`.
+
+See the Seastar configuration documentation on how to change the default configuration.
+
+Seastar would reply based on the content type header, so pointing your browser to:
+`http://localhost:9180/metrics/` will return a text representation of the metrics with their documentation.
+
+Starting from Prometheus 2.0, the binary protocol is no longer supported.
+While seastar still supports the binary protocol, it would be deprecated in a future release.
+
+## Querying subset of the metrics
+Seastar supports querying for a subset of the metrics by their names and labels.
+
+### Filtering by a metric name
+Use the `__name__` query parameter to select according to a metric name or a prefix.
+
+For example, to get all the http metrics, point your browser to:
+`http://localhost:9180/metrics?__name__=http*` note the asterisk symbol following the http.
+Filtering by name only supports prefix matching.
+
+To query for only the http requests served metric, point your browser to `http://localhost:9180/metrics?__name__=httpd_requests_served`
+
+### Filtering by a label value
+The Prometheus protocol uses labels to differentiate the characteristics of the thing that is being measured.
+For example, in Seastar, it is common to report each metric per shard and add a `shard` label to the metric.
+
+You can filter by any label using regular expressions. If you use multiple labels in your query, all conditions should be met.
+A missing label is considered an empty string. The expression should match the entire label value,
+to match a missing label, you can use `label=` or `label=^$`.
+
+Here are a few examples:
+
+To return all metrics from shard 1 or shard 0:
+http://localhost:9180/metrics?shard=1|0
+
+To get all metrics without a `service` label:
+http://localhost:9180/metrics?service=
+
+To get all metrics with a `service` label equals `prometheus` and from shard `0`:
+http://localhost:9180/metrics?service=prometheus&shard=0
+
+## Remove the help lines
+Sending the help associated with each metric on each request is an overhead.
+Prometheus itself does not use those help lines.
+Seastar supports an option to remove those lines from the metrics output using the `__help__` query parameter.
+To remove the help lines set `__help__=false`
+for example:
+`http://localhost:9180/metrics?__help__=false`
+
+### Configuring the Prometheus server for picking specific metrics
+The [Prometheus configuration](https://prometheus.io/docs/prometheus/1.8/configuration/configuration/) describes the general Prometheus configuration.
+
+To specify a specific metric or metrics, add a `metrics_path` to the scrape config in the prometheus.yml file.
+
+For example, the following scrape config will query for all the http metrics:
+
+```
+ scrape_configs:
+ - job_name: http
+ honor_labels: true
+ metrics_path: /metrics
+ params:
+ __name__: ['http*']
+```
+
diff --git a/src/seastar/doc/rpc-compression.md b/src/seastar/doc/rpc-compression.md
new file mode 100644
index 000000000..7d62074ee
--- /dev/null
+++ b/src/seastar/doc/rpc-compression.md
@@ -0,0 +1,26 @@
+# RPC provided compression infrastructure
+
+## Compression algorithm negotiation
+
+RPC protocol only defines `COMPRESS` feature bit but does not define format of its data.
+If application supports multiple compression algorithms it may use the data for algorithm
+negotiation. RPC provides convenience class `multi_algo_compressor_factory` to do it
+so that each application will not have to re-implement the same logic. The class gets list
+of supported compression algorithms and send them as comma separated list in the client `COMPRESS`
+feature payload. On receiving of the list it matches common algorithm between client and server.
+In case there is more than one the order of algorithms in client's list is considered to be a tie
+breaker (first algorithm wins). Once a compressor is chosen by the server, it puts the identifier of
+this in the returned `COMPRESS` feature payload, informing the client of which algorithm should be used
+for the connection.
+
+## Compression algorithms
+
+### `LZ4` compressor
+
+This compressor uses LZ4 to compress and decompress RPC messages. It requires all memory buffers to be contiguous, which may force it to temporarily linearise fragmented messages. LZ4 is fast enough to often make the cost of those copies not negligible compared to the cost of the whole compression or decompression routine. Therefore, this algorithm is best suited if there is an upper bound of the message size and they are expected to fit in a single fragment of input and output memory buffers.
+
+### `LZ4_FRAGMENTED` compressor
+
+This compressor uses LZ4 streaming interface to compress and decompress even large messages without linearising them. The LZ4 streaming routines tend to be slower than the basic ones and the general logic for handling buffers is more complex, so this compressor is best suited only when there is no clear upper bound on the message size or if the messages are expected to be fragmented.
+
+Internally, the compressor processes data in a 32 kB chunks and tries to avoid unnecessary copies as much as possible. It is therefore, recommended, that the application uses memory buffer fragment sizes that are an integral multiple of 32 kB.
diff --git a/src/seastar/doc/rpc-streaming.md b/src/seastar/doc/rpc-streaming.md
new file mode 100644
index 000000000..497be8e95
--- /dev/null
+++ b/src/seastar/doc/rpc-streaming.md
@@ -0,0 +1,124 @@
+# RPC streaming
+
+## Streaming API
+
+### Sink and Source
+
+The basic elements of the streaming API are `rpc::sink` and `rpc::source`. The former
+is used to send data and the latter is used to receive it. Client and server
+each have their own pair of sink and source. `rpc::sink` and `rpc::source` are
+templated classes where template parameters describe a type of the data
+that is sent/received. For instance the sink that is used to send messages
+containing `int` and `long` will be of a type `rpc::sink<int, long>`. The
+opposite end of the stream will have a source of the type `rpc::source<int, long>`
+which will be used to receive those messages. Messages are received at a
+source as `std::optional` containing an actual message as an `std::tuple`. An unengaged
+optional means EOS (end of stream) - the stream was closed by a peer. If an
+error happens before EOS is received, a receiver cannot be sure it received all
+the data.
+
+To send the data using `rpc::sink<int, long>` one can write (assuming `seastar::async` context):
+
+```cpp
+ while (has_data()) {
+ int data1 = get_data1();
+ long data2 = get_data2();
+ sink(data1, data2).get(); // sends data
+ }
+ sink.close().get(); // closes stream
+```
+
+To receive:
+
+```cpp
+ while (true) {
+        std::optional<std::tuple<int, long>> data = source().get0();
+ if (!data) {
+ // unengaged optional means EOS
+ break;
+ } else {
+ auto [data1, data2] = *data;
+ // process data
+ }
+ }
+```
+
+### Creating a stream
+
+To open an RPC stream one needs an RPC client to be created already. The stream
+will be associated with the client and will be aborted if the client is closed
+before the streaming is. Given an RPC client `rc`, and a `serializer` class that models the Serializer concept (as explained in the rpc::protocol class), one creates an `rpc::sink` as follows
+(again assuming `seastar::async` context):
+
+```cpp
+ rpc::sink<int, long> sink = rc.make_stream_sink<serializer, int, long>().get0();
+```
+
+Now the client has the sink that can be used for streaming data to
+a server, but how will the server get a corresponding `rpc::source` to
+read it? For that, the sink should be passed to the server by an RPC
+call. To receive a sink, a server should register an RPC handler that will
+be used to receive it along with any auxiliary information deemed necessary.
+To receive the sink above one may register an RPC handler like this:
+
+```cpp
+ rpc_proto.register_handler(1, [] (int aux_data, rpc::source<int, long> source) {
+ });
+```
+
+Notice that `rpc::sink` is received as an `rpc::source` since at the server
+side it will be used for receiving. Now all that is left to do is for the client to
+invoke this RPC handler with `aux_data` and the sink.
+
+But what about communicating in another direction: from a server to a
+client. For that a server also has to have a sink and a client has to have
+a source and since messages in this direction may be of a different type
+than from client to server the sink and the source may be of a different
+type as well.
+
+The server initiates creation of a communication channel in the other direction.
+It does this by creating a sink from the source it receives and returning the sink
+from the RPC handler, which will cause it to be received as a source by the client. Let's look
+at the full example where the server wants to send messages containing `sstring` to a client.
+
+Server handler will look like that:
+
+```cpp
+ rpc_proto.register_handler(1, [] (int aux_data, rpc::source<int, long> source) {
+ rpc::sink<sstring> sink = source.make_sink<serializer, sstring>();
+ // use sink and source asynchronously
+ return sink;
+ });
+```
+
+Client code will be:
+
+```cpp
+    auto rpc_call = rpc_proto.make_client<rpc::source<sstring> (int, rpc::sink<int, long>)>(1);
+ rpc::sink<int, long> sink = rc.make_stream_sink<serializer, int, long>().get0();
+ rpc::source<sstring> source = rpc_call(rc, aux_data, sink).get0();
+ // use sink and source here
+```
+
+## Implementation notes
+
+### RPC stream creation
+
+RPC stream is implemented as a separate TCP connection. RPC server knows that a connection
+will be used for streaming if during RPC negotiation `Stream parent` feature is present.
+The feature will contain ID of an RPC client that was used to create the stream.
+
+So in the example from previous chapter:
+
+```cpp
+ rpc::sink<int, long> sink = rc.make_stream_sink<serializer, int, long>().get0();
+```
+
+the call will initiate a new TCP connection to the same server `rc` is connected to. During RPC
+protocol negotiation this connection will have `Stream parent` feature with `rc`'s ID as a value.
+
+### Passing sink/source over RPC call
+
+When an `rpc::sink` is sent over an RPC call it is serialized as its connection ID. The server's RPC handler
+then looks up the connection and creates an `rpc::source` from it. When an RPC handler returns an `rpc::sink`
+the same happens in the other direction.
diff --git a/src/seastar/doc/rpc.md b/src/seastar/doc/rpc.md
new file mode 100644
index 000000000..fdab06dac
--- /dev/null
+++ b/src/seastar/doc/rpc.md
@@ -0,0 +1,170 @@
+# RPC protocol
+
+## Data encoding
+
+All integral data is encoded in little endian format.
+
+## Protocol negotiation
+
+The negotiation works by exchanging negotiation frame immediately after connection establishment. The negotiation frame format is:
+
+ uint8_t magic[8] = SSTARRPC
+ uint32_t len
+ uint8_t data[len]
+
+The negotiation frame data is itself composed of multiple records, one for each feature number present. Feature numbers begin at zero and will be defined by later versions of this document.
+
+
+ struct negotiation_frame_feature_record {
+ uint32_t feature_number;
+ uint32_t len;
+ uint8_t data[len];
+ }
+
+A `negotiation_frame_feature_record` signals that an optional feature is present in the client, and can contain additional feature-specific data. The feature number will be omitted in a server response if an optional feature is declined by the server.
+
+Actual negotiation looks like this:
+
+ Client Server
+ --------------------------------------------------------------------------------------------------
+ send negotiation frame
+ recv frame
+ check magic (disconnect if magic is not SSTARRPC)
+ send negotiation frame back
+ recv frame
+ check magic (disconnect if magic is not SSTARRPC)
+
+### Supported features
+
+#### Compression
+ feature_number: 0
+ data : opaque data that is passed to a compressor factory
+ provided by an application. Compressor factory is
+ responsible for negotiation of compression algorithm.
+
+ If compression is negotiated request and response frames are encapsulated in a compressed frame.
+
+#### Timeout propagation
+ feature_number: 1
+ data : none
+
+    If timeout propagation is negotiated the request frame has an additional 8 bytes that hold the timeout value
+    for a request in milliseconds. A zero value means that the timeout value was not specified.
+    If a timeout is specified and the server cannot handle the request in the specified time frame it may choose
+    not to send the reply back (sending it back will not be an error either).
+
+#### Connection ID
+ feature_number: 2
+    uint64_t connection_id : RPC connection ID
+
+ Server assigns unique connection ID for each connection and sends it to a client using
+ this feature.
+
+#### Stream parent
+ feature_number: 3
+ uint64_t connection_id : RPC connection ID representing a parent of the stream
+
+ If this feature is present it means that the connection is not regular RPC connection
+ but stream connection. If parent connection is closed or aborted all streams belonging
+ to it will be closed as well.
+
+ Stream connection is a connection that allows bidirectional flow of bytes which may carry one or
+ more messages in each direction. Stream connection should be explicitly closed by both client and
+ server. Closing is done by sending special EOS frame (described below).
+
+
+#### Isolation
+ feature number: 4
+ uint32_t isolation_cookie_len
+ uint8_t isolation_cookie[len]
+
+ The `isolation_cookie` field is used by the server to select a
+ `seastar::scheduling_group` (or equivalent in another implementation) that
+ will run this connection. In the future it will also be used for rpc buffer
+ isolation, to avoid rpc traffic in one isolation group from starving another.
+
+ The server does not directly assign meaning to values of `isolation_cookie`;
+ instead, the interpretation is left to user code.
+
+##### Compressed frame format
+ uint32_t len
+ uint8_t compressed_data[len]
+
+ after compressed_data is uncompressed it becomes regular request, response or streaming frame
+
+## Request frame format
+ uint64_t timeout_in_ms - only present if timeout propagation is negotiated
+ uint64_t verb_type
+ int64_t msg_id
+ uint32_t len
+ uint8_t data[len]
+
+msg_id has to be positive and may never be reused.
+data is transparent for the protocol and serialized/deserialized by a user
+
+## Response frame format
+ int64_t msg_id
+ uint32_t len
+ uint8_t data[len]
+
+if msg_id < 0 enclosed response contains an exception that came as a response to msg id abs(msg_id)
+data is transparent for the protocol and serialized/deserialized by a user
+
+## Stream frame format
+ uint32_t len
+ uint8_t data[len]
+
+len == 0xffffffff signals end of stream
+data is transparent for the protocol and serialized/deserialized by a user
+
+## Exception encoding
+ uint32_t type
+ uint32_t len
+ uint8_t data[len]
+
+### Known exception types
+ USER = 0
+ UNKNOWN_VERB = 1
+
+#### USER exception encoding
+
+ uint32_t len
+ char[len]
+
+This exception is sent as a reply if rpc handler throws an exception.
+It is delivered to a caller as rpc::remote_verb_error(char[len])
+
+#### UNKNOWN_VERB exception encoding
+
+ uint64_t verb_id
+
+This exception is sent as a response to a request with unknown verb_id, the verb id is passed back as part of the exception payload.
+
+## More formal protocol description
+
+ request_stream = negotiation_frame, { request | compressed_request }
+ request = verb_type, msg_id, len, { byte }*len
+    compressed_request = len, { byte }*len
+ response_stream = negotiation_frame, { response | compressed_response }
+ response = reply | exception
+ compressed_response = len, { byte }*len
+ streaming_stream = negotiation_frame, { streaming_frame | compressed_streaming_frame }
+ streaming_frame = len, { byte }*len
+ compressed_streaming_frame = len, { byte }*len
+ reply = msg_id, len, { byte }*len
+ exception = exception_header, serialized_exception
+ exception_header = -msg_id, len
+ serialized_exception = (user|unknown_verb)
+ user = len, {byte}*len
+ unknown_verb = verb_type
+ verb_type = uint64_t
+ msg_id = int64_t
+ len = uint32_t
+ byte = uint8_t
+ negotiation_frame = 'SSTARRPC' len32(negotiation_frame_data) negotiation_frame_data
+ negotiation_frame_data = negotiation_frame_feature_record*
+ negotiation_frame_feature_record = feature_number len {byte}*len
+ feature_number = uint32_t
+
+Note that replies can come in order different from requests, and some requests may not have a reply at all.
+
diff --git a/src/seastar/doc/shared-token-bucket.md b/src/seastar/doc/shared-token-bucket.md
new file mode 100644
index 000000000..6869c7f04
--- /dev/null
+++ b/src/seastar/doc/shared-token-bucket.md
@@ -0,0 +1,109 @@
+# Shared token bucket
+
+## Intro
+
+The classical token bucket has two parameters -- rate and limit. The rate
+is the amount of tokens that are put into bucket per some period of time
+(say -- second), the limit is the maximum amount of tokens that can be
+accumulated in the bucket. The process of regeneration of tokens this way
+is called "replenishing" below. When a request needs to be served it should
+try to get a certain amount of tokens from the bucket.
+
+The shared token bucket implements the above model for seastar sharded
+architecture. The implementation doesn't use locks and is built on atomic
+arithmetics.
+
+## Theory
+
+### Rovers
+
+The bucket is implemented as a pair of increasing counters -- tail and
+head rovers. The consumer of tokens advances the tail rover. To replenish
+tokens into the bucket the head rover is advanced.
+
+ +--------------------------------------------------------------------->
+ ^ ^
+ tail head
+
+ grab N tokens:
+
+ +--------------------------------------------------------------------->
+ .---> ^ ^
+ tail head
+
+ replenish N tokens:
+
+ +--------------------------------------------------------------------->
+ ^ .---> ^
+ tail head
+
+It's possible that after a grab the tail overruns the head and ends up
+in front of it. This would mean that there are not enough tokens in the bucket
+and that some amount of tokens was claimed from it.
+
+ grab a lot of tokens:
+
+ +--------------------------------------------------------------------->
+ .------------ ^ --> ^
+ head tail
+
+To check if the tokens were grabbed the caller needs to check if the head is
+(still) in front of the tail. This approach adds the ability for the consumers
+to line up in the queue when they all try to grab tokens from a contended
+bucket. The "ticket lock" model works the same way.
+
+### Capped release
+
+The implementation additionally supports the so-called "capped release". This is
+when tokens are not replenished from nowhere, but leak into the main bucket
+from another bucket into which the caller should explicitly put them. This mode
+can be useful in cases when the token bucket guards the entrance into some
+location that can temporarily (or constantly, but in that case it would denote
+a bucket misconfiguration) slow down and stop handling tokens at the given
+rate. To prevent token bucket from over-subscribing the guardee at those times,
+the second bucket can be refilled with the completions coming from the latter.
+
+In terms of rovers this is implemented with the help of a third rover called
+ceiling (or ceil in the code). This ceil rover actually defines the upper
+limit at which the head rover may point. Respectively, putting tokens into
+the second bucket (it's called releasing below) means advancing the ceil.
+
+## Practice
+
+### API
+
+To work with the token bucket there are 4 calls:
+
+ * `grab(N)` -- grabs a certain amount of tokens from the bucket and returns
+ back the resulting "tail" rover value. The value is useless per-se and is
+ only needed to call the deficiency() method
+
+ * `replenish(time)` -- tries to replenish tokens into the bucket. The amount
+ of replenished tokens is how many had accumulated since last replenish
+ till the `time` parameter
+
+  * `release(N)` -- releases the given number of tokens, making them available
+    for replenishment. Only works if capped release is turned on by the
+    template parameter, otherwise asserts
+
+  * `deficiency(tail)` -- returns back the number of tokens that were claimed
+    from the bucket but that are not yet there. A non-zero number means that the
+    bucket is contended and the request dispatching should be delayed
+
+### Example
+
+For example, the simple dispatch loop may look like this
+
+ while (true) {
+ request = get_next_request()
+ tail = token_bucket.grab(request.cost())
+ while (token_bucket.deficiency(tail)) {
+ yield
+ }
+ request.dispatch()
+ }
+
+And in the background there should run a timer calling `token_bucket.replenish(now())`
+
+Additionally, if there's a need to cap the token bucket with the real request serving
+rate, upon request completion one should call `token_bucket.release(request.cost())`
diff --git a/src/seastar/doc/template.css b/src/seastar/doc/template.css
new file mode 100644
index 000000000..6451a3f54
--- /dev/null
+++ b/src/seastar/doc/template.css
@@ -0,0 +1,113 @@
+/* CSS style for Seastar's tutorial.
+ * TODO: We also get some style for syntax highlighting inserted by our
+ * use of "highlight-style tango" in configure.py. Perhaps we can insert
+ * this style here too, for finer control.
+ */
+
+/* Some defaults */
+body {
+ color: #000000;
+ background: #FFFFFF;
+ font-size: 13pt;
+ line-height: 1.10;
+ font-family: arial, sans-serif;
+ margin-left: 15pt;
+ margin-right: 15pt;
+ text-align: justify;
+}
+
+/* In older versions, Pandoc puts the title, author and date, if any, in its
+ * own div id="header". In recent versions, it uses a "header" tag instead.
+*/
+div#header, header {
+ border-top: 1px solid #aaa;
+ border-bottom: 1px solid #aaa;
+ background: #F0F0C0;
+ margin: 10pt;
+ margin-left: 10%;
+ margin-right: 10%;
+}
+
+/* The title is in an h1.title, in the above div#header */
+.title {
+ color: #000000;
+ margin: 5pt;
+ text-align: center;
+ font-family: serif;
+ font-weight: bold;
+ font-size: 32pt;
+}
+/* The author/date are h2.author and h3.date */
+.author, .date {
+ color: #000000;
+ margin: 0pt;
+ text-align: center;
+ font-family: serif;
+ font-weight: normal;
+ font-size: 16pt;
+}
+
+/* table of contents is in div id="TOC" in older versions, or a nav id="TOC"
+ * in newer versions */
+div#TOC, nav#TOC {
+ border-top: 1px solid #aaa;
+ border-bottom: 1px solid #aaa;
+ background: #F9F9F9;
+ margin: 10pt;
+ margin-left: 20%;
+ margin-right: 20%;
+}
+
+
+h1, h2, h3, h4, h5, h6 {
+ color: #EE3300;
+}
+
+a {
+ text-decoration: none;
+}
+a:link, a:visited {
+ color: #0000CC;
+}
+a:hover {
+ color: #CC0000;
+ text-decoration: underline;
+}
+
+
+/* Multiline code snippets are wrapped in a "code" inside a "pre".
+ * Inline code snippets are just in a "code".
+ */
+code {
+ background-color: #FFFFFF;
+ /* BEGIN word wrap */
+ /* Need all the following to word wrap instead of scroll box */
+ /* This will override the overflow:auto if present */
+ white-space: pre-wrap; /* css-3 */
+ white-space: -moz-pre-wrap !important; /* Mozilla, since 1999 */
+ white-space: -pre-wrap; /* Opera 4-6 */
+ white-space: -o-pre-wrap; /* Opera 7 */
+ word-wrap: break-word; /* Internet Explorer 5.5+ */
+ /* END word wrap */
+}
+pre {
+ padding: 0.5em;
+ border: 1px dotted #777;
+ margin-left: 15pt;
+ margin-right: 15pt;
+}
+pre, pre > code {
+ background-color: #f8f8f8;
+}
+
+/* Fix stuff for printing, in case somebody tries to print the HTML instead
+ * of getting a PDF and printing that. For example, too big fonts and big
+ * margins may be a waste of paper.
+ * buttondown.css has a nice trick for replacing links with the actual text
+ * of the URL - might be nice to copy it one day.
+ */
+@media print {
+ body { font-size: 11pt; }
+ a { color: black; background: transparent; }
+ pre { border: 1px solid #aaa; }
+}
diff --git a/src/seastar/doc/template.tex b/src/seastar/doc/template.tex
new file mode 100644
index 000000000..8c26c2070
--- /dev/null
+++ b/src/seastar/doc/template.tex
@@ -0,0 +1,82 @@
+% The pandoc command line (see configure.py) can communicate variables to this
+% LaTeX skeleton, by using the "-V varname=value" command line parameter, and
+% there are also some builtin variables defined by other options. With such a
+% variable, we can use "$varname$" for the value, and $if(varname)$...$endif$
+% for conditional code when this varname is defined.
+%
+% For an example of a more complete LaTeX template covering more of pandoc's
+% features, check out the example templates/default.latex in pandoc's
+% installation, or pandoc.org/demo/mytemplate.tex on their site.
+
+\documentclass[11]{article}
+\usepackage[a4paper, margin=1in]{geometry}
+\usepackage{lmodern}
+\usepackage{microtype}
+
+% In code snippets, show backtick (`, grave accent) as a short straight line,
+% not a curved quote. I could have used \IfFileExists{upquote.sty}{...}{}
+% to make this package optional, but I really think it's important, and
+% the user in a modern Linux distribution should be able to get it easily.
+\usepackage{upquote}
+
+% Nice header with document's name and section name, and footer
+% with page number.
+\usepackage{fancyhdr}
+\pagestyle{fancy}
+\lhead{\itshape Seastar}
+\chead{}
+\rhead{\itshape{\nouppercase{\leftmark}}}
+\lfoot{}
+\cfoot{}
+\rfoot{\thepage}
+
+$if(highlighting-macros)$
+$highlighting-macros$
+$endif$
+
+% Support links in PDF files
+\usepackage[]{hyperref}
+\hypersetup{breaklinks=true,
+ bookmarks=true, % show bookmark bar (i.e, TOC sidebar) in PDF viewer
+ pdfstartview=FitH, % cause Adobe Acrobat to do "fit to width"
+ pdfauthor={ScyllaDB},
+ pdftitle={Asynchronous Programming with Seastar},
+ colorlinks=true, % if false, uses boxed links
+ urlcolor=blue, % color of external links
+ linkcolor=magenta % color of internal links (to other sections)
+}
+
+% Indentation went out of style with Disco music... It is especially
+% inconvenient in text which includes a lot of code snippets, etc, which
+% causes a lot of indents not preceded by a text paragraph. So replace
+% the indentation with increased spacing between paragraphs. Note that this
+% also makes the TOC more spacious, I'm not sure it's a great idea.
+\setlength{\parindent}{0pt}
+\setlength{\parskip}{6pt plus 2pt minus 1pt}
+
+% Apparently, newer versions of pandoc use a "\tightlist" command, and
+% we need to implement it.
+\providecommand{\tightlist}{%
+\setlength{\itemsep}{0pt}\setlength{\parskip}{0pt}}
+
+% Fancy-formatted title, but take the author list and date (if set) from the
+% preamble in the markdown file.
+\title{\textbf{\fontsize{0.75cm}{1.5em}\selectfont Asynchronous Programming} \\ \textbf{\fontsize{0.75cm}{1.5em}\selectfont with} \\ \textbf{\fontsize{3cm}{1.5em}\selectfont Seastar}}
+\author{$for(author)$$author$$sep$ \and $endfor$}
+\date{$date$}
+
+\begin{document}
+\maketitle
+
+% Table of contents, before the body of the document. Make the TOC clickable,
+% with links to the respective sections, but override the internal link color
+% set above (magenta) by normal black color, because the entire TOC being in
+% a different color looks rather conspicuous.
+{
+\hypersetup{linkcolor=black}
+\tableofcontents
+}
+
+$body$
+
+\end{document}
diff --git a/src/seastar/doc/tutorial.md b/src/seastar/doc/tutorial.md
new file mode 100644
index 000000000..6dd6556bf
--- /dev/null
+++ b/src/seastar/doc/tutorial.md
@@ -0,0 +1,2318 @@
+% Asynchronous Programming with Seastar
+% Nadav Har'El - nyh@ScyllaDB.com
+ Avi Kivity - avi@ScyllaDB.com
+
+# Introduction
+**Seastar**, which we introduce in this document, is a C++ library for writing highly efficient complex server applications on modern multi-core machines.
+
+Traditionally, the programming languages libraries and frameworks used for writing server applications have been divided into two distinct camps: those focusing on efficiency, and those focusing on complexity. Some frameworks are extremely efficient and yet allow building only simple applications (e.g., DPDK allows applications which process packets individually), while other frameworks allow building extremely complex applications, at the cost of run-time efficiency. Seastar is our attempt to get the best of both worlds: To create a library which allows building highly complex server applications, and yet achieve optimal performance.
+
+The inspiration and first use case of Seastar was Scylla, a rewrite of Apache Cassandra. Cassandra is a very complex application, and yet, with Seastar we were able to re-implement it with as much as 10-fold throughput increase, as well as significantly lower and more consistent latencies.
+
+Seastar offers a complete asynchronous programming framework, which uses two concepts - **futures** and **continuations** - to uniformly represent, and handle, every type of asynchronous event, including network I/O, disk I/O, and complex combinations of other events.
+
+Since modern multi-core and multi-socket machines have steep penalties for sharing data between cores (atomic instructions, cache line bouncing and memory fences), Seastar programs use the share-nothing programming model, i.e., the available memory is divided between the cores, each core works on data in its own part of memory, and communication between cores happens via explicit message passing (which itself happens using the SMP's shared memory hardware, of course).
+
+## Asynchronous programming
+A server for a network protocol, such as the classic HTTP (Web) or SMTP (e-mail) servers, inherently deals with parallelism: Multiple clients send requests in parallel, and we cannot finish handling one request before starting to handle the next: A request may, and often does, need to block because of various reasons --- a full TCP window (i.e., a slow connection), disk I/O, or even the client holding on to an inactive connection --- and the server needs to handle other connections as well.
+
+The most straightforward way to handle such parallel connections, employed by classic network servers such as Inetd, Apache Httpd and Sendmail, is to use a separate operating-system process per connection. This technique evolved over the years to improve its performance: At first, a new process was spawned to handle each new connection; Later, a pool of existing processes was kept and each new connection was assigned to an unemployed process from the pool; Finally, the processes were replaced by threads. However, the common idea behind all these implementations is that at each moment, each process handles exclusively a single connection. Therefore, the server code is free to use blocking system calls, such as reading or writing to a connection, or reading from disk, and if this process blocks, all is well because we have many additional processes ready to handle other connections.
+
+Programming a server which uses a process (or a thread) per connection is known as *synchronous* programming, because the code is written linearly, and one line of code starts to run after the previous line finished. For example, the code may read a request from a socket, parse the request, and then piecemeal read a file from disk and write it back to the socket. Such code is easy to write, almost like traditional non-parallel programs. In fact, it's even possible to run an external non-parallel program to handle each request --- this is for example how Apache HTTPd ran "CGI" programs, the first implementation of dynamic Web-page generation.
+
+>NOTE: although the synchronous server application is written in a linear, non-parallel, fashion, behind the scenes the kernel helps ensure that everything happens in parallel and the machine's resources --- CPUs, disk and network --- are fully utilized. Beyond the process parallelism (we have multiple processes handling multiple connections in parallel), the kernel may even parallelize the work of one individual connection --- for example process an outstanding disk request (e.g., read from a disk file) in parallel with handling the network connection (send buffered-but-yet-unsent data, and buffer newly-received data until the application is ready to read it).
+
+But synchronous, process-per-connection, server programming didn't come without disadvantages and costs. Slowly but surely, server authors realized that starting a new process is slow, context switching is slow, and each process comes with significant overheads --- most notably the size of its stack. Server and kernel authors worked hard to mitigate these overheads: They switched from processes to threads, from creating new threads to thread pools, they lowered default stack size of each thread, and increased the virtual memory size to allow more partially-utilized stacks. But still, servers with synchronous designs had unsatisfactory performance, and scaled badly as the number of concurrent connections grew. In 1999, Dan Kegel popularized "the C10K problem", the need of a single server to efficiently handle 10,000 concurrent connections --- most of them slow or even inactive.
+
+The solution, which became popular in the following decade, was to abandon the cozy but inefficient synchronous server design, and switch to a new type of server design --- the *asynchronous*, or *event-driven*, server. An event-driven server has just one thread, or more accurately, one thread per CPU. This single thread runs a tight loop which, at each iteration, checks, using ```poll()``` (or the more efficient ```epoll```) for new events on many open file descriptors, e.g., sockets. For example, an event can be a socket becoming readable (new data has arrived from the remote end) or becoming writable (we can send more data on this connection). The application handles this event by doing some non-blocking operations, modifying one or more of the file descriptors, and maintaining its knowledge of the _state_ of this connection.
+
+However, writers of asynchronous server applications faced, and still face today, two significant challenges:
+
+* **Complexity:** Writing a simple asynchronous server is straightforward. But writing a *complex* asynchronous server is notoriously difficult. The handling of a single connection, instead of being a simple easy-to-read function call, now involves a large number of small callback functions, and a complex state machine to remember which function needs to be called when each event occurs.
+
+* **Non-blocking:** Having just one thread per core is important for the performance of the server application, because context switches are slow. However, if we only have one thread per core, the event-handling functions must _never_ block, or the core will remain idle. But some existing programming languages and frameworks leave the server author no choice but to use blocking functions, and therefore multiple threads.
+For example, ```Cassandra``` was written as an asynchronous server application; But because disk I/O was implemented with ```mmap```ed files, which can uncontrollably block the whole thread when accessed, it was forced to run multiple threads per CPU.
+
+Moreover, when the best possible performance is desired, the server application, and its programming framework, has no choice but to also take the following into account:
+
+* **Modern Machines**: Modern machines are very different from those of just 10 years ago. They have many cores and deep memory hierarchies (from L1 caches to NUMA) which reward certain programming practices and penalizes others: Unscalable programming practices (such as taking locks) can devastate performance on many cores; Shared memory and lock-free synchronization primitives are available (i.e., atomic operations and memory-ordering fences) but are dramatically slower than operations that involve only data in a single core's cache, and also prevent the application from scaling to many cores.
+
+* **Programming Language:** High-level languages such as Java, Javascript, and similar "modern" languages are convenient, but each comes with its own set of assumptions which conflict with the requirements listed above. These languages, aiming to be portable, also give the programmer less control over the performance of critical code. For really optimal performance, we need a programming language which gives the programmer full control, zero run-time overheads, and on the other hand --- sophisticated compile-time code generation and optimization.
+
+Seastar is a framework for writing asynchronous server applications which aims to solve all four of the above challenges: It is a framework for writing *complex* asynchronous applications involving both network and disk I/O. The framework's fast path is entirely single-threaded (per core), scalable to many cores and minimizes the use of costly sharing of memory between cores. It is a C++14 library, giving the user sophisticated compile-time features and full control over performance, without run-time overhead.
+
+## Seastar
+
+
+Seastar is an event-driven framework allowing you to write non-blocking, asynchronous code in a relatively straightforward manner (once understood). Its APIs are based on futures. Seastar utilizes the following concepts to achieve extreme performance:
+
+* **Cooperative micro-task scheduler**: instead of running threads, each core runs a cooperative task scheduler. Each task is typically very lightweight -- only running for as long as it takes to process the last I/O operation's result and to submit a new one.
+* **Share-nothing SMP architecture**: each core runs independently of other cores in an SMP system. Memory, data structures, and CPU time are not shared; instead, inter-core communication uses explicit message passing. A Seastar core is often termed a shard. TODO: more here https://github.com/scylladb/seastar/wiki/SMP
+* **Future based APIs**: futures allow you to submit an I/O operation and to chain tasks to be executed on completion of the I/O operation. It is easy to run multiple I/O operations in parallel - for example, in response to a request coming from a TCP connection, you can issue multiple disk I/O requests, send messages to other cores on the same system, or send requests to other nodes in the cluster, wait for some or all of the results to complete, aggregate the results, and send a response.
+* **Share-nothing TCP stack**: while Seastar can use the host operating system's TCP stack, it also provides its own high-performance TCP/IP stack built on top of the task scheduler and the share-nothing architecture. The stack provides zero-copy in both directions: you can process data directly from the TCP stack's buffers, and send the contents of your own data structures as part of a message without incurring a copy. Read more...
+* **DMA-based storage APIs**: as with the networking stack, Seastar provides zero-copy storage APIs, allowing you to DMA your data to and from your storage devices.
+
+This tutorial is intended for developers already familiar with the C++ language, and will cover how to use Seastar to create a new application.
+
+TODO: copy text from https://github.com/scylladb/seastar/wiki/SMP
+https://github.com/scylladb/seastar/wiki/Networking
+
+# Getting started
+
+The simplest Seastar program is this:
+
+```cpp
+#include <seastar/core/app-template.hh>
+#include <seastar/core/reactor.hh>
+#include <iostream>
+
+int main(int argc, char** argv) {
+ seastar::app_template app;
+ app.run(argc, argv, [] {
+ std::cout << "Hello world\n";
+ return seastar::make_ready_future<>();
+ });
+}
+```
+
+As we do in this example, each Seastar program must define and run an `app_template` object. This object starts the main event loop (the Seastar *engine*) on one or more CPUs, and then runs the given function - in this case an unnamed function, a *lambda* - once.
+
+The `return make_ready_future<>();` causes the event loop, and the whole application, to exit immediately after printing the "Hello World" message. In a more typical Seastar application, we will want the event loop to remain alive and process incoming packets (for example), until explicitly exited. Such applications will return a _future_ which determines when to exit the application. We will introduce futures and how to use them below. In any case, the regular C `exit()` should not be used, because it prevents Seastar or the application from cleaning up appropriately.
+
+As shown in this example, all Seastar functions and types live in the "`seastar`" namespace. A user can either type this namespace prefix every time, or use shortcuts like "`using seastar::app_template`" or even "`using namespace seastar`" to avoid typing this prefix. We generally recommend using the namespace prefixes `seastar` and `std` explicitly, and will follow this style in all the examples below.
+
+To compile this program (it's present in the `demos/hello-world.cc` file) you can just use Docker.
+
+```
+$ docker build -t seastar-dev -f ./docker/dev/Dockerfile .
+$ scripts/build.sh dev
+$ docker run -it --rm -v $(pwd):/seastar seastar-dev /seastar/build/dev/demos/hello-world_demo -c1
+```
+
+Without the docker help, first make sure you have downloaded, built, and optionally installed Seastar, and put the above program in a source file anywhere you want, let's call the file `getting-started.cc`.
+
+Linux's [pkg-config](http://www.freedesktop.org/wiki/Software/pkg-config/) is one way for easily determining the compilation and linking parameters needed for using various libraries - such as Seastar. For example, if Seastar was built in the directory `$SEASTAR` but not installed, one can compile `getting-started.cc` with it using the command:
+```
+c++ getting-started.cc `pkg-config --cflags --libs --static $SEASTAR/build/release/seastar.pc`
+```
+The "`--static`" is needed because currently, Seastar is built as a static library, so we need to tell `pkg-config` to include its dependencies in the link command (whereas, had Seastar been a shared library, it could have pulled in its own dependencies).
+
+If Seastar _was_ installed, the `pkg-config` command line is even shorter:
+```
+c++ getting-started.cc `pkg-config --cflags --libs --static seastar`
+```
+
+Alternatively, one can easily build a Seastar program with CMake. Given the following `CMakeLists.txt`
+
+```cmake
+cmake_minimum_required (VERSION 3.5)
+
+project (SeastarExample)
+
+find_package (Seastar REQUIRED)
+
+add_executable (example
+ getting-started.cc)
+
+target_link_libraries (example
+ PRIVATE Seastar::seastar)
+```
+
+you can compile the example with the following commands:
+
+```none
+$ mkdir build
+$ cd build
+$ cmake ..
+$ make
+```
+
+The program now runs as expected:
+```none
+$ ./example
+Hello world
+$
+```
+
+# Threads and memory
+## Seastar threads
+As explained in the introduction, Seastar-based programs run a single thread on each CPU. Each of these threads runs its own event loop, known as the *engine* in Seastar nomenclature. By default, the Seastar application will take over all the available cores, starting one thread per core. We can see this with the following program, printing `seastar::smp::count` which is the number of started threads:
+
+```cpp
+#include <seastar/core/app-template.hh>
+#include <seastar/core/reactor.hh>
+#include <iostream>
+
+int main(int argc, char** argv) {
+ seastar::app_template app;
+ app.run(argc, argv, [] {
+ std::cout << seastar::smp::count << "\n";
+ return seastar::make_ready_future<>();
+ });
+}
+```
+
+On a machine with 4 hardware threads (two cores, and hyperthreading enabled), Seastar will by default start 4 engine threads:
+
+```none
+$ ./a.out
+4
+```
+
+Each of these 4 engine threads will be pinned (a la **taskset(1)**) to a different hardware thread. Note how, as we mentioned above, the app's initialization function is run only on one thread, so we see the output "4" only once. Later in the tutorial we'll see how to make use of all threads.
+
+The user can pass a command line parameter, `-c`, to tell Seastar to start fewer threads than the available number of hardware threads. For example, to start Seastar on only 2 threads, the user can do:
+```none
+$ ./a.out -c2
+2
+```
+When the machine is configured as in the example above - two cores with two hyperthreads on each - and only two threads are requested, Seastar ensures that each thread is pinned to a different core, and we don't get the two threads competing as hyperthreads of the same core (which would, of course, damage performance).
+
+We cannot start more threads than the number of hardware threads, as allowing this will be grossly inefficient. Trying it will result in an error:
+```none
+$ ./a.out -c5
+Could not initialize seastar: std::runtime_error (insufficient processing units)
+```
+
+The error is an exception thrown from app.run, which was caught by seastar itself and turned into a non-zero exit code. Note that catching the exceptions this way does **not** catch exceptions thrown in the application's actual asynchronous code. We will discuss these later in this tutorial.
+
+## Seastar memory
+As explained in the introduction, Seastar applications shard their memory. Each thread is preallocated with a large piece of memory (on the same NUMA node it is running on), and uses only that memory for its allocations (such as `malloc()` or `new`).
+
+By default, the machine's **entire memory** except a certain reservation left for the OS (defaulting to the maximum of 1.5G or 7% of total memory) is pre-allocated for the application in this manner. This default can be changed by *either* changing the amount reserved for the OS (not used by Seastar) with the `--reserve-memory` option, or by explicitly giving the amount of memory given to the Seastar application, with the `-m` option. This amount of memory can be in bytes, or using the units "k", "M", "G" or "T". These units use the power-of-two values: "M" is a **mebibyte**, 2^20 (=1,048,576) bytes, not a **megabyte** (10^6 or 1,000,000 bytes).
+
+Trying to give Seastar more memory than physical memory immediately fails:
+```none
+$ ./a.out -m10T
+Couldn't start application: std::runtime_error (insufficient physical memory)
+```
+
+# Introducing futures and continuations
+Futures and continuations, which we will introduce now, are the building blocks of asynchronous programming in Seastar. Their strength lies in the ease of composing them together into a large, complex, asynchronous program, while keeping the code fairly readable and understandable.
+
+A [future](\ref future) is a result of a computation that may not be available yet.
+Examples include:
+
+ * a data buffer that we are reading from the network
+ * the expiration of a timer
+ * the completion of a disk write
+ * the result of a computation that requires the values from
+ one or more other futures.
+
+A variable of type `future<int>` holds an int that will eventually be available - at this point it might already be available, or might not be available yet. The method available() tests if a value is already available, and the method get() gets the value. The type `future<>` indicates something which will eventually complete, but not return any value.
+
+A future is usually returned by an **asynchronous function**, a function which returns a future and arranges for this future to be eventually resolved. Because asynchronous functions _promise_ to eventually resolve the future which they returned, asynchronous functions are sometimes called "promises"; But we will avoid this term because it tends to confuse more than it explains.
+
+One simple example of an asynchronous function is Seastar's function sleep():
+
+```cpp
+future<> sleep(std::chrono::duration<Rep, Period> dur);
+```
+
+This function arranges a timer so that the returned future becomes available (without an associated value) when the given time duration elapses.
+
+A **continuation** is a callback (typically a lambda) to run when a future becomes available. A continuation is attached to a future with the `then()` method. Here is a simple example:
+
+```cpp
+#include <seastar/core/app-template.hh>
+#include <seastar/core/sleep.hh>
+#include <iostream>
+
+int main(int argc, char** argv) {
+ seastar::app_template app;
+ app.run(argc, argv, [] {
+ std::cout << "Sleeping... " << std::flush;
+ using namespace std::chrono_literals;
+ return seastar::sleep(1s).then([] {
+ std::cout << "Done.\n";
+ });
+ });
+}
+```
+
+In this example we see us getting a future from `seastar::sleep(1s)`, and attaching to it a continuation which prints a "Done." message. The future will become available after 1 second has passed, at which point the continuation is executed. Running this program, we indeed see the message "Sleeping..." immediately, and one second later the message "Done." appears and the program exits.
+
+The return value of `then()` is itself a future which is useful for chaining multiple continuations one after another, as we will explain below. But here we just note that we `return` this future from `app.run`'s function, so that the program will exit only after both the sleep and its continuation are done.
+
+To avoid repeating the boilerplate "app_engine" part in every code example in this tutorial, let's create a simple main() with which we will compile the following examples. This main just calls function `future<> f()`, does the appropriate exception handling, and exits when the future returned by `f` is resolved:
+
+```cpp
+#include <seastar/core/app-template.hh>
+#include <seastar/util/log.hh>
+#include <iostream>
+#include <stdexcept>
+
+extern seastar::future<> f();
+
+int main(int argc, char** argv) {
+ seastar::app_template app;
+ try {
+ app.run(argc, argv, f);
+ } catch(...) {
+ std::cerr << "Couldn't start application: "
+ << std::current_exception() << "\n";
+ return 1;
+ }
+ return 0;
+}
+```
+
+Compiling together with this `main.cc`, the above sleep() example code becomes:
+
+```cpp
+#include <seastar/core/sleep.hh>
+#include <iostream>
+
+seastar::future<> f() {
+ std::cout << "Sleeping... " << std::flush;
+ using namespace std::chrono_literals;
+ return seastar::sleep(1s).then([] {
+ std::cout << "Done.\n";
+ });
+}
+```
+
+So far, this example was not very interesting - there is no parallelism, and the same thing could have been achieved by the normal blocking POSIX `sleep()`. Things become much more interesting when we start several sleep() futures in parallel, and attach a different continuation to each. Futures and continuation make parallelism very easy and natural:
+
+```cpp
+#include <seastar/core/sleep.hh>
+#include <iostream>
+
+seastar::future<> f() {
+ std::cout << "Sleeping... " << std::flush;
+ using namespace std::chrono_literals;
+ seastar::sleep(200ms).then([] { std::cout << "200ms " << std::flush; });
+ seastar::sleep(100ms).then([] { std::cout << "100ms " << std::flush; });
+ return seastar::sleep(1s).then([] { std::cout << "Done.\n"; });
+}
+```
+
+Each `sleep()` and `then()` call returns immediately: `sleep()` just starts the requested timer, and `then()` sets up the function to call when the timer expires. So all three lines happen immediately and f returns. Only then, the event loop starts to wait for the three outstanding futures to become ready, and when each one becomes ready, the continuation attached to it is run. The output of the above program is of course:
+```none
+$ ./a.out
+Sleeping... 100ms 200ms Done.
+```
+
+`sleep()` returns `future<>`, meaning it will complete at a future time, but once complete, does not return any value. More interesting futures do specify a value of any type (or multiple values) that will become available later. In the following example, we have a function returning a `future<int>`, and a continuation to be run once this value becomes available. Note how the continuation gets the future's value as a parameter:
+
+```cpp
+#include <seastar/core/sleep.hh>
+#include <iostream>
+
+seastar::future<int> slow() {
+ using namespace std::chrono_literals;
+ return seastar::sleep(100ms).then([] { return 3; });
+}
+
+seastar::future<> f() {
+ return slow().then([] (int val) {
+ std::cout << "Got " << val << "\n";
+ });
+}
+```
+
+The function `slow()` deserves more explanation. As usual, this function returns a `future<int>` immediately, and doesn't wait for the sleep to complete, and the code in `f()` can chain a continuation to this future's completion. The future returned by `slow()` is itself a chain of futures: It will become ready once sleep's future becomes ready and then the value 3 is returned. We'll explain below in more details how `then()` returns a future, and how this allows *chaining* futures.
+
+This example begins to show the convenience of the futures programming model, which allows the programmer to neatly encapsulate complex asynchronous operations. `slow()` might involve a complex asynchronous operation requiring multiple steps, but its user can use it just as easily as a simple `sleep()`, and Seastar's engine takes care of running the continuations whose futures have become ready at the right time.
+
+## Ready futures
+A future value might already be ready when `then()` is called to chain a continuation to it. This important case is optimized, and *usually* the continuation is run immediately instead of being registered to run later in the next iteration of the event loop.
+
+This optimization is done *usually*, though sometimes it is avoided: The implementation of `then()` holds a counter of such immediate continuations, and after many continuations have been run immediately without returning to the event loop (currently the limit is 256), the next continuation is deferred to the event loop in any case. This is important because in some cases (such as future loops, discussed later) we could find that each ready continuation spawns a new one, and without this limit we can starve the event loop. It is important not to starve the event loop, as this would starve continuations of futures that weren't ready but have since become ready, and also starve the important **polling** done by the event loop (e.g., checking whether there is new activity on the network card).
+
+`make_ready_future<>` can be used to return a future which is already ready. The following example is identical to the previous one, except the promise function `fast()` returns a future which is already ready, and not one which will be ready in a second as in the previous example. The nice thing is that the consumer of the future does not care, and uses the future in the same way in both cases.
+
+```cpp
+#include <seastar/core/future.hh>
+#include <iostream>
+
+seastar::future<int> fast() {
+ return seastar::make_ready_future<int>(3);
+}
+
+seastar::future<> f() {
+ return fast().then([] (int val) {
+ std::cout << "Got " << val << "\n";
+ });
+}
+```
+
+# Coroutines
+
+Note: coroutines require C++20 and a supporting compiler. Clang 10 and above is known to work.
+
+The simplest way to write efficient asynchronous code with Seastar is to use coroutines. Coroutines don't share most of the pitfalls of traditional continuations (below), and so are the preferred way to write new code.
+
+A coroutine is a function that returns a `seastar::future<T>` and uses the `co_await` or `co_return` keywords. Coroutines are invisible to their callers and callees; they integrate with traditional Seastar code in either role. If you are not familiar with C++ coroutines, you may want to consult [A more general introduction to C++ coroutines](https://medium.com/pranayaggarwal25/coroutines-in-cpp-15afdf88e17e); this section focuses on how coroutines integrate with Seastar.
+
+Here's an example of a simple Seastar coroutine:
+
+```cpp
+#include <seastar/core/coroutine.hh>
+
+seastar::future<int> read();
+seastar::future<> write(int n);
+
+seastar::future<int> slow_fetch_and_increment() {
+ auto n = co_await read(); // #1
+ co_await seastar::sleep(1s); // #2
+ auto new_n = n + 1; // #3
+ co_await write(new_n); // #4
+ co_return n; // #5
+}
+```
+
+In #1, we call the `read()` function, which returns a future. The `co_await` keyword instructs Seastar to inspect the returned future. If the future is ready, then the value (an `int`) is extracted from the future and assigned to `n`. If the future is not ready, the coroutine arranges for itself to be called when the future becomes ready, and control is returned to Seastar. Once the future becomes ready, the coroutine is awakened and the value is extracted from the future and assigned to `n`.
+
+In #2, we call `seastar::sleep()` and wait for the returned future to become ready, which it will in a second. This demonstrates that `n` is preserved across `co_await` calls, and the author of the coroutine need not arrange for storage for coroutine local variables.
+
+Line #3 demonstrates the addition operation, with which the reader is assumed to be familiar.
+
+In #4, we call a function that returns a `seastar::future<>`. In this case, the future carries no value, and so no value is extracted and assigned.
+
+Line #5 demonstrates returning a value. The integer value is used to satisfy the `future<int>` that our caller got when calling the coroutine.
+
+## Lambda coroutines
+
+A lambda function can be a coroutine. Due to an interaction between how C++ lambda coroutines are specified and how
+Seastar coroutines work, using lambda coroutines as continuations can result in use-after-free. To avoid such problems,
+take one of the following approaches:
+
+1. Use lambda coroutines as arguments to functions that explicitly claim support for them
+2. Wrap lambda coroutines with seastar::coroutine::lambda(), and ensure the lambda coroutine is fully awaited within the statement it is defined in.
+
+An example of wrapping a lambda coroutine is:
+
+```cpp
+#include <seastar/core/coroutine.hh>
+#include <seastar/coroutine/maybe_yield.hh>
+
+future<> foo() {
+ int n = 3;
+ int m = co_await seastar::yield().then(seastar::coroutine::lambda([n] () -> future<int> {
+ co_await seastar::coroutine::maybe_yield();
+ // `n` can be safely used here
+ co_return n;
+ }));
+ assert(n == m);
+}
+```
+
+Notes:
+1. seastar::future::then() accepts a continuation
+2. We wrap the argument to seastar::future::then() with seastar::coroutine::lambda()
+3. We ensure evaluation of the lambda completes within the same expression using the outer co_await.
+
+More information can be found in lambda-coroutine-fiasco.md.
+
+## Generators in coroutines
+
+Sometimes, it would be convenient to model a view of `input_range` with a coroutine which emits the elements one after
+another asynchronously. From the consumer of the view's perspective, it can retrieve the elements by `co_await`ing
+the return value of the coroutine. From the coroutine's perspective, it is able to produce the elements multiple times
+using `co_yield` without "leaving" the coroutine. A function producing a sequence of values can be named "generator".
+But unlike the regular coroutine which returns a single `seastar::future<T>`, a generator should return
+`seastar::coroutine::experimental::generator<T>`. Please note, `generator<T>` is still at its early stage of developing,
+the public interface of this template is subject to change before it is stabilized enough.
+
+Example
+
+```cpp
+#include <seastar/core/coroutine.hh>
+#include <seastar/core/sleep.hh>
+#include <seastar/coroutine/generator.hh>
+
+seastar::future<Preprocessed> prepare_ingredients(Ingredients&&);
+seastar::future<Dish> cook_a_dish(Preprocessed&&);
+seastar::future<> consume_a_dish(Dish&&);
+
+seastar::coroutine::experimental::generator<Dish>
+make_dishes(coroutine::experimental::buffer_size_t max_dishes_on_table,
+ Ingredients&& ingredients) {
+ while (ingredients) {
+ auto some_ingredients = ingredients.alloc();
+ auto preprocessed = co_await prepare_ingredients(std::move(some_ingredients));
+ co_yield co_await cook_a_dish(std::move(preprocessed));
+ }
+}
+
+seastar::future<> have_a_dinner(unsigned max_dishes_on_table) {
+ Ingredients ingredients;
+    auto dishes = make_dishes(coroutine::experimental::buffer_size_t(max_dishes_on_table), std::move(ingredients));
+ while (auto dish = co_await dishes()) {
+ co_await consume_a_dish(std::move(dish));
+ }
+}
+```
+
+In this hypothetical kitchen, a chef and a diner are working in parallel. Instead of preparing
+all dishes beforehand, the chef cooks the dishes while the diner is consuming them one after another.
+Under most circumstances, neither the chef nor the diner is blocked by its peer. But if the diner
+is too slow so that there are `max_dishes_on_table` dishes left on the table, the chef would wait
+until the number of dishes is less than this setting. And, apparently, if there are no dishes on the
+table, the diner would wait for new ones to be prepared by the chef.
+
+## Exceptions in coroutines
+
+Coroutines automatically translate exceptions to futures and back.
+
+Calling `co_await foo()`, when `foo()` returns an exceptional future, will throw the exception carried by the future.
+
+Similarly throwing within a coroutine will cause the coroutine to return an exceptional future.
+
+Example:
+
+```cpp
+#include <seastar/core/coroutine.hh>
+
+seastar::future<> function_returning_an_exceptional_future();
+
+seastar::future<> exception_handling() {
+ try {
+ co_await function_returning_an_exceptional_future();
+ } catch (...) {
+ // exception will be handled here
+ }
+ throw 3; // will be captured by coroutine and returned as
+ // an exceptional future
+}
+```
+
+In certain cases, exceptions can also be propagated directly, without throwing or rethrowing them. It can be achieved by returning a `coroutine::exception` wrapper, but it unfortunately only works for coroutines which return `future<T>`, not `future<>`, due to the limitations in compilers. In particular, the example above won't compile if the return type is changed to `future<>`.
+
+Example:
+
+```cpp
+seastar::future<int> exception_propagating() {
+ std::exception_ptr eptr;
+ try {
+ co_await function_returning_an_exceptional_future();
+ } catch (...) {
+ eptr = std::current_exception();
+ }
+ if (eptr) {
+ co_return seastar::coroutine::exception(eptr); // Saved exception pointer can be propagated without rethrowing
+ }
+ co_return seastar::coroutine::make_exception(3); // Custom exceptions can be propagated without throwing
+}
+```
+
+## Concurrency in coroutines
+
+The `co_await` operator allows for simple sequential execution. Multiple coroutines can execute in parallel, but each coroutine has only one outstanding computation at a time.
+
+The `seastar::coroutine::all` class template allows a coroutine to fork into several concurrently executing sub-coroutines (or Seastar fibers, see below) and join again when they complete. Consider this example:
+
+
+```cpp
+#include <seastar/core/coroutine.hh>
+#include <seastar/coroutine/all.hh>
+
+seastar::future<int> read(int key);
+
+seastar::future<int> parallel_sum(int key1, int key2) {
+    auto [a, b] = co_await seastar::coroutine::all(
+ [&] {
+ return read(key1);
+ },
+ [&] {
+ return read(key2);
+ }
+ );
+ co_return a + b;
+}
+```
+
+Here, two read() calls are launched concurrently. The coroutine is paused until both reads complete, and the values returned are assigned to `a` and `b`. If `read(key)` is an operation that involves I/O, then the concurrent execution will complete sooner than if we `co_await`ed each call separately, since I/O can be overlapped.
+
+
+Note that `all` waits for all of its sub-computations, even if some throw an exception. If an exception is thrown, it is propagated to the calling coroutine.
+
+The `seastar::coroutine::parallel_for_each` class template allows a coroutine to fork into several concurrently executing function invocations (or Seastar fibers, see below) over a range of elements and join again when they complete. Consider this example:
+
+```cpp
+#include <seastar/core/coroutine.hh>
+#include <seastar/coroutine/parallel_for_each.hh>
+
+seastar::future<bool> all_exist(std::vector<sstring> filenames) {
+ bool res = true;
+ co_await seastar::coroutine::parallel_for_each(filenames, [&res] (const seastar::sstring& name) -> seastar::future<> {
+ res &= co_await seastar::file_exists(name);
+ });
+ co_return res;
+}
+```
+
+Here, the lambda function passed to parallel_for_each is launched concurrently for each element in the filenames vector. The coroutine is paused until all calls complete.
+
+## Breaking up long running computations
+
+Seastar is generally used for I/O, and coroutines usually launch I/O operations and consume their results, with little computation in between. But occasionally a long running computation is needed, and this risks preventing the reactor from performing I/O and scheduling other tasks.
+
+A coroutine will automatically yield in a `co_await` expression; but in a computation we do not `co_await` anything. We can use the `seastar::coroutine::maybe_yield` class in such cases:
+
+```cpp
+#include <seastar/coroutine/maybe_yield.hh>
+
+seastar::future<int> long_loop(int n) {
+ float acc = 0;
+ for (int i = 0; i < n; ++i) {
+ acc += std::sin(float(i));
+ // Give the Seastar reactor opportunity to perform I/O or schedule
+ // other tasks.
+ co_await seastar::coroutine::maybe_yield();
+ }
+ co_return acc;
+}
+```
+
+## Bypassing preemption checks in coroutines
+
+By default, `co_await`-ing a future performs a preemption check, and will suspend if the task quota is already depleted. However, in certain cases it might be useful to be able to assume that awaiting a ready future will not yield.
+For such cases, it's possible to explicitly bypass the preemption check:
+
+```cpp
+#include <seastar/core/coroutine.hh>
+
+struct resource;
+seastar::future<int> compute_always_ready(int i, resource& r);
+
+seastar::future<int> accumulate(int n, resource& important_resource) {
+ float acc = 0;
+ for (int i = 0; i < n; ++i) {
+ // This await will not yield the control, so we're sure that nobody will
+ // be able to touch important_resource while we accumulate all the results.
+ acc += co_await seastar::coroutine::without_preemption_check(compute_always_ready(i, important_resource));
+ }
+ co_return acc;
+}
+```
+
+# Continuations
+## Capturing state in continuations
+
+We've already seen that Seastar *continuations* are lambdas, passed to the `then()` method of a future. In the examples we've seen so far, lambdas have been nothing more than anonymous functions. But C++11 lambdas have one more trick up their sleeve, which is extremely important for future-based asynchronous programming in Seastar: Lambdas can **capture** state. Consider the following example:
+
+```cpp
+#include <seastar/core/sleep.hh>
+#include <iostream>
+
+seastar::future<int> incr(int i) {
+ using namespace std::chrono_literals;
+ return seastar::sleep(10ms).then([i] { return i + 1; });
+}
+
+seastar::future<> f() {
+ return incr(3).then([] (int val) {
+ std::cout << "Got " << val << "\n";
+ });
+}
+```
+
+The future operation `incr(i)` takes some time to complete (it needs to sleep a bit first...), and in that duration, it needs to save the `i` value it is working on. In the early event-driven programming models, the programmer needed to explicitly define an object for holding this state, and to manage all these objects. Everything is much simpler in Seastar, with C++11's lambdas: The *capture syntax* "`[i]`" in the above example means that the value of i, as it existed when incr() was called, is captured into the lambda. The lambda is not just a function - it is in fact an *object*, with both code and data. In essence, the compiler created for us automatically the state object, and we neither need to define it, nor to keep track of it (it gets saved together with the continuation, when the continuation is deferred, and gets deleted automatically after the continuation runs).
+
+One implementation detail worth understanding is that when a continuation has captured state and is run immediately, this capture incurs no runtime overhead. However, when the continuation cannot be run immediately (because the future is not yet ready) and needs to be saved till later, memory needs to be allocated on the heap for this data, and the continuation's captured data needs to be copied there. This has runtime overhead, but it is unavoidable, and is very small compared to the related overhead in the threaded programming model (in a threaded program, this sort of state usually resides on the stack of the blocked thread, but the stack is much larger than our tiny capture state, takes up a lot of memory and causes a lot of cache pollution on context switches between those threads).
+
+In the above example, we captured `i` *by value* - i.e., a copy of the value of `i` was saved into the continuation. C++ has two additional capture options: capturing by *reference* and capturing by *move*:
+
+Using capture-by-reference in a continuation is usually a mistake, and can lead to serious bugs. For example, if in the above example we captured a reference to i, instead of copying it,
+```cpp
+seastar::future<int> incr(int i) {
+ using namespace std::chrono_literals;
+ // Oops, the "&" below is wrong:
+ return seastar::sleep(10ms).then([&i] { return i + 1; });
+}
+```
+this would have meant that the continuation would contain the address of `i`, not its value. But `i` is a stack variable, and the incr() function returns immediately, so when the continuation eventually gets to run, long after incr() returns, this address will contain unrelated content.
+
+An exception to the capture-by-reference-is-usually-a-mistake rule is the `do_with()` idiom, which we will introduce later. This idiom ensures that an object lives throughout the life of the continuation, and makes capture-by-reference possible, and very convenient.
+
+Using capture-by-*move* in continuations is also very useful in Seastar applications. By **moving** an object into a continuation, we transfer ownership of this object to the continuation, and make it easy for the object to be automatically deleted when the continuation ends. For example, consider a traditional function taking a `std::unique_ptr<T>`.
+```cpp
+int do_something(std::unique_ptr<T> obj) {
+ // do some computation based on the contents of obj, let's say the result is 17
+ return 17;
+    // at this point, obj goes out of scope so the compiler delete()s it.
+}
+```
+By using unique_ptr in this way, the caller passes an object to the function, but tells it the object is now its exclusive responsibility - and when the function is done with the object, it automatically deletes it. How do we use unique_ptr in a continuation? The following won't work:
+
+```cpp
+seastar::future<int> slow_do_something(std::unique_ptr<T> obj) {
+ using namespace std::chrono_literals;
+ // The following line won't compile...
+ return seastar::sleep(10ms).then([obj] () mutable { return do_something(std::move(obj)); });
+}
+```
+
+The problem is that a unique_ptr cannot be passed into a continuation by value, as this would require copying it, which is forbidden because it violates the guarantee that only one copy of this pointer exists. We can, however, *move* obj into the continuation:
+```cpp
+seastar::future<int> slow_do_something(std::unique_ptr<T> obj) {
+ using namespace std::chrono_literals;
+ return seastar::sleep(10ms).then([obj = std::move(obj)] () mutable {
+ return do_something(std::move(obj));
+ });
+}
+```
+Here the use of `std::move()` causes obj's move-constructor to be used to move the object from the outer function into the continuation. The notion of move (*move semantics*), introduced in C++11, is similar to a shallow copy followed by invalidating the source copy (so that the two copies do not co-exist, as forbidden by unique_ptr). After moving obj into the continuation, the top-level function can no longer use it (in this case it's of course ok, because we return anyway).
+
+The `[obj = ...]` capture syntax we used here is new to C++14. This is the main reason why Seastar requires C++14, and does not support older C++11 compilers.
+
+The extra `() mutable` syntax was needed here because by default when C++ captures a value (in this case, the value of std::move(obj)) into a lambda, it makes this value read-only, so our lambda cannot, in this example, move it again. Adding `mutable` removes this artificial restriction.
+
+## Evaluation order considerations (C++14 only)
+
+C++14 (and below) does *not* guarantee that lambda captures in continuations will be evaluated after the futures they relate to are evaluated
+(See https://en.cppreference.com/w/cpp/language/eval_order).
+
+Consequently, avoid the programming pattern below:
+```cpp
+ return do_something(obj).then([obj = std::move(obj)] () mutable {
+ return do_something_else(std::move(obj));
+ });
+```
+
+In the example above, `[obj = std::move(obj)]` might be evaluated before `do_something(obj)` is called, potentially leading to use-after-move of `obj`.
+
+To guarantee the desired evaluation order, the expression above may be broken into separate statements as follows:
+```cpp
+ auto fut = do_something(obj);
+ return fut.then([obj = std::move(obj)] () mutable {
+ return do_something_else(std::move(obj));
+ });
+```
+
+This was changed in C++17. The expression that creates the object the function `then` is called on (the future) is evaluated before all the arguments to the function, so this style is not required in C++17 and above.
+
+## Chaining continuations
+TODO: We already saw chaining example in slow() above. talk about the return from then, and returning a future and chaining more thens.
+
+# Handling exceptions
+
+An exception thrown in a continuation is implicitly captured by the system and stored in the future. A future that stores such an exception is similar to a ready future in that it can cause its continuation to be launched, but it does not contain a value -- only the exception.
+
+Calling `.then()` on such a future skips over the continuation, and transfers the exception from the input future (the object on which `.then()` is called) to the output future (`.then()`'s return value).
+
+This default handling parallels normal exception behavior -- if an exception is thrown in straight-line code, all following lines are skipped:
+
+```cpp
+line1();
+line2(); // throws!
+line3(); // skipped
+```
+
+is similar to
+
+```cpp
+return line1().then([] {
+ return line2(); // throws!
+}).then([] {
+ return line3(); // skipped
+});
+```
+
+Usually, aborting the current chain of operations and returning an exception is what's needed, but sometimes more fine-grained control is required. There are several primitives for handling exceptions:
+
+1. `.then_wrapped()`: instead of passing the values carried by the future into the continuation, `.then_wrapped()` passes the input future to the continuation. The future is guaranteed to be in ready state, so the continuation can examine whether it contains a value or an exception, and take appropriate action.
+2. `.finally()`: similar to a Java finally block, a `.finally()` continuation is executed whether or not its input future carries an exception. The result of the finally continuation is its input future, so `.finally()` can be used to insert code in a flow that is executed unconditionally, but otherwise does not alter the flow.
+
+TODO: give example code for the above. Also mention handle_exception - although perhaps delay that to a later chapter?
+
+## Exceptions vs. exceptional futures
+An asynchronous function can fail in one of two ways: It can fail immediately, by throwing an exception, or it can return a future which will eventually fail (resolve to an exception). These two modes of failure appear similar to the uninitiated, but behave differently when attempting to handle exceptions using `finally()`, `handle_exception()`, or `then_wrapped()`. For example, consider the code:
+
+```cpp
+#include <seastar/core/future.hh>
+#include <iostream>
+#include <exception>
+
+class my_exception : public std::exception {
+ virtual const char* what() const noexcept override { return "my exception"; }
+};
+
+seastar::future<> fail() {
+ return seastar::make_exception_future<>(my_exception());
+}
+
+seastar::future<> f() {
+ return fail().finally([] {
+ std::cout << "cleaning up\n";
+ });
+}
+```
+
+This code will, as expected, print the "cleaning up" message - the asynchronous function `fail()` returns a future which resolves to a failure, and the `finally()` continuation is run despite this failure, as expected.
+
+Now consider that in the above example we had a different definition for `fail()`:
+
+```cpp
+seastar::future<> fail() {
+ throw my_exception();
+}
+```
+
+Here, `fail()` does not return a failing future. Rather, it fails to return a future at all! The exception it throws stops the entire function `f()`, and the `finally()` continuation does not get attached to the future (which was never returned), and will never run. The "cleaning up" message is not printed now.
+
+We recommend that to reduce the chance for such errors, asynchronous functions should always return a failed future rather than throw an actual exception. If the asynchronous function calls another function _before_ returning a future, and that second function might throw, it should use `try`/`catch` to catch the exception and convert it into a failed future:
+
+```cpp
+void inner() {
+ throw my_exception();
+}
+seastar::future<> fail() {
+ try {
+ inner();
+ } catch(...) {
+ return seastar::make_exception_future(std::current_exception());
+ }
+ return seastar::make_ready_future<>();
+}
+```
+
+Here, `fail()` catches the exception thrown by `inner()`, whatever it might be, and returns a failed future with that failure. Written this way, the `finally()` continuation will be reached, and the "cleaning up" message printed.
+
+>Despite this recommendation that asynchronous functions avoid throwing, some asynchronous functions do throw exceptions in addition to returning exceptional futures. A common example are functions which allocate memory and throw `std::bad_alloc` when running out of memory, instead of returning a future. The `future<> seastar::semaphore::wait()` method is one such function: It returns a future which may be exceptional if the semaphore was `broken()` or the wait timed out, but may also *throw* an exception when failing to allocate memory it needs to hold the list of waiters.
+> Therefore, unless a function --- including asynchronous functions --- is explicitly tagged "`noexcept`", the application should be prepared to handle exceptions thrown from it. In modern C++, code usually uses RAII to be exception-safe without sprinkling it with `try`/`catch`. `seastar::defer()` is a RAII-based idiom that ensures that some cleanup code is run even if an exception is thrown.
+
+
+Seastar has a convenient generic function, `futurize_invoke()`, which can be useful here. `futurize_invoke(func, args...)` runs a function which may return either a future value or an immediate value, and in both cases convert the result into a future value. `futurize_invoke()` also converts an immediate exception thrown by the function, if any, into a failed future, just like we did above. So using `futurize_invoke()` we can make the above example work even if `fail()` did throw exceptions:
+
+```cpp
+seastar::future<> fail() {
+ throw my_exception();
+}
+seastar::future<> f() {
+ return seastar::futurize_invoke(fail).finally([] {
+ std::cout << "cleaning up\n";
+ });
+}
+```
+
+Note that most of this discussion becomes moot if the risk of exception is inside a _continuation_. Consider the following code:
+
+```cpp
+seastar::future<> f() {
+ return seastar::sleep(1s).then([] {
+ throw my_exception();
+ }).finally([] {
+ std::cout << "cleaning up\n";
+ });
+}
+```
+
+Here, the lambda function of the first continuation does throw an exception instead of returning a failed future. However, we do _not_ have the same problem as before, which only happened because an asynchronous function threw an exception _before_ returning a valid future. Here, `f()` does return a valid future immediately - the failure will only be known later, after `sleep()` resolves. The message in `finally()` will be printed. The methods which attach continuations (such as `then()` and `finally()`) run the continuation the same way, so continuation functions may return immediate values or, in this case, throw an immediate exception, and still work properly.
+
+# Lifetime management
+An asynchronous function starts an operation which may continue long after the function returns: The function itself returns a `future<T>` almost immediately, but it may take a while until this future is resolved.
+
+When such an asynchronous operation needs to operate on existing objects, or to use temporary objects, we need to worry about the *lifetime* of these objects: We need to ensure that these objects do not get destroyed before the asynchronous function completes (or it will try to use the freed object and malfunction or crash), and to also ensure that the object finally get destroyed when it is no longer needed (otherwise we will have a memory leak).
+Seastar offers a variety of mechanisms for safely and efficiently keeping objects alive for the right duration. In this section we will explore these mechanisms, and when to use each mechanism.
+
+## Passing ownership to continuation
+The most straightforward way to ensure that an object is alive when a continuation runs and is destroyed afterwards is to pass its ownership to the continuation. When the continuation *owns* the object, the object will be kept alive until the continuation runs, and will be destroyed as soon as the continuation is not needed (i.e., it may have run, or skipped in case of exception and `then()` continuation).
+
+We already saw above that the way for a continuation to get ownership of an object is through *capturing*:
+
+```cpp
+seastar::future<> slow_incr(int i) {
+ return seastar::sleep(10ms).then([i] { return i + 1; });
+}
+```
+Here the continuation captures the value of `i`. In other words, the continuation includes a copy of `i`. When the continuation runs 10ms later, it will have access to this value, and as soon as the continuation finishes its object is destroyed, together with its captured copy of `i`. The continuation owns this copy of `i`.
+
+Capturing by value as we did here - making a copy of the object we need in the continuation - is useful mainly for very small objects such as the integer in the previous example. Other objects are expensive to copy, or sometimes even cannot be copied. For example, the following is **not** a good idea:
+```cpp
+seastar::future<> slow_op(std::vector<int> v) {
+ // this makes another copy of v:
+ return seastar::sleep(10ms).then([v] { /* do something with v */ });
+}
+```
+This would be inefficient - as the vector `v`, potentially very long, will be copied and the copy will be saved in the continuation. In this example, there is no reason to copy `v` - it was anyway passed to the function by value and will not be used again after capturing it into the continuation, as right after the capture, the function returns and destroys its copy of `v`.
+
+For such cases, C++14 allows *moving* the object into the continuation:
+```cpp
+seastar::future<> slow_op(std::vector<int> v) {
+ // v is not copied again, but instead moved:
+ return seastar::sleep(10ms).then([v = std::move(v)] { /* do something with v */ });
+}
+```
+Now, instead of copying the object `v` into the continuation, it is *moved* into the continuation. The C++11-introduced move constructor moves the vector's data into the continuation and clears the original vector. Moving is a quick operation - for a vector it only requires copying a few small fields such as the pointer to the data. As before, once the continuation is dismissed the vector is destroyed - and its data array (which was moved in the move operation) is finally freed.
+
+TODO: talk about temporary_buffer as an example of an object designed to be moved in this way.
+
+In some cases, moving the object is undesirable. For example, some code keeps references to an object or one of its fields and the references become invalid if the object is moved. In some complex objects, even the move constructor is slow. For these cases, C++ provides the useful wrapper `std::unique_ptr<T>`. A `unique_ptr<T>` object owns an object of type `T` allocated on the heap. When a `unique_ptr<T>` is moved, the object of type T is not touched at all - just the pointer to it is moved. An example of using `std::unique_ptr<T>` in capture is:
+
+```cpp
+seastar::future<> slow_op(std::unique_ptr<T> p) {
+ return seastar::sleep(10ms).then([p = std::move(p)] { /* do something with *p */ });
+}
+```
+
+`std::unique_ptr<T>` is the standard C++ mechanism for passing unique ownership of an object to a function: The object is only owned by one piece of code at a time, and ownership is transferred by moving the `unique_ptr` object. A `unique_ptr` cannot be copied: If we try to capture p by value, not by move, we will get a compilation error.
+
+## Keeping ownership at the caller
+
+The technique we described above - giving the continuation ownership of the object it needs to work on - is powerful and safe. But often it becomes hard and verbose to use. When an asynchronous operation involves not just one continuation but a chain of continuations that each needs to work on the same object, we need to pass the ownership of the object between each successive continuation, which can become inconvenient. It is especially inconvenient when we need to pass the same object into two separate asynchronous functions (or continuations) - after we move the object into one, the object needs to be returned so it can be moved again into the second. E.g.,
+```cpp
+seastar::future<> slow_op(T o) {
+ return seastar::sleep(10ms).then([o = std::move(o)] {
+ // first continuation, doing something with o
+ ...
+ // return o so the next continuation can use it!
+ return std::move(o);
+ }).then([](T o) {
+ // second continuation, doing something with o
+ ...
+ });
+}
+```
+
+This complexity arises because we wanted asynchronous functions and continuations to take the ownership of the objects they operated on. A simpler approach would be to have the *caller* of the asynchronous function continue to be the owner of the object, and just pass *references* to the object to the various other asynchronous functions and continuations which need the object. For example:
+
+```cpp
+seastar::future<> slow_op(T& o) { // <-- pass by reference
+ return seastar::sleep(10ms).then([&o] {// <-- capture by reference
+ // first continuation, doing something with o
+ ...
+    }).then([&o] { // <-- another capture by reference
+ // second continuation, doing something with o
+ ...
+ });
+}
+```
+
+This approach raises a question: The caller of `slow_op` is now responsible for keeping the object `o` alive while the asynchronous code started by `slow_op` needs this object. But how will this caller know how long this object is actually needed by the asynchronous operation it started?
+
+The most reasonable answer is that an asynchronous function may need access to its parameters until the future it returns is resolved - at which point the asynchronous code completes and no longer needs access to its parameters. We therefore recommend that Seastar code adopt the following convention:
+
+> **Whenever an asynchronous function takes a parameter by reference, the caller must ensure that the referred object lives until the future returned by the function is resolved.**
+
+Note that this is merely a convention suggested by Seastar, and unfortunately nothing in the C++ language enforces it. C++ programmers in non-Seastar programs often pass large objects to functions as a const reference just to avoid a slow copy, and assume that the called function will *not* save this reference anywhere. But in Seastar code, that is a dangerous practice because even if the asynchronous function did not intend to save the reference anywhere, it may end up doing it implicitly by passing this reference to another function and eventually capturing it in a continuation.
+
+> It would be nice if future versions of C++ could help us catch incorrect uses of references. Perhaps we could have a tag for a special kind of reference, an "immediate reference" which a function can use immediately (i.e., before returning a future), but cannot be captured into a continuation.
+
+With this convention in place, it is easy to write complex asynchronous functions like `slow_op` which pass the object around, by reference, until the asynchronous operation is done. But how does the caller ensure that the object lives until the returned future is resolved? The following is *wrong*:
+```cpp
+seastar::future<> f() {
+ T obj; // wrong! will be destroyed too soon!
+ return slow_op(obj);
+}
+```
+It is wrong because the object `obj` here is local to the call of `f`, and is destroyed as soon as `f` returns a future - not when this returned future is resolved! The correct thing for a caller to do would be to create the object `obj` on the heap (so it does not get destroyed as soon as `f` returns), and then run `slow_op(obj)` and when that future resolves (i.e., with `.finally()`), destroy the object.
+
+Seastar provides a convenient idiom, `do_with()` for doing this correctly:
+```cpp
+seastar::future<> f() {
+ return seastar::do_with(T(), [] (auto& obj) {
+ // obj is passed by reference to slow_op, and this is fine:
+ return slow_op(obj);
+    });
+}
+```
+`do_with` will *do* the given function *with* the given object alive.
+
+`do_with` saves the given object on the heap, and calls the given lambda with a reference to the new object. Finally it ensures that the new object is destroyed after the returned future is resolved. Usually, do_with is given an *rvalue*, i.e., an unnamed temporary object or an `std::move()`ed object, and `do_with` moves that object into its final place on the heap. `do_with` returns a future which resolves after everything described above is done (the lambda's future is resolved and the object is destroyed).
+
+For convenience, `do_with` can also be given multiple objects to hold alive. For example here we create two objects and hold alive them until the future resolves:
+```cpp
+seastar::future<> f() {
+ return seastar::do_with(T1(), T2(), [] (auto& obj1, auto& obj2) {
+ return slow_op(obj1, obj2);
+    });
+}
+```
+
+While `do_with` can guarantee the lifetime of the objects it holds, if the user accidentally makes copies of these objects, these copies might have the wrong lifetime. Unfortunately, a simple typo like forgetting an "&" can cause such accidental copies. For example, the following code is broken:
+```cpp
+seastar::future<> f() {
+ return seastar::do_with(T(), [] (T obj) { // WRONG: should be T&, not T
+ return slow_op(obj);
+    });
+}
+```
+In this wrong snippet, `obj` is mistakenly not a reference to the object which `do_with` allocated, but rather a copy of it - a copy which is destroyed as soon as the lambda function returns, rather than when the future it returns resolves. Such code will most likely crash because the object is used after being freed. Unfortunately the compiler will not warn about such mistakes. Users should get used to always using the type "auto&" with `do_with` - as in the above correct examples - to reduce the chance of such mistakes.
+
+ For the same reason, the following code snippet is also wrong:
+```cpp
+seastar::future<> slow_op(T obj); // WRONG: should be T&, not T
+seastar::future<> f() {
+ return seastar::do_with(T(), [] (auto& obj) {
+ return slow_op(obj);
+    });
+}
+```
+Here, although `obj` was correctly passed to the lambda by reference, we later accidentally passed `slow_op()` a copy of it (because here `slow_op` takes the object by value, not by reference), and this copy will be destroyed as soon as `slow_op` returns, not waiting until the returned future resolves.
+
+When using `do_with`, always remember it requires adhering to the convention described above: The asynchronous function which we call inside `do_with` must not use the objects held by `do_with` *after* the returned future is resolved. It is a serious use-after-free bug for an asynchronous function to return a future which resolves while still having background operations using the `do_with()`ed objects.
+
+In general, it is rarely a good idea for an asynchronous function to resolve while leaving behind background operations - even if those operations do not use the `do_with()`ed objects. Background operations that we do not wait for may cause us to run out of memory (if we don't limit their number) and make it difficult to shut down the application cleanly.
+
+
+## Sharing ownership (reference counting)
+In the beginning of this chapter, we already noted that capturing a copy of an object into a continuation is the simplest way to ensure that the object is alive when the continuation runs and destroyed afterwards. However, complex objects are often expensive (in time and memory) to copy. Some objects cannot be copied at all, or are read-write and the continuation should modify the original object, not a new copy. The solution to all these issues is **reference counted**, a.k.a. **shared** objects:
+
+A simple example of a reference-counted object in Seastar is a `seastar::file`, an object holding an open file object (we will introduce `seastar::file` in a later section). A `file` object can be copied, but copying does not involve copying the file descriptor (let alone the file). Instead, both copies point to the same open file, and a reference count is increased by 1. When a file object is destroyed, the file's reference count is decreased by one, and only when the reference count reaches 0 the underlying file is actually closed.
+
+The fact that `file` objects can be copied very quickly and all copies actually point to the same file, makes it very convenient to pass them to asynchronous code; For example,
+
+```cpp
+seastar::future<uint64_t> slow_size(file f) {
+ return seastar::sleep(10ms).then([f] {
+ return f.size();
+ });
+}
+```
+
+Note how calling `slow_size` is as simple as calling `slow_size(f)`, passing a copy of `f`, without needing to do anything special to ensure that `f` is only destroyed when no longer needed. That simply happens naturally when nothing refers to `f` any more.
+
+You may wonder why `return f.size()` in the above example is safe: Doesn't it start an asynchronous operation on `f` (the file's size may be stored on disk, so not immediately available), and `f` may be immediately destroyed when we return and nothing keeps holding a copy of `f`? If `f` is really the last reference, that is indeed a bug, but there is another one: the file is never closed. The assumption that makes the code valid is that there is another reference to `f` that will be used to close it. The close member function holds on to the reference count of that object, so it continues to live even if nothing else keeps holding on to it. Since all futures produced by a file object complete before it is closed, all that is needed for correctness is to remember to always close files.
+
+The reference counting has a run-time cost, but it is usually very small; It is important to remember that Seastar objects are always used by a single CPU only, so the reference-count increment and decrement operations are not the slow atomic operations often used for reference counting, but just regular CPU-local integer operations. Moreover, judicious use of `std::move()` and the compiler's optimizer can reduce the number of unnecessary back-and-forth increment and decrement of the reference count.
+
+C++11 offers a standard way of creating reference-counted shared objects - using the template `std::shared_ptr<T>`. A `shared_ptr` can be used to wrap any type into a reference-counted shared object like `seastar::file` above. However, the standard `std::shared_ptr` was designed with multi-threaded applications in mind so it uses slow atomic increment/decrement operations for the reference count which we already noted is unnecessary in Seastar. For this reason Seastar offers its own single-threaded implementation of this template, `seastar::shared_ptr<T>`. It is similar to `std::shared_ptr<T>` except no atomic operations are used.
+
+Additionally, Seastar also provides an even lower overhead variant of `shared_ptr`: `seastar::lw_shared_ptr<T>`. The full-featured `shared_ptr` is complicated by the need to support polymorphic types correctly (a shared object created of one class, and accessed through a pointer to a base class). It makes `shared_ptr` need to add two words to the shared object, and two words to each `shared_ptr` copy. The simplified `lw_shared_ptr` - which does **not** support polymorphic types - adds just one word in the object (the reference count) and each copy is just one word - just like copying a regular pointer. For this reason, the light-weight `seastar::lw_shared_ptr<T>` should be preferred when possible (`T` is not a polymorphic type), otherwise `seastar::shared_ptr<T>`. The slower `std::shared_ptr<T>` should never be used in sharded Seastar applications.
+
+## Saving objects on the stack
+Wouldn't it be convenient if we could save objects on a stack just like we normally do in synchronous code? I.e., something like:
+```cpp
+int i = ...;
+seastar::sleep(10ms).get();
+return i;
+```
+Seastar allows writing such code, by using a `seastar::thread` object which comes with its own stack. A complete example using a `seastar::thread` might look like this:
+```cpp
+seastar::future<int> slow_incr(int i) {
+ return seastar::async([i] {
+ seastar::sleep(10ms).get();
+ // We get here after the 10ms of wait, i is still available.
+ return i + 1;
+ });
+}
+```
+We present `seastar::thread`, `seastar::async()` and `seastar::future::get()` in the [seastar::thread] section.
+
+# Advanced futures
+## Futures and interruption
+TODO: A future, e.g., sleep(10s) cannot be interrupted. So if we need to, the promise needs to have a mechanism to interrupt it. Mention pipe's close feature, semaphore stop feature, etc.
+
+## Futures are single use
+TODO: Talk about if we have a `future<int>` variable, as soon as we `get()` or `then()` it, it becomes invalid - we need to store the value somewhere else. Think if there's an alternative we can suggest
+
+# Fibers
+Seastar continuations are normally short, but often chained to one another, so that one continuation does a bit of work and then schedules another continuation for later. Such chains can be long, and often even involve loops - see the following section, "Loops". We call such chains "fibers" of execution.
+
+These fibers are not threads - each is just a string of continuations - but they share some common requirements with traditional threads. For example, we want to avoid one fiber getting starved while a second fiber continuously runs its continuations one after another. As another example, fibers may want to communicate - e.g., one fiber produces data that a second fiber consumes, and we wish to ensure that both fibers get a chance to run, and that if one stops prematurely, the other doesn't hang forever.
+
+TODO: Mention fiber-related sections like loops, semaphores, gates, pipes, etc.
+
+# Loops
+A majority of time-consuming computations involve using loops. Seastar provides several primitives for expressing them in a way that composes nicely with the future/promise model. A very important aspect of Seastar loop primitives is that each iteration is followed by a preemption point, thus allowing other tasks to run in between iterations.
+
+## repeat
+A loop created with `repeat` executes its body until it receives a `stop_iteration` object, which informs if the iteration should continue (`stop_iteration::no`) or stop (`stop_iteration::yes`). Next iteration will be launched only after the first one has finished. The loop body passed to `repeat` is expected to have a `future<stop_iteration>` return type.
+```cpp
+seastar::future<int> recompute_number(int number);
+
+seastar::future<> push_until_100(seastar::lw_shared_ptr<std::vector<int>> queue, int element) {
+ return seastar::repeat([queue, element] {
+ if (queue->size() == 100) {
+ return make_ready_future<stop_iteration>(stop_iteration::yes);
+ }
+ return recompute_number(element).then([queue] (int new_element) {
+ queue->push_back(new_element);
+ return stop_iteration::no;
+ });
+ });
+}
+```
+
+## do_until
+Do until is a close relative of `repeat`, but it uses an explicitly passed condition to decide whether it should stop iterating. The above example could be expressed with `do_until` as follows:
+```cpp
+seastar::future<int> recompute_number(int number);
+
+seastar::future<> push_until_100(seastar::lw_shared_ptr<std::vector<int>> queue, int element) {
+ return seastar::do_until([queue] { return queue->size() == 100; }, [queue, element] {
+ return recompute_number(element).then([queue] (int new_element) {
+ queue->push_back(new_element);
+ });
+ });
+}
+```
+Note that the loop body is expected to return a `future<>`, which allows composing complex continuations inside the loop.
+
+## do_for_each
+A `do_for_each` is an equivalent of a `for` loop in Seastar world. It accepts a range (or a pair of iterators) and a function body, which it applies to each argument, in order, one by one. The next iteration will be launched only after the first one has finished, as was the case with `repeat`. As usual, `do_for_each` expects its loop body to return a `future<>`.
+```cpp
+seastar::future<> append(seastar::lw_shared_ptr<std::vector<int>> queue1, seastar::lw_shared_ptr<std::vector<int>> queue2) {
+ return seastar::do_for_each(queue2, [queue1] (int element) {
+ queue1->push_back(element);
+ });
+}
+
+seastar::future<> append_iota(seastar::lw_shared_ptr<std::vector<int>> queue1, int n) {
+ return seastar::do_for_each(boost::make_counting_iterator<size_t>(0), boost::make_counting_iterator<size_t>(n), [queue1] (int element) {
+ queue1->push_back(element);
+ });
+}
+```
+`do_for_each` accepts either an lvalue reference to a container or a pair of iterators. It implies that the responsibility to ensure that the container is alive during the whole loop execution belongs to the caller. If the container needs its lifetime prolonged, it can be easily achieved with `do_with`:
+```cpp
+seastar::future<> do_something(int number);
+
+seastar::future<> do_for_all(std::vector<int> numbers) {
+ // Note that the "numbers" vector will be destroyed as soon as this function
+ // returns, so we use do_with to guarantee it lives during the whole loop execution:
+ return seastar::do_with(std::move(numbers), [] (std::vector<int>& numbers) {
+ return seastar::do_for_each(numbers, [] (int number) {
+ return do_something(number);
+ });
+ });
+}
+```
+
+## parallel_for_each
+Parallel for each is a high concurrency variant of `do_for_each`. When using `parallel_for_each`, all iterations are queued simultaneously - which means that there's no guarantee in which order they finish their operations.
+
+```cpp
+seastar::future<> flush_all_files(seastar::lw_shared_ptr<std::vector<seastar::file>> files) {
+ return seastar::parallel_for_each(files, [] (seastar::file f) {
+ // file::flush() returns a future<>
+ return f.flush();
+ });
+}
+```
+`parallel_for_each` is a powerful tool, as it allows spawning many tasks in parallel. It can be a great performance gain, but there are also caveats. First of all, too high concurrency may be troublesome - the details can be found in chapter **Limiting parallelism of loops**.
+
+To restrict the concurrency of `parallel_for_each` by an integer number, use `max_concurrent_for_each` that is described below.
+More details about dealing with parallelism can be found in chapter **Limiting parallelism of loops**.
+
+Secondly, take note that the order in which iterations will be executed within a `parallel_for_each` loop is arbitrary - if a strict ordering is needed, consider using `do_for_each` instead.
+
+TODO: map_reduce, as a shortcut (?) for parallel_for_each which needs to produce some results (e.g., logical_or of boolean results), so we don't need to create a lw_shared_ptr explicitly (or do_with).
+
+TODO: See seastar commit "input_stream: Fix possible infinite recursion in consume()" for an example on why recursion is a possible, but bad, replacement for repeat(). See also my comment on https://groups.google.com/d/msg/seastar-dev/CUkLVBwva3Y/3DKGw-9aAQAJ on why Seastar's iteration primitives should be used over tail call optimization.
+
+## max_concurrent_for_each
+Max concurrent for each is a variant of `parallel_for_each` with restricted parallelism.
+It accepts an additional parameter - `max_concurrent` - with which, up to `max_concurrent` iterations are queued simultaneously, with no guarantee in which order they finish their operations.
+
+```cpp
+seastar::future<> flush_all_files(seastar::lw_shared_ptr<std::vector<seastar::file>> files, size_t max_concurrent) {
+ return seastar::max_concurrent_for_each(files, max_concurrent, [] (seastar::file f) {
+ return f.flush();
+ });
+}
+```
+
+Determining the maximum concurrency limit is out of the scope of this document.
+It should typically be derived from the actual capabilities of the system the software is running on, like the number of parallel execution units or I/O channels, so to optimize utilization of resources without overwhelming the system.
+
+# when_all: Waiting for multiple futures
+Above we've seen `parallel_for_each()`, which starts a number of asynchronous operations, and then waits for all to complete. Seastar has another idiom, `when_all()`, for waiting for several already-existing futures to complete.
+
+The first variant of `when_all()` is variadic, i.e., the futures are given as separate parameters, the exact number of which is known at compile time. The individual futures may have different types. For example,
+
+```cpp
+#include <seastar/core/sleep.hh>
+
+future<> f() {
+ using namespace std::chrono_literals;
+ future<int> slow_two = sleep(2s).then([] { return 2; });
+ return when_all(sleep(1s), std::move(slow_two),
+ make_ready_future<double>(3.5)
+ ).discard_result();
+}
+```
+
+This starts three futures - one which sleeps for one second (and doesn't return anything), one which sleeps for two seconds and returns the integer 2, and one which returns the double 3.5 immediately - and then waits for them. The `when_all()` function returns a future which resolves as soon as all three futures resolve, i.e., after two seconds. This future also has a value, which we shall explain below, but in this example, we simply waited for the future to resolve and discarded its value.
+
+Note that `when_all()` accepts only rvalues, which can be temporaries (like the return value of an asynchronous function or `make_ready_future`) or an `std::move()`'ed variable holding a future.
+
+The future returned by `when_all()` resolves to a tuple of futures which are already resolved, and contain the results of the three input futures. Continuing the above example,
+
+```cpp
+future<> f() {
+ using namespace std::chrono_literals;
+ future<int> slow_two = sleep(2s).then([] { return 2; });
+ return when_all(sleep(1s), std::move(slow_two),
+ make_ready_future<double>(3.5)
+ ).then([] (auto tup) {
+ std::cout << std::get<0>(tup).available() << "\n";
+ std::cout << std::get<1>(tup).get0() << "\n";
+ std::cout << std::get<2>(tup).get0() << "\n";
+ });
+}
+```
+
+The output of this program (which comes after two seconds) is `1, 2, 3.5`: the first future in the tuple is available (but has no value), the second has the integer value 2, and the third a double value 3.5 - as expected.
+
+One or more of the waited futures might resolve in an exception, but this does not change how `when_all()` works: It still waits for all the futures to resolve, each with either a value or an exception, and in the returned tuple some of the futures may contain an exception instead of a value. For example,
+
+```cpp
+future<> f() {
+ using namespace std::chrono_literals;
+ future<> slow_success = sleep(1s);
+ future<> slow_exception = sleep(2s).then([] { throw 1; });
+ return when_all(std::move(slow_success), std::move(slow_exception)
+ ).then([] (auto tup) {
+ std::cout << std::get<0>(tup).available() << "\n";
+ std::cout << std::get<1>(tup).failed() << "\n";
+ std::get<1>(tup).ignore_ready_future();
+ });
+}
+```
+
+Both futures are `available()` (resolved), but the second has `failed()` (resulted in an exception instead of a value). Note how we called `ignore_ready_future()` on this failed future, because silently ignoring a failed future is considered a bug, and will result in an "Exceptional future ignored" error message. More typically, an application will log the failed future instead of ignoring it.
+
+The above example demonstrates that `when_all()` is inconvenient and verbose to use properly. The results are wrapped in a tuple, leading to verbose tuple syntax, and uses ready futures which must all be inspected individually for an exception to avoid error messages.
+
+So Seastar also provides an easier to use `when_all_succeed()` function. This function too returns a future which resolves when all the given futures have resolved. If all of them succeeded, it passes a tuple of the resulting values to continuation, without wrapping each of them in a future first. Sometimes, it could be tedious to unpack the tuple for consuming the resulting values. In that case, `then_unpack()` can be used in place of `then()`. `then_unpack()` unpacks the returned tuple and passes its elements to the following continuation as its parameters. If, however, one or more of the futures failed, `when_all_succeed()` resolves to a failed future, containing the exception from one of the failed futures. If more than one of the given future failed, one of those will be passed on (it is unspecified which one is chosen), and the rest will be silently ignored. For example,
+
+```cpp
+using namespace seastar;
+future<> f() {
+ using namespace std::chrono_literals;
+ return when_all_succeed(sleep(1s), make_ready_future<int>(2),
+ make_ready_future<double>(3.5)
+ ).then_unpack([] (int i, double d) {
+ std::cout << i << " " << d << "\n";
+ });
+}
+```
+
+Note how the integer and double values held by the futures are conveniently passed, individually to the continuation. Since `sleep()` does not contain a value, it is waited for, but no third value is passed to the continuation. That also means that if we `when_all_succeed()` on several `future<>` (without a value), the result is a `future<tuple<>>`:
+
+```cpp
+using namespace seastar;
+future<> f() {
+ using namespace std::chrono_literals;
+ return when_all_succeed(sleep(1s), sleep(2s), sleep(3s)).then_unpack([] {
+ return make_ready_future<>();
+ });
+}
+```
+
+This example simply waits for 3 seconds (the maximum of 1, 2 and 3 seconds).
+
+An example of `when_all_succeed()` with an exception:
+
+```cpp
+using namespace seastar;
+future<> f() {
+ using namespace std::chrono_literals;
+ return when_all_succeed(make_ready_future<int>(2),
+ make_exception_future<double>("oops")
+ ).then_unpack([] (int i, double d) {
+ std::cout << i << " " << d << "\n";
+ }).handle_exception([] (std::exception_ptr e) {
+ std::cout << "exception: " << e << "\n";
+ });
+}
+```
+
+In this example, one of the futures fails, so the result of `when_all_succeed` is a failed future, so the normal continuation is not run, and the `handle_exception()` continuation is done.
+
+TODO: also explain `when_all` and `when_all_succeed` for vectors.
+
+# Semaphores
+Seastar's semaphores are the standard computer-science semaphores, adapted for futures. A semaphore is a counter into which you can deposit units or take them away. Taking units from the counter may wait if not enough units are available.
+
+## Limiting parallelism with semaphores
+The most common use for a semaphore in Seastar is for limiting parallelism, i.e., limiting the number of instances of some code which can run in parallel. This can be important when each of the parallel invocations uses a limited resource (e.g., memory) so letting an unlimited number of them run in parallel can exhaust this resource.
+
+Consider a case where an external source of events (e.g., an incoming network request) causes an asynchronous function ```g()``` to be called. Imagine that we want to limit the number of concurrent ```g()``` operations to 100. I.e., If g() is started when 100 other invocations are still ongoing, we want it to delay its real work until one of the other invocations has completed. We can do this with a semaphore:
+
+```cpp
+seastar::future<> g() {
+ static thread_local seastar::semaphore limit(100);
+ return limit.wait(1).then([] {
+ return slow(); // do the real work of g()
+ }).finally([] {
+ limit.signal(1);
+ });
+}
+```
+
+In this example, the semaphore starts with the counter at 100. The asynchronous operation `slow()` is only started when we can reduce the counter by one (`wait(1)`), and when `slow()` is done, either successfully or with exception, the counter is increased back by one (```signal(1)```). This way, when 100 operations have already started their work and have not yet finished, the 101st operation will wait, until one of the ongoing operations finishes and returns a unit to the semaphore. This ensures that at each time we have at most 100 concurrent `slow()` operations running in the above code.
+
+Note how we used a ```static thread_local``` semaphore, so that all calls to ```g()``` from the same shard count towards the same limit; As usual, a Seastar application is sharded so this limit is separate per shard (CPU thread). This is usually fine, because sharded applications consider resources to be separate per shard.
+
+Luckily, the above code happens to be exception safe: `limit.wait(1)` can throw an exception when it runs out of memory (keeping a list of waiters), and in that case the semaphore counter is not decreased but the continuations below are not run so it is not increased either. `limit.wait(1)` can also return an exceptional future when the semaphore is *broken* (we'll discuss this later) but in that case the extra `signal()` call is ignored. Finally, `slow()` may also throw, or return an exceptional future, but the `finally()` ensures the semaphore is still increased.
+
+However, as the application code becomes more complex, it becomes harder to ensure that we never forget to call `signal()` after the operation is done, regardless of which code path or exceptions happen. As an example of what might go wrong, consider the following *buggy* code snippet, which differs subtly from the above one, and also appears, on first sight, to be correct:
+
+```cpp
+seastar::future<> g() {
+ static thread_local seastar::semaphore limit(100);
+ return limit.wait(1).then([] {
+ return slow().finally([] { limit.signal(1); });
+ });
+}
+```
+
+But this version is **not** exception safe: Consider what happens if `slow()` throws an exception before returning a future (this is different from `slow()` returning an exceptional future - we discussed this difference in the section about exception handling). In this case, we decreased the counter, but the `finally()` will never be reached, and the counter will never be increased back. There is a way to fix this code, by replacing the call to `slow()` with `seastar::futurize_invoke(slow)`. But the point we're trying to make here is not how to fix buggy code, but rather that by using the separate `semaphore::wait()` and `semaphore::signal()` functions, you can very easily get things wrong.
+
+For exception safety, in C++ it is generally not recommended to have separate resource acquisition and release functions. Instead, C++ offers safer mechanisms for acquiring a resource (in this case semaphore units) and later releasing it: lambda functions, and RAII ("resource acquisition is initialization"):
+
+The lambda-based solution is a function ```seastar::with_semaphore()``` which is a shortcut for the code in the examples above:
+
+```cpp
+seastar::future<> g() {
+ static thread_local seastar::semaphore limit(100);
+ return seastar::with_semaphore(limit, 1, [] {
+ return slow(); // do the real work of g()
+ });
+}
+```
+
+`with_semaphore()`, like the earlier code snippets, waits for the given number of units from the semaphore, then runs the given lambda, and when the future returned by the lambda is resolved, `with_semaphore()` returns back the units to the semaphore. `with_semaphore()` returns a future which only resolves after all these steps are done.
+
+The function `seastar::get_units()` is more general. It provides an exception-safe alternative to `seastar::semaphore`'s separate `wait()` and `signal()` methods, based on C++'s RAII philosophy: The function returns an opaque units object, which while held, keeps the semaphore's counter decreased - and as soon as this object is destructed, the counter is increased back. With this interface you cannot forget to increase the counter, or increase it twice, or increase without decreasing: The counter will always be decreased once when the units object is created, and if that succeeded, increased when the object is destructed. When the units object is moved into a continuation, no matter how this continuation ends, when the continuation is destructed, the units object is destructed and the units are returned to the semaphore's counter. The above examples, written with `get_units()`, looks like this:
+
+```cpp
+seastar::future<> g() {
+ static thread_local semaphore limit(100);
+ return seastar::get_units(limit, 1).then([] (auto units) {
+ return slow().finally([units = std::move(units)] {});
+ });
+}
+```
+
+Note the somewhat convoluted way that `get_units()` needs to be used: The continuations must be nested because we need the `units` object to be moved to the last continuation. If `slow()` returns a future (and does not throw immediately), the `finally()` continuation captures the `units` object until everything is done, but does not run any code.
+
+Seastar programmers should generally avoid using the `semaphore::wait()` and `semaphore::signal()` functions directly, and always prefer either `with_semaphore()` (when applicable) or `get_units()`.
+
+
+## Limiting resource use
+Because semaphores support waiting for any number of units, not just 1, we can use them for more than simple limiting of the *number* of parallel invocation. For example, consider we have an asynchronous function ```using_lots_of_memory(size_t bytes)```, which uses ```bytes``` bytes of memory, and we want to ensure that not more than 1 MB of memory is used by all parallel invocations of this function --- and that additional calls are delayed until previous calls have finished. We can do this with a semaphore:
+
+```cpp
+seastar::future<> using_lots_of_memory(size_t bytes) {
+ static thread_local seastar::semaphore limit(1000000); // limit to 1MB
+ return seastar::with_semaphore(limit, bytes, [bytes] {
+ // do something allocating 'bytes' bytes of memory
+ });
+}
+```
+
+Watch out that in the above example, a call to `using_lots_of_memory(2000000)` will return a future that never resolves, because the semaphore will never contain enough units to satisfy the semaphore wait. `using_lots_of_memory()` should probably check whether `bytes` is above the limit, and throw an exception in that case. Seastar doesn't do this for you.
+
+
+## Limiting parallelism of loops
+Above, we looked at a function `g()` which gets called by some external event, and wanted to control its parallelism. In this section, we look at parallelism of loops, which also can be controlled with semaphores.
+
+Consider the following simple loop:
+
+```cpp
+#include <seastar/core/sleep.hh>
+seastar::future<> slow() {
+ std::cerr << ".";
+ return seastar::sleep(std::chrono::seconds(1));
+}
+seastar::future<> f() {
+ return seastar::repeat([] {
+ return slow().then([] { return seastar::stop_iteration::no; });
+ });
+}
+```
+
+This loop runs the ```slow()``` function (taking one second to complete) without any parallelism --- the next ```slow()``` call starts only when the previous one completed. But what if we do not need to serialize the calls to ```slow()```, and want to allow multiple instances of it to be ongoing concurrently?
+
+Naively, we could achieve more parallelism, by starting the next call to ```slow()``` right after the previous call --- ignoring the future returned by the previous call to ```slow()``` and not waiting for it to resolve:
+```cpp
+seastar::future<> f() {
+ return seastar::repeat([] {
+ slow();
+ return seastar::stop_iteration::no;
+ });
+}
+```
+
+But in this loop, there is no limit to the amount of parallelism --- millions of ```sleep()``` calls might be active in parallel, before the first one ever returned. Eventually, this loop may consume all available memory and crash.
+
+Using a semaphore allows us to run many instances of ```slow()``` in parallel, but limit the number of these parallel instances to, in the following example, 100:
+
+```cpp
+seastar::future<> f() {
+ return seastar::do_with(seastar::semaphore(100), [] (auto& limit) {
+ return seastar::repeat([&limit] {
+ return limit.wait(1).then([&limit] {
+ seastar::futurize_invoke(slow).finally([&limit] {
+ limit.signal(1);
+ });
+ return seastar::stop_iteration::no;
+ });
+ });
+ });
+}
+```
+
+Note how this code differs from the code we saw above for limiting the number of parallel invocations of a function `g()`:
+
+1. Here we cannot use a single `thread_local` semaphore. Each call to `f()` has its loop with parallelism of 100, so needs its own semaphore "`limit`", kept alive during the loop with `do_with()`.
+2. Here we do not wait for `slow()` to complete before continuing the loop, i.e., we do not `return` the future chain starting at `futurize_invoke(slow)`. The loop continues to the next iteration when a semaphore unit becomes available, while (in our example) 99 other operations might be ongoing in the background and we do not wait for them.
+
+In the examples in this section, we cannot use the `with_semaphore()` shortcut. `with_semaphore()` returns a future which only resolves after the lambda's returned future resolves. But in the above example, the loop needs to know when just the semaphore units are available, to start the next iteration --- and not wait for the previous iteration to complete. We could not achieve that with `with_semaphore()`. But the more general exception-safe idiom, `seastar::get_units()`, can be used in this case, and is recommended:
+
+
+```cpp
+seastar::future<> f() {
+ return seastar::do_with(seastar::semaphore(100), [] (auto& limit) {
+ return seastar::repeat([&limit] {
+ return seastar::get_units(limit, 1).then([] (auto units) {
+ slow().finally([units = std::move(units)] {});
+ return seastar::stop_iteration::no;
+ });
+ });
+ });
+}
+```
+
+The above examples are not realistic, because they have a never-ending loop and the future returned by `f()` will never resolve. In more realistic cases, the loop has an end, and at the end of the loop we need to wait for all the background operations which the loop started. We can do this by ```wait()```ing on the original count of the semaphore: When the full count is finally available, it means that *all* the operations have completed. For example, the following loop ends after 456 iterations:
+
+```cpp
+seastar::future<> f() {
+ return seastar::do_with(seastar::semaphore(100), [] (auto& limit) {
+ return seastar::do_for_each(boost::counting_iterator<int>(0),
+ boost::counting_iterator<int>(456), [&limit] (int i) {
+ return seastar::get_units(limit, 1).then([] (auto units) {
+ slow().finally([units = std::move(units)] {});
+ });
+ }).finally([&limit] {
+ return limit.wait(100);
+ });
+ });
+}
+````
+
+The last `finally` is what ensures that we wait for the last operations to complete: After the `do_for_each` loop ends (whether successfully or prematurely because of an exception in one of the iterations), we do a `wait(100)` to wait for the semaphore to reach its original value 100, meaning that all operations that we started have completed. Without this `finally`, the future returned by `f()` will resolve *before* all the iterations of the loop actually completed (the last 100 may still be running).
+
+In the idiom we saw in the above example, the same semaphore is used both for limiting the number of background operations, and later to wait for all of them to complete. Sometimes, we want several different loops to use the same semaphore to limit their *total* parallelism. In that case we must use a separate mechanism for waiting for the completion of the background operations started by the loop. The most convenient way to wait for ongoing operations is using a gate, which we will describe in detail later. A typical example of a loop whose parallelism is limited by an external semaphore:
+
+```cpp
+thread_local seastar::semaphore limit(100);
+seastar::future<> f() {
+ return seastar::do_with(seastar::gate(), [] (auto& gate) {
+ return seastar::do_for_each(boost::counting_iterator<int>(0),
+ boost::counting_iterator<int>(456), [&gate] (int i) {
+ return seastar::get_units(limit, 1).then([&gate] (auto units) {
+ gate.enter();
+ seastar::futurize_invoke(slow).finally([&gate, units = std::move(units)] {
+ gate.leave();
+ });
+ });
+ }).finally([&gate] {
+ return gate.close();
+ });
+ });
+}
+```
+In this code, we use the external semaphore `limit` to limit the number of concurrent operations, but additionally have a gate specific to this loop to help us wait for all ongoing operations to complete.
+
+TODO: also allow `get_units()` or something similar on a gate, and use that instead of the explicit gate.enter/gate.leave.
+
+TODO: say something about semaphore fairness - if someone is waiting for a lot of units and later someone asks for 1 unit, will both wait or will the request for 1 unit be satisfied?
+
+TODO: say something about broken semaphores? (or in later section especially about breaking/closing/shutting down/etc?)
+
+TODO: Have a few paragraphs, or even a section, on additional uses of semaphores. One is for mutual exclusion using semaphore(1) - we need to explain why although why in Seastar we don't have multiple threads touching the same data, if code is composed of different continuations (i.e., a fiber) it can switch to a different fiber in the middle, so if data needs to be protected between two continuations, it needs a mutex. Another example is something akin to wait_all: we start with a semaphore(0), run a known number N of asynchronous functions with finally sem.signal(), and from all this return the future sem.wait(N). PERHAPS even have a separate section on mutual exclusion, where we begin with semaphore(1) but also mention shared_mutex
+
+# Pipes
+Seastar's `pipe<T>` is a mechanism to transfer data between two fibers, one producing data, and the other consuming it. It has a fixed-size buffer to ensure a balanced execution of the two fibers, because the producer fiber blocks when it writes to a full pipe, until the consumer fiber gets to run and read from the pipe.
+
+A `pipe<T>` resembles a Unix pipe, in that it has a read side, a write side, and a fixed-sized buffer between them, and supports either end to be closed independently (and EOF or broken pipe when using the other side). A `pipe<T>` object holds the read and write sides of the pipe as two separate objects. These objects can be moved into two different fibers. Importantly, if one of the pipe ends is destroyed (i.e., the continuations capturing it end), the other end of the pipe will stop blocking, so the other fiber will not hang.
+
+The pipe's read and write interfaces are future-based blocking. I.e., the write() and read() methods return a future which is fulfilled when the operation is complete. The pipe is single-reader single-writer, meaning that until the future returned by read() is fulfilled, read() must not be called again (and same for write).
+Note: The pipe reader and writer are movable, but *not* copyable. It is often convenient to wrap each end in a shared pointer, so it can be copied (e.g., used in an std::function which needs to be copyable) or easily captured into multiple continuations.
+
+# Shutting down a service with a gate
+Consider an application which has some long operation `slow()`, and many such operations may be started at any time. A number of `slow()` operations may even be active in parallel. Now, you want to shut down this service, but want to make sure that before that, all outstanding operations are completed. Moreover, you don't want to allow new `slow()` operations to start while the shut-down is in progress.
+
+This is the purpose of a `seastar::gate`. A gate `g` maintains an internal counter of operations in progress. We call `g.enter()` when entering an operation (i.e., before running `slow()`), and call `g.leave()` when leaving the operation (when a call to `slow()` completed). The method `g.close()` *closes the gate*, which means it forbids any further calls to `g.enter()` (such attempts will generate an exception); Moreover `g.close()` returns a future which resolves when all the existing operations have completed. In other words, when `g.close()` resolves, we know that no more invocations of `slow()` can be in progress - because the ones that already started have completed, and new ones could not have started.
+
+The construct
+```cpp
+seastar::with_gate(g, [] { return slow(); })
+```
+can be used as a shortcut to the idiom
+```cpp
+g.enter();
+slow().finally([&g] { g.leave(); });
+```
+
+Here is a typical example of using a gate:
+
+```cpp
+#include <seastar/core/sleep.hh>
+#include <seastar/core/gate.hh>
+#include <boost/iterator/counting_iterator.hpp>
+
+seastar::future<> slow(int i) {
+ std::cerr << "starting " << i << "\n";
+ return seastar::sleep(std::chrono::seconds(10)).then([i] {
+ std::cerr << "done " << i << "\n";
+ });
+}
+seastar::future<> f() {
+ return seastar::do_with(seastar::gate(), [] (auto& g) {
+ return seastar::do_for_each(boost::counting_iterator<int>(1),
+ boost::counting_iterator<int>(6),
+ [&g] (int i) {
+ seastar::with_gate(g, [i] { return slow(i); });
+ // wait one second before starting the next iteration
+ return seastar::sleep(std::chrono::seconds(1));
+ }).then([&g] {
+ seastar::sleep(std::chrono::seconds(1)).then([&g] {
+ // This will fail, because it will be after the close()
+ seastar::with_gate(g, [] { return slow(6); });
+ });
+ return g.close();
+ });
+ });
+}
+```
+
+In this example, we have a function `future<> slow()` taking 10 seconds to complete. We run it in a loop 5 times, waiting 1 second between calls, and surround each call with entering and leaving the gate (using `with_gate`). After the 5th call, while all calls are still ongoing (because each takes 10 seconds to complete), we close the gate and wait for it before exiting the program. We also test that new calls cannot begin after closing the gate, by trying to enter the gate again one second after closing it.
+
+The output of this program looks like this:
+```
+starting 1
+starting 2
+starting 3
+starting 4
+starting 5
+WARNING: exceptional future ignored of type 'seastar::gate_closed_exception': gate closed
+done 1
+done 2
+done 3
+done 4
+done 5
+```
+
+Here, the invocations of `slow()` were started at 1 second intervals. After the "`starting 5`" message, we closed the gate and another attempt to use it resulted in a `seastar::gate_closed_exception`, which we ignored and hence this message. At this point the application waits for the future returned by `g.close()`. This will happen once all the `slow()` invocations have completed: Immediately after printing "`done 5`", the test program stops.
+
+As explained so far, a gate can prevent new invocations of an operation, and wait for any in-progress operations to complete. However, these in-progress operations may take a very long time to complete. Often, a long operation would like to know that a shut-down has been requested, so it could stop its work prematurely. An operation can check whether its gate was closed by calling the gate's `check()` method: If the gate is already closed, the `check()` method throws an exception (the same `seastar::gate_closed_exception` that `enter()` would throw at that point). The intent is that the exception will cause the operation calling it to stop at this point.
+
+In the previous example code, we had an un-interruptible operation `slow()` which slept for 10 seconds. Let's replace it by a loop of 10 one-second sleeps, calling `g.check()` each second:
+
+```cpp
+seastar::future<> slow(int i, seastar::gate &g) {
+ std::cerr << "starting " << i << "\n";
+ return seastar::do_for_each(boost::counting_iterator<int>(0),
+ boost::counting_iterator<int>(10),
+ [&g] (int) {
+ g.check();
+ return seastar::sleep(std::chrono::seconds(1));
+ }).finally([i] {
+ std::cerr << "done " << i << "\n";
+ });
+}
+```
+
+Now, just one second after the gate is closed (after the "starting 5" message is printed), all the `slow()` operations notice the gate was closed, and stop. As expected, the exception stops the `do_for_each()` loop, and the `finally()` continuation is performed so we see the "done" messages for all five operations.
+
+
+# Introducing shared-nothing programming
+
+TODO: Explain in more detail Seastar's shared-nothing approach where the entire memory is divided up-front to cores, malloc/free and pointers only work on one core.
+
+TODO: Introduce our shared_ptr (and lw_shared_ptr) and sstring and say the standard ones use locked instructions which are unnecessary when we assume these objects (like all others) are for a single thread. Our futures and continuations do the same.
+
+
+# More about Seastar's event loop
+TODO: Mention the event loop (scheduler). remind that continuations on the same thread do not run in parallel, so do not need locks, atomic variables, etc (different threads shouldn't access the same data - more on that below). continuations obviously must not use blocking operations, or they block the whole thread.
+
+TODO: Talk about polling that we currently do, and how today even sleep() or waiting for incoming connections or whatever, takes 100% of all CPUs.
+
+# Introducing Seastar's network stack
+
+TODO: Mention the two modes of operation: Posix and native (i.e., take a L2 (Ethernet) interface (vhost or dpdk) and on top of it we built (in Seastar itself) an L3 interface (TCP/IP)).
+
+For optimal performance, Seastar's network stack is sharded just like Seastar applications are: each shard (thread) takes responsibility for a different subset of the connections. Each incoming connection is directed to one of the threads, and after a connection is established, it continues to be handled on the same thread.
+
+In the examples we saw earlier, `main()` ran our function `f()` only once, on the first thread. Unless the server is run with the `"-c1"` option (one thread only), this will mean that any connection arriving to a different thread will not be handled. So in all the examples below, we will need to run the same service loop on all cores. We can easily do this with the `smp::submit_to` function:
+
+```cpp
+seastar::future<> service_loop();
+
+seastar::future<> f() {
+ return seastar::parallel_for_each(boost::irange<unsigned>(0, seastar::smp::count),
+ [] (unsigned c) {
+ return seastar::smp::submit_to(c, service_loop);
+ });
+}
+```
+
+Here we ask each of Seastar cores (from 0 to `smp::count`-1) to run the same function `service_loop()`. Each of these invocations returns a future, and `f()` will return when all of them have returned (in the examples below, they will never return - we will discuss shutting down services in later sections).
+
+We begin with a simple example of a TCP network server written in Seastar. This server repeatedly accepts connections on TCP port 1234, and returns an empty response:
+
+```cpp
+#include <seastar/core/seastar.hh>
+#include <seastar/core/reactor.hh>
+#include <seastar/core/future-util.hh>
+#include <seastar/net/api.hh>
+
+seastar::future<> service_loop() {
+ return seastar::do_with(seastar::listen(seastar::make_ipv4_address({1234})),
+ [] (auto& listener) {
+ return seastar::keep_doing([&listener] () {
+ return listener.accept().then(
+ [] (seastar::accept_result res) {
+ std::cout << "Accepted connection from " << res.remote_address << "\n";
+ });
+ });
+ });
+}
+```
+
+This code works as follows:
+
+1. The ```listen()``` call creates a ```server_socket``` object, ```listener```, which listens on TCP port 1234 (on any network interface).
+2. We use ```do_with()``` to ensure that the listener socket lives throughout the loop.
+3. To handle one connection, we call ```listener```'s ```accept()``` method. This method returns a ```future<accept_result>```, i.e., is eventually resolved with an incoming TCP connection from a client (```accept_result.connection```) and the client's IP address and port (```accept_result.remote_address```).
+4. To repeatedly accept new connections, we use the ```keep_doing()``` loop idiom. ```keep_doing()``` runs its lambda parameter over and over, starting the next iteration as soon as the future returned by the previous iteration completes. The iterations only stop if an exception is encountered. The future returned by ```keep_doing()``` itself completes only when the iteration stops (i.e., only on exception).
+
+Output from this server looks like the following example:
+
+```
+$ ./a.out
+Accepted connection from 127.0.0.1:47578
+Accepted connection from 127.0.0.1:47582
+...
+```
+
+If you run the above example server immediately after killing the previous server, it often fails to start again, complaining that:
+
+```
+$ ./a.out
+program failed with uncaught exception: bind: Address already in use
+```
+
+This happens because by default, Seastar refuses to reuse the local port if there are any vestiges of old connections using that port. In our silly server, because the server is the side which first closes the connection, each connection lingers for a while in the "```TIME_WAIT```" state after being closed, and these prevent ```listen()``` on the same port from succeeding. Luckily, we can give listen an option to work despite these remaining ```TIME_WAIT```. This option is analogous to ```socket(7)```'s ```SO_REUSEADDR``` option:
+
+```cpp
+ seastar::listen_options lo;
+ lo.reuse_address = true;
+ return seastar::do_with(seastar::listen(seastar::make_ipv4_address({1234}), lo),
+```
+
+Most servers will always turn on this ```reuse_address``` listen option. Stevens' book "Unix Network Programming" even says that "All TCP servers should specify this socket option to allow the server to be restarted". Therefore in the future Seastar should probably default to this option being on --- even if for historic reasons this is not the default in Linux's socket API.
+
+Let's advance our example server by outputting some canned response to each connection, instead of closing each connection immediately with an empty reply.
+
+```cpp
+#include <seastar/core/seastar.hh>
+#include <seastar/core/reactor.hh>
+#include <seastar/core/future-util.hh>
+#include <seastar/net/api.hh>
+
+const char* canned_response = "Seastar is the future!\n";
+
+seastar::future<> service_loop() {
+ seastar::listen_options lo;
+ lo.reuse_address = true;
+ return seastar::do_with(seastar::listen(seastar::make_ipv4_address({1234}), lo),
+ [] (auto& listener) {
+ return seastar::keep_doing([&listener] () {
+ return listener.accept().then(
+ [] (seastar::accept_result res) {
+ auto s = std::move(res.connection);
+ auto out = s.output();
+ return seastar::do_with(std::move(s), std::move(out),
+ [] (auto& s, auto& out) {
+ return out.write(canned_response).then([&out] {
+ return out.close();
+ });
+ });
+ });
+ });
+ });
+}
+```
+
+The new part of this code begins by taking the ```connected_socket```'s ```output()```, which returns an ```output_stream<char>``` object. On this output stream ```out``` we can write our response using the ```write()``` method. The simple-looking ```write()``` operation is in fact a complex asynchronous operation behind the scenes, possibly causing multiple packets to be sent, retransmitted, etc., as needed. ```write()``` returns a future saying when it is ok to ```write()``` again to this output stream; This does not necessarily guarantee that the remote peer received all the data we sent it, but it guarantees that the output stream has enough buffer space (or in the TCP case, there is enough room in the TCP congestion window) to allow another write to begin.
+
+After ```write()```ing the response to ```out```, the example code calls ```out.close()``` and waits for the future it returns. This is necessary, because ```write()``` attempts to batch writes so might not have yet written anything to the TCP stack at this point, and only when close() concludes can we be sure that all the data we wrote to the output stream has actually reached the TCP stack --- and only at this point we may finally dispose of the ```out``` and ```s``` objects.
+
+Indeed, this server returns the expected response:
+
+```
+$ telnet localhost 1234
+...
+Seastar is the future!
+Connection closed by foreign host.
+```
+
+In the above example we only saw writing to the socket. Real servers will also want to read from the socket. The ```connected_socket```'s ```input()``` method returns an ```input_stream<char>``` object which can be used to read from the socket. The simplest way to read from this stream is using the ```read()``` method which returns a future that resolves to a ```temporary_buffer<char>```, containing some more bytes read from the socket --- or an empty buffer when the remote end shut down the connection.
+
+```temporary_buffer<char>``` is a convenient and safe way to pass around byte buffers that are only needed temporarily (e.g., while processing a request). As soon as this object goes out of scope (by normal return, or exception), the memory it holds gets automatically freed. Ownership of buffer can also be transferred by ```std::move()```ing it. We'll discuss ```temporary_buffer``` in more details in a later section.
+
+Let's look at a simple example server involving both reads and writes. This is a simple echo server, as described in RFC 862: The server listens for connections from the client, and once a connection is established, any data received is simply sent back - until the client closes the connection.
+
+```cpp
+#include <seastar/core/seastar.hh>
+#include <seastar/core/reactor.hh>
+#include <seastar/core/future-util.hh>
+#include <seastar/net/api.hh>
+
+seastar::future<> handle_connection(seastar::connected_socket s,
+ seastar::socket_address a) {
+ auto out = s.output();
+ auto in = s.input();
+ return do_with(std::move(s), std::move(out), std::move(in),
+ [] (auto& s, auto& out, auto& in) {
+ return seastar::repeat([&out, &in] {
+ return in.read().then([&out] (auto buf) {
+ if (buf) {
+ return out.write(std::move(buf)).then([&out] {
+ return out.flush();
+ }).then([] {
+ return seastar::stop_iteration::no;
+ });
+ } else {
+ return seastar::make_ready_future<seastar::stop_iteration>(
+ seastar::stop_iteration::yes);
+ }
+ });
+ }).then([&out] {
+ return out.close();
+ });
+ });
+}
+
+seastar::future<> service_loop_3() {
+ seastar::listen_options lo;
+ lo.reuse_address = true;
+ return seastar::do_with(seastar::listen(seastar::make_ipv4_address({1234}), lo),
+ [] (auto& listener) {
+ return seastar::keep_doing([&listener] () {
+ return listener.accept().then(
+ [] (seastar::accept_result res) {
+ // Note we ignore, not return, the future returned by
+ // handle_connection(), so we do not wait for one
+ // connection to be handled before accepting the next one.
+ (void)handle_connection(std::move(res.connection), std::move(res.remote_address)).handle_exception(
+ [] (std::exception_ptr ep) {
+ fmt::print(stderr, "Could not handle connection: {}\n", ep);
+ });
+ });
+ });
+ });
+}
+```
+
+The main function ```service_loop_3()``` loops accepting new connections, and for each connection calls ```handle_connection()``` to handle this connection. Our ```handle_connection()``` returns a future saying when handling this connection completed, but importantly, we do ***not*** wait for this future: Remember that ```keep_doing``` will only start the next iteration when the future returned by the previous iteration is resolved. Because we want to allow parallel ongoing connections, we don't want the next ```accept()``` to wait until the previously accepted connection was closed. So we call ```handle_connection()``` to start the handling of the connection, but return nothing from the continuation, which resolves that future immediately, so ```keep_doing``` will continue to the next ```accept()```.
+
+This demonstrates how easy it is to run parallel _fibers_ (chains of continuations) in Seastar - When a continuation runs an asynchronous function but ignores the future it returns, the asynchronous operation continues in parallel, but never waited for.
+
+It is often a mistake to silently ignore an exception, so if the future we're ignoring might resolve with an exception, it is recommended to handle this case, e.g. using a ```handle_exception()``` continuation. In our case, a failed connection is fine (e.g., the client might close its connection while we're sending it output), so we did not bother to handle the exception.
+
+The ```handle_connection()``` function itself is straightforward --- it repeatedly calls ```read()``` on the input stream, to receive a ```temporary_buffer``` with some data, and then moves this temporary buffer into a ```write()``` call on the output stream. The buffer will eventually be freed, automatically, when the ```write()``` is done with it. When ```read()``` eventually returns an empty buffer signifying the end of input, we stop ```repeat```'s iteration by returning a ```stop_iteration::yes```.
+
+# Sharded services
+
+In the previous section we saw that a Seastar application usually needs to run its code on all available CPU cores. We saw that the `seastar::smp::submit_to()` function allows the main function, which initially runs only on the first core, to start the server's code on all `seastar::smp::count` cores.
+
+However, usually one needs not just to run code on each core, but also to have an object that contains the state of this code. Additionally, one may like to interact with those different objects, and also have a mechanism to stop the service running on the different cores.
+
+The `seastar::sharded<T>` template provides a structured way to create such a _sharded service_. It creates a separate object of type `T` in each core, and provides mechanisms to interact with those copies, to start some code on each, and finally to cleanly stop the service.
+
+To use `seastar::sharded`, first create a class for the object holding the state of the service on a single core. For example:
+
+```cpp
+#include <seastar/core/future.hh>
+#include <iostream>
+
+class my_service {
+public:
+ std::string _str;
+ my_service(const std::string& str) : _str(str) { }
+ seastar::future<> run() {
+ std::cerr << "running on " << seastar::engine().cpu_id() <<
+ ", _str = " << _str << "\n";
+ return seastar::make_ready_future<>();
+ }
+ seastar::future<> stop() {
+ return seastar::make_ready_future<>();
+ }
+};
+```
+
+The only mandatory method in this object is `stop()`, which will be called in each core when we want to stop the sharded service and want to wait until it stops on all cores.
+
+Now let's see how to use it:
+
+```cpp
+#include <seastar/core/sharded.hh>
+
+seastar::sharded<my_service> s;
+
+seastar::future<> f() {
+ return s.start(std::string("hello")).then([] {
+ return s.invoke_on_all([] (my_service& local_service) {
+ return local_service.run();
+ });
+ }).then([] {
+ return s.stop();
+ });
+}
+```
+
+The `s.start()` starts the service by creating a `my_service` object on each of the cores. The arguments to `s.start()`, if any (in this example, `std::string("hello")`), are passed to `my_service`'s constructor.
+
+But `s.start()` did not start running any code yet (besides the object's constructor). For that, we have the `s.invoke_on_all()` which runs the given lambda on all the cores - giving each lambda the local `my_service` object on that core. In this example, we have a `run()` method on each object, so we run that.
+
+Finally, at the end of the run we want to give the service on all cores a chance to shut down cleanly, so we call `s.stop()`. This will call the `stop()` method on each core's object, and wait for all of them to finish. Calling `s.stop()` before destroying `s` is mandatory - Seastar will warn you if you forget to do it.
+
+In addition to `invoke_on_all()` which runs the same code on all shards, another feature a sharded service often needs is for one shard to invoke code another specific shard. This is done by calling the sharded service's `invoke_on()` method. For example:
+
+```cpp
+seastar::sharded<my_service> s;
+...
+return s.invoke_on(0, [] (my_service& local_service) {
+ std::cerr << "invoked on " << seastar::engine().cpu_id() <<
+ ", _str = " << local_service._str << "\n";
+});
+```
+
+This runs the lambda function on shard 0, with a reference to the local `my_service` object on that shard.
+
+
+# Shutting down cleanly
+
+TODO: Handling interrupt, shutting down services, etc.
+
+Move the seastar::gate section here.
+
+# Command line options
+## Standard Seastar command-line options
+All Seastar applications accept a standard set of command-line arguments, such as those we've already seen above: The `-c` option for controlling the number of threads used, or `-m` for determining the amount of memory given to the application.
+
+TODO: list and explain more of these options.
+
+Every Seastar application also accepts the `-h` (or `--help`) option, which lists and explains all the available options --- the standard Seastar ones, and the user-defined ones as explained below.
+
+## User-defined command-line options
+Seastar parses the command line options (`argv[]`) when it is passed to `app_template::run()`, looking for its own standard options. Therefore, it is not recommended that the application tries to parse `argv[]` on its own because the application might not understand some of the standard Seastar options and not be able to correctly skip them.
+
+Rather, applications which want to have command-line options of their own should tell Seastar's command line parser of these additional application-specific options, and ask Seastar's command line parser to recognize them too. Seastar's command line parser is actually the Boost library's `boost::program_options`. An application adds its own option by using the `add_options()` and `add_positional_options()` methods on the `app_template` to define options, and later calling `configuration()` to retrieve the setting of these options. For example,
+
+```cpp
+#include <iostream>
+#include <seastar/core/app-template.hh>
+#include <seastar/core/reactor.hh>
+int main(int argc, char** argv) {
+ seastar::app_template app;
+ namespace bpo = boost::program_options;
+ app.add_options()
+ ("flag", "some optional flag")
+ ("size,s", bpo::value<int>()->default_value(100), "size")
+ ;
+ app.add_positional_options({
+ { "filename", bpo::value<std::vector<seastar::sstring>>()->default_value({}),
+ "sstable files to verify", -1}
+ });
+ app.run(argc, argv, [&app] {
+ auto& args = app.configuration();
+ if (args.count("flag")) {
+ std::cout << "Flag is on\n";
+ }
+ std::cout << "Size is " << args["size"].as<int>() << "\n";
+ auto& filenames = args["filename"].as<std::vector<seastar::sstring>>();
+ for (auto&& fn : filenames) {
+ std::cout << fn << "\n";
+ }
+ return seastar::make_ready_future<>();
+ });
+ return 0;
+}
+```
+
+In this example, we add via `add_options()` two application-specific options: `--flag` is an optional parameter which doesn't take any additional arguments, and `--size` (or `-s`) takes an integer value, which defaults (if this option is missing) to 100. Additionally, we ask via `add_positional_options()` that an unlimited number of arguments that do not begin with a "`-`" --- the so-called _positional_ arguments --- be collected to a vector of strings under the "filename" option. Some example outputs from this program:
+
+```
+$ ./a.out
+Size is 100
+$ ./a.out --flag
+Flag is on
+Size is 100
+$ ./a.out --flag -s 3
+Flag is on
+Size is 3
+$ ./a.out --size 3 hello hi
+Size is 3
+hello
+hi
+$ ./a.out --filename hello --size 3 hi
+Size is 3
+hello
+hi
+```
+
+`boost::program_options` has more powerful features, such as required options, option checking and combining, various option types, and more. Please refer to Boost's documentation for more information.
+
+# Debugging a Seastar program
+## Debugging ignored exceptions
+If a future resolves with an exception, and the application neglects to handle that exception or to explicitly ignore it, the application may have missed an important problem. This is likely to be an application bug.
+
+Therefore, Seastar prints a warning message to the log if a future is destroyed when it stores an exception that hasn't been handled.
+
+For example, consider this code:
+```cpp
+#include <seastar/core/future.hh>
+#include <seastar/core/sleep.hh>
+#include <seastar/core/app-template.hh>
+
+class myexception {};
+
+seastar::future<> g() {
+ return seastar::make_exception_future<>(myexception());
+}
+
+seastar::future<> f() {
+ g();
+ return seastar::sleep(std::chrono::seconds(1));
+}
+
+int main(int argc, char** argv) {
+ seastar::app_template app;
+ app.run(argc, argv, f);
+}
+```
+
+Here, the main function `f()` calls `g()`, but doesn't do anything with the future it returns. But this future resolves with an exception, and this exception is silently ignored. So Seastar prints this warning message about the ignored exception:
+```
+WARN 2020-03-31 11:08:09,208 [shard 0] seastar - Exceptional future ignored: myexception, backtrace: /lib64/libasan.so.5+0x6ce7f
+ 0x1a64193
+ 0x1a6265f
+ 0xf326cc
+ 0xeaf1a0
+ 0xeaffe4
+ 0xead7be
+ 0xeb5917
+ 0xee2477
+ 0xec312e
+ 0xec8fcf
+ 0xeec765
+ 0xee1b29
+ 0xed9fab
+ 0xec27c8
+ 0xec867f
+ 0xf00acc
+ 0xef179d
+ 0xef1824
+ 0xef18b5
+ 0xee4827
+ 0xee470f
+ 0xf00f81
+ 0xebac29
+ 0xeb9095
+ 0xeb9174
+ 0xeb925a
+ 0xeb9964
+ 0xebef89
+ 0x10f74c3
+ 0x10fb439
+ 0x11005fd
+ 0xec4f08
+ 0xec2f43
+ 0xec3461
+ 0xeadabe
+ /lib64/libc.so.6+0x271a2
+ 0xead52d
+```
+
+This message says that an exceptional future was ignored, and that the type of the exception was "`myexception`". The type of the exception is usually not enough to pinpoint where the problem happened, so the warning message also includes the backtrace - the call chain - leading to where the exceptional future was destroyed. The backtrace is given as a list of addresses, where code in other shared libraries is written as a shared library plus offset (when ASLR is enabled, the shared libraries are mapped in a different address each time).
+
+Seastar includes a utility, `seastar-addr2line`, for translating these addresses into readable backtraces including exact method names, source files and line numbers. This utility needs the _unstripped_ executable. Typically, a stripped executable is used for production, but an unstripped copy is kept separately to be used in debugging - including `seastar-addr2line`.
+
+To decode the backtrace, we run
+```
+seastar-addr2line -e a.out
+```
+And then paste the list of addresses in the warning message, and conclude with a `control-D` (it's also possible, if you want, to put the list of addresses in the `seastar-addr2line` command line). The result looks like this:
+
+```
+void seastar::backtrace<seastar::current_backtrace()::{lambda(seastar::frame)#1}>(seastar::current_backtrace()::{lambda(seastar::frame)#1}&&) at include/seastar/util/backtrace.hh:56
+seastar::current_backtrace() at src/util/backtrace.cc:84
+seastar::report_failed_future(std::__exception_ptr::exception_ptr const&) at src/core/future.cc:116
+seastar::future_state_base::~future_state_base() at include/seastar/core/future.hh:335
+seastar::future_state<>::~future_state() at include/seastar/core/future.hh:414
+ (inlined by) seastar::future<>::~future() at include/seastar/core/future.hh:990
+f() at test.cc:12
+std::_Function_handler<seastar::future<> (), seastar::future<> (*)()>::_M_invoke(std::_Any_data const&) at /usr/include/c++/9/bits/std_function.h:286
+std::function<seastar::future<> ()>::operator()() const at /usr/include/c++/9/bits/std_function.h:690
+seastar::app_template::run(int, char**, std::function<seastar::future<> ()>&&)::{lambda()#1}::operator()() const at src/core/app-template.cc:131
+std::_Function_handler<seastar::future<int> (), seastar::app_template::run(int, char**, std::function<seastar::future<> ()>&&)::{lambda()#1}>::_M_invoke(std::_Any_data const&) at /usr/include/c++/9/bits/std_function.h:286
+std::function<seastar::future<int> ()>::operator()() const at /usr/include/c++/9/bits/std_function.h:690
+seastar::future<int> seastar::futurize<seastar::future<int> >::invoke<std::function<seastar::future<int> ()>&>(std::function<seastar::future<int> ()>&) at include/seastar/core/future.hh:1670
+auto seastar::futurize_invoke<std::function<seastar::future<int> ()>&>(std::function<seastar::future<int> ()>&) at include/seastar/core/future.hh:1754
+seastar::app_template::run(int, char**, std::function<seastar::future<int> ()>&&)::{lambda()#1}::operator()() at src/core/app-template.cc:120 (discriminator 4)
+std::_Function_handler<void (), seastar::app_template::run(int, char**, std::function<seastar::future<int> ()>&&)::{lambda()#1}>::_M_invoke(std::_Any_data const&) at /usr/include/c++/9/bits/std_function.h:300
+std::function<void ()>::operator()() const at /usr/include/c++/9/bits/std_function.h:690
+seastar::apply_helper<std::function<void ()>&, std::tuple<>&&, std::integer_sequence<unsigned long> >::apply(std::function<void ()>&, std::tuple<>&&) at include/seastar/core/apply.hh:36
+auto seastar::apply<std::function<void ()>&>(std::function<void ()>&, std::tuple<>&&) at include/seastar/core/apply.hh:44
+seastar::future<> seastar::futurize<void>::apply<std::function<void ()>&>(std::function<void ()>&, std::tuple<>&&) at include/seastar/core/future.hh:1634
+auto seastar::futurize_apply<std::function<void ()>&>(std::function<void ()>&, std::tuple<>&&) at include/seastar/core/future.hh:1766
+seastar::future<>::then<std::function<void ()>, seastar::future<> >(std::function<void ()>&&)::{lambda()#1}::operator()() at include/seastar/core/future.hh:1191
+seastar::noncopyable_function<seastar::future<> ()>::direct_vtable_for<seastar::future<>::then<std::function<void ()>, seastar::future<> >(std::function<void ()>&&)::{lambda()#1}>::call(seastar::noncopyable_function<seastar::future<> ()> const*) at include/seastar/util/noncopyable_function.hh:101
+seastar::noncopyable_function<seastar::future<> ()>::operator()() const at include/seastar/util/noncopyable_function.hh:184
+seastar::apply_helper<seastar::noncopyable_function<seastar::future<> ()>, std::tuple<>&&, std::integer_sequence<unsigned long> >::apply(seastar::noncopyable_function<seastar::future<> ()>&&, std::tuple<>&&) at include/seastar/core/apply.hh:36
+auto seastar::apply<seastar::noncopyable_function<seastar::future<> ()>>(seastar::noncopyable_function<seastar::future<> ()>&&, std::tuple<>&&) at include/seastar/core/apply.hh:44
+seastar::future<> seastar::futurize<seastar::future<> >::apply<seastar::noncopyable_function<seastar::future<> ()>>(seastar::noncopyable_function<seastar::future<> ()>&&, std::tuple<>&&) at include/seastar/core/future.hh:1660
+seastar::future<>::then_impl_nrvo<seastar::noncopyable_function<seastar::future<> ()>, seastar::future<> >(seastar::noncopyable_function<seastar::future<> ()>&&)::{lambda()#1}::operator()() const::{lambda(seastar::internal::promise_base_with_type<>&, seastar::future_state<>&&)#1}::operator()(seastar::internal::promise_base_with_type<>, seastar::future_state<>) at include/seastar/core/future.hh:1213
+seastar::continuation<seastar::internal::promise_base_with_type<>, seastar::future<>::then_impl_nrvo<seastar::noncopyable_function<seastar::future<> ()>, seastar::future<> >(seastar::noncopyable_function<seastar::future<> ()>&&)::{lambda()#1}::operator()() const::{lambda(seastar::internal::promise_base_with_type<>&, seastar::future_state<>&&)#1}>::run_and_dispose() at include/seastar/core/future.hh:509
+seastar::reactor::run_tasks(seastar::reactor::task_queue&) at src/core/reactor.cc:2124
+seastar::reactor::run_some_tasks() at src/core/reactor.cc:2539 (discriminator 2)
+seastar::reactor::run() at src/core/reactor.cc:2694
+seastar::app_template::run_deprecated(int, char**, std::function<void ()>&&) at src/core/app-template.cc:199 (discriminator 1)
+seastar::app_template::run(int, char**, std::function<seastar::future<int> ()>&&) at src/core/app-template.cc:115 (discriminator 2)
+seastar::app_template::run(int, char**, std::function<seastar::future<> ()>&&) at src/core/app-template.cc:130 (discriminator 2)
+main at test.cc:19 (discriminator 1)
+__libc_start_main at /usr/src/debug/glibc-2.30-34-g994e529a37/csu/../csu/libc-start.c:308
+_start at ??:?
+```
+
+Most of the lines at the bottom of this backtrace are not interesting, and just showing the internal details of how Seastar ended up running the main function `f()`. The only interesting part is the _first_ few lines:
+
+```
+seastar::report_failed_future(std::__exception_ptr::exception_ptr const&) at src/core/future.cc:116
+seastar::future_state_base::~future_state_base() at include/seastar/core/future.hh:335
+seastar::future_state<>::~future_state() at include/seastar/core/future.hh:414
+ (inlined by) seastar::future<>::~future() at include/seastar/core/future.hh:990
+f() at test.cc:12
+```
+
+Here we see that the warning message was printed by the `seastar::report_failed_future()` function which was called when destroying a future (`future<>::~future`) that had not been handled. The future's destructor was called in line 12 of our test code (`test.cc`), which is indeed the line where we called `g()` and ignored its result.
+This backtrace gives us an accurate understanding of where our code destroyed an exceptional future without handling it first, which is usually helpful in solving these kinds of bugs. Note that this technique does not tell us where the exception was first created, nor what code passed around the exceptional future before it was destroyed - we just learn where the future was destroyed. To learn where the exception was originally thrown, see the next section:
+
+## Finding where an exception was thrown
+Sometimes an application logs an exception, and we want to know where in the code the exception was originally thrown. Unlike languages like Java, C++ does not have a builtin method of attaching a backtrace to every exception. So Seastar provides functions which allow adding to an exception the backtrace recorded when throwing it.
+
+For example, in the following code we throw and catch an `std::runtime_error` normally:
+
+```cpp
+#include <seastar/core/future.hh>
+#include <seastar/util/log.hh>
+#include <exception>
+#include <iostream>
+
+seastar::future<> g() {
+ return seastar::make_exception_future<>(std::runtime_error("hello"));
+}
+
+seastar::future<> f() {
+ return g().handle_exception([](std::exception_ptr e) {
+ std::cerr << "Exception: " << e << "\n";
+ });
+}
+```
+The output is
+```
+Exception: std::runtime_error (hello)
+```
+From this output, we have no way of knowing that the exception was thrown in `g()`. We can solve this if we use `make_exception_future_with_backtrace` instead of `make_exception_future`:
+
+```
+#include <seastar/util/backtrace.hh>
+seastar::future<> g() {
+ return seastar::make_exception_future_with_backtrace<>(std::runtime_error("hello"));
+}
+```
+Now the output looks like
+```
+Exception: seastar::internal::backtraced<std::runtime_error> (hello Backtrace: 0x678bd3
+ 0x677204
+ 0x67736b
+ 0x678cd5
+ 0x4f923c
+ 0x4f9c38
+ 0x4ff4d0
+...
+)
+```
+Which, as above, can be converted to a human-readable backtrace by using the `seastar-addr2line` script.
+
+In addition to `seastar::make_exception_future_with_backtrace()`, Seastar also provides a function `throw_with_backtrace()`, to throw an exception instead of returning an exceptional future. For example:
+```
+ seastar::throw_with_backtrace<std::runtime_error>("hello");
+```
+
+In the current implementation, both `make_exception_future_with_backtrace` and `throw_with_backtrace` require that the original exception type (in the above example, `std::runtime_error`) is a subclass of the `std::exception` class. The original exception provides a `what()` string, and the wrapped exception adds the backtrace to this string, as demonstrated above. Moreover, the wrapped exception type is a _subclass_ of the original exception type, which allows `catch(...)` code to continue filtering by the exception original type - despite the addition of the backtrace.
+
+
+## Debugging with gdb
+
+```
+handle SIGUSR1 pass noprint
+handle SIGALRM pass noprint
+```
+
+# Promise objects
+
+As we already defined above, an **asynchronous function**, also called a **promise**, is a function which returns a future and arranges for this future to be eventually resolved. As we already saw, an asynchronous function is usually written in terms of other asynchronous functions, for example we saw the function `slow()` which waits for the existing asynchronous function `sleep()` to complete, and then returns 3:
+
+```cpp
+seastar::future<int> slow() {
+ using namespace std::chrono_literals;
+ return seastar::sleep(100ms).then([] { return 3; });
+}
+```
+
+The most basic building block for writing promises is the **promise object**, an object of type `promise<T>`. A `promise<T>` has a method `future<T> get_future()` to return a future, and a method `set_value(T)`, to resolve this future. An asynchronous function can create a promise object, return its future, and arrange for the `set_value` method to be eventually called - which will finally resolve the future it returned.
+
+CONTINUE HERE. write an example, e.g., something which writes a message every second, and after 10 messages, completes the future.
+
+# Memory allocation in Seastar
+## Per-thread memory allocation
+Seastar requires that applications be sharded, i.e., that code running on different threads operate on different objects in memory. We already saw in [Seastar memory] how Seastar takes over a given amount of memory (often, most of the machine's memory) and divides it equally between the different threads. Modern multi-socket machines have non-uniform memory access (NUMA), meaning that some parts of memory are closer to some of the cores, and Seastar takes this knowledge into account when dividing the memory between threads. Currently, the division of memory between threads is static, and equal - the threads are expected to experience roughly equal amount of load and require roughly equal amounts of memory.
+
+To achieve this per-thread allocation, Seastar redefines the C library functions `malloc()`, `free()`, and their numerous relatives --- `calloc()`, `realloc()`, `posix_memalign()`, `memalign()`, `malloc_usable_size()`, and `malloc_trim()`. It also redefines the C++ memory allocation functions, `operator new`, `operator delete`, and all their variants (including array versions, the C++14 delete taking a size, and the C++17 variants taking required alignment).
+
+It is important to remember that Seastar's different threads *can* see memory allocated by other threads, but they are nonetheless strongly discouraged from actually doing this. Sharing data objects between threads on modern multi-core machines results in stiff performance penalties from locks, memory barriers, and cache-line bouncing. Rather, Seastar encourages applications to avoid sharing objects between threads when possible (by *sharding* --- each thread owns a subset of the objects), and when threads do need to interact they do so with explicit message passing, with `submit_to()`, as we shall see later.
+
+## Foreign pointers
+An object allocated on one thread will be owned by this thread, and eventually should be freed by the same thread. Freeing memory on the *wrong* thread is strongly discouraged, but is currently supported (albeit slowly) to support library code beyond Seastar's control. For example, `std::exception_ptr` allocates memory; so if we invoke an asynchronous operation on a remote thread and this operation returns an exception, when we free the returned `std::exception_ptr` this will happen on the "wrong" core. So Seastar allows it, but inefficiently.
+
+In most cases objects should spend their entire life on a single thread and be used only by this thread. But in some cases we want to reassign ownership of an object which started its life on one thread, to a different thread. This can be done using a `seastar::foreign_ptr<>`. A pointer, or smart pointer, to an object is wrapped in a `seastar::foreign_ptr<P>`. This wrapper can then be moved into code running in a different thread (e.g., using `submit_to()`).
+
+The most common use-case is a `seastar::foreign_ptr<std::unique_ptr<T>>`. The thread receiving this `foreign_ptr` will get exclusive use of the object, and when it destroys this wrapper, it will go back to the original thread to destroy the object. Note that the object is not only freed on the original shard - it is also *destroyed* (i.e., its destructor is run) there. This is often important when the object's destructor needs to access other state which belongs to the original shard - e.g., unlink itself from a container.
+
+Although `foreign_ptr` ensures that the object's *destructor* automatically runs on the object's home thread, it does not absolve the user from worrying where to run the object's other methods. Some simple methods, e.g., methods which just read from the object's fields, can be run on the receiving thread. However, other methods may need to access other data owned by the object's home shard, or need to prevent concurrent operations. Even if we're sure that object is now used exclusively by the receiving thread, such methods must still be run, explicitly, on the home thread:
+```
+ // fp is some foreign_ptr<>
+ return smp::submit_to(fp.get_owner_shard(), [p=fp.get()]
+ { return p->some_method(); });
+```
+So `seastar::foreign_ptr<>` not only has functional benefits (namely, to run the destructor on the home shard), it also has *documentational* benefits - it warns the programmer to watch out every time the object is used, that this is a *foreign* pointer, and if we want to do anything non-trivial with the pointed object, we may need to do it on the home shard.
+
+Above, we discussed the case of transferring ownership of an object to another shard, via `seastar::foreign_ptr<std::unique_ptr<T>>`. However, sometimes the sender does not want to relinquish ownership of the object. Sometimes, it wants the remote thread to operate on its object and return with the object intact. Sometimes, it wants to send the same object to multiple shards. In such cases, `seastar::foreign_ptr<seastar::lw_shared_ptr<T>>` is useful. The user needs to watch out, of course, not to operate on the same object from multiple threads concurrently. If this cannot be ensured by program logic alone, some methods of serialization must be used - such as running the operations on the home shard with `submit_to()` as described above.
+
+Normally, a `seastar::foreign_ptr` cannot be copied - only moved. However, when it holds a smart pointer that can be copied (namely, a `shared_ptr`), one may want to make an additional copy of that pointer and create a second `foreign_ptr`. Doing this is inefficient and asynchronous (it requires communicating with the original owner of the object to create the copies), so a method `future<foreign_ptr> copy()` needs to be explicitly used instead of the normal copy constructor.
+
+# Seastar::thread
+Seastar's programming model, using futures and continuations, is very powerful and efficient. However, as we've already seen in examples above, it is also relatively verbose: Every time that we need to wait before proceeding with a computation, we need to write another continuation. We also need to worry about passing the data between the different continuations (using techniques like those described in the [Lifetime management] section). Simple flow-control constructs such as loops also become more involved using continuations. For example, consider this simple classical synchronous code:
+```cpp
+ std::cout << "Hi.\n";
+ for (int i = 1; i < 4; i++) {
+ sleep(1);
+ std::cout << i << "\n";
+ }
+```
+In Seastar, using futures and continuations, we need to write something like this:
+```cpp
+ std::cout << "Hi.\n";
+ return seastar::do_for_each(boost::counting_iterator<int>(1),
+ boost::counting_iterator<int>(4), [] (int i) {
+ return seastar::sleep(std::chrono::seconds(1)).then([i] {
+ std::cout << i << "\n";
+ });
+ });
+```
+
+But Seastar also allows, via `seastar::thread`, to write code which looks more like synchronous code. A `seastar::thread` provides an execution environment where blocking is tolerated; You can issue an asynchronous function, and wait for it in the same function, rather than establishing a callback to be called with `future<>::then()`:
+
+```cpp
+ seastar::thread th([] {
+ std::cout << "Hi.\n";
+ for (int i = 1; i < 4; i++) {
+ seastar::sleep(std::chrono::seconds(1)).get();
+ std::cout << i << "\n";
+ }
+ });
+```
+A `seastar::thread` is **not** a separate operating system thread. It still uses continuations, which are scheduled on Seastar's single thread (per core). It works as follows:
+
+The `seastar::thread` allocates a 128KB stack, and runs the given function until it *blocks* on the call to a future's `get()` method. Outside a `seastar::thread` context, `get()` may only be called on a future which is already available. But inside a thread, calling `get()` on a future which is not yet available stops running the thread function, and schedules a continuation for this future, which continues to run the thread's function (on the same saved stack) when the future becomes available.
+
+Just like normal Seastar continuations, `seastar::thread`s always run on the same core they were launched on. They are also cooperative: they are never preempted except when `seastar::future::get()` blocks or on explicit calls to `seastar::thread::yield()`.
+
+It is worth reiterating that a `seastar::thread` is not a POSIX thread, and it can only block on Seastar futures, not on blocking system calls. The above example used `seastar::sleep()`, not the `sleep()` system call. The `seastar::thread`'s function can throw and catch exceptions normally. Remember that `get()` will throw an exception if the future resolves with an exception.
+
+In addition to `seastar::future::get()`, we also have `seastar::future::wait()` to wait *without* fetching the future's result. This can sometimes be useful when you want to avoid throwing an exception when the future failed (as `get()` does). For example:
+```cpp
+ future<char> getchar();
+ int try_getchar() noexcept { // run this in seastar::thread context
+ future<char> fut = getchar();
+ fut.wait();
+ if (fut.failed()) {
+ return -1;
+ } else {
+ // Here we already know that get() will return immediately,
+ // and will not throw.
+ return fut.get();
+ }
+ }
+```
+
+## Starting and ending a seastar::thread
+After we created a `seastar::thread` object, we need to wait until it ends, using its `join()` method. We also need to keep that object alive until `join()` completes. A complete example using `seastar::thread` will therefore look like this:
+
+```cpp
+#include <seastar/core/sleep.hh>
+#include <seastar/core/thread.hh>
+seastar::future<> f() {
+ seastar::thread th([] {
+ std::cout << "Hi.\n";
+ for (int i = 1; i < 4; i++) {
+ seastar::sleep(std::chrono::seconds(1)).get();
+ std::cout << i << "\n";
+ }
+ });
+ return do_with(std::move(th), [] (auto& th) {
+ return th.join();
+ });
+}
+```
+
+The `seastar::async()` function provides a convenient shortcut for creating a `seastar::thread` and returning a future which resolves when the thread completes:
+```cpp
+#include <seastar/core/sleep.hh>
+#include <seastar/core/thread.hh>
+seastar::future<> f() {
+ return seastar::async([] {
+ std::cout << "Hi.\n";
+ for (int i = 1; i < 4; i++) {
+ seastar::sleep(std::chrono::seconds(1)).get();
+ std::cout << i << "\n";
+ }
+ });
+}
+```
+
+`seastar::async()`'s lambda may return a value, and `seastar::async()` returns it when it completes. For example:
+
+```cpp
+seastar::future<seastar::sstring> read_file(sstring file_name) {
+ return seastar::async([file_name] () { // lambda executed in a thread
+ file f = seastar::open_file_dma(file_name).get0(); // get0() call "blocks"
+ auto buf = f.dma_read(0, 512).get0(); // "block" again
+ return seastar::sstring(buf.get(), buf.size());
+ });
+};
+```
+
+While `seastar::thread`s and `seastar::async()` make programming more convenient, they also add overhead beyond that of programming directly with continuations. Most notably, each `seastar::thread` requires additional memory for its stack. It is therefore not a good idea to use a `seastar::thread` to handle a highly concurrent operation. For example, if you need to handle 10,000 concurrent requests, do not use a `seastar::thread` to handle each --- use futures and continuations. But if you are writing code where you know that only a few instances will ever run concurrently, e.g., a background cleanup operation in your application, `seastar::thread` is a good match. `seastar::thread` is also great for code which doesn't care about performance --- such as test code.
+
+# Isolation of application components
+Seastar makes multi-tasking very easy - as easy as running an asynchronous function. It is therefore easy for a server to do many unrelated things in parallel. For example, a server might be in the process of answering 100 users' requests, and at the same time also be making progress on some long background operation.
+
+But in the above example, what percentage of the CPU and disk throughput will the background operation get? How long can one of the user's requests be delayed by the background operation? Without the mechanisms we describe in this section, these questions cannot be reliably answered:
+
+* The background operation may be a very "considerate" single fiber, i.e., run a very short continuation and then schedule the next continuation to run later. At each point the scheduler sees 100 request-handling continuations and just one of the background continuations ready to run. The background task gets around 1% of the CPU time, and users' requests are hardly delayed.
+* On the other hand, the background operation may spawn 1,000 fibers in parallel and have 1,000 ready-to-run continuations at each time. The background operation will get about 90% of the runtime, and the continuation handling a user's request may get stuck behind 1,000 of these background continuations, and experience huge latency.
+
+Complex Seastar applications often have different components which run in parallel and have different performance objectives. In the above example we saw two components - user requests and the background operation. The first goal of the mechanisms we describe in this section is to _isolate_ the performance of each component from the others; In other words, the throughput and latency of one component should not depend on decisions that another component makes - e.g., how many continuations it runs in parallel. The second goal is to allow the application to _control_ this isolation, e.g., in the above example allow the application to explicitly control the amount of CPU the background operation receives, so that it completes at a desired pace.
+
+In the above examples we used CPU time as the limited resource that the different components need to share effectively. As we show later, another important shared resource is disk I/O.
+
+## Scheduling groups (CPU scheduler)
+Consider the following asynchronous function `loop()`, which loops until some shared variable `stop` becomes true. It keeps a `counter` of the number of iterations until stopping, and returns this counter when finally stopping.
+```cpp
+seastar::future<long> loop(int parallelism, bool& stop) {
+ return seastar::do_with(0L, [parallelism, &stop] (long& counter) {
+ return seastar::parallel_for_each(boost::irange<unsigned>(0, parallelism),
+ [&stop, &counter] (unsigned c) {
+ return seastar::do_until([&stop] { return stop; }, [&counter] {
+ ++counter;
+ return seastar::make_ready_future<>();
+ });
+ }).then([&counter] { return counter; });
+ });
+}
+```
+The `parallelism` parameter determines the parallelism of the silly counting operation: `parallelism=1` means we have just one loop incrementing the counter; `parallelism=10` means we start 10 loops in parallel all incrementing the same counter.
+
+What happens if we start two `loop()` calls in parallel and let them run for 10 seconds?
+```c++
+seastar::future<> f() {
+ return seastar::do_with(false, [] (bool& stop) {
+ seastar::sleep(std::chrono::seconds(10)).then([&stop] {
+ stop = true;
+ });
+ return seastar::when_all_succeed(loop(1, stop), loop(1, stop)).then_unpack(
+ [] (long n1, long n2) {
+ std::cout << "Counters: " << n1 << ", " << n2 << "\n";
+ });
+ });
+}
+```
+It turns out that if the two `loop()` calls had the same parallelism `1`, we get roughly the same amount of work from both of them:
+```
+Counters: 3'559'635'758, 3'254'521'376
+```
+But if for example we ran a `loop(1)` in parallel with a `loop(10)`, the result is that the `loop(10)` gets 10 times more work done:
+```
+Counters: 629'482'397, 6'320'167'297
+```
+
+Why does the amount of work that loop(1) can do in ten seconds depend on the parallelism chosen by its competitor, and how can we solve this?
+
+The reason this happens is as follows: When a future resolves and a continuation was linked to it, this continuation becomes ready to run. By default, Seastar's scheduler keeps a single list of ready-to-run continuations (in each shard, of course), and runs the continuations at the same order they became ready to run. In the above example, `loop(1)` always has one ready-to-run continuation, but `loop(10)`, which runs 10 loops in parallel, always has ten ready-to-run continuations. So for every continuation of `loop(1)`, Seastar's default scheduler will run 10 continuations of `loop(10)`, which is why loop(10) gets 10 times more work done.
+
+To solve this, Seastar allows an application to define separate components known as **scheduling groups**, which each has a separate list of ready-to-run continuations. Each scheduling group gets to run its own continuations on a desired percentage of the CPU time, but the number of runnable continuations in one scheduling group does not affect the amount of CPU that another scheduling group gets. Let's look at how this is done:
+
+A scheduling group is defined by a value of type `scheduling_group`. This value is opaque, but internally it is a small integer (similar to a process ID in Linux). We use the `seastar::with_scheduling_group()` function to run code in the desired scheduling group:
+
+```cpp
+seastar::future<long>
+loop_in_sg(int parallelism, bool& stop, seastar::scheduling_group sg) {
+ return seastar::with_scheduling_group(sg, [parallelism, &stop] {
+ return loop(parallelism, stop);
+ });
+}
+```
+
+TODO: explain what `with_scheduling_group` group really does, how the group is "inherited" to the continuations started inside it.
+
+
+Now let's create two scheduling groups, and run `loop(1)` in the first scheduling group and `loop(10)` in the second scheduling group:
+```cpp
+seastar::future<> f() {
+ return seastar::when_all_succeed(
+ seastar::create_scheduling_group("loop1", 100),
+ seastar::create_scheduling_group("loop2", 100)).then_unpack(
+ [] (seastar::scheduling_group sg1, seastar::scheduling_group sg2) {
+ return seastar::do_with(false, [sg1, sg2] (bool& stop) {
+ seastar::sleep(std::chrono::seconds(10)).then([&stop] {
+ stop = true;
+ });
+ return seastar::when_all_succeed(loop_in_sg(1, stop, sg1), loop_in_sg(10, stop, sg2)).then_unpack(
+ [] (long n1, long n2) {
+ std::cout << "Counters: " << n1 << ", " << n2 << "\n";
+ });
+ });
+ });
+}
+```
+Here we created two scheduling groups, `sg1` and `sg2`. Each scheduling group has an arbitrary name (which is used for diagnostic purposes only), and a number of *shares*, a number traditionally between 1 and 1000: If one scheduling group has twice the number of shares than a second scheduling group, it will get twice the amount of CPU time. In this example, we used the same number of shares (100) for both groups, so they should get equal CPU time.
+
+Unlike most objects in Seastar which are separate per shard, Seastar wants the identities and numbering of the scheduling groups to be the same on all shards, because it is important when invoking tasks on remote shards. For this reason, the function to create a scheduling group, `seastar::create_scheduling_group()`, is an asynchronous function returning a `future<scheduling_group>`.
+
+Running the above example, with both scheduling groups set up with the same number of shares (100), indeed results in both scheduling groups getting the same amount of CPU time:
+```
+Counters: 3'353'900'256, 3'350'871'461
+```
+
+Note how now both loops got the same amount of work done - despite one loop having 10 times the parallelism of the second loop.
+
+If we change the definition of the second scheduling group to have 200 shares, twice the number of shares of the first scheduling group, we'll see the second scheduling group getting twice the amount of CPU time:
+```
+Counters: 2'273'783'385, 4'549'995'716
+```
+## Latency
+TODO: Task quota, preempt, loops with built-in preemption check, etc.
+
+## Disk I/O scheduler
+TODO
+
+## Network scheduler
+TODO: Say that not yet available. Give example of potential problem - e.g., sharing a slow WAN link.
+
+## Controllers
+TODO: Talk about how to dynamically change the number of shares, and why.
+
+## Multi-tenancy
+TODO
diff --git a/src/seastar/doc/websocket.md b/src/seastar/doc/websocket.md
new file mode 100644
index 000000000..bb7407e51
--- /dev/null
+++ b/src/seastar/doc/websocket.md
@@ -0,0 +1,45 @@
+# WebSocket protocol implementation
+
+Seastar includes an experimental implementation of a WebSocket server.
+Refs:
+https://datatracker.ietf.org/doc/html/rfc6455
+https://developer.mozilla.org/en-US/docs/Web/API/WebSockets_API/Writing_WebSocket_servers
+
+## Handlers
+
+A WebSocket server needs a user-defined handler in order to be functional. The WebSocket specification defines a concept of subprotocols, and the Seastar WebSocket server allows registering a single handler per subprotocol.
+
+Each subprotocol has a unique name and is expected to be sent by the connecting client during the handshake,
+by sending a `Sec-WebSocket-Protocol` header with the chosen value.
+
+Aside from specifying the chosen subprotocol name for a handler, the developer is expected to provide a function
+which handles the incoming stream of data and returns responses into the output stream.
+
+Here's an example of how to register a simple echo protocol:
+
+```cpp
+using namespace seastar;
+static experimental::websocket::server ws;
+ws.register_handler("echo", [] (input_stream<char>& in, output_stream<char>& out) -> future<> {
+ while (true) {
+ auto buf = co_await in.read();
+ if (buf.empty()) {
+ co_return;
+ }
+ co_await out.write(std::move(buf));
+ co_await out.flush();
+ }
+});
+```
+
+Note: the developers should assume that the input stream provides decoded and unmasked data - so the stream should be treated as if it was backed by a TCP socket. Similarly, responses should be sent to the output stream as is, and the WebSocket server implementation will handle its proper serialization, masking and so on.
+
+## Error handling
+
+Registered WebSocket handlers can throw arbitrary exceptions during their operation. Currently, exceptions that aren't explicitly handled within the handler will cause the established WebSocket connection to be terminated, and a proper error message will be logged.
+
+## Secure WebSocket (wss://)
+
+An implementation of the Secure WebSocket standard, based on HTTPS, is currently a work in progress at an advanced stage. Once reviewed and merged, this section will contain documentation for it.
+Ref: https://github.com/scylladb/seastar/pull/1044
+