From 2eeb62e38ae17a3523ad3cd81c3de9f20f9e7742 Mon Sep 17 00:00:00 2001
From: Daniel Baumann
Date: Sat, 25 May 2024 06:41:28 +0200
Subject: Adding debian version 2.4.59-1~deb10u1.
Signed-off-by: Daniel Baumann
---
debian/changelog | 15 +
debian/control | 3 +-
...522-HTTP-Response-Smuggling-mod_proxy_uws.patch | 120 -
debian/patches/0053-CVE-2023-25690-1.patch | 170 -
debian/patches/0054-CVE-2023-25690-2.patch | 35 -
.../patches/0055-CVE-2023-25690-Regression-1.patch | 131 -
.../patches/0056-CVE-2023-25690-Regression-2.patch | 138 -
.../patches/0057-CVE-2023-25690-Regression-3.patch | 24 -
debian/patches/CVE-2006-20001.patch | 37 -
debian/patches/CVE-2019-0196.patch | 27 -
debian/patches/CVE-2019-0197.patch | 93 -
debian/patches/CVE-2019-0211.patch | 249 -
debian/patches/CVE-2019-0215.patch | 52 -
debian/patches/CVE-2019-0217.patch | 147 -
debian/patches/CVE-2019-0220-1.patch | 278 -
debian/patches/CVE-2019-0220-2.patch | 50 -
debian/patches/CVE-2019-0220-3.patch | 43 -
debian/patches/CVE-2019-10092.patch | 193 -
debian/patches/CVE-2019-10097.patch | 72 -
debian/patches/CVE-2019-10098.patch | 20 -
debian/patches/CVE-2020-11984.patch | 45 -
debian/patches/CVE-2020-1927.patch | 92 -
debian/patches/CVE-2020-1934.patch | 75 -
debian/patches/CVE-2020-35452.patch | 27 -
debian/patches/CVE-2021-26690.patch | 20 -
debian/patches/CVE-2021-26691.patch | 18 -
debian/patches/CVE-2021-30641.patch | 50 -
debian/patches/CVE-2021-31618.patch | 20 -
debian/patches/CVE-2021-33193.patch | 702 --
debian/patches/CVE-2021-34798.patch | 40 -
debian/patches/CVE-2021-36160-2.patch | 32 -
debian/patches/CVE-2021-36160.patch | 51 -
debian/patches/CVE-2021-39275.patch | 35 -
debian/patches/CVE-2021-40438.patch | 124 -
debian/patches/CVE-2021-44224-1.patch | 206 -
debian/patches/CVE-2021-44224-2.patch | 93 -
debian/patches/CVE-2021-44790.patch | 18 -
debian/patches/CVE-2022-22719.patch | 95 -
debian/patches/CVE-2022-22720.patch | 190 -
debian/patches/CVE-2022-22721.patch | 116 -
debian/patches/CVE-2022-23943-1.patch | 360 -
debian/patches/CVE-2022-23943-2.patch | 63 -
debian/patches/CVE-2022-26377.patch | 39 -
debian/patches/CVE-2022-28614.patch | 65 -
debian/patches/CVE-2022-28615.patch | 35 -
debian/patches/CVE-2022-29404.patch | 82 -
debian/patches/CVE-2022-30522.patch | 561 --
debian/patches/CVE-2022-30556.patch | 250 -
debian/patches/CVE-2022-31813.patch | 242 -
debian/patches/CVE-2022-36760.patch | 27 -
debian/patches/CVE-2022-37436.patch | 125 -
debian/patches/build_suexec-custom.patch | 2 +-
debian/patches/customize_apxs.patch | 22 +-
debian/patches/fhs_compliance.patch | 38 +-
debian/patches/fix-macro.patch | 160 +
.../patches/import-http2-module-from-2.4.46.patch | 7588 -------------------
debian/patches/reproducible_builds.diff | 2 +-
debian/patches/series | 55 +-
debian/patches/spelling-errors.patch | 196 -
debian/patches/suexec-CVE-2007-1742.patch | 2 +-
debian/patches/suexec-custom.patch | 32 +-
debian/perl-framework/Apache-Test/Changes | 50 +-
debian/perl-framework/Apache-Test/Makefile.PL | 16 +-
debian/perl-framework/Apache-Test/RELEASE | 13 +-
.../perl-framework/Apache-Test/lib/Apache/Test.pm | 2 +-
.../Apache-Test/lib/Apache/TestCommon.pm | 2 +-
.../Apache-Test/lib/Apache/TestConfig.pm | 155 +-
.../Apache-Test/lib/Apache/TestConfigParse.pm | 2 +-
.../Apache-Test/lib/Apache/TestRun.pm | 2 +-
.../Apache-Test/lib/Apache/TestRunPHP.pm | 2 +-
.../Apache-Test/lib/Apache/TestSSLCA.pm | 29 +-
.../Apache-Test/lib/Apache/TestTrace.pm | 4 +-
debian/perl-framework/Makefile.PL | 20 +
debian/perl-framework/NOTICE | 2 +-
debian/perl-framework/README | 22 +-
debian/perl-framework/STATUS | 2 +-
.../c-modules/echo_post/mod_echo_post.c | 2 +-
.../test_pass_brigade/mod_test_pass_brigade.c | 46 +-
.../c-modules/test_rwrite/mod_test_rwrite.c | 19 +-
.../c-modules/test_session/mod_test_session.c | 2 +-
.../c-modules/test_ssl/mod_test_ssl.c | 2 +-
debian/perl-framework/scripts/httpd-sub.ldif | 15 +
debian/perl-framework/scripts/httpd.ldif | 56 +
debian/perl-framework/scripts/ldap-init.sh | 28 +
debian/perl-framework/scripts/memcached-init.sh | 8 +
debian/perl-framework/scripts/non-anon.ldif | 14 +
debian/perl-framework/scripts/redis-init.sh | 8 +
debian/perl-framework/scripts/slapd-config.ldif | 10 +
debian/perl-framework/scripts/suffix.ldif | 5 +
debian/perl-framework/t/ab/base.t | 10 +-
debian/perl-framework/t/apache/expr.t | 2 +
debian/perl-framework/t/apache/expr_string.t | 9 +-
debian/perl-framework/t/apache/hostcheck.t | 5 +-
debian/perl-framework/t/apache/leaks.t | 76 +-
debian/perl-framework/t/apache/limits.t | 6 +-
debian/perl-framework/t/apache/mergeslashes.t | 117 +
debian/perl-framework/t/apache/pr64339.t | 46 +
debian/perl-framework/t/apache/teclchunk.t | 57 +
debian/perl-framework/t/conf/core.conf.in | 40 +-
debian/perl-framework/t/conf/extra.conf.in | 251 +-
debian/perl-framework/t/conf/http2.conf.in | 105 -
debian/perl-framework/t/conf/include.conf.in | 5 +
debian/perl-framework/t/conf/proxy.conf.in | 74 +-
debian/perl-framework/t/conf/ssl/proxyssl.conf.in | 61 +-
debian/perl-framework/t/conf/ssl/ssl.conf.in | 20 +-
debian/perl-framework/t/filter/case.t | 3 +-
.../t/htdocs/modules/actions/action/dummy.txt | 0
.../t/htdocs/modules/actions/script/dummy.txt | 0
.../t/htdocs/modules/allowmethods/Get/none/.empty | 0
.../t/htdocs/modules/allowmethods/Get/post/foo.txt | 1 +
.../t/htdocs/modules/allowmethods/NoPost/.empty | 0
.../htdocs/modules/allowmethods/Post/reset/.empty | 0
.../t/htdocs/modules/cgi/perl_post.pl.PL | 23 +
.../t/htdocs/modules/dir/htaccess/sub1/index.html | 1 +
debian/perl-framework/t/htdocs/modules/h2/001.html | 10 -
debian/perl-framework/t/htdocs/modules/h2/002.jpg | Bin 90364 -> 0 bytes
debian/perl-framework/t/htdocs/modules/h2/003.html | 11 -
.../t/htdocs/modules/h2/003/003_img.jpg | Bin 90364 -> 0 bytes
debian/perl-framework/t/htdocs/modules/h2/004.html | 23 -
.../t/htdocs/modules/h2/004/gophertiles.jpg | Bin 742 -> 0 bytes
.../t/htdocs/modules/h2/004/gophertiles_002.jpg | Bin 945 -> 0 bytes
.../t/htdocs/modules/h2/004/gophertiles_003.jpg | Bin 697 -> 0 bytes
.../t/htdocs/modules/h2/004/gophertiles_004.jpg | Bin 725 -> 0 bytes
.../t/htdocs/modules/h2/004/gophertiles_005.jpg | Bin 837 -> 0 bytes
.../t/htdocs/modules/h2/004/gophertiles_006.jpg | Bin 770 -> 0 bytes
.../t/htdocs/modules/h2/004/gophertiles_007.jpg | Bin 747 -> 0 bytes
.../t/htdocs/modules/h2/004/gophertiles_008.jpg | Bin 694 -> 0 bytes
.../t/htdocs/modules/h2/004/gophertiles_009.jpg | Bin 704 -> 0 bytes
.../t/htdocs/modules/h2/004/gophertiles_010.jpg | Bin 994 -> 0 bytes
.../t/htdocs/modules/h2/004/gophertiles_011.jpg | Bin 979 -> 0 bytes
.../t/htdocs/modules/h2/004/gophertiles_012.jpg | Bin 895 -> 0 bytes
.../t/htdocs/modules/h2/004/gophertiles_013.jpg | Bin 958 -> 0 bytes
.../t/htdocs/modules/h2/004/gophertiles_014.jpg | Bin 894 -> 0 bytes
.../t/htdocs/modules/h2/004/gophertiles_015.jpg | Bin 702 -> 0 bytes
.../t/htdocs/modules/h2/004/gophertiles_016.jpg | Bin 703 -> 0 bytes
.../t/htdocs/modules/h2/004/gophertiles_017.jpg | Bin 707 -> 0 bytes
.../t/htdocs/modules/h2/004/gophertiles_018.jpg | Bin 701 -> 0 bytes
.../t/htdocs/modules/h2/004/gophertiles_019.jpg | Bin 1013 -> 0 bytes
.../t/htdocs/modules/h2/004/gophertiles_020.jpg | Bin 737 -> 0 bytes
.../t/htdocs/modules/h2/004/gophertiles_021.jpg | Bin 801 -> 0 bytes
.../t/htdocs/modules/h2/004/gophertiles_022.jpg | Bin 702 -> 0 bytes
.../t/htdocs/modules/h2/004/gophertiles_023.jpg | Bin 905 -> 0 bytes
.../t/htdocs/modules/h2/004/gophertiles_024.jpg | Bin 980 -> 0 bytes
.../t/htdocs/modules/h2/004/gophertiles_025.jpg | Bin 708 -> 0 bytes
.../t/htdocs/modules/h2/004/gophertiles_026.jpg | Bin 694 -> 0 bytes
.../t/htdocs/modules/h2/004/gophertiles_027.jpg | Bin 697 -> 0 bytes
.../t/htdocs/modules/h2/004/gophertiles_028.jpg | Bin 795 -> 0 bytes
.../t/htdocs/modules/h2/004/gophertiles_029.jpg | Bin 978 -> 0 bytes
.../t/htdocs/modules/h2/004/gophertiles_030.jpg | Bin 707 -> 0 bytes
.../t/htdocs/modules/h2/004/gophertiles_031.jpg | Bin 1060 -> 0 bytes
.../t/htdocs/modules/h2/004/gophertiles_032.jpg | Bin 688 -> 0 bytes
.../t/htdocs/modules/h2/004/gophertiles_033.jpg | Bin 701 -> 0 bytes
.../t/htdocs/modules/h2/004/gophertiles_034.jpg | Bin 898 -> 0 bytes
.../t/htdocs/modules/h2/004/gophertiles_035.jpg | Bin 986 -> 0 bytes
.../t/htdocs/modules/h2/004/gophertiles_036.jpg | Bin 770 -> 0 bytes
.../t/htdocs/modules/h2/004/gophertiles_037.jpg | Bin 959 -> 0 bytes
.../t/htdocs/modules/h2/004/gophertiles_038.jpg | Bin 936 -> 0 bytes
.../t/htdocs/modules/h2/004/gophertiles_039.jpg | Bin 700 -> 0 bytes
.../t/htdocs/modules/h2/004/gophertiles_040.jpg | Bin 784 -> 0 bytes
.../t/htdocs/modules/h2/004/gophertiles_041.jpg | Bin 758 -> 0 bytes
.../t/htdocs/modules/h2/004/gophertiles_042.jpg | Bin 796 -> 0 bytes
.../t/htdocs/modules/h2/004/gophertiles_043.jpg | Bin 813 -> 0 bytes
.../t/htdocs/modules/h2/004/gophertiles_044.jpg | Bin 924 -> 0 bytes
.../t/htdocs/modules/h2/004/gophertiles_045.jpg | Bin 978 -> 0 bytes
.../t/htdocs/modules/h2/004/gophertiles_046.jpg | Bin 752 -> 0 bytes
.../t/htdocs/modules/h2/004/gophertiles_047.jpg | Bin 751 -> 0 bytes
.../t/htdocs/modules/h2/004/gophertiles_048.jpg | Bin 737 -> 0 bytes
.../t/htdocs/modules/h2/004/gophertiles_049.jpg | Bin 992 -> 0 bytes
.../t/htdocs/modules/h2/004/gophertiles_050.jpg | Bin 688 -> 0 bytes
.../t/htdocs/modules/h2/004/gophertiles_051.jpg | Bin 697 -> 0 bytes
.../t/htdocs/modules/h2/004/gophertiles_052.jpg | Bin 699 -> 0 bytes
.../t/htdocs/modules/h2/004/gophertiles_053.jpg | Bin 1052 -> 0 bytes
.../t/htdocs/modules/h2/004/gophertiles_054.jpg | Bin 694 -> 0 bytes
.../t/htdocs/modules/h2/004/gophertiles_055.jpg | Bin 767 -> 0 bytes
.../t/htdocs/modules/h2/004/gophertiles_056.jpg | Bin 952 -> 0 bytes
.../t/htdocs/modules/h2/004/gophertiles_057.jpg | Bin 788 -> 0 bytes
.../t/htdocs/modules/h2/004/gophertiles_058.jpg | Bin 759 -> 0 bytes
.../t/htdocs/modules/h2/004/gophertiles_059.jpg | Bin 700 -> 0 bytes
.../t/htdocs/modules/h2/004/gophertiles_060.jpg | Bin 985 -> 0 bytes
.../t/htdocs/modules/h2/004/gophertiles_061.jpg | Bin 915 -> 0 bytes
.../t/htdocs/modules/h2/004/gophertiles_062.jpg | Bin 681 -> 0 bytes
.../t/htdocs/modules/h2/004/gophertiles_063.jpg | Bin 707 -> 0 bytes
.../t/htdocs/modules/h2/004/gophertiles_064.jpg | Bin 693 -> 0 bytes
.../t/htdocs/modules/h2/004/gophertiles_065.jpg | Bin 861 -> 0 bytes
.../t/htdocs/modules/h2/004/gophertiles_066.jpg | Bin 991 -> 0 bytes
.../t/htdocs/modules/h2/004/gophertiles_067.jpg | Bin 1056 -> 0 bytes
.../t/htdocs/modules/h2/004/gophertiles_068.jpg | Bin 697 -> 0 bytes
.../t/htdocs/modules/h2/004/gophertiles_069.jpg | Bin 1066 -> 0 bytes
.../t/htdocs/modules/h2/004/gophertiles_070.jpg | Bin 1024 -> 0 bytes
.../t/htdocs/modules/h2/004/gophertiles_071.jpg | Bin 784 -> 0 bytes
.../t/htdocs/modules/h2/004/gophertiles_072.jpg | Bin 698 -> 0 bytes
.../t/htdocs/modules/h2/004/gophertiles_073.jpg | Bin 1004 -> 0 bytes
.../t/htdocs/modules/h2/004/gophertiles_074.jpg | Bin 969 -> 0 bytes
.../t/htdocs/modules/h2/004/gophertiles_075.jpg | Bin 915 -> 0 bytes
.../t/htdocs/modules/h2/004/gophertiles_076.jpg | Bin 784 -> 0 bytes
.../t/htdocs/modules/h2/004/gophertiles_077.jpg | Bin 697 -> 0 bytes
.../t/htdocs/modules/h2/004/gophertiles_078.jpg | Bin 692 -> 0 bytes
.../t/htdocs/modules/h2/004/gophertiles_079.jpg | Bin 702 -> 0 bytes
.../t/htdocs/modules/h2/004/gophertiles_080.jpg | Bin 725 -> 0 bytes
.../t/htdocs/modules/h2/004/gophertiles_081.jpg | Bin 877 -> 0 bytes
.../t/htdocs/modules/h2/004/gophertiles_082.jpg | Bin 743 -> 0 bytes
.../t/htdocs/modules/h2/004/gophertiles_083.jpg | Bin 785 -> 0 bytes
.../t/htdocs/modules/h2/004/gophertiles_084.jpg | Bin 690 -> 0 bytes
.../t/htdocs/modules/h2/004/gophertiles_085.jpg | Bin 724 -> 0 bytes
.../t/htdocs/modules/h2/004/gophertiles_086.jpg | Bin 1054 -> 0 bytes
.../t/htdocs/modules/h2/004/gophertiles_087.jpg | Bin 883 -> 0 bytes
.../t/htdocs/modules/h2/004/gophertiles_088.jpg | Bin 702 -> 0 bytes
.../t/htdocs/modules/h2/004/gophertiles_089.jpg | Bin 693 -> 0 bytes
.../t/htdocs/modules/h2/004/gophertiles_090.jpg | Bin 947 -> 0 bytes
.../t/htdocs/modules/h2/004/gophertiles_091.jpg | Bin 959 -> 0 bytes
.../t/htdocs/modules/h2/004/gophertiles_092.jpg | Bin 736 -> 0 bytes
.../t/htdocs/modules/h2/004/gophertiles_093.jpg | Bin 806 -> 0 bytes
.../t/htdocs/modules/h2/004/gophertiles_094.jpg | Bin 820 -> 0 bytes
.../t/htdocs/modules/h2/004/gophertiles_095.jpg | Bin 918 -> 0 bytes
.../t/htdocs/modules/h2/004/gophertiles_096.jpg | Bin 689 -> 0 bytes
.../t/htdocs/modules/h2/004/gophertiles_097.jpg | Bin 796 -> 0 bytes
.../t/htdocs/modules/h2/004/gophertiles_098.jpg | Bin 686 -> 0 bytes
.../t/htdocs/modules/h2/004/gophertiles_099.jpg | Bin 698 -> 0 bytes
.../t/htdocs/modules/h2/004/gophertiles_100.jpg | Bin 686 -> 0 bytes
.../t/htdocs/modules/h2/004/gophertiles_101.jpg | Bin 686 -> 0 bytes
.../t/htdocs/modules/h2/004/gophertiles_102.jpg | Bin 682 -> 0 bytes
.../t/htdocs/modules/h2/004/gophertiles_103.jpg | Bin 703 -> 0 bytes
.../t/htdocs/modules/h2/004/gophertiles_104.jpg | Bin 698 -> 0 bytes
.../t/htdocs/modules/h2/004/gophertiles_105.jpg | Bin 702 -> 0 bytes
.../t/htdocs/modules/h2/004/gophertiles_106.jpg | Bin 989 -> 0 bytes
.../t/htdocs/modules/h2/004/gophertiles_107.jpg | Bin 720 -> 0 bytes
.../t/htdocs/modules/h2/004/gophertiles_108.jpg | Bin 834 -> 0 bytes
.../t/htdocs/modules/h2/004/gophertiles_109.jpg | Bin 756 -> 0 bytes
.../t/htdocs/modules/h2/004/gophertiles_110.jpg | Bin 703 -> 0 bytes
.../t/htdocs/modules/h2/004/gophertiles_111.jpg | Bin 815 -> 0 bytes
.../t/htdocs/modules/h2/004/gophertiles_112.jpg | Bin 780 -> 0 bytes
.../t/htdocs/modules/h2/004/gophertiles_113.jpg | Bin 992 -> 0 bytes
.../t/htdocs/modules/h2/004/gophertiles_114.jpg | Bin 862 -> 0 bytes
.../t/htdocs/modules/h2/004/gophertiles_115.jpg | Bin 1024 -> 0 bytes
.../t/htdocs/modules/h2/004/gophertiles_116.jpg | Bin 756 -> 0 bytes
.../t/htdocs/modules/h2/004/gophertiles_117.jpg | Bin 1012 -> 0 bytes
.../t/htdocs/modules/h2/004/gophertiles_118.jpg | Bin 905 -> 0 bytes
.../t/htdocs/modules/h2/004/gophertiles_119.jpg | Bin 808 -> 0 bytes
.../t/htdocs/modules/h2/004/gophertiles_120.jpg | Bin 814 -> 0 bytes
.../t/htdocs/modules/h2/004/gophertiles_121.jpg | Bin 832 -> 0 bytes
.../t/htdocs/modules/h2/004/gophertiles_122.jpg | Bin 704 -> 0 bytes
.../t/htdocs/modules/h2/004/gophertiles_123.jpg | Bin 741 -> 0 bytes
.../t/htdocs/modules/h2/004/gophertiles_124.jpg | Bin 694 -> 0 bytes
.../t/htdocs/modules/h2/004/gophertiles_125.jpg | Bin 950 -> 0 bytes
.../t/htdocs/modules/h2/004/gophertiles_126.jpg | Bin 770 -> 0 bytes
.../t/htdocs/modules/h2/004/gophertiles_127.jpg | Bin 749 -> 0 bytes
.../t/htdocs/modules/h2/004/gophertiles_128.jpg | Bin 942 -> 0 bytes
.../t/htdocs/modules/h2/004/gophertiles_129.jpg | Bin 997 -> 0 bytes
.../t/htdocs/modules/h2/004/gophertiles_130.jpg | Bin 708 -> 0 bytes
.../t/htdocs/modules/h2/004/gophertiles_131.jpg | Bin 821 -> 0 bytes
.../t/htdocs/modules/h2/004/gophertiles_132.jpg | Bin 849 -> 0 bytes
.../t/htdocs/modules/h2/004/gophertiles_133.jpg | Bin 715 -> 0 bytes
.../t/htdocs/modules/h2/004/gophertiles_134.jpg | Bin 794 -> 0 bytes
.../t/htdocs/modules/h2/004/gophertiles_135.jpg | Bin 869 -> 0 bytes
.../t/htdocs/modules/h2/004/gophertiles_136.jpg | Bin 1040 -> 0 bytes
.../t/htdocs/modules/h2/004/gophertiles_137.jpg | Bin 757 -> 0 bytes
.../t/htdocs/modules/h2/004/gophertiles_138.jpg | Bin 991 -> 0 bytes
.../t/htdocs/modules/h2/004/gophertiles_139.jpg | Bin 704 -> 0 bytes
.../t/htdocs/modules/h2/004/gophertiles_140.jpg | Bin 707 -> 0 bytes
.../t/htdocs/modules/h2/004/gophertiles_141.jpg | Bin 959 -> 0 bytes
.../t/htdocs/modules/h2/004/gophertiles_142.jpg | Bin 691 -> 0 bytes
.../t/htdocs/modules/h2/004/gophertiles_143.jpg | Bin 921 -> 0 bytes
.../t/htdocs/modules/h2/004/gophertiles_144.jpg | Bin 932 -> 0 bytes
.../t/htdocs/modules/h2/004/gophertiles_145.jpg | Bin 696 -> 0 bytes
.../t/htdocs/modules/h2/004/gophertiles_146.jpg | Bin 711 -> 0 bytes
.../t/htdocs/modules/h2/004/gophertiles_147.jpg | Bin 817 -> 0 bytes
.../t/htdocs/modules/h2/004/gophertiles_148.jpg | Bin 966 -> 0 bytes
.../t/htdocs/modules/h2/004/gophertiles_149.jpg | Bin 1002 -> 0 bytes
.../t/htdocs/modules/h2/004/gophertiles_150.jpg | Bin 900 -> 0 bytes
.../t/htdocs/modules/h2/004/gophertiles_151.jpg | Bin 724 -> 0 bytes
.../t/htdocs/modules/h2/004/gophertiles_152.jpg | Bin 1043 -> 0 bytes
.../t/htdocs/modules/h2/004/gophertiles_153.jpg | Bin 702 -> 0 bytes
.../t/htdocs/modules/h2/004/gophertiles_154.jpg | Bin 971 -> 0 bytes
.../t/htdocs/modules/h2/004/gophertiles_155.jpg | Bin 708 -> 0 bytes
.../t/htdocs/modules/h2/004/gophertiles_156.jpg | Bin 699 -> 0 bytes
.../t/htdocs/modules/h2/004/gophertiles_157.jpg | Bin 834 -> 0 bytes
.../t/htdocs/modules/h2/004/gophertiles_158.jpg | Bin 702 -> 0 bytes
.../t/htdocs/modules/h2/004/gophertiles_159.jpg | Bin 880 -> 0 bytes
.../t/htdocs/modules/h2/004/gophertiles_160.jpg | Bin 701 -> 0 bytes
.../t/htdocs/modules/h2/004/gophertiles_161.jpg | Bin 688 -> 0 bytes
.../t/htdocs/modules/h2/004/gophertiles_162.jpg | Bin 853 -> 0 bytes
.../t/htdocs/modules/h2/004/gophertiles_163.jpg | Bin 690 -> 0 bytes
.../t/htdocs/modules/h2/004/gophertiles_164.jpg | Bin 759 -> 0 bytes
.../t/htdocs/modules/h2/004/gophertiles_165.jpg | Bin 831 -> 0 bytes
.../t/htdocs/modules/h2/004/gophertiles_166.jpg | Bin 732 -> 0 bytes
.../t/htdocs/modules/h2/004/gophertiles_167.jpg | Bin 955 -> 0 bytes
.../t/htdocs/modules/h2/004/gophertiles_168.jpg | Bin 1043 -> 0 bytes
.../t/htdocs/modules/h2/004/gophertiles_169.jpg | Bin 969 -> 0 bytes
.../t/htdocs/modules/h2/004/gophertiles_170.jpg | Bin 701 -> 0 bytes
.../t/htdocs/modules/h2/004/gophertiles_171.jpg | Bin 755 -> 0 bytes
.../t/htdocs/modules/h2/004/gophertiles_172.jpg | Bin 924 -> 0 bytes
.../t/htdocs/modules/h2/004/gophertiles_173.jpg | Bin 958 -> 0 bytes
.../t/htdocs/modules/h2/004/gophertiles_174.jpg | Bin 998 -> 0 bytes
.../t/htdocs/modules/h2/004/gophertiles_175.jpg | Bin 702 -> 0 bytes
.../t/htdocs/modules/h2/004/gophertiles_176.jpg | Bin 760 -> 0 bytes
.../t/htdocs/modules/h2/004/gophertiles_177.jpg | Bin 732 -> 0 bytes
.../t/htdocs/modules/h2/004/gophertiles_178.jpg | Bin 929 -> 0 bytes
.../t/htdocs/modules/h2/004/gophertiles_179.jpg | Bin 712 -> 0 bytes
.../t/htdocs/modules/h2/004/gophertiles_180.jpg | Bin 1013 -> 0 bytes
debian/perl-framework/t/htdocs/modules/h2/006.html | 23 -
.../perl-framework/t/htdocs/modules/h2/006/006.css | 21 -
.../perl-framework/t/htdocs/modules/h2/006/006.js | 31 -
debian/perl-framework/t/htdocs/modules/h2/007.html | 21 -
.../perl-framework/t/htdocs/modules/h2/007/007.py | 29 -
debian/perl-framework/t/htdocs/modules/h2/009.py | 21 -
.../t/htdocs/modules/h2/files/empty.txt | 0
debian/perl-framework/t/htdocs/modules/h2/hello.pl | 13 -
.../perl-framework/t/htdocs/modules/h2/index.html | 45 -
.../perl-framework/t/htdocs/modules/h2/index.jpg | Bin 952 -> 0 bytes
debian/perl-framework/t/htdocs/modules/h2/info.php | 3 -
debian/perl-framework/t/htdocs/modules/h2/necho.pl | 29 -
.../perl-framework/t/htdocs/modules/h2/upload.pl | 47 -
.../perl-framework/t/htdocs/modules/h2/upload.py | 47 -
.../t/htdocs/modules/h2/xxx-1.0.2a.tar.gz | Bin 489 -> 0 bytes
.../t/htdocs/modules/include/comment.shtml | 5 +
.../htdocs/modules/include/mod_request/echo.shtml | 1 +
.../htdocs/modules/include/mod_request/post.shtml | 1 +
.../t/htdocs/modules/lua/filters.lua | 16 +
.../t/htdocs/modules/lua/websockets.lua | 18 +
.../negotiation/de/compressed/index.html.zh-TW | 1 +
.../htdocs/modules/negotiation/de/index.html.zh-TW | 1 +
.../modules/negotiation/de/two/index.zh-TW.html | 1 +
.../t/htdocs/modules/negotiation/de/two/map.var | 4 +
.../negotiation/en/compressed/index.html.zh-TW | 1 +
.../htdocs/modules/negotiation/en/index.html.zh-TW | 1 +
.../modules/negotiation/en/two/index.zh-TW.html | 1 +
.../t/htdocs/modules/negotiation/en/two/map.var | 4 +
.../negotiation/fr/compressed/index.html.zh-TW | 1 +
.../htdocs/modules/negotiation/fr/index.html.zh-TW | 1 +
.../modules/negotiation/fr/two/index.zh-TW.html | 1 +
.../t/htdocs/modules/negotiation/fr/two/map.var | 4 +
.../negotiation/fu/compressed/index.html.zh-TW | 1 +
.../htdocs/modules/negotiation/fu/index.html.zh-TW | 1 +
.../modules/negotiation/fu/two/index.zh-TW.html | 1 +
.../t/htdocs/modules/negotiation/fu/two/map.var | 4 +
.../negotiation/zh-TW/compressed/index.html.de | 1 +
.../negotiation/zh-TW/compressed/index.html.en | 1 +
.../negotiation/zh-TW/compressed/index.html.fr | 1 +
.../negotiation/zh-TW/compressed/index.html.fu | 1 +
.../negotiation/zh-TW/compressed/index.html.zh-TW | 1 +
.../htdocs/modules/negotiation/zh-TW/index.html.de | 1 +
.../htdocs/modules/negotiation/zh-TW/index.html.en | 1 +
.../htdocs/modules/negotiation/zh-TW/index.html.fr | 1 +
.../htdocs/modules/negotiation/zh-TW/index.html.fu | 1 +
.../modules/negotiation/zh-TW/index.html.zh-TW | 1 +
.../modules/negotiation/zh-TW/two/index.de.html | 1 +
.../modules/negotiation/zh-TW/two/index.en.html | 1 +
.../modules/negotiation/zh-TW/two/index.fr.html | 1 +
.../modules/negotiation/zh-TW/two/index.fu.html | 1 +
.../modules/negotiation/zh-TW/two/index.zh-TW.html | 1 +
.../t/htdocs/modules/negotiation/zh-TW/two/map.var | 21 +
.../t/htdocs/modules/substitute/.empty | 0
.../t/htdocs/modules/xml2enc/doc.fooxml | 1 +
.../t/htdocs/modules/xml2enc/doc.isohtml | 1 +
.../t/htdocs/modules/xml2enc/doc.notxml | 1 +
.../t/htdocs/modules/xml2enc/doc.xml | 1 +
.../perl-framework/t/htdocs/servlet/mapping.html | 1 +
debian/perl-framework/t/modules/actions.t | 59 +
debian/perl-framework/t/modules/alias.t | 31 +
debian/perl-framework/t/modules/allowmethods.t | 25 +-
debian/perl-framework/t/modules/autoindex.t | 13 +-
debian/perl-framework/t/modules/brotli.t | 109 +-
debian/perl-framework/t/modules/cgi.t | 17 +-
debian/perl-framework/t/modules/deflate.t | 7 +-
debian/perl-framework/t/modules/dir.t | 17 +-
debian/perl-framework/t/modules/headers.t | 178 +-
debian/perl-framework/t/modules/heartbeat.t | 30 +
debian/perl-framework/t/modules/http2.t | 528 --
debian/perl-framework/t/modules/include.t | 49 +-
debian/perl-framework/t/modules/info.t | 2 +-
debian/perl-framework/t/modules/ldap.t | 52 +
debian/perl-framework/t/modules/lua.t | 2 +
debian/perl-framework/t/modules/negotiation.t | 6 +-
debian/perl-framework/t/modules/proxy.t | 47 +-
debian/perl-framework/t/modules/proxy_balancer.t | 118 +-
debian/perl-framework/t/modules/proxy_fcgi.t | 12 +-
debian/perl-framework/t/modules/proxy_websockets.t | 81 +
.../t/modules/proxy_websockets_ssl.t | 86 +
debian/perl-framework/t/modules/rewrite.t | 92 +-
debian/perl-framework/t/modules/sed.t | 48 +
debian/perl-framework/t/modules/session.t | 6 +-
debian/perl-framework/t/modules/setenvif.t | 14 +-
debian/perl-framework/t/modules/speling.t | 12 +-
debian/perl-framework/t/modules/substitute.t | 19 +-
debian/perl-framework/t/modules/usertrack.t | 14 +-
debian/perl-framework/t/php-fpm/log/.empty | 0
debian/perl-framework/t/php-fpm/pools/www/.empty | 0
debian/perl-framework/t/php-fpm/run/.empty | 0
debian/perl-framework/t/php-fpm/var/log/.empty | 0
debian/perl-framework/t/security/CVE-2009-3555.t | 6 +
debian/perl-framework/t/ssl/ocsp.t | 19 +-
debian/perl-framework/t/ssl/pha.t | 47 +
debian/perl-framework/t/ssl/pr12355.t | 21 +-
debian/perl-framework/t/ssl/pr43738.t | 21 +-
debian/perl-framework/t/ssl/proxy.t | 16 +-
debian/perl-framework/t/ssl/varlookup.t | 7 +-
debian/upstream/signing-key.asc | 7691 ++++++--------------
debian/watch | 2 +-
398 files changed, 4990 insertions(+), 20340 deletions(-)
delete mode 100644 debian/patches/0052-CVE-2023-27522-HTTP-Response-Smuggling-mod_proxy_uws.patch
delete mode 100644 debian/patches/0053-CVE-2023-25690-1.patch
delete mode 100644 debian/patches/0054-CVE-2023-25690-2.patch
delete mode 100644 debian/patches/0055-CVE-2023-25690-Regression-1.patch
delete mode 100644 debian/patches/0056-CVE-2023-25690-Regression-2.patch
delete mode 100644 debian/patches/0057-CVE-2023-25690-Regression-3.patch
delete mode 100644 debian/patches/CVE-2006-20001.patch
delete mode 100644 debian/patches/CVE-2019-0196.patch
delete mode 100644 debian/patches/CVE-2019-0197.patch
delete mode 100644 debian/patches/CVE-2019-0211.patch
delete mode 100644 debian/patches/CVE-2019-0215.patch
delete mode 100644 debian/patches/CVE-2019-0217.patch
delete mode 100644 debian/patches/CVE-2019-0220-1.patch
delete mode 100644 debian/patches/CVE-2019-0220-2.patch
delete mode 100644 debian/patches/CVE-2019-0220-3.patch
delete mode 100644 debian/patches/CVE-2019-10092.patch
delete mode 100644 debian/patches/CVE-2019-10097.patch
delete mode 100644 debian/patches/CVE-2019-10098.patch
delete mode 100644 debian/patches/CVE-2020-11984.patch
delete mode 100644 debian/patches/CVE-2020-1927.patch
delete mode 100644 debian/patches/CVE-2020-1934.patch
delete mode 100644 debian/patches/CVE-2020-35452.patch
delete mode 100644 debian/patches/CVE-2021-26690.patch
delete mode 100644 debian/patches/CVE-2021-26691.patch
delete mode 100644 debian/patches/CVE-2021-30641.patch
delete mode 100644 debian/patches/CVE-2021-31618.patch
delete mode 100644 debian/patches/CVE-2021-33193.patch
delete mode 100644 debian/patches/CVE-2021-34798.patch
delete mode 100644 debian/patches/CVE-2021-36160-2.patch
delete mode 100644 debian/patches/CVE-2021-36160.patch
delete mode 100644 debian/patches/CVE-2021-39275.patch
delete mode 100644 debian/patches/CVE-2021-40438.patch
delete mode 100644 debian/patches/CVE-2021-44224-1.patch
delete mode 100644 debian/patches/CVE-2021-44224-2.patch
delete mode 100644 debian/patches/CVE-2021-44790.patch
delete mode 100644 debian/patches/CVE-2022-22719.patch
delete mode 100644 debian/patches/CVE-2022-22720.patch
delete mode 100644 debian/patches/CVE-2022-22721.patch
delete mode 100644 debian/patches/CVE-2022-23943-1.patch
delete mode 100644 debian/patches/CVE-2022-23943-2.patch
delete mode 100644 debian/patches/CVE-2022-26377.patch
delete mode 100644 debian/patches/CVE-2022-28614.patch
delete mode 100644 debian/patches/CVE-2022-28615.patch
delete mode 100644 debian/patches/CVE-2022-29404.patch
delete mode 100644 debian/patches/CVE-2022-30522.patch
delete mode 100644 debian/patches/CVE-2022-30556.patch
delete mode 100644 debian/patches/CVE-2022-31813.patch
delete mode 100644 debian/patches/CVE-2022-36760.patch
delete mode 100644 debian/patches/CVE-2022-37436.patch
create mode 100644 debian/patches/fix-macro.patch
delete mode 100644 debian/patches/import-http2-module-from-2.4.46.patch
delete mode 100644 debian/patches/spelling-errors.patch
create mode 100644 debian/perl-framework/scripts/httpd-sub.ldif
create mode 100644 debian/perl-framework/scripts/httpd.ldif
create mode 100755 debian/perl-framework/scripts/ldap-init.sh
create mode 100755 debian/perl-framework/scripts/memcached-init.sh
create mode 100644 debian/perl-framework/scripts/non-anon.ldif
create mode 100755 debian/perl-framework/scripts/redis-init.sh
create mode 100644 debian/perl-framework/scripts/slapd-config.ldif
create mode 100644 debian/perl-framework/scripts/suffix.ldif
create mode 100644 debian/perl-framework/t/apache/mergeslashes.t
create mode 100644 debian/perl-framework/t/apache/pr64339.t
create mode 100644 debian/perl-framework/t/apache/teclchunk.t
delete mode 100644 debian/perl-framework/t/conf/http2.conf.in
create mode 100644 debian/perl-framework/t/htdocs/modules/actions/action/dummy.txt
create mode 100644 debian/perl-framework/t/htdocs/modules/actions/script/dummy.txt
create mode 100644 debian/perl-framework/t/htdocs/modules/allowmethods/Get/none/.empty
create mode 100644 debian/perl-framework/t/htdocs/modules/allowmethods/Get/post/foo.txt
create mode 100644 debian/perl-framework/t/htdocs/modules/allowmethods/NoPost/.empty
create mode 100644 debian/perl-framework/t/htdocs/modules/allowmethods/Post/reset/.empty
create mode 100755 debian/perl-framework/t/htdocs/modules/cgi/perl_post.pl.PL
create mode 100644 debian/perl-framework/t/htdocs/modules/dir/htaccess/sub1/index.html
delete mode 100755 debian/perl-framework/t/htdocs/modules/h2/001.html
delete mode 100755 debian/perl-framework/t/htdocs/modules/h2/002.jpg
delete mode 100755 debian/perl-framework/t/htdocs/modules/h2/003.html
delete mode 100755 debian/perl-framework/t/htdocs/modules/h2/003/003_img.jpg
delete mode 100755 debian/perl-framework/t/htdocs/modules/h2/004.html
delete mode 100755 debian/perl-framework/t/htdocs/modules/h2/004/gophertiles.jpg
delete mode 100755 debian/perl-framework/t/htdocs/modules/h2/004/gophertiles_002.jpg
delete mode 100755 debian/perl-framework/t/htdocs/modules/h2/004/gophertiles_003.jpg
delete mode 100755 debian/perl-framework/t/htdocs/modules/h2/004/gophertiles_004.jpg
delete mode 100755 debian/perl-framework/t/htdocs/modules/h2/004/gophertiles_005.jpg
delete mode 100755 debian/perl-framework/t/htdocs/modules/h2/004/gophertiles_006.jpg
delete mode 100755 debian/perl-framework/t/htdocs/modules/h2/004/gophertiles_007.jpg
delete mode 100755 debian/perl-framework/t/htdocs/modules/h2/004/gophertiles_008.jpg
delete mode 100755 debian/perl-framework/t/htdocs/modules/h2/004/gophertiles_009.jpg
delete mode 100755 debian/perl-framework/t/htdocs/modules/h2/004/gophertiles_010.jpg
delete mode 100755 debian/perl-framework/t/htdocs/modules/h2/004/gophertiles_011.jpg
delete mode 100755 debian/perl-framework/t/htdocs/modules/h2/004/gophertiles_012.jpg
delete mode 100755 debian/perl-framework/t/htdocs/modules/h2/004/gophertiles_013.jpg
delete mode 100755 debian/perl-framework/t/htdocs/modules/h2/004/gophertiles_014.jpg
delete mode 100755 debian/perl-framework/t/htdocs/modules/h2/004/gophertiles_015.jpg
delete mode 100755 debian/perl-framework/t/htdocs/modules/h2/004/gophertiles_016.jpg
delete mode 100755 debian/perl-framework/t/htdocs/modules/h2/004/gophertiles_017.jpg
delete mode 100755 debian/perl-framework/t/htdocs/modules/h2/004/gophertiles_018.jpg
delete mode 100755 debian/perl-framework/t/htdocs/modules/h2/004/gophertiles_019.jpg
delete mode 100755 debian/perl-framework/t/htdocs/modules/h2/004/gophertiles_020.jpg
delete mode 100755 debian/perl-framework/t/htdocs/modules/h2/004/gophertiles_021.jpg
delete mode 100755 debian/perl-framework/t/htdocs/modules/h2/004/gophertiles_022.jpg
delete mode 100755 debian/perl-framework/t/htdocs/modules/h2/004/gophertiles_023.jpg
delete mode 100755 debian/perl-framework/t/htdocs/modules/h2/004/gophertiles_024.jpg
delete mode 100755 debian/perl-framework/t/htdocs/modules/h2/004/gophertiles_025.jpg
delete mode 100755 debian/perl-framework/t/htdocs/modules/h2/004/gophertiles_026.jpg
delete mode 100755 debian/perl-framework/t/htdocs/modules/h2/004/gophertiles_027.jpg
delete mode 100755 debian/perl-framework/t/htdocs/modules/h2/004/gophertiles_028.jpg
delete mode 100755 debian/perl-framework/t/htdocs/modules/h2/004/gophertiles_029.jpg
delete mode 100755 debian/perl-framework/t/htdocs/modules/h2/004/gophertiles_030.jpg
delete mode 100755 debian/perl-framework/t/htdocs/modules/h2/004/gophertiles_031.jpg
delete mode 100755 debian/perl-framework/t/htdocs/modules/h2/004/gophertiles_032.jpg
delete mode 100755 debian/perl-framework/t/htdocs/modules/h2/004/gophertiles_033.jpg
delete mode 100755 debian/perl-framework/t/htdocs/modules/h2/004/gophertiles_034.jpg
delete mode 100755 debian/perl-framework/t/htdocs/modules/h2/004/gophertiles_035.jpg
delete mode 100755 debian/perl-framework/t/htdocs/modules/h2/004/gophertiles_036.jpg
delete mode 100755 debian/perl-framework/t/htdocs/modules/h2/004/gophertiles_037.jpg
delete mode 100755 debian/perl-framework/t/htdocs/modules/h2/004/gophertiles_038.jpg
delete mode 100755 debian/perl-framework/t/htdocs/modules/h2/004/gophertiles_039.jpg
delete mode 100755 debian/perl-framework/t/htdocs/modules/h2/004/gophertiles_040.jpg
delete mode 100755 debian/perl-framework/t/htdocs/modules/h2/004/gophertiles_041.jpg
delete mode 100755 debian/perl-framework/t/htdocs/modules/h2/004/gophertiles_042.jpg
delete mode 100755 debian/perl-framework/t/htdocs/modules/h2/004/gophertiles_043.jpg
delete mode 100755 debian/perl-framework/t/htdocs/modules/h2/004/gophertiles_044.jpg
delete mode 100755 debian/perl-framework/t/htdocs/modules/h2/004/gophertiles_045.jpg
delete mode 100755 debian/perl-framework/t/htdocs/modules/h2/004/gophertiles_046.jpg
delete mode 100755 debian/perl-framework/t/htdocs/modules/h2/004/gophertiles_047.jpg
delete mode 100755 debian/perl-framework/t/htdocs/modules/h2/004/gophertiles_048.jpg
delete mode 100755 debian/perl-framework/t/htdocs/modules/h2/004/gophertiles_049.jpg
delete mode 100755 debian/perl-framework/t/htdocs/modules/h2/004/gophertiles_050.jpg
delete mode 100755 debian/perl-framework/t/htdocs/modules/h2/004/gophertiles_051.jpg
delete mode 100755 debian/perl-framework/t/htdocs/modules/h2/004/gophertiles_052.jpg
delete mode 100755 debian/perl-framework/t/htdocs/modules/h2/004/gophertiles_053.jpg
delete mode 100755 debian/perl-framework/t/htdocs/modules/h2/004/gophertiles_054.jpg
delete mode 100755 debian/perl-framework/t/htdocs/modules/h2/004/gophertiles_055.jpg
delete mode 100755 debian/perl-framework/t/htdocs/modules/h2/004/gophertiles_056.jpg
delete mode 100755 debian/perl-framework/t/htdocs/modules/h2/004/gophertiles_057.jpg
delete mode 100755 debian/perl-framework/t/htdocs/modules/h2/004/gophertiles_058.jpg
delete mode 100755 debian/perl-framework/t/htdocs/modules/h2/004/gophertiles_059.jpg
delete mode 100755 debian/perl-framework/t/htdocs/modules/h2/004/gophertiles_060.jpg
delete mode 100755 debian/perl-framework/t/htdocs/modules/h2/004/gophertiles_061.jpg
delete mode 100755 debian/perl-framework/t/htdocs/modules/h2/004/gophertiles_062.jpg
delete mode 100755 debian/perl-framework/t/htdocs/modules/h2/004/gophertiles_063.jpg
delete mode 100755 debian/perl-framework/t/htdocs/modules/h2/004/gophertiles_064.jpg
delete mode 100755 debian/perl-framework/t/htdocs/modules/h2/004/gophertiles_065.jpg
delete mode 100755 debian/perl-framework/t/htdocs/modules/h2/004/gophertiles_066.jpg
delete mode 100755 debian/perl-framework/t/htdocs/modules/h2/004/gophertiles_067.jpg
delete mode 100755 debian/perl-framework/t/htdocs/modules/h2/004/gophertiles_068.jpg
delete mode 100755 debian/perl-framework/t/htdocs/modules/h2/004/gophertiles_069.jpg
delete mode 100755 debian/perl-framework/t/htdocs/modules/h2/004/gophertiles_070.jpg
delete mode 100755 debian/perl-framework/t/htdocs/modules/h2/004/gophertiles_071.jpg
delete mode 100755 debian/perl-framework/t/htdocs/modules/h2/004/gophertiles_072.jpg
delete mode 100755 debian/perl-framework/t/htdocs/modules/h2/004/gophertiles_073.jpg
delete mode 100755 debian/perl-framework/t/htdocs/modules/h2/004/gophertiles_074.jpg
delete mode 100755 debian/perl-framework/t/htdocs/modules/h2/004/gophertiles_075.jpg
delete mode 100755 debian/perl-framework/t/htdocs/modules/h2/004/gophertiles_076.jpg
delete mode 100755 debian/perl-framework/t/htdocs/modules/h2/004/gophertiles_077.jpg
delete mode 100755 debian/perl-framework/t/htdocs/modules/h2/004/gophertiles_078.jpg
delete mode 100755 debian/perl-framework/t/htdocs/modules/h2/004/gophertiles_079.jpg
delete mode 100755 debian/perl-framework/t/htdocs/modules/h2/004/gophertiles_080.jpg
delete mode 100755 debian/perl-framework/t/htdocs/modules/h2/004/gophertiles_081.jpg
delete mode 100755 debian/perl-framework/t/htdocs/modules/h2/004/gophertiles_082.jpg
delete mode 100755 debian/perl-framework/t/htdocs/modules/h2/004/gophertiles_083.jpg
delete mode 100755 debian/perl-framework/t/htdocs/modules/h2/004/gophertiles_084.jpg
delete mode 100755 debian/perl-framework/t/htdocs/modules/h2/004/gophertiles_085.jpg
delete mode 100755 debian/perl-framework/t/htdocs/modules/h2/004/gophertiles_086.jpg
delete mode 100755 debian/perl-framework/t/htdocs/modules/h2/004/gophertiles_087.jpg
delete mode 100755 debian/perl-framework/t/htdocs/modules/h2/004/gophertiles_088.jpg
delete mode 100755 debian/perl-framework/t/htdocs/modules/h2/004/gophertiles_089.jpg
delete mode 100755 debian/perl-framework/t/htdocs/modules/h2/004/gophertiles_090.jpg
delete mode 100755 debian/perl-framework/t/htdocs/modules/h2/004/gophertiles_091.jpg
delete mode 100755 debian/perl-framework/t/htdocs/modules/h2/004/gophertiles_092.jpg
delete mode 100755 debian/perl-framework/t/htdocs/modules/h2/004/gophertiles_093.jpg
delete mode 100755 debian/perl-framework/t/htdocs/modules/h2/004/gophertiles_094.jpg
delete mode 100755 debian/perl-framework/t/htdocs/modules/h2/004/gophertiles_095.jpg
delete mode 100755 debian/perl-framework/t/htdocs/modules/h2/004/gophertiles_096.jpg
delete mode 100755 debian/perl-framework/t/htdocs/modules/h2/004/gophertiles_097.jpg
delete mode 100755 debian/perl-framework/t/htdocs/modules/h2/004/gophertiles_098.jpg
delete mode 100755 debian/perl-framework/t/htdocs/modules/h2/004/gophertiles_099.jpg
delete mode 100755 debian/perl-framework/t/htdocs/modules/h2/004/gophertiles_100.jpg
delete mode 100755 debian/perl-framework/t/htdocs/modules/h2/004/gophertiles_101.jpg
delete mode 100755 debian/perl-framework/t/htdocs/modules/h2/004/gophertiles_102.jpg
delete mode 100755 debian/perl-framework/t/htdocs/modules/h2/004/gophertiles_103.jpg
delete mode 100755 debian/perl-framework/t/htdocs/modules/h2/004/gophertiles_104.jpg
delete mode 100755 debian/perl-framework/t/htdocs/modules/h2/004/gophertiles_105.jpg
delete mode 100755 debian/perl-framework/t/htdocs/modules/h2/004/gophertiles_106.jpg
delete mode 100755 debian/perl-framework/t/htdocs/modules/h2/004/gophertiles_107.jpg
delete mode 100755 debian/perl-framework/t/htdocs/modules/h2/004/gophertiles_108.jpg
delete mode 100755 debian/perl-framework/t/htdocs/modules/h2/004/gophertiles_109.jpg
delete mode 100755 debian/perl-framework/t/htdocs/modules/h2/004/gophertiles_110.jpg
delete mode 100755 debian/perl-framework/t/htdocs/modules/h2/004/gophertiles_111.jpg
delete mode 100755 debian/perl-framework/t/htdocs/modules/h2/004/gophertiles_112.jpg
delete mode 100755 debian/perl-framework/t/htdocs/modules/h2/004/gophertiles_113.jpg
delete mode 100755 debian/perl-framework/t/htdocs/modules/h2/004/gophertiles_114.jpg
delete mode 100755 debian/perl-framework/t/htdocs/modules/h2/004/gophertiles_115.jpg
delete mode 100755 debian/perl-framework/t/htdocs/modules/h2/004/gophertiles_116.jpg
delete mode 100755 debian/perl-framework/t/htdocs/modules/h2/004/gophertiles_117.jpg
delete mode 100755 debian/perl-framework/t/htdocs/modules/h2/004/gophertiles_118.jpg
delete mode 100755 debian/perl-framework/t/htdocs/modules/h2/004/gophertiles_119.jpg
delete mode 100755 debian/perl-framework/t/htdocs/modules/h2/004/gophertiles_120.jpg
delete mode 100755 debian/perl-framework/t/htdocs/modules/h2/004/gophertiles_121.jpg
delete mode 100755 debian/perl-framework/t/htdocs/modules/h2/004/gophertiles_122.jpg
delete mode 100755 debian/perl-framework/t/htdocs/modules/h2/004/gophertiles_123.jpg
delete mode 100755 debian/perl-framework/t/htdocs/modules/h2/004/gophertiles_124.jpg
delete mode 100755 debian/perl-framework/t/htdocs/modules/h2/004/gophertiles_125.jpg
delete mode 100755 debian/perl-framework/t/htdocs/modules/h2/004/gophertiles_126.jpg
delete mode 100755 debian/perl-framework/t/htdocs/modules/h2/004/gophertiles_127.jpg
delete mode 100755 debian/perl-framework/t/htdocs/modules/h2/004/gophertiles_128.jpg
delete mode 100755 debian/perl-framework/t/htdocs/modules/h2/004/gophertiles_129.jpg
delete mode 100755 debian/perl-framework/t/htdocs/modules/h2/004/gophertiles_130.jpg
delete mode 100755 debian/perl-framework/t/htdocs/modules/h2/004/gophertiles_131.jpg
delete mode 100755 debian/perl-framework/t/htdocs/modules/h2/004/gophertiles_132.jpg
delete mode 100755 debian/perl-framework/t/htdocs/modules/h2/004/gophertiles_133.jpg
delete mode 100755 debian/perl-framework/t/htdocs/modules/h2/004/gophertiles_134.jpg
delete mode 100755 debian/perl-framework/t/htdocs/modules/h2/004/gophertiles_135.jpg
delete mode 100755 debian/perl-framework/t/htdocs/modules/h2/004/gophertiles_136.jpg
delete mode 100755 debian/perl-framework/t/htdocs/modules/h2/004/gophertiles_137.jpg
delete mode 100755 debian/perl-framework/t/htdocs/modules/h2/004/gophertiles_138.jpg
delete mode 100755 debian/perl-framework/t/htdocs/modules/h2/004/gophertiles_139.jpg
delete mode 100755 debian/perl-framework/t/htdocs/modules/h2/004/gophertiles_140.jpg
delete mode 100755 debian/perl-framework/t/htdocs/modules/h2/004/gophertiles_141.jpg
delete mode 100755 debian/perl-framework/t/htdocs/modules/h2/004/gophertiles_142.jpg
delete mode 100755 debian/perl-framework/t/htdocs/modules/h2/004/gophertiles_143.jpg
delete mode 100755 debian/perl-framework/t/htdocs/modules/h2/004/gophertiles_144.jpg
delete mode 100755 debian/perl-framework/t/htdocs/modules/h2/004/gophertiles_145.jpg
delete mode 100755 debian/perl-framework/t/htdocs/modules/h2/004/gophertiles_146.jpg
delete mode 100755 debian/perl-framework/t/htdocs/modules/h2/004/gophertiles_147.jpg
delete mode 100755 debian/perl-framework/t/htdocs/modules/h2/004/gophertiles_148.jpg
delete mode 100755 debian/perl-framework/t/htdocs/modules/h2/004/gophertiles_149.jpg
delete mode 100755 debian/perl-framework/t/htdocs/modules/h2/004/gophertiles_150.jpg
delete mode 100755 debian/perl-framework/t/htdocs/modules/h2/004/gophertiles_151.jpg
delete mode 100755 debian/perl-framework/t/htdocs/modules/h2/004/gophertiles_152.jpg
delete mode 100755 debian/perl-framework/t/htdocs/modules/h2/004/gophertiles_153.jpg
delete mode 100755 debian/perl-framework/t/htdocs/modules/h2/004/gophertiles_154.jpg
delete mode 100755 debian/perl-framework/t/htdocs/modules/h2/004/gophertiles_155.jpg
delete mode 100755 debian/perl-framework/t/htdocs/modules/h2/004/gophertiles_156.jpg
delete mode 100755 debian/perl-framework/t/htdocs/modules/h2/004/gophertiles_157.jpg
delete mode 100755 debian/perl-framework/t/htdocs/modules/h2/004/gophertiles_158.jpg
delete mode 100755 debian/perl-framework/t/htdocs/modules/h2/004/gophertiles_159.jpg
delete mode 100755 debian/perl-framework/t/htdocs/modules/h2/004/gophertiles_160.jpg
delete mode 100755 debian/perl-framework/t/htdocs/modules/h2/004/gophertiles_161.jpg
delete mode 100755 debian/perl-framework/t/htdocs/modules/h2/004/gophertiles_162.jpg
delete mode 100755 debian/perl-framework/t/htdocs/modules/h2/004/gophertiles_163.jpg
delete mode 100755 debian/perl-framework/t/htdocs/modules/h2/004/gophertiles_164.jpg
delete mode 100755 debian/perl-framework/t/htdocs/modules/h2/004/gophertiles_165.jpg
delete mode 100755 debian/perl-framework/t/htdocs/modules/h2/004/gophertiles_166.jpg
delete mode 100755 debian/perl-framework/t/htdocs/modules/h2/004/gophertiles_167.jpg
delete mode 100755 debian/perl-framework/t/htdocs/modules/h2/004/gophertiles_168.jpg
delete mode 100755 debian/perl-framework/t/htdocs/modules/h2/004/gophertiles_169.jpg
delete mode 100755 debian/perl-framework/t/htdocs/modules/h2/004/gophertiles_170.jpg
delete mode 100755 debian/perl-framework/t/htdocs/modules/h2/004/gophertiles_171.jpg
delete mode 100755 debian/perl-framework/t/htdocs/modules/h2/004/gophertiles_172.jpg
delete mode 100755 debian/perl-framework/t/htdocs/modules/h2/004/gophertiles_173.jpg
delete mode 100755 debian/perl-framework/t/htdocs/modules/h2/004/gophertiles_174.jpg
delete mode 100755 debian/perl-framework/t/htdocs/modules/h2/004/gophertiles_175.jpg
delete mode 100755 debian/perl-framework/t/htdocs/modules/h2/004/gophertiles_176.jpg
delete mode 100755 debian/perl-framework/t/htdocs/modules/h2/004/gophertiles_177.jpg
delete mode 100755 debian/perl-framework/t/htdocs/modules/h2/004/gophertiles_178.jpg
delete mode 100755 debian/perl-framework/t/htdocs/modules/h2/004/gophertiles_179.jpg
delete mode 100755 debian/perl-framework/t/htdocs/modules/h2/004/gophertiles_180.jpg
delete mode 100755 debian/perl-framework/t/htdocs/modules/h2/006.html
delete mode 100755 debian/perl-framework/t/htdocs/modules/h2/006/006.css
delete mode 100755 debian/perl-framework/t/htdocs/modules/h2/006/006.js
delete mode 100755 debian/perl-framework/t/htdocs/modules/h2/007.html
delete mode 100755 debian/perl-framework/t/htdocs/modules/h2/007/007.py
delete mode 100755 debian/perl-framework/t/htdocs/modules/h2/009.py
delete mode 100644 debian/perl-framework/t/htdocs/modules/h2/files/empty.txt
delete mode 100755 debian/perl-framework/t/htdocs/modules/h2/hello.pl
delete mode 100644 debian/perl-framework/t/htdocs/modules/h2/index.html
delete mode 100755 debian/perl-framework/t/htdocs/modules/h2/index.jpg
delete mode 100644 debian/perl-framework/t/htdocs/modules/h2/info.php
delete mode 100755 debian/perl-framework/t/htdocs/modules/h2/necho.pl
delete mode 100755 debian/perl-framework/t/htdocs/modules/h2/upload.pl
delete mode 100755 debian/perl-framework/t/htdocs/modules/h2/upload.py
delete mode 100644 debian/perl-framework/t/htdocs/modules/h2/xxx-1.0.2a.tar.gz
create mode 100755 debian/perl-framework/t/htdocs/modules/include/comment.shtml
create mode 100755 debian/perl-framework/t/htdocs/modules/include/mod_request/echo.shtml
create mode 100755 debian/perl-framework/t/htdocs/modules/include/mod_request/post.shtml
create mode 100644 debian/perl-framework/t/htdocs/modules/lua/filters.lua
create mode 100644 debian/perl-framework/t/htdocs/modules/lua/websockets.lua
create mode 100755 debian/perl-framework/t/htdocs/modules/negotiation/de/compressed/index.html.zh-TW
create mode 100755 debian/perl-framework/t/htdocs/modules/negotiation/de/index.html.zh-TW
create mode 100755 debian/perl-framework/t/htdocs/modules/negotiation/de/two/index.zh-TW.html
create mode 100755 debian/perl-framework/t/htdocs/modules/negotiation/en/compressed/index.html.zh-TW
create mode 100755 debian/perl-framework/t/htdocs/modules/negotiation/en/index.html.zh-TW
create mode 100755 debian/perl-framework/t/htdocs/modules/negotiation/en/two/index.zh-TW.html
create mode 100755 debian/perl-framework/t/htdocs/modules/negotiation/fr/compressed/index.html.zh-TW
create mode 100755 debian/perl-framework/t/htdocs/modules/negotiation/fr/index.html.zh-TW
create mode 100755 debian/perl-framework/t/htdocs/modules/negotiation/fr/two/index.zh-TW.html
create mode 100755 debian/perl-framework/t/htdocs/modules/negotiation/fu/compressed/index.html.zh-TW
create mode 100755 debian/perl-framework/t/htdocs/modules/negotiation/fu/index.html.zh-TW
create mode 100755 debian/perl-framework/t/htdocs/modules/negotiation/fu/two/index.zh-TW.html
create mode 100755 debian/perl-framework/t/htdocs/modules/negotiation/zh-TW/compressed/index.html.de
create mode 100755 debian/perl-framework/t/htdocs/modules/negotiation/zh-TW/compressed/index.html.en
create mode 100755 debian/perl-framework/t/htdocs/modules/negotiation/zh-TW/compressed/index.html.fr
create mode 100755 debian/perl-framework/t/htdocs/modules/negotiation/zh-TW/compressed/index.html.fu
create mode 100755 debian/perl-framework/t/htdocs/modules/negotiation/zh-TW/compressed/index.html.zh-TW
create mode 100755 debian/perl-framework/t/htdocs/modules/negotiation/zh-TW/index.html.de
create mode 100755 debian/perl-framework/t/htdocs/modules/negotiation/zh-TW/index.html.en
create mode 100755 debian/perl-framework/t/htdocs/modules/negotiation/zh-TW/index.html.fr
create mode 100755 debian/perl-framework/t/htdocs/modules/negotiation/zh-TW/index.html.fu
create mode 100755 debian/perl-framework/t/htdocs/modules/negotiation/zh-TW/index.html.zh-TW
create mode 100755 debian/perl-framework/t/htdocs/modules/negotiation/zh-TW/two/index.de.html
create mode 100755 debian/perl-framework/t/htdocs/modules/negotiation/zh-TW/two/index.en.html
create mode 100755 debian/perl-framework/t/htdocs/modules/negotiation/zh-TW/two/index.fr.html
create mode 100755 debian/perl-framework/t/htdocs/modules/negotiation/zh-TW/two/index.fu.html
create mode 100755 debian/perl-framework/t/htdocs/modules/negotiation/zh-TW/two/index.zh-TW.html
create mode 100755 debian/perl-framework/t/htdocs/modules/negotiation/zh-TW/two/map.var
create mode 100644 debian/perl-framework/t/htdocs/modules/substitute/.empty
create mode 100644 debian/perl-framework/t/htdocs/modules/xml2enc/doc.fooxml
create mode 100644 debian/perl-framework/t/htdocs/modules/xml2enc/doc.isohtml
create mode 100644 debian/perl-framework/t/htdocs/modules/xml2enc/doc.notxml
create mode 100644 debian/perl-framework/t/htdocs/modules/xml2enc/doc.xml
create mode 100644 debian/perl-framework/t/htdocs/servlet/mapping.html
create mode 100644 debian/perl-framework/t/modules/actions.t
create mode 100644 debian/perl-framework/t/modules/heartbeat.t
delete mode 100644 debian/perl-framework/t/modules/http2.t
create mode 100644 debian/perl-framework/t/modules/ldap.t
create mode 100644 debian/perl-framework/t/modules/proxy_websockets.t
create mode 100644 debian/perl-framework/t/modules/proxy_websockets_ssl.t
create mode 100644 debian/perl-framework/t/modules/sed.t
create mode 100644 debian/perl-framework/t/php-fpm/log/.empty
create mode 100644 debian/perl-framework/t/php-fpm/pools/www/.empty
create mode 100644 debian/perl-framework/t/php-fpm/run/.empty
create mode 100644 debian/perl-framework/t/php-fpm/var/log/.empty
create mode 100644 debian/perl-framework/t/ssl/pha.t
diff --git a/debian/changelog b/debian/changelog
index ee0857b..01f68a0 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -1,3 +1,18 @@
+apache2 (2.4.59-1~deb10u1) buster-security; urgency=medium
+
+  [ Yadd ]
+  * Team upload
+  * New upstream version 2.4.59 (Closes: CVE-2019-17567, CVE-2023-31122,
+    CVE-2023-38709, CVE-2023-45802, CVE-2024-24795, CVE-2024-27316)
+  * Update test framework
+  * Drop old patches
+  * Update patches
+
+  [ Bastien Roucariès ]
+  * Break against fossil
+
+ -- Bastien Roucariès Fri, 24 May 2024 22:36:21 +0000
+
 apache2 (2.4.38-3+deb10u10) buster-security; urgency=medium
 
   * Non-maintainer upload by the LTS Team.
diff --git a/debian/control b/debian/control
index 4d5ca9f..61bef7b 100644
--- a/debian/control
+++ b/debian/control
@@ -82,7 +82,8 @@ Suggests: apache2-doc,
www-browser
Breaks: gridsite (<< 3.0.0~20170225gitd51b2fd-1~),
libapache2-mod-dacs (<= 1.4.38a-2),
- libapache2-mod-proxy-uwsgi (<< 2.4.33)
+ libapache2-mod-proxy-uwsgi (<< 2.4.33),
+ fossil (<< 2.8-1+deb10u1~)
Provides: ${apache2:API}
Replaces: libapache2-mod-proxy-uwsgi (<< 2.4.33)
Description: Apache HTTP Server (modules and other binary files)
diff --git a/debian/patches/0052-CVE-2023-27522-HTTP-Response-Smuggling-mod_proxy_uws.patch b/debian/patches/0052-CVE-2023-27522-HTTP-Response-Smuggling-mod_proxy_uws.patch
deleted file mode 100644
index f39fa72..0000000
--- a/debian/patches/0052-CVE-2023-27522-HTTP-Response-Smuggling-mod_proxy_uws.patch
+++ /dev/null
@@ -1,120 +0,0 @@
-From: Eric Covener
-Date: Sun, 5 Mar 2023 20:22:52 +0000
-Subject: CVE-2023-27522: HTTP Response Smuggling mod_proxy_uwsgi
-
-HTTP Response Smuggling vulnerability in Apache HTTP Server via mod_proxy_uwsgi.
-This issue affects Apache HTTP Server: from 2.4.30 through 2.4.55.
-Special characters in the origin response header can truncate/split the response forwarded to the client.
-
-mod_proxy_uwsgi: Stricter backend HTTP response parsing/validation
-
-Reviewed By: ylavic, covener, gbechis, rpluem
-
-git-svn-id: https://svn.apache.org/repos/asf/httpd/httpd/branches/2.4.x@1908094 13f79535-47bb-0310-9956-ffa450edef68
-origin: https://github.com/apache/httpd/commit/d753ea76b5972a85349b68c31b59d04c60014f2d.patch
-bug-debian: https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=1032476
-bug-debian-security: https://security-tracker.debian.org/tracker/CVE-2023-27522
-bug-cve: https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2023-27522
----
- .../proxy_uwsgi_response_validation.txt | 2 +
- modules/proxy/mod_proxy_uwsgi.c | 49 +++++++++++++++-------
- 2 files changed, 37 insertions(+), 14 deletions(-)
- create mode 100644 changes-entries/proxy_uwsgi_response_validation.txt
-
-diff --git a/changes-entries/proxy_uwsgi_response_validation.txt b/changes-entries/proxy_uwsgi_response_validation.txt
-new file mode 100644
-index 0000000..2cdb6c6
---- /dev/null
-+++ b/changes-entries/proxy_uwsgi_response_validation.txt
-@@ -0,0 +1,2 @@
-+ *) mod_proxy_uwsgi: Stricter backend HTTP response parsing/validation.
-+ [Yann Ylavic]
-diff --git a/modules/proxy/mod_proxy_uwsgi.c b/modules/proxy/mod_proxy_uwsgi.c
-index ebe16e8..9ba10b9 100644
---- a/modules/proxy/mod_proxy_uwsgi.c
-+++ b/modules/proxy/mod_proxy_uwsgi.c
-@@ -303,18 +303,16 @@ static int uwsgi_response(request_rec *r, proxy_conn_rec * backend,
- pass_bb = apr_brigade_create(r->pool, c->bucket_alloc);
-
- len = ap_getline(buffer, sizeof(buffer), rp, 1);
--
- if (len <= 0) {
-- /* oops */
-+ /* invalid or empty */
- return HTTP_INTERNAL_SERVER_ERROR;
- }
--
- backend->worker->s->read += len;
--
-- if (len >= sizeof(buffer) - 1) {
-- /* oops */
-+ if ((apr_size_t)len >= sizeof(buffer)) {
-+ /* too long */
- return HTTP_INTERNAL_SERVER_ERROR;
- }
-+
- /* Position of http status code */
- if (apr_date_checkmask(buffer, "HTTP/#.# ###*")) {
- status_start = 9;
-@@ -323,8 +321,8 @@ static int uwsgi_response(request_rec *r, proxy_conn_rec * backend,
- status_start = 7;
- }
- else {
-- /* oops */
-- return HTTP_INTERNAL_SERVER_ERROR;
-+ /* not HTTP */
-+ return HTTP_BAD_GATEWAY;
- }
- status_end = status_start + 3;
-
-@@ -344,21 +342,44 @@ static int uwsgi_response(request_rec *r, proxy_conn_rec * backend,
- }
- r->status_line = apr_pstrdup(r->pool, &buffer[status_start]);
-
-- /* start parsing headers */
-+ /* parse headers */
- while ((len = ap_getline(buffer, sizeof(buffer), rp, 1)) > 0) {
-+ if ((apr_size_t)len >= sizeof(buffer)) {
-+ /* too long */
-+ len = -1;
-+ break;
-+ }
- value = strchr(buffer, ':');
-- /* invalid header skip */
-- if (!value)
-- continue;
-- *value = '\0';
-- ++value;
-+ if (!value) {
-+ /* invalid header */
-+ len = -1;
-+ break;
-+ }
-+ *value++ = '\0';
-+ if (*ap_scan_http_token(buffer)) {
-+ /* invalid name */
-+ len = -1;
-+ break;
-+ }
- while (apr_isspace(*value))
- ++value;
- for (end = &value[strlen(value) - 1];
- end > value && apr_isspace(*end); --end)
- *end = '\0';
-+ if (*ap_scan_http_field_content(value)) {
-+ /* invalid value */
-+ len = -1;
-+ break;
-+ }
- apr_table_add(r->headers_out, buffer, value);
- }
-+ if (len < 0) {
-+ /* Reset headers, but not to NULL because things below the chain expect
-+ * this to be non NULL e.g. the ap_content_length_filter.
-+ */
-+ r->headers_out = apr_table_make(r->pool, 1);
-+ return HTTP_BAD_GATEWAY;
-+ }
-
- if ((buf = apr_table_get(r->headers_out, "Content-Type"))) {
- ap_set_content_type(r, apr_pstrdup(r->pool, buf));
diff --git a/debian/patches/0053-CVE-2023-25690-1.patch b/debian/patches/0053-CVE-2023-25690-1.patch
deleted file mode 100644
index a7370c7..0000000
--- a/debian/patches/0053-CVE-2023-25690-1.patch
+++ /dev/null
@@ -1,170 +0,0 @@
-From 8789f6bb926fa4c33b4231a8444340515c82bdff Mon Sep 17 00:00:00 2001
-From: Eric Covener
-Date: Sun, 5 Mar 2023 20:28:43 +0000
-Subject: [PATCH] [1/2] Fix CVE-2023-25690: HTTP Request Smuggling in mod_proxy*
-
- don't forward invalid query strings
-
- Submitted by: rpluem
-
-Reviewed By: covener, fielding, rpluem, gbechis
-bug: https://httpd.apache.org/security/vulnerabilities_24.html#CVE-2023-25690
-bug-debian: https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=1032476
-bug-debian-security: https://security-tracker.debian.org/tracker/CVE-2023-25690
-origin: https://svn.apache.org/repos/asf/httpd/httpd/branches/2.4.x@1908096 13f79535-47bb-0310-9956-ffa450edef68
-git-svn-id: https://svn.apache.org/repos/asf/httpd/httpd/branches/2.4.x@1908096 13f79535-47bb-0310-9956-ffa450edef68
----
- modules/http2/mod_proxy_http2.c | 10 ++++++++++
- modules/mappers/mod_rewrite.c | 22 ++++++++++++++++++++++
- modules/proxy/mod_proxy_ajp.c | 10 ++++++++++
- modules/proxy/mod_proxy_balancer.c | 10 ++++++++++
- modules/proxy/mod_proxy_http.c | 10 ++++++++++
- modules/proxy/mod_proxy_wstunnel.c | 10 ++++++++++
- 6 files changed, 72 insertions(+)
-
-diff --git a/modules/http2/mod_proxy_http2.c b/modules/http2/mod_proxy_http2.c
-index 3faf03472bb..aa299b937a5 100644
---- a/modules/http2/mod_proxy_http2.c
-+++ b/modules/http2/mod_proxy_http2.c
-@@ -158,6 +158,16 @@ static int proxy_http2_canon(request_rec *r, char *url)
- path = ap_proxy_canonenc(r->pool, url, (int)strlen(url),
- enc_path, 0, r->proxyreq);
- search = r->args;
-+ if (search && *(ap_scan_vchar_obstext(search))) {
-+ /*
-+ * We have a raw control character or a ' ' in r->args.
-+ * Correct encoding was missed.
-+ */
-+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO()
-+ "To be forwarded query string contains control "
-+ "characters or spaces");
-+ return HTTP_FORBIDDEN;
-+ }
- }
- break;
- case PROXYREQ_PROXY:
-diff --git a/modules/mappers/mod_rewrite.c b/modules/mappers/mod_rewrite.c
-index 943996560e5..f6398f19386 100644
---- a/modules/mappers/mod_rewrite.c
-+++ b/modules/mappers/mod_rewrite.c
-@@ -4729,6 +4729,17 @@ static int hook_uri2file(request_rec *r)
- unsigned skip;
- apr_size_t flen;
-
-+ if (r->args && *(ap_scan_vchar_obstext(r->args))) {
-+ /*
-+ * We have a raw control character or a ' ' in r->args.
-+ * Correct encoding was missed.
-+ */
-+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(10410)
-+ "Rewritten query string contains control "
-+ "characters or spaces");
-+ return HTTP_FORBIDDEN;
-+ }
-+
- if (ACTION_STATUS == rulestatus) {
- int n = r->status;
-
-@@ -5013,6 +5024,17 @@ static int hook_fixup(request_rec *r)
- if (rulestatus) {
- unsigned skip;
-
-+ if (r->args && *(ap_scan_vchar_obstext(r->args))) {
-+ /*
-+ * We have a raw control character or a ' ' in r->args.
-+ * Correct encoding was missed.
-+ */
-+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(10411)
-+ "Rewritten query string contains control "
-+ "characters or spaces");
-+ return HTTP_FORBIDDEN;
-+ }
-+
- if (ACTION_STATUS == rulestatus) {
- int n = r->status;
-
-diff --git a/modules/proxy/mod_proxy_ajp.c b/modules/proxy/mod_proxy_ajp.c
-index 1449acad733..e46bd903a36 100644
---- a/modules/proxy/mod_proxy_ajp.c
-+++ b/modules/proxy/mod_proxy_ajp.c
-@@ -69,6 +69,16 @@ static int proxy_ajp_canon(request_rec *r, char *url)
- path = ap_proxy_canonenc(r->pool, url, strlen(url), enc_path, 0,
- r->proxyreq);
- search = r->args;
-+ if (search && *(ap_scan_vchar_obstext(search))) {
-+ /*
-+ * We have a raw control character or a ' ' in r->args.
-+ * Correct encoding was missed.
-+ */
-+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(10406)
-+ "To be forwarded query string contains control "
-+ "characters or spaces");
-+ return HTTP_FORBIDDEN;
-+ }
- }
- if (path == NULL)
- return HTTP_BAD_REQUEST;
-diff --git a/modules/proxy/mod_proxy_balancer.c b/modules/proxy/mod_proxy_balancer.c
-index f6fb6345ae3..7f990084336 100644
---- a/modules/proxy/mod_proxy_balancer.c
-+++ b/modules/proxy/mod_proxy_balancer.c
-@@ -106,6 +106,16 @@ static int proxy_balancer_canon(request_rec *r, char *url)
- path = ap_proxy_canonenc(r->pool, url, strlen(url), enc_path, 0,
- r->proxyreq);
- search = r->args;
-+ if (search && *(ap_scan_vchar_obstext(search))) {
-+ /*
-+ * We have a raw control character or a ' ' in r->args.
-+ * Correct encoding was missed.
-+ */
-+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(10407)
-+ "To be forwarded query string contains control "
-+ "characters or spaces");
-+ return HTTP_FORBIDDEN;
-+ }
- }
- if (path == NULL)
- return HTTP_BAD_REQUEST;
-diff --git a/modules/proxy/mod_proxy_http.c b/modules/proxy/mod_proxy_http.c
-index ec4e7fb06b5..51d19a0a21b 100644
---- a/modules/proxy/mod_proxy_http.c
-+++ b/modules/proxy/mod_proxy_http.c
-@@ -125,6 +125,16 @@ static int proxy_http_canon(request_rec *r, char *url)
- path = ap_proxy_canonenc(r->pool, url, strlen(url),
- enc_path, 0, r->proxyreq);
- search = r->args;
-+ if (search && *(ap_scan_vchar_obstext(search))) {
-+ /*
-+ * We have a raw control character or a ' ' in r->args.
-+ * Correct encoding was missed.
-+ */
-+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(10408)
-+ "To be forwarded query string contains control "
-+ "characters or spaces");
-+ return HTTP_FORBIDDEN;
-+ }
- }
- break;
- case PROXYREQ_PROXY:
-diff --git a/modules/proxy/mod_proxy_wstunnel.c b/modules/proxy/mod_proxy_wstunnel.c
-index bcbba42f9a4..88f86a49dbb 100644
---- a/modules/proxy/mod_proxy_wstunnel.c
-+++ b/modules/proxy/mod_proxy_wstunnel.c
-@@ -114,6 +114,16 @@ static int proxy_wstunnel_canon(request_rec *r, char *url)
- path = ap_proxy_canonenc(r->pool, url, strlen(url), enc_path, 0,
- r->proxyreq);
- search = r->args;
-+ if (search && *(ap_scan_vchar_obstext(search))) {
-+ /*
-+ * We have a raw control character or a ' ' in r->args.
-+ * Correct encoding was missed.
-+ */
-+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(10409)
-+ "To be forwarded query string contains control "
-+ "characters or spaces");
-+ return HTTP_FORBIDDEN;
-+ }
- }
- if (path == NULL)
- return HTTP_BAD_REQUEST;
-
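
Every canon hook touched above gains the same guard: once the path has been canonicalised, r->args is checked and the request is refused with HTTP_FORBIDDEN if the query string still carries a raw control character or a space, i.e. something that should have been percent-encoded before being spliced into the proxied request line. A simplified standalone version of that predicate (the patch itself relies on ap_scan_vchar_obstext(), which appears to return a pointer to the first offending byte):

    #include <ctype.h>

    /* 1 if the query string still holds a raw control character or a
     * space, meaning correct encoding was missed upstream. */
    static int query_needs_encoding(const char *args)
    {
        const unsigned char *p = (const unsigned char *)args;
        for (; *p; ++p) {
            if (*p == ' ' || iscntrl(*p))
                return 1;
        }
        return 0;
    }

In the hunks the equivalent test is search && *(ap_scan_vchar_obstext(search)), followed by an ap_log_rerror() call and an HTTP_FORBIDDEN return.
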
diff --git a/debian/patches/0054-CVE-2023-25690-2.patch b/debian/patches/0054-CVE-2023-25690-2.patch
deleted file mode 100644
index 978be78..0000000
--- a/debian/patches/0054-CVE-2023-25690-2.patch
+++ /dev/null
@@ -1,35 +0,0 @@
-From 8b93a6512f14f5f68887ddfe677e91233ed79fb0 Mon Sep 17 00:00:00 2001
-From: Ruediger Pluem
-Date: Mon, 6 Mar 2023 10:00:09 +0000
-Subject: [PATCH] [2/2] Fix CVE-2023-25690: HTTP Request Smuggling in mod_proxy*
-
-* modules/http2/mod_proxy_http2.c: Fix missing APLOGNO.
-
-Submitted by: jorton
-Reviewed by: rpluem
-
-Note: mod_proxy_http2 is CTR on 2.4.x.
-
-bug: https://httpd.apache.org/security/vulnerabilities_24.html#CVE-2023-25690
-bug-debian: https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=1032476
-bug-debian-security: https://security-tracker.debian.org/tracker/CVE-2023-25690
-origin: https://svn.apache.org/repos/asf/httpd/httpd/branches/2.4.x@1908118 13f79535-47bb-0310-9956-ffa450edef68
-git-svn-id: https://svn.apache.org/repos/asf/httpd/httpd/branches/2.4.x@1908118 13f79535-47bb-0310-9956-ffa450edef68
----
- modules/http2/mod_proxy_http2.c | 2 +-
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
-diff --git a/modules/http2/mod_proxy_http2.c b/modules/http2/mod_proxy_http2.c
-index aa299b937a5..2a9967e5d57 100644
---- a/modules/http2/mod_proxy_http2.c
-+++ b/modules/http2/mod_proxy_http2.c
-@@ -163,7 +163,7 @@ static int proxy_http2_canon(request_rec *r, char *url)
- * We have a raw control character or a ' ' in r->args.
- * Correct encoding was missed.
- */
-- ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO()
-+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(10412)
- "To be forwarded query string contains control "
- "characters or spaces");
- return HTTP_FORBIDDEN;
-
diff --git a/debian/patches/0055-CVE-2023-25690-Regression-1.patch b/debian/patches/0055-CVE-2023-25690-Regression-1.patch
deleted file mode 100644
index d57a71c..0000000
--- a/debian/patches/0055-CVE-2023-25690-Regression-1.patch
+++ /dev/null
@@ -1,131 +0,0 @@
-From 815cf05bb2d506f44a35b65e93de393d5410c779 Mon Sep 17 00:00:00 2001
-From: Yann Ylavic
-Date: Tue, 1 Mar 2022 13:26:03 +0000
-Subject: [PATCH] mod_rewrite: URI-to-filename rewrites to transparently handle
- proxy mappings.
-
-Since mod_rewrite works on r->filename and mod_proxy's mapping=servlet|decoded
-sets its "proxy:" URL there at pre_translate_name stage (i.e. before
-mod_rewrite's translate_name hook), users have to match the full proxy URL in
-their RewriteRules to handle proxy mappings, which is not very friendly nor
-consistent with how proxy non-mapping requests have to be matched.
-
-Let's use r->filename = r->uri in hook_uri2file() for pre_trans'ed reverse
-proxy requests, and restore r->filename to its original value if the request
-was finally DECLINED (like in hook_fixup).
-
-But if a proxy mapping gets rewritten to a non-proxy request, clear any
-proxy specific r->proxyreq or r->handler so that processing continues
-accordingly.
-
-
-
-git-svn-id: https://svn.apache.org/repos/asf/httpd/httpd/trunk@1898509 13f79535-47bb-0310-9956-ffa450edef68
----
- changes-entries/rewrite_vs_proxy_mapping.txt | 2 ++
- modules/mappers/mod_rewrite.c | 38 +++++++++++++++-----
- 2 files changed, 32 insertions(+), 8 deletions(-)
- create mode 100644 changes-entries/rewrite_vs_proxy_mapping.txt
-
-Index: apache2/changes-entries/rewrite_vs_proxy_mapping.txt
-===================================================================
---- /dev/null
-+++ apache2/changes-entries/rewrite_vs_proxy_mapping.txt
-@@ -0,0 +1,2 @@
-+ *) mod_rewrite: Make URI-to-filename rewrites work transparently with
-+ proxy early mappings (mapping=servlet/decoded). [Yann Ylavic]
-\ No newline at end of file
-Index: apache2/modules/mappers/mod_rewrite.c
-===================================================================
---- apache2.orig/modules/mappers/mod_rewrite.c
-+++ apache2/modules/mappers/mod_rewrite.c
-@@ -4575,6 +4575,7 @@ static int hook_uri2file(request_rec *r)
- unsigned int port;
- int rulestatus;
- void *skipdata;
-+ char *ofilename;
- const char *oargs;
-
- /*
-@@ -4628,7 +4629,10 @@ static int hook_uri2file(request_rec *r)
- /*
- * remember the original query string for later check, since we don't
- * want to apply URL-escaping when no substitution has changed it.
-+ * also, we'll restore original r->filename if we decline this
-+ * request.
- */
-+ ofilename = r->filename;
- oargs = r->args;
-
- /*
-@@ -4671,13 +4675,15 @@ static int hook_uri2file(request_rec *r)
- apr_table_setn(r->subprocess_env, ENVVAR_SCRIPT_URI, var);
-
- if (!(saved_rulestatus = apr_table_get(r->notes,"mod_rewrite_rewritten"))) {
-- /* if filename was not initially set,
-- * we start with the requested URI
-+ /* If r->filename was not initially set or if it's a pre_trans reverse
-+ * "proxy:" scheme, we start with the requested URI.
- */
-- if (r->filename == NULL) {
-+ if (r->filename == NULL || (r->proxyreq == PROXYREQ_REVERSE &&
-+ strncmp(r->filename, "proxy:", 6) == 0)) {
- r->filename = apr_pstrdup(r->pool, r->uri);
-- rewritelog((r, 2, NULL, "init rewrite engine with requested uri %s",
-- r->filename));
-+ rewritelog((r, 2, NULL, "init rewrite engine with requested uri "
-+ "%s. Original filename = %s", r->filename,
-+ ((ofilename) ? ofilename : "n/a")));
- }
- else {
- rewritelog((r, 2, NULL, "init rewrite engine with passed filename "
-@@ -4701,6 +4707,7 @@ static int hook_uri2file(request_rec *r)
- if (rulestatus) {
- unsigned skip;
- apr_size_t flen;
-+ int to_proxyreq;
-
- if (r->args && *(ap_scan_vchar_obstext(r->args))) {
- /*
-@@ -4721,7 +4728,19 @@ static int hook_uri2file(request_rec *r)
- }
-
- flen = r->filename ? strlen(r->filename) : 0;
-- if (flen > 6 && strncmp(r->filename, "proxy:", 6) == 0) {
-+ to_proxyreq = (flen > 6 && strncmp(r->filename, "proxy:", 6) == 0);
-+
-+ /* If a pre_trans reverse "proxy:" filename gets rewritten to
-+ * a non-proxy one this is not a proxy request anymore.
-+ */
-+ if (r->proxyreq == PROXYREQ_REVERSE && !to_proxyreq) {
-+ if (r->handler && strcmp(r->handler, "proxy-server") == 0) {
-+ r->handler = NULL;
-+ }
-+ r->proxyreq = PROXYREQ_NONE;
-+ }
-+
-+ if (to_proxyreq) {
- /* it should be go on as an internal proxy request */
-
- /* check if the proxy module is enabled, so
-@@ -4888,7 +4907,9 @@ static int hook_uri2file(request_rec *r)
- }
- }
- else {
-- rewritelog((r, 1, NULL, "pass through %s", r->filename));
-+ rewritelog((r, 1, NULL, "pass through %s, filename %s",
-+ r->filename, ((ofilename) ? ofilename : "n/a")));
-+ r->filename = ofilename;
- return DECLINED;
- }
- }
-@@ -5234,7 +5255,8 @@ static int hook_fixup(request_rec *r)
- }
- }
- else {
-- rewritelog((r, 1, dconf->directory, "pass through %s", r->filename));
-+ rewritelog((r, 1, dconf->directory, "pass through %s, filename %s",
-+ r->filename, ((ofilename) ? ofilename : "n/a")));
- r->filename = ofilename;
- return DECLINED;
- }
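
The patch above is mostly lifetime bookkeeping: hook_uri2file() now remembers the incoming r->filename (which mapping=servlet|decoded may already have set to a "proxy:" URL at pre_translate_name time), rewrites against r->uri instead, drops the proxy handler if the rules rewrite the request away from the proxy, and restores the saved filename when it declines. A schematic of that flow, assuming the usual httpd module environment (sketch only; the rewrite pass itself is elided):

    static int uri2file_sketch(request_rec *r)
    {
        char *ofilename = r->filename;          /* remember the original */
        int rulestatus;

        if (r->filename == NULL
            || (r->proxyreq == PROXYREQ_REVERSE
                && strncmp(r->filename, "proxy:", 6) == 0)) {
            r->filename = apr_pstrdup(r->pool, r->uri); /* start from the URI */
        }

        /* stands in for apply_rewrite_list(); non-zero when a rule matched
         * and possibly changed r->filename */
        rulestatus = 0;

        if (rulestatus) {
            int to_proxyreq = r->filename
                              && strncmp(r->filename, "proxy:", 6) == 0;
            if (r->proxyreq == PROXYREQ_REVERSE && !to_proxyreq) {
                /* rewritten to a non-proxy target: plain request again */
                if (r->handler && strcmp(r->handler, "proxy-server") == 0)
                    r->handler = NULL;
                r->proxyreq = PROXYREQ_NONE;
            }
            return OK;
        }

        r->filename = ofilename;                /* restore before declining */
        return DECLINED;
    }
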
diff --git a/debian/patches/0056-CVE-2023-25690-Regression-2.patch b/debian/patches/0056-CVE-2023-25690-Regression-2.patch
deleted file mode 100644
index 55eaa6b..0000000
--- a/debian/patches/0056-CVE-2023-25690-Regression-2.patch
+++ /dev/null
@@ -1,138 +0,0 @@
-From 07b802c934b841d376733a3e2ecfa55f6b0ee994 Mon Sep 17 00:00:00 2001
-From: Eric Covener
-Date: Sat, 11 Mar 2023 20:57:52 +0000
-Subject: [PATCH] allow decoded chars when they will be escaped
-
-git-svn-id: https://svn.apache.org/repos/asf/httpd/httpd/trunk@1908296 13f79535-47bb-0310-9956-ffa450edef68
----
- changes-entries/rewrite-escape.diff | 3 ++
- modules/mappers/mod_rewrite.c | 45 +++++++++++++++++------------
- 2 files changed, 30 insertions(+), 18 deletions(-)
- create mode 100644 changes-entries/rewrite-escape.diff
-
-Index: apache2/changes-entries/rewrite-escape.diff
-===================================================================
---- /dev/null
-+++ apache2/changes-entries/rewrite-escape.diff
-@@ -0,0 +1,3 @@
-+ *) mod_rewrite: Re-allow some proxy and redirect substitutions flagged as
-+ 403 errors in 2.4.56. [Eric Covener]
-+
-Index: apache2/modules/mappers/mod_rewrite.c
-===================================================================
---- apache2.orig/modules/mappers/mod_rewrite.c
-+++ apache2/modules/mappers/mod_rewrite.c
-@@ -4705,14 +4705,19 @@ static int hook_uri2file(request_rec *r)
- }
-
- if (rulestatus) {
-- unsigned skip;
-- apr_size_t flen;
-- int to_proxyreq;
--
-- if (r->args && *(ap_scan_vchar_obstext(r->args))) {
-+ unsigned skip_absolute = is_absolute_uri(r->filename, NULL);
-+ apr_size_t flen = r->filename ? strlen(r->filename) : 0;
-+ int to_proxyreq = (flen > 6 && strncmp(r->filename, "proxy:", 6) == 0);
-+ int will_escape = (to_proxyreq || skip_absolute)
-+ && (rulestatus != ACTION_NOESCAPE);
-+
-+ if (r->args
-+ && !will_escape
-+ && *(ap_scan_vchar_obstext(r->args))) {
- /*
- * We have a raw control character or a ' ' in r->args.
-- * Correct encoding was missed.
-+ * Correct encoding was missed and we're not going to escape
-+ * it before returning.
- */
- ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(10410)
- "Rewritten query string contains control "
-@@ -4727,9 +4732,6 @@ static int hook_uri2file(request_rec *r)
- return n;
- }
-
-- flen = r->filename ? strlen(r->filename) : 0;
-- to_proxyreq = (flen > 6 && strncmp(r->filename, "proxy:", 6) == 0);
--
- /* If a pre_trans reverse "proxy:" filename gets rewritten to
- * a non-proxy one this is not a proxy request anymore.
- */
-@@ -4782,15 +4784,15 @@ static int hook_uri2file(request_rec *r)
- r->filename));
- return OK;
- }
-- else if ((skip = is_absolute_uri(r->filename, NULL)) > 0) {
-+ else if (skip_absolute > 0) {
- int n;
-
- /* it was finally rewritten to a remote URL */
-
- if (rulestatus != ACTION_NOESCAPE) {
- rewritelog((r, 1, NULL, "escaping %s for redirect",
-- r->filename));
-- r->filename = escape_absolute_uri(r->pool, r->filename, skip);
-+ r->filename));
-+ r->filename = escape_absolute_uri(r->pool, r->filename, skip_absolute);
- }
-
- /* append the QUERY_STRING part */
-@@ -5016,9 +5018,17 @@ static int hook_fixup(request_rec *r)
- */
- rulestatus = apply_rewrite_list(r, dconf->rewriterules, dconf->directory);
- if (rulestatus) {
-- unsigned skip;
-+ unsigned skip_absolute = is_absolute_uri(r->filename, NULL);
-+ int to_proxyreq = 0;
-+ int will_escape = 0;
-
-- if (r->args && *(ap_scan_vchar_obstext(r->args))) {
-+ l = strlen(r->filename);
-+ to_proxyreq = l > 6 && strncmp(r->filename, "proxy:", 6) == 0;
-+ will_escape = skip_absolute && (rulestatus != ACTION_NOESCAPE);
-+
-+ if (r->args
-+ && !will_escape
-+ && *(ap_scan_vchar_obstext(r->args))) {
- /*
- * We have a raw control character or a ' ' in r->args.
- * Correct encoding was missed.
-@@ -5036,8 +5046,7 @@ static int hook_fixup(request_rec *r)
- return n;
- }
-
-- l = strlen(r->filename);
-- if (l > 6 && strncmp(r->filename, "proxy:", 6) == 0) {
-+ if (to_proxyreq) {
- /* it should go on as an internal proxy request */
-
- /* make sure the QUERY_STRING and
-@@ -5061,7 +5070,7 @@ static int hook_fixup(request_rec *r)
- "%s [OK]", r->filename));
- return OK;
- }
-- else if ((skip = is_absolute_uri(r->filename, NULL)) > 0) {
-+ else if (skip_absolute > 0) {
- /* it was finally rewritten to a remote URL */
-
- /* because we are in a per-dir context
-@@ -5070,7 +5079,7 @@ static int hook_fixup(request_rec *r)
- */
- if (dconf->baseurl != NULL) {
- /* skip 'scheme://' */
-- cp = r->filename + skip;
-+ cp = r->filename + skip_absolute;
-
- if ((cp = ap_strchr(cp, '/')) != NULL && *(++cp)) {
- rewritelog((r, 2, dconf->directory,
-@@ -5114,8 +5123,8 @@ static int hook_fixup(request_rec *r)
- /* now prepare the redirect... */
- if (rulestatus != ACTION_NOESCAPE) {
- rewritelog((r, 1, dconf->directory, "escaping %s for redirect",
-- r->filename));
-- r->filename = escape_absolute_uri(r->pool, r->filename, skip);
-+ r->filename));
-+ r->filename = escape_absolute_uri(r->pool, r->filename, skip_absolute);
- }
-
- /* append the QUERY_STRING part */
diff --git a/debian/patches/0057-CVE-2023-25690-Regression-3.patch b/debian/patches/0057-CVE-2023-25690-Regression-3.patch
deleted file mode 100644
index 431f145..0000000
--- a/debian/patches/0057-CVE-2023-25690-Regression-3.patch
+++ /dev/null
@@ -1,24 +0,0 @@
-From 1a4aac3d209f4314bcb511d73cf12f8c25c8c984 Mon Sep 17 00:00:00 2001
-From: Eric Covener
-Date: Sat, 11 Mar 2023 21:29:11 +0000
-Subject: [PATCH] followup to r1908296: only for redirects
-
-git-svn-id: https://svn.apache.org/repos/asf/httpd/httpd/trunk@1908299 13f79535-47bb-0310-9956-ffa450edef68
----
- modules/mappers/mod_rewrite.c | 3 +--
- 1 file changed, 1 insertion(+), 2 deletions(-)
-
-Index: apache2/modules/mappers/mod_rewrite.c
-===================================================================
---- apache2.orig/modules/mappers/mod_rewrite.c
-+++ apache2/modules/mappers/mod_rewrite.c
-@@ -4708,8 +4708,7 @@ static int hook_uri2file(request_rec *r)
- unsigned skip_absolute = is_absolute_uri(r->filename, NULL);
- apr_size_t flen = r->filename ? strlen(r->filename) : 0;
- int to_proxyreq = (flen > 6 && strncmp(r->filename, "proxy:", 6) == 0);
-- int will_escape = (to_proxyreq || skip_absolute)
-- && (rulestatus != ACTION_NOESCAPE);
-+ int will_escape = skip_absolute && (rulestatus != ACTION_NOESCAPE);
-
- if (r->args
- && !will_escape
diff --git a/debian/patches/CVE-2006-20001.patch b/debian/patches/CVE-2006-20001.patch
deleted file mode 100644
index 0ba150b..0000000
--- a/debian/patches/CVE-2006-20001.patch
+++ /dev/null
@@ -1,37 +0,0 @@
-From 7469547c3f617717ca545d0f7c56d01134703813 Mon Sep 17 00:00:00 2001
-From: Eric Covener
-Date: Tue, 10 Jan 2023 13:21:48 +0000
-Subject: [PATCH] Merge r1906487 from trunk:
-
-* modules/dav/main/util.c (dav_process_if_header): Fix error
- path for "Not" prefix parsing.
-
-
-Submitted By: jorton
-Reviewed By: jorton, covener, rpluem
-
-
-git-svn-id: https://svn.apache.org/repos/asf/httpd/httpd/branches/2.4.x@1906543 13f79535-47bb-0310-9956-ffa450edef68
----
- STATUS | 8 --------
- modules/dav/main/util.c | 8 +++++++-
- 2 files changed, 7 insertions(+), 9 deletions(-)
-
---- a/modules/dav/main/util.c
-+++ b/modules/dav/main/util.c
-@@ -746,8 +746,14 @@
- "for the same state.");
- }
- condition = DAV_IF_COND_NOT;
-+ list += 2;
-+ }
-+ else {
-+ return dav_new_error(r->pool, HTTP_BAD_REQUEST,
-+ DAV_ERR_IF_UNK_CHAR, 0,
-+ "Invalid \"If:\" header: "
-+ "Unexpected character in List");
- }
-- list += 2;
- break;
-
- case ' ':
diff --git a/debian/patches/CVE-2019-0196.patch b/debian/patches/CVE-2019-0196.patch
deleted file mode 100644
index eaec989..0000000
--- a/debian/patches/CVE-2019-0196.patch
+++ /dev/null
@@ -1,27 +0,0 @@
-From 8de3c6f2a0df79d1476c89ec480a96f9282cea28 Mon Sep 17 00:00:00 2001
-From: Stefan Eissing
-Date: Tue, 5 Feb 2019 11:52:28 +0000
-Subject: [PATCH] Merge of r1852986 from trunk:
-
-mod_http2: disentangelment of stream and request method.
-
-
-
-git-svn-id: https://svn.apache.org/repos/asf/httpd/httpd/branches/2.4.x@1852989 13f79535-47bb-0310-9956-ffa450edef68
----
- modules/http2/h2_request.c | 2 +-
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
-diff --git a/modules/http2/h2_request.c b/modules/http2/h2_request.c
-index 8899c4feb75..5ee88e9679f 100644
---- a/modules/http2/h2_request.c
-+++ b/modules/http2/h2_request.c
-@@ -266,7 +266,7 @@ request_rec *h2_request_create_rec(const h2_request *req, conn_rec *c)
-
- /* Time to populate r with the data we have. */
- r->request_time = req->request_time;
-- r->method = req->method;
-+ r->method = apr_pstrdup(r->pool, req->method);
- /* Provide quick information about the request method as soon as known */
- r->method_number = ap_method_number_of(r->method);
- if (r->method_number == M_GET && r->method[0] == 'H') {
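
The one-line change above decouples the request from the HTTP/2 stream that produced it: r->method previously aliased a string owned by the stream's h2_request, so the two objects could not be torn down or reused independently; duplicating the method into r->pool removes that coupling. The general APR pattern, with illustrative names:

    #include <apr_pools.h>
    #include <apr_strings.h>

    struct request_sketch {
        apr_pool_t *pool;
        const char *method;
    };

    /* Copy the caller's string into the pool that owns the request, so the
     * request no longer depends on the lifetime of `method`. */
    static void set_method(struct request_sketch *r, const char *method)
    {
        r->method = apr_pstrdup(r->pool, method);
    }
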
diff --git a/debian/patches/CVE-2019-0197.patch b/debian/patches/CVE-2019-0197.patch
deleted file mode 100644
index 92d2943..0000000
--- a/debian/patches/CVE-2019-0197.patch
+++ /dev/null
@@ -1,93 +0,0 @@
-# https://svn.apache.org/r1855406
---- apache2.orig/modules/http2/h2_conn.c
-+++ apache2/modules/http2/h2_conn.c
-@@ -305,6 +305,10 @@ conn_rec *h2_slave_create(conn_rec *mast
- c->notes = apr_table_make(pool, 5);
- c->input_filters = NULL;
- c->output_filters = NULL;
-+ c->keepalives = 0;
-+#if AP_MODULE_MAGIC_AT_LEAST(20180903, 1)
-+ c->filter_conn_ctx = NULL;
-+#endif
- c->bucket_alloc = apr_bucket_alloc_create(pool);
- c->data_in_input_filters = 0;
- c->data_in_output_filters = 0;
-@@ -332,16 +336,15 @@ conn_rec *h2_slave_create(conn_rec *mast
- ap_set_module_config(c->conn_config, mpm, cfg);
- }
-
-- ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, c,
-- "h2_stream(%ld-%d): created slave", master->id, slave_id);
-+ ap_log_cerror(APLOG_MARK, APLOG_TRACE3, 0, c,
-+ "h2_slave(%s): created", c->log_id);
- return c;
- }
-
- void h2_slave_destroy(conn_rec *slave)
- {
-- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, slave,
-- "h2_stream(%s): destroy slave",
-- apr_table_get(slave->notes, H2_TASK_ID_NOTE));
-+ ap_log_cerror(APLOG_MARK, APLOG_TRACE3, 0, slave,
-+ "h2_slave(%s): destroy", slave->log_id);
- slave->sbh = NULL;
- apr_pool_destroy(slave->pool);
- }
-@@ -365,6 +368,7 @@ apr_status_t h2_slave_run_pre_connection
- slave->keepalive = AP_CONN_CLOSE;
- return ap_run_pre_connection(slave, csd);
- }
-+ ap_assert(slave->output_filters);
- return APR_SUCCESS;
- }
-
---- apache2.orig/modules/http2/h2_mplx.c
-+++ apache2/modules/http2/h2_mplx.c
-@@ -327,7 +327,8 @@ static int stream_destroy_iter(void *ctx
- && !task->rst_error);
- }
-
-- if (reuse_slave && slave->keepalive == AP_CONN_KEEPALIVE) {
-+ task->c = NULL;
-+ if (reuse_slave) {
- h2_beam_log(task->output.beam, m->c, APLOG_DEBUG,
- APLOGNO(03385) "h2_task_destroy, reuse slave");
- h2_task_destroy(task);
-@@ -437,6 +438,8 @@ void h2_mplx_release_and_join(h2_mplx *m
- apr_status_t status;
- int i, wait_secs = 60;
-
-+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, m->c,
-+ "h2_mplx(%ld): start release", m->id);
- /* How to shut down a h2 connection:
- * 0. abort and tell the workers that no more tasks will come from us */
- m->aborted = 1;
-@@ -977,6 +980,9 @@ static apr_status_t unschedule_slow_task
- */
- n = (m->tasks_active - m->limit_active - (int)h2_ihash_count(m->sredo));
- while (n > 0 && (stream = get_latest_repeatable_unsubmitted_stream(m))) {
-+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, m->c,
-+ "h2_mplx(%s): unschedule, resetting task for redo later",
-+ stream->task->id);
- h2_task_rst(stream->task, H2_ERR_CANCEL);
- h2_ihash_add(m->sredo, stream);
- --n;
---- apache2.orig/modules/http2/h2_task.c
-+++ apache2/modules/http2/h2_task.c
-@@ -504,7 +504,7 @@ static int h2_task_pre_conn(conn_rec* c,
- (void)arg;
- if (h2_ctx_is_task(ctx)) {
- ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, c,
-- "h2_h2, pre_connection, found stream task");
-+ "h2_slave(%s), pre_connection, adding filters", c->log_id);
- ap_add_input_filter("H2_SLAVE_IN", NULL, NULL, c);
- ap_add_output_filter("H2_PARSE_H1", NULL, NULL, c);
- ap_add_output_filter("H2_SLAVE_OUT", NULL, NULL, c);
-@@ -545,7 +545,6 @@ h2_task *h2_task_create(conn_rec *slave,
- void h2_task_destroy(h2_task *task)
- {
- if (task->output.beam) {
-- h2_beam_log(task->output.beam, task->c, APLOG_TRACE2, "task_destroy");
- h2_beam_destroy(task->output.beam);
- task->output.beam = NULL;
- }
diff --git a/debian/patches/CVE-2019-0211.patch b/debian/patches/CVE-2019-0211.patch
deleted file mode 100644
index 1b69f45..0000000
--- a/debian/patches/CVE-2019-0211.patch
+++ /dev/null
@@ -1,249 +0,0 @@
-From df7edb5ddae609ea1fd4285f7439f0d590d97b37 Mon Sep 17 00:00:00 2001
-From: Yann Ylavic
-Date: Wed, 13 Mar 2019 08:59:54 +0000
-Subject: [PATCH] Merge r1855306 from trunk:
-
-MPMs unix: bind the bucket number of each child to its slot number
-
-We need not remember each child's bucket number in SHM for restarts, for the
-lifetime of the httpd main process the bucket number can be bound to the slot
-number such that: bucket = slot % num_buckets.
-
-This both simplifies the logic and helps children maintenance per bucket in
-threaded MPMs, where previously perform_idle_server_maintenance() could create
-or kill children processes for the buckets it was not in charge of.
-
-Submitted by: ylavic
-Reviewed by: ylavic, rpluem, jorton
-
-
-git-svn-id: https://svn.apache.org/repos/asf/httpd/httpd/branches/2.4.x@1855378 13f79535-47bb-0310-9956-ffa450edef68
----
- CHANGES | 3 +++
- include/scoreboard.h | 4 +++-
- server/mpm/event/event.c | 13 ++++++++-----
- server/mpm/prefork/prefork.c | 19 +++++++------------
- server/mpm/worker/worker.c | 10 ++++++----
- 5 files changed, 27 insertions(+), 22 deletions(-)
-
-#diff --git a/CHANGES b/CHANGES
-#index e79251389d5..6b560802119 100644
-#--- a/CHANGES
-#+++ b/CHANGES
-#@@ -1,6 +1,9 @@
-# -*- coding: utf-8 -*-
-# Changes with Apache 2.4.39
-#
-#+ *) MPMs unix: bind the bucket number of each child to its slot number, for a
-#+ more efficient per bucket maintenance. [Yann Ylavic]
-#+
-# *) mod_auth_digest: Fix a race condition. Authentication with valid
-# credentials could be refused in case of concurrent accesses from
-# different users. PR 63124. [Simon Kappel ]
-diff --git a/include/scoreboard.h b/include/scoreboard.h
-index 9376da246b0..92d198d6de1 100644
---- a/include/scoreboard.h
-+++ b/include/scoreboard.h
-@@ -148,7 +148,9 @@ struct process_score {
- apr_uint32_t lingering_close; /* async connections in lingering close */
- apr_uint32_t keep_alive; /* async connections in keep alive */
- apr_uint32_t suspended; /* connections suspended by some module */
-- int bucket; /* Listener bucket used by this child */
-+ int bucket; /* Listener bucket used by this child; this field is DEPRECATED
-+ * and no longer updated by the MPMs (i.e. always zero).
-+ */
- };
-
- /* Scoreboard is now in 'local' memory, since it isn't updated once created,
-diff --git a/server/mpm/event/event.c b/server/mpm/event/event.c
-index 4cfb09c5b28..5e5af339adc 100644
---- a/server/mpm/event/event.c
-+++ b/server/mpm/event/event.c
-@@ -2696,7 +2696,6 @@ static int make_child(server_rec * s, int slot, int bucket)
-
- ap_scoreboard_image->parent[slot].quiescing = 0;
- ap_scoreboard_image->parent[slot].not_accepting = 0;
-- ap_scoreboard_image->parent[slot].bucket = bucket;
- event_note_child_started(slot, pid);
- active_daemons++;
- retained->total_daemons++;
-@@ -2735,6 +2734,7 @@ static void perform_idle_server_maintenance(int child_bucket, int num_buckets)
- * that threads_per_child is always > 0 */
- int status = SERVER_DEAD;
- int child_threads_active = 0;
-+ int bucket = i % num_buckets;
-
- if (i >= retained->max_daemons_limit &&
- free_length == retained->idle_spawn_rate[child_bucket]) {
-@@ -2758,7 +2758,7 @@ static void perform_idle_server_maintenance(int child_bucket, int num_buckets)
- */
- if (status <= SERVER_READY && !ps->quiescing && !ps->not_accepting
- && ps->generation == retained->mpm->my_generation
-- && ps->bucket == child_bucket)
-+ && bucket == child_bucket)
- {
- ++idle_thread_count;
- }
-@@ -2769,7 +2769,9 @@ static void perform_idle_server_maintenance(int child_bucket, int num_buckets)
- last_non_dead = i;
- }
- active_thread_count += child_threads_active;
-- if (!ps->pid && free_length < retained->idle_spawn_rate[child_bucket])
-+ if (!ps->pid
-+ && bucket == child_bucket
-+ && free_length < retained->idle_spawn_rate[child_bucket])
- free_slots[free_length++] = i;
- else if (child_threads_active == threads_per_child)
- had_healthy_child = 1;
-@@ -2962,13 +2964,14 @@ static void server_main_loop(int remaining_children_to_start, int num_buckets)
- retained->total_daemons--;
- if (processed_status == APEXIT_CHILDSICK) {
- /* resource shortage, minimize the fork rate */
-- retained->idle_spawn_rate[ps->bucket] = 1;
-+ retained->idle_spawn_rate[child_slot % num_buckets] = 1;
- }
- else if (remaining_children_to_start) {
- /* we're still doing a 1-for-1 replacement of dead
- * children with new children
- */
-- make_child(ap_server_conf, child_slot, ps->bucket);
-+ make_child(ap_server_conf, child_slot,
-+ child_slot % num_buckets);
- --remaining_children_to_start;
- }
- }
-diff --git a/server/mpm/prefork/prefork.c b/server/mpm/prefork/prefork.c
-index 8efda72ee18..7c006257301 100644
---- a/server/mpm/prefork/prefork.c
-+++ b/server/mpm/prefork/prefork.c
-@@ -637,8 +637,9 @@ static void child_main(int child_num_arg, int child_bucket)
- }
-
-
--static int make_child(server_rec *s, int slot, int bucket)
-+static int make_child(server_rec *s, int slot)
- {
-+ int bucket = slot % retained->mpm->num_buckets;
- int pid;
-
- if (slot + 1 > retained->max_daemons_limit) {
-@@ -716,7 +717,6 @@ static int make_child(server_rec *s, int slot, int bucket)
- child_main(slot, bucket);
- }
-
-- ap_scoreboard_image->parent[slot].bucket = bucket;
- prefork_note_child_started(slot, pid);
-
- return 0;
-@@ -732,7 +732,7 @@ static void startup_children(int number_to_start)
- if (ap_scoreboard_image->servers[i][0].status != SERVER_DEAD) {
- continue;
- }
-- if (make_child(ap_server_conf, i, i % retained->mpm->num_buckets) < 0) {
-+ if (make_child(ap_server_conf, i) < 0) {
- break;
- }
- --number_to_start;
-@@ -741,8 +741,6 @@ static void startup_children(int number_to_start)
-
- static void perform_idle_server_maintenance(apr_pool_t *p)
- {
-- static int bucket_make_child_record = -1;
-- static int bucket_kill_child_record = -1;
- int i;
- int idle_count;
- worker_score *ws;
-@@ -789,6 +787,7 @@ static void perform_idle_server_maintenance(apr_pool_t *p)
- }
- retained->max_daemons_limit = last_non_dead + 1;
- if (idle_count > ap_daemons_max_free) {
-+ static int bucket_kill_child_record = -1;
- /* kill off one child... we use the pod because that'll cause it to
- * shut down gracefully, in case it happened to pick up a request
- * while we were counting
-@@ -819,10 +818,7 @@ static void perform_idle_server_maintenance(apr_pool_t *p)
- idle_count, total_non_dead);
- }
- for (i = 0; i < free_length; ++i) {
-- bucket_make_child_record++;
-- bucket_make_child_record %= retained->mpm->num_buckets;
-- make_child(ap_server_conf, free_slots[i],
-- bucket_make_child_record);
-+ make_child(ap_server_conf, free_slots[i]);
- }
- /* the next time around we want to spawn twice as many if this
- * wasn't good enough, but not if we've just done a graceful
-@@ -867,7 +863,7 @@ static int prefork_run(apr_pool_t *_pconf, apr_pool_t *plog, server_rec *s)
-
- if (one_process) {
- AP_MONCONTROL(1);
-- make_child(ap_server_conf, 0, 0);
-+ make_child(ap_server_conf, 0);
- /* NOTREACHED */
- ap_assert(0);
- return !OK;
-@@ -976,8 +972,7 @@ static int prefork_run(apr_pool_t *_pconf, apr_pool_t *plog, server_rec *s)
- /* we're still doing a 1-for-1 replacement of dead
- * children with new children
- */
-- make_child(ap_server_conf, child_slot,
-- ap_get_scoreboard_process(child_slot)->bucket);
-+ make_child(ap_server_conf, child_slot);
- --remaining_children_to_start;
- }
- #if APR_HAS_OTHER_CHILD
-diff --git a/server/mpm/worker/worker.c b/server/mpm/worker/worker.c
-index 8012fe29d8d..a92794245c5 100644
---- a/server/mpm/worker/worker.c
-+++ b/server/mpm/worker/worker.c
-@@ -1339,7 +1339,6 @@ static int make_child(server_rec *s, int slot, int bucket)
- worker_note_child_lost_slot(slot, pid);
- }
- ap_scoreboard_image->parent[slot].quiescing = 0;
-- ap_scoreboard_image->parent[slot].bucket = bucket;
- worker_note_child_started(slot, pid);
- return 0;
- }
-@@ -1388,6 +1387,7 @@ static void perform_idle_server_maintenance(int child_bucket, int num_buckets)
- int any_dead_threads = 0;
- int all_dead_threads = 1;
- int child_threads_active = 0;
-+ int bucket = i % num_buckets;
-
- if (i >= retained->max_daemons_limit &&
- totally_free_length == retained->idle_spawn_rate[child_bucket]) {
-@@ -1420,7 +1420,7 @@ static void perform_idle_server_maintenance(int child_bucket, int num_buckets)
- if (status <= SERVER_READY &&
- !ps->quiescing &&
- ps->generation == retained->mpm->my_generation &&
-- ps->bucket == child_bucket) {
-+ bucket == child_bucket) {
- ++idle_thread_count;
- }
- if (status >= SERVER_READY && status < SERVER_GRACEFUL) {
-@@ -1430,6 +1430,7 @@ static void perform_idle_server_maintenance(int child_bucket, int num_buckets)
- }
- active_thread_count += child_threads_active;
- if (any_dead_threads
-+ && bucket == child_bucket
- && totally_free_length < retained->idle_spawn_rate[child_bucket]
- && free_length < MAX_SPAWN_RATE / num_buckets
- && (!ps->pid /* no process in the slot */
-@@ -1615,14 +1616,15 @@ static void server_main_loop(int remaining_children_to_start, int num_buckets)
- ps->quiescing = 0;
- if (processed_status == APEXIT_CHILDSICK) {
- /* resource shortage, minimize the fork rate */
-- retained->idle_spawn_rate[ps->bucket] = 1;
-+ retained->idle_spawn_rate[child_slot % num_buckets] = 1;
- }
- else if (remaining_children_to_start
- && child_slot < ap_daemons_limit) {
- /* we're still doing a 1-for-1 replacement of dead
- * children with new children
- */
-- make_child(ap_server_conf, child_slot, ps->bucket);
-+ make_child(ap_server_conf, child_slot,
-+ child_slot % num_buckets);
- --remaining_children_to_start;
- }
- }
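
The whole patch above follows from one identity: for the lifetime of the main httpd process a child's listener bucket can always be recomputed as bucket = slot % num_buckets, so nothing needs to be persisted in the scoreboard and each per-bucket maintenance pass works on a fixed, disjoint set of slots. A toy program showing the partitioning:

    #include <stdio.h>

    int main(void)
    {
        const int num_buckets = 3;      /* e.g. three listener buckets */
        int slot;

        /* Every slot maps to exactly one bucket; a maintenance pass for
         * bucket b only ever looks at slots with slot % num_buckets == b. */
        for (slot = 0; slot < 10; slot++)
            printf("slot %2d -> bucket %d\n", slot, slot % num_buckets);
        return 0;
    }
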
diff --git a/debian/patches/CVE-2019-0215.patch b/debian/patches/CVE-2019-0215.patch
deleted file mode 100644
index 6c0461e..0000000
--- a/debian/patches/CVE-2019-0215.patch
+++ /dev/null
@@ -1,52 +0,0 @@
-From 84edf5f49db23ced03259812bbf9426685f7d82a Mon Sep 17 00:00:00 2001
-From: Joe Orton
-Date: Wed, 20 Mar 2019 15:45:16 +0000
-Subject: [PATCH] Merge r1855849 from trunk:
-
-* modules/ssl/ssl_engine_kernel.c (ssl_hook_Access_modern): Correctly
- restore SSL verify state after PHA failure in TLSv1.3.
-
-Submitted by: Michael Kaufmann
-Reviewed by: jorton, covener, jim
-
-
-git-svn-id: https://svn.apache.org/repos/asf/httpd/httpd/branches/2.4.x@1855917 13f79535-47bb-0310-9956-ffa450edef68
----
- CHANGES | 3 +++
- modules/ssl/ssl_engine_kernel.c | 2 ++
- 2 files changed, 5 insertions(+)
-
-#diff --git a/CHANGES b/CHANGES
-#index 6b03eadfa07..6f20d688ece 100644
-#--- a/CHANGES
-#+++ b/CHANGES
-#@@ -1,6 +1,9 @@
-# -*- coding: utf-8 -*-
-# Changes with Apache 2.4.39
-#
-#+ *) mod_ssl: Correctly restore SSL verify state after TLSv1.3 PHA failure.
-#+ [Michael Kaufmann ]
-#+
-# *) mod_log_config: Support %{c}h for conn-hostname, %h for useragent_host
-# PR 55348
-#
-Index: apache2-2.4.38/modules/ssl/ssl_engine_kernel.c
-===================================================================
---- apache2-2.4.38.orig/modules/ssl/ssl_engine_kernel.c 2019-04-03 14:31:14.279214679 -0400
-+++ apache2-2.4.38/modules/ssl/ssl_engine_kernel.c 2019-04-03 14:31:14.279214679 -0400
-@@ -1154,6 +1154,7 @@ static int ssl_hook_Access_modern(reques
- ssl_log_ssl_error(SSLLOG_MARK, APLOG_ERR, r->server);
- apr_table_setn(r->notes, "error-notes",
- "Reason: Cannot perform Post-Handshake Authentication.
");
-+ SSL_set_verify(ssl, vmode_inplace, NULL);
- return HTTP_FORBIDDEN;
- }
-
-@@ -1175,6 +1176,7 @@ static int ssl_hook_Access_modern(reques
- * Finally check for acceptable renegotiation results
- */
- if (OK != (rc = ssl_check_post_client_verify(r, sc, dc, sslconn, ssl))) {
-+ SSL_set_verify(ssl, vmode_inplace, NULL);
- return rc;
- }
- }
diff --git a/debian/patches/CVE-2019-0217.patch b/debian/patches/CVE-2019-0217.patch
deleted file mode 100644
index e8f1090..0000000
--- a/debian/patches/CVE-2019-0217.patch
+++ /dev/null
@@ -1,147 +0,0 @@
-From 44b3ddc560c490c60600998fa2bf59b142d08e05 Mon Sep 17 00:00:00 2001
-From: Joe Orton
-Date: Tue, 12 Mar 2019 09:24:26 +0000
-Subject: [PATCH] Merge r1853190 from trunk:
-
-Fix a race condition. Authentication with valid credentials could be
-refused in case of concurrent accesses from different users.
-
-PR: 63124
-Submitted by: Simon Kappel
-Reviewed by: jailletc36, icing, jorton
-
-
-git-svn-id: https://svn.apache.org/repos/asf/httpd/httpd/branches/2.4.x@1855298 13f79535-47bb-0310-9956-ffa450edef68
----
- CHANGES | 4 ++++
- modules/aaa/mod_auth_digest.c | 26 ++++++++++++--------------
- 2 files changed, 16 insertions(+), 14 deletions(-)
-
-#diff --git a/CHANGES b/CHANGES
-#index 08fc740db30..e79251389d5 100644
-#--- a/CHANGES
-#+++ b/CHANGES
-#@@ -1,6 +1,10 @@
-# -*- coding: utf-8 -*-
-# Changes with Apache 2.4.39
-#
-#+ *) mod_auth_digest: Fix a race condition. Authentication with valid
-#+ credentials could be refused in case of concurrent accesses from
-#+ different users. PR 63124. [Simon Kappel ]
-#+
-# *) mod_proxy_wstunnel: Fix websocket proxy over UDS.
-# PR 62932
-#
-diff --git a/modules/aaa/mod_auth_digest.c b/modules/aaa/mod_auth_digest.c
-index a67f06986f2..b76094114dd 100644
---- a/modules/aaa/mod_auth_digest.c
-+++ b/modules/aaa/mod_auth_digest.c
-@@ -92,7 +92,6 @@ typedef struct digest_config_struct {
- int check_nc;
- const char *algorithm;
- char *uri_list;
-- const char *ha1;
- } digest_config_rec;
-
-
-@@ -153,6 +152,7 @@ typedef struct digest_header_struct {
- apr_time_t nonce_time;
- enum hdr_sts auth_hdr_sts;
- int needed_auth;
-+ const char *ha1;
- client_entry *client;
- } digest_header_rec;
-
-@@ -1304,7 +1304,7 @@ static int hook_note_digest_auth_failure(request_rec *r, const char *auth_type)
- */
-
- static authn_status get_hash(request_rec *r, const char *user,
-- digest_config_rec *conf)
-+ digest_config_rec *conf, const char **rethash)
- {
- authn_status auth_result;
- char *password;
-@@ -1356,7 +1356,7 @@ static authn_status get_hash(request_rec *r, const char *user,
- } while (current_provider);
-
- if (auth_result == AUTH_USER_FOUND) {
-- conf->ha1 = password;
-+ *rethash = password;
- }
-
- return auth_result;
-@@ -1483,25 +1483,24 @@ static int check_nonce(request_rec *r, digest_header_rec *resp,
-
- /* RFC-2069 */
- static const char *old_digest(const request_rec *r,
-- const digest_header_rec *resp, const char *ha1)
-+ const digest_header_rec *resp)
- {
- const char *ha2;
-
- ha2 = ap_md5(r->pool, (unsigned char *)apr_pstrcat(r->pool, resp->method, ":",
- resp->uri, NULL));
- return ap_md5(r->pool,
-- (unsigned char *)apr_pstrcat(r->pool, ha1, ":", resp->nonce,
-- ":", ha2, NULL));
-+ (unsigned char *)apr_pstrcat(r->pool, resp->ha1, ":",
-+ resp->nonce, ":", ha2, NULL));
- }
-
- /* RFC-2617 */
- static const char *new_digest(const request_rec *r,
-- digest_header_rec *resp,
-- const digest_config_rec *conf)
-+ digest_header_rec *resp)
- {
- const char *ha1, *ha2, *a2;
-
-- ha1 = conf->ha1;
-+ ha1 = resp->ha1;
-
- a2 = apr_pstrcat(r->pool, resp->method, ":", resp->uri, NULL);
- ha2 = ap_md5(r->pool, (const unsigned char *)a2);
-@@ -1514,7 +1513,6 @@ static const char *new_digest(const request_rec *r,
- NULL));
- }
-
--
- static void copy_uri_components(apr_uri_t *dst,
- apr_uri_t *src, request_rec *r) {
- if (src->scheme && src->scheme[0] != '\0') {
-@@ -1759,7 +1757,7 @@ static int authenticate_digest_user(request_rec *r)
- return HTTP_UNAUTHORIZED;
- }
-
-- return_code = get_hash(r, r->user, conf);
-+ return_code = get_hash(r, r->user, conf, &resp->ha1);
-
- if (return_code == AUTH_USER_NOT_FOUND) {
- ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(01790)
-@@ -1789,7 +1787,7 @@ static int authenticate_digest_user(request_rec *r)
-
- if (resp->message_qop == NULL) {
- /* old (rfc-2069) style digest */
-- if (strcmp(resp->digest, old_digest(r, resp, conf->ha1))) {
-+ if (strcmp(resp->digest, old_digest(r, resp))) {
- ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(01792)
- "user %s: password mismatch: %s", r->user,
- r->uri);
-@@ -1819,7 +1817,7 @@ static int authenticate_digest_user(request_rec *r)
- return HTTP_UNAUTHORIZED;
- }
-
-- exp_digest = new_digest(r, resp, conf);
-+ exp_digest = new_digest(r, resp);
- if (!exp_digest) {
- /* we failed to allocate a client struct */
- return HTTP_INTERNAL_SERVER_ERROR;
-@@ -1903,7 +1901,7 @@ static int add_auth_info(request_rec *r)
-
- /* calculate rspauth attribute
- */
-- ha1 = conf->ha1;
-+ ha1 = resp->ha1;
-
- a2 = apr_pstrcat(r->pool, ":", resp->uri, NULL);
- ha2 = ap_md5(r->pool, (const unsigned char *)a2);
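
The race fixed above existed because the looked-up password hash was parked in the shared digest_config_rec (conf->ha1), which concurrent requests reuse; two authentications running at once could each overwrite the other's hash. The patch routes the hash through an output parameter into the per-request digest_header_rec instead. The shape of the change, with hypothetical names:

    /* Per-request state, one instance per authentication attempt. */
    typedef struct {
        const char *ha1;            /* this request's password hash */
    } digest_request_state;

    /* Return the credential through request-owned storage, never through
     * shared configuration. */
    static int get_hash_sketch(const char *stored_password,
                               digest_request_state *resp)
    {
        if (stored_password == NULL)
            return -1;              /* user not found */
        resp->ha1 = stored_password;
        return 0;
    }
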
diff --git a/debian/patches/CVE-2019-0220-1.patch b/debian/patches/CVE-2019-0220-1.patch
deleted file mode 100644
index 021c369..0000000
--- a/debian/patches/CVE-2019-0220-1.patch
+++ /dev/null
@@ -1,278 +0,0 @@
-From 9bc1917a27a2323e535aadb081e38172ae0e3fc2 Mon Sep 17 00:00:00 2001
-From: Stefan Eissing
-Date: Mon, 18 Mar 2019 08:49:59 +0000
-Subject: [PATCH] Merge of r1855705 from trunk:
-
-core: merge consecutive slashes in the path
-
-
-
-git-svn-id: https://svn.apache.org/repos/asf/httpd/httpd/branches/2.4.x@1855737 13f79535-47bb-0310-9956-ffa450edef68
----
- CHANGES | 4 ++++
- docs/manual/mod/core.xml | 26 ++++++++++++++++++++++++++
- include/ap_mmn.h | 4 +++-
- include/http_core.h | 2 +-
- include/httpd.h | 14 ++++++++++++--
- server/core.c | 13 +++++++++++++
- server/request.c | 25 +++++++++----------------
- server/util.c | 10 +++++++---
- 8 files changed, 75 insertions(+), 23 deletions(-)
-
-#diff --git a/CHANGES b/CHANGES
-#index e3e8a98db24..9dd7045c232 100644
-#--- a/CHANGES
-#+++ b/CHANGES
-#@@ -1,6 +1,10 @@
-# -*- coding: utf-8 -*-
-# Changes with Apache 2.4.39
-#
-#+ *) core: new configuration option 'MergeSlashes on|off' that controls handling of
-#+ multiple, consecutive slash ('/') characters in the path component of the request URL.
-#+ [Eric Covener]
-#+
-# *) mod_http2: when SSL renegotiation is inhibited and a 403 ErrorDocument is
-# in play, the proper HTTP/2 stream reset did not trigger with H2_ERR_HTTP_1_1_REQUIRED.
-# Fixed. [Michael Kaufmann]
-#diff --git a/docs/manual/mod/core.xml b/docs/manual/mod/core.xml
-#index fc664116727..460b4367621 100644
-#--- a/docs/manual/mod/core.xml
-#+++ b/docs/manual/mod/core.xml
-#@@ -5138,4 +5138,30 @@ recognized methods to modules.
-# AllowMethods
-#
-#
-#+
-#+MergeSlashes
-#+Controls whether the server merges consecutive slashes in URLs.
-#+
-#+MergeSlashes ON|OFF
-#+MergeSlashes ON
-#+server configvirtual host
-#+
-#+Added in 2.5.1
-#+
-#+
-#+ By default, the server merges (or collapses) multiple consecutive slash
-#+ ('/') characters in the path component of the request URL.
-#+
-#+ When mapping URL's to the filesystem, these multiple slashes are not
-#+ significant. However, URL's handled other ways, such as by CGI or proxy,
-#+ might prefer to retain the significance of multiple consecutive slashes.
-#+ In these cases MergeSlashes can be set to
-#+ OFF to retain the multiple consecutive slashes. In these
-#+ configurations, regular expressions used in the configuration file that match
-#+ the path component of the URL (LocationMatch,
-#+ RewriteRule, ...) need to take into account multiple
-#+ consecutive slashes.
-#+
-#+
-#+
-#
-diff --git a/include/ap_mmn.h b/include/ap_mmn.h
-index 2167baa0325..4739f7f64d3 100644
---- a/include/ap_mmn.h
-+++ b/include/ap_mmn.h
-@@ -523,6 +523,8 @@
- * 20120211.82 (2.4.35-dev) Add optional function declaration for
- * ap_proxy_balancer_get_best_worker to mod_proxy.h.
- * 20120211.83 (2.4.35-dev) Add client64 field to worker_score struct
-+ * 20120211.84 (2.4.35-dev) Add ap_no2slash_ex() and merge_slashes to
-+ * core_server_conf.
- *
- */
-
-@@ -531,7 +533,7 @@
- #ifndef MODULE_MAGIC_NUMBER_MAJOR
- #define MODULE_MAGIC_NUMBER_MAJOR 20120211
- #endif
--#define MODULE_MAGIC_NUMBER_MINOR 83 /* 0...n */
-+#define MODULE_MAGIC_NUMBER_MINOR 84 /* 0...n */
-
- /**
- * Determine if the server's current MODULE_MAGIC_NUMBER is at least a
-diff --git a/include/http_core.h b/include/http_core.h
-index 35df5dc9601..8e109882244 100644
---- a/include/http_core.h
-+++ b/include/http_core.h
-@@ -740,7 +740,7 @@ typedef struct {
- #define AP_HTTP_METHODS_LENIENT 1
- #define AP_HTTP_METHODS_REGISTERED 2
- char http_methods;
--
-+ unsigned int merge_slashes;
- } core_server_config;
-
- /* for AddOutputFiltersByType in core.c */
-diff --git a/include/httpd.h b/include/httpd.h
-index 65392f83546..99f7f041aea 100644
---- a/include/httpd.h
-+++ b/include/httpd.h
-@@ -1697,11 +1697,21 @@ AP_DECLARE(int) ap_unescape_url_keep2f(char *url, int decode_slashes);
- AP_DECLARE(int) ap_unescape_urlencoded(char *query);
-
- /**
-- * Convert all double slashes to single slashes
-- * @param name The string to convert
-+ * Convert all double slashes to single slashes, except where significant
-+ * to the filesystem on the current platform.
-+ * @param name The string to convert, assumed to be a filesystem path
- */
- AP_DECLARE(void) ap_no2slash(char *name);
-
-+/**
-+ * Convert all double slashes to single slashes, except where significant
-+ * to the filesystem on the current platform.
-+ * @param name The string to convert
-+ * @param is_fs_path if set to 0, the significance of any double-slashes is
-+ * ignored.
-+ */
-+AP_DECLARE(void) ap_no2slash_ex(char *name, int is_fs_path);
-+
- /**
- * Remove all ./ and xx/../ substrings from a file name. Also remove
- * any leading ../ or /../ substrings.
-diff --git a/server/core.c b/server/core.c
-index e2a91c7a0c6..eacb54fecec 100644
---- a/server/core.c
-+++ b/server/core.c
-@@ -490,6 +490,7 @@ static void *create_core_server_config(apr_pool_t *a, server_rec *s)
-
- conf->protocols = apr_array_make(a, 5, sizeof(const char *));
- conf->protocols_honor_order = -1;
-+ conf->merge_slashes = AP_CORE_CONFIG_UNSET;
-
- return (void *)conf;
- }
-@@ -555,6 +556,7 @@ static void *merge_core_server_configs(apr_pool_t *p, void *basev, void *virtv)
- conf->protocols_honor_order = ((virt->protocols_honor_order < 0)?
- base->protocols_honor_order :
- virt->protocols_honor_order);
-+ AP_CORE_MERGE_FLAG(merge_slashes, conf, base, virt);
-
- return conf;
- }
-@@ -1863,6 +1865,13 @@ static const char *set_qualify_redirect_url(cmd_parms *cmd, void *d_, int flag)
- return NULL;
- }
-
-+static const char *set_core_server_flag(cmd_parms *cmd, void *s_, int flag)
-+{
-+ core_server_config *conf =
-+ ap_get_core_module_config(cmd->server->module_config);
-+ return ap_set_flag_slot(cmd, conf, flag);
-+}
-+
- static const char *set_override_list(cmd_parms *cmd, void *d_, int argc, char *const argv[])
- {
- core_dir_config *d = d_;
-@@ -4562,6 +4571,10 @@ AP_INIT_ITERATE("HttpProtocolOptions", set_http_protocol_options, NULL, RSRC_CON
- "'Unsafe' or 'Strict' (default). Sets HTTP acceptance rules"),
- AP_INIT_ITERATE("RegisterHttpMethod", set_http_method, NULL, RSRC_CONF,
- "Registers non-standard HTTP methods"),
-+AP_INIT_FLAG("MergeSlashes", set_core_server_flag,
-+ (void *)APR_OFFSETOF(core_server_config, merge_slashes),
-+ RSRC_CONF,
-+ "Controls whether consecutive slashes in the URI path are merged"),
- { NULL }
- };
-
-diff --git a/server/request.c b/server/request.c
-index dbe3e07f150..1ce8908824b 100644
---- a/server/request.c
-+++ b/server/request.c
-@@ -167,6 +167,8 @@ AP_DECLARE(int) ap_process_request_internal(request_rec *r)
- int file_req = (r->main && r->filename);
- int access_status;
- core_dir_config *d;
-+ core_server_config *sconf =
-+ ap_get_core_module_config(r->server->module_config);
-
- /* Ignore embedded %2F's in path for proxy requests */
- if (!r->proxyreq && r->parsed_uri.path) {
-@@ -191,6 +193,10 @@ AP_DECLARE(int) ap_process_request_internal(request_rec *r)
- }
-
- ap_getparents(r->uri); /* OK --- shrinking transformations... */
-+ if (sconf->merge_slashes != AP_CORE_CONFIG_OFF) {
-+ ap_no2slash(r->uri);
-+ ap_no2slash(r->parsed_uri.path);
-+ }
-
- /* All file subrequests are a huge pain... they cannot bubble through the
- * next several steps. Only file subrequests are allowed an empty uri,
-@@ -1411,20 +1417,7 @@ AP_DECLARE(int) ap_location_walk(request_rec *r)
-
- cache = prep_walk_cache(AP_NOTE_LOCATION_WALK, r);
- cached = (cache->cached != NULL);
--
-- /* Location and LocationMatch differ on their behaviour w.r.t. multiple
-- * slashes. Location matches multiple slashes with a single slash,
-- * LocationMatch doesn't. An exception, for backwards brokenness is
-- * absoluteURIs... in which case neither match multiple slashes.
-- */
-- if (r->uri[0] != '/') {
-- entry_uri = r->uri;
-- }
-- else {
-- char *uri = apr_pstrdup(r->pool, r->uri);
-- ap_no2slash(uri);
-- entry_uri = uri;
-- }
-+ entry_uri = r->uri;
-
- /* If we have an cache->cached location that matches r->uri,
- * and the vhost's list of locations hasn't changed, we can skip
-@@ -1491,7 +1484,7 @@ AP_DECLARE(int) ap_location_walk(request_rec *r)
- pmatch = apr_palloc(rxpool, nmatch*sizeof(ap_regmatch_t));
- }
-
-- if (ap_regexec(entry_core->r, r->uri, nmatch, pmatch, 0)) {
-+ if (ap_regexec(entry_core->r, entry_uri, nmatch, pmatch, 0)) {
- continue;
- }
-
-@@ -1501,7 +1494,7 @@ AP_DECLARE(int) ap_location_walk(request_rec *r)
- apr_table_setn(r->subprocess_env,
- ((const char **)entry_core->refs->elts)[i],
- apr_pstrndup(r->pool,
-- r->uri + pmatch[i].rm_so,
-+ entry_uri + pmatch[i].rm_so,
- pmatch[i].rm_eo - pmatch[i].rm_so));
- }
- }
-diff --git a/server/util.c b/server/util.c
-index fd7a0a14763..607c4850d86 100644
---- a/server/util.c
-+++ b/server/util.c
-@@ -561,16 +561,16 @@ AP_DECLARE(void) ap_getparents(char *name)
- name[l] = '\0';
- }
- }
--
--AP_DECLARE(void) ap_no2slash(char *name)
-+AP_DECLARE(void) ap_no2slash_ex(char *name, int is_fs_path)
- {
-+
- char *d, *s;
-
- s = d = name;
-
- #ifdef HAVE_UNC_PATHS
- /* Check for UNC names. Leave leading two slashes. */
-- if (s[0] == '/' && s[1] == '/')
-+ if (is_fs_path && s[0] == '/' && s[1] == '/')
- *d++ = *s++;
- #endif
-
-@@ -587,6 +587,10 @@ AP_DECLARE(void) ap_no2slash(char *name)
- *d = '\0';
- }
-
-+AP_DECLARE(void) ap_no2slash(char *name)
-+{
-+ ap_no2slash_ex(name, 1);
-+}
-
- /*
- * copy at most n leading directories of s into d
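
Behaviourally the backport above boils down to ap_no2slash_ex(): runs of '/' in the path are collapsed before Location matching and the rest of request processing see the URI, unless MergeSlashes OFF is configured, in which case LocationMatch/RewriteRule patterns must cope with repeated slashes themselves. A standalone version of the collapse, without the UNC special case kept for filesystem paths:

    /* Collapse consecutive '/' characters in place: "/a//b///c" -> "/a/b/c".
     * Simplified sketch; the httpd routine additionally preserves a leading
     * "//" on platforms with UNC paths when given a filesystem path. */
    static void merge_slashes(char *name)
    {
        char *d, *s;

        s = d = name;
        while (*s) {
            if ((*d++ = *s) == '/') {
                do {
                    ++s;
                } while (*s == '/');
            }
            else {
                ++s;
            }
        }
        *d = '\0';
    }
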
diff --git a/debian/patches/CVE-2019-0220-2.patch b/debian/patches/CVE-2019-0220-2.patch
deleted file mode 100644
index 0204259..0000000
--- a/debian/patches/CVE-2019-0220-2.patch
+++ /dev/null
@@ -1,50 +0,0 @@
-From c4ef468b25718a26f2b92cbea3ca093729b79331 Mon Sep 17 00:00:00 2001
-From: Eric Covener
-Date: Mon, 18 Mar 2019 12:10:15 +0000
-Subject: [PATCH] merge 1855743,1855744 ^/httpd/httpd/trunk .
-
-r->parsed_uri.path safety in recent backport
-
-*) core: fix SEGFAULT in CONNECT with recent change
- 2.4.x: svn merge -c 1855743,1855744 ^/httpd/httpd/trunk .
- +1: rpluem, icing, covener
-
-
-
-
-git-svn-id: https://svn.apache.org/repos/asf/httpd/httpd/branches/2.4.x@1855751 13f79535-47bb-0310-9956-ffa450edef68
----
- server/request.c | 4 +++-
- server/util.c | 4 ++++
- 2 files changed, 7 insertions(+), 1 deletion(-)
-
-diff --git a/server/request.c b/server/request.c
-index 1ce8908824b..d5c558afa30 100644
---- a/server/request.c
-+++ b/server/request.c
-@@ -195,7 +195,9 @@ AP_DECLARE(int) ap_process_request_internal(request_rec *r)
- ap_getparents(r->uri); /* OK --- shrinking transformations... */
- if (sconf->merge_slashes != AP_CORE_CONFIG_OFF) {
- ap_no2slash(r->uri);
-- ap_no2slash(r->parsed_uri.path);
-+ if (r->parsed_uri.path) {
-+ ap_no2slash(r->parsed_uri.path);
-+ }
- }
-
- /* All file subrequests are a huge pain... they cannot bubble through the
-diff --git a/server/util.c b/server/util.c
-index 607c4850d86..f3b17f1581e 100644
---- a/server/util.c
-+++ b/server/util.c
-@@ -566,6 +566,10 @@ AP_DECLARE(void) ap_no2slash_ex(char *name, int is_fs_path)
-
- char *d, *s;
-
-+ if (!name || !*name) {
-+ return;
-+ }
-+
- s = d = name;
-
- #ifdef HAVE_UNC_PATHS
diff --git a/debian/patches/CVE-2019-0220-3.patch b/debian/patches/CVE-2019-0220-3.patch
deleted file mode 100644
index 7b3ff6f..0000000
--- a/debian/patches/CVE-2019-0220-3.patch
+++ /dev/null
@@ -1,43 +0,0 @@
-From 3451fc2bf8708b0dc8cd6a7d0ac0fe5b6401befc Mon Sep 17 00:00:00 2001
-From: Eric Covener
-Date: Tue, 19 Mar 2019 18:01:21 +0000
-Subject: [PATCH] *) maintainer mode fix for util.c no2slash_ex trunk
- patch: http://svn.apache.org/r1855755 2.4.x patch svn merge -c 1855755
- ^/httpd/httpd/trunk . +1: covener, rpluem, jim, ylavic
-
-git-svn-id: https://svn.apache.org/repos/asf/httpd/httpd/branches/2.4.x@1855853 13f79535-47bb-0310-9956-ffa450edef68
----
- STATUS | 6 ------
- server/util.c | 2 +-
- 2 files changed, 1 insertion(+), 7 deletions(-)
-
-#diff --git a/STATUS b/STATUS
-#index ffe5d22550c..1f8cb2f7884 100644
-#--- a/STATUS
-#+++ b/STATUS
-#@@ -126,12 +126,6 @@ RELEASE SHOWSTOPPERS:
-# PATCHES ACCEPTED TO BACKPORT FROM TRUNK:
-# [ start all new proposals below, under PATCHES PROPOSED. ]
-#
-#- *) maintainer mode fix for util.c no2slash_ex
-#- trunk patch: http://svn.apache.org/r1855755
-#- 2.4.x patch svn merge -c 1855755 ^/httpd/httpd/trunk .
-#- +1: covener, rpluem, jim, ylavic
-#-
-#-
-# PATCHES PROPOSED TO BACKPORT FROM TRUNK:
-# [ New proposals should be added at the end of the list ]
-#
-diff --git a/server/util.c b/server/util.c
-index f3b17f1581e..e0c558cee2d 100644
---- a/server/util.c
-+++ b/server/util.c
-@@ -566,7 +566,7 @@ AP_DECLARE(void) ap_no2slash_ex(char *name, int is_fs_path)
-
- char *d, *s;
-
-- if (!name || !*name) {
-+ if (!*name) {
- return;
- }
-
diff --git a/debian/patches/CVE-2019-10092.patch b/debian/patches/CVE-2019-10092.patch
deleted file mode 100644
index eb3352c..0000000
--- a/debian/patches/CVE-2019-10092.patch
+++ /dev/null
@@ -1,193 +0,0 @@
-Description: Fix for CVE-2019-10092
-Author: Stefan Eissing
-Origin: upstream, https://svn.apache.org/viewvc?view=revision&revision=1864191
-Bug: https://security-tracker.debian.org/tracker/CVE-2019-10092
-Forwarded: not-needed
-Reviewed-By: Xavier Guimard
-Last-Update: 2019-10-11
-[Salvatore Bonaccorso: Add additional change from https://svn.apache.org/r1864699
-to add missing APLOGNO's in mod_proxy.c and mod_proxy_ftp.c]
---- a/modules/http/http_protocol.c
-+++ b/modules/http/http_protocol.c
-@@ -1132,13 +1132,10 @@
- "\">here.\n",
- NULL));
- case HTTP_USE_PROXY:
-- return(apr_pstrcat(p,
-- "This resource is only accessible "
-- "through the proxy\n",
-- ap_escape_html(r->pool, location),
-- "
\nYou will need to configure "
-- "your client to use that proxy.
\n",
-- NULL));
-+ return("This resource is only accessible "
-+ "through the proxy\n"
-+ "
\nYou will need to configure "
-+ "your client to use that proxy.
\n");
- case HTTP_PROXY_AUTHENTICATION_REQUIRED:
- case HTTP_UNAUTHORIZED:
- return("This server could not verify that you\n"
-@@ -1154,34 +1151,20 @@
- "error-notes",
- "
\n"));
- case HTTP_FORBIDDEN:
-- s1 = apr_pstrcat(p,
-- "You don't have permission to access ",
-- ap_escape_html(r->pool, r->uri),
-- "\non this server.
\n",
-- NULL);
-- return(add_optional_notes(r, s1, "error-notes", "
\n"));
-+ return(add_optional_notes(r, "You don't have permission to access this resource.", "error-notes", "
\n"));
- case HTTP_NOT_FOUND:
-- return(apr_pstrcat(p,
-- "The requested URL ",
-- ap_escape_html(r->pool, r->uri),
-- " was not found on this server.
\n",
-- NULL));
-+ return("The requested URL was not found on this server.
\n");
- case HTTP_METHOD_NOT_ALLOWED:
- return(apr_pstrcat(p,
- "The requested method ",
- ap_escape_html(r->pool, r->method),
-- " is not allowed for the URL ",
-- ap_escape_html(r->pool, r->uri),
-- ".
\n",
-+ " is not allowed for this URL.\n",
- NULL));
- case HTTP_NOT_ACCEPTABLE:
-- s1 = apr_pstrcat(p,
-- "An appropriate representation of the "
-- "requested resource ",
-- ap_escape_html(r->pool, r->uri),
-- " could not be found on this server.
\n",
-- NULL);
-- return(add_optional_notes(r, s1, "variant-list", ""));
-+ return(add_optional_notes(r,
-+ "An appropriate representation of the requested resource "
-+ "could not be found on this server.
\n",
-+ "variant-list", ""));
- case HTTP_MULTIPLE_CHOICES:
- return(add_optional_notes(r, "", "variant-list", ""));
- case HTTP_LENGTH_REQUIRED:
-@@ -1192,18 +1175,13 @@
- NULL);
- return(add_optional_notes(r, s1, "error-notes", "\n"));
- case HTTP_PRECONDITION_FAILED:
-- return(apr_pstrcat(p,
-- "The precondition on the request "
-- "for the URL ",
-- ap_escape_html(r->pool, r->uri),
-- " evaluated to false.
\n",
-- NULL));
-+ return("The precondition on the request "
-+ "for this URL evaluated to false.
\n");
- case HTTP_NOT_IMPLEMENTED:
- s1 = apr_pstrcat(p,
- "",
-- ap_escape_html(r->pool, r->method), " to ",
-- ap_escape_html(r->pool, r->uri),
-- " not supported.
\n",
-+ ap_escape_html(r->pool, r->method), " ",
-+ " not supported for current URL.
\n",
- NULL);
- return(add_optional_notes(r, s1, "error-notes", "
\n"));
- case HTTP_BAD_GATEWAY:
-@@ -1211,29 +1189,19 @@
- "response from an upstream server.
" CRLF;
- return(add_optional_notes(r, s1, "error-notes", "\n"));
- case HTTP_VARIANT_ALSO_VARIES:
-- return(apr_pstrcat(p,
-- "A variant for the requested "
-- "resource\n
\n",
-- ap_escape_html(r->pool, r->uri),
-- "\n
\nis itself a negotiable resource. "
-- "This indicates a configuration error.\n",
-- NULL));
-+ return("A variant for the requested "
-+ "resource\n
\n"
-+ "\n
\nis itself a negotiable resource. "
-+ "This indicates a configuration error.\n");
- case HTTP_REQUEST_TIME_OUT:
- return("Server timeout waiting for the HTTP request from the client.
\n");
- case HTTP_GONE:
-- return(apr_pstrcat(p,
--                           "The requested resource<br />",
--                           ap_escape_html(r->pool, r->uri),
--                           "<br />\nis no longer available on this server "
--                           "and there is no forwarding address.\n"
--                           "Please remove all references to this "
--                           "resource.<br />\n",
--                           NULL));
-+        return("The requested resource is no longer available on this server"
-+               " and there is no forwarding address.\n"
-+               "Please remove all references to this resource.<br />\n");
- case HTTP_REQUEST_ENTITY_TOO_LARGE:
- return(apr_pstrcat(p,
--                           "The requested resource<br />",
--                           ap_escape_html(r->pool, r->uri), "<br />\n",
--                           "does not allow request data with ",
-+                           "The requested resource does not allow request data with ",
- ap_escape_html(r->pool, r->method),
- " requests, or the amount of data provided in\n"
- "the request exceeds the capacity limit.\n",
-@@ -1317,11 +1285,9 @@
- "the Server Name Indication (SNI) in use for this\n"
- "connection.\n");
- case HTTP_UNAVAILABLE_FOR_LEGAL_REASONS:
-- s1 = apr_pstrcat(p,
-- "Access to ", ap_escape_html(r->pool, r->uri),
--                         "\nhas been denied for legal reasons.<br />\n",
--                         NULL);
--        return(add_optional_notes(r, s1, "error-notes", "<br />\n"));
-+        return(add_optional_notes(r,
-+                "Access to this URL has been denied for legal reasons.<br />\n",
-+                "error-notes", "<br />\n"));
- default: /* HTTP_INTERNAL_SERVER_ERROR */
- /*
- * This comparison to expose error-notes could be modified to
---- a/modules/proxy/mod_proxy.c
-+++ b/modules/proxy/mod_proxy.c
-@@ -1049,9 +1049,10 @@
- char *end;
- maxfwd = apr_strtoi64(str, &end, 10);
- if (maxfwd < 0 || maxfwd == APR_INT64_MAX || *end) {
-- return ap_proxyerror(r, HTTP_BAD_REQUEST,
-- apr_psprintf(r->pool,
-- "Max-Forwards value '%s' could not be parsed", str));
-+ ap_log_rerror(APLOG_MARK, APLOG_WARNING, 0, r, APLOGNO(10188)
-+ "Max-Forwards value '%s' could not be parsed", str);
-+ return ap_proxyerror(r, HTTP_BAD_REQUEST,
-+ "Max-Forwards request header could not be parsed");
- }
- else if (maxfwd == 0) {
- switch (r->method_number) {
---- a/modules/proxy/mod_proxy_ftp.c
-+++ b/modules/proxy/mod_proxy_ftp.c
-@@ -1024,8 +1024,9 @@
- /* We break the URL into host, port, path-search */
- if (r->parsed_uri.hostname == NULL) {
- if (APR_SUCCESS != apr_uri_parse(p, url, &uri)) {
-- return ap_proxyerror(r, HTTP_BAD_REQUEST,
-- apr_psprintf(p, "URI cannot be parsed: %s", url));
-+ ap_log_rerror(APLOG_MARK, APLOG_WARNING, 0, r, APLOGNO(10189)
-+ "URI cannot be parsed: %s", url);
-+ return ap_proxyerror(r, HTTP_BAD_REQUEST, "URI cannot be parsed");
- }
- connectname = uri.hostname;
- connectport = uri.port;
---- a/modules/proxy/proxy_util.c
-+++ b/modules/proxy/proxy_util.c
-@@ -368,12 +368,9 @@
-
- PROXY_DECLARE(int) ap_proxyerror(request_rec *r, int statuscode, const char *message)
- {
-- const char *uri = ap_escape_html(r->pool, r->uri);
- apr_table_setn(r->notes, "error-notes",
- apr_pstrcat(r->pool,
--                               "The proxy server could not handle the request <em><a href=\"",
--                               uri, "\">", ap_escape_html(r->pool, r->method), "&nbsp;", uri,
--                               "</a></em>.\n"
-+                               "The proxy server could not handle the request<p>"
-                                "Reason: <strong>", ap_escape_html(r->pool, message),
-                                "</strong></p>",
- NULL));
diff --git a/debian/patches/CVE-2019-10097.patch b/debian/patches/CVE-2019-10097.patch
deleted file mode 100644
index 0be05f5..0000000
--- a/debian/patches/CVE-2019-10097.patch
+++ /dev/null
@@ -1,72 +0,0 @@
-Description: Fix for CVE-2019-10097 (mod_remoteip: stack buffer overflow and NULL pointer dereference when reading a crafted PROXY protocol header)
-Author: jorton
-Origin: upstream, https://svn.apache.org/repos/asf/httpd/httpd/branches/2.4.x@1864613
-Bug: https://security-tracker.debian.org/tracker/CVE-2019-10097
-Forwarded: not-needed
-Reviewed-By: Xavier Guimard
-Last-Update: 2019-08-17
-
---- a/modules/metadata/mod_remoteip.c
-+++ b/modules/metadata/mod_remoteip.c
-@@ -987,15 +987,13 @@
- return HDR_ERROR;
- #endif
- default:
-- /* unsupported protocol, keep local connection address */
-- return HDR_DONE;
-+ /* unsupported protocol */
-+ ap_log_cerror(APLOG_MARK, APLOG_ERR, 0, c, APLOGNO(10183)
-+ "RemoteIPProxyProtocol: unsupported protocol %.2hx",
-+ (unsigned short)hdr->v2.fam);
-+ return HDR_ERROR;
- }
- break; /* we got a sockaddr now */
--
-- case 0x00: /* LOCAL command */
-- /* keep local connection address for LOCAL */
-- return HDR_DONE;
--
- default:
- /* not a supported command */
- ap_log_cerror(APLOG_MARK, APLOG_ERR, 0, c, APLOGNO(03507)
-@@ -1087,11 +1085,24 @@
- /* try to read a header's worth of data */
- while (!ctx->done) {
- if (APR_BRIGADE_EMPTY(ctx->bb)) {
-- ret = ap_get_brigade(f->next, ctx->bb, ctx->mode, block,
-- ctx->need - ctx->rcvd);
-+ apr_off_t got, want = ctx->need - ctx->rcvd;
-+
-+ ret = ap_get_brigade(f->next, ctx->bb, ctx->mode, block, want);
- if (ret != APR_SUCCESS) {
-+ ap_log_cerror(APLOG_MARK, APLOG_ERR, ret, f->c, APLOGNO(10184)
-+ "failed reading input");
- return ret;
- }
-+
-+ ret = apr_brigade_length(ctx->bb, 1, &got);
-+ if (ret || got > want) {
-+ ap_log_cerror(APLOG_MARK, APLOG_ERR, ret, f->c, APLOGNO(10185)
-+ "RemoteIPProxyProtocol header too long, "
-+ "got %" APR_OFF_T_FMT " expected %" APR_OFF_T_FMT,
-+ got, want);
-+ f->c->aborted = 1;
-+ return APR_ECONNABORTED;
-+ }
- }
- if (APR_BRIGADE_EMPTY(ctx->bb)) {
- return block == APR_NONBLOCK_READ ? APR_SUCCESS : APR_EOF;
-@@ -1139,6 +1150,13 @@
- if (ctx->rcvd >= MIN_V2_HDR_LEN) {
- ctx->need = MIN_V2_HDR_LEN +
- remoteip_get_v2_len((proxy_header *) ctx->header);
-+ if (ctx->need > sizeof(proxy_v2)) {
-+ ap_log_cerror(APLOG_MARK, APLOG_ERR, 0, f->c, APLOGNO(10186)
-+ "RemoteIPProxyProtocol protocol header length too long");
-+ f->c->aborted = 1;
-+ apr_brigade_destroy(ctx->bb);
-+ return APR_ECONNABORTED;
-+ }
- }
- if (ctx->rcvd >= ctx->need) {
- psts = remoteip_process_v2_header(f->c, conn_conf,
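
A note on the shape of the deleted mod_remoteip fix above: a PROXY protocol v2 header announces its body length in a 16-bit field, and the filter now refuses any announced length that does not fit the fixed-size header buffer before reading further. A small self-contained sketch of that bounds check follows (buffer sizes and names are illustrative assumptions, not mod_remoteip's):

    #include <stdint.h>
    #include <stdio.h>

    #define V2_SIG_LEN 12                  /* "\r\n\r\n\0\r\nQUIT\n" signature */
    #define MIN_V2_HDR (V2_SIG_LEN + 4)    /* + ver/cmd, family, 16-bit length */
    #define MAX_V2_HDR (MIN_V2_HDR + 216)  /* assumed fixed buffer size */

    /* Return the total header length once it is known and sane, 0 otherwise. */
    static size_t v2_total_len(const uint8_t *hdr, size_t have)
    {
        size_t body;
        if (have < MIN_V2_HDR)
            return 0;                              /* need more bytes first */
        body = ((size_t)hdr[14] << 8) | hdr[15];   /* length is network byte order */
        if (MIN_V2_HDR + body > MAX_V2_HDR)
            return 0;                              /* announced length exceeds our buffer */
        return MIN_V2_HDR + body;
    }

    int main(void)
    {
        uint8_t hdr[MIN_V2_HDR] = {0};
        hdr[14] = 0xff; hdr[15] = 0xff;            /* peer claims a 65535-byte body */
        printf("%zu\n", v2_total_len(hdr, sizeof(hdr)));   /* prints 0: rejected */
        return 0;
    }
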
diff --git a/debian/patches/CVE-2019-10098.patch b/debian/patches/CVE-2019-10098.patch
deleted file mode 100644
index b2c66b2..0000000
--- a/debian/patches/CVE-2019-10098.patch
+++ /dev/null
@@ -1,20 +0,0 @@
-Description: patch to set PCRE_DOTALL by default
-Author: ylavic
-Origin: upstream, https://svn.apache.org/viewvc?view=revision&revision=1864192
-Bug: https://security-tracker.debian.org/tracker/CVE-2019-10098
-Forwarded: not-needed
-Reviewed-By: Xavier Guimard
-Last-Update: 2019-08-18
-
---- a/server/util_pcre.c
-+++ b/server/util_pcre.c
-@@ -120,7 +120,8 @@
- * Compile a regular expression *
- *************************************************/
-
--static int default_cflags = AP_REG_DOLLAR_ENDONLY;
-+static int default_cflags = AP_REG_DOTALL |
-+ AP_REG_DOLLAR_ENDONLY;
-
- AP_DECLARE(int) ap_regcomp_get_default_cflags(void)
- {
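
For reference on what the one-line change above does: by default PCRE's '.' does not match a newline, so a subject carrying a decoded %0A can escape an anchored '.*' expression; AP_REG_DOTALL makes '.' cover the whole subject. A standalone PCRE2 illustration, not httpd code (link with -lpcre2-8):

    #define PCRE2_CODE_UNIT_WIDTH 8
    #include <pcre2.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Report whether pattern matches subject under the given compile options. */
    static int matches(const char *pattern, const char *subject, uint32_t options)
    {
        int errcode = 0, rc = 0;
        PCRE2_SIZE erroff = 0;
        pcre2_code *re = pcre2_compile((PCRE2_SPTR)pattern, PCRE2_ZERO_TERMINATED,
                                       options, &errcode, &erroff, NULL);
        if (re) {
            pcre2_match_data *md = pcre2_match_data_create_from_pattern(re, NULL);
            rc = pcre2_match(re, (PCRE2_SPTR)subject, PCRE2_ZERO_TERMINATED,
                             0, 0, md, NULL) > 0;
            pcre2_match_data_free(md);
            pcre2_code_free(re);
        }
        return rc;
    }

    int main(void)
    {
        const char *subject = "index.html\nhttp://evil.example/";  /* a decoded %0A */

        printf("%d\n", matches("^index\\.html.*$", subject, 0));            /* 0 */
        printf("%d\n", matches("^index\\.html.*$", subject, PCRE2_DOTALL)); /* 1 */
        return 0;
    }
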
diff --git a/debian/patches/CVE-2020-11984.patch b/debian/patches/CVE-2020-11984.patch
deleted file mode 100644
index 409f958..0000000
--- a/debian/patches/CVE-2020-11984.patch
+++ /dev/null
@@ -1,45 +0,0 @@
-Description: mod_proxy_uwsgi: error out on HTTP headers larger than 16K
- The uwsgi protocol does not let us serialize more than 16K of HTTP header,
- so fail early with 500 if it happens.
-Author: ylavic
-Origin: upstream, https://github.com/apache/httpd/commit/0c543e3f
-Bug: https://security-tracker.debian.org/tracker/CVE-2020-11984
-Forwarded: not-needed
-Reviewed-By: Xavier Guimard
-Last-Update: 2020-08-25
-
---- a/modules/proxy/mod_proxy_uwsgi.c
-+++ b/modules/proxy/mod_proxy_uwsgi.c
-@@ -136,7 +136,7 @@
- int j;
-
- apr_size_t headerlen = 4;
-- apr_uint16_t pktsize, keylen, vallen;
-+ apr_size_t pktsize, keylen, vallen;
- const char *script_name;
- const char *path_info;
- const char *auth;
-@@ -177,6 +177,14 @@
- for (j = 0; j < env_table->nelts; ++j) {
- headerlen += 2 + strlen(env[j].key) + 2 + strlen(env[j].val);
- }
-+ pktsize = headerlen - 4;
-+ if (pktsize > APR_UINT16_MAX) {
-+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(10259)
-+ "can't send headers to %s:%u: packet size too "
-+ "large (%" APR_SIZE_T_FMT ")",
-+ conn->hostname, conn->port, pktsize);
-+ return HTTP_INTERNAL_SERVER_ERROR;
-+ }
-
- ptr = buf = apr_palloc(r->pool, headerlen);
-
-@@ -196,8 +204,6 @@
- ptr += vallen;
- }
-
-- pktsize = headerlen - 4;
--
- buf[0] = 0;
- buf[1] = (apr_byte_t) (pktsize & 0xff);
- buf[2] = (apr_byte_t) ((pktsize >> 8) & 0xff);
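
Background for the size check in the deleted patch above: a uwsgi packet header is one modifier byte, a 16-bit little-endian size, and a second modifier byte, so a variables block larger than 65535 bytes cannot be framed and has to be refused before the size is silently truncated. A simplified standalone sketch of the framing (not the mod_proxy_uwsgi code):

    #include <stdint.h>
    #include <stdio.h>

    /* Write the 4-byte uwsgi header: modifier1, 16-bit LE datasize, modifier2.
     * Returns 0 if the payload cannot be represented in 16 bits. */
    static int uwsgi_pack_header(uint8_t out[4], size_t datasize)
    {
        if (datasize > UINT16_MAX)
            return 0;                      /* would silently truncate otherwise */
        out[0] = 0;                        /* modifier1 */
        out[1] = (uint8_t)(datasize & 0xff);
        out[2] = (uint8_t)((datasize >> 8) & 0xff);
        out[3] = 0;                        /* modifier2 */
        return 1;
    }

    int main(void)
    {
        uint8_t hdr[4];
        printf("%d\n", uwsgi_pack_header(hdr, 1024));    /* 1: ok */
        printf("%d\n", uwsgi_pack_header(hdr, 70000));   /* 0: refuse, like the 500 above */
        return 0;
    }
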
diff --git a/debian/patches/CVE-2020-1927.patch b/debian/patches/CVE-2020-1927.patch
deleted file mode 100644
index cbdd84f..0000000
--- a/debian/patches/CVE-2020-1927.patch
+++ /dev/null
@@ -1,92 +0,0 @@
-Description: fix for CVE-2020-1927 (mod_rewrite self-referential redirects could be fooled by encoded newlines into an open redirect)
-Author: covener
-Origin: upstream, https://svn.apache.org/r1873905
- https://svn.apache.org/r1874191
-Bug: https://security-tracker.debian.org/tracker/CVE-2020-1927
-Forwarded: not-needed
-Reviewed-By: Xavier Guimard
-Last-Update: 2020-08-25
-
---- a/include/ap_regex.h
-+++ b/include/ap_regex.h
-@@ -84,7 +84,11 @@
-
- #define AP_REG_DOLLAR_ENDONLY 0x200 /* '$' matches at end of subject string only */
-
--#define AP_REG_MATCH "MATCH_" /** suggested prefix for ap_regname */
-+#define AP_REG_NO_DEFAULT 0x400 /**< Don't implicitely add AP_REG_DEFAULT options */
-+
-+#define AP_REG_MATCH "MATCH_" /**< suggested prefix for ap_regname */
-+
-+#define AP_REG_DEFAULT (AP_REG_DOTALL|AP_REG_DOLLAR_ENDONLY)
-
- /* Error values: */
- enum {
---- a/modules/filters/mod_substitute.c
-+++ b/modules/filters/mod_substitute.c
-@@ -667,8 +667,10 @@
-
- /* first see if we can compile the regex */
- if (!is_pattern) {
-- r = ap_pregcomp(cmd->pool, from, AP_REG_EXTENDED |
-- (ignore_case ? AP_REG_ICASE : 0));
-+ int flags = AP_REG_NO_DEFAULT
-+ | (ap_regcomp_get_default_cflags() & AP_REG_DOLLAR_ENDONLY)
-+ | (ignore_case ? AP_REG_ICASE : 0);
-+ r = ap_pregcomp(cmd->pool, from, flags);
- if (!r)
- return "Substitute could not compile regex";
- }
---- a/server/core.c
-+++ b/server/core.c
-@@ -4937,7 +4937,7 @@
- apr_pool_cleanup_register(pconf, NULL, reset_config_defines,
- apr_pool_cleanup_null);
-
-- ap_regcomp_set_default_cflags(AP_REG_DOLLAR_ENDONLY);
-+ ap_regcomp_set_default_cflags(AP_REG_DEFAULT);
-
- mpm_common_pre_config(pconf);
-
---- a/server/util_pcre.c
-+++ b/server/util_pcre.c
-@@ -120,8 +120,7 @@
- * Compile a regular expression *
- *************************************************/
-
--static int default_cflags = AP_REG_DOTALL |
-- AP_REG_DOLLAR_ENDONLY;
-+static int default_cflags = AP_REG_DEFAULT;
-
- AP_DECLARE(int) ap_regcomp_get_default_cflags(void)
- {
-@@ -169,7 +168,9 @@
- int errcode = 0;
- int options = PCRE_DUPNAMES;
-
-- cflags |= default_cflags;
-+ if ((cflags & AP_REG_NO_DEFAULT) == 0)
-+ cflags |= default_cflags;
-+
- if ((cflags & AP_REG_ICASE) != 0)
- options |= PCRE_CASELESS;
- if ((cflags & AP_REG_NEWLINE) != 0)
---- a/server/util_regex.c
-+++ b/server/util_regex.c
-@@ -94,6 +94,7 @@
- }
-
- /* anything after the current delimiter is flags */
-+ ret->flags = ap_regcomp_get_default_cflags() & AP_REG_DOLLAR_ENDONLY;
- while (*++endp) {
- switch (*endp) {
- case 'i': ret->flags |= AP_REG_ICASE; break;
-@@ -106,7 +107,7 @@
- default: break; /* we should probably be stricter here */
- }
- }
-- if (ap_regcomp(&ret->rx, rxstr, ret->flags) == 0) {
-+ if (ap_regcomp(&ret->rx, rxstr, AP_REG_NO_DEFAULT | ret->flags) == 0) {
- apr_pool_cleanup_register(pool, &ret->rx, rxplus_cleanup,
- apr_pool_cleanup_null);
- }
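
The AP_REG_NO_DEFAULT bit added above is an opt-out switch: process-wide default compile flags are OR-ed in unless the caller states that it manages its own flags. A generic standalone sketch of that pattern (flag names are made up, not those of ap_regex.h):

    #include <stdio.h>

    #define RX_ICASE      0x01
    #define RX_DOTALL     0x02
    #define RX_ENDONLY    0x04
    #define RX_NO_DEFAULT 0x08   /* caller manages its own flags */

    static int default_flags = RX_DOTALL | RX_ENDONLY;

    static int effective_flags(int cflags)
    {
        if (!(cflags & RX_NO_DEFAULT))     /* same shape as the util_pcre.c hunk above */
            cflags |= default_flags;
        return cflags;
    }

    int main(void)
    {
        printf("%#x\n", effective_flags(RX_ICASE));                 /* 0x7: defaults added */
        printf("%#x\n", effective_flags(RX_NO_DEFAULT | RX_ICASE)); /* 0x9: opt-out kept */
        return 0;
    }
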
diff --git a/debian/patches/CVE-2020-1934.patch b/debian/patches/CVE-2020-1934.patch
deleted file mode 100644
index 295ab45..0000000
--- a/debian/patches/CVE-2020-1934.patch
+++ /dev/null
@@ -1,75 +0,0 @@
-Description: fix uninitialized memory when proxying to a malicious FTP server
-Author: covener
-Origin: upstream, https://svn.apache.org/viewvc?view=revision&revision=1873745
-Bug: https://security-tracker.debian.org/tracker/CVE-2020-1934
-Forwarded: not-needed
-Reviewed-By: Xavier Guimard
-Last-Update: 2020-08-25
-
---- a/modules/proxy/mod_proxy_ftp.c
-+++ b/modules/proxy/mod_proxy_ftp.c
-@@ -218,7 +218,7 @@
- * (EBCDIC) machines either.
- */
- static apr_status_t ftp_string_read(conn_rec *c, apr_bucket_brigade *bb,
-- char *buff, apr_size_t bufflen, int *eos)
-+ char *buff, apr_size_t bufflen, int *eos, apr_size_t *outlen)
- {
- apr_bucket *e;
- apr_status_t rv;
-@@ -230,6 +230,7 @@
- /* start with an empty string */
- buff[0] = 0;
- *eos = 0;
-+ *outlen = 0;
-
- /* loop through each brigade */
- while (!found) {
-@@ -273,6 +274,7 @@
- if (len > 0) {
- memcpy(pos, response, len);
- pos += len;
-+ *outlen += len;
- }
- }
- apr_bucket_delete(e);
-@@ -385,28 +387,35 @@
- char buff[5];
- char *mb = msgbuf, *me = &msgbuf[msglen];
- apr_status_t rv;
-+ apr_size_t nread;
-+
- int eos;
-
-- if (APR_SUCCESS != (rv = ftp_string_read(ftp_ctrl, bb, response, sizeof(response), &eos))) {
-+ if (APR_SUCCESS != (rv = ftp_string_read(ftp_ctrl, bb, response, sizeof(response), &eos, &nread))) {
- return -1;
- }
- /*
- ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, NULL, APLOGNO(03233)
- "<%s", response);
- */
-+ if (nread < 4) {
-+ ap_log_error(APLOG_MARK, APLOG_INFO, 0, NULL, APLOGNO(10229) "Malformed FTP response '%s'", response);
-+ *mb = '\0';
-+ return -1;
-+ }
- if (!apr_isdigit(response[0]) || !apr_isdigit(response[1]) ||
-- !apr_isdigit(response[2]) || (response[3] != ' ' && response[3] != '-'))
-+ !apr_isdigit(response[2]) || (response[3] != ' ' && response[3] != '-'))
- status = 0;
- else
- status = 100 * response[0] + 10 * response[1] + response[2] - 111 * '0';
-
- mb = apr_cpystrn(mb, response + 4, me - mb);
-
-- if (response[3] == '-') {
-+ if (response[3] == '-') { /* multi-line reply "123-foo\nbar\n123 baz" */
- memcpy(buff, response, 3);
- buff[3] = ' ';
- do {
-- if (APR_SUCCESS != (rv = ftp_string_read(ftp_ctrl, bb, response, sizeof(response), &eos))) {
-+ if (APR_SUCCESS != (rv = ftp_string_read(ftp_ctrl, bb, response, sizeof(response), &eos, &nread))) {
- return -1;
- }
- mb = apr_cpystrn(mb, response + (' ' == response[0] ? 1 : 4), me - mb);
diff --git a/debian/patches/CVE-2020-35452.patch b/debian/patches/CVE-2020-35452.patch
deleted file mode 100644
index 5204210..0000000
--- a/debian/patches/CVE-2020-35452.patch
+++ /dev/null
@@ -1,27 +0,0 @@
-Description: mod_auth_digest: reject a nonce whose time part is not correctly base64-encoded, preventing a stack overflow with a specially crafted Digest nonce (CVE-2020-35452)
-Author: Apache authors
-Origin: upstream, https://github.com/apache/httpd/commit/3b6431e
-Bug: https://httpd.apache.org/security/vulnerabilities_24.html#CVE-2020-35452
-Forwarded: not-needed
-Reviewed-By: Yadd
-Last-Update: 2021-06-10
-
---- a/modules/aaa/mod_auth_digest.c
-+++ b/modules/aaa/mod_auth_digest.c
-@@ -1422,9 +1422,14 @@
- time_rec nonce_time;
- char tmp, hash[NONCE_HASH_LEN+1];
-
-- if (strlen(resp->nonce) != NONCE_LEN) {
-+ /* Since the time part of the nonce is a base64 encoding of an
-+ * apr_time_t (8 bytes), it should end with a '=', fail early otherwise.
-+ */
-+ if (strlen(resp->nonce) != NONCE_LEN
-+ || resp->nonce[NONCE_TIME_LEN - 1] != '=') {
- ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(01775)
-- "invalid nonce %s received - length is not %d",
-+ "invalid nonce '%s' received - length is not %d "
-+ "or time encoding is incorrect",
- resp->nonce, NONCE_LEN);
- note_digest_auth_failure(r, conf, resp, 1);
- return HTTP_UNAUTHORIZED;
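
Why the deleted check can reject on a single byte: the time part of the nonce is the base64 encoding of an 8-byte apr_time_t, and base64 of 8 bytes is always 12 characters whose last character is a '=' pad. A standalone sketch of that arithmetic (the encoder below is generic, not mod_auth_digest's):

    #include <stdio.h>
    #include <string.h>

    static const char b64[] =
        "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";

    /* base64-encode n bytes, '=' padded, into out (needs 4*ceil(n/3)+1 bytes). */
    static void b64enc(const unsigned char *in, size_t n, char *out)
    {
        size_t i, o = 0;
        for (i = 0; i < n; i += 3) {
            unsigned v = in[i] << 16;
            if (i + 1 < n) v |= in[i + 1] << 8;
            if (i + 2 < n) v |= in[i + 2];
            out[o++] = b64[(v >> 18) & 63];
            out[o++] = b64[(v >> 12) & 63];
            out[o++] = (i + 1 < n) ? b64[(v >> 6) & 63] : '=';
            out[o++] = (i + 2 < n) ? b64[v & 63] : '=';
        }
        out[o] = '\0';
    }

    int main(void)
    {
        unsigned char t[8] = {1, 2, 3, 4, 5, 6, 7, 8};  /* stand-in for apr_time_t */
        char enc[13];
        b64enc(t, sizeof(t), enc);
        /* 8 bytes -> 12 chars, and the 12th is always '=' padding. */
        printf("%zu %c\n", strlen(enc), enc[11]);        /* prints: 12 = */
        return 0;
    }
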
diff --git a/debian/patches/CVE-2021-26690.patch b/debian/patches/CVE-2021-26690.patch
deleted file mode 100644
index 72c7457..0000000
--- a/debian/patches/CVE-2021-26690.patch
+++ /dev/null
@@ -1,20 +0,0 @@
-Description: mod_session: fix a NULL pointer dereference when decoding a specially crafted Cookie/session header (CVE-2021-26690)
-Author: Apache authors
-Origin: upstream, https://github.com/apache/httpd/commit/67bd9bfe
-Bug: https://httpd.apache.org/security/vulnerabilities_24.html#CVE-2021-26690
-Forwarded: not-needed
-Reviewed-By: Yadd
-Last-Update: 2021-06-10
-
---- a/modules/session/mod_session.c
-+++ b/modules/session/mod_session.c
-@@ -392,8 +392,8 @@
- char *plast = NULL;
- const char *psep = "=";
- char *key = apr_strtok(pair, psep, &plast);
-- char *val = apr_strtok(NULL, psep, &plast);
- if (key && *key) {
-+ char *val = apr_strtok(NULL, sep, &plast);
- if (!val || !*val) {
- apr_table_unset(z->entries, key);
- }
diff --git a/debian/patches/CVE-2021-26691.patch b/debian/patches/CVE-2021-26691.patch
deleted file mode 100644
index 7b96fad..0000000
--- a/debian/patches/CVE-2021-26691.patch
+++ /dev/null
@@ -1,18 +0,0 @@
-Description: mod_session: account for the '&' in identity_concat().
-Author: Apache authors
-Origin: upstream, https://github.com/apache/httpd/commit/7e09dd71
-Forwarded: not-needed
-Reviewed-By: Yadd
-Last-Update: 2021-06-10
-
---- a/modules/session/mod_session.c
-+++ b/modules/session/mod_session.c
-@@ -305,7 +305,7 @@
- static int identity_count(void *v, const char *key, const char *val)
- {
- int *count = v;
-- *count += strlen(key) * 3 + strlen(val) * 3 + 1;
-+ *count += strlen(key) * 3 + strlen(val) * 3 + 2;
- return 1;
- }
-
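
The arithmetic behind the one-byte change above: each key and value may expand threefold under URL escaping, and every encoded pair needs room for its '=' plus the '&' that joins it to the next pair, hence +2 rather than +1. A tiny standalone version of that worst-case bound (illustrative only):

    #include <stdio.h>
    #include <string.h>

    /* Worst-case size of "k=v&k=v..." when every byte of key/val expands to "%XX". */
    static size_t ident_bound(const char *keys[], const char *vals[], int n)
    {
        size_t total = 0;
        int i;
        for (i = 0; i < n; i++)
            total += strlen(keys[i]) * 3 + strlen(vals[i]) * 3 + 2; /* '=' and '&' */
        return total + 1;                                           /* trailing NUL */
    }

    int main(void)
    {
        const char *k[] = {"a", "b"};
        const char *v[] = {"=", "&"};   /* both escape to three bytes: %3D, %26 */
        /* "a=%3D&b=%26" is 11 bytes + NUL; the bound (1*3+1*3+2)*2+1 = 17 covers it. */
        printf("%zu\n", ident_bound(k, v, 2));
        return 0;
    }
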
diff --git a/debian/patches/CVE-2021-30641.patch b/debian/patches/CVE-2021-30641.patch
deleted file mode 100644
index 7486e1b..0000000
--- a/debian/patches/CVE-2021-30641.patch
+++ /dev/null
@@ -1,50 +0,0 @@
-Description: legacy default slash-matching behavior w/ 'MergeSlashes OFF'
-Author: Apache authors
-Origin: upstream, https://github.com/apache/httpd/commit/eb986059
-Bug: https://httpd.apache.org/security/vulnerabilities_24.html#CVE-2021-30641
-Forwarded: not-needed
-Reviewed-By: Yadd
-Last-Update: 2021-06-10
-
---- a/server/request.c
-+++ b/server/request.c
-@@ -1419,7 +1419,20 @@
-
- cache = prep_walk_cache(AP_NOTE_LOCATION_WALK, r);
- cached = (cache->cached != NULL);
-- entry_uri = r->uri;
-+
-+ /*
-+ * When merge_slashes is set to AP_CORE_CONFIG_OFF the slashes in r->uri
-+ * have not been merged. But for Location walks we always go with merged
-+ * slashes no matter what merge_slashes is set to.
-+ */
-+ if (sconf->merge_slashes != AP_CORE_CONFIG_OFF) {
-+ entry_uri = r->uri;
-+ }
-+ else {
-+ char *uri = apr_pstrdup(r->pool, r->uri);
-+ ap_no2slash(uri);
-+ entry_uri = uri;
-+ }
-
- /* If we have an cache->cached location that matches r->uri,
- * and the vhost's list of locations hasn't changed, we can skip
-@@ -1486,7 +1499,7 @@
- pmatch = apr_palloc(rxpool, nmatch*sizeof(ap_regmatch_t));
- }
-
-- if (ap_regexec(entry_core->r, entry_uri, nmatch, pmatch, 0)) {
-+ if (ap_regexec(entry_core->r, r->uri, nmatch, pmatch, 0)) {
- continue;
- }
-
-@@ -1496,7 +1509,7 @@
- apr_table_setn(r->subprocess_env,
- ((const char **)entry_core->refs->elts)[i],
- apr_pstrndup(r->pool,
-- entry_uri + pmatch[i].rm_so,
-+ r->uri + pmatch[i].rm_so,
- pmatch[i].rm_eo - pmatch[i].rm_so));
- }
- }
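
The deleted Location-walk change above always matches against a slash-merged copy of the URI even when 'MergeSlashes Off' leaves r->uri untouched, so a path such as //private///x cannot dodge a <Location /private> block. A standalone sketch of the merging step (a simplified stand-in for ap_no2slash(), not the httpd implementation):

    #include <stdio.h>

    /* Collapse runs of '/' in place. */
    static void no2slash(char *s)
    {
        char *d = s;
        while (*s) {
            *d++ = *s;
            if (*s == '/')
                while (*s == '/') s++;
            else
                s++;
        }
        *d = '\0';
    }

    int main(void)
    {
        char uri[] = "//private///admin";
        no2slash(uri);
        /* The raw URI may keep the doubled slashes, but the Location walk
         * compares against this merged form. */
        printf("%s\n", uri);   /* /private/admin */
        return 0;
    }
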
diff --git a/debian/patches/CVE-2021-31618.patch b/debian/patches/CVE-2021-31618.patch
deleted file mode 100644
index 12d59c8..0000000
--- a/debian/patches/CVE-2021-31618.patch
+++ /dev/null
@@ -1,20 +0,0 @@
-Description: fix NULL pointer dereference on specially crafted HTTP/2 request
-Author: Upstream
-Origin: upstream, http://svn.apache.org/viewvc/httpd/httpd/branches/2.4.x/modules/http2/h2_stream.c?r1=1889759&r2=1889758&pathrev=1889759
-Bug: https://httpd.apache.org/security/vulnerabilities_24.html#CVE-2021-31618
-Bug-Debian: https://bugs.debian.org/989562
-Forwarded: not-needed
-Reviewed-By: Yadd
-Last-Update: 2021-06-10
-
---- a/modules/http2/h2_stream.c
-+++ b/modules/http2/h2_stream.c
-@@ -638,7 +638,7 @@
-
- static void set_error_response(h2_stream *stream, int http_status)
- {
-- if (!h2_stream_is_ready(stream)) {
-+ if (!h2_stream_is_ready(stream) && stream->rtmp) {
- conn_rec *c = stream->session->c;
- apr_bucket *b;
- h2_headers *response;
diff --git a/debian/patches/CVE-2021-33193.patch b/debian/patches/CVE-2021-33193.patch
deleted file mode 100644
index d2737b8..0000000
--- a/debian/patches/CVE-2021-33193.patch
+++ /dev/null
@@ -1,702 +0,0 @@
-Description: Fix for CVE-2021-33193: mod_proxy HTTP/2 validation bypass
- A crafted method sent through HTTP/2 will bypass validation and be forwarded by
- mod_proxy, which can lead to request splitting or cache poisoning.
-Origin: other, https://git.centos.org/rpms/httpd/blob/c496dea5e0b6e82a9f503e973fc5d5ea93a94180/f/SOURCES/httpd-2.4.37-CVE-2021-33193.patch
-Forwarded: not-needed
-Applied-Upstream: 2.4.49, https://github.com/apache/httpd/commit/ecebcc035ccd8d0e2984fe41420d9e944f456b3c
-Last-Update: 2023-02-24
----
-This patch header follows DEP-3: http://dep.debian.net/deps/dep3/
---- a/include/http_core.h
-+++ b/include/http_core.h
-@@ -741,6 +741,7 @@
- #define AP_HTTP_METHODS_REGISTERED 2
- char http_methods;
- unsigned int merge_slashes;
-+ unsigned int strict_host_check;
- } core_server_config;
-
- /* for AddOutputFiltersByType in core.c */
-@@ -769,6 +770,11 @@
- typedef struct core_output_filter_ctx core_output_filter_ctx_t;
- typedef struct core_filter_ctx core_ctx_t;
-
-+struct core_filter_ctx {
-+ apr_bucket_brigade *b;
-+ apr_bucket_brigade *tmpbb;
-+};
-+
- typedef struct core_net_rec {
- /** Connection to the client */
- apr_socket_t *client_socket;
---- a/include/http_protocol.h
-+++ b/include/http_protocol.h
-@@ -54,6 +54,13 @@
- */
-
- /**
-+ * Read an empty request and set reasonable defaults.
-+ * @param c The current connection
-+ * @return The new request_rec
-+ */
-+AP_DECLARE(request_rec *) ap_create_request(conn_rec *c);
-+
-+/**
- * Read a request and fill in the fields.
- * @param c The current connection
- * @return The new request_rec
-@@ -61,6 +68,20 @@
- request_rec *ap_read_request(conn_rec *c);
-
- /**
-+ * Parse and validate the request line.
-+ * @param r The current request
-+ * @return 1 on success, 0 on failure
-+ */
-+AP_DECLARE(int) ap_parse_request_line(request_rec *r);
-+
-+/**
-+ * Validate the request header and select vhost.
-+ * @param r The current request
-+ * @return 1 on success, 0 on failure
-+ */
-+AP_DECLARE(int) ap_check_request_header(request_rec *r);
-+
-+/**
- * Read the mime-encoded headers.
- * @param r The current request
- */
---- a/include/http_vhost.h
-+++ b/include/http_vhost.h
-@@ -100,6 +100,19 @@
- AP_DECLARE(void) ap_update_vhost_from_headers(request_rec *r);
-
- /**
-+ * Updates r->server with the best name-based virtual host match, within
-+ * the chain of matching virtual hosts selected by ap_update_vhost_given_ip.
-+ * @param r The current request
-+ * @param require_match 1 to return an HTTP error if the requested hostname is
-+ * not explicitly matched to a VirtualHost.
-+ * @return return HTTP_OK unless require_match was specified and the requested
-+ * hostname did not match any ServerName, ServerAlias, or VirtualHost
-+ * address-spec.
-+ */
-+AP_DECLARE(int) ap_update_vhost_from_headers_ex(request_rec *r, int require_match);
-+
-+
-+/**
- * Match the host in the header with the hostname of the server for this
- * request.
- * @param r The current request
---- a/server/core.c
-+++ b/server/core.c
-@@ -494,6 +494,8 @@
- conf->protocols_honor_order = -1;
- conf->merge_slashes = AP_CORE_CONFIG_UNSET;
-
-+ conf->strict_host_check= AP_CORE_CONFIG_UNSET;
-+
- return (void *)conf;
- }
-
-@@ -559,7 +561,13 @@
- base->protocols_honor_order :
- virt->protocols_honor_order);
- AP_CORE_MERGE_FLAG(merge_slashes, conf, base, virt);
--
-+
-+ conf->strict_host_check = (virt->strict_host_check != AP_CORE_CONFIG_UNSET)
-+ ? virt->strict_host_check
-+ : base->strict_host_check;
-+
-+ AP_CORE_MERGE_FLAG(strict_host_check, conf, base, virt);
-+
- return conf;
- }
-
-@@ -4518,7 +4526,10 @@
- AP_INIT_FLAG("QualifyRedirectURL", set_qualify_redirect_url, NULL, OR_FILEINFO,
- "Controls whether HTTP authorization headers, normally hidden, will "
- "be passed to scripts"),
--
-+AP_INIT_FLAG("StrictHostCheck", set_core_server_flag,
-+ (void *)APR_OFFSETOF(core_server_config, strict_host_check),
-+ RSRC_CONF,
-+ "Controls whether a hostname match is required"),
- AP_INIT_TAKE1("ForceType", ap_set_string_slot_lower,
- (void *)APR_OFFSETOF(core_dir_config, mime_type), OR_FILEINFO,
- "a mime type that overrides other configured type"),
-@@ -5492,4 +5503,3 @@
- core_cmds, /* command apr_table_t */
- register_hooks /* register hooks */
- };
--
---- a/server/core_filters.c
-+++ b/server/core_filters.c
-@@ -84,11 +84,6 @@
- apr_size_t bytes_written;
- };
-
--struct core_filter_ctx {
-- apr_bucket_brigade *b;
-- apr_bucket_brigade *tmpbb;
--};
--
-
- apr_status_t ap_core_input_filter(ap_filter_t *f, apr_bucket_brigade *b,
- ap_input_mode_t mode, apr_read_type_e block,
---- a/server/protocol.c
-+++ b/server/protocol.c
-@@ -611,8 +611,15 @@
- }
-
- r->args = r->parsed_uri.query;
-- r->uri = r->parsed_uri.path ? r->parsed_uri.path
-- : apr_pstrdup(r->pool, "/");
-+ if (r->parsed_uri.path) {
-+ r->uri = r->parsed_uri.path;
-+ }
-+ else if (r->method_number == M_OPTIONS) {
-+ r->uri = apr_pstrdup(r->pool, "*");
-+ }
-+ else {
-+ r->uri = apr_pstrdup(r->pool, "/");
-+ }
-
- #if defined(OS2) || defined(WIN32)
- /* Handle path translations for OS/2 and plug security hole.
-@@ -649,13 +656,6 @@
-
- static int read_request_line(request_rec *r, apr_bucket_brigade *bb)
- {
-- enum {
-- rrl_none, rrl_badmethod, rrl_badwhitespace, rrl_excesswhitespace,
-- rrl_missinguri, rrl_baduri, rrl_badprotocol, rrl_trailingtext,
-- rrl_badmethod09, rrl_reject09
-- } deferred_error = rrl_none;
-- char *ll;
-- char *uri;
- apr_size_t len;
- int num_blank_lines = DEFAULT_LIMIT_BLANK_LINES;
- core_server_config *conf = ap_get_core_module_config(r->server->module_config);
-@@ -720,6 +720,20 @@
- }
-
- r->request_time = apr_time_now();
-+ return 1;
-+}
-+
-+AP_DECLARE(int) ap_parse_request_line(request_rec *r)
-+{
-+ core_server_config *conf = ap_get_core_module_config(r->server->module_config);
-+ int strict = (conf->http_conformance != AP_HTTP_CONFORMANCE_UNSAFE);
-+ enum {
-+ rrl_none, rrl_badmethod, rrl_badwhitespace, rrl_excesswhitespace,
-+ rrl_missinguri, rrl_baduri, rrl_badprotocol, rrl_trailingtext,
-+ rrl_badmethod09, rrl_reject09
-+ } deferred_error = rrl_none;
-+ apr_size_t len = 0;
-+ char *uri, *ll;
-
- r->method = r->the_request;
-
-@@ -751,7 +765,6 @@
- if (deferred_error == rrl_none)
- deferred_error = rrl_missinguri;
- r->protocol = uri = "";
-- len = 0;
- goto rrl_done;
- }
- else if (strict && ll[0] && apr_isspace(ll[1])
-@@ -782,7 +795,6 @@
- /* Verify URI terminated with a single SP, or mark as specific error */
- if (!ll) {
- r->protocol = "";
-- len = 0;
- goto rrl_done;
- }
- else if (strict && ll[0] && apr_isspace(ll[1])
-@@ -875,6 +887,14 @@
- r->header_only = 1;
-
- ap_parse_uri(r, uri);
-+ if (r->status == HTTP_OK
-+ && (r->parsed_uri.path != NULL)
-+ && (r->parsed_uri.path[0] != '/')
-+ && (r->method_number != M_OPTIONS
-+ || strcmp(r->parsed_uri.path, "*") != 0)) {
-+ /* Invalid request-target per RFC 7230 section 5.3 */
-+ r->status = HTTP_BAD_REQUEST;
-+ }
-
- /* With the request understood, we can consider HTTP/0.9 specific errors */
- if (r->proto_num == HTTP_VERSION(0, 9) && deferred_error == rrl_none) {
-@@ -982,6 +1002,79 @@
- return 0;
- }
-
-+AP_DECLARE(int) ap_check_request_header(request_rec *r)
-+{
-+ core_server_config *conf;
-+ int strict_host_check;
-+ const char *expect;
-+ int access_status;
-+
-+ conf = ap_get_core_module_config(r->server->module_config);
-+
-+ /* update what we think the virtual host is based on the headers we've
-+ * now read. may update status.
-+ */
-+ strict_host_check = (conf->strict_host_check == AP_CORE_CONFIG_ON);
-+ access_status = ap_update_vhost_from_headers_ex(r, strict_host_check);
-+ if (strict_host_check && access_status != HTTP_OK) {
-+ if (r->server == ap_server_conf) {
-+ ap_log_rerror(APLOG_MARK, APLOG_WARNING, 0, r, APLOGNO(10156)
-+ "Requested hostname '%s' did not match any ServerName/ServerAlias "
-+ "in the global server configuration ", r->hostname);
-+ }
-+ else {
-+ ap_log_rerror(APLOG_MARK, APLOG_WARNING, 0, r, APLOGNO(10157)
-+ "Requested hostname '%s' did not match any ServerName/ServerAlias "
-+ "in the matching virtual host (default vhost for "
-+ "current connection is %s:%u)",
-+ r->hostname, r->server->defn_name, r->server->defn_line_number);
-+ }
-+ r->status = access_status;
-+ }
-+ if (r->status != HTTP_OK) {
-+ return 0;
-+ }
-+
-+ if ((!r->hostname && (r->proto_num >= HTTP_VERSION(1, 1)))
-+ || ((r->proto_num == HTTP_VERSION(1, 1))
-+ && !apr_table_get(r->headers_in, "Host"))) {
-+ /*
-+ * Client sent us an HTTP/1.1 or later request without telling us the
-+ * hostname, either with a full URL or a Host: header. We therefore
-+ * need to (as per the 1.1 spec) send an error. As a special case,
-+ * HTTP/1.1 mentions twice (S9, S14.23) that a request MUST contain
-+ * a Host: header, and the server MUST respond with 400 if it doesn't.
-+ */
-+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(00569)
-+ "client sent HTTP/1.1 request without hostname "
-+ "(see RFC2616 section 14.23): %s", r->uri);
-+ r->status = HTTP_BAD_REQUEST;
-+ return 0;
-+ }
-+
-+ if (((expect = apr_table_get(r->headers_in, "Expect")) != NULL)
-+ && (expect[0] != '\0')) {
-+ /*
-+ * The Expect header field was added to HTTP/1.1 after RFC 2068
-+ * as a means to signal when a 100 response is desired and,
-+ * unfortunately, to signal a poor man's mandatory extension that
-+ * the server must understand or return 417 Expectation Failed.
-+ */
-+ if (ap_cstr_casecmp(expect, "100-continue") == 0) {
-+ r->expecting_100 = 1;
-+ }
-+ else {
-+ ap_log_rerror(APLOG_MARK, APLOG_INFO, 0, r, APLOGNO(00570)
-+ "client sent an unrecognized expectation value "
-+ "of Expect: %s", expect);
-+ r->status = HTTP_EXPECTATION_FAILED;
-+ return 0;
-+ }
-+ }
-+
-+ return 1;
-+}
-+
- static int table_do_fn_check_lengths(void *r_, const char *key,
- const char *value)
- {
-@@ -1265,16 +1358,10 @@
- apr_brigade_destroy(tmp_bb);
- }
-
--request_rec *ap_read_request(conn_rec *conn)
-+AP_DECLARE(request_rec *) ap_create_request(conn_rec *conn)
- {
- request_rec *r;
- apr_pool_t *p;
-- const char *expect;
-- int access_status;
-- apr_bucket_brigade *tmp_bb;
-- apr_socket_t *csd;
-- apr_interval_time_t cur_timeout;
--
-
- apr_pool_create(&p, conn->pool);
- apr_pool_tag(p, "request");
-@@ -1313,6 +1400,7 @@
- r->read_body = REQUEST_NO_BODY;
-
- r->status = HTTP_OK; /* Until further notice */
-+ r->header_only = 0;
- r->the_request = NULL;
-
- /* Begin by presuming any module can make its own path_info assumptions,
-@@ -1323,12 +1411,33 @@
- r->useragent_addr = conn->client_addr;
- r->useragent_ip = conn->client_ip;
-
-+ return r;
-+}
-+
-+/* Apply the server's timeout/config to the connection/request. */
-+static void apply_server_config(request_rec *r)
-+{
-+ apr_socket_t *csd;
-+
-+ csd = ap_get_conn_socket(r->connection);
-+ apr_socket_timeout_set(csd, r->server->timeout);
-+
-+ r->per_dir_config = r->server->lookup_defaults;
-+}
-+
-+request_rec *ap_read_request(conn_rec *conn)
-+{
-+ int access_status;
-+ apr_bucket_brigade *tmp_bb;
-+
-+ request_rec *r = ap_create_request(conn);
- tmp_bb = apr_brigade_create(r->pool, r->connection->bucket_alloc);
-
- ap_run_pre_read_request(r, conn);
-
- /* Get the request... */
-- if (!read_request_line(r, tmp_bb)) {
-+ if (!read_request_line(r, tmp_bb) || !ap_parse_request_line(r)) {
-+ apr_brigade_cleanup(tmp_bb);
- switch (r->status) {
- case HTTP_REQUEST_URI_TOO_LARGE:
- case HTTP_BAD_REQUEST:
-@@ -1344,49 +1453,38 @@
- "request failed: malformed request line");
- }
- access_status = r->status;
-- r->status = HTTP_OK;
-- ap_die(access_status, r);
-- ap_update_child_status(conn->sbh, SERVER_BUSY_LOG, r);
-- ap_run_log_transaction(r);
-- r = NULL;
-- apr_brigade_destroy(tmp_bb);
-- goto traceout;
-+ goto die_unusable_input;
-+
- case HTTP_REQUEST_TIME_OUT:
-+ /* Just log, no further action on this connection. */
- ap_update_child_status(conn->sbh, SERVER_BUSY_LOG, NULL);
- if (!r->connection->keepalives)
- ap_run_log_transaction(r);
-- apr_brigade_destroy(tmp_bb);
-- goto traceout;
-- default:
-- apr_brigade_destroy(tmp_bb);
-- r = NULL;
-- goto traceout;
-+ break;
- }
-+ /* Not worth dying with. */
-+ conn->keepalive = AP_CONN_CLOSE;
-+ apr_pool_destroy(r->pool);
-+ goto ignore;
- }
-+ apr_brigade_cleanup(tmp_bb);
-
- /* We may have been in keep_alive_timeout mode, so toggle back
- * to the normal timeout mode as we fetch the header lines,
- * as necessary.
- */
-- csd = ap_get_conn_socket(conn);
-- apr_socket_timeout_get(csd, &cur_timeout);
-- if (cur_timeout != conn->base_server->timeout) {
-- apr_socket_timeout_set(csd, conn->base_server->timeout);
-- cur_timeout = conn->base_server->timeout;
-- }
-+ apply_server_config(r);
-
- if (!r->assbackwards) {
- const char *tenc;
-
- ap_get_mime_headers_core(r, tmp_bb);
-+ apr_brigade_cleanup(tmp_bb);
- if (r->status != HTTP_OK) {
- ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(00567)
- "request failed: error reading the headers");
-- ap_send_error_response(r, 0);
-- ap_update_child_status(conn->sbh, SERVER_BUSY_LOG, r);
-- ap_run_log_transaction(r);
-- apr_brigade_destroy(tmp_bb);
-- goto traceout;
-+ access_status = r->status;
-+ goto die_unusable_input;
- }
-
- tenc = apr_table_get(r->headers_in, "Transfer-Encoding");
-@@ -1402,13 +1500,8 @@
- ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(02539)
- "client sent unknown Transfer-Encoding "
- "(%s): %s", tenc, r->uri);
-- r->status = HTTP_BAD_REQUEST;
-- conn->keepalive = AP_CONN_CLOSE;
-- ap_send_error_response(r, 0);
-- ap_update_child_status(conn->sbh, SERVER_BUSY_LOG, r);
-- ap_run_log_transaction(r);
-- apr_brigade_destroy(tmp_bb);
-- goto traceout;
-+ access_status = HTTP_BAD_REQUEST;
-+ goto die_unusable_input;
- }
-
- /* http://tools.ietf.org/html/draft-ietf-httpbis-p1-messaging-23
-@@ -1421,88 +1514,81 @@
- }
- }
-
-- apr_brigade_destroy(tmp_bb);
--
-- /* update what we think the virtual host is based on the headers we've
-- * now read. may update status.
-- */
-- ap_update_vhost_from_headers(r);
-- access_status = r->status;
--
-- /* Toggle to the Host:-based vhost's timeout mode to fetch the
-- * request body and send the response body, if needed.
-- */
-- if (cur_timeout != r->server->timeout) {
-- apr_socket_timeout_set(csd, r->server->timeout);
-- cur_timeout = r->server->timeout;
-- }
--
-- /* we may have switched to another server */
-- r->per_dir_config = r->server->lookup_defaults;
--
-- if ((!r->hostname && (r->proto_num >= HTTP_VERSION(1, 1)))
-- || ((r->proto_num == HTTP_VERSION(1, 1))
-- && !apr_table_get(r->headers_in, "Host"))) {
-- /*
-- * Client sent us an HTTP/1.1 or later request without telling us the
-- * hostname, either with a full URL or a Host: header. We therefore
-- * need to (as per the 1.1 spec) send an error. As a special case,
-- * HTTP/1.1 mentions twice (S9, S14.23) that a request MUST contain
-- * a Host: header, and the server MUST respond with 400 if it doesn't.
-- */
-- access_status = HTTP_BAD_REQUEST;
-- ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(00569)
-- "client sent HTTP/1.1 request without hostname "
-- "(see RFC2616 section 14.23): %s", r->uri);
-- }
--
- /*
- * Add the HTTP_IN filter here to ensure that ap_discard_request_body
- * called by ap_die and by ap_send_error_response works correctly on
- * status codes that do not cause the connection to be dropped and
- * in situations where the connection should be kept alive.
- */
--
- ap_add_input_filter_handle(ap_http_input_filter_handle,
- NULL, r, r->connection);
-
-- if (access_status != HTTP_OK
-- || (access_status = ap_post_read_request(r))) {
-- ap_die(access_status, r);
-- ap_update_child_status(conn->sbh, SERVER_BUSY_LOG, r);
-- ap_run_log_transaction(r);
-- r = NULL;
-- goto traceout;
-+ /* Validate Host/Expect headers and select vhost. */
-+ if (!ap_check_request_header(r)) {
-+ /* we may have switched to another server still */
-+ apply_server_config(r);
-+ access_status = r->status;
-+ goto die_before_hooks;
- }
-
-- if (((expect = apr_table_get(r->headers_in, "Expect")) != NULL)
-- && (expect[0] != '\0')) {
-- /*
-- * The Expect header field was added to HTTP/1.1 after RFC 2068
-- * as a means to signal when a 100 response is desired and,
-- * unfortunately, to signal a poor man's mandatory extension that
-- * the server must understand or return 417 Expectation Failed.
-- */
-- if (strcasecmp(expect, "100-continue") == 0) {
-- r->expecting_100 = 1;
-- }
-- else {
-- r->status = HTTP_EXPECTATION_FAILED;
-- ap_log_rerror(APLOG_MARK, APLOG_INFO, 0, r, APLOGNO(00570)
-- "client sent an unrecognized expectation value of "
-- "Expect: %s", expect);
-- ap_send_error_response(r, 0);
-- ap_update_child_status(conn->sbh, SERVER_BUSY_LOG, r);
-- ap_run_log_transaction(r);
-- goto traceout;
-- }
-+ /* we may have switched to another server */
-+ apply_server_config(r);
-+
-+ if ((access_status = ap_run_post_read_request(r))) {
-+ goto die;
- }
-
-- AP_READ_REQUEST_SUCCESS((uintptr_t)r, (char *)r->method, (char *)r->uri, (char *)r->server->defn_name, r->status);
-+ AP_READ_REQUEST_SUCCESS((uintptr_t)r, (char *)r->method,
-+ (char *)r->uri, (char *)r->server->defn_name,
-+ r->status);
-+
- return r;
-- traceout:
-+
-+ /* Everything falls through on failure */
-+
-+die_unusable_input:
-+ /* Input filters are in an undeterminate state, cleanup (including
-+ * CORE_IN's socket) such that any further attempt to read is EOF.
-+ */
-+ {
-+ ap_filter_t *f = conn->input_filters;
-+ while (f) {
-+ if (f->frec == ap_core_input_filter_handle) {
-+ core_net_rec *net = f->ctx;
-+ apr_brigade_cleanup(net->in_ctx->b);
-+ break;
-+ }
-+ ap_remove_input_filter(f);
-+ f = f->next;
-+ }
-+ conn->input_filters = r->input_filters = f;
-+ conn->keepalive = AP_CONN_CLOSE;
-+ }
-+
-+die_before_hooks:
-+ /* First call to ap_die() (non recursive) */
-+ r->status = HTTP_OK;
-+
-+die:
-+ ap_die(access_status, r);
-+
-+ /* ap_die() sent the response through the output filters, we must now
-+ * end the request with an EOR bucket for stream/pipeline accounting.
-+ */
-+ {
-+ apr_bucket_brigade *eor_bb;
-+ eor_bb = apr_brigade_create(conn->pool, conn->bucket_alloc);
-+ APR_BRIGADE_INSERT_TAIL(eor_bb,
-+ ap_bucket_eor_create(conn->bucket_alloc, r));
-+ ap_pass_brigade(conn->output_filters, eor_bb);
-+ apr_brigade_cleanup(eor_bb);
-+ }
-+
-+ignore:
-+ r = NULL;
-+
- AP_READ_REQUEST_FAILURE((uintptr_t)r);
-- return r;
-+ return NULL;
- }
-
- AP_DECLARE(int) ap_post_read_request(request_rec *r)
---- a/server/vhost.c
-+++ b/server/vhost.c
-@@ -34,6 +34,7 @@
- #include "http_vhost.h"
- #include "http_protocol.h"
- #include "http_core.h"
-+#include "http_main.h"
-
- #if APR_HAVE_ARPA_INET_H
- #include <arpa/inet.h>
-@@ -973,7 +974,13 @@
- }
-
-
--static void check_hostalias(request_rec *r)
-+/*
-+ * Updates r->server from ServerName/ServerAlias. Per the interaction
-+ * of ip and name-based vhosts, it only looks in the best match from the
-+ * connection-level ip-based matching.
-+ * Returns HTTP_BAD_REQUEST if there was no match.
-+ */
-+static int update_server_from_aliases(request_rec *r)
- {
- /*
- * Even if the request has a Host: header containing a port we ignore
-@@ -1050,11 +1057,18 @@
- goto found;
- }
-
-- return;
-+ if (!r->connection->vhost_lookup_data) {
-+ if (matches_aliases(r->server, host)) {
-+ s = r->server;
-+ goto found;
-+ }
-+ }
-+ return HTTP_BAD_REQUEST;
-
- found:
- /* s is the first matching server, we're done */
- r->server = s;
-+ return HTTP_OK;
- }
-
-
-@@ -1071,7 +1085,7 @@
- * This is in conjunction with the ServerPath code in http_core, so we
- * get the right host attached to a non- Host-sending request.
- *
-- * See the comment in check_hostalias about how each vhost can be
-+ * See the comment in update_server_from_aliases about how each vhost can be
- * listed multiple times.
- */
-
-@@ -1135,10 +1149,16 @@
-
- AP_DECLARE(void) ap_update_vhost_from_headers(request_rec *r)
- {
-+ ap_update_vhost_from_headers_ex(r, 0);
-+}
-+
-+AP_DECLARE(int) ap_update_vhost_from_headers_ex(request_rec *r, int require_match)
-+{
- core_server_config *conf = ap_get_core_module_config(r->server->module_config);
- const char *host_header = apr_table_get(r->headers_in, "Host");
- int is_v6literal = 0;
- int have_hostname_from_url = 0;
-+ int rc = HTTP_OK;
-
- if (r->hostname) {
- /*
-@@ -1151,8 +1171,8 @@
- else if (host_header != NULL) {
- is_v6literal = fix_hostname(r, host_header, conf->http_conformance);
- }
-- if (r->status != HTTP_OK)
-- return;
-+ if (!require_match && r->status != HTTP_OK)
-+ return HTTP_OK;
-
- if (conf->http_conformance != AP_HTTP_CONFORMANCE_UNSAFE) {
- /*
-@@ -1173,10 +1193,16 @@
- /* check if we tucked away a name_chain */
- if (r->connection->vhost_lookup_data) {
- if (r->hostname)
-- check_hostalias(r);
-+ rc = update_server_from_aliases(r);
- else
- check_serverpath(r);
- }
-+ else if (require_match && r->hostname) {
-+ /* check the base server config */
-+ rc = update_server_from_aliases(r);
-+ }
-+
-+ return rc;
- }
-
- /**
diff --git a/debian/patches/CVE-2021-34798.patch b/debian/patches/CVE-2021-34798.patch
deleted file mode 100644
index bd6261a..0000000
--- a/debian/patches/CVE-2021-34798.patch
+++ /dev/null
@@ -1,40 +0,0 @@
-Description: Initialize the request fields on read failure to avoid NULLs
-Origin: upstream, https://github.com/apache/httpd/commit/74c097f0,
- https://github.com/apache/httpd/commit/6945bb2
-Bug: https://security-tracker.debian.org/tracker/CVE-2021-34798
-Forwarded: not-needed
-Reviewed-By: Yadd
-Last-Update: 2021-09-21
-
---- a/server/protocol.c
-+++ b/server/protocol.c
-@@ -643,6 +643,8 @@
- return end - field;
- }
-
-+static const char m_invalid_str[] = "-";
-+
- static int read_request_line(request_rec *r, apr_bucket_brigade *bb)
- {
- enum {
-@@ -685,6 +687,11 @@
- if (rv != APR_SUCCESS) {
- r->request_time = apr_time_now();
-
-+ /* Fall through with an invalid (non NULL) request */
-+ r->method = m_invalid_str;
-+ r->method_number = M_INVALID;
-+ r->uri = r->unparsed_uri = apr_pstrdup(r->pool, "-");
-+
- /* ap_rgetline returns APR_ENOSPC if it fills up the
- * buffer before finding the end-of-line. This is only going to
- * happen if it exceeds the configured limit for a request-line.
-@@ -1330,7 +1337,7 @@
- "request failed: client's request-line exceeds LimitRequestLine (longer than %d)",
- r->server->limit_req_line);
- }
-- else if (r->method == NULL) {
-+ else if (r->method == m_invalid_str) {
- ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(00566)
- "request failed: malformed request line");
- }
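
The deleted fix above points r->method at a static marker string on a failed read and later tests it by pointer identity (r->method == m_invalid_str), so the failure path is recognised without dereferencing NULL and without being confused with a request whose method merely spells "-". A minimal standalone illustration of the sentinel-pointer idiom (names are made up):

    #include <stdio.h>
    #include <string.h>

    static const char invalid_marker[] = "-";

    struct req { const char *method; };

    static void mark_invalid(struct req *r) { r->method = invalid_marker; }

    int main(void)
    {
        struct req bad, odd;
        mark_invalid(&bad);
        odd.method = "-";                       /* same text, different storage */

        printf("%d\n", bad.method == invalid_marker);             /* 1: detected by identity */
        printf("%d\n", odd.method == invalid_marker);             /* 0: not the sentinel */
        printf("%d\n", strcmp(odd.method, invalid_marker) == 0);  /* 1: strcmp would conflate */
        return 0;
    }
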
diff --git a/debian/patches/CVE-2021-36160-2.patch b/debian/patches/CVE-2021-36160-2.patch
deleted file mode 100644
index cad5774..0000000
--- a/debian/patches/CVE-2021-36160-2.patch
+++ /dev/null
@@ -1,32 +0,0 @@
-Description: mod_proxy_uwsgi: Remove duplicate slashes at the beginning of PATH_INFO.
- Relaxes the behaviour introduced by the CVE-2021-36160 fix
-Author: Stefan Eissing
-Origin: upstream, https://github.com/apache/httpd/commit/8966e290a
-Forwarded: not-needed
-Reviewed-By: Yadd
-Last-Update: 2021-12-21
-
---- a/modules/proxy/mod_proxy_uwsgi.c
-+++ b/modules/proxy/mod_proxy_uwsgi.c
-@@ -467,11 +467,20 @@
-
- /* ADD PATH_INFO (unescaped) */
- u_path_info = ap_strchr(url + sizeof(UWSGI_SCHEME) + 2, '/');
-- if (!u_path_info || ap_unescape_url(u_path_info) != OK) {
-+ if (!u_path_info) {
-+ u_path_info = apr_pstrdup(r->pool, "/");
-+ }
-+ else if (ap_unescape_url(u_path_info) != OK) {
- ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(10100)
- "unable to decode uwsgi uri: %s", url);
- return HTTP_INTERNAL_SERVER_ERROR;
- }
-+ else {
-+ /* Remove duplicate slashes at the beginning of PATH_INFO */
-+ while (u_path_info[1] == '/') {
-+ u_path_info++;
-+ }
-+ }
- apr_table_add(r->subprocess_env, "PATH_INFO", u_path_info);
-
-
diff --git a/debian/patches/CVE-2021-36160.patch b/debian/patches/CVE-2021-36160.patch
deleted file mode 100644
index fcd8087..0000000
--- a/debian/patches/CVE-2021-36160.patch
+++ /dev/null
@@ -1,51 +0,0 @@
-Description: mod_proxy_uwsgi: Fix PATH_INFO setting for generic worker
-Author: Yann Ylavic
-Origin: upstream, https://github.com/apache/httpd/commit/b364cad7
-Bug: https://security-tracker.debian.org/tracker/CVE-2021-36160
-Forwarded: not-needed
-Reviewed-By: Yadd
-Last-Update: 2021-09-21
-
---- a/modules/proxy/mod_proxy_uwsgi.c
-+++ b/modules/proxy/mod_proxy_uwsgi.c
-@@ -452,11 +452,8 @@
- const char *proxyname, apr_port_t proxyport)
- {
- int status;
-- int delta = 0;
-- int decode_status;
- proxy_conn_rec *backend = NULL;
- apr_pool_t *p = r->pool;
-- size_t w_len;
- char server_portstr[32];
- char *u_path_info;
- apr_uri_t *uri;
-@@ -468,23 +465,14 @@
-
- uri = apr_palloc(r->pool, sizeof(*uri));
-
-- /* ADD PATH_INFO */
--#if AP_MODULE_MAGIC_AT_LEAST(20111130,0)
-- w_len = strlen(worker->s->name);
--#else
-- w_len = strlen(worker->name);
--#endif
-- u_path_info = r->filename + 6 + w_len;
-- if (u_path_info[0] != '/') {
-- delta = 1;
-- }
-- decode_status = ap_unescape_url(url + w_len - delta);
-- if (decode_status) {
-+ /* ADD PATH_INFO (unescaped) */
-+ u_path_info = ap_strchr(url + sizeof(UWSGI_SCHEME) + 2, '/');
-+ if (!u_path_info || ap_unescape_url(u_path_info) != OK) {
- ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(10100)
-- "unable to decode uri: %s", url + w_len - delta);
-+ "unable to decode uwsgi uri: %s", url);
- return HTTP_INTERNAL_SERVER_ERROR;
- }
-- apr_table_add(r->subprocess_env, "PATH_INFO", url + w_len - delta);
-+ apr_table_add(r->subprocess_env, "PATH_INFO", u_path_info);
-
-
- /* Create space for state information */
diff --git a/debian/patches/CVE-2021-39275.patch b/debian/patches/CVE-2021-39275.patch
deleted file mode 100644
index d489891..0000000
--- a/debian/patches/CVE-2021-39275.patch
+++ /dev/null
@@ -1,35 +0,0 @@
-Description: Backport of
- From c69d4cc90c0e27703030b3ff09f91bf4dcbcfd51 Mon Sep 17 00:00:00 2001
- From: Stefan Eissing
- Date: Tue, 10 Aug 2021 08:55:54 +0000
- Subject: [PATCH] Merged r1892012 from trunk:
- .
- and
- From ac62c7e7436560cf4f7725ee586364ce95c07804 Mon Sep 17 00:00:00 2001
- From: Graham Leggett
- Date: Sat, 21 Aug 2021 21:35:04 +0000
- Subject: [PATCH] Backport:
-Author: Moritz Muehlenhoff
-Origin: upstream
-Forwarded: not-needed
-Last-Update: 2021-09-30
-
---- a/server/util.c
-+++ b/server/util.c
-@@ -2460,13 +2460,12 @@
- * in front of every " that doesn't already have one.
- */
- while (*inchr != '\0') {
-- if ((*inchr == '\\') && (inchr[1] != '\0')) {
-- *outchr++ = *inchr++;
-- *outchr++ = *inchr++;
-- }
- if (*inchr == '"') {
- *outchr++ = '\\';
- }
-+ if ((*inchr == '\\') && (inchr[1] != '\0')) {
-+ *outchr++ = *inchr++;
-+ }
- if (*inchr != '\0') {
- *outchr++ = *inchr++;
- }
diff --git a/debian/patches/CVE-2021-40438.patch b/debian/patches/CVE-2021-40438.patch
deleted file mode 100644
index 8cf60a7..0000000
--- a/debian/patches/CVE-2021-40438.patch
+++ /dev/null
@@ -1,124 +0,0 @@
-Description: Backport of the following patches:
-Origin: upstream,
- https://github.com/apache/httpd/commit/496c863776c68bd08cdbeb7d8fa5935ba63b76c2
- https://github.com/apache/httpd/commit/d4901cb32133bc0e59ad193a29d1665597080d67
- https://github.com/apache/httpd/commit/81a8b0133b46c4cf7dfc4b5476ad46eb34aa0a5c
- https://github.com/apache/httpd/commit/6e768a811c59ca6a0769b72681aaef381823339f
-Forwarded: not-needed
-Reviewed-By: Moritz Muehlenhoff
-Last-Update: 2021-09-30
-
---- a/modules/mappers/mod_rewrite.c
-+++ b/modules/mappers/mod_rewrite.c
-@@ -620,6 +620,13 @@
- return 6;
- }
- break;
-+
-+ case 'u':
-+ case 'U':
-+ if (!ap_cstr_casecmpn(uri, "nix:", 4)) { /* unix: */
-+ *sqs = 1;
-+ return (uri[4] == '/' && uri[5] == '/') ? 7 : 5;
-+ }
- }
-
- return 0;
---- a/modules/proxy/mod_proxy.c
-+++ b/modules/proxy/mod_proxy.c
-@@ -1690,7 +1690,7 @@
- * the UDS path... ignore it
- */
- if (!strncasecmp(url, "unix:", 5) &&
-- ((ptr = ap_strchr_c(url, '|')) != NULL)) {
-+ ((ptr = ap_strchr_c(url + 5, '|')) != NULL)) {
- /* move past the 'unix:...|' UDS path info */
- const char *ret, *c;
-
---- a/modules/proxy/proxy_util.c
-+++ b/modules/proxy/proxy_util.c
-@@ -2077,33 +2077,43 @@
- * were passed a UDS url (eg: from mod_proxy) and adjust uds_path
- * as required.
- */
--static void fix_uds_filename(request_rec *r, char **url)
-+static int fix_uds_filename(request_rec *r, char **url)
- {
-- char *ptr, *ptr2;
-- if (!r || !r->filename) return;
-+ char *uds_url = r->filename + 6, *origin_url;
-
- if (!strncmp(r->filename, "proxy:", 6) &&
-- (ptr2 = ap_strcasestr(r->filename, "unix:")) &&
-- (ptr = ap_strchr(ptr2, '|'))) {
-+ !ap_cstr_casecmpn(uds_url, "unix:", 5) &&
-+ (origin_url = ap_strchr(uds_url + 5, '|'))) {
-+ char *uds_path = NULL;
-+ apr_size_t url_len;
- apr_uri_t urisock;
- apr_status_t rv;
-- *ptr = '\0';
-- rv = apr_uri_parse(r->pool, ptr2, &urisock);
-- if (rv == APR_SUCCESS) {
-- char *rurl = ptr+1;
-- char *sockpath = ap_runtime_dir_relative(r->pool, urisock.path);
-- apr_table_setn(r->notes, "uds_path", sockpath);
-- *url = apr_pstrdup(r->pool, rurl); /* so we get the scheme for the uds */
-- /* r->filename starts w/ "proxy:", so add after that */
-- memmove(r->filename+6, rurl, strlen(rurl)+1);
-- ap_log_rerror(APLOG_MARK, APLOG_TRACE2, 0, r,
-- "*: rewrite of url due to UDS(%s): %s (%s)",
-- sockpath, *url, r->filename);
-+
-+ *origin_url = '\0';
-+ rv = apr_uri_parse(r->pool, uds_url, &urisock);
-+ *origin_url++ = '|';
-+
-+ if (rv == APR_SUCCESS && urisock.path && (!urisock.hostname
-+ || !urisock.hostname[0])) {
-+ uds_path = ap_runtime_dir_relative(r->pool, urisock.path);
- }
-- else {
-- *ptr = '|';
-+ if (!uds_path) {
-+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(10292)
-+ "Invalid proxy UDS filename (%s)", r->filename);
-+ return 0;
- }
-+ apr_table_setn(r->notes, "uds_path", uds_path);
-+
-+ /* Remove the UDS path from *url and r->filename */
-+ url_len = strlen(origin_url);
-+ *url = apr_pstrmemdup(r->pool, origin_url, url_len);
-+ memcpy(uds_url, *url, url_len + 1);
-+
-+ ap_log_rerror(APLOG_MARK, APLOG_TRACE2, 0, r,
-+ "*: rewrite of url due to UDS(%s): %s (%s)",
-+ uds_path, *url, r->filename);
- }
-+ return 1;
- }
-
- PROXY_DECLARE(int) ap_proxy_pre_request(proxy_worker **worker,
-@@ -2121,7 +2131,9 @@
- "%s: found worker %s for %s",
- (*worker)->s->scheme, (*worker)->s->name, *url);
- *balancer = NULL;
-- fix_uds_filename(r, url);
-+ if (!fix_uds_filename(r, url)) {
-+ return HTTP_INTERNAL_SERVER_ERROR;
-+ }
- access_status = OK;
- }
- else if (r->proxyreq == PROXYREQ_PROXY) {
-@@ -2152,7 +2164,9 @@
- * regarding the Connection header in the request.
- */
- apr_table_setn(r->subprocess_env, "proxy-nokeepalive", "1");
-- fix_uds_filename(r, url);
-+ if (!fix_uds_filename(r, url)) {
-+ return HTTP_INTERNAL_SERVER_ERROR;
-+ }
- }
- }
- }
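
Context for the UDS handling hardened above: a proxy URL of the form unix:<socket-path>|<origin-url> is split on the '|' that follows the socket path, and the deleted fix both starts that scan after the "unix:" prefix and rejects a missing or malformed path instead of trusting it. A standalone sketch of such a split (simplified, not proxy_util.c):

    #include <stdio.h>
    #include <string.h>
    #include <strings.h>

    /* Split "unix:<socket-path>|<origin-url>" into its two halves.
     * Returns 0 unless the prefix is "unix:" and a '|' follows the path. */
    static int split_uds(const char *url, char *sock, size_t socklen,
                         const char **origin)
    {
        const char *bar;
        if (strncasecmp(url, "unix:", 5) != 0)
            return 0;
        bar = strchr(url + 5, '|');           /* search after the prefix, as the fix does */
        if (!bar || (size_t)(bar - (url + 5)) >= socklen)
            return 0;
        memcpy(sock, url + 5, bar - (url + 5));
        sock[bar - (url + 5)] = '\0';
        *origin = bar + 1;
        return 1;
    }

    int main(void)
    {
        char sock[64];
        const char *origin;
        if (split_uds("unix:/run/app.sock|http://localhost/api", sock, sizeof(sock), &origin))
            printf("%s -> %s\n", sock, origin);   /* /run/app.sock -> http://localhost/api */
        return 0;
    }
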
diff --git a/debian/patches/CVE-2021-44224-1.patch b/debian/patches/CVE-2021-44224-1.patch
deleted file mode 100644
index 0f540c8..0000000
--- a/debian/patches/CVE-2021-44224-1.patch
+++ /dev/null
@@ -1,206 +0,0 @@
-Description: CVE-2021-44224: possible NULL dereference or SSRF in forward proxy configurations
-Author: Yann Ylavic
-Origin: upstream, https://github.com/apache/httpd/commit/a962ba73
-Bug: https://security-tracker.debian.org/tracker/CVE-2021-44224
-Forwarded: not-needed
-Reviewed-By: Yadd
-Last-Update: 2021-12-21
-
---- a/include/http_protocol.h
-+++ b/include/http_protocol.h
-@@ -75,6 +75,13 @@
- AP_DECLARE(void) ap_get_mime_headers_core(request_rec *r,
- apr_bucket_brigade *bb);
-
-+/**
-+ * Run post_read_request hook and validate.
-+ * @param r The current request
-+ * @return OK or HTTP_...
-+ */
-+AP_DECLARE(int) ap_post_read_request(request_rec *r);
-+
- /* Finish up stuff after a request */
-
- /**
---- a/modules/http/http_request.c
-+++ b/modules/http/http_request.c
-@@ -681,7 +681,7 @@
- * to do their thing on internal redirects as well. Perhaps this is a
- * misnamed function.
- */
-- if ((access_status = ap_run_post_read_request(new))) {
-+ if ((access_status = ap_post_read_request(new))) {
- ap_die(access_status, new);
- return NULL;
- }
---- a/modules/http2/h2_request.c
-+++ b/modules/http2/h2_request.c
-@@ -337,7 +337,7 @@
- NULL, r, r->connection);
-
- if (access_status != HTTP_OK
-- || (access_status = ap_run_post_read_request(r))) {
-+ || (access_status = ap_post_read_request(r))) {
- /* Request check post hooks failed. An example of this would be a
- * request for a vhost where h2 is disabled --> 421.
- */
---- a/modules/proxy/mod_proxy.c
-+++ b/modules/proxy/mod_proxy.c
-@@ -576,13 +576,13 @@
-
- /* Ick... msvc (perhaps others) promotes ternary short results to int */
-
-- if (conf->req && r->parsed_uri.scheme) {
-+ if (conf->req && r->parsed_uri.scheme && r->parsed_uri.hostname) {
- /* but it might be something vhosted */
-- if (!(r->parsed_uri.hostname
-- && !strcasecmp(r->parsed_uri.scheme, ap_http_scheme(r))
-- && ap_matches_request_vhost(r, r->parsed_uri.hostname,
-- (apr_port_t)(r->parsed_uri.port_str ? r->parsed_uri.port
-- : ap_default_port(r))))) {
-+ if (strcasecmp(r->parsed_uri.scheme, ap_http_scheme(r)) != 0
-+ || !ap_matches_request_vhost(r, r->parsed_uri.hostname,
-+ (apr_port_t)(r->parsed_uri.port_str
-+ ? r->parsed_uri.port
-+ : ap_default_port(r)))) {
- r->proxyreq = PROXYREQ_PROXY;
- r->uri = r->unparsed_uri;
- r->filename = apr_pstrcat(r->pool, "proxy:", r->uri, NULL);
-@@ -1722,6 +1722,7 @@
- struct proxy_alias *new;
- char *f = cmd->path;
- char *r = NULL;
-+ const char *real;
- char *word;
- apr_table_t *params = apr_table_make(cmd->pool, 5);
- const apr_array_header_t *arr;
-@@ -1787,6 +1788,10 @@
- if (r == NULL) {
- return "ProxyPass|ProxyPassMatch needs a path when not defined in a location";
- }
-+ if (!(real = ap_proxy_de_socketfy(cmd->temp_pool, r))) {
-+ return "ProxyPass|ProxyPassMatch uses an invalid \"unix:\" URL";
-+ }
-+
-
- /* if per directory, save away the single alias */
- if (cmd->path) {
-@@ -1803,7 +1808,7 @@
- }
-
- new->fake = apr_pstrdup(cmd->pool, f);
-- new->real = apr_pstrdup(cmd->pool, ap_proxy_de_socketfy(cmd->pool, r));
-+ new->real = apr_pstrdup(cmd->pool, real);
- new->flags = flags;
- if (use_regex) {
- new->regex = ap_pregcomp(cmd->pool, f, AP_REG_EXTENDED);
-@@ -2280,6 +2285,7 @@
- proxy_worker *worker;
- char *path = cmd->path;
- char *name = NULL;
-+ const char *real;
- char *word;
- apr_table_t *params = apr_table_make(cmd->pool, 5);
- const apr_array_header_t *arr;
-@@ -2320,6 +2326,9 @@
-         return "BalancerMember must define balancer name when outside <Proxy > section";
- if (!name)
- return "BalancerMember must define remote proxy server";
-+ if (!(real = ap_proxy_de_socketfy(cmd->temp_pool, name))) {
-+ return "BalancerMember uses an invalid \"unix:\" URL";
-+ }
-
- ap_str_tolower(path); /* lowercase scheme://hostname */
-
-@@ -2332,7 +2341,7 @@
- }
-
- /* Try to find existing worker */
-- worker = ap_proxy_get_worker(cmd->temp_pool, balancer, conf, ap_proxy_de_socketfy(cmd->temp_pool, name));
-+ worker = ap_proxy_get_worker(cmd->temp_pool, balancer, conf, real);
- if (!worker) {
- ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, cmd->server, APLOGNO(01147)
- "Defining worker '%s' for balancer '%s'",
-@@ -2421,7 +2430,14 @@
- }
- }
- else {
-- worker = ap_proxy_get_worker(cmd->temp_pool, NULL, conf, ap_proxy_de_socketfy(cmd->temp_pool, name));
-+ const char *real;
-+
-+ if (!(real = ap_proxy_de_socketfy(cmd->temp_pool, name))) {
-+ return "ProxySet uses an invalid \"unix:\" URL";
-+ }
-+
-+ worker = ap_proxy_get_worker(cmd->temp_pool, NULL, conf,
-+ real);
- if (!worker) {
- if (in_proxy_section) {
- err = ap_proxy_define_worker(cmd->pool, &worker, NULL,
-@@ -2563,8 +2579,14 @@
- }
- }
- else {
-+ const char *real;
-+
-+ if (!(real = ap_proxy_de_socketfy(cmd->temp_pool, conf->p))) {
-+ return " uses an invalid \"unix:\" URL";
-+            return "<Proxy> uses an invalid \"unix:\" URL";
-+
- worker = ap_proxy_get_worker(cmd->temp_pool, NULL, sconf,
-- ap_proxy_de_socketfy(cmd->temp_pool, (char*)conf->p));
-+ real);
- if (!worker) {
- err = ap_proxy_define_worker(cmd->pool, &worker, NULL,
- sconf, conf->p, 0);
---- a/modules/proxy/proxy_util.c
-+++ b/modules/proxy/proxy_util.c
-@@ -1662,6 +1662,9 @@
- }
-
- url = ap_proxy_de_socketfy(p, url);
-+ if (!url) {
-+ return NULL;
-+ }
-
- c = ap_strchr_c(url, ':');
- if (c == NULL || c[1] != '/' || c[2] != '/' || c[3] == '\0') {
---- a/server/protocol.c
-+++ b/server/protocol.c
-@@ -1465,7 +1465,7 @@
- NULL, r, r->connection);
-
- if (access_status != HTTP_OK
-- || (access_status = ap_run_post_read_request(r))) {
-+ || (access_status = ap_post_read_request(r))) {
- ap_die(access_status, r);
- ap_update_child_status(conn->sbh, SERVER_BUSY_LOG, r);
- ap_run_log_transaction(r);
-@@ -1503,6 +1503,27 @@
- return r;
- }
-
-+AP_DECLARE(int) ap_post_read_request(request_rec *r)
-+{
-+ int status;
-+
-+ if ((status = ap_run_post_read_request(r))) {
-+ return status;
-+ }
-+
-+ /* Enforce http(s) only scheme for non-forward-proxy requests */
-+ if (!r->proxyreq
-+ && r->parsed_uri.scheme
-+ && (ap_cstr_casecmpn(r->parsed_uri.scheme, "http", 4) != 0
-+ || (r->parsed_uri.scheme[4] != '\0'
-+ && (apr_tolower(r->parsed_uri.scheme[4]) != 's'
-+ || r->parsed_uri.scheme[5] != '\0')))) {
-+ return HTTP_BAD_REQUEST;
-+ }
-+
-+ return OK;
-+}
-+
- /* if a request with a body creates a subrequest, remove original request's
- * input headers which pertain to the body which has already been read.
- * out-of-line helper function for ap_set_sub_req_protocol.
diff --git a/debian/patches/CVE-2021-44224-2.patch b/debian/patches/CVE-2021-44224-2.patch
deleted file mode 100644
index 6b841dd..0000000
--- a/debian/patches/CVE-2021-44224-2.patch
+++ /dev/null
@@ -1,93 +0,0 @@
-Description: mod_proxy: Don't prevent forwarding URIs w/ no hostname.
- (fix for r1895955 already in 2.4.x)
- .
- Part not applied:
- #--- a/modules/proxy/mod_proxy.h
- #+++ b/modules/proxy/mod_proxy.h
- #@@ -323,6 +323,8 @@
- # #define PROXY_WORKER_HC_FAIL_FLAG 'C'
- # #define PROXY_WORKER_HOT_SPARE_FLAG 'R'
- #
- #+#define AP_PROXY_WORKER_NO_UDS (1u << 3)
- #+
- # #define PROXY_WORKER_NOT_USABLE_BITMAP ( PROXY_WORKER_IN_SHUTDOWN | \
- # PROXY_WORKER_DISABLED | PROXY_WORKER_STOPPED | PROXY_WORKER_IN_ERROR | \
- # PROXY_WORKER_HC_FAIL )
- #--- a/modules/proxy/proxy_util.c
- #+++ b/modules/proxy/proxy_util.c
- #@@ -1661,9 +1661,11 @@
- # return NULL;
- # }
- #
- #- url = ap_proxy_de_socketfy(p, url);
- #- if (!url) {
- #- return NULL;
- #+ if (!(mask & AP_PROXY_WORKER_NO_UDS)) {
- #+ url = ap_proxy_de_socketfy(p, url);
- #+ if (!url) {
- #+ return NULL;
- #+ }
- # }
- #
- # c = ap_strchr_c(url, ':');
-Author: Stefan Eissing
-Origin: upstream, https://github.com/apache/httpd/commit/a0521d289
-Bug: https://security-tracker.debian.org/tracker/CVE-2021-44224
-Forwarded: not-needed
-Reviewed-By: Yadd
-Last-Update: 2021-12-21
-
---- a/modules/proxy/mod_proxy.c
-+++ b/modules/proxy/mod_proxy.c
-@@ -576,9 +576,10 @@
-
- /* Ick... msvc (perhaps others) promotes ternary short results to int */
-
-- if (conf->req && r->parsed_uri.scheme && r->parsed_uri.hostname) {
-+ if (conf->req && r->parsed_uri.scheme) {
- /* but it might be something vhosted */
-- if (strcasecmp(r->parsed_uri.scheme, ap_http_scheme(r)) != 0
-+ if (!r->parsed_uri.hostname
-+ || strcasecmp(r->parsed_uri.scheme, ap_http_scheme(r)) != 0
- || !ap_matches_request_vhost(r, r->parsed_uri.hostname,
- (apr_port_t)(r->parsed_uri.port_str
- ? r->parsed_uri.port
---- a/modules/proxy/proxy_util.c
-+++ b/modules/proxy/proxy_util.c
-@@ -2128,22 +2128,21 @@
-
- access_status = proxy_run_pre_request(worker, balancer, r, conf, url);
- if (access_status == DECLINED && *balancer == NULL) {
-+ const int forward = (r->proxyreq == PROXYREQ_PROXY);
- *worker = ap_proxy_get_worker(r->pool, NULL, conf, *url);
- if (*worker) {
- ap_log_rerror(APLOG_MARK, APLOG_TRACE2, 0, r,
- "%s: found worker %s for %s",
- (*worker)->s->scheme, (*worker)->s->name, *url);
-- *balancer = NULL;
-- if (!fix_uds_filename(r, url)) {
-+ if (!forward && !fix_uds_filename(r, url)) {
- return HTTP_INTERNAL_SERVER_ERROR;
- }
- access_status = OK;
- }
-- else if (r->proxyreq == PROXYREQ_PROXY) {
-+ else if (forward) {
- if (conf->forward) {
- ap_log_rerror(APLOG_MARK, APLOG_TRACE2, 0, r,
- "*: found forward proxy worker for %s", *url);
-- *balancer = NULL;
- *worker = conf->forward;
- access_status = OK;
- /*
-@@ -2157,8 +2156,8 @@
- else if (r->proxyreq == PROXYREQ_REVERSE) {
- if (conf->reverse) {
- ap_log_rerror(APLOG_MARK, APLOG_TRACE2, 0, r,
-- "*: using default reverse proxy worker for %s (no keepalive)", *url);
-- *balancer = NULL;
-+ "*: using default reverse proxy worker for %s "
-+ "(no keepalive)", *url);
- *worker = conf->reverse;
- access_status = OK;
- /*
diff --git a/debian/patches/CVE-2021-44790.patch b/debian/patches/CVE-2021-44790.patch
deleted file mode 100644
index dbba745..0000000
--- a/debian/patches/CVE-2021-44790.patch
+++ /dev/null
@@ -1,18 +0,0 @@
-Description: Improve error handling
-Author: Stefan Eissing
-Origin: upstream, https://github.com/apache/httpd/commit/07b9768c
-Bug: https://security-tracker.debian.org/tracker/CVE-2021-44790
-Forwarded: not-needed
-Reviewed-By: Yadd
-Last-Update: 2021-12-21
-
---- a/modules/lua/lua_request.c
-+++ b/modules/lua/lua_request.c
-@@ -376,6 +376,7 @@
- if (end == NULL) break;
- key = (char *) apr_pcalloc(r->pool, 256);
- filename = (char *) apr_pcalloc(r->pool, 256);
-+ if (end - crlf <= 8) break;
- vlen = end - crlf - 8;
- buffer = (char *) apr_pcalloc(r->pool, vlen+1);
- memcpy(buffer, crlf + 4, vlen);
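
The single added line above is a bounds check: without it, two delimiters closer together than the 8 bytes the parser skips would make "end - crlf - 8" negative, and the copy length derived from it meaningless. A minimal standalone analogue of the guarded arithmetic (names and the driver are invented; this is not the mod_lua multipart parser):

#include <stddef.h>
#include <stdio.h>

#define PART_OVERHEAD 8   /* delimiter bytes between the two markers that are never payload */

size_t payload_len(const char *crlf, const char *end)
{
    ptrdiff_t span = end - crlf;
    return (span <= PART_OVERHEAD) ? 0 : (size_t)(span - PART_OVERHEAD);
}

int main(void)
{
    const char part[] = "--b\r\n\r\nkey=value";
    printf("%zu\n", payload_len(part, part + 4));                /* markers too close: 0, not a wrapped size_t */
    printf("%zu\n", payload_len(part, part + sizeof(part) - 1)); /* normal case */
    return 0;
}
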
diff --git a/debian/patches/CVE-2022-22719.patch b/debian/patches/CVE-2022-22719.patch
deleted file mode 100644
index c52ceef..0000000
--- a/debian/patches/CVE-2022-22719.patch
+++ /dev/null
@@ -1,95 +0,0 @@
-From 1b96582269d9ec7c82ee0fea1f67934e4b8176ad Mon Sep 17 00:00:00 2001
-From: Yann Ylavic
-Date: Mon, 7 Mar 2022 14:51:19 +0000
-Subject: [PATCH] mod_lua: Error out if lua_read_body() or lua_write_body()
- fail.
-
-Otherwise r:requestbody() or r:parsebody() failures might go unnoticed for
-the user.
-
-
-Merge r1898689 from trunk.
-Submitted by: rpluem
-Reviewed by: rpluem, covener, ylavic
-
-
-git-svn-id: https://svn.apache.org/repos/asf/httpd/httpd/branches/2.4.x@1898694 13f79535-47bb-0310-9956-ffa450edef68
----
- modules/lua/lua_request.c | 33 ++++++++++++++++++++-------------
- 1 file changed, 20 insertions(+), 13 deletions(-)
-
-diff --git a/modules/lua/lua_request.c b/modules/lua/lua_request.c
-index 493b2bb431..1eab7b6a47 100644
---- a/modules/lua/lua_request.c
-+++ b/modules/lua/lua_request.c
-@@ -235,14 +235,16 @@ static int lua_read_body(request_rec *r, const char **rbuf, apr_off_t *size,
- {
- int rc = OK;
-
-+ *rbuf = NULL;
-+ *size = 0;
-+
- if ((rc = ap_setup_client_block(r, REQUEST_CHUNKED_ERROR))) {
- return (rc);
- }
- if (ap_should_client_block(r)) {
-
- /*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
-- char argsbuffer[HUGE_STRING_LEN];
-- apr_off_t rsize, len_read, rpos = 0;
-+ apr_off_t len_read, rpos = 0;
- apr_off_t length = r->remaining;
- /*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
-
-@@ -250,18 +252,18 @@ static int lua_read_body(request_rec *r, const char **rbuf, apr_off_t *size,
- return APR_EINCOMPLETE; /* Only room for incomplete data chunk :( */
- }
- *rbuf = (const char *) apr_pcalloc(r->pool, (apr_size_t) (length + 1));
-- *size = length;
-- while ((len_read = ap_get_client_block(r, argsbuffer, sizeof(argsbuffer))) > 0) {
-- if ((rpos + len_read) > length) {
-- rsize = length - rpos;
-- }
-- else {
-- rsize = len_read;
-- }
--
-- memcpy((char *) *rbuf + rpos, argsbuffer, (size_t) rsize);
-- rpos += rsize;
-+ while ((rpos < length)
-+ && (len_read = ap_get_client_block(r, (char *) *rbuf + rpos,
-+ length - rpos)) > 0) {
-+ rpos += len_read;
-+ }
-+ if (len_read < 0) {
-+ return APR_EINCOMPLETE;
- }
-+ *size = rpos;
-+ }
-+ else {
-+ rc = DONE;
- }
-
- return (rc);
-@@ -278,6 +280,8 @@ static apr_status_t lua_write_body(request_rec *r, apr_file_t *file, apr_off_t *
- {
- apr_status_t rc = OK;
-
-+ *size = 0;
-+
- if ((rc = ap_setup_client_block(r, REQUEST_CHUNKED_ERROR)))
- return rc;
- if (ap_should_client_block(r)) {
-@@ -303,6 +307,9 @@ static apr_status_t lua_write_body(request_rec *r, apr_file_t *file, apr_off_t *
- rpos += rsize;
- }
- }
-+ else {
-+ rc = DONE;
-+ }
-
- return rc;
- }
---
-2.30.2
-
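
The rewritten lua_read_body() above reads straight into the destination buffer, never past the declared length, and returns APR_EINCOMPLETE instead of silently ignoring a failed ap_get_client_block(). The same shape in plain POSIX, as a hedged sketch rather than the mod_lua code (read() stands in for ap_get_client_block(), and the function name is invented):

#include <errno.h>
#include <stddef.h>
#include <unistd.h>

/* Read up to `length` bytes from fd directly into buf.  Returns 0 and sets
 * *nread on success (possibly short at end of input), or -1 on a read error
 * so the caller cannot mistake a failure for an empty body. */
int read_body(int fd, char *buf, size_t length, size_t *nread)
{
    size_t pos = 0;
    while (pos < length) {
        ssize_t n = read(fd, buf + pos, length - pos);
        if (n < 0) {
            if (errno == EINTR)
                continue;        /* retry after a signal */
            return -1;           /* surface the error, as the patch now does */
        }
        if (n == 0)
            break;               /* end of input */
        pos += (size_t)n;
    }
    *nread = pos;
    return 0;
}
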
diff --git a/debian/patches/CVE-2022-22720.patch b/debian/patches/CVE-2022-22720.patch
deleted file mode 100644
index a296824..0000000
--- a/debian/patches/CVE-2022-22720.patch
+++ /dev/null
@@ -1,190 +0,0 @@
-From 19aa2d83b379719420f3a178413325156d7a62f3 Mon Sep 17 00:00:00 2001
-From: Yann Ylavic
-Date: Mon, 7 Mar 2022 14:46:08 +0000
-Subject: [PATCH] core: Simpler connection close logic if discarding the
- request body fails.
-
-If ap_discard_request_body() sets AP_CONN_CLOSE by itself it simplifies and
-allows to consolidate end_output_stream() and error_output_stream().
-
-
-Merge r1898683 from trunk.
-Submitted by: ylavic, rpluem
-Reviewed by: ylavic, rpluem, covener
-
-
-git-svn-id: https://svn.apache.org/repos/asf/httpd/httpd/branches/2.4.x@1898692 13f79535-47bb-0310-9956-ffa450edef68
----
- changes-entries/discard_body.diff | 2 +
- modules/http/http_filters.c | 69 ++++++++++++++++---------------
- server/protocol.c | 14 +++++--
- 3 files changed, 48 insertions(+), 37 deletions(-)
- create mode 100644 changes-entries/discard_body.diff
-
-diff --git a/changes-entries/discard_body.diff b/changes-entries/discard_body.diff
-new file mode 100644
-index 0000000000..6b467ac5ee
---- /dev/null
-+++ b/changes-entries/discard_body.diff
-@@ -0,0 +1,2 @@
-+ *) core: Simpler connection close logic if discarding the request body fails.
-+ [Yann Ylavic, Ruediger Pluem]
-\ No newline at end of file
-diff --git a/modules/http/http_filters.c b/modules/http/http_filters.c
-index d9b3621215..43e8c6dd5d 100644
---- a/modules/http/http_filters.c
-+++ b/modules/http/http_filters.c
-@@ -1598,9 +1598,9 @@ AP_DECLARE(int) ap_map_http_request_error(apr_status_t rv, int status)
- */
- AP_DECLARE(int) ap_discard_request_body(request_rec *r)
- {
-+ int rc = OK;
-+ conn_rec *c = r->connection;
- apr_bucket_brigade *bb;
-- int seen_eos;
-- apr_status_t rv;
-
- /* Sometimes we'll get in a state where the input handling has
- * detected an error where we want to drop the connection, so if
-@@ -1609,54 +1609,57 @@ AP_DECLARE(int) ap_discard_request_body(request_rec *r)
- *
- * This function is also a no-op on a subrequest.
- */
-- if (r->main || r->connection->keepalive == AP_CONN_CLOSE ||
-- ap_status_drops_connection(r->status)) {
-+ if (r->main || c->keepalive == AP_CONN_CLOSE) {
-+ return OK;
-+ }
-+ if (ap_status_drops_connection(r->status)) {
-+ c->keepalive = AP_CONN_CLOSE;
- return OK;
- }
-
- bb = apr_brigade_create(r->pool, r->connection->bucket_alloc);
-- seen_eos = 0;
-- do {
-- apr_bucket *bucket;
-+ for (;;) {
-+ apr_status_t rv;
-
- rv = ap_get_brigade(r->input_filters, bb, AP_MODE_READBYTES,
- APR_BLOCK_READ, HUGE_STRING_LEN);
--
- if (rv != APR_SUCCESS) {
-- apr_brigade_destroy(bb);
-- return ap_map_http_request_error(rv, HTTP_BAD_REQUEST);
-+ rc = ap_map_http_request_error(rv, HTTP_BAD_REQUEST);
-+ goto cleanup;
- }
-
-- for (bucket = APR_BRIGADE_FIRST(bb);
-- bucket != APR_BRIGADE_SENTINEL(bb);
-- bucket = APR_BUCKET_NEXT(bucket))
-- {
-- const char *data;
-- apr_size_t len;
-+ while (!APR_BRIGADE_EMPTY(bb)) {
-+ apr_bucket *b = APR_BRIGADE_FIRST(bb);
-
-- if (APR_BUCKET_IS_EOS(bucket)) {
-- seen_eos = 1;
-- break;
-- }
--
-- /* These are metadata buckets. */
-- if (bucket->length == 0) {
-- continue;
-+ if (APR_BUCKET_IS_EOS(b)) {
-+ goto cleanup;
- }
-
-- /* We MUST read because in case we have an unknown-length
-- * bucket or one that morphs, we want to exhaust it.
-+ /* There is no need to read empty or metadata buckets or
-+ * buckets of known length, but we MUST read buckets of
-+ * unknown length in order to exhaust them.
- */
-- rv = apr_bucket_read(bucket, &data, &len, APR_BLOCK_READ);
-- if (rv != APR_SUCCESS) {
-- apr_brigade_destroy(bb);
-- return HTTP_BAD_REQUEST;
-+ if (b->length == (apr_size_t)-1) {
-+ apr_size_t len;
-+ const char *data;
-+
-+ rv = apr_bucket_read(b, &data, &len, APR_BLOCK_READ);
-+ if (rv != APR_SUCCESS) {
-+ rc = HTTP_BAD_REQUEST;
-+ goto cleanup;
-+ }
- }
-+
-+ apr_bucket_delete(b);
- }
-- apr_brigade_cleanup(bb);
-- } while (!seen_eos);
-+ }
-
-- return OK;
-+cleanup:
-+ apr_brigade_cleanup(bb);
-+ if (rc != OK) {
-+ c->keepalive = AP_CONN_CLOSE;
-+ }
-+ return rc;
- }
-
- /* Here we deal with getting the request message body from the client.
-diff --git a/server/protocol.c b/server/protocol.c
-index 2214f72b5a..298f61e1fb 100644
---- a/server/protocol.c
-+++ b/server/protocol.c
-@@ -1687,23 +1687,29 @@ AP_DECLARE(void) ap_set_sub_req_protocol(request_rec *rnew,
- rnew->main = (request_rec *) r;
- }
-
--static void end_output_stream(request_rec *r)
-+static void end_output_stream(request_rec *r, int status)
- {
- conn_rec *c = r->connection;
- apr_bucket_brigade *bb;
- apr_bucket *b;
-
- bb = apr_brigade_create(r->pool, c->bucket_alloc);
-+ if (status != OK) {
-+ b = ap_bucket_error_create(status, NULL, r->pool, c->bucket_alloc);
-+ APR_BRIGADE_INSERT_TAIL(bb, b);
-+ }
- b = apr_bucket_eos_create(c->bucket_alloc);
- APR_BRIGADE_INSERT_TAIL(bb, b);
-+
- ap_pass_brigade(r->output_filters, bb);
-+ apr_brigade_cleanup(bb);
- }
-
- AP_DECLARE(void) ap_finalize_sub_req_protocol(request_rec *sub)
- {
- /* tell the filter chain there is no more content coming */
- if (!sub->eos_sent) {
-- end_output_stream(sub);
-+ end_output_stream(sub, OK);
- }
- }
-
-@@ -1714,11 +1720,11 @@ AP_DECLARE(void) ap_finalize_sub_req_protocol(request_rec *sub)
- */
- AP_DECLARE(void) ap_finalize_request_protocol(request_rec *r)
- {
-- (void) ap_discard_request_body(r);
-+ int status = ap_discard_request_body(r);
-
- /* tell the filter chain there is no more content coming */
- if (!r->eos_sent) {
-- end_output_stream(r);
-+ end_output_stream(r, status);
- }
- }
-
---
-2.30.2
-
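
The reworked ap_discard_request_body() above funnels every exit through one cleanup label and forces the connection closed whenever the body could not be fully drained, so a reused keep-alive connection can never start the next request in the middle of leftover body bytes. A compact sketch of that control-flow shape, with invented stand-ins for the connection and the body source (this shows the pattern, not the httpd implementation):

#include <stdbool.h>
#include <stdio.h>

struct conn { bool keepalive; };

/* Invented stand-in for consuming one chunk of the unread body:
 * returns bytes consumed, 0 when the body is finished, -1 on error. */
static int next_chunk(struct conn *c) { (void)c; return 0; }

static int discard_request_body(struct conn *c)
{
    int rc = 0;                      /* 0 == OK */

    for (;;) {
        int n = next_chunk(c);
        if (n < 0) { rc = 400; goto cleanup; }
        if (n == 0) break;           /* drained completely */
    }

cleanup:
    if (rc != 0)
        c->keepalive = false;        /* half-read body: never reuse this connection */
    return rc;
}

int main(void)
{
    struct conn c = { true };
    printf("rc=%d keepalive=%d\n", discard_request_body(&c), (int)c.keepalive);
    return 0;
}
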
diff --git a/debian/patches/CVE-2022-22721.patch b/debian/patches/CVE-2022-22721.patch
deleted file mode 100644
index 2f607aa..0000000
--- a/debian/patches/CVE-2022-22721.patch
+++ /dev/null
@@ -1,116 +0,0 @@
-From 5a72f0fe6f2f8ce35c45242e99a421dc19251ab5 Mon Sep 17 00:00:00 2001
-From: Yann Ylavic
-Date: Mon, 7 Mar 2022 14:48:54 +0000
-Subject: [PATCH] core: Make sure and check that LimitXMLRequestBody fits in
- system memory.
-
-LimitXMLRequestBody can not exceed the size needed to ap_escape_html2() the
-body without failing to allocate memory, so enforce this at load time based
-on APR_SIZE_MAX, and make sure that ap_escape_html2() is within the bounds.
-
-Document the limits for LimitXMLRequestBody in our docs.
-
-
-Merge r1898686 from trunk.
-Submitted by: ylavic, rpluem
-Reviewed by: ylavic, covener, rpluem
-
-
-git-svn-id: https://svn.apache.org/repos/asf/httpd/httpd/branches/2.4.x@1898693 13f79535-47bb-0310-9956-ffa450edef68
----
- changes-entries/AP_MAX_LIMIT_XML_BODY.diff | 2 ++
- docs/manual/mod/core.xml | 12 +++++++++---
- server/core.c | 9 +++++++++
- server/util.c | 8 ++++++--
- server/util_xml.c | 2 +-
- 5 files changed, 27 insertions(+), 6 deletions(-)
- create mode 100644 changes-entries/AP_MAX_LIMIT_XML_BODY.diff
-
-diff --git a/changes-entries/AP_MAX_LIMIT_XML_BODY.diff b/changes-entries/AP_MAX_LIMIT_XML_BODY.diff
-new file mode 100644
-index 0000000000..07fef3c624
---- /dev/null
-+++ b/changes-entries/AP_MAX_LIMIT_XML_BODY.diff
-@@ -0,0 +1,2 @@
-+ *) core: Make sure and check that LimitXMLRequestBody fits in system memory.
-+ [Ruediger Pluem, Yann Ylavic]
-\ No newline at end of file
-diff --git a/server/core.c b/server/core.c
-index 798212b480..090e397642 100644
---- a/server/core.c
-+++ b/server/core.c
-@@ -72,6 +72,8 @@
- /* LimitXMLRequestBody handling */
- #define AP_LIMIT_UNSET ((long) -1)
- #define AP_DEFAULT_LIMIT_XML_BODY ((apr_size_t)1000000)
-+/* Hard limit for ap_escape_html2() */
-+#define AP_MAX_LIMIT_XML_BODY ((apr_size_t)(APR_SIZE_MAX / 6 - 1))
-
- #define AP_MIN_SENDFILE_BYTES (256)
-
-@@ -3761,6 +3763,11 @@ static const char *set_limit_xml_req_body(cmd_parms *cmd, void *conf_,
- if (conf->limit_xml_body < 0)
- return "LimitXMLRequestBody requires a non-negative integer.";
-
-+ /* zero is AP_MAX_LIMIT_XML_BODY (implicitly) */
-+ if ((apr_size_t)conf->limit_xml_body > AP_MAX_LIMIT_XML_BODY)
-+ return apr_psprintf(cmd->pool, "LimitXMLRequestBody must not exceed "
-+ "%" APR_SIZE_T_FMT, AP_MAX_LIMIT_XML_BODY);
-+
- return NULL;
- }
-
-@@ -3849,6 +3856,8 @@ AP_DECLARE(apr_size_t) ap_get_limit_xml_body(const request_rec *r)
- conf = ap_get_core_module_config(r->per_dir_config);
- if (conf->limit_xml_body == AP_LIMIT_UNSET)
- return AP_DEFAULT_LIMIT_XML_BODY;
-+ if (conf->limit_xml_body == 0)
-+ return AP_MAX_LIMIT_XML_BODY;
-
- return (apr_size_t)conf->limit_xml_body;
- }
-diff --git a/server/util.c b/server/util.c
-index 6cfe0035c4..604be1a1ce 100644
---- a/server/util.c
-+++ b/server/util.c
-@@ -2142,11 +2142,14 @@ AP_DECLARE(char *) ap_escape_urlencoded(apr_pool_t *p, const char *buffer)
-
- AP_DECLARE(char *) ap_escape_html2(apr_pool_t *p, const char *s, int toasc)
- {
-- int i, j;
-+ apr_size_t i, j;
- char *x;
-
- /* first, count the number of extra characters */
-- for (i = 0, j = 0; s[i] != '\0'; i++)
-+ for (i = 0, j = 0; s[i] != '\0'; i++) {
-+ if (i + j > APR_SIZE_MAX - 6) {
-+ abort();
-+ }
- if (s[i] == '<' || s[i] == '>')
- j += 3;
- else if (s[i] == '&')
-@@ -2155,6 +2158,7 @@ AP_DECLARE(char *) ap_escape_html2(apr_pool_t *p, const char *s, int toasc)
- j += 5;
- else if (toasc && !apr_isascii(s[i]))
- j += 5;
-+ }
-
- if (j == 0)
- return apr_pstrmemdup(p, s, i);
-diff --git a/server/util_xml.c b/server/util_xml.c
-index 4845194656..22806fa8a4 100644
---- a/server/util_xml.c
-+++ b/server/util_xml.c
-@@ -85,7 +85,7 @@ AP_DECLARE(int) ap_xml_parse_input(request_rec * r, apr_xml_doc **pdoc)
- }
-
- total_read += len;
-- if (limit_xml_body && total_read > limit_xml_body) {
-+ if (total_read > limit_xml_body) {
- ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(00539)
- "XML request body is larger than the configured "
- "limit of %lu", (unsigned long)limit_xml_body);
---
-2.30.2
-
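
The factor in AP_MAX_LIMIT_XML_BODY comes from the escaping arithmetic above: in ap_escape_html2() a single input byte is counted as at most 6 output bytes (a quote or a non-ASCII byte adds 5 on top of itself), so any limit above APR_SIZE_MAX / 6 - 1 could overflow the size calculation, and the i + j > APR_SIZE_MAX - 6 guard aborts if it ever would. A standalone sketch of the same overflow-aware counting, using libc types instead of APR (the function name is invented):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#define EXPANSION 6   /* worst case: one input byte becomes six output bytes */

/* Compute the escaped length without overflowing size_t; returns false if
 * the result would not fit, the situation the load-time cap rules out. */
bool escaped_length(const char *s, size_t *out)
{
    size_t total = 0;
    for (; *s; s++) {
        size_t add = (*s == '<' || *s == '>' || *s == '&' || *s == '"')
                     ? EXPANSION : 1;
        if (total > SIZE_MAX - add)
            return false;
        total += add;
    }
    *out = total;
    return true;
}
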
diff --git a/debian/patches/CVE-2022-23943-1.patch b/debian/patches/CVE-2022-23943-1.patch
deleted file mode 100644
index d82fd1d..0000000
--- a/debian/patches/CVE-2022-23943-1.patch
+++ /dev/null
@@ -1,360 +0,0 @@
-From 943f57b336f264d77e5b780c82ab73daf3d14deb Mon Sep 17 00:00:00 2001
-From: Yann Ylavic
-Date: Mon, 7 Mar 2022 14:52:42 +0000
-Subject: [PATCH] mod_sed: use size_t to allow for larger buffer sizes and
- unsigned arithmetics.
-
-Let's switch to apr_size_t buffers and get rid of the ints.
-
-
-Merge r1898690 from trunk.
-Submitted by: rpluem
-Reviewed by: rpluem, covener, ylavic
-
-
-git-svn-id: https://svn.apache.org/repos/asf/httpd/httpd/branches/2.4.x@1898695 13f79535-47bb-0310-9956-ffa450edef68
----
- modules/filters/libsed.h | 12 +++---
- modules/filters/mod_sed.c | 10 ++---
- modules/filters/sed1.c | 79 +++++++++++++++++++++++----------------
- 3 files changed, 58 insertions(+), 43 deletions(-)
-
-diff --git a/modules/filters/libsed.h b/modules/filters/libsed.h
-index 76cbc0ce8a..0256b1ea83 100644
---- a/modules/filters/libsed.h
-+++ b/modules/filters/libsed.h
-@@ -60,7 +60,7 @@ struct sed_label_s {
- };
-
- typedef apr_status_t (sed_err_fn_t)(void *data, const char *error);
--typedef apr_status_t (sed_write_fn_t)(void *ctx, char *buf, int sz);
-+typedef apr_status_t (sed_write_fn_t)(void *ctx, char *buf, apr_size_t sz);
-
- typedef struct sed_commands_s sed_commands_t;
- #define NWFILES 11 /* 10 plus one for standard output */
-@@ -69,7 +69,7 @@ struct sed_commands_s {
- sed_err_fn_t *errfn;
- void *data;
-
-- unsigned lsize;
-+ apr_size_t lsize;
- char *linebuf;
- char *lbend;
- const char *saveq;
-@@ -116,15 +116,15 @@ struct sed_eval_s {
- apr_int64_t lnum;
- void *fout;
-
-- unsigned lsize;
-+ apr_size_t lsize;
- char *linebuf;
- char *lspend;
-
-- unsigned hsize;
-+ apr_size_t hsize;
- char *holdbuf;
- char *hspend;
-
-- unsigned gsize;
-+ apr_size_t gsize;
- char *genbuf;
- char *lcomend;
-
-@@ -160,7 +160,7 @@ apr_status_t sed_init_eval(sed_eval_t *eval, sed_commands_t *commands,
- sed_err_fn_t *errfn, void *data,
- sed_write_fn_t *writefn, apr_pool_t *p);
- apr_status_t sed_reset_eval(sed_eval_t *eval, sed_commands_t *commands, sed_err_fn_t *errfn, void *data);
--apr_status_t sed_eval_buffer(sed_eval_t *eval, const char *buf, int bufsz, void *fout);
-+apr_status_t sed_eval_buffer(sed_eval_t *eval, const char *buf, apr_size_t bufsz, void *fout);
- apr_status_t sed_eval_file(sed_eval_t *eval, apr_file_t *fin, void *fout);
- apr_status_t sed_finalize_eval(sed_eval_t *eval, void *f);
- void sed_destroy_eval(sed_eval_t *eval);
-diff --git a/modules/filters/mod_sed.c b/modules/filters/mod_sed.c
-index 9b408029a8..7092dd5e7f 100644
---- a/modules/filters/mod_sed.c
-+++ b/modules/filters/mod_sed.c
-@@ -51,7 +51,7 @@ typedef struct sed_filter_ctxt
- apr_bucket_brigade *bbinp;
- char *outbuf;
- char *curoutbuf;
-- int bufsize;
-+ apr_size_t bufsize;
- apr_pool_t *tpool;
- int numbuckets;
- } sed_filter_ctxt;
-@@ -100,7 +100,7 @@ static void alloc_outbuf(sed_filter_ctxt* ctx)
- /* append_bucket
- * Allocate a new bucket from buf and sz and append to ctx->bb
- */
--static apr_status_t append_bucket(sed_filter_ctxt* ctx, char* buf, int sz)
-+static apr_status_t append_bucket(sed_filter_ctxt* ctx, char* buf, apr_size_t sz)
- {
- apr_status_t status = APR_SUCCESS;
- apr_bucket *b;
-@@ -133,7 +133,7 @@ static apr_status_t append_bucket(sed_filter_ctxt* ctx, char* buf, int sz)
- */
- static apr_status_t flush_output_buffer(sed_filter_ctxt *ctx)
- {
-- int size = ctx->curoutbuf - ctx->outbuf;
-+ apr_size_t size = ctx->curoutbuf - ctx->outbuf;
- char *out;
- apr_status_t status = APR_SUCCESS;
- if ((ctx->outbuf == NULL) || (size <=0))
-@@ -147,12 +147,12 @@ static apr_status_t flush_output_buffer(sed_filter_ctxt *ctx)
- /* This is a call back function. When libsed wants to generate the output,
- * this function will be invoked.
- */
--static apr_status_t sed_write_output(void *dummy, char *buf, int sz)
-+static apr_status_t sed_write_output(void *dummy, char *buf, apr_size_t sz)
- {
- /* dummy is basically filter context. Context is passed during invocation
- * of sed_eval_buffer
- */
-- int remainbytes = 0;
-+ apr_size_t remainbytes = 0;
- apr_status_t status = APR_SUCCESS;
- sed_filter_ctxt *ctx = (sed_filter_ctxt *) dummy;
- if (ctx->outbuf == NULL) {
-diff --git a/modules/filters/sed1.c b/modules/filters/sed1.c
-index be03506788..67a8d06515 100644
---- a/modules/filters/sed1.c
-+++ b/modules/filters/sed1.c
-@@ -71,7 +71,7 @@ static apr_status_t dosub(sed_eval_t *eval, char *rhsbuf, int n,
- static char *place(sed_eval_t *eval, char *asp, char *al1, char *al2);
- static apr_status_t command(sed_eval_t *eval, sed_reptr_t *ipc,
- step_vars_storage *step_vars);
--static apr_status_t wline(sed_eval_t *eval, char *buf, int sz);
-+static apr_status_t wline(sed_eval_t *eval, char *buf, apr_size_t sz);
- static apr_status_t arout(sed_eval_t *eval);
-
- static void eval_errf(sed_eval_t *eval, const char *fmt, ...)
-@@ -92,11 +92,11 @@ static void eval_errf(sed_eval_t *eval, const char *fmt, ...)
- * grow_buffer
- */
- static void grow_buffer(apr_pool_t *pool, char **buffer,
-- char **spend, unsigned int *cursize,
-- unsigned int newsize)
-+ char **spend, apr_size_t *cursize,
-+ apr_size_t newsize)
- {
- char* newbuffer = NULL;
-- int spendsize = 0;
-+ apr_size_t spendsize = 0;
- if (*cursize >= newsize)
- return;
- /* Avoid number of times realloc is called. It could cause huge memory
-@@ -124,7 +124,7 @@ static void grow_buffer(apr_pool_t *pool, char **buffer,
- /*
- * grow_line_buffer
- */
--static void grow_line_buffer(sed_eval_t *eval, int newsize)
-+static void grow_line_buffer(sed_eval_t *eval, apr_size_t newsize)
- {
- grow_buffer(eval->pool, &eval->linebuf, &eval->lspend,
- &eval->lsize, newsize);
-@@ -133,7 +133,7 @@ static void grow_line_buffer(sed_eval_t *eval, int newsize)
- /*
- * grow_hold_buffer
- */
--static void grow_hold_buffer(sed_eval_t *eval, int newsize)
-+static void grow_hold_buffer(sed_eval_t *eval, apr_size_t newsize)
- {
- grow_buffer(eval->pool, &eval->holdbuf, &eval->hspend,
- &eval->hsize, newsize);
-@@ -142,7 +142,7 @@ static void grow_hold_buffer(sed_eval_t *eval, int newsize)
- /*
- * grow_gen_buffer
- */
--static void grow_gen_buffer(sed_eval_t *eval, int newsize,
-+static void grow_gen_buffer(sed_eval_t *eval, apr_size_t newsize,
- char **gspend)
- {
- if (gspend == NULL) {
-@@ -156,9 +156,9 @@ static void grow_gen_buffer(sed_eval_t *eval, int newsize,
- /*
- * appendmem_to_linebuf
- */
--static void appendmem_to_linebuf(sed_eval_t *eval, const char* sz, int len)
-+static void appendmem_to_linebuf(sed_eval_t *eval, const char* sz, apr_size_t len)
- {
-- unsigned int reqsize = (eval->lspend - eval->linebuf) + len;
-+ apr_size_t reqsize = (eval->lspend - eval->linebuf) + len;
- if (eval->lsize < reqsize) {
- grow_line_buffer(eval, reqsize);
- }
-@@ -169,21 +169,36 @@ static void appendmem_to_linebuf(sed_eval_t *eval, const char* sz, int len)
- /*
- * append_to_linebuf
- */
--static void append_to_linebuf(sed_eval_t *eval, const char* sz)
-+static void append_to_linebuf(sed_eval_t *eval, const char* sz,
-+ step_vars_storage *step_vars)
- {
-- int len = strlen(sz);
-+ apr_size_t len = strlen(sz);
-+ char *old_linebuf = eval->linebuf;
- /* Copy string including null character */
- appendmem_to_linebuf(eval, sz, len + 1);
- --eval->lspend; /* lspend will now point to NULL character */
-+ /* Sync step_vars after a possible linebuf expansion */
-+ if (step_vars && old_linebuf != eval->linebuf) {
-+ if (step_vars->loc1) {
-+ step_vars->loc1 = step_vars->loc1 - old_linebuf + eval->linebuf;
-+ }
-+ if (step_vars->loc2) {
-+ step_vars->loc2 = step_vars->loc2 - old_linebuf + eval->linebuf;
-+ }
-+ if (step_vars->locs) {
-+ step_vars->locs = step_vars->locs - old_linebuf + eval->linebuf;
-+ }
-+ }
- }
-
- /*
- * copy_to_linebuf
- */
--static void copy_to_linebuf(sed_eval_t *eval, const char* sz)
-+static void copy_to_linebuf(sed_eval_t *eval, const char* sz,
-+ step_vars_storage *step_vars)
- {
- eval->lspend = eval->linebuf;
-- append_to_linebuf(eval, sz);
-+ append_to_linebuf(eval, sz, step_vars);
- }
-
- /*
-@@ -191,8 +206,8 @@ static void copy_to_linebuf(sed_eval_t *eval, const char* sz)
- */
- static void append_to_holdbuf(sed_eval_t *eval, const char* sz)
- {
-- int len = strlen(sz);
-- unsigned int reqsize = (eval->hspend - eval->holdbuf) + len + 1;
-+ apr_size_t len = strlen(sz);
-+ apr_size_t reqsize = (eval->hspend - eval->holdbuf) + len + 1;
- if (eval->hsize <= reqsize) {
- grow_hold_buffer(eval, reqsize);
- }
-@@ -215,8 +230,8 @@ static void copy_to_holdbuf(sed_eval_t *eval, const char* sz)
- */
- static void append_to_genbuf(sed_eval_t *eval, const char* sz, char **gspend)
- {
-- int len = strlen(sz);
-- unsigned int reqsize = (*gspend - eval->genbuf) + len + 1;
-+ apr_size_t len = strlen(sz);
-+ apr_size_t reqsize = (*gspend - eval->genbuf) + len + 1;
- if (eval->gsize < reqsize) {
- grow_gen_buffer(eval, reqsize, gspend);
- }
-@@ -230,8 +245,8 @@ static void append_to_genbuf(sed_eval_t *eval, const char* sz, char **gspend)
- */
- static void copy_to_genbuf(sed_eval_t *eval, const char* sz)
- {
-- int len = strlen(sz);
-- unsigned int reqsize = len + 1;
-+ apr_size_t len = strlen(sz);
-+ apr_size_t reqsize = len + 1;
- if (eval->gsize < reqsize) {
- grow_gen_buffer(eval, reqsize, NULL);
- }
-@@ -353,7 +368,7 @@ apr_status_t sed_eval_file(sed_eval_t *eval, apr_file_t *fin, void *fout)
- /*
- * sed_eval_buffer
- */
--apr_status_t sed_eval_buffer(sed_eval_t *eval, const char *buf, int bufsz, void *fout)
-+apr_status_t sed_eval_buffer(sed_eval_t *eval, const char *buf, apr_size_t bufsz, void *fout)
- {
- apr_status_t rv;
-
-@@ -383,7 +398,7 @@ apr_status_t sed_eval_buffer(sed_eval_t *eval, const char *buf, int bufsz, void
-
- while (bufsz) {
- char *n;
-- int llen;
-+ apr_size_t llen;
-
- n = memchr(buf, '\n', bufsz);
- if (n == NULL)
-@@ -442,7 +457,7 @@ apr_status_t sed_finalize_eval(sed_eval_t *eval, void *fout)
- * buffer is not a newline.
- */
- /* Assure space for NULL */
-- append_to_linebuf(eval, "");
-+ append_to_linebuf(eval, "", NULL);
- }
-
- *eval->lspend = '\0';
-@@ -666,7 +681,7 @@ static apr_status_t dosub(sed_eval_t *eval, char *rhsbuf, int n,
- lp = step_vars->loc2;
- step_vars->loc2 = sp - eval->genbuf + eval->linebuf;
- append_to_genbuf(eval, lp, &sp);
-- copy_to_linebuf(eval, eval->genbuf);
-+ copy_to_linebuf(eval, eval->genbuf, step_vars);
- return rv;
- }
-
-@@ -676,8 +691,8 @@ static apr_status_t dosub(sed_eval_t *eval, char *rhsbuf, int n,
- static char *place(sed_eval_t *eval, char *asp, char *al1, char *al2)
- {
- char *sp = asp;
-- int n = al2 - al1;
-- unsigned int reqsize = (sp - eval->genbuf) + n + 1;
-+ apr_size_t n = al2 - al1;
-+ apr_size_t reqsize = (sp - eval->genbuf) + n + 1;
-
- if (eval->gsize < reqsize) {
- grow_gen_buffer(eval, reqsize, &sp);
-@@ -735,7 +750,7 @@ static apr_status_t command(sed_eval_t *eval, sed_reptr_t *ipc,
- }
-
- p1++;
-- copy_to_linebuf(eval, p1);
-+ copy_to_linebuf(eval, p1, step_vars);
- eval->jflag++;
- break;
-
-@@ -745,12 +760,12 @@ static apr_status_t command(sed_eval_t *eval, sed_reptr_t *ipc,
- break;
-
- case GCOM:
-- copy_to_linebuf(eval, eval->holdbuf);
-+ copy_to_linebuf(eval, eval->holdbuf, step_vars);
- break;
-
- case CGCOM:
-- append_to_linebuf(eval, "\n");
-- append_to_linebuf(eval, eval->holdbuf);
-+ append_to_linebuf(eval, "\n", step_vars);
-+ append_to_linebuf(eval, eval->holdbuf, step_vars);
- break;
-
- case HCOM:
-@@ -881,7 +896,7 @@ static apr_status_t command(sed_eval_t *eval, sed_reptr_t *ipc,
- if (rv != APR_SUCCESS)
- return rv;
- }
-- append_to_linebuf(eval, "\n");
-+ append_to_linebuf(eval, "\n", step_vars);
- eval->pending = ipc->next;
- break;
-
-@@ -956,7 +971,7 @@ static apr_status_t command(sed_eval_t *eval, sed_reptr_t *ipc,
-
- case XCOM:
- copy_to_genbuf(eval, eval->linebuf);
-- copy_to_linebuf(eval, eval->holdbuf);
-+ copy_to_linebuf(eval, eval->holdbuf, step_vars);
- copy_to_holdbuf(eval, eval->genbuf);
- break;
-
-@@ -1013,7 +1028,7 @@ static apr_status_t arout(sed_eval_t *eval)
- /*
- * wline
- */
--static apr_status_t wline(sed_eval_t *eval, char *buf, int sz)
-+static apr_status_t wline(sed_eval_t *eval, char *buf, apr_size_t sz)
- {
- apr_status_t rv = APR_SUCCESS;
- rv = eval->writefn(eval->fout, buf, sz);
---
-2.30.2
-
diff --git a/debian/patches/CVE-2022-23943-2.patch b/debian/patches/CVE-2022-23943-2.patch
deleted file mode 100644
index bcf883c..0000000
--- a/debian/patches/CVE-2022-23943-2.patch
+++ /dev/null
@@ -1,63 +0,0 @@
-From e266bd09c313a668d7cca17a8b096d189148be49 Mon Sep 17 00:00:00 2001
-From: Ruediger Pluem
-Date: Wed, 9 Mar 2022 07:41:40 +0000
-Subject: [PATCH] Merge r1898735 from trunk:
-
-* Improve the logic flow
-
-Reviewed by: rpluem, covener, ylavic
-
-
-git-svn-id: https://svn.apache.org/repos/asf/httpd/httpd/branches/2.4.x@1898772 13f79535-47bb-0310-9956-ffa450edef68
----
- modules/filters/mod_sed.c | 30 +++++++++++++++++++-----------
- 1 file changed, 19 insertions(+), 11 deletions(-)
-
-diff --git a/modules/filters/mod_sed.c b/modules/filters/mod_sed.c
-index 7092dd5e7f..4bdb4ce33a 100644
---- a/modules/filters/mod_sed.c
-+++ b/modules/filters/mod_sed.c
-@@ -168,21 +168,29 @@ static apr_status_t sed_write_output(void *dummy, char *buf, apr_size_t sz)
- }
- /* buffer is now full */
- status = append_bucket(ctx, ctx->outbuf, ctx->bufsize);
-- /* old buffer is now used so allocate new buffer */
-- alloc_outbuf(ctx);
-- /* if size is bigger than the allocated buffer directly add to output
-- * brigade */
-- if ((status == APR_SUCCESS) && (sz >= ctx->bufsize)) {
-- char* newbuf = apr_pmemdup(ctx->tpool, buf, sz);
-- status = append_bucket(ctx, newbuf, sz);
-- /* pool might get clear after append_bucket */
-- if (ctx->outbuf == NULL) {
-+ if (status == APR_SUCCESS) {
-+ /* if size is bigger than the allocated buffer directly add to output
-+ * brigade */
-+ if (sz >= ctx->bufsize) {
-+ char* newbuf = apr_pmemdup(ctx->tpool, buf, sz);
-+ status = append_bucket(ctx, newbuf, sz);
-+ if (status == APR_SUCCESS) {
-+ /* old buffer is now used so allocate new buffer */
-+ alloc_outbuf(ctx);
-+ }
-+ else {
-+ clear_ctxpool(ctx);
-+ }
-+ }
-+ else {
-+ /* old buffer is now used so allocate new buffer */
- alloc_outbuf(ctx);
-+ memcpy(ctx->curoutbuf, buf, sz);
-+ ctx->curoutbuf += sz;
- }
- }
- else {
-- memcpy(ctx->curoutbuf, buf, sz);
-- ctx->curoutbuf += sz;
-+ clear_ctxpool(ctx);
- }
- }
- else {
---
-2.30.2
-
diff --git a/debian/patches/CVE-2022-26377.patch b/debian/patches/CVE-2022-26377.patch
deleted file mode 100644
index af59776..0000000
--- a/debian/patches/CVE-2022-26377.patch
+++ /dev/null
@@ -1,39 +0,0 @@
-From f7f15f3d8bfe3032926c8c39eb8434529f680bd4 Mon Sep 17 00:00:00 2001
-From: Yann Ylavic
-Date: Wed, 1 Jun 2022 13:48:21 +0000
-Subject: [PATCH] mod_proxy_ajp: T-E has precedence over C-L.
-
-Merge r1901521 from trunk.
-Submitted by: rpluem
-
-
-git-svn-id: https://svn.apache.org/repos/asf/httpd/httpd/branches/2.4.x@1901522 13f79535-47bb-0310-9956-ffa450edef68
-Origin: https://github.com/apache/httpd/commit/f7f15f3d8bfe3032926c8c39eb8434529f680bd4
----
- modules/proxy/mod_proxy_ajp.c | 15 ++++++++++++---
- 1 file changed, 12 insertions(+), 3 deletions(-)
-
---- a/modules/proxy/mod_proxy_ajp.c
-+++ b/modules/proxy/mod_proxy_ajp.c
-@@ -245,9 +245,18 @@
- /* read the first bloc of data */
- input_brigade = apr_brigade_create(p, r->connection->bucket_alloc);
- tenc = apr_table_get(r->headers_in, "Transfer-Encoding");
-- if (tenc && (strcasecmp(tenc, "chunked") == 0)) {
-- /* The AJP protocol does not want body data yet */
-- ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(00870) "request is chunked");
-+ if (tenc) {
-+ if (strcasecmp(tenc, "chunked") == 0) {
-+ /* The AJP protocol does not want body data yet */
-+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(00870)
-+ "request is chunked");
-+ }
-+ else {
-+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(10396)
-+ "%s Transfer-Encoding is not supported",
-+ tenc);
-+ return HTTP_INTERNAL_SERVER_ERROR;
-+ }
- } else {
- /* Get client provided Content-Length header */
- content_length = get_content_length(r);
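
The change above applies the rule that a Transfer-Encoding header, when present, takes precedence over Content-Length: mod_proxy_ajp still tolerates "chunked" (AJP does not want the body yet) but now refuses any other coding with a 500 instead of falling through to the Content-Length path, which is the ambiguity request smuggling relies on. Reduced to a standalone decision helper (names invented, strcasecmp() standing in for the httpd helpers):

#include <strings.h>

enum framing { FRAME_CHUNKED, FRAME_CONTENT_LENGTH, FRAME_REJECT };

/* Transfer-Encoding, if present, always wins over Content-Length; only
 * "chunked" is tolerated, anything else is refused rather than reinterpreted. */
enum framing choose_framing(const char *transfer_encoding,
                            const char *content_length)
{
    if (transfer_encoding) {
        if (strcasecmp(transfer_encoding, "chunked") == 0)
            return FRAME_CHUNKED;
        return FRAME_REJECT;             /* the new APLOGNO(10396) error path */
    }
    (void)content_length;                /* only consulted when no T-E is present */
    return FRAME_CONTENT_LENGTH;
}
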
diff --git a/debian/patches/CVE-2022-28614.patch b/debian/patches/CVE-2022-28614.patch
deleted file mode 100644
index fdd8f6b..0000000
--- a/debian/patches/CVE-2022-28614.patch
+++ /dev/null
@@ -1,65 +0,0 @@
-From 8c14927162cf3b4f810683e1c5505e9ef9e1f123 Mon Sep 17 00:00:00 2001
-From: Eric Covener
-Date: Wed, 1 Jun 2022 12:34:16 +0000
-Subject: [PATCH] Merge r1901500 from trunk:
-
-handle large writes in ap_rputs
-
-
-git-svn-id: https://svn.apache.org/repos/asf/httpd/httpd/branches/2.4.x@1901501 13f79535-47bb-0310-9956-ffa450edef68
-Origin: https://github.com/apache/httpd/commit/8c14927162cf3b4f810683e1c5505e9ef9e1f123
----
- include/http_protocol.h | 22 +++++++++++++++++++++-
- server/protocol.c | 3 +++
- 2 files changed, 24 insertions(+), 1 deletion(-)
-
-diff --git a/include/http_protocol.h b/include/http_protocol.h
-index 20bd202226..94c481e5f4 100644
---- a/include/http_protocol.h
-+++ b/include/http_protocol.h
-@@ -475,7 +475,27 @@ AP_DECLARE(int) ap_rwrite(const void *buf, int nbyte, request_rec *r);
- */
- static APR_INLINE int ap_rputs(const char *str, request_rec *r)
- {
-- return ap_rwrite(str, (int)strlen(str), r);
-+ apr_size_t len;
-+
-+ len = strlen(str);
-+
-+ for (;;) {
-+ if (len <= INT_MAX) {
-+ return ap_rwrite(str, (int)len, r);
-+ }
-+ else {
-+ int rc;
-+
-+ rc = ap_rwrite(str, INT_MAX, r);
-+ if (rc < 0) {
-+ return rc;
-+ }
-+ else {
-+ str += INT_MAX;
-+ len -= INT_MAX;
-+ }
-+ }
-+ }
- }
-
- /**
-diff --git a/server/protocol.c b/server/protocol.c
-index 298f61e1fb..7adc7f75c1 100644
---- a/server/protocol.c
-+++ b/server/protocol.c
-@@ -2128,6 +2128,9 @@ AP_DECLARE(int) ap_rputc(int c, request_rec *r)
-
- AP_DECLARE(int) ap_rwrite(const void *buf, int nbyte, request_rec *r)
- {
-+ if (nbyte < 0)
-+ return -1;
-+
- if (r->connection->aborted)
- return -1;
-
---
-2.30.2
-
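
ap_rwrite() takes an int, so the new ap_rputs() above slices strings longer than INT_MAX into INT_MAX-sized writes instead of truncating the cast, and ap_rwrite() itself now rejects a negative count. The loop, extracted into a standalone helper over a generic writer callback (the callback type and names are invented for the sketch):

#include <limits.h>
#include <stddef.h>

/* Stand-in for ap_rwrite(): takes an int count, returns negative on error. */
typedef int (*writer_fn)(const void *buf, int nbyte, void *ctx);

/* Push an arbitrarily long buffer through a writer whose length parameter
 * is only an int, INT_MAX bytes at a time, never passing a truncated or
 * negative length. */
int write_all(const char *buf, size_t len, writer_fn write, void *ctx)
{
    while (len > (size_t)INT_MAX) {
        int rc = write(buf, INT_MAX, ctx);
        if (rc < 0)
            return rc;                   /* propagate the writer's error */
        buf += INT_MAX;
        len -= (size_t)INT_MAX;
    }
    return write(buf, (int)len, ctx);
}
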
diff --git a/debian/patches/CVE-2022-28615.patch b/debian/patches/CVE-2022-28615.patch
deleted file mode 100644
index 2c15157..0000000
--- a/debian/patches/CVE-2022-28615.patch
+++ /dev/null
@@ -1,35 +0,0 @@
-From 6503d09ab51047554c384a6d03646ce1a8848120 Mon Sep 17 00:00:00 2001
-From: Eric Covener
-Date: Wed, 1 Jun 2022 12:21:45 +0000
-Subject: [PATCH] Merge r1901494 from trunk:
-
-fix types
-
-
-
-git-svn-id: https://svn.apache.org/repos/asf/httpd/httpd/branches/2.4.x@1901495 13f79535-47bb-0310-9956-ffa450edef68
-Origin: https://github.com/apache/httpd/commit/6503d09ab51047554c384a6d03646ce1a8848120
----
- server/util.c | 4 ++--
- 1 file changed, 2 insertions(+), 2 deletions(-)
-
---- a/server/util.c
-+++ b/server/util.c
-@@ -186,7 +186,7 @@
- */
- AP_DECLARE(int) ap_strcmp_match(const char *str, const char *expected)
- {
-- int x, y;
-+ apr_size_t x, y;
-
- for (x = 0, y = 0; expected[y]; ++y, ++x) {
- if ((!str[x]) && (expected[y] != '*'))
-@@ -210,7 +210,7 @@
-
- AP_DECLARE(int) ap_strcasecmp_match(const char *str, const char *expected)
- {
-- int x, y;
-+ apr_size_t x, y;
-
- for (x = 0, y = 0; expected[y]; ++y, ++x) {
- if (!str[x] && expected[y] != '*')
diff --git a/debian/patches/CVE-2022-29404.patch b/debian/patches/CVE-2022-29404.patch
deleted file mode 100644
index 259e920..0000000
--- a/debian/patches/CVE-2022-29404.patch
+++ /dev/null
@@ -1,82 +0,0 @@
-From ce259c4061905bf834f9af51c92456cfe8335ddc Mon Sep 17 00:00:00 2001
-From: Eric Covener
-Date: Wed, 1 Jun 2022 12:31:48 +0000
-Subject: [PATCH] Merge r1901497 from trunk:
-
-use a liberal default limit for LimitRequestBody of 1GB
-
-
-git-svn-id: https://svn.apache.org/repos/asf/httpd/httpd/branches/2.4.x@1901499 13f79535-47bb-0310-9956-ffa450edef68
-Origin: https://github.com/apache/httpd/commit/ce259c4061905bf834f9af51c92456cfe8335ddc
----
- modules/http/http_filters.c | 6 ++++++
- modules/proxy/mod_proxy_http.c | 14 --------------
- server/core.c | 2 +-
- 3 files changed, 7 insertions(+), 15 deletions(-)
-
---- a/modules/http/http_filters.c
-+++ b/modules/http/http_filters.c
-@@ -1657,6 +1657,7 @@
- {
- const char *tenc = apr_table_get(r->headers_in, "Transfer-Encoding");
- const char *lenp = apr_table_get(r->headers_in, "Content-Length");
-+ apr_off_t limit_req_body = ap_get_limit_req_body(r);
-
- r->read_body = read_policy;
- r->read_chunked = 0;
-@@ -1695,6 +1696,11 @@
- return HTTP_REQUEST_ENTITY_TOO_LARGE;
- }
-
-+ if (limit_req_body > 0 && (r->remaining > limit_req_body)) {
-+ /* will be logged when the body is discarded */
-+ return HTTP_REQUEST_ENTITY_TOO_LARGE;
-+ }
-+
- #ifdef AP_DEBUG
- {
- /* Make sure ap_getline() didn't leave any droppings. */
---- a/server/core.c
-+++ b/server/core.c
-@@ -61,7 +61,7 @@
-
- /* LimitRequestBody handling */
- #define AP_LIMIT_REQ_BODY_UNSET ((apr_off_t) -1)
--#define AP_DEFAULT_LIMIT_REQ_BODY ((apr_off_t) 0)
-+#define AP_DEFAULT_LIMIT_REQ_BODY ((apr_off_t) 1<<30) /* 1GB */
-
- /* LimitXMLRequestBody handling */
- #define AP_LIMIT_UNSET ((long) -1)
---- a/modules/proxy/mod_proxy_http.c
-+++ b/modules/proxy/mod_proxy_http.c
-@@ -512,12 +512,9 @@
- apr_bucket *e;
- apr_off_t bytes, bytes_spooled = 0, fsize = 0;
- apr_file_t *tmpfile = NULL;
-- apr_off_t limit;
-
- body_brigade = apr_brigade_create(p, bucket_alloc);
-
-- limit = ap_get_limit_req_body(r);
--
- while (!APR_BUCKET_IS_EOS(APR_BRIGADE_FIRST(input_brigade)))
- {
- /* If this brigade contains EOS, either stop or remove it. */
-@@ -532,17 +529,6 @@
- apr_brigade_length(input_brigade, 1, &bytes);
-
- if (bytes_spooled + bytes > MAX_MEM_SPOOL) {
-- /*
-- * LimitRequestBody does not affect Proxy requests (Should it?).
-- * Let it take effect if we decide to store the body in a
-- * temporary file on disk.
-- */
-- if (limit && (bytes_spooled + bytes > limit)) {
-- ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(01088)
-- "Request body is larger than the configured "
-- "limit of %" APR_OFF_T_FMT, limit);
-- return HTTP_REQUEST_ENTITY_TOO_LARGE;
-- }
- /* can't spool any more in memory; write latest brigade to disk */
- if (tmpfile == NULL) {
- const char *temp_dir;
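
Two things happen above: the default LimitRequestBody moves from 0 (unlimited) to 1<<30 bytes, i.e. 1 GiB, and the limit is now compared against the declared Content-Length in the core body-setup code of http_filters.c before anything is read, which lets the duplicate check in mod_proxy_http's spooling path go away. The check itself is a one-liner; this hedged sketch just spells out the arithmetic and the "non-positive means unlimited" convention with invented names:

#include <stdint.h>

#define DEFAULT_LIMIT_REQ_BODY ((int64_t)1 << 30)   /* 1 GiB = 1073741824 bytes */

/* Early check against the declared Content-Length: a non-positive limit keeps
 * the old "unlimited" behaviour, otherwise oversized bodies are rejected
 * before any bytes are read (HTTP 413). */
int body_within_limit(int64_t declared_length, int64_t limit)
{
    return limit <= 0 || declared_length <= limit;
}
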
diff --git a/debian/patches/CVE-2022-30522.patch b/debian/patches/CVE-2022-30522.patch
deleted file mode 100644
index 5ad124e..0000000
--- a/debian/patches/CVE-2022-30522.patch
+++ /dev/null
@@ -1,561 +0,0 @@
-From db47781128e42bd49f55076665b3f6ca4e2bc5e2 Mon Sep 17 00:00:00 2001
-From: Eric Covener
-Date: Wed, 1 Jun 2022 12:50:40 +0000
-Subject: [PATCH] Merge r1901506 from trunk:
-
-limit mod_sed memory use
-
-Resync mod_sed.c with trunk due to merge conflicts.
-
-
-git-svn-id: https://svn.apache.org/repos/asf/httpd/httpd/branches/2.4.x@1901509 13f79535-47bb-0310-9956-ffa450edef68
-Origin: https://github.com/apache/httpd/commit/db47781128e42bd49f55076665b3f6ca4e2bc5e2
----
- modules/filters/mod_sed.c | 75 ++++++++----------
- modules/filters/sed1.c | 158 +++++++++++++++++++++++++++-----------
- 2 files changed, 147 insertions(+), 86 deletions(-)
-
-diff --git a/modules/filters/mod_sed.c b/modules/filters/mod_sed.c
-index 4bdb4ce33a..12cb04a20f 100644
---- a/modules/filters/mod_sed.c
-+++ b/modules/filters/mod_sed.c
-@@ -59,7 +59,7 @@ typedef struct sed_filter_ctxt
- module AP_MODULE_DECLARE_DATA sed_module;
-
- /* This function will be call back from libsed functions if there is any error
-- * happend during execution of sed scripts
-+ * happened during execution of sed scripts
- */
- static apr_status_t log_sed_errf(void *data, const char *error)
- {
-@@ -277,7 +277,7 @@ static apr_status_t sed_response_filter(ap_filter_t *f,
- apr_bucket_brigade *bb)
- {
- apr_bucket *b;
-- apr_status_t status;
-+ apr_status_t status = APR_SUCCESS;
- sed_config *cfg = ap_get_module_config(f->r->per_dir_config,
- &sed_module);
- sed_filter_ctxt *ctx = f->ctx;
-@@ -302,9 +302,9 @@ static apr_status_t sed_response_filter(ap_filter_t *f,
- return status;
- ctx = f->ctx;
- apr_table_unset(f->r->headers_out, "Content-Length");
-- }
-
-- ctx->bb = apr_brigade_create(f->r->pool, f->c->bucket_alloc);
-+ ctx->bb = apr_brigade_create(f->r->pool, f->c->bucket_alloc);
-+ }
-
- /* Here is the main logic. Iterate through all the buckets, read the
- * content of the bucket, call sed_eval_buffer on the data.
-@@ -326,63 +326,52 @@ static apr_status_t sed_response_filter(ap_filter_t *f,
- * in sed's internal buffer which can't be flushed until new line
- * character is arrived.
- */
-- for (b = APR_BRIGADE_FIRST(bb); b != APR_BRIGADE_SENTINEL(bb);) {
-- const char *buf = NULL;
-- apr_size_t bytes = 0;
-+ while (!APR_BRIGADE_EMPTY(bb)) {
-+ b = APR_BRIGADE_FIRST(bb);
- if (APR_BUCKET_IS_EOS(b)) {
-- apr_bucket *b1 = APR_BUCKET_NEXT(b);
- /* Now clean up the internal sed buffer */
- sed_finalize_eval(&ctx->eval, ctx);
- status = flush_output_buffer(ctx);
- if (status != APR_SUCCESS) {
-- clear_ctxpool(ctx);
-- return status;
-+ break;
- }
-+ /* Move the eos bucket to ctx->bb brigade */
- APR_BUCKET_REMOVE(b);
-- /* Insert the eos bucket to ctx->bb brigade */
- APR_BRIGADE_INSERT_TAIL(ctx->bb, b);
-- b = b1;
- }
- else if (APR_BUCKET_IS_FLUSH(b)) {
-- apr_bucket *b1 = APR_BUCKET_NEXT(b);
-- APR_BUCKET_REMOVE(b);
- status = flush_output_buffer(ctx);
- if (status != APR_SUCCESS) {
-- clear_ctxpool(ctx);
-- return status;
-+ break;
- }
-+ /* Move the flush bucket to ctx->bb brigade */
-+ APR_BUCKET_REMOVE(b);
- APR_BRIGADE_INSERT_TAIL(ctx->bb, b);
-- b = b1;
-- }
-- else if (APR_BUCKET_IS_METADATA(b)) {
-- b = APR_BUCKET_NEXT(b);
- }
-- else if (apr_bucket_read(b, &buf, &bytes, APR_BLOCK_READ)
-- == APR_SUCCESS) {
-- apr_bucket *b1 = APR_BUCKET_NEXT(b);
-- status = sed_eval_buffer(&ctx->eval, buf, bytes, ctx);
-- if (status != APR_SUCCESS) {
-- clear_ctxpool(ctx);
-- return status;
-+ else {
-+ if (!APR_BUCKET_IS_METADATA(b)) {
-+ const char *buf = NULL;
-+ apr_size_t bytes = 0;
-+
-+ status = apr_bucket_read(b, &buf, &bytes, APR_BLOCK_READ);
-+ if (status == APR_SUCCESS) {
-+ status = sed_eval_buffer(&ctx->eval, buf, bytes, ctx);
-+ }
-+ if (status != APR_SUCCESS) {
-+ ap_log_rerror(APLOG_MARK, APLOG_ERR, status, f->r, APLOGNO(10394) "error evaluating sed on output");
-+ break;
-+ }
- }
-- APR_BUCKET_REMOVE(b);
- apr_bucket_delete(b);
-- b = b1;
-- }
-- else {
-- apr_bucket *b1 = APR_BUCKET_NEXT(b);
-- APR_BUCKET_REMOVE(b);
-- b = b1;
- }
- }
-- apr_brigade_cleanup(bb);
-- status = flush_output_buffer(ctx);
-- if (status != APR_SUCCESS) {
-- clear_ctxpool(ctx);
-- return status;
-+ if (status == APR_SUCCESS) {
-+ status = flush_output_buffer(ctx);
- }
- if (!APR_BRIGADE_EMPTY(ctx->bb)) {
-- status = ap_pass_brigade(f->next, ctx->bb);
-+ if (status == APR_SUCCESS) {
-+ status = ap_pass_brigade(f->next, ctx->bb);
-+ }
- apr_brigade_cleanup(ctx->bb);
- }
- clear_ctxpool(ctx);
-@@ -433,7 +422,7 @@ static apr_status_t sed_request_filter(ap_filter_t *f,
- * the buckets in bbinp and read the data from buckets and invoke
- * sed_eval_buffer on the data. libsed will generate its output using
- * sed_write_output which will add data in ctx->bb. Do it until it have
-- * atleast one bucket in ctx->bb. At the end of data eos bucket
-+ * at least one bucket in ctx->bb. At the end of data eos bucket
- * should be there.
- *
- * Once eos bucket is seen, then invoke sed_finalize_eval to clear the
-@@ -475,8 +464,10 @@ static apr_status_t sed_request_filter(ap_filter_t *f,
- if (apr_bucket_read(b, &buf, &bytes, APR_BLOCK_READ)
- == APR_SUCCESS) {
- status = sed_eval_buffer(&ctx->eval, buf, bytes, ctx);
-- if (status != APR_SUCCESS)
-+ if (status != APR_SUCCESS) {
-+ ap_log_rerror(APLOG_MARK, APLOG_ERR, status, f->r, APLOGNO(10395) "error evaluating sed on input");
- return status;
-+ }
- flush_output_buffer(ctx);
- }
- }
-diff --git a/modules/filters/sed1.c b/modules/filters/sed1.c
-index 67a8d06515..047f49ba13 100644
---- a/modules/filters/sed1.c
-+++ b/modules/filters/sed1.c
-@@ -87,18 +87,20 @@ static void eval_errf(sed_eval_t *eval, const char *fmt, ...)
- }
-
- #define INIT_BUF_SIZE 1024
-+#define MAX_BUF_SIZE 1024*8192
-
- /*
- * grow_buffer
- */
--static void grow_buffer(apr_pool_t *pool, char **buffer,
-+static apr_status_t grow_buffer(apr_pool_t *pool, char **buffer,
- char **spend, apr_size_t *cursize,
- apr_size_t newsize)
- {
- char* newbuffer = NULL;
- apr_size_t spendsize = 0;
-- if (*cursize >= newsize)
-- return;
-+ if (*cursize >= newsize) {
-+ return APR_SUCCESS;
-+ }
- /* Avoid number of times realloc is called. It could cause huge memory
- * requirement if line size is huge e.g 2 MB */
- if (newsize < *cursize * 2) {
-@@ -107,6 +109,9 @@ static void grow_buffer(apr_pool_t *pool, char **buffer,
-
- /* Align it to 4 KB boundary */
- newsize = (newsize + ((1 << 12) - 1)) & ~((1 << 12) - 1);
-+ if (newsize > MAX_BUF_SIZE) {
-+ return APR_ENOMEM;
-+ }
- newbuffer = apr_pcalloc(pool, newsize);
- if (*spend && *buffer && (*cursize > 0)) {
- spendsize = *spend - *buffer;
-@@ -119,63 +124,77 @@ static void grow_buffer(apr_pool_t *pool, char **buffer,
- if (spend != buffer) {
- *spend = *buffer + spendsize;
- }
-+ return APR_SUCCESS;
- }
-
- /*
- * grow_line_buffer
- */
--static void grow_line_buffer(sed_eval_t *eval, apr_size_t newsize)
-+static apr_status_t grow_line_buffer(sed_eval_t *eval, apr_size_t newsize)
- {
-- grow_buffer(eval->pool, &eval->linebuf, &eval->lspend,
-+ return grow_buffer(eval->pool, &eval->linebuf, &eval->lspend,
- &eval->lsize, newsize);
- }
-
- /*
- * grow_hold_buffer
- */
--static void grow_hold_buffer(sed_eval_t *eval, apr_size_t newsize)
-+static apr_status_t grow_hold_buffer(sed_eval_t *eval, apr_size_t newsize)
- {
-- grow_buffer(eval->pool, &eval->holdbuf, &eval->hspend,
-+ return grow_buffer(eval->pool, &eval->holdbuf, &eval->hspend,
- &eval->hsize, newsize);
- }
-
- /*
- * grow_gen_buffer
- */
--static void grow_gen_buffer(sed_eval_t *eval, apr_size_t newsize,
-+static apr_status_t grow_gen_buffer(sed_eval_t *eval, apr_size_t newsize,
- char **gspend)
- {
-+ apr_status_t rc = 0;
- if (gspend == NULL) {
- gspend = &eval->genbuf;
- }
-- grow_buffer(eval->pool, &eval->genbuf, gspend,
-- &eval->gsize, newsize);
-- eval->lcomend = &eval->genbuf[71];
-+ rc = grow_buffer(eval->pool, &eval->genbuf, gspend,
-+ &eval->gsize, newsize);
-+ if (rc == APR_SUCCESS) {
-+ eval->lcomend = &eval->genbuf[71];
-+ }
-+ return rc;
- }
-
- /*
- * appendmem_to_linebuf
- */
--static void appendmem_to_linebuf(sed_eval_t *eval, const char* sz, apr_size_t len)
-+static apr_status_t appendmem_to_linebuf(sed_eval_t *eval, const char* sz, apr_size_t len)
- {
-+ apr_status_t rc = 0;
- apr_size_t reqsize = (eval->lspend - eval->linebuf) + len;
- if (eval->lsize < reqsize) {
-- grow_line_buffer(eval, reqsize);
-+ rc = grow_line_buffer(eval, reqsize);
-+ if (rc != APR_SUCCESS) {
-+ return rc;
-+ }
- }
- memcpy(eval->lspend, sz, len);
- eval->lspend += len;
-+ return APR_SUCCESS;
- }
-
- /*
- * append_to_linebuf
- */
--static void append_to_linebuf(sed_eval_t *eval, const char* sz,
-+static apr_status_t append_to_linebuf(sed_eval_t *eval, const char* sz,
- step_vars_storage *step_vars)
- {
- apr_size_t len = strlen(sz);
- char *old_linebuf = eval->linebuf;
-+ apr_status_t rc = 0;
- /* Copy string including null character */
-- appendmem_to_linebuf(eval, sz, len + 1);
-+ rc = appendmem_to_linebuf(eval, sz, len + 1);
-+ if (rc != APR_SUCCESS) {
-+ return rc;
-+ }
- --eval->lspend; /* lspend will now point to NULL character */
- /* Sync step_vars after a possible linebuf expansion */
- if (step_vars && old_linebuf != eval->linebuf) {
-@@ -189,68 +208,84 @@ static void append_to_linebuf(sed_eval_t *eval, const char* sz,
- step_vars->locs = step_vars->locs - old_linebuf + eval->linebuf;
- }
- }
-+ return APR_SUCCESS;
- }
-
- /*
- * copy_to_linebuf
- */
--static void copy_to_linebuf(sed_eval_t *eval, const char* sz,
-+static apr_status_t copy_to_linebuf(sed_eval_t *eval, const char* sz,
- step_vars_storage *step_vars)
- {
- eval->lspend = eval->linebuf;
-- append_to_linebuf(eval, sz, step_vars);
-+ return append_to_linebuf(eval, sz, step_vars);
- }
-
- /*
- * append_to_holdbuf
- */
--static void append_to_holdbuf(sed_eval_t *eval, const char* sz)
-+static apr_status_t append_to_holdbuf(sed_eval_t *eval, const char* sz)
- {
- apr_size_t len = strlen(sz);
- apr_size_t reqsize = (eval->hspend - eval->holdbuf) + len + 1;
-+ apr_status_t rc = 0;
- if (eval->hsize <= reqsize) {
-- grow_hold_buffer(eval, reqsize);
-+ rc = grow_hold_buffer(eval, reqsize);
-+ if (rc != APR_SUCCESS) {
-+ return rc;
-+ }
- }
- memcpy(eval->hspend, sz, len + 1);
- /* hspend will now point to NULL character */
- eval->hspend += len;
-+ return APR_SUCCESS;
- }
-
- /*
- * copy_to_holdbuf
- */
--static void copy_to_holdbuf(sed_eval_t *eval, const char* sz)
-+static apr_status_t copy_to_holdbuf(sed_eval_t *eval, const char* sz)
- {
- eval->hspend = eval->holdbuf;
-- append_to_holdbuf(eval, sz);
-+ return append_to_holdbuf(eval, sz);
- }
-
- /*
- * append_to_genbuf
- */
--static void append_to_genbuf(sed_eval_t *eval, const char* sz, char **gspend)
-+static apr_status_t append_to_genbuf(sed_eval_t *eval, const char* sz, char **gspend)
- {
- apr_size_t len = strlen(sz);
- apr_size_t reqsize = (*gspend - eval->genbuf) + len + 1;
-+ apr_status_t rc = 0;
- if (eval->gsize < reqsize) {
-- grow_gen_buffer(eval, reqsize, gspend);
-+ rc = grow_gen_buffer(eval, reqsize, gspend);
-+ if (rc != APR_SUCCESS) {
-+ return rc;
-+ }
- }
- memcpy(*gspend, sz, len + 1);
- /* *gspend will now point to NULL character */
- *gspend += len;
-+ return APR_SUCCESS;
- }
-
- /*
- * copy_to_genbuf
- */
--static void copy_to_genbuf(sed_eval_t *eval, const char* sz)
-+static apr_status_t copy_to_genbuf(sed_eval_t *eval, const char* sz)
- {
- apr_size_t len = strlen(sz);
- apr_size_t reqsize = len + 1;
-+ apr_status_t rc = APR_SUCCESS;;
- if (eval->gsize < reqsize) {
-- grow_gen_buffer(eval, reqsize, NULL);
-+ rc = grow_gen_buffer(eval, reqsize, NULL);
-+ if (rc != APR_SUCCESS) {
-+ return rc;
-+ }
- }
- memcpy(eval->genbuf, sz, len + 1);
-+ return rc;
- }
-
- /*
-@@ -397,6 +432,7 @@ apr_status_t sed_eval_buffer(sed_eval_t *eval, const char *buf, apr_size_t bufsz
- }
-
- while (bufsz) {
-+ apr_status_t rc = 0;
- char *n;
- apr_size_t llen;
-
-@@ -411,7 +447,10 @@ apr_status_t sed_eval_buffer(sed_eval_t *eval, const char *buf, apr_size_t bufsz
- break;
- }
-
-- appendmem_to_linebuf(eval, buf, llen + 1);
-+ rc = appendmem_to_linebuf(eval, buf, llen + 1);
-+ if (rc != APR_SUCCESS) {
-+ return rc;
-+ }
- --eval->lspend;
- /* replace new line character with NULL */
- *eval->lspend = '\0';
-@@ -426,7 +465,10 @@ apr_status_t sed_eval_buffer(sed_eval_t *eval, const char *buf, apr_size_t bufsz
-
- /* Save the leftovers for later */
- if (bufsz) {
-- appendmem_to_linebuf(eval, buf, bufsz);
-+ apr_status_t rc = appendmem_to_linebuf(eval, buf, bufsz);
-+ if (rc != APR_SUCCESS) {
-+ return rc;
-+ }
- }
-
- return APR_SUCCESS;
-@@ -448,6 +490,7 @@ apr_status_t sed_finalize_eval(sed_eval_t *eval, void *fout)
- /* Process leftovers */
- if (eval->lspend > eval->linebuf) {
- apr_status_t rv;
-+ apr_status_t rc = 0;
-
- if (eval->lreadyflag) {
- eval->lreadyflag = 0;
-@@ -457,7 +500,10 @@ apr_status_t sed_finalize_eval(sed_eval_t *eval, void *fout)
- * buffer is not a newline.
- */
- /* Assure space for NULL */
-- append_to_linebuf(eval, "", NULL);
-+ rc = append_to_linebuf(eval, "", NULL);
-+ if (rc != APR_SUCCESS) {
-+ return rc;
-+ }
- }
-
- *eval->lspend = '\0';
-@@ -655,11 +701,15 @@ static apr_status_t dosub(sed_eval_t *eval, char *rhsbuf, int n,
- sp = eval->genbuf;
- rp = rhsbuf;
- sp = place(eval, sp, lp, step_vars->loc1);
-+ if (sp == NULL) {
-+ return APR_EGENERAL;
-+ }
- while ((c = *rp++) != 0) {
- if (c == '&') {
- sp = place(eval, sp, step_vars->loc1, step_vars->loc2);
-- if (sp == NULL)
-+ if (sp == NULL) {
- return APR_EGENERAL;
-+ }
- }
- else if (c == '\\') {
- c = *rp++;
-@@ -675,13 +725,19 @@ static apr_status_t dosub(sed_eval_t *eval, char *rhsbuf, int n,
- *sp++ = c;
- if (sp >= eval->genbuf + eval->gsize) {
- /* expand genbuf and set the sp appropriately */
-- grow_gen_buffer(eval, eval->gsize + 1024, &sp);
-+ rv = grow_gen_buffer(eval, eval->gsize + 1024, &sp);
-+ if (rv != APR_SUCCESS) {
-+ return rv;
-+ }
- }
- }
- lp = step_vars->loc2;
- step_vars->loc2 = sp - eval->genbuf + eval->linebuf;
-- append_to_genbuf(eval, lp, &sp);
-- copy_to_linebuf(eval, eval->genbuf, step_vars);
-+ rv = append_to_genbuf(eval, lp, &sp);
-+ if (rv != APR_SUCCESS) {
-+ return rv;
-+ }
-+ rv = copy_to_linebuf(eval, eval->genbuf, step_vars);
- return rv;
- }
-
-@@ -695,7 +751,10 @@ static char *place(sed_eval_t *eval, char *asp, char *al1, char *al2)
- apr_size_t reqsize = (sp - eval->genbuf) + n + 1;
-
- if (eval->gsize < reqsize) {
-- grow_gen_buffer(eval, reqsize, &sp);
-+ apr_status_t rc = grow_gen_buffer(eval, reqsize, &sp);
-+ if (rc != APR_SUCCESS) {
-+ return NULL;
-+ }
- }
- memcpy(sp, al1, n);
- return sp + n;
-@@ -750,7 +809,8 @@ static apr_status_t command(sed_eval_t *eval, sed_reptr_t *ipc,
- }
-
- p1++;
-- copy_to_linebuf(eval, p1, step_vars);
-+ rv = copy_to_linebuf(eval, p1, step_vars);
-+ if (rv != APR_SUCCESS) return rv;
- eval->jflag++;
- break;
-
-@@ -760,21 +820,27 @@ static apr_status_t command(sed_eval_t *eval, sed_reptr_t *ipc,
- break;
-
- case GCOM:
-- copy_to_linebuf(eval, eval->holdbuf, step_vars);
-+ rv = copy_to_linebuf(eval, eval->holdbuf, step_vars);
-+ if (rv != APR_SUCCESS) return rv;
- break;
-
- case CGCOM:
-- append_to_linebuf(eval, "\n", step_vars);
-- append_to_linebuf(eval, eval->holdbuf, step_vars);
-+ rv = append_to_linebuf(eval, "\n", step_vars);
-+ if (rv != APR_SUCCESS) return rv;
-+ rv = append_to_linebuf(eval, eval->holdbuf, step_vars);
-+ if (rv != APR_SUCCESS) return rv;
- break;
-
- case HCOM:
-- copy_to_holdbuf(eval, eval->linebuf);
-+ rv = copy_to_holdbuf(eval, eval->linebuf);
-+ if (rv != APR_SUCCESS) return rv;
- break;
-
- case CHCOM:
-- append_to_holdbuf(eval, "\n");
-- append_to_holdbuf(eval, eval->linebuf);
-+ rv = append_to_holdbuf(eval, "\n");
-+ if (rv != APR_SUCCESS) return rv;
-+ rv = append_to_holdbuf(eval, eval->linebuf);
-+ if (rv != APR_SUCCESS) return rv;
- break;
-
- case ICOM:
-@@ -896,7 +962,8 @@ static apr_status_t command(sed_eval_t *eval, sed_reptr_t *ipc,
- if (rv != APR_SUCCESS)
- return rv;
- }
-- append_to_linebuf(eval, "\n", step_vars);
-+ rv = append_to_linebuf(eval, "\n", step_vars);
-+ if (rv != APR_SUCCESS) return rv;
- eval->pending = ipc->next;
- break;
-
-@@ -970,9 +1037,12 @@ static apr_status_t command(sed_eval_t *eval, sed_reptr_t *ipc,
- break;
-
- case XCOM:
-- copy_to_genbuf(eval, eval->linebuf);
-- copy_to_linebuf(eval, eval->holdbuf, step_vars);
-- copy_to_holdbuf(eval, eval->genbuf);
-+ rv = copy_to_genbuf(eval, eval->linebuf);
-+ if (rv != APR_SUCCESS) return rv;
-+ rv = copy_to_linebuf(eval, eval->holdbuf, step_vars);
-+ if (rv != APR_SUCCESS) return rv;
-+ rv = copy_to_holdbuf(eval, eval->genbuf);
-+ if (rv != APR_SUCCESS) return rv;
- break;
-
- case YCOM:
---
-2.30.2
-
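
The core of the change above is that mod_sed's buffer growth can now fail: grow_buffer() keeps the "grow at least 2x, round up to 4 KiB" behaviour but returns an error once the request would exceed MAX_BUF_SIZE (1024*8192, i.e. 8 MiB), and every caller propagates that instead of assuming growth succeeded. A standalone version of the capped growth policy using realloc() in place of APR pool allocation (so the memory handling differs from sed1.c; only the sizing logic is the same):

#include <stdlib.h>

#define MAX_BUF_SIZE (1024 * 8192)   /* the same 8 MiB ceiling as the patch */

/* Grow *buf to hold at least `need` bytes: at least double the current
 * capacity, round up to a 4 KiB boundary, and fail rather than grow past
 * MAX_BUF_SIZE.  Callers must check the return value, as in sed1.c. */
int grow_buffer(char **buf, size_t *cap, size_t need)
{
    size_t newcap;
    char *p;

    if (*cap >= need)
        return 0;
    newcap = (need < *cap * 2) ? *cap * 2 : need;   /* limit realloc churn */
    newcap = (newcap + 4095) & ~(size_t)4095;       /* 4 KiB alignment */
    if (newcap > MAX_BUF_SIZE)
        return -1;                                   /* refuse unbounded growth */
    p = realloc(*buf, newcap);
    if (p == NULL)
        return -1;
    *buf = p;
    *cap = newcap;
    return 0;
}
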
diff --git a/debian/patches/CVE-2022-30556.patch b/debian/patches/CVE-2022-30556.patch
deleted file mode 100644
index f9b541d..0000000
--- a/debian/patches/CVE-2022-30556.patch
+++ /dev/null
@@ -1,250 +0,0 @@
-From 3a561759fcb37af179585adb8478922dc9bc6a85 Mon Sep 17 00:00:00 2001
-From: Eric Covener
-Date: Wed, 1 Jun 2022 12:36:39 +0000
-Subject: [PATCH] Merge r1901502 from trunk:
-
-use filters consistently
-
-
-git-svn-id: https://svn.apache.org/repos/asf/httpd/httpd/branches/2.4.x@1901503 13f79535-47bb-0310-9956-ffa450edef68
-Origin: https://github.com/apache/httpd/commit/3a561759fcb37af179585adb8478922dc9bc6a85
----
- modules/lua/lua_request.c | 144 ++++++++++++++------------------------
- 1 file changed, 53 insertions(+), 91 deletions(-)
-
-diff --git a/modules/lua/lua_request.c b/modules/lua/lua_request.c
-index a3e3b613bc..2ec453e86b 100644
---- a/modules/lua/lua_request.c
-+++ b/modules/lua/lua_request.c
-@@ -2227,23 +2227,20 @@ static int lua_websocket_greet(lua_State *L)
- return 0;
- }
-
--static apr_status_t lua_websocket_readbytes(conn_rec* c, char* buffer,
-- apr_off_t len)
-+static apr_status_t lua_websocket_readbytes(conn_rec* c,
-+ apr_bucket_brigade *brigade,
-+ char* buffer, apr_off_t len)
- {
-- apr_bucket_brigade *brigade = apr_brigade_create(c->pool, c->bucket_alloc);
-+ apr_size_t delivered;
- apr_status_t rv;
-+
- rv = ap_get_brigade(c->input_filters, brigade, AP_MODE_READBYTES,
- APR_BLOCK_READ, len);
- if (rv == APR_SUCCESS) {
-- if (!APR_BRIGADE_EMPTY(brigade)) {
-- apr_bucket* bucket = APR_BRIGADE_FIRST(brigade);
-- const char* data = NULL;
-- apr_size_t data_length = 0;
-- rv = apr_bucket_read(bucket, &data, &data_length, APR_BLOCK_READ);
-- if (rv == APR_SUCCESS) {
-- memcpy(buffer, data, len);
-- }
-- apr_bucket_delete(bucket);
-+ delivered = len;
-+ rv = apr_brigade_flatten(brigade, buffer, &delivered);
-+ if ((rv == APR_SUCCESS) && (delivered < len)) {
-+ rv = APR_INCOMPLETE;
- }
- }
- apr_brigade_cleanup(brigade);
-@@ -2273,35 +2270,28 @@ static int lua_websocket_peek(lua_State *L)
-
- static int lua_websocket_read(lua_State *L)
- {
-- apr_socket_t *sock;
- apr_status_t rv;
- int do_read = 1;
- int n = 0;
-- apr_size_t len = 1;
- apr_size_t plen = 0;
- unsigned short payload_short = 0;
- apr_uint64_t payload_long = 0;
- unsigned char *mask_bytes;
- char byte;
-- int plaintext;
--
--
-+ apr_bucket_brigade *brigade;
-+ conn_rec* c;
-+
- request_rec *r = ap_lua_check_request_rec(L, 1);
-- plaintext = ap_lua_ssl_is_https(r->connection) ? 0 : 1;
-+ c = r->connection;
-
--
- mask_bytes = apr_pcalloc(r->pool, 4);
-- sock = ap_get_conn_socket(r->connection);
-+
-+ brigade = apr_brigade_create(r->pool, c->bucket_alloc);
-
- while (do_read) {
- do_read = 0;
- /* Get opcode and FIN bit */
-- if (plaintext) {
-- rv = apr_socket_recv(sock, &byte, &len);
-- }
-- else {
-- rv = lua_websocket_readbytes(r->connection, &byte, 1);
-- }
-+ rv = lua_websocket_readbytes(c, brigade, &byte, 1);
- if (rv == APR_SUCCESS) {
- unsigned char ubyte, fin, opcode, mask, payload;
- ubyte = (unsigned char)byte;
-@@ -2311,12 +2301,7 @@ static int lua_websocket_read(lua_State *L)
- opcode = ubyte & 0xf;
-
- /* Get the payload length and mask bit */
-- if (plaintext) {
-- rv = apr_socket_recv(sock, &byte, &len);
-- }
-- else {
-- rv = lua_websocket_readbytes(r->connection, &byte, 1);
-- }
-+ rv = lua_websocket_readbytes(c, brigade, &byte, 1);
- if (rv == APR_SUCCESS) {
- ubyte = (unsigned char)byte;
- /* Mask is the first bit */
-@@ -2327,40 +2312,25 @@ static int lua_websocket_read(lua_State *L)
-
- /* Extended payload? */
- if (payload == 126) {
-- len = 2;
-- if (plaintext) {
-- /* XXX: apr_socket_recv does not receive len bits, only up to len bits! */
-- rv = apr_socket_recv(sock, (char*) &payload_short, &len);
-- }
-- else {
-- rv = lua_websocket_readbytes(r->connection,
-- (char*) &payload_short, 2);
-- }
-- payload_short = ntohs(payload_short);
-+ rv = lua_websocket_readbytes(c, brigade,
-+ (char*) &payload_short, 2);
-
-- if (rv == APR_SUCCESS) {
-- plen = payload_short;
-- }
-- else {
-+ if (rv != APR_SUCCESS) {
- return 0;
- }
-+
-+ plen = ntohs(payload_short);
- }
- /* Super duper extended payload? */
- if (payload == 127) {
-- len = 8;
-- if (plaintext) {
-- rv = apr_socket_recv(sock, (char*) &payload_long, &len);
-- }
-- else {
-- rv = lua_websocket_readbytes(r->connection,
-- (char*) &payload_long, 8);
-- }
-- if (rv == APR_SUCCESS) {
-- plen = ap_ntoh64(&payload_long);
-- }
-- else {
-+ rv = lua_websocket_readbytes(c, brigade,
-+ (char*) &payload_long, 8);
-+
-+ if (rv != APR_SUCCESS) {
- return 0;
- }
-+
-+ plen = ap_ntoh64(&payload_long);
- }
- ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(03210)
- "Websocket: Reading %" APR_SIZE_T_FMT " (%s) bytes, masking is %s. %s",
-@@ -2369,46 +2339,27 @@ static int lua_websocket_read(lua_State *L)
- mask ? "on" : "off",
- fin ? "This is a final frame" : "more to follow");
- if (mask) {
-- len = 4;
-- if (plaintext) {
-- rv = apr_socket_recv(sock, (char*) mask_bytes, &len);
-- }
-- else {
-- rv = lua_websocket_readbytes(r->connection,
-- (char*) mask_bytes, 4);
-- }
-+ rv = lua_websocket_readbytes(c, brigade,
-+ (char*) mask_bytes, 4);
-+
- if (rv != APR_SUCCESS) {
- return 0;
- }
- }
- if (plen < (HUGE_STRING_LEN*1024) && plen > 0) {
- apr_size_t remaining = plen;
-- apr_size_t received;
-- apr_off_t at = 0;
- char *buffer = apr_palloc(r->pool, plen+1);
- buffer[plen] = 0;
-
-- if (plaintext) {
-- while (remaining > 0) {
-- received = remaining;
-- rv = apr_socket_recv(sock, buffer+at, &received);
-- if (received > 0 ) {
-- remaining -= received;
-- at += received;
-- }
-- }
-- ap_log_rerror(APLOG_MARK, APLOG_TRACE1, 0, r,
-- "Websocket: Frame contained %" APR_OFF_T_FMT " bytes, pushed to Lua stack",
-- at);
-- }
-- else {
-- rv = lua_websocket_readbytes(r->connection, buffer,
-- remaining);
-- ap_log_rerror(APLOG_MARK, APLOG_TRACE1, 0, r,
-- "Websocket: SSL Frame contained %" APR_SIZE_T_FMT " bytes, "\
-- "pushed to Lua stack",
-- remaining);
-+ rv = lua_websocket_readbytes(c, brigade, buffer, remaining);
-+
-+ if (rv != APR_SUCCESS) {
-+ return 0;
- }
-+
-+ ap_log_rerror(APLOG_MARK, APLOG_TRACE1, 0, r,
-+ "Websocket: Frame contained %" APR_SIZE_T_FMT \
-+ " bytes, pushed to Lua stack", remaining);
- if (mask) {
- for (n = 0; n < plen; n++) {
- buffer[n] ^= mask_bytes[n%4];
-@@ -2420,14 +2371,25 @@ static int lua_websocket_read(lua_State *L)
- return 2;
- }
-
--
- /* Decide if we need to react to the opcode or not */
- if (opcode == 0x09) { /* ping */
- char frame[2];
-- plen = 2;
-+ apr_bucket *b;
-+
- frame[0] = 0x8A;
- frame[1] = 0;
-- apr_socket_send(sock, frame, &plen); /* Pong! */
-+
-+ /* Pong! */
-+ b = apr_bucket_transient_create(frame, 2, c->bucket_alloc);
-+ APR_BRIGADE_INSERT_TAIL(brigade, b);
-+
-+ rv = ap_pass_brigade(c->output_filters, brigade);
-+ apr_brigade_cleanup(brigade);
-+
-+ if (rv != APR_SUCCESS) {
-+ return 0;
-+ }
-+
- do_read = 1;
- }
- }
---
-2.30.2
-
diff --git a/debian/patches/CVE-2022-31813.patch b/debian/patches/CVE-2022-31813.patch
deleted file mode 100644
index d2bd341..0000000
--- a/debian/patches/CVE-2022-31813.patch
+++ /dev/null
@@ -1,242 +0,0 @@
-From 956f708b094698ac9ad570d640d4f30eb0df7305 Mon Sep 17 00:00:00 2001
-From: Stefan Eissing
-Date: Wed, 1 Jun 2022 07:51:04 +0000
-Subject: [PATCH] Merge r1901461 from trunk via #320:
-
- *) mod_proxy: ap_proxy_create_hdrbrgd() to clear hop-by-hop first and fixup last.
-
-
-
-git-svn-id: https://svn.apache.org/repos/asf/httpd/httpd/branches/2.4.x@1901480 13f79535-47bb-0310-9956-ffa450edef68
-Origin: https://github.com/apache/httpd/commit/956f708b094698ac9ad570d640d4f30eb0df7305
----
- modules/proxy/proxy_util.c | 153 ++++++++++++++++++++++-----------------------
- 1 file changed, 77 insertions(+), 76 deletions(-)
-
---- a/modules/proxy/proxy_util.c
-+++ b/modules/proxy/proxy_util.c
-@@ -3396,12 +3396,14 @@
- char **old_cl_val,
- char **old_te_val)
- {
-+ int rc = OK;
- conn_rec *c = r->connection;
- int counter;
- char *buf;
-+ apr_table_t *saved_headers_in = r->headers_in;
-+ const char *saved_host = apr_table_get(saved_headers_in, "Host");
- const apr_array_header_t *headers_in_array;
- const apr_table_entry_t *headers_in;
-- apr_table_t *saved_headers_in;
- apr_bucket *e;
- int do_100_continue;
- conn_rec *origin = p_conn->connection;
-@@ -3437,6 +3439,52 @@
- ap_xlate_proto_to_ascii(buf, strlen(buf));
- e = apr_bucket_pool_create(buf, strlen(buf), p, c->bucket_alloc);
- APR_BRIGADE_INSERT_TAIL(header_brigade, e);
-+
-+ /*
-+ * Make a copy on r->headers_in for the request we make to the backend,
-+ * modify the copy in place according to our configuration and connection
-+ * handling, use it to fill in the forwarded headers' brigade, and finally
-+ * restore the saved/original ones in r->headers_in.
-+ *
-+ * Note: We need to take r->pool for apr_table_copy as the key / value
-+ * pairs in r->headers_in have been created out of r->pool and
-+ * p might be (and actually is) a longer living pool.
-+ * This would trigger the bad pool ancestry abort in apr_table_copy if
-+ * apr is compiled with APR_POOL_DEBUG.
-+ *
-+ * icing: if p indeed lives longer than r->pool, we should allocate
-+ * all new header values from r->pool as well and avoid leakage.
-+ */
-+ r->headers_in = apr_table_copy(r->pool, saved_headers_in);
-+
-+ /* Return the original Transfer-Encoding and/or Content-Length values
-+ * then drop the headers, they must be set by the proxy handler based
-+ * on the actual body being forwarded.
-+ */
-+ if ((*old_te_val = (char *)apr_table_get(r->headers_in,
-+ "Transfer-Encoding"))) {
-+ apr_table_unset(r->headers_in, "Transfer-Encoding");
-+ }
-+ if ((*old_cl_val = (char *)apr_table_get(r->headers_in,
-+ "Content-Length"))) {
-+ apr_table_unset(r->headers_in, "Content-Length");
-+ }
-+
-+ /* Clear out hop-by-hop request headers not to forward */
-+ if (ap_proxy_clear_connection(r, r->headers_in) < 0) {
-+ rc = HTTP_BAD_REQUEST;
-+ goto cleanup;
-+ }
-+
-+ /* RFC2616 13.5.1 says we should strip these */
-+ apr_table_unset(r->headers_in, "Keep-Alive");
-+ apr_table_unset(r->headers_in, "Upgrade");
-+ apr_table_unset(r->headers_in, "Trailer");
-+ apr_table_unset(r->headers_in, "TE");
-+
-+ /* We used to send `Host: ` always first, so let's keep it that
-+ * way. No telling which legacy backend is relying no this.
-+ */
- if (dconf->preserve_host == 0) {
- if (ap_strchr_c(uri->hostname, ':')) { /* if literal IPv6 address */
- if (uri->port_str && uri->port != DEFAULT_HTTP_PORT) {
-@@ -3458,7 +3506,7 @@
- /* don't want to use r->hostname, as the incoming header might have a
- * port attached
- */
-- const char* hostname = apr_table_get(r->headers_in,"Host");
-+ const char* hostname = saved_host;
- if (!hostname) {
- hostname = r->server->server_hostname;
- ap_log_rerror(APLOG_MARK, APLOG_WARNING, 0, r, APLOGNO(01092)
-@@ -3472,21 +3520,7 @@
- ap_xlate_proto_to_ascii(buf, strlen(buf));
- e = apr_bucket_pool_create(buf, strlen(buf), p, c->bucket_alloc);
- APR_BRIGADE_INSERT_TAIL(header_brigade, e);
--
-- /*
-- * Save the original headers in here and restore them when leaving, since
-- * we will apply proxy purpose only modifications (eg. clearing hop-by-hop
-- * headers, add Via or X-Forwarded-* or Expect...), whereas the originals
-- * will be needed later to prepare the correct response and logging.
-- *
-- * Note: We need to take r->pool for apr_table_copy as the key / value
-- * pairs in r->headers_in have been created out of r->pool and
-- * p might be (and actually is) a longer living pool.
-- * This would trigger the bad pool ancestry abort in apr_table_copy if
-- * apr is compiled with APR_POOL_DEBUG.
-- */
-- saved_headers_in = r->headers_in;
-- r->headers_in = apr_table_copy(r->pool, saved_headers_in);
-+ apr_table_unset(r->headers_in, "Host");
-
- /* handle Via */
- if (conf->viaopt == via_block) {
-@@ -3561,8 +3595,6 @@
- */
- if (dconf->add_forwarded_headers) {
- if (PROXYREQ_REVERSE == r->proxyreq) {
-- const char *buf;
--
- /* Add X-Forwarded-For: so that the upstream has a chance to
- * determine, where the original request came from.
- */
-@@ -3572,8 +3604,9 @@
- /* Add X-Forwarded-Host: so that upstream knows what the
- * original request hostname was.
- */
-- if ((buf = apr_table_get(r->headers_in, "Host"))) {
-- apr_table_mergen(r->headers_in, "X-Forwarded-Host", buf);
-+ if (saved_host) {
-+ apr_table_mergen(r->headers_in, "X-Forwarded-Host",
-+ saved_host);
- }
-
- /* Add X-Forwarded-Server: so that upstream knows what the
-@@ -3585,67 +3618,37 @@
- }
- }
-
-- proxy_run_fixups(r);
-- if (ap_proxy_clear_connection(r, r->headers_in) < 0) {
-- return HTTP_BAD_REQUEST;
-+ /* Do we want to strip Proxy-Authorization ?
-+ * If we haven't used it, then NO
-+ * If we have used it then MAYBE: RFC2616 says we MAY propagate it.
-+ * So let's make it configurable by env.
-+ */
-+ if (r->user != NULL /* we've authenticated */
-+ && !apr_table_get(r->subprocess_env, "Proxy-Chain-Auth")) {
-+ apr_table_unset(r->headers_in, "Proxy-Authorization");
- }
-
-+ /* for sub-requests, ignore freshness/expiry headers */
-+ if (r->main) {
-+ apr_table_unset(r->headers_in, "If-Match");
-+ apr_table_unset(r->headers_in, "If-Modified-Since");
-+ apr_table_unset(r->headers_in, "If-Range");
-+ apr_table_unset(r->headers_in, "If-Unmodified-Since");
-+ apr_table_unset(r->headers_in, "If-None-Match");
-+ }
-+
-+ /* run hook to fixup the request we are about to send */
-+ proxy_run_fixups(r);
-+
- /* send request headers */
- headers_in_array = apr_table_elts(r->headers_in);
- headers_in = (const apr_table_entry_t *) headers_in_array->elts;
- for (counter = 0; counter < headers_in_array->nelts; counter++) {
- if (headers_in[counter].key == NULL
-- || headers_in[counter].val == NULL
--
-- /* Already sent */
-- || !strcasecmp(headers_in[counter].key, "Host")
--
-- /* Clear out hop-by-hop request headers not to send
-- * RFC2616 13.5.1 says we should strip these headers
-- */
-- || !strcasecmp(headers_in[counter].key, "Keep-Alive")
-- || !strcasecmp(headers_in[counter].key, "TE")
-- || !strcasecmp(headers_in[counter].key, "Trailer")
-- || !strcasecmp(headers_in[counter].key, "Upgrade")
--
-- ) {
-- continue;
-- }
-- /* Do we want to strip Proxy-Authorization ?
-- * If we haven't used it, then NO
-- * If we have used it then MAYBE: RFC2616 says we MAY propagate it.
-- * So let's make it configurable by env.
-- */
-- if (!strcasecmp(headers_in[counter].key,"Proxy-Authorization")) {
-- if (r->user != NULL) { /* we've authenticated */
-- if (!apr_table_get(r->subprocess_env, "Proxy-Chain-Auth")) {
-- continue;
-- }
-- }
-- }
--
-- /* Skip Transfer-Encoding and Content-Length for now.
-- */
-- if (!strcasecmp(headers_in[counter].key, "Transfer-Encoding")) {
-- *old_te_val = headers_in[counter].val;
-- continue;
-- }
-- if (!strcasecmp(headers_in[counter].key, "Content-Length")) {
-- *old_cl_val = headers_in[counter].val;
-+ || headers_in[counter].val == NULL) {
- continue;
- }
-
-- /* for sub-requests, ignore freshness/expiry headers */
-- if (r->main) {
-- if ( !strcasecmp(headers_in[counter].key, "If-Match")
-- || !strcasecmp(headers_in[counter].key, "If-Modified-Since")
-- || !strcasecmp(headers_in[counter].key, "If-Range")
-- || !strcasecmp(headers_in[counter].key, "If-Unmodified-Since")
-- || !strcasecmp(headers_in[counter].key, "If-None-Match")) {
-- continue;
-- }
-- }
--
- buf = apr_pstrcat(p, headers_in[counter].key, ": ",
- headers_in[counter].val, CRLF,
- NULL);
-@@ -3654,11 +3657,9 @@
- APR_BRIGADE_INSERT_TAIL(header_brigade, e);
- }
-
-- /* Restore the original headers in (see comment above),
-- * we won't modify them anymore.
-- */
-+cleanup:
- r->headers_in = saved_headers_in;
-- return OK;
-+ return rc;
- }
-
- PROXY_DECLARE(int) ap_proxy_pass_brigade(apr_bucket_alloc_t *bucket_alloc,
diff --git a/debian/patches/CVE-2022-36760.patch b/debian/patches/CVE-2022-36760.patch
deleted file mode 100644
index ebeefa3..0000000
--- a/debian/patches/CVE-2022-36760.patch
+++ /dev/null
@@ -1,27 +0,0 @@
-From d93e61e3e9622bacff746772cb9c97fdcaed8baf Mon Sep 17 00:00:00 2001
-From: Eric Covener
-Date: Tue, 10 Jan 2023 13:20:55 +0000
-Subject: [PATCH] Merge r1906540 from trunk:
-
-cleanup on error
-
-
-Reviewed By: rpluem, gbechis, covener
-
-
-git-svn-id: https://svn.apache.org/repos/asf/httpd/httpd/branches/2.4.x@1906542 13f79535-47bb-0310-9956-ffa450edef68
----
- modules/proxy/mod_proxy_ajp.c | 2 ++
- 1 file changed, 2 insertions(+)
-
---- a/modules/proxy/mod_proxy_ajp.c
-+++ b/modules/proxy/mod_proxy_ajp.c
-@@ -255,6 +255,8 @@
- ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(10396)
- "%s Transfer-Encoding is not supported",
- tenc);
-+ /* We had a failure: Close connection to backend */
-+ conn->close = 1;
- return HTTP_INTERNAL_SERVER_ERROR;
- }
- } else {
diff --git a/debian/patches/CVE-2022-37436.patch b/debian/patches/CVE-2022-37436.patch
deleted file mode 100644
index a123959..0000000
--- a/debian/patches/CVE-2022-37436.patch
+++ /dev/null
@@ -1,125 +0,0 @@
-From 8b6d55f6a047acf62675e32606b037f5eea8ccc7 Mon Sep 17 00:00:00 2001
-From: Eric Covener
-Date: Tue, 10 Jan 2023 13:20:09 +0000
-Subject: [PATCH] Merge r1906539 from trunk:
-
-fail on bad header
-
-Submitted By: covener
-Reviewed By: covener, rpluem, gbechis
-
-
-git-svn-id: https://svn.apache.org/repos/asf/httpd/httpd/branches/2.4.x@1906541 13f79535-47bb-0310-9956-ffa450edef68
----
- modules/proxy/mod_proxy_http.c | 46 ++++++++++++++++++++--------------
- server/protocol.c | 2 ++
- 2 files changed, 29 insertions(+), 19 deletions(-)
-
---- a/modules/proxy/mod_proxy_http.c
-+++ b/modules/proxy/mod_proxy_http.c
-@@ -1011,7 +1011,7 @@
- * any sense at all, since we depend on buffer still containing
- * what was read by ap_getline() upon return.
- */
--static void ap_proxy_read_headers(request_rec *r, request_rec *rr,
-+static apr_status_t ap_proxy_read_headers(request_rec *r, request_rec *rr,
- char *buffer, int size,
- conn_rec *c, int *pread_len)
- {
-@@ -1043,19 +1043,26 @@
- rc = ap_proxygetline(tmp_bb, buffer, size, rr,
- AP_GETLINE_FOLD | AP_GETLINE_NOSPC_EOL, &len);
-
-- if (len <= 0)
-- break;
-
-- if (APR_STATUS_IS_ENOSPC(rc)) {
-- /* The header could not fit in the provided buffer, warn.
-- * XXX: falls through with the truncated header, 5xx instead?
-- */
-- int trunc = (len > 128 ? 128 : len) / 2;
-- ap_log_rerror(APLOG_MARK, APLOG_WARNING, rc, r, APLOGNO(10124)
-- "header size is over the limit allowed by "
-- "ResponseFieldSize (%d bytes). "
-- "Bad response header: '%.*s[...]%s'",
-- size, trunc, buffer, buffer + len - trunc);
-+ if (rc != APR_SUCCESS) {
-+ if (APR_STATUS_IS_ENOSPC(rc)) {
-+ int trunc = (len > 128 ? 128 : len) / 2;
-+ ap_log_rerror(APLOG_MARK, APLOG_WARNING, rc, r, APLOGNO(10124)
-+ "header size is over the limit allowed by "
-+ "ResponseFieldSize (%d bytes). "
-+ "Bad response header: '%.*s[...]%s'",
-+ size, trunc, buffer, buffer + len - trunc);
-+ }
-+ else {
-+ ap_log_rerror(APLOG_MARK, APLOG_WARNING, rc, r, APLOGNO(10404)
-+ "Error reading headers from backend");
-+ }
-+ r->headers_out = NULL;
-+ return rc;
-+ }
-+
-+ if (len <= 0) {
-+ break;
- }
- else {
- ap_log_rerror(APLOG_MARK, APLOG_TRACE4, 0, r, "%s", buffer);
-@@ -1078,7 +1085,7 @@
- if (psc->badopt == bad_error) {
- /* Nope, it wasn't even an extra HTTP header. Give up. */
- r->headers_out = NULL;
-- return;
-+ return APR_EINVAL;
- }
- else if (psc->badopt == bad_body) {
- /* if we've already started loading headers_out, then
-@@ -1092,13 +1099,13 @@
- "in headers returned by %s (%s)",
- r->uri, r->method);
- *pread_len = len;
-- return;
-+ return APR_SUCCESS;
- }
- else {
- ap_log_rerror(APLOG_MARK, APLOG_WARNING, 0, r, APLOGNO(01099)
- "No HTTP headers returned by %s (%s)",
- r->uri, r->method);
-- return;
-+ return APR_SUCCESS;
- }
- }
- }
-@@ -1128,6 +1135,7 @@
- process_proxy_header(r, dconf, buffer, value);
- saw_headers = 1;
- }
-+ return APR_SUCCESS;
- }
-
-
-@@ -1398,10 +1406,10 @@
- "Set-Cookie", NULL);
-
- /* shove the headers direct into r->headers_out */
-- ap_proxy_read_headers(r, backend->r, buffer, response_field_size, origin,
-- &pread_len);
-+ rc = ap_proxy_read_headers(r, backend->r, buffer, response_field_size,
-+ origin, &pread_len);
-
-- if (r->headers_out == NULL) {
-+ if (rc != APR_SUCCESS || r->headers_out == NULL) {
- ap_log_rerror(APLOG_MARK, APLOG_WARNING, 0, r, APLOGNO(01106)
- "bad HTTP/%d.%d header returned by %s (%s)",
- major, minor, r->uri, r->method);
---- a/server/protocol.c
-+++ b/server/protocol.c
-@@ -508,6 +508,8 @@
- /* PR#43039: We shouldn't accept NULL bytes within the line */
- bytes_handled = strlen(*s);
- if (bytes_handled < *read) {
-+ ap_log_data(APLOG_MARK, APLOG_DEBUG, ap_server_conf,
-+ "NULL bytes in header", *s, *read, 0);
- *read = bytes_handled;
- if (rv == APR_SUCCESS) {
- rv = APR_EINVAL;
diff --git a/debian/patches/build_suexec-custom.patch b/debian/patches/build_suexec-custom.patch
index e03d54b..a509cd5 100644
--- a/debian/patches/build_suexec-custom.patch
+++ b/debian/patches/build_suexec-custom.patch
@@ -4,7 +4,7 @@ Author: Stefan Fritsch
Last-Update: 2012-02-25
--- a/Makefile.in
+++ b/Makefile.in
-@@ -272,23 +272,26 @@
+@@ -293,23 +293,26 @@
install-suexec: install-suexec-$(INSTALL_SUEXEC)
install-suexec-binary:
diff --git a/debian/patches/customize_apxs.patch b/debian/patches/customize_apxs.patch
index 9c75ff1..281b910 100644
--- a/debian/patches/customize_apxs.patch
+++ b/debian/patches/customize_apxs.patch
@@ -8,7 +8,7 @@ Last-Update: 2012-03-17
--- a/support/apxs.in
+++ b/support/apxs.in
-@@ -38,7 +38,7 @@
+@@ -48,7 +48,7 @@
my $CFG_TARGET = get_vars("progname");
my $CFG_SYSCONFDIR = get_vars("sysconfdir");
my $CFG_CFLAGS = join ' ', map { get_vars($_) }
@@ -16,8 +16,8 @@ Last-Update: 2012-03-17
+ qw(SHLTCFLAGS CFLAGS CPPFLAGS NOTEST_CPPFLAGS EXTRA_CPPFLAGS EXTRA_CFLAGS);
my $CFG_LDFLAGS = join ' ', map { get_vars($_) }
qw(LDFLAGS NOTEST_LDFLAGS SH_LDFLAGS);
- my $includedir = get_vars("includedir");
-@@ -49,7 +49,7 @@
+ my $includedir = $destdir . get_vars("includedir");
+@@ -59,7 +59,7 @@
my $sbindir = get_vars("sbindir");
my $CFG_SBINDIR = eval qq("$sbindir");
my $ltflags = $ENV{'LTFLAGS'};
@@ -26,7 +26,7 @@ Last-Update: 2012-03-17
my %internal_vars = map {$_ => 1}
qw(TARGET CC CFLAGS CFLAGS_SHLIB LD_SHLIB LDFLAGS_SHLIB LIBS_SHLIB
-@@ -276,6 +276,7 @@
+@@ -286,6 +286,7 @@
$data =~ s|%TARGET%|$CFG_TARGET|sg;
$data =~ s|%PREFIX%|$prefix|sg;
$data =~ s|%INSTALLBUILDDIR%|$installbuilddir|sg;
@@ -34,7 +34,7 @@ Last-Update: 2012-03-17
my ($mkf, $mods, $src) = ($data =~ m|^(.+)-=#=-\n(.+)-=#=-\n(.+)|s);
-@@ -428,7 +429,7 @@
+@@ -438,7 +439,7 @@
$la =~ s|\.c$|.la|;
my $o = $s;
$o =~ s|\.c$|.o|;
@@ -43,7 +43,7 @@ Last-Update: 2012-03-17
unshift(@objs, $lo);
}
-@@ -469,7 +470,7 @@
+@@ -479,7 +480,7 @@
$opt .= " -rpath $CFG_LIBEXECDIR -module -avoid-version $apr_ldflags";
}
@@ -52,16 +52,16 @@ Last-Update: 2012-03-17
# execute the commands
&execute_cmds(@cmds);
-@@ -503,7 +504,7 @@
+@@ -513,7 +514,7 @@
if ($opt_i) {
- push(@cmds, "$installbuilddir/instdso.sh SH_LIBTOOL='" .
+ push(@cmds, $destdir . "$installbuilddir/instdso.sh SH_LIBTOOL='" .
"$libtool' $f $CFG_LIBEXECDIR");
- push(@cmds, "chmod 755 $CFG_LIBEXECDIR/$t");
+ push(@cmds, "chmod 644 $CFG_LIBEXECDIR/$t");
}
# determine module symbolname and filename
-@@ -539,10 +540,11 @@
+@@ -549,10 +550,11 @@
$filename = "mod_${name}.c";
}
my $dir = $CFG_LIBEXECDIR;
@@ -75,7 +75,7 @@ Last-Update: 2012-03-17
}
# execute the commands
-@@ -550,108 +552,35 @@
+@@ -560,108 +562,35 @@
# activate module via LoadModule/AddModule directive
if ($opt_a or $opt_A) {
@@ -207,7 +207,7 @@ Last-Update: 2012-03-17
}
}
-@@ -671,8 +600,8 @@
+@@ -681,8 +610,8 @@
##
builddir=.
diff --git a/debian/patches/fhs_compliance.patch b/debian/patches/fhs_compliance.patch
index 00f8f71..986d8bc 100644
--- a/debian/patches/fhs_compliance.patch
+++ b/debian/patches/fhs_compliance.patch
@@ -1,33 +1,31 @@
Description: Fix up FHS file locations for apache2 droppings.
Forwarded: not-needed
Author: Adam Conrad
-Last-Update: 2012-02-25
+Reviewed-By: Yadd
+Last-Update: 2023-10-19
+
--- a/configure
+++ b/configure
-@@ -39688,17 +39688,17 @@
+@@ -42812,13 +42812,13 @@
+ ap_prefix="${ap_cur}"
- cat >>confdefs.h <<_ACEOF
--#define HTTPD_ROOT "${ap_prefix}"
-+#define HTTPD_ROOT "/etc/apache2"
- _ACEOF
+-printf "%s\n" "#define HTTPD_ROOT \"${ap_prefix}\"" >>confdefs.h
++printf "%s\n" "#define HTTPD_ROOT \"/etc/apache2\"" >>confdefs.h
- cat >>confdefs.h <<_ACEOF
--#define SERVER_CONFIG_FILE "${rel_sysconfdir}/${progname}.conf"
-+#define SERVER_CONFIG_FILE "${progname}.conf"
- _ACEOF
+-printf "%s\n" "#define SERVER_CONFIG_FILE \"${rel_sysconfdir}/${progname}.conf\"" >>confdefs.h
++printf "%s\n" "#define SERVER_CONFIG_FILE \"${progname}.conf\"" >>confdefs.h
- cat >>confdefs.h <<_ACEOF
--#define AP_TYPES_CONFIG_FILE "${rel_sysconfdir}/mime.types"
-+#define AP_TYPES_CONFIG_FILE "mime.types"
- _ACEOF
+-printf "%s\n" "#define AP_TYPES_CONFIG_FILE \"${rel_sysconfdir}/mime.types\"" >>confdefs.h
++printf "%s\n" "#define AP_TYPES_CONFIG_FILE \"mime.types\"" >>confdefs.h
+ perlbin=`$ac_aux_dir/PrintPath perl`
--- a/configure.in
+++ b/configure.in
-@@ -871,11 +871,11 @@
+@@ -928,11 +928,11 @@
echo $MODLIST | $AWK -f $srcdir/build/build-modules-c.awk > modules.c
APR_EXPAND_VAR(ap_prefix, $prefix)
@@ -53,12 +51,16 @@ Last-Update: 2012-02-25
#endif /* AP_CONFIG_LAYOUT_H */
--- a/include/httpd.h
+++ b/include/httpd.h
-@@ -109,7 +109,7 @@
- #define DOCUMENT_LOCATION HTTPD_ROOT "/docs"
+@@ -107,10 +107,10 @@
+ #ifndef DOCUMENT_LOCATION
+ #ifdef OS2
+ /* Set default for OS/2 file system */
+-#define DOCUMENT_LOCATION HTTPD_ROOT "/docs"
++#define DOCUMENT_LOCATION "/var/www/html"
#else
/* Set default for non OS/2 file system */
-#define DOCUMENT_LOCATION HTTPD_ROOT "/htdocs"
-+#define DOCUMENT_LOCATION "/var/www/html"
++#define DOCUMENT_LOCATION "/var/www/html"
#endif
#endif /* DOCUMENT_LOCATION */
diff --git a/debian/patches/fix-macro.patch b/debian/patches/fix-macro.patch
new file mode 100644
index 0000000..ea83a64
--- /dev/null
+++ b/debian/patches/fix-macro.patch
@@ -0,0 +1,160 @@
+Description: add macro_ignore_empty and macro_ignore_bad_nesting parameters
+Author: Upstream authors
+Origin: upstream, https://svn.apache.org/viewvc/httpd/httpd/trunk/modules/core/mod_macro.c?r1=1770843&r2=1770842&pathrev=1770843
+Forwarded: not-needed
+Reviewed-By: Yadd
+Last-Update: 2021-10-25
+
+--- a/modules/core/mod_macro.c
++++ b/modules/core/mod_macro.c
+@@ -49,6 +49,10 @@
+
+ /********************************************************** MACRO MANAGEMENT */
+
++/* Global warning modifiers */
++int ignore_empty = FALSE; /* no warning about empty argument */
++int ignore_bad_nesting = FALSE; /* no warning about bad nesting */
++
+ /*
+ this is a macro: name, arguments, contents, location.
+ */
+@@ -58,6 +62,8 @@
+ apr_array_header_t *arguments; /* of char*, macro parameter names */
+ apr_array_header_t *contents; /* of char*, macro body */
+ char *location; /* of macro definition, for error messages */
++ int ignore_empty; /* no warning about empty argument */
++ int ignore_bad_nesting; /* no warning about bad nesting */
+ } ap_macro_t;
+
+ /* configuration tokens.
+@@ -67,6 +73,10 @@
+ #define USE_MACRO "Use"
+ #define UNDEF_MACRO "UndefMacro"
+
++#define IGNORE_EMPTY_MACRO_FLAG "/IgnoreEmptyArgs"
++#define IGNORE_BAD_NESTING_MACRO_FLAG "/IgnoreBadNesting"
++#define IGNORE_EMPTY_MACRO_DIRECTIVE "MacroIgnoreEmptyArgs"
++#define IGNORE_BAD_NESTING_MACRO_DIRECTIVE "MacroIgnoreBadNesting"
+ /*
+ Macros are kept globally...
+ They are not per-server or per-directory entities.
+@@ -135,7 +145,8 @@
+ const char *end_token,
+ const char *begin_token,
+ const char *where,
+- apr_array_header_t ** plines)
++ apr_array_header_t ** plines,
++ int ignore_nesting)
+ {
+ apr_array_header_t *lines = apr_array_make(pool, 1, sizeof(char *));
+ char line[MAX_STRING_LEN]; /* sorry, but this is expected by getline:-( */
+@@ -153,7 +164,7 @@
+ /* detect nesting... */
+ if (!strncmp(first, "", 2)) {
+ any_nesting--;
+- if (any_nesting < 0) {
++ if (!ignore_nesting && (any_nesting < 0)) {
+ ap_log_error(APLOG_MARK, APLOG_WARNING,
+ 0, NULL, APLOGNO(02793)
+ "bad (negative) nesting on line %d of %s",
+@@ -180,7 +191,7 @@
+
+ macro_nesting--;
+ if (!macro_nesting) {
+- if (any_nesting) {
++ if (!ignore_nesting && any_nesting) {
+ ap_log_error(APLOG_MARK,
+ APLOG_WARNING, 0, NULL, APLOGNO(02795)
+ "bad cumulated nesting (%+d) in %s",
+@@ -255,6 +266,13 @@
+ tab[i], i + 1, ARG_PREFIX);
+ }
+
++ if ((tab[i][0] == '$') && (tab[i][1] == '{')) {
++ ap_log_error(APLOG_MARK, APLOG_WARNING, 0, NULL, APLOGNO(02805)
++ "macro '%s' (%s) "
++ "argument name '%s' (#%d) clashes with 'Define' syntax '${...}', better use '$(...)'.",
++ macro->name, macro->location, tab[i], i + 1);
++ }
++
+ for (j = i + 1; j < nelts; j++) {
+ size_t ltabj = strlen(tab[j]);
+
+@@ -763,7 +781,25 @@
+ where, ARG_PREFIX);
+ }
+
+- /* get macro parameters */
++ /* get/remove macro modifiers from parameters */
++#define CHECK_MACRO_FLAG(arg_, flag_str, flag_val) if (!strncasecmp(arg_, flag_str, strlen(flag_str))) { flag_val = TRUE; arg_ += strlen(flag_str); if (!*arg) break;}
++ while (*arg == '/') {
++ CHECK_MACRO_FLAG(arg, IGNORE_EMPTY_MACRO_FLAG, macro->ignore_empty);
++ CHECK_MACRO_FLAG(arg, IGNORE_BAD_NESTING_MACRO_FLAG, macro->ignore_bad_nesting);
++ if (*arg != ' ') {
++ char *c = ap_strchr(arg, ' ');
++ if (c) *c = '\0';
++ ap_log_error(APLOG_MARK, APLOG_WARNING, 0, NULL, APLOGNO(02804)
++ "%s: unknown flag '%s'", where, arg);
++ if (c) {
++ *c = ' ';
++ arg = c;
++ }
++ }
++ arg++;
++ }
++
++ /* get macro parameters */
+ macro->arguments = get_arguments(pool, arg);
+
+ errmsg = check_macro_arguments(cmd->temp_pool, macro);
+@@ -774,7 +810,7 @@
+
+ errmsg = get_lines_till_end_token(pool, cmd->config_file,
+ END_MACRO, BEGIN_MACRO,
+- where, ¯o->contents);
++ where, ¯o->contents, ignore_bad_nesting || macro->ignore_bad_nesting);
+
+ if (errmsg) {
+ return apr_psprintf(cmd->temp_pool,
+@@ -860,7 +896,8 @@
+ cmd->config_file->line_number,
+ cmd->config_file->name);
+
+- check_macro_use_arguments(where, replacements);
++ if (!ignore_empty && !macro->ignore_empty)
++ check_macro_use_arguments(where, replacements);
+
+ errmsg = process_content(cmd->temp_pool, macro, replacements,
+ NULL, &contents);
+@@ -911,6 +948,18 @@
+ return NULL;
+ }
+
++static const char *macro_ignore_empty(cmd_parms * cmd, void *dummy)
++{
++ ignore_empty = TRUE;
++ return NULL;
++}
++
++static const char *macro_ignore_bad_nesting(cmd_parms * cmd, void *dummy)
++{
++ ignore_bad_nesting = TRUE;
++ return NULL;
++}
++
+ /************************************************************* EXPORT MODULE */
+
+ /*
+@@ -924,7 +973,11 @@
+ AP_INIT_RAW_ARGS(USE_MACRO, use_macro, NULL, EXEC_ON_READ | OR_ALL,
+ "Use of a macro."),
+ AP_INIT_TAKE1(UNDEF_MACRO, undef_macro, NULL, EXEC_ON_READ | OR_ALL,
+- "Remove a macro definition."),
++ "Remove a macro definition."),
++ AP_INIT_NO_ARGS(IGNORE_EMPTY_MACRO_DIRECTIVE, macro_ignore_empty, NULL, EXEC_ON_READ | OR_ALL,
++ "Globally ignore warnings about empty arguments."),
++ AP_INIT_NO_ARGS(IGNORE_BAD_NESTING_MACRO_DIRECTIVE, macro_ignore_bad_nesting, NULL, EXEC_ON_READ | OR_ALL,
++ "Globally ignore warnings about bad nesting."),
+
+ {NULL}
+ };
diff --git a/debian/patches/import-http2-module-from-2.4.46.patch b/debian/patches/import-http2-module-from-2.4.46.patch
deleted file mode 100644
index cdca37d..0000000
--- a/debian/patches/import-http2-module-from-2.4.46.patch
+++ /dev/null
@@ -1,7588 +0,0 @@
-Description: import http2 module from 2.4.41
- There are too many changes in http2 module to distiguish CVE-2019-9517,
- CVE-2019-10082 and CVE-2019-10081 changes.
-Author: Apache authors
-Bug: https://security-tracker.debian.org/tracker/CVE-2019-9517
- https://security-tracker.debian.org/tracker/CVE-2019-10082
- https://security-tracker.debian.org/tracker/CVE-2019-10081
- https://security-tracker.debian.org/tracker/CVE-2020-9490
- https://security-tracker.debian.org/tracker/CVE-2020-11993
-Forwarded: not-needed
-Reviewed-By: Xavier Guimard
-Last-Update: 2020-08-25
-
---- a/modules/http2/config2.m4
-+++ b/modules/http2/config2.m4
-@@ -31,7 +31,6 @@
- h2_h2.lo dnl
- h2_headers.lo dnl
- h2_mplx.lo dnl
--h2_ngn_shed.lo dnl
- h2_push.lo dnl
- h2_request.lo dnl
- h2_session.lo dnl
---- a/modules/http2/h2.h
-+++ b/modules/http2/h2.h
-@@ -48,12 +48,12 @@
- #define H2_HEADER_PATH_LEN 5
- #define H2_CRLF "\r\n"
-
--/* Max data size to write so it fits inside a TLS record */
--#define H2_DATA_CHUNK_SIZE ((16*1024) - 100 - 9)
--
- /* Size of the frame header itself in HTTP/2 */
- #define H2_FRAME_HDR_LEN 9
-
-+/* Max data size to write so it fits inside a TLS record */
-+#define H2_DATA_CHUNK_SIZE ((16*1024) - 100 - H2_FRAME_HDR_LEN)
-+
- /* Maximum number of padding bytes in a frame, rfc7540 */
- #define H2_MAX_PADLEN 256
- /* Initial default window size, RFC 7540 ch. 6.5.2 */
-@@ -138,7 +138,7 @@
- apr_table_t *headers;
-
- apr_time_t request_time;
-- unsigned int chunked : 1; /* iff requst body needs to be forwarded as chunked */
-+ unsigned int chunked : 1; /* iff request body needs to be forwarded as chunked */
- unsigned int serialize : 1; /* iff this request is written in HTTP/1.1 serialization */
- apr_off_t raw_bytes; /* RAW network bytes that generated this request - if known. */
- };
-@@ -162,5 +162,6 @@
- #define H2_FILTER_DEBUG_NOTE "http2-debug"
- #define H2_HDR_CONFORMANCE "http2-hdr-conformance"
- #define H2_HDR_CONFORMANCE_UNSAFE "unsafe"
-+#define H2_PUSH_MODE_NOTE "http2-push-mode"
-
- #endif /* defined(__mod_h2__h2__) */
---- a/modules/http2/h2_alt_svc.c
-+++ b/modules/http2/h2_alt_svc.c
-@@ -75,7 +75,7 @@
-
- static int h2_alt_svc_handler(request_rec *r)
- {
-- const h2_config *cfg;
-+ apr_array_header_t *alt_svcs;
- int i;
-
- if (r->connection->keepalives > 0) {
-@@ -87,8 +87,8 @@
- return DECLINED;
- }
-
-- cfg = h2_config_sget(r->server);
-- if (r->hostname && cfg && cfg->alt_svcs && cfg->alt_svcs->nelts > 0) {
-+ alt_svcs = h2_config_alt_svcs(r);
-+ if (r->hostname && alt_svcs && alt_svcs->nelts > 0) {
- const char *alt_svc_used = apr_table_get(r->headers_in, "Alt-Svc-Used");
- if (!alt_svc_used) {
- /* We have alt-svcs defined and client is not already using
-@@ -99,7 +99,7 @@
- const char *alt_svc = "";
- const char *svc_ma = "";
- int secure = h2_h2_is_tls(r->connection);
-- int ma = h2_config_geti(cfg, H2_CONF_ALT_SVC_MAX_AGE);
-+ int ma = h2_config_rgeti(r, H2_CONF_ALT_SVC_MAX_AGE);
- if (ma >= 0) {
- svc_ma = apr_psprintf(r->pool, "; ma=%d", ma);
- }
-@@ -107,8 +107,8 @@
- "h2_alt_svc: announce %s for %s:%d",
- (secure? "secure" : "insecure"),
- r->hostname, (int)r->server->port);
-- for (i = 0; i < cfg->alt_svcs->nelts; ++i) {
-- h2_alt_svc *as = h2_alt_svc_IDX(cfg->alt_svcs, i);
-+ for (i = 0; i < alt_svcs->nelts; ++i) {
-+ h2_alt_svc *as = h2_alt_svc_IDX(alt_svcs, i);
- const char *ahost = as->host;
- if (ahost && !apr_strnatcasecmp(ahost, r->hostname)) {
- ahost = NULL;
---- a/modules/http2/h2_bucket_beam.c
-+++ b/modules/http2/h2_bucket_beam.c
-@@ -196,7 +196,7 @@
- * bucket beam that can transport buckets across threads
- ******************************************************************************/
-
--static void mutex_leave(void *ctx, apr_thread_mutex_t *lock)
-+static void mutex_leave(apr_thread_mutex_t *lock)
- {
- apr_thread_mutex_unlock(lock);
- }
-@@ -217,7 +217,7 @@
- static void leave_yellow(h2_bucket_beam *beam, h2_beam_lock *pbl)
- {
- if (pbl->leave) {
-- pbl->leave(pbl->leave_ctx, pbl->mutex);
-+ pbl->leave(pbl->mutex);
- }
- }
-
---- a/modules/http2/h2_bucket_beam.h
-+++ b/modules/http2/h2_bucket_beam.h
-@@ -126,12 +126,11 @@
- * buffers until the transmission is complete. Star gates use a similar trick.
- */
-
--typedef void h2_beam_mutex_leave(void *ctx, struct apr_thread_mutex_t *lock);
-+typedef void h2_beam_mutex_leave(struct apr_thread_mutex_t *lock);
-
- typedef struct {
- apr_thread_mutex_t *mutex;
- h2_beam_mutex_leave *leave;
-- void *leave_ctx;
- } h2_beam_lock;
-
- typedef struct h2_bucket_beam h2_bucket_beam;
---- a/modules/http2/h2_config.c
-+++ b/modules/http2/h2_config.c
-@@ -42,6 +42,55 @@
- #define H2_CONFIG_GET(a, b, n) \
- (((a)->n == DEF_VAL)? (b) : (a))->n
-
-+#define H2_CONFIG_SET(a, n, v) \
-+ ((a)->n = v)
-+
-+#define CONFIG_CMD_SET(cmd,dir,var,val) \
-+ h2_config_seti(((cmd)->path? (dir) : NULL), h2_config_sget((cmd)->server), var, val)
-+
-+#define CONFIG_CMD_SET64(cmd,dir,var,val) \
-+ h2_config_seti64(((cmd)->path? (dir) : NULL), h2_config_sget((cmd)->server), var, val)
-+
-+/* Apache httpd module configuration for h2. */
-+typedef struct h2_config {
-+ const char *name;
-+ int h2_max_streams; /* max concurrent # streams (http2) */
-+ int h2_window_size; /* stream window size (http2) */
-+ int min_workers; /* min # of worker threads/child */
-+ int max_workers; /* max # of worker threads/child */
-+ int max_worker_idle_secs; /* max # of idle seconds for worker */
-+ int stream_max_mem_size; /* max # bytes held in memory/stream */
-+ apr_array_header_t *alt_svcs; /* h2_alt_svc specs for this server */
-+ int alt_svc_max_age; /* seconds clients can rely on alt-svc info*/
-+ int serialize_headers; /* Use serialized HTTP/1.1 headers for
-+ processing, better compatibility */
-+ int h2_direct; /* if mod_h2 is active directly */
-+ int modern_tls_only; /* Accept only modern TLS in HTTP/2 connections */
-+ int h2_upgrade; /* Allow HTTP/1 upgrade to h2/h2c */
-+ apr_int64_t tls_warmup_size; /* Amount of TLS data to send before going full write size */
-+ int tls_cooldown_secs; /* Seconds of idle time before going back to small TLS records */
-+ int h2_push; /* if HTTP/2 server push is enabled */
-+ struct apr_hash_t *priorities;/* map of content-type to h2_priority records */
-+
-+ int push_diary_size; /* # of entries in push diary */
-+ int copy_files; /* if files shall be copied vs setaside on output */
-+ apr_array_header_t *push_list;/* list of h2_push_res configurations */
-+ int early_hints; /* support status code 103 */
-+ int padding_bits;
-+ int padding_always;
-+} h2_config;
-+
-+typedef struct h2_dir_config {
-+ const char *name;
-+ apr_array_header_t *alt_svcs; /* h2_alt_svc specs for this server */
-+ int alt_svc_max_age; /* seconds clients can rely on alt-svc info*/
-+ int h2_upgrade; /* Allow HTTP/1 upgrade to h2/h2c */
-+ int h2_push; /* if HTTP/2 server push is enabled */
-+ apr_array_header_t *push_list;/* list of h2_push_res configurations */
-+ int early_hints; /* support status code 103 */
-+} h2_dir_config;
-+
-+
- static h2_config defconf = {
- "default",
- 100, /* max_streams */
-@@ -64,6 +113,18 @@
- 0, /* copy files across threads */
- NULL, /* push list */
- 0, /* early hints, http status 103 */
-+ 0, /* padding bits */
-+ 1, /* padding always */
-+};
-+
-+static h2_dir_config defdconf = {
-+ "default",
-+ NULL, /* no alt-svcs */
-+ -1, /* alt-svc max age */
-+ -1, /* HTTP/1 Upgrade support */
-+ -1, /* HTTP/2 server push enabled */
-+ NULL, /* push list */
-+ -1, /* early hints, http status 103 */
- };
-
- void h2_config_init(apr_pool_t *pool)
-@@ -71,12 +132,10 @@
- (void)pool;
- }
-
--static void *h2_config_create(apr_pool_t *pool,
-- const char *prefix, const char *x)
-+void *h2_config_create_svr(apr_pool_t *pool, server_rec *s)
- {
- h2_config *conf = (h2_config *)apr_pcalloc(pool, sizeof(h2_config));
-- const char *s = x? x : "unknown";
-- char *name = apr_pstrcat(pool, prefix, "[", s, "]", NULL);
-+ char *name = apr_pstrcat(pool, "srv[", s->defn_name, "]", NULL);
-
- conf->name = name;
- conf->h2_max_streams = DEF_VAL;
-@@ -98,19 +157,11 @@
- conf->copy_files = DEF_VAL;
- conf->push_list = NULL;
- conf->early_hints = DEF_VAL;
-+ conf->padding_bits = DEF_VAL;
-+ conf->padding_always = DEF_VAL;
- return conf;
- }
-
--void *h2_config_create_svr(apr_pool_t *pool, server_rec *s)
--{
-- return h2_config_create(pool, "srv", s->defn_name);
--}
--
--void *h2_config_create_dir(apr_pool_t *pool, char *x)
--{
-- return h2_config_create(pool, "dir", x);
--}
--
- static void *h2_config_merge(apr_pool_t *pool, void *basev, void *addv)
- {
- h2_config *base = (h2_config *)basev;
-@@ -149,25 +200,52 @@
- n->push_list = add->push_list? add->push_list : base->push_list;
- }
- n->early_hints = H2_CONFIG_GET(add, base, early_hints);
-+ n->padding_bits = H2_CONFIG_GET(add, base, padding_bits);
-+ n->padding_always = H2_CONFIG_GET(add, base, padding_always);
- return n;
- }
-
--void *h2_config_merge_dir(apr_pool_t *pool, void *basev, void *addv)
-+void *h2_config_merge_svr(apr_pool_t *pool, void *basev, void *addv)
- {
- return h2_config_merge(pool, basev, addv);
- }
-
--void *h2_config_merge_svr(apr_pool_t *pool, void *basev, void *addv)
-+void *h2_config_create_dir(apr_pool_t *pool, char *x)
- {
-- return h2_config_merge(pool, basev, addv);
-+ h2_dir_config *conf = (h2_dir_config *)apr_pcalloc(pool, sizeof(h2_dir_config));
-+ const char *s = x? x : "unknown";
-+ char *name = apr_pstrcat(pool, "dir[", s, "]", NULL);
-+
-+ conf->name = name;
-+ conf->alt_svc_max_age = DEF_VAL;
-+ conf->h2_upgrade = DEF_VAL;
-+ conf->h2_push = DEF_VAL;
-+ conf->early_hints = DEF_VAL;
-+ return conf;
- }
-
--int h2_config_geti(const h2_config *conf, h2_config_var_t var)
-+void *h2_config_merge_dir(apr_pool_t *pool, void *basev, void *addv)
- {
-- return (int)h2_config_geti64(conf, var);
-+ h2_dir_config *base = (h2_dir_config *)basev;
-+ h2_dir_config *add = (h2_dir_config *)addv;
-+ h2_dir_config *n = (h2_dir_config *)apr_pcalloc(pool, sizeof(h2_dir_config));
-+
-+ n->name = apr_pstrcat(pool, "merged[", add->name, ", ", base->name, "]", NULL);
-+ n->alt_svcs = add->alt_svcs? add->alt_svcs : base->alt_svcs;
-+ n->alt_svc_max_age = H2_CONFIG_GET(add, base, alt_svc_max_age);
-+ n->h2_upgrade = H2_CONFIG_GET(add, base, h2_upgrade);
-+ n->h2_push = H2_CONFIG_GET(add, base, h2_push);
-+ if (add->push_list && base->push_list) {
-+ n->push_list = apr_array_append(pool, base->push_list, add->push_list);
-+ }
-+ else {
-+ n->push_list = add->push_list? add->push_list : base->push_list;
-+ }
-+ n->early_hints = H2_CONFIG_GET(add, base, early_hints);
-+ return n;
- }
-
--apr_int64_t h2_config_geti64(const h2_config *conf, h2_config_var_t var)
-+static apr_int64_t h2_srv_config_geti64(const h2_config *conf, h2_config_var_t var)
- {
- switch(var) {
- case H2_CONF_MAX_STREAMS:
-@@ -204,12 +282,93 @@
- return H2_CONFIG_GET(conf, &defconf, copy_files);
- case H2_CONF_EARLY_HINTS:
- return H2_CONFIG_GET(conf, &defconf, early_hints);
-+ case H2_CONF_PADDING_BITS:
-+ return H2_CONFIG_GET(conf, &defconf, padding_bits);
-+ case H2_CONF_PADDING_ALWAYS:
-+ return H2_CONFIG_GET(conf, &defconf, padding_always);
- default:
- return DEF_VAL;
- }
- }
-
--const h2_config *h2_config_sget(server_rec *s)
-+static void h2_srv_config_seti(h2_config *conf, h2_config_var_t var, int val)
-+{
-+ switch(var) {
-+ case H2_CONF_MAX_STREAMS:
-+ H2_CONFIG_SET(conf, h2_max_streams, val);
-+ break;
-+ case H2_CONF_WIN_SIZE:
-+ H2_CONFIG_SET(conf, h2_window_size, val);
-+ break;
-+ case H2_CONF_MIN_WORKERS:
-+ H2_CONFIG_SET(conf, min_workers, val);
-+ break;
-+ case H2_CONF_MAX_WORKERS:
-+ H2_CONFIG_SET(conf, max_workers, val);
-+ break;
-+ case H2_CONF_MAX_WORKER_IDLE_SECS:
-+ H2_CONFIG_SET(conf, max_worker_idle_secs, val);
-+ break;
-+ case H2_CONF_STREAM_MAX_MEM:
-+ H2_CONFIG_SET(conf, stream_max_mem_size, val);
-+ break;
-+ case H2_CONF_ALT_SVC_MAX_AGE:
-+ H2_CONFIG_SET(conf, alt_svc_max_age, val);
-+ break;
-+ case H2_CONF_SER_HEADERS:
-+ H2_CONFIG_SET(conf, serialize_headers, val);
-+ break;
-+ case H2_CONF_MODERN_TLS_ONLY:
-+ H2_CONFIG_SET(conf, modern_tls_only, val);
-+ break;
-+ case H2_CONF_UPGRADE:
-+ H2_CONFIG_SET(conf, h2_upgrade, val);
-+ break;
-+ case H2_CONF_DIRECT:
-+ H2_CONFIG_SET(conf, h2_direct, val);
-+ break;
-+ case H2_CONF_TLS_WARMUP_SIZE:
-+ H2_CONFIG_SET(conf, tls_warmup_size, val);
-+ break;
-+ case H2_CONF_TLS_COOLDOWN_SECS:
-+ H2_CONFIG_SET(conf, tls_cooldown_secs, val);
-+ break;
-+ case H2_CONF_PUSH:
-+ H2_CONFIG_SET(conf, h2_push, val);
-+ break;
-+ case H2_CONF_PUSH_DIARY_SIZE:
-+ H2_CONFIG_SET(conf, push_diary_size, val);
-+ break;
-+ case H2_CONF_COPY_FILES:
-+ H2_CONFIG_SET(conf, copy_files, val);
-+ break;
-+ case H2_CONF_EARLY_HINTS:
-+ H2_CONFIG_SET(conf, early_hints, val);
-+ break;
-+ case H2_CONF_PADDING_BITS:
-+ H2_CONFIG_SET(conf, padding_bits, val);
-+ break;
-+ case H2_CONF_PADDING_ALWAYS:
-+ H2_CONFIG_SET(conf, padding_always, val);
-+ break;
-+ default:
-+ break;
-+ }
-+}
-+
-+static void h2_srv_config_seti64(h2_config *conf, h2_config_var_t var, apr_int64_t val)
-+{
-+ switch(var) {
-+ case H2_CONF_TLS_WARMUP_SIZE:
-+ H2_CONFIG_SET(conf, tls_warmup_size, val);
-+ break;
-+ default:
-+ h2_srv_config_seti(conf, var, (int)val);
-+ break;
-+ }
-+}
-+
-+static h2_config *h2_config_sget(server_rec *s)
- {
- h2_config *cfg = (h2_config *)ap_get_module_config(s->module_config,
- &http2_module);
-@@ -217,9 +376,162 @@
- return cfg;
- }
-
--const struct h2_priority *h2_config_get_priority(const h2_config *conf,
-- const char *content_type)
-+static const h2_dir_config *h2_config_rget(request_rec *r)
-+{
-+ h2_dir_config *cfg = (h2_dir_config *)ap_get_module_config(r->per_dir_config,
-+ &http2_module);
-+ ap_assert(cfg);
-+ return cfg;
-+}
-+
-+static apr_int64_t h2_dir_config_geti64(const h2_dir_config *conf, h2_config_var_t var)
-+{
-+ switch(var) {
-+ case H2_CONF_ALT_SVC_MAX_AGE:
-+ return H2_CONFIG_GET(conf, &defdconf, alt_svc_max_age);
-+ case H2_CONF_UPGRADE:
-+ return H2_CONFIG_GET(conf, &defdconf, h2_upgrade);
-+ case H2_CONF_PUSH:
-+ return H2_CONFIG_GET(conf, &defdconf, h2_push);
-+ case H2_CONF_EARLY_HINTS:
-+ return H2_CONFIG_GET(conf, &defdconf, early_hints);
-+
-+ default:
-+ return DEF_VAL;
-+ }
-+}
-+
-+static void h2_config_seti(h2_dir_config *dconf, h2_config *conf, h2_config_var_t var, int val)
-+{
-+ int set_srv = !dconf;
-+ if (dconf) {
-+ switch(var) {
-+ case H2_CONF_ALT_SVC_MAX_AGE:
-+ H2_CONFIG_SET(dconf, alt_svc_max_age, val);
-+ break;
-+ case H2_CONF_UPGRADE:
-+ H2_CONFIG_SET(dconf, h2_upgrade, val);
-+ break;
-+ case H2_CONF_PUSH:
-+ H2_CONFIG_SET(dconf, h2_push, val);
-+ break;
-+ case H2_CONF_EARLY_HINTS:
-+ H2_CONFIG_SET(dconf, early_hints, val);
-+ break;
-+ default:
-+ /* not handled in dir_conf */
-+ set_srv = 1;
-+ break;
-+ }
-+ }
-+
-+ if (set_srv) {
-+ h2_srv_config_seti(conf, var, val);
-+ }
-+}
-+
-+static void h2_config_seti64(h2_dir_config *dconf, h2_config *conf, h2_config_var_t var, apr_int64_t val)
- {
-+ int set_srv = !dconf;
-+ if (dconf) {
-+ switch(var) {
-+ default:
-+ /* not handled in dir_conf */
-+ set_srv = 1;
-+ break;
-+ }
-+ }
-+
-+ if (set_srv) {
-+ h2_srv_config_seti64(conf, var, val);
-+ }
-+}
-+
-+static const h2_config *h2_config_get(conn_rec *c)
-+{
-+ h2_ctx *ctx = h2_ctx_get(c, 0);
-+
-+ if (ctx) {
-+ if (ctx->config) {
-+ return ctx->config;
-+ }
-+ else if (ctx->server) {
-+ ctx->config = h2_config_sget(ctx->server);
-+ return ctx->config;
-+ }
-+ }
-+
-+ return h2_config_sget(c->base_server);
-+}
-+
-+int h2_config_cgeti(conn_rec *c, h2_config_var_t var)
-+{
-+ return (int)h2_srv_config_geti64(h2_config_get(c), var);
-+}
-+
-+apr_int64_t h2_config_cgeti64(conn_rec *c, h2_config_var_t var)
-+{
-+ return h2_srv_config_geti64(h2_config_get(c), var);
-+}
-+
-+int h2_config_sgeti(server_rec *s, h2_config_var_t var)
-+{
-+ return (int)h2_srv_config_geti64(h2_config_sget(s), var);
-+}
-+
-+apr_int64_t h2_config_sgeti64(server_rec *s, h2_config_var_t var)
-+{
-+ return h2_srv_config_geti64(h2_config_sget(s), var);
-+}
-+
-+int h2_config_geti(request_rec *r, server_rec *s, h2_config_var_t var)
-+{
-+ return (int)h2_config_geti64(r, s, var);
-+}
-+
-+apr_int64_t h2_config_geti64(request_rec *r, server_rec *s, h2_config_var_t var)
-+{
-+ apr_int64_t mode = r? (int)h2_dir_config_geti64(h2_config_rget(r), var) : DEF_VAL;
-+ return (mode != DEF_VAL)? mode : h2_config_sgeti64(s, var);
-+}
-+
-+int h2_config_rgeti(request_rec *r, h2_config_var_t var)
-+{
-+ return h2_config_geti(r, r->server, var);
-+}
-+
-+apr_int64_t h2_config_rgeti64(request_rec *r, h2_config_var_t var)
-+{
-+ return h2_config_geti64(r, r->server, var);
-+}
-+
-+apr_array_header_t *h2_config_push_list(request_rec *r)
-+{
-+ const h2_config *sconf;
-+ const h2_dir_config *conf = h2_config_rget(r);
-+
-+ if (conf && conf->push_list) {
-+ return conf->push_list;
-+ }
-+ sconf = h2_config_sget(r->server);
-+ return sconf? sconf->push_list : NULL;
-+}
-+
-+apr_array_header_t *h2_config_alt_svcs(request_rec *r)
-+{
-+ const h2_config *sconf;
-+ const h2_dir_config *conf = h2_config_rget(r);
-+
-+ if (conf && conf->alt_svcs) {
-+ return conf->alt_svcs;
-+ }
-+ sconf = h2_config_sget(r->server);
-+ return sconf? sconf->alt_svcs : NULL;
-+}
-+
-+const struct h2_priority *h2_cconfig_get_priority(conn_rec *c, const char *content_type)
-+{
-+ const h2_config *conf = h2_config_get(c);
- if (content_type && conf->priorities) {
- size_t len = strcspn(content_type, "; \t");
- h2_priority *prio = apr_hash_get(conf->priorities, content_type, len);
-@@ -228,166 +540,156 @@
- return NULL;
- }
-
--static const char *h2_conf_set_max_streams(cmd_parms *parms,
-- void *arg, const char *value)
-+static const char *h2_conf_set_max_streams(cmd_parms *cmd,
-+ void *dirconf, const char *value)
- {
-- h2_config *cfg = (h2_config *)h2_config_sget(parms->server);
-- cfg->h2_max_streams = (int)apr_atoi64(value);
-- (void)arg;
-- if (cfg->h2_max_streams < 1) {
-+ apr_int64_t ival = (int)apr_atoi64(value);
-+ if (ival < 1) {
- return "value must be > 0";
- }
-+ CONFIG_CMD_SET64(cmd, dirconf, H2_CONF_MAX_STREAMS, ival);
- return NULL;
- }
-
--static const char *h2_conf_set_window_size(cmd_parms *parms,
-- void *arg, const char *value)
-+static const char *h2_conf_set_window_size(cmd_parms *cmd,
-+ void *dirconf, const char *value)
- {
-- h2_config *cfg = (h2_config *)h2_config_sget(parms->server);
-- cfg->h2_window_size = (int)apr_atoi64(value);
-- (void)arg;
-- if (cfg->h2_window_size < 1024) {
-+ int val = (int)apr_atoi64(value);
-+ if (val < 1024) {
- return "value must be >= 1024";
- }
-+ CONFIG_CMD_SET(cmd, dirconf, H2_CONF_WIN_SIZE, val);
- return NULL;
- }
-
--static const char *h2_conf_set_min_workers(cmd_parms *parms,
-- void *arg, const char *value)
-+static const char *h2_conf_set_min_workers(cmd_parms *cmd,
-+ void *dirconf, const char *value)
- {
-- h2_config *cfg = (h2_config *)h2_config_sget(parms->server);
-- cfg->min_workers = (int)apr_atoi64(value);
-- (void)arg;
-- if (cfg->min_workers < 1) {
-+ int val = (int)apr_atoi64(value);
-+ if (val < 1) {
- return "value must be > 0";
- }
-+ CONFIG_CMD_SET(cmd, dirconf, H2_CONF_MIN_WORKERS, val);
- return NULL;
- }
-
--static const char *h2_conf_set_max_workers(cmd_parms *parms,
-- void *arg, const char *value)
-+static const char *h2_conf_set_max_workers(cmd_parms *cmd,
-+ void *dirconf, const char *value)
- {
-- h2_config *cfg = (h2_config *)h2_config_sget(parms->server);
-- cfg->max_workers = (int)apr_atoi64(value);
-- (void)arg;
-- if (cfg->max_workers < 1) {
-+ int val = (int)apr_atoi64(value);
-+ if (val < 1) {
- return "value must be > 0";
- }
-+ CONFIG_CMD_SET(cmd, dirconf, H2_CONF_MAX_WORKERS, val);
- return NULL;
- }
-
--static const char *h2_conf_set_max_worker_idle_secs(cmd_parms *parms,
-- void *arg, const char *value)
-+static const char *h2_conf_set_max_worker_idle_secs(cmd_parms *cmd,
-+ void *dirconf, const char *value)
- {
-- h2_config *cfg = (h2_config *)h2_config_sget(parms->server);
-- cfg->max_worker_idle_secs = (int)apr_atoi64(value);
-- (void)arg;
-- if (cfg->max_worker_idle_secs < 1) {
-+ int val = (int)apr_atoi64(value);
-+ if (val < 1) {
- return "value must be > 0";
- }
-+ CONFIG_CMD_SET(cmd, dirconf, H2_CONF_MAX_WORKER_IDLE_SECS, val);
- return NULL;
- }
-
--static const char *h2_conf_set_stream_max_mem_size(cmd_parms *parms,
-- void *arg, const char *value)
-+static const char *h2_conf_set_stream_max_mem_size(cmd_parms *cmd,
-+ void *dirconf, const char *value)
- {
-- h2_config *cfg = (h2_config *)h2_config_sget(parms->server);
--
--
-- cfg->stream_max_mem_size = (int)apr_atoi64(value);
-- (void)arg;
-- if (cfg->stream_max_mem_size < 1024) {
-+ int val = (int)apr_atoi64(value);
-+ if (val < 1024) {
- return "value must be >= 1024";
- }
-+ CONFIG_CMD_SET(cmd, dirconf, H2_CONF_STREAM_MAX_MEM, val);
- return NULL;
- }
-
--static const char *h2_add_alt_svc(cmd_parms *parms,
-- void *arg, const char *value)
-+static const char *h2_add_alt_svc(cmd_parms *cmd,
-+ void *dirconf, const char *value)
- {
- if (value && *value) {
-- h2_config *cfg = (h2_config *)h2_config_sget(parms->server);
-- h2_alt_svc *as = h2_alt_svc_parse(value, parms->pool);
-+ h2_alt_svc *as = h2_alt_svc_parse(value, cmd->pool);
- if (!as) {
- return "unable to parse alt-svc specifier";
- }
-- if (!cfg->alt_svcs) {
-- cfg->alt_svcs = apr_array_make(parms->pool, 5, sizeof(h2_alt_svc*));
-+
-+ if (cmd->path) {
-+ h2_dir_config *dcfg = (h2_dir_config *)dirconf;
-+ if (!dcfg->alt_svcs) {
-+ dcfg->alt_svcs = apr_array_make(cmd->pool, 5, sizeof(h2_alt_svc*));
-+ }
-+ APR_ARRAY_PUSH(dcfg->alt_svcs, h2_alt_svc*) = as;
-+ }
-+ else {
-+ h2_config *cfg = (h2_config *)h2_config_sget(cmd->server);
-+ if (!cfg->alt_svcs) {
-+ cfg->alt_svcs = apr_array_make(cmd->pool, 5, sizeof(h2_alt_svc*));
-+ }
-+ APR_ARRAY_PUSH(cfg->alt_svcs, h2_alt_svc*) = as;
- }
-- APR_ARRAY_PUSH(cfg->alt_svcs, h2_alt_svc*) = as;
- }
-- (void)arg;
- return NULL;
- }
-
--static const char *h2_conf_set_alt_svc_max_age(cmd_parms *parms,
-- void *arg, const char *value)
-+static const char *h2_conf_set_alt_svc_max_age(cmd_parms *cmd,
-+ void *dirconf, const char *value)
- {
-- h2_config *cfg = (h2_config *)h2_config_sget(parms->server);
-- cfg->alt_svc_max_age = (int)apr_atoi64(value);
-- (void)arg;
-+ int val = (int)apr_atoi64(value);
-+ CONFIG_CMD_SET(cmd, dirconf, H2_CONF_ALT_SVC_MAX_AGE, val);
- return NULL;
- }
-
--static const char *h2_conf_set_session_extra_files(cmd_parms *parms,
-- void *arg, const char *value)
-+static const char *h2_conf_set_session_extra_files(cmd_parms *cmd,
-+ void *dirconf, const char *value)
- {
- /* deprecated, ignore */
-- (void)arg;
-+ (void)dirconf;
- (void)value;
-- ap_log_perror(APLOG_MARK, APLOG_WARNING, 0, parms->pool, /* NO LOGNO */
-+ ap_log_perror(APLOG_MARK, APLOG_WARNING, 0, cmd->pool, /* NO LOGNO */
- "H2SessionExtraFiles is obsolete and will be ignored");
- return NULL;
- }
-
--static const char *h2_conf_set_serialize_headers(cmd_parms *parms,
-- void *arg, const char *value)
-+static const char *h2_conf_set_serialize_headers(cmd_parms *cmd,
-+ void *dirconf, const char *value)
- {
-- h2_config *cfg = (h2_config *)h2_config_sget(parms->server);
- if (!strcasecmp(value, "On")) {
-- cfg->serialize_headers = 1;
-+ CONFIG_CMD_SET(cmd, dirconf, H2_CONF_SER_HEADERS, 1);
- return NULL;
- }
- else if (!strcasecmp(value, "Off")) {
-- cfg->serialize_headers = 0;
-+ CONFIG_CMD_SET(cmd, dirconf, H2_CONF_SER_HEADERS, 0);
- return NULL;
- }
--
-- (void)arg;
- return "value must be On or Off";
- }
-
--static const char *h2_conf_set_direct(cmd_parms *parms,
-- void *arg, const char *value)
-+static const char *h2_conf_set_direct(cmd_parms *cmd,
-+ void *dirconf, const char *value)
- {
-- h2_config *cfg = (h2_config *)h2_config_sget(parms->server);
- if (!strcasecmp(value, "On")) {
-- cfg->h2_direct = 1;
-+ CONFIG_CMD_SET(cmd, dirconf, H2_CONF_DIRECT, 1);
- return NULL;
- }
- else if (!strcasecmp(value, "Off")) {
-- cfg->h2_direct = 0;
-+ CONFIG_CMD_SET(cmd, dirconf, H2_CONF_DIRECT, 0);
- return NULL;
- }
--
-- (void)arg;
- return "value must be On or Off";
- }
-
--static const char *h2_conf_set_push(cmd_parms *parms,
-- void *arg, const char *value)
-+static const char *h2_conf_set_push(cmd_parms *cmd, void *dirconf, const char *value)
- {
-- h2_config *cfg = (h2_config *)h2_config_sget(parms->server);
- if (!strcasecmp(value, "On")) {
-- cfg->h2_push = 1;
-+ CONFIG_CMD_SET(cmd, dirconf, H2_CONF_PUSH, 1);
- return NULL;
- }
- else if (!strcasecmp(value, "Off")) {
-- cfg->h2_push = 0;
-+ CONFIG_CMD_SET(cmd, dirconf, H2_CONF_PUSH, 0);
- return NULL;
- }
--
-- (void)arg;
- return "value must be On or Off";
- }
-
-@@ -419,7 +721,7 @@
- else if (!strcasecmp("BEFORE", sdependency)) {
- dependency = H2_DEPENDANT_BEFORE;
- if (sweight) {
-- return "dependency 'Before' does not allow a weight";
-+ return "dependecy 'Before' does not allow a weight";
- }
- }
- else if (!strcasecmp("INTERLEAVED", sdependency)) {
-@@ -447,100 +749,88 @@
- return NULL;
- }
-
--static const char *h2_conf_set_modern_tls_only(cmd_parms *parms,
-- void *arg, const char *value)
-+static const char *h2_conf_set_modern_tls_only(cmd_parms *cmd,
-+ void *dirconf, const char *value)
- {
-- h2_config *cfg = (h2_config *)h2_config_sget(parms->server);
- if (!strcasecmp(value, "On")) {
-- cfg->modern_tls_only = 1;
-+ CONFIG_CMD_SET(cmd, dirconf, H2_CONF_MODERN_TLS_ONLY, 1);
- return NULL;
- }
- else if (!strcasecmp(value, "Off")) {
-- cfg->modern_tls_only = 0;
-+ CONFIG_CMD_SET(cmd, dirconf, H2_CONF_MODERN_TLS_ONLY, 0);
- return NULL;
- }
--
-- (void)arg;
- return "value must be On or Off";
- }
-
--static const char *h2_conf_set_upgrade(cmd_parms *parms,
-- void *arg, const char *value)
-+static const char *h2_conf_set_upgrade(cmd_parms *cmd,
-+ void *dirconf, const char *value)
- {
-- h2_config *cfg = (h2_config *)h2_config_sget(parms->server);
- if (!strcasecmp(value, "On")) {
-- cfg->h2_upgrade = 1;
-+ CONFIG_CMD_SET(cmd, dirconf, H2_CONF_UPGRADE, 1);
- return NULL;
- }
- else if (!strcasecmp(value, "Off")) {
-- cfg->h2_upgrade = 0;
-+ CONFIG_CMD_SET(cmd, dirconf, H2_CONF_UPGRADE, 0);
- return NULL;
- }
--
-- (void)arg;
- return "value must be On or Off";
- }
-
--static const char *h2_conf_set_tls_warmup_size(cmd_parms *parms,
-- void *arg, const char *value)
-+static const char *h2_conf_set_tls_warmup_size(cmd_parms *cmd,
-+ void *dirconf, const char *value)
- {
-- h2_config *cfg = (h2_config *)h2_config_sget(parms->server);
-- cfg->tls_warmup_size = apr_atoi64(value);
-- (void)arg;
-+ apr_int64_t val = apr_atoi64(value);
-+ CONFIG_CMD_SET64(cmd, dirconf, H2_CONF_TLS_WARMUP_SIZE, val);
- return NULL;
- }
-
--static const char *h2_conf_set_tls_cooldown_secs(cmd_parms *parms,
-- void *arg, const char *value)
-+static const char *h2_conf_set_tls_cooldown_secs(cmd_parms *cmd,
-+ void *dirconf, const char *value)
- {
-- h2_config *cfg = (h2_config *)h2_config_sget(parms->server);
-- cfg->tls_cooldown_secs = (int)apr_atoi64(value);
-- (void)arg;
-+ apr_int64_t val = (int)apr_atoi64(value);
-+ CONFIG_CMD_SET64(cmd, dirconf, H2_CONF_TLS_COOLDOWN_SECS, val);
- return NULL;
- }
-
--static const char *h2_conf_set_push_diary_size(cmd_parms *parms,
-- void *arg, const char *value)
-+static const char *h2_conf_set_push_diary_size(cmd_parms *cmd,
-+ void *dirconf, const char *value)
- {
-- h2_config *cfg = (h2_config *)h2_config_sget(parms->server);
-- (void)arg;
-- cfg->push_diary_size = (int)apr_atoi64(value);
-- if (cfg->push_diary_size < 0) {
-+ int val = (int)apr_atoi64(value);
-+ if (val < 0) {
- return "value must be >= 0";
- }
-- if (cfg->push_diary_size > 0 && (cfg->push_diary_size & (cfg->push_diary_size-1))) {
-+ if (val > 0 && (val & (val-1))) {
- return "value must a power of 2";
- }
-- if (cfg->push_diary_size > (1 << 15)) {
-+ if (val > (1 << 15)) {
- return "value must <= 65536";
- }
-+ CONFIG_CMD_SET(cmd, dirconf, H2_CONF_PUSH_DIARY_SIZE, val);
- return NULL;
- }
-
--static const char *h2_conf_set_copy_files(cmd_parms *parms,
-- void *arg, const char *value)
-+static const char *h2_conf_set_copy_files(cmd_parms *cmd,
-+ void *dirconf, const char *value)
- {
-- h2_config *cfg = (h2_config *)arg;
- if (!strcasecmp(value, "On")) {
-- cfg->copy_files = 1;
-+ CONFIG_CMD_SET(cmd, dirconf, H2_CONF_COPY_FILES, 1);
- return NULL;
- }
- else if (!strcasecmp(value, "Off")) {
-- cfg->copy_files = 0;
-+ CONFIG_CMD_SET(cmd, dirconf, H2_CONF_COPY_FILES, 0);
- return NULL;
- }
--
-- (void)arg;
- return "value must be On or Off";
- }
-
--static void add_push(apr_pool_t *pool, h2_config *conf, h2_push_res *push)
-+static void add_push(apr_array_header_t **plist, apr_pool_t *pool, h2_push_res *push)
- {
- h2_push_res *new;
-- if (!conf->push_list) {
-- conf->push_list = apr_array_make(pool, 10, sizeof(*push));
-+ if (!*plist) {
-+ *plist = apr_array_make(pool, 10, sizeof(*push));
- }
-- new = apr_array_push(conf->push_list);
-+ new = apr_array_push(*plist);
- new->uri_ref = push->uri_ref;
- new->critical = push->critical;
- }
-@@ -549,8 +839,6 @@
- const char *arg1, const char *arg2,
- const char *arg3)
- {
-- h2_config *dconf = (h2_config*)dirconf ;
-- h2_config *sconf = (h2_config*)h2_config_sget(cmd->server);
- h2_push_res push;
- const char *last = arg3;
-
-@@ -575,42 +863,54 @@
- }
- }
-
-- /* server command? set both */
-- if (cmd->path == NULL) {
-- add_push(cmd->pool, sconf, &push);
-- add_push(cmd->pool, dconf, &push);
-+ if (cmd->path) {
-+ add_push(&(((h2_dir_config*)dirconf)->push_list), cmd->pool, &push);
- }
- else {
-- add_push(cmd->pool, dconf, &push);
-+ add_push(&(h2_config_sget(cmd->server)->push_list), cmd->pool, &push);
- }
-+ return NULL;
-+}
-
-+static const char *h2_conf_set_early_hints(cmd_parms *cmd,
-+ void *dirconf, const char *value)
-+{
-+ int val;
-+
-+ if (!strcasecmp(value, "On")) val = 1;
-+ else if (!strcasecmp(value, "Off")) val = 0;
-+ else return "value must be On or Off";
-+
-+ CONFIG_CMD_SET(cmd, dirconf, H2_CONF_EARLY_HINTS, val);
-+ if (cmd->path) {
-+ ap_log_perror(APLOG_MARK, APLOG_WARNING, 0, cmd->pool,
-+ "H2EarlyHints = %d on path %s", val, cmd->path);
-+ }
- return NULL;
- }
-
--static const char *h2_conf_set_early_hints(cmd_parms *parms,
-- void *arg, const char *value)
-+static const char *h2_conf_set_padding(cmd_parms *cmd, void *dirconf, const char *value)
- {
-- h2_config *cfg = (h2_config *)h2_config_sget(parms->server);
-- if (!strcasecmp(value, "On")) {
-- cfg->early_hints = 1;
-- return NULL;
-+ int val;
-+
-+ val = (int)apr_atoi64(value);
-+ if (val < 0) {
-+ return "number of bits must be >= 0";
- }
-- else if (!strcasecmp(value, "Off")) {
-- cfg->early_hints = 0;
-- return NULL;
-+ if (val > 8) {
-+ return "number of bits must be <= 8";
- }
--
-- (void)arg;
-- return "value must be On or Off";
-+ CONFIG_CMD_SET(cmd, dirconf, H2_CONF_PADDING_BITS, val);
-+ return NULL;
- }
-
-+
- void h2_get_num_workers(server_rec *s, int *minw, int *maxw)
- {
- int threads_per_child = 0;
-- const h2_config *config = h2_config_sget(s);
-
-- *minw = h2_config_geti(config, H2_CONF_MIN_WORKERS);
-- *maxw = h2_config_geti(config, H2_CONF_MAX_WORKERS);
-+ *minw = h2_config_sgeti(s, H2_CONF_MIN_WORKERS);
-+ *maxw = h2_config_sgeti(s, H2_CONF_MAX_WORKERS);
- ap_mpm_query(AP_MPMQ_MAX_THREADS, &threads_per_child);
-
- if (*minw <= 0) {
-@@ -652,7 +952,7 @@
- AP_INIT_TAKE1("H2ModernTLSOnly", h2_conf_set_modern_tls_only, NULL,
- RSRC_CONF, "off to not impose RFC 7540 restrictions on TLS"),
- AP_INIT_TAKE1("H2Upgrade", h2_conf_set_upgrade, NULL,
-- RSRC_CONF, "on to allow HTTP/1 Upgrades to h2/h2c"),
-+ RSRC_CONF|OR_AUTHCFG, "on to allow HTTP/1 Upgrades to h2/h2c"),
- AP_INIT_TAKE1("H2Direct", h2_conf_set_direct, NULL,
- RSRC_CONF, "on to enable direct HTTP/2 mode"),
- AP_INIT_TAKE1("H2SessionExtraFiles", h2_conf_set_session_extra_files, NULL,
-@@ -662,7 +962,7 @@
- AP_INIT_TAKE1("H2TLSCoolDownSecs", h2_conf_set_tls_cooldown_secs, NULL,
- RSRC_CONF, "seconds of idle time on TLS before shrinking writes"),
- AP_INIT_TAKE1("H2Push", h2_conf_set_push, NULL,
-- RSRC_CONF, "off to disable HTTP/2 server push"),
-+ RSRC_CONF|OR_AUTHCFG, "off to disable HTTP/2 server push"),
- AP_INIT_TAKE23("H2PushPriority", h2_conf_add_push_priority, NULL,
- RSRC_CONF, "define priority of PUSHed resources per content type"),
- AP_INIT_TAKE1("H2PushDiarySize", h2_conf_set_push_diary_size, NULL,
-@@ -670,33 +970,12 @@
- AP_INIT_TAKE1("H2CopyFiles", h2_conf_set_copy_files, NULL,
- OR_FILEINFO, "on to perform copy of file data"),
- AP_INIT_TAKE123("H2PushResource", h2_conf_add_push_res, NULL,
-- OR_FILEINFO, "add a resource to be pushed in this location/on this server."),
-+ OR_FILEINFO|OR_AUTHCFG, "add a resource to be pushed in this location/on this server."),
- AP_INIT_TAKE1("H2EarlyHints", h2_conf_set_early_hints, NULL,
- RSRC_CONF, "on to enable interim status 103 responses"),
-+ AP_INIT_TAKE1("H2Padding", h2_conf_set_padding, NULL,
-+ RSRC_CONF, "set payload padding"),
- AP_END_CMD
- };
-
-
--const h2_config *h2_config_rget(request_rec *r)
--{
-- h2_config *cfg = (h2_config *)ap_get_module_config(r->per_dir_config,
-- &http2_module);
-- return cfg? cfg : h2_config_sget(r->server);
--}
--
--const h2_config *h2_config_get(conn_rec *c)
--{
-- h2_ctx *ctx = h2_ctx_get(c, 0);
--
-- if (ctx) {
-- if (ctx->config) {
-- return ctx->config;
-- }
-- else if (ctx->server) {
-- ctx->config = h2_config_sget(ctx->server);
-- return ctx->config;
-- }
-- }
--
-- return h2_config_sget(c->base_server);
--}
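
The refactored setters above validate their argument before storing it through the per-directory CONFIG_CMD_SET helpers. The H2PushDiarySize check relies on the standard bit trick that a positive integer v is a power of two exactly when (v & (v - 1)) is zero. Below is a minimal standalone sketch of that validation in plain C; check_diary_size and the sample values are illustrative and not part of mod_http2.

#include <stdio.h>

/* Illustrative re-statement of the H2PushDiarySize checks: a positive
 * value v is a power of two iff (v & (v - 1)) == 0. Note the patch
 * rejects val > (1 << 15), i.e. 32768, while its message says 65536. */
static const char *check_diary_size(int val)
{
    if (val < 0)
        return "value must be >= 0";
    if (val > 0 && (val & (val - 1)))
        return "value must be a power of 2";
    if (val > (1 << 15))
        return "value too large";
    return NULL;                  /* NULL means: accepted */
}

int main(void)
{
    const int samples[] = { 0, 1, 256, 300, 4096, 32768, 65536 };
    size_t i;
    for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
        const char *err = check_diary_size(samples[i]);
        printf("%6d -> %s\n", samples[i], err ? err : "accepted");
    }
    return 0;
}
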
---- a/modules/http2/h2_config.h
-+++ b/modules/http2/h2_config.h
-@@ -42,6 +42,8 @@
- H2_CONF_PUSH_DIARY_SIZE,
- H2_CONF_COPY_FILES,
- H2_CONF_EARLY_HINTS,
-+ H2_CONF_PADDING_BITS,
-+ H2_CONF_PADDING_ALWAYS,
- } h2_config_var_t;
-
- struct apr_hash_t;
-@@ -53,33 +55,6 @@
- int critical;
- } h2_push_res;
-
--/* Apache httpd module configuration for h2. */
--typedef struct h2_config {
-- const char *name;
-- int h2_max_streams; /* max concurrent # streams (http2) */
-- int h2_window_size; /* stream window size (http2) */
-- int min_workers; /* min # of worker threads/child */
-- int max_workers; /* max # of worker threads/child */
-- int max_worker_idle_secs; /* max # of idle seconds for worker */
-- int stream_max_mem_size; /* max # bytes held in memory/stream */
-- apr_array_header_t *alt_svcs; /* h2_alt_svc specs for this server */
-- int alt_svc_max_age; /* seconds clients can rely on alt-svc info*/
-- int serialize_headers; /* Use serialized HTTP/1.1 headers for
-- processing, better compatibility */
-- int h2_direct; /* if mod_h2 is active directly */
-- int modern_tls_only; /* Accept only modern TLS in HTTP/2 connections */
-- int h2_upgrade; /* Allow HTTP/1 upgrade to h2/h2c */
-- apr_int64_t tls_warmup_size; /* Amount of TLS data to send before going full write size */
-- int tls_cooldown_secs; /* Seconds of idle time before going back to small TLS records */
-- int h2_push; /* if HTTP/2 server push is enabled */
-- struct apr_hash_t *priorities;/* map of content-type to h2_priority records */
--
-- int push_diary_size; /* # of entries in push diary */
-- int copy_files; /* if files shall be copied vs setaside on output */
-- apr_array_header_t *push_list;/* list of h2_push_res configurations */
-- int early_hints; /* support status code 103 */
--} h2_config;
--
-
- void *h2_config_create_dir(apr_pool_t *pool, char *x);
- void *h2_config_merge_dir(apr_pool_t *pool, void *basev, void *addv);
-@@ -88,19 +63,37 @@
-
- extern const command_rec h2_cmds[];
-
--const h2_config *h2_config_get(conn_rec *c);
--const h2_config *h2_config_sget(server_rec *s);
--const h2_config *h2_config_rget(request_rec *r);
-+int h2_config_geti(request_rec *r, server_rec *s, h2_config_var_t var);
-+apr_int64_t h2_config_geti64(request_rec *r, server_rec *s, h2_config_var_t var);
-
--int h2_config_geti(const h2_config *conf, h2_config_var_t var);
--apr_int64_t h2_config_geti64(const h2_config *conf, h2_config_var_t var);
-+/**
-+ * Get the configured value for variable at the given connection.
-+ */
-+int h2_config_cgeti(conn_rec *c, h2_config_var_t var);
-+apr_int64_t h2_config_cgeti64(conn_rec *c, h2_config_var_t var);
-+
-+/**
-+ * Get the configured value for variable at the given server.
-+ */
-+int h2_config_sgeti(server_rec *s, h2_config_var_t var);
-+apr_int64_t h2_config_sgeti64(server_rec *s, h2_config_var_t var);
-+
-+/**
-+ * Get the configured value for variable at the given request,
-+ * if configured for the request location.
-+ * Fallback to request server config otherwise.
-+ */
-+int h2_config_rgeti(request_rec *r, h2_config_var_t var);
-+apr_int64_t h2_config_rgeti64(request_rec *r, h2_config_var_t var);
-
--void h2_get_num_workers(server_rec *s, int *minw, int *maxw);
-+apr_array_header_t *h2_config_push_list(request_rec *r);
-+apr_array_header_t *h2_config_alt_svcs(request_rec *r);
-
-+
-+void h2_get_num_workers(server_rec *s, int *minw, int *maxw);
- void h2_config_init(apr_pool_t *pool);
-
--const struct h2_priority *h2_config_get_priority(const h2_config *conf,
-- const char *content_type);
-+const struct h2_priority *h2_cconfig_get_priority(conn_rec *c, const char *content_type);
-
- #endif /* __mod_h2__h2_config_h__ */
-
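
The header now replaces the public h2_config struct with typed accessors: the *_sgeti variants read the server config, *_cgeti the connection's server, and *_rgeti prefers a per-location value and falls back to the server. A simplified sketch of that fallback idea follows; the struct and function names are made up, the real code dispatches on h2_config_var_t and merges dir configs.

/* Simplified sketch of the fallback behind h2_config_rgeti(): a
 * per-directory value of SKETCH_UNSET defers to the server setting. */
#define SKETCH_UNSET (-1)

typedef struct { int h2_push; } sketch_srv_config;
typedef struct { int h2_push; } sketch_dir_config;

static int sketch_rget_push(const sketch_dir_config *dconf,
                            const sketch_srv_config *sconf)
{
    if (dconf && dconf->h2_push != SKETCH_UNSET) {
        return dconf->h2_push;   /* set in <Location>/<Directory> context */
    }
    return sconf->h2_push;       /* fall back to the virtual host value */
}
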
---- a/modules/http2/h2_conn.c
-+++ b/modules/http2/h2_conn.c
-@@ -18,6 +18,7 @@
- #include
-
- #include
-+#include
-
- #include
- #include
-@@ -79,7 +80,7 @@
- mpm_type = H2_MPM_PREFORK;
- mpm_module = m;
- /* While http2 can work really well on prefork, it collides
-- * today's use case for prefork: runnning single-thread app engines
-+ * today's use case for prefork: running single-thread app engines
- * like php. If we restrict h2_workers to 1 per process, php will
- * work fine, but browser will be limited to 1 active request at a
- * time. */
-@@ -109,7 +110,6 @@
-
- apr_status_t h2_conn_child_init(apr_pool_t *pool, server_rec *s)
- {
-- const h2_config *config = h2_config_sget(s);
- apr_status_t status = APR_SUCCESS;
- int minw, maxw;
- int max_threads_per_child = 0;
-@@ -129,7 +129,7 @@
-
- h2_get_num_workers(s, &minw, &maxw);
-
-- idle_secs = h2_config_geti(config, H2_CONF_MAX_WORKER_IDLE_SECS);
-+ idle_secs = h2_config_sgeti(s, H2_CONF_MAX_WORKER_IDLE_SECS);
- ap_log_error(APLOG_MARK, APLOG_TRACE3, 0, s,
- "h2_workers: min=%d max=%d, mthrpchild=%d, idle_secs=%d",
- minw, maxw, max_threads_per_child, idle_secs);
-@@ -138,7 +138,7 @@
- ap_register_input_filter("H2_IN", h2_filter_core_input,
- NULL, AP_FTYPE_CONNECTION);
-
-- status = h2_mplx_child_init(pool, s);
-+ status = h2_mplx_m_child_init(pool, s);
-
- if (status == APR_SUCCESS) {
- status = apr_socket_create(&dummy_socket, APR_INET, SOCK_STREAM,
-@@ -172,9 +172,10 @@
- return mpm_module;
- }
-
--apr_status_t h2_conn_setup(h2_ctx *ctx, conn_rec *c, request_rec *r)
-+apr_status_t h2_conn_setup(conn_rec *c, request_rec *r, server_rec *s)
- {
- h2_session *session;
-+ h2_ctx *ctx;
- apr_status_t status;
-
- if (!workers) {
-@@ -183,24 +184,25 @@
- return APR_EGENERAL;
- }
-
-- if (r) {
-- status = h2_session_rcreate(&session, r, ctx, workers);
-- }
-- else {
-- status = h2_session_create(&session, c, ctx, workers);
-- }
--
-- if (status == APR_SUCCESS) {
-+ if (APR_SUCCESS == (status = h2_session_create(&session, c, r, s, workers))) {
-+ ctx = h2_ctx_get(c, 1);
- h2_ctx_session_set(ctx, session);
-+
-+ /* remove the input filter of mod_reqtimeout, now that the connection
-+ * is established and we have swtiched to h2. reqtimeout has supervised
-+ * possibly configured handshake timeouts and needs to get out of the way
-+ * now since the rest of its state handling assumes http/1.x to take place. */
-+ ap_remove_input_filter_byhandle(c->input_filters, "reqtimeout");
- }
-+
- return status;
- }
-
--apr_status_t h2_conn_run(struct h2_ctx *ctx, conn_rec *c)
-+apr_status_t h2_conn_run(conn_rec *c)
- {
- apr_status_t status;
- int mpm_state = 0;
-- h2_session *session = h2_ctx_session_get(ctx);
-+ h2_session *session = h2_ctx_get_session(c);
-
- ap_assert(session);
- do {
-@@ -235,6 +237,13 @@
- case H2_SESSION_ST_BUSY:
- case H2_SESSION_ST_WAIT:
- c->cs->state = CONN_STATE_WRITE_COMPLETION;
-+ if (c->cs && (session->open_streams || !session->remote.emitted_count)) {
-+ /* let the MPM know that we are not done and want
-+ * the Timeout behaviour instead of a KeepAliveTimeout
-+ * See PR 63534.
-+ */
-+ c->cs->sense = CONN_SENSE_WANT_READ;
-+ }
- break;
- case H2_SESSION_ST_CLEANUP:
- case H2_SESSION_ST_DONE:
-@@ -249,7 +258,7 @@
-
- apr_status_t h2_conn_pre_close(struct h2_ctx *ctx, conn_rec *c)
- {
-- h2_session *session = h2_ctx_session_get(ctx);
-+ h2_session *session = h2_ctx_get_session(c);
- if (session) {
- apr_status_t status = h2_session_pre_close(session, async_mpm);
- return (status == APR_SUCCESS)? DONE : status;
-@@ -257,7 +266,7 @@
- return DONE;
- }
-
--conn_rec *h2_slave_create(conn_rec *master, int slave_id, apr_pool_t *parent)
-+conn_rec *h2_secondary_create(conn_rec *master, int sec_id, apr_pool_t *parent)
- {
- apr_allocator_t *allocator;
- apr_status_t status;
-@@ -268,11 +277,11 @@
-
- ap_assert(master);
- ap_log_cerror(APLOG_MARK, APLOG_TRACE3, 0, master,
-- "h2_stream(%ld-%d): create slave", master->id, slave_id);
-+ "h2_stream(%ld-%d): create secondary", master->id, sec_id);
-
- /* We create a pool with its own allocator to be used for
- * processing a request. This is the only way to have the processing
-- * independant of its parent pool in the sense that it can work in
-+ * independent of its parent pool in the sense that it can work in
- * another thread. Also, the new allocator needs its own mutex to
- * synchronize sub-pools.
- */
-@@ -281,18 +290,18 @@
- status = apr_pool_create_ex(&pool, parent, NULL, allocator);
- if (status != APR_SUCCESS) {
- ap_log_cerror(APLOG_MARK, APLOG_ERR, status, master,
-- APLOGNO(10004) "h2_session(%ld-%d): create slave pool",
-- master->id, slave_id);
-+ APLOGNO(10004) "h2_session(%ld-%d): create secondary pool",
-+ master->id, sec_id);
- return NULL;
- }
- apr_allocator_owner_set(allocator, pool);
-- apr_pool_tag(pool, "h2_slave_conn");
-+ apr_pool_tag(pool, "h2_secondary_conn");
-
- c = (conn_rec *) apr_palloc(pool, sizeof(conn_rec));
- if (c == NULL) {
- ap_log_cerror(APLOG_MARK, APLOG_ERR, APR_ENOMEM, master,
-- APLOGNO(02913) "h2_session(%ld-%d): create slave",
-- master->id, slave_id);
-+ APLOGNO(02913) "h2_session(%ld-%d): create secondary",
-+ master->id, sec_id);
- apr_pool_destroy(pool);
- return NULL;
- }
-@@ -310,26 +319,28 @@
- c->filter_conn_ctx = NULL;
- #endif
- c->bucket_alloc = apr_bucket_alloc_create(pool);
-+#if !AP_MODULE_MAGIC_AT_LEAST(20180720, 1)
- c->data_in_input_filters = 0;
- c->data_in_output_filters = 0;
-+#endif
- /* prevent mpm_event from making wrong assumptions about this connection,
- * like e.g. using its socket for an async read check. */
- c->clogging_input_filters = 1;
- c->log = NULL;
- c->log_id = apr_psprintf(pool, "%ld-%d",
-- master->id, slave_id);
-+ master->id, sec_id);
- c->aborted = 0;
-- /* We cannot install the master connection socket on the slaves, as
-+ /* We cannot install the master connection socket on the secondary, as
- * modules mess with timeouts/blocking of the socket, with
- * unwanted side effects to the master connection processing.
-- * Fortunately, since we never use the slave socket, we can just install
-+ * Fortunately, since we never use the secondary socket, we can just install
- * a single, process-wide dummy and everyone is happy.
- */
- ap_set_module_config(c->conn_config, &core_module, dummy_socket);
- /* TODO: these should be unique to this thread */
- c->sbh = master->sbh;
-- /* TODO: not all mpm modules have learned about slave connections yet.
-- * copy their config from master to slave.
-+ /* TODO: not all mpm modules have learned about secondary connections yet.
-+ * copy their config from master to secondary.
- */
- if ((mpm = h2_conn_mpm_module()) != NULL) {
- cfg = ap_get_module_config(master->conn_config, mpm);
-@@ -337,38 +348,38 @@
- }
-
- ap_log_cerror(APLOG_MARK, APLOG_TRACE3, 0, c,
-- "h2_slave(%s): created", c->log_id);
-+ "h2_secondary(%s): created", c->log_id);
- return c;
- }
-
--void h2_slave_destroy(conn_rec *slave)
-+void h2_secondary_destroy(conn_rec *secondary)
- {
-- ap_log_cerror(APLOG_MARK, APLOG_TRACE3, 0, slave,
-- "h2_slave(%s): destroy", slave->log_id);
-- slave->sbh = NULL;
-- apr_pool_destroy(slave->pool);
-+ ap_log_cerror(APLOG_MARK, APLOG_TRACE3, 0, secondary,
-+ "h2_secondary(%s): destroy", secondary->log_id);
-+ secondary->sbh = NULL;
-+ apr_pool_destroy(secondary->pool);
- }
-
--apr_status_t h2_slave_run_pre_connection(conn_rec *slave, apr_socket_t *csd)
-+apr_status_t h2_secondary_run_pre_connection(conn_rec *secondary, apr_socket_t *csd)
- {
-- if (slave->keepalives == 0) {
-+ if (secondary->keepalives == 0) {
- /* Simulate that we had already a request on this connection. Some
- * hooks trigger special behaviour when keepalives is 0.
- * (Not necessarily in pre_connection, but later. Set it here, so it
- * is in place.) */
-- slave->keepalives = 1;
-+ secondary->keepalives = 1;
- /* We signal that this connection will be closed after the request.
- * Which is true in that sense that we throw away all traffic data
-- * on this slave connection after each requests. Although we might
-+ * on this secondary connection after each request. Although we might
- * reuse internal structures like memory pools.
- * The wanted effect of this is that httpd does not try to clean up
- * any dangling data on this connection when a request is done. Which
-- * is unneccessary on a h2 stream.
-+ * is unnecessary on a h2 stream.
- */
-- slave->keepalive = AP_CONN_CLOSE;
-- return ap_run_pre_connection(slave, csd);
-+ secondary->keepalive = AP_CONN_CLOSE;
-+ return ap_run_pre_connection(secondary, csd);
- }
-- ap_assert(slave->output_filters);
-+ ap_assert(secondary->output_filters);
- return APR_SUCCESS;
- }
-
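
h2_secondary_create() gives each secondary (per-stream) connection a pool with its own allocator and mutex, so the stream can be processed on a worker thread independently of the master connection's pool. A minimal APR sketch of that pattern follows; create_thread_safe_pool is a hypothetical helper and the fragment assumes APR has already been initialized.

#include <apr_pools.h>
#include <apr_allocator.h>
#include <apr_thread_mutex.h>

/* Create a child pool that owns its allocator and serializes allocator
 * use with a private mutex, so the pool can be handed to another thread. */
static apr_status_t create_thread_safe_pool(apr_pool_t **out, apr_pool_t *parent)
{
    apr_allocator_t *allocator = NULL;
    apr_thread_mutex_t *mutex = NULL;
    apr_pool_t *pool = NULL;
    apr_status_t rv;

    rv = apr_allocator_create(&allocator);
    if (rv != APR_SUCCESS)
        return rv;
    rv = apr_pool_create_ex(&pool, parent, NULL, allocator);
    if (rv != APR_SUCCESS) {
        apr_allocator_destroy(allocator);
        return rv;
    }
    apr_allocator_owner_set(allocator, pool);   /* pool cleanup frees allocator */
    apr_pool_tag(pool, "sketch_secondary_pool");

    rv = apr_thread_mutex_create(&mutex, APR_THREAD_MUTEX_DEFAULT, pool);
    if (rv != APR_SUCCESS) {
        apr_pool_destroy(pool);
        return rv;
    }
    apr_allocator_mutex_set(allocator, mutex);  /* protect allocator across threads */

    *out = pool;
    return APR_SUCCESS;
}
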
---- a/modules/http2/h2_conn.h
-+++ b/modules/http2/h2_conn.h
-@@ -23,21 +23,21 @@
- /**
- * Setup the connection and our context for HTTP/2 processing
- *
-- * @param ctx the http2 context to setup
- * @param c the connection HTTP/2 is starting on
- * @param r the upgrade request that still awaits an answer, optional
-+ * @param s the server selected for this connection (can be != c->base_server)
- */
--apr_status_t h2_conn_setup(struct h2_ctx *ctx, conn_rec *c, request_rec *r);
-+apr_status_t h2_conn_setup(conn_rec *c, request_rec *r, server_rec *s);
-
- /**
- * Run the HTTP/2 connection in synchronous fashion.
- * Return when the HTTP/2 session is done
- * and the connection will close or a fatal error occurred.
- *
-- * @param ctx the http2 context to run
-+ * @param c the http2 connection to run
- * @return APR_SUCCESS when session is done.
- */
--apr_status_t h2_conn_run(struct h2_ctx *ctx, conn_rec *c);
-+apr_status_t h2_conn_run(conn_rec *c);
-
- /**
- * The connection is about to close. If we have not send a GOAWAY
-@@ -68,10 +68,10 @@
- const char *h2_conn_mpm_name(void);
- int h2_mpm_supported(void);
-
--conn_rec *h2_slave_create(conn_rec *master, int slave_id, apr_pool_t *parent);
--void h2_slave_destroy(conn_rec *slave);
-+conn_rec *h2_secondary_create(conn_rec *master, int sec_id, apr_pool_t *parent);
-+void h2_secondary_destroy(conn_rec *secondary);
-
--apr_status_t h2_slave_run_pre_connection(conn_rec *slave, apr_socket_t *csd);
--void h2_slave_run_connection(conn_rec *slave);
-+apr_status_t h2_secondary_run_pre_connection(conn_rec *secondary, apr_socket_t *csd);
-+void h2_secondary_run_connection(conn_rec *secondary);
-
- #endif /* defined(__mod_h2__h2_conn__) */
---- a/modules/http2/h2_conn_io.c
-+++ b/modules/http2/h2_conn_io.c
-@@ -40,12 +40,17 @@
- * ~= 1300 bytes */
- #define WRITE_SIZE_INITIAL 1300
-
--/* Calculated like this: max TLS record size 16*1024
-- * - 40 (IP) - 20 (TCP) - 40 (TCP options)
-- * - TLS overhead (60-100)
-- * which seems to create less TCP packets overall
-+/* The maximum we'd like to write in one chunk is
-+ * the max size of a TLS record. When pushing
-+ * many frames down the h2 connection, this might
-+ * align differently because of headers and other
-+ * frames or simply as not sufficient data is
-+ * in a response body.
-+ * However keeping frames at or below this limit
-+ * should make optimizations at the layer that writes
-+ * to TLS easier.
- */
--#define WRITE_SIZE_MAX (TLS_DATA_MAX - 100)
-+#define WRITE_SIZE_MAX (TLS_DATA_MAX)
-
-
- static void h2_conn_io_bb_log(conn_rec *c, int stream_id, int level,
-@@ -123,21 +128,20 @@
-
- }
-
--apr_status_t h2_conn_io_init(h2_conn_io *io, conn_rec *c,
-- const h2_config *cfg)
-+apr_status_t h2_conn_io_init(h2_conn_io *io, conn_rec *c, server_rec *s)
- {
- io->c = c;
- io->output = apr_brigade_create(c->pool, c->bucket_alloc);
- io->is_tls = h2_h2_is_tls(c);
- io->buffer_output = io->is_tls;
-- io->flush_threshold = (apr_size_t)h2_config_geti64(cfg, H2_CONF_STREAM_MAX_MEM);
-+ io->flush_threshold = (apr_size_t)h2_config_sgeti64(s, H2_CONF_STREAM_MAX_MEM);
-
- if (io->is_tls) {
- /* This is what we start with,
- * see https://issues.apache.org/jira/browse/TS-2503
- */
-- io->warmup_size = h2_config_geti64(cfg, H2_CONF_TLS_WARMUP_SIZE);
-- io->cooldown_usecs = (h2_config_geti(cfg, H2_CONF_TLS_COOLDOWN_SECS)
-+ io->warmup_size = h2_config_sgeti64(s, H2_CONF_TLS_WARMUP_SIZE);
-+ io->cooldown_usecs = (h2_config_sgeti(s, H2_CONF_TLS_COOLDOWN_SECS)
- * APR_USEC_PER_SEC);
- io->write_size = (io->cooldown_usecs > 0?
- WRITE_SIZE_INITIAL : WRITE_SIZE_MAX);
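
h2_conn_io_init() sizes TLS writes dynamically: it starts with small records of roughly WRITE_SIZE_INITIAL bytes, grows to full-size records once the configured warmup amount has been sent, and drops back after a configured cooldown of idle time. The plain-C sketch below restates that decision; choose_write_size, its parameters, and the 16K maximum are illustrative, not the module's actual state machine.

#include <stdint.h>

#define SKETCH_WRITE_SIZE_INITIAL 1300        /* roughly one packet of TLS payload */
#define SKETCH_WRITE_SIZE_MAX     (16 * 1024) /* illustrative full TLS record size */

/* Small records until 'warmup_size' bytes were written since the last
 * idle period, full-size records afterwards; an idle gap longer than
 * the cooldown drops back to small records. */
static int64_t choose_write_size(int64_t bytes_written_since_idle,
                                 int64_t warmup_size,
                                 int64_t idle_usecs,
                                 int64_t cooldown_usecs)
{
    if (cooldown_usecs > 0 && idle_usecs >= cooldown_usecs)
        return SKETCH_WRITE_SIZE_INITIAL;     /* connection went cold */
    if (bytes_written_since_idle < warmup_size)
        return SKETCH_WRITE_SIZE_INITIAL;     /* still warming up */
    return SKETCH_WRITE_SIZE_MAX;             /* warmed up: full records */
}
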
---- a/modules/http2/h2_conn_io.h
-+++ b/modules/http2/h2_conn_io.h
-@@ -48,8 +48,7 @@
- apr_size_t slen;
- } h2_conn_io;
-
--apr_status_t h2_conn_io_init(h2_conn_io *io, conn_rec *c,
-- const struct h2_config *cfg);
-+apr_status_t h2_conn_io_init(h2_conn_io *io, conn_rec *c, server_rec *s);
-
- /**
- * Append data to the buffered output.
---- a/modules/http2/h2_ctx.c
-+++ b/modules/http2/h2_ctx.c
-@@ -29,8 +29,8 @@
- {
- h2_ctx *ctx = apr_pcalloc(c->pool, sizeof(h2_ctx));
- ap_assert(ctx);
-+ h2_ctx_server_update(ctx, c->base_server);
- ap_set_module_config(c->conn_config, &http2_module, ctx);
-- h2_ctx_server_set(ctx, c->base_server);
- return ctx;
- }
-
-@@ -79,8 +79,9 @@
- return ctx;
- }
-
--h2_session *h2_ctx_session_get(h2_ctx *ctx)
-+h2_session *h2_ctx_get_session(conn_rec *c)
- {
-+ h2_ctx *ctx = h2_ctx_get(c, 0);
- return ctx? ctx->session : NULL;
- }
-
-@@ -89,33 +90,17 @@
- ctx->session = session;
- }
-
--server_rec *h2_ctx_server_get(h2_ctx *ctx)
-+h2_ctx *h2_ctx_server_update(h2_ctx *ctx, server_rec *s)
- {
-- return ctx? ctx->server : NULL;
--}
--
--h2_ctx *h2_ctx_server_set(h2_ctx *ctx, server_rec *s)
--{
-- ctx->server = s;
-+ if (ctx->server != s) {
-+ ctx->server = s;
-+ }
- return ctx;
- }
-
--int h2_ctx_is_task(h2_ctx *ctx)
--{
-- return ctx && ctx->task;
--}
--
--h2_task *h2_ctx_get_task(h2_ctx *ctx)
-+h2_task *h2_ctx_get_task(conn_rec *c)
- {
-+ h2_ctx *ctx = h2_ctx_get(c, 0);
- return ctx? ctx->task : NULL;
- }
-
--h2_task *h2_ctx_cget_task(conn_rec *c)
--{
-- return h2_ctx_get_task(h2_ctx_get(c, 0));
--}
--
--h2_task *h2_ctx_rget_task(request_rec *r)
--{
-- return h2_ctx_get_task(h2_ctx_rget(r));
--}
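
The context accessors keep the usual httpd pattern of storing a per-connection struct in the connection's module config vector and creating it on demand. A hedged sketch of that get-or-create idiom for a hypothetical example_module follows (the real code uses http2_module and the h2_ctx struct).

#include "httpd.h"
#include "http_config.h"

/* Hypothetical module; a real module would define it with
 * STANDARD20_MODULE_STUFF elsewhere. */
extern module AP_MODULE_DECLARE_DATA example_module;

typedef struct {
    void *task;   /* illustrative payload, stands in for h2_ctx members */
} example_ctx;

/* Get-or-create a per-connection context, mirroring h2_ctx_get():
 * the struct lives in the connection pool and is registered in
 * c->conn_config under the module's key. */
static example_ctx *example_ctx_get(conn_rec *c, int create)
{
    example_ctx *ctx = ap_get_module_config(c->conn_config, &example_module);
    if (!ctx && create) {
        ctx = apr_pcalloc(c->pool, sizeof(*ctx));
        ap_set_module_config(c->conn_config, &example_module, ctx);
    }
    return ctx;
}
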
---- a/modules/http2/h2_ctx.h
-+++ b/modules/http2/h2_ctx.h
-@@ -56,12 +56,11 @@
- */
- h2_ctx *h2_ctx_protocol_set(h2_ctx *ctx, const char *proto);
-
--/* Set the server_rec relevant for this context.
-+/* Update the server_rec relevant for this context. A server for
-+ * a connection may change during SNI handling, for example.
- */
--h2_ctx *h2_ctx_server_set(h2_ctx *ctx, server_rec *s);
--server_rec *h2_ctx_server_get(h2_ctx *ctx);
-+h2_ctx *h2_ctx_server_update(h2_ctx *ctx, server_rec *s);
-
--struct h2_session *h2_ctx_session_get(h2_ctx *ctx);
- void h2_ctx_session_set(h2_ctx *ctx, struct h2_session *session);
-
- /**
-@@ -69,10 +68,8 @@
- */
- const char *h2_ctx_protocol_get(const conn_rec *c);
-
--int h2_ctx_is_task(h2_ctx *ctx);
-+struct h2_session *h2_ctx_get_session(conn_rec *c);
-+struct h2_task *h2_ctx_get_task(conn_rec *c);
-
--struct h2_task *h2_ctx_get_task(h2_ctx *ctx);
--struct h2_task *h2_ctx_cget_task(conn_rec *c);
--struct h2_task *h2_ctx_rget_task(request_rec *r);
-
- #endif /* defined(__mod_h2__h2_ctx__) */
---- a/modules/http2/h2_filter.c
-+++ b/modules/http2/h2_filter.c
-@@ -54,6 +54,7 @@
- const char *data;
- ssize_t n;
-
-+ (void)c;
- status = apr_bucket_read(b, &data, &len, block);
-
- while (status == APR_SUCCESS && len > 0) {
-@@ -71,10 +72,10 @@
- }
- else {
- session->io.bytes_read += n;
-- if (len <= n) {
-+ if ((apr_ssize_t)len <= n) {
- break;
- }
-- len -= n;
-+ len -= (apr_size_t)n;
- data += n;
- }
- }
-@@ -277,6 +278,7 @@
- apr_bucket_brigade *dest,
- const apr_bucket *src)
- {
-+ (void)beam;
- if (H2_BUCKET_IS_OBSERVER(src)) {
- h2_bucket_observer *l = (h2_bucket_observer *)src->data;
- apr_bucket *b = h2_bucket_observer_create(dest->bucket_alloc,
-@@ -311,8 +313,7 @@
- bbout(bb, " \"settings\": {\n");
- bbout(bb, " \"SETTINGS_MAX_CONCURRENT_STREAMS\": %d,\n", m->max_streams);
- bbout(bb, " \"SETTINGS_MAX_FRAME_SIZE\": %d,\n", 16*1024);
-- bbout(bb, " \"SETTINGS_INITIAL_WINDOW_SIZE\": %d,\n",
-- h2_config_geti(s->config, H2_CONF_WIN_SIZE));
-+ bbout(bb, " \"SETTINGS_INITIAL_WINDOW_SIZE\": %d,\n", h2_config_sgeti(s->s, H2_CONF_WIN_SIZE));
- bbout(bb, " \"SETTINGS_ENABLE_PUSH\": %d\n", h2_session_push_enabled(s));
- bbout(bb, " }%s\n", last? "" : ",");
- }
-@@ -369,7 +370,7 @@
- x.s = s;
- x.idx = 0;
- bbout(bb, " \"streams\": {");
-- h2_mplx_stream_do(s->mplx, add_stream, &x);
-+ h2_mplx_m_stream_do(s->mplx, add_stream, &x);
- bbout(bb, "\n }%s\n", last? "" : ",");
- }
-
-@@ -431,41 +432,38 @@
-
- static apr_status_t h2_status_insert(h2_task *task, apr_bucket *b)
- {
-- conn_rec *c = task->c->master;
-- h2_ctx *h2ctx = h2_ctx_get(c, 0);
-- h2_session *session;
-- h2_stream *stream;
-+ h2_mplx *m = task->mplx;
-+ h2_stream *stream = h2_mplx_t_stream_get(m, task);
-+ h2_session *s;
-+ conn_rec *c;
-+
- apr_bucket_brigade *bb;
- apr_bucket *e;
- int32_t connFlowIn, connFlowOut;
-
--
-- if (!h2ctx || (session = h2_ctx_session_get(h2ctx)) == NULL) {
-- return APR_SUCCESS;
-- }
--
-- stream = h2_session_stream_get(session, task->stream_id);
- if (!stream) {
- /* stream already done */
- return APR_SUCCESS;
- }
-+ s = stream->session;
-+ c = s->c;
-
- bb = apr_brigade_create(stream->pool, c->bucket_alloc);
-
-- connFlowIn = nghttp2_session_get_effective_local_window_size(session->ngh2);
-- connFlowOut = nghttp2_session_get_remote_window_size(session->ngh2);
-+ connFlowIn = nghttp2_session_get_effective_local_window_size(s->ngh2);
-+ connFlowOut = nghttp2_session_get_remote_window_size(s->ngh2);
-
- bbout(bb, "{\n");
- bbout(bb, " \"version\": \"draft-01\",\n");
-- add_settings(bb, session, 0);
-- add_peer_settings(bb, session, 0);
-+ add_settings(bb, s, 0);
-+ add_peer_settings(bb, s, 0);
- bbout(bb, " \"connFlowIn\": %d,\n", connFlowIn);
- bbout(bb, " \"connFlowOut\": %d,\n", connFlowOut);
-- bbout(bb, " \"sentGoAway\": %d,\n", session->local.shutdown);
-+ bbout(bb, " \"sentGoAway\": %d,\n", s->local.shutdown);
-
-- add_streams(bb, session, 0);
-+ add_streams(bb, s, 0);
-
-- add_stats(bb, session, stream, 1);
-+ add_stats(bb, s, stream, 1);
- bbout(bb, "}\n");
-
- while ((e = APR_BRIGADE_FIRST(bb)) != APR_BRIGADE_SENTINEL(bb)) {
-@@ -495,9 +493,54 @@
- return APR_SUCCESS;
- }
-
-+static apr_status_t discard_body(request_rec *r, apr_off_t maxlen)
-+{
-+ apr_bucket_brigade *bb;
-+ int seen_eos;
-+ apr_status_t rv;
-+
-+ bb = apr_brigade_create(r->pool, r->connection->bucket_alloc);
-+ seen_eos = 0;
-+ do {
-+ apr_bucket *bucket;
-+
-+ rv = ap_get_brigade(r->input_filters, bb, AP_MODE_READBYTES,
-+ APR_BLOCK_READ, HUGE_STRING_LEN);
-+
-+ if (rv != APR_SUCCESS) {
-+ apr_brigade_destroy(bb);
-+ return rv;
-+ }
-+
-+ for (bucket = APR_BRIGADE_FIRST(bb);
-+ bucket != APR_BRIGADE_SENTINEL(bb);
-+ bucket = APR_BUCKET_NEXT(bucket))
-+ {
-+ const char *data;
-+ apr_size_t len;
-+
-+ if (APR_BUCKET_IS_EOS(bucket)) {
-+ seen_eos = 1;
-+ break;
-+ }
-+ if (bucket->length == 0) {
-+ continue;
-+ }
-+ rv = apr_bucket_read(bucket, &data, &len, APR_BLOCK_READ);
-+ if (rv != APR_SUCCESS) {
-+ apr_brigade_destroy(bb);
-+ return rv;
-+ }
-+ maxlen -= bucket->length;
-+ }
-+ apr_brigade_cleanup(bb);
-+ } while (!seen_eos && maxlen >= 0);
-+
-+ return APR_SUCCESS;
-+}
-+
- int h2_filter_h2_status_handler(request_rec *r)
- {
-- h2_ctx *ctx = h2_ctx_rget(r);
- conn_rec *c = r->connection;
- h2_task *task;
- apr_bucket_brigade *bb;
-@@ -511,10 +554,12 @@
- return DECLINED;
- }
-
-- task = ctx? h2_ctx_get_task(ctx) : NULL;
-+ task = h2_ctx_get_task(r->connection);
- if (task) {
--
-- if ((status = ap_discard_request_body(r)) != OK) {
-+ /* In this handler, we do some special sauce to send footers back,
-+ * IFF we received footers in the request. This is used in our test
-+ * cases, since CGI has no way of handling those. */
-+ if ((status = discard_body(r, 1024)) != OK) {
- return status;
- }
-
---- a/modules/http2/h2_from_h1.c
-+++ b/modules/http2/h2_from_h1.c
-@@ -315,6 +315,7 @@
- int http_status;
- apr_array_header_t *hlines;
- apr_bucket_brigade *tmp;
-+ apr_bucket_brigade *saveto;
- } h2_response_parser;
-
- static apr_status_t parse_header(h2_response_parser *parser, char *line) {
-@@ -351,13 +352,17 @@
- parser->tmp = apr_brigade_create(task->pool, task->c->bucket_alloc);
- }
- status = apr_brigade_split_line(parser->tmp, bb, APR_BLOCK_READ,
-- HUGE_STRING_LEN);
-+ len);
- if (status == APR_SUCCESS) {
- --len;
- status = apr_brigade_flatten(parser->tmp, line, &len);
- if (status == APR_SUCCESS) {
- /* we assume a non-0 containing line and remove trailing crlf. */
- line[len] = '\0';
-+ /*
-+ * XXX: What to do if there is an LF but no CRLF?
-+ * Should we error out?
-+ */
- if (len >= 2 && !strcmp(H2_CRLF, line + len - 2)) {
- len -= 2;
- line[len] = '\0';
-@@ -367,10 +372,47 @@
- task->id, line);
- }
- else {
-+ apr_off_t brigade_length;
-+
-+ /*
-+ * If the brigade parser->tmp becomes longer than our buffer
-+ * for flattening we never have a chance to get a complete
-+ * line. This can happen if we are called multiple times after
-+ * previous calls did not find a H2_CRLF and we returned
-+ * APR_EAGAIN. In this case parser->tmp (correctly) grows
-+ * with each call to apr_brigade_split_line.
-+ *
-+ * XXX: Currently a stack based buffer of HUGE_STRING_LEN is
-+ * used. This means we cannot cope with lines larger than
-+ * HUGE_STRING_LEN which might be an issue.
-+ */
-+ status = apr_brigade_length(parser->tmp, 0, &brigade_length);
-+ if ((status != APR_SUCCESS) || (brigade_length > len)) {
-+ ap_log_cerror(APLOG_MARK, APLOG_ERR, 0, task->c, APLOGNO(10257)
-+ "h2_task(%s): read response, line too long",
-+ task->id);
-+ return APR_ENOSPC;
-+ }
- /* this does not look like a complete line yet */
- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, task->c,
- "h2_task(%s): read response, incomplete line: %s",
- task->id, line);
-+ if (!parser->saveto) {
-+ parser->saveto = apr_brigade_create(task->pool,
-+ task->c->bucket_alloc);
-+ }
-+ /*
-+ * Be on the safe side and save the parser->tmp brigade
-+ * as it could contain transient buckets which could be
-+ * invalid next time we are here.
-+ *
-+ * NULL for the filter parameter is ok since we
-+ * provide our own brigade as second parameter
-+ * and ap_save_brigade does not need to create one.
-+ */
-+ ap_save_brigade(NULL, &(parser->saveto), &(parser->tmp),
-+ parser->tmp->p);
-+ APR_BRIGADE_CONCAT(parser->tmp, parser->saveto);
- return APR_EAGAIN;
- }
- }
-@@ -594,18 +636,20 @@
- }
- }
-
-- if (r->header_only) {
-+ if (r->header_only || AP_STATUS_IS_HEADER_ONLY(r->status)) {
- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, f->c,
-- "h2_task(%s): header_only, cleanup output brigade",
-+ "h2_task(%s): headers only, cleanup output brigade",
- task->id);
- b = body_bucket? body_bucket : APR_BRIGADE_FIRST(bb);
- while (b != APR_BRIGADE_SENTINEL(bb)) {
- next = APR_BUCKET_NEXT(b);
- if (APR_BUCKET_IS_EOS(b) || AP_BUCKET_IS_EOR(b)) {
- break;
-- }
-- APR_BUCKET_REMOVE(b);
-- apr_bucket_destroy(b);
-+ }
-+ if (!H2_BUCKET_IS_HEADERS(b)) {
-+ APR_BUCKET_REMOVE(b);
-+ apr_bucket_destroy(b);
-+ }
- b = next;
- }
- }
---- a/modules/http2/h2_h2.c
-+++ b/modules/http2/h2_h2.c
-@@ -463,19 +463,18 @@
- return opt_ssl_is_https && opt_ssl_is_https(c);
- }
-
--int h2_is_acceptable_connection(conn_rec *c, int require_all)
-+int h2_is_acceptable_connection(conn_rec *c, request_rec *r, int require_all)
- {
- int is_tls = h2_h2_is_tls(c);
-- const h2_config *cfg = h2_config_get(c);
-
-- if (is_tls && h2_config_geti(cfg, H2_CONF_MODERN_TLS_ONLY) > 0) {
-+ if (is_tls && h2_config_cgeti(c, H2_CONF_MODERN_TLS_ONLY) > 0) {
- /* Check TLS connection for modern TLS parameters, as defined in
- * RFC 7540 and https://wiki.mozilla.org/Security/Server_Side_TLS#Modern_compatibility
- */
- apr_pool_t *pool = c->pool;
- server_rec *s = c->base_server;
- char *val;
--
-+
- if (!opt_ssl_var_lookup) {
- /* unable to check */
- return 0;
-@@ -521,33 +520,29 @@
- return 1;
- }
-
--int h2_allows_h2_direct(conn_rec *c)
-+static int h2_allows_h2_direct(conn_rec *c)
- {
-- const h2_config *cfg = h2_config_get(c);
- int is_tls = h2_h2_is_tls(c);
- const char *needed_protocol = is_tls? "h2" : "h2c";
-- int h2_direct = h2_config_geti(cfg, H2_CONF_DIRECT);
-+ int h2_direct = h2_config_cgeti(c, H2_CONF_DIRECT);
-
- if (h2_direct < 0) {
- h2_direct = is_tls? 0 : 1;
- }
-- return (h2_direct
-- && ap_is_allowed_protocol(c, NULL, NULL, needed_protocol));
-+ return (h2_direct && ap_is_allowed_protocol(c, NULL, NULL, needed_protocol));
- }
-
--int h2_allows_h2_upgrade(conn_rec *c)
-+int h2_allows_h2_upgrade(request_rec *r)
- {
-- const h2_config *cfg = h2_config_get(c);
-- int h2_upgrade = h2_config_geti(cfg, H2_CONF_UPGRADE);
--
-- return h2_upgrade > 0 || (h2_upgrade < 0 && !h2_h2_is_tls(c));
-+ int h2_upgrade = h2_config_rgeti(r, H2_CONF_UPGRADE);
-+ return h2_upgrade > 0 || (h2_upgrade < 0 && !h2_h2_is_tls(r->connection));
- }
-
- /*******************************************************************************
- * Register various hooks
- */
- static const char* const mod_ssl[] = { "mod_ssl.c", NULL};
--static const char* const mod_reqtimeout[] = { "mod_reqtimeout.c", NULL};
-+static const char* const mod_reqtimeout[] = { "mod_ssl.c", "mod_reqtimeout.c", NULL};
-
- void h2_h2_register_hooks(void)
- {
-@@ -558,7 +553,7 @@
- * a chance to take over before it.
- */
- ap_hook_process_connection(h2_h2_process_conn,
-- mod_ssl, mod_reqtimeout, APR_HOOK_LAST);
-+ mod_reqtimeout, NULL, APR_HOOK_LAST);
-
- /* One last chance to properly say goodbye if we have not done so
- * already. */
-@@ -581,14 +576,17 @@
- {
- apr_status_t status;
- h2_ctx *ctx;
-+ server_rec *s;
-
- if (c->master) {
- return DECLINED;
- }
-
- ctx = h2_ctx_get(c, 0);
-+ s = ctx? ctx->server : c->base_server;
-+
- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c, "h2_h2, process_conn");
-- if (h2_ctx_is_task(ctx)) {
-+ if (ctx && ctx->task) {
- /* our stream pseudo connection */
- ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, c, "h2_h2, task, declined");
- return DECLINED;
-@@ -601,19 +599,19 @@
- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c, "h2_h2, process_conn, "
- "new connection using protocol '%s', direct=%d, "
- "tls acceptable=%d", proto, h2_allows_h2_direct(c),
-- h2_is_acceptable_connection(c, 1));
-+ h2_is_acceptable_connection(c, NULL, 1));
- }
-
- if (!strcmp(AP_PROTOCOL_HTTP1, proto)
- && h2_allows_h2_direct(c)
-- && h2_is_acceptable_connection(c, 1)) {
-+ && h2_is_acceptable_connection(c, NULL, 1)) {
- /* Fresh connection still is on http/1.1 and H2Direct is enabled.
- * Otherwise connection is in a fully acceptable state.
- * -> peek at the first 24 incoming bytes
- */
- apr_bucket_brigade *temp;
-- char *s = NULL;
-- apr_size_t slen;
-+ char *peek = NULL;
-+ apr_size_t peeklen;
-
- temp = apr_brigade_create(c->pool, c->bucket_alloc);
- status = ap_get_brigade(c->input_filters, temp,
-@@ -626,8 +624,8 @@
- return DECLINED;
- }
-
-- apr_brigade_pflatten(temp, &s, &slen, c->pool);
-- if ((slen >= 24) && !memcmp(H2_MAGIC_TOKEN, s, 24)) {
-+ apr_brigade_pflatten(temp, &peek, &peeklen, c->pool);
-+ if ((peeklen >= 24) && !memcmp(H2_MAGIC_TOKEN, peek, 24)) {
- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c,
- "h2_h2, direct mode detected");
- if (!ctx) {
-@@ -638,7 +636,7 @@
- else if (APLOGctrace2(c)) {
- ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, c,
- "h2_h2, not detected in %d bytes(base64): %s",
-- (int)slen, h2_util_base64url_encode(s, slen, c->pool));
-+ (int)peeklen, h2_util_base64url_encode(peek, peeklen, c->pool));
- }
-
- apr_brigade_destroy(temp);
-@@ -647,15 +645,16 @@
-
- if (ctx) {
- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c, "process_conn");
-- if (!h2_ctx_session_get(ctx)) {
-- status = h2_conn_setup(ctx, c, NULL);
-+
-+ if (!h2_ctx_get_session(c)) {
-+ status = h2_conn_setup(c, NULL, s);
- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, c, "conn_setup");
- if (status != APR_SUCCESS) {
- h2_ctx_clear(c);
- return !OK;
- }
- }
-- h2_conn_run(ctx, c);
-+ h2_conn_run(c);
- return OK;
- }
-
-@@ -667,7 +666,7 @@
- {
- h2_ctx *ctx;
-
-- /* slave connection? */
-+ /* secondary connection? */
- if (c->master) {
- return DECLINED;
- }
-@@ -684,16 +683,17 @@
-
- static void check_push(request_rec *r, const char *tag)
- {
-- const h2_config *conf = h2_config_rget(r);
-- if (!r->expecting_100
-- && conf && conf->push_list && conf->push_list->nelts > 0) {
-+ apr_array_header_t *push_list = h2_config_push_list(r);
-+
-+ if (!r->expecting_100 && push_list && push_list->nelts > 0) {
- int i, old_status;
- const char *old_line;
-+
- ap_log_rerror(APLOG_MARK, APLOG_TRACE1, 0, r,
- "%s, early announcing %d resources for push",
-- tag, conf->push_list->nelts);
-- for (i = 0; i < conf->push_list->nelts; ++i) {
-- h2_push_res *push = &APR_ARRAY_IDX(conf->push_list, i, h2_push_res);
-+ tag, push_list->nelts);
-+ for (i = 0; i < push_list->nelts; ++i) {
-+ h2_push_res *push = &APR_ARRAY_IDX(push_list, i, h2_push_res);
- apr_table_add(r->headers_out, "Link",
- apr_psprintf(r->pool, "<%s>; rel=preload%s",
- push->uri_ref, push->critical? "; critical" : ""));
-@@ -710,10 +710,9 @@
-
- static int h2_h2_post_read_req(request_rec *r)
- {
-- /* slave connection? */
-+ /* secondary connection? */
- if (r->connection->master) {
-- h2_ctx *ctx = h2_ctx_rget(r);
-- struct h2_task *task = h2_ctx_get_task(ctx);
-+ struct h2_task *task = h2_ctx_get_task(r->connection);
- /* This hook will get called twice on internal redirects. Take care
- * that we manipulate filters only once. */
- if (task && !task->filters_set) {
-@@ -730,7 +729,7 @@
- ap_add_output_filter("H2_RESPONSE", task, r, r->connection);
-
- for (f = r->input_filters; f; f = f->next) {
-- if (!strcmp("H2_SLAVE_IN", f->frec->name)) {
-+ if (!strcmp("H2_SECONDARY_IN", f->frec->name)) {
- f->r = r;
- break;
- }
-@@ -744,17 +743,15 @@
-
- static int h2_h2_late_fixups(request_rec *r)
- {
-- /* slave connection? */
-+ /* secondary connection? */
- if (r->connection->master) {
-- h2_ctx *ctx = h2_ctx_rget(r);
-- struct h2_task *task = h2_ctx_get_task(ctx);
-+ struct h2_task *task = h2_ctx_get_task(r->connection);
- if (task) {
- /* check if we copy vs. setaside files in this location */
-- task->output.copy_files = h2_config_geti(h2_config_rget(r),
-- H2_CONF_COPY_FILES);
-+ task->output.copy_files = h2_config_rgeti(r, H2_CONF_COPY_FILES);
- if (task->output.copy_files) {
- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, task->c,
-- "h2_slave_out(%s): copy_files on", task->id);
-+ "h2_secondary_out(%s): copy_files on", task->id);
- h2_beam_on_file_beam(task->output.beam, h2_beam_no_files, NULL);
- }
- check_push(r, "late_fixup");
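
The direct-mode detection in h2_h2_process_conn() peeks at the first 24 bytes of a fresh cleartext connection and compares them with the HTTP/2 client connection preface (H2_MAGIC_TOKEN). A standalone sketch of that comparison, using the preface string from RFC 7540 section 3.5; looks_like_h2_direct and the sample buffers are illustrative.

#include <stdio.h>
#include <string.h>

/* RFC 7540, section 3.5: the HTTP/2 client connection preface. */
static const char h2_preface[] = "PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n";
#define H2_PREFACE_LEN (sizeof(h2_preface) - 1)   /* 24 bytes */

static int looks_like_h2_direct(const char *peek, size_t peeklen)
{
    return peeklen >= H2_PREFACE_LEN
        && memcmp(peek, h2_preface, H2_PREFACE_LEN) == 0;
}

int main(void)
{
    const char h2_bytes[] = "PRI * HTTP/2.0\r\n\r\nSM\r\n\r\nPADDING";
    const char h1_bytes[] = "GET / HTTP/1.1\r\n";

    printf("h2 direct: %d\n", looks_like_h2_direct(h2_bytes, sizeof(h2_bytes) - 1));
    printf("http/1.1 : %d\n", looks_like_h2_direct(h1_bytes, sizeof(h1_bytes) - 1));
    return 0;
}
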
---- a/modules/http2/h2_h2.h
-+++ b/modules/http2/h2_h2.h
-@@ -57,23 +57,15 @@
- * the handshake is still ongoing.
- * @return != 0 iff connection requirements are met
- */
--int h2_is_acceptable_connection(conn_rec *c, int require_all);
--
--/**
-- * Check if the "direct" HTTP/2 mode of protocol handling is enabled
-- * for the given connection.
-- * @param c the connection to check
-- * @return != 0 iff direct mode is enabled
-- */
--int h2_allows_h2_direct(conn_rec *c);
-+int h2_is_acceptable_connection(conn_rec *c, request_rec *r, int require_all);
-
- /**
- * Check if the "Upgrade" HTTP/1.1 mode of protocol switching is enabled
-- * for the given connection.
-- * @param c the connection to check
-+ * for the given request.
-+ * @param r the request to check
- * @return != 0 iff Upgrade switching is enabled
- */
--int h2_allows_h2_upgrade(conn_rec *c);
-+int h2_allows_h2_upgrade(request_rec *r);
-
-
- #endif /* defined(__mod_h2__h2_h2__) */
---- a/modules/http2/h2_headers.c
-+++ b/modules/http2/h2_headers.c
-@@ -28,6 +28,7 @@
-
- #include "h2_private.h"
- #include "h2_h2.h"
-+#include "h2_config.h"
- #include "h2_util.h"
- #include "h2_request.h"
- #include "h2_headers.h"
-@@ -101,8 +102,9 @@
- const apr_bucket *src)
- {
- if (H2_BUCKET_IS_HEADERS(src)) {
-- h2_headers *r = ((h2_bucket_headers *)src->data)->headers;
-- apr_bucket *b = h2_bucket_headers_create(dest->bucket_alloc, r);
-+ h2_headers *src_headers = ((h2_bucket_headers *)src->data)->headers;
-+ apr_bucket *b = h2_bucket_headers_create(dest->bucket_alloc,
-+ h2_headers_clone(dest->p, src_headers));
- APR_BRIGADE_INSERT_TAIL(dest, b);
- return b;
- }
-@@ -128,28 +130,41 @@
- {
- h2_headers *headers = h2_headers_create(status, header, r->notes, 0, pool);
- if (headers->status == HTTP_FORBIDDEN) {
-- const char *cause = apr_table_get(r->notes, "ssl-renegotiate-forbidden");
-- if (cause) {
-- /* This request triggered a TLS renegotiation that is now allowed
-- * in HTTP/2. Tell the client that it should use HTTP/1.1 for this.
-- */
-- ap_log_rerror(APLOG_MARK, APLOG_DEBUG, headers->status, r,
-- APLOGNO(03061)
-- "h2_headers(%ld): renegotiate forbidden, cause: %s",
-- (long)r->connection->id, cause);
-- headers->status = H2_ERR_HTTP_1_1_REQUIRED;
-+ request_rec *r_prev;
-+ for (r_prev = r; r_prev != NULL; r_prev = r_prev->prev) {
-+ const char *cause = apr_table_get(r_prev->notes, "ssl-renegotiate-forbidden");
-+ if (cause) {
-+ /* This request triggered a TLS renegotiation that is not allowed
-+ * in HTTP/2. Tell the client that it should use HTTP/1.1 for this.
-+ */
-+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, headers->status, r,
-+ APLOGNO(03061)
-+ "h2_headers(%ld): renegotiate forbidden, cause: %s",
-+ (long)r->connection->id, cause);
-+ headers->status = H2_ERR_HTTP_1_1_REQUIRED;
-+ break;
-+ }
- }
- }
- if (is_unsafe(r->server)) {
-- apr_table_setn(headers->notes, H2_HDR_CONFORMANCE,
-- H2_HDR_CONFORMANCE_UNSAFE);
-+ apr_table_setn(headers->notes, H2_HDR_CONFORMANCE, H2_HDR_CONFORMANCE_UNSAFE);
-+ }
-+ if (h2_config_rgeti(r, H2_CONF_PUSH) == 0 && h2_config_sgeti(r->server, H2_CONF_PUSH) != 0) {
-+ apr_table_setn(headers->notes, H2_PUSH_MODE_NOTE, "0");
- }
- return headers;
- }
-
- h2_headers *h2_headers_copy(apr_pool_t *pool, h2_headers *h)
- {
-- return h2_headers_create(h->status, h->headers, h->notes, h->raw_bytes, pool);
-+ return h2_headers_create(h->status, apr_table_copy(pool, h->headers),
-+ apr_table_copy(pool, h->notes), h->raw_bytes, pool);
-+}
-+
-+h2_headers *h2_headers_clone(apr_pool_t *pool, h2_headers *h)
-+{
-+ return h2_headers_create(h->status, apr_table_clone(pool, h->headers),
-+ apr_table_clone(pool, h->notes), h->raw_bytes, pool);
- }
-
- h2_headers *h2_headers_die(apr_status_t type,
---- a/modules/http2/h2_headers.h
-+++ b/modules/http2/h2_headers.h
-@@ -59,12 +59,18 @@
- apr_table_t *header, apr_pool_t *pool);
-
- /**
-- * Clone the headers into another pool. This will not copy any
-+ * Copy the headers into another pool. This will not copy any
- * header strings.
- */
- h2_headers *h2_headers_copy(apr_pool_t *pool, h2_headers *h);
-
- /**
-+ * Clone the headers into another pool. This will also clone any
-+ * header strings.
-+ */
-+h2_headers *h2_headers_clone(apr_pool_t *pool, h2_headers *h);
-+
-+/**
- * Create the headers for the given error.
- * @param stream_id id of the stream to create the headers for
- * @param type the error code
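
h2_headers_copy() keeps sharing the header strings with the source pool (apr_table_copy), while the new h2_headers_clone() duplicates them into the destination pool (apr_table_clone), which is what makes cloned HEADERS buckets safe to pass between brigades with different lifetimes. A small APR program illustrating the copy/clone difference; the pool names and the sample header are illustrative.

#include <stdio.h>
#include <apr_general.h>
#include <apr_pools.h>
#include <apr_tables.h>

int main(void)
{
    apr_pool_t *pa, *pb;
    apr_table_t *orig, *shallow, *deep;

    apr_initialize();
    apr_pool_create(&pa, NULL);
    apr_pool_create(&pb, pa);    /* child pool, pa outlives pb */

    orig = apr_table_make(pa, 4);
    apr_table_set(orig, "content-type", "text/html");

    /* apr_table_copy(): new table structure in pb, but the key/value
     * strings are still the ones held by the source table's pool. */
    shallow = apr_table_copy(pb, orig);

    /* apr_table_clone(): keys and values are duplicated into pb, so the
     * clone does not depend on the source pool's lifetime. */
    deep = apr_table_clone(pb, orig);

    printf("copy : %s\n", apr_table_get(shallow, "content-type"));
    printf("clone: %s\n", apr_table_get(deep, "content-type"));

    apr_pool_destroy(pa);        /* destroys pb as well */
    apr_terminate();
    return 0;
}
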
---- a/modules/http2/h2_mplx.c
-+++ b/modules/http2/h2_mplx.c
-@@ -40,7 +40,6 @@
- #include "h2_ctx.h"
- #include "h2_h2.h"
- #include "h2_mplx.h"
--#include "h2_ngn_shed.h"
- #include "h2_request.h"
- #include "h2_stream.h"
- #include "h2_session.h"
-@@ -54,9 +53,21 @@
- h2_mplx *m;
- h2_stream *stream;
- apr_time_t now;
-+ apr_size_t count;
- } stream_iter_ctx;
-
--apr_status_t h2_mplx_child_init(apr_pool_t *pool, server_rec *s)
-+/**
-+ * Naming convention for static functions:
-+ * - m_*: function only called from the master connection
-+ * - s_*: function only called from a secondary connection
-+ * - t_*: function only called from a h2_task holder
-+ * - mst_*: function called from everyone
-+ */
-+
-+static apr_status_t s_mplx_be_happy(h2_mplx *m, h2_task *task);
-+static apr_status_t m_be_annoyed(h2_mplx *m);
-+
-+apr_status_t h2_mplx_m_child_init(apr_pool_t *pool, server_rec *s)
- {
- return APR_SUCCESS;
- }
-@@ -72,46 +83,40 @@
- #define H2_MPLX_ENTER_ALWAYS(m) \
- apr_thread_mutex_lock(m->lock)
-
--#define H2_MPLX_ENTER_MAYBE(m, lock) \
-- if (lock) apr_thread_mutex_lock(m->lock)
-+#define H2_MPLX_ENTER_MAYBE(m, dolock) \
-+ if (dolock) apr_thread_mutex_lock(m->lock)
-
--#define H2_MPLX_LEAVE_MAYBE(m, lock) \
-- if (lock) apr_thread_mutex_unlock(m->lock)
-+#define H2_MPLX_LEAVE_MAYBE(m, dolock) \
-+ if (dolock) apr_thread_mutex_unlock(m->lock)
-
--static void check_data_for(h2_mplx *m, h2_stream *stream, int lock);
-+static void mst_check_data_for(h2_mplx *m, h2_stream *stream, int mplx_is_locked);
-
--static void stream_output_consumed(void *ctx,
-- h2_bucket_beam *beam, apr_off_t length)
-+static void mst_stream_output_consumed(void *ctx, h2_bucket_beam *beam, apr_off_t length)
- {
-- h2_stream *stream = ctx;
-- h2_task *task = stream->task;
--
-- if (length > 0 && task && task->assigned) {
-- h2_req_engine_out_consumed(task->assigned, task->c, length);
-- }
- }
-
--static void stream_input_ev(void *ctx, h2_bucket_beam *beam)
-+static void mst_stream_input_ev(void *ctx, h2_bucket_beam *beam)
- {
- h2_stream *stream = ctx;
- h2_mplx *m = stream->session->mplx;
- apr_atomic_set32(&m->event_pending, 1);
- }
-
--static void stream_input_consumed(void *ctx, h2_bucket_beam *beam, apr_off_t length)
-+static void m_stream_input_consumed(void *ctx, h2_bucket_beam *beam, apr_off_t length)
- {
- h2_stream_in_consumed(ctx, length);
- }
-
--static void stream_joined(h2_mplx *m, h2_stream *stream)
-+static void ms_stream_joined(h2_mplx *m, h2_stream *stream)
- {
-- ap_assert(!stream->task || stream->task->worker_done);
-+ ap_assert(!h2_task_has_started(stream->task) || stream->task->worker_done);
-
-+ h2_ififo_remove(m->readyq, stream->id);
- h2_ihash_remove(m->shold, stream->id);
- h2_ihash_add(m->spurge, stream);
- }
-
--static void stream_cleanup(h2_mplx *m, h2_stream *stream)
-+static void m_stream_cleanup(h2_mplx *m, h2_stream *stream)
- {
- ap_assert(stream->state == H2_SS_CLEANUP);
-
-@@ -128,15 +133,16 @@
-
- h2_ihash_remove(m->streams, stream->id);
- h2_iq_remove(m->q, stream->id);
-- h2_ififo_remove(m->readyq, stream->id);
-- h2_ihash_add(m->shold, stream);
-
-- if (!stream->task || stream->task->worker_done) {
-- stream_joined(m, stream);
-+ if (!h2_task_has_started(stream->task) || stream->task->done_done) {
-+ ms_stream_joined(m, stream);
- }
-- else if (stream->task) {
-- stream->task->c->aborted = 1;
-- apr_thread_cond_broadcast(m->task_thawed);
-+ else {
-+ h2_ififo_remove(m->readyq, stream->id);
-+ h2_ihash_add(m->shold, stream);
-+ if (stream->task) {
-+ stream->task->c->aborted = 1;
-+ }
- }
- }
-
-@@ -151,29 +157,23 @@
- * their HTTP/1 cousins, the separate allocator seems to work better
- * than protecting a shared h2_session one with an own lock.
- */
--h2_mplx *h2_mplx_create(conn_rec *c, apr_pool_t *parent,
-- const h2_config *conf,
-- h2_workers *workers)
-+h2_mplx *h2_mplx_m_create(conn_rec *c, server_rec *s, apr_pool_t *parent,
-+ h2_workers *workers)
- {
- apr_status_t status = APR_SUCCESS;
- apr_allocator_t *allocator;
- apr_thread_mutex_t *mutex;
- h2_mplx *m;
-- h2_ctx *ctx = h2_ctx_get(c, 0);
-- ap_assert(conf);
-
- m = apr_pcalloc(parent, sizeof(h2_mplx));
- if (m) {
- m->id = c->id;
- m->c = c;
-- m->s = (ctx? h2_ctx_server_get(ctx) : NULL);
-- if (!m->s) {
-- m->s = c->base_server;
-- }
-+ m->s = s;
-
- /* We create a pool with its own allocator to be used for
-- * processing slave connections. This is the only way to have the
-- * processing independant of its parent pool in the sense that it
-+ * processing secondary connections. This is the only way to have the
-+ * processing independent of its parent pool in the sense that it
- * can work in another thread. Also, the new allocator needs its own
- * mutex to synchronize sub-pools.
- */
-@@ -204,17 +204,10 @@
- return NULL;
- }
-
-- status = apr_thread_cond_create(&m->task_thawed, m->pool);
-- if (status != APR_SUCCESS) {
-- apr_pool_destroy(m->pool);
-- return NULL;
-- }
--
-- m->max_streams = h2_config_geti(conf, H2_CONF_MAX_STREAMS);
-- m->stream_max_mem = h2_config_geti(conf, H2_CONF_STREAM_MAX_MEM);
-+ m->max_streams = h2_config_sgeti(s, H2_CONF_MAX_STREAMS);
-+ m->stream_max_mem = h2_config_sgeti(s, H2_CONF_STREAM_MAX_MEM);
-
- m->streams = h2_ihash_create(m->pool, offsetof(h2_stream,id));
-- m->sredo = h2_ihash_create(m->pool, offsetof(h2_stream,id));
- m->shold = h2_ihash_create(m->pool, offsetof(h2_stream,id));
- m->spurge = h2_ihash_create(m->pool, offsetof(h2_stream,id));
- m->q = h2_iq_create(m->pool, m->max_streams);
-@@ -228,19 +221,15 @@
- m->workers = workers;
- m->max_active = workers->max_workers;
- m->limit_active = 6; /* the original h1 max parallel connections */
-- m->last_limit_change = m->last_idle_block = apr_time_now();
-- m->limit_change_interval = apr_time_from_msec(100);
--
-- m->spare_slaves = apr_array_make(m->pool, 10, sizeof(conn_rec*));
-+ m->last_mood_change = apr_time_now();
-+ m->mood_update_interval = apr_time_from_msec(100);
-
-- m->ngn_shed = h2_ngn_shed_create(m->pool, m->c, m->max_streams,
-- m->stream_max_mem);
-- h2_ngn_shed_set_ctx(m->ngn_shed , m);
-+ m->spare_secondary = apr_array_make(m->pool, 10, sizeof(conn_rec*));
- }
- return m;
- }
-
--int h2_mplx_shutdown(h2_mplx *m)
-+int h2_mplx_m_shutdown(h2_mplx *m)
- {
- int max_stream_started = 0;
-
-@@ -254,7 +243,7 @@
- return max_stream_started;
- }
-
--static int input_consumed_signal(h2_mplx *m, h2_stream *stream)
-+static int m_input_consumed_signal(h2_mplx *m, h2_stream *stream)
- {
- if (stream->input) {
- return h2_beam_report_consumption(stream->input);
-@@ -262,12 +251,12 @@
- return 0;
- }
-
--static int report_consumption_iter(void *ctx, void *val)
-+static int m_report_consumption_iter(void *ctx, void *val)
- {
- h2_stream *stream = val;
- h2_mplx *m = ctx;
-
-- input_consumed_signal(m, stream);
-+ m_input_consumed_signal(m, stream);
- if (stream->state == H2_SS_CLOSED_L
- && (!stream->task || stream->task->worker_done)) {
- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, m->c,
-@@ -278,7 +267,7 @@
- return 1;
- }
-
--static int output_consumed_signal(h2_mplx *m, h2_task *task)
-+static int s_output_consumed_signal(h2_mplx *m, h2_task *task)
- {
- if (task->output.beam) {
- return h2_beam_report_consumption(task->output.beam);
-@@ -286,7 +275,7 @@
- return 0;
- }
-
--static int stream_destroy_iter(void *ctx, void *val)
-+static int m_stream_destroy_iter(void *ctx, void *val)
- {
- h2_mplx *m = ctx;
- h2_stream *stream = val;
-@@ -296,7 +285,7 @@
-
- if (stream->input) {
- /* Process outstanding events before destruction */
-- input_consumed_signal(m, stream);
-+ m_input_consumed_signal(m, stream);
- h2_beam_log(stream->input, m->c, APLOG_TRACE2, "stream_destroy");
- h2_beam_destroy(stream->input);
- stream->input = NULL;
-@@ -304,12 +293,12 @@
-
- if (stream->task) {
- h2_task *task = stream->task;
-- conn_rec *slave;
-- int reuse_slave = 0;
-+ conn_rec *secondary;
-+ int reuse_secondary = 0;
-
- stream->task = NULL;
-- slave = task->c;
-- if (slave) {
-+ secondary = task->c;
-+ if (secondary) {
- /* On non-serialized requests, the IO logging has not accounted for any
- * meta data send over the network: response headers and h2 frame headers. we
- * counted this on the stream and need to add this now.
-@@ -318,26 +307,25 @@
- if (task->request && !task->request->serialize && h2_task_logio_add_bytes_out) {
- apr_off_t unaccounted = stream->out_frame_octets - stream->out_data_octets;
- if (unaccounted > 0) {
-- h2_task_logio_add_bytes_out(slave, unaccounted);
-+ h2_task_logio_add_bytes_out(secondary, unaccounted);
- }
- }
-
-- if (m->s->keep_alive_max == 0 || slave->keepalives < m->s->keep_alive_max) {
-- reuse_slave = ((m->spare_slaves->nelts < (m->limit_active * 3 / 2))
-- && !task->rst_error);
-+ if (m->s->keep_alive_max == 0 || secondary->keepalives < m->s->keep_alive_max) {
-+ reuse_secondary = ((m->spare_secondary->nelts < (m->limit_active * 3 / 2))
-+ && !task->rst_error);
- }
-
-- task->c = NULL;
-- if (reuse_slave) {
-+ if (reuse_secondary) {
- h2_beam_log(task->output.beam, m->c, APLOG_DEBUG,
-- APLOGNO(03385) "h2_task_destroy, reuse slave");
-+ APLOGNO(03385) "h2_task_destroy, reuse secondary");
- h2_task_destroy(task);
-- APR_ARRAY_PUSH(m->spare_slaves, conn_rec*) = slave;
-+ APR_ARRAY_PUSH(m->spare_secondary, conn_rec*) = secondary;
- }
- else {
- h2_beam_log(task->output.beam, m->c, APLOG_TRACE1,
-- "h2_task_destroy, destroy slave");
-- h2_slave_destroy(slave);
-+ "h2_task_destroy, destroy secondary");
-+ h2_secondary_destroy(secondary);
- }
- }
- }
-@@ -345,11 +333,11 @@
- return 0;
- }
-
--static void purge_streams(h2_mplx *m, int lock)
-+static void m_purge_streams(h2_mplx *m, int lock)
- {
- if (!h2_ihash_empty(m->spurge)) {
- H2_MPLX_ENTER_MAYBE(m, lock);
-- while (!h2_ihash_iter(m->spurge, stream_destroy_iter, m)) {
-+ while (!h2_ihash_iter(m->spurge, m_stream_destroy_iter, m)) {
- /* repeat until empty */
- }
- H2_MPLX_LEAVE_MAYBE(m, lock);
-@@ -361,13 +349,13 @@
- void *ctx;
- } stream_iter_ctx_t;
-
--static int stream_iter_wrap(void *ctx, void *stream)
-+static int m_stream_iter_wrap(void *ctx, void *stream)
- {
- stream_iter_ctx_t *x = ctx;
- return x->cb(stream, x->ctx);
- }
-
--apr_status_t h2_mplx_stream_do(h2_mplx *m, h2_mplx_stream_cb *cb, void *ctx)
-+apr_status_t h2_mplx_m_stream_do(h2_mplx *m, h2_mplx_stream_cb *cb, void *ctx)
- {
- stream_iter_ctx_t x;
-
-@@ -375,13 +363,13 @@
-
- x.cb = cb;
- x.ctx = ctx;
-- h2_ihash_iter(m->streams, stream_iter_wrap, &x);
-+ h2_ihash_iter(m->streams, m_stream_iter_wrap, &x);
-
- H2_MPLX_LEAVE(m);
- return APR_SUCCESS;
- }
-
--static int report_stream_iter(void *ctx, void *val) {
-+static int m_report_stream_iter(void *ctx, void *val) {
- h2_mplx *m = ctx;
- h2_stream *stream = val;
- h2_task *task = stream->task;
-@@ -394,10 +382,10 @@
- if (task) {
- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, m->c, /* NO APLOGNO */
- H2_STRM_MSG(stream, "->03198: %s %s %s"
-- "[started=%d/done=%d/frozen=%d]"),
-+ "[started=%d/done=%d]"),
- task->request->method, task->request->authority,
- task->request->path, task->worker_started,
-- task->worker_done, task->frozen);
-+ task->worker_done);
- }
- else {
- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, m->c, /* NO APLOGNO */
-@@ -406,7 +394,7 @@
- return 1;
- }
-
--static int unexpected_stream_iter(void *ctx, void *val) {
-+static int m_unexpected_stream_iter(void *ctx, void *val) {
- h2_mplx *m = ctx;
- h2_stream *stream = val;
- ap_log_cerror(APLOG_MARK, APLOG_WARNING, 0, m->c, /* NO APLOGNO */
-@@ -415,7 +403,7 @@
- return 1;
- }
-
--static int stream_cancel_iter(void *ctx, void *val) {
-+static int m_stream_cancel_iter(void *ctx, void *val) {
- h2_mplx *m = ctx;
- h2_stream *stream = val;
-
-@@ -429,14 +417,14 @@
- h2_stream_rst(stream, H2_ERR_NO_ERROR);
- /* All connection data has been sent, simulate cleanup */
- h2_stream_dispatch(stream, H2_SEV_EOS_SENT);
-- stream_cleanup(m, stream);
-+ m_stream_cleanup(m, stream);
- return 0;
- }
-
--void h2_mplx_release_and_join(h2_mplx *m, apr_thread_cond_t *wait)
-+void h2_mplx_m_release_and_join(h2_mplx *m, apr_thread_cond_t *wait)
- {
- apr_status_t status;
-- int i, wait_secs = 60;
-+ int i, wait_secs = 60, old_aborted;
-
- ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, m->c,
- "h2_mplx(%ld): start release", m->id);
-@@ -447,15 +435,23 @@
-
- H2_MPLX_ENTER_ALWAYS(m);
-
-+ /* While really terminating any secondary connections, treat the master
-+ * connection as aborted. It's not as if we could send any more data
-+ * at this point. */
-+ old_aborted = m->c->aborted;
-+ m->c->aborted = 1;
-+
- /* How to shut down a h2 connection:
- * 1. cancel all streams still active */
-- while (!h2_ihash_iter(m->streams, stream_cancel_iter, m)) {
-+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c,
-+ "h2_mplx(%ld): release, %d/%d/%d streams (total/hold/purge), %d active tasks",
-+ m->id, (int)h2_ihash_count(m->streams),
-+ (int)h2_ihash_count(m->shold), (int)h2_ihash_count(m->spurge), m->tasks_active);
-+ while (!h2_ihash_iter(m->streams, m_stream_cancel_iter, m)) {
- /* until empty */
- }
-
-- /* 2. terminate ngn_shed, no more streams
-- * should be scheduled or in the active set */
-- h2_ngn_shed_abort(m->ngn_shed);
-+ /* 2. no more streams should be scheduled or in the active set */
- ap_assert(h2_ihash_empty(m->streams));
- ap_assert(h2_iq_empty(m->q));
-
-@@ -473,65 +469,60 @@
- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, m->c, APLOGNO(03198)
- "h2_mplx(%ld): waited %d sec for %d tasks",
- m->id, i*wait_secs, (int)h2_ihash_count(m->shold));
-- h2_ihash_iter(m->shold, report_stream_iter, m);
-+ h2_ihash_iter(m->shold, m_report_stream_iter, m);
- }
- }
-- ap_assert(m->tasks_active == 0);
- m->join_wait = NULL;
--
-- /* 4. close the h2_req_enginge shed */
-- h2_ngn_shed_destroy(m->ngn_shed);
-- m->ngn_shed = NULL;
--
-+
- /* 4. With all workers done, all streams should be in spurge */
-+ ap_assert(m->tasks_active == 0);
- if (!h2_ihash_empty(m->shold)) {
- ap_log_cerror(APLOG_MARK, APLOG_WARNING, 0, m->c, APLOGNO(03516)
- "h2_mplx(%ld): unexpected %d streams in hold",
- m->id, (int)h2_ihash_count(m->shold));
-- h2_ihash_iter(m->shold, unexpected_stream_iter, m);
-+ h2_ihash_iter(m->shold, m_unexpected_stream_iter, m);
- }
-
-+ m->c->aborted = old_aborted;
- H2_MPLX_LEAVE(m);
-
-- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c,
-- "h2_mplx(%ld): released", m->id);
-+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c, "h2_mplx(%ld): released", m->id);
- }
-
--apr_status_t h2_mplx_stream_cleanup(h2_mplx *m, h2_stream *stream)
-+apr_status_t h2_mplx_m_stream_cleanup(h2_mplx *m, h2_stream *stream)
- {
- H2_MPLX_ENTER(m);
-
- ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, m->c,
- H2_STRM_MSG(stream, "cleanup"));
-- stream_cleanup(m, stream);
-+ m_stream_cleanup(m, stream);
-
- H2_MPLX_LEAVE(m);
- return APR_SUCCESS;
- }
-
--h2_stream *h2_mplx_stream_get(h2_mplx *m, int id)
-+h2_stream *h2_mplx_t_stream_get(h2_mplx *m, h2_task *task)
- {
- h2_stream *s = NULL;
-
- H2_MPLX_ENTER_ALWAYS(m);
-
-- s = h2_ihash_get(m->streams, id);
-+ s = h2_ihash_get(m->streams, task->stream_id);
-
- H2_MPLX_LEAVE(m);
- return s;
- }
-
--static void output_produced(void *ctx, h2_bucket_beam *beam, apr_off_t bytes)
-+static void mst_output_produced(void *ctx, h2_bucket_beam *beam, apr_off_t bytes)
- {
- h2_stream *stream = ctx;
- h2_mplx *m = stream->session->mplx;
-
-- check_data_for(m, stream, 1);
-+ mst_check_data_for(m, stream, 0);
- }
-
--static apr_status_t out_open(h2_mplx *m, int stream_id, h2_bucket_beam *beam)
-+static apr_status_t t_out_open(h2_mplx *m, int stream_id, h2_bucket_beam *beam)
- {
-- apr_status_t status = APR_SUCCESS;
- h2_stream *stream = h2_ihash_get(m->streams, stream_id);
-
- if (!stream || !stream->task || m->aborted) {
-@@ -542,26 +533,26 @@
- stream->output = beam;
-
- if (APLOGctrace2(m->c)) {
-- h2_beam_log(beam, m->c, APLOG_TRACE2, "out_open");
-+ h2_beam_log(beam, stream->task->c, APLOG_TRACE2, "out_open");
- }
- else {
-- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, m->c,
-+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, stream->task->c,
- "h2_mplx(%s): out open", stream->task->id);
- }
-
-- h2_beam_on_consumed(stream->output, NULL, stream_output_consumed, stream);
-- h2_beam_on_produced(stream->output, output_produced, stream);
-+ h2_beam_on_consumed(stream->output, NULL, mst_stream_output_consumed, stream);
-+ h2_beam_on_produced(stream->output, mst_output_produced, stream);
- if (stream->task->output.copy_files) {
- h2_beam_on_file_beam(stream->output, h2_beam_no_files, NULL);
- }
-
- /* we might see some file buckets in the output, see
- * if we have enough handles reserved. */
-- check_data_for(m, stream, 0);
-- return status;
-+ mst_check_data_for(m, stream, 1);
-+ return APR_SUCCESS;
- }
-
--apr_status_t h2_mplx_out_open(h2_mplx *m, int stream_id, h2_bucket_beam *beam)
-+apr_status_t h2_mplx_t_out_open(h2_mplx *m, int stream_id, h2_bucket_beam *beam)
- {
- apr_status_t status;
-
-@@ -571,14 +562,14 @@
- status = APR_ECONNABORTED;
- }
- else {
-- status = out_open(m, stream_id, beam);
-+ status = t_out_open(m, stream_id, beam);
- }
-
- H2_MPLX_LEAVE(m);
- return status;
- }
-
--static apr_status_t out_close(h2_mplx *m, h2_task *task)
-+static apr_status_t s_out_close(h2_mplx *m, h2_task *task)
- {
- apr_status_t status = APR_SUCCESS;
- h2_stream *stream;
-@@ -595,17 +586,17 @@
- return APR_ECONNABORTED;
- }
-
-- ap_log_cerror(APLOG_MARK, APLOG_TRACE2, status, m->c,
-+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, status, task->c,
- "h2_mplx(%s): close", task->id);
- status = h2_beam_close(task->output.beam);
-- h2_beam_log(task->output.beam, m->c, APLOG_TRACE2, "out_close");
-- output_consumed_signal(m, task);
-- check_data_for(m, stream, 0);
-+ h2_beam_log(task->output.beam, task->c, APLOG_TRACE2, "out_close");
-+ s_output_consumed_signal(m, task);
-+ mst_check_data_for(m, stream, 1);
- return status;
- }
-
--apr_status_t h2_mplx_out_trywait(h2_mplx *m, apr_interval_time_t timeout,
-- apr_thread_cond_t *iowait)
-+apr_status_t h2_mplx_m_out_trywait(h2_mplx *m, apr_interval_time_t timeout,
-+ apr_thread_cond_t *iowait)
- {
- apr_status_t status;
-
-@@ -614,12 +605,12 @@
- if (m->aborted) {
- status = APR_ECONNABORTED;
- }
-- else if (h2_mplx_has_master_events(m)) {
-+ else if (h2_mplx_m_has_master_events(m)) {
- status = APR_SUCCESS;
- }
- else {
-- purge_streams(m, 0);
-- h2_ihash_iter(m->streams, report_consumption_iter, m);
-+ m_purge_streams(m, 0);
-+ h2_ihash_iter(m->streams, m_report_consumption_iter, m);
- m->added_output = iowait;
- status = apr_thread_cond_timedwait(m->added_output, m->lock, timeout);
- if (APLOGctrace2(m->c)) {
-@@ -634,19 +625,27 @@
- return status;
- }
-
--static void check_data_for(h2_mplx *m, h2_stream *stream, int lock)
-+static void mst_check_data_for(h2_mplx *m, h2_stream *stream, int mplx_is_locked)
- {
-+ /* If m->lock is already held, we must release during h2_ififo_push()
-+ * which can wait on its not_full condition, causing a deadlock because
-+ * no one would then be able to acquire m->lock to empty the fifo.
-+ */
-+ H2_MPLX_LEAVE_MAYBE(m, mplx_is_locked);
- if (h2_ififo_push(m->readyq, stream->id) == APR_SUCCESS) {
-+ H2_MPLX_ENTER_ALWAYS(m);
- apr_atomic_set32(&m->event_pending, 1);
-- H2_MPLX_ENTER_MAYBE(m, lock);
- if (m->added_output) {
- apr_thread_cond_signal(m->added_output);
- }
-- H2_MPLX_LEAVE_MAYBE(m, lock);
-+ H2_MPLX_LEAVE_MAYBE(m, !mplx_is_locked);
-+ }
-+ else {
-+ H2_MPLX_ENTER_MAYBE(m, mplx_is_locked);
- }
- }
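The comment above names a real ordering hazard: h2_ififo_push() may block on its internal "not full" condition, so the pusher must not hold m->lock while it waits, otherwise the consumer that would drain the fifo can never take the lock. Below is a minimal, generic sketch of the same release-before-blocking-push pattern using POSIX threads; the names (bounded_queue_t, notify_ready) are illustrative and not part of mod_http2.

    #include <pthread.h>

    typedef struct {
        pthread_mutex_t lock;       /* protects the queue itself */
        pthread_cond_t  not_full;
        pthread_cond_t  not_empty;
        int             slots[16];
        int             count;
    } bounded_queue_t;

    /* May block until a consumer makes room. */
    static void queue_push(bounded_queue_t *q, int value)
    {
        pthread_mutex_lock(&q->lock);
        while (q->count == 16)
            pthread_cond_wait(&q->not_full, &q->lock);
        q->slots[q->count++] = value;
        pthread_cond_signal(&q->not_empty);
        pthread_mutex_unlock(&q->lock);
    }

    /* Same shape as mst_check_data_for(): drop the outer lock before a push
     * that may wait, then restore the caller's locking state afterwards. */
    static void notify_ready(pthread_mutex_t *outer, int outer_is_locked,
                             bounded_queue_t *q, int stream_id)
    {
        if (outer_is_locked)
            pthread_mutex_unlock(outer);  /* the consumer needs this lock to drain q */
        queue_push(q, stream_id);
        if (outer_is_locked)
            pthread_mutex_lock(outer);
    }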
-
--apr_status_t h2_mplx_reprioritize(h2_mplx *m, h2_stream_pri_cmp *cmp, void *ctx)
-+apr_status_t h2_mplx_m_reprioritize(h2_mplx *m, h2_stream_pri_cmp *cmp, void *ctx)
- {
- apr_status_t status;
-
-@@ -666,22 +665,22 @@
- return status;
- }
-
--static void register_if_needed(h2_mplx *m)
-+static void ms_register_if_needed(h2_mplx *m, int from_master)
- {
- if (!m->aborted && !m->is_registered && !h2_iq_empty(m->q)) {
- apr_status_t status = h2_workers_register(m->workers, m);
- if (status == APR_SUCCESS) {
- m->is_registered = 1;
- }
-- else {
-+ else if (from_master) {
- ap_log_cerror(APLOG_MARK, APLOG_ERR, status, m->c, APLOGNO(10021)
- "h2_mplx(%ld): register at workers", m->id);
- }
- }
- }
-
--apr_status_t h2_mplx_process(h2_mplx *m, struct h2_stream *stream,
-- h2_stream_pri_cmp *cmp, void *ctx)
-+apr_status_t h2_mplx_m_process(h2_mplx *m, struct h2_stream *stream,
-+ h2_stream_pri_cmp *cmp, void *ctx)
- {
- apr_status_t status;
-
-@@ -695,13 +694,13 @@
- h2_ihash_add(m->streams, stream);
- if (h2_stream_is_ready(stream)) {
- /* already have a response */
-- check_data_for(m, stream, 0);
-+ mst_check_data_for(m, stream, 1);
- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c,
- H2_STRM_MSG(stream, "process, add to readyq"));
- }
- else {
- h2_iq_add(m->q, stream->id, cmp, ctx);
-- register_if_needed(m);
-+ ms_register_if_needed(m, 1);
- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c,
- H2_STRM_MSG(stream, "process, added to q"));
- }
-@@ -711,7 +710,7 @@
- return status;
- }
-
--static h2_task *next_stream_task(h2_mplx *m)
-+static h2_task *s_next_stream_task(h2_mplx *m)
- {
- h2_stream *stream;
- int sid;
-@@ -720,40 +719,39 @@
-
- stream = h2_ihash_get(m->streams, sid);
- if (stream) {
-- conn_rec *slave, **pslave;
-+ conn_rec *secondary, **psecondary;
-
-- pslave = (conn_rec **)apr_array_pop(m->spare_slaves);
-- if (pslave) {
-- slave = *pslave;
-- slave->aborted = 0;
-+ psecondary = (conn_rec **)apr_array_pop(m->spare_secondary);
-+ if (psecondary) {
-+ secondary = *psecondary;
-+ secondary->aborted = 0;
- }
- else {
-- slave = h2_slave_create(m->c, stream->id, m->pool);
-+ secondary = h2_secondary_create(m->c, stream->id, m->pool);
- }
-
- if (!stream->task) {
--
- if (sid > m->max_stream_started) {
- m->max_stream_started = sid;
- }
- if (stream->input) {
-- h2_beam_on_consumed(stream->input, stream_input_ev,
-- stream_input_consumed, stream);
-+ h2_beam_on_consumed(stream->input, mst_stream_input_ev,
-+ m_stream_input_consumed, stream);
- }
-
-- stream->task = h2_task_create(slave, stream->id,
-+ stream->task = h2_task_create(secondary, stream->id,
- stream->request, m, stream->input,
- stream->session->s->timeout,
- m->stream_max_mem);
- if (!stream->task) {
-- ap_log_cerror(APLOG_MARK, APLOG_ERR, APR_ENOMEM, slave,
-+ ap_log_cerror(APLOG_MARK, APLOG_ERR, APR_ENOMEM, secondary,
- H2_STRM_LOG(APLOGNO(02941), stream,
- "create task"));
- return NULL;
- }
--
- }
-
-+ stream->task->started_at = apr_time_now();
- ++m->tasks_active;
- return stream->task;
- }
-@@ -761,7 +759,7 @@
- return NULL;
- }
-
--apr_status_t h2_mplx_pop_task(h2_mplx *m, h2_task **ptask)
-+apr_status_t h2_mplx_s_pop_task(h2_mplx *m, h2_task **ptask)
- {
- apr_status_t rv = APR_EOF;
-
-@@ -777,7 +775,7 @@
- rv = APR_EOF;
- }
- else {
-- *ptask = next_stream_task(m);
-+ *ptask = s_next_stream_task(m);
- rv = (*ptask != NULL && !h2_iq_empty(m->q))? APR_EAGAIN : APR_SUCCESS;
- }
- if (APR_EAGAIN != rv) {
-@@ -787,127 +785,87 @@
- return rv;
- }
-
--static void task_done(h2_mplx *m, h2_task *task, h2_req_engine *ngn)
-+static void s_task_done(h2_mplx *m, h2_task *task)
- {
- h2_stream *stream;
-
-- if (task->frozen) {
-- /* this task was handed over to an engine for processing
-- * and the original worker has finished. That means the
-- * engine may start processing now. */
-- h2_task_thaw(task);
-- apr_thread_cond_broadcast(m->task_thawed);
-- return;
-- }
--
-- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c,
-+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, task->c,
- "h2_mplx(%ld): task(%s) done", m->id, task->id);
-- out_close(m, task);
--
-- if (ngn) {
-- apr_off_t bytes = 0;
-- h2_beam_send(task->output.beam, NULL, APR_NONBLOCK_READ);
-- bytes += h2_beam_get_buffered(task->output.beam);
-- if (bytes > 0) {
-- /* we need to report consumed and current buffered output
-- * to the engine. The request will be streamed out or cancelled,
-- * no more data is coming from it and the engine should update
-- * its calculations before we destroy this information. */
-- h2_req_engine_out_consumed(ngn, task->c, bytes);
-- }
-- }
--
-- if (task->engine) {
-- if (!m->aborted && !task->c->aborted
-- && !h2_req_engine_is_shutdown(task->engine)) {
-- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, m->c, APLOGNO(10022)
-- "h2_mplx(%ld): task(%s) has not-shutdown "
-- "engine(%s)", m->id, task->id,
-- h2_req_engine_get_id(task->engine));
-- }
-- h2_ngn_shed_done_ngn(m->ngn_shed, task->engine);
-- }
-+ s_out_close(m, task);
-
- task->worker_done = 1;
- task->done_at = apr_time_now();
-- ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, m->c,
-+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, task->c,
- "h2_mplx(%s): request done, %f ms elapsed", task->id,
- (task->done_at - task->started_at) / 1000.0);
-
-- if (task->started_at > m->last_idle_block) {
-- /* this task finished without causing an 'idle block', e.g.
-- * a block by flow control.
-- */
-- if (task->done_at- m->last_limit_change >= m->limit_change_interval
-- && m->limit_active < m->max_active) {
-- /* Well behaving stream, allow it more workers */
-- m->limit_active = H2MIN(m->limit_active * 2,
-- m->max_active);
-- m->last_limit_change = task->done_at;
-- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c,
-- "h2_mplx(%ld): increase worker limit to %d",
-- m->id, m->limit_active);
-- }
-+ if (task->c && !task->c->aborted && task->started_at > m->last_mood_change) {
-+ s_mplx_be_happy(m, task);
- }
-
-+ ap_assert(task->done_done == 0);
-+
- stream = h2_ihash_get(m->streams, task->stream_id);
- if (stream) {
- /* stream not done yet. */
-- if (!m->aborted && h2_ihash_get(m->sredo, stream->id)) {
-+ if (!m->aborted && task->redo) {
- /* reset and schedule again */
- h2_task_redo(task);
-- h2_ihash_remove(m->sredo, stream->id);
- h2_iq_add(m->q, stream->id, NULL, NULL);
-+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, task->c,
-+ H2_STRM_MSG(stream, "redo, added to q"));
- }
- else {
- /* stream not cleaned up, stay around */
-- ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, m->c,
-+ task->done_done = 1;
-+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, task->c,
- H2_STRM_MSG(stream, "task_done, stream open"));
- if (stream->input) {
- h2_beam_leave(stream->input);
- }
-
- /* more data will not arrive, resume the stream */
-- check_data_for(m, stream, 0);
-+ mst_check_data_for(m, stream, 1);
- }
- }
- else if ((stream = h2_ihash_get(m->shold, task->stream_id)) != NULL) {
- /* stream is done, was just waiting for this. */
-- ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, m->c,
-+ task->done_done = 1;
-+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, task->c,
- H2_STRM_MSG(stream, "task_done, in hold"));
- if (stream->input) {
- h2_beam_leave(stream->input);
- }
-- stream_joined(m, stream);
-+ ms_stream_joined(m, stream);
- }
- else if ((stream = h2_ihash_get(m->spurge, task->stream_id)) != NULL) {
-- ap_log_cerror(APLOG_MARK, APLOG_WARNING, 0, m->c,
-+ ap_log_cerror(APLOG_MARK, APLOG_WARNING, 0, task->c,
- H2_STRM_LOG(APLOGNO(03517), stream, "already in spurge"));
- ap_assert("stream should not be in spurge" == NULL);
- }
- else {
-- ap_log_cerror(APLOG_MARK, APLOG_WARNING, 0, m->c, APLOGNO(03518)
-+ ap_log_cerror(APLOG_MARK, APLOG_WARNING, 0, task->c, APLOGNO(03518)
- "h2_mplx(%s): task_done, stream not found",
- task->id);
- ap_assert("stream should still be available" == NULL);
- }
- }
-
--void h2_mplx_task_done(h2_mplx *m, h2_task *task, h2_task **ptask)
-+void h2_mplx_s_task_done(h2_mplx *m, h2_task *task, h2_task **ptask)
- {
- H2_MPLX_ENTER_ALWAYS(m);
-
-- task_done(m, task, NULL);
- --m->tasks_active;
-+ s_task_done(m, task);
-
- if (m->join_wait) {
- apr_thread_cond_signal(m->join_wait);
- }
- if (ptask) {
- /* caller wants another task */
-- *ptask = next_stream_task(m);
-+ *ptask = s_next_stream_task(m);
- }
-- register_if_needed(m);
-+ ms_register_if_needed(m, 0);
-
- H2_MPLX_LEAVE(m);
- }
-@@ -916,94 +874,161 @@
- * h2_mplx DoS protection
- ******************************************************************************/
-
--static int latest_repeatable_unsubmitted_iter(void *data, void *val)
-+static int m_timed_out_busy_iter(void *data, void *val)
- {
- stream_iter_ctx *ctx = data;
- h2_stream *stream = val;
--
-- if (stream->task && !stream->task->worker_done
-- && h2_task_can_redo(stream->task)
-- && !h2_ihash_get(ctx->m->sredo, stream->id)) {
-- if (!h2_stream_is_ready(stream)) {
-- /* this task occupies a worker, the response has not been submitted
-- * yet, not been cancelled and it is a repeatable request
-- * -> it can be re-scheduled later */
-- if (!ctx->stream
-- || (ctx->stream->task->started_at < stream->task->started_at)) {
-- /* we did not have one or this one was started later */
-- ctx->stream = stream;
-- }
-- }
-+ if (h2_task_has_started(stream->task) && !stream->task->worker_done
-+ && (ctx->now - stream->task->started_at) > stream->task->timeout) {
-+ /* timed out stream occupying a worker, found */
-+ ctx->stream = stream;
-+ return 0;
- }
- return 1;
- }
-
--static h2_stream *get_latest_repeatable_unsubmitted_stream(h2_mplx *m)
-+static h2_stream *m_get_timed_out_busy_stream(h2_mplx *m)
- {
- stream_iter_ctx ctx;
- ctx.m = m;
- ctx.stream = NULL;
-- h2_ihash_iter(m->streams, latest_repeatable_unsubmitted_iter, &ctx);
-+ ctx.now = apr_time_now();
-+ h2_ihash_iter(m->streams, m_timed_out_busy_iter, &ctx);
- return ctx.stream;
- }
-
--static int timed_out_busy_iter(void *data, void *val)
-+static int m_latest_repeatable_unsubmitted_iter(void *data, void *val)
- {
- stream_iter_ctx *ctx = data;
- h2_stream *stream = val;
-- if (stream->task && !stream->task->worker_done
-- && (ctx->now - stream->task->started_at) > stream->task->timeout) {
-- /* timed out stream occupying a worker, found */
-- ctx->stream = stream;
-- return 0;
-+
-+ if (!stream->task) goto leave;
-+ if (!h2_task_has_started(stream->task) || stream->task->worker_done) goto leave;
-+ if (h2_stream_is_ready(stream)) goto leave;
-+ if (stream->task->redo) {
-+ ++ctx->count;
-+ goto leave;
-+ }
-+ if (h2_task_can_redo(stream->task)) {
-+ /* this task occupies a worker, the response has not been submitted
-+ * yet, not been cancelled and it is a repeatable request
-+ * -> we could redo it later */
-+ if (!ctx->stream
-+ || (ctx->stream->task->started_at < stream->task->started_at)) {
-+ /* we did not have one or this one was started later */
-+ ctx->stream = stream;
-+ }
- }
-+leave:
- return 1;
- }
-
--static h2_stream *get_timed_out_busy_stream(h2_mplx *m)
-+static apr_status_t m_assess_task_to_throttle(h2_task **ptask, h2_mplx *m)
- {
- stream_iter_ctx ctx;
-+
-+ /* count the running tasks already marked for redo and get one that could
-+ * be throttled */
-+ *ptask = NULL;
- ctx.m = m;
- ctx.stream = NULL;
-- ctx.now = apr_time_now();
-- h2_ihash_iter(m->streams, timed_out_busy_iter, &ctx);
-- return ctx.stream;
-+ ctx.count = 0;
-+ h2_ihash_iter(m->streams, m_latest_repeatable_unsubmitted_iter, &ctx);
-+ if (m->tasks_active - ctx.count > m->limit_active) {
-+ /* we are above the limit of running tasks, accounting for the ones
-+ * already throttled. */
-+ if (ctx.stream && ctx.stream->task) {
-+ *ptask = ctx.stream->task;
-+ return APR_EAGAIN;
-+ }
-+ /* above the limit, but seeing no candidate for easy throttling */
-+ if (m_get_timed_out_busy_stream(m)) {
-+ /* Too many busy workers, unable to cancel enough streams
-+ * and with a busy, timed out stream, we tell the client
-+ * to go away... */
-+ return APR_TIMEUP;
-+ }
-+ }
-+ return APR_SUCCESS;
- }
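A worked instance of the condition above, with made-up numbers: limit_active = 4, tasks_active = 7 and two running tasks already marked for redo gives 7 - 2 > 4, so the latest repeatable, unsubmitted task is handed back with APR_EAGAIN for throttling; the loop in m_unschedule_slow_tasks() below repeats this until the inequality no longer holds.

    /* Illustrative values only, not taken from the module. */
    #include <assert.h>

    int main(void)
    {
        int limit_active    = 4;  /* current dynamic limit             */
        int tasks_active    = 7;  /* tasks currently occupying workers */
        int marked_for_redo = 2;  /* ctx.count in the iterator above   */

        /* same test as m_assess_task_to_throttle(): still above the limit
         * after discounting tasks that are already being throttled */
        assert(tasks_active - marked_for_redo > limit_active);
        return 0;
    }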
-
--static apr_status_t unschedule_slow_tasks(h2_mplx *m)
-+static apr_status_t m_unschedule_slow_tasks(h2_mplx *m)
- {
-- h2_stream *stream;
-- int n;
-+ h2_task *task;
-+ apr_status_t rv;
-
- /* Try to get rid of streams that occupy workers. Look for safe requests
- * that are repeatable. If none found, fail the connection.
- */
-- n = (m->tasks_active - m->limit_active - (int)h2_ihash_count(m->sredo));
-- while (n > 0 && (stream = get_latest_repeatable_unsubmitted_stream(m))) {
-+ while (APR_EAGAIN == (rv = m_assess_task_to_throttle(&task, m))) {
- ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, m->c,
- "h2_mplx(%s): unschedule, resetting task for redo later",
-- stream->task->id);
-- h2_task_rst(stream->task, H2_ERR_CANCEL);
-- h2_ihash_add(m->sredo, stream);
-- --n;
-+ task->id);
-+ task->redo = 1;
-+ h2_task_rst(task, H2_ERR_CANCEL);
- }
-
-- if ((m->tasks_active - h2_ihash_count(m->sredo)) > m->limit_active) {
-- h2_stream *stream = get_timed_out_busy_stream(m);
-- if (stream) {
-- /* Too many busy workers, unable to cancel enough streams
-- * and with a busy, timed out stream, we tell the client
-- * to go away... */
-- return APR_TIMEUP;
-- }
-+ return rv;
-+}
-+
-+static apr_status_t s_mplx_be_happy(h2_mplx *m, h2_task *task)
-+{
-+ apr_time_t now;
-+
-+ --m->irritations_since;
-+ now = apr_time_now();
-+ if (m->limit_active < m->max_active
-+ && (now - m->last_mood_change >= m->mood_update_interval
-+ || m->irritations_since < -m->limit_active)) {
-+ m->limit_active = H2MIN(m->limit_active * 2, m->max_active);
-+ m->last_mood_change = now;
-+ m->irritations_since = 0;
-+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, task->c,
-+ "h2_mplx(%ld): mood update, increasing worker limit to %d",
-+ m->id, m->limit_active);
- }
- return APR_SUCCESS;
- }
-
--apr_status_t h2_mplx_idle(h2_mplx *m)
-+static apr_status_t m_be_annoyed(h2_mplx *m)
- {
- apr_status_t status = APR_SUCCESS;
- apr_time_t now;
-+
-+ ++m->irritations_since;
-+ now = apr_time_now();
-+ if (m->limit_active > 2 &&
-+ ((now - m->last_mood_change >= m->mood_update_interval)
-+ || (m->irritations_since >= m->limit_active))) {
-+
-+ if (m->limit_active > 16) {
-+ m->limit_active = 16;
-+ }
-+ else if (m->limit_active > 8) {
-+ m->limit_active = 8;
-+ }
-+ else if (m->limit_active > 4) {
-+ m->limit_active = 4;
-+ }
-+ else if (m->limit_active > 2) {
-+ m->limit_active = 2;
-+ }
-+ m->last_mood_change = now;
-+ m->irritations_since = 0;
-+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c,
-+ "h2_mplx(%ld): mood update, decreasing worker limit to %d",
-+ m->id, m->limit_active);
-+ }
-+
-+ if (m->tasks_active > m->limit_active) {
-+ status = m_unschedule_slow_tasks(m);
-+ }
-+ return status;
-+}
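Taken together, s_mplx_be_happy() and m_be_annoyed() adjust the per-connection worker limit in both directions: completed, well-behaved tasks double limit_active up to max_active, while irritations (idle blocks, client resets) step it down through 16/8/4/2. A compact sketch of just the limit arithmetic, for illustration only:

    /* Illustrative helpers mirroring the mood machinery above; not module code. */
    static int mood_step_up(int limit_active, int max_active)
    {
        int next = limit_active * 2;            /* H2MIN(limit_active * 2, max_active) */
        return (next < max_active)? next : max_active;
    }

    static int mood_step_down(int limit_active)
    {
        if (limit_active > 16) return 16;
        if (limit_active > 8)  return 8;
        if (limit_active > 4)  return 4;
        if (limit_active > 2)  return 2;
        return limit_active;                    /* never drops below 2 */
    }

Starting from a limit of 100, three consecutive annoyed updates give 16, 8 and 4; a later happy streak doubles the limit back toward max_active.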
-+
-+apr_status_t h2_mplx_m_idle(h2_mplx *m)
-+{
-+ apr_status_t status = APR_SUCCESS;
- apr_size_t scount;
-
- H2_MPLX_ENTER(m);
-@@ -1023,31 +1048,7 @@
- * of busy workers we allow for this connection until it
- * well behaves.
- */
-- now = apr_time_now();
-- m->last_idle_block = now;
-- if (m->limit_active > 2
-- && now - m->last_limit_change >= m->limit_change_interval) {
-- if (m->limit_active > 16) {
-- m->limit_active = 16;
-- }
-- else if (m->limit_active > 8) {
-- m->limit_active = 8;
-- }
-- else if (m->limit_active > 4) {
-- m->limit_active = 4;
-- }
-- else if (m->limit_active > 2) {
-- m->limit_active = 2;
-- }
-- m->last_limit_change = now;
-- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c,
-- "h2_mplx(%ld): decrease worker limit to %d",
-- m->id, m->limit_active);
-- }
--
-- if (m->tasks_active > m->limit_active) {
-- status = unschedule_slow_tasks(m);
-- }
-+ status = m_be_annoyed(m);
- }
- else if (!h2_iq_empty(m->q)) {
- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c,
-@@ -1077,167 +1078,30 @@
- h2_beam_is_closed(stream->output),
- (long)h2_beam_get_buffered(stream->output));
- h2_ihash_add(m->streams, stream);
-- check_data_for(m, stream, 0);
-+ mst_check_data_for(m, stream, 1);
- stream->out_checked = 1;
- status = APR_EAGAIN;
- }
- }
- }
- }
-- register_if_needed(m);
-+ ms_register_if_needed(m, 1);
-
- H2_MPLX_LEAVE(m);
- return status;
- }
-
- /*******************************************************************************
-- * HTTP/2 request engines
-- ******************************************************************************/
--
--typedef struct {
-- h2_mplx * m;
-- h2_req_engine *ngn;
-- int streams_updated;
--} ngn_update_ctx;
--
--static int ngn_update_window(void *ctx, void *val)
--{
-- ngn_update_ctx *uctx = ctx;
-- h2_stream *stream = val;
-- if (stream->task && stream->task->assigned == uctx->ngn
-- && output_consumed_signal(uctx->m, stream->task)) {
-- ++uctx->streams_updated;
-- }
-- return 1;
--}
--
--static apr_status_t ngn_out_update_windows(h2_mplx *m, h2_req_engine *ngn)
--{
-- ngn_update_ctx ctx;
--
-- ctx.m = m;
-- ctx.ngn = ngn;
-- ctx.streams_updated = 0;
-- h2_ihash_iter(m->streams, ngn_update_window, &ctx);
--
-- return ctx.streams_updated? APR_SUCCESS : APR_EAGAIN;
--}
--
--apr_status_t h2_mplx_req_engine_push(const char *ngn_type,
-- request_rec *r,
-- http2_req_engine_init *einit)
--{
-- apr_status_t status;
-- h2_mplx *m;
-- h2_task *task;
-- h2_stream *stream;
--
-- task = h2_ctx_rget_task(r);
-- if (!task) {
-- return APR_ECONNABORTED;
-- }
-- m = task->mplx;
--
-- H2_MPLX_ENTER(m);
--
-- stream = h2_ihash_get(m->streams, task->stream_id);
-- if (stream) {
-- status = h2_ngn_shed_push_request(m->ngn_shed, ngn_type, r, einit);
-- }
-- else {
-- status = APR_ECONNABORTED;
-- }
--
-- H2_MPLX_LEAVE(m);
-- return status;
--}
--
--apr_status_t h2_mplx_req_engine_pull(h2_req_engine *ngn,
-- apr_read_type_e block,
-- int capacity,
-- request_rec **pr)
--{
-- h2_ngn_shed *shed = h2_ngn_shed_get_shed(ngn);
-- h2_mplx *m = h2_ngn_shed_get_ctx(shed);
-- apr_status_t status;
-- int want_shutdown;
--
-- H2_MPLX_ENTER(m);
--
-- want_shutdown = (block == APR_BLOCK_READ);
--
-- /* Take this opportunity to update output consummation
-- * for this engine */
-- ngn_out_update_windows(m, ngn);
--
-- if (want_shutdown && !h2_iq_empty(m->q)) {
-- /* For a blocking read, check first if requests are to be
-- * had and, if not, wait a short while before doing the
-- * blocking, and if unsuccessful, terminating read.
-- */
-- status = h2_ngn_shed_pull_request(shed, ngn, capacity, 1, pr);
-- if (APR_STATUS_IS_EAGAIN(status)) {
-- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c,
-- "h2_mplx(%ld): start block engine pull", m->id);
-- apr_thread_cond_timedwait(m->task_thawed, m->lock,
-- apr_time_from_msec(20));
-- status = h2_ngn_shed_pull_request(shed, ngn, capacity, 1, pr);
-- }
-- }
-- else {
-- status = h2_ngn_shed_pull_request(shed, ngn, capacity,
-- want_shutdown, pr);
-- }
--
-- H2_MPLX_LEAVE(m);
-- return status;
--}
--
--void h2_mplx_req_engine_done(h2_req_engine *ngn, conn_rec *r_conn,
-- apr_status_t status)
--{
-- h2_task *task = h2_ctx_cget_task(r_conn);
--
-- if (task) {
-- h2_mplx *m = task->mplx;
-- h2_stream *stream;
--
-- H2_MPLX_ENTER_ALWAYS(m);
--
-- stream = h2_ihash_get(m->streams, task->stream_id);
--
-- ngn_out_update_windows(m, ngn);
-- h2_ngn_shed_done_task(m->ngn_shed, ngn, task);
--
-- if (status != APR_SUCCESS && stream
-- && h2_task_can_redo(task)
-- && !h2_ihash_get(m->sredo, stream->id)) {
-- h2_ihash_add(m->sredo, stream);
-- }
--
-- if (task->engine) {
-- /* cannot report that as done until engine returns */
-- }
-- else {
-- task_done(m, task, ngn);
-- }
--
-- H2_MPLX_LEAVE(m);
-- }
--}
--
--/*******************************************************************************
- * mplx master events dispatching
- ******************************************************************************/
-
--int h2_mplx_has_master_events(h2_mplx *m)
-+int h2_mplx_m_has_master_events(h2_mplx *m)
- {
- return apr_atomic_read32(&m->event_pending) > 0;
- }
-
--apr_status_t h2_mplx_dispatch_master_events(h2_mplx *m,
-- stream_ev_callback *on_resume,
-- void *on_ctx)
-+apr_status_t h2_mplx_m_dispatch_master_events(h2_mplx *m, stream_ev_callback *on_resume,
-+ void *on_ctx)
- {
- h2_stream *stream;
- int n, id;
-@@ -1247,8 +1111,8 @@
- apr_atomic_set32(&m->event_pending, 0);
-
- /* update input windows for streams */
-- h2_ihash_iter(m->streams, report_consumption_iter, m);
-- purge_streams(m, 1);
-+ h2_ihash_iter(m->streams, m_report_consumption_iter, m);
-+ m_purge_streams(m, 1);
-
- n = h2_ififo_count(m->readyq);
- while (n > 0
-@@ -1263,13 +1127,13 @@
- return APR_SUCCESS;
- }
-
--apr_status_t h2_mplx_keep_active(h2_mplx *m, h2_stream *stream)
-+apr_status_t h2_mplx_m_keep_active(h2_mplx *m, h2_stream *stream)
- {
-- check_data_for(m, stream, 1);
-+ mst_check_data_for(m, stream, 0);
- return APR_SUCCESS;
- }
-
--int h2_mplx_awaits_data(h2_mplx *m)
-+int h2_mplx_m_awaits_data(h2_mplx *m)
- {
- int waiting = 1;
-
-@@ -1278,11 +1142,24 @@
- if (h2_ihash_empty(m->streams)) {
- waiting = 0;
- }
-- else if (!m->tasks_active && !h2_ififo_count(m->readyq)
-- && h2_iq_empty(m->q)) {
-+ else if (!m->tasks_active && !h2_ififo_count(m->readyq) && h2_iq_empty(m->q)) {
- waiting = 0;
- }
-
- H2_MPLX_LEAVE(m);
- return waiting;
- }
-+
-+apr_status_t h2_mplx_m_client_rst(h2_mplx *m, int stream_id)
-+{
-+ h2_stream *stream;
-+ apr_status_t status = APR_SUCCESS;
-+
-+ H2_MPLX_ENTER_ALWAYS(m);
-+ stream = h2_ihash_get(m->streams, stream_id);
-+ if (stream && stream->task) {
-+ status = m_be_annoyed(m);
-+ }
-+ H2_MPLX_LEAVE(m);
-+ return status;
-+}
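h2_mplx_m_client_rst() is how the main connection feeds client-sent RST_STREAM frames into the mood machinery; only resets of streams that still occupy a task count as an irritation. A hypothetical call site, since the real one (in the session's frame handling) is not part of this hunk:

    /* Hypothetical: called on the main connection when RST_STREAM arrives
     * for stream_id; session->mplx is that connection's multiplexer. */
    static apr_status_t on_client_rst(h2_session *session, int stream_id)
    {
        return h2_mplx_m_client_rst(session->mplx, stream_id);
    }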
---- a/modules/http2/h2_mplx.h
-+++ b/modules/http2/h2_mplx.h
-@@ -31,8 +31,10 @@
- * queued in the multiplexer. If a task thread tries to write more
- * data, it is blocked until space becomes available.
- *
-- * Writing input is never blocked. In order to use flow control on the input,
-- * the mplx can be polled for input data consumption.
-+ * Naming Convention:
-+ * "h2_mplx_m_" are methods only to be called by the main connection
-+ * "h2_mplx_s_" are method only to be called by a secondary connection
-+ * "h2_mplx_t_" are method only to be called by a task handler (can be master or secondary)
- */
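As a hedged illustration of this convention, a worker serving secondary connections would only touch the h2_mplx_s_* entry points declared further down in this header. The loop below is a simplification; process_task() is a stand-in, not a real mod_http2 function.

    /* Sketch of a worker loop on the secondary side (illustrative only). */
    void process_task(struct h2_task *task);     /* hypothetical placeholder */

    static void secondary_worker_loop(h2_mplx *m)
    {
        struct h2_task *task = NULL;

        if (h2_mplx_s_pop_task(m, &task) == APR_EOF)
            return;                               /* mplx is done, nothing to do */
        while (task) {
            process_task(task);
            h2_mplx_s_task_done(m, task, &task);  /* may hand back the next task */
        }
    }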
-
- struct apr_pool_t;
-@@ -47,8 +49,6 @@
- struct apr_thread_cond_t;
- struct h2_workers;
- struct h2_iqueue;
--struct h2_ngn_shed;
--struct h2_req_engine;
-
- #include
-
-@@ -65,7 +65,6 @@
- unsigned int is_registered; /* is registered at h2_workers */
-
- struct h2_ihash_t *streams; /* all streams currently processing */
-- struct h2_ihash_t *sredo; /* all streams that need to be re-started */
- struct h2_ihash_t *shold; /* all streams done with task ongoing */
- struct h2_ihash_t *spurge; /* all streams done, ready for destroy */
-
-@@ -79,41 +78,35 @@
- int tasks_active; /* # of tasks being processed from this mplx */
- int limit_active; /* current limit on active tasks, dynamic */
- int max_active; /* max, hard limit # of active tasks in a process */
-- apr_time_t last_idle_block; /* last time, this mplx entered IDLE while
-- * streams were ready */
-- apr_time_t last_limit_change; /* last time, worker limit changed */
-- apr_interval_time_t limit_change_interval;
-+
-+ apr_time_t last_mood_change; /* last time the worker limit changed */
-+ apr_interval_time_t mood_update_interval; /* how frequently we update, at most */
-+ int irritations_since; /* irritations (>0) or happy events (<0) since last mood change */
-
- apr_thread_mutex_t *lock;
- struct apr_thread_cond_t *added_output;
-- struct apr_thread_cond_t *task_thawed;
- struct apr_thread_cond_t *join_wait;
-
- apr_size_t stream_max_mem;
-
- apr_pool_t *spare_io_pool;
-- apr_array_header_t *spare_slaves; /* spare slave connections */
-+ apr_array_header_t *spare_secondary; /* spare secondary connections */
-
- struct h2_workers *workers;
--
-- struct h2_ngn_shed *ngn_shed;
- };
-
--
--
- /*******************************************************************************
-- * Object lifecycle and information.
-+ * From the main connection processing: h2_mplx_m_*
- ******************************************************************************/
-
--apr_status_t h2_mplx_child_init(apr_pool_t *pool, server_rec *s);
-+apr_status_t h2_mplx_m_child_init(apr_pool_t *pool, server_rec *s);
-
- /**
- * Create the multiplexer for the given HTTP2 session.
- * Implicitly has reference count 1.
- */
--h2_mplx *h2_mplx_create(conn_rec *c, apr_pool_t *master,
-- const struct h2_config *conf,
-- struct h2_workers *workers);
-+h2_mplx *h2_mplx_m_create(conn_rec *c, server_rec *s, apr_pool_t *master,
-+ struct h2_workers *workers);
-
- /**
- * Decreases the reference counter of this mplx and waits for it
-@@ -123,26 +116,14 @@
- * @param m the mplx to be released and destroyed
- * @param wait condition var to wait on for ref counter == 0
- */
--void h2_mplx_release_and_join(h2_mplx *m, struct apr_thread_cond_t *wait);
--
--apr_status_t h2_mplx_pop_task(h2_mplx *m, struct h2_task **ptask);
--
--void h2_mplx_task_done(h2_mplx *m, struct h2_task *task, struct h2_task **ptask);
-+void h2_mplx_m_release_and_join(h2_mplx *m, struct apr_thread_cond_t *wait);
-
- /**
- * Shut down the multiplexer gracefully. Will no longer schedule new streams
- * but let the ongoing ones finish normally.
- * @return the highest stream id being/been processed
- */
--int h2_mplx_shutdown(h2_mplx *m);
--
--int h2_mplx_is_busy(h2_mplx *m);
--
--/*******************************************************************************
-- * IO lifetime of streams.
-- ******************************************************************************/
--
--struct h2_stream *h2_mplx_stream_get(h2_mplx *m, int id);
-+int h2_mplx_m_shutdown(h2_mplx *m);
-
- /**
- * Notifies mplx that a stream has been completely handled on the main
-@@ -151,20 +132,16 @@
- * @param m the mplx itself
- * @param stream the stream ready for cleanup
- */
--apr_status_t h2_mplx_stream_cleanup(h2_mplx *m, struct h2_stream *stream);
-+apr_status_t h2_mplx_m_stream_cleanup(h2_mplx *m, struct h2_stream *stream);
-
- /**
- * Waits on output data from any stream in this session to become available.
- * Returns APR_TIMEUP if no data arrived in the given time.
- */
--apr_status_t h2_mplx_out_trywait(h2_mplx *m, apr_interval_time_t timeout,
-- struct apr_thread_cond_t *iowait);
-+apr_status_t h2_mplx_m_out_trywait(h2_mplx *m, apr_interval_time_t timeout,
-+ struct apr_thread_cond_t *iowait);
-
--apr_status_t h2_mplx_keep_active(h2_mplx *m, struct h2_stream *stream);
--
--/*******************************************************************************
-- * Stream processing.
-- ******************************************************************************/
-+apr_status_t h2_mplx_m_keep_active(h2_mplx *m, struct h2_stream *stream);
-
- /**
- * Process a stream request.
-@@ -175,8 +152,8 @@
- * @param cmp the stream priority compare function
- * @param ctx context data for the compare function
- */
--apr_status_t h2_mplx_process(h2_mplx *m, struct h2_stream *stream,
-- h2_stream_pri_cmp *cmp, void *ctx);
-+apr_status_t h2_mplx_m_process(h2_mplx *m, struct h2_stream *stream,
-+ h2_stream_pri_cmp *cmp, void *ctx);
-
- /**
- * Stream priorities have changed, reschedule pending requests.
-@@ -185,7 +162,7 @@
- * @param cmp the stream priority compare function
- * @param ctx context data for the compare function
- */
--apr_status_t h2_mplx_reprioritize(h2_mplx *m, h2_stream_pri_cmp *cmp, void *ctx);
-+apr_status_t h2_mplx_m_reprioritize(h2_mplx *m, h2_stream_pri_cmp *cmp, void *ctx);
-
- typedef apr_status_t stream_ev_callback(void *ctx, struct h2_stream *stream);
-
-@@ -193,7 +170,7 @@
- * Check if the multiplexer has events for the master connection pending.
- * @return != 0 iff there are events pending
- */
--int h2_mplx_has_master_events(h2_mplx *m);
-+int h2_mplx_m_has_master_events(h2_mplx *m);
-
- /**
- * Dispatch events for the master connection, such as
-@@ -201,130 +178,46 @@
- * @param on_resume new output data has arrived for a suspended stream
- * @param ctx user supplied argument to invocation.
- */
--apr_status_t h2_mplx_dispatch_master_events(h2_mplx *m,
-- stream_ev_callback *on_resume,
-- void *ctx);
-+apr_status_t h2_mplx_m_dispatch_master_events(h2_mplx *m, stream_ev_callback *on_resume,
-+ void *ctx);
-
--int h2_mplx_awaits_data(h2_mplx *m);
-+int h2_mplx_m_awaits_data(h2_mplx *m);
-
- typedef int h2_mplx_stream_cb(struct h2_stream *s, void *ctx);
-
--apr_status_t h2_mplx_stream_do(h2_mplx *m, h2_mplx_stream_cb *cb, void *ctx);
-+apr_status_t h2_mplx_m_stream_do(h2_mplx *m, h2_mplx_stream_cb *cb, void *ctx);
-
--/*******************************************************************************
-- * Output handling of streams.
-- ******************************************************************************/
-+apr_status_t h2_mplx_m_client_rst(h2_mplx *m, int stream_id);
-
- /**
-- * Opens the output for the given stream with the specified response.
-+ * Master connection has entered idle mode.
-+ * @param m the mplx instance of the master connection
-+ * @return != SUCCESS iff connection should be terminated
- */
--apr_status_t h2_mplx_out_open(h2_mplx *mplx, int stream_id,
-- struct h2_bucket_beam *beam);
-+apr_status_t h2_mplx_m_idle(h2_mplx *m);
-
- /*******************************************************************************
-- * h2_mplx list Manipulation.
-+ * From a secondary connection processing: h2_mplx_s_*
- ******************************************************************************/
--
--/**
-- * The magic pointer value that indicates the head of a h2_mplx list
-- * @param b The mplx list
-- * @return The magic pointer value
-- */
--#define H2_MPLX_LIST_SENTINEL(b) APR_RING_SENTINEL((b), h2_mplx, link)
--
--/**
-- * Determine if the mplx list is empty
-- * @param b The list to check
-- * @return true or false
-- */
--#define H2_MPLX_LIST_EMPTY(b) APR_RING_EMPTY((b), h2_mplx, link)
--
--/**
-- * Return the first mplx in a list
-- * @param b The list to query
-- * @return The first mplx in the list
-- */
--#define H2_MPLX_LIST_FIRST(b) APR_RING_FIRST(b)
--
--/**
-- * Return the last mplx in a list
-- * @param b The list to query
-- * @return The last mplx int he list
-- */
--#define H2_MPLX_LIST_LAST(b) APR_RING_LAST(b)
--
--/**
-- * Insert a single mplx at the front of a list
-- * @param b The list to add to
-- * @param e The mplx to insert
-- */
--#define H2_MPLX_LIST_INSERT_HEAD(b, e) do { \
--h2_mplx *ap__b = (e); \
--APR_RING_INSERT_HEAD((b), ap__b, h2_mplx, link); \
--} while (0)
--
--/**
-- * Insert a single mplx at the end of a list
-- * @param b The list to add to
-- * @param e The mplx to insert
-- */
--#define H2_MPLX_LIST_INSERT_TAIL(b, e) do { \
--h2_mplx *ap__b = (e); \
--APR_RING_INSERT_TAIL((b), ap__b, h2_mplx, link); \
--} while (0)
--
--/**
-- * Get the next mplx in the list
-- * @param e The current mplx
-- * @return The next mplx
-- */
--#define H2_MPLX_NEXT(e) APR_RING_NEXT((e), link)
--/**
-- * Get the previous mplx in the list
-- * @param e The current mplx
-- * @return The previous mplx
-- */
--#define H2_MPLX_PREV(e) APR_RING_PREV((e), link)
--
--/**
-- * Remove a mplx from its list
-- * @param e The mplx to remove
-- */
--#define H2_MPLX_REMOVE(e) APR_RING_REMOVE((e), link)
-+apr_status_t h2_mplx_s_pop_task(h2_mplx *m, struct h2_task **ptask);
-+void h2_mplx_s_task_done(h2_mplx *m, struct h2_task *task, struct h2_task **ptask);
-
- /*******************************************************************************
-- * h2_mplx DoS protection
-+ * From a h2_task owner: h2_mplx_t_*
-+ * (a task is transferred from master to secondary connection and back in
-+ * its normal lifetime).
- ******************************************************************************/
-
- /**
-- * Master connection has entered idle mode.
-- * @param m the mplx instance of the master connection
-- * @return != SUCCESS iff connection should be terminated
-+ * Opens the output for the given stream with the specified response.
- */
--apr_status_t h2_mplx_idle(h2_mplx *m);
-+apr_status_t h2_mplx_t_out_open(h2_mplx *mplx, int stream_id,
-+ struct h2_bucket_beam *beam);
-
--/*******************************************************************************
-- * h2_req_engine handling
-- ******************************************************************************/
-+/**
-+ * Get the stream that belongs to the given task.
-+ */
-+struct h2_stream *h2_mplx_t_stream_get(h2_mplx *m, struct h2_task *task);
-
--typedef void h2_output_consumed(void *ctx, conn_rec *c, apr_off_t consumed);
--typedef apr_status_t h2_mplx_req_engine_init(struct h2_req_engine *engine,
-- const char *id,
-- const char *type,
-- apr_pool_t *pool,
-- apr_size_t req_buffer_size,
-- request_rec *r,
-- h2_output_consumed **pconsumed,
-- void **pbaton);
--
--apr_status_t h2_mplx_req_engine_push(const char *ngn_type,
-- request_rec *r,
-- h2_mplx_req_engine_init *einit);
--apr_status_t h2_mplx_req_engine_pull(struct h2_req_engine *ngn,
-- apr_read_type_e block,
-- int capacity,
-- request_rec **pr);
--void h2_mplx_req_engine_done(struct h2_req_engine *ngn, conn_rec *r_conn,
-- apr_status_t status);
-
- #endif /* defined(__mod_h2__h2_mplx__) */
---- a/modules/http2/h2_proxy_session.c
-+++ b/modules/http2/h2_proxy_session.c
-@@ -45,6 +45,7 @@
- unsigned int suspended : 1;
- unsigned int waiting_on_100 : 1;
- unsigned int waiting_on_ping : 1;
-+ unsigned int headers_ended : 1;
- uint32_t error_code;
-
- apr_bucket_brigade *input;
-@@ -61,7 +62,123 @@
- static void ping_arrived(h2_proxy_session *session);
- static apr_status_t check_suspended(h2_proxy_session *session);
- static void stream_resume(h2_proxy_stream *stream);
-+static apr_status_t submit_trailers(h2_proxy_stream *stream);
-
-+/*
-+ * The H2_PING connection sub-state: a state independent of the H2_SESSION state
-+ * of the connection:
-+ * - H2_PING_ST_NONE: no interference with request handling, ProxyTimeout in effect.
-+ * When entered, all suspended streams are unsuspended again.
-+ * - H2_PING_ST_AWAIT_ANY: new requests are suspended, a possibly configured "ping"
-+ * timeout is in effect. Any frame received transits to H2_PING_ST_NONE.
-+ * - H2_PING_ST_AWAIT_PING: same as above, but only a PING frame transits
-+ * to H2_PING_ST_NONE.
-+ *
-+ * An AWAIT state is entered on a new connection, or when re-using a connection
-+ * whose last frame was received some time ago. The latter sends a PING frame
-+ * and insists on an answer; the former is satisfied by any frame received from the
-+ * backend.
-+ *
-+ * This works for new connections as there is always at least one SETTINGS frame
-+ * that the backend sends. When re-using connection, we send a PING and insist on
-+ * receiving one back, as there might be frames in our connection buffers from
-+ * some time ago. Since some servers have protections against PING flooding, we
-+ * only ever have one PING unanswered.
-+ *
-+ * Requests are suspended while in a PING state, as we do not want to send data
-+ * before we can be reasonably sure that the connection is working (at least on
-+ * the h2 protocol level). This also means that the session can do blocking reads
-+ * when expecting PING answers.
-+ */
-+static void set_ping_timeout(h2_proxy_session *session)
-+{
-+ if (session->ping_timeout != -1 && session->save_timeout == -1) {
-+ apr_socket_t *socket = NULL;
-+
-+ socket = ap_get_conn_socket(session->c);
-+ if (socket) {
-+ apr_socket_timeout_get(socket, &session->save_timeout);
-+ apr_socket_timeout_set(socket, session->ping_timeout);
-+ }
-+ }
-+}
-+
-+static void unset_ping_timeout(h2_proxy_session *session)
-+{
-+ if (session->save_timeout != -1) {
-+ apr_socket_t *socket = NULL;
-+
-+ socket = ap_get_conn_socket(session->c);
-+ if (socket) {
-+ apr_socket_timeout_set(socket, session->save_timeout);
-+ session->save_timeout = -1;
-+ }
-+ }
-+}
-+
-+static void enter_ping_state(h2_proxy_session *session, h2_ping_state_t state)
-+{
-+ if (session->ping_state == state) return;
-+ switch (session->ping_state) {
-+ case H2_PING_ST_NONE:
-+ /* leaving NONE, enforce timeout, send frame maybe */
-+ if (H2_PING_ST_AWAIT_PING == state) {
-+ unset_ping_timeout(session);
-+ nghttp2_submit_ping(session->ngh2, 0, (const uint8_t *)"nevergonnagiveyouup");
-+ }
-+ set_ping_timeout(session);
-+ session->ping_state = state;
-+ break;
-+ default:
-+ /* no switching between the != NONE states */
-+ if (H2_PING_ST_NONE == state) {
-+ session->ping_state = state;
-+ unset_ping_timeout(session);
-+ ping_arrived(session);
-+ }
-+ break;
-+ }
-+}
-+
-+static void ping_new_session(h2_proxy_session *session, proxy_conn_rec *p_conn)
-+{
-+ session->save_timeout = -1;
-+ session->ping_timeout = (p_conn->worker->s->ping_timeout_set?
-+ p_conn->worker->s->ping_timeout : -1);
-+ session->ping_state = H2_PING_ST_NONE;
-+ enter_ping_state(session, H2_PING_ST_AWAIT_ANY);
-+}
-+
-+static void ping_reuse_session(h2_proxy_session *session)
-+{
-+ if (H2_PING_ST_NONE == session->ping_state) {
-+ apr_interval_time_t age = apr_time_now() - session->last_frame_received;
-+ if (age > apr_time_from_sec(1)) {
-+ enter_ping_state(session, H2_PING_ST_AWAIT_PING);
-+ }
-+ }
-+}
-+
-+static void ping_ev_frame_received(h2_proxy_session *session, const nghttp2_frame *frame)
-+{
-+ session->last_frame_received = apr_time_now();
-+ switch (session->ping_state) {
-+ case H2_PING_ST_NONE:
-+ /* nop */
-+ break;
-+ case H2_PING_ST_AWAIT_ANY:
-+ enter_ping_state(session, H2_PING_ST_NONE);
-+ break;
-+ case H2_PING_ST_AWAIT_PING:
-+ if (NGHTTP2_PING == frame->hd.type) {
-+ enter_ping_state(session, H2_PING_ST_NONE);
-+ }
-+ /* we may receive many other frames while we are waiting for the
-+ * PING answer. They may come all from our connection buffers and
-+ * say nothing about the current state of the backend. */
-+ break;
-+ }
-+}
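The frame-driven part of this sub-state machine can be condensed into a single pure function, shown here only as an illustration of ping_ev_frame_received() above (it assumes the nghttp2 header already included by this file):

    /* Illustration only: state after receiving a frame of the given type. */
    static h2_ping_state_t ping_state_on_frame(h2_ping_state_t cur, uint8_t frame_type)
    {
        switch (cur) {
        case H2_PING_ST_AWAIT_ANY:
            return H2_PING_ST_NONE;         /* any frame ends the wait */
        case H2_PING_ST_AWAIT_PING:
            return (frame_type == NGHTTP2_PING)? H2_PING_ST_NONE : cur;
        default:
            return cur;                     /* H2_PING_ST_NONE: no change */
        }
    }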
-
- static apr_status_t proxy_session_pre_close(void *theconn)
- {
-@@ -152,7 +269,8 @@
- session->id, buffer);
- }
-
-- session->last_frame_received = apr_time_now();
-+ ping_ev_frame_received(session, frame);
-+ /* Action for frame types: */
- switch (frame->hd.type) {
- case NGHTTP2_HEADERS:
- stream = nghttp2_session_get_stream_user_data(ngh2, frame->hd.stream_id);
-@@ -193,10 +311,6 @@
- stream_resume(stream);
- break;
- case NGHTTP2_PING:
-- if (session->check_ping) {
-- session->check_ping = 0;
-- ping_arrived(session);
-- }
- break;
- case NGHTTP2_PUSH_PROMISE:
- break;
-@@ -241,7 +355,8 @@
- return 1;
- }
-
--static void process_proxy_header(h2_proxy_stream *stream, const char *n, const char *v)
-+static void process_proxy_header(apr_table_t *headers, h2_proxy_stream *stream,
-+ const char *n, const char *v)
- {
- static const struct {
- const char *name;
-@@ -262,20 +377,18 @@
- if (!dconf->preserve_host) {
- for (i = 0; transform_hdrs[i].name; ++i) {
- if (!ap_cstr_casecmp(transform_hdrs[i].name, n)) {
-- apr_table_add(r->headers_out, n,
-- (*transform_hdrs[i].func)(r, dconf, v));
-+ apr_table_add(headers, n, (*transform_hdrs[i].func)(r, dconf, v));
- return;
- }
- }
- if (!ap_cstr_casecmp("Link", n)) {
- dconf = ap_get_module_config(r->per_dir_config, &proxy_module);
-- apr_table_add(r->headers_out, n,
-- h2_proxy_link_reverse_map(r, dconf,
-- stream->real_server_uri, stream->p_server_uri, v));
-+ apr_table_add(headers, n, h2_proxy_link_reverse_map(r, dconf,
-+ stream->real_server_uri, stream->p_server_uri, v));
- return;
- }
- }
-- apr_table_add(r->headers_out, n, v);
-+ apr_table_add(headers, n, v);
- }
-
- static apr_status_t h2_proxy_stream_add_header_out(h2_proxy_stream *stream,
-@@ -299,8 +412,13 @@
- return APR_SUCCESS;
- }
-
-+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, stream->session->c,
-+ "h2_proxy_stream(%s-%d): on_header %s: %s",
-+ stream->session->id, stream->id, n, v);
- if (!h2_proxy_res_ignore_header(n, nlen)) {
- char *hname, *hvalue;
-+ apr_table_t *headers = (stream->headers_ended?
-+ stream->r->trailers_out : stream->r->headers_out);
-
- hname = apr_pstrndup(stream->pool, n, nlen);
- h2_proxy_util_camel_case_header(hname, nlen);
-@@ -309,7 +427,7 @@
- ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, stream->session->c,
- "h2_proxy_stream(%s-%d): got header %s: %s",
- stream->session->id, stream->id, hname, hvalue);
-- process_proxy_header(stream, hname, hvalue);
-+ process_proxy_header(headers, stream, hname, hvalue);
- }
- return APR_SUCCESS;
- }
-@@ -374,6 +492,7 @@
- server_name, portstr)
- );
- }
-+ if (r->status >= 200) stream->headers_ended = 1;
-
- if (APLOGrtrace2(stream->r)) {
- ap_log_rerror(APLOG_MARK, APLOG_TRACE2, 0, stream->r,
-@@ -429,12 +548,6 @@
- stream_id, NGHTTP2_STREAM_CLOSED);
- return NGHTTP2_ERR_STREAM_CLOSING;
- }
-- if (stream->standalone) {
-- nghttp2_session_consume(ngh2, stream_id, len);
-- ap_log_rerror(APLOG_MARK, APLOG_TRACE2, 0, stream->r,
-- "h2_proxy_session(%s): stream %d, win_update %d bytes",
-- session->id, stream_id, (int)len);
-- }
- return 0;
- }
-
-@@ -493,12 +606,12 @@
- stream = nghttp2_session_get_stream_user_data(ngh2, stream_id);
- if (!stream) {
- ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, ap_server_conf, APLOGNO(03361)
-- "h2_proxy_stream(%s): data_read, stream %d not found",
-- stream->session->id, stream_id);
-+ "h2_proxy_stream(NULL): data_read, stream %d not found",
-+ stream_id);
- return NGHTTP2_ERR_CALLBACK_FAILURE;
- }
-
-- if (stream->session->check_ping) {
-+ if (stream->session->ping_state != H2_PING_ST_NONE) {
- /* suspend until we hear from the other side */
- stream->waiting_on_ping = 1;
- status = APR_EAGAIN;
-@@ -553,9 +666,14 @@
- stream->data_sent += readlen;
- ap_log_rerror(APLOG_MARK, APLOG_DEBUG, status, stream->r, APLOGNO(03468)
- "h2_proxy_stream(%d): request DATA %ld, %ld"
-- " total, flags=%d",
-- stream->id, (long)readlen, (long)stream->data_sent,
-+ " total, flags=%d", stream->id, (long)readlen, (long)stream->data_sent,
- (int)*data_flags);
-+ if ((*data_flags & NGHTTP2_DATA_FLAG_EOF) && !apr_is_empty_table(stream->r->trailers_in)) {
-+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, status, stream->r, APLOGNO(10179)
-+ "h2_proxy_stream(%d): submit trailers", stream->id);
-+ *data_flags |= NGHTTP2_DATA_FLAG_NO_END_STREAM;
-+ submit_trailers(stream);
-+ }
- return readlen;
- }
- else if (APR_STATUS_IS_EAGAIN(status)) {
-@@ -641,23 +759,20 @@
-
- nghttp2_option_new(&option);
- nghttp2_option_set_peer_max_concurrent_streams(option, 100);
-- nghttp2_option_set_no_auto_window_update(option, 1);
-+ nghttp2_option_set_no_auto_window_update(option, 0);
-
- nghttp2_session_client_new2(&session->ngh2, cbs, session, option);
-
- nghttp2_option_del(option);
- nghttp2_session_callbacks_del(cbs);
-
-+ ping_new_session(session, p_conn);
- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, APLOGNO(03362)
- "setup session for %s", p_conn->hostname);
- }
- else {
- h2_proxy_session *session = p_conn->data;
-- apr_interval_time_t age = apr_time_now() - session->last_frame_received;
-- if (age > apr_time_from_sec(1)) {
-- session->check_ping = 1;
-- nghttp2_submit_ping(session->ngh2, 0, (const uint8_t *)"nevergonnagiveyouup");
-- }
-+ ping_reuse_session(session);
- }
- return p_conn->data;
- }
-@@ -740,6 +855,8 @@
- stream->real_server_uri = apr_psprintf(stream->pool, "%s://%s", scheme, authority);
- stream->p_server_uri = apr_psprintf(stream->pool, "%s://%s", puri.scheme, authority);
- path = apr_uri_unparse(stream->pool, &puri, APR_URI_UNP_OMITSITEPART);
-+
-+
- h2_proxy_req_make(stream->req, stream->pool, r->method, scheme,
- authority, path, r->headers_in);
-
-@@ -826,6 +943,16 @@
- return APR_EGENERAL;
- }
-
-+static apr_status_t submit_trailers(h2_proxy_stream *stream)
-+{
-+ h2_proxy_ngheader *hd;
-+ int rv;
-+
-+ hd = h2_proxy_util_nghd_make(stream->pool, stream->r->trailers_in);
-+ rv = nghttp2_submit_trailer(stream->session->ngh2, stream->id, hd->nv, hd->nvlen);
-+ return rv == 0? APR_SUCCESS: APR_EGENERAL;
-+}
-+
- static apr_status_t feed_brigade(h2_proxy_session *session, apr_bucket_brigade *bb)
- {
- apr_status_t status = APR_SUCCESS;
-@@ -882,7 +1009,7 @@
- apr_socket_t *socket = NULL;
- apr_time_t save_timeout = -1;
-
-- if (block) {
-+ if (block && timeout > 0) {
- socket = ap_get_conn_socket(session->c);
- if (socket) {
- apr_socket_timeout_get(socket, &save_timeout);
-@@ -954,6 +1081,14 @@
- dispatch_event(session, H2_PROXYS_EV_STREAM_RESUMED, 0, NULL);
- }
-
-+static int is_waiting_for_backend(h2_proxy_session *session)
-+{
-+ return ((session->ping_state != H2_PING_ST_NONE)
-+ || ((session->suspended->nelts <= 0)
-+ && !nghttp2_session_want_write(session->ngh2)
-+ && nghttp2_session_want_read(session->ngh2)));
-+}
-+
- static apr_status_t check_suspended(h2_proxy_session *session)
- {
- h2_proxy_stream *stream;
-@@ -1408,7 +1543,22 @@
- break;
-
- case H2_PROXYS_ST_WAIT:
-- if (check_suspended(session) == APR_EAGAIN) {
-+ if (is_waiting_for_backend(session)) {
-+ /* we can do a blocking read with the default timeout (as
-+ * configured via ProxyTimeout) on our socket. There is
-+ * nothing we want to send or check until we get more data
-+ * from the backend. */
-+ status = h2_proxy_session_read(session, 1, 0);
-+ if (status == APR_SUCCESS) {
-+ have_read = 1;
-+ dispatch_event(session, H2_PROXYS_EV_DATA_READ, 0, NULL);
-+ }
-+ else {
-+ dispatch_event(session, H2_PROXYS_EV_CONN_ERROR, status, NULL);
-+ return status;
-+ }
-+ }
-+ else if (check_suspended(session) == APR_EAGAIN) {
- /* no stream has become resumed. Do a blocking read with
- * ever increasing timeouts... */
- if (session->wait_timeout < 25) {
-@@ -1423,7 +1573,7 @@
- ap_log_cerror(APLOG_MARK, APLOG_TRACE3, status, session->c,
- APLOGNO(03365)
- "h2_proxy_session(%s): WAIT read, timeout=%fms",
-- session->id, (float)session->wait_timeout/1000.0);
-+ session->id, session->wait_timeout/1000.0);
- if (status == APR_SUCCESS) {
- have_read = 1;
- dispatch_event(session, H2_PROXYS_EV_DATA_READ, 0, NULL);
-@@ -1543,42 +1693,3 @@
- int updated;
- } win_update_ctx;
-
--static int win_update_iter(void *udata, void *val)
--{
-- win_update_ctx *ctx = udata;
-- h2_proxy_stream *stream = val;
--
-- if (stream->r && stream->r->connection == ctx->c) {
-- ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, ctx->session->c,
-- "h2_proxy_session(%s-%d): win_update %ld bytes",
-- ctx->session->id, (int)stream->id, (long)ctx->bytes);
-- nghttp2_session_consume(ctx->session->ngh2, stream->id, ctx->bytes);
-- ctx->updated = 1;
-- return 0;
-- }
-- return 1;
--}
--
--
--void h2_proxy_session_update_window(h2_proxy_session *session,
-- conn_rec *c, apr_off_t bytes)
--{
-- if (!h2_proxy_ihash_empty(session->streams)) {
-- win_update_ctx ctx;
-- ctx.session = session;
-- ctx.c = c;
-- ctx.bytes = bytes;
-- ctx.updated = 0;
-- h2_proxy_ihash_iter(session->streams, win_update_iter, &ctx);
--
-- if (!ctx.updated) {
-- /* could not find the stream any more, possibly closed, update
-- * the connection window at least */
-- ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, session->c,
-- "h2_proxy_session(%s): win_update conn %ld bytes",
-- session->id, (long)bytes);
-- nghttp2_session_consume_connection(session->ngh2, (size_t)bytes);
-- }
-- }
--}
--
---- a/modules/http2/h2_proxy_session.h
-+++ b/modules/http2/h2_proxy_session.h
-@@ -60,6 +60,11 @@
- H2_PROXYS_EV_PRE_CLOSE, /* connection will close after this */
- } h2_proxys_event_t;
-
-+typedef enum {
-+ H2_PING_ST_NONE, /* normal connection mode, ProxyTimeout rules */
-+ H2_PING_ST_AWAIT_ANY, /* waiting for any frame from backend */
-+ H2_PING_ST_AWAIT_PING, /* waiting for PING frame from backend */
-+} h2_ping_state_t;
-
- typedef struct h2_proxy_session h2_proxy_session;
- typedef void h2_proxy_request_done(h2_proxy_session *s, request_rec *r,
-@@ -74,7 +79,6 @@
- nghttp2_session *ngh2; /* the nghttp2 session itself */
-
- unsigned int aborted : 1;
-- unsigned int check_ping : 1;
- unsigned int h2_front : 1; /* if front-end connection is HTTP/2 */
-
- h2_proxy_request_done *done;
-@@ -94,6 +98,10 @@
-
- apr_bucket_brigade *input;
- apr_bucket_brigade *output;
-+
-+ h2_ping_state_t ping_state;
-+ apr_time_t ping_timeout;
-+ apr_time_t save_timeout;
- };
-
- h2_proxy_session *h2_proxy_session_setup(const char *id, proxy_conn_rec *p_conn,
-@@ -120,9 +128,6 @@
-
- void h2_proxy_session_cleanup(h2_proxy_session *s, h2_proxy_request_done *done);
-
--void h2_proxy_session_update_window(h2_proxy_session *s,
-- conn_rec *c, apr_off_t bytes);
--
- #define H2_PROXY_REQ_URL_NOTE "h2-proxy-req-url"
-
- #endif /* h2_proxy_session_h */
---- a/modules/http2/h2_proxy_util.c
-+++ b/modules/http2/h2_proxy_util.c
-@@ -452,6 +452,22 @@
- return ngh;
- }
-
-+h2_proxy_ngheader *h2_proxy_util_nghd_make(apr_pool_t *p, apr_table_t *headers)
-+{
-+
-+ h2_proxy_ngheader *ngh;
-+ size_t n;
-+
-+ n = 0;
-+ apr_table_do(count_header, &n, headers, NULL);
-+
-+ ngh = apr_pcalloc(p, sizeof(h2_proxy_ngheader));
-+ ngh->nv = apr_pcalloc(p, n * sizeof(nghttp2_nv));
-+ apr_table_do(add_table_header, ngh, headers, NULL);
-+
-+ return ngh;
-+}
-+
- /*******************************************************************************
- * header HTTP/1 <-> HTTP/2 conversions
- ******************************************************************************/
-@@ -609,6 +625,7 @@
- apr_table_t *headers)
- {
- h1_ctx x;
-+ const char *val;
-
- req->method = method;
- req->scheme = scheme;
-@@ -623,6 +640,11 @@
- x.pool = pool;
- x.headers = req->headers;
- apr_table_do(set_h1_header, &x, headers, NULL);
-+ if ((val = apr_table_get(headers, "TE")) && ap_find_token(pool, val, "trailers")) {
-+ /* client accepts trailers, forward this information */
-+ apr_table_addn(req->headers, "TE", "trailers");
-+ }
-+ apr_table_setn(req->headers, "te", "trailers");
- return APR_SUCCESS;
- }
-
-@@ -915,12 +937,12 @@
- nlen = (int)strlen(ns);
- delta = nlen - olen;
- plen = ctx->slen + delta + 1;
-- p = apr_pcalloc(ctx->pool, plen);
-+ p = apr_palloc(ctx->pool, plen);
- memcpy(p, ctx->s, start);
- memcpy(p + start, ns, nlen);
- strcpy(p + start + nlen, ctx->s + end);
- ctx->s = p;
-- ctx->slen = (int)strlen(p);
-+ ctx->slen = plen - 1; /* (int)strlen(p) */
- if (ctx->i >= end) {
- ctx->i += delta;
- }
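Editorial sketch (not part of the patch): the apr_palloc()/slen change above computes the new string length arithmetically instead of calling strlen() on the spliced result, so an embedded NUL in the substituted data cannot shrink the tracked length. A standalone version of the same bookkeeping, with plain malloc() standing in for the pool allocator and splice() as an invented name:

    #include <stdlib.h>
    #include <string.h>

    /* Replace s[start..end) with ns; *pnewlen receives the new length. */
    static char *splice(const char *s, size_t slen, size_t start, size_t end,
                        const char *ns, size_t *pnewlen)
    {
        size_t nlen = strlen(ns);
        size_t plen = slen + nlen - (end - start) + 1;  /* incl. trailing NUL */
        char *p = malloc(plen);

        if (!p) return NULL;
        memcpy(p, s, start);
        memcpy(p + start, ns, nlen);
        memcpy(p + start + nlen, s + end, slen - end + 1); /* copy the NUL too */
        *pnewlen = plen - 1;   /* equivalent of the patched slen bookkeeping */
        return p;
    }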
---- a/modules/http2/h2_proxy_util.h
-+++ b/modules/http2/h2_proxy_util.h
-@@ -168,6 +168,8 @@
- h2_proxy_ngheader *h2_proxy_util_nghd_make_req(apr_pool_t *p,
- const struct h2_proxy_request *req);
-
-+h2_proxy_ngheader *h2_proxy_util_nghd_make(apr_pool_t *p, apr_table_t *headers);
-+
- /*******************************************************************************
- * h2_proxy_request helpers
- ******************************************************************************/
-@@ -183,7 +185,7 @@
-
- apr_time_t request_time;
-
-- unsigned int chunked : 1; /* iff requst body needs to be forwarded as chunked */
-+ unsigned int chunked : 1; /* iff request body needs to be forwarded as chunked */
- unsigned int serialize : 1; /* iff this request is written in HTTP/1.1 serialization */
- };
-
---- a/modules/http2/h2_push.c
-+++ b/modules/http2/h2_push.c
-@@ -464,33 +464,6 @@
- return NULL;
- }
-
--/*******************************************************************************
-- * push diary
-- *
-- * - The push diary keeps track of resources already PUSHed via HTTP/2 on this
-- * connection. It records a hash value from the absolute URL of the resource
-- * pushed.
-- * - Lacking openssl, it uses 'apr_hashfunc_default' for the value
-- * - with openssl, it uses SHA256 to calculate the hash value
-- * - whatever the method to generate the hash, the diary keeps a maximum of 64
-- * bits per hash, limiting the memory consumption to about
-- * H2PushDiarySize * 8
-- * bytes. Entries are sorted by most recently used and oldest entries are
-- * forgotten first.
-- * - Clients can initialize/replace the push diary by sending a 'Cache-Digest'
-- * header. Currently, this is the base64url encoded value of the cache digest
-- * as specified in https://datatracker.ietf.org/doc/draft-kazuho-h2-cache-digest/
-- * This draft can be expected to evolve and the definition of the header
-- * will be added there and refined.
-- * - The cache digest header is a Golomb Coded Set of hash values, but it may
-- * limit the amount of bits per hash value even further. For a good description
-- * of GCS, read here:
-- * http://giovanni.bajo.it/post/47119962313/golomb-coded-sets-smaller-than-bloom-filters
-- * - The means that the push diary might be initialized with hash values of much
-- * less than 64 bits, leading to more false positives, but smaller digest size.
-- ******************************************************************************/
--
--
- #define GCSLOG_LEVEL APLOG_TRACE1
-
- typedef struct h2_push_diary_entry {
-@@ -617,38 +590,48 @@
- return -1;
- }
-
--static h2_push_diary_entry *move_to_last(h2_push_diary *diary, apr_size_t idx)
-+static void move_to_last(h2_push_diary *diary, apr_size_t idx)
- {
- h2_push_diary_entry *entries = (h2_push_diary_entry*)diary->entries->elts;
- h2_push_diary_entry e;
-- apr_size_t lastidx = diary->entries->nelts-1;
-+ int lastidx;
-
-+ /* Move an existing entry to the last place */
-+ if (diary->entries->nelts <= 0)
-+ return;
-+
- /* move entry[idx] to the end */
-+ lastidx = diary->entries->nelts - 1;
- if (idx < lastidx) {
- e = entries[idx];
-- memmove(entries+idx, entries+idx+1, sizeof(e) * (lastidx - idx));
-+ memmove(entries+idx, entries+idx+1, sizeof(h2_push_diary_entry) * (lastidx - idx));
- entries[lastidx] = e;
- }
-- return &entries[lastidx];
- }
-
--static void h2_push_diary_append(h2_push_diary *diary, h2_push_diary_entry *e)
-+static void remove_first(h2_push_diary *diary)
- {
-- h2_push_diary_entry *ne;
-+ h2_push_diary_entry *entries = (h2_push_diary_entry*)diary->entries->elts;
-+ int lastidx;
-
-- if (diary->entries->nelts < diary->N) {
-- /* append a new diary entry at the end */
-- APR_ARRAY_PUSH(diary->entries, h2_push_diary_entry) = *e;
-- ne = &APR_ARRAY_IDX(diary->entries, diary->entries->nelts-1, h2_push_diary_entry);
-- }
-- else {
-- /* replace content with new digest. keeps memory usage constant once diary is full */
-- ne = move_to_last(diary, 0);
-- *ne = *e;
-+ /* move remaining entries to index 0 */
-+ lastidx = diary->entries->nelts - 1;
-+ if (lastidx > 0) {
-+ --diary->entries->nelts;
-+ memmove(entries, entries+1, sizeof(h2_push_diary_entry) * diary->entries->nelts);
- }
-+}
-+
-+static void h2_push_diary_append(h2_push_diary *diary, h2_push_diary_entry *e)
-+{
-+ while (diary->entries->nelts >= diary->N) {
-+ remove_first(diary);
-+ }
-+ /* append a new diary entry at the end */
-+ APR_ARRAY_PUSH(diary->entries, h2_push_diary_entry) = *e;
- /* Intentional no APLOGNO */
- ap_log_perror(APLOG_MARK, GCSLOG_LEVEL, 0, diary->entries->pool,
-- "push_diary_append: %"APR_UINT64_T_HEX_FMT, ne->hash);
-+ "push_diary_append: %"APR_UINT64_T_HEX_FMT, e->hash);
- }
-
- apr_array_header_t *h2_push_diary_update(h2_session *session, apr_array_header_t *pushes)
-@@ -691,30 +674,12 @@
- const struct h2_request *req,
- const struct h2_headers *res)
- {
-- h2_session *session = stream->session;
-- const char *cache_digest = apr_table_get(req->headers, "Cache-Digest");
- apr_array_header_t *pushes;
-- apr_status_t status;
-
-- if (cache_digest && session->push_diary) {
-- status = h2_push_diary_digest64_set(session->push_diary, req->authority,
-- cache_digest, stream->pool);
-- if (status != APR_SUCCESS) {
-- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, session->c,
-- H2_SSSN_LOG(APLOGNO(03057), session,
-- "push diary set from Cache-Digest: %s"), cache_digest);
-- }
-- }
- pushes = h2_push_collect(stream->pool, req, stream->push_policy, res);
- return h2_push_diary_update(stream->session, pushes);
- }
-
--static apr_int32_t h2_log2inv(unsigned char log2)
--{
-- return log2? (1 << log2) : 1;
--}
--
--
- typedef struct {
- h2_push_diary *diary;
- unsigned char log2p;
-@@ -829,16 +794,11 @@
- apr_size_t hash_count;
-
- nelts = diary->entries->nelts;
--
-- if (nelts > APR_UINT32_MAX) {
-- /* should not happen */
-- return APR_ENOTIMPL;
-- }
- N = ceil_power_of_2(nelts);
- log2n = h2_log2(N);
-
- /* Now log2p is the max number of relevant bits, so that
-- * log2p + log2n == mask_bits. We can uise a lower log2p
-+ * log2p + log2n == mask_bits. We can use a lower log2p
- * and have a shorter set encoding...
- */
- log2pmax = h2_log2(ceil_power_of_2(maxP));
-@@ -895,166 +855,3 @@
- return APR_SUCCESS;
- }
-
--typedef struct {
-- h2_push_diary *diary;
-- apr_pool_t *pool;
-- unsigned char log2p;
-- const unsigned char *data;
-- apr_size_t datalen;
-- apr_size_t offset;
-- unsigned int bit;
-- apr_uint64_t last_val;
--} gset_decoder;
--
--static int gset_decode_next_bit(gset_decoder *decoder)
--{
-- if (++decoder->bit >= 8) {
-- if (++decoder->offset >= decoder->datalen) {
-- return -1;
-- }
-- decoder->bit = 0;
-- }
-- return (decoder->data[decoder->offset] & cbit_mask[decoder->bit])? 1 : 0;
--}
--
--static apr_status_t gset_decode_next(gset_decoder *decoder, apr_uint64_t *phash)
--{
-- apr_uint64_t flex = 0, fixed = 0, delta;
-- int i;
--
-- /* read 1 bits until we encounter 0, then read log2n(diary-P) bits.
-- * On a malformed bit-string, this will not fail, but produce results
-- * which are pbly too large. Luckily, the diary will modulo the hash.
-- */
-- while (1) {
-- int bit = gset_decode_next_bit(decoder);
-- if (bit == -1) {
-- return APR_EINVAL;
-- }
-- if (!bit) {
-- break;
-- }
-- ++flex;
-- }
--
-- for (i = 0; i < decoder->log2p; ++i) {
-- int bit = gset_decode_next_bit(decoder);
-- if (bit == -1) {
-- return APR_EINVAL;
-- }
-- fixed = (fixed << 1) | bit;
-- }
--
-- delta = (flex << decoder->log2p) | fixed;
-- *phash = delta + decoder->last_val;
-- decoder->last_val = *phash;
--
-- /* Intentional no APLOGNO */
-- ap_log_perror(APLOG_MARK, GCSLOG_LEVEL, 0, decoder->pool,
-- "h2_push_diary_digest_dec: val=%"APR_UINT64_T_HEX_FMT", delta=%"
-- APR_UINT64_T_HEX_FMT", flex=%d, fixed=%"APR_UINT64_T_HEX_FMT,
-- *phash, delta, (int)flex, fixed);
--
-- return APR_SUCCESS;
--}
--
--/**
-- * Initialize the push diary by a cache digest as described in
-- * https://datatracker.ietf.org/doc/draft-kazuho-h2-cache-digest/
-- * .
-- * @param diary the diary to set the digest into
-- * @param data the binary cache digest
-- * @param len the length of the cache digest
-- * @return APR_EINVAL if digest was not successfully parsed
-- */
--apr_status_t h2_push_diary_digest_set(h2_push_diary *diary, const char *authority,
-- const char *data, apr_size_t len)
--{
-- gset_decoder decoder;
-- unsigned char log2n, log2p;
-- int N, i;
-- apr_pool_t *pool = diary->entries->pool;
-- h2_push_diary_entry e;
-- apr_status_t status = APR_SUCCESS;
--
-- if (len < 2) {
-- /* at least this should be there */
-- return APR_EINVAL;
-- }
-- log2n = data[0];
-- log2p = data[1];
-- diary->mask_bits = log2n + log2p;
-- if (diary->mask_bits > 64) {
-- /* cannot handle */
-- return APR_ENOTIMPL;
-- }
--
-- /* whatever is in the digest, it replaces the diary entries */
-- apr_array_clear(diary->entries);
-- if (!authority || !strcmp("*", authority)) {
-- diary->authority = NULL;
-- }
-- else if (!diary->authority || strcmp(diary->authority, authority)) {
-- diary->authority = apr_pstrdup(diary->entries->pool, authority);
-- }
--
-- N = h2_log2inv(log2n + log2p);
--
-- decoder.diary = diary;
-- decoder.pool = pool;
-- decoder.log2p = log2p;
-- decoder.data = (const unsigned char*)data;
-- decoder.datalen = len;
-- decoder.offset = 1;
-- decoder.bit = 8;
-- decoder.last_val = 0;
--
-- diary->N = N;
-- /* Determine effective N we use for storage */
-- if (!N) {
-- /* a totally empty cache digest. someone tells us that she has no
-- * entries in the cache at all. Use our own preferences for N+mask
-- */
-- diary->N = diary->NMax;
-- return APR_SUCCESS;
-- }
-- else if (N > diary->NMax) {
-- /* Store not more than diary is configured to hold. We open us up
-- * to DOS attacks otherwise. */
-- diary->N = diary->NMax;
-- }
--
-- /* Intentional no APLOGNO */
-- ap_log_perror(APLOG_MARK, GCSLOG_LEVEL, 0, pool,
-- "h2_push_diary_digest_set: N=%d, log2n=%d, "
-- "diary->mask_bits=%d, dec.log2p=%d",
-- (int)diary->N, (int)log2n, diary->mask_bits,
-- (int)decoder.log2p);
--
-- for (i = 0; i < diary->N; ++i) {
-- if (gset_decode_next(&decoder, &e.hash) != APR_SUCCESS) {
-- /* the data may have less than N values */
-- break;
-- }
-- h2_push_diary_append(diary, &e);
-- }
--
-- /* Intentional no APLOGNO */
-- ap_log_perror(APLOG_MARK, GCSLOG_LEVEL, 0, pool,
-- "h2_push_diary_digest_set: diary now with %d entries, mask_bits=%d",
-- (int)diary->entries->nelts, diary->mask_bits);
-- return status;
--}
--
--apr_status_t h2_push_diary_digest64_set(h2_push_diary *diary, const char *authority,
-- const char *data64url, apr_pool_t *pool)
--{
-- const char *data;
-- apr_size_t len = h2_util_base64url_decode(&data, data64url, pool);
-- /* Intentional no APLOGNO */
-- ap_log_perror(APLOG_MARK, GCSLOG_LEVEL, 0, pool,
-- "h2_push_diary_digest64_set: digest=%s, dlen=%d",
-- data64url, (int)len);
-- return h2_push_diary_digest_set(diary, authority, data, len);
--}
--
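Editorial sketch (not part of the patch): the rewritten h2_push_diary_append() and remove_first() above keep the diary at a fixed capacity, dropping the oldest entry (index 0) first so memory use stays at N entries. The same behaviour on a plain C array; DIARY_N, entry and diary_append are invented stand-ins for the APR array the module uses.

    #include <string.h>

    #define DIARY_N 8

    typedef struct { unsigned long hash; } entry;

    static entry diary[DIARY_N];
    static int diary_used;

    static void diary_append(unsigned long hash)
    {
        while (diary_used >= DIARY_N) {
            /* forget the oldest entry, shift the rest towards index 0 */
            memmove(diary, diary + 1, sizeof(entry) * (--diary_used));
        }
        diary[diary_used++].hash = hash;   /* newest entry sits last */
    }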
---- a/modules/http2/h2_push.h
-+++ b/modules/http2/h2_push.h
-@@ -35,6 +35,44 @@
- H2_PUSH_DIGEST_SHA256
- } h2_push_digest_type;
-
-+/*******************************************************************************
-+ * push diary
-+ *
-+ * - The push diary keeps track of resources already PUSHed via HTTP/2 on this
-+ * connection. It records a hash value from the absolute URL of the resource
-+ * pushed.
-+ * - With openssl, it uses SHA256 to calculate the hash value; lacking
-+ *   openssl, it falls back to apr_hashfunc_default() to calculate the
-+ *   hash value.
-+ * - whatever the method to generate the hash, the diary keeps a maximum of 64
-+ * bits per hash, limiting the memory consumption to about
-+ * H2PushDiarySize * 8
-+ * bytes. Entries are sorted by most recently used and oldest entries are
-+ * forgotten first.
-+ * - While useful by itself to avoid duplicated PUSHes on the same connection,
-+ * the original idea was that clients provided a 'Cache-Digest' header with
-+ * the values of *their own* cached resources. This was described in
-+ *   https://datatracker.ietf.org/doc/draft-kazuho-h2-cache-digest/
-+ * and some subsequent revisions that tweaked values but kept the overall idea.
-+ * - The draft was abandoned by the IETF http-wg, as support from major clients,
-+ * e.g. browsers, was lacking for various reasons.
-+ * - For these reasons, mod_h2 abandoned its support for client supplied values
-+ * but keeps the diary. It seems to provide value for applications using PUSH,
-+ * is configurable in size and defaults to a very moderate amount of memory
-+ * used.
-+ * - The cache digest header is a Golomb Coded Set of hash values, but it may
-+ * limit the amount of bits per hash value even further. For a good description
-+ * of GCS, read here:
-+ *   http://giovanni.bajo.it/post/47119962313/golomb-coded-sets-smaller-than-bloom-filters
-+ ******************************************************************************/
-+
-+
-+/*
-+ * The push diary is based on the abandoned draft
-+ *   https://datatracker.ietf.org/doc/draft-kazuho-h2-cache-digest/
-+ * that describes how to use golomb filters.
-+ */
-+
- typedef struct h2_push_diary h2_push_diary;
-
- typedef void h2_push_digest_calc(h2_push_diary *diary, apr_uint64_t *phash, h2_push *push);
-@@ -101,20 +139,4 @@
- int maxP, const char *authority,
- const char **pdata, apr_size_t *plen);
-
--/**
-- * Initialize the push diary by a cache digest as described in
-- * https://datatracker.ietf.org/doc/draft-kazuho-h2-cache-digest/
-- * .
-- * @param diary the diary to set the digest into
-- * @param authority the authority to set the data for
-- * @param data the binary cache digest
-- * @param len the length of the cache digest
-- * @return APR_EINVAL if digest was not successfully parsed
-- */
--apr_status_t h2_push_diary_digest_set(h2_push_diary *diary, const char *authority,
-- const char *data, apr_size_t len);
--
--apr_status_t h2_push_diary_digest64_set(h2_push_diary *diary, const char *authority,
-- const char *data64url, apr_pool_t *pool);
--
- #endif /* defined(__mod_h2__h2_push__) */
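Editorial sketch (not part of the patch): the comment block above mentions that each hash is reduced to log2(N) + log2(P) significant bits before Golomb coding. A small worked example of that sizing arithmetic; the helpers are re-sketched here for illustration and the sample values are made up.

    #include <stdio.h>

    static unsigned char ilog2(int n)
    {
        unsigned char b = 0;
        while (n > 1) { n >>= 1; ++b; }
        return b;
    }

    static int ceil_power_of_2(int n)
    {
        int p = 2;              /* keep a small minimum so ilog2 stays useful */
        while (p < n) p <<= 1;
        return p;
    }

    int main(void)
    {
        int nelts = 100, maxP = 128;
        int N = ceil_power_of_2(nelts);                      /* 128 */
        int bits = ilog2(N) + ilog2(ceil_power_of_2(maxP));  /* 7 + 7 */
        printf("relevant bits per hash: %d\n", bits);        /* 14 */
        return 0;
    }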
---- a/modules/http2/h2_request.c
-+++ b/modules/http2/h2_request.c
-@@ -17,6 +17,7 @@
- #include <assert.h>
-
- #include <apr_strings.h>
-+#include <ap_mmn.h>
-
- #include <httpd.h>
- #include <http_core.h>
-@@ -46,9 +47,9 @@
- static int set_h1_header(void *ctx, const char *key, const char *value)
- {
- h1_ctx *x = ctx;
-- x->status = h2_req_add_header(x->headers, x->pool, key, strlen(key),
-- value, strlen(value));
-- return (x->status == APR_SUCCESS)? 1 : 0;
-+ int was_added;
-+ h2_req_add_header(x->headers, x->pool, key, strlen(key), value, strlen(value), 0, &was_added);
-+ return 1;
- }
-
- apr_status_t h2_request_rcreate(h2_request **preq, apr_pool_t *pool,
-@@ -84,8 +85,7 @@
- req->path = path;
- req->headers = apr_table_make(pool, 10);
- if (r->server) {
-- req->serialize = h2_config_geti(h2_config_sget(r->server),
-- H2_CONF_SER_HEADERS);
-+ req->serialize = h2_config_rgeti(r, H2_CONF_SER_HEADERS);
- }
-
- x.pool = pool;
-@@ -99,10 +99,12 @@
-
- apr_status_t h2_request_add_header(h2_request *req, apr_pool_t *pool,
- const char *name, size_t nlen,
-- const char *value, size_t vlen)
-+ const char *value, size_t vlen,
-+ size_t max_field_len, int *pwas_added)
- {
- apr_status_t status = APR_SUCCESS;
-
-+ *pwas_added = 0;
- if (nlen <= 0) {
- return status;
- }
-@@ -143,8 +145,9 @@
- }
- }
- else {
-- /* non-pseudo header, append to work bucket of stream */
-- status = h2_req_add_header(req->headers, pool, name, nlen, value, vlen);
-+ /* non-pseudo header, add to table */
-+ status = h2_req_add_header(req->headers, pool, name, nlen, value, vlen,
-+ max_field_len, pwas_added);
- }
-
- return status;
-@@ -156,7 +159,7 @@
-
- /* rfc7540, ch. 8.1.2.3:
- * - if we have :authority, it overrides any Host header
-- * - :authority MUST be ommited when converting h1->h2, so we
-+ * - :authority MUST be omitted when converting h1->h2, so we
- * might get a stream without, but then Host needs to be there */
- if (!req->authority) {
- const char *host = apr_table_get(req->headers, "Host");
-@@ -206,13 +209,11 @@
- return dst;
- }
-
--request_rec *h2_request_create_rec(const h2_request *req, conn_rec *c)
-+#if !AP_MODULE_MAGIC_AT_LEAST(20150222, 13)
-+static request_rec *my_ap_create_request(conn_rec *c)
- {
-- int access_status = HTTP_OK;
-- const char *rpath;
- apr_pool_t *p;
- request_rec *r;
-- const char *s;
-
- apr_pool_create(&p, c->pool);
- apr_pool_tag(p, "request");
-@@ -226,8 +227,8 @@
- r->ap_auth_type = NULL;
-
- r->allowed_methods = ap_make_method_list(p, 2);
--
-- r->headers_in = apr_table_clone(r->pool, req->headers);
-+
-+ r->headers_in = apr_table_make(r->pool, 5);
- r->trailers_in = apr_table_make(r->pool, 5);
- r->subprocess_env = apr_table_make(r->pool, 25);
- r->headers_out = apr_table_make(r->pool, 12);
-@@ -262,6 +263,24 @@
- r->useragent_addr = c->client_addr;
- r->useragent_ip = c->client_ip;
-
-+ return r;
-+}
-+#endif
-+
-+request_rec *h2_request_create_rec(const h2_request *req, conn_rec *c)
-+{
-+ int access_status = HTTP_OK;
-+ const char *rpath;
-+ const char *s;
-+
-+#if AP_MODULE_MAGIC_AT_LEAST(20150222, 13)
-+ request_rec *r = ap_create_request(c);
-+#else
-+ request_rec *r = my_ap_create_request(c);
-+#endif
-+
-+ r->headers_in = apr_table_clone(r->pool, req->headers);
-+
- ap_run_pre_read_request(r, c);
-
- /* Time to populate r with the data we have. */
-@@ -272,6 +291,9 @@
- if (r->method_number == M_GET && r->method[0] == 'H') {
- r->header_only = 1;
- }
-+ r->the_request = apr_psprintf(r->pool, "%s %s HTTP/2.0",
-+ req->method, req->path ? req->path : "");
-+ r->headers_in = apr_table_clone(r->pool, req->headers);
-
- rpath = (req->path ? req->path : "");
- ap_parse_uri(r, rpath);
-@@ -288,7 +310,9 @@
- */
- r->hostname = NULL;
- ap_update_vhost_from_headers(r);
--
-+ r->protocol = "HTTP/2.0";
-+ r->proto_num = HTTP_VERSION(2, 0);
-+
- /* we may have switched to another server */
- r->per_dir_config = r->server->lookup_defaults;
-
-@@ -337,3 +361,4 @@
- }
-
-
-+
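Editorial sketch (not part of the patch): the h2_request.c hunk above calls ap_create_request() when the running httpd exports it (module magic 20150222.13 or later) and keeps a local fallback otherwise. The version-guard pattern, with create_request_compat as an invented name and the fallback left empty; a real fallback builds the request_rec by hand as my_ap_create_request() does in the patch.

    #include <httpd.h>
    #include <http_protocol.h>
    #include <ap_mmn.h>

    static request_rec *create_request_compat(conn_rec *c)
    {
    #if AP_MODULE_MAGIC_AT_LEAST(20150222, 13)
        return ap_create_request(c);
    #else
        /* older httpd: construct the request_rec manually here */
        return NULL;
    #endif
    }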
---- a/modules/http2/h2_request.h
-+++ b/modules/http2/h2_request.h
-@@ -24,7 +24,8 @@
-
- apr_status_t h2_request_add_header(h2_request *req, apr_pool_t *pool,
- const char *name, size_t nlen,
-- const char *value, size_t vlen);
-+ const char *value, size_t vlen,
-+ size_t max_field_len, int *pwas_added);
-
- apr_status_t h2_request_add_trailer(h2_request *req, apr_pool_t *pool,
- const char *name, size_t nlen,
---- a/modules/http2/h2_session.c
-+++ b/modules/http2/h2_session.c
-@@ -106,7 +106,7 @@
-
- static void cleanup_unprocessed_streams(h2_session *session)
- {
-- h2_mplx_stream_do(session->mplx, rst_unprocessed_stream, session);
-+ h2_mplx_m_stream_do(session->mplx, rst_unprocessed_stream, session);
- }
-
- static h2_stream *h2_session_open_stream(h2_session *session, int stream_id,
-@@ -385,14 +385,19 @@
- break;
- case NGHTTP2_RST_STREAM:
- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, APLOGNO(03067)
-- "h2_stream(%ld-%d): RST_STREAM by client, errror=%d",
-+ "h2_stream(%ld-%d): RST_STREAM by client, error=%d",
- session->id, (int)frame->hd.stream_id,
- (int)frame->rst_stream.error_code);
- stream = h2_session_stream_get(session, frame->hd.stream_id);
- if (stream && stream->initiated_on) {
-+            /* A stream reset on a request (push) we sent to the client.
-+             * Normal, when the client does not want it. */
- ++session->pushes_reset;
- }
- else {
-+ /* A stream reset on a request it sent us. Could happen in a browser
-+ * when the user navigates away or cancels loading - maybe. */
-+ h2_mplx_m_client_rst(session->mplx, frame->hd.stream_id);
- ++session->streams_reset;
- }
- break;
-@@ -462,7 +467,7 @@
- }
-
- static int h2_session_continue_data(h2_session *session) {
-- if (h2_mplx_has_master_events(session->mplx)) {
-+ if (h2_mplx_m_has_master_events(session->mplx)) {
- return 0;
- }
- if (h2_conn_io_needs_flush(&session->io)) {
-@@ -495,9 +500,7 @@
- return NGHTTP2_ERR_WOULDBLOCK;
- }
-
-- if (frame->data.padlen > H2_MAX_PADLEN) {
-- return NGHTTP2_ERR_PROTO;
-- }
-+ ap_assert(frame->data.padlen <= (H2_MAX_PADLEN+1));
- padlen = (unsigned char)frame->data.padlen;
-
- stream = h2_session_stream_get(session, stream_id);
-@@ -513,8 +516,9 @@
- H2_STRM_MSG(stream, "send_data_cb for %ld bytes"),
- (long)length);
-
-- status = h2_conn_io_write(&session->io, (const char *)framehd, 9);
-+ status = h2_conn_io_write(&session->io, (const char *)framehd, H2_FRAME_HDR_LEN);
- if (padlen && status == APR_SUCCESS) {
-+ --padlen;
- status = h2_conn_io_write(&session->io, (const char *)&padlen, 1);
- }
-
-@@ -622,6 +626,39 @@
- }
- #endif
-
-+static ssize_t select_padding_cb(nghttp2_session *ngh2,
-+ const nghttp2_frame *frame,
-+ size_t max_payloadlen, void *user_data)
-+{
-+ h2_session *session = user_data;
-+ ssize_t frame_len = frame->hd.length + H2_FRAME_HDR_LEN; /* the total length without padding */
-+ ssize_t padded_len = frame_len;
-+
-+ /* Determine # of padding bytes to append to frame. Unless session->padding_always
-+     * is set, the number may be capped by the io.write_size that currently applies.
-+ */
-+ if (session->padding_max) {
-+ int n = ap_random_pick(0, session->padding_max);
-+ padded_len = H2MIN(max_payloadlen + H2_FRAME_HDR_LEN, frame_len + n);
-+ }
-+
-+ if (padded_len != frame_len) {
-+ if (!session->padding_always && session->io.write_size
-+ && (padded_len > session->io.write_size)
-+ && (frame_len <= session->io.write_size)) {
-+ padded_len = session->io.write_size;
-+ }
-+ if (APLOGctrace2(session->c)) {
-+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, session->c,
-+ "select padding from [%d, %d]: %d (frame length: 0x%04x, write size: %d)",
-+ (int)frame_len, (int)max_payloadlen+H2_FRAME_HDR_LEN,
-+ (int)(padded_len - frame_len), (int)padded_len, (int)session->io.write_size);
-+ }
-+ return padded_len - H2_FRAME_HDR_LEN;
-+ }
-+ return frame->hd.length;
-+}
-+
- #define NGH2_SET_CALLBACK(callbacks, name, fn)\
- nghttp2_session_callbacks_set_##name##_callback(callbacks, fn)
-
-@@ -647,6 +684,7 @@
- #ifdef H2_NG2_INVALID_HEADER_CB
- NGH2_SET_CALLBACK(*pcb, on_invalid_header, on_invalid_header_cb);
- #endif
-+ NGH2_SET_CALLBACK(*pcb, select_padding, select_padding_cb);
- return APR_SUCCESS;
- }
-
-@@ -691,7 +729,7 @@
- * Remove all streams greater than this number without submitting
- * a RST_STREAM frame, since that should be clear from the GOAWAY
- * we send. */
-- session->local.accepted_max = h2_mplx_shutdown(session->mplx);
-+ session->local.accepted_max = h2_mplx_m_shutdown(session->mplx);
- session->local.error = error;
- }
- else {
-@@ -741,7 +779,7 @@
- }
-
- transit(session, trigger, H2_SESSION_ST_CLEANUP);
-- h2_mplx_release_and_join(session->mplx, session->iowait);
-+ h2_mplx_m_release_and_join(session->mplx, session->iowait);
- session->mplx = NULL;
-
- ap_assert(session->ngh2);
-@@ -757,13 +795,12 @@
- {
- conn_rec *c = data;
- h2_session *session;
-- h2_ctx *ctx = h2_ctx_get(c, 0);
-
-- if (ctx && (session = h2_ctx_session_get(ctx))) {
-+ if ((session = h2_ctx_get_session(c))) {
- /* if the session is still there, now is the last chance
- * to perform cleanup. Normally, cleanup should have happened
- * earlier in the connection pre_close. Main reason is that
-- * any ongoing requests on slave connections might still access
-+ * any ongoing requests on secondary connections might still access
- * data which has, at this time, already been freed. An example
- * is mod_ssl that uses request hooks. */
- ap_log_cerror(APLOG_MARK, APLOG_WARNING, 0, c,
-@@ -775,11 +812,8 @@
- return APR_SUCCESS;
- }
-
--static apr_status_t h2_session_create_int(h2_session **psession,
-- conn_rec *c,
-- request_rec *r,
-- h2_ctx *ctx,
-- h2_workers *workers)
-+apr_status_t h2_session_create(h2_session **psession, conn_rec *c, request_rec *r,
-+ server_rec *s, h2_workers *workers)
- {
- nghttp2_session_callbacks *callbacks = NULL;
- nghttp2_option *options = NULL;
-@@ -820,19 +854,16 @@
- session->id = c->id;
- session->c = c;
- session->r = r;
-- session->s = h2_ctx_server_get(ctx);
-+ session->s = s;
- session->pool = pool;
-- session->config = h2_config_sget(session->s);
- session->workers = workers;
-
- session->state = H2_SESSION_ST_INIT;
- session->local.accepting = 1;
- session->remote.accepting = 1;
-
-- session->max_stream_count = h2_config_geti(session->config,
-- H2_CONF_MAX_STREAMS);
-- session->max_stream_mem = h2_config_geti(session->config,
-- H2_CONF_STREAM_MAX_MEM);
-+ session->max_stream_count = h2_config_sgeti(s, H2_CONF_MAX_STREAMS);
-+ session->max_stream_mem = h2_config_sgeti(s, H2_CONF_STREAM_MAX_MEM);
-
- status = apr_thread_cond_create(&session->iowait, session->pool);
- if (status != APR_SUCCESS) {
-@@ -862,14 +893,18 @@
- session->monitor->on_state_event = on_stream_state_event;
- session->monitor->on_event = on_stream_event;
-
-- session->mplx = h2_mplx_create(c, session->pool, session->config,
-- workers);
-+ session->mplx = h2_mplx_m_create(c, s, session->pool, workers);
-
- /* connection input filter that feeds the session */
- session->cin = h2_filter_cin_create(session);
- ap_add_input_filter("H2_IN", session->cin, r, c);
-
-- h2_conn_io_init(&session->io, c, session->config);
-+ h2_conn_io_init(&session->io, c, s);
-+ session->padding_max = h2_config_sgeti(s, H2_CONF_PADDING_BITS);
-+ if (session->padding_max) {
-+ session->padding_max = (0x01 << session->padding_max) - 1;
-+ }
-+ session->padding_always = h2_config_sgeti(s, H2_CONF_PADDING_ALWAYS);
- session->bbtmp = apr_brigade_create(session->pool, c->bucket_alloc);
-
- status = init_callbacks(c, &callbacks);
-@@ -888,8 +923,7 @@
- apr_pool_destroy(pool);
- return status;
- }
-- nghttp2_option_set_peer_max_concurrent_streams(
-- options, (uint32_t)session->max_stream_count);
-+ nghttp2_option_set_peer_max_concurrent_streams(options, (uint32_t)session->max_stream_count);
- /* We need to handle window updates ourself, otherwise we
- * get flooded by nghttp2. */
- nghttp2_option_set_no_auto_window_update(options, 1);
-@@ -907,7 +941,7 @@
- return APR_ENOMEM;
- }
-
-- n = h2_config_geti(session->config, H2_CONF_PUSH_DIARY_SIZE);
-+ n = h2_config_sgeti(s, H2_CONF_PUSH_DIARY_SIZE);
- session->push_diary = h2_push_diary_create(session->pool, n);
-
- if (APLOGcdebug(c)) {
-@@ -924,22 +958,11 @@
- (int)session->push_diary->N);
- }
-
-- apr_pool_pre_cleanup_register(pool, c, session_pool_cleanup);
-+ apr_pool_pre_cleanup_register(pool, c, session_pool_cleanup);
-+
- return APR_SUCCESS;
- }
-
--apr_status_t h2_session_create(h2_session **psession,
-- conn_rec *c, h2_ctx *ctx, h2_workers *workers)
--{
-- return h2_session_create_int(psession, c, NULL, ctx, workers);
--}
--
--apr_status_t h2_session_rcreate(h2_session **psession,
-- request_rec *r, h2_ctx *ctx, h2_workers *workers)
--{
-- return h2_session_create_int(psession, r->connection, r, ctx, workers);
--}
--
- static apr_status_t h2_session_start(h2_session *session, int *rv)
- {
- apr_status_t status = APR_SUCCESS;
-@@ -1004,7 +1027,7 @@
- settings[slen].settings_id = NGHTTP2_SETTINGS_MAX_CONCURRENT_STREAMS;
- settings[slen].value = (uint32_t)session->max_stream_count;
- ++slen;
-- win_size = h2_config_geti(session->config, H2_CONF_WIN_SIZE);
-+ win_size = h2_config_sgeti(session->s, H2_CONF_WIN_SIZE);
- if (win_size != H2_INITIAL_WINDOW_SIZE) {
- settings[slen].settings_id = NGHTTP2_SETTINGS_INITIAL_WINDOW_SIZE;
- settings[slen].value = win_size;
-@@ -1156,7 +1179,7 @@
- stream = h2_session_open_stream(session, nid, is->id);
- if (!stream) {
- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c,
-- H2_STRM_LOG(APLOGNO(03077), stream,
-+ H2_STRM_LOG(APLOGNO(03077), is,
- "failed to create stream obj %d"), nid);
- /* kill the push_promise */
- nghttp2_submit_rst_stream(session->ngh2, NGHTTP2_FLAG_NONE, nid,
-@@ -1262,7 +1285,7 @@
-
- rv = nghttp2_session_change_stream_priority(session->ngh2, stream->id, &ps);
- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c,
-- ""H2_STRM_LOG(APLOGNO(03203), stream,
-+ H2_STRM_LOG(APLOGNO(03203), stream,
- "PUSH %s, weight=%d, depends=%d, returned=%d"),
- ptype, ps.weight, ps.stream_id, rv);
- status = (rv < 0)? APR_EGENERAL : APR_SUCCESS;
-@@ -1280,7 +1303,7 @@
- {
- /* iff we can and they can and want */
- return (session->remote.accepting /* remote GOAWAY received */
-- && h2_config_geti(session->config, H2_CONF_PUSH)
-+ && h2_config_sgeti(session->s, H2_CONF_PUSH)
- && nghttp2_session_get_remote_settings(session->ngh2,
- NGHTTP2_SETTINGS_ENABLE_PUSH));
- }
-@@ -1324,6 +1347,7 @@
- int eos)
- {
- apr_status_t status = APR_SUCCESS;
-+ const char *s;
- int rv = 0;
-
- ap_assert(session);
-@@ -1391,8 +1415,12 @@
- && (headers->status < 400)
- && (headers->status != 304)
- && h2_session_push_enabled(session)) {
--
-- h2_stream_submit_pushes(stream, headers);
-+ /* PUSH is possible and enabled on server, unless the request
-+ * denies it, submit resources to push */
-+ s = apr_table_get(headers->notes, H2_PUSH_MODE_NOTE);
-+ if (!s || strcmp(s, "0")) {
-+ h2_stream_submit_pushes(stream, headers);
-+ }
- }
-
- if (!stream->pref_priority) {
-@@ -1414,7 +1442,7 @@
- }
-
- if (headers->status == 103
-- && !h2_config_geti(session->config, H2_CONF_EARLY_HINTS)) {
-+ && !h2_config_sgeti(session->s, H2_CONF_EARLY_HINTS)) {
- /* suppress sending this to the client, it might have triggered
- * pushes and served its purpose nevertheless */
- rv = 0;
-@@ -1524,7 +1552,7 @@
- if (stream) {
- ap_assert(!stream->scheduled);
- if (h2_stream_prep_processing(stream) == APR_SUCCESS) {
-- h2_mplx_process(session->mplx, stream, stream_pri_cmp, session);
-+ h2_mplx_m_process(session->mplx, stream, stream_pri_cmp, session);
- }
- else {
- h2_stream_rst(stream, H2_ERR_INTERNAL_ERROR);
-@@ -1680,7 +1708,7 @@
- * that already served requests - not fair. */
- session->idle_sync_until = apr_time_now() + apr_time_from_sec(1);
- s = "timeout";
-- timeout = H2MAX(session->s->timeout, session->s->keep_alive_timeout);
-+ timeout = session->s->timeout;
- update_child_status(session, SERVER_BUSY_READ, "idle");
- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, session->c,
- H2_SSSN_LOG("", session, "enter idle, timeout = %d sec"),
-@@ -1688,8 +1716,8 @@
- }
- else if (session->open_streams) {
- s = "timeout";
-- timeout = session->s->keep_alive_timeout;
-- update_child_status(session, SERVER_BUSY_KEEPALIVE, "idle");
-+ timeout = session->s->timeout;
-+ update_child_status(session, SERVER_BUSY_READ, "idle");
- }
- else {
- /* normal keepalive setup */
-@@ -1796,7 +1824,7 @@
- session->open_streams);
- h2_conn_io_flush(&session->io);
- if (session->open_streams > 0) {
-- if (h2_mplx_awaits_data(session->mplx)) {
-+ if (h2_mplx_m_awaits_data(session->mplx)) {
- /* waiting for at least one stream to produce data */
- transit(session, "no io", H2_SESSION_ST_WAIT);
- }
-@@ -1954,7 +1982,8 @@
- ev_stream_closed(session, stream);
- break;
- case H2_SS_CLEANUP:
-- h2_mplx_stream_cleanup(session->mplx, stream);
-+ nghttp2_session_set_stream_user_data(session->ngh2, stream->id, NULL);
-+ h2_mplx_m_stream_cleanup(session->mplx, stream);
- break;
- default:
- break;
-@@ -2044,7 +2073,7 @@
- static apr_status_t dispatch_master(h2_session *session) {
- apr_status_t status;
-
-- status = h2_mplx_dispatch_master_events(session->mplx,
-+ status = h2_mplx_m_dispatch_master_events(session->mplx,
- on_stream_resume, session);
- if (status == APR_EAGAIN) {
- ap_log_cerror(APLOG_MARK, APLOG_TRACE3, status, session->c,
-@@ -2089,7 +2118,7 @@
- switch (session->state) {
- case H2_SESSION_ST_INIT:
- ap_update_child_status_from_conn(c->sbh, SERVER_BUSY_READ, c);
-- if (!h2_is_acceptable_connection(c, 1)) {
-+ if (!h2_is_acceptable_connection(c, session->r, 1)) {
- update_child_status(session, SERVER_BUSY_READ,
- "inadequate security");
- h2_session_shutdown(session,
-@@ -2112,7 +2141,7 @@
- break;
-
- case H2_SESSION_ST_IDLE:
-- if (session->idle_until && (apr_time_now() + session->idle_delay) > session->idle_until) {
-+ if (session->idle_until && (now + session->idle_delay) > session->idle_until) {
- ap_log_cerror( APLOG_MARK, APLOG_TRACE1, status, c,
- H2_SSSN_MSG(session, "idle, timeout reached, closing"));
- if (session->idle_delay) {
-@@ -2146,6 +2175,14 @@
- session->have_read = 1;
- }
- else if (APR_STATUS_IS_EAGAIN(status) || APR_STATUS_IS_TIMEUP(status)) {
-+ status = h2_mplx_m_idle(session->mplx);
-+ if (status == APR_EAGAIN) {
-+ break;
-+ }
-+ else if (status != APR_SUCCESS) {
-+ dispatch_event(session, H2_SESSION_EV_CONN_ERROR,
-+ H2_ERR_ENHANCE_YOUR_CALM, "less is more");
-+ }
- status = APR_EAGAIN;
- goto out;
- }
-@@ -2168,7 +2205,7 @@
- /* We wait in smaller increments, using a 1 second timeout.
- * That gives us the chance to check for MPMQ_STOPPING often.
- */
-- status = h2_mplx_idle(session->mplx);
-+ status = h2_mplx_m_idle(session->mplx);
- if (status == APR_EAGAIN) {
- break;
- }
-@@ -2282,7 +2319,7 @@
- "h2_session: wait for data, %ld micros",
- (long)session->wait_us);
- }
-- status = h2_mplx_out_trywait(session->mplx, session->wait_us,
-+ status = h2_mplx_m_out_trywait(session->mplx, session->wait_us,
- session->iowait);
- if (status == APR_SUCCESS) {
- session->wait_us = 0;
-@@ -2319,7 +2356,7 @@
- dispatch_event(session, H2_SESSION_EV_NGH2_DONE, 0, NULL);
- }
- if (session->reprioritize) {
-- h2_mplx_reprioritize(session->mplx, stream_pri_cmp, session);
-+ h2_mplx_m_reprioritize(session->mplx, stream_pri_cmp, session);
- session->reprioritize = 0;
- }
- }
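Editorial sketch (not part of the patch): select_padding_cb() above adds a random amount of padding but, unless padding always has precedence, clips the padded frame back to the current write size when the unpadded frame would have fit into it. A standalone worked example of that arithmetic; padded_length and the sample numbers are illustrative only.

    #include <stdio.h>

    #define FRAME_HDR_LEN 9
    #define MIN(a, b) ((a) < (b) ? (a) : (b))

    static long padded_length(long frame_len, long pad, long max_payloadlen,
                              long write_size, int padding_always)
    {
        long padded = MIN(max_payloadlen + FRAME_HDR_LEN, frame_len + pad);
        if (!padding_always && write_size
            && padded > write_size && frame_len <= write_size) {
            /* the unpadded frame fits one write, so clip the padding instead
             * of spilling into another TLS record */
            padded = write_size;
        }
        return padded;
    }

    int main(void)
    {
        /* 1200 byte frame, 200 random pad bytes, 1300 byte write size:
         * only 100 bytes of padding survive */
        printf("%ld\n", padded_length(1200, 200, 16384, 1300, 0)); /* 1300 */
        return 0;
    }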
---- a/modules/http2/h2_session.h
-+++ b/modules/http2/h2_session.h
-@@ -80,12 +80,13 @@
- request_rec *r; /* the request that started this in case
- * of 'h2c', NULL otherwise */
- server_rec *s; /* server/vhost we're starting on */
-- const struct h2_config *config; /* Relevant config for this session */
- apr_pool_t *pool; /* pool to use in session */
- struct h2_mplx *mplx; /* multiplexer for stream data */
- struct h2_workers *workers; /* for executing stream tasks */
- struct h2_filter_cin *cin; /* connection input filter context */
- h2_conn_io io; /* io on httpd conn filters */
-+ int padding_max; /* max number of padding bytes */
-+ int padding_always; /* padding has precedence over I/O optimizations */
- struct nghttp2_session *ngh2; /* the nghttp2 session (internal use) */
-
- h2_session_state state; /* state session is in */
-@@ -131,7 +132,7 @@
- const char *last_status_msg; /* the one already reported */
-
- struct h2_iqueue *in_pending; /* all streams with input pending */
-- struct h2_iqueue *in_process; /* all streams ready for processing on slave */
-+ struct h2_iqueue *in_process; /* all streams ready for processing on a secondary */
-
- } h2_session;
-
-@@ -142,27 +143,15 @@
- * The session will apply the configured parameter.
- * @param psession pointer receiving the created session on success or NULL
- * @param c the connection to work on
-+ * @param r optional request when protocol was upgraded
- * @param cfg the module config to apply
- * @param workers the worker pool to use
- * @return the created session
- */
- apr_status_t h2_session_create(h2_session **psession,
-- conn_rec *c, struct h2_ctx *ctx,
-+ conn_rec *c, request_rec *r, server_rec *,
- struct h2_workers *workers);
-
--/**
-- * Create a new h2_session for the given request.
-- * The session will apply the configured parameter.
-- * @param psession pointer receiving the created session on success or NULL
-- * @param r the request that was upgraded
-- * @param cfg the module config to apply
-- * @param workers the worker pool to use
-- * @return the created session
-- */
--apr_status_t h2_session_rcreate(h2_session **psession,
-- request_rec *r, struct h2_ctx *ctx,
-- struct h2_workers *workers);
--
- void h2_session_event(h2_session *session, h2_session_event_t ev,
- int err, const char *msg);
-
---- a/modules/http2/h2_stream.c
-+++ b/modules/http2/h2_stream.c
-@@ -365,9 +365,8 @@
- static void set_policy_for(h2_stream *stream, h2_request *r)
- {
- int enabled = h2_session_push_enabled(stream->session);
-- stream->push_policy = h2_push_policy_determine(r->headers, stream->pool,
-- enabled);
-- r->serialize = h2_config_geti(stream->session->config, H2_CONF_SER_HEADERS);
-+ stream->push_policy = h2_push_policy_determine(r->headers, stream->pool, enabled);
-+ r->serialize = h2_config_sgeti(stream->session->s, H2_CONF_SER_HEADERS);
- }
-
- apr_status_t h2_stream_send_frame(h2_stream *stream, int ftype, int flags, size_t frame_len)
-@@ -398,13 +397,8 @@
- /* start pushed stream */
- ap_assert(stream->request == NULL);
- ap_assert(stream->rtmp != NULL);
-- status = h2_request_end_headers(stream->rtmp, stream->pool, 1, 0);
-- if (status != APR_SUCCESS) {
-- return status;
-- }
-- set_policy_for(stream, stream->rtmp);
-- stream->request = stream->rtmp;
-- stream->rtmp = NULL;
-+ status = h2_stream_end_headers(stream, 1, 0);
-+ if (status != APR_SUCCESS) goto leave;
- break;
-
- default:
-@@ -416,6 +410,7 @@
- if (status == APR_SUCCESS && eos) {
- status = transit(stream, on_event(stream, H2_SEV_CLOSED_L));
- }
-+leave:
- return status;
- }
-
-@@ -451,18 +446,13 @@
- ap_assert(stream->request == NULL);
- if (stream->rtmp == NULL) {
- /* This can only happen, if the stream has received no header
-- * name/value pairs at all. The lastest nghttp2 version have become
-+ * name/value pairs at all. The latest nghttp2 version have become
- * pretty good at detecting this early. In any case, we have
- * to abort the connection here, since this is clearly a protocol error */
- return APR_EINVAL;
- }
-- status = h2_request_end_headers(stream->rtmp, stream->pool, eos, frame_len);
-- if (status != APR_SUCCESS) {
-- return status;
-- }
-- set_policy_for(stream, stream->rtmp);
-- stream->request = stream->rtmp;
-- stream->rtmp = NULL;
-+ status = h2_stream_end_headers(stream, eos, frame_len);
-+ if (status != APR_SUCCESS) goto leave;
- }
- break;
-
-@@ -473,6 +463,7 @@
- if (status == APR_SUCCESS && eos) {
- status = transit(stream, on_event(stream, H2_SEV_CLOSED_R));
- }
-+leave:
- return status;
- }
-
-@@ -663,11 +654,14 @@
-
- static apr_status_t add_trailer(h2_stream *stream,
- const char *name, size_t nlen,
-- const char *value, size_t vlen)
-+ const char *value, size_t vlen,
-+ size_t max_field_len, int *pwas_added)
- {
- conn_rec *c = stream->session->c;
- char *hname, *hvalue;
-+ const char *existing;
-
-+ *pwas_added = 0;
- if (nlen == 0 || name[0] == ':') {
- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, APR_EINVAL, c,
- H2_STRM_LOG(APLOGNO(03060), stream,
-@@ -681,9 +675,18 @@
- stream->trailers = apr_table_make(stream->pool, 5);
- }
- hname = apr_pstrndup(stream->pool, name, nlen);
-- hvalue = apr_pstrndup(stream->pool, value, vlen);
- h2_util_camel_case_header(hname, nlen);
-+ existing = apr_table_get(stream->trailers, hname);
-+ if (max_field_len
-+ && ((existing? strlen(existing)+2 : 0) + vlen + nlen + 2 > max_field_len)) {
-+ /* "key: (oldval, )?nval" is too long */
-+ return APR_EINVAL;
-+ }
-+ if (!existing) *pwas_added = 1;
-+ hvalue = apr_pstrndup(stream->pool, value, vlen);
- apr_table_mergen(stream->trailers, hname, hvalue);
-+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, c,
-+ H2_STRM_MSG(stream, "added trailer '%s: %s'"), hname, hvalue);
-
- return APR_SUCCESS;
- }
-@@ -693,44 +696,31 @@
- const char *value, size_t vlen)
- {
- h2_session *session = stream->session;
-- int error = 0;
-- apr_status_t status;
-+ int error = 0, was_added = 0;
-+ apr_status_t status = APR_SUCCESS;
-
- if (stream->has_response) {
- return APR_EINVAL;
- }
-- ++stream->request_headers_added;
-+
- if (name[0] == ':') {
- if ((vlen) > session->s->limit_req_line) {
- /* pseudo header: approximation of request line size check */
-- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, session->c,
-- H2_STRM_MSG(stream, "pseudo %s too long"), name);
-+ if (!h2_stream_is_ready(stream)) {
-+ ap_log_cerror(APLOG_MARK, APLOG_INFO, 0, session->c,
-+ H2_STRM_LOG(APLOGNO(10178), stream,
-+ "Request pseudo header exceeds "
-+ "LimitRequestFieldSize: %s"), name);
-+ }
- error = HTTP_REQUEST_URI_TOO_LARGE;
-+ goto cleanup;
- }
- }
-- else if ((nlen + 2 + vlen) > session->s->limit_req_fieldsize) {
-- /* header too long */
-- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, session->c,
-- H2_STRM_MSG(stream, "header %s too long"), name);
-- error = HTTP_REQUEST_HEADER_FIELDS_TOO_LARGE;
-- }
--
-- if (stream->request_headers_added > session->s->limit_req_fields + 4) {
-- /* too many header lines, include 4 pseudo headers */
-- if (stream->request_headers_added
-- > session->s->limit_req_fields + 4 + 100) {
-- /* yeah, right */
-- h2_stream_rst(stream, H2_ERR_ENHANCE_YOUR_CALM);
-- return APR_ECONNRESET;
-- }
-- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, session->c,
-- H2_STRM_MSG(stream, "too many header lines"));
-- error = HTTP_REQUEST_HEADER_FIELDS_TOO_LARGE;
-- }
-
-- if (error) {
-- set_error_response(stream, error);
-- return APR_EINVAL;
-+ if (session->s->limit_req_fields > 0
-+ && stream->request_headers_added > session->s->limit_req_fields) {
-+ /* already over limit, count this attempt, but do not take it in */
-+ ++stream->request_headers_added;
- }
- else if (H2_SS_IDLE == stream->state) {
- if (!stream->rtmp) {
-@@ -738,16 +728,55 @@
- NULL, NULL, NULL, NULL, NULL, 0);
- }
- status = h2_request_add_header(stream->rtmp, stream->pool,
-- name, nlen, value, vlen);
-+ name, nlen, value, vlen,
-+ session->s->limit_req_fieldsize, &was_added);
-+ if (was_added) ++stream->request_headers_added;
- }
- else if (H2_SS_OPEN == stream->state) {
-- status = add_trailer(stream, name, nlen, value, vlen);
-+ status = add_trailer(stream, name, nlen, value, vlen,
-+ session->s->limit_req_fieldsize, &was_added);
-+ if (was_added) ++stream->request_headers_added;
- }
- else {
- status = APR_EINVAL;
-+ goto cleanup;
-+ }
-+
-+ if (APR_EINVAL == status) {
-+ /* header too long */
-+ if (!h2_stream_is_ready(stream)) {
-+ ap_log_cerror(APLOG_MARK, APLOG_INFO, 0, session->c,
-+ H2_STRM_LOG(APLOGNO(10180), stream,"Request header exceeds "
-+ "LimitRequestFieldSize: %.*s"),
-+ (int)H2MIN(nlen, 80), name);
-+ }
-+ error = HTTP_REQUEST_HEADER_FIELDS_TOO_LARGE;
-+ goto cleanup;
-+ }
-+
-+ if (session->s->limit_req_fields > 0
-+ && stream->request_headers_added > session->s->limit_req_fields) {
-+ /* too many header lines */
-+ if (stream->request_headers_added > session->s->limit_req_fields + 100) {
-+ /* yeah, right, this request is way over the limit, say goodbye */
-+ h2_stream_rst(stream, H2_ERR_ENHANCE_YOUR_CALM);
-+ return APR_ECONNRESET;
-+ }
-+ if (!h2_stream_is_ready(stream)) {
-+ ap_log_cerror(APLOG_MARK, APLOG_INFO, 0, session->c,
-+ H2_STRM_LOG(APLOGNO(10181), stream, "Number of request headers "
-+ "exceeds LimitRequestFields"));
-+ }
-+ error = HTTP_REQUEST_HEADER_FIELDS_TOO_LARGE;
-+ goto cleanup;
- }
-
-- if (status != APR_SUCCESS) {
-+cleanup:
-+ if (error) {
-+ set_error_response(stream, error);
-+ return APR_EINVAL;
-+ }
-+ else if (status != APR_SUCCESS) {
- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, session->c,
- H2_STRM_MSG(stream, "header %s not accepted"), name);
- h2_stream_dispatch(stream, H2_SEV_CANCELLED);
-@@ -755,6 +784,49 @@
- return status;
- }
-
-+typedef struct {
-+ apr_size_t maxlen;
-+ const char *failed_key;
-+} val_len_check_ctx;
-+
-+static int table_check_val_len(void *baton, const char *key, const char *value)
-+{
-+ val_len_check_ctx *ctx = baton;
-+
-+ if (strlen(value) <= ctx->maxlen) return 1;
-+ ctx->failed_key = key;
-+ return 0;
-+}
-+
-+apr_status_t h2_stream_end_headers(h2_stream *stream, int eos, size_t raw_bytes)
-+{
-+ apr_status_t status;
-+ val_len_check_ctx ctx;
-+
-+ status = h2_request_end_headers(stream->rtmp, stream->pool, eos, raw_bytes);
-+ if (APR_SUCCESS == status) {
-+ set_policy_for(stream, stream->rtmp);
-+ stream->request = stream->rtmp;
-+ stream->rtmp = NULL;
-+
-+ ctx.maxlen = stream->session->s->limit_req_fieldsize;
-+ ctx.failed_key = NULL;
-+ apr_table_do(table_check_val_len, &ctx, stream->request->headers, NULL);
-+ if (ctx.failed_key) {
-+ if (!h2_stream_is_ready(stream)) {
-+ ap_log_cerror(APLOG_MARK, APLOG_INFO, 0, stream->session->c,
-+ H2_STRM_LOG(APLOGNO(10230), stream,"Request header exceeds "
-+ "LimitRequestFieldSize: %.*s"),
-+ (int)H2MIN(strlen(ctx.failed_key), 80), ctx.failed_key);
-+ }
-+ set_error_response(stream, HTTP_REQUEST_HEADER_FIELDS_TOO_LARGE);
-+ /* keep on returning APR_SUCCESS, so that we send a HTTP response and
-+ * do not RST the stream. */
-+ }
-+ }
-+ return status;
-+}
-+
- static apr_bucket *get_first_headers_bucket(apr_bucket_brigade *bb)
- {
- if (bb) {
-@@ -855,7 +927,7 @@
- * is requested. But we can reduce the size in case the master
- * connection operates in smaller chunks. (TSL warmup) */
- if (stream->session->io.write_size > 0) {
-- max_chunk = stream->session->io.write_size - 9; /* header bits */
-+ max_chunk = stream->session->io.write_size - H2_FRAME_HDR_LEN;
- }
- requested = (*plen > 0)? H2MIN(*plen, max_chunk) : max_chunk;
-
-@@ -864,7 +936,7 @@
-
- if (status == APR_EAGAIN) {
- /* TODO: ugly, someone needs to retrieve the response first */
-- h2_mplx_keep_active(stream->session->mplx, stream);
-+ h2_mplx_m_keep_active(stream->session->mplx, stream);
- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c,
- H2_STRM_MSG(stream, "prep, response eagain"));
- return status;
-@@ -987,7 +1059,7 @@
- const char *ctype = apr_table_get(response->headers, "content-type");
- if (ctype) {
- /* FIXME: Not good enough, config needs to come from request->server */
-- return h2_config_get_priority(stream->session->config, ctype);
-+ return h2_cconfig_get_priority(stream->session->c, ctype);
- }
- }
- return NULL;
---- a/modules/http2/h2_stream.h
-+++ b/modules/http2/h2_stream.h
-@@ -198,6 +198,10 @@
- apr_status_t h2_stream_add_header(h2_stream *stream,
- const char *name, size_t nlen,
- const char *value, size_t vlen);
-+
-+/* End the construction of request headers */
-+apr_status_t h2_stream_end_headers(h2_stream *stream, int eos, size_t raw_bytes);
-+
-
- apr_status_t h2_stream_send_frame(h2_stream *stream, int frame_type, int flags, size_t frame_len);
- apr_status_t h2_stream_recv_frame(h2_stream *stream, int frame_type, int flags, size_t frame_len);
---- a/modules/http2/h2_switch.c
-+++ b/modules/http2/h2_switch.c
-@@ -55,7 +55,6 @@
- int is_tls = h2_h2_is_tls(c);
- const char **protos = is_tls? h2_tls_protos : h2_clear_protos;
-
-- (void)s;
- if (!h2_mpm_supported()) {
- return DECLINED;
- }
-@@ -68,7 +67,7 @@
- return DECLINED;
- }
-
-- if (!h2_is_acceptable_connection(c, 0)) {
-+ if (!h2_is_acceptable_connection(c, r, 0)) {
- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, c, APLOGNO(03084)
- "protocol propose: connection requirements not met");
- return DECLINED;
-@@ -81,7 +80,7 @@
- */
- const char *p;
-
-- if (!h2_allows_h2_upgrade(c)) {
-+ if (!h2_allows_h2_upgrade(r)) {
- return DECLINED;
- }
-
-@@ -150,7 +149,7 @@
- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c,
- "switching protocol to '%s'", protocol);
- h2_ctx_protocol_set(ctx, protocol);
-- h2_ctx_server_set(ctx, s);
-+ h2_ctx_server_update(ctx, s);
-
- if (r != NULL) {
- apr_status_t status;
-@@ -160,12 +159,11 @@
- * right away.
- */
- ap_remove_input_filter_byhandle(r->input_filters, "http_in");
-- ap_remove_input_filter_byhandle(r->input_filters, "reqtimeout");
- ap_remove_output_filter_byhandle(r->output_filters, "HTTP_HEADER");
-
- /* Ok, start an h2_conn on this one. */
-- h2_ctx_server_set(ctx, r->server);
-- status = h2_conn_setup(ctx, r->connection, r);
-+ status = h2_conn_setup(c, r, s);
-+
- if (status != APR_SUCCESS) {
- ap_log_rerror(APLOG_MARK, APLOG_DEBUG, status, r, APLOGNO(03088)
- "session setup");
-@@ -173,7 +171,7 @@
- return !OK;
- }
-
-- h2_conn_run(ctx, c);
-+ h2_conn_run(c);
- }
- return OK;
- }
---- a/modules/http2/h2_task.c
-+++ b/modules/http2/h2_task.c
-@@ -86,7 +86,7 @@
- task->request->authority,
- task->request->path);
- task->output.opened = 1;
-- return h2_mplx_out_open(task->mplx, task->stream_id, task->output.beam);
-+ return h2_mplx_t_out_open(task->mplx, task->stream_id, task->output.beam);
- }
-
- static apr_status_t send_out(h2_task *task, apr_bucket_brigade* bb, int block)
-@@ -97,7 +97,7 @@
- apr_brigade_length(bb, 0, &written);
- H2_TASK_OUT_LOG(APLOG_TRACE2, task, bb, "h2_task send_out");
- h2_beam_log(task->output.beam, task->c, APLOG_TRACE2, "send_out(before)");
-- /* engines send unblocking */
-+
- status = h2_beam_send(task->output.beam, bb,
- block? APR_BLOCK_READ : APR_NONBLOCK_READ);
- h2_beam_log(task->output.beam, task->c, APLOG_TRACE2, "send_out(after)");
-@@ -126,33 +126,16 @@
- * request_rec out filter chain) into the h2_mplx for further sending
- * on the master connection.
- */
--static apr_status_t slave_out(h2_task *task, ap_filter_t* f,
-- apr_bucket_brigade* bb)
-+static apr_status_t secondary_out(h2_task *task, ap_filter_t* f,
-+ apr_bucket_brigade* bb)
- {
- apr_bucket *b;
- apr_status_t rv = APR_SUCCESS;
- int flush = 0, blocking;
-
-- if (task->frozen) {
-- h2_util_bb_log(task->c, task->stream_id, APLOG_TRACE2,
-- "frozen task output write, ignored", bb);
-- while (!APR_BRIGADE_EMPTY(bb)) {
-- b = APR_BRIGADE_FIRST(bb);
-- if (AP_BUCKET_IS_EOR(b)) {
-- APR_BUCKET_REMOVE(b);
-- task->eor = b;
-- }
-- else {
-- apr_bucket_delete(b);
-- }
-- }
-- return APR_SUCCESS;
-- }
--
- send:
-- /* we send block once we opened the output, so someone is there
-- * reading it *and* the task is not assigned to a h2_req_engine */
-- blocking = (!task->assigned && task->output.opened);
-+ /* we send block once we opened the output, so someone is there reading it */
-+ blocking = task->output.opened;
- for (b = APR_BRIGADE_FIRST(bb);
- b != APR_BRIGADE_SENTINEL(bb);
- b = APR_BUCKET_NEXT(b)) {
-@@ -192,7 +175,7 @@
- if (APR_SUCCESS == rv) {
- /* could not write all, buffer the rest */
- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, rv, task->c, APLOGNO(03405)
-- "h2_slave_out(%s): saving brigade", task->id);
-+ "h2_secondary_out(%s): saving brigade", task->id);
- ap_assert(NULL);
- rv = ap_save_brigade(f, &task->output.bb, &bb, task->pool);
- flush = 1;
-@@ -206,7 +189,7 @@
- }
- out:
- ap_log_cerror(APLOG_MARK, APLOG_TRACE2, rv, task->c,
-- "h2_slave_out(%s): slave_out leave", task->id);
-+ "h2_secondary_out(%s): secondary_out leave", task->id);
- return rv;
- }
-
-@@ -219,14 +202,14 @@
- }
-
- /*******************************************************************************
-- * task slave connection filters
-+ * task secondary connection filters
- ******************************************************************************/
-
--static apr_status_t h2_filter_slave_in(ap_filter_t* f,
-- apr_bucket_brigade* bb,
-- ap_input_mode_t mode,
-- apr_read_type_e block,
-- apr_off_t readbytes)
-+static apr_status_t h2_filter_secondary_in(ap_filter_t* f,
-+ apr_bucket_brigade* bb,
-+ ap_input_mode_t mode,
-+ apr_read_type_e block,
-+ apr_off_t readbytes)
- {
- h2_task *task;
- apr_status_t status = APR_SUCCESS;
-@@ -236,12 +219,12 @@
- apr_size_t rmax = ((readbytes <= APR_SIZE_MAX)?
- (apr_size_t)readbytes : APR_SIZE_MAX);
-
-- task = h2_ctx_cget_task(f->c);
-+ task = h2_ctx_get_task(f->c);
- ap_assert(task);
-
- if (trace1) {
- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, f->c,
-- "h2_slave_in(%s): read, mode=%d, block=%d, readbytes=%ld",
-+ "h2_secondary_in(%s): read, mode=%d, block=%d, readbytes=%ld",
- task->id, mode, block, (long)readbytes);
- }
-
-@@ -271,7 +254,7 @@
- /* Get more input data for our request. */
- if (trace1) {
- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, f->c,
-- "h2_slave_in(%s): get more data from mplx, block=%d, "
-+ "h2_secondary_in(%s): get more data from mplx, block=%d, "
- "readbytes=%ld", task->id, block, (long)readbytes);
- }
- if (task->input.beam) {
-@@ -284,7 +267,7 @@
-
- if (trace1) {
- ap_log_cerror(APLOG_MARK, APLOG_TRACE2, status, f->c,
-- "h2_slave_in(%s): read returned", task->id);
-+ "h2_secondary_in(%s): read returned", task->id);
- }
- if (APR_STATUS_IS_EAGAIN(status)
- && (mode == AP_MODE_GETLINE || block == APR_BLOCK_READ)) {
-@@ -310,11 +293,9 @@
- }
- }
-
-- /* Nothing there, no more data to get. Return APR_EAGAIN on
-- * speculative reads, this is ap_check_pipeline()'s trick to
-- * see if the connection needs closing. */
-+ /* Nothing there, no more data to get. Return. */
- if (status == APR_EOF && APR_BRIGADE_EMPTY(task->input.bb)) {
-- return (mode == AP_MODE_SPECULATIVE)? APR_EAGAIN : APR_EOF;
-+ return status;
- }
-
- if (trace1) {
-@@ -325,7 +306,7 @@
- if (APR_BRIGADE_EMPTY(task->input.bb)) {
- if (trace1) {
- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, f->c,
-- "h2_slave_in(%s): no data", task->id);
-+ "h2_secondary_in(%s): no data", task->id);
- }
- return (block == APR_NONBLOCK_READ)? APR_EAGAIN : APR_EOF;
- }
-@@ -353,7 +334,7 @@
- buffer[len] = 0;
- if (trace1) {
- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, f->c,
-- "h2_slave_in(%s): getline: %s",
-+ "h2_secondary_in(%s): getline: %s",
- task->id, buffer);
- }
- }
-@@ -363,7 +344,7 @@
- * to support it. Seems to work. */
- ap_log_cerror(APLOG_MARK, APLOG_ERR, APR_ENOTIMPL, f->c,
- APLOGNO(03472)
-- "h2_slave_in(%s), unsupported READ mode %d",
-+ "h2_secondary_in(%s), unsupported READ mode %d",
- task->id, mode);
- status = APR_ENOTIMPL;
- }
-@@ -371,19 +352,19 @@
- if (trace1) {
- apr_brigade_length(bb, 0, &bblen);
- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, f->c,
-- "h2_slave_in(%s): %ld data bytes", task->id, (long)bblen);
-+ "h2_secondary_in(%s): %ld data bytes", task->id, (long)bblen);
- }
- return status;
- }
-
--static apr_status_t h2_filter_slave_output(ap_filter_t* filter,
-- apr_bucket_brigade* brigade)
-+static apr_status_t h2_filter_secondary_output(ap_filter_t* filter,
-+ apr_bucket_brigade* brigade)
- {
-- h2_task *task = h2_ctx_cget_task(filter->c);
-+ h2_task *task = h2_ctx_get_task(filter->c);
- apr_status_t status;
-
- ap_assert(task);
-- status = slave_out(task, filter, brigade);
-+ status = secondary_out(task, filter, brigade);
- if (status != APR_SUCCESS) {
- h2_task_rst(task, H2_ERR_INTERNAL_ERROR);
- }
-@@ -392,14 +373,14 @@
-
- static apr_status_t h2_filter_parse_h1(ap_filter_t* f, apr_bucket_brigade* bb)
- {
-- h2_task *task = h2_ctx_cget_task(f->c);
-+ h2_task *task = h2_ctx_get_task(f->c);
- apr_status_t status;
-
- ap_assert(task);
- /* There are cases where we need to parse a serialized http/1.1
- * response. One example is a 100-continue answer in serialized mode
- * or via a mod_proxy setup */
-- while (bb && !task->output.sent_response) {
-+ while (bb && !task->c->aborted && !task->output.sent_response) {
- status = h2_from_h1_parse_response(task, f, bb);
- ap_log_cerror(APLOG_MARK, APLOG_TRACE2, status, f->c,
- "h2_task(%s): parsed response", task->id);
-@@ -425,8 +406,15 @@
- || !strcmp("OPTIONS", task->request->method));
- }
-
-+int h2_task_has_started(h2_task *task)
-+{
-+ return task && task->started_at != 0;
-+}
-+
- void h2_task_redo(h2_task *task)
- {
-+ task->started_at = 0;
-+ task->worker_done = 0;
- task->rst_error = 0;
- }
-
-@@ -468,9 +456,9 @@
- ap_hook_process_connection(h2_task_process_conn,
- NULL, NULL, APR_HOOK_FIRST);
-
-- ap_register_input_filter("H2_SLAVE_IN", h2_filter_slave_in,
-+ ap_register_input_filter("H2_SECONDARY_IN", h2_filter_secondary_in,
- NULL, AP_FTYPE_NETWORK);
-- ap_register_output_filter("H2_SLAVE_OUT", h2_filter_slave_output,
-+ ap_register_output_filter("H2_SECONDARY_OUT", h2_filter_secondary_output,
- NULL, AP_FTYPE_NETWORK);
- ap_register_output_filter("H2_PARSE_H1", h2_filter_parse_h1,
- NULL, AP_FTYPE_NETWORK);
-@@ -502,17 +490,17 @@
-
- ctx = h2_ctx_get(c, 0);
- (void)arg;
-- if (h2_ctx_is_task(ctx)) {
-+ if (ctx->task) {
- ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, c,
-- "h2_slave(%s), pre_connection, adding filters", c->log_id);
-- ap_add_input_filter("H2_SLAVE_IN", NULL, NULL, c);
-+ "h2_secondary(%s), pre_connection, adding filters", c->log_id);
-+ ap_add_input_filter("H2_SECONDARY_IN", NULL, NULL, c);
- ap_add_output_filter("H2_PARSE_H1", NULL, NULL, c);
-- ap_add_output_filter("H2_SLAVE_OUT", NULL, NULL, c);
-+ ap_add_output_filter("H2_SECONDARY_OUT", NULL, NULL, c);
- }
- return OK;
- }
-
--h2_task *h2_task_create(conn_rec *slave, int stream_id,
-+h2_task *h2_task_create(conn_rec *secondary, int stream_id,
- const h2_request *req, h2_mplx *m,
- h2_bucket_beam *input,
- apr_interval_time_t timeout,
-@@ -521,17 +509,18 @@
- apr_pool_t *pool;
- h2_task *task;
-
-- ap_assert(slave);
-+ ap_assert(secondary);
- ap_assert(req);
-
-- apr_pool_create(&pool, slave->pool);
-+ apr_pool_create(&pool, secondary->pool);
-+ apr_pool_tag(pool, "h2_task");
- task = apr_pcalloc(pool, sizeof(h2_task));
- if (task == NULL) {
- return NULL;
- }
- task->id = "000";
- task->stream_id = stream_id;
-- task->c = slave;
-+ task->c = secondary;
- task->mplx = m;
- task->pool = pool;
- task->request = req;
-@@ -564,41 +553,40 @@
- ap_assert(task);
- c = task->c;
- task->worker_started = 1;
-- task->started_at = apr_time_now();
-
- if (c->master) {
-- /* Each conn_rec->id is supposed to be unique at a point in time. Since
-+ /* See the discussion at
-+ *
-+ * Each conn_rec->id is supposed to be unique at a point in time. Since
- * some modules (and maybe external code) uses this id as an identifier
-- * for the request_rec they handle, it needs to be unique for slave
-+ * for the request_rec they handle, it needs to be unique for secondary
- * connections also.
-- * The connection id is generated by the MPM and most MPMs use the formula
-- * id := (child_num * max_threads) + thread_num
-- * which means that there is a maximum id of about
-- * idmax := max_child_count * max_threads
-- * If we assume 2024 child processes with 2048 threads max, we get
-- * idmax ~= 2024 * 2048 = 2 ** 22
-- * On 32 bit systems, we have not much space left, but on 64 bit systems
-- * (and higher?) we can use the upper 32 bits without fear of collision.
-- * 32 bits is just what we need, since a connection can only handle so
-- * many streams.
-+ *
-+ * The MPM module assigns the connection ids and mod_unique_id is using
-+ * that one to generate identifier for requests. While the implementation
-+ * works for HTTP/1.x, the parallel execution of several requests per
-+ * connection will generate duplicate identifiers on load.
-+ *
-+ * The original implementation for secondary connection identifiers used
-+ * to shift the master connection id up and assign the stream id to the
-+ * lower bits. This was cramped on 32 bit systems, but on 64bit there was
-+ * enough space.
-+ *
-+ * As issue 195 showed, mod_unique_id only uses the lower 32 bits of the
-+ * connection id, even on 64bit systems, which leads to collisions in request ids.
-+ *
-+ * The way master connection ids are generated, there is some space "at the
-+ * top" of the lower 32 bits on allmost all systems. If you have a setup
-+ * with 64k threads per child and 255 child processes, you live on the edge.
-+ *
-+ * The new implementation shifts 8 bits and XORs in the worker
-+ * id. This will experience collisions with > 256 h2 workers and heavy
-+ * load still. There seems to be no way to solve this in all possible
-+ * configurations by mod_h2 alone.
- */
-- int slave_id, free_bits;
--
-+ task->c->id = (c->master->id << 8)^worker_id;
- task->id = apr_psprintf(task->pool, "%ld-%d", c->master->id,
- task->stream_id);
-- if (sizeof(unsigned long) >= 8) {
-- free_bits = 32;
-- slave_id = task->stream_id;
-- }
-- else {
-- /* Assume we have a more limited number of threads/processes
-- * and h2 workers on a 32-bit system. Use the worker instead
-- * of the stream id. */
-- free_bits = 8;
-- slave_id = worker_id;
-- }
-- task->c->id = (c->master->id << free_bits)^slave_id;
-- c->keepalive = AP_CONN_KEEPALIVE;
- }
-
- h2_beam_create(&task->output.beam, c->pool, task->stream_id, "output",
-@@ -613,7 +601,7 @@
- h2_ctx_create_for(c, task);
- apr_table_setn(c->notes, H2_TASK_ID_NOTE, task->id);
-
-- h2_slave_run_pre_connection(c, ap_get_conn_socket(c));
-+ h2_secondary_run_pre_connection(c, ap_get_conn_socket(c));
-
- task->input.bb = apr_brigade_create(task->pool, c->bucket_alloc);
- if (task->request->serialize) {
-@@ -633,18 +621,9 @@
- task->c->current_thread = thread;
- ap_run_process_connection(c);
-
-- if (task->frozen) {
-- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c,
-- "h2_task(%s): process_conn returned frozen task",
-- task->id);
-- /* cleanup delayed */
-- return APR_EAGAIN;
-- }
-- else {
-- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c,
-- "h2_task(%s): processing done", task->id);
-- return output_finish(task);
-- }
-+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c,
-+ "h2_task(%s): processing done", task->id);
-+ return output_finish(task);
- }
-
- static apr_status_t h2_task_process_request(h2_task *task, conn_rec *c)
-@@ -682,14 +661,8 @@
-
- ap_process_request(r);
-
-- if (task->frozen) {
-- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c,
-- "h2_task(%s): process_request frozen", task->id);
-- }
-- else {
-- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c,
-- "h2_task(%s): process_request done", task->id);
-- }
-+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c,
-+ "h2_task(%s): process_request done", task->id);
-
- /* After the call to ap_process_request, the
- * request pool may have been deleted. We set
-@@ -724,7 +697,7 @@
- }
-
- ctx = h2_ctx_get(c, 0);
-- if (h2_ctx_is_task(ctx)) {
-+ if (ctx->task) {
- if (!ctx->task->request->serialize) {
- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c,
- "h2_h2, processing request directly");
-@@ -736,33 +709,8 @@
- }
- else {
- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c,
-- "slave_conn(%ld): has no task", c->id);
-+ "secondary_conn(%ld): has no task", c->id);
- }
- return DECLINED;
- }
-
--apr_status_t h2_task_freeze(h2_task *task)
--{
-- if (!task->frozen) {
-- task->frozen = 1;
-- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, task->c, APLOGNO(03406)
-- "h2_task(%s), frozen", task->id);
-- }
-- return APR_SUCCESS;
--}
--
--apr_status_t h2_task_thaw(h2_task *task)
--{
-- if (task->frozen) {
-- task->frozen = 0;
-- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, task->c, APLOGNO(03407)
-- "h2_task(%s), thawed", task->id);
-- }
-- task->thawed = 1;
-- return APR_SUCCESS;
--}
--
--int h2_task_has_thawed(h2_task *task)
--{
-- return task->thawed;
--}
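The comment in the hunk above describes the replacement id scheme only in prose. The following standalone C sketch (not part of the patch; the helper name and the example numbers are invented for illustration) shows the same shift-and-XOR computation as the assignment in that hunk, plus one of the cross-connection collisions that become possible once more than 256 h2 workers are configured:

    #include <stdio.h>

    /* secondary connection id: master id shifted up 8 bits, worker id XORed in,
     * mirroring task->c->id = (c->master->id << 8) ^ worker_id */
    static long secondary_conn_id(long master_id, int worker_id)
    {
        return (master_id << 8) ^ worker_id;
    }

    int main(void)
    {
        /* with more than 256 workers, different (master, worker) pairs can
         * collide: (master 2, worker 7) and (master 3, worker 263) both map
         * to 0x207 */
        printf("%lx\n", (unsigned long)secondary_conn_id(2, 7));
        printf("%lx\n", (unsigned long)secondary_conn_id(3, 263));
        return 0;
    }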
---- a/modules/http2/h2_task.h
-+++ b/modules/http2/h2_task.h
-@@ -35,14 +35,13 @@
- *
- * Finally, to keep certain connection level filters, such as ourselves and
- * especially mod_ssl ones, from messing with our data, we need a filter
-- * of our own to disble those.
-+ * of our own to disable those.
- */
-
- struct h2_bucket_beam;
- struct h2_conn;
- struct h2_mplx;
- struct h2_task;
--struct h2_req_engine;
- struct h2_request;
- struct h2_response_parser;
- struct h2_stream;
-@@ -80,20 +79,18 @@
- struct h2_mplx *mplx;
-
- unsigned int filters_set : 1;
-- unsigned int frozen : 1;
-- unsigned int thawed : 1;
- unsigned int worker_started : 1; /* h2_worker started processing */
-- unsigned int worker_done : 1; /* h2_worker finished */
-+ unsigned int redo : 1; /* was throttled, should be restarted later */
-+
-+ int worker_done; /* h2_worker finished */
-+ int done_done; /* task_done has been handled */
-
- apr_time_t started_at; /* when processing started */
- apr_time_t done_at; /* when processing was done */
- apr_bucket *eor;
--
-- struct h2_req_engine *engine; /* engine hosted by this task */
-- struct h2_req_engine *assigned; /* engine that task has been assigned to */
- };
-
--h2_task *h2_task_create(conn_rec *slave, int stream_id,
-+h2_task *h2_task_create(conn_rec *secondary, int stream_id,
- const h2_request *req, struct h2_mplx *m,
- struct h2_bucket_beam *input,
- apr_interval_time_t timeout,
-@@ -105,6 +102,7 @@
-
- void h2_task_redo(h2_task *task);
- int h2_task_can_redo(h2_task *task);
-+int h2_task_has_started(h2_task *task);
-
- /**
- * Reset the task with the given error code, resets all input/output.
-@@ -120,8 +118,4 @@
- extern APR_OPTIONAL_FN_TYPE(ap_logio_add_bytes_in) *h2_task_logio_add_bytes_in;
- extern APR_OPTIONAL_FN_TYPE(ap_logio_add_bytes_out) *h2_task_logio_add_bytes_out;
-
--apr_status_t h2_task_freeze(h2_task *task);
--apr_status_t h2_task_thaw(h2_task *task);
--int h2_task_has_thawed(h2_task *task);
--
- #endif /* defined(__mod_h2__h2_task__) */
---- a/modules/http2/h2_util.c
-+++ b/modules/http2/h2_util.c
-@@ -638,15 +638,6 @@
- apr_status_t rv;
- if ((rv = apr_thread_mutex_lock(fifo->lock)) == APR_SUCCESS) {
- fifo->aborted = 1;
-- apr_thread_mutex_unlock(fifo->lock);
-- }
-- return rv;
--}
--
--apr_status_t h2_fifo_interrupt(h2_fifo *fifo)
--{
-- apr_status_t rv;
-- if ((rv = apr_thread_mutex_lock(fifo->lock)) == APR_SUCCESS) {
- apr_thread_cond_broadcast(fifo->not_empty);
- apr_thread_cond_broadcast(fifo->not_full);
- apr_thread_mutex_unlock(fifo->lock);
-@@ -710,10 +701,6 @@
- {
- apr_status_t rv;
-
-- if (fifo->aborted) {
-- return APR_EOF;
-- }
--
- if ((rv = apr_thread_mutex_lock(fifo->lock)) == APR_SUCCESS) {
- rv = fifo_push_int(fifo, elem, block);
- apr_thread_mutex_unlock(fifo->lock);
-@@ -754,10 +741,6 @@
- {
- apr_status_t rv;
-
-- if (fifo->aborted) {
-- return APR_EOF;
-- }
--
- if ((rv = apr_thread_mutex_lock(fifo->lock)) == APR_SUCCESS) {
- rv = pull_head(fifo, pelem, block);
- apr_thread_mutex_unlock(fifo->lock);
-@@ -946,15 +929,6 @@
- apr_status_t rv;
- if ((rv = apr_thread_mutex_lock(fifo->lock)) == APR_SUCCESS) {
- fifo->aborted = 1;
-- apr_thread_mutex_unlock(fifo->lock);
-- }
-- return rv;
--}
--
--apr_status_t h2_ififo_interrupt(h2_ififo *fifo)
--{
-- apr_status_t rv;
-- if ((rv = apr_thread_mutex_lock(fifo->lock)) == APR_SUCCESS) {
- apr_thread_cond_broadcast(fifo->not_empty);
- apr_thread_cond_broadcast(fifo->not_full);
- apr_thread_mutex_unlock(fifo->lock);
-@@ -1018,10 +992,6 @@
- {
- apr_status_t rv;
-
-- if (fifo->aborted) {
-- return APR_EOF;
-- }
--
- if ((rv = apr_thread_mutex_lock(fifo->lock)) == APR_SUCCESS) {
- rv = ififo_push_int(fifo, id, block);
- apr_thread_mutex_unlock(fifo->lock);
-@@ -1062,10 +1032,6 @@
- {
- apr_status_t rv;
-
-- if (fifo->aborted) {
-- return APR_EOF;
-- }
--
- if ((rv = apr_thread_mutex_lock(fifo->lock)) == APR_SUCCESS) {
- rv = ipull_head(fifo, pi, block);
- apr_thread_mutex_unlock(fifo->lock);
-@@ -1088,10 +1054,6 @@
- apr_status_t rv;
- int id;
-
-- if (fifo->aborted) {
-- return APR_EOF;
-- }
--
- if (APR_SUCCESS == (rv = apr_thread_mutex_lock(fifo->lock))) {
- if (APR_SUCCESS == (rv = ipull_head(fifo, &id, block))) {
- switch (fn(id, ctx)) {
-@@ -1117,39 +1079,40 @@
- return ififo_peek(fifo, fn, ctx, 0);
- }
-
--apr_status_t h2_ififo_remove(h2_ififo *fifo, int id)
-+static apr_status_t ififo_remove(h2_ififo *fifo, int id)
- {
-- apr_status_t rv;
-+ int rc, i;
-
- if (fifo->aborted) {
- return APR_EOF;
- }
-
-- if ((rv = apr_thread_mutex_lock(fifo->lock)) == APR_SUCCESS) {
-- int i, rc;
-- int e;
--
-- rc = 0;
-- for (i = 0; i < fifo->count; ++i) {
-- e = fifo->elems[inth_index(fifo, i)];
-- if (e == id) {
-- ++rc;
-- }
-- else if (rc) {
-- fifo->elems[inth_index(fifo, i-rc)] = e;
-- }
-- }
-- if (rc) {
-- fifo->count -= rc;
-- if (fifo->count + rc == fifo->nelems) {
-- apr_thread_cond_broadcast(fifo->not_full);
-- }
-- rv = APR_SUCCESS;
-+ rc = 0;
-+ for (i = 0; i < fifo->count; ++i) {
-+ int e = fifo->elems[inth_index(fifo, i)];
-+ if (e == id) {
-+ ++rc;
- }
-- else {
-- rv = APR_EAGAIN;
-+ else if (rc) {
-+ fifo->elems[inth_index(fifo, i-rc)] = e;
- }
--
-+ }
-+ if (!rc) {
-+ return APR_EAGAIN;
-+ }
-+ fifo->count -= rc;
-+ if (fifo->count + rc == fifo->nelems) {
-+ apr_thread_cond_broadcast(fifo->not_full);
-+ }
-+ return APR_SUCCESS;
-+}
-+
-+apr_status_t h2_ififo_remove(h2_ififo *fifo, int id)
-+{
-+ apr_status_t rv;
-+
-+ if ((rv = apr_thread_mutex_lock(fifo->lock)) == APR_SUCCESS) {
-+ rv = ififo_remove(fifo, id);
- apr_thread_mutex_unlock(fifo->lock);
- }
- return rv;
-@@ -1373,7 +1336,7 @@
- return status;
- }
- else if (blen == 0) {
-- /* brigade without data, does it have an EOS bucket somwhere? */
-+ /* brigade without data, does it have an EOS bucket somewhere? */
- *plen = 0;
- *peos = h2_util_has_eos(bb, -1);
- }
-@@ -1840,22 +1803,29 @@
- }
-
- apr_status_t h2_req_add_header(apr_table_t *headers, apr_pool_t *pool,
-- const char *name, size_t nlen,
-- const char *value, size_t vlen)
-+ const char *name, size_t nlen,
-+ const char *value, size_t vlen,
-+ size_t max_field_len, int *pwas_added)
- {
- char *hname, *hvalue;
-+ const char *existing;
-
-+ *pwas_added = 0;
- if (h2_req_ignore_header(name, nlen)) {
- return APR_SUCCESS;
- }
- else if (H2_HD_MATCH_LIT("cookie", name, nlen)) {
-- const char *existing = apr_table_get(headers, "cookie");
-+ existing = apr_table_get(headers, "cookie");
- if (existing) {
- char *nval;
-
- /* Cookie header come separately in HTTP/2, but need
- * to be merged by "; " (instead of default ", ")
- */
-+ if (max_field_len && strlen(existing) + vlen + nlen + 4 > max_field_len) {
-+ /* "key: oldval, nval" is too long */
-+ return APR_EINVAL;
-+ }
- hvalue = apr_pstrndup(pool, value, vlen);
- nval = apr_psprintf(pool, "%s; %s", existing, hvalue);
- apr_table_setn(headers, "Cookie", nval);
-@@ -1869,8 +1839,16 @@
- }
-
- hname = apr_pstrndup(pool, name, nlen);
-- hvalue = apr_pstrndup(pool, value, vlen);
- h2_util_camel_case_header(hname, nlen);
-+ existing = apr_table_get(headers, hname);
-+ if (max_field_len) {
-+ if ((existing? strlen(existing)+2 : 0) + vlen + nlen + 2 > max_field_len) {
-+ /* "key: (oldval, )?nval" is too long */
-+ return APR_EINVAL;
-+ }
-+ }
-+ if (!existing) *pwas_added = 1;
-+ hvalue = apr_pstrndup(pool, value, vlen);
- apr_table_mergen(headers, hname, hvalue);
-
- return APR_SUCCESS;
-@@ -1960,7 +1938,8 @@
- case NGHTTP2_GOAWAY: {
- size_t len = (frame->goaway.opaque_data_len < s_len)?
- frame->goaway.opaque_data_len : s_len-1;
-- memcpy(scratch, frame->goaway.opaque_data, len);
-+ if (len)
-+ memcpy(scratch, frame->goaway.opaque_data, len);
- scratch[len] = '\0';
- return apr_snprintf(buffer, maxlen, "GOAWAY[error=%d, reason='%s', "
- "last_stream=%d]", frame->goaway.error_code,
---- a/modules/http2/h2_util.h
-+++ b/modules/http2/h2_util.h
-@@ -209,7 +209,6 @@
- apr_status_t h2_fifo_set_create(h2_fifo **pfifo, apr_pool_t *pool, int capacity);
-
- apr_status_t h2_fifo_term(h2_fifo *fifo);
--apr_status_t h2_fifo_interrupt(h2_fifo *fifo);
-
- int h2_fifo_count(h2_fifo *fifo);
-
-@@ -229,7 +228,7 @@
-
- typedef enum {
- H2_FIFO_OP_PULL, /* pull the element from the queue, ie discard it */
-- H2_FIFO_OP_REPUSH, /* pull and immediatley re-push it */
-+ H2_FIFO_OP_REPUSH, /* pull and immediately re-push it */
- } h2_fifo_op_t;
-
- typedef h2_fifo_op_t h2_fifo_peek_fn(void *head, void *ctx);
-@@ -280,7 +279,6 @@
- apr_status_t h2_ififo_set_create(h2_ififo **pfifo, apr_pool_t *pool, int capacity);
-
- apr_status_t h2_ififo_term(h2_ififo *fifo);
--apr_status_t h2_ififo_interrupt(h2_ififo *fifo);
-
- int h2_ififo_count(h2_ififo *fifo);
-
-@@ -412,9 +410,14 @@
- apr_status_t h2_req_create_ngheader(h2_ngheader **ph, apr_pool_t *p,
- const struct h2_request *req);
-
-+/**
-+ * Add a HTTP/2 header and return the table key if it really was added
-+ * and not ignored.
-+ */
- apr_status_t h2_req_add_header(apr_table_t *headers, apr_pool_t *pool,
- const char *name, size_t nlen,
-- const char *value, size_t vlen);
-+ const char *value, size_t vlen,
-+ size_t max_field_len, int *pwas_added);
-
- /*******************************************************************************
- * h2_request helpers
---- a/modules/http2/h2_version.h
-+++ b/modules/http2/h2_version.h
-@@ -27,7 +27,7 @@
- * @macro
- * Version number of the http2 module as c string
- */
--#define MOD_HTTP2_VERSION "1.11.4"
-+#define MOD_HTTP2_VERSION "1.15.14"
-
- /**
- * @macro
-@@ -35,7 +35,6 @@
- * release. This is a 24 bit number with 8 bits for major number, 8 bits
- * for minor and 8 bits for patch. Version 1.2.3 becomes 0x010203.
- */
--#define MOD_HTTP2_VERSION_NUM 0x010b04
--
-+#define MOD_HTTP2_VERSION_NUM 0x010f0e
-
- #endif /* mod_h2_h2_version_h */
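As the header comment above says, the packed version number reserves 8 bits each for major, minor and patch. A quick illustrative check (not part of the patch) that the new version string 1.15.14 and 0x010f0e agree:

    #include <stdio.h>

    int main(void)
    {
        unsigned int num = (1 << 16) | (15 << 8) | 14;  /* 1.15.14 */
        printf("0x%06x\n", num);  /* prints 0x010f0e, the new MOD_HTTP2_VERSION_NUM */
        return 0;
    }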
---- a/modules/http2/h2_workers.c
-+++ b/modules/http2/h2_workers.c
-@@ -155,7 +155,7 @@
- {
- apr_status_t rv;
-
-- rv = h2_mplx_pop_task(m, &slot->task);
-+ rv = h2_mplx_s_pop_task(m, &slot->task);
- if (slot->task) {
- /* Ok, we got something to give back to the worker for execution.
- * If we still have idle workers, we let the worker be sticky,
-@@ -234,10 +234,10 @@
- * mplx the opportunity to give us back a new task right away.
- */
- if (!slot->aborted && (--slot->sticks > 0)) {
-- h2_mplx_task_done(slot->task->mplx, slot->task, &slot->task);
-+ h2_mplx_s_task_done(slot->task->mplx, slot->task, &slot->task);
- }
- else {
-- h2_mplx_task_done(slot->task->mplx, slot->task, NULL);
-+ h2_mplx_s_task_done(slot->task->mplx, slot->task, NULL);
- slot->task = NULL;
- }
- }
-@@ -269,7 +269,6 @@
- }
-
- h2_fifo_term(workers->mplxs);
-- h2_fifo_interrupt(workers->mplxs);
-
- cleanup_zombies(workers);
- }
---- a/modules/http2/mod_http2.c
-+++ b/modules/http2/mod_http2.c
-@@ -172,27 +172,6 @@
- conn_rec *, request_rec *, char *name);
- static int http2_is_h2(conn_rec *);
-
--static apr_status_t http2_req_engine_push(const char *ngn_type,
-- request_rec *r,
-- http2_req_engine_init *einit)
--{
-- return h2_mplx_req_engine_push(ngn_type, r, einit);
--}
--
--static apr_status_t http2_req_engine_pull(h2_req_engine *ngn,
-- apr_read_type_e block,
-- int capacity,
-- request_rec **pr)
--{
-- return h2_mplx_req_engine_pull(ngn, block, capacity, pr);
--}
--
--static void http2_req_engine_done(h2_req_engine *ngn, conn_rec *r_conn,
-- apr_status_t status)
--{
-- h2_mplx_req_engine_done(ngn, r_conn, status);
--}
--
- static void http2_get_num_workers(server_rec *s, int *minw, int *maxw)
- {
- h2_get_num_workers(s, minw, maxw);
-@@ -220,9 +199,6 @@
-
- APR_REGISTER_OPTIONAL_FN(http2_is_h2);
- APR_REGISTER_OPTIONAL_FN(http2_var_lookup);
-- APR_REGISTER_OPTIONAL_FN(http2_req_engine_push);
-- APR_REGISTER_OPTIONAL_FN(http2_req_engine_pull);
-- APR_REGISTER_OPTIONAL_FN(http2_req_engine_done);
- APR_REGISTER_OPTIONAL_FN(http2_get_num_workers);
-
- ap_log_perror(APLOG_MARK, APLOG_TRACE1, 0, pool, "installing hooks");
-@@ -260,9 +236,8 @@
- {
- if (ctx) {
- if (r) {
-- h2_task *task = h2_ctx_get_task(ctx);
-- if (task) {
-- h2_stream *stream = h2_mplx_stream_get(task->mplx, task->stream_id);
-+ if (ctx->task) {
-+ h2_stream *stream = h2_mplx_t_stream_get(ctx->task->mplx, ctx->task);
- if (stream && stream->push_policy != H2_PUSH_NONE) {
- return "on";
- }
-@@ -273,8 +248,7 @@
- }
- }
- else if (s) {
-- const h2_config *cfg = h2_config_sget(s);
-- if (cfg && h2_config_geti(cfg, H2_CONF_PUSH)) {
-+ if (h2_config_geti(r, s, H2_CONF_PUSH)) {
- return "on";
- }
- }
-@@ -285,8 +259,7 @@
- conn_rec *c, request_rec *r, h2_ctx *ctx)
- {
- if (ctx) {
-- h2_task *task = h2_ctx_get_task(ctx);
-- if (task && !H2_STREAM_CLIENT_INITIATED(task->stream_id)) {
-+ if (ctx->task && !H2_STREAM_CLIENT_INITIATED(ctx->task->stream_id)) {
- return "PUSHED";
- }
- }
-@@ -297,9 +270,8 @@
- conn_rec *c, request_rec *r, h2_ctx *ctx)
- {
- if (ctx) {
-- h2_task *task = h2_ctx_get_task(ctx);
-- if (task && !H2_STREAM_CLIENT_INITIATED(task->stream_id)) {
-- h2_stream *stream = h2_mplx_stream_get(task->mplx, task->stream_id);
-+ if (ctx->task && !H2_STREAM_CLIENT_INITIATED(ctx->task->stream_id)) {
-+ h2_stream *stream = h2_mplx_t_stream_get(ctx->task->mplx, ctx->task);
- if (stream) {
- return apr_itoa(p, stream->initiated_on);
- }
-@@ -312,9 +284,8 @@
- conn_rec *c, request_rec *r, h2_ctx *ctx)
- {
- if (ctx) {
-- h2_task *task = h2_ctx_get_task(ctx);
-- if (task) {
-- return task->id;
-+ if (ctx->task) {
-+ return ctx->task->id;
- }
- }
- return "";
-@@ -366,7 +337,7 @@
- for (i = 0; i < H2_ALEN(H2_VARS); ++i) {
- h2_var_def *vdef = &H2_VARS[i];
- if (!strcmp(vdef->name, name)) {
-- h2_ctx *ctx = (r? h2_ctx_rget(r) :
-+ h2_ctx *ctx = (r? h2_ctx_get(c, 0) :
- h2_ctx_get(c->master? c->master : c, 0));
- return (char *)vdef->lookup(p, s, c, r, ctx);
- }
-@@ -377,7 +348,7 @@
- static int h2_h2_fixups(request_rec *r)
- {
- if (r->connection->master) {
-- h2_ctx *ctx = h2_ctx_rget(r);
-+ h2_ctx *ctx = h2_ctx_get(r->connection, 0);
- int i;
-
- for (i = 0; ctx && i < H2_ALEN(H2_VARS); ++i) {
---- a/modules/http2/mod_http2.dep
-+++ b/modules/http2/mod_http2.dep
-@@ -694,7 +694,6 @@
- ".\h2_ctx.h"\
- ".\h2_h2.h"\
- ".\h2_mplx.h"\
-- ".\h2_ngn_shed.h"\
- ".\h2_private.h"\
- ".\h2_request.h"\
- ".\h2_stream.h"\
-@@ -754,7 +753,6 @@
- ".\h2_ctx.h"\
- ".\h2_h2.h"\
- ".\h2_mplx.h"\
-- ".\h2_ngn_shed.h"\
- ".\h2_private.h"\
- ".\h2_request.h"\
- ".\h2_task.h"\
---- a/modules/http2/mod_http2.dsp
-+++ b/modules/http2/mod_http2.dsp
-@@ -145,10 +145,6 @@
- # End Source File
- # Begin Source File
-
--SOURCE=./h2_ngn_shed.c
--# End Source File
--# Begin Source File
--
- SOURCE=./h2_push.c
- # End Source File
- # Begin Source File
---- a/modules/http2/mod_http2.h
-+++ b/modules/http2/mod_http2.h
-@@ -30,22 +30,20 @@
-
-
- /*******************************************************************************
-- * HTTP/2 request engines
-+ * START HTTP/2 request engines (DEPRECATED)
- ******************************************************************************/
-+
-+/* The following functions were introduced for the experimental mod_proxy_http2
-+ * support, but have been abandoned since.
-+ * They are still declared here for backward compatibility, in case someone
-+ * tries to build an old mod_proxy_http2 against it, but will disappear
-+ * completely sometime in the future.
-+ */
-
- struct apr_thread_cond_t;
--
- typedef struct h2_req_engine h2_req_engine;
--
- typedef void http2_output_consumed(void *ctx, conn_rec *c, apr_off_t consumed);
-
--/**
-- * Initialize a h2_req_engine. The structure will be passed in but
-- * only the name and master are set. The function should initialize
-- * all fields.
-- * @param engine the allocated, partially filled structure
-- * @param r the first request to process, or NULL
-- */
- typedef apr_status_t http2_req_engine_init(h2_req_engine *engine,
- const char *id,
- const char *type,
-@@ -55,35 +53,11 @@
- http2_output_consumed **pconsumed,
- void **pbaton);
-
--/**
-- * Push a request to an engine with the specified name for further processing.
-- * If no such engine is available, einit is not NULL, einit is called
-- * with a new engine record and the caller is responsible for running the
-- * new engine instance.
-- * @param engine_type the type of the engine to add the request to
-- * @param r the request to push to an engine for processing
-- * @param einit an optional initialization callback for a new engine
-- * of the requested type, should no instance be available.
-- * By passing a non-NULL callback, the caller is willing
-- * to init and run a new engine itself.
-- * @return APR_SUCCESS iff slave was successfully added to an engine
-- */
- APR_DECLARE_OPTIONAL_FN(apr_status_t,
- http2_req_engine_push, (const char *engine_type,
- request_rec *r,
- http2_req_engine_init *einit));
-
--/**
-- * Get a new request for processing in this engine.
-- * @param engine the engine which is done processing the slave
-- * @param block if call should block waiting for request to come
-- * @param capacity how many parallel requests are acceptable
-- * @param pr the request that needs processing or NULL
-- * @return APR_SUCCESS if new request was assigned
-- * APR_EAGAIN if no new request is available
-- * APR_EOF if engine may shut down, as no more request will be scheduled
-- * APR_ECONNABORTED if the engine needs to shut down immediately
-- */
- APR_DECLARE_OPTIONAL_FN(apr_status_t,
- http2_req_engine_pull, (h2_req_engine *engine,
- apr_read_type_e block,
-@@ -98,4 +72,8 @@
- http2_get_num_workers, (server_rec *s,
- int *minw, int *max));
-
-+/*******************************************************************************
-+ * END HTTP/2 request engines (DEPRECATED)
-+ ******************************************************************************/
-+
- #endif
---- a/modules/http2/mod_http2.mak
-+++ b/modules/http2/mod_http2.mak
-@@ -61,7 +61,6 @@
- -@erase "$(INTDIR)\h2_h2.obj"
- -@erase "$(INTDIR)\h2_headers.obj"
- -@erase "$(INTDIR)\h2_mplx.obj"
-- -@erase "$(INTDIR)\h2_ngn_shed.obj"
- -@erase "$(INTDIR)\h2_push.obj"
- -@erase "$(INTDIR)\h2_request.obj"
- -@erase "$(INTDIR)\h2_session.obj"
-@@ -138,7 +137,6 @@
- "$(INTDIR)\h2_h2.obj" \
- "$(INTDIR)\h2_headers.obj" \
- "$(INTDIR)\h2_mplx.obj" \
-- "$(INTDIR)\h2_ngn_shed.obj" \
- "$(INTDIR)\h2_push.obj" \
- "$(INTDIR)\h2_request.obj" \
- "$(INTDIR)\h2_session.obj" \
-@@ -207,7 +205,6 @@
- -@erase "$(INTDIR)\h2_h2.obj"
- -@erase "$(INTDIR)\h2_headers.obj"
- -@erase "$(INTDIR)\h2_mplx.obj"
-- -@erase "$(INTDIR)\h2_ngn_shed.obj"
- -@erase "$(INTDIR)\h2_push.obj"
- -@erase "$(INTDIR)\h2_request.obj"
- -@erase "$(INTDIR)\h2_session.obj"
-@@ -284,7 +281,6 @@
- "$(INTDIR)\h2_h2.obj" \
- "$(INTDIR)\h2_headers.obj" \
- "$(INTDIR)\h2_mplx.obj" \
-- "$(INTDIR)\h2_ngn_shed.obj" \
- "$(INTDIR)\h2_push.obj" \
- "$(INTDIR)\h2_request.obj" \
- "$(INTDIR)\h2_session.obj" \
-@@ -469,11 +465,6 @@
- "$(INTDIR)\h2_mplx.obj" : $(SOURCE) "$(INTDIR)"
-
-
--SOURCE=./h2_ngn_shed.c
--
--"$(INTDIR)\h2_ngn_shed.obj" : $(SOURCE) "$(INTDIR)"
--
--
- SOURCE=./h2_push.c
-
- "$(INTDIR)\h2_push.obj" : $(SOURCE) "$(INTDIR)"
---- a/modules/http2/mod_proxy_http2.c
-+++ b/modules/http2/mod_proxy_http2.c
-@@ -16,13 +16,14 @@
-
- #include <nghttp2/nghttp2.h>
-
-+#include <ap_mmn.h>
- #include <httpd.h>
- #include <mod_proxy.h>
- #include "mod_http2.h"
-
-
- #include "mod_proxy_http2.h"
--#include "h2_request.h"
-+#include "h2.h"
- #include "h2_proxy_util.h"
- #include "h2_version.h"
- #include "h2_proxy_session.h"
-@@ -46,19 +47,12 @@
-
- /* Optional functions from mod_http2 */
- static int (*is_h2)(conn_rec *c);
--static apr_status_t (*req_engine_push)(const char *name, request_rec *r,
-- http2_req_engine_init *einit);
--static apr_status_t (*req_engine_pull)(h2_req_engine *engine,
-- apr_read_type_e block,
-- int capacity,
-- request_rec **pr);
--static void (*req_engine_done)(h2_req_engine *engine, conn_rec *r_conn,
-- apr_status_t status);
--
-+
- typedef struct h2_proxy_ctx {
-+ const char *id;
-+ conn_rec *master;
- conn_rec *owner;
- apr_pool_t *pool;
-- request_rec *rbase;
- server_rec *server;
- const char *proxy_func;
- char server_portstr[32];
-@@ -66,19 +60,15 @@
- proxy_worker *worker;
- proxy_server_conf *conf;
-
-- h2_req_engine *engine;
-- const char *engine_id;
-- const char *engine_type;
-- apr_pool_t *engine_pool;
- apr_size_t req_buffer_size;
-- h2_proxy_fifo *requests;
- int capacity;
-
-- unsigned standalone : 1;
- unsigned is_ssl : 1;
-- unsigned flushall : 1;
-
-- apr_status_t r_status; /* status of our first request work */
-+ request_rec *r; /* the request processed in this ctx */
-+ apr_status_t r_status; /* status of request work */
-+ int r_done; /* request was processed, not necessarily successfully */
-+ int r_may_retry; /* request may be retried */
- h2_proxy_session *session; /* current http2 session against backend */
- } h2_proxy_ctx;
-
-@@ -104,16 +94,6 @@
- MOD_HTTP2_VERSION, ngh2? ngh2->version_str : "unknown");
-
- is_h2 = APR_RETRIEVE_OPTIONAL_FN(http2_is_h2);
-- req_engine_push = APR_RETRIEVE_OPTIONAL_FN(http2_req_engine_push);
-- req_engine_pull = APR_RETRIEVE_OPTIONAL_FN(http2_req_engine_pull);
-- req_engine_done = APR_RETRIEVE_OPTIONAL_FN(http2_req_engine_done);
--
-- /* we need all of them */
-- if (!req_engine_push || !req_engine_pull || !req_engine_done) {
-- req_engine_push = NULL;
-- req_engine_pull = NULL;
-- req_engine_done = NULL;
-- }
-
- return status;
- }
-@@ -204,45 +184,6 @@
- return OK;
- }
-
--static void out_consumed(void *baton, conn_rec *c, apr_off_t bytes)
--{
-- h2_proxy_ctx *ctx = baton;
--
-- if (ctx->session) {
-- h2_proxy_session_update_window(ctx->session, c, bytes);
-- }
--}
--
--static apr_status_t proxy_engine_init(h2_req_engine *engine,
-- const char *id,
-- const char *type,
-- apr_pool_t *pool,
-- apr_size_t req_buffer_size,
-- request_rec *r,
-- http2_output_consumed **pconsumed,
-- void **pctx)
--{
-- h2_proxy_ctx *ctx = ap_get_module_config(r->connection->conn_config,
-- &proxy_http2_module);
-- if (!ctx) {
-- ap_log_rerror(APLOG_MARK, APLOG_WARNING, 0, r, APLOGNO(03368)
-- "h2_proxy_session, engine init, no ctx found");
-- return APR_ENOTIMPL;
-- }
--
-- ctx->pool = pool;
-- ctx->engine = engine;
-- ctx->engine_id = id;
-- ctx->engine_type = type;
-- ctx->engine_pool = pool;
-- ctx->req_buffer_size = req_buffer_size;
-- ctx->capacity = H2MIN(100, h2_proxy_fifo_capacity(ctx->requests));
--
-- *pconsumed = out_consumed;
-- *pctx = ctx;
-- return APR_SUCCESS;
--}
--
- static apr_status_t add_request(h2_proxy_session *session, request_rec *r)
- {
- h2_proxy_ctx *ctx = session->user_data;
-@@ -252,7 +193,7 @@
- url = apr_table_get(r->notes, H2_PROXY_REQ_URL_NOTE);
- apr_table_setn(r->notes, "proxy-source-port", apr_psprintf(r->pool, "%hu",
- ctx->p_conn->connection->local_addr->port));
-- status = h2_proxy_session_submit(session, url, r, ctx->standalone);
-+ status = h2_proxy_session_submit(session, url, r, 1);
- if (status != APR_SUCCESS) {
- ap_log_cerror(APLOG_MARK, APLOG_ERR, status, r->connection, APLOGNO(03351)
- "pass request body failed to %pI (%s) from %s (%s)",
-@@ -266,43 +207,15 @@
- static void request_done(h2_proxy_ctx *ctx, request_rec *r,
- apr_status_t status, int touched)
- {
-- const char *task_id = apr_table_get(r->connection->notes, H2_TASK_ID_NOTE);
--
-- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, r->connection,
-- "h2_proxy_session(%s): request done %s, touched=%d",
-- ctx->engine_id, task_id, touched);
-- if (status != APR_SUCCESS) {
-- if (!touched) {
-- /* untouched request, need rescheduling */
-- status = h2_proxy_fifo_push(ctx->requests, r);
-- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, r->connection,
-- APLOGNO(03369)
-- "h2_proxy_session(%s): rescheduled request %s",
-- ctx->engine_id, task_id);
-- return;
-- }
-- else {
-- const char *uri;
-- uri = apr_uri_unparse(r->pool, &r->parsed_uri, 0);
-- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, r->connection,
-- APLOGNO(03471) "h2_proxy_session(%s): request %s -> %s "
-- "not complete, cannot repeat",
-- ctx->engine_id, task_id, uri);
-- }
-- }
--
-- if (r == ctx->rbase) {
-+ if (r == ctx->r) {
-+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, r->connection,
-+ "h2_proxy_session(%s): request done, touched=%d",
-+ ctx->id, touched);
-+ ctx->r_done = 1;
-+ if (touched) ctx->r_may_retry = 0;
- ctx->r_status = ((status == APR_SUCCESS)? APR_SUCCESS
- : HTTP_SERVICE_UNAVAILABLE);
- }
--
-- if (req_engine_done && ctx->engine) {
-- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, r->connection,
-- APLOGNO(03370)
-- "h2_proxy_session(%s): finished request %s",
-- ctx->engine_id, task_id);
-- req_engine_done(ctx->engine, r->connection, status);
-- }
- }
-
- static void session_req_done(h2_proxy_session *session, request_rec *r,
-@@ -311,43 +224,15 @@
- request_done(session->user_data, r, status, touched);
- }
-
--static apr_status_t next_request(h2_proxy_ctx *ctx, int before_leave)
--{
-- if (h2_proxy_fifo_count(ctx->requests) > 0) {
-- return APR_SUCCESS;
-- }
-- else if (req_engine_pull && ctx->engine) {
-- apr_status_t status;
-- request_rec *r = NULL;
--
-- status = req_engine_pull(ctx->engine, before_leave?
-- APR_BLOCK_READ: APR_NONBLOCK_READ,
-- ctx->capacity, &r);
-- if (status == APR_SUCCESS && r) {
-- ap_log_cerror(APLOG_MARK, APLOG_TRACE3, status, ctx->owner,
-- "h2_proxy_engine(%s): pulled request (%s) %s",
-- ctx->engine_id,
-- before_leave? "before leave" : "regular",
-- r->the_request);
-- h2_proxy_fifo_push(ctx->requests, r);
-- }
-- return APR_STATUS_IS_EAGAIN(status)? APR_SUCCESS : status;
-- }
-- return APR_EOF;
--}
--
--static apr_status_t proxy_engine_run(h2_proxy_ctx *ctx) {
-+static apr_status_t ctx_run(h2_proxy_ctx *ctx) {
- apr_status_t status = OK;
- int h2_front;
-- request_rec *r;
-
- /* Step Four: Send the Request in a new HTTP/2 stream and
- * loop until we got the response or encounter errors.
- */
-- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, ctx->owner,
-- "eng(%s): setup session", ctx->engine_id);
- h2_front = is_h2? is_h2(ctx->owner) : 0;
-- ctx->session = h2_proxy_session_setup(ctx->engine_id, ctx->p_conn, ctx->conf,
-+ ctx->session = h2_proxy_session_setup(ctx->id, ctx->p_conn, ctx->conf,
- h2_front, 30,
- h2_proxy_log2((int)ctx->req_buffer_size),
- session_req_done);
-@@ -358,105 +243,45 @@
- }
-
- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, ctx->owner, APLOGNO(03373)
-- "eng(%s): run session %s", ctx->engine_id, ctx->session->id);
-+ "eng(%s): run session %s", ctx->id, ctx->session->id);
- ctx->session->user_data = ctx;
-
-- while (!ctx->owner->aborted) {
-- if (APR_SUCCESS == h2_proxy_fifo_try_pull(ctx->requests, (void**)&r)) {
-- add_request(ctx->session, r);
-- }
--
-+ ctx->r_done = 0;
-+ add_request(ctx->session, ctx->r);
-+
-+ while (!ctx->master->aborted && !ctx->r_done) {
-+
- status = h2_proxy_session_process(ctx->session);
--
-- if (status == APR_SUCCESS) {
-- apr_status_t s2;
-- /* ongoing processing, call again */
-- if (ctx->session->remote_max_concurrent > 0
-- && ctx->session->remote_max_concurrent != ctx->capacity) {
-- ctx->capacity = H2MIN((int)ctx->session->remote_max_concurrent,
-- h2_proxy_fifo_capacity(ctx->requests));
-- }
-- s2 = next_request(ctx, 0);
-- if (s2 == APR_ECONNABORTED) {
-- /* master connection gone */
-- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, s2, ctx->owner,
-- APLOGNO(03374) "eng(%s): pull request",
-- ctx->engine_id);
-- /* give notice that we're leaving and cancel all ongoing
-- * streams. */
-- next_request(ctx, 1);
-- h2_proxy_session_cancel_all(ctx->session);
-- h2_proxy_session_process(ctx->session);
-- status = ctx->r_status = APR_SUCCESS;
-- break;
-- }
-- if ((h2_proxy_fifo_count(ctx->requests) == 0)
-- && h2_proxy_ihash_empty(ctx->session->streams)) {
-- break;
-- }
-- }
-- else {
-- /* end of processing, maybe error */
-+ if (status != APR_SUCCESS) {
-+ /* Encountered an error during session processing */
- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, ctx->owner,
- APLOGNO(03375) "eng(%s): end of session %s",
-- ctx->engine_id, ctx->session->id);
-- /*
-- * Any open stream of that session needs to
-+ ctx->id, ctx->session->id);
-+ /* Any open stream of that session needs to
- * a) be reopened on the new session iff safe to do so
- * b) reported as done (failed) otherwise
- */
- h2_proxy_session_cleanup(ctx->session, session_req_done);
-- break;
-+ goto out;
- }
- }
-
-- ctx->session->user_data = NULL;
-- ctx->session = NULL;
--
-- return status;
--}
--
--static apr_status_t push_request_somewhere(h2_proxy_ctx *ctx, request_rec *r)
--{
-- conn_rec *c = ctx->owner;
-- const char *engine_type, *hostname;
--
-- hostname = (ctx->p_conn->ssl_hostname?
-- ctx->p_conn->ssl_hostname : ctx->p_conn->hostname);
-- engine_type = apr_psprintf(ctx->pool, "proxy_http2 %s%s", hostname,
-- ctx->server_portstr);
--
-- if (c->master && req_engine_push && r && is_h2 && is_h2(c)) {
-- /* If we are have req_engine capabilities, push the handling of this
-- * request (e.g. slave connection) to a proxy_http2 engine which
-- * uses the same backend. We may be called to create an engine
-- * ourself. */
-- if (req_engine_push(engine_type, r, proxy_engine_init) == APR_SUCCESS) {
-- if (ctx->engine == NULL) {
-- /* request has been assigned to an engine in another thread */
-- return SUSPENDED;
-- }
-+out:
-+ if (ctx->master->aborted) {
-+ /* master connection gone */
-+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, ctx->owner,
-+ APLOGNO(03374) "eng(%s): master connection gone", ctx->id);
-+ /* cancel all ongoing requests */
-+ h2_proxy_session_cancel_all(ctx->session);
-+ h2_proxy_session_process(ctx->session);
-+ if (!ctx->master->aborted) {
-+ status = ctx->r_status = APR_SUCCESS;
- }
- }
-
-- if (!ctx->engine) {
-- /* No engine was available or has been initialized, handle this
-- * request just by ourself. */
-- ctx->engine_id = apr_psprintf(ctx->pool, "eng-proxy-%ld", c->id);
-- ctx->engine_type = engine_type;
-- ctx->engine_pool = ctx->pool;
-- ctx->req_buffer_size = (32*1024);
-- ctx->standalone = 1;
-- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c,
-- "h2_proxy_http2(%ld): setup standalone engine for type %s",
-- c->id, engine_type);
-- }
-- else {
-- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c,
-- "H2: hosting engine %s", ctx->engine_id);
-- }
--
-- return h2_proxy_fifo_push(ctx->requests, r);
-+ ctx->session->user_data = NULL;
-+ ctx->session = NULL;
-+ return status;
- }
-
- static int proxy_http2_handler(request_rec *r,
-@@ -466,7 +291,7 @@
- const char *proxyname,
- apr_port_t proxyport)
- {
-- const char *proxy_func;
-+ const char *proxy_func, *task_id;
- char *locurl = url, *u;
- apr_size_t slen;
- int is_ssl = 0;
-@@ -498,29 +323,35 @@
- default:
- return DECLINED;
- }
-+
-+ task_id = apr_table_get(r->connection->notes, H2_TASK_ID_NOTE);
-
- ctx = apr_pcalloc(r->pool, sizeof(*ctx));
-- ctx->owner = r->connection;
-- ctx->pool = r->pool;
-- ctx->rbase = r;
-- ctx->server = r->server;
-+ ctx->master = r->connection->master? r->connection->master : r->connection;
-+ ctx->id = task_id? task_id : apr_psprintf(r->pool, "%ld", (long)ctx->master->id);
-+ ctx->owner = r->connection;
-+ ctx->pool = r->pool;
-+ ctx->server = r->server;
- ctx->proxy_func = proxy_func;
-- ctx->is_ssl = is_ssl;
-- ctx->worker = worker;
-- ctx->conf = conf;
-- ctx->flushall = apr_table_get(r->subprocess_env, "proxy-flushall")? 1 : 0;
-- ctx->r_status = HTTP_SERVICE_UNAVAILABLE;
--
-- h2_proxy_fifo_set_create(&ctx->requests, ctx->pool, 100);
-+ ctx->is_ssl = is_ssl;
-+ ctx->worker = worker;
-+ ctx->conf = conf;
-+ ctx->req_buffer_size = (32*1024);
-+ ctx->r = r;
-+ ctx->r_status = status = HTTP_SERVICE_UNAVAILABLE;
-+ ctx->r_done = 0;
-+ ctx->r_may_retry = 1;
-
- ap_set_module_config(ctx->owner->conn_config, &proxy_http2_module, ctx);
-
- /* scheme says, this is for us. */
-- apr_table_setn(ctx->rbase->notes, H2_PROXY_REQ_URL_NOTE, url);
-- ap_log_rerror(APLOG_MARK, APLOG_TRACE1, 0, ctx->rbase,
-+ apr_table_setn(ctx->r->notes, H2_PROXY_REQ_URL_NOTE, url);
-+ ap_log_rerror(APLOG_MARK, APLOG_TRACE1, 0, ctx->r,
- "H2: serving URL %s", url);
-
- run_connect:
-+ if (ctx->master->aborted) goto cleanup;
-+
- /* Get a proxy_conn_rec from the worker, might be a new one, might
- * be one still open from another request, or it might fail if the
- * worker is stopped or in error. */
-@@ -530,25 +361,11 @@
- }
-
- ctx->p_conn->is_ssl = ctx->is_ssl;
-- if (ctx->is_ssl && ctx->p_conn->connection) {
-- /* If there are some metadata on the connection (e.g. TLS alert),
-- * let mod_ssl detect them, and create a new connection below.
-- */
-- apr_bucket_brigade *tmp_bb;
-- tmp_bb = apr_brigade_create(ctx->rbase->pool,
-- ctx->rbase->connection->bucket_alloc);
-- status = ap_get_brigade(ctx->p_conn->connection->input_filters, tmp_bb,
-- AP_MODE_SPECULATIVE, APR_NONBLOCK_READ, 1);
-- if (status != APR_SUCCESS && !APR_STATUS_IS_EAGAIN(status)) {
-- ctx->p_conn->close = 1;
-- }
-- apr_brigade_cleanup(tmp_bb);
-- }
-
- /* Step One: Determine the URL to connect to (might be a proxy),
- * initialize the backend accordingly and determine the server
- * port string we can expect in responses. */
-- if ((status = ap_proxy_determine_connection(ctx->pool, ctx->rbase, conf, worker,
-+ if ((status = ap_proxy_determine_connection(ctx->pool, ctx->r, conf, worker,
- ctx->p_conn, &uri, &locurl,
- proxyname, proxyport,
- ctx->server_portstr,
-@@ -556,17 +373,6 @@
- goto cleanup;
- }
-
-- /* If we are not already hosting an engine, try to push the request
-- * to an already existing engine or host a new engine here. */
-- if (r && !ctx->engine) {
-- ctx->r_status = push_request_somewhere(ctx, r);
-- r = NULL;
-- if (ctx->r_status == SUSPENDED) {
-- /* request was pushed to another thread, leave processing here */
-- goto cleanup;
-- }
-- }
--
- /* Step Two: Make the Connection (or check that an already existing
- * socket is still usable). On success, we have a socket connected to
- * backend->hostname. */
-@@ -575,70 +381,56 @@
- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, ctx->owner, APLOGNO(03352)
- "H2: failed to make connection to backend: %s",
- ctx->p_conn->hostname);
-- goto reconnect;
-+ goto cleanup;
- }
-
- /* Step Three: Create conn_rec for the socket we have open now. */
-- if (!ctx->p_conn->connection) {
-- status = ap_proxy_connection_create_ex(ctx->proxy_func,
-- ctx->p_conn, ctx->rbase);
-- if (status != OK) {
-- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, ctx->owner, APLOGNO(03353)
-- "setup new connection: is_ssl=%d %s %s %s",
-- ctx->p_conn->is_ssl, ctx->p_conn->ssl_hostname,
-- locurl, ctx->p_conn->hostname);
-- goto reconnect;
-- }
--
-- if (!ctx->p_conn->data) {
-- /* New conection: set a note on the connection what CN is
-- * requested and what protocol we want */
-- if (ctx->p_conn->ssl_hostname) {
-- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, ctx->owner,
-- "set SNI to %s for (%s)",
-- ctx->p_conn->ssl_hostname,
-- ctx->p_conn->hostname);
-- apr_table_setn(ctx->p_conn->connection->notes,
-- "proxy-request-hostname", ctx->p_conn->ssl_hostname);
-- }
-- if (ctx->is_ssl) {
-- apr_table_setn(ctx->p_conn->connection->notes,
-- "proxy-request-alpn-protos", "h2");
-- }
-- }
-+ status = ap_proxy_connection_create_ex(ctx->proxy_func, ctx->p_conn, ctx->r);
-+ if (status != OK) {
-+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, ctx->owner, APLOGNO(03353)
-+ "setup new connection: is_ssl=%d %s %s %s",
-+ ctx->p_conn->is_ssl, ctx->p_conn->ssl_hostname,
-+ locurl, ctx->p_conn->hostname);
-+ ctx->r_status = status;
-+ goto cleanup;
- }
--
--run_session:
-- status = proxy_engine_run(ctx);
-- if (status == APR_SUCCESS) {
-- /* session and connection still ok */
-- if (next_request(ctx, 1) == APR_SUCCESS) {
-- /* more requests, run again */
-- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, ctx->owner, APLOGNO(03376)
-- "run_session, again");
-- goto run_session;
-+
-+ if (!ctx->p_conn->data && ctx->is_ssl) {
-+ /* New SSL connection: set a note on the connection about what
-+ * protocol we want.
-+ */
-+ apr_table_setn(ctx->p_conn->connection->notes,
-+ "proxy-request-alpn-protos", "h2");
-+ if (ctx->p_conn->ssl_hostname) {
-+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, ctx->owner,
-+ "set SNI to %s for (%s)",
-+ ctx->p_conn->ssl_hostname,
-+ ctx->p_conn->hostname);
-+ apr_table_setn(ctx->p_conn->connection->notes,
-+ "proxy-request-hostname", ctx->p_conn->ssl_hostname);
- }
-- /* done */
-- ctx->engine = NULL;
- }
-
--reconnect:
-- if (next_request(ctx, 1) == APR_SUCCESS) {
-- /* Still more to do, tear down old conn and start over */
-+ if (ctx->master->aborted) goto cleanup;
-+ status = ctx_run(ctx);
-+
-+ if (ctx->r_status != APR_SUCCESS && ctx->r_may_retry && !ctx->master->aborted) {
-+ /* Not successfully processed, but may retry, tear down old conn and start over */
- if (ctx->p_conn) {
- ctx->p_conn->close = 1;
-- /*only in trunk so far */
-- /*proxy_run_detach_backend(r, ctx->p_conn);*/
-+#if AP_MODULE_MAGIC_AT_LEAST(20140207, 2)
-+ proxy_run_detach_backend(r, ctx->p_conn);
-+#endif
- ap_proxy_release_connection(ctx->proxy_func, ctx->p_conn, ctx->server);
- ctx->p_conn = NULL;
- }
- ++reconnects;
-- if (reconnects < 5 && !ctx->owner->aborted) {
-+ if (reconnects < 5) {
- goto run_connect;
- }
- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, ctx->owner, APLOGNO(10023)
-- "giving up after %d reconnects, %d requests todo",
-- reconnects, h2_proxy_fifo_count(ctx->requests));
-+ "giving up after %d reconnects, request-done=%d",
-+ reconnects, ctx->r_done);
- }
-
- cleanup:
-@@ -647,17 +439,13 @@
- /* close socket when errors happened or session shut down (EOF) */
- ctx->p_conn->close = 1;
- }
-- /*only in trunk so far */
-- /*proxy_run_detach_backend(ctx->rbase, ctx->p_conn);*/
-+#if AP_MODULE_MAGIC_AT_LEAST(20140207, 2)
-+ proxy_run_detach_backend(ctx->r, ctx->p_conn);
-+#endif
- ap_proxy_release_connection(ctx->proxy_func, ctx->p_conn, ctx->server);
- ctx->p_conn = NULL;
- }
-
-- /* Any requests will still have need to fail */
-- while (APR_SUCCESS == h2_proxy_fifo_try_pull(ctx->requests, (void**)&r)) {
-- request_done(ctx, r, HTTP_SERVICE_UNAVAILABLE, 1);
-- }
--
- ap_set_module_config(ctx->owner->conn_config, &proxy_http2_module, NULL);
- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, ctx->owner,
- APLOGNO(03377) "leaving handler");
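The reworked handler above drops the request-engine machinery in favour of a single bounded retry loop. The standalone C sketch below (not from the patch; all names and the failure pattern are invented) mirrors that control flow: retry only while the request is still retryable, the master connection has not been aborted, and fewer than five reconnects have been attempted:

    #include <stdio.h>

    struct demo_ctx { int r_status; int r_may_retry; int master_aborted; };

    /* stand-in for connecting to the backend and running the h2 session once */
    static int run_once(struct demo_ctx *ctx, int attempt)
    {
        ctx->r_status = (attempt < 2) ? -1 : 0;  /* fail twice, then succeed */
        return ctx->r_status;
    }

    int main(void)
    {
        struct demo_ctx ctx = { -1, 1, 0 };
        int reconnects = 0;

        while (run_once(&ctx, reconnects) != 0
               && ctx.r_may_retry && !ctx.master_aborted
               && ++reconnects < 5) {
            /* tear down the failed backend connection, then try again */
        }
        printf("status=%d after %d reconnects\n", ctx.r_status, reconnects);
        return 0;
    }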
diff --git a/debian/patches/reproducible_builds.diff b/debian/patches/reproducible_builds.diff
index 36f71e2..8f48922 100644
--- a/debian/patches/reproducible_builds.diff
+++ b/debian/patches/reproducible_builds.diff
@@ -18,7 +18,7 @@ Last-Update: 2015-08-11
-#endif
+static const char server_built[] = BUILD_DATETIME;
- AP_DECLARE(const char *) ap_get_server_built()
+ AP_DECLARE(const char *) ap_get_server_built(void)
{
--- a/server/Makefile.in
+++ b/server/Makefile.in
diff --git a/debian/patches/series b/debian/patches/series
index 839511c..49ae838 100644
--- a/debian/patches/series
+++ b/debian/patches/series
@@ -4,60 +4,7 @@ suexec-CVE-2007-1742.patch
customize_apxs.patch
build_suexec-custom.patch
reproducible_builds.diff
+fix-macro.patch
# This patch is applied manually
#suexec-custom.patch
-spelling-errors.patch
-
-CVE-2019-0196.patch
-CVE-2019-0211.patch
-CVE-2019-0215.patch
-CVE-2019-0217.patch
-CVE-2019-0220-1.patch
-CVE-2019-0220-2.patch
-CVE-2019-0220-3.patch
-CVE-2019-0197.patch
-CVE-2019-10092.patch
-CVE-2019-10097.patch
-CVE-2019-10098.patch
-import-http2-module-from-2.4.46.patch
-CVE-2020-11984.patch
-CVE-2020-1927.patch
-CVE-2020-1934.patch
-CVE-2021-31618.patch
-CVE-2021-30641.patch
-CVE-2021-26691.patch
-CVE-2021-26690.patch
-CVE-2020-35452.patch
-CVE-2021-34798.patch
-CVE-2021-36160.patch
-CVE-2021-39275.patch
-CVE-2021-40438.patch
-CVE-2021-44224-1.patch
-CVE-2021-44224-2.patch
-CVE-2021-44790.patch
-CVE-2021-36160-2.patch
-CVE-2022-22719.patch
-CVE-2022-22720.patch
-CVE-2022-22721.patch
-CVE-2022-23943-1.patch
-CVE-2022-23943-2.patch
-CVE-2022-26377.patch
-CVE-2022-28614.patch
-CVE-2022-28615.patch
-CVE-2022-29404.patch
-CVE-2022-30522.patch
-CVE-2022-30556.patch
-CVE-2022-31813.patch
-CVE-2006-20001.patch
-CVE-2022-36760.patch
-CVE-2022-37436.patch
-CVE-2021-33193.patch
-0052-CVE-2023-27522-HTTP-Response-Smuggling-mod_proxy_uws.patch
-0053-CVE-2023-25690-1.patch
-0054-CVE-2023-25690-2.patch
-0055-CVE-2023-25690-Regression-1.patch
-0056-CVE-2023-25690-Regression-2.patch
-0057-CVE-2023-25690-Regression-3.patch
-
-
diff --git a/debian/patches/spelling-errors.patch b/debian/patches/spelling-errors.patch
deleted file mode 100644
index d42ec00..0000000
--- a/debian/patches/spelling-errors.patch
+++ /dev/null
@@ -1,196 +0,0 @@
-Description: spelling errors
-Author: Xavier Guimard
-Forwarded: https://bz.apache.org/bugzilla/show_bug.cgi?id=62960
-Last-Update: 2018-11-28
-
---- a/LICENSE
-+++ b/LICENSE
-@@ -516,7 +516,7 @@
- This program may be used and copied freely providing this copyright notice
- is not removed.
-
--This software is provided "as is" and any express or implied waranties,
-+This software is provided "as is" and any express or implied warranties,
- including but not limited to, the implied warranties of merchantability and
- fitness for a particular purpose are disclaimed. In no event shall
- Zeus Technology Ltd. be liable for any direct, indirect, incidental, special,
---- a/docs/man/httxt2dbm.1
-+++ b/docs/man/httxt2dbm.1
-@@ -50,7 +50,7 @@
- Specify the DBM type to be used for the output\&. If not specified, will use the APR Default\&. Available types are: \fBGDBM\fR for GDBM files, \fBSDBM\fR for SDBM files, \fBDB\fR for berkeley DB files, \fBNDBM\fR for NDBM files, \fBdefault\fR for the default DBM type\&.
- .TP
- \fB-i \fISOURCE_TXT\fR\fR
--Input file from which the dbm is to be created\&. The file should be formated with one record per line, of the form: \fBkey value\fR\&. See the documentation for RewriteMap for further details of this file's format and meaning\&.
-+Input file from which the dbm is to be created\&. The file should be formatted with one record per line, of the form: \fBkey value\fR\&. See the documentation for RewriteMap for further details of this file's format and meaning\&.
- .TP
- \fB-o \fIOUTPUT_DBM\fR\fR
- Name of the output dbm files\&.
---- a/docs/manual/howto/htaccess.html.ja.utf8
-+++ b/docs/manual/howto/htaccess.html.ja.utf8
-@@ -247,7 +247,7 @@
-
- As discussed in the documentation on Configuration Sections,
- .htaccess
files can override the <Directory>
sections for
-- the corresponding directory, but will be overriden by other types
-+ the corresponding directory, but will be overridden by other types
- of configuration sections from the main configuration files. This
- fact can be used to enforce certain configurations, even in the
- presence of a liberal AllowOverride
setting. For example, to
-@@ -414,4 +414,4 @@
- prettyPrint();
- }
- //-->
--