author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-17 06:30:05 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-17 06:30:05 +0000
commit    a1e354165254cd9e346751e6c2ddc554feeb0e6d (patch)
tree      5fd273cc604fd00efd630eb387a6f79ce102f4e3 /misc
parent    Initial commit. (diff)
Adding upstream version 1.6.3. (upstream/1.6.3, upstream)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'misc')
-rw-r--r--  misc/apr_date.c          637
-rw-r--r--  misc/apr_queue.c         398
-rw-r--r--  misc/apr_reslist.c       479
-rw-r--r--  misc/apr_rmm.c           457
-rw-r--r--  misc/apr_thread_pool.c  1019
-rw-r--r--  misc/apu_dso.c           209
-rw-r--r--  misc/apu_version.c        37
7 files changed, 3236 insertions(+), 0 deletions(-)
diff --git a/misc/apr_date.c b/misc/apr_date.c
new file mode 100644
index 0000000..28086e3
--- /dev/null
+++ b/misc/apr_date.c
@@ -0,0 +1,637 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * apr_date.c: date parsing utility routines
+ * These routines are (hopefully) platform independent.
+ *
+ * 27 Oct 1996 Roy Fielding
+ * Extracted (with many modifications) from mod_proxy.c and
+ * tested with over 50,000 randomly chosen valid date strings
+ * and several hundred variations of invalid date strings.
+ *
+ */
+
+#include "apr.h"
+#include "apr_lib.h"
+
+#define APR_WANT_STRFUNC
+#include "apr_want.h"
+
+#if APR_HAVE_STDLIB_H
+#include <stdlib.h>
+#endif
+
+#if APR_HAVE_CTYPE_H
+#include <ctype.h>
+#endif
+
+#include "apr_date.h"
+
+/*
+ * Compare a string to a mask
+ * Mask characters (arbitrary maximum is 256 characters, just in case):
+ * @ - uppercase letter
+ * $ - lowercase letter
+ * & - hex digit
+ * # - digit
+ * ~ - digit or space
+ * * - swallow remaining characters
+ * <x> - exact match for any other character
+ */
+APU_DECLARE(int) apr_date_checkmask(const char *data, const char *mask)
+{
+ int i;
+ char d;
+
+ for (i = 0; i < 256; i++) {
+ d = data[i];
+ switch (mask[i]) {
+ case '\0':
+ return (d == '\0');
+
+ case '*':
+ return 1;
+
+ case '@':
+ if (!apr_isupper(d))
+ return 0;
+ break;
+ case '$':
+ if (!apr_islower(d))
+ return 0;
+ break;
+ case '#':
+ if (!apr_isdigit(d))
+ return 0;
+ break;
+ case '&':
+ if (!apr_isxdigit(d))
+ return 0;
+ break;
+ case '~':
+ if ((d != ' ') && !apr_isdigit(d))
+ return 0;
+ break;
+ default:
+ if (mask[i] != d)
+ return 0;
+ break;
+ }
+ }
+ return 0; /* We only get here if mask is corrupted (exceeds 256) */
+}
+
+/*
+ * Parses an HTTP date in one of three standard forms:
+ *
+ * Sun, 06 Nov 1994 08:49:37 GMT ; RFC 822, updated by RFC 1123
+ * Sunday, 06-Nov-94 08:49:37 GMT ; RFC 850, obsoleted by RFC 1036
+ * Sun Nov 6 08:49:37 1994 ; ANSI C's asctime() format
+ *
+ * and returns the apr_time_t number of microseconds since 1 Jan 1970 GMT,
+ * or APR_DATE_BAD if this would be out of range or if the date is invalid.
+ *
+ * The restricted HTTP syntax is
+ *
+ * HTTP-date = rfc1123-date | rfc850-date | asctime-date
+ *
+ * rfc1123-date = wkday "," SP date1 SP time SP "GMT"
+ * rfc850-date = weekday "," SP date2 SP time SP "GMT"
+ * asctime-date = wkday SP date3 SP time SP 4DIGIT
+ *
+ * date1 = 2DIGIT SP month SP 4DIGIT
+ * ; day month year (e.g., 02 Jun 1982)
+ * date2 = 2DIGIT "-" month "-" 2DIGIT
+ * ; day-month-year (e.g., 02-Jun-82)
+ * date3 = month SP ( 2DIGIT | ( SP 1DIGIT ))
+ * ; month day (e.g., Jun 2)
+ *
+ * time = 2DIGIT ":" 2DIGIT ":" 2DIGIT
+ * ; 00:00:00 - 23:59:59
+ *
+ * wkday = "Mon" | "Tue" | "Wed"
+ * | "Thu" | "Fri" | "Sat" | "Sun"
+ *
+ * weekday = "Monday" | "Tuesday" | "Wednesday"
+ * | "Thursday" | "Friday" | "Saturday" | "Sunday"
+ *
+ * month = "Jan" | "Feb" | "Mar" | "Apr"
+ * | "May" | "Jun" | "Jul" | "Aug"
+ * | "Sep" | "Oct" | "Nov" | "Dec"
+ *
+ * However, for the sake of robustness (and Netscapeness), we ignore the
+ * weekday and anything after the time field (including the timezone).
+ *
+ * This routine is intended to be very fast; 10x faster than using sscanf.
+ *
+ * Originally from Andrew Daviel <andrew@vancouver-webpages.com>, 29 Jul 96
+ * but many changes since then.
+ *
+ */
+APU_DECLARE(apr_time_t) apr_date_parse_http(const char *date)
+{
+ apr_time_exp_t ds;
+ apr_time_t result;
+ int mint, mon;
+ const char *monstr, *timstr;
+ static const int months[12] =
+ {
+ ('J' << 16) | ('a' << 8) | 'n', ('F' << 16) | ('e' << 8) | 'b',
+ ('M' << 16) | ('a' << 8) | 'r', ('A' << 16) | ('p' << 8) | 'r',
+ ('M' << 16) | ('a' << 8) | 'y', ('J' << 16) | ('u' << 8) | 'n',
+ ('J' << 16) | ('u' << 8) | 'l', ('A' << 16) | ('u' << 8) | 'g',
+ ('S' << 16) | ('e' << 8) | 'p', ('O' << 16) | ('c' << 8) | 't',
+ ('N' << 16) | ('o' << 8) | 'v', ('D' << 16) | ('e' << 8) | 'c'};
+
+ if (!date)
+ return APR_DATE_BAD;
+
+ while (*date && apr_isspace(*date)) /* Find first non-whitespace char */
+ ++date;
+
+ if (*date == '\0')
+ return APR_DATE_BAD;
+
+ if ((date = strchr(date, ' ')) == NULL) /* Find space after weekday */
+ return APR_DATE_BAD;
+
+ ++date; /* Now pointing to first char after space, which should be */
+
+ /* start of the actual date information for all 4 formats. */
+
+ if (apr_date_checkmask(date, "## @$$ #### ##:##:## *")) {
+ /* RFC 1123 format with a two-digit day */
+ ds.tm_year = ((date[7] - '0') * 10 + (date[8] - '0') - 19) * 100;
+ if (ds.tm_year < 0)
+ return APR_DATE_BAD;
+
+ ds.tm_year += ((date[9] - '0') * 10) + (date[10] - '0');
+
+ ds.tm_mday = ((date[0] - '0') * 10) + (date[1] - '0');
+
+ monstr = date + 3;
+ timstr = date + 12;
+ }
+ else if (apr_date_checkmask(date, "##-@$$-## ##:##:## *")) {
+ /* RFC 850 format */
+ ds.tm_year = ((date[7] - '0') * 10) + (date[8] - '0');
+ if (ds.tm_year < 70)
+ ds.tm_year += 100;
+
+ ds.tm_mday = ((date[0] - '0') * 10) + (date[1] - '0');
+
+ monstr = date + 3;
+ timstr = date + 10;
+ }
+ else if (apr_date_checkmask(date, "@$$ ~# ##:##:## ####*")) {
+ /* asctime format */
+ ds.tm_year = ((date[16] - '0') * 10 + (date[17] - '0') - 19) * 100;
+ if (ds.tm_year < 0)
+ return APR_DATE_BAD;
+
+ ds.tm_year += ((date[18] - '0') * 10) + (date[19] - '0');
+
+ if (date[4] == ' ')
+ ds.tm_mday = 0;
+ else
+ ds.tm_mday = (date[4] - '0') * 10;
+
+ ds.tm_mday += (date[5] - '0');
+
+ monstr = date;
+ timstr = date + 7;
+ }
+ else if (apr_date_checkmask(date, "# @$$ #### ##:##:## *")) {
+ /* RFC 1123 format with a one-digit day */
+ ds.tm_year = ((date[6] - '0') * 10 + (date[7] - '0') - 19) * 100;
+ if (ds.tm_year < 0)
+ return APR_DATE_BAD;
+
+ ds.tm_year += ((date[8] - '0') * 10) + (date[9] - '0');
+
+ ds.tm_mday = (date[0] - '0');
+
+ monstr = date + 2;
+ timstr = date + 11;
+ }
+ else
+ return APR_DATE_BAD;
+
+ if (ds.tm_mday <= 0 || ds.tm_mday > 31)
+ return APR_DATE_BAD;
+
+ ds.tm_hour = ((timstr[0] - '0') * 10) + (timstr[1] - '0');
+ ds.tm_min = ((timstr[3] - '0') * 10) + (timstr[4] - '0');
+ ds.tm_sec = ((timstr[6] - '0') * 10) + (timstr[7] - '0');
+
+ if ((ds.tm_hour > 23) || (ds.tm_min > 59) || (ds.tm_sec > 61))
+ return APR_DATE_BAD;
+
+ mint = (monstr[0] << 16) | (monstr[1] << 8) | monstr[2];
+ for (mon = 0; mon < 12; mon++)
+ if (mint == months[mon])
+ break;
+
+ if (mon == 12)
+ return APR_DATE_BAD;
+
+ if ((ds.tm_mday == 31) && (mon == 3 || mon == 5 || mon == 8 || mon == 10))
+ return APR_DATE_BAD;
+
+ /* February gets special check for leapyear */
+ if ((mon == 1) &&
+ ((ds.tm_mday > 29) ||
+ ((ds.tm_mday == 29)
+ && ((ds.tm_year & 3)
+ || (((ds.tm_year % 100) == 0)
+ && (((ds.tm_year % 400) != 100)))))))
+ return APR_DATE_BAD;
+
+ ds.tm_mon = mon;
+
+ /* apr_time_exp_get uses tm_usec and tm_gmtoff fields, but they haven't
+ * been set yet.
+ * It should be safe to just zero out these values.
+ * tm_usec is the number of microseconds into the second. HTTP only
+ * cares about second granularity.
+ * tm_gmtoff is the number of seconds off of GMT the time is. By
+ * definition all times going through this function are in GMT, so this
+ * is zero.
+ */
+ ds.tm_usec = 0;
+ ds.tm_gmtoff = 0;
+ if (apr_time_exp_get(&result, &ds) != APR_SUCCESS)
+ return APR_DATE_BAD;
+
+ return result;
+}
+
+/*
+ * Parses a string resembling an RFC 822 date. This is meant to be
+ * lenient in its parsing of dates. Hence, this will parse a wider
+ * range of dates than apr_date_parse_http.
+ *
+ * The prominent mailer (or poster, if mailer is unknown) that has
+ * been seen in the wild is included for the unknown formats.
+ *
+ * Sun, 06 Nov 1994 08:49:37 GMT ; RFC 822, updated by RFC 1123
+ * Sunday, 06-Nov-94 08:49:37 GMT ; RFC 850, obsoleted by RFC 1036
+ * Sun Nov 6 08:49:37 1994 ; ANSI C's asctime() format
+ * Sun, 6 Nov 1994 08:49:37 GMT ; RFC 822, updated by RFC 1123
+ * Sun, 06 Nov 94 08:49:37 GMT ; RFC 822
+ * Sun, 6 Nov 94 08:49:37 GMT ; RFC 822
+ * Sun, 06 Nov 94 08:49 GMT ; Unknown [drtr@ast.cam.ac.uk]
+ * Sun, 6 Nov 94 08:49 GMT ; Unknown [drtr@ast.cam.ac.uk]
+ * Sun, 06 Nov 94 8:49:37 GMT ; Unknown [Elm 70.85]
+ * Sun, 6 Nov 94 8:49:37 GMT ; Unknown [Elm 70.85]
+ * Mon, 7 Jan 2002 07:21:22 GMT ; Unknown [Postfix]
+ * Sun, 06-Nov-1994 08:49:37 GMT ; RFC 850 with four digit years
+ *
+ */
+
+#define TIMEPARSE(ds,hr10,hr1,min10,min1,sec10,sec1) \
+ { \
+ ds.tm_hour = ((hr10 - '0') * 10) + (hr1 - '0'); \
+ ds.tm_min = ((min10 - '0') * 10) + (min1 - '0'); \
+ ds.tm_sec = ((sec10 - '0') * 10) + (sec1 - '0'); \
+ }
+#define TIMEPARSE_STD(ds,timstr) \
+ { \
+ TIMEPARSE(ds, timstr[0],timstr[1], \
+ timstr[3],timstr[4], \
+ timstr[6],timstr[7]); \
+ }
+
+APU_DECLARE(apr_time_t) apr_date_parse_rfc(const char *date)
+{
+ apr_time_exp_t ds;
+ apr_time_t result;
+ int mint, mon;
+ const char *monstr, *timstr, *gmtstr;
+ static const int months[12] =
+ {
+ ('J' << 16) | ('a' << 8) | 'n', ('F' << 16) | ('e' << 8) | 'b',
+ ('M' << 16) | ('a' << 8) | 'r', ('A' << 16) | ('p' << 8) | 'r',
+ ('M' << 16) | ('a' << 8) | 'y', ('J' << 16) | ('u' << 8) | 'n',
+ ('J' << 16) | ('u' << 8) | 'l', ('A' << 16) | ('u' << 8) | 'g',
+ ('S' << 16) | ('e' << 8) | 'p', ('O' << 16) | ('c' << 8) | 't',
+ ('N' << 16) | ('o' << 8) | 'v', ('D' << 16) | ('e' << 8) | 'c' };
+
+ if (!date)
+ return APR_DATE_BAD;
+
+ /* Not all dates have text days at the beginning. */
+ if (!apr_isdigit(date[0]))
+ {
+ while (*date && apr_isspace(*date)) /* Find first non-whitespace char */
+ ++date;
+
+ if (*date == '\0')
+ return APR_DATE_BAD;
+
+ if ((date = strchr(date, ' ')) == NULL) /* Find space after weekday */
+ return APR_DATE_BAD;
+
+ ++date; /* Now pointing to first char after space, which should be */
+ }
+
+ /* start of the actual date information for all 11 formats. */
+ if (apr_date_checkmask(date, "## @$$ #### ##:##:## *")) { /* RFC 1123 format */
+ ds.tm_year = ((date[7] - '0') * 10 + (date[8] - '0') - 19) * 100;
+
+ if (ds.tm_year < 0)
+ return APR_DATE_BAD;
+
+ ds.tm_year += ((date[9] - '0') * 10) + (date[10] - '0');
+
+ ds.tm_mday = ((date[0] - '0') * 10) + (date[1] - '0');
+
+ monstr = date + 3;
+ timstr = date + 12;
+ gmtstr = date + 21;
+
+ TIMEPARSE_STD(ds, timstr);
+ }
+ else if (apr_date_checkmask(date, "##-@$$-## ##:##:## *")) {/* RFC 850 format */
+ ds.tm_year = ((date[7] - '0') * 10) + (date[8] - '0');
+
+ if (ds.tm_year < 70)
+ ds.tm_year += 100;
+
+ ds.tm_mday = ((date[0] - '0') * 10) + (date[1] - '0');
+
+ monstr = date + 3;
+ timstr = date + 10;
+ gmtstr = date + 19;
+
+ TIMEPARSE_STD(ds, timstr);
+ }
+ else if (apr_date_checkmask(date, "@$$ ~# ##:##:## ####*")) {
+ /* asctime format */
+ ds.tm_year = ((date[16] - '0') * 10 + (date[17] - '0') - 19) * 100;
+ if (ds.tm_year < 0)
+ return APR_DATE_BAD;
+
+ ds.tm_year += ((date[18] - '0') * 10) + (date[19] - '0');
+
+ if (date[4] == ' ')
+ ds.tm_mday = 0;
+ else
+ ds.tm_mday = (date[4] - '0') * 10;
+
+ ds.tm_mday += (date[5] - '0');
+
+ monstr = date;
+ timstr = date + 7;
+ gmtstr = NULL;
+
+ TIMEPARSE_STD(ds, timstr);
+ }
+ else if (apr_date_checkmask(date, "# @$$ #### ##:##:## *")) {
+ /* RFC 1123 format with a one-digit day */
+ ds.tm_year = ((date[6] - '0') * 10 + (date[7] - '0') - 19) * 100;
+
+ if (ds.tm_year < 0)
+ return APR_DATE_BAD;
+
+ ds.tm_year += ((date[8] - '0') * 10) + (date[9] - '0');
+ ds.tm_mday = (date[0] - '0');
+
+ monstr = date + 2;
+ timstr = date + 11;
+ gmtstr = date + 20;
+
+ TIMEPARSE_STD(ds, timstr);
+ }
+ else if (apr_date_checkmask(date, "## @$$ ## ##:##:## *")) {
+ /* This is the old RFC 1123 date format - many many years ago, people
+ * used two-digit years. Oh, how foolish.
+ *
+ * Two-digit day, two-digit year version. */
+ ds.tm_year = ((date[7] - '0') * 10) + (date[8] - '0');
+
+ if (ds.tm_year < 70)
+ ds.tm_year += 100;
+
+ ds.tm_mday = ((date[0] - '0') * 10) + (date[1] - '0');
+
+ monstr = date + 3;
+ timstr = date + 10;
+ gmtstr = date + 19;
+
+ TIMEPARSE_STD(ds, timstr);
+ }
+ else if (apr_date_checkmask(date, " # @$$ ## ##:##:## *")) {
+ /* This is the old RFC 1123 date format - many many years ago, people
+ * used two-digit years. Oh, how foolish.
+ *
+ * Space + one-digit day, two-digit year version.*/
+ ds.tm_year = ((date[7] - '0') * 10) + (date[8] - '0');
+
+ if (ds.tm_year < 70)
+ ds.tm_year += 100;
+
+ ds.tm_mday = (date[1] - '0');
+
+ monstr = date + 3;
+ timstr = date + 10;
+ gmtstr = date + 19;
+
+ TIMEPARSE_STD(ds, timstr);
+ }
+ else if (apr_date_checkmask(date, "# @$$ ## ##:##:## *")) {
+ /* This is the old RFC 1123 date format - many many years ago, people
+ * used two-digit years. Oh, how foolish.
+ *
+ * One-digit day, two-digit year version. */
+ ds.tm_year = ((date[6] - '0') * 10) + (date[7] - '0');
+
+ if (ds.tm_year < 70)
+ ds.tm_year += 100;
+
+ ds.tm_mday = (date[0] - '0');
+
+ monstr = date + 2;
+ timstr = date + 9;
+ gmtstr = date + 18;
+
+ TIMEPARSE_STD(ds, timstr);
+ }
+ else if (apr_date_checkmask(date, "## @$$ ## ##:## *")) {
+ /* Loser format. This is quite bogus. */
+ ds.tm_year = ((date[7] - '0') * 10) + (date[8] - '0');
+
+ if (ds.tm_year < 70)
+ ds.tm_year += 100;
+
+ ds.tm_mday = ((date[0] - '0') * 10) + (date[1] - '0');
+
+ monstr = date + 3;
+ timstr = date + 10;
+ gmtstr = NULL;
+
+ TIMEPARSE(ds, timstr[0],timstr[1], timstr[3],timstr[4], '0','0');
+ }
+ else if (apr_date_checkmask(date, "# @$$ ## ##:## *")) {
+ /* Loser format. This is quite bogus. */
+ ds.tm_year = ((date[6] - '0') * 10) + (date[7] - '0');
+
+ if (ds.tm_year < 70)
+ ds.tm_year += 100;
+
+ ds.tm_mday = (date[0] - '0');
+
+ monstr = date + 2;
+ timstr = date + 9;
+ gmtstr = NULL;
+
+ TIMEPARSE(ds, timstr[0],timstr[1], timstr[3],timstr[4], '0','0');
+ }
+ else if (apr_date_checkmask(date, "## @$$ ## #:##:## *")) {
+ /* Loser format. This is quite bogus. */
+ ds.tm_year = ((date[7] - '0') * 10) + (date[8] - '0');
+
+ if (ds.tm_year < 70)
+ ds.tm_year += 100;
+
+ ds.tm_mday = ((date[0] - '0') * 10) + (date[1] - '0');
+
+ monstr = date + 3;
+ timstr = date + 9;
+ gmtstr = date + 18;
+
+ TIMEPARSE(ds, '0',timstr[1], timstr[3],timstr[4], timstr[6],timstr[7]);
+ }
+ else if (apr_date_checkmask(date, "# @$$ ## #:##:## *")) {
+ /* Loser format. This is quite bogus. */
+ ds.tm_year = ((date[6] - '0') * 10) + (date[7] - '0');
+
+ if (ds.tm_year < 70)
+ ds.tm_year += 100;
+
+ ds.tm_mday = (date[0] - '0');
+
+ monstr = date + 2;
+ timstr = date + 8;
+ gmtstr = date + 17;
+
+ TIMEPARSE(ds, '0',timstr[1], timstr[3],timstr[4], timstr[6],timstr[7]);
+ }
+ else if (apr_date_checkmask(date, " # @$$ #### ##:##:## *")) {
+ /* RFC 1123 format with a space instead of a leading zero. */
+ ds.tm_year = ((date[7] - '0') * 10 + (date[8] - '0') - 19) * 100;
+
+ if (ds.tm_year < 0)
+ return APR_DATE_BAD;
+
+ ds.tm_year += ((date[9] - '0') * 10) + (date[10] - '0');
+
+ ds.tm_mday = (date[1] - '0');
+
+ monstr = date + 3;
+ timstr = date + 12;
+ gmtstr = date + 21;
+
+ TIMEPARSE_STD(ds, timstr);
+ }
+ else if (apr_date_checkmask(date, "##-@$$-#### ##:##:## *")) {
+ /* RFC 1123 with dashes instead of spaces between date/month/year
+ * This also looks like RFC 850 with four digit years.
+ */
+ ds.tm_year = ((date[7] - '0') * 10 + (date[8] - '0') - 19) * 100;
+ if (ds.tm_year < 0)
+ return APR_DATE_BAD;
+
+ ds.tm_year += ((date[9] - '0') * 10) + (date[10] - '0');
+
+ ds.tm_mday = ((date[0] - '0') * 10) + (date[1] - '0');
+
+ monstr = date + 3;
+ timstr = date + 12;
+ gmtstr = date + 21;
+
+ TIMEPARSE_STD(ds, timstr);
+ }
+ else
+ return APR_DATE_BAD;
+
+ if (ds.tm_mday <= 0 || ds.tm_mday > 31)
+ return APR_DATE_BAD;
+
+ if ((ds.tm_hour > 23) || (ds.tm_min > 59) || (ds.tm_sec > 61))
+ return APR_DATE_BAD;
+
+ mint = (monstr[0] << 16) | (monstr[1] << 8) | monstr[2];
+ for (mon = 0; mon < 12; mon++)
+ if (mint == months[mon])
+ break;
+
+ if (mon == 12)
+ return APR_DATE_BAD;
+
+ if ((ds.tm_mday == 31) && (mon == 3 || mon == 5 || mon == 8 || mon == 10))
+ return APR_DATE_BAD;
+
+ /* February gets special check for leapyear */
+
+ if ((mon == 1) &&
+ ((ds.tm_mday > 29)
+ || ((ds.tm_mday == 29)
+ && ((ds.tm_year & 3)
+ || (((ds.tm_year % 100) == 0)
+ && (((ds.tm_year % 400) != 100)))))))
+ return APR_DATE_BAD;
+
+ ds.tm_mon = mon;
+
+ /* tm_gmtoff is the number of seconds off of GMT the time is.
+ *
+ * We only currently support: [+-]ZZZZ where Z is the offset in
+ * hours from GMT.
+ *
+ * If there is any confusion, tm_gmtoff will remain 0.
+ */
+ ds.tm_gmtoff = 0;
+
+ /* Do we have a timezone ? */
+ if (gmtstr) {
+ int offset;
+ switch (*gmtstr) {
+ case '-':
+ offset = atoi(gmtstr+1);
+ ds.tm_gmtoff -= (offset / 100) * 60 * 60;
+ ds.tm_gmtoff -= (offset % 100) * 60;
+ break;
+ case '+':
+ offset = atoi(gmtstr+1);
+ ds.tm_gmtoff += (offset / 100) * 60 * 60;
+ ds.tm_gmtoff += (offset % 100) * 60;
+ break;
+ }
+ }
+
+ /* apr_time_exp_get uses tm_usec field, but it hasn't been set yet.
+ * It should be safe to just zero out this value.
+ * tm_usec is the number of microseconds into the second. HTTP only
+ * cares about second granularity.
+ */
+ ds.tm_usec = 0;
+
+ if (apr_time_exp_gmt_get(&result, &ds) != APR_SUCCESS)
+ return APR_DATE_BAD;
+
+ return result;
+}
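
For illustration, a minimal caller of the two parsers above might look
like the following sketch (it assumes an initialized APR application;
try_parse is a made-up helper, and APR_TIME_T_FMT is the printf format
macro from apr_time.h):

    #include <stdio.h>
    #include "apr_date.h"

    static void try_parse(const char *s)
    {
        apr_time_t t = apr_date_parse_rfc(s);  /* APR_DATE_BAD on failure */
        if (t == APR_DATE_BAD)
            printf("unparseable: %s\n", s);
        else
            printf("%s -> %" APR_TIME_T_FMT " usec since epoch\n", s, t);
    }

    /* try_parse("Sun, 06 Nov 1994 08:49:37 GMT");  strict HTTP form
     * try_parse("Sun, 6 Nov 94 08:49 GMT");        lenient RFC-only form
     */
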
diff --git a/misc/apr_queue.c b/misc/apr_queue.c
new file mode 100644
index 0000000..82859c8
--- /dev/null
+++ b/misc/apr_queue.c
@@ -0,0 +1,398 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "apr.h"
+
+#if APR_HAVE_STDIO_H
+#include <stdio.h>
+#endif
+#if APR_HAVE_STDLIB_H
+#include <stdlib.h>
+#endif
+#if APR_HAVE_UNISTD_H
+#include <unistd.h>
+#endif
+
+#include "apu.h"
+#include "apr_portable.h"
+#include "apr_thread_mutex.h"
+#include "apr_thread_cond.h"
+#include "apr_errno.h"
+#include "apr_queue.h"
+
+#if APR_HAS_THREADS
+/*
+ * define this to get debug messages
+ *
+#define QUEUE_DEBUG
+ */
+
+struct apr_queue_t {
+ void **data;
+ unsigned int nelts; /**< # elements */
+ unsigned int in; /**< next empty location */
+ unsigned int out; /**< next filled location */
+ unsigned int bounds;/**< max size of queue */
+ unsigned int full_waiters;
+ unsigned int empty_waiters;
+ apr_thread_mutex_t *one_big_mutex;
+ apr_thread_cond_t *not_empty;
+ apr_thread_cond_t *not_full;
+ int terminated;
+};
+
+#ifdef QUEUE_DEBUG
+static void Q_DBG(char *msg, apr_queue_t *q) {
+ fprintf(stderr, "%ld\t#%d in %d out %d\t%s\n",
+ apr_os_thread_current(),
+ q->nelts, q->in, q->out,
+ msg
+ );
+}
+#else
+#define Q_DBG(x,y)
+#endif
+
+/**
+ * Detects when the apr_queue_t is full. This utility function is expected
+ * to be called from within critical sections, and is not threadsafe.
+ */
+#define apr_queue_full(queue) ((queue)->nelts == (queue)->bounds)
+
+/**
+ * Detects when the apr_queue_t is empty. This utility function is expected
+ * to be called from within critical sections, and is not threadsafe.
+ */
+#define apr_queue_empty(queue) ((queue)->nelts == 0)
+
+/**
+ * Callback routine that is called to destroy this
+ * apr_queue_t when its pool is destroyed.
+ */
+static apr_status_t queue_destroy(void *data)
+{
+ apr_queue_t *queue = data;
+
+ /* Ignore errors here, we can't do anything about them anyway. */
+
+ apr_thread_cond_destroy(queue->not_empty);
+ apr_thread_cond_destroy(queue->not_full);
+ apr_thread_mutex_destroy(queue->one_big_mutex);
+
+ return APR_SUCCESS;
+}
+
+/**
+ * Initialize the apr_queue_t.
+ */
+APU_DECLARE(apr_status_t) apr_queue_create(apr_queue_t **q,
+ unsigned int queue_capacity,
+ apr_pool_t *a)
+{
+ apr_status_t rv;
+ apr_queue_t *queue;
+ queue = apr_palloc(a, sizeof(apr_queue_t));
+ *q = queue;
+
+ /* nested doesn't work ;( */
+ rv = apr_thread_mutex_create(&queue->one_big_mutex,
+ APR_THREAD_MUTEX_UNNESTED,
+ a);
+ if (rv != APR_SUCCESS) {
+ return rv;
+ }
+
+ rv = apr_thread_cond_create(&queue->not_empty, a);
+ if (rv != APR_SUCCESS) {
+ return rv;
+ }
+
+ rv = apr_thread_cond_create(&queue->not_full, a);
+ if (rv != APR_SUCCESS) {
+ return rv;
+ }
+
+ /* Set all the data in the queue to NULL */
+ queue->data = apr_pcalloc(a, queue_capacity * sizeof(void*));
+ queue->bounds = queue_capacity;
+ queue->nelts = 0;
+ queue->in = 0;
+ queue->out = 0;
+ queue->terminated = 0;
+ queue->full_waiters = 0;
+ queue->empty_waiters = 0;
+
+ apr_pool_cleanup_register(a, queue, queue_destroy, apr_pool_cleanup_null);
+
+ return APR_SUCCESS;
+}
+
+/**
+ * Push new data onto the queue. Blocks if the queue is full. Once
+ * the push operation has completed, it signals other threads waiting
+ * in apr_queue_pop() that they may continue consuming entries.
+ */
+APU_DECLARE(apr_status_t) apr_queue_push(apr_queue_t *queue, void *data)
+{
+ apr_status_t rv;
+
+ if (queue->terminated) {
+ return APR_EOF; /* no more elements ever again */
+ }
+
+ rv = apr_thread_mutex_lock(queue->one_big_mutex);
+ if (rv != APR_SUCCESS) {
+ return rv;
+ }
+
+ if (apr_queue_full(queue)) {
+ if (!queue->terminated) {
+ queue->full_waiters++;
+ rv = apr_thread_cond_wait(queue->not_full, queue->one_big_mutex);
+ queue->full_waiters--;
+ if (rv != APR_SUCCESS) {
+ apr_thread_mutex_unlock(queue->one_big_mutex);
+ return rv;
+ }
+ }
+ /* If we wake up and it's still full, then we were interrupted */
+ if (apr_queue_full(queue)) {
+ Q_DBG("queue full (intr)", queue);
+ rv = apr_thread_mutex_unlock(queue->one_big_mutex);
+ if (rv != APR_SUCCESS) {
+ return rv;
+ }
+ if (queue->terminated) {
+ return APR_EOF; /* no more elements ever again */
+ }
+ else {
+ return APR_EINTR;
+ }
+ }
+ }
+
+ queue->data[queue->in] = data;
+ queue->in++;
+ if (queue->in >= queue->bounds)
+ queue->in -= queue->bounds;
+ queue->nelts++;
+
+ if (queue->empty_waiters) {
+ Q_DBG("sig !empty", queue);
+ rv = apr_thread_cond_signal(queue->not_empty);
+ if (rv != APR_SUCCESS) {
+ apr_thread_mutex_unlock(queue->one_big_mutex);
+ return rv;
+ }
+ }
+
+ rv = apr_thread_mutex_unlock(queue->one_big_mutex);
+ return rv;
+}
+
+/**
+ * Push new data onto the queue. If the queue is full, return APR_EAGAIN. If
+ * the push operation completes successfully, it signals other threads
+ * waiting in apr_queue_pop() that they may continue consuming entries.
+ */
+APU_DECLARE(apr_status_t) apr_queue_trypush(apr_queue_t *queue, void *data)
+{
+ apr_status_t rv;
+
+ if (queue->terminated) {
+ return APR_EOF; /* no more elements ever again */
+ }
+
+ rv = apr_thread_mutex_lock(queue->one_big_mutex);
+ if (rv != APR_SUCCESS) {
+ return rv;
+ }
+
+ if (apr_queue_full(queue)) {
+ rv = apr_thread_mutex_unlock(queue->one_big_mutex);
+ return APR_EAGAIN;
+ }
+
+ queue->data[queue->in] = data;
+ queue->in++;
+ if (queue->in >= queue->bounds)
+ queue->in -= queue->bounds;
+ queue->nelts++;
+
+ if (queue->empty_waiters) {
+ Q_DBG("sig !empty", queue);
+ rv = apr_thread_cond_signal(queue->not_empty);
+ if (rv != APR_SUCCESS) {
+ apr_thread_mutex_unlock(queue->one_big_mutex);
+ return rv;
+ }
+ }
+
+ rv = apr_thread_mutex_unlock(queue->one_big_mutex);
+ return rv;
+}
+
+/**
+ * Returns the current number of elements in the queue; not thread safe.
+ */
+APU_DECLARE(unsigned int) apr_queue_size(apr_queue_t *queue) {
+ return queue->nelts;
+}
+
+/**
+ * Retrieves the next item from the queue. If there are no
+ * items available, it will block until one becomes available.
+ * Once retrieved, the item is placed into the address specified by
+ * 'data'.
+ */
+APU_DECLARE(apr_status_t) apr_queue_pop(apr_queue_t *queue, void **data)
+{
+ apr_status_t rv;
+
+ if (queue->terminated) {
+ return APR_EOF; /* no more elements ever again */
+ }
+
+ rv = apr_thread_mutex_lock(queue->one_big_mutex);
+ if (rv != APR_SUCCESS) {
+ return rv;
+ }
+
+ /* Keep waiting until we wake up and find that the queue is not empty. */
+ if (apr_queue_empty(queue)) {
+ if (!queue->terminated) {
+ queue->empty_waiters++;
+ rv = apr_thread_cond_wait(queue->not_empty, queue->one_big_mutex);
+ queue->empty_waiters--;
+ if (rv != APR_SUCCESS) {
+ apr_thread_mutex_unlock(queue->one_big_mutex);
+ return rv;
+ }
+ }
+ /* If we wake up and it's still empty, then we were interrupted */
+ if (apr_queue_empty(queue)) {
+ Q_DBG("queue empty (intr)", queue);
+ rv = apr_thread_mutex_unlock(queue->one_big_mutex);
+ if (rv != APR_SUCCESS) {
+ return rv;
+ }
+ if (queue->terminated) {
+ return APR_EOF; /* no more elements ever again */
+ }
+ else {
+ return APR_EINTR;
+ }
+ }
+ }
+
+ *data = queue->data[queue->out];
+ queue->nelts--;
+
+ queue->out++;
+ if (queue->out >= queue->bounds)
+ queue->out -= queue->bounds;
+ if (queue->full_waiters) {
+ Q_DBG("signal !full", queue);
+ rv = apr_thread_cond_signal(queue->not_full);
+ if (rv != APR_SUCCESS) {
+ apr_thread_mutex_unlock(queue->one_big_mutex);
+ return rv;
+ }
+ }
+
+ rv = apr_thread_mutex_unlock(queue->one_big_mutex);
+ return rv;
+}
+
+/**
+ * Retrieves the next item from the queue. If there are no
+ * items available, return APR_EAGAIN. Once retrieved,
+ * the item is placed into the address specified by 'data'.
+ */
+APU_DECLARE(apr_status_t) apr_queue_trypop(apr_queue_t *queue, void **data)
+{
+ apr_status_t rv;
+
+ if (queue->terminated) {
+ return APR_EOF; /* no more elements ever again */
+ }
+
+ rv = apr_thread_mutex_lock(queue->one_big_mutex);
+ if (rv != APR_SUCCESS) {
+ return rv;
+ }
+
+ if (apr_queue_empty(queue)) {
+ rv = apr_thread_mutex_unlock(queue->one_big_mutex);
+ return APR_EAGAIN;
+ }
+
+ *data = queue->data[queue->out];
+ queue->nelts--;
+
+ queue->out++;
+ if (queue->out >= queue->bounds)
+ queue->out -= queue->bounds;
+ if (queue->full_waiters) {
+ Q_DBG("signal !full", queue);
+ rv = apr_thread_cond_signal(queue->not_full);
+ if (rv != APR_SUCCESS) {
+ apr_thread_mutex_unlock(queue->one_big_mutex);
+ return rv;
+ }
+ }
+
+ rv = apr_thread_mutex_unlock(queue->one_big_mutex);
+ return rv;
+}
+
+APU_DECLARE(apr_status_t) apr_queue_interrupt_all(apr_queue_t *queue)
+{
+ apr_status_t rv;
+ Q_DBG("intr all", queue);
+ if ((rv = apr_thread_mutex_lock(queue->one_big_mutex)) != APR_SUCCESS) {
+ return rv;
+ }
+ apr_thread_cond_broadcast(queue->not_empty);
+ apr_thread_cond_broadcast(queue->not_full);
+
+ if ((rv = apr_thread_mutex_unlock(queue->one_big_mutex)) != APR_SUCCESS) {
+ return rv;
+ }
+
+ return APR_SUCCESS;
+}
+
+APU_DECLARE(apr_status_t) apr_queue_term(apr_queue_t *queue)
+{
+ apr_status_t rv;
+
+ if ((rv = apr_thread_mutex_lock(queue->one_big_mutex)) != APR_SUCCESS) {
+ return rv;
+ }
+
+ /* we must hold one_big_mutex when setting this... otherwise,
+ * we could end up setting it and waking everybody up just after a
+ * would-be popper checks it but right before they block
+ */
+ queue->terminated = 1;
+ if ((rv = apr_thread_mutex_unlock(queue->one_big_mutex)) != APR_SUCCESS) {
+ return rv;
+ }
+ return apr_queue_interrupt_all(queue);
+}
+
+#endif /* APR_HAS_THREADS */
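
For illustration, a minimal producer/consumer sketch over the queue above
(it assumes APR was built with thread support and that a pool exists;
consumer and produce are made-up names):

    #include "apr_queue.h"
    #include "apr_thread_proc.h"

    static void * APR_THREAD_FUNC consumer(apr_thread_t *thd, void *arg)
    {
        apr_queue_t *q = arg;
        void *item;
        /* pop blocks while the queue is empty; it returns APR_EOF once
         * the queue is terminated, APR_EINTR after interrupt_all() */
        while (apr_queue_pop(q, &item) == APR_SUCCESS) {
            /* ... process item ... */
        }
        return NULL;
    }

    static void produce(apr_pool_t *pool)
    {
        apr_queue_t *q;
        apr_thread_t *thd;
        apr_status_t rv;

        apr_queue_create(&q, 16, pool);            /* capacity 16 */
        apr_thread_create(&thd, NULL, consumer, q, pool);
        apr_queue_push(q, (void *)"work");         /* blocks if full */
        apr_queue_term(q);                         /* wake and stop consumers */
        apr_thread_join(&rv, thd);
    }
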
diff --git a/misc/apr_reslist.c b/misc/apr_reslist.c
new file mode 100644
index 0000000..12ae96a
--- /dev/null
+++ b/misc/apr_reslist.c
@@ -0,0 +1,479 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <assert.h>
+
+#include "apu.h"
+#include "apr_reslist.h"
+#include "apr_errno.h"
+#include "apr_strings.h"
+#include "apr_thread_mutex.h"
+#include "apr_thread_cond.h"
+#include "apr_ring.h"
+
+/**
+ * A single resource element.
+ */
+struct apr_res_t {
+ apr_time_t freed;
+ void *opaque;
+ APR_RING_ENTRY(apr_res_t) link;
+};
+typedef struct apr_res_t apr_res_t;
+
+/**
+ * A ring of resources representing the list of available resources.
+ */
+APR_RING_HEAD(apr_resring_t, apr_res_t);
+typedef struct apr_resring_t apr_resring_t;
+
+struct apr_reslist_t {
+ apr_pool_t *pool; /* the pool used in constructor and destructor calls */
+ int ntotal; /* total number of resources managed by this list */
+ int nidle; /* number of available resources */
+ int min; /* desired minimum number of available resources */
+ int smax; /* soft maximum on the total number of resources */
+ int hmax; /* hard maximum on the total number of resources */
+ apr_interval_time_t ttl; /* TTL when we have too many resources */
+ apr_interval_time_t timeout; /* Timeout for waiting on resource */
+ apr_reslist_constructor constructor;
+ apr_reslist_destructor destructor;
+ void *params; /* opaque data passed to constructor and destructor calls */
+ apr_resring_t avail_list;
+ apr_resring_t free_list;
+#if APR_HAS_THREADS
+ apr_thread_mutex_t *listlock;
+ apr_thread_cond_t *avail;
+#endif
+};
+
+/**
+ * Grab a resource from the front of the resource list.
+ * Assumes: that the reslist is locked.
+ */
+static apr_res_t *pop_resource(apr_reslist_t *reslist)
+{
+ apr_res_t *res;
+ res = APR_RING_FIRST(&reslist->avail_list);
+ APR_RING_REMOVE(res, link);
+ reslist->nidle--;
+ return res;
+}
+
+/**
+ * Add a resource to the beginning of the list, set the time at which
+ * it was added to the list.
+ * Assumes: that the reslist is locked.
+ */
+static void push_resource(apr_reslist_t *reslist, apr_res_t *resource)
+{
+ APR_RING_INSERT_HEAD(&reslist->avail_list, resource, apr_res_t, link);
+ if (reslist->ttl) {
+ resource->freed = apr_time_now();
+ }
+ reslist->nidle++;
+}
+
+/**
+ * Get a resource container from the free list or create a new one.
+ */
+static apr_res_t *get_container(apr_reslist_t *reslist)
+{
+ apr_res_t *res;
+
+ if (!APR_RING_EMPTY(&reslist->free_list, apr_res_t, link)) {
+ res = APR_RING_FIRST(&reslist->free_list);
+ APR_RING_REMOVE(res, link);
+ }
+ else
+ res = apr_pcalloc(reslist->pool, sizeof(*res));
+ return res;
+}
+
+/**
+ * Free up a resource container by placing it on the free list.
+ */
+static void free_container(apr_reslist_t *reslist, apr_res_t *container)
+{
+ APR_RING_INSERT_TAIL(&reslist->free_list, container, apr_res_t, link);
+}
+
+/**
+ * Create a new resource and return it.
+ * Assumes: that the reslist is locked.
+ */
+static apr_status_t create_resource(apr_reslist_t *reslist, apr_res_t **ret_res)
+{
+ apr_status_t rv;
+ apr_res_t *res;
+
+ res = get_container(reslist);
+
+ rv = reslist->constructor(&res->opaque, reslist->params, reslist->pool);
+
+ *ret_res = res;
+ return rv;
+}
+
+/**
+ * Destroy a single idle resource.
+ * Assumes: that the reslist is locked.
+ */
+static apr_status_t destroy_resource(apr_reslist_t *reslist, apr_res_t *res)
+{
+ return reslist->destructor(res->opaque, reslist->params, reslist->pool);
+}
+
+static apr_status_t reslist_cleanup(void *data_)
+{
+ apr_status_t rv = APR_SUCCESS;
+ apr_reslist_t *rl = data_;
+ apr_res_t *res;
+
+#if APR_HAS_THREADS
+ apr_thread_mutex_lock(rl->listlock);
+#endif
+
+ while (rl->nidle > 0) {
+ apr_status_t rv1;
+ res = pop_resource(rl);
+ rl->ntotal--;
+ rv1 = destroy_resource(rl, res);
+ if (rv1 != APR_SUCCESS) {
+ rv = rv1; /* loses info in the unlikely event of
+ * multiple *different* failures */
+ }
+ free_container(rl, res);
+ }
+
+ assert(rl->nidle == 0);
+ assert(rl->ntotal == 0);
+
+#if APR_HAS_THREADS
+ apr_thread_mutex_unlock(rl->listlock);
+ apr_thread_mutex_destroy(rl->listlock);
+ apr_thread_cond_destroy(rl->avail);
+#endif
+
+ return rv;
+}
+
+/**
+ * Perform routine maintenance on the resource list. This call
+ * may instantiate new resources or expire old resources.
+ */
+APU_DECLARE(apr_status_t) apr_reslist_maintain(apr_reslist_t *reslist)
+{
+ apr_time_t now;
+ apr_status_t rv;
+ apr_res_t *res;
+ int created_one = 0;
+
+#if APR_HAS_THREADS
+ apr_thread_mutex_lock(reslist->listlock);
+#endif
+
+ /* Check if we need to create more resources, and if we are allowed to. */
+ while (reslist->nidle < reslist->min && reslist->ntotal < reslist->hmax) {
+ /* Create the resource */
+ rv = create_resource(reslist, &res);
+ if (rv != APR_SUCCESS) {
+ free_container(reslist, res);
+#if APR_HAS_THREADS
+ apr_thread_mutex_unlock(reslist->listlock);
+#endif
+ return rv;
+ }
+ /* Add it to the list */
+ push_resource(reslist, res);
+ /* Update our counters */
+ reslist->ntotal++;
+ /* If someone is waiting on that guy, wake them up. */
+#if APR_HAS_THREADS
+ rv = apr_thread_cond_signal(reslist->avail);
+ if (rv != APR_SUCCESS) {
+ apr_thread_mutex_unlock(reslist->listlock);
+ return rv;
+ }
+#endif
+ created_one++;
+ }
+
+ /* We don't need to see if we're over the max if we were under it before,
+ * nor need we check for expiry if no ttl is configured.
+ */
+ if (created_one || !reslist->ttl) {
+#if APR_HAS_THREADS
+ apr_thread_mutex_unlock(reslist->listlock);
+#endif
+ return APR_SUCCESS;
+ }
+
+ /* Check if we need to expire old resources */
+ now = apr_time_now();
+ while (reslist->nidle > reslist->smax && reslist->nidle > 0) {
+ /* Peek at the last resource in the list */
+ res = APR_RING_LAST(&reslist->avail_list);
+ /* See if the oldest entry should be expired */
+ if (now - res->freed < reslist->ttl) {
+ /* If this entry is too young, none of the others
+ * will be ready to be expired either, so we are done. */
+ break;
+ }
+ APR_RING_REMOVE(res, link);
+ reslist->nidle--;
+ reslist->ntotal--;
+ rv = destroy_resource(reslist, res);
+ free_container(reslist, res);
+ if (rv != APR_SUCCESS) {
+#if APR_HAS_THREADS
+ apr_thread_mutex_unlock(reslist->listlock);
+#endif
+ return rv;
+ }
+ }
+
+#if APR_HAS_THREADS
+ apr_thread_mutex_unlock(reslist->listlock);
+#endif
+ return APR_SUCCESS;
+}
+
+APU_DECLARE(apr_status_t) apr_reslist_create(apr_reslist_t **reslist,
+ int min, int smax, int hmax,
+ apr_interval_time_t ttl,
+ apr_reslist_constructor con,
+ apr_reslist_destructor de,
+ void *params,
+ apr_pool_t *pool)
+{
+ apr_status_t rv;
+ apr_reslist_t *rl;
+
+ /* Do some sanity checks so we don't thrash around in the
+ * maintenance routine later. */
+ if (min < 0 || min > smax || min > hmax || smax > hmax || hmax == 0 ||
+ ttl < 0) {
+ return APR_EINVAL;
+ }
+
+#if !APR_HAS_THREADS
+ /* There can be only one resource when we have no threads. */
+ if (min > 0) {
+ min = 1;
+ }
+ if (smax > 0) {
+ smax = 1;
+ }
+ hmax = 1;
+#endif
+
+ rl = apr_pcalloc(pool, sizeof(*rl));
+ rl->pool = pool;
+ rl->min = min;
+ rl->smax = smax;
+ rl->hmax = hmax;
+ rl->ttl = ttl;
+ rl->constructor = con;
+ rl->destructor = de;
+ rl->params = params;
+
+ APR_RING_INIT(&rl->avail_list, apr_res_t, link);
+ APR_RING_INIT(&rl->free_list, apr_res_t, link);
+
+#if APR_HAS_THREADS
+ rv = apr_thread_mutex_create(&rl->listlock, APR_THREAD_MUTEX_DEFAULT,
+ pool);
+ if (rv != APR_SUCCESS) {
+ return rv;
+ }
+ rv = apr_thread_cond_create(&rl->avail, pool);
+ if (rv != APR_SUCCESS) {
+ return rv;
+ }
+#endif
+
+ rv = apr_reslist_maintain(rl);
+ if (rv != APR_SUCCESS) {
+ /* Destroy what we've created so far.
+ */
+ reslist_cleanup(rl);
+ return rv;
+ }
+
+ apr_pool_cleanup_register(rl->pool, rl, reslist_cleanup,
+ apr_pool_cleanup_null);
+
+ *reslist = rl;
+
+ return APR_SUCCESS;
+}
+
+APU_DECLARE(apr_status_t) apr_reslist_destroy(apr_reslist_t *reslist)
+{
+ return apr_pool_cleanup_run(reslist->pool, reslist, reslist_cleanup);
+}
+
+APU_DECLARE(apr_status_t) apr_reslist_acquire(apr_reslist_t *reslist,
+ void **resource)
+{
+ apr_status_t rv;
+ apr_res_t *res;
+ apr_time_t now = 0;
+
+#if APR_HAS_THREADS
+ apr_thread_mutex_lock(reslist->listlock);
+#endif
+ /* If there are idle resources on the available list, use
+ * them right away. */
+ if (reslist->ttl) {
+ now = apr_time_now();
+ }
+ while (reslist->nidle > 0) {
+ /* Pop off the first resource */
+ res = pop_resource(reslist);
+ if (reslist->ttl && (now - res->freed >= reslist->ttl)) {
+ /* this res is expired - kill it */
+ reslist->ntotal--;
+ rv = destroy_resource(reslist, res);
+ free_container(reslist, res);
+ if (rv != APR_SUCCESS) {
+#if APR_HAS_THREADS
+ apr_thread_mutex_unlock(reslist->listlock);
+#endif
+ return rv; /* FIXME: this might cause unnecessary failures */
+ }
+ continue;
+ }
+ *resource = res->opaque;
+ free_container(reslist, res);
+#if APR_HAS_THREADS
+ apr_thread_mutex_unlock(reslist->listlock);
+#endif
+ return APR_SUCCESS;
+ }
+ /* If we've hit our max, block until we're allowed to create
+ * a new one, or something becomes free. */
+ while (reslist->ntotal >= reslist->hmax && reslist->nidle <= 0) {
+#if APR_HAS_THREADS
+ if (reslist->timeout) {
+ if ((rv = apr_thread_cond_timedwait(reslist->avail,
+ reslist->listlock, reslist->timeout)) != APR_SUCCESS) {
+ apr_thread_mutex_unlock(reslist->listlock);
+ return rv;
+ }
+ }
+ else {
+ apr_thread_cond_wait(reslist->avail, reslist->listlock);
+ }
+#else
+ return APR_EAGAIN;
+#endif
+ }
+ /* If we popped out of the loop, first try to see if there
+ * are new resources available for immediate use. */
+ if (reslist->nidle > 0) {
+ res = pop_resource(reslist);
+ *resource = res->opaque;
+ free_container(reslist, res);
+#if APR_HAS_THREADS
+ apr_thread_mutex_unlock(reslist->listlock);
+#endif
+ return APR_SUCCESS;
+ }
+ /* Otherwise the reason we dropped out of the loop
+ * was because there is a new slot available, so create
+ * a resource to fill the slot and use it. */
+ else {
+ rv = create_resource(reslist, &res);
+ if (rv == APR_SUCCESS) {
+ reslist->ntotal++;
+ *resource = res->opaque;
+ }
+ free_container(reslist, res);
+#if APR_HAS_THREADS
+ apr_thread_mutex_unlock(reslist->listlock);
+#endif
+ return rv;
+ }
+}
+
+APU_DECLARE(apr_status_t) apr_reslist_release(apr_reslist_t *reslist,
+ void *resource)
+{
+ apr_res_t *res;
+
+#if APR_HAS_THREADS
+ apr_thread_mutex_lock(reslist->listlock);
+#endif
+ res = get_container(reslist);
+ res->opaque = resource;
+ push_resource(reslist, res);
+#if APR_HAS_THREADS
+ apr_thread_cond_signal(reslist->avail);
+ apr_thread_mutex_unlock(reslist->listlock);
+#endif
+
+ return apr_reslist_maintain(reslist);
+}
+
+APU_DECLARE(void) apr_reslist_timeout_set(apr_reslist_t *reslist,
+ apr_interval_time_t timeout)
+{
+ reslist->timeout = timeout;
+}
+
+APU_DECLARE(apr_uint32_t) apr_reslist_acquired_count(apr_reslist_t *reslist)
+{
+ apr_uint32_t count;
+
+#if APR_HAS_THREADS
+ apr_thread_mutex_lock(reslist->listlock);
+#endif
+ count = reslist->ntotal - reslist->nidle;
+#if APR_HAS_THREADS
+ apr_thread_mutex_unlock(reslist->listlock);
+#endif
+
+ return count;
+}
+
+APU_DECLARE(apr_status_t) apr_reslist_invalidate(apr_reslist_t *reslist,
+ void *resource)
+{
+ apr_status_t ret;
+#if APR_HAS_THREADS
+ apr_thread_mutex_lock(reslist->listlock);
+#endif
+ ret = reslist->destructor(resource, reslist->params, reslist->pool);
+ reslist->ntotal--;
+#if APR_HAS_THREADS
+ apr_thread_cond_signal(reslist->avail);
+ apr_thread_mutex_unlock(reslist->listlock);
+#endif
+ return ret;
+}
+
+APU_DECLARE(void) apr_reslist_cleanup_order_set(apr_reslist_t *rl,
+ apr_uint32_t mode)
+{
+ apr_pool_cleanup_kill(rl->pool, rl, reslist_cleanup);
+ if (mode == APR_RESLIST_CLEANUP_FIRST)
+ apr_pool_pre_cleanup_register(rl->pool, rl, reslist_cleanup);
+ else
+ apr_pool_cleanup_register(rl->pool, rl, reslist_cleanup,
+ apr_pool_cleanup_null);
+}
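
For illustration, a minimal driver for the resource list above; the
constructor here just allocates a dummy buffer standing in for a real
resource such as a database connection (my_con, my_de and example are
made-up names):

    #include "apr_reslist.h"

    static apr_status_t my_con(void **res, void *params, apr_pool_t *pool)
    {
        *res = apr_pcalloc(pool, 1024);   /* stand-in for a real resource */
        return APR_SUCCESS;
    }

    static apr_status_t my_de(void *res, void *params, apr_pool_t *pool)
    {
        return APR_SUCCESS;               /* nothing to tear down here */
    }

    static void example(apr_pool_t *pool)
    {
        apr_reslist_t *rl;
        void *res;

        /* keep >= 2 idle, expire idlers beyond 5 after a 60s TTL,
         * never exceed 10 resources in total */
        apr_reslist_create(&rl, 2, 5, 10, apr_time_from_sec(60),
                           my_con, my_de, NULL, pool);
        if (apr_reslist_acquire(rl, &res) == APR_SUCCESS) {
            /* ... use res ... */
            apr_reslist_release(rl, res); /* or apr_reslist_invalidate() */
        }
    }
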
diff --git a/misc/apr_rmm.c b/misc/apr_rmm.c
new file mode 100644
index 0000000..1fd420b
--- /dev/null
+++ b/misc/apr_rmm.c
@@ -0,0 +1,457 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "apr_general.h"
+#include "apr_rmm.h"
+#include "apr_errno.h"
+#include "apr_lib.h"
+#include "apr_strings.h"
+
+/* The RMM region is made up of two doubly-linked lists of blocks: the
+ * list of used blocks, and the list of free blocks (either list may
+ * be empty). The base pointer, rmm->base, points at the beginning of
+ * the shmem region in use. Each block is addressable by an
+ * apr_rmm_off_t value, which represents the offset from the base
+ * pointer. The term "address" is used here to mean such a value; an
+ * "offset from rmm->base".
+ *
+ * The RMM region contains exactly one "rmm_hdr_block_t" structure,
+ * the "header block", which is always stored at the base pointer.
+ * The firstused field in this structure is the address of the first
+ * block in the "used blocks" list; the firstfree field is the address
+ * of the first block in the "free blocks" list.
+ *
+ * Each block is prefixed by an "rmm_block_t" structure, followed by
+ * the caller-usable region represented by the block. The next and
+ * prev fields of the structure are zero if the block is at the end or
+ * beginning of the linked-list respectively, or otherwise hold the
+ * address of the next and previous blocks in the list. ("address 0",
+ * i.e. rmm->base is *not* a valid address for a block, since the
+ * header block is always stored at that address).
+ *
+ * At creation, the RMM region is initialized to hold a single block
+ * on the free list representing the entire available shm segment
+ * (minus header block); subsequent allocation and deallocation of
+ * blocks involves splitting blocks and coalescing adjacent blocks,
+ * and switching them between the free and used lists as
+ * appropriate. */
+
+typedef struct rmm_block_t {
+ apr_size_t size;
+ apr_rmm_off_t prev;
+ apr_rmm_off_t next;
+} rmm_block_t;
+
+/* Always at our apr_rmm_off(0):
+ */
+typedef struct rmm_hdr_block_t {
+ apr_size_t abssize;
+ apr_rmm_off_t /* rmm_block_t */ firstused;
+ apr_rmm_off_t /* rmm_block_t */ firstfree;
+} rmm_hdr_block_t;
+
+#define RMM_HDR_BLOCK_SIZE (APR_ALIGN_DEFAULT(sizeof(rmm_hdr_block_t)))
+#define RMM_BLOCK_SIZE (APR_ALIGN_DEFAULT(sizeof(rmm_block_t)))
+
+struct apr_rmm_t {
+ apr_pool_t *p;
+ rmm_hdr_block_t *base;
+ apr_size_t size;
+ apr_anylock_t lock;
+};
+
+static apr_rmm_off_t find_block_by_offset(apr_rmm_t *rmm, apr_rmm_off_t next,
+ apr_rmm_off_t find, int includes)
+{
+ apr_rmm_off_t prev = 0;
+
+ while (next) {
+ struct rmm_block_t *blk = (rmm_block_t*)((char*)rmm->base + next);
+
+ if (find == next)
+ return next;
+
+ /* Overshot? */
+ if (find < next)
+ return includes ? prev : 0;
+
+ prev = next;
+ next = blk->next;
+ }
+ return includes ? prev : 0;
+}
+
+static apr_rmm_off_t find_block_of_size(apr_rmm_t *rmm, apr_size_t size)
+{
+ apr_rmm_off_t next = rmm->base->firstfree;
+ apr_rmm_off_t best = 0;
+ apr_rmm_off_t bestsize = 0;
+
+ while (next) {
+ struct rmm_block_t *blk = (rmm_block_t*)((char*)rmm->base + next);
+
+ if (blk->size == size)
+ return next;
+
+ if (blk->size >= size) {
+ /* XXX: suboptimal algorithm
+ * We need the most thorough best-fit logic, since we can
+ * never grow our rmm; we are stuck once we hit the wall.
+ */
+ if (!bestsize || (blk->size < bestsize)) {
+ bestsize = blk->size;
+ best = next;
+ }
+ }
+
+ next = blk->next;
+ }
+
+ if (bestsize > RMM_BLOCK_SIZE + size) {
+ struct rmm_block_t *blk = (rmm_block_t*)((char*)rmm->base + best);
+ struct rmm_block_t *new = (rmm_block_t*)((char*)rmm->base + best + size);
+
+ new->size = blk->size - size;
+ new->next = blk->next;
+ new->prev = best;
+
+ blk->size = size;
+ blk->next = best + size;
+
+ if (new->next) {
+ blk = (rmm_block_t*)((char*)rmm->base + new->next);
+ blk->prev = best + size;
+ }
+ }
+
+ return best;
+}
+
+static void move_block(apr_rmm_t *rmm, apr_rmm_off_t this, int free)
+{
+ struct rmm_block_t *blk = (rmm_block_t*)((char*)rmm->base + this);
+
+ /* close the gap */
+ if (blk->prev) {
+ struct rmm_block_t *prev = (rmm_block_t*)((char*)rmm->base + blk->prev);
+ prev->next = blk->next;
+ }
+ else {
+ if (free) {
+ rmm->base->firstused = blk->next;
+ }
+ else {
+ rmm->base->firstfree = blk->next;
+ }
+ }
+ if (blk->next) {
+ struct rmm_block_t *next = (rmm_block_t*)((char*)rmm->base + blk->next);
+ next->prev = blk->prev;
+ }
+
+ /* now find it in the other list, pushing it to the head if required */
+ if (free) {
+ blk->prev = find_block_by_offset(rmm, rmm->base->firstfree, this, 1);
+ if (!blk->prev) {
+ blk->next = rmm->base->firstfree;
+ rmm->base->firstfree = this;
+ }
+ }
+ else {
+ blk->prev = find_block_by_offset(rmm, rmm->base->firstused, this, 1);
+ if (!blk->prev) {
+ blk->next = rmm->base->firstused;
+ rmm->base->firstused = this;
+ }
+ }
+
+ /* and open it up */
+ if (blk->prev) {
+ struct rmm_block_t *prev = (rmm_block_t*)((char*)rmm->base + blk->prev);
+ if (free && (blk->prev + prev->size == this)) {
+ /* Collapse us into our predecessor */
+ prev->size += blk->size;
+ this = blk->prev;
+ blk = prev;
+ }
+ else {
+ blk->next = prev->next;
+ prev->next = this;
+ }
+ }
+
+ if (blk->next) {
+ struct rmm_block_t *next = (rmm_block_t*)((char*)rmm->base + blk->next);
+ if (free && (this + blk->size == blk->next)) {
+ /* Collapse us into our successor */
+ blk->size += next->size;
+ blk->next = next->next;
+ if (blk->next) {
+ next = (rmm_block_t*)((char*)rmm->base + blk->next);
+ next->prev = this;
+ }
+ }
+ else {
+ next->prev = this;
+ }
+ }
+}
+
+APU_DECLARE(apr_status_t) apr_rmm_init(apr_rmm_t **rmm, apr_anylock_t *lock,
+ void *base, apr_size_t size,
+ apr_pool_t *p)
+{
+ apr_status_t rv;
+ rmm_block_t *blk;
+ apr_anylock_t nulllock;
+
+ if (!lock) {
+ nulllock.type = apr_anylock_none;
+ nulllock.lock.pm = NULL;
+ lock = &nulllock;
+ }
+ if ((rv = APR_ANYLOCK_LOCK(lock)) != APR_SUCCESS)
+ return rv;
+
+ (*rmm) = (apr_rmm_t *)apr_pcalloc(p, sizeof(apr_rmm_t));
+ (*rmm)->p = p;
+ (*rmm)->base = base;
+ (*rmm)->size = size;
+ (*rmm)->lock = *lock;
+
+ (*rmm)->base->abssize = size;
+ (*rmm)->base->firstused = 0;
+ (*rmm)->base->firstfree = RMM_HDR_BLOCK_SIZE;
+
+ blk = (rmm_block_t *)((char*)base + (*rmm)->base->firstfree);
+
+ blk->size = size - (*rmm)->base->firstfree;
+ blk->prev = 0;
+ blk->next = 0;
+
+ return APR_ANYLOCK_UNLOCK(lock);
+}
+
+APU_DECLARE(apr_status_t) apr_rmm_destroy(apr_rmm_t *rmm)
+{
+ apr_status_t rv;
+ rmm_block_t *blk;
+
+ if ((rv = APR_ANYLOCK_LOCK(&rmm->lock)) != APR_SUCCESS) {
+ return rv;
+ }
+ /* Blast it all --- no going back :) */
+ if (rmm->base->firstused) {
+ apr_rmm_off_t this = rmm->base->firstused;
+ do {
+ blk = (rmm_block_t *)((char*)rmm->base + this);
+ this = blk->next;
+ blk->next = blk->prev = 0;
+ } while (this);
+ rmm->base->firstused = 0;
+ }
+ if (rmm->base->firstfree) {
+ apr_rmm_off_t this = rmm->base->firstfree;
+ do {
+ blk = (rmm_block_t *)((char*)rmm->base + this);
+ this = blk->next;
+ blk->next = blk->prev = 0;
+ } while (this);
+ rmm->base->firstfree = 0;
+ }
+ rmm->base->abssize = 0;
+ rmm->size = 0;
+
+ return APR_ANYLOCK_UNLOCK(&rmm->lock);
+}
+
+APU_DECLARE(apr_status_t) apr_rmm_attach(apr_rmm_t **rmm, apr_anylock_t *lock,
+ void *base, apr_pool_t *p)
+{
+ apr_anylock_t nulllock;
+
+ if (!lock) {
+ nulllock.type = apr_anylock_none;
+ nulllock.lock.pm = NULL;
+ lock = &nulllock;
+ }
+
+ /* sanity would be good here */
+ (*rmm) = (apr_rmm_t *)apr_pcalloc(p, sizeof(apr_rmm_t));
+ (*rmm)->p = p;
+ (*rmm)->base = base;
+ (*rmm)->size = (*rmm)->base->abssize;
+ (*rmm)->lock = *lock;
+ return APR_SUCCESS;
+}
+
+APU_DECLARE(apr_status_t) apr_rmm_detach(apr_rmm_t *rmm)
+{
+ /* A noop until we introduce locked/refcounts */
+ return APR_SUCCESS;
+}
+
+APU_DECLARE(apr_rmm_off_t) apr_rmm_malloc(apr_rmm_t *rmm, apr_size_t reqsize)
+{
+ apr_size_t size;
+ apr_rmm_off_t this;
+
+ size = APR_ALIGN_DEFAULT(reqsize) + RMM_BLOCK_SIZE;
+ if (size < reqsize) {
+ return 0;
+ }
+
+ APR_ANYLOCK_LOCK(&rmm->lock);
+
+ this = find_block_of_size(rmm, size);
+
+ if (this) {
+ move_block(rmm, this, 0);
+ this += RMM_BLOCK_SIZE;
+ }
+
+ APR_ANYLOCK_UNLOCK(&rmm->lock);
+ return this;
+}
+
+APU_DECLARE(apr_rmm_off_t) apr_rmm_calloc(apr_rmm_t *rmm, apr_size_t reqsize)
+{
+ apr_size_t size;
+ apr_rmm_off_t this;
+
+ size = APR_ALIGN_DEFAULT(reqsize) + RMM_BLOCK_SIZE;
+ if (size < reqsize) {
+ return 0;
+ }
+
+ APR_ANYLOCK_LOCK(&rmm->lock);
+
+ this = find_block_of_size(rmm, size);
+
+ if (this) {
+ move_block(rmm, this, 0);
+ this += RMM_BLOCK_SIZE;
+ memset((char*)rmm->base + this, 0, size - RMM_BLOCK_SIZE);
+ }
+
+ APR_ANYLOCK_UNLOCK(&rmm->lock);
+ return this;
+}
+
+APU_DECLARE(apr_rmm_off_t) apr_rmm_realloc(apr_rmm_t *rmm, void *entity,
+ apr_size_t reqsize)
+{
+ apr_rmm_off_t this;
+ apr_rmm_off_t old;
+ struct rmm_block_t *blk;
+ apr_size_t size, oldsize;
+
+ if (!entity) {
+ return apr_rmm_malloc(rmm, reqsize);
+ }
+
+ size = APR_ALIGN_DEFAULT(reqsize);
+ if (size < reqsize) {
+ return 0;
+ }
+ old = apr_rmm_offset_get(rmm, entity);
+
+ if ((this = apr_rmm_malloc(rmm, size)) == 0) {
+ return 0;
+ }
+
+ blk = (rmm_block_t*)((char*)rmm->base + old - RMM_BLOCK_SIZE);
+ oldsize = blk->size;
+
+ memcpy(apr_rmm_addr_get(rmm, this),
+ apr_rmm_addr_get(rmm, old), oldsize < size ? oldsize : size);
+ apr_rmm_free(rmm, old);
+
+ return this;
+}
+
+APU_DECLARE(apr_status_t) apr_rmm_free(apr_rmm_t *rmm, apr_rmm_off_t this)
+{
+ apr_status_t rv;
+ struct rmm_block_t *blk;
+
+ /* A little sanity check is always healthy, especially here.
+ * If we really cared, we could make this compile-time
+ */
+ if (this < RMM_HDR_BLOCK_SIZE + RMM_BLOCK_SIZE) {
+ return APR_EINVAL;
+ }
+
+ this -= RMM_BLOCK_SIZE;
+
+ blk = (rmm_block_t*)((char*)rmm->base + this);
+
+ if ((rv = APR_ANYLOCK_LOCK(&rmm->lock)) != APR_SUCCESS) {
+ return rv;
+ }
+ if (blk->prev) {
+ struct rmm_block_t *prev = (rmm_block_t*)((char*)rmm->base + blk->prev);
+ if (prev->next != this) {
+ APR_ANYLOCK_UNLOCK(&rmm->lock);
+ return APR_EINVAL;
+ }
+ }
+ else {
+ if (rmm->base->firstused != this) {
+ APR_ANYLOCK_UNLOCK(&rmm->lock);
+ return APR_EINVAL;
+ }
+ }
+
+ if (blk->next) {
+ struct rmm_block_t *next = (rmm_block_t*)((char*)rmm->base + blk->next);
+ if (next->prev != this) {
+ APR_ANYLOCK_UNLOCK(&rmm->lock);
+ return APR_EINVAL;
+ }
+ }
+
+ /* Ok, it remained [apparently] sane, so unlink it
+ */
+ move_block(rmm, this, 1);
+
+ return APR_ANYLOCK_UNLOCK(&rmm->lock);
+}
+
+APU_DECLARE(void *) apr_rmm_addr_get(apr_rmm_t *rmm, apr_rmm_off_t entity)
+{
+ /* debug-sanity checking here would be good
+ */
+ return (void*)((char*)rmm->base + entity);
+}
+
+APU_DECLARE(apr_rmm_off_t) apr_rmm_offset_get(apr_rmm_t *rmm, void* entity)
+{
+ /* debug, or always, sanity checking here would be good
+ * since the primitive is apr_rmm_off_t, I don't mind penalizing
+ * inverse conversions for safety, unless someone can prove that
+ * there is no choice in some cases.
+ */
+ return ((char*)entity - (char*)rmm->base);
+}
+
+APU_DECLARE(apr_size_t) apr_rmm_overhead_get(int n)
+{
+ /* overhead per block is at most APR_ALIGN_DEFAULT(1) wasted bytes
+ * for alignment overhead, plus the size of the rmm_block_t
+ * structure. */
+ return RMM_HDR_BLOCK_SIZE + n * (RMM_BLOCK_SIZE + APR_ALIGN_DEFAULT(1));
+}
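
For illustration, a minimal sketch of the offset-based allocator above,
carved out of an ordinary aligned buffer (in real use the base would
typically be an apr_shm_t segment shared between processes; example is a
made-up name):

    #include "apr_rmm.h"

    static void example(apr_pool_t *pool)
    {
        static apr_uint64_t seg[1024];    /* 8 KiB aligned stand-in for shm */
        apr_rmm_t *rmm;
        apr_rmm_off_t off;

        apr_rmm_init(&rmm, NULL, seg, sizeof(seg), pool); /* NULL: no lock */
        off = apr_rmm_malloc(rmm, 128);   /* an offset, 0 on failure */
        if (off) {
            char *p = apr_rmm_addr_get(rmm, off);  /* offset -> pointer */
            p[0] = 'x';
            apr_rmm_free(rmm, off);
        }
        apr_rmm_destroy(rmm);
    }
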
diff --git a/misc/apr_thread_pool.c b/misc/apr_thread_pool.c
new file mode 100644
index 0000000..5aa3b65
--- /dev/null
+++ b/misc/apr_thread_pool.c
@@ -0,0 +1,1019 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed
+ * with this work for additional information regarding copyright
+ * ownership. The ASF licenses this file to you under the Apache
+ * License, Version 2.0 (the "License"); you may not use this file
+ * except in compliance with the License. You may obtain a copy of
+ * the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */
+
+#include <assert.h>
+#include "apr_thread_pool.h"
+#include "apr_ring.h"
+#include "apr_thread_cond.h"
+#include "apr_portable.h"
+
+#if APR_HAS_THREADS
+
+#define TASK_PRIORITY_SEGS 4
+#define TASK_PRIORITY_SEG(x) (((x)->dispatch.priority & 0xFF) / 64)
+
+typedef struct apr_thread_pool_task
+{
+ APR_RING_ENTRY(apr_thread_pool_task) link;
+ apr_thread_start_t func;
+ void *param;
+ void *owner;
+ union
+ {
+ apr_byte_t priority;
+ apr_time_t time;
+ } dispatch;
+} apr_thread_pool_task_t;
+
+APR_RING_HEAD(apr_thread_pool_tasks, apr_thread_pool_task);
+
+struct apr_thread_list_elt
+{
+ APR_RING_ENTRY(apr_thread_list_elt) link;
+ apr_thread_t *thd;
+ void *current_owner;
+ enum { TH_RUN, TH_STOP, TH_PROBATION } state;
+ int signal_work_done;
+};
+
+APR_RING_HEAD(apr_thread_list, apr_thread_list_elt);
+
+struct apr_thread_pool
+{
+ apr_pool_t *pool;
+ volatile apr_size_t thd_max;
+ volatile apr_size_t idle_max;
+ volatile apr_interval_time_t idle_wait;
+ volatile apr_size_t thd_cnt;
+ volatile apr_size_t idle_cnt;
+ volatile apr_size_t busy_cnt;
+ volatile apr_size_t task_cnt;
+ volatile apr_size_t scheduled_task_cnt;
+ volatile apr_size_t threshold;
+ volatile apr_size_t tasks_run;
+ volatile apr_size_t tasks_high;
+ volatile apr_size_t thd_high;
+ volatile apr_size_t thd_timed_out;
+ struct apr_thread_pool_tasks *tasks;
+ struct apr_thread_pool_tasks *scheduled_tasks;
+ struct apr_thread_list *busy_thds;
+ struct apr_thread_list *idle_thds;
+ struct apr_thread_list *dead_thds;
+ apr_thread_cond_t *more_work;
+ apr_thread_cond_t *work_done;
+ apr_thread_cond_t *all_done;
+ apr_thread_mutex_t *lock;
+ volatile int terminated;
+ struct apr_thread_pool_tasks *recycled_tasks;
+ struct apr_thread_list *recycled_thds;
+ apr_thread_pool_task_t *task_idx[TASK_PRIORITY_SEGS];
+};
+
+static apr_status_t thread_pool_construct(apr_thread_pool_t **tp,
+ apr_size_t init_threads,
+ apr_size_t max_threads,
+ apr_pool_t *pool)
+{
+ apr_status_t rv;
+ apr_thread_pool_t *me;
+
+ me = *tp = apr_pcalloc(pool, sizeof(apr_thread_pool_t));
+ me->thd_max = max_threads;
+ me->idle_max = init_threads;
+ me->threshold = init_threads / 2;
+
+ /* This pool will be used by different threads. As we cannot ensure that
+ * our caller won't use the pool without acquiring the mutex, we must
+ * create a new sub pool.
+ */
+ rv = apr_pool_create(&me->pool, pool);
+ if (APR_SUCCESS != rv) {
+ return rv;
+ }
+    /* Create the mutex on the parent pool so that it is always alive for
+     * apr_thread_pool_{push,schedule,top}() callers.
+     */
+ rv = apr_thread_mutex_create(&me->lock, APR_THREAD_MUTEX_NESTED, pool);
+ if (APR_SUCCESS != rv) {
+ return rv;
+ }
+ rv = apr_thread_cond_create(&me->more_work, me->pool);
+ if (APR_SUCCESS != rv) {
+ apr_thread_mutex_destroy(me->lock);
+ return rv;
+ }
+ rv = apr_thread_cond_create(&me->work_done, me->pool);
+ if (APR_SUCCESS != rv) {
+ apr_thread_cond_destroy(me->more_work);
+ apr_thread_mutex_destroy(me->lock);
+ return rv;
+ }
+ rv = apr_thread_cond_create(&me->all_done, me->pool);
+ if (APR_SUCCESS != rv) {
+ apr_thread_cond_destroy(me->work_done);
+ apr_thread_cond_destroy(me->more_work);
+ apr_thread_mutex_destroy(me->lock);
+ return rv;
+ }
+ me->tasks = apr_palloc(me->pool, sizeof(*me->tasks));
+ if (!me->tasks) {
+ goto CATCH_ENOMEM;
+ }
+ APR_RING_INIT(me->tasks, apr_thread_pool_task, link);
+ me->scheduled_tasks = apr_palloc(me->pool, sizeof(*me->scheduled_tasks));
+ if (!me->scheduled_tasks) {
+ goto CATCH_ENOMEM;
+ }
+ APR_RING_INIT(me->scheduled_tasks, apr_thread_pool_task, link);
+ me->recycled_tasks = apr_palloc(me->pool, sizeof(*me->recycled_tasks));
+ if (!me->recycled_tasks) {
+ goto CATCH_ENOMEM;
+ }
+ APR_RING_INIT(me->recycled_tasks, apr_thread_pool_task, link);
+ me->busy_thds = apr_palloc(me->pool, sizeof(*me->busy_thds));
+ if (!me->busy_thds) {
+ goto CATCH_ENOMEM;
+ }
+ APR_RING_INIT(me->busy_thds, apr_thread_list_elt, link);
+ me->idle_thds = apr_palloc(me->pool, sizeof(*me->idle_thds));
+ if (!me->idle_thds) {
+ goto CATCH_ENOMEM;
+ }
+ APR_RING_INIT(me->idle_thds, apr_thread_list_elt, link);
+ me->dead_thds = apr_palloc(me->pool, sizeof(*me->dead_thds));
+ if (!me->dead_thds) {
+ goto CATCH_ENOMEM;
+ }
+ APR_RING_INIT(me->dead_thds, apr_thread_list_elt, link);
+ me->recycled_thds = apr_palloc(me->pool, sizeof(*me->recycled_thds));
+ if (!me->recycled_thds) {
+ goto CATCH_ENOMEM;
+ }
+ APR_RING_INIT(me->recycled_thds, apr_thread_list_elt, link);
+ goto FINAL_EXIT;
+ CATCH_ENOMEM:
+ rv = APR_ENOMEM;
+ apr_thread_cond_destroy(me->all_done);
+ apr_thread_cond_destroy(me->work_done);
+ apr_thread_cond_destroy(me->more_work);
+ apr_thread_mutex_destroy(me->lock);
+ FINAL_EXIT:
+ return rv;
+}
+
+/*
+ * NOTE: This function is not thread safe by itself. Caller should hold the lock
+ */
+static apr_thread_pool_task_t *pop_task(apr_thread_pool_t * me)
+{
+ apr_thread_pool_task_t *task = NULL;
+ int seg;
+
+ /* check for scheduled tasks */
+ if (me->scheduled_task_cnt > 0) {
+ task = APR_RING_FIRST(me->scheduled_tasks);
+ assert(task != NULL);
+ assert(task !=
+ APR_RING_SENTINEL(me->scheduled_tasks, apr_thread_pool_task,
+ link));
+ /* if it's time */
+ if (task->dispatch.time <= apr_time_now()) {
+ --me->scheduled_task_cnt;
+ APR_RING_REMOVE(task, link);
+ return task;
+ }
+ }
+ /* check for normal tasks if we're not returning a scheduled task */
+ if (me->task_cnt == 0) {
+ return NULL;
+ }
+
+ task = APR_RING_FIRST(me->tasks);
+ assert(task != NULL);
+ assert(task != APR_RING_SENTINEL(me->tasks, apr_thread_pool_task, link));
+ --me->task_cnt;
+ seg = TASK_PRIORITY_SEG(task);
+ if (task == me->task_idx[seg]) {
+ me->task_idx[seg] = APR_RING_NEXT(task, link);
+ if (me->task_idx[seg] == APR_RING_SENTINEL(me->tasks,
+ apr_thread_pool_task, link)
+ || TASK_PRIORITY_SEG(me->task_idx[seg]) != seg) {
+ me->task_idx[seg] = NULL;
+ }
+ }
+ APR_RING_REMOVE(task, link);
+ return task;
+}
+
+static apr_interval_time_t waiting_time(apr_thread_pool_t * me)
+{
+ apr_thread_pool_task_t *task = NULL;
+
+ task = APR_RING_FIRST(me->scheduled_tasks);
+ assert(task != NULL);
+ assert(task !=
+ APR_RING_SENTINEL(me->scheduled_tasks, apr_thread_pool_task,
+ link));
+ return task->dispatch.time - apr_time_now();
+}
+
+/*
+ * NOTE: This function is not thread safe by itself. Caller should hold the lock
+ */
+static struct apr_thread_list_elt *elt_new(apr_thread_pool_t * me,
+ apr_thread_t * t)
+{
+ struct apr_thread_list_elt *elt;
+
+ if (APR_RING_EMPTY(me->recycled_thds, apr_thread_list_elt, link)) {
+ elt = apr_palloc(me->pool, sizeof(*elt));
+ if (NULL == elt) {
+ return NULL;
+ }
+ }
+ else {
+ elt = APR_RING_FIRST(me->recycled_thds);
+ APR_RING_REMOVE(elt, link);
+ }
+
+ APR_RING_ELEM_INIT(elt, link);
+ elt->thd = t;
+ elt->current_owner = NULL;
+ elt->signal_work_done = 0;
+ elt->state = TH_RUN;
+ return elt;
+}
+
+/*
+ * The worker thread function. Takes a task from the queue and performs it
+ * if there is one; otherwise, puts itself on the idle thread list and
+ * waits for a signal to wake up.
+ * The thread terminates and exits directly when it is asked to stop, after
+ * handling its current task if busy. The thread is then put on the
+ * dead_thds list and should be joined.
+ */
+static void *APR_THREAD_FUNC thread_pool_func(apr_thread_t * t, void *param)
+{
+ apr_thread_pool_t *me = param;
+ apr_thread_pool_task_t *task = NULL;
+ apr_interval_time_t wait;
+ struct apr_thread_list_elt *elt;
+
+ apr_thread_mutex_lock(me->lock);
+
+ elt = elt_new(me, t);
+ if (!elt) {
+ apr_thread_mutex_unlock(me->lock);
+ apr_thread_exit(t, APR_ENOMEM);
+ }
+
+ for (;;) {
+        /* If this is not a new element, it was awakened from the idle list */
+ if (APR_RING_NEXT(elt, link) != elt) {
+ --me->idle_cnt;
+ APR_RING_REMOVE(elt, link);
+ }
+
+ if (elt->state != TH_STOP) {
+ ++me->busy_cnt;
+ APR_RING_INSERT_TAIL(me->busy_thds, elt,
+ apr_thread_list_elt, link);
+ do {
+ task = pop_task(me);
+ if (!task) {
+ break;
+ }
+ ++me->tasks_run;
+ elt->current_owner = task->owner;
+ apr_thread_mutex_unlock(me->lock);
+
+ /* Run the task (or drop it if terminated already) */
+ if (!me->terminated) {
+ apr_thread_data_set(task, "apr_thread_pool_task", NULL, t);
+ task->func(t, task->param);
+ }
+
+ apr_thread_mutex_lock(me->lock);
+ APR_RING_INSERT_TAIL(me->recycled_tasks, task,
+ apr_thread_pool_task, link);
+ elt->current_owner = NULL;
+ if (elt->signal_work_done) {
+ elt->signal_work_done = 0;
+ apr_thread_cond_signal(me->work_done);
+ }
+ } while (elt->state != TH_STOP);
+ APR_RING_REMOVE(elt, link);
+ --me->busy_cnt;
+ }
+ assert(NULL == elt->current_owner);
+
+ /* thread should die? */
+ if (me->terminated
+ || elt->state != TH_RUN
+ || (me->idle_cnt >= me->idle_max
+ && (me->idle_max || !me->scheduled_task_cnt)
+ && !me->idle_wait)) {
+ if ((TH_PROBATION == elt->state) && me->idle_wait)
+ ++me->thd_timed_out;
+ break;
+ }
+
+        /* busy thread becomes idle */
+ ++me->idle_cnt;
+ APR_RING_INSERT_TAIL(me->idle_thds, elt, apr_thread_list_elt, link);
+
+        /*
+         * If there is a scheduled task, wait with a timeout so that this
+         * thread can run it when due; there is no guarantee that an idle
+         * thread would otherwise be woken up in time for the next
+         * scheduled task.
+         */
+ if (me->scheduled_task_cnt)
+ wait = waiting_time(me);
+ else if (me->idle_cnt > me->idle_max) {
+ wait = me->idle_wait;
+ elt->state = TH_PROBATION;
+ }
+ else
+ wait = -1;
+
+ if (wait >= 0) {
+ apr_thread_cond_timedwait(me->more_work, me->lock, wait);
+ }
+ else {
+ apr_thread_cond_wait(me->more_work, me->lock);
+ }
+ }
+
+ /* Dead thread, to be joined */
+ APR_RING_INSERT_TAIL(me->dead_thds, elt, apr_thread_list_elt, link);
+ if (--me->thd_cnt == 0 && me->terminated) {
+ apr_thread_cond_signal(me->all_done);
+ }
+ apr_thread_mutex_unlock(me->lock);
+
+ apr_thread_exit(t, APR_SUCCESS);
+    return NULL; /* should not get here, safety net */
+}
+
+/* Must be locked by the caller */
+static void join_dead_threads(apr_thread_pool_t *me)
+{
+ while (!APR_RING_EMPTY(me->dead_thds, apr_thread_list_elt, link)) {
+ struct apr_thread_list_elt *elt;
+ apr_status_t status;
+
+ elt = APR_RING_FIRST(me->dead_thds);
+ APR_RING_REMOVE(elt, link);
+ apr_thread_mutex_unlock(me->lock);
+
+ apr_thread_join(&status, elt->thd);
+
+ apr_thread_mutex_lock(me->lock);
+ APR_RING_INSERT_TAIL(me->recycled_thds, elt,
+ apr_thread_list_elt, link);
+ }
+}
+
+static apr_status_t thread_pool_cleanup(void *me)
+{
+ apr_thread_pool_t *_myself = me;
+
+ _myself->terminated = 1;
+ apr_thread_pool_tasks_cancel(_myself, NULL);
+ apr_thread_pool_thread_max_set(_myself, 0);
+ apr_thread_mutex_lock(_myself->lock);
+
+    /* Loop to be robust against spurious wakeups */
+    while (_myself->thd_cnt) {
+        apr_thread_cond_wait(_myself->all_done, _myself->lock);
+    }
+
+ /* All threads should be dead now, join them */
+ join_dead_threads(_myself);
+
+ apr_thread_mutex_unlock(_myself->lock);
+
+ return APR_SUCCESS;
+}
+
+APU_DECLARE(apr_status_t) apr_thread_pool_create(apr_thread_pool_t ** me,
+ apr_size_t init_threads,
+ apr_size_t max_threads,
+ apr_pool_t * pool)
+{
+ apr_thread_t *t;
+ apr_status_t rv = APR_SUCCESS;
+ apr_thread_pool_t *tp;
+
+ *me = NULL;
+
+ rv = thread_pool_construct(&tp, init_threads, max_threads, pool);
+ if (APR_SUCCESS != rv)
+ return rv;
+ apr_pool_pre_cleanup_register(tp->pool, tp, thread_pool_cleanup);
+
+    /* Grab the mutex as apr_thread_create() and thread_pool_func() will
+     * allocate from tp->pool. This is dangerous if there are multiple
+     * initial threads to create.
+     */
+ apr_thread_mutex_lock(tp->lock);
+ while (init_threads--) {
+ rv = apr_thread_create(&t, NULL, thread_pool_func, tp, tp->pool);
+ if (APR_SUCCESS != rv) {
+ break;
+ }
+ tp->thd_cnt++;
+ if (tp->thd_cnt > tp->thd_high) {
+ tp->thd_high = tp->thd_cnt;
+ }
+ }
+ apr_thread_mutex_unlock(tp->lock);
+
+ if (rv == APR_SUCCESS) {
+ *me = tp;
+ }
+
+ return rv;
+}
+
+APU_DECLARE(apr_status_t) apr_thread_pool_destroy(apr_thread_pool_t * me)
+{
+ /* Stop the threads before destroying me->pool: with APR <= 1.7 the
+ * threads' pools are children of me->pool and APR_POOL_DEBUG would
+ * deadlock if thread_pool_cleanup() is called while me->pool is
+ * destroyed (because of parent locking).
+ * With APR > 1.7 the threads' pools are unmanaged so there is no
+     * such issue, yet it does not hurt to stop the threads first.
+ */
+ apr_pool_cleanup_run(me->pool, me, thread_pool_cleanup);
+ apr_pool_destroy(me->pool);
+ return APR_SUCCESS;
+}
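+
+/* A minimal lifecycle sketch (an editor's illustration; my_task, do_work and
+ * data are hypothetical, everything else is this file's API):
+ *
+ *     static void *APR_THREAD_FUNC my_task(apr_thread_t *t, void *param)
+ *     {
+ *         do_work(param);
+ *         return NULL;
+ *     }
+ *
+ *     apr_thread_pool_t *tp;
+ *     if (apr_thread_pool_create(&tp, 2, 8, pool) == APR_SUCCESS) {
+ *         apr_thread_pool_push(tp, my_task, data,
+ *                              APR_THREAD_TASK_PRIORITY_NORMAL, NULL);
+ *         apr_thread_pool_destroy(tp);
+ *     }
+ *
+ * Destroying the pool (or the pool it was created from, via the registered
+ * cleanup) cancels pending tasks and stops and joins all threads.
+ */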
+
+/*
+ * NOTE: This function is not thread safe by itself. Caller should hold the lock
+ */
+static apr_thread_pool_task_t *task_new(apr_thread_pool_t * me,
+ apr_thread_start_t func,
+ void *param, apr_byte_t priority,
+ void *owner, apr_time_t time)
+{
+ apr_thread_pool_task_t *t;
+
+ if (APR_RING_EMPTY(me->recycled_tasks, apr_thread_pool_task, link)) {
+ t = apr_palloc(me->pool, sizeof(*t));
+ if (NULL == t) {
+ return NULL;
+ }
+ }
+ else {
+ t = APR_RING_FIRST(me->recycled_tasks);
+ APR_RING_REMOVE(t, link);
+ }
+ APR_RING_ELEM_INIT(t, link);
+
+ t->func = func;
+ t->param = param;
+ t->owner = owner;
+ if (time > 0) {
+ t->dispatch.time = apr_time_now() + time;
+ }
+ else {
+ t->dispatch.priority = priority;
+ }
+ return t;
+}
+
+/*
+ * Test if the task is the only one within its priority segment.
+ * If it is not, return the first element with the same or lower priority.
+ * Otherwise, add the task to the queue and return NULL.
+ *
+ * NOTE: This function is not thread safe by itself. Caller should hold the lock
+ */
+static apr_thread_pool_task_t *add_if_empty(apr_thread_pool_t * me,
+ apr_thread_pool_task_t * const t)
+{
+ int seg;
+ int next;
+ apr_thread_pool_task_t *t_next;
+
+ seg = TASK_PRIORITY_SEG(t);
+ if (me->task_idx[seg]) {
+ assert(APR_RING_SENTINEL(me->tasks, apr_thread_pool_task, link) !=
+ me->task_idx[seg]);
+ t_next = me->task_idx[seg];
+ while (t_next->dispatch.priority > t->dispatch.priority) {
+ t_next = APR_RING_NEXT(t_next, link);
+ if (APR_RING_SENTINEL(me->tasks, apr_thread_pool_task, link) ==
+ t_next) {
+ return t_next;
+ }
+ }
+ return t_next;
+ }
+
+ for (next = seg - 1; next >= 0; next--) {
+ if (me->task_idx[next]) {
+ APR_RING_INSERT_BEFORE(me->task_idx[next], t, link);
+ break;
+ }
+ }
+ if (0 > next) {
+ APR_RING_INSERT_TAIL(me->tasks, t, apr_thread_pool_task, link);
+ }
+ me->task_idx[seg] = t;
+ return NULL;
+}
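+
+/* An illustration (a sketch, not normative): with tasks queued at priorities
+ * 250 and 200 (segment 3) and 100 (segment 1), the ring is kept sorted by
+ * descending priority and task_idx[] caches the head of each segment:
+ *
+ *     tasks:    [250] -> [200] -> [100] -> sentinel
+ *     task_idx: [3] -> task(250), [1] -> task(100), [2] and [0] are NULL
+ *
+ * A task pushed at priority 90 (segment 1) starts scanning at task_idx[1],
+ * skipping the higher segments, and ends up after task(100).
+ */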
+
+/*
+ * Schedule a task to run in "time" microseconds. Find the spot in the
+ * scheduled tasks ring where the time fits, and wake a thread so the task
+ * is serviced once that time is reached.
+ */
+static apr_status_t schedule_task(apr_thread_pool_t *me,
+ apr_thread_start_t func, void *param,
+ void *owner, apr_interval_time_t time)
+{
+ apr_thread_pool_task_t *t;
+ apr_thread_pool_task_t *t_loc;
+ apr_thread_t *thd;
+ apr_status_t rv = APR_SUCCESS;
+
+ apr_thread_mutex_lock(me->lock);
+
+ if (me->terminated) {
+ /* Let the caller know that we are done */
+ apr_thread_mutex_unlock(me->lock);
+ return APR_NOTFOUND;
+ }
+
+ /* Maintain dead threads */
+ join_dead_threads(me);
+
+ t = task_new(me, func, param, 0, owner, time);
+ if (NULL == t) {
+ apr_thread_mutex_unlock(me->lock);
+ return APR_ENOMEM;
+ }
+ t_loc = APR_RING_FIRST(me->scheduled_tasks);
+ while (NULL != t_loc) {
+        /* if its time is earlier than this entry's, insert ahead of it */
+ if (t->dispatch.time < t_loc->dispatch.time) {
+ ++me->scheduled_task_cnt;
+ APR_RING_INSERT_BEFORE(t_loc, t, link);
+ break;
+ }
+ else {
+ t_loc = APR_RING_NEXT(t_loc, link);
+ if (t_loc ==
+ APR_RING_SENTINEL(me->scheduled_tasks, apr_thread_pool_task,
+ link)) {
+ ++me->scheduled_task_cnt;
+ APR_RING_INSERT_TAIL(me->scheduled_tasks, t,
+ apr_thread_pool_task, link);
+ break;
+ }
+ }
+ }
+ /* there should be at least one thread for scheduled tasks */
+ if (0 == me->thd_cnt) {
+ rv = apr_thread_create(&thd, NULL, thread_pool_func, me, me->pool);
+ if (APR_SUCCESS == rv) {
+ ++me->thd_cnt;
+ if (me->thd_cnt > me->thd_high)
+ me->thd_high = me->thd_cnt;
+ }
+ }
+ apr_thread_cond_signal(me->more_work);
+ apr_thread_mutex_unlock(me->lock);
+
+ return rv;
+}
+
+static apr_status_t add_task(apr_thread_pool_t *me, apr_thread_start_t func,
+ void *param, apr_byte_t priority, int push,
+ void *owner)
+{
+ apr_thread_pool_task_t *t;
+ apr_thread_pool_task_t *t_loc;
+ apr_thread_t *thd;
+ apr_status_t rv = APR_SUCCESS;
+
+ apr_thread_mutex_lock(me->lock);
+
+ if (me->terminated) {
+ /* Let the caller know that we are done */
+ apr_thread_mutex_unlock(me->lock);
+ return APR_NOTFOUND;
+ }
+
+ /* Maintain dead threads */
+ join_dead_threads(me);
+
+ t = task_new(me, func, param, priority, owner, 0);
+ if (NULL == t) {
+ apr_thread_mutex_unlock(me->lock);
+ return APR_ENOMEM;
+ }
+
+ t_loc = add_if_empty(me, t);
+ if (NULL == t_loc) {
+ goto FINAL_EXIT;
+ }
+
+ if (push) {
+ while (APR_RING_SENTINEL(me->tasks, apr_thread_pool_task, link) !=
+ t_loc && t_loc->dispatch.priority >= t->dispatch.priority) {
+ t_loc = APR_RING_NEXT(t_loc, link);
+ }
+ }
+ APR_RING_INSERT_BEFORE(t_loc, t, link);
+ if (!push) {
+ if (t_loc == me->task_idx[TASK_PRIORITY_SEG(t)]) {
+ me->task_idx[TASK_PRIORITY_SEG(t)] = t;
+ }
+ }
+
+ FINAL_EXIT:
+ me->task_cnt++;
+ if (me->task_cnt > me->tasks_high)
+ me->tasks_high = me->task_cnt;
+ if (0 == me->thd_cnt || (0 == me->idle_cnt && me->thd_cnt < me->thd_max &&
+ me->task_cnt > me->threshold)) {
+ rv = apr_thread_create(&thd, NULL, thread_pool_func, me, me->pool);
+ if (APR_SUCCESS == rv) {
+ ++me->thd_cnt;
+ if (me->thd_cnt > me->thd_high)
+ me->thd_high = me->thd_cnt;
+ }
+ }
+
+ apr_thread_cond_signal(me->more_work);
+ apr_thread_mutex_unlock(me->lock);
+
+ return rv;
+}
+
+APU_DECLARE(apr_status_t) apr_thread_pool_push(apr_thread_pool_t *me,
+ apr_thread_start_t func,
+ void *param,
+ apr_byte_t priority,
+ void *owner)
+{
+ return add_task(me, func, param, priority, 1, owner);
+}
+
+APU_DECLARE(apr_status_t) apr_thread_pool_schedule(apr_thread_pool_t *me,
+ apr_thread_start_t func,
+ void *param,
+ apr_interval_time_t time,
+ void *owner)
+{
+ return schedule_task(me, func, param, owner, time);
+}
+
+APU_DECLARE(apr_status_t) apr_thread_pool_top(apr_thread_pool_t *me,
+ apr_thread_start_t func,
+ void *param,
+ apr_byte_t priority,
+ void *owner)
+{
+ return add_task(me, func, param, priority, 0, owner);
+}
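+
+/* An enqueueing sketch (an editor's illustration; tp, my_task and data are
+ * assumed from context):
+ *
+ *     // push: FIFO among tasks of equal priority
+ *     apr_thread_pool_push(tp, my_task, data,
+ *                          APR_THREAD_TASK_PRIORITY_HIGH, NULL);
+ *     // top: LIFO, jumps ahead of queued tasks of equal priority
+ *     apr_thread_pool_top(tp, my_task, data,
+ *                         APR_THREAD_TASK_PRIORITY_HIGH, NULL);
+ *     // schedule: run about 2 seconds from now ("time" is relative)
+ *     apr_thread_pool_schedule(tp, my_task, data,
+ *                              apr_time_from_sec(2), NULL);
+ */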
+
+static apr_status_t remove_scheduled_tasks(apr_thread_pool_t *me,
+ void *owner)
+{
+ apr_thread_pool_task_t *t_loc;
+ apr_thread_pool_task_t *next;
+
+ t_loc = APR_RING_FIRST(me->scheduled_tasks);
+ while (t_loc !=
+ APR_RING_SENTINEL(me->scheduled_tasks, apr_thread_pool_task,
+ link)) {
+ next = APR_RING_NEXT(t_loc, link);
+        /* remove the task if it matches the owner (NULL owner matches all) */
+ if (!owner || t_loc->owner == owner) {
+ --me->scheduled_task_cnt;
+ APR_RING_REMOVE(t_loc, link);
+ }
+ t_loc = next;
+ }
+ return APR_SUCCESS;
+}
+
+static apr_status_t remove_tasks(apr_thread_pool_t *me, void *owner)
+{
+ apr_thread_pool_task_t *t_loc;
+ apr_thread_pool_task_t *next;
+ int seg;
+
+ t_loc = APR_RING_FIRST(me->tasks);
+ while (t_loc != APR_RING_SENTINEL(me->tasks, apr_thread_pool_task, link)) {
+ next = APR_RING_NEXT(t_loc, link);
+ if (!owner || t_loc->owner == owner) {
+ --me->task_cnt;
+ seg = TASK_PRIORITY_SEG(t_loc);
+ if (t_loc == me->task_idx[seg]) {
+ me->task_idx[seg] = APR_RING_NEXT(t_loc, link);
+ if (me->task_idx[seg] == APR_RING_SENTINEL(me->tasks,
+ apr_thread_pool_task,
+ link)
+ || TASK_PRIORITY_SEG(me->task_idx[seg]) != seg) {
+ me->task_idx[seg] = NULL;
+ }
+ }
+ APR_RING_REMOVE(t_loc, link);
+ }
+ t_loc = next;
+ }
+ return APR_SUCCESS;
+}
+
+/* Must be locked by the caller */
+static void wait_on_busy_threads(apr_thread_pool_t *me, void *owner)
+{
+#ifndef NDEBUG
+ apr_os_thread_t *os_thread;
+#endif
+ struct apr_thread_list_elt *elt;
+
+ elt = APR_RING_FIRST(me->busy_thds);
+ while (elt != APR_RING_SENTINEL(me->busy_thds, apr_thread_list_elt, link)) {
+ if (owner ? owner != elt->current_owner : !elt->current_owner) {
+ elt = APR_RING_NEXT(elt, link);
+ continue;
+ }
+
+#ifndef NDEBUG
+ /* make sure the thread is not the one calling tasks_cancel */
+ apr_os_thread_get(&os_thread, elt->thd);
+#ifdef WIN32
+ /* hack for apr win32 bug */
+ assert(!apr_os_thread_equal(apr_os_thread_current(), os_thread));
+#else
+ assert(!apr_os_thread_equal(apr_os_thread_current(), *os_thread));
+#endif
+#endif
+
+ elt->signal_work_done = 1;
+ apr_thread_cond_wait(me->work_done, me->lock);
+
+ /* Restart */
+ elt = APR_RING_FIRST(me->busy_thds);
+ }
+
+ /* Maintain dead threads */
+ join_dead_threads(me);
+}
+
+APU_DECLARE(apr_status_t) apr_thread_pool_tasks_cancel(apr_thread_pool_t *me,
+ void *owner)
+{
+ apr_status_t rv = APR_SUCCESS;
+
+ apr_thread_mutex_lock(me->lock);
+
+ if (me->task_cnt > 0) {
+ rv = remove_tasks(me, owner);
+ }
+ if (me->scheduled_task_cnt > 0) {
+ rv = remove_scheduled_tasks(me, owner);
+ }
+
+ wait_on_busy_threads(me, owner);
+
+ apr_thread_mutex_unlock(me->lock);
+
+ return rv;
+}
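+
+/* An owner-based cancellation sketch (an editor's illustration): any stable
+ * pointer can serve as the owner cookie; a NULL owner cancels everything.
+ *
+ *     apr_thread_pool_push(tp, my_task, data1, 0, &owner1);
+ *     apr_thread_pool_push(tp, my_task, data2, 0, &owner2);
+ *     apr_thread_pool_tasks_cancel(tp, &owner1); // owner2's task survives
+ *
+ * The call also waits for busy threads currently running a matching task.
+ */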
+
+APU_DECLARE(apr_size_t) apr_thread_pool_tasks_count(apr_thread_pool_t *me)
+{
+ return me->task_cnt;
+}
+
+APU_DECLARE(apr_size_t)
+ apr_thread_pool_scheduled_tasks_count(apr_thread_pool_t *me)
+{
+ return me->scheduled_task_cnt;
+}
+
+APU_DECLARE(apr_size_t) apr_thread_pool_threads_count(apr_thread_pool_t *me)
+{
+ return me->thd_cnt;
+}
+
+APU_DECLARE(apr_size_t) apr_thread_pool_busy_count(apr_thread_pool_t *me)
+{
+ return me->busy_cnt;
+}
+
+APU_DECLARE(apr_size_t) apr_thread_pool_idle_count(apr_thread_pool_t *me)
+{
+ return me->idle_cnt;
+}
+
+APU_DECLARE(apr_size_t)
+ apr_thread_pool_tasks_run_count(apr_thread_pool_t * me)
+{
+ return me->tasks_run;
+}
+
+APU_DECLARE(apr_size_t)
+ apr_thread_pool_tasks_high_count(apr_thread_pool_t * me)
+{
+ return me->tasks_high;
+}
+
+APU_DECLARE(apr_size_t)
+ apr_thread_pool_threads_high_count(apr_thread_pool_t * me)
+{
+ return me->thd_high;
+}
+
+APU_DECLARE(apr_size_t)
+ apr_thread_pool_threads_idle_timeout_count(apr_thread_pool_t * me)
+{
+ return me->thd_timed_out;
+}
+
+APU_DECLARE(apr_size_t) apr_thread_pool_idle_max_get(apr_thread_pool_t *me)
+{
+ return me->idle_max;
+}
+
+APU_DECLARE(apr_interval_time_t)
+ apr_thread_pool_idle_wait_get(apr_thread_pool_t * me)
+{
+ return me->idle_wait;
+}
+
+/*
+ * Stop all threads beyond the first *cnt ones, and return the number of
+ * threads stopped in *cnt.
+ * NOTE: busy threads may become idle while this function runs.
+ */
+static void stop_threads(apr_thread_pool_t *me, apr_size_t *cnt, int idle)
+{
+ struct apr_thread_list *thds;
+ struct apr_thread_list_elt *elt, *last;
+ apr_size_t n, i;
+
+ apr_thread_mutex_lock(me->lock);
+
+ if (idle) {
+ thds = me->idle_thds;
+ n = me->idle_cnt;
+ }
+ else {
+ thds = me->busy_thds;
+ n = me->busy_cnt;
+ }
+ if (n <= *cnt) {
+ apr_thread_mutex_unlock(me->lock);
+ *cnt = 0;
+ return;
+ }
+
+ elt = APR_RING_FIRST(thds);
+ last = APR_RING_LAST(thds);
+ for (i = 0; i < *cnt; ++i) {
+ elt = APR_RING_NEXT(elt, link);
+ }
+ for (; i < n; ++i) {
+ elt->state = TH_STOP;
+ if (elt == last) {
+ break;
+ }
+ elt = APR_RING_NEXT(elt, link);
+ }
+ assert(i + 1 == n);
+    *cnt = n - *cnt; /* number of threads stopped */
+
+ join_dead_threads(me);
+
+ apr_thread_mutex_unlock(me->lock);
+}
+
+static apr_size_t stop_idle_threads(apr_thread_pool_t *me, apr_size_t cnt)
+{
+ stop_threads(me, &cnt, 1);
+ if (cnt) {
+ apr_thread_mutex_lock(me->lock);
+ apr_thread_cond_broadcast(me->more_work);
+ apr_thread_mutex_unlock(me->lock);
+ }
+ return cnt;
+}
+
+static apr_size_t stop_busy_threads(apr_thread_pool_t *me, apr_size_t cnt)
+{
+ stop_threads(me, &cnt, 0);
+ return cnt;
+}
+
+APU_DECLARE(apr_size_t) apr_thread_pool_idle_max_set(apr_thread_pool_t *me,
+ apr_size_t cnt)
+{
+ me->idle_max = cnt;
+ return stop_idle_threads(me, cnt);
+}
+
+APU_DECLARE(apr_interval_time_t)
+ apr_thread_pool_idle_wait_set(apr_thread_pool_t * me,
+ apr_interval_time_t timeout)
+{
+ apr_interval_time_t oldtime;
+
+ oldtime = me->idle_wait;
+ me->idle_wait = timeout;
+
+ return oldtime;
+}
+
+APU_DECLARE(apr_size_t) apr_thread_pool_thread_max_get(apr_thread_pool_t *me)
+{
+ return me->thd_max;
+}
+
+/*
+ * This function stops surplus worker threads to bring the pool down to the
+ * new limit.
+ * NOTE: busy threads may become idle while this function runs.
+ */
+APU_DECLARE(apr_size_t) apr_thread_pool_thread_max_set(apr_thread_pool_t *me,
+ apr_size_t cnt)
+{
+ apr_size_t n, i;
+
+ me->thd_max = cnt;
+ n = me->thd_cnt;
+ if (n <= cnt) {
+ return 0;
+ }
+ n -= cnt; /* #threads to stop */
+
+ i = me->idle_cnt;
+ if (n >= i) {
+ stop_busy_threads(me, n - i);
+ n = i; /* stop all idle threads */
+ }
+ stop_idle_threads(me, i - n);
+
+ return n;
+}
+
+APU_DECLARE(apr_size_t) apr_thread_pool_threshold_get(apr_thread_pool_t *me)
+{
+ return me->threshold;
+}
+
+APU_DECLARE(apr_size_t) apr_thread_pool_threshold_set(apr_thread_pool_t *me,
+ apr_size_t val)
+{
+ apr_size_t ov;
+
+ ov = me->threshold;
+ me->threshold = val;
+ return ov;
+}
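+
+/* For example (a sketch of the effect, given add_task()'s spawn condition):
+ * apr_thread_pool_threshold_set(tp, 0) makes the pool spawn a new thread,
+ * up to thd_max, whenever a task arrives while no thread is idle, instead
+ * of letting tasks queue up to the threshold first.
+ */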
+
+APU_DECLARE(apr_status_t) apr_thread_pool_task_owner_get(apr_thread_t *thd,
+ void **owner)
+{
+ apr_status_t rv;
+ apr_thread_pool_task_t *task;
+ void *data;
+
+ rv = apr_thread_data_get(&data, "apr_thread_pool_task", thd);
+ if (rv != APR_SUCCESS) {
+ return rv;
+ }
+
+ task = data;
+ if (!task) {
+ *owner = NULL;
+ return APR_BADARG;
+ }
+
+ *owner = task->owner;
+ return APR_SUCCESS;
+}
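+
+/* A usage sketch (an editor's illustration): from inside a running task,
+ * the owner cookie can be recovered through the worker thread handle.
+ *
+ *     static void *APR_THREAD_FUNC my_task(apr_thread_t *t, void *param)
+ *     {
+ *         void *owner;
+ *         if (apr_thread_pool_task_owner_get(t, &owner) == APR_SUCCESS) {
+ *             // owner is the pointer given to push/schedule/top
+ *         }
+ *         return NULL;
+ *     }
+ */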
+
+#endif /* APR_HAS_THREADS */
+
+/* vim: set ts=4 sw=4 et cin tw=80: */
diff --git a/misc/apu_dso.c b/misc/apu_dso.c
new file mode 100644
index 0000000..9d7f206
--- /dev/null
+++ b/misc/apu_dso.c
@@ -0,0 +1,209 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <ctype.h>
+#include <stdio.h>
+
+#include "apu_config.h"
+#include "apu.h"
+
+#include "apr_pools.h"
+#include "apr_tables.h"
+#include "apr_dso.h"
+#include "apr_strings.h"
+#include "apr_hash.h"
+#include "apr_file_io.h"
+#include "apr_env.h"
+#include "apr_atomic.h"
+
+#include "apu_internal.h"
+#include "apu_version.h"
+
+#if APU_DSO_BUILD
+
+#if APR_HAS_THREADS
+static apr_thread_mutex_t* mutex = NULL;
+#endif
+static apr_hash_t *dsos = NULL;
+static apr_uint32_t initialised = 0, in_init = 1;
+
+#if APR_HAS_THREADS
+apr_status_t apu_dso_mutex_lock()
+{
+ return apr_thread_mutex_lock(mutex);
+}
+apr_status_t apu_dso_mutex_unlock()
+{
+ return apr_thread_mutex_unlock(mutex);
+}
+#else
+apr_status_t apu_dso_mutex_lock() {
+ return APR_SUCCESS;
+}
+apr_status_t apu_dso_mutex_unlock() {
+ return APR_SUCCESS;
+}
+#endif
+
+static apr_status_t apu_dso_term(void *ptr)
+{
+ /* set statics to NULL so init can work again */
+ dsos = NULL;
+#if APR_HAS_THREADS
+ mutex = NULL;
+#endif
+
+ /* Everything else we need is handled by cleanups registered
+ * when we created mutexes and loaded DSOs
+ */
+ return APR_SUCCESS;
+}
+
+apr_status_t apu_dso_init(apr_pool_t *pool)
+{
+ apr_status_t ret = APR_SUCCESS;
+ apr_pool_t *parent;
+
+ if (apr_atomic_inc32(&initialised)) {
+ apr_atomic_set32(&initialised, 1); /* prevent wrap-around */
+
+ while (apr_atomic_read32(&in_init)) /* wait until we get fully inited */
+ ;
+
+ return APR_SUCCESS;
+ }
+
+ /* Top level pool scope, need process-scope lifetime */
+ for (parent = apr_pool_parent_get(pool);
+ parent && parent != pool;
+ parent = apr_pool_parent_get(pool))
+ pool = parent;
+
+ dsos = apr_hash_make(pool);
+
+#if APR_HAS_THREADS
+ ret = apr_thread_mutex_create(&mutex, APR_THREAD_MUTEX_DEFAULT, pool);
+ /* This already registers a pool cleanup */
+#endif
+
+ apr_pool_cleanup_register(pool, NULL, apu_dso_term,
+ apr_pool_cleanup_null);
+
+ apr_atomic_dec32(&in_init);
+
+ return ret;
+}
+
+apr_status_t apu_dso_load(apr_dso_handle_t **dlhandleptr,
+ apr_dso_handle_sym_t *dsoptr,
+ const char *module,
+ const char *modsym,
+ apr_pool_t *pool)
+{
+ apr_dso_handle_t *dlhandle = NULL;
+ char *pathlist;
+ char path[APR_PATH_MAX + 1];
+ apr_array_header_t *paths;
+ apr_pool_t *global;
+ apr_status_t rv = APR_EDSOOPEN;
+ char *eos = NULL;
+ int i;
+
+ *dsoptr = apr_hash_get(dsos, module, APR_HASH_KEY_STRING);
+ if (*dsoptr) {
+ return APR_EINIT;
+ }
+
+ /* The driver DSO must have exactly the same lifetime as the
+ * drivers hash table; ignore the passed-in pool */
+ global = apr_hash_pool_get(dsos);
+
+ /* Retrieve our path search list or prepare for a single search */
+ if ((apr_env_get(&pathlist, APR_DSOPATH, pool) != APR_SUCCESS)
+ || (apr_filepath_list_split(&paths, pathlist, pool) != APR_SUCCESS))
+ paths = apr_array_make(pool, 1, sizeof(char*));
+
+#if defined(APU_DSO_LIBDIR)
+ /* Always search our prefix path, but on some platforms such as
+ * win32 this may be left undefined
+ */
+ (*((char **)apr_array_push(paths))) = APU_DSO_LIBDIR;
+#endif
+
+ for (i = 0; i < paths->nelts; ++i)
+ {
+#if defined(WIN32)
+ /* Use win32 dso search semantics and attempt to
+ * load the relative lib on the first pass.
+ */
+ if (!eos) {
+ eos = path;
+ --i;
+ }
+ else
+#endif
+ {
+ eos = apr_cpystrn(path, ((char**)paths->elts)[i], sizeof(path));
+ if ((eos > path) && (eos - path < sizeof(path) - 1))
+ *(eos++) = '/';
+ }
+ apr_cpystrn(eos, module, sizeof(path) - (eos - path));
+
+ rv = apr_dso_load(&dlhandle, path, global);
+ if (dlhandleptr) {
+ *dlhandleptr = dlhandle;
+ }
+ if (rv == APR_SUCCESS) { /* APR_EDSOOPEN */
+ break;
+ }
+#if defined(APU_DSO_LIBDIR)
+ else if (i < paths->nelts - 1) {
+#else
+ else { /* No APU_DSO_LIBDIR to skip */
+#endif
+ /* try with apr-util-APU_MAJOR_VERSION appended */
+ eos = apr_cpystrn(eos,
+ "apr-util-" APU_STRINGIFY(APU_MAJOR_VERSION) "/",
+ sizeof(path) - (eos - path));
+
+ apr_cpystrn(eos, module, sizeof(path) - (eos - path));
+
+ rv = apr_dso_load(&dlhandle, path, global);
+ if (dlhandleptr) {
+ *dlhandleptr = dlhandle;
+ }
+ if (rv == APR_SUCCESS) { /* APR_EDSOOPEN */
+ break;
+ }
+ }
+ }
+
+ if (rv != APR_SUCCESS) /* APR_ESYMNOTFOUND */
+ return rv;
+
+ rv = apr_dso_sym(dsoptr, dlhandle, modsym);
+ if (rv != APR_SUCCESS) { /* APR_ESYMNOTFOUND */
+ apr_dso_unload(dlhandle);
+ }
+ else {
+ module = apr_pstrdup(global, module);
+ apr_hash_set(dsos, module, APR_HASH_KEY_STRING, *dsoptr);
+ }
+ return rv;
+}
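+
+/* A loading sketch (an editor's illustration; the module and symbol names
+ * are hypothetical, in the style used by the apr_dbd driver loader):
+ *
+ *     apr_dso_handle_t *dso;
+ *     apr_dso_handle_sym_t sym;
+ *     apr_status_t rv;
+ *
+ *     apu_dso_mutex_lock();
+ *     rv = apu_dso_load(&dso, &sym, "apr_dbd_sqlite3-1.so",
+ *                       "apr_dbd_sqlite3_driver", pool);
+ *     apu_dso_mutex_unlock();
+ *     if (rv == APR_SUCCESS || rv == APR_EINIT) {
+ *         // APR_EINIT: the module was already loaded; sym is still set
+ *     }
+ */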
+
+#endif /* APU_DSO_BUILD */
+
diff --git a/misc/apu_version.c b/misc/apu_version.c
new file mode 100644
index 0000000..97e7309
--- /dev/null
+++ b/misc/apu_version.c
@@ -0,0 +1,37 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "apr_general.h" /* for APR_STRINGIFY */
+
+#include "apu.h"
+#include "apu_version.h"
+
+APU_DECLARE(void) apu_version(apr_version_t *pvsn)
+{
+ pvsn->major = APU_MAJOR_VERSION;
+ pvsn->minor = APU_MINOR_VERSION;
+ pvsn->patch = APU_PATCH_VERSION;
+#ifdef APU_IS_DEV_VERSION
+ pvsn->is_dev = 1;
+#else
+ pvsn->is_dev = 0;
+#endif
+}
+
+APU_DECLARE(const char *) apu_version_string(void)
+{
+ return APU_VERSION_STRING;
+}
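+
+/* A usage sketch (an editor's illustration):
+ *
+ *     #include <stdio.h>
+ *
+ *     apr_version_t v;
+ *     apu_version(&v);
+ *     printf("apr-util %d.%d.%d%s (%s)\n", v.major, v.minor, v.patch,
+ *            v.is_dev ? "-dev" : "", apu_version_string());
+ */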