summaryrefslogtreecommitdiffstats
path: root/scripts
diff options
context:
space:
mode:
Diffstat (limited to 'scripts')
-rw-r--r--scripts/.gitignore8
-rwxr-xr-xscripts/Ssha2Passwd144
-rw-r--r--scripts/all.mk22
-rw-r--r--scripts/bbedit/unlanglaguage.plist116
-rw-r--r--scripts/bin/README.md9
-rwxr-xr-xscripts/bin/gdb5
-rwxr-xr-xscripts/bin/lldb5
l---------scripts/bin/radclient1
-rwxr-xr-xscripts/bin/radict5
-rwxr-xr-xscripts/bin/radiusd5
l---------scripts/bin/radmin1
l---------scripts/bin/radsniff1
l---------scripts/bin/unit_test_module1
-rw-r--r--scripts/boiler.mk674
-rw-r--r--scripts/ci/Dockerfile10
-rw-r--r--scripts/ci/Jenkinsfile66
-rwxr-xr-xscripts/ci/eapol_test-build.sh121
-rw-r--r--scripts/ci/eapol_test/.gitignore1
-rw-r--r--scripts/ci/eapol_test/config_freebsd520
-rw-r--r--scripts/ci/eapol_test/config_linux520
-rw-r--r--scripts/ci/eapol_test/config_osx515
-rw-r--r--scripts/ci/haproxy.conf16
-rwxr-xr-xscripts/ci/ldap-setup.sh51
-rw-r--r--scripts/ci/ldap/slapd.conf51
-rw-r--r--scripts/ci/ldap/slapd2.conf61
-rwxr-xr-xscripts/ci/ldap2-setup.sh63
-rwxr-xr-xscripts/ci/mysql-setup.sh19
-rwxr-xr-xscripts/ci/openresty-setup.sh144
-rw-r--r--scripts/ci/openresty/.htpasswd1
-rw-r--r--scripts/ci/openresty/auth-api.lua19
-rw-r--r--scripts/ci/openresty/delay-api.lua6
-rw-r--r--scripts/ci/openresty/json-api.lua145
-rw-r--r--scripts/ci/openresty/post-api.lua19
-rw-r--r--scripts/ci/openresty/test.txt1
-rw-r--r--scripts/ci/package-test.mk41
-rwxr-xr-xscripts/ci/postgresql-setup.sh26
-rw-r--r--scripts/ci/radsecproxy.conf33
-rw-r--r--scripts/ci/start.sh37
-rw-r--r--scripts/ci/stunnel.conf16
-rwxr-xr-xscripts/clients.pl68
-rw-r--r--scripts/collectd/radsniff_types.db10
-rwxr-xr-xscripts/create-users.pl64
-rw-r--r--scripts/cron/radiusd.cron.daily.in34
-rw-r--r--scripts/cron/radiusd.cron.monthly.in19
-rw-r--r--scripts/crossbuild/README.md122
-rw-r--r--scripts/crossbuild/build/.gitignore4
-rw-r--r--scripts/crossbuild/crossbuild.mk236
-rw-r--r--scripts/crossbuild/docker/centos7/Dockerfile92
-rw-r--r--scripts/crossbuild/docker/centos8/Dockerfile81
-rw-r--r--scripts/crossbuild/docker/debian10/Dockerfile89
-rw-r--r--scripts/crossbuild/docker/debian8/Dockerfile84
-rw-r--r--scripts/crossbuild/docker/debian9/Dockerfile84
-rw-r--r--scripts/crossbuild/docker/debian9/README15
-rw-r--r--scripts/crossbuild/docker/ubuntu16/Dockerfile86
-rw-r--r--scripts/crossbuild/docker/ubuntu18/Dockerfile65
-rw-r--r--scripts/crossbuild/docker/ubuntu20/Dockerfile65
-rwxr-xr-xscripts/cryptpasswd.in83
-rwxr-xr-xscripts/dhcp/isc2ippool.pl189
-rwxr-xr-xscripts/dhcp/rlm_iscfixed2ippool422
-rwxr-xr-xscripts/dict_alias.sh22
-rw-r--r--scripts/docker/README.md200
-rw-r--r--scripts/docker/alpine/Dockerfile83
-rwxr-xr-xscripts/docker/alpine/docker-entrypoint.sh27
-rw-r--r--scripts/docker/centos7/Dockerfile96
-rwxr-xr-xscripts/docker/centos7/docker-entrypoint.sh24
-rw-r--r--scripts/docker/debian10/Dockerfile59
-rwxr-xr-xscripts/docker/debian10/docker-entrypoint.sh24
-rw-r--r--scripts/docker/debian11/Dockerfile64
-rwxr-xr-xscripts/docker/debian11/docker-entrypoint.sh24
-rw-r--r--scripts/docker/debian9/Dockerfile59
-rwxr-xr-xscripts/docker/debian9/docker-entrypoint.sh24
-rw-r--r--scripts/docker/debiansid/Dockerfile64
-rwxr-xr-xscripts/docker/debiansid/docker-entrypoint.sh24
-rw-r--r--scripts/docker/docker.mk86
-rw-r--r--scripts/docker/rocky8/Dockerfile108
-rwxr-xr-xscripts/docker/rocky8/docker-entrypoint.sh24
-rw-r--r--scripts/docker/ubuntu18/Dockerfile59
-rwxr-xr-xscripts/docker/ubuntu18/docker-entrypoint.sh24
-rw-r--r--scripts/docker/ubuntu20/Dockerfile61
-rwxr-xr-xscripts/docker/ubuntu20/docker-entrypoint.sh24
-rw-r--r--scripts/docker/ubuntu22/Dockerfile66
-rwxr-xr-xscripts/docker/ubuntu22/docker-entrypoint.sh24
-rwxr-xr-xscripts/exec-program-wait62
-rwxr-xr-xscripts/git/post-receive151
-rw-r--r--scripts/install.mk250
-rw-r--r--scripts/jlibtool.c2608
-rwxr-xr-xscripts/ldap/radiusd2ldif.pl307
-rw-r--r--scripts/ldap/schema_to_samba.py132
-rw-r--r--scripts/libtool.mk243
-rw-r--r--scripts/logrotate/freeradius59
-rwxr-xr-xscripts/min-includes.pl238
-rw-r--r--scripts/monit/freeradius.monitrc18
-rwxr-xr-xscripts/munin/freeradius_acct88
-rwxr-xr-xscripts/munin/freeradius_auth103
-rwxr-xr-xscripts/munin/freeradius_proxy_acct88
-rwxr-xr-xscripts/munin/freeradius_proxy_auth103
-rwxr-xr-xscripts/munin/radsniff246
-rw-r--r--scripts/osx/README2
-rw-r--r--scripts/osx/org.freeradius.radiusd.plist21
-rwxr-xr-xscripts/raddebug140
-rw-r--r--scripts/radiusd.sh17
-rwxr-xr-xscripts/radtee563
-rwxr-xr-xscripts/raduat366
-rwxr-xr-xscripts/rc.radiusd.in100
-rw-r--r--scripts/snmp-proxy/README32
-rw-r--r--scripts/snmp-proxy/dictionary.hacked132
-rw-r--r--scripts/snmp-proxy/freeradius-snmp.pl585
-rw-r--r--scripts/snmp-proxy/net-radius-freeradius-dictionary.diff81
-rw-r--r--scripts/solaris/.gitignore1
-rw-r--r--scripts/solaris/README.md58
-rw-r--r--scripts/solaris/radius.xml68
-rwxr-xr-xscripts/solaris/svc-radius.in99
-rwxr-xr-xscripts/sql/align_sql_pools.pl311
-rwxr-xr-xscripts/sql/generate_pool_addresses.pl456
-rwxr-xr-xscripts/sql/radsqlrelay342
-rwxr-xr-xscripts/sql/rlm_sqlippool_tool961
-rw-r--r--scripts/sql/users2mysql.pl157
117 files changed, 15869 insertions, 0 deletions
diff --git a/scripts/.gitignore b/scripts/.gitignore
new file mode 100644
index 0000000..3c72551
--- /dev/null
+++ b/scripts/.gitignore
@@ -0,0 +1,8 @@
+check-radiusd-config
+cryptpasswd
+radiusd.cron.daily
+radiusd.cron.monthly
+radwatch
+rc.radiusd
+radcrypt
+jlibtool
diff --git a/scripts/Ssha2Passwd b/scripts/Ssha2Passwd
new file mode 100755
index 0000000..65c23a4
--- /dev/null
+++ b/scripts/Ssha2Passwd
@@ -0,0 +1,144 @@
+#!/usr/bin/perl -w
+use strict;
+use Getopt::Long qw(:config no_ignore_case);
+use Pod::Usage;
+use Digest::SHA;
+use MIME::Base64;
+use Math::Random::Secure qw(irand);
+
+my %opts;
+GetOptions(\%opts,
+ qw[ help|?! man! f|format=s l|len=i s|salt=s S|Salt=s z|saltlen:i ]
+) or pod2usage(2);
+pod2usage(1) if $opts{help};
+pod2usage(-verbose => 2) if $opts{man};
+
+my $len = 256;
+if (exists $opts{l}) {
+ my @length = (224, 256, 384, 512);
+ if (grep {$_ eq $opts{l}} @length) {
+ $len = $opts{l};
+ } else {
+ print "Bad length\n";
+ exit(1);
+ }
+}
+
+sub fmt_base64 {
+ return encode_base64(shift, '')."\n";
+}
+
+sub fmt_hex {
+ return unpack('H*', shift)."\n";
+}
+
+sub fmt_bin {
+ return shift;
+}
+
+my $fmt = \&{'fmt_base64'};
+if (exists $opts{f}) {
+ my %format = ('m' => \&{'fmt_base64'}, 'base64' => \&{'fmt_base64'},
+ 'x' => \&{'fmt_hex'}, 'hex' => \&{'fmt_hex'},
+ 'b' => \&{'fmt_bin'}, 'bin' => \&{'fmt_bin'});
+ $fmt = $format{$opts{f}};
+ if (!defined $fmt) {
+ print "Bad format\n";
+ exit(1);
+ }
+}
+
+my $password = $ARGV[0];
+if (!defined $password) {
+ print "Missing password\n";
+ exit(1);
+}
+
+my $salt = $opts{s};
+if (exists $opts{S}) {
+ if (defined $salt) {
+ print "Redundant salt\n";
+ exit(1);
+ }
+ $salt = pack('H*', $opts{S});
+} elsif (!defined $salt and exists $opts{z}) {
+ my $ssiz = $opts{z};
+ if ($ssiz == 0) {
+ $ssiz = 8;
+ } elsif ($ssiz < 0) {
+ print "Bad salt length\n";
+ exit(1);
+ }
+ while ($ssiz >= 4) {
+ $salt .= pack('N', irand());
+ $ssiz -= 4;
+ }
+ $salt .= substr(pack('N', irand()), 1, $ssiz) if ($ssiz > 0);
+}
+
+my $ctx = Digest::SHA->new($len);
+$ctx->add($password);
+$ctx->add($salt) if (defined $salt);
+my $dig = $ctx->digest;
+$dig .= $salt if (defined $salt);
+
+print &$fmt($dig);
+
+__END__
+
+=head1 NAME
+
+ssha2passwd - Generate a SHA2 hashed password
+
+=head1 DESCRIPTION
+
+Hash the given password into a SHA2 digest with optional salt.
+
+=head1 SYNOPSIS
+
+ ssha2passwd [options] <password>
+
+=head1 OPTIONS
+
+=over
+
+=item B<-f> or B<-format> <format>
+
+Format options:
+
+=over
+
+=item B<m> or B<base64> : base64 encoded (default)
+
+=item B<x> or B<hex> : hexadecimal string
+
+=item B<b> or B<bin> : binary string
+
+=back
+
+=item B<-l> or B<-length> <length>
+
+Hash algorithm bit length (224, 256, 384, or 512 | default: 256).
+
+=item B<-s> or B<-salt> <string>
+
+=item B<-S> or B<-Salt> <hexadecimal string>
+
+Salt string appended to password and hashed. The resultant digest then
+has the salt string appended.
+
+=item B<-z> or B<-saltlen> [<length>]
+
+Byte length of random salt appended to password and hashed, if no salt
+string is explicitly given (a length of 0, the default, selects 8 bytes).
+
+=item B<-?> or B<-help>
+
+Print a brief help message.
+
+=item B<-man>
+
+Print the manual page.
+
+=back
+=cut
diff --git a/scripts/all.mk b/scripts/all.mk
new file mode 100644
index 0000000..a6e90aa
--- /dev/null
+++ b/scripts/all.mk
@@ -0,0 +1,22 @@
+install: $(R)$(sbindir)/rc.radiusd $(R)$(sbindir)/raddebug \
+ $(R)$(bindir)/radsqlrelay $(R)$(bindir)/radcrypt $(R)$(bindir)/rlm_sqlippool_tool
+
+$(R)$(sbindir)/rc.radiusd: scripts/rc.radiusd
+ @mkdir -p $(dir $@)
+ @$(INSTALL) -m 755 $< $@
+
+$(R)$(sbindir)/raddebug: scripts/raddebug
+ @mkdir -p $(dir $@)
+ @$(INSTALL) -m 755 $< $@
+
+$(R)$(bindir)/radsqlrelay: scripts/sql/radsqlrelay
+ @mkdir -p $(dir $@)
+ @$(INSTALL) -m 755 $< $@
+
+$(R)$(bindir)/radcrypt: scripts/cryptpasswd
+ @mkdir -p $(dir $@)
+ @$(INSTALL) -m 755 $< $@
+
+$(R)$(bindir)/rlm_sqlippool_tool: scripts/sql/rlm_sqlippool_tool
+ @mkdir -p $(dir $@)
+ @$(INSTALL) -m 755 $< $@
diff --git a/scripts/bbedit/unlanglaguage.plist b/scripts/bbedit/unlanglaguage.plist
new file mode 100644
index 0000000..24496f5
--- /dev/null
+++ b/scripts/bbedit/unlanglaguage.plist
@@ -0,0 +1,116 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE plist PUBLIC "-//Apple Computer//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
+<plist version="1.0">
+<!--
+
+ UnlangLanguage.plist
+ A codeless language module for Unlang in BBEdit.
+ Copyright Arran Cudbard-Bell <a.cudbardb@freeradius.org>, 2012.
+
+ This work is licensed under a Creative Commons Attribution-Share Alike 3.0 License.
+ http://creativecommons.org/licenses/by-sa/3.0/
+
+ Version 1.0
+
+-->
+<dict>
+ <key>BBEditDocumentType</key>
+ <string>CodelessLanguageModule</string>
+ <key>BBLMLanguageDisplayName</key>
+ <string>Unlang</string>
+ <key>BBLMLanguageCode</key>
+ <string>unlg</string>
+ <key>BBLMColorsSyntax</key>
+ <true/>
+ <key>BBLMScansFunctions</key>
+ <true/>
+ <key>BBLMIsCaseSensitive</key>
+ <true/>
+ <key>BBLMKeywordList</key>
+ <array>
+ <string>update</string>
+ <string>switch</string>
+ <string>case</string>
+ <string>if</string>
+ <string>else</string>
+ <string>elsif</string>
+ <string>redundant</string>
+ <string>load-balance</string>
+ <string>redundant-load-balance</string>
+ <string>notfound</string>
+ <string>noop</string>
+ <string>ok</string>
+ <string>updated</string>
+ <string>fail</string>
+ <string>userlock</string>
+ <string>invalid</string>
+ <string>handled</string>
+ </array>
+ <key>BBLMSuffixMap</key>
+ <array>
+ <dict>
+ <key>BBLMLanguageSuffix</key>
+ <string>.policy</string>
+ </dict>
+ </array>
+ <key>BBLMCommentLineDefault</key>
+ <string>#</string>
+ <key>Language Features</key>
+ <dict>
+ <key>Identifier and Keyword Character Class</key>
+ <string><![CDATA[0-9A-Z_a-z]]></string>
+
+ <key>Function Pattern</key>
+ <string><![CDATA[(?x:
+ (?x:
+ (?P<leadspace>^\s*)
+ (?P<function>
+ (?P<function_name>[a-zA-Z0-9_-\.]+)
+ \s+{\n
+ (?P<function_body>[^}]+)
+ }
+ )
+ )
+ )]]></string>
+
+ <key>Skip Pattern</key>
+ <string><![CDATA[
+ (?x:
+ (?P>comment) |
+ (?P>string)
+ )]]></string>
+ <key>Open Line Comments</key>
+ <string>#</string>
+
+ <key>Open Statement Blocks</key>
+ <string>{</string>
+
+ <key>Close Statement Blocks</key>
+ <string>}</string>
+
+ <key>Open Strings 1</key>
+ <string>"</string>
+
+ <key>Close Strings 1</key>
+ <string>"</string>
+
+ <key>Escape Char in Strings 1</key>
+ <string>\</string>
+
+ <key>End-of-line Ends Strings 1</key>
+ <true/>
+
+ <key>Open Strings 2</key>
+ <string>'</string>
+
+ <key>Close Strings 2</key>
+ <string>'</string>
+
+ <key>Escape Char in Strings 2</key>
+ <string>\</string>
+
+ <key>End-of-line Ends Strings 2</key>
+ <true/>
+ </dict>
+</dict>
+</plist> \ No newline at end of file
diff --git a/scripts/bin/README.md b/scripts/bin/README.md
new file mode 100644
index 0000000..b7f4b5d
--- /dev/null
+++ b/scripts/bin/README.md
@@ -0,0 +1,9 @@
+# Wrapper scripts for binaries
+
+The build process creates "local" versions of the binaries. These
+binaries can be run out of the source / build tree, and do not need to
+be installed in order to work.
+
+However, the "local" binaries require manual mangling of environment
+variables in order to work. As such, it's easier to just have shell
+script wrappers so people have to remember fewer things.
diff --git a/scripts/bin/gdb b/scripts/bin/gdb
new file mode 100755
index 0000000..c28b814
--- /dev/null
+++ b/scripts/bin/gdb
@@ -0,0 +1,5 @@
+#!/bin/sh
+export PANIC_ACTION=
+export DEBUGGER_ATTACHED=yes
+
+exec ./build//make/jlibtool --mode=execute gdb --args ./build/bin/local/radiusd -sf -xx -l stdout -d ./raddb -D share/ $@
diff --git a/scripts/bin/lldb b/scripts/bin/lldb
new file mode 100755
index 0000000..2634601
--- /dev/null
+++ b/scripts/bin/lldb
@@ -0,0 +1,5 @@
+#!/bin/sh
+export PANIC_ACTION=
+export DEBUGGER_ATTACHED=yes
+
+exec ./build//make/jlibtool --mode=execute lldb -- ./build/bin/local/radiusd -sf -xx -l stdout -d ./raddb -D share/ $@
diff --git a/scripts/bin/radclient b/scripts/bin/radclient
new file mode 120000
index 0000000..f3a1360
--- /dev/null
+++ b/scripts/bin/radclient
@@ -0,0 +1 @@
+radiusd \ No newline at end of file
diff --git a/scripts/bin/radict b/scripts/bin/radict
new file mode 100755
index 0000000..cedcad0
--- /dev/null
+++ b/scripts/bin/radict
@@ -0,0 +1,5 @@
+#!/bin/sh
+DIR=$(dirname $0)/../..
+PROGRAM=$(basename $0)
+
+exec $DIR/build/make/jlibtool --mode=execute $FR_DEBUGGER $DIR/build/bin/local/$PROGRAM -D $DIR/share/ $@
diff --git a/scripts/bin/radiusd b/scripts/bin/radiusd
new file mode 100755
index 0000000..7d2e5cb
--- /dev/null
+++ b/scripts/bin/radiusd
@@ -0,0 +1,5 @@
+#!/bin/sh
+DIR=$(dirname $0)/../..
+PROGRAM=$(basename $0)
+
+exec $DIR/build/make/jlibtool --mode=execute $FR_DEBUGGER $DIR/build/bin/local/$PROGRAM -d $DIR/raddb -D $DIR/share/ $@
diff --git a/scripts/bin/radmin b/scripts/bin/radmin
new file mode 120000
index 0000000..f3a1360
--- /dev/null
+++ b/scripts/bin/radmin
@@ -0,0 +1 @@
+radiusd \ No newline at end of file
diff --git a/scripts/bin/radsniff b/scripts/bin/radsniff
new file mode 120000
index 0000000..f3a1360
--- /dev/null
+++ b/scripts/bin/radsniff
@@ -0,0 +1 @@
+radiusd \ No newline at end of file
diff --git a/scripts/bin/unit_test_module b/scripts/bin/unit_test_module
new file mode 120000
index 0000000..f3a1360
--- /dev/null
+++ b/scripts/bin/unit_test_module
@@ -0,0 +1 @@
+radiusd \ No newline at end of file
diff --git a/scripts/boiler.mk b/scripts/boiler.mk
new file mode 100644
index 0000000..9d70104
--- /dev/null
+++ b/scripts/boiler.mk
@@ -0,0 +1,674 @@
+# boilermake: A reusable, but flexible, boilerplate Makefile.
+#
+# Copyright 2008, 2009, 2010 Dan Moulding, Alan T. DeKok
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+# Caution: Don't edit this Makefile! Create your own main.mk and other
+# submakefiles, which will be included by this Makefile.
+# Only edit this if you need to modify boilermake's behavior (fix
+# bugs, add features, etc).
+
+# Note: Parameterized "functions" in this makefile that are marked with
+# "USE WITH EVAL" are only useful in conjuction with eval. This is
+# because those functions result in a block of Makefile syntax that must
+# be evaluated after expansion. Since they must be used with eval, most
+# instances of "$" within them need to be escaped with a second "$" to
+# accommodate the double expansion that occurs when eval is invoked.
+
+#
+# You can watch what it's doing by:
+#
+# $ VERBOSE=1 make ... args ...
+#
+ifeq "${VERBOSE}" ""
+ Q=@
+else
+ Q=
+endif
+
+# ADD_CLEAN_RULE - Parameterized "function" that adds a new rule and phony
+# target for cleaning the specified target (removing its build-generated
+# files).
+#
+# USE WITH EVAL
+#
+define ADD_CLEAN_RULE
+ clean: clean_$(notdir ${1})
+ .PHONY: clean_$(notdir ${1})
+ clean_$(notdir ${1}):
+ $(Q)$(strip rm -f ${${1}_BUILD}/${1} $${${1}_OBJS} $${${1}_DEPS} $${${1}_OBJS:%.${OBJ_EXT}=%.[do]}) $(if ${TARGET_DIR},$${TARGET_DIR}/$(notdir ${1}))
+ $${${1}_POSTCLEAN}
+
+endef
+
+# FILTER_DEPENDS: function to turn a *.d file into a *.mk file.
+# We start off with the dependencies as created by the compiler,
+# CPP, or makedepend. We then ensure that there is an empty dependency
+# for each header file. The blank target ensures that the build
+# can proceed even if the header file has been deleted.
+#
+# COMMON filters:
+# remove comments
+# remove dependencies on global include files
+# remove empty dependencies
+# remove CPP hacks like "foo: <built-in>"
+#
+# 1) Filter the .d file to remove unnecessary cruft
+#
+# COMMON
+# Replace ".o" with "${OBJ_EXT}"
+# delete empty continuation lines
+# delete blank lines
+# replace "build/" with "${BUILD_DIR}/" when it's at the start of a line
+# delete references to ${BUILD_DIR}/make/include, the "config.mk"
+# file adds these dependencies automatically.
+# replace "build/" with "${BUILD_DIR}/" when it's in the middle of a line
+#
+# remove sequential duplicate lines
+#
+# 2) Create empty dependencies from the files
+#
+# COMMON
+# remove existing targets
+# remove continuations (to make the targets stand by themselves)
+# delete blank lines
+# add in empty dependency for each file.
+# remove sequential duplicate lines
+#
+define FILTER_DEPENDS
+ $(Q)mkdir -p $$(dir $${BUILD_DIR}/make/src/$$*)
+ $(Q)mkdir -p $$(dir $${BUILD_DIR}/objs/$$*)
+ $(Q)sed -e 's/#.*//' \
+ -e 's,^$${top_srcdir},$$$${top_srcdir},' \
+ -e 's, $${top_srcdir}, $$$${top_srcdir},' \
+ -e 's,^$${BUILD_DIR},$$$${BUILD_DIR},' \
+ -e 's, $${BUILD_DIR}/make/include/[^ :]*,,' \
+ -e 's, $${BUILD_DIR}, $$$${BUILD_DIR},' \
+ -e 's, /[^: ]*,,g' \
+ -e 's,^ *[^:]* *: *$$$$,,' \
+ -e '/: </ d' \
+ -e 's/\.o: /.$$$${OBJ_EXT}: /' \
+ -e '/^ *\\$$$$/ d' \
+ < $${BUILD_DIR}/objs/$$*.d | sed -e '$$$$!N; /^\(.*\)\n\1$$$$/!P; D' \
+ > $${BUILD_DIR}/make/src/$$*.mk
+ $(Q)sed -e 's/#.*//' \
+ -e 's, $${BUILD_DIR}/make/include/[^ :]*,,' \
+ -e 's, /[^: ]*,,g' \
+ -e 's,^ *[^:]* *: *$$$$,,' \
+ -e '/: </ d' \
+ -e 's/^[^:]*: *//' \
+ -e 's/ *\\$$$$//' \
+ -e 's/$$$$/ :/' \
+ < $${BUILD_DIR}/objs/$$*.d | sed -e '$$$$!N; /^\(.*\)\n\1$$$$/!P; D' \
+ >> $${BUILD_DIR}/make/src/$$*.mk
+endef
+
+# ADD_OBJECT_RULE - Parameterized "function" that adds a pattern rule, using
+# the commands from the second argument, for building object files from
+# source files with the filename extension specified in the first argument.
+#
+# This function assumes that the C/C++ sources files have filenames
+# *relative* to the source root. If they have absolute pathnames, it
+# creates the wrong filenames...
+#
+# USE WITH EVAL
+#
+ifeq "${CPP_MAKEDEPEND}" "yes"
+define ADD_OBJECT_RULE
+$${BUILD_DIR}/objs/%.${OBJ_EXT} $${BUILD_DIR}/objs/%.d: ${1} | ${BOOTSTRAP_BUILD}
+ ${2}
+ $${CPP} $${CPPFLAGS} $$(addprefix -I,$${SRC_INCDIRS}) $${SRC_DEFS} $$< | sed \
+ -n 's,^\# *[0-9][0-9]* *"\([^"]*\)".*,$$@: \1,p' > $${BUILD_DIR}/objs/$$*.d
+${FILTER_DEPENDS}
+endef
+
+else
+define ADD_OBJECT_RULE
+$${BUILD_DIR}/objs/%.${OBJ_EXT} $${BUILD_DIR}/objs/%.d: ${1} | ${BOOTSTRAP_BUILD}
+ ${2}
+${FILTER_DEPENDS}
+endef
+endif
+
+define ADD_ANALYZE_RULE
+$${BUILD_DIR}/plist/%.plist: ${1}
+ ${2}
+endef
+
+# ADD_TARGET_DIR - Parameterized "function" that makes a link from
+# TARGET_DIR to the executable or library in the BUILD_DIR directory.
+#
+# USE WITH EVAL
+#
+ifneq "${TARGET_DIR}" ""
+ define ADD_TARGET_DIR
+ all: $${TARGET_DIR}/$$(notdir ${1})
+
+ $${TARGET_DIR}/$$(notdir ${1}): ${1}
+ [ -f $${TARGET_DIR}/$$(notdir ${1}) ] || ln -s ${1} $${TARGET_DIR}/$$(notdir ${1})
+
+ endef
+endif
+
+# ADD_TARGET_TO_ALL - Parameterized "function" that adds the target,
+# and makes "all" depend on it.
+#
+# USE WITH EVAL
+#
+define ADD_TARGET_TO_ALL
+ all: ${1}
+
+endef
+
+# ADD_TARGET_RULE.* - Parameterized "functions" that adds a new target to the
+# Makefile. There should be one ADD_TARGET_RULE definition for each
+# type of target that is used in the build.
+#
+# New rules can be added by copying one of the existing ones, and
+# replacing the line after the "mkdir"
+#
+
+# ADD_TARGET_RULE.exe - Build an executable target.
+#
+# USE WITH EVAL
+#
+define ADD_TARGET_RULE.exe
+ # So "make ${1}" works
+ .PHONY: ${1}
+ ${1}: $${${1}_BUILD}/${1}
+
+ # Create executable ${1}
+ $${${1}_BUILD}/${1}: $${${1}_OBJS} $${${1}_PRBIN} $${${1}_PRLIBS}
+ $(Q)$(strip mkdir -p $(dir $${${1}_BUILD}/${1}))
+ $(Q)$(ECHO) LINK $${${1}_BUILD}/${1}
+ $(Q)$${${1}_LINKER} -o $${${1}_BUILD}/${1} $${RPATH_FLAGS} $${LDFLAGS} \
+ $${${1}_LDFLAGS} $${${1}_OBJS} $${${1}_PRLIBS} \
+ $${LDLIBS} $${${1}_LDLIBS}
+ $(Q)$${${1}_POSTMAKE}
+
+ ifneq "${ANALYZE.c}" ""
+ scan.${1}: $${${1}_PLISTS}
+ endif
+
+ .PHONY: $(DIR)
+ $(DIR)/: ${1}
+endef
+
+# ADD_TARGET_RULE.a - Build a static library target.
+#
+# USE WITH EVAL
+#
+define ADD_TARGET_RULE.a
+ # So "make ${1}" works
+ .PHONY: ${1}
+ ${1}: $${${1}_BUILD}/${1}
+
+ # Create static library ${1}
+ $${${1}_BUILD}/${1}: $${${1}_OBJS} $${${1}_PRLIBS}
+ $(Q)$(strip mkdir -p $(dir $${${1}_BUILD}/${1}))
+ $(Q)$(ECHO) LINK $${${1}_BUILD}/${1}
+ $(Q)$${AR} $${ARFLAGS} $${${1}_BUILD}/${1} $${${1}_OBJS}
+ $(Q)$${${1}_POSTMAKE}
+
+ ifneq "${ANALYZE.c}" ""
+ scan.${1}: $${${1}_PLISTS}
+ endif
+
+ .PHONY: $(DIR)
+ $(DIR)/: ${1}
+endef
+
+# ADD_TARGET_RULE.so - Build a ".so" target.
+#
+# USE WITH EVAL
+#
+define ADD_TARGET_RULE.so
+$(error Please add rules to build a ".so" file.)
+endef
+
+# ADD_TARGET_RULE.dll - Build a ".dll" target.
+#
+# USE WITH EVAL
+#
+define ADD_TARGET_RULE.dll
+$(error Please add rules to build a ".dll" file.)
+endef
+
+# ADD_TARGET_RULE.dylib - Build a ".dylib" target.
+#
+# USE WITH EVAL
+#
+define ADD_TARGET_RULE.dylib
+$(error Please add rules to build a ".dylib" file.)
+endef
+
+# CANONICAL_PATH - Given one or more paths, converts the paths to the canonical
+# form. The canonical form is the path, relative to the project's top-level
+# directory (the directory from which "make" is run), and without
+# any "./" or "../" sequences. For paths that are not located below the
+# top-level directory, the canonical form is the absolute path (i.e. from
+# the root of the filesystem) also without "./" or "../" sequences.
+define CANONICAL_PATH
+$(patsubst ${CURDIR}/%,%,$(abspath ${1}))
+endef
+
+# COMPILE_C_CMDS - Commands for compiling C source code.
+ifeq "$(CPPCHECK)" ""
+define COMPILE_C_CMDS
+ $(Q)mkdir -p $(dir $@)
+ $(Q)$(ECHO) CC $<
+ $(Q)$(strip ${COMPILE.c} -o $@ -c -MD ${CPPFLAGS} ${CFLAGS} ${SRC_CFLAGS} ${INCDIRS} \
+ $(addprefix -I, ${SRC_INCDIRS}) ${SRC_DEFS} ${DEFS} $<)
+endef
+else
+#
+# do cppcheck AND compilation, so that we have correct dependencies
+# Suppress variable scope warnings for now. They're style, and don't really
+# affect anything.
+#
+define COMPILE_C_CMDS
+ $(Q)mkdir -p $(dir $@)
+ $(Q)$(ECHO) CC $<
+ $(Q)$(strip ${COMPILE.c} -o $@ -c -MD ${CPPFLAGS} ${CFLAGS} ${SRC_CFLAGS} ${INCDIRS} \
+ $(addprefix -I,${SRC_INCDIRS}) ${SRC_DEFS} ${DEFS} $<)
+ $(Q)cppcheck --enable=style -q ${CHECKFLAGS} $(filter -isystem%,${SRC_CFLAGS}) \
+ $(filter -I%,${SRC_CFLAGS}) $(filter -D%,${SRC_CFLAGS}) ${INCDIRS} \
+ $(addprefix -I,${SRC_INCDIRS}) ${SRC_DEFS} ${DEFS} --suppress=variableScope --suppress=invalidscanf $<
+endef
+endif
+
+# ANALYZE_C_CMDS - Commands for analyzing C source code with clang.
+define ANALYZE_C_CMDS
+ $(Q)mkdir -p $(dir $@)
+ $(Q)$(ECHO) SCAN $<
+ $(Q)$(strip ${ANALYZE.c} --analyze -Xanalyzer -analyzer-output=html -c $< -o $@ ${CPPFLAGS} \
+ ${CFLAGS} ${SRC_CFLAGS} ${INCDIRS} $(addprefix -I,${SRC_INCDIRS}) ${SRC_DEFS} ${DEFS}) || (rm -f $@ && false)
+ $(Q)touch $@
+endef
+
+# COMPILE_CXX_CMDS - Commands for compiling C++ source code.
+define COMPILE_CXX_CMDS
+ $(Q)mkdir -p $(dir $@)
+ $(Q)$(strip ${COMPILE.cxx} -o $@ -c -MD ${CPPFLAGS} ${CXXFLAGS} ${SRC_CXXFLAGS} ${INCDIRS} \
+ $(addprefix -I,${SRC_INCDIRS}) ${SRC_DEFS} ${DEFS} $<)
+endef
+
+# INCLUDE_SUBMAKEFILE - Parameterized "function" that includes a new
+# "submakefile" fragment into the overall Makefile. It also recursively
+# includes all submakefiles of the specified submakefile fragment.
+#
+# USE WITH EVAL
+#
+define INCLUDE_SUBMAKEFILE
+ # Initialize all variables that can be defined by a makefile fragment, then
+ # include the specified makefile fragment.
+ TARGET :=
+ TGT_LDFLAGS :=
+ TGT_LDLIBS :=
+ TGT_LINKER :=
+ TGT_POSTCLEAN :=
+ TGT_POSTMAKE :=
+ TGT_PREREQS :=
+ TGT_POSTINSTALL :=
+ TGT_INSTALLDIR := ..
+ TGT_CHECK_HEADERS :=
+ TGT_CHECK_LIBS :=
+ TEST :=
+
+ SOURCES :=
+ SRC_CFLAGS :=
+ SRC_CXXFLAGS :=
+ SRC_DEFS :=
+ SRC_INCDIRS :=
+ MAN :=
+ FILES :=
+ OUTPUT :=
+
+ SUBMAKEFILES :=
+
+ # A directory stack is maintained so that the correct paths are used as we
+ # recursively include all submakefiles. Get the makefile's directory and
+ # push it onto the stack.
+ DIR := $(call CANONICAL_PATH,$(dir ${1}))
+ DIR_STACK := $$(call PUSH,$${DIR_STACK},$${DIR})
+
+ include ${1}
+
+ # Initialize internal local variables.
+ OBJS :=
+
+ # Determine which target this makefile's variables apply to. A stack is
+ # used to keep track of which target is the "current" target as we
+ # recursively include other submakefiles.
+ ifneq "$$(strip $${TARGET})" ""
+ # This makefile defined a new target. Target variables defined by this
+ # makefile apply to this new target. Initialize the target's variables.
+
+ # libs go into ${BUILD_DIR}/lib
+ # everything else goes into ${BUILD_DIR}/bin
+# TGT := $$(strip $$(if $$(suffix $${TARGET}),$${BUILD_DIR}/lib,$${BUILD_DIR}/bin)/$${TARGET})
+ TGT := $${TARGET}
+
+ # A "hook" to rewrite "libfoo.a" -> "libfoo.la" when using libtool
+ $$(eval $$(call ADD_LIBTOOL_SUFFIX))
+
+ ALL_TGTS += $${TGT}
+ $${TGT}_LDFLAGS := $${TGT_LDFLAGS}
+ $${TGT}_LDLIBS := $${TGT_LDLIBS}
+ $${TGT}_LINKER := $${TGT_LINKER}
+ $${TGT}_POSTMAKE := $${TGT_POSTMAKE}
+ $${TGT}_POSTCLEAN := $${TGT_POSTCLEAN}
+ $${TGT}_POSTINSTALL := $${TGT_POSTINSTALL}
+ $${TGT}_PREREQS := $${TGT_PREREQS}
+ $${TGT}_PRBIN := $$(addprefix $${BUILD_DIR}/bin/,$$(filter-out %.a %.so %.la,$${TGT_PREREQS}))
+ $${TGT}_PRLIBS := $$(addprefix $${BUILD_DIR}/lib/,$$(filter %.a %.so %.la,$${TGT_PREREQS}))
+ $${TGT}_DEPS :=
+ $${TGT}_OBJS :=
+ $${TGT}_SOURCES :=
+ $${TGT}_MAN := $${MAN}
+ $${TGT}_SUFFIX := $$(if $$(suffix $${TGT}),$$(suffix $${TGT}),.exe)
+
+ # If it's an EXE, ensure that transitive library linking works.
+ # i.e. we build libfoo.a which in turn requires -lbar. So, the executable
+ # has to be linked to both libfoo.a and -lbar.
+ ifeq "$${$${TGT}_SUFFIX}" ".exe"
+ $${TGT}_LDLIBS += $$(filter-out %.a %.so %.la,$${$${TGT_PREREQS}_LDLIBS})
+ endif
+
+ $${TGT}_BUILD := $$(if $$(suffix $${TGT}),$${BUILD_DIR}/lib,$${BUILD_DIR}/bin)
+ $${TGT}_MAKEFILES += ${1}
+ $${TGT}_CHECK_HEADERS := $${TGT_CHECK_HEADERS}
+ $${TGT}_CHECK_LIBS := $${TGT_CHECK_LIBS}
+ else
+ # The values defined by this makefile apply to the "current" target
+ # as determined by which target is at the top of the stack.
+ TGT := $$(strip $$(call PEEK,$${TGT_STACK}))
+ $${TGT}_LDFLAGS += $${TGT_LDFLAGS}
+ $${TGT}_LDLIBS += $${TGT_LDLIBS}
+ $${TGT}_POSTCLEAN += $${TGT_POSTCLEAN}
+ $${TGT}_POSTMAKE += $${TGT_POSTMAKE}
+ $${TGT}_PREREQS += $${TGT_PREREQS}
+ endif
+
+ # Push the current target onto the target stack.
+ TGT_STACK := $$(call PUSH,$${TGT_STACK},$${TGT})
+
+ # If there's no target, don't build the sources.
+ ifneq "$$(strip $${TARGET})" ""
+
+ # if there's no sources, don't do the automatic object build
+ ifneq "$$(strip $${SOURCES})" ""
+ # This makefile builds one or more objects from source. Validate the
+ # specified sources against the supported source file types.
+ BAD_SRCS := $$(strip $$(filter-out $${ALL_SRC_EXTS},$${SOURCES}))
+ ifneq "$${BAD_SRCS}" ""
+ $$(error Unsupported source file(s) found in ${1} [$${BAD_SRCS}])
+ endif
+
+ # Qualify and canonicalize paths.
+ SOURCES := $$(call QUALIFY_PATH,$${DIR},$${SOURCES})
+ SOURCES := $$(call CANONICAL_PATH,$${SOURCES})
+ SRC_INCDIRS := $$(call QUALIFY_PATH,$${DIR},$${SRC_INCDIRS})
+ SRC_INCDIRS := $$(call CANONICAL_PATH,$${SRC_INCDIRS})
+
+ # Save the list of source files for this target.
+ $${TGT}_SOURCES += $${SOURCES}
+
+ # Convert the source file names to their corresponding object file
+ # names.
+ OBJS := $$(addprefix $${BUILD_DIR}/objs/,\
+ $$(addsuffix .${OBJ_EXT},$$(basename $${SOURCES})))
+
+ PLISTS := $$(addprefix $${BUILD_DIR}/plist/,\
+ $$(addsuffix .plist,$$(basename $${SOURCES})))
+ ALL_PLISTS += ${PLISTS}
+
+ # Add the objects to the current target's list of objects, and create
+ # target-specific variables for the objects based on any source
+ # variables that were defined.
+ $${TGT}_OBJS += $${OBJS}
+ $${TGT}_PLISTS += $${PLISTS}
+ $${TGT}_DEPS += $$(addprefix $${BUILD_DIR}/make/src/,\
+ $$(addsuffix .mk,$$(basename $${SOURCES})))
+
+ # A "hook" to define variables needed by the "legacy" makefiles.
+ $$(eval $$(call ADD_LEGACY_VARIABLES,$$(dir ${1}),$${TGT}))
+
+ $${OBJS}: SRC_CFLAGS := $${SRC_CFLAGS}
+ $${OBJS}: SRC_CXXFLAGS := $${SRC_CXXFLAGS}
+ $${OBJS}: SRC_DEFS := $$(addprefix -D,$${SRC_DEFS})
+ $${OBJS}: SRC_INCDIRS := $${SRC_INCDIRS}
+ $${OBJS}: ${1}
+
+ $${PLISTS}: SRC_CFLAGS := $${SRC_CFLAGS}
+ $${PLISTS}: SRC_CXXFLAGS := $${SRC_CXXFLAGS}
+ $${PLISTS}: SRC_DEFS := $$(addprefix -D,$${SRC_DEFS})
+ $${PLISTS}: SRC_INCDIRS := $${SRC_INCDIRS}
+ $${PLISTS}: ${1}
+ endif
+ endif
+
+ ifneq "$$(strip $${SUBMAKEFILES})" ""
+ # This makefile has submakefiles. Recursively include them.
+ $$(foreach MK,$${SUBMAKEFILES},\
+ $$(eval $$(call INCLUDE_SUBMAKEFILE,\
+ $$(call CANONICAL_PATH,\
+ $$(call QUALIFY_PATH,$${DIR},$${MK})))))
+ endif
+
+ # Reset the "current" target to it's previous value.
+ TGT_STACK := $$(call POP,$${TGT_STACK})
+ # If we're about to change targets, create the rules for the target
+ ifneq "$${TGT}" "$$(call PEEK,$${TGT_STACK})"
+ # add rules to build the target, and have "all" depend on it.
+ $$(eval $$(call ADD_TARGET_TO_ALL,$${TGT}))
+
+ # A "hook" to add rules for ${TARGET_DIR}/foo, if TARGET_DIR
+ # is defined. Otherwise, we leave the source directory untouched.
+ $$(eval $$(call ADD_TARGET_DIR,$${TGT}))
+
+ # A "hook" to build the libtool target.
+ $$(eval $$(call ADD_LIBTOOL_TARGET))
+
+ # Choose the correct linker.
+ ifeq "$$(strip $$(filter $${CXX_SRC_EXTS},$${$${TGT}_SOURCES}))" ""
+ ifeq "$${$${TGT}_LINKER}" ""
+ $${TGT}_LINKER := ${LL}$${LINK.c}
+ endif
+ else
+ ifeq "$${$${TGT}_LINKER}" ""
+ $${TGT}_LINKER := ${LL}$${LINK.cxx}
+ endif
+ endif
+
+ # add rules to build the target
+ $$(eval $$(call ADD_TARGET_RULE$${$${TGT}_SUFFIX},$${TGT}))
+
+ # generate the clean rule for this target.
+ $$(eval $$(call ADD_CLEAN_RULE,$${TGT}))
+
+ # Hook to add an installation target
+ $$(eval $$(call ADD_INSTALL_TARGET,$${TGT}))
+
+ # Hook to add a configuration target
+ $$(eval $$(call ADD_TARGET_CONFIG,$${TGT}))
+
+ # "hook" for legacy Makefiles
+ $$(eval $$(call ADD_LEGACY_RULE,$${TGT}))
+ endif
+
+ TGT := $$(call PEEK,$${TGT_STACK})
+
+ # Reset the "current" directory to it's previous value.
+ DIR_STACK := $$(call POP,$${DIR_STACK})
+ DIR := $$(call PEEK,$${DIR_STACK})
+endef
+
+# MIN - Parameterized "function" that results in the minimum lexical value of
+# the two values given.
+define MIN
+$(firstword $(sort ${1} ${2}))
+endef
+
+# PEEK - Parameterized "function" that results in the value at the top of the
+# specified colon-delimited stack.
+define PEEK
+$(lastword $(subst :, ,${1}))
+endef
+
+# POP - Parameterized "function" that pops the top value off of the specified
+# colon-delimited stack, and results in the new value of the stack. Note that
+# the popped value cannot be obtained using this function; use peek for that.
+define POP
+${1:%:$(lastword $(subst :, ,${1}))=%}
+endef
+
+# PUSH - Parameterized "function" that pushes a value onto the specified colon-
+# delimited stack, and results in the new value of the stack.
+define PUSH
+${2:%=${1}:%}
+endef
+
+# QUALIFY_PATH - Given a "root" directory and one or more paths, qualifies the
+# paths using the "root" directory (i.e. appends the root directory name to
+# the paths) except for paths that are absolute.
+define QUALIFY_PATH
+$(addprefix ${1}/,$(filter-out /%,${2})) $(filter /%,${2})
+endef
+
+###############################################################################
+#
+# Start of Makefile Evaluation
+#
+###############################################################################
+
+# Older versions of GNU Make lack capabilities needed by boilermake.
+# With older versions, "make" may simply output "nothing to do", likely leading
+# to confusion. To avoid this, check the version of GNU make up-front and
+# inform the user if their version of make doesn't meet the minimum required.
+MIN_MAKE_VERSION := 3.81
+MIN_MAKE_VER_MSG := boilermake requires GNU Make ${MIN_MAKE_VERSION} or greater
+ifeq "${MAKE_VERSION}" ""
+ $(info GNU Make not detected)
+ $(error ${MIN_MAKE_VER_MSG})
+endif
+ifneq "${MIN_MAKE_VERSION}" "$(call MIN,${MIN_MAKE_VERSION},${MAKE_VERSION})"
+ $(info This is GNU Make version ${MAKE_VERSION})
+ $(error ${MIN_MAKE_VER_MSG})
+endif
+
+# Define the source file extensions that we know how to handle.
+OBJ_EXT := o
+C_SRC_EXTS := %.c
+CXX_SRC_EXTS := %.C %.cc %.cp %.cpp %.CPP %.cxx %.c++
+ALL_SRC_EXTS := ${C_SRC_EXTS} ${CXX_SRC_EXTS}
+
+# Initialize global variables.
+ALL_TGTS :=
+DEFS :=
+DIR_STACK :=
+INCDIRS :=
+TGT_STACK :=
+
+ifeq "${top_builddir}" ""
+ top_builddir := .
+endif
+
+# Ensure that valid values are set for BUILD_DIR
+ifeq "$(strip ${BUILD_DIR})" ""
+ ifeq "${top_builddir}" "${PWD}"
+ BUILD_DIR := build
+ else
+ BUILD_DIR := ${top_builddir}/build
+ endif
+else
+ BUILD_DIR := $(call CANONICAL_PATH,${BUILD_DIR})
+endif
+
+.PHONY: $(BUILD_DIR)
+$(BUILD_DIR):
+ @mkdir -p $@
+
+# Define compilers and linkers
+#
+BOOTSTRAP_BUILD =
+COMPILE.c = ${CC}
+COMPILE.cxx = ${CXX}
+CPP = cc -E
+LINK.c = ${CC}
+LINK.cxx = ${CXX}
+
+# Set ECHO to "true" for *very* quiet builds
+ECHO = echo
+
+# Define the "all" target (which simply builds all user-defined targets) as the
+# default goal.
+.PHONY: all
+all:
+
+# Add "clean" rules to remove all build-generated files.
+.PHONY: clean
+clean:
+
+top_makedir := $(dir $(lastword ${MAKEFILE_LIST}))
+
+-include ${top_makedir}/install.mk
+-include ${top_makedir}/libtool.mk
+
+ifneq "${CPPCHECK}" ""
+CHECKFLAGS := -DCPPCHECK $(filter -isystem%,$(CPPFLAGS) $(CFLAGS)) $(filter -I%,$(CPPFLAGS) $(CFLAGS)) $(filter -D%,$(CPPFLAGS) $(CFLAGS))
+endif
+
+# Include the main user-supplied submakefile. This also recursively includes
+# all other user-supplied submakefiles.
+$(eval $(call INCLUDE_SUBMAKEFILE,${top_builddir}/main.mk))
+
+# Perform post-processing on global variables as needed.
+DEFS := $(addprefix -D,${DEFS})
+INCDIRS := $(addprefix -I,$(call CANONICAL_PATH,${INCDIRS}))
+
+# Add pattern rule(s) for creating compiled object code from C source.
+$(foreach EXT,${C_SRC_EXTS},\
+ $(eval $(call ADD_OBJECT_RULE,${EXT},$${COMPILE_C_CMDS})))
+
+ifneq "${ANALYZE.c}" ""
+$(foreach EXT,${C_SRC_EXTS},\
+ $(eval $(call ADD_ANALYZE_RULE,${EXT},$${ANALYZE_C_CMDS})))
+endif
+
+# Add pattern rule(s) for creating compiled object code from C++ source.
+$(foreach EXT,${CXX_SRC_EXTS},\
+ $(eval $(call ADD_OBJECT_RULE,${EXT},$${COMPILE_CXX_CMDS})))
+
+# Don't include the target dependencies if we're doing a "make clean"
+# Future: have a list of targets that don't require dependency generation,
+# and see if MAKECMDGOALS is one of them.
+ifneq "$(MAKECMDGOALS)" "clean"
+ $(foreach TGT,${ALL_TGTS},\
+ $(eval -include ${${TGT}_DEPS}))
+endif
+
+# Build rules for installation subdirectories
+$(foreach D,$(patsubst %/,%,$(sort $(dir ${ALL_INSTALL}))),\
+ $(eval $(call ADD_INSTALL_RULE.dir,${D})))
+
+
+scan: ${ALL_PLISTS}
+
+.PHONY: clean.scan
+clean.scan:
+ $(Q)rm -rf ${ALL_PLISTS}
+
+clean: clean.scan
diff --git a/scripts/ci/Dockerfile b/scripts/ci/Dockerfile
new file mode 100644
index 0000000..e285936
--- /dev/null
+++ b/scripts/ci/Dockerfile
@@ -0,0 +1,10 @@
+FROM ubuntu:14.04
+RUN apt-get update && apt-get install -y curl
+RUN curl -sSL "https://build.travis-ci.org/files/gpg/couchbase-precise.asc" | sudo -E apt-key add -
+RUN echo "deb http://packages.couchbase.com/ubuntu precise precise/main" | sudo tee -a /etc/apt/sources.list >/dev/null
+RUN echo "deb http://apt.llvm.org/trusty/ llvm-toolchain-trusty-5.0 main" >> /etc/apt/sources.list
+RUN curl https://raw.githubusercontent.com/travis-ci/apt-source-safelist/master/keys/llvm-toolchain-trusty-5.0.asc | apt-key add -
+RUN apt-get update && apt-get -y upgrade
+RUN apt-get -yq --no-install-suggests --no-install-recommends install autoconf build-essential debhelper devscripts dh-make doxygen fakeroot gdb graphviz lintian pbuilder python-dev quilt libruby ruby-dev libcollectdclient-dev firebird-dev freetds-dev libcap-dev libcouchbase2-libevent libcouchbase-dev libcurl4-openssl-dev libgdbm-dev libhiredis-dev libidn11-dev libiodbc2-dev libiodbc2 libjson0 libjson0-dev libkrb5-dev libldap2-dev libmemcached-dev libmysqlclient-dev libpam0g-dev libpcap-dev libpcre3-dev libperl-dev libpq-dev libreadline-dev libsnmp-dev libssl-dev libtalloc-dev libtalloc2-dbg libunbound-dev libwbclient-dev libykclient-dev libyubikey-dev clang-5.0
+RUN update-alternatives --install /usr/bin/clang clang /usr/bin/clang-5.0 100
+WORKDIR /usr/local/src/repositories
diff --git a/scripts/ci/Jenkinsfile b/scripts/ci/Jenkinsfile
new file mode 100644
index 0000000..f82fadc
--- /dev/null
+++ b/scripts/ci/Jenkinsfile
@@ -0,0 +1,66 @@
+// Initialize a variable to hold the matrix of travis builds
+tmatrix = []
+
+/* This function takes a list of tests and builds closures for each test to
+* be run in its own docker container. It's a little strange, and uses a
+* functional programming trick (function currying) to create a closure that
+* can be passed to the "parallel" function, which can only take one argument
+* in this context
+*/
+
+def buildClosures(arg) {
+ println arg.inspect()
+ def travisTests = arg
+ def closures = [:]
+ for (value in travisTests) {
+ final valueCopy = value
+ closures[value] = { testEnv_str ->
+ def(dir,testEnv) = testEnv_str.split(":")
+ stage("$testEnv") {
+ // Docker needs full privileges and capabilites to run the tests
+ // This passes the necessary arguments to "docker run"
+ travisImage.inside("--privileged --cap-add=ALL") {
+ checkout([$class: 'GitSCM',\
+ branches: [[name: scm.branches[0].name]],\
+ doGenerateSubmoduleConfigurations: false,\
+ extensions: [[$class: 'CleanCheckout'],\
+ [$class: 'CleanBeforeCheckout'],\
+ [$class: 'RelativeTargetDirectory', relativeTargetDir: dir]],\
+ submoduleCfg: [], userRemoteConfigs: [[url: 'https://github.com/FreeRADIUS/freeradius-server']]])
+ sh "cd $dir ; export ${testEnv} ; bash scripts/travis/start.sh"
+ }
+ }
+ }.curry(value)
+ }
+ closures
+}
+
+/* This section does three things
+* 1. Checkout the repo for the necessary setup files
+* 2. Reads the test matrix from the .travis.yml and converts it into a list that
+* can be passed to the buildClosures function
+* 3. runs each test matrix under gcc and clang in parallel.
+*/
+
+node {
+ cleanWs()
+ checkout scm
+ travis = readYaml(file: "./.travis.yml")
+ travisImage = docker.build("travis-image-${scm.branches[0].name}", "./scripts/travis/")
+ stage("clang tests") {
+ tmatrix = []
+ c = "clang"
+ travis["env"]["matrix"].eachWithIndex { t,i ->
+ tmatrix << "${c}-${i}:CC=${c} ${t}"
+ }
+ parallel buildClosures(tmatrix)
+ }
+ stage("gcc tests") {
+ tmatrix = []
+ c = "gcc"
+ travis["env"]["matrix"].eachWithIndex { t,i ->
+ tmatrix << "${c}-${i}:CC=${c} ${t}"
+ }
+ parallel buildClosures(tmatrix)
+ }
+}
diff --git a/scripts/ci/eapol_test-build.sh b/scripts/ci/eapol_test-build.sh
new file mode 100755
index 0000000..42397e2
--- /dev/null
+++ b/scripts/ci/eapol_test-build.sh
@@ -0,0 +1,121 @@
+#!/bin/bash
+
+#
+# This program is is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or (at
+# your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA
+#
+# Copyright 2015 Arran Cudbard-Bell <a.cudbardb@freeradius.org>
+#
+
+#
+# Extremely basic script for building eapol_test from hostapd's main branch
+#
+# On success will write progress to stderr, and a path to the eapol_test
+# binary to stdout, exiting with 0.
+#
+# On error will exit with 1.
+#
+# Note: We don't always build eapol_test. If a copy is already present on the
+# system we use that in preference. To always build eapol_test, set
+# FORCE_BUILD=1 in the environment.
+#
+
+TMP_BUILD_DIR="${BUILD_DIR}"
+: ${TMP_BUILD_DIR:="$(mktemp -d -t eapol_test.XXXXX)"}
+: ${HOSTAPD_DIR:="${TMP_BUILD_DIR}/hostapd"}
+: ${HOSTAPD_GIT_BRANCH:="hostap_2_10"}
+#: ${HOSTAPD_GIT_COMMIT:=""}
+: ${WPA_SUPPLICANT_DIR:="${HOSTAPD_DIR}/wpa_supplicant"}
+
+: ${BUILD_CONF_DIR:="$(dirname $0)/eapol_test"}
+: ${EAPOL_TEST_PATH:="${BUILD_CONF_DIR}/eapol_test"}
+
+if [ -z "${FORCE_BUILD}" ]; then
+ if [ -e "${EAPOL_TEST_PATH}" ]; then
+ echo "${EAPOL_TEST_PATH}"
+ exit 0
+ fi
+
+ WHICH_EAPOL_TEST="$(which eapol_test)"
+ if [ ! -z "${WHICH_EAPOL_TEST}" ]; then
+ echo "${WHICH_EAPOL_TEST}"
+ exit 0
+ fi
+fi
+
+#
+# If OpenSSL 3.x
+#
+if openssl version | grep -q "OpenSSL 3\."; then
+ export EAPOL_TEST_CFLAGS="${EAPOL_TEST_CFLAGS} -DOPENSSL_USE_DEPRECATED -DOPENSSL_API_COMPAT=0x10101000L"
+ echo "WARNING: Building against OpenSSL 3, setting:" 1>&2
+ echo " EAPOL_TEST_CFLAGS='${EAPOL_TEST_CFLAGS}'" 1>&2
+ echo " EAPOL_TEST_LDFLAGS='${EAPOL_TEST_LDFLAGS}'" 1>&2
+fi
+
+case "$OSTYPE" in
+linux-gnu)
+ BUILD_CONF_FILE="${BUILD_CONF_DIR}/config_linux"
+ ;;
+
+darwin*)
+ BUILD_CONF_FILE="${BUILD_CONF_DIR}/config_osx"
+ ;;
+
+freebsd*)
+ BUILD_CONF_FILE="${BUILD_CONF_DIR}/config_freebsd"
+ ;;
+
+*)
+	echo "Don't have specific eapol_test build config for OS $OSTYPE. Using linux build config"
+	BUILD_CONF_FILE="${BUILD_CONF_DIR}/config_linux"
+ ;;
+esac
+
+if [ ! -e "${BUILD_CONF_FILE}" ]; then
+ echo "Missing build config file \"${BUILD_CONF_FILE}\" for OS $OSTYPE, please contribute one" 1>&2
+ exit 1
+fi
+
+# Shallow clone so we don't use all Jouni's bandwidth
+CLONE_DEPTH="--depth 1"
+# Unless we want a specific commit, in which case there's no way to grab it directly
+[ -z "${HOSTAPD_GIT_COMMIT}" ] || CLONE_DEPTH=""
+
+if ! [ -e "${HOSTAPD_DIR}/.git" ] && ! git clone --branch "${HOSTAPD_GIT_BRANCH}" ${CLONE_DEPTH} http://w1.fi/hostap.git 1>&2 "${TMP_BUILD_DIR}/hostapd"; then
+ echo "Failed cloning hostapd" 1>&2
+ if [ -z "${BUILD_DIR}" ]; then rm -rf "$TMP_BUILD_DIR"; fi
+ exit 1
+fi
+
+if [ -n "$HOSTAPD_GIT_COMMIT" ]; then
+ if ! git --work-tree="${TMP_BUILD_DIR}/hostapd" --git-dir="${TMP_BUILD_DIR}/hostapd/.git" checkout "${HOSTAPD_GIT_COMMIT}"; then
+ echo "Unable to check out hostapd commit ${HOSTAPD_GIT_COMMIT}" 1>&2
+ if [ -z "${BUILD_DIR}" ]; then rm -rf "$TMP_BUILD_DIR"; fi
+ exit 1
+ fi
+fi
+
+cp "$BUILD_CONF_FILE" "$WPA_SUPPLICANT_DIR/.config"
+
+if ! make -C "${WPA_SUPPLICANT_DIR}" -j8 eapol_test 1>&2 || [ ! -e "${WPA_SUPPLICANT_DIR}/eapol_test" ]; then
+ echo "Build error" 1>&2
+ if [ -z "${BUILD_DIR}" ]; then rm -rf "$TMP_BUILD_DIR"; fi
+ exit 1
+fi
+
+cp "${WPA_SUPPLICANT_DIR}/eapol_test" "${EAPOL_TEST_PATH}"
+
+echo "${EAPOL_TEST_PATH}"
+if [ -z "${BUILD_DIR}" ]; then rm -rf "$TMP_BUILD_DIR"; fi
diff --git a/scripts/ci/eapol_test/.gitignore b/scripts/ci/eapol_test/.gitignore
new file mode 100644
index 0000000..6a537cb
--- /dev/null
+++ b/scripts/ci/eapol_test/.gitignore
@@ -0,0 +1 @@
+eapol_test
diff --git a/scripts/ci/eapol_test/config_freebsd b/scripts/ci/eapol_test/config_freebsd
new file mode 100644
index 0000000..8cab937
--- /dev/null
+++ b/scripts/ci/eapol_test/config_freebsd
@@ -0,0 +1,520 @@
+# Example wpa_supplicant build time configuration
+#
+# This file lists the configuration options that are used when building the
+# hostapd binary. All lines starting with # are ignored. Configuration option
+# lines must be commented out complete, if they are not to be included, i.e.,
+# just setting VARIABLE=n is not disabling that variable.
+#
+# This file is included in Makefile, so variables like CFLAGS and LIBS can also
+# be modified from here. In most cases, these lines should use += in order not
+# to override previous values of the variables.
+
+CFLAGS += -g3 -O0 -Wno-error=deprecated-declarations $(EAPOL_TEST_CFLAGS)
+LIBS += $(EAPOL_TEST_LDFLAGS)
+
+#
+# Disable some warnings only against CLANG
+#
+ifeq ($(shell $(CC) -v 2>&1 | grep -c "clang version"), 1)
+CFLAGS += -Wno-error=void-pointer-to-enum-cast -Wno-error=ignored-qualifiers
+endif
+
+# Some Red Hat versions seem to include kerberos header files from OpenSSL, but
+# the kerberos files are not in the default include path. Following line can be
+# used to fix build issues on such systems (krb5.h not found).
+#CFLAGS += -I/usr/include/kerberos
+
+# Driver interface for generic Linux wireless extensions
+# Note: WEXT is deprecated in the current Linux kernel version and no new
+# functionality is added to it. nl80211-based interface is the new
+# replacement for WEXT and its use allows wpa_supplicant to properly control
+# the driver to improve existing functionality like roaming and to support new
+# functionality.
+CONFIG_DRIVER_WEXT=n
+
+# Driver interface for Linux drivers using the nl80211 kernel interface
+CONFIG_DRIVER_NL80211=n
+
+# QCA vendor extensions to nl80211
+CONFIG_DRIVER_NL80211_QCA=n
+
+# driver_nl80211.c requires libnl. If you are compiling it yourself
+# you may need to point hostapd to your version of libnl.
+#
+#CFLAGS += -I$<path to libnl include files>
+#LIBS += -L$<path to libnl library files>
+
+# Use libnl v2.0 (or 3.0) libraries.
+#CONFIG_LIBNL20=y
+
+# Use libnl 3.2 libraries (if this is selected, CONFIG_LIBNL20 is ignored)
+#CONFIG_LIBNL32=y
+
+
+# Driver interface for FreeBSD net80211 layer (e.g., Atheros driver)
+#CONFIG_DRIVER_BSD=y
+#CFLAGS += -I/usr/local/include
+#LIBS += -L/usr/local/lib
+#LIBS_p += -L/usr/local/lib
+#LIBS_c += -L/usr/local/lib
+
+# Driver interface for Windows NDIS
+#CONFIG_DRIVER_NDIS=y
+#CFLAGS += -I/usr/include/w32api/ddk
+#LIBS += -L/usr/local/lib
+# For native build using mingw
+#CONFIG_NATIVE_WINDOWS=y
+# Additional directories for cross-compilation on Linux host for mingw target
+#CFLAGS += -I/opt/mingw/mingw32/include/ddk
+#LIBS += -L/opt/mingw/mingw32/lib
+#CC=mingw32-gcc
+# By default, driver_ndis uses WinPcap for low-level operations. This can be
+# replaced with the following option which replaces WinPcap calls with NDISUIO.
+# However, this requires that WZC is disabled (net stop wzcsvc) before starting
+# wpa_supplicant.
+# CONFIG_USE_NDISUIO=y
+
+# Driver interface for wired Ethernet drivers
+CONFIG_DRIVER_WIRED=y
+
+# Driver interface for the Broadcom RoboSwitch family
+#CONFIG_DRIVER_ROBOSWITCH=y
+
+# Driver interface for no driver (e.g., WPS ER only)
+#CONFIG_DRIVER_NONE=y
+
+# Solaris libraries
+#LIBS += -lsocket -ldlpi -lnsl
+#LIBS_c += -lsocket
+
+# Enable IEEE 802.1X Supplicant (automatically included if any EAP method is
+# included)
+CONFIG_IEEE8021X_EAPOL=y
+
+# EAP-MD5
+CONFIG_EAP_MD5=y
+
+# EAP-MSCHAPv2
+CONFIG_EAP_MSCHAPV2=y
+
+# EAP-TLS
+CONFIG_EAP_TLS=y
+
+# EAP-PEAP
+CONFIG_EAP_PEAP=y
+
+# EAP-TTLS
+CONFIG_EAP_TTLS=y
+
+# EAP-FAST
+# Note: If OpenSSL is used as the TLS library, OpenSSL 1.0 or newer is needed
+# for EAP-FAST support. Older OpenSSL releases would need to be patched, e.g.,
+# with openssl-0.9.8x-tls-extensions.patch, to add the needed functions.
+CONFIG_EAP_FAST=y
+
+# EAP-GTC
+CONFIG_EAP_GTC=y
+
+# EAP-OTP
+CONFIG_EAP_OTP=y
+
+# EAP-SIM (enable CONFIG_PCSC, if EAP-SIM is used)
+#CONFIG_EAP_SIM=y
+
+# EAP-PSK (experimental; this is _not_ needed for WPA-PSK)
+CONFIG_EAP_PSK=y
+
+# EAP-pwd (secure authentication using only a password)
+CONFIG_EAP_PWD=y
+
+# EAP-PAX
+CONFIG_EAP_PAX=y
+
+# LEAP
+CONFIG_EAP_LEAP=n
+
+# EAP-AKA (enable CONFIG_PCSC, if EAP-AKA is used)
+CONFIG_EAP_AKA=y
+
+# EAP-AKA' (enable CONFIG_PCSC, if EAP-AKA' is used).
+# This requires CONFIG_EAP_AKA to be enabled, too.
+CONFIG_EAP_AKA_PRIME=y
+
+# Enable USIM simulator (Milenage) for EAP-AKA
+CONFIG_USIM_SIMULATOR=y
+
+# Enable SIM simulator (Milenage) for EAP-SIM
+CONFIG_SIM_SIMULATOR=y
+
+# EAP-SAKE
+CONFIG_EAP_SAKE=y
+
+# EAP-GPSK
+CONFIG_EAP_GPSK=y
+# Include support for optional SHA256 cipher suite in EAP-GPSK
+CONFIG_EAP_GPSK_SHA256=y
+
+# EAP-TNC and related Trusted Network Connect support (experimental)
+CONFIG_EAP_TNC=y
+
+# Wi-Fi Protected Setup (WPS)
+#CONFIG_WPS=y
+# Enable WPS external registrar functionality
+#CONFIG_WPS_ER=y
+# Disable credentials for an open network by default when acting as a WPS
+# registrar.
+#CONFIG_WPS_REG_DISABLE_OPEN=y
+# Enable WPS support with NFC config method
+#CONFIG_WPS_NFC=y
+
+# EAP-IKEv2
+CONFIG_EAP_IKEV2=y
+
+# EAP-EKE
+CONFIG_EAP_EKE=y
+
+# PKCS#12 (PFX) support (used to read private key and certificate file from
+# a file that usually has extension .p12 or .pfx)
+CONFIG_PKCS12=y
+
+# Smartcard support (i.e., private key on a smartcard), e.g., with openssl
+# engine.
+CONFIG_SMARTCARD=y
+
+# PC/SC interface for smartcards (USIM, GSM SIM)
+# Enable this if EAP-SIM or EAP-AKA is included
+#CONFIG_PCSC=y
+
+# Support HT overrides (disable HT/HT40, mask MCS rates, etc.)
+#CONFIG_HT_OVERRIDES=y
+
+# Support VHT overrides (disable VHT, mask MCS rates, etc.)
+#CONFIG_VHT_OVERRIDES=y
+
+# Development testing
+CONFIG_EAPOL_TEST=y
+
+# Select control interface backend for external programs, e.g, wpa_cli:
+# unix = UNIX domain sockets (default for Linux/*BSD)
+# udp = UDP sockets using localhost (127.0.0.1)
+# udp6 = UDP IPv6 sockets using localhost (::1)
+# named_pipe = Windows Named Pipe (default for Windows)
+# udp-remote = UDP sockets with remote access (only for tests systems/purpose)
+# udp6-remote = UDP IPv6 sockets with remote access (only for tests purpose)
+# y = use default (backwards compatibility)
+# If this option is commented out, control interface is not included in the
+# build.
+CONFIG_CTRL_IFACE=y
+
+# Include support for GNU Readline and History Libraries in wpa_cli.
+# When building a wpa_cli binary for distribution, please note that these
+# libraries are licensed under GPL and as such, BSD license may not apply for
+# the resulting binary.
+#CONFIG_READLINE=y
+
+# Include internal line edit mode in wpa_cli. This can be used as a replacement
+# for GNU Readline to provide limited command line editing and history support.
+#CONFIG_WPA_CLI_EDIT=y
+
+# Remove debugging code that is printing out debug message to stdout.
+# This can be used to reduce the size of the wpa_supplicant considerably
+# if debugging code is not needed. The size reduction can be around 35%
+# (e.g., 90 kB).
+#CONFIG_NO_STDOUT_DEBUG=y
+
+# Remove WPA support, e.g., for wired-only IEEE 802.1X supplicant, to save
+# 35-50 kB in code size.
+#CONFIG_NO_WPA=y
+
+# Remove IEEE 802.11i/WPA-Personal ASCII passphrase support
+# This option can be used to reduce code size by removing support for
+# converting ASCII passphrases into PSK. If this functionality is removed, the
+# PSK can only be configured as the 64-octet hexstring (e.g., from
+# wpa_passphrase). This saves about 0.5 kB in code size.
+#CONFIG_NO_WPA_PASSPHRASE=y
+
+# Disable scan result processing (ap_mode=1) to save code size by about 1 kB.
+# This can be used if ap_scan=1 mode is never enabled.
+#CONFIG_NO_SCAN_PROCESSING=y
+
+# Select configuration backend:
+# file = text file (e.g., wpa_supplicant.conf; note: the configuration file
+# path is given on command line, not here; this option is just used to
+# select the backend that allows configuration files to be used)
+# winreg = Windows registry (see win_example.reg for an example)
+CONFIG_BACKEND=file
+
+# Remove configuration write functionality (i.e., to allow the configuration
+# file to be updated based on runtime configuration changes). The runtime
+# configuration can still be changed, the changes are just not going to be
+# persistent over restarts. This option can be used to reduce code size by
+# about 3.5 kB.
+#CONFIG_NO_CONFIG_WRITE=y
+
+# Remove support for configuration blobs to reduce code size by about 1.5 kB.
+#CONFIG_NO_CONFIG_BLOBS=y
+
+# Select program entry point implementation:
+# main = UNIX/POSIX like main() function (default)
+# main_winsvc = Windows service (read parameters from registry)
+# main_none = Very basic example (development use only)
+#CONFIG_MAIN=main
+
+# Select wrapper for operating system and C library specific functions
+# unix = UNIX/POSIX like systems (default)
+# win32 = Windows systems
+# none = Empty template
+#CONFIG_OS=unix
+
+# Select event loop implementation
+# eloop = select() loop (default)
+# eloop_win = Windows events and WaitForMultipleObject() loop
+#CONFIG_ELOOP=eloop
+
+# Should we use poll instead of select? Select is used by default.
+#CONFIG_ELOOP_POLL=y
+
+# Should we use epoll instead of select? Select is used by default.
+#CONFIG_ELOOP_EPOLL=y
+
+# Select layer 2 packet implementation
+# linux = Linux packet socket (default)
+# pcap = libpcap/libdnet/WinPcap
+# freebsd = FreeBSD libpcap
+# winpcap = WinPcap with receive thread
+# ndis = Windows NDISUIO (note: requires CONFIG_USE_NDISUIO=y)
+# none = Empty template
+CONFIG_L2_PACKET=freebsd
+
+# PeerKey handshake for Station to Station Link (IEEE 802.11e DLS)
+CONFIG_PEERKEY=y
+
+# IEEE 802.11w (management frame protection), also known as PMF
+# Driver support is also needed for IEEE 802.11w.
+#CONFIG_IEEE80211W=y
+
+# Select TLS implementation
+# openssl = OpenSSL (default)
+# gnutls = GnuTLS
+# internal = Internal TLSv1 implementation (experimental)
+# none = Empty template
+CONFIG_TLS=openssl
+
+# TLS-based EAP methods require at least TLS v1.0. Newer version of TLS (v1.1)
+# can be enabled to get a stronger construction of messages when block ciphers
+# are used. It should be noted that some existing TLS v1.0 -based
+# implementation may not be compatible with TLS v1.1 message (ClientHello is
+# sent prior to negotiating which version will be used)
+#CONFIG_TLSV11=y
+
+# TLS-based EAP methods require at least TLS v1.0. Newer version of TLS (v1.2)
+# can be enabled to enable use of stronger crypto algorithms. It should be
+# noted that some existing TLS v1.0 -based implementation may not be compatible
+# with TLS v1.2 message (ClientHello is sent prior to negotiating which version
+# will be used)
+CONFIG_TLSV12=y
+
+# If CONFIG_TLS=internal is used, additional library and include paths are
+# needed for LibTomMath. Alternatively, an integrated, minimal version of
+# LibTomMath can be used. See beginning of libtommath.c for details on benefits
+# and drawbacks of this option.
+#CONFIG_INTERNAL_LIBTOMMATH=y
+#ifndef CONFIG_INTERNAL_LIBTOMMATH
+#LTM_PATH=/usr/src/libtommath-0.39
+#CFLAGS += -I$(LTM_PATH)
+#LIBS += -L$(LTM_PATH)
+#LIBS_p += -L$(LTM_PATH)
+#endif
+# At the cost of about 4 kB of additional binary size, the internal LibTomMath
+# can be configured to include faster routines for exptmod, sqr, and div to
+# speed up DH and RSA calculation considerably
+#CONFIG_INTERNAL_LIBTOMMATH_FAST=y
+
+# Include NDIS event processing through WMI into wpa_supplicant/wpasvc.
+# This is only for Windows builds and requires WMI-related header files and
+# WbemUuid.Lib from Platform SDK even when building with MinGW.
+#CONFIG_NDIS_EVENTS_INTEGRATED=y
+#PLATFORMSDKLIB="/opt/Program Files/Microsoft Platform SDK/Lib"
+
+# Add support for old DBus control interface
+# (fi.epitest.hostap.WPASupplicant)
+#CONFIG_CTRL_IFACE_DBUS=y
+
+# Add support for new DBus control interface
+# (fi.w1.hostap.wpa_supplicant1)
+#CONFIG_CTRL_IFACE_DBUS_NEW=y
+
+# Add introspection support for new DBus control interface
+#CONFIG_CTRL_IFACE_DBUS_INTRO=y
+
+# Add support for loading EAP methods dynamically as shared libraries.
+# When this option is enabled, each EAP method can be either included
+# statically (CONFIG_EAP_<method>=y) or dynamically (CONFIG_EAP_<method>=dyn).
+# Dynamic EAP methods are build as shared objects (eap_*.so) and they need to
+# be loaded in the beginning of the wpa_supplicant configuration file
+# (see load_dynamic_eap parameter in the example file) before being used in
+# the network blocks.
+#
+# Note that some shared parts of EAP methods are included in the main program
+# and in order to be able to use dynamic EAP methods using these parts, the
+# main program must have been build with the EAP method enabled (=y or =dyn).
+# This means that EAP-TLS/PEAP/TTLS/FAST cannot be added as dynamic libraries
+# unless at least one of them was included in the main build to force inclusion
+# of the shared code. Similarly, at least one of EAP-SIM/AKA must be included
+# in the main build to be able to load these methods dynamically.
+#
+# Please also note that using dynamic libraries will increase the total binary
+# size. Thus, it may not be the best option for targets that have limited
+# amount of memory/flash.
+#CONFIG_DYNAMIC_EAP_METHODS=y
+
+# IEEE Std 802.11r-2008 (Fast BSS Transition)
+#CONFIG_IEEE80211R=y
+
+# Add support for writing debug log to a file (/tmp/wpa_supplicant-log-#.txt)
+#CONFIG_DEBUG_FILE=y
+
+# Send debug messages to syslog instead of stdout
+#CONFIG_DEBUG_SYSLOG=y
+# Set syslog facility for debug messages
+#CONFIG_DEBUG_SYSLOG_FACILITY=LOG_DAEMON
+
+# Add support for sending all debug messages (regardless of debug verbosity)
+# to the Linux kernel tracing facility. This helps debug the entire stack by
+# making it easy to record everything happening from the driver up into the
+# same file, e.g., using trace-cmd.
+#CONFIG_DEBUG_LINUX_TRACING=y
+
+# Add support for writing debug log to Android logcat instead of standard
+# output
+#CONFIG_ANDROID_LOG=y
+
+# Enable privilege separation (see README 'Privilege separation' for details)
+#CONFIG_PRIVSEP=y
+
+# Enable mitigation against certain attacks against TKIP by delaying Michael
+# MIC error reports by a random amount of time between 0 and 60 seconds
+#CONFIG_DELAYED_MIC_ERROR_REPORT=y
+
+# Enable tracing code for developer debugging
+# This tracks use of memory allocations and other registrations and reports
+# incorrect use with a backtrace of call (or allocation) location.
+#CONFIG_WPA_TRACE=y
+# For BSD, uncomment these.
+#LIBS += -lexecinfo
+#LIBS_p += -lexecinfo
+#LIBS_c += -lexecinfo
+
+# Use libbfd to get more details for developer debugging
+# This enables use of libbfd to get more detailed symbols for the backtraces
+# generated by CONFIG_WPA_TRACE=y.
+#CONFIG_WPA_TRACE_BFD=y
+# For BSD, uncomment these.
+#LIBS += -lbfd -liberty -lz
+#LIBS_p += -lbfd -liberty -lz
+#LIBS_c += -lbfd -liberty -lz
+
+# wpa_supplicant depends on strong random number generation being available
+# from the operating system. os_get_random() function is used to fetch random
+# data when needed, e.g., for key generation. On Linux and BSD systems, this
+# works by reading /dev/urandom. It should be noted that the OS entropy pool
+# needs to be properly initialized before wpa_supplicant is started. This is
+# important especially on embedded devices that do not have a hardware random
+# number generator and may by default start up with minimal entropy available
+# for random number generation.
+#
+# As a safety net, wpa_supplicant is by default trying to internally collect
+# additional entropy for generating random data to mix in with the data fetched
+# from the OS. This by itself is not considered to be very strong, but it may
+# help in cases where the system pool is not initialized properly. However, it
+# is very strongly recommended that the system pool is initialized with enough
+# entropy either by using hardware assisted random number generator or by
+# storing state over device reboots.
+#
+# wpa_supplicant can be configured to maintain its own entropy store over
+# restarts to enhance random number generation. This is not perfect, but it is
+# much more secure than using the same sequence of random numbers after every
+# reboot. This can be enabled with -e<entropy file> command line option. The
+# specified file needs to be readable and writable by wpa_supplicant.
+#
+# If the os_get_random() is known to provide strong random data (e.g., on
+# Linux/BSD, the board in question is known to have reliable source of random
+# data from /dev/urandom), the internal wpa_supplicant random pool can be
+# disabled. This will save some in binary size and CPU use. However, this
+# should only be considered for builds that are known to be used on devices
+# that meet the requirements described above.
+#CONFIG_NO_RANDOM_POOL=y
+
+# IEEE 802.11n (High Throughput) support (mainly for AP mode)
+#CONFIG_IEEE80211N=y
+
+# IEEE 802.11ac (Very High Throughput) support (mainly for AP mode)
+# (depends on CONFIG_IEEE80211N)
+#CONFIG_IEEE80211AC=y
+
+# Wireless Network Management (IEEE Std 802.11v-2011)
+# Note: This is experimental and not complete implementation.
+#CONFIG_WNM=y
+
+# Interworking (IEEE 802.11u)
+# This can be used to enable functionality to improve interworking with
+# external networks (GAS/ANQP to learn more about the networks and network
+# selection based on available credentials).
+#CONFIG_INTERWORKING=y
+
+# Hotspot 2.0
+#CONFIG_HS20=y
+
+# Disable roaming in wpa_supplicant
+#CONFIG_NO_ROAMING=y
+
+# AP mode operations with wpa_supplicant
+# This can be used for controlling AP mode operations with wpa_supplicant. It
+# should be noted that this is mainly aimed at simple cases like
+# WPA2-Personal while more complex configurations like WPA2-Enterprise with an
+# external RADIUS server can be supported with hostapd.
+#CONFIG_AP=y
+
+# P2P (Wi-Fi Direct)
+# This can be used to enable P2P support in wpa_supplicant. See README-P2P for
+# more information on P2P operations.
+#CONFIG_P2P=y
+
+# Enable TDLS support
+#CONFIG_TDLS=y
+
+# Wi-Fi Direct
+# This can be used to enable Wi-Fi Direct extensions for P2P using an external
+# program to control the additional information exchanges in the messages.
+#CONFIG_WIFI_DISPLAY=y
+
+# Autoscan
+# This can be used to enable automatic scan support in wpa_supplicant.
+# See wpa_supplicant.conf for more information on autoscan usage.
+#
+# Enabling directly a module will enable autoscan support.
+# For exponential module:
+#CONFIG_AUTOSCAN_EXPONENTIAL=y
+# For periodic module:
+#CONFIG_AUTOSCAN_PERIODIC=y
+
+# Password (and passphrase, etc.) backend for external storage
+# These optional mechanisms can be used to add support for storing passwords
+# and other secrets in external (to wpa_supplicant) location. This allows, for
+# example, operating system specific key storage to be used
+#
+# External password backend for testing purposes (developer use)
+#CONFIG_EXT_PASSWORD_TEST=y
+
+# Enable Fast Session Transfer (FST)
+#CONFIG_FST=y
+
+# Enable CLI commands for FST testing
+#CONFIG_FST_TEST=y
+
+# OS X builds. This is only for building eapol_test.
+#CONFIG_OSX=y
+
+# EAP-FAST used to require OpenSSL patches, so it's not on by default.
+# enable it.
+CONFIG_EAP_FAST=y
diff --git a/scripts/ci/eapol_test/config_linux b/scripts/ci/eapol_test/config_linux
new file mode 100644
index 0000000..e53e05a
--- /dev/null
+++ b/scripts/ci/eapol_test/config_linux
@@ -0,0 +1,520 @@
+# Example wpa_supplicant build time configuration
+#
+# This file lists the configuration options that are used when building the
+# hostapd binary. All lines starting with # are ignored. Configuration option
+# lines must be commented out completely, if they are not to be included, i.e.,
+# just setting VARIABLE=n is not disabling that variable.
+#
+# This file is included in Makefile, so variables like CFLAGS and LIBS can also
+# be modified from here. In most cases, these lines should use += in order not
+# to override previous values of the variables.
+
+CFLAGS += -g3 -O0 -Wno-error=deprecated-declarations $(EAPOL_TEST_CFLAGS)
+LIBS += $(EAPOL_TEST_LDFLAGS)
+
+#
+# Disable some warnings only against CLANG
+#
+ifeq ($(shell $(CC) -v 2>&1 | grep -c "clang version"), 1)
+CFLAGS += -Wno-error=ignored-qualifiers
+endif
+
+# Some Red Hat versions seem to include kerberos header files from OpenSSL, but
+# the kerberos files are not in the default include path. Following line can be
+# used to fix build issues on such systems (krb5.h not found).
+#CFLAGS += -I/usr/include/kerberos
+
+# Driver interface for generic Linux wireless extensions
+# Note: WEXT is deprecated in the current Linux kernel version and no new
+# functionality is added to it. nl80211-based interface is the new
+# replacement for WEXT and its use allows wpa_supplicant to properly control
+# the driver to improve existing functionality like roaming and to support new
+# functionality.
+CONFIG_DRIVER_WEXT=n
+
+# Driver interface for Linux drivers using the nl80211 kernel interface
+CONFIG_DRIVER_NL80211=n
+
+# QCA vendor extensions to nl80211
+CONFIG_DRIVER_NL80211_QCA=n
+
+# driver_nl80211.c requires libnl. If you are compiling it yourself
+# you may need to point hostapd to your version of libnl.
+#
+#CFLAGS += -I$<path to libnl include files>
+#LIBS += -L$<path to libnl library files>
+
+# Use libnl v2.0 (or 3.0) libraries.
+#CONFIG_LIBNL20=y
+
+# Use libnl 3.2 libraries (if this is selected, CONFIG_LIBNL20 is ignored)
+CONFIG_LIBNL32=y
+
+
+# Driver interface for FreeBSD net80211 layer (e.g., Atheros driver)
+#CONFIG_DRIVER_BSD=y
+#CFLAGS += -I/usr/local/include
+#LIBS += -L/usr/local/lib
+#LIBS_p += -L/usr/local/lib
+#LIBS_c += -L/usr/local/lib
+
+# Driver interface for Windows NDIS
+#CONFIG_DRIVER_NDIS=y
+#CFLAGS += -I/usr/include/w32api/ddk
+#LIBS += -L/usr/local/lib
+# For native build using mingw
+#CONFIG_NATIVE_WINDOWS=y
+# Additional directories for cross-compilation on Linux host for mingw target
+#CFLAGS += -I/opt/mingw/mingw32/include/ddk
+#LIBS += -L/opt/mingw/mingw32/lib
+#CC=mingw32-gcc
+# By default, driver_ndis uses WinPcap for low-level operations. This can be
+# replaced with the following option which replaces WinPcap calls with NDISUIO.
+# However, this requires that WZC is disabled (net stop wzcsvc) before starting
+# wpa_supplicant.
+# CONFIG_USE_NDISUIO=y
+
+# Driver interface for wired Ethernet drivers
+CONFIG_DRIVER_WIRED=y
+
+# Driver interface for the Broadcom RoboSwitch family
+#CONFIG_DRIVER_ROBOSWITCH=y
+
+# Driver interface for no driver (e.g., WPS ER only)
+#CONFIG_DRIVER_NONE=y
+
+# Solaris libraries
+#LIBS += -lsocket -ldlpi -lnsl
+#LIBS_c += -lsocket
+
+# Enable IEEE 802.1X Supplicant (automatically included if any EAP method is
+# included)
+CONFIG_IEEE8021X_EAPOL=y
+
+# EAP-MD5
+CONFIG_EAP_MD5=y
+
+# EAP-MSCHAPv2
+CONFIG_EAP_MSCHAPV2=y
+
+# EAP-TLS
+CONFIG_EAP_TLS=y
+
+# EAP-PEAP
+CONFIG_EAP_PEAP=y
+
+# EAP-TTLS
+CONFIG_EAP_TTLS=y
+
+# EAP-FAST
+# Note: If OpenSSL is used as the TLS library, OpenSSL 1.0 or newer is needed
+# for EAP-FAST support. Older OpenSSL releases would need to be patched, e.g.,
+# with openssl-0.9.8x-tls-extensions.patch, to add the needed functions.
+CONFIG_EAP_FAST=y
+
+# EAP-GTC
+CONFIG_EAP_GTC=y
+
+# EAP-OTP
+CONFIG_EAP_OTP=y
+
+# EAP-SIM (enable CONFIG_PCSC, if EAP-SIM is used)
+CONFIG_EAP_SIM=y
+
+# EAP-PSK (experimental; this is _not_ needed for WPA-PSK)
+CONFIG_EAP_PSK=y
+
+# EAP-pwd (secure authentication using only a password)
+CONFIG_EAP_PWD=y
+
+# EAP-PAX
+CONFIG_EAP_PAX=y
+
+# LEAP
+CONFIG_EAP_LEAP=n
+
+# EAP-AKA (enable CONFIG_PCSC, if EAP-AKA is used)
+CONFIG_EAP_AKA=y
+
+# EAP-AKA' (enable CONFIG_PCSC, if EAP-AKA' is used).
+# This requires CONFIG_EAP_AKA to be enabled, too.
+CONFIG_EAP_AKA_PRIME=y
+
+# Enable USIM simulator (Milenage) for EAP-AKA
+CONFIG_USIM_SIMULATOR=y
+
+# Enable SIM simulator (Milenage) for EAP-SIM
+CONFIG_SIM_SIMULATOR=y
+
+# EAP-SAKE
+CONFIG_EAP_SAKE=y
+
+# EAP-GPSK
+CONFIG_EAP_GPSK=y
+# Include support for optional SHA256 cipher suite in EAP-GPSK
+CONFIG_EAP_GPSK_SHA256=y
+
+# EAP-TNC and related Trusted Network Connect support (experimental)
+CONFIG_EAP_TNC=y
+
+# Wi-Fi Protected Setup (WPS)
+#CONFIG_WPS=y
+# Enable WPS external registrar functionality
+#CONFIG_WPS_ER=y
+# Disable credentials for an open network by default when acting as a WPS
+# registrar.
+#CONFIG_WPS_REG_DISABLE_OPEN=y
+# Enable WPS support with NFC config method
+#CONFIG_WPS_NFC=y
+
+# EAP-IKEv2
+CONFIG_EAP_IKEV2=y
+
+# EAP-EKE
+CONFIG_EAP_EKE=y
+
+# PKCS#12 (PFX) support (used to read private key and certificate file from
+# a file that usually has extension .p12 or .pfx)
+CONFIG_PKCS12=y
+
+# Smartcard support (i.e., private key on a smartcard), e.g., with openssl
+# engine.
+CONFIG_SMARTCARD=y
+
+# PC/SC interface for smartcards (USIM, GSM SIM)
+# Enable this if EAP-SIM or EAP-AKA is included
+#CONFIG_PCSC=y
+
+# Support HT overrides (disable HT/HT40, mask MCS rates, etc.)
+#CONFIG_HT_OVERRIDES=y
+
+# Support VHT overrides (disable VHT, mask MCS rates, etc.)
+#CONFIG_VHT_OVERRIDES=y
+
+# Development testing
+CONFIG_EAPOL_TEST=y
+
+# Select control interface backend for external programs, e.g., wpa_cli:
+# unix = UNIX domain sockets (default for Linux/*BSD)
+# udp = UDP sockets using localhost (127.0.0.1)
+# udp6 = UDP IPv6 sockets using localhost (::1)
+# named_pipe = Windows Named Pipe (default for Windows)
+# udp-remote = UDP sockets with remote access (only for tests systems/purpose)
+# udp6-remote = UDP IPv6 sockets with remote access (only for tests purpose)
+# y = use default (backwards compatibility)
+# If this option is commented out, control interface is not included in the
+# build.
+CONFIG_CTRL_IFACE=y
+
+# Include support for GNU Readline and History Libraries in wpa_cli.
+# When building a wpa_cli binary for distribution, please note that these
+# libraries are licensed under GPL and as such, BSD license may not apply for
+# the resulting binary.
+#CONFIG_READLINE=y
+
+# Include internal line edit mode in wpa_cli. This can be used as a replacement
+# for GNU Readline to provide limited command line editing and history support.
+#CONFIG_WPA_CLI_EDIT=y
+
+# Remove debugging code that is printing out debug message to stdout.
+# This can be used to reduce the size of the wpa_supplicant considerably
+# if debugging code is not needed. The size reduction can be around 35%
+# (e.g., 90 kB).
+#CONFIG_NO_STDOUT_DEBUG=y
+
+# Remove WPA support, e.g., for wired-only IEEE 802.1X supplicant, to save
+# 35-50 kB in code size.
+#CONFIG_NO_WPA=y
+
+# Remove IEEE 802.11i/WPA-Personal ASCII passphrase support
+# This option can be used to reduce code size by removing support for
+# converting ASCII passphrases into PSK. If this functionality is removed, the
+# PSK can only be configured as the 64-octet hexstring (e.g., from
+# wpa_passphrase). This saves about 0.5 kB in code size.
+#CONFIG_NO_WPA_PASSPHRASE=y
+
+# Disable scan result processing (ap_mode=1) to save code size by about 1 kB.
+# This can be used if ap_scan=1 mode is never enabled.
+#CONFIG_NO_SCAN_PROCESSING=y
+
+# Select configuration backend:
+# file = text file (e.g., wpa_supplicant.conf; note: the configuration file
+# path is given on command line, not here; this option is just used to
+# select the backend that allows configuration files to be used)
+# winreg = Windows registry (see win_example.reg for an example)
+CONFIG_BACKEND=file
+
+# Remove configuration write functionality (i.e., to allow the configuration
+# file to be updated based on runtime configuration changes). The runtime
+# configuration can still be changed, the changes are just not going to be
+# persistent over restarts. This option can be used to reduce code size by
+# about 3.5 kB.
+#CONFIG_NO_CONFIG_WRITE=y
+
+# Remove support for configuration blobs to reduce code size by about 1.5 kB.
+#CONFIG_NO_CONFIG_BLOBS=y
+
+# Select program entry point implementation:
+# main = UNIX/POSIX like main() function (default)
+# main_winsvc = Windows service (read parameters from registry)
+# main_none = Very basic example (development use only)
+#CONFIG_MAIN=main
+
+# Select wrapper for operating system and C library specific functions
+# unix = UNIX/POSIX like systems (default)
+# win32 = Windows systems
+# none = Empty template
+#CONFIG_OS=unix
+
+# Select event loop implementation
+# eloop = select() loop (default)
+# eloop_win = Windows events and WaitForMultipleObject() loop
+#CONFIG_ELOOP=eloop
+
+# Should we use poll instead of select? Select is used by default.
+#CONFIG_ELOOP_POLL=y
+
+# Should we use epoll instead of select? Select is used by default.
+#CONFIG_ELOOP_EPOLL=y
+
+# Select layer 2 packet implementation
+# linux = Linux packet socket (default)
+# pcap = libpcap/libdnet/WinPcap
+# freebsd = FreeBSD libpcap
+# winpcap = WinPcap with receive thread
+# ndis = Windows NDISUIO (note: requires CONFIG_USE_NDISUIO=y)
+# none = Empty template
+CONFIG_L2_PACKET=linux
+
+# PeerKey handshake for Station to Station Link (IEEE 802.11e DLS)
+CONFIG_PEERKEY=y
+
+# IEEE 802.11w (management frame protection), also known as PMF
+# Driver support is also needed for IEEE 802.11w.
+#CONFIG_IEEE80211W=y
+
+# Select TLS implementation
+# openssl = OpenSSL (default)
+# gnutls = GnuTLS
+# internal = Internal TLSv1 implementation (experimental)
+# none = Empty template
+CONFIG_TLS=openssl
+
+# TLS-based EAP methods require at least TLS v1.0. Newer version of TLS (v1.1)
+# can be enabled to get a stronger construction of messages when block ciphers
+# are used. It should be noted that some existing TLS v1.0 -based
+# implementation may not be compatible with TLS v1.1 message (ClientHello is
+# sent prior to negotiating which version will be used)
+#CONFIG_TLSV11=y
+
+# TLS-based EAP methods require at least TLS v1.0. Newer version of TLS (v1.2)
+# can be enabled to enable use of stronger crypto algorithms. It should be
+# noted that some existing TLS v1.0 -based implementation may not be compatible
+# with TLS v1.2 message (ClientHello is sent prior to negotiating which version
+# will be used)
+CONFIG_TLSV12=y
+
+# If CONFIG_TLS=internal is used, additional library and include paths are
+# needed for LibTomMath. Alternatively, an integrated, minimal version of
+# LibTomMath can be used. See beginning of libtommath.c for details on benefits
+# and drawbacks of this option.
+#CONFIG_INTERNAL_LIBTOMMATH=y
+#ifndef CONFIG_INTERNAL_LIBTOMMATH
+#LTM_PATH=/usr/src/libtommath-0.39
+#CFLAGS += -I$(LTM_PATH)
+#LIBS += -L$(LTM_PATH)
+#LIBS_p += -L$(LTM_PATH)
+#endif
+# At the cost of about 4 kB of additional binary size, the internal LibTomMath
+# can be configured to include faster routines for exptmod, sqr, and div to
+# speed up DH and RSA calculation considerably
+#CONFIG_INTERNAL_LIBTOMMATH_FAST=y
+
+# Include NDIS event processing through WMI into wpa_supplicant/wpasvc.
+# This is only for Windows builds and requires WMI-related header files and
+# WbemUuid.Lib from Platform SDK even when building with MinGW.
+#CONFIG_NDIS_EVENTS_INTEGRATED=y
+#PLATFORMSDKLIB="/opt/Program Files/Microsoft Platform SDK/Lib"
+
+# Add support for old DBus control interface
+# (fi.epitest.hostap.WPASupplicant)
+#CONFIG_CTRL_IFACE_DBUS=y
+
+# Add support for new DBus control interface
+# (fi.w1.hostap.wpa_supplicant1)
+#CONFIG_CTRL_IFACE_DBUS_NEW=y
+
+# Add introspection support for new DBus control interface
+#CONFIG_CTRL_IFACE_DBUS_INTRO=y
+
+# Add support for loading EAP methods dynamically as shared libraries.
+# When this option is enabled, each EAP method can be either included
+# statically (CONFIG_EAP_<method>=y) or dynamically (CONFIG_EAP_<method>=dyn).
+# Dynamic EAP methods are build as shared objects (eap_*.so) and they need to
+# be loaded in the beginning of the wpa_supplicant configuration file
+# (see load_dynamic_eap parameter in the example file) before being used in
+# the network blocks.
+#
+# Note that some shared parts of EAP methods are included in the main program
+# and in order to be able to use dynamic EAP methods using these parts, the
+# main program must have been build with the EAP method enabled (=y or =dyn).
+# This means that EAP-TLS/PEAP/TTLS/FAST cannot be added as dynamic libraries
+# unless at least one of them was included in the main build to force inclusion
+# of the shared code. Similarly, at least one of EAP-SIM/AKA must be included
+# in the main build to be able to load these methods dynamically.
+#
+# Please also note that using dynamic libraries will increase the total binary
+# size. Thus, it may not be the best option for targets that have limited
+# amount of memory/flash.
+#CONFIG_DYNAMIC_EAP_METHODS=y
+
+# IEEE Std 802.11r-2008 (Fast BSS Transition)
+#CONFIG_IEEE80211R=y
+
+# Add support for writing debug log to a file (/tmp/wpa_supplicant-log-#.txt)
+#CONFIG_DEBUG_FILE=y
+
+# Send debug messages to syslog instead of stdout
+#CONFIG_DEBUG_SYSLOG=y
+# Set syslog facility for debug messages
+#CONFIG_DEBUG_SYSLOG_FACILITY=LOG_DAEMON
+
+# Add support for sending all debug messages (regardless of debug verbosity)
+# to the Linux kernel tracing facility. This helps debug the entire stack by
+# making it easy to record everything happening from the driver up into the
+# same file, e.g., using trace-cmd.
+#CONFIG_DEBUG_LINUX_TRACING=y
+
+# Add support for writing debug log to Android logcat instead of standard
+# output
+#CONFIG_ANDROID_LOG=y
+
+# Enable privilege separation (see README 'Privilege separation' for details)
+#CONFIG_PRIVSEP=y
+
+# Enable mitigation against certain attacks against TKIP by delaying Michael
+# MIC error reports by a random amount of time between 0 and 60 seconds
+#CONFIG_DELAYED_MIC_ERROR_REPORT=y
+
+# Enable tracing code for developer debugging
+# This tracks use of memory allocations and other registrations and reports
+# incorrect use with a backtrace of call (or allocation) location.
+#CONFIG_WPA_TRACE=y
+# For BSD, uncomment these.
+#LIBS += -lexecinfo
+#LIBS_p += -lexecinfo
+#LIBS_c += -lexecinfo
+
+# Use libbfd to get more details for developer debugging
+# This enables use of libbfd to get more detailed symbols for the backtraces
+# generated by CONFIG_WPA_TRACE=y.
+#CONFIG_WPA_TRACE_BFD=y
+# For BSD, uncomment these.
+#LIBS += -lbfd -liberty -lz
+#LIBS_p += -lbfd -liberty -lz
+#LIBS_c += -lbfd -liberty -lz
+
+# wpa_supplicant depends on strong random number generation being available
+# from the operating system. os_get_random() function is used to fetch random
+# data when needed, e.g., for key generation. On Linux and BSD systems, this
+# works by reading /dev/urandom. It should be noted that the OS entropy pool
+# needs to be properly initialized before wpa_supplicant is started. This is
+# important especially on embedded devices that do not have a hardware random
+# number generator and may by default start up with minimal entropy available
+# for random number generation.
+#
+# As a safety net, wpa_supplicant is by default trying to internally collect
+# additional entropy for generating random data to mix in with the data fetched
+# from the OS. This by itself is not considered to be very strong, but it may
+# help in cases where the system pool is not initialized properly. However, it
+# is very strongly recommended that the system pool is initialized with enough
+# entropy either by using hardware assisted random number generator or by
+# storing state over device reboots.
+#
+# wpa_supplicant can be configured to maintain its own entropy store over
+# restarts to enhance random number generation. This is not perfect, but it is
+# much more secure than using the same sequence of random numbers after every
+# reboot. This can be enabled with -e<entropy file> command line option. The
+# specified file needs to be readable and writable by wpa_supplicant.
+#
+# If the os_get_random() is known to provide strong random data (e.g., on
+# Linux/BSD, the board in question is known to have reliable source of random
+# data from /dev/urandom), the internal wpa_supplicant random pool can be
+# disabled. This will save some in binary size and CPU use. However, this
+# should only be considered for builds that are known to be used on devices
+# that meet the requirements described above.
+#CONFIG_NO_RANDOM_POOL=y
+
+# IEEE 802.11n (High Throughput) support (mainly for AP mode)
+#CONFIG_IEEE80211N=y
+
+# IEEE 802.11ac (Very High Throughput) support (mainly for AP mode)
+# (depends on CONFIG_IEEE80211N)
+#CONFIG_IEEE80211AC=y
+
+# Wireless Network Management (IEEE Std 802.11v-2011)
+# Note: This is experimental and not complete implementation.
+#CONFIG_WNM=y
+
+# Interworking (IEEE 802.11u)
+# This can be used to enable functionality to improve interworking with
+# external networks (GAS/ANQP to learn more about the networks and network
+# selection based on available credentials).
+#CONFIG_INTERWORKING=y
+
+# Hotspot 2.0
+#CONFIG_HS20=y
+
+# Disable roaming in wpa_supplicant
+#CONFIG_NO_ROAMING=y
+
+# AP mode operations with wpa_supplicant
+# This can be used for controlling AP mode operations with wpa_supplicant. It
+# should be noted that this is mainly aimed at simple cases like
+# WPA2-Personal while more complex configurations like WPA2-Enterprise with an
+# external RADIUS server can be supported with hostapd.
+#CONFIG_AP=y
+
+# P2P (Wi-Fi Direct)
+# This can be used to enable P2P support in wpa_supplicant. See README-P2P for
+# more information on P2P operations.
+#CONFIG_P2P=y
+
+# Enable TDLS support
+#CONFIG_TDLS=y
+
+# Wi-Fi Display
+# This can be used to enable Wi-Fi Display extensions for P2P using an external
+# program to control the additional information exchanges in the messages.
+#CONFIG_WIFI_DISPLAY=y
+
+# Autoscan
+# This can be used to enable automatic scan support in wpa_supplicant.
+# See wpa_supplicant.conf for more information on autoscan usage.
+#
+# Enabling directly a module will enable autoscan support.
+# For exponential module:
+#CONFIG_AUTOSCAN_EXPONENTIAL=y
+# For periodic module:
+#CONFIG_AUTOSCAN_PERIODIC=y
+
+# Password (and passphrase, etc.) backend for external storage
+# These optional mechanisms can be used to add support for storing passwords
+# and other secrets in external (to wpa_supplicant) location. This allows, for
+# example, operating system specific key storage to be used
+#
+# External password backend for testing purposes (developer use)
+#CONFIG_EXT_PASSWORD_TEST=y
+
+# Enable Fast Session Transfer (FST)
+#CONFIG_FST=y
+
+# Enable CLI commands for FST testing
+#CONFIG_FST_TEST=y
+
+# OS X builds. This is only for building eapol_test.
+#CONFIG_OSX=y
+
+# EAP-FAST used to require OpenSSL patches, so it's not on by default;
+# enable it here.
+CONFIG_EAP_FAST=y
diff --git a/scripts/ci/eapol_test/config_osx b/scripts/ci/eapol_test/config_osx
new file mode 100644
index 0000000..01dd020
--- /dev/null
+++ b/scripts/ci/eapol_test/config_osx
@@ -0,0 +1,515 @@
+# Example wpa_supplicant build time configuration
+#
+# This file lists the configuration options that are used when building the
+# hostapd binary. All lines starting with # are ignored. Configuration option
+# lines must be commented out completely, if they are not to be included, i.e.,
+# just setting VARIABLE=n is not disabling that variable.
+#
+# This file is included in Makefile, so variables like CFLAGS and LIBS can also
+# be modified from here. In most cases, these lines should use += in order not
+# to override previous values of the variables.
+
+CFLAGS += -g3 -O0 -Wno-error=deprecated-declarations -Wno-error=void-pointer-to-enum-cast $(EAPOL_TEST_CFLAGS)
+CFLAGS += -I/usr/local/opt/openssl/include -I/opt/homebrew/opt/openssl/include -I/usr/local/include/openssl
+
+LIBS += $(EAPOL_TEST_LDFLAGS) -L/usr/local/opt/openssl/lib -L/opt/homebrew/opt/openssl/lib -L/usr/local/lib
+
+# Some Red Hat versions seem to include kerberos header files from OpenSSL, but
+# the kerberos files are not in the default include path. Following line can be
+# used to fix build issues on such systems (krb5.h not found).
+#CFLAGS += -I/usr/include/kerberos
+
+# Driver interface for generic Linux wireless extensions
+# Note: WEXT is deprecated in the current Linux kernel version and no new
+# functionality is added to it. nl80211-based interface is the new
+# replacement for WEXT and its use allows wpa_supplicant to properly control
+# the driver to improve existing functionality like roaming and to support new
+# functionality.
+CONFIG_DRIVER_WEXT=n
+
+# Driver interface for Linux drivers using the nl80211 kernel interface
+CONFIG_DRIVER_NL80211=n
+
+# QCA vendor extensions to nl80211
+CONFIG_DRIVER_NL80211_QCA=n
+
+# driver_nl80211.c requires libnl. If you are compiling it yourself
+# you may need to point hostapd to your version of libnl.
+#
+#CFLAGS += -I$<path to libnl include files>
+#LIBS += -L$<path to libnl library files>
+
+# Use libnl v2.0 (or 3.0) libraries.
+#CONFIG_LIBNL20=y
+
+# Use libnl 3.2 libraries (if this is selected, CONFIG_LIBNL20 is ignored)
+#CONFIG_LIBNL32=y
+
+
+# Driver interface for FreeBSD net80211 layer (e.g., Atheros driver)
+#CONFIG_DRIVER_BSD=y
+#CFLAGS += -I/usr/local/include
+#LIBS += -L/usr/local/lib
+#LIBS_p += -L/usr/local/lib
+#LIBS_c += -L/usr/local/lib
+
+# Driver interface for Windows NDIS
+#CONFIG_DRIVER_NDIS=y
+#CFLAGS += -I/usr/include/w32api/ddk
+#LIBS += -L/usr/local/lib
+# For native build using mingw
+#CONFIG_NATIVE_WINDOWS=y
+# Additional directories for cross-compilation on Linux host for mingw target
+#CFLAGS += -I/opt/mingw/mingw32/include/ddk
+#LIBS += -L/opt/mingw/mingw32/lib
+#CC=mingw32-gcc
+# By default, driver_ndis uses WinPcap for low-level operations. This can be
+# replaced with the following option which replaces WinPcap calls with NDISUIO.
+# However, this requires that WZC is disabled (net stop wzcsvc) before starting
+# wpa_supplicant.
+# CONFIG_USE_NDISUIO=y
+
+# Driver interface for wired Ethernet drivers
+CONFIG_DRIVER_WIRED=y
+
+# Driver interface for the Broadcom RoboSwitch family
+#CONFIG_DRIVER_ROBOSWITCH=y
+
+# Driver interface for no driver (e.g., WPS ER only)
+#CONFIG_DRIVER_NONE=y
+
+# Solaris libraries
+#LIBS += -lsocket -ldlpi -lnsl
+#LIBS_c += -lsocket
+
+# Enable IEEE 802.1X Supplicant (automatically included if any EAP method is
+# included)
+CONFIG_IEEE8021X_EAPOL=y
+
+# EAP-MD5
+CONFIG_EAP_MD5=y
+
+# EAP-MSCHAPv2
+CONFIG_EAP_MSCHAPV2=y
+
+# EAP-TLS
+CONFIG_EAP_TLS=y
+
+# EAP-PEAP
+CONFIG_EAP_PEAP=y
+
+# EAP-TTLS
+CONFIG_EAP_TTLS=y
+
+# EAP-FAST
+# Note: If OpenSSL is used as the TLS library, OpenSSL 1.0 or newer is needed
+# for EAP-FAST support. Older OpenSSL releases would need to be patched, e.g.,
+# with openssl-0.9.8x-tls-extensions.patch, to add the needed functions.
+CONFIG_EAP_FAST=y
+
+# EAP-GTC
+CONFIG_EAP_GTC=y
+
+# EAP-OTP
+CONFIG_EAP_OTP=y
+
+# EAP-SIM (enable CONFIG_PCSC, if EAP-SIM is used)
+CONFIG_EAP_SIM=y
+
+# EAP-PSK (experimental; this is _not_ needed for WPA-PSK)
+CONFIG_EAP_PSK=y
+
+# EAP-pwd (secure authentication using only a password)
+CONFIG_EAP_PWD=y
+
+# EAP-PAX
+CONFIG_EAP_PAX=y
+
+# LEAP
+CONFIG_EAP_LEAP=n
+
+# EAP-AKA (enable CONFIG_PCSC, if EAP-AKA is used)
+CONFIG_EAP_AKA=y
+
+# EAP-AKA' (enable CONFIG_PCSC, if EAP-AKA' is used).
+# This requires CONFIG_EAP_AKA to be enabled, too.
+CONFIG_EAP_AKA_PRIME=y
+
+# Enable USIM simulator (Milenage) for EAP-AKA
+CONFIG_USIM_SIMULATOR=y
+
+# Enable SIM simulator (Milenage) for EAP-SIM
+CONFIG_SIM_SIMULATOR=y
+
+# EAP-SAKE
+CONFIG_EAP_SAKE=y
+
+# EAP-GPSK
+CONFIG_EAP_GPSK=y
+# Include support for optional SHA256 cipher suite in EAP-GPSK
+CONFIG_EAP_GPSK_SHA256=y
+
+# EAP-TNC and related Trusted Network Connect support (experimental)
+CONFIG_EAP_TNC=y
+
+# Wi-Fi Protected Setup (WPS)
+#CONFIG_WPS=y
+# Enable WPS external registrar functionality
+#CONFIG_WPS_ER=y
+# Disable credentials for an open network by default when acting as a WPS
+# registrar.
+#CONFIG_WPS_REG_DISABLE_OPEN=y
+# Enable WPS support with NFC config method
+#CONFIG_WPS_NFC=y
+
+# EAP-IKEv2
+CONFIG_EAP_IKEV2=y
+
+# EAP-EKE
+CONFIG_EAP_EKE=y
+
+# PKCS#12 (PFX) support (used to read private key and certificate file from
+# a file that usually has extension .p12 or .pfx)
+CONFIG_PKCS12=y
+
+# Smartcard support (i.e., private key on a smartcard), e.g., with openssl
+# engine.
+CONFIG_SMARTCARD=y
+
+# PC/SC interface for smartcards (USIM, GSM SIM)
+# Enable this if EAP-SIM or EAP-AKA is included
+#CONFIG_PCSC=y
+
+# Support HT overrides (disable HT/HT40, mask MCS rates, etc.)
+#CONFIG_HT_OVERRIDES=y
+
+# Support VHT overrides (disable VHT, mask MCS rates, etc.)
+#CONFIG_VHT_OVERRIDES=y
+
+# Development testing
+CONFIG_EAPOL_TEST=y
+
+# Select control interface backend for external programs, e.g., wpa_cli:
+# unix = UNIX domain sockets (default for Linux/*BSD)
+# udp = UDP sockets using localhost (127.0.0.1)
+# udp6 = UDP IPv6 sockets using localhost (::1)
+# named_pipe = Windows Named Pipe (default for Windows)
+# udp-remote = UDP sockets with remote access (only for tests systems/purpose)
+# udp6-remote = UDP IPv6 sockets with remote access (only for tests purpose)
+# y = use default (backwards compatibility)
+# If this option is commented out, control interface is not included in the
+# build.
+CONFIG_CTRL_IFACE=y
+
+# Include support for GNU Readline and History Libraries in wpa_cli.
+# When building a wpa_cli binary for distribution, please note that these
+# libraries are licensed under GPL and as such, BSD license may not apply for
+# the resulting binary.
+#CONFIG_READLINE=y
+
+# Include internal line edit mode in wpa_cli. This can be used as a replacement
+# for GNU Readline to provide limited command line editing and history support.
+#CONFIG_WPA_CLI_EDIT=y
+
+# Remove debugging code that is printing out debug message to stdout.
+# This can be used to reduce the size of the wpa_supplicant considerably
+# if debugging code is not needed. The size reduction can be around 35%
+# (e.g., 90 kB).
+#CONFIG_NO_STDOUT_DEBUG=y
+
+# Remove WPA support, e.g., for wired-only IEEE 802.1X supplicant, to save
+# 35-50 kB in code size.
+#CONFIG_NO_WPA=y
+
+# Remove IEEE 802.11i/WPA-Personal ASCII passphrase support
+# This option can be used to reduce code size by removing support for
+# converting ASCII passphrases into PSK. If this functionality is removed, the
+# PSK can only be configured as the 64-octet hexstring (e.g., from
+# wpa_passphrase). This saves about 0.5 kB in code size.
+#CONFIG_NO_WPA_PASSPHRASE=y
+
+# Disable scan result processing (ap_mode=1) to save code size by about 1 kB.
+# This can be used if ap_scan=1 mode is never enabled.
+#CONFIG_NO_SCAN_PROCESSING=y
+
+# Select configuration backend:
+# file = text file (e.g., wpa_supplicant.conf; note: the configuration file
+# path is given on command line, not here; this option is just used to
+# select the backend that allows configuration files to be used)
+# winreg = Windows registry (see win_example.reg for an example)
+CONFIG_BACKEND=file
+
+# Remove configuration write functionality (i.e., to allow the configuration
+# file to be updated based on runtime configuration changes). The runtime
+# configuration can still be changed, the changes are just not going to be
+# persistent over restarts. This option can be used to reduce code size by
+# about 3.5 kB.
+#CONFIG_NO_CONFIG_WRITE=y
+
+# Remove support for configuration blobs to reduce code size by about 1.5 kB.
+#CONFIG_NO_CONFIG_BLOBS=y
+
+# Select program entry point implementation:
+# main = UNIX/POSIX like main() function (default)
+# main_winsvc = Windows service (read parameters from registry)
+# main_none = Very basic example (development use only)
+#CONFIG_MAIN=main
+
+# Select wrapper for operating system and C library specific functions
+# unix = UNIX/POSIX like systems (default)
+# win32 = Windows systems
+# none = Empty template
+#CONFIG_OS=unix
+
+# Select event loop implementation
+# eloop = select() loop (default)
+# eloop_win = Windows events and WaitForMultipleObject() loop
+#CONFIG_ELOOP=eloop
+
+# Should we use poll instead of select? Select is used by default.
+#CONFIG_ELOOP_POLL=y
+
+# Should we use epoll instead of select? Select is used by default.
+#CONFIG_ELOOP_EPOLL=y
+
+# Select layer 2 packet implementation
+# linux = Linux packet socket (default)
+# pcap = libpcap/libdnet/WinPcap
+# freebsd = FreeBSD libpcap
+# winpcap = WinPcap with receive thread
+# ndis = Windows NDISUIO (note: requires CONFIG_USE_NDISUIO=y)
+# none = Empty template
+CONFIG_L2_PACKET=freebsd
+
+# PeerKey handshake for Station to Station Link (IEEE 802.11e DLS)
+CONFIG_PEERKEY=y
+
+# IEEE 802.11w (management frame protection), also known as PMF
+# Driver support is also needed for IEEE 802.11w.
+#CONFIG_IEEE80211W=y
+
+# Select TLS implementation
+# openssl = OpenSSL (default)
+# gnutls = GnuTLS
+# internal = Internal TLSv1 implementation (experimental)
+# none = Empty template
+CONFIG_TLS=openssl
+
+# TLS-based EAP methods require at least TLS v1.0. Newer version of TLS (v1.1)
+# can be enabled to get a stronger construction of messages when block ciphers
+# are used. It should be noted that some existing TLS v1.0 -based
+# implementation may not be compatible with TLS v1.1 message (ClientHello is
+# sent prior to negotiating which version will be used)
+#CONFIG_TLSV11=y
+
+# TLS-based EAP methods require at least TLS v1.0. Newer version of TLS (v1.2)
+# can be enabled to enable use of stronger crypto algorithms. It should be
+# noted that some existing TLS v1.0 -based implementation may not be compatible
+# with TLS v1.2 message (ClientHello is sent prior to negotiating which version
+# will be used)
+CONFIG_TLSV12=y
+
+# If CONFIG_TLS=internal is used, additional library and include paths are
+# needed for LibTomMath. Alternatively, an integrated, minimal version of
+# LibTomMath can be used. See beginning of libtommath.c for details on benefits
+# and drawbacks of this option.
+#CONFIG_INTERNAL_LIBTOMMATH=y
+#ifndef CONFIG_INTERNAL_LIBTOMMATH
+#LTM_PATH=/usr/src/libtommath-0.39
+#CFLAGS += -I$(LTM_PATH)
+#LIBS += -L$(LTM_PATH)
+#LIBS_p += -L$(LTM_PATH)
+#endif
+# At the cost of about 4 kB of additional binary size, the internal LibTomMath
+# can be configured to include faster routines for exptmod, sqr, and div to
+# speed up DH and RSA calculation considerably
+#CONFIG_INTERNAL_LIBTOMMATH_FAST=y
+
+# Include NDIS event processing through WMI into wpa_supplicant/wpasvc.
+# This is only for Windows builds and requires WMI-related header files and
+# WbemUuid.Lib from Platform SDK even when building with MinGW.
+#CONFIG_NDIS_EVENTS_INTEGRATED=y
+#PLATFORMSDKLIB="/opt/Program Files/Microsoft Platform SDK/Lib"
+
+# Add support for old DBus control interface
+# (fi.epitest.hostap.WPASupplicant)
+#CONFIG_CTRL_IFACE_DBUS=y
+
+# Add support for new DBus control interface
+# (fi.w1.hostap.wpa_supplicant1)
+#CONFIG_CTRL_IFACE_DBUS_NEW=y
+
+# Add introspection support for new DBus control interface
+#CONFIG_CTRL_IFACE_DBUS_INTRO=y
+
+# Add support for loading EAP methods dynamically as shared libraries.
+# When this option is enabled, each EAP method can be either included
+# statically (CONFIG_EAP_<method>=y) or dynamically (CONFIG_EAP_<method>=dyn).
+# Dynamic EAP methods are build as shared objects (eap_*.so) and they need to
+# be loaded in the beginning of the wpa_supplicant configuration file
+# (see load_dynamic_eap parameter in the example file) before being used in
+# the network blocks.
+#
+# Note that some shared parts of EAP methods are included in the main program
+# and in order to be able to use dynamic EAP methods using these parts, the
+# main program must have been build with the EAP method enabled (=y or =dyn).
+# This means that EAP-TLS/PEAP/TTLS/FAST cannot be added as dynamic libraries
+# unless at least one of them was included in the main build to force inclusion
+# of the shared code. Similarly, at least one of EAP-SIM/AKA must be included
+# in the main build to be able to load these methods dynamically.
+#
+# Please also note that using dynamic libraries will increase the total binary
+# size. Thus, it may not be the best option for targets that have limited
+# amount of memory/flash.
+#CONFIG_DYNAMIC_EAP_METHODS=y
+
+# IEEE Std 802.11r-2008 (Fast BSS Transition)
+#CONFIG_IEEE80211R=y
+
+# Add support for writing debug log to a file (/tmp/wpa_supplicant-log-#.txt)
+#CONFIG_DEBUG_FILE=y
+
+# Send debug messages to syslog instead of stdout
+#CONFIG_DEBUG_SYSLOG=y
+# Set syslog facility for debug messages
+#CONFIG_DEBUG_SYSLOG_FACILITY=LOG_DAEMON
+
+# Add support for sending all debug messages (regardless of debug verbosity)
+# to the Linux kernel tracing facility. This helps debug the entire stack by
+# making it easy to record everything happening from the driver up into the
+# same file, e.g., using trace-cmd.
+#CONFIG_DEBUG_LINUX_TRACING=y
+
+# Add support for writing debug log to Android logcat instead of standard
+# output
+#CONFIG_ANDROID_LOG=y
+
+# Enable privilege separation (see README 'Privilege separation' for details)
+#CONFIG_PRIVSEP=y
+
+# Enable mitigation against certain attacks against TKIP by delaying Michael
+# MIC error reports by a random amount of time between 0 and 60 seconds
+#CONFIG_DELAYED_MIC_ERROR_REPORT=y
+
+# Enable tracing code for developer debugging
+# This tracks use of memory allocations and other registrations and reports
+# incorrect use with a backtrace of call (or allocation) location.
+#CONFIG_WPA_TRACE=y
+# For BSD, uncomment these.
+#LIBS += -lexecinfo
+#LIBS_p += -lexecinfo
+#LIBS_c += -lexecinfo
+
+# Use libbfd to get more details for developer debugging
+# This enables use of libbfd to get more detailed symbols for the backtraces
+# generated by CONFIG_WPA_TRACE=y.
+#CONFIG_WPA_TRACE_BFD=y
+# For BSD, uncomment these.
+#LIBS += -lbfd -liberty -lz
+#LIBS_p += -lbfd -liberty -lz
+#LIBS_c += -lbfd -liberty -lz
+
+# wpa_supplicant depends on strong random number generation being available
+# from the operating system. os_get_random() function is used to fetch random
+# data when needed, e.g., for key generation. On Linux and BSD systems, this
+# works by reading /dev/urandom. It should be noted that the OS entropy pool
+# needs to be properly initialized before wpa_supplicant is started. This is
+# important especially on embedded devices that do not have a hardware random
+# number generator and may by default start up with minimal entropy available
+# for random number generation.
+#
+# As a safety net, wpa_supplicant is by default trying to internally collect
+# additional entropy for generating random data to mix in with the data fetched
+# from the OS. This by itself is not considered to be very strong, but it may
+# help in cases where the system pool is not initialized properly. However, it
+# is very strongly recommended that the system pool is initialized with enough
+# entropy either by using hardware assisted random number generator or by
+# storing state over device reboots.
+#
+# wpa_supplicant can be configured to maintain its own entropy store over
+# restarts to enhance random number generation. This is not perfect, but it is
+# much more secure than using the same sequence of random numbers after every
+# reboot. This can be enabled with -e<entropy file> command line option. The
+# specified file needs to be readable and writable by wpa_supplicant.
+#
+# If the os_get_random() is known to provide strong random data (e.g., on
+# Linux/BSD, the board in question is known to have reliable source of random
+# data from /dev/urandom), the internal wpa_supplicant random pool can be
+# disabled. This will save some in binary size and CPU use. However, this
+# should only be considered for builds that are known to be used on devices
+# that meet the requirements described above.
+#CONFIG_NO_RANDOM_POOL=y
+
+# IEEE 802.11n (High Throughput) support (mainly for AP mode)
+#CONFIG_IEEE80211N=y
+
+# IEEE 802.11ac (Very High Throughput) support (mainly for AP mode)
+# (depends on CONFIG_IEEE80211N)
+#CONFIG_IEEE80211AC=y
+
+# Wireless Network Management (IEEE Std 802.11v-2011)
+# Note: This is experimental and not complete implementation.
+#CONFIG_WNM=y
+
+# Interworking (IEEE 802.11u)
+# This can be used to enable functionality to improve interworking with
+# external networks (GAS/ANQP to learn more about the networks and network
+# selection based on available credentials).
+#CONFIG_INTERWORKING=y
+
+# Hotspot 2.0
+#CONFIG_HS20=y
+
+# Disable roaming in wpa_supplicant
+#CONFIG_NO_ROAMING=y
+
+# AP mode operations with wpa_supplicant
+# This can be used for controlling AP mode operations with wpa_supplicant. It
+# should be noted that this is mainly aimed at simple cases like
+# WPA2-Personal while more complex configurations like WPA2-Enterprise with an
+# external RADIUS server can be supported with hostapd.
+#CONFIG_AP=y
+
+# P2P (Wi-Fi Direct)
+# This can be used to enable P2P support in wpa_supplicant. See README-P2P for
+# more information on P2P operations.
+#CONFIG_P2P=y
+
+# Enable TDLS support
+#CONFIG_TDLS=y
+
+# Wi-Fi Display
+# This can be used to enable Wi-Fi Display extensions for P2P using an external
+# program to control the additional information exchanges in the messages.
+#CONFIG_WIFI_DISPLAY=y
+
+# Autoscan
+# This can be used to enable automatic scan support in wpa_supplicant.
+# See wpa_supplicant.conf for more information on autoscan usage.
+#
+# Enabling directly a module will enable autoscan support.
+# For exponential module:
+#CONFIG_AUTOSCAN_EXPONENTIAL=y
+# For periodic module:
+#CONFIG_AUTOSCAN_PERIODIC=y
+
+# Password (and passphrase, etc.) backend for external storage
+# These optional mechanisms can be used to add support for storing passwords
+# and other secrets in external (to wpa_supplicant) location. This allows, for
+# example, operating system specific key storage to be used
+#
+# External password backend for testing purposes (developer use)
+#CONFIG_EXT_PASSWORD_TEST=y
+
+# Enable Fast Session Transfer (FST)
+#CONFIG_FST=y
+
+# Enable CLI commands for FST testing
+#CONFIG_FST_TEST=y
+
+# OS X builds. This is only for building eapol_test.
+CONFIG_OSX=y
+
+# EAP-FAST used to require OpenSSL patches, so it is not on by default.
+# Enable it here.
+CONFIG_EAP_FAST=y
diff --git a/scripts/ci/haproxy.conf b/scripts/ci/haproxy.conf
new file mode 100644
index 0000000..e451aed
--- /dev/null
+++ b/scripts/ci/haproxy.conf
@@ -0,0 +1,16 @@
+global
+ maxconn 100
+
+defaults
+ timeout connect 10s
+ timeout client 30s
+ timeout server 30s
+
+frontend example.org
+ bind 127.0.0.1:2084
+ default_backend radsec
+
+backend radsec
+ balance roundrobin
+ server localhost-radssec 127.0.0.1:2083 send-proxy
+
diff --git a/scripts/ci/ldap-setup.sh b/scripts/ci/ldap-setup.sh
new file mode 100755
index 0000000..ec3ba2c
--- /dev/null
+++ b/scripts/ci/ldap-setup.sh
@@ -0,0 +1,51 @@
+#!/bin/sh
+
+# Allow setup script to work with homebrew too
+export PATH="/usr/local/opt/openldap/libexec:$PATH"
+
+# Clean out any existing DB
+rm -rf /tmp/ldap/db
+# Create directory we can write DB files to
+mkdir -p /tmp/ldap/db/
+
+# Change db location to /tmp as we can't write to /var
+sed -i -e 's/\/var\/lib\/ldap/\/tmp\/ldap\/db/' src/tests/salt-test-server/salt/ldap/base.ldif
+
+# Create a directory we can link schema files into
+if [ -d /tmp/ldap/schema ]; then
+ echo "Schema dir already linked"
+# Debian
+elif [ -d /etc/ldap/schema ]; then
+ ln -fs /etc/ldap/schema /tmp/ldap/schema
+# Redhat
+elif [ -d /etc/openldap/schema ]; then
+ ln -fs /etc/openldap/schema /tmp/ldap/schema
+# macOS (homebrew)
+elif [ -d /usr/local/etc/openldap/schema ]; then
+ ln -fs /usr/local/etc/openldap/schema /tmp/ldap/schema
+else
+ echo "Can't locate OpenLDAP schema dir"
+ exit 1
+fi
+
+# Start slapd
+slapd -h "ldap://127.0.0.1:3890/" -f scripts/ci/ldap/slapd.conf &
+
+# Wait for LDAP to start
+sleep 1
+
+# Add test data
+count=0
+while [ $count -lt 10 ] ; do
+ if ldapadd -x -H ldap://127.0.0.1:3890/ -D "cn=admin,cn=config" -w secret -f src/tests/salt-test-server/salt/ldap/base.ldif ; then
+ break 2
+ else
+ count=$((count+1))
+ sleep 1
+ fi
+done
+
+if [ $? -ne 0 ]; then
+ echo "Error configuring server"
+ exit 1
+fi
diff --git a/scripts/ci/ldap/slapd.conf b/scripts/ci/ldap/slapd.conf
new file mode 100644
index 0000000..7782616
--- /dev/null
+++ b/scripts/ci/ldap/slapd.conf
@@ -0,0 +1,51 @@
+#
+###### SAMPLE 1 - SIMPLE DIRECTORY ############
+#
+# NOTES: inetorgperson picks up attributes and objectclasses
+# from all three schemas
+#
+# NB: RH Linux schemas in /etc/openldap
+#
+include /tmp/ldap/schema/core.schema
+include /tmp/ldap/schema/cosine.schema
+include /tmp/ldap/schema/inetorgperson.schema
+include /tmp/ldap/schema/nis.schema
+include doc/schemas/ldap/openldap/freeradius.schema
+include doc/schemas/ldap/openldap/freeradius-clients.schema
+pidfile /tmp/slapd.pid
+
+# enable a lot of logging - we might need it
+# but generates huge logs
+loglevel -1
+
+# MODULELOAD definitions
+# not required (comment out) before version 2.3
+moduleload back_mdb.la
+
+database config
+rootdn "cn=admin,cn=config"
+rootpw secret
+
+#######################################################################
+# mdb database definitions
+#
+# replace example and com below with a suitable domain
+#
+# If you don't have a domain you can leave it since example.com
+# is reserved for experimentation or change them to my and inc
+#
+#######################################################################
+
+database mdb
+suffix "dc=nodomain"
+
+# root or superuser
+rootdn "cn=admin,dc=nodomain"
+rootpw secret
+# The database directory MUST exist prior to running slapd AND
+# change path as necessary
+directory /tmp/ldap/db/
+
+# other database parameters
+# read more in slapd.conf reference section
+checkpoint 128 15
diff --git a/scripts/ci/ldap/slapd2.conf b/scripts/ci/ldap/slapd2.conf
new file mode 100644
index 0000000..52c16a7
--- /dev/null
+++ b/scripts/ci/ldap/slapd2.conf
@@ -0,0 +1,61 @@
+#
+###### SAMPLE 1 - SIMPLE DIRECTORY ############
+#
+# NOTES: inetorgperson picks up attributes and objectclasses
+# from all three schemas
+#
+# NB: RH Linux schemas in /etc/openldap
+#
+include /tmp/ldap2/schema/core.schema
+include /tmp/ldap2/schema/cosine.schema
+include /tmp/ldap2/schema/inetorgperson.schema
+include /tmp/ldap2/schema/nis.schema
+include doc/schemas/ldap/openldap/freeradius.schema
+include doc/schemas/ldap/openldap/freeradius-clients.schema
+pidfile /tmp/slapd2.pid
+
+# enable a lot of logging - we might need it
+# but generates huge logs
+loglevel -1
+
+# MODULELOAD definitions
+# not required (comment out) before version 2.3
+moduleload back_mdb.la
+
+database config
+rootdn "cn=admin,cn=config"
+rootpw secret
+
+#
+# Certificates for SSL/TLS connections
+# Note - these will not match the host name so clients need to use
+# the "allow" option when checking certificates
+#
+#TLSCACertificateFile /tmp/ldap2/certs/cacert.pem
+#TLSCertificateFile /tmp/ldap2/certs/servercert.pem
+#TLSCertificateKeyFile /tmp/ldap2/certs/serverkey.pem
+
+#######################################################################
+# mdb database definitions
+#
+# replace example and com below with a suitable domain
+#
+# If you don't have a domain you can leave it since example.com
+# is reserved for experimentation or change them to my and inc
+#
+#######################################################################
+
+database mdb
+suffix "dc=nodomain"
+
+# root or superuser
+rootdn "cn=admin,dc=nodomain"
+rootpw secret
+# The database directory MUST exist prior to running slapd AND
+# change path as necessary
+directory /tmp/ldap2/db/
+
+# other database parameters
+# read more in slapd.conf reference section
+checkpoint 128 15
+
diff --git a/scripts/ci/ldap2-setup.sh b/scripts/ci/ldap2-setup.sh
new file mode 100755
index 0000000..c274414
--- /dev/null
+++ b/scripts/ci/ldap2-setup.sh
@@ -0,0 +1,63 @@
+#!/bin/sh
+
+# Allow setup script to work with homebrew too
+export PATH="/usr/local/opt/openldap/libexec:$PATH"
+
+# Clean out any existing DB
+rm -rf /tmp/ldap2/db
+# Create directory we can write DB files to
+mkdir -p /tmp/ldap2/db/
+
+# Change db location to /tmp as we can't write to /var
+sed -i -e 's/\/var\/lib\/ldap/\/tmp\/ldap2\/db/' src/tests/salt-test-server/salt/ldap/base2.ldif
+
+# Create a directory we can link schema files into
+if [ -d /tmp/ldap2/schema ]; then
+ echo "Schema dir already linked"
+# Debian
+elif [ -d /etc/ldap/schema ]; then
+ ln -fs /etc/ldap/schema /tmp/ldap2/schema
+# Redhat
+elif [ -d /etc/openldap/schema ]; then
+ ln -fs /etc/openldap/schema /tmp/ldap2/schema
+# macOS (homebrew)
+elif [ -d /usr/local/etc/openldap/schema ]; then
+ ln -fs /usr/local/etc/openldap/schema /tmp/ldap2/schema
+else
+ echo "Can't locate OpenLDAP schema dir"
+ exit 1
+fi
+
+# Clean out any old certificates
+##rm -rf /tmp/ldap2/certs
+# Create certificate directory
+##mkdir -p /tmp/ldap2/certs
+
+# Copy certificates - whilst not strictly LDAP certs they work fine for these tests
+##cp src/tests/certs/rsa/ca.pem /tmp/ldap2/certs/cacert.pem
+##cp src/tests/certs/rsa/server.pem /tmp/ldap2/certs/servercert.pem
+# OpenLDAP wants an un-encrypted key
+##openssl rsa -in src/tests/certs/rsa/server.key -out /tmp/ldap2/certs/serverkey.pem -passin pass:whatever
+
+# Start slapd
+slapd -h "ldap://127.0.0.1:3891/" -f scripts/ci/ldap/slapd2.conf &
+
+# Wait for LDAP to start
+sleep 1
+
+# Add test data
+count=0
+while [ $count -lt 10 ] ; do
+ if ldapadd -x -H ldap://127.0.0.1:3891/ -D "cn=admin,cn=config" -w secret -f src/tests/salt-test-server/salt/ldap/base2.ldif ; then
+ break 2
+ else
+ count=$((count+1))
+ sleep 1
+ fi
+done
+
+if [ $? -ne 0 ]; then
+ echo "Error configuring server"
+ exit 1
+fi
+
diff --git a/scripts/ci/mysql-setup.sh b/scripts/ci/mysql-setup.sh
new file mode 100755
index 0000000..12e0067
--- /dev/null
+++ b/scripts/ci/mysql-setup.sh
@@ -0,0 +1,19 @@
+#!/bin/sh -e
+
+echo "MySQL - Dropping existing database"
+mysql -h "${SQL_MYSQL_TEST_SERVER}" -u root -e 'DROP DATABASE radius;' || true
+
+echo "MySQL - Dropping existing user"
+mysql -h "${SQL_MYSQL_TEST_SERVER}" -u root -e 'DROP USER radius@localhost;' || true
+
+echo "MySQL - Creating database"
+mysql -h "${SQL_MYSQL_TEST_SERVER}" -u root -e 'CREATE DATABASE radius;'
+
+echo "MySQL - Executing schema.sql"
+mysql -h "${SQL_MYSQL_TEST_SERVER}" -u root radius < raddb/mods-config/sql/main/mysql/schema.sql
+
+echo "MySQL - Executing setup.sql"
+mysql -h "${SQL_MYSQL_TEST_SERVER}" -u root radius < raddb/mods-config/sql/main/mysql/setup.sql
+
+echo "MySQL - Grant radius user permissions"
+mysql -h "${SQL_MYSQL_TEST_SERVER}" -u root -e "GRANT ALL on radius.* TO radius@localhost; FLUSH PRIVILEGES;"
diff --git a/scripts/ci/openresty-setup.sh b/scripts/ci/openresty-setup.sh
new file mode 100755
index 0000000..eb55d67
--- /dev/null
+++ b/scripts/ci/openresty-setup.sh
@@ -0,0 +1,144 @@
+#!/bin/sh -e
+#
+# ### This is a script to setup an openresty web server for testing rlm_smtp
+#
+
+#
+# Declare the important path variables
+#
+
+# Base Directories
+BASEDIR=$(git rev-parse --show-toplevel)
+BUILDDIR="${BASEDIR}/build/ci/openresty"
+CIDIR="${BASEDIR}/scripts/ci"
+
+# Directories for openresty processes
+ROOTDIR="${BUILDDIR}/html"
+APIDIR="${BUILDDIR}/api"
+LOGDIR="${BUILDDIR}/logs"
+CERTDIR="${BUILDDIR}/certs"
+CERTSRCDIR="${BASEDIR}/raddb/restcerts"
+PASSWORD="whatever"
+
+# Important files for running openresty
+CONF="${BUILDDIR}/nginx.conf"
+
+#
+# Prepare the directories and files needed for running openresty
+#
+
+# Stop any currently running openresty instance
+echo "Checking for a running openresty instance"
+if [ -e "${LOGDIR}/nginx.pid" ]
+then
+ echo "Stopping the current openresty instance"
+ kill "$(cat ${LOGDIR}/nginx.pid)"
+ rm -r "${BUILDDIR}"
+fi
+
+# Create the directories
+mkdir -p "${BUILDDIR}" "${ROOTDIR}" "${APIDIR}" "${LOGDIR}" "${CERTDIR}"
+
+# Create the certificate
+echo "Generating the certificates"
+openssl pkcs8 -in ${CERTSRCDIR}/server.key -passin pass:${PASSWORD} -out ${CERTDIR}/server.key
+cat ${CERTSRCDIR}/server.pem ${CERTSRCDIR}/ca.pem > ${CERTDIR}/server.pem
+
+# Create nginx.conf file
+echo "Generating the openresty configuration file"
+touch "${CONF}"
+
+# Build nginx.conf
+echo "
+#
+worker_processes 1;
+error_log ${LOGDIR}/error.log;
+pid ${LOGDIR}/nginx.pid;
+
+events {
+ worker_connections 1024;
+}
+
+http {
+ include /usr/local/openresty/nginx/conf/mime.types;
+ default_type text/plain;
+
+ sendfile on;
+
+ server {
+ listen 8080;
+ server_name localhost;
+
+ location / {
+ root ${ROOTDIR};
+ index index.html;
+ }
+
+ location ~ ^/user(.*)$ {
+ default_type 'application/json';
+ add_header 'Content-Type' 'application/json';
+ content_by_lua_file ${APIDIR}/json-api.lua;
+ }
+
+ location ~ ^/post(.*)$ {
+ content_by_lua_file ${APIDIR}/post-api.lua;
+ }
+
+ location ~ ^/delay(.*)$ {
+ content_by_lua_file ${APIDIR}/delay-api.lua;
+ }
+ }
+
+ server {
+ listen 8443 ssl;
+ server_name localhost;
+
+ ssl_certificate ${CERTDIR}/server.pem;
+ ssl_certificate_key ${CERTDIR}/server.key;
+
+ ssl_session_cache shared:SSL:1m;
+ ssl_session_timeout 5m;
+
+ ssl_ciphers HIGH:!aNULL:!MD5;
+ ssl_prefer_server_ciphers on;
+
+ location / {
+ root ${ROOTDIR};
+ index index.html;
+ }
+
+ location ~ ^/user(.*)$ {
+ default_type 'application/json';
+ add_header 'Content-Type' 'application/json';
+ content_by_lua_file ${APIDIR}/json-api.lua;
+ }
+
+ location ~ ^/post(.*)$ {
+ content_by_lua_file ${APIDIR}/post-api.lua;
+ }
+
+ location ~ ^/auth(.*)$ {
+ content_by_lua_file ${APIDIR}/auth-api.lua;
+ auth_basic 'Auth Area';
+ auth_basic_user_file ${BUILDDIR}/.htpasswd;
+ }
+ }
+}
+
+" >"${CONF}"
+
+echo "Copy lua scripts into place"
+cp ${CIDIR}/openresty/*.lua "${APIDIR}"
+
+echo "Copy sample data into place"
+cp "${CIDIR}/openresty/test.txt" "${ROOTDIR}"
+
+echo "Copy htpasswd into place"
+cp "${CIDIR}/openresty/.htpasswd" "${BUILDDIR}"
+
+#
+# Run the openresty instance
+#
+echo "Starting openresty"
+openresty -c ${CONF} -p ${BUILDDIR}
+echo "Running openresty on port 8080 and 8443, accepting all local connections"
diff --git a/scripts/ci/openresty/.htpasswd b/scripts/ci/openresty/.htpasswd
new file mode 100644
index 0000000..028536a
--- /dev/null
+++ b/scripts/ci/openresty/.htpasswd
@@ -0,0 +1 @@
+Bob:$apr1$4AtMvgrH$pWCOxq7gq1N2AyeE7GT3R/
diff --git a/scripts/ci/openresty/auth-api.lua b/scripts/ci/openresty/auth-api.lua
new file mode 100644
index 0000000..8ea336c
--- /dev/null
+++ b/scripts/ci/openresty/auth-api.lua
@@ -0,0 +1,19 @@
+-- Simple API for checking POST data
+
+-- Get the request path
+local reqPath = ngx.var.uri
+-- Get the request method (POST, GET etc..)
+local reqMethod = ngx.var.request_method
+-- Get any URI arguments
+local uriArgs = ngx.req.get_uri_args()
+-- Get any POST arguments
+ngx.req.read_body()
+local postArgs = ngx.req.get_post_args()
+
+-- We only reply to POST requests
+if reqMethod ~= "POST"
+then
+ return false
+end
+
+ngx.say("Section: ", uriArgs.section, ", User: ", postArgs.user, ", Authenticated: true")
diff --git a/scripts/ci/openresty/delay-api.lua b/scripts/ci/openresty/delay-api.lua
new file mode 100644
index 0000000..aa4f61b
--- /dev/null
+++ b/scripts/ci/openresty/delay-api.lua
@@ -0,0 +1,6 @@
+-- Simple API representing a slow response for testing timeouts
+
+local t0 = os.clock()
+while os.clock() - t0 <= 2 do end
+
+ngx.say("Delayed response")
diff --git a/scripts/ci/openresty/json-api.lua b/scripts/ci/openresty/json-api.lua
new file mode 100644
index 0000000..4a574f7
--- /dev/null
+++ b/scripts/ci/openresty/json-api.lua
@@ -0,0 +1,145 @@
+-- Based on https://github.com/bambattajb/openresty-api-example
+
+-- Helper functions
+function strSplit(delim,str)
+ local t = {}
+
+ for substr in string.gmatch(str, "[^".. delim.. "]*") do
+ if substr ~= nil and string.len(substr) > 0 then
+ table.insert(t,substr)
+ end
+ end
+
+ return t
+end
+
+-- Read body being passed
+-- Required for ngx.req.get_body_data()
+ngx.req.read_body()
+-- Parser for sending JSON back to the client
+local cjson = require("cjson")
+-- Get the request path
+local reqPath = ngx.var.uri
+-- Get the request method (POST, GET etc..)
+local reqMethod = ngx.var.request_method
+-- Get any URI arguments
+local uriArgs = ngx.req.get_uri_args()
+-- Parse the body data as JSON
+local body = ngx.req.get_body_data() ==
+ -- This is like a ternary statement for Lua
+ -- It is saying if doesn't exist at least
+ -- define as empty object
+ nil and {} or cjson.decode(ngx.req.get_body_data());
+
+Api = {}
+Api.__index = Api
+-- Declare API not yet responded
+Api.responded = false;
+-- Function for checking input from client
+function Api.endpoint(method, path, callback)
+
+ -- return false if method doesn't match
+ if reqMethod ~= method
+ then
+ return false
+ end
+
+ -- If API already responded
+ if Api.responded then
+ return false
+ end
+
+ -- KeyData = params passed in path
+ local keyData = {}
+ -- Unaltered version of path
+ local origPath = reqPath
+ -- If this endpoint has params
+ if string.find(path, "<(.-)>")
+ then
+ -- Split origin and passed path sections
+ local splitPath = strSplit("/", path)
+ local splitReqPath = strSplit("/", reqPath)
+ -- Iterate over splitPath
+ for i, k in pairs(splitPath) do
+ -- If chunk contains <something>
+ if string.find(k, "<(.-)>")
+ then
+ if not splitReqPath[i] then
+ reqPath = origPath
+ return false
+ end
+ -- Add to keyData
+ keyData[string.match(k, "%<(%a+)%>")] = splitReqPath[i]
+ -- Replace matches with default for validation
+ reqPath = string.gsub(reqPath, splitReqPath[i], k)
+ end
+ end
+ end
+
+ -- return false if path doesn't match anything
+ if reqPath ~= path
+ then
+ reqPath = origPath
+ return false;
+ end
+
+ -- Make sure we don't run this again
+ Api.responded = true;
+
+ return callback(body, keyData);
+end
+
+-- Used in the accounting test
+Api.endpoint('POST', '/user/<username>/mac/<client>',
+ function(body, keyData)
+ local returnData = {}
+ returnData["control:Tmp-String-0"] = uriArgs.section
+ returnData["control:Tmp-String-1"] = {
+ reqMethod,
+ reqPath
+ }
+ returnData["control:User-Name"] = {
+ op = ":=",
+ value = keyData.username
+ }
+ returnData["control:NAS-IP-Address"] = {
+ op = "+=",
+ value = body.NAS or body['NAS-IP-Address'].value
+ }
+ returnData["control:Tmp-String-2"] = {
+ op = "^=",
+ value = keyData.username
+ }
+ return ngx.say(cjson.encode(returnData))
+ end
+)
+
+-- Used in the authorize test
+Api.endpoint('GET', '/user/<username>/mac/<client>',
+ function(body, keyData)
+ local returnData = {}
+ returnData["control:Tmp-String-0"] = uriArgs.section
+ returnData["control:Tmp-String-1"] = {
+ reqMethod,
+ reqPath
+ }
+ returnData["control:User-Name"] = {
+ op = ":=",
+ value = keyData.username
+ }
+ returnData["control:Tmp-String-2"] = {
+ op = "^=",
+ value = keyData.username
+ }
+ return ngx.say(cjson.encode(returnData))
+ end
+)
+
+-- Simple reflection of a URI argument
+Api.endpoint('GET', '/user/<username>/reflect/',
+ function(body, keyData)
+ local returnData = {}
+ returnData["station"] = uriArgs.station
+ return ngx.say(cjson.encode(returnData))
+ end
+)
diff --git a/scripts/ci/openresty/post-api.lua b/scripts/ci/openresty/post-api.lua
new file mode 100644
index 0000000..3f22960
--- /dev/null
+++ b/scripts/ci/openresty/post-api.lua
@@ -0,0 +1,19 @@
+-- Simple API for checking POST data
+
+-- Get the request path
+local reqPath = ngx.var.uri
+-- Get the request method (POST, GET etc..)
+local reqMethod = ngx.var.request_method
+-- Get any URI arguments
+local uriArgs = ngx.req.get_uri_args()
+-- Get any POST arguments
+ngx.req.read_body()
+local postArgs = ngx.req.get_post_args()
+
+-- We only reply to POST requests
+if reqMethod ~= "POST"
+then
+ return false
+end
+
+ngx.say("Section: ", uriArgs.section, ", User: ", postArgs.user)
diff --git a/scripts/ci/openresty/test.txt b/scripts/ci/openresty/test.txt
new file mode 100644
index 0000000..eceb6ed
--- /dev/null
+++ b/scripts/ci/openresty/test.txt
@@ -0,0 +1 @@
+Sample text response
diff --git a/scripts/ci/package-test.mk b/scripts/ci/package-test.mk
new file mode 100644
index 0000000..417784b
--- /dev/null
+++ b/scripts/ci/package-test.mk
@@ -0,0 +1,41 @@
+#
+# This Makefile performs some end to end tests against a package installed
+# within the CI environment.
+#
+# It reuses the eapol_test build-time tests, but runs them against the assets
+# installed by the distribution packaging.
+#
+# We want the run-time environment to be lean, typical of a fresh system
+# installation so that we catch any missing runtime dependencies, assets
+# missing from the packages, issues with the dynamic loader, etc.
+#
+# Therefore we skip the usual build framework so that we do not have to
+# configure the build tree and so that our only dependency is some non-ancient
+# version of GNU Make. (Any version in a supported distribution will do.)
+#
+
+#
+# For the package tests we use the system version of radiusd on the standard
+# port
+#
+RADIUSD_BIN := $(shell which radiusd || which freeradius)
+PORT := 1812
+SECRET := testing123
+DICT_PATH := /usr/share/freeradius
+
+ifneq (,$(wildcard /etc/raddb/radiusd.conf))
+RADDB_PATH := /etc/raddb/
+else
+RADDB_PATH := /etc/freeradius/
+endif
+
+#
+# We prefer to use our own exact version of eapol_test
+#
+EAPOL_TEST := $(shell ./scripts/ci/eapol_test-build.sh)
+
+MAKE_ARGS := RADIUSD_BIN=$(RADIUSD_BIN) PORT=$(PORT) SECRET="$(SECRET)" DICT_PATH=$(DICT_PATH) RADDB_PATH=$(RADDB_PATH)
+
+.PHONY: package-test
+package-test:
+ $(MAKE) -C src/tests $(MAKE_ARGS) tests.eap
diff --git a/scripts/ci/postgresql-setup.sh b/scripts/ci/postgresql-setup.sh
new file mode 100755
index 0000000..1e1abfb
--- /dev/null
+++ b/scripts/ci/postgresql-setup.sh
@@ -0,0 +1,26 @@
+#!/bin/sh -e
+
+#
+# To initialise on MacOS
+# sudo brew install postgresql
+# pg_ctl -D /usr/local/var/postgres start
+# /usr/local/opt/postgres/bin/createuser -s postgres
+#
+
+echo "Dropping existing database"
+psql -h "${SQL_POSTGRESQL_TEST_SERVER}" -c 'drop database radius;' -U postgres || true
+
+echo "Dropping existing database"
+psql -h "${SQL_POSTGRESQL_TEST_SERVER}" -c 'drop user radius;' -U postgres || true
+
+echo "PostgreSQL - Creating database"
+psql -h "${SQL_POSTGRESQL_TEST_SERVER}" -c 'create database radius;' -U postgres || true
+
+echo "PostgreSQL - Execute schema.sql"
+psql -h "${SQL_POSTGRESQL_TEST_SERVER}" -U postgres radius < raddb/mods-config/sql/main/postgresql/schema.sql
+
+echo "PostgreSQL - Execute setup.sql"
+psql -h "${SQL_POSTGRESQL_TEST_SERVER}" -U postgres radius < raddb/mods-config/sql/main/postgresql/setup.sql
+
+echo "PostgreSQL - Grant radius user permissions"
+psql -h "${SQL_POSTGRESQL_TEST_SERVER}" -c 'GRANT ALL PRIVILEGES ON ALL TABLES IN SCHEMA public TO radius;' -U postgres radius
diff --git a/scripts/ci/radsecproxy.conf b/scripts/ci/radsecproxy.conf
new file mode 100644
index 0000000..c6df5d1
--- /dev/null
+++ b/scripts/ci/radsecproxy.conf
@@ -0,0 +1,33 @@
+# radsecproxy -f -c radsecproxy.conf
+
+# If there's no ListenUDP config, then radsecproxy will listen on *:1812 by default. <sigh>
+ListenUDP *:1816
+
+tls default {
+ CACertificateFile ../../raddb/certs/CA.pem
+
+ # You must specify the below for TLS, we always present our certificate
+ CertificateFile ../../raddb/certs/client.pem
+ CertificateKeyFile ../../raddb/certs/client.key
+
+ # Optionally specify password if key is encrypted (not very secure)
+ CertificateKeyPassword "whatever"
+}
+
+client 127.0.0.1 {
+ type udp
+ secret testing123
+}
+
+server 127.0.0.1 {
+ type tls
+ port 2084
+ secret radsec
+
+ CertificateNameCheck off
+}
+
+
+realm * {
+ server 127.0.0.1
+}
diff --git a/scripts/ci/start.sh b/scripts/ci/start.sh
new file mode 100644
index 0000000..0408068
--- /dev/null
+++ b/scripts/ci/start.sh
@@ -0,0 +1,37 @@
+##TODO rip this apart into "configure , make , make deb, make scan and make install" functions
+export PANIC_ACTION="gdb -batch -x raddb/panic.gdb %e %p 1>&0 2>&0"
+
+#Configure
+if [ "${DO_BUILD}" = 'yes' ]; then
+ CFLAGS="${BUILD_CFLAGS}" ./configure -C\
+ --enable-werror \
+ --prefix=$HOME/freeradius\
+ --with-shared-libs=$LIBS_SHARED \
+ --with-threads=$LIBS_OPTIONAL \
+ --with-udpfromto=$LIBS_OPTIONAL \
+ --with-openssl=$LIBS_OPTIONAL \
+ --with-pcre=$LIBS_OPTIONAL \
+ --enable-reproducible-builds=${REPRODUCIBLE}
+fi
+
+if [ "${DO_BUILD}" = 'no' ]; then
+ ./configure -C --without-modules
+fi
+
+# Make
+if [ "${DO_BUILD}" = 'yes' ]; then
+ make -j8
+fi
+
+# Make scan
+if [ "${DO_BUILD}" = 'yes' -a ${CC} = 'clang' ]; then
+ make -j8 scan && [ "$(find build/plist/ -name *.html)" = '' ]
+fi
+
+if [ "${DO_BUILD}" = 'yes' ]; then
+ make ci-test
+fi
+
+if [ "${DO_BUILD}" = 'no' ]; then
+ cd doc/source; doxygen 3>&1 1>&2 2>&3 | grep -iv '^warning:' | tee doxygen_stderr.log && [ ! -n "$(cat doxygen_stderr.log)" ]
+fi
diff --git a/scripts/ci/stunnel.conf b/scripts/ci/stunnel.conf
new file mode 100644
index 0000000..a312d66
--- /dev/null
+++ b/scripts/ci/stunnel.conf
@@ -0,0 +1,16 @@
+;
+; Run via:
+;
+; stunnel stunnel.conf
+;
+; You will be prompted for the password. Type "whatever".
+;
+foreground = yes
+
+[radsec]
+client = yes
+accept = 127.0.0.1:20830
+connect = 127.0.0.1:2083
+cert = ../../raddb/certs/client.pem
+key = ../../raddb/certs/client.key
+;protocol=proxy
diff --git a/scripts/clients.pl b/scripts/clients.pl
new file mode 100755
index 0000000..40ed5ee
--- /dev/null
+++ b/scripts/clients.pl
@@ -0,0 +1,68 @@
+#!/usr/bin/env perl
+#
+# Convert old-style "clients" file to new "clients.conf" format.
+#
+# Usage: clients.pl clients [naslist] new-clients.conf
+# The "new-clients.conf" will be created if it does not exist.
+# If it does exist, it will be over-written.
+#
+#
+# $Id$
+#
+if (($#ARGV < 1) || ($#ARGV > 2)) {
+ print "Usage: clients.pl clients [naslist] new-clients.conf\n";
+ print " The \"new-clients.conf\" will be created if it does not exist.\n";
+ print " If it does exist, it will be over-written.\n";
+ exit(1);
+}
+
+$old = shift;
+$new = shift;
+
+if ($new =~ /naslist/) {
+ $naslist = $new;
+ $new = shift;
+}
+
+open OLD, "< $old" or die "Failed to open $old: $!\n";
+
+while (<OLD>) {
+ next if (/^\s*\#/);
+ next if (/^\s*$/);
+
+ split;
+
+ $clients{$_[0]}{"secret"} = $_[1];
+}
+close OLD;
+
+if (defined $naslist) {
+ open OLD, "< $naslist" or die "Failed to open $naslist: $!\n";
+
+ while (<OLD>) {
+ next if (/^\s*\#/);
+ next if (/^\s*$/);
+
+ split;
+
+ if (!defined $clients{$_[0]}) {
+ print "WARNING! client $_[0] is defined in naslist, but not in clients!";
+ next;
+ }
+
+ $clients{$_[0]}{"shortname"} = $_[1];
+ $clients{$_[0]}{"nas_type"} = $_[2];
+ }
+}
+
+open NEW, "> $new" or die "Failed to open $new: $!\n";
+foreach $client (keys %clients) {
+ print NEW "client $client {\n";
+ print NEW "\tsecret = ", $clients{$client}{"secret"}, "\n";
+ if (defined $clients{$client}{"shortname"}) {
+ print NEW "\tshortname = ", $clients{$client}{"shortname"}, "\n";
+ print NEW "\tnas_type = ", $clients{$client}{"nas_type"}, "\n";
+ }
+ print NEW "}\n";
+ print NEW "\n";
+}
diff --git a/scripts/collectd/radsniff_types.db b/scripts/collectd/radsniff_types.db
new file mode 100644
index 0000000..8f4fc57
--- /dev/null
+++ b/scripts/collectd/radsniff_types.db
@@ -0,0 +1,10 @@
+#
+# Collectd type definitions for radsniff probes
+#
+# $Id$
+#
+# Copyright 2013 Arran Cudbard-Bell <a.cudbardb@freeradius.org>
+#
+radius_count received:GAUGE:0:U, linked:GAUGE:0:U, unlinked:GAUGE:0:U, reused:GAUGE:0:U
+radius_latency smoothed:GAUGE:0:U, avg:GAUGE:0:U, high:GAUGE:0:U, low:GAUGE:0:U
+radius_rtx none:GAUGE:0:U, 1:GAUGE:0:U, 2:GAUGE:0:U, 3:GAUGE:0:U, 4:GAUGE:0:U, more:GAUGE:0:U, lost:GAUGE:0:U
diff --git a/scripts/create-users.pl b/scripts/create-users.pl
new file mode 100755
index 0000000..36d6f8a
--- /dev/null
+++ b/scripts/create-users.pl
@@ -0,0 +1,64 @@
+#!/usr/bin/perl
+
+# Purpose: create lots of random users and passes
+# for testing your radius server
+# Read doc/README.testing for more information
+
+$passfile = "./passwd";
+$shadfile = "./shadow";
+$radfile = "./radius.test";
+$nocrypt = "./passwd.nocrypt";
+$users = "./radius.users";
+
+if($ARGV[0] eq "") {
+ print "\n\tUsage: $0 <number of users>\n\n";
+ exit(1);
+} else {
+ $numusers = $ARGV[0];
+}
+$userlen = 6;
+$passlen = 6;
+
+open(PASS, ">$passfile") || die "Can't open $passfile";
+open(SHAD, ">$shadfile") || die "Can't open $shadfile";
+open(RAD, ">$radfile") || die "Can't open $radfile";
+open(NOCRYPT, ">$nocrypt") || die "Can't open $nocrypt";
+open(USERS, ">$users") || die "Can't open $users";
+
+for ($num=0; $num<$numusers; $num++) {
+ # generate username
+ $username = "";
+ for($i=0; $i<rand($userlen)+2; $i++) {
+ do { ($char = chr((rand 25)+97))} until $char=~/[A-Za-z]/;
+ $username .= $char;
+ }
+ # Make sure they're unique
+ if(($userlist{$username}) || (getpwnam($username))) {
+ $num--;
+ next;
+ }
+ $userlist{$username} = 1;
+
+ # generate password
+ $password = "";
+ for($i=0; $i<rand($passlen)+2; $i++) {
+ do { ($char = chr((rand 25)+97))} until $char=~/[A-Za-z]/;
+ $password .= $char;
+ }
+
+ if (length($num)%2==1) {
+ $num="0".$num;
+ }
+ printf PASS "$username:%s:1001:1001:Name:/dev/null:/dev/null\n", crypt($password, $password);
+ printf SHAD "$username:%s:1000:0:99999:7:::\n", crypt($password, $password);
+ printf RAD "User-Name=$username, User-Password=$password,NAS-IP-Address=127.0.0.1,NAS-Port-Id=0\n\n";
+ print NOCRYPT "$username:$password\n";
+ print USERS "$username Cleartext-Password := \"$password\"\n\tClass=\"0x$num\"\n\n";
+}
+
+close(PASS);
+close(SHAD);
+close(RAD);
+close(NOCRYPT);
+close(USERS);
+print "\nCreated $numusers random users and passwords\n\n";
diff --git a/scripts/cron/radiusd.cron.daily.in b/scripts/cron/radiusd.cron.daily.in
new file mode 100644
index 0000000..da631ba
--- /dev/null
+++ b/scripts/cron/radiusd.cron.daily.in
@@ -0,0 +1,34 @@
+#! /bin/sh
+#
+# radiusd Cron script to rotate radiusd log files daily.
+#
+
+prefix=@prefix@
+localstatedir=@localstatedir@
+logdir=@logdir@
+
+umask 027
+cd $logdir
+
+# Take care of the standard logfiles.
+cd $logdir
+if [ -f radius.log ]
+then
+ savelog -g adm -m 640 -c 3 radius.log > /dev/null
+fi
+
+# Rotate "details" files.
+if [ ! -d radacct/. ]
+then
+ exit 0
+fi
+cd radacct
+
+for LOG in */detail
+do
+ if [ -f $LOG ]
+ then
+ savelog -g adm -m 640 -u root -c 3 $LOG >/dev/null
+ fi
+done
+
diff --git a/scripts/cron/radiusd.cron.monthly.in b/scripts/cron/radiusd.cron.monthly.in
new file mode 100644
index 0000000..84d8246
--- /dev/null
+++ b/scripts/cron/radiusd.cron.monthly.in
@@ -0,0 +1,19 @@
+#! /bin/sh
+#
+# radiusd Cron script to rotate radwtmp file monthly.
+#
+
+prefix=@prefix@
+localstatedir=@localstatedir@
+logdir=@logdir@
+
+umask 022
+cd $logdir
+
+# Take care of the standard logfiles.
+cd $logdir
+if [ -f radwtmp ]
+then
+ savelog -g adm -m 644 -c 6 radwtmp > /dev/null
+fi
+
diff --git a/scripts/crossbuild/README.md b/scripts/crossbuild/README.md
new file mode 100644
index 0000000..0bcc2c4
--- /dev/null
+++ b/scripts/crossbuild/README.md
@@ -0,0 +1,122 @@
+# Crossbuild
+
+## Summary
+
+The "crossbuild" system is a way to build FreeRADIUS for multiple
+different operating systems, using Docker.
+
+The primary purpose is for developers to easily test FreeRADIUS on
+different systems.
+
+
+## Common Usage
+
+The systems supported can be listed with
+
+ make crossbuild.info
+
+A reminder of the make targets may be seen with
+
+ make crossbuild.help
+
+To make all the known systems (this may take quite a while, at
+least on the first run):
+
+ make crossbuild
+
+or for the most common systems (Debian, Ubuntu, CentOS):
+
+ make crossbuild.common
+
+
+## General operation
+
+The system works by building and then starting up Docker
+containers for the systems. When a build is triggered (either
+generally, as above, or for a specific OS) the current git commits
+are copied into the image and then `make test` run.
+
+The Docker containers are left running, and may be stopped with
+
+ make crossbuild.down
+
+The system tries to be as efficient as possible, so will not
+rebuild from scratch every time.
+
+
+## Global make targets
+
+The following targets will operate on the crossbuild system
+globally, or on all images (unless otherwise stated):
+
+
+### `make crossbuild`
+
+Create all docker images (if required), start them, build and test
+FreeRADIUS.
+
+
+### `make crossbuild.common`
+
+As `make crossbuild`, but only build and test the most common
+systems.
+
+
+### `make crossbuild.info`
+
+List all systems, together with the expected state. See
+`crossbuild.reset`.
+
+
+### `make crossbuild.down`
+
+Stop all containers.
+
+
+### `make crossbuild.reset`
+
+If containers are stopped or started outside Docker, crossbuild
+may get confused. This will clear the internal state which should
+try and start everything from the beginning again.
+
+
+### `make crossbuild.clean`
+
+Bring down all containers, clear state. This is a general "tidy
+up".
+
+
+### `make crossbuild.wipe`
+
+Don't just stop, but destroy all crossbuild docker images. This
+will mean they need to be recreated again upon next use.
+
+
+## Per-image make targets
+
+The following make targets may be used on a per-image basis:
+
+ * `make crossbuild.IMAGE`: build and test image
+ * `make crossbuild.IMAGE.log`: show latest build log
+ * `make crossbuild.IMAGE.up`: start container
+ * `make crossbuild.IMAGE.down`: stop container
+ * `make crossbuild.IMAGE.sh`: shell in container
+ * `make crossbuild.IMAGE.refresh`: push latest commits into container
+ * `make crossbuild.IMAGE.clean`: stop container and tidy up
+ * `make crossbuild.IMAGE.wipe`: remove Docker image
+
+For example, `make crossbuild.debian10` to create, build and test
+FreeRADIUS on Debian 10. `make crossbuild.debian10.down` will then
+stop the container.
+
+
+## Docker image and container names
+
+Docker images will be created with names in the form:
+
+ freeradius-build/debian10
+
+while containers will have names like:
+
+ fr-crossbuild-debian10
+
diff --git a/scripts/crossbuild/build/.gitignore b/scripts/crossbuild/build/.gitignore
new file mode 100644
index 0000000..7bd9396
--- /dev/null
+++ b/scripts/crossbuild/build/.gitignore
@@ -0,0 +1,4 @@
+build.*
+configure.*
+log.*
+stamp-*
diff --git a/scripts/crossbuild/crossbuild.mk b/scripts/crossbuild/crossbuild.mk
new file mode 100644
index 0000000..da96506
--- /dev/null
+++ b/scripts/crossbuild/crossbuild.mk
@@ -0,0 +1,236 @@
+#
+# Include crossbuild targets, to test building on lots of
+# different OSes. Uses Docker.
+#
+ifeq ($(shell which docker 2> /dev/null),)
+.PHONY: crossbuild crossbuild.help
+crossbuild crossbuild.help :
+ @echo crossbuild requires Docker to be installed
+else
+
+#
+# Short list of common builds
+#
+CB_COMMON:=centos7 debian10 ubuntu18
+
+# Where the docker directories are
+DT:=scripts/crossbuild/docker
+
+# Where to put stamp files (subdirectory of where this makefile is)
+DD:=$(dir $(realpath $(lastword $(MAKEFILE_LIST))))/build
+
+# List of all the docker images (sorted for "crossbuild.info")
+CB_IMAGES:=$(sort $(patsubst $(DT)/%,%,$(wildcard $(DT)/*)))
+
+# Location of the .git dir (may be different for e.g. submodules)
+GITDIR:=$(shell perl -MCwd -e 'print Cwd::abs_path shift' $$(git rev-parse --git-dir))
+
+CB_CPREFIX:=fr-crossbuild-
+CB_IPREFIX:=freeradius-build
+
+#
+# This Makefile is included in-line, and not via the "boilermake"
+# wrapper. But it's still useful to use the same process for
+# seeing commands that are run.
+#
+ifeq "${VERBOSE}" ""
+ Q=@
+else
+ Q=
+endif
+
+#
+# Enter here: This builds everything
+#
+.PHONY: crossbuild crossbuild.common
+crossbuild: crossbuild.info $(foreach IMG,${CB_IMAGES},crossbuild.${IMG})
+crossbuild.common: crossbuild.info $(foreach IMG,${CB_COMMON},crossbuild.${IMG})
+
+#
+# Dump out some useful information on what images we're going to test
+#
+.PHONY: crossbuild.info crossbuild.info_header crossbuild.help
+crossbuild.info: crossbuild.info_header $(foreach IMG,${CB_IMAGES},crossbuild.${IMG}.status)
+ @echo Common images: $(CB_COMMON)
+
+crossbuild.info_header:
+ @echo Images:
+
+crossbuild.help: crossbuild.info
+ @echo ""
+ @echo "Make targets:"
+ @echo " crossbuild - build and test all images"
+ @echo " crossbuild.common - build and test common images"
+ @echo " crossbuild.info - list images"
+ @echo " crossbuild.down - stop all containers"
+ @echo " crossbuild.reset - remove cache of docker state"
+ @echo " crossbuild.clean - down and reset all targets"
+ @echo " crossbuild.wipe - destroy all crossbuild Docker images"
+ @echo ""
+ @echo "Per-image targets:"
+ @echo " crossbuild.IMAGE - build and test image <IMAGE>"
+ @echo " crossbuild.IMAGE.log - show latest build log"
+ @echo " crossbuild.IMAGE.up - start container"
+ @echo " crossbuild.IMAGE.down - stop container"
+ @echo " crossbuild.IMAGE.sh - shell in container"
+ @echo " crossbuild.IMAGE.refresh - push latest commits into container"
+ @echo " crossbuild.IMAGE.reset - remove cache of docker state"
+ @echo " crossbuild.IMAGE.clean - stop container and tidy up"
+ @echo " crossbuild.IMAGE.wipe - remove Docker image"
+
+#
+# Remove stamp files, so that we try and create images again
+#
+crossbuild.reset: $(foreach IMG,${CB_IMAGES},crossbuild.${IMG}.reset)
+
+#
+# Stop all containers
+#
+crossbuild.down: $(foreach IMG,${CB_IMAGES},crossbuild.${IMG}.down)
+
+#
+# Clean up: stop all containers, do a reset
+#
+crossbuild.clean: $(foreach IMG,${CB_IMAGES},crossbuild.${IMG}.clean)
+
+#
+# Remove all images
+#
+crossbuild.wipe: $(foreach IMG,${CB_IMAGES},crossbuild.${IMG}.wipe)
+
+#
+# Define rules for building a particular image
+#
+define CROSSBUILD_IMAGE_RULE
+#
+# Show status (based on stamp files)
+#
+.PHONY: crossbuild.${1}.status
+crossbuild.${1}.status:
+ ${Q}printf "%s" "`echo \" ${1} \" | cut -c 1-20`"
+ ${Q}if [ -e "$(DD)/stamp-up.${1}" ]; then echo "running"; \
+ elif [ -e "$(DD)/stamp-image.${1}" ]; then echo "built"; \
+ else echo "-"; fi
+#
+# Build the docker image
+#
+$(DD)/stamp-image.${1}:
+ ${Q}echo "BUILD ${1} ($(CB_IPREFIX)/${1}) > $(DD)/build.${1}"
+ ${Q}docker build $(DOCKER_BUILD_OPTS) $(DT)/${1} -f $(DT)/${1}/Dockerfile -t $(CB_IPREFIX)/${1} >$(DD)/build.${1} 2>&1
+ ${Q}touch $(DD)/stamp-image.${1}
+
+#
+# Start up the docker container
+#
+.PHONY: $(DD)/docker.up.${1}
+$(DD)/docker.up.${1}: $(DD)/stamp-image.${1}
+ ${Q}echo "START ${1} ($(CB_CPREFIX)${1})"
+ ${Q}docker container inspect $(CB_CPREFIX)${1} >/dev/null 2>&1 || \
+ docker run -d --rm \
+ --privileged --cap-add=ALL \
+ --mount=type=bind,source="$(GITDIR)",destination=/srv/src,ro \
+ --name $(CB_CPREFIX)${1} $(CB_IPREFIX)/${1} \
+ /bin/sh -c 'while true; do sleep 60; done' >/dev/null
+
+$(DD)/stamp-up.${1}: $(DD)/docker.up.${1}
+ ${Q}touch $(DD)/stamp-up.${1}
+
+.PHONY: crossbuild.${1}.up
+crossbuild.${1}.up: $(DD)/stamp-up.${1}
+
+#
+# Run tests in the container
+#
+.PHONY: $(DD)/docker.refresh.${1}
+$(DD)/docker.refresh.${1}: $(DD)/stamp-up.${1}
+ ${Q}echo "REFRESH ${1}"
+ ${Q}docker container exec $(CB_CPREFIX)${1} sh -c 'rsync -a /srv/src/ /srv/local-src/'
+ ${Q}docker container exec $(CB_CPREFIX)${1} sh -c 'git config -f /srv/local-src/config core.bare true'
+ ${Q}docker container exec $(CB_CPREFIX)${1} sh -c 'git config -f /srv/local-src/config --unset core.worktree || true'
+ ${Q}docker container exec $(CB_CPREFIX)${1} sh -c '[ -d /srv/build ] || git clone /srv/local-src /srv/build'
+ ${Q}docker container exec $(CB_CPREFIX)${1} sh -c '(cd /srv/build && git pull --rebase)'
+ ${Q}docker container exec $(CB_CPREFIX)${1} sh -c '[ -e /srv/build/config.log ] || echo CONFIGURE ${1}'
+ ${Q}docker container exec $(CB_CPREFIX)${1} sh -c '[ -e /srv/build/config.log ] || (cd /srv/build && ./configure -C)' > $(DD)/configure.${1} 2>&1
+
+.PHONY: $(DD)/docker.run.${1}
+$(DD)/docker.run.${1}: $(DD)/docker.refresh.${1}
+ ${Q}echo "TEST ${1} > $(DD)/log.${1}"
+ ${Q}docker container exec $(CB_CPREFIX)${1} sh -c '(cd /srv/build && make && make test)' > $(DD)/log.${1} 2>&1 || echo FAIL ${1}
+
+#
+# Stop the docker container
+#
+.PHONY: crossbuild.${1}.down
+crossbuild.${1}.down:
+ @echo STOP ${1}
+ ${Q}docker container kill $(CB_CPREFIX)${1} || true
+ @rm -f $(DD)/stamp-up.${1}
+
+.PHONY: crossbuild.${1}.clean
+crossbuild.${1}.clean: crossbuild.${1}.down crossbuild.${1}.reset
+
+#
+# Shell into container. cd to root first (will always succeed),
+# then try to change to build dir, which might not exist, then
+# run bash. (Default cwd is the wrong freeradius source in
+# /usr/local, which would be confusing)
+#
+.PHONY: crossbuild.${1}.sh
+crossbuild.${1}.sh: crossbuild.${1}.up
+ ${Q}docker exec -it $(CB_CPREFIX)${1} sh -c 'cd / ; cd /srv/build 2>/dev/null; bash' || true
+
+#
+# Show last build logs. Try and use the most sensible pager.
+#
+.PHONY: crossbuild.${1}.log
+crossbuild.${1}.log:
+ @if which less >/dev/null; then \
+ less +G $(DD)/log.${1};\
+ elif which more >/dev/null; then \
+ more $(DD)/log.${1};\
+ else cat $(DD)/log.${1}; fi
+
+#
+# Tidy up stamp files. This means on next run we'll do
+# everything. Required if e.g. system has been rebooted, so
+# containers are stopped, but the stamp file still exists.
+#
+.PHONY: crossbuild.${1}.reset
+crossbuild.${1}.reset:
+ ${Q}echo RESET ${1}
+ ${Q}rm -f $(DD)/stamp-up.${1}
+ ${Q}rm -f $(DD)/stamp-image.${1}
+
+#
+# Clean down images. Means on next run we'll rebuild the
+# container (rather than just starting it).
+#
+.PHONY: crossbuild.${1}.wipe
+crossbuild.${1}.wipe:
+ ${Q}echo CLEAN ${1}
+ ${Q}docker image rm $(CB_IPREFIX)/${1} >/dev/null 2>&1 || true
+ ${Q}rm -f $(DD)/stamp-image.${1}
+
+#
+# Refresh git repository within the docker image
+#
+.PHONY: crossbuild.${1}.refresh
+crossbuild.${1}.refresh: $(DD)/docker.refresh.${1}
+
+#
+# Run the build test
+#
+.PHONY: crossbuild.${1}
+crossbuild.${1}: $(DD)/docker.run.${1}
+
+endef
+
+#
+# Add all the image building rules
+#
+$(foreach IMAGE,$(CB_IMAGES),\
+ $(eval $(call CROSSBUILD_IMAGE_RULE,$(IMAGE))))
+
+
+# if docker is defined
+endif
diff --git a/scripts/crossbuild/docker/centos7/Dockerfile b/scripts/crossbuild/docker/centos7/Dockerfile
new file mode 100644
index 0000000..2f9e4ac
--- /dev/null
+++ b/scripts/crossbuild/docker/centos7/Dockerfile
@@ -0,0 +1,92 @@
+FROM centos:centos7
+
+#
+# Install devtools like make and git and the EPEL
+# repository for freetds and hiredis
+#
+RUN yum update -y
+RUN yum install -y rpmdevtools openssl epel-release git yum-utils rsync
+
+#
+# Install GCC that has the requisite support for C11 keywords and atomics
+#
+RUN yum install -y centos-release-scl
+RUN yum install -y devtoolset-8-gcc devtoolset-8-gcc-c++
+ENV CC=/opt/rh/devtoolset-8/root/usr/bin/gcc
+
+#
+# Remove the CentOS-SCLo repo which is apparently not valid?
+# See: https://bugs.centos.org/view.php?id=14773
+#
+RUN rm /etc/yum.repos.d/CentOS-SCLo-scl-rh.repo
+RUN rm /etc/yum.repos.d/CentOS-SCLo-scl.repo
+
+#
+# Documentation build dependencies
+#
+
+# - doxygen & JSON.pm
+RUN yum install -y doxygen graphviz perl-JSON
+# - antora (npm needed)
+RUN curl -sL https://rpm.nodesource.com/setup_10.x | bash -
+RUN yum install -y nodejs
+RUN npm i -g @antora/cli@2.1 @antora/site-generator-default@2.1
+# - pandoc
+RUN curl -o - -L $(curl -s https://api.github.com/repos/jgm/pandoc/releases/latest | grep "browser_download_url.*tar.gz" | cut -d '"' -f 4) | tar xzvf - -C /tmp/
+# "
+RUN mv /tmp/pandoc-*/bin/* /usr/local/bin
+# - asciidoctor
+RUN yum install -y rubygems-devel
+RUN gem install asciidoctor
+
+#
+# Setup a src dir in /usr/local
+#
+RUN mkdir -p /usr/local/src/repositories
+WORKDIR /usr/local/src/repositories
+
+#
+# Use LTB's openldap packages instead of the distribution version to avoid linking against NSS
+#
+RUN echo $'[ltb-project]\n\
+name=LTB project packages\n\
+baseurl=https://ltb-project.org/rpm/$releasever/$basearch\n\
+enabled=1\n\
+gpgcheck=1\n\
+gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-LTB-project'\
+> /etc/yum.repos.d/ltb-project.repo
+RUN rpm --import https://ltb-project.org/lib/RPM-GPG-KEY-LTB-project
+
+#
+# Shallow clone the FreeRADIUS source
+#
+WORKDIR /usr/local/src/repositories
+ARG source=https://github.com/FreeRADIUS/freeradius-server.git
+RUN git clone --depth 1 --no-single-branch ${source}
+
+#
+# Install build dependencies for all branches from v3 onwards
+# Nodesource has issues (no SRPMS in some repos) and is not needed here
+#
+WORKDIR freeradius-server
+RUN for i in $(git for-each-ref --format='%(refname:short)' refs/remotes/origin 2>/dev/null | sed -e 's#origin/##' | egrep "^(v[3-9]*\.[0-9x]*\.x|master)$");\
+ do \
+ git checkout $i; \
+ [ -e redhat/freeradius.spec ] && yum-builddep --disablerepo="nodesource*" -y redhat/freeradius.spec; \
+ done
+
+#
+# Which is required by fixture setup utilities
+#
+RUN yum install -y which
+
+#
+# Explicitly install libnl3-devel which is required for the EAP tests
+#
+RUN yum install -y libnl3-devel
+
+#
+# Create the RPM build tree
+#
+ENV BUILDDIR=/root/rpmbuild
+RUN rpmdev-setuptree
diff --git a/scripts/crossbuild/docker/centos8/Dockerfile b/scripts/crossbuild/docker/centos8/Dockerfile
new file mode 100644
index 0000000..bd856af
--- /dev/null
+++ b/scripts/crossbuild/docker/centos8/Dockerfile
@@ -0,0 +1,81 @@
+FROM centos:centos8
+
+#
+# Install devtools like make and git and the EPEL
+# repository for freetds and hiredis
+#
+RUN yum update -y
+RUN yum install -y rpmdevtools openssl epel-release git yum-utils rsync dnf-plugins-core
+RUN dnf config-manager --set-enabled powertools
+
+#
+# Install GCC that has the requisite support for C11 keywords and atomics
+#
+RUN yum install -y gcc-toolset-9
+
+#
+# Documentation build dependencies
+#
+# - doxygen & JSON.pm
+RUN yum install -y doxygen graphviz perl-JSON
+# - antora (npm needed)
+RUN curl -sL https://rpm.nodesource.com/setup_10.x | bash -
+RUN yum install -y nodejs
+RUN npm i -g @antora/cli@2.1 @antora/site-generator-default@2.1
+# - pandoc
+RUN curl -o - -L $(curl -s https://api.github.com/repos/jgm/pandoc/releases/latest | grep "browser_download_url.*tar.gz" | cut -d '"' -f 4) | tar xzvf - -C /tmp/
+RUN mv /tmp/pandoc-*/bin/* /usr/local/bin
+# - asciidoctor
+RUN yum install -y rubygems-devel
+RUN gem install asciidoctor
+
+#
+# Setup a src dir in /usr/local
+#
+RUN mkdir -p /usr/local/src/repositories
+WORKDIR /usr/local/src/repositories
+
+#
+# Use LTB's openldap packages instead of the distribution version to avoid linking against NSS
+#
+RUN echo $'[ltb-project]\n\
+name=LTB project packages\n\
+baseurl=https://ltb-project.org/rpm/$releasever/$basearch\n\
+enabled=1\n\
+gpgcheck=1\n\
+gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-LTB-project'\
+> /etc/yum.repos.d/ltb-project.repo
+RUN rpm --import https://ltb-project.org/lib/RPM-GPG-KEY-LTB-project
+
+#
+# Shallow clone the FreeRADIUS source
+#
+WORKDIR /usr/local/src/repositories
+ARG source=https://github.com/FreeRADIUS/freeradius-server.git
+RUN git clone --depth 1 --no-single-branch ${source}
+
+#
+# Install build dependencies for all branches from v3 onwards
+#
+WORKDIR freeradius-server
+RUN for i in $(git for-each-ref --format='%(refname:short)' refs/remotes/origin 2>/dev/null | sed -e 's#origin/##' | egrep "^(v[3-9]*\.[0-9x]*\.x|master)$");\
+ do \
+ git checkout $i; \
+ [ -e redhat/freeradius.spec ] && yum-builddep -y redhat/freeradius.spec; \
+ done
+
+#
+# Which is required by fixture setup utilities
+#
+RUN yum install -y which
+
+#
+# Explicitly install libnl3-devel which is required for the EAP tests
+#
+RUN yum install -y libnl3-devel
+
+#
+# Create the RPM build tree
+#
+ENV BUILDDIR=/root/rpmbuild
+RUN rpmdev-setuptree
diff --git a/scripts/crossbuild/docker/debian10/Dockerfile b/scripts/crossbuild/docker/debian10/Dockerfile
new file mode 100644
index 0000000..3eb13a7
--- /dev/null
+++ b/scripts/crossbuild/docker/debian10/Dockerfile
@@ -0,0 +1,89 @@
+FROM debian:buster
+
+ARG gccver=8
+ARG clangver=8
+ARG osname=buster
+
+ARG DEBIAN_FRONTEND=noninteractive
+
+#
+# Install add-apt-repository
+#
+RUN apt-get update && \
+ apt-get install -y software-properties-common gnupg2 procps && \
+ apt-get clean && \
+ rm -r /var/lib/apt/lists/*
+
+# For clang
+RUN add-apt-repository -y "deb http://apt.llvm.org/${osname}/ llvm-toolchain-${osname}-${clangver} main" && \
+ apt-key adv --fetch-keys http://apt.llvm.org/llvm-snapshot.gpg.key
+
+RUN apt-get update && \
+# Development utilities
+ apt-get install -y devscripts equivs git quilt rsync && \
+# Compilers
+ apt-get install -y g++-${gccver} llvm-${clangver} clang-${clangver} lldb-${clangver} && \
+# eapol_test dependencies
+ apt-get install -y libnl-3-dev libnl-genl-3-dev
+
+#
+# Documentation build dependencies
+#
+
+# - doxygen & JSON.pm
+RUN apt-get install -y doxygen graphviz libjson-perl
+# - antora (npm needed)
+RUN curl -sL https://deb.nodesource.com/setup_10.x | bash -
+RUN apt-get install -y nodejs
+RUN npm i -g @antora/cli@2.1 @antora/site-generator-default@2.1
+# - pandoc
+WORKDIR /tmp
+RUN curl -OL $(curl -s https://api.github.com/repos/jgm/pandoc/releases/latest | grep "browser_download_url.*deb" | cut -d '"' -f 4)
+RUN apt-get install -y ./pandoc-*.deb
+# - asciidoctor
+RUN apt-get install -y ruby-dev
+RUN gem install asciidoctor
+
+# set default things
+RUN update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-${gccver} 50 \
+ --slave /usr/bin/g++ g++ /usr/bin/g++-${gccver} && \
+ update-alternatives --config gcc
+
+RUN update-alternatives --install /usr/bin/clang clang /usr/bin/clang-${clangver} 60 && \
+ update-alternatives --config clang
+
+RUN update-alternatives --install /usr/bin/lldb lldb /usr/bin/lldb-${clangver} 60 && \
+ update-alternatives --config lldb
+
+
+#
+# Setup a src dir in /usr/local
+#
+RUN mkdir -p /usr/local/src/repositories
+WORKDIR /usr/local/src/repositories
+
+
+#
+# Shallow clone the FreeRADIUS source
+#
+WORKDIR /usr/local/src/repositories
+ARG source=https://github.com/FreeRADIUS/freeradius-server.git
+RUN git clone --depth 1 --no-single-branch ${source}
+
+
+#
+# Install build dependencies for all v3 branches
+#
+WORKDIR freeradius-server
+RUN for i in $(git for-each-ref --format='%(refname:short)' refs/remotes/origin 2>/dev/null | sed -e 's#origin/##' | egrep "^v3\..*\.x");\
+ do \
+ git checkout $i; \
+ if [ -e ./debian/control.in ] ; then \
+ debian/rules debian/control ; \
+ fi ; \
+ echo Installing dependencies for $i ; \
+ mk-build-deps debian/control ; \
+ apt-get --no-install-recommends -y -V install ./freeradius-build-deps*.deb || true ; \
+ apt-get -y -f remove freeradius-build-deps libiodbc2-dev || true ; \
+ rm ./freeradius-build-deps*.deb ; \
+ done
diff --git a/scripts/crossbuild/docker/debian8/Dockerfile b/scripts/crossbuild/docker/debian8/Dockerfile
new file mode 100644
index 0000000..094faa3
--- /dev/null
+++ b/scripts/crossbuild/docker/debian8/Dockerfile
@@ -0,0 +1,84 @@
+FROM debian:jessie
+
+ARG gccver=4.9
+ARG clangver=5.0
+ARG osname=jessie
+
+ARG DEBIAN_FRONTEND=noninteractive
+
+#
+# Install add-apt-repository
+#
+RUN apt-get update && \
+ apt-get install -y software-properties-common python-software-properties apt-transport-https curl && \
+ apt-get clean && \
+ rm -r /var/lib/apt/lists/*
+
+# Requires GCC-4.9 as it has support for C11 keywords and atomics
+
+# For clang
+RUN add-apt-repository -y "deb http://apt.llvm.org/${osname}/ llvm-toolchain-${osname}-${clangver} main" && \
+ curl -o /tmp/llvm-snapshot.gpg.key https://apt.llvm.org/llvm-snapshot.gpg.key && \
+ apt-key add /tmp/llvm-snapshot.gpg.key
+
+RUN apt-get update && \
+# Development utilities
+ apt-get install -y devscripts equivs git quilt rsync && \
+# Compilers
+ apt-get install -y g++-${gccver} llvm-${clangver} clang-${clangver} lldb-${clangver} && \
+# eapol_test dependencies
+ apt-get install -y libnl-3-dev libnl-genl-3-dev
+
+#
+# Documentation build dependencies
+#
+
+# - doxygen & JSON.pm
+RUN apt-get install -y doxygen graphviz libjson-perl
+# - antora (npm needed)
+RUN curl -sL https://deb.nodesource.com/setup_10.x | bash -
+RUN apt-get install -y nodejs
+RUN npm i -g @antora/cli@2.1 @antora/site-generator-default@2.1
+# - pandoc
+WORKDIR /tmp
+RUN curl -OL $(curl -s https://api.github.com/repos/jgm/pandoc/releases/latest | grep "browser_download_url.*deb" | cut -d '"' -f 4)
+RUN dpkg -i ./pandoc-*.deb
+RUN apt-get install -fy
+# - asciidoctor
+RUN apt-get install -y ruby
+RUN gem install asciidoctor
+
+# set default things
+RUN update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-${gccver} 50 \
+ --slave /usr/bin/g++ g++ /usr/bin/g++-${gccver} && \
+ update-alternatives --config gcc
+
+RUN update-alternatives --install /usr/bin/clang clang /usr/bin/clang-${clangver} 60 && \
+ update-alternatives --config clang
+
+RUN update-alternatives --install /usr/bin/lldb lldb /usr/bin/lldb-${clangver} 60 && \
+ update-alternatives --config lldb
+
+
+#
+# Setup a src dir in /usr/local
+#
+RUN mkdir -p /usr/local/src/repositories
+WORKDIR /usr/local/src/repositories
+
+
+#
+# Shallow clone the FreeRADIUS source
+#
+ARG source=https://github.com/FreeRADIUS/freeradius-server.git
+RUN git clone --depth 1 --no-single-branch ${source}
+
+#
+# Install build dependencies for all branches from v3 onwards
+#
+WORKDIR freeradius-server
+RUN for i in $(git for-each-ref --format='%(refname:short)' refs/remotes/origin 2>/dev/null | sed -e 's#origin/##' | egrep "^(v[3-9]*\.[0-9x]*\.x|master)$");\
+ do \
+ git checkout $i; \
+ if [ -e ./debian/control.in ] ; then debian/rules debian/control ; fi ; echo 'y' | mk-build-deps -irt'apt-get -yV' debian/control ; \
+ done
diff --git a/scripts/crossbuild/docker/debian9/Dockerfile b/scripts/crossbuild/docker/debian9/Dockerfile
new file mode 100644
index 0000000..9b47832
--- /dev/null
+++ b/scripts/crossbuild/docker/debian9/Dockerfile
@@ -0,0 +1,84 @@
+FROM debian:stretch
+
+ARG gccver=6
+ARG clangver=5.0
+ARG osname=stretch
+
+ARG DEBIAN_FRONTEND=noninteractive
+
+#
+# Install add-apt-repository
+#
+RUN apt-get update && \
+ apt-get install -y software-properties-common gnupg2 apt-transport-https curl && \
+ apt-get clean && \
+ rm -r /var/lib/apt/lists/*
+
+# Stretch uses GCC-6.3 by default, so it doesn't need to be updated to get C11 functionality.
+
+# For clang
+RUN add-apt-repository -y "deb http://apt.llvm.org/${osname}/ llvm-toolchain-${osname}-${clangver} main" && \
+ curl -o /tmp/llvm-snapshot.gpg.key https://apt.llvm.org/llvm-snapshot.gpg.key && \
+ apt-key add /tmp/llvm-snapshot.gpg.key
+
+RUN apt-get update && \
+# Development utilities
+ apt-get install -y devscripts equivs git quilt rsync && \
+# Compilers
+ apt-get install -y g++-${gccver} llvm-${clangver} clang-${clangver} lldb-${clangver} && \
+# eapol_test dependencies
+ apt-get install -y libnl-3-dev libnl-genl-3-dev
+
+#
+# Documentation build dependencies
+#
+
+# - doxygen & JSON.pm
+RUN apt-get install -y doxygen graphviz libjson-perl
+# - antora (npm needed)
+RUN curl -sL https://deb.nodesource.com/setup_10.x | bash -
+RUN apt-get install -y npm
+RUN npm i -g @antora/cli@2.1 @antora/site-generator-default@2.1
+# - pandoc
+WORKDIR /tmp
+RUN curl -OL $(curl -s https://api.github.com/repos/jgm/pandoc/releases/latest | grep "browser_download_url.*deb" | cut -d '"' -f 4)
+RUN apt-get install -y ./pandoc-*.deb
+# - asciidoctor
+RUN apt-get install -y ruby-dev
+RUN gem install asciidoctor
+
+# set default things
+RUN update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-${gccver} 50 \
+ --slave /usr/bin/g++ g++ /usr/bin/g++-${gccver} && \
+ update-alternatives --config gcc
+
+RUN update-alternatives --install /usr/bin/clang clang /usr/bin/clang-${clangver} 60 && \
+ update-alternatives --config clang
+
+RUN update-alternatives --install /usr/bin/lldb lldb /usr/bin/lldb-${clangver} 60 && \
+ update-alternatives --config lldb
+
+
+#
+# Setup a src dir in /usr/local
+#
+RUN mkdir -p /usr/local/src/repositories
+WORKDIR /usr/local/src/repositories
+
+
+#
+# Shallow clone the FreeRADIUS source
+#
+WORKDIR /usr/local/src/repositories
+ARG source=https://github.com/FreeRADIUS/freeradius-server.git
+RUN git clone --depth 1 --no-single-branch ${source}
+
+#
+# Install build dependencies for all branches from v3 onwards
+#
+WORKDIR freeradius-server
+RUN for i in $(git for-each-ref --format='%(refname:short)' refs/remotes/origin 2>/dev/null | sed -e 's#origin/##' | egrep "^(v[3-9]*\.[0-9x]*\.x|master)$");\
+ do \
+ git checkout $i; \
+ if [ -e ./debian/control.in ] ; then debian/rules debian/control ; fi ; echo 'y' | mk-build-deps -irt'apt-get -yV' debian/control ; \
+ done
diff --git a/scripts/crossbuild/docker/debian9/README b/scripts/crossbuild/docker/debian9/README
new file mode 100644
index 0000000..f7a6135
--- /dev/null
+++ b/scripts/crossbuild/docker/debian9/README
@@ -0,0 +1,15 @@
+
+Build source image
+
+ docker build . -f Dockerfile.source -t freeradius:debian9-source
+
+Then either build and run jenkins image
+
+ docker build . -f Dockerfile.jenkins -t freeradius:debian9-jenkins
+ docker run -d -p 2222:22 freeradius:debian9-jenkins
+
+or build and run the server
+
+ docker build . -t freeradius:debian9
+ docker run -d -p 1812:1812/udp -p 1813:1813/udp freeradius:debian9
+
diff --git a/scripts/crossbuild/docker/ubuntu16/Dockerfile b/scripts/crossbuild/docker/ubuntu16/Dockerfile
new file mode 100644
index 0000000..dbec6f9
--- /dev/null
+++ b/scripts/crossbuild/docker/ubuntu16/Dockerfile
@@ -0,0 +1,86 @@
+FROM ubuntu:16.04
+
+ARG gccver=4.9
+ARG clangver=5.0
+ARG osname=xenial
+
+ARG DEBIAN_FRONTEND=noninteractive
+
+#
+# Install add-apt-repository
+#
+RUN apt-get update && \
+ apt-get install -y software-properties-common python-software-properties apt-transport-https curl && \
+ apt-get clean && \
+ rm -r /var/lib/apt/lists/*
+
+# Requires GCC-4.9 as it has support for C11 keywords and atomics
+
+# For clang
+RUN add-apt-repository -y "deb http://apt.llvm.org/${osname}/ llvm-toolchain-${osname}-${clangver} main" && \
+ curl -o /tmp/llvm-snapshot.gpg.key https://apt.llvm.org/llvm-snapshot.gpg.key && \
+ apt-key add /tmp/llvm-snapshot.gpg.key && \
+# For GCC
+ add-apt-repository -y ppa:ubuntu-toolchain-r/test
+
+RUN apt-get update && \
+# Development utilities
+ apt-get install -y devscripts equivs git quilt rsync && \
+# Compilers
+ apt-get install -y g++-${gccver} llvm-${clangver} clang-${clangver} lldb-${clangver} && \
+# eapol_test dependencies
+ apt-get install -y libnl-3-dev libnl-genl-3-dev
+
+#
+# Documentation build dependencies
+#
+
+# - doxygen & JSON.pm
+RUN apt-get install -y doxygen graphviz libjson-perl
+# - antora (npm needed)
+RUN curl -sL https://deb.nodesource.com/setup_10.x | bash -
+RUN apt-get install -y nodejs
+RUN npm i -g @antora/cli@2.1 @antora/site-generator-default@2.1
+# - pandoc
+WORKDIR /tmp
+RUN curl -OL $(curl -s https://api.github.com/repos/jgm/pandoc/releases/latest | grep "browser_download_url.*deb" | cut -d '"' -f 4)
+RUN apt-get install -y ./pandoc-*.deb
+# - asciidoctor
+RUN apt-get install -y ruby-dev
+RUN gem install asciidoctor
+
+# set default things
+RUN update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-${gccver} 50 \
+ --slave /usr/bin/g++ g++ /usr/bin/g++-${gccver} && \
+ update-alternatives --config gcc
+
+RUN update-alternatives --install /usr/bin/clang clang /usr/bin/clang-${clangver} 60 && \
+ update-alternatives --config clang
+
+RUN update-alternatives --install /usr/bin/lldb lldb /usr/bin/lldb-${clangver} 60 && \
+ update-alternatives --config lldb
+
+
+#
+# Setup a src dir in /usr/local
+#
+RUN mkdir -p /usr/local/src/repositories
+WORKDIR /usr/local/src/repositories
+
+
+#
+# Shallow clone the FreeRADIUS source
+#
+WORKDIR /usr/local/src/repositories
+ARG source=https://github.com/FreeRADIUS/freeradius-server.git
+RUN git clone --depth 1 --no-single-branch ${source}
+
+#
+# Install build dependencies for all branches from v3 onwards
+#
+WORKDIR freeradius-server
+RUN for i in $(git for-each-ref --format='%(refname:short)' refs/remotes/origin 2>/dev/null | sed -e 's#origin/##' | egrep "^(v[3-9]*\.[0-9x]*\.x|master)$");\
+ do \
+ git checkout $i; \
+ if [ -e ./debian/control.in ] ; then debian/rules debian/control ; fi ; echo 'y' | mk-build-deps -irt'apt-get -yV' debian/control ; \
+ done
diff --git a/scripts/crossbuild/docker/ubuntu18/Dockerfile b/scripts/crossbuild/docker/ubuntu18/Dockerfile
new file mode 100644
index 0000000..874e3ec
--- /dev/null
+++ b/scripts/crossbuild/docker/ubuntu18/Dockerfile
@@ -0,0 +1,65 @@
+FROM ubuntu:18.04
+
+ARG gccver=4.9
+ARG clangver=5.0
+ARG osname=bionic
+
+ARG DEBIAN_FRONTEND=noninteractive
+
+#
+# Install add-apt-repository
+#
+RUN apt-get update && \
+ apt-get install -y software-properties-common && \
+ apt-get clean && \
+ rm -r /var/lib/apt/lists/*
+
+RUN apt-get update && \
+# Development utilities
+ apt-get install -y devscripts equivs git quilt rsync && \
+# Compilers
+ apt-get install -y g++ llvm clang lldb && \
+# eapol_test dependencies
+ apt-get install -y libnl-3-dev libnl-genl-3-dev
+
+#
+# Documentation build dependencies
+#
+
+# - doxygen & JSON.pm
+RUN apt-get install -y doxygen graphviz libjson-perl
+# - antora (npm needed)
+RUN curl -sL https://deb.nodesource.com/setup_10.x | bash -
+RUN apt-get install -y nodejs
+RUN npm i -g @antora/cli@2.1 @antora/site-generator-default@2.1
+# - pandoc
+WORKDIR /tmp
+RUN curl -OL $(curl -s https://api.github.com/repos/jgm/pandoc/releases/latest | grep "browser_download_url.*deb" | cut -d '"' -f 4)
+RUN apt-get install -y ./pandoc-*.deb
+# - asciidoctor
+RUN apt-get install -y ruby-dev
+RUN gem install asciidoctor
+
+#
+# Setup a src dir in /usr/local
+#
+RUN mkdir -p /usr/local/src/repositories
+WORKDIR /usr/local/src/repositories
+
+
+#
+# Shallow clone the FreeRADIUS source
+#
+WORKDIR /usr/local/src/repositories
+ARG source=https://github.com/FreeRADIUS/freeradius-server.git
+RUN git clone --depth 1 --no-single-branch ${source}
+
+#
+# Install build dependencies for all branches from v3 onwards
+#
+WORKDIR freeradius-server
+RUN for i in $(git for-each-ref --format='%(refname:short)' refs/remotes/origin 2>/dev/null | sed -e 's#origin/##' | egrep "^(v[3-9]*\.[0-9x]*\.x|master)$");\
+ do \
+ git checkout $i; \
+ if [ -e ./debian/control.in ] ; then debian/rules debian/control ; fi ; echo 'y' | mk-build-deps -irt'apt-get -yV' debian/control ; \
+ done
diff --git a/scripts/crossbuild/docker/ubuntu20/Dockerfile b/scripts/crossbuild/docker/ubuntu20/Dockerfile
new file mode 100644
index 0000000..c813b2f
--- /dev/null
+++ b/scripts/crossbuild/docker/ubuntu20/Dockerfile
@@ -0,0 +1,65 @@
+FROM ubuntu:20.04
+
+ARG gccver=4.9
+ARG clangver=5.0
+ARG osname=bionic
+
+ARG DEBIAN_FRONTEND=noninteractive
+
+#
+# Install add-apt-repository
+#
+RUN apt-get update && \
+ apt-get install -y software-properties-common && \
+ apt-get clean && \
+ rm -r /var/lib/apt/lists/*
+
+RUN apt-get update && \
+# Development utilities
+ apt-get install -y devscripts equivs git quilt rsync && \
+# Compilers
+ apt-get install -y g++ llvm clang lldb && \
+# eapol_test dependencies
+ apt-get install -y libnl-3-dev libnl-genl-3-dev
+
+#
+# Documentation build dependencies
+#
+
+# - doxygen & JSON.pm
+RUN apt-get install -y doxygen graphviz libjson-perl
+# - antora (npm needed)
+RUN curl -sL https://deb.nodesource.com/setup_10.x | bash -
+RUN apt-get install -y nodejs
+RUN npm i -g @antora/cli@2.1 @antora/site-generator-default@2.1
+# - pandoc
+WORKDIR /tmp
+RUN curl -OL $(curl -s https://api.github.com/repos/jgm/pandoc/releases/latest | grep "browser_download_url.*deb" | cut -d '"' -f 4)
+RUN apt-get install -y ./pandoc-*.deb
+# - asciidoctor
+RUN apt-get install -y ruby-dev
+RUN gem install asciidoctor
+
+#
+# Setup a src dir in /usr/local
+#
+RUN mkdir -p /usr/local/src/repositories
+WORKDIR /usr/local/src/repositories
+
+
+#
+# Shallow clone the FreeRADIUS source
+#
+WORKDIR /usr/local/src/repositories
+ARG source=https://github.com/FreeRADIUS/freeradius-server.git
+RUN git clone --depth 1 --no-single-branch ${source}
+
+#
+# Install build dependencies for all branches from v3 onwards
+#
+WORKDIR freeradius-server
+RUN for i in $(git for-each-ref --format='%(refname:short)' refs/remotes/origin 2>/dev/null | sed -e 's#origin/##' | egrep "^(v[3-9]*\.[0-9x]*\.x|master)$");\
+ do \
+ git checkout $i; \
+ if [ -e ./debian/control.in ] ; then debian/rules debian/control ; fi ; echo 'y' | mk-build-deps -irt'apt-get -yV' debian/control ; \
+ done
diff --git a/scripts/cryptpasswd.in b/scripts/cryptpasswd.in
new file mode 100755
index 0000000..dbc0f4f
--- /dev/null
+++ b/scripts/cryptpasswd.in
@@ -0,0 +1,83 @@
+#!@PERL@
+#
+# cryptpasswd Generate or check md5 and DES hashed passwords.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA
+#
+# Copyright (C) 2001 The FreeRADIUS Project http://www.freeradius.org
+#
+# Written by Miquel van Smoorenburg <miquels@cistron-office.nl>
+#
+# $Id$
+#
+
+use Getopt::Long;
+
+sub check_des {
+ return (crypt("fnord", "aa") =~ m/^aa/);
+}
+
+sub check_md5 {
+ return (crypt("fnord", "\$1\$aa") =~ m/^\$1\$/);
+}
+
+sub usage {
+ $name = $0;
+ $name =~ s,.*/,,;
+
+ die "Usage: $name [--des|--md5|--check] plaintext_password [crypted_password]\n";
+}
+
+@saltc = ( '.', '/', '0'..'9', 'A'..'Z', 'a'..'z' );
+
+#
+# MAIN
+#
+sub main {
+
+ Getopt::Long::Configure("no_ignore_case", "bundling");
+ my @options = ( "des|d+", "md5|m+", "check|c+" );
+ usage() unless (eval { Getopt::Long::GetOptions(@options) } );
+
+ if ($opt_check) {
+ usage unless ($#ARGV == 1);
+ if (crypt($ARGV[0], $ARGV[1]) ne $ARGV[1]) {
+ print "Password BAD\n";
+ return 0;
+ } else {
+ print "Password OK\n";
+ return 1;
+ }
+ }
+
+ $opt_des = 1 unless ($opt_des || $opt_md5);
+ usage() unless ($#ARGV == 0);
+
+ die "DES password hashing not available\n"
+ if ($opt_des && !check_des());
+ die "MD5 password hashing not available\n"
+ if ($opt_md5 && !check_md5());
+
+ $salt = ($opt_md5 ? '$1$' : '');
+ for ($i = 0; $i < ($opt_md5 ? 8 : 2); $i++) {
+ $salt .= $saltc[rand 64];
+ }
+ $salt .= '$' if ($opt_md5);
+ print crypt($ARGV[0], $salt), "\n";
+
+ 1;
+}
+
+exit !main();
diff --git a/scripts/dhcp/isc2ippool.pl b/scripts/dhcp/isc2ippool.pl
new file mode 100755
index 0000000..6fb9612
--- /dev/null
+++ b/scripts/dhcp/isc2ippool.pl
@@ -0,0 +1,189 @@
+#!/usr/bin/perl
+
+# isc2ippool Insert ISC DHCPD lease entries into SQL database (ippool).
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA
+#
+# Copyright (C) 2012 Arran Cudbard-Bell (a.cudbardb@freeradius.org)
+
+use warnings;
+use strict;
+
+use DateTime;
+use DateTime::Format::Strptime;
+use DateTime::Format::DBI;
+
+use Getopt::Long;
+use Text::DHCPLeases;
+use DBI;
+
+my $lease_file = '/var/db/dhcpd.leases';
+my $sql_type = 'mysql';
+my $sql_host = 'localhost';
+my $sql_user = 'radius';
+my $sql_pass = 'radpass';
+my $sql_database = 'radius';
+my $sql_table = 'dhcpippool';
+my $pool_name = '';
+my $insert_only = 0;
+
+my $verbose;
+my $help;
+
+sub error {
+ print STDERR @_, "\n";
+ exit(64);
+}
+
+sub notice {
+ if ($verbose) {
+ printf(shift . "\n", @_);
+ }
+}
+
+sub help {
+ my @this = split('/', $0);
+ print <<HELP
+$this[$#this] [options] <pool>
+
+Options:
+ -leases <lease file> - The lease file to parse (defaults to '$lease_file')
+ -no-update - Don't update existing lease entries
+ -type - SQL database type (defaults to '$sql_type')
+ -table - SQL table (defaults to '$sql_table')
+ -h | -host - SQL host to connect to
+ -u | -user - SQL user
+ -p | -pass - SQL password
+ -v - Verbose
+ -help - This help text
+HELP
+;
+ exit(0);
+}
+
+GetOptions (
+ 'leases=s' => \$lease_file,
+ 'no-update' => \$insert_only,
+ 'type=s' => \$sql_type,
+ 'table=s' => \$sql_table,
+ 'h|host=s' => \$sql_host,
+ 'u|user=s' => \$sql_user,
+ 'p|pass=s' => \$sql_pass,
+ 'v' => \$verbose,
+ 'help' => \$help
+) or error('Failed parsing options');
+
+#
+# Poolname must be provided, and we need at least some arguments...
+#
+help if !scalar @ARGV or ($pool_name = $ARGV[$#ARGV]) =~ /^-/;
+
+-r $lease_file or
+ error("Lease file ($lease_file) doesn\'t exist or isn't readable");
+
+my $leases = Text::DHCPLeases->new(file => $lease_file) or
+ error("Failed parsing leases file (or lease file empty)");
+
+my $handle = DBI->connect(
+ "DBI:$sql_type:database=$sql_database;host=$sql_host",
+ $sql_user, $sql_pass, {RaiseError => 1}
+);
+
+my $dt_isc = DateTime::Format::Strptime->new(pattern => '%Y/%m/%d %H:%M:%S');
+my $dt_sql = DateTime::Format::DBI->new($handle);
+
+for my $lease ($leases->get_objects) {
+ next unless ($lease->binding_state && $lease->binding_state eq 'active');
+
+ my($query, @result);
+ eval {
+ $query = $handle->prepare("
+ SELECT expiry_time, framedipaddress FROM $sql_table
+ WHERE pool_name = ?
+ AND pool_key = ?;"
+ , undef);
+
+ $query->bind_param(1, $pool_name);
+ $query->bind_param(2, $lease->mac_address);
+
+ $query->execute();
+
+ @result = $query->fetchrow_array();
+ };
+
+ error($@) if $@;
+
+ my $ends_isc = $dt_isc->parse_datetime($lease->ends =~ m{^(?:[0-9]+) (.+)});
+
+ if (!$query->rows) {
+ eval {
+ $handle->do("
+ INSERT INTO $sql_table (
+ pool_name, framedipaddress,
+ expiry_time, pool_key)
+ VALUES (?, ?, ?, ?);"
+ , undef
+ , $pool_name
+ , $lease->ip_address
+ , $dt_sql->format_datetime($ends_isc)
+ , $lease->mac_address
+ );
+ };
+
+ error($@) if $@;
+
+ notice("MAC:'%s' inserted with IP:'%s'.",
+ $lease->mac_address, $lease->ip_address);
+
+ next;
+ }
+
+ my $ends_sql = $dt_sql->parse_datetime($result[0]);
+
+ if ($insert_only && (($result[1] ne $lease->ip_address) ||
+ (DateTime->compare($ends_sql, $ends_isc) < 0))) {
+
+ eval {
+ $handle->do("
+ UPDATE $sql_table
+ SET
+ pool_key = ?, expiry_time = ?
+ WHERE pool_name = ?
+ AND framedipaddress = ?;"
+ , undef
+ , $lease->mac_address
+ , $dt_sql->format_datetime($ends_isc)
+ , $pool_name
+ , $lease->ip_address
+ );
+ };
+
+ error($@) if $@;
+
+ notice("MAC:'%s' updated. ISC-TS: '%s', SQL-TS: '%s', ISC-IP: '%s', SQL-IP: '%s'.",
+ $lease->mac_address,
+ $dt_sql->format_datetime($ends_isc),
+ $dt_sql->format_datetime($ends_sql),
+ $lease->ip_address,
+ $result[1]);
+
+ next;
+ }
+
+ notice("MAC:'%s' skipped (no update %s). ",
+ $lease->mac_address, $insert_only ? 'allowed' : 'needed');
+}
+
+exit(0);
diff --git a/scripts/dhcp/rlm_iscfixed2ippool b/scripts/dhcp/rlm_iscfixed2ippool
new file mode 100755
index 0000000..4ef9365
--- /dev/null
+++ b/scripts/dhcp/rlm_iscfixed2ippool
@@ -0,0 +1,422 @@
+#!/usr/bin/perl -Tw
+
+######################################################################
+#
+# Copyright (C) 2020 Network RADIUS
+#
+# $Id$
+#
+######################################################################
+#
+# Helper script to parse an ISC DHCP config file and extract fixed
+# leases for populating FreeRADIUS ippool tables.
+#
+# This script reads an ISC DHCP config file and extracts any fixed
+# leases. If Net::DNS is available, then any host names are resolved.
+# The resulting list of hardware mac addresses and IP addresses are
+# then formatted as SQL to update a standard FreeRADIUS DHCP ippool
+# table.
+#
+# rlm_iscfixed2ippool -c <dhcpd.conf> -t <table_name> \
+# (-d <sql_dialect> | -f <raddb_dir> [-i <instance>]) \
+# -k <mac|id>
+#
+
+use warnings;
+use strict;
+
+my $dns_available = 0;
+my $resolver;
+eval {
+ require Net::DNS;
+ $dns_available = 1;
+ $resolver = Net::DNS::Resolver->new;
+};
+
+#
+# Option defaults
+#
+my $opts = {
+ dhcpdconf => '/etc/dhcp/dhcpd.conf',
+ key => 'mac'
+};
+
+#
+# Parse the command line arguments
+#
+my $opt = '';
+for (my $i = 0; $i <= $#ARGV; $i++) {
+ if ($ARGV[$i] =~ m/^-(.)$/) {
+ if ($1 eq 'c') {
+ $opt = 'dhcpdconf';
+ } elsif ($1 eq 't') {
+ $opt = 'table_name';
+ } elsif ($1 eq 'd') {
+ $opt = 'dialect';
+ } elsif ($1 eq 'f') {
+ $opt = 'raddb_dir';
+ } elsif ($1 eq 'i') {
+ $opt = 'instance';
+ } elsif ($1 eq 'k') {
+ $opt = 'key'
+ } else {
+ &usage();
+ exit 1;
+ }
+ } else {
+ if ($opt eq '') {
+ &usage();
+ exit 1;
+ } else {
+ $opts->{$opt} = $ARGV[$i]
+ }
+ }
+}
+
+if (($opts->{key} ne 'mac') && ($opts->{key} ne 'id')) {
+ &usage();
+ exit(1);
+}
+
+#
+# If a raddb dir is set then we parse the mods-enabled config
+#
+
+if ($opts->{raddb_dir}) {
+ my $found = 0;
+ if (-d $opts->{raddb_dir}.'/mods-enabled') {
+ opendir(my $dh, $opts->{raddb_dir}.'/mods-enabled') || die 'ERROR: Could not open directory '.$opts->{raddb_dir}.'/mods-enabled';
+ my @dir = grep { -f "$opts->{raddb_dir}/mods-enabled/$_" } readdir($dh);
+ closedir($dh);
+ my $instance = $opts->{instance};
+ foreach my $file (@dir) {
+ open (my $fh, $opts->{raddb_dir}.'/mods-enabled/'.$file);
+ my $level = 0;
+ my $section = '';
+ my $subsection = '';
+ while (<$fh>) {
+ if ($found) {
+ $_ =~ s/#.*//; # Remove comments
+ if ($_ =~ m/\s*([a-z_]+)\s*=\s*(.*)/) {
+ my $param = $1;
+ my $value = $2;
+ $value =~ s/^"//;
+ $value =~ s/"\s*$//;
+ if ($level == 1) {
+ $opts->{$param} = $value;
+ } elsif ($level == 2) {
+ $opts->{$section}->{$param} = $value;
+ } elsif ($level == 3) {
+ $opts->{$section}->{$subsection}->{$param} = $value;
+ }
+ }
+ if ($_ =~ m/([a-z_]*)\s+\{/) { # Find nested sections
+ $level++ ;
+ if ($level == 2) {
+ $section = $1;
+ } elsif ($level == 3) {
+ $subsection = $1;
+ }
+ }
+ $level-- if ($_ =~ m/\s+\}/); # Close of nesting
+ last if ($level == 0); # We've got to the end of the instance
+ }
+ if ($_ =~ m/\b$instance\s+\{/) {
+ # We've found the specified SQL instance
+ $found = 1;
+ $level = 1;
+ }
+ }
+ close ($fh);
+ if ($found) {
+ last;
+ }
+ }
+ } else {
+ die 'ERROR: Specified FreeRADIUS config directory does not contain mods-enabled';
+ }
+ if ($found == 0) {
+ die 'ERROR: SQL instance not found in FreeRADIUS config';
+ }
+}
+
+#
+# The SQL dialect and table name must be set
+#
+if ((!($opts->{dialect})) || (!($opts->{table_name}))) {
+ &usage();
+ exit 1;
+}
+
+
+open (my $fh, '<', $opts->{dhcpdconf}) or die "ERROR: Cannot open ISC DHCP config for reading: $opts->{dhcpdconf}";
+
+my $inhost = 0;
+my @hosts;
+my $host = {key => ''};
+while (my $line = <$fh>) {
+ $line = lc($line);
+ if ($inhost == 0) {
+ $inhost = 1 if ($line =~ m/host\s+\S+\s+{/); # We've found the beginning of a host record
+ }
+ if ($inhost) {
+ if (($opts->{key} eq 'mac') && ($line =~ m/hardware\s+ethernet\s+(([0-9a-f]{2}([:;]|\s)){6})/)) {
+ $host->{key} = $1;
+ $host->{key} =~ s/;$//;
+ }
+ if (($opts->{key} eq 'id') && ($line =~ m/dhcp-client-identifier\s+(.*?)\s*;/)) {
+ $host->{key} = $1;
+ }
+ if ($line =~ m/fixed-address\s+(.+);/) {
+ my @addresses = split(',', $1);
+ foreach my $address (@addresses) {
+ $address =~ s/^\s+//;
+ $address =~ s/\s+$//;
+ if ($address =~ m/(([0-9]{1,3}(\.|$)){4})/) {
+ push (@{$host->{ips}}, $1);
+ } elsif ($dns_available) {
+ my $reply = $resolver->search($1, 'A');
+ if ($reply) {
+ foreach my $rr ($reply->answer) {
+ push (@{$host->{ips}}, $rr->address) if ($rr->can('address'))
+ }
+ }
+ }
+ }
+ }
+ if ($line =~ m/}/) { # End of the host record - store the results and clear up
+ push (@hosts, $host) if (($host->{key}) && ($#{$host->{ips}} >= 0));
+ $host = {key => ''};
+ $inhost = 0;
+ }
+ }
+}
+
+close($fh);
+
+my ($template, $queries) = &load_templates($opts->{table_name});
+
+unless (defined $template->{$opts->{dialect}}) {
+ print STDERR "Unknown dialect. Pick one of: ";
+ print STDERR "$_ " foreach sort keys %{$template};
+ print STDERR "\n";
+ exit 1;
+}
+
+if ($opts->{radius_db}) {
+ &call_database($opts, $queries, @hosts);
+} else {
+ my $tt_available = 0;
+ eval {
+ require Template;
+ $tt_available = 1;
+ };
+ if ($tt_available) {
+ my $tt=Template->new();
+ $tt->process(\$template->{$opts->{dialect}}, {tablename => $opts->{table_name}, hosts => \@hosts}) || die $tt->error();
+ } else {
+ die "ERROR: Template Toolkit is not available. Install the Template Perl module.";
+ }
+}
+
+exit(0);
+
+sub usage {
+ print STDERR <<'EOF'
+Usage:
+ rlm_iscfixed2ippool -c <dhcpd.conf> -t <table_name> (-d <sql_dialect> | -f <raddb_dir> [ -i <instance> ]) [-k <mac|id> ]
+
+EOF
+}
+
+
+sub call_database {
+
+ my $opts = shift;
+ my $queries = shift;
+ my @entries = @_;
+
+ my $dbi_avail = 0;
+ eval {
+ require DBI;
+ $dbi_avail = 1;
+ };
+ unless($dbi_avail) {
+ die "ERROR: DBI is not available. Install the DBI Perl module.";
+ }
+
+ my $dsn;
+ if ($opts->{dialect} eq 'mysql') {
+ $dsn = "DBI:mysql:database=$opts->{radius_db};host=$opts->{server}";
+ if (defined($opts->{mysql}->{tls})) {
+ $dsn .= ';mysql_ssl=1';
+ $dsn .= ';mysql_ssl_ca_file='.$opts->{mysql}->{tls}->{ca_file} if ($opts->{mysql}->{tls}->{ca_file});
+ $dsn .= ';mysql_ssl_ca_path='.$opts->{mysql}->{tls}->{ca_path} if ($opts->{mysql}->{tls}->{ca_path});
+ $dsn .= ';mysql_ssl_client_key='.$opts->{mysql}->{tls}->{private_key_file} if ($opts->{mysql}->{tls}->{private_key_file});
+ $dsn .= ';mysql_ssl_client_cert='.$opts->{mysql}->{tls}->{certificate_file} if ($opts->{mysql}->{tls}->{certificate_file});
+ $dsn .= ';mysql_ssl_cipher='.$opts->{mysql}->{tls}->{cipher} if ($opts->{mysql}->{tls}->{cipher});
+ }
+ } elsif ($opts->{dialect} eq 'postgresql') {
+ # Parse FreeRADIUS alternative connection string
+ if ($opts->{radius_db} =~ m/host=(.+?)\b/) {
+ $opts->{server} = $1;
+ }
+ if ($opts->{radius_db} =~ m/user=(.+?)\b/) {
+ $opts->{login} = $1;
+ }
+ if ($opts->{radius_db} =~ m/password=(.+?)\b/) {
+ $opts->{password} = $1;
+ }
+ if ($opts->{radius_db} =~ m/sslmode=(.+?)\b/) {
+ $opts->{sslmode} = $1;
+ }
+ if ($opts->{radius_db} =~ m/dbname=(.+?)\b/) {
+ $opts->{radius_db} = $1;
+ }
+ $dsn = "DBI:Pg:dbname=$opts->{radius_db};host=$opts->{server}";
+ #
+ # DBD doesn't have all the options used by FreeRADIUS - just enable ssl if
+ # FreeRADIUS has SSL options enabled
+ #
+ $dsn .= ';sslmode=prefer' if ($opts->{sslmode});
+ } elsif ($opts->{dialect} eq 'sqlite') {
+ $dsn = "DBI:SQLite:dbname=$opts->{sqlite}->{filename}";
+ } elsif ($opts->{dialect} eq 'mssql') {
+ if ($opts->{driver} eq 'rlm_sql_unixodbc') {
+ $dsn = "DBI:ODBC:DSN=$opts->{server}";
+ } else {
+ $dsn = "DBI:Sybase:server=$opts->{server};database=$opts->{radius_db}";
+ }
+ } elsif ($opts->{dialect} eq 'oracle') {
+ # Extract data from Oracle connection string as used by FreeRADIUS
+ if ($opts->{radius_db} =~ m/HOST=(.+?)\)/) {
+ $opts->{server} = $1;
+ }
+ if ($opts->{radius_db} =~ m/PORT=(.+?)\)/) {
+ $opts->{port} =$1;
+ }
+ if ($opts->{radius_db} =~ m/SID=(.+?)\)/) {
+ $opts->{sid} = $1;
+ }
+ $dsn = "DBI:Oracle:host=$opts->{server};sid=$opts->{sid}";
+ } else {
+ $dsn = "DBI:$opts->{dialect}:database=$opts->{radius_db};host=$opts->{server}";
+ }
+ $dsn .= ";port=$opts->{port}" if ($opts->{port}) && ($opts->{driver} ne 'rlm_sql_unixodbc');
+
+ # Read the results by running our query against the database
+ my $dbh = DBI->connect($dsn, $opts->{login}, $opts->{password}) || die "Unable to connect to database";
+
+ $dbh->do($queries->{$opts->{dialect}}->{pre}) if ($queries->{$opts->{dialect}}->{pre});
+
+ my $sth = $dbh->prepare($queries->{$opts->{dialect}}->{update});
+ foreach my $h (@hosts) {
+ foreach my $i (@{$h->{ips}}) {
+ $sth->execute($h->{key}, $i);
+ }
+ }
+ $sth->finish();
+
+ $dbh->do($queries->{$opts->{dialect}}->{post}) if ($queries->{$opts->{dialect}}->{post});
+
+ $dbh->disconnect();
+}
+
+
+#
+# SQL dialect templates
+#
+
+sub load_templates {
+
+ my $tablename = shift;
+
+ my $template;
+ my $queries;
+#
+# MySQL / MariaDB
+#
+ $queries->{'mysql'}->{pre} = 'START TRANSACTION';
+ $queries->{'mysql'}->{update} = 'UPDATE '.$tablename.' SET pool_key = ?, `status` = "static" WHERE framedipaddress = ?';
+ $queries->{'mysql'}->{post} = 'COMMIT';
+
+ $template->{'mysql'} = $queries->{'mysql'}->{pre}.";\n";
+ $template->{'mysql'} .= <<'END_mysql';
+[%- FOREACH h IN hosts %]
+[%- FOREACH i IN h.ips %]
+UPDATE [% tablename %] SET pool_key = '[% h.key %]', `status` = 'static' WHERE framedipaddress = '[% i %]';
+[%- END %]
+[%- END %]
+END_mysql
+ $template->{'mysql'} .= $queries->{'mysql'}->{post}.";\n";
+
+#
+# PostgreSQL
+#
+ $queries->{'postgresql'}->{pre} = 'START TRANSACTION';
+ $queries->{'postgresql'}->{update} = 'UPDATE '.$tablename.' SET pool_key = ?, status = \'static\' WHERE framedipaddress = ?';
+ $queries->{'postgresql'}->{post} = 'COMMIT';
+
+ $template->{'postgresql'} = $queries->{'postgresql'}->{pre}.";\n";
+ $template->{'postgresql'} .= <<'END_postgresql';
+[%- FOREACH h IN hosts %]
+[%- FOREACH i IN h.ips %]
+UPDATE [% tablename %] SET pool_key = '[% h.key %]', status = 'static' WHERE framedipaddress = '[% i %]';
+[%- END %]
+[%- END %]
+END_postgresql
+ $template->{'postgresql'} .= $queries->{'postgresql'}->{post}.";\n";
+#
+# Oracle
+#
+ $queries->{'oracle'}->{pre} = '';
+ $queries->{'oracle'}->{update} = 'UPDATE '.$tablename.' SET pool_key = ?, status_id = (SELECT status_id FROM dhcpstatus WHERE status = \'static\') WHERE FramedIPAddress = ?';
+ $queries->{'oracle'}->{post} = 'COMMIT';
+
+ $template->{'oracle'} = <<'END_oracle';
+[%- FOREACH h IN hosts %]
+[%- FOREACH i IN h.ips %]
+UPDATE [% tablename %] SET pool_key = '[% h.key %]', status_id = (SELECT status_id FROM dhcpstatus WHERE status = 'static') WHERE framedipaddress = '[% i %]';
+[%- END %]
+[%- END %]
+END_oracle
+ $template->{'oracle'} .= $queries->{'oracle'}->{post}.";\n";
+
+#
+# SQLite
+#
+ $queries->{'sqlite'}->{pre} = 'BEGIN TRANSACTION';
+ $queries->{'sqlite'}->{update} = 'UPDATE '.$tablename.' SET pool_key = ?, status_id = (SELECT status_id FROM dhcpstatus WHERE status = \'static\') WHERE framedipaddress = ?';
+ $queries->{'sqlite'}->{post} = 'COMMIT';
+
+ $template->{'sqlite'} = $queries->{'sqlite'}->{pre}.";\n";
+ $template->{'sqlite'} .= <<'END_sqlite';
+[%- FOREACH h IN hosts %]
+[%- FOREACH i IN h.ips %]
+UPDATE [% tablename %] SET pool_key = '[% h.key %]', status_id = (SELECT status_id FROM dhcpstatus WHERE status = 'static') WHERE framedipaddress = '[% i %]';
+[%- END %]
+[%- END %]
+END_sqlite
+ $template->{'sqlite'} .= $queries->{'sqlite'}->{post}.";\n";
+
+#
+# MS SQL
+#
+ $queries->{'mssql'}->{pre} = 'BEGIN TRAN';
+ $queries->{'mssql'}->{update} = 'UPDATE '.$tablename.' SET pool_key = ?, status_id = (SELECT status_id FROM dhcpstatus WHERE status = \'static\') WHERE framedipaddress = ?';
+ $queries->{'mssql'}->{post} = 'COMMIT TRAN';
+
+ $template->{'mssql'} = $queries->{'mssql'}->{pre}.";\n";
+ $template->{'mssql'} .= <<'END_mssql';
+[%- FOREACH h IN hosts %]
+[%- FOREACH i IN h.ips %]
+UPDATE [% tablename %] SET pool_key = '[% h.key %]', status_id = (SELECT status_id FROM dhcpstatus WHERE status = 'static') WHERE framedipaddress = '[% i %]';
+[%- END %]
+[%- END %]
+END_mssql
+ $template->{'mssql'} .= $queries->{'mssql'}->{post}.";\n";
+
+ return ($template, $queries);
+
+}
+
diff --git a/scripts/dict_alias.sh b/scripts/dict_alias.sh
new file mode 100755
index 0000000..b573668
--- /dev/null
+++ b/scripts/dict_alias.sh
@@ -0,0 +1,22 @@
+#!/bin/sh
+
+#
+# Print out "ALIAS NAME OID", which lets us import the v3 names into v4.
+#
+
+RADATTR=$(echo $0 | sed 's,dict_alias.sh,../../build/make/jlibtool --quiet --mode=execute ./build/bin/radattr,')
+DICTDIR=$(echo $0 | sed 's,dict_alias.sh,../../share,')
+
+#
+# Print out the attributes,
+# sorted by vendor,
+# split off the comment showing the vendor,
+# reformat so that the name is in a 40-character field
+#
+# The output is ordered by vendor, which makes it easy to split them manually.
+#
+# The output is also ordered by name, instead of by OID. Which is
+# fine, as ordering it by OID is hard. Both for "radattr", and for
+# simple shell scripts.
+#
+$RADATTR -A -D $DICTDIR | sort -n -k5 | sed 's/ #.*//' | awk '{printf "%s\t%-40s\t%s\n", $1, $2, $3 }'
diff --git a/scripts/docker/README.md b/scripts/docker/README.md
new file mode 100644
index 0000000..f6e9ae6
--- /dev/null
+++ b/scripts/docker/README.md
@@ -0,0 +1,200 @@
+# What is FreeRADIUS?
+
+The FreeRADIUS Server Project is a high performance and highly
+configurable multi-protocol policy server, supporting RADIUS, DHCPv4
+and VMPS. Using RADIUS allows authentication and authorization for a network
+to be centralized, and minimizes the number of changes that have to
+be done when adding or deleting new users to a network.
+
+FreeRADIUS can authenticate users on systems such as 802.1x
+(WiFi), dialup, PPPoE, VPN's, VoIP, and many others. It supports
+back-end databases such as MySQL, PostgreSQL, Oracle, Microsoft
+Active Directory, Redis, OpenLDAP. It is used daily to
+authenticate the Internet access for hundreds of millions of
+people, in sites ranging from 10 to 10 million+ users.
+
+> [wikipedia.org/wiki/FreeRADIUS](https://en.wikipedia.org/wiki/FreeRADIUS)
+
+
+# How to use this image
+
+## Starting the server
+
+```console
+$ docker run --name my-radius -d freeradius/freeradius-server
+```
+
+The image contains only the default FreeRADIUS configuration which
+has no users, and accepts test clients on 127.0.0.1. In order to
+use it in production, as a minimum you will need to add clients to
+the `clients.conf` file, and users to the "users" file in
+`mods-config/files/authorize`.
+
+**Without building a local image with a configuration, the
+container will refuse to answer any queries.**
+
+
+## Defining a local configuration
+
+Create a local `Dockerfile` based on the required image and
+COPY in the server configuration.
+
+```Dockerfile
+FROM freeradius/freeradius-server:latest
+COPY raddb/ /etc/raddb/
+```
+
+The `raddb` directory could contain, for example:
+
+```
+clients.conf
+mods-config/
+mods-config/files/
+mods-config/files/authorize
+```
+
+Where `clients.conf` contains a simple client definition
+
+```
+client dockernet {
+ ipaddr = 172.17.0.0/16
+ secret = testing123
+}
+```
+
+and the `authorize` "users" file contains a test user:
+
+```
+bob Cleartext-Password := "test"
+```
+
+Build the image locally:
+
+```console
+$ docker build -t my-radius-image -f Dockerfile .
+```
+
+
+## Using the local configuration
+
+It should now be possible to test authentication against the
+server from the host machine, using the `radtest` utility supplied
+with FreeRADIUS and the credentials defined above.
+
+Start the local container. Ports will need to be forwarded to the
+server, typically 1812/udp and/or 1813/udp, for example:
+
+```console
+docker run --rm -d --name my-radius -p 1812-1813:1812-1813/udp my-radius-image
+```
+
+Send a test request, you will need the `radtest` utility:
+
+```console
+$ radtest bob test 127.0.0.1 0 testing123
+```
+
+which should return an "Access-Accept".
+
+The image can now be stopped with:
+
+```console
+docker stop my-radius
+```
+
+
+## Running in debug mode
+
+FreeRADIUS should always be tested in debug mode, using option
+`-X`. Coloured debug output also requires `-t` be passed to docker.
+
+```console
+$ docker run --rm --name my-radius -t -p 1812-1813:1812-1813/udp freeradius/freeradius-server -X
+```
+
+Guidelines for how to read and interpret the debug output are on the
+[FreeRADIUS Wiki](https://wiki.freeradius.org/radiusd-X).
+
+
+## Security notes
+
+The configuration in the docker image comes with self-signed
+certificates for convenience. These should not be used in a
+production environment, but replaced with new certificates. See
+the file `raddb/certs/README.md` for more information.
+
+
+## Debugging
+
+By default if you try to use `gdb` in a Docker container, the
+ptrace attach call will fail, and you will not be able to trace
+processes.
+
+In order to allow tracing, the ``--privileged`` flag must be
+passed to ``docker run``, this restores any Linux ``cap``
+privileges that would not ordinarily be given.
+
+
+# Image variants
+
+## `freeradius/freeradius-server:<version>`
+
+The de-facto image which should be used unless you know you need
+another image. It is based on
+[Ubuntu Linux](https://hub.docker.com/_/ubuntu/) Docker images.
+
+
+## `freeradius/freeradius-server:<version>-alpine`
+
+Image based on the [Alpine Linux](https://hub.docker.com/_/alpine/)
+Docker images, which are much smaller than most Linux
+distributions. To keep the basic size as small as possible, **this
+image does not include libraries for all modules that have been
+built** (especially the languages such as Perl or Python). Therefore
+these extra libraries will need to be installed with `apk add` in
+your own Dockerfile if you intend on using modules that require
+them.
+
+
+# Building Docker images
+
+The FreeRADIUS source contains Dockerfiles for several Linux
+distributions. They are in
+[`freeradius-server/scripts/docker/<os_name>`](https://github.com/FreeRADIUS/freeradius-server/tree/v3.2.x/scripts/docker).
+
+Build an image with
+
+```bash
+$ cd scripts/docker/<os_name>
+$ docker build . -t freeradius-<os_name>
+```
+
+This will download the OS base image, install/build any dependencies
+as necessary, perform a shallow clone of the FreeRADIUS source and
+build the server.
+
+Once built, running ``docker images`` should show the image.
+
+```bash
+$ docker images
+REPOSITORY TAG IMAGE ID CREATED SIZE
+freeradius-ubuntu16 latest 289b3c7aca94 4 minutes ago 218MB
+freeradius-alpine latest d7fb3041bea2 2 hours ago 88.6MB
+```
+
+
+## Build args
+
+Two ARGs are defined in the Dockerfiles that specify the source
+repository and git tag that the release will be built from. These
+are
+
+- source: the git repository URL
+- release: the git commit/tag
+
+To build the image from a specific repository and git tag, set one
+or both of these args:
+
+```console
+$ docker build . --build-arg=release=v3.2.x --build-arg=source=https://github.com/FreeRADIUS/freeradius-server.git -t freeradius-<os_name>
+```
diff --git a/scripts/docker/alpine/Dockerfile b/scripts/docker/alpine/Dockerfile
new file mode 100644
index 0000000..2965525
--- /dev/null
+++ b/scripts/docker/alpine/Dockerfile
@@ -0,0 +1,83 @@
+ARG from=alpine:3.13
+FROM ${from} as build
+
+#
+# Install build tools
+#
+RUN apk update
+RUN apk add git gcc make
+
+#
+# Create build directory
+#
+RUN mkdir -p /usr/local/src/repositories
+WORKDIR /usr/local/src/repositories
+
+#
+# Shallow clone the FreeRADIUS source
+#
+ARG source=https://github.com/FreeRADIUS/freeradius-server.git
+ARG release=v3.2.x
+
+RUN git clone --depth 1 --single-branch --branch ${release} ${source}
+WORKDIR freeradius-server
+
+#
+# Install build dependencies
+#
+# essential
+RUN apk add libc-dev talloc-dev
+RUN apk add openssl openssl-dev
+RUN apk add linux-headers
+# general
+RUN apk add pcre-dev libidn-dev krb5-dev samba-dev curl-dev json-c-dev
+RUN apk add openldap-dev unbound-dev
+# languages
+RUN apk add ruby-dev perl-dev python2-dev
+# databases
+RUN apk add hiredis-dev libmemcached-dev gdbm-dev libcouchbase-dev
+# sql
+RUN apk add postgresql-dev mariadb-dev unixodbc-dev sqlite-dev
+
+#
+# Build the server
+#
+RUN ./configure --prefix=/opt
+RUN make -j2
+RUN make install
+RUN rm /opt/lib/*.a
+
+#
+# Clean environment and run the server
+#
+FROM ${from}
+COPY --from=build /opt /opt
+
+#
+# These are needed for the server to start
+#
+RUN apk update \
+ && apk add talloc libressl pcre libwbclient tzdata \
+ \
+#
+# Libraries that are needed dependent on which modules are used
+# Some of these (especially the languages) are huge. A reasonable
+# selection has been enabled here. If you use modules needing
+# other dependencies then install any others required in your
+# local Dockerfile.
+#
+ && apk add libcurl json-c libldap hiredis sqlite-dev \
+#RUN apk add libidn krb5
+#RUN apk add unbound-libs
+#RUN apk add ruby-libs perl python2-dev
+#RUN apk add libmemcached gdbm libcouchbase
+#RUN apk add postgresql-dev mariadb-dev unixodbc-dev
+ \
+ && ln -s /opt/etc/raddb /etc/raddb
+
+COPY docker-entrypoint.sh /
+RUN chmod +x /docker-entrypoint.sh
+
+EXPOSE 1812/udp 1813/udp
+ENTRYPOINT ["/docker-entrypoint.sh"]
+CMD ["radiusd"]
diff --git a/scripts/docker/alpine/docker-entrypoint.sh b/scripts/docker/alpine/docker-entrypoint.sh
new file mode 100755
index 0000000..e0f9f6f
--- /dev/null
+++ b/scripts/docker/alpine/docker-entrypoint.sh
@@ -0,0 +1,27 @@
+#!/bin/sh
+set -e
+
+PATH=/opt/sbin:/opt/bin:$PATH
+export PATH
+
+# this if will check if the first argument is a flag
+# but only works if all arguments require a hyphenated flag
+# -v; -SL; -f arg; etc will work, but not arg1 arg2
+if [ "$#" -eq 0 ] || [ "${1#-}" != "$1" ]; then
+ set -- radiusd "$@"
+fi
+
+# check for the expected command
+if [ "$1" = 'radiusd' ]; then
+ shift
+ exec radiusd -f "$@"
+fi
+
+# debian people are likely to call "freeradius" as well, so allow that
+if [ "$1" = 'freeradius' ]; then
+ shift
+ exec radiusd -f "$@"
+fi
+
+# else default to run whatever the user wanted like "bash" or "sh"
+exec "$@"
diff --git a/scripts/docker/centos7/Dockerfile b/scripts/docker/centos7/Dockerfile
new file mode 100644
index 0000000..efa56eb
--- /dev/null
+++ b/scripts/docker/centos7/Dockerfile
@@ -0,0 +1,96 @@
+ARG from=centos:centos7
+FROM ${from} as build
+
+#
+# Install build tools
+#
+RUN yum groupinstall -y "Development Tools"
+RUN yum install -y rpmdevtools
+RUN yum install -y openssl
+
+#
+# Create build directory
+#
+RUN mkdir -p /usr/local/src/repositories
+WORKDIR /usr/local/src/repositories
+
+#
+# Shallow clone the FreeRADIUS source
+#
+ARG source=https://github.com/FreeRADIUS/freeradius-server.git
+ARG release=v3.2.x
+
+RUN git clone --depth 1 --single-branch --branch ${release} ${source}
+WORKDIR freeradius-server
+
+#
+# Other requirements
+#
+
+# Use LTB's openldap packages instead of the distribution version to avoid linking against NSS
+RUN echo $'[ltb-project]\n\
+name=LTB project packages\n\
+baseurl=https://ltb-project.org/rpm/$releasever/$basearch\n\
+enabled=1\n\
+gpgcheck=1\n\
+gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-LTB-project'\
+> /etc/yum.repos.d/ltb-project.repo
+RUN rpm --import https://ltb-project.org/lib/RPM-GPG-KEY-LTB-project
+
+# EPEL repository for freetds and hiredis
+RUN yum install -y https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm
+
+#
+# Install build dependencies
+#
+RUN [ -e redhat/freeradius.spec ] && yum-builddep -y redhat/freeradius.spec
+
+#
+# Create RPM build environment
+#
+ENV BUILDDIR=/root/rpmbuild
+RUN rpmdev-setuptree
+
+RUN ./configure
+RUN make freeradius-server-$(cat VERSION).tar.bz2
+RUN cp freeradius-server-$(cat VERSION).tar.bz2 $BUILDDIR/SOURCES/
+RUN cp -r redhat/* $BUILDDIR/SOURCES/
+RUN cp -r redhat/freeradius.spec $BUILDDIR/SPECS/
+WORKDIR $BUILDDIR
+
+#
+# Build the server
+#
+ENV QA_RPATHS=0x0003
+RUN rpmbuild -bb --define "_release $release" "$BUILDDIR/SPECS/freeradius.spec"
+
+RUN mkdir /root/rpms
+RUN mv $BUILDDIR/RPMS/*/*.rpm /root/rpms/
+
+#
+# Clean environment and run the server
+#
+FROM ${from}
+COPY --from=build /root/rpms /tmp/
+
+# Use LTB's openldap packages instead of the distribution version to avoid linking against NSS
+RUN echo $'[ltb-project]\n\
+name=LTB project packages\n\
+baseurl=https://ltb-project.org/rpm/$releasever/$basearch\n\
+enabled=1\n\
+gpgcheck=1\n\
+gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-LTB-project'\
+> /etc/yum.repos.d/ltb-project.repo \
+ && rpm --import https://ltb-project.org/lib/RPM-GPG-KEY-LTB-project \
+ \
+# EPEL repository for freetds and hiredis
+ && yum install -y https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm \
+ \
+ && yum install -y /tmp/*.rpm
+
+COPY docker-entrypoint.sh /
+RUN chmod +x /docker-entrypoint.sh
+
+EXPOSE 1812/udp 1813/udp
+ENTRYPOINT ["/docker-entrypoint.sh"]
+CMD ["radiusd"]
diff --git a/scripts/docker/centos7/docker-entrypoint.sh b/scripts/docker/centos7/docker-entrypoint.sh
new file mode 100755
index 0000000..900ad6b
--- /dev/null
+++ b/scripts/docker/centos7/docker-entrypoint.sh
@@ -0,0 +1,24 @@
+#!/bin/sh
+set -e
+
+# this if will check if the first argument is a flag
+# but only works if all arguments require a hyphenated flag
+# -v; -SL; -f arg; etc will work, but not arg1 arg2
+if [ "$#" -eq 0 ] || [ "${1#-}" != "$1" ]; then
+ set -- radiusd "$@"
+fi
+
+# check for the expected command
+if [ "$1" = 'radiusd' ]; then
+ shift
+ exec radiusd -f "$@"
+fi
+
+# debian people are likely to call "freeradius" as well, so allow that
+if [ "$1" = 'freeradius' ]; then
+ shift
+ exec radiusd -f "$@"
+fi
+
+# else default to run whatever the user wanted like "bash" or "sh"
+exec "$@"
diff --git a/scripts/docker/debian10/Dockerfile b/scripts/docker/debian10/Dockerfile
new file mode 100644
index 0000000..441bed7
--- /dev/null
+++ b/scripts/docker/debian10/Dockerfile
@@ -0,0 +1,59 @@
+ARG from=debian:buster
+FROM ${from} as build
+
+ARG DEBIAN_FRONTEND=noninteractive
+
+#
+# Install build tools
+#
+RUN apt-get update
+RUN apt-get install -y devscripts equivs git quilt gcc
+
+#
+# Create build directory
+#
+RUN mkdir -p /usr/local/src/repositories
+WORKDIR /usr/local/src/repositories
+
+#
+# Shallow clone the FreeRADIUS source
+#
+ARG source=https://github.com/FreeRADIUS/freeradius-server.git
+ARG release=v3.2.x
+
+RUN git clone --depth 1 --single-branch --branch ${release} ${source}
+WORKDIR freeradius-server
+
+#
+# Install build dependencies
+#
+RUN git checkout ${release}; \
+ if [ -e ./debian/control.in ]; then \
+ debian/rules debian/control; \
+ fi; \
+ echo 'y' | mk-build-deps -irt'apt-get -yV' debian/control
+
+#
+# Build the server
+#
+RUN make -j2 deb
+
+#
+# Clean environment and run the server
+#
+FROM ${from}
+COPY --from=build /usr/local/src/repositories/*.deb /tmp/
+
+RUN apt-get update \
+ && apt-get install -y /tmp/*.deb \
+ && apt-get clean \
+ && rm -r /var/lib/apt/lists/* /tmp/*.deb \
+ \
+ && ln -s /etc/freeradius /etc/raddb
+
+COPY docker-entrypoint.sh /
+RUN chmod +x /docker-entrypoint.sh
+
+EXPOSE 1812/udp 1813/udp
+ENTRYPOINT ["/docker-entrypoint.sh"]
+CMD ["freeradius"]
diff --git a/scripts/docker/debian10/docker-entrypoint.sh b/scripts/docker/debian10/docker-entrypoint.sh
new file mode 100755
index 0000000..93141b0
--- /dev/null
+++ b/scripts/docker/debian10/docker-entrypoint.sh
@@ -0,0 +1,24 @@
+#!/bin/sh
+set -e
+
+# this if will check if the first argument is a flag
+# but only works if all arguments require a hyphenated flag
+# -v; -SL; -f arg; etc will work, but not arg1 arg2
+if [ "$#" -eq 0 ] || [ "${1#-}" != "$1" ]; then
+ set -- freeradius "$@"
+fi
+
+# check for the expected command
+if [ "$1" = 'freeradius' ]; then
+ shift
+ exec freeradius -f "$@"
+fi
+
+# many people are likely to call "radiusd" as well, so allow that
+if [ "$1" = 'radiusd' ]; then
+ shift
+ exec freeradius -f "$@"
+fi
+
+# else default to run whatever the user wanted like "bash" or "sh"
+exec "$@"
diff --git a/scripts/docker/debian11/Dockerfile b/scripts/docker/debian11/Dockerfile
new file mode 100644
index 0000000..7a9931c
--- /dev/null
+++ b/scripts/docker/debian11/Dockerfile
@@ -0,0 +1,64 @@
+ARG from=debian:bullseye
+FROM ${from} as build
+
+ARG DEBIAN_FRONTEND=noninteractive
+
+#
+# Install build tools
+#
+RUN apt-get update
+RUN apt-get install -y devscripts equivs git quilt gcc
+
+#
+# Create build directory
+#
+RUN mkdir -p /usr/local/src/repositories
+WORKDIR /usr/local/src/repositories
+
+#
+# Shallow clone the FreeRADIUS source
+#
+ARG source=https://github.com/FreeRADIUS/freeradius-server.git
+ARG release=v3.2.x
+
+RUN git clone --depth 1 --single-branch --branch ${release} ${source}
+WORKDIR freeradius-server
+
+#
+# Install build dependencies
+#
+RUN git checkout ${release}; \
+ if [ -e ./debian/control.in ]; then \
+ debian/rules debian/control; \
+ fi; \
+ echo 'y' | mk-build-deps -irt'apt-get -yV' debian/control
+
+#
+# Build the server
+#
+RUN make -j2 deb
+
+#
+# Clean environment and run the server
+#
+FROM ${from}
+COPY --from=build /usr/local/src/repositories/*.deb /tmp/
+
+ARG freerad_uid=101
+ARG freerad_gid=101
+
+RUN groupadd -g ${freerad_gid} -r freerad \
+ && useradd -u ${freerad_uid} -g freerad -r -M -d /etc/freeradius -s /usr/sbin/nologin freerad \
+ && apt-get update \
+ && apt-get install -y /tmp/*.deb \
+ && apt-get clean \
+ && rm -r /var/lib/apt/lists/* /tmp/*.deb \
+ \
+ && ln -s /etc/freeradius /etc/raddb
+
+COPY docker-entrypoint.sh /
+RUN chmod +x /docker-entrypoint.sh
+
+EXPOSE 1812/udp 1813/udp
+ENTRYPOINT ["/docker-entrypoint.sh"]
+CMD ["freeradius"]
diff --git a/scripts/docker/debian11/docker-entrypoint.sh b/scripts/docker/debian11/docker-entrypoint.sh
new file mode 100755
index 0000000..93141b0
--- /dev/null
+++ b/scripts/docker/debian11/docker-entrypoint.sh
@@ -0,0 +1,24 @@
+#!/bin/sh
+set -e
+
+# this if will check if the first argument is a flag
+# but only works if all arguments require a hyphenated flag
+# -v; -SL; -f arg; etc will work, but not arg1 arg2
+if [ "$#" -eq 0 ] || [ "${1#-}" != "$1" ]; then
+ set -- freeradius "$@"
+fi
+
+# check for the expected command
+if [ "$1" = 'freeradius' ]; then
+ shift
+ exec freeradius -f "$@"
+fi
+
+# many people are likely to call "radiusd" as well, so allow that
+if [ "$1" = 'radiusd' ]; then
+ shift
+ exec freeradius -f "$@"
+fi
+
+# else default to run whatever the user wanted like "bash" or "sh"
+exec "$@"
diff --git a/scripts/docker/debian9/Dockerfile b/scripts/docker/debian9/Dockerfile
new file mode 100644
index 0000000..1a34f7f
--- /dev/null
+++ b/scripts/docker/debian9/Dockerfile
@@ -0,0 +1,59 @@
+ARG from=debian:stretch
+FROM ${from} as build
+
+ARG DEBIAN_FRONTEND=noninteractive
+
+#
+# Install build tools
+#
+RUN apt-get update
+RUN apt-get install -y devscripts equivs git quilt gcc
+
+#
+# Create build directory
+#
+RUN mkdir -p /usr/local/src/repositories
+WORKDIR /usr/local/src/repositories
+
+#
+# Shallow clone the FreeRADIUS source
+#
+ARG source=https://github.com/FreeRADIUS/freeradius-server.git
+ARG release=v3.2.x
+
+RUN git clone --depth 1 --single-branch --branch ${release} ${source}
+WORKDIR freeradius-server
+
+#
+# Install build dependencies
+#
+RUN git checkout ${release}; \
+ if [ -e ./debian/control.in ]; then \
+ debian/rules debian/control; \
+ fi; \
+ echo 'y' | mk-build-deps -irt'apt-get -yV' debian/control
+
+#
+# Build the server
+#
+RUN make -j2 deb
+
+#
+# Clean environment and run the server
+#
+FROM ${from}
+COPY --from=build /usr/local/src/repositories/*.deb /tmp/
+
+RUN apt-get update \
+ && apt-get install -y /tmp/*.deb \
+ && apt-get clean \
+ && rm -r /var/lib/apt/lists/* /tmp/*.deb \
+ \
+ && ln -s /etc/freeradius /etc/raddb
+
+COPY docker-entrypoint.sh /
+RUN chmod +x /docker-entrypoint.sh
+
+EXPOSE 1812/udp 1813/udp
+ENTRYPOINT ["/docker-entrypoint.sh"]
+CMD ["freeradius"]
diff --git a/scripts/docker/debian9/docker-entrypoint.sh b/scripts/docker/debian9/docker-entrypoint.sh
new file mode 100755
index 0000000..93141b0
--- /dev/null
+++ b/scripts/docker/debian9/docker-entrypoint.sh
@@ -0,0 +1,24 @@
+#!/bin/sh
+set -e
+
+# this if will check if the first argument is a flag
+# but only works if all arguments require a hyphenated flag
+# -v; -SL; -f arg; etc will work, but not arg1 arg2
+if [ "$#" -eq 0 ] || [ "${1#-}" != "$1" ]; then
+ set -- freeradius "$@"
+fi
+
+# check for the expected command
+if [ "$1" = 'freeradius' ]; then
+ shift
+ exec freeradius -f "$@"
+fi
+
+# many people are likely to call "radiusd" as well, so allow that
+if [ "$1" = 'radiusd' ]; then
+ shift
+ exec freeradius -f "$@"
+fi
+
+# else default to run whatever the user wanted like "bash" or "sh"
+exec "$@"
diff --git a/scripts/docker/debiansid/Dockerfile b/scripts/docker/debiansid/Dockerfile
new file mode 100644
index 0000000..191ec49
--- /dev/null
+++ b/scripts/docker/debiansid/Dockerfile
@@ -0,0 +1,64 @@
+ARG from=debian:sid
+FROM ${from} as build
+
+ARG DEBIAN_FRONTEND=noninteractive
+
+#
+# Install build tools
+#
+RUN apt-get update
+RUN apt-get install -y devscripts equivs git quilt gcc
+
+#
+# Create build directory
+#
+RUN mkdir -p /usr/local/src/repositories
+WORKDIR /usr/local/src/repositories
+
+#
+# Shallow clone the FreeRADIUS source
+#
+ARG source=https://github.com/FreeRADIUS/freeradius-server.git
+ARG release=v3.2.x
+
+RUN git clone --depth 1 --single-branch --branch ${release} ${source}
+WORKDIR freeradius-server
+
+#
+# Install build dependencies
+#
+RUN git checkout ${release}; \
+ if [ -e ./debian/control.in ]; then \
+ debian/rules debian/control; \
+ fi; \
+ echo 'y' | mk-build-deps -irt'apt-get -yV' debian/control
+
+#
+# Build the server
+#
+RUN make -j2 deb
+
+#
+# Clean environment and run the server
+#
+FROM ${from}
+COPY --from=build /usr/local/src/repositories/*.deb /tmp/
+
+ARG freerad_uid=101
+ARG freerad_gid=101
+
+RUN groupadd -g ${freerad_gid} -r freerad \
+ && useradd -u ${freerad_uid} -g freerad -r -M -d /etc/freeradius -s /usr/sbin/nologin freerad \
+ && apt-get update \
+ && apt-get install -y /tmp/*.deb \
+ && apt-get clean \
+ && rm -r /var/lib/apt/lists/* /tmp/*.deb \
+ \
+ && ln -s /etc/freeradius /etc/raddb
+
+COPY docker-entrypoint.sh /
+RUN chmod +x /docker-entrypoint.sh
+
+EXPOSE 1812/udp 1813/udp
+ENTRYPOINT ["/docker-entrypoint.sh"]
+CMD ["freeradius"]
diff --git a/scripts/docker/debiansid/docker-entrypoint.sh b/scripts/docker/debiansid/docker-entrypoint.sh
new file mode 100755
index 0000000..93141b0
--- /dev/null
+++ b/scripts/docker/debiansid/docker-entrypoint.sh
@@ -0,0 +1,24 @@
+#!/bin/sh
+set -e
+
+# this if will check if the first argument is a flag
+# but only works if all arguments require a hyphenated flag
+# -v; -SL; -f arg; etc will work, but not arg1 arg2
+if [ "$#" -eq 0 ] || [ "${1#-}" != "$1" ]; then
+ set -- freeradius "$@"
+fi
+
+# check for the expected command
+if [ "$1" = 'freeradius' ]; then
+ shift
+ exec freeradius -f "$@"
+fi
+
+# many people are likely to call "radiusd" as well, so allow that
+if [ "$1" = 'radiusd' ]; then
+ shift
+ exec freeradius -f "$@"
+fi
+
+# else default to run whatever the user wanted like "bash" or "sh"
+exec "$@"
diff --git a/scripts/docker/docker.mk b/scripts/docker/docker.mk
new file mode 100644
index 0000000..9773625
--- /dev/null
+++ b/scripts/docker/docker.mk
@@ -0,0 +1,86 @@
+#
+# Docker-related targets
+#
+# Intended for internal use to publish Docker images to docker hub. Likely need to run
+# "docker login" before any push commands.
+#
+# Examples:
+#
+# Publish to Dockerhub "freeradius-server"
+# make DOCKER_VERSION=3.2.0 DOCKER_BUILD_ARGS="--no-cache" docker-publish
+#
+# Build and push "freeradius-dev" image to Dockerhub (e.g. CI on every commit):
+# make DOCKER_VERSION=latest DOCKER_COMMIT=v3.2.x DOCKER_TAG="freeradius-dev-3.2.x" DOCKER_BUILD_ARGS="--no-cache" docker-push
+#
+# Push to local repository:
+# make DOCKER_VERSION=3.2.0 DOCKER_TAG="our-freeradius-build" DOCKER_REGISTRY="docker.somewhere.example" docker-publish
+#
+# See what is going to happen:
+# make Q=": " ...
+#
+#
+# Variables:
+#
+# Which version to tag as, e.g. "3.2.0". If this is not an actual release
+# version, DOCKER_COMMIT _must_ also be set.
+DOCKER_VERSION := $(RADIUSD_VERSION_STRING)
+#
+# Commit hash/tag/branch to build, will be taken from VERSION above if not overridden, e.g. "release_3_2_0"
+DOCKER_COMMIT := release_$(shell echo $(DOCKER_VERSION) | tr .- __)
+#
+# Build args, most likely "--no-cache"
+DOCKER_BUILD_ARGS :=
+#
+# Tag name, likely "freeradius-server" for releases, or "freeradius-dev" for nightlies.
+DOCKER_TAG := freeradius-server
+#
+# Repository name
+DOCKER_REPO := freeradius
+#
+# Registry to push to
+DOCKER_REGISTRY :=
+#
+
+ifneq "$(DOCKER_REPO)" ""
+ override DOCKER_REPO := $(DOCKER_REPO)/
+endif
+
+ifneq "$(DOCKER_REGISTRY)" ""
+ override DOCKER_REGISTRY := $(DOCKER_REGISTRY)/
+endif
+
+
+.PHONY: docker-ubuntu
+docker-ubuntu:
+ @echo Building ubuntu $(DOCKER_COMMIT)
+ $(Q)docker build $(DOCKER_BUILD_ARGS) scripts/docker/ubuntu22 --build-arg=release=$(DOCKER_COMMIT) -t $(DOCKER_REGISTRY)$(DOCKER_REPO)$(DOCKER_TAG):$(DOCKER_VERSION)
+
+.PHONY: docker-alpine
+docker-alpine:
+ @echo Building alpine $(DOCKER_COMMIT)
+ $(Q)docker build $(DOCKER_BUILD_ARGS) scripts/docker/alpine --build-arg=release=$(DOCKER_COMMIT) -t $(DOCKER_REGISTRY)$(DOCKER_REPO)$(DOCKER_TAG):$(DOCKER_VERSION)-alpine
+
+.PHONY: docker
+docker: docker-ubuntu docker-alpine
+
+.PHONY: docker-push
+docker-push: docker
+ $(Q)docker push $(DOCKER_REGISTRY)$(DOCKER_REPO)$(DOCKER_TAG):$(DOCKER_VERSION)
+ $(Q)docker push $(DOCKER_REGISTRY)$(DOCKER_REPO)$(DOCKER_TAG):$(DOCKER_VERSION)-alpine
+
+.PHONY: docker-tag-latest
+docker-tag-latest: docker
+ $(Q)docker tag $(DOCKER_REGISTRY)$(DOCKER_REPO)$(DOCKER_TAG):$(DOCKER_VERSION) $(DOCKER_REGISTRY)$(DOCKER_REPO)$(DOCKER_TAG):latest
+ $(Q)docker tag $(DOCKER_REGISTRY)$(DOCKER_REPO)$(DOCKER_TAG):$(DOCKER_VERSION)-alpine $(DOCKER_REGISTRY)$(DOCKER_REPO)$(DOCKER_TAG):latest-alpine
+ $(Q)docker tag $(DOCKER_REGISTRY)$(DOCKER_REPO)$(DOCKER_TAG):$(DOCKER_VERSION) $(DOCKER_REGISTRY)$(DOCKER_REPO)$(DOCKER_TAG):latest-3.2
+ $(Q)docker tag $(DOCKER_REGISTRY)$(DOCKER_REPO)$(DOCKER_TAG):$(DOCKER_VERSION)-alpine $(DOCKER_REGISTRY)$(DOCKER_REPO)$(DOCKER_TAG):latest-3.2-alpine
+
+.PHONY: docker-push-latest
+docker-push-latest: docker-push docker-tag-latest
+ $(Q)docker push $(DOCKER_REGISTRY)$(DOCKER_REPO)$(DOCKER_TAG):latest
+ $(Q)docker push $(DOCKER_REGISTRY)$(DOCKER_REPO)$(DOCKER_TAG):latest-alpine
+ $(Q)docker push $(DOCKER_REGISTRY)$(DOCKER_REPO)$(DOCKER_TAG):latest-3.2
+ $(Q)docker push $(DOCKER_REGISTRY)$(DOCKER_REPO)$(DOCKER_TAG):latest-3.2-alpine
+
+.PHONY: docker-publish
+docker-publish: docker-push-latest
diff --git a/scripts/docker/rocky8/Dockerfile b/scripts/docker/rocky8/Dockerfile
new file mode 100644
index 0000000..ca821a3
--- /dev/null
+++ b/scripts/docker/rocky8/Dockerfile
@@ -0,0 +1,108 @@
+ARG from=rockylinux/rockylinux:8
+FROM ${from} as build
+
+RUN rpmkeys --import /etc/pki/rpm-gpg/RPM-GPG-KEY-rockyofficial
+
+#
+# Install build tools
+#
+RUN yum groupinstall -y "Development Tools"
+RUN yum install -y rpmdevtools openssl dnf-utils
+
+#
+# Create build directory
+#
+RUN mkdir -p /usr/local/src/repositories
+WORKDIR /usr/local/src/repositories
+
+#
+# Shallow clone the FreeRADIUS source
+#
+ARG source=https://github.com/FreeRADIUS/freeradius-server.git
+ARG release=v3.2.x
+
+RUN git clone --depth 1 --single-branch --branch ${release} ${source}
+WORKDIR freeradius-server
+
+#
+# Other requirements
+#
+
+# Use LTB's openldap packages instead of the distribution version to avoid linking against NSS
+RUN echo $'[ltb-project]\n\
+name=LTB project packages\n\
+baseurl=https://ltb-project.org/rpm/$releasever/$basearch\n\
+enabled=1\n\
+gpgcheck=1\n\
+gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-LTB-project'\
+> /etc/yum.repos.d/ltb-project.repo
+RUN rpm --import https://ltb-project.org/lib/RPM-GPG-KEY-LTB-project
+
+# EPEL repository for freetds and hiredis
+RUN yum install -y https://dl.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm
+
+RUN yum config-manager --enable powertools
+# Currently needed for hiredis-devel
+RUN yum config-manager --enable epel-testing
+
+#
+# Install build dependencies
+#
+RUN [ -e redhat/freeradius.spec ] && yum-builddep -y redhat/freeradius.spec
+
+#
+# Create RPM build environment
+#
+ENV BUILDDIR=/root/rpmbuild
+RUN rpmdev-setuptree
+
+RUN ./configure
+RUN make freeradius-server-$(cat VERSION).tar.bz2
+RUN cp freeradius-server-$(cat VERSION).tar.bz2 $BUILDDIR/SOURCES/
+RUN cp -r redhat/* $BUILDDIR/SOURCES/
+RUN cp -r redhat/freeradius.spec $BUILDDIR/SPECS/
+WORKDIR $BUILDDIR
+
+#
+# Build the server
+#
+ENV QA_RPATHS=0x0003
+RUN rpmbuild -bb --define "_release $release" "$BUILDDIR/SPECS/freeradius.spec"
+
+RUN mkdir /root/rpms
+RUN mv $BUILDDIR/RPMS/*/*.rpm /root/rpms/
+
+#
+# Clean environment and run the server
+#
+FROM ${from}
+COPY --from=build /root/rpms /tmp/
+
+# Use LTB's openldap packages instead of the distribution version to avoid linking against NSS
+RUN echo $'[ltb-project]\n\
+name=LTB project packages\n\
+baseurl=https://ltb-project.org/rpm/$releasever/$basearch\n\
+enabled=1\n\
+gpgcheck=1\n\
+gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-LTB-project'\
+> /etc/yum.repos.d/ltb-project.repo \
+ && rpm --import https://ltb-project.org/lib/RPM-GPG-KEY-LTB-project \
+ \
+# EPEL repository for freetds and hiredis
+ && yum install -y https://dl.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm \
+ && yum install -y dnf-utils \
+ && yum config-manager --enable epel-testing
+
+ARG radiusd_uid=95
+ARG radiusd_gid=95
+
+RUN groupadd -g ${radiusd_gid} -r radiusd \
+ && useradd -u ${radiusd_uid} -g radiusd -r -M -d /home/radiusd -s /sbin/nologin radiusd \
+ && yum install -y /tmp/*.rpm
+
+COPY docker-entrypoint.sh /
+RUN chmod +x /docker-entrypoint.sh
+
+EXPOSE 1812/udp 1813/udp
+ENTRYPOINT ["/docker-entrypoint.sh"]
+CMD ["radiusd"]
diff --git a/scripts/docker/rocky8/docker-entrypoint.sh b/scripts/docker/rocky8/docker-entrypoint.sh
new file mode 100755
index 0000000..900ad6b
--- /dev/null
+++ b/scripts/docker/rocky8/docker-entrypoint.sh
@@ -0,0 +1,24 @@
+#!/bin/sh
+set -e
+
+# this if will check if the first argument is a flag
+# but only works if all arguments require a hyphenated flag
+# -v; -SL; -f arg; etc will work, but not arg1 arg2
+if [ "$#" -eq 0 ] || [ "${1#-}" != "$1" ]; then
+ set -- radiusd "$@"
+fi
+
+# check for the expected command
+if [ "$1" = 'radiusd' ]; then
+ shift
+ exec radiusd -f "$@"
+fi
+
+# debian people are likely to call "freeradius" as well, so allow that
+if [ "$1" = 'freeradius' ]; then
+ shift
+ exec radiusd -f "$@"
+fi
+
+# else default to run whatever the user wanted like "bash" or "sh"
+exec "$@"
diff --git a/scripts/docker/ubuntu18/Dockerfile b/scripts/docker/ubuntu18/Dockerfile
new file mode 100644
index 0000000..7322026
--- /dev/null
+++ b/scripts/docker/ubuntu18/Dockerfile
@@ -0,0 +1,59 @@
+ARG from=ubuntu:18.04
+FROM ${from} as build
+
+ARG DEBIAN_FRONTEND=noninteractive
+
+#
+# Install build tools
+#
+RUN apt-get update
+RUN apt-get install -y devscripts equivs git quilt gcc
+
+#
+# Create build directory
+#
+RUN mkdir -p /usr/local/src/repositories
+WORKDIR /usr/local/src/repositories
+
+#
+# Shallow clone the FreeRADIUS source
+#
+ARG source=https://github.com/FreeRADIUS/freeradius-server.git
+ARG release=v3.2.x
+
+RUN git clone --depth 1 --single-branch --branch ${release} ${source}
+WORKDIR freeradius-server
+
+#
+# Install build dependencies
+#
+RUN git checkout ${release}; \
+ if [ -e ./debian/control.in ]; then \
+ debian/rules debian/control; \
+ fi; \
+ echo 'y' | mk-build-deps -irt'apt-get -yV' debian/control
+
+#
+# Build the server
+#
+RUN make -j2 deb
+
+#
+# Clean environment and run the server
+#
+FROM ${from}
+COPY --from=build /usr/local/src/repositories/*.deb /tmp/
+
+RUN apt-get update \
+ && apt-get install -y /tmp/*.deb \
+ && apt-get clean \
+ && rm -r /var/lib/apt/lists/* /tmp/*.deb \
+ \
+ && ln -s /etc/freeradius /etc/raddb
+
+COPY docker-entrypoint.sh /
+RUN chmod +x /docker-entrypoint.sh
+
+EXPOSE 1812/udp 1813/udp
+ENTRYPOINT ["/docker-entrypoint.sh"]
+CMD ["freeradius"]
diff --git a/scripts/docker/ubuntu18/docker-entrypoint.sh b/scripts/docker/ubuntu18/docker-entrypoint.sh
new file mode 100755
index 0000000..93141b0
--- /dev/null
+++ b/scripts/docker/ubuntu18/docker-entrypoint.sh
@@ -0,0 +1,24 @@
+#!/bin/sh
+set -e
+
+# this if will check if the first argument is a flag
+# but only works if all arguments require a hyphenated flag
+# -v; -SL; -f arg; etc will work, but not arg1 arg2
+if [ "$#" -eq 0 ] || [ "${1#-}" != "$1" ]; then
+ set -- freeradius "$@"
+fi
+
+# check for the expected command
+if [ "$1" = 'freeradius' ]; then
+ shift
+ exec freeradius -f "$@"
+fi
+
+# many people are likely to call "radiusd" as well, so allow that
+if [ "$1" = 'radiusd' ]; then
+ shift
+ exec freeradius -f "$@"
+fi
+
+# else default to run whatever the user wanted like "bash" or "sh"
+exec "$@"
diff --git a/scripts/docker/ubuntu20/Dockerfile b/scripts/docker/ubuntu20/Dockerfile
new file mode 100644
index 0000000..783ebc9
--- /dev/null
+++ b/scripts/docker/ubuntu20/Dockerfile
@@ -0,0 +1,61 @@
+ARG from=ubuntu:20.04
+FROM ${from} as build
+
+ARG DEBIAN_FRONTEND=noninteractive
+
+#
+# Install build tools
+#
+RUN apt-get update
+RUN apt-get install -y devscripts equivs git quilt gcc
+
+#
+# Create build directory
+#
+RUN mkdir -p /usr/local/src/repositories
+WORKDIR /usr/local/src/repositories
+
+#
+# Shallow clone the FreeRADIUS source
+#
+ARG source=https://github.com/FreeRADIUS/freeradius-server.git
+ARG release=v3.2.x
+
+RUN git clone --depth 1 --single-branch --branch ${release} ${source}
+WORKDIR freeradius-server
+
+#
+# Install build dependencies
+#
+RUN git checkout ${release}; \
+ if [ -e ./debian/control.in ]; then \
+ debian/rules debian/control; \
+ fi; \
+ echo 'y' | mk-build-deps -irt'apt-get -yV' debian/control
+
+#
+# Build the server
+#
+RUN make -j2 deb
+
+#
+# Clean environment and run the server
+#
+FROM ${from}
+COPY --from=build /usr/local/src/repositories/*.deb /tmp/
+
+ARG DEBIAN_FRONTEND=noninteractive
+
+RUN apt-get update \
+ && apt-get install -y /tmp/*.deb \
+ && apt-get clean \
+ && rm -r /var/lib/apt/lists/* /tmp/*.deb \
+ \
+ && ln -s /etc/freeradius /etc/raddb
+
+COPY docker-entrypoint.sh /
+RUN chmod +x /docker-entrypoint.sh
+
+EXPOSE 1812/udp 1813/udp
+ENTRYPOINT ["/docker-entrypoint.sh"]
+CMD ["freeradius"]
diff --git a/scripts/docker/ubuntu20/docker-entrypoint.sh b/scripts/docker/ubuntu20/docker-entrypoint.sh
new file mode 100755
index 0000000..93141b0
--- /dev/null
+++ b/scripts/docker/ubuntu20/docker-entrypoint.sh
@@ -0,0 +1,24 @@
+#!/bin/sh
+set -e
+
+# this if will check if the first argument is a flag
+# but only works if all arguments require a hyphenated flag
+# -v; -SL; -f arg; etc will work, but not arg1 arg2
+if [ "$#" -eq 0 ] || [ "${1#-}" != "$1" ]; then
+ set -- freeradius "$@"
+fi
+
+# check for the expected command
+if [ "$1" = 'freeradius' ]; then
+ shift
+ exec freeradius -f "$@"
+fi
+
+# many people are likely to call "radiusd" as well, so allow that
+if [ "$1" = 'radiusd' ]; then
+ shift
+ exec freeradius -f "$@"
+fi
+
+# else default to run whatever the user wanted like "bash" or "sh"
+exec "$@"
diff --git a/scripts/docker/ubuntu22/Dockerfile b/scripts/docker/ubuntu22/Dockerfile
new file mode 100644
index 0000000..9e6aa57
--- /dev/null
+++ b/scripts/docker/ubuntu22/Dockerfile
@@ -0,0 +1,66 @@
+ARG from=ubuntu:22.04
+FROM ${from} as build
+
+ARG DEBIAN_FRONTEND=noninteractive
+
+#
+# Install build tools
+#
+RUN apt-get update
+RUN apt-get install -y devscripts equivs git quilt gcc
+
+#
+# Create build directory
+#
+RUN mkdir -p /usr/local/src/repositories
+WORKDIR /usr/local/src/repositories
+
+#
+# Shallow clone the FreeRADIUS source
+#
+ARG source=https://github.com/FreeRADIUS/freeradius-server.git
+ARG release=v3.2.x
+
+RUN git clone --depth 1 --single-branch --branch ${release} ${source}
+WORKDIR freeradius-server
+
+#
+# Install build dependencies
+#
+RUN git checkout ${release}; \
+ if [ -e ./debian/control.in ]; then \
+ debian/rules debian/control; \
+ fi; \
+ echo 'y' | mk-build-deps -irt'apt-get -yV' debian/control
+
+#
+# Build the server
+#
+RUN make -j2 deb
+
+#
+# Clean environment and run the server
+#
+FROM ${from}
+COPY --from=build /usr/local/src/repositories/*.deb /tmp/
+
+ARG freerad_uid=101
+ARG freerad_gid=101
+
+ARG DEBIAN_FRONTEND=noninteractive
+
+RUN groupadd -g ${freerad_gid} -r freerad \
+ && useradd -u ${freerad_uid} -g freerad -r -M -d /etc/freeradius -s /usr/sbin/nologin freerad \
+ && apt-get update \
+ && apt-get install -y /tmp/*.deb \
+ && apt-get clean \
+ && rm -r /var/lib/apt/lists/* /tmp/*.deb \
+ \
+ && ln -s /etc/freeradius /etc/raddb
+
+COPY docker-entrypoint.sh /
+RUN chmod +x /docker-entrypoint.sh
+
+EXPOSE 1812/udp 1813/udp
+ENTRYPOINT ["/docker-entrypoint.sh"]
+CMD ["freeradius"]
diff --git a/scripts/docker/ubuntu22/docker-entrypoint.sh b/scripts/docker/ubuntu22/docker-entrypoint.sh
new file mode 100755
index 0000000..93141b0
--- /dev/null
+++ b/scripts/docker/ubuntu22/docker-entrypoint.sh
@@ -0,0 +1,24 @@
+#!/bin/sh
+set -e
+
+# this if will check if the first argument is a flag
+# but only works if all arguments require a hyphenated flag
+# -v; -SL; -f arg; etc will work, but not arg1 arg2
+if [ "$#" -eq 0 ] || [ "${1#-}" != "$1" ]; then
+ set -- freeradius "$@"
+fi
+
+# check for the expected command
+if [ "$1" = 'freeradius' ]; then
+ shift
+ exec freeradius -f "$@"
+fi
+
+# many people are likely to call "radiusd" as well, so allow that
+if [ "$1" = 'radiusd' ]; then
+ shift
+ exec freeradius -f "$@"
+fi
+
+# else default to run whatever the user wanted like "bash" or "sh"
+exec "$@"
diff --git a/scripts/exec-program-wait b/scripts/exec-program-wait
new file mode 100755
index 0000000..a851309
--- /dev/null
+++ b/scripts/exec-program-wait
@@ -0,0 +1,62 @@
+#!/bin/sh
+#
+# $Id$
+#
+# Sample script to add Attribute/Value pairs in the reply sent to
+# the NAS.
+#
+# Before version 2.0 of FreeRADIUS, the script could be run from the
+# deprecated attributes 'Exec-Program' and 'Exec-Program-Wait'.
+# However, these attributes are no longer supported and you have to
+# use the module 'rlm_exec' instead.
+#
+# An entry for the module 'rlm_exec' must be added to the file
+# 'radiusd.conf' with the path of the script.
+#
+#modules {
+# exec {
+# program = "/path/to/program/exec-program-wait"
+# wait = yes
+# input_pairs = request
+# output_pairs = reply
+# }
+# ...
+#}
+#
+#authorize {
+# ...
+# exec
+# ...
+#}
+#
+# Each of the attributes in the request will be available in an
+# environment variable. The name of the variable depends on the
+# name of the attribute. All letters are converted to upper case,
+# and all hyphens '-' to underscores.
+#
+# For example, the User-Name attribute will be in the $USER_NAME
+# environment variable. If you want to see the list of all of
+# the variables, try adding a line 'printenv > /tmp/exec-program-wait'
+# to the script. Then look in the file for a complete list of
+# variables.
+#
+# The return value of the program run determines the result
+# of the exec instance call as follows:
+# (See doc/configurable_failover for details)
+# < 0 : fail the module failed
+# = 0 : ok the module succeeded
+# = 1 : reject the module rejected the user
+# = 2 : fail the module failed
+# = 3 : ok the module succeeded
+# = 4 : handled the module has done everything to handle the request
+# = 5 : invalid the user's configuration entry was invalid
+# = 6 : userlock the user was locked out
+# = 7 : notfound the user was not found
+# = 8 : noop the module did nothing
+# = 9 : updated the module updated information in the request
+# > 9 : fail the module failed
+#
+echo "Reply-Message += \"Hello, %u\","
+echo "Reply-Message += \"PATH=$PATH\","
+echo Framed-IP-Address = 255.255.255.255
+exit 0
diff --git a/scripts/git/post-receive b/scripts/git/post-receive
new file mode 100755
index 0000000..3f44994
--- /dev/null
+++ b/scripts/git/post-receive
@@ -0,0 +1,151 @@
+#!/bin/sh
+# git Post-receive configuration update script
+#
+# To install:
+# * Enable r/w control socket for the user you're pushing with
+# * cd <config-dir>/raddb git config receive.denyCurrentBranch ignore
+# * cp ./post-receive <config-dir>/raddb/.git/hooks/
+# * # Edit the capitalized variables below to match your environment.
+#
+# Copyright 2012 Arran Cudbard-Bell <a.cudbard-bell@freeradius.org>
+
+PATH=/bin:/usr/bin:/usr/sbin:/sbin
+# Each ": ${VAR=default}" below assigns only when the variable is unset,
+# so all of these can be overridden from the environment.
+# Tag to update when we successfully manage to start the server with a new configuration
+: ${STABLE_TAG='stable'}
+
+# Descriptive name of daemon
+: ${DAEMON_DESC='FreeRADIUS'}
+
+# Init script for radiusd
+: ${DAEMON_STATUS='/etc/init.d/radiusd status'}
+
+# Command used to restart the RADIUS daemon
+: ${DAEMON_REST='radmin -e hup'}
+
+# Command used to verify the new configuration
+: ${DAEMON_CONF='radiusd -Cxl stdout'}
+
+# Command used to execute git functions
+: ${GIT_EXEC='env -i git'}
+
+# Abort if there are local untracked files
+: ${ABORT_UNTRACKED=true}
+
+# Push changes to any remotes we have configured
+: ${PUSH_TO_REMOTES=false}
+
+# git feeds "<oldrev> <newrev> <refname>" lines on stdin; drain them,
+# keeping only the last triple ($oldrev is used by conf_rollback and
+# $newrev by the tag update below).
+while read oldrev newrev refname
+do
+:
+done
+
+# Report an exit code as a human readable result.
+# $1 - exit code; prints "ok" when zero, "failed" otherwise.
+status () {
+ if [ $1 -eq 0 ]; then
+ echo "ok"
+ else
+ echo "failed"
+ fi
+}
+
+conf_rollback () {
+ # Revert the working copy to the last known-good configuration:
+ # the stable tag if it exists, otherwise the pre-push revision
+ # ($oldrev, captured from the stdin ref list above).
+ # Prints ok/failed and returns git's exit code.
+ # Use stable tag if it exists...
+ if $GIT_EXEC show-ref $STABLE_TAG > /dev/null 2>&1; then
+ echo -n "Attempting to roll config back to tag: \"$STABLE_TAG\"... "
+ $GIT_EXEC reset --hard $STABLE_TAG; ret=$?
+ else
+ echo -n "Attempting to roll config back to commit: \"$oldrev\"... "
+ $GIT_EXEC reset --hard $oldrev; ret=$?
+ fi
+
+ status $ret
+ return $ret
+}
+
+conf_check () {
+ # Dry-run the server ($DAEMON_CONF, e.g. "radiusd -Cxl stdout") to
+ # validate the configuration.  Prints ok/failed, returns the exit code.
+ echo -n "Checking new configuration... "
+ $DAEMON_CONF; ret=$?
+
+ status $ret
+ return $ret
+}
+
+daemon_status () {
+ # Run the init-script status command; returns 0 when the daemon is up.
+ # NOTE(review): unlike the other helpers this never calls "status", so
+ # the trailing "... " is only completed by $DAEMON_STATUS's own output
+ # -- confirm this omission is intentional.
+ echo -n "Checking if radiusd is running ... "
+ $DAEMON_STATUS; ret=$?
+
+ return $ret
+}
+
+daemon_restart () {
+ echo -n "Restarting server... "
+
+ $DAEMON_REST > /dev/null 2>&1; ret=$?
+
+ status $ret
+ return $ret
+}
+
+# Reset the current working directory state
+# (post-receive hooks run inside the .git directory, so ".." is
+# presumably the raddb working copy -- see the install notes above)
+cd ..
+
+# Friendly update of working copy to head state
+$GIT_EXEC checkout -f
+if $ABORT_UNTRACKED && [ `$GIT_EXEC status --porcelain | wc -l` -gt 0 ]; then
+ echo "WARNING: Untracked changes have been made to this git repository,"
+ echo "changes have been committed but untracked files should be removed,"
+ echo "committed or added to .gitignore and $DAEMON_DESC restarted manually."
+ $GIT_EXEC status --short
+
+ if ! conf_check; then
+ exit 64
+ fi
+
+ # NOTE(review): this point is reached only when conf_check SUCCEEDED,
+ # yet the message below says errors were found -- the wording (or the
+ # test above) looks inverted; confirm intent.
+ echo "WARNING: $DAEMON_DESC found errors in the configuration,"
+ echo "these errors should be corrected before updating working copy."
+ exit 65
+fi
+
+# Clean out all untracked files and directories (if there are local files you
+# wish to keep, they should be added to .gitignore)
+# NOTE: the previous "if ! cmd; then exit $?" exited with the status of
+# the *negated* test (always 0 when cmd failed), silently ignoring git
+# errors; capture the real exit code instead.
+$GIT_EXEC clean -d -f; ret=$?
+if [ $ret -ne 0 ]; then
+ exit $ret
+fi
+
+# Reset all tracked files to the HEAD state
+$GIT_EXEC reset --hard; ret=$?
+if [ $ret -ne 0 ]; then
+ exit $ret
+fi
+
+# Check if the server finds any errors in the new config
+if ! conf_check; then
+ echo "WARNING: $DAEMON_DESC found errors in the configuration,"
+ echo "please fix the errors and push the corrected configuration."
+
+ conf_rollback
+ exit 64
+else
+ # Only restart when the daemon is already running.  If restarting
+ # with the new config fails: roll back, restart with the old config,
+ # and still exit non-zero so the pusher knows the new config never
+ # took effect.
+ if daemon_status && ! daemon_restart ; then
+ if ! conf_rollback; then
+ echo "WARNING: Manually verify $DAEMON_DESC status immediately!"
+ exit 64
+ fi
+
+ if ! daemon_restart; then
+ echo "WARNING: Manually verify $DAEMON_DESC status immediately!"
+ exit 64
+ fi
+
+ exit 64
+ fi
+
+ # Record this revision as the last known-good configuration.
+ $GIT_EXEC tag -f $STABLE_TAG $newrev
+fi
+
+if $PUSH_TO_REMOTES; then
+ echo "Pushing to remote repositories"
+ for remote in `$GIT_EXEC remote`; do
+ $GIT_EXEC push "$remote"
+ done
+fi
+
+exit 0
diff --git a/scripts/install.mk b/scripts/install.mk
new file mode 100644
index 0000000..9164115
--- /dev/null
+++ b/scripts/install.mk
@@ -0,0 +1,250 @@
+# boilermake: A reusable, but flexible, boilerplate Makefile.
+#
+# Copyright 2008, 2009, 2010 Dan Moulding, Alan T. DeKok
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+# ADD_INSTALL_RULE.* - Parameterized "functions" that adds a new
+# installation to the Makefile. There should be one ADD_INSTALL_RULE
+# definition for each type of target that is used in the build.
+#
+# New rules can be added by copying one of the existing ones, and
+# replacing the line after the "mkdir"
+#
+
+#
+# You can watch what it's doing by:
+#
+# $ VERBOSE=1 make ... args ...
+#
+# Q prefixes every recipe command: "@" (silenced) by default, empty
+# (commands echoed) when VERBOSE is set to any non-empty value.
+ifeq "${VERBOSE}" ""
+ Q=@
+else
+ Q=
+endif
+
+# ADD_INSTALL_RULE.exe - Parameterized "function" that adds a new rule
+# and phony target for installing an executable.
+#
+# USE WITH EVAL
+#
+# ${1} - executable target name.  Uses ${1}_INSTALLDIR, ${1}_BUILD and
+# the optional ${1}_POSTINSTALL hook; copies from BUILD_DIR/bin.
+define ADD_INSTALL_RULE.exe
+ ALL_INSTALL += $${${1}_INSTALLDIR}/$(notdir ${1})
+
+ # Global install depends on ${1}
+ install: $${${1}_INSTALLDIR}/$(notdir ${1})
+
+ # Install executable ${1}
+ $${${1}_INSTALLDIR}/$(notdir ${1}): ${JLIBTOOL} $${${1}_BUILD}/${1} | $${${1}_INSTALLDIR}
+ @$(ECHO) INSTALL ${1}
+ $(Q)$${PROGRAM_INSTALL} -c -m 755 $${BUILD_DIR}/bin/${1} $${${1}_INSTALLDIR}/
+ $(Q)$${${1}_POSTINSTALL}
+
+endef
+
+# ADD_INSTALL_RULE.a - Parameterized "function" that adds a new rule
+# and phony target for installing a static library
+#
+# USE WITH EVAL
+#
+# NOTE(review): the prerequisite here is plain ${1} (the .exe rule uses
+# $${${1}_BUILD}/${1}), and the archive is installed 0755 where 0644 is
+# conventional for static libraries -- confirm both are intended.
+define ADD_INSTALL_RULE.a
+ ALL_INSTALL += $${${1}_INSTALLDIR}/$(notdir ${1})
+
+ # Global install depends on ${1}
+ install: $${${1}_INSTALLDIR}/$(notdir ${1})
+
+ # Install static library ${1}
+ $${${1}_INSTALLDIR}/$(notdir ${1}): ${JLIBTOOL} ${1} | $${${1}_INSTALLDIR}
+ @$(ECHO) INSTALL ${1}
+ $(Q)$${PROGRAM_INSTALL} -c -m 755 $${BUILD_DIR}/lib/${1} $${${1}_INSTALLDIR}/
+ $(Q)$${${1}_POSTINSTALL}
+
+endef
+
+# ADD_INSTALL_RULE.la - Parameterized "function" that adds a new rule
+# and phony target for installing a libtool library
+#
+# FIXME: The libtool install *also* installs a bunch of other files.
+# ensure that those are removed, too.
+#
+# USE WITH EVAL
+#
+# NOTE(review): $${LOCAL_FLAGS_MIN} is passed straight to the install
+# program here (the other rules don't do this) -- confirm it contains
+# install(1)-compatible arguments.
+define ADD_INSTALL_RULE.la
+ ALL_INSTALL += $${${1}_INSTALLDIR}/$(notdir ${1})
+
+ # Global install depends on ${1}
+ install: $${${1}_INSTALLDIR}/$(notdir ${1})
+
+ # Install libtool library ${1}
+ $${${1}_INSTALLDIR}/$(notdir ${1}): ${JLIBTOOL} $${${1}_BUILD}/${1} | $${${1}_INSTALLDIR}
+ @$(ECHO) INSTALL ${1}
+ $(Q)$${PROGRAM_INSTALL} -c -m 755 $${LOCAL_FLAGS_MIN} $${BUILD_DIR}/lib/${1} $${${1}_INSTALLDIR}/
+ $(Q)$${${1}_POSTINSTALL}
+
+endef
+
+# ADD_INSTALL_RULE.man - Parameterized "function" that adds a new rule
+# and phony target for installing a "man" page. It will take care of
+# installing it into the correct subdirectory of "man".
+#
+# USE WITH EVAL
+#
+# ${1} - source manual page path, ${2} - destination directory (already
+# includes the manN section component; see ADD_INSTALL_TARGET).
+define ADD_INSTALL_RULE.man
+ ALL_INSTALL += ${2}/$(notdir ${1})
+
+ # Global install depends on ${1}
+ install: ${2}/$(notdir ${1})
+
+ # Install manual page ${1}
+ ${2}/$(notdir ${1}): ${JLIBTOOL} ${1} | ${2}
+ @$(ECHO) INSTALL $(notdir ${1})
+ $(Q)$${PROGRAM_INSTALL} -c -m 644 ${1} ${2}/
+
+endef
+
+
+# ADD_INSTALL_RULE.dir - Parameterized "function" that adds a new rule
+# and phony target for installing a directory
+#
+# USE WITH EVAL
+#
+# ${1} - directory to create.  Marked .PHONY, so the rule re-runs on
+# every build; "install -d" is a no-op when the directory exists.
+define ADD_INSTALL_RULE.dir
+ # Install directory
+ .PHONY: ${1}
+ ${1}: ${JLIBTOOL}
+ @$(ECHO) INSTALL -d -m 755 ${1}
+ $(Q)$${PROGRAM_INSTALL} -d -m 755 ${1}
+endef
+
+
+# ADD_INSTALL_TARGET - Parameterized "function" that adds a new rule
+# which installs everything for the target.
+#
+# USE WITH EVAL
+#
+# ${1} - target name; dispatches on ${1}_SUFFIX to pick the matching
+# ADD_INSTALL_RULE.* above, and adds man page rules from ${1}_MAN.
+define ADD_INSTALL_TARGET
+ # Figure out which target rule to use for installation.
+ ifeq "$${${1}_SUFFIX}" ".exe"
+ ifeq "$${TGT_INSTALLDIR}" ".."
+ TGT_INSTALLDIR := $${bindir}
+ endif
+ else
+ ifeq "$${TGT_INSTALLDIR}" ".."
+ TGT_INSTALLDIR := $${libdir}
+ endif
+ endif
+
+ # add rules to install the target
+ ifneq "$${TGT_INSTALLDIR}" ""
+ # NOTE(review): "${LL}" is not defined anywhere in this file -- if it
+ # is unset it expands to nothing; confirm it is not a typo.
+ ${1}_INSTALLDIR := ${LL}$${DESTDIR}$${TGT_INSTALLDIR}
+
+ $$(eval $$(call ADD_INSTALL_RULE$${${1}_SUFFIX},${1}))
+ endif
+
+ # add rules to install the MAN pages.
+ ifneq "$$(strip $${${1}_MAN})" ""
+ ifeq "$${mandir}" ""
+ $$(error You must define 'mandir' in order to be able to install MAN pages.)
+ endif
+
+ # NOTE(review): the guard above tests $${${1}_MAN}, but the lines
+ # below operate on plain $${MAN} -- confirm MAN is set by the caller.
+ MAN := $$(call QUALIFY_PATH,$${DIR},$${MAN})
+ MAN := $$(call CANONICAL_PATH,$${MAN})
+
+ $$(foreach PAGE,$${MAN},\
+ $$(eval $$(call ADD_INSTALL_RULE.man,$${PAGE},\
+ $${DESTDIR}$${mandir}/man$$(subst .,,$$(suffix $${PAGE})))))
+ endif
+endef
+
+.PHONY: install
+install:
+
+ALL_INSTALL :=
+
+# Define reasonable defaults for all of the installation directories.
+# The user can over-ride these, but these are the defaults.
+# These mirror the GNU coding standard directory variables.
+# NOTE(review): logdir's default ends in a trailing '/' unlike all the
+# others, and docdir is only defaulted when PROJECT_NAME is set.
+ifeq "${prefix}" ""
+ prefix = /usr/local
+endif
+ifeq "${exec_prefix}" ""
+ exec_prefix = ${prefix}
+endif
+ifeq "${bindir}" ""
+ bindir = ${exec_prefix}/bin
+endif
+ifeq "${sbindir}" ""
+ sbindir = ${exec_prefix}/sbin
+endif
+ifeq "${libdir}" ""
+ libdir = ${exec_prefix}/lib
+endif
+ifeq "${sysconfdir}" ""
+ sysconfdir = ${prefix}/etc
+endif
+ifeq "${localstatedir}" ""
+ localstatedir = ${prefix}/var
+endif
+ifeq "${datarootdir}" ""
+ datarootdir = ${prefix}/share
+endif
+ifeq "${datadir}" ""
+ datadir = ${prefix}/share
+endif
+ifeq "${mandir}" ""
+ mandir = ${datadir}/man
+endif
+ifeq "${docdir}" ""
+ ifneq "${PROJECT_NAME}" ""
+ docdir = ${datadir}/doc/${PROJECT_NAME}
+ endif
+endif
+ifeq "${logdir}" ""
+ logdir = ${localstatedir}/log/
+endif
+ifeq "${includedir}" ""
+ includedir = ${prefix}/include
+endif
+
+
+# Un-install any installed programs. We DON'T want to depend on the
+# install target. Doing so would cause "make uninstall" to build it,
+# install it, and then remove it.
+#
+# We also want to uninstall only when there are "install_foo" targets.
+.PHONY: uninstall
+uninstall:
+	$(Q)rm -f ${ALL_INSTALL} ./.no_such_file
+
+# Wrapper around INSTALL
+# ("./.no_such_file" above presumably guarantees rm -f always receives
+# at least one argument, even when ALL_INSTALL is empty.)
+ifeq "${PROGRAM_INSTALL}" ""
+ PROGRAM_INSTALL := ${INSTALL}
+
+endif
+
+# Make just the installation directories
+.PHONY: installdirs
+installdirs:
+
+# Be nice to the user. If there is no INSTALL program, then print out
+# a helpful message. Without this check, the "install" rules defined
+# above would try to run a command-line with a blank INSTALL, and give
+# some inscrutable error.
+ifeq "${INSTALL}" ""
+install: install_ERROR
+
+.PHONY: install_ERROR
+install_ERROR:
+	@$(ECHO) Please define INSTALL in order to enable the installation rules.
+	$(Q)exit 1
+endif
diff --git a/scripts/jlibtool.c b/scripts/jlibtool.c
new file mode 100644
index 0000000..77b7db2
--- /dev/null
+++ b/scripts/jlibtool.c
@@ -0,0 +1,2608 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <stdio.h>
+#include <string.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+
+#if !defined(__MINGW32__)
+# include <sys/wait.h>
+#endif
+
+#include <unistd.h>
+#include <dirent.h>
+#include <errno.h>
+#include <assert.h>
+
+#ifdef __EMX__
+# define SHELL_CMD "sh"
+# define GEN_EXPORTS "emxexp"
+# define DEF2IMPLIB_CMD "emximp"
+# define SHARE_SW "-Zdll -Zmtd"
+# define USE_OMF 1
+# define TRUNCATE_DLL_NAME
+# define DYNAMIC_LIB_EXT "dll"
+/* Fix: was misspelled "EXE_EX"; every other platform block and the
+ * "#ifdef EXE_EXT" checks (e.g. in print_config) use EXE_EXT, so the
+ * typo left OS/2 executables without the .exe extension. */
+# define EXE_EXT ".exe"
+/* OMF is the native format under OS/2 */
+# if USE_OMF
+
+# define STATIC_LIB_EXT "lib"
+# define OBJECT_EXT "obj"
+# define LIBRARIAN "emxomfar"
+# define LIBRARIAN_OPTS "cr"
+# else
+/* but the alternative, a.out, can fork() which is sometimes necessary */
+# define STATIC_LIB_EXT "a"
+# define OBJECT_EXT "o"
+# define LIBRARIAN "ar"
+# define LIBRARIAN_OPTS "cr"
+# endif
+#endif
+
+#if defined(__APPLE__)
+# define SHELL_CMD "/bin/sh"
+# define DYNAMIC_LIB_EXT "dylib"
+# define MODULE_LIB_EXT "bundle"
+# define STATIC_LIB_EXT "a"
+# define OBJECT_EXT "o"
+# define LIBRARIAN "ar"
+# define LIBRARIAN_OPTS "cr"
+/* man libtool(1) documents ranlib option of -c. */
+# define RANLIB "ranlib"
+# define PIC_FLAG "-fPIC -fno-common"
+# define SHARED_OPTS "-dynamiclib"
+# define MODULE_OPTS "-bundle -dynamic"
+# define DYNAMIC_LINK_OPTS "-flat_namespace"
+# define DYNAMIC_LINK_UNDEFINED "-undefined suppress"
+# define dynamic_link_version_func darwin_dynamic_link_function
+# define DYNAMIC_INSTALL_NAME "-install_name"
+# define DYNAMIC_LINK_NO_INSTALL "-dylib_file"
+# define HAS_REALPATH
+/*-install_name /Users/jerenk/apache-2.0-cvs/lib/libapr.0.dylib -compatibility_version 1 -current_version 1.0 */
+# define LD_LIBRARY_PATH "DYLD_LIBRARY_PATH"
+# define LD_LIBRARY_PATH_LOCAL "DYLD_FALLBACK_LIBRARY_PATH"
+#endif
+
+#if defined(__linux__) || defined(__FreeBSD__) || defined(__NetBSD__) || defined(__OpenBSD__) || (defined(__sun) && defined(__GNUC__))
+# define SHELL_CMD "/bin/sh"
+# define DYNAMIC_LIB_EXT "so"
+# define MODULE_LIB_EXT "so"
+# define STATIC_LIB_EXT "a"
+# define OBJECT_EXT "o"
+# define LIBRARIAN "ar"
+# define LIBRARIAN_OPTS "cr"
+# define RANLIB "ranlib"
+# define PIC_FLAG "-fPIC"
+# define RPATH "-rpath"
+# define SHARED_OPTS "-shared"
+# define MODULE_OPTS "-shared"
+# define LINKER_FLAG_PREFIX "-Wl,"
+#if !defined(__sun)
+# define DYNAMIC_LINK_OPTS LINKER_FLAG_PREFIX "-export-dynamic"
+#else
+# define DYNAMIC_LINK_OPTS ""
+#endif
+# define ADD_MINUS_L
+# define LD_RUN_PATH "LD_RUN_PATH"
+# define LD_LIBRARY_PATH "LD_LIBRARY_PATH"
+# define LD_LIBRARY_PATH_LOCAL LD_LIBRARY_PATH
+#endif
+
+#if defined(__sun) && !defined(__GNUC__)
+# define SHELL_CMD "/bin/sh"
+# define DYNAMIC_LIB_EXT "so"
+# define MODULE_LIB_EXT "so"
+# define STATIC_LIB_EXT "a"
+# define OBJECT_EXT "o"
+# define LIBRARIAN "ar"
+# define LIBRARIAN_OPTS "cr"
+# define RANLIB "ranlib"
+# define PIC_FLAG "-fPIC"
+# define RPATH "-R"
+# define SHARED_OPTS "-G"
+# define MODULE_OPTS "-G"
+# define DYNAMIC_LINK_OPTS ""
+# define LINKER_FLAG_NO_EQUALS
+# define ADD_MINUS_L
+# define HAS_REALPATH
+# define LD_RUN_PATH "LD_RUN_PATH"
+# define LD_LIBRARY_PATH "LD_LIBRARY_PATH"
+# define LD_LIBRARY_PATH_LOCAL LD_LIBRARY_PATH
+#endif
+
+#if defined(_OSD_POSIX)
+# define SHELL_CMD "/usr/bin/sh"
+# define DYNAMIC_LIB_EXT "so"
+# define MODULE_LIB_EXT "so"
+# define STATIC_LIB_EXT "a"
+# define OBJECT_EXT "o"
+# define LIBRARIAN "ar"
+# define LIBRARIAN_OPTS "cr"
+# define SHARED_OPTS "-G"
+# define MODULE_OPTS "-G"
+# define LINKER_FLAG_PREFIX "-Wl,"
+# define NEED_SNPRINTF
+#endif
+
+#if defined(sinix) && defined(mips) && defined(__SNI_TARG_UNIX)
+# define SHELL_CMD "/usr/bin/sh"
+# define DYNAMIC_LIB_EXT "so"
+# define MODULE_LIB_EXT "so"
+# define STATIC_LIB_EXT "a"
+# define OBJECT_EXT "o"
+# define LIBRARIAN "ar"
+# define LIBRARIAN_OPTS "cr"
+# define RPATH "-Brpath"
+# define SHARED_OPTS "-G"
+# define MODULE_OPTS "-G"
+# define LINKER_FLAG_PREFIX "-Wl,"
+# define DYNAMIC_LINK_OPTS LINKER_FLAG_PREFIX "-Blargedynsym"
+
+# define NEED_SNPRINTF
+# define LD_RUN_PATH "LD_RUN_PATH"
+# define LD_LIBRARY_PATH "LD_LIBRARY_PATH"
+# define LD_LIBRARY_PATH_LOCAL LD_LIBRARY_PATH
+#endif
+
+#if defined(__MINGW32__)
+# define SHELL_CMD "sh"
+# define DYNAMIC_LIB_EXT "dll"
+# define MODULE_LIB_EXT "dll"
+# define STATIC_LIB_EXT "a"
+# define OBJECT_EXT "o"
+# define LIBRARIAN "ar"
+# define LIBRARIAN_OPTS "cr"
+# define RANLIB "ranlib"
+# define LINKER_FLAG_PREFIX "-Wl,"
+# define SHARED_OPTS "-shared"
+# define MODULE_OPTS "-shared"
+# define MKDIR_NO_UMASK
+# define EXE_EXT ".exe"
+#endif
+
+#ifndef CC
+#define CC "clang"
+#endif
+
+#ifndef CXX
+#define CXX "g++"
+#endif
+
+#ifndef LINK_C
+#define LINK_C "clang"
+#endif
+
+#ifndef LINK_CXX
+#define LINK_CXX "g++"
+#endif
+
+#ifndef LIBDIR
+#define LIBDIR "/usr/local/lib"
+#endif
+
+#define OBJDIR ".libs"
+
+#ifndef SHELL_CMD
+#error Unsupported platform: Please add defines for SHELL_CMD etc. for your platform.
+#endif
+
+#ifdef NEED_SNPRINTF
+#include <stdarg.h>
+#endif
+
+#ifdef __EMX__
+#include <process.h>
+#endif
+
+#ifndef PATH_MAX
+#define PATH_MAX 1024
+#endif
+
+
+/* We want to say we are libtool 1.4 for shlibtool compatibility. */
+#define VERSION "1.4"
+
+#define DEBUG(fmt, ...) if(cmd->options.debug) printf(fmt, ## __VA_ARGS__)
+#define NOTICE(fmt, ...) if(!cmd->options.silent) printf(fmt, ## __VA_ARGS__)
+#define ERROR(fmt, ...) fprintf(stderr, fmt, ## __VA_ARGS__)
+
+/* Major operation requested via --mode= */
+enum tool_mode {
+ MODE_UNKNOWN,
+ MODE_COMPILE,
+ MODE_LINK,
+ MODE_EXECUTE,
+ MODE_INSTALL,
+};
+
+/* Kind of artifact the compile/link step produces */
+enum output_type {
+ OUT_GENERAL,
+ OUT_OBJECT,
+ OUT_PROGRAM,
+ OUT_LIB,
+ OUT_STATIC_LIB_ONLY,
+ OUT_DYNAMIC_LIB_ONLY,
+ OUT_MODULE,
+};
+
+/* Position-independent-code preference (-prefer-pic / -prefer-non-pic) */
+enum pic_mode {
+ PIC_UNKNOWN,
+ PIC_PREFER,
+ PIC_AVOID,
+};
+
+/* -static vs -shared linking preference */
+enum shared_mode {
+ SHARE_UNSET,
+ SHARE_STATIC,
+ SHARE_SHARED,
+};
+
+/* Classification of a library/object argument.
+ * NOTE(review): TYPE_UKNOWN is misspelled (TYPE_UNKNOWN); renaming
+ * would touch every use elsewhere in the file, so only flagged here. */
+enum lib_type {
+ TYPE_UKNOWN,
+ TYPE_STATIC_LIB,
+ TYPE_DYNAMIC_LIB,
+ TYPE_MODULE_LIB,
+ TYPE_OBJECT,
+};
+
+/* Fixed-capacity string vector: init_count_chars() allocates room for
+ * PATH_MAX pointers; push_count_chars() does not bounds-check. */
+typedef struct {
+ char const **vals;
+ int num;
+} count_chars;
+
+/* Library name in its build-tree and installed forms */
+typedef struct {
+ char const *normal;
+ char const *install;
+} library_name;
+
+/* Linker option sets for build-tree vs installed linking */
+typedef struct {
+ count_chars *normal;
+ count_chars *install;
+ count_chars *dependencies;
+} library_opts;
+
+/* Boolean/enum flags parsed from the command line */
+typedef struct {
+ int silent;
+ int debug;
+ enum shared_mode shared;
+ int export_all;
+ int dry_run;
+ enum pic_mode pic_mode;
+ int export_dynamic;
+ int no_install;
+} options_t;
+
+/* Aggregated state for one jlibtool invocation */
+typedef struct {
+ enum tool_mode mode;
+ enum output_type output;
+ options_t options;
+
+ char const *output_name;
+ char const *fake_output_name;
+ char const *basename;
+
+ char const *install_path;
+ char const *compiler;
+ char const *program;
+ count_chars *program_opts;
+
+ count_chars *arglist;
+ count_chars *tmp_dirs;
+ count_chars *obj_files;
+ count_chars *dep_rpaths;
+ count_chars *rpaths;
+
+ library_name static_name;
+ library_name shared_name;
+ library_name module_name;
+
+ library_opts static_opts;
+ library_opts shared_opts;
+
+ char const *version_info;
+ char const *undefined_flag;
+} command_t;
+
+#ifdef RPATH
+static void add_rpath(count_chars *cc, char const *path);
+#endif
+
+/** Print usage information to stdout and exit with the given code */
+static void usage(int code)
+{
+ printf("Usage: jlibtool [OPTIONS...] COMMANDS...\n");
+ printf("jlibtool is a replacement for GNU libtool with similar functionality.\n\n");
+
+ printf(" --config show all configuration variables\n");
+ printf(" --debug enable verbose shell tracing\n");
+ printf(" --dry-run display commands without modifying any files\n");
+ printf(" --help display this help message and exit\n");
+ printf(" --mode=MODE use operational mode MODE (you *must* set mode)\n");
+
+ printf(" --silent don't print informational messages\n");
+ printf(" --tag=TAG Ignored for libtool compatibility\n");
+ printf(" --version print version information\n");
+
+
+ printf(" --shared Build shared libraries when using --mode=link\n");
+ printf(" --export-all Try to export 'def' file on some platforms\n");
+
+ printf("\nMODE must be one of the following:\n\n");
+ printf(" compile compile a source file into a jlibtool object\n");
+ printf(" execute automatically set library path, then run a program\n");
+ printf(" install install libraries or executables\n");
+ printf(" link create a library or an executable\n");
+
+ printf("\nMODE-ARGS can be the following:\n\n");
+ printf(" -export-dynamic accepted and ignored\n");
+ printf(" -module create a module when linking\n");
+ printf(" -shared create a shared library when linking\n");
+ printf(" -prefer-pic prefer position-independent-code when compiling\n");
+ printf(" -prefer-non-pic prefer non position-independent-code when compiling\n");
+ printf(" -static create a static library when linking\n");
+ printf(" -no-install link libraries locally\n");
+ printf(" -rpath arg Set install path for shared libraries\n");
+ printf(" -l arg pass '-l arg' to the link stage\n");
+ printf(" -L arg pass '-L arg' to the link stage\n");
+ printf(" -R dir add 'dir' to runtime library search path.\n");
+ printf(" -Zexe accepted and ignored\n");
+ printf(" -avoid-version accepted and ignored\n");
+
+ exit(code);
+}
+
+#if defined(NEED_SNPRINTF)
+/* Write at most n characters to the buffer in str, return the
+ * number of chars written or -1 if the buffer would have been
+ * overflowed.
+ *
+ * This is portable to any POSIX-compliant system has /dev/null
+ */
+static FILE *f = NULL;
+static int vsnprintf(char *str, size_t n, char const *fmt, va_list ap)
+{
+ int res;
+
+ if (!f) {
+ f = fopen("/dev/null","w");
+ }
+
+ if (!f) {
+ return -1;
+ }
+
+ /* Trick: make str the /dev/null stream's buffer so vfprintf measures
+ * the formatted length without writing past n bytes; the real write
+ * happens via vsprintf below only when the result fits. */
+ setvbuf(f, str, _IOFBF, n);
+
+ res = vfprintf(f, fmt, ap);
+
+ /* NOTE(review): "res < n" compares int against size_t, and when the
+ * result does NOT fit, str is left unmodified (no truncated copy is
+ * produced) -- confirm callers tolerate both. */
+ if ((res > 0) && (res < n)) {
+ res = vsprintf( str, fmt, ap );
+ }
+ return res;
+}
+
+/* snprintf shim built on the vsnprintf replacement above */
+static int snprintf(char *str, size_t n, char const *fmt, ...)
+{
+ va_list ap;
+ int res;
+
+ va_start(ap, fmt);
+ res = vsnprintf(str, n, fmt, ap);
+ va_end(ap);
+ return res;
+}
+#endif
+
+/* malloc() or die: jlibtool treats allocation failure as fatal, so
+ * callers never need to check for NULL. */
+static void *lt_malloc(size_t size)
+{
+ void *mem = malloc(size);
+
+ if (mem == NULL) {
+  ERROR("Failed allocating %zu bytes, OOM\n", size);
+  exit(1);
+ }
+
+ return mem;
+}
+
+/* Free memory reached through a const pointer without writing a
+ * const-discarding cast: the pointer value is laundered via memcpy. */
+static void lt_const_free(const void *ptr)
+{
+ void *writable;
+
+ memcpy(&writable, &ptr, sizeof(writable));
+ free(writable);
+}
+
+/* Initialise a count_chars with a fixed capacity of PATH_MAX pointers.
+ * NOTE(review): push_count_chars() never checks against this capacity. */
+static void init_count_chars(count_chars *cc)
+{
+ cc->vals = (char const**) lt_malloc(PATH_MAX*sizeof(char*));
+ cc->num = 0;
+}
+
+/** Allocate and initialise a new, empty count_chars
+ *
+ * @return the new count_chars.  Never NULL: lt_malloc() exits on OOM,
+ *	which is why the old post-allocation NULL check was dead code
+ *	and has been removed.
+ */
+static count_chars *alloc_countchars(void)
+{
+ count_chars *out;
+
+ out = lt_malloc(sizeof(count_chars));
+ init_count_chars(out);
+
+ return out;
+}
+
+/* Drop all stored values (the vals array itself is retained) */
+static void clear_count_chars(count_chars *cc)
+{
+ int i;
+ for (i = 0; i < cc->num; i++) {
+ cc->vals[i] = NULL;
+ }
+
+ cc->num = 0;
+}
+
+/* Append one value.  Stores the pointer itself (no copy is made), and
+ * does not bounds-check against the PATH_MAX capacity set at init. */
+static void push_count_chars(count_chars *cc, char const *newval)
+{
+ cc->vals[cc->num++] = newval;
+}
+
+/* Remove and return the last value, or NULL when empty */
+static char const *pop_count_chars(count_chars *cc)
+{
+ if (!cc->num) {
+ return NULL;
+ }
+ return cc->vals[--cc->num];
+}
+
+/* Insert a value at position, shifting later entries up by one */
+static void insert_count_chars(count_chars *cc, char const *newval, int position)
+{
+ int i;
+
+ for (i = cc->num; i > position; i--) {
+ cc->vals[i] = cc->vals[i-1];
+ }
+
+ cc->vals[position] = newval;
+ cc->num++;
+}
+
+/* Append all non-NULL values of cctoadd onto cc */
+static void append_count_chars(count_chars *cc, count_chars *cctoadd)
+{
+ int i;
+ for (i = 0; i < cctoadd->num; i++) {
+ if (cctoadd->vals[i]) {
+ push_count_chars(cc, cctoadd->vals[i]);
+ }
+ }
+}
+
+/* Join all non-NULL values into one lt_malloc'd string (caller frees,
+ * e.g. via lt_const_free).  When delim is non-zero it is appended after
+ * EVERY value, including the last one. */
+static char const *flatten_count_chars(count_chars *cc, char delim)
+{
+ int i, size;
+ char *newval;
+
+ size = 0;
+ for (i = 0; i < cc->num; i++) {
+ if (cc->vals[i]) {
+ size += strlen(cc->vals[i]) + 1;
+ if (delim) {
+ size++;
+ }
+ }
+ }
+
+ newval = (char*)lt_malloc(size + 1);
+ newval[0] = '\0';
+
+ for (i = 0; i < cc->num; i++) {
+ if (cc->vals[i]) {
+ strcat(newval, cc->vals[i]);
+ if (delim) {
+ size_t len = strlen(newval);
+ newval[len] = delim;
+ newval[len + 1] = '\0';
+ }
+ }
+ }
+
+ return newval;
+}
+
+/** Escape a command string for SHELL_CMD -c
+ *
+ * Backslash-escapes double quotes and backslashes; spaces are escaped
+ * only while inside an odd number of double quotes.  On MinGW the whole
+ * result is additionally wrapped in double quotes (the 2n+3 allocation
+ * covers worst-case escaping plus those quotes and the NUL).
+ *
+ * @param str string to escape (not modified).
+ * @return newly lt_malloc'd escaped copy; caller frees.
+ */
+static char *shell_esc(char const *str)
+{
+ int in_quote = 0;
+ char *cmd;
+ uint8_t *d;
+ uint8_t const *s;
+
+ cmd = (char *)lt_malloc(2 * strlen(str) + 3);
+ d = (unsigned char *)cmd;
+ s = (const unsigned char *)str;
+
+#ifdef __MINGW32__
+ *d++ = '\"';
+#endif
+
+ for (; *s; ++s) {
+ if (*s == '"') {
+ *d++ = '\\';
+ in_quote++;
+ }
+ else if (*s == '\\' || (*s == ' ' && (in_quote % 2))) {
+ *d++ = '\\';
+ }
+ *d++ = *s;
+ }
+
+#ifdef __MINGW32__
+ *d++ = '\"';
+#endif
+
+ *d = '\0';
+ return cmd;
+}
+
+/** Echo (unless silent) and execute an external command
+ *
+ * @param cmd  invocation state; silent and dry_run flags are honoured.
+ * @param file unused; kept for call-site compatibility.
+ * @param argv NULL-terminated argument vector to execute.
+ * @return the command's exit status (or terminating signal number),
+ *	0 on dry-run, 1 on fork or other failure.
+ */
+static int external_spawn(command_t *cmd, char const *file, char const **argv)
+{
+ file = file; /* -Wunused */
+
+ if (!cmd->options.silent) {
+  char const **argument = argv;
+  NOTICE("Executing: ");
+  while (*argument) {
+   NOTICE("%s ", *argument);
+   argument++;
+  }
+  puts("");
+ }
+
+ if (cmd->options.dry_run) {
+  return 0;
+ }
+#if defined(__EMX__) || defined(__MINGW32__)
+ return spawnvp(P_WAIT, argv[0], argv);
+#else
+ {
+  pid_t pid;
+  pid = fork();
+  if (pid < 0) {
+   /* Fix: fork failure previously fell into the waitpid branch,
+    * calling waitpid(-1, ...) i.e. waiting on any child. */
+   ERROR("Failed to fork: %s\n", strerror(errno));
+   return 1;
+  }
+  if (pid == 0) {
+   execvp(argv[0], (char**)argv);
+   /* Fix: only reached when execvp fails.  The old code returned
+    * execvp's -1, letting the CHILD fall back into the caller and
+    * re-run jlibtool's remaining logic; _exit instead. */
+   ERROR("Failed to execute \"%s\": %s\n", argv[0], strerror(errno));
+   _exit(127);
+  }
+  else {
+   int status;
+   waitpid(pid, &status, 0);
+
+   /*
+    * Exited via exit(status)
+    */
+   if (WIFEXITED(status)) {
+    return WEXITSTATUS(status);
+   }
+
+#ifdef WTERMSIG
+   if (WIFSIGNALED(status)) {
+    return WTERMSIG(status);
+   }
+#endif
+
+   /*
+    * Some other failure.
+    */
+   return 1;
+  }
+ }
+#endif
+}
+
+/* Build "program + program_opts + cc", shell-escape it, and run it via
+ * SHELL_CMD -c.  Returns the spawned command's exit status.
+ * NOTE(review): the raw flattened string and the escaped command are
+ * freed, but tmpcc.vals (PATH_MAX pointers from init_count_chars) is
+ * leaked on every call -- confirm acceptable for a short-lived tool. */
+static int run_command(command_t *cmd, count_chars *cc)
+{
+ int ret;
+ char *command;
+ char *tmp;
+ char const *raw;
+ char const *spawn_args[4];
+ count_chars tmpcc;
+
+ init_count_chars(&tmpcc);
+
+ if (cmd->program) {
+ push_count_chars(&tmpcc, cmd->program);
+ }
+
+ append_count_chars(&tmpcc, cmd->program_opts);
+
+ append_count_chars(&tmpcc, cc);
+
+ raw = flatten_count_chars(&tmpcc, ' ');
+ command = shell_esc(raw);
+
+ /* free the const string via a memcpy'd writable alias */
+ memcpy(&tmp, &raw, sizeof(tmp));
+ free(tmp);
+
+ spawn_args[0] = SHELL_CMD;
+ spawn_args[1] = "-c";
+ spawn_args[2] = command;
+ spawn_args[3] = NULL;
+ ret = external_spawn(cmd, spawn_args[0], spawn_args);
+
+ free(command);
+
+ return ret;
+}
+
+/*
+ * print configuration
+ * shlibpath_var is used in configure.
+ */
+#define printc(_x,_y) if (!value || !strcmp(value, _x)) printf(_x "=\"%s\"\n", _y)
+
+/** Print libtool-compatible configuration variables
+ *
+ * @param value if NULL print every variable, otherwise only the one
+ *	whose name matches exactly.
+ *	NOTE(review): parse_long_opt passes "" for a bare --config,
+ *	which matches nothing, so nothing is printed -- confirm.
+ */
+static void print_config(char const *value)
+{
+#ifdef LD_RUN_PATH
+ printc("runpath_var", LD_RUN_PATH);
+#endif
+#ifdef LD_LIBRARY_PATH
+ printc("shlibpath_var", LD_LIBRARY_PATH);
+#endif
+#ifdef LD_LIBRARY_PATH_LOCAL
+ printc("shlocallibpath_var", LD_LIBRARY_PATH_LOCAL);
+#endif
+#ifdef SHELL_CMD
+ printc("SHELL", SHELL_CMD);
+#endif
+#ifdef OBJECT_EXT
+ printc("objext", OBJECT_EXT);
+#endif
+#ifdef OBJDIR
+ printc("objdir", OBJDIR);
+#endif
+#ifdef DYNAMIC_LIB_EXT
+ /* add a '.' prefix because libtool does that. */
+ printc("shrext_cmds", "echo ." DYNAMIC_LIB_EXT);
+ /* add a '.' prefix because libtool does that. */
+ printc("shrext", "." DYNAMIC_LIB_EXT);
+#endif
+#ifdef EXE_EXT
+ printc("exeext", EXE_EXT);
+#endif
+#ifdef STATIC_LIB_EXT
+ printc("libext", STATIC_LIB_EXT);
+#endif
+#ifdef LIBRARIAN
+ printc("AR", LIBRARIAN);
+#endif
+#ifdef LIBRARIAN_OPTS
+ printc("AR_FLAGS", LIBRARIAN_OPTS);
+#endif
+#ifdef LINKER_FLAG_PREFIX
+ printc("wl", LINKER_FLAG_PREFIX);
+#endif
+#ifdef RANLIB
+ printc("ranlib", RANLIB);
+#endif
+
+}
+/*
+ * Add a directory to the runtime library search path.
+ * No-op on platforms without an RPATH linker flag.
+ */
+static void add_runtime_dir_lib(char const *arg, command_t *cmd)
+{
+#ifdef RPATH
+ add_rpath(cmd->shared_opts.dependencies, arg);
+#else
+ (void) arg; /* -Wunused */
+ (void) cmd;
+#endif
+}
+
+/** Parse one "--name" or "--name=value" long option
+ *
+ * @param arg option text after the leading "--".
+ * @param cmd invocation state to update.
+ * @return 1 if the option was consumed, 0 if unknown or over-long.
+ */
+static int parse_long_opt(char const *arg, command_t *cmd)
+{
+ char *equal_pos = strchr(arg, '=');
+ char var[50];
+ char value[500];
+
+ if (equal_pos) {
+  /* Fix: bound the name against var[] (previously unchecked), and
+   * check the value against sizeof(value) -- the original tested
+   * sizeof(var), leaving a stack buffer overflow via strcpy for
+   * values between 50 and 499 bytes... wait, the other way: values
+   * >= 50 were rejected, but names > 49 bytes overflowed var[]. */
+  if ((size_t)(equal_pos - arg) >= sizeof(var)) {
+   return 0;
+  }
+  strncpy(var, arg, equal_pos - arg);
+  var[equal_pos - arg] = 0;
+
+  if (strlen(equal_pos + 1) >= sizeof(value)) {
+   return 0;
+  }
+  strcpy(value, equal_pos + 1);
+ } else {
+  strncpy(var, arg, sizeof(var) - 1);
+  var[sizeof(var) - 1] = '\0';
+
+  value[0] = '\0';
+ }
+
+ if (strcmp(var, "silent") == 0) {
+  cmd->options.silent = 1;
+ } else if (strcmp(var, "quiet") == 0) {
+  cmd->options.silent = 1;
+ } else if (strcmp(var, "debug") == 0) {
+  cmd->options.debug = 1;
+ } else if (strcmp(var, "mode") == 0) {
+  if (cmd->mode != MODE_UNKNOWN) {
+   ERROR("Cannot set --mode twice\n");
+   exit(1);
+  }
+
+  if (strcmp(value, "compile") == 0) {
+   cmd->mode = MODE_COMPILE;
+   cmd->output = OUT_OBJECT;
+
+  } else if (strcmp(value, "link") == 0) {
+   cmd->mode = MODE_LINK;
+   cmd->output = OUT_LIB;
+
+  } else if (strcmp(value, "install") == 0) {
+   cmd->mode = MODE_INSTALL;
+
+  } else if (strcmp(value, "execute") == 0) {
+   cmd->mode = MODE_EXECUTE;
+
+  } else {
+   ERROR("Unknown mode \"%s\"\n", value);
+   exit(1);
+  }
+
+ } else if (strcmp(var, "shared") == 0) {
+  if ((cmd->mode == MODE_LINK) && (cmd->output == OUT_GENERAL)) {
+   cmd->output = OUT_DYNAMIC_LIB_ONLY;
+  }
+  cmd->options.shared = SHARE_SHARED;
+
+ } else if (strcmp(var, "export-all") == 0) {
+  cmd->options.export_all = 1;
+
+ } else if (strcmp(var, "dry-run") == 0) {
+  NOTICE("Dry-run mode on!\n");
+  cmd->options.dry_run = 1;
+
+ } else if (strcmp(var, "version") == 0) {
+  NOTICE("Version " VERSION "\n");
+
+ } else if (strcmp(var, "help") == 0) {
+  usage(0);
+
+ } else if (strcmp(var, "config") == 0) {
+  print_config(value);
+
+  exit(0);
+ } else {
+  return 0;
+ }
+
+ return 1;
+}
+
+/* Return 1 if we eat it. */
+/** Parse a single-dash option (arg points just past the '-')
+ *
+ * @param arg option text after the leading '-'.
+ * @param cmd invocation state to update.
+ * @return 1 if consumed, 0 otherwise.
+ */
+static int parse_short_opt(char const *arg, command_t *cmd)
+{
+ if (strcmp(arg, "export-dynamic") == 0) {
+ cmd->options.export_dynamic = 1;
+ return 1;
+ }
+
+ if (strcmp(arg, "module") == 0) {
+ cmd->output = OUT_MODULE;
+ return 1;
+ }
+
+ if (strcmp(arg, "shared") == 0) {
+ if (cmd->mode == MODE_LINK) {
+ cmd->output = OUT_DYNAMIC_LIB_ONLY;
+ }
+ cmd->options.shared = SHARE_SHARED;
+ return 1;
+ }
+
+ if (strcmp(arg, "Zexe") == 0) {
+ return 1;
+ }
+
+ if (strcmp(arg, "avoid-version") == 0) {
+ return 1;
+ }
+
+ if (strcmp(arg, "prefer-pic") == 0) {
+ cmd->options.pic_mode = PIC_PREFER;
+ return 1;
+ }
+
+ if (strcmp(arg, "prefer-non-pic") == 0) {
+ cmd->options.pic_mode = PIC_AVOID;
+ return 1;
+ }
+
+ if (strcmp(arg, "static") == 0) {
+ if ((cmd->mode == MODE_LINK) && (cmd->output == OUT_LIB)) {
+ cmd->output = OUT_STATIC_LIB_ONLY;
+ }
+ cmd->options.shared = SHARE_STATIC;
+ return 1;
+ }
+
+ if (cmd->mode == MODE_LINK) {
+ if (strcmp(arg, "no-install") == 0) {
+ cmd->options.no_install = 1;
+ return 1;
+ }
+ if (arg[0] == 'L' || arg[0] == 'l') {
+ /* Hack... */
+ /* arg-- assumes the caller passed &argv[i][1], so stepping back
+ * one byte recovers the full "-L.."/"-l.." token -- TODO confirm
+ * at the call site (outside this view). */
+ arg--;
+ push_count_chars(cmd->shared_opts.dependencies, arg);
+ return 1;
+ } else if (arg[0] == 'R' && arg[1]) {
+ /* -Rdir Add dir to runtime library search path. */
+ add_runtime_dir_lib(&arg[1], cmd);
+ return 1;
+ }
+ }
+ return 0;
+}
+
+#ifdef TRUNCATE_DLL_NAME
+/** Shorten a DLL file name to 8 characters, stripping any mod_ prefix
+ *
+ * @param path library path to shorten (not modified).
+ * @return a strdup'd copy of path with the name component shortened;
+ *	the caller owns (and should free) the returned string.
+ */
+static char *truncate_dll_name(char *path)
+{
+ /* Cut DLL name down to 8 characters after removing any mod_ prefix */
+ char *tmppath = strdup(path);
+ char *slash = strrchr(tmppath, '/');
+ /* Fix: the original did strrchr(...) + 1 unconditionally, which is
+  * NULL + 1 when path has no '/'; fall back to the whole string. */
+ char *newname = slash ? slash + 1 : tmppath;
+ char *ext = strrchr(newname, '.');
+ int len;
+
+ if (ext == NULL) {
+  return tmppath;
+ }
+
+ len = ext - newname;
+
+ if (strncmp(newname, "mod_", 4) == 0) {
+  /* Fix: strcpy with overlapping src/dst is undefined behaviour;
+   * memmove (including the NUL) is the defined equivalent. */
+  memmove(newname, newname + 4, strlen(newname + 4) + 1);
+  len -= 4;
+ }
+
+ if (len > 8) {
+  char *dot = strchr(newname, '.');
+
+  memmove(newname + 8, dot, strlen(dot) + 1);
+ }
+
+ return tmppath;
+}
+#endif
+
+/** strtol() wrapper that clamps out-of-range input to 0
+ *
+ * @param nptr   string to parse.
+ * @param endptr if non-NULL, set to the first unparsed character.
+ * @param base   numeric base for the conversion.  Fix: this parameter
+ *	was previously ignored (strtol was hard-coded to base 10).
+ * @return the parsed value, or 0 if the value overflowed (ERANGE).
+ */
+static long safe_strtol(char const *nptr, char const **endptr, int base)
+{
+ long rv;
+
+ errno = 0;
+
+ rv = strtol(nptr, (char**)endptr, base);
+
+ if (errno == ERANGE) {
+  return 0;
+ }
+
+ return rv;
+}
+
+/* mkdir() wrapper: snapshots the process umask (umask(0) then restore)
+ * and warns instead of failing when the directory already exists.
+ * NOTE(review): the mode passed is ~old_umask, which sets every bit NOT
+ * masked -- including bits outside 0777.  mkdir applies the umask again
+ * anyway, so the intent looks like "maximum permitted mode"; confirm. */
+static void safe_mkdir(command_t *cmd, char const *path)
+{
+ int status;
+ mode_t old_umask;
+
+ old_umask = umask(0);
+ umask(old_umask);
+
+#ifdef MKDIR_NO_UMASK
+ status = mkdir(path);
+#else
+ status = mkdir(path, ~old_umask);
+#endif
+ if ((status < 0) && (errno != EEXIST)) {
+ NOTICE("Warning: mkdir of %s failed\n", path);
+ }
+}
+
+/** Returns a file's name without the path
+ *
+ * Understands both '/' and '\\' separators and returns a pointer into
+ * the original string (no allocation is performed).
+ *
+ * @param path to break apart.
+ * @return pointer in path.
+ */
+static char const *file_name(char const *path)
+{
+ char const *sep;
+
+ sep = strrchr(path, '/');
+ if (!sep) sep = strrchr(path, '\\'); /* eww windows? */
+
+ return sep ? sep + 1 : path;
+}
+
+#ifdef GEN_EXPORTS
+
+/** Returns a file's name without path or extension
+ *
+ * NOTE(review): ownership is mixed -- when an extension exists this
+ * returns a fresh lt_malloc'd copy (caller frees), otherwise it returns
+ * a pointer into path; confirm callers handle both.
+ *
+ * @param path to check
+ * @return pointer in path.
+ */
+static char const *file_name_stripped(char const *path)
+{
+ char const *name;
+ char const *ext;
+
+ name = file_name(path);
+ ext = strrchr(name, '.');
+
+ if (ext) {
+ char *trimmed;
+
+ trimmed = lt_malloc(ext - name + 1);
+ strncpy(trimmed, name, ext - name);
+ trimmed[ext-name] = 0;
+
+ return trimmed;
+ }
+
+ return name;
+}
+#endif
+
/** Build Darwin linker version flags from a libtool version string
 *
 * version_info is in the form of MAJOR:MINOR:PATCH.  Produces
 * "-compatibility_version M -current_version M.m".  Note PATCH is parsed
 * but deliberately not emitted, and a MAJOR of 0 is bumped to 1 because
 * Darwin's ld rejects a compatibility version of zero.
 *
 * @param version_info version string, may be NULL.
 * @return newly allocated flag string (never freed; jlibtool is one-shot).
 */
static char const *darwin_dynamic_link_function(char const *version_info)
{
	char *newarg;
	long major, minor, patch;

	major = 0;
	minor = 0;
	patch = 0;

	if (version_info) {
		major = safe_strtol(version_info, &version_info, 10);

		/* strtol() always sets the end pointer, so these NULL checks
		 * are belt-and-braces; version_info now points at ':' or NUL.
		 */
		if (version_info) {
			if (version_info[0] == ':') {
				version_info++;
			}

			minor = safe_strtol(version_info, &version_info, 10);

			if (version_info) {
				if (version_info[0] == ':') {
					version_info++;
				}

				patch = safe_strtol(version_info, &version_info, 10);

			}
		}
	}

	/* Avoid -dylib_compatibility_version must be greater than zero errors. */
	if (major == 0) {
		major = 1;
	}
	newarg = (char*)lt_malloc(100);
	snprintf(newarg, 99,
		 "-compatibility_version %ld -current_version %ld.%ld",
		 major, major, minor);

	return newarg;
}
+
+
/*
 * Insert '.libs/' before the file-name component of buffer.  The caller
 * ensures the buffer is large enough to handle 6 extra characters.
 */
static void add_dotlibs(char *buffer)
{
	char *name = strrchr(buffer, '/');

	if (!name) {
		if (!buffer[0]) {
			strcpy(buffer, ".libs/");
			return;
		}
		name = buffer;
	} else {
		name++;
	}
	/* +1 so the terminating NUL moves too (previously dropped,
	 * leaving the string unterminated).
	 */
	memmove(name + 6, name, strlen(name) + 1);
	memcpy(name, ".libs/", 6);
}
+
+static char *gen_library_name(char const *name, enum lib_type genlib)
+{
+ char *newarg, *newext;
+
+ newarg = (char *)calloc(strlen(name) + 11, 1);
+
+ if (genlib == TYPE_MODULE_LIB && strncmp(name, "lib", 3) == 0) {
+ name += 3;
+ }
+
+ if (genlib == TYPE_MODULE_LIB) {
+ strcpy(newarg, file_name(name));
+ }
+ else {
+ strcpy(newarg, name);
+ }
+
+ newext = strrchr(newarg, '.');
+ if (!newext) {
+ ERROR("Library path does not have an extension\n");
+ free(newarg);
+
+ return NULL;
+ }
+ newext++;
+
+ switch (genlib) {
+ case TYPE_STATIC_LIB:
+ strcpy(newext, STATIC_LIB_EXT);
+ break;
+ case TYPE_DYNAMIC_LIB:
+ strcpy(newext, DYNAMIC_LIB_EXT);
+ break;
+ case TYPE_MODULE_LIB:
+ strcpy(newext, MODULE_LIB_EXT);
+ break;
+
+ default:
+ break;
+ }
+
+ add_dotlibs(newarg);
+
+ return newarg;
+}
+
+static char *gen_install_name(char const *name, enum lib_type genlib)
+{
+ char *newname;
+ int rv;
+ struct stat sb;
+
+ newname = gen_library_name(name, genlib);
+ if (!newname) return NULL;
+
+ /* Check if it exists. If not, return NULL. */
+ rv = stat(newname, &sb);
+
+ if (rv) {
+ free(newname);
+ return NULL;
+ }
+
+ return newname;
+}
+
+static char const *check_object_exists(command_t *cmd, char const *arg, int arglen)
+{
+ char *newarg, *ext;
+ struct stat sb;
+
+ newarg = (char *)lt_malloc(arglen + 10);
+ memcpy(newarg, arg, arglen);
+ newarg[arglen] = 0;
+ ext = newarg + arglen;
+
+ strcpy(ext, OBJECT_EXT);
+
+ DEBUG("Checking (obj): %s\n", newarg);
+ if (stat(newarg, &sb) == 0) {
+ return newarg;
+ }
+
+ free(newarg);
+
+ return NULL;
+}
+
/** Probe the filesystem for a real library matching a *.la argument
 *
 * Extensions are tried in order: dynamic (skipped when PIC is avoided or
 * a static-only build was requested), static, module, object.
 *
 * @param cmd command context (PIC/shared options).
 * @param arg library path to probe.
 * @param pathlen length of arg's directory part.
 * @param libdircheck 0 - look in the directory as given,
 *	1 - look under its .libs/ subdirectory.
 * @param[out] libtype set to the type of library found.
 * @return a newly allocated path of the file found, or NULL.
 */
static char *check_library_exists(command_t *cmd, char const *arg, int pathlen,
				  int libdircheck, enum lib_type *libtype)
{
	char *newarg, *ext;
	int pass, rv, newpathlen;

	newarg = (char *)lt_malloc(strlen(arg) + 10);
	strcpy(newarg, arg);
	newarg[pathlen] = '\0';

	newpathlen = pathlen;
	if (libdircheck) {
		add_dotlibs(newarg);
		newpathlen += sizeof(".libs/") - 1;
	}

	/* Re-attach the file-name component after the (optional) .libs/ */
	strcpy(newarg + newpathlen, arg + pathlen);
	ext = strrchr(newarg, '.');
	if (!ext) {
		ERROR("Error: Library path does not have an extension\n");
		free(newarg);

		return NULL;
	}
	ext++;

	pass = 0;

	do {
		struct stat sb;

		/* Each pass overwrites the extension in place and stat()s */
		switch (pass) {
		case 0:
			if (cmd->options.pic_mode != PIC_AVOID &&
			    cmd->options.shared != SHARE_STATIC) {
				strcpy(ext, DYNAMIC_LIB_EXT);
				*libtype = TYPE_DYNAMIC_LIB;
				break;
			}
			pass = 1;
			/* Fall through */
		case 1:
			strcpy(ext, STATIC_LIB_EXT);
			*libtype = TYPE_STATIC_LIB;
			break;
		case 2:
			strcpy(ext, MODULE_LIB_EXT);
			*libtype = TYPE_MODULE_LIB;
			break;
		case 3:
			strcpy(ext, OBJECT_EXT);
			*libtype = TYPE_OBJECT;
			break;
		default:
			*libtype = TYPE_UKNOWN;
			break;
		}

		DEBUG("Checking (lib): %s\n", newarg);
		rv = stat(newarg, &sb);
	}
	while (rv != 0 && ++pass < 4);

	if (rv == 0) {
		return newarg;
	}

	free(newarg);

	return NULL;
}
+
+static char * load_install_path(char const *arg)
+{
+ FILE *f;
+ char *path;
+
+ f = fopen(arg,"r");
+ if (f == NULL) {
+ return NULL;
+ }
+
+ path = lt_malloc(PATH_MAX);
+
+ fgets(path, PATH_MAX, f);
+ fclose(f);
+
+ if (path[strlen(path)-1] == '\n') {
+ path[strlen(path)-1] = '\0';
+ }
+
+ /* Check that we have an absolute path.
+ * Otherwise the file could be a GNU libtool file.
+ */
+ if (path[0] != '/') {
+ free(path);
+
+ return NULL;
+ }
+ return path;
+}
+
/** Build the uninstalled (build-tree) library path: "<dir>/.libs"
 *
 * @param arg library path whose directory part is used.
 * @param pathlen length of the directory part of arg.
 * @return a newly allocated path, canonicalised via realpath() where
 *	available.
 */
static char *load_noinstall_path(char const *arg, int pathlen)
{
	char *newarg, *expanded_path;
	int newpathlen;

	newarg = (char *)lt_malloc(strlen(arg) + 10);
	strcpy(newarg, arg);
	newarg[pathlen] = 0;

	newpathlen = pathlen;
	strcat(newarg, ".libs");
	newpathlen += sizeof(".libs") - 1;
	newarg[newpathlen] = 0;

#ifdef HAS_REALPATH
	expanded_path = lt_malloc(PATH_MAX);
	if (realpath(newarg, expanded_path)) {
		free(newarg);	/* canonical copy supersedes the guess */
	} else {
		/* Uh, oh.  There was an error.  Fall back on our first guess
		 * (and don't leak the unused PATH_MAX buffer).
		 */
		free(expanded_path);
		expanded_path = newarg;
	}
#else
	/* We might get ../ or something goofy.  Oh, well. */
	expanded_path = newarg;
#endif

	return expanded_path;
}
+
/** Append the platform's dynamic-link options to an argument list
 *
 * No-op when PIC is being avoided or DYNAMIC_LINK_OPTS is undefined.
 * An explicit -undefined flag from the command line takes precedence
 * over the platform default (DYNAMIC_LINK_UNDEFINED).
 *
 * @param cmd command context.
 * @param args argument list to append to.
 */
static void add_dynamic_link_opts(command_t *cmd, count_chars *args)
{
#ifdef DYNAMIC_LINK_OPTS
	if (cmd->options.pic_mode != PIC_AVOID) {
		DEBUG("Adding linker opt: %s\n", DYNAMIC_LINK_OPTS);

		push_count_chars(args, DYNAMIC_LINK_OPTS);
		if (cmd->undefined_flag) {
			push_count_chars(args, "-undefined");
#if defined(__APPLE__)
			/* -undefined dynamic_lookup is used by the bundled Python in
			 * 10.4, but if we don't set MACOSX_DEPLOYMENT_TARGET to 10.3+,
			 * we'll get a linker error if we pass this flag.
			 */
			if (strcasecmp(cmd->undefined_flag, "dynamic_lookup") == 0) {
				insert_count_chars(cmd->program_opts, "MACOSX_DEPLOYMENT_TARGET=10.3", 0);
			}
#endif
			push_count_chars(args, cmd->undefined_flag);
		}
		else {
#ifdef DYNAMIC_LINK_UNDEFINED
			DEBUG("Adding linker opt: %s\n", DYNAMIC_LINK_UNDEFINED);

			push_count_chars(args, DYNAMIC_LINK_UNDEFINED);
#endif
		}
	}
#endif
}
+
/* Read the final install location and add it to runtime library search path. */
#ifdef RPATH
/** Append a single rpath linker argument for path
 *
 * Builds "[LINKER_FLAG_PREFIX]RPATH[=]path" according to the platform
 * macros and pushes it onto cc.
 *
 * @param cc argument list to append to.
 * @param path runtime search directory to add.
 */
static void add_rpath(count_chars *cc, char const *path)
{
	int size = 0;
	char *tmp;

#ifdef LINKER_FLAG_PREFIX
	size = strlen(LINKER_FLAG_PREFIX);
#endif
	/* +2: optional '=' separator and the terminating NUL */
	size = size + strlen(path) + strlen(RPATH) + 2;
	tmp = lt_malloc(size);

#ifdef LINKER_FLAG_PREFIX
	strcpy(tmp, LINKER_FLAG_PREFIX);
	strcat(tmp, RPATH);
#else
	strcpy(tmp, RPATH);
#endif
#ifndef LINKER_FLAG_NO_EQUALS
	strcat(tmp, "=");
#endif
	strcat(tmp, path);

	push_count_chars(cc, tmp);
}
+
+static void add_rpath_file(count_chars *cc, char const *arg)
+{
+ char const *path;
+
+ path = load_install_path(arg);
+ if (path) {
+ add_rpath(cc, path);
+ lt_const_free(path);
+ }
+}
+
+static void add_rpath_noinstall(count_chars *cc, char const *arg, int pathlen)
+{
+ char const *path;
+
+ path = load_noinstall_path(arg, pathlen);
+ if (path) {
+ add_rpath(cc, path);
+ lt_const_free(path);
+ }
+}
+#endif
+
#ifdef DYNAMIC_LINK_NO_INSTALL
/** Emit the two-part (install:build) name argument for -no-install links
 *
 * Builds "<install_path>/<name><ext>:<current_path>/<name><ext>".
 *
 * @param cc argument list to append to.
 * @param arg the *.la file being linked.
 * @param pathlen length of arg's directory part.
 * @param extlen offset of arg's extension.
 */
static void add_dylink_noinstall(count_chars *cc, char const *arg, int pathlen,
				 int extlen)
{
	char const *install_path, *current_path, *name;
	char *exp_argument;
	int i_p_len, c_p_len, name_len, dyext_len, cur_len;

	install_path = load_install_path(arg);
	current_path = load_noinstall_path(arg, pathlen);

	if (!install_path || !current_path) {
		/* Don't leak whichever one we did manage to load */
		if (install_path) lt_const_free(install_path);
		if (current_path) lt_const_free(current_path);
		return;
	}

	push_count_chars(cc, DYNAMIC_LINK_NO_INSTALL);

	i_p_len = strlen(install_path);
	c_p_len = strlen(current_path);

	name = arg+pathlen;
	name_len = extlen-pathlen;
	dyext_len = sizeof(DYNAMIC_LIB_EXT) - 1;

	/* We need room for two '/', one ':' and the NUL: previously only
	 * +2 was allocated, overflowing the buffer by two bytes.
	 */
	exp_argument = (char *)lt_malloc(i_p_len + c_p_len + (name_len*2) +
					 (dyext_len*2) + 4);

	cur_len = 0;
	strcpy(exp_argument, install_path);
	cur_len += i_p_len;
	exp_argument[cur_len++] = '/';
	strncpy(exp_argument+cur_len, name, extlen-pathlen);
	cur_len += name_len;
	strcpy(exp_argument+cur_len, DYNAMIC_LIB_EXT);
	cur_len += dyext_len;
	exp_argument[cur_len++] = ':';
	strcpy(exp_argument+cur_len, current_path);
	cur_len += c_p_len;
	exp_argument[cur_len++] = '/';
	strncpy(exp_argument+cur_len, name, extlen-pathlen);
	cur_len += name_len;
	strcpy(exp_argument+cur_len, DYNAMIC_LIB_EXT);
	cur_len += dyext_len;

	push_count_chars(cc, exp_argument);

	lt_const_free(install_path);
	lt_const_free(current_path);
}
#endif
+
#ifdef ADD_MINUS_L
/* use -L -llibname to allow to use installed libraries */
/** Convert "path/libfoo.ext" into "-L path -lfoo" on cc
 *
 * WARNING: truncates the caller's string in place (writes NULs at the
 * last '/' and last '.') when the conversion applies.  If arg doesn't
 * look like path/libNAME.ext it is pushed unchanged.
 *
 * @param cc argument list to append to.
 * @param arg library path (modified in place on success).
 */
static void add_minus_l(count_chars *cc, char const *arg)
{
	char *newarg;
	char *name = strrchr(arg, '/');
	char *file = strrchr(arg, '.');

	if ((name != NULL) && (file != NULL) &&
		(strstr(name, "lib") == (name + 1))) {
		*name = '\0';		/* terminate the -L directory part */
		*file = '\0';		/* drop the extension */
		file = name;
		file = file+4;		/* skip "/lib" to get the stem */
		push_count_chars(cc, "-L");
		push_count_chars(cc, arg);
		/* we need one argument like -lapr-1 */
		newarg = lt_malloc(strlen(file) + 3);
		strcpy(newarg, "-l");
		strcat(newarg, file);
		push_count_chars(cc, newarg);
	} else {
		push_count_chars(cc, arg);
	}
}
#endif
+
#if 0
/* Currently compiled out: prepend LINKER_FLAG_PREFIX (if defined) to a
 * raw linker flag before pushing it.  Kept for reference.
 */
static void add_linker_flag_prefix(count_chars *cc, char const *arg)
{
#ifndef LINKER_FLAG_PREFIX
	push_count_chars(cc, arg);
#else
	char *newarg;
	newarg = (char*)lt_malloc(strlen(arg) + sizeof(LINKER_FLAG_PREFIX) + 1);
	strcpy(newarg, LINKER_FLAG_PREFIX);
	strcat(newarg, arg);
	push_count_chars(cc, newarg);
#endif
}
#endif
+
+static int explode_static_lib(command_t *cmd, char const *lib)
+{
+ count_chars tmpdir_cc, libname_cc;
+ char const *tmpdir, *libname;
+ char savewd[PATH_MAX];
+ char const *name;
+ DIR *dir;
+ struct dirent *entry;
+ char const *lib_args[4];
+
+ /* Bah! */
+ if (cmd->options.dry_run) {
+ return 0;
+ }
+
+ name = file_name(lib);
+
+ init_count_chars(&tmpdir_cc);
+ push_count_chars(&tmpdir_cc, ".libs/");
+ push_count_chars(&tmpdir_cc, name);
+ push_count_chars(&tmpdir_cc, ".exploded/");
+ tmpdir = flatten_count_chars(&tmpdir_cc, 0);
+
+ NOTICE("Making: %s\n", tmpdir);
+
+ safe_mkdir(cmd, tmpdir);
+
+ push_count_chars(cmd->tmp_dirs, tmpdir);
+
+ getcwd(savewd, sizeof(savewd));
+
+ if (chdir(tmpdir) != 0) {
+ NOTICE("Warning: could not explode %s\n", lib);
+
+ return 1;
+ }
+
+ if (lib[0] == '/') {
+ libname = lib;
+ }
+ else {
+ init_count_chars(&libname_cc);
+ push_count_chars(&libname_cc, "../../");
+ push_count_chars(&libname_cc, lib);
+ libname = flatten_count_chars(&libname_cc, 0);
+ }
+
+ lib_args[0] = LIBRARIAN;
+ lib_args[1] = "x";
+ lib_args[2] = libname;
+ lib_args[3] = NULL;
+
+ external_spawn(cmd, LIBRARIAN, lib_args);
+
+ chdir(savewd);
+ dir = opendir(tmpdir);
+
+ while ((entry = readdir(dir)) != NULL) {
+#if defined(__APPLE__) && defined(RANLIB)
+ /* Apple inserts __.SYMDEF which isn't needed.
+ * Leopard (10.5+) can also add '__.SYMDEF SORTED' which isn't
+ * much fun either. Just skip them.
+ */
+ if (strstr(entry->d_name, "__.SYMDEF") != NULL) {
+ continue;
+ }
+#endif
+ if (entry->d_name[0] != '.') {
+ push_count_chars(&tmpdir_cc, entry->d_name);
+ name = flatten_count_chars(&tmpdir_cc, 0);
+
+ DEBUG("Adding object: %s\n", name);
+ push_count_chars(cmd->obj_files, name);
+ pop_count_chars(&tmpdir_cc);
+ }
+ }
+
+ closedir(dir);
+ return 0;
+}
+
/** Classify an input file argument by extension and record it appropriately
 *
 * .a files and objects are queued for the linker, *.la files are
 * resolved to real libraries (link mode) or install targets (install
 * mode), and a .c file seeds the fake output name for compile mode.
 *
 * @param arg input file argument.
 * @param cmd command context updated in place.
 * @return 1 if the argument was consumed, 0 if the caller should keep it.
 */
static int parse_input_file_name(char const *arg, command_t *cmd)
{
	char const *ext = strrchr(arg, '.');
	char const *name;
	int pathlen;
	enum lib_type libtype;
	char const *newarg;

	/* Can't guess the extension */
	if (!ext) {
		return 0;
	}

	ext++;
	name = file_name(arg);
	pathlen = name - arg;

	/*
	 * We're linking and have an archived object or object file;
	 * push it onto the list of object files which'll get used
	 * to create the input files list for the linker.
	 *
	 * We assume that these are outside of the project we're building,
	 * as there's no reason to create .a files as part of the build
	 * process.
	 */
	if (!strcmp(ext, STATIC_LIB_EXT) && (cmd->mode == MODE_LINK)) {
		struct stat sb;

		if (!stat(arg, &sb)) {
			DEBUG("Adding object: %s\n", arg);

			push_count_chars(cmd->obj_files, arg);

			return 1;
		}
	}

	/*
	 * More object files; if we're linking they get set as input
	 * files.
	 */
	if (!strcmp(ext, "lo") || !strcmp(ext, OBJECT_EXT)) {
		newarg = check_object_exists(cmd, arg, ext - arg);
		if (!newarg) {
			ERROR("Can not find suitable object file for %s\n", arg);
			exit(1);
		}

		if (cmd->mode == MODE_LINK) {
			DEBUG("Adding object: %s\n", newarg);

			push_count_chars(cmd->obj_files, newarg);
		} else {
			push_count_chars(cmd->arglist, newarg);
		}

		return 1;
	}

	if (!strcmp(ext, "la")) {
		switch (cmd->mode) {
		case MODE_LINK:
			/* Try the .libs dir first! */
			newarg = check_library_exists(cmd, arg, pathlen, 1, &libtype);
			if (!newarg) {
				/* Try the normal dir next. */
				newarg = check_library_exists(cmd, arg, pathlen, 0, &libtype);
				if (!newarg) {
					ERROR("Can not find suitable library for %s\n", arg);
					exit(1);
				}
			}

			/* It is not ok to just add the file: a library may added with:
			   1 - -L path library_name. (For *.so in Linux).
			   2 - library_name.
			 */
#ifdef ADD_MINUS_L
			if (libtype == TYPE_DYNAMIC_LIB) {
				add_minus_l(cmd->shared_opts.dependencies, newarg);
			} else if (cmd->output == OUT_LIB &&
					   libtype == TYPE_STATIC_LIB) {
				explode_static_lib(cmd, newarg);
			} else {
				push_count_chars(cmd->shared_opts.dependencies, newarg);
			}
#else
			if (cmd->output == OUT_LIB && libtype == TYPE_STATIC_LIB) {
				explode_static_lib(cmd, newarg);
			}
			else {
				push_count_chars(cmd->shared_opts.dependencies, newarg);
			}
#endif
			/* Dynamic libraries also need a runtime search path */
			if (libtype == TYPE_DYNAMIC_LIB) {
				if (cmd->options.no_install) {
#ifdef RPATH
					add_rpath_noinstall(cmd->shared_opts.dependencies,
							    arg, pathlen);
#endif
				}
				else {
#ifdef RPATH
					add_rpath_file(cmd->shared_opts.dependencies, arg);
#endif
				}
			}
			break;
		case MODE_INSTALL:
			/*
			 * If we've already recorded a library to
			 * install, we're most likely getting the .la
			 * file that we want to install as.
			 *
			 * The problem is that we need to add it as the
			 * directory, not the .la file itself.
			 * Otherwise, we'll do odd things.
			 */
			if (cmd->output == OUT_LIB) {
				char *tmp;

				tmp = strdup(arg);
				tmp[pathlen] = '\0';
				push_count_chars(cmd->arglist, tmp);

			} else {
				cmd->output = OUT_LIB;
				cmd->output_name = arg;
				/* NOTE(review): bare 0/1/2 are assumed to match the
				 * TYPE_STATIC_LIB/TYPE_DYNAMIC_LIB/TYPE_MODULE_LIB enum
				 * order — confirm against the enum declaration.
				 */
				cmd->static_name.install = gen_install_name(arg, 0);
				cmd->shared_name.install = gen_install_name(arg, 1);
				cmd->module_name.install = gen_install_name(arg, 2);

				if (!cmd->static_name.install &&
				    !cmd->shared_name.install &&
				    !cmd->module_name.install) {
					ERROR("Files to install do not exist\n");
					exit(1);
				}

			}
			break;
		default:
			break;
		}

		return 1;
	}

	if (!strcmp(ext, "c")) {
		/* If we don't already have an idea what our output name will be. */
		if (!cmd->basename) {
			/* foo.c -> foo.lo: ".lo" is never longer than ".c" + 2 */
			char *tmp = lt_malloc(strlen(arg) + 4);
			strcpy(tmp, arg);
			strcpy(strrchr(tmp, '.') + 1, "lo");

			cmd->basename = tmp;

			cmd->fake_output_name = strrchr(cmd->basename, '/');
			if (cmd->fake_output_name) {
				cmd->fake_output_name++;
			} else {
				cmd->fake_output_name = cmd->basename;
			}
		}
	}

	return 0;
}
+
/** Classify the -o output argument and derive the real output name(s)
 *
 * Programs, *.la libraries, bare static/dynamic libraries and *.lo
 * objects each set cmd->output and the corresponding generated names.
 * For libraries the .libs scratch directory is created eagerly.
 *
 * @param arg the output file argument.
 * @param cmd command context updated in place.
 * @return 1 if the argument was recognised, else 0.
 */
static int parse_output_file_name(char const *arg, command_t *cmd)
{
	char const *name;
	char const *ext;
	char *newarg = NULL;
	size_t pathlen;

	cmd->fake_output_name = arg;

	name = file_name(arg);
	ext = strrchr(name, '.');

#ifdef EXE_EXT
	if (!ext || strcmp(ext, EXE_EXT) == 0) {
#else
	if (!ext) {
#endif
		/* No extension (or the exe extension): building a program */
		cmd->basename = arg;
		cmd->output = OUT_PROGRAM;
#if defined(_OSD_POSIX)
		cmd->options.pic_mode = PIC_AVOID;
#endif
		newarg = (char *)lt_malloc(strlen(arg) + 5);
		strcpy(newarg, arg);
#ifdef EXE_EXT
		if (!ext) {
			strcat(newarg, EXE_EXT);
		}
#endif
		cmd->output_name = newarg;
		return 1;
	}

	ext++;
	pathlen = name - arg;

	if (strcmp(ext, "la") == 0) {
		assert(cmd->mode == MODE_LINK);

		cmd->basename = arg;
		cmd->static_name.normal = gen_library_name(arg, TYPE_STATIC_LIB);
		cmd->shared_name.normal = gen_library_name(arg, TYPE_DYNAMIC_LIB);
		cmd->module_name.normal = gen_library_name(arg, TYPE_MODULE_LIB);
		cmd->static_name.install = gen_install_name(arg, TYPE_STATIC_LIB);
		cmd->shared_name.install = gen_install_name(arg, TYPE_DYNAMIC_LIB);
		cmd->module_name.install = gen_install_name(arg, TYPE_MODULE_LIB);

		/* Create the .libs directory the generated names live in */
		if (!cmd->options.dry_run) {
			char *newname;
			char *newext;
			newname = lt_malloc(strlen(cmd->static_name.normal) + 1);

			strcpy(newname, cmd->static_name.normal);
			newext = strrchr(newname, '/');
			if (!newext) {
				/* Check first to see if the dir already exists! */
				safe_mkdir(cmd, ".libs");
			} else {
				*newext = '\0';
				safe_mkdir(cmd, newname);
			}
			free(newname);
		}

#ifdef TRUNCATE_DLL_NAME
		/* NOTE(review): `shared` is not declared in this scope — this
		 * branch cannot compile if TRUNCATE_DLL_NAME is defined.
		 */
		if (shared) {
			arg = truncate_dll_name(arg);
		}
#endif

		cmd->output_name = arg;
		return 1;
	}

	if (strcmp(ext, STATIC_LIB_EXT) == 0) {
		assert(cmd->mode == MODE_LINK);

		cmd->basename = arg;
		cmd->options.shared = SHARE_STATIC;
		cmd->output = OUT_STATIC_LIB_ONLY;
		cmd->static_name.normal = gen_library_name(arg, TYPE_STATIC_LIB);
		cmd->static_name.install = gen_install_name(arg, TYPE_STATIC_LIB);

		if (!cmd->options.dry_run) {
			char *newname;
			char *newext;
			newname = lt_malloc(strlen(cmd->static_name.normal) + 1);

			strcpy(newname, cmd->static_name.normal);
			newext = strrchr(newname, '/');
			if (!newext) {
				/* Check first to see if the dir already exists! */
				safe_mkdir(cmd, ".libs");
			} else {
				*newext = '\0';
				safe_mkdir(cmd, newname);
			}
			free(newname);
		}

		cmd->output_name = arg;
		return 1;
	}

	if (strcmp(ext, DYNAMIC_LIB_EXT) == 0) {
		assert(cmd->mode == MODE_LINK);

		cmd->basename = arg;
		cmd->options.shared = SHARE_SHARED;
		cmd->output = OUT_DYNAMIC_LIB_ONLY;
		cmd->shared_name.normal = gen_library_name(arg, TYPE_DYNAMIC_LIB);
		cmd->module_name.normal = gen_library_name(arg, TYPE_MODULE_LIB);
		cmd->shared_name.install = gen_install_name(arg, TYPE_DYNAMIC_LIB);
		cmd->module_name.install = gen_install_name(arg, TYPE_MODULE_LIB);

		if (!cmd->options.dry_run) {
			char *newname;
			char *newext;
			newname = lt_malloc(strlen(cmd->shared_name.normal) + 1);

			strcpy(newname, cmd->shared_name.normal);
			newext = strrchr(newname, '/');
			if (!newext) {
				/* Check first to see if the dir already exists! */
				safe_mkdir(cmd, ".libs");
			} else {
				*newext = '\0';
				safe_mkdir(cmd, newname);
			}
			free(newname);
		}

		cmd->output_name = arg;
		return 1;
	}

	if (strcmp(ext, "lo") == 0) {
		char *newext;
		cmd->basename = arg;
		cmd->output = OUT_OBJECT;
		newarg = (char *)lt_malloc(strlen(arg) + 2);
		strcpy(newarg, arg);
		newext = strrchr(newarg, '.') + 1;
		strcpy(newext, OBJECT_EXT);
		cmd->output_name = newarg;
		return 1;
	}

	/* NOTE(review): the two branches below are unreachable — the same
	 * extensions were already consumed (and accepted) above.
	 */
	if (strcmp(ext, DYNAMIC_LIB_EXT) == 0) {
		ERROR("Please build libraries with .la target, not ."
		      DYNAMIC_LIB_EXT "\n");

		exit(1);
	}

	if (strcmp(ext, STATIC_LIB_EXT) == 0) {
		ERROR("Please build libraries with .la target, not ."
		      STATIC_LIB_EXT "\n");

		exit(1);
	}

	return 0;
}
+
+static char const *automode(char const *arg, command_t *cmd)
+{
+ if (cmd->mode != MODE_UNKNOWN) return arg;
+
+ if (!strcmp(arg, "CC") ||
+ !strcmp(arg, "CXX")) {
+ DEBUG("Now in compile mode, guessed from: %s\n", arg);
+ arg = CC;
+ cmd->mode = MODE_COMPILE;
+
+ } else if (!strcmp(arg, "LINK") ||
+ !strcmp(arg, "LINK.c") ||
+ !strcmp(arg, "LINK.cxx")) {
+ DEBUG("Now in linker mode, guessed from: %s\n", arg);
+ arg = LINK_C;
+ cmd->mode = MODE_LINK;
+ }
+
+ return arg;
+}
+
+
#ifdef GEN_EXPORTS
/** Generate a .def export file and matching import library for a DLL
 *
 * Writes <output>.def (exports harvested from the object files via
 * GEN_EXPORTS), appends it to the arglist, then builds an import
 * library from it with DEF2IMPLIB_CMD.
 *
 * NOTE(review): the local command string previously shadowed the
 * command_t parameter (both were named "cmd"), which cannot compile
 * when GEN_EXPORTS is defined — renamed to def_cmd.  The
 * cmd->num_obj_files / cmd->obj_files[a] array accesses also look stale
 * relative to the count_chars-based fields used elsewhere in this file;
 * confirm against the command_t declaration before enabling GEN_EXPORTS.
 *
 * @param cmd command context.
 */
static void generate_def_file(command_t *cmd)
{
	char def_file[1024];
	char implib_file[1024];
	char *ext;
	FILE *hDef;
	char *export_args[1024];
	int num_export_args = 0;
	char *def_cmd;
	int cmd_size = 0;
	int a;

	if (cmd->output_name) {
		strcpy(def_file, cmd->output_name);
		strcat(def_file, ".def");
		hDef = fopen(def_file, "w");

		if (hDef != NULL) {
			fprintf(hDef, "LIBRARY '%s' INITINSTANCE\n", file_name_stripped(cmd->output_name));
			fprintf(hDef, "DATA NONSHARED\n");
			fprintf(hDef, "EXPORTS\n");
			fclose(hDef);

			for (a = 0; a < cmd->num_obj_files; a++) {
				cmd_size += strlen(cmd->obj_files[a]) + 1;
			}

			cmd_size += strlen(GEN_EXPORTS) + strlen(def_file) + 3;
			def_cmd = (char *)lt_malloc(cmd_size);
			strcpy(def_cmd, GEN_EXPORTS);

			for (a = 0; a < cmd->num_obj_files; a++) {
				strcat(def_cmd, " ");
				strcat(def_cmd, cmd->obj_files[a]);
			}

			strcat(def_cmd, ">>");
			strcat(def_cmd, def_file);
			puts(def_cmd);
			export_args[num_export_args++] = SHELL_CMD;
			export_args[num_export_args++] = "-c";
			export_args[num_export_args++] = def_cmd;
			export_args[num_export_args++] = NULL;
			external_spawn(cmd, export_args[0], (char const**)export_args);
			cmd->arglist[cmd->num_args++] = strdup(def_file);

			/* Now make an import library for the dll */
			num_export_args = 0;
			export_args[num_export_args++] = DEF2IMPLIB_CMD;
			export_args[num_export_args++] = "-o";

			strcpy(implib_file, ".libs/");
			strcat(implib_file, cmd->basename);
			ext = strrchr(implib_file, '.');

			if (ext) {
				*ext = '\0';
			}

			strcat(implib_file, ".");
			strcat(implib_file, STATIC_LIB_EXT);

			export_args[num_export_args++] = implib_file;
			export_args[num_export_args++] = def_file;
			export_args[num_export_args++] = NULL;
			external_spawn(cmd, export_args[0], (char const**)export_args);

		}
	}
}
#endif
+
#if 0
/* Currently compiled out: prefix a relative path with the current
 * working directory.  Kept for reference.
 */
static char const* expand_path(char const *relpath)
{
	char foo[PATH_MAX], *newpath;

	getcwd(foo, PATH_MAX-1);
	newpath = (char*)lt_malloc(strlen(foo)+strlen(relpath)+2);
	strcpy(newpath, foo);
	strcat(newpath, "/");
	strcat(newpath, relpath);
	return newpath;
}
#endif
+
/** Finalise the linker argument lists once all arguments are parsed
 *
 * Fills in the "-o <name>" arguments for whichever outputs (shared,
 * static, program) will actually be built, appends object files and
 * dependencies, and optionally triggers .def generation.
 *
 * @param cmd command context updated in place.
 */
static void link_fixup(command_t *cmd)
{
	/* If we were passed an -rpath directive, we need to build
	 * shared objects too.  Otherwise, we should only create static
	 * libraries.
	 */
	if (!cmd->install_path && (cmd->output == OUT_DYNAMIC_LIB_ONLY ||
		cmd->output == OUT_MODULE || cmd->output == OUT_LIB)) {
		if (cmd->options.shared == SHARE_SHARED) {
			cmd->install_path = LIBDIR;
		}
	}

	if (cmd->output == OUT_DYNAMIC_LIB_ONLY ||
		cmd->output == OUT_MODULE ||
		cmd->output == OUT_LIB) {

		push_count_chars(cmd->shared_opts.normal, "-o");
		if (cmd->output == OUT_MODULE) {
			push_count_chars(cmd->shared_opts.normal, cmd->module_name.normal);
		} else {
			push_count_chars(cmd->shared_opts.normal, cmd->shared_name.normal);
#ifdef DYNAMIC_INSTALL_NAME
			/* Darwin: embed the final install path in the dylib */
			push_count_chars(cmd->shared_opts.normal, DYNAMIC_INSTALL_NAME);

			if (!cmd->install_path) {
				ERROR("Installation mode requires -rpath\n");
				exit(1);
			}

			{
				char *tmp = lt_malloc(PATH_MAX);
				strcpy(tmp, cmd->install_path);

				if (cmd->shared_name.install) {
					strcat(tmp, strrchr(cmd->shared_name.install, '/'));
				} else {
					strcat(tmp, strrchr(cmd->shared_name.normal, '/'));
				}

				push_count_chars(cmd->shared_opts.normal, tmp);
			}
#endif
		}

		append_count_chars(cmd->shared_opts.normal, cmd->obj_files);
		append_count_chars(cmd->shared_opts.normal, cmd->shared_opts.dependencies);

		if (cmd->options.export_all) {
#ifdef GEN_EXPORTS
			generate_def_file(cmd);
#endif
		}
	}

	if (cmd->output == OUT_LIB || cmd->output == OUT_STATIC_LIB_ONLY) {
		push_count_chars(cmd->static_opts.normal, "-o");
		push_count_chars(cmd->static_opts.normal, cmd->output_name);
	}

	if (cmd->output == OUT_PROGRAM) {
		if (cmd->output_name) {
			push_count_chars(cmd->arglist, "-o");
			push_count_chars(cmd->arglist, cmd->output_name);
			append_count_chars(cmd->arglist, cmd->obj_files);
			append_count_chars(cmd->arglist, cmd->shared_opts.dependencies);
			add_dynamic_link_opts(cmd, cmd->arglist);
		}
	}
}
+
/** Apply mode-dependent argument fix-ups after all parsing is done
 *
 * Adds PIC flags and -o for compiles, runs link_fixup() for links (and
 * for library installs), then appends platform-specific output flags.
 *
 * @param cmd command context updated in place.
 */
static void post_parse_fixup(command_t *cmd)
{
	switch (cmd->mode) {
	case MODE_COMPILE:
#ifdef PIC_FLAG
		if (cmd->options.pic_mode != PIC_AVOID) {
			push_count_chars(cmd->arglist, PIC_FLAG);
		}
#endif
		if (cmd->output_name) {
			push_count_chars(cmd->arglist, "-o");
			push_count_chars(cmd->arglist, cmd->output_name);
		}
		break;
	case MODE_LINK:
		link_fixup(cmd);
		break;
	case MODE_INSTALL:
		if (cmd->output == OUT_LIB) {
			link_fixup(cmd);
		}
		/* FALL-THROUGH (no further work for installs) */
	default:
		break;
	}

#ifdef USE_OMF
	if (cmd->output == OUT_OBJECT ||
		cmd->output == OUT_PROGRAM ||
		cmd->output == OUT_LIB ||
		cmd->output == OUT_DYNAMIC_LIB_ONLY) {
		push_count_chars(cmd->arglist, "-Zomf");
	}
#endif

	if (cmd->options.shared &&
			(cmd->output == OUT_OBJECT ||
			 cmd->output == OUT_LIB ||
			 cmd->output == OUT_DYNAMIC_LIB_ONLY)) {
#ifdef SHARE_SW
		push_count_chars(cmd->arglist, SHARE_SW);
#endif
	}
}
+
/** Execute the commands implied by the current mode
 *
 * Compile runs the compiler; install repeats the install command once
 * per artefact (program, static, shared, module); link runs the
 * librarian and/or linker; execute runs the target with the library
 * search paths pointed at the build tree.
 *
 * @param cmd fully parsed and fixed-up command context.
 * @return 0 on success, else the first failing command's status.
 */
static int run_mode(command_t *cmd)
{
	int rv = 0;
	count_chars *cctemp;

	cctemp = (count_chars*)lt_malloc(sizeof(count_chars));
	init_count_chars(cctemp);

	switch (cmd->mode) {
	case MODE_COMPILE:
		rv = run_command(cmd, cmd->arglist);
		if (rv) goto finish;
		break;
	case MODE_INSTALL:
		/* Well, we'll assume it's a file going to a directory... */
		/* For brain-dead install-sh based scripts, we have to repeat
		 * the command N-times.  install-sh should die.
		 */
		if (!cmd->output_name) {
			rv = run_command(cmd, cmd->arglist);
			if (rv) goto finish;
		}
		if (cmd->output_name) {
			append_count_chars(cctemp, cmd->arglist);
			insert_count_chars(cctemp,
							   cmd->output_name,
							   cctemp->num - 1);
			rv = run_command(cmd, cctemp);
			if (rv) goto finish;
			clear_count_chars(cctemp);
		}
		if (cmd->static_name.install) {
			append_count_chars(cctemp, cmd->arglist);
			insert_count_chars(cctemp,
							   cmd->static_name.install,
							   cctemp->num - 1);
			rv = run_command(cmd, cctemp);
			if (rv) goto finish;
#if defined(__APPLE__) && defined(RANLIB)
			/* From the Apple libtool(1) manpage on Tiger/10.4:
			 * ----
			 * With the way libraries used to be created, errors were possible
			 * if the library was modified with ar(1) and the table of
			 * contents was not updated by rerunning ranlib(1).  Thus the
			 * link editor, ld, warns when the modification date of a library
			 * is more recent than the creation date of its table of
			 * contents.  Unfortunately, this means that you get the warning
			 * even if you only copy the library.
			 * ----
			 *
			 * This means that when we install the static archive, we need to
			 * rerun ranlib afterwards.
			 */
			char const *lib_args[3], *static_lib_name;

			{
				char *tmp;
				size_t len1, len2;

				len1 = strlen(cmd->arglist->vals[cmd->arglist->num - 1]);

				static_lib_name = file_name(cmd->static_name.install);
				len2 = strlen(static_lib_name);

				tmp = lt_malloc(len1 + len2 + 2);

				snprintf(tmp, len1 + len2 + 2, "%s/%s",
						cmd->arglist->vals[cmd->arglist->num - 1],
						static_lib_name);

				lib_args[0] = RANLIB;
				lib_args[1] = tmp;
				lib_args[2] = NULL;

				external_spawn(cmd, RANLIB, lib_args);

				free(tmp);
			}
#endif
			clear_count_chars(cctemp);
		}
		if (cmd->shared_name.install) {
			append_count_chars(cctemp, cmd->arglist);
			insert_count_chars(cctemp, cmd->shared_name.install,
							   cctemp->num - 1);
			rv = run_command(cmd, cctemp);
			if (rv) goto finish;
			clear_count_chars(cctemp);
		}
		if (cmd->module_name.install) {
			append_count_chars(cctemp, cmd->arglist);
			insert_count_chars(cctemp, cmd->module_name.install,
							   cctemp->num - 1);
			rv = run_command(cmd, cctemp);
			if (rv) goto finish;
			clear_count_chars(cctemp);
		}
		break;
	case MODE_LINK:
		if (cmd->output == OUT_STATIC_LIB_ONLY ||
			cmd->output == OUT_LIB) {
#ifdef RANLIB
			char const *lib_args[3];
#endif
			/* Removes compiler! */
			cmd->program = LIBRARIAN;
			push_count_chars(cmd->program_opts, LIBRARIAN_OPTS);
			push_count_chars(cmd->program_opts, cmd->static_name.normal);

			rv = run_command(cmd, cmd->obj_files);
			if (rv) goto finish;

#ifdef RANLIB
			lib_args[0] = RANLIB;
			lib_args[1] = cmd->static_name.normal;
			lib_args[2] = NULL;
			external_spawn(cmd, RANLIB, lib_args);
#endif
		}

		if (cmd->output == OUT_DYNAMIC_LIB_ONLY ||
			cmd->output == OUT_MODULE ||
			cmd->output == OUT_LIB) {
			cmd->program = NULL;
			clear_count_chars(cmd->program_opts);

			append_count_chars(cmd->program_opts, cmd->arglist);
			if (cmd->output == OUT_MODULE) {
#ifdef MODULE_OPTS
				push_count_chars(cmd->program_opts, MODULE_OPTS);
#endif
			} else {
#ifdef SHARED_OPTS
				push_count_chars(cmd->program_opts, SHARED_OPTS);
#endif
#ifdef dynamic_link_version_func
				push_count_chars(cmd->program_opts,
						 dynamic_link_version_func(cmd->version_info));
#endif
			}
			add_dynamic_link_opts(cmd, cmd->program_opts);

			rv = run_command(cmd, cmd->shared_opts.normal);
			if (rv) goto finish;
		}
		if (cmd->output == OUT_PROGRAM) {
			rv = run_command(cmd, cmd->arglist);
			if (rv) goto finish;
		}
		break;
	case MODE_EXECUTE:
	{
		char *l, libpath[PATH_MAX];

		if (!cmd->arglist->num) {
			ERROR("No command to execute.\n");
			rv = 1;

			goto finish;
		}

		if (strlen(cmd->arglist->vals[0]) >= PATH_MAX) {
			ERROR("Libpath too long no buffer space\n");
			rv = 1;

			goto finish;
		}

		strcpy(libpath, cmd->arglist->vals[0]);
		add_dotlibs(libpath);
		l = strrchr(libpath, '/');
		if (!l) l = strrchr(libpath, '\\');
		if (l) {
			*l = '\0';
			l = libpath;
		} else {
			l = ".libs/";
		}

		/* NOTE(review): the derived libpath above is discarded and a
		 * FreeRADIUS-specific build-tree path is hardcoded instead.
		 */
		l = "./build/lib/.libs";
		setenv(LD_LIBRARY_PATH_LOCAL, l, 1);
#ifdef __APPLE__
		setenv("DYLD_FALLBACK_LIBRARY_PATH", l, 1);
#endif

		if (!getenv("FR_LIBRARY_PATH")) {
			setenv("FR_LIBRARY_PATH", "./build/lib/local/.libs", 1);
		}

		rv = run_command(cmd, cmd->arglist);
		if (rv) goto finish;
	}
		break;

	default:
		break;
	}

	finish:

	free(cctemp);
	return rv;
}
+
/** Remove every non-hidden file in dirname, then the directory itself
 *
 * Used to clean up the .exploded archive scratch directories.  Errors
 * from remove()/rmdir() are ignored; this is best-effort cleanup.
 *
 * @param dirname directory to remove.
 */
static void cleanup_tmp_dir(char const *dirname)
{
	DIR *dir;
	struct dirent *entry;
	char fullname[1024];

	dir = opendir(dirname);
	if (!dir) {
		return;
	}

	/* Worst case entry must fit: dirname + '/' + d_name + NUL */
	if ((strlen(dirname) + 1 + sizeof(entry->d_name)) >= sizeof(fullname)) {
		ERROR("Dirname too long, out of buffer space\n");

		(void) closedir(dir);
		return;
	}

	while ((entry = readdir(dir)) != NULL) {
		if (entry->d_name[0] != '.') {
			strcpy(fullname, dirname);
			strcat(fullname, "/");
			strcat(fullname, entry->d_name);
			(void) remove(fullname);
		}
	}

	/* Close the handle before removing the directory it refers to
	 * (previously rmdir() was issued while the DIR was still open).
	 */
	(void) closedir(dir);

	rmdir(dirname);
}
+
+static void cleanup_tmp_dirs(command_t *cmd)
+{
+ int d;
+
+ for (d = 0; d < cmd->tmp_dirs->num; d++) {
+ cleanup_tmp_dir(cmd->tmp_dirs->vals[d]);
+ }
+}
+
+static int ensure_fake_uptodate(command_t *cmd)
+{
+ /* FIXME: could do the stat/touch here, but nah... */
+ char const *touch_args[3];
+
+ if (cmd->mode == MODE_INSTALL) {
+ return 0;
+ }
+ if (!cmd->fake_output_name) {
+ return 0;
+ }
+
+ touch_args[0] = "touch";
+ touch_args[1] = cmd->fake_output_name;
+ touch_args[2] = NULL;
+ return external_spawn(cmd, "touch", touch_args);
+}
+
+/* Store the install path in the *.la file */
+static int add_for_runtime(command_t *cmd)
+{
+ if (cmd->mode == MODE_INSTALL) {
+ return 0;
+ }
+ if (cmd->output == OUT_DYNAMIC_LIB_ONLY ||
+ cmd->output == OUT_LIB) {
+ FILE *f=fopen(cmd->fake_output_name,"w");
+ if (f == NULL) {
+ return -1;
+ }
+ fprintf(f,"%s\n", cmd->install_path);
+ fclose(f);
+ return(0);
+ } else {
+ return(ensure_fake_uptodate(cmd));
+ }
+}
+
+/*
+ * Parse the command line into cmd.
+ *
+ * Three phases:
+ * 1. If the program was invoked under a "magic" name (CC, LINK, ...)
+ * let automode() pick the real tool and mode.
+ * 2. A first scan of argv: if there is no explicit --mode= and no
+ * magic first argument, remember the "-o" output name so the mode
+ * can be guessed from its extension.
+ * 3. The main scan, which dispatches each argument to the long/short
+ * option parsers, handles arg/value pairs, or records it as an
+ * input file / literal argument.
+ *
+ * Side effects: fills cmd->mode, cmd->arglist, cmd->install_path,
+ * cmd->version_info, cmd->undefined_flag and the rpath lists.
+ */
+static void parse_args(int argc, char *argv[], command_t *cmd)
+{
+ int a;
+ char const *arg, *base;
+ int arg_used;
+
+ /*
+ * We now take a major step past libtool.
+ *
+ * IF there's no "--mode=...", AND we recognise
+ * the binary as a "special" name, THEN replace it
+ * with the correct one, and set the correct mode.
+ *
+ * For example if we're called 'CC' then we know we should
+ * probably be compiling stuff.
+ */
+ base = file_name(argv[0]);
+ arg = automode(base, cmd);
+ if (arg != base) {
+ push_count_chars(cmd->arglist, arg);
+
+ assert(cmd->mode != MODE_UNKNOWN);
+ }
+
+ /*
+ * We first pass over the command-line arguments looking for
+ * "--mode", etc. If so, then use the libtool compatibility
+ * method for building the software. Otherwise, auto-detect it
+ * via "-o" and the extensions.
+ */
+ base = NULL;
+ if (cmd->mode == MODE_UNKNOWN) for (a = 1; a < argc; a++) {
+ arg = argv[a];
+
+ if (strncmp(arg, "--mode=", 7) == 0) {
+ base = NULL;
+ break;
+ }
+
+ /*
+ * Stop if we get another magic method
+ */
+ if ((a == 1) &&
+ ((strncmp(arg, "LINK", 4) == 0) ||
+ (strcmp(arg, "CC") == 0) ||
+ (strcmp(arg, "CXX") == 0))) {
+ base = NULL;
+ break;
+ }
+
+ /*
+ * NOTE(review): if "-o" is the last argument this reads
+ * argv[argc], i.e. the NULL terminator -- base becomes
+ * NULL and the extension guess below is skipped.
+ */
+ if (strncmp(arg, "-o", 2) == 0) {
+ base = argv[++a];
+ }
+ }
+
+ /*
+ * There were no magic args or an explicit --mode= but we did
+ * find an output file, so guess what mode were meant to be in
+ * from its extension.
+ */
+ if (base) {
+ arg = strrchr(base, '.');
+ /* No extension at all: assume we're linking an executable. */
+ if (!arg) {
+ cmd->mode = MODE_LINK;
+ push_count_chars(cmd->arglist, LINK_C);
+ }
+#ifdef EXE_EXT
+ else if (strcmp(arg, EXE_EXT) == 0) {
+ cmd->mode = MODE_LINK;
+ push_count_chars(cmd->arglist, LINK_C);
+ }
+#endif
+ else if (strcmp(arg + 1, DYNAMIC_LIB_EXT) == 0) {
+ cmd->mode = MODE_LINK;
+ push_count_chars(cmd->arglist, LINK_C);
+ }
+ else if (strcmp(arg + 1, STATIC_LIB_EXT) == 0) {
+ cmd->mode = MODE_LINK;
+ push_count_chars(cmd->arglist, LINK_C);
+ }
+ else if (strcmp(arg + 1, "la") == 0) {
+ cmd->mode = MODE_LINK;
+ push_count_chars(cmd->arglist, LINK_C);
+ }
+ /* Object outputs (.lo / .o) mean we're compiling. */
+ else if ((strcmp(arg + 1, "lo") == 0) ||
+ (strcmp(arg + 1, "o") == 0)) {
+ cmd->mode = MODE_COMPILE;
+ push_count_chars(cmd->arglist, CC);
+ }
+ }
+
+ /*
+ * Main pass: consume each argument in order.
+ */
+ for (a = 1; a < argc; a++) {
+ arg = argv[a];
+ arg_used = 1;
+
+ /* In execute mode everything is passed through verbatim. */
+ if (cmd->mode == MODE_EXECUTE) {
+ push_count_chars(cmd->arglist, arg);
+ continue;
+ }
+
+ if (arg[0] == '-') {
+ /*
+ * Double dashed (long) single dash (short)
+ */
+ arg_used = (arg[1] == '-') ?
+ parse_long_opt(arg + 2, cmd) :
+ parse_short_opt(arg + 1, cmd);
+
+ if (arg_used) continue;
+
+ /*
+ * Ignore all options after the '--execute'
+ */
+ if (cmd->mode == MODE_EXECUTE) continue;
+
+ /*
+ * We haven't done anything with it yet, but
+ * there are still some arg/value pairs.
+ *
+ * Try some of the more complicated short opts...
+ */
+ if (a + 1 < argc) {
+ /*
+ * We found an output file!
+ */
+ if ((arg[1] == 'o') && (arg[2] == '\0')) {
+ arg = argv[++a];
+ arg_used = parse_output_file_name(arg,
+ cmd);
+ /*
+ * -MT literal dependency
+ */
+ } else if (!strcmp(arg + 1, "MT")) {
+ DEBUG("Adding: %s\n", arg);
+
+ push_count_chars(cmd->arglist, arg);
+ arg = argv[++a];
+
+ NOTICE(" %s\n", arg);
+
+ push_count_chars(cmd->arglist, arg);
+ arg_used = 1;
+ /*
+ * Runtime library search path
+ */
+ } else if (!strcmp(arg + 1, "rpath")) {
+ /* Aha, we should try to link both! */
+ cmd->install_path = argv[++a];
+ arg_used = 1;
+
+ } else if (!strcmp(arg + 1, "release")) {
+ /* Store for later deciphering */
+ cmd->version_info = argv[++a];
+ arg_used = 1;
+
+ } else if (!strcmp(arg + 1, "version-info")) {
+ /* Store for later deciphering */
+ cmd->version_info = argv[++a];
+ arg_used = 1;
+
+ } else if (!strcmp(arg + 1,
+ "export-symbols-regex")) {
+ /* Skip the argument. */
+ ++a;
+ arg_used = 1;
+
+ } else if (!strcmp(arg + 1, "undefined")) {
+ cmd->undefined_flag = argv[++a];
+ arg_used = 1;
+ /*
+ * Add dir to runtime library search path.
+ */
+ } else if ((arg[1] == 'R') && !arg[2]) {
+
+ add_runtime_dir_lib(argv[++a], cmd);
+ arg_used = 1;
+ }
+ }
+ /*
+ * Ok.. the argument doesn't begin with a dash
+ * maybe it's an input file.
+ *
+ * Check its extension to see if it's a known input
+ * file and verify it exists.
+ */
+ } else {
+ arg_used = parse_input_file_name(arg, cmd);
+ }
+
+ /*
+ * If we still don't have a run mode, look for a magic
+ * program name CC, LINK, or whatever. Then replace that
+ * with the name of the real program we want to run.
+ */
+ if (!arg_used) {
+ if ((cmd->arglist->num == 0) &&
+ (cmd->mode == MODE_UNKNOWN)) {
+ arg = automode(arg, cmd);
+ }
+
+ DEBUG("Adding: %s\n", arg);
+
+ push_count_chars(cmd->arglist, arg);
+ }
+ }
+
+}
+
+int main(int argc, char *argv[])
+{
+ command_t cmd;
+ int ret;
+
+ memset(&cmd, 0, sizeof(cmd));
+
+ /*
+ * Start from a clean slate: no PIC decision, no mode, and the
+ * generic output class until the arguments say otherwise.
+ */
+ cmd.options.pic_mode = PIC_UNKNOWN;
+ cmd.mode = MODE_UNKNOWN;
+ cmd.output = OUT_GENERAL;
+
+ /*
+ * Every argument list starts out empty.
+ */
+ cmd.program_opts = alloc_countchars();
+ cmd.arglist = alloc_countchars();
+ cmd.tmp_dirs = alloc_countchars();
+ cmd.obj_files = alloc_countchars();
+ cmd.dep_rpaths = alloc_countchars();
+ cmd.rpaths = alloc_countchars();
+ cmd.static_opts.normal = alloc_countchars();
+ cmd.shared_opts.normal = alloc_countchars();
+ cmd.shared_opts.dependencies = alloc_countchars();
+
+ /*
+ * Parse the command line, then apply the fixups which can
+ * only be done once every argument has been seen.
+ */
+ parse_args(argc, argv, &cmd);
+ post_parse_fixup(&cmd);
+
+ /*
+ * We couldn't figure out which mode to operate in.
+ */
+ if (cmd.mode == MODE_UNKNOWN) usage(1);
+
+ ret = run_mode(&cmd);
+ if (!ret) add_for_runtime(&cmd);
+
+ cleanup_tmp_dirs(&cmd);
+
+ return ret;
+}
diff --git a/scripts/ldap/radiusd2ldif.pl b/scripts/ldap/radiusd2ldif.pl
new file mode 100755
index 0000000..4dbd04f
--- /dev/null
+++ b/scripts/ldap/radiusd2ldif.pl
@@ -0,0 +1,307 @@
+#!/usr/bin/perl
+
+# radiusd2ldif.pl
+#
+# To test this program, do the following
+#Take a radius users' file, for example with:
+#
+#myuser Password = "apassword"
+# User-Service = Framed-User,
+# Framed-Protocol = PPP,
+# Framed-Address = 255.255.255.255,
+# Framed-Netmask = 255.255.255.255,
+# Ascend-Metric = 2,
+# Framed-Routing = None,
+# Framed-Compression = 0,
+# Ascend-Idle-Limit = 0,
+# Ascend-Maximum-Time = 36000
+#
+#and do:
+#
+#cat users | ./radius2ldif
+#
+#Output is:
+#dn: cn=myuser, ou=Hardware, ou=EDUCAMADRID, ou=People, o=icm.es
+#objectclass: top
+#objectclass: person
+#objectclass: radiusprofile
+#cn: myuser
+#sn: myuser
+#userpassword: apassword
+#radiusServiceType: Framed-User
+#radiusFramedProtocol: PPP
+#radiusFramedIPAddress: 255.255.255.255
+#radiusFramedIPNetmask: 255.255.255.255
+#radiusFramedRouting: None
+#radiusFramedCompression: 0
+#
+#dn: ou=RadiusUser, ou=Groups, o=icm.es
+#description: RadiusUser
+#objectclass: top
+#objectclass: groupOfUniqueNames
+#cn: RadiusUser
+#uniquemember: dn: cn=myuser, ou=Hardware, ou=EDUCAMADRID, ou=People, o=icm.es
+#
+# (c) 2000 Javier Fern'andez-Sanguino Pen~a <jfs@computer.org>
+# -------------------------------------------------------------------------
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA
+# -----------------------------------------------------------------------
+
+
+# TODO:
+# currently does not encrypt passwords (takes them from outside file)
+
+# Command line options
+# -d : debugging output
+# -p : give only password
+# -m : set entry to modify ldap attributes
+# -f : read encrypted passwords from file
+use Getopt::Std;
+getopts('dpmf:');
+$debug = $opt_d;
+
+# Map of plaintext password -> crypted form, filled by read_passwds().
+# The previous bare statement "%passwords;" was a no-op ("useless use
+# of a hash in void context"); initialise the hash explicitly.
+%passwords = ();
+# This might or might not be necessary depending if your LDAP server
+# when importing from ldif introduces crypted passwords in the LDAP db
+# (not necessary for Netscape's Directory Server)
+read_passwds ($opt_f) if $opt_f;
+
+# USER CONFIGURATION
+# ------------------
+$usermatch = ".*"; # only add users matching this
+# WARNING: in order to add *all* users set this to ".*" NOT ""
+
+# LDAP configuration
+$domain = "o=icm.es";
+$basedn = ", ou=Hardware, ou=EDUCAMADRID, ou=People, $domain";
+$predn = "dn: cn=";
+$uniquemembers = 1;
+$groupname = "RadiusUser"; # group to add in the LDAP, if null will not add
+$group = "\n\ndn: ou=$groupname, ou=Groups, $domain";
+# Only useful for adding the group (not yet implemented)
+$addgroup = $group."\ndescription: $groupname\nobjectclass: top";
+if ( $uniquemembers ) {
+$addgroup = $addgroup."\nobjectclass: groupOfUniqueNames";
+} else {
+$addgroup = $addgroup."\nobjectclass: groupOfNames";
+}
+$addgroup = $addgroup."\ncn: $groupname";
+# The following group must be created first
+# (ldif entry), the script will *not* create it
+#cn=$group,ou=Groups,o=icm.es
+#description=whatever
+#objectclass=top
+#objectclass=groupOfUniqueNames
+# (or objectclass=groupOfNames)
+#cn=$group
+# Required: person (for userpasswords) and radiusprofile (<draft-aboba-radius-02.txt> 5 February 1998)
+@objectClass = ( "top", "person" , "radiusprofile" );
+
+
+# Mapping of entries (use lower case so no check needs to be made)
+# From freeradius: rlm_ldap.c
+# { "radiusServiceType", "Service-Type" },
+# { "radiusFramedProtocol", "Framed-Protocol" },
+# { "radiusFramedIPAddress", "Framed-IP-Address" },
+# { "radiusFramedIPNetmask", "Framed-IP-Netmask" },
+# { "radiusFramedRoute", "Framed-Route" },
+# { "radiusFramedRouting", "Framed-Routing" },
+# { "radiusFilterId", "Filter-Id" },
+# { "radiusFramedMTU", "Framed-MTU" },
+# { "radiusFramedCompression", "Framed-Compression" },
+# { "radiusLoginIPHost", "Login-IP-Host" },
+# { "radiusLoginService", "Login-Service" },
+# { "radiusLoginTCPPort", "Login-TCP-Port" },
+# { "radiusCallbackNumber", "Callback-Number" },
+# { "radiusCallbackId", "Callback-Id" },
+# { "radiusFramedRoute", "Framed-Route" },
+# { "radiusFramedIPXNetwork", "Framed-IPX-Network" },
+# { "radiusClass", "Class" },
+# { "radiusSessionTimeout", "Session-Timeout" },
+# { "radiusIdleTimeout", "Idle-Timeout" },
+# { "radiusTerminationAction", "Termination-Action" },
+# { "radiusCalledStationId", "Called-Station-Id" },
+# { "radiusCallingStationId", "Calling-Station-Id" },
+# { "radiusLoginLATService", "Login-LAT-Service" },
+# { "radiusLoginLATNode", "Login-LAT-Node" },
+# { "radiusLoginLATGroup", "Login-LAT-Group" },
+# { "radiusFramedAppleTalkLink", "Framed-AppleTalk-Link" },
+# { "radiusFramedAppleTalkNetwork", "Framed-AppleTalk-Network" },
+# { "radiusFramedAppleTalkZone", "Framed-AppleTalk-Zone" },
+# { "radiusPortLimit", "Port-Limit" },
+# { "radiusLoginLATPort", "Login-LAT-Port" },
+# You can change to the mappings below like this
+# cat radius2ldif.pl | grep ^# | \
+# perl -ne 'if ( /\{ \"(.*?)\", \"(.*?)\" \}/ ) \
+# { $attr=lc $2; print "\$mapping{\"$attr\"} = \"$1\";\n" ; } '
+
+
+# Warning: sometimes password must be encrypted before sent to the LDAP
+# Which Perl libraries are available? Only way I find is through
+# Netscape's NDS getpwenc.
+# However NDS does the cyphering even if sending plain passwords
+# (do all LDAP's do this?)
+# TODO: test with OpenLDAP
+$mapping{'password'} = "userpassword";
+$mapping{'service-type'} = "radiusServiceType";
+$mapping{'framed-protocol'} = "radiusFramedProtocol";
+$mapping{'framed-ip-address'} = "radiusFramedIPAddress";
+$mapping{'framed-ip-netmask'} = "radiusFramedIPNetmask";
+$mapping{'framed-route'} = "radiusFramedRoute";
+$mapping{'framed-routing'} = "radiusFramedRouting";
+$mapping{'filter-id'} = "radiusFilterId";
+$mapping{'framed-mtu'} = "radiusFramedMTU";
+$mapping{'framed-compression'} = "radiusFramedCompression";
+$mapping{'login-ip-host'} = "radiusLoginIPHost";
+$mapping{'login-service'} = "radiusLoginService";
+$mapping{'login-tcp-port'} = "radiusLoginTCPPort";
+$mapping{'callback-number'} = "radiusCallbackNumber";
+$mapping{'callback-id'} = "radiusCallbackId";
+$mapping{'framed-ipx-network'} = "radiusFramedIPXNetwork";
+$mapping{'class'} = "radiusClass";
+$mapping{'session-timeout'} = "radiusSessionTimeout";
+$mapping{'idle-timeout'} = "radiusIdleTimeout";
+$mapping{'termination-action'} = "radiusTerminationAction";
+$mapping{'called-station-id'} = "radiusCalledStationId";
+$mapping{'calling-station-id'} = "radiusCallingStationId";
+$mapping{'login-lat-service'} = "radiusLoginLATService";
+$mapping{'login-lat-node'} = "radiusLoginLATNode";
+$mapping{'login-lat-group'} = "radiusLoginLATGroup";
+$mapping{'framed-appletalk-link'} = "radiusFramedAppleTalkLink";
+$mapping{'framed-appletalk-network'} = "radiusFramedAppleTalkNetwork";
+$mapping{'framed-appletalk-zone'} = "radiusFramedAppleTalkZone";
+$mapping{'port-limit'} = "radiusPortLimit";
+$mapping{'login-lat-port'} = "radiusLoginLATPort";
+
+# Must be added to rlm_ldap.c (change this to suite your needs)
+# (really not all since they are in the /etc/raddb/dictionary.compat)
+$mapping{'framed-address'} = "radiusFramedIPAddress";
+$mapping{'framed-ip-route'} = "radiusFramedRoute";
+$mapping{'framed-netmask'} = "radiusFramedIPNetmask";
+$mapping{'user-service'} = "radiusServiceType";
+# Since this might not change they could be placed in the DEFAULT
+# user instead of the LDAP
+#$mapping{'ascend-metric'} = "radiusAscendMetric";
+#$mapping{'ascend-idle-limit'} = "radiusAscendIdleLimit";
+# But this really ought to be there :
+$mapping{'callback_number'} = "radiusCallbackNumber";
+
+
+# Footer of ldif entries
+$footer = "\n\n";
+$startentry = 0;
+
+# Main conversion loop: read a radius users file on STDIN and emit
+# one LDIF entry per matched user.
+while ($line=<STDIN>) {
+ chomp $line;
+ # A blank line terminates the current entry.
+ if ( $line =~ /^[\s\t]*$/ && $startentry) {
+ $startentry = 0 ;
+ print $footer;
+ }
+ # Start line is hardcoded must be uid followed by password
+ # this could be changed to use any other parameter however.
+ # Accept both the documented users-file syntax shown in the header
+ # ('Password = "apassword"', spaces around '=' and optional double
+ # quotes) and the bare 'Password=secret' form. The old regex
+ # (/Password=(\w+)/ with no spaces or quotes allowed) could never
+ # match the documented syntax, so those entries were skipped.
+ if ( $line =~ /^(\w+)\s+(?:User-)?Password\s*=\s*"?(\w+)"?/ ) {
+ $uid = $1;
+ $password = $2;
+ $password = $passwords{$password} if $opt_f;
+ if ( $uid =~ /$usermatch/ ) {
+ $startentry = 1;
+ $dn=$predn.$uid.$basedn; # Start of LDIF entry
+ $header = "$dn\n";
+ push @userlist, $dn;
+ if ( $opt_m ) {
+ $header= $header."changetype: modify\n";
+ } else {
+ for (my $i=0; $i < $#objectClass+1; $i++) {
+ $header = $header."objectclass: ".$objectClass[$i]."\n";
+ }
+ }
+ print $header if !$opt_m;
+ print_entry ("cn",$uid);
+ print_entry ("sn",$uid);
+ # The following might be necessary (depending on the groups)
+ # of the object
+ #print "replace: uid\n" if $opt_m;
+ #print "uid: $uid\n";
+ #print "replace: givenname\n" if $opt_m;
+ #print "givenname: $uid\n";
+ print_entry ($mapping{'password'},$password);
+ }
+ }
+ # Do this only for entries detected
+ if ( $startentry && ! $opt_p ) {
+ #Take anything that starts with a tab or spaces
+ # and ends (sometimes) with a comma
+ if ( $line =~ /^[\t\s]+(.*?)\s+=\s+(.*?),*$/ ) {
+ $parameter = lc $1;
+ $value = $2;
+ print "DEBUG: Got :$parameter=$value\n" if $debug;
+ if ( defined $mapping{$parameter} && $mapping{$parameter} ne "" ) {
+ print_entry ($mapping{$parameter},$value);
+ } # of if defined mapping
+ else {
+ print "DEBUG: Parameter $parameter not known\n" if $debug;
+ }
+ } # of if line
+ } # of if startentry
+
+} # of while
+
+
+# Emit the LDAP group entry, then one membership line per user
+# collected in @userlist.
+if ( $group ) {
+ if ( $opt_m ) {
+ print "\n\n$group\n";
+ print "changetype: modify\n";
+ }
+ else {
+ print "$addgroup\n";
+ }
+ # groupOfUniqueNames uses "uniquemember", groupOfNames "member".
+ $member = $uniquemembers ? "uniquemember: " : "member: ";
+ foreach $user ( @userlist ) {
+ print "$member$user\n";
+ }
+}
+
+exit 0;
+
+sub read_passwds {
+# Reads passwords from a file in order to get the crypted
+# version, the file must be of the following format:
+# password cryptedversion
+# Fills the global %passwords hash; returns 0.
+ my ($file)=@_;
+
+ # Three-arg open with a lexical filehandle: the old 2-arg
+ # open(PASSWD, "< $file") let a crafted filename change the
+ # open mode, and leaked a global bareword handle.
+ open my $fh, '<', $file or die ("Could not open $file: $!\n");
+
+ # Lexical loop variable so the sub no longer clobbers the
+ # global $line used by the main loop.
+ while (my $line = <$fh>) {
+ chomp $line;
+ if ( $line =~ /^(\w+)[\t\s]+(.*?)$/ ) {
+ $passwords{$1}=$2;
+ }
+ }
+ close $fh;
+
+ return 0;
+}
+
+sub print_entry {
+# Print a single LDIF attribute line ("name: value").
+# In modify mode (-m) the line is wrapped with the global
+# $header/$footer so it forms a standalone "replace:" change.
+ my ($name, $value) = @_;
+ if ( $opt_m ) {
+ print "${header}replace: $name\n";
+ }
+ print "$name: $value\n";
+ print $footer if $opt_m;
+ return 0;
+}
+
diff --git a/scripts/ldap/schema_to_samba.py b/scripts/ldap/schema_to_samba.py
new file mode 100644
index 0000000..637ae52
--- /dev/null
+++ b/scripts/ldap/schema_to_samba.py
@@ -0,0 +1,132 @@
+# This is a quick hack to convert an openldap schema file to a form which
+# can be loaded into Samba4/AD.
+#
+# Inspired by:
+# http://david-latham.blogspot.co.uk/2012/12/extending-ad-schema-on-samba4-part-2.html
+# https://github.com/linuxplayground/yubikey-ldap/tree/master/samba4-schema
+#
+# (c) 2017 Brian Candler <b.candler@pobox.com>
+# -------------------------------------------------------------------------
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA
+# -----------------------------------------------------------------------
+
+from __future__ import print_function
+import sys
+import re
+from collections import OrderedDict
+
+BASEDN = 'dc=samba4,dc=internal'
+
+# RFC 2252 to https://technet.microsoft.com/en-us/library/cc961740.aspx
+SYNTAX_MAP = {
+ '1.3.6.1.4.1.1466.115.121.1.7': ('2.5.5.8', 1), # boolean
+ '1.3.6.1.4.1.1466.115.121.1.12': ('2.5.5.1', 127), # DN
+ '1.3.6.1.4.1.1466.115.121.1.15': ('2.5.5.3', 27), # DirectoryString
+ '1.3.6.1.4.1.1466.115.121.1.26': ('2.5.5.5', 22), # IA5String
+ '1.3.6.1.4.1.1466.115.121.1.27': ('2.5.5.9', 10), # Integer
+}
+# Line-oriented state machine: 'obj' holds the attributes of the schema
+# element currently being parsed, or None between elements.
+obj = None
+for line in sys.stdin:
+ # Skip comments and blank lines.
+ if re.match(r'^\s*(#|$)', line): continue
+ # "attributetype ( <oid>" starts a new attribute definition.
+ m = re.match(r'^attributetype\s+\(\s+(\S+)', line)
+ if m:
+ obj = OrderedDict([
+ ('objectClass', ['top', 'attributeSchema']),
+ ('attributeID', m.group(1)),
+ ('isSingleValued', 'FALSE'),
+ ])
+ continue
+ # "objectclass ( <oid>" starts a new class definition.
+ m = re.match(r'^objectclass\s+\(\s+(\S+)', line)
+ if m:
+ obj = OrderedDict([
+ ('objectClass', ['top', 'classSchema']),
+ ('governsID', m.group(1)),
+ ])
+ continue
+ # NOTE(review): from here on obj.update() assumes a header line was
+ # seen first; a NAME/DESC/... line outside an element would raise
+ # AttributeError on None -- presumably acceptable for trusted input.
+ m = re.match(r'^\s*NAME\s+[\'"](.+)[\'"]', line)
+ if m:
+ obj.update([
+ ('cn', m.group(1)),
+ ('name', m.group(1)),
+ ('lDAPDisplayName', m.group(1)),
+ ])
+ continue
+ m = re.match(r'^\s*DESC\s+[\'"](.+)[\'"]', line)
+ if m:
+ obj.update([
+ ('description', m.group(1)),
+ ])
+ continue
+ m = re.match(r'^\s*(EQUALITY|SUBSTR)\s+(\S+)', line)
+ if m:
+ # Not supported by AD?
+ # https://technet.microsoft.com/en-us/library/cc961575.aspx
+ continue
+ m = re.match(r'^\s*SYNTAX\s+(\S+)', line)
+ if m:
+ # NOTE(review): an OID missing from SYNTAX_MAP raises KeyError
+ # here -- looks intentional (forces the map to be extended).
+ obj.update([
+ ('attributeSyntax', SYNTAX_MAP[m.group(1)][0]),
+ ('oMSyntax', SYNTAX_MAP[m.group(1)][1]),
+ ])
+ continue
+ if re.match(r'^\s*SINGLE-VALUE', line):
+ obj.update([
+ ('isSingleValued', 'TRUE'),
+ ])
+ continue
+ if re.match(r'^\s*AUXILIARY', line):
+ # https://msdn.microsoft.com/en-us/library/ms679014(v=vs.85).aspx
+ # https://technet.microsoft.com/en-us/library/2008.05.schema.aspx
+ obj.update([
+ ('objectClassCategory', '3'),
+ ])
+ continue
+ if re.match(r'^\s*STRUCTURAL', line):
+ obj.update([
+ ('objectClassCategory', '1'),
+ ])
+ continue
+ m = re.match(r'^\s*SUP\s+(\S+)', line)
+ if m:
+ obj.update([
+ ('subClassOf', m.group(1)),
+ ])
+ continue
+ # "MAY ( a $ b $ c )" / "MUST ( ... )": '$'-separated attribute list.
+ m = re.match(r'^\s*(MAY|MUST)\s+\((.*)\)\s*$', line)
+ if m:
+ attrs = m.group(2).split('$')
+ obj.update([
+ ('%sContain' % m.group(1).lower(), [v.strip() for v in attrs]),
+ ])
+ continue
+ # Single-attribute "MAY foo" / "MUST foo" form.
+ m = re.match(r'^\s*(MAY|MUST)\s+(\w+)\s*$', line)
+ if m:
+ obj.update([
+ ('%sContain' % m.group(1).lower(), m.group(2)),
+ ])
+ continue
+ # A closing ')' finishes the element: dump it as an LDIF "add".
+ if re.match(r'^\s*\)', line) and obj:
+ print("dn: CN=%s,CN=Schema,CN=Configuration,%s" % (obj['cn'], BASEDN))
+ print("changetype: add")
+ for k in obj:
+ if type(obj[k]) == list:
+ for v in obj[k]:
+ print("%s: %s" % (k, v))
+ else:
+ print("%s: %s" % (k, obj[k]))
+ print()
+ obj = None
+ continue
+ # Anything unrecognised is echoed to stderr for manual review.
+ print("??? %s" % line, file=sys.stderr)
diff --git a/scripts/libtool.mk b/scripts/libtool.mk
new file mode 100644
index 0000000..381127e
--- /dev/null
+++ b/scripts/libtool.mk
@@ -0,0 +1,243 @@
+# Copyright 2008, 2009, 2010 Dan Moulding, Alan T. DeKok
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+#
+# You can watch what it's doing by:
+#
+# $ VERBOSE=1 make ... args ...
+#
+ifeq "${VERBOSE}" ""
+ Q=@
+else
+ Q=
+endif
+
+# Add these rules only when LIBTOOL is being used.
+ifneq "${LIBTOOL}" ""
+
+ # clang on OSX sometimes doesn't know where things are. <sigh>
+ ifeq "$(findstring darwin,$(HOSTINFO))" "darwin"
+ JLIBTOOL_DEFS += -L/Library/Developer/CommandLineTools/SDKs/MacOSX.sdk/usr/lib
+ endif
+
+# JLIBTOOL - check if we're using the local (fast) jlibtool, rather
+# than the GNU (slow) libtool shell script. If so, add rules
+# to build it.
+
+ifeq "${LIBTOOL}" "JLIBTOOL"
+ JLIBTOOL := ${BUILD_DIR}/make/jlibtool
+
+ # Add a rule to build jlibtool BEFORE any other targets. This
+ # means that we can use it to build the later targets.
+ all install: ${JLIBTOOL}
+
+ # Note that we need to use a compilation rule that does NOT
+ # include referencing ${LIBTOOL}, as we don't have a jlibtool
+ # binary!
+ ${JLIBTOOL}: ${top_makedir}/jlibtool.c
+ $(Q)mkdir -p $(dir $@)
+ $(Q)echo CC jlibtool.c
+ $(Q)${CC} $< -o $@ ${JLIBTOOL_DEFS}
+
+ clean: jlibtool_clean
+
+ .PHONY: jlibtool_clean
+ jlibtool_clean:
+ $(Q)rm -f ${JLIBTOOL}
+
+ # Tell GNU Make to use this value, rather than anything specified
+ # on the command line.
+ override LIBTOOL := ${JLIBTOOL}
+endif # else we're not using jlibtool
+
+# When using libtool, it produces a '.libs' directory. Ensure that it
+# is removed on "make clean", too.
+#
+clean: .libs_clean
+
+.PHONY: .libs_clean
+.libs_clean:
+ $(Q)rm -rf ${BUILD_DIR}/.libs/
+
+# Re-define compilers and linkers
+#
+OBJ_EXT = lo
+COMPILE.c = ${LIBTOOL} --silent --mode=compile ${CC}
+COMPILE.cxx = ${LIBTOOL} --mode=compile ${CXX}
+LINK.c = ${LIBTOOL} --silent --mode=link ${CC}
+LINK.cxx = ${LIBTOOL} --mode=link ${CXX}
+PROGRAM_INSTALL = ${LIBTOOL} --silent --mode=install ${INSTALL}
+
+
+# LIBTOOL_ENDINGS - Given a library ending in ".a" or ".so", replace that
+# extension with ".la".
+#
+define LIBTOOL_ENDINGS
+$(patsubst %.a,%.la,$(patsubst %.so,%.la,${1}))
+endef
+
+# ADD_TARGET_RULE.la - Build a ".la" target.
+#
+# USE WITH EVAL
+#
+define ADD_TARGET_RULE.la
+ # So "make ${1}" works
+ .PHONY: ${1}
+ ${1}: $${${1}_BUILD}/${1}
+
+ # Create libtool library ${1}
+ $${${1}_BUILD}/${1}: $${${1}_OBJS} $${${1}_PRLIBS}
+ $(Q)$(strip mkdir -p $(dir $${${1}_BUILD}/${1}))
+ @$(ECHO) LINK $${${1}_BUILD}/${1}
+ $(Q)$${${1}_LINKER} -o $${${1}_BUILD}/${1} $${RPATH_FLAGS} $${LDFLAGS} \
+ $${${1}_LDFLAGS} $${${1}_OBJS} $${LDLIBS} $${${1}_LDLIBS} \
+ $${${1}_PRLIBS}
+ $(Q)$${${1}_POSTMAKE}
+
+ ifneq "${ANALYZE.c}" ""
+ scan.${1}: $${${1}_PLISTS}
+ endif
+endef
+
+# ADD_LOCAL_RULE.exe - Parametric "function" that adds a rule to build
+# a local version of the target.
+#
+# ${1} is the executable name. The generated rule links a relinkable
+# copy of the executable into $${${1}_BUILD}/${LOCAL} using
+# LOCAL_FLAGS, so the binary can be run from the build tree.
+#
+# NOTE(review): the rule's target is spelled $${${1}_LOCAL} while the
+# recipe's -o uses $${LOCAL}${1} -- presumably both expand to the same
+# "local/<name>" path (see ADD_LIBTOOL_SUFFIX); confirm they agree.
+#
+# USE WITH EVAL
+#
+define ADD_LOCAL_RULE.exe
+ ${1}: $${${1}_BUILD}/$${LOCAL}${1}
+
+ # used to fix up RPATH for ${1} on install.
+ $${${1}_BUILD}/$${${1}_LOCAL}: $${${1}_OBJS} $${${1}_PRBIN} $${${1}_LOCAL_PRLIBS}
+ $(Q)$(strip mkdir -p $${${1}_BUILD}/${LOCAL}/)
+ $(Q)$${${1}_LINKER} -o $${${1}_BUILD}/$${LOCAL}${1} $${LOCAL_FLAGS} $${LDFLAGS} \
+ $${${1}_LDFLAGS} $${${1}_OBJS} $${${1}_LOCAL_PRLIBS} \
+ $${LDLIBS} $${${1}_LDLIBS}
+ $(Q)$${${1}_POSTMAKE}
+endef
+
+# ADD_LOCAL_RULE.la - Parametric "function" that adds a rule to build
+# a local version of the target.
+#
+# USE WITH EVAL
+#
+define ADD_LOCAL_RULE.la
+ ${1}: $${${1}_BUILD}/$${LOCAL}${1}
+
+ # used to fix up RPATH for ${1} on install.
+ $${${1}_BUILD}/$${${1}_LOCAL}: $${${1}_OBJS} $${${1}_LOCAL_PRLIBS}
+ $(Q)$(strip mkdir -p $${${1}_BUILD}/${LOCAL}/)
+ $(Q)$${${1}_LINKER} -o $${${1}_BUILD}/$${LOCAL}${1} $${LOCAL_FLAGS} $${LDFLAGS} \
+ $${${1}_LDFLAGS} $${${1}_OBJS} $${LDLIBS} $${${1}_LDLIBS} \
+ $${${1}_LOCAL_PRLIBS}
+ $(Q)$${${1}_POSTMAKE}
+
+endef
+
+# By default, if libdir is defined, we build shared libraries.
+# However, we can disable shared libraries if explicitly told to.
+ifneq "${libdir}" ""
+ ifneq "${bm_shared_libs}" "no"
+ bm_shared_libs := yes
+ endif
+endif
+
+# Default to building static libraries, too.
+ifneq "${bm_static_libs}" "no"
+ bm_static_libs := yes
+endif
+
+# Check if we build shared libraries.
+ifeq "${bm_shared_libs}" "yes"
+ LOCAL := local/
+
+ # RPATH : flags used to build executables that are installed,
+ # with no dependency on the source.
+ # RELINK : flags used to build executables that can be run
+ # from the build directory / source tree.
+ RPATH_FLAGS := -rpath ${libdir}
+ LOCAL_FLAGS := -rpath $(subst //,/,$(abspath ${BUILD_DIR})/lib/${LOCAL}/.libs)
+
+ LOCAL_FLAGS_MIN := -rpath ${libdir}
+
+ ifneq "${bm_static_libs}" "yes"
+ RPATH_FLAGS += --shared
+ LOCAL_FLAGS += --shared
+ endif
+else
+ ifneq "${bm_static_libs}" "yes"
+ $(error Building without static libraries requires you to set 'INSTALL' or 'libdir')
+ endif
+
+ RPATH_FLAGS := -static
+endif
+
+# UPDATE_TARGET_ENDINGS - Function to turn target into a libtool target
+# e.g. "libfoo.a" -> libfoo.la"
+#
+# If the target is an executable, then its extension doesn't change
+# when we use libtool, and we don't do any re-writing.
+#
+# USE WITH EVAL
+#
+define ADD_LIBTOOL_SUFFIX
+ ifneq "$$(call LIBTOOL_ENDINGS,$${TGT})" "$${TGT}"
+ TGT_NOLIBTOOL := $${TGT}
+ TGT := $$(call LIBTOOL_ENDINGS,$${TGT})
+ $${TGT}_NOLIBTOOL := $${TGT_NOLIBTOOL}
+ endif
+
+ ifneq "$${LOCAL_FLAGS}" ""
+ $${TGT}_LOCAL := ${LOCAL}$${TGT}
+ endif
+
+ # re-write all of the dependencies to have the libtool endings.
+ TGT_PREREQS := $$(call LIBTOOL_ENDINGS,$${TGT_PREREQS})
+endef
+
+# ADD_LIBTOOL_TARGET - Function to ensure that the object files depend
+# on our jlibtool target. This ensures that jlibtool is built before
+# it's used to build the object files.
+#
+# USE WITH EVAL
+#
+define ADD_LIBTOOL_TARGET
+ ifneq "${JLIBTOOL}" ""
+ $${$${TGT}_OBJS}: $${JLIBTOOL}
+ endif
+
+ ifneq "$${$${TGT}_NOLIBTOOL}" ""
+ $$(notdir $${$${TGT}_NOLIBTOOL}): $${TGT}
+ endif
+
+ # If we need to relink, add the relink targets now.
+ ifneq "$${$${TGT}_LOCAL}" ""
+ # add rules to relink the target
+
+ $${TGT}_LOCAL_PRLIBS := $$(subst $${BUILD_DIR}/lib/,$${BUILD_DIR}/lib/${LOCAL},$${$${TGT}_PRLIBS})
+
+ $$(eval $$(call ADD_LOCAL_RULE$${$${TGT}_SUFFIX},$${TGT}))
+
+ $$(eval $$(call ADD_CLEAN_RULE,$${$${TGT}_LOCAL}_libtool))
+
+ ifneq "$${$${TGT}_NOLIBTOOL}" ""
+ $$(eval $$(call ADD_CLEAN_RULE,$${$${TGT}_NOLIBTOOL}_libtool))
+ endif
+ endif
+
+endef
+
+
+endif
diff --git a/scripts/logrotate/freeradius b/scripts/logrotate/freeradius
new file mode 100644
index 0000000..eecf631
--- /dev/null
+++ b/scripts/logrotate/freeradius
@@ -0,0 +1,59 @@
+#
+# Sample logrotate file for FreeRADIUS
+#
+# You can use this to rotate the /var/log/radius/* files, simply copy it to /etc/logrotate.d/radiusd
+#
+
+#
+# The main server log
+#
+/var/log/radius/radius.log {
+ # common options
+ daily
+ rotate 14
+ missingok
+ compress
+ delaycompress
+ notifempty
+
+ copytruncate
+ su radiusd radiusd
+}
+
+# (in order)
+# Session monitoring utilities
+# SQL log files
+/var/log/freeradius/checkrad.log /var/log/freeradius/radwatch.log
+/var/log/freeradius/sqllog.sql
+{
+ # common options
+ daily
+ rotate 14
+ missingok
+ compress
+ delaycompress
+ notifempty
+
+ nocreate
+ su radiusd radiusd
+}
+
+# There are different detail-rotating strategies you can use. One is
+# to write to a single detail file per IP and use the rotate config
+# below. Another is to write to a daily detail file per IP with:
+# detailfile = ${radacctdir}/%{Client-IP-Address}/%Y%m%d-detail
+# (or similar) in radiusd.conf, without rotation. If you go with the
+# second technique, you will need another cron job that removes old
+# detail files. You do not need to comment out the below for method #2.
+/var/log/radius/radacct/*/detail {
+ # common options
+ daily
+ rotate 14
+ missingok
+ compress
+ delaycompress
+ notifempty
+
+ nocreate
+ su radiusd radiusd
+}
diff --git a/scripts/min-includes.pl b/scripts/min-includes.pl
new file mode 100755
index 0000000..37044ed
--- /dev/null
+++ b/scripts/min-includes.pl
@@ -0,0 +1,238 @@
+#!/usr/bin/env perl
+######################################################################
+#
+# This script find duplicates of #include files, ignoring #ifdef's, etc.
+# from C source files, and (at your command) removes the duplicates.
+#
+# It is meant to be run ONLY by FreeRADIUS developers, and has nothing
+# whatsoever to do with RADIUS, FreeRADIUS, or configuring a RADIUS server.
+#
+######################################################################
+#
+# Run as: ./min-includes.pl `find . -name "*.c" -print`
+# prints out duplicate includes from files.
+#
+# ./min-includes.pl +n `find . -name "*.c" -print`
+# removes the duplicate includes from each file.
+# Remember to check that it still builds!
+#
+# It has to be run from the TOP of the FreeRADIUS build tree,
+# i.e. where the top-level "configure" script is located.
+#
+######################################################################
+#
+# FIXME: We don't handle include files taken from the current
+# directory...
+#
+# FIXME: we should take -I <path> from the command line.
+#
+######################################################################
+#
+# Copyright (C) 2006 Alan DeKok <aland@freeradius.org>
+#
+# $Id$
+#
+######################################################################
+
+my %processed;
+
+$any_dups = 0;
+$debug = 0;
+
+#
+# Find the #include's for one file.
+#
+# Records each include in the global %refs (file -> include -> line
+# number) and counts occurrences in the global %include. Each file is
+# scanned at most once, tracked via %processed.
+#
+sub process($) {
+ my $file = shift;
+
+ return if ($processed{$file});
+
+ $processed{$file}++;
+
+ # Three-arg open with a lexical handle: the old 2-arg
+ # open(FILE, "<$file") allowed odd filenames to change the
+ # open mode, and leaked a global bareword handle.
+ open my $fh, '<', $file or die "Failed to open $file: $!\n";
+
+ # Lexical counter so the sub no longer clobbers the global
+ # $line used by the main program.
+ my $line = 0;
+ while (<$fh>) {
+ $line++;
+
+ next if (!/^\s*\#\s*include\s+/);
+
+ if (/^\s*\#\s*include\s+"(.+?)"/) {
+ $refs{$file}{$1} = $line;
+
+ # FIXME: local header files?
+ # src/foo/bar.c: #include "foo.h"
+ # src/foo/foo.h do stuff..
+
+ $include{$1}++;
+ } elsif (/^\s*\#\s*include\s+<(.+?)>/) {
+ $refs{$file}{$1} = $line;
+ $include{$1}++;
+ }
+ }
+
+ close $fh;
+}
+
+#
+# Where include files are located.
+#
+# FIXME:
+#
+@directories = ("src/lib", "src");
+$do_it = 0;
+
+#
+# Horrid.
+#
+if ($ARGV[0] eq "+n") {
+ shift;
+ $do_it = 1;
+}
+
+#
+# Bootstrap the basic C files.
+#
+foreach $file (@ARGV) {
+ process($file);
+}
+
+
+#
+# Process the include files referenced from the C files, to find out
+# what they include Note that we create a temporary array, rather
+# than walking over %include, because the process() function adds
+# entries to the %include hash.
+#
+@work = sort keys %include;
+foreach $inc (@work) {
+
+ foreach $dir (@directories) {
+ $path = $dir . "/" . $inc;
+
+ # normalize path
+ $path =~ s:/.*?/\.\.::;
+ $path =~ s:/.*?/\.\.::;
+
+ next if (! -e $path);
+ process($path);
+ $forward{$inc} = $path;
+ $reverse{$path} = $inc;
+
+ # ignore system include files
+ next if ((scalar keys %{$refs{$path}}) == 0);
+
+ # Remember that X includes Y, and push Y onto the list
+ # of files to scan.
+ foreach $inc2 (sort keys %{$refs{$path}}) {
+ $maps{$inc}{$inc2} = 0;
+ push @work, $inc2;
+ }
+ }
+}
+
+#
+# Process all of the forward refs, so that we have a complete
+# list of who's referencing who.
+#
+# This doesn't find the shortest path from A to B, but it does
+# find one path.
+#
+foreach $inc (sort keys %maps) {
+ foreach $inc2 (sort keys %{$maps{$inc}}) {
+ foreach $inc3 (sort keys %{$maps{$inc2}}) {
+ # map is already there...
+ next if (defined $maps{$inc}{$inc3});
+
+ $maps{$inc}{$inc3} = $maps{$inc2}{$inc3} + 1;
+ }
+ }
+}
+
+#
+# Walk through the files again, looking for includes that are
+# unnecessary. Note that we process header files, too.
+#
+foreach $file (sort keys %refs) {
+
+ # print out some debugging information.
+ if ($debug > 0) {
+ if (defined $reverse{$file}) {
+ print $file, "\t(", $reverse{$file}, ")\n";
+ } else {
+ print $file, "\n";
+ }
+ }
+
+ # walk of the list of include's in this file
+ foreach $ref (sort keys %{$refs{$file}}) {
+
+ # walk over the include files we include, or included by
+ # files that we include.
+ foreach $inc2 (sort keys %{$maps{$ref}}) {
+ #
+ # If we include X, and X includes Y, and we include
+ # Y ourselves *after* X, it's a definite dupe.
+ #
+ # Note that this is a *guaranteed* duplicate.
+ #
+ # Sometimes order matters, so we can't always delete X if
+ # we include Y after X, and Y includes X
+ #
+ if (defined $refs{$file}{$inc2} &&
+ ($refs{$file}{$inc2} > $refs{$file}{$ref})) {
+ $duplicate{$file}{$inc2} = $ref;
+
+ # mark the line to be deleted.
+ $delete_line{$file}{$refs{$file}{$inc2}}++;
+
+ $any_dups++;
+ }
+ }
+ print "\t", $ref, "\n" if ($debug > 0);
+ }
+}
+
+if ($debug > 0) {
+ print "------------------------------------\n";
+}
+
+#
+# Maybe just print out the dups so that a person can validate them.
+#
+if (!$do_it) {
+ foreach $file (sort keys %duplicate) {
+ print $file, "\n";
+
+ foreach $inc (sort keys %{$duplicate{$file}}) {
+ print "\t[", $refs{$file}{$inc}, "] ", $inc, " (", $duplicate{$file}{$inc}, " at ", $refs{$file}{$duplicate{$file}{$inc}}, ")\n";
+ }
+ }
+} else {
+ foreach $file (sort keys %duplicate) {
+ open FILE, "<$file" or die "Failed to open $file: $!\n";
+ open OUTPUT, ">$file.tmp" or die "Failed to create $file.tmp: $!\n";
+
+ $line = 0;
+ while (<FILE>) {
+ $line++;
+
+ # supposed to delete this line, don't print it to the output.
+ next if (defined $delete_line{$file}{$line});
+
+ print OUTPUT;
+ }
+
+ rename "$file.tmp", $file;
+ }
+
+}
+
+# If we succeeded in re-writing the files, it's OK.
+exit 0 if ($do_it);
+
+# If there are no duplicates, then we're OK.
+exit 0 if (!$any_dups);
+
+# Else there are duplicates, complain.
+exit 1
diff --git a/scripts/monit/freeradius.monitrc b/scripts/monit/freeradius.monitrc
new file mode 100644
index 0000000..65f96cf
--- /dev/null
+++ b/scripts/monit/freeradius.monitrc
@@ -0,0 +1,18 @@
+#
+# Script for use with Monit
+#
+# http://mmonit.com/monit/
+#
+
+#
+# Totalmem limit should be lowered to 200.0 if none of the
+# interpreted language modules or rlm_cache are being used.
+#
+check process radiusd with pidfile /var/run/radiusd/radiusd.pid
+ start program = "/etc/init.d/radiusd start"
+ stop program = "/etc/init.d/radiusd stop"
+ if failed host 127.0.0.1 port 1812 type udp protocol radius secret testing123 then alert
+ if failed host 127.0.0.1 port 1813 type udp protocol radius secret testing123 then alert
+ if cpu > 95% for 2 cycles then alert
+ if totalmem > 1024.0 MB for 5 cycles then restart
+ if 5 restarts within 5 cycles then timeout
diff --git a/scripts/munin/freeradius_acct b/scripts/munin/freeradius_acct
new file mode 100755
index 0000000..a61627c
--- /dev/null
+++ b/scripts/munin/freeradius_acct
@@ -0,0 +1,88 @@
+#!/bin/sh
+#
+# Plugin to count the daily amount of freeradius accounting packets.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA
+#
+# Copyright (C) 2008 Alan DeKok <aland@deployingradius.com>
+#
+# Magic markers - optional - used by installation scripts and
+# munin-config:
+#
+#%# family=manual
+#%# capabilities=autoconf
+
+RADMIN=radmin
+SOCKETFILE=/var/run/radiusd/radiusd.sock
+
+if [ "$1" = "autoconf" ]; then
+ #
+ # FIXME: Check if FreeRADIUS is running.
+ #
+ echo yes
+ exit 0
+fi
+
+if [ "$1" = "config" ]; then
+ echo 'graph_title FreeRADIUS Accounting Requests'
+ echo 'graph_args --base 1000 -l 0 '
+ echo 'graph_period second'
+ echo 'graph_vlabel requests / ${graph_period}'
+ echo 'graph_category Other'
+
+ echo 'requests.label Accounting-Requests'
+ echo 'requests.info total received request packets'
+ echo 'requests.type DERIVE'
+ echo 'requests.min 0'
+
+ echo 'responses.label Accounting-Responses'
+ echo 'responses.info total sent response packets'
+ echo 'responses.type DERIVE'
+ echo 'responses.min 0'
+
+ echo 'dup.label Duplicate requests'
+ echo 'dup.info total duplicate request packets'
+ echo 'dup.type DERIVE'
+ echo 'dup.min 0'
+
+ echo 'invalid.label Invalid requests'
+ echo 'invalid.info total invalid request packets'
+ echo 'invalid.type DERIVE'
+ echo 'invalid.min 0'
+
+ echo 'malformed.label Malformed requests'
+ echo 'malformed.info total malformed request packets'
+ echo 'malformed.type DERIVE'
+ echo 'malformed.min 0'
+
+ echo 'bad_signature.label Requests with bad signature'
+ echo 'bad_signature.info total request packets with a bad signature'
+ echo 'bad_signature.type DERIVE'
+ echo 'bad_signature.min 0'
+
+ echo 'dropped.label Dropped requests'
+ echo 'dropped.info total request packets dropped for other reasons'
+ echo 'dropped.type DERIVE'
+ echo 'dropped.min 0'
+
+ echo 'unknown_types.label Unknown type'
+ echo 'unknown_types.info total request packets of unknown type'
+ echo 'unknown_types.type DERIVE'
+ echo 'unknown_types.min 0'
+
+ exit 0
+fi
+
+$RADMIN -f $SOCKETFILE -e "stats client acct" | awk '{print $1".value " $2}'
diff --git a/scripts/munin/freeradius_auth b/scripts/munin/freeradius_auth
new file mode 100755
index 0000000..b602402
--- /dev/null
+++ b/scripts/munin/freeradius_auth
@@ -0,0 +1,103 @@
+#!/bin/sh
+#
+# Plugin to count the daily amount of freeradius authentication packets.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA
+#
+# Copyright (C) 2008 Alan DeKok <aland@deployingradius.com>
+#
+# Magic markers - optional - used by installation scripts and
+# munin-config:
+#
+#%# family=manual
+#%# capabilities=autoconf
+
+RADMIN=radmin
+SOCKETFILE=/var/run/radiusd/radiusd.sock
+
+if [ "$1" = "autoconf" ]; then
+ #
+ # FIXME: Check if FreeRADIUS is running.
+ #
+ echo yes
+ exit 0
+fi
+
+if [ "$1" = "config" ]; then
+ echo 'graph_title FreeRADIUS Authentication Requests'
+ echo 'graph_args --base 1000 -l 0 '
+ echo 'graph_period second'
+ echo 'graph_vlabel requests / ${graph_period}'
+ echo 'graph_category Other'
+
+ echo 'requests.label Access-Requests'
+ echo 'requests.info total received request packets'
+ echo 'requests.type DERIVE'
+ echo 'requests.min 0'
+
+ echo 'responses.label responses (all types)'
+ echo 'responses.info total sent response packets'
+ echo 'responses.type DERIVE'
+ echo 'responses.min 0'
+
+ echo 'accepts.label Access-Accepts'
+ echo 'accepts.info total sent Access-Accept packets'
+ echo 'accepts.type DERIVE'
+ echo 'accepts.min 0'
+
+ echo 'rejects.label Access-Rejects'
+ echo 'rejects.info total sent Access-Reject packets'
+ echo 'rejects.type DERIVE'
+ echo 'rejects.min 0'
+
+ echo 'challenges.label Access-Challenges'
+ echo 'challenges.info total sent Access-Challenge packets'
+ echo 'challenges.type DERIVE'
+ echo 'challenges.min 0'
+
+ echo 'dup.label Duplicate requests'
+ echo 'dup.info total duplicate request packets'
+ echo 'dup.type DERIVE'
+ echo 'dup.min 0'
+
+ echo 'invalid.label Invalid requests'
+ echo 'invalid.info total invalid request packets'
+ echo 'invalid.type DERIVE'
+ echo 'invalid.min 0'
+
+ echo 'malformed.label Malformed requests'
+ echo 'malformed.info total malformed request packets'
+ echo 'malformed.type DERIVE'
+ echo 'malformed.min 0'
+
+ echo 'bad_signature.label Requests with bad signature'
+ echo 'bad_signature.info total request packets with a bad signature'
+ echo 'bad_signature.type DERIVE'
+ echo 'bad_signature.min 0'
+
+ echo 'dropped.label Dropped requests'
+ echo 'dropped.info total request packets dropped for other reasons'
+ echo 'dropped.type DERIVE'
+ echo 'dropped.min 0'
+
+ echo 'unknown_types.label Unknown type'
+ echo 'unknown_types.info total request packets of unknown type'
+ echo 'unknown_types.type DERIVE'
+ echo 'unknown_types.min 0'
+
+ exit 0
+fi
+
+$RADMIN -f $SOCKETFILE -e "stats client auth" | awk '{print $1".value " $2}'
diff --git a/scripts/munin/freeradius_proxy_acct b/scripts/munin/freeradius_proxy_acct
new file mode 100755
index 0000000..7a8b85b
--- /dev/null
+++ b/scripts/munin/freeradius_proxy_acct
@@ -0,0 +1,88 @@
+#!/bin/sh
+#
+# Plugin to count the daily amount of freeradius proxied accounting packets.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA
+#
+# Copyright (C) 2008 Alan DeKok <aland@deployingradius.com>
+#
+# Magic markers - optional - used by installation scripts and
+# munin-config:
+#
+#%# family=manual
+#%# capabilities=autoconf
+
+RADMIN=radmin
+SOCKETFILE=/var/run/radiusd/radiusd.sock
+
+if [ "$1" = "autoconf" ]; then
+ #
+ # FIXME: Check if FreeRADIUS is running.
+ #
+ echo yes
+ exit 0
+fi
+
+if [ "$1" = "config" ]; then
+ echo 'graph_title FreeRADIUS Proxied Accounting Requests'
+ echo 'graph_args --base 1000 -l 0 '
+ echo 'graph_period second'
+ echo 'graph_vlabel requests / ${graph_period}'
+ echo 'graph_category Other'
+
+ echo 'requests.label Accounting-Requests'
+ echo 'requests.info total sent request packets'
+ echo 'requests.type DERIVE'
+ echo 'requests.min 0'
+
+ echo 'responses.label Accounting-Responses'
+ echo 'responses.info total received response packets'
+ echo 'responses.type DERIVE'
+ echo 'responses.min 0'
+
+ echo 'dup.label Duplicate requests'
+ echo 'dup.info total duplicate request packets'
+ echo 'dup.type DERIVE'
+ echo 'dup.min 0'
+
+ echo 'invalid.label Invalid requests'
+ echo 'invalid.info total invalid request packets'
+ echo 'invalid.type DERIVE'
+ echo 'invalid.min 0'
+
+ echo 'malformed.label Malformed requests'
+ echo 'malformed.info total malformed request packets'
+ echo 'malformed.type DERIVE'
+ echo 'malformed.min 0'
+
+ echo 'bad_signature.label Requests with bad signature'
+ echo 'bad_signature.info total request packets with a bad signature'
+ echo 'bad_signature.type DERIVE'
+ echo 'bad_signature.min 0'
+
+ echo 'dropped.label Dropped requests'
+ echo 'dropped.info total request packets dropped for other reasons'
+ echo 'dropped.type DERIVE'
+ echo 'dropped.min 0'
+
+ echo 'unknown_types.label Unknown type'
+ echo 'unknown_types.info total request packets of unknown type'
+ echo 'unknown_types.type DERIVE'
+ echo 'unknown_types.min 0'
+
+ exit 0
+fi
+
+$RADMIN -f $SOCKETFILE -e "stats client acct" | awk '{print $1".value " $2}'
diff --git a/scripts/munin/freeradius_proxy_auth b/scripts/munin/freeradius_proxy_auth
new file mode 100755
index 0000000..dbc8617
--- /dev/null
+++ b/scripts/munin/freeradius_proxy_auth
@@ -0,0 +1,103 @@
+#!/bin/sh
+#
+# Plugin to count the daily amount of freeradius authentication packets.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA
+#
+# Copyright (C) 2008 Alan DeKok <aland@deployingradius.com>
+#
+# Magic markers - optional - used by installation scripts and
+# munin-config:
+#
+#%# family=manual
+#%# capabilities=autoconf
+
+RADMIN=radmin
+SOCKETFILE=/var/run/radiusd/radiusd.sock
+
+if [ "$1" = "autoconf" ]; then
+ #
+ # FIXME: Check if FreeRADIUS is running.
+ #
+ echo yes
+ exit 0
+fi
+
+if [ "$1" = "config" ]; then
+ echo 'graph_title FreeRADIUS Proxied Authentication Requests'
+ echo 'graph_args --base 1000 -l 0 '
+ echo 'graph_period second'
+ echo 'graph_vlabel requests / ${graph_period}'
+ echo 'graph_category Other'
+
+ echo 'requests.label Access-Requests'
+ echo 'requests.info total sent request packets'
+ echo 'requests.type DERIVE'
+ echo 'requests.min 0'
+
+ echo 'responses.label responses (all types)'
+ echo 'responses.info total received response packets'
+ echo 'responses.type DERIVE'
+ echo 'responses.min 0'
+
+ echo 'accepts.label Access-Accepts'
+ echo 'accepts.info total received Access-Accept packets'
+ echo 'accepts.type DERIVE'
+ echo 'accepts.min 0'
+
+ echo 'rejects.label Access-Rejects'
+ echo 'rejects.info total received Access-Reject packets'
+ echo 'rejects.type DERIVE'
+ echo 'rejects.min 0'
+
+ echo 'challenges.label Access-Challenges'
+ echo 'challenges.info total received Access-Challenge packets'
+ echo 'challenges.type DERIVE'
+ echo 'challenges.min 0'
+
+ echo 'dup.label Duplicate requests'
+ echo 'dup.info total duplicate request packets'
+ echo 'dup.type DERIVE'
+ echo 'dup.min 0'
+
+ echo 'invalid.label Invalid requests'
+ echo 'invalid.info total invalid request packets'
+ echo 'invalid.type DERIVE'
+ echo 'invalid.min 0'
+
+ echo 'malformed.label Malformed requests'
+ echo 'malformed.info total malformed request packets'
+ echo 'malformed.type DERIVE'
+ echo 'malformed.min 0'
+
+ echo 'bad_signature.label Requests with bad signature'
+ echo 'bad_signature.info total request packets with a bad signature'
+ echo 'bad_signature.type DERIVE'
+ echo 'bad_signature.min 0'
+
+ echo 'dropped.label Dropped requests'
+ echo 'dropped.info total request packets dropped for other reasons'
+ echo 'dropped.type DERIVE'
+ echo 'dropped.min 0'
+
+ echo 'unknown_types.label Unknown type'
+ echo 'unknown_types.info total request packets of unknown type'
+ echo 'unknown_types.type DERIVE'
+ echo 'unknown_types.min 0'
+
+ exit 0
+fi
+
+$RADMIN -f $SOCKETFILE -e "stats home_server auth" | awk '{print $1".value " $2}'
diff --git a/scripts/munin/radsniff b/scripts/munin/radsniff
new file mode 100755
index 0000000..47f4400
--- /dev/null
+++ b/scripts/munin/radsniff
@@ -0,0 +1,246 @@
+#!/bin/sh
+: << =cut
+
+=head1 NAME
+
+radsniff - A plugin to consume statistics generated by radsniff via collectd RRD files
+
+=head1 APPLICABLE SYSTEMS
+
+radsniff v4 or later
+
+=head1 CONFIGURATION
+
+This plugin uses the following configuration variables:
+
+ [radsniff]
+ env.host - The host collectd thinks the radsniff data
+ originated from (defaults to current host).
+ env.rrd_path - Path to the directory containing rrd files.
+ env.type - Either radius_rtx, radius_latency or radius_count
+ env.pkt_type - The type of packet to graph.
+ env.instance - radsniff instance name (passed to radsniff with -N).
+
+=head1 AUTHOR
+
+Copyright (C) 2014 Arran Cudbard-Bell <a.cudbardb@freeradius.org>
+
+=head1 LICENSE
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA
+
+=head1 MAGIC MARKERS
+
+ #%# family=manual
+
+=cut
+
+if [ -z "$type" ]; then
+ echo "env.type must be set" >&2
+ exit -1
+fi
+
+if [ -z "$pkt_type" ]; then
+ echo "env.pkt_type must be set" >&2
+ exit -1
+fi
+
+if [ "$1" = "config" ]; then
+ pretty_pkt_type=`echo "$pkt_type" | sed -e 's/_/ /g' | sed 's/^./\U&\E/'`
+
+ case "$type" in
+ radius_rtx)
+ echo "graph_title ${pretty_pkt_type} rtx"
+ echo 'graph_args --base 1000 -l 0 '
+ echo 'graph_period second'
+ echo 'graph_vlabel Exchanged / ${graph_period}'
+ echo 'graph_category RADIUS'
+
+ echo 'none.label no loss'
+ echo 'none.info Responses received after first request'
+ echo 'none.type GAUGE'
+ echo 'none.min 0'
+
+ echo 'one.label 1'
+ echo 'one.info Responses received after one retransmission'
+ echo 'one.type GAUGE'
+ echo 'one.min 0'
+
+ echo 'two.label 2'
+ echo 'two.info Responses received after two retransmissions'
+ echo 'two.type GAUGE'
+ echo 'two.min 0'
+
+ echo 'three.label 3'
+ echo 'three.info Responses received after three retransmissions'
+ echo 'three.type GAUGE'
+ echo 'three.min 0'
+
+ echo 'four.label 4'
+ echo 'four.info Responses received after four retransmissions'
+ echo 'four.type GAUGE'
+ echo 'four.min 0'
+
+ echo 'more.label more'
+ echo 'more.info Responses received after more than four retransmissions'
+ echo 'more.type GAUGE'
+ echo 'more.min 0'
+
+ echo 'lost.label lost'
+ echo 'lost.info Requests to which no response was seen'
+ echo 'lost.type GAUGE'
+ echo 'lost.min 0'
+ ;;
+
+ radius_latency)
+ echo "graph_title ${pretty_pkt_type} latency"
+ echo 'graph_args --base 1000 -l 0 '
+ echo 'graph_vlabel Latency (ms)'
+ echo 'graph_category RADIUS'
+
+ echo 'smoothed.label smoothed avg'
+ echo 'smoothed.info Smoothed average'
+ echo 'smoothed.type GAUGE'
+ echo 'smoothed.min 0'
+
+ echo 'avg.label avg'
+ echo 'avg.info Average latency over the stats interval'
+ echo 'avg.type GAUGE'
+ echo 'avg.min 0'
+
+ echo 'high.label high'
+ echo 'high.info Highest latency over the stats interval'
+ echo 'high.type GAUGE'
+ echo 'high.min 0'
+
+ echo 'low.label low'
+ echo 'low.info Lowest latency over the stats interval'
+ echo 'low.type GAUGE'
+ echo 'low.min 0'
+ ;;
+
+ radius_count)
+ echo "graph_title $pretty_pkt_type counters"
+ echo 'graph_args --base 1000 -l 0 '
+ echo 'graph_period second'
+ echo 'graph_vlabel Packets / ${graph_period}'
+ echo 'graph_category RADIUS'
+
+ echo 'received.label received'
+ echo 'received.info Packets of this type received'
+ echo 'received.type GAUGE'
+ echo 'received.min 0'
+
+ echo 'linked.label linked'
+ echo 'linked.info Packets linked to another request or response'
+ echo 'linked.type GAUGE'
+ echo 'linked.min 0'
+
+ echo 'unlinked.label unlinked'
+ echo 'unlinked.info Packets not linked to another request or response'
+ echo 'unlinked.type GAUGE'
+ echo 'unlinked.min 0'
+
+ echo 'reused.label reused'
+ echo 'reused.info Request which (prematurely) re-used the same ID as a previous request'
+ echo 'reused.type GAUGE'
+ echo 'reused.min 0'
+ ;;
+ *)
+ echo "env.type ($type) is invalid must be radius_rtx, radius_latency, or radius_count" >&2
+ exit -1
+ esac
+ exit 0
+fi
+
+HOST=${host:-`hostname -f`}
+INSTANCE=${instance:-'radsniff'}
+RRD_PATH=${rrd_path:-"/var/lib/collectd/rrd/${HOST}/${INSTANCE}-exchanged"}
+RRD_PATH="${RRD_PATH}/${type}-${pkt_type}.rrd"
+RRD_RES=${rrd_res:-300}
+
+if [ ! -e "$RRD_PATH" ]; then
+ echo "rrd file '$RRD_PATH' does not exist" >&2
+ exit -1
+fi
+
+fetch_data()
+{
+ # RRD tool doesn't always select the correct period (seems
+ # to round up and give us -nan results) in the interest of
+ # gap free graphing, we attempt to get the last two periods
+ # worth of data, and then use the newest non -nan one.
+ # It's not perfect and should be fixed at some point...
+ rrd_last=`rrdtool fetch "$RRD_PATH" $1 -r $RRD_RES -e $(expr $(date +%s) / $RRD_RES \* $RRD_RES) -s end-$(expr $RRD_RES \* 2)`; ret=$?
+ if [ $ret -ne 0 ]; then
+ echo "$rrd_last" >&2
+ exit $ret
+ fi
+ echo "$rrd_last" | head -1
+ echo "$rrd_last" | grep '^[0-9]*:' | grep -v -E '^[0-9]*:( -nan)*$' | tail -1
+}
+
+fetch_data_column()
+{
+ echo "$(fetch_data $1)" | tail -1 | cut -d ' ' -f $(expr $2 + 2)
+}
+
+case "$type" in
+ radius_rtx)
+ col=2
+ rrd_data=$(fetch_data 'AVERAGE')
+ for var in `echo "$rrd_data" | head -1`; do
+ case "$var" in
+ 1) printf "one.value ";;
+ 2) printf "two.value ";;
+ 3) printf "three.value ";;
+ 4) printf "four.value ";;
+ *) printf "$var.value ";;
+ esac
+ echo "$rrd_data" | tail -1 | cut -d ' ' -f $col
+ col=`expr $col + 1`
+ done
+ ;;
+
+ radius_count)
+ col=2
+ rrd_data=$(fetch_data 'AVERAGE')
+ for var in `echo "$rrd_data" | head -1`; do
+ printf "$var.value "
+ echo "$rrd_data" | tail -1 | cut -d ' ' -f $col
+ col=`expr $col + 1`
+ done
+ ;;
+
+ radius_latency)
+ printf "smoothed.value "
+ fetch_data_column 'AVERAGE' 0
+
+ printf "avg.value "
+ fetch_data_column 'AVERAGE' 1
+
+ # Averages here are unacceptable because we use this to detect
+ # abnormally long delays in responses, and if we average all the highs
+ # over a five minute period, transient spikes in latency may be lost.
+ printf "high.value "
+ fetch_data_column 'MAX' 2
+
+ # Again we need the lowest value of the set here, as an abnormally
+ # quick response may indicate something is wrong.
+ printf "low.value "
+ fetch_data_column 'MIN' 3
+ ;;
+esac
+exit 0
diff --git a/scripts/osx/README b/scripts/osx/README
new file mode 100644
index 0000000..ee621d6
--- /dev/null
+++ b/scripts/osx/README
@@ -0,0 +1,2 @@
+cp ./org.freeradius.radiusd.plist /System/Library/LaunchDaemons
+launchctl load -w /System/Library/LaunchDaemons/org.freeradius.radiusd.plist
diff --git a/scripts/osx/org.freeradius.radiusd.plist b/scripts/osx/org.freeradius.radiusd.plist
new file mode 100644
index 0000000..5f593a5
--- /dev/null
+++ b/scripts/osx/org.freeradius.radiusd.plist
@@ -0,0 +1,21 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
+<plist version="1.0">
+<dict>
+ <key>Disabled</key>
+ <true/>
+ <key>EnableTransactions</key>
+ <true/>
+ <key>KeepAlive</key>
+ <true/>
+ <key>RunAtLoad</key>
+ <true/>
+ <key>Label</key>
+ <string>org.freeradius.radiusd</string>
+ <key>ProgramArguments</key>
+ <array>
+ <string>/usr/sbin/radiusd</string>
+ <string>-f</string>
+ </array>
+</dict>
+</plist>
diff --git a/scripts/raddebug b/scripts/raddebug
new file mode 100755
index 0000000..c1d6a2a
--- /dev/null
+++ b/scripts/raddebug
@@ -0,0 +1,140 @@
+#!/bin/sh
+######################################################################
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA
+#
+# Copyright (C) 2009 Network RADIUS SARL <info@networkradius.com>
+#
+######################################################################
+#
+# This script assumes that "radmin" is in PATH, and that the user
+# running this script has permission to connect to the radmin socket,
+# and to read/write files in the "logdir" directory. If none of this is
+# true, then it won't work.
+#
+# Usage: raddebug [-c condition] [-d directory] [-n name] [-D dictdir] [-i client-ip-address] [-I client-ipv6-address] [-f socket_file] [-t timeout] [-u username]
+#
+#
+
+usage() {
+ printf "Usage: %s: [-c condition] [-d directory] [-n name] [-D dictdir] [-i client-ip-address] [-I client-ipv6-address] [-f socket_file] [-t timeout] [-u user]\n" $(basename $0) >&2
+ exit 2
+}
+
+extra=
+condition=1
+timeout=60
+while getopts 'd:n:D:c:i:I:f:t:u:' OPTION
+do
+ case $OPTION in
+ c) condition="$OPTARG"
+ ;;
+ d) extra="$extra -d $OPTARG"
+ ;;
+ n) extra="$extra -n $OPTARG"
+ ;;
+ D) extra="$extra -D $OPTARG"
+ ;;
+ i) x="(Packet-Src-IP-Address == $OPTARG)"
+ if [ "$condition" = "" ]; then
+ condition="$x"
+ else
+ condition="$condition && $x"
+ fi
+ ;;
+ I) x="(Packet-Src-IPv6-Address == $OPTARG)"
+ if [ "$condition" = "" ]; then
+ condition="$x"
+ else
+ condition="$condition && $x"
+ fi
+ ;;
+ f) extra="$extra -f $OPTARG"
+ ;;
+ t) timeout="$OPTARG"
+ [ "$timeout" = "0" ] && timeout=1000000
+ ;;
+ u) x="(User-Name == '$OPTARG')"
+ if [ "$condition" = "" ]; then
+ condition="$x"
+ else
+ condition="$condition && $x"
+ fi
+ ;;
+ ?) usage
+ ;;
+ esac
+done
+shift $(($OPTIND - 1))
+
+radmin="radmin $extra"
+
+#
+# Start off by turning off debugging.
+# If there's a problem, die.
+#
+$radmin -e "debug condition"
+if [ "$?" != "0" ]; then
+ exit 1
+fi
+
+#
+# Debug to a file, and then tell us where the file is.
+#
+outfile=`$radmin -e "debug file radmin.debug.$$" -e "show debug file"`
+group=`$radmin -e "debug file radmin.debug.$$" -e "show config security.group"`
+
+#
+# If there was an error setting the debug output, re-set the
+# debug condition, echo the error, and exit.
+#
+echo $outfile | grep 'ERROR' >/dev/null 2>&1
+if [ "$?" = "0" ]; then
+ $radmin -e "debug condition"
+ echo $outfile
+ exit 1
+fi
+
+#
+# Truncate the file, and ensure it's writable by radiusd
+#
+cp /dev/null $outfile
+[ "$group" != "" ] && chgrp $group $outfile
+chmod g+w $outfile
+
+TAILPID=$$
+
+#
+# Set the trap to clean up on exit and any interrupts.
+#
+trap '$radmin -e "debug condition" -e "debug file"; rm -f $outfile;kill -TERM $TAILPID;exit 0' 1 2 15
+
+#
+# Set the debug condition
+#
+$radmin -e "debug condition \"$condition\"" | grep -I 'error'
+if [ $? -eq 0 ]; then
+ exit 1
+fi
+
+#
+# Print the output, and wait for "timeout". Then, stop printing.
+#
+tail -f $outfile &
+TAILPID=$!
+sleep $timeout
+kill -TERM $TAILPID
+$radmin -e "debug condition" -e "debug file"
+rm -f $outfile
diff --git a/scripts/radiusd.sh b/scripts/radiusd.sh
new file mode 100644
index 0000000..e791dff
--- /dev/null
+++ b/scripts/radiusd.sh
@@ -0,0 +1,17 @@
+#!/bin/sh
+#
+# The purpose of this script is to forcibly load the *correct* version
+# of OpenSSL for FreeRADIUS, when you have more than one version of OpenSSL
+# installed on your system.
+#
+# You'll have to edit the directories to the correct location
+# for your local system.
+#
+# $Id$
+#
+
+LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:/usr/local/ssl/lib:/usr/local/radius/lib
+LD_PRELOAD=/usr/local/ssl/lib/libcrypto.so
+
+export LD_LIBRARY_PATH LD_PRELOAD
+exec /usr/local/radius/sbin/radiusd $@
diff --git a/scripts/radtee b/scripts/radtee
new file mode 100755
index 0000000..78b4bcb
--- /dev/null
+++ b/scripts/radtee
@@ -0,0 +1,563 @@
+#!/usr/bin/env python2
+from __future__ import with_statement
+
+# RADIUS comparison tee v1.0
+# Sniffs local RADIUS traffic, replays incoming requests to a test
+# server, and compares the sniffed responses with the responses
+# generated by the test server.
+#
+# Copyright (c) 2009, Frontier Communications
+# Copyright (c) 2010, John Morrissey <jwm@horde.net>
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the Free
+# Software Foundation; either version 2 of the License, or (at your option)
+# any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+# for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc., 59
+# Temple Place, Suite 330, Boston, MA 02111-1307, USA.
+
+# Requires
+# ========
+# - python 2.4 or newer
+# - impacket
+# - pcapy
+# - pyrad, ideally 1.2 or newer
+
+# Output
+# ======
+# - .: 50 successful, matching responses processed.
+# - C=x.x.x.x: Ignored packet sniffed from unknown client.
+# - D: Dropped sniffed packet due to processing bottleneck. Consider
+# increasing THREADS.
+# - I: Invalid/unparseable packet sniffed.
+# - Mreq: Response was sniffed without sniffing a corresponding request.
+# - Mresp: Request was sniffed without sniffing a corresponding response.
+# - T: Request to test server timed out.
+
+import fcntl
+from getopt import gnu_getopt, GetoptError
+import os
+import Queue
+import re
+import signal
+import socket
+import struct
+import sys
+import thread
+from threading import Thread
+from time import sleep, time
+
+from impacket.ImpactDecoder import EthDecoder
+import pcapy
+from pyrad.client import Client
+from pyrad.dictionary import Dictionary
+from pyrad import packet
+
+
+TEST_DEST = 'server.example.com'
+TEST_SECRET = 'examplesecret'
+
+# Dictionary to use when decoding RADIUS packets. pyrad earlier than
+# v1.2 can't parse $INCLUDE directives, so you must combine FreeRADIUS'
+# dictionary manually, with something like this:
+#
+# import re
+# import sys
+#
+# def combine(file):
+# for line in open(file):
+# matches = re.search(r'^\$INCLUDE\s+(.*)$', line)
+# if not matches:
+# sys.stdout.write(line)
+# continue
+#
+# combine(matches.group(1))
+#
+# combine('/etc/freeradius/dictionary')
+DICTIONARY = '/etc/freeradius/dictionary'
+
+# Number of worker threads to run.
+THREADS = 32
+
+# Mapping of RADIUS request source addresses to shared secrets,
+# so we can decode incoming RADIUS requests.
+#
+# For example:
+# '127.0.0.1': 'test',
+CLIENTS = {
+}
+
+# Ignore any sniffed requests from these IP addresses.
+IGNORE_CLIENTS = [
+]
+
+# Expected mismatches to ignore and consider the packet matching.
+# Only the differences are compared to these items, so only the
+# differing attrs need be listed in the attrs array.
+#
+# Examples:
+# - Ignore mismatched AccessRejects whose sole difference is a
+# Reply-Message attribute with the values given.
+# {
+# 'sniffed': {
+# 'code': packet.AccessReject,
+# 'attrs': [
+# 'Reply-Message=Request Denied',
+# ],
+# },
+# 'test': {
+# 'code': packet.AccessReject,
+# 'attrs': [
+# 'Reply-Message=Account is disabled.',
+# ],
+# }
+# },
+#
+# - Ignore mismatched AccessRejects with Reply-Message=Request Denied
+# and arbitrary Cisco dns-servers in the sniffed packet, and
+# no Reply-Message and Cisco-AVPair attrs in the response from the
+# test RADIUS server.
+# {
+# 'sniffed': {
+# 'code': packet.AccessReject,
+# 'attrs': [
+# 'Reply-Message=Request Denied',
+# 'regex:^Cisco-AVPair=ip:dns-servers=.*$',
+# ],
+# },
+# 'test': {
+# 'code': packet.AccessReject,
+# 'attrs': [
+# ],
+# }
+# },
+#
+# - Only apply this stanza to sniffed requests with
+# 'User-Name= user@example.com' (note the leading whitespace).
+# {
+# 'check': [
+# 'User-Name= user@example.com',
+# ],
+# 'sniffed': {
+# 'code': packet.AccessReject,
+# 'attrs': [
+# 'Reply-Message=Request Denied',
+# ],
+# },
+# 'test': {
+# 'code': packet.AccessAccept,
+# 'attrs': [
+# 'Service-Type=Framed-User',
+# 'Framed-Protocol=PPP',
+# 'Framed-IP-Address=255.255.255.255',
+# 'Framed-MTU=1500',
+# 'Framed-Compression=Van-Jacobson-TCP-IP',
+# ],
+# }
+# },
+IGNORE = [
+]
+
+
+QUEUE = Queue.Queue(maxsize=25000)
+DICT = Dictionary(DICTIONARY)
+
+def code2str(code):
+ if code == packet.AccessRequest:
+ return "Access-Request"
+ elif code == packet.AccessAccept:
+ return "Access-Accept"
+ elif code == packet.AccessReject:
+ return "Access-Reject"
+ elif code == packet.AccountingRequest:
+ return "Accounting-Request"
+ elif code == packet.AccountingResponse:
+ return "Accounting-Response"
+ elif code == packet.AccessChallenge:
+ return "Access-Challenge"
+ elif code == packet.StatusServer:
+ return "Status-Server"
+ elif code == packet.StatusClient:
+ return "Status-Client"
+ elif code == packet.DisconnectRequest:
+ return "Disconnect-Request"
+ elif code == packet.DisconnectACK:
+ return "Disconnect-ACK"
+ elif code == packet.DisconnectNAK:
+ return "Disconnect-NAK"
+ elif code == packet.CoARequest:
+ return "CoA-Request"
+ elif code == packet.CoAACK:
+ return "CoA-ACK"
+ elif code == packet.CoANAK:
+ return "CoA-NAK"
+
+def handlePacket(header, data):
+ """Place captured packets in the queue to be picked up
+ by worker threads."""
+
+ global QUEUE
+
+ try:
+ QUEUE.put_nowait(data)
+ except Queue.Full:
+ sys.stdout.write('D')
+ sys.stdout.flush()
+
+def ignore_applies(pkt, ignore):
+ """Determine whether an ignore stanza (based on its check
+ items) applies to a packet."""
+
+ # All check items must match for this ignore stanza to apply.
+ stanza_applies = True
+ for pair in ignore.get('check', []):
+ attr, value = pair.split('=')
+
+ if attr not in pkt:
+ return False
+ if value.startswith('regex:'):
+ if not re.search(value.replace('regex:', '', 1), value):
+ return False
+ elif pkt[attr] != value:
+ return False
+
+ return True
+
+def ignores_match(pkt, mismatched, ignore):
+ """Determine whether mismatched AV pairs remain after accounting
+ for ignored differences."""
+
+ non_regex_ignore = [
+ q
+ for q
+ in ignore['attrs']
+ if not q.startswith('regex:')
+ ]
+ regex_ignore = [
+ q
+ for q
+ in ignore['attrs']
+ if q.startswith('regex:')
+ ]
+
+ unmatched_av = mismatched[:]
+ unmatched_rules = ignore['attrs'][:]
+ for av in mismatched:
+ if av in non_regex_ignore:
+ unmatched_av.remove(av)
+ unmatched_rules.remove(av)
+ continue
+ for regex in regex_ignore:
+ if re.search(regex.replace('regex:', '', 1), av):
+ unmatched_av.remove(av)
+ if regex in unmatched_rules:
+ unmatched_rules.remove(regex)
+ break
+
+ if unmatched_av or unmatched_rules:
+ return False
+ return True
+
+def matches(req, sniffed_pkt, test_pkt):
+ """Determine whether a response from the test server matches
+ the response sniffed from the wire, accounting for ignored
+ differences."""
+
+ global IGNORE
+
+ mis_attrs_sniffed = []
+ for k in sniffed_pkt.keys():
+ if sorted(sniffed_pkt[k]) == sorted(test_pkt.get(k, [])):
+ continue
+ mis_attrs_sniffed.append('%s=%s' % (
+ k, ', '.join([str(v) for v in sorted(sniffed_pkt[k])])))
+
+ mis_attrs_test = []
+ for k in test_pkt.keys():
+ if sorted(test_pkt[k]) == sorted(sniffed_pkt.get(k, [])):
+ continue
+ mis_attrs_test.append('%s=%s' % (
+ k, ', '.join([str(v) for v in sorted(test_pkt[k])])))
+
+ # The packets match without having to consider any ignores.
+ if sniffed_pkt.code == test_pkt.code and \
+ not mis_attrs_sniffed and not mis_attrs_test:
+ return True
+
+ for ignore in IGNORE:
+ if not ignore_applies(req, ignore):
+ continue
+
+ if ignore['sniffed']['code'] != sniffed_pkt.code or \
+ ignore['test']['code'] != test_pkt.code:
+ continue
+
+ if ignores_match(sniffed_pkt, mis_attrs_sniffed, i['sniffed']):
+ return True
+ if ignores_match(test_pkt, mis_attrs_test, i['test']):
+ return True
+
+ return False
+
+def log_mismatch(nas, req, passwd, expected, got):
+ """Emit notification that the test server has returned a response
+ that differs from the sniffed response."""
+
+ print 'Mismatch: %s' % nas
+
+ print 'Request: %s' % code2str(req.code)
+ for key in req.keys():
+ if key == 'User-Password':
+ print '\t%s: %s' % (key, passwd)
+ continue
+ print '\t%s: %s' % (
+ key, ', '.join([str(v) for v in req[key]]))
+
+ print 'Expected: %s' % code2str(expected.code)
+ for key in expected.keys():
+ print '\t%s: %s' % (
+ key, ', '.join([str(v) for v in expected[key]]))
+
+ print 'Got: %s' % code2str(got.code)
+ for key in got.keys():
+ print '\t%s: %s' % (
+ key, ', '.join([str(v) for v in got[key]]))
+
+ print
+
+REQUESTS = {}
+REQUESTS_LOCK = thread.allocate_lock()
+NUM_SUCCESSFUL = 0
+def check_for_match(key, req_resp):
+ """Send a copy of the original request to the test server and
+ determine whether the response matches the response sniffed from
+ the wire."""
+
+ global DICT, NUM_SUCCESSFUL, TEST_DEST, TEST_SECRET
+ global REQUESTS, REQUESTS_LOCK
+
+ client = Client(server=TEST_DEST,
+ secret=TEST_SECRET, dict=DICT)
+ fwd_req = client.CreateAuthPacket(code=packet.AccessRequest)
+ fwd_req.authenticator = req_resp['req']['pkt'].authenticator
+
+ keys = req_resp['req']['pkt'].keys()
+ for k in keys:
+ for value in req_resp['req']['pkt'][k]:
+ fwd_req.AddAttribute(k, value)
+ if 'User-Password' in keys:
+ fwd_req['User-Password'] = fwd_req.PwCrypt(req_resp['req']['passwd'])
+ if 'NAS-IP-Address' in fwd_req:
+ del fwd_req['NAS-IP-Address']
+ fwd_req.AddAttribute('NAS-IP-Address', req_resp['req']['ip'])
+
+ try:
+ test_reply = client.SendPacket(fwd_req)
+ except:
+ # Request to test server timed out.
+ sys.stdout.write('T')
+ sys.stdout.flush()
+ with REQUESTS_LOCK:
+ del REQUESTS[key]
+ return
+
+ if not matches(req_resp['req']['pkt'],
+ req_resp['response']['pkt'], test_reply):
+
+ print
+ log_mismatch(req_resp['req']['ip'],
+ req_resp['req']['pkt'],
+ req_resp['req']['passwd'],
+ req_resp['response']['pkt'], test_reply)
+
+ with REQUESTS_LOCK:
+ # Occasionally, this key isn't present. Maybe retransmissions
+ # due to a short timeout on the remote RADIUS client's end
+ # and a subsequent race?
+ if key in REQUESTS:
+ del REQUESTS[key]
+
+ NUM_SUCCESSFUL += 1
+ if NUM_SUCCESSFUL % 50 == 0:
+ sys.stdout.write('.')
+ sys.stdout.flush()
+
+class RadiusComparer(Thread):
+ def run(self):
+ global DICT, IGNORE_CLIENTS, QUEUE, REQUESTS, REQUESTS_LOCK
+
+ while True:
+ data = QUEUE.get()
+ if not data:
+ return
+
+ frame = EthDecoder().decode(data)
+ ip = frame.child()
+ udp = ip.child()
+ rad_raw = udp.child().get_buffer_as_string()
+
+ try:
+ pkt = packet.Packet(dict=DICT, packet=rad_raw)
+ except packet.PacketError:
+ sys.stdout.write('I')
+ sys.stdout.flush()
+ continue
+
+ if ip.get_ip_src() in IGNORE_CLIENTS:
+ continue
+
+ if pkt.code == packet.AccessRequest:
+ auth = packet.AuthPacket(data[42:])
+ auth.authenticator = pkt.authenticator
+ auth.secret = clients.CLIENTS.get(ip.get_ip_src(), None)
+ if not auth.secret:
+ # No configuration for this client.
+ sys.stdout.write('C=%s' % ip.get_ip_src())
+ sys.stdout.flush()
+ continue
+ passwd = None
+ if 'User-Password' in pkt.keys():
+ passwd = auth.PwDecrypt(pkt['User-Password'][0])
+
+ key = '%s:%d:%d' % (ip.get_ip_src(),
+ udp.get_uh_sport(), pkt.id)
+ do_compare = None
+ with REQUESTS_LOCK:
+ if key not in REQUESTS:
+ REQUESTS[key] = {}
+ REQUESTS[key]['req'] = {
+ 'ip': ip.get_ip_src(),
+ 'port': udp.get_uh_sport(),
+ 'pkt': pkt,
+ 'passwd': passwd,
+ }
+ REQUESTS[key]['tstamp'] = time()
+ if 'response' in REQUESTS[key]:
+ do_compare = REQUESTS[key]
+
+ if do_compare:
+ check_for_match(key, do_compare)
+ elif pkt.code in [packet.AccessAccept, packet.AccessReject]:
+ key = '%s:%d:%d' % (ip.get_ip_dst(),
+ udp.get_uh_dport(), pkt.id)
+ do_compare = None
+ with REQUESTS_LOCK:
+ if key not in REQUESTS:
+ REQUESTS[key] = {}
+ REQUESTS[key]['response'] = {
+ 'ip': ip.get_ip_src(),
+ 'port': udp.get_uh_sport(),
+ 'pkt': pkt,
+ }
+ REQUESTS[key]['tstamp'] = time()
+ if 'req' in REQUESTS[key]:
+ do_compare = REQUESTS[key]
+
+ if do_compare:
+ check_for_match(key, do_compare)
+ else:
+ print >>sys.stderr, \
+ 'Unsupported packet type received: %d' % pkt.code
+
+class RequestsPruner(Thread):
+ """Prune stale request state periodically."""
+
+ def run(self):
+ global REQUESTS, REQUESTS_LOCK
+
+ while True:
+ sleep(30)
+
+ now = time()
+ with REQUESTS_LOCK:
+ keys = REQUESTS.keys()
+ for key in keys:
+ if REQUESTS[key]['tstamp'] + 60 >= now:
+ continue
+
+ if 'req' not in REQUESTS[key]:
+ sys.stdout.write('Mreq')
+ sys.stdout.flush()
+ if 'response' not in REQUESTS[key]:
+ sys.stdout.write('Mresp')
+ sys.stdout.flush()
+
+ del REQUESTS[key]
+
+def usage():
+ print 'Usage: %s INTERFACE' % os.path.basename(sys.argv[0])
+ print ''
+ print ' -h, --help display this help and exit'
+
+if __name__ == '__main__':
+ global PID_FILE
+
+ progname = os.path.basename(sys.argv[0])
+
+ try:
+ options, iface = gnu_getopt(sys.argv[1:], 'h', ['help'])
+ except GetoptError, e:
+ print '%s: %s' % (progname, str(e))
+ usage()
+ sys.exit(1)
+
+ for option in options:
+ if option[0] == '-h' or option[0] == '--help':
+ usage()
+ sys.exit(0)
+
+ if len(iface) != 1:
+ usage()
+ sys.exit(1)
+ iface = iface[0]
+
+ if os.geteuid() != 0:
+ print >>sys.stderr, '%s: must be run as root.' % progname
+ sys.exit(1)
+
+ for i in range(0, THREADS):
+ RadiusComparer().start()
+ RequestsPruner().start()
+
+ s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
+
+ # This is Linux-specific, and there's no tenable way to make
+ # it portable.
+ #
+ # Unfortunately, we need the interface's IP address to filter out
+ # only RADIUS traffic destined for this host (avoiding traffic sent
+ # *by* this host, such as proxied requests or our own traffic) to
+ # avoid replaying requests not directed to the local radiusd.
+ #
+ # Furthermore, this only obtains the interface's *first* IP address,
+ # so we won't notice traffic sent to additional IP addresses on
+ # the given interface.
+ #
+ # This is Good Enough For Me given the effort I care to invest.
+ # Of course, patches enhancing this are welcome.
+ if os.uname()[0] == 'Linux':
+ local_ipaddr = socket.inet_ntoa(fcntl.ioctl(
+ s.fileno(),
+ 0x8915, # SIOCGIFADDR
+ struct.pack('256s', iface[:15])
+ )[20:24])
+ else:
+ raise Exception('Only the Linux operating system is currently supported.')
+
+ p = pcapy.open_live(iface, 1600, 0, 100)
+ p.setfilter('''
+ (dst host %s and udp and dst port 1812) or
+ (src host %s and udp and src port 1812)''' % \
+ (local_ipaddr, local_ipaddr))
+ while True:
+ try:
+ p.dispatch(1, handlePacket)
+ except KeyboardInterrupt:
+ sys.exit(0)
diff --git a/scripts/raduat b/scripts/raduat
new file mode 100755
index 0000000..9778b2a
--- /dev/null
+++ b/scripts/raduat
@@ -0,0 +1,366 @@
+#!/bin/bash
+
+# Simple test wrapper around radclient to allow automated UATs
+#
+# Author Arran Cudbard-Bell <a.cudbardb@freeradius.org>
+# Copyright 2014-2015 Arran Cudbard-Bell <a.cudbardb@freeradius.org>
+# Copyright 2015 The FreeRADIUS Project
+
+# A POSIX variable
+OPTIND=1 # Reset in case getopts has been used previously in the shell.
+
+# Environmental variables
+: ${TESTDIR=$(dirname $0)"/tests"}
+: ${RADCLIENT='radclient'}
+: ${FILTER_SUFFIX='_expected'}
+: ${DICTPATH=$(dirname $0)/share}
+PATH="$(dirname $0)/bin:${PATH}"
+
+# Initialize our own variables
+verbose=0
+cluster=
+role=
+type=
+parallel=40
+retries=3
+timeout=2
+target='127.0.0.1'
+secret='testing123'
+
+# Some very basic logging functions
+function ERROR
+{
+ echo "$@" 1>&2;
+}
+
+function INFO
+{
+ echo "$@"
+}
+
+function DEBUG
+{
+ if [ $verbose -gt 0 ]; then
+ echo "$@"
+ fi
+}
+
+function show_help
+{
+ echo $(basename $0)" [options] [-- <test_glob0> <test_glob1> <test_globN>]"
+ echo " -h Display this help message."
+ echo " -H <host>[:port] Send test packets to specified host and port (defaults to 127.0.0.1)"
+ echo " -v Verbose mode."
+ echo " -p <number> Run tests in parallel (defaults to 20)."
+ echo " -s <secret> Shared secret."
+ if [ ! -z "$role_types" ]; then
+ echo " -c <cluster> Specify cluster type one of ($cluster_types)."
+ echo " -r <type> Specify server role one of ($role_types)."
+ echo
+ echo "Note: Test path globs are relative to ${TESTDIR}/<cluster>/<type>/"
+ fi
+
+ echo
+ echo "For role based test file layout create test files under ${TESTDIR}/<cluster>/<type>"
+ echo "Where <cluster> and <type> are substrings found in the FQDN of <host>."
+ echo "For simplified test layout create test files under ${TESTDIR}"
+ echo
+ echo "The directory containing the tests should contains pairs of request files and filter files."
+ echo "The request file name must contain 'test<num><num><num>."
+ echo "The filter name must match the test name but with a '${FILTER_SUFFIX}' suffix."
+ echo "For example:"
+ echo " ${TESTDIR}/test000_my_first_test"
+ echo " ${TESTDIR}/test000_my_first_test${FILTER_SUFFIX}"
+ echo
+ echo "The directory containing the tests may have multiple subdirectories to group the tests."
+}
+
+RADCLIENT=$(command -v "$RADCLIENT")
+if [ ! -x "$RADCLIENT" ]; then
+ ERROR "Can't find radclient binary, modify your \$PATH or set RADCLIENT"
+ exit 64
+fi
+
+if [ ! -d "$TESTDIR" ]; then
+ ERROR "Test dir $TESTDIR does not exist, create it or specify it with TESTDIR=<dir>"
+ show_help
+ exit 64
+fi
+
+# Definitions (build these dynamically by looking at the files under tests)
+cluster_dirs=$(find "$TESTDIR/" -mindepth 1 -maxdepth 1 -type d)
+cluster_types=$(echo $cluster_dirs | sed 's/\s/ /g')
+
+role_types=
+for i in $cluster_dirs; do
+ for j in $(find "$TESTDIR/$(basename $i)/" -mindepth 1 -maxdepth 1 -type d); do
+ role=$(basename "$j")
+ if [ "$role_types" == '' ]; then
+ role_types="$role"
+ else
+ role_types+="\n$role"
+ fi
+ done
+done
+
+if [ -z "$role_types" ]; then
+ DEBUG "Using simple test file layout"
+else
+ DEBUG "Using role based test file layout"
+ role_types=$(echo -e "$role_types" | sort | uniq) # Remove duplicates
+ role_types=$(echo $role_types | sed 's/\s/ /g') # Change \n back to spaces
+fi
+
+while getopts "h?H:vc:r:s:p:" opt; do
+ case "$opt" in
+ h|\?)
+ show_help
+ exit 0
+ ;;
+
+ v)
+ verbose=1
+ ;;
+
+ c)
+ found=0
+ for i in $cluster_types; do
+ if [ "$i" == "$OPTARG" ]; then
+ found=1
+ fi
+ done
+ if [ $found -ne 1 ]; then
+ ERROR "'$OPTARG' is not a valid cluster type"
+ show_help
+ exit 64
+ fi
+ cluster="$OPTARG"
+ ;;
+
+ r)
+ found=0
+ for i in $role_types; do
+ if [ "$i" == "$OPTARG" ]; then
+ found=1
+ fi
+ done
+ if [ $found -ne 1 ]; then
+ ERROR "'$OPTARG' is not a valid role type"
+ show_help
+ exit 64
+ fi
+ role="$OPTARG"
+ ;;
+
+ s)
+ secret="$OPTARG"
+ ;;
+
+ p)
+ if ! echo "$OPTARG" | grep -E '^[0-9]+$' > /dev/null; then
+ ERROR "Non integer argument '$OPTARG' specified for -p"
+ show_help
+ exit 64
+ fi
+ parallel=$OPTARG
+ ;;
+
+ H)
+ target="$OPTARG"
+ ;;
+
+ esac
+done
+
+shift $((OPTIND-1))
+
+[ "$1" = "--" ] && shift
+test_files=$@
+
+#
+# Match keywords from the hostname to clusters or roles
+#
+if [ ! -z "$role_types" ]; then
+ this_host=$(hostname -f)
+ for tok in $(echo "$this_host" | sed 's/\./ /g'); do
+ for key in ${cluster_types}; do
+ if echo "$tok" | grep "$key" > /dev/null && [ "$cluster" = '' ]; then cluster="$key"; fi
+ done
+ for key in ${role_types}; do
+ if echo "$tok" | grep "$key" > /dev/null && [ "$role" = '' ]; then role="$key"; fi
+ done
+ done
+
+ if [ "$cluster" == '' ]; then
+ ERROR "Couldn't determine the cluster $this_host belongs to";
+ show_help
+ exit 64;
+ fi
+
+ if [ "$role" == '' ]; then
+ ERROR "Couldn't determine the role $this_host performs";
+ show_help
+ exit 64;
+ fi
+
+ test_path="${TESTDIR}/${cluster}/${role}"
+#
+# Otherwise just use the tests in the test dir
+#
+else
+ test_path="${TESTDIR}"
+fi
+
+if [ "$test_files" != '' ]; then
+ tmp=
+ for glob in $test_files; do
+ # Filter out response files (makes wildcards easier), and expand the globs
+ for file in $(find "${test_path}" -depth -path "*${glob}" -and -not -path "*${FILTER_SUFFIX}" -and '(' -type f -or -type l ')'); do
+ tmp+="${file} "
+ done
+ done
+ test_files="${tmp}"
+else
+ # Lexicographical, depth-first
+ test_files=$(find "$test_path" -depth -path '*test[0-9][0-9][0-9]*' -and -not -path "*${FILTER_SUFFIX}" -and '(' -type f -or -type l ')')
+ if [ "$test_files" == '' ]; then
+ ERROR "No test files found in $test_path"
+ exit 64;
+ fi
+ INFO "Executing"$(echo "$test_files" | wc -l)" test(s) from ${test_path}"
+fi
+
+#
+# Check if we got any test files
+#
+if [ "$test_files" == '' ]; then
+ ERROR "No test files to process"
+ exit 1
+fi
+
+#
+# Output which files were going to be using for testing
+#
+if [ $verbose -eq 0 ]; then
+ INFO "Executing specified tests"
+ INFO "Use -v to see full list"
+else
+ INFO "Executing specified tests:"
+ for i in $test_files; do
+ DEBUG "$i"
+ done
+fi
+
+#
+# Figure out which tests we can munge into a single file which we can
+# use to parallelise testing
+#
+base=$(basename $0)
+packets=$(mktemp -t "${base}XXX") || exit 1
+filters=$(mktemp -t "${base}XXX") || exit 1
+
+args=
+file_args=
+serial_file_args=
+for i in $test_files; do
+	if [ ! -f "$i" -a ! -L "$i" ]; then
+		INFO "Skipping $i: not file"
+		continue
+	fi
+
+	if [ ! -r "$i" ]; then
+		INFO "Skipping $i: not readable (check permissions)"
+		continue
+	fi
+
+	expected="${i}${FILTER_SUFFIX}"
+	if [ ! -f "$expected" -a ! -L "$expected" ]; then
+		DEBUG "$i cannot be parallelised: Can't find 'expected' file"
+		file_args+=" -f \"$i\""
+		continue
+	fi
+
+	if [ ! -r "$expected" ]; then
+		INFO "$i cannot be parallelised: 'expected' file not readable"
+		file_args+=" -f \"${i}:${expected}\""
+		continue
+	fi
+
+	if head -n 1 "$i" | grep -i -E '^#\s*serial' > /dev/null; then
+		DEBUG "$i marked as serial only"
+		serial_file_args+=" -f \"${i}:${expected}\""
+		continue
+	fi
+
+	# Else add it to the master test file
+	printf '%s\n' "$(cat "$i")" >> "$packets"
+
+	# Add the name of the file so it appears in radclient debug output
+	# and can later be specified with -v -- <test> to drill down.
+	echo "Radclient-Test-Name := \""$(echo "$i" | sed -e "s@${test_path}/\?@@")"\"" >> "$packets"
+	echo >> "$packets"
+	printf '%s\n' "$(cat "$expected")" >> "$filters"
+	echo >> "$filters"
+done
+
+if [ `cat "$packets" | wc -l` -gt 0 ]; then
+ file_args+=" -f \"${packets}:${filters}\""
+fi
+
+if [ ! -z "$file_args" ]; then
+ args="$file_args"
+
+ if [ $verbose -ne 0 ]; then
+ args+=" -x"
+ fi
+
+ args+=" -s"
+ args+=" -t \"$timeout\""
+ args+=" -r \"$retries\""
+ args+=" -p \"$parallel\""
+ args+=" -D \"$DICTPATH\""
+ args+=" \"$target\""
+ args+=" auto"
+ args+=" \"$secret\""
+
+ DEBUG "Executing: $RADCLIENT $args"
+ eval $RADCLIENT $args; ret=$?
+ INFO "(Parallelised tests)"
+ INFO ""
+
+ rm -f "$packets" 2>&1 > /dev/null
+ rm -f "$filters" 2>&1 > /dev/null
+
+ if [ $ret -ne 0 ]; then
+ ERROR "One or more tests failed (radclient exited with $ret)"
+ exit $ret
+ fi
+fi
+
+if [ ! -z "$serial_file_args" ]; then
+ args="$serial_file_args"
+
+ if [ $verbose -ne 0 ]; then
+ args+=" -x"
+ fi
+
+ args+=" -s"
+ args+=" -t \"$timeout\""
+ args+=" -r \"$retries\""
+ args+=" -p 1"
+ args+=" -D \"$DICTPATH\""
+ args+=" \"$target\""
+ args+=" auto"
+ args+=" \"$secret\""
+
+ DEBUG "Executing: $RADCLIENT $args"
+ eval $RADCLIENT $args; ret=$?
+ INFO "(Serialised tests)"
+
+ if [ $ret -ne 0 ]; then
+ ERROR "One or more tests failed (radclient exited with $ret)"
+ exit $ret
+ fi
+fi
+
+exit 0
diff --git a/scripts/rc.radiusd.in b/scripts/rc.radiusd.in
new file mode 100755
index 0000000..4cc04ab
--- /dev/null
+++ b/scripts/rc.radiusd.in
@@ -0,0 +1,100 @@
+#!/bin/sh
+#
+# radiusd Start the radius daemon.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA
+#
+# Copyright (C) 2001-2008 The FreeRADIUS Project http://www.freeradius.org
+
+prefix=@prefix@
+exec_prefix=@exec_prefix@
+sbindir=@sbindir@
+localstatedir=@localstatedir@
+logdir=@logdir@
+rundir=${localstatedir}/run/radiusd
+sysconfdir=@sysconfdir@
+
+#
+# If you have issues with OpenSSL, uncomment these next lines.
+#
+# Something similar may work for MySQL, and you may also
+# have to LD_PRELOAD libz.so
+#
+#LD_LIBRARY_PATH=@OPENSSL_LIBS@
+#LD_RUN_PATH=@OPENSSL_LIBS@:
+#LD_PRELOAD=@OPENSSL_LIBS@libcrypto.so
+export LD_LIBRARY_PATH LD_RUN_PATH LD_PRELOAD
+
+RADIUSD=$sbindir/radiusd
+RADDBDIR=@raddbdir@
+DESC="FreeRADIUS"
+
+#
+# See 'man radiusd' for details on command-line options.
+#
+ARGS=""
+
+test -f $RADIUSD || exit 0
+test -f $RADDBDIR/radiusd.conf || exit 0
+
+#if [ ! -d $rundir ] ; then
+# mkdir $rundir
+# chown radmin:radius $rundir
+# chmod 775 $rundir
+#fi
+#
+#if [ ! -d $logdir ] ; then
+# mkdir $logdir
+# chown radmin:radius $logdir
+# chmod 770 $logdir
+# chmod g+s $logdir
+#fi
+#
+#if [ ! -f $logdir/radius.log ]; then
+# touch $logdir/radius.log
+#fi
+#
+#chown radmin:radius $logdir/radius.log
+#chmod 660 $logdir/radius.log
+
+case "$1" in
+	start)
+		echo -n "Starting $DESC:"
+		$RADIUSD $ARGS
+		echo "radiusd"
+		;;
+	stop)
+		[ -z "$2" ] && echo -n "Stopping $DESC: "
+		[ -f $rundir/radiusd.pid ] && kill -TERM `cat $rundir/radiusd.pid`
+		[ -z "$2" ] && echo "radiusd."
+		;;
+	reload|force-reload)
+		echo "Reloading $DESC configuration files."
+		[ -f $rundir/radiusd.pid ] && kill -HUP `cat $rundir/radiusd.pid`
+		;;
+	restart)
+		sh $0 stop quiet
+		sleep 3
+		sh $0 start
+		;;
+	check)
+		$RADIUSD -C $ARGS
+		;;
+	*)
+		echo "Usage: $0 {start|stop|reload|restart|check}"
+		exit 1
+esac
+
+exit 0
diff --git a/scripts/snmp-proxy/README b/scripts/snmp-proxy/README
new file mode 100644
index 0000000..d9741e7
--- /dev/null
+++ b/scripts/snmp-proxy/README
@@ -0,0 +1,32 @@
+	The files in this directory replace the old FreeRADIUS SNMP
+implementation with a new one.
+
+net-radius-freeradius-dictionary.diff
+ Patch to enable the Perl Net::RADIUS module to read the
+ FreeRADIUS dictionary file format.
+
+dictionary.hacked
+ Dictionary used by Perl Net::RADIUS if it is NOT patched.
+ Do NOT use this dictionary with the FreeRADIUS server!
+
+freeradius-snmp.pl
+ Perl module that implements the connection between SNMP
+ and FreeRADIUS.
+
+ See raddb/sites-available/status for information on using
+ Status-Server packets to obtain internal server statistics.
+
+make sure snmpd is agentx master (snmpd.conf):
+
+ master agentx
+
+run the script (no daemonizing support yet):
+
+ $ ./freeradius-snmp.pl
+
+then you can walk the tree (default oid):
+
+ $ snmpbulkwalk -On -v2c -cpublic localhost .1.3.6.1.2.1.67
+
+This code is ALPHA. Please test, and return any fixes back to the
+mailing list, or to bugs.freeradius.org.
diff --git a/scripts/snmp-proxy/dictionary.hacked b/scripts/snmp-proxy/dictionary.hacked
new file mode 100644
index 0000000..66b2159
--- /dev/null
+++ b/scripts/snmp-proxy/dictionary.hacked
@@ -0,0 +1,132 @@
+#
+# This is a dictionary that should be used by the Perl module Net::RADIUS,
+# if it has NOT been updated to parse the FreeRADIUS format dictionaries.
+#
+# It SHOULD NOT be used in the FreeRADIUS server or client!
+#
+ATTRIBUTE Message-Authenticator 80 octets
+
+
+VENDOR FreeRADIUS 11344
+
+ATTRIBUTE FreeRADIUS-Proxied-To 1 ipaddr
+
+
+#
+# This attribute is really a bitmask.
+#
+ATTRIBUTE FreeRADIUS-Statistics-Type 127 integer FreeRADIUS
+
+VALUE FreeRADIUS-Statistics-Type None 0 FreeRADIUS
+VALUE FreeRADIUS-Statistics-Type Authentication 1
+VALUE FreeRADIUS-Statistics-Type Accounting 2
+VALUE FreeRADIUS-Statistics-Type Proxy-Authentication 4
+VALUE FreeRADIUS-Statistics-Type Proxy-Accounting 8
+VALUE FreeRADIUS-Statistics-Type Internal 0x10
+VALUE FreeRADIUS-Statistics-Type Client 0x20
+VALUE FreeRADIUS-Statistics-Type Server 0x40
+VALUE FreeRADIUS-Statistics-Type Home-Server 0x80
+
+VALUE FreeRADIUS-Statistics-Type Auth-Acct 0x03
+VALUE FreeRADIUS-Statistics-Type Proxy-Auth-Acct 0x0c
+
+VALUE FreeRADIUS-Statistics-Type All 0x1f
+
+#
+# Global authentication statistics for packets received by the server.
+#
+ATTRIBUTE FreeRADIUS-Total-Access-Requests 128 integer FreeRADIUS
+ATTRIBUTE FreeRADIUS-Total-Access-Accepts 129 integer FreeRADIUS
+ATTRIBUTE FreeRADIUS-Total-Access-Rejects 130 integer FreeRADIUS
+ATTRIBUTE FreeRADIUS-Total-Access-Challenges 131 integer FreeRADIUS
+ATTRIBUTE FreeRADIUS-Total-Auth-Responses 132 integer FreeRADIUS
+ATTRIBUTE FreeRADIUS-Total-Auth-Duplicate-Requests 133 integer FreeRADIUS
+ATTRIBUTE FreeRADIUS-Total-Auth-Malformed-Requests 134 integer FreeRADIUS
+ATTRIBUTE FreeRADIUS-Total-Auth-Invalid-Requests 135 integer FreeRADIUS
+ATTRIBUTE FreeRADIUS-Total-Auth-Dropped-Requests 136 integer FreeRADIUS
+ATTRIBUTE FreeRADIUS-Total-Auth-Unknown-Types 137 integer FreeRADIUS
+
+#
+# Global statistics for auth packets sent by the server to all home servers
+#
+ATTRIBUTE FreeRADIUS-Total-Proxy-Access-Requests 138 integer FreeRADIUS
+ATTRIBUTE FreeRADIUS-Total-Proxy-Access-Accepts 139 integer FreeRADIUS
+ATTRIBUTE FreeRADIUS-Total-Proxy-Access-Rejects 140 integer FreeRADIUS
+ATTRIBUTE FreeRADIUS-Total-Proxy-Access-Challenges 141 integer FreeRADIUS
+ATTRIBUTE FreeRADIUS-Total-Proxy-Auth-Responses 142 integer FreeRADIUS
+ATTRIBUTE FreeRADIUS-Total-Proxy-Auth-Duplicate-Requests 143 integer FreeRADIUS
+ATTRIBUTE FreeRADIUS-Total-Proxy-Auth-Malformed-Requests 144 integer FreeRADIUS
+ATTRIBUTE FreeRADIUS-Total-Proxy-Auth-Invalid-Requests 145 integer FreeRADIUS
+ATTRIBUTE FreeRADIUS-Total-Proxy-Auth-Dropped-Requests 146 integer FreeRADIUS
+ATTRIBUTE FreeRADIUS-Total-Proxy-Auth-Unknown-Types 147 integer FreeRADIUS
+
+#
+# Global accounting statistics for packets received by the server.
+#
+ATTRIBUTE FreeRADIUS-Total-Accounting-Requests 148 integer FreeRADIUS
+ATTRIBUTE FreeRADIUS-Total-Accounting-Responses 149 integer FreeRADIUS
+ATTRIBUTE FreeRADIUS-Total-Acct-Duplicate-Requests 150 integer FreeRADIUS
+ATTRIBUTE FreeRADIUS-Total-Acct-Malformed-Requests 151 integer FreeRADIUS
+ATTRIBUTE FreeRADIUS-Total-Acct-Invalid-Requests 152 integer FreeRADIUS
+ATTRIBUTE FreeRADIUS-Total-Acct-Dropped-Requests 153 integer FreeRADIUS
+ATTRIBUTE FreeRADIUS-Total-Acct-Unknown-Types 154 integer FreeRADIUS
+
+#
+# Global statistics for acct packets sent by the server to all home servers
+#
+ATTRIBUTE FreeRADIUS-Total-Proxy-Accounting-Requests 155 integer FreeRADIUS
+ATTRIBUTE FreeRADIUS-Total-Proxy-Accounting-Responses 156 integer FreeRADIUS
+ATTRIBUTE FreeRADIUS-Total-Proxy-Acct-Duplicate-Requests 157 integer FreeRADIUS
+ATTRIBUTE FreeRADIUS-Total-Proxy-Acct-Malformed-Requests 158 integer FreeRADIUS
+ATTRIBUTE FreeRADIUS-Total-Proxy-Acct-Invalid-Requests 159 integer FreeRADIUS
+ATTRIBUTE FreeRADIUS-Total-Proxy-Acct-Dropped-Requests 160 integer FreeRADIUS
+ATTRIBUTE FreeRADIUS-Total-Proxy-Acct-Unknown-Types 161 integer FreeRADIUS
+
+#
+# Internal queues. Different packet types are put into different queues.
+#
+ATTRIBUTE FreeRADIUS-Queue-Len-Internal 162 integer FreeRADIUS
+ATTRIBUTE FreeRADIUS-Queue-Len-Proxy 163 integer FreeRADIUS
+ATTRIBUTE FreeRADIUS-Queue-Len-Auth 164 integer FreeRADIUS
+ATTRIBUTE FreeRADIUS-Queue-Len-Acct 165 integer FreeRADIUS
+ATTRIBUTE FreeRADIUS-Queue-Len-Detail 166 integer FreeRADIUS
+
+ATTRIBUTE FreeRADIUS-Stats-Client-IP-Address 167 ipaddr FreeRADIUS
+ATTRIBUTE FreeRADIUS-Stats-Client-Number 168 integer FreeRADIUS
+ATTRIBUTE FreeRADIUS-Stats-Client-Netmask 169 integer FreeRADIUS
+
+ATTRIBUTE FreeRADIUS-Stats-Server-IP-Address 170 ipaddr FreeRADIUS
+ATTRIBUTE FreeRADIUS-Stats-Server-Port 171 integer FreeRADIUS
+
+ATTRIBUTE FreeRADIUS-Stats-Server-Outstanding-Requests 172 integer FreeRADIUS
+ATTRIBUTE FreeRADIUS-Stats-Server-State 173 integer FreeRADIUS
+
+VALUE FreeRADIUS-Stats-Server-State Alive 0
+VALUE FreeRADIUS-Stats-Server-State Zombie 1
+VALUE FreeRADIUS-Stats-Server-State Dead 2
+
+#
+# When a home server is marked "dead" or "alive"
+#
+ATTRIBUTE FreeRADIUS-Stats-Server-Time-Of-Death 174 date FreeRADIUS
+ATTRIBUTE FreeRADIUS-Stats-Server-Time-Of-Life 175 date FreeRADIUS
+
+#
+# When this server was started. If start == hup, it hasn't been
+# hup'd yet. This is friendlier than having hup == 0 on start.
+#
+ATTRIBUTE FreeRADIUS-Stats-Start-Time 176 date FreeRADIUS
+ATTRIBUTE FreeRADIUS-Stats-HUP-Time 177 date FreeRADIUS
+
+#
+# Exponential moving average of home server response time
+# Window-1 is the average is calculated over "window" packets.
+# Window-10 is the average is calculated over "10 * window" packets.
+#
+# Both Window-1 and Window-10 are times in microseconds
+# (1/1000000 of a second).
+#
+ATTRIBUTE FreeRADIUS-Server-EMA-Window 178 integer FreeRADIUS
+ATTRIBUTE FreeRADIUS-Server-EMA-USEC-Window-1 179 integer FreeRADIUS
+ATTRIBUTE FreeRADIUS-Server-EMA-USEC-Window-10 180 integer FreeRADIUS
+
diff --git a/scripts/snmp-proxy/freeradius-snmp.pl b/scripts/snmp-proxy/freeradius-snmp.pl
new file mode 100644
index 0000000..f30fc7d
--- /dev/null
+++ b/scripts/snmp-proxy/freeradius-snmp.pl
@@ -0,0 +1,585 @@
+#!/usr/bin/perl
+#
+# Copyright (C) 2008 Sky Network Services.
+# Copyright (C) 2022 Network RADIUS.
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the same terms as Perl itself.
+#
+use strict;
+use warnings;
+
+use threads;
+use threads::shared;
+
+use Net::Radius::Packet;
+use Net::Radius::Dictionary;
+use NetSNMP::agent qw/:all/;
+use NetSNMP::ASN qw/:all/;
+use Socket qw(inet_ntop);
+use IO::Socket::INET;
+use Digest::HMAC_MD5;
+use Log::Log4perl qw/:easy/;
+#use Data::Dumper;
+#$Data::Dumper::Indent = 1;
+#$Data::Dumper::Sortkeys = 1;
+#$| = 1;
+
+my $cfg = {
+ snmp => {
+ agent => {
+ Name => 'freeradius-snmp',
+ AgentX => 1,
+ },
+ oid_root => '1.3.6.1.2.1.67',
+ oid_sub => {
+ 1 => [qw/auth proxyauth/],
+ 2 => [qw/acct proxyacct/],
+ },
+ },
+
+ radius => {
+ host => 'localhost',
+ port => 18120,
+ secret => 'adminsecret',
+# dictionary => '../radiusd/share/dictionary',
+ dictionary => 'dictionary.hacked',
+ refresh_rate => 20,
+ },
+
+ log => {
+ level => $WARN, # $DEBUG, $ERROR, etc.
+ layout => '%d{ISO8601} <%p> (%L) %m%n',
+ file => 'STDERR'
+ },
+
+ clients => 1, # Or 0 to disable
+};
+
+Log::Log4perl->easy_init($cfg->{log});
+
+INFO 'starting';
+my $running :shared;
+my %snmp_data :shared;
+my @snmp_data_k :shared;
+my %snmp_next :shared;
+
+INFO 'initializing snmp';
+my $agent = new NetSNMP::agent(%{$cfg->{snmp}->{agent}});
+
+radius_stats_init();
+$running = 1;
+
+$SIG{INT} = sub {
+ INFO 'stopping';
+ $running = 0;
+};
+
+#
+# Background updater thread
+#
+INFO 'launching radius client thread';
+threads->create(\&radius_updater);
+
+#
+# We export only the radiusAuthServ and radiusAccServ subtree
+#
+$agent->register(
+ $cfg->{snmp}->{agent}->{Name},
+ $cfg->{snmp}->{oid_root}.'.'.$_, \&snmp_handler) or die
+ foreach keys %{$cfg->{snmp}->{oid_sub}};
+
+INFO 'entering client main loop';
+$agent->agent_check_and_process(1) while $running;
+
+$agent->shutdown();
+
+$_->join() for threads->list();
+
+
+#
+# Initialize common radius client stuff
+#
+sub radius_stats_init {
+ our ( $d, $s, $rid );
+
+ $d = new Net::Radius::Dictionary;
+ $d->readfile($cfg->{radius}->{dictionary});
+ srand ($$ ^ time);
+ $rid = int rand 255;
+
+ $s = new IO::Socket::INET(
+ PeerHost => $cfg->{radius}->{host},
+ PeerPort => $cfg->{radius}->{port},
+ Proto => 'udp',
+ Timeout => 5) or die;
+
+}
+
+#
+# Build server status packet, send it, fetch and parse the result
+#
+sub radius_stats_get {
+ my ($type, %args) = @_;
+
+ our ($d, $s, $rid);
+
+ my $p_req = new Net::Radius::Packet $d;
+ $p_req->set_code('Status-Server');
+ $p_req->set_vsattr('FreeRADIUS', 'FreeRADIUS-Statistics-Type', $type);
+ $p_req->set_vsattr('FreeRADIUS', $_, $args{$_}) foreach keys %args;
+
+ #
+ # Update id
+ #
+ $p_req->set_identifier($rid++);
+ $p_req->set_authenticator(pack 'C*', map { int rand 255 } 0..15);
+
+ #
+ # Recalc authenticator
+ #
+ $p_req->set_attr('Message-Authenticator', "\0"x16, 1);
+ $p_req->set_attr('Message-Authenticator', Digest::HMAC_MD5::hmac_md5($p_req->pack, $cfg->{radius}->{secret}), 1);
+
+ #
+ # Send brand new and shiny request
+ #
+ $s->send($p_req->pack) or die;
+
+ my $p_data;
+ if (defined $s->recv($p_data, 2048)) {
+ my $p_res = new Net::Radius::Packet $d, $p_data;
+
+ my %response = map {
+ $_ => $p_res->vsattr($d->vendor_num('FreeRADIUS'), $_)->[0]
+ } $p_res->vsattributes($d->vendor_num('FreeRADIUS'));
+ return \%response;
+
+ }
+ else {
+ warn "no answer, $!\n";
+ return undef;
+ }
+
+}
+
+#
+# Wrappers for specific types of stats
+#
+sub radius_stats_get_global { return radius_stats_get(0x1f); }
+sub radius_stats_get_client { return radius_stats_get(0x3f, 'FreeRADIUS-Stats-Client-Number' => $_[0]); }
+
+#
+# Main loop of thread fetching status from freeradius server
+#
+sub radius_updater {
+
+ while ($running) {
+ INFO 'fetching new data';
+ my $main_stat = radius_stats_get_global();
+
+ if (defined $main_stat) {
+ my @clients_stat = ();
+
+ if ($cfg->{clients}) {
+ my $client_id = 0;
+
+ while (1) {
+ my $client_stat = radius_stats_get_client($client_id);
+ last unless exists $client_stat->{'FreeRADIUS-Stats-Client-IP-Address'} || exists $client_stat->{'FreeRADIUS-Stats-Client-IPv6-Address'};
+ push @clients_stat, $client_stat;
+ $client_id += 1;
+ }
+ }
+
+ INFO 'got data, updating stats';
+ radius_snmp_stats($main_stat, \@clients_stat);
+
+ }
+ else {
+ WARN 'problem with fetching data';
+ }
+
+ INFO 'stats updated, sleeping';
+ my $now = time;
+ my $next_stats_time = $now + $cfg->{radius}->{refresh_rate};
+ do {
+ sleep 1;
+ $now = time;
+ } while ($now < $next_stats_time && $running);
+
+ }
+
+}
+
+#
+# Helper to get a dotted string from NetSNMP::OID
+#
+sub oid_s { return join '.', $_[0]->to_array; }
+
+#
+# Handler for snmp requests from master agent
+#
+sub snmp_handler {
+ DEBUG 'got new request';
+ my ($handler, $registration_info, $request_info, $requests) = @_;
+
+ lock %snmp_data;
+ lock @snmp_data_k;
+ lock %snmp_next;
+
+ for (my $request = $requests; $request; $request = $request->next()) {
+ INFO 'request type '.$request_info->getMode.' for oid: '.oid_s($request->getOID);
+
+ if ($request_info->getMode == MODE_GET) {
+ my $oid_s = oid_s($request->getOID);
+ if (exists $snmp_data{$oid_s}) {
+ $request->setValue($snmp_data{$oid_s}->[0], ''.$snmp_data{$oid_s}->[1]);
+ }
+
+ }
+ elsif ($request_info->getMode == MODE_GETNEXT) {
+
+ #
+ # Do a fast lookup if we can...
+ #
+ my $oid = $snmp_next{oid_s($request->getOID)};
+
+ #
+ # ... otherwise take the slow route
+ #
+ unless (defined $oid) {
+ foreach ( @snmp_data_k ) {
+ #the keys is sorted in ascending order, so we are looking for
+ #first value bigger than one in request
+ if ($request->getOID < NetSNMP::OID->new($_)) {
+ $oid = $_;
+ last;
+ }
+ }
+ }
+
+ next unless $oid;
+
+ $request->setValue($snmp_data{$oid}->[0], ''.$snmp_data{$oid}->[1]);
+ $request->setOID($oid);
+
+ }
+ else {
+ $request->setError($request_info, SNMP_ERR_READONLY); # No write support
+ }
+ }
+ DEBUG 'finished processing the request';
+}
+
+#
+# Init part of subtree for handling radius AUTH statistics
+#
+sub radius_snmp_stats_init_auth {
+ my ($snmp_data_n, $oid, $clients) = @_;
+
+ @{$snmp_data_n->{$oid.'.1.1.1.1'} = &share([])} = (ASN_OCTET_STR, ''); # radiusAuthServIdent
+ @{$snmp_data_n->{$oid.'.1.1.1.2'} = &share([])} = (ASN_TIMETICKS, 0); # radiusAuthServUpTime
+ @{$snmp_data_n->{$oid.'.1.1.1.3'} = &share([])} = (ASN_TIMETICKS, 0); # radiusAuthServResetTime
+ @{$snmp_data_n->{$oid.'.1.1.1.4'} = &share([])} = (ASN_INTEGER, 0); # radiusAuthServConfigReset
+ @{$snmp_data_n->{$oid.'.1.1.1.5'} = &share([])} = (ASN_COUNTER, 0); # radiusAuthServTotalAccessRequests
+ @{$snmp_data_n->{$oid.'.1.1.1.6'} = &share([])} = (ASN_COUNTER, 0); # radiusAuthServTotalInvalidRequests
+ @{$snmp_data_n->{$oid.'.1.1.1.7'} = &share([])} = (ASN_COUNTER, 0); # radiusAuthServTotalDupAccessRequests
+ @{$snmp_data_n->{$oid.'.1.1.1.8'} = &share([])} = (ASN_COUNTER, 0); # radiusAuthServTotalAccessAccepts
+ @{$snmp_data_n->{$oid.'.1.1.1.9'} = &share([])} = (ASN_COUNTER, 0); # radiusAuthServTotalAccessRejects
+ @{$snmp_data_n->{$oid.'.1.1.1.10'} = &share([])} = (ASN_COUNTER, 0); # radiusAuthServTotalAccessChallenges
+ @{$snmp_data_n->{$oid.'.1.1.1.11'} = &share([])} = (ASN_COUNTER, 0); # radiusAuthServTotalMalformedAccessRequests
+ @{$snmp_data_n->{$oid.'.1.1.1.12'} = &share([])} = (ASN_COUNTER, 0); # radiusAuthServTotalBadAuthenticators
+ @{$snmp_data_n->{$oid.'.1.1.1.13'} = &share([])} = (ASN_COUNTER, 0); # radiusAuthServTotalPacketsDropped
+ @{$snmp_data_n->{$oid.'.1.1.1.14'} = &share([])} = (ASN_COUNTER, 0); # radiusAuthServTotalUnknownTypes
+
+ #
+ # radiusAuthClientExtTable
+ #
+ for (1 .. scalar @$clients) {
+
+ my $addrtype;
+ my $addr;
+ if (exists $clients->[$_-1]->{'FreeRADIUS-Stats-Client-IP-Address'}) {
+ $addrtype = 1;
+ $addr = $clients->[$_-1]->{'FreeRADIUS-Stats-Client-IP-Address'};
+ }
+ elsif (exists $clients->[$_-1]->{'FreeRADIUS-Stats-Client-IPv6-Address'}) {
+ $addrtype = 2;
+ $addr = inet_ntop(AF_INET6, $clients->[$_-1]->{'FreeRADIUS-Stats-Client-IPv6-Address'});
+ }
+ else {
+ next;
+ }
+
+ @{$snmp_data_n->{$oid.'.1.1.1.16.1.1.'.$_} = &share([])} = (ASN_INTEGER, $_); # radiusAuthClientExtIndex
+ @{$snmp_data_n->{$oid.'.1.1.1.16.1.2.'.$_} = &share([])} = (ASN_INTEGER, $addrtype); # radiusAuthClientInetAddressType
+ @{$snmp_data_n->{$oid.'.1.1.1.16.1.3.'.$_} = &share([])} = (ASN_OCTET_STR, $addr); # radiusAuthClientInetAddress
+ @{$snmp_data_n->{$oid.'.1.1.1.16.1.4.'.$_} = &share([])} = (ASN_OCTET_STR, $clients->[$_-1]->{'FreeRADIUS-Stats-Client-Number'}); # radiusAuthClientExtID
+ @{$snmp_data_n->{$oid.'.1.1.1.16.1.5.'.$_} = &share([])} = (ASN_COUNTER, 0); # radiusAuthServExtAccessRequests
+ @{$snmp_data_n->{$oid.'.1.1.1.16.1.6.'.$_} = &share([])} = (ASN_COUNTER, 0); # radiusAuthServExtDupAccessRequests
+ @{$snmp_data_n->{$oid.'.1.1.1.16.1.7.'.$_} = &share([])} = (ASN_COUNTER, 0); # radiusAuthServExtAccessAccepts
+ @{$snmp_data_n->{$oid.'.1.1.1.16.1.8.'.$_} = &share([])} = (ASN_COUNTER, 0); # radiusAuthServExtAccessRejects
+ @{$snmp_data_n->{$oid.'.1.1.1.16.1.9.'.$_} = &share([])} = (ASN_COUNTER, 0); # radiusAuthServExtAccessChallenges
+ @{$snmp_data_n->{$oid.'.1.1.1.16.1.10.'.$_} = &share([])} = (ASN_COUNTER, 0); # radiusAuthServExtMalformedAccessRequests
+ @{$snmp_data_n->{$oid.'.1.1.1.16.1.11.'.$_} = &share([])} = (ASN_COUNTER, 0); # radiusAuthServExtBadAuthenticators
+ @{$snmp_data_n->{$oid.'.1.1.1.16.1.12.'.$_} = &share([])} = (ASN_COUNTER, 0); # radiusAuthServExtPacketsDropped
+ @{$snmp_data_n->{$oid.'.1.1.1.16.1.13.'.$_} = &share([])} = (ASN_COUNTER, 0); # radiusAuthServExtUnknownTypes
+ @{$snmp_data_n->{$oid.'.1.1.1.16.1.14.'.$_} = &share([])} = (ASN_TIMETICKS, 0); # radiusAuthServerCounterDiscontinuity
+
+ }
+}
+
+#
+# Init part of subtree for handling radius ACCT statistics
+#
+sub radius_snmp_stats_init_acct {
+ my ( $snmp_data_n, $oid, $clients ) = @_;
+
+ @{$snmp_data_n->{$oid.'.1.1.1.1'} = &share([])} = (ASN_OCTET_STR, ''); # radiusAccServIdent
+ @{$snmp_data_n->{$oid.'.1.1.1.2'} = &share([])} = (ASN_TIMETICKS, 0); # radiusAccServUpTime
+ @{$snmp_data_n->{$oid.'.1.1.1.3'} = &share([])} = (ASN_TIMETICKS, 0); # radiusAccServResetTime
+ @{$snmp_data_n->{$oid.'.1.1.1.4'} = &share([])} = (ASN_INTEGER, 0); # radiusAccServConfigReset
+ @{$snmp_data_n->{$oid.'.1.1.1.5'} = &share([])} = (ASN_COUNTER, 0); # radiusAccServTotalRequests
+ @{$snmp_data_n->{$oid.'.1.1.1.6'} = &share([])} = (ASN_COUNTER, 0); # radiusAccServTotalInvalidRequests
+ @{$snmp_data_n->{$oid.'.1.1.1.7'} = &share([])} = (ASN_COUNTER, 0); # radiusAccServTotalDupRequests
+ @{$snmp_data_n->{$oid.'.1.1.1.8'} = &share([])} = (ASN_COUNTER, 0); # radiusAccServTotalResponses
+ @{$snmp_data_n->{$oid.'.1.1.1.9'} = &share([])} = (ASN_COUNTER, 0); # radiusAccServTotalMalformedRequests
+ @{$snmp_data_n->{$oid.'.1.1.1.10'} = &share([])} = (ASN_COUNTER, 0); # radiusAccServTotalBadAuthenticators
+ @{$snmp_data_n->{$oid.'.1.1.1.11'} = &share([])} = (ASN_COUNTER, 0); # radiusAccServTotalPacketsDropped
+ @{$snmp_data_n->{$oid.'.1.1.1.12'} = &share([])} = (ASN_COUNTER, 0); # radiusAccServTotalNoRecords
+ @{$snmp_data_n->{$oid.'.1.1.1.13'} = &share([])} = (ASN_COUNTER, 0); # radiusAccServTotalUnknownTypes
+
+ #
+ # radiusAccClientExtTable
+ #
+ for (1 .. scalar @$clients) {
+
+ my $addrtype;
+ my $addr;
+ if (exists $clients->[$_-1]->{'FreeRADIUS-Stats-Client-IP-Address'}) {
+ $addrtype = 1;
+ $addr = $clients->[$_-1]->{'FreeRADIUS-Stats-Client-IP-Address'};
+ }
+ elsif (exists $clients->[$_-1]->{'FreeRADIUS-Stats-Client-IPv6-Address'}) {
+ $addrtype = 2;
+ $addr = inet_ntop(AF_INET6, $clients->[$_-1]->{'FreeRADIUS-Stats-Client-IPv6-Address'});
+ }
+ else {
+ next;
+ }
+
+ @{$snmp_data_n->{$oid.'.1.1.1.15.1.1.'.$_} = &share([])} = (ASN_INTEGER, $_); # radiusAccClientExtIndex
+ @{$snmp_data_n->{$oid.'.1.1.1.15.1.2.'.$_} = &share([])} = (ASN_INTEGER, $addrtype); # radiusAccClientInetAddressType
+ @{$snmp_data_n->{$oid.'.1.1.1.15.1.3.'.$_} = &share([])} = (ASN_OCTET_STR, $addr); # radiusAccClientInetAddress
+ @{$snmp_data_n->{$oid.'.1.1.1.15.1.4.'.$_} = &share([])} = (ASN_OCTET_STR, $clients->[$_-1]->{'FreeRADIUS-Stats-Client-Number'}); # radiusAccClientExtID
+ @{$snmp_data_n->{$oid.'.1.1.1.15.1.5.'.$_} = &share([])} = (ASN_COUNTER, 0); # radiusAccServExtPacketsDropped
+ @{$snmp_data_n->{$oid.'.1.1.1.15.1.6.'.$_} = &share([])} = (ASN_COUNTER, 0); # radiusAccServExtRequests
+ @{$snmp_data_n->{$oid.'.1.1.1.15.1.7.'.$_} = &share([])} = (ASN_COUNTER, 0); # radiusAccServExtDupRequests
+ @{$snmp_data_n->{$oid.'.1.1.1.15.1.8.'.$_} = &share([])} = (ASN_COUNTER, 0); # radiusAccServResponses
+ @{$snmp_data_n->{$oid.'.1.1.1.15.1.9.'.$_} = &share([])} = (ASN_COUNTER, 0); # radiusAccServExtBadAuthenticators
+ @{$snmp_data_n->{$oid.'.1.1.1.15.1.10.'.$_} = &share([])} = (ASN_COUNTER, 0); # radiusAccServExtMalformedRequests
+ @{$snmp_data_n->{$oid.'.1.1.1.15.1.11.'.$_} = &share([])} = (ASN_COUNTER, 0); # radiusAccServExtNoRecords
+ @{$snmp_data_n->{$oid.'.1.1.1.15.1.12.'.$_} = &share([])} = (ASN_COUNTER, 0); # radiusAccServExtUnknownTypes
+ @{$snmp_data_n->{$oid.'.1.1.1.15.1.13.'.$_} = &share([])} = (ASN_TIMETICKS, 0); # radiusAccServerCounterDiscontinuity
+
+ }
+}
+
+#
+# Fill part of subtree with data from radius AUTH statistics
+#
+sub radius_snmp_stats_fill_auth {
+ my ($snmp_data_n, $oid, $prefix, $main, $clients) = @_;
+
+ my $time = time;
+
+ $snmp_data_n->{$oid.'.1.1.1.1'}->[1] = 'snmp(over)radius';
+ $snmp_data_n->{$oid.'.1.1.1.2'}->[1] = ($time - $main->{'FreeRADIUS-Stats-Start-Time'})*100;
+ $snmp_data_n->{$oid.'.1.1.1.3'}->[1] = ($time - $main->{'FreeRADIUS-Stats-HUP-Time'})*100;
+ $snmp_data_n->{$oid.'.1.1.1.4'}->[1] = 0;
+ $snmp_data_n->{$oid.'.1.1.1.5'}->[1] += $main->{$prefix.'Access-Requests'};
+ $snmp_data_n->{$oid.'.1.1.1.6'}->[1] += $main->{$prefix.'Auth-Invalid-Requests'};
+ $snmp_data_n->{$oid.'.1.1.1.7'}->[1] += $main->{$prefix.'Auth-Duplicate-Requests'};
+ $snmp_data_n->{$oid.'.1.1.1.8'}->[1] += $main->{$prefix.'Access-Accepts'};
+ $snmp_data_n->{$oid.'.1.1.1.9'}->[1] += $main->{$prefix.'Access-Rejects'};
+ $snmp_data_n->{$oid.'.1.1.1.10'}->[1] += $main->{$prefix.'Access-Challenges'};
+ $snmp_data_n->{$oid.'.1.1.1.11'}->[1] += $main->{$prefix.'Auth-Malformed-Requests'};
+ $snmp_data_n->{$oid.'.1.1.1.12'}->[1] += 0;
+ $snmp_data_n->{$oid.'.1.1.1.13'}->[1] += $main->{$prefix.'Auth-Dropped-Requests'};
+ $snmp_data_n->{$oid.'.1.1.1.14'}->[1] += $main->{$prefix.'Auth-Unknown-Types'};
+
+ for (1 .. scalar @$clients) {
+ next unless exists $clients->[$_-1]->{'FreeRADIUS-Stats-Client-IP-Address'} || exists $clients->[$_-1]->{'FreeRADIUS-Stats-Client-IPv6-Address'};
+ $snmp_data_n->{$oid.'.1.1.1.16.1.5.'.$_}->[1] += $clients->[$_-1]->{$prefix.'Access-Requests'} || 0;
+ $snmp_data_n->{$oid.'.1.1.1.16.1.6.'.$_}->[1] += $clients->[$_-1]->{$prefix.'Auth-Duplicate-Requests'} || 0;
+ $snmp_data_n->{$oid.'.1.1.1.16.1.7.'.$_}->[1] += $clients->[$_-1]->{$prefix.'Access-Accepts'} || 0;
+ $snmp_data_n->{$oid.'.1.1.1.16.1.8.'.$_}->[1] += $clients->[$_-1]->{$prefix.'Access-Rejects'} || 0;
+ $snmp_data_n->{$oid.'.1.1.1.16.1.9.'.$_}->[1] += $clients->[$_-1]->{$prefix.'Access-Challenges'} || 0;
+ $snmp_data_n->{$oid.'.1.1.1.16.1.10.'.$_}->[1] += $clients->[$_-1]->{$prefix.'Auth-Malformed-Requests'} || 0;
+ $snmp_data_n->{$oid.'.1.1.1.16.1.11.'.$_}->[1] += 0;
+ $snmp_data_n->{$oid.'.1.1.1.16.1.12.'.$_}->[1] += $clients->[$_-1]->{$prefix.'Auth-Dropped-Requests'} || 0;
+ $snmp_data_n->{$oid.'.1.1.1.16.1.13.'.$_}->[1] += $clients->[$_-1]->{$prefix.'Auth-Unknown-Types'} || 0;
+ $snmp_data_n->{$oid.'.1.1.1.16.1.14.'.$_}->[1] += 0;
+ }
+}
+
+#
+# Fill part of subtree with data from radius ACCT statistics
+#
+sub radius_snmp_stats_fill_acct {
+ my ( $snmp_data_n, $oid, $prefix, $main, $clients ) = @_;
+
+ my $time = time;
+
+ $snmp_data_n->{$oid.'.1.1.1.1'}->[1] = 'snmp(over)radius';
+ $snmp_data_n->{$oid.'.1.1.1.2'}->[1] = ($time - $main->{'FreeRADIUS-Stats-Start-Time'})*100;
+ $snmp_data_n->{$oid.'.1.1.1.3'}->[1] = ($time - $main->{'FreeRADIUS-Stats-HUP-Time'})*100;
+ $snmp_data_n->{$oid.'.1.1.1.4'}->[1] = 0;
+ $snmp_data_n->{$oid.'.1.1.1.5'}->[1] += $main->{$prefix.'Accounting-Requests'};
+ $snmp_data_n->{$oid.'.1.1.1.6'}->[1] += $main->{$prefix.'Acct-Invalid-Requests'};
+ $snmp_data_n->{$oid.'.1.1.1.7'}->[1] += $main->{$prefix.'Acct-Duplicate-Requests'};
+ $snmp_data_n->{$oid.'.1.1.1.8'}->[1] += $main->{$prefix.'Accounting-Responses'};
+ $snmp_data_n->{$oid.'.1.1.1.9'}->[1] += $main->{$prefix.'Acct-Malformed-Requests'};
+ $snmp_data_n->{$oid.'.1.1.1.10'}->[1] += 0;
+ $snmp_data_n->{$oid.'.1.1.1.11'}->[1] += $main->{$prefix.'Acct-Dropped-Requests'};
+ $snmp_data_n->{$oid.'.1.1.1.12'}->[1] += 0;
+ $snmp_data_n->{$oid.'.1.1.1.13'}->[1] += $main->{$prefix.'Acct-Unknown-Types'};
+
+ for (1 .. scalar @$clients) {
+ next unless exists $clients->[$_-1]->{'FreeRADIUS-Stats-Client-IP-Address'} || exists $clients->[$_-1]->{'FreeRADIUS-Stats-Client-IPv6-Address'};
+ $snmp_data_n->{$oid.'.1.1.1.15.1.5.'.$_}->[1] += $clients->[$_-1]->{$prefix.'Acct-Dropped-Requests'} || 0;
+ $snmp_data_n->{$oid.'.1.1.1.15.1.6.'.$_}->[1] += $clients->[$_-1]->{$prefix.'Accounting-Requests'} || 0;
+ $snmp_data_n->{$oid.'.1.1.1.15.1.7.'.$_}->[1] += $clients->[$_-1]->{$prefix.'Acct-Duplicate-Requests'} || 0;
+ $snmp_data_n->{$oid.'.1.1.1.15.1.8.'.$_}->[1] += $clients->[$_-1]->{$prefix.'Accounting-Responses'} || 0;
+ $snmp_data_n->{$oid.'.1.1.1.15.1.9.'.$_}->[1] += 0;
+ $snmp_data_n->{$oid.'.1.1.1.15.1.10.'.$_}->[1] += $clients->[$_-1]->{$prefix.'Acct-Malformed-Requests'} || 0;
+ $snmp_data_n->{$oid.'.1.1.1.15.1.11.'.$_}->[1] += 0;
+ $snmp_data_n->{$oid.'.1.1.1.15.1.12.'.$_}->[1] += $clients->[$_-1]->{$prefix.'Acct-Unknown-Types'} || 0;
+ $snmp_data_n->{$oid.'.1.1.1.15.1.13.'.$_}->[1] += 0;
+ }
+}
+
+#
+# Update statistics
+#
+sub radius_snmp_stats {
+ my ($main, $clients) = @_;
+
+ my %snmp_data_n;
+
+ #
+ # We have to go through all oid's
+ #
+ foreach my $oid_s ( keys %{$cfg->{snmp}->{oid_sub}} ) {
+
+ #
+ # We're rebuilding the tree for data. We could do it only once, but it
+ # will change when we will start handling more dynamic tree (clients)
+ #
+ my %types = map { $_ => 1 } map { /(?:proxy)?(\w+)/; $1 } @{$cfg->{snmp}->{oid_sub}->{$oid_s}};
+ WARN 'two conflicting types for oid '.$oid_s if scalar keys %types > 1;
+
+ if ((keys %types)[0] eq 'auth') {
+ radius_snmp_stats_init_auth(\%snmp_data_n, $cfg->{snmp}->{oid_root}.'.'.$oid_s, $clients);
+ }
+ elsif ( (keys %types)[0] eq 'acct' ) {
+ radius_snmp_stats_init_acct(\%snmp_data_n, $cfg->{snmp}->{oid_root}.'.'.$oid_s, $clients);
+ }
+ else {
+ WARN 'unknown subtree type '.(keys %types)[0];
+ }
+
+ #
+ # Refill the statistics
+ #
+ foreach my $type (@{$cfg->{snmp}->{oid_sub}->{$oid_s}}) {
+ if ($type eq 'auth') {
+ radius_snmp_stats_fill_auth(
+ \%snmp_data_n, $cfg->{snmp}->{oid_root}.'.'.$oid_s,
+ 'FreeRADIUS-Total-', $main, $clients);
+ }
+ elsif ($type eq 'proxyauth') {
+ radius_snmp_stats_fill_auth(
+ \%snmp_data_n, $cfg->{snmp}->{oid_root}.'.'.$oid_s,
+ 'FreeRADIUS-Total-Proxy-', $main, $clients);
+ }
+ elsif ($type eq 'acct') {
+ radius_snmp_stats_fill_acct(
+ \%snmp_data_n, $cfg->{snmp}->{oid_root}.'.'.$oid_s,
+ 'FreeRADIUS-Total-', $main, $clients);
+ }
+ elsif ($type eq 'proxyacct') {
+ radius_snmp_stats_fill_acct(
+ \%snmp_data_n, $cfg->{snmp}->{oid_root}.'.'.$oid_s,
+ 'FreeRADIUS-Total-Proxy-', $main, $clients);
+ }
+ else {
+ WARN 'unknown subtree type '.$type;
+ }
+
+ }
+ }
+
+ #
+ # Copy the rebuilt tree to the shared variables
+ #
+ my @k = map { oid_s($_) } sort { $a <=> $b } map { NetSNMP::OID->new($_) } keys %snmp_data_n;
+ my %snmp_next_n = ();
+ $snmp_next_n{$k[$_]} = $k[$_+1] for (0 .. $#k-1);
+
+ lock %snmp_data;
+ lock @snmp_data_k;
+ lock %snmp_next;
+
+ %snmp_data = %snmp_data_n;
+ @snmp_data_k = @k;
+ %snmp_next = %snmp_next_n;
+
+}
+
+=head1 NAME
+
+freeradius snmp agentx subagent
+
+=head1 VERSION
+
+=head1 SYNOPSIS
+
+make sure snmpd is agentx master (snmpd.conf):
+master agentx
+
+run the script (no daemonizing support yet):
+
+./freeradius-snmp.pl
+
+then you can walk the tree (default oid):
+
+snmpbulkwalk -On -v2c -cpublic localhost .1.3.6.1.2.1.67
+
+if per-client stats collection is enabled then you can do the following:
+
+snmptable -v2c -cpublic localhost .1.3.6.1.2.1.67.1.1.1.1.16
+snmptable -v2c -cpublic localhost .1.3.6.1.2.1.67.2.1.1.1.15
+
+=head1 DESCRIPTION
+
+=head1 DEPENDENCIES
+
+Net-Radius (either 1.56 + net-radius-freeradius-dictionary.diff to use freeradius dictionaries
+ or vanilla upstream one + dictionary.hacked)
+NetSNMP perl modules (available with net-snmp distribution)
+Digest::HMAC
+Log::Log4perl
+
+=head1 AUTHOR
+
+Stanislaw Sawa <stanislaw.sawa(at)sns.bskyb.com>
+
+=head1 COPYRIGHT
+
+Copyright (C) 2008 Sky Network Services.
+Copyright (C) 2022 Network RADIUS.
+
+This program is free software; you can redistribute it and/or modify it
+under the same terms as Perl itself.
diff --git a/scripts/snmp-proxy/net-radius-freeradius-dictionary.diff b/scripts/snmp-proxy/net-radius-freeradius-dictionary.diff
new file mode 100644
index 0000000..f379cf4
--- /dev/null
+++ b/scripts/snmp-proxy/net-radius-freeradius-dictionary.diff
@@ -0,0 +1,81 @@
+--- Net-Radius-1.56.orig/Radius/Dictionary.pm 2008-06-20 14:08:57.000000000 +0100
++++ Net-Radius-1.56.1/Radius/Dictionary.pm 2008-06-20 15:54:33.000000000 +0100
+@@ -30,14 +30,23 @@
+
+ sub readfile {
+ my ($self, $filename) = @_;
++ my $dict;
+
+- open DICT, "<$filename";
++ open $dict, "<$filename";
+
+- while (defined(my $l = <DICT>)) {
++ my @in_vendor = ();
++
++ while (defined(my $l = <$dict>)) {
+ next if $l =~ /^\#/;
+ next unless my @l = split /\s+/, $l;
+
+- if ($l[0] =~ m/^vendor$/i)
++ if ($l[0] =~ m/^\$include$/i)
++ {
++ my @fn = split /\//, $filename;
++ $fn[$#fn] = $l[1];
++ $self->readfile(join '/', @fn);
++ }
++ elsif ($l[0] =~ m/^vendor$/i)
+ {
+ if (defined $l[1] and defined $l[2] and $l[2] =~ /^[xo0-9]+$/)
+ {
+@@ -53,8 +62,42 @@
+ warn "Garbled VENDOR line $l\n";
+ }
+ }
++ elsif ($l[0] =~ m/^begin-vendor$/i)
++ {
++ if ( defined $l[1] )
++ {
++ push @in_vendor, $l[1];
++ }
++ else
++ {
++ warn "Garbled BEGIN-VENDOR line $l\n";
++ }
++ }
++ elsif ($l[0] =~ m/^end-vendor$/i)
++ {
++ if ( defined $l[1] )
++ {
++ if ( $in_vendor[$#in_vendor] eq $l[1] ) {
++ pop @in_vendor;
++ }else {
++ warn "mismatched END-VENDOR line $l\n";
++ }
++ }
++ else
++ {
++ warn "Garbled END-VENDOR line $l\n";
++ }
++ }
+ elsif ($l[0] =~ m/^attribute$/i)
+ {
++ if (@l == 5) {
++ my @tags = grep { not ( m/^encrypt=\d$/ or m/^has_tag$/ ) } split /,/, pop @l;
++ push @l, join ',', @tags if scalar @tags;
++ }
++ if (@l == 4 and scalar @in_vendor) {
++ push @l, $in_vendor[$#in_vendor];
++ }
++
+ if (@l == 4)
+ {
+ $self->{attr}->{$l[1]} = [@l[2,3]];
+@@ -166,7 +209,7 @@
+ warn "Warning: Weird dictionary line: $l\n";
+ }
+ }
+- close DICT;
++ close $dict;
+ }
+
+ # Accessors for standard attributes
diff --git a/scripts/solaris/.gitignore b/scripts/solaris/.gitignore
new file mode 100644
index 0000000..91ef483
--- /dev/null
+++ b/scripts/solaris/.gitignore
@@ -0,0 +1 @@
+svc-radius
diff --git a/scripts/solaris/README.md b/scripts/solaris/README.md
new file mode 100644
index 0000000..8e1c02a
--- /dev/null
+++ b/scripts/solaris/README.md
@@ -0,0 +1,58 @@
+# Solaris startup scripts
+
+SMF is the Solaris version of upstart (or the reverse). It imports
+XML configuration files for services, and manages service
+dependencies. It will automatically restart daemons if they die,
+and provides a standard interface for checking the status of a
+service and administratively disabling/enabling it.
+
+
+# Installation/configuration
+
+## Solaris 10
+
+Do the following as the root user ``sudo -s``.
+
+Copy the service management script ``svc-radius`` to ``/lib/svc/method/``:
+
+```bash
+cp ./svc-radius /lib/svc/method/
+chown root:bin /lib/svc/method/svc-radius
+chmod 555 /lib/svc/method/svc-radius
+```
+
+Copy the ``radius.xml`` manifest to
+``/var/svc/manifest/network/``, and import it into SMF:
+
+```bash
+cp ./radius.xml /var/svc/manifest/network/
+svccfg import /var/svc/manifest/network/radius.xml
+```
+
+
+### Authorizing additional users
+
+First create an authorisation entry for the radius service:
+
+```bash
+echo "solaris.smf.manage.radius/server:::FreeRADIUS Server management::" >> /etc/security/auth_attr
+```
+
+Next instruct SMF to use RBAC for authorising actions on this
+particular service (only works with local accounts):
+
+```bash
+svccfg -s radius setprop general/action_authorization=astring: 'solaris.smf.manage.radius/server'
+```
+
+Then assign this authorisation to one or more users:
+
+```bash
+usermod -A solaris.smf.manage.radius/server <user>
+```
+
+And finally test with (as authorized user):
+
+```bash
+svcs radius
+```
diff --git a/scripts/solaris/radius.xml b/scripts/solaris/radius.xml
new file mode 100644
index 0000000..d9b0506
--- /dev/null
+++ b/scripts/solaris/radius.xml
@@ -0,0 +1,68 @@
+<?xml version="1.0"?>
+<!DOCTYPE service_bundle SYSTEM
+"/usr/share/lib/xml/dtd/service_bundle.dtd.1">
+<service_bundle type='manifest' name='radius'>
+<service
+ name='network/radius'
+ type='service'
+ version='1'>
+ <create_default_instance enabled='false' />
+ <single_instance/>
+
+ <dependency name='fs-local'
+ grouping='require_all'
+ restart_on='none'
+ type='service'>
+ <service_fmri value='svc:/system/filesystem/local' />
+ </dependency>
+
+ <dependency name='fs-autofs'
+ grouping='optional_all'
+ restart_on='none'
+ type='service'>
+ <service_fmri value='svc:/system/filesystem/autofs' />
+ </dependency>
+
+ <dependency name='net-loopback'
+ grouping='require_all'
+ restart_on='none'
+ type='service'>
+ <service_fmri value='svc:/network/loopback' />
+ </dependency>
+
+ <dependency name='net-physical'
+ grouping='require_all'
+ restart_on='none'
+ type='service'>
+ <service_fmri value='svc:/network/physical' />
+ </dependency>
+
+ <exec_method
+ type='method'
+ name='start'
+ exec='/lib/svc/method/svc-radius %m'
+ timeout_seconds='10' />
+ <exec_method
+ type='method'
+ name='stop'
+ exec='/lib/svc/method/svc-radius %m %{restarter/contract}'
+ timeout_seconds='10' />
+ <exec_method
+ type='method'
+ name='refresh'
+ exec='/lib/svc/method/svc-radius %m'
+ timeout_seconds='10' />
+
+ <stability value='Unstable' />
+ <template>
+ <common_name>
+ <loctext xml:lang='C'> radius
+ </loctext>
+ </common_name>
+ <documentation>
+ <manpage title='radius' section='1M'
+ manpath='/usr/share/man' />
+ </documentation>
+ </template>
+</service>
+</service_bundle>
diff --git a/scripts/solaris/svc-radius.in b/scripts/solaris/svc-radius.in
new file mode 100755
index 0000000..f5aee81
--- /dev/null
+++ b/scripts/solaris/svc-radius.in
@@ -0,0 +1,99 @@
+#!/bin/sh
+. /lib/svc/share/smf_include.sh
+#
+#
+# radiusd Start the radius daemon.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA
+#
+# Copyright (C) 2001-2012 The FreeRADIUS Project http://www.freeradius.org
+
+prefix=@prefix@
+exec_prefix=@exec_prefix@
+sbindir=@sbindir@
+localstatedir=@localstatedir@
+logdir=@logdir@
+rundir=${localstatedir}/run/radiusd
+sysconfdir=@sysconfdir@
+
+#
+# If you have issues with OpenSSL, uncomment these next lines.
+#
+# Something similar may work for MySQL, and you may also
+# have to LD_PRELOAD libz.so
+#
+#LD_LIBRARY_PATH= -lcrypto -lssl -lcrypto
+#LD_RUN_PATH= -lcrypto -lssl -lcrypto:
+#LD_PRELOAD= -lcrypto -lssl -lcryptolibcrypto.so
+export LD_LIBRARY_PATH LD_RUN_PATH LD_PRELOAD
+
+RADIUSD=$sbindir/radiusd
+RADDBDIR=@raddbdir@
+DESC="FreeRADIUS"
+
+#
+# See 'man radiusd' for details on command-line options.
+#
+ARGS=""
+
+test -f $RADIUSD || exit $SMF_EXIT_ERR_CONFIG
+test -f $RADDBDIR/radiusd.conf || exit $SMF_EXIT_ERR_CONFIG
+
+#if [ ! -d $rundir ] ; then
+# mkdir $rundir
+# chown radmin:radius
+# chmod 775 $rundir
+#fi
+#
+#if [ ! -d $logdir ] ; then
+# mkdir $logdir
+# chown radmin:radius $logdir
+# chmod 770 $logdir
+# chmod g+s $logdir
+#fi
+#
+#if [ ! -f $logdir/radius.log ]; then
+# touch $logdir/radius.log
+#fi
+#
+#chown radmin:radius $logdir/radius.log
+#chmod 660 $logdir/radius.log
+
+case "$1" in
+ start)
+ echo -n "Starting $DESC: "
+ $RADIUSD $ARGS
+ echo "radiusd"
+ ;;
+ stop)
+ echo -n "Stopping $DESC: "
+ smf_kill_contract $2 TERM 1
+ [ $? -ne 0 ] && exit 1
+ echo "radiusd."
+ ;;
+ refresh)
+ echo "Reloading $DESC configuration files."
+ [ -f $rundir/radiusd.pid ] && kill -HUP `cat $rundir/radiusd.pid`
+ ;;
+ check)
+ $RADIUSD -CX $ARGS
+ exit $?
+ ;;
+ *)
+ echo "Usage: $0 {start|stop|refresh|check}"
+ exit 1
+esac
+
+exit $SMF_EXIT_OK
diff --git a/scripts/sql/align_sql_pools.pl b/scripts/sql/align_sql_pools.pl
new file mode 100755
index 0000000..04d2d4b
--- /dev/null
+++ b/scripts/sql/align_sql_pools.pl
@@ -0,0 +1,311 @@
+#!/usr/bin/perl -Tw
+
+######################################################################
+#
+# Copyright (C) 2020 Network RADIUS
+#
+# $Id$
+#
+######################################################################
+#
+# Helper script for generating the SQL commands to align the SQL IP pools in a
+# database with a given specification.
+#
+# The radippool table is updated in a way that preserves existing leases,
+# provided that the corresponding IP addresses still exist in their pool.
+#
+# This script typically receives the output of the generate_pool_addresses.pl
+# script, as follows:
+#
+#   generate_pool_addresses.pl <options> | align_sql_pools.pl <sql_dialect>
+#
+# For example:
+#
+#   generate_pool_addresses.pl main_pool 10.0.1.0 10.0.1.255 | \
+#       align_sql_pools.pl mariadb
+#
+#   generate_pool_addresses.pl yaml pool_defs.yml existing_ips.txt | \
+#       align_sql_pools.pl postgresql
+#
+# For the latter example the existing_ips.txt file might be created as
+# follows:
+#
+# psql radius -qtAc 'SELECT framedipaddress FROM radippool' > existing_ips.txt
+#
+# Note: The generate_pool_addresses.pl script describes the input format
+# expected by this script (as well as the format of the pool_defs.yml and
+# existing_ips.txt files.)
+#
+# Output:
+#
+# The output of this script is the SQL command sequence for aligning the pools
+# with the definition provided, which you should review before running them
+# against your database.
+#
+
+use strict;
+use Template;
+
+my %template=load_templates();
+
+if ($#ARGV != 0) {
+ print STDERR <<'EOF';
+Usage: generate_pool_addresses.pl ... | align_sqlippools.pl <dialect>
+
+EOF
+ exit 1;
+}
+
+my $dialect=$ARGV[0];
+
+unless (defined $template{$dialect}) {
+ print STDERR "Unknown dialect. Pick one of: ";
+ print STDERR "$_ " foreach sort keys %template;
+ print STDERR "\n";
+ exit 1;
+}
+
+my @ips=();
+
+my $line = 0;
+while (<STDIN>) {
+ $line++;
+
+ chomp;
+
+ next if $_ =~ /^#/ || $_ =~ /^\s*$/;
+
+ # The SQL works out what addresses to remove by itself
+ next if $_ =~ /^-/;
+
+ (my $action, my $pool_name, my $ip) = $_ =~ /^(.)\s+(.+)\s+([^\s]+)$/;
+
+ unless (defined $ip) {
+ warn "Unrecognised line $line: $_";
+ next;
+ }
+
+ push @ips, { poolname => $pool_name, ip => $ip };
+
+}
+
+my $tt=Template->new();
+$tt->process(\$template{$dialect}, {ips => \@ips, batchsize => 100}) || die $tt->error();
+
+exit 0;
+
+
+#
+# SQL dialect templates
+#
+
+sub load_templates {
+
+ my %template;
+
+#
+# MariaDB
+#
+ $template{'mariadb'} = <<'END_mariadb';
+-- Temporary table holds the provided IP pools
+DROP TEMPORARY TABLE IF EXISTS radippool_temp;
+CREATE TEMPORARY TABLE radippool_temp (
+ id int(11) unsigned NOT NULL auto_increment,
+ pool_name varchar(30) NOT NULL,
+ framedipaddress varchar(15) NOT NULL,
+ PRIMARY KEY (id),
+ KEY pool_name_framedipaddress (pool_name,framedipaddress)
+);
+
+-- Populate the temporary table
+[%- FOREACH m IN ips %]
+[%- "\n\nINSERT INTO radippool_temp (pool_name,framedipaddress) VALUES" IF loop.index % batchsize == 0 %]
+[%- IF (loop.index+1) % batchsize == 0 OR loop.last %]
+('[% m.poolname %]','[% m.ip %]');
+[%- ELSE %]
+('[% m.poolname %]','[% m.ip %]'),
+[%- END %]
+[%- END %]
+
+START TRANSACTION;
+
+-- Delete old pools that have been removed
+DELETE r FROM radippool r
+ LEFT JOIN radippool_temp t USING (pool_name,framedipaddress)
+ WHERE t.id IS NULL;
+
+-- Add new pools that have been created
+INSERT INTO radippool (pool_name,framedipaddress)
+ SELECT pool_name,framedipaddress FROM radippool_temp t WHERE NOT EXISTS (
+ SELECT * FROM radippool r
+ WHERE r.pool_name=t.pool_name AND r.framedipaddress=t.framedipaddress
+ );
+
+COMMIT;
+END_mariadb
+
+
+#
+# PostgreSQL
+#
+ $template{'postgresql'} = <<'END_postgresql';
+-- Temporary table holds the provided IP pools
+DROP TABLE IF EXISTS radippool_temp;
+CREATE TEMPORARY TABLE radippool_temp (
+ pool_name varchar(64) NOT NULL,
+ FramedIPAddress INET NOT NULL
+);
+CREATE INDEX radippool_temp_idx ON radippool_temp USING btree (pool_name,FramedIPAddress);
+
+-- Populate the temporary table
+[%- FOREACH m IN ips %]
+[%- "\n\nINSERT INTO radippool_temp (pool_name,framedipaddress) VALUES" IF loop.index % batchsize == 0 %]
+[%- IF (loop.index+1) % batchsize == 0 OR loop.last %]
+('[% m.poolname %]','[% m.ip %]');
+[%- ELSE %]
+('[% m.poolname %]','[% m.ip %]'),
+[%- END %]
+[%- END %]
+
+START TRANSACTION;
+
+-- Delete old pools that have been removed
+DELETE FROM radippool r WHERE NOT EXISTS (
+ SELECT FROM radippool_temp t
+ WHERE t.pool_name = r.pool_name AND t.framedipaddress = r.framedipaddress
+);
+
+-- Add new pools that have been created
+INSERT INTO radippool (pool_name,framedipaddress)
+ SELECT pool_name,framedipaddress FROM radippool_temp t WHERE NOT EXISTS (
+ SELECT * FROM radippool r
+ WHERE r.pool_name=t.pool_name AND r.framedipaddress=t.framedipaddress
+ );
+
+COMMIT;
+END_postgresql
+
+#
+# Oracle
+#
+ $template{'oracle'} = <<'END_oracle';
+-- Temporary table holds the provided IP pools
+CREATE GLOBAL TEMPORARY TABLE radippool_temp (
+ pool_name VARCHAR(30) NOT NULL,
+ FramedIPAddress VARCHAR(15) NOT NULL
+) ON COMMIT DELETE ROWS;
+CREATE INDEX radippool_temp_idx ON radippool_temp (pool_name,FramedIPAddress);
+
+-- Populate the temporary table
+[%- FOREACH m IN ips %]
+[%- "\nINSERT INTO radippool_temp (pool_name,framedipaddress) VALUES " %]('[% m.poolname %]','[% m.ip %]');
+[%- END %]
+
+-- Delete old pools that have been removed
+DELETE FROM radippool WHERE (pool_name, framedipaddress)
+ NOT IN (SELECT pool_name, framedipaddress FROM radippool_temp);
+
+-- Add new pools that have been created
+INSERT INTO radippool (pool_name,framedipaddress)
+ SELECT pool_name,framedipaddress FROM radippool_temp t WHERE NOT EXISTS (
+ SELECT * FROM radippool r
+ WHERE r.pool_name=t.pool_name AND r.framedipaddress=t.framedipaddress
+ );
+
+COMMIT;
+END_oracle
+
+#
+# SQLite
+#
+
+ $template{'sqlite'} = <<'END_sqlite';
+-- Temporary table holds the provided IP pools
+DROP TABLE IF EXISTS radippool_temp;
+CREATE TABLE radippool_temp (
+ pool_name varchar(30) NOT NULL,
+ framedipaddress varchar(15) NOT NULL
+);
+
+CREATE INDEX radippool_temp_idx ON radippool_temp (pool_name,FramedIPAddress);
+
+-- Populate the temporary table
+[%- FOREACH m IN ips %]
+[%- "\n\nINSERT INTO radippool_temp (pool_name,framedipaddress) VALUES" IF loop.index % batchsize == 0 %]
+[%- IF (loop.index+1) % batchsize == 0 OR loop.last %]
+('[% m.poolname %]','[% m.ip %]');
+[%- ELSE %]
+('[% m.poolname %]','[% m.ip %]'),
+[%- END %]
+[%- END %]
+
+BEGIN TRANSACTION;
+
+-- Delete old pools that have been removed
+DELETE FROM radippool WHERE rowid IN (
+ SELECT r.rowid FROM radippool r
+ LEFT JOIN radippool_temp t USING (pool_name,framedipaddress)
+ WHERE t.rowid IS NULL);
+
+-- Add new pools that have been created
+INSERT INTO radippool (pool_name,framedipaddress)
+ SELECT pool_name,framedipaddress FROM radippool_temp t WHERE NOT EXISTS (
+ SELECT * FROM radippool r
+ WHERE r.pool_name=t.pool_name AND r.framedipaddress=t.framedipaddress
+ );
+
+COMMIT;
+
+DROP TABLE radippool_temp;
+END_sqlite
+
+#
+# MS SQL
+#
+ $template{'mssql'} = <<'END_mssql';
+-- Temporary table holds the provided IP pools
+DROP TABLE IF EXISTS #radippool_temp;
+GO
+
+CREATE TABLE #radippool_temp (
+ id int identity(1, 1) NOT NULL,
+ pool_name varchar(30) NOT NULL,
+ framedipaddress varchar(15) NOT NULL,
+ PRIMARY KEY (id),
+);
+GO
+
+CREATE INDEX pool_name_framedipaddress ON #radippool_temp(pool_name, framedipaddress);
+GO
+
+-- Populate the temporary table
+[%- FOREACH m IN ips %]
+[%- "\n\nINSERT INTO #radippool_temp (pool_name,framedipaddress) VALUES" IF loop.index % batchsize == 0 %]
+[%- IF (loop.index+1) % batchsize == 0 OR loop.last %]
+('[% m.poolname %]','[% m.ip %]');
+GO
+[%- ELSE %]
+('[% m.poolname %]','[% m.ip %]'),
+[%- END %]
+[%- END %]
+
+BEGIN TRAN;
+
+-- Delete old pools that have been removed
+DELETE r FROM radippool r
+ LEFT JOIN #radippool_temp t ON r.pool_name = t.pool_name AND r.framedipaddress = t.framedipaddress
+ WHERE t.id IS NULL;
+
+-- Add new pools that have been created
+INSERT INTO radippool (pool_name,framedipaddress)
+ SELECT pool_name,framedipaddress FROM #radippool_temp t WHERE NOT EXISTS (
+ SELECT * FROM radippool r
+ WHERE r.pool_name=t.pool_name AND r.framedipaddress=t.framedipaddress
+ );
+
+COMMIT TRAN;
+END_mssql
+
+ return %template;
+
+}
diff --git a/scripts/sql/generate_pool_addresses.pl b/scripts/sql/generate_pool_addresses.pl
new file mode 100755
index 0000000..85fe030
--- /dev/null
+++ b/scripts/sql/generate_pool_addresses.pl
@@ -0,0 +1,456 @@
+#!/usr/bin/perl -Tw
+
+######################################################################
+#
+# Copyright (C) 2020 Network RADIUS
+#
+# $Id$
+#
+######################################################################
+#
+# Helper script for populating IP pools with address entries.
+#
+# This script generates output that is useful for populating an IP pool for
+# use with FreeRADIUS (and possibly other purposes). The pool may be
+# implemented as an SQL IP Pool (rlm_sqlippool) or any other backing store
+# that has one entry per IP address.
+#
+# This script outputs a list of addresses to add, retain and remove in order to
+# align a pool to a specification. It is likely that you will want to
+# process the output to generate the actual commands (e.g. SQL statements)
+# that make changes to the datastore. For example:
+#
+# generate_pool_addresses.pl ... | align_sql_pools.pl postgresql
+#
+#
+# Use with a single address range
+# -------------------------------
+#
+# For basic use, arguments can be provided to this script that denote the ends
+# of a single IP (v4 or v6) address range together with the pool_name.
+#
+# Optionally the number of IPs to sparsely populate the range with can be
+# provided. If the range is wider than a /16 then the population of the range
+# is capped at 65536 IPs, unless otherwise specified.
+#
+# In the case that a sparse range is defined, a file containing pre-existing
+# IP entries can be provided. The range will be populated with entries from
+# this file that fall within the range, prior to the remainder of the range
+# being populated with random addresses in the range.
+#
+# generate_pool_addresses.pl <pool_name> <range_start> <range_end> \
+# [ <capacity> [ <existing_ips_file> ] ]
+#
+# Note: Sparse ranges are populated using a deterministic, pseudo-random
+# function. This allows pools to be trivially extended without having to
+# supply the existing contents using a file. If you require
+# less-predictable randomness or a different random sequence then remove
+# or modify the line calling srand(), below.
+#
+#
+# Use with multiple pools and address ranges
+# ------------------------------------------
+#
+# For more complex use, the script allows a set of pool definitions to be
+# provided in a YAML file which describes a set of one or more pools, each
+# containing a set of one or more ranges. The first argument in this case is
+# always "yaml":
+#
+# generate_pool_addresses.pl yaml <pool_defs_yaml_file> [ <existing_ips_file> ]
+#
+# The format for the YAML file is demonstrated by the following example:
+#
+# pool_with_a_single_contiguous_range:
+# - start: 192.0.2.3
+# end: 192.0.2.250
+#
+# pool_with_a_single_sparse_range:
+# - start: 10.10.10.0
+# end: 10.10.20.255
+# capacity: 200
+#
+# pool_with_multiple_ranges:
+# - start: 10.10.10.1
+# end: 10.10.10.253
+# - start: 10.10.100.0
+# end: 10.10.199.255
+# capacity: 1000
+#
+# v6_pool_with_contiguous_range:
+# - start: '2001:db8:1:2:3:4:5:10'
+# end: '2001:db8:1:2:3:4:5:7f'
+#
+# v6_pool_with_sparse_range:
+# - start: '2001:db8:1:2::'
+# end: '2001:db8:1:2:ffff:ffff:ffff:ffff'
+# capacity: 200
+#
+# As with the basic use case, a file containing pre-existing IP entries can be
+# provided with which any sparse ranges will be populated ahead of any random
+# addresses.
+#
+#
+# Output
+# ------
+#
+# The script returns line-based output beginning with "+", "=" or "-", and
+# includes the pool_name and an IP address.
+#
+# + pool_name 192.0.2.10
+#
+# A new address to be added to the corresponding range in the pool.
+#
+# = pool_name 192.0.2.20
+#
+# A pre-existing address that is to be retained in the pool. (Only if a
+# pre-existing pool entries file is given.)
+#
+# - pool_name 192.0.2.30
+#
+# A pre-existing address that is to be removed from the corresponding
+# range in the pool. (Only if a pre-existing pool entries file is given.)
+#
+# # main_pool: 192.0.10.3 - 192.0.12.250 (500)
+#
+# Lines beginning with "#" are comments
+#
+#
+# Examples
+# --------
+#
+# generate_pool_addresses.pl main_pool 192.0.2.3 192.0.2.249
+#
+#   Will create a pool from a fully populated IPv4 range (i.e. all IPs in the
+#   range available for allocation).
+#
+# generate_pool_addresses.pl main_pool 10.66.0.0 10.66.255.255 10000
+#
+# Will create a pool from a sparsely populated IPv4 range for a /16
+#   network (maximum of 65,536 addresses), populating the range with 10,000
+#   addresses. The effective size of the pool can be increased in future by
+# increasing the capacity of the range with:
+#
+# generate_pool_addresses.pl main_pool 10.66.0.0 10.66.255.255 20000
+#
+# This generates the same initial set of 10,000 addresses as the previous
+# example but will create 20,000 addresses overall, unless the random seed
+# has been amended since the initial run.
+#
+# generate_pool_addresses.pl main_pool 2001:db8:1:2:: \
+# 2001:db8:1:2:ffff:ffff:ffff:ffff
+#
+# Will create a pool from the IPv6 range 2001:db8:1:2::/64, initially
+# populating the range with 65536 (by default) addresses.
+#
+# generate_pool_addresses.pl main_pool 2001:db8:1:2:: \
+# 2001:db8:1:2:ffff:ffff:ffff:ffff \
+# 10000 existing_ips.txt
+#
+# Will create a pool using the same range as the previous example, but
+# this time the range will be populated with 10,000 addresses. The range
+# will be populated using lines extracted from the `existing_ips.txt` file
+# that represent IPs which fall within range.
+#
+# generate_pool_addresses.pl yaml pool_defs.yml existing_ips.txt
+#
+#   Will create one or more pools using the definitions found in the
+# pool_defs.yml YAML file. The pools will contain one or more ranges with
+# each of the ranges first being populated with entries from the
+# existing_ips.txt file that fall within the range, before being filled
+# with random addresses to the defined capacity.
+#
+
+use strict;
+use Net::IP qw/ip_bintoip ip_iptobin ip_bincomp ip_binadd ip_is_ipv4 ip_is_ipv6/;
+
+my $yaml_available = 0;
+
+eval {
+ require YAML::XS;
+ YAML::XS->import('LoadFile');
+ $yaml_available = 1;
+};
+
+if ($#ARGV < 2 || $#ARGV > 4) {
+ usage();
+ exit 1;
+}
+
+if ($ARGV[0] eq 'yaml') {
+
+ if ($#ARGV > 3) {
+ usage();
+ exit 1;
+ }
+
+ unless ($yaml_available) {
+ die "ERROR: YAML is not available. Install the YAML::XS Perl module.";
+ }
+ process_yaml_file();
+
+ goto done;
+
+}
+
+process_commandline();
+
+done:
+
+exit 0;
+
+
+sub usage {
+ print STDERR <<'EOF'
+Usage:
+ generate_pool_addresses.pl <pool_name> <range_start> <range_end> [ <capacity> [ <existing_ips_file> ] ]
+or:
+ generate_pool_addresses.pl yaml <pool_defs_yaml_file> [ <existing_ips_file> ]
+
+EOF
+}
+
+
+sub process_commandline {
+
+ $SIG{__DIE__} = sub { usage(); die(@_); };
+
+ my $pool_name = $ARGV[0];
+ my $range_start = $ARGV[1];
+ my $range_end = $ARGV[2];
+ my $capacity = $ARGV[3];
+
+ my @entries = ();
+ @entries = load_entries($ARGV[4]) if ($#ARGV >= 4);
+
+ handle_range($pool_name, $range_start, $range_end, $capacity, @entries);
+
+}
+
+
+sub process_yaml_file {
+
+ my $yaml_file = $ARGV[1];
+
+ unless (-r $yaml_file) {
+ die "ERROR: Cannot open <pool_defs_yaml_file> for reading: $yaml_file";
+ }
+
+ my %pool_defs = %{LoadFile($yaml_file)};
+
+ my @entries = ();
+ @entries = load_entries($ARGV[2]) if ($#ARGV >= 2);
+
+ foreach my $pool_name (sort keys %pool_defs) {
+ foreach my $range (@{$pool_defs{$pool_name}}) {
+ my $range_start = $range->{start};
+ my $range_end = $range->{end};
+ my $capacity = $range->{capacity};
+ handle_range($pool_name, $range_start, $range_end, $capacity, @entries);
+ }
+ }
+
+}
+
+
+sub load_entries {
+
+ my $entries_file = shift;
+
+ my @entries = ();
+ unless (-r $entries_file) {
+ die "ERROR: Cannot open <existing_ips_file> for reading: $entries_file"
+ }
+ open(my $fh, "<", $entries_file) || die "Failed to open $entries_file";
+ while(<$fh>) {
+ chomp;
+ push @entries, $_;
+ }
+
+ return @entries;
+
+}
+
+
+sub handle_range {
+
+ my $pool_name = shift;
+ my $range_start = shift;
+ my $range_end = shift;
+ my $capacity = shift;
+ my @entries = @_;
+
+ unless (ip_is_ipv4($range_start) || ip_is_ipv6($range_start)) {
+ die "ERROR: Incorrectly formatted IPv4/IPv6 address for range_start: $range_start";
+ }
+
+ unless (ip_is_ipv4($range_end) || ip_is_ipv6($range_end)) {
+ die "ERROR: Incorrectly formatted IPv4/IPv6 address for range_end: $range_end";
+ }
+
+ my $ip_start = new Net::IP($range_start);
+ my $ip_end = new Net::IP($range_end);
+ my $ip_range = new Net::IP("$range_start - $range_end");
+
+ unless (defined $ip_range) {
+ die "ERROR: The range defined by <range_start> - <range_end> is invalid: $range_start - $range_end";
+ }
+
+ my $range_size = $ip_range->size;
+ $capacity = $range_size < 65536 ? "$range_size" : 65536 unless defined $capacity;
+
+ if ($range_size < $capacity) {
+ $capacity = "$range_size";
+ warn "WARNING: Insufficent IPs in the range. Will create $capacity entries.";
+ }
+
+ # Prune the entries to only those within the specified range
+ for (my $i = 0; $i <= $#entries; $i++) {
+ my $version = ip_is_ipv4($entries[$i]) ? 4 : 6;
+ my $binip = ip_iptobin($entries[$i],$version);
+ if ($ip_start->version != $version ||
+ ip_bincomp($binip, 'lt', $ip_start->binip) == 1 ||
+ ip_bincomp($binip, 'gt', $ip_end->binip) == 1) {
+ $entries[$i]='';
+ }
+ }
+
+ #
+ # We use the sparse method if the number of entries available occupies < 80% of
+ # the network range, otherwise we use a method that involves walking the
+ # entire range.
+ #
+
+ srand(42); # Set the seed for the PRNG
+
+ if (length($range_size) > 9 || $capacity / "$range_size" < 0.8) { # From "BigInt" to FP
+ @entries = sparse_fill($pool_name, $ip_start, $ip_end, $capacity, @entries);
+ } else {
+ @entries = dense_fill($pool_name, $ip_start, $ip_end, $ip_range, $capacity, @entries);
+ }
+
+ print "# $pool_name: $range_start - $range_end ($capacity)\n";
+ print "$_\n" foreach @entries;
+ print "\n";
+
+}
+
+
+#
+# With this sparse fill method we randomly allocate within the scope of the
+# smallest enclosing network prefix, checking that we are within the given
+# range, retrying if we are outside or we hit a duplicate.
+#
+# This method can efficiently choose a small number of addresses relative to
+# the size of the range. It becomes slower as the population of a range nears
+# the range's limit since it is harder to choose a free address at random.
+#
+# It is useful for selecting a handful of addresses from an enormous IPv6 /64
+# network for example.
+#
+sub sparse_fill {
+
+ my $pool_name = shift;
+ my $ip_start = shift;
+ my $ip_end = shift;
+ my $capacity = shift;
+ my @entries = @_;
+
+ # Find the smallest network that encloses the given range
+ my $version = $ip_start->version;
+ ( $ip_start->binip ^ $ip_end->binip ) =~ /^\0*/;
+ my $net_prefix = $+[0];
+ my $net_bits = substr($ip_start->binip, 0, $net_prefix);
+ my $host_length = length($ip_start->binip) - $net_prefix;
+
+ my %ips = ();
+ my $i = 0;
+ while ($i < $capacity) {
+
+ # Use the given entries first
+ my $rand_ip;
+ my $given_lease = 0;
+ shift @entries while $#entries >= 0 && $entries[0] eq '';
+ if ($#entries >= 0) {
+ $rand_ip = ip_iptobin(shift @entries, $version);
+ $given_lease = 1;
+ } else {
+ $rand_ip = $net_bits;
+ $rand_ip .= [0..1]->[rand 2] for 1..$host_length;
+ # Check that we are inside the given range
+ next if ip_bincomp($rand_ip, 'lt', $ip_start->binip) == 1 ||
+ ip_bincomp($rand_ip, 'gt', $ip_end->binip) == 1;
+ }
+
+ next if defined $ips{$rand_ip};
+
+ $ips{$rand_ip} = $given_lease ? '=' : '+';
+ $i++;
+
+ }
+
+ # Allow the pool to be shrunk
+ $ips{ip_iptobin($_, $version)} = '-' foreach @entries;
+
+ return map { $ips{$_}." ".$pool_name." ".ip_bintoip($_, $version) } sort keys %ips;
+
+}
+
+
+#
+# With this dense fill method, after first selecting the given entries we walk
+# the network range picking IPs with evenly distributed probability.
+#
+# This method can efficiently choose a large number of addresses relative to
+# the size of a range, provided that the range isn't massive. It becomes
+# slower as the range size increases.
+#
+sub dense_fill {
+
+ my $pool_name = shift;
+ my $ip_start = shift;
+ my $ip_end = shift;
+ my $ip_range = shift;
+ my $capacity = shift;
+ my @entries = @_;
+
+ my $version = $ip_start->version;
+
+ my $one = ("0"x($version == 4 ? 31 : 127)) . '1';
+
+ my %ips = ();
+ my $remaining_entries = $capacity;
+ my $remaining_ips = $ip_range->size;
+ my $ipbin = $ip_start->binip;
+
+ while ($remaining_entries > 0 && (ip_bincomp($ipbin, 'le', $ip_end->binip) == 1)) {
+
+ # Use the given entries first
+ shift @entries while $#entries >= 0 && $entries[0] eq '';
+ if ($#entries >= 0) {
+ $ips{ip_iptobin(shift @entries, $version)} = '=';
+ $remaining_entries--;
+ $remaining_ips--;
+ next;
+ }
+
+ goto next_ip if defined $ips{$ipbin};
+
+ # Skip the IP that we have already selected by given entries, otherwise
+ # randomly pick it
+ if (!defined $ips{$ipbin} &&
+ (rand) <= $remaining_entries / "$remaining_ips") { # From "BigInt" to FP
+ $ips{$ipbin} = '+';
+ $remaining_entries--;
+ }
+
+ $remaining_ips--;
+ $ipbin = ip_binadd($ipbin,$one);
+
+ }
+
+ # Allow the pool to be shrunk
+ $ips{ip_iptobin($_, $version)} = '-' foreach @entries;
+
+ return map { $ips{$_}." ".$pool_name." ".ip_bintoip($_, $version) } sort keys %ips;
+
+}
diff --git a/scripts/sql/radsqlrelay b/scripts/sql/radsqlrelay
new file mode 100755
index 0000000..74531ba
--- /dev/null
+++ b/scripts/sql/radsqlrelay
@@ -0,0 +1,342 @@
+#!/usr/bin/perl -w
+##
+## radsqlrelay.pl This program tails a SQL logfile and forwards
+## the queries to a database server. Used to
+## replicate accounting records to one (central)
+## database, even if the database has extended
+## downtime.
+##
+## Version: $Id$
+##
+## Author: Nicolas Baradakis <nicolas.baradakis@cegetel.net>
+##
+## Copyright (C) 2005 Cegetel
+## Copyright (C) 2019 Network RADIUS
+##
+## This program is free software; you can redistribute it and/or
+## modify it under the terms of the GNU General Public License
+## as published by the Free Software Foundation; either version 2
+## of the License, or (at your option) any later version.
+##
+## This program is distributed in the hope that it will be useful,
+## but WITHOUT ANY WARRANTY; without even the implied warranty of
+## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+## GNU General Public License for more details.
+##
+## You should have received a copy of the GNU General Public License
+## along with this program; if not, write to the Free Software
+## Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA
+##
+
+use strict;
+
+use DBI;
+use Fcntl;
+use Getopt::Std;
+use POSIX qw(:unistd_h :errno_h);
+use Time::HiRes qw(clock_gettime usleep CLOCK_MONOTONIC);
+
+# We send watchdogs at half the indicated interval if the
+# Linux::Systemd::Daemon module is available and the WATCHDOG_USEC environment
+# variable is set by the systemd service manager.
+my $watchdog_usec;
+my $next_watchdog;
+eval {
+ require Linux::Systemd::Daemon;
+ Linux::Systemd::Daemon->import();
+ $watchdog_usec = $ENV{'WATCHDOG_USEC'} || undef;
+};
+
+# To synthetically test the watchdog functionality then uncomment these next
+# lines:
+#
+# $watchdog_usec = 30 *1000*1000;
+# sub sd_notify {}
+
+my $maxcollect = 100; # tunable, works for MySQL!
+
+my $lastinsert;
+my @values;
+
+my $need_exit = 0;
+my $debug = 0;
+
+sub got_signal()
+{
+ $need_exit = 1;
+ sd_notify(stopping => 1, status => 'Signalled. Shutting down.') if $watchdog_usec;
+}
+
+sub debug
+{
+ print shift if $debug;
+}
+
+# /!\ OS-dependent structure
+# Linux struct flock
+# short l_type;
+# short l_whence;
+# off_t l_start;
+# off_t l_len;
+# pid_t l_pid;
+# c2ph says: typedef='s2 l2 i', sizeof=16
+my $FLOCK_STRUCT = 's2l2i';
+
+sub setlock($;$$)
+{
+ my ($fh, $start, $len) = @_;
+ $start = 0 unless defined $start;
+ $len = 0 unless defined $len;
+
+ #type whence start till pid
+ my $packed = pack($FLOCK_STRUCT, F_WRLCK, SEEK_SET, $start, $len, 0);
+ if (fcntl($fh, F_SETLKW, $packed)) { return 1 }
+ else { return 0 }
+}
+
+sub usage()
+{
+ print STDERR <<HERE;
+usage: radsqlrelay [options] file_path
+options:
+ -? Print this help message.
+ -1 One-shot mode: push the file to database and exit.
+ -b database Name of the database to use.
+ -d sql_driver Driver to use: mysql, pg, oracle.
+ -f file Read password from file, instead of command line.
+ -h host Connect to host.
+ -P port Port number to use for connection.
+ -p password Password to use when connecting to server.
+ -u user User for login.
+ -x Turn on debugging.
+HERE
+}
+
+
+# Sleep for given amount of time, but don't oversleep the watchdog interval.
+# Send a watchdog notification if it is due.
+# interval can be given as 0 to just check whether a watchdog is due and send
+# it as necessary, in which case we do not yield.
+sub sleep_for ($)
+{
+ my $interval=shift;
+ $interval*=1000*1000;
+ if ($watchdog_usec) {
+ my $now=clock_gettime(CLOCK_MONOTONIC)*1000*1000;
+ if ($now >= $next_watchdog) {
+ $next_watchdog=$now+($watchdog_usec / 2);
+ debug "Sending watchdog\n";
+ sd_notify(watchdog => 1);
+ debug "Next watchdog due in ".(($next_watchdog-$now)/1000/1000)." secs.\n";
+ }
+ # Don't oversleep!
+ $interval=$next_watchdog-$now if $next_watchdog-$now < $interval;
+ }
+ return unless $interval; # Don't yield if we are not asked to sleep
+ debug "Sleeping for ".($interval/1000/1000)." secs.\n";
+ usleep ($interval);
+}
+
+
+sub connect_wait($)
+{
+ my $dbinfo = shift;
+ my $dbh;
+ debug "Connecting to " . $dbinfo->{base};
+ while (!$dbh) {
+ debug ".";
+ $dbh = DBI->connect($dbinfo->{base}, $dbinfo->{user}, $dbinfo->{pass},
+ { RaiseError => 0, PrintError => 0,
+ AutoCommit => 1 });
+ sleep_for (1) if !$dbh;
+ exit if $need_exit;
+ }
+ debug "\n";
+ $dbinfo->{handle} = $dbh;
+}
+
+
+
+sub process_file($$)
+{
+ my ($dbinfo, $path) = @_;
+
+ sub do_inserts($) {
+ my $dbinfo = shift;
+ debug "I";
+ if (scalar(@values) > 0) {
+ my $query = $lastinsert . " ";
+ $query .= join(" ), ( ",@values);
+ $query .= " );";
+ do_query($dbinfo,$query);
+ }
+ @values = ();
+ }
+
+ sub do_query($$) {
+ my ($dbinfo,$query) = @_;
+ debug ">";
+ until ($dbinfo->{handle}->do($query)) {
+ # If an error occured and we're disconnected then try to recomnnect
+ # and redo the query, otherwise give up so we don't become stuck.
+ print $dbinfo->{handle}->errstr."\n";
+ if ($dbinfo->{handle}->ping) {
+ sleep_for (1);
+ last;
+ } else {
+ print "error: Lost connection to database\n";
+ $dbinfo->{handle}->disconnect;
+ connect_wait($dbinfo);
+ }
+ }
+ sleep_for(0) if $watchdog_usec; # Send a watchdog if it is due
+ }
+
+ unless (-e $path.'.work') {
+ debug "Waiting for $path\n";
+ until (rename($path, $path.'.work')) {
+ if ($! == ENOENT) {
+ sleep_for(1);
+ return if $need_exit;
+ } else {
+ print STDERR "error: Couldn't move $path to $path.work: $!\n";
+ exit 1;
+ }
+ }
+ debug "Renamed $path to $path.work\n";
+ }
+
+ debug "\nOpening $path.work\n";
+ open(FILE, "+< $path.work") or die "error: Couldn't open $path.work: $!\n";
+ debug "Getting file lock\n";
+ setlock(\*FILE) or die "error: Couldn't lock $path.work: $!\n";
+
+ $lastinsert = "";
+ @values = ();
+
+ debug "Reading: ";
+ my $lines = 0;
+ while (<FILE>) {
+ chomp (my $line = $_);
+ $lines++;
+
+ if (!($line =~ /^\s*insert\s+into\s+`?\w+`?\s+(?:\(.*?\)\s+)?
+ values\s*\(.*\)\s*;\s*$/ix)) {
+ # This is no INSERT, so start new collection
+ do_inserts($dbinfo);
+ debug ".";
+ $lastinsert = "";
+ # must output this line
+ do_query($dbinfo, "$line");
+
+ } else {
+ # This is an INSERT, so collect it
+ debug "+";
+ my $insert = $line;
+ my $values = $line;
+ $insert =~ s/^\s*(insert\s+into\s+`?\w+`?\s+(?:\(.*?\)\s+)?
+ values\s*\().*\)\s*;\s*$/$1/ix;
+ $values =~ s/^\s*insert\s+into\s+`?\w+`?\s+(?:\(.*?\)\s+)?
+ values\s*\((.*)\)\s*;\s*$/$1/ix;
+
+ if (($lastinsert ne "") && ($insert ne $lastinsert)) {
+ # This is different from the last one
+ do_inserts($dbinfo);
+ }
+ push(@values, $values);
+ $lastinsert = $insert; # start new collection
+ }
+
+ # limit to $maxcollect collected lines
+ if (scalar(@values) >= $maxcollect) {
+ debug "hit maxcollect limit, doing inserts";
+ do_inserts($dbinfo);
+ }
+ }
+
+ # Cleanup
+ debug "\nNo more lines to read, doing any final inserts: ";
+ do_inserts($dbinfo);
+ debug "\n";
+
+ debug "Processed $lines lines\n";
+ debug "Removing and closing $path.work\n\n";
+ unlink($path.'.work');
+ close(FILE); # and unlock
+}
+
+# sub main()
+
+my %args = (
+ b => 'radius',
+ d => 'mysql',
+ h => 'localhost',
+ p => 'radius',
+ u => 'radius',
+);
+my $ret = getopts("b:d:f:h:P:p:u:x1?", \%args);
+if (!$ret or @ARGV != 1) {
+ usage();
+ exit 1;
+}
+if ($args{'?'}) {
+ usage();
+ exit 0;
+}
+$debug = 1 if $args{'x'};
+
+my $data_source;
+if (lc($args{d}) eq 'mysql') {
+ $data_source = "DBI:mysql:database=$args{b};host=$args{h}";
+} elsif (lc($args{d}) eq 'pg') {
+ $data_source = "DBI:Pg:dbname=$args{b};host=$args{h}";
+} elsif (lc($args{d}) eq 'oracle') {
+ $data_source = "DBI:Oracle:$args{b}";
+ # Oracle does not conform to the SQL standard for multirow INSERTs
+ $maxcollect = 1;
+} else {
+ print STDERR "error: SQL driver not supported yet: $args{d}\n";
+ exit 1;
+}
+$data_source .= ";port=$args{P}" if $args{'P'};
+
+my $pw;
+if($args{f}) {
+ open(FILE, "< $args{f}") or die "error: Couldn't open $args{f}: $!\n";
+ $pw = <FILE>;
+ chomp($pw);
+ close(FILE);
+} else {
+ # args{p} is always defined.
+ $pw = $args{p};
+}
+
+$SIG{INT} = \&got_signal;
+$SIG{TERM} = \&got_signal;
+
+if ($watchdog_usec) {
+ debug "Watchdog set to $watchdog_usec\n";
+ my $now=clock_gettime(CLOCK_MONOTONIC)*1000*1000;
+ $next_watchdog=$now+($watchdog_usec / 2);
+ sd_notify(ready => 1, status => 'Started');
+}
+
+my %dbinfo = (
+ base => $data_source,
+ user => $args{u},
+ pass => $pw,
+);
+connect_wait(\%dbinfo);
+
+my $path = shift @ARGV;
+
+until ($need_exit) {
+ process_file(\%dbinfo, $path);
+ last if ($args{1} || $need_exit);
+ debug "Sleeping\n";
+ sleep_for(10);
+}
+
+debug "Disconnecting from database\n";
+$dbinfo{handle}->disconnect;
+
diff --git a/scripts/sql/rlm_sqlippool_tool b/scripts/sql/rlm_sqlippool_tool
new file mode 100755
index 0000000..47a48fc
--- /dev/null
+++ b/scripts/sql/rlm_sqlippool_tool
@@ -0,0 +1,961 @@
+#!/usr/bin/perl -Tw
+
+######################################################################
+#
+# Copyright (C) 2020 Network RADIUS
+#
+# $Id$
+#
+######################################################################
+#
+# Helper script for populating IP pools with address entries.
+#
+# This script generates SQL output that is useful for populating an IP pool
+# for use with FreeRADIUS (and possibly other purposes). Alternatively,
+# if called with the -f option will directly operate on the database configured
+# within the FreeRADIUS configuration to update the IP pool table specified
+#
+# Note: Direct connection to databases is done using Perl DBI. You may need
+# to install the appropriate Perl DBD driver to enable this functionality.
+# Formatted SQL output is produced using Perl Template::Toolkit which
+# will need to be installed if this output is required.
+#
+#
+# Use with a single address range
+# -------------------------------
+#
+# For basic use, arguments can be provided to this script that denote the ends
+# of a single IP (v4 or v6) address range together with the pool_name and
+# SQL dialect or a raddb directory from which the database config will be
+# read.
+#
+# If a raddb directory is specified, then the instance of the FreeRADIUS sql
+# module to be found in the config can be specified. It defaults to "sql".
+#
+# Optionally the number of IPs to sparsely populate the range with can be
+# provided. If the range is wider than a /16 then the population of the range
+# is capped at 65536 IPs, unless otherwise specified.
+#
+# In the case that a sparse range is defined, a file containing pre-existing
+# IP entries can be provided. The range will be populated with entries from
+# this file that fall within the range, prior to the remainder of the range
+# being populated with random address in the range.
+#
+# rlm_sqlippool_tool -p <pool_name> -s <range_start> -e <range_end> \
+# -t <table_name> (-d <sql_dialect> | -f <raddb_dir> [ -i <instance> ]) \
+# [ -c <capacity> ] [ -x <existing_ips_file> ]
+#
+# Note: Sparse ranges are populated using a deterministic, pseudo-random
+# function. This allows pools to be trivially extended without having to
+# supply the existing contents using a file. If you require
+# less-predictable randomness or a different random sequence then remove
+# or modify the line calling srand(), below.
+#
+#
+# Use with multiple pools and address ranges
+# ------------------------------------------
+#
+# For more complex use, the script allows a set of pool definitions to be
+# provided in a YAML file which describes a set of one or more pools, each
+# containing a set of one or more ranges.
+#
+# rlm_sqlippool_tool -y <pool_defs_yaml_file> -t <table_name> \
+# ( -d <dialect> | -f <raddb_dir> [ -i <instance> ] ) \
+# [ -x <existing_ips_file> ]
+#
+# The format for the YAML file is demonstrated by the following example:
+#
+# pool_with_a_single_contiguous_range:
+# - start: 192.0.2.3
+# end: 192.0.2.250
+#
+# pool_with_a_single_sparse_range:
+# - start: 10.10.10.0
+# end: 10.10.20.255
+# capacity: 200
+#
+# pool_with_multiple_ranges:
+# - start: 10.10.10.1
+# end: 10.10.10.253
+# - start: 10.10.100.0
+# end: 10.10.199.255
+# capacity: 1000
+#
+# v6_pool_with_contiguous_range:
+# - start: '2001:db8:1:2:3:4:5:10'
+# end: '2001:db8:1:2:3:4:5:7f'
+#
+# v6_pool_with_sparse_range:
+# - start: '2001:db8:1:2::'
+# end: '2001:db8:1:2:ffff:ffff:ffff:ffff'
+# capacity: 200
+#
+# As with the basic use case, a file containing pre-existing IP entries can be
+# provided with which any sparse ranges will be populated ahead of any random
+# addresses.
+#
+#
+# Output
+# ------
+#
+# The script returns SQL formatted appropriately for one of a number of
+# different SQL dialects.
+#
+# The SQL first creates a temporary table to insert the new pools into,
+# inserts the addresses, then removes any existing entries from the pool
+# table that do not exist in the new pool. Finally any new entries that
+# don't exist in the existing pool table are copied from the temporary
+# table.
+#
+# The SQL templates assume that the pool name will be in a field called
+# "pool_name" and the IP address in a field named "framedipaddress",
+# matching the default schema for ippools and DHCP ippools as shipped with
+# FreeRADIUS.
+#
+#
+# Examples
+# --------
+#
+# rlm_sqlippool_tool -p main_pool -s 192.0.2.3 -e 192.0.2.249 \
+# -d postgresql -t radippool
+#
+# Will create a pool from a full populated IPv4 range, i.e. all IPs in the
+# range available for allocation, with SQL output suitable for PostgreSQL
+#
+# rlm_sqlippool_tool -p main_pool -s 10.66.0.0 -e 10.66.255.255 -c 10000 \
+# -d mysql -t radippool
+#
+# Will create a pool from a sparsely populated IPv4 range for a /16
+# network (maximum of 65,536 addresses), populating the range with 10,000
+# addresses. With SQL output suitable for MySQL.
+# The effective size of the pool can be increased in future by increasing
+# the capacity of the range with:
+#
+# rlm_sqlippool_tool -p main_pool -s 10.66.0.0 -e 10.66.255.255 -c 20000 \
+# -d mysql -t radippool
+#
+# This generates the same initial set of 10,000 addresses as the previous
+# example but will create 20,000 addresses overall, unless the random seed
+# has been amended since the initial run.
+#
+# rlm_sqlippool_tool -p main_pool -s 2001:db8:1:2:: \
+# -e 2001:db8:1:2:ffff:ffff:ffff:ffff -d mssql -t radippool
+#
+# Will create a pool from the IPv6 range 2001:db8:1:2::/64, initially
+# populating the range with 65536 (by default) addresses.
+#
+# rlm_sqlippool_tool -p main_pool -s 2001:db8:1:2:: \
+# -e 2001:db8:1:2:ffff:ffff:ffff:ffff \
+# -c 10000 -x existing_ips.txt -d mysql -t radippool
+#
+# Will create a pool using the same range as the previous example, but
+# this time the range will be populated with 10,000 addresses. The range
+# will be populated using lines extracted from the `existing_ips.txt` file
+# that represent IPs which fall within range.
+#
+# rlm_sqlippool_tool -y pool_defs.yml -d postgresql -t radippool \
+# -x existing_ips.txt
+#
+# Will create one or more pools using the definitions found in the
+# pool_defs.yml YAML file. The pools will contain one or more ranges with
+# each of the ranges first being populated with entries from the
+# existing_ips.txt file that fall within the range, before being filled
+# with random addresses to the defined capacity.
+#
+
+use strict;
+use Net::IP qw/ip_bintoip ip_iptobin ip_bincomp ip_binadd ip_is_ipv4 ip_is_ipv6/;
+
+#
+# Option defaults
+#
+my $opts = {
+	instance => 'sql',
+	capacity => 65536
+};
+
+#
+# Parse the command line arguments
+#
+# Single-letter flags map onto named options; the word following a flag
+# becomes that option's value.
+#
+my %opt_name_for = (
+	p => 'pool_name',
+	s => 'range_start',
+	e => 'range_end',
+	c => 'capacity',
+	t => 'table_name',
+	d => 'dialect',
+	y => 'yaml',
+	x => 'entries',
+	f => 'raddb_dir',
+	i => 'instance',
+);
+
+my $pending_opt = '';
+foreach my $arg (@ARGV) {
+	if ($arg =~ m/^-(.)$/) {
+		# Unknown flags are fatal
+		unless (defined $opt_name_for{$1}) {
+			usage();
+			exit 1;
+		}
+		$pending_opt = $opt_name_for{$1};
+	} else {
+		# A bare word is only valid after a flag has been seen
+		if ($pending_opt eq '') {
+			usage();
+			exit 1;
+		}
+		$opts->{$pending_opt} = $arg;
+	}
+}
+
+#
+# If a raddb dir is set then we parse the mods-enabled config
+#
+# A minimal brace-counting parser: it scans each file in mods-enabled for
+# the named sql instance, then records "name = value" assignments into
+# $opts keyed by the current section/subsection nesting (up to 3 deep).
+#
+if ($opts->{raddb_dir}) {
+	my $found = 0;
+	if (-d $opts->{raddb_dir}.'/mods-enabled') {
+		opendir(my $dh, $opts->{raddb_dir}.'/mods-enabled') || die 'ERROR: Could not open directory '.$opts->{raddb_dir}.'/mods-enabled';
+		# Only plain files can be module configs
+		my @dir = grep { -f "$opts->{raddb_dir}/mods-enabled/$_" } readdir($dh);
+		closedir($dh);
+		my $instance = $opts->{instance};
+		foreach my $file (@dir) {
+			open (my $fh, $opts->{raddb_dir}.'/mods-enabled/'.$file);
+			my $level = 0;		# Brace-nesting depth within the instance
+			my $section = '';	# Name of the level-2 section being read
+			my $subsection = '';	# Name of the level-3 subsection being read
+			while (<$fh>) {
+				if ($found) {
+					$_ =~ s/#.*//; # Remove comments
+					# Store assignments under the current nesting so
+					# $opts mirrors the config structure
+					if ($_ =~ m/\s*([a-z_]+)\s*=\s*(.*)/) {
+						my $param = $1;
+						my $value = $2;
+						$value =~ s/^"//;
+						$value =~ s/"\s*$//;
+						if ($level == 1) {
+							$opts->{$param} = $value;
+						} elsif ($level == 2) {
+							$opts->{$section}->{$param} = $value;
+						} elsif ($level == 3) {
+							$opts->{$section}->{$subsection}->{$param} = $value;
+						}
+					}
+					if ($_ =~ m/([a-z_]*)\s+\{/) { # Find nested sections
+						$level++ ;
+						if ($level == 2) {
+							$section = $1;
+						} elsif ($level == 3) {
+							$subsection = $1;
+						}
+					}
+					$level-- if ($_ =~ m/\s+\}/); # Close of nesting
+					last if ($level == 0); # We've got to the end of the instance
+				}
+				if ($_ =~ m/\b$instance\s+\{/) {
+					# We've found the specified SQL instance
+					$found = 1;
+					$level = 1;
+				}
+			}
+			close ($fh);
+			if ($found) {
+				last;
+			}
+		}
+	} else {
+		die 'ERROR: Specified FreeRADIUS config directory does not contain mods-enabled';
+	}
+	if ($found == 0) {
+		die 'ERROR: SQL instance not found in FreeRADIUS config';
+	}
+}
+
+#
+# The SQL dialect and table name must be set
+#
+if ((!($opts->{dialect})) || (!($opts->{table_name}))) {
+	usage();
+	exit 1;
+}
+
+if ($opts->{yaml}) {
+	#
+	# YAML mode: pool definitions come from a YAML file.  YAML::XS is
+	# only needed in this mode, so load it lazily with a clear error
+	# if it is missing.
+	#
+	my $yaml_available = 0;
+
+	eval {
+		require YAML::XS;
+		YAML::XS->import('LoadFile');
+		$yaml_available = 1;
+	};
+
+	unless ($yaml_available) {
+		die "ERROR: YAML is not available. Install the YAML::XS Perl module.";
+	}
+
+	process_yaml_file($opts);
+} else {
+	#
+	# Single-range mode: pool name and both ends of the range are
+	# mandatory.  (Replaces the previous "goto done" flow with a
+	# plain if/else -- identical behaviour, no goto.)
+	#
+	if ((!($opts->{pool_name})) || (!($opts->{range_start})) || (!($opts->{range_end}))) {
+		usage();
+		exit 1;
+	}
+
+	process_commandline($opts);
+}
+
+exit 0;
+
+
+sub usage {
+	# Print the command synopsis on STDERR; shared by all argument errors.
+	print STDERR <<'EOF'
+Usage:
+  rlm_sqlippool_tool -p <pool_name> -s <range_start> -e <range_end> -t <table_name> (-d <sql_dialect> | -f <raddb_dir> [ -i <instance> ]) [ -c <capacity> ] [ -x <existing_ips_file> ]
+or:
+  rlm_sqlippool_tool -y <pool_defs_yaml_file> -t <table_name> (-d <dialect> | -f <raddb_dir> [ -i <instance> ]) [ -x <existing_ips_file> ]
+
+EOF
+}
+
+
+sub process_commandline {
+	# Handle single-range mode: build the address list for one pool and
+	# either emit SQL for the chosen dialect or apply it directly to the
+	# database configured in raddb.
+
+	my $opts = shift;
+
+	# Any die() below should also remind the user of the correct usage.
+	# local() confines the handler to this call instead of leaking a
+	# process-wide global (previously assigned without local).
+	local $SIG{__DIE__} = sub { usage(); die(@_); };
+
+	(my $template, my $queries)=load_templates($opts->{table_name});
+
+	unless (defined $template->{$opts->{dialect}}) {
+		print STDERR "Unknown dialect. Pick one of: ";
+		print STDERR "$_ " foreach sort keys %{$template};
+		print STDERR "\n";
+		exit 1;
+	}
+
+	# Pre-existing IPs (if supplied) seed the range before random filling
+	my @entries = ();
+	@entries = load_entries($opts->{entries}) if ($opts->{entries});
+
+	@entries = handle_range($opts->{range_start}, $opts->{range_end}, $opts->{capacity}, @entries);
+
+	if ($opts->{radius_db}) {
+		# A database config was parsed from raddb: apply directly
+		call_database($opts, $queries, @entries);
+	} else {
+		# Otherwise render the dialect template to STDOUT
+		output_sql($template->{$opts->{dialect}}, {ranges => [{pool_name => $opts->{pool_name}, ips => \@entries}], batchsize => 100, tablename => $opts->{table_name}});
+	}
+}
+
+sub process_yaml_file {
+	# Handle YAML mode: each top-level key of the YAML file is a pool
+	# name mapping to a list of {start, end[, capacity]} ranges.  Builds
+	# the address list for every range, then emits SQL (or applies it
+	# directly when a raddb config was loaded).
+
+	my $opts = shift;
+
+	unless (-r $opts->{yaml}) {
+		die "ERROR: Cannot open <pool_defs_yaml_file> for reading: $opts->{yaml}";
+	}
+
+	my %pool_defs = %{LoadFile($opts->{yaml})};
+
+	(my $template, my $queries)=load_templates($opts->{table_name});
+
+	unless (defined $template->{$opts->{dialect}}) {
+		print STDERR "Unknown dialect. Pick one of: ";
+		print STDERR "$_ " foreach sort keys %{$template};
+		print STDERR "\n";
+		exit 1;
+	}
+
+	# Optional pre-existing IP entries used to seed each range
+	my @entries = ();
+	@entries = load_entries($opts->{entries}) if ($opts->{entries});
+
+	my @ranges;
+	foreach my $pool_name (sort keys %pool_defs) {
+		foreach my $range (@{$pool_defs{$pool_name}}) {
+			my $range_start = $range->{start};
+			my $range_end = $range->{end};
+			# NOTE(review): capacity may be undef when omitted in the
+			# YAML -- confirm handle_range() applies a sane default
+			my $capacity = $range->{capacity};
+			my @ips = handle_range($range_start, $range_end, $capacity, @entries);
+			push (@ranges, {pool_name => $pool_name, ips => \@ips});
+		}
+	}
+
+	if ($opts->{radius_db}) {
+		# NOTE(review): this passes the raw seed @entries rather than the
+		# computed @ranges, and $opts->{pool_name} (used by the insert in
+		# call_database) is not set in YAML mode -- direct database
+		# population for YAML definitions looks broken; confirm upstream.
+		&call_database($opts, $queries, @entries);
+	} else {
+		&output_sql($template->{$opts->{dialect}}, {ranges => \@ranges, batchsize => 100, tablename => $opts->{table_name}});
+	}
+}
+
+sub output_sql {
+	# Render the per-dialect SQL template with Template Toolkit, which
+	# is loaded on demand since it is only needed for SQL output mode.
+	my ($template, $vars) = @_;
+
+	my $tt_available = 0;
+	eval {
+		require Template;
+		$tt_available = 1;
+	};
+	unless ($tt_available) {
+		die "ERROR: Template Toolkit is not available. Install the Template Perl module.";
+	}
+
+	my $tt = Template->new();
+	$tt->process(\$template, $vars) || die $tt->error();
+}
+
+sub call_database {
+	# Apply the pool changes directly over DBI, using connection
+	# parameters previously parsed from the FreeRADIUS sql module config.
+	# Arguments: $opts, the per-dialect $queries from load_templates(),
+	# then the list of IP addresses to insert.
+
+	my $opts = shift;
+	my $queries = shift;
+	my @entries = @_;
+
+	# DBI is only needed in direct-database mode, so require it lazily
+	my $dbi_avail = 0;
+	eval {
+		require DBI;
+		$dbi_avail = 1;
+	};
+	unless($dbi_avail) {
+		die "ERROR: DBI is not available. Install the DBI Perl module.";
+	}
+
+	# Translate the FreeRADIUS connection parameters into a DBI DSN
+	my $dsn;
+	if ($opts->{dialect} eq 'mysql') {
+		$dsn = "DBI:mysql:database=$opts->{radius_db};host=$opts->{server}";
+		# Mirror the module's TLS settings where DBD::mysql supports them
+		if (defined($opts->{mysql}->{tls})) {
+			$dsn .= ';mysql_ssl=1';
+			$dsn .= ';mysql_ssl_ca_file='.$opts->{mysql}->{tls}->{ca_file} if ($opts->{mysql}->{tls}->{ca_file});
+			$dsn .= ';mysql_ssl_ca_path='.$opts->{mysql}->{tls}->{ca_path} if ($opts->{mysql}->{tls}->{ca_path});
+			$dsn .= ';mysql_ssl_client_key='.$opts->{mysql}->{tls}->{private_key_file} if ($opts->{mysql}->{tls}->{private_key_file});
+			$dsn .= ';mysql_ssl_client_cert='.$opts->{mysql}->{tls}->{certificate_file} if ($opts->{mysql}->{tls}->{certificate_file});
+			$dsn .= ';mysql_ssl_cipher='.$opts->{mysql}->{tls}->{cipher} if ($opts->{mysql}->{tls}->{cipher});
+		}
+	} elsif ($opts->{dialect} eq 'postgresql') {
+		# Parse FreeRADIUS alternative connection string
+		if ($opts->{radius_db} =~ m/host=(.+?)\b/) {
+			$opts->{server} = $1;
+		}
+		if ($opts->{radius_db} =~ m/user=(.+?)\b/) {
+			$opts->{login} = $1;
+		}
+		if ($opts->{radius_db} =~ m/password=(.+?)\b/) {
+			$opts->{password} = $1;
+		}
+		if ($opts->{radius_db} =~ m/sslmode=(.+?)\b/) {
+			$opts->{sslmode} = $1;
+		}
+		if ($opts->{radius_db} =~ m/dbname=(.+?)\b/) {
+			$opts->{radius_db} = $1;
+		}
+		$dsn = "DBI:Pg:dbname=$opts->{radius_db};host=$opts->{server}";
+		#
+		# DBD doesn't have all the options used by FreeRADIUS - just enable ssl if
+		# FreeRADIUS has SSL options enabled
+		#
+		$dsn .= ';sslmode=prefer' if ($opts->{sslmode});
+	} elsif ($opts->{dialect} eq 'sqlite') {
+		# NOTE(review): "->(unknown)" below looks garbled.  Only
+		# $opts->{sqlite} interpolates (a hash ref), so the DSN is bogus;
+		# the FreeRADIUS sqlite config keys the database path as
+		# "filename", so this was most likely
+		# "$opts->{sqlite}->{filename}".  Confirm against upstream.
+		$dsn = "DBI:SQLite:dbname=$opts->{sqlite}->(unknown)";
+	} elsif ($opts->{dialect} eq 'mssql') {
+		if ($opts->{driver} eq 'rlm_sql_unixodbc') {
+			$dsn = "DBI:ODBC:DSN=$opts->{server}";
+		} else {
+			$dsn = "DBI:Sybase:server=$opts->{server};database=$opts->{radius_db}";
+		}
+	} elsif ($opts->{dialect} eq 'oracle') {
+		# Extract data from Oracle connection string as used by FreeRADIUS
+		if ($opts->{radius_db} =~ m/HOST=(.+?)\)/) {
+			$opts->{server} = $1;
+		}
+		if ($opts->{radius_db} =~ m/PORT=(.+?)\)/) {
+			$opts->{port} =$1;
+		}
+		if ($opts->{radius_db} =~ m/SID=(.+?)\)/) {
+			$opts->{sid} = $1;
+		}
+		$dsn = "DBI:Oracle:host=$opts->{server};sid=$opts->{sid}";
+	} else {
+		$dsn = "DBI:$opts->{dialect}:database=$opts->{radius_db};host=$opts->{server}";
+	}
+	$dsn .= ";port=$opts->{port}" if ($opts->{port}) && ($opts->{driver} ne 'rlm_sql_unixodbc');
+
+	# Read the results by running our query against the database
+	my $dbh = DBI->connect($dsn, $opts->{login}, $opts->{password}) || die "Unable to connect to database";
+
+	# pre queries: create and index the temporary table
+	foreach my $query (@{$queries->{$opts->{dialect}}->{pre}}) {
+		$dbh->do($query);
+	}
+
+	# NOTE(review): assumes $opts->{pool_name} is set -- true in
+	# single-range mode, not in YAML mode; confirm callers.
+	my $sth = $dbh->prepare($queries->{$opts->{dialect}}->{insert});
+	foreach my $ip (@entries) {
+		$sth->execute($opts->{pool_name}, $ip);
+	}
+	$sth->finish();
+
+	# post queries: reconcile the live pool table with the temporary table
+	foreach my $query (@{$queries->{$opts->{dialect}}->{post}}) {
+		$dbh->do($query);
+	}
+
+	$dbh->disconnect();
+}
+
+sub load_entries {
+	# Read pre-existing IP entries, one per line, from the file named by
+	# -x and return them as a list (newlines stripped).
+
+	my $entries_file = shift;
+
+	unless (-r $entries_file) {
+		die "ERROR: Cannot open <existing_ips_file> for reading: $entries_file"
+	}
+	open(my $fh, "<", $entries_file) || die "Failed to open $entries_file";
+	my @entries = <$fh>;
+	chomp @entries;
+	close($fh);
+
+	return @entries;
+
+}
+
+
+sub handle_range {
+	# Validate a range and populate it with IP addresses, returning the
+	# list of addresses.  Pre-existing @entries that fall inside the
+	# range are used first; the remainder are chosen by one of three
+	# fill strategies depending on range size and requested capacity.
+
+	my $range_start = shift;
+	my $range_end = shift;
+	my $capacity = shift;
+	my @entries = @_;
+
+	unless (ip_is_ipv4($range_start) || ip_is_ipv6($range_start)) {
+		die "ERROR: Incorrectly formatted IPv4/IPv6 address for range_start: $range_start";
+	}
+
+	unless (ip_is_ipv4($range_end) || ip_is_ipv6($range_end)) {
+		die "ERROR: Incorrectly formatted IPv4/IPv6 address for range_end: $range_end";
+	}
+
+	my $ip_start = new Net::IP($range_start);
+	my $ip_end = new Net::IP($range_end);
+	my $ip_range = new Net::IP("$range_start - $range_end");
+
+	unless (defined $ip_range) {
+		die "ERROR: The range defined by <range_start> - <range_end> is invalid: $range_start - $range_end";
+	}
+
+	# Ranges from a YAML definition may omit "capacity"; apply the same
+	# default as the command line so such ranges are populated (up to
+	# 65536 addresses) instead of being treated as capacity 0.
+	$capacity = 65536 unless defined $capacity;
+
+	my $range_size = $ip_range->size;
+
+	if ($range_size < $capacity) {
+		# Stringification collapses the Math::BigInt size to a number
+		$capacity = "$range_size";
+		warn 'WARNING: Insufficent IPs in the range. Will create '.$capacity.' entries.';
+	}
+
+	# Prune the entries to only those within the specified range
+	for (my $i = 0; $i <= $#entries; $i++) {
+		my $version = ip_is_ipv4($entries[$i]) ? 4 : 6;
+		my $binip = ip_iptobin($entries[$i],$version);
+		if ($ip_start->version != $version ||
+		    ip_bincomp($binip, 'lt', $ip_start->binip) == 1 ||
+		    ip_bincomp($binip, 'gt', $ip_end->binip) == 1) {
+			$entries[$i]='';
+		}
+	}
+
+	#
+	# Choose a fill strategy: walk the whole range when it is to be
+	# almost fully (> 90%) populated; otherwise pick random addresses
+	# (sparse) when the population is relatively small (< 80%) or the
+	# range is too large to walk; otherwise walk the range selecting
+	# addresses probabilistically (dense).
+	#
+
+	srand(42); # Fixed seed so pools are reproducible (and extendable) between runs
+
+	if ($capacity / "$range_size" > 0.9) {
+		@entries = walk_fill($ip_start, $ip_end, $capacity, @entries);
+	} elsif (length($range_size) > 9 || $capacity / "$range_size" < 0.8) { # From "BigInt" to FP
+		@entries = sparse_fill($ip_start, $ip_end, $capacity, @entries);
+	} else {
+		@entries = dense_fill($ip_start, $ip_end, $ip_range, $capacity, @entries);
+	}
+
+	return @entries;
+}
+
+
+#
+# With this sparse fill method we randomly allocate within the scope of the
+# smallest enclosing network prefix, checking that we are within the given
+# range, retrying if we are outside or we hit a duplicate.
+#
+# This method can efficiently choose a small number of addresses relative to
+# the size of the range. It becomes slower as the population of a range nears
+# the range's limit since it is harder to choose a free address at random.
+#
+# It is useful for selecting a handful of addresses from an enormous IPv6 /64
+# network for example.
+#
+sub sparse_fill {
+
+	my $ip_start = shift;	# Net::IP range start
+	my $ip_end = shift;	# Net::IP range end
+	my $capacity = shift;	# Number of addresses to generate
+	my @entries = @_;	# Pre-existing IPs ('' marks pruned entries)
+
+	# Find the smallest network that encloses the given range
+	my $version = $ip_start->version;
+	# XOR of the two bit-strings is NUL wherever the bits agree, so the
+	# end offset of the leading ^\0* match ($+[0]) is the length of the
+	# common (network) prefix.
+	( $ip_start->binip ^ $ip_end->binip ) =~ /^\0*/;
+	my $net_prefix = $+[0];
+	my $net_bits = substr($ip_start->binip, 0, $net_prefix);
+	my $host_length = length($ip_start->binip) - $net_prefix;
+
+	my %ips = ();
+	my $i = 0;
+	while ($i < $capacity) {
+
+		# Use the given entries first
+		my $rand_ip;
+		my $given_lease = 0;
+		shift @entries while $#entries >= 0 && $entries[0] eq '';
+		if ($#entries >= 0) {
+			$rand_ip = ip_iptobin(shift @entries, $version);
+			$given_lease = 1;
+		} else {
+			# Random host part within the enclosing prefix; retry
+			# when the result falls outside the requested range
+			$rand_ip = $net_bits;
+			$rand_ip .= [0..1]->[rand 2] for 1..$host_length;
+			# Check that we are inside the given range
+			next if ip_bincomp($rand_ip, 'lt', $ip_start->binip) == 1 ||
+				ip_bincomp($rand_ip, 'gt', $ip_end->binip) == 1;
+		}
+
+		# Retry duplicates (random collisions or repeated given entries)
+		next if defined $ips{$rand_ip};
+
+		$ips{$rand_ip} = $given_lease ? '=' : '+';
+		$i++;
+
+	}
+
+	return map { ip_bintoip($_, $version) } keys %ips;
+
+}
+
+
+#
+# With this dense fill method, after first selecting the given entries we walk
+# the network range picking IPs with evenly distributed probability.
+#
+# This method can efficiently choose a large number of addresses relative to
+# the size of a range, provided that the range isn't massive. It becomes
+# slower as the range size increases.
+#
+sub dense_fill {
+
+	my $ip_start = shift;	# Net::IP range start
+	my $ip_end = shift;	# Net::IP range end
+	my $ip_range = shift;	# Net::IP covering the whole range (for size)
+	my $capacity = shift;	# Number of addresses to generate
+	my @entries = @_;	# Pre-existing IPs ('' marks pruned entries)
+
+	my $version = $ip_start->version;
+
+	# Bit-string "1" of the address width, used to step through the range
+	my $one = ("0"x($version == 4 ? 31 : 127)) . '1';
+
+	my %ips = ();
+	my $remaining_entries = $capacity;
+	my $remaining_ips = $ip_range->size;
+	my $ipbin = $ip_start->binip;
+
+	while ($remaining_entries > 0 && (ip_bincomp($ipbin, 'le', $ip_end->binip) == 1)) {
+
+		# Use the given entries first
+		shift @entries while $#entries >= 0 && $entries[0] eq '';
+		if ($#entries >= 0) {
+			$ips{ip_iptobin(shift @entries, $version)} = '=';
+			$remaining_entries--;
+			$remaining_ips--;
+			next;
+		}
+
+		# Skip an IP already selected as a given entry (the !defined
+		# test), otherwise pick the current IP with probability
+		# remaining-needed / remaining-available, which spreads the
+		# selections evenly across the rest of the range.
+		#
+		# Fixed: this previously did "goto next_ip if defined
+		# $ips{$ipbin};" but no next_ip label exists, so the first
+		# collision with a given entry died at runtime; the !defined
+		# guard below already handles that case.
+		if (!defined $ips{$ipbin} &&
+		    (rand) <= $remaining_entries / "$remaining_ips") { # From "BigInt" to FP
+			$ips{$ipbin} = '+';
+			$remaining_entries--;
+		}
+
+		$remaining_ips--;
+		$ipbin = ip_binadd($ipbin,$one);
+
+	}
+
+	return map { ip_bintoip($_, $version) } keys %ips;
+
+}
+
+#
+# With this walk fill method we walk the IP range from the beginning
+# for as many IPs as are required
+#
+# It is useful for selecting a fully populated network.
+#
+
+sub walk_fill {
+	my $ip_start = shift;	# Net::IP range start
+	my $ip_end = shift;	# Net::IP range end
+	my $capacity = shift;	# Number of addresses to generate
+	my @entries = @_;	# Pre-existing IPs ('' marks pruned entries)
+
+	my $version = $ip_start->version;
+
+	# Bit-string "1" of the address width, used to step through the range
+	my $one = ("0"x($version == 4 ? 31 : 127)) . '1';
+
+	my %ips = ();
+	my $remaining_entries = $capacity;
+	my $ipbin = $ip_start->binip;
+
+	# Sort existing IPs and remove any blank entries. Allows existing entries to be
+	# matched quickly in the new pool
+	# NOTE(review): this is a lexical sort of the textual addresses, which
+	# does not always match binary walk order (e.g. "10.0.0.10" sorts
+	# before "10.0.0.2") -- such entries will be re-marked as new.
+	my @sorted_entries = sort @entries;
+	shift @sorted_entries while $#sorted_entries >= 0 && $sorted_entries[0] eq '';
+
+	# Walk through the IP range from the beginning
+	while ($remaining_entries > 0 && (ip_bincomp($ipbin, 'le', $ip_end->binip) == 1)) {
+
+		# Mark the IP as given ('=') when it matches the next existing
+		# entry (consuming it); otherwise it is new ('+').
+		#
+		# Fixed: the comparison previously read
+		#   ip_bincomp($ipbin, 'eq', ip_iptobin($sorted_entries[0]) == 1)
+		# i.e. "== 1" was inside ip_bincomp's argument list and
+		# ip_iptobin was missing its version argument.
+		if ($#sorted_entries >= 0 &&
+		    ip_bincomp($ipbin, 'eq', ip_iptobin($sorted_entries[0], $version)) == 1) {
+			shift @sorted_entries;
+			$ips{$ipbin} = '=';
+		} else {
+			$ips{$ipbin} = '+';
+		}
+		$remaining_entries--;
+		$ipbin = ip_binadd($ipbin,$one);
+
+	}
+
+	return map { ip_bintoip($_, $version) } keys %ips;
+
+}
+
+
+
+#
+# SQL dialect templates
+#
+
+sub load_templates {
+	# Build, for each supported SQL dialect, two parallel artefacts:
+	#   $queries->{dialect}:  pre/insert/post statement lists used when
+	#                         applying changes directly over DBI, and
+	#   $template->{dialect}: a Template Toolkit document producing the
+	#                         equivalent formatted SQL output.
+	# Every dialect follows the same plan: fill a temporary table, delete
+	# pool rows absent from it, then insert rows the pool table lacks.
+	# Returns ($template, $queries).
+
+	my $tablename = shift;
+
+	my $template;
+	my $queries;
+#
+# MySQL / MariaDB
+#
+	$queries->{'mysql'}->{pre} = [
+		'DROP TEMPORARY TABLE IF EXISTS '.$tablename.'_temp;',
+		'CREATE TEMPORARY TABLE '.$tablename.'_temp (
+  id int(11) unsigned NOT NULL auto_increment,
+  pool_name varchar(30) NOT NULL,
+  framedipaddress varchar(15) NOT NULL,
+  PRIMARY KEY (id),
+  KEY pool_name_framedipaddress (pool_name,framedipaddress)
+);'
+	];
+	$queries->{'mysql'}->{insert} = 'INSERT INTO '.$tablename.'_temp (pool_name,framedipaddress) VALUES (?, ?)';
+	$queries->{'mysql'}->{post} = [
+		'START TRANSACTION;',
+		'DELETE r FROM '.$tablename.' r
+  LEFT JOIN '.$tablename.'_temp t USING (pool_name,framedipaddress)
+  WHERE t.id IS NULL;',
+		'INSERT INTO '.$tablename.' (pool_name,framedipaddress)
+  SELECT pool_name,framedipaddress FROM '.$tablename.'_temp t WHERE NOT EXISTS (
+    SELECT * FROM '.$tablename.' r
+    WHERE r.pool_name=t.pool_name AND r.framedipaddress=t.framedipaddress
+  );',
+		'COMMIT;'
+	];
+	$template->{'mysql'} = join("\n", @{$queries->{'mysql'}->{pre}})."\n";
+	$template->{'mysql'} .= <<'END_mysql';
+-- Populate the temporary table
+[%- FOREACH r IN ranges %]
+[%- FOREACH i IN r.ips %]
+[%- "\n\nINSERT INTO ${tablename}_temp (pool_name,framedipaddress) VALUES" IF loop.index % batchsize == 0 %]
+[%- IF (loop.index+1) % batchsize == 0 OR loop.last %]
+('[% r.pool_name %]','[% i %]');
+[%- ELSE %]
+('[% r.pool_name %]','[% i %]'),
+[%- END %]
+[%- END %]
+[%- END %]
+END_mysql
+	$template->{'mysql'} .= join("\n", @{$queries->{'mysql'}->{post}})."\n";
+
+#
+# PostgreSQL
+#
+	$queries->{'postgresql'}->{pre} = [
+		'DROP TABLE IF EXISTS '.$tablename.'_temp;',
+		'CREATE TEMPORARY TABLE '.$tablename.'_temp (
+  pool_name varchar(64) NOT NULL,
+  FramedIPAddress INET NOT NULL
+);',
+		'CREATE INDEX '.$tablename.'_temp_idx ON '.$tablename.'_temp USING btree (pool_name,FramedIPAddress);'
+	];
+	$queries->{'postgresql'}->{insert} = 'INSERT INTO '.$tablename.'_temp (pool_name,framedipaddress) VALUES (?, ?)';
+	$queries->{'postgresql'}->{post} = [
+		'START TRANSACTION;',
+		'DELETE FROM '.$tablename.' r WHERE NOT EXISTS (
+  SELECT FROM '.$tablename.'_temp t
+  WHERE t.pool_name = r.pool_name AND t.framedipaddress = r.framedipaddress
+);',
+		'INSERT INTO '.$tablename.' (pool_name,framedipaddress)
+  SELECT pool_name,framedipaddress FROM '.$tablename.'_temp t WHERE NOT EXISTS (
+    SELECT * FROM '.$tablename.' r
+    WHERE r.pool_name=t.pool_name AND r.framedipaddress=t.framedipaddress
+  );',
+		'COMMIT;'
+	];
+	$template->{'postgresql'} = join("\n", @{$queries->{'postgresql'}->{pre}})."\n";
+	$template->{'postgresql'} .= <<'END_postgresql';
+-- Populate the temporary table
+[%- FOREACH r IN ranges %]
+[%- FOREACH i IN r.ips %]
+[%- "\n\nINSERT INTO ${tablename}_temp (pool_name,framedipaddress) VALUES" IF loop.index % batchsize == 0 %]
+[%- IF (loop.index+1) % batchsize == 0 OR loop.last %]
+('[% r.pool_name %]','[% i %]');
+[%- ELSE %]
+('[% r.pool_name %]','[% i %]'),
+[%- END %]
+[%- END %]
+[%- END %]
+END_postgresql
+	$template->{'postgresql'} .= join("\n", @{$queries->{'postgresql'}->{post}})."\n";
+#
+# Oracle
+#
+# Note: no trailing semicolons inside the statement strings; Oracle
+# statements are joined with ";\n" when building the template instead.
+	$queries->{'oracle'}->{pre} = [
+		'CREATE TABLE '.$tablename.'_temp (
+  pool_name VARCHAR(30) NOT NULL,
+  FramedIPAddress VARCHAR(15) NOT NULL
+)',
+		'CREATE INDEX '.$tablename.'_temp_idx ON '.$tablename.'_temp (pool_name,FramedIPAddress)'
+	];
+	$queries->{'oracle'}->{insert} = 'INSERT INTO '.$tablename.'_temp (pool_name,FramedIPAddress) VALUES (?, ?)';
+	$queries->{'oracle'}->{post} = [
+		'DELETE FROM '.$tablename.' r WHERE NOT EXISTS
+  (SELECT * FROM '.$tablename.'_temp t WHERE
+    r.pool_name = t.pool_name AND r.framedipaddress = t.framedipaddress)',
+		'INSERT INTO '.$tablename.' (pool_name,framedipaddress)
+  SELECT pool_name,framedipaddress FROM '.$tablename.'_temp t WHERE NOT EXISTS (
+    SELECT * FROM '.$tablename.' r
+    WHERE r.pool_name=t.pool_name AND r.framedipaddress=t.framedipaddress
+  )',
+		'DROP TABLE '.$tablename.'_temp',
+		'COMMIT'
+	];
+
+	$template->{'oracle'} = join(";\n", @{$queries->{'oracle'}->{pre}}).";\n";
+	$template->{'oracle'} .= <<'END_oracle';
+-- Populate the temporary table
+[%- FOREACH r IN ranges %]
+[%- FOREACH i IN r.ips %]
+[%- "\nINSERT INTO ${tablename}_temp (pool_name,framedipaddress) VALUES " %]('[% r.pool_name %]','[% i %]');
+[%- END %]
+[%- END %]
+END_oracle
+	$template->{'oracle'} .= join(";\n", @{$queries->{'oracle'}->{post}})."\n";
+
+#
+# SQLite
+#
+	$queries->{'sqlite'}->{pre} = [
+		'DROP TABLE IF EXISTS '.$tablename.'_temp;',
+		'CREATE TABLE '.$tablename.'_temp (
+  pool_name varchar(30) NOT NULL,
+  framedipaddress varchar(15) NOT NULL
+);',
+		'CREATE INDEX '.$tablename.'_temp_idx ON '.$tablename.'_temp (pool_name,FramedIPAddress);'
+	];
+	$queries->{'sqlite'}->{insert} = 'INSERT INTO '.$tablename.'_temp (pool_name,framedipaddress) VALUES (?, ?)';
+	$queries->{'sqlite'}->{post} = [
+		'BEGIN TRANSACTION;',
+		'DELETE FROM '.$tablename.' WHERE rowid IN (
+  SELECT r.rowid FROM '.$tablename.' r
+  LEFT JOIN '.$tablename.'_temp t USING (pool_name,framedipaddress)
+  WHERE t.rowid IS NULL);',
+		'INSERT INTO '.$tablename.' (pool_name,framedipaddress)
+  SELECT pool_name,framedipaddress FROM '.$tablename.'_temp t WHERE NOT EXISTS (
+    SELECT * FROM '.$tablename.' r
+    WHERE r.pool_name=t.pool_name AND r.framedipaddress=t.framedipaddress
+  );',
+		'COMMIT;',
+		'DROP TABLE '.$tablename.'_temp;'
+	];
+
+	$template->{'sqlite'} = join("\n", @{$queries->{'sqlite'}->{pre}})."\n";
+	$template->{'sqlite'} .= <<'END_sqlite';
+-- Populate the temporary table
+[%- FOREACH r IN ranges %]
+[%- FOREACH i IN r.ips %]
+[%- "\n\nINSERT INTO ${tablename}_temp (pool_name,framedipaddress) VALUES" IF loop.index % batchsize == 0 %]
+[%- IF (loop.index+1) % batchsize == 0 OR loop.last %]
+('[% r.pool_name %]','[% i %]');
+[%- ELSE %]
+('[% r.pool_name %]','[% i %]'),
+[%- END %]
+[%- END %]
+[%- END %]
+END_sqlite
+	$template->{'sqlite'} .= join("\n", @{$queries->{'sqlite'}->{post}})."\n";
+
+#
+# MS SQL
+#
+# Uses a session-local #temp table and GO batch separators in the
+# template output.
+	$queries->{'mssql'}->{pre} = [
+		'DROP TABLE IF EXISTS #'.$tablename.'_temp;',
+		'CREATE TABLE #'.$tablename.'_temp (
+  id int identity(1, 1) NOT NULL,
+  pool_name varchar(30) NOT NULL,
+  framedipaddress varchar(15) NOT NULL,
+  PRIMARY KEY (id),
+);',
+		'CREATE INDEX pool_name_framedipaddress ON #'.$tablename.'_temp(pool_name, framedipaddress);'
+	];
+	$queries->{'mssql'}->{insert} = 'INSERT INTO #'.$tablename.'_temp (pool_name, framedipaddress) VALUES (?, ?)';
+	$queries->{'mssql'}->{post} = [
+		'BEGIN TRAN;',
+		'DELETE r FROM '.$tablename.' r
+  LEFT JOIN #'.$tablename.'_temp t ON r.pool_name = t.pool_name AND r.framedipaddress = t.framedipaddress
+  WHERE t.id IS NULL;',
+		'INSERT INTO '.$tablename.' (pool_name,framedipaddress)
+  SELECT pool_name,framedipaddress FROM #'.$tablename.'_temp t WHERE NOT EXISTS (
+    SELECT * FROM '.$tablename.' r
+    WHERE r.pool_name=t.pool_name AND r.framedipaddress=t.framedipaddress
+  );',
+		'COMMIT TRAN;'
+	];
+
+	$template->{'mssql'} = join("\nGO\n", @{$queries->{'mssql'}->{pre}})."\nGO\n";
+	$template->{'mssql'} .= <<'END_mssql';
+-- Populate the temporary table
+[%- FOREACH r IN ranges %]
+[%- FOREACH i IN r.ips %]
+[%- "\n\nINSERT INTO #${tablename}_temp (pool_name,framedipaddress) VALUES" IF loop.index % batchsize == 0 %]
+[%- IF (loop.index+1) % batchsize == 0 OR loop.last %]
+('[% r.pool_name %]','[% i %]');
+GO
+[%- ELSE %]
+('[% r.pool_name %]','[% i %]'),
+[%- END %]
+[%- END %]
+[% END %]
+END_mssql
+	$template->{'mssql'} .= join("\n", @{$queries->{'mssql'}->{post}})."\n";
+
+	return ($template, $queries);
+
+}
+
diff --git a/scripts/sql/users2mysql.pl b/scripts/sql/users2mysql.pl
new file mode 100644
index 0000000..abaa9c0
--- /dev/null
+++ b/scripts/sql/users2mysql.pl
@@ -0,0 +1,157 @@
+#!/usr/bin/perl -w
+#
+# users2mysql.pl -- a script to parse a RADIUS users file and fill
+# a freeradius mysql database...
+#
+#
+# Script developed by Rich Puhek, Znet Telecom
+#
+# last change: Aug 8th, 2002.
+#
+
+
+
+#Modify to suit your db.
+# Connection settings for the target MySQL database.  All of these are
+# package globals: the script runs without "use strict"/"use warnings".
+$database="radius";
+$hostname="localhost";
+$user="radius";
+$password="passwd";
+
+
+#location of source users file:
+$users_file="/etc/raddb_cistron_backup/users";
+
+
+#The following are defaults from freeradius 0.7
+# ...shouldn't have to change.
+# NOTE(review): these three table-name globals are never referenced
+# below -- user_attribute() hard-codes "usergroup"/"radcheck"/"radreply".
+$groups_table="usergroup";
+$check_table="radcheck";
+$reply_table="radreply";
+
+# Verbosity threshold; diagnostic prints below fire at >1, >2, >3, >6, >8.
+$debug=3;
+
+# NOTE(review): DBI->connect is called below but DBI is only pulled in
+# indirectly through DBD::mysql; an explicit "use DBI;" would be clearer.
+use DBD::mysql;
+
+#open the users file, and the db.
+# NOTE(review): legacy style -- bareword filehandle, 2-arg open.  Also
+# note $database is reused: it holds the database NAME for the DSN, then
+# is immediately overwritten with the DBI connection handle.
+open USERS, $users_file or die "ERROR: Unable to open $users_file $!\n";
+$database = DBI->connect("DBI:mysql:$database:$hostname",$user, $password) or die "ERROR: Unable to connect to $database on $hostname $!\n";
+
+# check_attribs(ATTR, VALUE)
+#
+# Sanity-check one attribute/value pair parsed from the users file.
+# Returns the string "TRUE" when ATTR matches the recognized-attribute
+# pattern and VALUE passes the minimal checks; returns undef otherwise.
+# Reads global $debug for diagnostics.
+#
+# NOTE(review): the alternation lists "Password" twice, and the match is
+# unanchored, so e.g. "CHAP-Password" or any attribute merely containing
+# "Netmask" also passes -- confirm whether that looseness is intended.
+sub check_attribs {
+
+	if (!defined($_[0]) or !defined($_[1])) {
+		print "undefined parameter!\n";
+		return undef;
+	};
+
+	$attr = $_[0];
+	$val  = $_[1];
+
+	# Whitelist of attribute names this importer knows how to route.
+	if ($attr !~ /Password|Framed-IP-Address|Framed-IP-Netmask|Framed-IP-Routing|Framed-Routing|Framed-IP-Route|Password|Simultaneous-Use|Idle-Timeout|Auth-Type|Service-Type|Netmask|Framed-Protocol/ ) {
+		print "unrecognized attribute: $attr\n" if $debug>1;
+		return undef;
+	};
+
+	# Only Simultaneous-Use gets a value-format check (digits only);
+	# every other attribute's value is accepted as-is.
+	return undef if ( (! defined($val) ) or
+		( ($attr =~ /Simultaneous\-Use/i) && ( $val !~ /^[0-9]*$/ ) )
+	);
+	print "attribs ok!\n" if $debug>3;
+	return "TRUE";
+};
+
+# cleanup(STRING)
+#
+# Returns a copy of STRING with leading/trailing whitespace and one
+# trailing comma stripped; the caller's value is not modified.
+#
+# NOTE(review): the anchored single-character patterns (s/^\s//g,
+# s/\s$//g) look intended to strip ALL padding, but with ^/$ anchors and
+# no "+" quantifier they may remove only a single character per side --
+# TODO confirm against inputs with multi-space padding.
+sub cleanup {
+	#clean up variables: strip leading/trailing spaces and trailing commas...
+	my $myval;
+	$myval = $_[0];
+	$myval =~ s/^\s//g;
+	$myval =~ s/\s$//g;
+	$myval =~ s/,$//;
+	return $myval;
+};
+
+
+# user_attribute(TABLE, USER, ATTRIB, VALUE)
+#
+# Insert one row into the freeradius schema.  TABLE is matched loosely:
+# anything containing "group" -> usergroup, "check" -> radcheck,
+# "reply" -> radreply; any other value is fatal.  For usergroup rows
+# ATTRIB is the group name and VALUE is ignored; for radcheck/radreply
+# rows the pair is stored with a fixed ':=' operator.  Returns the DBI
+# do() result (rows affected, or undef on failure -- not checked by
+# callers).  Uses globals $database (DBI handle) and $debug.
+#
+# NOTE(review): all values are interpolated directly into the SQL --
+# classic injection/quoting hazard (a user name containing a quote will
+# break the statement).  Placeholders ($database->do($sql, undef, @vals))
+# would be the safe form.
+sub user_attribute {
+	#push values into db...
+	$dtable=$_[0];
+	$duser=$_[1];
+	$dattrib=$_[2];
+	$dval=$_[3];
+
+	print "inserting \"$dattrib\", \"$dval\" for \"$duser\" in rad$dtable\n" if ( $dtable !~ /group/ and $debug>2);
+	print "inserting \"$duser\" into usergroup table as member of \"$dattrib\"\n" if ( $dtable =~ /group/ and $debug>2);
+
+	# Map the caller's loose table tag onto a concrete table name.
+	if ( $dtable =~ /group/ ) {
+		$table = "usergroup";
+	} elsif ( $dtable =~ /check/ ) {
+		$table = "radcheck";
+	} elsif ( $dtable =~ /reply/ ) {
+		$table = "radreply";
+	} else {
+		die "argh! what table is $dtable?\n";
+	};
+
+
+	if ( $table =~ /usergroup/ ) {
+		if ( $dattrib =~ /static/ ) {
+			#Delete the "dynamic" entry...
+			# A static-IP user replaces the "dynamic" group row that the
+			# main loop inserted when the user entry was first seen.
+			$return = $database->do ("DELETE FROM `$table` WHERE `UserName`='$duser' LIMIT 1");
+		};
+		$return = $database->do ("INSERT INTO `$table` SET `UserName`='$duser',`GroupName`='$dattrib'");
+
+	} else {
+		# Check/reply attributes are always stored with the ':=' operator.
+		$return = $database->do ("INSERT INTO `$table` SET `UserName`='$duser',`Attribute`='$dattrib',`Value`='$dval', `op`=':='");
+	};
+	return $return;
+};
+
+
+# Main loop: parse the users file line by line.  A line whose first
+# column is alphanumeric starts a new user entry ("user  attr=val, ...");
+# any other non-blank, non-comment line is treated as a continuation
+# carrying further attributes for the current user.
+while (<USERS>) {
+
+	# NOTE(review): chop removes the last character unconditionally, so a
+	# final line without a trailing newline loses a real character; chomp
+	# would be the safe equivalent.
+	chop;
+	#Skip comment lines and blank lines...
+	next if ( /^\#/ );
+	next if ( /^$/ );
+	next if ( /^\s*$/ );
+
+	if ( /^[a-zA-Z0-9]+/ ) {
+		print "located a user entry: $_\n" if $debug>6;
+		($user,$rest) = split /\s/, $_, 2;
+		#Put user into usergroup as dynamic, if the user's attributes
+		# include an IP address, the script will change that later...
+		user_attribute("group",$user,"dynamic","");
+		@attribs = split /,/, $rest;
+	} else {
+		# Already found the user, now finding attributes...
+		# NOTE(review): unlike the first line, a continuation line is NOT
+		# split on commas -- the whole line becomes a single element, so a
+		# continuation holding several comma-separated attributes would be
+		# mis-parsed.  Confirm whether input lines ever carry more than one.
+		@attribs = $_;
+	};
+
+	# Route each "Attribute = Value" pair to the appropriate table(s).
+	foreach $attr (@attribs) {
+		($attrib,$value) = split /=/, $attr, 2;
+		#TODO: insert sanity checks here!
+		$value = cleanup($value) if (defined($value));
+		$attrib = cleanup($attrib) if (defined($attrib));
+		unless (check_attribs($attrib,$value)) {
+			print "ERROR: something bad with line $.: \"$attrib\", \"$value\"\n";
+			next;
+		};
+		print "attrib: $attrib has value: $value\n" if $debug>8;
+
+		if ( $attrib =~ /Framed-IP-Address/ ) {
+			#user is a static IP user...
+			# NOTE(review): %static is written here but never read again in
+			# this script -- dead bookkeeping unless something external uses it.
+			$static{$user} = 1;
+			user_attribute("group",$user,"static","");
+		};
+
+		if ( $attrib =~ /Password|Simultaneous-Use/ ) {
+			#This is an individual check attribute, so we'll pass it along...
+			user_attribute("check",$user,$attrib,$value);
+		};
+		if ( $attrib =~ /Framed-IP-Address|Framed-IP-Routing|Framed-Routing/ ) {
+			#This is an individual reply attribute, so we'll pass this along...
+			user_attribute("reply",$user,$attrib,$value);
+		};
+	};
+
+};
+
+close USERS;
+# NOTE(review): DBI disconnect() returns TRUE (1) on success, so this
+# exits with a NONZERO status on the happy path -- shell callers will see
+# failure.  "exit 0" after an explicit disconnect would be conventional.
+exit($database->disconnect); \ No newline at end of file