author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-28 09:49:46 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-28 09:49:46 +0000
commit     50b37d4a27d3295a29afca2286f1a5a086142cec (patch)
tree       9212f763934ee090ef72d823f559f52ce387f268 /raddb/mods-config/sql/main/mongo
parent     Initial commit. (diff)
download   freeradius-upstream/3.2.1+dfsg.tar.xz
           freeradius-upstream/3.2.1+dfsg.zip
Adding upstream version 3.2.1+dfsg.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'raddb/mods-config/sql/main/mongo')
-rw-r--r--   raddb/mods-config/sql/main/mongo/queries.conf   289
1 file changed, 289 insertions, 0 deletions
diff --git a/raddb/mods-config/sql/main/mongo/queries.conf b/raddb/mods-config/sql/main/mongo/queries.conf
new file mode 100644
index 0000000..732e1e8
--- /dev/null
+++ b/raddb/mods-config/sql/main/mongo/queries.conf
@@ -0,0 +1,289 @@
# -*- text -*-
#
#  main/mongo/queries.conf -- Mongo configuration queries
#
#  Note that as Mongo is a "schemaless" database, there is no
#  default schema.
#
#  Note also that the Mongo driver is a work in progress.  If it works
#  for you, great.  If the queries do not work, please send a patch.
#  But the FreeRADIUS team are not experts in Mongo, and cannot help
#  with creating Mongo queries.
#
#  $Id$

#######################################################################
#  Query config:  Username
#######################################################################
#  This is the username that will get substituted, escaped, and added
#  as attribute 'SQL-User-Name'.  '%{SQL-User-Name}' should be used
#  below everywhere a username substitution is needed, so you can
#  be sure the username passed from the client is escaped properly.
#
#  Uncomment the next 'sql_user_name' line if you want it to mean:
#
#	Use Stripped-User-Name, if it's there.
#	Else use User-Name, if it's there.
#	Else use the hard-coded string "none" as the user name.
#
#sql_user_name = "%{%{Stripped-User-Name}:-%{%{User-Name}:-none}}"

sql_user_name = "%{User-Name}"

#######################################################################
#  Authorization Queries
#######################################################################
#  These queries compare the check items for the user in
#  ${authcheck_table} and set up the reply items in ${authreply_table}.
#  You can use any query/tables you want, but the return data for each
#  row MUST be in the following order:
#
#	0. Row ID (currently unused)
#	1. UserName/GroupName
#	2. Item Attr Name
#	3. Item Attr Value
#	4. Item Attr Operation
#######################################################################
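
#
#  Illustrative only: because Mongo is schemaless, there is no required
#  document layout for ${authcheck_table}.  The sketch below shows one
#  document shape that the check query further down would match and
#  flatten into <attribute>/<value> rows.  The 'radcheck' collection name
#  and the field names/values are assumptions taken from the query's
#  $match and $addFields stages, not a schema shipped with the server.
#
#	db.radcheck.insertOne({
#		'calling_station_id': '00-11-22-33-44-55',
#		'auth_blocked': 'false',
#		'usr': 'bob',
#		'pwd': 'pwd1',
#		'ttlcache': 1000,
#		'roaming': 'true',
#		'pool_name': 'main_pool'
#	})
#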

#
#  Aggregate query that, like a standard SQL query, returns N rows with
#  the columns <id>, <username>, <attribute>, <value>, <op>.
#
#  Example of result:
#
#  { "id" : 0, "username" : "bob", "attribute" : "User-Name",          "value" : "pippo", "op" : "==" }
#  { "id" : 0, "username" : "bob", "attribute" : "ClearText-Password", "value" : "pwd1",  "op" : ":=" }
#  { "id" : 0, "username" : "bob", "attribute" : "Cache-TTL",          "value" : 1000,    "op" : ":=" }
#
authorize_check_query = "db.${authcheck_table}.aggregate([ \
	{ \
		'$match': { \
			'calling_station_id': '%{Calling-Station-Id}', \
			'auth_blocked': 'false' \
		} \
	}, \
	{ \
		'$addFields': { \
			'attributes.User-Name': '$usr', \
			'attributes.ClearText-Password': '$pwd', \
			'attributes.Cache-TTL': '$ttlcache', \
			'attributes.Enable-Roaming': '$roaming', \
			'attributes.Pool-Name': '$pool_name' \
		} \
	}, \
	{ \
		'$project': { \
			'calling_station_id': 1, \
			'attributes': { \
				'$objectToArray': '$attributes' \
			} \
		} \
	}, \
	{ \
		'$unwind': '$attributes' \
	}, \
	{ \
		'$project': { \
			'_id': 0, \
			'username': '', \
			'attribute': '$attributes.k', \
			'value': '$attributes.v', \
			'op': ':=' \
		} \
	} \
])"

#  TBD: fill in things here
authorize_reply_query = ""

##################################################################

#
#  TBD: fill in things here
#
#authorize_group_check_query = ""
#authorize_group_reply_query = ""

#######################################################################
#  Group Membership Queries
#######################################################################
#  group_membership_query - Check user group membership
#######################################################################

#
#  TBD: Fill in things here.
#
#group_membership_query = ""


#######################################################################
#  Accounting and Post-Auth Queries
#######################################################################
#  These queries insert/update accounting and authentication records.
#  The query to use is determined by the value of 'reference'.
#  This value is used as a configuration path and should resolve to one
#  or more 'query' items.  If reference points to multiple queries, and
#  a query fails, the next query is executed.
#
#  Behaviour is identical to the old 1.x/2.x module, except we can now
#  fail over between N queries, and query selection can be based on any
#  combination of attributes, or custom 'Acct-Status-Type' values.
#######################################################################
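
#
#  Worked example (the attribute value is assumed, for illustration
#  only): the 'reference' expansion in the accounting section below turns
#  the packet's Acct-Status-Type into a lower-cased configuration path,
#  which is then looked up relative to this section to find the queries
#  to run.
#
#	Acct-Status-Type = Start
#	"%{tolower:type.%{Acct-Status-Type}.query}" -> "type.start.query"
#	-> the 'query' items in the 'type { start { ... } }' block
#	   (per the note above, if a query fails the next one is executed)
#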

accounting {
	reference = "%{tolower:type.%{Acct-Status-Type}.query}"

	type {

		start {
			query = "db.connections.findAndModify({ \
				'query': { \
					'calling_station_id': '%{Calling-Station-Id}', \
					'pgw_node': '%{NAS-Identifier}', \
					'acct_session_id': '%{Acct-Session-Id}' \
				}, \
				'update': { \
					'$set': { \
						'update_date': { '$date': { '$numberLong': '%{expr: (%l * 1000) + (%M / 1000)}' } }, \
						'ip': '%{Framed-IP-Address}', \
						'start_time': '%{Packet-Original-Timestamp}' \
					}, \
					'$push': { \
						'events_data': { \
							'event_id': '%{sha256:%{tolower:%{Calling-Station-Id}}}', \
							'event_type': 'Accounting-Start', \
							'event_time': '%{Packet-Original-Timestamp}', \
							'creation_date': { '$date': { '$numberLong': '%{expr: (%l * 1000) + (%M / 1000)}' } } \
						} \
					}, \
					'$setOnInsert': { \
						'pool_name': '%{Control:Pool-Name}', \
						'ip': '%{Framed-IP-Address}', \
						'closed': false, \
						'update_counter': 0, \
						'creation_date': { '$date': { '$numberLong': '%{expr: (%l * 1000) + (%M / 1000)}' } } \
					} \
				}, \
				'upsert': true \
			})"

			query = "db.simultaneous_connections.findAndModify({ \
				'query': { \
					'pool_name': '%{Control:Pool-Name}' \
				}, \
				'update': { \
					'$inc': { \
						'conns_counter': 1 \
					}, \
					'$setOnInsert': { \
						'pool_name': '%{Control:Pool-Name}', \
						'conns_counter': 1 \
					} \
				}, \
				'upsert': true \
			})"
			# End Start
		}

		interim-update {
			query = "db.connections.findAndModify({ \
				'query': { \
					'calling_station_id': '%{Calling-Station-Id}', \
					'pgw_node': '%{NAS-Identifier}', \
					'acct_session_id': '%{Acct-Session-Id}' \
				}, \
				'update': { \
					'$set': { \
						'update_date': { '$date': { '$numberLong': '%{expr: (%l * 1000) + (%M / 1000)}' } }, \
						'last_upd_interim': '%{Packet-Original-Timestamp}' \
					}, \
					'$inc': { \
						'update_counter': 1 \
					}, \
					'$push': { \
						'events_data': { \
							'event_id': '%{sha256:%{tolower:%{Calling-Station-Id}}}', \
							'event_type': 'Accounting-Interim-Update', \
							'event_time': '%{Packet-Original-Timestamp}', \
							'creation_date': { '$date': { '$numberLong': '%{expr: (%l * 1000) + (%M / 1000)}' } } \
						} \
					}, \
					'$setOnInsert': { \
						'pool_name': '%{Control:Pool-Name}', \
						'ip': '%{Framed-IP-Address}', \
						'closed': false, \
						'creation_date': { '$date': { '$numberLong': '%{expr: (%l * 1000) + (%M / 1000)}' } } \
					} \
				}, \
				'upsert': true \
			})"
			# End Interim-Update
		}

		stop {
			query = "db.connections.findAndModify({ \
				'query': { \
					'calling_station_id': '%{Calling-Station-Id}', \
					'pgw_node': '%{NAS-Identifier}', \
					'acct_session_id': '%{Acct-Session-Id}' \
				}, \
				'update': { \
					'$set': { \
						'update_date': { '$date': { '$numberLong': '%{expr: (%l * 1000) + (%M / 1000)}' } }, \
						'stop_time': '%{Packet-Original-Timestamp}', \
						'closed': true \
					}, \
					'$push': { \
						'events_data': { \
							'event_id': '%{sha256:%{tolower:%{Calling-Station-Id}}}', \
							'event_type': 'Accounting-Stop', \
							'event_time': '%{Packet-Original-Timestamp}', \
							'creation_date': { '$date': { '$numberLong': '%{expr: (%l * 1000) + (%M / 1000)}' } } \
						} \
					}, \
					'$setOnInsert': { \
						'pool_name': '%{Control:Pool-Name}', \
						'ip': '%{Framed-IP-Address}', \
						'update_counter': 0, \
						'creation_date': { '$date': { '$numberLong': '%{expr: (%l * 1000) + (%M / 1000)}' } } \
					} \
				}, \
				'upsert': true \
			})"
			# End Stop
		}

	}
}
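
#
#  The '$date'/'$numberLong' values in the queries above (and in the
#  post-auth query below) come from "%{expr: (%l * 1000) + (%M / 1000)}":
#  %l is the request timestamp in seconds and %M its microsecond
#  component, so the expression yields milliseconds since the epoch,
#  which is the integer form Mongo Extended JSON expects for
#  '$numberLong'.  A worked example with assumed input values:
#
#	%l = 1700000000, %M = 250000
#	(1700000000 * 1000) + (250000 / 1000) = 1700000000250
#	-> { '$date': { '$numberLong': '1700000000250' } }
#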

#######################################################################
#  Authentication Logging Queries
#######################################################################
#  postauth_query - Insert some info after authentication
#######################################################################

post-auth {
	query = "db.post_auth.findAndModify({ \
		'query': { \
			'calling_station_id': '%{Calling-Station-Id}', \
			'nas_ip': '%{NAS-Identifier}' \
		}, \
		'update': { \
			'$set': { \
				'update_date': { '$date': { '$numberLong': '%{expr: (%l * 1000) + (%M / 1000)}' } }, \
				'last_event_ts': '%{Packet-Original-Timestamp}' \
			}, \
			'$inc': { \
				'reject_counter': 1 \
			}, \
			'$setOnInsert': { \
				'calling_station_id': '%{Calling-Station-Id}', \
				'nas_ip': '%{NAS-Identifier}', \
				'creation_date': { '$date': { '$numberLong': '%{expr: (%l * 1000) + (%M / 1000)}' } } \
			} \
		}, \
		'upsert': true \
	})"
}
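
#
#  Illustrative only (the values are assumed examples, not output
#  captured from a live server): after the post-auth query above has run
#  twice for the same station, the upserted document in 'post_auth'
#  would look roughly like this.  The '$setOnInsert' fields are written
#  only when the document is first created, while 'reject_counter' is
#  incremented and 'update_date'/'last_event_ts' are refreshed on every
#  run.
#
#	{
#		'calling_station_id': '00-11-22-33-44-55',
#		'nas_ip': 'pgw-01',
#		'reject_counter': 2,
#		'creation_date': ISODate('...'),
#		'update_date': ISODate('...'),
#		'last_event_ts': '<Packet-Original-Timestamp of the last request>'
#	}
#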