summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorDaniel Baumann <daniel.baumann@progress-linux.org>2022-03-20 09:22:20 +0000
committerDaniel Baumann <daniel.baumann@progress-linux.org>2022-11-05 17:45:23 +0000
commitbd093d76dd87eb32fea6d2b37bfd9b559eee7fa2 (patch)
tree527f252331e30ebd94f55876c3111bd644e1c554
parentInitial commit. (diff)
downloadisc-stork-bd093d76dd87eb32fea6d2b37bfd9b559eee7fa2.tar.xz
isc-stork-bd093d76dd87eb32fea6d2b37bfd9b559eee7fa2.zip
Adding upstream version 1.7.0.upstream/1.7.0upstream
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
-rw-r--r--.dockerignore13
-rw-r--r--.gitignore82
-rw-r--r--.gitlab-ci.yml439
-rw-r--r--.gitlab/issue_templates/bug_report.md77
-rw-r--r--.gitlab/issue_templates/feature_request.md39
-rw-r--r--.readthedocs.yaml12
-rw-r--r--AUTHORS.md28
-rw-r--r--CONTRIBUTING.md210
-rw-r--r--ChangeLog.md1734
-rw-r--r--Dangerfile42
-rw-r--r--LICENSE374
-rw-r--r--README.md58
-rw-r--r--Rakefile1
-rw-r--r--Vagrantfile9
-rw-r--r--api/dhcp-defs.yaml411
-rw-r--r--api/dhcp-paths.yaml365
-rw-r--r--api/events-defs.yaml24
-rw-r--r--api/events-paths.yaml41
-rw-r--r--api/search-defs.yaml17
-rw-r--r--api/search-paths.yaml25
-rw-r--r--api/services-defs.yaml540
-rw-r--r--api/services-paths.yaml682
-rw-r--r--api/settings-defs.yaml50
-rw-r--r--api/settings-paths.yaml78
-rw-r--r--api/swagger.in.yaml100
-rw-r--r--api/users-defs.yaml82
-rw-r--r--api/users-paths.yaml185
-rw-r--r--backend/.golangci.yml118
-rw-r--r--backend/agent/agent.go455
-rw-r--r--backend/agent/agent_test.go729
-rw-r--r--backend/agent/bind9.go460
-rw-r--r--backend/agent/bind9_test.go23
-rw-r--r--backend/agent/caclient.go131
-rw-r--r--backend/agent/caclient_test.go170
-rw-r--r--backend/agent/credentialsstore.go159
-rw-r--r--backend/agent/credentialsstore_test.go327
-rw-r--r--backend/agent/kea.go239
-rw-r--r--backend/agent/kea_test.go233
-rw-r--r--backend/agent/keaintercept.go186
-rw-r--r--backend/agent/keaintercept_test.go324
-rw-r--r--backend/agent/keainterceptfn.go39
-rw-r--r--backend/agent/keainterceptfn_test.go98
-rw-r--r--backend/agent/logtail.go89
-rw-r--r--backend/agent/logtail_test.go66
-rw-r--r--backend/agent/monitor.go328
-rw-r--r--backend/agent/monitor_test.go544
-rw-r--r--backend/agent/prombind9exporter.go1236
-rw-r--r--backend/agent/prombind9exporter_test.go388
-rw-r--r--backend/agent/promkeaexporter.go771
-rw-r--r--backend/agent/promkeaexporter_test.go485
-rw-r--r--backend/agent/register.go507
-rw-r--r--backend/agent/register_test.go623
-rw-r--r--backend/agent/testutils.go80
-rw-r--r--backend/api/agent.proto194
-rw-r--r--backend/appcfg/kea/controlagent.go85
-rw-r--r--backend/appcfg/kea/controlagent_test.go439
-rw-r--r--backend/appcfg/kea/decoder.go28
-rw-r--r--backend/appcfg/kea/decoder_test.go31
-rw-r--r--backend/appcfg/kea/indexedsubnets.go93
-rw-r--r--backend/appcfg/kea/indexedsubnets_test.go176
-rw-r--r--backend/appcfg/kea/kea_config.go528
-rw-r--r--backend/appcfg/kea/kea_config_test.go939
-rw-r--r--backend/appcfg/kea/kea_subnet.go175
-rw-r--r--backend/appcfg/kea/kea_subnet_test.go372
-rw-r--r--backend/appcfg/kea/option.go199
-rw-r--r--backend/appcfg/kea/option_test.go468
-rw-r--r--backend/appcfg/kea/optiondef.go114
-rw-r--r--backend/appcfg/kea/optiondef_test.go138
-rw-r--r--backend/appcfg/kea/optionfield.go569
-rw-r--r--backend/appcfg/kea/optionfield_test.go578
-rw-r--r--backend/appcfg/kea/reservation.go135
-rw-r--r--backend/appcfg/kea/reservation_test.go187
-rw-r--r--backend/appcfg/kea/stdoptiondef.go152
-rw-r--r--backend/appcfg/kea/stdoptiondef_test.go30
-rw-r--r--backend/appctrl/kea/keacommand.go279
-rw-r--r--backend/appctrl/kea/keacommand_test.go632
-rw-r--r--backend/appdata/kea/lease.go39
-rw-r--r--backend/cmd/stork-agent/main.go299
-rw-r--r--backend/cmd/stork-agent/main_test.go150
-rw-r--r--backend/cmd/stork-server/main.go112
-rw-r--r--backend/cmd/stork-tool/README.md13
-rw-r--r--backend/cmd/stork-tool/main.go579
-rw-r--r--backend/cmd/stork-tool/main_test.go224
-rw-r--r--backend/consts.go3
-rw-r--r--backend/go.mod82
-rw-r--r--backend/go.sum763
-rw-r--r--backend/pki/pki.go343
-rw-r--r--backend/pki/pki_test.go276
-rw-r--r--backend/server/.gitignore21
-rw-r--r--backend/server/agentcomm/agentcomm.go230
-rw-r--r--backend/server/agentcomm/agentcomm_test.go222
-rw-r--r--backend/server/agentcomm/bind9.go28
-rw-r--r--backend/server/agentcomm/bind9_test.go68
-rw-r--r--backend/server/agentcomm/grpcli.go714
-rw-r--r--backend/server/agentcomm/grpcli_test.go571
-rw-r--r--backend/server/agentcomm/manager.go120
-rw-r--r--backend/server/agentcomm/puller.go81
-rw-r--r--backend/server/agentcomm/puller_test.go143
-rw-r--r--backend/server/agentcomm/test/fake_agents.go196
-rw-r--r--backend/server/apps/bind9/appbind9.go188
-rw-r--r--backend/server/apps/bind9/appbind9_test.go129
-rw-r--r--backend/server/apps/bind9/statspuller.go114
-rw-r--r--backend/server/apps/bind9/statspuller_test.go247
-rw-r--r--backend/server/apps/kea/appkea.go684
-rw-r--r--backend/server/apps/kea/appkea_test.go449
-rw-r--r--backend/server/apps/kea/configmodule.go343
-rw-r--r--backend/server/apps/kea/configmodule_test.go1329
-rw-r--r--backend/server/apps/kea/host.go639
-rw-r--r--backend/server/apps/kea/host_test.go1868
-rw-r--r--backend/server/apps/kea/lease.go602
-rw-r--r--backend/server/apps/kea/lease_test.go1484
-rw-r--r--backend/server/apps/kea/rps.go359
-rw-r--r--backend/server/apps/kea/rps_test.go335
-rw-r--r--backend/server/apps/kea/service.go168
-rw-r--r--backend/server/apps/kea/service_test.go488
-rw-r--r--backend/server/apps/kea/statisticscounter.go356
-rw-r--r--backend/server/apps/kea/statisticscounter_test.go897
-rw-r--r--backend/server/apps/kea/statspuller.go404
-rw-r--r--backend/server/apps/kea/statspuller_test.go1071
-rw-r--r--backend/server/apps/kea/status.go394
-rw-r--r--backend/server/apps/kea/status_test.go728
-rw-r--r--backend/server/apps/kea/subnet.go222
-rw-r--r--backend/server/apps/kea/subnet_test.go701
-rw-r--r--backend/server/apps/manager.go419
-rw-r--r--backend/server/apps/manager_test.go711
-rw-r--r--backend/server/apps/pullers.go15
-rw-r--r--backend/server/apps/statepuller.go324
-rw-r--r--backend/server/apps/statepuller_test.go216
-rw-r--r--backend/server/apps/test/wrapper.go30
-rw-r--r--backend/server/auth/auth.go44
-rw-r--r--backend/server/auth/auth_test.go66
-rw-r--r--backend/server/certs/certs.go323
-rw-r--r--backend/server/certs/certs_test.go269
-rw-r--r--backend/server/config/app.go84
-rw-r--r--backend/server/config/app_test.go84
-rw-r--r--backend/server/config/config.go142
-rw-r--r--backend/server/config/config_test.go83
-rw-r--r--backend/server/config/context.go72
-rw-r--r--backend/server/config/context_test.go119
-rw-r--r--backend/server/config/daemon.go42
-rw-r--r--backend/server/config/daemon_test.go56
-rw-r--r--backend/server/config/error.go33
-rw-r--r--backend/server/config/error_test.go20
-rw-r--r--backend/server/config/machine.go36
-rw-r--r--backend/server/config/machine_test.go23
-rw-r--r--backend/server/configreview/checker.go36
-rw-r--r--backend/server/configreview/checker_test.go25
-rw-r--r--backend/server/configreview/checkercontroller.go101
-rw-r--r--backend/server/configreview/checkercontroller_test.go161
-rw-r--r--backend/server/configreview/dispatcher.go932
-rw-r--r--backend/server/configreview/dispatcher_test.go985
-rw-r--r--backend/server/configreview/keachecker.go833
-rw-r--r--backend/server/configreview/keachecker_test.go2447
-rw-r--r--backend/server/configreview/report.go91
-rw-r--r--backend/server/configreview/report_test.go92
-rw-r--r--backend/server/database/connection.go176
-rw-r--r--backend/server/database/migrations.go173
-rw-r--r--backend/server/database/migrations/10_hard_delete.go49
-rw-r--r--backend/server/database/migrations/11_add_client_class_to_subnet.go21
-rw-r--r--backend/server/database/migrations/12_add_stats_to_localsubnet.go38
-rw-r--r--backend/server/database/migrations/13_network_family.go60
-rw-r--r--backend/server/database/migrations/14_multi_ctrl.go119
-rw-r--r--backend/server/database/migrations/15_add_at_sufixes.go39
-rw-r--r--backend/server/database/migrations/16_add_setting_table.go25
-rw-r--r--backend/server/database/migrations/17_reservations.go105
-rw-r--r--backend/server/database/migrations/18_dangling_hosts_subnets.go98
-rw-r--r--backend/server/database/migrations/19_add_utilization_to_subnet.go42
-rw-r--r--backend/server/database/migrations/1_initial.go141
-rw-r--r--backend/server/database/migrations/20_ha_status.go76
-rw-r--r--backend/server/database/migrations/21_daemon.go78
-rw-r--r--backend/server/database/migrations/22_daemon_to_ha.go66
-rw-r--r--backend/server/database/migrations/23_ha_failover.go70
-rw-r--r--backend/server/database/migrations/24_hostname_reservations.go19
-rw-r--r--backend/server/database/migrations/25_add_event.go26
-rw-r--r--backend/server/database/migrations/26_logs.go51
-rw-r--r--backend/server/database/migrations/27_add_rps_intervals.go26
-rw-r--r--backend/server/database/migrations/28_add_monitored_to_daemon.go19
-rw-r--r--backend/server/database/migrations/29_add_details_to_event.go19
-rw-r--r--backend/server/database/migrations/2_add_admin_user.go88
-rw-r--r--backend/server/database/migrations/30_md5_to_bf_hash_algo.go31
-rw-r--r--backend/server/database/migrations/31_kea_daemon_config_hash.go21
-rw-r--r--backend/server/database/migrations/32_certs_data.go39
-rw-r--r--backend/server/database/migrations/33_app_name.go152
-rw-r--r--backend/server/database/migrations/34_use_secure_protocol.go20
-rw-r--r--backend/server/database/migrations/35_config_reports.go77
-rw-r--r--backend/server/database/migrations/36_config_reviews.go67
-rw-r--r--backend/server/database/migrations/37_remove_subnet_host_wipes.go116
-rw-r--r--backend/server/database/migrations/38_remove_app_from_local.go86
-rw-r--r--backend/server/database/migrations/39_stats_as_decimals.go31
-rw-r--r--backend/server/database/migrations/3_add_machine.go37
-rw-r--r--backend/server/database/migrations/40_remove_host_bulk_update_seq.go29
-rw-r--r--backend/server/database/migrations/41_config_schedule.go33
-rw-r--r--backend/server/database/migrations/42_host_dhcp_options.go21
-rw-r--r--backend/server/database/migrations/43_aggregation_stats.go25
-rw-r--r--backend/server/database/migrations/44_config_checker_preference.go31
-rw-r--r--backend/server/database/migrations/4_add_service.go38
-rw-r--r--backend/server/database/migrations/5_add_basic_groups.go97
-rw-r--r--backend/server/database/migrations/6_app_ctrl_address.go28
-rw-r--r--backend/server/database/migrations/7_app_ctrl_key.go21
-rw-r--r--backend/server/database/migrations/8_services.go154
-rw-r--r--backend/server/database/migrations/9_subnets.go105
-rw-r--r--backend/server/database/model/access_point.go30
-rw-r--r--backend/server/database/model/app.go553
-rw-r--r--backend/server/database/model/app_test.go1437
-rw-r--r--backend/server/database/model/common.go57
-rw-r--r--backend/server/database/model/configcheckerpreference.go178
-rw-r--r--backend/server/database/model/configcheckerpreference_test.go400
-rw-r--r--backend/server/database/model/configreport.go154
-rw-r--r--backend/server/database/model/configreport_test.go355
-rw-r--r--backend/server/database/model/configreview.go64
-rw-r--r--backend/server/database/model/configreview_test.go92
-rw-r--r--backend/server/database/model/daemon.go630
-rw-r--r--backend/server/database/model/daemon_test.go933
-rw-r--r--backend/server/database/model/dhcp_test.go93
-rw-r--r--backend/server/database/model/error.go7
-rw-r--r--backend/server/database/model/event.go98
-rw-r--r--backend/server/database/model/event_test.go169
-rw-r--r--backend/server/database/model/group.go63
-rw-r--r--backend/server/database/model/group_test.go50
-rw-r--r--backend/server/database/model/host.go1015
-rw-r--r--backend/server/database/model/host_test.go1733
-rw-r--r--backend/server/database/model/indexedsubnets.go35
-rw-r--r--backend/server/database/model/indexedsubnets_test.go79
-rw-r--r--backend/server/database/model/integerdecimal.go68
-rw-r--r--backend/server/database/model/integerdecimal_test.go148
-rw-r--r--backend/server/database/model/kea_config.go372
-rw-r--r--backend/server/database/model/kea_config_test.go679
-rw-r--r--backend/server/database/model/kea_subnet.go54
-rw-r--r--backend/server/database/model/kea_subnet_test.go714
-rw-r--r--backend/server/database/model/lease.go15
-rw-r--r--backend/server/database/model/log_target.go36
-rw-r--r--backend/server/database/model/log_target_test.go78
-rw-r--r--backend/server/database/model/machine.go278
-rw-r--r--backend/server/database/model/machine_test.go760
-rw-r--r--backend/server/database/model/metrics.go61
-rw-r--r--backend/server/database/model/metrics_test.go139
-rw-r--r--backend/server/database/model/option.go72
-rw-r--r--backend/server/database/model/option_test.go39
-rw-r--r--backend/server/database/model/optiondef.go48
-rw-r--r--backend/server/database/model/optiondef_test.go133
-rw-r--r--backend/server/database/model/pki.go57
-rw-r--r--backend/server/database/model/pki_test.go51
-rw-r--r--backend/server/database/model/pool.go137
-rw-r--r--backend/server/database/model/pool_test.go137
-rw-r--r--backend/server/database/model/rps_interval.go72
-rw-r--r--backend/server/database/model/rps_interval_test.go166
-rw-r--r--backend/server/database/model/scheduledconfigchange.go172
-rw-r--r--backend/server/database/model/scheduledconfigchange_test.go397
-rw-r--r--backend/server/database/model/service.go507
-rw-r--r--backend/server/database/model/service_test.go801
-rw-r--r--backend/server/database/model/setting.go271
-rw-r--r--backend/server/database/model/setting_test.go193
-rw-r--r--backend/server/database/model/shared_network.go329
-rw-r--r--backend/server/database/model/shared_network_test.go357
-rw-r--r--backend/server/database/model/stats.go95
-rw-r--r--backend/server/database/model/stats_test.go70
-rw-r--r--backend/server/database/model/subnet.go729
-rw-r--r--backend/server/database/model/subnet_test.go1135
-rw-r--r--backend/server/database/model/user.go301
-rw-r--r--backend/server/database/model/user_test.go474
-rw-r--r--backend/server/database/session/session_manager.go134
-rw-r--r--backend/server/database/session/session_manager_test.go106
-rw-r--r--backend/server/database/settings.go140
-rw-r--r--backend/server/database/test/connection_test.go60
-rw-r--r--backend/server/database/test/dbtest.go136
-rw-r--r--backend/server/database/test/migrations_test.go244
-rw-r--r--backend/server/database/test/settings_test.go143
-rw-r--r--backend/server/database/test/tls_test.go205
-rw-r--r--backend/server/database/tls.go162
-rw-r--r--backend/server/dumper/dump/basic.go121
-rw-r--r--backend/server/dumper/dump/basic_test.go102
-rw-r--r--backend/server/dumper/dump/dump.go48
-rw-r--r--backend/server/dumper/dump/events.go44
-rw-r--r--backend/server/dumper/dump/events_test.go92
-rw-r--r--backend/server/dumper/dump/logs.go90
-rw-r--r--backend/server/dumper/dump/logs_test.go78
-rw-r--r--backend/server/dumper/dump/machine.go47
-rw-r--r--backend/server/dumper/dump/machine_test.go160
-rw-r--r--backend/server/dumper/dump/settings.go33
-rw-r--r--backend/server/dumper/dump/settings_test.go48
-rw-r--r--backend/server/dumper/executor.go152
-rw-r--r--backend/server/dumper/executor_test.go249
-rw-r--r--backend/server/dumper/factory.go33
-rw-r--r--backend/server/dumper/factory_test.go80
-rw-r--r--backend/server/dumper/flow.go94
-rw-r--r--backend/server/dumper/flow_test.go158
-rw-r--r--backend/server/dumper/saver.go77
-rw-r--r--backend/server/dumper/saver_test.go142
-rw-r--r--backend/server/eventcenter/eventcenter.go187
-rw-r--r--backend/server/eventcenter/eventcenter_test.go112
-rw-r--r--backend/server/eventcenter/sse.go107
-rw-r--r--backend/server/eventcenter/sse_test.go50
-rw-r--r--backend/server/eventcenter/subscriber.go181
-rw-r--r--backend/server/eventcenter/subscriber_test.go305
-rw-r--r--backend/server/metrics/collector.go75
-rw-r--r--backend/server/metrics/collector_test.go150
-rw-r--r--backend/server/metrics/metrics.go142
-rw-r--r--backend/server/metrics/metrics_test.go38
-rw-r--r--backend/server/restservice/common_test.go12
-rw-r--r--backend/server/restservice/config.go431
-rw-r--r--backend/server/restservice/config_test.go1346
-rw-r--r--backend/server/restservice/events.go73
-rw-r--r--backend/server/restservice/events_test.go45
-rw-r--r--backend/server/restservice/hosts.go599
-rw-r--r--backend/server/restservice/hosts_test.go1368
-rw-r--r--backend/server/restservice/leases.go133
-rw-r--r--backend/server/restservice/leases_test.go682
-rw-r--r--backend/server/restservice/logs.go84
-rw-r--r--backend/server/restservice/logs_test.go158
-rw-r--r--backend/server/restservice/machines.go1560
-rw-r--r--backend/server/restservice/machines_test.go2274
-rw-r--r--backend/server/restservice/middleware.go281
-rw-r--r--backend/server/restservice/middleware_test.go218
-rw-r--r--backend/server/restservice/options.go177
-rw-r--r--backend/server/restservice/options_test.go528
-rw-r--r--backend/server/restservice/pullers.go100
-rw-r--r--backend/server/restservice/pullers_test.go102
-rw-r--r--backend/server/restservice/restservice.go431
-rw-r--r--backend/server/restservice/restservice_test.go111
-rw-r--r--backend/server/restservice/search.go100
-rw-r--r--backend/server/restservice/search_test.go377
-rw-r--r--backend/server/restservice/settings.go102
-rw-r--r--backend/server/restservice/settings_test.go59
-rw-r--r--backend/server/restservice/subnets.go184
-rw-r--r--backend/server/restservice/subnets_test.go541
-rw-r--r--backend/server/restservice/users.go396
-rw-r--r--backend/server/restservice/users_test.go425
-rw-r--r--backend/server/server.go318
-rw-r--r--backend/server/server_test.go281
-rw-r--r--backend/server/test/dbmodel/fake_dispatcher.go112
-rw-r--r--backend/server/test/dbmodel/fake_eventcenter.go35
-rw-r--r--backend/server/test/dbmodel/mock_dump.go23
-rw-r--r--backend/server/test/fake_metricscollector.go27
-rw-r--r--backend/server/test/gen/gen_kea_config.go204
-rw-r--r--backend/server/test/mock_gopg_reader.go51
-rw-r--r--backend/testutil/host.go292
-rw-r--r--backend/testutil/safebuffer.go31
-rw-r--r--backend/testutil/safebuffer_test.go100
-rw-r--r--backend/testutil/sandbox.go95
-rw-r--r--backend/testutil/sandbox_test.go135
-rw-r--r--backend/testutil/staticcerts.go134
-rw-r--r--backend/testutil/staticcerts_test.go16
-rw-r--r--backend/testutil/utils.go80
-rw-r--r--backend/testutil/utils_test.go186
-rw-r--r--backend/util/bigcounter.go174
-rw-r--r--backend/util/bigcounter_test.go338
-rw-r--r--backend/util/cidr.go247
-rw-r--r--backend/util/cidr_test.go301
-rw-r--r--backend/util/configs/README2
-rw-r--r--backend/util/configs/config-with-includes.json5
-rw-r--r--backend/util/configs/config-with-infinite-loop.json3
-rw-r--r--backend/util/configs/config-with-multiple-the-same-includes.json5
-rw-r--r--backend/util/configs/config-with-nested-includes.json3
-rw-r--r--backend/util/configs/config-with-non-existing-includes.json3
-rw-r--r--backend/util/configs/config-without-includes.json5
-rw-r--r--backend/util/fqdn.go102
-rw-r--r--backend/util/fqdn_test.go50
-rw-r--r--backend/util/hash.go29
-rw-r--r--backend/util/hash_test.go40
-rw-r--r--backend/util/json.go33
-rw-r--r--backend/util/json_test.go54
-rw-r--r--backend/util/periodicexecutor.go207
-rw-r--r--backend/util/periodicexecutor_test.go173
-rw-r--r--backend/util/tarballutil.go111
-rw-r--r--backend/util/tarballutil_test.go129
-rw-r--r--backend/util/tarballwritter.go81
-rw-r--r--backend/util/tarballwritter_test.go113
-rw-r--r--backend/util/util.go340
-rw-r--r--backend/util/util_test.go333
-rw-r--r--backend/version.go5
-rw-r--r--doc/backend-api.rst9
-rw-r--r--doc/conf.py140
-rw-r--r--doc/demo.rst235
-rw-r--r--doc/devel.rst1076
-rw-r--r--doc/index.rst36
-rw-r--r--doc/install.rst1304
-rw-r--r--doc/man/stork-agent.8.rst107
-rw-r--r--doc/man/stork-server.8.rst156
-rw-r--r--doc/man/stork-tool.8.rst273
-rw-r--r--doc/manpages.rst21
-rw-r--r--doc/overview.rst37
-rw-r--r--doc/src/arch.drawio1
-rw-r--r--doc/src/requirements.in1
-rw-r--r--doc/src/requirements.txt61
-rw-r--r--doc/static/arch.pngbin0 -> 10521 bytes
-rw-r--r--doc/static/kea-ha-status.pngbin0 -> 71041 bytes
-rw-r--r--doc/static/kea-subnets-list.pngbin0 -> 46569 bytes
-rw-r--r--doc/static/kea-subnets6.pngbin0 -> 55669 bytes
-rw-r--r--doc/static/stork-square-100px.pngbin0 -> 9426 bytes
-rw-r--r--doc/static/stork-square-200px.pngbin0 -> 21951 bytes
-rw-r--r--doc/static/stork.css22
-rw-r--r--doc/stork.css12
-rw-r--r--doc/troubleshooting.rst130
-rw-r--r--doc/uml/registration-agent.pngbin0 -> 66237 bytes
-rw-r--r--doc/uml/registration-agent.svg73
-rw-r--r--doc/uml/registration-agent.uml63
-rw-r--r--doc/usage.rst833
-rw-r--r--docker/README.md7
-rw-r--r--docker/config/agent-bind9/db.test4
-rw-r--r--docker/config/agent-bind9/named.conf64
-rw-r--r--docker/config/agent-kea-ha1/kea-ctrl-agent.conf80
-rw-r--r--docker/config/agent-kea-ha1/kea-dhcp4.conf139
-rw-r--r--docker/config/agent-kea-ha2/kea-ctrl-agent.conf80
-rw-r--r--docker/config/agent-kea-ha2/kea-dhcp4.conf140
-rw-r--r--docker/config/agent-kea-many-subnets/kea-ctrl-agent.conf80
-rw-r--r--docker/config/agent-kea-premium-one/init_mysql_query.sql22
-rw-r--r--docker/config/agent-kea-premium-one/kea-dhcp4.conf76
-rw-r--r--docker/config/agent-kea-premium-one/kea-dhcp6.conf88
-rw-r--r--docker/config/agent-kea-premium-two/init_mysql_query.sql29
-rw-r--r--docker/config/agent-kea-premium-two/kea-dhcp4.conf76
-rw-r--r--docker/config/agent-kea-premium-two/kea-dhcp6.conf88
-rw-r--r--docker/config/agent-kea/kea-ctrl-agent.conf80
-rw-r--r--docker/config/agent-kea/kea-dhcp4.conf340
-rw-r--r--docker/config/agent-kea6/kea-ctrl-agent.conf81
-rw-r--r--docker/config/agent-kea6/kea-dhcp6.conf491
-rw-r--r--docker/config/grafana/dashboards.yaml11
-rw-r--r--docker/config/grafana/datasource.yaml9
-rw-r--r--docker/config/grafana/grafana.ini0
-rw-r--r--docker/config/prometheus/prometheus.yml25
-rw-r--r--docker/config/supervisor/kea-agent.conf8
-rw-r--r--docker/config/supervisor/kea-dhcp4.conf8
-rw-r--r--docker/config/supervisor/kea-dhcp6.conf8
-rw-r--r--docker/config/supervisor/named.conf8
-rw-r--r--docker/config/supervisor/prometheus.conf8
-rw-r--r--docker/config/supervisor/stork-agent.conf8
-rw-r--r--docker/config/supervisor/stork-server.conf8
-rw-r--r--docker/config/supervisor/supervisord.conf14
-rw-r--r--docker/docker-compose-premium.yaml90
-rw-r--r--docker/docker-compose.yaml441
-rw-r--r--docker/images/ci/cloudsmith.Dockerfile14
-rw-r--r--docker/images/ci/redhat-ubi8.Dockerfile16
-rw-r--r--docker/images/ci/ubuntu-18-04.Dockerfile26
-rw-r--r--docker/images/simulator.Dockerfile54
-rw-r--r--docker/images/stork.Dockerfile380
-rwxr-xr-xdocker/init/init_db.sh14
-rwxr-xr-xdocker/init/init_mysql_db.sh67
-rw-r--r--docker/init/init_mysql_query.sql3
-rwxr-xr-xdocker/init/init_pgsql_db.sh67
-rw-r--r--docker/init/init_pgsql_query.sql4
-rwxr-xr-xdocker/tools/gen-kea-config.py414
-rw-r--r--etc/agent-credentials.json.template16
-rw-r--r--etc/agent.env37
-rw-r--r--etc/hooks/apk/isc-stork-agent.postinst25
-rw-r--r--etc/hooks/apk/isc-stork-agent.postrm3
-rw-r--r--etc/hooks/apk/isc-stork-agent.prerm3
-rw-r--r--etc/hooks/apk/isc-stork-server.postinst6
-rw-r--r--etc/hooks/apk/isc-stork-server.postrm3
-rw-r--r--etc/hooks/apk/isc-stork-server.prerm3
-rw-r--r--etc/hooks/deb/isc-stork-agent.postinst27
-rw-r--r--etc/hooks/deb/isc-stork-agent.postrm3
-rw-r--r--etc/hooks/deb/isc-stork-agent.prerm16
-rw-r--r--etc/hooks/deb/isc-stork-server.postinst7
-rw-r--r--etc/hooks/deb/isc-stork-server.postrm3
-rw-r--r--etc/hooks/deb/isc-stork-server.prerm14
-rw-r--r--etc/hooks/freebsd/isc-stork-agent.postinst15
-rw-r--r--etc/hooks/freebsd/isc-stork-agent.postrm3
-rw-r--r--etc/hooks/freebsd/isc-stork-agent.prerm3
-rw-r--r--etc/hooks/freebsd/isc-stork-server.postinst6
-rw-r--r--etc/hooks/freebsd/isc-stork-server.postrm3
-rw-r--r--etc/hooks/freebsd/isc-stork-server.prerm3
-rw-r--r--etc/hooks/rpm/isc-stork-agent.postinst27
-rw-r--r--etc/hooks/rpm/isc-stork-agent.postrm10
-rw-r--r--etc/hooks/rpm/isc-stork-agent.prerm24
-rw-r--r--etc/hooks/rpm/isc-stork-server.postinst8
-rw-r--r--etc/hooks/rpm/isc-stork-server.postrm10
-rw-r--r--etc/hooks/rpm/isc-stork-server.prerm14
-rw-r--r--etc/isc-stork-agent.service19
-rw-r--r--etc/isc-stork-server.service17
-rw-r--r--etc/nginx-stork.conf48
-rw-r--r--etc/server.env44
-rw-r--r--etc/setup-db.sql5
-rw-r--r--grafana/bind9-resolver.json1895
-rw-r--r--grafana/kea-dhcp4.json1265
-rw-r--r--grafana/kea-dhcp6.json1606
-rw-r--r--rakelib/00_init.rake746
-rw-r--r--rakelib/10_codebase.rake263
-rw-r--r--rakelib/20_build.rake260
-rw-r--r--rakelib/30_dev.rake540
-rw-r--r--rakelib/40_dist.rake362
-rw-r--r--rakelib/50_system_tests.rake251
-rw-r--r--rakelib/60_docker_demo.rake395
-rw-r--r--rakelib/70_release.rake178
-rw-r--r--rakelib/80_utils.rake39
-rw-r--r--rakelib/init_deps/danger.Gemfile13
-rw-r--r--rakelib/init_deps/danger.Gemfile.lock103
-rw-r--r--rakelib/init_deps/fpm.Gemfile7
-rw-r--r--rakelib/init_deps/fpm.Gemfile.lock43
-rw-r--r--rakelib/init_deps/pytest.in6
-rw-r--r--rakelib/init_deps/pytest.txt32
-rw-r--r--rakelib/init_deps/sphinx.in3
-rw-r--r--rakelib/init_deps/sphinx.txt58
-rwxr-xr-xstork-demo.sh143
-rwxr-xr-xtests/pkgs-install/test-pkgs-install.sh67
-rw-r--r--tests/sim/index.html199
-rw-r--r--tests/sim/requirements.in5
-rw-r--r--tests/sim/requirements.txt32
-rw-r--r--tests/sim/sim.py333
-rw-r--r--tests/system/config/bind/db.test4
-rw-r--r--tests/system/config/bind/named.conf19
-rw-r--r--tests/system/config/kea-basic-auth/agent-credentials.json10
-rw-r--r--tests/system/config/kea-basic-auth/kea-ctrl-agent-auth.json10
-rw-r--r--tests/system/config/kea-config-review/kea-dhcp4.conf85
-rw-r--r--tests/system/config/kea-premium-host-database/init_pgsql_query.sql1
-rw-r--r--tests/system/config/kea-premium-host-database/kea-hook-hostcmds.json3
-rw-r--r--tests/system/config/kea-premium-host-database/kea-host-database.json7
-rw-r--r--tests/system/config/kea-premium-radius/dictionary1
-rw-r--r--tests/system/config/kea-premium-radius/kea-dhcp4.conf202
-rw-r--r--tests/system/config/kea-tls/optional-client-cert.json4
-rw-r--r--tests/system/config/kea-tls/required-client-cert.json4
-rw-r--r--tests/system/config/kea/hook-disabled.json1
-rw-r--r--tests/system/config/kea/kea-ctrl-agent-auth.json1
-rw-r--r--tests/system/config/kea/kea-ctrl-agent-tls.json1
-rw-r--r--tests/system/config/kea/kea-ctrl-agent.conf87
-rw-r--r--tests/system/config/kea/kea-dhcp4.conf305
-rw-r--r--tests/system/config/kea/kea-dhcp6.conf176
-rw-r--r--tests/system/config/kea/kea-host-database.json1
-rw-r--r--tests/system/config/supervisor/stork-server.conf9
-rw-r--r--tests/system/conftest.py95
-rw-r--r--tests/system/core/__init__.py0
-rw-r--r--tests/system/core/compose.py615
-rw-r--r--tests/system/core/compose_factory.py35
-rw-r--r--tests/system/core/constants.py24
-rw-r--r--tests/system/core/fixtures.py370
-rw-r--r--tests/system/core/lease_generators.py96
-rw-r--r--tests/system/core/prometheus_parser.py265
-rw-r--r--tests/system/core/utils.py82
-rw-r--r--tests/system/core/version.py29
-rw-r--r--tests/system/core/wrappers/__init__.py6
-rw-r--r--tests/system/core/wrappers/agent.py99
-rw-r--r--tests/system/core/wrappers/base.py64
-rw-r--r--tests/system/core/wrappers/bind9.py28
-rw-r--r--tests/system/core/wrappers/external.py49
-rw-r--r--tests/system/core/wrappers/kea.py53
-rw-r--r--tests/system/core/wrappers/perfdhcp.py132
-rw-r--r--tests/system/core/wrappers/server.py575
-rw-r--r--tests/system/docker-compose.yaml342
-rw-r--r--tests/system/tests/core/commons.py9
-rw-r--r--tests/system/tests/core/data/stork_agent_metrics.txt215
-rw-r--r--tests/system/tests/core/test_compose.py847
-rw-r--r--tests/system/tests/core/test_compose_factory.py74
-rw-r--r--tests/system/tests/core/test_compose_on_containers.py31
-rw-r--r--tests/system/tests/core/test_fixtures.py23
-rw-r--r--tests/system/tests/core/test_utils.py211
-rw-r--r--tests/system/tests/core/test_version.py29
-rw-r--r--tests/system/tests/test_agent.py122
-rw-r--r--tests/system/tests/test_bind9.py27
-rw-r--r--tests/system/tests/test_config_backend.py62
-rw-r--r--tests/system/tests/test_database_ssl.py12
-rw-r--r--tests/system/tests/test_leases.py94
-rw-r--r--tests/system/tests/test_overload.py19
-rw-r--r--tests/system/tests/test_signals.py31
-rw-r--r--tests/system/tests/test_stats.py38
-rw-r--r--tests/system/tests/test_update_packages.py36
-rw-r--r--tests/system/tests/test_users.py25
-rw-r--r--tests/ui/selenium_checks.py253
-rw-r--r--tests/ui/tests_ui_basic.py506
-rwxr-xr-xutils/git-hooks-install23
-rwxr-xr-xutils/git-hooks/prepare-commit-msg38
-rw-r--r--webui/.editorconfig13
-rw-r--r--webui/.eslintrc.json40
-rw-r--r--webui/.gitignore8
-rw-r--r--webui/.prettierignore17
-rw-r--r--webui/.prettierrc8
-rw-r--r--webui/.storybook/main.js13
-rw-r--r--webui/.storybook/preview.js18
-rw-r--r--webui/.storybook/tsconfig.json10
-rw-r--r--webui/.storybook/typings.d.ts4
-rw-r--r--webui/README.md183
-rw-r--r--webui/angular.json145
-rw-r--r--webui/browserslist12
-rw-r--r--webui/e2e/protractor.conf.js30
-rw-r--r--webui/e2e/src/app.e2e-spec.ts25
-rw-r--r--webui/e2e/src/app.po.ts11
-rw-r--r--webui/e2e/tsconfig.json9
-rw-r--r--webui/karma.conf.js39
-rw-r--r--webui/nginx.conf51
-rw-r--r--webui/package-lock.json29694
-rw-r--r--webui/package.json85
-rw-r--r--webui/proxy.conf.json20
-rw-r--r--webui/src/app/app-daemons-status/app-daemons-status.component.html18
-rw-r--r--webui/src/app/app-daemons-status/app-daemons-status.component.sass0
-rw-r--r--webui/src/app/app-daemons-status/app-daemons-status.component.spec.ts41
-rw-r--r--webui/src/app/app-daemons-status/app-daemons-status.component.ts92
-rw-r--r--webui/src/app/app-overview/app-overview.component.html24
-rw-r--r--webui/src/app/app-overview/app-overview.component.sass11
-rw-r--r--webui/src/app/app-overview/app-overview.component.spec.ts100
-rw-r--r--webui/src/app/app-overview/app-overview.component.ts32
-rw-r--r--webui/src/app/app-routing.module.ts161
-rw-r--r--webui/src/app/app.component.html48
-rw-r--r--webui/src/app/app.component.sass70
-rw-r--r--webui/src/app/app.component.spec.ts103
-rw-r--r--webui/src/app/app.component.ts297
-rw-r--r--webui/src/app/app.module.ts226
-rw-r--r--webui/src/app/apps-page/apps-page.component.html128
-rw-r--r--webui/src/app/apps-page/apps-page.component.sass8
-rw-r--r--webui/src/app/apps-page/apps-page.component.spec.ts119
-rw-r--r--webui/src/app/apps-page/apps-page.component.ts327
-rw-r--r--webui/src/app/auth-interceptor.ts39
-rw-r--r--webui/src/app/auth.guard.spec.ts28
-rw-r--r--webui/src/app/auth.guard.ts37
-rw-r--r--webui/src/app/auth.service.spec.ts28
-rw-r--r--webui/src/app/auth.service.ts136
-rw-r--r--webui/src/app/bind9-app-tab/bind9-app-tab.component.html127
-rw-r--r--webui/src/app/bind9-app-tab/bind9-app-tab.component.sass8
-rw-r--r--webui/src/app/bind9-app-tab/bind9-app-tab.component.spec.ts148
-rw-r--r--webui/src/app/bind9-app-tab/bind9-app-tab.component.ts319
-rw-r--r--webui/src/app/breadcrumbs/breadcrumbs.component.html11
-rw-r--r--webui/src/app/breadcrumbs/breadcrumbs.component.sass4
-rw-r--r--webui/src/app/breadcrumbs/breadcrumbs.component.spec.ts30
-rw-r--r--webui/src/app/breadcrumbs/breadcrumbs.component.ts22
-rw-r--r--webui/src/app/config-checker-preference-page/config-checker-preference-page.component.html23
-rw-r--r--webui/src/app/config-checker-preference-page/config-checker-preference-page.component.sass0
-rw-r--r--webui/src/app/config-checker-preference-page/config-checker-preference-page.component.spec.ts67
-rw-r--r--webui/src/app/config-checker-preference-page/config-checker-preference-page.component.stories.ts138
-rw-r--r--webui/src/app/config-checker-preference-page/config-checker-preference-page.component.ts16
-rw-r--r--webui/src/app/config-checker-preference-picker/config-checker-preference-picker.component.html127
-rw-r--r--webui/src/app/config-checker-preference-picker/config-checker-preference-picker.component.sass71
-rw-r--r--webui/src/app/config-checker-preference-picker/config-checker-preference-picker.component.spec.ts379
-rw-r--r--webui/src/app/config-checker-preference-picker/config-checker-preference-picker.component.stories.ts91
-rw-r--r--webui/src/app/config-checker-preference-picker/config-checker-preference-picker.component.ts248
-rw-r--r--webui/src/app/config-checker-preference-updater/config-checker-preference-updater.component.html7
-rw-r--r--webui/src/app/config-checker-preference-updater/config-checker-preference-updater.component.sass0
-rw-r--r--webui/src/app/config-checker-preference-updater/config-checker-preference-updater.component.spec.ts218
-rw-r--r--webui/src/app/config-checker-preference-updater/config-checker-preference-updater.component.stories.ts115
-rw-r--r--webui/src/app/config-checker-preference-updater/config-checker-preference-updater.component.ts139
-rw-r--r--webui/src/app/config-review-panel/config-review-panel.component.html84
-rw-r--r--webui/src/app/config-review-panel/config-review-panel.component.sass38
-rw-r--r--webui/src/app/config-review-panel/config-review-panel.component.spec.ts379
-rw-r--r--webui/src/app/config-review-panel/config-review-panel.component.stories.ts155
-rw-r--r--webui/src/app/config-review-panel/config-review-panel.component.ts320
-rw-r--r--webui/src/app/dashboard/dashboard.component.html397
-rw-r--r--webui/src/app/dashboard/dashboard.component.sass31
-rw-r--r--webui/src/app/dashboard/dashboard.component.spec.ts271
-rw-r--r--webui/src/app/dashboard/dashboard.component.ts391
-rw-r--r--webui/src/app/dhcp-option-form/dhcp-option-form.component.html304
-rw-r--r--webui/src/app/dhcp-option-form/dhcp-option-form.component.sass34
-rw-r--r--webui/src/app/dhcp-option-form/dhcp-option-form.component.spec.ts379
-rw-r--r--webui/src/app/dhcp-option-form/dhcp-option-form.component.ts412
-rw-r--r--webui/src/app/dhcp-option-set-form/dhcp-option-set-form.component.html32
-rw-r--r--webui/src/app/dhcp-option-set-form/dhcp-option-set-form.component.sass0
-rw-r--r--webui/src/app/dhcp-option-set-form/dhcp-option-set-form.component.spec.ts90
-rw-r--r--webui/src/app/dhcp-option-set-form/dhcp-option-set-form.component.ts70
-rw-r--r--webui/src/app/dhcp-option-set-view/dhcp-option-set-view.component.html42
-rw-r--r--webui/src/app/dhcp-option-set-view/dhcp-option-set-view.component.sass9
-rw-r--r--webui/src/app/dhcp-option-set-view/dhcp-option-set-view.component.spec.ts236
-rw-r--r--webui/src/app/dhcp-option-set-view/dhcp-option-set-view.component.ts156
-rw-r--r--webui/src/app/dhcp-options.service.spec.ts56
-rw-r--r--webui/src/app/dhcp-options.service.ts1423
-rw-r--r--webui/src/app/entity-link/entity-link.component.html47
-rw-r--r--webui/src/app/entity-link/entity-link.component.sass0
-rw-r--r--webui/src/app/entity-link/entity-link.component.spec.ts146
-rw-r--r--webui/src/app/entity-link/entity-link.component.ts39
-rw-r--r--webui/src/app/event-text/event-text.component.html7
-rw-r--r--webui/src/app/event-text/event-text.component.sass0
-rw-r--r--webui/src/app/event-text/event-text.component.spec.ts24
-rw-r--r--webui/src/app/event-text/event-text.component.ts62
-rw-r--r--webui/src/app/events-page/events-page.component.html7
-rw-r--r--webui/src/app/events-page/events-page.component.sass0
-rw-r--r--webui/src/app/events-page/events-page.component.spec.ts50
-rw-r--r--webui/src/app/events-page/events-page.component.ts42
-rw-r--r--webui/src/app/events-panel/events-panel.component.html177
-rw-r--r--webui/src/app/events-panel/events-panel.component.sass30
-rw-r--r--webui/src/app/events-panel/events-panel.component.spec.ts171
-rw-r--r--webui/src/app/events-panel/events-panel.component.ts400
-rw-r--r--webui/src/app/forbidden-page/forbidden-page.component.html14
-rw-r--r--webui/src/app/forbidden-page/forbidden-page.component.sass0
-rw-r--r--webui/src/app/forbidden-page/forbidden-page.component.spec.ts26
-rw-r--r--webui/src/app/forbidden-page/forbidden-page.component.ts17
-rw-r--r--webui/src/app/forms/dhcp-option-field.spec.ts19
-rw-r--r--webui/src/app/forms/dhcp-option-field.ts87
-rw-r--r--webui/src/app/forms/dhcp-option-form.spec.ts18
-rw-r--r--webui/src/app/forms/dhcp-option-form.ts32
-rw-r--r--webui/src/app/forms/dhcp-option-set-form.service.spec.ts834
-rw-r--r--webui/src/app/forms/dhcp-option-set-form.service.ts503
-rw-r--r--webui/src/app/forms/host-form.spec.ts208
-rw-r--r--webui/src/app/forms/host-form.ts162
-rw-r--r--webui/src/app/forms/linked-form-group.spec.ts16
-rw-r--r--webui/src/app/forms/linked-form-group.ts46
-rw-r--r--webui/src/app/forms/selectable-daemon.ts34
-rw-r--r--webui/src/app/global-search/global-search.component.html62
-rw-r--r--webui/src/app/global-search/global-search.component.sass0
-rw-r--r--webui/src/app/global-search/global-search.component.spec.ts66
-rw-r--r--webui/src/app/global-search/global-search.component.ts80
-rw-r--r--webui/src/app/ha-status-panel/ha-status-panel.component.html139
-rw-r--r--webui/src/app/ha-status-panel/ha-status-panel.component.sass8
-rw-r--r--webui/src/app/ha-status-panel/ha-status-panel.component.spec.ts206
-rw-r--r--webui/src/app/ha-status-panel/ha-status-panel.component.ts568
-rw-r--r--webui/src/app/ha-status/ha-status.component.html36
-rw-r--r--webui/src/app/ha-status/ha-status.component.sass0
-rw-r--r--webui/src/app/ha-status/ha-status.component.spec.ts34
-rw-r--r--webui/src/app/ha-status/ha-status.component.ts266
-rw-r--r--webui/src/app/help-tip/help-tip.component.html13
-rw-r--r--webui/src/app/help-tip/help-tip.component.sass21
-rw-r--r--webui/src/app/help-tip/help-tip.component.spec.ts26
-rw-r--r--webui/src/app/help-tip/help-tip.component.ts40
-rw-r--r--webui/src/app/host-form/host-form.component.html349
-rw-r--r--webui/src/app/host-form/host-form.component.sass9
-rw-r--r--webui/src/app/host-form/host-form.component.spec.ts1553
-rw-r--r--webui/src/app/host-form/host-form.component.ts1149
-rw-r--r--webui/src/app/host-tab/host-tab.component.html249
-rw-r--r--webui/src/app/host-tab/host-tab.component.sass25
-rw-r--r--webui/src/app/host-tab/host-tab.component.spec.ts907
-rw-r--r--webui/src/app/host-tab/host-tab.component.ts471
-rw-r--r--webui/src/app/hosts-page/hosts-page.component.html213
-rw-r--r--webui/src/app/hosts-page/hosts-page.component.sass18
-rw-r--r--webui/src/app/hosts-page/hosts-page.component.spec.ts1076
-rw-r--r--webui/src/app/hosts-page/hosts-page.component.ts645
-rw-r--r--webui/src/app/hosts-table/hosts-table.component.html14
-rw-r--r--webui/src/app/hosts-table/hosts-table.component.sass0
-rw-r--r--webui/src/app/hosts-table/hosts-table.component.spec.ts26
-rw-r--r--webui/src/app/hosts-table/hosts-table.component.ts40
-rw-r--r--webui/src/app/identifier/identifier.component.html23
-rw-r--r--webui/src/app/identifier/identifier.component.sass0
-rw-r--r--webui/src/app/identifier/identifier.component.spec.ts168
-rw-r--r--webui/src/app/identifier/identifier.component.ts165
-rw-r--r--webui/src/app/identifier/identifier.stories.ts27
-rw-r--r--webui/src/app/iptype.ts7
-rw-r--r--webui/src/app/json-tree-root/json-tree-root.component.html7
-rw-r--r--webui/src/app/json-tree-root/json-tree-root.component.spec.ts126
-rw-r--r--webui/src/app/json-tree-root/json-tree-root.component.ts123
-rw-r--r--webui/src/app/json-tree-root/json-tree-root.stories.ts73
-rw-r--r--webui/src/app/json-tree/json-tree.component.html134
-rw-r--r--webui/src/app/json-tree/json-tree.component.sass169
-rw-r--r--webui/src/app/json-tree/json-tree.component.spec.ts774
-rw-r--r--webui/src/app/json-tree/json-tree.component.ts615
-rw-r--r--webui/src/app/json-tree/json-tree.stories.ts47
-rw-r--r--webui/src/app/kea-app-tab/kea-app-tab.component.html310
-rw-r--r--webui/src/app/kea-app-tab/kea-app-tab.component.sass42
-rw-r--r--webui/src/app/kea-app-tab/kea-app-tab.component.spec.ts265
-rw-r--r--webui/src/app/kea-app-tab/kea-app-tab.component.ts414
-rw-r--r--webui/src/app/kea-daemon-configuration-page/kea-daemon-configuration-page.component.html73
-rw-r--r--webui/src/app/kea-daemon-configuration-page/kea-daemon-configuration-page.component.sass17
-rw-r--r--webui/src/app/kea-daemon-configuration-page/kea-daemon-configuration-page.component.spec.ts202
-rw-r--r--webui/src/app/kea-daemon-configuration-page/kea-daemon-configuration-page.component.ts200
-rw-r--r--webui/src/app/lease-search-page/lease-search-page.component.html255
-rw-r--r--webui/src/app/lease-search-page/lease-search-page.component.sass9
-rw-r--r--webui/src/app/lease-search-page/lease-search-page.component.spec.ts712
-rw-r--r--webui/src/app/lease-search-page/lease-search-page.component.ts380
-rw-r--r--webui/src/app/loading.service.spec.ts12
-rw-r--r--webui/src/app/loading.service.ts47
-rw-r--r--webui/src/app/localtime.pipe.spec.ts16
-rw-r--r--webui/src/app/localtime.pipe.ts15
-rw-r--r--webui/src/app/log-view-page/log-view-page.component.html92
-rw-r--r--webui/src/app/log-view-page/log-view-page.component.sass16
-rw-r--r--webui/src/app/log-view-page/log-view-page.component.spec.ts66
-rw-r--r--webui/src/app/log-view-page/log-view-page.component.ts188
-rw-r--r--webui/src/app/login-screen/login-screen.component.html73
-rw-r--r--webui/src/app/login-screen/login-screen.component.sass0
-rw-r--r--webui/src/app/login-screen/login-screen.component.spec.ts45
-rw-r--r--webui/src/app/login-screen/login-screen.component.ts72
-rw-r--r--webui/src/app/machines-page/machines-page.component.html572
-rw-r--r--webui/src/app/machines-page/machines-page.component.sass11
-rw-r--r--webui/src/app/machines-page/machines-page.component.spec.ts259
-rw-r--r--webui/src/app/machines-page/machines-page.component.ts594
-rw-r--r--webui/src/app/password-change-page/password-change-page.component.html91
-rw-r--r--webui/src/app/password-change-page/password-change-page.component.sass5
-rw-r--r--webui/src/app/password-change-page/password-change-page.component.spec.ts59
-rw-r--r--webui/src/app/password-change-page/password-change-page.component.ts85
-rw-r--r--webui/src/app/profile-page/profile-page.component.html45
-rw-r--r--webui/src/app/profile-page/profile-page.component.sass0
-rw-r--r--webui/src/app/profile-page/profile-page.component.spec.ts64
-rw-r--r--webui/src/app/profile-page/profile-page.component.ts55
-rw-r--r--webui/src/app/rename-app-dialog/rename-app-dialog.component.html36
-rw-r--r--webui/src/app/rename-app-dialog/rename-app-dialog.component.sass0
-rw-r--r--webui/src/app/rename-app-dialog/rename-app-dialog.component.spec.ts290
-rw-r--r--webui/src/app/rename-app-dialog/rename-app-dialog.component.ts230
-rw-r--r--webui/src/app/server-data.service.spec.ts146
-rw-r--r--webui/src/app/server-data.service.ts188
-rw-r--r--webui/src/app/setting.service.spec.ts29
-rw-r--r--webui/src/app/setting.service.ts34
-rw-r--r--webui/src/app/settings-menu/settings-menu.component.html1
-rw-r--r--webui/src/app/settings-menu/settings-menu.component.sass0
-rw-r--r--webui/src/app/settings-menu/settings-menu.component.spec.ts35
-rw-r--r--webui/src/app/settings-menu/settings-menu.component.ts44
-rw-r--r--webui/src/app/settings-page/settings-page.component.html131
-rw-r--r--webui/src/app/settings-page/settings-page.component.sass0
-rw-r--r--webui/src/app/settings-page/settings-page.component.spec.ts65
-rw-r--r--webui/src/app/settings-page/settings-page.component.ts99
-rw-r--r--webui/src/app/shared-networks-page/shared-networks-page.component.html127
-rw-r--r--webui/src/app/shared-networks-page/shared-networks-page.component.sass12
-rw-r--r--webui/src/app/shared-networks-page/shared-networks-page.component.spec.ts150
-rw-r--r--webui/src/app/shared-networks-page/shared-networks-page.component.ts194
-rw-r--r--webui/src/app/subnet-bar/subnet-bar.component.html4
-rw-r--r--webui/src/app/subnet-bar/subnet-bar.component.sass28
-rw-r--r--webui/src/app/subnet-bar/subnet-bar.component.spec.ts216
-rw-r--r--webui/src/app/subnet-bar/subnet-bar.component.ts104
-rw-r--r--webui/src/app/subnets-page/subnets-page.component.html169
-rw-r--r--webui/src/app/subnets-page/subnets-page.component.sass8
-rw-r--r--webui/src/app/subnets-page/subnets-page.component.spec.ts167
-rw-r--r--webui/src/app/subnets-page/subnets-page.component.ts226
-rw-r--r--webui/src/app/subnets.spec.ts121
-rw-r--r--webui/src/app/subnets.ts58
-rw-r--r--webui/src/app/swagger-ui/swagger-ui.component.html1
-rw-r--r--webui/src/app/swagger-ui/swagger-ui.component.sass0
-rw-r--r--webui/src/app/swagger-ui/swagger-ui.component.spec.ts24
-rw-r--r--webui/src/app/swagger-ui/swagger-ui.component.ts23
-rw-r--r--webui/src/app/users-page/users-page.component.html395
-rw-r--r--webui/src/app/users-page/users-page.component.sass22
-rw-r--r--webui/src/app/users-page/users-page.component.spec.ts68
-rw-r--r--webui/src/app/users-page/users-page.component.ts563
-rw-r--r--webui/src/app/utils.spec.ts175
-rw-r--r--webui/src/app/utils.stories.ts12
-rw-r--r--webui/src/app/utils.ts407
-rw-r--r--webui/src/app/utiltypes.spec.ts31
-rw-r--r--webui/src/app/utiltypes.ts59
-rw-r--r--webui/src/app/validators.spec.ts97
-rw-r--r--webui/src/app/validators.ts135
-rw-r--r--webui/src/assets/.gitkeep0
-rw-r--r--webui/src/assets/stork-logo-big.pngbin0 -> 21951 bytes
-rw-r--r--webui/src/assets/stork-logo-small.pngbin0 -> 18016 bytes
-rw-r--r--webui/src/environments/environment.prod.ts4
-rw-r--r--webui/src/environments/environment.ts17
-rw-r--r--webui/src/favicon.icobin0 -> 4286 bytes
-rw-r--r--webui/src/index.html15
-rw-r--r--webui/src/main.ts13
-rw-r--r--webui/src/polyfills.ts62
-rw-r--r--webui/src/styles.sass34
-rw-r--r--webui/src/test.ts14
-rw-r--r--webui/tsconfig.app.json10
-rw-r--r--webui/tsconfig.json21
-rw-r--r--webui/tsconfig.spec.json9
821 files changed, 181876 insertions, 0 deletions
diff --git a/.dockerignore b/.dockerignore
new file mode 100644
index 0000000..22ec6d3
--- /dev/null
+++ b/.dockerignore
@@ -0,0 +1,13 @@
+.git
+tools/
+webui/node_modules/
+webui/dist
+webui/.angular
+webui/src/app/backend
+webui/src/assets/arm
+build-root/
+backend/server/gen
+doc/doctrees
+doc/_build
+doc/man/*.8
+dist/
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..56f1a0d
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,82 @@
+# emacs temporary files
+*~
+
+# this folder is populated by Rakefile with various tools needed for development
+/tools
+
+# backend binaries
+/backend/cmd/stork-agent/stork-agent
+/backend/cmd/stork-server/stork-server
+/backend/cmd/stork-tool/stork-tool
+
+# generated by protoc
+/backend/api/agent.pb.go
+/backend/api/agent_grpc.pb.go
+
+# generated by go-swagger
+/backend/server/gen/*
+
+# generated by mockgen
+/backend/server/agentcomm/api_mock.go
+
+# vagrant
+/.vagrant
+
+# generated documentation
+/doc/_build
+/doc/doctrees
+
+# output from backend unit tests coverage
+/backend/coverage.out
+
+# ctags
+TAGS
+
+# generated while combining whole swagger API
+/api/swagger.yaml
+
+# venv for dhcp traffic simulator
+/tests/sim/venv
+/tests/sim/__pycache__/
+
+# files created by running docker containers
+/backend/cmd/stork-agent/supervisord.log
+/backend/cmd/stork-agent/supervisord.pid
+docker/config/agent-kea-many-subnets/kea-dhcp4.conf
+
+# generated docs
+/doc/man/stork-*.8
+/doc/_build/
+/webui/src/assets/arm/
+/webui/src/assets/pkgs/
+
+# build stuff
+/build-root
+/.pkgs-build
+/dist
+**/.bundle/config
+
+# tests venv and pycache
+/tests/system/venv/
+/tests/system/test-results/
+.vscode/
+/isc-stork-*.deb
+/isc-stork-*.rpm
+*.code-workspace
+/backend/api/stork/backend/api/agent/protocol/agent.pb.go
+root/
+tests/system/.env/
+test-results/
+__pycache__/
+tests/system/config/kea-many-subnets/kea-dhcp4.conf
+tests/system/config/kea/kea-leases4.csv
+tests/system/config/kea/kea-leases6.csv
+tests/system/config/certs/key.pem
+tests/system/config/certs/cert.pem
+
+# OpenAPI generator files for Python
+tests/system/openapi_client/
+tests/system/.openapi-generator/
+tests/system/.openapi-generator-ignore
+tests/system/openapi_client_README.md
+webui/documentation.json
diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
new file mode 100644
index 0000000..115cc83
--- /dev/null
+++ b/.gitlab-ci.yml
@@ -0,0 +1,439 @@
+image: registry.gitlab.isc.org/isc-projects/stork/ci-base:latest
+
+# Only create pipelines for merge requests and pushes/merges to the main branch
+workflow:
+ rules:
+ - if: '$CI_PIPELINE_SOURCE == "merge_request_event"'
+ - if: '$CI_PIPELINE_SOURCE == "push" && $CI_COMMIT_REF_NAME == $CI_DEFAULT_BRANCH'
+
+variables:
+ POSTGRES_ADDR: postgres:5432
+ POSTGRES_DB: storktest
+ POSTGRES_USER: storktest
+ POSTGRES_PASSWORD: storktest
+ SUPPRESS_DB_MAINTENANCE: "true"
+
+# this is used to build docker images of stork (using DIND)
+ DOCKER_DRIVER: overlay2
+ DOCKER_TLS_CERTDIR: "" # this is workaround for TLS problem https://about.gitlab.com/blog/2019/07/31/docker-in-docker-with-docker-19-dot-03/
+ OLD_CI: "yes"
+
+# stages order
+stages:
+ - checks
+ - system_tests
+ - build
+ - deploy
+
+# cache
+cache:
+ key: one-shared-key-2
+ paths:
+ - webui/node_modules/
+ - tools/
+
+### build jobs ###
+
+# common parts used in build jobs
+.base_build_debian:
+ stage: checks
+ rules:
+ - when: always
+ tags:
+ - linux
+ - amd64
+ - ssd
+ before_script:
+ - sysctl -w net.ipv6.conf.all.disable_ipv6=1
+ - sysctl -w net.ipv6.conf.default.disable_ipv6=1
+ - apt-get update
+ - apt-get install -y ruby-dev make python3-venv
+ - rake prepare
+ - rake prepare:deps
+
+.base_build_ubi:
+ stage: checks
+ rules:
+ - when: always
+ cache:
+ key: one-shared-key-rpm-2
+ paths:
+ - webui/node_modules/
+ - tools/
+ image: registry.gitlab.isc.org/isc-projects/stork/pkgs-redhat-ubi8:latest
+ tags:
+ - linux
+ - amd64
+ - ssd
+ before_script:
+ - sysctl -w net.ipv6.conf.all.disable_ipv6=1
+ - sysctl -w net.ipv6.conf.default.disable_ipv6=1
+ - rake prepare
+ - rake prepare:deps
+
+lint_backend:
+ extends: .base_build_debian
+ script:
+ - rake lint:backend
+
+unittest_backend:
+ extends: .base_build_debian
+ services:
+ - name: registry.gitlab.isc.org/isc-projects/stork/ci-postgres:11
+ alias: postgres
+ script:
+ - echo 'postgres:*:*:storktest:storktest' > ~/.pgpass
+ - chmod 600 ~/.pgpass
+ - rake unittest:backend
+
+lint_ui:
+ extends: .base_build_debian
+ script:
+ - rake lint:ui
+
+unittest_ui:
+ extends: .base_build_debian
+ script:
+ - rake unittest:ui
+
+build_ui:
+ extends: .base_build_debian
+ script:
+ - rake build:ui
+ artifacts:
+ name: "ui"
+ expire_in: 1 week
+ paths:
+ - webui/dist/stork/
+
+build_backend:
+ extends: .base_build_debian
+ script:
+ - rake build:backend
+ artifacts:
+ name: "backend"
+ expire_in: 1 week
+ paths:
+ - backend/cmd/stork-agent/stork-agent
+ - backend/cmd/stork-server/stork-server
+ - backend/cmd/stork-db-migrate/stork-db-migrate
+
+danger:
+ extends: .base_build_debian
+ stage: checks
+ rules:
+ - if: '$CI_PIPELINE_SOURCE == "merge_request_event"'
+ - if: '$CI_PIPELINE_SOURCE == "push" && $CI_COMMIT_REF_NAME == $CI_DEFAULT_BRANCH'
+ tags:
+ - linux
+ - amd64
+ - ssd
+ before_script:
+ - export CI_MERGE_REQUEST_ID=$(git ls-remote -q origin merge-requests\*\head | grep $CI_COMMIT_SHA | sed 's/.*refs\/merge-requests\/\([0-9]*\)\/head/\1/g')
+ - export CI_PROJECT_PATH=$CI_PROJECT_ID #some version of gitlab has problems with searching by project path
+ - export DANGER_GITLAB_HOST=gitlab.isc.org
+ - export DANGER_GITLAB_API_BASE_URL=https://gitlab.isc.org/api/v4
+ script:
+ - sysctl -w net.ipv6.conf.all.disable_ipv6=1
+ - sysctl -w net.ipv6.conf.default.disable_ipv6=1
+ - rake lint:git
+
+tarball:
+ stage: build
+ rules:
+ - when: always
+ tags:
+ - linux
+ - amd64
+ - ssd
+ script:
+ - rake release:tarball
+ artifacts:
+ paths:
+ - stork-*.tar.gz
+ expire_in: 1 week
+
+
+### build rpm & deb packages ###
+
+build_debs:
+ extends: .base_build_debian
+ stage: build
+ script:
+ - rake build:agent_pkg
+ - rake build:server_pkg
+ - cp dist/pkgs/*.deb ./
+ artifacts:
+ paths:
+ - isc-stork-*.deb
+ expire_in: 1 hour
+
+build_rpms:
+ extends: .base_build_ubi
+ stage: build
+ script:
+ - rake build:agent_pkg
+ - rake build:server_pkg
+ - cp dist/pkgs/*.rpm ./
+ artifacts:
+ paths:
+ - isc-stork-*.rpm
+ expire_in: 1 hour
+
+build_apks:
+ stage: build
+ rules:
+ - when: always
+ cache:
+ key: one-shared-key-apk-1
+ paths:
+ - webui/node_modules/
+ - tools/
+ image: golang:1.18-alpine3.15
+ tags:
+ - linux
+ - amd64
+ - ssd
+ before_script:
+ - sysctl -w net.ipv6.conf.all.disable_ipv6=1
+ - sysctl -w net.ipv6.conf.default.disable_ipv6=1
+ # The initially installed TAR causes FPM to fail. It must be reinstalled: https://github.com/jordansissel/fpm/issues/1375#issuecomment-317571946
+ - apk add --no-cache ruby-rake ruby-dev openjdk11-jre-headless python3 nodejs npm protoc~3.18.1 make gcc musl-dev tar
+ - rake prepare:dist
+ - rake prepare:deps
+ script:
+ - rake build:agent_pkg
+ - rake build:server_pkg
+ - cp dist/pkgs/*.apk ./
+ artifacts:
+ paths:
+ - isc-stork-*.apk
+ expire_in: 1 hour
+
+packages:
+ stage: build
+ rules:
+ - when: on_success
+ needs:
+ - build_debs
+ - build_rpms
+ - build_apks
+ script:
+ - ":"
+ artifacts:
+ paths:
+ - isc-stork-*
+ expire_in: 1 week
+
+### install packages ###
+
+install_debs:
+ tags:
+ - linux
+ - amd64
+ - ssd
+ stage: build
+ script:
+ - "dpkg -i isc-stork-*.deb"
+ - "stork-agent --version"
+ - "stork-server --version"
+ - "stork-tool --version"
+ - "dpkg -i isc-stork-*.deb"
+ - "stork-agent --version"
+ - "stork-server --version"
+ - "stork-tool --version"
+ - "dpkg -r isc-stork-agent"
+ - "dpkg -r isc-stork-server"
+ needs:
+ - build_debs
+
+install_rpms:
+ image: registry.gitlab.isc.org/isc-projects/stork/pkgs-redhat-ubi8:latest
+ tags:
+ - linux
+ - amd64
+ - ssd
+ stage: build
+ script:
+ - "rpm -i isc-stork-*.rpm"
+ - "stork-agent --version"
+ - "stork-server --version"
+ - "stork-tool --version"
+ - "rpm -U --force isc-stork-*.rpm"
+ - "stork-agent --version"
+ - "stork-server --version"
+ - "stork-tool --version"
+ - "rpm -e isc-stork-agent"
+ - "rpm -e isc-stork-server"
+ needs:
+ - build_rpms
+
+install_apks:
+ image: alpine:3.15
+ tags:
+ - linux
+ - amd64
+ - ssd
+ stage: build
+ script:
+ - "apk add --no-cache --allow-untrusted isc-stork-*.apk"
+ - "stork-agent --version"
+ - "stork-server --version"
+ - "stork-tool --version"
+ - "apk add --no-cache --allow-untrusted isc-stork-*.apk"
+ - "stork-agent --version"
+ - "stork-server --version"
+ - "stork-tool --version"
+ - "apk del isc-stork-agent"
+ - "apk del isc-stork-server"
+ needs:
+ - build_apks
+
+### system testing ###
+
+system_testing:
+ stage: system_tests
+ allow_failure: false
+ tags:
+ - linux
+ - docker
+ - amd64
+ - ssd
+ services:
+ - docker:dind
+ rules:
+ - when: on_success
+ image: docker/compose:1.29.2
+ cache:
+ key: system-tests-key-2
+ paths:
+ - tools/
+ before_script:
+ - ip -6 route del default
+ - apk update
+ - apk add --no-cache openjdk11-jre-headless python3 openssl ruby-rake nodejs npm
+ variables:
+ # Alpine uses the libc-musl library, which isn't compatible with the NodeJS
+ # binary fetched by the Rake script. Additionally, the binaries for alpine
+ # are missing on the NodeJS page. We cannot use the image based on Debian
+ # because it is out-of-date.
+ USE_SYSTEM_NODEJS: "true"
+ # It must be an alias of the docker:dind service.
+ DEFAULT_MAPPED_ADDRESS: "docker"
+ # Don't block if the artifacts from the previous stages expired.
+ dependencies: []
+ script:
+ # - rake system_tests_ui
+ - rake systemtest
+ artifacts:
+ paths:
+ - tests/system/test-results/**/*.log
+ expire_in: 1 week
+ when: always
+
+### upload release notes and tarball to repo.isc.org ###
+
+upload_to_repo:
+ stage: deploy
+ when: manual
+ allow_failure: true
+ tags:
+ - linux
+ - amd64
+ - ssd
+ - stork-repo
+ needs:
+ - tarball
+ script:
+ - eval $(ssh-agent -s)
+ - echo "${REPO_SSH_PRIVATE_KEY}" | base64 -d | ssh-add -
+ - mkdir ~/.ssh
+ - ssh-keyscan -4 repo.isc.org >> ~/.ssh/known_hosts
+ - rake release:notes
+ - rake release:tarball:upload HOST=storkpush@repo.isc.org TARGET=/data/shared/sweng/stork/releases
+ artifacts:
+ name: release-notes
+ expire_in: 1 week
+ paths:
+ - Stork-*-ReleaseNotes.txt
+
+### demo deploy jobs ###
+
+deploy_demo:
+ stage: deploy
+ cache: []
+ rules:
+ - if: '$CI_PIPELINE_SOURCE == "merge_request_event"'
+ when: manual
+ - if: '$CI_PIPELINE_SOURCE == "push" && $CI_COMMIT_REF_NAME == $CI_DEFAULT_BRANCH'
+ when: always
+ allow_failure: true
+ tags:
+ - linux
+ - amd64
+ - ssd
+ needs:
+ - danger
+ before_script:
+ - eval $(ssh-agent -s)
+ - echo "$SSH_PRIVATE_KEY_STORK" | base64 -d | ssh-add -
+ script:
+ # Print Docker info
+ - ssh -o StrictHostKeyChecking=no jenkins@stork.lab.isc.org docker info
+ - ssh -o StrictHostKeyChecking=no jenkins@stork.lab.isc.org docker version
+ - ssh -o StrictHostKeyChecking=no jenkins@stork.lab.isc.org docker-compose version
+ # Stop all containers
+ - ssh -o StrictHostKeyChecking=no jenkins@stork.lab.isc.org "docker ps -q | xargs docker kill || true"
+ # Force network cleanup and to make sure we have clean state do restart docker service
+ # They can be replaced with docker-compose down after migration from the old demo.
+ - ssh -o StrictHostKeyChecking=no jenkins@stork.lab.isc.org docker container prune -f
+ - ssh -o StrictHostKeyChecking=no jenkins@stork.lab.isc.org docker network prune -f
+ - ssh -o StrictHostKeyChecking=no jenkins@stork.lab.isc.org docker volume prune -f
+ - ssh -o StrictHostKeyChecking=no jenkins@stork.lab.isc.org sudo systemctl restart docker
+ # Copy source code
+ - ssh -o StrictHostKeyChecking=no jenkins@stork.lab.isc.org rm -rf deploy/*
+ - scp -o StrictHostKeyChecking=no -r $PWD/* jenkins@stork.lab.isc.org:~/deploy
+ # Build & Start Docker containers
+ - ssh -o StrictHostKeyChecking=no jenkins@stork.lab.isc.org CS_REPO_ACCESS_TOKEN=$CLOUDSMITH_REPO_ACCESS_TOKEN deploy/stork-demo.sh --no-prompt
+
+### upload packages to cloudsmith.io ###
+
+upload_packages:
+ stage: deploy
+ rules:
+ - when: manual
+ allow_failure: true
+ image: registry.gitlab.isc.org/isc-projects/stork/pkgs-cloudsmith:latest
+ tags:
+ - linux
+ - docker
+ - amd64
+ - ssd
+ needs:
+ - packages
+ before_script:
+ - sysctl -w net.ipv6.conf.all.disable_ipv6=1
+ - sysctl -w net.ipv6.conf.default.disable_ipv6=1
+ script:
+ - export LANG='en_US.UTF-8' LC_ALL='en_US.UTF-8'
+ - rake release:packages:upload REPO=stork
+
+upload_test_packages:
+ stage: deploy
+ rules:
+ - when: manual
+ allow_failure: true
+ image: registry.gitlab.isc.org/isc-projects/stork/pkgs-cloudsmith:latest
+ tags:
+ - linux
+ - docker
+ - amd64
+ - ssd
+ needs:
+ - packages
+ before_script:
+ - sysctl -w net.ipv6.conf.all.disable_ipv6=1
+ - sysctl -w net.ipv6.conf.default.disable_ipv6=1
+ script:
+ - export LANG='en_US.UTF-8' LC_ALL='en_US.UTF-8'
+ - rake release:packages:upload REPO=stork-testing
diff --git a/.gitlab/issue_templates/bug_report.md b/.gitlab/issue_templates/bug_report.md
new file mode 100644
index 0000000..46b53ed
--- /dev/null
+++ b/.gitlab/issue_templates/bug_report.md
@@ -0,0 +1,77 @@
+---
+name: Bug report
+about: Create a report to help us improve
+
+---
+
+If you believe your bug report is a security issue (e.g. a packet that can kill the server), DO NOT
+REPORT IT HERE. Please use https://www.isc.org/community/report-bug/ instead or send mail to
+security-office(at)isc(dot)org.
+
+**Describe the bug**
+A clear and concise description of what the bug is.
+
+**To Reproduce**
+Steps to reproduce the behavior:
+1. Install BIND9, Kea, Stork (which versions?) and run them with the following configs: '...'
+2. I do the following: ...
+3. A device in my network does the following: ...
+4. Kea/BIND9 server does the following: ...
+5. Stork does the following: ...
+
+**Expected behavior**
+A clear and concise description of what you expected to happen:
+Stork is supposed to report/do A, but didn't or did B instead.
+
+**Environment:**
+ - Kea version: which release? if it's compiled from git, which revision. Use kea-dhcp4 -V or
+ kea-dhcp6 -V to find out.
+ - BIND9 version: which release?
+ - Stork: which version?
+ - OS: [e.g. Ubuntu 16.04 x64]
+ - Kea: Which features were compiled in (in particular which backends)
+ - Kea: If/which hooks where loaded in
+
+**Additional Information**
+Add any other context about the problem here. In particular, feel free to share your config file
+and logs from around the time error occurred. Don't be shy to send more logs than you think are
+relevant. It is easy to grep large log files. It is tricky to guess what may have happened without
+any information.
+
+Make sure you anonymize your config files (at the very least make sure you obfuscate your database
+credentials, but you may also replace your actual IP addresses and host names with example.com and
+10.0.0.0/8 or 2001:db8::/32).
+
+**Some initial questions**
+- Are you sure your feature is not already implemented in the latest Kea version?
+- Are you sure what you would like to do is not possible using some other mechanisms?
+- Have you discussed your idea on kea-users or kea-dev mailing lists?
+
+**Is your feature request related to a problem? Please describe.**
+A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]
+It is very important to describe what you would like to do, and why.
+
+**Describe the solution you'd like**
+A clear and concise description of what you want to happen.
+
+**Describe alternatives you've considered**
+A clear and concise description of any alternative solutions or features you've considered.
+
+**Additional context**
+Add any other context about the feature request here.
+
+**Funding its development**
+Kea is run by ISC, which is a small non-profit organization without any government funding or any
+permanent sponsorship organizations. Are you able and willing to participate financially in the
+development costs?
+
+**Participating in development**
+Are you willing to participate in the feature development? ISC team always tries to make a feature
+as generic as possible, so it can be used in wide variety of situations. That means the proposed
+solution may be a bit different than you initially thought. Are you willing to take part in the
+design discussions? Are you willing to test an unreleased engineering code?
+
+**Contacting you**
+How can ISC reach you to discuss this matter further? If you do not specify any means such as
+e-mail, jabber id or a telephone, we may send you a message on github with questions when we have
+them.
diff --git a/.gitlab/issue_templates/feature_request.md b/.gitlab/issue_templates/feature_request.md
new file mode 100644
index 0000000..2abe7cd
--- /dev/null
+++ b/.gitlab/issue_templates/feature_request.md
@@ -0,0 +1,39 @@
+---
+name: Feature request
+about: Suggest an idea for this project
+
+---
+
+**Some initial questions**
+- Are you sure what you would like to do is not possible using some other mechanisms?
+- Stork is in very early stages of development. If your request is not simple, it
+ may be a while until anyone does anything with your request. Are you ok with that?
+
+**Is your feature request related to a problem? Please describe.**
+A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]
+It is very important to describe what you would like to do, and why.
+
+**Describe the solution you'd like**
+A clear and concise description of what you want to happen.
+
+**Describe alternatives you've considered**
+A clear and concise description of any alternative solutions or features you've considered.
+
+**Additional context**
+Add any other context about the feature request here.
+
+**Funding its development**
+Kea is run by ISC, which is a small non-profit organization without any government funding or any
+permanent sponsorship organizations. Are you able and willing to participate financially in the
+development costs?
+
+**Participating in development**
+Are you willing to participate in the feature development? ISC team always tries to make a feature
+as generic as possible, so it can be used in wide variety of situations. That means the proposed
+solution may be a bit different than you initially thought. Are you willing to take part in the
+design discussions? Are you willing to test an unreleased engineering code?
+
+**Contacting you**
+How can ISC reach you to discuss this matter further? If you do not specify any means such as
+e-mail, jabber id or a telephone, we may send you a message on github with questions when we
+have them.
diff --git a/.readthedocs.yaml b/.readthedocs.yaml
new file mode 100644
index 0000000..e5ec4f6
--- /dev/null
+++ b/.readthedocs.yaml
@@ -0,0 +1,12 @@
+build:
+ os: ubuntu-20.04
+ tools:
+ python: '3.9'
+formats: all
+python:
+ install:
+ - requirements: doc/src/requirements.txt
+sphinx:
+ configuration: doc/conf.py
+ fail_on_warning: true
+version: 2
diff --git a/AUTHORS.md b/AUTHORS.md
new file mode 100644
index 0000000..bf7b0b1
--- /dev/null
+++ b/AUTHORS.md
@@ -0,0 +1,28 @@
+Stork authors and contributors
+------------------------------
+
+Primary developers:
+
+- Marcin Siodelski (database, server, agent, UI, Kea)
+- Michał Nowikowski (server, agent, UI, Prometheus, release engineering)
+- Tomek Mrugalski (documentation, testing, Kea, project management)
+- Vicky Risk (product management, marketing)
+- Matthijs Mekking (BIND 9, Prometheus)
+- Włodek Wencel (QA, release engineering)
+- Thomas Markwalder (Kea)
+- Andrei Pavel (release engineering)
+- Sławek Figiel (server, agent, UI, prometheus)
+- Marcin Godzina (QA)
+- Suzanne Goldlust (documentation)
+
+Primary area of work mentioned in parentheses. The list is in
+roughly chronological order.
+
+We have received the following contributions:
+
+ - Franek Górski
+ - 2019-09: Grafana template
+ - Yannick Martin (OVHCloud)
+ - 2021-05: Fix for Stork agent not honoring the listen-only flags
+ - Kevin Fleming
+ - 2022-03: Automatic Stork services restart on failure
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
new file mode 100644
index 0000000..d908af5
--- /dev/null
+++ b/CONTRIBUTING.md
@@ -0,0 +1,210 @@
+# Stork Contributor's Guide
+
+So you found a bug in Stork or plan to develop a feature and want to send us a patch? Great! This
+page will explain how to contribute your changes smoothly.
+
+Here's a quick list of how to contribute a patch:
+
+1. **Create an account** on [GitLab](https://gitlab.isc.org).
+2. **Open an issue** in the [Stork project](https://gitlab.isc.org/isc-projects/stork/issues/new); make sure
+ it describes what you want to change and **why**.
+3. **Ask someone from the ISC team to give you access/permission to fork Stork** (@tomek, @vicky, @ondrej,
+ @slawek, or anyone else on the Stork dev team).
+4. **Fork the Stork code**: go to the Stork project page and click the [Fork button](https://gitlab.isc.org/isc-projects/stork/-/forks/new).
+ If you can't, you didn't complete step 3.
+5. **Implement your fix or feature, and push the code** to your repo. Make sure it compiles, has unit-tests,
+ is documented, and does what it's supposed to do.
+6. **Open a merge request**: go to the Stork project [merge requests page](https://gitlab.isc.org/isc-projects/stork/-/merge_requests/)
+ and click [New merge request](https://gitlab.isc.org/isc-projects/stork/-/merge_requests/new). If you
+ don't see the button, you didn't complete step 3.
+7. **Participate in the code review**: once you submit the MR, someone from ISC will eventually get
+ to the issue and will review your code. Please make sure you respond to comments. It's likely
+ you'll be asked to update the code.
+
+For a much more detailed description, see the text below.
+
+## Writing a Patch
+
+Before you start working on a patch or a new feature, it is a good idea to discuss it first with
+the Stork developers. The [stork-dev](https://lists.isc.org/mailman/listinfo/stork-dev) mailing list
+is a great place to ask your questions.
+
+OK, so you have written a patch? Great! Before you submit it, make sure that your code
+compiles. This may seem obvious, but there's more to it. You have surely checked that it compiles on
+your system, but Stork is portable software. Besides Ubuntu 18.04, it is compiled and used on
+FreeBSD. Will your code compile and work there? What about endianness? It is likely that you used a
+regular x86 architecture machine to write your patch, but the software is expected to run on many
+other architectures.
+
+Does your patch conform to the
+[Stork coding guidelines](https://gitlab.isc.org/isc-projects/stork/wikis/processes/coding-guidelines)?
+You can submit a patch that does not adhere to them, but that will reduce its chances of being
+accepted. If the deviations are minor, one of the Stork engineers who does the review will likely fix
+the issues. However, if there are lots of issues, the reviewer may simply reject the patch and ask
+you to fix it before re-submitting.
+
+## Running Unit-Tests
+
+One of the ground rules in Stork development is that every piece of code has to be tested. Stork is
+still early in its development, but we want to get to the position that
+[Kea](https://gitlab.isc.org/isc-projects/kea) is in now (with tons of unit and system tests run on
+many platforms automatically). We hope to have an extensive set of unit-tests for every non-trivial
+function or method. Even if you are fixing something small, like a single line, you are
+encouraged to write unit-tests for that change. That is even more true for new code: if you write a
+new function, method, or class, you definitely should write unit-tests for it.
+
+To ensure that everything is tested, ISC uses a development method called [Test Driven Development
+(TDD)](https://en.wikipedia.org/wiki/Test-driven_development). In TDD, a feature is developed
+alongside the tests, preferably with the tests being written first. In detail, a test is written for
+a small piece of functionality and run against the existing code. (In the case where the test is a
+unit test for a function, it would be run against an empty (unimplemented) function.) The test
+should fail. A minimal amount of code is then written, just enough to get the test to pass. Then
+the process is repeated for the next small piece of functionality. This continues until all the
+desired functionality has been implemented.
+
+This approach has two advantages:
+
+ - By writing a test first and then only enough code to pass the test, that code is fully tested. By
+ repeating this process until the feature is fully implemented, all the code gets test
+ coverage. We avoid the situation where not enough tests have been written to check all the
+ code.
+
+ - By running the test before the code implementing the function is written and observing the test
+ fail, you can detect the situation where a bug in the test code will cause it to pass regardless
+ of the code being tested.
+
+Initially, some people unfamiliar with that approach react with "but my change is simple and I
+tested that it works." That approach is both insufficient and short-sighted. It is insufficient,
+because manual testing is by definition laborious and can't really be done on the multitude of
+systems we plan to run Stork on. It is short-sighted, because even with your best intentions you
+will not be able to dedicate any significant amount of time for repeated testing of your improved
+code. In general, ISC's projects are long-lasting. Take BIND 9 or ISC DHCP as examples: both have
+been around for more than two decades. Over such long periods, code tends to be refactored several
+times. The change you made may be affected by some other change or by the code that hasn't been
+written yet.
+
+## Submitting a Merge Request (also known as Sending Your Patch the Right Way)
+
+The first step in writing a patch or new feature should be to get the source code from our Git
+repository. The procedure is very easy and is
+[explained here](https://gitlab.isc.org/isc-projects/stork/wikis/processes/gitlab-howto). While it
+is possible to provide a patch against the latest release, it makes the review process much easier
+if it is for the latest code from the Git master branch.
+
+ISC uses [GitLab](https://gitlab.isc.org) to manage its source code. While we also maintain a presence
+on [GitHub](https://github.com/isc-projects/stork), the process of syncing GitLab to GitHub is mostly
+automated and Stork developers rarely look at GitHub.
+
+ISC's GitLab has been a target for spammers in the past, so it is now set up defensively. In
+particular, new users cannot fork the code on their own; someone from ISC has to manually
+grant the ability to fork projects. Fortunately, this is easy to do and we gladly do this for anyone
+who asks and provides a good reason. "I'd like to fix bug X or develop feature Y" is an excellent
+reason. The best place to ask is either via the stork-dev mailing list, requesting access to the Stork project,
+or by asking in a comment on your issue. Please make sure you tag one of our administrators (@tomek, @slawek,
+@marcin, or @vicky) by name to automatically notify them.
+
+Once you fork the Stork code in GitLab, you have your own copy; you can commit your changes there
+and push them to your copy of the Stork repo. Once you feel that your patch is ready, go to the Stork project
+and [submit a merge request](https://gitlab.isc.org/isc-projects/stork/-/merge_requests/new).
+
+## Send a Pull Request on GitHub
+
+If you can't send the patch on GitLab, the next best way is to send a pull request (PR) on
+[GitHub](https://github.com/isc-projects/stork).
+
+This is almost as good as sending an MR on GitLab, but the downside is that the Stork devs don't look at GitHub
+very frequently, so it may be a while before we notice it. And when we do, the chances are we will be
+busy with other things. With GitLab, your MR will stare at us the whole time, so we'll get around to
+it much quicker. But we understand that there are some cases where people may prefer GitHub over
+GitLab.
+
+See the excellent documentation on GitHub: https://help.github.com/articles/creating-a-pull-request/
+for details. In essence, you need a GitHub account (spam/hassle free, takes one minute to set
+up). Then you can fork the Stork repository, commit changes to your repo, and ask us to pull your
+changes into the official Stork repository. This has a number of advantages. First, it is made against a
+specific code version, which can be easily checked with the `git log` command. Second, this request pops
+up instantly on our list of open pull requests and will stay there. The third benefit is that the
+pull request mechanism is very flexible. Stork engineers (and other users) can comment on it,
+attach links, mention other users, etc. As a submitter, you can augment the patch by committing extra
+changes to your repository. Those extra commits will appear instantly in the pull request, which is
+really useful during the review. Finally, Stork developers can better assess all open pull requests
+and add labels to them, such as "enhancement", "bug", or "unit-tests missing". This makes our lives
+easier. Oh, and your commits will later be shown as yours in the GitHub history. If you care about that
+kind of thing, once the patch is merged, you'll be automatically listed as a contributor and Stork
+will be listed as a project you have contributed to.
+
+## If You Really Can't Do an MR on GitLab or a PR on GitHub...
+
+Well, you are out of luck. There are other ways, but they're really awkward and the chances of
+your patch being ignored are really high. Anyway, here they are:
+
+- Send a patch to the stork-dev list. This is the third-best method, if you can't or don't want
+ to use GitLab or GitHub. If you send a patch to the mailing list at an inconvenient time, e.g. shortly
+ before a release, the Stork developers may miss it, or they may see it but not have time to
+ look at it. Nevertheless, it is still doable and we have successfully accepted patches that way in other
+ projects. It just takes more time from everyone involved, so it's a slower process in general.
+
+- [Create an issue in the Stork GitLab](https://gitlab.isc.org/isc-projects/stork/issues/new) and
+ attach your patch to it. However, if you don't
+ specify the base version against which it was created, one of the Stork developers will have to guess,
+ or will have to figure it out by trial and error. If the code doesn't compile,
+ the reviewer will not know if the patch is broken or if maybe it was applied to the incorrect base
+ code. Another frequent problem is that it may be possible that the patch didn't include all the new
+ files you have added. If we happen to have any comments that you as submitter are expected to
+ address (and in the overwhelming majority of cases, we will), you will be asked to send an updated
+ patch. It is not uncommon to see several rounds of such reviews, so this can get very complicated
+ very quickly. Note that you should not add your issue to any milestone; the Stork team has a process to go
+ through issues that are unassigned to any milestone. Having an issue in GitLab ensures that the patch will never be
+ forgotten and it will show up on our GitLab reports. It's not required, but it will be much appreciated if
+ you send a short note to the stork-dev mailing list explaining what you did with the code and
+ announce the issue number.
+
+- Send a tarball with your modified code. This is really the worst way one can contribute a
+ patch, because someone will need to find out which
+ version the code was based on and generate the patch. It's not rocket science, but it
+ is time-consuming if the Stork developer does not know the version in advance. The mailing
+ list has a limit on message sizes (for good reason), so you'll likely need to upload it
+ somewhere else first. Stork developers often don't pick up new issues instantly, so it may have to wait
+ weeks before the tarball is looked at. The tarball does not benefit from most of the advantages
+ mentioned for GitLab or GitHub, like the ability to easily update the code, have a meaningful
+ discussion, or see what the exact scope of changes are. Nevertheless, if we are given the choice of
+ getting a tarball or not getting a patch at all, we prefer tarballs. Just keep in mind that
+ processing a tarball is really cumbersome for the Stork developers, so it may take significantly longer
+ than other ways.
+
+## Going Through a Review
+
+Once you make your patch available using one of the ways above, please create a GitLab issue. While
+we can create one ourselves, we prefer the original submitter
+to do it as he or she has the best understanding of the purpose of the change and should have any
+extra information about OS, version, why it was done this specific way, etc. If there is no MR and no
+GitLab issue, you risk the issue not showing up on ISC's radar. Depending on the subjective
+importance and urgency as perceived by the ISC engineering team, the issue or MR will be assigned to
+a milestone.
+
+At some point, a Stork developer will do a review, but it may not happen immediately. Unfortunately,
+developers are usually working under a tight schedule, so any extra unplanned review work may take
+a while. Having said that, we value external contributions very much and will do whatever we can to
+review patches in a timely manner. Don't get discouraged if your patch is not accepted after the first
+review. To keep the code quality high, we use the same review processes for external patches as we do
+for internal code. It may take several cycles of review/updated patch submissions before the code is finally
+accepted. The nature of the review process is that it emphasizes areas that need improvement. If you
+are not used to the review process, you may get the impression that all the feedback is negative. It is
+not: even the Stork developers seldom see initial reviews that say "All OK please merge."
+
+Once the process is almost complete, the developer will likely ask you how you would like to be
+credited (for example, by first and last name, by nickname, by company name, or
+anonymously). Typically we will add a note to the ChangeLog.md file, as well as set you as the author of the
+commit applying the patch and update the contributors section in the AUTHORS file. If the
+contributed feature is big or critical for whatever reason, it may also be mentioned in release
+notes.
+
+Sadly, we sometimes see patches that are submitted and then the submitter never responds to our
+comments or requests for an updated patch. Depending on the nature of the patch, we may either fix
+the outstanding issues on our own and get another Stork developer to review them, or the issue may end
+up in our Outstanding milestone. When a new release is started, we go through the issues in
+Outstanding, select a small number of them, and move them to the current milestone. Keep
+that in mind if you plan to submit a patch and forget about it. We may accept it eventually, but
+it's a much, much faster process if you participate in it.
+
+#### Thank you for contributing your time and experience to the Stork project!
diff --git a/ChangeLog.md b/ChangeLog.md
new file mode 100644
index 0000000..1b4f7f9
--- /dev/null
+++ b/ChangeLog.md
@@ -0,0 +1,1734 @@
+Stork 1.7.0 released on 2022-10-12.
+
+* 248 [bug] slawek
+
+ Fixed a bug continuously producing false disconnect and reconnect events
+ when Kea used a host backend without support for listing (e.g., RADIUS).
+ (Gitlab #817)
+
+* 247 [bug] slawek
+
+ Fixed a bug in the post-install hook when Stork was installed from the
+ packages on Debian with busybox. The package hooks now treat SystemD as
+ an optional dependency. The Stork user is joined to the Kea group by
+ default. Added installation CI checks for all package types.
+ (Gitlab #749, #860, #867, #869)
+
+* 246 [func] marcin
+
+ DHCP high availability state is included in the troubleshooting dump
+ tarball.
+ (Gitlab #819)
+
+* 245 [func] marcin
+
+ Stork server and agent reload upon receiving the SIGHUP signal.
+ (Gitlab #703)
+
+* 244 [bug] marcin
+
+ Fixed a bug on the application page causing wrong daemon tab selection.
+ (Gitlab #772)
+
+* 243 [doc] marcin
+
+ Described the configuration review usage in the ARM.
+ (Gitlab #847)
+
+* 242 [bug] marcin
+
+ Fixed a bug in the Stork server causing stale HA services when the HA
+ configuration has been updated in Kea. It could sometimes result in
+ showing outdated HA status on the dashboard page.
+ (Gitlab #818)
+
+* 241 [build] slawek
+
+ Integrated Storybook framework with the Stork build system for faster and
+ easier UI development.
+ (Gitlab #845)
+
+* 240 [func] marcin
+
+ Allow three levels of DHCP options encapsulation when a new host
+ reservation is created or an existing host reservation is updated.
+ Preliminary support for standard DHCP option definitions has been added.
+ A few standard DHCPv6 option definitions have been defined in Stork,
+ allowing to recognize option fields returned by the Kea servers
+ accurately.
+ (Gitlab #837)
+
+Stork 1.6.0 released on 2022-09-07.
+
+* 239 [build] slawek
+
+ Upgraded Angular to version 14.2.0.
+ (Gitlab #849)
+
+* 238 [bug] slawek
+
+ Prometheus exporter for BIND 9 sends the HTTP requests with an empty body.
+ BIND 9.18 periodically rejects requests that include a body, causing errors
+ in gathering the statistics.
+ (Gitlab #798)
+
+* 237 [func] slawek
+
+ Added the possibility to enable or disable the configuration review
+ checkers globally or only for selected daemons.
+ (Gitlab #610)
+
+* 236 [build] slawek
+
+ Refactored the FreeBSD and OpenBSD support for building agent packages.
+ (Gitlab #193)
+
+* 235 [func] marcin
+
+ Enable updating host reservations in Stork.
+ (Gitlab #838)
+
+* 234 [build] slawek
+
+ Added scripts for building native APK packages with Stork Server and
+ Stork Agent. They are prepared for Alpine 3.15.
+ (Gitlab #736)
+
+* 233 [func] slawek
+
+ Added two Kea configuration checkers. The first one finds the overlapping
+ subnets based on the subnet prefixes. The second checker validates the
+ subnet prefixes.
+ (Gitlab #763)
+
+* 232 [func] marcin
+
+ Display DHCP options in the host reservation view.
+ (Gitlab #827)
+
+* 231 [func] slawek
+
+ Added a system test validating Stork upgrade to the most recent version
+ using Cloudsmith repository.
+ (Gitlab #746)
+
+* 230 [bug] slawek
+
+ Fixed a problem with running the demo by shell script on macOS.
+ (Gitlab #824)
+
+* 229 [bug] slawek
+
+ Corrected a bug in the host puller, causing the Stork Server not to notice
+ an update in the reserved hostnames in Kea configuration when neither an
+ IP address nor DHCP identifier has been changed.
+ (Gitlab #814)
+
+* 228 [build] marcin
+
+ Migrated to PrimeFlex 3.0.1.
+ (Gitlab #731)
+
+Stork 1.5.0 released on 2022-07-13.
+
+* 227 [bug] slawek
+
+ Fixed the security vulnerabilities reported by the Github Dependabot.
+ (Gitlab #805)
+
+* 226 [func] slawek
+
+ Added a shell script to run the demo only with docker-compose. The users
+ can try the Stork with minimal effort.
+ (Gitlab #761)
+
+* 225 [func] marcin
+
+ Added multiple enhancements in the form for creating new host
+ reservations. The form now checks if the specified IP addresses belong
+ to the selected subnet. The maximum size of the DHCP identifier is
+ limited to 40 hexadecimal digits. Host reservations list can be refreshed
+ with a button above the list.
+ (Gitlab #728)
+
+* 224 [doc] marcin
+
+ Documented specification of the DHCP options with host reservations and
+ how to delete a host reservation.
+ (Gitlab #794)
+
+* 223 [bug] slawek
+
+ Fixed lease utilization statistics calculations for the HA pairs. The
+ statistics of the assigned addresses and delegated prefixes are no longer
+ doubled. Only the active server's leases are counted.
+ (Gitlab #710)
+
+* 222 [build] slawek
+
+ Introduced golang 1.18 and upgraded go-related dependencies.
+ (Gitlab #788)
+
+* 221 [func] marcin
+
+ Added an ability to specify DHCP options in the host reservations.
+ (Gitlab #725)
+
+* 220 [func] marcin
+
+ Enabled deleting host reservations from the Kea servers running host_cmds
+ hook library.
+ (Gitlab #785)
+
+* 219 [func] slawek
+
+ Refactored the system tests framework to use Docker instead of LXD. The
+ tests are more straightforward, readable, stable, faster, and require less
+ disk space.
+ (Gitlab #709)
+
+* 218 [bug] slawek
+
+ Ruby dependencies and their structure are now explicitly specified to
+ ensure that identical versions are used in all environments to avoid
+ problems with incompatible packages.
+ (Gitlab #781)
+
+* 217 [bug] slawek
+
+ Changed the permissions of the systemD service files used in DEB and RPM
+ packages. They are now more restricted and don't produce the writable
+ warning.
+ (Gitlab #783)
+
+* 216 [bug] slawek
+
+ Fixed non-visible navbar menus on small resolutions.
+ (Gitlab #698)
+
+* 215 [doc] slawek
+
+  Changed the bikeshedding font color to black, i.e. the same as all "normal"
+  text, to unify with other projects' documentation styles. This has the best
+  contrast and is way less distracting than it used to be.
+ (Gitlab #782)
+
+* 214 [bug] marcin
+
+ Fixed Stork binaries build date injection. Before this fix, the binaries
+ reported unset build date.
+ (Gitlab #744)
+
+* 213 [bug] marcin
+
+ Fixed broken password strength indicators.
+ (Gitlab #740)
+
+Stork 1.4.0 released on 2022-06-01.
+
+* 212 [bug] slawek
+
+ Corrected a bug, which resulted in returning a null value instead of a
+ list of events in a machine dump tarball.
+ (Gitlab #743)
+
+* 211 [bug] slawek
+
+ The Stork server no longer sends statistics queries to the Kea
+ servers not using the stat_cmds hooks library. Sending such
+ queries caused unnecessary commands processing by the Kea
+ servers and excessive error logs in Stork.
+ (Gitlab #742)
+
+* 210 [bug] slawek
+
+ Fixed the rake tasks that ran the database in a Docker container but were
+ connecting to the database on localhost.
+ (Gitlab #733)
+
+* 209 [func] slawek
+
+ Log a warning message when no monitored application is detected.
+ (Gitlab #713)
+
+Stork 1.3.0 released on 2022-05-11.
+
+* 208 [doc] marcin
+
+ Added "Creating Host Reservations" section in the ARM.
+ (Gitlab #729)
+
+* 207 [func] slawek
+
+ Refactored the Rakefile responsible for running the Stork demo. The demo
+ builds faster, uses the Docker layer caching, and produces smaller images.
+ (Gitlab #709)
+
+* 206 [bug] marcin
+
+ Disable escaping special characters in the machine dumps. It improves
+ the dumps' readability.
+ (Gitlab #665)
+
+* 205 [func] marcin
+
+ It is now possible to create host reservations in Kea using a
+ form on the host reservations page.
+ (Gitlab #717, #720)
+
+* 204 [build] kpfleming
+
+ Enable automatic Stork services restart with systemd on failure.
+ (Gitlab #721)
+
+* 203 [doc] sgoldlust
+
+ Editorial and grammatical corrections in docs, logs, and code comments.
+ (Gitlab #718)
+
+* 202 [func] slawek
+
+ Refactored the Rakefile responsible for building, installing, and testing
+ Stork. It results in changes to the usage of some rake tasks and the
+ specification of their arguments.
+ (Gitlab #709)
+
+Stork 1.2.0 released on 2022-03-02.
+
+* 201 [bug] slawek
+
+ Prevent issues with parsing and storing large Kea
+ configurations in the Stork database
+ (Gitlab #682)
+
+* 200 [bug] marcin
+
+ stork-tool db-create command creates the pgcrypto extension.
+ (Gitlab #701)
+
+* 199 [build] marcin
+
+ Updated Angular to version 13.2.2 and PrimeNG to version 13.1.0.
+ (Gitlab #690)
+
+Stork 1.1.0 released on 2022-02-02.
+
+* 198 [bug] slawek
+
+ Removed the underline on hover the Stork logo on the login page.
+ (Gitlab #669)
+
+* 197 [bug] slawek
+
+ Fix the utilization calculations for address and delegated
+ prefix statistics returned by the Kea DHCP servers. Stork
+ corrects the calculations by taking into account the
+ out-of-pool reservations that are not returned by the
+ DHCP servers in the statistics.
+ (Gitlab #560)
+
+* 196 [func] marcin
+
+ Improved Stork Server performance while it gathers host reservations
+ from the Kea servers. The server only updates the host reservations
+ in its database when it detects at least one change in the gathered
+ reservations. Moreover, it runs Kea configuration reviews only when
+ it detects at least one host reservation change.
+ (Gitlab #681)
+
+* 195 [bug] marcin
+
+ Renamed --token command line flag of the stork-agent to --server-token.
+ (Gitlab #625)
+
+* 194 [func] marcin
+
+ Display Kea and Bind access points in the app tab.
+ (Gitlab #586)
+
+* 193 [doc] sgoldlust
+
+  Stork documentation review.
+  (Gitlab #646)
+
+* 192 [func] slawek
+
+ Support for the large statistic values. It handles
+ address/NA/PD counters for IPv6 subnets, large shared
+ networks, and globals. It fixed the problem when these
+ counters exceeded the int64 range. Prepared the Stork
+ for large statistic values from Kea.
+ (Gitlab #670)
+
+* 191 [func] marcin
+
+ stork-tool facilitates db-create and db-password-gen commands
+  to conveniently create a PostgreSQL database for the Stork Server.
+ (Gitlab #620)
+
+* 190 [func] slawek
+
+ Added a flag to the Stork Agent to disable collecting
+ per subnet stats from Kea. It allows limiting data shared
+ with Prometheus/Grafana.
+ (Gitlab #614)
+
+* 189 [func] marcin
+
+ Various DHCP client identifiers (e.g. flex-id, circuit-id) can
+ be displayed in a textual format and toggled between this and
+ the hexadecimal format. It is now also possible to search host
+ reservations by an identifier using the textual format.
+ (Gitlab #639)
+
+* 188 [bug] slawek
+
+ Fixed the UI overlapping when subnet utilization is greater than 100%.
+ It improves the usability of the dashboard until the utilization
+ problems are finally resolved.
+ (Gitlab #560)
+
+* 187 [func] marcin
+
+ Config reviews are scheduled automatically after getting updated
+ host reservations via the host_cmds hooks library.
+ (Gitlab #680)
+
+* 186 [func] slawek
+
+ Implemented the support for Kea and Kea CA configuration
+ with comments. Stork (Server or Agent) can parse the JSONs
+ with C-Style single-line and block comments, and single-line
+ comments started with hash.
+ (Gitlab #264)
+
+* 185 [build] marcin
+
+ Upgraded go-pg package from version 9 to version 10.
+ (Gitlab #678)
+
+* 184 [func] marcin
+
+ Implemented new Kea configuration checkers. The first one
+ verifies if there are any empty shared networks or networks
+ with a single subnet. The second one verifies if there are
+ subnets containing no pools and no host reservations. The
+ third one verifies if there are subnets containing only out
+ of the pool addresses and delegated prefixes, in which case
+ the out-of-pool host reservation mode can be enabled.
+ (Gitlab #672)
+
+* 183 [build] marcin
+
+ Stork now uses golang 1.17 and protoc-gen-go 1.26.
+ (Gitlab #652)
+
+* 182 [bug] marcin
+
+ Fixed a bug in the host reservation database model. The bug caused
+ issues with updating a host reservation when it contained no IP
+ addresses.
+ (Gitlab #677)
+
+* 181 [bug] marcin
+
+ Corrected an issue in the database migrations which caused the
+ Stork 1.0.0 server to fail to start when the database contained
+ host reservations.
+ (Gitlab #676)
+
+Stork 1.0.0 released on 2021-12-08.
+
+* 180 [doc] tomek
+
+ Prometheus and Grafana alerting mechanisms are described briefly.
+ (Gitlab #600)
+
+* 179 [build] slawek
+
+ Renamed Stork Agent configuration variable STORK_AGENT_ADDRESS
+ to STORK_AGENT_HOST. This change requires modifications in the
+ existing agent.env files.
+ (Gitlab #641)
+
+* 178 [doc] slawek
+
+ Extended the comments in the files with environment variables.
+ Unified them with the man pages.
+ (Gitlab #632)
+
+* 177 [bug] slawek
+
+ Stork calculates properly the subnet, shared network,
+ and global utilizations. Fixed the problem with showing
+ more used addresses than available.
+ (Gitlab #560)
+
+* 176 [doc] slawek
+
+ Added the troubleshooting section in the documentation.
+ It contains some hints on how to resolve the agent-related
+ issues.
+ (Gitlab #475)
+
+* 175 [doc] slawek
+
+ Renamed STORK_ENABLE_METRICS server environment variable
+ to STORK_SERVER_ENABLE_METRICS.
+ (Gitlab #621)
+
+* 174 [bug] slawek
+
+ Ensured that the agent registration over IPv6 works correctly
+ excluding link-local scope.
+ (Gitlab #447)
+
+* 173 [build] marcin
+
+ Upgraded UI to use Angular and Primeng 13.
+ (Gitlab #606)
+
+* 172 [bug] marcin
+
+ Fixed a bug in the Stork server, which caused stale subnets,
+ hosts, and shared networks after reconfiguring a monitored
+ Kea server.
+ (Gitlab #473)
+
+* 171 [func] slawek
+
+ Dump machine configuration to file. After pressing the button
+ in the UI, all data related to a specific machine (database entries,
+ configurations, logs) are packed into a single tarball. Next,
+ they can be easily shared with technical support (e.g. as an
+ email attachment).
+ (Gitlab #43)
+
+* 170 [func] marcin
+
+ Kea configuration review can now be requested from the Kea
+ daemon tab in the UI. In addition, the server automatically
+ re-reviews the configurations whenever new configuration
+ checkers are available in the new Stork releases.
+ (Gitlab #609)
+
+* 169 [func] marcin
+
+ Server database connection can be protected with TLS.
+ (Gitlab #403)
+
+Stork 0.22.0 released on 2021-11-05.
+
+* 168 [func] slawek
+
+  The Stork Agent now supports the Basic Authentication introduced
+  in Kea 1.9.0. Users can define the credentials used to
+  establish a connection with the Kea CA.
+ (Gitlab #347)
+
+* 167 [func] slawek
+
+  The Stork Server now has the ability to export metrics to
+  Prometheus. It reports the machine states and pool utilization.
+ (Gitlab #576)
+
+* 166 [bug] slawek
+
+ Fixed the problem with too many log messages about
+ updating a machine state. Stork doesn't report that
+ a machine was updated anymore.
+ (Gitlab #595)
+
+* 165 [func] slawek
+
+ The Stork Agent reports the metrics to Prometheus with the
+ subnet prefix instead of sequential ID if the subnet_cmds
+ is installed.
+ (Gitlab #574)
+
+* 164 [func] marcin
+
+ Implemented Kea configuration review mechanism. It runs checks
+ on the Kea server configurations, and displays found issues in
+ the Kea daemon tabs.
+ (Gitlab #461)
+
+* 163 [func] marcin
+
+ Implemented host_cmds_presence configuration checker. It
+ verifies if the host_cmds hooks library is loaded when hosts
+ backends are used.
+ (Gitlab #601)
+
+* 162 [build] andrei
+
+ Rebuilt CI image to upgrade openssl and renew certificate
+ following the LetsEncrypt root certificate expiration on
+ the 30th of September 2021. The CI image now also has the psql
+ client preinstalled.
+ (Gitlab #596)
+
+Stork 0.21.0 released on 2021-10-06.
+
+* 161 [func] slawek
+
+ The Stork Agent now supports communication with Kea over TLS.
+ It automatically detects if the Kea Control Agent is configured
+ to use TLS.
+ (Gitlab #527)
+
+* 160 [build] slawek
+
+ Fix failed pipeline issues - bump CentOS version and related
+ packages, change some unit tests to avoid crashes in CI
+ environment.
+ (Gitlab #552)
+
+* 159 [bug] slawek
+
+ Eliminated memory leaks from the Stork Web UI.
+ (Gitlab #105)
+
+Stork 0.20.0 released on 2021-09-08.
+
+* 158 [build] marcin
+
+ Corrected issues with the nginx-server.conf, an example Nginx
+ configuration file providing the reverse proxy setup for Stork.
+ The proxy now correctly forwards calls to download the Stork
+ Agent installation script. The updated configuration also
+ allows for accurately determining the Stork server URL while
+ generating the downloaded script.
+ (Gitlab #557)
+
+* 157 [func] godfryd, slawek
+
+ Added cert-import command to stork-tool. This allows
+ importing CA key and cert, and server key and cert.
+ (Gitlab #570)
+
+* 156 [build] marcin
+ Running unit tests no longer requires specifying a password
+ explicitly. Renamed database connection specific environment
+ variables in the stork-tool.
+ (Gitlab #555)
+
+* 155 [func] slawek
+
+ Added resolving the include statement in Kea configuration when
+ an agent is detecting applications.
+ (Gitlab #564)
+
+* 154 [bug] slawek
+
+ Prevent agent re-registration after its restart.
+ (Gitlab #528 and #558)
+
+* 153 [bug] slawek
+
+ Corrected an issue with fetching Stork events from the
+ databases running on PostgreSQL 10. Also, the Stork
+ server requires PostgreSQL version 10 or later.
+ (Gitlab #571)
+
+* 152 [build] marcin
+ Resolved an issue with building Stork packages in Docker on
+ the MacOS.
+ (Gitlab #490)
+
+* 151 [func] slawek
+
+ Obfuscate sensitive Kea configuration parts displayed using
+ JSON viewer.
+ (Gitlab #561)
+
+Stork 0.19.0 released on 2021-08-11.
+
+* 150 [bug] godfryd
+
+ Fixed reading STORK_AGENT_PROMETHEUS_KEA_EXPORTER_ADDRESS
+ and STORK_AGENT_PROMETHEUS_BIND9_EXPORTER_ADDRESS environment
+ variables in Stork Agent.
+ (Gitlab #559)
+
+* 149 [func] godfryd
+
+ Upgraded Angular and PrimeNG to version 12.x.
+ (Gitlab #405)
+
+Stork 0.18.0 released on 2021-06-02.
+
+* 148 [func] godfryd
+
+ Added including Grafana dashboard JSON files to RPM
+ and deb packages.
+ (Gitlab #544)
+
+* 147 [func] godfryd
+
+ Added system tests for collecting statistics from
+ various versions of Kea app.
+ (Gitlab #439)
+
+* 146 [func] marcin
+
+ Added new tab displaying selected host reservation's details.
+ The tab includes the information about the allocated leases
+ for the reservation, e.g. if the reservation is in use by the
+ client owning the reservation or there is a conflict (the lease
+ is allocated to a different client). It also indicates whether
+ the matching lease is declined, expired or there are no matching
+ leases.
+ (Gitlab #530)
+
+* 145 [func] godfryd
+
+ Refactored stork-db-migrate tool to stork-tool and added
+ commands for certificates management.
+ (Gitlab #515)
+
+* 144 [func] slawek
+
+ Added Kea daemon configuration preview in JSON format.
+ (Gitlab #531)
+
+* 143 [bug] ymartin-ovh
+
+ Fixed honoring the listen-only flags in Stork Agent.
+ (Gitlab #536)
+
+* 142 [func] marcin
+
+ Updated Stork demo to expose new features: leases search,
+ Kea database backends and files locations.
+ (Gitlab #542)
+
+* 141 [func] godfryd
+
+ Fixed and improved detecting various versions of BIND 9.
+ (Gitlab #474)
+
+Stork 0.17.0 released on 2021-05-07.
+
+* 140 [func] godfryd
+
+ Added displaying number of unauthorized machines on machines
+ page in select button.
+ (Gitlab #492)
+
+* 139 [func] marcin
+
+ New information presented in the Kea tab includes
+ locations of a lease file, forensic logging file and the
+ information about database backends used by the particular
+ Kea instance.
+ (Gitlab #299)
+
+* 138 [func] marcin
+
+ Implemented declined leases search.
+ (Gitlab #510)
+
+* 137 [func] godfryd
+
+ Added system tests for new agent registration.
+ (Gitlab #507)
+
+Stork 0.16.0 released on 2021-04-07.
+
+* 136 [func] marcin
+
+ Implemented Leases Search.
+ (Gitlab #509)
+
+* 135 [func] godfryd
+
+ Added Grafana dashboard for DHCPv6. Enabled generating
+ DHCPv6 traffic in Stork Simulator. Adjusted Stork demo
+ to handle DHCPv6 traffic.
+ (Gitlab #176)
+
+* 134 [bug] godfryd
+
+ Fixed getting host address for listening in agent.
+ (Gitlab #504)
+
+Stork 0.15.0 released on 2021-03-03.
+
+* 133 [doc] andrei
+
+ Spell checks
+ (Gitlab #497)
+
+* 132 [doc] sgoldlust
+
+ Updates to the Stork ARM.
+ (Gitlab #476)
+
+* 131 [ui] tomek
+
+ Added tooltips for the Grafana links on the dashboard and
+ subnets view.
+ (Gitlab #470)
+
+* 130 [func] marcin
+
+ Added a dialog box in the UI to rename apps.
+ (Gitlab #477)
+
+* 129 [doc] godfryd, marcin
+
+ Documented secure communication channel between the Stork Server
+ and the agents in the ARM. The new agent installation and
+ registration methods were described.
+ (Gitlab #486)
+
+* 128 [func] godfryd, marcin
+
+ Updated Stork demo setup to use new machines registration methods.
+ Machines automatically request registration using the agent token
+ method. Their registration can be approved in the machines view.
+ (Gitlab #485)
+
+* 127 [func] godfryd, tomek, marcin
+
+ Secured agent-server channel part 3. Implemented agent deployment
+ using script downloaded from the server. The script installs
+ deb/rpm packages with stork agent. Then the script registers
+ current machine in the server performing key and certs
+ exchange. Enabled TLS to gRPC traffic between agent and server
+ using certs that are set up during agent registration. Added
+ instruction on machines page how to install an agent. Added UI for
+ presenting and regenerating server token.
+ (Gitlab #483)
+
+* 126 [func] godfryd
+
+ This is the second part of secured agent-server channel
+ implementation. Added code for registering a machine in the server
+ and performing key and certs exchange but it is not used fully
+ yet. Added server-token and agent-token based agent
+ authorizations. Added REST API for presenting and regenerating
+ server token, but it is not used in UI yet. Updated content of
+ reference agent.env agent config file.
+ (Gitlab #481)
+
+* 125 [func] marcin
+
+ Assign friendly names to the apps monitored in Stork. The apps'
+ names are auto-generated using the following scheme:
+ [app-type]@[machine-address]%[app-unique-id], e.g.,
+ kea@machine1.example.org%123. The [app-unique-id] is not appended
+ to the name of the first app of the given type on the
+ particular machine. Thus, the name can be kea@machine1.example.org.
+ The auto-generated apps' names are presented in the Web UI instead
+ of the previously used app ID. The names are not yet editable by a
+ user.
+ (Gitlab #458)
+
+* 124 [func] godfryd
+
+ The first part of secured agent-server channel implementation.
+ Added generating root CA and server keys and certs,
+ and server token generation during server startup.
+ (Gitlab #479)
+
+* 123 [bug] marcin
+
+ Corrected an issue with refreshing the events list on the page
+ displaying the machine information. Previously, when switched
+ to a different tab, the events list could remain stale.
+ (Gitlab #463)
+
+* 122 [func] godfryd
+
+ Migrated command line processing in agent from jessevdk/go-flags
+ to urfave/cli/v2. Thanks to this it is possible to define commands
+ in command line. Previously only switches were possible in command
+ line. This is a preparation for new agent command: register.
+ (Gitlab #468)
+
+Stork 0.14.0 released on 2020-12-09.
+
+* 121 [func] marcin
+
+ Events received over SSE and presented on various Stork pages are
+ now filtered and only the events appropriate for the current view
+ are shown. Prior to this change all events were always shown.
+ (Gitlab #429)
+
+* 120 [func] marcin
+
+ When Stork server pulls updated Kea configurations it detects those
+ configurations that did not change since last update using a fast
+ hashing algorithm. In case when there was no configuration change
+ for a daemon, Stork skips processing subnets and/or hosts within
+ this configuration. This improves efficiency of the configuration
+ pull and update. In addition, when configuration change is detected,
+ an event is displayed informing about such change in the web UI.
+ (Gitlab #460)
+
+* 119 [doc] tomek
+
+ Prometheus and Grafana integration is now documented. Also, updated
+ requirements section pointing out that stat_cmds hook is needed for
+ Stork to show Kea statistics correctly.
+ (Gitlab #433, #451)
+
+* 118 [bug] marcin
+
+ Prevent an issue whereby Stork server would attempt to fetch updated
+ machine state while the request to add this machine is still being
+ processed. This used to cause data conflict errors in the logs and
+ network congestion.
+ (Gitlab #446)
+
+* 117 [build] marcin
+
+ Upgraded Go from 1.13.5 to 1.15.5 and golangcilint from 1.21.0 to
+ 1.33.0.
+ (Gitlab #457)
+
+* 116 [perf] marcin
+
+ Improved performance of connecting large Kea installation with many
+ subnets to Stork. Adding subnets to the database is now much more
+ efficient as it avoids extensive subnet lookups. Instead it uses
+ indexing techniques.
+ (Gitlab #421)
+
+Stork 0.13.0 released on 2020-11-06.
+
+* 115 [func] marcin
+
+ Improved presentation of the HA server scopes. Added a help
+ tip describing expected HA scopes in various cases.
+ (Gitlab #387)
+
+* 114 [bug] godfryd
+
+ The links on the dashboard to subnets and shared networks have been
+ adjusted so they take into account DHCP version. This way subnets and
+ shared network pages automatically set filtering by protocol version
+ based on parameters provided in URL.
+ (Gitlab #389)
+
+* 113 [bug] godfryd
+
+ Fixed handling renamed statistics from Kea. In Kea 1.8 some
+ of the statistics have been renamed, e.g. total-addreses
+ to total-addresses. Now Stork supports both of the cases.
+ (Gitlab #413)
+
+* 112 [bug] godfryd
+
+ Fixed handling situation when IP address of Kea Control Agent has
+ changed. Till now Stork was not able to detect this and was still
+ communicating to the old address. Now it checks if address has
+ changed and updates it in the database.
+ (Gitlab #409)
+
+* 111 [bug] marcin
+
+ Corrected presentation of the HA state in the dashboard and
+ the HA status panel in cases when HA is enabled for a server
+ but the HA state information was not fetched yet. In such
+ cases a spinner icon and the 'fetching...' text is now
+ presented.
+ (Gitlab #277)
+
+* 110 [bug] marcin
+
+ The rake build_agent task now supports building the agent
+ using wget versions older than 1.19. Prior to this change,
+ the agent build was failing on Debian 9.
+ (Gitlab #423)
+
+* 109 [doc] tomek
+
+ Updated Prerequisites section. We now have a single list of
+ supported systems.
+ (Gitlab #431)
+
+* 108 [test] tomek, marcin
+
+ Corrected and extended existing boilerplate WebUI unit tests.
+ (Gitlab #164)
+
+* 107 [bug] godfryd
+
+ Fixed problem of adding Kea with 4500 subnets. Now messages
+ with Kea configuration sent from Stork Agent to Stork Server
+ are compressed so it is possible to send huge configurations.
+ Added new Kea instance to Stork demo with 7000 subnets.
+ (Gitlab #398)
+
+* 106 [doc] godfryd
+
+ Added documentation for Stork system tests. The documentation
+ describes how to set up the environment for running the tests,
+ how to run them and how to develop them.
+ (Gitlab #427)
+
+Stork 0.12.0 released on 2020-10-14.
+
+* 105 [func] godfryd
+
+ Added a new page with events table that allows filtering and
+ paging events. Improved event tables on dashboard, machines and
+ applications pages. Enabling and disabling monitoring now
+ generates events.
+ (Gitlab #380)
+
+* 104 [bug] matthijs
+
+ Stork was unable to parse inet_spec if there were multiple addresses
+ in the 'allow' clause. Also fix the same bug for 'keys'.
+ (Gitlab #411)
+
+* 103 [func] godfryd
+
+ Introduced breadcrumb that shows current location in Stork
+ web application.
+ (Gitlab #337)
+
+* 102 [func] tomek
+
+ The stork-db-migrate tool can now migrate up and down to specific
+ schema versions. The SQL tracing now works and can be used to
+ export SQL schema to external file.
+ (Gitlab #366)
+
+Stork 0.11.0 released on 2020-09-04.
+
+* 101 [func] godfryd
+
+ Merged Stork DHCP Traffic Simulator and Stork DNS Traffic
+ Simulator into one web application called Stork Environment
+ Simulator. Added there capabilities for adding all present
+ machines in demo setup and ability to stop and start Stork Agents,
+ Kea and BIND 9 daemons. This allows simulation of communication
+ issues between applications, Stork Agents and Stork Server.
+ (Gitlab #380)
+
+* 101 [func] marcin
+
+ Restrict log viewer's access to the remote files. The log viewer
+ can only access log files belonging to the monitored application.
+ (Gitlab #348)
+
+* 100 [func] godfryd
+
+ Improved user experience of updating machine address/port.
+ Improved visual aspects. Added refreshing state from the machine
+ after changing the address.
+ (Gitlab #283)
+
+* 99 [func] godfryd
+
+ The DHCP dashboard now is presenting only monitored daemons.
+ The daemons that have monitoring switched off are not visible
+ in the dashboard.
+ (Gitlab #365)
+
+* 98 [bug] marcin
+
+ Corrected an issue causing false errors about broken communication
+ with the monitored Kea application after the application was
+ brought back online.
+ (Gitlab #384)
+
+* 97 [bug] godfryd
+
+ Improved layout of various tables so that they are displayed
+ correctly on smaller screens. Fixed the address of the machine
+ displayed in the tables (previously it always showed 127.0.0.1).
+ (Gitlab #295)
+
+* 96 [doc] matthijs
+
+ Add documentation on monitoring the BIND 9 application.
+ (Gitlab #382)
+
+* 95 [func] godfryd
+
+ Fixed presenting an application status on a machine tab
+ with BIND 9 application. Previously it was always red/inactive.
+ Now it is presented the same way as it is for Kea app: status
+ per each daemon of an app.
+ (Gitlab #379)
+
+* 94 [bug] marcin
+
+ Fixed an issue whereby the user was unable to login to Stork
+ when database password contained upper case letters. In addition,
+ passwords with spaces and quotes are now also supported.
+ (Gitlab #361)
+
+* 93 [func] marcin
+
+ Login credentials are passed in the message body rather than as
+ query parameters. In addition, the user information is obfuscated
+ when db tracing is enabled.
+ (Gitlab #375)
+
+Stork 0.10.0 released on 2020-08-13.
+
+* 92 [func] godfryd
+
+ Improved presenting application status on machines page. Now,
+ instead of summary app status, there are presented statuses for
+ each daemon of given application.
+ (Gitlab #297, #282)
+
+* 91 [doc] tomek
+
+ Update man pages and installation instructions.
+ (Gitlab #202, #266, #307)
+
+* 90 [ui] tomek
+
+ Clarified machines page, added tooltips. Updated color scheme
+ to improve readability of wide tables.
+ (Gitlab #112, #293)
+
+* 90 [bug] marcin
+
+ Fixed an issue with refreshing log displayed within the log viewer.
+ The issue was triggered by the periodic updates of the information
+ about monitored apps. As a result of the updates the log file
+ identifiers were changing which resulted in an error message
+ informing that the viewed file no longer exists.
+ (Gitlab #364)
+
+* 89 [func] godfryd
+
+ Changed md5 to blowfish as the hashing algorithm used to store
+ passwords in the PostgreSQL database.
+ (Gitlab #356)
+
+* 88 [bug] godfryd
+
+ Fixed upgrading RPM agent and server packages. There was a problem
+ of re-adding stork-agent and stork-server users that already exist
+ in case of upgrade.
+ (Gitlab #334)
+
+* 87 [doc] marcin
+
+ Described Kea log viewer in the ARM.
+ (Gitlab #349)
+
+* 86 [func] tmark
+
+ Added tool tip to RPS columns on DHCP dashboard.
+ (Gitlab #363)
+
+* 85 [bug] marcin
+
+ Fixed regression in the log viewer functionality which removed links
+ to the log files on the Kea app pages. In addition, improved
+ error message presentation on the log viewer pages.
+ (Gitlab #359)
+
+* 84 [func] godfryd
+
+ Added stop/start monitoring button to better control which services
+ are monitored. Communication failures now generate events that are
+ recorded in the events system. Machine view now shows events.
+ (Gitlab #324, #339)
+
+* 83 [func] tmark
+
+ Added RPS (Response Per Second) statistics to DHCP Dashboard
+ (Gitlab #252)
+
+* 82 [func] marcin
+
+ Viewing the tail of the remote log files is enabled in the UI.
+ (Gitlab #344)
+
+* 81 [func] matthijs
+
+ Add more query details to BIND 9 exporter and Grafana dashboard:
+ queries by duration, which transport protocol is used, packet sizes.
+ (Gitlab #63)
+
+* 80 [func] marcin
+
+ List of loggers used by Kea server is fetched and displayed in the
+ Kea application tab.
+ (Gitlab #342)
+
+* 79 [ui] vicky, tomek, marcin
+
+ Added explicit link to DHCP dashboard.
+ (Gitlab #280)
+
+* 78 [bug] godfryd
+
+ Fixed crashes when empty requests were sent to ReST API endpoints
+ for users and machines.
+ (Gitlab #310, #311, #312)
+
+Stork 0.9.0 released on 2020-07-01.
+
+* 77 [bug] matthijs
+
+ BIND 9 process collector would not be created if named process was
+ started after Stork Agent.
+ (Gitlab #325)
+
+* 76 [func] marcin
+
+ Pool utilization in the Stork dashboard is shown with a progress bar.
+ (Gitlab #235)
+
+* 75 [bug] matthijs
+
+ Bind exporter did not unregister all Prometheus collectors on
+ shutdown.
+ (Gitlab #326)
+
+* 74 [bug] marcin
+
+ Fixed a security problem whereby an unlogged user had access to some
+ restricted pages. If the unlogged user tries to access a restricted
+ page, the user is redirected to the login page. If the user tries
+ to access a page without proper privileges, the HTTP 403 page is
+ displayed.
+ (Gitlab #119)
+
+* 73 [func] marcin
+
+ Monitor communication issues between Stork and the applications.
+ If there is a communication problem with any app it is highlighted
+ via appropriate icon and a text that describes the problem. The
+ server logs were adjusted to indicate if the communication issue
+ is new or has been occurring for a longer period of time.
+ (Gitlab #305)
+
+* 72 [func] tomek
+
+ Implemented version reporting in agent and server.
+ (Gitlab #265)
+
+Stork 0.8.0 released on 2020-06-10.
+
+* 71 [bug] godfryd
+
+ Prevent Stork Agent crashes encountered when unknown statistics
+ was returned by Kea.
+ (Gitlab #316)
+
+* 70 [func] matthijs
+
+ Implemented Bind exporter and embedded it in Stork Agent.
+ It is based on bind_exporter:
+ https://github.com/prometheus-community/bind_exporter
+ (Gitlab #218)
+
+* 69 [func] godfryd
+
+ Implemented basic events mechanism. The events pertaining to
+ machines, apps, daemons, subnets and other entities are displayed
+ on the dashboard page. The server-sent events (SSE) mechanism is
+ used by the browser to refresh the list of events.
+ (Gitlab #275)
+
+* 68 [func] marcin
+
+ Display last failure detected by High Availability for a daemon.
+ (Gitlab #308)
+
+* 67 [func] marcin
+
+ Hostname reservations are now fetched from Kea servers and displayed
+ in the UI. It is also possible to filter hosts by hostname
+ reservations.
+ (Gitlab #303)
+
+* 66 [bug] marcin
+
+ Corrected a bug which caused presenting duplicated subnets when
+ the subnets were filtered by text. This issue occurred when
+ multiple pools belonging to a subnet were matched by the
+ filtering text.
+ (Gitlab #245)
+
+* 65 [func] marcin
+
+ Extended High Availability information is displayed for Kea
+ versions 1.7.8 and later.
+ (Gitlab #276)
+
+* 64 [func] godfryd
+
+ Changed the syntax for search expressions (`is:<flags>` and
+ `not:<flag>`). E.g. `is:global` should be used instead of just
+ `global`.
+ (Gitlab #267)
+
+* 63 [func] tmark
+
+ Added --listen-prometheus-only and --listen-stork-only command line
+ flags to stork-agent.
+ (Gitlab #213)
+
+Stork 0.7.0 released on 2020-05-08.
+
+* 62 [func] marcin
+
+ Global host reservations in Kea are shown in the UI.
+ (Gitlab #263)
+
+* 61 [func] godfryd
+
+ Implemented global search. It allows for looking across different
+ entity types.
+ (Gitlab #256)
+
+* 60 [func] marcin
+
+ HA state is presented in the dashboard.
+ (Gitlab #251)
+
+* 59 [func] marcin
+
+ The list of hosts now includes a tag indicating if the host
+ has been specified in the Kea configuration file or a host
+ database. In addition, a bug has been fixed which caused some
+ hosts to be associated with more then one Kea app, even when
+ only one of them actually had them configured.
+ (Gitlab #246)
+
+* 58 [func] godfryd
+
+ Improved presenting Kea daemons on Kea app page. There have
+ been added links to subnet, shared network and host reservations
+ pages with filtering set to given app id.
+ (Gitlab #241)
+
+* 57 [bug] marcin
+
+ Fixed a bug in the HA service detection when new Kea app was
+ being added. The visible side effect of this bug was the lack
+ of the link to the remote server app in the HA status view
+ in the UI.
+ (Gitlab #240)
+
+* 56 [func] godfryd
+
+ Added links to Grafana. Added web page for managing global
+ settings.
+ (Gitlab #231)
+
+* 55 [bug] godfryd
+
+ Fixed starting Stork server: now if password to database
+ is set to empty it does not ask for password in terminal.
+ It asks only when the STORK_DATABASE_PASSWORD environment
+ variable does not exist.
+ (Gitlab #203)
+
+* 54 [func] marcin
+
+ Improved Kea High Availability status monitoring. The status is
+ cached in the database and thus it is available even if the
+ HA partners are offline. The presented status now includes
+ connectivity status between Stork and the Kea servers, the
+ time of the last failover event and others.
+ (Gitlab #226)
+
+* 53 [func] godfryd
+
+ Added a dashboard presenting DHCP and DNS overview.
+ (Gitlab #226)
+
+* 52 [func] godfryd
+
+ Added links to BIND 9 manual and Kea manual in Help menu.
+ (Gitlab #221)
+
+* 51 [bug] matthijs
+
+ Added querying named stats from Bind 9 apps periodically.
+ (Gitlab #211)
+
+Stork 0.6.0 released on 2020-04-03.
+
+* 50 [bug] marcin
+
+ Corrected a bug which caused unexpected deletion of the
+ host reservations fetched from the Kea configuration
+ files.
+ (Gitlab #225)
+
+* 49 [func] matthijs
+
+ Updated Prometheus & Grafana in the demo installation with BIND 9.
+
+ Implemented BIND 9 exporter in Go and embedded it in Stork
+ Agent for showing Cache Hit Ratio.
+
+ Implemented DNS traffic simulator as web app for the demo
+ installation. Internally it runs a single query with dig, or
+ starts flamethrower (a DNS performance tool) for selected server
+ with indicated parameters.
+ (Gitlab #10)
+
+* 48 [doc] marcin, sgoldlust
+
+ Documented the use of Host Reservations in Stork ARM.
+ (Gitlab #223)
+
+* 47 [func] marcin
+
+ Stork server periodically fetches host reservations from the Kea
+ instances having host_cmds hooks library loaded.
+ (Gitlab #214)
+
+* 46 [func] marcin
+
+ Host reservations are listed in the UI. It is possible to filter
+ reservations by reserved IP address or host identifier value.
+ (Gitlab #210)
+
+* 45 [func] matthijs
+
+ Retrieve some cache statistics from named and show Cache Hit
+ Ratio on the dashboard.
+ (Gitlab #64)
+
+* 44 [func] godfryd
+
+ Added possibility to run Stork server without Nginx or Apache,
+ i.e. static files can be served by Stork server. Still it is
+ possible to run Stork server behind Nginx or Apache which
+ will do reverse proxy or serve static files.
+ (Gitlab #200)
+
+* 43 [func] marcin
+
+ Implemented data model for IP reservations and detection of IP
+ reservations specified within a Kea configuration file. Detected
+ reservations are not yet used in the UI.
+ (Gitlab #188, #206)
+
+* 42 [func] godfryd
+
+ Prepared scripts for building native RPM and deb packages
+ with Stork server and Stork agent (total 4 packages).
+ They are prepared for Ubuntu 18.04 and CentOS 8.
+ (Gitlab #187)
+
+* 41 [func] godfryd
+
+ Added settings in Stork. They are stored in database, in setting
+ table. No UI for settings yet.
+ (Gitlab #169)
+
+* 40 [func] godfryd
+
+ Exposed access to API docs and ARM docs in new Help menu.
+ (Gitlab #199)
+
+* 39 [func] matthijs
+
+ Update the data model such that applications can have multiple
+ access points. Parse named.conf to detect both "control"
+ and "statistics" access point.
+ (Gitlab #170)
+
+Stork 0.5.0 released on 2020-03-06.
+
+* 38 [doc] tomek
+
+ Updated Stork ARM with regards to networks view, installation
+ instructions and Java, Docker dependencies.
+ (Gitlab #163, #183)
+
+* 37 [bug] marcin
+
+ Improved shared network detection mechanism to take into account
+ the family of the subnets belonging to the shared network. This
+ prevents the issue whereby two IPv4 and IPv6 subnets belonging
+ to separate shared networks having the same name would be shown
+ as belonging to the same shared network in the UI.
+ (Gitlab #180)
+
+* 36 [func] godfryd
+
+ Added presenting IP addresses utilization within subnets and
+ subnet statistics, e.g. a number of assigned addresses, in the UI
+ (subnets and shared networks pages). Statistics are fetched
+ from the monitored Kea apps periodically and can be manually
+ refreshed in the UI.
+ (Gitlab #178, #185)
+
+* 35 [func] marcin
+
+ Corrected a bug in the Stork server which caused failures when
+ parsing prefix delegation pools from the Kea configurations.
+ The Server subsequently refused to monitor the Kea apps including
+ prefix delegation pools.
+ (Gitlab #179)
+
+* 34 [func] godfryd
+
+ Added support for Prometheus & Grafana in the demo installation.
+ Added preconfigured Prometheus & Grafana containers to
+ docker-compose. Added Kea and BIND 9 Prometheus exporters.
+
+ Implemented Kea exporter in Go and embedded it in Stork Agent.
+ It is based on kea_exporter in python:
+ https://github.com/mweinelt/kea-exporter
+
+ Implemented DHCP simulator as web app for the demo installation.
+ Internally it starts perfdhcp for selected subnet with indicated
+ parameters.
+ (Gitlab #167)
+
+* 33 [func] marcin
+
+ New data model is now used by the server to hold the information
+ about the subnets and shared networks. There is no visible change
+ to the UI yet. This change mostly affects how the data is stored
+ in the database.
+ (Gitlab #172)
+
+* 32 [func] marcin
+
+ Created data model for shared networks, subnets and pools and
+ implemented mechanism to match configurations of Kea apps with
+ these structures in the database. This mechanism is not yet used
+ by the server when adding new apps via the UI.
+ (Gitlab #165)
+
+* 31 [func] godfryd
+
+ Added querying lease stats from Kea apps periodically.
+ Stats are not yet presented in the UI.
+ (Gitlab #166)
+
+* 30 [func] marcin
+
+ Created data model for services and implemented a mechanism to
+ automatically associate a new Kea application with a High
+ Availability service when the application is configured to use
+ High Availability. This mechanism is not yet used by the server
+ when the Kea application is added via the UI. The usage of
+ this mechanism will be added in future tickets.
+ (Gitlab #137)
+
+* 29 [func] godfryd
+
+ Added initial support for DHCP shared networks. They are presented
+ on dedicated page. Subnets page now is also presenting subnets
+ that belong to shared networks.
+ (Gitlab #151)
+
+Stork 0.4.0 released on 2020-02-05.
+
+* 28 [doc] tomek
+
+ Subnets inspection is now documented.
+ (Gitlab #149)
+
+* 27 [func] matthijs
+
+ Show more status information for named: up time, last reloaded,
+ number of zones.
+ (Gitlab #140)
+
+* 26 [func] godfryd
+
+ Added initial support for DHCP subnets. They are presented
+ on dedicated page and on apps' pages. For now only these subnets
+ are listed which do not belong to shared networks.
+ (Gitlab #47)
+
+* 25 [func] matthijs
+
+ Improve getting configuration of the BIND 9 application.
+ Stork now retrieves the control address and port from
+ named.conf, as well as the rndc key, and uses this to interact
+ with the named daemon.
+ (Gitlab #130)
+
+* 24 [bug] godfryd
+
+ Apps are now deleted while the machine is being deleted.
+ (Gitlab #123)
+
+Stork 0.3.0 released on 2020-01-10.
+
+* 23 [func] godfryd
+
+ Added presenting number of all and misbehaving applications
+ on the dashboard page. If there are no applications added yet,
+ the dashboard redirects to the list of connected machines.
+ (Gitlab #120)
+
+* 22 [doc] marcin
+
+ Updated Stork ARM. Added documentation of the High Availability
+ status monitoring with Kea. Added new sections describing
+ applications management.
+ (Gitlab #122)
+
+* 21 [func] godfryd
+
+ Added new Rake tasks to build and start two containers
+ with Kea instances running as High Availability partners.
+ (Gitlab #126)
+
+* 20 [func] matthijs
+
+ Add BIND 9 application to Stork. Detects running BIND 9
+ application by looking for named process. Uses rndc to retrieve
+ version information.
+ (Gitlab #106)
+
+* 19 [func] marcin
+
+ Kea High Availability status is presented on the Kea application
+ page.
+ (Gitlab #110)
+
+* 18 [func] marcin
+
+ Logged user can now change his/her password. Also, users can be
+ associated with one of the two default permission groups: super-admin
+ and admin. The former can manage users' accounts. The latter is not
+ allowed to manage other users' accounts.
+ (Gitlab #97)
+
+* 17 [func] marcin
+
+ Implemented a mechanism by which it is possible to send a command
+ from the Stork server to Kea via Stork Agent and Kea Control
+ Agent.
+ (Gitlab #109)
+
+Stork 0.2.0 released on 2019-12-04.
+
+* 16 [bug] marcin
+
+ Fixed an issue with closing a tab on the user management page.
+ (Gitlab #100)
+
+* 15 [doc] tomek
+
+ Users and machines management is now documented in the Stork ARM.
+ (Gitlab #99)
+
+* 14 [doc] sgoldlust
+
+ Introduced new Stork logo in the documentation.
+ (Gitlab #95)
+
+* 13 [build] tomek
+
+ Extended the build system to be able to run on MacOS. Also updated
+ installation instructions regarding how to build and run Stork
+ natively.
+ (Gitlab #87)
+
+* 12 [func] marcin
+
+ Enabled creation and editing of Stork user accounts in the UI.
+ (Gitlab #25)
+
+* 11 [func] marcin
+
+ Stork server automatically migrates the database schema to the latest
+ version upon startup.
+ (Gitlab #33)
+
+Stork 0.1.0 released on 2019-11-06.
+
+* 10 [doc] marcin
+
+ Updated ARM with a description how to sign in to the system using the
+ default administrator account.
+ (Gitlab #84)
+
+* 9 [doc] tomek
+
+ Initial ARM version added.
+ (Gitlab #27)
+
+* 8 [func] marcin
+
+ Enabled sign-in/sign-out mechanism with HTTP sessions based on
+ cookies. The default admin account has been created with default
+ credentials.
+ (Gitlab #22)
+
+* 7 [func] godfryd
+
+ Added initial implementation of the page which allows for adding new
+ machines and listing them. The missing part of this implementation is
+ the actual storage of the machines in the database. In addition, the
+ agent has been extended to return a state of the machine.
+ (Gitlab #23)
+
+* 6 [func] godfryd
+
+ Added initial implementation of Stork Agent. Implemented basic
+ communication between Stork Agent and Stork Server using gRPC
+ (Server initiates connection to Agent).
+ (Gitlab #26)
+
+* 5 [func] marcin
+
+ Added stork-db-migrate tool to be used for migrating the database
+ schema between versions and returning the current schema version
+ number. Also, added basic schema with SQL tables holding system
+ users and session information.
+ (Gitlab #20)
+
+* 4 [doc] tomek
+
+ Added several text files: AUTHORS (lists project authors and
+ contributors), ChangeLog.md (contains all new user visible changes)
+ and CONTRIBUTING.md (Contributor's guide, explains how to get your
+ patches accepted in Stork project in a seamless and easy way).
+ (Gitlab #17)
+
+* 3 [func] godfryd
+
+ Added Swagger-based API for defining ReST API to Stork server.
+ Added initial Web UI based on Angular and PrimeNG. Added Rakefile
+ for building whole solution. Removed gin-gonic dependency.
+ (Gitlab #19)
+
+* 2 [build] godfryd
+
+ Added initial framework for backend, using go and gin-gonic.
+ (Gitlab #missing)
+
+* 1 [func] franek
+
+ Added initial proposal for Grafana dashboard.
+ (Gitlab #6)
+
+
+For complete code revision history, see
+ http://gitlab.isc.org/isc-projects/stork
+
+LEGEND
+* [bug] General bug fix. This is generally a backward compatible change,
+ unless it's deemed to be impossible or very hard to keep
+ compatibility to fix the bug.
+* [build] Compilation and installation infrastructure change.
+* [doc] Update to documentation. This shouldn't change run time behavior.
+* [func] new feature. In some cases this may be a backward incompatible
+ change, which would require a bump of major version.
+* [sec] Security hole fix. This is no different than a general bug
+ fix except that it will be handled as confidential and will cause
+ security patch releases.
+* [perf] Performance related change.
+* [ui] User Interface change.
+
+*: Backward incompatible or operational change.
diff --git a/Dangerfile b/Dangerfile
new file mode 100644
index 0000000..09884bf
--- /dev/null
+++ b/Dangerfile
@@ -0,0 +1,42 @@
+fail "Please provide a summary in the Merge Request description to help your colleagues to understand the MR purpose." if gitlab.mr_body.length < 5
+
+if git.modified_files.include? "Dangerfile"
+ warn "This MR modifies Dangerfile! Watch for the rules!"
+end
+
+# Checking MR size
+if not gitlab.mr_body.include?("#huge-sorry")
+ warn("Split the MR into separate ones. It's really big.") if git.lines_of_code > 3000
+ fail("Do not submit MRs over 5000 lines of code.") if git.lines_of_code > 5000
+end
+
+# Note when MRs don't reference a milestone, make the warning stick around on subsequent runs
+has_milestone = gitlab.mr_json["milestone"] != nil
+warn("This MR does not refer to an existing milestone", sticky: true) unless has_milestone
+
+# check commits' comments
+commit_lint.check warn: :all
+
+# check gitlab issue in commit message
+git.commits.each do |c|
+ m = c.message.match(/^\[\#(\d+)\]\ (.*)/)
+ if not m
+ warn "No GitLab issue in commit message: #{c}"
+ gl_issue_msg = nil
+ else
+ gl_issue_msg = m.captures[0]
+ end
+
+ mr_branch = gitlab.branch_for_head
+ m = mr_branch.match(/^(\d+).*/)
+ if not m
+ fail "Branch name does not start with GitLab issue: #{mr_branch}"
+ gl_issue_br = nil
+ else
+ gl_issue_br = m.captures[0]
+ end
+
+ if gl_issue_msg and gl_issue_br and gl_issue_msg != gl_issue_br
+ warn "GitLab issue ##{gl_issue_msg} in msg of commit #{c} and issue ##{gl_issue_br} from branch #{mr_branch} do not match"
+ end
+end
diff --git a/LICENSE b/LICENSE
new file mode 100644
index 0000000..ef3e728
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,374 @@
+
+Mozilla Public License Version 2.0
+==================================
+
+1. Definitions
+--------------
+
+1.1. "Contributor"
+ means each individual or legal entity that creates, contributes to
+ the creation of, or owns Covered Software.
+
+1.2. "Contributor Version"
+ means the combination of the Contributions of others (if any) used
+ by a Contributor and that particular Contributor's Contribution.
+
+1.3. "Contribution"
+ means Covered Software of a particular Contributor.
+
+1.4. "Covered Software"
+ means Source Code Form to which the initial Contributor has attached
+ the notice in Exhibit A, the Executable Form of such Source Code
+ Form, and Modifications of such Source Code Form, in each case
+ including portions thereof.
+
+1.5. "Incompatible With Secondary Licenses"
+ means
+
+ (a) that the initial Contributor has attached the notice described
+ in Exhibit B to the Covered Software; or
+
+ (b) that the Covered Software was made available under the terms of
+ version 1.1 or earlier of the License, but not also under the
+ terms of a Secondary License.
+
+1.6. "Executable Form"
+ means any form of the work other than Source Code Form.
+
+1.7. "Larger Work"
+ means a work that combines Covered Software with other material, in
+ a separate file or files, that is not Covered Software.
+
+1.8. "License"
+ means this document.
+
+1.9. "Licensable"
+ means having the right to grant, to the maximum extent possible,
+ whether at the time of the initial grant or subsequently, any and
+ all of the rights conveyed by this License.
+
+1.10. "Modifications"
+ means any of the following:
+
+ (a) any file in Source Code Form that results from an addition to,
+ deletion from, or modification of the contents of Covered
+ Software; or
+
+ (b) any new file in Source Code Form that contains any Covered
+ Software.
+
+1.11. "Patent Claims" of a Contributor
+ means any patent claim(s), including without limitation, method,
+ process, and apparatus claims, in any patent Licensable by such
+ Contributor that would be infringed, but for the grant of the
+ License, by the making, using, selling, offering for sale, having
+ made, import, or transfer of either its Contributions or its
+ Contributor Version.
+
+1.12. "Secondary License"
+ means either the GNU General Public License, Version 2.0, the GNU
+ Lesser General Public License, Version 2.1, the GNU Affero General
+ Public License, Version 3.0, or any later versions of those
+ licenses.
+
+1.13. "Source Code Form"
+ means the form of the work preferred for making modifications.
+
+1.14. "You" (or "Your")
+ means an individual or a legal entity exercising rights under this
+ License. For legal entities, "You" includes any entity that
+ controls, is controlled by, or is under common control with You. For
+ purposes of this definition, "control" means (a) the power, direct
+ or indirect, to cause the direction or management of such entity,
+ whether by contract or otherwise, or (b) ownership of more than
+ fifty percent (50%) of the outstanding shares or beneficial
+ ownership of such entity.
+
+2. License Grants and Conditions
+--------------------------------
+
+2.1. Grants
+
+Each Contributor hereby grants You a world-wide, royalty-free,
+non-exclusive license:
+
+(a) under intellectual property rights (other than patent or trademark)
+ Licensable by such Contributor to use, reproduce, make available,
+ modify, display, perform, distribute, and otherwise exploit its
+ Contributions, either on an unmodified basis, with Modifications, or
+ as part of a Larger Work; and
+
+(b) under Patent Claims of such Contributor to make, use, sell, offer
+ for sale, have made, import, and otherwise transfer either its
+ Contributions or its Contributor Version.
+
+2.2. Effective Date
+
+The licenses granted in Section 2.1 with respect to any Contribution
+become effective for each Contribution on the date the Contributor first
+distributes such Contribution.
+
+2.3. Limitations on Grant Scope
+
+The licenses granted in this Section 2 are the only rights granted under
+this License. No additional rights or licenses will be implied from the
+distribution or licensing of Covered Software under this License.
+Notwithstanding Section 2.1(b) above, no patent license is granted by a
+Contributor:
+
+(a) for any code that a Contributor has removed from Covered Software;
+ or
+
+(b) for infringements caused by: (i) Your and any other third party's
+ modifications of Covered Software, or (ii) the combination of its
+ Contributions with other software (except as part of its Contributor
+ Version); or
+
+(c) under Patent Claims infringed by Covered Software in the absence of
+ its Contributions.
+
+This License does not grant any rights in the trademarks, service marks,
+or logos of any Contributor (except as may be necessary to comply with
+the notice requirements in Section 3.4).
+
+2.4. Subsequent Licenses
+
+No Contributor makes additional grants as a result of Your choice to
+distribute the Covered Software under a subsequent version of this
+License (see Section 10.2) or under the terms of a Secondary License (if
+permitted under the terms of Section 3.3).
+
+2.5. Representation
+
+Each Contributor represents that the Contributor believes its
+Contributions are its original creation(s) or it has sufficient rights
+to grant the rights to its Contributions conveyed by this License.
+
+2.6. Fair Use
+
+This License is not intended to limit any rights You have under
+applicable copyright doctrines of fair use, fair dealing, or other
+equivalents.
+
+2.7. Conditions
+
+Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted
+in Section 2.1.
+
+3. Responsibilities
+-------------------
+
+3.1. Distribution of Source Form
+
+All distribution of Covered Software in Source Code Form, including any
+Modifications that You create or to which You contribute, must be under
+the terms of this License. You must inform recipients that the Source
+Code Form of the Covered Software is governed by the terms of this
+License, and how they can obtain a copy of this License. You may not
+attempt to alter or restrict the recipients' rights in the Source Code
+Form.
+
+3.2. Distribution of Executable Form
+
+If You distribute Covered Software in Executable Form then:
+
+(a) such Covered Software must also be made available in Source Code
+ Form, as described in Section 3.1, and You must inform recipients of
+ the Executable Form how they can obtain a copy of such Source Code
+ Form by reasonable means in a timely manner, at a charge no more
+ than the cost of distribution to the recipient; and
+
+(b) You may distribute such Executable Form under the terms of this
+ License, or sublicense it under different terms, provided that the
+ license for the Executable Form does not attempt to limit or alter
+ the recipients' rights in the Source Code Form under this License.
+
+3.3. Distribution of a Larger Work
+
+You may create and distribute a Larger Work under terms of Your choice,
+provided that You also comply with the requirements of this License for
+the Covered Software. If the Larger Work is a combination of Covered
+Software with a work governed by one or more Secondary Licenses, and the
+Covered Software is not Incompatible With Secondary Licenses, this
+License permits You to additionally distribute such Covered Software
+under the terms of such Secondary License(s), so that the recipient of
+the Larger Work may, at their option, further distribute the Covered
+Software under the terms of either this License or such Secondary
+License(s).
+
+3.4. Notices
+
+You may not remove or alter the substance of any license notices
+(including copyright notices, patent notices, disclaimers of warranty,
+or limitations of liability) contained within the Source Code Form of
+the Covered Software, except that You may alter any license notices to
+the extent required to remedy known factual inaccuracies.
+
+3.5. Application of Additional Terms
+
+You may choose to offer, and to charge a fee for, warranty, support,
+indemnity or liability obligations to one or more recipients of Covered
+Software. However, You may do so only on Your own behalf, and not on
+behalf of any Contributor. You must make it absolutely clear that any
+such warranty, support, indemnity, or liability obligation is offered by
+You alone, and You hereby agree to indemnify every Contributor for any
+liability incurred by such Contributor as a result of warranty, support,
+indemnity or liability terms You offer. You may include additional
+disclaimers of warranty and limitations of liability specific to any
+jurisdiction.
+
+4. Inability to Comply Due to Statute or Regulation
+---------------------------------------------------
+
+If it is impossible for You to comply with any of the terms of this
+License with respect to some or all of the Covered Software due to
+statute, judicial order, or regulation then You must: (a) comply with
+the terms of this License to the maximum extent possible; and (b)
+describe the limitations and the code they affect. Such description must
+be placed in a text file included with all distributions of the Covered
+Software under this License. Except to the extent prohibited by statute
+or regulation, such description must be sufficiently detailed for a
+recipient of ordinary skill to be able to understand it.
+
+5. Termination
+--------------
+
+5.1. The rights granted under this License will terminate automatically
+if You fail to comply with any of its terms. However, if You become
+compliant, then the rights granted under this License from a particular
+Contributor are reinstated (a) provisionally, unless and until such
+Contributor explicitly and finally terminates Your grants, and (b) on an
+ongoing basis, if such Contributor fails to notify You of the
+non-compliance by some reasonable means prior to 60 days after You have
+come back into compliance. Moreover, Your grants from a particular
+Contributor are reinstated on an ongoing basis if such Contributor
+notifies You of the non-compliance by some reasonable means, this is the
+first time You have received notice of non-compliance with this License
+from such Contributor, and You become compliant prior to 30 days after
+Your receipt of the notice.
+
+5.2. If You initiate litigation against any entity by asserting a patent
+infringement claim (excluding declaratory judgment actions,
+counter-claims, and cross-claims) alleging that a Contributor Version
+directly or indirectly infringes any patent, then the rights granted to
+You by any and all Contributors for the Covered Software under Section
+2.1 of this License shall terminate.
+
+5.3. In the event of termination under Sections 5.1 or 5.2 above, all
+end user license agreements (excluding distributors and resellers) which
+have been validly granted by You or Your distributors under this License
+prior to termination shall survive termination.
+
+************************************************************************
+* *
+* 6. Disclaimer of Warranty *
+* ------------------------- *
+* *
+* Covered Software is provided under this License on an "as is" *
+* basis, without warranty of any kind, either expressed, implied, or *
+* statutory, including, without limitation, warranties that the *
+* Covered Software is free of defects, merchantable, fit for a *
+* particular purpose or non-infringing. The entire risk as to the *
+* quality and performance of the Covered Software is with You. *
+* Should any Covered Software prove defective in any respect, You *
+* (not any Contributor) assume the cost of any necessary servicing, *
+* repair, or correction. This disclaimer of warranty constitutes an *
+* essential part of this License. No use of any Covered Software is *
+* authorized under this License except under this disclaimer. *
+* *
+************************************************************************
+
+************************************************************************
+* *
+* 7. Limitation of Liability *
+* -------------------------- *
+* *
+* Under no circumstances and under no legal theory, whether tort *
+* (including negligence), contract, or otherwise, shall any *
+* Contributor, or anyone who distributes Covered Software as *
+* permitted above, be liable to You for any direct, indirect, *
+* special, incidental, or consequential damages of any character *
+* including, without limitation, damages for lost profits, loss of *
+* goodwill, work stoppage, computer failure or malfunction, or any *
+* and all other commercial damages or losses, even if such party *
+* shall have been informed of the possibility of such damages. This *
+* limitation of liability shall not apply to liability for death or *
+* personal injury resulting from such party's negligence to the *
+* extent applicable law prohibits such limitation. Some *
+* jurisdictions do not allow the exclusion or limitation of *
+* incidental or consequential damages, so this exclusion and *
+* limitation may not apply to You. *
+* *
+************************************************************************
+
+8. Litigation
+-------------
+
+Any litigation relating to this License may be brought only in the
+courts of a jurisdiction where the defendant maintains its principal
+place of business and such litigation shall be governed by laws of that
+jurisdiction, without reference to its conflict-of-law provisions.
+Nothing in this Section shall prevent a party's ability to bring
+cross-claims or counter-claims.
+
+9. Miscellaneous
+----------------
+
+This License represents the complete agreement concerning the subject
+matter hereof. If any provision of this License is held to be
+unenforceable, such provision shall be reformed only to the extent
+necessary to make it enforceable. Any law or regulation which provides
+that the language of a contract shall be construed against the drafter
+shall not be used to construe this License against a Contributor.
+
+10. Versions of the License
+---------------------------
+
+10.1. New Versions
+
+Mozilla Foundation is the license steward. Except as provided in Section
+10.3, no one other than the license steward has the right to modify or
+publish new versions of this License. Each version will be given a
+distinguishing version number.
+
+10.2. Effect of New Versions
+
+You may distribute the Covered Software under the terms of the version
+of the License under which You originally received the Covered Software,
+or under the terms of any subsequent version published by the license
+steward.
+
+10.3. Modified Versions
+
+If you create software not governed by this License, and you want to
+create a new license for such software, you may create and use a
+modified version of this License if you rename the license and remove
+any references to the name of the license steward (except to note that
+such modified license differs from this License).
+
+10.4. Distributing Source Code Form that is Incompatible With Secondary
+Licenses
+
+If You choose to distribute Source Code Form that is Incompatible With
+Secondary Licenses under the terms of this version of the License, the
+notice described in Exhibit B of this License must be attached.
+
+Exhibit A - Source Code Form License Notice
+-------------------------------------------
+
+ This Source Code Form is subject to the terms of the Mozilla Public
+ License, v. 2.0. If a copy of the MPL was not distributed with this
+ file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular
+file, then You may include the notice in a location (such as a LICENSE
+file in a relevant directory) where a recipient would be likely to look
+for such a notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - "Incompatible With Secondary Licenses" Notice
+---------------------------------------------------------
+
+ This Source Code Form is "Incompatible With Secondary Licenses", as
+ defined by the Mozilla Public License, v. 2.0.
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..4f644aa
--- /dev/null
+++ b/README.md
@@ -0,0 +1,58 @@
+# Stork
+
+<img align="right" src="/doc/static/stork-square-200px.png">
+
+Stork is an open source ISC project providing a monitoring application and dashboard for
+ISC Kea DHCP and (eventually) ISC BIND 9. A limited configuration management for Kea
+is available and is expected to grow substantially in the near future.
+
+The project is currently in rapid development, with monthly releases rolling out new features.
+See [wiki pages](https://gitlab.isc.org/isc-projects/stork/-/wikis/home) for useful
+links to download page, release notes, self-guided demo, screenshots and much more.
+
+For details, please see the [Stork Administrator Reference Manual](https://stork.readthedocs.io)
+or the [Stork wiki](https://gitlab.isc.org/isc-projects/stork/-/wikis/home).
+
+# Build Instructions
+
+The easiest way to run Stork is to install it using
+[RPM and deb packages](https://stork.readthedocs.io/en/latest/install.html#installing-from-packages).
+The second easiest way is to use Docker (`rake demo:up` or `./stork-demo.sh`). However, it is
+possible to run Stork without Docker. See the Installation section of the Stork ARM.
+
+# Reporting Issues
+
+Please use the issue tracker on [ISC's GitLab](https://gitlab.isc.org/isc-projects/stork/-/issues)
+to report issues and submit feature requests.
+
+# Getting Involved
+
+We have monthly development releases. If you'd like to get involved, feel free to subscribe to the
+[stork-dev mailing list](https://lists.isc.org/mailman/listinfo/stork-dev) or look
+at the [Stork project page](https://gitlab.isc.org/isc-projects/stork).
+We're also on [GitHub](https://github.com/isc-projects/stork).
+
+If you have a patch to send, by far the best way is to submit a
+[merge request (MR) on GitLab](https://gitlab.isc.org/isc-projects/stork/-/merge_requests).
+Stork developers use this system daily and you may expect a reasonably quick response.
+The second alternative is to submit a [pull request (PR) on GitHub](https://github.com/isc-projects/stork/pulls).
+This will also work, but this system is not monitored, so expect a delayed response.
+
+# Screenshots
+
+An example front page of the dashboard looks like this:
+![Stork dashboard](https://gitlab.isc.org/isc-projects/stork/-/wikis/uploads/22cf367aedaaad3ac8e42d066595dd7b/dashboard-1.1.png)
+
+Many more Stork screenshots are available on the [Screenshots gallery](https://gitlab.isc.org/isc-projects/stork/-/wikis/Screenshots).
+
+# Prometheus and Grafana
+
+Stork provides support for statistics export in Prometheus format, which can then easily be shown in Grafana.
+
+An example of Kea dashboard in Grafana, displaying data exported with Stork:
+![grafana-kea4](https://gitlab.isc.org/isc-projects/stork/-/wikis/uploads/97468f53d07c1b6eda7035c30fbd4de3/grafana-kea4.png)
+
+BIND9 dashboard in Grafana, displaying data exported with Stork:
+![grafana-bind2](https://gitlab.isc.org/isc-projects/stork/-/wikis/uploads/6673c0a19962c535bf7e47d9fd0f46e5/grafana-bind2.png)
+
+
diff --git a/Rakefile b/Rakefile
new file mode 100644
index 0000000..f9213c5
--- /dev/null
+++ b/Rakefile
@@ -0,0 +1 @@
+# See rakelib directory for source code of the tasks. \ No newline at end of file
diff --git a/Vagrantfile b/Vagrantfile
new file mode 100644
index 0000000..77677f0
--- /dev/null
+++ b/Vagrantfile
@@ -0,0 +1,9 @@
+Vagrant.configure("2") do |config|
+ config.vm.box = "ubuntu/bionic64"
+
+ config.vm.provider "virtualbox" do |v|
+ v.memory = 4096
+ end
+
+ config.vm.provision "shell", inline: "apt-get install -y rake openjdk-11-jre-headless"
+end
diff --git a/api/dhcp-defs.yaml b/api/dhcp-defs.yaml
new file mode 100644
index 0000000..d6c6d9d
--- /dev/null
+++ b/api/dhcp-defs.yaml
@@ -0,0 +1,411 @@
+# A general purpose type used to differentiate between various
+# IPv4 (or DHCPv4) and IPv6 (or DHCPv6) cases.
+ IPType:
+ type: integer
+ enum: &IPTYPE
+ - 4
+ - 6
+
+# Lease
+
+ Lease:
+ type: object
+ required:
+ - id
+ - appId
+ - appName
+ - cltt
+ - ipAddress
+ - state
+ - subnetId
+ - validLifetime
+ properties:
+ id:
+ type: integer
+ appId:
+ type: integer
+ appName:
+ type: string
+ clientId:
+ type: string
+ cltt:
+ type: integer
+ duid:
+ type: string
+ fqdnFwd:
+ type: boolean
+ fqdnRev:
+ type: boolean
+ hostname:
+ type: string
+ hwAddress:
+ type: string
+ iaid:
+ type: integer
+ ipAddress:
+ type: string
+ leaseType:
+ type: string
+ preferredLifetime:
+ type: integer
+ prefixLength:
+ type: integer
+ state:
+ type: integer
+ subnetId:
+ type: integer
+ userContext:
+ type: string
+ validLifetime:
+ type: integer
+
+ LeasesSearchErredApp:
+ type: object
+ required:
+ - id
+ - name
+ properties:
+ id:
+ type: integer
+ name:
+ type: string
+
+ Leases:
+ type: object
+ properties:
+ items:
+ type: array
+ items:
+ $ref: '#/definitions/Lease'
+ conflicts:
+ type: array
+ items:
+ type: integer
+ erredApps:
+ type: array
+ items:
+ $ref: '#/definitions/LeasesSearchErredApp'
+ total:
+ type: integer
+
+# Option
+
+ DHCPOptionField:
+ type: object
+ properties:
+ fieldType:
+ type: string
+ values:
+ type: array
+ items:
+ type: string
+
+ DHCPOption:
+ type: object
+ properties:
+ code:
+ type: integer
+ alwaysSend:
+ type: boolean
+ encapsulate:
+ type: string
+ fields:
+ type: array
+ items:
+ $ref: '#/definitions/DHCPOptionField'
+ options:
+ type: array
+ items:
+ $ref: '#/definitions/DHCPOption'
+ universe:
+ type: integer
+ enum: *IPTYPE
+
+# Host
+
+ HostIdentifier:
+ type: object
+ properties:
+ idType:
+ type: string
+ idHexValue:
+ type: string
+
+ IPReservation:
+ type: object
+ properties:
+ address:
+ type: string
+
+ LocalHost:
+ type: object
+ properties:
+ appId:
+ type: integer
+ daemonId:
+ type: integer
+ appName:
+ type: string
+ dataSource:
+ type: string
+ options:
+ type: array
+ items:
+ $ref: '#/definitions/DHCPOption'
+ optionsHash:
+ type: string
+
+ Host:
+ type: object
+ properties:
+ id:
+ type: integer
+ subnetId:
+ type: integer
+ subnetPrefix:
+ type: string
+ hostIdentifiers:
+ type: array
+ items:
+ $ref: '#/definitions/HostIdentifier'
+ addressReservations:
+ type: array
+ items:
+ $ref: '#/definitions/IPReservation'
+ prefixReservations:
+ type: array
+ items:
+ $ref: '#/definitions/IPReservation'
+ hostname:
+ type: string
+ localHosts:
+ type: array
+ items:
+ $ref: '#/definitions/LocalHost'
+
+ Hosts:
+ type: object
+ properties:
+ items:
+ type: array
+ items:
+ $ref: '#/definitions/Host'
+ total:
+ type: integer
+
+ CreateHostBeginResponse:
+ type: object
+ properties:
+ id:
+ type: integer
+ format: int64
+ daemons:
+ type: array
+ items:
+ $ref: '#/definitions/KeaDaemon'
+ subnets:
+ type: array
+ items:
+ $ref: '#/definitions/Subnet'
+
+ UpdateHostBeginResponse:
+ type: object
+ properties:
+ id:
+ type: integer
+ format: int64
+ host:
+ $ref: '#/definitions/Host'
+ daemons:
+ type: array
+ items:
+ $ref: '#/definitions/KeaDaemon'
+ subnets:
+ type: array
+ items:
+ $ref: '#/definitions/Subnet'
+
+# Subnet
+
+ LocalSubnet:
+ type: object
+ properties:
+ id:
+ type: integer
+ appId:
+ type: integer
+ daemonId:
+ type: integer
+ appName:
+ type: string
+ machineAddress:
+ type: string
+ machineHostname:
+ type: string
+ stats:
+ type: object
+ statsCollectedAt:
+ type: string
+ format: date-time
+
+ Subnet:
+ type: object
+ properties:
+ id:
+ type: integer
+ subnet:
+ type: string
+ pools:
+ type: array
+ items:
+ type: string
+ sharedNetwork:
+ type: string
+ clientClass:
+ type: string
+ addrUtilization:
+ type: number
+ stats:
+ type: object
+ statsCollectedAt:
+ type: string
+ format: date-time
+ localSubnets:
+ type: array
+ items:
+ $ref: '#/definitions/LocalSubnet'
+
+ Subnets:
+ type: object
+ properties:
+ items:
+ type: array
+ items:
+ $ref: '#/definitions/Subnet'
+ total:
+ type: integer
+
+
+# Shared Network
+
+ SharedNetwork:
+ type: object
+ properties:
+ id:
+ type: integer
+ name:
+ type: string
+ subnets:
+ type: array
+ items:
+ $ref: '#/definitions/Subnet'
+ addrUtilization:
+ type: number
+ stats:
+ type: object
+ statsCollectedAt:
+ type: string
+ format: date-time
+
+ SharedNetworks:
+ type: object
+ properties:
+ items:
+ type: array
+ items:
+ $ref: '#/definitions/SharedNetwork'
+ total:
+ type: integer
+
+# Overview
+
+ Dhcp4Stats:
+ type: object
+ properties:
+ assignedAddresses:
+ type: string
+ format: bigint
+ totalAddresses:
+ type: string
+ format: bigint
+ declinedAddresses:
+ type: string
+ format: bigint
+
+ Dhcp6Stats:
+ type: object
+ properties:
+ assignedNAs:
+ type: string
+ format: bigint
+ totalNAs:
+ type: string
+ format: bigint
+ assignedPDs:
+ type: string
+ format: bigint
+ totalPDs:
+ type: string
+ format: bigint
+ declinedNAs:
+ type: string
+ format: bigint
+
+ DhcpDaemon:
+ type: object
+ properties:
+ machineId:
+ type: integer
+ machine:
+ type: string
+ appId:
+ type: integer
+ appName:
+ type: string
+ appVersion:
+ type: string
+ name:
+ type: string
+ active:
+ type: boolean
+ monitored:
+ type: boolean
+ rps1:
+ type: integer
+ rps2:
+ type: integer
+ haEnabled:
+ type: boolean
+ haState:
+ type: string
+ haFailureAt:
+ type: string
+ format: date-time
+ uptime:
+ type: integer
+ agentCommErrors:
+ type: integer
+ caCommErrors:
+ type: integer
+ daemonCommErrors:
+ type: integer
+
+ DhcpOverview:
+ type: object
+ properties:
+ subnets4:
+ $ref: '#/definitions/Subnets'
+ subnets6:
+ $ref: '#/definitions/Subnets'
+ sharedNetworks4:
+ $ref: '#/definitions/SharedNetworks'
+ sharedNetworks6:
+ $ref: '#/definitions/SharedNetworks'
+ dhcp4Stats:
+ $ref: '#/definitions/Dhcp4Stats'
+ dhcp6Stats:
+ $ref: '#/definitions/Dhcp6Stats'
+ dhcpDaemons:
+ type: array
+ items:
+ $ref: '#/definitions/DhcpDaemon'
diff --git a/api/dhcp-paths.yaml b/api/dhcp-paths.yaml
new file mode 100644
index 0000000..56b9021
--- /dev/null
+++ b/api/dhcp-paths.yaml
@@ -0,0 +1,365 @@
+ /leases:
+ get:
+ summary: Get leases allocated by DHCP servers.
+ description: >-
+ This call searches for leases allocated by monitored DHCP servers.
+ The text parameter may contain an IP address, delegated prefix,
+ MAC address, client identifier, or hostname. The Stork server
+ tries to identify the specified value type and sends queries to
+ the Kea servers to find a lease or multiple leases.
+ operationId: getLeases
+ tags:
+ - DHCP
+ parameters:
+ - name: text
+ in: query
+ description: >-
+ Should contain an IP address, MAC address, client id or hostname.
+ It is mutually exclusive with the hostId parameter.
+ type: string
+ - name: hostId
+ in: query
+ description: >-
+ Identifier of the host for which leases should be searched. It is
+ mutually exclusive with the text parameter.
+ type: integer
+ responses:
+ 200:
+ description: Success result. It may contain 0, 1 or more leases.
+ schema:
+ $ref: '#/definitions/Leases'
+ default:
+ description: Generic error message.
+ schema:
+ $ref: '#/definitions/ApiError'
+
+ /hosts:
+ get:
+ summary: Get list of DHCP host reservations.
+ description: >-
+ A list of hosts is returned in items field accompanied by total count
+ which indicates total available number of records for given filtering
+ parameters.
+ operationId: getHosts
+ tags:
+ - DHCP
+ parameters:
+ - $ref: '#/parameters/paginationStartParam'
+ - $ref: '#/parameters/paginationLimitParam'
+ - name: appId
+ in: query
+ description: Limit returned list of hosts to these which are served by given app ID.
+ type: integer
+ - name: subnetId
+ in: query
+ description: Limit returned list of hosts to these which belong to a given subnet.
+ type: integer
+ - name: text
+ in: query
+ description: Limit returned list of hosts to the ones containing the given text.
+ type: string
+ - name: global
+ in: query
+ description: >-
+ If true then return only reservations from global scope, if false then return
+          only reservations from subnets, if null then both types of hosts are returned.
+ type: boolean
+ responses:
+ 200:
+ description: List of hosts
+ schema:
+ $ref: "#/definitions/Hosts"
+ default:
+ description: generic error response
+ schema:
+ $ref: "#/definitions/ApiError"
+
+ /hosts/{id}:
+ get:
+ summary: Get host reservation by ID.
+ description: Get host reservation by the database specific ID.
+ operationId: getHost
+ tags:
+ - DHCP
+ parameters:
+ - in: path
+ name: id
+ type: integer
+ required: true
+ description: Host ID.
+ responses:
+ 200:
+ description: Host reservation.
+ schema:
+ $ref: "#/definitions/Host"
+ default:
+ description: generic error response
+ schema:
+ $ref: "#/definitions/ApiError"
+ delete:
+ summary: Delete host by ID.
+ description: Delete host reservation from the DHCP servers.
+ operationId: deleteHost
+ tags:
+ - DHCP
+ parameters:
+ - in: path
+ name: id
+ type: integer
+ required: true
+ description: Host ID.
+ responses:
+ 200:
+ description: Host reservation successfully deleted.
+ default:
+          description: generic error response
+ schema:
+ $ref: "#/definitions/ApiError"
+
+ /hosts/new/transaction:
+ post:
+ summary: Begin transaction for adding new host reservation.
+ description: >-
+ Creates a transaction in config manager to add a new host reservation. It returns
+ current list of the available DHCP servers and subnets. Both are required in
+ the form in which the user specifies the new host reservation.
+ operationId: createHostBegin
+ tags:
+ - DHCP
+ responses:
+ 200:
+ description: New transaction successfully started.
+ schema:
+ $ref: '#/definitions/CreateHostBeginResponse'
+ default:
+ description: generic error response
+ schema:
+ $ref: '#/definitions/ApiError'
+
+ /hosts/new/transaction/{id}:
+ delete:
+ summary: Cancel transaction to add new host reservation.
+ description: Cancels the transaction to add a new host reservation in the config manager.
+ operationId: createHostDelete
+ tags:
+ - DHCP
+ parameters:
+ - in: path
+ name: id
+ type: integer
+ required: true
+ description: Transaction ID returned when the transaction was created.
+ responses:
+ 200:
+ description: Transaction successfully deleted.
+ default:
+ description: generic error response
+ schema:
+ $ref: '#/definitions/ApiError'
+
+ /hosts/new/transaction/{id}/submit:
+ post:
+ summary: Submit transaction adding new host reservation.
+ description: >-
+ Submits a transaction causing the server to create host reservations on
+ respective DHCP servers. It applies and submits the transactions in Stork
+ config manager.
+ operationId:
+ createHostSubmit
+ tags:
+ - DHCP
+ parameters:
+ - in: path
+ name: id
+ type: integer
+ required: true
+ description: Transaction ID returned when the transaction was created.
+ - in: body
+ name: host
+ description: Updated host reservation information.
+ schema:
+ $ref: '#/definitions/Host'
+ responses:
+ 200:
+ description: Host reservation successfully submitted.
+ default:
+ description: generic error response
+ schema:
+ $ref: '#/definitions/ApiError'
+
+ /hosts/{hostId}/transaction:
+ post:
+ summary: Begin transaction for updating an existing host reservation.
+ description: >-
+        Creates a transaction in the config manager to update an existing host reservation.
+ It returns the existing host reservation information, a current list of available
+ DHCP servers and subnets. This information is required in the form in which the
+ user edits host reservation data.
+ operationId: updateHostBegin
+ tags:
+ - DHCP
+ parameters:
+ - in: path
+ name: hostId
+ type: integer
+ required: true
+ description: Host ID to which the transaction pertains.
+ responses:
+ 200:
+ description: New transaction successfully started.
+ schema:
+ $ref: '#/definitions/UpdateHostBeginResponse'
+ default:
+ description: generic error response
+ schema:
+ $ref: '#/definitions/ApiError'
+
+ /hosts/{hostId}/transaction/{id}:
+ delete:
+ summary: Cancel transaction to update a host reservation.
+ description: Cancels the transaction to update a host reservation in the config manager.
+ operationId: updateHostDelete
+ tags:
+ - DHCP
+ parameters:
+ - in: path
+ name: hostId
+ type: integer
+ required: true
+ description: Host ID to which the transaction pertains.
+ - in: path
+ name: id
+ type: integer
+ required: true
+ description: Transaction ID returned when the transaction was created.
+ responses:
+ 200:
+ description: Transaction successfully deleted.
+ default:
+ description: generic error response
+ schema:
+ $ref: '#/definitions/ApiError'
+
+ /hosts/{hostId}/transaction/{id}/submit:
+ post:
+ summary: Submit transaction updating a host reservation.
+ description: >-
+ Submits a transaction causing the server to update host reservations on
+ respective DHCP servers. It applies and submits the transactions in Stork
+ config manager.
+ operationId:
+ updateHostSubmit
+ tags:
+ - DHCP
+ parameters:
+ - in: path
+ name: hostId
+ type: integer
+ required: true
+ description: Host ID to which the transaction pertains.
+ - in: path
+ name: id
+ type: integer
+ required: true
+ description: Transaction ID returned when the transaction was created.
+ - in: body
+ name: host
+ description: Host reservation information.
+ schema:
+ $ref: '#/definitions/Host'
+ responses:
+ 200:
+ description: Host reservation successfully updated.
+ default:
+ description: generic error response
+ schema:
+ $ref: '#/definitions/ApiError'
+
+ /subnets:
+ get:
+ summary: Get list of DHCP subnets.
+ description: >-
+ A list of subnets is returned in items field accompanied by total count
+ which indicates total available number of records for given filtering
+ parameters.
+ operationId: getSubnets
+ tags:
+ - DHCP
+ parameters:
+ - $ref: '#/parameters/paginationStartParam'
+ - $ref: '#/parameters/paginationLimitParam'
+ - name: appId
+ in: query
+ description: Limit returned list of subnets to these which are served by given app ID.
+ type: integer
+ - name: dhcpVersion
+ in: query
+ description: Limit returned list of subnets to either DHCPv4 (4) or DHCPv6 (6).
+ type: integer
+ - name: text
+ in: query
+ description: Limit returned list of subnets to the ones containing indicated text.
+ type: string
+ responses:
+ 200:
+ description: List of subnets
+ schema:
+ $ref: "#/definitions/Subnets"
+ default:
+ description: generic error response
+ schema:
+ $ref: "#/definitions/ApiError"
+
+ /shared-networks:
+ get:
+ summary: Get list of DHCP shared networks.
+ description: >-
+ A list of shared networks is returned in items field accompanied by total count
+ which indicates total available number of records for given filtering
+ parameters.
+ operationId: getSharedNetworks
+ tags:
+ - DHCP
+ parameters:
+ - $ref: '#/parameters/paginationStartParam'
+ - $ref: '#/parameters/paginationLimitParam'
+ - name: appId
+ in: query
+          description: Limit returned list of shared networks to these which are served by given app ID.
+ type: integer
+ - name: dhcpVersion
+ in: query
+          description: Limit returned list of shared networks to either DHCPv4 (4) or DHCPv6 (6).
+ type: integer
+ - name: text
+ in: query
+          description: Limit returned list of shared networks to the ones containing indicated text.
+ type: string
+ responses:
+ 200:
+ description: List of shared networks
+ schema:
+ $ref: "#/definitions/SharedNetworks"
+ default:
+ description: generic error response
+ schema:
+ $ref: "#/definitions/ApiError"
+
+ /overview:
+ get:
+ summary: Get overview of whole DHCP state.
+ description: >-
+ A bunch of different information about DHCP like most utilized subnets and shared networks,
+ and state of all Kea daemons.
+ operationId: getDhcpOverview
+ tags:
+ - DHCP
+ responses:
+ 200:
+ description: Overview of DHCP state.
+ schema:
+ $ref: "#/definitions/DhcpOverview"
+ default:
+ description: generic error response
+ schema:
+ $ref: "#/definitions/ApiError"
diff --git a/api/events-defs.yaml b/api/events-defs.yaml
new file mode 100644
index 0000000..f613cc9
--- /dev/null
+++ b/api/events-defs.yaml
@@ -0,0 +1,24 @@
+ Event:
+ type: object
+ properties:
+ id:
+ type: integer
+ createdAt:
+ type: string
+ format: date-time
+ text:
+ type: string
+ level:
+ type: integer
+ details:
+ type: string
+
+ Events:
+ type: object
+ properties:
+ items:
+ type: array
+ items:
+ $ref: '#/definitions/Event'
+ total:
+ type: integer
diff --git a/api/events-paths.yaml b/api/events-paths.yaml
new file mode 100644
index 0000000..d182f15
--- /dev/null
+++ b/api/events-paths.yaml
@@ -0,0 +1,41 @@
+ /events:
+ get:
+ summary: Get list of most recent events.
+ description: >-
+ A list of most recent events is returned in items field accompanied by total count
+ which indicates total available number of events.
+ operationId: getEvents
+ tags:
+ - Events
+ parameters:
+ - $ref: '#/parameters/paginationStartParam'
+ - $ref: '#/parameters/paginationLimitParam'
+ - name: level
+ in: query
+ description: Show all levels (0), warning and errors (1), errors only (2).
+ type: integer
+ - name: machine
+ in: query
+ description: Machine ID.
+ type: integer
+ - name: appType
+ in: query
+ description: App type, e.g. 'kea' or 'bind9'.
+ type: string
+ - name: daemonType
+ in: query
+ description: Daemon types, e.g. 'named', 'dhcp4', 'dhcp6', 'ca'.
+ type: string
+ - name: user
+ in: query
+ description: User ID.
+ type: integer
+ responses:
+ 200:
+ description: List of events.
+ schema:
+ $ref: "#/definitions/Events"
+ default:
+ description: generic error response
+ schema:
+ $ref: "#/definitions/ApiError"
diff --git a/api/search-defs.yaml b/api/search-defs.yaml
new file mode 100644
index 0000000..b20eece
--- /dev/null
+++ b/api/search-defs.yaml
@@ -0,0 +1,17 @@
+ SearchResult:
+ type: object
+ properties:
+ machines:
+ $ref: '#/definitions/Machines'
+ apps:
+ $ref: '#/definitions/Apps'
+ subnets:
+ $ref: '#/definitions/Subnets'
+ sharedNetworks:
+ $ref: '#/definitions/SharedNetworks'
+ hosts:
+ $ref: '#/definitions/Hosts'
+ users:
+ $ref: '#/definitions/Users'
+ groups:
+ $ref: '#/definitions/Groups'
diff --git a/api/search-paths.yaml b/api/search-paths.yaml
new file mode 100644
index 0000000..fe17f1b
--- /dev/null
+++ b/api/search-paths.yaml
@@ -0,0 +1,25 @@
+ /records:
+ get:
+ summary: Search for records of different types.
+ description: >-
+ A set of lists of records is returned. Each list is made of
+ items field accompanied by total count. Currently the
+ following lists are returned: subnets, shared networks, hosts,
+ machines, applications, users and groups.
+ operationId: searchRecords
+ tags:
+ - Search
+ parameters:
+ - name: text
+ in: query
+ description: Search for records containing the given text.
+ type: string
+ responses:
+ 200:
+ description: Search result. It includes several lists, one per record type.
+ schema:
+ $ref: "#/definitions/SearchResult"
+ default:
+ description: generic error response
+ schema:
+ $ref: "#/definitions/ApiError"
diff --git a/api/services-defs.yaml b/api/services-defs.yaml
new file mode 100644
index 0000000..bec726a
--- /dev/null
+++ b/api/services-defs.yaml
@@ -0,0 +1,540 @@
+ File:
+ type: object
+ properties:
+ filetype:
+ type: string
+ filename:
+ type: string
+
+ LogTarget:
+ type: object
+ properties:
+ id:
+ type: integer
+ readOnly: true
+ name:
+ type: string
+ severity:
+ type: string
+ output:
+ type: string
+
+ LogTail:
+ type: object
+ required:
+ - logTargetOutput
+ - appId
+ - appType
+ - appName
+ - contents
+ properties:
+ logTargetOutput:
+ readOnly: true
+ type: string
+ machine:
+ $ref: '#/definitions/AppMachine'
+ appId:
+ readOnly: true
+ type: integer
+ appType:
+ readOnly: true
+ type: string
+ appName:
+ readOnly: true
+ type: string
+ contents:
+ readOnly: true
+ type: array
+ items:
+ type: string
+ error:
+ readOnly: true
+ type: string
+
+ NewMachineReq:
+ type: object
+ required:
+ - address
+ - agentCSR
+ - agentToken
+ properties:
+ address:
+ type: string
+ agentPort:
+ type: integer
+ agentCSR:
+ type: string
+ description: Agent Certificate Signing Request.
+ serverToken:
+ type: string
+ description: >-
+ A token that is issued by the Stork server. It can be taken
+ from Machines page. If it is provided then an agent will
+ be immediately authorized in the server and will be operational.
+ It may be empty.
+ agentToken:
+ type: string
+ description: >-
+ A token that is generated by an agent. An agent traces it in
+ the logs during startup and stores it in
+ /var/lib/stork-agent/tokens/agent-token.txt. A machine added
+ this way to the Stork server requires separate authorization
+ that can be made in the Stork server UI or using server API.
+ It cannot be empty.
+
+ NewMachineResp:
+ type: object
+ properties:
+ id:
+ type: integer
+ description: The machine ID.
+ serverCACert:
+ type: string
+ readOnly: true
+ description: Server's CA certificate.
+ agentCert:
+ type: string
+ readOnly: true
+ description: Signed agent's certificate.
+
+ Machine:
+ type: object
+ required:
+ - address
+ properties:
+ id:
+ type: integer
+ readOnly: true
+ address:
+ type: string
+ agentPort:
+ type: integer
+ authorized:
+ type: boolean
+ agentToken:
+ type: string
+ agentVersion:
+ type: string
+ readOnly: true
+ hostname:
+ type: string
+ readOnly: true
+ cpus:
+ type: integer
+ readOnly: true
+ cpusLoad:
+ type: string
+ readOnly: true
+ memory:
+ type: integer
+ readOnly: true
+ usedMemory:
+ type: integer
+ readOnly: true
+ uptime:
+ type: integer
+ readOnly: true
+ os:
+ type: string
+ readOnly: true
+ platform:
+ type: string
+ readOnly: true
+ platformFamily:
+ type: string
+ readOnly: true
+ platformVersion:
+ type: string
+ readOnly: true
+ kernelVersion:
+ type: string
+ readOnly: true
+ kernelArch:
+ type: string
+ readOnly: true
+ virtualizationSystem:
+ type: string
+ readOnly: true
+ virtualizationRole:
+ type: string
+ readOnly: true
+ hostID:
+ type: string
+ readOnly: true
+ lastVisitedAt:
+ type: string
+ format: date-time
+ readOnly: true
+ error:
+ type: string
+ readOnly: true
+ apps:
+ type: array
+ items:
+ $ref: '#/definitions/App'
+
+ Machines:
+ type: object
+ properties:
+ items:
+ type: array
+ items:
+ $ref: '#/definitions/Machine'
+ total:
+ type: integer
+
+ AppAccessPoint:
+ type: object
+ properties:
+ type:
+ type: string
+ address:
+ type: string
+ port:
+ type: integer
+ key:
+ type: string
+ useSecureProtocol:
+ type: boolean
+
+ App:
+ type: object
+ properties:
+ id:
+ type: integer
+ readOnly: true
+ name:
+ type: string
+ type:
+ type: string
+ accessPoints:
+ type: array
+ items:
+ $ref: '#/definitions/AppAccessPoint'
+ version:
+ type: string
+ machine:
+ $ref: '#/definitions/AppMachine'
+ details:
+ allOf:
+ - $ref: '#/definitions/AppKea'
+ - $ref: '#/definitions/AppBind9'
+
+ KeaDaemonDatabase:
+ type: object
+ properties:
+ backendType:
+ type: string
+ database:
+ type: string
+ host:
+ type: string
+ dataTypes:
+ type: array
+ items:
+ type: string
+
+ KeaDaemon:
+ type: object
+ properties:
+ id:
+ type: integer
+ pid:
+ type: integer
+ name:
+ type: string
+ active:
+ type: boolean
+ monitored:
+ type: boolean
+ version:
+ type: string
+ extendedVersion:
+ type: string
+ uptime:
+ type: integer
+ reloadedAt:
+ type: string
+ format: date-time
+ hooks:
+ type: array
+ items:
+ type: string
+ files:
+ type: array
+ items:
+ $ref: '#/definitions/File'
+ backends:
+ type: array
+ items:
+ $ref: '#/definitions/KeaDaemonDatabase'
+ agentCommErrors:
+ type: integer
+ caCommErrors:
+ type: integer
+ daemonCommErrors:
+ type: integer
+ logTargets:
+ type: array
+ items:
+ $ref: '#/definitions/LogTarget'
+ app:
+ $ref: '#/definitions/App'
+
+ KeaDaemonConfig:
+ type: object
+ additionalProperties: true
+
+ AppKea:
+ type: object
+ properties:
+ extendedVersion:
+ type: string
+ daemons:
+ type: array
+ items:
+ $ref: '#/definitions/KeaDaemon'
+
+ Bind9Daemon:
+ type: object
+ properties:
+ id:
+ type: integer
+ pid:
+ type: integer
+ name:
+ type: string
+ active:
+ type: boolean
+ monitored:
+ type: boolean
+ version:
+ type: string
+ uptime:
+ type: integer
+ reloadedAt:
+ type: string
+ format: date-time
+ zoneCount:
+ type: integer
+ autoZoneCount:
+ type: integer
+ queryHitRatio:
+ type: number
+ queryHits:
+ type: integer
+ x-omitempty: false
+ queryMisses:
+ type: integer
+ x-omitempty: false
+ agentCommErrors:
+ type: integer
+ rndcCommErrors:
+ type: integer
+ statsCommErrors:
+ type: integer
+
+ AppBind9:
+ type: object
+ properties:
+ daemon:
+ $ref: '#/definitions/Bind9Daemon'
+
+ AppMachine:
+ type: object
+ properties:
+ id:
+ type: integer
+ readOnly: true
+ address:
+ type: string
+ hostname:
+ type: string
+
+ Apps:
+ type: object
+ properties:
+ items:
+ type: array
+ items:
+ $ref: '#/definitions/App'
+ total:
+ type: integer
+
+ AppsStats:
+ type: object
+ properties:
+ keaAppsTotal:
+ type: integer
+ keaAppsNotOk:
+ type: integer
+ bind9AppsTotal:
+ type: integer
+ bind9AppsNotOk:
+ type: integer
+
+ KeaHAServerStatus:
+ type: object
+ properties:
+ age:
+ type: integer
+ appId:
+ type: integer
+ controlAddress:
+ type: string
+ failoverTime:
+ type: string
+ format: date-time
+ id:
+ type: integer
+ inTouch:
+ type: boolean
+ role:
+ type: string
+ scopes:
+ type: array
+ items:
+ type: string
+ state:
+ type: string
+ statusTime:
+ type: string
+ format: date-time
+ commInterrupted:
+ type: integer
+ connectingClients:
+ type: integer
+ unackedClients:
+ type: integer
+ unackedClientsLeft:
+ type: integer
+ analyzedPackets:
+ type: integer
+
+ KeaStatus:
+ type: object
+ properties:
+ daemon:
+ type: string
+ haServers:
+ type: object
+ properties:
+ primaryServer:
+ $ref: '#/definitions/KeaHAServerStatus'
+ secondaryServer:
+ $ref: '#/definitions/KeaHAServerStatus'
+
+ ServiceStatus:
+ type: object
+ properties:
+ status:
+ allOf:
+ - $ref: '#/definitions/KeaStatus'
+
+ ServicesStatus:
+ type: object
+ properties:
+ items:
+ type: array
+ items:
+ $ref: '#/definitions/ServiceStatus'
+
+ ConfigReview:
+ type: object
+ properties:
+ id:
+ type: integer
+ daemonId:
+ type: integer
+ createdAt:
+ type: string
+ format: date-time
+
+ ConfigReport:
+ type: object
+ properties:
+ id:
+ type: integer
+ readOnly: true
+ createdAt:
+ type: string
+ format: date-time
+ checker:
+ type: string
+ content:
+ type: string
+
+ ConfigReports:
+ type: object
+ properties:
+ review:
+ $ref: '#/definitions/ConfigReview'
+ items:
+ type: array
+ items:
+ $ref: '#/definitions/ConfigReport'
+ total:
+ type: integer
+
+ ConfigCheckerState:
+ type: string
+ enum: &CONFIGCHECKERSTATE
+ - "enabled"
+ - "disabled"
+ - "inherit"
+
+ ConfigChecker:
+ type: object
+ required:
+ - name
+ - triggers
+ - selectors
+ - state
+ - globallyEnabled
+ properties:
+ name:
+ type: string
+ readOnly: true
+ triggers:
+ type: array
+ readOnly: true
+ items:
+ type: string
+ selectors:
+ type: array
+ readOnly: true
+ items:
+ type: string
+ state:
+ enum: *CONFIGCHECKERSTATE
+ globallyEnabled:
+ type: boolean
+ readOnly: true
+
+ ConfigCheckers:
+ type: object
+ properties:
+ items:
+ type: array
+ items:
+ $ref: '#/definitions/ConfigChecker'
+ total:
+ type: integer
+
+ ConfigCheckerPreference:
+ type: object
+ properties:
+ name:
+ type: string
+ state:
+ enum: *CONFIGCHECKERSTATE
+
+ ConfigCheckerPreferences:
+ type: object
+ properties:
+ items:
+ type: array
+ items:
+ $ref: '#/definitions/ConfigCheckerPreference'
+ total:
+ type: integer \ No newline at end of file
diff --git a/api/services-paths.yaml b/api/services-paths.yaml
new file mode 100644
index 0000000..82f9c9d
--- /dev/null
+++ b/api/services-paths.yaml
@@ -0,0 +1,682 @@
+ /machines:
+ get:
+ summary: Get list of machines.
+ description: >-
+ It is possible to filter list of machines by several fields. It is also always paged.
+ Default page size is 10.
+ A list of machines is returned in items field accompanied by total count
+ which indicates total available number of records for given filtering
+ parameters.
+ operationId: getMachines
+ tags:
+ - Services
+ parameters:
+ - $ref: '#/parameters/paginationStartParam'
+ - $ref: '#/parameters/paginationLimitParam'
+ - $ref: '#/parameters/filterTextParam'
+ - name: app
+ in: query
+ description: Limit returned list of machines to these which provide given app, possible values 'bind' or 'kea'.
+ type: string
+ - name: authorized
+ in: query
+ description: Indicate if authorized or unauthorized machines should be returned.
+ type: boolean
+ responses:
+ 200:
+ description: List of machines
+ schema:
+ $ref: "#/definitions/Machines"
+ default:
+ description: generic error response
+ schema:
+ $ref: "#/definitions/ApiError"
+ post:
+ summary: Add new machine.
+ description: >-
+ Register a new machine on the server. It requires two parameters: address and agentCSR.
+ It also requires agentToken and optionally serverToken depending on the registration method selected.
+ operationId: createMachine
+ # security disabled because anyone can add machine but it still requires
+ # either server token or manual authorization in web ui
+ security: []
+ tags:
+ - Services
+ parameters:
+ - name: machine
+ in: body
+ description: New machine basic information including CSR.
+ schema:
+ $ref: '#/definitions/NewMachineReq'
+ responses:
+ 200:
+ description: Registration information
+ schema:
+ $ref: '#/definitions/NewMachineResp'
+ 409:
+ description: Already registered
+ headers:
+ Location:
+ type: string
+ default:
+ description: generic error response
+ schema:
+ $ref: "#/definitions/ApiError"
+
+ /machines/directory:
+ get:
+ summary: Get a list of all machines' ids, addresses and/or names.
+ description: >-
+ Returns a list of all machines' ids, addresses and/or names. It excludes detailed
+ information about the machines to return reasonably compact information. Having
+ the machines' names and/or addresses is useful in the forms which require
+ validating user's input against the list of machines present in the system. It
+ may be also useful to create drop down lists holding the machines' addresses
+ or names. Each returned machine address or name is accompanied by its id.
+ operationId: getMachinesDirectory
+ tags:
+ - Services
+ responses:
+ 200:
+ description: List of machine ids and names/addresses.
+ schema:
+ $ref: '#/definitions/Machines'
+ default:
+ description: 'generic error response'
+ schema:
+ $ref: '#/definitions/ApiError'
+
+ /machines/{id}:
+ get:
+ summary: Get machine by ID.
+ description: Get machine by the database specific ID.
+ operationId: getMachine
+ tags:
+ - Services
+ parameters:
+ - in: path
+ name: id
+ type: integer
+ required: true
+ description: Machine ID.
+ responses:
+ 200:
+ description: A machine
+ schema:
+ $ref: "#/definitions/Machine"
+ default:
+ description: generic error response
+ schema:
+ $ref: "#/definitions/ApiError"
+ put:
+ summary: Update machine information.
+ description: Update machine information, e.g. IP address.
+ operationId: updateMachine
+ tags:
+ - Services
+ parameters:
+ - in: path
+ name: id
+ type: integer
+ required: true
+ description: Machine ID.
+ - name: machine
+ in: body
+ description: Machine details
+ schema:
+ $ref: '#/definitions/Machine'
+ responses:
+ 200:
+ description: Machine information.
+ schema:
+ $ref: "#/definitions/Machine"
+ default:
+ description: generic error response
+ schema:
+ $ref: "#/definitions/ApiError"
+ delete:
+ summary: Delete machine by Id
+ operationId: deleteMachine
+ tags:
+ - Services
+ parameters:
+ - in: path
+ name: id
+ type: integer
+ required: true
+ description: Machine ID.
+ responses:
+ 200:
+ description: Delete successful
+ default:
+ description: generic error response
+ schema:
+ $ref: "#/definitions/ApiError"
+
+ /machines/{id}/ping:
+ post:
+ summary: Check connectivity with machine.
+ operationId: pingMachine
+ security: []
+ tags:
+ - Services
+ parameters:
+ - in: path
+ name: id
+ type: integer
+ required: true
+ description: Machine ID.
+ - in: body
+ name: ping
+ description: >-
+ Body should contain proper server or agent token. If none
+ of them match the values stored by the server, the ping is
+ rejected.
+ schema:
+ type: object
+ properties:
+ serverToken:
+ type: string
+ description: Server access token.
+ agentToken:
+ type: string
+ description: Agent token.
+ responses:
+ 200:
+ description: The response is empty.
+ default:
+ description: generic error response
+ schema:
+ $ref: "#/definitions/ApiError"
+
+ /machines/{id}/state:
+ get:
+ summary: Get machine's runtime state.
+ operationId: getMachineState
+ tags:
+ - Services
+ parameters:
+ - in: path
+ name: id
+ type: integer
+ required: true
+ description: Machine ID.
+ responses:
+ 200:
+ description: Machine
+ schema:
+ $ref: "#/definitions/Machine"
+ default:
+ description: generic error response
+ schema:
+ $ref: "#/definitions/ApiError"
+
+ /machines/{id}/dump:
+ get:
+ summary: Dump machine configuration for troubleshooting purposes.
+ description: >-
+ The archive with the configuration dump can be used to diagnose service
+ problems offline. It's intended to easily download all necessary
+ information pertaining to a monitored machine from Stork and forward
+ it to the ISC Support Team.
+ operationId: getMachineDump
+ parameters:
+ - in: path
+ name: id
+ type: integer
+ required: true
+ description: Machine ID.
+ tags:
+ - Services
+ produces:
+ - application/octet-stream
+ responses:
+ 200:
+ description: The archive with the configuration dump.
+ headers:
+ Content-Disposition:
+ type: string
+ description: "The attachment filename"
+ Content-Type:
+ type: string
+              description: "The content type"
+ schema:
+ type: string
+ format: binary
+ default:
+ description: generic error response
+ schema:
+ $ref: "#/definitions/ApiError"
+
+ /machines-server-token:
+ get:
+ summary: Get server token for registering machines.
+ description: >-
+ The server token is used in server token machine registration.
+ operationId: getMachinesServerToken
+ tags:
+ - Services
+ responses:
+ 200:
+ description: Current server token.
+ schema:
+ type: object
+ properties:
+ token:
+ type: string
+ description: Current server token.
+ default:
+ description: generic error response
+ schema:
+ $ref: "#/definitions/ApiError"
+ put:
+ summary: Regenerate server token.
+ description: >-
+        If there is any chance that the current server token
+        has leaked, it should be regenerated.
+ operationId: regenerateMachinesServerToken
+ tags:
+ - Services
+ responses:
+ 200:
+ description: Regenerated server token.
+ schema:
+ type: object
+ properties:
+ token:
+ type: string
+ description: Regenerated server token.
+ default:
+ description: generic error response
+ schema:
+ $ref: "#/definitions/ApiError"
+
+ /apps:
+ get:
+ summary: Get list of apps.
+ description: >-
+ It is possible to filter the list of apps by several fields. It is also always paged.
+ Default page size is 10.
+ A list of apps is returned in items field accompanied by total count
+ which indicates total available number of records for given filtering
+ parameters.
+ operationId: getApps
+ tags:
+ - Services
+ parameters:
+ - $ref: '#/parameters/paginationStartParam'
+ - $ref: '#/parameters/paginationLimitParam'
+ - $ref: '#/parameters/filterTextParam'
+ - name: app
+ in: query
+ description: Limit returned list of apps, possible values 'bind' or 'kea'.
+ type: string
+ responses:
+ 200:
+ description: List of apps
+ schema:
+ $ref: "#/definitions/Apps"
+ default:
+ description: generic error response
+ schema:
+ $ref: "#/definitions/ApiError"
+
+ /apps/directory:
+ get:
+ summary: Get a list of all apps' ids and names.
+ description: >-
+ Returns a list of all apps' ids and names. It excludes detailed information
+ about the apps to return reasonably compact information. Having the apps'
+ names is useful in the forms which require validating user's input against
+ the list of apps present in the system. It may be also useful to create
+ drop down lists holding the apps' names. Each returned name is accompanied
+ by its id.
+ operationId: getAppsDirectory
+ tags:
+ - Services
+ responses:
+ 200:
+ description: List of app ids and names.
+ schema:
+ $ref: '#/definitions/Apps'
+ default:
+ description: 'generic error response'
+ schema:
+ $ref: '#/definitions/ApiError'
+
+ /apps/{id}:
+ get:
+ summary: Get app by ID.
+ description: Get app by the database specific ID.
+ operationId: getApp
+ tags:
+ - Services
+ parameters:
+ - in: path
+ name: id
+ type: integer
+ required: true
+ description: App ID.
+ responses:
+ 200:
+          description: An app
+ schema:
+ $ref: "#/definitions/App"
+ default:
+ description: generic error response
+ schema:
+ $ref: "#/definitions/ApiError"
+
+ /apps-stats:
+ get:
+ summary: Get applications statistics.
+ description: It returns a number of apps of a given type, a number of apps with some inactive daemons, etc.
+ operationId: getAppsStats
+ tags:
+ - Services
+ responses:
+ 200:
+ description: Application statistics
+ schema:
+ $ref: "#/definitions/AppsStats"
+ default:
+ description: generic error response
+ schema:
+ $ref: "#/definitions/ApiError"
+
+ /apps/{id}/services/status:
+ get:
+ summary: Get services status for a given application.
+ description: >-
+ Services status comprises runtime information about the services.
+ In particular, a Kea application status comprises information about
+ the High Availability service, such as HA state, partner's state etc.
+ operationId: getAppServicesStatus
+ tags:
+ - Services
+ parameters:
+ - in: path
+ name: id
+ type: integer
+ required: true
+ description: App ID.
+ responses:
+ 200:
+ description: Services with their status.
+ schema:
+ $ref: '#/definitions/ServicesStatus'
+ default:
+ description: generic error response
+ schema:
+ $ref: '#/definitions/ApiError'
+
+ /apps/{id}/name:
+ put:
+ summary: Rename the specified app.
+ description: >-
+ Sets new name for the app identified by specified identifier.
+ This operation may fail if the new name is invalid or if the
+ given app does not exist.
+ operationId: renameApp
+ tags:
+ - Services
+ parameters:
+ - name: id
+ in: path
+ type: integer
+ required: true
+ description: App ID.
+ - name: newAppName
+ in: body
+ required: true
+ description: New app name.
+ schema:
+ type: object
+ required:
+ - name
+ properties:
+ name:
+ type: string
+ responses:
+ 200:
+ description: App successfully renamed.
+ default:
+ description: generic error response
+ schema:
+ $ref: '#/definitions/ApiError'
+
+ /logs/{id}:
+ get:
+ summary: Gets the tail of the given log file.
+ description: >-
+ Returns the tail of the specified log file. It is possible to specify the offset
+ from which the log should be returned.
+ operationId: getLogTail
+ tags:
+ - Services
+ parameters:
+ - in: path
+ name: id
+ type: integer
+ required: true
+ description: Log file identifier in the database.
+ - in: query
+ name: maxLength
+ type: integer
+ required: false
+ description: Maximum length of the data fetched.
+ responses:
+ 200:
+ description: Tail of the log file returned successfully.
+ schema:
+ $ref: '#/definitions/LogTail'
+ default:
+ description: generic error response
+ schema:
+ $ref: '#/definitions/ApiError'
+
+ /daemons/{id}:
+ put:
+ summary: Update daemon.
+ description: Update daemon.
+ operationId: updateDaemon
+ tags:
+ - Services
+ parameters:
+ - in: path
+ name: id
+ type: integer
+ required: true
+ description: Daemon ID.
+ - in: body
+ name: daemon
+ description: Daemon details
+ schema:
+ type: object
+ properties:
+ monitored:
+ type: boolean
+ responses:
+ 200:
+ description: Daemon information.
+ default:
+ description: generic error response
+ schema:
+ $ref: "#/definitions/ApiError"
+
+ /daemons/{id}/config:
+ get:
+ summary: Get daemon configuration
+ description: Get internal daemon configuration. Only Kea daemon supported.
+ operationId: getDaemonConfig
+ tags:
+ - Services
+ parameters:
+ - in: path
+ name: id
+ type: integer
+ required: true
+ description: Daemon ID
+ responses:
+ 200:
+ description: Daemon configuration information.
+ schema:
+ $ref: "#/definitions/KeaDaemonConfig"
+ default:
+ description: generic error response
+ schema:
+ $ref: "#/definitions/ApiError"
+
+ /daemons/{id}/config-reports:
+ get:
+ summary: Get configuration review reports
+ description: >-
+ Configuration review reports describe issues or suggestions for changes
+ in the daemon configurations. This call retrieves the reports by daemon
+ ID.
+ operationId: getDaemonConfigReports
+ tags:
+ - Services
+ parameters:
+ - $ref: '#/parameters/paginationStartParam'
+ - $ref: '#/parameters/paginationLimitParam'
+ - name: id
+ in: path
+ type: integer
+ required: true
+ description: Daemon ID
+ responses:
+ 200:
+ description: Daemon configuration review reports list.
+ schema:
+ $ref: "#/definitions/ConfigReports"
+ 202:
+ description: Config review for the daemon is in progress.
+ 204:
+ description: No config reports currently available for the daemon.
+ default:
+ description: generic error response
+ schema:
+ $ref: "#/definitions/ApiError"
+
+ /daemons/{id}/config-review:
+ put:
+ summary: Attempt to begin a new configuration review.
+ description: >-
+ Configuration review is a background server task. This method schedules
+ a new review for the selected daemon. If a review for this daemon is already
+ in progress the new review is not started.
+ operationId: putDaemonConfigReview
+ tags:
+ - Services
+ parameters:
+ - name: id
+ in: path
+ type: integer
+ required: true
+ description: Daemon ID
+ responses:
+ 202:
+ description: >-
+ New configuration review has been started but the review results
+ are not available yet. Poll config reports for the daemon to check
+ their availability.
+ default:
+ description: generic error response
+ schema:
+ $ref: "#/definitions/ApiError"
+
+ /daemons/global/config-checkers:
+ get:
+ summary: Get global config checker preferences.
+ description: >-
+ Configuration review executes available checkers to find configuration
+ issues. The checkers may be disabled or enabled globally or per daemon.
+ This endpoint returns the global checkers' configuration metadata and
+ preferences.
+ operationId: getGlobalConfigCheckers
+ tags:
+ - Services
+ responses:
+ 200:
+ description: Global configuration checker list.
+ schema:
+ $ref: "#/definitions/ConfigCheckers"
+ default:
+ description: generic error response
+ schema:
+ $ref: "#/definitions/ApiError"
+ put:
+ summary: Update global config checker preferences.
+ description: Updates global config checker preferences.
+ operationId: putGlobalConfigCheckerPreferences
+ tags:
+ - Services
+ parameters:
+ - in: body
+ name: changes
+ description: Global checker preferences to change.
+ schema:
+ $ref: '#/definitions/ConfigCheckerPreferences'
+ responses:
+ 200:
+ description: List of global config checker metadata and preferences.
+ schema:
+ $ref: "#/definitions/ConfigCheckers"
+ default:
+ description: generic error response
+ schema:
+ $ref: "#/definitions/ApiError"
+
+ /daemons/{id}/config-checkers:
+ get:
+ summary: Get config checkers for a given daemon.
+ description: >-
+ Configuration review executes multiple configuration checkers to find
+ various issues. The checkers may be disabled or enabled globally or
+ per daemon. This endpoint returns the checkers metadata and preferences
+ for a specific daemon.
+ operationId: getDaemonConfigCheckers
+ tags:
+ - Services
+ parameters:
+ - name: id
+ in: path
+ type: integer
+ required: true
+ description: Daemon ID
+ responses:
+ 200:
+ description: Daemon configuration checker list.
+ schema:
+ $ref: "#/definitions/ConfigCheckers"
+ default:
+ description: generic error response
+ schema:
+ $ref: "#/definitions/ApiError"
+ put:
+ summary: Update config checker preferences for a given daemon.
+ description: Updates config checker preferences for a given daemon.
+ operationId: putDaemonConfigCheckerPreferences
+ tags:
+ - Services
+ parameters:
+ - in: path
+ name: id
+ type: integer
+ required: true
+ description: Daemon ID.
+ - in: body
+ name: changes
+ description: Configuration checker preferences to change.
+ schema:
+ $ref: '#/definitions/ConfigCheckerPreferences'
+ responses:
+ 200:
+ description: List of config checker preferences for a given daemon.
+ schema:
+ $ref: "#/definitions/ConfigCheckers"
+ default:
+ description: generic error response
+ schema:
+ $ref: "#/definitions/ApiError" \ No newline at end of file
diff --git a/api/settings-defs.yaml b/api/settings-defs.yaml
new file mode 100644
index 0000000..5ba962f
--- /dev/null
+++ b/api/settings-defs.yaml
@@ -0,0 +1,50 @@
+ Settings:
+ type: object
+ properties:
+ bind9_stats_puller_interval:
+ type: integer
+ grafana_url:
+ type: string
+ kea_hosts_puller_interval:
+ type: integer
+ kea_stats_puller_interval:
+ type: integer
+ kea_status_puller_interval:
+ type: integer
+ apps_state_puller_interval:
+ type: integer
+ prometheus_url:
+ type: string
+ metrics_collector_interval:
+ type: integer
+
+ Puller:
+ type: object
+ properties:
+ name:
+ type: string
+ readOnly: true
+ id:
+ type: string
+ readOnly: true
+ interval:
+ type: integer
+ readOnly: true
+ lastInvokedAt:
+ type: string
+ format: date-time
+ readOnly: true
+ lastFinishedAt:
+ type: string
+ format: date-time
+ readOnly: true
+
+ Pullers:
+ type: object
+ properties:
+ items:
+ type: array
+ items:
+ $ref: '#/definitions/Puller'
+ total:
+ type: integer
diff --git a/api/settings-paths.yaml b/api/settings-paths.yaml
new file mode 100644
index 0000000..366bc7d
--- /dev/null
+++ b/api/settings-paths.yaml
@@ -0,0 +1,78 @@
+ /settings:
+ get:
+ summary: Get a set of global settings.
+ description: >-
+ A set of global Stork settings.
+ operationId: getSettings
+ tags:
+ - Settings
+ responses:
+ 200:
+ description: A set of settings
+ schema:
+ $ref: "#/definitions/Settings"
+ default:
+ description: generic error response
+ schema:
+ $ref: "#/definitions/ApiError"
+ put:
+ summary: Update settings.
+ description: Update global settings.
+ operationId: updateSettings
+ tags:
+ - Settings
+ parameters:
+ - name: settings
+ in: body
+ description: Settings set
+ schema:
+ $ref: '#/definitions/Settings'
+ responses:
+ 200:
+ description: Settings set
+ default:
+ description: generic error response
+ schema:
+ $ref: "#/definitions/ApiError"
+
+ /pullers:
+ get:
+ summary: Get the puller statuses
+ description: >-
+ Returns a list of puller statuses
+ operationId: getPullers
+ tags:
+ - Settings
+ responses:
+ 200:
+ description: A set of pullers
+ schema:
+ $ref: "#/definitions/Pullers"
+ default:
+ description: generic error response
+ schema:
+ $ref: "#/definitions/ApiError"
+
+ /pullers/{id}:
+ get:
+ summary: Get the puller status
+ description: >-
+ Returns a puller status with a given ID
+ operationId: getPuller
+ tags:
+ - Settings
+ parameters:
+ - in: path
+ name: id
+ type: string
+ required: true
+ description: Puller ID.
+ responses:
+ 200:
+ description: A puller
+ schema:
+ $ref: "#/definitions/Puller"
+ default:
+ description: generic error response
+ schema:
+ $ref: "#/definitions/ApiError" \ No newline at end of file
diff --git a/api/swagger.in.yaml b/api/swagger.in.yaml
new file mode 100644
index 0000000..d839a24
--- /dev/null
+++ b/api/swagger.in.yaml
@@ -0,0 +1,100 @@
+---
+swagger: "2.0"
+info:
+ description: An API for Stork
+ title: Stork API
+ version: 1.7.0
+consumes:
+ - application/json
+produces:
+ - application/json
+schemes:
+- http
+basePath: /api
+
+securityDefinitions:
+ Token:
+ type: apiKey
+ in: header
+ name: Cookie
+
+security:
+ - Token: []
+
+paths:
+ /version:
+ get:
+ summary: Get version.
+ operationId: getVersion
+ security: []
+ tags:
+ - General
+ responses:
+ 200:
+ description: Version of Stork
+ schema:
+ $ref: "#/definitions/Version"
+ default:
+ description: generic error response
+ schema:
+ $ref: "#/definitions/ApiError"
+
+ $include: users-paths.yaml
+ $include: services-paths.yaml
+ $include: dhcp-paths.yaml
+ $include: settings-paths.yaml
+ $include: search-paths.yaml
+ $include: events-paths.yaml
+
+
+parameters:
+ paginationStartParam:
+ name: start
+ in: query
+ description: First record to retrieve.
+ type: integer
+
+ paginationLimitParam:
+ name: limit
+ in: query
+ description: Number of records to retrieve.
+ type: integer
+
+ filterTextParam:
+ name: text
+ in: query
+ description: >-
+ Filtering text, e.g. hostname for the machines
+ or version for the apps.
+ type: string
+
+
+definitions:
+ Version:
+ type: object
+ required:
+ - version
+ - date
+ properties:
+ version:
+ type: string
+ date:
+ type: string
+
+ ApiError:
+ type: object
+ required:
+ - message
+ properties:
+ message:
+ type: string
+
+ principal:
+ type: string
+
+ $include: users-defs.yaml
+ $include: services-defs.yaml
+ $include: dhcp-defs.yaml
+ $include: settings-defs.yaml
+ $include: search-defs.yaml
+ $include: events-defs.yaml
diff --git a/api/users-defs.yaml b/api/users-defs.yaml
new file mode 100644
index 0000000..61d2782
--- /dev/null
+++ b/api/users-defs.yaml
@@ -0,0 +1,82 @@
+ User:
+ type: object
+ required:
+ - id
+ - login
+ - email
+ - name
+ - lastname
+ properties:
+ id:
+ type: integer
+ login:
+ type: string
+ email:
+ type: string
+ name:
+ type: string
+ lastname:
+ type: string
+ groups:
+ type: array
+ items:
+ type: integer
+
+ Password:
+ type: string
+
+ PasswordChange:
+ type: object
+ required:
+ - oldpassword
+ - newpassword
+ properties:
+ oldpassword:
+ $ref: "#/definitions/Password"
+ newpassword:
+ $ref: "#/definitions/Password"
+
+ UserAccount:
+ type: object
+ required:
+ - user
+ - password
+ properties:
+ user:
+ $ref: "#/definitions/User"
+ password:
+ $ref: "#/definitions/Password"
+
+ Users:
+ type: object
+ properties:
+ items:
+ type: array
+ items:
+ $ref: '#/definitions/User'
+ total:
+ type: integer
+
+ Group:
+ type: object
+ required:
+ - id
+ - name
+ - description
+ properties:
+ id:
+ type: integer
+ name:
+ type: string
+ description:
+ type: string
+
+ Groups:
+ type: object
+ properties:
+ items:
+ type: array
+ items:
+ $ref: '#/definitions/Group'
+ total:
+ type: integer
diff --git a/api/users-paths.yaml b/api/users-paths.yaml
new file mode 100644
index 0000000..a1083b4
--- /dev/null
+++ b/api/users-paths.yaml
@@ -0,0 +1,185 @@
+ /sessions:
+ post:
+ summary: Logs in a user to the system
+ operationId: createSession
+ security: []
+ tags:
+ - Users
+ parameters:
+ - in: body
+ name: credentials
+ description: Credentials provided by the user upon logging in to the system
+ required: true
+ schema:
+ type: object
+ required:
+ - useremail
+ - userpassword
+ properties:
+ useremail:
+ description: E-mail of the user logging in to the system
+ type: string
+ userpassword:
+ description: User password to log in to the system
+ type: string
+ responses:
+ 200:
+ description: Login successful
+ schema:
+ $ref: "#/definitions/User"
+ 400:
+ description: Invalid user email or password supplied
+ schema:
+ $ref: "#/definitions/ApiError"
+ delete:
+ summary: Logs out a user from the system
+ operationId: deleteSession
+ tags:
+ - Users
+ responses:
+ 200:
+ description: Logout successful
+ 400:
+ description: generic error response
+ schema:
+ $ref: "#/definitions/ApiError"
+
+ /users:
+ get:
+ summary: Get the list of users.
+ description: >-
+ Returns all users having an account in the system.
+ operationId: getUsers
+ tags:
+ - Users
+ parameters:
+ - $ref: '#/parameters/paginationStartParam'
+ - $ref: '#/parameters/paginationLimitParam'
+ - $ref: '#/parameters/filterTextParam'
+ responses:
+ 200:
+ description: List of users returned.
+ schema:
+ $ref: "#/definitions/Users"
+ default:
+ description: generic error response
+ schema:
+ $ref: "#/definitions/ApiError"
+ post:
+ summary: Creates new user account.
+ description: >-
+ Creates new user account in the system.
+ operationId: createUser
+ tags:
+ - Users
+ parameters:
+ - name: account
+ in: body
+ description: New user account including user information and password
+ schema:
+ $ref: "#/definitions/UserAccount"
+
+ responses:
+ 200:
+ description: User account successfully created.
+ schema:
+ $ref: "#/definitions/User"
+ default:
+ description: generic error response
+ schema:
+ $ref: "#/definitions/ApiError"
+
+ put:
+ summary: Updates existing user account.
+ description: >-
+ Updates existing user account in the system.
+ operationId: updateUser
+ tags:
+ - Users
+ parameters:
+ - name: account
+ in: body
+ description: Updated user account information and password
+ schema:
+ $ref: "#/definitions/UserAccount"
+
+ responses:
+ 200:
+ description: User account successfully updated.
+ default:
+ description: generic error response
+ schema:
+ $ref: "#/definitions/ApiError"
+
+ /users/{id}:
+ get:
+ summary: Get the specific user.
+ description: Returns user by id.
+ operationId: getUser
+ tags:
+ - Users
+ parameters:
+ - in: path
+ name: id
+ type: integer
+ required: true
+ description: User identifier in the database.
+ responses:
+ 200:
+ description: User information returned.
+ schema:
+ $ref: "#/definitions/User"
+ default:
+ description: generic error response
+ schema:
+ $ref: "#/definitions/ApiError"
+
+ /users/{id}/password:
+ put:
+ summary: Updates user password.
+ description: Updates user password.
+ operationId: updateUserPassword
+ tags:
+ - Users
+ parameters:
+ - in: path
+ name: id
+ type: integer
+ required: true
+ description: User identifier in the database.
+ - in: body
+ name: passwords
+ description: Old and new password
+ schema:
+ $ref: "#/definitions/PasswordChange"
+
+ responses:
+ 200:
+ description: Password updated successfully.
+ default:
+ description: generic error response
+ schema:
+ $ref: "#/definitions/ApiError"
+
+ /groups:
+ get:
+ summary: Get the list of groups.
+ description: >-
+ Returns all groups defined for the system.
+ operationId: getGroups
+ security: []
+ tags:
+ - Users
+ parameters:
+ - $ref: '#/parameters/paginationStartParam'
+ - $ref: '#/parameters/paginationLimitParam'
+ - $ref: '#/parameters/filterTextParam'
+ responses:
+ 200:
+ description: List of groups returned.
+ schema:
+ $ref: "#/definitions/Groups"
+ default:
+ description: generic error response
+ schema:
+ $ref: "#/definitions/ApiError"
diff --git a/backend/.golangci.yml b/backend/.golangci.yml
new file mode 100644
index 0000000..0f394c3
--- /dev/null
+++ b/backend/.golangci.yml
@@ -0,0 +1,118 @@
+linters:
+ enable:
+ - deadcode
+ - errcheck
+ - gosimple
+ - govet
+ - ineffassign
+ - staticcheck
+ - structcheck
+ - typecheck
+ - unused
+ - varcheck
+ - asciicheck
+ - bodyclose
+ - depguard
+ - dogsled
+ - errorlint
+ - exhaustive
+ - exportloopref
+ - gochecknoglobals
+ - gochecknoinits
+ - gocognit
+ - goconst
+ - gocritic
+ - gocyclo
+ - godot
+ - goerr113
+ - gofmt
+ - gofumpt
+ - goheader
+ - goimports
+ - revive
+ - gomodguard
+ - goprintffuncname
+ - gosec
+ - misspell
+ - nakedret
+ - nestif
+ - noctx
+ - nolintlint
+ - rowserrcheck
+ - exportloopref
+ - sqlclosecheck
+ - stylecheck
+ - tparallel
+ - unconvert
+ - unparam
+ - whitespace
+ disable:
+ - godox
+ - maligned
+ - lll
+ - funlen
+ - prealloc
+ - wsl # TODO: reenable later
+ - interfacer # A tool that suggests interfaces is prone to bad suggestions, so its usefulness in real code is limited.
+ - dupl # there are some false positives (identical code but operating on different types so no possibility to unify)
+ - testpackage # Requires rewriting a lot of unit tests and blocks the ability to test unexported functions.
+ - wrapcheck # We currently use pkg/errors tool that deals with the issue on its own.
+ - exhaustivestruct # Most of the time you don't want to initialize all fields.
+ - gomnd # This may be useful but in many cases creating artificial constants, e.g. in tests is counter productive.
+ - paralleltest # This enforces t.Parallel() usage but some of our tests seem to work incorrectly. Need to fix it first.
+ - nlreturn # This seems to be overkill and sometimes new lines before continue or break is useless.
+ - gci # More less the same as goimports but it breaks imports grouping so it is not nice.
+
+linters-settings:
+ gocognit:
+ min-complexity: 65
+ nestif:
+ min-complexity: 13
+ exhaustive:
+ # Treats switch statements with default case as taking into account all possible cases.
+ # Without this setting the switch statements must enumerate all possible cases.
+ default-signifies-exhaustive: true
+
+issues:
+ max-same-issues: 3
+
+ exclude-rules:
+ # Exclude some linters from running on tests files.
+ - path: _test\.go
+ linters:
+ - gocyclo
+ - errcheck
+ - gosec
+ - goconst
+
+ - linters:
+ - gosec
+ text: "G107"
+
+ - linters:
+ - gosec
+ text: "G202"
+
+ - path: api/gomock
+ linters:
+ - gochecknoglobals
+ - wsl
+ - gofmt
+ - goimports
+
+ - path: server/database/migrations/
+ linters:
+ - gochecknoinits
+ - gochecknoglobals
+
+ - path: version.go
+ linters:
+ - gochecknoglobals
+
+ - path: server/database/model/
+ linters:
+ - gochecknoinits
+
+ - path: server/database/settings.go
+ linters:
+ - gochecknoinits
diff --git a/backend/agent/agent.go b/backend/agent/agent.go
new file mode 100644
index 0000000..7ff670d
--- /dev/null
+++ b/backend/agent/agent.go
@@ -0,0 +1,455 @@
+package agent
+
+import (
+ "bytes"
+ "compress/gzip"
+ "context"
+ "crypto/tls"
+ "crypto/x509"
+ "fmt"
+ "io"
+ "net"
+ "os"
+ "runtime"
+ "strconv"
+ "strings"
+ "sync"
+
+ "github.com/pkg/errors"
+ "github.com/shirou/gopsutil/host"
+ "github.com/shirou/gopsutil/load"
+ "github.com/shirou/gopsutil/mem"
+ log "github.com/sirupsen/logrus"
+ "github.com/urfave/cli/v2"
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/security/advancedtls"
+
+ "isc.org/stork"
+ agentapi "isc.org/stork/api"
+)
+
+// Global Stork Agent state.
+type StorkAgent struct {
+ Settings *cli.Context
+ AppMonitor AppMonitor
+ HTTPClient *HTTPClient // to communicate with Kea Control Agent and named statistics-channel
+ server *grpc.Server
+ logTailer *logTailer
+ keaInterceptor *keaInterceptor
+ shutdownOnce sync.Once
+
+ agentapi.UnimplementedAgentServer
+}
+
+// API exposed to Stork Server.
+func NewStorkAgent(settings *cli.Context, appMonitor AppMonitor) *StorkAgent {
+ logTailer := newLogTailer()
+
+ sa := &StorkAgent{
+ Settings: settings,
+ AppMonitor: appMonitor,
+ HTTPClient: NewHTTPClient(settings.Bool("skip-tls-cert-verification")),
+ logTailer: logTailer,
+ keaInterceptor: newKeaInterceptor(),
+ }
+
+ registerKeaInterceptFns(sa)
+
+ return sa
+}
+
+// Read the latest root CA cert from file for Stork Server's cert verification.
+func getRootCertificates(params *advancedtls.GetRootCAsParams) (*advancedtls.GetRootCAsResults, error) {
+ certPool := x509.NewCertPool()
+ ca, err := os.ReadFile(RootCAFile)
+ if err != nil {
+ err = errors.Wrapf(err, "could not read CA certificate: %s", RootCAFile)
+ log.Errorf("%+v", err)
+ return nil, err
+ }
+ // append the client certificates from the CA
+ if ok := certPool.AppendCertsFromPEM(ca); !ok {
+ err = errors.New("failed to append client certs")
+ log.Errorf("%+v", err)
+ return nil, err
+ }
+ log.Printf("Loaded CA cert: %s\n", RootCAFile)
+ return &advancedtls.GetRootCAsResults{
+ TrustCerts: certPool,
+ }, nil
+}
+
+// Read the latest Stork Agent's cert from file for presenting its identity to the Stork server.
+func getIdentityCertificatesForServer(info *tls.ClientHelloInfo) ([]*tls.Certificate, error) {
+ keyPEM, err := os.ReadFile(KeyPEMFile)
+ if err != nil {
+ err = errors.Wrapf(err, "could not load key PEM file: %s", KeyPEMFile)
+ log.Errorf("%+v", err)
+ return nil, err
+ }
+ certPEM, err := os.ReadFile(CertPEMFile)
+ if err != nil {
+ err = errors.Wrapf(err, "could not load cert PEM file: %s", CertPEMFile)
+ log.Errorf("%+v", err)
+ return nil, err
+ }
+ certificate, err := tls.X509KeyPair(certPEM, keyPEM)
+ if err != nil {
+ err = errors.Wrapf(err, "could not setup TLS key pair")
+ log.Errorf("%+v", err)
+ return nil, err
+ }
+ log.Printf("Loaded server cert: %s and key: %s\n", CertPEMFile, KeyPEMFile)
+ return []*tls.Certificate{&certificate}, nil
+}
+
+// Prepare gRPC server with configured TLS.
+func newGRPCServerWithTLS() (*grpc.Server, error) {
+ // Prepare structure for advanced TLS. It defines hook functions
+ // that dynamically load key and cert from files just before establishing
+ // a connection. Thanks to this, if these files change in the meantime,
+ // the latest version is always used for new connections.
+ // Besides that, client authentication is required, and certificate
+ // and host verification are enforced.
+ options := &advancedtls.ServerOptions{
+ // pull latest root CA cert for stork server cert verification
+ RootOptions: advancedtls.RootCertificateOptions{
+ GetRootCertificates: getRootCertificates,
+ },
+ // pull latest stork agent cert for presenting its identity to stork server
+ IdentityOptions: advancedtls.IdentityCertificateOptions{
+ GetIdentityCertificatesForServer: getIdentityCertificatesForServer,
+ },
+ // force stork server cert verification
+ RequireClientCert: true,
+ // check cert and if it matches host IP
+ VType: advancedtls.CertAndHostVerification,
+ }
+ creds, err := advancedtls.NewServerCreds(options)
+ if err != nil {
+ return nil, errors.Wrapf(err, "cannot create server credentials for TLS")
+ }
+
+ srv := grpc.NewServer(grpc.Creds(creds))
+ return srv, nil
+}
+
+// Setup the agent as gRPC server endpoint.
+func (sa *StorkAgent) Setup() error {
+ server, err := newGRPCServerWithTLS()
+ if err != nil {
+ return err
+ }
+ sa.server = server
+ return nil
+}
+
+// Respond to ping request from the server. It assures the server that
+// the connection from the server to client is established. It is used
+// in server token registration procedure.
+func (sa *StorkAgent) Ping(ctx context.Context, in *agentapi.PingReq) (*agentapi.PingRsp, error) {
+ rsp := agentapi.PingRsp{}
+ return &rsp, nil
+}
+
+// Get state of machine.
+func (sa *StorkAgent) GetState(ctx context.Context, in *agentapi.GetStateReq) (*agentapi.GetStateRsp, error) {
+ vm, _ := mem.VirtualMemory()
+ hostInfo, _ := host.Info()
+ load, _ := load.Avg()
+ loadStr := fmt.Sprintf("%.2f %.2f %.2f", load.Load1, load.Load5, load.Load15)
+
+ var apps []*agentapi.App
+ for _, app := range sa.AppMonitor.GetApps() {
+ var accessPoints []*agentapi.AccessPoint
+ for _, point := range app.GetBaseApp().AccessPoints {
+ accessPoints = append(accessPoints, &agentapi.AccessPoint{
+ Type: point.Type,
+ Address: point.Address,
+ Port: point.Port,
+ Key: point.Key,
+ UseSecureProtocol: point.UseSecureProtocol,
+ })
+ }
+
+ apps = append(apps, &agentapi.App{
+ Type: app.GetBaseApp().Type,
+ AccessPoints: accessPoints,
+ })
+ }
+
+ state := agentapi.GetStateRsp{
+ AgentVersion: stork.Version,
+ Apps: apps,
+ Hostname: hostInfo.Hostname,
+ Cpus: int64(runtime.NumCPU()),
+ CpusLoad: loadStr,
+ Memory: int64(vm.Total / (1024 * 1024 * 1024)), // in GiB
+ UsedMemory: int64(vm.UsedPercent),
+ Uptime: int64(hostInfo.Uptime / (60 * 60 * 24)), // in days
+ Os: hostInfo.OS,
+ Platform: hostInfo.Platform,
+ PlatformFamily: hostInfo.PlatformFamily,
+ PlatformVersion: hostInfo.PlatformVersion,
+ KernelVersion: hostInfo.KernelVersion,
+ KernelArch: hostInfo.KernelArch,
+ VirtualizationSystem: hostInfo.VirtualizationSystem,
+ VirtualizationRole: hostInfo.VirtualizationRole,
+ HostID: hostInfo.HostID,
+ Error: "",
+ }
+
+ return &state, nil
+}
+
+// ForwardRndcCommand forwards one rndc command sent by the Stork Server to
+// the named daemon.
+func (sa *StorkAgent) ForwardRndcCommand(ctx context.Context, in *agentapi.ForwardRndcCommandReq) (*agentapi.ForwardRndcCommandRsp, error) {
+ rndcRsp := &agentapi.RndcResponse{
+ Status: &agentapi.Status{},
+ }
+ response := &agentapi.ForwardRndcCommandRsp{
+ Status: &agentapi.Status{
+ Code: agentapi.Status_OK, // all ok
+ },
+ RndcResponse: rndcRsp,
+ }
+
+ app := sa.AppMonitor.GetApp(AppTypeBind9, AccessPointControl, in.Address, in.Port)
+ if app == nil {
+ rndcRsp.Status.Code = agentapi.Status_ERROR
+ rndcRsp.Status.Message = "Cannot find BIND 9 app"
+ response.Status = rndcRsp.Status
+ return response, nil
+ }
+ bind9App := app.(*Bind9App)
+ if bind9App == nil {
+ rndcRsp.Status.Code = agentapi.Status_ERROR
+ rndcRsp.Status.Message = fmt.Sprintf("Incorrect app found: %s instead of BIND 9", app.GetBaseApp().Type)
+ response.Status = rndcRsp.Status
+ return response, nil
+ }
+
+ request := in.GetRndcRequest()
+
+ // Try to forward the command to rndc.
+ output, err := bind9App.sendCommand(strings.Fields(request.Request))
+ if err != nil {
+ log.WithFields(log.Fields{
+ "Address": in.Address,
+ "Port": in.Port,
+ }).Errorf("Failed to forward commands to rndc: %+v", err)
+ rndcRsp.Status.Code = agentapi.Status_ERROR
+ rndcRsp.Status.Message = fmt.Sprintf("Failed to forward commands to rndc: %s", err.Error())
+ } else {
+ rndcRsp.Status.Code = agentapi.Status_OK
+ rndcRsp.Response = string(output)
+ }
+
+ response.Status = rndcRsp.Status
+ return response, nil
+}
+
+// ForwardToNamedStats forwards a statistics request to the named daemon.
+func (sa *StorkAgent) ForwardToNamedStats(ctx context.Context, in *agentapi.ForwardToNamedStatsReq) (*agentapi.ForwardToNamedStatsRsp, error) {
+ reqURL := in.GetUrl()
+ req := in.GetNamedStatsRequest()
+
+ response := &agentapi.ForwardToNamedStatsRsp{
+ Status: &agentapi.Status{
+ Code: agentapi.Status_OK, // all ok
+ },
+ }
+
+ rsp := &agentapi.NamedStatsResponse{
+ Status: &agentapi.Status{},
+ }
+
+ // Try to forward the command to named daemon.
+ namedRsp, err := sa.HTTPClient.Call(reqURL, bytes.NewBuffer([]byte(req.Request)))
+ if err != nil {
+ log.WithFields(log.Fields{
+ "URL": reqURL,
+ }).Errorf("Failed to forward commands to named: %+v", err)
+ rsp.Status.Code = agentapi.Status_ERROR
+ rsp.Status.Message = fmt.Sprintf("Failed to forward commands to named: %s", err.Error())
+ response.NamedStatsResponse = rsp
+ return response, nil
+ }
+
+ // Read the response body.
+ body, err := io.ReadAll(namedRsp.Body)
+ namedRsp.Body.Close()
+ if err != nil {
+ log.WithFields(log.Fields{
+ "URL": reqURL,
+ }).Errorf("Failed to read the body of the named response: %+v", err)
+ rsp.Status.Code = agentapi.Status_ERROR
+ rsp.Status.Message = fmt.Sprintf("Failed to read the body of the named response: %s", err.Error())
+ response.NamedStatsResponse = rsp
+ return response, nil
+ }
+
+ // Everything looks good, so include the body in the response.
+ rsp.Response = string(body)
+ rsp.Status.Code = agentapi.Status_OK
+ response.NamedStatsResponse = rsp
+ return response, nil
+}
+
+// Forwards one or more Kea commands sent by the Stork Server to the appropriate Kea instance over
+// HTTP (via Control Agent).
+func (sa *StorkAgent) ForwardToKeaOverHTTP(ctx context.Context, in *agentapi.ForwardToKeaOverHTTPReq) (*agentapi.ForwardToKeaOverHTTPRsp, error) {
+ // prepare base response
+ response := &agentapi.ForwardToKeaOverHTTPRsp{
+ Status: &agentapi.Status{
+ Code: agentapi.Status_OK, // all ok
+ },
+ }
+
+ // check URL to CA
+ reqURL := in.GetUrl()
+ if reqURL == "" {
+ response.Status.Code = agentapi.Status_ERROR
+ response.Status.Message = "Incorrect URL to Kea CA"
+ return response, nil
+ }
+
+ requests := in.GetKeaRequests()
+
+ // forward requests to kea one by one
+ for _, req := range requests {
+ rsp := &agentapi.KeaResponse{
+ Status: &agentapi.Status{},
+ }
+ // Try to forward the command to Kea Control Agent.
+ keaRsp, err := sa.HTTPClient.Call(reqURL, bytes.NewBuffer([]byte(req.Request)))
+ if err != nil {
+ log.WithFields(log.Fields{
+ "URL": reqURL,
+ }).Errorf("Failed to forward commands to Kea CA: %+v", err)
+ rsp.Status.Code = agentapi.Status_ERROR
+ rsp.Status.Message = fmt.Sprintf("Failed to forward commands to Kea: %s", err.Error())
+ response.KeaResponses = append(response.KeaResponses, rsp)
+ continue
+ }
+
+ // Read the response body.
+ body, err := io.ReadAll(keaRsp.Body)
+ keaRsp.Body.Close()
+ if err != nil {
+ log.WithFields(log.Fields{
+ "URL": reqURL,
+ }).Errorf("Failed to read the body of the Kea response to forwarded commands: %+v", err)
+ rsp.Status.Code = agentapi.Status_ERROR
+ rsp.Status.Message = fmt.Sprintf("Failed to read the body of the Kea response: %s", err.Error())
+ response.KeaResponses = append(response.KeaResponses, rsp)
+ continue
+ }
+
+ // Push Kea response for synchronous processing. It may modify the
+ // response body.
+ body, err = sa.keaInterceptor.syncHandle(sa, req, body)
+ if err != nil {
+ log.WithFields(log.Fields{
+ "URL": reqURL,
+ }).Errorf("Failed to apply synchronous interceptors on Kea response: %+v", err)
+ continue
+ }
+
+ // Push Kea response for async processing. It is done in background.
+ // One of the use cases is to extract log files used by Kea and to
+ // allow the log viewer to access them.
+ go sa.keaInterceptor.asyncHandle(sa, req, body)
+
+ // gzip json response received from Kea
+ var gzippedBuf bytes.Buffer
+ zw := gzip.NewWriter(&gzippedBuf)
+ _, err = zw.Write(body)
+ if err != nil {
+ log.WithFields(log.Fields{
+ "URL": reqURL,
+ }).Errorf("Failed to compress the Kea response: %+v", err)
+ rsp.Status.Code = agentapi.Status_ERROR
+ rsp.Status.Message = fmt.Sprintf("Failed to compress the Kea response: %s", err.Error())
+ response.KeaResponses = append(response.KeaResponses, rsp)
+ if err2 := zw.Close(); err2 != nil {
+ log.Errorf("Error while closing gzip writer: %s", err2)
+ }
+ continue
+ }
+ if err := zw.Close(); err != nil {
+ log.WithFields(log.Fields{
+ "URL": reqURL,
+ }).Errorf("Failed to finish compressing the Kea response: %+v", err)
+ rsp.Status.Code = agentapi.Status_ERROR
+ rsp.Status.Message = fmt.Sprintf("Failed to finish compressing the Kea response: %s", err.Error())
+ response.KeaResponses = append(response.KeaResponses, rsp)
+ continue
+ }
+ if len(body) > 0 {
+ log.Debugf("Compressing response from %d B to %d B, ratio %d%%", len(body), gzippedBuf.Len(), 100*gzippedBuf.Len()/len(body))
+ }
+
+ // Everything looks good, so include the gzipped body in the response.
+ rsp.Response = gzippedBuf.Bytes()
+ rsp.Status.Code = agentapi.Status_OK
+ response.KeaResponses = append(response.KeaResponses, rsp)
+ }
+
+ return response, nil
+}
+
+// Returns the tail of the specified file, typically a log file.
+func (sa *StorkAgent) TailTextFile(ctx context.Context, in *agentapi.TailTextFileReq) (*agentapi.TailTextFileRsp, error) {
+ response := &agentapi.TailTextFileRsp{
+ Status: &agentapi.Status{
+ Code: agentapi.Status_OK, // all ok
+ },
+ }
+
+ lines, err := sa.logTailer.tail(in.Path, in.Offset)
+ if err != nil {
+ response.Status.Code = agentapi.Status_ERROR
+ response.Status.Message = fmt.Sprintf("%s", err)
+ return response, nil
+ }
+ response.Lines = lines
+
+ return response, nil
+}
+
+func (sa *StorkAgent) Serve() error {
+ // Install gRPC API handlers.
+ agentapi.RegisterAgentServer(sa.server, sa)
+
+ // Prepare listener on configured address.
+ addr := net.JoinHostPort(sa.Settings.String("host"), strconv.Itoa(sa.Settings.Int("port")))
+ lis, err := net.Listen("tcp", addr)
+ if err != nil {
+ return errors.Wrapf(err, "Failed to listen on: %s", addr)
+ }
+
+ // Start serving gRPC
+ log.WithFields(log.Fields{
+ "address": lis.Addr(),
+ }).Infof("Started serving Stork Agent")
+ if err := sa.server.Serve(lis); err != nil {
+ return errors.Wrapf(err, "Failed to serve on: %s", addr)
+ }
+ return nil
+}
+
+// Shuts down Stork Agent. The reload flag indicates if the Shutdown is called
+// as part of the agent reload (reload=true) or the process is terminating
+// (reload=false).
+func (sa *StorkAgent) Shutdown(reload bool) {
+ sa.shutdownOnce.Do(func() {
+ if !reload {
+ log.Info("Stopping Stork Agent")
+ }
+ if sa.server != nil {
+ sa.server.GracefulStop()
+ }
+ })
+}
diff --git a/backend/agent/agent_test.go b/backend/agent/agent_test.go
new file mode 100644
index 0000000..5ff73ff
--- /dev/null
+++ b/backend/agent/agent_test.go
@@ -0,0 +1,729 @@
+package agent
+
+import (
+ "bytes"
+ "compress/gzip"
+ "context"
+ "crypto/tls"
+ "flag"
+ "fmt"
+ "io"
+ "math/rand"
+ "os"
+ "path"
+ "testing"
+ "time"
+
+ "github.com/pkg/errors"
+ log "github.com/sirupsen/logrus"
+ "github.com/stretchr/testify/require"
+ "github.com/urfave/cli/v2"
+ "google.golang.org/grpc/security/advancedtls"
+ "gopkg.in/h2non/gock.v1"
+
+ "isc.org/stork"
+ agentapi "isc.org/stork/api"
+ "isc.org/stork/testutil"
+)
+
// FakeAppMonitor is a test double for the AppMonitor interface. It serves
// a fixed list of apps instead of detecting them on the system.
type FakeAppMonitor struct {
	// Apps is the canned list returned by GetApps/GetApp.
	Apps []App
}
+
// mockRndc mocks successful rndc output. Only a trailing "status" argument
// is recognized; any other command line yields "unknown command".
func mockRndc(command []string) ([]byte, error) {
	if n := len(command); n > 0 && command[n-1] == "status" {
		return []byte("Server is up and running"), nil
	}

	// unknown command.
	return []byte("unknown command"), nil
}
+
// mockRndcError mocks an rndc invocation that fails: empty output plus
// a non-nil error.
func mockRndcError(command []string) ([]byte, error) {
	log.Debugf("mock rndc: error")

	return []byte(""), errors.Errorf("mocking an error")
}

// mockRndcEmpty mocks an rndc invocation that succeeds but produces no
// output at all.
func mockRndcEmpty(command []string) ([]byte, error) {
	log.Debugf("mock rndc: empty")

	return []byte(""), nil
}
+
// Initializes StorkAgent instance and context used by the tests.
// The agent's HTTP client is intercepted by gock so tests can mock
// Kea/named responses without real network traffic.
func setupAgentTest() (*StorkAgent, context.Context) {
	// NewHTTPClient(true) — presumably the flag skips TLS verification;
	// confirm against the HTTPClient constructor.
	httpClient := NewHTTPClient(true)
	gock.InterceptClient(httpClient.client)

	fam := FakeAppMonitor{}
	sa := &StorkAgent{
		AppMonitor:     &fam,
		HTTPClient:     httpClient,
		logTailer:      newLogTailer(),
		keaInterceptor: newKeaInterceptor(),
	}
	sa.Setup()
	ctx := context.Background()
	return sa, ctx
}
+
// GetApps returns the canned list of apps.
func (fam *FakeAppMonitor) GetApps() []App {
	return fam.Apps
}

// Stub function for AppMonitor. It behaves in the same way as original one:
// it returns the first app matching the app type and an access point with
// the given type, address and port, or nil when nothing matches.
func (fam *FakeAppMonitor) GetApp(appType, apType, address string, port int64) App {
	for _, app := range fam.Apps {
		if app.GetBaseApp().Type != appType {
			continue
		}
		for _, ap := range app.GetBaseApp().AccessPoints {
			if ap.Type == apType && ap.Address == address && ap.Port == port {
				return app
			}
		}
	}
	return nil
}

// Shutdown is a no-op in the fake monitor.
func (fam *FakeAppMonitor) Shutdown() {
}

// Start is a no-op in the fake monitor.
func (fam *FakeAppMonitor) Start(storkAgent *StorkAgent) {
}
+
+// makeAccessPoint is an utility to make single element app access point slice.
+func makeAccessPoint(tp, address, key string, port int64, useSecureProtocol bool) (ap []AccessPoint) {
+ return append(ap, AccessPoint{
+ Type: tp,
+ Address: address,
+ Port: port,
+ Key: key,
+ UseSecureProtocol: useSecureProtocol,
+ })
+}
+
// Check if NewStorkAgent can be invoked and sets SA members.
func TestNewStorkAgent(t *testing.T) {
	fam := &FakeAppMonitor{}
	// An empty CLI context is enough: the constructor must not require
	// any particular flags to be set.
	settings := cli.NewContext(nil, flag.NewFlagSet("", 0), nil)
	sa := NewStorkAgent(settings, fam)
	require.NotNil(t, sa.AppMonitor)
	require.NotNil(t, sa.HTTPClient)
}

// Check if an agent returns a response to a ping message.
func TestPing(t *testing.T) {
	sa, ctx := setupAgentTest()
	args := &agentapi.PingReq{}
	rsp, err := sa.Ping(ctx, args)
	require.NoError(t, err)
	require.NotNil(t, rsp)
}
+
+// Check if GetState works.
+func TestGetState(t *testing.T) {
+ sa, ctx := setupAgentTest()
+
+ // app monitor is empty, no apps should be returned by GetState
+ rsp, err := sa.GetState(ctx, &agentapi.GetStateReq{})
+ require.NoError(t, err)
+ require.Equal(t, rsp.AgentVersion, stork.Version)
+ require.Empty(t, rsp.Apps)
+
+ // add some apps to app monitor so GetState should return something
+ var apps []App
+ apps = append(apps, &KeaApp{
+ BaseApp: BaseApp{
+ Type: AppTypeKea,
+ AccessPoints: makeAccessPoint(AccessPointControl, "1.2.3.1", "", 1234, false),
+ },
+ HTTPClient: nil,
+ })
+
+ accessPoints := makeAccessPoint(AccessPointControl, "2.3.4.4", "abcd", 2345, true)
+ accessPoints = append(accessPoints, AccessPoint{
+ Type: AccessPointStatistics,
+ Address: "2.3.4.5",
+ Port: 2346,
+ Key: "",
+ UseSecureProtocol: false,
+ })
+
+ apps = append(apps, &Bind9App{
+ BaseApp: BaseApp{
+ Type: AppTypeBind9,
+ AccessPoints: accessPoints,
+ },
+ RndcClient: nil,
+ })
+ fam, _ := sa.AppMonitor.(*FakeAppMonitor)
+ fam.Apps = apps
+ rsp, err = sa.GetState(ctx, &agentapi.GetStateReq{})
+ require.NoError(t, err)
+ require.Equal(t, rsp.AgentVersion, stork.Version)
+ require.Equal(t, stork.Version, rsp.AgentVersion)
+ require.Len(t, rsp.Apps, 2)
+
+ keaApp := rsp.Apps[0]
+ require.Len(t, keaApp.AccessPoints, 1)
+ point := keaApp.AccessPoints[0]
+ require.Equal(t, AccessPointControl, point.Type)
+ require.Equal(t, "1.2.3.1", point.Address)
+ require.False(t, point.UseSecureProtocol)
+ require.EqualValues(t, 1234, point.Port)
+ require.Empty(t, point.Key)
+
+ bind9App := rsp.Apps[1]
+ require.Len(t, bind9App.AccessPoints, 2)
+ // sorted by port
+ point = bind9App.AccessPoints[0]
+ require.Equal(t, AccessPointControl, point.Type)
+ require.Equal(t, "2.3.4.4", point.Address)
+ require.EqualValues(t, 2345, point.Port)
+ require.Equal(t, "abcd", point.Key)
+ require.True(t, point.UseSecureProtocol)
+ point = bind9App.AccessPoints[1]
+ require.Equal(t, AccessPointStatistics, point.Type)
+ require.Equal(t, "2.3.4.5", point.Address)
+ require.EqualValues(t, 2346, point.Port)
+ require.False(t, point.UseSecureProtocol)
+ require.Empty(t, point.Key)
+}
+
+// Helper function for unzipping buffers. It does not return
+// any error, it expects that everything will go fine.
+func doGunzip(data []byte) string {
+ zr, err := gzip.NewReader(bytes.NewReader(data))
+ if err != nil {
+ panic("problem with gunzip: NewReader")
+ }
+ unpackedResp, err := io.ReadAll(zr)
+ if err != nil {
+ panic("problem with gunzip: ReadAll")
+ }
+ if err := zr.Close(); err != nil {
+ panic("problem with gunzip: Close")
+ }
+ return string(unpackedResp)
+}
+
// Test forwarding command to Kea when HTTP 200 status code
// is returned.
func TestForwardToKeaOverHTTPSuccess(t *testing.T) {
	sa, ctx := setupAgentTest()

	// Expect appropriate content type and the body. If they are not matched
	// an error will be raised.
	defer gock.Off()
	gock.New("http://localhost:45634").
		MatchHeader("Content-Type", "application/json").
		JSON(map[string]string{"command": "list-commands"}).
		Post("/").
		Reply(200).
		JSON([]map[string]int{{"result": 0}})

	// Forward the request with the expected body.
	req := &agentapi.ForwardToKeaOverHTTPReq{
		Url:         "http://localhost:45634/",
		KeaRequests: []*agentapi.KeaRequest{{Request: "{ \"command\": \"list-commands\"}"}},
	}

	// Kea should respond with non-empty body and the status code 200.
	// This should result in no error and the body should be available
	// in the response.
	rsp, err := sa.ForwardToKeaOverHTTP(ctx, req)
	require.NotNil(t, rsp)
	require.NoError(t, err)
	require.Len(t, rsp.KeaResponses, 1)
	// The agent gzips Kea responses, so unpack before comparing.
	require.JSONEq(t, "[{\"result\":0}]", doGunzip(rsp.KeaResponses[0].Response))
}

// Test forwarding command to Kea when HTTP 400 (Bad Request) status
// code is returned.
func TestForwardToKeaOverHTTPBadRequest(t *testing.T) {
	sa, ctx := setupAgentTest()

	defer gock.Off()
	gock.New("http://localhost:45634").
		MatchHeader("Content-Type", "application/json").
		Post("/").
		Reply(400).
		JSON([]map[string]string{{"HttpCode": "Bad Request"}})

	req := &agentapi.ForwardToKeaOverHTTPReq{
		Url:         "http://localhost:45634/",
		KeaRequests: []*agentapi.KeaRequest{{Request: "{ \"command\": \"list-commands\"}"}},
	}

	// The response to the forwarded command should contain HTTP
	// status code 400, but that should not raise an error in the
	// agent.
	rsp, err := sa.ForwardToKeaOverHTTP(ctx, req)
	require.NotNil(t, rsp)
	require.NoError(t, err)
	require.Len(t, rsp.KeaResponses, 1)
	require.JSONEq(t, "[{\"HttpCode\":\"Bad Request\"}]", doGunzip(rsp.KeaResponses[0].Response))
}

// Test forwarding command to Kea when no body is returned.
func TestForwardToKeaOverHTTPEmptyBody(t *testing.T) {
	sa, ctx := setupAgentTest()

	defer gock.Off()
	gock.New("http://localhost:45634").
		MatchHeader("Content-Type", "application/json").
		Post("/").
		Reply(200)

	req := &agentapi.ForwardToKeaOverHTTPReq{
		Url:         "http://localhost:45634/",
		KeaRequests: []*agentapi.KeaRequest{{Request: "{ \"command\": \"list-commands\"}"}},
	}

	// Forward the command to Kea. The response contains no body, but
	// this should not result in an error. The command sender should
	// deal with this as well as with other issues with the response
	// formatting.
	rsp, err := sa.ForwardToKeaOverHTTP(ctx, req)
	require.NotNil(t, rsp)
	require.NoError(t, err)
	require.Len(t, rsp.KeaResponses, 1)
	// Gunzipping an empty payload yields an empty string.
	require.Len(t, doGunzip(rsp.KeaResponses[0].Response), 0)
}

// Test forwarding command when Kea is unavailable.
func TestForwardToKeaOverHTTPNoKea(t *testing.T) {
	sa, ctx := setupAgentTest()

	// No gock mock is registered here, so the HTTP request fails.
	req := &agentapi.ForwardToKeaOverHTTPReq{
		Url:         "http://localhost:45634/",
		KeaRequests: []*agentapi.KeaRequest{{Request: "{ \"command\": \"list-commands\"}"}},
	}

	// Kea is unreachable, so we'll have to signal an error to the sender.
	// The response should be empty.
	rsp, err := sa.ForwardToKeaOverHTTP(ctx, req)
	require.NotNil(t, rsp)
	require.NoError(t, err)
	require.Len(t, rsp.KeaResponses, 1)
	require.NotEqual(t, 0, rsp.KeaResponses[0].Status.Code)
	require.Len(t, rsp.KeaResponses[0].Response, 0)
}
+
// Test successful forwarding stats request to named.
func TestForwardToNamedStatsSuccess(t *testing.T) {
	sa, ctx := setupAgentTest()

	// Expect appropriate content type and the body. If they are not matched
	// an error will be raised.
	defer gock.Off()
	gock.New("http://localhost:45634/").
		MatchHeader("Content-Type", "application/json").
		Post("/").
		Reply(200).
		JSON([]map[string]int{{"result": 0}})

	// Forward the request with the expected body.
	req := &agentapi.ForwardToNamedStatsReq{
		Url:               "http://localhost:45634/json/v1",
		NamedStatsRequest: &agentapi.NamedStatsRequest{Request: ""},
	}

	// Named should respond with non-empty body and the status code 200.
	// This should result in no error and the body should be available
	// in the response.
	rsp, err := sa.ForwardToNamedStats(ctx, req)
	require.NotNil(t, rsp)
	require.NoError(t, err)
	require.NotNil(t, rsp.NamedStatsResponse)
	// Unlike Kea responses, named stats responses are not gzipped.
	require.JSONEq(t, "[{\"result\":0}]", rsp.NamedStatsResponse.Response)
}

// Test forwarding command to named when HTTP 400 (Bad Request) status
// code is returned.
func TestForwardToNamedStatsBadRequest(t *testing.T) {
	sa, ctx := setupAgentTest()

	defer gock.Off()
	gock.New("http://localhost:45634/json/v1").
		MatchHeader("Content-Type", "application/json").
		Post("/").
		Reply(400).
		JSON([]map[string]string{{"HttpCode": "Bad Request"}})

	req := &agentapi.ForwardToNamedStatsReq{
		Url:               "http://localhost:45634/json/v1",
		NamedStatsRequest: &agentapi.NamedStatsRequest{Request: ""},
	}

	// The response to the forwarded command should contain HTTP
	// status code 400, but that should not raise an error in the
	// agent.
	rsp, err := sa.ForwardToNamedStats(ctx, req)
	require.NotNil(t, rsp)
	require.NoError(t, err)
	require.NotNil(t, rsp.NamedStatsResponse)
	require.JSONEq(t, "[{\"HttpCode\":\"Bad Request\"}]", rsp.NamedStatsResponse.Response)
	require.NotEqual(t, 0, rsp.NamedStatsResponse.Status.Code)
}

// Test forwarding command to named statistics-channel when no body is returned.
func TestForwardToNamedStatsHTTPEmptyBody(t *testing.T) {
	sa, ctx := setupAgentTest()

	defer gock.Off()
	gock.New("http://localhost:45634/json/v1").
		MatchHeader("Content-Type", "application/json").
		Post("/").
		Reply(200)

	req := &agentapi.ForwardToNamedStatsReq{
		Url:               "http://localhost:45634/json/v1",
		NamedStatsRequest: &agentapi.NamedStatsRequest{Request: ""},
	}

	// Forward the command to named statistics-channel.
	// The response contains no body, but this should not result in an
	// error. The command sender should deal with this as well as with
	// other issues with the response formatting.
	rsp, err := sa.ForwardToNamedStats(ctx, req)
	require.NotNil(t, rsp)
	require.NoError(t, err)
	require.NotNil(t, rsp.NamedStatsResponse)
	require.Len(t, rsp.NamedStatsResponse.Response, 0)
}

// Test forwarding statistics request when named is unavailable.
func TestForwardToNamedStatsNoNamed(t *testing.T) {
	sa, ctx := setupAgentTest()

	// No gock mock is registered here, so the HTTP request fails.
	req := &agentapi.ForwardToNamedStatsReq{
		Url:               "http://localhost:45634/json/v1",
		NamedStatsRequest: &agentapi.NamedStatsRequest{Request: ""},
	}

	// Named is unreachable, so we'll have to signal an error to the sender.
	// The response should be empty.
	rsp, err := sa.ForwardToNamedStats(ctx, req)
	require.NotNil(t, rsp)
	require.NoError(t, err)
	require.NotNil(t, rsp.NamedStatsResponse)
	require.NotEqual(t, 0, rsp.NamedStatsResponse.Status.Code)
	require.Len(t, rsp.NamedStatsResponse.Response, 0)
}
+
// Test a successful rndc command.
func TestForwardRndcCommandSuccess(t *testing.T) {
	sa, ctx := setupAgentTest()

	// Register a BIND 9 app whose rndc client uses the successful mock.
	accessPoints := makeAccessPoint(AccessPointControl, "127.0.0.1", "_", 1234, false)
	var apps []App
	apps = append(apps, &Bind9App{
		BaseApp: BaseApp{
			Type:         AppTypeBind9,
			AccessPoints: accessPoints,
		},
		RndcClient: NewRndcClient(mockRndc),
	})
	fam, _ := sa.AppMonitor.(*FakeAppMonitor)
	fam.Apps = apps

	cmd := &agentapi.RndcRequest{Request: "status"}

	req := &agentapi.ForwardRndcCommandReq{
		Address:     "127.0.0.1",
		Port:        1234,
		RndcRequest: cmd,
	}

	// Expect no error, an OK status code, and an empty status message.
	rsp, err := sa.ForwardRndcCommand(ctx, req)
	require.NotNil(t, rsp)
	require.NoError(t, err)
	require.Equal(t, agentapi.Status_OK, rsp.Status.Code)
	require.Empty(t, rsp.Status.Message)
	// Check expected output.
	require.Equal(t, rsp.RndcResponse.Response, "Server is up and running")

	// Empty request: the mock reports it as an unknown command, but the
	// agent still forwards it successfully.
	cmd = &agentapi.RndcRequest{Request: ""}
	req.RndcRequest = cmd
	rsp, err = sa.ForwardRndcCommand(ctx, req)
	require.NotNil(t, rsp)
	require.NoError(t, err)
	require.Equal(t, agentapi.Status_OK, rsp.Status.Code)
	require.Empty(t, rsp.Status.Message)
	require.Equal(t, rsp.RndcResponse.Response, "unknown command")

	// Unknown request.
	cmd = &agentapi.RndcRequest{Request: "foobar"}
	req.RndcRequest = cmd
	rsp, err = sa.ForwardRndcCommand(ctx, req)
	require.NotNil(t, rsp)
	require.NoError(t, err)
	require.Equal(t, agentapi.Status_OK, rsp.Status.Code)
	require.Empty(t, rsp.Status.Message)
	require.Equal(t, rsp.RndcResponse.Response, "unknown command")
}

// Test rndc command failed to forward.
func TestForwardRndcCommandError(t *testing.T) {
	sa, ctx := setupAgentTest()

	// Register a BIND 9 app whose rndc client always fails.
	accessPoints := makeAccessPoint(AccessPointControl, "127.0.0.1", "_", 1234, false)
	var apps []App
	apps = append(apps, &Bind9App{
		BaseApp: BaseApp{
			Type:         AppTypeBind9,
			AccessPoints: accessPoints,
		},
		RndcClient: NewRndcClient(mockRndcError),
	})
	fam, _ := sa.AppMonitor.(*FakeAppMonitor)
	fam.Apps = apps

	cmd := &agentapi.RndcRequest{Request: "status"}

	req := &agentapi.ForwardRndcCommandReq{
		Address:     "127.0.0.1",
		Port:        1234,
		RndcRequest: cmd,
	}

	// Expect an error status code and some message.
	rsp, err := sa.ForwardRndcCommand(ctx, req)
	require.NotNil(t, rsp)
	require.NoError(t, err)
	require.Equal(t, agentapi.Status_ERROR, rsp.Status.Code)
	require.NotEmpty(t, rsp.Status.Message)
}

// Test rndc command when there is no app.
func TestForwardRndcCommandNoApp(t *testing.T) {
	sa, ctx := setupAgentTest()

	// Note: no BIND 9 app is registered in the fake app monitor.
	cmd := &agentapi.RndcRequest{Request: "status"}

	req := &agentapi.ForwardRndcCommandReq{
		Address:     "127.0.0.1",
		Port:        1234,
		RndcRequest: cmd,
	}

	// Expect an error status code and some message.
	rsp, err := sa.ForwardRndcCommand(ctx, req)
	require.NotNil(t, rsp)
	require.NoError(t, err)
	require.Equal(t, agentapi.Status_ERROR, rsp.Status.Code)
	require.EqualValues(t, "Cannot find BIND 9 app", rsp.Status.Message)
}

// Test rndc command successfully forwarded, but bad response.
func TestForwardRndcCommandEmpty(t *testing.T) {
	sa, ctx := setupAgentTest()

	// Register a BIND 9 app whose rndc client returns empty output.
	accessPoints := makeAccessPoint(AccessPointControl, "127.0.0.1", "_", 1234, false)
	var apps []App
	apps = append(apps, &Bind9App{
		BaseApp: BaseApp{
			Type:         AppTypeBind9,
			AccessPoints: accessPoints,
		},
		RndcClient: NewRndcClient(mockRndcEmpty),
	})
	fam, _ := sa.AppMonitor.(*FakeAppMonitor)
	fam.Apps = apps

	cmd := &agentapi.RndcRequest{Request: "status"}

	req := &agentapi.ForwardRndcCommandReq{
		Address:     "127.0.0.1",
		Port:        1234,
		RndcRequest: cmd,
	}

	// Empty output is not normal, but we are just forwarding, so expect
	// no error, an OK status code, and an empty status message.
	rsp, err := sa.ForwardRndcCommand(ctx, req)
	require.NotNil(t, rsp)
	require.NoError(t, err)
	require.Equal(t, agentapi.Status_OK, rsp.Status.Code)
	require.Empty(t, rsp.Status.Message)
}
+
// Test that the tail of the text file can be fetched.
func TestTailTextFile(t *testing.T) {
	sa, ctx := setupAgentTest()

	// NOTE(review): rand.Seed is deprecated since Go 1.20, and a random
	// name in the working directory could collide; os.CreateTemp (or
	// t.TempDir) would be safer. Left as-is because math/rand and time
	// are imported solely for this test.
	rand.Seed(time.Now().UnixNano())
	filename := fmt.Sprintf("test%d.log", rand.Int63())
	f, err := os.Create(filename)
	require.NoError(t, err)
	defer func() {
		_ = os.Remove(filename)
	}()

	fmt.Fprintln(f, "This is a file")
	fmt.Fprintln(f, "which is used")
	fmt.Fprintln(f, "in testing TailTextFile")

	// The tailer only serves files explicitly allowed.
	sa.logTailer.allow(filename)

	// Forward the request with the expected body.
	// Offset 38 skips the first line ("This is a file\n" plus part of
	// the window), so only the last two lines are expected back.
	req := &agentapi.TailTextFileReq{
		Offset: 38,
		Path:   filename,
	}

	rsp, err := sa.TailTextFile(ctx, req)
	require.NotNil(t, rsp)
	require.NoError(t, err)
	require.Len(t, rsp.Lines, 2)
	require.Equal(t, "which is used", rsp.Lines[0])
	require.Equal(t, "in testing TailTextFile", rsp.Lines[1])

	// Test the case when the offset is beyond the file size.
	// The whole file should be returned.
	req = &agentapi.TailTextFileReq{
		Offset: 200,
		Path:   filename,
	}

	rsp, err = sa.TailTextFile(ctx, req)
	require.NotNil(t, rsp)
	require.NoError(t, err)
	require.Len(t, rsp.Lines, 3)
	require.Equal(t, "This is a file", rsp.Lines[0])
	require.Equal(t, "which is used", rsp.Lines[1])
	require.Equal(t, "in testing TailTextFile", rsp.Lines[2])
}
+
// Checks if getRootCertificates:
// - returns an error if the cert file doesn't exist,
// - returns an error if the cert content is invalid.
func TestGetRootCertificatesForMissingOrInvalidFiles(t *testing.T) {
	params := &advancedtls.GetRootCAsParams{}

	// prepare temp dir for cert files
	tmpDir, err := os.MkdirTemp("", "reg")
	require.NoError(t, err)
	defer os.RemoveAll(tmpDir)
	// Mkdir error deliberately ignored: a failure surfaces through the
	// assertions below anyway.
	os.Mkdir(path.Join(tmpDir, "certs"), 0o755)
	// RememberPaths snapshots the global cert path variables so they
	// can be restored after the test overrides RootCAFile.
	restoreCerts := RememberPaths()
	defer restoreCerts()
	RootCAFile = path.Join(tmpDir, "certs/ca.pem")

	// missing cert file error
	_, err = getRootCertificates(params)
	require.EqualError(t, err,
		fmt.Sprintf("could not read CA certificate: %s/certs/ca.pem: open %s/certs/ca.pem: no such file or directory",
			tmpDir, tmpDir))

	// store bad cert
	err = os.WriteFile(RootCAFile, []byte("CACertPEM"), 0o600)
	require.NoError(t, err)
	_, err = getRootCertificates(params)
	require.EqualError(t, err, "failed to append client certs")
}

// Checks if getRootCertificates reads and returns correct certificate successfully.
func TestGetRootCertificates(t *testing.T) {
	// GenerateSelfSignedCerts writes a usable CA/cert/key set to the
	// configured paths; cleanup removes them.
	cleanup, err := GenerateSelfSignedCerts()
	require.NoError(t, err)
	defer cleanup()

	// all should be ok
	params := &advancedtls.GetRootCAsParams{}
	result, err := getRootCertificates(params)
	require.NoError(t, err)
	require.NotNil(t, result)
	require.NotNil(t, result.TrustCerts)
}

// Checks if getIdentityCertificatesForServer:
// - returns an error if the key file doesn't exist,
// - returns an error if the key or cert contents are invalid.
func TestGetIdentityCertificatesForServerForMissingOrInvalid(t *testing.T) {
	info := &tls.ClientHelloInfo{}

	// prepare temp dir for cert files
	tmpDir, err := os.MkdirTemp("", "reg")
	require.NoError(t, err)
	defer os.RemoveAll(tmpDir)
	// Mkdir errors deliberately ignored, as above.
	os.Mkdir(path.Join(tmpDir, "certs"), 0o755)
	os.Mkdir(path.Join(tmpDir, "tokens"), 0o755)
	restoreCerts := RememberPaths()
	defer restoreCerts()
	KeyPEMFile = path.Join(tmpDir, "certs/key.pem")
	CertPEMFile = path.Join(tmpDir, "certs/cert.pem")

	// missing key files
	_, err = getIdentityCertificatesForServer(info)
	require.EqualError(t, err,
		fmt.Sprintf("could not load key PEM file: %s/certs/key.pem: open %s/certs/key.pem: no such file or directory", tmpDir, tmpDir))

	// store bad content to files
	err = os.WriteFile(KeyPEMFile, []byte("KeyPEMFile"), 0o600)
	require.NoError(t, err)
	err = os.WriteFile(CertPEMFile, []byte("CertPEMFile"), 0o600)
	require.NoError(t, err)
	_, err = getIdentityCertificatesForServer(info)
	require.EqualError(t, err, "could not setup TLS key pair: tls: failed to find any PEM data in certificate input")
}

// Checks if getIdentityCertificatesForServer reads and returns
// correct key and certificate pair successfully.
func TestGetIdentityCertificatesForServer(t *testing.T) {
	cleanup, err := GenerateSelfSignedCerts()
	require.NoError(t, err)
	defer cleanup()

	// now it should work
	info := &tls.ClientHelloInfo{}
	certs, err := getIdentityCertificatesForServer(info)
	require.NoError(t, err)
	require.NotEmpty(t, certs)
}

// Check if newGRPCServerWithTLS can create gRPC server.
func TestNewGRPCServerWithTLS(t *testing.T) {
	srv, err := newGRPCServerWithTLS()
	require.NoError(t, err)
	require.NotNil(t, srv)
}
+
// Check if the Stork Agent prints the host and port parameters.
func TestHostAndPortParams(t *testing.T) {
	// Arrange
	sa, _ := setupAgentTest()

	flags := flag.NewFlagSet("test", 0)
	flags.String("host", "127.0.0.1", "usage")
	flags.Int("port", 9876, "usage")
	settings := cli.NewContext(nil, flags, nil)
	sa.Settings = settings

	// We shut down the server before starting. This causes the Serve
	// call to fail immediately instead of blocking the test.
	sa.Shutdown(false)

	// Act
	var serveErr error
	stdout, _, err := testutil.CaptureOutput(func() {
		serveErr = sa.Serve()
	})

	// Assert
	require.Error(t, serveErr)
	require.NoError(t, err)
	stdoutStr := string(stdout)
	// The startup log line must mention the configured address.
	require.Contains(t, stdoutStr, "127.0.0.1")
	require.Contains(t, stdoutStr, "9876")
}
diff --git a/backend/agent/bind9.go b/backend/agent/bind9.go
new file mode 100644
index 0000000..06c64e4
--- /dev/null
+++ b/backend/agent/bind9.go
@@ -0,0 +1,460 @@
+package agent
+
+import (
+ "fmt"
+ "os"
+ "os/exec"
+ "path"
+ "path/filepath"
+ "regexp"
+ "strconv"
+ "strings"
+
+ "github.com/pkg/errors"
+ log "github.com/sirupsen/logrus"
+
+ storkutil "isc.org/stork/util"
+)
+
// Bind9Daemon holds the state of a single BIND 9 daemon (named):
// its PID, name, detected version, and whether it is active.
type Bind9Daemon struct {
	Pid     int32
	Name    string
	Version string
	Active  bool
}

// Bind9State describes the state of a detected BIND 9 app: the app
// version, whether it is active, and the underlying daemon details.
type Bind9State struct {
	Version string
	Active  bool
	Daemon  Bind9Daemon
}
+
// It holds common and BIND 9 specific runtime information.
type Bind9App struct {
	BaseApp
	RndcClient *RndcClient // to communicate with BIND 9 via rndc
}

// Get base information about BIND 9 app.
func (ba *Bind9App) GetBaseApp() *BaseApp {
	return &ba.BaseApp
}

// Detect allowed logs provided by BIND 9.
// TODO: currently it is not implemented and not used,
// it returns always empty list and no error.
func (ba *Bind9App) DetectAllowedLogs() ([]string, error) {
	return nil, nil
}
+
// List of BIND 9 executables used during app detection.
const (
	namedCheckconfExec = "named-checkconf"
	rndcExec           = "rndc"
)

// rndc key file name.
const RndcKeyFile = "rndc.key"

// Default ports for rndc and stats channel.
const (
	RndcDefaultPort         = 953
	StatsChannelDefaultPort = 80
)

// Object for interacting with named using rndc.
type RndcClient struct {
	// execute runs a prepared rndc command line and returns its output.
	execute CommandExecutor
	// BaseCommand is the rndc invocation prefix (executable path plus
	// -s/-p and key options) built by DetermineDetails.
	BaseCommand []string
}

// CommandExecutor takes an array of strings, with the first element of the
// array being the program to call, followed by its arguments. It returns
// the command output, and possibly an error (for example if running the
// command failed).
type CommandExecutor func([]string) ([]byte, error)
+
+// Create an rndc client to communicate with BIND 9 named daemon.
+func NewRndcClient(ce CommandExecutor) *RndcClient {
+ rndcClient := &RndcClient{
+ execute: ce,
+ }
+ return rndcClient
+}
+
+// Determine rndc details in the system.
+// It find rndc executable and prepare base command with all necessary
+// parameters including rndc secret key.
+func (rc *RndcClient) DetermineDetails(baseNamedDir, bind9ConfDir string, ctrlAddress string, ctrlPort int64, ctrlKey string) error {
+ rndcPath, err := determineBinPath(baseNamedDir, rndcExec)
+ if err != nil {
+ return err
+ }
+
+ cmd := []string{rndcPath, "-s", ctrlAddress, "-p", fmt.Sprintf("%d", ctrlPort)}
+
+ if len(ctrlKey) > 0 {
+ cmd = append(cmd, "-y")
+ cmd = append(cmd, ctrlKey)
+ } else {
+ keyPath := path.Join(bind9ConfDir, RndcKeyFile)
+ if _, err := os.Stat(keyPath); err == nil {
+ cmd = append(cmd, "-k")
+ cmd = append(cmd, keyPath)
+ } else {
+ return errors.New("cannot determine rndc key")
+ }
+ }
+ rc.BaseCommand = cmd
+ return nil
+}
+
// Send command to named using rndc executable.
// The given command words are appended to the base command prepared by
// DetermineDetails and the whole line is run via the client's executor.
func (rc *RndcClient) SendCommand(command []string) (output []byte, err error) {
	var rndcCommand []string
	rndcCommand = append(rndcCommand, rc.BaseCommand...)
	rndcCommand = append(rndcCommand, command...)
	log.Debugf("rndc: %+v", rndcCommand)

	return rc.execute(rndcCommand)
}
+
+// getRndcKey looks for the key with a given `name` in `contents`.
+//
+// Example key clause:
+//
+// key "name" {
+// algorithm "hmac-sha256";
+// secret "OmItW1lOyLVUEuvv+Fme+Q==";
+// };
+//
+func getRndcKey(contents, name string) (controlKey string) {
+ ptrn := regexp.MustCompile(`(?s)keys\s+\"(\S+)\"\s+\{(.*)\}\s*;`)
+ keys := ptrn.FindAllStringSubmatch(contents, -1)
+ if len(keys) == 0 {
+ return ""
+ }
+
+ for _, key := range keys {
+ if key[1] != name {
+ continue
+ }
+ ptrn = regexp.MustCompile(`(?s)algorithm\s+\"(\S+)\";`)
+ algorithm := ptrn.FindStringSubmatch(key[2])
+ if len(algorithm) < 2 {
+ log.Warnf("no key algorithm found for name %s", name)
+ return ""
+ }
+
+ ptrn = regexp.MustCompile(`(?s)secret\s+\"(\S+)\";`)
+ secret := ptrn.FindStringSubmatch(key[2])
+ if len(secret) < 2 {
+ log.Warnf("no key secret found for name %s", name)
+ return ""
+ }
+
+ // this key clause matches the name we are looking for
+ controlKey = fmt.Sprintf("%s:%s", algorithm[1], secret[1])
+ break
+ }
+
+ return controlKey
+}
+
// parseInetSpec parses an inet statement from a named configuration excerpt.
// The inet statement is defined by inet_spec:
//
//    inet_spec = ( ip_addr | * ) [ port ip_port ]
//                allow { address_match_list }
//                keys { key_list };
//
// This function returns the ip_addr, port and the first key that is
// referenced in the key_list. If instead of an ip_addr, the asterisk (*) is
// specified, this function will return 'localhost' as an address.
func parseInetSpec(config, excerpt string) (address string, port int64, key string) {
	// This pattern is built up like this:
	// - inet\s+                 - inet
	// - (\S+\s*\S*\s*\d*)\s+    - ( ip_addr | * ) [ port ip_port ]
	// - allow\s*                - allow
	// - \{(?:\s*\S+\s*;\s*)+)\} - address_match_list
	// - (.*);                   - keys { key_list }; (pattern matched below)
	ptrn := regexp.MustCompile(`(?s)inet\s+(\S+\s*\S*\s*\d*)\s+allow\s*\{(?:\s*\S+\s*;\s*)+\}(.*);`)
	match := ptrn.FindStringSubmatch(excerpt)
	if len(match) == 0 {
		log.Warnf("cannot parse BIND 9 inet configuration: no match (%+v)", config)
		return "", 0, ""
	}

	// Split "( ip_addr | * ) [ port ip_port ]" into at most 3 tokens.
	inetSpec := regexp.MustCompile(`\s+`).Split(match[1], 3)
	switch len(inetSpec) {
	case 1:
		// Bare address, no port.
		address = inetSpec[0]
	case 3:
		// Address followed by "port <number>".
		address = inetSpec[0]
		if inetSpec[1] != "port" {
			log.Warnf("cannot parse BIND 9 control port: bad port statement (%+v)", inetSpec)
			return "", 0, ""
		}

		iPort, err := strconv.Atoi(inetSpec[2])
		if err != nil {
			log.Warnf("cannot parse BIND 9 control port: %+v (%+v)", inetSpec, err)
			return "", 0, ""
		}
		port = int64(iPort)
	case 2:
		// NOTE(review): two tokens (e.g. "addr port" without a number)
		// fall through silently, leaving address empty — presumably
		// treated as malformed; confirm this is intentional.
	default:
		log.Warnf("cannot parse BIND 9 inet_spec configuration: no match (%+v)", inetSpec)
		return "", 0, ""
	}

	if len(match) == 3 {
		// Find a key clause. This pattern is built up like this:
		// keys\s*                 - keys
		// \{\s*                   - {
		// \"(\S+)\"\s*;           - key_list (first)
		// (?:\s*\"\S+\"\s*;\s*)*  - key_list (remainder)
		// \s}\s*                  - }
		ptrn = regexp.MustCompile(`(?s)keys\s*\{\s*\"(\S+)\"\s*;(?:\s*\"\S+\"\s*;\s*)*\}\s*`)
		keyName := ptrn.FindStringSubmatch(match[2])
		if len(keyName) > 1 {
			// Resolve the referenced key clause in the full config.
			key = getRndcKey(config, keyName[1])
		}
	}

	if address == "*" {
		address = "localhost"
	}

	return address, port, key
}
+
+// getCtrlAddressFromBind9Config retrieves the rndc control access address,
+// port, and secret key (if configured) from the configuration `text`.
+//
+// Multiple controls clauses may be configured but currently this function
+// only matches the first one. Multiple access points may be listed inside
+// a single controls clause, but this function currently only matches the
+// first in the list. A controls clause may look like this:
+//
+// controls {
+// inet 127.0.0.1 allow {localhost;};
+// inet * port 7766 allow {"rndc-users";} keys {"rndc-remote";};
+// };
+//
+// In this example, "rndc-users" and "rndc-remote" refer to an acl and key
+// clauses.
+//
+// Finding the key is done by looking if the control access point has a
+// keys parameter and if so, it looks in `path` for a key clause with the
+// same name.
+func getCtrlAddressFromBind9Config(text string) (controlAddress string, controlPort int64, controlKey string) {
+ // Match the following clause:
+ // controls {
+ // inet inet_spec [inet_spec] ;
+ // };
+ ptrn := regexp.MustCompile(`(?s)controls\s*\{\s*(.*)\s*\}\s*;`)
+ controls := ptrn.FindStringSubmatch(text)
+ if len(controls) == 0 {
+ return "", 0, ""
+ }
+
+ // We only pick the first match, but the controls clause
+ // can list multiple control access points.
+ controlAddress, controlPort, controlKey = parseInetSpec(text, controls[1])
+ if controlAddress != "" {
+ // If no port was provided, use the default rndc port.
+ if controlPort == 0 {
+ controlPort = RndcDefaultPort
+ }
+ }
+ return controlAddress, controlPort, controlKey
+}
+
+// getStatisticsChannelFromBind9Config retrieves the statistics channel access
+// address, port, and secret key (if configured) from the configuration `text`.
+//
+// Multiple statistics-channels clauses may be configured but currently this
+// function only matches the first one. Multiple access points may be listed
+// inside a single controls clause, but this function currently only matches
+// the first in the list. A statistics-channels clause may look like this:
+//
+// statistics-channels {
+// inet 10.1.10.10 port 8080 allow { 192.168.2.10; 10.1.10.2; };
+// inet 127.0.0.1 port 8080 allow { "stats-clients" };
+// };
+//
+// In this example, "stats-clients" refers to an acl clause.
+//
+// Finding the key is done by looking if the control access point has a
+// keys parameter and if so, it looks in `path` for a key clause with the
+// same name.
+func getStatisticsChannelFromBind9Config(text string) (statsAddress string, statsPort int64, statsKey string) {
+ // Match the following clause:
+ // statistics-channels {
+ // inet inet_spec [inet_spec] ;
+ // };
+ ptrn := regexp.MustCompile(`(?s)statistics-channels\s*\{\s*(.*)\s*\}\s*;`)
+ channels := ptrn.FindStringSubmatch(text)
+ if len(channels) == 0 {
+ return "", 0, ""
+ }
+
+ // We only pick the first match, but the statistics-channels clause
+ // can list multiple control access points.
+ statsAddress, statsPort, statsKey = parseInetSpec(text, channels[1])
+ if statsAddress != "" {
+ // If no port was provided, use the default statschannel port.
+ if statsPort == 0 {
+ statsPort = StatsChannelDefaultPort
+ }
+ }
+ return statsAddress, statsPort, statsKey
+}
+
+// Determine executable using base named directory or system default paths.
+// First checks the sbin and bin subdirectories of `baseNamedDir` (when
+// non-empty); otherwise falls back to a PATH lookup. Returns the full path
+// to the executable, or an error when it cannot be located either way.
+func determineBinPath(baseNamedDir, executable string) (string, error) {
+ // look for executable in base named directory and sbin or bin subdirectory
+ if baseNamedDir != "" {
+ for _, binDir := range []string{"sbin", "bin"} {
+ fullPath := path.Join(baseNamedDir, binDir, executable)
+ if _, err := os.Stat(fullPath); err == nil {
+ return fullPath, nil
+ }
+ }
+ }
+
+ // not found so try to find generally in the system (PATH lookup)
+ fullPath, err := exec.LookPath(executable)
+ if err != nil {
+ return "", errors.Errorf("cannot determine location of %s", executable)
+ }
+ return fullPath, nil
+}
+
+// Get potential locations of named.conf. The list covers common
+// distribution-specific install paths; callers probe them in order and
+// use the first file that exists.
+func getPotentialNamedConfLocations() []string {
+ return []string{
+ "/etc/bind/named.conf",
+ "/etc/opt/isc/isc-bind/named.conf",
+ "/etc/opt/isc/scls/isc-bind/named.conf",
+ "/usr/local/etc/namedb/named.conf",
+ }
+}
+
+// Detect a running BIND 9 app from the named process command line.
+// `match` holds regexp submatches of the command line: match[0] is the full
+// match, match[1] the directory of the named binary, match[2] the remaining
+// command-line parameters. `cwd` is the process working directory, used to
+// resolve a relative config path. `cmdr` runs external commands
+// (named-checkconf). Returns the detected app, or nil when any detection
+// step fails (missing config, unparsable config, no controls clause, or
+// rndc details cannot be determined).
+func detectBind9App(match []string, cwd string, cmdr storkutil.Commander) App {
+ if len(match) < 3 {
+ log.Warnf("problem with parsing BIND 9 cmdline: %s", match[0])
+ return nil
+ }
+
+ // try to find bind9 config file(s)
+ namedDir := match[1]
+ bind9Params := match[2]
+ bind9ConfPath := ""
+
+ // look for config file in cmd params (-c <path>)
+ paramsPtrn := regexp.MustCompile(`-c\s+(\S+)`)
+ m := paramsPtrn.FindStringSubmatch(bind9Params)
+ if m != nil {
+ bind9ConfPath = m[1]
+ // if path to config is not absolute then join it with CWD of named
+ if !strings.HasPrefix(bind9ConfPath, "/") {
+ bind9ConfPath = path.Join(cwd, bind9ConfPath)
+ }
+ } else {
+ // config path not found in cmdline params so try to guess its location
+ // by probing well-known distribution paths; first existing file wins.
+ for _, f := range getPotentialNamedConfLocations() {
+ if _, err := os.Stat(f); err == nil {
+ bind9ConfPath = f
+ break
+ }
+ }
+ }
+
+ // no config file so nothing to do
+ if bind9ConfPath == "" {
+ log.Warnf("cannot find config file for BIND 9")
+ return nil
+ }
+
+ // determine config directory
+ bind9ConfDir := path.Dir(bind9ConfPath)
+
+ // determine base named directory
+ baseNamedDir := ""
+ if namedDir != "" {
+ // remove sbin or bin at the end
+ baseNamedDir, _ = filepath.Split(strings.TrimRight(namedDir, "/"))
+ }
+
+ // run named-checkconf on main config file and get preprocessed content of whole config
+ namedCheckconfPath, err := determineBinPath(baseNamedDir, namedCheckconfExec)
+ if err != nil {
+ log.Warnf("cannot find BIND 9 %s: %s", namedCheckconfExec, err)
+ return nil
+ }
+ out, err := cmdr.Output(namedCheckconfPath, "-p", bind9ConfPath)
+ if err != nil {
+ log.Warnf("cannot parse BIND 9 config file %s: %+v; %s", bind9ConfPath, err, out)
+ return nil
+ }
+ cfgText := string(out)
+
+ // look for control address in config; this access point is mandatory,
+ // detection fails without it
+ ctrlAddress, ctrlPort, ctrlKey := getCtrlAddressFromBind9Config(cfgText)
+ if ctrlPort == 0 || len(ctrlAddress) == 0 {
+ log.Warnf("found BIND 9 config file (%s) but cannot parse controls clause", bind9ConfPath)
+ return nil
+ }
+ accessPoints := []AccessPoint{
+ {
+ Type: AccessPointControl,
+ Address: ctrlAddress,
+ Port: ctrlPort,
+ },
+ }
+
+ // look for statistics channel address in config; this access point is
+ // optional - a warning is logged but detection continues without it
+ address, port, key := getStatisticsChannelFromBind9Config(cfgText)
+ if port > 0 && len(address) != 0 {
+ accessPoints = append(accessPoints, AccessPoint{
+ Type: AccessPointStatistics,
+ Address: address,
+ Port: port,
+ Key: key,
+ })
+ } else {
+ log.Warnf("cannot parse BIND 9 statistics-channels clause")
+ }
+
+ // rndc is the command to interface with BIND 9.
+ rndc := func(command []string) ([]byte, error) {
+ cmd := exec.Command(command[0], command[1:]...) //nolint:gosec
+ return cmd.Output()
+ }
+
+ // determine rndc details
+ rndcClient := NewRndcClient(rndc)
+ err = rndcClient.DetermineDetails(baseNamedDir, bind9ConfDir, ctrlAddress, ctrlPort, ctrlKey)
+ if err != nil {
+ log.Warnf("cannot determine BIND 9 rndc details: %s", err)
+ return nil
+ }
+
+ // prepare final BIND 9 app
+ bind9App := &Bind9App{
+ BaseApp: BaseApp{
+ Type: AppTypeBind9,
+ AccessPoints: accessPoints,
+ },
+ RndcClient: rndcClient,
+ }
+
+ return bind9App
+}
+
+// Send a command to named using rndc client. Thin wrapper delegating to
+// the app's RndcClient; returns the raw rndc output and any error.
+func (ba *Bind9App) sendCommand(command []string) (output []byte, err error) {
+ return ba.RndcClient.SendCommand(command)
+}
diff --git a/backend/agent/bind9_test.go b/backend/agent/bind9_test.go
new file mode 100644
index 0000000..bceae4a
--- /dev/null
+++ b/backend/agent/bind9_test.go
@@ -0,0 +1,23 @@
+package agent
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/require"
+)
+
+// Test that DetectAllowedLogs for a BIND 9 app succeeds and returns an
+// empty list of log files viewable from the UI. (Unlike the Kea app, the
+// BIND 9 app does not expose any log files to the Stork UI.)
+func TestBind9AllowedLogs(t *testing.T) {
+ ba := &Bind9App{}
+ paths, err := ba.DetectAllowedLogs()
+ require.NoError(t, err)
+ require.Len(t, paths, 0)
+}
+
+// Check if getPotentialNamedConfLocations returns paths.
+func TestGetPotentialNamedConfLocations(t *testing.T) {
+ paths := getPotentialNamedConfLocations()
+ require.Greater(t, len(paths), 1)
+}
diff --git a/backend/agent/caclient.go b/backend/agent/caclient.go
new file mode 100644
index 0000000..6cb7544
--- /dev/null
+++ b/backend/agent/caclient.go
@@ -0,0 +1,131 @@
+package agent
+
+import (
+ "context"
+ "crypto/tls"
+ "crypto/x509"
+ "encoding/base64"
+ "fmt"
+ "io"
+ "net/http"
+ "os"
+
+ "github.com/pkg/errors"
+ log "github.com/sirupsen/logrus"
+)
+
+// CredentialsFile path to a file holding credentials used in basic authentication of the agent in Kea.
+// It is being modified by tests so needs to be writable.
+var CredentialsFile = "/etc/stork/agent-credentials.json" // nolint:gochecknoglobals,gosec
+
+// HTTPClient is a normal http client.
+type HTTPClient struct {
+ // Underlying HTTP/1.1 client used for all requests.
+ client *http.Client
+ // Basic Auth credentials keyed by target network location.
+ credentials *CredentialsStore
+}
+
+// Create a client to contact with Kea Control Agent or named statistics-channel.
+// If @skipTLSVerification is true then it doesn't verify the server credentials
+// over HTTPS. It may be useful when Kea uses a self-signed certificate.
+// Missing TLS credentials or a missing credentials file are non-fatal:
+// the client is still created, with a warning/info log entry.
+func NewHTTPClient(skipTLSVerification bool) *HTTPClient {
+ // Kea only supports HTTP/1.1. By default, the client here would use HTTP/2.
+ // The instance of the client which is created here disables HTTP/2 and should
+ // be used whenever the communication with the Kea servers is required.
+ // append the client certificates from the CA
+ tlsConfig := tls.Config{
+ InsecureSkipVerify: skipTLSVerification, //nolint:gosec
+ }
+
+ certPool, certificates, err := readTLSCredentials()
+ if err == nil {
+ tlsConfig.RootCAs = certPool
+ tlsConfig.Certificates = certificates
+ } else {
+ log.Warnf("cannot read TLS credentials, use HTTP protocol, %+v", err)
+ }
+
+ httpTransport := &http.Transport{
+ // Creating empty, non-nil map here disables the HTTP/2.
+ TLSNextProto: make(map[string]func(authority string, c *tls.Conn) http.RoundTripper),
+ TLSClientConfig: &tlsConfig,
+ }
+
+ // NOTE(review): no Timeout is set on this client; requests may block
+ // indefinitely on an unresponsive server - consider adding one.
+ httpClient := &http.Client{
+ Transport: httpTransport,
+ }
+
+ credentialsStore := NewCredentialsStore()
+ // Check if the credential file exist
+ if _, err := os.Stat(CredentialsFile); err == nil {
+ file, err := os.Open(CredentialsFile)
+ if err == nil {
+ defer file.Close()
+ err = credentialsStore.Read(file)
+ err = errors.WithMessagef(err, "cannot read the credentials file (%s)", CredentialsFile)
+ }
+ if err == nil {
+ log.Infof("configured to use the Basic Auth credentials from file (%s)", CredentialsFile)
+ } else {
+ log.Warnf("cannot read the Basic Auth credentials from file (%s), %+v", CredentialsFile, err)
+ }
+ } else {
+ log.Infof("the Basic Auth credentials file (%s) is missing - HTTP authentication is not used", CredentialsFile)
+ }
+
+ client := &HTTPClient{
+ client: httpClient,
+ credentials: credentialsStore,
+ }
+
+ return client
+}
+
+// Call sends a POST request with a JSON content type and the given payload
+// to the URL. When the credentials store holds a Basic Auth entry for the
+// URL's network location, an Authorization header is attached. On success
+// the caller is responsible for closing the response body.
+// NOTE(review): uses context.Background(), so the request carries no
+// deadline or cancellation of its own.
+func (c *HTTPClient) Call(url string, payload io.Reader) (*http.Response, error) {
+ req, err := http.NewRequestWithContext(context.Background(), http.MethodPost, url, payload)
+ if err != nil {
+ err = errors.Wrapf(err, "problem creating POST request to %s", url)
+
+ return nil, err
+ }
+ req.Header.Add("Content-Type", "application/json")
+
+ // Attach Basic Auth credentials if configured for this location.
+ if basicAuth, ok := c.credentials.GetBasicAuthByURL(url); ok {
+ secret := fmt.Sprintf("%s:%s", basicAuth.User, basicAuth.Password)
+ encodedSecret := base64.StdEncoding.EncodeToString([]byte(secret))
+ headerContent := fmt.Sprintf("Basic %s", encodedSecret)
+ req.Header.Add("Authorization", headerContent)
+ }
+
+ rsp, err := c.client.Do(req)
+ if err != nil {
+ err = errors.Wrapf(err, "problem sending POST to %s", url)
+ }
+ return rsp, err
+}
+
+// TLS support - inspired by https://sirsean.medium.com/mutually-authenticated-tls-from-a-go-client-92a117e605a1
+// Reads the root CA from RootCAFile into a cert pool and loads the client
+// certificate/key pair from CertPEMFile/KeyPEMFile. Returns an error (and
+// logs it) when any of the files is missing or malformed.
+func readTLSCredentials() (*x509.CertPool, []tls.Certificate, error) {
+ // Certificates
+ certPool := x509.NewCertPool()
+ ca, err := os.ReadFile(RootCAFile)
+ if err != nil {
+ err = errors.Wrapf(err, "could not read CA certificate: %s", RootCAFile)
+ log.Errorf("%+v", err)
+ return nil, nil, err
+ }
+
+ if ok := certPool.AppendCertsFromPEM(ca); !ok {
+ err = errors.New("failed to append client certs")
+ log.Errorf("%+v", err)
+ return nil, nil, err
+ }
+
+ certificate, err := tls.LoadX509KeyPair(CertPEMFile, KeyPEMFile)
+ if err != nil {
+ err = errors.Wrapf(err, "could not setup TLS key pair")
+ log.Errorf("%+v", err)
+ return nil, nil, err
+ }
+
+ return certPool, []tls.Certificate{certificate}, nil
+}
diff --git a/backend/agent/caclient_test.go b/backend/agent/caclient_test.go
new file mode 100644
index 0000000..2f11c4e
--- /dev/null
+++ b/backend/agent/caclient_test.go
@@ -0,0 +1,170 @@
+package agent
+
+import (
+ "bytes"
+ "encoding/base64"
+ "fmt"
+ "net/http"
+ "net/http/httptest"
+ "os"
+ "path"
+ "strings"
+ "testing"
+
+ "github.com/stretchr/testify/require"
+ storkutil "isc.org/stork/util"
+)
+
+// Check that HTTP client sets the TLS credentials if available.
+func TestCreateHTTPClientWithClientCerts(t *testing.T) {
+ cleanup, err := GenerateSelfSignedCerts()
+ require.NoError(t, err)
+ defer cleanup()
+
+ client := NewHTTPClient(false)
+ require.NotNil(t, client)
+
+ transport := client.client.Transport.(*http.Transport)
+ require.NotNil(t, transport)
+ require.NotNil(t, transport.TLSClientConfig)
+
+ transportConfig := transport.TLSClientConfig
+ require.False(t, transportConfig.InsecureSkipVerify)
+
+ require.NotNil(t, transportConfig.RootCAs)
+ require.NotNil(t, transportConfig.Certificates)
+
+ require.NotNil(t, client.credentials)
+}
+
+// Check that HTTP client doesn't set the TLS credentials if missing
+// (for example in the unit tests).
+func TestCreateHTTPClientWithoutClientCerts(t *testing.T) {
+ cleanup := RememberPaths()
+ defer cleanup()
+
+ // Point all certificate-related paths at non-existing files so that
+ // readTLSCredentials fails and the client falls back to plain HTTP.
+ KeyPEMFile = "/not/exists/path"
+ CertPEMFile = "/not/exists/path"
+ RootCAFile = "/not/exists/path"
+ AgentTokenFile = "/not/exists/path"
+
+ client := NewHTTPClient(false)
+ require.NotNil(t, client)
+
+ transport := client.client.Transport.(*http.Transport)
+ require.NotNil(t, transport)
+ require.NotNil(t, transport.TLSClientConfig)
+
+ transportConfig := transport.TLSClientConfig
+ require.False(t, transportConfig.InsecureSkipVerify)
+
+ require.Nil(t, transportConfig.RootCAs)
+ require.Nil(t, transportConfig.Certificates)
+}
+
+// Check that HTTP client may be set to skip a server
+// credentials validation.
+func TestCreateHTTPClientSkipVerification(t *testing.T) {
+ client := NewHTTPClient(true)
+ require.NotNil(t, client)
+
+ transport := client.client.Transport.(*http.Transport)
+ require.NotNil(t, transport)
+ require.NotNil(t, transport.TLSClientConfig)
+
+ transportConfig := transport.TLSClientConfig
+ require.True(t, transportConfig.InsecureSkipVerify)
+}
+
+// Test that an authorization header is added to the HTTP request
+// when the credentials file contains the credentials for specific
+// network location.
+func TestAddAuthorizationHeaderWhenBasicAuthCredentialsExist(t *testing.T) {
+ restorePaths := RememberPaths()
+ defer restorePaths()
+
+ // Create temp dir
+ tmpDir, err := os.MkdirTemp("", "reg")
+ require.NoError(t, err)
+ defer os.RemoveAll(tmpDir)
+
+ // Prepare test server
+ // NOTE(review): testify's require aborts via t.FailNow, which is only
+ // safe from the test goroutine; using it inside the handler goroutine
+ // may not stop the test cleanly on failure.
+ ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ headerContent := r.Header.Get("Authorization")
+ require.NotEmpty(t, headerContent)
+ require.True(t, strings.HasPrefix(headerContent, "Basic "))
+ secret := strings.TrimPrefix(headerContent, "Basic ")
+ rawCredentials, err := base64.StdEncoding.DecodeString(secret)
+ require.NoError(t, err)
+ parts := strings.Split(string(rawCredentials), ":")
+ require.Len(t, parts, 2)
+ user := parts[0]
+ password := parts[1]
+ require.EqualValues(t, "foo", user)
+ require.EqualValues(t, "bar", password)
+ }))
+ defer ts.Close()
+
+ serverURL := ts.URL
+ serverIP, serverPort, _ := storkutil.ParseURL(serverURL)
+
+ // Create credentials file matching the test server's location
+ CredentialsFile = path.Join(tmpDir, "credentials.json")
+ content := fmt.Sprintf(`{
+ "basic_auth": [
+ {
+ "ip": "%s",
+ "port": %d,
+ "user": "foo",
+ "password": "bar"
+ }
+ ]
+ }`, serverIP, serverPort)
+ err = os.WriteFile(CredentialsFile, []byte(content), 0o600)
+ require.NoError(t, err)
+
+ // Create HTTP Client
+ client := NewHTTPClient(true)
+ require.NotNil(t, client.credentials)
+
+ res, err := client.Call(ts.URL, bytes.NewBuffer([]byte{}))
+ require.NoError(t, err)
+ defer res.Body.Close()
+}
+
+// Test that an authorization header isn't added to the HTTP request
+// when the credentials file doesn't exist.
+func TestAddAuthorizationHeaderWhenBasicAuthCredentialsNonExist(t *testing.T) {
+ restorePaths := RememberPaths()
+ defer restorePaths()
+ CredentialsFile = path.Join("/path/that/not/exists.json")
+
+ ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ headerContent := r.Header.Get("Authorization")
+ require.Empty(t, headerContent)
+ }))
+ defer ts.Close()
+
+ client := NewHTTPClient(true)
+ require.NotNil(t, client.credentials)
+
+ res, err := client.Call(ts.URL, bytes.NewBuffer([]byte{}))
+ require.NoError(t, err)
+ defer res.Body.Close()
+}
+
+// Test that missing body in request is accepted.
+func TestCallWithMissingBody(t *testing.T) {
+ restorePaths := RememberPaths()
+ defer restorePaths()
+
+ ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ require.EqualValues(t, http.NoBody, r.Body)
+ }))
+ defer ts.Close()
+
+ client := NewHTTPClient(false)
+ res, err := client.Call(ts.URL, nil)
+ require.NoError(t, err)
+ defer res.Body.Close()
+}
diff --git a/backend/agent/credentialsstore.go b/backend/agent/credentialsstore.go
new file mode 100644
index 0000000..c3d18c8
--- /dev/null
+++ b/backend/agent/credentialsstore.go
@@ -0,0 +1,159 @@
+package agent
+
+// Store the agent credentials to Kea CA.
+// The data are read from a dedicated JSON file.
+//
+// The file structure is flexible to allow for future extensions,
+// for example:
+//
+// - Credentials may be assigned per network instead of IP/port.
+// - Store may contain different kinds of credentials
+
+import (
+ "encoding/json"
+ "io"
+
+ "github.com/pkg/errors"
+ storkutil "isc.org/stork/util"
+)
+
+// Kea CA location in the network. It is a key of the credentials store.
+// It is the internal structure of the credentials store.
+type location struct {
+ // Canonical (normalized) IP address of the Kea CA.
+ IP string
+ // TCP port of the Kea CA.
+ Port int64
+}
+
+// Basic authentication credentials.
+type BasicAuthCredentials struct {
+ User string
+ Password string
+}
+
+// Credentials store with an API to add/update/delete the content.
+type CredentialsStore struct {
+ // Basic Auth credentials keyed by the Kea CA network location.
+ basicAuthCredentials map[location]*BasicAuthCredentials
+}
+
+// Structure of the credentials JSON file.
+type CredentialsStoreContent struct {
+ BasicAuth []CredentialsStoreContentBasicAuthEntry `json:"basic_auth"`
+}
+
+// Single Basic Auth item of the credentials JSON file.
+// Pointer fields let the loader distinguish a missing key from a zero value.
+type CredentialsStoreContentBasicAuthEntry struct {
+ IP *string
+ Port *int64
+ User *string
+ Password *string
+}
+
+// Constructor of the credentials store. Returns an empty, ready-to-use store.
+func NewCredentialsStore() *CredentialsStore {
+ return &CredentialsStore{
+ basicAuthCredentials: make(map[location]*BasicAuthCredentials),
+ }
+}
+
+// Constructor of the Basic Auth credentials.
+func NewBasicAuthCredentials(user, password string) *BasicAuthCredentials {
+ return &BasicAuthCredentials{
+ User: user,
+ Password: password,
+ }
+}
+
+// Get Basic Auth credentials by URL.
+// The Basic Auth is often used during HTTP calls. It is a helper function
+// to retrieve the credentials based on the request URL. The URL contains
+// a protocol, URL segments and the query parameters.
+func (cs *CredentialsStore) GetBasicAuthByURL(url string) (*BasicAuthCredentials, bool) {
+ address, port, _ := storkutil.ParseURL(url)
+ return cs.GetBasicAuth(address, port)
+}
+
+// Get Basic Auth credentials by the network location (IP address and port).
+// The second return value is false when no entry exists for the location
+// or the address cannot be parsed.
+func (cs *CredentialsStore) GetBasicAuth(address string, port int64) (*BasicAuthCredentials, bool) {
+ location, err := newLocation(address, port)
+ if err != nil {
+ return nil, false
+ }
+ item, ok := cs.basicAuthCredentials[location]
+ return item, ok
+}
+
+// Add or update the Basic Auth credentials by the network location (IP address and port).
+// If the credentials already exist in the store then they are overridden.
+// Returns an error when the address is not a valid IP address.
+func (cs *CredentialsStore) AddOrUpdateBasicAuth(address string, port int64, credentials *BasicAuthCredentials) error {
+ location, err := newLocation(address, port)
+ if err != nil {
+ return err
+ }
+ cs.basicAuthCredentials[location] = credentials
+ return nil
+}
+
+// Remove the Basic Auth credentials by the network location (IP address and port).
+// If the credentials don't exist then this function does nothing.
+func (cs *CredentialsStore) RemoveBasicAuth(address string, port int64) {
+ location, err := newLocation(address, port)
+ if err != nil {
+ return
+ }
+ delete(cs.basicAuthCredentials, location)
+}
+
+// Read the credentials store content from reader.
+// The file may contain IP addresses in the different forms,
+// they will be converted to canonical forms. Returns an error when the
+// content cannot be read, is not valid JSON, or an entry is incomplete.
+func (cs *CredentialsStore) Read(reader io.Reader) error {
+ rawContent, err := io.ReadAll(reader)
+ if err != nil {
+ return errors.Wrap(err, "Cannot read the credentials")
+ }
+ var content CredentialsStoreContent
+ err = json.Unmarshal(rawContent, &content)
+ if err != nil {
+ return errors.Wrap(err, "Cannot parse the credentials")
+ }
+ return cs.loadContent(&content)
+}
+
+// Constructor of the network location (IP address and port).
+// The address is normalized to its canonical form so that different
+// textual spellings of the same IP map to the same store key.
+func newLocation(address string, port int64) (location, error) {
+ ip := storkutil.ParseIP(address)
+ if ip == nil {
+ return location{}, errors.Errorf("invalid IP address: %s", address)
+ }
+
+ return location{
+ IP: ip.NetworkAddress,
+ Port: port,
+ }, nil
+}
+
+// Load the content from JSON file to the credentials store.
+// Every entry must have all four fields set; the first invalid entry
+// aborts loading with an error (entries added so far are kept).
+func (cs *CredentialsStore) loadContent(content *CredentialsStoreContent) error {
+ for _, entry := range content.BasicAuth {
+ // Check required fields
+ if entry.IP == nil {
+ return errors.New("missing IP address")
+ }
+ if entry.Port == nil {
+ return errors.New("missing port")
+ }
+ if entry.User == nil {
+ return errors.New("missing user")
+ }
+ if entry.Password == nil {
+ return errors.New("missing password")
+ }
+
+ credentials := NewBasicAuthCredentials(*entry.User, *entry.Password)
+ err := cs.AddOrUpdateBasicAuth(*entry.IP, *entry.Port, credentials)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
diff --git a/backend/agent/credentialsstore_test.go b/backend/agent/credentialsstore_test.go
new file mode 100644
index 0000000..af85cba
--- /dev/null
+++ b/backend/agent/credentialsstore_test.go
@@ -0,0 +1,327 @@
+package agent
+
+import (
+ "strings"
+ "testing"
+
+ "github.com/stretchr/testify/require"
+)
+
+// Test that the credentials store is constructed correctly.
+func TestCreateStore(t *testing.T) {
+ store := NewCredentialsStore()
+ require.NotNil(t, store)
+ require.Len(t, store.basicAuthCredentials, 0)
+}
+
+// Test that the Basic Auth credentials are constructed correctly.
+func TestCreateBasicAuthCredentials(t *testing.T) {
+ credentials := NewBasicAuthCredentials("foo", "bar")
+ require.NotNil(t, credentials)
+ require.EqualValues(t, "foo", credentials.User)
+ require.EqualValues(t, "bar", credentials.Password)
+}
+
+// Test that the Basic Auth credentials are added to store correctly.
+func TestAddBasicAuthCredentials(t *testing.T) {
+ store := NewCredentialsStore()
+ credentials := NewBasicAuthCredentials("foo", "bar")
+ err := store.AddOrUpdateBasicAuth("127.0.0.1", 1, credentials)
+ require.NoError(t, err)
+ fetchedCredentials, ok := store.GetBasicAuth("127.0.0.1", 1)
+ require.True(t, ok)
+ require.NotNil(t, fetchedCredentials)
+ require.EqualValues(t, "foo", fetchedCredentials.User)
+ require.EqualValues(t, "bar", fetchedCredentials.Password)
+}
+
+// Test that the store accepts only valid IP addresses.
+func TestAddBasicAuthCredentialsInvalidIPs(t *testing.T) {
+ // Malformed addresses of various shapes; all must be rejected.
+ ipAddresses := []string{
+ "",
+ "foo",
+ "ZZ:ZZ::",
+ "0",
+ ":",
+ ".",
+ "19216801",
+ "192..168.0.1",
+ "FF:::FF:FF::",
+ "FF:FF:FFFFFF::",
+ "-192.168.0.1",
+ }
+
+ store := NewCredentialsStore()
+ credentials := NewBasicAuthCredentials("foo", "bar")
+
+ for _, ip := range ipAddresses {
+ err := store.AddOrUpdateBasicAuth(ip, 1, credentials)
+ require.Error(t, err, "IP: %s", ip)
+ }
+}
+
+// Test that the empty Basic Auth credentials (without user and password)
+// are added to store correctly.
+func TestAddBasicAuthEmptyCredentials(t *testing.T) {
+ store := NewCredentialsStore()
+ credentials := NewBasicAuthCredentials("", "")
+ err := store.AddOrUpdateBasicAuth("127.0.0.1", 1, credentials)
+ require.NoError(t, err)
+ fetchedCredentials, ok := store.GetBasicAuth("127.0.0.1", 1)
+ require.True(t, ok)
+ require.NotNil(t, fetchedCredentials)
+ require.Empty(t, fetchedCredentials.User)
+ require.Empty(t, fetchedCredentials.Password)
+}
+
+// Test that the Basic Auth credentials are updated correctly.
+func TestUpdateBasicAuthCredentials(t *testing.T) {
+ store := NewCredentialsStore()
+ credentials := NewBasicAuthCredentials("foo", "bar")
+ err := store.AddOrUpdateBasicAuth("127.0.0.1", 1, credentials)
+ require.NoError(t, err)
+ credentials = NewBasicAuthCredentials("oof", "rab")
+ err = store.AddOrUpdateBasicAuth("127.0.0.1", 1, credentials)
+ require.NoError(t, err)
+ fetchedCredentials, ok := store.GetBasicAuth("127.0.0.1", 1)
+ require.True(t, ok)
+ require.NotNil(t, fetchedCredentials)
+ require.EqualValues(t, "oof", fetchedCredentials.User)
+ require.EqualValues(t, "rab", fetchedCredentials.Password)
+}
+
+// Test that the Basic Auth credentials are deleted correctly.
+func TestDeleteBasicAuthCredentials(t *testing.T) {
+ store := NewCredentialsStore()
+ credentials := NewBasicAuthCredentials("foo", "bar")
+ err := store.AddOrUpdateBasicAuth("127.0.0.1", 1, credentials)
+ require.NoError(t, err)
+ store.RemoveBasicAuth("127.0.0.1", 1)
+ fetchedCredentials, ok := store.GetBasicAuth("127.0.0.1", 1)
+ require.False(t, ok)
+ require.Nil(t, fetchedCredentials)
+}
+
+// Test fetching non-existing Basic Auth credentials. It should
+// return nil and a proper (false) status.
+func TestGetMissingBasicAuthCredentials(t *testing.T) {
+ store := NewCredentialsStore()
+ fetchedCredentials, ok := store.GetBasicAuth("127.0.0.1", 1)
+ require.False(t, ok)
+ require.Nil(t, fetchedCredentials)
+}
+
+// Get the Basic Auth credentials by URL.
+func TestGetBasicAuthCredentialsByURL(t *testing.T) {
+ store := NewCredentialsStore()
+ credentials := NewBasicAuthCredentials("foo", "bar")
+ err := store.AddOrUpdateBasicAuth("127.0.0.1", 1, credentials)
+ require.NoError(t, err)
+
+ // URLs that resolve to the stored location (scheme, path and query
+ // parts must be ignored by the lookup).
+ validURLs := []string{
+ "http://127.0.0.1:1",
+ "https://127.0.0.1:1",
+ "http://127.0.0.1:1/",
+ "http://127.0.0.1:1?query=param",
+ "http://127.0.0.1:1/segment",
+ }
+ // URLs with a different host/port or malformed URLs - no match expected.
+ invalidURLs := []string{
+ "http://baz:1",
+ "http://foo:1",
+ "http://127.0.0.1:2",
+ "http://:1",
+ "http://127.0.0.1",
+ "",
+ "127.0.0.1",
+ "1",
+ "protocol://127.0.0.1:1",
+ "127.0.0.1:1",
+ }
+
+ for _, url := range validURLs {
+ fetchedCredentials, ok := store.GetBasicAuthByURL(url)
+ require.True(t, ok, "URL: %s", url)
+ require.NotNil(t, fetchedCredentials)
+ require.EqualValues(t, "foo", fetchedCredentials.User)
+ require.EqualValues(t, "bar", fetchedCredentials.Password)
+ }
+
+ for _, url := range invalidURLs {
+ fetchedCredentials, ok := store.GetBasicAuthByURL(url)
+ require.False(t, ok)
+ require.Nil(t, fetchedCredentials)
+ }
+}
+
+// Test read the store from the proper JSON content.
+func TestReadStoreFromProperContent(t *testing.T) {
+ store := NewCredentialsStore()
+ content := strings.NewReader(`{
+ "basic_auth": [
+ {
+ "ip": "192.168.0.1",
+ "port": 1234,
+ "user": "foo",
+ "password": "bar"
+ }
+ ]
+ }`)
+
+ err := store.Read(content)
+ require.NoError(t, err)
+ credentials, ok := store.GetBasicAuth("192.168.0.1", 1234)
+ require.True(t, ok)
+ require.NotNil(t, credentials)
+ require.EqualValues(t, "foo", credentials.User)
+ require.EqualValues(t, "bar", credentials.Password)
+}
+
+// IP addresses may be written by humans in some different forms.
+// They may be defined using any or mixed letter case.
+// The credentials store should normalize all differences.
+func TestReadStoreFromFileWithAbbreviations(t *testing.T) {
+ store := NewCredentialsStore()
+ content := strings.NewReader(`{
+ "basic_auth": [
+ {
+ "ip": "127.0.0.1",
+ "port": 1,
+ "user": "a",
+ "password": "aa"
+ },
+ {
+ "ip": "::1",
+ "port": 2,
+ "user": "b",
+ "password": "bb"
+ },
+ {
+ "ip": "2001:db8:0000::",
+ "port": 3,
+ "user": "c",
+ "password": "cc"
+ },
+ {
+ "ip": "::1234:5678:91.123.4.56",
+ "port": 4,
+ "user": "d",
+ "password": "dd"
+ },
+ {
+ "ip": "2001:0000:0000:0000:0000:0000:0000:FFFF",
+ "port": 5,
+ "user": "e",
+ "password": "ee"
+ }
+ ]
+ }`)
+
+ err := store.Read(content)
+ require.NoError(t, err)
+
+ // Canonical forms of the addresses listed above, in the same order.
+ addresses := []string{
+ "127.0.0.1",
+ "::1",
+ "2001:db8::",
+ "::1234:5678:5b7b:438",
+ "2001::ffff",
+ }
+
+ for idx, address := range addresses {
+ port := idx + 1
+ expectedUser := string(rune('a' + idx))
+ expectedPassword := expectedUser + expectedUser
+ credentials, ok := store.GetBasicAuth(address, int64(port))
+ require.True(t, ok, "Address: %s", address)
+ require.NotNil(t, credentials)
+ require.EqualValues(t, expectedUser, credentials.User)
+ require.EqualValues(t, expectedPassword, credentials.Password)
+ }
+}
+
+// Test abbreviation normalization.
+func TestAbbreviationNormalization(t *testing.T) {
+ store := NewCredentialsStore()
+ credentials := NewBasicAuthCredentials("foo", "bar")
+ err := store.AddOrUpdateBasicAuth("FF:FF:0000:0000::", 42, credentials)
+ require.NoError(t, err)
+ credentials2, ok := store.GetBasicAuth("FF:FF::", 42)
+ require.True(t, ok)
+ require.EqualValues(t, credentials, credentials2)
+ store.RemoveBasicAuth("FF:FF:0000::", 42)
+ credentials3, ok := store.GetBasicAuth("FF:FF::", 42)
+ require.False(t, ok)
+ require.Nil(t, credentials3)
+}
+
+// Test read the store from the invalid JSON content.
+// The store is shared across subtests and must stay empty after each
+// failed read.
+func TestReadStoreFromInvalidContent(t *testing.T) {
+ store := NewCredentialsStore()
+
+ type unitData struct {
+ Name string
+ Content string
+ }
+
+ items := []unitData{
+ {
+ "Empty content", ``,
+ },
+ {
+ "Port is not a number",
+ `{
+ "basic_auth": [
+ {
+ "ip": "192.168.0.1",
+ "port": "1234",
+ "user": "foo",
+ "password": "bar"
+ }
+ ]
+ }`,
+ },
+ {
+ "Missing port",
+ `{
+ "basic_auth": [
+ {
+ "ip": "192.168.0.1",
+ "user": "foo",
+ "password": "bar"
+ }
+ ]
+ }`,
+ },
+ {
+ "Missing all fields",
+ `{
+ "basic_auth": [
+ { }
+ ]
+ }`,
+ },
+ {
+ "Missing key quotes",
+ `{
+ basic_auth: [
+ {
+ ip: "192.168.0.1",
+ port: 8000
+ user: "foo",
+ password: "bar"
+ }
+ ]
+ }`,
+ },
+ }
+
+ for _, item := range items {
+ testContent := item.Content
+ t.Run(item.Name, func(t *testing.T) {
+ reader := strings.NewReader(testContent)
+ err := store.Read(reader)
+ require.Error(t, err, "Content:", testContent)
+ require.Len(t, store.basicAuthCredentials, 0)
+ })
+ }
+}
diff --git a/backend/agent/kea.go b/backend/agent/kea.go
new file mode 100644
index 0000000..ad92ba8
--- /dev/null
+++ b/backend/agent/kea.go
@@ -0,0 +1,239 @@
+package agent
+
+import (
+ "bytes"
+ "io"
+ "path"
+ "strings"
+
+ "github.com/pkg/errors"
+ log "github.com/sirupsen/logrus"
+
+ keaconfig "isc.org/stork/appcfg/kea"
+ keactrl "isc.org/stork/appctrl/kea"
+ storkutil "isc.org/stork/util"
+)
+
+// KeaApp represents a detected Kea application. It holds common and
+// Kea-specific runtime information.
+type KeaApp struct {
+ BaseApp
+ HTTPClient *HTTPClient // to communicate with Kea Control Agent
+}
+
+// Get base information about Kea app. Returns the common (base) application
+// data shared by all app types.
+func (ka *KeaApp) GetBaseApp() *BaseApp {
+ return &ka.BaseApp
+}
+
+// Sends a command to Kea via the Control Agent and unmarshals the list of
+// responses into the responses argument. The first access point of the app
+// determines the target URL. Returns an error when the HTTP call fails, the
+// body cannot be read, or the response cannot be parsed.
+func (ka *KeaApp) sendCommand(command *keactrl.Command, responses interface{}) error {
+ ap := &ka.BaseApp.AccessPoints[0]
+ caURL := storkutil.HostWithPortURL(ap.Address, ap.Port, ap.UseSecureProtocol)
+
+ // Get the textual representation of the command.
+ request := command.Marshal()
+
+ // Send the command to the Kea server.
+ response, err := ka.HTTPClient.Call(caURL, bytes.NewBuffer([]byte(request)))
+ if err != nil {
+ return errors.WithMessagef(err, "failed to send command to Kea: %s", caURL)
+ }
+
+ // Read the response. Close the body unconditionally so the underlying
+ // connection can be reused even when reading fails.
+ body, err := io.ReadAll(response.Body)
+ response.Body.Close()
+ if err != nil {
+ return errors.WithMessagef(err, "failed to read Kea response body received from %s", caURL)
+ }
+
+ // Parse the response.
+ err = keactrl.UnmarshalResponseList(command, body, responses)
+ if err != nil {
+ return errors.WithMessagef(err, "failed to parse Kea response body received from %s", caURL)
+ }
+ return nil
+}
+
+// Collect the list of log files which can be viewed by the Stork user
+// from the UI. The response variable holds the pointer to the
+// response to the config-get command returned by one of the Kea
+// daemons. If this response contains loggers' configuration the log
+// files are extracted from it and returned. This function is intended
+// to be called by the functions which intercept config-get commands
+// sent periodically by the server to the agents and by the
+// DetectAllowedLogs when the agent is started.
+func collectKeaAllowedLogs(response *keactrl.Response) []string {
+ if response.Result > 0 {
+ log.Warn("Skipped refreshing viewable log files because config-get returned unsuccessful result")
+ return nil
+ }
+ if response.Arguments == nil {
+ log.Warn("Skipped refreshing viewable log files because config-get response has no arguments")
+ return nil
+ }
+ cfg := keaconfig.New(response.Arguments)
+ if cfg == nil {
+ log.Warn("Skipped refreshing viewable log files because config-get response contains arguments which could not be parsed")
+ return nil
+ }
+
+ loggers := cfg.GetLoggers()
+ if len(loggers) == 0 {
+ log.Info("No loggers found in the returned configuration while trying to refresh the viewable log files")
+ return nil
+ }
+
+ // Go over returned loggers and collect those found in the returned configuration.
+ var paths []string
+ for _, l := range loggers {
+ for _, o := range l.OutputOptions {
+ // Only real files are viewable; skip console and syslog sinks.
+ if o.Output != "stdout" && o.Output != "stderr" && !strings.HasPrefix(o.Output, "syslog") {
+ paths = append(paths, o.Output)
+ }
+ }
+ }
+ return paths
+}
+
+// Sends config-get command to all running Kea daemons belonging to the given Kea app
+// to fetch logging configuration. The first config-get command is sent to the Kea CA,
+// to fetch its logging configuration and to find the daemons running behind it. Next, the
+// config-get command is sent to the daemons behind CA and their logging configuration
+// is fetched. The log file locations are returned so the caller can register
+// them with the agent's log tailer as allowed for viewing. This function
+// should be called when the agent has been started and the running Kea apps
+// have been detected.
+func (ka *KeaApp) DetectAllowedLogs() ([]string, error) {
+ // Prepare config-get command to be sent to Kea Control Agent.
+ command := keactrl.NewCommand("config-get", nil, nil)
+ // Send the command to Kea.
+ responses := keactrl.ResponseList{}
+ err := ka.sendCommand(command, &responses)
+ if err != nil {
+ return nil, err
+ }
+
+ ap := ka.BaseApp.AccessPoints[0]
+
+ // There should be exactly one response received because we sent the command
+ // to only one daemon.
+ if len(responses) != 1 {
+ return nil, errors.Errorf("invalid response received from Kea CA to config-get command sent to %s:%d", ap.Address, ap.Port)
+ }
+
+ // It does not make sense to proceed if the CA returned non-success status
+ // because this response neither contains logging configuration nor
+ // sockets configurations.
+ if responses[0].Result != 0 {
+ return nil, errors.Errorf("unsuccessful response %d received from Kea CA to config-get command sent to %s:%d", responses[0].Result, ap.Address, ap.Port)
+ }
+
+ // Allow the log files used by the CA.
+ paths := collectKeaAllowedLogs(&responses[0])
+
+ // Arguments should be returned in response to the config-get command.
+ rawConfig := responses[0].Arguments
+ if rawConfig == nil {
+ return nil, errors.Errorf("empty arguments received from Kea CA in response to config-get command sent to %s:%d", ap.Address, ap.Port)
+ }
+ // The returned configuration has unexpected structure.
+ config := keaconfig.New(rawConfig)
+ if config == nil {
+ return nil, errors.Errorf("unable to parse the config received from Kea CA in response to config-get command sent to %s:%d", ap.Address, ap.Port)
+ }
+
+ // Control Agent should be configured to forward commands to some
+ // daemons behind it.
+ sockets := config.GetControlSockets()
+ daemonNames := sockets.ConfiguredDaemonNames()
+
+ // Apparently, it isn't configured to forward commands to the daemons behind it.
+ // That is not an error; only the CA log files are returned in that case.
+ if len(daemonNames) == 0 {
+ return nil, nil
+ }
+
+ // Prepare config-get command to be sent to the daemons behind CA.
+ command = keactrl.NewCommand("config-get", daemonNames, nil)
+
+ // Send config-get to the daemons behind CA.
+ responses = keactrl.ResponseList{}
+ err = ka.sendCommand(command, &responses)
+ if err != nil {
+ return nil, err
+ }
+
+ // Check that we got responses for all daemons.
+ if len(responses) != len(daemonNames) {
+ return nil, errors.Errorf("invalid number of responses received from daemons to config-get command sent via %s:%d", ap.Address, ap.Port)
+ }
+
+ // For each daemon try to extract its logging configuration and allow view
+ // the log files it contains.
+ for i := range responses {
+ paths = append(paths, collectKeaAllowedLogs(&responses[i])...)
+ }
+
+ return paths, nil
+}
+
+// Reads the Kea Control Agent configuration file at the given path and
+// extracts the control target: the HTTP address, port, and whether the
+// secure (TLS) protocol is used. On any failure a warning is logged and
+// zero values are returned; callers detect failure by an empty address
+// or zero port.
+func getCtrlTargetFromKeaConfig(path string) (address string, port int64, useSecureProtocol bool) {
+ text, err := storkutil.ReadFileWithIncludes(path)
+ if err != nil {
+ log.Warnf("Cannot read Kea config file: %+v", err)
+ return
+ }
+
+ config, err := keaconfig.NewFromJSON(text)
+ if err != nil {
+ log.Warnf("Cannot parse Kea Control Agent config file: %+v", err)
+ return
+ }
+
+ // Port
+ port, ok := config.GetHTTPPort()
+ if !ok {
+ log.Warn("Cannot parse the port")
+ return
+ }
+
+ // Address
+ address, _ = config.GetHTTPHost()
+
+ // Secure protocol
+ useSecureProtocol = config.UseSecureProtocol()
+ return
+}
+
+// Creates a KeaApp instance from a process command-line match. match[0]
+// holds the whole matched command line and match[2] the configuration file
+// path (possibly relative to cwd). Returns nil when the command line cannot
+// be parsed or when the control address/port cannot be determined from the
+// Kea CA configuration file.
+func detectKeaApp(match []string, cwd string, httpClient *HTTPClient) App {
+ if len(match) < 3 {
+ log.Warnf("Problem parsing Kea cmdline: %s", match[0])
+ return nil
+ }
+ keaConfPath := match[2]
+
+ // if path to config is not absolute then join it with CWD of kea
+ if !strings.HasPrefix(keaConfPath, "/") {
+ keaConfPath = path.Join(cwd, keaConfPath)
+ }
+
+ address, port, useSecureProtocol := getCtrlTargetFromKeaConfig(keaConfPath)
+ if address == "" || port == 0 {
+ return nil
+ }
+ accessPoints := []AccessPoint{
+ {
+ Type: AccessPointControl,
+ Address: address,
+ Port: port,
+ UseSecureProtocol: useSecureProtocol,
+ },
+ }
+ keaApp := &KeaApp{
+ BaseApp: BaseApp{
+ Type: AppTypeKea,
+ AccessPoints: accessPoints,
+ },
+ HTTPClient: httpClient,
+ }
+
+ return keaApp
+}
diff --git a/backend/agent/kea_test.go b/backend/agent/kea_test.go
new file mode 100644
index 0000000..5de84c8
--- /dev/null
+++ b/backend/agent/kea_test.go
@@ -0,0 +1,233 @@
+package agent
+
+import (
+ "encoding/json"
+ "testing"
+
+ "github.com/stretchr/testify/require"
+ "gopkg.in/h2non/gock.v1"
+ keactrl "isc.org/stork/appctrl/kea"
+)
+
+// Test the case that the command is successfully sent to Kea.
+func TestSendCommand(t *testing.T) {
+ httpClient := NewHTTPClient(false)
+ gock.InterceptClient(httpClient.client)
+
+ // Expect appropriate content type and the body. If they are not matched
+ // an error will be raised.
+ defer gock.Off()
+ gock.New("http://localhost:45634").
+ MatchHeader("Content-Type", "application/json").
+ JSON(map[string]string{"command": "list-commands"}).
+ Post("/").
+ Reply(200).
+ JSON([]map[string]int{{"result": 0}})
+
+ command := keactrl.NewCommand("list-commands", nil, nil)
+
+ ka := &KeaApp{
+ BaseApp: BaseApp{
+ Type: AppTypeKea,
+ AccessPoints: makeAccessPoint(AccessPointControl, "localhost", "", 45634, false),
+ },
+ HTTPClient: httpClient,
+ }
+ responses := keactrl.ResponseList{}
+ err := ka.sendCommand(command, &responses)
+ require.NoError(t, err)
+
+ // The single mocked response should be parsed into a one-element list.
+ require.Len(t, responses, 1)
+}
+
+// Test the case when Kea returns invalid response to the command.
+func TestSendCommandInvalidResponse(t *testing.T) {
+ httpClient := NewHTTPClient(false)
+ gock.InterceptClient(httpClient.client)
+
+ // Return invalid response. Arguments must be a map not an integer.
+ defer gock.Off()
+ gock.New("http://localhost:45634").
+ MatchHeader("Content-Type", "application/json").
+ JSON(map[string]string{"command": "list-commands"}).
+ Post("/").
+ Reply(200).
+ JSON([]map[string]int{{"result": 0, "arguments": 1}})
+
+ command := keactrl.NewCommand("list-commands", nil, nil)
+
+ ka := &KeaApp{
+ BaseApp: BaseApp{
+ Type: AppTypeKea,
+ AccessPoints: makeAccessPoint(AccessPointControl, "localhost", "", 45634, false),
+ },
+ HTTPClient: httpClient,
+ }
+ responses := keactrl.ResponseList{}
+ err := ka.sendCommand(command, &responses)
+ // Parsing must fail because "arguments" is not an object.
+ require.Error(t, err)
+}
+
+// Test the case when Kea server is unreachable.
+func TestSendCommandNoKea(t *testing.T) {
+ command := keactrl.NewCommand("list-commands", nil, nil)
+ ka := &KeaApp{
+ BaseApp: BaseApp{
+ Type: AppTypeKea,
+ AccessPoints: makeAccessPoint(AccessPointControl, "localhost", "", 45634, false),
+ },
+ HTTPClient: NewHTTPClient(false),
+ }
+ responses := keactrl.ResponseList{}
+ // No gock interception here, so a real connection attempt is made and
+ // fails because nothing listens on the port.
+ err := ka.sendCommand(command, &responses)
+ require.Error(t, err)
+}
+
+// Test the function which extracts the list of log files from the Kea
+// application by sending the request to the Kea Control Agent and the
+// daemons behind it.
+func TestKeaAllowedLogs(t *testing.T) {
+ httpClient := NewHTTPClient(false)
+ gock.InterceptClient(httpClient.client)
+
+ // The first config-get command should go to the Kea Control Agent.
+ // The logs should be extracted from there and the subsequent config-get
+ // commands should be sent to the daemons with which the CA is configured
+ // to communicate.
+ defer gock.Off()
+ caResponseJSON := `[{
+ "result": 0,
+ "arguments": {
+ "CtrlAgent": {
+ "control-sockets": {
+ "dhcp4": {
+ "socket-name": "/tmp/dhcp4.sock"
+ },
+ "dhcp6": {
+ "socket-name": "/tmp/dhcp6.sock"
+ }
+ },
+ "loggers": [
+ {
+ "output_options": [
+ {
+ "output": "/tmp/kea-ctrl-agent.log"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }]`
+ caResponse := make([]map[string]interface{}, 1)
+ err := json.Unmarshal([]byte(caResponseJSON), &caResponse)
+ require.NoError(t, err)
+ gock.New("https://localhost:45634").
+ MatchHeader("Content-Type", "application/json").
+ JSON(map[string]string{"command": "config-get"}).
+ Post("/").
+ Reply(200).
+ JSON(caResponse)
+
+ dhcpResponsesJSON := `[
+ {
+ "result": 0,
+ "arguments": {
+ "Dhcp4": {
+ "loggers": [
+ {
+ "output_options": [
+ {
+ "output": "/tmp/kea-dhcp4.log"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ },
+ {
+ "result": 0,
+ "arguments": {
+ "Dhcp6": {
+ "loggers": [
+ {
+ "output_options": [
+ {
+ "output": "/tmp/kea-dhcp6.log"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ ]`
+ dhcpResponses := make([]map[string]interface{}, 2)
+ err = json.Unmarshal([]byte(dhcpResponsesJSON), &dhcpResponses)
+ require.NoError(t, err)
+
+ // The config-get command sent to the daemons behind CA should return
+ // configurations of the DHCPv4 and DHCPv6 daemons.
+ gock.New("https://localhost:45634").
+ MatchHeader("Content-Type", "application/json").
+ JSON(map[string]interface{}{"command": "config-get", "service": []string{"dhcp4", "dhcp6"}}).
+ Post("/").
+ Reply(200).
+ JSON(dhcpResponses)
+
+ // Use a secure access point to match the mocked https:// URLs above.
+ ka := &KeaApp{
+ BaseApp: BaseApp{
+ Type: AppTypeKea,
+ AccessPoints: makeAccessPoint(AccessPointControl, "localhost", "", 45634, true),
+ },
+ HTTPClient: httpClient,
+ }
+ paths, err := ka.DetectAllowedLogs()
+ require.NoError(t, err)
+
+ // We should have three log files recorded from the returned configurations.
+ // One from CA, one from DHCPv4 and one from DHCPv6.
+ require.Len(t, paths, 3)
+}
+
+// This test verifies that an error is returned when the number of responses
+// from the Kea daemons is lower than the number of services specified in the
+// command.
+func TestKeaAllowedLogsFewerResponses(t *testing.T) {
+ httpClient := NewHTTPClient(false)
+ gock.InterceptClient(httpClient.client)
+
+ defer gock.Off()
+
+ // Return only one response while the number of daemons is two.
+ dhcpResponsesJSON := `[
+ {
+ "result": 0,
+ "arguments": {
+ "Dhcp4": {
+ }
+ }
+ }
+ ]`
+ dhcpResponses := make([]map[string]interface{}, 1)
+ err := json.Unmarshal([]byte(dhcpResponsesJSON), &dhcpResponses)
+ require.NoError(t, err)
+
+ gock.New("https://localhost:45634").
+ MatchHeader("Content-Type", "application/json").
+ JSON(map[string]interface{}{"command": "config-get", "service": []string{"dhcp4", "dhcp6"}}).
+ Post("/").
+ Reply(200).
+ JSON(dhcpResponses)
+
+ ka := &KeaApp{
+ BaseApp: BaseApp{
+ Type: AppTypeKea,
+ AccessPoints: makeAccessPoint(AccessPointControl, "localhost", "", 45634, true),
+ },
+ HTTPClient: httpClient,
+ }
+ // DetectAllowedLogs must detect the response/daemon count mismatch.
+ _, err = ka.DetectAllowedLogs()
+ require.Error(t, err)
+}
diff --git a/backend/agent/keaintercept.go b/backend/agent/keaintercept.go
new file mode 100644
index 0000000..e452647
--- /dev/null
+++ b/backend/agent/keaintercept.go
@@ -0,0 +1,186 @@
+package agent
+
+import (
+ "sync"
+
+ "github.com/pkg/errors"
+ log "github.com/sirupsen/logrus"
+
+ agentapi "isc.org/stork/api"
+ keactrl "isc.org/stork/appctrl/kea"
+)
+
+// Structure containing a pointer to the callback function registered in
+// the Kea interceptor and associated with one of the Kea commands.
+// The callback is invoked when the given command is received by the
+// agent and after it is forwarded to Kea.
+type keaInterceptorHandler struct {
+ callback func(*StorkAgent, *keactrl.Response) error
+}
+
+// Structure holding a collection of handlers/callbacks to be invoked
+// for a given Kea command.
+type keaInterceptorTarget struct {
+ // List of callbacks to be invoked for a command.
+ handlers []*keaInterceptorHandler
+}
+
+// The Kea interceptor is a generic mechanism for dispatching calls to
+// the registered callback functions when agent forwards a given command
+// to the Kea server.
+type keaInterceptor struct {
+ // Serializes callback registration with asynchronous handling; taken
+ // by registerAsync and asyncHandle.
+ mutex *sync.Mutex
+ // Holds a list of async callbacks to be invoked for a given command.
+ asyncTargets map[string]*keaInterceptorTarget
+ // Holds a list of sync callbacks to be invoked for a given command.
+ syncTargets map[string]*keaInterceptorTarget
+}
+
+// Creates new Kea interceptor instance with empty async and sync target
+// maps, ready for callback registration.
+func newKeaInterceptor() *keaInterceptor {
+ interceptor := &keaInterceptor{
+ mutex: new(sync.Mutex),
+ }
+ interceptor.asyncTargets = make(map[string]*keaInterceptorTarget)
+ interceptor.syncTargets = make(map[string]*keaInterceptorTarget)
+ return interceptor
+}
+
+// Registers an asynchronous callback function and associates it with a given command.
+// It is possible to register multiple callbacks for the same command; they
+// are invoked in registration order.
+func (i *keaInterceptor) registerAsync(callback func(*StorkAgent, *keactrl.Response) error, commandName string) {
+ var (
+ target *keaInterceptorTarget
+ ok bool
+ )
+
+ // Make sure we don't collide with asyncHandle calls.
+ i.mutex.Lock()
+ defer i.mutex.Unlock()
+
+ // Check if the target for the given command already exists.
+ target, ok = i.asyncTargets[commandName]
+ if !ok {
+ // This is the first time we register callback for this command.
+ // Let's create the target instance.
+ target = &keaInterceptorTarget{}
+ i.asyncTargets[commandName] = target
+ }
+ // Create the handler from the callback and associate it with the
+ // given target/command.
+ h := &keaInterceptorHandler{
+ callback: callback,
+ }
+ target.handlers = append(target.handlers, h)
+}
+
+// Registers a synchronous callback function and associates it with a given command.
+// It is possible to register multiple callbacks for the same command; they
+// are invoked in registration order.
+func (i *keaInterceptor) registerSync(callback func(*StorkAgent, *keactrl.Response) error, commandName string) {
+ var (
+ target *keaInterceptorTarget
+ ok bool
+ )
+
+ // Guard the map against concurrent registration, mirroring registerAsync.
+ // Without this lock a registration racing with another registration (or
+ // with asyncHandle, which holds the same mutex) is a data race on the map.
+ i.mutex.Lock()
+ defer i.mutex.Unlock()
+
+ // Check if the target for the given command already exists.
+ target, ok = i.syncTargets[commandName]
+ if !ok {
+ // This is the first time we register callback for this command.
+ // Let's create the target instance.
+ target = &keaInterceptorTarget{}
+ i.syncTargets[commandName] = target
+ }
+ // Create the handler from the callback and associate it with the
+ // given target/command.
+ h := &keaInterceptorHandler{
+ callback: callback,
+ }
+ target.handlers = append(target.handlers, h)
+}
+
+// Triggers invocation of all sync callbacks registered for the given command. The
+// callback is invoked separately for each daemon which responded to the command.
+// The result of the callbacks may affect the response forwarded to the Stork Server.
+// Synchronous handler is executed before asynchronous one.
+// NOTE(review): unlike asyncHandle, this path does not take i.mutex, so it
+// presumably relies on all sync callbacks being registered before traffic is
+// handled — confirm with callers.
+func (i *keaInterceptor) syncHandle(agent *StorkAgent, request *agentapi.KeaRequest, response []byte) ([]byte, error) {
+ changedResponse, err := i.handle(i.syncTargets, agent, request, response)
+ err = errors.WithMessage(err, "Failed to execute synchronous handlers")
+ return changedResponse, err
+}
+
+// Triggers invocation of all async callbacks registered for the given command. The
+// callback is invoked separately for each daemon which responded to the command.
+// This function should be invoked in the goroutine as it invokes the handlers
+// which can be run independently from the agent. The agent may send back the
+// response to the server while these callbacks are invoked. The result of the
+// callbacks do not affect the response forwarded to the Stork Server; any
+// handler error is only logged.
+func (i *keaInterceptor) asyncHandle(agent *StorkAgent, request *agentapi.KeaRequest, response []byte) {
+ // We don't want to run the handlers concurrently in case they update the same
+ // data structures. Also, we want to avoid registration of handlers while we're
+ // here.
+ i.mutex.Lock()
+ defer i.mutex.Unlock()
+
+ _, err := i.handle(i.asyncTargets, agent, request, response)
+ if err != nil {
+ log.Errorf("Failed to execute asynchronous handler: %+v", err)
+ }
+}
+
+// Common part of asynchronous and synchronous handlers. Looks up the handlers
+// registered in the given target map for the command carried in the request,
+// invokes each of them for every addressed daemon's response, and returns the
+// (possibly modified) serialized response, or an error when parsing, a
+// callback, or re-serialization fails. When no handler is registered for the
+// command the original response is returned unchanged.
+func (i *keaInterceptor) handle(targets map[string]*keaInterceptorTarget, agent *StorkAgent, request *agentapi.KeaRequest, response []byte) ([]byte, error) {
+ // Parse the request to get the command name and service.
+ command, err := keactrl.NewCommandFromJSON(request.Request)
+ if err != nil {
+ err = errors.WithMessage(err, "Failed to parse Kea command")
+ return nil, err
+ }
+
+ // Check if there is any handler registered for this command.
+ target, ok := targets[command.Command]
+ if !ok {
+ // No handlers: pass the response through untouched.
+ return response, nil
+ }
+
+ // Parse the response. It will be passed to the callback so as the callback
+ // can "do something" with it.
+ var parsedResponse keactrl.ResponseList
+ err = keactrl.UnmarshalResponseList(command, response, &parsedResponse)
+ if err != nil {
+ err = errors.WithMessagef(err, "Failed to parse Kea responses for command %s", command.Command)
+ return nil, err
+ }
+
+ // Check what daemons the callbacks need to be invoked for. A command with
+ // no explicit daemons is directed at the Control Agent itself. len() of a
+ // nil slice is 0, so a single length check covers both the nil and the
+ // empty case.
+ var daemons []string
+ if len(command.Daemons) == 0 {
+ daemons = append(daemons, "ca")
+ } else {
+ daemons = command.Daemons
+ }
+ // Invoke callbacks for each handler registered for this command.
+ for i := range target.handlers {
+ // Invoke the handler for each daemon. Daemons beyond the length of
+ // the parsed response list (i.e. which returned no response) are
+ // skipped silently.
+ for j := range daemons {
+ if j < len(parsedResponse) {
+ callback := target.handlers[i].callback
+ if callback != nil {
+ err = callback(agent, &parsedResponse[j])
+ if err != nil {
+ err = errors.WithMessagef(err, "Callback returned an error for command %s", command.Command)
+ return nil, err
+ }
+ }
+ }
+ }
+ }
+
+ // Serialize response after modifications.
+ response, err = keactrl.MarshalResponseList(parsedResponse)
+ if err != nil {
+ err = errors.WithMessagef(err, "Failed to marshal changed responses for command %s", command.Command)
+ return nil, err
+ }
+ return response, nil
+}
diff --git a/backend/agent/keaintercept_test.go b/backend/agent/keaintercept_test.go
new file mode 100644
index 0000000..bdf9ae3
--- /dev/null
+++ b/backend/agent/keaintercept_test.go
@@ -0,0 +1,324 @@
+package agent
+
+import (
+ "bytes"
+ "encoding/json"
+ "testing"
+
+ "github.com/pkg/errors"
+ "github.com/stretchr/testify/require"
+ agentapi "isc.org/stork/api"
+ keactrl "isc.org/stork/appctrl/kea"
+)
+
+// Test that new instance of the Kea interceptor is created successfully.
+func TestNewKeaInterceptor(t *testing.T) {
+ interceptor := newKeaInterceptor()
+ require.NotNil(t, interceptor)
+ require.NotNil(t, interceptor.asyncTargets)
+ require.NotNil(t, interceptor.syncTargets)
+ // Both target maps must start empty; the original test only checked
+ // the async map.
+ require.Empty(t, interceptor.asyncTargets)
+ require.Empty(t, interceptor.syncTargets)
+}
+
+// Test that it is possible to register async callbacks to intercept selected
+// commands and that these callbacks are invoked when the commands are
+// received.
+func TestKeaInterceptorAsyncHandle(t *testing.T) {
+ interceptor := newKeaInterceptor()
+ require.NotNil(t, interceptor)
+
+ // Record which command was invoked.
+ var commandInvoked string
+ // Record responses.
+ var capturedResponses []*keactrl.Response
+
+ // Register callback to be invoked for config-get commands.
+ interceptor.registerAsync(func(agent *StorkAgent, resp *keactrl.Response) error {
+ commandInvoked = "config-get"
+ capturedResponses = append(capturedResponses, resp)
+ return nil
+ }, "config-get")
+
+ // Register callback to be invoked for the subnet4-get.
+ interceptor.registerAsync(func(agent *StorkAgent, resp *keactrl.Response) error {
+ commandInvoked = "subnet4-get"
+ capturedResponses = append(capturedResponses, resp)
+ return nil
+ }, "subnet4-get")
+
+ // Simulate sending config-get command to the DHCPv4 and DHCPv6
+ // server.
+ command := keactrl.NewCommand("config-get", []string{"dhcp4", "dhcp6"}, nil)
+ request := &agentapi.KeaRequest{
+ Request: command.Marshal(),
+ }
+ response := []byte(`[
+ {
+ "result": 0,
+ "text": "invoked successfully"
+ },
+ {
+ "result": 1,
+ "text": "invoked unsuccessfully"
+ }
+ ]`)
+
+ // Invoke the registered callbacks for config-get.
+ interceptor.asyncHandle(nil, request, response)
+ require.Equal(t, "config-get", commandInvoked)
+ // There should be two responses recorded, one for the DHCPv4 and
+ // one for DHCPv6.
+ require.Len(t, capturedResponses, 2)
+ // Check that the callback received the response correctly. The response
+ // order follows the daemon order in the command.
+ require.Zero(t, capturedResponses[0].Result)
+ require.Equal(t, "invoked successfully", capturedResponses[0].Text)
+ require.Equal(t, "dhcp4", capturedResponses[0].Daemon)
+ require.EqualValues(t, 1, capturedResponses[1].Result)
+ require.Equal(t, "invoked unsuccessfully", capturedResponses[1].Text)
+ require.Equal(t, "dhcp6", capturedResponses[1].Daemon)
+
+ // Make sure that we can invoke different callback when using different
+ // command.
+ command = keactrl.NewCommand("subnet4-get", []string{"dhcp4"}, nil)
+ request = &agentapi.KeaRequest{
+ Request: command.Marshal(),
+ }
+ interceptor.asyncHandle(nil, request, response)
+ require.Equal(t, "subnet4-get", commandInvoked)
+}
+
+// Test that async intercepting commands sent to Kea Control Agent works.
+// A command with no daemons listed is directed at the CA itself.
+func TestKeaInterceptorAsyncHandleControlAgent(t *testing.T) {
+ interceptor := newKeaInterceptor()
+ require.NotNil(t, interceptor)
+
+ var capturedResponses []*keactrl.Response
+ interceptor.registerAsync(func(agent *StorkAgent, resp *keactrl.Response) error {
+ capturedResponses = append(capturedResponses, resp)
+ return nil
+ }, "config-get")
+
+ // Simulate sending command to the Control Agent.
+ command := keactrl.NewCommand("config-get", nil, nil)
+ request := &agentapi.KeaRequest{
+ Request: command.Marshal(),
+ }
+ response := []byte(`[
+ {
+ "result": 1,
+ "text": "invocation error"
+ }
+ ]`)
+
+ // Invoke the callbacks and validate the data recorded by this
+ // callback.
+ interceptor.asyncHandle(nil, request, response)
+ require.Len(t, capturedResponses, 1)
+ require.EqualValues(t, 1, capturedResponses[0].Result)
+ require.Equal(t, "invocation error", capturedResponses[0].Text)
+ // The CA response carries no daemon name.
+ require.Empty(t, capturedResponses[0].Daemon)
+}
+
+// Test that it is possible to register multiple async handlers for a single
+// command and that all of them are invoked.
+func TestKeaInterceptorMultipleAsyncHandlers(t *testing.T) {
+ interceptor := newKeaInterceptor()
+ require.NotNil(t, interceptor)
+
+ // Register first handler
+ func1Invoked := false
+ interceptor.registerAsync(func(agent *StorkAgent, resp *keactrl.Response) error {
+ func1Invoked = true
+ return nil
+ }, "config-get")
+
+ // Register second handler.
+ func2Invoked := false
+ interceptor.registerAsync(func(agent *StorkAgent, resp *keactrl.Response) error {
+ func2Invoked = true
+ return nil
+ }, "config-get")
+
+ // Send the command matching the handlers.
+ command := keactrl.NewCommand("config-get", nil, nil)
+ request := &agentapi.KeaRequest{
+ Request: command.Marshal(),
+ }
+ response := []byte(`[
+ {
+ "result": 0,
+ "text": "fine"
+ }
+ ]`)
+
+ // Make sure that both handlers have been invoked.
+ interceptor.asyncHandle(nil, request, response)
+ require.True(t, func1Invoked)
+ require.True(t, func2Invoked)
+}
+
+// Test that it is possible to register sync callbacks to intercept selected
+// commands and that the handler lands in the sync target map.
+func TestKeaInterceptorSyncHandleRegister(t *testing.T) {
+ // Arrange
+ interceptor := newKeaInterceptor()
+ callback := func(agent *StorkAgent, resp *keactrl.Response) error {
+ return nil
+ }
+
+ // Act
+ interceptor.registerSync(callback, "foobar")
+
+ // Assert
+ require.Len(t, interceptor.syncTargets["foobar"].handlers, 1)
+}
+
+// Test that the registered sync callbacks are invoked when the commands are
+// received and that an unmodified response round-trips as compacted JSON.
+func TestKeaInterceptorSyncHandleExecute(t *testing.T) {
+ // Arrange
+ interceptor := newKeaInterceptor()
+ callCount := 0
+ interceptor.registerSync(func(sa *StorkAgent, r *keactrl.Response) error {
+ callCount++
+ return nil
+ }, "foobar")
+
+ command := keactrl.NewCommand("foobar", []string{"dhcp4"}, nil)
+ request := &agentapi.KeaRequest{
+ Request: command.Marshal(),
+ }
+ inResponse := []byte(`[
+ {
+ "result": 0,
+ "text": "fine"
+ }
+ ]`)
+ // syncHandle re-serializes the response, so compare against the
+ // compacted form of the input.
+ var buffer bytes.Buffer
+ _ = json.Compact(&buffer, inResponse)
+ expectedOutResponse := buffer.Bytes()
+
+ // Act
+ outResponse, err := interceptor.syncHandle(nil, request, inResponse)
+
+ // Assert
+ require.NoError(t, err)
+ require.EqualValues(t, expectedOutResponse, outResponse)
+ require.EqualValues(t, 1, callCount)
+}
+
+// Test that the multiple registered sync callbacks are invoked sequentially
+// when the commands are received, each exactly once.
+func TestKeaInterceptorMultipleSyncHandlesExecute(t *testing.T) {
+ // Arrange
+ interceptor := newKeaInterceptor()
+ callCount := map[string]int64{
+ "foo": 0,
+ "bar": 0,
+ }
+ interceptor.registerSync(func(sa *StorkAgent, r *keactrl.Response) error {
+ callCount["foo"]++
+ return nil
+ }, "foobar")
+ interceptor.registerSync(func(sa *StorkAgent, r *keactrl.Response) error {
+ callCount["bar"]++
+ return nil
+ }, "foobar")
+
+ command := keactrl.NewCommand("foobar", []string{"dhcp4"}, nil)
+ request := &agentapi.KeaRequest{
+ Request: command.Marshal(),
+ }
+ inResponse := []byte(`[
+ {
+ "result": 0,
+ "text": "fine"
+ }
+ ]`)
+ // syncHandle re-serializes the response, so compare against the
+ // compacted form of the input.
+ var buffer bytes.Buffer
+ _ = json.Compact(&buffer, inResponse)
+ expectedOutResponse := buffer.Bytes()
+
+ // Act
+ outResponse, err := interceptor.syncHandle(nil, request, inResponse)
+
+ // Assert
+ require.NoError(t, err)
+ require.EqualValues(t, expectedOutResponse, outResponse)
+ require.EqualValues(t, 1, callCount["foo"])
+ require.EqualValues(t, 1, callCount["bar"])
+}
+
+// Test that the sync callback can rewrite the response before it is
+// serialized back for the Stork server.
+func TestKeaInterceptorSyncHandleRewriteResponse(t *testing.T) {
+ // Arrange
+ interceptor := newKeaInterceptor()
+ interceptor.registerSync(func(sa *StorkAgent, r *keactrl.Response) error {
+ r.Text = "barfoo"
+ r.Result = 42
+ return nil
+ }, "foobar")
+
+ command := keactrl.NewCommand("foobar", []string{"dhcp4"}, nil)
+ request := &agentapi.KeaRequest{
+ Request: command.Marshal(),
+ }
+
+ inResponse := []byte(`[
+ {
+ "result": 0,
+ "text": "fine"
+ }
+ ]`)
+
+ expectedOutResponse := []byte(`[
+ {
+ "result": 42,
+ "text": "barfoo"
+ }
+ ]`)
+ var buffer bytes.Buffer
+ _ = json.Compact(&buffer, expectedOutResponse)
+ expectedOutResponse = buffer.Bytes()
+
+ // Act
+ outResponse, err := interceptor.syncHandle(nil, request, inResponse)
+
+ // Assert
+ // The error was previously discarded with a blank identifier; assert it
+ // explicitly so a failure inside syncHandle is reported directly rather
+ // than as a confusing response mismatch.
+ require.NoError(t, err)
+ require.EqualValues(t, expectedOutResponse, outResponse)
+}
+
+// Test that an error returned by a sync handler breaks execution: the
+// response is dropped, the error is propagated, and handlers registered
+// after the failing one are not invoked.
+func TestKeaInterceptorSyncHandleReturnError(t *testing.T) {
+ // Arrange
+ interceptor := newKeaInterceptor()
+ interceptor.registerSync(func(sa *StorkAgent, r *keactrl.Response) error {
+ return errors.New("Expected error")
+ }, "foobar")
+ callCount := 0
+ // Second handler registered after the failing one; it must not run.
+ interceptor.registerSync(func(sa *StorkAgent, r *keactrl.Response) error {
+ callCount++
+ return nil
+ }, "foobar")
+
+ command := keactrl.NewCommand("foobar", []string{"dhcp4"}, nil)
+ request := &agentapi.KeaRequest{
+ Request: command.Marshal(),
+ }
+
+ inResponse := []byte(`[
+ {
+ "result": 0,
+ "text": "fine"
+ }
+ ]`)
+
+ // Act
+ outResponse, err := interceptor.syncHandle(nil, request, inResponse)
+
+ // Assert
+ require.Nil(t, outResponse)
+ require.Error(t, err)
+ require.Zero(t, callCount)
+}
diff --git a/backend/agent/keainterceptfn.go b/backend/agent/keainterceptfn.go
new file mode 100644
index 0000000..e9f901c
--- /dev/null
+++ b/backend/agent/keainterceptfn.go
@@ -0,0 +1,39 @@
+package agent
+
+import (
+ keactrl "isc.org/stork/appctrl/kea"
+)
+
+// Intercept callback function for config-get. It records log files
+// found in the daemon's configuration, making them accessible by the
+// log viewer. Always returns nil; non-file outputs are filtered out by
+// collectKeaAllowedLogs.
+func icptConfigGetLoggers(agent *StorkAgent, response *keactrl.Response) error {
+ paths := collectKeaAllowedLogs(response)
+ for _, p := range paths {
+ agent.logTailer.allow(p)
+ }
+ return nil
+}
+
+// Change the reservation-get-page response status if unsupported error is
+// returned.
+//
+// Kea 2.2 and below return a general error response if RADIUS is used as
+// the host backend. It causes Stork to generate a false disconnect event
+// and block pulling host reservations from other host backends.
+// See: https://gitlab.isc.org/isc-projects/stork/-/issues/792 and
+// https://gitlab.isc.org/isc-projects/kea/-/issues/2566 .
+func reservationGetPageUnsupported(agent *StorkAgent, response *keactrl.Response) error {
+ // Match the exact error text emitted by the affected Kea versions and
+ // downgrade it to "command unsupported" so the server skips the backend.
+ if response.Result == keactrl.ResponseError && response.Text == "not supported by the RADIUS backend" {
+ response.Result = keactrl.ResponseCommandUnsupported
+ }
+
+ return nil
+}
+
+// Registers all intercept functions defined in this file. It should
+// be extended every time a new intercept function is defined.
+func registerKeaInterceptFns(agent *StorkAgent) {
+ // Async: record viewable log files from intercepted config-get responses.
+ agent.keaInterceptor.registerAsync(icptConfigGetLoggers, "config-get")
+ // Sync: rewrite the RADIUS "not supported" error before forwarding.
+ agent.keaInterceptor.registerSync(reservationGetPageUnsupported, "reservation-get-page")
+}
diff --git a/backend/agent/keainterceptfn_test.go b/backend/agent/keainterceptfn_test.go
new file mode 100644
index 0000000..db8b680
--- /dev/null
+++ b/backend/agent/keainterceptfn_test.go
@@ -0,0 +1,98 @@
+package agent
+
+import (
+ "encoding/json"
+ "testing"
+
+ "github.com/stretchr/testify/require"
+ keactrl "isc.org/stork/appctrl/kea"
+)
+
+// Tests that config-get is intercepted and loggers found in the returned
+// configuration are recorded. The log tailer is permitted to access only
+// those log files.
+func TestIcptConfigGetLoggers(t *testing.T) {
+	sa, _ := setupAgentTest()
+
+	// A Dhcp4 configuration with four loggers. File outputs should become
+	// allowed; stdout/stderr/syslog pseudo-outputs should not.
+	responseArgsJSON := `{
+        "Dhcp4": {
+            "loggers": [
+                {
+                    "output_options": [
+                        {
+                            "output": "/tmp/kea-dhcp4.log"
+                        },
+                        {
+                            "output": "stderr"
+                        }
+                    ]
+                },
+                {
+                    "output_options": [
+                        {
+                            "output": "/tmp/kea-dhcp4.log"
+                        }
+                    ]
+                },
+                {
+                    "output_options": [
+                        {
+                            "output": "stdout"
+                        }
+                    ]
+                },
+                {
+                    "output_options": [
+                        {
+                            "output": "/tmp/kea-dhcp4-allocations.log"
+                        },
+                        {
+                            "output": "syslog:1"
+                        }
+                    ]
+                }
+            ]
+        }
+    }`
+	responseArgs := make(map[string]interface{})
+	err := json.Unmarshal([]byte(responseArgsJSON), &responseArgs)
+	require.NoError(t, err)
+
+	// Wrap the parsed arguments in a successful config-get response.
+	response := &keactrl.Response{
+		ResponseHeader: keactrl.ResponseHeader{
+			Result: 0,
+			Text:   "Everything is fine",
+			Daemon: "dhcp4",
+		},
+		Arguments: &responseArgs,
+	}
+	err = icptConfigGetLoggers(sa, response)
+	require.NoError(t, err)
+	require.NotNil(t, sa.logTailer)
+	// Only regular file outputs may be tailed.
+	require.True(t, sa.logTailer.allowed("/tmp/kea-dhcp4.log"))
+	require.True(t, sa.logTailer.allowed("/tmp/kea-dhcp4-allocations.log"))
+	require.False(t, sa.logTailer.allowed("stdout"))
+	require.False(t, sa.logTailer.allowed("stderr"))
+	require.False(t, sa.logTailer.allowed("syslog:1"))
+}
+
+// Test that the result code is changed if the reservation-get-page command
+// returns an unsupported error.
+func TestReservationGetPageUnsupported(t *testing.T) {
+	// Arrange
+	sa, _ := setupAgentTest()
+
+	// The exact error text produced by Kea <= 2.2 with the RADIUS backend.
+	rsp := &keactrl.Response{
+		ResponseHeader: keactrl.ResponseHeader{
+			Result: keactrl.ResponseError,
+			Text:   "not supported by the RADIUS backend",
+		},
+	}
+
+	// Act
+	err := reservationGetPageUnsupported(sa, rsp)
+
+	// Assert: the general error is rewritten to "command unsupported".
+	require.NoError(t, err)
+	require.EqualValues(t, keactrl.ResponseCommandUnsupported, rsp.Result)
+}
diff --git a/backend/agent/logtail.go b/backend/agent/logtail.go
new file mode 100644
index 0000000..f5f95e5
--- /dev/null
+++ b/backend/agent/logtail.go
@@ -0,0 +1,89 @@
+package agent
+
+import (
+ "bufio"
+ "io"
+ "os"
+ "sync"
+
+ "github.com/pkg/errors"
+)
+
+// Log tailer provides means for viewing log files. It maintains the list of
+// unique files which can be viewed. If the file is not on the list of the allowed
+// files, an error is returned upon an attempt to view it.
+type logTailer struct {
+	// Set of absolute paths permitted for tailing; values are always true.
+	allowedPaths map[string]bool
+	// Guards allowedPaths; allow()/allowed() may be called from multiple goroutines.
+	mutex *sync.Mutex
+}
+
+// Creates new instance of the log tailer with an empty allow-list.
+func newLogTailer() *logTailer {
+	return &logTailer{
+		allowedPaths: map[string]bool{},
+		mutex:        &sync.Mutex{},
+	}
+}
+
+// Adds a specified path to the list of files which can be viewed.
+// Safe for concurrent use; allowing the same path twice is a no-op.
+func (lt *logTailer) allow(path string) {
+	lt.mutex.Lock()
+	defer lt.mutex.Unlock()
+	lt.allowedPaths[path] = true
+}
+
+// Checks if the given file can be viewed. Safe for concurrent use.
+func (lt *logTailer) allowed(path string) bool {
+	lt.mutex.Lock()
+	defer lt.mutex.Unlock()
+	// allow() only ever stores true, so the map value itself answers the
+	// membership question; a missing key reads as false. This replaces the
+	// redundant comma-ok lookup.
+	return lt.allowedPaths[path]
+}
+
+// Returns the tail of the specified log file. The path specifies the absolute
+// location of the log file. The offset specifies the location relative to the
+// end of the file from which the tail should be returned. The offset must be
+// a non-negative value. If the file is not allowed or does not exist, an
+// error is returned. An error is also returned if an attempt to read the
+// file fails.
+func (lt *logTailer) tail(path string, offset int64) (lines []string, err error) {
+	// Check if it is allowed to tail this file.
+	if !lt.allowed(path) {
+		err = errors.Errorf("access forbidden to the %s", path)
+		return lines, err
+	}
+
+	// The contract requires a non-negative offset. Previously a negative
+	// value was passed straight to Seek(-offset, io.SeekEnd), silently
+	// seeking past the end of the file; reject it explicitly instead.
+	if offset < 0 {
+		err = errors.Errorf("invalid negative tail offset %d for file: %s", offset, path)
+		return lines, err
+	}
+
+	f, err := os.Open(path)
+	if err != nil {
+		err = errors.WithMessagef(err, "failed to open file for tailing: %s", path)
+		return lines, err
+	}
+	defer func() {
+		_ = f.Close()
+	}()
+
+	stat, err := f.Stat()
+	if err != nil {
+		err = errors.WithMessagef(err, "failed to stat the file opened for tailing: %s", path)
+		return lines, err
+	}
+
+	// Can't go beyond the file size.
+	if offset > stat.Size() {
+		offset = stat.Size()
+	}
+
+	// Position at "offset" bytes before the end of the file.
+	_, err = f.Seek(-offset, io.SeekEnd)
+	if err != nil {
+		err = errors.WithMessagef(err, "failed to seek in the file opened for tailing: %s", path)
+		return lines, err
+	}
+	// Collect the remaining content line by line.
+	s := bufio.NewScanner(f)
+	for s.Scan() {
+		lines = append(lines, s.Text())
+	}
+	if err = s.Err(); err != nil {
+		err = errors.WithMessagef(err, "failed to read the tailed file: %s", path)
+	}
+	return lines, err
+}
diff --git a/backend/agent/logtail_test.go b/backend/agent/logtail_test.go
new file mode 100644
index 0000000..a157ab5
--- /dev/null
+++ b/backend/agent/logtail_test.go
@@ -0,0 +1,66 @@
+package agent
+
+import (
+ "fmt"
+ "math/rand"
+ "os"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/require"
+)
+
+// Test that the new instance of the log tailer can be created and that
+// the internal fields (allow-list map and its mutex) have been initialized.
+func TestNewLogTailer(t *testing.T) {
+	lt := newLogTailer()
+	require.NotNil(t, lt)
+	require.NotNil(t, lt.allowedPaths)
+	require.NotNil(t, lt.mutex)
+}
+
+// Test the mechanism which allows tailing selected files.
+func TestAllow(t *testing.T) {
+	lt := newLogTailer()
+	lt.allow("/tmp/kea-dhcp4.log")
+	// Only the explicitly allowed path may be tailed.
+	require.True(t, lt.allowed("/tmp/kea-dhcp4.log"))
+	require.False(t, lt.allowed("/tmp/kea-dhcp6.log"))
+
+	// Make sure that it is ok to allow the same file twice.
+	require.NotPanics(t, func() { lt.allow("/tmp/kea-dhcp4.log") })
+	require.True(t, lt.allowed("/tmp/kea-dhcp4.log"))
+}
+
+// Test that if the file is not allowed an attempt to tail this file
+// results in an error.
+func TestTailForbidden(t *testing.T) {
+	// Create the test file to make sure that the lack of file is not
+	// the reason for an error.
+	rand.Seed(time.Now().UnixNano())
+	filename := fmt.Sprintf("test%d.log", rand.Int63())
+	f, err := os.Create(filename)
+	require.NoError(t, err)
+	defer func() {
+		_ = os.Remove(filename)
+	}()
+	fmt.Fprintln(f, "Some contents")
+	// Close the file explicitly. The original left the descriptor open for
+	// the rest of the test binary's lifetime, and on some platforms the
+	// deferred os.Remove can fail while the file is still open.
+	require.NoError(t, f.Close())
+
+	// Tailing this file is initially not allowed, so an error should be returned.
+	lt := newLogTailer()
+	require.NotNil(t, lt)
+	_, err = lt.tail(filename, 100)
+	require.Error(t, err)
+
+	// Allow tailing the file. This time there should be no error.
+	lt.allow(filename)
+	_, err = lt.tail(filename, 100)
+	require.NoError(t, err)
+}
+
+// Test that if the tailed file doesn't exist an error is returned,
+// even though the path was never allowed either (the allow-list check
+// fires first).
+func TestTailNotExistingFile(t *testing.T) {
+	lt := newLogTailer()
+	require.NotNil(t, lt)
+	_, err := lt.tail("non-existing-file", 100)
+	require.Error(t, err)
+}
diff --git a/backend/agent/monitor.go b/backend/agent/monitor.go
new file mode 100644
index 0000000..1a2991d
--- /dev/null
+++ b/backend/agent/monitor.go
@@ -0,0 +1,328 @@
+package agent
+
+import (
+ "fmt"
+ "regexp"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/pkg/errors"
+ "github.com/shirou/gopsutil/process"
+ log "github.com/sirupsen/logrus"
+
+ storkutil "isc.org/stork/util"
+)
+
+// An access point for an application to retrieve information such
+// as status or metrics.
+type AccessPoint struct {
+	// Access point type: AccessPointControl or AccessPointStatistics.
+	Type string
+	// Address and port the app listens on for this access point.
+	Address string
+	Port int64
+	// True when the connection should be made over a secure protocol (TLS).
+	UseSecureProtocol bool
+	// Optional key used to authenticate to the access point; empty when unused.
+	Key string
+}
+
+// Currently supported types are: "control" and "statistics".
+const (
+ AccessPointControl = "control"
+ AccessPointStatistics = "statistics"
+)
+
+// Base application information. This structure is embedded
+// in other app specific structures like KeaApp and Bind9App.
+type BaseApp struct {
+	// PID of the detected process (kea-ctrl-agent or named).
+	Pid int32
+	// App type: AppTypeKea or AppTypeBind9.
+	Type string
+	// Access points (control/statistics) discovered for this app.
+	AccessPoints []AccessPoint
+}
+
+// Specific App like KeaApp or Bind9App have to implement
+// this interface. The methods should be implemented
+// in a specific way in given concrete App.
+type App interface {
+	// Returns the embedded BaseApp with common app information.
+	GetBaseApp() *BaseApp
+	// Returns the paths of log files configured for the app which
+	// may be exposed to the log viewer.
+	DetectAllowedLogs() ([]string, error)
+}
+
+// Currently supported types are: "kea" and "bind9".
+const (
+ AppTypeKea = "kea"
+ AppTypeBind9 = "bind9"
+)
+
+// Interface of the background monitor which periodically detects
+// Kea and BIND 9 apps running on the host.
+type AppMonitor interface {
+	GetApps() []App
+	// apType is the access point type (e.g. AccessPointControl) used
+	// together with address and port to match a specific app.
+	GetApp(appType, apType, address string, port int64) App
+	Start(agent *StorkAgent)
+	Shutdown()
+}
+
+// Default implementation of AppMonitor. All state is owned by the run()
+// goroutine; other goroutines communicate with it over channels.
+type appMonitor struct {
+	requests chan chan []App // input to app monitor, ie. channel for receiving requests
+	quit chan bool // channel for stopping app monitor
+	// Set while run() is active; written only by the run() goroutine.
+	running bool
+	// Used by Shutdown() to wait until run() returns.
+	wg *sync.WaitGroup
+
+	apps []App // list of detected apps on the host
+}
+
+// Names of apps that are being detected.
+const (
+ keaProcName = "kea-ctrl-agent"
+ namedProcName = "named"
+)
+
+// Creates an AppMonitor instance. It used to start it as well, but this is now done
+// by a dedicated method Start(). Make sure you call Start() before using app monitor.
+func NewAppMonitor() AppMonitor {
+	return &appMonitor{
+		requests: make(chan chan []App),
+		quit:     make(chan bool),
+		wg:       &sync.WaitGroup{},
+	}
+}
+
+// This function starts the actual monitor. This start is delayed in case we want to only
+// do command line parameters parsing, e.g. to print version or help and quit.
+func (sm *appMonitor) Start(storkAgent *StorkAgent) {
+	// Add to the wait group before spawning so that a Shutdown() racing
+	// with Start() still waits for the goroutine.
+	sm.wg.Add(1)
+	go sm.run(storkAgent)
+}
+
+// Main loop of the monitor, executed in a background goroutine. It runs an
+// initial app detection, records allowed log files once, and then serves
+// GetApps requests and periodic re-detections until a quit signal arrives.
+func (sm *appMonitor) run(storkAgent *StorkAgent) {
+	log.Printf("Started app monitor")
+
+	sm.running = true
+	defer sm.wg.Done()
+
+	// run app detection one time immediately at startup
+	sm.detectApps(storkAgent)
+
+	// For each detected Kea app, let's gather the logs which can be viewed
+	// from the UI.
+	sm.detectAllowedLogs(storkAgent)
+
+	// prepare ticker
+	const detectionInterval = 10 * time.Second
+	ticker := time.NewTicker(detectionInterval)
+	defer ticker.Stop()
+
+	for {
+		select {
+		case ret := <-sm.requests:
+			// process user request; sm.apps is only mutated in this
+			// goroutine, so serving it here avoids data races
+			ret <- sm.apps
+
+		case <-ticker.C:
+			// periodic detection; note allowed logs are refreshed only at
+			// startup, not on this periodic path
+			sm.detectApps(storkAgent)
+
+		case <-sm.quit:
+			// exit run
+			log.Printf("Stopped app monitor")
+			sm.running = false
+			return
+		}
+	}
+}
+
+// Returns true when the two base apps have the same type and identical
+// access points, compared pairwise by type, address, port and protocol flag.
+func baseAppsEqual(appNew, appOld *BaseApp) bool {
+	if appNew.Type != appOld.Type {
+		return false
+	}
+	if len(appNew.AccessPoints) != len(appOld.AccessPoints) {
+		return false
+	}
+	for idx, acPtNew := range appNew.AccessPoints {
+		acPtOld := appOld.AccessPoints[idx]
+		if acPtNew.Type != acPtOld.Type ||
+			acPtNew.Address != acPtOld.Address ||
+			acPtNew.Port != acPtOld.Port ||
+			acPtNew.UseSecureProtocol != acPtOld.UseSecureProtocol {
+			return false
+		}
+	}
+	return true
+}
+
+// Logs the apps which were newly detected or whose access points changed
+// since the previous detection round. Warns when nothing has ever been
+// detected to aid troubleshooting.
+func printNewOrUpdatedApps(newApps []App, oldApps []App) {
+	// look for new or updated apps
+	var newUpdatedApps []App
+	for _, an := range newApps {
+		appNew := an.GetBaseApp()
+		found := false
+		for _, ao := range oldApps {
+			// BUG FIX: the original inner comparison used `continue`
+			// statements that only skipped individual access points and then
+			// set found=true unconditionally, so any old app with a matching
+			// type and access point count was treated as equal even when its
+			// addresses, ports or protocols differed. Compare properly.
+			if baseAppsEqual(appNew, ao.GetBaseApp()) {
+				found = true
+				break
+			}
+		}
+		if !found {
+			newUpdatedApps = append(newUpdatedApps, an)
+		}
+	}
+	// if found print new or updated apps
+	if len(newUpdatedApps) > 0 {
+		log.Printf("New or updated apps detected:")
+		for _, app := range newUpdatedApps {
+			var acPts []string
+			for _, acPt := range app.GetBaseApp().AccessPoints {
+				url := storkutil.HostWithPortURL(acPt.Address, acPt.Port, acPt.UseSecureProtocol)
+				s := fmt.Sprintf("%s: %s", acPt.Type, url)
+				acPts = append(acPts, s)
+			}
+			log.Printf("   %s: %s", app.GetBaseApp().Type, strings.Join(acPts, ", "))
+		}
+	} else if len(oldApps) == 0 {
+		// Agent is starting up but no app to monitor has been detected.
+		// Usually, the agent is installed with at least one monitored app.
+		// The below message is printed for easier troubleshooting.
+		log.Warnf("No Kea nor Bind9 app detected for monitoring; please check if they are running, and Stork can communicate with them.")
+	}
+}
+
+// Scans the host's process list for Kea and BIND 9 daemons and rebuilds
+// sm.apps. Each detected process is further probed for its access points.
+// Must only be called from the run() goroutine, which owns sm.apps.
+func (sm *appMonitor) detectApps(storkAgent *StorkAgent) {
+	// Kea app is being detected by browsing list of processes in the system
+	// where cmdline of the process contains given pattern with kea-ctrl-agent
+	// substring. Such found processes are being processed further and all other
+	// Kea daemons are discovered and queried for their versions, etc.
+	keaPtrn := regexp.MustCompile(`(.*?)kea-ctrl-agent\s+.*-c\s+(\S+)`)
+	// BIND 9 app is being detecting by browsing list of processes in the system
+	// where cmdline of the process contains given pattern with named substring.
+	bind9Ptrn := regexp.MustCompile(`(.*?)named\s+(.*)`)
+
+	var apps []App
+
+	// Errors listing processes are deliberately ignored; detection simply
+	// yields an empty app list in that case.
+	procs, _ := process.Processes()
+	for _, p := range procs {
+		procName, _ := p.Name()
+		cmdline := ""
+		cwd := ""
+		var err error
+		if procName == keaProcName || procName == namedProcName {
+			cmdline, err = p.Cmdline()
+			if err != nil {
+				log.Warnf("Cannot get process command line: %+v", err)
+				continue
+			}
+			// The CWD is needed to resolve config paths given relatively;
+			// failure here is non-fatal.
+			cwd, err = p.Cwd()
+			if err != nil {
+				log.Warnf("Cannot get process current working directory: %+v", err)
+				cwd = ""
+			}
+		}
+
+		if procName == keaProcName {
+			// detect kea
+			m := keaPtrn.FindStringSubmatch(cmdline)
+			if m != nil {
+				keaApp := detectKeaApp(m, cwd, storkAgent.HTTPClient)
+				if keaApp != nil {
+					keaApp.GetBaseApp().Pid = p.Pid
+					apps = append(apps, keaApp)
+				}
+			}
+			continue
+		}
+
+		if procName == namedProcName {
+			// detect bind9
+			m := bind9Ptrn.FindStringSubmatch(cmdline)
+			if m != nil {
+				cmdr := &storkutil.RealCommander{}
+				bind9App := detectBind9App(m, cwd, cmdr)
+				if bind9App != nil {
+					bind9App.GetBaseApp().Pid = p.Pid
+					apps = append(apps, bind9App)
+				}
+			}
+			continue
+		}
+	}
+
+	// check changes in apps and print them
+	printNewOrUpdatedApps(apps, sm.apps)
+
+	// remember detected apps
+	sm.apps = apps
+}
+
+// Gathers the configured log files for detected apps and enables them
+// for viewing from the UI. Detection failures are logged as warnings and
+// do not stop the processing of the remaining apps.
+func (sm *appMonitor) detectAllowedLogs(storkAgent *StorkAgent) {
+	// Nothing to do if the agent is not set. It may be nil when running some
+	// tests.
+	if storkAgent == nil {
+		return
+	}
+	for _, app := range sm.apps {
+		paths, err := app.DetectAllowedLogs()
+		if err != nil {
+			ap := app.GetBaseApp().AccessPoints[0]
+			// BUG FIX: the message was hardcoded as "for Kea" although
+			// sm.apps contains BIND 9 apps too; include the actual app type
+			// (this also supplies the format argument WithMessagef expects).
+			err = errors.WithMessagef(err, "failed to detect log files for %s app", app.GetBaseApp().Type)
+			log.WithFields(
+				log.Fields{
+					"address": ap.Address,
+					"port":    ap.Port,
+				},
+			).Warn(err)
+		} else {
+			for _, p := range paths {
+				storkAgent.logTailer.allow(p)
+			}
+		}
+	}
+}
+
+// Get a list of detected apps by a monitor. Sends a request to the run()
+// goroutine and blocks until it replies with the current app list.
+func (sm *appMonitor) GetApps() []App {
+	reply := make(chan []App)
+	sm.requests <- reply
+	return <-reply
+}
+
+// Get an app from a monitor that matches provided params: the app type,
+// the access point type, and the access point address and port.
+// Returns nil when no detected app matches.
+func (sm *appMonitor) GetApp(appType, apType, address string, port int64) App {
+	for _, app := range sm.GetApps() {
+		base := app.GetBaseApp()
+		if base.Type != appType {
+			continue
+		}
+		for _, point := range base.AccessPoints {
+			if point.Type == apType && point.Address == address && point.Port == port {
+				return app
+			}
+		}
+	}
+	return nil
+}
+
+// Shut down monitor. Stop its background goroutine.
+// Blocks until run() observes the quit signal and returns.
+func (sm *appMonitor) Shutdown() {
+	sm.quit <- true
+	sm.wg.Wait()
+}
+
+// getAccessPoint retrieves the requested type of access point from the app.
+// An error is returned when no access point of that type exists or when the
+// matching one lacks an address or a port number.
+func getAccessPoint(app App, accessType string) (*AccessPoint, error) {
+	for _, point := range app.GetBaseApp().AccessPoints {
+		if point.Type != accessType {
+			continue
+		}
+		switch {
+		case point.Port == 0:
+			return nil, errors.Errorf("%s access point does not have port number", accessType)
+		case len(point.Address) == 0:
+			return nil, errors.Errorf("%s access point does not have address", accessType)
+		default:
+			// Return a pointer to the loop copy (detached from the app's
+			// slice), matching the original aliasing behavior.
+			point := point
+			return &point, nil
+		}
+	}
+
+	return nil, errors.Errorf("%s access point not found", accessType)
+}
diff --git a/backend/agent/monitor_test.go b/backend/agent/monitor_test.go
new file mode 100644
index 0000000..7ef12ff
--- /dev/null
+++ b/backend/agent/monitor_test.go
@@ -0,0 +1,544 @@
+package agent
+
+import (
+ "bytes"
+ "flag"
+ "fmt"
+ "os"
+ "path"
+ "sync"
+ "testing"
+
+ pkgerrors "github.com/pkg/errors"
+ "github.com/sirupsen/logrus"
+ "github.com/stretchr/testify/require"
+ "github.com/urfave/cli/v2"
+
+ "isc.org/stork/testutil"
+)
+
+// Test that a freshly started monitor on a host with no monitored daemons
+// reports an empty app list and shuts down cleanly.
+func TestGetApps(t *testing.T) {
+	am := NewAppMonitor()
+	settings := cli.NewContext(nil, flag.NewFlagSet("", 0), nil)
+	sa := NewStorkAgent(settings, am)
+	am.Start(sa)
+	apps := am.GetApps()
+	require.Len(t, apps, 0)
+	am.Shutdown()
+}
+
+// Check if detected apps are returned by GetApp.
+func TestGetApp(t *testing.T) {
+ am := NewAppMonitor()
+
+ var apps []App
+ apps = append(apps, &KeaApp{
+ BaseApp: BaseApp{
+ Type: AppTypeKea,
+ AccessPoints: makeAccessPoint(AccessPointControl, "1.2.3.1", "", 1234, true),
+ },
+ HTTPClient: nil,
+ })
+
+ accessPoints := makeAccessPoint(AccessPointControl, "2.3.4.4", "abcd", 2345, false)
+ accessPoints = append(accessPoints, AccessPoint{
+ Type: AccessPointStatistics,
+ Address: "2.3.4.5",
+ Port: 2346,
+ Key: "",
+ })
+
+ apps = append(apps, &Bind9App{
+ BaseApp: BaseApp{
+ Type: AppTypeBind9,
+ AccessPoints: accessPoints,
+ },
+ })
+
+ // Monitor holds apps in background goroutine. So to get apps we need
+ // to send a request over a channel to this goroutine and wait for
+ // a response with detected apps. We do not want to spawn monitor background
+ // goroutine so we are calling GetApp in our background goroutine
+ // and are serving this request in the main thread.
+ // To make it in sync the wait group is used here.
+ var wg sync.WaitGroup
+
+ // find kea app
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ app := am.GetApp(AppTypeKea, AccessPointControl, "1.2.3.1", 1234)
+ require.NotNil(t, app)
+ require.EqualValues(t, AppTypeKea, app.GetBaseApp().Type)
+ }()
+ ret := <-am.(*appMonitor).requests
+ ret <- apps
+ wg.Wait()
+
+ // find bind app
+ wg.Add(1) // expect 1 Done in the wait group
+ go func() {
+ defer wg.Done()
+ app := am.GetApp(AppTypeBind9, AccessPointControl, "2.3.4.4", 2345)
+ require.NotNil(t, app)
+ require.EqualValues(t, AppTypeBind9, app.GetBaseApp().Type)
+ }()
+ ret = <-am.(*appMonitor).requests
+ ret <- apps
+ wg.Wait()
+
+ // find not existing app - should return nil
+ wg.Add(1) // expect 1 Done in the wait group
+ go func() {
+ defer wg.Done()
+ app := am.GetApp(AppTypeKea, AccessPointControl, "0.0.0.0", 1)
+ require.Nil(t, app)
+ }()
+ ret = <-am.(*appMonitor).requests
+ ret <- apps
+ wg.Wait()
+}
+
+// Test that reading the control target from a non-existing Kea config
+// file yields zero values rather than a panic.
+func TestGetCtrlAddressFromKeaConfigNonExisting(t *testing.T) {
+	// check reading from non existing file
+	path := "/tmp/non-existing-path"
+	address, port, useSecureProtocol := getCtrlTargetFromKeaConfig(path)
+	require.Zero(t, port)
+	require.Empty(t, address)
+	require.False(t, useSecureProtocol)
+}
+
+func TestGetCtrlFromKeaConfigBadContent(t *testing.T) {
+ // prepare kea conf file
+ tmpFile, err := os.CreateTemp(os.TempDir(), "prefix-")
+ require.NoError(t, err)
+
+ defer os.Remove(tmpFile.Name())
+
+ text := []byte("random content")
+ _, err = tmpFile.Write(text)
+ require.NoError(t, err)
+
+ err = tmpFile.Close()
+ require.NoError(t, err)
+
+ // check reading from prepared file with bad content
+ // so 0 should be returned as port
+ address, port, useSecureProtocol := getCtrlTargetFromKeaConfig(tmpFile.Name())
+ require.Zero(t, port)
+ require.Empty(t, address)
+ require.False(t, useSecureProtocol)
+}
+
+func TestGetCtrlAddressFromKeaConfigOk(t *testing.T) {
+ // prepare kea conf file
+ tmpFile, err := os.CreateTemp(os.TempDir(), "prefix-")
+ require.NoError(t, err)
+
+ defer os.Remove(tmpFile.Name())
+
+ text := []byte(`{ "Control-agent": {
+ "http-host": "host.example.org",
+ "http-port": 1234
+ } }`)
+ _, err = tmpFile.Write(text)
+ require.NoError(t, err)
+
+ err = tmpFile.Close()
+ require.NoError(t, err)
+
+ // check reading from proper file
+ address, port, useSecureProtocol := getCtrlTargetFromKeaConfig(tmpFile.Name())
+ require.EqualValues(t, 1234, port)
+ require.Equal(t, "host.example.org", address)
+ require.False(t, useSecureProtocol)
+}
+
+func TestGetCtrlAddressFromKeaConfigAddress0000(t *testing.T) {
+ // prepare kea conf file
+ tmpFile, err := os.CreateTemp(os.TempDir(), "prefix-")
+ require.NoError(t, err)
+
+ defer os.Remove(tmpFile.Name())
+
+ text := []byte(`{ "Control-agent": {
+ "http-host": "0.0.0.0",
+ "http-port": 1234
+ } }`)
+ _, err = tmpFile.Write(text)
+ require.NoError(t, err)
+
+ err = tmpFile.Close()
+ require.NoError(t, err)
+
+ // check reading from proper file;
+ // if CA is listening on 0.0.0.0 then 127.0.0.1 should be returned
+ // as it is not possible to connect to 0.0.0.0
+ address, port, useSecureProtocol := getCtrlTargetFromKeaConfig(tmpFile.Name())
+ require.EqualValues(t, 1234, port)
+ require.Equal(t, "127.0.0.1", address)
+ require.False(t, useSecureProtocol)
+}
+
+func TestGetCtrlAddressFromKeaConfigAddressColons(t *testing.T) {
+ // prepare kea conf file
+ tmpFile, err := os.CreateTemp(os.TempDir(), "prefix-")
+ require.NoError(t, err)
+
+ defer os.Remove(tmpFile.Name())
+
+ text := []byte(`{ "Control-agent": {
+ "http-host": "::",
+ "http-port": 1234
+ } }`)
+ _, err = tmpFile.Write(text)
+ require.NoError(t, err)
+
+ err = tmpFile.Close()
+ require.NoError(t, err)
+
+ // check reading from proper file;
+ // if CA is listening on :: then ::1 should be returned
+ // as it is not possible to connect to ::
+ address, port, useSecureProtocol := getCtrlTargetFromKeaConfig(tmpFile.Name())
+ require.EqualValues(t, 1234, port)
+ require.Equal(t, "::1", address)
+ require.False(t, useSecureProtocol)
+}
+
+// Smoke test: detectApps must complete without panicking when scanning the
+// processes of the host running the test (results depend on the environment,
+// so no assertions are made about the detected apps).
+func TestDetectApps(t *testing.T) {
+	am := &appMonitor{}
+	settings := cli.NewContext(nil, flag.NewFlagSet("", 0), nil)
+	sa := NewStorkAgent(settings, am)
+	am.detectApps(sa)
+}
+
+// Test that detectAllowedLogs does not panic when Kea server is unreachable.
+func TestDetectAllowedLogsKeaUnreachable(t *testing.T) {
+ am := &appMonitor{}
+ am.apps = append(am.apps, &KeaApp{
+ BaseApp: BaseApp{
+ Type: AppTypeKea,
+ AccessPoints: []AccessPoint{
+ {
+ Type: AccessPointControl,
+ Address: "localhost",
+ Port: 45678,
+ },
+ },
+ },
+ HTTPClient: NewHTTPClient(false),
+ })
+
+ settings := cli.NewContext(nil, flag.NewFlagSet("", 0), nil)
+ sa := NewStorkAgent(settings, am)
+
+ require.NotPanics(t, func() { am.detectAllowedLogs(sa) })
+}
+
+type TestCommander struct{}
+
+func (c TestCommander) Output(command string, args ...string) ([]byte, error) {
+ text := `keys "foo" {
+ algorithm "hmac-sha256";
+ secret "abcd";
+ };
+ controls {
+ inet 127.0.0.53 port 5353 allow { localhost; } keys { "foo"; "bar"; };
+ inet * port 5454 allow { localhost; 1.2.3.4; };
+ };
+ statistics-channels {
+ inet 127.0.0.80 port 80 allow { localhost; 1.2.3.4; };
+ inet 127.0.0.88 port 88 allow { localhost; 1.2.3.4; };
+ };`
+
+ return []byte(text), nil
+}
+
+// Check BIND 9 app detection when its conf file is absolute path.
+func TestDetectBind9AppAbsPath(t *testing.T) {
+ sb := testutil.NewSandbox()
+ defer sb.Close()
+
+ // check BIND 9 app detection
+ cmdr := &TestCommander{}
+ cfgPath, err := sb.Join("etc/path.cfg")
+ require.NoError(t, err)
+ namedDir, err := sb.JoinDir("usr/sbin")
+ require.NoError(t, err)
+ _, err = sb.Join("usr/bin/named-checkconf")
+ require.NoError(t, err)
+ _, err = sb.Join("usr/sbin/rndc")
+ require.NoError(t, err)
+ app := detectBind9App([]string{"", namedDir, fmt.Sprintf("-c %s", cfgPath)}, "", cmdr)
+ require.NotNil(t, app)
+ require.Equal(t, app.GetBaseApp().Type, AppTypeBind9)
+ require.Len(t, app.GetBaseApp().AccessPoints, 2)
+ point := app.GetBaseApp().AccessPoints[0]
+ require.Equal(t, AccessPointControl, point.Type)
+ require.Equal(t, "127.0.0.53", point.Address)
+ require.EqualValues(t, 5353, point.Port)
+ point = app.GetBaseApp().AccessPoints[1]
+ require.Equal(t, AccessPointStatistics, point.Type)
+ require.Equal(t, "127.0.0.80", point.Address)
+ require.EqualValues(t, 80, point.Port)
+ require.Empty(t, point.Key)
+}
+
+// Check BIND 9 app detection when its conf file is relative to CWD of its process.
+func TestDetectBind9AppRelativePath(t *testing.T) {
+ sb := testutil.NewSandbox()
+ defer sb.Close()
+
+ cmdr := &TestCommander{}
+ sb.Join("etc/path.cfg")
+ cfgDir, err := sb.JoinDir("etc")
+ require.NoError(t, err)
+ namedDir, err := sb.JoinDir("usr/sbin")
+ require.NoError(t, err)
+ _, err = sb.Join("usr/sbin/named-checkconf")
+ require.NoError(t, err)
+ _, err = sb.Join("usr/bin/rndc")
+ require.NoError(t, err)
+ app := detectBind9App([]string{"", namedDir, "-c path.cfg"}, cfgDir, cmdr)
+ require.NotNil(t, app)
+ require.Equal(t, app.GetBaseApp().Type, AppTypeBind9)
+}
+
+// Creates a basic Kea configuration file.
+// Caller is responsible for removing the file.
+func makeKeaConfFile() (*os.File, error) {
+	// prepare kea conf file
+	file, err := os.CreateTemp(os.TempDir(), "prefix-")
+	if err != nil {
+		return nil, pkgerrors.Wrap(err, "cannot create temporary file")
+	}
+
+	// Minimal Control-agent section pointing at a fixed local port.
+	text := []byte(`{ "Control-agent": {
+        "http-host": "localhost",
+        "http-port": 45634
+    } }`)
+	if _, err = file.Write(text); err != nil {
+		return nil, pkgerrors.Wrap(err, "failed to write to temporary file")
+	}
+	if err := file.Close(); err != nil {
+		return nil, pkgerrors.Wrap(err, "failed to close a temporary file")
+	}
+
+	return file, nil
+}
+
+// Creates a basic Kea configuration file with include statement.
+// It returns both inner and outer files.
+// Caller is responsible for removing the files.
+func makeKeaConfFileWithInclude() (parentConfig *os.File, childConfig *os.File, err error) {
+	// prepare kea conf file
+	parentConfig, err = os.CreateTemp(os.TempDir(), "prefix-*.json")
+	if err != nil {
+		return nil, nil, pkgerrors.Wrap(err, "cannot create temporary file for parent config")
+	}
+
+	childConfig, err = os.CreateTemp(os.TempDir(), "prefix-*.json")
+	if err != nil {
+		// Avoid leaking the already-created parent file on this error path.
+		_ = os.Remove(parentConfig.Name())
+		return nil, nil, pkgerrors.Wrap(err, "cannot create temporary file for child config")
+	}
+
+	// The child holds the Control-agent body included by the parent.
+	text := []byte(`{
+        "http-host": "localhost",
+        "http-port": 45634
+    }`)
+
+	if _, err = childConfig.Write(text); err != nil {
+		return nil, nil, pkgerrors.Wrap(err, "failed to write to temporary file")
+	}
+	// Error message aligned with makeKeaConfFile ("close a", not "close to").
+	if err := childConfig.Close(); err != nil {
+		return nil, nil, pkgerrors.Wrap(err, "failed to close a temporary file")
+	}
+
+	text = []byte(fmt.Sprintf("{ \"Control-agent\": <?include \"%s\"?> }", childConfig.Name()))
+	if _, err = parentConfig.Write(text); err != nil {
+		return nil, nil, pkgerrors.Wrap(err, "failed to write to temporary file")
+	}
+	if err := parentConfig.Close(); err != nil {
+		return nil, nil, pkgerrors.Wrap(err, "failed to close a temporary file")
+	}
+
+	return parentConfig, childConfig, nil
+}
+
+func TestDetectKeaApp(t *testing.T) {
+ tmpFile, err := makeKeaConfFile()
+ require.NoError(t, err)
+ tmpFilePath := tmpFile.Name()
+ defer os.Remove(tmpFilePath)
+
+ checkApp := func(app App) {
+ require.NotNil(t, app)
+ require.Equal(t, AppTypeKea, app.GetBaseApp().Type)
+ require.Len(t, app.GetBaseApp().AccessPoints, 1)
+ ctrlPoint := app.GetBaseApp().AccessPoints[0]
+ require.Equal(t, AccessPointControl, ctrlPoint.Type)
+ require.Equal(t, "localhost", ctrlPoint.Address)
+ require.EqualValues(t, 45634, ctrlPoint.Port)
+ require.Empty(t, ctrlPoint.Key)
+ }
+
+ httpClient := NewHTTPClient(false)
+
+ // check kea app detection
+ app := detectKeaApp([]string{"", "", tmpFilePath}, "", httpClient)
+ checkApp(app)
+
+ // check kea app detection when kea conf file is relative to CWD of kea process
+ cwd, file := path.Split(tmpFilePath)
+ app = detectKeaApp([]string{"", "", file}, cwd, httpClient)
+ checkApp(app)
+
+ // Check configuration with an include statement
+ tmpFile, nestedFile, err := makeKeaConfFileWithInclude()
+ require.NoError(t, err)
+ tmpFilePath = tmpFile.Name()
+ defer os.Remove(tmpFilePath)
+ defer os.Remove(nestedFile.Name())
+
+ // check kea app detection
+ app = detectKeaApp([]string{"", "", tmpFilePath}, "", httpClient)
+ checkApp(app)
+
+ // check kea app detection when kea conf file is relative to CWD of kea process
+ cwd, file = path.Split(tmpFilePath)
+ app = detectKeaApp([]string{"", "", file}, cwd, httpClient)
+ checkApp(app)
+}
+
+func TestGetAccessPoint(t *testing.T) {
+ bind9App := &Bind9App{
+ BaseApp: BaseApp{
+ Type: AppTypeBind9,
+ AccessPoints: []AccessPoint{
+ {
+ Type: AccessPointControl,
+ Address: "127.0.0.53",
+ Port: int64(5353),
+ Key: "hmac-sha256:abcd",
+ },
+ {
+ Type: AccessPointStatistics,
+ Address: "127.0.0.80",
+ Port: int64(80),
+ Key: "",
+ },
+ },
+ },
+ RndcClient: nil,
+ }
+
+ keaApp := &KeaApp{
+ BaseApp: BaseApp{
+ Type: AppTypeKea,
+ AccessPoints: []AccessPoint{
+ {
+ Type: AccessPointControl,
+ Address: "localhost",
+ Port: int64(45634),
+ Key: "",
+ },
+ },
+ },
+ HTTPClient: nil,
+ }
+
+ // test get bind 9 access points
+ point, err := getAccessPoint(bind9App, AccessPointControl)
+ require.NotNil(t, point)
+ require.NoError(t, err)
+ require.Equal(t, AccessPointControl, point.Type)
+ require.Equal(t, "127.0.0.53", point.Address)
+ require.EqualValues(t, 5353, point.Port)
+ require.Equal(t, "hmac-sha256:abcd", point.Key)
+
+ point, err = getAccessPoint(bind9App, AccessPointStatistics)
+ require.NotNil(t, point)
+ require.NoError(t, err)
+ require.Equal(t, AccessPointStatistics, point.Type)
+ require.Equal(t, "127.0.0.80", point.Address)
+ require.EqualValues(t, 80, point.Port)
+ require.Empty(t, point.Key)
+
+ // test get kea access points
+ point, err = getAccessPoint(keaApp, AccessPointControl)
+ require.NotNil(t, point)
+ require.NoError(t, err)
+ require.Equal(t, AccessPointControl, point.Type)
+ require.Equal(t, "localhost", point.Address)
+ require.EqualValues(t, 45634, point.Port)
+ require.Empty(t, point.Key)
+
+ point, err = getAccessPoint(keaApp, AccessPointStatistics)
+ require.Error(t, err)
+ require.Nil(t, point)
+}
+
+func TestPrintNewOrUpdatedApps(t *testing.T) {
+ bind9App := &Bind9App{
+ BaseApp: BaseApp{
+ Type: AppTypeBind9,
+ AccessPoints: []AccessPoint{
+ {
+ Type: AccessPointControl,
+ Address: "127.0.0.53",
+ Port: int64(5353),
+ Key: "hmac-sha256:abcd",
+ },
+ {
+ Type: AccessPointStatistics,
+ Address: "127.0.0.80",
+ Port: int64(80),
+ Key: "",
+ },
+ },
+ },
+ RndcClient: nil,
+ }
+
+ keaApp := &KeaApp{
+ BaseApp: BaseApp{
+ Type: AppTypeKea,
+ AccessPoints: []AccessPoint{
+ {
+ Type: AccessPointControl,
+ Address: "localhost",
+ Port: int64(45634),
+ Key: "",
+ },
+ },
+ },
+ HTTPClient: nil,
+ }
+
+ newApps := []App{bind9App, keaApp}
+ var oldApps []App
+
+ printNewOrUpdatedApps(newApps, oldApps)
+}
+
+// The monitor periodically searches for the Kea/Bind9 instances. Usually, at
+// least one application should be available. If no monitored app is found,
+// the Stork prints the warning message to indicate that something unexpected
+// happened.
+func TestPrintNewOrUpdatedAppsNoAppDetectedWarning(t *testing.T) {
+	// Arrange: capture logrus output in a buffer, restoring the original
+	// writer when the test ends.
+	output := logrus.StandardLogger().Out
+	defer func() {
+		logrus.SetOutput(output)
+	}()
+	var buffer bytes.Buffer
+	logrus.SetOutput(&buffer)
+
+	// Act: no new and no old apps simulates the startup with nothing detected.
+	printNewOrUpdatedApps([]App{}, []App{})
+
+	// Assert
+	require.Contains(t, buffer.String(), "No Kea nor Bind9 app detected for monitoring")
+}
diff --git a/backend/agent/prombind9exporter.go b/backend/agent/prombind9exporter.go
new file mode 100644
index 0000000..78d0f23
--- /dev/null
+++ b/backend/agent/prombind9exporter.go
@@ -0,0 +1,1236 @@
+package agent
+
+import (
+ "context"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "math"
+ "net"
+ "net/http"
+ "sort"
+ "strconv"
+ "strings"
+ "time"
+
+ pkgerrors "github.com/pkg/errors"
+ "github.com/prometheus/client_golang/prometheus"
+ "github.com/prometheus/client_golang/prometheus/promhttp"
+ "github.com/prometheus/common/version"
+ log "github.com/sirupsen/logrus"
+ "github.com/urfave/cli/v2"
+
+ "isc.org/stork"
+ storkutil "isc.org/stork/util"
+)
+
+const (
+ namespace = "bind"
+ qryRTT = "QryRTT"
+)
+
+type PromBind9TrafficStats struct {
+ SizeCount map[string]float64
+}
+
+type PromBind9ViewStats struct {
+ ResolverCache map[string]float64
+ ResolverCachestats map[string]float64
+ ResolverQtypes map[string]float64
+ ResolverStats map[string]float64
+}
+
+// Statistics to be exported.
+type PromBind9ExporterStats struct {
+ BootTime time.Time
+ ConfigTime time.Time
+ CurrentTime time.Time
+ IncomingQueries map[string]float64
+ IncomingRequests map[string]float64
+ NsStats map[string]float64
+ TaskMgr map[string]float64
+ TrafficStats map[string]PromBind9TrafficStats
+ Views map[string]PromBind9ViewStats
+}
+
+// Main structure for Prometheus BIND 9 Exporter. It holds its config,
+// references to app monitor, HTTP client, HTTP server, and mappings
+// between BIND 9 stats names to prometheus stats.
+type PromBind9Exporter struct {
+ Settings *cli.Context
+
+ AppMonitor AppMonitor
+ HTTPClient *HTTPClient
+ HTTPServer *http.Server
+
+ up int
+ procID int32
+ procExporter prometheus.Collector
+ Registry *prometheus.Registry
+ serverStatsDesc map[string]*prometheus.Desc
+ trafficStatsDesc map[string]*prometheus.Desc
+ viewStatsDesc map[string]*prometheus.Desc
+
+ stats PromBind9ExporterStats
+}
+
+// Create new Prometheus BIND 9 Exporter.
+func NewPromBind9Exporter(settings *cli.Context, appMonitor AppMonitor) *PromBind9Exporter {
+ pbe := &PromBind9Exporter{
+ Settings: settings,
+ AppMonitor: appMonitor,
+ HTTPClient: NewHTTPClient(settings.Bool("skip-tls-cert-verification")),
+ Registry: prometheus.NewRegistry(),
+ }
+
+ // bind_exporter stats
+ serverStatsDesc := make(map[string]*prometheus.Desc)
+ trafficStatsDesc := make(map[string]*prometheus.Desc)
+ viewStatsDesc := make(map[string]*prometheus.Desc)
+
+ // boot_time_seconds
+ serverStatsDesc["boot-time"] = prometheus.NewDesc(
+ prometheus.BuildFQName(namespace, "", "boot_time_seconds"),
+ "Start time of the BIND process since unix epoch in seconds.",
+ nil, nil)
+ // config_time_seconds
+ serverStatsDesc["config-time"] = prometheus.NewDesc(
+ prometheus.BuildFQName(namespace, "", "config_time_seconds"),
+ "Time of the last reconfiguration since unix epoch in seconds.",
+ nil, nil)
+ // current_time_seconds
+ serverStatsDesc["current-time"] = prometheus.NewDesc(
+ prometheus.BuildFQName(namespace, "", "current_time_seconds"),
+ "Current time unix epoch in seconds as reported by named.",
+ nil, nil)
+
+ // incoming_queries_total
+ serverStatsDesc["qtypes"] = prometheus.NewDesc(
+ prometheus.BuildFQName(namespace, "", "incoming_queries_total"),
+ "Number of incoming DNS queries.",
+ []string{"type"}, nil)
+ // incoming_queries_tcp
+ serverStatsDesc["QryTCP"] = prometheus.NewDesc(
+ prometheus.BuildFQName(namespace, "", "incoming_queries_tcp"),
+ "Number of incoming TCP queries.",
+ nil, nil)
+ // incoming_queries_udp
+ serverStatsDesc["QryUDP"] = prometheus.NewDesc(
+ prometheus.BuildFQName(namespace, "", "incoming_queries_udp"),
+ "Number of incoming UDP queries.",
+ nil, nil)
+
+ // incoming_requests_total
+ serverStatsDesc["opcodes"] = prometheus.NewDesc(
+ prometheus.BuildFQName(namespace, "", "incoming_requests_total"),
+ "Number of incoming DNS requests.",
+ []string{"opcode"}, nil)
+ // incoming_requests_tcp
+ serverStatsDesc["ReqTCP"] = prometheus.NewDesc(
+ prometheus.BuildFQName(namespace, "", "incoming_requests_tcp"),
+ "Number of incoming TCP requests.",
+ nil, nil)
+
+ // traffic_incoming_requests_udp4_size_bucket
+ // traffic_incoming_requests_udp4_size_count
+ // traffic_incoming_requests_udp4_size_sum
+ trafficStatsDesc["dns-udp-requests-sizes-received-ipv4"] = prometheus.NewDesc(
+ prometheus.BuildFQName(namespace, "", "traffic_incoming_requests_udp4_size"),
+ "Size of DNS requests (UDP/IPv4).",
+ nil, nil)
+ // traffic_incoming_requests_udp6_size_bucket
+ // traffic_incoming_requests_udp6_size_count
+ // traffic_incoming_requests_udp6_size_sum
+ trafficStatsDesc["dns-udp-requests-sizes-received-ipv6"] = prometheus.NewDesc(
+ prometheus.BuildFQName(namespace, "", "traffic_incoming_requests_udp6_size"),
+ "Size of DNS requests (UDP/IPv6).",
+ nil, nil)
+ // traffic_incoming_requests_tcp4_size_bucket
+ // traffic_incoming_requests_tcp4_size_count
+ // traffic_incoming_requests_tcp4_size_sum
+ trafficStatsDesc["dns-tcp-requests-sizes-received-ipv4"] = prometheus.NewDesc(
+ prometheus.BuildFQName(namespace, "", "traffic_incoming_requests_tcp4_size"),
+ "Size of DNS requests (TCP/IPv4).",
+ nil, nil)
+ // traffic_incoming_requests_tcp6_size_bucket
+ // traffic_incoming_requests_tcp6_size_count
+ // traffic_incoming_requests_tcp6_size_sum
+ trafficStatsDesc["dns-tcp-requests-sizes-received-ipv6"] = prometheus.NewDesc(
+ prometheus.BuildFQName(namespace, "", "traffic_incoming_requests_tcp6_size"),
+ "Size of DNS requests (TCP/IPv6).",
+ nil, nil)
+ // traffic_incoming_requests_total_size_bucket
+ // traffic_incoming_requests_total_size_count
+ // traffic_incoming_requests_total_size_sum
+ trafficStatsDesc["dns-total-requests-sizes-sent"] = prometheus.NewDesc(
+ prometheus.BuildFQName(namespace, "", "traffic_incoming_requests_total_size"),
+ "Size of DNS requests (any transport).",
+ nil, nil)
+
+ // traffic_responses_udp4_size_bucket
+ // traffic_responses_udp4_size_count
+ // traffic_responses_udp4_size_sum
+ trafficStatsDesc["dns-udp-responses-sizes-sent-ipv4"] = prometheus.NewDesc(
+ prometheus.BuildFQName(namespace, "", "traffic_responses_udp4_size"),
+ "Size of DNS responses (UDP/IPv4).",
+ nil, nil)
+ // traffic_responses_udp6_size_bucket
+ // traffic_responses_udp6_size_count
+ // traffic_responses_udp6_size_sum
+ trafficStatsDesc["dns-udp-responses-sizes-sent-ipv6"] = prometheus.NewDesc(
+ prometheus.BuildFQName(namespace, "", "traffic_responses_udp6_size"),
+ "Size of DNS responses (UDP/IPv6).",
+ nil, nil)
+ // traffic_responses_tcp4_size_bucket
+ // traffic_responses_tcp4_size_count
+ // traffic_responses_tcp4_size_sum
+ trafficStatsDesc["dns-tcp-responses-sizes-sent-ipv4"] = prometheus.NewDesc(
+ prometheus.BuildFQName(namespace, "", "traffic_responses_tcp4_size"),
+ "Size of DNS responses (TCP/IPv4).",
+ nil, nil)
+ // traffic_responses_tcp6_size_bucket
+ // traffic_responses_tcp6_size_count
+ // traffic_responses_tcp6_size_sum
+ trafficStatsDesc["dns-tcp-responses-sizes-sent-ipv6"] = prometheus.NewDesc(
+ prometheus.BuildFQName(namespace, "", "traffic_responses_tcp6_size"),
+ "Size of DNS responses (TCP/IPv6).",
+ nil, nil)
+ // traffic_responses_total_size_bucket
+ // traffic_responses_total_size_count
+ // traffic_responses_total_size_sum
+ trafficStatsDesc["dns-total-responses-sizes-sent"] = prometheus.NewDesc(
+ prometheus.BuildFQName(namespace, "", "traffic_responses_total_size"),
+ "Size of DNS responses (any transport).",
+ nil, nil)
+
+ // query_duplicates_total
+ serverStatsDesc["QryDuplicate"] = prometheus.NewDesc(
+ prometheus.BuildFQName(namespace, "", "query_duplicates_total"),
+ "Number of duplicated queries received.",
+ nil, nil)
+ // query_errors_total
+ serverStatsDesc["QryErrors"] = prometheus.NewDesc(
+ prometheus.BuildFQName(namespace, "", "query_errors_total"),
+ "Number of query failures.",
+ []string{"error"}, nil)
+ // query_recursions_total
+ serverStatsDesc["QryRecursion"] = prometheus.NewDesc(
+ prometheus.BuildFQName(namespace, "", "query_recursions_total"),
+ "Number of queries causing recursion.",
+ nil, nil)
+ // recursive_clients
+ serverStatsDesc["RecursClients"] = prometheus.NewDesc(
+ prometheus.BuildFQName(namespace, "", "recursive_clients"),
+ "Number of current recursive clients.",
+ nil, nil)
+
+ // resolver_cache_hit_ratio
+ viewStatsDesc["CacheHitRatio"] = prometheus.NewDesc(
+ prometheus.BuildFQName(namespace, "resolver", "cache_hit_ratio"),
+ "Cache effectiveness (cache hit ratio).",
+ []string{"view"}, nil)
+ // resolver_cache_hits
+ viewStatsDesc["CacheHits"] = prometheus.NewDesc(
+ prometheus.BuildFQName(namespace, "resolver", "cache_hits"),
+ "Total number of cache hits.",
+ []string{"view"}, nil)
+ // resolver_cache_misses
+ viewStatsDesc["CacheMisses"] = prometheus.NewDesc(
+ prometheus.BuildFQName(namespace, "resolver", "cache_misses"),
+ "Total number of cache misses.",
+ []string{"view"}, nil)
+ // resolver_cache_rrsets
+ viewStatsDesc["cache"] = prometheus.NewDesc(
+ prometheus.BuildFQName(namespace, "resolver", "cache_rrsets"),
+ "Number of RRsets in cache database.",
+ []string{"view", "type"}, nil)
+ // resolver_query_hit_ratio
+ viewStatsDesc["QueryHitRatio"] = prometheus.NewDesc(
+ prometheus.BuildFQName(namespace, "resolver", "query_hit_ratio"),
+ "Query effectiveness (query hit ratio).",
+ []string{"view"}, nil)
+ // resolver_query_hits
+ viewStatsDesc["QueryHits"] = prometheus.NewDesc(
+ prometheus.BuildFQName(namespace, "resolver", "query_hits"),
+ "Total number of queries that were answered from cache.",
+ []string{"view"}, nil)
+ // resolver_query_misses
+ viewStatsDesc["QueryMisses"] = prometheus.NewDesc(
+ prometheus.BuildFQName(namespace, "resolver", "query_misses"),
+ "Total number of queries that were not in cache.",
+ []string{"view"}, nil)
+
+ // resolver_dnssec_validation_errors_total
+ viewStatsDesc["ValFail"] = prometheus.NewDesc(
+ prometheus.BuildFQName(namespace, "resolver", "dnssec_validation_errors_total"),
+ "Number of DNSSEC validation attempt errors.",
+ []string{"view"}, nil)
+ // resolver_dnssec_validation_success_total
+ viewStatsDesc["ValSuccess"] = prometheus.NewDesc(
+ prometheus.BuildFQName(namespace, "resolver", "dnssec_validation_success_total"),
+ "Number of successful DNSSEC validation attempts.",
+ []string{"view", "result"}, nil)
+
+ // resolver_queries_total
+ viewStatsDesc["ResolverQueries"] = prometheus.NewDesc(
+ prometheus.BuildFQName(namespace, "resolver", "queries_total"),
+ "Number of outgoing DNS queries.",
+ []string{"view", "type"}, nil)
+
+ // resolver_query_duration_seconds_bucket
+ // resolver_query_duration_seconds_count
+ // resolver_query_duration_seconds_sum
+ viewStatsDesc["QueryDuration"] = prometheus.NewDesc(
+ prometheus.BuildFQName(namespace, "resolver", "query_duration_seconds"),
+ "Resolver query round-trip time in seconds.",
+ []string{"view"}, nil)
+
+ // resolver_query_edns0_errors_total
+ viewStatsDesc["EDNS0Fail"] = prometheus.NewDesc(
+ prometheus.BuildFQName(namespace, "resolver", "query_edns0_errors_total"),
+ "Number of EDNS(0) query errors.",
+ []string{"view"}, nil)
+ // resolver_query_errors_total
+ viewStatsDesc["ResolverQueryErrors"] = prometheus.NewDesc(
+ prometheus.BuildFQName(namespace, "resolver", "query_errors_total"),
+ "Number of failed resolver queries.",
+ []string{"view", "error"}, nil)
+ // resolver_query_retries_total
+ viewStatsDesc["Retry"] = prometheus.NewDesc(
+ prometheus.BuildFQName(namespace, "resolver", "query_retries_total"),
+ "Number of resolver query retries.",
+ []string{"view"}, nil)
+
+ // resolver_response_errors_total
+ viewStatsDesc["ResolverResponseErrors"] = prometheus.NewDesc(
+ prometheus.BuildFQName(namespace, "resolver", "response_errors_total"),
+ "Number of resolver response errors received.",
+ []string{"view", "error"}, nil)
+ // resolver_response_lame_total
+ viewStatsDesc["Lame"] = prometheus.NewDesc(
+ prometheus.BuildFQName(namespace, "resolver", "response_lame_total"),
+ "Number of lame delegation responses received.",
+ []string{"view"}, nil)
+ // resolver_response_mismatch_total
+ viewStatsDesc["Mismatch"] = prometheus.NewDesc(
+ prometheus.BuildFQName(namespace, "resolver", "response_mismatch_total"),
+ "Number of mismatch responses received.",
+ []string{"view"}, nil)
+ // resolver_response_truncated_total
+ viewStatsDesc["Truncated"] = prometheus.NewDesc(
+ prometheus.BuildFQName(namespace, "resolver", "response_truncated_total"),
+ "Number of truncated responses received.",
+ []string{"view"}, nil)
+
+ // responses_total
+ serverStatsDesc["ServerResponses"] = prometheus.NewDesc(
+ prometheus.BuildFQName(namespace, "", "responses_total"),
+ "Number of responses sent.",
+ []string{"result"}, nil)
+
+ // tasks_running
+ serverStatsDesc["tasks-running"] = prometheus.NewDesc(
+ prometheus.BuildFQName(namespace, "", "tasks_running"),
+ "Number of running tasks.",
+ nil, nil)
+ // up
+ serverStatsDesc["up"] = prometheus.NewDesc(
+ prometheus.BuildFQName(namespace, "", "up"),
+ "Was the BIND instance query successful?",
+ nil, nil)
+ // worker_threads
+ serverStatsDesc["worker-threads"] = prometheus.NewDesc(
+ prometheus.BuildFQName(namespace, "", "worker_threads"),
+ "Total number of available worker threads.",
+ nil, nil)
+
+ // zone_transfer_failure_total
+ serverStatsDesc["XfrFail"] = prometheus.NewDesc(
+ prometheus.BuildFQName(namespace, "", "zone_transfer_failure_total"),
+ "Number of failed zone transfers.",
+ nil, nil)
+ // zone_transfer_rejected_total
+ serverStatsDesc["XfrRej"] = prometheus.NewDesc(
+ prometheus.BuildFQName(namespace, "", "zone_transfer_rejected_total"),
+ "Number of rejected zone transfers.",
+ nil, nil)
+ // zone_transfer_success_total
+ serverStatsDesc["XfrSuccess"] = prometheus.NewDesc(
+ prometheus.BuildFQName(namespace, "", "zone_transfer_success_total"),
+ "Number of successful zone transfers.",
+ nil, nil)
+
+ pbe.serverStatsDesc = serverStatsDesc
+ pbe.trafficStatsDesc = trafficStatsDesc
+ pbe.viewStatsDesc = viewStatsDesc
+
+ incomingQueries := make(map[string]float64)
+ views := make(map[string]PromBind9ViewStats)
+ pbe.stats = PromBind9ExporterStats{
+ IncomingQueries: incomingQueries,
+ Views: views,
+ }
+
+ // prepare http handler
+ mux := http.NewServeMux()
+ hdlr := promhttp.HandlerFor(pbe.Registry, promhttp.HandlerOpts{})
+ mux.Handle("/metrics", hdlr)
+ pbe.HTTPServer = &http.Server{
+ Handler: mux,
+ }
+
+ return pbe
+}
+
+// Describe describes all exported metrics, including the traffic histogram
+// descriptors emitted by Collect. It implements prometheus.Collector.
+func (pbe *PromBind9Exporter) Describe(ch chan<- *prometheus.Desc) {
+	for _, descs := range []map[string]*prometheus.Desc{pbe.serverStatsDesc, pbe.trafficStatsDesc, pbe.viewStatsDesc} {
+		for _, m := range descs {
+			ch <- m
+		}
+	}
+}
+
+// collectTime collects time stats.
+func (pbe *PromBind9Exporter) collectTime(ch chan<- prometheus.Metric, key string, timeStat time.Time) {
+ if !timeStat.IsZero() {
+ ch <- prometheus.MustNewConstMetric(
+ pbe.serverStatsDesc[key],
+ prometheus.GaugeValue,
+ float64(timeStat.Unix()))
+ }
+}
+
+// qryRTTHistogram collects a histogram from QryRTT statistics.
+// RTT buckets are per second, for example bucket[0.8] stores how many query
+// round trips took up to 800 milliseconds (cumulative counter).
+// The total sum of all observed values is exposed with sum, but since named
+// does not output the actual RTT values, this is not applicable.
+// The count of events that have been observed is exposed with count and is
+// identical to bucket[+Inf].
+func (pbe *PromBind9Exporter) qryRTTHistogram(stats map[string]float64) (uint64, float64, map[float64]uint64, error) {
+ buckets := map[float64]uint64{}
+
+ for statName, statValue := range stats {
+ // Find all statistics QryRTT<n>[+].
+ // Each statistic represents a bucket with the number of
+ // queries whose RTTs are up to <n> milliseconds, excluding
+ // the count of previous buckets. Furthermore, if the
+ // statistic ends in '+', this specifies the number of queries
+		// whose RTT is higher than <n> milliseconds. So if we
+ // have the following statistics:
+ //
+ // QryRTT10: 5
+ // QryRTT50: 40
+ // QryRTT100: 10
+ // QryRTT100+: 1
+ //
+ // We have 5 queries whose RTT was below 10ms, 40 queries whose
+ // RTT was between 10ms and 50ms, 10 queries whose RTT was
+ // between 50ms and 100ms, and one query whose RTT was above
+ // 100ms.
+ // Each <n> represents a bucket and if the statistic ended in
+ // a '+' we will consider that those queries took up to an
+ // infinite time. Buckets are represented as seconds, so the
+ // expected buckets to return are:
+ //
+ // 0.01: 5
+ // 0.05: 45
+ // 0.1 : 55
+ // Inf: 56
+ if strings.HasPrefix(statName, qryRTT) {
+ var bucket float64
+ var err error
+ if strings.HasSuffix(statName, "+") {
+ bucket = math.Inf(0)
+ } else {
+ rtt := strings.TrimPrefix(statName, qryRTT)
+ bucket, err = strconv.ParseFloat(rtt, 64)
+ if err != nil {
+ return 0, math.NaN(), buckets, pkgerrors.Errorf("could not parse RTT: %s", rtt)
+ }
+ }
+ buckets[bucket/1000] = uint64(statValue)
+ }
+ }
+
+ // cumulative count
+ keys := make([]float64, 0, len(buckets))
+ for b := range buckets {
+ keys = append(keys, b)
+ }
+ sort.Float64s(keys)
+
+ var count uint64
+ for _, k := range keys {
+ count += buckets[k]
+ buckets[k] = count
+ }
+
+ return count, math.NaN(), buckets, nil
+}
+
+// trafficSizesHistogram collects a histogram from the traffic statistics as
+// 'buckets'. Size buckets are in bytes, for example bucket[47] stores how
+// many packets were at most 47 bytes long (cumulative counter). The total
+// sum of all observed values is exposed with 'sum', but since named does not
+// output the actual sizes, this is not applicable. The count of events that
+// have been observed is exposed with 'count' and is identical to bucket[+Inf].
+func (pbe *PromBind9Exporter) trafficSizesHistogram(stats map[string]float64) (count uint64, sum float64, buckets map[float64]uint64, err error) {
+ count = 0
+ sum = math.NaN()
+ buckets = map[float64]uint64{}
+
+ buckets[math.Inf(0)] = 0
+ for statName, statValue := range stats {
+ // Find all traffic statistics.
+ var bucket float64
+ var err error
+ if strings.HasSuffix(statName, "+") {
+ bucket = math.Inf(0)
+ } else {
+ // The statistic name is of the format:
+ // <sizeMin>-<sizeMax>
+ // Fetch the maximum size and put in corresponding
+ // bucket.
+ sizes := strings.SplitAfter(statName, "-")
+ if len(sizes) != 2 {
+ // bad format
+ continue
+ }
+ bucket, err = strconv.ParseFloat(sizes[1], 64)
+ if err != nil {
+ return 0, math.NaN(), buckets, pkgerrors.Errorf("could not parse size: %s", sizes[1])
+ }
+ }
+ buckets[bucket] = uint64(statValue)
+ }
+
+ // cumulative count
+ keys := make([]float64, 0, len(buckets))
+ for b := range buckets {
+ keys = append(keys, b)
+ }
+ sort.Float64s(keys)
+
+ for _, k := range keys {
+ count += buckets[k]
+ buckets[k] = count
+ }
+
+ return count, sum, buckets, nil
+}
+
+// collectResolverStat fetches a specific resolver view statistic.
+func (pbe *PromBind9Exporter) collectResolverStat(statName, view string, viewStat PromBind9ViewStats, ch chan<- prometheus.Metric) {
+ statValue, ok := viewStat.ResolverStats[statName]
+ if !ok {
+ statValue = 0
+ }
+ ch <- prometheus.MustNewConstMetric(
+ pbe.viewStatsDesc[statName],
+ prometheus.CounterValue, statValue, view)
+}
+
+// collectResolverLabelStat fetches a specific resolver view statistic
+// with a label.
+func (pbe *PromBind9Exporter) collectResolverLabelStat(statName, view string, viewStat PromBind9ViewStats, ch chan<- prometheus.Metric, labels []string) {
+ for _, label := range labels {
+ rstatValue, ok := viewStat.ResolverStats[label]
+ if !ok {
+ rstatValue = 0
+ }
+ ch <- prometheus.MustNewConstMetric(
+ pbe.viewStatsDesc[statName],
+ prometheus.CounterValue, rstatValue, view, label)
+ }
+}
+
+// Collect fetches the stats from configured location and delivers them
+// as Prometheus metrics. It implements prometheus.Collector.
+func (pbe *PromBind9Exporter) Collect(ch chan<- prometheus.Metric) {
+ var err error
+ pbe.procID, err = pbe.collectStats()
+ if pbe.procID == 0 {
+ return
+ }
+
+ // up
+ ch <- prometheus.MustNewConstMetric(pbe.serverStatsDesc["up"], prometheus.GaugeValue, float64(pbe.up))
+
+ if err != nil {
+ log.Errorf("Some errors were encountered while collecting stats from BIND 9: %+v", err)
+ }
+
+ // if not up or error encountered, don't bother collecting.
+ if pbe.up == 0 || err != nil {
+ return
+ }
+
+ // boot_time_seconds
+ pbe.collectTime(ch, "boot-time", pbe.stats.BootTime)
+ // config_time_seconds
+ pbe.collectTime(ch, "config-time", pbe.stats.ConfigTime)
+ // current_time_seconds
+ pbe.collectTime(ch, "current-time", pbe.stats.CurrentTime)
+
+ // incoming_queries_total
+ for label, value := range pbe.stats.IncomingQueries {
+ ch <- prometheus.MustNewConstMetric(
+ pbe.serverStatsDesc["qtypes"],
+ prometheus.CounterValue,
+ value, label)
+ }
+ // incoming_requests_total
+ for label, value := range pbe.stats.IncomingRequests {
+ ch <- prometheus.MustNewConstMetric(
+ pbe.serverStatsDesc["opcodes"],
+ prometheus.CounterValue,
+ value, label)
+ }
+
+ // incoming_requests_tcp
+ value, ok := pbe.stats.NsStats["ReqTCP"]
+ if !ok {
+ value = 0
+ }
+ ch <- prometheus.MustNewConstMetric(
+ pbe.serverStatsDesc["ReqTCP"],
+ prometheus.CounterValue, value)
+ // query_tcp_total
+ value, ok = pbe.stats.NsStats["QryTCP"]
+ if !ok {
+ value = 0
+ }
+ ch <- prometheus.MustNewConstMetric(
+ pbe.serverStatsDesc["QryTCP"],
+ prometheus.CounterValue, value)
+ // query_udp_total
+ value, ok = pbe.stats.NsStats["QryUDP"]
+ if !ok {
+ value = 0
+ }
+ ch <- prometheus.MustNewConstMetric(
+ pbe.serverStatsDesc["QryUDP"],
+ prometheus.CounterValue, value)
+
+ // query_duplicates_total
+ value, ok = pbe.stats.NsStats["QryDuplicate"]
+ if !ok {
+ value = 0
+ }
+ ch <- prometheus.MustNewConstMetric(
+ pbe.serverStatsDesc["QryDuplicate"],
+ prometheus.CounterValue, value)
+ // query_errors_total
+ trimQryPrefix := func(name string) string {
+ return strings.TrimPrefix(name, "Qry")
+ }
+ qryErrors := []string{"QryDropped", "QryFailure"}
+ for _, label := range qryErrors {
+ value, ok = pbe.stats.NsStats[label]
+ if !ok {
+ value = 0
+ }
+
+ ch <- prometheus.MustNewConstMetric(
+ pbe.serverStatsDesc["QryErrors"],
+ prometheus.CounterValue,
+ value, trimQryPrefix(label))
+ }
+ // query_recursion_total
+ value, ok = pbe.stats.NsStats["QryRecursion"]
+ if !ok {
+ value = 0
+ }
+ ch <- prometheus.MustNewConstMetric(
+ pbe.serverStatsDesc["QryRecursion"],
+ prometheus.CounterValue, value)
+ // recursive_clients
+ value, ok = pbe.stats.NsStats["RecursClients"]
+ if ok {
+ ch <- prometheus.MustNewConstMetric(
+ pbe.serverStatsDesc["RecursClients"],
+ prometheus.CounterValue, value)
+ }
+
+ // responses_total
+ serverResponses := []string{
+ "QrySuccess",
+ "QryReferral",
+ "QryNxrrset",
+ "QrySERVFAIL",
+ "QryFORMERR",
+ "QryNXDOMAIN",
+ }
+ for _, label := range serverResponses {
+ value, ok = pbe.stats.NsStats[label]
+ if !ok {
+ value = 0
+ }
+
+ ch <- prometheus.MustNewConstMetric(
+ pbe.serverStatsDesc["ServerResponses"],
+ prometheus.CounterValue,
+ value, trimQryPrefix(label))
+ }
+
+ // tasks_running
+ // worker_threads
+ taskMgrStats := []string{"tasks-running", "worker-threads"}
+ for _, label := range taskMgrStats {
+ value, ok = pbe.stats.TaskMgr[label]
+ if !ok {
+ value = 0
+ }
+
+ ch <- prometheus.MustNewConstMetric(
+ pbe.serverStatsDesc[label],
+ prometheus.GaugeValue, value)
+ }
+
+ // zone_transfer_failure_total
+ // zone_transfer_rejected_total
+ // zone_transfer_success_total
+ xfrStats := []string{"XfrFail", "XfrRej", "XfrSuccess"}
+ for _, label := range xfrStats {
+ value, ok = pbe.stats.NsStats[label]
+ if !ok {
+ value = 0
+ }
+ ch <- prometheus.MustNewConstMetric(
+ pbe.serverStatsDesc[label],
+ prometheus.CounterValue, value)
+ }
+
+ // Traffic metrics.
+
+ // traffic_incoming_requests_udp4_size_{bucket,count,sum}
+ // traffic_incoming_requests_udp6_size_{bucket,count,sum}
+ // traffic_incoming_requests_tcp4_size_{bucket,count,sum}
+ // traffic_incoming_requests_tcp6_size_{bucket,count,sum}
+ // traffic_incoming_requests_total_size_{bucket,count,sum}
+ // traffic_responses_udp4_size_{bucket,count,sum}
+ // traffic_responses_udp6_size_{bucket,count,sum}
+ // traffic_responses_tcp4_size_{bucket,count,sum}
+ // traffic_responses_tcp6_size_{bucket,count,sum}
+ // traffic_responses_total_size_{bucket,count,sum}
+ for label, trafficStats := range pbe.stats.TrafficStats {
+ if count, sum, buckets, err := pbe.trafficSizesHistogram(trafficStats.SizeCount); err == nil {
+ ch <- prometheus.MustNewConstHistogram(
+ pbe.trafficStatsDesc[label],
+ count, sum, buckets)
+ }
+ }
+
+ // View metrics.
+ for view, viewStats := range pbe.stats.Views {
+ // resolver_cache_rrsets
+ for rrType, statValue := range viewStats.ResolverCache {
+ ch <- prometheus.MustNewConstMetric(
+ pbe.viewStatsDesc["cache"],
+ prometheus.CounterValue,
+ statValue, view, rrType)
+ }
+
+ // resolver_cache_hit_ratio
+ // resolver_cache_hits
+ // resolver_cache_misses
+ // resolver_query_hit_ratio
+ // resolver_query_hits
+ // resolver_query_misses
+ for statName, statValue := range viewStats.ResolverCachestats {
+ if desc, ok := pbe.viewStatsDesc[statName]; ok {
+ ch <- prometheus.MustNewConstMetric(
+ desc, prometheus.CounterValue,
+ statValue, view)
+ }
+ }
+
+ // resolver_query_duration_seconds_bucket
+ // resolver_query_duration_seconds_count
+ // resolver_query_duration_seconds_sum
+ if count, sum, buckets, err := pbe.qryRTTHistogram(viewStats.ResolverStats); err == nil {
+ ch <- prometheus.MustNewConstHistogram(
+ pbe.viewStatsDesc["QueryDuration"],
+ count, sum, buckets, view)
+ }
+
+ // resolver_query_edns0_errors_total
+ pbe.collectResolverStat("EDNS0Fail", view, viewStats, ch)
+
+ // resolver_query_errors_total
+ resolverQueryErrors := []string{"QueryAbort", "QuerySockFail", "QueryTimeout"}
+ pbe.collectResolverLabelStat("ResolverQueryErrors", view, viewStats, ch, resolverQueryErrors)
+ // resolver_query_retries_total
+ pbe.collectResolverStat("Retry", view, viewStats, ch)
+ // resolver_queries_total
+ for statName, statValue := range viewStats.ResolverQtypes {
+ ch <- prometheus.MustNewConstMetric(
+ pbe.viewStatsDesc["ResolverQueries"],
+ prometheus.CounterValue,
+ statValue, view, statName)
+ }
+
+ // resolver_response_errors_total
+ resolverResponseErrors := []string{"NXDOMAIN", "SERVFAIL", "FORMERR", "OtherError"}
+ pbe.collectResolverLabelStat("ResolverResponseErrors", view, viewStats, ch, resolverResponseErrors)
+ // resolver_response_lame_total
+ pbe.collectResolverStat("Lame", view, viewStats, ch)
+ // resolver_response_mismatch_total
+ pbe.collectResolverStat("Mismatch", view, viewStats, ch)
+ // resolver_response_truncated_total
+ pbe.collectResolverStat("Truncated", view, viewStats, ch)
+
+ // resolver_dnssec_validation_errors_total
+ pbe.collectResolverStat("ValFail", view, viewStats, ch)
+ // resolver_dnssec_validation_success_total
+ valSuccess := []string{"ValOk", "ValNegOk"}
+ pbe.collectResolverLabelStat("ValSuccess", view, viewStats, ch, valSuccess)
+ }
+}
+
+// Start goroutine with main loop for collecting stats and HTTP server for
+// exposing them to Prometheus.
+func (pbe *PromBind9Exporter) Start() {
+	// initial collect
+	var err error
+	pbe.procID, err = pbe.collectStats()
+	if err != nil {
+		log.Errorf("Some errors were encountered while collecting stats from BIND 9: %+v", err)
+	}
+
+	// register collectors
+	version.Version = stork.Version
+	pbe.Registry.MustRegister(pbe, version.NewCollector("bind_exporter"))
+	pbe.procExporter = prometheus.NewProcessCollector(
+		prometheus.ProcessCollectorOpts{
+			PidFn: func() (int, error) {
+				return int(pbe.procID), nil
+			},
+			Namespace: namespace,
+		})
+	pbe.Registry.MustRegister(pbe.procExporter)
+
+	// set address for listening from config
+	addrPort := net.JoinHostPort(pbe.Settings.String("prometheus-bind9-exporter-address"), strconv.Itoa(pbe.Settings.Int("prometheus-bind9-exporter-port")))
+	pbe.HTTPServer.Addr = addrPort
+
+	log.Printf("Prometheus BIND 9 Exporter listening on %s, stats pulling interval: %d seconds", addrPort, pbe.Settings.Int("prometheus-bind9-exporter-interval"))
+
+	// start HTTP server for metrics; ErrServerClosed is expected on shutdown
+	go func() {
+		err := pbe.HTTPServer.ListenAndServe()
+		if err != nil && !errors.Is(err, http.ErrServerClosed) {
+			log.Errorf("Problem serving Prometheus BIND 9 Exporter: %s", err.Error())
+		}
+	}()
+}
+
+// Shutdown exporter goroutines and unregister prometheus stats.
+func (pbe *PromBind9Exporter) Shutdown() {
+ log.Printf("Stopping Prometheus BIND 9 Exporter")
+
+ // stop http server
+ if pbe.HTTPServer != nil {
+ ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+ defer cancel()
+
+ pbe.HTTPServer.SetKeepAlivesEnabled(false)
+ if err := pbe.HTTPServer.Shutdown(ctx); err != nil {
+ log.Warnf("Could not gracefully shut down the BIND 9 exporter: %v\n", err)
+ }
+ }
+
+ // unregister bind9 counters from prometheus framework
+ if pbe.procID > 0 {
+ pbe.Registry.Unregister(pbe.procExporter)
+ }
+ pbe.Registry.Unregister(pbe)
+
+ log.Printf("Stopped Prometheus BIND 9 Exporter")
+}
+
+// getStat is a utility to get a statistic from a map; returns nil (and logs)
+// when the statistic is absent from the response.
+func getStat(statMap map[string]interface{}, statName string) interface{} {
+	value, ok := statMap[statName]
+	if !ok {
+		log.Infof("No '%s' in response", statName)
+		return nil
+	}
+	return value
+}
+
+// scrapeServerStat is a utility to get a server statistic from a map.
+func (pbe *PromBind9Exporter) scrapeServerStat(statMap map[string]interface{}, statName string) (map[string]float64, error) {
+ storageMap := make(map[string]float64)
+
+ statIfc := getStat(statMap, statName)
+ if statIfc != nil {
+ stats, ok := statIfc.(map[string]interface{})
+ if !ok {
+ return nil, pkgerrors.Errorf("problem casting '%s' interface", statName)
+ }
+ for labelName, labelValueIfc := range stats {
+ // get value
+ labelValue, ok := labelValueIfc.(float64)
+ if !ok {
+ continue
+ }
+ // store stat value
+ storageMap[labelName] = labelValue
+ }
+ }
+ return storageMap, nil
+}
+
+// scrapeTimeStats stores time related statistics from statMap.
+func (pbe *PromBind9Exporter) scrapeTimeStats(statMap map[string]interface{}) (err error) {
+ var timeVal time.Time
+ var timeStr string
+
+ // boot_time_seconds
+ timeStr = getStat(statMap, "boot-time").(string)
+ timeVal, err = time.Parse(time.RFC3339, timeStr)
+ if err != nil {
+ return pkgerrors.Errorf("problem parsing time %+s: %+v", timeStr, err)
+ }
+ pbe.stats.BootTime = timeVal
+ // config_time_seconds
+ timeStr = getStat(statMap, "config-time").(string)
+ timeVal, err = time.Parse(time.RFC3339, timeStr)
+ if err != nil {
+ return pkgerrors.Errorf("problem parsing time %+s: %+v", timeStr, err)
+ }
+ pbe.stats.ConfigTime = timeVal
+ // current_time_seconds
+ timeStr = getStat(statMap, "current-time").(string)
+ timeVal, err = time.Parse(time.RFC3339, timeStr)
+ if err != nil {
+ return pkgerrors.Errorf("problem parsing time %+s: %+v", timeStr, err)
+ }
+ pbe.stats.CurrentTime = timeVal
+
+ return nil
+}
+
+func (pbe *PromBind9Exporter) scrapeViewStats(viewName string, viewStatsIfc interface{}) {
+ pbe.initViewStats(viewName)
+
+ viewStats, ok := viewStatsIfc.(map[string]interface{})
+ if !ok {
+ log.Errorf("Problem casting viewStatsIfc: %+v", viewStatsIfc)
+ return
+ }
+
+ // Parse resolver.
+ resolverIfc, ok := viewStats["resolver"]
+ if !ok {
+ log.Infof("No 'resolver' in viewStats: %+v", viewStats)
+ return
+ }
+ resolver, ok := resolverIfc.(map[string]interface{})
+ if !ok {
+ log.Errorf("Problem casting resolverIfc: %+v", resolverIfc)
+ return
+ }
+
+ // Parse stats.
+ statsIfc, ok := resolver["stats"]
+ if !ok {
+ log.Infof("No 'stats' in resolver: %+v", resolver)
+ return
+ }
+ resolverStats, ok := statsIfc.(map[string]interface{})
+ if !ok {
+ log.Errorf("Problem casting statsIfc: %+v", statsIfc)
+ return
+ }
+
+ // resolver_dnssec_validation_errors_total
+ // resolver_dnssec_validation_success_total
+ for statName, statValueIfc := range resolverStats {
+ // get stat value
+ statValue, ok := statValueIfc.(float64)
+ if !ok {
+ log.Errorf("Problem casting statValue: %+v", statValueIfc)
+ continue
+ }
+ // store stat value
+ pbe.stats.Views[viewName].ResolverStats[statName] = statValue
+ }
+
+ // Parse qtypes.
+ qtypesIfc, ok := resolver["qtypes"]
+ if !ok {
+ log.Infof("No 'qtypes' in resolver: %+v", resolver)
+ return
+ }
+ qtypes, ok := qtypesIfc.(map[string]interface{})
+ if !ok {
+ log.Errorf("Problem casting qtypesIfc: %+v", qtypesIfc)
+ return
+ }
+
+ // resolver_queries_total
+ for qtype, statValueIfc := range qtypes {
+ // get stat value
+ statValue, ok := statValueIfc.(float64)
+ if !ok {
+ log.Errorf("Problem casting statValue: %+v", statValueIfc)
+ continue
+ }
+ // store stat value
+ pbe.stats.Views[viewName].ResolverQtypes[qtype] = statValue
+ }
+
+ // Parse cache.
+ cacheIfc, ok := resolver["cache"]
+ if !ok {
+ log.Infof("No 'cachestats' in resolver: %+v", resolver)
+ return
+ }
+ cacheRRsets, ok := cacheIfc.(map[string]interface{})
+ if !ok {
+ log.Errorf("Problem casting cacheIfc: %+v", cacheIfc)
+ return
+ }
+
+ // resolver_cache_rrsets
+ for statName, statValueIfc := range cacheRRsets {
+ // get stat value
+ statValue, ok := statValueIfc.(float64)
+ if !ok {
+ log.Errorf("Problem casting statValue: %+v", statValueIfc)
+ continue
+ }
+ // store stat value
+ pbe.stats.Views[viewName].ResolverCache[statName] = statValue
+ }
+
+ // Parse cachestats.
+ cachestatsIfc, ok := resolver["cachestats"]
+ if !ok {
+ log.Infof("No 'cachestats' in resolver: %+v", resolver)
+ return
+ }
+ cachestats, ok := cachestatsIfc.(map[string]interface{})
+ if !ok {
+ log.Errorf("Problem casting cachestatsIfc: %+v", cachestatsIfc)
+ return
+ }
+
+ // resolver_cache_hit_ratio
+ // resolver_cache_hits
+ // resolver_cache_misses
+ // resolver_query_hit_ratio
+ // resolver_query_hits
+ // resolver_query_misses
+ var cacheHits float64
+ var cacheMisses float64
+ var queryHits float64
+ var queryMisses float64
+ for statName, statValueIfc := range cachestats {
+ // get stat value
+ statValue, ok := statValueIfc.(float64)
+ if !ok {
+ log.Errorf("Problem casting statValue: %+v", statValueIfc)
+ continue
+ }
+ switch statName {
+ case "CacheHits":
+ cacheHits = statValue
+ case "CacheMisses":
+ cacheMisses = statValue
+ case "QueryHits":
+ queryHits = statValue
+ case "QueryMisses":
+ queryMisses = statValue
+ }
+
+ // store stat value
+ pbe.stats.Views[viewName].ResolverCachestats[statName] = statValue
+ }
+
+ total := cacheHits + cacheMisses
+ if total > 0 {
+ pbe.stats.Views[viewName].ResolverCachestats["CacheHitRatio"] = cacheHits / total
+ }
+ total = queryHits + queryMisses
+ if total > 0 {
+ pbe.stats.Views[viewName].ResolverCachestats["QueryHitRatio"] = queryHits / total
+ }
+}
+
+// setDaemonStats stores the stat values from a daemon in the proper prometheus object.
+func (pbe *PromBind9Exporter) setDaemonStats(rspIfc interface{}) (ret error) {
+ rsp, ok := rspIfc.(map[string]interface{})
+ if !ok {
+ return pkgerrors.Errorf("problem casting rspIfc: %+v", rspIfc)
+ }
+
+ // boot_time_seconds
+ // config_time_seconds
+ // current_time_seconds
+ err := pbe.scrapeTimeStats(rsp)
+ if err != nil {
+ return err
+ }
+
+ // incoming_queries_total
+ pbe.stats.IncomingQueries, err = pbe.scrapeServerStat(rsp, "qtypes")
+ if err != nil {
+ return pkgerrors.Errorf("problem parsing 'qtypes': %+v", err)
+ }
+ // incoming_requests_total
+ pbe.stats.IncomingRequests, err = pbe.scrapeServerStat(rsp, "opcodes")
+ if err != nil {
+ return pkgerrors.Errorf("problem parsing 'opcodes': %+v", err)
+ }
+
+ // query_duplicates_total
+ // query_errors_total
+ // query_recursion_total
+ // recursive_clients
+ // zone_transfer_failure_total
+ // zone_transfer_rejected_total
+ // zone_transfer_success_total
+ pbe.stats.NsStats, err = pbe.scrapeServerStat(rsp, "nsstats")
+ if err != nil {
+ return pkgerrors.Errorf("problem parsing 'nsstats': %+v", err)
+ }
+
+ // tasks_running
+ // worker_threads
+ pbe.stats.TaskMgr, err = pbe.scrapeServerStat(rsp, "taskmgr")
+ if err != nil {
+ return pkgerrors.Errorf("problem parsing 'nsstats': %+v", err)
+ }
+
+ // Parse traffic stats.
+ trafficIfc, ok := rsp["traffic"]
+ if !ok {
+ return pkgerrors.Errorf("No 'traffic' in response: %+v", rsp)
+ }
+ traffic, ok := trafficIfc.(map[string]interface{})
+ if !ok {
+ return pkgerrors.Errorf("problem casting trafficIfc: %+v", trafficIfc)
+ }
+ trafficMap := make(map[string]PromBind9TrafficStats)
+ for trafficName, trafficStatsIfc := range traffic {
+ sizeCounts := make(map[string]float64)
+ trafficStats, ok := trafficStatsIfc.(map[string]interface{})
+ if !ok {
+ return pkgerrors.Errorf("problem casting '%s' interface", trafficName)
+ }
+ for labelName, labelValueIfc := range trafficStats {
+ // get value
+ labelValue, ok := labelValueIfc.(float64)
+ if !ok {
+ continue
+ }
+ // store stat value
+ sizeCounts[labelName] = labelValue
+ }
+ trafficMap[trafficName] = PromBind9TrafficStats{
+ SizeCount: sizeCounts,
+ }
+ }
+ pbe.stats.TrafficStats = trafficMap
+
+ // Parse views.
+ viewsIfc, ok := rsp["views"]
+ if !ok {
+ return pkgerrors.Errorf("no 'views' in response: %+v", rsp)
+ }
+
+ views := viewsIfc.(map[string]interface{})
+ if !ok {
+ return pkgerrors.Errorf("problem casting viewsIfc: %+v", viewsIfc)
+ }
+
+ for viewName, viewStatsIfc := range views {
+ pbe.scrapeViewStats(viewName, viewStatsIfc)
+ }
+ return nil
+}
+
+// collectStats collects stats from all bind9 apps.
+func (pbe *PromBind9Exporter) collectStats() (bind9Pid int32, lastErr error) {
+ pbe.up = 0
+
+ // go through all bind9 apps discovered by monitor and query them for stats
+ apps := pbe.AppMonitor.GetApps()
+ for _, app := range apps {
+ // ignore non-bind9 apps
+ if app.GetBaseApp().Type != AppTypeBind9 {
+ continue
+ }
+ bind9Pid = app.GetBaseApp().Pid
+
+ // get stats from named
+ sap, err := getAccessPoint(app, AccessPointStatistics)
+ if err != nil {
+ lastErr = err
+ log.Errorf("Problem getting stats from BIND 9, bad access statistics point: %+v", err)
+ continue
+ }
+ address := storkutil.HostWithPortURL(sap.Address, sap.Port, sap.UseSecureProtocol)
+ path := "json/v1"
+ url := fmt.Sprintf("%s%s", address, path)
+ httpRsp, err := pbe.HTTPClient.Call(url, nil)
+ if err != nil {
+ lastErr = err
+ log.Errorf("Problem getting stats from BIND 9: %+v", err)
+ continue
+ }
+ body, err := io.ReadAll(httpRsp.Body)
+ httpRsp.Body.Close()
+ if err != nil {
+ lastErr = err
+ log.Errorf("Problem reading stats response from BIND 9: %+v", err)
+ continue
+ }
+
+ // parse response
+ var rspIfc interface{}
+ response := string(body)
+ err = json.Unmarshal([]byte(response), &rspIfc)
+ if err != nil {
+ lastErr = err
+ log.Errorf("Failed to parse responses from BIND 9: %s", err)
+ continue
+ }
+
+ err = pbe.setDaemonStats(rspIfc)
+ if err != nil {
+ lastErr = err
+ log.Errorf("Cannot get stat from daemon: %+v", err)
+ continue
+ }
+
+ pbe.up = 1
+ }
+
+ return bind9Pid, lastErr
+}
+
+// initViewStats initializes the maps for storing metrics.
+func (pbe *PromBind9Exporter) initViewStats(viewName string) {
+ _, ok := pbe.stats.Views[viewName]
+ if !ok {
+ resolverCache := make(map[string]float64)
+ resolverCachestats := make(map[string]float64)
+ resolverQtypes := make(map[string]float64)
+ resolverStats := make(map[string]float64)
+
+ pbe.stats.Views[viewName] = PromBind9ViewStats{
+ ResolverCache: resolverCache,
+ ResolverCachestats: resolverCachestats,
+ ResolverQtypes: resolverQtypes,
+ ResolverStats: resolverStats,
+ }
+ }
+}
diff --git a/backend/agent/prombind9exporter_test.go b/backend/agent/prombind9exporter_test.go
new file mode 100644
index 0000000..2c51d6e
--- /dev/null
+++ b/backend/agent/prombind9exporter_test.go
@@ -0,0 +1,388 @@
+package agent
+
+import (
+ "flag"
+ "math"
+ "net/http"
+ "testing"
+ "time"
+
+ log "github.com/sirupsen/logrus"
+ "github.com/stretchr/testify/require"
+ "github.com/urfave/cli/v2"
+ "gopkg.in/h2non/gock.v1"
+)
+
// Fake app monitor that returns some predefined list of apps.
// Used by the prometheus BIND 9 exporter tests in place of a real monitor.
type PromFakeBind9AppMonitor struct {
	// Apps is not read by the fake's GetApps, which builds its own list;
	// kept so tests could customize the fake if needed.
	Apps []App
}
+
+func (fam *PromFakeBind9AppMonitor) GetApps() []App {
+ log.Println("GetApps")
+ accessPoints := makeAccessPoint(AccessPointStatistics, "1.2.3.4", "", 1234, false)
+ accessPoints = append(accessPoints, AccessPoint{
+ Type: AccessPointControl,
+ Address: "1.9.5.3",
+ Port: 1953,
+ Key: "abcd",
+ })
+ ba := &Bind9App{
+ BaseApp: BaseApp{
+ Type: AppTypeBind9,
+ AccessPoints: accessPoints,
+ },
+ RndcClient: nil,
+ }
+ return []App{ba}
+}
+
// GetApp always returns nil in this fake, regardless of the requested
// app type, access point type, address or port.
func (fam *PromFakeBind9AppMonitor) GetApp(appType, apType, address string, port int64) App {
	return nil
}
+
// Shutdown is a no-op; the fake monitor holds no resources to release.
func (fam *PromFakeBind9AppMonitor) Shutdown() {
}
+
// Start is a no-op; the fake monitor does not track the agent.
func (fam *PromFakeBind9AppMonitor) Start(storkAgent *StorkAgent) {
}
+
+// Check creating PromBind9Exporter, check if prometheus stats are set up.
+func TestNewPromBind9ExporterBasic(t *testing.T) {
+ fam := &PromFakeBind9AppMonitor{}
+ settings := cli.NewContext(nil, flag.NewFlagSet("", 0), nil)
+ pbe := NewPromBind9Exporter(settings, fam)
+ defer pbe.Shutdown()
+
+ require.NotNil(t, pbe.HTTPClient)
+ require.NotNil(t, pbe.HTTPServer)
+ require.Len(t, pbe.serverStatsDesc, 19)
+ require.Len(t, pbe.viewStatsDesc, 18)
+}
+
+// Check starting PromBind9Exporter and collecting stats.
+func TestPromBind9ExporterStart(t *testing.T) {
+ defer gock.Off()
+ gock.New("http://1.2.3.4:1234/").
+ Post("/").
+ AddMatcher(func(r1 *http.Request, r2 *gock.Request) (bool, error) {
+ // Require empty body
+ return r1.Body == nil, nil
+ }).
+ Persist().
+ Reply(200).
+ BodyString(`{ "json-stats-version": "1.2",
+ "boot-time": "2020-04-21T07:13:08.888Z",
+ "config-time": "2020-04-21T07:13:09.989Z",
+ "current-time": "2020-04-21T07:19:28.258Z",
+ "version":"9.16.2",
+ "qtypes": {
+ "A": 201,
+ "AAAA": 200,
+ "DNSKEY": 53
+ },
+ "opcodes": {
+ "QUERY": 454,
+ "IQUERY": 0,
+ "UPDATE": 1
+ },
+ "nsstats": {
+ "ReqEdns0":100,
+ "Requestv4":206,
+ "RespEDNS0":123,
+ "Response":454,
+ "QryDropped":9,
+ "QryDuplicate":15,
+ "QryFailure":3,
+ "QryNoauthAns":222,
+ "QryRecursion":303,
+ "QryNxrrset":5,
+ "QryNXDOMAIN":55,
+ "QrySERVFAIL":555,
+ "QrySuccess":111,
+ "QryUDP":404,
+ "QryTCP":303,
+ "XfrFail": 2,
+ "XfrRej": 11,
+ "XfrSuccess": 22
+ },
+ "taskmgr": {
+ "tasks-running": 1,
+ "worker-threads": 4
+ },
+ "traffic": {
+ "dns-udp-requests-sizes-received-ipv4":{
+ "32-47":206,
+ "128+":24
+ },
+ "dns-udp-responses-sizes-sent-ipv4":{
+ "96-111":196,
+ "112-127":10
+ },
+ "dns-tcp-requests-sizes-received-ipv4":{
+ "32-47":12
+ },
+ "dns-tcp-responses-sizes-sent-ipv4":{
+ "128-143":12
+ },
+ "dns-tcp-requests-sizes-received-ipv6":{
+ },
+ "dns-tcp-responses-sizes-sent-ipv6":{
+ }
+ },
+ "views": {
+ "_default": {
+ "resolver": {
+ "cache": {
+ "A": 37,
+ "AAAA": 38,
+ "DS": 2
+ },
+ "cachestats": {
+ "CacheHits": 40,
+ "CacheMisses": 10,
+ "QueryHits": 30,
+ "QueryMisses": 20
+ },
+ "qtypes": {
+ "A": 37,
+ "NS": 7,
+ "AAAA": 36,
+ "DS": 6,
+ "RRSIG": 21,
+ "DNSKEY": 4
+ },
+ "stats": {
+ "EDNS0Fail": 5,
+ "FORMERR": 13,
+ "NXDOMAIN": 50,
+ "SERVFAIL": 404,
+ "OtherError": 42,
+ "Lame": 9,
+ "Mismatch": 10,
+ "Truncated": 7,
+ "QueryAbort": 1,
+ "QueryTimeout": 10,
+ "QryRTT10": 2,
+ "QryRTT100": 18,
+ "QryRTT500": 37,
+ "QryRTT800": 3,
+ "QryRTT1600": 1,
+ "QryRTT1600+": 4,
+ "Retry": 71,
+ "ValAttempt": 25,
+ "ValFail": 5,
+ "ValNegOk": 3,
+ "ValOk": 17
+ }
+ }
+ }
+ }
+ }`)
+ fam := &PromFakeBind9AppMonitor{}
+ flags := flag.NewFlagSet("test", 0)
+ flags.Int("prometheus-bind9-exporter-port", 9119, "usage")
+ flags.Int("prometheus-bind9-exporter-interval", 10, "usage")
+ settings := cli.NewContext(nil, flags, nil)
+ settings.Set("prometheus-bind9-exporter-port", "1234")
+ settings.Set("prometheus-bind9-exporter-interval", "1")
+ pbe := NewPromBind9Exporter(settings, fam)
+ defer pbe.Shutdown()
+
+ gock.InterceptClient(pbe.HTTPClient.client)
+
+ // start exporter
+ pbe.Start()
+ require.EqualValues(t, 1, pbe.up)
+
+ // boot_time_seconds
+ expect, _ := time.Parse(time.RFC3339, "2020-04-21T07:13:08.888Z")
+ require.EqualValues(t, expect, pbe.stats.BootTime)
+ // config_time_seconds
+ expect, _ = time.Parse(time.RFC3339, "2020-04-21T07:13:09.989Z")
+ require.EqualValues(t, expect, pbe.stats.ConfigTime)
+ // current_time_seconds
+ expect, _ = time.Parse(time.RFC3339, "2020-04-21T07:19:28.258Z")
+ require.EqualValues(t, expect, pbe.stats.CurrentTime)
+
+ // incoming_queries_total
+ require.EqualValues(t, 201.0, pbe.stats.IncomingQueries["A"])
+ require.EqualValues(t, 200.0, pbe.stats.IncomingQueries["AAAA"])
+ require.EqualValues(t, 53.0, pbe.stats.IncomingQueries["DNSKEY"])
+ // incoming_requests_total
+ require.EqualValues(t, 454.0, pbe.stats.IncomingRequests["QUERY"])
+ require.EqualValues(t, 1.0, pbe.stats.IncomingRequests["UPDATE"])
+ require.EqualValues(t, 0.0, pbe.stats.IncomingRequests["IQUERY"])
+
+ // incoming_queries_tcp
+ require.EqualValues(t, 303.0, pbe.stats.NsStats["QryTCP"])
+ // incoming_queries_udp
+ require.EqualValues(t, 404.0, pbe.stats.NsStats["QryUDP"])
+
+ // query_duplicates_total
+ require.EqualValues(t, 15.0, pbe.stats.NsStats["QryDuplicate"])
+ // query_errors_total
+ require.EqualValues(t, 9.0, pbe.stats.NsStats["QryDropped"])
+ require.EqualValues(t, 3.0, pbe.stats.NsStats["QryFailure"])
+ // query_recursion_total
+ require.EqualValues(t, 303.0, pbe.stats.NsStats["QryRecursion"])
+ // recursive_clients (unset value)
+ require.EqualValues(t, 0.0, pbe.stats.NsStats["RecursClients"])
+
+ // resolver_cache_rrsets
+ require.EqualValues(t, 37.0, pbe.stats.Views["_default"].ResolverCache["A"])
+ require.EqualValues(t, 38.0, pbe.stats.Views["_default"].ResolverCache["AAAA"])
+ require.EqualValues(t, 2.0, pbe.stats.Views["_default"].ResolverCache["DS"])
+
+ // resolver_cache_hit_ratio
+ require.EqualValues(t, 0.8, pbe.stats.Views["_default"].ResolverCachestats["CacheHitRatio"])
+ // resolver_cache_hits
+ require.EqualValues(t, 40.0, pbe.stats.Views["_default"].ResolverCachestats["CacheHits"])
+ // resolver_cache_misses
+ require.EqualValues(t, 10.0, pbe.stats.Views["_default"].ResolverCachestats["CacheMisses"])
+ // resolver_query_hit_ratio
+ require.EqualValues(t, 0.6, pbe.stats.Views["_default"].ResolverCachestats["QueryHitRatio"])
+ // resolver_query_hits
+ require.EqualValues(t, 30.0, pbe.stats.Views["_default"].ResolverCachestats["QueryHits"])
+ // resolver_query_misses
+ require.EqualValues(t, 20.0, pbe.stats.Views["_default"].Resolve